Ambari 1.2.2 Release Candidate 1

git-svn-id: https://svn.apache.org/repos/asf/incubator/ambari/tags/release-1.2.2-rc1@1465031 13f79535-47bb-0310-9956-ffa450edef68
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/InOperator.java b/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/InOperator.java
index af14831..a879d7f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/InOperator.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/InOperator.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.ambari.server.api.predicate.operators;
 
 import org.apache.ambari.server.api.predicate.InvalidQueryException;
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/CategoryPredicateTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/CategoryPredicateTest.java
index 9abdd4e..7774e2d 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/CategoryPredicateTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/CategoryPredicateTest.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.ambari.server.controller.predicate;
 
 import junit.framework.Assert;
diff --git a/branch-1.2/.gitignore b/branch-1.2/.gitignore
deleted file mode 100644
index f4274d9..0000000
--- a/branch-1.2/.gitignore
+++ /dev/null
@@ -1,20 +0,0 @@
-.classpath
-.project
-.settings
-.idea/
-.iml/
-.DS_Store
-target
-/ambari-server/derby.log
-/ambari-server/pass.txt
-/ambari-web/npm-debug.log
-/ambari-web/public/
-/ambari-web/node_modules/
-*.pyc
-*.py~
-*.iml
-.hg
-.hgignore
-.hgtags
-derby.log
-pass.txt
diff --git a/branch-1.2/CHANGES.txt b/branch-1.2/CHANGES.txt
deleted file mode 100644
index 2a07324..0000000
--- a/branch-1.2/CHANGES.txt
+++ /dev/null
@@ -1,1575 +0,0 @@
-Ambari Change Log
-
-Notes:
- - Committers should be listed using their login and non-committers
-should be listed by their full name.
- - Please keep the file to a max of 80 characters wide.
- - Put latest commits first in each section.
-
-Trunk (unreleased changes):
-
- INCOMPATIBLE CHANGES 
-
- NEW FEATURES
-
- AMBARI-1349. Expose host-specific Nagios alerts in Ambari Web. (yusaku)
-
- AMBARI-1294. Add isEmpty() query operator support. (jspeidel)
-
- AMBARI-1280. Support explicit predicate grouping in API queries. (jspeidel)
-
- AMBARI-1180. Display host check status results given by the agent as part
- of host registration. (yusaku)
-
- AMBARI-1252. Fetch Nagios alerts through Ambari Server and not directly
- from Nagios Server. (srimanth via yusaku)
-
- AMBARI-1237. Expose Nagios alerts via Rest API. (Nate Cole via jspeidel)
-
- AMBARI-1163. During agent registration and heartbeat, send information about
- various hadoop artifacts back to Ambari. (Nate Cole via mahadev)
-
- AMBARI-1194. API support for cascade delete of a specified cluster
- (Tom Beerbower via mahadev)
-
- AMBARI-1255. Make the agent hostname determination scriptable. 
- (mahadev)
-
- AMBARI-1267. Store example Hive Queries somewhere in Ambari that's easily
- accessible for demo/test purposes. (mahadev)
-
- IMPROVEMENTS
-
- AMBARI-1437. Update stack version. (yusaku)
-
- AMBARI-1429. Update API docs. (jspeidel)
-
- AMBARI-1430. Increase UI timeout for long running API operations. (yusaku)
-
- AMBARI-1427. Add ability to increase the time range for the zoomed-in graphs
- beyond last one hour. (yusaku) 
-
- AMBARI-1375. Remove text from templates (main). (jaimin)
-
- AMBARI-1374. Add filter by alerts on the Hosts page. (jaimin)
-
- AMBARI-1373. Since there is the ability to log in to Ambari Web as 
- different users, the current user should be indicated. (jaimin)
-
- AMBARI-1366. Nagios alert tweaks. (jaimin)
-
- AMBARI-1365. Make Hosts table update dynamically. (jaimin)
-
- AMBARI-1361. Install progress dialog WARN icon + color. (jaimin)
-
- AMBARI-1347. Expose host-level alerts via nagios_alerts.php with associated
- service component names. (yusaku)
-   
- AMBARI-1348. Externalize strings to messages.js. (yusaku)
-
- AMBARI-1342. Hive client is not installed on Nagios server host.
- (jaimin)
-
- AMBARI-1341. Add Hosts: update the API call for new operator precedence.
- (yusaku) 
-
- AMBARI-1340. Enhance Install/Start/Test progress display. (yusaku) 
-
- AMBARI-1339. Validate usernames in Misc section of Customize Services step
- in Install Wizard. (yusaku)
-
- AMBARI-1335. Show validation error when the user specifies target hosts that
- are already part of the cluster. (yusaku)
-
- AMBARI-1337. Refactor Job Browser filter. (yusaku)
-
- AMBARI-1336. Externalize text to messages.js. (yusaku)
-
- AMBARI-1334. Show hosts that have failed install tasks as "red" to allow the
- user to easily identify source of failure. (yusaku)
-
- AMBARI-1333. Add username validation for Ambari local users. (yusaku) 
-
- AMBARI-1329. Adjust job browser column sizing. (yusaku)
- 
- AMBARI-1327. Add Hosts. Remove existing hosts display. (Alexandr Antonenko via jspeidel)
-
- AMBARI-1326. Remake clearFilters function in app_view (part 3). (srimanth)
- 
- AMBARI-1305. Make sure that Ambari Web renders all elements correctly when
- the browser width is 1024px or narrower (refactor). (Arun Kandregula via 
- yusaku) 
-
- AMBARI-1312. Remake clearFilters function in app_view (part 2). (Arun Kandregula
- via yusaku) 
- 
- AMBARI-1309. Remove all text from Apps views, controllers, templates to 
- messages.js. (Arun Kandregula via yusaku)
-
- AMBARI-1308. Properly display Apps page aggregate summary and data table when
- there are no data to be shown. (Arun Kandregula via yusaku)
-
- AMBARI-1306. Change color of rack_local_map to #66B366. (yusaku)
-
- AMBARI-1311. Host health indicator should have a tooltip showing a few details -
- refactoring. (Arun Kandregula via yusaku)
-
- AMBARI-1303. Remake clearFilters function in app_view. (Arun Kandregula via
- yusaku)
-
- AMBARI-1302. Minor label cleanup on Jobs Charts popup. (Arun Kandregula via
- yusaku)
-
- AMBARI-1296. Task log popup footer should be statically placed; only the
- content should scroll vertically. (Jaimin Jetly via yusaku)
-
- AMBARI-1295. Move cluster name display from the main nav to the top nav.
- (Jaimin Jetly via yusaku)
-
- AMBARI-1268. Improve DAG UI. (billie via yusaku)
-
- AMBARI-1289. App page: remove old code and fix test mode. (srimanth via
- yusaku)
-
- AMBARI-1279. Make sure that Ambari Web renders all elements correctly when
- the browser width is 1024px or narrower. (srimanth via yusaku)
-
- AMBARI-1274. Shrink top nav height. (srimanth)
- 
- AMBARI-1272. Controller javascripts need comments. (srimanth)
- 
- AMBARI-1271. On Confirm Hosts page, add a link to show the Host Checks popup 
- in the success message. (yusaku via srimanth)
-
- AMBARI-1193. If Install fails, allow user to go back to any previous step so 
- that the user can retry install with different configuration parameters.
- (yusaku via srimanth)
-
- AMBARI-1265. Job Browser - Filter by Input, output and duration. (yusaku)
-
- AMBARI-1263. Refactoring of User Management code. (yusaku)
-
- AMBARI-1254. Modify App Browser to use server-side paging/sorting/filtering.
- (yusaku)
-
- AMBARI-1258. Minor refactoring of User Management code. (yusaku)
-
- AMBARI-1253. Use ember-precompiler-brunch npm plugin. (yusaku)
-
- AMBARI-1236. Display a progress bar during deploy prep. (yusaku)
-
- AMBARI-1249. Update mock data to make App.testMode work. (yusaku)
-
- AMBARI-1239. Host health status should show orange when there is at least one
- slave component on the host with state!=STARTED. (yusaku)
-
- AMBARI-1248. Refactoring of update, services and hosts mapper. (yusaku)
-
- AMBARI-1247. Disable links for previous steps in left nav on Summary step.
- (yusaku)
-
- AMBARI-1246. Add user minor improvements. (yusaku)
-
- AMBARI-1245. Do not let the user go back to the previous step while host
- bootstrap is in progress. (yusaku)
-
- AMBARI-1244. Install Options - line up the Target Hosts section with the rest
- of the page. (yusaku)
-
- AMBARI-1235. Host health indicator should have a tooltip showing details.
- (yusaku)
- 
- AMBARI-1234. On Heatmap host hover, include list of components running.
- (yusaku)
-
- AMBARI-1229. Dashboard - make disk usage pie chart in HDFS summary easier
- to understand. (yusaku)
-
- AMBARI-1228. During Install, show "warn" on hosts that have tasks cancelled.
- (yusaku)
-
- AMBARI-1225. Add Hosts wizard popup is too small. (yusaku)
-
- AMBARI-1224. Drop the "all" option from Hosts > Component Filter and
- Jobs > Users Filter. (yusaku)
-
- AMBARI-1223. Confirm Hosts page: It looks like hosts disappear if you are
- on "Fail" filter and click on "Retry Failed" button. (yusaku)
-
- AMBARI-1222. DAG, Jobs Timeline, and Tasks graphs UI cleanup. (yusaku)
-
- AMBARI-1221. There is no default sort order on Hosts table and the order
- changes on every page refresh - should sort by hostname. (yusaku)
-
- AMBARI-1220. Oozie service summary update. (yusaku)
-
- AMBARI-1218. Refactor Job Browser User filter. (yusaku)
-
- AMBARI-1217. Tighten up spacing for the rows in the Hosts table. (yusaku)
-
- AMBARI-1216. Add filters module. (yusaku)
-
- AMBARI-1215. Refactor hostComponent isSlaves and isMaster and add update
- methods for server mapper. (yusaku)
-
- AMBARI-1214. If any start fails, "warn" the host and the overall install.
- (yusaku)
-
- AMBARI-1204. Install Wizard: Re-enable configuration of user/group names for
- master component daemons. (yusaku)
-
- AMBARI-1197. Refactor code for graphs. (yusaku)
-
- AMBARI-1196. Automatically update host-level popup info/logs. (yusaku)
-
- AMBARI-1189. Add App.Job class. (yusaku)
-
- AMBARI-1188. Refactor isClient computed property for HostComponent class.
- (yusaku)
-
- AMBARI-1186. Add Run class to represent a job run. (yusaku)
-
- AMBARI-1185. Refactor the method to check if the user is an admin.
- (yusaku)
-
- AMBARI-1183. Directories in the service config textarea should not wrap.
- (yusaku)
-
- AMBARI-1182. Clean up table header UI for sorting and filter clear "x" for
- Jobs table. (yusaku)
-
- AMBARI-1181. Clean up table header UI for sorting and filter clear "x" for
- Hosts table. (yusaku)
-
- AMBARI-1198. Ambari API Performance: Parsing of Ganglia json data is slow.
- (jspeidel via mahadev)
-
- AMBARI-1213. Cleanup python test cases and introduce third party library for
- mock testing python code. (mahadev)
-
- AMBARI-1206. Expose missing metrics on host components. (tbeerbower via
- mahadev)
-
- AMBARI-1205. Cannot persist service configuration when service is started.
- (Siddharth Wagle via mahadev)
-
- AMBARI-1262. Apache Ambari points to dev URL; needs fix in pom.xml.
- (mahadev)
-
- AMBARI-1207. Remove /hdp as the httpd conf for any of the nagios urls -
- should replace it with ambarinagios or something else.
- (mahadev)
-
- AMBARI-1277. Failing build due to url moved on Suse. (mahadev)
-
- AMBARI-1288. Change "authorization" to "authentication" in props setup for
- LDAP. (mahadev)
-
- AMBARI-1269. Refactor ResourceProvider SPI. (tbeerbower)
- 
- AMBARI-1270. Add predicate objects for checking empty resource category.
- (tbeerbower)
-
- AMBARI-1286. Set version number property in gsInstaller cluster resource
- provider. (tbeerbower)
-
- AMBARI-1287. Monitor for component/service state for gsInstaller resource provider. (tbeerbower)
-
- AMBARI-1260. Remove hard coded JMX port mappings. (Siddharth Wagle via
- mahadev)
-
- AMBARI-1411. Missing unit test coverage for resource providers. (tbeerbower)
-
- AMBARI-1433. Allow capacity scheduler to be configurable via the APIs.
- (mahadev)
-
- AMBARI-1435. L2 Cache does not work due to Eclipse Link exception.
- (Sid Wagle via mahadev)
-
- AMBARI-1436. Threads blocking on ClustersImpl.getHost for several minutes.
- (Sid Wagle via mahadev)
-
- AMBARI-1438. Add new stack definition for new stacks. (mahadev)
-
- AMBARI-1448. Enabling stack upgrade via Ambari Server. (mahadev)
-
- AMBARI-1439. rrd file location should be read from global config. (Siddharth
- Wagle via mahadev)
-
- AMBARI-1466. Optimize ganglia rrd script to be able to respond within
- reasonable time to queries made by the UI. (mahadev)
-
- AMBARI-1474. Upgrade stack definition for HBase for 1.2.2 since the version
- is upgraded. (mahadev)
-
- AMBARI-1475. Update the version of ambari artifacts to 1.2.2 snapshot.
- (mahadev)
-
- AMBARI-1489. Add hadoop-lzo to be one of the rpms to check for before
- installation. (mahadev)
-
- BUG FIXES
-
- AMBARI-1795. Add Hosts - retrying install shows all progress bars in red
- and causes the wizard to get stuck on the progress page. (yusaku)
-
- AMBARI-1794. Add Host install retry shuts down all services in the cluster.
- (yusaku)
-
- AMBARI-1815. After modifying custom configs, properties are shown as blank,
- leading to corrupt core-site.xml upon save. (yusaku)
-
- AMBARI-1748. JDK option on the UI when used is not passed onto the global 
- parameters. (srimanth)
- 
- AMBARI-1463. State of HBase region server not updated when instance is shut down on a cluster not installed via Ambari. (tbeerbower)
-
- AMBARI-1446. URL used by API to invoke Ganglia rrd script may exceed max length 
- for query string for large clusters. (jspeidel)
-
- AMBARI-1431. Hosts table no longer allows sorting. (yusaku)
-
- AMBARI-1376. Wrong calculation of duration filter on apps page. (jaimin via
- yusaku)
-
- AMBARI-1165. Change the dashboard graph for HBase since it's using cumulative
- metrics. (yusaku)
-
- AMBARI-1372. Three sorting states on Jobs table. (jaimin)
- 
- AMBARI-1350. UI screen shifts left-right depending on scrollbar. (jaimin)
-
- AMBARI-1367. Job# for MapReduce jobs is seen as x. (jaimin)
-
- AMBARI-1363. Graphs jump around upon loading. (jaimin)
-
- AMBARI-1362. Alerts for the hosts with ZooKeeper Server grows on every poll. (jaimin)
-
- AMBARI-1360. Mouse cursor hover behavior is strange on Job Browser. (jaimin) 
-
- AMBARI-1359. App Browser rows colours should alternate from dark grey to light 
- grey and back. (jaimin)
-
- AMBARI-1356. Error in filtering Configuration properties maintained at UI for 
- WebHcat service. (jaimin)
-
- AMBARI-1352. Host-level alert badges should only show the total number
- of CRIT and WARN alerts for the host excluding OK. (jaimin)
-
- AMBARI-1355. Inconsistent casing and component name for alert title. (jaimin)
-
- AMBARI-1354. "No alerts" badge on the Host Detail page should be green, not red. (jaimin)
-
- AMBARI-1353. "Missing translation" shown in Job Browser. (jaimin)
-
- AMBARI-1351. Provide consistent ordering of hosts in heatmap. (jaimin)
-
- AMBARI-1344. mapred.tasktracker.reduce.tasks.maximum in mapred-site.xml is not
- taking effect. (yusaku)
-
- AMBARI-1345. Alerts are not showing up at all in Service pages. (yusaku)
-
- AMBARI-1346. The number of task trackers does not reflect the actual number
- in MapReduce service summary after new TaskTrackers have been added until
- page refresh. (yusaku)
-
- AMBARI-1331. Step 8 hangs on deploy task 2 of 59, server has exception. (tbeerbower)
-
- AMBARI-1164. Disk info Ganglia metrics are broken for some OS. (Dmytro Shkvyra via jspeidel)
-
- AMBARI-1325. Left border is missing from the main nav. (srimanth)
- 
- AMBARI-1324. Job Browser default sort order should be Run Date DESC. (srimanth)
- 
- AMBARI-1323. Job Browser's column sizing needs to be improved on Firefox. (srimanth)
-
- AMBARI-1321. Switching out of Jobs page does not launch popup anymore.
- (srimanth via yusaku) 
-
- AMBARI-1313. Alert time jumps between 'less than a minute ago' and 'about a
- minute ago'. (srimanth via yusaku) 
-
- AMBARI-1304. When switching jobs in timeline + tasks charts, blank charts show.
- (Arun Kandregula via yusaku) 
-
- AMBARI-1317. Deploy progress returns to deploy screen (momentarily).
- (Arun Kandregula via yusaku) 
-
- AMBARI-1316. Vertical scrollbar shows regardless of how tall the browser height
- is (content height is always slightly taller than viewport). (Arun Kandregula
- via yusaku)
-
- AMBARI-1315. Inconsistent error/warning status in Deploy step; install
- stalls. (Arun Kandregula via yusaku)
-
- AMBARI-1281. Heatmap does not show up if the cluster was installed by going
- back to a previous step from the Deploy step after an install failure.
- (yusaku)
-
- AMBARI-1300. Service status / host component status can get stuck in the
- green blinking state if stop fails - no further operation can be performed.
- (srimanth via yusaku) 
-
- AMBARI-1297. Edit User: if "old password" is not specified and "new
- password" is specified, password update silently fails. (Jaimin Jetly via
- yusaku)
-
- AMBARI-1282. Admin user can lose its own admin privilege. (Jaimin Jetly
- via yusaku)
-
- AMBARI-1292. Add hosts should skip host checks on existing list of cluster
- nodes. (srimanth via yusaku)
-
- AMBARI-1290. Left border is missing from the summary section on Jobs page.
- (srimanth via yusaku)
-
- AMBARI-1278. Cannot proceed from Step 3 to Step 4 in App.testMode (Next 
- button is disabled). (srimanth)
-
- AMBARI-1276. Job Graphs need to show x-axis ticks for elapsed time since 
- submission. (srimanth)
-
- AMBARI-1275. Incorrect displaying "Background operations" window after 
- changing state of component. (srimanth)
-
- AMBARI-1273. Edit User: No error message is shown when the user does not 
- enter the correct "old password". (srimanth)
-
- AMBARI-1172. Alert status change does not change time for the alerts.
- (srimanth via yusaku) 
-
- AMBARI-1264. Service graphs refresh with spinners. (yusaku)
-
- AMBARI-1257. Separator missing in between Oozie and ZooKeeper. (yusaku)
-
- AMBARI-1251. Fix routing issues on Add Host Wizard. (yusaku)
-
- AMBARI-1230. There is a big gap in the lower part of the Jobs table header.
- (yusaku)
-
- AMBARI-1212. After successful install with Ambari, the user is taken to the
- welcome page of the Install Wizard upon browser relaunch if the HTTP session
- is expired. (yusaku)
-
- AMBARI-1227. Host-level task popup is not showing the display name for
- components. (yusaku)
-
- AMBARI-1226. On Dashboard, links to host components are missing. (yusaku)
-
- AMBARI-1219. After adding hosts, the number of live TaskTrackers is not
- updated. (yusaku)
-
- AMBARI-1176. In some cases, once Add Hosts wizard has run once, it requires
- a log out before the Add Hosts wizard can be run again. (yusaku)
-
- AMBARI-1203. mapred-site.xml default system directory is not set
- to /mapred/system. (yusaku)
-
- AMBARI-1200. On some clusters, Nagios alerts show up about 30 seconds after
- page load, while on others the alerts show up immediately. (srimanth via
- yusaku)
-
- AMBARI-1190. Detailed log view dialogs are not center-aligned. (yusaku)
-
- AMBARI-1187. Dashboard > MapReduce mini chart sometimes shows partial graph and hides recent data. (yusaku)
-
- AMBARI-1184. After adding hosts, the host count shown in the Dashboard is
- incorrect. (yusaku)
-
- AMBARI-1178. Fix use of IP address for JMX metrics request. (tbeerbower
- via mahadev)
-
- AMBARI-1191. Datatable API needs work. (Billie Rinaldi via mahadev)
-
- AMBARI-1211. Ability to configure the same username for all the services in
- Ambari. (mahadev)
-
- AMBARI-1231. Replace sudo with su in the ambari setup script since ambari
- server setup is already run as root. (mahadev)
-
- AMBARI-1201. Improve Agent Registration and Heartbeat json. (Nate Cole via
- mahadev)
-
- AMBARI-1238. AmbariMetaInfoTest getServices() acceptance test failure. 
- (Siddharth Wagle via mahadev)
-
- AMBARI-1243. Remove unwanted import causing the builds to fail on linux.
- (mahadev)
-
- AMBARI-1233. Directory permissions on httpd /var/www/cgi-bin should not be
- touched by Ambari. (mahadev)
-
- AMBARI-1170. For live status checks we should only look at the run
- directories that we get from the server (only for hadoop and its ecosystem)
- and not all. (mahadev)
-
- AMBARI-1250. Upgrade the postgres connector to 9.1.
- (mahadev)
-
- AMBARI-1259. Fix the host roles live status not going back to INSTALLED if it
- was in START_FAILED state. (mahadev)
-
- AMBARI-1210. Allow capacity scheduler to be attached to host role configs for
- CS configurability in the APIs. (mahadev)
-
- AMBARI-1256. Host registration can fail due to mount point info not fitting
- ambari.hosts::disks_info column. (Sumit Mohanty via mahadev)
-
- AMBARI-1266. Agent checks packages as part of host check but doesn't tell
- which ones are needed or conflicting. (mahadev)
-
- AMBARI-1291. Incorrect directory for MySQL component on SLES-11.1sp1.
- (mahadev)
-
- AMBARI-1301. Live status checks don't get triggered on server restart.
- (mahadev)
-
- AMBARI-1285. Some host Ganglia metrics may be missing in some cases. (tbeerbower)
-
- AMBARI-1310. Get rid of mvn warnings. (Arun Kumar via mahadev)
-
- AMBARI-1314. Hostname test is failing in some environments. (Nate Cole via
- mahadev) 
-
- AMBARI-1330. Cluster missing hosts after successful install and restart.
- (mahadev)
-
- AMBARI-1358. Clean up alert messages. (Yusaku Sako via mahadev)
-
- AMBARI-1432. Ambari Agent registration hangs due to Acceptor bug in Jetty for
- not reading through accepted connections. (mahadev)
-
- AMBARI-1434. Change state to installed from start_failed if there is any
- issue in starting a host component. (mahadev)
-
- AMBARI-1476. Change webhcat-env.sh to export HADOOP_HOME.
- (mahadev)
-
- AMBARI-1486. Fix TestHostName to take care of issues when gethostname and
- getfqdn do not match. (mahadev)
-
- AMBARI-1495. Out of Memory Issues on Ambari Server when server is running on
- single core. (mahadev)
-
- AMBARI-1487. Fix alerts at host level so that, if MapReduce is not selected,
- there is no alert for tasktrackers not running. (mahadev)
-
- AMBARI-1488. Nagios script causes unwanted Datanode logs. (mahadev)
-
- AMBARI-1497. Fix start up option for ambari-server where there is a missing
- space. (mahadev)
-
-AMBARI-1.2.0 branch:
-
- INCOMPATIBLE CHANGES
- 
- NEW FEATURES
-
- AMBARI-1108. PUT call to change the state on host_components collection
- returns 200 (no op), even though GET with the same predicate returns a number
- of host_components. (Tom Beerbower via mahadev)
-
- AMBARI-1114. BootStrap fails but the API says that it's done and exit status
- is 0. (Nate Cole via mahadev)
-
- AMBARI-1136. Add gsInstaller resource provider. (Tom Beerbower via mahadev)
-
- AMBARI-1202. Unnecessary use of XML tree Python library in ambari-server
- setup. It's not being used. (Siddharth Wagle via mahadev)
-
- IMPROVEMENTS
-
- BUG FIXES
-
- AMBARI-1179. ambari-web does not compile due to less-brunch package update.
- (yusaku)
-
- AMBARI-1126. Change SUSE lzo dependency to only lzo-devel. (Nate Cole via
- mahadev)
-
-AMBARI-666 branch:
-
-  INCOMPATIBLE CHANGES
-
-  NEW FEATURES
-
-  AMBARI-1147. Handling Hive/HCat/WebHCat configuration parameters with
-  Ambari Web. (yusaku)
-
-  AMBARI-946. Support retrieving information for multiple requests.
-  (hitesh via mahadev)
-
-  AMBARI-1065. Provide Datanode decommission & recommission capability in UI.
-  (Srimanth Gunturi via mahadev)
-
-  AMBARI-985. Support OR in API query. (Tom Beerbower via mahadev)
-
-  AMBARI-1029. Add api support for updating multiple host_component resources
-  for multiple hosts in a single request. (John Speidel via mahadev)
-
-  AMBARI-1018. Add API support for creating multiple sub-resources to
-  multiple resources in a single request. (John Speidel via mahadev)
-
-  AMBARI-950. Provide API support for 'OR' predicate. (John Speidel via
-  mahadev)
-
-  AMBARI-935. Provide API support for updates of multiple resources in a
-  single request. (John Speidel via mahadev)
-
-  AMBARI-926. Provide API support for asynchronous requests.
-  (John Speidel via mahadev)
-
-  AMBARI-1054. Implement retrying of bootstrap on confirm host page.
-  (Jaimin Jetly via yusaku)
-
-  AMBARI-1048. Integrate slave configuration parameters with respective
-  service on step7 of installer wizard. (Jaimin Jetly via yusaku)
-
-  AMBARI-1031. Check for host registration at step 3 of installer wizard
-  and retrieve information for RAM and no. of cores. (Jaimin Jetly via
-  yusaku)
-
-  AMBARI-1022. Integrate Heatmap UI to backend API. (Srimanth
-  Gunturi via yusaku)
-
-  AMBARI-1015. Create HBase summary section in Dashboard & Service
-  pages. (Srimanth Gunturi via yusaku)
-
-  AMBARI-1014. Hook service summary sections in service pages to API.
-  (Srimanth Gunturi via yusaku)
-
-  AMBARI-1008. Populate dashboard>MapReduce section with API data.
-  (Srimanth Gunturi via yusaku)
-
-  AMBARI-1006. Populate dashboard>HDFS section with API data.
-  (Srimanth Gunturi via yusaku)
-
-  AMBARI-1004. Allow properties entered in custom config
-  (ex: hdfs-site.xml) to override existing or create new properties.
-  (Jaimin Jetly via yusaku)
-
-  AMBARI-1002. Integrate Installer with config APIs. (Jaimin Jetly
-  via yusaku)
-
-  AMBARI-989. Show task logs for each host in the Deploy step of the
-  wizard. (yusaku)
-
-  AMBARI-976. Hook HDFS/MapReduce/HBase/Host graphs to backend API.
-  (Srimanth Gunturi via yusaku)
-
-  AMBARI-964. Implement summary page of installer wizard. (Jaimin Jetly
-  via yusaku)
-
-  AMBARI-974. Decommissioning of datanodes. (Jitendra Pandey via mahadev)
-
-  AMBARI-975. Fix support for cascading updates to configs. (Hitesh Shah
-  via mahadev)
-
-  AMBARI-971. Add api support for creating multiple resources in a single
-  request. (John Speidel via mahadev)
-
-  AMBARI-970. Add additional Ganglia metrics and JMX properties. (Tom
-  Beerbower via mahadev)
-
-  AMBARI-967. Enhance predicate comparison. (Tom Beerbower via mahadev)
-
-  AMBARI-954. Support installation of Ganglia master and slaves via
-  Ambari Web. (yusaku)
-
-  AMBARI-980. Allow installation of various service components. (yusaku)
-
-  AMBARI-949. Provide metric graphs for individual hosts. (Srimanth
-  Gunturi via yusaku)
-
-  AMBARI-948. Invoke service starts after services are installed in the
-  wizard. (yusaku)
-
-  AMBARI-942. Integrate Install and Start APIs with the installer wizard.
-  (Jaimin Jetly via yusaku)
-
-  AMBARI-936. Provide HBase service specific graphs. (Srimanth Gunturi
-  via yusaku)
-
-  AMBARI-933. Provide service-specific alerts in the service summary
-  pages. (Srimanth Gunturi via yusaku)
-
-  AMBARI-938. Hardcode service name and client component mapping while
-  awaiting meta data integration. (hitesh)
-
-  AMBARI-927. Provide metrics graphs on the MapReduce services page. 
-  (Srimanth Gunturi via yusaku)
-
-  AMBARI-916. Provide metrics graphs in HDFS services page. (Srimanth
-  Gunturi via yusaku)
-
-  AMBARI-908. Add ui option to either create a Postgres database for Hive
-  and Oozie or choose existing database. (Jaimin Jetly via yusaku)
-
-  AMBARI-915. Implement App Browser for displaying and navigating Pig/Hive
-  workflows. (yusaku)
-
-  AMBARI-907. Add support for getting multiple objects in controller.
-  (hitesh)
-
-  AMBARI-906. Util to extract hosts for various components. (jitendra)
-
-  AMBARI-903. Various fixes for config handling integration. (Hitesh Shah via 
-  mahadev)
-
-  AMBARI-900. Add configuration mapping support. (Nate Cole via mahadev)
-
-  AMBARI-895. Need name consistency for metrics. (Tom Beerbower via mahadev)
-
-  AMBARI-893. Provide API support for temporal queries. (John Speidel via
-  mahadev)
-
-  AMBARI-897. Operations request object and skeleton management methods.
-  (jitendra)
-
-  AMBARI-894. TestHeartBeatMonitor fails intermittently. (jitendra)
-
-  AMBARI-892. Add puppet executor at the agent to be able to run various
-  commands from the server. (mahadev)
-
-  AMBARI-887. Ability to save configuration. (Nate Cole via mahadev)
-
-  AMBARI-877. Refactor resource provider implementation for changes to
-  management interface. (Tom Beerbower via mahadev)
-
-  AMBARI-876. Put metrics under metrics category. (Tom Beerbower via 
-  mahadev)
-
-  AMBARI-890. Add client library option to step6 (Assign slaves) of
-  installer wizard. Also add indicator and popovers for hosts with
-  master component. (Jaimin Jetly via yusaku)  
-
-  AMBARI-889. Provide cluster metric graphs on Ambari main dashboard.
-  (Srimanth Gunturi via yusaku)
-
-  AMBARI-886. Support filters in controller get* apis. (hitesh)
-
-  AMBARI-880. Implement Review Page (Step 8) for the Ambari Installer
-  Wizard. (Jaimin Jetly via yusaku)
-
-  AMBARI-872. Hookup Nagios alerts section in Ambari UI to backend
-  server. (Srimanth Gunturi via yusaku)
-
-  AMBARI-871. Integrate basic set of rest APIs with ambari-web
-  installer wizard. (Jaimin Jetly via yusaku)
-
-  AMBARI-884. Implement Dashboard/Service summary. (yusaku)
-
-  AMBARI-882. Group-based DataNode/TaskTracker/RegionServer overrides.
-  (yusaku)
-
-  AMBARI-881. Implement Add Hosts Wizard. (yusaku)
-
-  AMBARI-869. Util to deserialize ExecutionCommand. (jitendra)
-
-  AMBARI-874. Fix hostinfo reporting at the server and add a unit test for
-  deserialization of the host information from the agent. (mahadev)
-
-  AMBARI-873. Support for multiple objects' updates in controller. (hitesh)
-
-  AMBARI-870. Support metric types other than string (Tom Beerbower via
-  mahadev)
-
-  AMBARI-868. Clean up site.pp generation on the agent and remove the imports
-  in the sample site.pp. (mahadev)
-
-  AMBARI-862. API query against /clusters doesn't return any data.
-  (John Speidel via mahadev)
-
-  AMBARI-866. Add ORM layer for the FSM's in the server. (mahadev)
-
-  AMBARI-853. Add more complete JMX metrics. (Tom Beerbower via mahadev)
-
-  AMBARI-852. Improve REST API functionality regarding query and partial
-  response. (John Speidel via mahadev)
-
-  AMBARI-865. Add unit test for action queue on the agent. (mahadev)
-
-  AMBARI-851. Hook up Ganglia property provider. (Tom Beerbower via mahadev)
-
-  AMBARI-863. Fix mvn tests to be able to run the python tests cleanly.
-  (mahadev)
-
-  AMBARI-849. Stage planner implementation. (jitendra)
-
-  AMBARI-860. Remove code that adds fake stages for testing. (jitendra)
-
-  AMBARI-856. Add cluster heatmap. (yusaku)
-
-  AMBARI-855. Create the skeleton for a custom data adapter in Ambari Web.
-  (yusaku)
-
-  AMBARI-854. Serve ambari-web from jetty. (Jaimin Jetly via yusaku)
- 
-  AMBARI-850. Flatten ExecutionCommand structure. (jitendra)
-  
-  AMBARI-848. Various tests for FSM and Controller impl. (hitesh)
-
-  AMBARI-847. Run pyunit tests from maven test target and also autogenerate
-  the openssl passphrase. (mahadev)
-
-  AMBARI-845. Fix NPE in the server to be able to run the server api's.
-  (mahadev)
-
-  AMBARI-844. Mock JMX provider for manual tests. (Tom Beerbower via mahadev)
-
-  AMBARI-841. Fix comparison predicates in case where resource does not have
-  property value. (Tom Beerbower via mahadev)
-
-  AMBARI-833. Add missing Path annotation to rest services for
-  put/post/delete. (John Speidel via mahadev)
-
-  AMBARI-838. HACK: Add a thread in server to inject requests for testing.
-  (Jitendra via mahadev)
-
-  AMBARI-835. Update JMXPropertyProvider. (Tom Beerbower via hitesh)
-
-  AMBARI-832. Merge ambari-api with ambari-server. (mahadev)
-
-  AMBARI-822. Implement an agent simulator for unit testing. (jitendra)
-  
-  AMBARI-829. Add unit tests for ResourceProviderImpl. (Tom Beerbower via
-  mahadev)
-
-  AMBARI-831. Move manifest generation into the ambari agent directory.
-  (mahadev)
-
-  AMBARI-828. Manifest generation for various actions from the server.
-  (mahadev)
-
-  AMBARI-827. Add clusterName to the status of the commands run by the agent.
-  (mahadev)
-
-  AMBARI-824. Provide basic management functionality (create/update) in the
-  REST API. (John Speidel via mahadev)
-
-  AMBARI-826. Bug in processing command reports. (jitendra)
-
-  AMBARI-825. Controller layer implementation part 3. (hitesh)
-
-  AMBARI-823. Fix security filter on the server agent ports and remove
-  duplication on servlet contexts for certs signing. (mahadev)
-
-  AMBARI-821. Implement basic service state update and verify flow to
-  ActionManager. (hitesh)
-
-  AMBARI-812. In API, improve partial response support to drill down n levels.
-  (John Speidel)
-
-  AMBARI-791. Add unit tests and java docs for SPI code. (Tom Beerbower)
-
-  AMBARI-820. Remove JAXB dependencies in Server Agent protocol and move to
-  POJO based jackson serializer. (mahadev)
-
-  AMBARI-819. Management controller implementation work. (hitesh)
-
-  AMBARI-811. Bug fix in jaxb serialization for maps. (jitendra)
-
-  AMBARI-810. Controller layer implementation part 1. (hitesh)
-
-  AMBARI-807. Fix Action scheduler tests because of fsm interface changes.
-  (jitendra)
-
-  AMBARI-806. Remove State object as configs/stack version/running state are
-  handled as standalone entities. (hitesh)
-
-  AMBARI-805. Add requestId tracking objects for management spi. (hitesh)
-
-  AMBARI-803. FSM initial refactoring for eventual live/desired objects. (hitesh)
-
-  AMBARI-800. Hack to add a stage for testing in in-memory db. (jitendra)
-  
-  AMBARI-801. Fix heartbeat message from the agent which is causing NPE at the
-  server. (mahadev)
-
-  AMBARI-778. Ensure data flows across all steps in installer wizard.
-  (Jaimin Jetly via yusaku)
-
-  AMBARI-799. Prototype for management spi part 3. (hitesh)
-
-  AMBARI-797. Prototype for management spi interface continued. (hitesh)
-
-  AMBARI-795. Fix failing tests for AgentResource and BootStrap. (mahadev)
-
-  AMBARI-793. Make MapReduce, Nagios, and Ganglia optional during cluster
-  install. (yusaku)
-
-  AMBARI-794. Add log4j properties for logging at the server. (mahadev)
-
-  AMBARI-790. OK in registration response. (jitendra)
-
-  AMBARI-787. Registration throws HostNotFoundException for new hosts. (jitendra)
-  
-  AMBARI-788. Fix server and agent startup for end to end testing. (mahadev)
-
-  AMBARI-785. Action response unit test. (jitendra)
-
-  AMBARI-783. Fix guice injection in the server. (mahadev)
-
-  AMBARI-784. Add Resource download API on the server. (mahadev)
-
-  AMBARI-781. Registration unit test. (jitendra)
-
-  AMBARI-754. Heartbeat handler: Registration response should query component 
-  status. (jitendra)
-
-  AMBARI-755. Heartbeat handler: Update state as reported in heartbeat. 
-  (jitendra)
-
-  AMBARI-756. Heartbeat handler: Handle heartbeat timeout. (jitendra)
-
-  AMBARI-767. Add bootstrap script to ssh in parallel and setup agents on a
-  list of hosts. (mahadev)
-
-  AMBARI-764. Integrate REST API. (Tom Beerbower via mahadev)
-
-  AMBARI-762. Implement Confirm Hosts page for Ambari installer
-  (Jaimin Jetly via yusaku)
-
-  AMBARI-763. Implement Installer Step 6 (Assign Slaves). (yusaku)
-
-  AMBARI-760. Fix injection in data access objects to use guice provider.
-  (mahadev)
-
-  AMBARI-759. Add puppet scripts to the agent for configuring/installing
-  various services and add security aspects to api's and server/agent.
-  (mahadev)
-
-  AMBARI-749. Complete Java side implementation of bootstrapping agent hosts.
-  (mahadev)
-
-  AMBARI-757. Implement Installer Step 4 (Select Services). (yusaku)
-
-  AMBARI-751. Re-structure servicecomponenthost fsm layout. (hitesh)
-
-  AMBARI-732. Action scheduler unit tests. (jitendra)
-
-  AMBARI-739. Cluster fsm implementation. (hitesh)
-
-  AMBARI-738. s/Node/Host/g. (hitesh)
-
-  AMBARI-737. ServiceComponentNode FSM implementation. (hitesh)
-
-  AMBARI-722. Action scheduler implementation. (jitendra)
-  
-  AMBARI-733. Add Jersey Resource for BootStrapping and JAXB elements for API
-  entities. (mahadev)
-
-  AMBARI-730. Add unit tests for jersey apis on the server. (mahadev)
-
-  AMBARI-725. Add commandstatus/result/error objects into the rest API between
-  server and agent. (mahadev)
-
-  AMBARI-723. Implement Installer Welcome page and Install Options page.
-  (Jaimin Jetly via yusaku)
-
-  AMBARI-726. ORM-based data access layer for new design (Jaimin Jetly via hitesh)
-
-  AMBARI-728. Initial work on ServiceComponentNode FSM. (hitesh)
-
-  AMBARI-724. Add tabs, dynamic form generation, validation errors, and info
-  popovers for Customize Services page in Installer. (yusaku)
-
-  AMBARI-714. Job FSM Impl and tests. (hitesh)
-
-  AMBARI-721. Remove Hardwareprofile class since it's not needed anymore.
-  (mahadev)
-
-  AMBARI-720. Tweak basic styles for Installer. (yusaku)
-
-  AMBARI-719. Enable Responsive Design. (yusaku)
-
-  AMBARI-716. Add back TestNodeImpl and fix memory types and disk info
-  serialization. (mahadev)
-
-  AMBARI-717. Starter implementation for Installer Customize Services page.
-  Stylize top nav and implement static footer.  Stylize login page. (yusaku)
-
-  AMBARI-711. Create utility functions related to localStorage for first two
-  steps: cluster name and Install options. Also develop view logic with
-  preliminary validations for these two steps. (Jaimin Jetly via yusaku)
-
-  AMBARI-715. Integrate domain objects and Rest serialized objects. (mahadev)
-
-  AMBARI-713. Initial work on Job FSM. (hitesh)
-
-  AMBARI-712. Action manager skeleton. (jitendra)
-
-  AMBARI-710. Basic registration and heartbeat protocol implementation between
-  the server and the agent. (mahadev)
-
-  AMBARI-709. Getting hardware info on disks/cpu/others using facter and using
-  it during registration. (mahadev)
-
-  AMBARI-707. More work on Node FSM and additional tests/cleanup. (hitesh)
-
-  AMBARI-706. Basic tests for Node FSM. (hitesh)
-
-  AMBARI-705. Initial work on Node FSM. (hitesh)
-
-  AMBARI-703. Heartbeat handler classes. (jitendra)
-
-  AMBARI-702. Add skeleton for Ambari agent that talks to the server and
-  collects information for host. (mahadev)
-
-  AMBARI-696. Add interface for ActionManager to access live state. (hitesh)
-
-  AMBARI-698. Add a simple server and artifact generation to run a server with
-  a simple api check. (mahadev)
-
-  AMBARI-697. Ambari Web (browser-based UI) skeleton. (Jaimin Jetly and yusaku)
-
-  AMBARI-695. More basic class restructuring for new design. (hitesh)
-
-  AMBARI-694. Class to encapsulate stage. (jitendra)
-
-  AMBARI-691. More basic classes for new design. (hitesh)
-
-  AMBARI-693. Classes for request objects. (jitendra)
-
-  AMBARI-685. Basic classes. (hitesh via jitendra)
-
-  AMBARI-676. Separate directory for ambari-server. (jitendra)
-
-  IMPROVEMENTS
-
-  AMBARI-1159. Check the log/run dir locations to make sure it's an abs path.
-  (yusaku)
-
-  AMBARI-1156. Dashboard > HDFS pie chart should hover with details. (yusaku)
-
-  AMBARI-1154. The check boxes to check/uncheck one of the members in a
-  multi-artifact graph are not very readable. It should be more apparent
-  which one the user clicked on. (yusaku)
-
-  AMBARI-1106. User-specified custom configs (such as hdfs-site.xml overrides)
-  should be persisted to maintain what the user specified.
-  (Jaimin Jetly via yusaku)
-
-  AMBARI-1103. Need to be able to reliably recover from the case when the browser
-  is closed during deploy (Step 8 post submission, Step 9) of the wizard.
-  (Arun Kandregula via yusaku)
-
-  AMBARI-1099. Hive Service Summary needs to show service components better.
-  (Srimanth Gunturi via yusaku)
-
-  AMBARI-1097. Restrict user from proceeding ahead of step 7 (Service
-  configuration) when properties specified in custom-box are already exposed
-  on the page. (Jaimin Jetly via yusaku)
-
-  AMBARI-1102. Error handling when errors are encountered during preparation
-  for deploy. (Arun Kandregula via yusaku)
-
-  AMBARI-1096. Create heatmap legend entries for missing data/invalid hosts.
-  (Srimanth Gunturi via yusaku)
-
-  AMBARI-1078. Improve graph message when data is not available.
-  (Srimanth Gunturi via yusaku)
- 
-  AMBARI-1146. Exclude hosts and include hosts config parameters need 
-  clarification. (yusaku)
-  
-  AMBARI-1074. CPU Usage chart needs better idle time display. (Srimanth
-  Gunturi via yusaku)
-
-  AMBARI-1072. Change text on alerts "about XX hours ago". (Srimanth Gunturi
-  via yusaku)
-
-  AMBARI-1145. Cluster Management refactoring. (yusaku)
-
-  AMBARI-984. Add support for exposing filesystem type for mount points
-  at host level. (hitesh via mahadev)
-
-  AMBARI-973. Ensure zookeeper service check run after zookeeper start.
-  (hitesh via mahadev)
-
-  AMBARI-965. Stop events should be handled at all valid points for safe
-  recovery. (hitesh via mahadev)
-
-  AMBARI-959. Handle passing repo information from server to agent.
-  (hitesh via mahadev)
-
-  AMBARI-951. Integrate meta data to be able to run service checks after
-  a start of a service. (hitesh via mahadev)
-
-  AMBARI-932. Add initial hooks to trigger smoke tests on service starts.
-  (hitesh via mahadev)
-
-  AMBARI-924. Rename job to action. (hitesh via mahadev)
-
-  AMBARI-922. Use stack name and version in fsm layer. (hitesh via mahadev)
-
-  AMBARI-982. Add ability to set rack info for a host.
-  (Nate Cole via mahadev)
-
-  AMBARI-981. Add configuration to host_component request.
-  (Nate Cole via mahadev)
-
-  AMBARI-931. Support for /hosts end point. (Nate Cole via mahadev)
-
-  AMBARI-912. Test case for ConfigurationService.
-  (Nate Cole via mahadev)
-
-  AMBARI-1021. Ambari-agent init script needs to be aware of already
-  running/not running agent process. (Pramod Thangali via mahadev)
-
-  AMBARI-1019. Add methods to get postgres user name/password or any
-  regex validated string input from user.
-  (Pramod Thangali via mahadev)
-
-  AMBARI-1007. Add aggregate IO stats to workflow data web service.
-  (Pramod Thangali via mahadev)
-
-  AMBARI-1000. Use FQDN instead of hostname when registering an agent with
-  Ambari server. (Pramod Thangali via mahadev)
-
-  AMBARI-1066. Rename Charts section to Heatmaps. (Srimanth Gunturi via
-  mahadev)
-
-  AMBARI-1056. Expose CapacityRemaining JMX metric to NAMENODE
-  ServiceComponentInfo. (Tom Beerbower via mahadev)
-
-  AMBARI-1055. Refactor SPI Request interface to remove PropertyId.
-  (Tom Beerbower via mahadev)
-
-  AMBARI-1049. Tune Ganglia request. (Tom Beerbower via mahadev)
-
-  AMBARI-1037. Implement an efficient way to provide Ganglia data for
-  heatmap and other cluster visualization tools.
-  (Tom Beerbower via mahadev)
-
-  AMBARI-1001. Cluster level Network, Load, CPU and Memory metrics in API
-  needed for dashboard page. (Tom Beerbower via mahadev)
-
-  AMBARI-996. Expose metrics and properties for UI. (Tom Beerbower via mahadev)
-
-  AMBARI-972. Refactor resource provider implementation to move inner classes.
-  (Tom Beerbower via mahadev)
-
-  AMBARI-943. Add Host metrics. (Tom Beerbower via mahadev)
-
-  AMBARI-929. Show HBASE_REGIONSERVER metrics. (Tom Beerbower via mahadev)
-
-  AMBARI-928. Enable end to end testing of Request and Task resources.
-  (Tom Beerbower via mahadev)
-
-  AMBARI-925. Return RequestStatus through ClusterController.
-  (Tom Beerbower via mahadev)
-
-  AMBARI-923. ResourceProvider changes for Request and Task resources.
-  (Tom Beerbower via mahadev)
-
-  AMBARI-911. Implement an efficient way to provide Ganglia data for heatmap
-  and other cluster visualization tools. (Tom Beerbower via mahadev)
-
-  AMBARI-930. Map update to PUT and create to POST.
-  (John Speidel via mahadev)
-
-  AMBARI-1053. Dashboard page loads very slowly due to hosts?fields=* API call.
-  (Srimanth Gunturi via yusaku)
-
-  AMBARI-1051. Dashboard page takes long time to load. (Srimanth Gunturi via
-  yusaku)
-
-  AMBARI-1041. Additional metrics need to be added to Heatmap UI. (Srimanth
-  Gunturi via yusaku)
-
-  AMBARI-1040. Cluster heatmap: green should always mean "good". (Srimanth
-  Gunturi via yusaku)
-
-  AMBARI-1039. Improve Nagios alerts time display. (Srimanth Gunturi via yusaku)
-
-  AMBARI-1036. Service Info/Quick Links do not display external hostnames.
-  (Srimanth Gunturi via yusaku)
-
-  AMBARI-1035. Aggregate creation of multiple services and assignment of host
-  to cluster. (Jaimin Jetly via yusaku)
-
-  AMBARI-1034. Metric Charts - display local time rather than UTC.
-  (Srimanth Gunturi via yusaku)
-
-  AMBARI-1033. Nagios and Ganglia links should use public host names in URLs.
-  (Srimanth Gunturi via yusaku)
-
-  AMBARI-1030. Metrics links in web ui should link to Ganglia UI. (Srimanth
-  Gunturi via yusaku)
-
-  AMBARI-1025. Display total install and start services time on summary page
-  and polish summary page ui. (Jaimin Jetly via yusaku)
-
-  AMBARI-1023. Dashboard page should handle API sending JSON as strings and
-  objects. (Srimanth Gunturi via yusaku)
-
-  AMBARI-1011. Create 2 missing HDFS service graphs. (Srimanth Gunturi via
-  yusaku)
-
-  AMBARI-1003. Nagios sections should use backend API to populate. (Srimanth
-  Gunturi via yusaku)
-
-  AMBARI-1062. Convert Apache license header comment style in Handlebars files
-  to Handlebars comments rather than JavaScript comments. (yusaku)
-
-  AMBARI-1061. Data loading refactoring for cluster management. (yusaku)
-
-  AMBARI-1060. Data loading for App Browser. (yusaku)
-
-  AMBARI-993. Hook up login with server authentication. (yusaku)
-
-  AMBARI-1059. Refactor cluster management. (yusaku)
-
-  AMBARI-1058. Implement data loading. (yusaku)
-
-  AMBARI-956. On unavailability of non-master components, host with least
-  number of master components should install all slave and client components. 
-  (Jaimin Jetly via yusaku)
- 
-  AMBARI-990. Refactor App Browser. (yusaku)
-
-  AMBARI-979. More refactoring of App Browser code. (yusaku)
-
-  AMBARI-947. Make it easier to test Deploy (Install, Start + Test) step
-  of the wizard. (yusaku)
-
-  AMBARI-978. Refactor App Browser code. (yusaku)
-
-  AMBARI-977. Refactor Wizard and Cluster Management code. (yusaku)
-
-  AMBARI-941. More refactoring of Wizards in Ambari Web. (yusaku)
-
-  AMBARI-919. Partial refactoring and consolidation of code for various
-  wizards. (yusaku)
-
-  AMBARI-918. Update styles in Cluster Management. (yusaku)
-
-  AMBARI-917. Update layout and flow for App Browser. (yusaku)
-
-  AMBARI-888. Add more tests for controller implementation. (hitesh)
-
-  AMBARI-891. Initial work to refactor the Wizards in Ambari Web. (yusaku)
-
-  AMBARI-883. Improve user interactions on Confirm Hosts page of the
-  Installer. (yusaku)
-
-  AMBARI-859. Tighten up the layout for the Install page of the Installer.
-  (yusaku)
-
-  AMBARI-857. Refactor Install Options page for the Install Wizard. (yusaku)
-
-  OPTIMIZATIONS
-
-  BUG FIXES
-
-  AMBARI-1160. Cannot add a hostname that has a number next to "." in it.
-  (yusaku)
-
-  AMBARI-1139. Disable Misc section in Customize Services page of the Install
-  Wizard. (Srimanth Gunturi via yusaku) 
-
-  AMBARI-1158. Filters are not working correctly on Hosts page. (yusaku)
-
-  AMBARI-1157. Host component operation causes lags in status/action pulldown
-  update. (yusaku)
-
-  AMBARI-1144. Cannot save changes to ZooKeeper configuration.
-  (Arun Kandregula via yusaku)
-
-  AMBARI-1155. Change "Save and apply changes" button on configs section to
-  "Save". (yusaku)
-
-  AMBARI-1153. Host jams in status 'Preparing' if host name is wrong.
-  (Arun Kandregula via yusaku)
-
-  AMBARI-1132. Stopping service doesn't cause blinking status until refresh.
-  (Srimanth Gunturi via yusaku)
-
-  AMBARI-1143. tmpfs filesystem being added to the list in the dir used by
-  Ambari. (Arun Kandregula via yusaku) 
-
-  AMBARI-1142. On Notification Popup, clicking "go to nagios UI" doesn't
-  load nagios UI. (Arun Kandregula via yusaku)
-
-  AMBARI-1125. Graphs "degrade" over time. (Srimanth Gunturi via yusaku)
-
-  AMBARI-1124. Boxes on services page need min height or something to keep
-  it from visually cutting off info. (Srimanth Gunturi via yusaku)
-
-  AMBARI-1123. Ambari heatmaps and host information show infinity for disk
-  space used. (Srimanth Gunturi via yusaku)
-
-  AMBARI-1141. In some cases, clicking "Register and Confirm" button does
-  not do anything. (Arun Kandregula via yusaku)
- 
-  AMBARI-1140. Resuming deploy for Installer/Add Hosts does not work if the
-  browser is shut down during the start phase of deploy.
-  (Arun Kandregula via yusaku)
-  
-  AMBARI-1120. Key spikes in HDFS IO missing from IO summary graphs.
-  (Srimanth Gunturi via yusaku)
-
-  AMBARI-1119. Service Summary pages no longer show service-specific info.
-  (Srimanth Gunturi via yusaku)
-
-  AMBARI-1118. Dashboard > HDFS title's free capacity doesn't match summary.
-  (Srimanth Gunturi via yusaku)
-
-  AMBARI-1115. Host component live status is broken. (Srimanth Gunturi via
-  yusaku)
-
-  AMBARI-1113. Install Wizard: Confirm host stuck at Preparing stage.
-  (Arun Kandregula via yusaku)
-
-  AMBARI-1112. Add hosts fails second time around. (Srimanth Gunturi via
-  yusaku)
-
-  AMBARI-1111. Install wizard step 9 gets stuck at 0% and other issues on
-  page refresh. (Srimanth Gunturi via yusaku)
-
-  AMBARI-1110. After clicking the deploy button on the Add Hosts wizard, the
-  user is always taken to the Installer Wizard Step 8 upon login.
-  (Arun Kandregula via yusaku)
-
-  AMBARI-1152. Add Hosts wizard - Retry button does not trigger call to
-  backend. (yusaku)
-
-  AMBARI-1104. Webhcat configuration not setting templeton-libjars.
-  (Jaimin Jetly via yusaku)
-
-  AMBARI-1151. Reconfigure fails silently; it's not firing any API calls due
-  to a JS error. (yusaku)
-
-  AMBARI-1098. Switching services does not update various UI elements.
-  (Srimanth Gunturi via yusaku)
-
-  AMBARI-1150. Installer Wizard - Retry feature in Deploy step (Step 9) is
-  broken. (yusaku)
-
-  AMBARI-1092. dashboard > Summary > capacity pie chart keeps changing colors.
-  (Srimanth Gunturi via yusaku)
-
-  AMBARI-1091. 2 parallel requests for service information resulting in JS
-  exception. (Srimanth Gunturi via yusaku)
-
-  AMBARI-1090. Restrict user from applying service configuration when custom
-  box properties are already exposed on the management config page.
-  (Jaimin Jetly via yusaku)
-
-  AMBARI-1149. HIVE_METASTORE needs to be started as a Hive component.
-  (yusaku)
-
-  AMBARI-1088. HDFS capacity chart numbers are incorrect. (Srimanth Gunturi
-  via yusaku)
-
-  AMBARI-1084. Heatmap displays NaN. (Srimanth Gunturi via yusaku)
-
-  AMBARI-1081. HDFS disk capacity on dashboard is seen as negative number.
-  (Srimanth Gunturi via yusaku)
-
-  AMBARI-1148. Fix incorrect labels for configuration parameters. (yusaku)
-
-  AMBARI-1080. Host disk & memory graphs have incorrect values.
-  (Srimanth Gunturi via yusaku)
-
-  AMBARI-1077. The value for dead nodes is not getting populated on UI.
-  (Srimanth Gunturi via yusaku)
-
-  AMBARI-1073. Remove cluster name entry from top titlebar. (Srimanth Gunturi
-  via yusaku)
-
-  AMBARI-1071. Nagios alerts not updating in UI. (Srimanth Gunturi via yusaku)
-
-  AMBARI-1063. Workflow Web Service potentially leaks DB connections upon
-  exceptions. (yusaku)
-
-  AMBARI-962. Update of host components fail when service name is not passed
-  in. (hitesh via mahadev)
-
-  AMBARI-945. Fix 500 errors on get resources api. (hitesh via mahadev)
-
-  AMBARI-944. Fixes for meta info layer. (hitesh via mahadev)
-
-  AMBARI-913. Fix all apis to return correctly filled status response.
-  (hitesh via mahadev)
-
-  AMBARI-999. RUBYLIB env variable expanding forever. (Pramod Thangali via
-  mahadev)
-
-  AMBARI-1069. HDFS Disk Capacity in HDFS Service Summary is totally off.
-  (Srimanth Gunturi via mahadev)
-
-  AMBARI-1068. Dashboard cluster level graphs showing only 45 minutes of data.
-  (Srimanth Gunturi via mahadev)
-
-  AMBARI-1067. Service > MapReduce map slots reserved / occupied are backwards.
-  (Srimanth Gunturi via mahadev)
-
-  AMBARI-1057. Can't reset ambari-server due to failed drop/create database DDLs.
-  (Tom Beerbower via mahadev)
-
-  AMBARI-1052. UnsupportedPropertyException thrown from update.
-  (Tom Beerbower via mahadev)
-
-  AMBARI-1050. Host metric values coming in with 0 values.
-  (Tom Beerbower via mahadev)
-
-  AMBARI-1047. Create Configuration API call is throwing 500:
-  UnsupportedPropertyException. (Tom Beerbower via mahadev)
-
-  AMBARI-1044. API is not returning Ganglia metrics for one of the hosts
-  in the cluster. (Tom Beerbower via mahadev)
-
-  AMBARI-1043. Updates with queries that contain non primary key fields
-  may update resources that don't satisfy the query.
-  (Tom Beerbower via mahadev)
-
-  AMBARI-1038. Ganglia setup does not allow for subsequent changes to the
-  cluster. (Tom Beerbower via mahadev)
-
-  AMBARI-1027. Fix missing block metrics for NAMENODE.
-  (Tom Beerbower via mahadev)
-
-  AMBARI-1026. Resolve overlap between JMX and Ganglia metrics.
-  (Tom Beerbower via mahadev)
-
-  AMBARI-1020. Start time, end time and resolution not set correctly for
-  rrd.py call in Ganglia property. (Tom Beerbower via mahadev)
-
-  AMBARI-1016. Initial API calls after Ambari Web install resulted in 500
-  (ArrayIndexOutOfBoundsException); Ambari Web stuck at "Loading..."
-  (Tom Beerbower via mahadev)
-
-  AMBARI-1012. Fix race condition in DefaultProviderModule.
-  (Tom Beerbower via mahadev)
-
-  AMBARI-1010. Fix extra comma in rrd.py output. (Tom Beerbower via mahadev)
-
-  AMBARI-1005. No Ganglia/JMX metrics data are coming through.
-  (Tom Beerbower via mahadev)
-
-  AMBARI-997. Fix HBASE JMX properties. (Tom Beerbower via mahadev)
-
-  AMBARI-994. Host metrics API servlet not filtering on given fields.
-  (Tom Beerbower via mahadev)
-
-  AMBARI-988. Update resource drops property values.
-  (Tom Beerbower via mahadev)
-
-  AMBARI-969. GET on temporal data throws 500. (Tom Beerbower via mahadev)
-
-  AMBARI-968. Task resources not returned under request resources.
-  (Tom Beerbower via mahadev)
-
-  AMBARI-961. Sub-resources and metrics through API are broken.
-  (Tom Beerbower via mahadev)
-
-  AMBARI-1046. Heatmap with no numbers on the hover. (Srimanth Gunturi via
-  yusaku)
-
-  AMBARI-1045. Service summary sections have incorrect values displayed.
-  (Srimanth Gunturi via yusaku)
-
-  AMBARI-1042. Heatmap UI fluctuates between white and green colors
-  intermittently. (Srimanth Gunturi via yusaku)
-
-  AMBARI-1032. Host hover in cluster heatmap showing mock data. (Srimanth
-  Gunturi via yusaku)
- 
-  AMBARI-1028. MapReduce & HDFS summaries should use ServiceComponentInfo
-  values. (Srimanth Gunturi via yusaku)
-
-  AMBARI-1017. Alerts not showing up in Ambari UI due to model refactoring.
-  (Srimanth Gunturi via yusaku)
-
-  AMBARI-1013. Host metrics charts should use live data. (Srimanth Gunturi
-  via yusaku)
-
-  AMBARI-1009. Cluster level graphs need to use API for data. (Srimanth
-  Gunturi via yusaku)
-
-  AMBARI-1064. App Browser fixes. (yusaku)
-
-  AMBARI-995. Deploy logs not shown for failed tasks. (yusaku)
-
-  AMBARI-992. Logout does not clean application state properly. (yusaku)
-
-  AMBARI-957. Adding a host whose hostname is the same as the one the user 
-  is accessing Ambari Web with breaks the Installer. (yusaku)
-
-  AMBARI-953. Fix navigation issues in installer wizard due to regression.
-  (Jaimin Jetly via yusaku)
-
-  AMBARI-899. Use slf4j-api 1.5.5 rather than 1.6.6. (yusaku)
-
-  AMBARI-902. Fix ZooKeeper badge allocation logic for single host and 
-  multiple hosts installation in step5 installer wizard. (Jaimin Jetly via
-  yusaku)
-
-  AMBARI-896. Resolve all navigation related issues for Step6 (Slave and
-  Client component) of installer wizard. (Jaimin Jetly via yusaku)
-
-  AMBARI-914. Fix issues related to Slave Component Group in Installer.
-  (yusaku)
-
-  AMBARI-909. Pass correct cluster info to Action Manager. (hitesh)
-
-  AMBARI-904. Ensure state changes only happen after actionmanager persists
-  actions. (hitesh)
-
-  AMBARI-905. Fix puppet site creation with flattening of execution commands
-  sent from the server. (mahadev)
-
-  AMBARI-885. Fix miscellaneous issues related to Ambari Web. (yusaku)
-
-  AMBARI-879. Installer skips Confirm Hosts page of the wizard when testing
-  locally without Ambari Server. (yusaku)
-
-  AMBARI-878. Various tests for FSM, controller and state objects. (hitesh)
-
-  AMBARI-858. Installer -> Select Services page: warning popups are no longer
-  appearing. (yusaku)
-
-  AMBARI-846. Select Masters Page: make ZooKeeper addition/removal UI more
-  organized. (Jaimin Jetly via yusaku)
-
-  AMBARI-840. Hitting browser refresh should not clear present step data that 
-  had already been persisted to local DB. (Jaimin Jetly via yusaku)
-
-  AMBARI-843. Fix more null pointers for partial request objects. (hitesh)
-
-  AMBARI-842. Fix null pointer exception during adding of hosts to cluster. (hitesh)
-
-  AMBARI-839. Temporary fix for server start order. (hitesh)
-
-  AMBARI-837. Fix basic injection issues for controller impl. (hitesh)
-
-  AMBARI-836. Fix generation of requestId to be unique across restarts. (hitesh)
-
-  AMBARI-834. Use RoleCommand instead of ServiceComponentHostEventType for HostAction
-  in Controller. (hitesh)
-
-  AMBARI-830. Various fixes and tests for controller implementation. (hitesh)
-
-  AMBARI-808. Handle appropriate start/stop/install/.. events at their respective
-  failed states. (hitesh)
-
-  AMBARI-798. Fix import issue due to move of Predicate class. (hitesh)
-
-  AMBARI-780. Make FSM related changes for heartbeat handler. (hitesh)
-
-  AMBARI-774. Re-enable and fix AgentResourceTest. (mahadev)
-
-  AMBARI-773. Change Host FSM as per new requirements of heartbeat handler. (hitesh)
-
-  AMBARI-753. Fix broken compile as a result of re-factor of FSM layout. (hitesh)
-
-  AMBARI-752. Add missing license header to TestServiceComponentHostState. (hitesh)
-
-  AMBARI-718. Fix installer navigation. (yusaku)
-
-  AMBARI-684. Remove non-required dependencies from pom files (hitesh via jitendra)
-
-  AMBARI-680. Fix pom structure. (hitesh)
-
diff --git a/branch-1.2/DISCLAIMER.txt b/branch-1.2/DISCLAIMER.txt
deleted file mode 100644
index 886d5f9..0000000
--- a/branch-1.2/DISCLAIMER.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-Apache Ambari is an effort undergoing incubation at the Apache Software 
-Foundation (ASF), sponsored by the Apache Incubator PMC. 
-
-Incubation is required of all newly accepted projects until a further review 
-indicates that the infrastructure, communications, and decision making process 
-have stabilized in a manner consistent with other successful ASF projects. 
-
-While incubation status is not necessarily a reflection of the completeness 
-or stability of the code, it does indicate that the project has yet to be 
-fully endorsed by the ASF.
-
-For more information about the incubation status of the Apache Ambari project you
-can go to the following page:
-
-http://incubator.apache.org/ambari/
-
diff --git a/branch-1.2/KEYS b/branch-1.2/KEYS
deleted file mode 100644
index b64f0cc..0000000
--- a/branch-1.2/KEYS
+++ /dev/null
@@ -1,198 +0,0 @@
-pub   4096R/876EF43B 2012-07-02
-uid                  Vikram Dixit <vikram@apache.org>
-sub   4096R/5403F44B 2012-07-02
-
------BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG v2.0.18 (Darwin)
-
-mQINBE/x5NsBEADYgkrr80ezsl80Zn7D9bNk5IjFMjMLYlUMR1vVcF55UPe0eFce
-ZMWkOsdRxRl9oTYY9fYm3R6ph2gl7GaiDItTfr1NzIM8jwfGGQWD0bt8bMDzGGWh
-gcs/XUYbmYoisoC6LfElAmUktCvZH7H6nVNQjCWFtNcg5EWWAOwnmsUJiJSDbnaN
-XKmqzBCrtel4hJduXGG5aWwDGTJR9qvtKK/ROWho4RUNBBWg3UARCl0PGGcZ3oyE
-uAtbm5xGjmhi1H6JloAm/zQEABIYt8Jaqt6yn8pQWBoTWlt8cVSDCU63VnPC+v7v
-ydnweaLPuKT+tDekaoeLP5OhL72pp5Z+ncRQrbcy2p0XZSUTgUZ7l6LYIFMnDadU
-QRI/mXxtaDQOE6VzwH33GjXMMnweqiUYOWCfMBQUhLL5WGSs16lLOl1OdA7YCYEy
-uWQ8KewGkghcUM0HhTKI9FDAl4skMeDRpadtJmCttV0QpQpvf4AePzkzwZ+nq8Yg
-1scf3O/nVIK2xDxmOarCVVtI67DCkgEMg8evMy7Qi/6CA/arLz0IulczRAQ7+Xp/
-/iWowVbM5dRs9xOWMJLU4pU6EL2HFUyAbbXsvsJ3cX2z50i4yhFR6f9HipyK57M9
-CEReUkW8biwbdmxAl8fNNz9eBBiH0jgveSrTjMNqXwBBivwJHZrUlRmYRQARAQAB
-tCBWaWtyYW0gRGl4aXQgPHZpa3JhbUBhcGFjaGUub3JnPokCOAQTAQIAIgUCT/Hk
-2wIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQqbtzWodu9DvXXRAAsb/T
-N3u0rFaMqBre1+l53yRqUpSfFbgz83gZdJnT5UcqlPReyKKpFCDSkkFGL9jtre3z
-5NRKKyCnqkiWrfVWiBN4S30ByWo3nFY8Tjk2sve32PbHRzyPF7UiPoXf1DgNHGsu
-0FGjhHrNV8+Bfs11JCRAbZD3FMmplUTjxpRXgHnR3Rj1ZSfynDwNfdmFbwezZ7VB
-8VR4m/YYejU4q0YRhN8PkNqBuNJaNaSNJLr2f7MUVSJBfZ/Mim87o7/FM41QyZwC
-GYCJngTNwVYursbMrXwHlhFT4AdGZEzohTL95TGMGCz2ENddIJchhO94W/Qi7i++
-seDndFjucZC5f69f8ao/ztutdC8KFZm5TnrKOjs1j5L23PKENsgjuSDPLy+hFFSl
-wasQ2QF9XhCMd7X09ApvxUKig7uZqxHWIopIpJNiL2zMfykcNIbgGrq1YoMDJsAC
-TYuo9nlzG9MiYv6n+8waMgdISNh9S0fM2xqhTmtSkenh3gstpYbQrh9YIv8N//RO
-GselFgcefgo8f0cG8ke/LQXVMFaB7Rc0N48NvszHJdUh4+qFwcWLRCS12tLNK0Y6
-sRGCn+4AJucyLNE/TtNFqKyzsTkFaFiMhGJ+n/tC6t0ZXpVN9VOEhTXMBopBImrC
-jS2PuQl7wBt4vhq3YFJwvVnpTBcqeOND4ytLBAaJAhwEEAECAAYFAk/x5qIACgkQ
-vlqqC6IQwJWG4BAAoSPx09cWfTaOxdsXKIJH3rFyU1bMSBstAu3mE88DcnxSdcCl
-z00BKMSW9ErOMjGP5t3heQM3ojkF7Z5Gs+jLFRimWWztgo8n7Dr6Advs+vLRBc/z
-AXgfmCZlWwe+OiSQfu0WFjVCGPa9NYGv2X6b537XkCAkpfK0pCR3EHCzjt8DRyup
-Itllb6Yf4n0MoEBX+8XGGXpybpP5oy/xti96Qm/EtSIOdZTYUIEIHIgxqICKfa43
-WpL+Dw4uWMtrB4O7aUZ4bT7w/UKpSAyJgDf6SaDM9h/KZsNGPi5+YLNIVaL6XtHu
-nsn/MMVssE2UjFQf2kErkh5cfUqkRT9G7HCNVlrRFmaxTL5ZzVmqpgUnVmJSJeWU
-hyN7CM7UPY8NhZwgPr8T1/JA+L66bwdfx+WIHqFVwTf1qU2nLNFV8MDz2Tf2fgB4
-R4tEwF1jgNXmwaZqKNttR4EwROy0vv1vbxOfjtczrneiDBKKF+g/ygH8t6/q+t3p
-uvg+7M0wV6oPQgWqThTcA6geM+NJBa/pwUVJVPUDOiouybJpG4wacxYc39YXEgbQ
-K+tSOu37ETmmWcW9ytu4UdZSHn+L+hf9SznRd7hmUKi2AIf+8w7jXn2WRkB8FC/E
-1z+8oNLcJJaWfhfVLXrMPXsvirG7tfdPYmSDtUimbjIOQ6e7PzaO7l7mw+G5Ag0E
-T/Hk2wEQAKKeSNhAfQWWBRWm0T6dRk+oA3OZs6wNUWM138sFdivKGP+r6RotLjV6
-OKBJ1RlVMnCSdQ2e0UKcu+X4HfpFannbgAxbP33/B8xRZssIgGQ7yEimTyVcSWsf
-6gOFnN62ahvpU0BSdlFv0dxb6zgmJK9YWrWzPHDpu1EwPE6BbDGF81Hp1xTuiCkW
-Cy7ZD8tZne/mPksh1zePHL+e4QzScgEJFgfEDfwXNKnkFVoI7NkTfvDBQgZnVZeI
-QUQv5xItFidW8clJRJTuq3t6JVlrjSE/5fhrbCM77r1LK1G7FUfX222i1sPP5UZJ
-rYNpXoDSR4HKF31t3q0qWl8xWLOfYlL7ZkPTC4nunjQzLd2DCXX6Ej3pgP2m9KBw
-5L3kNvv9BUrHLHZHJ21kfxu0hhJ5vKDFtmHcx2K4pi4MqZ6TL5mzTwllevKbdfV3
-yITzFbWby4sEhPEpSCDTzbr4KO0SyaS26r5MmOy/OVArFrS+R88NkidTnoMMi7Rz
-fvnIm/6s0zfYw1/vH/ROaLKqAzXJ+GL8xpxP78TOoXdBXVxgAnXM2QwqKSjuYMbW
-sSF7RnJ/sUjHderRjFqEdxB+04SMT9rzmOgnd24+pvkioXQ3p1WzbHErBRVZOqGf
-DZLYFbWG6EimusKTHrwIIcrJ9QvMbu/ecIIVBv+ykAc62vUhrpRfABEBAAGJAh8E
-GAECAAkFAk/x5NsCGwwACgkQqbtzWodu9Dsw9xAAttckPz9L024vOlBhXNexWKX9
-UPkuR+3T8y0DqJryTMFt/3YEiQhmkc/0qywHUt9kO+J6cutTBjJfiZuOW8BBI5pl
-oE9rYLXpoamAOaE7ZzDq6/qKbo9f1Iz/H8SOh2qwrtfZT/8A66/mBJRL5j5H9RsD
-+HYWHNEDs2Dp94RGI0k74NHJ5j0ui4e/X97D43Nk3Pvdo272dGAlF5r4dJ03MVYK
-HPa4NsscRFafjvzQEo7rIHJduplrllfEuqitJkVAZVwdecwZP0DSMCQCcP71CExN
-hYs8wkRjayfEDLU/KW3C31CS/9fJzNE+3NLDa4mcovK8nMm3T8+pz6wzYGo6nH0H
-P9AbVlLqhaG8MfSOG8p6MsxcfV+WP5ITVmisHy70v+pY2y/rPwZD74s2WxNt3mU2
-WQ9DhbbensIyZnrqmUOStipGy7diMS9wt6s34vYVfMQudpWDBlkZZP+1bXbbNkMy
-zWxIjrDoB4QNJAcUqdsPoYo26731gQhpfc0WRHnXWb4AX6nOCgCmCPpUnWB8fzzV
-q8NjvW09GT7Ls9llrf4IXG8kjX2PZRIfaGSa556PJjdD3xJWgTEP78i0zJTWQLku
-9Iu+B/4xqFS/wF4572o7n3K4b+zA3ZIN6h5SlT+DpMwy8+rymDgkf7Gk3whvl9Hq
-IWC8DKsT+np6ZGfoE58=
-=zb0j
------END PGP PUBLIC KEY BLOCK-----
-
-pub   4096R/8EE2F25C 2011-10-24
-uid                  Mahadev Konar (CODE SIGNING KEY) <mahadev@apache.org>
-sub   4096R/1F35DF4C 2011-10-24
-
------BEGIN PGP PUBLIC KEY BLOCK-----
-Version: SKS 1.1.0
-
-mQINBE6lnjMBEADP0KP2/sPGL/wDz9a8zYB0z1jyMxCXg9s5FFRNEbzPb9UGfyPtvkhC4yay
-Mn72Adq4XO+PbEWCG/aNopGC28kFYOCFDurx8LkwIQQ7/JiBZymvEcUNhgP4NNK770MKUNqZ
-FD5XJGNqJXLbNhrzJxTeqmuqjFy8uwiIvB2DNKWeW5kpxuzI9sAYpDZ3VDLsxl2lFINk8+PZ
-Fc531iqL0lOBR1q6deBL6bAP4os/Qz3Tx58GBv5MS9yg6GjAubCHD9WokAoOw7DECROOsoZA
-WQo5rVCQC8h10Az7nzJSS1W/f7XcK7BkMMN2oNDG+VTDqaohw/cGfFiQMnoIdgD9PZHF9Z8O
-Um1jLKHjGFS/gQW0i1F7IlwCsWDa7xBnnWZpo2Y+QKbI4UBDL8yfss/3m82xKeeODn7mhXsj
-iVOohAWUHczv3QgsIB6VXjloO7FelnR0QUxqvOfBdpuApLa/eSBGz0WD/OMXRPzNMz9xNfGm
-zbJMWcCZbD/m/f8OQxEJq3Psg6j1cWtzgUgeB/E6wt5zd/6IJEt7yqa03iR+0F6lwcN5NE5b
-AnAIbQUWZNNeDCdGP9dyDHtl95ATzbZo1vNC70YGNR2Xolt2luZ3YcL3ml/enrEpbYuk7WFS
-BNPJNHe+Yz3+Z/TI+8X/4CrY4X4OZ/q2SPGllLZhf3Pjgnjp9QARAQABtDVNYWhhZGV2IEtv
-bmFyIChDT0RFIFNJR05JTkcgS0VZKSA8bWFoYWRldkBhcGFjaGUub3JnPokCHAQQAQoABgUC
-TqX/iAAKCRD60FwqxSaUuIOmEACJcD7+qzpWYUBjfbRtUUs8y2H7D286UkPprDmHof7Gp2L8
-T57jXans/g1cxBVX8YBBGV1LE3fIjpW02xFElGAKBJzlT8ovTHvJk2bDQTfjPeK8Kdd0a61x
-u2Zuij7YVkyIZzdTOTz+g/QeSzH60jkEg7z5n0pNwLUbF2q2mM2RYLaIwSuGyP1Z2Rx2kC7I
-9/v7/RHUuSjT1QKZSOLf7IRR+v8InlkYGKFGdpmr4Gc3CI7xTGP89nn7eV1Y4/s0vE2WqoPV
-Pt+NLbEoPTlM6EI4Ejuz5SwmcB6AvRt53haBpOyCyjz9jn+dgSSay/hBeTQzrtmWa5ZEx8gg
-RntGhHx3cnNfs4UrXXmZ79tLaEpjJf3TnRUeEI8rfapSmITq0ZudWk8oidHaoNqOgKJdTELd
-AS94Cx6flqHt/I1XiNH7+VwKLMLqQYYjZ9H8dJSbwVqGy9E2H2hkiSGIcaiv9UrFoakhoFyd
-TGMnH9e9Q/puDcEmCZBvHZJwLpmog/XorgoNH3aenOZvhMFEaSopXR4JWmLRkrdtF2mOa1N1
-+6/NNuEIcGgswjIdVN0TCcns/yD7Oj3yiiLVpfZ4ropker75f6jKvOGyVCP8lrnw38lTPHgz
-xTI3Sw91dHeUEfloiIkzArOBeKXAG1lHvIVUydveYewcXl6nwQ3mU6r7ra17EIkCHAQQAQoA
-BgUCTqZWogAKCRASCefxPQySuRaQD/9Gq/qthzQjsVdNnFMNzcm14fQngyP0aJTHhSVAv+G5
-5pTdh8Xwgc4RNoNKzr3jCcRyDakGwkKgpcS86AbKnFXF4+bHftCEZvXGB2a2RxOmwaDt/cyR
-QzMW5FoxMFN3Wan9hIAMzya9KL6zf5Ok+IgBJ4vZvbtamkSCokLtVH74CvxvaoCiFcPLe7Io
-pkvmXXM2W25V3HC0hUG+e1DvJO4AFOnOluyxp56mLL2AoDxZKKCqq8GWYsn1wfE6YYeg3yzs
-FFpTM6D7YVhjz4LnGBkesbSUXffzR0O44CP/R94Rh4ZzisRFKB+2TivMtneWXZ+xIOgDoki5
-JnZezGeEADPjs+cXCZVlL7een8xVkexO42/NBRHFnXFmAfg8aEWC5ECIOYELJX6WKmRCVJZs
-uqbTNjUcdSLXXgG8Kk5Q1zPoCRvkICaQ0ZsMz/fop1zalsGau+GozicDD6ETW5yEC+ZejvOc
-zEEET0gTpWYUXE1b/SXw2/30X3MPj2XToj/5BYbMUvRLSF3D5srNYisDw/jXXxNkmNkklDsQ
-sIMfdyzh/8ePfBtw2ozn+QhYh1aq8PX4ykGbpiI4gIcPNIbiMJfb2Pza1PA+kdq3qF7EhVqm
-jrhIRUkoHs8AErPIAdaR91ZtOLKR7DZpzls7EYf1OYw4JnevRR9sLY/xHbZ0GNDpSokCOAQT
-AQIAIgUCTqWeMwIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQDf9JLY7i8lxC/Q//
-SDU98cEzcAOj+i4aM5HYQlixtX5twk/bc/UnteklN2Kj/v+a12JVZO/PUxEIl+Bjo2Vqg4i0
-lQq6izHQTSesy1X9/XB3KqKujfVzSxtxgJ8TadcqYF02jt+dtg9DxlnOSU6D+KJJiYXMCsWD
-7cRMA7IRckXjuM+4adfgrYKYbIQaxNkzgwuUVMf++iHo40jM/xz/GrWaqqLiKhoCKlCtoW7/
-TsA174BxbtcYo2733uAiGbV3fFjS6fJ5uF1qKNWqpnMIacVPj2K+rHXEbauYN76fCqjU8PsK
-oO/M4FjnQHXp2LfOaVEhg6NTI1/kwTuiNHartGzUPi2kEZALml3/H9rcaSDnN6iifynMgzHh
-gFg1HCticWHuDtdPSVUnGRvTZ6aLjNd2KdVezy8yOTh1UFVNwRC1uRMqGZilXXL3azhl29T3
-c1OLcK+/QdyRSo27Q55NdHR1F+UJy65o1w8VsXwO1FQ8GFWWCcGTmPMTj33on12ujImCrPEE
-NzZXbj4K1QvpnuIXN9mnK4YGzhdxCc0aPUwXn5bD/SMurKn0WR0MUC63N9dVzZYqJNQ0Uoq1
-HwJfXVaOYKIr46ca4R8QGsMWjMKm4Dfp9R95Tw4a79WL/GU1+fSejYcJL6uExLTNlMohQFEw
-3c2UvsPM2QeF3sWKCFLcc2sSN7EuZj2dXhO5Ag0ETqWeMwEQAO0w6KIsF8ktj1If0x4NoNdV
-RN8/zHuVbmCOxa9djvYQJVi+Rj1iBypH7ndTSPZc+61wARfBGolLZwOKSOy5OEeXFCHhI6Px
-ony0nD/r+t6IL5m1l8CJgzAL0H2cEiLCPP+PkDNfpAOeOnyD/jBzy6+sHR7bIUAilGeDGO9T
-V3PGrNhKGeXhqbCbTyNjrRA7wdFJzRU3UZr3Yh393BVLAhOvG1zvXWuOehNutfEJ5lpeblEV
-PN1ik46uRqXQIxn/2bqh7Y3PmZEPe0/X9p1AFqGKDOlbT4EmiSLzYEhtuyK3FgAZi7N4jkNC
-AaNO0o9obu22ZsVtXkEb77ilomgCUsUSll32YYJOJXE4Jkne4fb9F5WUxVmRPyI19X4v2kMO
-x8/DnH12vvZXO/9W3yL/PJSn00ZJYJ4ZgFw1XTMelZLSVm2ddZAuontWcBWFUh01/KWgCV5g
-oz7jwYmEfr+TAFpqG2EpG6h0f76AP0RdzB+BK/SmErauAR9p8Jjlc0G8U7iqi/qD78f88DPf
-G4PChir31Ir6h8yJUPWZawlwesYrMsTEXiQ1QZUjK+xGIHCJGLa0crzXRxeQC7rDe5s/gn/d
-/qUcqLc8MeiOYdJosag2XydE589Up3vHvsKlIzZcSwti5D0ygKO+T/T8PHBxLGGRrFzzxSEP
-iq8NsTCHU1HfABEBAAGJAh8EGAECAAkFAk6lnjMCGwwACgkQDf9JLY7i8lx/kw//Xm3rS74H
-+mBGJVZVMn6MRnOzemPCgh7xe54mm/G2yZUf+xMfngL8axFEce7BeHSVTvmXnzGbcLgW0L4E
-uboSQE1xPbJgH91/6NS6qMXk4dqi71asSW4lEW73PcI6ndCoh7XIcBmD4jv78KktNThNIIW1
-eVj4EeCceJjQ3tkwyDeny5WYeS/qk8BFnBaDR8A6n5AToR/SdsDIFTbQlsaweFjDUt1fEr2x
-7euZm9VzblMXle0FCzNVrIHpi/uE+QuI1dbXKML06VhQ25FdBe3rKtYcxXs0+OumvdpKDP/b
-DhKGfjhYvsPlechIAUntL3QDQzGIJT8DaI1m+laSwL5hwULtNB1kWtXwsGzdnqKaGYVSfKzt
-LKq8cMsd1I+a1TMmF+G85Rs6/ehy0B3P7IA5z9HUd2/jWck6GyaXoek9gILMZRvJB/mW+rKR
-PDjMykmAhIrc7XeE91gH9mAuxRDfzRL8NSS6afjYE6YBFHuZvmfJD1PDyWo1Bx/hPIe0+7aT
-dBoiSpylJQu8D/iHUrPCqPKgcPk20I55bZi0A56mxKu0hTr6a1dFLXBKQdw668SilPzSKoYS
-M1W8Qb/rtySBESobUQYeTceavNl+hXBoGBDQXnSWSIHMFdVc+euwLPgHKVLiXqMtOha/XnE+
-7kmI6Gh95bLf1om+Bc8ajpm9Zsw=
-=OYSX
------END PGP PUBLIC KEY BLOCK-----
-
-pub   4096R/3ABE18B3 2013-03-21
-uid                  Yusaku Sako (CODE SIGNING KEY) <yusaku@apache.org>
-sub   4096R/F74E5802 2013-03-21
-
------BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG/MacGPG2 v2.0.18 (Darwin)
-Comment: GPGTools - http://gpgtools.org
-
-mQINBFFKjM8BEACsAehi8WEZQP+PCWOIpHNjnDTWpw9sKysh67Hyl4WixfDpBFdH
-3C631y1FZU+XUD5v+mr0o3tS10zwq5KMIxE/roDZ4GZyQsGW2NPuZ4PN6rouMg7c
-jYMFcEemr/WrhSlaTnCwfyzxGGiSW7BWnEGjBsHnjzNVEQAjcxNTLeG1dDK6TqTN
-LRfNDliC+zFyBVs0jq6p/MLfxa9Y5U3CNTzcReax7txH/EaLNNUSnAWAodHiTXIE
-19OPjO9ivpq5xnsdbUp6EUp3O0wMkhZjsROT+cj/XyaoRu38kPMULC+YNQnrkiFs
-6bfx+yCaw3v8V6JB/pi0SFcLxIp5FxERgVaumQG8wcNQQW7JRayYmevj0r6mPJWS
-+0ks5U/BE30MYBWk8YEfSBUXGbahBgSN9NbVGiUT2GNAz5Md1jyLqUcpt5oPHpMy
-WbbLhRtbI+VWEPoL42WX6G3nJXuBT85do15agycR9insW8y4jW5C8sAJsCJG2K2s
-9DB7W141W7isU6430c0KOLs0dGWbdWAcHiGGHmmvUvRBZGZzcD7EL45ODqzkTmUD
-Mkhg9ErvlhZBi81GqdHURj2QIjsustFgq2ZQjFRzS3Joc2yR057VpTfymqS2fUvs
-Zcw2tiA6SEm12X/W91fre0Uh/BbRIpJKW3LxdxKyGbotnY3w6BaAf/qJqwARAQAB
-tDJZdXNha3UgU2FrbyAoQ09ERSBTSUdOSU5HIEtFWSkgPHl1c2FrdUBhcGFjaGUu
-b3JnPokCNwQTAQIAIQIbAwIeAQIXgAUCUUqOXwULCQgHAwUVCgkICwUWAgMBAAAK
-CRBMOqJyOr4Ys0eUD/9n+aEFiWlCWJtO9aLLKaVXJ0f1IcMktChg6NeD6eNMGzMH
-Gc9W8XaTWT0M1Rfs5mJRqwEAFgDArYlbvlBeRxfs8P7gG15lSWUvEdsD3P+yt8CT
-qK5Z/gtwfQqvuokyPICrAqNuUcsHuixzc91poQjMfTDIVWwtIITWUR/bPrnwdZmd
-oF1yDoytMtktR9uJo3zLMj/u2BNO6A5brE2HMhZOfhnNQbbI4ckMPGigolt5JG/Y
-2797AZk+svF2E8dCCIifrigCMNqNhsQszeeAC8EsqUOLQod3j1vvPQlUuk/xrLnf
-YQlE1FsUzOHoPxP/PZNmUrdZ8bKgh1fo+ESyLnMdCzmnmiZex7l7/ifF7iVcV3MB
-or8WhwYtmMeCQXBT9+tXWEWtVaS6CLE1B9QLP12UdUhkKOgiPNRWXy7N3Bk3CnVn
-EAp0dpJy5qTyXeiYwntt7UV8JYwlt9QkgzptGFTi4SiA75Y2PutySWs2NyHD/cdl
-m+QySnLyn6evlVeCT48keFwNqTvQAiNlm7kEIvupkURjZXzuON9qpzQqkvEsRmfT
-HscUgztb4QWGJ2ONuWHGonn8/Q+ISh1pbq3u11m4Ak5lfZEH35dBpLWz3FfgH1Cs
-0CfW1Wam75/wk8UXCPDUD+MpNzNo2O7URivnd7co/e1+YuC1r61RnlWoUV7gf7kC
-DQRRSozPARAA2Fe1LNgN8q3fx1pbwZXA8wVds5ryVno3HAXf7tYvOSL7lHfYTSGj
-8KIucQA16vz11H2h3YagGkhc+JrdomwSdp1GupK2WcTr3ineS4zym0QXOJcfLKqo
-hkMCepDb4dIEPMqG/ACneSxSBzekT2FueIPFSk30Y2TqVMsiOR2ewo/FNQxLXrb7
-b7UdFotwiKfQRoedjATMZf8xXTfFrIDuyGZYUOfwH5BRAawVm96yfongp/6UYqdY
-h0Voo0f2kRGfORT+9D6Y7ezG9Y+RcxXxHrZe2Fzy0kiJ1/sXuvzEQdaeboi7g3Rs
-s8B94kvwrxfRdkB7XQzxc0IilUhfBbanRirpb3b+lTzeTrSBVf3kj5k5YlQeGr5n
-crzf5JfjuRyOZhpYZkSV2QPSjLqhuQY8S/TRQurd6DfNZDCaJIptZFIc4ucDcPn7
-KGh4mw93RB429aiv/JcuQ77HZgnxYLFhHhzXjPeMVKR9WTz4EXhuc87VB8yJuq+G
-AT0VBagqbSo5+Tako7fuSTagzGUOA0JE3XR8kfXptuwF4kYc5/WxHWsGR/Eti6ib
-9ls1mKD/13yT4rdWSyBo1WLlk8q/g5tTwy471OcHuzouj4je74zzC/xcURnMHM91
-y6PsG1ECAq0rmeGTQxjuLaOaJfj3NAGRMHgpWW6ar2ULL/zKep3am+8AEQEAAYkC
-HwQYAQIACQUCUUqMzwIbDAAKCRBMOqJyOr4YswWWD/9QFyHVOC9yvjY9TI66zfgj
-g1scibK/x41DqMeo3h8F+lkmydrsWBXny3WUv2g9MqliOXsMDZiVs1oHIis1JJgj
-YcvTP6AXQExd+WFOxzW+tZT0ZlJlQ32nrjGvnsCpGEOKMzDf1xLQZ+0VGQpVsgk5
-zQQ6AxRwXB3diYDHRsC6z1seGF8CJgmIRP+Fd5HehvLiyKI+87slUEsMIgSpmg/X
-G2xhXjiLFpR902BWECFJOfpYg3PG+7k5WfcVLdvPvt0WoYal+Wiv8bKqroC73gwA
-avcJWATNpaNvl8sZyaEIh2tHDmLudBxz1F7aloyIymAoWtqNfuHlrlfq3bPrj/tU
-7u8qxzPiPdtSl8d57B8EXShXN4HW+oSyWGbZ17jJEt+xWlE+bwHmW1hXSeU6h4e9
-NK6910/cgyJq1VgAeE5HG82cR7W+2oOaDLs3Eu71X3bphVS7zlD56GviRnZFyjM3
-6FtAsKKyeEQLzwEzTGMGijgaQfre/Fy7XrsaEcSEDTFWEs1oFtQm0Ny4auC2Tr+z
-VCaqH5uFM5jJUgxSBK30zn/MqT3u48/Ow3mTdoMEudaw7pffe4TavSHGSeh25iuk
-pE7N7T0mS0q7k/k2yMjcc5D6sWqnWPqjxNvgVkz4OZx/Zii8oAYqIoMStZFiNNAd
-Ww+qLwIsYPC6XQNrVJpbzg==
-=dBi7
------END PGP PUBLIC KEY BLOCK-----
-
diff --git a/branch-1.2/LICENSE.txt b/branch-1.2/LICENSE.txt
deleted file mode 100644
index 4a51656..0000000
--- a/branch-1.2/LICENSE.txt
+++ /dev/null
@@ -1,262 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-
-
-APACHE AMBARI SUBCOMPONENTS:
-
-The Apache Ambari project contains subcomponents with separate copyright
-notices and license terms. Your use of the source code for these
-subcomponents is subject to the terms and conditions of the following
-licenses. 
-
-For the stdlib in puppet modules:
-
-Copyright (C) 2011 Puppet Labs Inc
-
-and some parts:
-
-Copyright (C) 2011 Krzysztof Wilczynski
-
-Puppet Labs can be contacted at: info@puppetlabs.com
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-
-For ambari-common/src/test/python:
-
-Copyright (c) 2003-2012, Michael Foord
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-
-    * Redistributions in binary form must reproduce the above
-      copyright notice, this list of conditions and the following
-      disclaimer in the documentation and/or other materials provided
-      with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
diff --git a/branch-1.2/NOTICE.txt b/branch-1.2/NOTICE.txt
deleted file mode 100644
index 747f4a5..0000000
--- a/branch-1.2/NOTICE.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-Apache Ambari
-Copyright 2011-2013 The Apache Software Foundation
-
-This product includes software developed at The Apache Software
-Foundation (http://www.apache.org/).
-
-The component ambari-common/src/test/python is under the following copyright:
-
-Copyright (c) 2003-2012, Michael Foord
-All rights reserved.
diff --git a/branch-1.2/ambari-agent/conf/unix/ambari-agent b/branch-1.2/ambari-agent/conf/unix/ambari-agent
deleted file mode 100644
index ef868b6..0000000
--- a/branch-1.2/ambari-agent/conf/unix/ambari-agent
+++ /dev/null
@@ -1,172 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# description: ambari-agent daemon
-# processname: ambari-agent
-
-# /etc/init.d/ambari-agent
-
-export PATH=/usr/lib/ambari-server:$PATH
-export AMBARI_CONF_DIR=/etc/ambari-server/conf
-
-AMBARI_AGENT=ambari-agent
-PIDFILE=/var/run/ambari-agent/$AMBARI_AGENT.pid
-LOGFILE=/var/log/ambari-agent/ambari-agent.out
-AGENT_SCRIPT=/usr/lib/python2.6/site-packages/ambari_agent/main.py
-OK=1
-NOTOK=0
-
-
-if [ -a /usr/bin/python2.6 ]; then
-  PYTHON=/usr/bin/python2.6
-fi
-
-if [ "x$PYTHON" == "x" ]; then
-  PYTHON=/usr/bin/python
-fi
-
-# Try to read the passphrase from the environment
-if [ ! -z $AMBARI_PASSPHRASE ]; then
-  RESOLVED_AMBARI_PASSPHRASE=$AMBARI_PASSPHRASE
-fi
-
-# Reading the environment file
-if [ -a /var/lib/ambari-agent/ambari-env.sh ]; then
-  . /var/lib/ambari-agent/ambari-env.sh
-fi
-
-# If the passphrase is not defined yet, use the value from the env file
-if [ -z $RESOLVED_AMBARI_PASSPHRASE ] && [ ! -z $AMBARI_PASSPHRASE ]; then
-  RESOLVED_AMBARI_PASSPHRASE=$AMBARI_PASSPHRASE
-elif [ -z $RESOLVED_AMBARI_PASSPHRASE ]; then
-  # Passphrase is not defined anywhere, set the default value
-  RESOLVED_AMBARI_PASSPHRASE="DEV"
-fi
-
-export AMBARI_PASSPHRASE=$RESOLVED_AMBARI_PASSPHRASE
-
-#echo $AMBARI_PASSPHRASE
-
-# check for version
-check_python_version ()
-{
-  echo "Verifying Python version compatibility..."
-  majversion=`$PYTHON -V 2>&1 | awk '{print $2}' | cut -d'.' -f1`
-  minversion=`$PYTHON -V 2>&1 | awk '{print $2}' | cut -d'.' -f2`
-  numversion=$(( 10 * $majversion + $minversion))
-  if (( $numversion < 26 )); then
-    echo "ERROR: Found Python version $majversion.$minversion. Ambari Agent requires Python version > 2.6"
-    return $NOTOK
-  fi
-  echo "Using python " $PYTHON
-  return $OK
-}
-
-case "$1" in
-  start)
-        check_python_version
-        if [ "$?" -eq "$NOTOK" ]; then
-          exit -1
-        fi
-        echo "Checking for previously running Ambari Agent..."
-        if [ -f $PIDFILE ]; then
-          PID=`cat $PIDFILE`
-          if [ -z "`ps ax -o pid | grep $PID`" ]; then
-            echo "$PIDFILE found with no process. Removing $PID..."
-            rm -f $PIDFILE
-          else
-            tput bold
-            echo "ERROR: $AMBARI_AGENT already running"
-            tput sgr0
-            echo "Check $PIDFILE for PID."
-            exit -1
-          fi
-        fi
-        echo "Starting ambari-agent"
-        nohup $PYTHON $AGENT_SCRIPT > $LOGFILE 2>&1 &
-        sleep 2
-        PID=$!
-        echo "Verifying $AMBARI_AGENT process status..."
-        if [ -z "`ps ax -o pid | grep $PID`" ]; then
-          echo "ERROR: $AMBARI_AGENT start failed for unknown reason"
-          exit -1
-        fi
-        tput bold
-        echo "Ambari Agent successfully started"
-        tput sgr0
-        echo "Agent PID at: $PIDFILE"
-        echo "Agent log at: $LOGFILE"
-        ;;
-  status)
-        if [ -f $PIDFILE ]; then
-          PID=`cat $PIDFILE`
-          echo "Found $AMBARI_AGENT PID: $PID"
-          if [ -z "`ps ax -o pid | grep $PID`" ]; then
-            echo "$AMBARI_AGENT not running. Stale PID File at: $PIDFILE"
-          else
-            tput bold
-            echo "$AMBARI_AGENT running."
-            tput sgr0
-            echo "Agent PID at: $PIDFILE"
-            echo "Agent log at: $LOGFILE"
-          fi
-        else
-          tput bold
-          echo "$AMBARI_AGENT currently not running"
-          tput sgr0
-          echo "Usage: /usr/sbin/ambari-agent {start|stop|restart|status}"
-        fi
-        ;;
-  stop)
-        check_python_version
-        if [ "$?" -eq "$NOTOK" ]; then
-          exit -1
-        fi
-        if [ -f $PIDFILE ]; then
-          PID=`cat $PIDFILE`
-          echo "Found $AMBARI_AGENT PID: $PID"
-          if [ -z "`ps ax -o pid | grep $PID`" ]; then
-            tput bold
-            echo "ERROR: $AMBARI_AGENT not running. Stale PID File at: $PIDFILE"
-            tput sgr0
-          else
-            echo "Stopping $AMBARI_AGENT"
-            $PYTHON $AGENT_SCRIPT stop
-          fi
-          echo "Removing PID file at $PIDFILE"
-          rm -f $PIDFILE
-          tput bold
-          echo "$AMBARI_AGENT successfully stopped"
-          tput sgr0
-        else
-          tput bold
-          echo "$AMBARI_AGENT is not running. No PID found at $PIDFILE"
-          tput sgr0
-        fi
-        ;;
-  restart)
-        echo -e "Restarting $AMBARI_AGENT"
-        $0 stop
-        $0 start
-        ;;     
-  *)
-        tput bold
-        echo "Usage: /usr/sbin/ambari-agent {start|stop|restart|status}"
-        tput sgr0
-        exit 1
-esac
-
-exit 0
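
A minimal usage sketch for the init script above (the passphrase value is
hypothetical): an exported AMBARI_PASSPHRASE takes precedence over the value
sourced from /var/lib/ambari-agent/ambari-env.sh, with "DEV" as the final
fallback.

    # Hypothetical invocation; the passphrase value is illustrative only.
    export AMBARI_PASSPHRASE="cluster-secret"
    /usr/sbin/ambari-agent start
    /usr/sbin/ambari-agent status
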
diff --git a/branch-1.2/ambari-agent/conf/unix/ambari-agent.ini b/branch-1.2/ambari-agent/conf/unix/ambari-agent.ini
deleted file mode 100644
index 35f5d23..0000000
--- a/branch-1.2/ambari-agent/conf/unix/ambari-agent.ini
+++ /dev/null
@@ -1,52 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-[server]
-hostname=localhost
-url_port=8440
-secured_url_port=8441
-
-[agent]
-prefix=/var/lib/ambari-agent/data
-
-[stack]
-installprefix=/var/ambari-agent/
-
-[puppet]
-puppetmodules=/var/lib/ambari-agent/puppet
-ruby_home=/usr/lib/ambari-agent/lib/ruby-1.8.7-p370
-puppet_home=/usr/lib/ambari-agent/lib/puppet-2.7.9
-facter_home=/usr/lib/ambari-agent/lib/facter-1.6.10
-
-[command]
-maxretries=2
-sleepBetweenRetries=1
-
-[security]
-keysdir=/var/lib/ambari-agent/keys
-server_crt=ca.crt
-passphrase_env_var_name=AMBARI_PASSPHRASE
-
-[services]
-pidLookupPath=/var/run/
-
-[heartbeat]
-state_interval=6
-dirs=/etc/hadoop,/etc/hadoop/conf,/etc/hbase,/etc/hcatalog,/etc/hive,/etc/oozie,
-  /etc/sqoop,/etc/ganglia,/etc/nagios,
-  /var/run/hadoop,/var/run/zookeeper,/var/run/hbase,/var/run/templeton,/var/run/oozie,
-  /var/log/hadoop,/var/log/zookeeper,/var/log/hbase,/var/log/templeton,/var/log/hive,
-  /var/log/nagios
-rpms=nagios,ganglia,
-  hadoop,hadoop-lzo,hbase,oozie,sqoop,pig,zookeeper,hive,libconfuse,ambari-log4j
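
A minimal sketch, assuming the file above is installed at its packaged path
(/etc/ambari-agent/conf/ambari-agent.ini), of pulling a single setting out of
it from a shell script:

    # Hypothetical helper: read the server hostname from the agent ini.
    ini=/etc/ambari-agent/conf/ambari-agent.ini
    hostname=$(awk -F= '/^hostname=/ {print $2; exit}' "$ini")
    echo "Ambari server host: $hostname"
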
diff --git a/branch-1.2/ambari-agent/conf/unix/ambari-env.sh b/branch-1.2/ambari-agent/conf/unix/ambari-env.sh
deleted file mode 100644
index 50c8fa4..0000000
--- a/branch-1.2/ambari-agent/conf/unix/ambari-env.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# To change the passphrase used by the agent, adjust the line below. This value is used when no passphrase
-# is given through the environment variable.
-AMBARI_PASSPHRASE="DEV"
diff --git a/branch-1.2/ambari-agent/pom.xml b/branch-1.2/ambari-agent/pom.xml
deleted file mode 100644
index 55e9330..0000000
--- a/branch-1.2/ambari-agent/pom.xml
+++ /dev/null
@@ -1,326 +0,0 @@
-<?xml version="1.0"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-  <!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-  <parent>
-    <groupId>org.apache.ambari</groupId>
-    <artifactId>ambari-project</artifactId>
-    <version>1.2.2-SNAPSHOT</version>
-    <relativePath>../ambari-project</relativePath>
-  </parent>
-  <modelVersion>4.0.0</modelVersion>
-  <groupId>org.apache.ambari</groupId>
-  <artifactId>ambari-agent</artifactId>
-  <packaging>pom</packaging>
-  <version>1.2.2-SNAPSHOT</version>
-  <name>Ambari Agent</name>
-  <description>Ambari Agent</description>
-  <properties>
-    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
-    <final.name>${project.artifactId}-${project.version}</final.name>
-    <package.release>1</package.release>
-    <package.prefix>/usr</package.prefix>
-    <package.conf.dir>/etc/ambari-agent/conf</package.conf.dir>
-    <package.log.dir>/var/log/ambari-agent</package.log.dir>
-    <package.pid.dir>/var/run/ambari-agent</package.pid.dir>
-    <skipTests>false</skipTests>
-    <facter.tar>http://downloads.puppetlabs.com/facter/facter-1.6.10.tar.gz</facter.tar>
-    <puppet.tar>http://downloads.puppetlabs.com/puppet/puppet-2.7.9.tar.gz</puppet.tar>
-    <install.dir>/usr/lib/python2.6/site-packages/ambari_agent</install.dir>
-    <ruby.tar>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos6/ruby-1.8.7-p370.tar.gz</ruby.tar>
-    <lib.dir>/usr/lib/ambari-agent/lib</lib.dir>
-  </properties>
-  <profiles>
-    <profile>
-      <id>suse11</id>
-      <properties>
-        <ruby.tar>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/suse11/ruby-1.8.7-p370.tar.gz</ruby.tar>
-      </properties>
-    </profile>
-    <profile>
-      <id>centos5</id>
-      <properties>
-        <ruby.tar>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos5/ruby-1.8.7-p370.tar.gz</ruby.tar>
-      </properties>
-    </profile>
-  </profiles>
-  <build>
-    <plugins>
-      <plugin>
-        <artifactId>maven-compiler-plugin</artifactId>
-        <version>3.0</version>
-      </plugin>
-      <plugin>
-        <artifactId>maven-assembly-plugin</artifactId>
-        <configuration>
-          <tarLongFileMode>gnu</tarLongFileMode>
-          <descriptors>
-            <descriptor>src/packages/tarball/all.xml</descriptor>
-          </descriptors>
-        </configuration>
-        <executions>
-          <execution>
-            <id>build-tarball</id>
-            <phase>prepare-package</phase>
-            <goals>
-              <goal>single</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>exec-maven-plugin</artifactId>
-        <version>1.2</version>
-        <executions>
-          <execution>
-            <configuration>
-              <executable>python2.6</executable>
-              <workingDirectory>src/test/python</workingDirectory>
-              <arguments>
-                <argument>unitTests.py</argument>
-              </arguments>
-              <environmentVariables>
-                <PYTHONPATH>${project.basedir}/../ambari-common/src/test/python:${project.basedir}/src/main/python/ambari_agent:$PYTHONPATH</PYTHONPATH>
-              </environmentVariables>
-              <skip>${skipTests}</skip>
-            </configuration>
-            <id>python-test</id>
-            <phase>test</phase>
-            <goals>
-              <goal>exec</goal>
-            </goals>
-          </execution>
-          <execution>
-            <configuration>
-              <executable>python2.6</executable>
-              <workingDirectory>target/ambari-agent-${project.version}</workingDirectory>
-              <arguments>
-                <argument>${project.basedir}/src/main/python/setup.py</argument>
-                <argument>clean</argument>
-                <argument>bdist_dumb</argument>
-              </arguments>
-              <environmentVariables>
-                <PYTHONPATH>target/ambari-agent-${project.version}:$PYTHONPATH</PYTHONPATH>
-              </environmentVariables>
-            </configuration>
-            <id>python-package</id>
-            <phase>package</phase>
-            <goals>
-              <goal>exec</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>rpm-maven-plugin</artifactId>
-        <version>2.0.1</version>
-        <executions>
-          <execution>
-            <phase>none</phase>
-            <goals>
-              <goal>rpm</goal>
-            </goals>
-          </execution>
-        </executions>
-        <configuration>
-          <copyright>2012, Apache Software Foundation</copyright>
-          <group>Development</group>
-          <description>Maven Recipe: RPM Package.</description>
-          <requires>
-            <require>openssl</require>
-            <require>zlib</require>
-            <require>${python.ver}</require>
-          </requires>
-           <postinstallScriptlet>
-            <scriptFile>src/main/package/rpm/postinstall.sh</scriptFile>
-            <fileEncoding>utf-8</fileEncoding>
-          </postinstallScriptlet>
-          <preinstallScriptlet>
-            <scriptFile>src/main/package/rpm/preinstall.sh</scriptFile>
-            <fileEncoding>utf-8</fileEncoding>
-          </preinstallScriptlet>
-          <preremoveScriptlet>
-            <script>mv /etc/ambari-agent/conf /etc/ambari-agent/conf.save</script>
-          </preremoveScriptlet>
-
-          <needarch>x86_64</needarch>
-          <autoRequires>false</autoRequires>
-          <mappings>
-            <mapping>
-              <directory>${install.dir}</directory>
-              <sources>
-                <source>
-                  <location>${project.build.directory}/${project.artifactId}-${project.version}/ambari_agent</location>
-                </source>
-              </sources>
-            </mapping>
-            <mapping>
-              <directory>${lib.dir}</directory>
-              <sources>
-                <source>
-                  <location>${project.build.directory}/lib</location>
-                </source>
-              </sources>
-            </mapping>
-             <mapping>
-              <directory>${lib.dir}/examples</directory>
-              <filemode>755</filemode>
-              <username>root</username>
-              <groupname>root</groupname>
-              <sources>
-                <source>
-                  <location>src/examples</location>
-                </source>
-              </sources>
-            </mapping>
-            <mapping>
-              <directory>/var/lib/${project.artifactId}/puppet</directory>
-              <filemode>755</filemode>
-              <username>root</username>
-              <groupname>root</groupname>
-              <sources>
-                <source>
-                  <location>src/main/puppet</location>
-                </source>
-              </sources>
-            </mapping>
-            <mapping>
-              <directory>${package.conf.dir}</directory>
-              <configuration>true</configuration>
-              <filemode>755</filemode>
-              <username>root</username>
-              <groupname>root</groupname>
-              <sources>
-                <source>
-                  <location>conf/unix/ambari-agent.ini</location>
-                </source>
-              </sources>
-            </mapping>
-            <mapping>
-              <directory>/usr/sbin</directory>
-              <filemode>755</filemode>
-              <username>root</username>
-              <groupname>root</groupname>
-              <sources>
-                <source>
-                  <location>conf/unix/ambari-agent</location>
-                </source>
-              </sources>
-            </mapping>
-            <mapping>
-              <directory>/var/lib/ambari-agent</directory>
-              <filemode>700</filemode>
-              <username>root</username>
-              <groupname>root</groupname>
-              <sources>
-                <source>
-                  <location>conf/unix/ambari-env.sh</location>
-                </source>
-              </sources>
-            </mapping>
-            <mapping>
-              <directory>${package.pid.dir}</directory>
-              <filemode>755</filemode>
-              <username>root</username>
-              <groupname>root</groupname>
-            </mapping>
-            <mapping>
-              <directory>/var/lib/${project.artifactId}/data</directory>
-              <filemode>755</filemode>
-              <username>root</username>
-              <groupname>root</groupname>
-            </mapping>
-            <mapping>
-              <directory>/var/lib/${project.artifactId}/keys</directory>
-              <filemode>755</filemode>
-              <username>root</username>
-              <groupname>root</groupname>
-            </mapping>
-            <mapping>
-              <directory>${package.log.dir}</directory>
-              <filemode>755</filemode>
-              <username>root</username>
-              <groupname>root</groupname>
-            </mapping>
-            <mapping>
-              <directory>/var/ambari-agent</directory>
-              <filemode>755</filemode>
-              <username>root</username>
-              <groupname>root</groupname>
-            </mapping>
-            <!-- -->
-          </mappings>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>com.github.goldin</groupId>
-        <artifactId>copy-maven-plugin</artifactId>
-        <version>0.2.5</version>
-        <executions>
-          <execution>
-            <id>create-archive</id>
-            <phase>package</phase>
-            <goals>
-              <goal>copy</goal>
-            </goals>
-            <configuration>
-              <resources>
-                <resource>
-                  <targetPath>${project.build.directory}/lib</targetPath>
-                  <file>${ruby.tar}</file>
-                  <unpack>true</unpack>
-                </resource>
-                <resource>
-                  <targetPath>${project.build.directory}/lib</targetPath>
-                  <file>${facter.tar}</file>
-                  <unpack>true</unpack>
-                </resource>
-                <resource>
-                  <targetPath>${project.build.directory}/lib</targetPath>
-                  <file>${puppet.tar}</file>
-                  <unpack>true</unpack>
-                </resource>
-              </resources>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.rat</groupId>
-        <artifactId>apache-rat-plugin</artifactId>
-        <configuration>
-          <excludes>
-            <exclude>src/examples/*</exclude>
-            <exclude>src/test/python/dummy*.txt</exclude>
-            <exclude>src/main/python/ambari_agent/imports.txt</exclude>
-            <exclude>src/main/puppet/modules/stdlib/**</exclude>
-            <exclude>**/*.erb</exclude>
-            <exclude>**/*.json</exclude>
-          </excludes>
-        </configuration>
-      </plugin>
-    </plugins>
-    <extensions>
-      <extension>
-        <groupId>org.apache.maven.wagon</groupId>
-        <artifactId>wagon-ssh-external</artifactId>
-      </extension>
-    </extensions>
-  </build>
-</project>
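
A hypothetical build invocation for the module above; the suse11 and centos5
profiles only swap the ruby.tar download URL, and the skipTests property gates
the python-test execution bound to the test phase:

    # Build the agent package for centos5 without running the Python unit tests.
    mvn clean package -P centos5 -DskipTests=true
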
diff --git a/branch-1.2/ambari-agent/src/examples/query_with3jobs.txt b/branch-1.2/ambari-agent/src/examples/query_with3jobs.txt
deleted file mode 100644
index 8385739..0000000
--- a/branch-1.2/ambari-agent/src/examples/query_with3jobs.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-SELECT 
-   i_item_id,
-   s_state, 
-   avg(ss_quantity) agg1,
-   avg(ss_list_price) agg2,
-   avg(ss_coupon_amt) agg3,
-   avg(ss_sales_price) agg4
-FROM
-   (SELECT /*+ MAPJOIN(item) */ i_item_id, s_state, ss_quantity, ss_list_price, ss_coupon_amt, ss_sales_price
-   FROM item
-   JOIN
-      (SELECT /*+ MAPJOIN(customer_demographics) */ s_state, ss_quantity, ss_list_price, ss_coupon_amt, ss_sales_price, ss_item_sk
-      FROM customer_demographics
-      JOIN
-         (SELECT /*+ MAPJOIN(store) */ s_state, ss_quantity, ss_list_price, ss_coupon_amt, ss_sales_price, ss_item_sk, ss_cdemo_sk
-         FROM store
-         JOIN
-            (SELECT /*+ MAPJOIN(date_dim) */ ss_quantity, ss_list_price, ss_coupon_amt, ss_sales_price, ss_item_sk, ss_cdemo_sk, ss_store_sk
-             FROM date_dim
-             JOIN store_sales
-	     ON (store_sales.ss_sold_date_sk = date_dim.d_date_sk) WHERE d_year = 2002) a
-         ON (a.ss_store_sk = store.s_store_sk)
-         WHERE s_state in ('TN', 'SD')) b
-      ON (b.ss_cdemo_sk = customer_demographics.cd_demo_sk)
-      WHERE
-         cd_gender = 'M' and
-         cd_marital_status = 'S' and
-         cd_education_status = 'College') c
-   ON (c.ss_item_sk = item.i_item_sk)) d
-GROUP BY
-   i_item_id,
-   s_state
-ORDER BY
-   i_item_id,
-   s_state
-LIMIT 100;
\ No newline at end of file
diff --git a/branch-1.2/ambari-agent/src/examples/query_with6jobs.txt b/branch-1.2/ambari-agent/src/examples/query_with6jobs.txt
deleted file mode 100644
index 3f437f5..0000000
--- a/branch-1.2/ambari-agent/src/examples/query_with6jobs.txt
+++ /dev/null
@@ -1,35 +0,0 @@
-SELECT 
-   i_item_id,
-   s_state, 
-   avg(ss_quantity) agg1,
-   avg(ss_list_price) agg2,
-   avg(ss_coupon_amt) agg3,
-   avg(ss_sales_price) agg4
-FROM
-   (SELECT i_item_id, s_state, ss_quantity, ss_list_price, ss_coupon_amt, ss_sales_price
-   FROM item
-   JOIN
-      (SELECT  s_state, ss_quantity, ss_list_price, ss_coupon_amt, ss_sales_price, ss_item_sk
-      FROM customer_demographics
-      JOIN
-         (SELECT  s_state, ss_quantity, ss_list_price, ss_coupon_amt, ss_sales_price, ss_item_sk, ss_cdemo_sk
-         FROM store
-         JOIN
-            (SELECT  ss_quantity, ss_list_price, ss_coupon_amt, ss_sales_price, ss_item_sk, ss_cdemo_sk, ss_store_sk
-             FROM date_dim
-             JOIN store_sales  ON (store_sales.ss_sold_date_sk = date_dim.d_date_sk) WHERE d_year = 2002) a
-         ON (a.ss_store_sk = store.s_store_sk)
-         WHERE s_state in ('TN', 'SD')) b
-      ON (b.ss_cdemo_sk = customer_demographics.cd_demo_sk)
-      WHERE
-         cd_gender = 'M' and
-         cd_marital_status = 'S' and
-         cd_education_status = 'College') c
-   ON (c.ss_item_sk = item.i_item_sk)) d
-GROUP BY
-   i_item_id,
-   s_state
-ORDER BY
-   i_item_id,
-   s_state
-LIMIT 100;
\ No newline at end of file
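
A hypothetical way to run either example query with the Hive CLI, assuming the
TPC-DS tables defined in tpcds_ss_tables.sql (below) have been created and
loaded:

    # As the file names suggest, the MAPJOIN hints in query_with3jobs.txt yield
    # a three-job plan, versus six jobs for the unhinted query_with6jobs.txt.
    hive -f query_with3jobs.txt
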
diff --git a/branch-1.2/ambari-agent/src/examples/tpcds_ss_tables.sql b/branch-1.2/ambari-agent/src/examples/tpcds_ss_tables.sql
deleted file mode 100644
index e6a3b61..0000000
--- a/branch-1.2/ambari-agent/src/examples/tpcds_ss_tables.sql
+++ /dev/null
@@ -1,226 +0,0 @@
-create  table store_sales
-(
-    ss_sold_date_sk           int,
-    ss_sold_time_sk           int,
-    ss_item_sk                int,
-    ss_customer_sk            int,
-    ss_cdemo_sk               int,
-    ss_hdemo_sk               int,
-    ss_addr_sk                int,
-    ss_store_sk               int,
-    ss_promo_sk               int,
-    ss_ticket_number          int,
-    ss_quantity               int,
-    ss_wholesale_cost         float,
-    ss_list_price             float,
-    ss_sales_price            float,
-    ss_ext_discount_amt       float,
-    ss_ext_sales_price        float,
-    ss_ext_wholesale_cost     float,
-    ss_ext_list_price         float,
-    ss_ext_tax                float,
-    ss_coupon_amt             float,
-    ss_net_paid               float,
-    ss_net_paid_inc_tax       float,
-    ss_net_profit             float                  
-)
-row format delimited fields terminated by '|' ;
-
-create  table customer_demographics
-(
-    cd_demo_sk                int,
-    cd_gender                 string,
-    cd_marital_status         string,
-    cd_education_status       string,
-    cd_purchase_estimate      int,
-    cd_credit_rating          string,
-    cd_dep_count              int,
-    cd_dep_employed_count     int,
-    cd_dep_college_count      int 
-)
-row format delimited fields terminated by '|' ;
-
-create  table date_dim
-(
-    d_date_sk                 int,
-    d_date_id                 string,
-    d_date                    timestamp,
-    d_month_seq               int,
-    d_week_seq                int,
-    d_quarter_seq             int,
-    d_year                    int,
-    d_dow                     int,
-    d_moy                     int,
-    d_dom                     int,
-    d_qoy                     int,
-    d_fy_year                 int,
-    d_fy_quarter_seq          int,
-    d_fy_week_seq             int,
-    d_day_name                string,
-    d_quarter_name            string,
-    d_holiday                 string,
-    d_weekend                 string,
-    d_following_holiday       string,
-    d_first_dom               int,
-    d_last_dom                int,
-    d_same_day_ly             int,
-    d_same_day_lq             int,
-    d_current_day             string,
-    d_current_week            string,
-    d_current_month           string,
-    d_current_quarter         string,
-    d_current_year            string 
-)
-row format delimited fields terminated by '|' ;
-
-create  table time_dim
-(
-    t_time_sk                 int,
-    t_time_id                 string,
-    t_time                    int,
-    t_hour                    int,
-    t_minute                  int,
-    t_second                  int,
-    t_am_pm                   string,
-    t_shift                   string,
-    t_sub_shift               string,
-    t_meal_time               string
-)
-row format delimited fields terminated by '|' ;
-
-create  table item
-(
-    i_item_sk                 int,
-    i_item_id                 string,
-    i_rec_start_date          timestamp,
-    i_rec_end_date            timestamp,
-    i_item_desc               string,
-    i_current_price           float,
-    i_wholesale_cost          float,
-    i_brand_id                int,
-    i_brand                   string,
-    i_class_id                int,
-    i_class                   string,
-    i_category_id             int,
-    i_category                string,
-    i_manufact_id             int,
-    i_manufact                string,
-    i_size                    string,
-    i_formulation             string,
-    i_color                   string,
-    i_units                   string,
-    i_container               string,
-    i_manager_id              int,
-    i_product_name            string
-)
-row format delimited fields terminated by '|' ;
-
-create  table store
-(
-    s_store_sk                int,
-    s_store_id                string,
-    s_rec_start_date          timestamp,
-    s_rec_end_date            timestamp,
-    s_closed_date_sk          int,
-    s_store_name              string,
-    s_number_employees        int,
-    s_floor_space             int,
-    s_hours                   string,
-    s_manager                 string,
-    s_market_id               int,
-    s_geography_class         string,
-    s_market_desc             string,
-    s_market_manager          string,
-    s_division_id             int,
-    s_division_name           string,
-    s_company_id              int,
-    s_company_name            string,
-    s_street_number           string,
-    s_street_name             string,
-    s_street_type             string,
-    s_suite_number            string,
-    s_city                    string,
-    s_county                  string,
-    s_state                   string,
-    s_zip                     string,
-    s_country                 string,
-    s_gmt_offset              float,
-    s_tax_precentage          float                  
-)
-row format delimited fields terminated by '|' ;
-
-create  table customer
-(
-    c_customer_sk             int,
-    c_customer_id             string,
-    c_current_cdemo_sk        int,
-    c_current_hdemo_sk        int,
-    c_current_addr_sk         int,
-    c_first_shipto_date_sk    int,
-    c_first_sales_date_sk     int,
-    c_salutation              string,
-    c_first_name              string,
-    c_last_name               string,
-    c_preferred_cust_flag     string,
-    c_birth_day               int,
-    c_birth_month             int,
-    c_birth_year              int,
-    c_birth_country           string,
-    c_login                   string,
-    c_email_address           string,
-    c_last_review_date        string
-)
-row format delimited fields terminated by '|' ;
-
-create  table promotion
-(
-    p_promo_sk                int,
-    p_promo_id                string,
-    p_start_date_sk           int,
-    p_end_date_sk             int,
-    p_item_sk                 int,
-    p_cost                    float,
-    p_response_target         int,
-    p_promo_name              string,
-    p_channel_dmail           string,
-    p_channel_email           string,
-    p_channel_catalog         string,
-    p_channel_tv              string,
-    p_channel_radio           string,
-    p_channel_press           string,
-    p_channel_event           string,
-    p_channel_demo            string,
-    p_channel_details         string,
-    p_purpose                 string,
-    p_discount_active         string 
-)
-row format delimited fields terminated by '|' ;
-
-create  table household_demographics
-(
-    hd_demo_sk                int,
-    hd_income_band_sk         int,
-    hd_buy_potential          string,
-    hd_dep_count              int,
-    hd_vehicle_count          int
-)
-row format delimited fields terminated by '|' ;
-
-create  table customer_address
-(
-    ca_address_sk             int,
-    ca_address_id             string,
-    ca_street_number          string,
-    ca_street_name            string,
-    ca_street_type            string,
-    ca_suite_number           string,
-    ca_city                   string,
-    ca_county                 string,
-    ca_state                  string,
-    ca_zip                    string,
-    ca_country                string,
-    ca_gmt_offset             float,
-    ca_location_type          string
-)
-row format delimited fields terminated by '|' ;
-
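[Editor's note] Since every table above is declared as plain pipe-delimited text, populating them for the example queries is mechanical. A minimal sketch, assuming the Hive CLI is on PATH and a TPC-DS flat file at a hypothetical local path:

    hive -f tpcds_ss_tables.sql
    hive -e "LOAD DATA LOCAL INPATH '/tmp/store_sales.dat' OVERWRITE INTO TABLE store_sales;"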
diff --git a/branch-1.2/ambari-agent/src/main/package/rpm/postinstall.sh b/branch-1.2/ambari-agent/src/main/package/rpm/postinstall.sh
deleted file mode 100644
index 38634cf..0000000
--- a/branch-1.2/ambari-agent/src/main/package/rpm/postinstall.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License
-
-chmod 755 /usr/lib/ambari-agent/lib/facter-1.6.10/bin/facter /usr/lib/ambari-agent/lib/puppet-2.7.9/bin/filebucket /usr/lib/ambari-agent/lib/puppet-2.7.9/bin/pi /usr/lib/ambari-agent/lib/puppet-2.7.9/bin/puppet /usr/lib/ambari-agent/lib/puppet-2.7.9/bin/puppetdoc /usr/lib/ambari-agent/lib/puppet-2.7.9/bin/ralsh /usr/lib/ambari-agent/lib/ruby-1.8.7-p370/bin/*
diff --git a/branch-1.2/ambari-agent/src/main/package/rpm/preinstall.sh b/branch-1.2/ambari-agent/src/main/package/rpm/preinstall.sh
deleted file mode 100644
index 228b835..0000000
--- a/branch-1.2/ambari-agent/src/main/package/rpm/preinstall.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License
-
-getent group puppet >/dev/null || groupadd -r puppet
-getent passwd puppet >/dev/null || /usr/sbin/useradd -g puppet puppet
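[Editor's note] The preinstall script leans on getent's exit status so that repeated rpm installs never fail on an already-existing account. A minimal sketch of the same create-if-missing pattern for an arbitrary service account (the names are hypothetical, not from the package):

    getent group svcgroup >/dev/null || groupadd -r svcgroup
    getent passwd svcuser >/dev/null || useradd -r -g svcgroup svcuser
    # Safe to re-run: getent succeeds once the entries exist, short-circuiting the ||.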
diff --git a/branch-1.2/ambari-agent/src/main/puppet/manifestloader/site.pp b/branch-1.2/ambari-agent/src/main/puppet/manifestloader/site.pp
deleted file mode 100644
index 7a208a6..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/manifestloader/site.pp
+++ /dev/null
@@ -1,48 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-class manifestloader () {
-    file { '/etc/puppet/agent/modules.tgz':
-      ensure => present,
-      source => "puppet:///modules/catalog/modules.tgz",  
-      mode => '0755',
-    }
-
-    exec { 'untar_modules':
-      command => "rm -rf /etc/puppet/agent/modules ; tar zxf /etc/puppet/agent/modules.tgz -C /etc/puppet/agent/ --strip-components 3",
-      path    => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
-    } 
-
-    exec { 'puppet_apply':
-      command   => "sh /etc/puppet/agent/modules/puppetApply.sh",
-      timeout   => 1800,
-      path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-      logoutput => "true"
-    }
-
-    File['/etc/puppet/agent/modules.tgz'] -> Exec['untar_modules'] -> Exec['puppet_apply']
-}
-
-node default {
-  stage { 1: }
-  class { 'manifestloader': stage => 1 }
-}
-
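[Editor's note] The File -> Exec -> Exec chain above guarantees that puppetApply.sh only runs after the module tarball has been fetched and unpacked. A rough manual equivalent of what the class automates, assuming the tarball is already in place:

    rm -rf /etc/puppet/agent/modules
    tar zxf /etc/puppet/agent/modules.tgz -C /etc/puppet/agent/ --strip-components 3
    sh /etc/puppet/agent/modules/puppetApply.sh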
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/configgenerator/manifests/configfile.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/configgenerator/manifests/configfile.pp
deleted file mode 100644
index e927cf5..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/configgenerator/manifests/configfile.pp
+++ /dev/null
@@ -1,67 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-#
-# Generates xml configs from the given key-value hash maps
-#
-# Config file format:
-#
-# <configuration>
-#   <property>
-#     <name>name1</name><value>value1</value>
-#   </property>
-#     ..
-#   <property>
-#     <name>nameN</name><value>valueN</value>
-#   </property>
-# </configuration>
-#
-# Params:
-# - filename - name of the config file to generate
-# - modulespath - modules path ('/etc/puppet/modules' by default)
-# - module - module name
-# - configuration - set of key-value pairs (puppet hash); each entry becomes a <name>/<value> property in the generated file
-# - owner, group - ownership of the generated file ('root' by default)
-#
-# Note: Set correct $modulespath in the configgenerator (or pass it as parameter)
-#
-
-define configgenerator::configfile ($modulespath='/etc/puppet/modules', $filename, $module, $configuration, $owner = "root", $group = "root") {
-  $configcontent = inline_template('<!--<%=Time.now.asctime %>-->
-  <configuration>
-  <% configuration.each do |key,value| -%>
-  <property>
-    <name><%=key %></name>
-    <value><%=value %></value>
-  </property>
-  <% end -%>
-</configuration>')
-
-  debug("Generating config: ${modulespath}/${filename}")
-
-  file { "${modulespath}/${filename}":
-    ensure  => present,
-    content => $configcontent,
-    owner   => $owner,
-    group   => $group
-  }
-}
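[Editor's note] The define renders a Hadoop-style XML config from a puppet hash via the inline ERB template. As an illustration only (not part of the module), the same key/value-to-XML transformation in shell:

    to_hadoop_xml() {
      echo '<configuration>'
      while IFS='=' read -r key value; do
        printf '  <property>\n    <name>%s</name>\n    <value>%s</value>\n  </property>\n' "$key" "$value"
      done
      echo '</configuration>'
    }
    printf 'fs.default.name=hdfs://namenode:8020\n' | to_hadoop_xml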
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/configgenerator/manifests/init.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/configgenerator/manifests/init.pp
deleted file mode 100644
index bac20c0..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/configgenerator/manifests/init.pp
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-class configgenerator() {
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/dashboard/service_check.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/dashboard/service_check.pp
deleted file mode 100644
index ebc52e1..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/dashboard/service_check.pp
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-dashboard::dashboard::service_check(){}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/init.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/init.pp
deleted file mode 100644
index 0770252..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/init.pp
+++ /dev/null
@@ -1,76 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-dashboard(
-  $service_state = $hdp::params::cluster_client_state,
-  $opts = {}
-) inherits hdp-dashboard::params
-{
-   if ($service_state == 'no_op') {
-   } elsif ($service_state == 'uninstalled') {
-    hdp::package { 'dashboard' :
-      ensure => 'uninstalled',
-      java_needed => 'false',
-      size   => 64
-    }
-    hdp::directory_recursive_create { $conf_dir :
-      service_state => $service_state,
-      force => true
-    }
-
-    Hdp::Package['dashboard'] -> Hdp::Directory_recursive_create[$conf_dir]
-
-   } elsif ($service_state in ['running','installed_and_configured','stopped']) {
-      hdp::package { 'dashboard' :
-        java_needed => 'false',
-        size => 64
-       }
-     $conf_dir =  $hdp-dashboard::params::conf_dir
-  
-     hdp::directory_recursive_create { $conf_dir :
-       service_state => $service_state,
-       force => true
-     }
- 
-     hdp-dashboard::configfile { 'cluster_configuration.json' : }
-     Hdp-Dashboard::Configfile<||>{dashboard_host => $hdp::params::host_address}
-  
-     #top level does not need anchors
-     Hdp::Package['dashboard'] -> Hdp::Directory_recursive_create[$conf_dir] -> Hdp-Dashboard::Configfile<||> 
-    } else {
-     hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-   }
-}
-
-###config file helper
-define hdp-dashboard::configfile(
-  $dashboard_host = undef
-)
-{
-  
-  hdp::configfile { "${hdp-dashboard::params::conf_dir}/${name}":
-    component      => 'dashboard',
-    owner          => root,
-    group          => root,
-    dashboard_host => $dashboard_host
-  }
-}
-
-
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/params.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/params.pp
deleted file mode 100644
index b39d6b9..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-dashboard/manifests/params.pp
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-dashboard::params()
-{
-  
-  $conf_dir = "/usr/share/hdp/dashboard/dataServices/conf/" # cannot change: hard-coded in the rpm
-
-  $hdp_cluster_name = hdp_default("hadoop/cluster_configuration/hdp_cluster_name")
-  $scheduler_name = hdp_default("hadoop/cluster_configuration/scheduler_name")
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-dashboard/templates/cluster_configuration.json.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-dashboard/templates/cluster_configuration.json.erb
deleted file mode 100644
index d04774d..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-dashboard/templates/cluster_configuration.json.erb
+++ /dev/null
@@ -1,97 +0,0 @@
-{
-  "config_version": 1,
-  "stack_version": "1.0.2",
-  "overall": {
-    "cluster_name": "<%=scope.function_hdp_template_var("hdp_cluster_name")%>",
-    "dashboard_host": "<%=scope.function_hdp_host("public_dashboard_host")%>",
-    "dashboard_port": 80,
-    "dataservices_host": "<%=scope.function_hdp_host("public_dashboard_host")%>",
-    "dataservices_port": 80,
-    "ganglia" : {
-      "web_host": "<%=scope.function_hdp_host("public_ganglia_server_host")%>",
-      "web_port": 80,
-      "web_root": "/ganglia/?t=yes",
-      "grid_name": "HDP_GRID"
-    },
-    "nagios": {
-      "nagiosserver_host": "<%=scope.function_hdp_host("public_nagios_server_host")%>",
-      "nagiosserver_port": 80,
-      "web_root": "/nagios"
-    },
-    "jmx": {
-      "timeout": 3
-    },
-    "services": {
-	  "HDFS" : [
-        {
-          "installed": true,
-          "name": "HDFS",
-          "namenode_host": "<%=scope.function_hdp_host("public_namenode_host")%>",
-          "namenode_port": 50070,
-          "snamenode_host": "<%=scope.function_hdp_host("public_snamenode_host")%>",
-          "snamenode_port": 50090,
-          "total_datanodes": "<%=h=scope.function_hdp_template_var("slave_hosts");h.kind_of?(Array) ? h.size : ''%>",
-          "ganglia_clusters": {
-            "slaves": "HDPSlaves",
-            "namenode": "HDPNameNode"
-          }
-        }
-      ],
-      "MAPREDUCE" : [
-        {
-          "installed": true,
-          "name": "MAPREDUCE",
-          "jobtracker_host": "<%=scope.function_hdp_host("public_jtnode_host")%>",
-          "jobtracker_port": 50030,
-          "total_tasktrackers": "<%=h=scope.function_hdp_template_var("slave_hosts");h.kind_of?(Array) ? h.size : ''%>",
-          "jobhistory_host": "<%=scope.function_hdp_host("public_jtnode_host")%>",
-          "jobhistory_port": 51111,
-          "ganglia_clusters": {
-            "slaves": "HDPSlaves",
-            "jobtracker": "HDPJobTracker"
-          },
-          "scheduler_type": "<%=scope.function_hdp_template_var("scheduler_name")%>"
-        }
-      ],
-      "HBASE" : [
-        {
-          "installed": <%=not scope.function_hdp_no_hosts("public_hbase_master_host")%>,
-          "name": "HBASE",
-          "hbasemaster_host": "<%=scope.function_hdp_host("public_hbase_master_host")%>",
-          "hbasemaster_port": 60010,
-          "total_regionservers": "<%=h=scope.function_hdp_template_var("slave_hosts");h.kind_of?(Array) ? h.size : ''%>",
-          "ganglia_clusters": {
-            "slaves": "HDPSlaves",
-            "hbasemaster": "HDPHBaseMaster"
-          }
-        }
-      ],
-      "ZOOKEEPER" : [
-        {
-          "installed": <%=not scope.function_hdp_no_hosts("public_zookeeper_hosts")%>,
-          "name": "ZOOKEEPER"
-        }
-      ],
-      "HIVE" : [
-        {
-          "installed": <%=not scope.function_hdp_no_hosts("public_hive_server_host")%>,
-          "name": "HIVE"
-        }
-      ],
-      "TEMPLETON" : [
-        {
-          "installed": <%=not scope.function_hdp_no_hosts("public_webhcat_server_host")%>,
-          "name": "TEMPLETON"
-        }
-      ],
-      "OOZIE" : [
-        {
-          "installed": <%=not scope.function_hdp_no_hosts("public_oozie_server")%>,
-          "name": "OOZIE",
-          "oozie_host": "<%=scope.function_hdp_host("public_oozie_server")%>",
-          "oozie_port": 11000
-        }
-      ]
-    }
-  }
-}
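[Editor's note] The template derives the "installed" booleans and slave counts from puppet host variables, so a rendering bug shows up as broken JSON. A quick sanity check of the rendered file (path taken from the conf_dir hard-coded in hdp-dashboard::params):

    python -m json.tool /usr/share/hdp/dashboard/dataServices/conf/cluster_configuration.json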
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkGmetad.sh b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkGmetad.sh
deleted file mode 100644
index e60eb31..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkGmetad.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./gmetadLib.sh;
-
-# Before checking gmetad, check rrdcached.
-./checkRrdcached.sh;
-
-gmetadRunningPid=`getGmetadRunningPid`;
-
-if [ -n "${gmetadRunningPid}" ]
-then
-  echo "${GMETAD_BIN} running with PID ${gmetadRunningPid}";
-else
-  echo "Failed to find running ${GMETAD_BIN}";
-  exit 1;
-fi
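[Editor's note] All three check scripts follow the same pidfile-then-ps liveness pattern, with the helpers living in the *Lib.sh files. A minimal standalone sketch of the pattern, with hypothetical paths:

    pid=$(cat /var/run/ganglia/gmetad.pid 2>/dev/null)
    if [ -n "$pid" ] && ps -p "$pid" >/dev/null 2>&1; then
      echo "gmetad running with PID $pid"
    else
      echo "gmetad not running" >&2
      exit 1
    fi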
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkGmond.sh b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkGmond.sh
deleted file mode 100644
index 0cec8dc..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkGmond.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
-source ./gmondLib.sh;
-
-function checkGmondForCluster()
-{
-    gmondClusterName=${1};
-
-    gmondCoreConfFileName=`getGmondCoreConfFileName ${gmondClusterName}`;
-
-    # Skip over (purported) Clusters that don't have their core conf file present.
-    if [ -e "${gmondCoreConfFileName}" ]
-    then 
-      gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
-
-      if [ -n "${gmondRunningPid}" ]
-      then
-        echo "${GMOND_BIN} for cluster ${gmondClusterName} running with PID ${gmondRunningPid}";
-      else
-        echo "Failed to find running ${GMOND_BIN} for cluster ${gmondClusterName}";
-        exit 1;
-      fi
-    fi
-}
-
-# main()
-gmondClusterName=${1};
-
-if [ "x" == "x${gmondClusterName}" ]
-then
-    # No ${gmondClusterName} passed in as command-line arg, so check
-    # all the gmonds we know about.
-    for gmondClusterName in `getConfiguredGangliaClusterNames`
-    do
-        checkGmondForCluster ${gmondClusterName};
-    done
-else
-    # Just check the one ${gmondClusterName} that was asked for.
-    checkGmondForCluster ${gmondClusterName};
-fi
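[Editor's note] Usage follows from the main() block: with no argument the script walks every configured cluster; with an argument it checks just that one. For example (cluster name hypothetical):

    ./checkGmond.sh              # check all configured gmonds
    ./checkGmond.sh HDPSlaves    # check a single cluster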
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkRrdcached.sh b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkRrdcached.sh
deleted file mode 100644
index d94db5d..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/checkRrdcached.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./rrdcachedLib.sh;
-
-rrdcachedRunningPid=`getRrdcachedRunningPid`;
-
-if [ -n "${rrdcachedRunningPid}" ]
-then
-  echo "${RRDCACHED_BIN} running with PID ${rrdcachedRunningPid}";
-else
-  echo "Failed to find running ${RRDCACHED_BIN}";
-  exit 1;
-fi
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmetad.init b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmetad.init
deleted file mode 100644
index 5e33ea3..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmetad.init
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Remember to keep this in sync with the definition of
-# GANGLIA_RUNTIME_COMPONENTS_UNPACK_DIR in monrpmInstaller.sh.
-HDP_GANGLIA_RUNTIME_COMPONENTS_DIR=/usr/libexec/hdp/ganglia
-HDP_GANGLIA_GMETAD_STARTER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/startGmetad.sh
-HDP_GANGLIA_GMETAD_STOPPER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/stopGmetad.sh
-HDP_GANGLIA_GMETAD_CHECKER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/checkGmetad.sh
-
-RETVAL=0
-
-case "$1" in
-   start)
-      echo "============================="
-      echo "Starting hdp-gmetad..."
-      echo "============================="
-      [ -f ${HDP_GANGLIA_GMETAD_STARTER} ] || exit 1
-      eval "${HDP_GANGLIA_GMETAD_STARTER}"
-      RETVAL=$?
-      echo
-      [ $RETVAL -eq 0 ] && touch /var/lock/subsys/hdp-gmetad
-      ;;
-
-  stop)
-      echo "=================================="
-      echo "Shutting down hdp-gmetad..."
-      echo "=================================="
-      [ -f ${HDP_GANGLIA_GMETAD_STOPPER} ] || exit 1
-      eval "${HDP_GANGLIA_GMETAD_STOPPER}"
-      RETVAL=$?
-      echo
-      [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hdp-gmetad
-      ;;
-
-  restart|reload)
-   	$0 stop
-   	$0 start
-   	RETVAL=$?
-	;;
-  status)
-      echo "======================================="
-      echo "Checking status of hdp-gmetad..."
-      echo "======================================="
-      [ -f ${HDP_GANGLIA_GMETAD_CHECKER} ] || exit 1
-      eval "${HDP_GANGLIA_GMETAD_CHECKER}"
-      RETVAL=$?
-      ;;
-  *)
-	echo "Usage: $0 {start|stop|restart|status}"
-	exit 1
-esac
-
-exit $RETVAL
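[Editor's note] This is a standard SysV init contract (as is the hdp-gmond script below), so once installed under /etc/init.d it is driven like any other service:

    service hdp-gmetad start
    service hdp-gmetad status    # delegates to checkGmetad.sh
    service hdp-gmetad stop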
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmetadLib.sh b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmetadLib.sh
deleted file mode 100644
index fa529af..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmetadLib.sh
+++ /dev/null
@@ -1,196 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Slurp in all our user-customizable settings.
-source ./gangliaEnv.sh;
-
-# Get access to Ganglia-wide constants etc.
-source ./gangliaLib.sh;
-
-GMETAD_BIN=/usr/sbin/gmetad;
-GMETAD_CONF_FILE=${GANGLIA_CONF_DIR}/gmetad.conf;
-GMETAD_PID_FILE=${GANGLIA_RUNTIME_DIR}/gmetad.pid;
-
-function getGmetadLoggedPid()
-{
-    if [ -e "${GMETAD_PID_FILE}" ]
-    then
-        echo `cat ${GMETAD_PID_FILE}`;
-    fi
-}
-
-function getGmetadRunningPid()
-{
-    gmetadLoggedPid=`getGmetadLoggedPid`;
-
-    if [ -n "${gmetadLoggedPid}" ]
-    then
-        echo `ps -o pid=MYPID -p ${gmetadLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
-    fi
-}
-
-function generateGmetadConf()
-{
-    now=`date`;
-
-    cat <<END_OF_GMETAD_CONF_1
-#################### Generated by ${0} on ${now} ####################
-#
-#-------------------------------------------------------------------------------
-# Setting the debug_level to 1 will keep the daemon in the foreground and
-# show only error messages. Setting this value higher than 1 will make 
-# gmetad output debugging information and stay in the foreground.
-# default: 0
-# debug_level 10
-#
-#-------------------------------------------------------------------------------
-# What to monitor. The most important section of this file. 
-#
-# The data_source tag specifies either a cluster or a grid to
-# monitor. If we detect the source is a cluster, we will maintain a complete
-# set of RRD databases for it, which can be used to create historical 
-# graphs of the metrics. If the source is a grid (it comes from another gmetad),
-# we will only maintain summary RRDs for it.
-#
-# Format: 
-# data_source "my cluster" [polling interval] address1:port address2:port ...
-# 
-# The keyword 'data_source' must immediately be followed by a unique
-# string which identifies the source, then an optional polling interval in 
-# seconds. The source will be polled at this interval on average. 
-# If the polling interval is omitted, 15sec is assumed.
-#
-# If you choose to set the polling interval to something other than the default,
-# note that the web frontend determines a host as down if its TN value is less
-# than 4 * TMAX (20sec by default).  Therefore, if you set the polling interval
-# to something around or greater than 80sec, this will cause the frontend to
-# incorrectly display hosts as down even though they are not.
-#
-# A list of machines which service the data source follows, in the 
-# format ip:port, or name:port. If a port is not specified then 8649
-# (the default gmond port) is assumed.
-# default: There is no default value
-#
-# data_source "my cluster" 10 localhost  my.machine.edu:8649  1.2.3.5:8655
-# data_source "my grid" 50 1.3.4.7:8655 grid.org:8651 grid-backup.org:8651
-# data_source "another source" 1.3.4.7:8655  1.3.4.8
-END_OF_GMETAD_CONF_1
-
-    # Get info about all the configured Ganglia clusters.
-    getGangliaClusterInfo | while read gangliaClusterInfoLine
-    do
-        # From each, parse out ${gmondClusterName}, ${gmondMasterIP} and ${gmondPort}... 
-        read gmondClusterName gmondMasterIP gmondPort <<<`echo ${gangliaClusterInfoLine}`;
-        # ...and generate a corresponding data_source line for gmetad.conf. 
-        echo "data_source \"${gmondClusterName}\" ${gmondMasterIP}:${gmondPort}";
-    done
-
-    cat <<END_OF_GMETAD_CONF_2
-#
-# Round-Robin Archives
-# You can specify custom Round-Robin archives here (defaults are listed below)
-#
-# RRAs "RRA:AVERAGE:0.5:1:244" "RRA:AVERAGE:0.5:24:244" "RRA:AVERAGE:0.5:168:244" "RRA:AVERAGE:0.5:672:244" \
-#      "RRA:AVERAGE:0.5:5760:374"
-#
-#-------------------------------------------------------------------------------
-# Scalability mode. If on, we summarize over downstream grids, and respect
-# authority tags. If off, we take on 2.5.0-era behavior: we do not wrap our output
-# in <GRID></GRID> tags, we ignore all <GRID> tags we see, and always assume
-# we are the "authority" on data source feeds. This approach does not scale to
-# large groups of clusters, but is provided for backwards compatibility.
-# default: on
-# scalable off
-#
-#-------------------------------------------------------------------------------
-# The name of this Grid. All the data sources above will be wrapped in a GRID
-# tag with this name.
-# default: unspecified
-gridname "HDP_GRID"
-#
-#-------------------------------------------------------------------------------
-# The authority URL for this grid. Used by other gmetads to locate graphs
-# for our data sources. Generally points to a ganglia/
-# website on this machine.
-# default: "http://hostname/ganglia/",
-#   where hostname is the name of this machine, as defined by gethostname().
-# authority "http://mycluster.org/newprefix/"
-#
-#-------------------------------------------------------------------------------
-# List of machines this gmetad will share XML with. Localhost
-# is always trusted. 
-# default: There is no default value
-# trusted_hosts 127.0.0.1 169.229.50.165 my.gmetad.org
-#
-#-------------------------------------------------------------------------------
-# If you want any host which connects to the gmetad XML to receive
-# data, then set this value to "on"
-# default: off
-# all_trusted on
-#
-#-------------------------------------------------------------------------------
-# If you don't want gmetad to setuid then set this to off
-# default: on
-# setuid off
-#
-#-------------------------------------------------------------------------------
-# User gmetad will setuid to (defaults to "nobody")
-# default: "nobody"
-setuid_username "${GMETAD_USER}"
-#
-#-------------------------------------------------------------------------------
-# Umask to apply to created rrd files and grid directory structure
-# default: 0 (files are public)
-# umask 022
-#
-#-------------------------------------------------------------------------------
-# The port gmetad will answer requests for XML
-# default: 8651
-# xml_port 8651
-#
-#-------------------------------------------------------------------------------
-# The port gmetad will answer queries for XML. This facility allows
-# simple subtree and summation views of the XML tree.
-# default: 8652
-# interactive_port 8652
-#
-#-------------------------------------------------------------------------------
-# The number of threads answering XML requests
-# default: 4
-# server_threads 10
-#
-#-------------------------------------------------------------------------------
-# Where gmetad stores its round-robin databases
-# default: "/var/lib/ganglia/rrds"
-# rrd_rootdir "/some/other/place"
-#
-#-------------------------------------------------------------------------------
-# In earlier versions of gmetad, hostnames were handled in a
-# case-sensitive manner.
-# If your hostname directories have been renamed to lower case,
-# set this option to 0 to disable backward compatibility.
-# From version 3.2, backwards compatibility will be disabled by default.
-# default: 1   (for gmetad < 3.2)
-# default: 0   (for gmetad >= 3.2)
-case_sensitive_hostnames 1
-END_OF_GMETAD_CONF_2
-}
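[Editor's note] generateGmetadConf stitches one data_source line per configured cluster into an otherwise mostly-commented gmetad.conf. A sketch of how a start wrapper would presumably use it (the actual startGmetad.sh is not shown in this hunk):

    . ./gmetadLib.sh
    generateGmetadConf > "${GMETAD_CONF_FILE}"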
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmond.init b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmond.init
deleted file mode 100644
index 4bedd62..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmond.init
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Remember to keep this in sync with the definition of
-# GANGLIA_RUNTIME_COMPONENTS_UNPACK_DIR in monrpmInstaller.sh.
-HDP_GANGLIA_RUNTIME_COMPONENTS_DIR=/usr/libexec/hdp/ganglia
-HDP_GANGLIA_GMOND_STARTER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/startGmond.sh
-HDP_GANGLIA_GMOND_STOPPER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/stopGmond.sh
-HDP_GANGLIA_GMOND_CHECKER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/checkGmond.sh
-
-RETVAL=0
-
-case "$1" in
-   start)
-      echo "============================="
-      echo "Starting hdp-gmond..."
-      echo "============================="
-      [ -f ${HDP_GANGLIA_GMOND_STARTER} ] || exit 1
-      eval "${HDP_GANGLIA_GMOND_STARTER}"
-      RETVAL=$?
-      echo
-      [ $RETVAL -eq 0 ] && touch /var/lock/subsys/hdp-gmond
-      ;;
-
-  stop)
-      echo "=================================="
-      echo "Shutting down hdp-gmond..."
-      echo "=================================="
-      [ -f ${HDP_GANGLIA_GMOND_STOPPER} ] || exit 1
-      eval "${HDP_GANGLIA_GMOND_STOPPER}"
-      RETVAL=$?
-      echo
-      [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hdp-gmond
-      ;;
-
-  restart|reload)
-   	$0 stop
-   	$0 start
-   	RETVAL=$?
-	;;
-  status)
-      echo "======================================="
-      echo "Checking status of hdp-gmond..."
-      echo "======================================="
-      [ -f ${HDP_GANGLIA_GMOND_CHECKER} ] || exit 1
-      eval "${HDP_GANGLIA_GMOND_CHECKER}"
-      RETVAL=$?
-      ;;
-  *)
-	echo "Usage: $0 {start|stop|restart|status}"
-	exit 1
-esac
-
-exit $RETVAL
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmondLib.sh b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmondLib.sh
deleted file mode 100644
index 155b9a1..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/gmondLib.sh
+++ /dev/null
@@ -1,540 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Slurp in all our user-customizable settings.
-source ./gangliaEnv.sh;
-
-# Get access to Ganglia-wide constants etc.
-source ./gangliaLib.sh;
-
-GMOND_BIN=/usr/sbin/gmond;
-GMOND_CORE_CONF_FILE=gmond.core.conf;
-GMOND_MASTER_CONF_FILE=gmond.master.conf;
-GMOND_SLAVE_CONF_FILE=gmond.slave.conf;
-GMOND_PID_FILE=gmond.pid;
-
-# Functions.
-function getGmondCoreConfFileName()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # ${clusterName} is not empty. 
-        echo "${GANGLIA_CONF_DIR}/${clusterName}/${GMOND_CORE_CONF_FILE}";
-    else
-        echo "${GANGLIA_CONF_DIR}/${GMOND_CORE_CONF_FILE}";
-    fi
-}
-
-function getGmondMasterConfFileName()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # ${clusterName} is not empty. 
-        echo "${GANGLIA_CONF_DIR}/${clusterName}/conf.d/${GMOND_MASTER_CONF_FILE}";
-    else
-        echo "${GANGLIA_CONF_DIR}/conf.d/${GMOND_MASTER_CONF_FILE}";
-    fi
-}
-
-function getGmondSlaveConfFileName()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # ${clusterName} is not empty. 
-        echo "${GANGLIA_CONF_DIR}/${clusterName}/conf.d/${GMOND_SLAVE_CONF_FILE}";
-    else
-        echo "${GANGLIA_CONF_DIR}/conf.d/${GMOND_SLAVE_CONF_FILE}";
-    fi
-}
-
-function getGmondPidFileName()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # ${clusterName} is not empty. 
-        echo "${GANGLIA_RUNTIME_DIR}/${clusterName}/${GMOND_PID_FILE}";
-    else
-        echo "${GANGLIA_RUNTIME_DIR}/${GMOND_PID_FILE}";
-    fi
-}
-
-function getGmondLoggedPid()
-{
-    gmondPidFile=`getGmondPidFileName ${1}`;
-
-    if [ -e "${gmondPidFile}" ]
-    then
-        echo `cat ${gmondPidFile}`;
-    fi
-}
-
-function getGmondRunningPid()
-{
-    gmondLoggedPid=`getGmondLoggedPid ${1}`;
-
-    if [ -n "${gmondLoggedPid}" ]
-    then
-        echo `ps -o pid=MYPID -p ${gmondLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
-    fi
-}
-
-function generateGmondCoreConf()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
-
-        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
-        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
-        then
-            now=`date`;
-
-            cat << END_OF_GMOND_CORE_CONF
-#################### Generated by ${0} on ${now} ####################
-#
-/* This configuration is as close to 2.5.x default behavior as possible.
-   The values closely match ./gmond/metric.h definitions in 2.5.x */
-globals {
-  daemonize = yes
-  setuid = yes
-  user = ${GMOND_USER}
-  debug_level = 0
-  max_udp_msg_len = 1472
-  mute = no
-  deaf = no 
-  allow_extra_data = yes
-  host_dmax = 0 /*secs */
-  host_tmax = 20 /*secs */
-  cleanup_threshold = 300 /*secs */
-  gexec = no
-  send_metadata_interval = 30 /*secs */
-}
-
-/*
- * The cluster attributes specified will be used as part of the <CLUSTER>
- * tag that will wrap all hosts collected by this instance.
- */
-cluster {
-  name = "${gmondClusterName}"
-  owner = "unspecified"
-  latlong = "unspecified"
-  url = "unspecified"
-}
-
-/* The host section describes attributes of the host, like the location */
-host {
-  location = "unspecified"
-}
-
-/* You can specify as many tcp_accept_channels as you like to share
- * an XML description of the state of the cluster.
- *
- * At the very least, every gmond must expose its XML state to 
- * queriers from localhost.
- */
-tcp_accept_channel {
-  bind = localhost
-  port = ${gmondPort}
-}
-
-/* Each metrics module that is referenced by gmond must be specified and
-   loaded. If the module has been statically linked with gmond, it does
-   not require a load path. However all dynamically loadable modules must
-   include a load path. */
-modules {
-  module {
-    name = "core_metrics"
-  }
-  module {
-    name = "cpu_module"
-    path = "modcpu.so"
-  }
-  module {
-    name = "disk_module"
-    path = "moddisk.so"
-  }
-  module {
-    name = "load_module"
-    path = "modload.so"
-  }
-  module {
-    name = "mem_module"
-    path = "modmem.so"
-  }
-  module {
-    name = "net_module"
-    path = "modnet.so"
-  }
-  module {
-    name = "proc_module"
-    path = "modproc.so"
-  }
-  module {
-    name = "sys_module"
-    path = "modsys.so"
-  }
-}
-
-/* The old internal 2.5.x metric array has been replaced by the following
-   collection_group directives.  What follows is the default behavior for
-   collecting and sending metrics that is as close to 2.5.x behavior as
-   possible. */
-
-/* This collection group will cause a heartbeat (or beacon) to be sent every
-   20 seconds.  In the heartbeat is the GMOND_STARTED data which expresses
-   the age of the running gmond. */
-collection_group {
-  collect_once = yes
-  time_threshold = 20
-  metric {
-    name = "heartbeat"
-  }
-}
-
-/* This collection group will send general info about this host's total memory
-   every 180 secs.
-   This information doesn't change between reboots and is only collected
-   once. It is needed for the heatmap display. */
- collection_group {
-   collect_once = yes
-   time_threshold = 180
-   metric {
-    name = "mem_total"
-    title = "Memory Total"
-   }
- }
-
-/* This collection group will send general info about this host every
-   1200 secs.
-   This information doesn't change between reboots and is only collected
-   once. */
-collection_group {
-  collect_once = yes
-  time_threshold = 1200
-  metric {
-    name = "cpu_num"
-    title = "CPU Count"
-  }
-  metric {
-    name = "cpu_speed"
-    title = "CPU Speed"
-  }
-  /* Should this be here? Swap can be added/removed between reboots. */
-  metric {
-    name = "swap_total"
-    title = "Swap Space Total"
-  }
-  metric {
-    name = "boottime"
-    title = "Last Boot Time"
-  }
-  metric {
-    name = "machine_type"
-    title = "Machine Type"
-  }
-  metric {
-    name = "os_name"
-    title = "Operating System"
-  }
-  metric {
-    name = "os_release"
-    title = "Operating System Release"
-  }
-  metric {
-    name = "location"
-    title = "Location"
-  }
-}
-
-/* This collection group will send the status of gexecd for this host
-   every 300 secs.*/
-/* Unlike 2.5.x the default behavior is to report gexecd OFF. */
-collection_group {
-  collect_once = yes
-  time_threshold = 300
-  metric {
-    name = "gexec"
-    title = "Gexec Status"
-  }
-}
-
-/* This collection group will collect the CPU status info every 20 secs.
-   The time threshold is set to 90 seconds.  In truth, this
-   time_threshold could be set significantly higher to reduce
-   unnecessary network chatter. */
-collection_group {
-  collect_every = 20
-  time_threshold = 90
-  /* CPU status */
-  metric {
-    name = "cpu_user"
-    value_threshold = "1.0"
-    title = "CPU User"
-  }
-  metric {
-    name = "cpu_system"
-    value_threshold = "1.0"
-    title = "CPU System"
-  }
-  metric {
-    name = "cpu_idle"
-    value_threshold = "5.0"
-    title = "CPU Idle"
-  }
-  metric {
-    name = "cpu_nice"
-    value_threshold = "1.0"
-    title = "CPU Nice"
-  }
-  metric {
-    name = "cpu_aidle"
-    value_threshold = "5.0"
-    title = "CPU aidle"
-  }
-  metric {
-    name = "cpu_wio"
-    value_threshold = "1.0"
-    title = "CPU wio"
-  }
-  /* The next two metrics are optional if you want more detail...
-     ... since they are accounted for in cpu_system.
-  metric {
-    name = "cpu_intr"
-    value_threshold = "1.0"
-    title = "CPU intr"
-  }
-  metric {
-    name = "cpu_sintr"
-    value_threshold = "1.0"
-    title = "CPU sintr"
-  }
-  */
-}
-
-collection_group {
-  collect_every = 20
-  time_threshold = 90
-  /* Load Averages */
-  metric {
-    name = "load_one"
-    value_threshold = "1.0"
-    title = "One Minute Load Average"
-  }
-  metric {
-    name = "load_five"
-    value_threshold = "1.0"
-    title = "Five Minute Load Average"
-  }
-  metric {
-    name = "load_fifteen"
-    value_threshold = "1.0"
-    title = "Fifteen Minute Load Average"
-  }
-}
-
-/* This group collects the number of running and total processes */
-collection_group {
-  collect_every = 80
-  time_threshold = 950
-  metric {
-    name = "proc_run"
-    value_threshold = "1.0"
-    title = "Total Running Processes"
-  }
-  metric {
-    name = "proc_total"
-    value_threshold = "1.0"
-    title = "Total Processes"
-  }
-}
-
-/* This collection group grabs the volatile memory metrics every 40 secs and
-   sends them at least every 180 secs.  This time_threshold can be increased
-   significantly to reduce unneeded network traffic. */
-collection_group {
-  collect_every = 40
-  time_threshold = 180
-  metric {
-    name = "mem_free"
-    value_threshold = "1024.0"
-    title = "Free Memory"
-  }
-  metric {
-    name = "mem_shared"
-    value_threshold = "1024.0"
-    title = "Shared Memory"
-  }
-  metric {
-    name = "mem_buffers"
-    value_threshold = "1024.0"
-    title = "Memory Buffers"
-  }
-  metric {
-    name = "mem_cached"
-    value_threshold = "1024.0"
-    title = "Cached Memory"
-  }
-  metric {
-    name = "swap_free"
-    value_threshold = "1024.0"
-    title = "Free Swap Space"
-  }
-}
-
-collection_group {
-  collect_every = 40
-  time_threshold = 300
-  metric {
-    name = "bytes_out"
-    value_threshold = 4096
-    title = "Bytes Sent"
-  }
-  metric {
-    name = "bytes_in"
-    value_threshold = 4096
-    title = "Bytes Received"
-  }
-  metric {
-    name = "pkts_in"
-    value_threshold = 256
-    title = "Packets Received"
-  }
-  metric {
-    name = "pkts_out"
-    value_threshold = 256
-    title = "Packets Sent"
-  }
-}
-
-
-collection_group {
-  collect_every = 40
-  time_threshold = 180
-  metric {
-    name = "disk_free"
-    value_threshold = 1.0
-    title = "Disk Space Available"
-  }
-  metric {
-    name = "part_max_used"
-    value_threshold = 1.0
-    title = "Maximum Disk Space Used"
-  }
-  metric {
-    name = "disk_total"
-    value_threshold = 1.0
-    title = "Total Disk Space"
-  }
-}
-
-include ("${GANGLIA_CONF_DIR}/${gmondClusterName}/conf.d/*.conf")
-END_OF_GMOND_CORE_CONF
-        else
-            return 2;
-        fi
-    else
-        return 1;
-    fi
-}
-
-function generateGmondMasterConf
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
-
-        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
-        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
-        then
-            now=`date`;
-
-            cat << END_OF_GMOND_MASTER_CONF
-#################### Generated by ${0} on ${now} ####################
-/* Masters only receive; they never send. */
-udp_recv_channel {
-  bind = ${gmondMasterIP}
-  port = ${gmondPort}
-}
-
-/* The gmond cluster master must additionally provide an XML 
- * description of the cluster to the gmetad that will query it.
- */
-tcp_accept_channel {
-  bind = ${gmondMasterIP}
-  port = ${gmondPort}
-}
-END_OF_GMOND_MASTER_CONF
-        else
-            return 2;
-        fi
-    else
-        return 1;
-    fi
-}
-
-function generateGmondSlaveConf
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
-
-        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
-        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
-        then
-            now=`date`;
-
-            cat << END_OF_GMOND_SLAVE_CONF
-#################### Generated by ${0} on ${now} ####################
-/* Slaves only send; they never receive. */
-udp_send_channel {
-  #bind_hostname = yes # Highly recommended, soon to be default.
-                       # This option tells gmond to use a source address
-                       # that resolves to the machine's hostname.  Without
-                       # this, the metrics may appear to come from any
-                       # interface and the DNS names associated with
-                       # those IPs will be used to create the RRDs.
-  host = ${gmondMasterIP}
-  port = ${gmondPort}
-  ttl = 1
-}
-END_OF_GMOND_SLAVE_CONF
-        else
-            return 2;
-        fi
-    else
-        return 1;
-    fi
-}
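The generator functions in this library are meant to be sourced and composed, with the cluster name selecting a row in gangliaClusters.conf. A minimal hand-run sketch, assuming the default install directory /usr/libexec/hdp/ganglia from params.pp and an already-populated gangliaClusters.conf:

    #!/bin/sh
    cd /usr/libexec/hdp/ganglia;
    source ./gmondLib.sh;

    # The core conf goes on every box; the master/slave conf adds the
    # receive or send channel for that role.
    generateGmondCoreConf HDPSlaves   > `getGmondCoreConfFileName HDPSlaves`;
    generateGmondMasterConf HDPSlaves > `getGmondMasterConfFileName HDPSlaves`;   # master only
    generateGmondSlaveConf HDPSlaves  > `getGmondSlaveConfFileName HDPSlaves`;    # slaves only

In practice setupGanglia.sh (below) drives exactly this sequence, so the sketch is mainly useful for debugging one cluster's generated config by hand.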
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/rrd.py b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/rrd.py
deleted file mode 100644
index e8fa370..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/rrd.py
+++ /dev/null
@@ -1,170 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import cgi
-import os
-import rrdtool
-import sys
-import time
-
-# place this script in /var/www/cgi-bin of the Ganglia collector
-# requires 'yum install rrdtool-python' on the Ganglia collector
-
-
-def printMetric(clusterName, hostName, metricName, file, cf, start, end, resolution, pointInTime):
-  if clusterName.endswith("rrds"):
-    clusterName = ""
-
-  args = [file, cf]
-
-  if start is not None:
-    args.extend(["-s", start])
-
-  if end is not None:
-    args.extend(["-e", end])
-
-  if resolution is not None:
-    args.extend(["-r", resolution])
-
-  rrdMetric = rrdtool.fetch(args)
-  # ds_name
-  sys.stdout.write(rrdMetric[1][0])
-  sys.stdout.write("\n")
-
-  sys.stdout.write(clusterName)
-  sys.stdout.write("\n")
-  sys.stdout.write(hostName)
-  sys.stdout.write("\n")
-  sys.stdout.write(metricName)
-  sys.stdout.write("\n")
-
-  # write time
-  sys.stdout.write(str(rrdMetric[0][0]))
-  sys.stdout.write("\n")
-  # write step
-  sys.stdout.write(str(rrdMetric[0][2]))
-  sys.stdout.write("\n")
-
-  if not pointInTime:
-    # Dump every non-null sample in the fetched window.
-    for row in rrdMetric[2]:
-      if row[0] is not None:
-        sys.stdout.write(str(row[0]))
-        sys.stdout.write("\n")
-  else:
-    # Point-in-time mode: scan backwards for the most recent non-null sample.
-    value = None
-    idx = -1
-    rows = rrdMetric[2]
-    rowsLastIdx = len(rows) * -1
-
-    while value is None and idx >= rowsLastIdx:
-      value = rows[idx][0]
-      idx -= 1
-
-    if value is not None:
-      sys.stdout.write(str(value))
-      sys.stdout.write("\n")
-
-  sys.stdout.write("[AMBARI_DP_END]\n")
-  return
-
-def stripList(l):
-  return([x.strip() for x in l])
-
-sys.stdout.write("Content-type: text/plain\n\n")
-
-# write start time
-sys.stdout.write(str(time.mktime(time.gmtime())))
-sys.stdout.write("\n")
-
-queryString = dict(cgi.parse_qsl(os.environ['QUERY_STRING']));
-
-if "m" in queryString:
-  metricParts = queryString["m"].split(",")
-else:
-  metricParts = [""]
-metricParts = stripList(metricParts)
-
-hostParts = []
-if "h" in queryString:
-  hostParts = queryString["h"].split(",")
-hostParts = stripList(hostParts)
-
-if "c" in queryString:
-  clusterParts = queryString["c"].split(",")
-else:
-  clusterParts = [""]
-clusterParts = stripList(clusterParts)
-
-if "p" in queryString:
-  rrdPath = queryString["p"]
-else:
-  rrdPath = "/var/lib/ganglia/rrds/"
-
-start = None
-if "s" in queryString:
-  start = queryString["s"]
-
-end = None
-if "e" in queryString:
-  end = queryString["e"]
-
-resolution = None
-if "r" in queryString:
-  resolution = queryString["r"]
-
-if "cf" in queryString:
-  cf = queryString["cf"]
-else:
-  cf = "AVERAGE"
-
-if "pt" in queryString:
-  pointInTime = True
-else:
-  pointInTime = False
-
-
-host_metrics = ["boottime", "bytes_in", "bytes_out", "cpu_aidle", "cpu_idle",
-                "cpu_nice", "cpu_num", "cpu_speed", "cpu_system", "cpu_user",
-                "cpu_wio", "disk_free", "disk_total", "load_fifteen", "load_five",
-                "load_one", "mem_buffers", "mem_cached", "mem_free", "mem_shared",
-                "mem_total", "part_max_used", "pkts_in", "pkts_out", "proc_run",
-                "proc_total", "swap_free", "swap_total"]
-
-for cluster in clusterParts:
-  for path, dirs, files in os.walk(rrdPath + cluster):
-    pathParts = path.split("/")
-    if len(hostParts) == 0 or pathParts[-1] in hostParts:
-      for file in files:
-        for metric in metricParts:
-          if file.endswith(metric + ".rrd"):
-            if not (metric in host_metrics):
-              printMetric(pathParts[-2], pathParts[-1], file[:-4],
-                    os.path.join(path, file), cf, start, end, resolution, pointInTime)
-            else:
-              if (cluster == "HDPSlaves"):
-                 printMetric(pathParts[-2], pathParts[-1], file[:-4],
-                    os.path.join(path, file), cf, start, end, resolution, pointInTime)
-                
-sys.stdout.write("[AMBARI_END]\n")
-# write end time
-sys.stdout.write(str(time.mktime(time.gmtime())))
-sys.stdout.write("\n")
-
-sys.stdout.flush()
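Because rrd.py keys everything off QUERY_STRING, the easiest smoke test is a plain HTTP request against the collector. A hedged example, assuming the script sits in /var/www/cgi-bin as the header comment suggests and using ganglia.example.com as a placeholder host:

    #!/bin/sh
    # Last 10 minutes of load_one for one slave host, averaged.
    # Append &pt=true to get only the most recent non-null value instead.
    now=`date +%s`;
    start=$((now - 600));
    curl "http://ganglia.example.com/cgi-bin/rrd.py?c=HDPSlaves&h=host1.example.com&m=load_one&s=${start}&e=${now}&cf=AVERAGE";

The response is the plain-text framing the script emits: a start timestamp, per-metric blocks each terminated by [AMBARI_DP_END], then [AMBARI_END] and an end timestamp.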
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/rrdcachedLib.sh b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/rrdcachedLib.sh
deleted file mode 100644
index 8b7c257..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/rrdcachedLib.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get access to Ganglia-wide constants etc.
-source ./gangliaLib.sh;
-
-RRDCACHED_BIN=/usr/bin/rrdcached;
-RRDCACHED_PID_FILE=${GANGLIA_RUNTIME_DIR}/rrdcached.pid;
-RRDCACHED_ALL_ACCESS_UNIX_SOCKET=${GANGLIA_RUNTIME_DIR}/rrdcached.sock;
-RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET=${GANGLIA_RUNTIME_DIR}/rrdcached.limited.sock;
-
-function getRrdcachedLoggedPid()
-{
-    if [ -e "${RRDCACHED_PID_FILE}" ]
-    then
-        echo `cat ${RRDCACHED_PID_FILE}`;
-    fi
-}
-
-function getRrdcachedRunningPid()
-{
-    rrdcachedLoggedPid=`getRrdcachedLoggedPid`;
-
-    if [ -n "${rrdcachedLoggedPid}" ]
-    then
-        echo `ps -o pid=MYPID -p ${rrdcachedLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
-    fi
-}
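The logged/running split above exists because a leftover PID file must not be trusted; getRrdcachedRunningPid only echoes a PID that ps can still see. The intended call pattern, as a small sketch assuming the default install directory:

    #!/bin/sh
    cd /usr/libexec/hdp/ganglia;    # assumed location of the shell libraries
    source ./rrdcachedLib.sh;

    pid=`getRrdcachedRunningPid`;
    if [ -n "${pid}" ]
    then
        echo "rrdcached is up with PID ${pid}";
    else
        echo "rrdcached is not running (or its PID file is stale)";
    fi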
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/setupGanglia.sh b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/setupGanglia.sh
deleted file mode 100644
index 93ea922..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/setupGanglia.sh
+++ /dev/null
@@ -1,128 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get access to Ganglia-wide constants, utilities etc.
-source ./gangliaLib.sh
-
-function usage()
-{
-  cat << END_USAGE
-Usage: ${0} [-c <gmondClusterName> [-m]] [-t]
-
-Options:
-  -c <gmondClusterName>   The name of the Ganglia Cluster whose gmond configuration we're here to generate.
-
-  -m                      Whether this gmond (if -t is not specified) is the master for its Ganglia 
-                          Cluster. Without this, we generate slave gmond configuration.
-
-  -t                      Whether this is a call to generate gmetad configuration (as opposed to the
-                          gmond configuration that is generated without this).
-END_USAGE
-}
-
-function instantiateGmetadConf()
-{
-  # gmetad utility library.
-  source ./gmetadLib.sh;
-
-  generateGmetadConf > ${GMETAD_CONF_FILE};
-}
-
-function instantiateGmondConf()
-{
-  # gmond utility library.
-  source ./gmondLib.sh;
- 
-  gmondClusterName=${1};
-
-  if [ "x" != "x${gmondClusterName}" ]
-  then
-
-    createDirectory "${GANGLIA_RUNTIME_DIR}/${gmondClusterName}";
-    createDirectory "${GANGLIA_CONF_DIR}/${gmondClusterName}/conf.d";
-    
-    # Always blindly generate the core gmond config - that goes on every box running gmond. 
-    generateGmondCoreConf ${gmondClusterName} > `getGmondCoreConfFileName ${gmondClusterName}`;
-
-    isMasterGmond=${2};
-
-    # Decide whether we want to add on the master or slave gmond config.
-    if [ "0" -eq "${isMasterGmond}" ]
-    then
-      generateGmondSlaveConf ${gmondClusterName} > `getGmondSlaveConfFileName ${gmondClusterName}`;
-    else
-      generateGmondMasterConf ${gmondClusterName} > `getGmondMasterConfFileName ${gmondClusterName}`;
-    fi
-
-  else
-    echo "No gmondClusterName passed in, nothing to instantiate";
-  fi
-}
-
-# main()
-
-gmondClusterName=;
-isMasterGmond=0;
-configureGmetad=0;
-
-while getopts ":c:mt" OPTION
-do
-  case ${OPTION} in
-    c) 
-      gmondClusterName=${OPTARG};
-      ;;
-    m)
-      isMasterGmond=1;
-      ;;
-    t)
-      configureGmetad=1;
-      ;;
-    ?)
-      usage;
-      exit 1;
-  esac
-done
-
-# Initialization.
-createDirectory ${GANGLIA_CONF_DIR};
-createDirectory ${GANGLIA_RUNTIME_DIR};
-# So rrdcached can drop its PID files in here.
-chmod a+w ${GANGLIA_RUNTIME_DIR};
-
-if [ -n "${gmondClusterName}" ]
-then
-
-  # Be forgiving of users who pass in -c along with -t (which always takes precedence).
-  if [ "1" -eq "${configureGmetad}" ]
-  then
-    instantiateGmetadConf;
-  else
-    instantiateGmondConf ${gmondClusterName} ${isMasterGmond};
-  fi
-
-elif [ "1" -eq "${configureGmetad}" ]
-then
-  instantiateGmetadConf;
-else
-  usage;
-  exit 2;
-fi
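Pulling the option parsing together, the script supports exactly three call shapes, matching the generator functions in gmetadLib.sh and gmondLib.sh. For example, with the cluster names wired into gangliaClusters.conf.erb and the default install directory:

    #!/bin/sh
    cd /usr/libexec/hdp/ganglia;            # assumed install dir
    ./setupGanglia.sh -c HDPSlaves;         # slave gmond conf for the HDPSlaves cluster
    ./setupGanglia.sh -c HDPNameNode -m;    # master gmond conf for HDPNameNode
    ./setupGanglia.sh -t;                   # gmetad conf (-t takes precedence over -c)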
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startGmetad.sh b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startGmetad.sh
deleted file mode 100644
index 1cfd3f1..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startGmetad.sh
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./gmetadLib.sh;
-
-# To get access to ${RRDCACHED_ALL_ACCESS_UNIX_SOCKET}.
-source ./rrdcachedLib.sh;
-
-# Before starting gmetad, start rrdcached.
-./startRrdcached.sh;
-
-if [ $? -eq 0 ] 
-then
-    gmetadRunningPid=`getGmetadRunningPid`;
-
-    # Only attempt to start gmetad if there's not already one running.
-    if [ -z "${gmetadRunningPid}" ]
-    then
-        env RRDCACHED_ADDRESS=${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
-                    ${GMETAD_BIN} --conf=${GMETAD_CONF_FILE} --pid-file=${GMETAD_PID_FILE};
-
-        gmetadRunningPid=`getGmetadRunningPid`;
-
-        if [ -n "${gmetadRunningPid}" ]
-        then
-            echo "Started ${GMETAD_BIN} with PID ${gmetadRunningPid}";
-        else
-            echo "Failed to start ${GMETAD_BIN}";
-            exit 1;
-        fi
-    else
-        echo "${GMETAD_BIN} already running with PID ${gmetadRunningPid}";
-    fi
-else
-    echo "Not starting ${GMETAD_BIN} because starting ${RRDCACHED_BIN} failed.";
-    exit 2;
-fi
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startGmond.sh b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startGmond.sh
deleted file mode 100644
index cf0e8f6..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startGmond.sh
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
-source ./gmondLib.sh;
-
-function startGmondForCluster()
-{
-    gmondClusterName=${1};
-
-    gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
-
-    # Only attempt to start gmond if there's not already one running.
-    if [ -z "${gmondRunningPid}" ]
-    then
-      gmondCoreConfFileName=`getGmondCoreConfFileName ${gmondClusterName}`;
-
-      if [ -e "${gmondCoreConfFileName}" ]
-      then 
-        gmondPidFileName=`getGmondPidFileName ${gmondClusterName}`;
-
-        ${GMOND_BIN} --conf=${gmondCoreConfFileName} --pid-file=${gmondPidFileName};
-  
-        gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
-  
-        if [ -n "${gmondRunningPid}" ]
-        then
-            echo "Started ${GMOND_BIN} for cluster ${gmondClusterName} with PID ${gmondRunningPid}";
-        else
-            echo "Failed to start ${GMOND_BIN} for cluster ${gmondClusterName}";
-            exit 1;
-        fi
-      fi 
-    else
-      echo "${GMOND_BIN} for cluster ${gmondClusterName} already running with PID ${gmondRunningPid}";
-    fi
-}
-
-# main()
-gmondClusterName=${1};
-
-if [ "x" == "x${gmondClusterName}" ]
-then
-    # No ${gmondClusterName} passed in as command-line arg, so start 
-    # all the gmonds we know about.
-    for gmondClusterName in `getConfiguredGangliaClusterNames`
-    do
-        startGmondForCluster ${gmondClusterName};
-    done
-else
-    # Just start the one ${gmondClusterName} that was asked for.
-    startGmondForCluster ${gmondClusterName};
-fi
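With no argument the script starts a gmond for every cluster listed in gangliaClusters.conf; with an argument it starts just that one:

    ./startGmond.sh               # all configured clusters
    ./startGmond.sh HDPSlaves     # only the HDPSlaves gmond

stopGmond.sh (below) mirrors the same calling convention.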
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startRrdcached.sh b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startRrdcached.sh
deleted file mode 100644
index 0581594..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/startRrdcached.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Slurp in all our user-customizable settings.
-source ./gangliaEnv.sh;
-
-# Get all our common constants etc. set up.
-source ./rrdcachedLib.sh;
-
-rrdcachedRunningPid=`getRrdcachedRunningPid`;
-
-# Only attempt to start rrdcached if there's not already one running.
-if [ -z "${rrdcachedRunningPid}" ]
-then
-    # Changed because of a problem Puppet had with the 'nobody' user.
-    #sudo -u ${GMETAD_USER} ${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
-    #         -m 664 -l unix:${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
-    #         -m 777 -P FLUSH,STATS,HELP -l unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET} \
-    #         -b /var/lib/ganglia/rrds -B
-    su - ${GMETAD_USER} -c "${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
-             -m 664 -l unix:${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
-             -m 777 -P FLUSH,STATS,HELP -l unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET} \
-             -b ${RRDCACHED_BASE_DIR} -B"
-
-    # Ideally, we'd use ${RRDCACHED_BIN}'s -s ${WEBSERVER_GROUP} option for 
-    # this, but it doesn't take sometimes due to a lack of permissions,
-    # so perform the operation explicitly to be super-sure.
-    chgrp ${WEBSERVER_GROUP} ${RRDCACHED_ALL_ACCESS_UNIX_SOCKET};
-    chgrp ${WEBSERVER_GROUP} ${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET};
-
-    # Check to make sure rrdcached actually started up.
-    rrdcachedRunningPid=`getRrdcachedRunningPid`;
-
-    if [ -n "${rrdcachedRunningPid}" ]
-    then
-        echo "Started ${RRDCACHED_BIN} with PID ${rrdcachedRunningPid}";
-    else
-        echo "Failed to start ${RRDCACHED_BIN}";
-        exit 1;
-    fi
-else
-    echo "${RRDCACHED_BIN} already running with PID ${rrdcachedRunningPid}";
-fi
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopGmetad.sh b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopGmetad.sh
deleted file mode 100644
index 2764e0e..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopGmetad.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./gmetadLib.sh;
-
-gmetadRunningPid=`getGmetadRunningPid`;
-
-# Only go ahead with the termination if we could find a running PID.
-if [ -n "${gmetadRunningPid}" ]
-then
-    kill -KILL ${gmetadRunningPid};
-    echo "Stopped ${GMETAD_BIN} (with PID ${gmetadRunningPid})";
-fi
-
-# Poll again.
-gmetadRunningPid=`getGmetadRunningPid`;
-
-# Once we've killed gmetad, there should no longer be a running PID.
-if [ -z "${gmetadRunningPid}" ]
-then
-    # It's safe to stop rrdcached now.
-    ./stopRrdcached.sh;
-fi
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopGmond.sh b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopGmond.sh
deleted file mode 100644
index 6aa9040..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopGmond.sh
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
-source ./gmondLib.sh;
-
-function stopGmondForCluster()
-{
-    gmondClusterName=${1};
-
-    gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
-
-    # Only go ahead with the termination if we could find a running PID.
-    if [ -n "${gmondRunningPid}" ]
-    then
-      kill ${gmondRunningPid};
-      echo "Stopped ${GMOND_BIN} for cluster ${gmondClusterName} (with PID ${gmondRunningPid})";
-    fi
-}
-
-# main()
-gmondClusterName=${1};
-
-if [ "x" == "x${gmondClusterName}" ]
-then
-    # No ${gmondClusterName} passed in as command-line arg, so stop
-    # all the gmonds we know about.
-    for gmondClusterName in `getConfiguredGangliaClusterNames`
-    do
-        stopGmondForCluster ${gmondClusterName};
-    done
-else
-    stopGmondForCluster ${gmondClusterName};
-fi
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopRrdcached.sh b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopRrdcached.sh
deleted file mode 100644
index 0a0d8d8..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopRrdcached.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./rrdcachedLib.sh;
-
-rrdcachedRunningPid=`getRrdcachedRunningPid`;
-
-# Only go ahead with the termination if we could find a running PID.
-if [ -n "${rrdcachedRunningPid}" ]
-then
-    kill -TERM ${rrdcachedRunningPid};
-    # ${RRDCACHED_BIN} takes a few seconds to drain its buffers, so wait 
-    # until we're sure it's well and truly dead. 
-    #
-    # Without this, an immediately following startRrdcached.sh won't do
-    # anything, because it still sees this soon-to-die instance alive,
-    # and the net result is that after a few seconds, there's no
-    # ${RRDCACHED_BIN} running on the box anymore.
-    sleep 5;
-    echo "Stopped ${RRDCACHED_BIN} (with PID ${rrdcachedRunningPid})";
-fi 
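The fixed sleep is a blunt way to wait out rrdcached's buffer drain. An alternative sketch, not what this script does, would poll until the process actually exits:

    #!/bin/sh
    # Alternative to the fixed sleep: poll for process exit after TERM.
    # Assumes ${rrdcachedRunningPid} from the surrounding script.
    kill -TERM ${rrdcachedRunningPid};
    while kill -0 ${rrdcachedRunningPid} 2>/dev/null
    do
        sleep 1;
    done
    echo "Stopped rrdcached (PID ${rrdcachedRunningPid})";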
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/teardownGanglia.sh b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/teardownGanglia.sh
deleted file mode 100644
index b27f7a2..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/teardownGanglia.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get access to Ganglia-wide constants, utilities etc.
-source ./gangliaLib.sh;
-
-# Undo what we did while setting up Ganglia on this box.
-rm -rf ${GANGLIA_CONF_DIR};
-rm -rf ${GANGLIA_RUNTIME_DIR};
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/config.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/config.pp
deleted file mode 100644
index bf69f51..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/config.pp
+++ /dev/null
@@ -1,79 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-ganglia::config(
-  $ganglia_server_host = undef,
-  $service_state = $hdp::params::cluster_service_state
-)
-{
- if ($service_state in ['running','installed_and_configured','stopped']) {
-    #TODO: divide into what is needed on server vs what is needed on monitored nodes
-    $shell_cmds_dir = $hdp-ganglia::params::ganglia_shell_cmds_dir
-    $shell_files = ['checkGmond.sh','checkRrdcached.sh','gmetadLib.sh','gmondLib.sh','rrdcachedLib.sh' ,'setupGanglia.sh','startGmetad.sh','startGmond.sh','startRrdcached.sh','stopGmetad.sh','stopGmond.sh','stopRrdcached.sh','teardownGanglia.sh']
-
-    hdp::directory_recursive_create { $shell_cmds_dir :
-      owner => root,
-      group => root
-    } 
-
-     hdp-ganglia::config::init_file { ['gmetad','gmond']: }
-
-     hdp-ganglia::config::shell_file { $shell_files: }                       
-
-     hdp-ganglia::config::file { ['gangliaClusters.conf','gangliaEnv.sh','gangliaLib.sh']: 
-       ganglia_server_host => $ganglia_server_host
-     }
- 
-     anchor{'hdp-ganglia::config::begin':} -> Hdp::Directory_recursive_create[$shell_cmds_dir] -> Hdp-ganglia::Config::Shell_file<||> -> anchor{'hdp-ganglia::config::end':}
-     Anchor['hdp-ganglia::config::begin'] -> Hdp-ganglia::Config::Init_file<||> -> Anchor['hdp-ganglia::config::end']
-     Anchor['hdp-ganglia::config::begin'] -> Hdp-ganglia::Config::File<||> -> Anchor['hdp-ganglia::config::end']
-  }
-}
-
-define hdp-ganglia::config::shell_file()
-{
-  file { "${hdp-ganglia::params::ganglia_shell_cmds_dir}/${name}":
-    source => "puppet:///modules/hdp-ganglia/${name}", 
-    mode => '0755'
-  }
-}
-
-define hdp-ganglia::config::init_file()
-{
-  file { "/etc/init.d/hdp-${name}":
-    source => "puppet:///modules/hdp-ganglia/${name}.init", 
-    mode => '0755'
-  }
-}
-
-### config files
-define hdp-ganglia::config::file(
-  $ganglia_server_host = undef
-)
-{
-  hdp::configfile { "${hdp-ganglia::params::ganglia_shell_cmds_dir}/${name}":
-    component           => 'ganglia',
-    owner               => root,
-    group               => root
-  }
-  if ($ganglia_server_host != undef) {
-    Hdp::Configfile<||>{ganglia_server_host => $ganglia_server_host}
-  }
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/config/generate_monitor.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/config/generate_monitor.pp
deleted file mode 100644
index 192cd29..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/config/generate_monitor.pp
+++ /dev/null
@@ -1,41 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#TODO: the scripts called here should be converted to native Puppet
-define hdp-ganglia::config::generate_monitor(
-  $ganglia_service,
-  $role
-)
-{
-  $shell_cmds_dir = $hdp-ganglia::params::ganglia_shell_cmds_dir
-  $cmd = $ganglia_service ? {
-    'gmond'  => $role ? {
-      'server' => "${shell_cmds_dir}/setupGanglia.sh -c ${name} -m",
-       default =>  "${shell_cmds_dir}/setupGanglia.sh -c ${name}"
-    },
-    'gmetad' => "${shell_cmds_dir}/setupGanglia.sh -t",
-     default => hdp_fail("Unexpected ganglia service: ${ganglia_service}")
-  }
-
-  #TODO: put in test condition
-  hdp::exec { $cmd:
-    command => $cmd
- }
-}
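The selector above only ever yields three command lines; for a resource titled HDPSlaves, and with the default shell_cmds_dir from params.pp, they expand to:

    /usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m    # gmond, role => 'server'
    /usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves       # gmond, any other role
    /usr/libexec/hdp/ganglia/setupGanglia.sh -t                 # gmetad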
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/config/generate_server.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/config/generate_server.pp
deleted file mode 100644
index 069f0d8..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/config/generate_server.pp
+++ /dev/null
@@ -1,42 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#TODO: the scripts called here should be converted to native Puppet
-define hdp-ganglia::config::generate_server(
-  $ganglia_service,
-  $role
-)
-{
-  $shell_cmds_dir = $hdp-ganglia::params::ganglia_shell_cmds_dir
-
-  $cmd = $ganglia_service ? {
-    'gmond'  => $role ? {
-      'server' => "${shell_cmds_dir}/setupGanglia.sh -c ${name} -m",
-       default =>  "${shell_cmds_dir}/setupGanglia.sh -c ${name}"
-    },
-    'gmetad' => "${shell_cmds_dir}/setupGanglia.sh -t",
-     default => hdp_fail("Unexpected ganglia service: ${ganglia_service}")
-  }
-
-  #TODO: put in test condition
-  hdp::exec { $cmd:
-    command => $cmd
- }
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/hdp-gmetad/service_check.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/hdp-gmetad/service_check.pp
deleted file mode 100644
index 15cbe36..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/hdp-gmetad/service_check.pp
+++ /dev/null
@@ -1,36 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-ganglia::hdp-gmetad::service_check() 
-{
-  
-  anchor { 'hdp-ganglia::hdp-gmetad::service_check::begin':}
-
-  exec { 'hdp-gmetad':
-    command   => "/etc/init.d/hdp-gmetad status | grep -v failed",
-    tries     => 3,
-    try_sleep => 5,
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    before      => Anchor['hdp-ganglia::hdp-gmetad::service_check::end'],
-    logoutput => true
-  }
-
-  anchor{ 'hdp-ganglia::hdp-gmetad::service_check::end':}
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/hdp-gmond/service_check.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/hdp-gmond/service_check.pp
deleted file mode 100644
index 8c1ed52..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/hdp-gmond/service_check.pp
+++ /dev/null
@@ -1,36 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-ganglia::hdp-gmond::service_check() 
-{
-  
-  anchor { 'hdp-ganglia::hdp-gmond::service_check::begin':}
-
-  exec { 'hdp-gmond':
-    command   => "/etc/init.d/hdp-gmond status | grep -v failed",
-    tries     => 3,
-    try_sleep => 5,
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    before      => Anchor['hdp-ganglia::hdp-gmond::service_check::end'],
-    logoutput => true
-  }
-
-  anchor{ 'hdp-ganglia::hdp-gmond::service_check::end':}
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/init.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/init.pp
deleted file mode 100644
index 4c406da..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/init.pp
+++ /dev/null
@@ -1,37 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-ganglia(
-  $service_state
-)
-{
-  if (($service_state != 'no_op') and ($service_state != 'uninstalled')) {
-    include hdp-ganglia::params
-    $gmetad_user = $hdp-ganglia::params::gmetad_user
-    $gmond_user = $hdp-ganglia::params::gmond_user
-  
-    user { $gmond_user : shell => '/bin/bash'} #provision for nobody user
-    if ( $gmetad_user != $gmond_user) {
-      user { $gmetad_user : shell => '/bin/bash'} #provision for nobody user
-    }
-    anchor{'hdp-ganglia::begin':} -> User<|title == $gmond_user or title == $gmetad_user|> ->  anchor{'hdp-ganglia::end':}
-  }
-}
-
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/monitor.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/monitor.pp
deleted file mode 100644
index 27334ac..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/monitor.pp
+++ /dev/null
@@ -1,125 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-ganglia::monitor(
-  $service_state = $hdp::params::cluster_service_state,
-  $ganglia_server_host = undef,
-  $opts = {}
-) inherits hdp-ganglia::params
-{
-  if  ($service_state == 'no_op') {
-  } elsif ($service_state == 'uninstalled') {     
-
-    hdp::package { 'ganglia-monitor':
-      ensure      => 'uninstalled',
-      java_needed => false
-    }
-
-  } else {
-    if ($hdp::params::service_exists['hdp-ganglia::server'] != true) {
-      class { 'hdp-ganglia':
-       service_state => $service_state
-      }
-    }
-
-    hdp::package { 'ganglia-monitor': }
-
-    if ($hdp::params::service_exists['hdp-ganglia::server'] != true) {
-      class { 'hdp-ganglia::config': ganglia_server_host => $ganglia_server_host}
-    }
-
-    if (($hdp::params::service_exists['hdp-hadoop::datanode'] == true) or ($hdp::params::service_exists['hdp-hadoop::namenode'] == true) or ($hdp::params::service_exists['hdp-hadoop::jobtracker'] == true) or ($hdp::params::service_exists['hdp-hadoop::tasktracker'] == true) or ($hdp::params::service_exists['hdp-hadoop::client'] == true) or ($hdp::params::service_exists['hdp-hadoop::snamenode'] == true)) {
-     class { 'hdp-hadoop::enable-ganglia': }
-   }
-
-    if ($hdp::params::service_exists['hdp-hbase::master'] == true) {
-      class { 'hdp-hbase::master::enable-ganglia': }
-    }
-  
-    if ($hdp::params::service_exists['hdp-hbase::regionserver'] == true) {
-      class { 'hdp-hbase::regionserver::enable-ganglia': }
-    }
-
-    class { 'hdp-ganglia::monitor::config-gen': }
-  
-    class { 'hdp-ganglia::monitor::gmond': ensure => $service_state}
-
-    if ($hdp::params::service_exists['hdp-ganglia::server'] != true) {
-      Class['hdp-ganglia'] -> Hdp::Package['ganglia-monitor'] -> Class['hdp-ganglia::config'] -> 
-      Class['hdp-ganglia::monitor::config-gen'] -> Class['hdp-ganglia::monitor::gmond']
-    } else {
-      Hdp::Package['ganglia-monitor'] ->  Class['hdp-ganglia::monitor::config-gen'] -> Class['hdp-ganglia::monitor::gmond']
-    }
-  }
-}
-
-
-class hdp-ganglia::monitor::config-gen()
-{
-
-  $service_exists = $hdp::params::service_exists
-
-   #FIXME currently hacking this to make it work
-
-#  if ($service_exists['hdp-hadoop::namenode'] == true) {
-#    hdp-ganglia::config::generate_monitor { 'HDPNameNode':}
-#  }
-#  if ($service_exists['hdp-hadoop::jobtracker'] == true){
-#    hdp-ganglia::config::generate_monitor { 'HDPJobTracker':}
-#  }
-#  if ($service_exists['hdp-hbase::master'] == true) {
-#    hdp-ganglia::config::generate_monitor { 'HDPHBaseMaster':}
-#  }
-#  if ($service_exists['hdp-hadoop::datanode'] == true) {
-#    hdp-ganglia::config::generate_monitor { 'HDPSlaves':}
-#  }
-
-  # FIXME
-  # this will enable gmond for all clusters on the node
-  # should be selective based on roles present
-  hdp-ganglia::config::generate_monitor { 'HDPNameNode':}
-  hdp-ganglia::config::generate_monitor { 'HDPJobTracker':}
-  hdp-ganglia::config::generate_monitor { 'HDPHBaseMaster':}
-  hdp-ganglia::config::generate_monitor { 'HDPSlaves':}
-
-  Hdp-ganglia::Config::Generate_monitor<||>{
-    ganglia_service => 'gmond',
-    role => 'monitor'
-  }
-   # 
-  anchor{'hdp-ganglia::monitor::config-gen::begin':} -> Hdp-ganglia::Config::Generate_monitor<||> -> anchor{'hdp-ganglia::monitor::config-gen::end':}
-}
-
-class hdp-ganglia::monitor::gmond(
-  $ensure
-  )
-{
-  if ($ensure == 'running') {
-    $command = "service hdp-gmond start >> /tmp/gmond.log  2>&1 ; /bin/ps auwx | /bin/grep [g]mond  >> /tmp/gmond.log  2>&1"
-   } elsif  ($ensure == 'stopped') {
-    $command = "service hdp-gmond stop >> /tmp/gmond.log  2>&1 ; /bin/ps auwx | /bin/grep [g]mond  >> /tmp/gmond.log  2>&1"
-  }
-  if ($ensure == 'running' or $ensure == 'stopped') {
-    hdp::exec { "hdp-gmond service" :
-      command => $command,
-      path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
-    }
-  }
-}
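The [g]mond pattern in those commands is the classic self-excluding grep: the regex still matches the literal string "gmond", but the grep process's own command line contains "[g]mond", so the pipeline never reports the grep itself. Stand-alone (quoting the pattern so the shell cannot glob it):

    /bin/ps auwx | /bin/grep '[g]mond'    # lists gmond processes, never this grep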
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/monitor_and_server.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/monitor_and_server.pp
deleted file mode 100644
index 43161c3..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/monitor_and_server.pp
+++ /dev/null
@@ -1,90 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-ganglia::monitor_and_server(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-ganglia::params
-{
-  $ganglia_shell_cmds_dir = $hdp-ganglia::params::ganglia_shell_cmds_dir
-  $ganglia_conf_dir = $hdp-ganglia::params::ganglia_conf_dir
-  $ganglia_runtime_dir = $hdp-ganglia::params::ganglia_runtime_dir
-
-  #note: includes the common package ganglia-monitor
-  class { 'hdp-ganglia':
-    service_state => $service_state
-  }
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['uninstalled']) {
-    class { 'hdp-ganglia::server::packages':
-      ensure => 'uninstalled'
-      }
-
-    hdp::directory { [$ganglia_conf_dir,$ganglia_runtime_dir]:
-      service_state => $service_state,
-      force => true
-    }
-    
-    class { 'hdp-ganglia::config':
-      service_state => $service_state
-    }
-
-    Class['hdp-ganglia'] -> Class['hdp-ganglia::server::packages'] -> 
-      Hdp::Directory[$ganglia_conf_dir] -> Hdp::Directory[$ganglia_runtime_dir] ->
-      Class['hdp-ganglia::config']
-  } elsif ($service_state in ['running','stopped','installed_and_configured']) {
-    class { 'hdp-ganglia::server::packages': }
-
-    class { 'hdp-ganglia::config': 
-     ganglia_server_host => $hdp::params::host_address,
-     service_state       => $service_state
-     }
-
-    class {'hdp-ganglia::monitor::config-gen': }      
-
-    class {'hdp-ganglia::server::config-gen': }      
-    
-    hdp-ganglia::config::generate_server { 'gmetad':
-      ganglia_service => 'gmetad'
-    }
-
-    class { 'hdp-ganglia::service::gmond': 
-      ensure => $service_state
-    }
-
-    class { 'hdp-ganglia::server::services' : 
-      service_state => $service_state,
-      monitor_and_server_single_node => true
-    }
-
-    class { 'hdp-ganglia::service::change_permission':
-      ensure => $service_state
-    }
-
-    #top level no anchors needed
-    Class['hdp-ganglia'] -> Class['hdp-ganglia::server::packages'] -> Class['hdp-ganglia::config'] -> 
-      Class['hdp-ganglia::monitor::config-gen'] -> Class['hdp-ganglia::server::config-gen'] -> Hdp-ganglia::Config::Generate_server['gmetad'] ->
-      Class['hdp-ganglia::service::gmond'] -> Class['hdp-ganglia::server::services'] ->
-      Class['hdp-ganglia::service::change_permission']
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/params.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/params.pp
deleted file mode 100644
index 66688f5..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/params.pp
+++ /dev/null
@@ -1,33 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-ganglia::params() inherits hdp::params
-{
-  $ganglia_conf_dir = "/etc/ganglia/hdp"
-  $ganglia_runtime_dir = "/var/run/ganglia/hdp"
-
-  $ganglia_shell_cmds_dir = hdp_default("ganglia_shell_cmd_dir","/usr/libexec/hdp/ganglia")
-  
-  $gmetad_user = $hdp::params::gmetad_user
-  $gmond_user = $hdp::params::gmond_user
-
-  $webserver_group = hdp_default("hadoop/gangliaEnv/webserver_group","apache")
-  $rrdcached_base_dir = hdp_default("rrdcached_base_dir", "/var/lib/ganglia/rrds")
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/server.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/server.pp
deleted file mode 100644
index 38ef501..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/server.pp
+++ /dev/null
@@ -1,182 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-ganglia::server(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits  hdp-ganglia::params
-{
-  $hdp::params::service_exists['hdp-ganglia::server'] = true
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state == 'uninstalled') {
-
-   class { 'hdp-ganglia::server::packages':
-      ensure => 'uninstalled',
-      service_state => $service_state
-   }
-
-   class { 'hdp-ganglia::server::files':
-      ensure => 'absent'
-   }
-
-  } else {
-  class { 'hdp-ganglia':
-    service_state => $service_state
-  }
-
-  class { 'hdp-ganglia::server::packages':
-    ensure => 'present',
-    service_state => $service_state
-  }
-
-  class { 'hdp-ganglia::config': 
-    ganglia_server_host => $hdp::params::host_address,
-    service_state       => $service_state 
-  }
-
-  hdp-ganglia::config::generate_server { ['HDPHBaseMaster','HDPJobTracker','HDPNameNode','HDPSlaves']:
-    ganglia_service => 'gmond',
-    role => 'server'
-  }
-  hdp-ganglia::config::generate_server { 'gmetad':
-    ganglia_service => 'gmetad',
-    role => 'server'
-  }
-
-  class { 'hdp-ganglia::server::gmetad': ensure => $service_state}
-
-  class { 'hdp-ganglia::service::change_permission': ensure => $service_state }
-  
-  if ($service_state == 'installed_and_configured') {
-    $webserver_state = 'restart'
-  } elsif ($service_state == 'running') {
-    $webserver_state = 'running'
-  } else {
-    # We are never stopping httpd
-    #$webserver_state = $service_state
-  }
-
-  class { 'hdp-monitor-webserver': service_state => $webserver_state}
-
-   class { 'hdp-ganglia::server::files':
-      ensure => 'present'
-   }
-
-  #top level does not need anchors
-  Class['hdp-ganglia'] -> Class['hdp-ganglia::server::packages'] -> Class['hdp-ganglia::config'] ->
- Hdp-ganglia::Config::Generate_server<||> ->
- Class['hdp-ganglia::server::gmetad'] -> Class['hdp-ganglia::service::change_permission'] -> Class['hdp-ganglia::server::files'] -> Class['hdp-monitor-webserver']
- }
-}
-
-class hdp-ganglia::server::packages(
-  $ensure = present,
-  $service_state = 'installed_and_configured'
-)
-{
-  hdp::package { ['ganglia-server','ganglia-gweb','ganglia-hdp-gweb-addons']: 
-    ensure      => $ensure,
-    java_needed => false,
-    require => Hdp::Package['rrdtool-python']
-  }
-
-  # Removing conflicting packages only once to workaround "/bin/rpm -e absent-absent-absent.absent" bug (BUG-2881)
-  if ($service_state == 'installed_and_configured' and $hdp::params::hdp_os_type == 'centos5') {
-    # Remove conflicting 32bit package
-    hdp::package { ['rrdtool-devel']:
-      ensure      => 'absent',
-      java_needed => false,
-      before => Hdp::Package['rrdtool']
-    }
-
-    # Remove conflicting 32bit package
-    hdp::package { ['rrdtool']:
-      ensure      => 'absent',
-      java_needed => false,
-      before => Hdp::Package['rrdtool-python']
-    }
-  }
-
-  hdp::package { ['rrdtool-python']:
-    ensure      => $ensure,
-    java_needed => false
-  }
-
-}
-
-class hdp-ganglia::server::files(
-  $ensure = present 
-)
-{
-  $rrd_py_path = $hdp::params::rrd_py_path[$hdp::params::hdp_os_type]
-  hdp::directory_recursive_create{$rrd_py_path:
-    ensure => "directory", 
-    override_owner => false 
-  }
-
-  $rrd_py_file_path = "${rrd_py_path}/rrd.py"
-
-  file{$rrd_py_file_path :
-    ensure => $ensure,
-    source => "puppet:///modules/hdp-ganglia/rrd.py",
-    mode   => '0755',
-    require => Hdp::Directory_recursive_create[$rrd_py_path]
-  }
-
-  $rrd_files_dir = $hdp-ganglia::params::rrdcached_base_dir
-  $rrd_file_owner = $hdp-ganglia::params::gmetad_user
-  hdp::directory_recursive_create{ $rrd_files_dir :
-    ensure => "directory",
-    owner => $rrd_file_owner,
-    group => $rrd_file_owner,
-    mode => '0755'
-  }
-
-}
-
-
-class hdp-ganglia::service::change_permission(
-  $ensure
-)
-{
-  if ($ensure == 'running' or $ensure == 'installed_and_configured') {
-    hdp::directory_recursive_create { '/var/lib/ganglia/dwoo' :
-      mode => '0777'
-      }
-  }
-}
-
-class hdp-ganglia::server::gmetad(
-  $ensure
-)
-{
-  if ($ensure == 'running') {
-    $command = "service hdp-gmetad start >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1"
-   } elsif  ($ensure == 'stopped') {
-    $command = "service hdp-gmetad stop >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1"
-  }
-  if ($ensure == 'running' or $ensure == 'stopped') {
-    hdp::exec { "hdp-gmetad service" :
-      command => "$command",
-      path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
-    }
-  }
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaClusters.conf.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaClusters.conf.erb
deleted file mode 100644
index c92b6ef..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaClusters.conf.erb
+++ /dev/null
@@ -1,25 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-#########################################################
-### ClusterName           GmondMasterHost   GmondPort ###
-#########################################################
-    HDPSlaves       		<%=scope.function_hdp_host("ganglia_server_host")%>  8660
-    HDPNameNode         <%=scope.function_hdp_host("ganglia_server_host")%>  8661
-    HDPJobTracker     	<%=scope.function_hdp_host("ganglia_server_host")%>  8662
-    HDPHBaseMaster      <%=scope.function_hdp_host("ganglia_server_host")%>  8663
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaEnv.sh.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaEnv.sh.erb
deleted file mode 100644
index 4be541d..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaEnv.sh.erb
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Unix users and groups for the binaries we start up.
-GMETAD_USER=<%=scope.function_hdp_template_var("gmetad_user")%>;
-GMOND_USER=<%=scope.function_hdp_template_var("gmond_user")%>;
-WEBSERVER_GROUP=<%=scope.function_hdp_template_var("webserver_group")%>;
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaLib.sh.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaLib.sh.erb
deleted file mode 100644
index f129e37..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaLib.sh.erb
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-GANGLIA_CONF_DIR=<%=scope.function_hdp_template_var("ganglia_conf_dir")%>;
-GANGLIA_RUNTIME_DIR=<%=scope.function_hdp_template_var("ganglia_runtime_dir")%>;
-RRDCACHED_BASE_DIR=<%=scope.function_hdp_template_var("rrdcached_base_dir")%>;
-
-# This file contains all the info about each Ganglia Cluster in our Grid.
-GANGLIA_CLUSTERS_CONF_FILE=./gangliaClusters.conf;
-
-function createDirectory()
-{
-    directoryPath=${1};
-
-    if [ "x" != "x${directoryPath}" ]
-    then
-        mkdir -p ${directoryPath};
-    fi
-}
-
-function getGangliaClusterInfo()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # Fetch the particular entry for ${clusterName} from ${GANGLIA_CLUSTERS_CONF_FILE}.
-        awk -v clusterName=${clusterName} '($1 !~ /^#/) && ($1 == clusterName)' ${GANGLIA_CLUSTERS_CONF_FILE};
-    else
-        # Spit out all the non-comment, non-empty lines from ${GANGLIA_CLUSTERS_CONF_FILE}.
-        awk '($1 !~ /^#/) && (NF)' ${GANGLIA_CLUSTERS_CONF_FILE};
-    fi
-}
-
-function getConfiguredGangliaClusterNames()
-{
-  # Find all the subdirectories in ${GANGLIA_CONF_DIR} and extract only 
-  # the subdirectory name from each.
-  if [ -e ${GANGLIA_CONF_DIR} ]
-  then  
-    find ${GANGLIA_CONF_DIR} -maxdepth 1 -mindepth 1 -type d | xargs -n1 basename;
-  fi
-}
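For reference, getGangliaClusterInfo() is a plain awk filter over gangliaClusters.conf. A minimal sketch of what a lookup resolves to, assuming an illustrative host name (not taken from this diff):

```sh
# gangliaClusters.conf maps cluster names to the gmond master host/port, e.g.:
#   HDPSlaves    ganglia.example.com  8660
# Fetching the entry for one cluster mirrors getGangliaClusterInfo:
awk -v clusterName=HDPSlaves \
  '($1 !~ /^#/) && ($1 == clusterName)' gangliaClusters.conf
# Prints: HDPSlaves    ganglia.example.com  8660
```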
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/files/checkForFormat.sh b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/files/checkForFormat.sh
deleted file mode 100644
index d14091a..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/files/checkForFormat.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-export hdfs_user=$1
-shift
-export conf_dir=$1
-shift
-export mark_dir=$1
-shift
-export name_dirs=$*
-
-export EXIT_CODE=0
-export command="namenode -format"
-export list_of_non_empty_dirs=""
-
-mark_file=/var/run/hadoop/hdfs/namenode-formatted
-if [[ -f ${mark_file} ]] ; then
-  rm -f ${mark_file}
-  mkdir -p ${mark_dir}
-fi
-
-if [[ ! -d $mark_dir ]] ; then
-  for dir in `echo $name_dirs | tr ',' ' '` ; do
-    echo "NameNode Dirname = $dir"
-    cmd="ls $dir | wc -l  | grep -q ^0$"
-    eval $cmd
-    if [[ $? -ne 0 ]] ; then
-      (( EXIT_CODE = $EXIT_CODE + 1 ))
-      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
-    fi
-  done
-
-  if [[ $EXIT_CODE == 0 ]] ; then
-    su - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
-  else
-    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
-  fi
-else
-  echo "${mark_dir} exists. Namenode DFS already formatted"
-fi
-
-exit $EXIT_CODE
-
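A sketch of how this script is invoked (format.pp below calls it the same way; the concrete paths here are illustrative defaults, not taken from this diff). Arguments are: HDFS user, conf dir, mark dir, then the comma-separated NameNode name dirs:

```sh
sh /tmp/checkForFormat.sh hdfs /etc/hadoop/conf \
  /var/run/hadoop/hdfs/namenode-formatted /hadoop/hdfs/namenode
# Exit 0: the format ran (or the mark dir already existed).
# Non-zero: at least one name dir was non-empty, so the format was skipped.
```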
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_mode.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_mode.rb
deleted file mode 100644
index 828b593..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_mode.rb
+++ /dev/null
@@ -1,65 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#to handle differences in how args are passed in
-module Puppet::Parser::Functions
-  newfunction(:hdp_hadoop_get_mode, :type => :rvalue) do |args|
-  
-    dir = args[0]
-
-    oozie_dir = lookupvar("::hdp::params::oozie_hdfs_user_dir")
-    oozie_dir_mode = lookupvar("::hdp::params::oozie_hdfs_user_mode") 
-    
-    hcat_dir = lookupvar("::hdp::params::hcat_hdfs_user_dir")
-    hcat_dir_mode = lookupvar("::hdp::params::hcat_hdfs_user_mode") 
-    
-    webhcat_dir = lookupvar("::hdp::params::webhcat_hdfs_user_dir")
-    webhcat_dir_mode = lookupvar("::hdp::params::webhcat_hdfs_user_mode") 
-    
-    hive_dir = lookupvar("::hdp::params::hive_hdfs_user_dir")
-    hive_dir_mode = lookupvar("::hdp::params::hive_hdfs_user_mode") 
-    
-    smoke_dir = lookupvar("::hdp::params::smoke_hdfs_user_dir")
-    smoke_dir_mode = lookupvar("::hdp::params::smoke_hdfs_user_mode") 
-    
-    modes = []
-    modes.push({:dir => oozie_dir, :mode => oozie_dir_mode})
-    modes.push({:dir => hcat_dir, :mode => hcat_dir_mode})
-    modes.push({:dir => webhcat_dir, :mode => webhcat_dir_mode})
-    modes.push({:dir => hive_dir, :mode => hive_dir_mode})
-    modes.push({:dir => smoke_dir, :mode => smoke_dir_mode})
-
-    modes_grouped = {}
-    modes.each do |item|
-      if modes_grouped[item[:dir]].nil?
-        modes_grouped[item[:dir]]=[]
-      end
-      modes_grouped[item[:dir]]=modes_grouped[item[:dir]] + [(item[:mode])]
-    end
-
-    modes_max = {}
-    
-    modes_grouped.each_key do |key|
-      modes_max[key] = modes_grouped[key].max
-    end
-
-    modes_max[dir]
-  end
-end
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_owner.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_owner.rb
deleted file mode 100644
index 9ae36ef..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_owner.rb
+++ /dev/null
@@ -1,51 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#to handle differences in how args are passed in
-module Puppet::Parser::Functions
-  newfunction(:hdp_hadoop_get_owner, :type => :rvalue) do |args|
-  
-    dir = args[0]
-    
-    oozie_dir = lookupvar("::hdp::params::oozie_hdfs_user_dir")
-    oozie_user = lookupvar("::hdp::params::oozie_user") 
-
-    hcat_dir = lookupvar("::hdp::params::hcat_hdfs_user_dir")
-    hcat_user = lookupvar("::hdp::params::hcat_user") 
-
-    webhcat_dir = lookupvar("::hdp::params::webhcat_hdfs_user_dir")
-    webhcat_user = lookupvar("::hdp::params::webhcat_user") 
-
-    hive_dir = lookupvar("::hdp::params::hive_hdfs_user_dir")
-    hive_user = lookupvar("::hdp::params::hive_user") 
-
-    smoke_dir = lookupvar("::hdp::params::smoke_hdfs_user_dir")
-    smoke_user = lookupvar("::hdp::params::smokeuser") 
-
-    dirs_to_owners = {}
-    dirs_to_owners[oozie_dir] = oozie_user
-    dirs_to_owners[hcat_dir] = hcat_user
-    dirs_to_owners[webhcat_dir] = webhcat_user
-    dirs_to_owners[hive_dir] = hive_user
-    dirs_to_owners[smoke_dir] = smoke_user
-
-    dirs_to_owners[dir]
-  end
-end
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/client.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/client.pp
deleted file mode 100644
index 8ed93f5..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/client.pp
+++ /dev/null
@@ -1,56 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::client(
-  $service_state = $hdp::params::cluster_client_state
-) inherits hdp-hadoop::params
-{
-  $hdp::params::service_exists['hdp-hadoop::client'] = true
-
-  Hdp-hadoop::Common<||>{service_states +> $service_state}
-
-  if ($hdp::params::use_32_bits_on_slaves == true) {
-    Hdp-hadoop::Package<||>{include_32_bit => true}
-    Hdp-hadoop::Configfile<||>{sizes +> 32}
-  } else {
-    Hdp-hadoop::Package<||>{include_64_bit => true}
-    Hdp-hadoop::Configfile<||>{sizes +> 64}
-  }
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['installed_and_configured','uninstalled']) {
-    #adds package, users and directories, and common hadoop configs
-    include hdp-hadoop::initialize
-
-    if ( ($service_state == 'installed_and_configured') and
-         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
-      $masterHost = $kerberos_adminclient_host[0]
-      hdp::download_keytab { 'hadoop_client_ambari_qa_keytab' :
-        masterhost => $masterHost,
-        keytabdst => "${$keytab_path}/ambari_qa.headless.keytab",
-        keytabfile => 'ambari_qa.headless.keytab',
-        owner => 'ambari_qa',
-        hostnameInPrincipals => 'no'
-      }
-    }
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/datanode.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/datanode.pp
deleted file mode 100644
index 0b26c61..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/datanode.pp
+++ /dev/null
@@ -1,101 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::datanode(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-hadoop::params 
-{
-
-  $hdp::params::service_exists['hdp-hadoop::datanode'] = true
-
-  Hdp-hadoop::Common<||>{service_states +> $service_state}
-
-  if ($hdp::params::use_32_bits_on_slaves == true) {
-    Hdp-hadoop::Package<||>{include_32_bit => true}
-    Hdp-hadoop::Configfile<||>{sizes +> 32}
-  } else {
-    Hdp-hadoop::Package<||>{include_64_bit => true}
-    Hdp-hadoop::Configfile<||>{sizes +> 64}
-  }
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
-    $dfs_data_dir = $hdp-hadoop::params::dfs_data_dir
-  
-    if (($hdp::params::service_exists['hdp-hadoop::namenode'] == true) or ($hdp::params::service_exists['hdp-hadoop::snamenode'] == true)){
-      $a_namenode_on_node = true
-    } else {
-      $a_namenode_on_node = false
-    }
-
-    #adds package, users and directories, and common hadoop configs
-    include hdp-hadoop::initialize
-
-    if ( ($service_state == 'installed_and_configured') and
-         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
-      $masterHost = $kerberos_adminclient_host[0]
-      hdp::download_keytab { 'datanode_service_keytab' :
-        masterhost => $masterHost,
-        keytabdst => "${$keytab_path}/dn.service.keytab",
-        keytabfile => 'dn.service.keytab',
-        owner => $hdp-hadoop::params::hdfs_user
-      }
-    }
-
-  
-    hdp-hadoop::datanode::create_data_dirs { $dfs_data_dir: 
-      service_state => $service_state
-    }
-
-    if ($a_namenode_on_node == true){
-      $create_pid_dir = false
-      $create_log_dir = false
-    } else {
-      $create_pid_dir = true
-      $create_log_dir = true
-    }
-    
-    hdp-hadoop::service{ 'datanode':
-      ensure         => $service_state,
-      user           => $hdp-hadoop::params::hdfs_user,
-      create_pid_dir => $create_pid_dir,
-      create_log_dir => $create_log_dir
-    }
-    
-    #top level does not need anchors
-    Class['hdp-hadoop'] -> Hdp-hadoop::Service['datanode']
-    Hdp-hadoop::Datanode::Create_data_dirs<||> -> Hdp-hadoop::Service['datanode']
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-define hdp-hadoop::datanode::create_data_dirs($service_state)
-{
-  $dirs = hdp_array_from_comma_list($name)
-  hdp::directory_recursive_create_ignore_failure { $dirs :
-    owner => $hdp-hadoop::params::hdfs_user,
-    mode => '0750',
-    service_state => $service_state,
-    force => true
-  }
-
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/copyfromlocal.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/copyfromlocal.pp
deleted file mode 100644
index 72cedf6..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/copyfromlocal.pp
+++ /dev/null
@@ -1,77 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-define hdp-hadoop::hdfs::copyfromlocal(
-  $service_state,
-  $owner = unset,
-  $group = unset,
-  $recursive_chown = false,
-  $mode = undef,
-  $recursive_chmod = false,
-  $dest_dir = undef 
-) 
-{
- 
-  if ($service_state == 'running') {
-    $copy_cmd = "fs -copyFromLocal ${name} ${dest_dir}"
-    hdp-hadoop::exec-hadoop { $copy_cmd:
-      command => $copy_cmd,
-      unless => "hadoop fs -ls ${dest_dir} >/dev/null 2>&1",
-      user => $owner
-    }
-    if ($owner == unset) {
-      $chown = ""
-    } else {
-      if ($group == unset) {
-        $chown = $owner
-      } else {
-        $chown = "${owner}:${group}"
-     } 
-    }  
- 
-    if ($chown != "") {
-      #TODO: see if there is a good 'unless test'
-      if ($recursive_chown == true) {
-        $chown_cmd = "fs -chown -R ${chown} ${dest_dir}"
-      } else {
-        $chown_cmd = "fs -chown ${chown} ${dest_dir}"
-      }
-      hdp-hadoop::exec-hadoop {$chown_cmd :
-        command => $chown_cmd,
-        user => $owner
-      }
-      Hdp-hadoop::Exec-hadoop[$copy_cmd] -> Hdp-hadoop::Exec-hadoop[$chown_cmd]
-    }
-  
-    if ($mode != undef) {
-      #TODO: see if there is a good 'unless test'
-      if ($recursive_chmod == true) {
-        $chmod_cmd = "fs -chmod -R ${mode} ${dest_dir}"
-      } else {
-        $chmod_cmd = "fs -chmod ${mode} ${dest_dir}"
-      }
-      hdp-hadoop::exec-hadoop {$chmod_cmd :
-        command => $chmod_cmd,
-        user => $owner
-      }
-      Hdp-hadoop::Exec-hadoop[$copy_cmd] -> Hdp-hadoop::Exec-hadoop[$chmod_cmd]
-    }
-  }       
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/decommission.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/decommission.pp
deleted file mode 100644
index da7b63e..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/decommission.pp
+++ /dev/null
@@ -1,36 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::hdfs::decommission(
-) inherits hdp-hadoop::params
-{
-  if hdp_is_empty($configuration[hdfs-site]['dfs.hosts.exclude']) {
-    hdp_fail("There is no path to exclude file in configuration!")
-  }
-
-  hdp-hadoop::hdfs::generate_exclude_file{'exclude_file':}
-
-  hdp::exec{"hadoop dfsadmin -refreshNodes":
-      command => "hadoop dfsadmin -refreshNodes",
-      user => $hdp::params::hdfs_user,
-      require => Hdp-Hadoop::Hdfs::Generate_Exclude_File['exclude_file']
-    }
-  
-}
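Stripped of the Puppet plumbing, the class regenerates the exclude file and then refreshes the NameNode's node list as the HDFS user; roughly (user name illustrative):

```sh
# Re-read dfs.hosts.exclude and begin decommissioning the listed DataNodes
su - hdfs -c "hadoop dfsadmin -refreshNodes"
```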
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/directory.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/directory.pp
deleted file mode 100644
index eeac5fd..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/directory.pp
+++ /dev/null
@@ -1,74 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#TODO: unset should be changed to undef, just to be consistent
-define hdp-hadoop::hdfs::directory(
-  $service_state = 'running',
-  $owner = unset,
-  $group = unset,
-  $recursive_chown = false,
-  $mode = undef,
-  $recursive_chmod = false
-) 
-{
- 
-  if ($service_state == 'running') {
-    $mkdir_cmd = "fs -mkdir ${name}"
-    hdp-hadoop::exec-hadoop { $mkdir_cmd:
-      command => $mkdir_cmd,
-      unless => "hadoop fs -ls ${name} >/dev/null 2>&1"
-    }
-    if ($owner == unset) {
-      $chown = ""
-    } else {
-      if ($group == unset) {
-        $chown = $owner
-      } else {
-        $chown = "${owner}:${group}"
-     } 
-    }  
- 
-    if ($chown != "") {
-      #TODO: see if there is a good 'unless test'
-      if ($recursive_chown == true) {
-        $chown_cmd = "fs -chown -R ${chown} ${name}"
-      } else {
-        $chown_cmd = "fs -chown ${chown} ${name}"
-      }
-      hdp-hadoop::exec-hadoop {$chown_cmd :
-        command => $chown_cmd
-      }
-      Hdp-hadoop::Exec-hadoop[$mkdir_cmd] -> Hdp-hadoop::Exec-hadoop[$chown_cmd]
-    }
-  
-    if ($mode != undef) {
-      #TODO: see if there is a good 'unless test'
-      if ($recursive_chmod == true) {
-        $chmod_cmd = "fs -chmod -R ${mode} ${name}"
-      } else {
-        $chmod_cmd = "fs -chmod ${mode} ${name}"
-      }
-      hdp-hadoop::exec-hadoop {$chmod_cmd :
-        command => $chmod_cmd
-      }
-      Hdp-hadoop::Exec-hadoop[$mkdir_cmd] -> Hdp-hadoop::Exec-hadoop[$chmod_cmd]
-    }
-  }       
-}
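Each hdp-hadoop::hdfs::directory instance expands into at most three guarded hadoop invocations; a sketch of the generated commands for a running cluster (path, owner, and mode are illustrative):

```sh
# Guarded mkdir: create only if the path does not already exist
hadoop fs -ls /mapred >/dev/null 2>&1 || hadoop fs -mkdir /mapred
# Optional ownership and mode, with -R when the recursive_* flags are set
hadoop fs -chown mapred:hadoop /mapred
hadoop fs -chmod 755 /mapred
```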
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/generate_exclude_file.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/generate_exclude_file.pp
deleted file mode 100644
index 5053e73..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/generate_exclude_file.pp
+++ /dev/null
@@ -1,42 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-define hdp-hadoop::hdfs::generate_exclude_file()
-{
-  $exclude_file_path = $configuration['hdfs-site']['dfs.hosts.exclude']
-  ## Generate the exclude file when $configuration['hdfs-exclude-file']['datanodes']
-  ## has a value, or when the 'datanodes' key is present but its value is empty
-  if (hdp_is_empty($configuration) == false and
-    hdp_is_empty($configuration['hdfs-exclude-file']) == false) and
-    (hdp_is_empty($configuration['hdfs-exclude-file']['datanodes']) == false)
-    or has_key($configuration['hdfs-exclude-file'], 'datanodes') {
-    ##Create file with list of excluding hosts
-    $exlude_hosts_list = hdp_array_from_comma_list($configuration['hdfs-exclude-file']['datanodes'])
-    file { $exclude_file_path :
-      ensure => file,
-      content => template('hdp-hadoop/exclude_hosts_list.erb')
-    }
-  }
-}
-
-
-
-
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/service_check.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/service_check.pp
deleted file mode 100644
index ace972d..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/service_check.pp
+++ /dev/null
@@ -1,83 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::hdfs::service_check()
-{
-  $unique = hdp_unique_id_and_date()
-  $dir = '/tmp'
-  $tmp_file = "${dir}/${unique}"
-
-  $safemode_command = "dfsadmin -safemode get | grep OFF"
-
-  $create_dir_cmd = "fs -mkdir ${dir} ; hadoop fs -chmod -R 777 ${dir}"
-  $test_dir_exists = "hadoop fs -test -e ${dir}" #TODO: may fix up the fact that the test needs an explicit 'hadoop' prefix while the command above does not
-  $cleanup_cmd = "fs -rm ${tmp_file}"
-  #cleanup put below to handle retries; when retrying there will be a stale file that needs cleanup; the exit code is a function of the second command
-  $create_file_cmd = "${cleanup_cmd}; hadoop fs -put /etc/passwd ${tmp_file}" #TODO: inconsistent that the second command needs 'hadoop'
-  $test_cmd = "fs -test -e ${tmp_file}"
-
-  anchor { 'hdp-hadoop::hdfs::service_check::begin':}
-
-  hdp-hadoop::exec-hadoop { 'hdfs::service_check::check_safemode':
-    command   => $safemode_command,
-    tries     => 40,
-    try_sleep => 15,
-    logoutput => true,
-    user      => $hdp::params::smokeuser,
-    require   => Anchor['hdp-hadoop::hdfs::service_check::begin']
-  }
-
-  hdp-hadoop::exec-hadoop { 'hdfs::service_check::create_dir':
-    command   => $create_dir_cmd,
-    unless    => $test_dir_exists,
-    tries     => 3,
-    try_sleep => 5,
-    user      => $hdp::params::smokeuser,
-    require   => Hdp-hadoop::Exec-hadoop['hdfs::service_check::check_safemode']
-  }
-
-  hdp-hadoop::exec-hadoop { 'hdfs::service_check::create_file':
-    command   => $create_file_cmd,
-    tries     => 3,
-    try_sleep => 5,
-    user      => $hdp::params::smokeuser,
-    require   => Hdp-hadoop::Exec-hadoop['hdfs::service_check::create_dir'],
-    notify    => Hdp-hadoop::Exec-hadoop['hdfs::service_check::test']
-  }
-
-  hdp-hadoop::exec-hadoop { 'hdfs::service_check::test':
-    command     => $test_cmd,
-    refreshonly => true,
-    user      => $hdp::params::smokeuser,
-    require     => Hdp-hadoop::Exec-hadoop['hdfs::service_check::create_file'],
-    #notify      => Hdp-hadoop::Exec-hadoop['hdfs::service_check::cleanup']  #TODO: put in after testing
-    before      => Anchor['hdp-hadoop::hdfs::service_check::end'] #TODO: remove after testing
-  }
-
-   #TODO: put in after testing
- #  hdp-hadoop::exec-hadoop { 'hdfs::service_check::cleanup':
- #   command     => $cleanup_cmd,
- #   refreshonly => true,
- #   require     => Hdp-hadoop::Exec-hadoop['hdfs::service_check::test'],
- #   before      => Anchor['hdp-hadoop::hdfs::service_check::end']
-  #}
-  anchor{ 'hdp-hadoop::hdfs::service_check::end':}
-
-}
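Run by hand, the smoke test is roughly the following four steps, executed as the smoke user (the unique file name is illustrative):

```sh
# 1. Wait until the NameNode reports safemode is OFF (Puppet retries this)
hadoop dfsadmin -safemode get | grep OFF
# 2. Ensure a world-writable scratch dir exists
hadoop fs -test -e /tmp || { hadoop fs -mkdir /tmp; hadoop fs -chmod -R 777 /tmp; }
# 3. Remove any stale copy from a prior retry, then write a test file
hadoop fs -rm /tmp/id_20130405 2>/dev/null; hadoop fs -put /etc/passwd /tmp/id_20130405
# 4. Verify the write landed
hadoop fs -test -e /tmp/id_20130405
```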
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp
deleted file mode 100644
index 020e51c..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp
+++ /dev/null
@@ -1,321 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#singleton for use with the <||> form so that namenode, datanode, etc. can pass state to hdp-hadoop and still use include
-define hdp-hadoop::common(
-  $service_states = []
-)
-{
-  class { 'hdp-hadoop':
-    service_states => $service_states    
-  }
-  anchor{'hdp-hadoop::common::begin':} -> Class['hdp-hadoop'] -> anchor{'hdp-hadoop::common::end':} 
-}
-
-class hdp-hadoop::initialize()
-{
-  if ($hdp::params::component_exists['hdp-hadoop'] == true) {
-  } else {
-    $hdp::params::component_exists['hdp-hadoop'] = true
-  }
-  hdp-hadoop::common { 'common':}
-  anchor{'hdp-hadoop::initialize::begin':} -> Hdp-hadoop::Common['common'] -> anchor{'hdp-hadoop::initialize::end':}
-
-# Configs generation  
-
-debug('##Configs generation for hdp-hadoop')
-
-
-  if has_key($configuration, 'mapred-queue-acls') {
-    configgenerator::configfile{'mapred-queue-acls': 
-      modulespath => $hdp-hadoop::params::conf_dir,
-      filename => 'mapred-queue-acls.xml',
-      module => 'hdp-hadoop',
-      configuration => $configuration['mapred-queue-acls'],
-      owner => $hdp-hadoop::params::mapred_user,
-      group => $hdp::params::user_group
-    }
-  } else { # Manually overriding ownership of the file installed by the hadoop package
-    file { "${hdp-hadoop::params::conf_dir}/mapred-queue-acls.xml":
-      owner => $hdp-hadoop::params::mapred_user,
-      group => $hdp::params::user_group
-    }
-  }
-  
-  if has_key($configuration, 'hadoop-policy') {
-    configgenerator::configfile{'hadoop-policy': 
-      modulespath => $hdp-hadoop::params::conf_dir,
-      filename => 'hadoop-policy.xml',
-      module => 'hdp-hadoop',
-      configuration => $configuration['hadoop-policy'],
-      owner => $hdp-hadoop::params::hdfs_user,
-      group => $hdp::params::user_group
-    }
-  }
-
-  if has_key($configuration, 'core-site') {
-      configgenerator::configfile{'core-site': 
-        modulespath => $hdp-hadoop::params::conf_dir,
-        filename => 'core-site.xml',
-        module => 'hdp-hadoop',
-        configuration => $configuration['core-site'],
-        owner => $hdp-hadoop::params::hdfs_user,
-        group => $hdp::params::user_group
-      }
-    }
-
-  if has_key($configuration, 'mapred-site') {
-    configgenerator::configfile{'mapred-site': 
-      modulespath => $hdp-hadoop::params::conf_dir,
-      filename => 'mapred-site.xml',
-      module => 'hdp-hadoop',
-      configuration => $configuration['mapred-site'],
-      owner => $hdp-hadoop::params::mapred_user,
-      group => $hdp::params::user_group
-    }
-  }
-
-
-  if has_key($configuration, 'capacity-scheduler') {
-    configgenerator::configfile{'capacity-scheduler':
-      modulespath => $hdp-hadoop::params::conf_dir,
-      filename => 'capacity-scheduler.xml',
-      module => 'hdp-hadoop',
-      configuration => $configuration['capacity-scheduler'],
-      owner => $hdp-hadoop::params::hdfs_user,
-      group => $hdp::params::user_group,
-    }
-  } 
-
-
-  if has_key($configuration, 'hdfs-site') {
-    configgenerator::configfile{'hdfs-site': 
-      modulespath => $hdp-hadoop::params::conf_dir,
-      filename => 'hdfs-site.xml',
-      module => 'hdp-hadoop',
-      configuration => $configuration['hdfs-site'],
-      owner => $hdp-hadoop::params::hdfs_user,
-      group => $hdp::params::user_group
-    }
-  }
-
-  if has_key($configuration, 'hdfs-exclude-file') {
-    hdp-hadoop::hdfs::generate_exclude_file{'exclude_file':}
-  }
-
-  hdp::package {'ambari-log4j':
-    package_type  => 'ambari-log4j'
-  }
-
-  file { '/usr/lib/hadoop/lib/hadoop-tools.jar':
-    ensure => 'link',
-    target => '/usr/lib/hadoop/hadoop-tools.jar',
-    mode => '0755',
-  }
-}
-
-class hdp-hadoop(
-  $service_states  = []
-)
-{
-  include hdp-hadoop::params
-  $hadoop_config_dir = $hdp-hadoop::params::conf_dir
-  $mapred_user = $hdp-hadoop::params::mapred_user  
-  $hdfs_user = $hdp-hadoop::params::hdfs_user  
-
-  anchor{'hdp-hadoop::begin':} 
-  anchor{'hdp-hadoop::end':} 
-
-  if ('uninstalled' in $service_states) {
-    hdp-hadoop::package { 'hadoop':
-      ensure => 'uninstalled'
-    }
-
-    hdp::directory_recursive_create { $hadoop_config_dir:
-      service_state => $service_state,
-      force => true
-    }
-
-    Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Package<||> -> Hdp::Directory_recursive_create[$hadoop_config_dir] -> Anchor['hdp-hadoop::end']
-  } else {
-    
-    hdp-hadoop::package { 'hadoop':}
-
-
-    hdp::directory_recursive_create { $hadoop_config_dir:
-      service_state => $service_state,
-      force => true
-    }
- 
-    hdp::user{ $hdfs_user:}
-    if ($hdfs_user != $mapred_user) {
-      hdp::user { $mapred_user:}
-    }
-
-    $logdirprefix = $hdp-hadoop::params::hdfs_log_dir_prefix
-    hdp::directory_recursive_create { $logdirprefix: 
-        owner => 'root'
-    }
-    $piddirprefix = $hdp-hadoop::params::hadoop_pid_dir_prefix
-    hdp::directory_recursive_create { $piddirprefix: 
-        owner => 'root'
-    }
- 
-    #taskcontroller.cfg properties conditional on security
-    if ($hdp::params::security_enabled == true) {
-      file { "${hdp::params::hadoop_bin}/task-controller":
-        owner   => 'root',
-        group   => $hdp::params::user_group,
-        mode    => '6050',
-        require => Hdp-hadoop::Package['hadoop'],
-        before  => Anchor['hdp-hadoop::end']
-      }
-      $tc_owner = 'root'
-      $tc_mode = '0400'
-    } else {
-      $tc_owner = $hdfs_user
-      $tc_mode = undef
-    }
-    hdp-hadoop::configfile { 'taskcontroller.cfg' :
-      tag   => 'common',
-      owner => $tc_owner,
-      mode  => $tc_mode
-    }
-
-    $template_files = [ 'hadoop-env.sh', 'health_check', 'commons-logging.properties', 'log4j.properties', 'slaves']
-    hdp-hadoop::configfile { $template_files:
-      tag   => 'common', 
-      owner => $hdfs_user
-    }
-    
-    hdp-hadoop::configfile { 'hadoop-metrics2.properties' : 
-      tag   => 'common', 
-      owner => $hdfs_user,
-    }
-
-    Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Package<||> ->  Hdp::Directory_recursive_create[$hadoop_config_dir] ->  Hdp::User<|title == $hdfs_user or title == $mapred_user|> 
-    -> Hdp-hadoop::Configfile<|tag == 'common'|> -> Anchor['hdp-hadoop::end']
-    Anchor['hdp-hadoop::begin'] -> Hdp::Directory_recursive_create[$logdirprefix] -> Anchor['hdp-hadoop::end']
-    Anchor['hdp-hadoop::begin'] -> Hdp::Directory_recursive_create[$piddirprefix] -> Anchor['hdp-hadoop::end']
-  }
-}
-
-class hdp-hadoop::enable-ganglia()
-{
-  Hdp-hadoop::Configfile<|title  == 'hadoop-metrics2.properties'|>{template_tag => 'GANGLIA'}
-}
-
-###config file helper
-define hdp-hadoop::configfile(
-  $owner = undef,
-  $hadoop_conf_dir = $hdp-hadoop::params::conf_dir,
-  $mode = undef,
-  $namenode_host = undef,
-  $jtnode_host = undef,
-  $snamenode_host = undef,
-  $template_tag = undef,
-  $size = undef, #TODO: deprecate
-  $sizes = []
-) 
-{
-  #TODO: may need to be fixed 
-  if ($jtnode_host == undef) {
-    $calc_jtnode_host = $namenode_host
-  } else {
-    $calc_jtnode_host = $jtnode_host 
-  }
- 
-  #only set 32 if there is a 32-bit component and no 64-bit components
-  if (64 in $sizes) {
-    $common_size = 64
-  } elsif (32 in $sizes) {
-    $common_size = 32
-  } else {
-    $common_size = 64
-  }
-  
-  hdp::configfile { "${hadoop_conf_dir}/${name}":
-    component      => 'hadoop',
-    owner          => $owner,
-    mode           => $mode,
-    namenode_host  => $namenode_host,
-    snamenode_host => $snamenode_host,
-    jtnode_host    => $calc_jtnode_host,
-    template_tag   => $template_tag,
-    size           => $common_size
-  }
-}
-
-#####
-define hdp-hadoop::exec-hadoop(
-  $command,
-  $unless = undef,
-  $refreshonly = undef,
-  $echo_yes = false,
-  $kinit_override = false,
-  $tries = 1,
-  $timeout = 900,
-  $try_sleep = undef,
-  $user = undef,
-  $logoutput = undef
-)
-{
-  include hdp-hadoop::params
-  $security_enabled = $hdp::params::security_enabled
-  $conf_dir = $hdp-hadoop::params::conf_dir
-  $hdfs_user = $hdp-hadoop::params::hdfs_user
-
-  if ($user == undef) {
-    $run_user = $hdfs_user
-  } else {
-    $run_user = $user
-  }
-
-  if (($security_enabled == true) and ($kinit_override == false)) {
-    #TODO: maybe figure out how to avoid calling kinit when auth is already in the cache
-    if ($run_user in [$hdfs_user,'root']) {
-      $keytab = "${hdp-hadoop::params::keytab_path}/${hdfs_user}.headless.keytab"
-      $principal = $hdfs_user
-    } else {
-      $keytab = "${hdp-hadoop::params::keytab_path}/${user}.headless.keytab" 
-      $principal = $user
-    }
-    $kinit_if_needed = "/usr/kerberos/bin/kinit  -kt ${keytab} ${principal}; "
-  } else {
-    $kinit_if_needed = ""
-  }
- 
-  if ($echo_yes == true) {
-    $cmd = "${kinit_if_needed}yes Y | hadoop --config ${conf_dir} ${command}"
-  } else {
-    $cmd = "${kinit_if_needed}hadoop --config ${conf_dir} ${command}"
-  }
-
-  hdp::exec { $cmd:
-    command     => $cmd,
-    user        => $run_user,
-    unless      => $unless,
-    refreshonly => $refreshonly,
-    tries       => $tries,
-    timeout     => $timeout,
-    try_sleep   => $try_sleep,
-    logoutput   => $logoutput
-  }
-}
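With security enabled, the command that exec-hadoop finally hands to hdp::exec is just a kinit prefix plus the hadoop CLI; a sketch with an illustrative keytab path, principal, and wrapped command:

```sh
# Obtain a ticket for the run user, then execute the wrapped command
/usr/kerberos/bin/kinit -kt /etc/security/keytabs/hdfs.headless.keytab hdfs; \
hadoop --config /etc/hadoop/conf dfsadmin -report
# With echo_yes => true, the command is fed affirmative answers instead:
# yes Y | hadoop --config /etc/hadoop/conf namenode -format
```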
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/jobtracker.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/jobtracker.pp
deleted file mode 100644
index 3c8c158..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/jobtracker.pp
+++ /dev/null
@@ -1,94 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::jobtracker(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-hadoop::params
-{
-  $hdp::params::service_exists['hdp-hadoop::jobtracker'] = true
-  Hdp-hadoop::Common<||>{service_states +> $service_state}
-  Hdp-hadoop::Package<||>{include_64_bit => true}
-  Hdp-hadoop::Configfile<||>{sizes +> 64}
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) {
-    $mapred_user = $hdp-hadoop::params::mapred_user
-    $mapred_local_dir = $hdp-hadoop::params::mapred_local_dir 
-  
-    #adds package, users and directories, and common hadoop configs
-    include hdp-hadoop::initialize
-
-    if ( ($service_state == 'installed_and_configured') and
-         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
-      $masterHost = $kerberos_adminclient_host[0]
-      hdp::download_keytab { 'jobtracker_service_keytab' :
-        masterhost => $masterHost,
-        keytabdst => "${$keytab_path}/jt.service.keytab",
-        keytabfile => 'jt.service.keytab',
-        owner => $hdp-hadoop::params::mapred_user
-      }
-    }
-     
-    hdp-hadoop::jobtracker::create_local_dirs { $mapred_local_dir: 
-      service_state => $service_state
-    }
-
-    #TODO: cleanup 
-    Hdp-Hadoop::Configfile<||>{jtnode_host => $hdp::params::host_address}
-
-    #TODO: do we keep precondition here?
-    if ($service_state == 'running' and $hdp-hadoop::params::use_preconditions == true) {
-      class { 'hdp-hadoop::hdfs::service_check':
-        before => Hdp-hadoop::Service['jobtracker'],
-        require => Class['hdp-hadoop']
-      }
-    }
-
-    hdp-hadoop::service{ 'jobtracker':
-      ensure       => $service_state,
-      user         => $mapred_user
-    }
-  
-    hdp-hadoop::service{ 'historyserver':
-      ensure         => $service_state,
-      user           => $mapred_user,
-      create_pid_dir => false,
-      create_log_dir => false
-    }
-
-    #top level does not need anchors
-    Class['hdp-hadoop'] -> Hdp-hadoop::Service['jobtracker'] -> Hdp-hadoop::Service['historyserver']
-    Hdp-hadoop::Jobtracker::Create_local_dirs<||> -> Hdp-hadoop::Service['jobtracker']
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-define hdp-hadoop::jobtracker::create_local_dirs($service_state)
-{
-    $dirs = hdp_array_from_comma_list($name)
-    hdp::directory_recursive_create { $dirs :
-      owner => $hdp-hadoop::params::mapred_user,
-      mode => '0755',
-      service_state => $service_state,
-      force => true
-    }
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/jobtracker/service_check.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/jobtracker/service_check.pp
deleted file mode 100644
index af5e095..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/jobtracker/service_check.pp
+++ /dev/null
@@ -1,29 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::jobtracker::service_check()
-{
-  hdp-hadoop::exec-hadoop { 'jobtracker::service_check':
-    command   => 'job -list',
-    tries     => 3,
-    try_sleep => 5,
-    user => $hdp::params::smokeuser
-  }
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/mapred/service_check.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/mapred/service_check.pp
deleted file mode 100644
index df4ba7b..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/mapred/service_check.pp
+++ /dev/null
@@ -1,75 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::mapred::service_check() 
-{
-  $smoke_test_user = $hdp::params::smokeuser
-  $jar_location = $hdp::params::hadoop_jar_location
-  $input_file = 'mapredsmokeinput'
-  $output_file = "mapredsmokeoutput"
-
-  $cleanup_cmd = "dfs -rmr ${output_file} ${input_file}"
-  #cleanup put below to handle retries; when retrying there will be a stale file that needs cleanup; the exit code is a function of the second command
-  $create_file_cmd = "$cleanup_cmd ; hadoop dfs -put /etc/passwd ${input_file} " #TODO: inconsistent that the second command needs 'hadoop'
-  $test_cmd = "fs -test -e ${output_file}" 
-  $run_wordcount_job = "jar ${jar_location}/hadoop-examples.jar  wordcount ${input_file} ${output_file}"
-  
-  anchor { 'hdp-hadoop::mapred::service_check::begin':}
-
-  hdp-hadoop::exec-hadoop { 'mapred::service_check::create_file':
-    command   => $create_file_cmd,
-    tries     => 1,
-    try_sleep => 5,
-    require   => Anchor['hdp-hadoop::mapred::service_check::begin'],
-  #  notify    => Hdp-hadoop::Exec-hadoop['mapred::service_check::run_wordcount'],
-    user      => $smoke_test_user
-  }
-
-  hdp-hadoop::exec-hadoop { 'mapred::service_check::run_wordcount':
-    command   => $run_wordcount_job,
-    tries     => 1,
-    try_sleep => 5,
-    require   => Hdp-hadoop::Exec-hadoop['mapred::service_check::create_file'],
-    notify    => Hdp-hadoop::Exec-hadoop['mapred::service_check::test'],
-    user      => $smoke_test_user,
-    logoutput => "true"
-  }
-
-#  exec { 'runjob':
-#    command   => "hadoop jar ${jar_location}/hadoop-examples.jar  wordcount ${input_file} ${output_file}",
-#    tries     => 1,
-#    try_sleep => 5,
-#    require   => Hdp-hadoop::Exec-hadoop['mapred::service_check::create_file'],
-#    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-#    notify    => Hdp-hadoop::Exec-hadoop['mapred::service_check::test'],
-#    logoutput => "true",
-#    user      => $smoke_test_user
-#  }
-
-  hdp-hadoop::exec-hadoop { 'mapred::service_check::test':
-    command     => $test_cmd,
-    refreshonly => true,
-    require     => Hdp-hadoop::Exec-hadoop['mapred::service_check::run_wordcount'],
-    before      => Anchor['hdp-hadoop::mapred::service_check::end'], #TODO: remove after testing
-    user        => $smoke_test_user
-  }
-  
-  anchor{ 'hdp-hadoop::mapred::service_check::end':}
-}
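Executed manually as the smoke user, the check reduces to the following (the jar path is illustrative):

```sh
# Clear any stale files from a previous attempt, then stage the input
hadoop dfs -rmr mapredsmokeoutput mapredsmokeinput
hadoop dfs -put /etc/passwd mapredsmokeinput
# Run the example wordcount job and confirm it produced output
hadoop jar /usr/lib/hadoop/hadoop-examples.jar wordcount mapredsmokeinput mapredsmokeoutput
hadoop fs -test -e mapredsmokeoutput
```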
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode.pp
deleted file mode 100644
index 909b071..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode.pp
+++ /dev/null
@@ -1,230 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::namenode(
-  $service_state = $hdp::params::cluster_service_state,
-  $slave_hosts = [],
-  $format = true,
-  $opts = {}
-) inherits hdp-hadoop::params
-{
-  $hdp::params::service_exists['hdp-hadoop::namenode'] = true
-
-  Hdp-hadoop::Common<||>{service_states +> $service_state}
-  Hdp-hadoop::Package<||>{include_64_bit => true}
-  Hdp-hadoop::Configfile<||>{sizes +> 64}
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
-    $dfs_name_dir = $hdp-hadoop::params::dfs_name_dir
-  
-    #adds package, users and directories, and common hadoop configs
-    include hdp-hadoop::initialize
-
-    if ( ($service_state == 'installed_and_configured') and 
-         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
-      $masterHost = $kerberos_adminclient_host[0]
-      hdp::download_keytab { 'namenode_service_keytab' :
-        masterhost => $masterHost,
-        keytabdst => "${$keytab_path}/nn.service.keytab",
-        keytabfile => 'nn.service.keytab',
-        owner => $hdp-hadoop::params::hdfs_user
-      }
-      hdp::download_keytab { 'namenode_hdfs_headless_keytab' :   
-        masterhost => $masterHost,
-        keytabdst => "${$keytab_path}/hdfs.headless.keytab",
-        keytabfile => 'hdfs.headless.keytab', 
-        owner => $hdp-hadoop::params::hdfs_user, 
-        hostnameInPrincipals => 'no'
-      }
-      hdp::download_keytab { 'namenode_spnego_keytab' :   
-        masterhost => $masterHost,
-        keytabdst => "${$keytab_path}/spnego.service.keytab",
-        keytabfile => 'spnego.service.keytab', 
-        owner => $hdp-hadoop::params::hdfs_user, 
-        mode => '0440',
-        group => 'hadoop'
-      }
-    }
- 
-    hdp-hadoop::namenode::create_name_dirs { $dfs_name_dir: 
-      service_state => $service_state
-    }
-   
-    Hdp-Hadoop::Configfile<||>{namenode_host => $hdp::params::host_address}
-    Hdp::Configfile<||>{namenode_host => $hdp::params::host_address} #for components other than hadoop (e.g., hbase) 
-  
-    if ($service_state == 'running' and $format == true) {
-      class {'hdp-hadoop::namenode::format' : }
-    }
-
-    hdp-hadoop::service{ 'namenode':
-      ensure       => $service_state,
-      user         => $hdp-hadoop::params::hdfs_user,
-      initial_wait => hdp_option_value($opts,'wait')
-    }
-
-    hdp-hadoop::namenode::create_app_directories { 'create_app_directories' :
-       service_state => $service_state
-    }
-
-    hdp-hadoop::namenode::create_user_directories { 'create_user_directories' :
-       service_state => $service_state
-    }
-
-    #top level does not need anchors
-    Class['hdp-hadoop'] ->  Hdp-hadoop::Service['namenode']
-    Hdp-hadoop::Namenode::Create_name_dirs<||> -> Hdp-hadoop::Service['namenode'] 
-    Hdp-hadoop::Service['namenode'] -> Hdp-hadoop::Namenode::Create_app_directories<||> -> Hdp-hadoop::Namenode::Create_user_directories<||>
-    if ($service_state == 'running' and $format == true) {
-      Class['hdp-hadoop'] -> Class['hdp-hadoop::namenode::format'] -> Hdp-hadoop::Service['namenode']
-      Hdp-hadoop::Namenode::Create_name_dirs<||> -> Class['hdp-hadoop::namenode::format']
-    } 
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-define hdp-hadoop::namenode::create_name_dirs($service_state)
-{
-  $dirs = hdp_array_from_comma_list($name)
-  hdp::directory_recursive_create { $dirs :
-    owner => $hdp-hadoop::params::hdfs_user,
-    mode => '0755',
-    service_state => $service_state,
-    force => true
-  }
-}
-
-define hdp-hadoop::namenode::create_app_directories($service_state)
-{
-
-  if ($service_state == 'running') {
-   
-    hdp-hadoop::hdfs::directory{ "/tmp" :
-      service_state => $service_state,
-      owner => $hdp-hadoop::params::hdfs_user,
-      mode => '777'
-    }
-
-    hdp-hadoop::hdfs::directory{ '/mapred' :
-      service_state => $service_state,
-      owner         => $hdp-hadoop::params::mapred_user
-    }
-    hdp-hadoop::hdfs::directory{ '/mapred/system' :
-      service_state => $service_state,
-      owner         => $hdp-hadoop::params::mapred_user
-    }
-    Hdp-hadoop::Hdfs::Directory['/mapred'] -> Hdp-hadoop::Hdfs::Directory['/mapred/system']
-
-    if ($hdp::params::hbase_master_host != "") {
-      $hdfs_root_dir = $hdp::params::hbase_hdfs_root_dir
-      hdp-hadoop::hdfs::directory { $hdfs_root_dir:
-        owner         => $hdp::params::hbase_user,
-        service_state => $service_state
-      }
-    }
-
-    if ($hdp::params::hive_server_host != "") {
-      $hive_user = $hdp::params::hive_user
-      $hive_apps_whs_dir = $hdp::params::hive_apps_whs_dir
-
-      hdp-hadoop::hdfs::directory{ $hive_apps_whs_dir:
-        service_state   => $service_state,
-        owner            => $hive_user,
-        mode             => '777',
-        recursive_chmod  => true
-      }
-    }
-
-    if ($hdp::params::webhcat_server_host != "") {
-      $webhcat_user = $hdp::params::webhcat_user
-      $webhcat_apps_dir = $hdp::params::webhcat_apps_dir
-
-      hdp-hadoop::hdfs::directory{ $webhcat_apps_dir:
-        service_state => $service_state,
-        owner => $webhcat_user,
-        mode  => '755',
-        recursive_chmod => true
-      }
-    }
-  }
-}
-
-
-define hdp-hadoop::namenode::create_user_directories($service_state)
-{
-  if ($service_state == 'running') {
-    $smoke_hdfs_user_dir = $hdp::params::smoke_hdfs_user_dir
-
-    $smoke_user_dir_item="$smoke_hdfs_user_dir,"
-
-    if ($hdp::params::hive_server_host != "") {
-      $hive_hdfs_user_dir = $hdp::params::hive_hdfs_user_dir
-      $hive_dir_item="$hive_hdfs_user_dir,"
-    } else {
-    $hive_dir_item=""
-    }
-
-    if ($hdp::params::oozie_server != "") {
-      $oozie_hdfs_user_dir = $hdp::params::oozie_hdfs_user_dir
-      $oozie_dir_item="$oozie_hdfs_user_dir,"
-    } else {
-      $oozie_dir_item=""
-    }
-    
-    if ($hdp::params::webhcat_server_host != "") {
-      $hcat_hdfs_user_dir = $hdp::params::hcat_hdfs_user_dir
-      $webhcat_hdfs_user_dir = $hdp::params::webhcat_hdfs_user_dir
-      $webhcat_dir_item="$webhcat_hdfs_user_dir,"
-      if ($hcat_hdfs_user_dir != $webhcat_hdfs_user_dir) {
-        $hcat_dir_item="$hcat_hdfs_user_dir,"
-      } else {
-        $hcat_dir_item=""
-      }
-    } else {
-      $webhcat_dir_item=""
-      $hcat_dir_item=""
-    }
-
-    $users_dir_list_comm_sep = "$smoke_user_dir_item $hive_dir_item $oozie_dir_item $hcat_dir_item $webhcat_dir_item"
-
-    #Get unique users directories set
-    $users_dirs_set = hdp_set_from_comma_list($users_dir_list_comm_sep)
-
-    hdp-hadoop::namenode::create_user_directory{$users_dirs_set:
-      service_state => $service_state}
-  }
-  
-}
-
-define hdp-hadoop::namenode::create_user_directory($service_state)
-{
-  
-  $owner = hdp_hadoop_get_owner($name)
-  $mode = hdp_hadoop_get_mode($name)
-  debug("## Creating user directory: $name, owner: $owner, mode: $mode")
-  hdp-hadoop::hdfs::directory{ $name:
-   service_state   => $service_state,
-   mode            => $mode,
-   owner           => $owner,
-   recursive_chmod => true
-  }
-}
-
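The defines above share one pattern: a comma-separated list of directories travels in the resource title, and a helper function expands it into one resource per entry. A minimal sketch of that pattern, with hypothetical directory paths:

    # Sketch with assumed name dirs; the resource title carries the list.
    hdp-hadoop::namenode::create_name_dirs { '/hadoop/hdfs/namenode,/mnt/hdfs/namenode':
      service_state => 'running',
    }
    # Inside the define, hdp_array_from_comma_list($name) splits the title
    # into ['/hadoop/hdfs/namenode', '/mnt/hdfs/namenode'], yielding one
    # hdp::directory_recursive_create resource per directory.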
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode/format.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode/format.pp
deleted file mode 100644
index 481e822..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode/format.pp
+++ /dev/null
@@ -1,57 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::namenode::format(
-  $force = false
-)
-{
-  $mark_dir = $hdp-hadoop::params::namenode_formatted_mark_dir
-  $dfs_name_dir = $hdp-hadoop::params::dfs_name_dir
-  $hdfs_user = $hdp::params::hdfs_user
-  $hadoop_conf_dir = $hdp-hadoop::params::conf_dir
-
-  if ($force == true) {
-      hdp-hadoop::exec-hadoop { 'namenode -format' :
-      command => 'namenode -format',
-      kinit_override => true,
-      notify  => Hdp::Exec['set namenode mark']
-    }
-  } else {
-      file { '/tmp/checkForFormat.sh':
-      ensure => present,
-      source => "puppet:///modules/hdp-hadoop/checkForFormat.sh",
-      mode => '0755'
-    }
-
-    exec { '/tmp/checkForFormat.sh':
-      command   => "sh /tmp/checkForFormat.sh ${hdfs_user} ${hadoop_conf_dir} ${mark_dir} ${dfs_name_dir} ",
-      unless   => "test -d ${mark_dir}",
-      require   => File['/tmp/checkForFormat.sh'],
-      path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-      logoutput => "true",
-      notify   => Hdp::Exec['set namenode mark']
-    }
-  }
-
-  hdp::exec { 'set namenode mark' :
-    command     => "mkdir -p ${mark_dir}",
-    refreshonly => true
-  }
-}
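The format class takes two paths: with force => true it formats unconditionally, otherwise checkForFormat.sh runs only while the mark directory is absent; either path then creates the mark via the 'set namenode mark' exec. A minimal usage sketch, assuming it is declared from the namenode manifest as the ordering arrows in namenode.pp above suggest:

    # Hypothetical declaration; Class['hdp-hadoop::namenode::format'] ->
    # Hdp-hadoop::Service['namenode'] runs the format before the daemon starts.
    class { 'hdp-hadoop::namenode::format':
      force => false,  # guard with checkForFormat.sh and the mark directory
    }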
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode/service_check.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode/service_check.pp
deleted file mode 100644
index d4c0523..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode/service_check.pp
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::namenode::service_check()
-{
-  hdp-hadoop::exec-hadoop { 'namenode::service_check':
-    command   => 'dfs -ls /',
-    tries     => 3,
-    try_sleep => 5
-  }
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/package.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/package.pp
deleted file mode 100644
index 4beaafd..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/package.pp
+++ /dev/null
@@ -1,44 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#singleton, but implemented as a define so collections can be used to override params
-define hdp-hadoop::package(
-  $ensure = 'present',
-  $include_32_bit = false,
-  $include_64_bit = false
-)
-{
-  #just use 32 if it's specifically requested and no 64-bit request was made
-  if ($include_32_bit == true) and ($include_64_bit != true) {
-    $size = 32
-  } else  {
-    $size = 64
-  }
-  $package = "hadoop ${size}"
-  $lzo_enabled = $hdp::params::lzo_enabled
-
-  hdp::package{ $package:
-    ensure       => $ensure,
-    package_type => 'hadoop',
-    size         => $size,
-    lzo_needed   => $lzo_enabled
-  }
-  anchor{ 'hdp-hadoop::package::helper::begin': } -> Hdp::Package[$package] -> anchor{ 'hdp-hadoop::package::helper::end': }
-}
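Because hdp-hadoop::package is a define rather than a class, callers can flip its bitness with a resource collector override instead of passing parameters at declaration time; tasktracker.pp later in this diff does exactly this. A minimal sketch:

    # As used by the slave manifests: select the 32-bit package for every
    # place the define was declared.
    Hdp-hadoop::Package<||> { include_32_bit => true }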
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp
deleted file mode 100644
index 171dedb..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp
+++ /dev/null
@@ -1,180 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::params(
-) inherits hdp::params 
-{
-
-  ##TODO: for testing in masterless mode
-  $use_preconditions = false
-  ####  
-  $conf_dir = $hdp::params::hadoop_conf_dir 
-
-  ####### users
-
-  $mapred_user = $hdp::params::mapred_user
-  $hdfs_user = $hdp::params::hdfs_user
-  
-  ##### security related
-  $keytab_path = hdp_default("keytab_path","/etc/security/keytabs")
- 
-  if ($hdp::params::security_enabled == true) {
-    $enable_security_authorization = true
-    $security_type = "kerberos"
-    $task_controller = "org.apache.hadoop.mapred.LinuxTaskController"
-    $dfs_datanode_address = 1019
-    $dfs_datanode_http_address = 1022
-  } else {
-    $enable_security_authorization = false
-    $security_type = "simple"
-    $task_controller = "org.apache.hadoop.mapred.DefaultTaskController"
-    $dfs_datanode_address = 50075
-    $dfs_datanode_http_address = 50075
-  }
-
-  ### hadoop-env
-  
-  $dtnode_heapsize = hdp_default("hadoop/hadoop-env/dtnode_heapsize","1024m")
-  $ttnode_heapsize = hdp_default("hadoop/hadoop-env/ttnode_heapsize","1024m")
-
-  $hadoop_heapsize = hdp_default("hadoop/hadoop-env/hadoop_heapsize","1024m")
-
-  $hdfs_log_dir_prefix = hdp_default("hadoop/hadoop-env/hdfs_log_dir_prefix","/var/log/hadoop")
-
-  $hadoop_pid_dir_prefix = hdp_default("hadoop/hadoop-env/hadoop_pid_dir_prefix","/var/run/hadoop")
-  $run_dir = $hadoop_pid_dir_prefix
-
-  $namenode_formatted_mark_dir = "${run_dir}/hdfs/namenode/formatted/"
-
-  $jtnode_heapsize = hdp_default("hadoop/hadoop-env/jtnode_heapsize","1024m")
-
-  $jtnode_opt_maxnewsize = hdp_default("hadoop/hadoop-env/jtnode_opt_maxnewsize","200m")
-
-  $jtnode_opt_newsize = hdp_default("hadoop/hadoop-env/jtnode_opt_newsize","200m")
-
-  $namenode_heapsize = hdp_default("hadoop/hadoop-env/namenode_heapsize","1024m")
-
-  $namenode_opt_maxnewsize = hdp_default("hadoop/hadoop-env/namenode_opt_maxnewsize","640m")
-
-  $namenode_opt_newsize = hdp_default("hadoop/hadoop-env/namenode_opt_newsize","640m")
-  
-  ### compression related
-  if (($hdp::params::lzo_enabled == true) and ($hdp::params::snappy_enabled == true)) {
-    $mapred_compress_map_output = true
-    $compression_codecs =  "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.SnappyCodec"
-    $mapred_map_output_compression_codec = "org.apache.hadoop.io.compress.SnappyCodec"
-  } elsif ($hdp::params::snappy_enabled == true) {
-    $mapred_compress_map_output = true
-    $compression_codecs = "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec" 
-    $mapred_map_output_compression_codec = "org.apache.hadoop.io.compress.SnappyCodec"
-  } elsif ($hdp::params::lzo_enabled == true) {
-    $mapred_compress_map_output = true
-    $compression_codecs = "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec"
-    $mapred_map_output_compression_codec = "com.hadoop.compression.lzo.LzoCodec"
-  } else { 
-    $mapred_compress_map_output = false
-    $compression_codecs = "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec"
-    $mapred_map_output_compression_codec = "org.apache.hadoop.io.compress.DefaultCodec"
-  }
-
-  ### core-site
-  $fs_checkpoint_dir = hdp_default("hadoop/core-site/fs_checkpoint_dir","/tmp/hadoop-hdfs/dfs/namesecondary")
-
-  $proxyuser_group = hdp_default("hadoop/core-site/proxyuser_group","users")
-
-  ### hdfs-site
-  $datanode_du_reserved = hdp_default("hadoop/hdfs-site/datanode_du_reserved",1073741824)
-
-  $dfs_block_local_path_access_user = hdp_default("hadoop/hdfs-site/dfs_block_local_path_access_user","hbase")
-
-  $dfs_data_dir = $hdp::params::dfs_data_dir
-
-  $dfs_datanode_data_dir_perm = hdp_default("hadoop/hdfs-site/dfs_datanode_data_dir_perm",750)
-
-  $dfs_datanode_failed_volume_tolerated = hdp_default("hadoop/hdfs-site/dfs_datanode_failed_volume_tolerated",0)
-
-  $dfs_exclude = hdp_default("hadoop/hdfs-site/dfs_exclude","dfs.exclude")
-
-  $dfs_include = hdp_default("hadoop/hdfs-site/dfs_include","dfs.include")
-  
-  $dfs_name_dir = hdp_default("hadoop/hdfs-site/dfs_name_dir","/tmp/hadoop-hdfs/dfs/name")
-  
-  $dfs_replication = hdp_default("hadoop/hdfs-site/dfs_replication",3)
-
-  $dfs_support_append = hdp_default("hadoop/hdfs-site/dfs_support_append",true)
-
-  $dfs_webhdfs_enabled = hdp_default("hadoop/hdfs-site/dfs_webhdfs_enabled",false)
-
-
- ######### mapred #######
-   ### mapred-site
-
-  $mapred_system_dir = '/mapred/system'
-
-  $io_sort_mb = hdp_default("hadoop/mapred-site/io_sort_mb","200")
-
-  $io_sort_spill_percent = hdp_default("hadoop/mapred-site/io_sort_spill_percent","0.9")
-
-  $mapred_child_java_opts_sz = hdp_default("hadoop/mapred-site/mapred_child_java_opts_sz","-Xmx768m")
-
-  $mapred_cluster_map_mem_mb = hdp_default("hadoop/mapred-site/mapred_cluster_map_mem_mb","-1")
-
-  $mapred_cluster_max_map_mem_mb = hdp_default("hadoop/mapred-site/mapred_cluster_max_map_mem_mb","-1")
-
-  $mapred_cluster_max_red_mem_mb = hdp_default("hadoop/mapred-site/mapred_cluster_max_red_mem_mb","-1")
-
-  $mapred_cluster_red_mem_mb = hdp_default("hadoop/mapred-site/mapred_cluster_red_mem_mb","-1")
-
-  $mapred_hosts_exclude = hdp_default("hadoop/mapred-site/mapred_hosts_exclude","mapred.exclude")
-
-  $mapred_hosts_include = hdp_default("hadoop/mapred-site/mapred_hosts_include","mapred.include")
-
-  $mapred_job_map_mem_mb = hdp_default("hadoop/mapred-site/mapred_job_map_mem_mb","-1")
-
-  $mapred_job_red_mem_mb = hdp_default("hadoop/mapred-site/mapred_job_red_mem_mb","-1")
-
-  $mapred_jobstatus_dir = hdp_default("hadoop/mapred-site/mapred_jobstatus_dir","file:////mapred/jobstatus")
-
-  $mapred_local_dir = hdp_default("hadoop/mapred-site/mapred_local_dir","/tmp/hadoop-mapred/mapred/local")
-   
-  $mapred_map_tasks_max = hdp_default("hadoop/mapred-site/mapred_map_tasks_max",4)
-
-  $mapred_red_tasks_max = hdp_default("hadoop/mapred-site/mapred_red_tasks_max",4)
-
-  $mapreduce_userlog_retainhours = hdp_default("hadoop/mapred-site/mapreduce_userlog_retainhours",24)
-
-  $maxtasks_per_job = hdp_default("hadoop/mapred-site/maxtasks_per_job","-1")
-
-  $scheduler_name = hdp_default("hadoop/mapred-site/scheduler_name","org.apache.hadoop.mapred.CapacityTaskScheduler")
-
-  #### health_check
-
-  $security_enabled = $hdp::params::security_enabled
-
-  $task_bin_exe = hdp_default("hadoop/health_check/task_bin_exe")
-
-  $rca_enabled = hdp_default("rca_enabled", false)
-  if ($rca_enabled == true) {
-    $rca_prefix = ""
-  } else {
-    $rca_prefix = "###"
-  }
-  $ambari_db_server_host = hdp_default("ambari_db_server_host", "localhost")
-}
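Nearly every value in this params class flows through hdp_default(key, default), where the slash-delimited key appears to follow a config-file/property convention (e.g. hadoop/hdfs-site/dfs_replication) and the second argument is the fallback when no site-level value is supplied. A sketch of the assumed lookup semantics:

    # Assumed behavior: returns the site-provided value if one exists,
    # otherwise the literal default (here, a replication factor of 3).
    $dfs_replication = hdp_default("hadoop/hdfs-site/dfs_replication", 3)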
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/service.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/service.pp
deleted file mode 100644
index 6d7cc36..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/service.pp
+++ /dev/null
@@ -1,118 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-define hdp-hadoop::service(
-  $ensure = 'running',
-  $user,
-  $initial_wait = undef,
-  $create_pid_dir = true,
-  $create_log_dir = true
-)
-{
-
-  $security_enabled = $hdp::params::security_enabled
-
-  #NOTE: does not work if namenode and datanode are on the same host
-  $pid_dir = "${hdp-hadoop::params::hadoop_pid_dir_prefix}/${user}"
-  
-  if (($security_enabled == true) and ($name == 'datanode')) {
-    $run_as_root = true
-  } else {       
-    $run_as_root = false
-  }
-
-  if (($security_enabled == true) and ($name == 'datanode')) {
-    $hdfs_user = $hdp::params::hdfs_user
-    $pid_file = "${hdp-hadoop::params::hadoop_pid_dir_prefix}/${hdfs_user}/hadoop-${hdfs_user}-${name}.pid"
-  } else {
-    $pid_file = "${pid_dir}/hadoop-${user}-${name}.pid"
-  } 
-
-  $log_dir = "${hdp-hadoop::params::hdfs_log_dir_prefix}/${user}"
-  $hadoop_daemon = "${hdp::params::hadoop_bin}/hadoop-daemon.sh"
-   
-  $cmd = "${hadoop_daemon} --config ${hdp-hadoop::params::conf_dir}"
-  if ($ensure == 'running') {
-    if ($run_as_root == true) {
-      $daemon_cmd = "${cmd} start ${name}"
-    } else {
-      $daemon_cmd = "su - ${user} -c  '${cmd} start ${name}'"
-    }
-    $service_is_up = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
-  } elsif ($ensure == 'stopped') {
-    if ($run_as_root == true) {
-      $daemon_cmd = "${cmd} stop ${name}"
-    } else {
-      $daemon_cmd = "su - ${user} -c  '${cmd} stop ${name}'"
-    }
-    $service_is_up = undef
-  } else {
-    $daemon_cmd = undef
-  }
- 
-  if ($create_pid_dir == true) {
-    hdp::directory_recursive_create { $pid_dir: 
-      owner       => $user,
-      context_tag => 'hadoop_service',
-      service_state => $service_state,
-      force => true
-    }
-  }
-  
-  if ($create_log_dir == true) {
-    hdp::directory_recursive_create { $log_dir: 
-      owner       => $user,
-      context_tag => 'hadoop_service',
-      service_state => $service_state,
-      force => true
-    }
-  }
-  if ($daemon_cmd != undef) {  
-    hdp::exec { $daemon_cmd:
-      command      => $daemon_cmd,
-      unless       => $service_is_up,
-      initial_wait => $initial_wait
-    }
-  }
-
-  anchor{"hdp-hadoop::service::${name}::begin":}
-  anchor{"hdp-hadoop::service::${name}::end":}
-  if ($daemon_cmd != undef) {
-    Anchor["hdp-hadoop::service::${name}::begin"] -> Hdp::Exec[$daemon_cmd] -> Anchor["hdp-hadoop::service::${name}::end"]
-
-    if ($create_pid_dir == true) {
-      Anchor["hdp-hadoop::service::${name}::begin"] -> Hdp::Directory_recursive_create[$pid_dir] -> Hdp::Exec[$daemon_cmd] 
-    }
-     if ($create_log_dir == true) {
-      Anchor["hdp-hadoop::service::${name}::begin"] -> Hdp::Directory_recursive_create[$log_dir] -> Hdp::Exec[$daemon_cmd] 
-    }
-  }
-  if ($ensure == 'running') {
-    #TODO: look at Puppet resource retry and retry_sleep
-    #TODO: can make sleep contingent on $name
-    $sleep = 5
-    $post_check = "sleep ${sleep}; ${service_is_up}"
-    hdp::exec { $post_check:
-      command => $post_check,
-      unless  => $service_is_up
-    }
-    Hdp::Exec[$daemon_cmd] -> Hdp::Exec[$post_check] -> Anchor["hdp-hadoop::service::${name}::end"]
-  }  
-}
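Putting the define together: a declaration names the daemon, and the define derives the pid file, log dir, and the hadoop-daemon.sh start/stop command, wrapping it in su unless the secure-datanode case forces root. A minimal sketch with assumed values, mirroring how snamenode.pp and tasktracker.pp below declare it:

    hdp-hadoop::service { 'datanode':
      ensure => 'running',
      user   => 'hdfs',  # hypothetical; normally $hdp-hadoop::params::hdfs_user
    }
    # Resulting command in the non-secure case:
    #   su - hdfs -c '<hadoop_bin>/hadoop-daemon.sh --config <conf_dir> start datanode'
    # guarded by: ls <pid_file> && ps `cat <pid_file>`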
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/jobtracker-conn.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/jobtracker-conn.pp
deleted file mode 100644
index f0338f9..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/jobtracker-conn.pp
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::slave::jobtracker-conn($jobtracker_host)
-{
-  Hdp-Hadoop::Configfile<||>{jtnode_host => $jobtracker_host}
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/master-conn.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/master-conn.pp
deleted file mode 100644
index 326f31d..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/master-conn.pp
+++ /dev/null
@@ -1,27 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::slave::master-conn($master_host)
-{
-  Hdp-Hadoop::Configfile<||>{
-    namenode_host => $master_host,
-    jtnode_host   => $master_host
-  }
-}
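All three slave conn classes rely on the same Puppet mechanism: a resource collector (<||>) that rewrites an attribute on every already-declared instance of a define. A self-contained sketch of the mechanism, with hypothetical names standing in for Hdp-Hadoop::Configfile:

    # Demo of the collector-override pattern used above.
    define demo::conf($master_host = undef) {
      notify { "${name}: master_host=${master_host}": }
    }
    demo::conf { ['core-site', 'mapred-site']: }
    # Pushes the value into both declarations above.
    Demo::Conf<||> { master_host => 'nn01.example.invalid' }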
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/namenode-conn.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/namenode-conn.pp
deleted file mode 100644
index 8047c05..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/namenode-conn.pp
+++ /dev/null
@@ -1,27 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#TODO: this might be replaced by just using hdp::namenode-conn
-class hdp-hadoop::slave::namenode-conn($namenode_host)
-{
-  #TODO: check if we can get rid of both
-  Hdp-Hadoop::Configfile<||>{namenode_host => $namenode_host}
-  Hdp::Configfile<||>{namenode_host => $namenode_host} #for components other than hadoop (e.g., hbase) 
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/smoketest.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/smoketest.pp
deleted file mode 100644
index 296a0d4..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/smoketest.pp
+++ /dev/null
@@ -1,46 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::smoketest(
-  $opts={}
-)
-{
-  #TODO: put in wait
-  #TODO: look for better way to compute outname
-  $date_format = '"%M%d%y"'
-  $outname = inline_template("<%=  `date +${date_format}`.chomp %>")
-
-  #TODO: hardwired to run on namenode and to use user hdfs
-
-  $put = "dfs -put /etc/passwd passwd-${outname}"
-  $exec = "jar /usr/share/hadoop/hadoop-examples-*.jar wordcount passwd-${outname} ${outname}.out"
-  $result = "fs -test -e ${outname}.out >/dev/null 2>&1"
-  anchor{ "hdp-hadoop::smoketest::begin" :} ->
-  hdp-hadoop::exec-hadoop{ $put:
-    command => $put
-  } ->
-  hdp-hadoop::exec-hadoop{ $exec:
-    command =>  $exec
-  } ->
-  hdp-hadoop::exec-hadoop{ $result:
-    command =>  $result
-  } ->
-  anchor{ "hdp-hadoop::smoketest::end" :}
-}
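Assuming exec-hadoop prefixes the hadoop binary (its definition lives elsewhere in this module), the anchored chain above amounts to three commands run in order. A sketch of the effective sequence, with a hypothetical outname:

    # Effective smoke-test sequence (hypothetical outname '042313'):
    #   hadoop dfs -put /etc/passwd passwd-042313
    #   hadoop jar /usr/share/hadoop/hadoop-examples-*.jar wordcount passwd-042313 042313.out
    #   hadoop fs -test -e 042313.out
    class { 'hdp-hadoop::smoketest': }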
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/snamenode.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/snamenode.pp
deleted file mode 100644
index ab4f491..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/snamenode.pp
+++ /dev/null
@@ -1,98 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::snamenode(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-hadoop::params  
-{
-  $hdp::params::service_exists['hdp-hadoop::snamenode'] = true
-
-  Hdp-hadoop::Common<||>{service_states +> $service_state}
-  Hdp-hadoop::Package<||>{include_64_bit => true}
-  Hdp-hadoop::Configfile<||>{sizes +> 64}
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) {
-    $fs_checkpoint_dir = $hdp-hadoop::params::fs_checkpoint_dir
-  
-    #adds package, users and directories, and common hadoop configs
-    include hdp-hadoop::initialize
-
-    if ( ($service_state == 'installed_and_configured') and
-         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
-      if ($hdp::params::service_exists['hdp-hadoop::namenode'] != true) {
-        $masterHost = $kerberos_adminclient_host[0]
-        hdp::download_keytab { 'snamenode_service_keytab' :
-          masterhost => $masterHost,
-          keytabdst => "${$keytab_path}/nn.service.keytab",
-          keytabfile => 'nn.service.keytab',
-          owner => $hdp-hadoop::params::hdfs_user
-        }
-        hdp::download_keytab { 'snamenode_spnego_keytab' :   
-          masterhost => $masterHost,
-          keytabdst => "${$keytab_path}/spnego.service.keytab",
-          keytabfile => 'spnego.service.keytab', 
-          owner => $hdp-hadoop::params::hdfs_user,
-          mode => '0440',
-          group => 'hadoop'
-        }
-      }
-    }
- 
-    Hdp-Hadoop::Configfile<||>{snamenode_host => $hdp::params::host_address}
-  
-    hdp-hadoop::snamenode::create_name_dirs { $fs_checkpoint_dir: 
-      service_state => $service_state
-    }
-    
-    if ($hdp::params::service_exists['hdp-hadoop::namenode'] == true) {
-      $create_pid_dir = false
-      $create_log_dir = false
-    } else {
-      $create_pid_dir = true
-      $create_log_dir = true
-    }
-    
-    hdp-hadoop::service{ 'secondarynamenode':
-      ensure         => $service_state,
-      user           => $hdp-hadoop::params::hdfs_user,
-      create_pid_dir => $create_pid_dir,
-      create_log_dir => $create_log_dir
-    }
-  
-    #top level does not need anchors
-    Class['hdp-hadoop'] -> Hdp-hadoop::Service['secondarynamenode']
-    Hdp-hadoop::Namenode::Create_name_dirs<||> -> Hdp-hadoop::Service['secondarynamenode']
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-define hdp-hadoop::snamenode::create_name_dirs($service_state)
-{
-  $dirs = hdp_array_from_comma_list($name)
-  hdp::directory_recursive_create { $dirs :
-    owner => $hdp-hadoop::params::hdfs_user,
-    mode => '0755',
-    service_state => $service_state,
-    force => true
-  }
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/tasktracker.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/tasktracker.pp
deleted file mode 100644
index f9904f4..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/tasktracker.pp
+++ /dev/null
@@ -1,94 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::tasktracker(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-hadoop::params
-{
-  $hdp::params::service_exists['hdp-hadoop::tasktracker'] = true
-
-  Hdp-hadoop::Common<||>{service_states +> $service_state}
-
-  if ($hdp::params::use_32_bits_on_slaves == true) {
-    Hdp-hadoop::Package<||>{include_32_bit => true}
-    Hdp-hadoop::Configfile<||>{sizes +> 32}
-  } else {
-    Hdp-hadoop::Package<||>{include_64_bit => true}
-    Hdp-hadoop::Configfile<||>{sizes +> 64}
-  }
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
-    $mapred_local_dir = $hdp-hadoop::params::mapred_local_dir
-  
-    #adds package, users and directories, and common hadoop configs
-    include hdp-hadoop::initialize
-
-    if ( ($service_state == 'installed_and_configured') and
-         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
-      $masterHost = $kerberos_adminclient_host[0]
-      hdp::download_keytab { 'tasktracker_service_keytab' :
-        masterhost => $masterHost,
-        keytabdst => "${$keytab_path}/tt.service.keytab",
-        keytabfile => 'tt.service.keytab',
-        owner => $hdp-hadoop::params::mapred_user
-      }
-    }
-  
-    hdp-hadoop::tasktracker::create_local_dirs { $mapred_local_dir: 
-      service_state => $service_state
-    }
-    
-    if ($hdp::params::service_exists['hdp-hadoop::jobtracker'] == true) {
-      $create_pid_dir = false
-      $create_log_dir = false
-    } else {
-      $create_pid_dir = true
-      $create_log_dir = true
-    }
-
-    hdp-hadoop::service{ 'tasktracker':
-      ensure => $service_state,
-      user   => $hdp-hadoop::params::mapred_user,
-      create_pid_dir => $create_pid_dir,
-      create_log_dir => $create_log_dir
-    }
-  
-    #top level does not need anchors
-    Class['hdp-hadoop'] -> Hdp-hadoop::Service['tasktracker']
-    Hdp-hadoop::Tasktracker::Create_local_dirs<||> -> Hdp-hadoop::Service['tasktracker']
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-define hdp-hadoop::tasktracker::create_local_dirs($service_state)
-{
-  if ($hdp::params::service_exists['hdp-hadoop::jobtracker'] != true) {
-    $dirs = hdp_array_from_comma_list($name)
-    hdp::directory_recursive_create_ignore_failure { $dirs :
-      owner => $hdp-hadoop::params::mapred_user,
-      mode => '0755',
-      service_state => $service_state,
-      force => true
-    }
-  }
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/commons-logging.properties.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/commons-logging.properties.erb
deleted file mode 100644
index 77e458f..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/commons-logging.properties.erb
+++ /dev/null
@@ -1,25 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-#Logging Implementation
-
-#Log4J
-org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger
-
-#JDK Logger
-#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/exclude_hosts_list.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/exclude_hosts_list.erb
deleted file mode 100644
index 750549c..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/exclude_hosts_list.erb
+++ /dev/null
@@ -1,3 +0,0 @@
-<% exlude_hosts_list.each do |val| -%>
-<%= val%>
-<% end -%>
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb
deleted file mode 100644
index f4bfb89..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb
+++ /dev/null
@@ -1,89 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Set Hadoop-specific environment variables here.
-
-# The only required environment variable is JAVA_HOME.  All others are
-# optional.  When running a distributed configuration it is best to
-# set JAVA_HOME in this file, so that it is correctly defined on
-# remote nodes.
-
-# The java implementation to use.  Required.
-export JAVA_HOME=<%=scope.function_hdp_java_home()%>
-export HADOOP_HOME_WARN_SUPPRESS=1
-
-# Hadoop Configuration Directory
-#TODO: if env var set that can cause problems
-export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-<%=scope.function_hdp_template_var("conf_dir")%>}
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-export HADOOP_HEAPSIZE="<%=scope.function_hdp_template_var("hadoop_heapsize")%>"
-
-export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms<%=scope.function_hdp_template_var("namenode_heapsize")%>"
-
-# Extra Java runtime options.  Empty by default.
-export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
-
-# Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER/hs_err_pid%p.log -XX:NewSize=<%=scope.function_hdp_template_var("namenode_opt_newsize")%> -XX:MaxNewSize=<%=scope.function_hdp_template_var("namenode_opt_maxnewsize")%> -Xloggc:<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms<%=scope.function_hdp_template_var("namenode_heapsize")%> -Xmx<%=scope.function_hdp_template_var("namenode_heapsize")%> -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
-HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER/hs_err_pid%p.log -XX:NewSize=<%=scope.function_hdp_template_var("jtnode_opt_newsize")%> -XX:MaxNewSize=<%=scope.function_hdp_template_var("jtnode_opt_maxnewsize")%> -Xloggc:<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx<%=scope.function_hdp_template_var("jtnode_heapsize")%> -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
-
-HADOOP_TASKTRACKER_OPTS="-server -Xmx<%=scope.function_hdp_template_var("ttnode_heapsize")%> -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
-HADOOP_DATANODE_OPTS="-Xmx<%=scope.function_hdp_template_var("dtnode_heapsize")%> -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
-HADOOP_BALANCER_OPTS="-server -Xmx<%=scope.function_hdp_template_var("hadoop_heapsize")%>m ${HADOOP_BALANCER_OPTS}"
-
-export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER/hs_err_pid%p.log -XX:NewSize=<%=scope.function_hdp_template_var("namenode_opt_newsize")%> -XX:MaxNewSize=<%=scope.function_hdp_template_var("namenode_opt_maxnewsize")%> -Xloggc:<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx<%=scope.function_hdp_template_var("namenode_heapsize")%> -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
-
-# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-export HADOOP_CLIENT_OPTS="-Xmx128m ${HADOOP_CLIENT_OPTS}"
-#HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData ${HADOOP_JAVA_PLATFORM_OPTS}"
-
-# On secure datanodes, user to run the datanode as after dropping privileges
-export HADOOP_SECURE_DN_USER=<%=scope.function_hdp_template_var("hdfs_user")%>
-
-# Extra ssh options.  Empty by default.
-export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
-
-# Where log files are stored.  $HADOOP_HOME/logs by default.
-export HADOOP_LOG_DIR=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER
-
-
-# Where log files are stored in the secure data environment.
-export HADOOP_SECURE_DN_LOG_DIR=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$HADOOP_SECURE_DN_USER
-
-# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
-# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
-
-# host:path where hadoop code should be rsync'd from.  Unset by default.
-# export HADOOP_MASTER=master:/home/$USER/src/hadoop
-
-# Seconds to sleep between slave commands.  Unset by default.  This
-# can be useful in large clusters, where, e.g., slave rsyncs can
-# otherwise arrive faster than the master can service them.
-# export HADOOP_SLAVE_SLEEP=0.1
-
-# The directory where pid files are stored. /tmp by default.
-export HADOOP_PID_DIR=<%=scope.function_hdp_template_var("hadoop_pid_dir_prefix")%>/$USER
-export HADOOP_SECURE_DN_PID_DIR=<%=scope.function_hdp_template_var("hadoop_pid_dir_prefix")%>/$HADOOP_SECURE_DN_USER
-
-# A string representing this instance of hadoop. $USER by default.
-export HADOOP_IDENT_STRING=$USER
-
-# The scheduling priority for daemon processes.  See 'man nice'.
-
-# export HADOOP_NICENESS=10
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties-GANGLIA.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties-GANGLIA.erb
deleted file mode 100644
index a1e0038..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties-GANGLIA.erb
+++ /dev/null
@@ -1,37 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# syntax: [prefix].[source|sink|jmx].[instance].[options]
-# See package.html for org.apache.hadoop.metrics2 for details
-
-*.period=60
-
-*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
-*.sink.ganglia.period=10
-
-# default for supportsparse is false
-*.sink.ganglia.supportsparse=true
-
-.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
-.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
-
-# Hook up to the server
-namenode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8661
-datanode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-jobtracker.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8662
-tasktracker.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-maptask.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-reducetask.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties.erb
deleted file mode 100644
index a1e0038..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties.erb
+++ /dev/null
@@ -1,37 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# syntax: [prefix].[source|sink|jmx].[instance].[options]
-# See package.html for org.apache.hadoop.metrics2 for details
-
-*.period=60
-
-*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
-*.sink.ganglia.period=10
-
-# default for supportsparse is false
-*.sink.ganglia.supportsparse=true
-
-.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
-.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
-
-# Hook up to the server
-namenode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8661
-datanode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-jobtracker.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8662
-tasktracker.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-maptask.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-reducetask.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/health_check.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/health_check.erb
deleted file mode 100644
index 7b0c463..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/health_check.erb
+++ /dev/null
@@ -1,118 +0,0 @@
-#!/bin/bash
-#
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-err=0;
-
-function check_disks {
-
-  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
-    fsdev=""
-    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
-    if [ -z "$fsdev" ] ; then
-      msg_="$msg_ $m(u)"
-    else
-      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
-    fi
-  done
-
-  if [ -z "$msg_" ] ; then
-    echo "disks ok" ; exit 0
-  else
-    echo "$msg_" ; exit 2
-  fi
-
-}
-
-function check_taskcontroller {
-  if [ "<%=scope.function_hdp_template_var("security_enabled")%>" == "true" ]; then
-    perm=`stat -c %a:%U:%G <%=scope.function_hdp_template_var("task_bin_exe")%> 2>/dev/null`
-    if [ $? -eq 0 ] && [ "$perm" == "6050:root:hadoop" ] ; then
-      echo "taskcontroller ok"
-    else
-      echo 'check taskcontroller' ; exit 1
-    fi
-  fi
-}
-
-function check_jetty {
-  hname=`hostname`
-  jmx=`curl -s -S -m 5 "http://$hname:50060/jmx?qry=Hadoop:service=TaskTracker,name=ShuffleServerMetrics" 2>/dev/null` ;
-  if [ $? -eq 0 ] ; then
-    e=`echo $jmx | awk '/shuffle_exceptions_caught/ {printf"%d",$2}'` ;
-    e=${e:-0} # no jmx servlet ?
-    if [ $e -gt 10 ] ; then
-      echo "check jetty: shuffle_exceptions=$e" ; exit 1
-    else
-      echo "jetty ok"
-    fi
-  else
-    echo "check jetty: ping failed" ; exit 1
-  fi
-}
-
-function check_link {
-  snmp=/usr/bin/snmpwalk
-  if [ -e $snmp ] ; then
-    $snmp -t 5 -Oe  -Oq  -Os -v 1 -c public localhost if | \
-    awk ' {
-      split($1,a,".") ;
-      if ( a[1] == "ifIndex" ) { ifIndex[a[2]] = $2 }
-      if ( a[1] == "ifDescr" ) { ifDescr[a[2]] = $2 }
-      if ( a[1] == "ifType" ) { ifType[a[2]] = $2 }
-      if ( a[1] == "ifSpeed" ) { ifSpeed[a[2]] = $2 }
-      if ( a[1] == "ifAdminStatus" ) { ifAdminStatus[a[2]] = $2 }
-      if ( a[1] == "ifOperStatus" ) { ifOperStatus[a[2]] = $2 }
-    }
-    END {
-      up=0;
-      for (i in ifIndex ) {
-      if ( ifType[i] == 6 && ifAdminStatus[i] == 1 && ifOperStatus[i] == 1 && ifSpeed[i] == 1000000000 ) {
-      up=i;
-      }
-      }
-      if ( up == 0 ) { print "check link" ; exit 2 }
-      else { print ifDescr[up],"ok" }
-    }'
-    exit $? ;
-  fi
-}
-
-# Run all checks
-# Disabled 'check_link' for now... 
-for check in disks taskcontroller jetty; do
-  msg=`check_${check}` ;
-  if [ $? -eq 0 ] ; then
-    ok_msg="$ok_msg$msg,"
-  else
-    err_msg="$err_msg$msg,"
-  fi
-done
-
-if [ ! -z "$err_msg" ] ; then
-  echo -n "ERROR $err_msg "
-fi
-if [ ! -z "$ok_msg" ] ; then
-  echo -n "OK: $ok_msg"
-fi
-
-echo
-
-# Success!
-exit 0
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/log4j.properties.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/log4j.properties.erb
deleted file mode 100644
index 3bb6bba..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/log4j.properties.erb
+++ /dev/null
@@ -1,187 +0,0 @@
-# Copyright 2011 The Apache Software Foundation
-# 
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Define some default values that can be overridden by system properties
-hadoop.root.logger=INFO,console
-hadoop.log.dir=.
-hadoop.log.file=hadoop.log
-
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hadoop.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshold=ALL
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Rollover at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this 
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#
-# TaskLog Appender
-#
-
-#Default values
-hadoop.tasklog.taskid=null
-hadoop.tasklog.iscleanup=false
-hadoop.tasklog.noKeepSplits=4
-hadoop.tasklog.totalLogFileSize=100
-hadoop.tasklog.purgeLogSplits=true
-hadoop.tasklog.logsRetainHours=12
-
-log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
-
-log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
-log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-#
-#Security audit appender
-#
-hadoop.security.logger=INFO,console
-log4j.category.SecurityLogger=${hadoop.security.logger}
-hadoop.security.log.file=SecurityAuth.audit
-log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender 
-log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
-log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
-
-#
-# hdfs audit logging
-#
-hdfs.audit.logger=INFO,console
-log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
-log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
-log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
-log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
-
-#
-# mapred audit logging
-#
-mapred.audit.logger=INFO,console
-log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
-log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
-log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
-log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
-
-#
-# Rolling File Appender
-#
-
-#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Logfile size and 30-day backups
-#log4j.appender.RFA.MaxFileSize=1MB
-#log4j.appender.RFA.MaxBackupIndex=30
-
-#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-# Custom Logging levels
-
-hadoop.metrics.log.level=INFO
-#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
-#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
-
-# Jets3t library
-log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
-
-#
-# Null Appender
-# Trap security logger on the hadoop client side
-#
-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
-
-#
-# Job Summary Appender 
-#
-# Use following logger to send summary to separate file defined by 
-# hadoop.mapreduce.jobsummary.log.file rolled daily:
-# hadoop.mapreduce.jobsummary.logger=INFO,JSA
-# 
-hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
-hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
-log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}
-log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
-log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-log4j.appender.JSA.DatePattern=.yyyy-MM-dd
-log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
-log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
-
-
-<%=scope.function_hdp_template_var("rca_prefix")%>ambari.jobhistory.database=jdbc:postgresql://<%=scope.function_hdp_host("ambari_db_server_host")%>:5432/ambarirca
-<%=scope.function_hdp_template_var("rca_prefix")%>ambari.jobhistory.driver=org.postgresql.Driver
-<%=scope.function_hdp_template_var("rca_prefix")%>ambari.jobhistory.user=mapred
-<%=scope.function_hdp_template_var("rca_prefix")%>ambari.jobhistory.password=mapred
-<%=scope.function_hdp_template_var("rca_prefix")%>ambari.jobhistory.logger=DEBUG,JHA
-
-<%=scope.function_hdp_template_var("rca_prefix")%>log4j.appender.JHA=org.apache.ambari.log4j.hadoop.mapreduce.jobhistory.JobHistoryAppender
-<%=scope.function_hdp_template_var("rca_prefix")%>log4j.appender.JHA.database=${ambari.jobhistory.database}
-<%=scope.function_hdp_template_var("rca_prefix")%>log4j.appender.JHA.driver=${ambari.jobhistory.driver}
-<%=scope.function_hdp_template_var("rca_prefix")%>log4j.appender.JHA.user=${ambari.jobhistory.user}
-<%=scope.function_hdp_template_var("rca_prefix")%>log4j.appender.JHA.password=${ambari.jobhistory.password}
-
-<%=scope.function_hdp_template_var("rca_prefix")%>log4j.logger.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger=${ambari.jobhistory.logger}
-<%=scope.function_hdp_template_var("rca_prefix")%>log4j.additivity.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger=true
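A note on the rca_prefix template variable used in the block above: it resolves either to an empty string (enabling RCA job-history logging) or to a comment marker that disables the whole block, so a single template serves both configurations. As a rough illustration, assuming rca_prefix renders empty and ambari_db_server_host resolves to a hypothetical host ambari.example.com, the emitted properties would look like:

ambari.jobhistory.database=jdbc:postgresql://ambari.example.com:5432/ambarirca
ambari.jobhistory.driver=org.postgresql.Driver
ambari.jobhistory.logger=DEBUG,JHA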
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/slaves.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/slaves.erb
deleted file mode 100644
index 3cd38b3..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/slaves.erb
+++ /dev/null
@@ -1,3 +0,0 @@
-<%h=scope.function_hdp_host("slave_hosts"); (h.kind_of?(Array) ? h : []).each do |host|-%>
-<%= host %>
-<%end-%>
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/taskcontroller.cfg.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/taskcontroller.cfg.erb
deleted file mode 100644
index d7dffef..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/taskcontroller.cfg.erb
+++ /dev/null
@@ -1,20 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-mapred.local.dir=<%=scope.function_hdp_template_var("mapred_local_dir")%>
-mapreduce.tasktracker.group=hadoop
-hadoop.log.dir=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/<%=scope.function_hdp_template_var("mapred_user")%>
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/files/hbaseSmoke.sh b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/files/hbaseSmoke.sh
deleted file mode 100644
index 36ff10e..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/files/hbaseSmoke.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-disable 'usertable'
-drop 'usertable'
-create 'usertable','family'
-put 'usertable','row01','family:col01','value1'
-scan 'usertable'
-exit
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/client.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/client.pp
deleted file mode 100644
index 69c4bb8..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/client.pp
+++ /dev/null
@@ -1,39 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hbase::client(
-  $service_state = $hdp::params::cluster_client_state,
-  $opts = {}
-)
-{
-  #assumption is that there are no other HBase components on the node
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['installed_and_configured','uninstalled']) {
-    if (($hdp::params::service_exists['hdp-hbase::master'] != true) and ($hdp::params::service_exists['hdp-hbase::regionserver'] != true)) {
-      #adds package, users, directories, and common configs
-      class { 'hdp-hbase': 
-        type          => 'client',
-        service_state => $service_state
-      }
-    }
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/hbase/service_check.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/hbase/service_check.pp
deleted file mode 100644
index b39a212..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/hbase/service_check.pp
+++ /dev/null
@@ -1,56 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hbase::hbase::service_check() 
-{
-  $smoke_test_user = $hdp::params::smokeuser
-
-  $output_file = "/apps/hbase/data/usertable"
-  $conf_dir = $hdp::params::hbase_conf_dir
-
-  $test_cmd = "fs -test -e ${output_file}" 
-  
-  anchor { 'hdp-hbase::hbase::service_check::begin':}
-
-  file { '/tmp/hbaseSmoke.sh':
-    ensure => present,
-    source => "puppet:///modules/hdp-hbase/hbaseSmoke.sh",
-    mode => '0755',
-  }
-
-  exec { '/tmp/hbaseSmoke.sh':
-    command   => "su - ${smoke_test_user} -c 'hbase --config $conf_dir  shell /tmp/hbaseSmoke.sh'",
-    tries     => 3,
-    try_sleep => 5,
-    require   => File['/tmp/hbaseSmoke.sh'],
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    notify    => Hdp-hadoop::Exec-hadoop['hbase::service_check::test'],
-    logoutput => "true"
-  }
-
-  hdp-hadoop::exec-hadoop { 'hbase::service_check::test':
-    command     => $test_cmd,
-    refreshonly => true,
-    require     => Exec['/tmp/hbaseSmoke.sh'],
-    before      => Anchor['hdp-hbase::hbase::service_check::end'] #TODO: remove after testing
-  }
-  
-  anchor{ 'hdp-hbase::hbase::service_check::end':}
-}
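The service check above relies on Puppet's notify/refreshonly handshake: the smoke-test exec signals the HDFS verification step, which only runs when refreshed. A minimal self-contained sketch of that pattern, with illustrative resource names and paths rather than the hdp-hadoop wrapper used above:

exec { 'smoke_script':
  command   => '/tmp/smoke.sh',
  path      => '/usr/sbin:/sbin:/usr/bin:/bin',
  notify    => Exec['verify_output'],   # signal the verifier when the smoke run succeeds
  logoutput => true,
}

exec { 'verify_output':
  command     => 'hadoop fs -test -e /apps/smoke/output',
  path        => '/usr/bin:/bin',
  refreshonly => true,                  # never runs on its own, only when notified
}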
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/init.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/init.pp
deleted file mode 100644
index 3646aab..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/init.pp
+++ /dev/null
@@ -1,110 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hbase(
-  $type,
-  $service_state) 
-{
-  include hdp-hbase::params
- 
-  $hbase_user = $hdp-hbase::params::hbase_user
-  $config_dir = $hdp-hbase::params::conf_dir
-  
-  $hdp::params::component_exists['hdp-hbase'] = true
-
-
-  #Configs generation  
-
-  if has_key($configuration, 'hbase-site') {
-    configgenerator::configfile{'hbase-site': 
-      modulespath => $hdp-hbase::params::conf_dir,
-      filename => 'hbase-site.xml',
-      module => 'hdp-hbase',
-      configuration => $configuration['hbase-site']
-      }
-    }
-
-  if has_key($configuration, 'hbase-policy') {
-    configgenerator::configfile{'hbase-policy': 
-      modulespath => $hdp-hbase::params::conf_dir,
-      filename => 'hbase-policy.xml',
-      module => 'hdp-hbase',
-      configuration => $configuration['hbase-policy']
-      }
-    }
-
-  anchor{'hdp-hbase::begin':}
-  anchor{'hdp-hbase::end':}
-
-  if ($service_state == 'uninstalled') {
-    hdp::package { 'hbase':
-      ensure => 'uninstalled'
-    }
-    hdp::directory { $config_dir:
-      service_state => $service_state,
-      force => true
-    }
-
-    Anchor['hdp-hbase::begin'] -> Hdp::Package['hbase'] -> Hdp::Directory[$config_dir] -> Anchor['hdp-hbase::end']
-
-  } else {  
-    hdp::package { 'hbase': }
-  
-    hdp::user{ $hbase_user:}
- 
-    hdp::directory { $config_dir: 
-      service_state => $service_state,
-      force => true
-    }
-
-   hdp-hbase::configfile { ['hbase-env.sh','log4j.properties','hadoop-metrics.properties']: 
-      type => $type
-    }
-    hdp-hbase::configfile { 'regionservers':}
-    Anchor['hdp-hbase::begin'] -> Hdp::Package['hbase'] -> Hdp::User[$hbase_user] -> Hdp::Directory[$config_dir] -> 
-    Hdp-hbase::Configfile<||> ->  Anchor['hdp-hbase::end']
-  }
-}
-
-### config files
-define hdp-hbase::configfile(
-  $mode = undef,
-  $hbase_master_host = undef,
-  $template_tag = undef,
-  $type = undef
-) 
-{
-  if ($name == 'hadoop-metrics.properties') {
-    if ($type == 'master') {
-      $tag = 'GANGLIA-MASTER'
-    } else {
-      $tag = 'GANGLIA-RS'
-    }
-  } else {
-    $tag = $template_tag
-  }
-  hdp::configfile { "${hdp-hbase::params::conf_dir}/${name}":
-    component         => 'hbase',
-    owner             => $hdp-hbase::params::hbase_user,
-    mode              => $mode,
-    hbase_master_host => $hbase_master_host,
-    template_tag      => $tag
-  }
-}
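The hdp-hbase::configfile define above gets its hbase_master_host parameter injected elsewhere through resource-collector overrides (see master-conn.pp below), an idiom that collects every declared instance of a defined type and sets a parameter after declaration. A minimal sketch with generic, illustrative names:

define mymod::configfile($master_host = undef) {
  # Stand-in body; the real define would render a template with $master_host.
  notify { "rendering ${name} against master ${master_host}": }
}

mymod::configfile { ['hbase-site.xml', 'regionservers']: }

# Later, e.g. from another class, fill in the parameter on all collected instances:
Mymod::Configfile <||> { master_host => 'master.example.com' }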
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/master-conn.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/master-conn.pp
deleted file mode 100644
index fe2ecc6..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/master-conn.pp
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hbase::master-conn($hbase_master_host)
-{
-  Hdp-Hbase::Configfile<||>{hbase_master_host => $hbase_master_host}
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/master.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/master.pp
deleted file mode 100644
index 14ea0bc..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/master.pp
+++ /dev/null
@@ -1,66 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hbase::master(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-hbase::params 
-{
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) {    
-    $hdp::params::service_exists['hdp-hbase::master'] = true
-
-    if ( ($service_state == 'installed_and_configured') and
-         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
-       $masterHost = $kerberos_adminclient_host[0]
-       hdp::download_keytab { 'hbase_master_service_keytab' :
-         masterhost => $masterHost,
-         keytabdst => "${$keytab_path}/hm.service.keytab",
-         keytabfile => 'hm.service.keytab',
-         owner => $hdp::params::hbase_user
-       }
-    }
-  
-    #adds package, users, directories, and common configs
-    class { 'hdp-hbase': 
-      type          => 'master',
-      service_state => $service_state
-    }
-
-    Hdp-hbase::Configfile<||>{hbase_master_host => $hdp::params::host_address}
-  
-    hdp-hbase::service{ 'master':
-      ensure => $service_state
-    }
-
-    #top level does not need anchors
-    Class['hdp-hbase'] -> Hdp-hbase::Service['master'] 
-    } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-#assumes that master and regionserver will not be on the same machine
-class hdp-hbase::master::enable-ganglia()
-{
-  Hdp-hbase::Configfile<|title  == 'hadoop-metrics.properties'|>{template_tag => 'GANGLIA-MASTER'}
-}
-
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/params.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/params.pp
deleted file mode 100644
index 96e79b0..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/params.pp
+++ /dev/null
@@ -1,86 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hbase::params() inherits hdp::params 
-{
-  
-  ####### users
-  $hbase_user = $hdp::params::hbase_user
-  
-  ### hbase-env
-  $hadoop_conf_dir = hdp_default("hadoop/hbase-env/hadoop_conf_dir")
-  $conf_dir = $hdp::params::hbase_conf_dir
-
-  $hbase_log_dir = hdp_default("hadoop/hbase-env/hbase_log_dir","/var/log/hbase")
-
-  $hbase_master_heapsize = hdp_default("hadoop/hbase-env/hbase_master_heapsize","1000m")
-
-  $hbase_pid_dir = hdp_default("hadoop/hbase-env/hbase_pid_dir","/var/run/hbase")
-
-  $hbase_regionserver_heapsize = hdp_default("hadoop/hbase-env/hbase_regionserver_heapsize","1000m")
-
-  $hbase_regionserver_xmn_size = hdp_calc_xmn_from_xms("$hbase_regionserver_heapsize","0.2","512")
-
-  ### hbase-site.xml
-  $hbase_hdfs_root_dir = hdp_default("hadoop/hbase-site/hbase_hdfs_root_dir","/apps/hbase/data")
-
-  $hbase_tmp_dir = hdp_default("hadoop/hbase-site/hbase_tmp_dir","$hbase_log_dir")
-
-
-  #TODO: check if any of these 'hdfs' vars need to be equated with vars in hdp-hadoop
-  $hdfs_enable_shortcircuit_read = hdp_default("hadoop/hbase-site/hdfs_enable_shortcircuit_read",true)
-
-  $hdfs_enable_shortcircuit_skipchecksum = hdp_default("hadoop/hbase-site/hdfs_enable_shortcircuit_skipchecksum",false)
-
-  $hdfs_support_append = hdp_default("hadoop/hbase-site/hdfs_support_append",true)
-
-  $hfile_blockcache_size = hdp_default("hadoop/hbase-site/hfile_blockcache_size","0.25")
-
-  $hfile_max_keyvalue_size = hdp_default("hadoop/hbase-site/hfile_max_keyvalue_size",10485760)
-
-  $zookeeper_sessiontimeout = hdp_default("hadoop/hbase-site/zookeeper_sessiontimeout",60000)
-
-  $client_scannercaching = hdp_default("hadoop/hbase-site/client_scannercaching",100)
-
-  $hstore_blockingstorefiles = hdp_default("hadoop/hbase-site/hstore_blockingstorefiles",7)
-
-  $hstore_compactionthreshold = hdp_default("hadoop/hbase-site/hstore_compactionthreshold",3)
-
-  $hstorefile_maxsize = hdp_default("hadoop/hbase-site/hstorefile_maxsize",1073741824)
-
-  $hregion_blockmultiplier = hdp_default("hadoop/hbase-site/hregion_blockmultiplier",2)
-
-  $hregion_memstoreflushsize = hdp_default("hadoop/hbase-site/hregion_memstoreflushsize",134217728)
-
-  $regionserver_handlers = hdp_default("hadoop/hbase-site/regionserver_handlers", 30)
-
-  $hregion_majorcompaction = hdp_default("hadoop/hbase-site/hregion_majorcompaction", 86400000)
-
-  $preloaded_mastercoprocessor_classes = hdp_default("hadoop/hbase-site/preloaded_mastercoprocessor_classes")
-
-  $preloaded_regioncoprocessor_classes = hdp_default("hadoop/hbase-site/preloaded_regioncoprocessor_classes")
-
-  $regionserver_memstore_lab = hdp_default("hadoop/hbase-site/regionserver_memstore_lab",true)
-
-  $regionserver_memstore_lowerlimit = hdp_default("hadoop/hbase-site/regionserver_memstore_lowerlimit","0.35")
-
-  $regionserver_memstore_upperlimit = hdp_default("hadoop/hbase-site/regionserver_memstore_upperlimit","0.4")
-
-}
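Each hdp_default(key, fallback) call above is a custom Ambari parser function that returns the cluster-supplied value for the key when one is present and the given fallback otherwise. As a rough analogue in stock Puppet (a sketch only; hdp_default itself is not Hiera-based), the same layered lookup could be expressed as:

$hbase_log_dir = lookup('hadoop/hbase-env/hbase_log_dir', String, 'first', '/var/log/hbase')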
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/regionserver.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/regionserver.pp
deleted file mode 100644
index 61771ca..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/regionserver.pp
+++ /dev/null
@@ -1,73 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hbase::regionserver(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-hbase::params
-{
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) {    
-    $hdp::params::service_exists['hdp-hbase::regionserver'] = true       
-
-    if ( ($service_state == 'installed_and_configured') and
-         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
-       $masterHost = $kerberos_adminclient_host[0]
-       hdp::download_keytab { 'hbase_rs_service_keytab' :
-         masterhost => $masterHost,
-         keytabdst => "${$keytab_path}/rs.service.keytab",
-         keytabfile => 'rs.service.keytab',
-         owner => $hdp::params::hbase_user
-       }
-    }
-
-    if ($hdp::params::service_exists['hdp-hbase::master'] != true) {
-      #adds package, users, directories, and common configs
-      class { 'hdp-hbase': 
-        type          => 'regionserver',
-        service_state => $service_state
-      } 
-      $create_pid_dir = true
-      $create_log_dir = true
-    } else {
-      $create_pid_dir = false
-      $create_log_dir = false
-    }
-
-
-    hdp-hbase::service{ 'regionserver':
-      ensure         => $service_state,
-      create_pid_dir => $create_pid_dir,
-      create_log_dir => $create_log_dir
-    }
-
-    #top level does not need anchors
-    Class['hdp-hbase'] ->  Hdp-hbase::Service['regionserver']
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-#assumes that master and regionserver will not be on the same machine
-class hdp-hbase::regionserver::enable-ganglia()
-{
-  Hdp-hbase::Configfile<|title  == 'hadoop-metrics.properties'|>{template_tag => 'GANGLIA-RS'}
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/service.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/service.pp
deleted file mode 100644
index 7f4259b..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/service.pp
+++ /dev/null
@@ -1,76 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-define hdp-hbase::service(
-  $ensure = 'running',
-  $create_pid_dir = true,
-  $create_log_dir = true,
-  $initial_wait = undef)
-{
-  include hdp-hbase::params
-
-  $role = $name
-  $user = $hdp-hbase::params::hbase_user
-
-  $conf_dir = $hdp::params::hbase_conf_dir
-  $hbase_daemon = $hdp::params::hbase_daemon_script
-  $cmd = "$hbase_daemon --config ${conf_dir}"
-  $pid_dir = $hdp-hbase::params::hbase_pid_dir
-  $pid_file = "${pid_dir}/hbase-hbase-${role}.pid"
-
-  if ($ensure == 'running') {
-    $daemon_cmd = "su - ${user} -c  '${cmd} start ${role}'"
-    $no_op_test = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
-  } elsif ($ensure == 'stopped') {
-    $daemon_cmd = "su - ${user} -c  '${cmd} stop ${role}'"
-    $no_op_test = undef
-  } else {
-    $daemon_cmd = undef
-  }
-
-  $tag = "hbase_service-${name}"
-  
-  if ($create_pid_dir == true) {
-    hdp::directory_recursive_create { $pid_dir: 
-      owner => $user,
-      tag   => $tag,
-      service_state => $ensure,
-      force => true
-    }
-  }
-  if ($create_log_dir == true) {
-    hdp::directory_recursive_create { $hdp-hbase::params::hbase_log_dir: 
-      owner => $user,
-      tag   => $tag,
-      service_state => $ensure,
-      force => true
-    }
-  }
-
-  anchor{"hdp-hbase::service::${name}::begin":} -> Hdp::Directory_recursive_create<|tag == $tag|> -> anchor{"hdp-hbase::service::${name}::end":}
-  if ($daemon_cmd != undef) { 
-    hdp::exec { $daemon_cmd:
-      command      => $daemon_cmd,
-      unless       => $no_op_test,
-      initial_wait => $initial_wait
-    }
-    Hdp::Directory_recursive_create<|context_tag == 'hbase_service'|> -> Hdp::Exec[$daemon_cmd] -> Anchor["hdp-hbase::service::${name}::end"]
-  }
-}
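The unless guard above is what makes the start command idempotent: the exec is skipped whenever the pid file exists and the recorded process is still alive. A minimal sketch of the same guard with illustrative paths (using ps -p, a more portable spelling of the bare "ps `cat pidfile`" form above):

$pid_file = '/var/run/hbase/hbase-hbase-master.pid'

exec { 'start-hbase-master':
  # Illustrative daemon command; the real manifest resolves it from hdp::params.
  command => "su - hbase -c 'hbase-daemon.sh --config /etc/hbase start master'",
  unless  => "ls ${pid_file} >/dev/null 2>&1 && ps -p `cat ${pid_file}` >/dev/null 2>&1",
  path    => '/usr/sbin:/sbin:/usr/bin:/bin',
}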
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/zk-conn.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/zk-conn.pp
deleted file mode 100644
index 6c67cd4..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/zk-conn.pp
+++ /dev/null
@@ -1,26 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hbase::zk-conn(
- $zookeeper_hosts
-)
-{
-  Hdp::Configfile<||>{zookeeper_hosts => $zookeeper_hosts}
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hadoop-metrics.properties-GANGLIA-MASTER.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hadoop-metrics.properties-GANGLIA-MASTER.erb
deleted file mode 100644
index f3988d9..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hadoop-metrics.properties-GANGLIA-MASTER.erb
+++ /dev/null
@@ -1,50 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See http://wiki.apache.org/hadoop/GangliaMetrics
-#
-# Make sure you know whether you are using ganglia 3.0 or 3.1.
-# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
-# And, yes, this file is named hadoop-metrics.properties rather than
-# hbase-metrics.properties because we're leveraging the hadoop metrics
-# package and hadoop-metrics.properties is a hardcoded name, at least
-# for the moment.
-#
-# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
-
-# HBase-specific configuration to reset long-running stats (e.g. compactions)
-# If this variable is left out, then the default is no expiration.
-hbase.extendedperiod = 3600
-
-# Configuration of the "hbase" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-hbase.period=10
-hbase.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8663
-
-# Configuration of the "jvm" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-jvm.period=10
-jvm.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8663
-
-# Configuration of the "rpc" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-rpc.period=10
-rpc.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8663
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hadoop-metrics.properties-GANGLIA-RS.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hadoop-metrics.properties-GANGLIA-RS.erb
deleted file mode 100644
index 386376d..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hadoop-metrics.properties-GANGLIA-RS.erb
+++ /dev/null
@@ -1,50 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See http://wiki.apache.org/hadoop/GangliaMetrics
-#
-# Make sure you know whether you are using ganglia 3.0 or 3.1.
-# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
-# And, yes, this file is named hadoop-metrics.properties rather than
-# hbase-metrics.properties because we're leveraging the hadoop metrics
-# package and hadoop-metrics.properties is a hardcoded name, at least
-# for the moment.
-#
-# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
-
-# HBase-specific configuration to reset long-running stats (e.g. compactions)
-# If this variable is left out, then the default is no expiration.
-hbase.extendedperiod = 3600
-
-# Configuration of the "hbase" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-hbase.period=10
-hbase.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-
-# Configuration of the "jvm" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-jvm.period=10
-jvm.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-
-# Configuration of the "rpc" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-rpc.period=10
-rpc.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hadoop-metrics.properties.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hadoop-metrics.properties.erb
deleted file mode 100644
index f3988d9..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hadoop-metrics.properties.erb
+++ /dev/null
@@ -1,50 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See http://wiki.apache.org/hadoop/GangliaMetrics
-#
-# Make sure you know whether you are using ganglia 3.0 or 3.1.
-# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
-# And, yes, this file is named hadoop-metrics.properties rather than
-# hbase-metrics.properties because we're leveraging the hadoop metrics
-# package and hadoop-metrics.properties is a hardcoded name, at least
-# for the moment.
-#
-# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
-
-# HBase-specific configuration to reset long-running stats (e.g. compactions)
-# If this variable is left out, then the default is no expiration.
-hbase.extendedperiod = 3600
-
-# Configuration of the "hbase" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-hbase.period=10
-hbase.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8663
-
-# Configuration of the "jvm" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-jvm.period=10
-jvm.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8663
-
-# Configuration of the "rpc" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-rpc.period=10
-rpc.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8663
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hbase-env.sh.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hbase-env.sh.erb
deleted file mode 100644
index db771f2..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hbase-env.sh.erb
+++ /dev/null
@@ -1,76 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Set environment variables here.
-
-# The java implementation to use. Java 1.6 required.
-export JAVA_HOME=<%=scope.function_hdp_java_home()%>
-
-# HBase Configuration directory
-export HBASE_CONF_DIR=${HBASE_CONF_DIR:-<%=scope.function_hdp_template_var("hbase_conf_dir")%>}
-
-# Extra Java CLASSPATH elements. Optional.
-export HBASE_CLASSPATH=${HBASE_CLASSPATH}:<%=scope.function_hdp_template_var("::hdp-hadoop::params::conf_dir")%>
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-# export HBASE_HEAPSIZE=1000
-
-# Extra Java runtime options.
-# Below are what we set by default. May only work with SUN JVM.
-# For more on why as well as other possible settings,
-# see http://wiki.apache.org/hadoop/PerformanceTuning
-export HBASE_OPTS="-ea -XX:+UseConcMarkSweepGC -XX:+CMSIncrementalMode"
-
-# Uncomment below to enable java garbage collection logging.
-# export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log"
-
-# Uncomment and adjust to enable JMX exporting
-# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
-# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
-#
-# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
-export HBASE_MASTER_OPTS="-Xmx<%=scope.function_hdp_template_var("hbase_master_heapsize")%>"
-export HBASE_REGIONSERVER_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseParNewGC -Xmn<%=scope.function_hdp_template_var("hbase_regionserver_xmn_size")%> -XX:CMSInitiatingOccupancyFraction=80 -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xloggc:<%=scope.function_hdp_template_var("hbase_log_dir")%>/$USER/gc.log-`date +'%Y%m%d%H%M'` -Xms<%=scope.function_hdp_template_var("hbase_regionserver_heapsize")%> -Xmx<%=scope.function_hdp_template_var("hbase_regionserver_heapsize")%> -XX:ErrorFile=<%=scope.function_hdp_template_var("hbase_log_dir")%>/$USER/hs_err_pid%p.log"
-# export HBASE_THRIFT_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
-# export HBASE_ZOOKEEPER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
-
-# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
-export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers
-
-# Extra ssh options. Empty by default.
-# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
-
-# Where log files are stored. $HBASE_HOME/logs by default.
-export HBASE_LOG_DIR=<%=scope.function_hdp_template_var("hbase_log_dir")%>
-
-# A string representing this instance of hbase. $USER by default.
-# export HBASE_IDENT_STRING=$USER
-
-# The scheduling priority for daemon processes. See 'man nice'.
-# export HBASE_NICENESS=10
-
-# The directory where pid files are stored. /tmp by default.
-export HBASE_PID_DIR=<%=scope.function_hdp_template_var("hbase_pid_dir")%>
-
-# Seconds to sleep between slave commands. Unset by default. This
-# can be useful in large clusters, where, e.g., slave rsyncs can
-# otherwise arrive faster than the master can service them.
-# export HBASE_SLAVE_SLEEP=0.1
-
-# Tell HBase whether it should manage its own instance of ZooKeeper or not.
-export HBASE_MANAGES_ZK=false
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/log4j.properties.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/log4j.properties.erb
deleted file mode 100644
index 5227c9a..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/log4j.properties.erb
+++ /dev/null
@@ -1,80 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-# Define some default values that can be overridden by system properties
-hbase.root.logger=INFO,console
-hbase.log.dir=.
-hbase.log.file=hbase.log
-
-# Define the root logger to the system property "hbase.root.logger".
-log4j.rootLogger=${hbase.root.logger}
-
-# Logging Threshold
-log4j.threshold=ALL
-
-#
-# Daily Rolling File Appender
-#
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}
-
-# Rollover at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this 
-#
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-# Custom Logging levels
-
-log4j.logger.org.apache.zookeeper=INFO
-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-log4j.logger.org.apache.hadoop.hbase=INFO
-# Make these two classes INFO-level. Make them DEBUG to see more zk debug.
-log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO
-log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO
-#log4j.logger.org.apache.hadoop.dfs=DEBUG
-# Set this class to log INFO only; otherwise it's OTT
-
-# Uncomment this line to enable tracing on _every_ RPC call (this can be a lot of output)
-#log4j.logger.org.apache.hadoop.ipc.HBaseServer.trace=DEBUG
-
-# Uncomment the below if you want to remove logging of client region caching
-# and scan of .META. messages
-# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=INFO
-# log4j.logger.org.apache.hadoop.hbase.client.MetaScanner=INFO
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/regionservers.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/regionservers.erb
deleted file mode 100644
index 159a2f6..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hbase/templates/regionservers.erb
+++ /dev/null
@@ -1,3 +0,0 @@
-<%h=scope.function_hdp_host("hbase_rs_hosts"); (h.kind_of?(Array) ? h : []).each do |host|-%>
-<%= host %>
-<%end-%>
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hcat/files/hcatSmoke.sh b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hcat/files/hcatSmoke.sh
deleted file mode 100644
index 695d56a..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hcat/files/hcatSmoke.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-export tablename=$1
-
-case "$2" in
-
-prepare)
-  hcat -e "show tables"
-  hcat -e "drop table IF EXISTS ${tablename}"
-  hcat -e "create table ${tablename} ( id INT, name string ) stored as rcfile ;"
-;;
-
-cleanup)
-  hcat -e "drop table IF EXISTS ${tablename}"
-;;
-
-esac
\ No newline at end of file
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hcat/files/pigSmoke.sh b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hcat/files/pigSmoke.sh
deleted file mode 100644
index 2e90ac0..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hcat/files/pigSmoke.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License
-
-A = load 'passwd' using PigStorage(':');
-B = foreach A generate \$0 as id;
-store B into 'pigsmoke.out';
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hcat/manifests/hcat/service_check.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hcat/manifests/hcat/service_check.pp
deleted file mode 100644
index 3f3432e..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hcat/manifests/hcat/service_check.pp
+++ /dev/null
@@ -1,73 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hcat::hcat::service_check() 
-{
-  include hdp-hcat::params
-  $unique = hdp_unique_id_and_date()
-  $smoke_test_user = $hdp::params::smokeuser
-  $output_file = "/apps/hive/warehouse/hcatsmoke${unique}"
-  $security_enabled=$hdp::params::security_enabled
-  $smoke_user_keytab = "${hdp-hcat::params::keytab_path}/${smoke_test_user}.headless.keytab"
-
-  if ($security_enabled == true) {
-    $smoke_user_kinitcmd="/usr/kerberos/bin/kinit  -kt ${smoke_user_keytab} ${smoke_test_user}; "
-  } else {
-    $smoke_user_kinitcmd=""
-  }
-
-  $test_cmd = "fs -test -e ${output_file}" 
-  
-  anchor { 'hdp-hcat::hcat::service_check::begin':}
-
-  file { '/tmp/hcatSmoke.sh':
-    ensure => present,
-    source => "puppet:///modules/hdp-hcat/hcatSmoke.sh",
-    mode => '0755',
-  }
-
-  exec { 'hcatSmoke.sh prepare':
-    command   => "su - ${smoke_test_user} -c '${smoke_user_kinitcmd}sh /tmp/hcatSmoke.sh hcatsmoke${unique} prepare'",
-    tries     => 3,
-    try_sleep => 5,
-    require   => File['/tmp/hcatSmoke.sh'],
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    notify    => Hdp-hadoop::Exec-hadoop['hcat::service_check::test'],
-    logoutput => "true"
-  }
-
-  hdp-hadoop::exec-hadoop { 'hcat::service_check::test':
-    command     => $test_cmd,
-    refreshonly => true,
-    require     => Exec['hcatSmoke.sh prepare'],
-  }
-
-  exec { 'hcatSmoke.sh cleanup':
-    command   => "su - ${smoke_test_user} -c '${smoke_user_kinitcmd}sh /tmp/hcatSmoke.sh hcatsmoke${unique} cleanup'",
-    tries     => 3,
-    try_sleep => 5,
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    require   => Hdp-hadoop::Exec-hadoop['hcat::service_check::test'],
-    before    => Anchor['hdp-hcat::hcat::service_check::end'],
-    logoutput => "true"
-  }
-  
-  anchor{ 'hdp-hcat::hcat::service_check::end':}
-}
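The begin/end anchors above bracket the prepare, test, and cleanup steps so other resources can order themselves against the whole check as a single unit; anchor (from puppetlabs-stdlib) is a no-op resource that exists purely for ordering. A compressed sketch of the idiom with placeholder commands:

anchor { 'mycheck::begin': }
-> exec { 'prepare': command => '/bin/true', path => '/bin:/usr/bin' }
-> exec { 'test':    command => '/bin/true', path => '/bin:/usr/bin' }
-> exec { 'cleanup': command => '/bin/true', path => '/bin:/usr/bin' }
-> anchor { 'mycheck::end': }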
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hcat/manifests/init.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hcat/manifests/init.pp
deleted file mode 100644
index 576cd84..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hcat/manifests/init.pp
+++ /dev/null
@@ -1,90 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hcat(
-  $service_state = $hdp::params::cluster_client_state
-) inherits hdp-hcat::params
-{
-  $hcat_config_dir = $hdp-hcat::params::hcat_conf_dir
-  $hcat_pid_dir = $hdp-hcat::params::hcat_pid_dir
-
-  if ($hdp::params::use_32_bits_on_slaves == false) {
-    $size = 64
-  } else {
-    $size = 32
-  }
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state == 'uninstalled') {
-    hdp::package { 'hcat' :
-      ensure => 'uninstalled', 
-      size   => $size
-    }
-
-    hdp::directory { $hcat_config_dir:
-      service_state => $service_state,
-      force => true
-    }
-
-    hdp::directory { $hcat_pid_dir:
-      service_state => $service_state,
-      force => true
-    }
-
-    Hdp::Package['hcat'] -> Hdp::Directory[$hcat_config_dir] -> Hdp::Directory[$hcat_pid_dir]
-
-  } elsif ($service_state == 'installed_and_configured') {
-    hdp::package { 'hcat' : 
-      size => $size
-    }
-
-    hdp::directory { $hcat_config_dir:
-      service_state => $service_state,
-      force => true
-    }
-
-    hdp::directory_recursive_create { $hcat_pid_dir:
-      owner => $webhcat_user,
-      service_state => $service_state,
-      force => true
-    }
-
-    hdp::user{ $webhcat_user:}
-
-    if ($webhcat_user != $hcat_user) {
-      hdp::user { $hcat_user:}
-    }
-
-    hdp-hcat::configfile { 'hcat-env.sh':}
-  
-    Hdp::Package['hcat'] -> Hdp::User<|title == $webhcat_user or title == $hcat_user|>  -> Hdp::Directory[$hcat_config_dir] -> Hdp::Directory_recursive_create[$hcat_pid_dir] -> Hdp-hcat::Configfile<||> 
-
- } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-### config files
-define hdp-hcat::configfile()
-{
-  hdp::configfile { "${hdp::params::hcat_conf_dir}/${name}":
-    component => 'hcat'
-  }
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hcat/manifests/params.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hcat/manifests/params.pp
deleted file mode 100644
index a44486a..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hcat/manifests/params.pp
+++ /dev/null
@@ -1,36 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hcat::params() inherits hdp::params
-{
-  $hcat_conf_dir = $hdp::params::hcat_conf_dir
-
-  $hcat_metastore_port = hdp_default("hcat_metastore_port",9933)
-  $hcat_lib = hdp_default("hcat_lib","/usr/lib/hcatalog/share/hcatalog") #TODO: should I remove and just use hcat_dbroot
-
-  ### hcat-env
-  $hcat_dbroot = hdp_default("hadoop/hcat-env/hcat_dbroot",$hcat_lib)
-
-  $hcat_log_dir = hdp_default("hadoop/hcat-env/hcat_log_dir","/var/log/hcatalog")
-
-  $hcat_pid_dir = hdp_default("hadoop/hcat-env/hcat_pid_dir","/var/run/hcatalog")
-
-  $keytab_path = hdp_default("keytab_path","/etc/security/keytabs")
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hcat/templates/hcat-env.sh.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hcat/templates/hcat-env.sh.erb
deleted file mode 100644
index c80be00..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hcat/templates/hcat-env.sh.erb
+++ /dev/null
@@ -1,25 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-JAVA_HOME=<%=scope.function_hdp_java_home()%>
-HCAT_PID_DIR=<%=scope.function_hdp_template_var("hcat_pid_dir")%>/
-HCAT_LOG_DIR=<%=scope.function_hdp_template_var("hcat_log_dir")%>/
-HCAT_CONF_DIR=<%=scope.function_hdp_template_var("hcat_conf_dir")%>
-HADOOP_HOME=${HADOOP_HOME:-<%=scope.function_hdp_template_var("::hdp::params::hadoop_home")%>}
-#DBROOT is the path where the connector jars are downloaded
-DBROOT=<%=scope.function_hdp_template_var("hcat_dbroot")%>
-USER=<%=scope.function_hdp_user("hcat_user")%>
-METASTORE_PORT=<%=scope.function_hdp_template_var("hcat_metastore_port")%>
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/files/hiveSmoke.sh b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/files/hiveSmoke.sh
deleted file mode 100644
index 7e03524..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/files/hiveSmoke.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-export tablename=$1
-echo "CREATE EXTERNAL TABLE IF NOT EXISTS ${tablename} ( foo INT, bar STRING );" | hive
-echo "DESCRIBE ${tablename};" | hive
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/files/hiveserver2.sql b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/files/hiveserver2.sql
deleted file mode 100644
index 99a3865..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/files/hiveserver2.sql
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-CREATE EXTERNAL TABLE IF NOT EXISTS hiveserver2smoke20408 ( foo INT, bar STRING );
-DESCRIBE hiveserver2smoke20408;
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/files/hiveserver2Smoke.sh b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/files/hiveserver2Smoke.sh
deleted file mode 100644
index 051a21e..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/files/hiveserver2Smoke.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-smokeout=`/usr/lib/hive/bin/beeline -u $1 -n fakeuser -p fakepwd -d org.apache.hive.jdbc.HiveDriver -e "!run $2" 2>&1| awk '{print}'|grep Error`
-
-if [ "x$smokeout" == "x" ]; then
-  echo "Smoke test of hiveserver2 passed"
-  exit 0
-else
-  echo "Smoke test of hiveserver2 wasnt passed"
-  echo $smokeout
-  exit 1
-fi
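
This smoke test passes when beeline's combined output contains no line matching "Error". A sketch of a direct invocation, with an assumed HiveServer2 host and the SQL file shipped alongside it:

    sh /tmp/hiveserver2Smoke.sh 'jdbc:hive2://hive-host.example.com:10000' /tmp/hiveserver2.sql
    echo $?    # 0 when no Error line appeared, 1 otherwise

The fake user and password only work because HiveServer2 historically defaulted to no authentication; a secured endpoint would need real credentials.
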
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/files/startHiveserver2.sh b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/files/startHiveserver2.sh
deleted file mode 100644
index 98efbd1..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/files/startHiveserver2.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-/usr/lib/hive/bin/hiveserver2 -hiveconf hive.metastore.uris=' ' > $1 2> $2 &
-echo $!|cat>$3
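
Both start scripts share one contract: argument 1 is the stdout log, argument 2 the stderr log, and argument 3 the pid file, with the daemon backgrounded and its pid captured from $!. For illustration, using the default paths from hdp-hive::params:

    sh /tmp/startHiveserver2.sh /var/log/hive/hive-server2.out /var/log/hive/hive-server2.log /var/run/hive/hive-server.pid
    cat /var/run/hive/hive-server.pid    # consumed by the liveness check in service.pp
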
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/files/startMetastore.sh b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/files/startMetastore.sh
deleted file mode 100644
index 32944b2..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/files/startMetastore.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-hive --service metastore > $1 2> $2 &
-echo $!|cat>$3
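
The metastore variant is invoked the same way; a sketch with the corresponding default paths:

    sh /tmp/startMetastore.sh /var/log/hive/hive.out /var/log/hive/hive.log /var/run/hive/hive.pid
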
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/client.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/client.pp
deleted file mode 100644
index 9e95e25..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/client.pp
+++ /dev/null
@@ -1,40 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hive::client(
-  $service_state = $hdp::params::cluster_client_state,
-  $hive_server_host = undef
-) inherits hdp::params
-{ 
-  if ($service_state == 'no_op') {
-   } elsif ($service_state in ['installed_and_configured','uninstalled']) {
-    if ($hdp::params::service_exists['hdp-hive::server'] != true) {
-      #installs package, creates user, sets configuration
-      class { 'hdp-hive':
-        service_state => $service_state
-      } 
-      if ($hive_server_host != undef) {
-        Hdp-Hive::Configfile<||>{hive_server_host => $hive_server_host}
-      }
-    }
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/hive/service_check.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/hive/service_check.pp
deleted file mode 100644
index 97553e7..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/hive/service_check.pp
+++ /dev/null
@@ -1,80 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hive::hive::service_check() inherits hdp-hive::params
-{
-  $smoke_test_user = $hdp::params::smokeuser
-  $smoke_test_sql = "/tmp/$smoke_test_sql_file"
-  $smoke_test_path = "/tmp/$smoke_test_script"
-
-
-  $smoke_cmd = "env JAVA_HOME=$hdp::params::java64_home $smoke_test_path $hive_url $smoke_test_sql"
-
-
-  file { $smoke_test_path:
-    ensure => present,
-    source => "puppet:///modules/hdp-hive/$smoke_test_script",
-    mode => '0755',
-  }
-
-  file { $smoke_test_sql:
-    ensure => present,
-    source => "puppet:///modules/hdp-hive/$smoke_test_sql_file"
-  }
-
-  exec { $smoke_test_path:
-    command   => $smoke_cmd,
-    tries     => 3,
-    try_sleep => 5,
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    logoutput => "true",
-    user => $smoke_test_user
-  }
-
-#  $unique = hdp_unique_id_and_date()
-#  $output_file = "/apps/hive/warehouse/hivesmoke${unique}"
-#  $test_cmd = "fs -test -e ${output_file}"
-
-#  file { '/tmp/hiveSmoke.sh':
-#    ensure => present,
-#    source => "puppet:///modules/hdp-hive/hiveSmoke.sh",
-#    mode => '0755',
-#  }
-#
-#  exec { '/tmp/hiveSmoke.sh':
-#    command => "su - ${smoke_test_user} -c 'env JAVA_HOME=$hdp::params::java64_home sh /tmp/hiveSmoke.sh hivesmoke${unique}'",
-#    tries => 3,
-#    try_sleep => 5,
-#    path => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-#    notify => Hdp-hadoop::Exec-hadoop['hive::service_check::test'],
-#    logoutput => "true"
-#  }
-
-#  hdp-hadoop::exec-hadoop { 'hive::service_check::test':
-#    command => $test_cmd,
-#    refreshonly => true
-#  }
-
-#  File[$smoke_test_path] -> File[$smoke_test_sql] -> Exec[$smoke_test_path] -> File['/tmp/hiveSmoke.sh'] -> Exec['/tmp/hiveSmoke.sh'] -> Hdp-Hadoop::Exec-Hadoop['hive::service_check::test']
-
-  include hdp-hcat::hcat::service_check  
-
-  File[$smoke_test_path] -> File[$smoke_test_sql] -> Exec[$smoke_test_path]
-}
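
With the interpolations filled in, the $smoke_cmd exec above is roughly equivalent to the following, run as the smoke user (the user name, JAVA_HOME, and host shown here are assumptions):

    su - ambari-qa -c 'env JAVA_HOME=/usr/jdk64/jdk1.6.0_31 /tmp/hiveserver2Smoke.sh jdbc:hive2://hive-host.example.com:10000 /tmp/hiveserver2.sql'

The exec resource retries this up to three times, five seconds apart, before the service check is reported as failed.
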
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/init.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/init.pp
deleted file mode 100644
index 92b4023..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/init.pp
+++ /dev/null
@@ -1,93 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hive(
-  $service_state,
-  $server = false
-) 
-{
-  include hdp-hive::params
-  
-  $hive_user = $hdp-hive::params::hive_user
-  $hive_config_dir = $hdp-hive::params::hive_conf_dir
-
-# Configs generation  
-
-  if has_key($configuration, 'hive-site') {
-    configgenerator::configfile{'hive-site':
-      modulespath => $hdp-hive::params::hive_conf_dir, 
-      filename => 'hive-site.xml',
-      module => 'hdp-hive',
-      configuration => $configuration['hive-site']
-    }
-  }
-
-  anchor { 'hdp-hive::begin': }
-  anchor { 'hdp-hive::end': } 
-
-  if ($service_state == 'uninstalled') {
-    hdp::package { 'hive' : 
-      ensure => 'uninstalled'
-    }
-
-    hdp::directory { $hive_config_dir:
-      service_state => $service_state,
-      force => true
-    }
-
-    Anchor['hdp-hive::begin'] -> Hdp::Package['hive'] -> Hdp::Directory[$hive_config_dir] ->  Anchor['hdp-hive::end']
-
-  } else {
-    hdp::package { 'hive' : }
-    if ($server == true ) {
-      class { 'hdp-hive::mysql-connector': }
-    }
-  
-    hdp::user{ $hive_user:}
-  
-    hdp::directory { $hive_config_dir: 
-      service_state => $service_state,
-      force => true
-    }
-
-    hdp-hive::configfile { ['hive-env.sh']: }
-  
-    Anchor['hdp-hive::begin'] -> Hdp::Package['hive'] -> Hdp::User[$hive_user] ->  
-     Hdp::Directory[$hive_config_dir] -> Hdp-hive::Configfile<||> ->  Anchor['hdp-hive::end']
-
-     if ($server == true ) {
-       Hdp::Package['hive'] -> Hdp::User[$hive_user] -> Class['hdp-hive::mysql-connector'] -> Anchor['hdp-hive::end']
-    }
-  }
-}
-
-### config files
-define hdp-hive::configfile(
-  $mode = undef,
-  $hive_server_host = undef
-) 
-{
-  hdp::configfile { "${hdp-hive::params::hive_conf_dir}/${name}":
-    component        => 'hive',
-    owner            => $hdp-hive::params::hive_user,
-    mode             => $mode,
-    hive_server_host => $hive_server_host 
-  }
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/metastore.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/metastore.pp
deleted file mode 100644
index 29bfd97..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/metastore.pp
+++ /dev/null
@@ -1,61 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hive::metastore(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits  hdp-hive::params
-{ 
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
-
-    $hdp::params::service_exists['hdp-hive::server'] = true
-
-    if ( ($service_state == 'installed_and_configured') and
-         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
-      $masterHost = $kerberos_adminclient_host[0]
-      hdp::download_keytab { 'hive_server_service_keytab' :
-        masterhost => $masterHost,
-        keytabdst => "${$keytab_path}/hive.service.keytab",
-        keytabfile => 'hive.service.keytab',
-        owner => $hdp-hive::params::hive_user
-      }
-    }
-
-    #installs package, creates user, sets configuration
-    class{ 'hdp-hive' : 
-      service_state => $service_state,
-      server        => true
-    } 
-  
-    Hdp-Hive::Configfile<||>{hive_server_host => $hdp::params::host_address}
-
-    class { 'hdp-hive::service' :
-      ensure => $service_state,
-      service_type => "metastore"
-    }
-  
-    #top level does not need anchors
-    Class['hdp-hive'] -> Class['hdp-hive::service']
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/mysql-connector.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/mysql-connector.pp
deleted file mode 100644
index 77a7f70..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/mysql-connector.pp
+++ /dev/null
@@ -1,45 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hive::mysql-connector()
-{
-  include hdp-hive::params
-
-  $hive_lib = $hdp-hive::params::hive_lib
-  $target = "${hive_lib}/mysql-connector-java.jar"
-  
-  anchor { 'hdp-hive::mysql-connector::begin': }
-
-  hdp::package { 'mysql-connector-java':
-    require => Anchor['hdp-hive::mysql-connector::begin']
-  }
-
-  hdp::exec { 'hive mkdir -p ${artifact_dir} ;  cp /usr/share/java/mysql-connector-java.jar  ${target}':
-    command => "mkdir -p ${artifact_dir} ;  cp /usr/share/java/mysql-connector-java.jar  ${target}",
-    unless  => "test -f ${target}",
-    creates => $target,
-    path    => ["/bin","/usr/bin/"],
-    require => Hdp::Package['mysql-connector-java'],
-    notify  => Anchor['hdp-hive::mysql-connector::end'],
-  }
-
-  anchor { 'hdp-hive::mysql-connector::end': }
-
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/params.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/params.pp
deleted file mode 100644
index a63c4a8..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/params.pp
+++ /dev/null
@@ -1,76 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hive::params() inherits hdp::params
-{
-
-  #TODO: will move to globals
-  $hive_metastore_user_name = hdp_default("hadoop/hive-site/hive_metastore_user_name","dbusername")
-  $hive_metastore_user_passwd = hdp_default("hadoop/hive-site/hive_metastore_user_passwd","dbpassword")
-
-  ### users
-  $hive_user = $hdp::params::hive_user 
-
-  ### common
-  $hive_metastore_port = hdp_default("hive_metastore_port",9083)
-  $hive_lib = hdp_default("hive_lib","/usr/lib/hive/lib/") #TODO: should I remove and just use hive_dbroot
-  $hive_var_lib = hdp_default("hive_var_lib","/var/lib/hive")  
-  $hive_url = "jdbc:hive2://${hive_server_host}:10000"
-
-  ### hive-env
-  $hive_conf_dir = $hdp::params::hive_conf_dir
-
-  $hive_dbroot = hdp_default("hadoop/hive-env/hive_dbroot",$hive_lib)
-
-  $hive_log_dir = hdp_default("hadoop/hive-env/hive_log_dir","/var/log/hive")
-
-  $hive_pid_dir = hdp_default("hadoop/hive-env/hive_pid_dir","/var/run/hive")
-  $hive_pid = hdp_default("hadoop/hive-env/hive_pid","hive-server.pid")
-
-  
-  ### hive-site
-  $hive_database_name = hdp_default("hadoop/hive-site/hive_database_name","hive")
-
-  if ($hdp::params::security_enabled == true) {
-    $hive_metastore_sasl_enabled = true
-  } else {
-    $hive_metastore_sasl_enabled = false
-  }
-
-  $keytab_path = hdp_default("keytab_path","/etc/security/keytabs")
-
-  #TODO: using instead hive_server_host in hdp::params 
-  #$hive_metastore_server_host = hdp_default("hadoop/hive-site/hive_metastore_server_host")
-  
-  ###mysql connector
-  $download_url = $hdp::params::gpl_artifacts_download_url
-  $mysql_connector_url = "${download_url}/mysql-connector-java-5.1.18.zip"
-  $hive_aux_jars_path =  hdp_default("hive_aux_jars_path","/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar")
-
-  ##smoke test
-  $smoke_test_sql_file = 'hiveserver2.sql'
-  $smoke_test_script = 'hiveserver2Smoke.sh'
-
-  ##Starting hiveserver2
-  $start_hiveserver2_script = 'startHiveserver2.sh'
-
-  ##Starting metastore
-  $start_metastore_script = 'startMetastore.sh'
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/server.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/server.pp
deleted file mode 100644
index 7f4db1f..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/server.pp
+++ /dev/null
@@ -1,61 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hive::server(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits  hdp-hive::params
-{ 
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
-
-    $hdp::params::service_exists['hdp-hive::server'] = true
-
-    if ( ($service_state == 'installed_and_configured') and
-         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
-      $masterHost = $kerberos_adminclient_host[0]
-      hdp::download_keytab { 'hive_server_service_keytab' :
-        masterhost => $masterHost,
-        keytabdst => "${$keytab_path}/hive.service.keytab",
-        keytabfile => 'hive.service.keytab',
-        owner => $hdp-hive::params::hive_user
-      }
-    }
-
-    #installs package, creates user, sets configuration
-    class{ 'hdp-hive' : 
-      service_state => $service_state,
-      server        => true
-    } 
-  
-    Hdp-Hive::Configfile<||>{hive_server_host => $hdp::params::host_address}
-
-    class { 'hdp-hive::service' :
-      ensure => $service_state,
-      service_type => "hiveserver2"
-    }
-  
-    #top level does not need anchors
-    Class['hdp-hive'] -> Class['hdp-hive::service']
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/service.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/service.pp
deleted file mode 100644
index 7480a81..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/service.pp
+++ /dev/null
@@ -1,110 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hive::service(
-  $ensure,
-  $service_type
-)
-{
-  include hdp-hive::params
-  
-  $user = $hdp-hive::params::hive_user
-  $hadoop_home = $hdp::params::hadoop_home
-  $hive_log_dir = $hdp-hive::params::hive_log_dir
-
-  $start_hiveserver2_path = "/tmp/$start_hiveserver2_script"
-  $start_metastore_path = "/tmp/$start_metastore_script"
-
-  if ($service_type == 'metastore') {
-
-    $pid_file = "${hdp-hive::params::hive_pid_dir}/hive.pid" 
-    $cmd = "env HADOOP_HOME=${hadoop_home} JAVA_HOME=$hdp::params::java64_home $start_metastore_path ${hive_log_dir}/hive.out ${hive_log_dir}/hive.log $pid_file"
-    
-  } elsif ($service_type == 'hiveserver2') {
-    $pid_file = "$hive_pid_dir/$hive_pid" 
-    $cmd = "env JAVA_HOME=$hdp::params::java64_home $start_hiveserver2_path ${hive_log_dir}/hive-server2.out  ${hive_log_dir}/hive-server2.log $pid_file"
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_type}")
-  }
-
-
-  $no_op_test = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
-
-  if ($ensure == 'running') {
-    $daemon_cmd = "su - ${user} -c  '${cmd} '"
-  } elsif ($ensure == 'stopped') {
-    $daemon_cmd = "kill `cat $pid_file` >/dev/null 2>&1"
-  } else {
-    $daemon_cmd = undef
-  }
-
-  hdp-hive::service::directory { $hive_pid_dir : }
-  hdp-hive::service::directory { $hive_log_dir : }
-  hdp-hive::service::directory { $hive_var_lib : }
-
-  file { $start_hiveserver2_path:
-    ensure => present,
-    source => "puppet:///modules/hdp-hive/$start_hiveserver2_script",
-    mode => '0755',
-  }
-
-  file { $start_metastore_path:
-    ensure => present,
-    source => "puppet:///modules/hdp-hive/$start_metastore_script",
-    mode => '0755',
-  }
-
-  anchor{'hdp-hive::service::begin':} -> Hdp-hive::Service::Directory<||> -> anchor{'hdp-hive::service::end':}
-  
-  if ($daemon_cmd != undef) {
-    if ($ensure == 'running') {
-
-      $pid_file_state = 'present'
-      hdp::exec { $daemon_cmd:
-        command => $daemon_cmd,
-        unless  => $no_op_test
-      }
-    } elsif ($ensure == 'stopped') {
-      $pid_file_state = 'absent'
-      hdp::exec { $daemon_cmd:
-        command => $daemon_cmd,
-        onlyif  => $no_op_test
-      }
-    }
-
-    file { $pid_file:
-      ensure => $pid_file_state
-    }
-
-    Hdp-hive::Service::Directory<||> -> File[$start_metastore_path] -> File[$start_hiveserver2_path] -> Hdp::Exec[$daemon_cmd] -> File[$pid_file] -> Anchor['hdp-hive::service::end']
-  }
-}
-
-define hdp-hive::service::directory()
-{
-  hdp::directory_recursive_create { $name: 
-    owner => $hive_user,
-    mode => '0755',
-    service_state => $ensure,
-    force => true
-  }
-}
-
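
The $no_op_test guard above is a standard pid-file liveness idiom: the start command is skipped while the recorded pid maps to a live process, and the kill runs only when it does. Stated standalone, with an assumed pid-file location:

    PIDFILE=/var/run/hive/hive-server.pid    # assumed default from hdp-hive::params
    if ls "$PIDFILE" >/dev/null 2>&1 && ps $(cat "$PIDFILE") >/dev/null 2>&1; then
      echo "hiveserver2 is running"
    else
      echo "hiveserver2 is not running"
    fi
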
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/templates/hive-env.sh.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/templates/hive-env.sh.erb
deleted file mode 100644
index ac56a40..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hive/templates/hive-env.sh.erb
+++ /dev/null
@@ -1,55 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Set Hive and Hadoop environment variables here. These variables can be used
-# to control the execution of Hive. It should be used by admins to configure
-# the Hive installation (so that users do not have to set environment variables
-# or set command line parameters to get correct behavior).
-#
-# The hive service being invoked (CLI/HWI etc.) is available via the environment
-# variable SERVICE
-
-# Hive Client memory usage can be an issue if a large number of clients
-# are running at the same time. The flags below have been useful in
-# reducing memory usage:
-#
- if [ "$SERVICE" = "cli" ]; then
-   if [ -z "$DEBUG" ]; then
-     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
-   else
-     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
-   fi
- fi
-
-# The heap size of the JVM started by the hive shell script can be controlled via:
-
-export HADOOP_HEAPSIZE="<%=scope.function_hdp_template_var("::hdp::params::hadoop_heapsize")%>"
-export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
-
-# A larger heap size may be required when running queries over a large number of files or partitions.
-# By default the hive shell scripts use a heap size of 256 MB.  A larger heap size would also be
-# appropriate for hive server (hwi etc).
-
-
-# Set HADOOP_HOME to point to a specific hadoop install directory
-HADOOP_HOME=${HADOOP_HOME:-<%=scope.function_hdp_template_var("::hdp::params::hadoop_home")%>}
-
-# Hive Configuration Directory can be controlled by:
-export HIVE_CONF_DIR=<%=scope.function_hdp_template_var("hive_conf_dir")%>
-
-# Folder containing extra libraries required for hive compilation/execution can be controlled by:
-# export HIVE_AUX_JARS_PATH=
-export HIVE_AUX_JARS_PATH=<%=scope.function_hdp_template_var("hive_aux_jars_path")%>
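
Once ERB resolves the placeholders, the template emits plain shell; with an assumed 1024 MB heap and stock HDP paths, the tail of the rendered hive-env.sh would read roughly:

    export HADOOP_HEAPSIZE="1024"
    export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
    HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
    export HIVE_CONF_DIR=/etc/hive/conf
    export HIVE_AUX_JARS_PATH=/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar
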
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-kerberos/lib/facter/kadm_keytab.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-kerberos/lib/facter/kadm_keytab.rb
deleted file mode 100644
index 0b63bf9..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-kerberos/lib/facter/kadm_keytab.rb
+++ /dev/null
@@ -1,21 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-require 'facter'
-Facter.add("kadm_keytab") do
-  setcode do
-     %x{[ -f /etc/kadm5.keytab ] && base64 </etc/kadm5.keytab 2>/dev/null} + "\n"
-  end
-end
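
The fact publishes the kadmin keytab as base64 because facts travel as plain strings; the consumer simply reverses the encoding. A sketch of the receiving side, assuming the fact value has been captured in a shell variable:

    echo "$KADM_KEYTAB_FACT" | base64 -d > /etc/kadm5.keytab
    chmod 600 /etc/kadm5.keytab
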
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-kerberos/lib/puppet/parser/functions/kerberos_keytabs_input.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-kerberos/lib/puppet/parser/functions/kerberos_keytabs_input.rb
deleted file mode 100644
index 406cb2c..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-kerberos/lib/puppet/parser/functions/kerberos_keytabs_input.rb
+++ /dev/null
@@ -1,34 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-module Puppet::Parser::Functions
-  newfunction(:kerberos_keytabs_input, :type => :rvalue) do |args|
-    fqdn,node_components,keytab_map = args 
-    ndx_ret = Hash.new
-    node_components.each do |cmp|
-      if info = keytab_map[cmp]
-        keytab = info["keytab"]
-        ndx_ret[keytab] ||= {"keytab" => keytab, "principals" => info["primaries"].map{|p|"#{p}/#{fqdn}"}}
-      end
-    end
-    ndx_ret.values
-  end
-end
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/adminclient.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/adminclient.pp
deleted file mode 100644
index 9176de3..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/adminclient.pp
+++ /dev/null
@@ -1,140 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-class hdp-kerberos::adminclient(
-  $service_state = $hdp::params::cluster_service_state
-) inherits hdp-kerberos::params
-{
-  import 'hdp'
-
-  $kadmin_pw = "bla123"
-  $kadmin_admin = "kadmin/admin"
-  $realm = $kerberos_domain
-  $krb_realm = $kerberos_domain
-  $hdp::params::service_exists['hdp-kerberos::adminclient'] = true
-  $krbContext = {}
-  $krbContext['kadmin_pw'] = $kadmin_pw
-  $krbContext['kadmin_admin'] = $kadmin_admin
-  $krbContext['realm'] = $kerberos_domain
-  $krbContext['local_or_remote'] = 'remote'
-  $krbContext['principals_to_create'] = $principals_to_create
-  $krbContext['keytabs_to_create'] = $keytabs_to_create
-  $krbContext['principals_in_keytabs'] = $principals_in_keytabs
-
-  $kdc_server = $kdc_host
-
-  package { $package_name_client:
-    ensure => installed,
-  }
-  if ($hdp::params::service_exists['hdp-kerberos::server'] != true) {
-    file { "/etc/krb5.conf":
-      content => template('hdp-kerberos/krb5.conf'),
-      owner => "root",
-      group => "root",
-      mode => "0644",
-      require => Package[$package_name_client],
-    }
-  }
- 
-  if ($create_principals_keytabs == "yes") {
-    notice("Creating principals and keytabs..")
-    hdp-kerberos::principals_and_keytabs::services { 'alphabeta': 
-      krb_context => $krbContext
-    }
-  }
-}
-
-
-define hdp-kerberos::principals_and_keytabs::services(
-  $krb_context
-)
-{
-  include hdp-kerberos::params
-  $principals_to_create = $krb_context[principals_to_create]
-  $keytabs_to_create = $krb_context[keytabs_to_create]
-
-  hdp-kerberos::principal {$principals_to_create:
-    krb_context => $krb_context,
-  }
-  
-  hdp-kerberos::keytab { $keytabs_to_create :
-    krb_context => $krb_context,
-    require => Hdp-kerberos::Principal[$principals_to_create]
-  }
-}
-
-define hdp-kerberos::keytab(
-  $krb_context,
-  $keytable_file_owner = undef,
-  $keytable_file_mode  = undef
-)
-{
-  include hdp-kerberos::params
-  $keytab = $name
-  $realm = $krb_context['realm']
-  $local_or_remote = $krb_context['local_or_remote']
-  $kadmin_pw = $krb_context['kadmin_pw']
-  $kadmin_admin = $krb_context['kadmin_admin']
-  $kadmin_cmd = "kadmin -w ${kadmin_pw} -p ${kadmin_admin}"
-  if ($local_or_remote == 'local') {
-    $kadmin_cmd = 'kadmin.local'
-  }
-  $principals_in_keytabs = $krb_context['principals_in_keytabs']
-
-  $principals = $principals_in_keytabs[$keytab]
-  $principals_list = inline_template("<%= principals.join(' ')%>")
-  $keytab_filename = $keytab
-
-  exec { "xst ${keytab}":
-    command => "rm -rf ${keytab_filename}; ${kadmin_cmd} -q 'xst -k ${keytab_filename} ${principals_list}'; chown puppet:apache ${keytab_filename}",
-    unless  => "klist -kt ${keytab_filename} 2>/dev/null | grep -q ' ${principals[0]}'", #TODO may make more robust test
-    path   => $hdp-kerberos::params::exec_path,
-  }
-
-  if (($keytable_file_owner != undef) or ($keytable_file_mode != undef)) {
-    file { $keytab_filename:
-      owner => $keytable_file_owner,
-      mode  => $keytable_file_mode,
-      require => Exec["xst ${keytab}"]
-    }
-  }
-}
-
-define hdp-kerberos::principal(
-  $krb_context
-)
-{
-  include hdp-kerberos::params
-  $realm = $krb_context['realm']
-  $local_or_remote = $krb_context['local_or_remote']
-  $kadmin_pw = $krb_context['kadmin_pw']
-  $kadmin_admin = $krb_context['kadmin_admin']
-  $kadmin_cmd =  "kadmin -w ${kadmin_pw} -p ${kadmin_admin}"
-  if ($local_or_remote == 'local') {
-    $kadmin_cmd = 'kadmin.local'
-  }
-  $principal = $name
-  exec { "addprinc ${principal}":
-    command => "${kadmin_cmd} -q 'addprinc -randkey ${principal}'",
-    unless => "${kadmin_cmd} -q listprincs | grep -q '^${principal}$'",
-    path => $hdp-kerberos::params::exec_path
-  }
-}
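
The principal and keytab defines reduce to three kadmin operations per service principal. Run by hand against a remote KDC, using the literal password from this manifest and an assumed principal and host, the sequence is:

    kadmin -w bla123 -p kadmin/admin -q 'addprinc -randkey nn/host1.example.com'
    kadmin -w bla123 -p kadmin/admin -q 'xst -k /etc/security/keytabs/nn.service.keytab nn/host1.example.com'
    klist -kt /etc/security/keytabs/nn.service.keytab    # the unless guards grep this output
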
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/bigtop/init.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/bigtop/init.pp
deleted file mode 100644
index 54bef7d..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/bigtop/init.pp
+++ /dev/null
@@ -1,217 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-class kerberos {
-  class site {
-    # The following is our interface to the world. This is what we allow
-    # users to tweak from the outside (see tests/init.pp for a complete
-    # example) before instantiating target classes.
-    # Once we migrate to Puppet 2.6 we can potentially start using 
-    # parametrized classes instead.
-    $domain     = $kerberos_domain     ? { '' => inline_template('<%= domain %>'),
-                                           default => $kerberos_domain }
-    $realm      = $kerberos_realm      ? { '' => inline_template('<%= domain.upcase %>'),
-                                           default => $kerberos_realm } 
-    $kdc_server = $kerberos_kdc_server ? { '' => 'localhost',
-                                           default => $kerberos_kdc_server }
-    $kdc_port   = $kerberos_kdc_port   ? { '' => '88', 
-                                           default => $kerberos_kdc_port } 
-    $admin_port = 749 /* BUG: linux daemon packaging doesn't let us tweak this */
-
-    $keytab_export_dir = "/var/lib/bigtop_keytabs"
-
-    case $operatingsystem {
-        'ubuntu': {
-            $package_name_kdc    = 'krb5-kdc'
-            $service_name_kdc    = 'krb5-kdc'
-            $package_name_admin  = 'krb5-admin-server'
-            $service_name_admin  = 'krb5-admin-server'
-            $package_name_client = 'krb5-user'
-            $exec_path           = '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'
-            $kdc_etc_path        = '/etc/krb5kdc/'
-        }
-        # default assumes CentOS, Redhat 5 series (just look at how random it all looks :-()
-        default: {
-            $package_name_kdc    = 'krb5-server'
-            $service_name_kdc    = 'krb5kdc'
-            $package_name_admin  = 'krb5-libs'
-            $service_name_admin  = 'kadmin'
-            $package_name_client = 'krb5-workstation'
-            $exec_path           = '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/kerberos/sbin:/usr/kerberos/bin'
-            $kdc_etc_path        = '/var/kerberos/krb5kdc/'
-        }
-    }
-
-    file { "/etc/krb5.conf":
-      content => template('kerberos/krb5.conf'),
-      owner => "root",
-      group => "root",
-      mode => "0644",
-    }
-
-    @file { $keytab_export_dir:
-      ensure => directory,
-      owner  => "root",
-      group  => "root",
-    }
-
-    # Required for SPNEGO
-    @principal { "HTTP": 
-
-    }
-  }
-
-  class kdc inherits kerberos::site {
-    package { $package_name_kdc:
-      ensure => installed,
-    }
-
-    file { $kdc_etc_path:
-      ensure => directory,
-      owner => root,
-      group => root,
-      mode => "0700",
-      require => Package["$package_name_kdc"],
-    }
-    file { "${kdc_etc_path}/kdc.conf":
-      content => template('kerberos/kdc.conf'),
-      require => Package["$package_name_kdc"],
-      owner => "root",
-      group => "root",
-      mode => "0644",
-    }
-    file { "${kdc_etc_path}/kadm5.acl":
-      content => template('kerberos/kadm5.acl'),
-      require => Package["$package_name_kdc"],
-      owner => "root",
-      group => "root",
-      mode => "0644",
-    }
-
-    exec { "kdb5_util":
-      path => $exec_path,
-      command => "rm -f /etc/kadm5.keytab ; kdb5_util -P cthulhu -r ${realm} create -s && kadmin.local -q 'cpw -pw secure kadmin/admin'",
-      
-      creates => "${kdc_etc_path}/stash",
-
-      subscribe => File["${kdc_etc_path}/kdc.conf"],
-      # refreshonly => true, 
-
-      require => [Package["$package_name_kdc"], File["${kdc_etc_path}/kdc.conf"], File["/etc/krb5.conf"]],
-    }
-
-    service { $service_name_kdc:
-      ensure => running,
-      require => [Package["$package_name_kdc"], File["${kdc_etc_path}/kdc.conf"], Exec["kdb5_util"]],
-      subscribe => File["${kdc_etc_path}/kdc.conf"],
-      hasrestart => true,
-    }
-
-
-    class admin_server inherits kerberos::kdc {
-      $se_hack = "setsebool -P kadmind_disable_trans  1 ; setsebool -P krb5kdc_disable_trans 1"
-
-      package { "$package_name_admin":
-        ensure => installed,
-        require => Package["$package_name_kdc"],
-      } 
-  
-      service { "$service_name_admin":
-        ensure => running,
-        require => [Package["$package_name_admin"], Service["$service_name_kdc"]],
-        hasrestart => true,
-        restart => "${se_hack} ; service ${service_name_admin} restart",
-        start => "${se_hack} ; service ${service_name_admin} start",
-      }
-    }
-  }
-
-  class client inherits kerberos::site {
-    package { $package_name_client:
-      ensure => installed,
-    }
-  }
-
-  class server {
-    include kerberos::client
-
-    class { "kerberos::kdc": } 
-    ->
-    Class["kerberos::client"] 
-
-    class { "kerberos::kdc::admin_server": }
-    -> 
-    Class["kerberos::client"]
-  }
-
-  define principal {
-    require "kerberos::client"
-
-    realize(File[$kerberos::site::keytab_export_dir])
-
-    $principal = "$title/$::fqdn"
-    $keytab    = "$kerberos::site::keytab_export_dir/$title.keytab"
-
-    exec { "addprinc.$title":
-      path => $kerberos::site::exec_path,
-      command => "kadmin -w secure -p kadmin/admin -q 'addprinc -randkey $principal'",
-      unless => "kadmin -w secure -p kadmin/admin -q listprincs | grep -q $principal",
-      require => Package[$kerberos::site::package_name_client],
-    } 
-    ->
-    exec { "xst.$title":
-      path    => $kerberos::site::exec_path, 
-      command => "kadmin -w secure -p kadmin/admin -q 'xst -k $keytab $principal'",
-      unless  => "klist -kt $keytab 2>/dev/null | grep -q $principal",
-      require => File[$kerberos::site::keytab_export_dir],
-    }
-  }
-
-  define host_keytab($princs = undef, $spnego = disabled) {
-    $keytab = "/etc/$title.keytab"
-
-    $requested_princs = $princs ? { 
-      undef   => [ $title ],
-      default => $princs,
-    }
-
-    $internal_princs = $spnego ? {
-      /(true|enabled)/ => [ 'HTTP' ],
-      default          => [ ],
-    }
-    realize(Kerberos::Principal[$internal_princs])
-
-    $includes = inline_template("<%=
-      [requested_princs, internal_princs].flatten.map { |x|
-        \"rkt $kerberos::site::keytab_export_dir/#{x}.keytab\"
-      }.join(\"\n\")
-    %>")
-
-    kerberos::principal { $requested_princs:
-    }
-
-    exec { "ktinject.$title":
-      path     => $kerberos::site::exec_path,
-      command  => "/usr/bin/ktutil <<EOF
-        $includes
-        wkt $keytab
-EOF
-        chown $title $keytab",
-      creates => $keytab,
-      require => [ Kerberos::Principal[$requested_princs],
-                   Kerberos::Principal[$internal_princs] ],
-    }
-  }
-}
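
host_keytab merges several exported per-principal keytabs into a single per-host file through a ktutil here-document. Outside Puppet, the same merge for a hypothetical hdfs principal with SPNEGO enabled would be:

    ktutil <<EOF
    rkt /var/lib/bigtop_keytabs/hdfs.keytab
    rkt /var/lib/bigtop_keytabs/HTTP.keytab
    wkt /etc/hdfs.keytab
    EOF
    chown hdfs /etc/hdfs.keytab    # owner matches the define's $title
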
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/client.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/client.pp
deleted file mode 100644
index b77585f..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/client.pp
+++ /dev/null
@@ -1,50 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-class hdp-kerberos::client(
-  $service_state = $hdp::params::cluster_service_state
-) inherits hdp-kerberos::params
-{
-  import 'hdp'
-
-  $hdp::params::service_exists['hdp-kerberos::client'] = true
-
-  $kdc_server = $kdc_host
-  $krb_realm = $kerberos_domain
-  $realm = $kerberos_domain
-
-  if ($hdp::params::service_exists['hdp-kerberos::adminclient'] != true)  {
-    package { $package_name_client:
-      ensure => installed,
-    }
-  }
-
-  if (($hdp::params::service_exists['hdp-kerberos::server'] != true) and
-      ($hdp::params::service_exists['hdp-kerberos::adminclient'] != true) ) {
-    file { "/etc/krb5.conf":
-      content => template('hdp-kerberos/krb5.conf'),
-      owner => "root",
-      group => "root",
-      mode => "0644",
-      require => Package[$package_name_client],
-    }
-  }
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/init.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/init.pp
deleted file mode 100644
index 70ed6ef..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/init.pp
+++ /dev/null
@@ -1,25 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-class hdp-kerberos()
-{
-}
-
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/params.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/params.pp
deleted file mode 100644
index 0e0603a..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/params.pp
+++ /dev/null
@@ -1,70 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-class hdp-kerberos::params(
-) inherits hdp::params
-{
-  $domain  = 'hadoop.com'
-  $realm = inline_template('<%= @domain.upcase %>')
-  $kdc_server = $::fqdn
-  $kdc_port = 88
-  $keytab_export_base_dir = '/etc/security/'
-  $keytab_export_dir = "${keytab_export_base_dir}/keytabs"
-
-  $keytab_map = {
-    'hdp-hadoop::namenode' =>  
-      {keytab    => 'nn.service.keytab',
-       primaries => ['nn', 'host', 'HTTP']},
-    'hdp-hadoop::snamenode' =>  
-      {keytab    => 'nn.service.keytab',
-       primaries => ['nn', 'host', 'HTTP']},
-    'hdp-hadoop::datanode' =>  
-      {keytab    => 'dn.service.keytab',
-       primaries => ['dn']},
-    'hdp-hadoop::jobtracker' =>  
-      {keytab    => 'jt.service.keytab',
-       primaries => ['jt']},
-    'hdp-hadoop::tasktracker' =>  
-      {keytab    => 'tt.service.keytab',
-       primaries => ['tt']}
-  }
-
-  case $::operatingsystem {
-    'ubuntu': {
-      $package_name_kdc    = 'krb5-kdc'
-      $service_name_kdc    = 'krb5-kdc'
-      $package_name_admin  = 'krb5-admin-server'
-      $service_name_admin  = 'krb5-admin-server'
-      $package_name_client = 'krb5-user'
-      $exec_path           = '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'
-      $kdc_etc_path        = '/etc/krb5kdc/'
-     }
-     default: {
-       $package_name_kdc    = 'krb5-server'
-       $service_name_kdc    = 'krb5kdc'
-       $package_name_admin  = 'krb5-libs'
-       $service_name_admin  = 'kadmin'
-       $package_name_client = 'krb5-workstation' 
-       $exec_path           = '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/kerberos/sbin:/usr/kerberos/bin'
-       $kdc_etc_path        = '/var/kerberos/krb5kdc/'
-    }
-  }
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/server.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/server.pp
deleted file mode 100644
index ae2f421..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-kerberos/manifests/server.pp
+++ /dev/null
@@ -1,116 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-class hdp-kerberos::server(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-kerberos::params
-{ 
-  import 'hdp'
-
-  $hdp::params::service_exists['hdp-kerberos::server'] = true
-
-  $krb_realm = $kerberos_domain
-  $kadmin_pw = "bla123"
-  $kadmin_admin = "kadmin/admin"
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured']) {
-    # Install kdc server and client
-    package { $package_name_kdc:
-      ensure => installed
-    }
-
-    # set the realm
-    $realm = $krb_realm
-    # SUHAS: This should be set on all the nodes in addition to kdc server
-    file { "/etc/krb5.conf":
-      content => template('hdp-kerberos/krb5.conf'),
-      owner => "root",
-      group => "root",
-      mode => "0644",
-      require => Package[$package_name_kdc],
-      }
-
-    file { $kdc_etc_path:
-      ensure => directory,
-      owner => root,
-      group => root,
-      mode => "0700",
-      require => Package[$package_name_kdc],
-    }
-
-    file { "${kdc_etc_path}/kdc.conf":
-      content => template('hdp-kerberos/kdc.conf'),
-      require => Package["$package_name_kdc"],
-      owner => "root",
-      group => "root",
-      mode => "0644",
-    }
-
-    # SUHAS: kadm5.acl file template is missing in gsInsaller
-    # SUHAS: gsInstaller stops stopIptables at this point (sequence is not relevant here).
-    file { "${kdc_etc_path}/kadm5.acl":
-      content => template('hdp-kerberos/kadm5.acl'),
-      require => Package["$package_name_kdc"],
-      owner => "root",
-      group => "root",
-      mode => "0644",
-    }
-
-    exec { "kdb5_util":
-      path => $exec_path,
-      command => "rm -f ${kdc_etc_path}/kadm5.keytab; kdb5_util -P x86yzh12 -r ${realm} create -s && kadmin.local -q 'cpw -pw ${kadmin_pw} ${kadmin_admin}'",
-      creates => "${kdc_etc_path}/stash",
-      subscribe => File["${kdc_etc_path}/kdc.conf"],
-      require => [Package[$package_name_kdc], File["${kdc_etc_path}/kdc.conf"], File["/etc/krb5.conf"]]
-    }
-
-    # SUHAS: gsInstaller has checkconfig_on
-    exec { "chkconfig_krb5kdc_on":
-      path => $exec_path,
-      command => "chkconfig krb5kdc on",
-      require => [Package["$package_name_kdc"], File["${kdc_etc_path}/kdc.conf"], Exec["kdb5_util"]],
-    }
-    
-    # Start KDC Server
-    if ($service_state in ['running','stopped']) {
-      service { $service_name_kdc:
-        ensure => $service_state,
-        require => [Exec["chkconfig_krb5kdc_on"]],
-        subscribe => File["${kdc_etc_path}/kdc.conf"],
-        hasrestart => true,
-      }
-
-      # SUHAS: This is to be done on HMC not KDC Server??
-      $se_hack = "setsebool -P kadmind_disable_trans  1 ; setsebool -P krb5kdc_disable_trans 1"
-      service { $service_name_admin:
-        ensure => $service_state,
-        require => Service[$service_name_kdc],
-        hasrestart => true,
-        restart => "${se_hack} ; service ${service_name_admin} restart",
-        start => "${se_hack} ; service ${service_name_admin} start",
-      }
-    }
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
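
Stripped of the Puppet plumbing, the KDC bootstrap above is the standard MIT sequence; a sketch using this manifest's literals (the realm shown is an assumption):

    kdb5_util -P x86yzh12 -r EXAMPLE.COM create -s
    kadmin.local -q 'cpw -pw bla123 kadmin/admin'
    chkconfig krb5kdc on
    service krb5kdc start
    service kadmin start
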
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-kerberos/templates/kadm5.acl b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-kerberos/templates/kadm5.acl
deleted file mode 100644
index d91d076..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-kerberos/templates/kadm5.acl
+++ /dev/null
@@ -1,21 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This file is the access control list for krb5 administration.
-# After editing this file, run /etc/init.d/krb5-admin-server restart to activate the changes.
-# One common way to set up Kerberos administration is to grant any principal
-# ending in /admin full administrative rights.
-# This is enabled by the following line:
-*/admin *
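
The */admin * entry above grants every /admin principal all permissions. For comparison, kadm5.acl entries take the form "principal  permissions  [target]", so finer-grained rules are possible (hypothetical examples; EXAMPLE.COM is a placeholder realm):

    */admin@EXAMPLE.COM   *     # any /admin principal: full rights
    joe@EXAMPLE.COM       il    # inquire (i) and list (l) only
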
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-kerberos/templates/kdc.conf b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-kerberos/templates/kdc.conf
deleted file mode 100644
index 18f15d5..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-kerberos/templates/kdc.conf
+++ /dev/null
@@ -1,36 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-default_realm = <%= realm %>
-
-[kdcdefaults]
-    v4_mode = nopreauth
-    kdc_ports = 0
-    kdc_tcp_ports = 88 
-
-[realms]
-    <%= realm %> = {
-        acl_file = <%= kdc_etc_path %>/kadm5.acl
-        dict_file = /usr/share/dict/words
-        admin_keytab = <%= kdc_etc_path %>/kadm5.keytab
-        supported_enctypes = des3-hmac-sha1:normal arcfour-hmac:normal des-hmac-sha1:normal des-cbc-md5:normal des-cbc-crc:normal des-cbc-crc:v4 des-cbc-crc:afs3
-        kdc_ports = <%= kdc_port %>
-        database_name = <%= kdc_etc_path %>/principal
-        key_stash_file = <%= kdc_etc_path %>/stash
-        max_life = 10h 0m 0s
-        max_renewable_life = 7d 0h 0m 0s
-        master_key_type = des3-hmac-sha1
-        default_principal_flags = +preauth
-    }
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-kerberos/templates/krb5.conf b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-kerberos/templates/krb5.conf
deleted file mode 100644
index 04ce978..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-kerberos/templates/krb5.conf
+++ /dev/null
@@ -1,47 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-[libdefaults]
-    default_realm = <%= realm %>
-    dns_lookup_realm = false
-    dns_lookup_kdc = false
-    ticket_lifetime = 24h
-    forwardable = yes
-    udp_preference_limit = 1
-
-[realms]
-    <%= realm %> = {
-        kdc = <%= kdc_server %>:<%= kdc_port %>
-        admin_server = <%= kdc_server %>:749
-        default_domain = <%= domain %>
-    }
-
-[appdefaults] 
-    pam = {
-        debug = false 
-        ticket_lifetime = 36000 
-        renew_lifetime = 36000 
-        forwardable = true 
-        krb4_convert = false 
-    }
-
-[domain_realm]
-    .<%= domain %> = <%= realm %>
-    <%= domain %> = <%= realm %>
-
-[logging]
-    default = FILE:/var/log/krb5libs.log
-    kdc = FILE:/var/log/krb5kdc.log
-    admin_server = FILE:/var/log/kadmind.log
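
Rendered with illustrative values (realm EXAMPLE.COM, kdc_server kdc.example.com, kdc_port 88, domain example.com; placeholders, not module defaults), the realm mapping sections of the template above come out as:

    [realms]
        EXAMPLE.COM = {
            kdc = kdc.example.com:88
            admin_server = kdc.example.com:749
            default_domain = example.com
        }

    [domain_realm]
        .example.com = EXAMPLE.COM
        example.com = EXAMPLE.COM
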
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-kerberos/tests/init.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-kerberos/tests/init.pp
deleted file mode 100644
index fb8434b..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-kerberos/tests/init.pp
+++ /dev/null
@@ -1,31 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-$kerberos_domain = "krb.test.com"
-$kerberos_realm = "KRB.TEST.COM"
-$kerberos_kdc_server = "localhost"
-$kerberos_kdc_port = 88
-# the following turns a node into a fully functional KDC 
-include kerberos::kdc
-# the following opens up KDC principal database for remote
-# administration (it really should be optional, but it is
-# required for now in order to make kerberos::client::host_keytab
-# work)
-include kerberos::kdc::admin_server
-
-# the following turns a node into a Kerberos client host with...
-include kerberos::client
-# ...an optional host_keytab for as many services as you want:
-kerberos::client::host_keytab { ["host", "hdfs", "mapred"]: }
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-monitor-webserver/manifests/init.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-monitor-webserver/manifests/init.pp
deleted file mode 100644
index 1c30e75..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-monitor-webserver/manifests/init.pp
+++ /dev/null
@@ -1,85 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-monitor-webserver( 
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp::params 
-{
-
-  
-  if hdp_is_empty($hdp::params::services_names[httpd]) {
-    hdp_fail("There is no service name for service httpd")
-  }
-  else {
-    $service_name_by_os = $hdp::params::services_names[httpd]
-  }
-
-  if hdp_is_empty($service_name_by_os[$hdp::params::hdp_os_type]) {
-
-    if hdp_is_empty($service_name_by_os['ALL']) {
-      hdp_fail("There is no service name for service httpd")
-    }
-    else {
-      $service_name = $service_name_by_os['ALL']
-    }
-  }
-  else {
-    $service_name = $service_name_by_os[$hdp::params::hdp_os_type]
-  }
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured', 'restart']) {
-
-
-    if ($service_state == 'running') {
-      #TODO: refine by using notify/subscribe
-      hdp::exec { 'monitor webserver start':
-        command => "/etc/init.d/$service_name start",
-        unless => "/etc/init.d/$service_name status",
-        require => Hdp::Package['httpd']
-        
-      } 
-
-      hdp::package { 'httpd' :
-        size   => 64
-      }
-    } elsif ($service_state == 'stopped') {
-      # stop should never fail if process already stopped
-      hdp::exec { 'monitor webserver stop':
-        command => "/etc/init.d/$service_name stop"
-      }
-    } elsif ($service_state == 'restart') {
-      hdp::exec { 'monitor webserver restart':
-        command => "/etc/init.d/$service_name restart",
-        require => Hdp::Package['httpd']
-      }
-      hdp::package { 'httpd' :
-        size   => 64
-      }
-    } elsif ($service_state == 'installed_and_configured') {
-      hdp::package { 'httpd' :
-        size   => 64
-      }
-    }
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
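
When $service_state is 'running', the hdp::exec guard above boils down to a status-then-start call against the resolved init script, roughly (sketch; httpd as the resolved $service_name is an assumption for RHEL-family hosts):

    service_name=httpd   # per-OS lookup result; assumption
    /etc/init.d/$service_name status || /etc/init.d/$service_name start

The unless clause gives the same effect declaratively: Puppet only runs the start command when the status probe exits non-zero.
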
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-mysql/files/addMysqlUser.sh b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-mysql/files/addMysqlUser.sh
deleted file mode 100644
index 710ce58..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-mysql/files/addMysqlUser.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-mysqldservice=$1
-mysqldbuser=$2
-mysqldbpasswd=$3
-mysqldbhost=$4
-
-service $mysqldservice start
-echo "Adding user $mysqldbuser@$mysqldbhost and $mysqldbuser@localhost"
-mysql -u root -e "CREATE USER '$mysqldbuser'@'$mysqldbhost' IDENTIFIED BY '$mysqldbpasswd';"
-mysql -u root -e "CREATE USER '$mysqldbuser'@'localhost' IDENTIFIED BY '$mysqldbpasswd';"
-mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'$mysqldbhost';"
-mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'localhost';"
-mysql -u root -e "flush privileges;"
-service $mysqldservice stop
\ No newline at end of file
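
The script takes the MySQL service name, the user to create, that user's password, and the grant host, in that order, matching the invocation in hdp-mysql/manifests/server.pp below (values illustrative):

    sh /tmp/addMysqlUser.sh mysqld hive hivepassword hiveserver.example.com

It deliberately starts mysqld, applies the CREATE USER and GRANT statements for both the named host and localhost, flushes privileges, and stops the service again, leaving service lifecycle management to Puppet.
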
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-mysql/manifests/init.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-mysql/manifests/init.pp
deleted file mode 100644
index 2af7c53..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-mysql/manifests/init.pp
+++ /dev/null
@@ -1,22 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-mysql(){}
-
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-mysql/manifests/params.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-mysql/manifests/params.pp
deleted file mode 100644
index 9528934..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-mysql/manifests/params.pp
+++ /dev/null
@@ -1,26 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-mysql::params() inherits hdp-hive::params
-{
-   $db_name = "$hdp-hive::params::hive_database_name"
-   $db_user = $hdp-hive::params::hive_metastore_user_name
-   $db_pw = $hdp-hive::params::hive_metastore_user_passwd
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-mysql/manifests/server.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-mysql/manifests/server.pp
deleted file mode 100644
index 016bdd0..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-mysql/manifests/server.pp
+++ /dev/null
@@ -1,130 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-mysql::server(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits  hdp-mysql::params
-{ 
-  if ($service_state in ['no_op','uninstalled']) {
-   } elsif ($service_state in ['running','stopped','installed_and_configured']) {
-   
-    $db_user = $hdp-mysql::params::db_user
-    $db_pw = $hdp-mysql::params::db_pw
-    $db_name = $hdp-mysql::params::db_name
-    $host = $hdp::params::hive_mysql_host 
-
-    anchor { 'hdp-mysql::server::begin':}
-
-    hdp::package { 'mysql' :
-      size   => 64,
-      require   => Anchor['hdp-mysql::server::begin']
-    }
-
-
-    if ($hdp::params::hdp_os_type == "suse") {
-      # On SUSE, create a symlink from the default mysqld pid file to the expected /var/run location
-	  
-      hdp::directory_recursive_create {'/var/run/mysqld/':
-        require => Hdp::Package['mysql']
-      }
-	  
-      file { '/var/run/mysqld/mysqld.pid':
-        ensure => 'link',
-        target => '/var/lib/mysql/mysqld.pid',
-        require => Hdp::Directory_recursive_create['/var/run/mysqld/'],
-      }
-    }
-
-
-    if hdp_is_empty($hdp::params::services_names[mysql]) {
-      hdp_fail("There is no service name for service mysql")
-    }
-    else {
-      $service_name_by_os = $hdp::params::services_names[mysql]
-    }
-
-    if hdp_is_empty($service_name_by_os[$hdp::params::hdp_os_type]) {
-      
-      if hdp_is_empty($service_name_by_os['ALL']) {
-        hdp_fail("There is no service name for service mysql")
-      }
-      else {
-        $service_name = $service_name_by_os['ALL']
-      }
-    }
-    else {
-      $service_name = $service_name_by_os[$hdp::params::hdp_os_type]
-    }
-
-    $mysqld_state = $service_state ? {
-     'running' => 'running',
-      default =>  'stopped',
-    }
-
-    if ($hdp::params::hdp_os_type == "suse") {
-      service {$service_name:
-        ensure => $mysqld_state,
-        require => File['/var/run/mysqld/mysqld.pid'],
-        notify  => File['/tmp/addMysqlUser.sh']
-      }
-    } else {
-        service {$service_name:
-        ensure => $mysqld_state,
-        require => Hdp::Package['mysql'],
-        notify  => File['/tmp/addMysqlUser.sh']
-      }
-    }
-
-
-    if ($service_state == 'installed_and_configured') {
-
-      file {'/tmp/addMysqlUser.sh':
-        ensure => present,
-        source => "puppet:///modules/hdp-mysql/addMysqlUser.sh",
-        mode => '0755',
-        require => Service[$service_name],
-        notify => Exec['/tmp/addMysqlUser.sh'],
-      }
-      # We start the DB and add a user
-      exec { '/tmp/addMysqlUser.sh':
-        command   => "sh /tmp/addMysqlUser.sh ${service_name} ${db_user} ${db_pw} ${host}",
-        tries     => 3,
-        try_sleep => 5,
-        require   => File['/tmp/addMysqlUser.sh'],
-        path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-        notify   => Anchor['hdp-mysql::server::end'],
-        logoutput => "true",
-      }
-    } else {
-      # In the other service states the add-user script is not needed, so remove the temporary file
-      file {'/tmp/addMysqlUser.sh':
-        ensure => absent,
-        require => Service[$service_name],
-        notify => Anchor['hdp-mysql::server::end'],
-      }
-    }
-
-    anchor { 'hdp-mysql::server::end':}
-
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
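
The tries => 3 / try_sleep => 5 pair on the exec retries the user-creation script up to three times, five seconds apart; approximately equivalent to this shell sketch:

    attempt=1
    until sh /tmp/addMysqlUser.sh "$service_name" "$db_user" "$db_pw" "$host"; do
      [ "$attempt" -ge 3 ] && exit 1   # give up after the third failure
      attempt=$((attempt + 1))
      sleep 5                          # try_sleep between attempts
    done

This absorbs the window where mysqld has been started but is not yet accepting connections.
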
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_aggregate.php b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_aggregate.php
deleted file mode 100644
index f4063fb..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_aggregate.php
+++ /dev/null
@@ -1,243 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-  $options = getopt ("f:s:n:w:c:t:");
-  if (!array_key_exists('t', $options) || !array_key_exists('f', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options) || !array_key_exists('s', $options)) {
-    usage();
-    exit(3);
-  }
-  $status_file=$options['f'];
-  $status_code=$options['s'];
-  $type=$options['t'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  if ($type == "service" && !array_key_exists('n', $options)) {
-    echo "Service description not provided -n option\n";
-    exit(3);
-  }
-  if ($type == "service") {
-    $service_name=$options['n'];
-    /* echo "DESC: " . $service_name . "\n"; */
-  }
-
-  $result = array();
-  $status_file_content = file_get_contents($status_file);
-
-  $counts;
-  if ($type == "service") {
-    $counts=query_alert_count($status_file_content, $service_name, $status_code);
-  } else {
-    $counts=query_host_count($status_file_content, $status_code);
-  }
-
-  if ($counts['total'] == 0) {
-    $percent = 0;
-  } else {
-    $percent = ($counts['actual']/$counts['total'])*100;
-  }
-  if ($percent >= $crit) {
-    echo "CRITICAL: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
-    exit (2);
-  }
-  if ($percent >= $warn) {
-    echo "WARNING: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
-    exit (1);
-  }
-  echo "OK: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
-  exit(0);
-
-
-  # Functions
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -f <status_file_path> -t type(host/service) -s <status_codes> -n <service description> -w <warn%> -c <crit%>\n";
-  }
-
-  /* Query host count */
-  function query_host_count ($status_file_content, $status_code) {
-    $num_matches = preg_match_all("/hoststatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $hostcounts_object = array ();
-    $total_hosts = 0;
-    $hosts = 0;
-    foreach ($matches[0] as $object) {
-      $total_hosts++;
-      if (getParameter($object, "current_state") == $status_code) {
-        $hosts++;
-      }
-    }
-    $hostcounts_object['total'] = $total_hosts;
-    $hostcounts_object['actual'] = $hosts;
-    return $hostcounts_object;
-  }
-
-  /* Query Alert counts */
-  function query_alert_count ($status_file_content, $service_name, $status_code) {
-    $num_matches = preg_match_all("/servicestatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $alertcounts_objects = array ();
-    $total_alerts=0;
-    $alerts=0;
-    foreach ($matches[0] as $object) {
-      if (getParameter($object, "service_description") == $service_name) {
-        $total_alerts++;
-        if (getParameter($object, "current_state") >= $status_code) {
-          $alerts++;
-        }
-      }
-    }
-    $alertcounts_objects['total'] = $total_alerts;
-    $alertcounts_objects['actual'] = $alerts;
-    return $alertcounts_objects;
-  }
-
-  function get_service_type($service_description)
-  {
-    $pieces = explode("::", $service_description);
-    switch ($pieces[0]) {
-      case "NAMENODE":
-        $pieces[0] = "HDFS";
-        break;
-      case "JOBTRACKER":
-        $pieces[0] = "MAPREDUCE";
-        break;
-      case "HBASEMASTER":
-        $pieces[0] = "HBASE";
-        break;
-      case "SYSTEM":
-      case "HDFS":
-      case "MAPREDUCE":
-      case "HBASE":
-        break;
-      default:
-        $pieces[0] = "UNKNOWN";
-    }
-    return $pieces[0];
-  }
-
-  function getParameter($object, $key)
-  {
-    $pattern="/\s" . $key . "[\s= ]*([\S, ]*)\n/";
-    $num_mat = preg_match($pattern, $object, $matches);
-    $value = "";
-    if ($num_mat) {
-      $value = $matches[1];
-    }
-    return $value;
-  }
-
-function indent($json) {
-
-    $result      = '';
-    $pos         = 0;
-    $strLen      = strlen($json);
-    $indentStr   = '  ';
-    $newLine     = "\n";
-    $prevChar    = '';
-    $outOfQuotes = true;
-
-    for ($i=0; $i<=$strLen; $i++) {
-
-        // Grab the next character in the string.
-        $char = substr($json, $i, 1);
-
-        // Are we inside a quoted string?
-        if ($char == '"' && $prevChar != '\\') {
-            $outOfQuotes = !$outOfQuotes;
-
-        // If this character is the end of an element,
-        // output a new line and indent the next line.
-        } else if(($char == '}' || $char == ']') && $outOfQuotes) {
-            $result .= $newLine;
-            $pos --;
-            for ($j=0; $j<$pos; $j++) {
-                $result .= $indentStr;
-            }
-        }
-
-        // Add the character to the result string.
-        $result .= $char;
-
-        // If the last character was the beginning of an element,
-        // output a new line and indent the next line.
-        if (($char == ',' || $char == '{' || $char == '[') && $outOfQuotes) {
-            $result .= $newLine;
-            if ($char == '{' || $char == '[') {
-                $pos ++;
-            }
-
-            for ($j = 0; $j < $pos; $j++) {
-                $result .= $indentStr;
-            }
-        }
-
-        $prevChar = $char;
-    }
-
-    return $result;
-}
-
-/* JSON document format */
-/*
-{
-  "programstatus":{
-    "last_command_check":"1327385743"
-  },
-  "hostcounts":{
-    "up_nodes":"",
-    "down_nodes":""
-  },
-  "hoststatus":[
-    {
-      "host_name"="ip-10-242-191-48.ec2.internal",
-      "current_state":"0",
-      "last_hard_state":"0",
-      "plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms",
-      "last_check":"1327385564",
-      "current_attempt":"1",
-      "last_hard_state_change":"1327362079",
-      "last_time_up":"1327385574",
-      "last_time_down":"0",
-      "last_time_unreachable":"0",
-      "is_flapping":"0",
-      "last_check":"1327385574",
-      "servicestatus":[
-      ]
-    }
-  ],
-  "servicestatus":[
-    {
-      "service_type":"HDFS",  {HBASE, MAPREDUCE, HIVE, ZOOKEEPER}
-      "service_description":"HDFS Current Load",
-      "host_name"="ip-10-242-191-48.ec2.internal",
-      "current_attempt":"1",
-      "current_state":"0",
-      "plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms",
-      "last_hard_state_change":"1327362079",
-      "last_time_ok":"1327385479",
-      "last_time_warning":"0",
-      "last_time_unknown":"0",
-      "last_time_critical":"0",
-      "last_check":"1327385574",
-      "is_flapping":"0"
-    }
-  ]
-}
-*/
-
-?>
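
A worked example of the threshold arithmetic above: for -t service, the plugin counts every servicestatus block whose service_description matches -n as total, and those with current_state >= the -s status code as actual. With 20 matching alerts of which 3 are critical, affected/total is 15%, so -w 10% -c 50% exits WARNING (1). Invocation shape (status file path and service description are illustrative):

    php check_aggregate.php -f /var/nagios/status.dat -t service \
        -n "DATANODE::Process down" -s 2 -w 10% -c 50%
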
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_cpu.pl b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_cpu.pl
deleted file mode 100644
index a5680f7..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_cpu.pl
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/usr/bin/perl -w 
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-use strict;
-use Net::SNMP;
-use Getopt::Long;
-
-# Variable
-my $base_proc = "1.3.6.1.2.1.25.3.3.1";   
-my $proc_load = "1.3.6.1.2.1.25.3.3.1.2"; 
-my $o_host = 	undef;
-my $o_community = undef;
-my $o_warn=	undef;
-my $o_crit=	undef;
-my $o_timeout = 15;
-my $o_port = 161;
-
-sub Usage {
-    print "Usage: $0 -H <host> -C <snmp_community> -w <warn level> -c <crit level>\n";
-}
-
-Getopt::Long::Configure ("bundling");
-GetOptions(
-  'H:s'   => \$o_host,	
-  'C:s'   => \$o_community,	
-  'c:s'   => \$o_crit,        
-  'w:s'   => \$o_warn
-          );
-if (!defined $o_host || !defined $o_community || !defined $o_crit || !defined $o_warn) {
-  Usage();
-  exit 3;
-}
-$o_warn =~ s/\%//g; 
-$o_crit =~ s/\%//g;
-alarm ($o_timeout);
-$SIG{'ALRM'} = sub {
- print "Unable to contact host: $o_host\n";
- exit 3;
-};
-
-# Connect to host
-my ($session,$error);
-($session, $error) = Net::SNMP->session(
-		-hostname  => $o_host,
-		-community => $o_community,
-		-port      => $o_port,
-		-timeout   => $o_timeout
-	  );
-if (!defined($session)) {
-   printf("Error opening session: %s.\n", $error);
-   exit 3;
-}
-
-my $exit_val=undef;
-my $resultat =  (Net::SNMP->VERSION < 4) ?
-	  $session->get_table($base_proc)
-	: $session->get_table(Baseoid => $base_proc);
-
-if (!defined($resultat)) {
-   printf("ERROR: Description table : %s.\n", $session->error);
-   $session->close;
-   exit 3;
-}
-
-$session->close;
-
-my ($cpu_used,$ncpu)=(0,0);
-foreach my $key ( keys %$resultat) {
-  if ($key =~ /$proc_load/) {
-    $cpu_used += $$resultat{$key};
-    $ncpu++;
-  }
-}
-
-if ($ncpu==0) {
-  print "Can't find CPU usage information : UNKNOWN\n";
-  exit 3;
-}
-
-$cpu_used /= $ncpu;
-
-print "$ncpu CPU, ", $ncpu==1 ? "load" : "average load";
-printf(" %.1f%%",$cpu_used);
-$exit_val=0;
-
-if ($cpu_used > $o_crit) {
- print " > $o_crit% : CRITICAL\n";
- $exit_val=2;
-} else {
-  if ($cpu_used > $o_warn) {
-   print " > $o_warn% : WARNING\n";
-   $exit_val=1;
-  }
-}
-print " < $o_warn% : OK\n" if ($exit_val eq 0);
-exit $exit_val;
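
Typical invocation (host and community string are illustrative):

    perl check_cpu.pl -H dn1.example.com -C public -w 90 -c 95

The plugin walks the HOST-RESOURCES-MIB hrProcessorLoad column (OID 1.3.6.1.2.1.25.3.3.1.2), averages the per-core loads, and compares that mean percentage against the warning and critical thresholds.
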
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_datanode_storage.php b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_datanode_storage.php
deleted file mode 100644
index 079d904..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_datanode_storage.php
+++ /dev/null
@@ -1,63 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes a call to the master node, gets the jmx-json document,
- * and checks the storage capacity remaining on local datanode storage
- */
-
-  $options = getopt ("h:p:w:c:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options) 
-      || !array_key_exists('c', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-
-  /* Get the json document */
-  $json_string = file_get_contents("http://".$host.":".$port."/jmx?qry=Hadoop:service=DataNode,name=FSDatasetState-DS-*");
-  $json_array = json_decode($json_string, true);
-  $object = $json_array['beans'][0];
-  $cap_remain = $object['Remaining']; /* Total capacity - any external files created in data directories by non-hadoop app */
-  $cap_total = $object['Capacity']; /* Capacity used by all data partitions minus space reserved for M/R */
-  $percent_full = ($cap_total - $cap_remain)/$cap_total * 100;
-
-  $out_msg = "Capacity:[" . $cap_total . 
-             "], Remaining Capacity:[" . $cap_remain . 
-             "], percent_full:[" . $percent_full  . "]";
-  
-  if ($percent_full > $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($percent_full > $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%>\n";
-  }
-?>
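
Worked example: with Capacity = 100 GB and Remaining = 15 GB, percent_full is 85, so -w 80 -c 90 exits WARNING. Invocation (50075 is the usual DataNode HTTP port in this Hadoop line, but treat it as illustrative):

    php check_datanode_storage.php -h dn1.example.com -p 50075 -w 80% -c 90%
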
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_hdfs_blocks.php b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_hdfs_blocks.php
deleted file mode 100644
index bc34fe8..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_hdfs_blocks.php
+++ /dev/null
@@ -1,72 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes a call to the master node, gets the jmx-json document,
- * and checks whether the corrupt or missing block % is > threshold
- * check_jmx -H hostaddress -p port -w 1% -c 1%
- */
-
-  $options = getopt ("h:p:w:c:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-
-  /* Get the json document */
-  $json_string = file_get_contents("http://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemMetrics");
-  $json_array = json_decode($json_string, true);
-  $m_percent = 0;
-  $c_percent = 0;
-  $object = $json_array['beans'][0];
-  $missing_blocks = $object['MissingBlocks'];
-  $corrupt_blocks = $object['CorruptBlocks'];
-  $total_blocks = $object['BlocksTotal'];
-  if($total_blocks == 0) {
-    $m_percent = 0;
-    $c_percent = 0;
-  } else {
-    $m_percent = ($missing_blocks/$total_blocks)*100;
-    $c_percent = ($corrupt_blocks/$total_blocks)*100;
-  }
-  $out_msg = "corrupt_blocks:<" . $corrupt_blocks .
-             ">, missing_blocks:<" . $missing_blocks .
-             ">, total_blocks:<" . $total_blocks . ">";
-
-  if ($m_percent > $crit || $c_percent > $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($m_percent > $warn || $c_percent > $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%>\n";
-  }
-?>
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_hdfs_capacity.php b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_hdfs_capacity.php
deleted file mode 100644
index 31ed463..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_hdfs_capacity.php
+++ /dev/null
@@ -1,68 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes a call to the master node, gets the jmx-json document,
- * and checks whether the % HDFS capacity used >= warn and critical limits.
- * check_jmx -H hostaddress -p port -w 1 -c 1
- */
-
-  $options = getopt ("h:p:w:c:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-
-  /* Get the json document */
-  $json_string = file_get_contents("http://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemState");
-  $json_array = json_decode($json_string, true);
-  $percent = 0;
-  $object = $json_array['beans'][0];
-  $CapacityUsed = $object['CapacityUsed'];
-  $CapacityRemaining = $object['CapacityRemaining'];
-  $CapacityTotal = $CapacityUsed + $CapacityRemaining;
-  if($CapacityTotal == 0) {
-    $percent = 0;
-  } else {
-    $percent = ($CapacityUsed/$CapacityTotal)*100;
-  }
-  $out_msg = "DFSUsedGB:<" . round ($CapacityUsed/(1024*1024*1024),1) .
-             ">, DFSTotalGB:<" . round($CapacityTotal/(1024*1024*1024),1) . ">";
-
-  if ($percent >= $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($percent >= $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%>\n";
-  }
-?>
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_hive_metastore_status.sh b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_hive_metastore_status.sh
deleted file mode 100644
index 0762bcc..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_hive_metastore_status.sh
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# The URI is of the form thrift://<hostname>:<port>
-HOST=$1
-PORT=$2
-JAVA_HOME=$3
-SEC_ENABLED=$4
-if [[ "$SEC_ENABLED" == "true" ]]; then
-  NAGIOS_KEYTAB=$5
-  NAGIOS_USER=$6
-  out1=`/usr/kerberos/bin/kinit -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-HCAT_URL=-Dhive.metastore.uris="thrift://$HOST:$PORT"
-export JAVA_HOME=$JAVA_HOME
-out=`hcat $HCAT_URL -e "show databases" 2>&1`
-if [[ "$?" -ne 0 ]]; then
-  echo "CRITICAL: Error accessing hive-metaserver status [$out]";
-  exit 2;
-fi
-echo "OK: Hive metaserver status OK";
-exit 0;
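
Argument order is host, port, JAVA_HOME, security flag, then keytab and principal when security is on; for example (paths and principal are illustrative):

    sh check_hive_metastore_status.sh ms1.example.com 9083 /usr/jdk64/jdk1.6.0_31 false
    sh check_hive_metastore_status.sh ms1.example.com 9083 /usr/jdk64/jdk1.6.0_31 true \
        /etc/security/keytabs/nagios.service.keytab nagios

9083 is the conventional metastore Thrift port; the check simply runs hcat -e "show databases" against it and reports CRITICAL on any non-zero exit.
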
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_name_dir_status.php b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_name_dir_status.php
deleted file mode 100644
index 3f38c98..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_name_dir_status.php
+++ /dev/null
@@ -1,59 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes a call to the namenode, gets the jmx-json document,
- * and checks NameDirStatuses to find any offline (failed) directories
- * check_jmx -H hostaddress -p port
- */
-
-  $options = getopt ("h:p:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-
-  /* Get the json document */
-  $json_string = file_get_contents("http://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo");
-  $json_array = json_decode($json_string, true);
-  $object = $json_array['beans'][0];
-  if ($object['NameDirStatuses'] == "") {
-    echo "WARNING: Namenode directory status not available via http://".$host.":".$port."/jmx url" . "\n";
-    exit(1);
-  }
-  $NameDirStatuses = json_decode($object['NameDirStatuses'], true);
-  $failed_dir_count = count($NameDirStatuses['failed']);
-  $out_msg = "CRITICAL: Offline Namenode directories: ";
-  if ($failed_dir_count > 0) {
-    foreach ($NameDirStatuses['failed'] as $key => $value) {
-      $out_msg = $out_msg . $key . ":" . $value . ", ";
-    }
-    echo $out_msg . "\n";
-    exit (2);
-  }
-  echo "OK: All Namenode directories are active" . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port\n";
-  }
-?>
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_oozie_status.sh b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_oozie_status.sh
deleted file mode 100644
index 1c04f77..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_oozie_status.sh
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# OOZIE_URL is of the form http://<hostname>:<port>/oozie
-HOST=$1
-PORT=$2
-JAVA_HOME=$3
-SEC_ENABLED=$4
-if [[ "$SEC_ENABLED" == "true" ]]; then
-  NAGIOS_KEYTAB=$5
-  NAGIOS_USER=$6
-  out1=`/usr/kerberos/bin/kinit -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-OOZIE_URL="http://$HOST:$PORT/oozie"
-export JAVA_HOME=$JAVA_HOME
-out=`oozie admin -oozie ${OOZIE_URL} -status 2>&1`
-if [[ "$?" -ne 0 ]]; then 
-  echo "CRITICAL: Error accessing oozie server status [$out]";
-  exit 2;
-fi
-echo "OK: Oozie server status [$out]";
-exit 0;
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_rpcq_latency.php b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_rpcq_latency.php
deleted file mode 100644
index 9ed07cc..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_rpcq_latency.php
+++ /dev/null
@@ -1,67 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes a call to the master node, gets the jmx-json document,
- * and checks the RPC wait time in the queue, RpcQueueTime_avg_time
- * check_rpcq_latency -h hostaddress -p port -t ServiceName -w 1 -c 1
- * Warning and Critical values are in seconds
- * Service Name = JobTracker, NameNode
- */
-
-  $options = getopt ("h:p:w:c:n:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options) || !array_key_exists('n', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-  $master=$options['n'];
-  $warn=$options['w'];
-  $crit=$options['c'];
-
-  /* Get the json document */
-  $json_string = file_get_contents("http://".$host.":".$port."/jmx?qry=Hadoop:service=".$master.",name=RpcActivityForPort*");
-  $json_array = json_decode($json_string, true);
-  $object = $json_array['beans'][0];
-
-  $RpcQueueTime_avg_time = round($object['RpcQueueTime_avg_time'], 2); 
-  $RpcProcessingTime_avg_time = round($object['RpcProcessingTime_avg_time'], 2);
-
-  $out_msg = "RpcQueueTime_avg_time:<" . $RpcQueueTime_avg_time .
-             "> Secs, RpcProcessingTime_avg_time:<" . $RpcProcessingTime_avg_time .
-             "> Secs";
-
-  if ($RpcQueueTime_avg_time >= $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($RpcQueueTime_avg_time >= $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -n <JobTracker/NameNode> -w <warn_in_sec> -c <crit_in_sec>\n";
-  }
-?>
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_templeton_status.sh b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_templeton_status.sh
deleted file mode 100644
index c06753c..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_templeton_status.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# out='{"status":"ok","version":"v1"}<status_code:200>'
-HOST=$1
-PORT=$2
-VERSION=$3
-SEC_ENABLED=$4
-if [[ "$SEC_ENABLED" == "true" ]]; then 
-  NAGIOS_KEYTAB=$5
-  NAGIOS_USER=$6
-  out1=`/usr/kerberos/bin/kinit -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-regex="^.*\"status\":\"ok\".*<status_code:200>$"
-out=`curl --negotiate -u : -s -w '<status_code:%{http_code}>' http://$HOST:$PORT/templeton/$VERSION/status 2>&1`
-if [[ $out =~ $regex ]]; then 
-  echo "OK: Templeton server status [$out]";
-  exit 0;
-fi
-echo "CRITICAL: Error accessing Templeton server, status [$out]";
-exit 2;
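
Because curl's -w format appends the HTTP status code to the body, a healthy WebHCat (Templeton) server yields exactly the string shown in the header comment; e.g. against the conventional port 50111 (hostname illustrative):

    $ curl --negotiate -u : -s -w '<status_code:%{http_code}>' \
        http://ws1.example.com:50111/templeton/v1/status
    {"status":"ok","version":"v1"}<status_code:200>

Anything else, including an ok body with a non-200 status code, fails the regex and exits CRITICAL.
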
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_webui.sh b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_webui.sh
deleted file mode 100644
index 59ca473..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/files/check_webui.sh
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/bin/bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-checkurl () {
-  url=$1
-  curl $url -o /dev/null
-  echo $?
-}
-
-service=$1
-host=$2
-
-if [[ -z "$service" || -z "$host" ]]; then
-  echo "UNKNOWN: Invalid arguments; Usage: check_webui.sh service_name host_name";
-  exit 3;
-fi
-
-case "$service" in
-
-jobtracker) 
-    jtweburl="http://$host:50030"
-    if [[ `checkurl "$jtweburl"` -ne 0 ]]; then 
-      echo "WARNING: Jobtracker web UI not accessible : $jtweburl";
-      exit 1;
-    fi
-    ;;
-namenode)
-    nnweburl="http://$host:50070"
-    if [[ `checkurl "$nnweburl"` -ne 0 ]] ; then 
-      echo "WARNING: NameNode web UI not accessible : $nnweburl";
-      exit 1;
-    fi
-    ;;
-jobhistory)
-    jhweburl="http://$host:51111/jobhistoryhome.jsp"
-    if [[ `checkurl "$jhweburl"` -ne 0 ]]; then 
-      echo "WARNING: Jobhistory web UI not accessible : $jhweburl";
-      exit 1;
-    fi
-    ;;
-hbase)
-    hbaseweburl="http://$host:60010/master-status"
-    jhweburl="http://domU-12-31-39-16-DC-FB.compute-1.internal:51111/jobhistoryhome.jsp"
-    if [[ `checkurl "$hbaseweburl"` -ne 0 ]]; then 
-      echo "WARNING: HBase Master web UI not accessible : $hbaseweburl"; 
-      exit 1;
-    fi
-    ;;
-*) echo "UNKNOWN: Invalid service name [$service], valid options [jobtracker|jobhistory|hbase|namenode]"
-   exit 3
-   ;;
-esac
-
-echo "OK: Successfully accessed $service Web UI"
-exit 0;
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/lib/puppet/parser/functions/hdp_nagios_all_hosts.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/lib/puppet/parser/functions/hdp_nagios_all_hosts.rb
deleted file mode 100644
index 658c2ae..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/lib/puppet/parser/functions/hdp_nagios_all_hosts.rb
+++ /dev/null
@@ -1,35 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-module Puppet::Parser::Functions
-  newfunction(:hdp_nagios_all_hosts, :type => :rvalue) do 
-    hg_defs = function_hdp_template_var("hostgroup_defs")
-    ret = Array.new
-    if hg_defs.kind_of?(Hash)
-      hg_defs.each_value do |info|
-        h = function_hdp_host(info['host_member_info'])
-        unless function_hdp_is_empty(h)
-          ret += [h].flatten 
-        end
-      end
-    end
-    ret.uniq
-  end
-end
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/lib/puppet/parser/functions/hdp_nagios_compute_target_hosts.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/lib/puppet/parser/functions/hdp_nagios_compute_target_hosts.rb
deleted file mode 100644
index 3a81d62..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/lib/puppet/parser/functions/hdp_nagios_compute_target_hosts.rb
+++ /dev/null
@@ -1,42 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-module Puppet::Parser::Functions
-  newfunction(:hdp_nagios_compute_target_hosts, :type => :rvalue) do |args|
-    args = function_hdp_args_as_array(args)
-    monitored_hosts = args[0]
-    component_name_mapping = args[1]
-    ret = Hash.new
-    monitored_hosts.each do |host_info|
-      hostname = host_info.keys.first
-      cmps = host_info.values.first
-      cmps.each do |cmp|
-        next unless host_var_info = component_name_mapping[cmp]
-        host_var = host_var_info['host_var']
-        if host_var_info['type'] == 'array'
-          (ret[host_var] ||= Array.new) << hostname
-        elsif host_var_info['type'] == 'scalar'
-          ret[host_var] = hostname
-        end
-      end
-    end
-    ret
-  end
-end
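
A worked example of the mapping (hypothetical data): given monitored_hosts of [{"h1" => ["NAMENODE"]}, {"h2" => ["DATANODE"]}, {"h3" => ["DATANODE"]}] and a component_name_mapping of {"NAMENODE" => {"host_var" => "namenode_host", "type" => "scalar"}, "DATANODE" => {"host_var" => "slave_hosts", "type" => "array"}}, the function returns {"namenode_host" => "h1", "slave_hosts" => ["h2", "h3"]}.
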
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/lib/puppet/parser/functions/hdp_nagios_members_exist.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/lib/puppet/parser/functions/hdp_nagios_members_exist.rb
deleted file mode 100644
index 58fd0c2..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/lib/puppet/parser/functions/hdp_nagios_members_exist.rb
+++ /dev/null
@@ -1,34 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-module Puppet::Parser::Functions
-  newfunction(:hdp_nagios_members_exist, :type => :rvalue) do |args|
-    args = function_hdp_args_as_array(args)
-    host_type = args[0]
-    hg_defs = function_hdp_template_var("hostgroup_defs")
-    if hg_defs.kind_of?(Hash)
-      #TODO: see if needed    Puppet::Parser::Functions.autoloader.loadall
-      member_info = (hg_defs[host_type]||{})['host_member_info']
-      member_info and not function_hdp_is_empty(function_hdp_host(member_info))
-    else
-      nil
-    end
-  end
-end
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/lib/puppet/parser/functions/hdp_nagios_target_hosts.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/lib/puppet/parser/functions/hdp_nagios_target_hosts.rb
deleted file mode 100644
index 5d777af..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/lib/puppet/parser/functions/hdp_nagios_target_hosts.rb
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-module Puppet::Parser::Functions
-  newfunction(:hdp_nagios_target_hosts, :type => :rvalue) do |args|
-    args = function_hdp_args_as_array(args)
-    host_types = args[0]
-    #TODO: see if needed    Puppet::Parser::Functions.autoloader.loadall
-    host_types.map { |t| function_hdp_host(t) }.map { |h| function_hdp_is_empty(h) ? [] : [h].flatten }.flatten
-  end
-end
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/init.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/init.pp
deleted file mode 100644
index 313ea90..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/init.pp
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-nagios(){}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/nagios/service_check.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/nagios/service_check.pp
deleted file mode 100644
index c3b57c3..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/nagios/service_check.pp
+++ /dev/null
@@ -1,36 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-nagios::nagios::service_check()
-{
-
-  anchor { 'hdp-nagios::nagios::service_check::begin':}
-
-  exec { 'nagios':
-    command   => "/etc/init.d/nagios status | grep 'is running'",
-    tries     => 3,
-    try_sleep => 5,
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    before    => Anchor['hdp-nagios::nagios::service_check::end'],
-    logoutput => true
-  }
-
-  anchor{ 'hdp-nagios::nagios::service_check::end':}
-}
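
[Editor's note] The exec above retries the status command rather than failing on one slow startup. A rough plain-Ruby equivalent of the tries/try_sleep semantics, assuming the same init-script command:

    cmd = "/etc/init.d/nagios status | grep 'is running'"
    ok = false
    3.times do |attempt|            # tries => 3
      ok = system(cmd)              # true when grep finds 'is running'
      break if ok
      sleep 5 unless attempt == 2   # try_sleep => 5 between attempts
    end
    raise 'nagios service check failed' unless ok
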
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/params.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/params.pp
deleted file mode 100644
index d628897..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/params.pp
+++ /dev/null
@@ -1,66 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-nagios::params() inherits hdp::params
-{
-
-  $nagios_user = "nagios"
-  $nagios_group = hdp_default("smoke_user_group","nagios")
-  
-  $conf_dir = hdp_default("nagios_conf_dir","/etc/nagios")
-
-  $plugins_dir = "/usr/lib64/nagios/plugins"
-  $eventhandlers_dir = "/usr/lib64/nagios/eventhandlers"  # Does not exist yet
-  $nagios_pid_dir = "/var/run/nagios"
-
-  $nagios_obj_dir = hdp_default("nagios_obj_dir","/etc/nagios/objects")
-  $nagios_var_dir = hdp_default("nagios_var_dir","/var/nagios")
-  $nagios_rw_dir = hdp_default("nagios_rw_dir","/var/nagios/rw")
-  $nagios_host_cfg = hdp_default("nagios_host_cfg","${nagios_obj_dir}/hadoop-hosts.cfg")
-  $nagios_hostgroup_cfg = hdp_default("nagios_hostgroup_cfg","${nagios_obj_dir}/hadoop-hostgroups.cfg")
-  $nagios_servicegroup_cfg = hdp_default("nagios_servicegroup_cfg","${nagios_obj_dir}/hadoop-servicegroups.cfg")
-  $nagios_service_cfg = hdp_default("nagios_service_cfg","${nagios_obj_dir}/hadoop-services.cfg")
-  $nagios_command_cfg = hdp_default("nagios_command_cfg","${nagios_obj_dir}/hadoop-commands.cfg")
-  $nagios_resource_cfg = hdp_default("nagios_resource_cfg","${conf_dir}/resource.cfg")
-
-  $nagios_web_login = hdp_default("nagios_web_login","nagiosadmin")
-  $nagios_web_password = hdp_default("nagios_web_password","admin")
-  
-  $dfs_data_dir = $hdp::params::dfs_data_dir
-
-  $check_result_path = hdp_default("nagios_check_result_path","/var/nagios/spool/checkresults")
-   
-  $nagios_contact = hdp_default("nagios/nagios-contacts/nagios_contact","monitor\\@monitor.com")
-
-  $hostgroup_defs = {
-    namenode => {host_member_info => 'namenode_host'},
-    snamenode => {host_member_info => 'snamenode_host'},
-    slaves => {host_member_info => 'slave_hosts'},
-    nagios-server => {host_member_info => 'nagios_server_host'},
-    jobtracker  => {host_member_info => 'jtnode_host'},
-    ganglia-server => {host_member_info => 'ganglia_server_host'},
-    zookeeper-servers => {host_member_info => 'zookeeper_hosts'},
-    hbasemaster => {host_member_info => 'hbase_master_host'},
-    hiveserver => {host_member_info => 'hive_server_host'},
-    region-servers => {host_member_info => 'hbase_rs_hosts'},
-    oozie-server => {host_member_info => 'oozie_server'},
-    webhcat-server => {host_member_info => 'webhcat_server_host'}
-  }
-}
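
[Editor's note] Most values above go through hdp_default(key, default), a lookup-with-fallback against Ambari-supplied configuration. A simplified Ruby approximation using an explicit hash in place of the Puppet scope (the real function also understands slash-scoped keys such as "nagios/nagios-contacts/nagios_contact"):

    def hdp_default(config, key, default = nil)
      value = config[key]
      value.nil? || value.to_s.empty? ? default : value
    end

    config = { 'nagios_conf_dir' => '/etc/nagios-custom' }
    p hdp_default(config, 'nagios_conf_dir', '/etc/nagios')        # => "/etc/nagios-custom"
    p hdp_default(config, 'nagios_obj_dir', '/etc/nagios/objects') # => "/etc/nagios/objects"
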
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server.pp
deleted file mode 100644
index 7f6a080..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server.pp
+++ /dev/null
@@ -1,230 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-nagios::server(
-  $service_state = $hdp::params::cluster_service_state
-) inherits hdp-nagios::params
-{
-  $nagios_var_dir = $hdp-nagios::params::nagios_var_dir
-  $nagios_rw_dir = $hdp-nagios::params::nagios_rw_dir
-  $nagios_config_dir = $hdp-nagios::params::conf_dir
-  $plugins_dir = $hdp-nagios::params::plugins_dir
-  $nagios_obj_dir = $hdp-nagios::params::nagios_obj_dir
-  $check_result_path = $hdp-nagios::params::check_result_path
-
-
-  if hdp_is_empty($hdp::params::pathes[nagios_p1_pl]) {
-    hdp_fail("There is no path to p1.pl file for nagios")
-  }
-  else {
-    $nagios_p1_pl_by_os = $hdp::params::pathes[nagios_p1_pl]
-  }
-
-  if hdp_is_empty($nagios_p1_pl_by_os[$hdp::params::hdp_os_type]) {
-    if hdp_is_empty($nagios_p1_pl_by_os['ALL']) {
-      hdp_fail("There is no path to p1.pl file for nagios")
-    }
-    else {
-      $nagios_p1_pl = $nagios_p1_pl_by_os['ALL']
-    }
-  }
-  else {
-    $nagios_p1_pl = $nagios_p1_pl_by_os[$hdp::params::hdp_os_type]
-  }
-
-
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['uninstalled']) {
-    class { 'hdp-nagios::server::packages' : 
-      service_state => uninstalled
-    }
-
-    hdp::exec { "rm -f /var/nagios/rw/nagios.cmd" :
-      command => "rm -f /var/nagios/rw/nagios.cmd",
-      unless => "test ! -e  /var/nagios/rw/nagios.cmd"
-    }
-
-    hdp::exec { "rm -rf /tmp/hadoop-nagios" :
-      command => "rm -rf /tmp/hadoop-nagios",
-      unless => "test ! -e  /tmp/hadoop-nagios"
-    }
-
-    hdp::directory { $nagios_config_dir:
-      service_state => $service_state,
-      force => true
-    }
-
-    hdp::directory { $plugins_dir:
-      service_state => $service_state,
-      force => true
-    }
-
-    hdp::directory { $nagios_obj_dir:
-      service_state => $service_state,
-      force => true
-    }
-
-    hdp::directory_recursive_create { $nagios_pid_dir:
-      service_state => $service_state,
-      force => true
-    }
-
-    hdp::directory { $nagios_var_dir:
-      service_state => $service_state,
-      force => true
-    }
-
-     Class['hdp-nagios::server::packages'] -> Exec['rm -f /var/nagios/rw/nagios.cmd'] -> Hdp::Directory[$nagios_config_dir] -> Hdp::Directory[$plugins_dir] -> Hdp::Directory[$nagios_obj_dir] ->  Hdp::Directory_recursive_create[$nagios_pid_dir] -> Hdp::Directory[$nagios_var_dir]
-
-  } elsif ($service_state in ['running','stopped','installed_and_configured']) {
-    class { 'hdp-nagios::server::packages' : service_state => $service_state}
-
-    hdp::directory { $nagios_config_dir:
-      service_state => $service_state,
-      force => true
-    }
-
-    hdp::directory { $plugins_dir:
-      service_state => $service_state,
-      force => true
-    }
-
-    hdp::directory { $nagios_obj_dir:
-      service_state => $service_state,
-      force => true
-    }
-
-	hdp::directory_recursive_create { $nagios_pid_dir:
-      service_state => $service_state,
-      owner => $nagios_user,
-      group => $nagios_group,
-      ensure => "directory",
-      mode => '0755',
-      force => true
-    }
-
-
-    hdp::directory_recursive_create { $nagios_var_dir:
-      service_state => $service_state,
-      force => true,
-      owner => $hdp-nagios::params::nagios_user,
-      group => $hdp-nagios::params::nagios_group
-    }
-
-    hdp::directory_recursive_create { $check_result_path:
-      service_state => $service_state,
-      force => true,
-      owner => $hdp-nagios::params::nagios_user,
-      group => $hdp-nagios::params::nagios_group
-    }
-
-    hdp::directory_recursive_create { $nagios_rw_dir:
-      service_state => $service_state,
-      force => true,
-      owner => $hdp-nagios::params::nagios_user,
-      group => $hdp-nagios::params::nagios_group
-    }
-	
-    if ($service_state in ['installed_and_configured','running']) {
-      $webserver_state = 'restart'
-    }
-    # httpd is never stopped here, so no webserver_state is set for 'stopped'
-
-    class { 'hdp-monitor-webserver': service_state => $webserver_state}
-
-
-    class { 'hdp-nagios::server::config': 
-      notify => Class['hdp-nagios::server::services']
-    }
-
-    class { 'hdp-nagios::server::enable_snmp': }
-
-    class { 'hdp-nagios::server::web_permissions': }
-
-    class { 'hdp-nagios::server::services': ensure => $service_state}
-	
-
-    Class['hdp-nagios::server::packages'] -> Class['hdp-nagios::server::enable_snmp'] -> Hdp::Directory[$nagios_config_dir] -> Hdp::Directory[$plugins_dir] -> Hdp::Directory_recursive_create[$nagios_pid_dir] ->
-    Hdp::Directory[$nagios_obj_dir] -> Hdp::Directory_Recursive_Create[$nagios_var_dir] ->
-    Hdp::Directory_Recursive_Create[$check_result_path] -> Hdp::Directory_Recursive_Create[$nagios_rw_dir] ->
-    Class['hdp-nagios::server::config'] -> Class['hdp-nagios::server::web_permissions'] -> Class['hdp-nagios::server::services'] -> Class['hdp-monitor-webserver']
-
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-class hdp-nagios::server::web_permissions()
-{
-  $web_login = $hdp-nagios::params::nagios_web_login
-
-  if hdp_is_empty($hdp::params::cmds[htpasswd]) {
-    hdp_fail("There is no htpasswd command mapping")
-  }
-  else {
-    $htpasswd_cmd_by_os = $hdp::params::cmds[htpasswd]
-  }
-
-  if hdp_is_empty($htpasswd_cmd_by_os[$hdp::params::hdp_os_type]) {
-    if hdp_is_empty($htpasswd_cmd_by_os['ALL']) {
-      hdp_fail("There is no htpasswd command mapping")
-    }
-    else {
-      $htpasswd_cmd = $htpasswd_cmd_by_os['ALL']
-    }
-  }
-  else {
-    $htpasswd_cmd = $htpasswd_cmd_by_os[$hdp::params::hdp_os_type]
-  }
-
-  $cmd = "$htpasswd_cmd -c -b /etc/nagios/htpasswd.users ${web_login} ${hdp-nagios::params::nagios_web_password}"
-  $test = "grep ${web_login} /etc/nagios/htpasswd.users"
-  hdp::exec { $cmd :
-    command => $cmd,
-    unless => $test
-  }
-}
-
-class hdp-nagios::server::services($ensure)
-{
-  if ($ensure in ['running','stopped']) {
-    service { 'nagios': ensure => $ensure}
-    anchor{'hdp-nagios::server::services::begin':} ->  Service['nagios'] ->  anchor{'hdp-nagios::server::services::end':}
-  }
-}
-
-class hdp-nagios::server::enable_snmp() {
-
-  exec { "enable_snmp":
-    command => "service snmpd start; chkconfig snmpd on",
-    path    => "/usr/local/bin/:/bin/:/sbin/",
-  }
-
-}
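
[Editor's note] server.pp resolves OS-specific values twice with the same pattern (the p1.pl path and the htpasswd command): try the current hdp_os_type, fall back to an 'ALL' entry, fail if neither exists. The lookup in plain Ruby, with illustrative paths and the manifest's own spelling of "pathes":

    def resolve_by_os(mapping, os_type)
      raise 'no mapping given' if mapping.nil? || mapping.empty?
      mapping[os_type] || mapping['ALL'] ||
        raise("no entry for #{os_type} and no ALL fallback")
    end

    pathes = { 'nagios_p1_pl' => { 'ALL'  => '/usr/bin/p1.pl',
                                   'suse' => '/usr/lib/nagios/p1.pl' } }
    p resolve_by_os(pathes['nagios_p1_pl'], 'centos6') # => "/usr/bin/p1.pl"
    p resolve_by_os(pathes['nagios_p1_pl'], 'suse')    # => "/usr/lib/nagios/p1.pl"
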
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server/config.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server/config.pp
deleted file mode 100644
index f765e02..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server/config.pp
+++ /dev/null
@@ -1,76 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-nagios::server::config()
-{
-
-  $host_cfg = $hdp-nagios::params::nagios_host_cfg
-  $nagios_lookup_daemon_str = $hdp::params::nagios_lookup_daemon_strs[$hdp::params::hdp_os_type]
-  
-  hdp-nagios::server::configfile { 'nagios.cfg': conf_dir => $hdp-nagios::params::conf_dir }
-  hdp-nagios::server::configfile { 'resource.cfg': conf_dir => $hdp-nagios::params::conf_dir }
-  hdp-nagios::server::configfile { 'hadoop-hosts.cfg': }
-  hdp-nagios::server::configfile { 'hadoop-hostgroups.cfg': }
-  hdp-nagios::server::configfile { 'hadoop-servicegroups.cfg': }
-  hdp-nagios::server::configfile { 'hadoop-services.cfg': }
-  hdp-nagios::server::configfile { 'hadoop-commands.cfg': }
-  hdp-nagios::server::configfile { 'contacts.cfg': }
-
-  hdp-nagios::server::check { 'check_cpu.pl': }
-  hdp-nagios::server::check { 'check_datanode_storage.php': }
-  hdp-nagios::server::check { 'check_aggregate.php': }
-  hdp-nagios::server::check { 'check_hdfs_blocks.php': }
-  hdp-nagios::server::check { 'check_hdfs_capacity.php': }
-  hdp-nagios::server::check { 'check_rpcq_latency.php': }
-  hdp-nagios::server::check { 'check_webui.sh': }
-  hdp-nagios::server::check { 'check_name_dir_status.php': }
-  hdp-nagios::server::check { 'check_oozie_status.sh': }
-  hdp-nagios::server::check { 'check_templeton_status.sh': }
-  hdp-nagios::server::check { 'check_hive_metastore_status.sh': }
-
-  anchor{'hdp-nagios::server::config::begin':} -> Hdp-nagios::Server::Configfile<||> -> anchor{'hdp-nagios::server::config::end':}
-  Anchor['hdp-nagios::server::config::begin'] -> Hdp-nagios::Server::Check<||> -> Anchor['hdp-nagios::server::config::end']
-}
-
-
-###config file helper
-define hdp-nagios::server::configfile(
-  $owner = $hdp-nagios::params::nagios_user,
-  $conf_dir = $hdp-nagios::params::nagios_obj_dir,
-  $mode = undef
-) 
-{
-  
-  hdp::configfile { "${conf_dir}/${name}":
-    component      => 'nagios',
-    owner          => $owner,
-    mode           => $mode
-  }
-}
-
-define hdp-nagios::server::check()
-{
-  file { "${hdp-nagios::params::plugins_dir}/${name}":
-    source => "puppet:///modules/hdp-nagios/${name}", 
-    mode => '0755'
-  }
-}
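
[Editor's note] Each configfile resource above lands in the Nagios objects directory unless a conf_dir override is given (as for nagios.cfg and resource.cfg). A small sketch of the resulting target paths, using the default directories from params.pp:

    conf_dir = '/etc/nagios'
    obj_dir  = '/etc/nagios/objects'
    conf_dir_overrides = { 'nagios.cfg' => conf_dir, 'resource.cfg' => conf_dir }
    %w[nagios.cfg resource.cfg hadoop-hosts.cfg hadoop-hostgroups.cfg
       hadoop-servicegroups.cfg hadoop-services.cfg hadoop-commands.cfg
       contacts.cfg].each do |f|
      puts File.join(conf_dir_overrides.fetch(f, obj_dir), f)
    end
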
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server/packages.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server/packages.pp
deleted file mode 100644
index 5e42a0c..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server/packages.pp
+++ /dev/null
@@ -1,105 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-nagios::server::packages(
-  $service_state = $hdp::params::cluster_service_state
-)
-{
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['uninstalled']) {
-     hdp-nagios::server::package { ['nagios-server','nagios-fping','nagios-plugins','nagios-addons']:
-      ensure => 'uninstalled'
-    }
-  } elsif ($service_state in ['running','stopped','installed_and_configured']) {
-
-    hdp::package { 'perl':
-      ensure      => present,
-      java_needed => false
-    }
-
-    hdp::package { 'perl-Net-SNMP':
-      ensure      => present,
-      java_needed => false
-    }
-
-    hdp::package { 'nagios-server':
-      ensure      => present,
-      java_needed => false
-    }
-
-    hdp::package { 'nagios-fping':
-      ensure      => present,
-      java_needed => false
-    }
-
-    hdp::package { 'nagios-addons':
-      ensure      => present,
-      java_needed => false
-    }
-
-    hdp::package { 'nagios-plugins':
-      ensure      => present,
-      java_needed => false
-    }
-
-    hdp::package { 'nagios-php-pecl-json':
-      ensure      => present,
-      java_needed => false
-    }
-
-    debug("## state: ${service_state}")
-    if ($service_state == 'installed_and_configured') {
-
-      hdp::package::remove_pkg { 'hdp_mon_nagios_addons':
-        package_type => 'hdp_mon_nagios_addons'
-      }
-
-      hdp::package::remove_pkg { 'nagios-plugins':
-        package_type => 'nagios-plugins'
-      }
-
-      hdp::package::remove_pkg { 'nagios':
-        package_type => 'nagios'
-      }
-
-      debug("##Adding removing dep")
-      # Removing conflicting packages. Names of packages being removed are hardcoded and not resolved via hdp::params
-      Hdp::Package::Remove_pkg['hdp_mon_nagios_addons'] -> Hdp::Package::Remove_pkg['nagios-plugins'] -> Hdp::Package::Remove_pkg['nagios'] -> Hdp::Package['nagios-plugins']
-    }
-
-    Hdp::Package['nagios-plugins'] -> Hdp::Package['nagios-server'] -> Hdp::Package['nagios-fping'] -> Hdp::Package['nagios-addons'] -> Hdp::Package['nagios-php-pecl-json']
-  }
-}
-
-
-define hdp-nagios::server::package(
-  $ensure = present
-)
-{
-  hdp::package { $name: 
-    ensure      => $ensure,
-    java_needed => false
-  }
-}
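
[Editor's note] On installed_and_configured the class removes conflicting packages before installing, via the two ordering chains above. The intended flattened sequence, for reference:

    steps = [
      'remove hdp_mon_nagios_addons',
      'remove nagios-plugins',
      'remove nagios',
      'install nagios-plugins',
      'install nagios-server',
      'install nagios-fping',
      'install nagios-addons',
      'install nagios-php-pecl-json'
    ]
    steps.each_with_index { |step, i| puts format('%d. %s', i + 1, step) }
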
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/target.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/target.pp
deleted file mode 100644
index 4de784f..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/target.pp
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-nagios::target(){}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/contacts.cfg.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/contacts.cfg.erb
deleted file mode 100644
index e967457..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/contacts.cfg.erb
+++ /dev/null
@@ -1,76 +0,0 @@
-###############################################################################
-# CONTACTS.CFG - SAMPLE CONTACT/CONTACTGROUP DEFINITIONS
-#
-# Last Modified: 05-31-2007
-#
-# NOTES: This config file provides you with some example contact and contact
-#        group definitions that you can reference in host and service
-#        definitions.
-#       
-#        You don't need to keep these definitions in a separate file from your
-#        other object definitions.  This has been done just to make things
-#        easier to understand.
-#
-###############################################################################
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-
-###############################################################################
-###############################################################################
-#
-# CONTACTS
-#
-###############################################################################
-###############################################################################
-
-# Just one contact defined by default - the Nagios admin (that's you)
-# This contact definition inherits a lot of default values from the 'generic-contact' 
-# template which is defined elsewhere.
-
-define contact {
-        contact_name    <%=scope.function_hdp_template_var("nagios_web_login")%>    ; Short name of user
-        use             generic-contact                                             ; Inherit default values from generic-contact template (defined above)
-        alias           Nagios Admin                                                ; Full name of user
-        email           <%=scope.function_hdp_template_var("nagios_contact")%>      ; <<***** CHANGE THIS TO YOUR EMAIL ADDRESS ******
-}
-
-
-
-###############################################################################
-###############################################################################
-#
-# CONTACT GROUPS
-#
-###############################################################################
-###############################################################################
-
-# We only have one contact in this simple configuration file, so there is
-# no need to create more than one contact group.
-
-define contactgroup {
-        contactgroup_name       admins
-        alias                   Nagios Administrators
-        members                 <%=scope.function_hdp_template_var("nagios_web_login")%>
-}
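
[Editor's note] The <%= %> expressions above are resolved at catalog compile time against Ambari-supplied configuration. A self-contained sketch of the rendering step, with function_hdp_template_var replaced by a plain hash and illustrative default values:

    require 'erb'

    vars = { 'nagios_web_login' => 'nagiosadmin',
             'nagios_contact'   => 'monitor@monitor.com' }

    template = <<~TPL
      define contact {
              contact_name  <%= vars['nagios_web_login'] %>
              use           generic-contact
              email         <%= vars['nagios_contact'] %>
      }
    TPL
    puts ERB.new(template).result(binding)
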
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-commands.cfg.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-commands.cfg.erb
deleted file mode 100644
index aedbba6..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-commands.cfg.erb
+++ /dev/null
@@ -1,85 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-<% if scope.function_hdp_template_var("hdp_os_type") != "suse"%>
-# 'check_cpu' check remote cpu load
-define command {
-        command_name    check_cpu
-        command_line    $USER1$/check_cpu.pl -H $HOSTADDRESS$ -C hadoop -w $ARG1$ -c $ARG2$
-       }
-<% end %>
-
-# Check data node storage full 
-define command {
-        command_name    check_datanode_storage
-        command_line    php $USER1$/check_datanode_storage.php -h $HOSTADDRESS$ -p $ARG1$ -w $ARG2$ -c $ARG3$
-       }
-
-define command{
-        command_name    check_hdfs_blocks
-        command_line    php $USER1$/check_hdfs_blocks.php -h $HOSTADDRESS$ -p $ARG1$ -w $ARG2$ -c $ARG3$
-       }
-
-define command{
-        command_name    check_hdfs_capacity
-        command_line    php $USER1$/check_hdfs_capacity.php -h $HOSTADDRESS$ -p $ARG1$ -w $ARG2$ -c $ARG3$
-       }
-
-define command{
-        command_name    check_aggregate
-        command_line    php $USER1$/check_aggregate.php -f /var/nagios/status.dat -s 1 -t service -n $ARG1$ -w $ARG2$ -c $ARG3$
-       }
-
-define command{
-        command_name    check_rpcq_latency
-        command_line    php $USER1$/check_rpcq_latency.php -h $HOSTADDRESS$ -p $ARG2$ -n $ARG1$ -w $ARG3$ -c $ARG4$
-       }
-
-define command{
-        command_name    check_nagios
-        command_line    $USER1$/check_nagios -e $ARG1$ -F $ARG2$ -C $ARG3$ 
-       }
-
-define command{
-        command_name    check_webui
-        command_line    $USER1$/check_webui.sh $ARG1$ $HOSTADDRESS$
-       }
-
-define command{
-        command_name    check_name_dir_status
-        command_line    php $USER1$/check_name_dir_status.php -h $HOSTADDRESS$ -p $ARG1$
-       }
-
-define command{
-        command_name    check_oozie_status
-        command_line    $USER1$/check_oozie_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$
-       }
-
-define command{
-        command_name    check_templeton_status
-        command_line    $USER1$/check_templeton_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$
-       }
-
-define command{
-        command_name    check_hive_metastore_status
-        command_line    $USER1$/check_hive_metastore_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$
-       }
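
[Editor's note] In Nagios, a service's check_command string is split on '!': the first token selects the command definition and the remaining tokens fill $ARG1$..$ARGn$ in its command_line ($USER1$ is resolved separately from resource.cfg). A sketch of that expansion for one of the commands above:

    def expand(command_line, host, check_command)
      _name, *args = check_command.split('!')
      line = command_line.gsub('$HOSTADDRESS$', host)
      args.each_with_index { |arg, i| line = line.gsub("$ARG#{i + 1}$", arg) }
      line  # $USER1$ is left for Nagios to resolve from resource.cfg
    end

    cmd_line = 'php $USER1$/check_hdfs_capacity.php -h $HOSTADDRESS$ -p $ARG1$ -w $ARG2$ -c $ARG3$'
    puts expand(cmd_line, 'nn1.example.com', 'check_hdfs_capacity!50070!80%!90%')
    # => php $USER1$/check_hdfs_capacity.php -h nn1.example.com -p 50070 -w 80% -c 90%
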
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-hostgroups.cfg.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-hostgroups.cfg.erb
deleted file mode 100644
index 9bac137..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-hostgroups.cfg.erb
+++ /dev/null
@@ -1,20 +0,0 @@
-<% all_hosts = Array.new -%>
-<%scope.function_hdp_template_var("hostgroup_defs").each do |name,info|-%>
-<%members = scope.function_hdp_host(info['host_member_info'])-%>
-<%unless scope.function_hdp_is_empty(members) -%>
-<% all_hosts += [members].flatten-%>
-define hostgroup {
-        hostgroup_name  <%=name%>
-        alias           <%=name%>
-        members         <%=[members].flatten.join(',')%>
-}
-
-<%end-%>
-<%end%>
-<%unless all_hosts.empty?-%>
-define hostgroup {
-        hostgroup_name  all-servers
-        alias           All Servers
-        members         <%=all_hosts.uniq.join(',')%>
-}
-<%end%>
\ No newline at end of file
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-hosts.cfg.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-hosts.cfg.erb
deleted file mode 100644
index 4e97548..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-hosts.cfg.erb
+++ /dev/null
@@ -1,16 +0,0 @@
-<%scope.function_hdp_nagios_all_hosts().each do |host|-%>
-define host {
-        alias        <%=host%>
-        host_name    <%=host%>
-        use          linux-server
-        address      <%=host%>
-        check_interval         0.25
-        retry_interval         0.25
-        max_check_attempts     4
-        notifications_enabled     1
-        first_notification_delay  0     # Send notification soon after change in the hard state
-        notification_interval     0     # Send the notification once
-        notification_options      d,u,r
-}
-
-<%end%>
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-servicegroups.cfg.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-servicegroups.cfg.erb
deleted file mode 100644
index d31a0c6..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-servicegroups.cfg.erb
+++ /dev/null
@@ -1,36 +0,0 @@
-define servicegroup {
-  servicegroup_name  HDFS
-  alias  HDFS Checks
-}
-define servicegroup {
-  servicegroup_name  MAPREDUCE
-  alias  MAPREDUCE Checks
-}
-define servicegroup {
-  servicegroup_name  HBASE
-  alias  HBASE Checks
-}
-define servicegroup {
-  servicegroup_name  OOZIE
-  alias  OOZIE Checks
-}
-define servicegroup {
-  servicegroup_name  WEBHCAT
-  alias  WEBHCAT Checks
-}
-define servicegroup {
-  servicegroup_name  NAGIOS
-  alias  NAGIOS Checks
-}
-define servicegroup {
-  servicegroup_name  GANGLIA
-  alias  GANGLIA Checks
-}
-define servicegroup {
-  servicegroup_name  HIVE-METASTORE
-  alias  HIVE-METASTORE Checks
-}
-define servicegroup {
-  servicegroup_name  ZOOKEEPER
-  alias  ZOOKEEPER Checks
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-services.cfg.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-services.cfg.erb
deleted file mode 100644
index 5acd280..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-services.cfg.erb
+++ /dev/null
@@ -1,472 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-
-# NAGIOS SERVER Check (status log update)
-<%if scope.function_hdp_nagios_members_exist('nagios-server')-%>
-define service {
-        name                            hadoop-service
-        use                             generic-service
-        notification_options            w,u,c
-        first_notification_delay        0
-        notification_interval           0     # Send the notification once
-}
-
-define service {        
-        hostgroup_name          nagios-server        
-        use                     hadoop-service
-        service_description     NAGIOS::Nagios status log staleness
-        servicegroups           NAGIOS
-        check_command           check_nagios!10!/var/nagios/status.dat!<%=nagios_lookup_daemon_str%>
-        normal_check_interval   5
-        retry_check_interval    0.5
-        max_check_attempts      2
-}
-
-# NAGIOS SERVER HDFS Checks
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::Percent DataNodes storage full
-        servicegroups           HDFS
-        check_command           check_aggregate!"DATANODE::DataNode storage full"!10%!30%
-        normal_check_interval   2
-        retry_check_interval    1 
-        max_check_attempts      1
-}
-
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::Percent DataNodes down
-        servicegroups           HDFS
-        check_command           check_aggregate!"DATANODE::DataNode process down"!10%!30%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-
-# NAGIOS SERVER MAPREDUCE Checks
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     MAPREDUCE::Percent TaskTrackers down
-        servicegroups           MAPREDUCE
-        check_command           check_aggregate!"TASKTRACKER::TaskTracker process down"!10%!30%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-
-# NAGIOS SERVER ZOOKEEPER Checks
-<%if scope.function_hdp_nagios_members_exist('zookeeper-servers')-%>
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     ZOOKEEPER::Percent ZooKeeper Servers down
-        servicegroups           ZOOKEEPER
-        check_command           check_aggregate!"ZOOKEEPER::ZooKeeper Server process down"!35%!70%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-<%end-%>
-
-# NAGIOS SERVER HBASE Checks
-<%if scope.function_hdp_nagios_members_exist('hbasemaster')-%>
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HBASE::Percent RegionServers down
-        servicegroups           HBASE
-        check_command           check_aggregate!"REGIONSERVER::RegionServer process down"!10%!30%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-<%end-%>
-<%end-%>
-
-# GANGLIA SERVER Checks
-<%if scope.function_hdp_nagios_members_exist('ganglia-server')-%>
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia [gmetad] process down
-        servicegroups           GANGLIA
-        check_command           check_tcp!8651!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Collector [gmond] process down alert for slaves
-        servicegroups           GANGLIA
-        check_command           check_tcp!8660!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Collector [gmond] process down alert for NameNode
-        servicegroups           GANGLIA
-        check_command           check_tcp!8661!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Collector [gmond] process down alert for JobTracker
-        servicegroups           GANGLIA
-        check_command           check_tcp!8662!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-
-<%if scope.function_hdp_nagios_members_exist('hbasemaster')-%>
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Collector [gmond] process down alert for HBase Master
-        servicegroups           GANGLIA
-        check_command           check_tcp!8663!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-<%end-%>
-<%end-%>
-
-<%if scope.function_hdp_nagios_members_exist('snamenode')-%>
-# Secondary namenode checks
-define service {
-        hostgroup_name          snamenode
-        use                     hadoop-service
-        service_description     NAMENODE::Secondary NameNode process down
-        servicegroups           HDFS
-        check_command           check_tcp!50090!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-<%end-%>
-<%if scope.function_hdp_nagios_members_exist('namenode')-%>
-# HDFS Checks
-define service {
-        hostgroup_name          namenode
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode Web UI down
-        servicegroups           HDFS
-        check_command           check_webui!namenode
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-
-define service {
-        hostgroup_name          namenode
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode edit logs directory status
-        servicegroups           HDFS
-        check_command           check_name_dir_status!50070
-        normal_check_interval   0.5
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-<% if scope.function_hdp_template_var("hdp_os_type") != "suse"%>
-define service {        
-        hostgroup_name          namenode        
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode host CPU utilization
-        servicegroups           HDFS
-        check_command           check_cpu!200%!250%
-        normal_check_interval   5
-        retry_check_interval    2 
-        max_check_attempts      5
-}
-<% end %>
-
-define service {
-        hostgroup_name          namenode
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode process down
-        servicegroups           HDFS
-        check_command           check_tcp!8020!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-
-define service {
-        hostgroup_name          namenode
-        use                     hadoop-service
-        service_description     HDFS::Corrupt/Missing blocks
-        servicegroups           HDFS
-        check_command           check_hdfs_blocks!50070!0%!0%
-        normal_check_interval   2
-        retry_check_interval    1 
-        max_check_attempts      1
-}
-
-define service {
-        hostgroup_name          namenode
-        use                     hadoop-service
-        service_description     HDFS::HDFS capacity utilization
-        servicegroups           HDFS
-        check_command           check_hdfs_capacity!50070!80%!90%
-        normal_check_interval   10
-        retry_check_interval    1 
-        max_check_attempts      1
-}
-
-define service {
-        hostgroup_name          namenode
-        use                     hadoop-service
-        service_description     HDFS::NameNode RPC latency
-        servicegroups           HDFS
-        check_command           check_rpcq_latency!NameNode!50070!3000!5000
-        normal_check_interval   5
-        retry_check_interval    1 
-        max_check_attempts      5
-}
-<%end-%>
-
-# MAPREDUCE Checks
-<%if scope.function_hdp_nagios_members_exist('jobtracker')-%>
-define service {
-        hostgroup_name          jobtracker
-        use                     hadoop-service
-        service_description     JOBTRACKER::JobTracker Web UI down
-        servicegroups           MAPREDUCE
-        check_command           check_webui!jobtracker
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-
-define service {
-        hostgroup_name          jobtracker
-        use                     hadoop-service
-        service_description     JOBTRACKER::JobHistory Web UI down
-        servicegroups           MAPREDUCE
-        check_command           check_webui!jobhistory
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
- <% if scope.function_hdp_template_var("hdp_os_type") != "suse"%>
-define service {
-        hostgroup_name          jobtracker
-        use                     hadoop-service
-        service_description     JOBTRACKER::JobTracker CPU utilization
-        servicegroups           MAPREDUCE
-        check_command           check_cpu!200%!250%
-        normal_check_interval   5
-        retry_check_interval    2 
-        max_check_attempts      5
-}
-<% end %>
-
-define service {
-        hostgroup_name          jobtracker
-        use                     hadoop-service
-        service_description     JOBTRACKER::JobTracker process down
-        servicegroups           MAPREDUCE
-        check_command           check_tcp!50030!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-
-define service {
-        hostgroup_name          jobtracker
-        use                     hadoop-service
-        service_description     MAPREDUCE::JobTracker RPC latency
-        servicegroups           MAPREDUCE
-        check_command           check_rpcq_latency!JobTracker!50030!3000!5000
-        normal_check_interval   5
-        retry_check_interval    1 
-        max_check_attempts      5
-}
-
-# MAPREDUCE::TASKTRACKER Checks 
-define service {
-        hostgroup_name          slaves
-        use                     hadoop-service
-        service_description     TASKTRACKER::TaskTracker process down
-        servicegroups           MAPREDUCE
-        check_command           check_tcp!50060!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-<%end-%>
-
-<%if scope.function_hdp_nagios_members_exist('slaves')-%>
-# HDFS::DATANODE Checks
-define service {
-        hostgroup_name          slaves
-        use                     hadoop-service
-        service_description     DATANODE::DataNode process down
-        servicegroups           HDFS
-        check_command           check_tcp!<%=scope.function_hdp_template_var("dfs_datanode_address")%>!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-define service {
-        hostgroup_name          slaves
-        use                     hadoop-service
-        service_description     DATANODE::DataNode storage full
-        servicegroups           HDFS
-        check_command           check_datanode_storage!<%=scope.function_hdp_template_var("dfs_datanode_http_address")%>!90%!90%
-        normal_check_interval   5
-        retry_check_interval    1
-        max_check_attempts      2
-}
-
-<%end-%>
-
-<%if scope.function_hdp_nagios_members_exist('zookeeper-servers')-%>
-# ZOOKEEPER Checks
-define service {
-        hostgroup_name          zookeeper-servers
-        use                     hadoop-service
-        service_description     ZOOKEEPER::ZooKeeper Server process down
-        servicegroups           ZOOKEEPER
-        check_command           check_tcp!2181!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-<%end-%>
-
-<%if scope.function_hdp_nagios_members_exist('hbasemaster')-%>
-# HBASE::REGIONSERVER Checks
-define service {
-        hostgroup_name          region-servers
-        use                     hadoop-service
-        service_description     REGIONSERVER::RegionServer process down
-        servicegroups           HBASE
-        check_command           check_tcp!60020!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-# HBASE:: MASTER Checks
-define service {
-        hostgroup_name          hbasemaster
-        use                     hadoop-service
-        service_description     HBASEMASTER::HBase Master Web UI down
-        servicegroups           HBASE
-        check_command           check_webui!hbase
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-<% if scope.function_hdp_template_var("hdp_os_type") != "suse"%>
-define service {
-        hostgroup_name          hbasemaster
-        use                     hadoop-service
-        service_description     HBASEMASTER::HBase Master CPU utilization
-        servicegroups           HBASE
-        check_command           check_cpu!200%!250%
-        normal_check_interval   5
-        retry_check_interval    2 
-        max_check_attempts      5
-}
-<% end %>
-define service {
-        hostgroup_name          hbasemaster
-        use                     hadoop-service
-        service_description     HBASEMASTER::HBase Master process down
-        servicegroups           HBASE
-        check_command           check_tcp!60000!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-<%end-%>
-
-<%if scope.function_hdp_nagios_members_exist('hiveserver')-%>
-# HIVE Metastore check
-define service {
-        hostgroup_name          hiveserver
-        use                     hadoop-service
-        service_description     HIVE-METASTORE::Hive Metastore status check
-        servicegroups           HIVE-METASTORE
-        <%if scope.function_hdp_template_var("security_enabled")-%>
-        check_command           check_hive_metastore_status!9083!<%=scope.function_hdp_template_var("java64_home")%>!true!<%=scope.function_hdp_template_var("keytab_path")%>/<%=scope.function_hdp_template_var("nagios_user")%>.headless.keytab!<%=scope.function_hdp_template_var("nagios_user")%>
-        <%else-%>
-        check_command           check_hive_metastore_status!9083!<%=scope.function_hdp_template_var("java64_home")%>!false
-        <%end-%>
-        normal_check_interval   0.5
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-<%end-%>
-<%if scope.function_hdp_nagios_members_exist('oozie-server')-%>
-# Oozie check
-define service {
-        hostgroup_name          oozie-server
-        use                     hadoop-service
-        service_description     OOZIE::Oozie Server status check
-        servicegroups           OOZIE
-        <%if scope.function_hdp_template_var("security_enabled")-%>
-        check_command           check_oozie_status!11000!<%=scope.function_hdp_template_var("java64_home")%>!true!<%=scope.function_hdp_template_var("keytab_path")%>/<%=scope.function_hdp_template_var("nagios_user")%>.headless.keytab!<%=scope.function_hdp_template_var("nagios_user")%>
-        <%else-%>
-        check_command           check_oozie_status!11000!<%=scope.function_hdp_template_var("java64_home")%>!false
-        <%end-%>
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-<%end-%>
-<%if scope.function_hdp_nagios_members_exist('webhcat-server')-%>
-# WEBHCAT check
-define service {
-        hostgroup_name          webhcat-server
-        use                     hadoop-service
-        service_description     WEBHCAT::WebHCat Server status check
-        servicegroups           WEBHCAT 
-        <%if scope.function_hdp_template_var("security_enabled")-%>
-        check_command           check_templeton_status!50111!v1!true!<%=scope.function_hdp_template_var("keytab_path")%>/<%=scope.function_hdp_template_var("nagios_user")%>.headless.keytab!<%=scope.function_hdp_template_var("nagios_user")%>
-        <%else-%>
-        check_command           check_templeton_status!50111!v1!false
-        <%end-%>
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-<%end-%>
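
[Editor's note] Several of the aggregate services above pass warn/crit percentages (e.g. check_aggregate!"DATANODE::DataNode process down"!10%!30%). The real check_aggregate.php counts matching member services in /var/nagios/status.dat; the sketch below shows just the threshold logic, assuming inclusive comparisons:

    def aggregate_state(total, bad, warn_pct, crit_pct)
      pct = total.zero? ? 0.0 : bad * 100.0 / total
      return 'CRITICAL' if pct >= crit_pct
      return 'WARNING'  if pct >= warn_pct
      'OK'
    end

    p aggregate_state(20, 1, 10, 30) # => "OK"       (5% of members down)
    p aggregate_state(20, 3, 10, 30) # => "WARNING"  (15% down)
    p aggregate_state(20, 7, 10, 30) # => "CRITICAL" (35% down)
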
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/nagios.cfg.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/nagios.cfg.erb
deleted file mode 100644
index 38b7678..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/nagios.cfg.erb
+++ /dev/null
@@ -1,1349 +0,0 @@
-##############################################################################
-#
-# NAGIOS.CFG - Sample Main Config File for Nagios 3.2.3
-#
-# Read the documentation for more information on this configuration
-# file.  I've provided some comments here, but things may not be so
-# clear without further explanation.
-#
-# Last Modified: 12-14-2008
-#
-##############################################################################
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-# LOG FILE
-# This is the main log file where service and host events are logged
-# for historical purposes.  This should be the first option specified 
-# in the config file!!!
-
-log_file=/var/log/nagios/nagios.log
-
-
-
-# OBJECT CONFIGURATION FILE(S)
-# These are the object configuration files in which you define hosts,
-# host groups, contacts, contact groups, services, etc.
-# You can split your object definitions across several config files
-# if you wish (as shown below), or keep them all in a single config file.
-
-# You can specify individual object config files as shown below:
-cfg_file=/etc/nagios/objects/commands.cfg
-cfg_file=/etc/nagios/objects/contacts.cfg
-cfg_file=/etc/nagios/objects/timeperiods.cfg
-cfg_file=/etc/nagios/objects/templates.cfg
-
-# Definitions for monitoring the local (Linux) host
-#cfg_file=/etc/nagios/objects/localhost.cfg
-
-# Definitions for monitoring a Windows machine
-#cfg_file=/etc/nagios/objects/windows.cfg
-
-# Definitions for monitoring a router/switch
-#cfg_file=/etc/nagios/objects/switch.cfg
-
-# Definitions for monitoring a network printer
-#cfg_file=/etc/nagios/objects/printer.cfg
-
-# Definitions for hadoop servers
-cfg_file=<%=scope.function_hdp_template_var("nagios_host_cfg")%>
-cfg_file=<%=scope.function_hdp_template_var("nagios_hostgroup_cfg")%>
-cfg_file=<%=scope.function_hdp_template_var("nagios_servicegroup_cfg")%>
-cfg_file=<%=scope.function_hdp_template_var("nagios_service_cfg")%>
-cfg_file=<%=scope.function_hdp_template_var("nagios_command_cfg")%>
-
-
-# You can also tell Nagios to process all config files (with a .cfg
-# extension) in a particular directory by using the cfg_dir
-# directive as shown below:
-
-#cfg_dir=/etc/nagios/servers
-#cfg_dir=/etc/nagios/printers
-#cfg_dir=/etc/nagios/switches
-#cfg_dir=/etc/nagios/routers
-
-
-
-
-# OBJECT CACHE FILE
-# This option determines where object definitions are cached when
-# Nagios starts/restarts.  The CGIs read object definitions from 
-# this cache file (rather than looking at the object config files
-# directly) in order to prevent inconsistencies that can occur
-# when the config files are modified after Nagios starts.
-
-object_cache_file=/var/nagios/objects.cache
-
-
-
-# PRE-CACHED OBJECT FILE
-# This option determines the location of the precached object file.
-# If you run Nagios with the -p command line option, it will preprocess
-# your object configuration file(s) and write the cached config to this
-# file.  You can then start Nagios with the -u option to have it read
-# object definitions from this precached file, rather than the standard
-# object configuration files (see the cfg_file and cfg_dir options above).
-# Using a precached object file can reduce the time needed to (re)start
-# the Nagios process if you've got a large and/or complex configuration.
-# Read the documentation section on optimizing Nagios to find out more
-# about how this feature works.
-
-precached_object_file=/var/nagios/objects.precache
-
-
-
-# RESOURCE FILE
-# This is an optional resource file that contains $USERx$ macro
-# definitions. Multiple resource files can be specified by using
-# multiple resource_file definitions.  The CGIs will not attempt to
-# read the contents of resource files, so information that is
-# considered to be sensitive (usernames, passwords, etc) can be
-# defined as macros in this file and restrictive permissions (600)
-# can be placed on this file.
-
-resource_file=<%=scope.function_hdp_template_var("nagios_resource_cfg")%>
-
-
-
-# STATUS FILE
-# This is where the current status of all monitored services and
-# hosts is stored.  Its contents are read and processed by the CGIs.
-# The contents of the status file are deleted every time Nagios
-# restarts.
-
-status_file=/var/nagios/status.dat
-
-
-
-# STATUS FILE UPDATE INTERVAL
-# This option determines the frequency (in seconds) that
-# Nagios will periodically dump program, host, and 
-# service status data.
-
-status_update_interval=10
-
-
-
-# NAGIOS USER
-# This determines the effective user that Nagios should run as.  
-# You can either supply a username or a UID.
-
-nagios_user=nagios
-
-
-
-# NAGIOS GROUP
-# This determines the effective group that Nagios should run as.  
-# You can either supply a group name or a GID.
-
-nagios_group=<%=scope.function_hdp_template_var("nagios_group")%>
-
-
-
-# EXTERNAL COMMAND OPTION
-# This option allows you to specify whether or not Nagios should check
-# for external commands (in the command file defined below).  By default
-# Nagios will *not* check for external commands, just to be on the
-# cautious side.  If you want to be able to use the CGI command interface
-# you will have to enable this.
-# Values: 0 = disable commands, 1 = enable commands
-
-check_external_commands=1
-
-
-
-# EXTERNAL COMMAND CHECK INTERVAL
-# This is the interval at which Nagios should check for external commands.
-# This value works off the interval_length you specify later.  If you leave
-# that at its default value of 60 (seconds), a value of 1 here will cause
-# Nagios to check for external commands every minute.  If you specify a
-# number followed by an "s" (e.g. 15s), this will be interpreted to mean
-# actual seconds rather than a multiple of the interval_length variable.
-# Note: In addition to reading the external command file at regularly 
-# scheduled intervals, Nagios will also check for external commands after
-# event handlers are executed.
-# NOTE: Setting this value to -1 causes Nagios to check the external
-# command file as often as possible.
-
-#command_check_interval=15s
-command_check_interval=-1
-
-
-
-# EXTERNAL COMMAND FILE
-# This is the file that Nagios checks for external command requests.
-# It is also where the command CGI will write commands that are submitted
-# by users, so it must be writeable by the user that the web server
-# is running as (usually 'nobody').  Permissions should be set at the 
-# directory level instead of on the file, as the file is deleted every
-# time its contents are processed.
-
-command_file=/var/nagios/rw/nagios.cmd
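-
-# As a minimal illustration (host and service names below are
-# hypothetical): with check_external_commands enabled, an external
-# process can submit a command by appending a timestamped line to this
-# pipe, e.g. a passive service check result:
-#
-#   printf "[%lu] PROCESS_SERVICE_CHECK_RESULT;host1;HDFS::NameNode;0;OK\n" `date +%s` > /var/nagios/rw/nagios.cmd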
-
-
-
-# EXTERNAL COMMAND BUFFER SLOTS
-# This setting is used to tweak the number of items or "slots" that
-# the Nagios daemon should allocate to the buffer that holds incoming 
-# external commands before they are processed.  As external commands 
-# are processed by the daemon, they are removed from the buffer.  
-
-external_command_buffer_slots=4096
-
-
-
-# LOCK FILE
-# This is the lockfile that Nagios will use to store its PID number
-# in when it is running in daemon mode.
-
-lock_file=/var/run/nagios/nagios.pid
-
-
-
-# TEMP FILE
-# This is a temporary file that is used as scratch space when Nagios
-# updates the status log, cleans the comment file, etc.  This file
-# is created, used, and deleted throughout the time that Nagios is
-# running.
-
-temp_file=/var/nagios/nagios.tmp
-
-
-
-# TEMP PATH
-# This is the path where Nagios can create temp files for service and
-# host check results, etc.
-
-temp_path=/tmp
-
-
-
-# EVENT BROKER OPTIONS
-# Controls what (if any) data gets sent to the event broker.
-# Values:  0      = Broker nothing
-#         -1      = Broker everything
-#         <other> = See documentation
-
-event_broker_options=-1
-
-
-
-# EVENT BROKER MODULE(S)
-# This directive is used to specify an event broker module that should
-# be loaded by Nagios at startup.  Use multiple directives if you want
-# to load more than one module.  Arguments that should be passed to
-# the module at startup are separated from the module path by a space.
-#
-#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-# WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING
-#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-#
-# Do NOT overwrite modules while they are being used by Nagios or Nagios
-# will crash in a fiery display of SEGFAULT glory.  This is a bug/limitation
-# either in dlopen(), the kernel, and/or the filesystem.  And maybe Nagios...
-#
-# The correct/safe way of updating a module is by using one of these methods:
-#    1. Shutdown Nagios, replace the module file, restart Nagios
-#    2. Delete the original module file, move the new module file into place, restart Nagios
-#
-# Example:
-#
-#   broker_module=<modulepath> [moduleargs]
-
-#broker_module=/somewhere/module1.o
-#broker_module=/somewhere/module2.o arg1 arg2=3 debug=0
-
-
-
-# LOG ROTATION METHOD
-# This is the log rotation method that Nagios should use to rotate
-# the main log file.  Values are as follows:
-#	n	= None - don't rotate the log
-#	h	= Hourly rotation (top of the hour)
-#	d	= Daily rotation (midnight every day)
-#	w	= Weekly rotation (midnight on Saturday evening)
-#	m	= Monthly rotation (midnight last day of month)
-
-log_rotation_method=d
-
-
-
-# LOG ARCHIVE PATH
-# This is the directory where archived (rotated) log files should be 
-# placed (assuming you've chosen to do log rotation).
-
-log_archive_path=/var/log/nagios/archives
-
-
-
-# LOGGING OPTIONS
-# If you want messages logged to the syslog facility, as well as the
-# Nagios log file set this option to 1.  If not, set it to 0.
-
-use_syslog=1
-
-
-
-# NOTIFICATION LOGGING OPTION
-# If you don't want notifications to be logged, set this value to 0.
-# If notifications should be logged, set the value to 1.
-
-log_notifications=1
-
-
-
-# SERVICE RETRY LOGGING OPTION
-# If you don't want service check retries to be logged, set this value
-# to 0.  If retries should be logged, set the value to 1.
-
-log_service_retries=1
-
-
-
-# HOST RETRY LOGGING OPTION
-# If you don't want host check retries to be logged, set this value to
-# 0.  If retries should be logged, set the value to 1.
-
-log_host_retries=1
-
-
-
-# EVENT HANDLER LOGGING OPTION
-# If you don't want host and service event handlers to be logged, set
-# this value to 0.  If event handlers should be logged, set the value
-# to 1.
-
-log_event_handlers=1
-
-
-
-# INITIAL STATES LOGGING OPTION
-# If you want Nagios to log all initial host and service states to
-# the main log file (the first time the service or host is checked)
-# you can enable this option by setting this value to 1.  If you
-# are not using an external application that does long term state
-# statistics reporting, you do not need to enable this option.  In
-# this case, set the value to 0.
-
-log_initial_states=0
-
-
-
-# EXTERNAL COMMANDS LOGGING OPTION
-# If you don't want Nagios to log external commands, set this value
-# to 0.  If external commands should be logged, set this value to 1.
-# Note: This option does not include logging of passive service
-# checks - see the option below for controlling whether or not
-# passive checks are logged.
-
-log_external_commands=1
-
-
-
-# PASSIVE CHECKS LOGGING OPTION
-# If you don't want Nagios to log passive host and service checks, set
-# this value to 0.  If passive checks should be logged, set
-# this value to 1.
-
-log_passive_checks=1
-
-
-
-# GLOBAL HOST AND SERVICE EVENT HANDLERS
-# These options allow you to specify a host and service event handler
-# command that is to be run for every host or service state change.
-# The global event handler is executed immediately prior to the event
-# handler that you have optionally specified in each host or
-# service definition. The command argument is the short name of a
-# command definition that you define in your host configuration file.
-# Read the HTML docs for more information.
-
-#global_host_event_handler=somecommand
-#global_service_event_handler=somecommand
-
-
-
-# SERVICE INTER-CHECK DELAY METHOD
-# This is the method that Nagios should use when initially
-# "spreading out" service checks when it starts monitoring.  The
-# default is to use smart delay calculation, which will try to
-# space all service checks out evenly to minimize CPU load.
-# Using the dumb setting will cause all checks to be scheduled
-# at the same time (with no delay between them)!  This is not a
-# good thing for production, but is useful when testing the
-# parallelization functionality.
-#	n	= None - don't use any delay between checks
-#	d	= Use a "dumb" delay of 1 second between checks
-#	s	= Use "smart" inter-check delay calculation
-#       x.xx    = Use an inter-check delay of x.xx seconds
-
-service_inter_check_delay_method=s
-
-
-
-# MAXIMUM SERVICE CHECK SPREAD
-# This variable determines the timeframe (in minutes) from the
-# program start time that an initial check of all services should
-# be completed.  Default is 30 minutes.
-
-max_service_check_spread=30
-
-
-
-# SERVICE CHECK INTERLEAVE FACTOR
-# This variable determines how service checks are interleaved.
-# Interleaving the service checks allows for a more even
-# distribution of service checks and reduced load on remote
-# hosts.  Setting this value to 1 is equivalent to how versions
-# of Nagios previous to 0.0.5 did service checks.  Set this
-# value to s (smart) for automatic calculation of the interleave
-# factor unless you have a specific reason to change it.
-#       s       = Use "smart" interleave factor calculation
-#       x       = Use an interleave factor of x, where x is a
-#                 number greater than or equal to 1.
-
-service_interleave_factor=s
-
-
-
-# HOST INTER-CHECK DELAY METHOD
-# This is the method that Nagios should use when initially
-# "spreading out" host checks when it starts monitoring.  The
-# default is to use smart delay calculation, which will try to
-# space all host checks out evenly to minimize CPU load.
-# Using the dumb setting will cause all checks to be scheduled
-# at the same time (with no delay between them)!
-#	n	= None - don't use any delay between checks
-#	d	= Use a "dumb" delay of 1 second between checks
-#	s	= Use "smart" inter-check delay calculation
-#       x.xx    = Use an inter-check delay of x.xx seconds
-
-host_inter_check_delay_method=s
-
-
-
-# MAXIMUM HOST CHECK SPREAD
-# This variable determines the timeframe (in minutes) from the
-# program start time that an initial check of all hosts should
-# be completed.  Default is 30 minutes.
-
-max_host_check_spread=30
-
-
-
-# MAXIMUM CONCURRENT SERVICE CHECKS
-# This option allows you to specify the maximum number of 
-# service checks that can be run in parallel at any given time.
-# Specifying a value of 1 for this variable essentially prevents
-# any service checks from being parallelized.  A value of 0
-# will not restrict the number of concurrent checks that are
-# being executed.
-
-max_concurrent_checks=0
-
-
-
-# HOST AND SERVICE CHECK REAPER FREQUENCY
-# This is the frequency (in seconds!) that Nagios will process
-# the results of host and service checks.
-
-check_result_reaper_frequency=10
-
-
-
-
-# MAX CHECK RESULT REAPER TIME
-# This is the max amount of time (in seconds) that a single
-# check result reaper event will be allowed to run before 
-# returning control back to Nagios so it can perform other
-# duties.
-
-max_check_result_reaper_time=30
-
-
-
-
-# CHECK RESULT PATH
-# This is the directory where Nagios stores the results of host and
-# service checks that have not yet been processed.
-#
-# Note: Make sure that only one instance of Nagios has access
-# to this directory!  
-
-check_result_path=/var/nagios/spool/checkresults
-
-
-
-
-# MAX CHECK RESULT FILE AGE
-# This option determines the maximum age (in seconds) for which check
-# result files are considered to be valid.  Files older than this
-# threshold will be mercilessly deleted without further processing.
-
-max_check_result_file_age=3600
-
-
-
-
-# CACHED HOST CHECK HORIZON
-# This option determines the maximum amount of time (in seconds)
-# that the state of a previous host check is considered current.
-# Cached host states (from host checks that were performed more
-# recently than the timeframe specified by this value) can immensely
-# improve performance in regards to the host check logic.
-# Too high of a value for this option may result in inaccurate host
-# states being used by Nagios, while a lower value may result in a
-# performance hit for host checks.  Use a value of 0 to disable host
-# check caching.
-
-cached_host_check_horizon=15
-
-
-
-# CACHED SERVICE CHECK HORIZON
-# This option determines the maximum amount of time (in seconds)
-# that the state of a previous service check is considered current.
-# Cached service states (from service checks that were performed more
-# recently than the timeframe specified by this value) can immensely
-# improve performance in regards to predictive dependency checks.
-# Use a value of 0 to disable service check caching.
-
-cached_service_check_horizon=15
-
-
-
-# ENABLE PREDICTIVE HOST DEPENDENCY CHECKS
-# This option determines whether or not Nagios will attempt to execute
-# checks of hosts when it predicts that future dependency logic tests
-# may be needed.  These predictive checks can help ensure that your
-# host dependency logic works well.
-# Values:
-#  0 = Disable predictive checks
-#  1 = Enable predictive checks (default)
-
-enable_predictive_host_dependency_checks=1
-
-
-
-# ENABLE PREDICTIVE SERVICE DEPENDENCY CHECKS
-# This option determines whether or not Nagios will attempt to execute
-# checks of services when it predicts that future dependency logic tests
-# may be needed.  These predictive checks can help ensure that your
-# service dependency logic works well.
-# Values:
-#  0 = Disable predictive checks
-#  1 = Enable predictive checks (default)
-
-enable_predictive_service_dependency_checks=1
-
-
-
-# SOFT STATE DEPENDENCIES
-# This option determines whether or not Nagios will use soft state 
-# information when checking host and service dependencies. Normally 
-# Nagios will only use the latest hard host or service state when 
-# checking dependencies. If you want it to use the latest state (regardless
-# of whether it's a soft or hard state type), enable this option.
-# Values:
-#  0 = Don't use soft state dependencies (default) 
-#  1 = Use soft state dependencies 
-
-soft_state_dependencies=0
-
-
-
-# TIME CHANGE ADJUSTMENT THRESHOLDS
-# These options determine when Nagios will react to detected changes
-# in system time (either forward or backwards).
-
-#time_change_threshold=900
-
-
-
-# AUTO-RESCHEDULING OPTION
-# This option determines whether or not Nagios will attempt to
-# automatically reschedule active host and service checks to
-# "smooth" them out over time.  This can help balance the load on
-# the monitoring server.  
-# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
-# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
-
-auto_reschedule_checks=0
-
-
-
-# AUTO-RESCHEDULING INTERVAL
-# This option determines how often (in seconds) Nagios will
-# attempt to automatically reschedule checks.  This option only
-# has an effect if the auto_reschedule_checks option is enabled.
-# Default is 30 seconds.
-# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
-# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
-
-auto_rescheduling_interval=30
-
-
-
-# AUTO-RESCHEDULING WINDOW
-# This option determines the "window" of time (in seconds) that
-# Nagios will look at when automatically rescheduling checks.
-# Only host and service checks that occur in the next X seconds
-# (determined by this variable) will be rescheduled. This option
-# only has an effect if the auto_reschedule_checks option is
-# enabled.  Default is 180 seconds (3 minutes).
-# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
-# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
-
-auto_rescheduling_window=180
-
-
-
-# SLEEP TIME
-# This is the number of seconds to sleep between checking for system
-# events and service checks that need to be run.
-
-sleep_time=0.25
-
-
-
-# TIMEOUT VALUES
-# These options control how much time Nagios will allow various
-# types of commands to execute before killing them off.  Options
-# are available for controlling maximum time allotted for
-# service checks, host checks, event handlers, notifications, the
-# ocsp command, and performance data commands.  All values are in
-# seconds.
-
-service_check_timeout=60
-host_check_timeout=30
-event_handler_timeout=30
-notification_timeout=30
-ocsp_timeout=5
-perfdata_timeout=5
-
-
-
-# RETAIN STATE INFORMATION
-# This setting determines whether or not Nagios will save state
-# information for services and hosts before it shuts down.  Upon
-# startup Nagios will reload all saved service and host state
-# information before starting to monitor.  This is useful for 
-# maintaining long-term data on state statistics, etc, but will
-# slow Nagios down a bit when it (re)starts.  Since it's only
-# a one-time penalty, I think it's well worth the additional
-# startup delay.
-
-retain_state_information=1
-
-
-
-# STATE RETENTION FILE
-# This is the file that Nagios should use to store host and
-# service state information before it shuts down.  The state 
-# information in this file is also read immediately prior to
-# starting to monitor the network when Nagios is restarted.
-# This file is used only if the retain_state_information
-# variable is set to 1.
-
-state_retention_file=/var/nagios/retention.dat
-
-
-
-# RETENTION DATA UPDATE INTERVAL
-# This setting determines how often (in minutes) Nagios
-# will automatically save retention data during normal operation.
-# If you set this value to 0, Nagios will not save retention
-# data at regular intervals, but it will still save retention
-# data before shutting down or restarting.  If you have disabled
-# state retention, this option has no effect.
-
-retention_update_interval=60
-
-
-
-# USE RETAINED PROGRAM STATE
-# This setting determines whether or not Nagios will set 
-# program status variables based on the values saved in the
-# retention file.  If you want to use retained program status
-# information, set this value to 1.  If not, set this value
-# to 0.
-
-use_retained_program_state=1
-
-
-
-# USE RETAINED SCHEDULING INFO
-# This setting determines whether or not Nagios will retain
-# the scheduling info (next check time) for hosts and services
-# based on the values saved in the retention file.
-# If you want to use retained scheduling info, set this
-# value to 1.  If not, set this value to 0.
-
-use_retained_scheduling_info=1
-
-
-
-# RETAINED ATTRIBUTE MASKS (ADVANCED FEATURE)
-# The following variables are used to specify specific host and
-# service attributes that should *not* be retained by Nagios during
-# program restarts.
-#
-# The values of the masks are bitwise ORs (i.e. sums) of the values
-# specified by the "MODATTR_" definitions found in include/common.h.
-# For example, if you do not want the current enabled/disabled state
-# of flap detection and event handlers for hosts to be retained, you
-# would use a value of 24 for the host attribute mask...
-# MODATTR_EVENT_HANDLER_ENABLED (8) + MODATTR_FLAP_DETECTION_ENABLED (16) = 24
-
-# This mask determines what host attributes are not retained
-retained_host_attribute_mask=0
-
-# This mask determines what service attributes are not retained
-retained_service_attribute_mask=0
-
-# These two masks determine what process attributes are not retained.
-# There are two masks, because some process attributes have host and service
-# options.  For example, you can disable active host checks, but leave active
-# service checks enabled.
-retained_process_host_attribute_mask=0
-retained_process_service_attribute_mask=0
-
-# These two masks determine what contact attributes are not retained.
-# There are two masks, because some contact attributes have host and
-# service options.  For example, you can disable host notifications for
-# a contact, but leave service notifications enabled for them.
-retained_contact_host_attribute_mask=0
-retained_contact_service_attribute_mask=0
-
-
-
-# INTERVAL LENGTH
-# This is the seconds per unit interval as used in the
-# host/contact/service configuration files.  Setting this to 60 means
-# that each interval is one minute long (60 seconds).  Other settings
-# have not been tested much, so your mileage is likely to vary...
-
-interval_length=60
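-
-# Worked example: with interval_length=60, a service defined with
-# "normal_check_interval 5" is checked every 5 * 60 = 300 seconds.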
-
-
-
-# CHECK FOR UPDATES
-# This option determines whether Nagios will automatically check to
-# see if new updates (releases) are available.  It is recommended that you
-# enable this option to ensure that you stay on top of the latest critical
-# patches to Nagios.  Nagios is critical to you - make sure you keep it in
-# good shape.  Nagios will check once a day for new updates. Data collected
-# by Nagios Enterprises from the update check is processed in accordance 
-# with our privacy policy - see http://api.nagios.org for details.
-
-check_for_updates=1
-
-
-
-# BARE UPDATE CHECK
-# This option determines what data Nagios will send to api.nagios.org when
-# it checks for updates.  By default, Nagios will send information on the
-# current version of Nagios you have installed, as well as an indicator as
-# to whether this was a new installation or not.  Nagios Enterprises uses
-# this data to determine the number of users running specific versions of
-# Nagios.  Enable this option if you do not want this information to be sent.
-
-bare_update_check=0
-
-
-
-# AGGRESSIVE HOST CHECKING OPTION
-# If you don't want to turn on aggressive host checking features, set
-# this value to 0 (the default).  Otherwise set this value to 1 to
-# enable the aggressive check option.  Read the docs for more info
-# on what aggressive host checking is, or check out the source code in
-# base/checks.c
-
-use_aggressive_host_checking=0
-
-
-
-# SERVICE CHECK EXECUTION OPTION
-# This determines whether or not Nagios will actively execute
-# service checks when it initially starts.  If this option is 
-# disabled, checks are not actively made, but Nagios can still
-# receive and process passive check results that come in.  Unless
-# you're implementing redundant hosts or have a special need for
-# disabling the execution of service checks, leave this enabled!
-# Values: 1 = enable checks, 0 = disable checks
-
-execute_service_checks=1
-
-
-
-# PASSIVE SERVICE CHECK ACCEPTANCE OPTION
-# This determines whether or not Nagios will accept passive
-# service check results when it initially (re)starts.
-# Values: 1 = accept passive checks, 0 = reject passive checks
-
-accept_passive_service_checks=1
-
-
-
-# HOST CHECK EXECUTION OPTION
-# This determines whether or not Nagios will actively execute
-# host checks when it initially starts.  If this option is 
-# disabled, checks are not actively made, but Nagios can still
-# receive and process passive check results that come in.  Unless
-# you're implementing redundant hosts or have a special need for
-# disabling the execution of host checks, leave this enabled!
-# Values: 1 = enable checks, 0 = disable checks
-
-execute_host_checks=1
-
-
-
-# PASSIVE HOST CHECK ACCEPTANCE OPTION
-# This determines whether or not Nagios will accept passive
-# host check results when it initially (re)starts.
-# Values: 1 = accept passive checks, 0 = reject passive checks
-
-accept_passive_host_checks=1
-
-
-
-# NOTIFICATIONS OPTION
-# This determines whether or not Nagios will send out any host or
-# service notifications when it is initially (re)started.
-# Values: 1 = enable notifications, 0 = disable notifications
-
-enable_notifications=1
-
-
-
-# EVENT HANDLER USE OPTION
-# This determines whether or not Nagios will run any host or
-# service event handlers when it is initially (re)started.  Unless
-# you're implementing redundant hosts, leave this option enabled.
-# Values: 1 = enable event handlers, 0 = disable event handlers
-
-enable_event_handlers=1
-
-
-
-# PROCESS PERFORMANCE DATA OPTION
-# This determines whether or not Nagios will process performance
-# data returned from service and host checks.  If this option is
-# enabled, host performance data will be processed using the
-# host_perfdata_command (defined below) and service performance
-# data will be processed using the service_perfdata_command (also
-# defined below).  Read the HTML docs for more information on
-# performance data.
-# Values: 1 = process performance data, 0 = do not process performance data
-
-process_performance_data=0
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA PROCESSING COMMANDS
-# These commands are run after every host and service check is
-# performed.  These commands are executed only if the
-# process_performance_data option (above) is set to 1.  The command
-# argument is the short name of a command definition that you 
-# define in your host configuration file.  Read the HTML docs for
-# more information on performance data.
-
-#host_perfdata_command=process-host-perfdata
-#service_perfdata_command=process-service-perfdata
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILES
-# These files are used to store host and service performance data.
-# Performance data is only written to these files if the
-# process_performance_data option (above) is set to 1.
-
-#host_perfdata_file=/tmp/host-perfdata
-#service_perfdata_file=/tmp/service-perfdata
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE TEMPLATES
-# These options determine what data is written (and how) to the
-# performance data files.  The templates may contain macros, special
-# characters (\t for tab, \r for carriage return, \n for newline)
-# and plain text.  A newline is automatically added after each write
-# to the performance data file.  Some examples of what you can do are
-# shown below.
-
-#host_perfdata_file_template=[HOSTPERFDATA]\t$TIMET$\t$HOSTNAME$\t$HOSTEXECUTIONTIME$\t$HOSTOUTPUT$\t$HOSTPERFDATA$
-#service_perfdata_file_template=[SERVICEPERFDATA]\t$TIMET$\t$HOSTNAME$\t$SERVICEDESC$\t$SERVICEEXECUTIONTIME$\t$SERVICELATENCY$\t$SERVICEOUTPUT$\t$SERVICEPERFDATA$
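-
-# As an illustration (timestamp, host name, and plugin output are made
-# up), the host template above would append tab-separated lines like:
-#
-#   [HOSTPERFDATA]  1366923600  host1  0.015  PING OK - 0% loss  rta=0.80ms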
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE MODES
-# This option determines whether or not the host and service
-# performance data files are opened in write ("w") or append ("a")
-# mode. If you want to use named pipes, you should use the special
-# pipe ("p") mode which avoid blocking at startup, otherwise you will
-# likely want the defult append ("a") mode.
-
-#host_perfdata_file_mode=a
-#service_perfdata_file_mode=a
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING INTERVAL
-# These options determine how often (in seconds) the host and service
-# performance data files are processed using the commands defined
-# below.  A value of 0 indicates the files should not be periodically
-# processed.
-
-#host_perfdata_file_processing_interval=0
-#service_perfdata_file_processing_interval=0
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING COMMANDS
-# These commands are used to periodically process the host and
-# service performance data files.  The interval at which the
-# processing occurs is determined by the options above.
-
-#host_perfdata_file_processing_command=process-host-perfdata-file
-#service_perfdata_file_processing_command=process-service-perfdata-file
-
-
-
-# OBSESS OVER SERVICE CHECKS OPTION
-# This determines whether or not Nagios will obsess over service
-# checks and run the ocsp_command defined below.  Unless you're
-# planning on implementing distributed monitoring, do not enable
-# this option.  Read the HTML docs for more information on
-# implementing distributed monitoring.
-# Values: 1 = obsess over services, 0 = do not obsess (default)
-
-obsess_over_services=0
-
-
-
-# OBSESSIVE COMPULSIVE SERVICE PROCESSOR COMMAND
-# This is the command that is run for every service check that is
-# processed by Nagios.  This command is executed only if the
-# obsess_over_services option (above) is set to 1.  The command 
-# argument is the short name of a command definition that you
-# define in your host configuration file. Read the HTML docs for
-# more information on implementing distributed monitoring.
-
-#ocsp_command=somecommand
-
-
-
-# OBSESS OVER HOST CHECKS OPTION
-# This determines whether or not Nagios will obsess over host
-# checks and run the ochp_command defined below.  Unless you're
-# planning on implementing distributed monitoring, do not enable
-# this option.  Read the HTML docs for more information on
-# implementing distributed monitoring.
-# Values: 1 = obsess over hosts, 0 = do not obsess (default)
-
-obsess_over_hosts=0
-
-
-
-# OBSESSIVE COMPULSIVE HOST PROCESSOR COMMAND
-# This is the command that is run for every host check that is
-# processed by Nagios.  This command is executed only if the
-# obsess_over_hosts option (above) is set to 1.  The command 
-# argument is the short name of a command definition that you
-# define in your host configuration file. Read the HTML docs for
-# more information on implementing distributed monitoring.
-
-#ochp_command=somecommand
-
-
-
-# TRANSLATE PASSIVE HOST CHECKS OPTION
-# This determines whether or not Nagios will translate
-# DOWN/UNREACHABLE passive host check results into their proper
-# state for this instance of Nagios.  This option is useful
-# if you have distributed or failover monitoring setup.  In
-# these cases your other Nagios servers probably have a different
-# "view" of the network, with regards to the parent/child relationship
-# of hosts.  If a distributed monitoring server thinks a host
-# is DOWN, it may actually be UNREACHABLE from the point of
-# view of this Nagios instance.  Enabling this option will tell Nagios
-# to translate any DOWN or UNREACHABLE host states it receives
-# passively into the correct state from the view of this server.
-# Values: 1 = perform translation, 0 = do not translate (default)
-
-translate_passive_host_checks=0
-
-
-
-# PASSIVE HOST CHECKS ARE SOFT OPTION
-# This determines whether or not Nagios will treat passive host
-# checks as being HARD or SOFT.  By default, a passive host check
-# result will put a host into a HARD state type.  This can be changed
-# by enabling this option.
-# Values: 0 = passive checks are HARD, 1 = passive checks are SOFT
-
-passive_host_checks_are_soft=0
-
-
-
-# ORPHANED HOST/SERVICE CHECK OPTIONS
-# These options determine whether or not Nagios will periodically 
-# check for orphaned host and service checks.  Since service checks are
-# not rescheduled until the results of their previous execution 
-# instance are processed, there exists a possibility that some
-# checks may never get rescheduled.  A similar situation exists for
-# host checks, although the exact scheduling details differ a bit
-# from service checks.  Orphaned checks seem to be a rare
-# problem and should not happen under normal circumstances.
-# If you have problems with service checks never getting
-# rescheduled, make sure you have orphaned service checks enabled.
-# Values: 1 = enable checks, 0 = disable checks
-
-check_for_orphaned_services=1
-check_for_orphaned_hosts=1
-
-
-
-# SERVICE FRESHNESS CHECK OPTION
-# This option determines whether or not Nagios will periodically
-# check the "freshness" of service results.  Enabling this option
-# is useful for ensuring passive checks are received in a timely
-# manner.
-# Values: 1 = enable freshness checking, 0 = disable freshness checking
-
-check_service_freshness=1
-
-
-
-# SERVICE FRESHNESS CHECK INTERVAL
-# This setting determines how often (in seconds) Nagios will
-# check the "freshness" of service check results.  If you have
-# disabled service freshness checking, this option has no effect.
-
-service_freshness_check_interval=60
-
-
-
-# HOST FRESHNESS CHECK OPTION
-# This option determines whether or not Nagios will periodically
-# check the "freshness" of host results.  Enabling this option
-# is useful for ensuring passive checks are received in a timely
-# manner.
-# Values: 1 = enable freshness checking, 0 = disable freshness checking
-
-check_host_freshness=0
-
-
-
-# HOST FRESHNESS CHECK INTERVAL
-# This setting determines how often (in seconds) Nagios will
-# check the "freshness" of host check results.  If you have
-# disabled host freshness checking, this option has no effect.
-
-host_freshness_check_interval=60
-
-
-
-
-# ADDITIONAL FRESHNESS THRESHOLD LATENCY
-# This setting determines the number of seconds that Nagios
-# will add to any host and service freshness thresholds that
-# it calculates (those not explicitly specified by the user).
-
-additional_freshness_latency=15
-
-
-
-
-# FLAP DETECTION OPTION
-# This option determines whether or not Nagios will try
-# to detect hosts and services that are "flapping".
-# Flapping occurs when a host or service changes between
-# states too frequently.  When Nagios detects that a 
-# host or service is flapping, it will temporarily suppress
-# notifications for that host/service until it stops
-# flapping.  Flap detection is very experimental, so read
-# the HTML documentation before enabling this feature!
-# Values: 1 = enable flap detection
-#         0 = disable flap detection (default)
-
-enable_flap_detection=1
-
-
-
-# FLAP DETECTION THRESHOLDS FOR HOSTS AND SERVICES
-# Read the HTML documentation on flap detection for
-# an explanation of what this option does.  This option
-# has no effect if flap detection is disabled.
-
-low_service_flap_threshold=5.0
-high_service_flap_threshold=20.0
-low_host_flap_threshold=5.0
-high_host_flap_threshold=20.0
-
-
-
-# DATE FORMAT OPTION
-# This option determines how short dates are displayed. Valid options
-# include:
-#	us		(MM-DD-YYYY HH:MM:SS)
-#	euro    	(DD-MM-YYYY HH:MM:SS)
-#	iso8601		(YYYY-MM-DD HH:MM:SS)
-#	strict-iso8601	(YYYY-MM-DDTHH:MM:SS)
-#
-
-date_format=us
-
-
-
-
-# TIMEZONE OFFSET
-# This option is used to override the default timezone that this
-# instance of Nagios runs in.  If not specified, Nagios will use
-# the system configured timezone.
-#
-# NOTE: In order to display the correct timezone in the CGIs, you
-# will also need to alter the Apache directives for the CGI path 
-# to include your timezone.  Example:
-#
-#   <Directory "/usr/local/nagios/sbin/">
-#      SetEnv TZ "Australia/Brisbane"
-#      ...
-#   </Directory>
-
-#use_timezone=US/Mountain
-#use_timezone=Australia/Brisbane
-
-
-
-
-# P1.PL FILE LOCATION
-# This value determines where the p1.pl perl script (used by the
-# embedded Perl interpreter) is located.  If you didn't compile
-# Nagios with embedded Perl support, this option has no effect.
-
-p1_file = <%=nagios_p1_pl %>
-
-
-
-# EMBEDDED PERL INTERPRETER OPTION
-# This option determines whether or not the embedded Perl interpreter
-# will be enabled during runtime.  This option has no effect if Nagios
-# has not been compiled with support for embedded Perl.
-# Values: 0 = disable interpreter, 1 = enable interpreter
-
-enable_embedded_perl=1
-
-
-
-# EMBEDDED PERL USAGE OPTION
-# This option determines whether or not Nagios will process Perl plugins
-# and scripts with the embedded Perl interpreter if the plugins/scripts
-# do not explicitly indicate whether or not it is okay to do so. Read
-# the HTML documentation on the embedded Perl interpreter for more 
-# information on how this option works.
-
-use_embedded_perl_implicitly=1
-
-
-
-# ILLEGAL OBJECT NAME CHARACTERS
-# This option allows you to specify illegal characters that cannot
-# be used in host names, service descriptions, or names of other
-# object types.
-
-illegal_object_name_chars=`~!$%^&*|'"<>?,()=
-
-
-
-# ILLEGAL MACRO OUTPUT CHARACTERS
-# This option allows you to specify illegal characters that are
-# stripped from macros before being used in notifications, event
-# handlers, etc.  This DOES NOT affect macros used in service or
-# host check commands.
-# The following macros are stripped of the characters you specify:
-#	$HOSTOUTPUT$
-#	$HOSTPERFDATA$
-#	$HOSTACKAUTHOR$
-#	$HOSTACKCOMMENT$
-#	$SERVICEOUTPUT$
-#	$SERVICEPERFDATA$
-#	$SERVICEACKAUTHOR$
-#	$SERVICEACKCOMMENT$
-
-illegal_macro_output_chars=`~$&|'"<>
-
-
-
-# REGULAR EXPRESSION MATCHING
-# This option controls whether or not regular expression matching
-# takes place in the object config files.  Regular expression
-# matching is used to match host, hostgroup, service, and service
-# group names/descriptions in some fields of various object types.
-# Values: 1 = enable regexp matching, 0 = disable regexp matching
-
-use_regexp_matching=0
-
-
-
-# "TRUE" REGULAR EXPRESSION MATCHING
-# This option controls whether or not "true" regular expression 
-# matching takes place in the object config files.  This option
-# only has an effect if regular expression matching is enabled
-# (see above).  If this option is DISABLED, regular expression
-# matching only occurs if a string contains wildcard characters
-# (* and ?).  If the option is ENABLED, regexp matching occurs
-# all the time (which can be annoying).
-# Values: 1 = enable true matching, 0 = disable true matching
-
-use_true_regexp_matching=0
-
-
-
-# ADMINISTRATOR EMAIL/PAGER ADDRESSES
-# The email and pager address of a global administrator (likely you).
-# Nagios never uses these values itself, but you can access them by
-# using the $ADMINEMAIL$ and $ADMINPAGER$ macros in your notification
-# commands.
-
-admin_email=nagios@localhost
-admin_pager=pagenagios@localhost
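-
-# Usage sketch (the notification command itself is illustrative and not
-# defined in this file): a notification command can fall back to the
-# global address via the macro, e.g.:
-#
-#   command_line  /bin/mail -s "Nagios alert for $HOSTNAME$" $ADMINEMAIL$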
-
-
-
-# DAEMON CORE DUMP OPTION
-# This option determines whether or not Nagios is allowed to create
-# a core dump when it runs as a daemon.  Note that it is generally
-# considered bad form to allow this, but it may be useful for
-# debugging purposes.  Enabling this option doesn't guarantee that
-# a core file will be produced, but that's just life...
-# Values: 1 - Allow core dumps
-#         0 - Do not allow core dumps (default)
-
-daemon_dumps_core=0
-
-
-
-# LARGE INSTALLATION TWEAKS OPTION
-# This option determines whether or not Nagios will take some shortcuts
-# which can save on memory and CPU usage in large Nagios installations.
-# Read the documentation for more information on the benefits/tradeoffs
-# of enabling this option.
-# Values: 1 - Enable tweaks
-#         0 - Disable tweaks (default)
-
-use_large_installation_tweaks=0
-
-
-
-# ENABLE ENVIRONMENT MACROS
-# This option determines whether or not Nagios will make all standard
-# macros available as environment variables when host/service checks
-# and system commands (event handlers, notifications, etc.) are
-# executed.  Enabling this option can cause performance issues in 
-# large installations, as it will consume a bit more memory and (more
-# importantly) consume more CPU.
-# Values: 1 - Enable environment variable macros (default)
-#         0 - Disable environment variable macros
-
-enable_environment_macros=1
-
-
-
-# CHILD PROCESS MEMORY OPTION
-# This option determines whether or not Nagios will free memory in
-# child processes (processes used to execute system commands and host/
-# service checks).  If you specify a value here, it will override
-# program defaults.
-# Value: 1 - Free memory in child processes
-#        0 - Do not free memory in child processes
-
-#free_child_process_memory=1
-
-
-
-# CHILD PROCESS FORKING BEHAVIOR
-# This option determines how Nagios will fork child processes
-# (used to execute system commands and host/service checks).  Normally
-# child processes are fork()ed twice, which provides a very high level
-# of isolation from problems.  Fork()ing once is probably enough and will
-# save a great deal on CPU usage (in large installs), so you might
-# want to consider using this.  If you specify a value here, it will
-# override program defaults.
-# Value: 1 - Child processes fork() twice
-#        0 - Child processes fork() just once
-
-#child_processes_fork_twice=1
-
-
-
-# DEBUG LEVEL
-# This option determines how much (if any) debugging information will
-# be written to the debug file.  OR values together to log multiple
-# types of information.
-# Values:
-#          -1 = Everything
-#           0 = Nothing
-#           1 = Functions
-#           2 = Configuration
-#           4 = Process information
-#           8 = Scheduled events
-#          16 = Host/service checks
-#          32 = Notifications
-#          64 = Event broker
-#         128 = External commands
-#         256 = Commands
-#         512 = Scheduled downtime
-#        1024 = Comments
-#        2048 = Macros
-
-debug_level=0
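-
-# Worked example: to log host/service checks (16) together with external
-# commands (128), OR the values together: 16 + 128 = 144, i.e. debug_level=144.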
-
-
-
-# DEBUG VERBOSITY
-# This option determines how verbose the debug log output will be.
-# Values: 0 = Brief output
-#         1 = More detailed
-#         2 = Very detailed
-
-debug_verbosity=1
-
-
-
-# DEBUG FILE
-# This option determines where Nagios should write debugging information.
-
-debug_file=/var/log/nagios/nagios.debug
-
-
-
-# MAX DEBUG FILE SIZE
-# This option determines the maximum size (in bytes) of the debug file.  If
-# the file grows larger than this size, it will be renamed with a .old
-# extension.  If a file already exists with a .old extension it will
-# automatically be deleted.  This helps ensure your disk space usage doesn't
-# get out of control when debugging Nagios.
-
-max_debug_file_size=1000000
-
-
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/resource.cfg.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/resource.cfg.erb
deleted file mode 100644
index b6a9a7b..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/resource.cfg.erb
+++ /dev/null
@@ -1,33 +0,0 @@
-###########################################################################
-#
-# RESOURCE.CFG - Sample Resource File for Nagios 3.2.3
-#
-# Last Modified: 09-10-2003
-#
-# You can define $USERx$ macros in this file, which can in turn be used
-# in command definitions in your host config file(s).  $USERx$ macros are
-# useful for storing sensitive information such as usernames, passwords,
-# etc.  They are also handy for specifying the path to plugins and
-# event handlers - if you decide to move the plugins or event handlers to
-# a different directory in the future, you can just update one or two
-# $USERx$ macros, instead of modifying a lot of command definitions.
-#
-# The CGIs will not attempt to read the contents of resource files, so
-# you can set restrictive permissions (600 or 660) on them.
-#
-# Nagios supports up to 32 $USERx$ macros ($USER1$ through $USER32$)
-#
-# Resource files may also be used to store configuration directives for
-# external data sources like MySQL...
-#
-###########################################################################
-
-# Sets $USER1$ to be the path to the plugins
-$USER1$=<%=scope.function_hdp_template_var("plugins_dir")%>
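-
-# Usage sketch (this command is illustrative and not defined here):
-# referencing $USER1$ keeps command definitions path-independent, e.g.
-# in a commands file:
-#
-#   define command {
-#     command_name  check_local_disk
-#     command_line  $USER1$/check_disk -w $ARG1$ -c $ARG2$ -p $ARG3$
-#   }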
-
-# Sets $USER2$ to be the path to event handlers
-#$USER2$=<%=scope.function_hdp_template_var("eventhandlers_dir")%>
-
-# Store some usernames and passwords (hidden from the CGIs)
-#$USER3$=someuser
-#$USER4$=somepassword
\ No newline at end of file
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-oozie/files/oozieSmoke.sh b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-oozie/files/oozieSmoke.sh
deleted file mode 100644
index 939174d..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-oozie/files/oozieSmoke.sh
+++ /dev/null
@@ -1,97 +0,0 @@
-#!/bin/bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-function getValueFromField {
-  xmllint $1 | grep "<name>$2</name>" -C 2 | grep '<value>' | cut -d ">" -f2 | cut -d "<" -f1
-  return $?
-}
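-
-# Illustrative usage (sketch): extract the JobTracker address from the
-# Hadoop config, as done for JOBTRACKER below:
-#   getValueFromField ${hadoop_conf_dir}/mapred-site.xml mapred.job.tracker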
-
-function checkOozieJobStatus {
-  local job_id=$1
-  local num_of_tries=$2
-  #default num_of_tries to 10 if not present
-  num_of_tries=${num_of_tries:-10}
-  local i=0
-  local rc=1
-  local cmd="source ${oozie_conf_dir}/oozie-env.sh ; /usr/bin/oozie job -oozie ${OOZIE_SERVER} -info $job_id"
-  su - ${smoke_test_user} -c "$cmd"
-  while [ $i -lt $num_of_tries ] ; do
-    cmd_output=`su - ${smoke_test_user} -c "$cmd"`
-    (IFS='';echo $cmd_output)
-    act_status=$(IFS='';echo $cmd_output | grep ^Status | cut -d':' -f2 | sed 's| ||g')
-    echo "workflow_status=$act_status"
-    if [ "RUNNING" == "$act_status" ]; then
-      # increment the counter and get the status again after waiting for 15 secs
-      sleep 15
-      (( i++ ))
-    elif [ "SUCCEEDED" == "$act_status" ]; then
-      rc=0
-      break
-    else
-      rc=1
-      break
-    fi
-  done
-  return $rc
-}
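-
-# Illustrative usage (the workflow id is made up): poll the job up to 20
-# times, sleeping 15s between RUNNING polls:
-#   checkOozieJobStatus "0000000-130419000000000-oozie-oozi-W" 20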
-
-export oozie_conf_dir=$1
-export hadoop_conf_dir=$2
-export smoke_test_user=$3
-export security_enabled=$4
-export smoke_user_keytab=$5
-export realm=$6
-export JTHOST=$7
-export NNHOST=$8
-
-export OOZIE_EXIT_CODE=0
-export JOBTRACKER=`getValueFromField ${hadoop_conf_dir}/mapred-site.xml mapred.job.tracker`
-export NAMENODE=`getValueFromField ${hadoop_conf_dir}/core-site.xml fs.default.name`
-export OOZIE_SERVER=`getValueFromField ${oozie_conf_dir}/oozie-site.xml oozie.base.url`
-export OOZIE_EXAMPLES_DIR=`rpm -ql oozie-client | grep 'oozie-examples.tar.gz$' | xargs dirname`
-cd $OOZIE_EXAMPLES_DIR
-
-tar -zxf oozie-examples.tar.gz
-sed -i "s|nameNode=hdfs://localhost:8020|nameNode=$NAMENODE|g"  examples/apps/map-reduce/job.properties
-sed -i "s|nameNode=hdfs://localhost:9000|nameNode=$NAMENODE|g"  examples/apps/map-reduce/job.properties
-sed -i "s|jobTracker=localhost:8021|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
-sed -i "s|jobTracker=localhost:9001|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
-sed -i "s|oozie.wf.application.path=hdfs://localhost:9000|oozie.wf.application.path=$NAMENODE|g" examples/apps/map-reduce/job.properties
-
-if [[ $security_enabled == "true" ]]; then
-  kinitcmd="/usr/kerberos/bin/kinit  -kt ${smoke_user_keytab} ${smoke_test_user}; "
-  echo "dfs.namenode.kerberos.principal=nn/`echo ${NNHOST} | tr '[:upper:]' '[:lower:]'`@${realm}" >> examples/apps/map-reduce/job.properties
-  echo "mapreduce.jobtracker.kerberos.principal=jt/`echo ${JTHOST} | tr '[:upper:]' '[:lower:]'`@${realm}" >> examples/apps/map-reduce/job.properties
-else 
-  kinitcmd=""
-fi
-
-su - ${smoke_test_user} -c "hadoop dfs -rmr examples"
-su - ${smoke_test_user} -c "hadoop dfs -rmr input-data"
-su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples examples"
-su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples/input-data input-data"
-
-cmd="${kinitcmd}source ${oozie_conf_dir}/oozie-env.sh ; /usr/bin/oozie job -oozie $OOZIE_SERVER -config $OOZIE_EXAMPLES_DIR/examples/apps/map-reduce/job.properties  -run"
-job_info=`su - ${smoke_test_user} -c "$cmd" | grep "job:"`
-job_id="`echo $job_info | cut -d':' -f2`"
-checkOozieJobStatus "$job_id"
-OOZIE_EXIT_CODE="$?"
-exit $OOZIE_EXIT_CODE
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/client.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/client.pp
deleted file mode 100644
index f80c356..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/client.pp
+++ /dev/null
@@ -1,40 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-oozie::client(
-  $service_state = $hdp::params::cluster_client_state,
-  $oozie_server = undef
-) inherits hdp::params
-{ 
-  if ($service_state == 'no_op') {
-   } elsif ($service_state in ['installed_and_configured','uninstalled']) {
-     if ($hdp::params::service_exists['hdp-oozie::server'] != true) {
-       #installs package, creates user, sets configuration
-       class { 'hdp-oozie' :
-         service_state => $service_state
-       }
-      if ($oozie_server != undef) {
-        Hdp-Oozie::Configfile<||>{oozie_server => $oozie_server}
-      }
-    }
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
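-
-# Usage sketch (the server hostname is hypothetical): the orchestration
-# layer would typically declare this class as:
-#
-#   class { 'hdp-oozie::client':
-#     service_state => 'installed_and_configured',
-#     oozie_server  => 'oozie-host.example.com'
-#   }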
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/download-ext-zip.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/download-ext-zip.pp
deleted file mode 100644
index c63ebd1..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/download-ext-zip.pp
+++ /dev/null
@@ -1,31 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-oozie::download-ext-zip()
-{
-  anchor { 'hdp-oozie::download-ext-zip::begin':}
-
-   hdp::package { 'extjs' :
-     require   => Anchor['hdp-oozie::download-ext-zip::begin']
-   }
-
-   anchor { 'hdp-oozie::download-ext-zip::end':}
-
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/init.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/init.pp
deleted file mode 100644
index 7b576c5..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/init.pp
+++ /dev/null
@@ -1,98 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-oozie(
-  $service_state = undef,
-  $server = false,
-  $setup = false
-)
-{
-  include hdp-oozie::params 
-
-# Configs generation  
-
-  if has_key($configuration, 'oozie-site') {
-    configgenerator::configfile{'oozie-site':
-      modulespath => $hdp-oozie::params::conf_dir, 
-      filename => 'oozie-site.xml',
-      module => 'hdp-oozie',
-      configuration => $configuration['oozie-site']
-    }
-  }
-
-  $oozie_user = $hdp-oozie::params::oozie_user
-  $oozie_config_dir = $hdp-oozie::params::conf_dir
-  
-  if ($service_state == 'uninstalled') {
-    hdp::package { 'oozie-client' : 
-      ensure => 'uninstalled'
-    }
-    if ($server == true ) {
-      hdp::package { 'oozie-server' :
-        ensure => 'uninstalled'
-      }
-    }
-    hdp::directory { $oozie_config_dir:
-      service_state => $service_state,
-      force => true
-    }
-
-    anchor { 'hdp-oozie::begin': } -> Hdp::Package['oozie-client'] -> Hdp::Directory[$oozie_config_dir] ->  anchor { 'hdp-oozie::end': }
-
-    if ($server == true ) {
-       Hdp::Package['oozie-server'] -> Hdp::Package['oozie-client'] ->  Anchor['hdp-oozie::end']
-     }
-  } else {
-    hdp::package { 'oozie-client' : }
-    if ($server == true ) {
-      hdp::package { 'oozie-server':}
-      class { 'hdp-oozie::download-ext-zip': }
-    }
-
-     hdp::user{ $oozie_user:}
-
-     hdp::directory { $oozie_config_dir: 
-      service_state => $service_state,
-      force => true
-    }
-
-     hdp-oozie::configfile { ['oozie-env.sh','oozie-log4j.properties']: }
-
-    anchor { 'hdp-oozie::begin': } -> Hdp::Package['oozie-client'] -> Hdp::User[$oozie_user] -> Hdp::Directory[$oozie_config_dir] -> Hdp-oozie::Configfile<||> -> anchor { 'hdp-oozie::end': }
-
-     if ($server == true ) { 
-       Hdp::Package['oozie-server'] -> Hdp::Package['oozie-client'] -> Hdp::User[$oozie_user] ->   Class['hdp-oozie::download-ext-zip'] ->  Anchor['hdp-oozie::end']
-     }
- }
-}
-
-### config files
-define hdp-oozie::configfile(
-  $mode = undef,
-  $oozie_server = undef
-) 
-{
-  hdp::configfile { "${hdp-oozie::params::conf_dir}/${name}":
-    component       => 'oozie',
-    owner           => $hdp-oozie::params::oozie_user,
-    mode            => $mode,
-    oozie_server    => $oozie_server
-  }
-}
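-
-# Usage sketch (the server hostname is hypothetical): render the env
-# script and point clients at a known server:
-#
-#   hdp-oozie::configfile { 'oozie-env.sh':
-#     oozie_server => 'oozie-host.example.com'
-#   }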
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/oozie/service_check.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/oozie/service_check.pp
deleted file mode 100644
index b7d9da8..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/oozie/service_check.pp
+++ /dev/null
@@ -1,63 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-oozie::oozie::service_check()
-{
-  include hdp-oozie::params
-
-  $smoke_shell_files = ['oozieSmoke.sh']
-  anchor { 'hdp-oozie::oozie::service_check::begin':}
-
-  hdp-oozie::smoke_shell_file { $smoke_shell_files: }
-
-  anchor{ 'hdp-oozie::oozie::service_check::end':}
-}
-
-define hdp-oozie::smoke_shell_file()
-{
-  $smoke_test_user = $hdp::params::smokeuser
-  $conf_dir = $hdp::params::oozie_conf_dir
-  $hadoopconf_dir = $hdp::params::hadoop_conf_dir 
-  $security_enabled=$hdp::params::security_enabled
-  $jt_host=$hdp::params::jtnode_host
-  $nn_host=$hdp::params::namenode_host
-  if ($security_enabled == true) {
-    $security = "true"
-  } else {
-    $security = "false"
-  }
-  $smoke_user_keytab = "${hdp-oozie::params::keytab_path}/${smoke_test_user}.headless.keytab"
-  $realm=$hdp::params::kerberos_domain
-
-  file { '/tmp/oozieSmoke.sh':
-    ensure => present,
-    source => "puppet:///modules/hdp-oozie/oozieSmoke.sh",
-    mode => '0755'
-  }
-
-  exec { '/tmp/oozieSmoke.sh':
-    command   => "sh /tmp/oozieSmoke.sh ${conf_dir} ${hadoopconf_dir} ${smoke_test_user} ${security} ${smoke_user_keytab} ${realm} $jt_host $nn_host",
-    tries     => 3,
-    try_sleep => 5,
-    require   => File['/tmp/oozieSmoke.sh'],
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    logoutput => "true"
-  }
-}
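
The service check follows the smoke-test recipe these modules share: copy a shell script out of the module's files directory, then exec it with retries so a service that is still coming up gets another chance. A stripped-down sketch; the module and script names are illustrative:

    define example::smoke_shell_file() {
      file { "/tmp/${name}":
        ensure => present,
        source => "puppet:///modules/example/${name}",
        mode   => '0755'
      }

      exec { "/tmp/${name}":
        command   => "sh /tmp/${name}",
        tries     => 3,     # retry while the service finishes starting
        try_sleep => 5,
        require   => File["/tmp/${name}"],
        path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
        logoutput => true
      }
    }
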
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/params.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/params.pp
deleted file mode 100644
index de04036..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/params.pp
+++ /dev/null
@@ -1,57 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-oozie::params() inherits hdp::params
-{
-  $oozie_user = $hdp::params::oozie_user 
-
-  ###ext url
-  $download_url = $hdp::params::gpl_artifacts_download_url
-  $ext_zip_url = "${download_url}/ext-2.2.zip"
-  $ext_zip_name = hdp_default("ext_zip_name","ext-2.2.zip")
-
-  ### oozie-env
-  $conf_dir = $hdp::params::oozie_conf_dir
-  $hadoop_prefix = hdp_default("hadoop_prefix","/usr")
-
-  ### oozie-env
-  $oozie_log_dir = hdp_default("hadoop/oozie-env/oozie_log_dir","/var/log/oozie")
-
-  $oozie_pid_dir = hdp_default("oozie_pid_dir","/var/run/oozie/")
-  $oozie_pid_file = hdp_default("hadoop/oozie-env/oozie_pid_file","$oozie_pid_dir/oozie.pid")
-
-  $oozie_data_dir = hdp_default("hadoop/oozie-env/oozie_data_dir","/var/data/oozie")
-
-  $oozie_tmp_dir = hdp_default("hadoop/oozie-env/oozie_tmp_dir","/var/tmp/oozie")
-
-  $oozie_lib_dir = hdp_default("hadoop/oozie-env/oozie_lib_dir","/var/lib/oozie/")
-  
-  $oozie_webapps_dir = hdp_default("hadoop/oozie-env/oozie_webapps_dir","/var/lib/oozie/oozie-server/webapps/")
-  
-  ### oozie-site
-  $keytab_path = hdp_default("keytab_path","/etc/security/keytabs")
-  if ($security_enabled == true) {
-    $oozie_sasl_enabled = "true"
-    $oozie_security_type = "kerberos"
-  } else {
-    $oozie_sasl_enabled = "false"
-    $oozie_security_type = "simple"
-  }
-}
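
Note how nearly every setting in params.pp goes through hdp_default, the stack's lookup helper: it appears to return the site-supplied value for the given configuration key when one exists and otherwise falls back to the hard-coded default. A sketch of the shape, with illustrative key names:

    class example::params() inherits hdp::params {
      # a site-provided override wins; otherwise the second argument is used
      $log_dir = hdp_default("hadoop/example-env/example_log_dir", "/var/log/example")
      $pid_dir = hdp_default("example_pid_dir", "/var/run/example")
    }
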
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/server.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/server.pp
deleted file mode 100644
index 4879af4..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/server.pp
+++ /dev/null
@@ -1,72 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-oozie::server(
-  $service_state = $hdp::params::cluster_service_state,
-  $setup = false,
-  $opts = {}
-) inherits  hdp-oozie::params
-{   
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) {
-    $hdp::params::service_exists['hdp-oozie::server'] = true
-
-    if ( ($service_state == 'installed_and_configured') and
-         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
-       $masterHost = $kerberos_adminclient_host[0]
-       hdp::download_keytab { 'oozie_service_keytab' :
-         masterhost => $masterHost,
-         keytabdst => "${$keytab_path}/oozie.service.keytab",
-         keytabfile => 'oozie.service.keytab',
-         owner => $hdp::params::oozie_user
-       }
-
-       if ( ($hdp::params::service_exists['hdp-hadoop::namenode'] != true) and
-            ($hdp::params::service_exists['hdp-hadoop::snamenode'] != true) ) {
-         hdp::download_keytab { 'oozie_spnego_keytab' :
-           masterhost => $masterHost,
-           keytabdst => "${$keytab_path}/spnego.service.keytab",
-           keytabfile => 'spnego.service.keytab',
-           owner => $hdp::params::oozie_user,
-           group => 'hadoop',
-           mode => '0440'
-         }
-      }
-    }
-
-    #installs package, creates user, sets configuration
-    class{ 'hdp-oozie' : 
-      service_state => $service_state,
-      server        => true
-    } 
-  
-    Hdp-Oozie::Configfile<||>{oozie_server => $hdp::params::oozie_server}
-
-    class { 'hdp-oozie::service' :
-      ensure       => $service_state,
-      setup         => $setup
-    }
-  
-    #top level does not need anchors
-    Class['hdp-oozie'] -> Class['hdp-oozie::service']
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/service.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/service.pp
deleted file mode 100644
index 379c749..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/service.pp
+++ /dev/null
@@ -1,133 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-oozie::service(
-  $ensure,
-  $setup,
-  $initial_wait = undef
-)
-{
-  include hdp-oozie::params
-  
-  $user = "$hdp-oozie::params::oozie_user"
-  $hadoop_home = $hdp-oozie::params::hadoop_prefix
-  $oozie_tmp = $hdp-oozie::params::oozie_tmp_dir
-  $oozie_hdfs_user_dir = $hdp::params::oozie_hdfs_user_dir
-  $cmd = "env HADOOP_HOME=${hadoop_home} /usr/sbin/oozie_server.sh"
-  $pid_file = "${hdp-oozie::params::oozie_pid_dir}/oozie.pid" 
-  $jar_location = $hdp::params::hadoop_jar_location
-  $ext_js_path = "/usr/share/HDP-oozie/ext.zip"
-  
-  if ($lzo_enabled == true) {
-    $lzo_jar_suffix = "-jars /usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar"
-  } else {
-    $lzo_jar_suffix = ""
-  }
-
-  $cmd1 = "cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz"
-  $cmd2 =  "cd /usr/lib/oozie && mkdir -p ${oozie_tmp}"
-  $cmd3 =  "cd /usr/lib/oozie && chown ${user}:${hdp::params::user_group} ${oozie_tmp}"    
-  $cmd4 =  "cd ${oozie_tmp} && /usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 $jar_location -extjs $ext_js_path $lzo_jar_suffix"
-  $cmd5 =  "cd ${oozie_tmp} && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run ; echo 0"
-  $cmd6 =  "su - ${user} -c 'hadoop dfs -put /usr/lib/oozie/share ${oozie_hdfs_user_dir} ; hadoop dfs -chmod -R 755 ${oozie_hdfs_user_dir}/share'"
-  #$cmd7 = "/usr/lib/oozie/bin/oozie-start.sh"
-
-  if ($ensure == 'installed_and_configured') {
-    $sh_cmds = [$cmd1, $cmd2, $cmd3]
-    $user_cmds = [$cmd4, $cmd5]
-  } elsif ($ensure == 'running') {   
-    $start_cmd = "su - ${user} -c  'cd ${oozie_tmp} && /usr/lib/oozie/bin/oozie-start.sh'"
-    $no_op_test = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
-  } elsif ($ensure == 'stopped') {
-    $stop_cmd  = "su - ${user} -c  'cd ${oozie_tmp} && /usr/lib/oozie/bin/oozie-stop.sh'"
-    $no_op_test = undef
-  } else {
-    $daemon_cmd = undef
-  }
-
-  hdp-oozie::service::directory { $hdp-oozie::params::oozie_pid_dir : }
-  hdp-oozie::service::directory { $hdp-oozie::params::oozie_log_dir : }
-  hdp-oozie::service::directory { $hdp-oozie::params::oozie_tmp_dir : }
-  hdp-oozie::service::directory { $hdp-oozie::params::oozie_data_dir : }
-  hdp-oozie::service::directory { $hdp-oozie::params::oozie_lib_dir : }
-  hdp-oozie::service::directory { $hdp-oozie::params::oozie_webapps_dir : }
-
-  anchor{'hdp-oozie::service::begin':} -> Hdp-oozie::Service::Directory<||> -> anchor{'hdp-oozie::service::end':}
-  
-  if ($ensure == 'installed_and_configured') {
-    hdp-oozie::service::exec_sh{$sh_cmds:}
-    hdp-oozie::service::exec_user{$user_cmds:}
-    Hdp-oozie::Service::Directory<||> -> Hdp-oozie::Service::Exec_sh[$cmd1] -> Hdp-oozie::Service::Exec_sh[$cmd2] ->Hdp-oozie::Service::Exec_sh[$cmd3] -> Hdp-oozie::Service::Exec_user[$cmd4] ->Hdp-oozie::Service::Exec_user[$cmd5] -> Anchor['hdp-oozie::service::end']
-  } elsif ($ensure == 'running') {
-    hdp::exec { "exec $cmd6" :
-      command => $cmd6,
-      unless => "hadoop dfs -ls /user/oozie/share | awk 'BEGIN {count=0;} /share/ {count++} END {if (count > 0) {exit 0} else {exit 1}}'"
-    }
-    hdp::exec { "exec $start_cmd":
-      command => $start_cmd,
-      unless  => $no_op_test,
-      initial_wait => $initial_wait,
-      require => Exec["exec $cmd6"]
-    }
-  } elsif ($ensure == 'stopped') {
-    hdp::exec { "exec $stop_cmd":
-      command => $stop_cmd,
-      unless  => $no_op_test,
-      initial_wait => $initial_wait
-   }
-  }
-}
-
-define hdp-oozie::service::directory()
-{
-  hdp::directory_recursive_create { $name: 
-    owner => $hdp-oozie::params::oozie_user,
-    mode => '0755',
-    service_state => $ensure,
-    force => true
-  }
-}
-define hdp-oozie::service::createsymlinks()
-{
-  hdp::exec { '/usr/lib/oozie/oozie-server/lib/mapred-site.xml':
-    command => "ln -sf /etc/hadoop/conf/mapred-site.xml /usr/lib/oozie/oozie-server/lib/mapred-site.xml",
-    unless => "test -e /usr/lib/oozie/oozie-server/lib/mapred-site.xml"
-  }
-}
-
-define hdp-oozie::service::exec_sh()
-{
-  $no_op_test = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
-  hdp::exec { "exec $name":
-    command => "/bin/sh -c '$name'",
-    unless  => $no_op_test,
-    initial_wait => $initial_wait
-  }
-}
-
-define hdp-oozie::service::exec_user()
-{
-  $no_op_test = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
-  hdp::exec { "exec $name":
-    command => "su - ${user} -c '$name'",
-    unless  => $no_op_test,
-    initial_wait => $initial_wait
-  }
-}
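
The start and stop logic above keeps its exec resources idempotent with a no_op_test guard: the unless command succeeds only when the pid file names a live process, so a running daemon is never started twice. A minimal sketch with illustrative paths and user:

    $pid_file   = '/var/run/example/example.pid'
    # exits 0 only when the pid file exists and the process is alive
    $no_op_test = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"

    hdp::exec { 'start example':
      command => "su - example -c '/usr/lib/example/bin/start.sh'",
      unless  => $no_op_test
    }
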
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-oozie/templates/oozie-env.sh.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-oozie/templates/oozie-env.sh.erb
deleted file mode 100644
index 431f26f..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-oozie/templates/oozie-env.sh.erb
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/bin/bash
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-# 
-#      http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#Set JAVA HOME
-export JAVA_HOME=<%=scope.function_hdp_java_home()%>
-
-# Set Oozie specific environment variables here.
-
-# Settings for the Embedded Tomcat that runs Oozie
-# Java System properties for Oozie should be specified in this variable
-#
-# export CATALINA_OPTS=
-
-# Oozie configuration file to load from Oozie configuration directory
-#
-# export OOZIE_CONFIG_FILE=oozie-site.xml
-
-# Oozie logs directory
-#
-export OOZIE_LOG=<%=scope.function_hdp_template_var("oozie_log_dir")%>/
-
-# Oozie pid file
-#
-export CATALINA_PID=<%=scope.function_hdp_template_var("oozie_pid_file")%>
-
-#Location of the data for oozie
-export OOZIE_DATA=<%=scope.function_hdp_template_var("oozie_data_dir")%>/
-
-# Oozie Log4J configuration file to load from Oozie configuration directory
-#
-# export OOZIE_LOG4J_FILE=oozie-log4j.properties
-
-# Reload interval of the Log4J configuration file, in seconds
-#
-# export OOZIE_LOG4J_RELOAD=10
-
-# The port Oozie server runs
-#
-# export OOZIE_HTTP_PORT=11000
-
-# The host name Oozie server runs on
-#
-# export OOZIE_HTTP_HOSTNAME=`hostname -f`
-
-# The base URL for callback URLs to Oozie
-#
-# export OOZIE_BASE_URL="http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie"
-export JAVA_LIBRARY_PATH=/usr/lib/hadoop/lib/native/Linux-amd64-64
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-oozie/templates/oozie-log4j.properties.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-oozie/templates/oozie-log4j.properties.erb
deleted file mode 100644
index a14ae89..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-oozie/templates/oozie-log4j.properties.erb
+++ /dev/null
@@ -1,74 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# If the Java System property 'oozie.log.dir' is not defined at Oozie start up time
-# XLogService sets its value to '${oozie.home}/logs'
-
-log4j.appender.oozie=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozie.DatePattern='.'yyyy-MM-dd-HH
-log4j.appender.oozie.File=${oozie.log.dir}/oozie.log
-log4j.appender.oozie.Append=true
-log4j.appender.oozie.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozie.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.oozieops=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozieops.DatePattern='.'yyyy-MM-dd
-log4j.appender.oozieops.File=${oozie.log.dir}/oozie-ops.log
-log4j.appender.oozieops.Append=true
-log4j.appender.oozieops.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozieops.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.oozieinstrumentation=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozieinstrumentation.DatePattern='.'yyyy-MM-dd
-log4j.appender.oozieinstrumentation.File=${oozie.log.dir}/oozie-instrumentation.log
-log4j.appender.oozieinstrumentation.Append=true
-log4j.appender.oozieinstrumentation.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozieinstrumentation.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.oozieaudit=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozieaudit.DatePattern='.'yyyy-MM-dd
-log4j.appender.oozieaudit.File=${oozie.log.dir}/oozie-audit.log
-log4j.appender.oozieaudit.Append=true
-log4j.appender.oozieaudit.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozieaudit.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.openjpa=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.openjpa.DatePattern='.'yyyy-MM-dd
-log4j.appender.openjpa.File=${oozie.log.dir}/oozie-jpa.log
-log4j.appender.openjpa.Append=true
-log4j.appender.openjpa.layout=org.apache.log4j.PatternLayout
-log4j.appender.openjpa.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.logger.openjpa=INFO, openjpa
-log4j.logger.oozieops=DEBUG, oozieops
-log4j.logger.oozieinstrumentation=ALL, oozieinstrumentation
-log4j.logger.oozieaudit=ALL, oozieaudit
-log4j.logger.org.apache.oozie=DEBUG, oozie
-log4j.logger.org.apache.hadoop=WARN, oozie
-log4j.logger.org.mortbay=WARN, oozie
-log4j.logger.org.hsqldb=WARN, oozie
-log4j.logger.org.apache.hadoop.security.authentication.server=DEBUG, oozie
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-pig/files/pigSmoke.sh b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-pig/files/pigSmoke.sh
deleted file mode 100644
index a22456e..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-pig/files/pigSmoke.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-/*Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License */
-
-A = load 'passwd' using PigStorage(':');
-B = foreach A generate \$0 as id;
-store B into 'pigsmoke.out';
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-pig/manifests/init.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-pig/manifests/init.pp
deleted file mode 100644
index 474e8b9..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-pig/manifests/init.pp
+++ /dev/null
@@ -1,72 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-pig(
-  $service_state = $hdp::params::cluster_client_state
-) inherits hdp-pig::params
-{  
-  $pig_config_dir = $hdp-pig::params::pig_conf_dir
- 
-  if ($hdp::params::use_32_bits_on_slaves == false) {
-    $size = 64
-  } else {
-    $size = 32
-  }
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state == 'uninstalled') {
-    hdp::package { 'pig' :
-      ensure => 'uninstalled',
-      size   => $size
-    }
-    hdp::directory_recursive_create { $pig_config_dir:
-      service_state => $service_state,
-      force => true
-    }
-    anchor { 'hdp-pig::begin': } -> Hdp::Package['pig'] -> Hdp::Directory_recursive_create[$pig_config_dir] -> anchor { 'hdp-pig::end': }
-
-  } elsif ($service_state == 'installed_and_configured') {
-    hdp::package { 'pig' : 
-      size => $size
-    }
-
-    hdp::directory { $pig_config_dir:
-      service_state => $service_state,
-      force => true
-    }
-
-    hdp-pig::configfile { ['pig-env.sh','pig.properties','log4j.properties']:}
-  
-    anchor { 'hdp-pig::begin': } -> Hdp::Package['pig'] -> Hdp::Directory[$pig_config_dir] -> Hdp-pig::Configfile<||> -> anchor { 'hdp-pig::end': }
- } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-### config files
-define hdp-pig::configfile()
-{
-  hdp::configfile { "${hdp::params::pig_conf_dir}/${name}":
-    component => 'pig'
-  }
-}
-
-
-
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-pig/manifests/params.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-pig/manifests/params.pp
deleted file mode 100644
index cd94408..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-pig/manifests/params.pp
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-pig::params() inherits hdp::params
-{
-  $pig_conf_dir = $hdp::params::pig_conf_dir
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-pig/manifests/pig/service_check.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-pig/manifests/pig/service_check.pp
deleted file mode 100644
index 89abcfa..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-pig/manifests/pig/service_check.pp
+++ /dev/null
@@ -1,70 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-pig::pig::service_check() 
-{
-  $smoke_test_user = $hdp::params::smokeuser
-  $input_file = 'passwd'
-  $output_file = "pigsmoke.out"
-
-  $cleanup_cmd = "dfs -rmr ${output_file} ${input_file}"
-  #cleanup put below to handle retries; if retrying there will be a stale file that needs cleanup; exit code is a function of the second command
-  $create_file_cmd = "${cleanup_cmd}; hadoop dfs -put /etc/passwd ${input_file} " #TODO: inconsistent that the second command needs the hadoop prefix
-  $test_cmd = "fs -test -e ${output_file}" 
-  
-  anchor { 'hdp-pig::pig::service_check::begin':}
-
-
-  hdp-hadoop::exec-hadoop { 'pig::service_check::create_file':
-    command   => $create_file_cmd,
-    tries     => 3,
-    try_sleep => 5,
-    require   => Anchor['hdp-pig::pig::service_check::begin'],
-    notify    => File['/tmp/pigSmoke.sh'],
-    user      => $smoke_test_user
-  }
-
-  file { '/tmp/pigSmoke.sh':
-    ensure => present,
-    source => "puppet:///modules/hdp-pig/pigSmoke.sh",
-    mode => '0755',
-    require     => Hdp-hadoop::Exec-hadoop['pig::service_check::create_file']
-  }
-
-  exec { '/tmp/pigSmoke.sh':
-    command   => "su - ${smoke_test_user} -c 'pig /tmp/pigSmoke.sh'",
-    tries     => 3,
-    try_sleep => 5,
-    require   => File['/tmp/pigSmoke.sh'],
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    notify    => Hdp-hadoop::Exec-hadoop['pig::service_check::test'],
-    logoutput => "true"
-  }
-
-  hdp-hadoop::exec-hadoop { 'pig::service_check::test':
-    command     => $test_cmd,
-    refreshonly => true,
-    require     => Exec['/tmp/pigSmoke.sh'],
-    before      => Anchor['hdp-pig::pig::service_check::end'], #TODO: remove after testing
-    user      => $smoke_test_user
-  }
-  
-  anchor{ 'hdp-pig::pig::service_check::end':}
-}
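
This check chains its steps with notify and refreshonly rather than plain ordering: the final verification exec is refreshonly, so it fires only when the smoke job actually ran, never on an ordinary catalog run. Sketched below with illustrative resource titles and a hypothetical smoke user name:

    exec { 'run-smoke':
      command => "su - smokeuser -c 'pig /tmp/pigSmoke.sh'",
      path    => '/usr/bin:/bin',
      notify  => Exec['verify-output']   # signal the checker that a run happened
    }

    exec { 'verify-output':
      command     => 'hadoop fs -test -e pigsmoke.out',
      path        => '/usr/bin:/bin',
      refreshonly => true                # runs only when notified, never on its own
    }
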
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-pig/templates/log4j.properties.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-pig/templates/log4j.properties.erb
deleted file mode 100644
index 9ef6e2c..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-pig/templates/log4j.properties.erb
+++ /dev/null
@@ -1,30 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-# ***** Set the Pig logger level to INFO and its only appender to A.
-log4j.logger.org.apache.pig=info, A
-
-# ***** A is set to be a ConsoleAppender.
-log4j.appender.A=org.apache.log4j.ConsoleAppender
-# ***** A uses PatternLayout.
-log4j.appender.A.layout=org.apache.log4j.PatternLayout
-log4j.appender.A.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-pig/templates/pig-env.sh.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-pig/templates/pig-env.sh.erb
deleted file mode 100644
index 883ba0a..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-pig/templates/pig-env.sh.erb
+++ /dev/null
@@ -1,17 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-JAVA_HOME=<%=scope.function_hdp_java_home()%>
-HADOOP_HOME=${HADOOP_HOME:-<%=scope.function_hdp_template_var("::hdp::params::hadoop_home")%>}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-pig/templates/pig.properties.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-pig/templates/pig.properties.erb
deleted file mode 100644
index 1933982..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-pig/templates/pig.properties.erb
+++ /dev/null
@@ -1,54 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-# Pig configuration file. All values can be overwritten by command line arguments.
-
-# log4jconf log4j configuration file
-# log4jconf=./conf/log4j.properties
-
-# a file that contains pig script
-#file=
-
-# load jarfile, colon separated
-#jar=
-
-#verbose print all log messages to screen (default to print only INFO and above to screen)
-#verbose=true
-
-#exectype local|mapreduce, mapreduce is default
-#exectype=local
-
-#pig.logfile=
-
-#Do not spill temp files smaller than this size (bytes)
-#pig.spill.size.threshold=5000000
-#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
-#This should help reduce the number of files being spilled.
-#pig.spill.gc.activation.size=40000000
-
-#the following two parameters are to help estimate the reducer number
-#pig.exec.reducers.bytes.per.reducer=1000000000
-#pig.exec.reducers.max=999
-
-#Use this option only when your Pig job will otherwise die because of
-#using more counters than the configured Hadoop limit
-#pig.disable.counter=true
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-repos/manifests/init.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-repos/manifests/init.pp
deleted file mode 100644
index e005cab..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-repos/manifests/init.pp
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-repos() {}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-repos/manifests/process_repo.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-repos/manifests/process_repo.pp
deleted file mode 100644
index 31ab192..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-repos/manifests/process_repo.pp
+++ /dev/null
@@ -1,42 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-repos::process_repo(
-  $os_type,
-  $repo_id,
-  $base_url,
-  $mirror_list,
-  $repo_name
-) inherits hdp-hadoop::params
-{
-  debug("Getting repo path for os: $hdp_os_type")
-
-  $repo_path = $repos_paths[$hdp_os_type]
-
-  if hdp_is_empty($repo_path) {
-    hdp_fail("There is no repo path for os: $hdp_os_type in hdp::params")
-  }
-
-  file{$repo_name:
-    path => "$repo_path/$repo_name.repo",
-    ensure => file,
-    content => template("hdp-repos/repo.erb")
-  }
-}
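
process_repo renders the repo.erb template (next file) into the distribution's repository directory. A hypothetical invocation; every value here is illustrative rather than taken from the stack definitions:

    hdp-repos::process_repo { 'HDP':
      os_type     => 'centos6',
      repo_id     => 'HDP-1.2.1',
      base_url    => 'http://example.com/HDP/centos6/1.x/GA',
      mirror_list => '',
      repo_name   => 'HDP'
    }

Since base_url is non-empty in this sketch, the template's conditional would emit a baseurl= line; an empty base_url flips it to mirrorlist=.
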
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-repos/templates/repo.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-repos/templates/repo.erb
deleted file mode 100644
index a5edc55..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-repos/templates/repo.erb
+++ /dev/null
@@ -1,27 +0,0 @@
-#
-# 
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-[<%=repo_id%>]
-name=<%=repo_name %>
-<%if scope.function_hdp_is_empty(base_url)%>mirrorlist=<%=mirror_list %><% else %>baseurl=<%=base_url %><% end %>
-path=/
-enabled=1
-gpgcheck=0
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-sqoop/manifests/init.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-sqoop/manifests/init.pp
deleted file mode 100644
index 2faf0a2..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-sqoop/manifests/init.pp
+++ /dev/null
@@ -1,79 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-sqoop(
-  $service_state = $hdp::params::cluster_client_state
-) inherits hdp-sqoop::params
-{
-  if ($hdp::params::use_32_bits_on_slaves == false) {
-    $size = 64
-  } else {
-    $size = 32
-  }
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state == 'uninstalled') {
-    hdp::package { 'sqoop' :
-      ensure => 'uninstalled',
-      size   => $size
-    }
-  } elsif ($service_state == 'installed_and_configured') {
-
-    hdp::package { 'sqoop' :
-      size => $size
-    }
-    class { 'hdp-sqoop::mysql-connector': }
-    if ($package_type == 'hdp') {
-      hdp-sqoop::createsymlinks { ['/usr/lib/sqoop/conf']:}
-    }
-
-    hdp-sqoop::configfile { ['sqoop-env.sh']:}
-
-    anchor { 'hdp-sqoop::begin': } -> Hdp::Package['sqoop'] -> Class['hdp-sqoop::mysql-connector'] -> Hdp-sqoop::Configfile<||> -> anchor { 'hdp-sqoop::end': }
- } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-
-define hdp-sqoop::createsymlinks()
-{
-  file { '/usr/lib/sqoop/conf' :
-    #ensure => directory,
-    ensure => link,
-    target => "/etc/sqoop"
-  }
-
-  file { '/etc/default/hadoop' :
-    ensure => link,
-    target => "/usr/bin/hadoop"
-  }
-}
-
-### config files
-define hdp-sqoop::configfile()
-{
-  hdp::configfile { "${hdp::params::sqoop_conf_dir}/${name}":
-    component => 'sqoop'
-  }
-}
-
-
-
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-sqoop/manifests/mysql-connector.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-sqoop/manifests/mysql-connector.pp
deleted file mode 100644
index 12a3971..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-sqoop/manifests/mysql-connector.pp
+++ /dev/null
@@ -1,44 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-sqoop::mysql-connector()
-{
-  include hdp-sqoop::params
-  include hdp-hive::params
-
-  $target = "${hdp::params::artifact_dir}/${zip_name}"
-  $sqoop_lib = $hdp-sqoop::params::sqoop_lib
-
-  anchor { 'hdp-sqoop::mysql-connector::begin':}
-
-  hdp::package { 'mysql-connector-java' :
-    require   => Anchor['hdp-sqoop::mysql-connector::begin']
-  }
-
-   file { "${sqoop_lib}/mysql-connector-java.jar" :
-       ensure => link,
-       target => "/usr/share/java/mysql-connector-java.jar",
-       require => Hdp::Package['mysql-connector-java'],
-       notify  =>  Anchor['hdp-sqoop::mysql-connector::end'],
-   }
-
-   anchor { 'hdp-sqoop::mysql-connector::end':}
-  
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-sqoop/manifests/params.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-sqoop/manifests/params.pp
deleted file mode 100644
index 03097c1..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-sqoop/manifests/params.pp
+++ /dev/null
@@ -1,30 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-sqoop::params() inherits hdp::params
-{
-  $conf_dir = $hdp::params::sqoop_conf_dir
-
-  $hbase_home = hdp_default("hbase_home","/usr")
-  $hive_home = hdp_default("hive_home","/usr")
-  $zoo_conf_dir = $hdp::params::zk_conf_dir 
-  $sqoop_lib = hdp_default("sqoop_lib","/usr/lib/sqoop/lib/") #TODO: should I remove and just use sqoop_dbroot
-  $keytab_path = hdp_default("keytab_path","/etc/security/keytabs")
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-sqoop/manifests/sqoop/service_check.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-sqoop/manifests/sqoop/service_check.pp
deleted file mode 100644
index 682076c..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-sqoop/manifests/sqoop/service_check.pp
+++ /dev/null
@@ -1,50 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-sqoop::sqoop::service_check() 
-{
-  include hdp-sqoop::params
-  $smoke_test_user = $hdp::params::smokeuser
-
-  # TODO:SUHAS Move this to hdp::params
-  $security_enabled=$hdp::params::security_enabled
-  $smoke_user_keytab = "${hdp-sqoop::params::keytab_path}/${smoke_test_user}.headless.keytab"
-  if ($security_enabled == true) {
-    $smoke_user_kinitcmd="/usr/kerberos/bin/kinit  -kt ${smoke_user_keytab} ${smoke_test_user}; "
-  } else {
-    $smoke_user_kinitcmd=""
-  }
-
-  $cmd = "${smoke_user_kinitcmd}su - ${smoke_test_user} -c 'sqoop version'"
-  
-  anchor { 'hdp-sqoop::sqoop::service_check::begin':}
-
-  exec { 'sqoop_smoke':
-    command   => $cmd,
-    tries     => 3,
-    try_sleep => 5,
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    logoutput => "true",
-    require   => Anchor['hdp-sqoop::sqoop::service_check::begin'],
-    before    => Anchor['hdp-sqoop::sqoop::service_check::end']
-  }
-
-  anchor{ 'hdp-sqoop::sqoop::service_check::end':}
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-sqoop/templates/sqoop-env.sh.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-sqoop/templates/sqoop-env.sh.erb
deleted file mode 100644
index b1836e4..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-sqoop/templates/sqoop-env.sh.erb
+++ /dev/null
@@ -1,35 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# included in all the hadoop scripts with source command
-# should not be executable directly
-# also should not be passed any arguments, since we need original $*
-
-# Set Hadoop-specific environment variables here.
-
-#Set path to where bin/hadoop is available
-export HADOOP_HOME=${HADOOP_HOME:-<%=scope.function_hdp_template_var("::hdp::params::hadoop_home")%>}
-
-#set the path to where bin/hbase is available
-export HBASE_HOME=${HBASE_HOME:-<%=scope.function_hdp_template_var("hbase_home")%>}
-
-#Set the path to where bin/hive is available
-export HIVE_HOME=${HIVE_HOME:-<%=scope.function_hdp_template_var("hive_home")%>}
-
-# add libthrift in hive to sqoop class path first so hive imports work
-export SQOOP_USER_CLASSPATH="`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}"
-
-#Set the path to where the zookeeper config dir is
-export ZOOCFGDIR=${ZOOCFGDIR:-<%=scope.function_hdp_template_var("zoo_conf_dir")%>}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-templeton/files/templetonSmoke.sh b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-templeton/files/templetonSmoke.sh
deleted file mode 100644
index bb605c2..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-templeton/files/templetonSmoke.sh
+++ /dev/null
@@ -1,94 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-export ttonhost=$1
-export smoke_test_user=$2
-export smoke_user_keytab=$3
-export security_enabled=$4
-export ttonurl="http://${ttonhost}:50111/templeton/v1"
-
-if [[ $security_enabled == "true" ]]; then
-  kinitcmd="/usr/kerberos/bin/kinit  -kt ${smoke_user_keytab} ${smoke_test_user}; "
-else
-  kinitcmd=""
-fi
-
-cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>'    $ttonurl/status 2>&1"
-retVal=`su - ${smoke_test_user} -c "$cmd"`
-httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
-
-if [[ "$httpExitCode" -ne "200" ]] ; then
-  echo "Templeton Smoke Test (status cmd): Failed. : $retVal"
-  export TEMPLETON_EXIT_CODE=1
-  exit 1
-fi
-
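-# NOTE: the script exits unconditionally here, so the ddl and pig checks below never run.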
-exit 0
-
-#try hcat ddl command
-echo "user.name=${smoke_test_user}&exec=show databases;" /tmp/show_db.post.txt
-cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>' -d  \@${destdir}/show_db.post.txt  $ttonurl/ddl 2>&1"
-retVal=`su - ${smoke_test_user} -c "$cmd"`
-httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
-
-if [[ "$httpExitCode" -ne "200" ]] ; then
-  echo "Templeton Smoke Test (ddl cmd): Failed. : $retVal"
-  export TEMPLETON_EXIT_CODE=1
-  exit  1
-fi
-
-# TODO (SUHAS): confirm whether the Pig smoke test should also run in secure mode
-if [[ $security_enabled == "true" ]]; then
-  echo "Templeton Pig Smoke Tests not run in secure mode"
-  exit 0
-fi
-
-#try pig query
-outname=${smoke_test_user}.`date +"%M%d%y"`.$$;
-ttonTestOutput="/tmp/idtest.${outname}.out";
-ttonTestInput="/tmp/idtest.${outname}.in";
-ttonTestScript="idtest.${outname}.pig"
-
-echo "A = load '$ttonTestInput' using PigStorage(':');"  > /tmp/$ttonTestScript
-echo "B = foreach A generate \$0 as id; " >> /tmp/$ttonTestScript
-echo "store B into '$ttonTestOutput';" >> /tmp/$ttonTestScript
-
-#copy pig script to hdfs
-su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal /tmp/$ttonTestScript /tmp/$ttonTestScript"
-
-#copy input file to hdfs
-su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal /etc/passwd $ttonTestInput"
-
-#create, copy post args file
-echo -n "user.name=${smoke_test_user}&file=/tmp/$ttonTestScript" > /tmp/pig_post.txt
-
-#submit pig query
-cmd="curl -s -w 'http_code <%{http_code}>' -d  \@${destdir}/pig_post.txt  $ttonurl/pig 2>&1"
-retVal=`su - ${smoke_test_user} -c "$cmd"`
-httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
-if [[ "$httpExitCode" -ne "200" ]] ; then
-  echo "Templeton Smoke Test (pig cmd): Failed. : $retVal"
-  export TEMPLETON_EXIT_CODE=1
-  exit 1
-fi
-
-exit 0
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/client.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/client.pp
deleted file mode 100644
index 38814e8..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/client.pp
+++ /dev/null
@@ -1,40 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-templeton::client(
-  $service_state = $hdp::params::cluster_client_state,
-  $templeton_server = undef
-) inherits hdp::params
-{ 
-  if ($service_state == 'no_op') {
-   } elsif ($service_state in ['installed_and_configured','uninstalled']) {
-     if ($hdp::params::service_exists['hdp-templeton::server'] != true) {
-       #installs package, creates user, sets configuration
-       class { 'hdp-templeton' :
-         service_state => $service_state
-       }
-      if ($templeton_server != undef) {
-        Hdp-Templeton::Configfile<||>{templeton_server => $templeton_server}
-      }
-    }
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/download-hive-tar.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/download-hive-tar.pp
deleted file mode 100644
index cc0754c..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/download-hive-tar.pp
+++ /dev/null
@@ -1,46 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-templeton::download-hive-tar()
-{
-  include hdp-templeton::params
-
-  $src_tar_name = $hdp-templeton::params::src_hive_tar_name
-  $dest_tar_name = $hdp-templeton::params::dest_hive_tar_name
-  $target = "${hdp::params::artifact_dir}/${dest_tar_name}"
- 
-  anchor { 'hdp-templeton::download-hive-tar::begin': }
-
-  hdp::package { 'webhcat-tar-hive' :
-    require => Anchor['hdp-templeton::download-hive-tar::begin']
-  }
-  
-#   hdp::exec { 'hive mkdir -p ${artifact_dir} ;  cp /tmp/HDP-templeton/${src_tar_name} ${target}':
-#       command => "mkdir -p ${artifact_dir} ;  cp /tmp/HDP-templeton/${src_tar_name} ${target}",
-#       unless  => "test -f ${target}",
-#       creates => $target,
-#       path    => ["/bin","/usr/bin/"],
-#       require => Hdp::Package['webhcat-tar-hive'],
-#       notify  =>  Anchor['hdp-templeton::download-hive-tar::end'],
-#   }
-
-  anchor { 'hdp-templeton::download-hive-tar::end': }
-
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/download-pig-tar.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/download-pig-tar.pp
deleted file mode 100644
index 471ed6e..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/download-pig-tar.pp
+++ /dev/null
@@ -1,46 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-templeton::download-pig-tar()
-{
-  include hdp-templeton::params
-
-  $src_tar_name = $hdp-templeton::params::src_pig_tar_name
-  $dest_tar_name = $hdp-templeton::params::dest_pig_tar_name
-  $target = "${hdp::params::artifact_dir}/${dest_tar_name}"
-
-  anchor { 'hdp-templeton::download-pig-tar::begin':}
-
-  hdp::package { 'webhcat-tar-pig' :
-    require => Anchor['hdp-templeton::download-pig-tar::begin']
-  }
-
-#   hdp::exec { 'pig ; mkdir -p ${artifact_dir} ;  cp /tmp/HDP-templeton/${src_tar_name} ${target}':
-#       command => "mkdir -p ${artifact_dir} ;  cp /tmp/HDP-templeton/${src_tar_name} ${target}",
-#       unless  => "test -f ${target}",
-#       creates => $target,
-#       path    => ["/bin","/usr/bin/"],
-#       require => Hdp::Package['webhcat-tar-pig'],
-#       notify  =>  Anchor['hdp-templeton::download-pig-tar::end'],
-#   }
-
-  anchor { 'hdp-templeton::download-pig-tar::end': }
-
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/init.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/init.pp
deleted file mode 100644
index fe64715..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/init.pp
+++ /dev/null
@@ -1,95 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-templeton(
-  $service_state = undef,
-  $server = false
-)
-{
-# Configs generation  
-
-  if has_key($configuration, 'webhcat-site') {
-    configgenerator::configfile{'webhcat-site': 
-      modulespath => $hdp-templeton::params::conf_dir,
-      filename => 'webhcat-site.xml',
-      module => 'hdp-templeton',
-      configuration => $configuration['webhcat-site']
-    }
-  }
-
- include hdp-templeton::params 
- 
-  if ($hdp::params::use_32_bits_on_slaves == false) {
-    $size = 64
-  } else {
-    $size = 32
-  }
-
-  $webhcat_user = $hdp-templeton::params::webhcat_user
-  $templeton_config_dir = $hdp-templeton::params::conf_dir
-
-  if ($service_state == 'uninstalled') {
-      hdp::package { 'webhcat' :
-      size => $size,
-      ensure => 'uninstalled'
-    }
-      hdp::directory { $templeton_config_dir:
-        service_state => $service_state,
-        force => true
-      }
-
-     anchor { 'hdp-templeton::begin': } -> Hdp::Package['webhcat'] -> Hdp::Directory[$templeton_config_dir] ->  anchor { 'hdp-templeton::end': }
-
-  } else {
-    hdp::package { 'webhcat' :
-      size => $size
-    }
-    class { hdp-templeton::download-hive-tar: }
-    class { hdp-templeton::download-pig-tar: }
-
-    hdp::user{ $webhcat_user:}
-
-    hdp::directory { $templeton_config_dir: 
-      service_state => $service_state,
-      force => true
-    }
-
-    hdp-templeton::configfile { ['webhcat-env.sh']: }
-
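-    # the chain below pins package install -> user -> config dir -> config
-    # files, so anything ordered after Anchor['hdp-templeton::end'] can rely
-    # on a fully configured node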
-    anchor { 'hdp-templeton::begin': } -> Hdp::Package['webhcat'] -> Hdp::User[$webhcat_user] -> Hdp::Directory[$templeton_config_dir] -> Hdp-templeton::Configfile<||> ->  anchor { 'hdp-templeton::end': }
-
-     if ($server == true ) { 
-      Hdp::Package['webhcat'] -> Hdp::User[$webhcat_user] ->   Class['hdp-templeton::download-hive-tar'] -> Class['hdp-templeton::download-pig-tar'] -> Anchor['hdp-templeton::end']
-     }
-  }
-}
-
-### config files
-define hdp-templeton::configfile(
-  $mode = undef
-) 
-{
-  hdp::configfile { "${hdp-templeton::params::conf_dir}/${name}":
-    component       => 'templeton',
-    owner           => $hdp-templeton::params::webhcat_user,
-    mode            => $mode
-  }
-}
-
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/params.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/params.pp
deleted file mode 100644
index 5c31d89..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/params.pp
+++ /dev/null
@@ -1,61 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-templeton::params() inherits hdp::params
-{
-  $templeton_user = $hdp::params::templeton_user
-
-  ###pig and hive tar url connector
-  $download_url = $hdp::params::apache_artifacts_download_url
-
-  $dest_pig_tar_name = hdp_default("dest_pig_tar_name","pig.tar.gz")
-  $dest_hive_tar_name = hdp_default("dest_hive_tar_name","hive.tar.gz")
-  $src_pig_tar_name = hdp_default("src_pig_tar_name","pig.tar.gz")
-  $src_hive_tar_name = hdp_default("src_hive_tar_name","hive.tar.gz")
-
-  ### templeton-env
-  $conf_dir = hdp_default("hadoop/templeton-env/conf_dir","/etc/hcatalog/conf")
-
-  ### templeton-env
-  $templeton_log_dir = hdp_default("hadoop/templeton-env/templeton_log_dir","/var/log/webhcat")
-
-  $templeton_pid_dir = hdp_default("hadoop/templeton-env/templeton_pid_dir","/var/run/webhcat")
-
-#  $templeton_jar_name= hdp_default("hadoop/templeton-env/templeton_jar_name","templeton-0.1.4.14.jar")
- 
-#  $hadoop_prefix = hdp_default("hadoop/templeton-env/hadoop_prefix","/usr")
-#  $hive_prefix = hdp_default("hadoop/templeton-env/hive_prefix","/usr")
-  
-  ### templeton-site
-  $hadoop_conf_dir = hdp_default("hadoop/templeton-site/hadoop_conf_dir")
-  $templeton_jar = hdp_default("hadoop/templeton-site/templeton_jar","/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar")
-  $zookeeper_jar = hdp_default("hadoop/templeton-site/zookeeper_jar","/usr/lib/zookeeper/zookeeper.jar")
-  $pig_tar_gz = hdp_default("hadoop/templeton-site/pig_tar_gz",$dest_pig_tar_name)
-  $pig_tar_name_hdfs = hdp_default("hadoop/templeton-site/pig_tar_name_hdfs","pig-0.9.2.14")
-
-  $hive_tar_gz = hdp_default("hadoop/templeton-site/hive_tar_gz",$dest_hive_tar_name)
-  $hive_tar_gz_name = hdp_default("hadoop/templeton-site/hive_tar_gz_name","hive-0.9.0.14")
-  $hive_metastore_sasl_enabled = hdp_default("hadoop/templeton-site/hive_metastore_sasl_enabled",false)
-
-  $templeton_metastore_principal = hdp_default("hadoop/templeton-site/templeton_metastore_principal")
-
-  $keytab_path = hdp_default("hadoop/templeton-site/keytab_path")
-  
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/server.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/server.pp
deleted file mode 100644
index 426e78e..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/server.pp
+++ /dev/null
@@ -1,108 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-templeton::server(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits  hdp-templeton::params
-{  
-
-  if ($service_state == 'no_op') { 
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) {
-  $hdp::params::service_exists['hdp-templeton::server'] = true
-
-  if ( ($service_state == 'installed_and_configured') and
-       ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
-     $masterHost = $kerberos_adminclient_host[0]
-     hdp::download_keytab { 'templeton_headless_keytab' :
-       masterhost => $masterHost,
-       keytabdst => "${$keytab_path}/templeton.headless.keytab",
-       keytabfile => 'templeton.headless.keytab',
-       owner => $hdp::params::templeton_user,
-       hostnameInPrincipals => 'no' 
-     }
-
-     if ( ($hdp::params::service_exists['hdp-hadoop::namenode'] != true) and
-          ($hdp::params::service_exists['hdp-hadoop::snamenode'] != true) and
-          ($hdp::params::service_exists['hdp-oozie::server'] != true) ) {
-       hdp::download_keytab { 'templeton_spnego_service_keytab' :
-         masterhost => $masterHost,
-         keytabdst => "${$keytab_path}/spnego.service.keytab",
-         keytabfile => 'spnego.service.keytab',
-         owner => $hdp::params::templeton_user,
-         group => 'hadoop',
-         mode => '0440'
-       }
-     }
-  }
-
-  class{ 'hdp-templeton' :
-    service_state => $service_state,
-    server        => true
-  }
-
-  class { 'hdp-templeton::copy-hdfs-directories' :
-    service_state => $service_state
-  }
-
-  class { 'hdp-templeton::service' :
-    ensure       => $service_state,
-  }
-  
-  #top level does not need anchors
-  Class['hdp-templeton'] -> Class['hdp-templeton::copy-hdfs-directories'] -> Class['hdp-templeton::service']
-  } else { 
-  hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-class hdp-templeton::copy-hdfs-directories($service_state)
-{
- $webhcat_apps_dir = $hdp::params::webhcat_apps_dir
- $webhcat_user = $hdp::params::webhcat_user
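- # stages the artifacts WebHCat jobs need (the hadoop-streaming jar plus the
- # pig and hive tarballs) into the HDFS apps dir, owned by the webhcat user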
-# $pig_src_tar = "$hdp::params::artifact_dir/pig.tar.gz"
-
-#  hdp-hadoop::hdfs::copyfromlocal { '/usr/share/templeton/templeton*jar':
-#    service_state => $service_state,
-#    owner => $hdp-templeton::params::templeton_user,
-#    mode  => '755',
-#    dest_dir => '/apps/templeton/ugi.jar'
-#  }
-  hdp-hadoop::hdfs::copyfromlocal { '/usr/lib/hadoop/contrib/streaming/hadoop-streaming*.jar':
-   service_state => $service_state,
-   owner => $webhcat_user,
-   mode  => '755',
-   dest_dir => "$webhcat_apps_dir/hadoop-streaming.jar"
-  }
-  #TODO: Use ${hdp::params::artifact_dir}/${hdp-templeton::params::pig_tar_name} instead
-  hdp-hadoop::hdfs::copyfromlocal { '/usr/share/HDP-webhcat/pig.tar.gz' :
-    service_state => $service_state,
-    owner => $webhcat_user,
-    mode  => '755',
-    dest_dir => "$webhcat_apps_dir/pig.tar.gz"
-  }
-  #TODO: Use ${hdp::params::artifact_dir}/${hdp-templeton::params::hive_tar_name} instead
-  hdp-hadoop::hdfs::copyfromlocal { '/usr/share/HDP-webhcat/hive.tar.gz' :
-    service_state => $service_state,
-    owner => $webhcat_user,
-    mode  => '755',
-    dest_dir => "$webhcat_apps_dir/hive.tar.gz"
-  }
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/service.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/service.pp
deleted file mode 100644
index 5d3b268..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/service.pp
+++ /dev/null
@@ -1,67 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-templeton::service(
-  $ensure,
-  $initial_wait = undef
-)
-{
-  include hdp-templeton::params
-  
-  $user = "$hdp-templeton::params::webhcat_user"
-  $hadoop_home = $hdp-templeton::params::hadoop_prefix
-  $cmd = "env HADOOP_HOME=${hadoop_home} /usr/lib/hcatalog/sbin/webhcat_server.sh"
-  $pid_file = "${hdp-templeton::params::templeton_pid_dir}/webhcat.pid" 
-
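-  # the 'running' branch below guards the start command with a pid-file check,
-  # making the exec a no-op when the server process is already alive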
-  if ($ensure == 'running') {
-    $daemon_cmd = "su - ${user} -c  '${cmd} start'"
-    $no_op_test = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
-  } elsif ($ensure == 'stopped') {
-    $daemon_cmd = "su - ${user} -c  '${cmd} stop'"
-    $no_op_test = undef
-  } else {
-    $daemon_cmd = undef
-  }
-
-  hdp-templeton::service::directory { $hdp-templeton::params::templeton_pid_dir : }
-  hdp-templeton::service::directory { $hdp-templeton::params::hcat_log_dir : }
-
-  anchor{'hdp-templeton::service::begin':} -> Hdp-templeton::Service::Directory<||> -> anchor{'hdp-templeton::service::end':}
-  
-  if ($daemon_cmd != undef) {
-    hdp::exec { $daemon_cmd:
-      command => $daemon_cmd,
-      unless  => $no_op_test,
-      initial_wait => $initial_wait
-    }
-    Hdp-templeton::Service::Directory<||> -> Hdp::Exec[$daemon_cmd] -> Anchor['hdp-templeton::service::end']
-  }
-}
-
-define hdp-templeton::service::directory()
-{
-  hdp::directory_recursive_create { $name: 
-    owner => $hdp-templeton::params::webhcat_user,
-    mode => '0755',
-    service_state => $ensure,
-    force => true
-  }
-}
-
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/templeton/service_check.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/templeton/service_check.pp
deleted file mode 100644
index 1e1cad4..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/templeton/service_check.pp
+++ /dev/null
@@ -1,60 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-templeton::templeton::service_check()
-{
-  include hdp-templeton::params
-  $smoke_test_user = $hdp::params::smokeuser
-  $security_enabled=$hdp::params::security_enabled
-  if ($security_enabled == true) {
-    $security = "true"
-  } else {
-    $security = "false"
-  }
-  $smoke_user_keytab = "${hdp-templeton::params::keytab_path}/${smoke_test_user}.headless.keytab"
-
-  $templeton_host = $hdp::params::webhcat_server_host
-
-  $smoke_shell_files = ['templetonSmoke.sh']
-
-  anchor { 'hdp-templeton::templeton::service_check::begin':}
-
-  hdp-templeton::smoke_shell_file { $smoke_shell_files: }
-
-  anchor{ 'hdp-templeton::templeton::service_check::end':}
-}
-
-define hdp-templeton::smoke_shell_file()
-{
-  file { '/tmp/templetonSmoke.sh':
-    ensure => present,
-    source => "puppet:///modules/hdp-templeton/templetonSmoke.sh",
-    mode => '0755'
-  }
-
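-  # retried exec: up to 3 attempts, 5 seconds apart, to ride out a WebHCat
-  # server that is still coming up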
-  exec { '/tmp/templetonSmoke.sh':
-    command   => "sh /tmp/templetonSmoke.sh ${templeton_host} ${smoke_test_user} ${smoke_user_keytab} ${security}",
-    tries     => 3,
-    try_sleep => 5,
-    require   => File['/tmp/templetonSmoke.sh'],
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    logoutput => "true"
-  }
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-templeton/templates/webhcat-env.sh.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-templeton/templates/webhcat-env.sh.erb
deleted file mode 100644
index 6f7d803..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-templeton/templates/webhcat-env.sh.erb
+++ /dev/null
@@ -1,44 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-# The file containing the running pid
-PID_FILE=<%=scope.function_hdp_template_var("hcat_pid_dir")%>/webhcat.pid
-
-TEMPLETON_LOG_DIR=<%=scope.function_hdp_template_var("hcat_log_dir")%>/
-
-
-WEBHCAT_LOG_DIR=<%=scope.function_hdp_template_var("hcat_log_dir")%>/
-
-# The console error log
-ERROR_LOG=<%=scope.function_hdp_template_var("hcat_log_dir")%>/webhcat-console-error.log
-
-# The console log
-CONSOLE_LOG=<%=scope.function_hdp_template_var("hcat_log_dir")%>/webhcat-console.log
-
-#TEMPLETON_JAR=<%=scope.function_hdp_template_var("templeton_jar_name")%>
-
-#HADOOP_PREFIX=<%=scope.function_hdp_template_var("hadoop_prefix")%>/
-
-#HCAT_PREFIX=<%=scope.function_hdp_template_var("hive_prefix")%>/
-
-# Set HADOOP_HOME to point to a specific hadoop install directory
-export HADOOP_HOME=/usr/lib/hadoop
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/files/zkEnv.sh b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/files/zkEnv.sh
deleted file mode 100644
index 07017e1..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/files/zkEnv.sh
+++ /dev/null
@@ -1,96 +0,0 @@
-#!/bin/sh
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This script should be sourced into other zookeeper
-# scripts to setup the env variables
-
-# We use ZOOCFGDIR if defined,
-# otherwise we use /etc/zookeeper
-# or the conf directory that is
-# a sibling of this script's directory
-if [ "x$ZOOCFGDIR" = "x" ]
-then
-    if [ -d "/etc/zookeeper" ]
-    then
-        ZOOCFGDIR="/etc/zookeeper"
-    else
-        ZOOCFGDIR="$ZOOBINDIR/../conf"
-    fi
-fi
-
-if [ "x$ZOOCFG" = "x" ]
-then
-    ZOOCFG="zoo.cfg"
-fi
-
-ZOOCFG="$ZOOCFGDIR/$ZOOCFG"
-
-if [ -e "$ZOOCFGDIR/zookeeper-env.sh" ]
-then
-    . "$ZOOCFGDIR/zookeeper-env.sh"
-fi
-
-if [ "x${ZOO_LOG_DIR}" = "x" ]
-then
-    ZOO_LOG_DIR="."
-fi
-
-if [ "x${ZOO_LOG4J_PROP}" = "x" ]
-then
-    ZOO_LOG4J_PROP="INFO,CONSOLE"
-fi
-
-#add the zoocfg dir to classpath
-CLASSPATH="$ZOOCFGDIR:$CLASSPATH"
-
-for i in "$ZOOBINDIR"/../src/java/lib/*.jar
-do
-    CLASSPATH="$i:$CLASSPATH"
-done
-
-#make it work in the release
-for i in "$ZOOBINDIR"/../lib/*.jar
-do
-    CLASSPATH="$i:$CLASSPATH"
-done
-
-#make it work in the release
-for i in "$ZOOBINDIR"/../zookeeper-*.jar
-do
-    CLASSPATH="$i:$CLASSPATH"
-done
-
-#make it work for developers
-for d in "$ZOOBINDIR"/../build/lib/*.jar
-do
-   CLASSPATH="$d:$CLASSPATH"
-done
-
-#make it work for developers
-CLASSPATH="$ZOOBINDIR/../build/classes:$CLASSPATH"
-
-case "`uname`" in
-    CYGWIN*) cygwin=true ;;
-    *) cygwin=false ;;
-esac
-
-if $cygwin
-then
-    CLASSPATH=`cygpath -wp "$CLASSPATH"`
-fi
-
-#echo "CLASSPATH=$CLASSPATH"
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/files/zkServer.sh b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/files/zkServer.sh
deleted file mode 100644
index 49ceb4d..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/files/zkServer.sh
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/bin/sh
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# If this scripted is run out of /usr/bin or some other system bin directory
-# it should be linked to and not copied. Things like java jar files are found
-# relative to the canonical path of this script.
-#
-
-# See the following page for extensive details on setting
-# up the JVM to accept JMX remote management:
-# http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
-# by default we allow local JMX connections
-if [ "x$JMXLOCALONLY" = "x" ]
-then
-    JMXLOCALONLY=false
-fi
-
-if [ "x$JMXDISABLE" = "x" ]
-then
-    echo "JMX enabled by default"
-    # for some reason these two options are necessary on jdk6 on Ubuntu
-    #   accord to the docs they are not necessary, but otw jconsole cannot
-    #   do a local attach
-    ZOOMAIN="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.local.only=$JMXLOCALONLY org.apache.zookeeper.server.quorum.QuorumPeerMain"
-else
-    echo "JMX disabled by user request"
-    ZOOMAIN="org.apache.zookeeper.server.quorum.QuorumPeerMain"
-fi
-
-# Only follow symlinks if readlink supports it
-if readlink -f "$0" > /dev/null 2>&1
-then
-  ZOOBIN=`readlink -f "$0"`
-else
-  ZOOBIN="$0"
-fi
-ZOOBINDIR=`dirname "$ZOOBIN"`
-
-. "$ZOOBINDIR"/zkEnv.sh
-
-if [ "x$2" != "x" ]
-then
-    ZOOCFG="$ZOOCFGDIR/$2"
-fi
-
-if $cygwin
-then
-    ZOOCFG=`cygpath -wp "$ZOOCFG"`
-    # cygwin has a "kill" in the shell itself, gets confused
-    KILL=/bin/kill
-else
-    KILL=kill
-fi
-
-echo "Using config: $ZOOCFG"
-
-ZOOPIDFILE=$(grep dataDir "$ZOOCFG" | sed -e 's/.*=//')/zookeeper_server.pid
-
-
-case $1 in
-start)
-    echo  "Starting zookeeper ... "
-    $JAVA  "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
-    -cp "$CLASSPATH" $JVMFLAGS $ZOOMAIN "$ZOOCFG" &
-    /bin/echo -n $! > "$ZOOPIDFILE"
-    echo STARTED
-    ;;
-stop)
-    echo "Stopping zookeeper ... "
-    if [ ! -f "$ZOOPIDFILE" ]
-    then
-    echo "error: could not find file $ZOOPIDFILE"
-    exit 1
-    else
-    $KILL -9 $(cat "$ZOOPIDFILE")
-    rm "$ZOOPIDFILE"
-    echo STOPPED
-    fi
-    ;;
-upgrade)
-    shift
-    echo "upgrading the servers to 3.*"
-    java "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
-    -cp "$CLASSPATH" $JVMFLAGS org.apache.zookeeper.server.upgrade.UpgradeMain ${@}
-    echo "Upgrading ... "
-    ;;
-restart)
-    shift
-    "$0" stop ${@}
-    sleep 3
-    "$0" start ${@}
-    ;;
-status)
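-    # probe the client port with ZooKeeper's 'stat' four-letter command and
-    # keep the Mode line (leader/follower/standalone); empty output means the
-    # server did not answer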
-    STAT=`echo stat | nc localhost $(grep clientPort "$ZOOCFG" | sed -e 's/.*=//') 2> /dev/null| grep Mode`
-    if [ "x$STAT" = "x" ]
-    then
-        echo "Error contacting service. It is probably not running."
-    else
-        echo $STAT
-    fi
-    ;;
-*)
-    echo "Usage: $0 {start|stop|restart|status}" >&2
-
-esac
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/files/zkService.sh b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/files/zkService.sh
deleted file mode 100644
index 32dfce4..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/files/zkService.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-zkcli_script=$1
-user=$2
-conf_dir=$3
-su - $user -c "source $conf_dir/zookeeper-env.sh ; echo 'ls /' | $zkcli_script"
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/files/zkSmoke.sh b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/files/zkSmoke.sh
deleted file mode 100644
index 2097c90..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/files/zkSmoke.sh
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/bin/bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-smoke_script=$1
-smoke_user=$2
-conf_dir=$3
-export ZOOKEEPER_EXIT_CODE=0
-zkhosts=` grep server  $conf_dir/zoo.cfg  | cut -f 2 -d '=' | cut -f 1 -d ':' | tr '\n' ' ' `
-zk_node1=`echo $zkhosts | tr ' ' '\n' | head -n 1`  
-echo "zk_node1=$zk_node1"
-# Delete /zk_smoketest znode if exists
-su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ;  echo delete /zk_smoketest | ${smoke_script} -server $zk_node1:2181"  
-# Create /zk_smoketest znode on one zookeeper server
-su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo create /zk_smoketest smoke_data | ${smoke_script} -server $zk_node1:2181"
-
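-# the loop below reads the test znode back from every quorum member; any
-# mismatch bumps ZOOKEEPER_EXIT_CODE so one bad host fails the whole smoke test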
-for i in $zkhosts ; do
-  echo "Running test on host $i"
-  # Verify the data associated with znode across all the nodes in the zookeeper quorum
-  su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'get /zk_smoketest' | ${smoke_script} -server $i:2181"
-  su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'ls /' | ${smoke_script} -server $i:2181"
-  output=$(su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'get /zk_smoketest' | ${smoke_script} -server $i:2181")
-  echo $output | grep smoke_data
-  if [[ $? -ne 0 ]] ; then
-    echo "Data associated with znode /zk_smoketests is not consistent on host $i"
-    ((ZOOKEEPER_EXIT_CODE=$ZOOKEEPER_EXIT_CODE+1))
-  fi
-done
-
-su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'delete /zk_smoketest' | ${smoke_script} -server $zk_node1:2181"
-if [[ "$ZOOKEEPER_EXIT_CODE" -ne "0" ]] ; then
-  echo "Zookeeper Smoke Test: Failed" 
-else
-   echo "Zookeeper Smoke Test: Passed" 
-fi
-exit $ZOOKEEPER_EXIT_CODE
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/client.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/client.pp
deleted file mode 100644
index 23eb15b..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/client.pp
+++ /dev/null
@@ -1,47 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-zookeeper::client(
-  $service_state = $hdp::params::cluster_client_state
-) inherits hdp::params
-{
-  $package_type = $hdp::params::packages
-
-  if ($service_state == 'no_op') {
-  } elsif  ($service_state in ['installed_and_configured','uninstalled']) {
-      if ($package_type == 'hdp') {
-        $cmd = "ln -s /usr/libexec/zkEnv.sh /usr/bin/zkEnv.sh"
-        $test = "test -e /usr/bin/zkEnv.sh"
-        hdp::exec { $cmd :
-           command => $cmd,
-           unless  => $test,
-           require => Class['hdp-zookeeper']
-        }
-      } 
-      if ($hdp::params::service_exists['hdp-zookeeper'] != true) {
-        class { 'hdp-zookeeper' : 
-         type => 'client',
-         service_state => $service_state
-        } 
-      }
-    } else {
-   hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/init.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/init.pp
deleted file mode 100644
index a89579a..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/init.pp
+++ /dev/null
@@ -1,121 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-zookeeper(
-  $type = server,
-  $service_state = $hdp::params::cluster_service_state,
-  $myid = 1,
-  $opts = {}
-) inherits hdp-zookeeper::params 
-{
-
- if ($service_state == 'no_op') {
-   if ($type == 'server') {
-     $hdp::params::service_exists['hdp-zookeeper'] = true
-  }
- } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
-   $zk_user = $hdp-zookeeper::params::zk_user
-   $zk_config_dir = $hdp-zookeeper::params::conf_dir
- 
-   anchor{'hdp-zookeeper::begin':}
-   anchor{'hdp-zookeeper::end':}
-
-   if ($service_state == 'uninstalled') {
-     if ($type == 'server') {
-       $hdp::params::service_exists['hdp-zookeeper'] = true
-    }
-     hdp::package { 'zookeeper':
-       ensure => 'uninstalled'
-     }
-     hdp::directory_recursive_create { $zk_config_dir:
-       service_state => $service_state,
-       force => true
-     }
-
-     if ($type == 'server') {
-        class { 'hdp-zookeeper::service':
-          ensure => $service_state,
-          myid   => $myid
-        }
-       }
-
-     if ($type == 'server') {
-       Anchor['hdp-zookeeper::begin'] -> Hdp::Package['zookeeper'] -> Hdp::Directory_recursive_create[$zk_config_dir] -> Class['hdp-zookeeper::service']  -> Anchor['hdp-zookeeper::end']
-     } else {
-       Anchor['hdp-zookeeper::begin'] -> Hdp::Package['zookeeper'] -> Hdp::Directory_recursive_create[$zk_config_dir] -> Anchor['hdp-zookeeper::end']
-     }
-   } else {
-     hdp::package { 'zookeeper':}
-
-     hdp::user{ $zk_user:}
-
-     hdp::directory_recursive_create { $zk_config_dir: 
-      service_state => $service_state,
-      force => true
-     }
-
-     hdp-zookeeper::configfile { ['zoo.cfg','zookeeper-env.sh','configuration.xsl','log4j.properties']: }
- 
-     if ($hdp::params::update_zk_shell_files == true) {
-       hdp-zookeeper::shell_file{ ['zkServer.sh','zkEnv.sh']: }
-     }
-
-     if ($type == 'server') {
-       $hdp::params::service_exists['hdp-zookeeper'] = true
-       class { 'hdp-zookeeper::service': 
-         ensure => $service_state,
-         myid   => $myid
-       }
-      }
-
-      Anchor['hdp-zookeeper::begin'] -> Hdp::Package['zookeeper'] -> Hdp::User[$zk_user] -> 
-        Hdp::Directory_recursive_create[$zk_config_dir] -> Hdp-zookeeper::Configfile<||> -> Anchor['hdp-zookeeper::end']
-      if ($type == 'server') {
-        Hdp::Directory_recursive_create[$zk_config_dir] -> Hdp-zookeeper::Configfile<||> -> Class['hdp-zookeeper::service'] -> Anchor['hdp-zookeeper::end']
-      }
-      if ($hdp::params::update_zk_shell_files == true) {
-        Hdp::Package['zookeeper'] -> Hdp-zookeeper::Shell_file<||> -> Anchor['hdp-zookeeper::end']
-      }
-    }
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-### config files
-define hdp-zookeeper::configfile(
-  $mode = undef
-) 
-{
-  hdp::configfile { "${hdp-zookeeper::params::conf_dir}/${name}":
-    component       => 'zookeeper',
-    owner           => $hdp-zookeeper::params::zk_user,
-    mode            => $mode
-  }
-}
-
-### 
-define hdp-zookeeper::shell_file()
-{
-  file { "${hdp::params::zk_bin}/${name}":
-    source => "puppet:///modules/hdp-zookeeper/${name}", 
-    mode => '0755'
-  }
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/params.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/params.pp
deleted file mode 100644
index 515f3e3..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/params.pp
+++ /dev/null
@@ -1,38 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-zookeeper::params() inherits hdp::params 
-{
-  $conf_dir = $hdp::params::zk_conf_dir
-
-  $zk_user = $hdp::params::zk_user
-  
-  $zk_log_dir = hdp_default("zk_log_dir","/var/log/zookeeper")
-  $zk_data_dir = hdp_default("zk_data_dir","/var/lib/zookeeper/data")
-  $zk_pid_dir = hdp_default("zk_pid_dir","/var/run/zookeeper")
-  $zk_pid_file = "${zk_pid_dir}/zookeeper_server.pid"
-
-
-  $tickTime = hdp_default("tickTime","2000")
-  $initLimit = hdp_default("initLimit","10")
-  $syncLimit = hdp_default("syncLimit","5")
-  $clientPort = hdp_default("clientPort","2181")
-
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/quorum/service_check.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/quorum/service_check.pp
deleted file mode 100644
index 6d9cea4..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/quorum/service_check.pp
+++ /dev/null
@@ -1,54 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-zookeeper::quorum::service_check()
-{
-  include hdp-zookeeper::params
-  $conf_dir = $hdp-zookeeper::params::conf_dir
-
-  $smoke_test_user = $hdp::params::smokeuser
-
-  $smoke_script = $hdp::params::zk_smoke_test_script
-  $quorum_smoke_shell_files = ['zkSmoke.sh']
-
-  anchor { 'hdp-zookeeper::quorum::service_check::begin':}
-
-  hdp-zookeeper::quorum_smoke_shell_file { $quorum_smoke_shell_files: }
-
-  anchor{ 'hdp-zookeeper::quorum::service_check::end':}
-}
-
-define hdp-zookeeper::quorum_smoke_shell_file()
-{
-  file { '/tmp/zkSmoke.sh':
-    ensure => present,
-    source => "puppet:///modules/hdp-zookeeper/zkSmoke.sh",
-    mode => '0755'
-  }
-
-  exec { '/tmp/zkSmoke.sh':
-    command   => "sh /tmp/zkSmoke.sh ${smoke_script} ${smoke_test_user} ${conf_dir}",
-    tries     => 3,
-    try_sleep => 5,
-    require   => File['/tmp/zkSmoke.sh'],
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    logoutput => "true"
-  }
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/service.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/service.pp
deleted file mode 100644
index 6a67517..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/service.pp
+++ /dev/null
@@ -1,96 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-zookeeper::service(
-  $ensure = $hdp::params::cluster_service_state,
-  $myid,
-  $initial_wait = undef
-)
-{
-  include hdp-zookeeper::params
-  $user = $hdp-zookeeper::params::zk_user
-  $conf_dir = $hdp-zookeeper::params::conf_dir
-  $zk_bin = $hdp::params::zk_bin
-  $cmd = "env ZOOCFGDIR=${conf_dir} ZOOCFG=zoo.cfg ${zk_bin}/zkServer.sh"
-
-  $pid_file = $hdp-zookeeper::params::zk_pid_file  
-
-  if ($ensure == 'running') {
-    $daemon_cmd = "su - ${user} -c  'source ${conf_dir}/zookeeper-env.sh ; ${cmd} start'"
-    $no_op_test = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
-    # not using "su - ${user} -c '${cmd} status'" as the no-op test because it checks more than whether the server process is up
-  } elsif ($ensure == 'stopped') {
-    $daemon_cmd = "su - ${user} -c  'source ${conf_dir}/zookeeper-env.sh ; ${cmd} stop'"
-    #TODO: put in no_op_test for stopped
-    $no_op_test = undef
-  } else {
-    $daemon_cmd = undef
-  }
-  hdp::directory_recursive_create { $hdp-zookeeper::params::zk_pid_dir: 
-    owner        => $user,
-    context_tag => 'zk_service',
-    service_state => $ensure,
-    force => true
-  }
-  hdp::directory_recursive_create { $hdp-zookeeper::params::zk_log_dir: 
-    owner        => $user,
-    context_tag => 'zk_service',
-    service_state => $ensure,
-    force => true
-  }
-   hdp::directory_recursive_create { $hdp-zookeeper::params::zk_data_dir: 
-    owner        => $user,
-    context_tag => 'zk_service',
-    service_state => $ensure,
-    force => true
-  }
-  
-  if ($daemon_cmd != undef) {
-    hdp::exec { $daemon_cmd:
-      command => $daemon_cmd,
-      unless  => $no_op_test,
-      initial_wait => $initial_wait
-    }
-  }
-
-  if ($ensure == 'uninstalled') {
-    anchor{'hdp-zookeeper::service::begin':} -> Hdp::Directory_recursive_create<|context_tag == 'zk_service'|> ->  anchor{'hdp-zookeeper::service::end':}
-  } else {
-    class { 'hdp-zookeeper::set_myid': myid => $myid}
-
-    anchor{'hdp-zookeeper::service::begin':} -> Hdp::Directory_recursive_create<|context_tag == 'zk_service'|> -> 
-    Class['hdp-zookeeper::set_myid'] -> anchor{'hdp-zookeeper::service::end':}
-
-    if ($daemon_cmd != undef) {
-      Class['hdp-zookeeper::set_myid'] -> Hdp::Exec[$daemon_cmd] -> Anchor['hdp-zookeeper::service::end']
-    }
-  }
-}
-
-class hdp-zookeeper::set_myid($myid)
-{
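-  # the myid file tells this host which server.N entry in zoo.cfg it is;
-  # its content must match that index for the ensemble to form a quorum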
-  file {"${hdp-zookeeper::params::zk_data_dir}/myid":
-    ensure  => file,
-    content => $myid,
-    mode    => '0644',
-  }
-}
-
-
-
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/zookeeper/service_check.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/zookeeper/service_check.pp
deleted file mode 100644
index 9f1dce0..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/zookeeper/service_check.pp
+++ /dev/null
@@ -1,53 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-zookeeper::zookeeper::service_check()
-{
-  include hdp-zookeeper::params
-  $conf_dir = $hdp-zookeeper::params::conf_dir
-  $smoke_script = $hdp::params::zk_smoke_test_script
-
-  $smoke_test_user = $hdp::params::smokeuser
-  $zookeeper_smoke_shell_files = ['zkService.sh']
-
-  anchor { 'hdp-zookeeper::zookeeper::service_check::begin':}
-
-  hdp-zookeeper::zookeeper_smoke_shell_file { $zookeeper_smoke_shell_files: }
-
-  anchor{ 'hdp-zookeeper::zookeeper::service_check::end':}
-}
-
-define hdp-zookeeper::zookeeper_smoke_shell_file()
-{
-  file { '/tmp/zkService.sh':
-    ensure => present,
-    source => "puppet:///modules/hdp-zookeeper/zkService.sh",
-    mode => '0755'
-  }
-
-  exec { '/tmp/zkService.sh':
-    command   => "sh /tmp/zkService.sh ${smoke_script} ${smoke_test_user} ${conf_dir}",
-    tries     => 3,
-    try_sleep => 5,
-    require   => File['/tmp/zkService.sh'],
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    logoutput => "true"
-  }
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/templates/configuration.xsl.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/templates/configuration.xsl.erb
deleted file mode 100644
index c003ba2..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/templates/configuration.xsl.erb
+++ /dev/null
@@ -1,24 +0,0 @@
-<?xml version="1.0"?>
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
-<xsl:output method="html"/>
-<xsl:template match="configuration">
-<html>
-<body>
-<table border="1">
-<tr>
- <td>name</td>
- <td>value</td>
- <td>description</td>
-</tr>
-<xsl:for-each select="property">
-  <tr>
-     <td><a name="{name}"><xsl:value-of select="name"/></a></td>
-     <td><xsl:value-of select="value"/></td>
-     <td><xsl:value-of select="description"/></td>
-  </tr>
-</xsl:for-each>
-</table>
-</body>
-</html>
-</xsl:template>
-</xsl:stylesheet>
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/templates/log4j.properties.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/templates/log4j.properties.erb
deleted file mode 100644
index db69564..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/templates/log4j.properties.erb
+++ /dev/null
@@ -1,71 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-#
-# ZooKeeper Logging Configuration
-#
-
-# Format is "<default threshold> (, <appender>)+
-
-# DEFAULT: console appender only
-log4j.rootLogger=INFO, CONSOLE
-
-# Example with rolling log file
-#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE
-
-# Example with rolling log file and tracing
-#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE
-
-#
-# Log INFO level and above messages to the console
-#
-log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
-log4j.appender.CONSOLE.Threshold=INFO
-log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
-log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
-
-#
-# Add ROLLINGFILE to rootLogger to get log file output
-#    Log DEBUG level and above messages to a log file
-log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender
-log4j.appender.ROLLINGFILE.Threshold=DEBUG
-log4j.appender.ROLLINGFILE.File=zookeeper.log
-
-# Max log file size of 10MB
-log4j.appender.ROLLINGFILE.MaxFileSize=10MB
-# uncomment the next line to limit number of backup files
-#log4j.appender.ROLLINGFILE.MaxBackupIndex=10
-
-log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
-log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
-
-
-#
-# Add TRACEFILE to rootLogger to get log file output
-#    Log DEBUG level and above messages to a log file
-log4j.appender.TRACEFILE=org.apache.log4j.FileAppender
-log4j.appender.TRACEFILE.Threshold=TRACE
-log4j.appender.TRACEFILE.File=zookeeper_trace.log
-
-log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout
-### Notice we are including log4j's NDC here (%x)
-log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/templates/zoo.cfg.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/templates/zoo.cfg.erb
deleted file mode 100644
index 25909b5..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/templates/zoo.cfg.erb
+++ /dev/null
@@ -1,37 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-# The number of milliseconds of each tick
-tickTime=<%=scope.function_hdp_template_var("tickTime")%>
-# The number of ticks that the initial
-# synchronization phase can take
-initLimit=<%=scope.function_hdp_template_var("initLimit")%>
-# The number of ticks that can pass between
-# sending a request and getting an acknowledgement
-syncLimit=<%=scope.function_hdp_template_var("syncLimit")%>
-# the directory where the snapshot is stored.
-dataDir=<%=scope.function_hdp_template_var("zk_data_dir")%>
-# the port at which the clients will connect
-clientPort=<%=scope.function_hdp_template_var("clientPort")%>
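-# one line per quorum member is rendered below, e.g. server.1=host1:2888:3888
-# (2888 is the peer port, 3888 the leader-election port)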
-<%(scope.function_hdp_host("zookeeper_hosts")||[]).each_with_index do |host,i|-%>
-server.<%=(i+1).to_s%>=<%=host%>:2888:3888
-<% end -%>
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/templates/zookeeper-env.sh.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/templates/zookeeper-env.sh.erb
deleted file mode 100644
index 50f0910..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-zookeeper/templates/zookeeper-env.sh.erb
+++ /dev/null
@@ -1,20 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-export JAVA_HOME=<%=scope.function_hdp_java_home()%>
-export ZOO_LOG_DIR=<%=scope.function_hdp_template_var("zk_log_dir")%>
-export ZOOPIDFILE=<%=scope.function_hdp_template_var("zk_pid_file")%>
-export SERVER_JVMFLAGS=
-export JAVA=$JAVA_HOME/bin/java
-export CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*
\ No newline at end of file
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_args_as_array.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_args_as_array.rb
deleted file mode 100644
index 46becea..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_args_as_array.rb
+++ /dev/null
@@ -1,26 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#to handle differences in how args passed in
-module Puppet::Parser::Functions
-  newfunction(:hdp_args_as_array, :type => :rvalue) do |args|
-    args.kind_of?(Array) ? args : [args]
-  end
-end
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_array_from_comma_list.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_array_from_comma_list.rb
deleted file mode 100644
index 56882f3..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_array_from_comma_list.rb
+++ /dev/null
@@ -1,26 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-module Puppet::Parser::Functions
-  newfunction(:hdp_array_from_comma_list, :type => :rvalue) do |args|
-    args = [args].flatten
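-    # e.g. "a,b,c" -> ["a", "b", "c"]; an empty first arg yields "" (not [])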
-    function_hdp_is_empty(args[0]) ? "" : args[0].split(",") 
-  end
-end
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_calc_xmn_from_xms.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_calc_xmn_from_xms.rb
deleted file mode 100644
index e83a742..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_calc_xmn_from_xms.rb
+++ /dev/null
@@ -1,37 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-module Puppet::Parser::Functions
-  newfunction(:hdp_calc_xmn_from_xms, :type => :rvalue) do |args|
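-    # illustrative example: args = ["1024m", 0.2, 512] returns "200m"
-    # (1024 * 0.2 = 204.8 -> floor 204 -> rounded down to a multiple of 8 = 200,
-    # capped at the 512 max, with the original "m" unit suffix re-appended)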
-    heapsize_orig_str = args[0].to_s
-    xmn_percent = args[1].to_f
-    xmn_max = args[2].to_i
-    heapsize_str = heapsize_orig_str.gsub(/\D/,"")
-    heapsize = heapsize_str.to_i
-    heapsize_unit = heapsize_orig_str.gsub(/\d/,"")
-    xmn_val = heapsize*xmn_percent
-    xmn_val = xmn_val.floor.to_i
-    xmn_val = xmn_val/8
-    xmn_val = xmn_val*8
-    xmn_val = xmn_val > xmn_max ? xmn_max : xmn_val
-    xmn_val_str = "" + xmn_val.to_s + heapsize_unit
-    xmn_val_str
-  end
-end
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_default.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_default.rb
deleted file mode 100644
index 329a361..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_default.rb
+++ /dev/null
@@ -1,40 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-module Puppet::Parser::Functions
-  newfunction(:hdp_default, :type => :rvalue) do |args|
-    args = function_hdp_args_as_array(args)
-    scoped_var_name = args[0]
-    var_name = scoped_var_name.split("/").last
-    default = args[1]
-    val = lookupvar("::#{var_name}")
-    # To work around string-boolean comparison issues,
-    # ensure that we return a boolean result if the default value
-    # is also a boolean
-    if default == true or default == false # we expect boolean value as a result
-      casted_val = (val == "true" or val == true) # converting to boolean
-    else # default
-      casted_val = val
-    end
-    function_hdp_is_empty(val) ? (default||"") : casted_val
-  end
-end
-
-
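A minimal sketch of the default/boolean-cast behaviour above, with a lambda standing in for Puppet's lookupvar (an assumption of this example):

    def hdp_default_sketch(lookup, var_name, default)
      val = lookup.call(var_name)
      # When the default is boolean, also accept the string "true", since
      # Puppet may hand the value over as a string.
      casted = (default == true || default == false) ? (val == "true" || val == true) : val
      empty  = val.nil? || ["undefined", "undef", ""].include?(val.to_s)
      empty ? (default || "") : casted
    end

    lookup = ->(name) { { "security_enabled" => "true" }[name] }
    p hdp_default_sketch(lookup, "security_enabled", false)  # => true
    p hdp_default_sketch(lookup, "missing_var", "fallback")  # => "fallback"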
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_fail.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_fail.rb
deleted file mode 100644
index 9241b1d..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_fail.rb
+++ /dev/null
@@ -1,27 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-module Puppet::Parser::Functions
-  newfunction(:hdp_fail) do |args|
-    args = [args].flatten
-    msg = args[0]
-    function_fail(msg)
-  end
-end
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_host.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_host.rb
deleted file mode 100644
index 4a9b142..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_host.rb
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-module Puppet::Parser::Functions
-  newfunction(:hdp_host, :type => :rvalue) do |args|
-    args = function_hdp_args_as_array(args)
-    var = args[0]
-    val = lookupvar(var)
-    function_hdp_is_empty(val) ? "" : val 
-  end
-end
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_host_attribute.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_host_attribute.rb
deleted file mode 100644
index 1df1392..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_host_attribute.rb
+++ /dev/null
@@ -1,40 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-module Puppet::Parser::Functions
-  newfunction(:hdp_host_attribute, :type => :rvalue) do |args|
-    args = function_hdp_args_as_array(args)
-    hash,attr,source = args
-    ret_val = lambda do |hash,attr,s|
-      ret = ""
-      ndx = hash[s]
-      unless function_hdp_is_empty(ndx)
-        val = ndx[attr]
-        ret = function_hdp_is_empty(val) ? "" : val
-      end
-      ret
-    end
-    if source.kind_of?(Array)
-      source.map{|s|ret_val.call(hash,attr,s)}
-    else
-     ret_val.call(hash,attr,source)
-    end
-  end
-end
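In plain Ruby the lookup above amounts to the following; hdp_is_empty is approximated by nil checks, and the data is illustrative:

    def host_attribute(hash, attr, source)
      fetch = lambda do |host|
        entry = hash[host]
        entry.nil? ? "" : (entry[attr] || "")
      end
      source.is_a?(Array) ? source.map { |h| fetch.call(h) } : fetch.call(source)
    end

    attrs = { "h1.example.com" => { "publicfqdn" => "h1.public.example.com" } }
    p host_attribute(attrs, "publicfqdn", "h1.example.com")          # => "h1.public.example.com"
    p host_attribute(attrs, "publicfqdn", ["h1.example.com", "h2"])  # => ["h1.public.example.com", ""]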
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_is_empty.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_is_empty.rb
deleted file mode 100644
index f57f8eb..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_is_empty.rb
+++ /dev/null
@@ -1,27 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-module Puppet::Parser::Functions
-  newfunction(:hdp_is_empty, :type => :rvalue) do |args|
-    args = function_hdp_args_as_array(args)
-    el = args[0]
-    el.nil? or (el.respond_to?(:to_s) and ["undefined","undef",""].include?(el.to_s))
-  end
-end
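The predicate treats nil and the strings "undefined", "undef" and "" as empty; everything else, including false and 0, counts as non-empty. Standalone:

    def hdp_is_empty?(el)
      el.nil? || (el.respond_to?(:to_s) && ["undefined", "undef", ""].include?(el.to_s))
    end

    p hdp_is_empty?(nil)         # => true
    p hdp_is_empty?("undef")     # => true
    p hdp_is_empty?("namenode")  # => false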
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_java_home.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_java_home.rb
deleted file mode 100644
index f82ee37..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_java_home.rb
+++ /dev/null
@@ -1,29 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-module Puppet::Parser::Functions
-  newfunction(:hdp_java_home, :type => :rvalue) do 
-    size = lookupvar("size")
-    if size.nil? or size == :undefined
-      size = "64"
-    end
-    lookupvar("::hdp::params::java#{size.to_s}_home")
-  end
-end
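The effect is a size-keyed lookup that defaults to the 64-bit JDK when no size is set. A sketch with illustrative paths:

    java_homes = { "32" => "/usr/jdk32/jdk1.6.0_31", "64" => "/usr/jdk64/jdk1.6.0_31" }
    size = nil                                     # unset in the calling scope
    size = "64" if size.nil? || size == :undefined
    puts java_homes[size]                          # => "/usr/jdk64/jdk1.6.0_31"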
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_no_hosts.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_no_hosts.rb
deleted file mode 100644
index 6c1a988..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_no_hosts.rb
+++ /dev/null
@@ -1,27 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-module Puppet::Parser::Functions
-  newfunction(:hdp_no_hosts, :type => :rvalue) do |args|
-    args = function_hdp_args_as_array(args)
-    var = args[0]
-    function_hdp_is_empty(function_hdp_host(var))
-  end
-end
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_option_value.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_option_value.rb
deleted file mode 100644
index 1348879..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_option_value.rb
+++ /dev/null
@@ -1,32 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-module Puppet::Parser::Functions
-  newfunction(:hdp_option_value, :type => :rvalue) do |args|
-    args = [args].flatten
-    opts = args[0]
-    key = args[1]
-    if opts.kind_of?(Hash) and not function_hdp_is_empty(key)
-      opts[key]||:undef
-    else
-      :undef
-    end
-  end
-end
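Unlike the other helpers, this one falls back to :undef rather than "" so Puppet can treat the result as undefined. In short:

    def option_value(opts, key)
      (opts.is_a?(Hash) && !key.nil? && !key.to_s.empty?) ? (opts[key] || :undef) : :undef
    end

    p option_value({ "port" => 8080 }, "port")  # => 8080
    p option_value({ "port" => 8080 }, "host")  # => :undef
    p option_value("not-a-hash", "port")        # => :undef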
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_set_from_comma_list.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_set_from_comma_list.rb
deleted file mode 100644
index 1dc61f3..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_set_from_comma_list.rb
+++ /dev/null
@@ -1,31 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# to handle differences in how args are passed in
-require 'set'
-module Puppet::Parser::Functions
-  newfunction(:hdp_set_from_comma_list, :type => :rvalue) do |args|
-    list = function_hdp_array_from_comma_list(args)
-    list.each_index { |i| list[i] = list[i].strip }
-    # Delete empty strings
-    list.reject! { |e| e.empty? }
-    list.uniq   
-  end
-end
\ No newline at end of file
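The whole pipeline (split, strip, drop empties, de-duplicate) collapses to one idiomatic line in plain Ruby:

    def set_from_comma_list(value)
      value.to_s.split(",").map(&:strip).reject(&:empty?).uniq
    end

    p set_from_comma_list(" a, b ,a,,c ")  # => ["a", "b", "c"]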
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_template_var.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_template_var.rb
deleted file mode 100644
index 52ab7c9..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_template_var.rb
+++ /dev/null
@@ -1,35 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-module Puppet::Parser::Functions
-  newfunction(:hdp_template_var, :type => :rvalue) do |args|
-    args = [args].flatten
-    qualified_var = args[0]
-    unless qualified_var =~ /^::/
-      #module_name = lookupvar("module_name")||"UNKNOWN"
-      #qualified_var = "::#{module_name}::params::#{args[0]}"
-      component = lookupvar("component")||"UNKNOWN"
-      module_name = (component == "base" ? "::hdp" : "::hdp-#{component}")      
-      qualified_var = "#{module_name}::params::#{args[0]}"
-    end
-    val = lookupvar(qualified_var)
-    (val.nil? or val == :undefined) ? "" : val 
-  end
-end
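The interesting part is the name qualification: an unqualified variable is resolved against the current component's params class. Sketch:

    def qualify_template_var(var, component)
      return var if var.start_with?("::")  # already fully qualified
      module_name = component == "base" ? "::hdp" : "::hdp-#{component}"
      "#{module_name}::params::#{var}"
    end

    p qualify_template_var("namenode_host", "hadoop")  # => "::hdp-hadoop::params::namenode_host"
    p qualify_template_var("::fqdn", "hadoop")         # => "::fqdn"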
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_unique_id_and_date.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_unique_id_and_date.rb
deleted file mode 100644
index 01179e1..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_unique_id_and_date.rb
+++ /dev/null
@@ -1,27 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-module Puppet::Parser::Functions
-  newfunction(:hdp_unique_id_and_date, :type => :rvalue) do 
-    id = lookupvar('::uniqueid')
-    date = `date +"%M%d%y"`.chomp
-    "id#{id}_date#{date}"
-  end
-end
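One quirk worth noting: in `date +"%M%d%y"`, %M is the minute, not the month (%m would be the month), so the "date" part is really minute-day-year; whether that was intentional is not clear from the source. A sketch that reproduces the published behaviour without shelling out:

    def unique_id_and_date(uniqueid)
      "id#{uniqueid}_date#{Time.now.strftime('%M%d%y')}"
    end

    puts unique_id_and_date("007f0101")  # e.g. "id007f0101_date420413"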
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_user.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_user.rb
deleted file mode 100644
index a858fbb..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_user.rb
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-module Puppet::Parser::Functions
-  newfunction(:hdp_user, :type => :rvalue) do |args|
-    args = [args].flatten
-    user = args[0]
-    val = lookupvar("::hdp::params::#{user}")
-    (val.nil? or val == :undefined) ? "" : val 
-  end
-end
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/.directory b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/.directory
deleted file mode 100644
index 6f816d7..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/.directory
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License
-
-[Dolphin]
-Timestamp=2011,3,16,9,26,14
-ViewMode=1
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/configfile.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/configfile.pp
deleted file mode 100644
index 8cd4c7d..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/configfile.pp
+++ /dev/null
@@ -1,85 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-define hdp::configfile(
-  $component,
-  $conf_dir = undef, #if this is undef then name is of form conf_dir/file_name
-  $owner = undef, 
-  $group = $hdp::params::user_group,
-  $mode = undef,
-  $size = 64, #32 or 64 bit (used to pick appropriate java_home)
-  $template_tag = undef,
-  $namenode_host = $hdp::params::namenode_host,
-  $jtnode_host = $hdp::params::jtnode_host,
-  $snamenode_host = $hdp::params::snamenode_host,
-  $slave_hosts = $hdp::params::slave_hosts,
-  $hbase_rs_hosts = $hdp::params::hbase_rs_hosts,
-  $zookeeper_hosts = $hdp::params::zookeeper_hosts,
-  $hbase_master_host = $hdp::params::hbase_master_host,
-  $hcat_server_host = $hdp::params::hcat_server_host,
-  $hive_server_host = $hdp::params::hive_server_host,
-  $oozie_server = $hdp::params::oozie_server,
-  $webhcat_server_host = $hdp::params::webhcat_server_host,
-  $hcat_mysql_host = $hdp::params::hcat_mysql_host,
-  $nagios_server_host = $hdp::params::nagios_server_host,
-  $ganglia_server_host = $hdp::params::ganglia_server_host,
-  $dashboard_host = $hdp::params::dashboard_host,
-  $gateway_host = $hdp::params::gateway_host,
-  $public_namenode_host = $hdp::params::public_namenode_host,
-  $public_snamenode_host = $hdp::params::public_snamenode_host,
-  $public_jtnode_host = $hdp::params::public_jtnode_host,
-  $public_hbase_master_host = $hdp::params::public_hbase_master_host,
-  $public_zookeeper_hosts = $hdp::params::public_zookeeper_hosts,
-  $public_ganglia_server_host = $hdp::params::public_ganglia_server_host,
-  $public_nagios_server_host = $hdp::params::public_nagios_server_host,
-  $public_dashboard_host = $hdp::params::public_dashboard_host,
-  $public_hive_server_host = $hdp::params::public_hive_server_host,
-  $public_oozie_server = $hdp::params::public_oozie_server,
-  $public_webhcat_server_host = $hdp::params::public_webhcat_server_host
-) 
-{
-
-   if ($conf_dir == undef) {
-     $qualified_file_name = $name
-     $file_name = regsubst($name,'^.+/([^/]+$)','\1')
-   } else {
-     $qualified_file_name = "${conf_dir}/${name}"
-     $file_name = $name
-   }
-   if ($component == 'base') {
-     $module = 'hdp'
-   } else {
-      $module = "hdp-${component}"   
-   }
-
-   if ($template_tag == undef) {  
-     $template_name = "${module}/${file_name}.erb"
-   } else {
-     $template_name = "${module}/${file_name}-${template_tag}.erb"
-   }
-
-   file{ $qualified_file_name:
-     ensure  => present,
-     owner   => $owner,
-     group   => $group,
-     mode    => $mode,
-     content => template($template_name)
-  }
-}
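How the define maps its arguments to an ERB template path, sketched in Ruby with the same precedence as the conditionals above:

    def template_name(name, component, conf_dir: nil, template_tag: nil)
      file_name = conf_dir ? name : File.basename(name)
      mod = component == "base" ? "hdp" : "hdp-#{component}"
      template_tag ? "#{mod}/#{file_name}-#{template_tag}.erb" : "#{mod}/#{file_name}.erb"
    end

    p template_name("/etc/hadoop/conf/core-site.xml", "hadoop")
    # => "hdp-hadoop/core-site.xml.erb"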
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/download_keytabs.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/download_keytabs.pp
deleted file mode 100644
index fa74d8f..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/download_keytabs.pp
+++ /dev/null
@@ -1,43 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-define hdp::download_keytab(
-  $masterhost,
-  $keytabdst,
-  $keytabfile,
-  $owner,
-  $group = undef,
-  $mode = '0400',
-  $hostnameInPrincipals = 'yes'
-)
-{
-  $hostname = $::fqdn
-  if ($hostnameInPrincipals == 'yes') {
-    $keytabsrc = "puppet://${masterhost}/modules/keytabs/${hostname}.${keytabfile}"
-  } else {
-    $keytabsrc = "puppet://${masterhost}/modules/keytabs/${keytabfile}"
-  }
-  file { $keytabdst :
-    ensure => present,
-    source => $keytabsrc,
-    mode => $mode,
-    owner => $owner,
-    group => $group
-  }
-}
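The only branching here is the source URL: per-host keytabs embed the agent's FQDN in the file name, shared keytabs do not. Sketch (host and file names are illustrative):

    def keytab_source(masterhost, keytabfile, hostname, per_host: true)
      base = "puppet://#{masterhost}/modules/keytabs"
      per_host ? "#{base}/#{hostname}.#{keytabfile}" : "#{base}/#{keytabfile}"
    end

    p keytab_source("master.example.com", "nn.service.keytab", "dn1.example.com")
    # => "puppet://master.example.com/modules/keytabs/dn1.example.com.nn.service.keytab"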
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/init.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/init.pp
deleted file mode 100644
index 05c37ac..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/init.pp
+++ /dev/null
@@ -1,347 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp(
-  $service_state = undef,
-  $pre_installed_pkgs = undef
-)
-{
-
-  import 'params.pp'
-  include hdp::params
-
-  Exec { logoutput => 'on_failure' }
-
-  group { $hdp::params::user_group :
-    ensure => present
-  }
-
-  #TODO: possibly not needed; there also seems to be a Puppet bug around this and LDAP
-  hdp::user { $hdp::params::hadoop_user:
-    gid => $hdp::params::user_group
-  }
-  Group[$hdp::params::user_group] -> Hdp::User[$hdp::params::hadoop_user] 
-  class { 'hdp::snmp': service_state => 'running'}
-
-  class { 'hdp::create_smoke_user': }
-
-  if ($pre_installed_pkgs != undef) {
-    class { 'hdp::pre_install_pkgs': }
-  }
-
-  #turns off selinux
-  class { 'hdp::set_selinux': }
-
-  if ($service_state != 'uninstalled') {
-    if ($hdp::params::lzo_enabled == true) {
-      @hdp::lzo::package{ 32:}
-      @hdp::lzo::package{ 64:}
-    }
-  }
-
-  #TODO: treat consistently 
-  if ($service_state != 'uninstalled') {
-    if ($hdp::params::snappy_enabled == true) {
-      include hdp::snappy::package
-    }
-  }
-
-  Hdp::Package<|title == 'hadoop 32'|> ->   Hdp::Package<|title == 'hbase'|>
-  Hdp::Package<|title == 'hadoop 64'|> ->   Hdp::Package<|title == 'hbase'|>
-
-  #TODO: just for testing
-  class{ 'hdp::iptables': 
-    ensure => stopped,
-  }
-
-
-  
-  hdp::package{ 'glibc':
-    ensure       => 'present',
-    size         => $size,
-    java_needed  => false,
-    lzo_needed   => false
-  }
-
-}
-
-class hdp::pre_install_pkgs
-{
-
-  if ($service_state == 'installed_and_configured') {
-    hdp::exec{ 'yum install $pre_installed_pkgs':
-       command => "yum install -y $pre_installed_pkgs"
-    }
-  } elsif ($service_state == 'uninstalled') {
-    hdp::exec{ 'yum erase $pre_installed_pkgs':
-       command => "yum erase -y $pre_installed_pkgs"
-    }
-  }
-}
-
-class hdp::create_smoke_user()
-{
-
-  $smoke_group = $hdp::params::smoke_user_group
-  $smoke_user = $hdp::params::smokeuser
-  $security_enabled = $hdp::params::security_enabled
-
-  if ( $smoke_group != $proxyuser_group) {
-    group { $smoke_group :
-      ensure => present
-    }
-  }
-  
-  if ($hdp::params::user_group != $proxyuser_group) {
-    group { $proxyuser_group :
-      ensure => present
-    }
-  }
-  
-  hdp::user { $smoke_user: 
-              gid    => $hdp::params::user_group,
-              groups => ["$proxyuser_group"]
-  }
-
-  if ($security_enabled == true) {
-    $secure_uid = $hdp::params::smoketest_user_secure_uid
-    $cmd_set_uid = "usermod -u ${secure_uid} ${smoke_user}"
-    $cmd_set_uid_check = "id -u ${smoke_user} | grep ${secure_uid}"
-     hdp::exec{ $cmd_set_uid:
-       command => $cmd_set_uid,
-       unless => $cmd_set_uid_check,
-       require => Hdp::User[$smoke_user]
-     }
-  }
-
-  Group<||> -> Hdp::User[$smoke_user]
-}
-
-
-class hdp::set_selinux()
-{
- $cmd = "/bin/echo 0 > /selinux/enforce"
- hdp::exec{ $cmd:
-    command => $cmd,
-    unless => "head -n 1 /selinux/enforce | grep ^0$",
-    onlyif => "test -f /selinux/enforce"
- }
-}
-
-define hdp::user(
-  $gid = $hdp::params::user_group,
-  $just_validate = undef,
-  $groups = undef
-)
-{
-  $user_info = $hdp::params::user_info[$name]
-  if ($just_validate != undef) {
-    $just_val  = $just_validate
-  } elsif (($user_info == undef) or ("|${user_info}|" == '||')){ #tests for different versions of Puppet
-    $just_val = false
-  } else {
-    $just_val = $user_info[just_validate]
-  }
-  
-  if ($just_val == true) {
-    exec { "user ${name} exists":
-      command => "su - ${name} -c 'ls /dev/null' >/dev/null 2>&1",
-      path    => ['/bin']
-    }
-  } else {
-    user { $name:
-      ensure     => present,
-      managehome => true,
-      gid        => $gid, #TODO either remove this to support LDAP env or fix it
-      shell      => '/bin/bash',
-      groups     => $groups 
-    }
-  }
-}
-
-     
-define hdp::directory(
-  $owner = $hdp::params::hadoop_user,
-  $group = $hdp::params::user_group,
-  $mode  = undef,
-  $ensure = directory,
-  $force = undef,
-  $service_state = 'running',
-  $override_owner = false
-  )
-{
- if (($service_state == 'uninstalled') and ($wipeoff_data == true)) {
-  file { $name :
-    ensure => absent,
-    owner  => $owner,
-    group  => $group,
-    mode   => $mode,
-    force  => $force
-   }
-  } elsif ($service_state != 'uninstalled') {
-    if $override_owner == true {
-      file { $name :
-      ensure => present,
-      owner  => $owner,
-      group  => $group,
-      mode   => $mode,
-      force  => $force
-     }
-    } else {
-      file { $name :
-      ensure => present,
-      mode   => $mode,
-      force  => $force
-     }
-    }
-  }
-}
-#TODO: check on -R flag and use of recurse
-define hdp::directory_recursive_create(
-  $owner = $hdp::params::hadoop_user,
-  $group = $hdp::params::user_group,
-  $mode = undef,
-  $context_tag = undef,
-  $ensure = directory,
-  $force = undef,
-  $service_state = 'running',
-  $override_owner = true
-  )
-{
-
-  hdp::exec {"mkdir -p ${name}" :
-    command => "mkdir -p ${name}",
-    creates => $name
-  }
-  #to take care of setting ownership and mode
-  hdp::directory { $name :
-    owner => $owner,
-    group => $group,
-    mode  => $mode,
-    ensure => $ensure,
-    force => $force,
-    service_state => $service_state,
-    override_owner => $override_owner
-  }
-  Hdp::Exec["mkdir -p ${name}"] -> Hdp::Directory[$name]
-}
-
-define hdp::directory_recursive_create_ignore_failure(
-  $owner = $hdp::params::hadoop_user,
-  $group = $hdp::params::user_group,
-  $mode = undef,
-  $context_tag = undef,
-  $ensure = directory,
-  $force = undef,
-  $service_state = 'running'
-  )
-{
-  hdp::exec {"mkdir -p ${name} ; exit 0" :
-    command => "mkdir -p ${name} ; exit 0",
-    creates => $name
-  }
-    hdp::exec {"chown ${owner}:${group} ${name}; exit 0" :
-    command => "chown ${owner}:${group} ${name}; exit 0"
-  }
-    hdp::exec {"chmod ${mode} ${name} ; exit 0" :
-    command => "chmod ${mode} ${name} ; exit 0"
-  }
-  Hdp::Exec["mkdir -p ${name} ; exit 0"] -> Hdp::Exec["chown ${owner}:${group} ${name}; exit 0"] -> Hdp::Exec["chmod ${mode} ${name} ; exit 0"]
-}
-
-### helper to do exec
-define hdp::exec(
-  $command,
-  $refreshonly = undef,
-  $unless = undef,
-  $onlyif = undef,
-  $path = $hdp::params::exec_path,
-  $user = undef,
-  $creates = undef,
-  $tries = 1,
-  $timeout = 300,
-  $try_sleep = undef,
-  $initial_wait = undef,
-  $logoutput = 'on_failure',
-  $cwd = undef
-)
-{
-     
-
-
-  if (($initial_wait != undef) and ($initial_wait != "undef")) {
-    # passing in creates and unless so we don't have to wait if the condition has already been achieved
-    hdp::wait { "service ${name}" : 
-      wait_time => $initial_wait,
-      creates   => $creates,
-      unless    => $unless,
-      onlyif    => $onlyif,
-      path      => $path
-    }
-  }
-  
-  exec { $name :
-    command     => $command,
-    refreshonly => $refreshonly,
-    path        => $path,
-    user        => $user,
-    creates     => $creates,
-    unless      => $unless,
-    onlyif      => $onlyif,
-    tries       => $tries,
-    timeout     => $timeout,
-    try_sleep   => $try_sleep,
-    logoutput   => $logoutput,
-    cwd         => $cwd
-  }
-  
-  anchor{ "hdp::exec::${name}::begin":} -> Exec[$name] -> anchor{ "hdp::exec::${name}::end":} 
-  if (($initial_wait != undef) and ($initial_wait != "undef")) {
-    Anchor["hdp::exec::${name}::begin"] -> Hdp::Wait["service ${name}"] -> Exec[$name]
-  }
-}
-
-#### utilities for waits
-define hdp::wait(
-  $wait_time,
-  $creates = undef,
-  $unless = undef,
-  $onlyif = undef,
-  $path = undef #used for unless
-)   
-{
-  exec { "wait ${name} ${wait_time}" :
-    command => "/bin/sleep ${wait_time}",
-    creates => $creates,
-    unless  => $unless,
-    onlyif  => $onlyif,
-    path    => $path
-  } 
-}
-
-##### temp
-
-class hdp::iptables($ensure)
-{
-  #TODO: just temp so not considering things like saving firewall rules
-  service { 'iptables':
-    ensure => $ensure
-  }
-}
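Of the defines above, hdp::user carries the subtlest branch: just_validate decides whether the account is created or merely checked for existence. A plain-Ruby sketch of that decision (the hash key mirrors the manifest; return-value semantics are annotated):

    def manage_user?(user_info, just_validate = nil)
      just_val =
        if !just_validate.nil?
          just_validate
        elsif user_info.nil? || (user_info.respond_to?(:empty?) && user_info.empty?)
          false  # covers nil and the '||'-style empty values the manifest tests for
        else
          user_info["just_validate"]
        end
      !just_val  # true: create the account; false: only verify it exists
    end

    p manage_user?(nil)                          # => true  (create)
    p manage_user?({ "just_validate" => true })  # => false (verify only)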
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/java/jce/package.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/java/jce/package.pp
deleted file mode 100644
index b9f9829..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/java/jce/package.pp
+++ /dev/null
@@ -1,55 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-define hdp::java::jce::package(
-  $java_home_dir
-)
-{
-  include hdp::params
-
-  $jce_policy_zip = $hdp::params::jce_policy_zip
-  $artifact_dir = $hdp::params::artifact_dir
-  $jce_location = $hdp::params::jce_location
-  $jce_curl_target = "${artifact_dir}/${jce_policy_zip}"
-  
-  #TODO:SUHAS how to avoid re-download and re-install if the correct version is already present.
-  # maybe check the file sizes for the local_policy and US_export policy jars?
-  # UNLESS  => "test -e ${java_exec}"
-  $curl_cmd = "curl -f --retry 10 ${jce_location}/${jce_policy_zip} -o ${jce_curl_target}"
-  exec{ "jce-download ${name}":
-    command => $curl_cmd,
-    creates => $jce_curl_target,
-    path    => ["/bin","/usr/bin/"],
-  }
-
-  $security_dir = "${java_home_dir}/jre/lib/security"
-  $cmd = "rm -f local_policy.jar; rm -f US_export_policy.jar; unzip -o -j -q ${jce_curl_target}"
-  exec { "jce-install ${name}":
-    command => $cmd,
-    onlyif  => "test -e ${security_dir}",
-    cwd     => $security_dir,
-    path    => ['/bin/','/usr/bin']
-  }
-
-  #TODO: SUHAS add ensure code to check local and us export policy files exist -> File["${java_exec} ${name}"]
-
-  anchor{"hdp::java::jce::package::${name}::begin":} -> Exec["jce-download ${name}"] ->  Exec["jce-install ${name}"] -> anchor{"hdp::java::jce::package::${name}::end":}
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/java/package.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/java/package.pp
deleted file mode 100644
index 818d3ee..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/java/package.pp
+++ /dev/null
@@ -1,71 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-define hdp::java::package(
-  $size
-)
-{
-    
-  include hdp::params
-  
-  $security_enabled = $hdp::params::security_enabled
-  $jdk_bin = $hdp::params::jdk_bins[$size]
-  $artifact_dir = $hdp::params::artifact_dir
-  $jdk_location = $hdp::params::jdk_location
-  $jdk_curl_target = "${artifact_dir}/${jdk_bin}"
- 
-  if ($size == "32") {
-    $java_home = $hdp::params::java32_home
-  } else {
-    $java_home = $hdp::params::java64_home
-  }
-  $java_exec = "${java_home}/bin/java"
-  $java_dir = regsubst($java_home,'/[^/]+$','')
-   
-  $curl_cmd = "mkdir -p ${artifact_dir} ; curl -f --retry 10 ${jdk_location}/${jdk_bin} -o ${jdk_curl_target}"
-  exec{ "${curl_cmd} ${name}":
-    command => $curl_cmd,
-    creates => $jdk_curl_target,
-    path    => ["/bin","/usr/bin/"],
-    unless  => "test -e ${java_exec}"
-  }
- 
-  $install_cmd = "mkdir -p ${java_dir} ; chmod +x ${jdk_curl_target}; cd ${java_dir} ; echo A | ${jdk_curl_target} -noregister > /dev/null 2>&1"
-  exec{ "${install_cmd} ${name}":
-    command => $install_cmd,
-    unless  => "test -e ${java_exec}",
-    path    => ["/bin","/usr/bin/"]
-  }
- 
-  file { "${java_exec} ${name}":
-  ensure => present
-  }   
- 
-  if ($security_enabled == true) {
-    hdp::java::jce::package{ $name:
-      java_home_dir  => $java_home
-    }
-  }
-
-  anchor{"hdp::java::package::${name}::begin":} -> Exec["${curl_cmd} ${name}"] ->  Exec["${install_cmd} ${name}"] -> File["${java_exec} ${name}"] ->  anchor{"hdp::java::package::${name}::end":}
-  if ($security_enabled == true) {
-    File["${java_exec} ${name}"] -> Hdp::Java::Jce::Package[$name] 
-  }
-}
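A small detail above: regsubst($java_home,'/[^/]+$','') strips the last path component to get the install parent directory. In Ruby terms:

    java_home = "/usr/jdk64/jdk1.6.0_31"
    java_dir  = java_home.sub(%r{/[^/]+$}, "")
    puts java_dir                 # => "/usr/jdk64"
    puts File.dirname(java_home)  # equivalent result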
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/lzo/package.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/lzo/package.pp
deleted file mode 100644
index fe5a764..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/lzo/package.pp
+++ /dev/null
@@ -1,36 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-define hdp::lzo::package()
-{
-  $size = $name
-
-  $pkg_type = "lzo"
-
-  hdp::package {"lzo ${size}":
-    package_type  => "${pkg_type}",
-    size          => $size,
-    java_needed   => false
-  }
-
-  $anchor_beg = "hdp::lzo::package::${size}::begin"
-  $anchor_end = "hdp::lzo::package::${size}::end"
-  anchor{$anchor_beg:} ->  Hdp::Package["lzo ${size}"] -> anchor{$anchor_end:}
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/namenode-conn.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/namenode-conn.pp
deleted file mode 100644
index 268e213..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/namenode-conn.pp
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp::namenode-conn($namenode_host)
-{
-  Hdp::Configfile<||>{namenode_host => $namenode_host}
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/package.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/package.pp
deleted file mode 100644
index 8618e8c..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/package.pp
+++ /dev/null
@@ -1,135 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-define hdp::package(
-  $ensure = present,
-  $package_type = undef,
-  $size = 64,
-  $java_needed = true,
-  $lzo_needed = false
-  )
-{
-
-  $pt = $package_type ? {
-    undef  => $name,
-    default  => $package_type
-  }
-  
-  hdp::package::process_pkg { $name:
-    ensure       => $ensure,
-    package_type => $pt,
-    size         => $size,
-    java_needed  => $java_needed,
-    lzo_needed   => $lzo_needed
-  }
-}
-
-define hdp::package::process_pkg(
-  $ensure = present,
-  $package_type,
-  $size,
-  $java_needed,
-  $lzo_needed
-  )
-{
-    
-
-  debug("##Processing package:  $ensure,$package_type,$size,$java_needed,$lzo_needed")
-
-  include hdp::params
-
-  if hdp_is_empty($hdp::params::alt_package_names[$package_type]) {
-    hdp_fail("No packages for $package_type")
-  }
-
-  if hdp_is_empty($hdp::params::alt_package_names[$package_type][$size]) {
-
-    if hdp_is_empty($hdp::params::alt_package_names[$package_type][ALL]) {
-      hdp_fail("No packages for $package_type")
-    }
-    else {
-      $packages_list_by_size = $hdp::params::alt_package_names[$package_type][ALL]
-    }
-  }
-  else {
-    $packages_list_by_size = $hdp::params::alt_package_names[$package_type][$size]
-
-  }
-  if hdp_is_empty($packages_list_by_size[$hdp::params::hdp_os_type]) {
-
-    if hdp_is_empty($packages_list_by_size[ALL]) {
-      hdp_fail("No packages for $package_type")
-    }
-    else {
-      $packages_list = $packages_list_by_size[ALL]
-    }
-  }
-  else {
-    $packages_list = $packages_list_by_size[$hdp::params::hdp_os_type]
-  }
-
-  debug("##Packages list: $packages_list")
-
-  if (($java_needed == true) and ($ensure == 'present')){
-    hdp::java::package{ $name:
-      size                 => $size
-    }
-  }
-
-  if (($lzo_needed == true) and ($ensure == 'present')){
-    Hdp::Lzo::Package<|title == $size|>
-  }
-
-  if ($ensure == 'uninstalled') {
-    $ensure_actual = 'purged'
-  } else {
-    $ensure_actual = $ensure
-  }
-  $tag = regsubst($name,' ','-',G)
-  if $packages_list != $hdp::params::NOTHING {
-    package{ $packages_list:
-      ensure   => $ensure_actual,
-      tag      => $tag
-    }
-  }
-  anchor{ "hdp::package::${name}::begin": } -> Package<|tag == $tag|> -> anchor{ "hdp::package::${name}::end": }
-  
-  if (($java_needed == true)and ($ensure == 'present')) {
-   Anchor["hdp::package::${name}::begin"] -> Hdp::Java::Package[$name] -> Anchor["hdp::package::${name}::end"] 
-  }
-}
-
-# Removes the specified package using shell command appropriate for current OS type.
-# Method DOES NOT resolve package name via hdp::params.
-# If package does not exist or is not installed, command does nothing.
-define hdp::package::remove_pkg(
-    $package_type,
-  )
-{
-
-  # TODO: For non-rpm based systems, provide appropriate command
-  exec { "remove_package ${package_type}":
-    path    => "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
-    command => $hdp::params::hdp_os_type ? {
-      default => "rpm -e --allmatches ${package_type} ; true"
-    },
-  }
-
-}
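The lookup cascade in process_pkg is the core of this file: resolve the candidate list first by word size (with an ALL fallback), then by OS type (again with an ALL fallback), failing when no level matches. A Ruby sketch using the rrdtool entry from params.pp below; it assumes the per-OS hash form of the table:

    def resolve_packages(alt_names, pkg_type, size, os_type)
      by_type = alt_names[pkg_type] or raise "No packages for #{pkg_type}"
      by_size = by_type[size] || by_type["ALL"] or raise "No packages for #{pkg_type}"
      by_size[os_type] || by_size["ALL"] or raise "No packages for #{pkg_type}"
    end

    alt = { "rrdtool" => { 64 => { "ALL" => "rrdtool.i686", "centos5" => "rrdtool.i386" } } }
    p resolve_packages(alt, "rrdtool", 64, "centos5")  # => "rrdtool.i386"
    p resolve_packages(alt, "rrdtool", 64, "centos6")  # => "rrdtool.i686"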
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/params.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/params.pp
deleted file mode 100644
index 45f8337..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/params.pp
+++ /dev/null
@@ -1,646 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp::params()
-{
-
-  ##Constants##
-  $NOTHING='NOTHING'
-
-  ##### global state defaults ####
-  $cluster_service_state = hdp_default("cluster_service_state","running")
-  $cluster_client_state = hdp_default("cluster_client_state","installed_and_configured")
-
-  ##### for secure install
-  $security_enabled = hdp_default("security_enabled",false)
-  $kerberos_domain = hdp_default("kerberos_domain","EXAMPLE.COM")
-  $smoketest_user_secure_uid = hdp_default("smoketest_user_secure_uid",1012)
-  ## $smoketest_user_secure_uid = 1012
-
-  ###### hostnames
-  $namenode_host = hdp_default("namenode_host")
-  $snamenode_host = hdp_default("snamenode_host")
-  $jtnode_host = hdp_default("jtnode_host")
-  $slave_hosts = hdp_default("slave_hosts")
-  
-  $zookeeper_hosts = hdp_default("zookeeper_hosts")
-
-  $hbase_master_host = hdp_default("hbase_master_host", "")
-  $hbase_rs_hosts = hdp_default("hbase_rs_hosts",$slave_hosts) #if hbase_rs_hosts not given it is assumed that region servers on same nodes as slaves
-
-  $hive_server_host = hdp_default("hive_server_host", "")
-  $oozie_server =  hdp_default("oozie_server", "")
-  $webhcat_server_host = hdp_default("webhcat_server_host", "")
-  $gateway_host = hdp_default("gateway_host")
-  
-  $nagios_server_host = hdp_default("nagios_server_host")
-  $ganglia_server_host = hdp_default("ganglia_server_host")
-  
-  $dashboard_host = hdp_default("dashboard_host")
-
-  $hdp_os = $::operatingsystem
-  $hdp_os_version = $::operatingsystemrelease
-
-  
-  case $::operatingsystem {
-    centos: {
-      case $::operatingsystemrelease {
-        /^5\..+$/: { $hdp_os_type = "centos5" }
-        /^6\..+$/: { $hdp_os_type = "centos6" }
-      }
-    }
-    redhat: {
-      case $::operatingsystemrelease {
-        /^5\..+$/: { $hdp_os_type = "redhat5" }
-        /^6\..+$/: { $hdp_os_type = "redhat6" }
-      }
-    }
-    suse: {
-      $hdp_os_type = "suse"
-    }
-    SLES: {
-      $hdp_os_type = "suse"
-    }
-
-    default: {
-      hdp_fail("No support for os $::operatingsystem  ${hdp_os} ${hdp_os_version}")
-    }
-  }
-
-  if ($hostAttributes != undef) {
-    $public_namenode_host = hdp_host_attribute($hostAttributes,"publicfqdn",$namenode_host)
-    $public_snamenode_host = hdp_host_attribute($hostAttributes,"publicfqdn",$snamenode_host)
-    $public_jtnode_host = hdp_host_attribute($hostAttributes,"publicfqdn",$jtnode_host)
-    $public_hbase_master_host = hdp_host_attribute($hostAttributes,"publicfqdn",$hbase_master_host)
-    $public_zookeeper_hosts = hdp_host_attribute($hostAttributes,"publicfqdn",$zookeeper_hosts)
-    $public_ganglia_server_host = hdp_host_attribute($hostAttributes,"publicfqdn",$ganglia_server_host)
-    $public_nagios_server_host = hdp_host_attribute($hostAttributes,"publicfqdn",$nagios_server_host)
-    $public_dashboard_host = hdp_host_attribute($hostAttributes,"publicfqdn",$dashboard_host)
-    $public_hive_server_host = hdp_host_attribute($hostAttributes,"publicfqdn",$hive_server_host)
-    $public_oozie_server = hdp_host_attribute($hostAttributes,"publicfqdn",$oozie_server)
-    $public_webhcat_server_host = hdp_host_attribute($hostAttributes,"publicfqdn",$webhcat_server_host)
-  } else {
-    $public_namenode_host = hdp_default("namenode_host")
-    $public_snamenode_host = hdp_default("snamenode_host")
-    $public_jtnode_host = hdp_default("jtnode_host")
-    $public_hbase_master_host = hdp_default("hbase_master_host")
-    $public_zookeeper_hosts = hdp_default("zookeeper_hosts")
-    $public_ganglia_server_host = hdp_default("ganglia_server_host")
-    $public_nagios_server_host = hdp_default("nagios_server_host")
-    $public_dashboard_host = hdp_default("dashboard_host")
-    $public_hive_server_host = hdp_default("hive_server_host")
-    $public_oozie_server = hdp_default("oozie_server")
-    $public_webhcat_server_host = hdp_default("webhcat_server_host")
-  }
-
-
-  ############ users
-  $user_info = hdp_default("user_info",{})
-
-  $hdfs_user = hdp_default("hdfs_user","hdfs")
-  $mapred_user = hdp_default("mapred_user","mapred")
-
-  $zk_user = hdp_default("zk_user","zookeeper") 
-  $hbase_user = hdp_default("hbase_user","hbase")
-
-  $hive_user = hdp_default("hive_user","hive")
-  $hcat_user = hdp_default("hcat_user","hcat")
-  $webhcat_user = hdp_default("webhcat_user","hcat")
-
-  $oozie_user = hdp_default("oozie_user","oozie")
-  $templeton_user = hdp_default("templeton_user","hcat")
-
-  $gmetad_user = hdp_default("gmetad_user","nobody")
-  $gmond_user = hdp_default("gmond_user","nobody")
-
-  $smokeuser = hdp_default("smokeuser","ambari_qa")
-  $smoke_user_group = hdp_default("smoke_user_group","users")
-  
-  ############ Hdfs users directories
-  $oozie_hdfs_user_dir = hdp_default("oozie_hdfs_user_dir", "/user/${oozie_user}")
-  $oozie_hdfs_user_mode = 775
-  $hcat_hdfs_user_dir = hdp_default("hcat_hdfs_user_dir", "/user/${hcat_user}")
-  $hcat_hdfs_user_mode = 755
-  $webhcat_hdfs_user_dir = hdp_default("hcat_hdfs_user_dir", "/user/${webhcat_user}")
-  $webhcat_hdfs_user_mode = 755
-  $hive_hdfs_user_dir = hdp_default("hive_hdfs_user_dir", "/user/${hive_user}")
-  $hive_hdfs_user_mode = 700
-  $smoke_hdfs_user_dir = hdp_default("smoke_hdfs_user_dir", "/user/${smokeuser}")
-  $smoke_hdfs_user_mode = 770
-  
-  ############ Hdfs apps directories
-  $hive_apps_whs_dir = hdp_default("hive_apps_whs_dir", "/apps/hive/warehouse")
-  $webhcat_apps_dir = hdp_default("webhcat_apps_dir", "/apps/webhcat")
-  $hbase_hdfs_root_dir = hdp_default("hadoop/hbase-site/hbase_hdfs_root_dir","/apps/hbase/data")
-
-  #because of Puppet user resource issue make sure that $hadoop_user is different from user_group
-  if ($security_enabled == true) {
-    $hadoop_user = "root"
-  } else {
-    $hadoop_user = hdp_default("hadoop_user", "hadoop_deploy")
-  }
-  $user_group = hdp_default("user_group","hadoop")
-
-  $ganglia_enabled = hdp_default("ganglia_enabled",true) 
-
-  #TODO: either remove or make conditional on ec2
-  $host_address = undef 
-
-  ##### java 
-  $java32_home = hdp_default("java32_home","/usr/jdk32/jdk1.6.0_31")
-  $java64_home = hdp_default("java64_home","/usr/jdk64/jdk1.6.0_31")
-  
-  $wipeoff_data =  hdp_default("wipeoff_data",false) 
-
-  $jdk_location = hdp_default("jdk_location","http://download.oracle.com/otn-pub/java/jdk/6u31-b03")
-  $jdk_bins = hdp_default("jdk_bins",{
-    32 => "jdk-6u31-linux-i586.bin",
-    64 => "jdk-6u31-linux-x64.bin"
-  })
-
-  $jce_policy_zip = "jce_policy-6.zip"
-  $jce_location = hdp_default("jce_location","http://download.oracle.com/otn-pub/java/jce_policy/6")
-
-  #####
-  $hadoop_home = hdp_default("hadoop_home","/usr")
-  $hadoop_lib_home = hdp_default("hadoop_lib_home","/usr/lib/hadoop/lib")
-
-  #####compression related
-
-  $lzo_enabled = hdp_default("lzo_enabled",false)
-  $snappy_enabled = hdp_default("snappy_enabled",true)
-  
-  $lzo_compression_so_dirs = {
-    32 => "${hadoop_lib_home}/native/Linux-i386-32/",
-    64 => "${hadoop_lib_home}/native/Linux-amd64-64/"
-  }
-  
-  $snappy_so_src_dir = {
-    32 => "${hadoop_home}/lib",
-    64 => "${hadoop_home}/lib64"
-  }
-  $snappy_compression_so_dirs = {
-    32 => "${hadoop_lib_home}/native/Linux-i386-32/",
-    64 => "${hadoop_lib_home}/native/Linux-amd64-64/"
-  }
-
-  $lzo_tar_name = hdp_default("lzo_tar_name","hadoop-lzo-0.5.0")
-  
-  $snappy_so = hdp_default("snappy_so","libsnappy.so")
-  #####
- 
-  $exec_path = ["/bin","/usr/bin", "/usr/sbin"]
-
-   #### params used on multiple modules
-  $dfs_data_dir = hdp_default("hadoop/hdfs-site/dfs_data_dir","/tmp/hadoop-hdfs/dfs/data")
-
-  ### artifact dir
-  $artifact_dir = hdp_default("artifact_dir","/tmp/HDP-artifacts/")
-
-  ### artifacts download url ##
-  $apache_artifacts_download_url = hdp_default("apache_artifacts_download_url","")
-  $gpl_artifacts_download_url = hdp_default("gpl_artifacts_download_url","") 
-
-  ### related to package resources  
-  #TODO: delete variable $package_names
-  $package_names = {
-   # hadoop => {
-   #   32 => 'hadoop.i386',
-   #   64 => 'hadoop.x86_64'
-   # },
-   # zookeeper => {
-   #   64 => 'zookeeper.x86_64'
-   # },
-   # hbase => {
-   #   64 => 'hbase.x86_64'
-   # },
-   # hcat-server => {
-   #   64 => 'hcatalog-server.x86_64'
-   # },
-   # hcat-base => {
-   #   64 => 'hcatalog.x86_64'
-   # },
-   # pig => {
-   #   32 => 'pig.i386'
-   # },
-    ganglia-monitor => {
-      64 => 'ganglia-gmond-3.2.0'
-    },
-    ganglia-server => {
-      64 => ['ganglia-gmetad-3.2.0']
-    },
-    ganglia-gweb => {
-      64 => 'gweb'
-    },
-    ganglia-hdp-gweb-addons => {
-      64 => 'hdp_mon_ganglia_addons'
-    },
-    glibc-rhel6 => {
-      32 => ['glibc','glibc.i686'],
-      64 => ['glibc','glibc.i686']
-    },
-    nagios-addons => {
-      64 => 'hdp_mon_nagios_addons'
-    },
-    nagios-server => {
-      64 => 'nagios-3.2.3'
-    },
-    nagios-plugins => {
-      64 => 'nagios-plugins'
-    },
-    nagios-fping => {
-      64 =>'fping'
-    },
-    nagios-php-pecl-json => {
-      64 => 'php-pecl-json.x86_64'
-    },
-    snmp => {
-      64 => ['net-snmp'],
-    },
-    dashboard => {
-      64 => 'hdp_mon_dashboard'
-    },
-    # sqoop => {
-    #   32 => 'sqoop-1.4.1-1.noarch'
-    #},
-    webhcat => {
-       32 => 'hcatalog',
-       64 => 'hcatalog'
-    },
-    oozie-client => {
-      64 => 'oozie-client'
-    },
-    oozie-server => {
-      64 => 'oozie'
-    },
-    lzo-rhel5 => {
-      32 => ['lzo','lzo.i386','lzo-devel','lzo-devel.i386'],
-      64 => ['lzo','lzo.i386','lzo-devel','lzo-devel.i386']
-    },
-    lzo-rhel6 => {
-      32 => ['lzo','lzo.i686','lzo-devel','lzo-devel.i686'],
-      64 => ['lzo','lzo.i686','lzo-devel','lzo-devel.i686']
-    },
-    #TODO: make these two consistent on whether case of 64/32 bits
-    snappy => {
-      32 =>  ['snappy','snappy-devel'],
-      64 => ['snappy','snappy-devel']
-    },
-    mysql => {
-      32 =>  ['mysql','mysql-server']
-    },
-    mysql-connector => {
-      64 =>  ['mysql-connector-java']
-    },
-    extjs => {
-      64 =>  ['extjs-2.2-1']
-    },
-    templeton-tar-hive => {
-      64 => ['templeton-tar-hive-0.0.1.14-1']
-    },
-    templeton-tar-pig => {
-      64 => ['templeton-tar-pig-0.0.1.14-1']
-    },
-    rrdtool-python => {
-      64 => ['python-rrdtool.x86_64']
-    },
-    # The 32-bit version of package rrdtool-devel is removed on centos 5/6 to prevent a conflict (BUG-2881)
-    rrdtool-devel => {
-      64 => {
-        'ALL' => 'rrdtool-devel.i686',
-        'centos6' => 'rrdtool-devel.i686',
-        'centos5' => 'rrdtool-devel.i386',
-        'redhat6' => 'rrdtool-devel.i686',
-        'redhat5' => 'rrdtool-devel.i386'
-      }
-    },
-    # The 32-bit version of package rrdtool is removed on centos 5/6 to prevent a conflict (BUG-2408)
-    rrdtool => {
-      64 => {
-        'ALL' => 'rrdtool.i686',
-        'centos6' => 'rrdtool.i686',
-        'centos5' => 'rrdtool.i386',
-        'redhat6' => 'rrdtool.i686',
-        'redhat5' => 'rrdtool.i386'
-       }
-    },
-    ambari-log4j => {
-      64 => ['ambari-log4j']
-    } 
-  }
-  $packages = 'bigtop' 
-  if ($packages == 'hdp') {
-    $package_names[hadoop] = { 32 => ['hadoop.i386'], 64 => ['hadoop.x86_64']}
-    $mapred_smoke_test_script = "/usr/sbin/hadoop-validate-setup.sh"
-    $hadoop_bin = "/usr/sbin"
-    $hadoop_conf_dir = "/etc/hadoop"
-    $zk_conf_dir = "/etc/zookeeper"
-    $hbase_conf_dir = "/etc/hbase"
-    $sqoop_conf_dir = "/etc/sqoop"
-    $pig_conf_dir = "/etc/pig"
-    $oozie_conf_dir = "/etc/oozie"
-    $hadoop_jar_location = "/usr/share/hadoop"
-    $hbase_daemon_script = "/usr/bin/hbase-daemon.sh"
-    $use_32_bits_on_slaves = false
-    $package_names[zookeeper] = {64 => 'zookeeper.x86_64'}
-    $package_names[hbase] = {64 => 'hbase.x86_64'}
-    $package_names[sqoop] = {32 => 'sqoop-1.4.1-1.noarch'}
-    $package_names[pig] = { 32 => 'pig.i386'}
-    $package_names[hcat-server] = { 64 => 'hcatalog-server.x86_64'}
-    $package_names[hcat-base] = { 64 => 'hcatalog.x86_64'}
-    $zk_bin = '/usr/sbin'
-    $zk_smoke_test_script = '/usr/bin/zkCli.sh'
-    $update_zk_shell_files = false
-
-    $hcat_server_host = hdp_default("hcat_server_host")
-    $hcat_mysql_host = hdp_default("hcat_mysql_host")
-
-  } elsif ($packages == 'bigtop') {  
-
-    $package_names[hadoop] = {32 => ['hadoop','hadoop-libhdfs.i386','hadoop-native.i386','hadoop-pipes.i386','hadoop-sbin.i386','hadoop-lzo', 'hadoop-lzo-native.i386'], 64 => ['hadoop','hadoop-libhdfs','hadoop-native','hadoop-pipes','hadoop-sbin','hadoop-lzo', 'hadoop-lzo-native']}
-    #$package_names[hadoop] = {32 => ['hadoop.i386','hadoop-native.i386'], 64 => ['hadoop.x86_64','hadoop-native.x86_64']}
-   
-    $mapred_smoke_test_script = "/usr/lib/hadoop/sbin/hadoop-validate-setup.sh"
-    $hadoop_bin = "/usr/lib/hadoop/bin"
-    $hadoop_conf_dir = "/etc/hadoop/conf"
-    $zk_conf_dir = "/etc/zookeeper/conf"
-    $hbase_conf_dir = "/etc/hbase/conf"
-    $sqoop_conf_dir = "/usr/lib/sqoop/conf"
-    $pig_conf_dir = "/etc/pig/conf"
-    $oozie_conf_dir = "/etc/oozie/conf"
-    $hive_conf_dir = "/etc/hive/conf"
-    $hcat_conf_dir = "/etc/hcatalog/conf"
-    $hadoop_jar_location = "/usr/lib/hadoop/"
-    $hbase_daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"
-    $use_32_bits_on_slaves = false
-    $package_names[zookeeper] = {64 => ['zookeeper']}
-    $package_names[hbase] = {64 => ['hbase']}
-    $package_names[sqoop] = {32 => ['sqoop'], 64 => ['sqoop']}
-    $package_names[pig] = {32 => ['pig.noarch'], 64 => ['pig.noarch']}
-    $package_names[hcat] = {32 => ['hcatalog'], 64 => ['hcatalog']}
-    $package_names[hive] = {64 => ['hive']}
-    $zk_bin = '/usr/lib/zookeeper/bin'
-    $zk_smoke_test_script = "/usr/lib/zookeeper/bin/zkCli.sh"
-    $update_zk_shell_files = false
-
-    $hive_mysql_host = hdp_default("hive_mysql_host","localhost")
-
-    $hcat_server_host = hdp_default("hive_server_host")
-    $hcat_mysql_host = hdp_default("hive_mysql_host")
-
-
-
-    $pathes = {
-      nagios_p1_pl => {
-        'ALL' => '/usr/bin/p1.pl',
-        suse  => '/usr/lib/nagios/p1.pl'
-      }
-    }
-
-    $services_names = {
-      mysql => {
-        'ALL' => 'mysqld',
-        suse  => 'mysql' },
-      httpd => {
-        'ALL' => 'httpd',
-        suse  => 'apache2' }
-    }
-
-    $cmds = {
-      htpasswd => {
-        'ALL' => 'htpasswd',
-        suse  => 'htpasswd2'
-      }
-    }
-    
-
-    $alt_package_names =
-    {
-    snmp => {
-      64 => {suse  => ['net-snmp'],
-             'ALL' => ['net-snmp', 'net-snmp-utils']}
-    },
-
-    oozie-server => {
-      64 => {'ALL' => 'oozie.noarch'}
-    },
-
-
-    snappy => {
-      64 => {'ALL' => ['snappy','snappy-devel']}
-    },
-
-
-    hadoop => {
-      32 => {'ALL' => ['hadoop','hadoop-libhdfs.i386','hadoop-native.i386','hadoop-pipes.i386','hadoop-sbin.i386','hadoop-lzo', 'hadoop-lzo-native.i386']},
-      64 => {'ALL' =>['hadoop','hadoop-libhdfs','hadoop-native','hadoop-pipes','hadoop-sbin','hadoop-lzo', 'hadoop-lzo-native']}
-    },
-
-    lzo => {
-      'ALL' => {'ALL' => ['lzo', 'lzo-devel'],
-                suse => ['lzo-devel']},
-    },
-
-    glibc=> {
-      'ALL' => {'ALL' => ['glibc','glibc.i686'],
-                suse => ['glibc']},
-    },
-
-    zookeeper=> {
-      64 => {'ALL' => 'zookeeper'},
-    },
-    hbase=> {
-      64 => {'ALL' => 'hbase'},
-    },
-
-    pig=> {
-      'ALL' => {'ALL'=>['pig.noarch']}
-    },
-
-    sqoop=> {
-      'ALL' =>{'ALL' => ['sqoop']}
-    },
-
-    mysql-connector-java=> {
-      'ALL' =>{'ALL' => ['mysql-connector-java']}
-    },
-    oozie-client=> {
-      '64' =>{'ALL' => ['oozie-client.noarch']}
-    },
-    extjs=> {
-      64 =>{'ALL' => ['extjs-2.2-1']}
-    },
-    hive=> {
-      64 =>{'ALL' => ['hive']}
-    },
-    hcat=> {
-      'ALL' =>{'ALL' => ['hcatalog']}
-    },
-
-    mysql => {
-      64 =>  {'ALL' => ['mysql','mysql-server'],
-              suse => ['mysql-client','mysql']}
-    },
-    webhcat => {
-      'ALL' => {'ALL' => 'hcatalog'}
-    },
-    webhcat-tar-hive => {
-      64 => {'ALL' => 'webhcat-tar-hive'}
-    },
-    webhcat-tar-pig => {
-      64 => {'ALL' =>'webhcat-tar-pig'}
-    },
-    dashboard => {
-      64 => {'ALL' => 'hdp_mon_dashboard'}
-    },
-
-    perl =>
-    {
-      64 => {'ALL' => 'perl'}
-    },
-
-    perl-Net-SNMP =>
-    {
-      64 => {'ALL' => 'perl-Net-SNMP'}
-    },
-
-    nagios-server => {
-      64 => {'ALL' => 'nagios-3.2.3'}
-    },
-
-    nagios-fping => {
-      64 =>{'ALL' => 'fping'}
-    },
-
-    nagios-plugins => {
-      64 => {'ALL' => 'nagios-plugins-1.4.9'}
-    },
-
-    nagios-addons => {
-      64 => {'ALL' => 'hdp_mon_nagios_addons'}
-    },
-    nagios-php-pecl-json => {
-      64 => {'ALL' => $NOTHING,
-             suse => 'php5-json',
-             centos6 => $NOTHING,
-             redhat6 => $NOTHING,
-             centos5 => 'php-pecl-json.x86_64',
-             redhat5 => 'php-pecl-json.x86_64'}
-    },
-
-    ganglia-server => {
-      64 => {'ALL' => 'ganglia-gmetad-3.2.0'}
-    },
-
-    ganglia-gweb => {
-      64 => {'ALL' => 'gweb'}
-    },
-
-    ganglia-hdp-gweb-addons => {
-      64 => {'ALL' => 'hdp_mon_ganglia_addons'}
-    },
-
-    ganglia-monitor => {
-      64 => {'ALL' =>'ganglia-gmond-3.2.0'}
-    },
-
-    rrdtool-python => {
-      64 => {'ALL' =>'python-rrdtool.x86_64'}
-    },
-
-    # The 32-bit version of package rrdtool-devel is removed on centos 5/6 to prevent a conflict (BUG-2881)
-    rrdtool-devel => {
-      64 => {
-        'ALL' => 'rrdtool-devel.i686',
-        'centos6' => 'rrdtool-devel.i686',
-        'centos5' => 'rrdtool-devel.i386',
-        'redhat6' => 'rrdtool-devel.i686',
-        'redhat5' => 'rrdtool-devel.i386'
-        }
-    },
-
-    # The 32-bit version of package rrdtool is removed on centos 5/6 to prevent a conflict (BUG-2408)
-    rrdtool => {
-      64 => {
-        'ALL' => 'rrdtool.i686',
-        'centos6' => 'rrdtool.i686',
-        'centos5' => 'rrdtool.i386',
-        'redhat6' => 'rrdtool.i686',
-        'redhat5' => 'rrdtool.i386'
-        }
-    },
-
-    ambari-log4j => {
-      64 => {'ALL' =>'ambari-log4j'}
-    },
-    httpd => {
-      64 => {'ALL' =>'httpd',
-        suse => ['apache2', 'apache2-mod_php5']}
-    }
-
-	
-}
-
-  $repos_paths = 
-  {
-    centos6 => '/etc/yum.repos.d',
-    centos5 => '/etc/yum.repos.d',
-    suse => '/etc/zypp/repos.d',
-    redhat6 => '/etc/yum.repos.d',
-    redhat5 => '/etc/yum.repos.d'
-  }
-
-  $rrd_py_path =
-  {
-    suse => '/srv/www/cgi-bin',
-    centos6 => '/var/www/cgi-bin',
-    centos5 => '/var/www/cgi-bin',
-    redhat6 => '/var/www/cgi-bin',
-    redhat5 => '/var/www/cgi-bin'
-  }
-  
-  $nagios_lookup_daemon_strs = 
-  {
-    suse => '/usr/sbin/nagios',
-    centos6 => '/usr/bin/nagios',
-    centos5 => '/usr/bin/nagios',
-    redhat6 => '/usr/bin/nagios',
-    redhat5 => '/usr/bin/nagios'
-  }
-
-
-
-
-  }
-
- 
-###### snmp
-
-  $snmp_conf_dir = hdp_default("snmp_conf_dir","/etc/snmp/")
-  $snmp_source = hdp_default("snmp_source","0.0.0.0/0") ##TODO: 0.0.0.0/0 is open for testing only and must be locked down
-  $snmp_community = hdp_default("snmp_community","hadoop")
-
-###### aux
-  #used by ganglia monitor to tell what components and services are present
-  $component_exists = {} 
-  $service_exists = {} 
-}
-
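The $package_names and $alt_package_names tables removed above are keyed first by word size (32/64) and, in $alt_package_names, by OS name inside that, with 'ALL' as the fallback. A minimal Puppet sketch of that two-level lookup, assuming the hdp::params class above is in scope; the $for_arch, $by_os, and $resolved names are illustrative only:

    # Resolve the rrdtool package name for a 64-bit centos6 host:
    # outer key = word size, inner key = OS, 'ALL' = fallback.
    $for_arch = $hdp::params::alt_package_names[rrdtool]
    $by_os    = $for_arch[64]
    if has_key($by_os, 'centos6') {
      $resolved = $by_os['centos6']   # 'rrdtool.i686'
    } else {
      $resolved = $by_os['ALL']       # generic fallback entry
    }

has_key here is the stdlib parser function bundled later in this same change.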
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/snappy/package.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/snappy/package.pp
deleted file mode 100644
index 52a4094..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/snappy/package.pp
+++ /dev/null
@@ -1,52 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp::snappy::package()
-{
- hdp::package {'snappy':
-    package_type  => 'snappy',
-    java_needed   => false
-  }
-  
-  hdp::snappy::package::ln { 64: }
-  hdp::snappy::package::ln { 32: }
-  
-  anchor{'hdp::snappy::package::begin':} ->  Hdp::Package['snappy'] -> Hdp::Snappy::Package::Ln<||> -> anchor{'hdp::snappy::package::end':}
-}
-
-define hdp::snappy::package::ln()
-{
-  $size = $name
-  $hadoop_home = $hdp::params::hadoop_home  
-  $snappy_so = $hdp::params::snappy_so
-  $so_target_dir = $hdp::params::snappy_compression_so_dirs[$size]
-  $so_target = "${so_target_dir}/libsnappy.so"
-  $so_src_dir = $hdp::params::snappy_so_src_dir[$size]
-  $so_src = "${so_src_dir}/${snappy_so}" 
-  
-  if ($so_target != $so_src) { 
-    $ln_cmd = "mkdir -p ${so_target_dir}; ln -sf ${so_src} ${so_target}"
-    hdp::exec{ "hdp::snappy::package::ln ${name}":
-      command => $ln_cmd,
-      unless  => "test -f ${so_target}",
-      creates => $so_target
-    }
-  }
-}
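For reference, the class above fans out to one hdp::snappy::package::ln instance per word size, and the anchor chain forces the snappy package install before either symlink. A hedged usage sketch, assuming only hdp::params and the class above:

    # One declaration per node; each ln resource then links the
    # libsnappy.so for its word size into the Hadoop native dir.
    include hdp::params
    class { 'hdp::snappy::package': }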
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/snmp.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/snmp.pp
deleted file mode 100644
index 24613d8..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/snmp.pp
+++ /dev/null
@@ -1,48 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp::snmp(
-  $service_state = 'running'
-)
-{
-  include hdp::params
-
-  hdp::package {'snmp':}
-
-  hdp::snmp-configfile {'snmpd.conf': 
-    notify => Service['snmpd']    
-  }
-
-  service { 'snmpd' :
-    ensure => $service_state
-  }
-  
-  anchor{'hdp::snmp::begin':} -> Hdp::Package['snmp'] -> Hdp::Snmp-configfile<||> -> Service['snmpd'] -> anchor{'hdp::snmp::end':}
-}
-
-define hdp::snmp-configfile()
-{ 
-  hdp::configfile { "${hdp::params::snmp_conf_dir}/${name}":
-    component     => 'base',
-    owner         => root,
-    group         => root
-  }
-}
-
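hdp::snmp above defaults $service_state to 'running' and hands it straight to the snmpd service resource, so a different state can be requested at declaration time. A minimal sketch, assuming only the class above:

    # Install net-snmp, render snmpd.conf from the template in this
    # module, and keep the daemon stopped instead of running.
    class { 'hdp::snmp':
      service_state => 'stopped',
    }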
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/testing_env_patch.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/testing_env_patch.pp
deleted file mode 100644
index d227382..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/testing_env_patch.pp
+++ /dev/null
@@ -1,53 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp::testing_env_patch()
-{
-  $cmd = "mkdir /tmp/repos; mv /etc/yum.repos.d/* /tmp/repos"
-  $repo_target = "/etc/yum.repos.d/${hdp::params::hdp_yum_repo}"
-
-  anchor { 'hdp::testing_env_patch::begin' :}
-  class{ 'hdp::iptables': 
-    ensure => stopped,
-    require => Anchor['hdp::testing_env_patch::begin']
-  }
-  exec { '/bin/echo 0 > /selinux/enforce':
-    require => Class['hdp::iptables']
-  }
-  hdp::testing_env_patch::packages { 'common' :
-    require => Exec['/bin/echo 0 > /selinux/enforce']
-  }
-  hdp::exec { $cmd :
-    command => $cmd,
-    unless => "test -e ${repo_target}",
-    require => Hdp::Testing_env_patch::Packages['common']
-  }  
-  anchor { 'hdp::testing_env_patch::end' :
-    require => Exec[$cmd]
-  }
-}
-
-define hdp::testing_env_patch::packages(
-  $needed = false)
-{
- if ($needed == true) {
-   package { ['perl-Digest-HMAC','perl-Socket6','perl-Crypt-DES','xorg-x11-fonts-Type1','libdbi'] :} 
- }
-}
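Note that hdp::testing_env_patch::packages declares nothing unless needed => true is passed, and the class above uses the default. A hedged sketch of pulling the extra test dependencies in explicitly (the resource title is illustrative):

    # With needed => true the define installs the perl/X11/libdbi
    # test dependencies listed above; the default is a no-op.
    hdp::testing_env_patch::packages { 'extra-deps':
      needed => true,
    }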
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/templates/snmpd.conf.erb b/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/templates/snmpd.conf.erb
deleted file mode 100644
index 8a93b53..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/templates/snmpd.conf.erb
+++ /dev/null
@@ -1,48 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-com2sec notConfigUser  <%=scope.function_hdp_template_var("snmp_source")%>   <%=scope.function_hdp_template_var("snmp_community")%>
-group   notConfigGroup v1           notConfigUser
-group   notConfigGroup v2c           notConfigUser
-view    systemview    included   .1
-access  notConfigGroup ""      any       noauth    exact  systemview none none
-
-syslocation Hadoop 
-syscontact HadoopMaster 
-dontLogTCPWrappersConnects yes
-
-###############################################################################
-# disk checks
-
-disk / 10000
-
-
-###############################################################################
-# load average checks
-#
-
-# load [1MAX=12.0] [5MAX=12.0] [15MAX=12.0]
-#
-# 1MAX:   If the 1 minute load average is above this limit at query
-#         time, the errorFlag will be set.
-# 5MAX:   Similar, but for 5 min average.
-# 15MAX:  Similar, but for 15 min average.
-
-# Check for loads:
-#load 12 14 14
-
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/puppetApply.sh b/branch-1.2/ambari-agent/src/main/puppet/modules/puppetApply.sh
deleted file mode 100644
index 559c2e2..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/puppetApply.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-rm -f /var/log/puppet_apply.log
-puppet apply --confdir=/etc/puppet/agent --logdest=/var/log/puppet_apply.log --debug --autoflush --detailed-exitcodes /etc/puppet/agent/modules/catalog/files/site.pp  >> /var/log/puppet_apply.log  2>&1
-ret=$?
-cat /var/log/puppet_apply.log
-if [ "$ret" == "0" ] || [ "$ret" == "2" ]; then
-  exit 0
-else
-  exit 1 
-fi
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/CHANGELOG b/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/CHANGELOG
deleted file mode 100644
index ee6d3b5..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/CHANGELOG
+++ /dev/null
@@ -1,20 +0,0 @@
-2011-06-21 Jeff McCune <jeff@puppetlabs.com> - 0.1.7
-* Add validate_hash() and getvar() functions
-
-2011-06-15 Jeff McCune <jeff@puppetlabs.com> - 0.1.6
-* Add anchor resource type to provide containment for composite classes
-
-2011-06-03 Jeff McCune <jeff@puppetlabs.com> - 0.1.5
-* Add validate_bool() function to stdlib
-
-0.1.4 2011-05-26 Jeff McCune <jeff@puppetlabs.com>
-* Move most stages after main
-
-0.1.3 2011-05-25 Jeff McCune <jeff@puppetlabs.com>
-* Add validate_re() function
-
-0.1.2 2011-05-24 Jeff McCune <jeff@puppetlabs.com>
-* Update to add annotated tag
-
-0.1.1 2011-05-24 Jeff McCune <jeff@puppetlabs.com>
-* Add stdlib::stages class with a standard set of stages
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/LICENSE b/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/LICENSE
deleted file mode 100644
index 57bc88a..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/Modulefile b/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/Modulefile
deleted file mode 100644
index 4927119..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/Modulefile
+++ /dev/null
@@ -1,11 +0,0 @@
-name    'puppetlabs-stdlib'
-version '0.1.7'
-source 'git://github.com/puppetlabs/puppetlabs-stdlib'
-author 'puppetlabs'
-license 'Apache 2.0'
-summary 'Puppet Module Standard Library'
-description 'Standard Library for Puppet Modules'
-project_page 'https://github.com/puppetlabs/puppetlabs-stdlib'
-
-## Add dependencies, if any:
-# dependency 'username/name', '>= 1.2.0'
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/README.markdown b/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/README.markdown
deleted file mode 100644
index 1e93c6f..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/README.markdown
+++ /dev/null
@@ -1,45 +0,0 @@
-# Puppet Labs Standard Library #
-
-This module provides a "standard library" of resources for developing Puppet
-Modules.  This module will include the following additions to Puppet:
-
- * Stages
- * Facts
- * Functions
- * Defined resource types
- * Types
- * Providers
-
-This module is officially curated and provided by Puppet Labs.  The modules
-Puppet Labs writes and distributes will make heavy use of this standard
-library.
-
-# Compatibility #
-
-This module is designed to work with Puppet version 2.6 and later.  It may be
-forked if Puppet 2.7 specific features are added.  There are currently no plans
-for a Puppet 0.25 standard library module.
-
-# Overview #
-
-TBA
-
-# Contact Information #
-
-  Jeff McCune <jeff@puppetlabs.com>
-
-# Functions #
-## validate\_hash ##
-
-    $somehash = { 'one' => 'two' }
-    validate_hash($somehash)
-
-## getvar() ##
-
-This function aims to look up variables in user-defined namespaces within
-puppet.  Note, if the namespace is a class, it should already be evaluated
-before the function is used.
-
-    $namespace = 'site::data'
-    include "${namespace}"
-    $myvar = getvar("${namespace}::myvar")
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/RELEASE_PROCESS.markdown b/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/RELEASE_PROCESS.markdown
deleted file mode 100644
index df20730..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/RELEASE_PROCESS.markdown
+++ /dev/null
@@ -1,12 +0,0 @@
-# Releasing this module #
-
- * Work in a topic branch
- * Submit a github pull request
- * Address any comments / feedback
- * Merge into master using --no-ff
- * Update the CHANGELOG
- * Create an annotated tag with git tag -a X.Y.Z -m 'version X.Y.Z'
- * Push the tag with git push origin --tags
- * Build a new package with puppet-module
- * Publish the new package to the forge
-
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/getvar.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/getvar.rb
deleted file mode 100644
index ffd774d..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/getvar.rb
+++ /dev/null
@@ -1,23 +0,0 @@
-module Puppet::Parser::Functions
-
-  newfunction(:getvar, :type => :rvalue, :doc => <<-'ENDHEREDOC') do |args|
-    Lookup a variable in a remote namespace.
-
-    For example:
-
-      $foo = getvar('site::data::foo')
-
-    This is useful if the namespace itself is stored in a string:
-
-      $bar = getvar("${datalocation}::bar")
-    ENDHEREDOC
-
-    unless args.length == 1
-      raise Puppet::ParseError, ("getvar(): wrong number of arguments (#{args.length}; must be 1)")
-    end
-
-    self.lookupvar("#{args[0]}")
-
-  end
-
-end
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/has_key.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/has_key.rb
deleted file mode 100644
index 9c1c4c3..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/has_key.rb
+++ /dev/null
@@ -1,27 +0,0 @@
-module Puppet::Parser::Functions
-
-  newfunction(:has_key, :type => :rvalue, :doc => <<-'ENDHEREDOC') do |args|
-    Determine whether a hash has a certain key.
-
-    Example:
-      $my_hash = {'key_one' => 'value_one'}
-      if has_key($my_hash, 'key_two') {
-        notice('we will not reach here')
-      }
-      if has_key($my_hash, 'key_one') {
-        notice('this will be printed')
-      }
-
-    ENDHEREDOC
-
-    unless args.length == 2
-      raise Puppet::ParseError, ("has_key(): wrong number of arguments (#{args.length}; must be 2)")
-    end
-    unless args[0].is_a?(Hash)
-      raise Puppet::ParseError, "has_key(): expects the first argument to be a hash, got #{args[0].inspect} which is of type #{args[0].class}"
-    end
-    args[0].has_key?(args[1])
-
-  end
-
-end
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/loadyaml.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/loadyaml.rb
deleted file mode 100644
index 0f16f69..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/loadyaml.rb
+++ /dev/null
@@ -1,20 +0,0 @@
-module Puppet::Parser::Functions
-
-  newfunction(:loadyaml, :type => :rvalue, :doc => <<-'ENDHEREDOC') do |args|
-    Load a YAML file containing an Array, String, or Hash, and return the data
-    as a Puppet variable.
-
-    For example:
-
-      $myhash = loadyaml('/etc/puppet/data/myhash.yaml')
-    ENDHEREDOC
-
-    unless args.length == 1
-      raise Puppet::ParseError, ("loadyaml(): wrong number of arguments (#{args.length}; must be 1)")
-    end
-
-    YAML.load_file(args[0])
-
-  end
-
-end
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/merge.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/merge.rb
deleted file mode 100644
index 6693884..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/merge.rb
+++ /dev/null
@@ -1,28 +0,0 @@
-module Puppet::Parser::Functions
-
-  newfunction(:merge, :type => :rvalue, :doc => <<-'ENDHEREDOC') do |args|
-    Merges two or more hashes together and returns the resulting hash.
-
-    For example:
-
-      $hash1 = {'one' => 1, 'two' => 2}
-      $hash2 = {'two' => 2, 'three' => 2}
-      $merged_hash = merge($hash1, $hash2)
-      # merged_hash = {'one' => 1, 'two' => 2, 'three' => 2}
-
-    ENDHEREDOC
-
-    if args.length < 2
-      raise Puppet::ParseError, ("merge(): wrong number of arguments (#{args.length}; must be at least 2)")
-    end
-    args.each do |arg|
-      unless arg.is_a?(Hash)
-        raise Puppet::ParseError, "merge: unexpected argument type #{arg.class}, only expects hash arguments"
-      end
-    end
-
-    args.inject({}, :merge)
-
-  end
-
-end
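Because loadyaml and merge are both rvalue functions, they compose directly in a manifest, and merge's later-arguments-win behavior suits layering defaults under overrides. A hedged sketch (the YAML paths are hypothetical; validate_hash is the function defined below):

    # Load site defaults and per-node overrides, guard the types,
    # then merge; keys in $overrides win over keys in $defaults.
    $defaults  = loadyaml('/etc/puppet/data/defaults.yaml')
    $overrides = loadyaml('/etc/puppet/data/overrides.yaml')
    validate_hash($defaults, $overrides)
    $settings  = merge($defaults, $overrides)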
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/validate_bool.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/validate_bool.rb
deleted file mode 100644
index 49e6378..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/validate_bool.rb
+++ /dev/null
@@ -1,39 +0,0 @@
-module Puppet::Parser::Functions
-
-  newfunction(:validate_bool, :doc => <<-'ENDHEREDOC') do |args|
-    Validate that all passed values are either true or false.  Abort catalog
-    compilation if any value fails the check.
-
-    Example:
-
-    These booleans validate
-
-        $iamtrue = true
-        validate_bool(true)
-        validate_bool(true, true, false, $iamtrue)
-
-    These strings do NOT validate and will abort catalog compilation
-
-        $some_array = [ true ]
-        validate_bool("false")
-        validate_bool("true")
-        validate_bool($some_array)
-
-    * Jeff McCune <jeff@puppetlabs.com>
-    * Dan Bode <dan@puppetlabs.com>
-
-    ENDHEREDOC
-
-    unless args.length > 0 then
-      raise Puppet::ParseError, ("validate_bool(): wrong number of arguments (#{args.length}; must be > 0)")
-    end
-
-    args.each do |arg|
-      unless (arg.is_a?(TrueClass) || arg.is_a?(FalseClass))
-        raise Puppet::ParseError, ("#{arg.inspect} is not a boolean.  It looks to be a #{arg.class}")
-      end
-    end
-
-  end
-
-end
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/validate_hash.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/validate_hash.rb
deleted file mode 100644
index 1443318..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/validate_hash.rb
+++ /dev/null
@@ -1,37 +0,0 @@
-module Puppet::Parser::Functions
-
-  newfunction(:validate_hash, :doc => <<-'ENDHEREDOC') do |args|
-    Validate that all passed values are hash data structures.  Abort catalog
-    compilation if any value fails the check.
-
-    Example:
-
-    These values validate
-
-        $my_hash = { 'one' => 'two' }
-        validate_hash($my_hash)
-
-    These values do NOT validate
-
-        validate_hash(true)
-        validate_hash('some_string')
-        $undefined = undef
-        validate_hash($undefined)
-
-    * Jeff McCune <jeff@puppetlabs.com>
-
-    ENDHEREDOC
-
-    unless args.length > 0 then
-      raise Puppet::ParseError, ("validate_hash(): wrong number of arguments (#{args.length}; must be > 0)")
-    end
-
-    args.each do |arg|
-      unless arg.is_a?(Hash)
-        raise Puppet::ParseError, ("#{arg.inspect} is not a Hash.  It looks to be a #{arg.class}")
-      end
-    end
-
-  end
-
-end
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/validate_re.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/validate_re.rb
deleted file mode 100644
index 583f26a..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/parser/functions/validate_re.rb
+++ /dev/null
@@ -1,35 +0,0 @@
-module Puppet::Parser::Functions
-
-  newfunction(:validate_re, :doc => <<-'ENDHEREDOC') do |args|
-    Perform simple validation of a string against a regular expression.  The second
-    argument of the function should be a string regular expression (without the //'s)
-    or an array of regular expressions.  If none of the regular expressions in the array
-    match the string passed in, then an exception will be raised.
-
-    Example:
-
-    These strings validate against the regular expressions
-
-        validate_re('one', '^one$')
-        validate_re('one', [ '^one', '^two' ])
-
-    These strings do NOT validate
-
-        validate_re('one', [ '^two', '^three' ])
-
-    Jeff McCune <jeff@puppetlabs.com>
-
-    ENDHEREDOC
-    if args.length != 2 then
-      raise Puppet::ParseError, ("validate_re(): wrong number of arguments (#{args.length}; must be 2)")
-    end
-
-    msg = "validate_re(): #{args[0].inspect} does not match #{args[1].inspect}"
-
-    raise Puppet::ParseError, (msg) unless args[1].any? do |re_str|
-      args[0] =~ Regexp.compile(re_str)
-    end
-
-  end
-
-end
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/provider/append_line/ruby.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/provider/append_line/ruby.rb
deleted file mode 100644
index 5e78659..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/provider/append_line/ruby.rb
+++ /dev/null
@@ -1,15 +0,0 @@
-Puppet::Type.type(:append_line).provide(:ruby) do
-
-  def exists?
-    File.readlines(resource[:path]).find do |line|
-      line.chomp == resource[:line].chomp
-    end
-  end
-
-  def create
-    File.open(resource[:path], 'a') do |fh|
-      fh.puts resource[:line]
-    end
-  end
-
-end
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/type/anchor.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/type/anchor.rb
deleted file mode 100644
index 0c28b1c..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/type/anchor.rb
+++ /dev/null
@@ -1,32 +0,0 @@
-Puppet::Type.newtype(:anchor) do
-  desc <<-'ENDOFDESC'
-  A simple resource type intended to be used as an anchor in a composite class.
-
-      class ntp {
-        class { 'ntp::package': }
-        -> class { 'ntp::config': }
-        -> class { 'ntp::service': }
-
-        # These two resources "anchor" the composed classes
-        # such that the end user may use "require" and "before"
-        # relationships with Class['ntp']
-        anchor { 'ntp::begin': }   -> class  { 'ntp::package': }
-        class  { 'ntp::service': } -> anchor { 'ntp::end': }
-      }
-
-  This resource allows all of the classes in the ntp module to be contained
-  within the ntp class from a dependency management point of view.
-
-  This allows the end user of the ntp module to establish require and before
-  relationships easily:
-
-      class { 'ntp': } -> class { 'mcollective': }
-      class { 'mcollective': } -> class { 'ntp': }
-
-  ENDOFDESC
-
-  newparam :name do
-    desc "The name of the anchor resource."
-  end
-
-end
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/type/append_line.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/type/append_line.rb
deleted file mode 100644
index b3f926c..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/lib/puppet/type/append_line.rb
+++ /dev/null
@@ -1,44 +0,0 @@
-Puppet::Type.newtype(:append_line) do
-
-  desc <<-EOT
-  Type that can append a line to a file if it does not already contain it.
-
-  Example:
-
-  append_line { 'sudo_rule':
-    path => '/etc/sudoers',
-    line => '%admin ALL=(ALL) ALL',
-  }
-
-  EOT
-
-  ensurable do
-    defaultto :present
-    newvalue(:present) do
-      provider.create
-    end
-  end
-
-  newparam(:name, :namevar => true) do
-    desc 'An arbitrary name used as the identity of the resource.'
-  end
-
-  newparam(:line) do
-    desc 'The line to be appended to the path.'
-  end
-
-  newparam(:path) do
-    desc 'File to possibly append a line to.'
-    validate do |value|
-      unless (Puppet.features.posix? and value =~ /^\//) or (Puppet.features.microsoft_windows? and (value =~ /^.:\// or value =~ /^\/\/[^\/]+\/[^\/]+/))
-        raise(Puppet::Error, "File paths must be fully qualified, not '#{value}'")
-      end
-    end
-  end
-
-  validate do
-    unless self[:line] and self[:path]
-      raise(Puppet::Error, "Both line and path are required attributes")
-    end
-  end
-end
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/manifests/init.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/manifests/init.pp
deleted file mode 100644
index 1f18d8a..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/manifests/init.pp
+++ /dev/null
@@ -1,19 +0,0 @@
-#
-# Class: stdlib
-#
-# This module manages stdlib
-#
-# Parameters:
-#
-# Actions:
-#
-# Requires:
-#
-# Sample Usage:
-#
-# [Remember: No empty lines between comments and class definition]
-class stdlib {
-
-  class { 'stdlib::stages': }
-
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/manifests/stages.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/manifests/stages.pp
deleted file mode 100644
index 97b9e90..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/manifests/stages.pp
+++ /dev/null
@@ -1,43 +0,0 @@
-#
-# Class: stdlib::stages
-#
-# This class manages a standard set of Run Stages for Puppet.
-#
-# The high-level stages are (in order):
-#
-#  * setup
-#  * main
-#  * runtime
-#  * setup_infra
-#  * deploy_infra
-#  * setup_app
-#  * deploy_app
-#  * deploy
-#
-# Parameters:
-#
-# Actions:
-#
-#   Declares various run-stages for deploying infrastructure,
-#   language runtimes, and application layers.
-#
-# Requires:
-#
-# Sample Usage:
-#
-#  node default {
-#    include stdlib::stages
-#    class { java: stage => 'runtime' }
-#  }
-#
-class stdlib::stages {
-
-  stage { 'setup':  before => Stage['main'] }
-  stage { 'runtime': require => Stage['main'] }
-  -> stage { 'setup_infra': }
-  -> stage { 'deploy_infra': }
-  -> stage { 'setup_app': }
-  -> stage { 'deploy_app': }
-  -> stage { 'deploy': }
-
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/spec/spec.opts b/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/spec/spec.opts
deleted file mode 100644
index 91cd642..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/spec/spec.opts
+++ /dev/null
@@ -1,6 +0,0 @@
---format
-s
---colour
---loadby
-mtime
---backtrace
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/spec/spec_helper.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/spec/spec_helper.rb
deleted file mode 100644
index a4aeeae..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/spec/spec_helper.rb
+++ /dev/null
@@ -1,18 +0,0 @@
-require 'pathname'
-dir = Pathname.new(__FILE__).parent
-$LOAD_PATH.unshift(dir, dir + 'lib', dir + '../lib')
-
-require 'mocha'
-require 'puppet'
-gem 'rspec', '=1.2.9'
-require 'spec/autorun'
-
-Spec::Runner.configure do |config|
-    config.mock_with :mocha
-end
-
-# We need this because the RAL uses 'should' as a method.  This
-# allows us the same behaviour but with a different method name.
-class Object
-    alias :must :should
-end
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/parser/functions/getvar_spec.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/parser/functions/getvar_spec.rb
deleted file mode 100644
index 16edd98..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/parser/functions/getvar_spec.rb
+++ /dev/null
@@ -1,53 +0,0 @@
-require 'puppet'
-
-# We don't need this for the basic tests we're doing
-# require 'spec_helper'
-
-# Dan mentioned that Nick recommended the function method call
-# to return the string value for the test description.
-# this will not even try the test if the function cannot be
-# loaded.
-describe Puppet::Parser::Functions.function(:getvar) do
-
-  # Pulled from Dan's create_resources function
-  def get_scope
-    @topscope = Puppet::Parser::Scope.new
-    # This is necessary so we don't try to use the compiler to discover our parent.
-    @topscope.parent = nil
-    @scope = Puppet::Parser::Scope.new
-    @scope.compiler = Puppet::Parser::Compiler.new(Puppet::Node.new("floppy", :environment => 'production'))
-    @scope.parent = @topscope
-    @compiler = @scope.compiler
-  end
-
-  describe 'when calling getvar from puppet' do
-
-    it "should not compile when no arguments are passed" do
-      Puppet[:code] = 'getvar()'
-      get_scope
-      expect { @scope.compiler.compile }.should raise_error(Puppet::ParseError, /wrong number of arguments/)
-    end
-    it "should not compile when too many arguments are passed" do
-      Puppet[:code] = 'getvar("foo::bar", "baz")'
-      get_scope
-      expect { @scope.compiler.compile }.should raise_error(Puppet::ParseError, /wrong number of arguments/)
-    end
-
-    it "should lookup variables in other namespaces" do
-      pending "Puppet doesn't appear to think getvar is an rvalue function... BUG?"
-      Puppet[:code] = <<-'ENDofPUPPETcode'
-        class site::data { $foo = 'baz' }
-        include site::data
-        $foo = getvar("site::data::foo")
-        if $foo != 'baz' {
-          fail('getvar did not return what we expect')
-        }
-      ENDofPUPPETcode
-      get_scope
-      @scope.compiler.compile
-    end
-
-  end
-
-end
-
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/parser/functions/has_key_spec.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/parser/functions/has_key_spec.rb
deleted file mode 100644
index d1dcd15..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/parser/functions/has_key_spec.rb
+++ /dev/null
@@ -1,46 +0,0 @@
-require 'puppet'
-require 'mocha'
-describe Puppet::Parser::Functions.function(:has_key) do
-
-  # Pulled from Dan's create_resources function
-  # TODO - this should be moved to spec_helper since the
-  # logic is likely to be applied to multiple rspec files.
-  let(:compiler) {
-    topscope = Puppet::Parser::Scope.new
-    # This is necessary so we don't try to use the compiler to discover our parent.
-    topscope.parent = nil
-    my_scope = Puppet::Parser::Scope.new
-    my_scope.compiler = Puppet::Parser::Compiler.new(Puppet::Node.new("floppy", :environment => 'production'))
-    my_scope.parent = topscope
-    compiler = my_scope.compiler
-  }
-  let(:scope) {
-    scope = Puppet::Parser::Scope.new
-    scope.stubs(:environment).returns(Puppet::Node::Environment.new('production'))
-    scope
-  }
-
-  describe 'when calling has_key from puppet' do
-    it "should not compile when no arguments are passed" do
-      Puppet[:code] = 'has_key()'
-      expect { compiler.compile }.should raise_error(Puppet::ParseError, /wrong number of arguments/)
-    end
-    it "should not compile when 1 argument is passed" do
-      Puppet[:code] = "has_key('foo')"
-      expect { compiler.compile }.should raise_error(Puppet::ParseError, /wrong number of arguments/)
-    end
-    it "should require the first value to be a Hash" do
-      Puppet[:code] = "has_key('foo', 'bar')"
-      expect { compiler.compile }.should raise_error(Puppet::ParseError, /expects the first argument to be a hash/)
-    end
-  end
-  describe 'when calling the function has_key from a scope instance' do
-    it 'should detect existing keys' do
-      scope.function_has_key([{'one' => 1}, 'one']).should be_true
-    end
-    it 'should detect absent keys' do
-      scope.function_has_key([{'one' => 1}, 'two']).should be_false
-    end
-  end
-
-end
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/parser/functions/merge_spec.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/parser/functions/merge_spec.rb
deleted file mode 100644
index 71e1869..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/parser/functions/merge_spec.rb
+++ /dev/null
@@ -1,54 +0,0 @@
-require 'puppet'
-require 'mocha'
-describe Puppet::Parser::Functions.function(:merge) do
-
-  # Pulled from Dan's create_resources function
-  # TODO - these let statements should be moved somewhere
-  # where they can be reused
-  let(:compiler) {
-    topscope = Puppet::Parser::Scope.new
-    # This is necessary so we don't try to use the compiler to discover our parent.
-    topscope.parent = nil
-    my_scope = Puppet::Parser::Scope.new
-    my_scope.compiler = Puppet::Parser::Compiler.new(Puppet::Node.new("floppy", :environment => 'production'))
-    my_scope.parent = topscope
-    compiler = my_scope.compiler
-  }
-  let(:scope) {
-    scope = Puppet::Parser::Scope.new
-    scope.stubs(:environment).returns(Puppet::Node::Environment.new('production'))
-    scope
-  }
-
-  describe 'when calling merge from puppet' do
-    it "should not compile when no arguments are passed" do
-      Puppet[:code] = 'merge()'
-      expect { compiler.compile }.should raise_error(Puppet::ParseError, /wrong number of arguments/)
-    end
-    it "should not compile when 1 argument is passed" do
-      Puppet[:code] = "$my_hash={'one' => 1}\nmerge($my_hash)"
-      expect { compiler.compile }.should raise_error(Puppet::ParseError, /wrong number of arguments/)
-    end
-  end
-  describe 'when calling merge on the scope instance' do
-    it 'should require all parameters are hashes' do
-      expect { new_hash = scope.function_merge([{}, '2'])}.should raise_error(Puppet::ParseError, /unexpected argument type String/)
-
-    end
-    it 'should be able to merge two hashes' do
-      new_hash = scope.function_merge([{'one' => '1', 'two' => '1'}, {'two' => '2', 'three' => '2'}])
-      new_hash['one'].should   == '1'
-      new_hash['two'].should   == '2'
-      new_hash['three'].should == '2'
-    end
-    it 'should merge multiple hashes' do
-      hash = scope.function_merge([{'one' => 1}, {'one' => '2'}, {'one' => '3'}])
-      hash['one'].should == '3'
-    end
-    it 'should accept empty hashes' do
-      scope.function_merge([{},{},{}]).should == {}
-    end
-
-  end
-
-end
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/parser/functions/validate_bool_spec.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/parser/functions/validate_bool_spec.rb
deleted file mode 100644
index e95c396..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/parser/functions/validate_bool_spec.rb
+++ /dev/null
@@ -1,76 +0,0 @@
-require 'puppet'
-
-# We don't need this for the basic tests we're doing
-# require 'spec_helper'
-
-# Dan mentioned that Nick recommended using the function method call,
-# which returns the string value for the test description.
-# This will not even try the test if the function cannot be
-# loaded.
-describe Puppet::Parser::Functions.function(:validate_bool) do
-
-  # Pulled from Dan's create_resources function
-  def get_scope
-    @topscope = Puppet::Parser::Scope.new
-    # This is necessary so we don't try to use the compiler to discover our parent.
-    @topscope.parent = nil
-    @scope = Puppet::Parser::Scope.new
-    @scope.compiler = Puppet::Parser::Compiler.new(Puppet::Node.new("floppy", :environment => 'production'))
-    @scope.parent = @topscope
-    @compiler = @scope.compiler
-  end
-
-  describe 'when calling validate_bool from puppet' do
-
-    %w{ true false }.each do |the_string|
-
-      it "should not compile when #{the_string} is a string" do
-        Puppet[:code] = "validate_bool('#{the_string}')"
-        get_scope
-        expect { @scope.compiler.compile }.should raise_error(Puppet::ParseError, /is not a boolean/)
-      end
-
-      it "should compile when #{the_string} is a bare word" do
-        Puppet[:code] = "validate_bool(#{the_string})"
-        get_scope
-        @scope.compiler.compile
-      end
-
-    end
-
-    it "should not compile when an arbitrary string is passed" do
-      Puppet[:code] = 'validate_bool("jeff and dan are awesome")'
-      get_scope
-      expect { @scope.compiler.compile }.should raise_error(Puppet::ParseError, /is not a boolean/)
-    end
-
-    it "should not compile when no arguments are passed" do
-      Puppet[:code] = 'validate_bool()'
-      get_scope
-      expect { @scope.compiler.compile }.should raise_error(Puppet::ParseError, /wrong number of arguments/)
-    end
-
-    it "should compile when multiple boolean arguments are passed" do
-      Puppet[:code] = <<-'ENDofPUPPETcode'
-        $foo = true
-        $bar = false
-        validate_bool($foo, $bar, true, false)
-      ENDofPUPPETcode
-      get_scope
-      @scope.compiler.compile
-    end
-
-    it "should compile when multiple boolean arguments are passed" do
-      Puppet[:code] = <<-'ENDofPUPPETcode'
-        $foo = true
-        $bar = false
-        validate_bool($foo, $bar, true, false, 'jeff')
-      ENDofPUPPETcode
-      get_scope
-      expect { @scope.compiler.compile }.should raise_error(Puppet::ParseError, /is not a boolean/)
-    end
-
-  end
-
-end
-
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/parser/functions/validate_hash_spec.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/parser/functions/validate_hash_spec.rb
deleted file mode 100644
index 8cc0b3d..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/parser/functions/validate_hash_spec.rb
+++ /dev/null
@@ -1,63 +0,0 @@
-require 'puppet'
-
-# We don't need this for the basic tests we're doing
-# require 'spec_helper'
-
-# Dan mentioned that Nick recommended using the function method call,
-# which returns the string value for the test description.
-# This will not even try the test if the function cannot be
-# loaded.
-describe Puppet::Parser::Functions.function(:validate_hash) do
-
-  # Pulled from Dan's create_resources function
-  def get_scope
-    @topscope = Puppet::Parser::Scope.new
-    # This is necessary so we don't try to use the compiler to discover our parent.
-    @topscope.parent = nil
-    @scope = Puppet::Parser::Scope.new
-    @scope.compiler = Puppet::Parser::Compiler.new(Puppet::Node.new("floppy", :environment => 'production'))
-    @scope.parent = @topscope
-    @compiler = @scope.compiler
-  end
-
-  describe 'when calling validate_hash from puppet' do
-
-    %w{ true false }.each do |the_string|
-
-      it "should not compile when #{the_string} is a string" do
-        Puppet[:code] = "validate_hash('#{the_string}')"
-        get_scope
-        expect { @scope.compiler.compile }.should raise_error(Puppet::ParseError, /is not a Hash/)
-      end
-
-      it "should not compile when #{the_string} is a bare word" do
-        Puppet[:code] = "validate_hash(#{the_string})"
-        get_scope
-        expect { @scope.compiler.compile }.should raise_error(Puppet::ParseError, /is not a Hash/)
-      end
-
-    end
-
-    it "should compile when multiple hash arguments are passed" do
-      Puppet[:code] = <<-'ENDofPUPPETcode'
-        $foo = {}
-        $bar = { 'one' => 'two' }
-        validate_hash($foo, $bar)
-      ENDofPUPPETcode
-      get_scope
-      @scope.compiler.compile
-    end
-
-    it "should not compile when an undef variable is passed" do
-      Puppet[:code] = <<-'ENDofPUPPETcode'
-        $foo = undef
-        validate_hash($foo)
-      ENDofPUPPETcode
-      get_scope
-      expect { @scope.compiler.compile }.should raise_error(Puppet::ParseError, /is not a Hash/)
-    end
-
-  end
-
-end
-
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/provider/append_line/ruby_spec.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/provider/append_line/ruby_spec.rb
deleted file mode 100644
index ea28c31..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/provider/append_line/ruby_spec.rb
+++ /dev/null
@@ -1,30 +0,0 @@
-require 'puppet'
-require 'tempfile'
-provider_class = Puppet::Type.type(:append_line).provider(:ruby)
-describe provider_class do
-  before :each do
-    tmp = Tempfile.new('tmp')
-    @tmpfile = tmp.path
-    tmp.close!
-    @resource = Puppet::Type::Append_line.new(
-      {:name => 'foo', :path => @tmpfile, :line => 'foo'}
-    )
-    @provider = provider_class.new(@resource)
-  end
-  it 'should detect if the line exists in the file' do
-    File.open(@tmpfile, 'w') do |fh|
-      fh.write('foo')
-    end
-    @provider.exists?.should be_true
-  end
-  it 'should detect if the line does not exist in the file' do
-    File.open(@tmpfile, 'w') do |fh|
-      fh.write('foo1')
-    end
-    @provider.exists?.should be_nil
-  end
-  it 'should append to an existing file when creating' do
-    @provider.create
-    File.read(@tmpfile).chomp.should == 'foo'
-  end
-end
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/type/anchor_spec.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/type/anchor_spec.rb
deleted file mode 100644
index 2030b83..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/type/anchor_spec.rb
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env ruby
-
-require 'puppet'
-
-anchor = Puppet::Type.type(:anchor).new(:name => "ntp::begin")
-
-describe anchor do
-  it "should stringify normally" do
-    anchor.to_s.should == "Anchor[ntp::begin]"
-  end
-end
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/type/append_line_spec.rb b/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/type/append_line_spec.rb
deleted file mode 100644
index d0564c3..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/spec/unit/puppet/type/append_line_spec.rb
+++ /dev/null
@@ -1,24 +0,0 @@
-require 'puppet'
-require 'tempfile'
-describe Puppet::Type.type(:append_line) do
-  before :each do
-    @append_line = Puppet::Type.type(:append_line).new(:name => 'foo', :line => 'line', :path => '/tmp/path')
-  end
-  it 'should accept a line and path' do
-    @append_line[:line] = 'my_line'
-    @append_line[:line].should == 'my_line'
-  end
-  it 'should accept posix filenames' do
-    @append_line[:path] = '/tmp/path'
-    @append_line[:path].should == '/tmp/path'
-  end
-  it 'should not accept unqualified path' do
-    expect { @append_line[:path] = 'file' }.should raise_error(Puppet::Error, /File paths must be fully qualified/)
-  end
-  it 'should require that a line is specified' do
-    expect { Puppet::Type.type(:append_line).new(:name => 'foo', :path => '/tmp/file') }.should raise_error(Puppet::Error, /Both line and path are required attributes/)
-  end
-  it 'should require that a file is specified' do
-    expect { Puppet::Type.type(:append_line).new(:name => 'foo', :line => 'path') }.should raise_error(Puppet::Error, /Both line and path are required attributes/)
-  end
-end
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/tests/append_line.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/tests/append_line.pp
deleted file mode 100644
index f50a833..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/tests/append_line.pp
+++ /dev/null
@@ -1,7 +0,0 @@
-file { '/tmp/dansfile':
-  ensure => present
-}->
-append_line { 'dans_line':
-  line => 'dan is awesome',
-  #path => '/tmp/dansfile',
-}
diff --git a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/tests/init.pp b/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/tests/init.pp
deleted file mode 100644
index 9675d83..0000000
--- a/branch-1.2/ambari-agent/src/main/puppet/modules/stdlib/tests/init.pp
+++ /dev/null
@@ -1 +0,0 @@
-include stdlib
diff --git a/branch-1.2/ambari-agent/src/main/python/ambari_agent/ActionQueue.py b/branch-1.2/ambari-agent/src/main/python/ambari_agent/ActionQueue.py
deleted file mode 100644
index ceab668..0000000
--- a/branch-1.2/ambari-agent/src/main/python/ambari_agent/ActionQueue.py
+++ /dev/null
@@ -1,234 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import logging
-import traceback
-import logging.handlers
-import Queue
-import threading
-import AmbariConfig
-from LiveStatus import LiveStatus
-from shell import shellRunner
-from FileUtil import writeFile, createStructure, deleteStructure, getFilePath, appendToFile
-import json
-import pprint
-import os
-import time
-import subprocess
-import copy
-import puppetExecutor
-import tempfile
-from Grep import Grep
-
-logger = logging.getLogger()
-installScriptHash = -1
-
-class ActionQueue(threading.Thread):
-  """ Action Queue for the agent. We pick one command at a time from the queue
-  and execute that """
-  global commandQueue, resultQueue
-  commandQueue = Queue.Queue()
-  resultQueue = Queue.Queue()
-
-  STATUS_COMMAND='STATUS_COMMAND'
-  EXECUTION_COMMAND='EXECUTION_COMMAND'
-  IDLE_SLEEP_TIME = 5
-
-  def __init__(self, config):
-    super(ActionQueue, self).__init__()
-    #threading.Thread.__init__(self)
-    self.config = config
-    self.sh = shellRunner()
-    self._stop = threading.Event()
-    self.maxRetries = config.getint('command', 'maxretries') 
-    self.sleepInterval = config.getint('command', 'sleepBetweenRetries')
-    self.executor = puppetExecutor.puppetExecutor(config.get('puppet', 'puppetmodules'),
-                                   config.get('puppet', 'puppet_home'),
-                                   config.get('puppet', 'facter_home'),
-                                   config.get('agent', 'prefix'), config)
-    self.tmpdir = config.get('agent', 'prefix')
-    self.commandInProgress = None
-
-  def stop(self):
-    self._stop.set()
-
-  def stopped(self):
-    return self._stop.isSet()
-
-  def getshellinstance(self):
-    """ For Testing purpose only.""" 
-    return self.sh
-
-  def put(self, command):
-    logger.info("The " + command['commandType'] + " from the server is \n" + pprint.pformat(command))
-    commandQueue.put(command)
-    pass
-
-  def getCommandQueue(self):
-    """ For Testing purpose only."""
-    return commandQueue
-
-  def run(self):
-    result = []
-    while not self.stopped():
-      while not commandQueue.empty():
-        command = commandQueue.get()
-        logger.info("Took an element of Queue: " + pprint.pformat(command))
-        if command['commandType'] == self.EXECUTION_COMMAND:
-          try:
-            #pass a copy of action since we don't want anything to change in the
-            #action dict
-            result = self.executeCommand(command)
-
-          except Exception, err:
-            traceback.print_exc()
-            logger.warn(err)
-            pass
-
-          for entry in result:
-            resultQueue.put((ActionQueue.EXECUTION_COMMAND, entry))
-          pass
-        elif command['commandType'] == self.STATUS_COMMAND:
-          cluster = command['clusterName']
-          service = command['serviceName']
-          component = command['componentName']
-          globalConfig = command['configurations']['global']
-          try:
-            livestatus = LiveStatus(cluster, service, component, globalConfig)
-            result = livestatus.build()
-            logger.info("Got live status for component " + component + " of service " + str(service) +\
-                        " of cluster " + str(cluster) + "\n" + pprint.pformat(result))
-            if result is not None:
-              resultQueue.put((ActionQueue.STATUS_COMMAND, result))
-          except Exception, err:
-            traceback.print_exc()
-            logger.warn(err)
-            pass
-        else:
-          logger.warn("Unrecognized command " + pprint.pformat(result))
-      if not self.stopped():
-        time.sleep(self.IDLE_SLEEP_TIME)
-
-  # Store action result to agent response queue
-  def result(self):
-    resultReports = []
-    resultComponentStatus = []
-    while not resultQueue.empty():
-      res = resultQueue.get()
-      if res[0] == ActionQueue.EXECUTION_COMMAND:
-        resultReports.append(res[1])
-      elif res[0] == ActionQueue.STATUS_COMMAND:
-        resultComponentStatus.append(res[1])
-
-    # Building report for command in progress
-    if self.commandInProgress is not None:
-      try:
-        tmpout= open(self.commandInProgress['tmpout'], 'r').read()
-        tmperr= open(self.commandInProgress['tmperr'], 'r').read()
-      except Exception, err:
-        logger.warn(err)
-        tmpout='...'
-        tmperr='...'
-      grep = Grep()
-      output = grep.tail(tmpout, puppetExecutor.puppetExecutor.OUTPUT_LAST_LINES)
-      inprogress = {
-        'role' : self.commandInProgress['role'],
-        'actionId' : self.commandInProgress['actionId'],
-        'taskId' : self.commandInProgress['taskId'],
-        'stdout' : grep.filterMarkup(output),
-        'clusterName' : self.commandInProgress['clusterName'],
-        'stderr' : tmperr,
-        'exitCode' : 777,
-        'serviceName' : self.commandInProgress['serviceName'],
-        'status' : 'IN_PROGRESS'
-      }
-      resultReports.append(inprogress)
-    result={
-      'reports' : resultReports,
-      'componentStatus' : resultComponentStatus
-    }
-    return result
-
-  def registerCommand(self, command):
-    return {}
-  
-  def statusCommand(self, command):
-    return {}
-  
-  def executeCommand(self, command):
-    logger.info("Executing command \n" + pprint.pformat(command))
-    clusterName = command['clusterName']
-    commandId = command['commandId']
-    hostname = command['hostname']
-    params = command['hostLevelParams']
-    clusterHostInfo = command['clusterHostInfo']
-    roleCommand = command['roleCommand']
-    serviceName = command['serviceName']
-    configurations = command['configurations']
-    result = []
-
-    taskId = command['taskId']
-    # Preparing 'IN_PROGRESS' report
-    self.commandInProgress = {
-      'role' : command['role'],
-      'actionId' : commandId,
-      'taskId' : taskId,
-      'clusterName' : clusterName,
-      'serviceName' : serviceName,
-      'tmpout': self.tmpdir + os.sep + 'output-' + str(taskId) + '.txt',
-      'tmperr': self.tmpdir + os.sep + 'errors-' + str(taskId) + '.txt'
-    }
-    # running command
-    commandresult = self.executor.runCommand(command, self.commandInProgress['tmpout'], self.commandInProgress['tmperr'])
-    # dumping results
-    self.commandInProgress = None
-    status = "COMPLETED"
-    if commandresult['exitcode'] != 0:
-      status = "FAILED"
-      
-    # assume some puppet plugin runs these commands
-    roleResult = {'role' : command['role'],
-                  'actionId' : commandId,
-                  'taskId' : command['taskId'],
-                  'stdout' : commandresult['stdout'],
-                  'clusterName' : clusterName,
-                  'stderr' : commandresult['stderr'],
-                  'exitCode' : commandresult['exitcode'],
-                  'serviceName' : serviceName,
-                  'status' : status}
-    if roleResult['stdout'] == '':
-      roleResult['stdout'] = 'None'
-    if roleResult['stderr'] == '':
-      roleResult['stderr'] = 'None'
-    result.append(roleResult)
-    pass
-    return result
-
-  def noOpCommand(self, command):
-    result = {'commandId' : command['Id']}
-    return result
-
-  def unknownAction(self, action):
-    logger.error('Unknown action: %s' % action['id'])
-    result = { 'id': action['id'] }
-    return result
-
-  def isIdle(self):
-    return commandQueue.empty()
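The class above is a single consumer thread over two module-level queues: commands flow in through put(), results flow out through result(). A minimal, self-contained sketch of the same pattern (illustrative names only, not the agent's API):

import Queue        # Python 2 stdlib, as in the agent code above
import threading

class MiniActionQueue(threading.Thread):
    # Illustrative consumer thread: drain a command queue, publish results.
    def __init__(self):
        super(MiniActionQueue, self).__init__()
        self.commands = Queue.Queue()
        self.results = Queue.Queue()
        self._stop = threading.Event()

    def run(self):
        while not self._stop.isSet():
            try:
                command = self.commands.get(timeout=1)
            except Queue.Empty:
                continue
            # Stand-in for executeCommand(): report a terminal status.
            self.results.put({'role': command['role'], 'status': 'COMPLETED'})

    def stop(self):
        self._stop.set()

q = MiniActionQueue()
q.start()
q.commands.put({'role': 'DATANODE'})
print q.results.get(timeout=5)   # {'status': 'COMPLETED', 'role': 'DATANODE'}
q.stop()
q.join()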
diff --git a/branch-1.2/ambari-agent/src/main/python/ambari_agent/ActionResults.py b/branch-1.2/ambari-agent/src/main/python/ambari_agent/ActionResults.py
deleted file mode 100644
index 7603fa1..0000000
--- a/branch-1.2/ambari-agent/src/main/python/ambari_agent/ActionResults.py
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import logging
-import logging.handlers
-import Queue
-import ActionQueue
-
-logger = logging.getLogger()
-
-class ActionResults:
-  global r
-
-  # Build action results list from memory queue
-  def build(self):
-    results = []
-    while not ActionQueue.r.empty():
-      result = { 
-                 'clusterId': 'unknown',
-                 'id' : 'action-001',
-                 'kind' : 'STOP_ACTION',
-                 'commandResults' : [],
-                 'cleanUpCommandResults' : [],
-                 'serverName' : 'hadoop.datanode'
-               }
-      results.append(result)
-    logger.info(results)
-    return results
-
-def main(argv=None):
-  ar = ActionResults()
-  print ar.build()
-
-if __name__ == '__main__':
-  main()
diff --git a/branch-1.2/ambari-agent/src/main/python/ambari_agent/AmbariConfig.py b/branch-1.2/ambari-agent/src/main/python/ambari_agent/AmbariConfig.py
deleted file mode 100644
index 91f3ba1..0000000
--- a/branch-1.2/ambari-agent/src/main/python/ambari_agent/AmbariConfig.py
+++ /dev/null
@@ -1,208 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import logging
-import logging.handlers
-import ConfigParser
-import StringIO
-
-config = ConfigParser.RawConfigParser()
-content = """
-
-[server]
-hostname=localhost
-url_port=8440
-secured_url_port=8441
-
-[agent]
-prefix=/tmp/ambari-agent
-
-[services]
-
-[stack]
-installprefix=/tmp
-
-[puppet]
-puppetmodules=/var/lib/ambari-agent/puppet/
-puppet_home=/root/workspace/puppet-install/puppet-2.7.9
-facter_home=/root/workspace/puppet-install/facter-1.6.10
-
-[command]
-maxretries=2
-sleepBetweenRetries=1
-
-[security]
-keysdir=/tmp/ambari-agent
-server_crt=ca.crt
-passphrase_env_var_name=AMBARI_PASSPHRASE
-
-[heartbeat]
-state_interval = 6
-dirs=/etc/hadoop,/etc/hadoop/conf,/var/run/hadoop,/var/log/hadoop
-rpms=hadoop,openssl,wget,net-snmp,ntpd,ruby,ganglia,nagios
-"""
-s = StringIO.StringIO(content)
-config.readfp(s)
-
-imports = [
-  "hdp/manifests/*.pp",
-  "hdp-hadoop/manifests/*.pp",
-  "hdp-hbase/manifests/*.pp",
-  "hdp-zookeeper/manifests/*.pp",
-  "hdp-oozie/manifests/*.pp",
-  "hdp-pig/manifests/*.pp",
-  "hdp-sqoop/manifests/*.pp",
-  "hdp-templeton/manifests/*.pp",
-  "hdp-hive/manifests/*.pp",
-  "hdp-hcat/manifests/*.pp",
-  "hdp-mysql/manifests/*.pp",
-  "hdp-monitor-webserver/manifests/*.pp",
-  "hdp-repos/manifests/*.pp"
-]
-
-rolesToClass = {
-  'NAMENODE': 'hdp-hadoop::namenode',
-  'DATANODE': 'hdp-hadoop::datanode',
-  'SECONDARY_NAMENODE': 'hdp-hadoop::snamenode',
-  'JOBTRACKER': 'hdp-hadoop::jobtracker',
-  'TASKTRACKER': 'hdp-hadoop::tasktracker',
-  'HDFS_CLIENT': 'hdp-hadoop::client',
-  'MAPREDUCE_CLIENT': 'hdp-hadoop::client',
-  'ZOOKEEPER_SERVER': 'hdp-zookeeper',
-  'ZOOKEEPER_CLIENT': 'hdp-zookeeper::client',
-  'HBASE_MASTER': 'hdp-hbase::master',
-  'HBASE_REGIONSERVER': 'hdp-hbase::regionserver',
-  'HBASE_CLIENT': 'hdp-hbase::client',
-  'PIG': 'hdp-pig',
-  'SQOOP': 'hdp-sqoop',
-  'OOZIE_SERVER': 'hdp-oozie::server',
-  'OOZIE_CLIENT': 'hdp-oozie::client',
-  'HIVE_CLIENT': 'hdp-hive::client',
-  'HCAT': 'hdp-hcat',
-  'HIVE_SERVER': 'hdp-hive::server',
-  'HIVE_METASTORE': 'hdp-hive::metastore',
-  'MYSQL_SERVER': 'hdp-mysql::server',
-  'WEBHCAT_SERVER': 'hdp-templeton::server',
-  'DASHBOARD': 'hdp-dashboard',
-  'NAGIOS_SERVER': 'hdp-nagios::server',
-  'GANGLIA_SERVER': 'hdp-ganglia::server',
-  'GANGLIA_MONITOR': 'hdp-ganglia::monitor',
-  'HTTPD': 'hdp-monitor-webserver',
-  'HDFS_SERVICE_CHECK': 'hdp-hadoop::hdfs::service_check',
-  'MAPREDUCE_SERVICE_CHECK': 'hdp-hadoop::mapred::service_check',
-  'ZOOKEEPER_SERVICE_CHECK': 'hdp-zookeeper::zookeeper::service_check',
-  'ZOOKEEPER_QUORUM_SERVICE_CHECK': 'hdp-zookeeper::quorum::service_check',
-  'HBASE_SERVICE_CHECK': 'hdp-hbase::hbase::service_check',
-  'HIVE_SERVICE_CHECK': 'hdp-hive::hive::service_check',
-  'HCAT_SERVICE_CHECK': 'hdp-hcat::hcat::service_check',
-  'OOZIE_SERVICE_CHECK': 'hdp-oozie::oozie::service_check',
-  'PIG_SERVICE_CHECK': 'hdp-pig::pig::service_check',
-  'SQOOP_SERVICE_CHECK': 'hdp-sqoop::sqoop::service_check',
-  'WEBHCAT_SERVICE_CHECK': 'hdp-templeton::templeton::service_check',
-  'DASHBOARD_SERVICE_CHECK': 'hdp-dashboard::dashboard::service_check',
-  'DECOMMISSION_DATANODE': 'hdp-hadoop::hdfs::decommission'
-}
-
-serviceStates = {
-  'START': 'running',
-  'INSTALL': 'installed_and_configured',
-  'STOP': 'stopped'
-}
-
-servicesToPidNames = {
-  'NAMENODE': 'hadoop-[A-Za-z0-9_]+-namenode.pid$',
-  'SECONDARY_NAMENODE': 'hadoop-[A-Za-z0-9_]+-secondarynamenode.pid$',
-  'DATANODE': 'hadoop-[A-Za-z0-9_]+-datanode.pid$',
-  'JOBTRACKER': 'hadoop-[A-Za-z0-9_]+-jobtracker.pid$',
-  'TASKTRACKER': 'hadoop-[A-Za-z0-9_]+-tasktracker.pid$',
-  'OOZIE_SERVER': 'oozie.pid',
-  'ZOOKEEPER_SERVER': 'zookeeper_server.pid',
-  'TEMPLETON_SERVER': 'templeton.pid',
-  'NAGIOS_SERVER': 'nagios.pid',
-  'GANGLIA_SERVER': 'gmetad.pid',
-  'GANGLIA_MONITOR': 'gmond.pid',
-  'HBASE_MASTER': 'hbase-[A-Za-z0-9_]+-master.pid',
-  'HBASE_REGIONSERVER': 'hbase-[A-Za-z0-9_]+-regionserver.pid',
-  'HCATALOG_SERVER': 'webhcat.pid',
-  'KERBEROS_SERVER': 'kadmind.pid',
-  'HIVE_SERVER': 'hive-server.pid',
-  'HIVE_METASTORE': 'hive.pid',
-  'MYSQL_SERVER': 'mysqld.pid'
-}
-
-pidPathesVars = [
-  {'var' : 'hadoop_pid_dir_prefix',
-   'defaultValue' : '/var/run/hadoop'},
-  {'var' : 'ganglia_runtime_dir',
-   'defaultValue' : '/var/run/ganglia/hdp'},                 
-  {'var' : 'hbase_pid_dir',
-   'defaultValue' : '/var/run/hbase'},                
-  {'var' : '',
-   'defaultValue' : '/var/run/nagios'},                    
-  {'var' : 'zk_pid_dir',
-   'defaultValue' : '/var/run/zookeeper'},             
-  {'var' : 'oozie_pid_dir',
-   'defaultValue' : '/var/run/oozie'},             
-  {'var' : 'hcat_pid_dir',
-   'defaultValue' : '/var/run/webhcat'},                       
-  {'var' : 'hive_pid_dir',
-   'defaultValue' : '/var/run/hive'},                      
-   {'var' : 'mysqld_pid_dir',
-   'defaultValue' : '/var/run/mysqld'}
-]
-
-class AmbariConfig:
-  def getConfig(self):
-    global config
-    return config
-
-  def getImports(self):
-    global imports
-    return imports
-
-  def getRolesToClass(self):
-    global rolesToClass
-    return rolesToClass
-
-  def getServiceStates(self):
-    global serviceStates
-    return serviceStates
-
-  def getServicesToPidNames(self):
-    global servicesToPidNames
-    return servicesToPidNames
-
-  def getPidPathesVars(self):
-    global pidPathesVars
-    return pidPathesVars
-
-
-def setConfig(customConfig):
-  global config
-  config = customConfig
-
-
-def main():
-  print config
-
-if __name__ == "__main__":
-  main()
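The defaults above are parsed once at import time from an in-memory string; a minimal sketch of the same ConfigParser-over-StringIO idiom, using only the stdlib modules imported in the file:

import ConfigParser
import StringIO

content = """
[server]
hostname=localhost
url_port=8440
"""
config = ConfigParser.RawConfigParser()
config.readfp(StringIO.StringIO(content))   # parse the embedded defaults
print config.get('server', 'hostname')      # localhost
print config.getint('server', 'url_port')   # 8440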
diff --git a/branch-1.2/ambari-agent/src/main/python/ambari_agent/Controller.py b/branch-1.2/ambari-agent/src/main/python/ambari_agent/Controller.py
deleted file mode 100644
index 82746d5..0000000
--- a/branch-1.2/ambari-agent/src/main/python/ambari_agent/Controller.py
+++ /dev/null
@@ -1,261 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import logging
-import logging.handlers
-import signal
-import json
-import hostname
-import sys, traceback
-import time
-import threading
-import urllib2
-from urllib2 import Request, urlopen, URLError
-import httplib
-import ssl
-import AmbariConfig
-import pprint
-import ProcessHelper
-from Heartbeat import Heartbeat
-from Register import Register
-from ActionQueue import ActionQueue
-from optparse import OptionParser
-from wsgiref.simple_server import ServerHandler
-import security
-from NetUtil import NetUtil
-from random import randrange, randint
-
-logger = logging.getLogger()
-
-class Controller(threading.Thread):
-
-  def __init__(self, config, range=120):
-    threading.Thread.__init__(self)
-    logger.debug('Initializing Controller RPC thread.')
-    self.lock = threading.Lock()
-    self.safeMode = True
-    self.credential = None
-    self.config = config
-    self.hostname = hostname.hostname()
-    server_secured_url = 'https://' + config.get('server', 'hostname') + ':' + config.get('server', 'secured_url_port')
-    self.registerUrl = server_secured_url + '/agent/v1/register/' + self.hostname
-    self.heartbeatUrl = server_secured_url + '/agent/v1/heartbeat/' + self.hostname
-    self.netutil = NetUtil()
-    self.responseId = -1
-    self.repeatRegistration = False
-    self.cachedconnect = None
-    self.range = range
-
-  def start(self):
-    self.actionQueue = ActionQueue(self.config)
-    self.actionQueue.start()
-    self.register = Register()
-    self.heartbeat = Heartbeat(self.actionQueue)
-    pass
-  
-  def __del__(self):
-    logger.info("Server connection disconnected.")
-    pass
-  
-  def registerWithServer(self):
-    retry=False
-    firstTime=True
-    registered=False
-    id = -1
-    ret = {}
-
-    while not registered:
-      try:
-        data = json.dumps(self.register.build(id))
-        logger.info("Registering with the server " + pprint.pformat(data))
-        response = self.sendRequest(self.registerUrl, data)
-        ret = json.loads(response)
-
-        logger.info("Registered with the server with " + pprint.pformat(ret))
-        print("Registered with the server")
-        self.responseId= int(ret['responseId'])
-        registered = True
-        if 'statusCommands' in ret.keys():
-          logger.info("Got status commands on registration " + pprint.pformat(ret['statusCommands']) )
-          self.addToQueue(ret['statusCommands'])
-          pass
-        pass
-      except Exception, err:
-        # try a reconnect only after a certain amount of random time
-        delay = randint(0, self.range)
-        logger.info("Unable to connect to: " + self.registerUrl, exc_info = True)
-        """ Sleeping for {0} seconds and then retrying again """.format(delay)
-        time.sleep(delay)
-        pass
-      pass  
-    return ret
-  
-  
-  def addToQueue(self, commands):
-    """Add to the queue for running the commands """
-    """ Put the required actions into the Queue """
-    """ Verify if the action is to reboot or not """
-    if not commands:
-      logger.info("No commands from the server : " + pprint.pformat(commands))
-    else:
-      """Only add to the queue if not empty list """
-      for command in commands:
-        logger.info("Adding command to the action queue: \n" +
-                     pprint.pformat(command))
-        self.actionQueue.put(command)
-        pass
-      pass
-    pass
-
-  # For testing purposes
-  DEBUG_HEARTBEAT_RETRIES = 0
-  DEBUG_SUCCESSFULL_HEARTBEATS = 0
-  DEBUG_STOP_HEARTBITTING = False
-
-  def heartbeatWithServer(self):
-    self.DEBUG_HEARTBEAT_RETRIES = 0
-    self.DEBUG_SUCCESSFULL_HEARTBEATS = 0
-    retry = False
-    certVerifFailed = False
-
-    config = AmbariConfig.config
-    hb_interval = config.get('heartbeat', 'state_interval')
-
-    #TODO make sure the response id is monotonically increasing
-    id = 0
-    while not self.DEBUG_STOP_HEARTBITTING:
-      try:
-        if not retry:
-          data = json.dumps(self.heartbeat.build(self.responseId, int(hb_interval)))
-          pass
-        else:
-          self.DEBUG_HEARTBEAT_RETRIES += 1
-        response = self.sendRequest(self.heartbeatUrl, data)
-        response = json.loads(response)
-
-        logger.info('Got server response: ' + pprint.pformat(response))
-        
-        serverId=int(response['responseId'])
-
-        if 'registrationCommand' in response.keys():
-          # check if the registration command is None. If none skip
-          if response['registrationCommand'] is not None:
-            logger.info("RegistrationCommand received - repeat agent registration")
-            self.repeatRegistration = True
-            return
-
-        if serverId!=self.responseId+1:
-          logger.error("Error in responseId sequence - restarting")
-          self.restartAgent()
-        else:
-          self.responseId=serverId
-
-        if 'executionCommands' in response.keys():
-          self.addToQueue(response['executionCommands'])
-          pass
-        if 'statusCommands' in response.keys():
-          self.addToQueue(response['statusCommands'])
-          pass
-        if "true" == response['restartAgent']:
-          logger.error("Got restartAgent command")
-          self.restartAgent()
-        else:
-          logger.info("No commands sent from the Server.")
-          pass
-
-        if retry:
-          print("Reconnected to the server")
-          logger.info("Reconnected to the server")
-        retry=False
-        certVerifFailed = False
-        self.DEBUG_SUCCESSFULL_HEARTBEATS += 1
-        self.DEBUG_HEARTBEAT_RETRIES = 0
-      except Exception, err:
-        #randomize the heartbeat
-        delay = randint(0, self.range)
-        time.sleep(delay)
-        if "code" in err:
-          logger.error(err.code)
-        else:
-          logger.error("Unable to connect to: " + self.heartbeatUrl + " due to " + str(err))
-          logger.debug("Details: " + str(err), exc_info=True)
-          if not retry:
-            print("Connection to the server was lost. Reconnecting...")
-          if 'certificate verify failed' in str(err) and not certVerifFailed:
-            print("Server certificate verify failed. Did you regenerate server certificate?")
-            certVerifFailed = True
-        self.cachedconnect = None # Previous connection is broken now
-        retry=True
-      if self.actionQueue.isIdle():
-        time.sleep(self.netutil.HEARTBEAT_IDDLE_INTERVAL_SEC)
-      else:
-        time.sleep(self.netutil.HEARTBEAT_NOT_IDDLE_INTERVAL_SEC)
-    pass
-
-  def run(self):
-    opener = urllib2.build_opener()
-    urllib2.install_opener(opener)
-
-    while True:
-      self.repeatRegistration = False
-      self.registerAndHeartbeat()
-      if not self.repeatRegistration:
-        break
-
-    pass
-
-  def registerAndHeartbeat(self):
-    registerResponse = self.registerWithServer()
-    message = registerResponse['response']
-    logger.info("Response from server = " + message)
-    time.sleep(self.netutil.HEARTBEAT_IDDLE_INTERVAL_SEC)
-    self.heartbeatWithServer()
-
-  def restartAgent(self):
-    ProcessHelper.restartAgent()
-    pass
-
-  def sendRequest(self, url, data):
-    if self.cachedconnect is None: # Lazy initialization
-      self.cachedconnect = security.CachedHTTPSConnection(self.config)
-    req = urllib2.Request(url, data, {'Content-Type': 'application/json'})
-    response = self.cachedconnect.request(req)
-    return response
-
-def main(argv=None):
-  # Allow Ctrl-C
-  signal.signal(signal.SIGINT, signal.SIG_DFL)
-
-  logger.setLevel(logging.INFO)
-  formatter = logging.Formatter("%(asctime)s %(filename)s:%(lineno)d - \
-    %(message)s")
-  stream_handler = logging.StreamHandler()
-  stream_handler.setFormatter(formatter)
-  logger.addHandler(stream_handler)
-
-  logger.info('Starting Server RPC Thread: %s' % ' '.join(sys.argv))
-
-  config = AmbariConfig.config
-  collector = Controller(config)
-  collector.start()
-  collector.run()
-
-if __name__ == '__main__':
-  main()
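Both registerWithServer() and heartbeatWithServer() back off by sleeping a random number of seconds in [0, range] before reconnecting, which spreads retries from many agents over time. A small sketch of that policy (hypothetical helper name):

import time
from random import randint

def backoff_sleep(range_seconds=120):
    # Sleep a uniformly random delay in [0, range_seconds] before retrying,
    # so a fleet of agents does not reconnect in lock-step.
    delay = randint(0, range_seconds)
    time.sleep(delay)
    return delay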
diff --git a/branch-1.2/ambari-agent/src/main/python/ambari_agent/DaemonHandler.py b/branch-1.2/ambari-agent/src/main/python/ambari_agent/DaemonHandler.py
deleted file mode 100644
index b726621..0000000
--- a/branch-1.2/ambari-agent/src/main/python/ambari_agent/DaemonHandler.py
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import simplejson
-import web
-from mimerender import mimerender
-from Runner import Runner
-
-render_json = lambda **args: simplejson.dumps(args)
-
-class DaemonHandler:
-    @mimerender(
-        default = 'json',
-        json = render_json
-    )
-    
-    def GET(self, cmd, daemonName):
-        data = {}
-        data['cmd'] = cmd
-        data['daemonName'] = daemonName
-        dispatcher = Runner()
-        return dispatcher.run(data)
-    
-    def POST(self, cmd):
-        web.header('Content-Type','application/json')
-        data = web.data()
-        jsonp = simplejson.loads(data)
-        jsonp['cmd']=cmd
-        dispatcher = Runner()
-        return dispatcher.run(jsonp)
-
diff --git a/branch-1.2/ambari-agent/src/main/python/ambari_agent/FileUtil.py b/branch-1.2/ambari-agent/src/main/python/ambari_agent/FileUtil.py
deleted file mode 100644
index f24046b..0000000
--- a/branch-1.2/ambari-agent/src/main/python/ambari_agent/FileUtil.py
+++ /dev/null
@@ -1,185 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-from pwd import getpwnam
-from grp import getgrnam
-import logging
-import logging.handlers
-import getpass
-import os, errno
-import sys, traceback
-import ConfigParser
-import shutil
-import StringIO
-import AmbariConfig
-
-logger = logging.getLogger()
-
-def getFilePath(action, fileName=""):
-  #Change the method signature to take the individual action fields
-  pathComp=""
-  if 'clusterId' in action:
-    pathComp = action['clusterId']
-  if 'role' in action:
-    pathComp = pathComp + "-" + action['role'] 
-  path = os.path.join(AmbariConfig.config.get('agent','prefix'),
-                      "clusters", 
-                      pathComp)
-  fullPathName=""
-  if fileName != "":
-    fullPathName=os.path.join(path, fileName)
-  else:
-    fileInfo = action['file']
-    fullPathName=os.path.join(path, fileInfo['path'])
-  return fullPathName
-  
-def appendToFile(data,absolutePath):
-  f = open(absolutePath, 'a')
-  f.write(data)
-  f.close()
-
-def writeFile(action, result, fileName=""):
-  fileInfo = action['file']
-  pathComp=""
-  if 'clusterId' in action:
-    pathComp = action['clusterId']
-  if 'role' in action:
-    pathComp = pathComp + "-" + action['role'] 
-  try:
-    path = os.path.join(AmbariConfig.config.get('agent','prefix'),
-                        "clusters", 
-                        pathComp)
-    user=getpass.getuser()
-    if 'owner' in fileInfo:
-      user=fileInfo['owner']
-    group=os.getgid()
-    if 'group' in fileInfo:
-      group=fileInfo['group']
-    fullPathName=""
-    if fileName != "":
-      fullPathName=os.path.join(path, fileName)
-    else:
-      fullPathName=os.path.join(path, fileInfo['path'])
-    logger.debug("path in writeFile: %s" % fullPathName)
-    content=fileInfo['data']
-    try:
-      if isinstance(user, int)!=True:
-        user=getpwnam(user)[2]
-      if isinstance(group, int)!=True:
-        group=getgrnam(group)[2]
-    except Exception:
-      logger.warn("can not find user uid/gid: (%s/%s) for writing %s" % (user, group, fullPathName))
-    permission=0750
-    if 'permission' in fileInfo and fileInfo['permission'] is not None:
-      permission=fileInfo['permission']
-    oldMask = os.umask(0)
-    umask=oldMask
-    if 'umask' in fileInfo and fileInfo['umask'] is not None:
-      umask=int(fileInfo['umask'])
-    os.umask(int(umask))
-    prefix = os.path.dirname(fullPathName)
-    try:
-      os.makedirs(prefix)
-    except OSError as err:
-      if err.errno == errno.EEXIST:
-        pass
-      else:
-        raise
-    f = open(fullPathName, 'w')
-    f.write(content)
-    f.close()
-    if os.getuid()==0:
-      os.chmod(fullPathName, permission)
-      os.chown(fullPathName, user, group)
-    os.umask(oldMask)
-    result['exitCode'] = 0
-  except Exception, err:
-    traceback.print_exc()
-    result['exitCode'] = 1
-    result['error'] = traceback.format_exc()
-  return result
-
-def createStructure(action, result):
-  try:
-    workdir = action['workDirComponent']
-    path = AmbariConfig.config.get('agent','prefix')+"/clusters/"+workdir
-    shutil.rmtree(path, 1)
-    os.makedirs(path+"/stack")
-    os.makedirs(path+"/logs")
-    os.makedirs(path+"/data")
-    os.makedirs(path+"/pkgs")
-    os.makedirs(path+"/config")
-    result['exitCode'] = 0
-  except Exception, err:
-    traceback.print_exc()
-    result['exitCode'] = 1
-    result['error'] = traceback.format_exc()
-  return result
-
-def deleteStructure(action, result):
-  try:
-    workdir = action['workDirComponent']
-    path = AmbariConfig.config.get('agent','prefix')+"/clusters/"+workdir
-    if os.path.exists(path):
-      shutil.rmtree(path)
-    result['exitCode'] = 0
-  except Exception, err:
-    result['exitCode'] = 1
-    result['error'] = traceback.format_exc()
-  return result
-
-def main():
-
-  action = { 'clusterId' : 'abc', 'role' : 'hdfs' }
-  result = {}
-  print createStructure(action, result)
-
-  configFile = {
-    "data"       : "test", 
-    "owner"      : os.getuid(), 
-    "group"      : os.getgid() , 
-    "permission" : 0700, 
-    "path"       : "/tmp/ambari_file_test/_file_write_test", 
-    "umask"      : 022 
-  }
-  action = { 'file' : configFile }
-  result = { }
-  print writeFile(action, result)
-
-  configFile = { 
-    "data"       : "test", 
-    "owner"      : "eyang", 
-    "group"      : "staff", 
-    "permission" : "0700", 
-    "path"       : "/tmp/ambari_file_test/_file_write_test", 
-    "umask"      : "022" 
-  }
-  result = { }
-  action = { 'file' : configFile }
-  print writeFile(action, result)
-
-  print deleteStructure(action, result)
-
-if __name__ == "__main__":
-  main()
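writeFile() resolves symbolic owner/group names to numeric ids before chown; assuming only the stdlib modules imported above, the lookup idiom is:

from pwd import getpwnam
from grp import getgrnam

def resolve_ids(user, group):
    # Accept either numeric ids or names, as writeFile() does.
    if not isinstance(user, int):
        user = getpwnam(user)[2]    # pw_uid
    if not isinstance(group, int):
        group = getgrnam(group)[2]  # gr_gid
    return user, group

print resolve_ids('root', 'root')   # (0, 0) on most Linux systems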
diff --git a/branch-1.2/ambari-agent/src/main/python/ambari_agent/Grep.py b/branch-1.2/ambari-agent/src/main/python/ambari_agent/Grep.py
deleted file mode 100644
index 32040ac..0000000
--- a/branch-1.2/ambari-agent/src/main/python/ambari_agent/Grep.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import re
-
-class Grep:
-
-  def __init__(self):
-    pass
-
-  def grep(self, string, phrase, before, after):
-    """
-    Tries to find the first occurrence of phrase in a given string. The string should
-    consist of lines separated by line separators; it is trimmed before searching.
-    If an occurrence is found, grep() takes no more than 'before' lines above and 'after' lines below.
-    If the phrase is not found, returns None. The search is not case sensitive; regexps are not supported.
-    """
-    stripped_string = string.strip()
-    lines = stripped_string.splitlines(True)
-    first_occurence = None
-    for index in range(len(lines)):
-      line = lines[index]
-      if phrase.lower() in line.lower():
-        first_occurence = index
-        break
-    if first_occurence is None:
-      return None
-    bound_a = before
-    if first_occurence < before:
-      bound_a = first_occurence
-    result = None
-    if (len(lines) - first_occurence) < after:
-      result = lines[first_occurence - bound_a :]
-    else:
-      result = lines[first_occurence - bound_a : first_occurence + after + 1]
-    return "".join(result).strip()
-
-
-  def tail(self, string, n):
-    """
-    Returns the last n lines of the string; the string is trimmed first.
-    """
-    stripped_string = string.strip()
-    lines = stripped_string.splitlines(True)
-    if len(lines) <= n:
-      return stripped_string
-    else:
-      length = len(lines)
-      tailed = lines[length - n:]
-      return "".join(tailed)
-
-  def filterMarkup(self, string):
-    """
-    Strips puppet colour markup (ANSI escape codes) from the given string.
-    """
-    if string is None:
-      result = None
-    else:
-      regexp = "\x1b" + r"\[[\d;]{1,4}m"
-      result = re.sub(regexp, '', string)
-    return result
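Assuming the module is importable as above, the three helpers behave as plain string utilities:

from Grep import Grep   # assumption: module on the path under this name

grep = Grep()
log = "line one\nERROR: something failed\nline three\n"

print grep.grep(log, "error", 1, 1)   # match is case-insensitive; 1 line of context each side
print grep.tail(log, 2)               # last two lines only
print grep.filterMarkup("\x1b[0;36mnotice\x1b[0m")   # colour codes stripped -> notice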
diff --git a/branch-1.2/ambari-agent/src/main/python/ambari_agent/Hardware.py b/branch-1.2/ambari-agent/src/main/python/ambari_agent/Hardware.py
deleted file mode 100644
index 3db1484..0000000
--- a/branch-1.2/ambari-agent/src/main/python/ambari_agent/Hardware.py
+++ /dev/null
@@ -1,181 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import multiprocessing
-import platform
-import AmbariConfig
-import os.path
-import shell
-import logging
-import subprocess
-import pprint
-import traceback
-
-logger = logging.getLogger()
-
-class Hardware:
-  def __init__(self):
-    self.hardware = {}
-    osdisks = self.osdisks()
-    self.hardware['mounts'] = osdisks
-    otherInfo = self.facterInfo()
-    self.hardware.update(otherInfo)
-    pass
-  
-  def osdisks(self):
-    """ Run df to find out the disks on the host. Only works on linux 
-    platforms. Note that this parser ignores any filesystems with spaces 
-    and any mounts with spaces. """
-    mounts = []
-    df = subprocess.Popen(["df", "-kPT"], stdout=subprocess.PIPE)
-    dfdata = df.communicate()[0]
-    lines = dfdata.splitlines()
-    for l in lines:
-      split = l.split()
-      """ this ignores any spaces in the filesystemname and mounts """
-      if (len(split)) == 7:
-        device, type, size, used, available, percent, mountpoint = split
-        mountinfo = { 
-                     'size' : size,
-                     'used' : used,
-                     'available' : available,
-                     'percent' : percent,
-                     'mountpoint' : mountpoint,
-                     'type': type,
-                     'device' : device }
-        if os.access(mountpoint, os.W_OK):
-          mounts.append(mountinfo)
-        pass
-      pass
-    return mounts
-    
-  def facterBin(self, facterHome):
-    facterBin = facterHome + "/bin/facter"
-    if (os.path.exists(facterBin)):
-      return facterBin
-    else:
-      return "facter"
-    pass
-  
-  def facterLib(self, facterHome):
-    return facterHome + "/lib/"
-    pass
-  
-  def configureEnviron(self, environ):
-    if not AmbariConfig.config.has_option("puppet", "ruby_home"):
-      return environ
-    ruby_home = AmbariConfig.config.get("puppet", "ruby_home")
-    if os.path.exists(ruby_home):
-      """Only update ruby home if the config is configured"""
-      path = os.environ["PATH"]
-      if not ruby_home in path:
-        environ["PATH"] = ruby_home + os.path.sep + "bin"  + ":"+environ["PATH"] 
-      environ["MY_RUBY_HOME"] = ruby_home
-    return environ
-    
-  def parseFacterOutput(self, facterOutput):
-    retDict = {}
-    allLines = facterOutput.splitlines()
-    for line in allLines:
-      keyValue = line.split("=>")
-      if (len(keyValue) == 2):
-        """Ignoring values that are just spaces or do not confirm to the 
-        format"""
-        strippedKey = keyValue[0].strip()
-        logger.info("Stripped key is " + strippedKey)
-        if strippedKey in ["memoryfree", "memorysize", "memorytotal"]:
-          value = keyValue[1].strip()
-          """Convert to KB"""
-          parts = value.split()
-          if len(parts) == 2:
-            mem_size = parts[1].upper()
-            if mem_size in ["GB", "G"]:
-              mem_in_kb = long(float(parts[0]) * 1024 * 1024)
-            elif mem_size in ["MB", "M"]:
-              mem_in_kb = long(float(parts[0]) * 1024)
-            elif mem_size in ["KB", "K"]:
-              mem_in_kb = long(float(parts[0]))
-            else:
-              mem_in_kb = long(float(parts[0]) / 1024)
-          else:
-            mem_in_kb = long(float(parts[0]) / 1024)
-          retDict[strippedKey] = mem_in_kb
-          pass
-        else:
-          retDict[strippedKey] = keyValue[1].strip()
-          pass
-        pass
-      pass
-    """ Convert the needed types to the true values """
-    if 'physicalprocessorcount' in retDict.keys():
-      retDict['physicalprocessorcount'] = int(retDict['physicalprocessorcount'])
-      pass
-    if 'is_virtual' in retDict.keys():
-      retDict['is_virtual'] = ("true" == retDict['is_virtual'])
-      pass
-    
-    logger.info("Facter info : \n" + pprint.pformat(retDict))
-    return retDict  
-  
-  def facterInfo(self):   
-    facterHome = AmbariConfig.config.get("puppet", "facter_home")
-    facterEnv = os.environ
-    logger.info("Using facter home as: " + facterHome)
-    facterInfo = {}
-    try:
-      if os.path.exists(facterHome):
-        rubyLib = ""
-        if os.environ.has_key("RUBYLIB"):
-          rubyLib = os.environ["RUBYLIB"]
-          logger.info("RUBYLIB from Env " + rubyLib)
-        if not (self.facterLib(facterHome) in rubyLib):
-          rubyLib = rubyLib + ":" + self.facterLib(facterHome)
-        
-        facterEnv["RUBYLIB"] = rubyLib
-        facterEnv = self.configureEnviron(facterEnv)
-        logger.info("Setting RUBYLIB as: " + rubyLib)
-        facter = subprocess.Popen([self.facterBin(facterHome)],
-                                  stdout=subprocess.PIPE,
-                                  stderr=subprocess.PIPE,
-                                  env=facterEnv)
-        stderr_out = facter.communicate()
-        if facter.returncode != 0:
-          logging.error("Error getting facter info: " + stderr_out[1])
-          pass
-        facterOutput = stderr_out[0]
-        infoDict = self.parseFacterOutput(facterOutput)
-        facterInfo = infoDict
-        pass
-      else:
-        logger.error("Facter home at " + facterHome + " does not exist")
-    except:
-      logger.info("Traceback " + traceback.format_exc())
-      pass
-    return facterInfo
-  
-  def get(self):
-    return self.hardware
-
-def main(argv=None):
-  hardware = Hardware()
-  print hardware.get()
-
-if __name__ == '__main__':
-  main()
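osdisks() keys on df -kPT emitting exactly seven whitespace-separated fields per data line (the header splits into eight tokens, so it is skipped automatically). A standalone sketch of the same parse:

import subprocess

def list_mounts():
    out = subprocess.Popen(["df", "-kPT"], stdout=subprocess.PIPE).communicate()[0]
    mounts = []
    for line in out.splitlines():
        fields = line.split()
        if len(fields) == 7:   # header and space-containing names are skipped
            device, fstype, size, used, avail, percent, mountpoint = fields
            mounts.append({'device': device, 'type': fstype,
                           'size': size, 'mountpoint': mountpoint})
    return mounts

print list_mounts()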
diff --git a/branch-1.2/ambari-agent/src/main/python/ambari_agent/Heartbeat.py b/branch-1.2/ambari-agent/src/main/python/ambari_agent/Heartbeat.py
deleted file mode 100644
index c2612d9..0000000
--- a/branch-1.2/ambari-agent/src/main/python/ambari_agent/Heartbeat.py
+++ /dev/null
@@ -1,77 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import json
-import logging
-from Hardware import Hardware
-from ActionQueue import ActionQueue
-from ServerStatus import ServerStatus
-import NetUtil
-import AmbariConfig
-import hostname
-import time
-import traceback
-from pprint import pprint, pformat
-from HostInfo import HostInfo
-
-logger = logging.getLogger()
-
-firstContact = True
-class Heartbeat:
-
-  def __init__(self, actionQueue):
-    self.actionQueue = actionQueue
-    self.reports = []
-
-  def build(self, id='-1', state_interval=-1):
-    global clusterId, clusterDefinitionRevision, firstContact
-    timestamp = int(time.time()*1000)
-    queueResult = self.actionQueue.result()
-
-    
-    nodeStatus = { "status" : "HEALTHY",
-                   "cause" : "NONE"}
-    
-    heartbeat = { 'responseId'        : int(id),
-                  'timestamp'         : timestamp,
-                  'hostname'          : hostname.hostname(),
-                  'nodeStatus'        : nodeStatus
-                }
-    if (int(id) >= 0) and state_interval > 0 and (int(id) % state_interval) == 0:
-      hostInfo = HostInfo()
-      nodeInfo = { }
-      # for now, just do the same work as registration
-      hostInfo.register(nodeInfo)
-      heartbeat['agentEnv'] = nodeInfo
-
-    if len(queueResult) != 0:
-      heartbeat['reports'] = queueResult['reports']
-      heartbeat['componentStatus'] = queueResult['componentStatus']
-      pass
-    logger.info("Heartbeat dump: " + pformat(heartbeat))
-    return heartbeat
-
-def main(argv=None):
-  actionQueue = ActionQueue(AmbariConfig.config)
-  heartbeat = Heartbeat(actionQueue)
-  print json.dumps(heartbeat.build('3',3))
-
-if __name__ == '__main__':
-  main()
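The fixed part of the payload build() assembles is small; a minimal sketch with the same field names (hypothetical helper, not the agent's API):

import json
import time

def build_heartbeat(response_id, host):
    return {
        'responseId': int(response_id),
        'timestamp': int(time.time() * 1000),   # server expects milliseconds
        'hostname': host,
        'nodeStatus': {'status': 'HEALTHY', 'cause': 'NONE'},
    }

print json.dumps(build_heartbeat(3, 'agent-host.example.com'))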
diff --git a/branch-1.2/ambari-agent/src/main/python/ambari_agent/HostInfo.py b/branch-1.2/ambari-agent/src/main/python/ambari_agent/HostInfo.py
deleted file mode 100644
index 55f33dc..0000000
--- a/branch-1.2/ambari-agent/src/main/python/ambari_agent/HostInfo.py
+++ /dev/null
@@ -1,168 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import os
-import glob
-import pwd
-import subprocess
-import AmbariConfig
-
-class HostInfo:
-
-  def dirType(self, path):
-    if not os.path.exists(path):
-      return 'not_exist'
-    elif os.path.islink(path):
-      return 'sym_link'
-    elif os.path.isdir(path):
-      return 'directory'
-    elif os.path.isfile(path):
-      return 'file'
-    return 'unknown'
-
-  def rpmInfo(self, rpmList):
-    config = AmbariConfig.config
-
-    try:
-      for rpmName in config.get('heartbeat', 'rpms').split(','):
-        rpmName = rpmName.strip()
-        rpm = { }
-        rpm['name'] = rpmName
-
-        try:
-          osStat = subprocess.Popen(["rpm", "-q", rpmName], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-          out, err = osStat.communicate()
-          if (0 != osStat.returncode or 0 == len(out.strip())):
-            rpm['installed'] = False
-          else:
-            rpm['installed'] = True
-            rpm['version'] = out.strip()
-        except:
-          rpm['available'] = False
-
-        rpmList.append(rpm)
-    except:
-      pass
-
-  def hadoopVarRunCount(self):
-    if not os.path.exists('/var/run/hadoop'):
-      return 0
-    pids = glob.glob('/var/run/hadoop/*/*.pid')
-    return len(pids)
-
-  def hadoopVarLogCount(self):
-    if not os.path.exists('/var/log/hadoop'):
-      return 0
-    logs = glob.glob('/var/log/hadoop/*/*.log')
-    return len(logs)
-  
-  def etcAlternativesConf(self, etcList):
-    if not os.path.exists('/etc/alternatives'):
-      return []
-    confs = glob.glob('/etc/alternatives/*conf')
-
-    for conf in confs:
-      confinfo = { }
-      realconf = conf
-      if os.path.islink(conf):
-        realconf = os.path.realpath(conf)
-      confinfo['name'] = conf
-      confinfo['target'] = realconf
-      etcList.append(confinfo)
-
-  def repos(self):
-    # centos, redhat
-    try:
-      osStat = subprocess.Popen(["yum", "-C", "repolist"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-      out, err = osStat.communicate()
-      return out
-    except:
-      pass
-    # suse, only if above failed
-    try:
-      osStat = subprocess.Popen(["zypper", "repos"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-      out, err = osStat.communicate()
-      return out
-    except:
-      pass
-
-    # default, never return empty
-    return "could_not_determine"
-    
-
-  def register(self, dict):
-    dict['varLogHadoopLogCount'] = self.hadoopVarLogCount()
-    dict['varRunHadoopPidCount'] = self.hadoopVarRunCount()
-    
-    etcs = []
-    self.etcAlternativesConf(etcs)
-    dict['etcAlternativesConf'] = etcs
-
-    dirs = []
-    config = AmbariConfig.config
-    try:
-      for dirName in config.get('heartbeat', 'dirs').split(','):
-        obj = { }
-        obj['type'] = self.dirType(dirName.strip())
-        obj['name'] = dirName.strip()
-        dirs.append(obj)
-    except:
-      pass
-
-    dict['paths'] = dirs
-
-    java = []
-    self.javaProcs(java)
-    dict['javaProcs'] = java
-
-    rpms = []
-    self.rpmInfo(rpms)
-    dict['rpms'] = rpms
-
-    dict['repoInfo'] = self.repos()
-    
-  def javaProcs(self, list):
-    try:
-      pids = [pid for pid in os.listdir('/proc') if pid.isdigit()]
-      for pid in pids:
-        cmd = open(os.path.join('/proc', pid, 'cmdline'), 'rb').read()
-        cmd = cmd.replace('\0', ' ')
-        if 'java' in cmd:
-          dict = { }
-          dict['pid'] = int(pid)
-          dict['hadoop'] = True if 'hadoop' in cmd else False
-          dict['command'] = cmd.strip()
-          for line in open(os.path.join('/proc', pid, 'status')):
-            if line.startswith('Uid:'):
-              uid = int(line.split()[1])
-              dict['user'] = pwd.getpwuid(uid).pw_name
-          list.append(dict)
-    except:
-      pass
-
-def main(argv=None):
-  h = HostInfo()
-  struct = { }
-  h.register(struct)
-  print struct
-
-if __name__ == '__main__':
-  main()
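
Note: javaProcs above is a plain /proc walk: list the numeric entries, read each cmdline, and match on a substring. The same idea factored into a reusable generator (a Linux-only sketch; 'java' is just the example pattern):

import os

def procs_matching(pattern):
    """Yield (pid, cmdline) for processes whose command line contains pattern."""
    for pid in os.listdir('/proc'):
        if not pid.isdigit():
            continue
        try:
            f = open(os.path.join('/proc', pid, 'cmdline'), 'rb')
            cmd = f.read().replace(b'\0', b' ').decode('utf-8', 'replace')
            f.close()
        except IOError:
            continue  # the process exited between listdir and open
        if pattern in cmd:
            yield int(pid), cmd.strip()

for pid, cmd in procs_matching('java'):
    print("%d %s" % (pid, cmd[:60]))
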
diff --git a/branch-1.2/ambari-agent/src/main/python/ambari_agent/LiveStatus.py b/branch-1.2/ambari-agent/src/main/python/ambari_agent/LiveStatus.py
deleted file mode 100644
index 2dcdc64..0000000
--- a/branch-1.2/ambari-agent/src/main/python/ambari_agent/LiveStatus.py
+++ /dev/null
@@ -1,133 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import json
-import logging
-from StatusCheck import StatusCheck
-import AmbariConfig
-import socket
-import time
-import traceback
-from pprint import pprint, pformat
-
-logger = logging.getLogger()
-
-class LiveStatus:
-
-  SERVICES = [
-    "HDFS", "MAPREDUCE", "GANGLIA", "HBASE",
-    "NAGIOS", "ZOOKEEPER", "OOZIE", "HCATALOG",
-    "KERBEROS", "TEMPLETON", "HIVE"
-  ]
-
-  COMPONENTS = [
-      {"serviceName" : "HDFS",
-       "componentName" : "DATANODE"},
-      {"serviceName" : "HDFS",
-       "componentName" : "NAMENODE"},
-      {"serviceName" : "HDFS",
-       "componentName" : "SECONDARY_NAMENODE"},
-#      {"serviceName" : "HDFS",
-#       "componentName" : "HDFS_CLIENT"},
-      {"serviceName" : "MAPREDUCE",
-       "componentName" : "JOBTRACKER"},
-      {"serviceName" : "MAPREDUCE",
-       "componentName" : "TASKTRACKER"},
-#      {"serviceName" : "MAPREDUCE",
-#       "componentName" : "MAPREDUCE_CLIENT"},
-      {"serviceName" : "GANGLIA",             #!
-       "componentName" : "GANGLIA_SERVER"},
-      {"serviceName" : "GANGLIA",             #!
-       "componentName" : "GANGLIA_MONITOR"},
-      {"serviceName" : "HBASE",               #!
-       "componentName" : "HBASE_MASTER"},
-      {"serviceName" : "HBASE",              #!
-       "componentName" : "HBASE_REGIONSERVER"},
-#      {"serviceName" : "HBASE",
-#       "componentName" : "HBASE_CLIENT"},
-      {"serviceName" : "NAGIOS",             #!
-       "componentName" : "NAGIOS_SERVER"},
-      {"serviceName" : "ZOOKEEPER",
-       "componentName" : "ZOOKEEPER_SERVER"},
-#      {"serviceName" : "ZOOKEEPER",
-#       "componentName" : "ZOOKEEPER_CLIENT"},
-      {"serviceName" : "OOZIE",
-       "componentName" : "OOZIE_SERVER"},
-#      {"serviceName" : "OOZIE",
-#       "componentName" : "OOZIE_CLIENT"},
-      {"serviceName" : "HCATALOG",            #!
-       "componentName" : "HCATALOG_SERVER"},
-      {"serviceName" : "KERBEROS",
-       "componentName" : "KERBEROS_SERVER"}, #!
-#      {"serviceName" : "TEMPLETON",
-#       "componentName" : "TEMPLETON_SERVER"},
-#      {"serviceName" : "TEMPLETON",
-#       "componentName" : "TEMPLETON_CLIENT"},
-      {"serviceName" : "HIVE",               #!
-       "componentName" : "HIVE_SERVER"},
-      {"serviceName" : "HIVE",               #!
-       "componentName" : "HIVE_METASTORE"},
-      {"serviceName" : "HIVE",               #!
-       "componentName" : "MYSQL_SERVER"},
-  ]
-
-  LIVE_STATUS = "STARTED"
-  DEAD_STATUS = "INSTALLED"
-
-  def __init__(self, cluster, service, component, globalConfig):
-    self.cluster = cluster
-    self.service = service
-    self.component = component
-    self.globalConfig = globalConfig
-
-
-  def belongsToService(self, component):
-    #TODO: Should also check belonging of server to cluster
-    return component['serviceName'] == self.service
-
-  # Live status was stripped from heartbeat after revision e1718dd
-  def build(self):
-    statusCheck = StatusCheck(AmbariConfig.servicesToPidNames, AmbariConfig.pidPathesVars, self.globalConfig)
-    livestatus = None
-    for component in self.COMPONENTS:
-      if component["serviceName"] == self.service and component["componentName"] == self.component:
-        serviceStatus = statusCheck.getStatus(component["componentName"])
-        if serviceStatus is None:
-          logger.warn("There is no service to pid mapping for " + component["componentName"])
-        status = self.LIVE_STATUS if serviceStatus else self.DEAD_STATUS
-        livestatus = {"componentName" : component["componentName"],
-                       "msg" : "",
-                       "status" : status,
-                       "clusterName" : self.cluster,
-                       "serviceName" : self.service
-        }
-        break
-    logger.info("The live status for component " + str(self.component) + " of service " + \
-                str(self.service) + " is " + str(livestatus))
-    return livestatus
-
-def main(argv=None):
-  for service in LiveStatus.SERVICES:
-    # Placeholder component name and empty global config for a local smoke test
-    livestatus = LiveStatus('', service, '', {})
-    print json.dumps(livestatus.build())
-
-if __name__ == '__main__':
-  main()
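
Note: build() above scans the COMPONENTS list linearly on every status request. Since the (service, component) pairs are unique, a dict keyed on that pair gives the same lookup in constant time; a sketch assuming the table keeps the shape shown above:

COMPONENTS = [
    {"serviceName": "HDFS", "componentName": "NAMENODE"},
    {"serviceName": "HDFS", "componentName": "DATANODE"},
]

INDEX = dict(((c["serviceName"], c["componentName"]), c) for c in COMPONENTS)

def find_component(service, component):
    return INDEX.get((service, component))  # None when the pair is unknown

print(find_component("HDFS", "NAMENODE"))
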
diff --git a/branch-1.2/ambari-agent/src/main/python/ambari_agent/NetUtil.py b/branch-1.2/ambari-agent/src/main/python/ambari_agent/NetUtil.py
deleted file mode 100644
index f538399..0000000
--- a/branch-1.2/ambari-agent/src/main/python/ambari_agent/NetUtil.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-from httplib import HTTPS
-from urlparse import urlparse
-import time
-import logging
-import pprint
-import traceback
-import httplib
-
-logger = logging.getLogger()
-
-class NetUtil:
-
-  CONNECT_SERVER_RETRY_INTERVAL_SEC = 10
-  HEARTBEAT_IDDLE_INTERVAL_SEC = 10
-  HEARTBEAT_NOT_IDDLE_INTERVAL_SEC = 5
-  HEARTBEAT_STATE_INTERVAL = 6 # default one per minute
-
-  # Url within server to request during status check. This url
-  # should return HTTP code 200
-  SERVER_STATUS_REQUEST = "{0}/cert/ca"
-
-  # For testing purposes
-  DEBUG_STOP_RETRIES_FLAG = False
-
-  def checkURL(self, url):
-    """Try to connect to a given url. Result is True if url returns HTTP code 200, in any other case
-    (like unreachable server or wrong HTTP code) result will be False
-    """
-    logger.info("DEBUG:: Connecting to the following url " + url);
-    try:
-      parsedurl = urlparse(url)
-      ca_connection = httplib.HTTPSConnection(parsedurl[1])
-      ca_connection.request("GET", parsedurl[2])
-      response = ca_connection.getresponse()  
-      status = response.status    
-      logger.info("DEBUG: Calling url received " + str(status))
-      
-      return status == 200
-    except Exception, e:
-      logger.info("Failed to connect to " + str(url) + " due to " + str(e))
-      return False
-
-  def try_to_connect(self, server_url, max_retries, logger = None):
-    """Try to connect to a given url, sleeping for CONNECT_SERVER_RETRY_INTERVAL_SEC seconds
-    between retries. No more than max_retries is performed. If max_retries is -1, connection
-    attempts will be repeated forever until server is not reachable
-    Returns count of retries
-    """
-    if logger is not None:
-      logger.info("DEBUG: Trying to connect to the server at " + server_url)
-    retries = 0
-    while (max_retries == -1 or retries < max_retries) and not self.DEBUG_STOP_RETRIES_FLAG:
-      server_is_up = self.checkURL(self.SERVER_STATUS_REQUEST.format(server_url))
-      if server_is_up:
-        break
-      else:
-        if logger is not None:
-          logger.info('Server at {0} is not reachable, sleeping for {1} seconds...'.format(server_url,
-            self.CONNECT_SERVER_RETRY_INTERVAL_SEC))
-        retries += 1
-        time.sleep(self.CONNECT_SERVER_RETRY_INTERVAL_SEC)
-    return retries
-
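
Note: try_to_connect above is a fixed-interval retry loop around checkURL. The same loop with the probe injected, so the retry policy can be exercised without a live server (a sketch, not the agent's API):

import time

def retry_until(probe, max_retries, interval_sec):
    """Call probe() until it returns True; max_retries == -1 retries forever.
    Returns the number of failed attempts."""
    retries = 0
    while max_retries == -1 or retries < max_retries:
        if probe():
            break
        retries += 1
        time.sleep(interval_sec)
    return retries

# Example: a probe that succeeds on its third call.
calls = {'n': 0}
def flaky():
    calls['n'] += 1
    return calls['n'] >= 3

print(retry_until(flaky, 10, 0))  # prints 2
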
diff --git a/branch-1.2/ambari-agent/src/main/python/ambari_agent/PackageHandler.py b/branch-1.2/ambari-agent/src/main/python/ambari_agent/PackageHandler.py
deleted file mode 100644
index be05575..0000000
--- a/branch-1.2/ambari-agent/src/main/python/ambari_agent/PackageHandler.py
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import simplejson
-import web
-from mimerender import mimerender
-from Runner import Runner
-
-render_json = lambda **args: simplejson.dumps(args)
-
-class PackageHandler:
-    @mimerender(
-        default = 'json',
-        json = render_json
-    )
-    
-    def GET(self, cmd, packageName):
-        data = {}
-        data['cmd'] = cmd
-        data['package'] = { "name" : packageName }
-        dispatcher = Runner()
-        return dispatcher.run(data)
-    
-    def POST(self, cmd):        
-        web.header('Content-Type','application/json')
-        data = web.data()
-        jsonp = simplejson.loads(data)
-        jsonp['cmd']=cmd
-        dispatcher = Runner()
-        return dispatcher.run(jsonp)
-
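
Note: both handlers above reduce to handing Runner a plain dict that names the action. A sketch of the payload the GET path assembles (field names follow the code above; values are illustrative):

def build_package_payload(cmd, package_name):
    return {
        'cmd': cmd,
        'package': {'name': package_name},
    }

print(build_package_payload('info', 'hadoop'))
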
diff --git a/branch-1.2/ambari-agent/src/main/python/ambari_agent/ProcUtil.py b/branch-1.2/ambari-agent/src/main/python/ambari_agent/ProcUtil.py
deleted file mode 100644
index d02b331..0000000
--- a/branch-1.2/ambari-agent/src/main/python/ambari_agent/ProcUtil.py
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import sys
-import os
-
-def get_proc_status(pid):
-  pid = int(pid)
-  path = ("/proc/%d/status" % pid)
-  if not os.path.exists(path):
-    return None
-  status_file = open(path)
-  lines = status_file.readlines()
-  for line in lines:
-    if line.startswith("State:"):
-      return line.split(":",1)[1].strip().split(' ')[0].split(" ",1)[0]
-  return None
-    
-if __name__ == '__main__':
-  state = get_proc_status(sys.argv[1])
-  print state
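
Note: a State: line in /proc/<pid>/status looks like "State:\tS (sleeping)", so a single whitespace split yields the one-letter code. A compact equivalent of get_proc_status (Linux-only sketch):

import os

def proc_state(pid):
    path = "/proc/%d/status" % int(pid)
    if not os.path.exists(path):
        return None
    for line in open(path):
        if line.startswith("State:"):
            return line.split()[1]  # e.g. 'R', 'S', 'Z'
    return None

print(proc_state(os.getpid()))  # typically 'R' for the running interpreter
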
diff --git a/branch-1.2/ambari-agent/src/main/python/ambari_agent/ProcessHelper.py b/branch-1.2/ambari-agent/src/main/python/ambari_agent/ProcessHelper.py
deleted file mode 100644
index bfce47b..0000000
--- a/branch-1.2/ambari-agent/src/main/python/ambari_agent/ProcessHelper.py
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import os
-import logging
-import traceback
-import sys
-from shell import getTempFiles
-
-logger = logging.getLogger()
-
-
-if 'AMBARI_PID_DIR' in os.environ:
-    pidfile = os.environ['AMBARI_PID_DIR'] + "/ambari-agent.pid"
-else:
-    pidfile = "/var/run/ambari-agent/ambari-agent.pid"
-
-
-def _clean():
-
-  logger.info("Removing pid file")
-  try:
-    os.unlink(pidfile)
-  except Exception as ex:
-    traceback.print_exc()
-    logger.warn("Unable to remove pid file: %s", ex)
-
-  logger.info("Removing temp files")
-  for f in getTempFiles():
-    if os.path.exists(f):
-      try:
-        os.unlink(f)
-      except Exception as ex:
-        traceback.print_exc()
-        logger.warn("Unable to remove: %s, %s", f, ex)
-
-
-def stopAgent():
-
-  _clean()
-  os._exit(0)
-
-
-def restartAgent():
-
-  _clean()
-
-  executable = sys.executable
-  args = sys.argv[:]
-  args.insert(0, executable)
-
-  logger.info("Restarting self: %s %s", executable, args)
-
-  os.execvp(executable, args)
-
-
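
Note: the pidfile that _clean() removes is written by main.py further down in this diff. The usual write/remove cycle, sketched with a hypothetical path:

import os

PIDFILE = "/tmp/example-agent.pid"  # hypothetical location

def write_pidfile():
    f = open(PIDFILE, 'w')
    f.write(str(os.getpid()))
    f.close()

def remove_pidfile():
    try:
        os.unlink(PIDFILE)
    except OSError:
        pass  # already removed

write_pidfile()
remove_pidfile()
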
diff --git a/branch-1.2/ambari-agent/src/main/python/ambari_agent/Register.py b/branch-1.2/ambari-agent/src/main/python/ambari_agent/Register.py
deleted file mode 100644
index 340569b..0000000
--- a/branch-1.2/ambari-agent/src/main/python/ambari_agent/Register.py
+++ /dev/null
@@ -1,108 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import sys
-import json
-from Hardware import Hardware
-from ActionQueue import ActionQueue
-from ServerStatus import ServerStatus
-import hostname
-import time
-import urllib2
-import subprocess
-from HostInfo import HostInfo
-
-
-firstContact = True
-class Register:
-  """ Registering with the server. Get the hardware profile and 
-  declare success for now """
-  def __init__(self):
-    self.hardware = Hardware()
-
-  def build(self, id='-1'):
-    timestamp = int(time.time()*1000)
-   
-    hostInfo = HostInfo() 
-    agentEnv = { }
-    hostInfo.register(agentEnv)
-    
-    register = { 'responseId'        : int(id),
-                  'timestamp'         : timestamp,
-                  'hostname'          : hostname.hostname(),
-                  'publicHostname'    : hostname.public_hostname(),
-                  'hardwareProfile'   : self.hardware.get(),
-                  'agentEnv'          : agentEnv
-                }
-    return register
-
-def doExec(vals, key, command, preLF=False):
-  template = "{0}: {1} {2}"
-  try:
-    osStat = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    out, err = osStat.communicate()
-    if 0 != osStat.returncode or 0 == len(out.strip()):
-      print template.format(key, "UNAVAILABLE", "")
-    else:
-      if (preLF):
-        print template.format(key, "ok,\n", out.strip())
-      else:
-        print template.format(key, "ok,", out.strip())
-  except:
-    print template.format(key, "UNAVAILABLE", "")
-  
-
-# Linux only
-def machineInfo():
-  vals = { }
-  doExec(vals, 'hostname', ["hostname", "-f"])
-  doExec(vals, 'ip', ["hostname", "-i"])
-  doExec(vals, 'cpu', ["sh", "-c", "cat /proc/cpuinfo | grep 'model name' | awk -F': ' '{ print $2; }'"])
-  doExec(vals, 'memory', ["sh", "-c", "cat /proc/meminfo | grep MemTotal | awk -F': ' '{ print $2/1024/1024 \" GB\"; }'"])
-  doExec(vals, 'disks', ["df", "-h"], True)
-  doExec(vals, 'os', ["sh", "-c", "cat /etc/issue.net | head -1"])
-  doExec(vals, 'iptables', ["iptables", "-vnL"], True)
-  doExec(vals, 'selinux', ["sh", "-c", "cat /etc/selinux/config | grep ^SELINUX"])
-
-  rpm_req = { }
-  for REQ in (["yum", "rpm", "openssl", "curl", "wget", "net-snmp", "net-snmp-utils", "ntpd"]):
-    doExec(rpm_req, REQ, ["rpm", "-qa", REQ])
-  vals["required_packages"] = rpm_req
-
-  rpm_opt = { }
-  for OPT in (["ruby", "puppet", "nagios", "ganglia", "passenger", "hadoop"]):
-    doExec(rpm_opt, OPT, ["rpm", "-qa", OPT])
-  vals["optional_packages"] = rpm_opt
-
-  doExec(vals, "yum_repos", ["sh", "-c", "yum -C repolist enabled | egrep \"(AMBARI|HDP)\""], True)
-  # for SUSE-based agents
-  doExec(vals, "zypper_repos", ["sh", "-c", "zypper repos | egrep \"(AMBARI|HDP)\""], True)
-  
-  
-def main(argv=None):
-  if len(argv) == 1:
-    register = Register()
-    print json.dumps(register.build())
-  else:
-    machineInfo()
-
-if __name__ == '__main__':
-  main(sys.argv)
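
Note: doExec above repeats one capture-and-report pattern: run a command, and treat a non-zero exit or empty output as UNAVAILABLE. The pattern in isolation (a sketch; the command is just an example):

import subprocess

def capture(command):
    """Return stripped stdout, or None on failure or empty output."""
    try:
        p = subprocess.Popen(command, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        out, err = p.communicate()
        if p.returncode != 0 or not out.strip():
            return None
        return out.strip()
    except OSError:
        return None

print(capture(["hostname", "-f"]) or "UNAVAILABLE")
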
diff --git a/branch-1.2/ambari-agent/src/main/python/ambari_agent/RepoInstaller.py b/branch-1.2/ambari-agent/src/main/python/ambari_agent/RepoInstaller.py
deleted file mode 100644
index 08051d6..0000000
--- a/branch-1.2/ambari-agent/src/main/python/ambari_agent/RepoInstaller.py
+++ /dev/null
@@ -1,110 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import logging
-import os
-import json
-from shell import shellRunner
-from manifestGenerator import writeImports
-from pprint import pprint, pformat
-import ast
-import AmbariConfig
-import urlparse, urllib
-import re
-
-PUPPET_EXT=".pp"
-
-logger = logging.getLogger()
-
-class RepoInstaller:
-  def __init__(self, parsedJson, path, modulesdir, taskId, config):
-    self.parsedJson = parsedJson
-    self.path = path
-    self.modulesdir = modulesdir
-    self.taskId = taskId
-    self.sh = shellRunner()
-    self.config = config
-    
-  def prepareReposInfo(self):
-    params = {}
-    self.repoInfoList = []
-    if self.parsedJson.has_key('hostLevelParams'):
-      params = self.parsedJson['hostLevelParams']
-    if params.has_key('repo_info'):
-      self.repoInfoList = params['repo_info']
-    logger.info("Repo List Info " + pformat(self.repoInfoList))
-    if (isinstance(self.repoInfoList, basestring)):
-      if (self.repoInfoList is not None and (len(self.repoInfoList) > 0)):
-        self.repoInfoList = ast.literal_eval(self.repoInfoList)
-      else:
-        self.repoInfoList = []
-
-  def generateFiles(self):
-    repoPuppetFiles = []
-    for repo in self.repoInfoList:
-      repoFile = open(self.path + os.sep + repo['repoId'] + '-' + 
-                      str(self.taskId) + PUPPET_EXT, 'w+')
-
-      writeImports(repoFile, self.modulesdir, AmbariConfig.imports)
-      
-      baseUrl = ''
-      mirrorList = ''
-      
-      if repo.has_key('baseUrl'):
-        baseUrl = repo['baseUrl']
-        baseUrl = baseUrl.decode('unicode-escape').encode('utf-8')
-        # Hack to take care of $ signs in the repo url
-        baseUrl = baseUrl.replace('$', '\$')
-
-      if repo.has_key('mirrorsList'):
-        mirrorList = repo['mirrorsList']
-        mirrorList = mirrorList.decode('unicode-escape').encode('utf-8')
-        # Hack to take care of $ signs in the repo url
-        mirrorList = mirrorList.replace('$', '\$')
-
-      repoFile.write('node /default/ {')
-      repoFile.write('class{ "hdp-repos::process_repo" : ' + ' os_type => "' + repo['osType'] +
-      '", repo_id => "' + repo['repoId'] + '", base_url => "' + baseUrl +
-      '", mirror_list => "' + mirrorList +'", repo_name => "' + repo['repoName'] + '" }' )
-      repoFile.write('}')
-      repoFile.close()
-      repoPuppetFiles.append(repoFile.name)
-
-    return repoPuppetFiles
-
-  def installRepos(self):
-    self.prepareReposInfo()
-    repoPuppetFiles = self.generateFiles()
-    return repoPuppetFiles
-
-def main():
-  # Test code
-  logging.basicConfig(level=logging.DEBUG)
-  jsonFile = open('test.json', 'r')
-  jsonStr = jsonFile.read()
-  jsonFile.close()
-  parsedJson = json.loads(jsonStr)
-  repoInstaller = RepoInstaller(parsedJson, '/tmp',
-                                '/home/centos/ambari_ws/ambari-agent/src/main/puppet/modules',
-                                0, AmbariConfig.config)
-  repoInstaller.installRepos()
-  
-if __name__ == '__main__':
-  main()
-
-
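
Note: generateFiles above assembles the Puppet class invocation by string concatenation, which is also where the $-escaping hack comes in. The same snippet via a single format template (a sketch; field names follow the repo dicts above, values are illustrative):

TEMPLATE = ('node /default/ {{class{{ "hdp-repos::process_repo" : '
            'os_type => "{osType}", repo_id => "{repoId}", '
            'base_url => "{baseUrl}", mirror_list => "{mirrorList}", '
            'repo_name => "{repoName}" }}}}')

repo = {'osType': 'centos6', 'repoId': 'HDP-1.2.0', 'baseUrl': '',
        'mirrorList': '', 'repoName': 'HDP'}
print(TEMPLATE.format(**repo))
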
diff --git a/branch-1.2/ambari-agent/src/main/python/ambari_agent/Runner.py b/branch-1.2/ambari-agent/src/main/python/ambari_agent/Runner.py
deleted file mode 100644
index 4b86381..0000000
--- a/branch-1.2/ambari-agent/src/main/python/ambari_agent/Runner.py
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import threading
-from daemon import daemonRunner
-from package import packageRunner
-from shell import shellRunner
-
-class Runner(threading.Thread):
-    """ Runs the actions coming from the server """
-    __instance = None
-    lock = None
-    def __init__(self):
-        if Runner.__instance is None:
-          Runner.lock = threading.RLock()
-          Runner.__instance = self
-    
-    def run(self, data):
-        Runner.lock.acquire()
-        try:
-            if data['actionType']=='info':
-                ph = packageRunner()
-                result = ph.info(data['packages'])
-            elif data['actionType']=='install':
-                ph = packageRunner()
-                if 'dry-run' in data:
-                    opt = data['dry-run']
-                else:
-                    opt = 'false'
-                result = ph.install(data['packages'], opt)
-            elif data['actionType']=='remove':
-                ph = packageRunner()
-                if 'dry-run' in data:
-                    opt = data['dry-run']
-                else:
-                    opt = 'false'
-                result = ph.remove(data['packages'], opt)
-            elif data['actionType']=='status':
-                dh = daemonRunner()
-                result = dh.status(data['daemonName'])
-            elif data['actionType']=='start':
-                dh = daemonRunner()
-                result = dh.start(data['daemonName'])
-            elif data['actionType']=='stop':
-                dh = daemonRunner()
-                result = dh.stop(data['daemonName'])
-            return result
-        finally:
-            Runner.lock.release()
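
Note: run() above is an if/elif dispatch on actionType. The table form of the same routing, with stand-in handlers (a sketch; the real handlers are the package and daemon runners imported above):

def pkg_info(data):
    return 'info %s' % data['packages']

def daemon_start(data):
    return 'start %s' % data['daemonName']

DISPATCH = {
    'info': pkg_info,
    'start': daemon_start,
    # ... one entry per supported actionType
}

def run(data):
    handler = DISPATCH.get(data['actionType'])
    if handler is None:
        raise ValueError('unknown actionType: %r' % data['actionType'])
    return handler(data)

print(run({'actionType': 'info', 'packages': 'hadoop'}))
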
diff --git a/branch-1.2/ambari-agent/src/main/python/ambari_agent/ServerStatus.py b/branch-1.2/ambari-agent/src/main/python/ambari_agent/ServerStatus.py
deleted file mode 100644
index 53a0a9a..0000000
--- a/branch-1.2/ambari-agent/src/main/python/ambari_agent/ServerStatus.py
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-from shell import shellRunner
-import logging
-import logging.handlers
-
-logger = logging.getLogger()
-global serverTracker
-
-class ServerStatus:
-  def build(self):
-    sh = shellRunner()
-    list = []
-    servers = sh.getServerTracker()
-    for server in servers:
-      (clusterId, clusterDefinitionRevision, component, role) = server.split("/")
-      result = {
-                 'clusterId'                 : clusterId,
-                 'clusterDefinitionRevision' : clusterDefinitionRevision,
-                 'componentName'             : component,
-                 'roleName'                  : role,
-                 'serverStatus'              : 'STARTED'
-               }
-      list.append(result)
-    return list
-
-def main(argv=None):
-  serverStatus = ServerStatus()
-  print serverStatus.build()
-
-if __name__ == '__main__':
-  main()
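
Note: each tracker entry above is a slash-delimited 4-tuple. The parse and the record it becomes, in isolation (a sketch; the tracker string is illustrative):

def parse_tracker(entry):
    clusterId, revision, component, role = entry.split("/")
    return {
        'clusterId': clusterId,
        'clusterDefinitionRevision': revision,
        'componentName': component,
        'roleName': role,
        'serverStatus': 'STARTED',
    }

print(parse_tracker("cluster1/3/HDFS/NAMENODE"))
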
diff --git a/branch-1.2/ambari-agent/src/main/python/ambari_agent/StatusCheck.py b/branch-1.2/ambari-agent/src/main/python/ambari_agent/StatusCheck.py
deleted file mode 100644
index daf0c87..0000000
--- a/branch-1.2/ambari-agent/src/main/python/ambari_agent/StatusCheck.py
+++ /dev/null
@@ -1,122 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-from shell import shellRunner
-import logging
-import logging.handlers
-import sys
-import os
-import re
-
-logger = logging.getLogger()
-
-
-class StatusCheck:
-    
-    
-  def listFiles(self, dir):
-    basedir = dir
-    logger.debug("Files in " + os.path.abspath(dir) + ": ")
-    subdirlist = []
-    try:
-      if os.path.isdir(dir):
-        for item in os.listdir(dir):
-          itemPath = os.path.join(basedir, item)
-          if os.path.isfile(itemPath) and item.endswith('.pid'):
-            self.pidFilesDict[item] = itemPath
-          else:
-            subdirlist.append(itemPath)
-        for subdir in subdirlist:
-          self.listFiles(subdir)
-      else:
-        if dir.endswith('.pid'):
-          self.pidFilesDict[dir.split(os.sep).pop()] = dir
-    except OSError as e:
-      logger.info(e.strerror + ' to ' + e.filename)
-      
-  def fillDirValues(self):
-    try:
-      for pidVar in self.pidPathesVars:
-        pidVarName = pidVar['var']
-        pidDefaultvalue = pidVar['defaultValue']
-        if self.globalConfig.has_key(pidVarName):
-          self.pidPathes.append(self.globalConfig[pidVarName])
-        else:
-          self.pidPathes.append(pidDefaultvalue)
-    except Exception as e:
-        logger.error("Error while filling directories values " + str(e))
-        
-  def __init__(self, serviceToPidDict, pidPathesVars, globalConfig):
-
-    self.serToPidDict = serviceToPidDict
-    self.pidPathesVars = pidPathesVars
-    self.pidPathes = []
-    self.sh = shellRunner()
-    self.pidFilesDict = {}
-    self.globalConfig = globalConfig
-    
-    self.fillDirValues()
-    
-    for pidPath in self.pidPathes:
-      self.listFiles(pidPath)
-
-  def getIsLive(self, pidPath):
-
-    if not pidPath:
-      return False
-
-    isLive = False
-    pid = -1
-    try:
-      pidFile = open(pidPath, 'r')
-      pid = int(pidFile.readline())
-    except IOError, e:
-      logger.warn("Can not open file " + str(pidPath) + " due to " + str(e))
-      return isLive
-    res = self.sh.run(['ps -p', str(pid), '-f'])
-    lines = res['output'].strip().split(os.linesep)
-    try:
-      procInfo = lines[1]
-      isLive = procInfo is not None
-    except IndexError:
-      logger.info('Process is dead')
-    return isLive
-
-  def getStatus(self, serviceCode):
-    try:
-      pidPath = None
-      pidPattern = self.serToPidDict[serviceCode]
-      logger.info('pidPattern: ' + pidPattern)
-    except KeyError as e:
-      logger.warn('There is no mapping for ' + serviceCode)
-      return None
-    try:
-      for pidFile in self.pidFilesDict.keys():
-        if re.match(pidPattern, pidFile):
-          pidPath = self.pidFilesDict[pidFile]          
-      logger.info('pidPath: ' + str(pidPath))
-      result = self.getIsLive(pidPath)
-      return result
-    except KeyError:
-      logger.info('Pid file was not found')
-      return False
-
-  def getSerToPidDict(self):
-    return self.serToPidDict
-
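
Note: getStatus above resolves a service code to a pid file by regex, then checks liveness. The lookup step on its own, with hypothetical mappings in place of AmbariConfig's tables:

import re

PID_FILES = {'hadoop-hdfs-namenode.pid': '/var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid'}
SERVICE_TO_PATTERN = {'NAMENODE': r'hadoop.*namenode\.pid$'}

def pid_path_for(service_code):
    pattern = SERVICE_TO_PATTERN.get(service_code)
    if pattern is None:
        return None  # no service-to-pid mapping
    for name, path in PID_FILES.items():
        if re.match(pattern, name):
            return path
    return None

print(pid_path_for('NAMENODE'))
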
diff --git a/branch-1.2/ambari-agent/src/main/python/ambari_agent/__init__.py b/branch-1.2/ambari-agent/src/main/python/ambari_agent/__init__.py
deleted file mode 100644
index 3bfb534..0000000
--- a/branch-1.2/ambari-agent/src/main/python/ambari_agent/__init__.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from __future__ import generators
-
-__version__ = "0.1.0"
-__author__ = [
-    "Eric Yang <eyang@apache.org>",
-    "Kan Zhang <kanzhangmail@yahoo.com>"
-]
-__license__ = "Apache License v2.0"
-__contributors__ = "see http://incubator.apache.org/ambari/contributors"
-
-import logging
-import logging.handlers
-import threading
-import sys
-import time
-import signal
-
diff --git a/branch-1.2/ambari-agent/src/main/python/ambari_agent/createDaemon.py b/branch-1.2/ambari-agent/src/main/python/ambari_agent/createDaemon.py
deleted file mode 100644
index 764211c..0000000
--- a/branch-1.2/ambari-agent/src/main/python/ambari_agent/createDaemon.py
+++ /dev/null
@@ -1,205 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-"""Disk And Execution MONitor (Daemon)
-
-Configurable daemon behaviors:
-
-   1.) The current working directory set to the "/" directory.
-   2.) The current file creation mode mask set to 0.
-   3.) Close all open files (1024). 
-   4.) Redirect standard I/O streams to "/dev/null".
-
-A failed call to fork() now raises an exception.
-
-References:
-   1) Advanced Programming in the Unix Environment: W. Richard Stevens
-   2) Unix Programming Frequently Asked Questions:
-         http://www.erlenstar.demon.co.uk/unix/faq_toc.html
-"""
-
-__author__ = "Chad J. Schroeder"
-__copyright__ = "Copyright (C) 2005 Chad J. Schroeder"
-
-__revision__ = "$Id$"
-__version__ = "0.2"
-
-# Standard Python modules.
-import os               # Miscellaneous OS interfaces.
-import sys              # System-specific parameters and functions.
-
-# Default daemon parameters.
-# File mode creation mask of the daemon.
-UMASK = 0022
-
-# Default working directory for the daemon.
-WORKDIR = "/"
-
-# Default maximum for the number of available file descriptors.
-MAXFD = 1024
-
-# The standard I/O file descriptors are redirected to /dev/null by default.
-if (hasattr(os, "devnull")):
-   REDIRECT_TO = os.devnull
-else:
-   REDIRECT_TO = "/dev/null"
-
-def createDaemon():
-   """Detach a process from the controlling terminal and run it in the
-   background as a daemon.
-   """
-
-   try:
-      # Fork a child process so the parent can exit.  This returns control to
-      # the command-line or shell.  It also guarantees that the child will not
-      # be a process group leader, since the child receives a new process ID
-      # and inherits the parent's process group ID.  This step is required
-      # to insure that the next call to os.setsid is successful.
-      pid = os.fork()
-   except OSError, e:
-      raise Exception, "%s [%d]" % (e.strerror, e.errno)
-
-   if (pid == 0):       # The first child.
-      # To become the session leader of this new session and the process group
-      # leader of the new process group, we call os.setsid().  The process is
-      # also guaranteed not to have a controlling terminal.
-      os.setsid()
-
-      # Is ignoring SIGHUP necessary?
-      #
-      # It's often suggested that the SIGHUP signal should be ignored before
-      # the second fork to avoid premature termination of the process.  The
-      # reason is that when the first child terminates, all processes, e.g.
-      # the second child, in the orphaned group will be sent a SIGHUP.
-      #
-      # "However, as part of the session management system, there are exactly
-      # two cases where SIGHUP is sent on the death of a process:
-      #
-      #   1) When the process that dies is the session leader of a session that
-      #      is attached to a terminal device, SIGHUP is sent to all processes
-      #      in the foreground process group of that terminal device.
-      #   2) When the death of a process causes a process group to become
-      #      orphaned, and one or more processes in the orphaned group are
-      #      stopped, then SIGHUP and SIGCONT are sent to all members of the
-      #      orphaned group." [2]
-      #
-      # The first case can be ignored since the child is guaranteed not to have
-      # a controlling terminal.  The second case isn't so easy to dismiss.
-      # The process group is orphaned when the first child terminates and
-      # POSIX.1 requires that every STOPPED process in an orphaned process
-      # group be sent a SIGHUP signal followed by a SIGCONT signal.  Since the
-      # second child is not STOPPED though, we can safely forego ignoring the
-      # SIGHUP signal.  In any case, there are no ill-effects if it is ignored.
-      #
-      # import signal           # Set handlers for asynchronous events.
-      # signal.signal(signal.SIGHUP, signal.SIG_IGN)
-
-      try:
-         # Fork a second child and exit immediately to prevent zombies.  This
-         # causes the second child process to be orphaned, making the init
-         # process responsible for its cleanup.  And, since the first child is
-         # a session leader without a controlling terminal, it's possible for
-         # it to acquire one by opening a terminal in the future (System V-
-         # based systems).  This second fork guarantees that the child is no
-         # longer a session leader, preventing the daemon from ever acquiring
-         # a controlling terminal.
-         pid = os.fork()        # Fork a second child.
-      except OSError, e:
-         raise Exception, "%s [%d]" % (e.strerror, e.errno)
-
-      if (pid == 0):    # The second child.
-         # Since the current working directory may be a mounted filesystem, we
-         # avoid the issue of not being able to unmount the filesystem at
-         # shutdown time by changing it to the root directory.
-         os.chdir(WORKDIR)
-         # We probably don't want the file mode creation mask inherited from
-         # the parent, so we give the child complete control over permissions.
-         os.umask(UMASK)
-      else:
-         # exit() or _exit()?  See below.
-         os._exit(0)    # Exit parent (the first child) of the second child.
-   else:
-      # exit() or _exit()?
-      # _exit is like exit(), but it doesn't call any functions registered
-      # with atexit (and on_exit) or any registered signal handlers.  It also
-      # closes any open file descriptors.  Using exit() may cause all stdio
-      # streams to be flushed twice and any temporary files may be unexpectedly
-      # removed.  It's therefore recommended that child branches of a fork()
-      # and the parent branch(es) of a daemon use _exit().
-      os._exit(0)       # Exit parent of the first child.
-
-   # Close all open file descriptors.  This prevents the child from keeping
-   # open any file descriptors inherited from the parent.  There is a variety
-   # of methods to accomplish this task.  Three are listed below.
-   #
-   # Try the system configuration variable, SC_OPEN_MAX, to obtain the maximum
-   # number of open file descriptors to close.  If it doesn't exists, use
-   # the default value (configurable).
-   #
-   # try:
-   #    maxfd = os.sysconf("SC_OPEN_MAX")
-   # except (AttributeError, ValueError):
-   #    maxfd = MAXFD
-   #
-   # OR
-   #
-   # if (os.sysconf_names.has_key("SC_OPEN_MAX")):
-   #    maxfd = os.sysconf("SC_OPEN_MAX")
-   # else:
-   #    maxfd = MAXFD
-   #
-   # OR
-   #
-   # Use the getrlimit method to retrieve the maximum file descriptor number
-   # that can be opened by this process.  If there is not limit on the
-   # resource, use the default value.
-   #
-   import resource              # Resource usage information.
-   maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
-   if (maxfd == resource.RLIM_INFINITY):
-      maxfd = MAXFD
-  
-   # Iterate through and close all file descriptors.
-   for fd in range(0, maxfd):
-      try:
-         os.close(fd)
-      except OSError:   # ERROR, fd wasn't open to begin with (ignored)
-         pass
-
-   # Redirect the standard I/O file descriptors to the specified file.  Since
-   # the daemon has no controlling terminal, most daemons redirect stdin,
-   # stdout, and stderr to /dev/null.  This is done to prevent side-effects
-   # from reads and writes to the standard I/O file descriptors.
-
-   # This call to open is guaranteed to return the lowest file descriptor,
-   # which will be 0 (stdin), since it was closed above.
-   os.open(REDIRECT_TO, os.O_RDWR)      # standard input (0)
-
-   # Duplicate standard input to standard output and standard error.
-   os.dup2(0, 1)                        # standard output (1)
-   os.dup2(0, 2)                        # standard error (2)
-
-   return(0)
-
-if __name__ == "__main__":
-
-   retCode = createDaemon()
-
-   sys.exit(retCode)
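
Note: the net effect of the double fork above is visible in the process lineage: afterwards the survivor is re-parented to init (ppid 1) and leads its own session. A sketch of inspecting that state (Unix-only; run before and after daemonizing to compare):

import os

def report_lineage(tag):
    print("%s: pid=%d ppid=%d sid=%d" % (tag, os.getpid(), os.getppid(),
                                         os.getsid(0)))

report_lineage("before")  # after createDaemon(), expect ppid=1 and sid=pid
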
diff --git a/branch-1.2/ambari-agent/src/main/python/ambari_agent/daemon.py b/branch-1.2/ambari-agent/src/main/python/ambari_agent/daemon.py
deleted file mode 100644
index 31607f5..0000000
--- a/branch-1.2/ambari-agent/src/main/python/ambari_agent/daemon.py
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-from shell import shellRunner
-
-class daemonRunner:
-    def start(self, name):
-        sh = shellRunner()
-        script = [ '/etc/init.d/'+name, 'start' ]
-        return sh.run(script)
-
-    def stop(self, name):
-        sh = shellRunner()
-        script = [ '/etc/init.d/'+name, 'stop' ]
-        return sh.run(script)
-
-    def status(self, name):
-        sh = shellRunner()
-        script = [ '/etc/init.d/'+name, 'status' ]
-        return sh.run(script)
diff --git a/branch-1.2/ambari-agent/src/main/python/ambari_agent/hostname.py b/branch-1.2/ambari-agent/src/main/python/ambari_agent/hostname.py
deleted file mode 100644
index 3389aab..0000000
--- a/branch-1.2/ambari-agent/src/main/python/ambari_agent/hostname.py
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import socket
-import subprocess
-import AmbariConfig
-import urllib2
-
-def hostname():
-  config = AmbariConfig.config
-  try:
-    scriptname = config.get('agent', 'hostname_script')
-    try: 
-      osStat = subprocess.Popen([scriptname], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-      out, err = osStat.communicate()
-      if (0 == osStat.returncode and 0 != len(out.strip())):
-        return out.strip()
-      else:
-        return socket.getfqdn()
-    except:
-      return socket.getfqdn()
-  except:
-    return socket.getfqdn()
-
-def public_hostname():
-  # future - do an agent entry for this too
-  try:
-    # data=None keeps the request a GET; 2 is the timeout in seconds
-    handle = urllib2.urlopen('http://169.254.169.254/latest/meta-data/public-hostname', None, 2)
-    out = handle.read()
-    handle.close()
-    return out
-  except Exception, e:
-    return socket.getfqdn()
-
-def main(argv=None):
-  print hostname()
-  print public_hostname()
-
-if __name__ == '__main__':
-  main()
diff --git a/branch-1.2/ambari-agent/src/main/python/ambari_agent/machine.py b/branch-1.2/ambari-agent/src/main/python/ambari_agent/machine.py
deleted file mode 100644
index f474b3c..0000000
--- a/branch-1.2/ambari-agent/src/main/python/ambari_agent/machine.py
+++ /dev/null
@@ -1,60 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import sys
-import subprocess
-
-# please keep compatible with Python 2.4 or greater
-def doExec(key, command, preLF=False):
-  try:
-    osStat = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    out, err = osStat.communicate()
-    if 0 != osStat.returncode or 0 == len(out.strip()):
-      print "%s: UNAVAILABLE" % (key)
-    else:
-      if (preLF):
-        print "%s: ok\n %s" % (key, out.strip())
-      else:
-        print "%s: ok %s" % (key, out.strip())
-  except:
-    print "%s: UNAVAILABLE" % (key)
-  
-def main(argv=None):
-  doExec('hostname', ["hostname", "-f"])
-  doExec('ip', ["hostname", "-i"])
-  doExec('cpu', ["sh", "-c", "cat /proc/cpuinfo | grep 'model name' | awk -F': ' '{ print $2; }'"])
-  doExec('memory', ["sh", "-c", "cat /proc/meminfo | grep MemTotal | awk -F': ' '{ print $2/1024/1024 \" GB\"; }'"])
-  doExec('disks', ["df", "-h"], True)
-  doExec('os', ["sh", "-c", "cat /etc/issue.net | head -1"])
-  doExec('iptables', ["iptables", "-vnL"], True)
-  doExec('selinux', ["sh", "-c", "cat /etc/selinux/config | grep ^SELINUX"])
-
-  for REQ in (["yum", "rpm", "openssl", "curl", "wget", "net-snmp", "net-snmp-utils", "ntpd"]):
-    doExec(REQ, ["rpm", "-qa", REQ])
-
-  for OPT in (["ruby", "puppet", "nagios", "ganglia", "passenger", "hadoop"]):
-    doExec(OPT, ["rpm", "-qa", OPT])
-
-  doExec("yum_repos", ["sh", "-c", "yum -C repolist enabled | egrep \"(AMBARI|HDP)\""], True)
-  # for SUSE-based agents
-  doExec("zypper_repos", ["sh", "-c", "zypper repos | egrep \"(AMBARI|HDP)\""], True)
-
-if __name__ == '__main__':
-  main(sys.argv)
diff --git a/branch-1.2/ambari-agent/src/main/python/ambari_agent/main.py b/branch-1.2/ambari-agent/src/main/python/ambari_agent/main.py
deleted file mode 100644
index 527be9d..0000000
--- a/branch-1.2/ambari-agent/src/main/python/ambari_agent/main.py
+++ /dev/null
@@ -1,168 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import logging
-import logging.handlers
-import code
-import signal
-from optparse import OptionParser
-import sys, traceback
-import os
-import time
-import ConfigParser
-import ProcessHelper
-from createDaemon import createDaemon
-from Controller import Controller
-from shell import killstaleprocesses
-import AmbariConfig
-from security import CertificateManager
-from NetUtil import NetUtil
-
-logger = logging.getLogger()
-agentPid = os.getpid()
-
-if 'AMBARI_LOG_DIR' in os.environ:
-  logfile = os.environ['AMBARI_LOG_DIR'] + "/ambari-agent.log"
-else:
-  logfile = "/var/log/ambari-agent/ambari-agent.log"
-
-def signal_handler(signum, frame):
-  #we want the handler to run only for the agent process and not
-  #for the children (e.g. namenode, etc.)
-  if (os.getpid() != agentPid):
-    os._exit(0)
-  logger.info('signal received, exiting.')
-  ProcessHelper.stopAgent()
-
-def debug(sig, frame):
-    """Interrupt running process, and provide a python prompt for
-    interactive debugging."""
-    d={'_frame':frame}         # Allow access to frame object.
-    d.update(frame.f_globals)  # Unless shadowed by global
-    d.update(frame.f_locals)
-
-    message  = "Signal received : entering python shell.\nTraceback:\n"
-    message += ''.join(traceback.format_stack(frame))
-    logger.info(message)
-
-
-
-
-def main():
-  global config
-  parser = OptionParser()
-  parser.add_option("-v", "--verbose", dest="verbose", action="store_true", help="verbose log output", default=False)
-  (options, args) = parser.parse_args()
-
-  formatstr = "%(levelname)s %(asctime)s %(filename)s:%(lineno)d - %(message)s"
-  formatter = logging.Formatter(formatstr)
-  rotateLog = logging.handlers.RotatingFileHandler(logfile, "a", 10000000, 25)
-  rotateLog.setFormatter(formatter)
-  logger.addHandler(rotateLog)
-
-  if options.verbose:
-    logging.basicConfig(format=formatstr, level=logging.DEBUG, filename=logfile)
-    logger.setLevel(logging.DEBUG)
-  else:
-    logging.basicConfig(format=formatstr, level=logging.INFO, filename=logfile)
-    logger.setLevel(logging.INFO)
-
-  logger.debug("loglevel=logging.DEBUG")
-
-  default_cfg = { 'agent' : { 'prefix' : '/home/ambari' } }
-  config = ConfigParser.RawConfigParser(default_cfg)
-  signal.signal(signal.SIGINT, signal_handler)
-  signal.signal(signal.SIGTERM, signal_handler)
-  signal.signal(signal.SIGUSR1, debug)
-  if (len(sys.argv) >1) and sys.argv[1]=='stop':
-    # stop existing Ambari agent
-    pid = -1
-    try:
-      f = open(ProcessHelper.pidfile, 'r')
-      pid = f.read()
-      pid = int(pid)
-      f.close()
-      os.kill(pid, signal.SIGTERM)
-      time.sleep(5)
-      if os.path.exists(ProcessHelper.pidfile):
-        raise Exception("PID file still exists.")
-      os._exit(0)
-    except Exception, err:
-      if pid == -1:
-        print ("Agent process is not running")
-      else:
-        os.kill(pid, signal.SIGKILL)
-      os._exit(1)
-
-  # Check for ambari configuration file.
-  try:
-    config = AmbariConfig.config
-    if os.path.exists('/etc/ambari-agent/conf/ambari-agent.ini'):
-      config.read('/etc/ambari-agent/conf/ambari-agent.ini')
-      AmbariConfig.setConfig(config)
-    else:
-      raise Exception("No config found, use default")
-  except Exception, err:
-    logger.warn(err)
-
-  # Check if there is another instance running
-  if os.path.isfile(ProcessHelper.pidfile):
-    print("%s already exists, exiting" % ProcessHelper.pidfile)
-    sys.exit(1)
-  # check if ambari prefix exists
-  elif not os.path.isdir(config.get("agent", "prefix")):
-    msg = "Ambari prefix dir %s does not exists, can't continue" \
-          % config.get("agent", "prefix")
-    logger.error(msg)
-    print(msg)
-    sys.exit(1)
-  else:
-    # Daemonize current instance of Ambari Agent
-    #retCode = createDaemon()
-    pid = str(os.getpid())
-    file(ProcessHelper.pidfile, 'w').write(pid)
-
-  credential = None
-
-  killstaleprocesses()
-
-  server_url = 'https://' + config.get('server', 'hostname') + ':' + config.get('server', 'url_port')
-  print("Connecting to the server at " + server_url + "...")
-  logger.info('Connecting to the server at: ' + server_url)
-
-  # Wait until server is reachable
-  netutil = NetUtil()
-  netutil.try_to_connect(server_url, -1, logger)
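-  # NOTE: the retry count of -1 passed above appears to mean "retry forever",
-  # i.e. block until the server answers (an assumption about NetUtil).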
-
-  # Initialize security: create the agent certificates if they do not exist yet
-  logger.info("Creating certs")
-  certMan = CertificateManager(config)
-  certMan.initSecurity()
-  
-  # Launch Controller communication
-  controller = Controller(config)
-  controller.start()
-  # TODO: is run() call necessary?
-  controller.run()
-  logger.info("finished")
-    
-if __name__ == "__main__":
-  main()
diff --git a/branch-1.2/ambari-agent/src/main/python/ambari_agent/manifestGenerator.py b/branch-1.2/ambari-agent/src/main/python/ambari_agent/manifestGenerator.py
deleted file mode 100644
index 2ebed09..0000000
--- a/branch-1.2/ambari-agent/src/main/python/ambari_agent/manifestGenerator.py
+++ /dev/null
@@ -1,273 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import json
-import os.path
-import logging
-from uuid import getnode as get_mac
-from shell import shellRunner
-from datetime import datetime
-import AmbariConfig
-
-
-logger = logging.getLogger()
-
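-# Configuration types listed below are emitted into the manifest as a nested
-# $configuration hash (one entry per *-site file); all other types are
-# flattened into top-level Puppet variables by writeFlatConfigurations().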
-non_global_configuration_types = ["hdfs-site", "core-site",
-                                  "mapred-queue-acls", "hadoop-policy",
-                                  "mapred-site", "capacity-scheduler",
-                                  "hbase-site", "hbase-policy", "hive-site",
-                                  "oozie-site", "webhcat-site",
-                                  "hdfs-exclude-file"]
-
-#read static imports from file and write them to manifest
-def writeImports(outputFile, modulesdir, importsList):
-  logger.info("Modules dir is " + modulesdir)
-  outputFile.write('#' + datetime.now().strftime('%d.%m.%Y %H:%M:%S') + os.linesep)
-  for line in importsList:
-    modulename = line.rstrip()
-    line = "import '" + modulesdir + os.sep + modulename + "'" + os.linesep
-    outputFile.write(line)
-
-
-def generateManifest(parsedJson, fileName, modulesdir, ambariconfig):
-  logger.info("JSON Received:")
-  logger.info(json.dumps(parsedJson, sort_keys=True, indent=4))
-  #reading json
-  hostname = parsedJson['hostname']
-  clusterHostInfo = {} 
-  if 'clusterHostInfo' in parsedJson:
-    if parsedJson['clusterHostInfo']:
-      clusterHostInfo = parsedJson['clusterHostInfo']
-  params = {}
-  if 'hostLevelParams' in parsedJson: 
-    if parsedJson['hostLevelParams']:
-      params = parsedJson['hostLevelParams']
-  configurations = {}
-  if 'configurations' in parsedJson:
-    if parsedJson['configurations']:
-      configurations = parsedJson['configurations']
-  nonGlobalConfigurationsKeys = non_global_configuration_types
-  #hostAttributes = parsedJson['hostAttributes']
-  roleParams = {}
-  if 'roleParams' in parsedJson:
-    if parsedJson['roleParams']:
-      roleParams = parsedJson['roleParams']
-  roles = [{'role' : parsedJson['role'],
-            'cmd' : parsedJson['roleCommand'],
-            'roleParams' : roleParams}]
-  #writing manifest
-  manifest = open(fileName, 'w')
-  # write imports from the external static list (AmbariConfig.imports)
-  writeImports(outputFile=manifest, modulesdir=modulesdir, importsList=AmbariConfig.imports)
-  
-  #writing nodes
-  writeNodes(manifest, clusterHostInfo)
-  
-  #writing params from map
-  writeParams(manifest, params, modulesdir)
-  
-  
-  nonGlobalConfigurations = {}
-  flatConfigurations = {}
-
-  if configurations: 
-    for configKey in configurations.iterkeys():
-      if configKey in nonGlobalConfigurationsKeys:
-        nonGlobalConfigurations[configKey] = configurations[configKey]
-      else:
-        flatConfigurations[configKey] = configurations[configKey]
-      
-  #writing config maps
-  if (nonGlobalConfigurations):
-    writeNonGlobalConfigurations(manifest, nonGlobalConfigurations)
-  if (flatConfigurations):
-    writeFlatConfigurations(manifest, flatConfigurations)
-
-  #writing host attributes
-  #writeHostAttributes(manifest, hostAttributes)
-
-  #writing task definitions 
-  writeTasks(manifest, roles, ambariconfig, clusterHostInfo, hostname)
-     
-  manifest.close()
-    
-
-#write nodes
-def writeNodes(outputFile, clusterHostInfo):
-  if clusterHostInfo.has_key('zookeeper_hosts'):
-    clusterHostInfo['zookeeper_hosts'] = sorted(clusterHostInfo['zookeeper_hosts'])
-  
-  for node in clusterHostInfo.iterkeys():
-    outputFile.write('$' + node + '= [')
-    comma = ''
-
-    for value in clusterHostInfo[node]:
-      outputFile.write(comma + '\'' + value + '\'')
-      comma = ', '
-
-    outputFile.write(']\n')
-
-#write params
-def writeParams(outputFile, params, modulesdir):
-
-  for paramName in params.iterkeys():
-    if paramName == 'repo_info':
-      continue
-
-    param = params[paramName]
-    if type(param) is dict:
-
-      outputFile.write('$' + paramName + '= {\n')
-
-      comma = ''
-
-      for subParam in param.iterkeys():
-        outputFile.write(comma + '"' + subParam + '" => "' + param[subParam] + '"')
-        comma = ',\n'
-
-      outputFile.write('\n}\n')
-    else:
-      outputFile.write('$' + paramName + '="' + param + '"\n')
-
-#write host attributes
-def writeHostAttributes(outputFile, hostAttributes):
-  outputFile.write('$hostAttributes={\n')
-
-  comma = ''
-  for attribute in hostAttributes.iterkeys():
-    outputFile.write(comma + '"' + attribute + '" => "' + hostAttributes[attribute] + '"')
-    comma = ',\n'
-
-  outputFile.write('}\n')
-
-#write flat configurations
-def writeFlatConfigurations(outputFile, flatConfigs):
-  flatDict = {}
-  for flatConfigName in flatConfigs.iterkeys():
-    for flatConfig in flatConfigs[flatConfigName].iterkeys():
-      flatDict[flatConfig] = flatConfigs[flatConfigName][flatConfig]
-  for gconfigKey in flatDict.iterkeys():
-    outputFile.write('$' + gconfigKey + ' = "' + flatDict[gconfigKey] + '"' + os.linesep)
-
-#write xml configurations
-def writeNonGlobalConfigurations(outputFile, xmlConfigs):
-  outputFile.write('$configuration =  {\n')
-
-  for configName in xmlConfigs.iterkeys():
-
-    config = xmlConfigs[configName]
-    
-    outputFile.write(configName + '=> {\n')
-    comma = ''
-    for configParam in config.iterkeys():
-      outputFile.write(comma + '"' + configParam + '" => \'' + config[configParam] + '\'')
-      comma = ',\n'
-
-    outputFile.write('\n},\n')
-    
-  outputFile.write('\n}\n')
-
-#write node tasks
-def writeTasks(outputFile, roles, ambariconfig, clusterHostInfo=None, 
-               hostname="localhost"):
-  #reading dictionaries
-  rolesToClass = AmbariConfig.rolesToClass
-
-  serviceStates = AmbariConfig.serviceStates
-
-  outputFile.write('node /default/ {\n ')
-
-  writeStages(outputFile, len(roles) + 1)
-  stageNum = 1
-
-  outputFile.write('class {\'hdp\': stage => ' + str(stageNum) + '}\n')
-  stageNum = stageNum + 1
-  # Special case for ZooKeeper: each server needs a unique 'myid' derived
-  # from its position in the lexicographically sorted host list
-  zk_hosts = []
-  for role in roles :
-    rolename = role['role']
-    command = role['cmd']
-    taskParams = role['roleParams']
-    if (rolename == 'ZOOKEEPER_SERVER'):
-      zk_hosts = clusterHostInfo['zookeeper_hosts']
-      # Sort the list in lexicographical order
-      taskParams['myid'] = str(sorted(zk_hosts).index(hostname) + 1)
-    
-    taskParamsNormalized = normalizeTaskParams(taskParams)
-    taskParamsPostfix = ''
-    
-    if len(taskParamsNormalized) > 0 :
-      taskParamsPostfix = ', ' + taskParamsNormalized
-    
-    className = rolesToClass[rolename]
-   
-    if command in serviceStates:
-      serviceState = serviceStates[command] 
-      outputFile.write('class {\'' + className + '\':' +
-                        ' stage => ' + str(stageNum) + 
-                     ', service_state => ' + serviceState 
-                     + taskParamsPostfix + '}\n')
-    else:
-      outputFile.write('class {\'' + className + '\':' + 
-                       ' stage => ' + str(stageNum) + 
-                       taskParamsPostfix + '}\n')
-
-    stageNum = stageNum + 1
-  outputFile.write('}\n')
-
-def normalizeTaskParams(taskParams):
-  result = ''
-  comma = ''
-
-  for paramName in taskParams.iterkeys():
-    # join params as 'name => value' pairs separated by commas
-    result = result + comma + paramName + ' => ' + taskParams[paramName]
-    comma = ','
-
-  return result
-  
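-# Emit a 'stage{1 :} -> stage{2 :} -> ...' chain so Puppet applies the classes
-# declared in writeTasks() in strict order: the base hdp class first, then one
-# stage per role.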
-def writeStages(outputFile, numStages):
-  arrow = ''
-  
-  for i in range(numStages):
-    outputFile.write(arrow + 'stage{' + str(i + 1) + ' :}')
-    arrow = ' -> '
-  
-  outputFile.write('\n')
-
-
-  
-def main():
-  logging.basicConfig(level=logging.DEBUG)
-  #test code
-  jsonFile = open('test.json', 'r')
-  jsonStr = jsonFile.read()
-  modulesdir = os.path.abspath(os.getcwd() + os.sep + ".." + os.sep + ".." +
-                               os.sep + ".." + os.sep + "puppet" +
-                               os.sep + "modules" + os.sep)
-  parsedJson = json.loads(jsonStr)
-  # generateManifest requires a config object; pass the default agent config
-  generateManifest(parsedJson, 'site.pp', modulesdir, AmbariConfig.config)
-
-if __name__ == '__main__':
-  main()
-
diff --git a/branch-1.2/ambari-agent/src/main/python/ambari_agent/puppetExecutor.py b/branch-1.2/ambari-agent/src/main/python/ambari_agent/puppetExecutor.py
deleted file mode 100644
index 784f4da..0000000
--- a/branch-1.2/ambari-agent/src/main/python/ambari_agent/puppetExecutor.py
+++ /dev/null
@@ -1,242 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-import json
-import os.path
-import logging
-import subprocess
-from manifestGenerator import generateManifest
-from RepoInstaller import RepoInstaller
-import pprint, threading
-from Grep import Grep
-from threading import Thread
-import shell
-import traceback
-
-logger = logging.getLogger()
-
-class puppetExecutor:
-
-  """ Class that executes the commands that come from the server using puppet.
-  This is the class that provides the pluggable point for executing the puppet"""
-
-  # How many lines from command output send to server
-  OUTPUT_LAST_LINES = 10
-  # How many lines from command error output send to server (before Err phrase)
-  ERROR_LAST_LINES_BEFORE = 30
-  # How many lines from command error output send to server (after Err phrase)
-  ERROR_LAST_LINES_AFTER = 30
-
-  # How many seconds a puppet run may take before it is killed on timeout
-  PUPPET_TIMEOUT_SECONDS = 600
-
-  event = threading.Event()
-  last_puppet_has_been_killed = False
-
-  NO_ERROR = "none"
-
-  def __init__(self, puppetModule, puppetInstall, facterInstall, tmpDir, config):
-    self.puppetModule = puppetModule
-    self.puppetInstall = puppetInstall
-    self.facterInstall = facterInstall
-    self.tmpDir = tmpDir
-    self.reposInstalled = False
-    self.config = config
-
-  def configureEnviron(self, environ):
-    if not self.config.has_option("puppet", "ruby_home"):
-      return environ
-    ruby_home = self.config.get("puppet", "ruby_home")
-    if os.path.exists(ruby_home):
-      # ruby_home is configured and present: prepend its bin dir to PATH
-      path = os.environ["PATH"]
-      if not ruby_home in path:
-        environ["PATH"] = ruby_home + os.path.sep + "bin" + ":" + environ["PATH"]
-      environ["MY_RUBY_HOME"] = ruby_home
-    return environ
-    
-  def getPuppetBinary(self):
-    puppetbin = os.path.join(self.puppetInstall, "bin", "puppet") 
-    if (os.path.exists(puppetbin)):
-      return puppetbin
-    else:
-      logger.info(puppetbin + " does not exist, using the default 'puppet' "
-                  "binary available on the host")
-      return "puppet"
-     
-  def deployRepos(self, command, tmpDir, modulesdir, taskId):
-    """ Hack to only create the repo files once """
-    result = []
-    if (not self.reposInstalled):
-      repoInstaller = RepoInstaller(command, tmpDir, modulesdir, taskId, self.config)
-      result = repoInstaller.installRepos()
-    return result
-  
-  def puppetCommand(self, sitepp):
-    modules = self.puppetModule
-    puppetcommand = [self.getPuppetBinary(), "apply", "--confdir=" + modules, "--detailed-exitcodes", sitepp]
-    return puppetcommand
-  
-  def facterLib(self):
-    return self.facterInstall + "/lib/"
-
-  def puppetLib(self):
-    return self.puppetInstall + "/lib"
-
-  def condenseOutput(self, stdout, stderr, retcode):
-    grep = Grep()
-    if stderr == self.NO_ERROR:
-      result = grep.tail(stdout, self.OUTPUT_LAST_LINES)
-    else:
-      result = grep.grep(stdout, "fail", self.ERROR_LAST_LINES_BEFORE, self.ERROR_LAST_LINES_AFTER)
-      if result is None: # Second try
-        result = grep.grep(stdout, "err", self.ERROR_LAST_LINES_BEFORE, self.ERROR_LAST_LINES_AFTER)
-    filteredresult = grep.filterMarkup(result)
-    return filteredresult
-
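-  # With --detailed-exitcodes, puppet returns 0 (no changes) or 2 (changes
-  # applied) on success; any other code, or a watchdog kill, is a failure.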
-  def isSuccessfull(self, returncode):
-    return not self.last_puppet_has_been_killed and (returncode == 0 or returncode == 2)
-
-  def runCommand(self, command, tmpoutfile, tmperrfile):
-    result = {}
-    taskId = 0
-    if command.has_key("taskId"):
-      taskId = command['taskId']
-      
-    puppetEnv = os.environ
-    #Install repos
-    modulesdir = self.puppetModule + "/modules"
-    puppetFiles = self.deployRepos(command, self.tmpDir, modulesdir, taskId)
-    siteppFileName = os.path.join(self.tmpDir, "site-" + str(taskId) + ".pp") 
-    puppetFiles.append(siteppFileName)
-    generateManifest(command, siteppFileName, modulesdir, self.config)
-    #Run all puppet commands, from manifest generator and for repos installation
-    #Appending outputs and errors, exitcode - maximal from all
-    for puppetFile in puppetFiles:
-      self.runPuppetFile(puppetFile, result, puppetEnv, tmpoutfile, tmperrfile)
-      # Stop at the first failed puppet command and report the error
-      if not self.isSuccessfull(result["exitcode"]):
-        break
-
-    if self.isSuccessfull(result["exitcode"]):
-      # All repo files were applied successfully; skip repo deployment next time
-      self.reposInstalled = True
-      
-    logger.info("ExitCode : "  + str(result["exitcode"]))
-    return result
-
-  def runPuppetFile(self, puppetFile, result, puppetEnv, tmpoutfile, tmperrfile):
-    """ Run the command and make sure the output gets propagated"""
-    puppetcommand = self.puppetCommand(puppetFile)
-    rubyLib = ""
-    if os.environ.has_key("RUBYLIB"):
-      rubyLib = os.environ["RUBYLIB"]
-      logger.info("RUBYLIB from Env " + rubyLib)
-    if not (self.facterLib() in rubyLib):
-      rubyLib = rubyLib + ":" + self.facterLib()
-    if not (self.puppetLib() in rubyLib):
-      rubyLib = rubyLib + ":" + self.puppetLib()
-    tmpout =  open(tmpoutfile, 'w')
-    tmperr =  open(tmperrfile, 'w')
-    puppetEnv["RUBYLIB"] = rubyLib
-    puppetEnv = self.configureEnviron(puppetEnv)
-    logger.info("Setting RUBYLIB as: " + rubyLib)
-    logger.info("Running command " + pprint.pformat(puppetcommand))
-    puppet = self.lauch_puppet_subprocess(puppetcommand,tmpout, tmperr, puppetEnv)
-    logger.info("Launching watchdog thread")
-    self.event.clear()
-    self.last_puppet_has_been_killed = False
-    thread = Thread(target =  self.puppet_watchdog_func, args = (puppet, ))
-    thread.start()
-    # Wait for the process to finish or be killed by the watchdog
-    puppet.communicate()
-    self.event.set()
-    thread.join()
-    # Building results
-    error = self.NO_ERROR
-    returncode = 0
-    if not self.isSuccessfull(puppet.returncode):
-      returncode = puppet.returncode
-      error = open(tmperrfile, 'r').read()
-      logging.error("Error running puppet: \n" + str(error))
-      pass
-    if self.last_puppet_has_been_killed:
-      error = str(error) + "\n Puppet has been killed due to timeout"
-      returncode = 999
-    if result.has_key("stderr"):
-      result["stderr"] = result["stderr"] + os.linesep + str(error)
-    else:
-      result["stderr"] = str(error)
-    puppetOutput = open(tmpoutfile, 'r').read()
-    logger.info("Output from puppet :\n" + puppetOutput)
-    logger.info("Puppet exit code is " + str(returncode))
-    if result.has_key("exitcode"):
-      result["exitcode"] = max(returncode, result["exitcode"])
-    else:
-      result["exitcode"] = returncode
-    condensed = self.condenseOutput(puppetOutput, error, returncode)
-    if result.has_key("stdout"):
-      result["stdout"] = result["stdout"] + os.linesep + str(condensed)
-    else:
-      result["stdout"] = str(condensed)
-    return result
-
-  def lauch_puppet_subprocess(self, puppetcommand, tmpout, tmperr, puppetEnv):
-    """
-    Creates subprocess with given parameters. This functionality was moved to separate method
-    to make possible unit testing
-    """
-    return subprocess.Popen(puppetcommand,
-      stdout=tmpout,
-      stderr=tmperr,
-      env=puppetEnv)
-
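-  # Watchdog, run on a separate thread: if the main thread has not signalled
-  # completion within PUPPET_TIMEOUT_SECONDS, the puppet process group is
-  # killed and the run is reported as timed out (exit code 999).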
-  def puppet_watchdog_func(self, puppet):
-    self.event.wait(self.PUPPET_TIMEOUT_SECONDS)
-    if puppet.returncode is None:
-      logger.error("Task timed out and will be killed")
-      self.runShellKillPgrp(puppet)
-      self.last_puppet_has_been_killed = True
-
-  def runShellKillPgrp(self, puppet):
-    shell.killprocessgrp(puppet.pid)
-
-def main():
-  logging.basicConfig(level=logging.DEBUG)
-  #test code only: run one sample command through the executor
-  import AmbariConfig
-  jsonFile = open('test.json', 'r')
-  jsonStr = jsonFile.read()
-  parsedJson = json.loads(jsonStr)
-  # the executor also needs the agent config as its fifth argument
-  puppetInstance = puppetExecutor("/home/centos/ambari_repo_info/ambari-agent/src/main/puppet/",
-                                  "/usr/",
-                                  "/root/workspace/puppet-install/facter-1.6.10/",
-                                  "/tmp", AmbariConfig.config)
-  result = puppetInstance.runCommand(parsedJson, '/tmp/out.txt', '/tmp/err.txt')
-  logger.debug(result)
-  
-if __name__ == '__main__':
-  main()
-
diff --git a/branch-1.2/ambari-agent/src/main/python/ambari_agent/security.py b/branch-1.2/ambari-agent/src/main/python/ambari_agent/security.py
deleted file mode 100644
index 8c17764..0000000
--- a/branch-1.2/ambari-agent/src/main/python/ambari_agent/security.py
+++ /dev/null
@@ -1,195 +0,0 @@
-#!/usr/bin/env python2.6
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import httplib
-import urllib2
-from urllib2 import Request
-import socket
-import hostname
-import ssl
-import os
-import logging
-from subprocess import Popen, PIPE
-import AmbariConfig
-import json
-import pprint
-import traceback
-logger = logging.getLogger()
-
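-# Shell command template for generating the agent's private key and
-# certificate signing request (CSR); %(keysdir)s and %(hostname)s are
-# substituted by genAgentCrtReq().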
-GEN_AGENT_KEY="openssl req -new -newkey rsa:1024 -nodes -keyout %(keysdir)s/%(hostname)s.key\
-	-subj /OU=%(hostname)s/\
-        -out %(keysdir)s/%(hostname)s.csr"
-
-
-class VerifiedHTTPSConnection(httplib.HTTPSConnection):
-  """ Connecting using ssl wrapped sockets """
-  def __init__(self, host, port=None, key_file=None, cert_file=None,
-                     strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
-    httplib.HTTPSConnection.__init__(self, host, port=port)
-    pass
-     
-  def connect(self):
-    if self.sock:
-      self.sock.close()
-    logger.info("SSL Connect being called.. connecting to the server")
-    sock = socket.create_connection((self.host, self.port), 60)
-    sock.setsockopt( socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
-    if self._tunnel_host:
-      self.sock = sock
-      self._tunnel()
-    agent_key = AmbariConfig.config.get('security', 'keysdir') + os.sep + \
-                hostname.hostname() + ".key"
-    agent_crt = AmbariConfig.config.get('security', 'keysdir') + os.sep + \
-                hostname.hostname() + ".crt"
-    server_crt = AmbariConfig.config.get('security', 'keysdir') + os.sep + \
-                 "ca.crt"
-    
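-    # Mutual TLS: present the agent's own key/cert and require the server's
-    # certificate to validate against the previously downloaded CA (ca.crt).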
-    self.sock = ssl.wrap_socket(sock,
-                                keyfile=agent_key,
-                                certfile=agent_crt,
-                                cert_reqs=ssl.CERT_REQUIRED,
-                                ca_certs=server_crt)
-
-
-class CachedHTTPSConnection:
-  """ Caches a ssl socket and uses a single https connection to the server. """
-  
-  def __init__(self, config):
-    self.connected = False
-    self.config = config
-    self.server = config.get('server', 'hostname')
-    self.port = config.get('server', 'secured_url_port')
-    self.connect()
-  
-  def connect(self):
-    if not self.connected:
-      self.httpsconn = VerifiedHTTPSConnection(self.server, self.port)
-      self.httpsconn.connect()
-      self.connected = True
-    # possible exceptions are caught and processed in Controller
-
-  
-  def forceClear(self):
-    self.httpsconn = VerifiedHTTPSConnection(self.server, self.port)
-    self.connect()
-    
-  def request(self, req): 
-    self.connect()
-    try:
-      self.httpsconn.request(req.get_method(), req.get_full_url(), 
-                                  req.get_data(), req.headers)
-      response = self.httpsconn.getresponse()
-      readResponse = response.read()
-    except Exception as ex:
-      # This exception is caught later in Controller
-      logger.debug("Error in sending/receiving data from the server " +
-                   traceback.format_exc())
-      self.connected = False
-      raise IOError("Error occurred while connecting to the server: " + str(ex))
-    return readResponse
-  
-class CertificateManager():
-  def __init__(self, config):
-    self.config = config
-    self.keysdir = self.config.get('security', 'keysdir')
-    self.server_crt=self.config.get('security', 'server_crt')
-    self.server_url = 'https://' + self.config.get('server', 'hostname') + ':' \
-       + self.config.get('server', 'url_port')
-    
-  def getAgentKeyName(self):
-    keysdir = self.config.get('security', 'keysdir')
-    return keysdir + os.sep + hostname.hostname() + ".key"
-
-  def getAgentCrtName(self):
-    keysdir = self.config.get('security', 'keysdir')
-    return keysdir + os.sep + hostname.hostname() + ".crt"
-
-  def getAgentCrtReqName(self):
-    keysdir = self.config.get('security', 'keysdir')
-    return keysdir + os.sep + hostname.hostname() + ".csr"
-
-  def getSrvrCrtName(self):
-    keysdir = self.config.get('security', 'keysdir')
-    return keysdir + os.sep + "ca.crt"
-    
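-  # Certificate bootstrap: ensure the CA cert, the agent key and the signed
-  # agent cert all exist, downloading or requesting whatever is missing.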
-  def checkCertExists(self):
-
-    server_crt_path = self.config.get('security', 'keysdir') + os.sep + "ca.crt"
-
-    server_crt_exists = os.path.exists(server_crt_path)
-
-    if not server_crt_exists:
-      logger.info("Server certificate does not exist, downloading")
-      self.loadSrvrCrt()
-    else:
-      logger.info("Server certificate exists, ok")
-
-    agent_key_exists = os.path.exists(self.getAgentKeyName())
-
-    if not agent_key_exists:
-      logger.info("Agent key does not exist, generating a request")
-      self.genAgentCrtReq()
-    else:
-      logger.info("Agent key exists, ok")
-
-    agent_crt_exists = os.path.exists(self.getAgentCrtName())
-
-    if not agent_crt_exists:
-      logger.info("Agent certificate does not exist, sending sign request")
-      self.reqSignCrt()
-    else:
-      logger.info("Agent certificate exists, ok")
-            
-  def loadSrvrCrt(self):
-    get_ca_url = self.server_url + '/cert/ca/'
-    logger.info("Downloading server cert from " + get_ca_url)
-    stream = urllib2.urlopen(get_ca_url)
-    response = stream.read()
-    stream.close()
-    srvr_crt_f = open(self.getSrvrCrtName(), 'w+')
-    srvr_crt_f.write(response)
-    srvr_crt_f.close()
-      
-  def reqSignCrt(self):
-    sign_crt_req_url = self.server_url + '/certs/' + hostname.hostname()
-    agent_crt_req_f = open(self.getAgentCrtReqName())
-    agent_crt_req_content = agent_crt_req_f.read()
-    passphrase_env_var = self.config.get('security', 'passphrase_env_var_name')
-    passphrase = os.environ[passphrase_env_var]
-    register_data = {'csr'       : agent_crt_req_content,
-                    'passphrase' : passphrase}
-    data = json.dumps(register_data)
-    req = urllib2.Request(sign_crt_req_url, data, {'Content-Type': 'application/json'})
-    f = urllib2.urlopen(req)
-    response = f.read()
-    f.close()
-    data = json.loads(response)
-    logger.debug("Sign response from Server: \n" + pprint.pformat(data))
-    result=data['result']
-    if result == 'OK':
-      agentCrtContent = data['signedCa']
-      agentCrtF = open(self.getAgentCrtName(), "w")
-      agentCrtF.write(agentCrtContent)
-      agentCrtF.close()
-    else:
-      logger.error("Certificate signing failed")
-
-  def genAgentCrtReq(self):
-    generate_script = GEN_AGENT_KEY % {'hostname': hostname.hostname(),
-                                     'keysdir' : self.config.get('security', 'keysdir')}
-    logger.info(generate_script)
-    p = Popen([generate_script], shell=True, stdout=PIPE)
-    p.wait()
-      
-  def initSecurity(self):
-    self.checkCertExists()
diff --git a/branch-1.2/ambari-agent/src/main/python/ambari_agent/shell.py b/branch-1.2/ambari-agent/src/main/python/ambari_agent/shell.py
deleted file mode 100644
index a44079b..0000000
--- a/branch-1.2/ambari-agent/src/main/python/ambari_agent/shell.py
+++ /dev/null
@@ -1,296 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-from pwd import getpwnam
-from grp import getgrnam
-import AmbariConfig
-import logging
-import logging.handlers
-import subprocess
-import os
-import tempfile
-import signal
-import sys
-import threading
-import time
-import traceback
-import shutil
-
-global serverTracker
-serverTracker = {}
-logger = logging.getLogger()
-
-threadLocal = threading.local()
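-# threadLocal.uid is set by the calling thread and read by changeUid(), which
-# Popen runs as preexec_fn in the forked child before exec.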
-
-tempFiles = [] 
-def noteTempFile(filename):
-  tempFiles.append(filename)
-
-def getTempFiles():
-  return tempFiles
-
-def killstaleprocesses():
-  logger.info ("Killing stale processes")
-  prefix = AmbariConfig.config.get('stack','installprefix')
-  files = os.listdir(prefix)
-  for file in files:
-    if str(file).endswith(".pid"):
-      pid = str(file).split('.')[0]
-      killprocessgrp(int(pid))
-      os.unlink(os.path.join(prefix,file))
-  logger.info ("Killed stale processes")
-
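-# Terminate a whole process group: SIGTERM first, then SIGKILL after a five
-# second grace period (children are grouped via os.setsid() at launch).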
-def killprocessgrp(pid):
-  try:
-    os.killpg(pid, signal.SIGTERM)
-    time.sleep(5)
-    try:
-      os.killpg(pid, signal.SIGKILL)
-    except:
-      logger.warn("Failed to send SIGKILL to PID %d. Process exited?" % (pid))
-  except:
-    logger.warn("Failed to kill PID %d" % (pid))      
-
-def changeUid():
-  try:
-    os.setuid(threadLocal.uid)
-  except Exception:
-    logger.warn("cannot switch user for running command.")
-
-class shellRunner:
-  # Run any command
-  def run(self, script, user=None):
-    try:
-      if user is not None:
-        user = getpwnam(user)[2]
-      else:
-        user = os.getuid()
-      threadLocal.uid = user
-    except Exception:
-      logger.warn("cannot switch user for RUN_COMMAND.")
-    code = 0
-    cmd = " ".join(script)
-    p = subprocess.Popen(cmd, preexec_fn=changeUid, stdout=subprocess.PIPE, 
-                         stderr=subprocess.PIPE, shell=True, close_fds=True)
-    out, err = p.communicate()
-    code = p.wait()
-    logger.debug("Exitcode for %s is %d" % (cmd,code))
-    return {'exitCode': code, 'output': out, 'error': err}
-
-  # dispatch action types
-  def runAction(self, clusterId, component, role, 
-                user, command, cleanUpCommand, result):
-    oldDir = os.getcwd()
-    #TODO: handle this better. Don't like that it is doing a chdir for the main process
-    os.chdir(self.getWorkDir(clusterId, role))
-    try:
-      if user is not None:
-        user = getpwnam(user)[2]
-      else:
-        user = os.getuid()
-      threadLocal.uid = user
-    except Exception:
-      logger.warn("%s %s %s cannot switch user for RUN_ACTION."
-                  % (clusterId, component, role))
-    code = 0
-    cmd = sys.executable
-    tempfilename = tempfile.mktemp()
-    tmp = open(tempfilename, 'w')
-    tmp.write(command['script'])
-    tmp.close()
-    cmd = "%s %s %s" % (cmd, tempfilename, " ".join(command['param']))
-    commandResult = {}
-    p = subprocess.Popen(cmd, preexec_fn=changeUid, stdout=subprocess.PIPE,
-                          stderr=subprocess.PIPE, shell=True, close_fds=True)
-    out, err = p.communicate()
-    code = p.wait()
-    if code != 0:
-      commandResult['output'] = out
-      commandResult['error'] = err
-    commandResult['exitCode'] = code
-    result['commandResult'] = commandResult
-    os.unlink(tempfilename)
-    if code != 0:
-      tempfilename = tempfile.mktemp()
-      tmp = open(tempfilename, 'w')
-      tmp.write(command['script'])
-      tmp.close()
-      cmd = sys.executable
-      cmd = "%s %s %s" % (cmd, tempfilename, " ".join(cleanUpCommand['param']))
-      cleanUpCode = 0
-      cleanUpResult = {}
-      p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
-                            shell=True, close_fds=True)
-      out, err = p.communicate()
-      cleanUpCode = p.wait()
-      if cleanUpCode != 0:
-        cleanUpResult['output'] = out
-        cleanUpResult['error'] = err
-      cleanUpResult['exitCode'] = cleanUpCode
-      result['cleanUpResult'] = cleanUpResult
-      os.unlink(tempfilename)
-      os._exit(1)
-    try:
-      os.chdir(oldDir)
-    except Exception:
-      logger.warn("%s %s %s can not restore environment for RUN_ACTION."
-                   % (clusterId, component, role))
-    return result
-
-  # Start a process and persist its state
-  def startProcess(self, clusterId, clusterDefinitionRevision, component,
-                    role, script, user, result):
-    global serverTracker
-    oldDir = os.getcwd()
-    try:
-      os.chdir(self.getWorkDir(clusterId,role))
-    except Exception:
-      logger.warn("%s %s %s can not switch dir for START_ACTION."
-                   % (clusterId, component, role))
-    oldUid = os.getuid()
-    try:
-      if user is not None:
-        user=getpwnam(user)[2]
-      else:
-        user = os.getuid()
-      threadLocal.uid = user
-    except Exception:
-      logger.warn("%s %s %s can not switch user for START_ACTION." 
-                  % (clusterId, component, role))
-    code = 0
-    commandResult = {}
-    process = self.getServerKey(clusterId,clusterDefinitionRevision,
-                                component,role)
-    if process not in serverTracker:
-      plauncher = None
-      try:
-        plauncher = processlauncher(script, user)
-        plauncher.start()
-        plauncher.blockUntilProcessCreation()
-      except Exception:
-        traceback.print_exc()
-        logger.warn("Cannot launch process for %s %s %s"
-                    % (clusterId, component, role))
-        code = -1
-      if plauncher is not None:
-        serverTracker[process] = plauncher
-      commandResult['exitCode'] = code 
-      result['commandResult'] = commandResult
-    try:
-      os.chdir(oldDir)
-    except Exception:
-      logger.warn("%s %s %s can not restore environment for START_ACTION." \
-                   % (clusterId, component, role))
-    return result
-
-  # Stop a process and remove persisted state
-  def stopProcess(self, processKey):
-    global serverTracker
-    keyFragments = processKey.split('/')
-    process = self.getServerKey(keyFragments[0],keyFragments[1],
-                                keyFragments[2],keyFragments[3])
-    if process in serverTracker:
-      logger.info ("Sending %s with PID %d the SIGTERM signal"
-                    % (process,serverTracker[process].getpid()))
-      killprocessgrp(serverTracker[process].getpid())
-      del serverTracker[process]
-
-  def getServerTracker(self):
-    return serverTracker
-
-  def getServerKey(self,clusterId, clusterDefinitionRevision, component, role):
-    return clusterId+"/"+str(clusterDefinitionRevision)+"/"+component+"/"+role
-
-  def getWorkDir(self, clusterId, role):
-    prefix = AmbariConfig.config.get('stack','installprefix')
-    return str(os.path.join(prefix, clusterId, role))
-
-
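-# Runs a managed service script in its own session (setsid) and records a
-# <pid>.pid marker under the install prefix so killstaleprocesses() can reap
-# it after an agent restart.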
-class processlauncher(threading.Thread):
-  def __init__(self,script,uid):
-    threading.Thread.__init__(self)
-    self.script = script
-    self.serverpid = -1
-    self.uid = uid
-    self.out = None
-    self.err = None
-
-  def run(self):
-    try:
-      tempfilename = tempfile.mktemp()
-      noteTempFile(tempfilename)
-      pythoncmd = sys.executable
-      tmp = open(tempfilename, 'w')
-      tmp.write(self.script['script'])
-      tmp.close()
-      threadLocal.uid = self.uid
-      self.cmd = "%s %s %s" % (pythoncmd, tempfilename,
-                                " ".join(self.script['param']))
-      logger.info("Launching %s as uid %d" % (self.cmd,self.uid) )
-      p = subprocess.Popen(self.cmd,
-                            preexec_fn=self.changeUidAndSetSid, 
-                            stdout=subprocess.PIPE, 
-                            stderr=subprocess.PIPE, shell=True, close_fds=True)
-      logger.info("Launched %s; PID %d" % (self.cmd,p.pid))
-      self.serverpid = p.pid
-      self.out, self.err = p.communicate()
-      self.code = p.wait()
-      logger.info("%s; PID %d exited with code %d \nSTDOUT: %s\nSTDERR %s" % 
-                 (self.cmd,p.pid,self.code,self.out,self.err))
-    except:
-      logger.warn("Exception encountered while launching : " + self.cmd)
-      traceback.print_exc()
-
-    os.unlink(self.getpidfile())
-    os.unlink(tempfilename)
-
-  def blockUntilProcessCreation(self):
-    self.getpid()
- 
-  def getpid(self):
-    sleepCount = 0
-    while (self.serverpid == -1):
-      time.sleep(1)
-      sleepCount += 1
-      logger.info("Waiting for process %s to start" % self.cmd)
-      if sleepCount > 10:
-        logger.warn("Couldn't start process %s even after %d seconds"
-                     % (self.cmd, sleepCount))
-        os._exit(1)
-    return self.serverpid
-
-  def getpidfile(self):
-    prefix = AmbariConfig.config.get('stack','installprefix')
-    pidfile = os.path.join(prefix,str(self.getpid())+".pid")
-    return pidfile
- 
-  def changeUidAndSetSid(self):
-    prefix = AmbariConfig.config.get('stack','installprefix')
-    pidfile = os.path.join(prefix,str(os.getpid())+".pid")
-    #TODO remove try/except (when there is a way to provide
-    #config files for testcases). The default config will want
-    #to create files in /var/ambari which may not exist unless
-    #specifically created.
-    #At that point add a testcase for the pid file management.
-    try: 
-      f = open(pidfile,'w')
-      f.close()
-    except:
-      logger.warn("Couldn't write pid file %s for %s" % (pidfile,self.cmd))
-    changeUid()
-    os.setsid() 
diff --git a/branch-1.2/ambari-agent/src/main/python/ambari_agent/site.pp b/branch-1.2/ambari-agent/src/main/python/ambari_agent/site.pp
deleted file mode 100644
index a5badea..0000000
--- a/branch-1.2/ambari-agent/src/main/python/ambari_agent/site.pp
+++ /dev/null
@@ -1,92 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-import '/media/sf_/home/mahadev/workspace/ambari-workspace/ambari-git/ambari-agent/src/main/puppet/modules/hdp/manifests/*.pp'
-import '/media/sf_/home/mahadev/workspace/ambari-workspace/ambari-git/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/*.pp'
-import '/media/sf_/home/mahadev/workspace/ambari-workspace/ambari-git/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/*.pp'
-import '/media/sf_/home/mahadev/workspace/ambari-workspace/ambari-git/ambari-agent/src/main/puppet/modules/hdp-zookeeper/manifests/*.pp'
-import '/media/sf_/home/mahadev/workspace/ambari-workspace/ambari-git/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/*.pp'
-import '/media/sf_/home/mahadev/workspace/ambari-workspace/ambari-git/ambari-agent/src/main/puppet/modules/hdp-pig/manifests/*.pp'
-import '/media/sf_/home/mahadev/workspace/ambari-workspace/ambari-git/ambari-agent/src/main/puppet/modules/hdp-sqoop/manifests/*.pp'
-import '/media/sf_/home/mahadev/workspace/ambari-workspace/ambari-git/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/*.pp'
-import '/media/sf_/home/mahadev/workspace/ambari-workspace/ambari-git/ambari-agent/src/main/puppet/modules/hdp-hive/manifests/*.pp'
-import '/media/sf_/home/mahadev/workspace/ambari-workspace/ambari-git/ambari-agent/src/main/puppet/modules/hdp-hcat/manifests/*.pp'
-import '/media/sf_/home/mahadev/workspace/ambari-workspace/ambari-git/ambari-agent/src/main/puppet/modules/hdp-mysql/manifests/*.pp'
-import '/media/sf_/home/mahadev/workspace/ambari-workspace/ambari-git/ambari-agent/src/main/puppet/modules/hdp-monitor-webserver/manifests/*.pp'
-import '/media/sf_/home/mahadev/workspace/ambari-workspace/ambari-git/ambari-agent/src/main/puppet/modules/hdp-repos/manifests/*.pp'
-$NAMENODE= ['h2.hortonworks.com']
-$DATANODE= ['h1.hortonworks.com', 'h2.hortonworks.com']
-$jdk_location="http://hdp1/downloads"
-$jdk_bins= {
-"32" => "jdk-6u31-linux-x64.bin",
-"64" => "jdk-6u31-linux-x64.bin"
-}
-$hdfs_user="hdfs"
-$java32_home="/usr/jdk64/jdk1.6.0_31"
-$java64_home="/usr/jdk64/jdk1.6.0_31"
-$configuration =  {
-capacity-scheduler=> {
-"mapred.capacity-scheduler.queue.default.capacity" => "100",
-"mapred.capacity-scheduler.queue.default.supports-priorit" => "false"
-},
-oozie-site=> {
-"oozie.service.ActionService.executor.ext.classes" => "org.apache.oozie.action.hadoop.HiveActionExecutor, org.apache.oozie.action.hadoop.SqoopActionExecutor,org.apache.oozie.action.email.EmailActionExecutor,"
-},
-mapred-site=> {
-"mapred.queue.names" => "hive,pig,default",
-"mapred.jobtracker.taskScheduler" => "org.apache.hadoop.mapred.CapacityTaskScheduler"
-},
-core-site=> {
-"fs.default.name" => "hrt8n36.cc1.ygridcore.net"
-},
-hbase-policy=> {
-"security.client.protocol.acl" => "*"
-},
-hbase-site=> {
-"hbase.cluster.distributed" => "true"
-},
-hdfs-site=> {
-"dfs.block.size" => "256000000",
-"dfs.replication" => "1"
-},
-hadoop-policy=> {
-"security.client.datanode.protocol.acl" => "*",
-"security.client.protocol.acl" => "*"
-},
-mapred-queue-acls=> {
-"mapred.queue.default.acl-submit-job" => "*",
-"mapred.queue.default.acl-administer-jobs" => "*"
-},
-templeton-site=> {
-"templeton.override.enabled" => "true"
-},
-hive-site=> {
-"hive.exec.scratchdir" => "/tmp"
-},
-
-}
-$security_enabled = "true"
-$task_bin_exe = "ls"
-$hadoop_piddirprefix = "/tmp"
-$ganglia_server_host = "localhost"
-node /default/ {
- stage{1 :} -> stage{2 :}
-class {'hdp': stage => 1}
-class {'hdp-hadoop::namenode': stage => 2, service_state => installed_and_configured}
-}
diff --git a/branch-1.2/ambari-agent/src/main/python/ambari_agent/test.json b/branch-1.2/ambari-agent/src/main/python/ambari_agent/test.json
deleted file mode 100644
index 066d3ee..0000000
--- a/branch-1.2/ambari-agent/src/main/python/ambari_agent/test.json
+++ /dev/null
@@ -1,62 +0,0 @@
-{
-"commandId": "234",
-"hostname":  "h1.hortonworks.com",
-
-"clusterHostInfo" : 
-{
-"NAMENODE": ["h2.hortonworks.com"],
-"DATANODE": ["h1.hortonworks.com", "h2.hortonworks.com"]
-},
-"hostLevelParams": 
-{
-"hdfs_user" : "hdfs",
-"jdk_location" : "http://hdp1/downloads",
-
-"java32_home" : "/usr/jdk64/jdk1.6.0_31",
-"java64_home" : "/usr/jdk64/jdk1.6.0_31",
-"jdk_bins" :  { "32" : "jdk-6u31-linux-x64.bin", "64" : "jdk-6u31-linux-x64.bin" },
-"repo_info" :[
-{
-  "baseUrl":"http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos5",
-  "osType":"centos5",
-  "repoId":"HDP-1.1.1.16_TEST",
-  "repoName":"HDP_TEST"
-}
-,
-{
-  "baseUrl":"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos5",
-  "osType":"centos5",
-  "repoId":"HDP-UTILS-1.1.0.15_TEST",
-  "repoName":"HDP-UTILS_TEST"
-}]
-
-},
-
-
-"configurations" : {
-"hdfs-site" : { "dfs.block.size" : "256000000", "dfs.replication" : "1" } ,
-"core-site" :  { "fs.default.name" : "hrt8n36.cc1.ygridcore.net" } ,
-"mapred-queue-acls" : {"mapred.queue.default.acl-submit-job" : "*",
-		       "mapred.queue.default.acl-administer-jobs" : "*"},
-"hadoop-policy" : {"security.client.protocol.acl" : "*",
-		   "security.client.datanode.protocol.acl" : "*"},
-"mapred-site" : {"mapred.jobtracker.taskScheduler" : "org.apache.hadoop.mapred.CapacityTaskScheduler",
-		 "mapred.queue.names" : "hive,pig,default"},
-"capacity-scheduler" : {"mapred.capacity-scheduler.queue.default.capacity" : "100",
-			"mapred.capacity-scheduler.queue.default.supports-priorit" : "false"},
-"health_check" : {"security_enabled" : "true",
-                  "task_bin_exe" : "ls"},
-"hadoop_env" : {"hadoop_piddirprefix" : "/tmp"},
-
-"hbase-site" : {"hbase.cluster.distributed" : "true"},
-"hbase-policy" : {"security.client.protocol.acl" : "*"},
-"hadoop_metrics" : {"ganglia_server_host" : "localhost"},
-"hive-site" : {"hive.exec.scratchdir" : "/tmp"},
-"oozie-site" : {"oozie.service.ActionService.executor.ext.classes" : "org.apache.oozie.action.hadoop.HiveActionExecutor, org.apache.oozie.action.hadoop.SqoopActionExecutor,org.apache.oozie.action.email.EmailActionExecutor,"},
-"templeton-site" : {"templeton.override.enabled" : "true"}
-},
-
-"role": "NAMENODE",
-"roleCommand": "INSTALL",
-"roleParams" : {}
-}
diff --git a/branch-1.2/ambari-agent/src/main/python/setup.cfg b/branch-1.2/ambari-agent/src/main/python/setup.cfg
deleted file mode 100644
index a73754e..0000000
--- a/branch-1.2/ambari-agent/src/main/python/setup.cfg
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-[egg_info]
-tag_build =
-tag_date = 0
-tag_svn_revision = 0
diff --git a/branch-1.2/ambari-agent/src/main/python/setup.py b/branch-1.2/ambari-agent/src/main/python/setup.py
deleted file mode 100644
index 41c2097..0000000
--- a/branch-1.2/ambari-agent/src/main/python/setup.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from setuptools import setup
-
-setup(
-    name = "ambari-agent",
-    version = "1.0.3-SNAPSHOT",
-    packages = ['ambari_agent'],
-    # metadata for upload to PyPI
-    author = "Apache Software Foundation",
-    author_email = "ambari-dev@incubator.apache.org",
-    description = "Ambari agent",
-    license = "Apache License v2.0",
-    keywords = "hadoop, ambari",
-    url = "http://incubator.apache.org/ambari",
-    long_description = "This package implements the Ambari agent for installing Hadoop on large clusters.",
-    platforms=["any"],
-    entry_points = {
-        "console_scripts": [
-            "ambari-agent = ambari_agent.main:main",
-        ],
-    }
-)
diff --git a/branch-1.2/ambari-agent/src/packages/tarball/all.xml b/branch-1.2/ambari-agent/src/packages/tarball/all.xml
deleted file mode 100644
index 0e4f34b..0000000
--- a/branch-1.2/ambari-agent/src/packages/tarball/all.xml
+++ /dev/null
@@ -1,34 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.1"
-          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-          xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.1 http://maven.apache.org/xsd/assembly-1.1.1.xsd">
-  <!--This 'all' id is not appended to the produced bundle because we do this:
-    http://maven.apache.org/plugins/maven-assembly-plugin/faq.html#required-classifiers
-  -->
-  <formats>
-    <format>dir</format>
-  </formats>
-  <includeBaseDirectory>false</includeBaseDirectory>
-  <fileSets>
-    <fileSet>
-      <directory>src/main/python</directory>
-      <outputDirectory>/</outputDirectory>
-    </fileSet>
-  </fileSets>
-</assembly>
diff --git a/branch-1.2/ambari-agent/src/test/python/TestActionQueue.py b/branch-1.2/ambari-agent/src/test/python/TestActionQueue.py
deleted file mode 100644
index 6b0282d..0000000
--- a/branch-1.2/ambari-agent/src/test/python/TestActionQueue.py
+++ /dev/null
@@ -1,125 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-from unittest import TestCase
-from ambari_agent.ActionQueue import ActionQueue
-from ambari_agent.AmbariConfig import AmbariConfig
-from ambari_agent.FileUtil import getFilePath
-import os, errno, time, pprint, tempfile, threading
-
-event = threading.Event()
-
-class TestActionQueue(TestCase):
-  def test_ActionQueueStartStop(self):
-    actionQueue = ActionQueue(AmbariConfig().getConfig())
-    actionQueue.IDLE_SLEEP_TIME = 0.01
-    actionQueue.start()
-    actionQueue.stop()
-    actionQueue.join()
-    self.assertEqual(actionQueue.stopped(), True, 'Action queue is not stopped.') 
-
-#This feature is not yet implemented in ActionQueue
-  def test_RetryAction(self):
-    pass
-
-
-  def test_command_in_progress(self):
-    config = AmbariConfig().getConfig()
-    tmpfile = tempfile.gettempdir()
-    config.set('agent', 'prefix', tmpfile)
-    actionQueue = ActionQueue(config)
-    actionQueue.IDLE_SLEEP_TIME = 0.01
-    executor_started_event = threading.Event()
-    end_executor_event = threading.Event()
-    actionQueue.executor = FakeExecutor(executor_started_event, end_executor_event)
-    before_start_result = actionQueue.result()
-
-    command = {
-      'commandId': 17,
-      'role' : "role",
-      'taskId' : "taskId",
-      'clusterName' : "clusterName",
-      'serviceName' : "serviceName",
-      'status' : 'IN_PROGRESS',
-      'hostname' : "localhost.localdomain",
-      'hostLevelParams': "hostLevelParams",
-      'clusterHostInfo': "clusterHostInfo",
-      'roleCommand': "roleCommand",
-      'configurations': "configurations",
-      'commandType': "EXECUTION_COMMAND",
-      'configurations':{'global' : {}}
-    }
-    actionQueue.put(command)
-
-    actionQueue.start()
-    executor_started_event.wait()
-    #print ("ii: " + pprint.pformat(actionQueue.commandInProgress))
-    in_progress_result = actionQueue.result()
-    end_executor_event.set()
-    actionQueue.stop()
-    actionQueue.join()
-    after_start_result = actionQueue.result()
-
-    self.assertEquals(len(before_start_result['componentStatus']), 0)
-    self.assertEquals(len(before_start_result['reports']), 0)
-
-    self.assertEquals(len(in_progress_result['componentStatus']), 0)
-    self.assertEquals(len(in_progress_result['reports']), 1)
-    self.assertEquals(in_progress_result['reports'][0]['status'], "IN_PROGRESS")
-    self.assertEquals(in_progress_result['reports'][0]['stdout'], "Dummy output")
-    self.assertEquals(in_progress_result['reports'][0]['exitCode'], 777)
-    self.assertEquals(in_progress_result['reports'][0]['stderr'], 'Dummy err')
-
-    self.assertEquals(len(after_start_result['componentStatus']), 0)
-    self.assertEquals(len(after_start_result['reports']), 1)
-    self.assertEquals(after_start_result['reports'][0]['status'], "COMPLETED")
-    self.assertEquals(after_start_result['reports'][0]['stdout'], "returned stdout")
-    self.assertEquals(after_start_result['reports'][0]['exitCode'], 0)
-    self.assertEquals(after_start_result['reports'][0]['stderr'], 'returned stderr')
-
-    #print("tmpout: " + pprint.pformat(actionQueue.tmpdir))
-    #print("before: " + pprint.pformat(before_start_result))
-    #print("in_progress: " + pprint.pformat(in_progress_result))
-    #print("after: " + pprint.pformat(after_start_result))
-
-
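-# Stub executor: writes dummy output, then blocks on end_executor_event so the
-# test can observe the IN_PROGRESS report before the command completes.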
-class FakeExecutor():
-
-  def __init__(self, executor_started_event, end_executor_event):
-    self.executor_started_event = executor_started_event
-    self.end_executor_event = end_executor_event
-    pass
-
-  def runCommand(self, command, tmpoutpath, tmperrpath):
-    tmpout= open(tmpoutpath, 'w')
-    tmpout.write("Dummy output")
-    tmpout.flush()
-
-    tmperr= open(tmperrpath, 'w')
-    tmperr.write("Dummy err")
-    tmperr.flush()
-
-    self.executor_started_event.set()
-    self.end_executor_event.wait()
-    return {
-      "exitcode": 0,
-      "stdout": "returned stdout",
-      "stderr": "returned stderr"
-    }
diff --git a/branch-1.2/ambari-agent/src/test/python/TestAgentActions.py b/branch-1.2/ambari-agent/src/test/python/TestAgentActions.py
deleted file mode 100644
index 7880f99..0000000
--- a/branch-1.2/ambari-agent/src/test/python/TestAgentActions.py
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-from unittest import TestCase
-import os, errno, getpass
-from ambari_agent.ActionQueue import ActionQueue
-from ambari_agent.AmbariConfig import AmbariConfig
-from ambari_agent.FileUtil import getFilePath
-from ambari_agent import shell
-from ambari_agent.shell import serverTracker
-import time
-
-class TestAgentActions(TestCase):
-#This feature is not yet implemented in ActionQueue
-  def test_installAndConfigAction(self):
-    pass
-#This feature is not yet implemented in ActionQueue
-  def test_startAndStopAction(self):
-    pass
diff --git a/branch-1.2/ambari-agent/src/test/python/TestCertGeneration.py b/branch-1.2/ambari-agent/src/test/python/TestCertGeneration.py
deleted file mode 100644
index 94bb9f6..0000000
--- a/branch-1.2/ambari-agent/src/test/python/TestCertGeneration.py
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-import os
-import tempfile
-import shutil
-from unittest import TestCase
-import ConfigParser
-import security
-from security import CertificateManager
-from ambari_agent import AmbariConfig
-
-class TestCertGeneration(TestCase):
-  def setUp(self):
-    self.tmpdir = tempfile.mkdtemp()
-    config = ConfigParser.RawConfigParser()
-    config.add_section('server')
-    config.set('server', 'hostname', 'example.com')
-    config.set('server', 'url_port', '777')
-    config.add_section('security')
-    config.set('security', 'keysdir', self.tmpdir)
-    config.set('security', 'server_crt', 'ca.crt')
-    self.certMan = CertificateManager(config)
-    
-  def test_generation(self):
-    self.certMan.genAgentCrtReq()
-    self.assertTrue(os.path.exists(self.certMan.getAgentKeyName()))
-    self.assertTrue(os.path.exists(self.certMan.getAgentCrtReqName()))
-  def tearDown(self):
-    shutil.rmtree(self.tmpdir)
-    
-
diff --git a/branch-1.2/ambari-agent/src/test/python/TestController.py b/branch-1.2/ambari-agent/src/test/python/TestController.py
deleted file mode 100644
index 51fc9d5..0000000
--- a/branch-1.2/ambari-agent/src/test/python/TestController.py
+++ /dev/null
@@ -1,334 +0,0 @@
-#!/usr/bin/env python2.6
-# -*- coding: utf-8 -*-
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import StringIO
-import unittest
-from ambari_agent import Controller
-import sys
-from mock.mock import patch, MagicMock, call
-
-
-class TestController(unittest.TestCase):
-
-  @patch("threading.Thread")
-  @patch("threading.Lock")
-  @patch("socket.gethostname")
-  @patch.object(Controller, "NetUtil")
-  def setUp(self, NetUtil_mock, hostnameMock, lockMock, threadMock):
-
-    Controller.logger = MagicMock()
-    hostnameMock.return_value = "test_hostname"
-    lockMock.return_value = MagicMock()
-    NetUtil_mock.return_value = MagicMock()
-
-    config = MagicMock()
-    config.get.return_value = "something"
-
-    self.controller = Controller.Controller(config)
-
-
-  @patch.object(Controller, "Heartbeat")
-  @patch.object(Controller, "Register")
-  @patch.object(Controller, "ActionQueue")
-  def test_start(self, ActionQueue_mock, Register_mock, Heartbeat_mock):
-
-    aq = MagicMock()
-    ActionQueue_mock.return_value = aq
-
-    self.controller.start()
-    self.assertTrue(ActionQueue_mock.called)
-    self.assertTrue(aq.start.called)
-    self.assertTrue(Register_mock.called)
-    self.assertTrue(Heartbeat_mock.called)
-
-  @patch("json.dumps")
-  @patch("json.loads")
-  @patch("time.sleep")
-  @patch("pprint.pformat")
-  @patch.object(Controller, "randint")
-  def test_registerWithServer(self, randintMock, pformatMock, sleepMock,
-                              loadsMock, dumpsMock):
-
-    out = StringIO.StringIO()
-    sys.stdout = out
-
-    register = MagicMock()
-    self.controller.register = register
-
-    sendRequest = MagicMock()
-    self.controller.sendRequest = sendRequest
-
-    dumpsMock.return_value = "request"
-    response = {"responseId":1,}
-    loadsMock.return_value = response
-
-    self.assertEqual(response, self.controller.registerWithServer())
-
-    response["statusCommands"] = "commands"
-    self.controller.addToQueue = MagicMock(name="addToQueue")
-
-    self.assertEqual(response, self.controller.registerWithServer())
-    self.controller.addToQueue.assert_called_with("commands")
-
-    calls = []
-
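-    # the first dumps() call raises, forcing registerWithServer to back off
-    # and retry; subsequent calls return the canned request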
-    def side_effect(*args):
-      if len(calls) == 0:
-        calls.append(1)
-        raise Exception("test")
-      return "request"
-
-    del response["statusCommands"]
-
-    dumpsMock.side_effect = side_effect
-    self.assertEqual(response, self.controller.registerWithServer())
-    self.assertTrue(randintMock.called)
-    self.assertTrue(sleepMock.called)
-
-    sys.stdout = sys.__stdout__
-
-    self.controller.sendRequest = Controller.Controller.sendRequest
-    self.controller.addToQueue = Controller.Controller.addToQueue
-
-
-  @patch("pprint.pformat")
-  def test_addToQueue(self, pformatMock):
-
-    actionQueue = MagicMock()
-    self.controller.actionQueue = actionQueue
-    self.controller.addToQueue(None)
-    self.assertFalse(actionQueue.put.called)
-    self.controller.addToQueue("cmd")
-    self.assertTrue(actionQueue.put.called)
-
-
-  @patch("urllib2.build_opener")
-  @patch("urllib2.install_opener")
-  def test_run(self, installMock, buildMock):
-
-    buildMock.return_value = "opener"
-    registerAndHeartbeat = MagicMock(name="registerAndHeartbeat")
-    calls = []
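-    # first call sets repeatRegistration so run() invokes
-    # registerAndHeartbeat a second time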
-    def side_effect():
-      if len(calls) == 0:
-        self.controller.repeatRegistration = True
-      calls.append(1)
-    registerAndHeartbeat.side_effect = side_effect
-    self.controller.registerAndHeartbeat = registerAndHeartbeat
-
-    # repeat registration
-    self.controller.run()
-
-    self.assertTrue(buildMock.called)
-    installMock.assert_called_with("opener")
-    self.assertEqual(2, registerAndHeartbeat.call_count)
-
-    # one call, +1
-    registerAndHeartbeat.side_effect = None
-    self.controller.run()
-    self.assertEqual(3, registerAndHeartbeat.call_count)
-
-
-  @patch("urllib2.build_opener")
-  @patch("urllib2.install_opener")
-  def test_repeatRegistration(self, installMock, buildMock):
-
-    registerAndHeartbeat = MagicMock(name="registerAndHeartbeat")
-
-    self.controller.registerAndHeartbeat = registerAndHeartbeat
-    self.controller.run()
-    self.assertTrue(installMock.called)
-    self.assertTrue(buildMock.called)
-    self.controller.registerAndHeartbeat.assert_called_once_with()
-
-    calls = []
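-    # repeatRegistration is switched back off within the same call, so this
-    # run() adds only one more registerAndHeartbeat call (two in total)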
-    def switchBool():
-      if len(calls) == 0:
-        self.controller.repeatRegistration = True
-        calls.append(1)
-      self.controller.repeatRegistration = False
-
-    registerAndHeartbeat.side_effect = switchBool
-    self.controller.run()
-    self.assertEqual(2, registerAndHeartbeat.call_count)
-
-    self.controller.registerAndHeartbeat = \
-      Controller.Controller.registerAndHeartbeat
-
-
-  @patch("time.sleep")
-  def test_registerAndHeartbeat(self, sleepMock):
-
-    registerWithServer = MagicMock(name="registerWithServer")
-    registerWithServer.return_value = {"response":"resp"}
-    self.controller.registerWithServer = registerWithServer
-    heartbeatWithServer = MagicMock(name="heartbeatWithServer")
-    self.controller.heartbeatWithServer = heartbeatWithServer
-
-    self.controller.registerAndHeartbeat()
-    registerWithServer.assert_called_once_with()
-    heartbeatWithServer.assert_called_once_with()
-
-    self.controller.registerWithServer = \
-      Controller.Controller.registerWithServer
-    self.controller.heartbeatWithServer = \
-      Controller.Controller.heartbeatWithServer
-
-
-  @patch.object(Controller, "ProcessHelper")
-  def test_restartAgent(self, ProcessHelper_mock):
-
-    self.controller.restartAgent()
-    self.assertTrue(ProcessHelper_mock.restartAgent.called)
-
-
-  @patch("urllib2.Request")
-  @patch.object(Controller, "security")
-  def test_sendRequest(self, security_mock, requestMock):
-
-    conMock = MagicMock()
-    conMock.request.return_value = "response"
-    security_mock.CachedHTTPSConnection.return_value = conMock
-    url = "url"
-    data = "data"
-    requestMock.return_value = "request"
-
-    self.controller.cachedconnect = None
-
-    self.assertEqual("response", self.controller.sendRequest(url, data))
-    security_mock.CachedHTTPSConnection.assert_called_once_with(
-      self.controller.config)
-    requestMock.assert_called_with(url, data,
-      {'Content-Type': 'application/json'})
-
-
-  @patch("time.sleep")
-  @patch("json.loads")
-  @patch("json.dumps")
-  def test_heartbeatWithServer(self, dumpsMock, loadsMock, sleepMock):
-
-    out = StringIO.StringIO()
-    sys.stdout = out
-
-    heartbeat = MagicMock()
-    self.controller.heartbeat = heartbeat
-
-    dumpsMock.return_value = "data"
-
-    sendRequest = MagicMock(name="sendRequest")
-    self.controller.sendRequest = sendRequest
-
-    self.controller.responseId = 1
-    response = {"responseId":"2", "restartAgent":"false"}
-    loadsMock.return_value = response
-
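-    # stop the heartbeat loop after one successful request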
-    def one_heartbeat(*args, **kwargs):
-      self.controller.DEBUG_STOP_HEARTBITTING = True
-      return "data"
-
-    sendRequest.side_effect = one_heartbeat
-
-    actionQueue = MagicMock()
-    actionQueue.isIdle.return_value = True
-
-    # one successful request, after stop
-    self.controller.actionQueue = actionQueue
-    self.controller.heartbeatWithServer()
-    self.assertTrue(sendRequest.called)
-
-    calls = []
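-    # the first heartbeat raises to simulate a failed request; the retry
-    # succeeds with a bumped responseId and then stops the loop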
-    def retry(*args, **kwargs):
-      if len(calls) == 0:
-        calls.append(1)
-        response["responseId"] = "3"
-        raise Exception()
-      if len(calls) > 0:
-        self.controller.DEBUG_STOP_HEARTBITTING = True
-      return "data"
-
-    # exception, retry, successful and stop
-    sendRequest.side_effect = retry
-    self.controller.DEBUG_STOP_HEARTBITTING = False
-    self.controller.heartbeatWithServer()
-
-    self.assertEqual(1, self.controller.DEBUG_SUCCESSFULL_HEARTBEATS)
-
-    # retry registration
-    response["registrationCommand"] = "true"
-    sendRequest.side_effect = one_heartbeat
-    self.controller.DEBUG_STOP_HEARTBITTING = False
-    self.controller.heartbeatWithServer()
-
-    self.assertTrue(self.controller.repeatRegistration)
-
-    # wrong responseId => restart
-    response = {"responseId":"2", "restartAgent":"false"}
-    loadsMock.return_value = response
-
-    restartAgent = MagicMock(name="restartAgent")
-    self.controller.restartAgent = restartAgent
-    self.controller.DEBUG_STOP_HEARTBITTING = False
-    self.controller.heartbeatWithServer()
-
-    restartAgent.assert_called_once_with()
-
-    # executionCommands, statusCommands
-    self.controller.responseId = 1
-    addToQueue = MagicMock(name="addToQueue")
-    self.controller.addToQueue = addToQueue
-    response["executionCommands"] = "executionCommands"
-    response["statusCommands"] = "statusCommands"
-    self.controller.DEBUG_STOP_HEARTBITTING = False
-    self.controller.heartbeatWithServer()
-
-    addToQueue.assert_has_calls([call("executionCommands"),
-                                 call("statusCommands")])
-
-    # restartAgent command
-    self.controller.responseId = 1
-    self.controller.DEBUG_STOP_HEARTBITTING = False
-    response["restartAgent"] = "true"
-    restartAgent = MagicMock(name="restartAgent")
-    self.controller.restartAgent = restartAgent
-    self.controller.heartbeatWithServer()
-
-    restartAgent.assert_called_once_with()
-
-    # actionQueue not idle
-    self.controller.responseId = 1
-    self.controller.DEBUG_STOP_HEARTBITTING = False
-    actionQueue.isIdle.return_value = False
-    response["restartAgent"] = "false"
-    self.controller.heartbeatWithServer()
-
-    sleepMock.assert_called_with(
-      self.controller.netutil.HEARTBEAT_NOT_IDDLE_INTERVAL_SEC)
-
-    sys.stdout = sys.__stdout__
-    self.controller.sendRequest = Controller.Controller.sendRequest
-    self.controller.addToQueue = Controller.Controller.addToQueue
-
-
-if __name__ == "__main__":
-
-  unittest.main(verbosity=2)
-
diff --git a/branch-1.2/ambari-agent/src/test/python/TestFileUtil.py b/branch-1.2/ambari-agent/src/test/python/TestFileUtil.py
deleted file mode 100644
index 53e55c5..0000000
--- a/branch-1.2/ambari-agent/src/test/python/TestFileUtil.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-from unittest import TestCase
-from ambari_agent.FileUtil import writeFile, createStructure, deleteStructure
-import os, errno
-
-class TestFileUtil(TestCase):
-  def test_createStructure(self):
-    action = { 'clusterId' : 'abc', 'role' : 'hdfs', 'workDirComponent' : 'abc-hdfs' }
-    result = {}
-    result = createStructure(action, result)
-    self.assertEqual(result['exitCode'], 0, 'Create cluster structure failed.')
-
-    # writeFile scenario, run inline so it reuses the structure created above
-    configFile = {
-      "data"       : "test",
-      "owner"      : os.getuid(),
-      "group"      : os.getgid() ,
-      "permission" : 0700,
-      "path"       : "/tmp/ambari_file_test/_file_write_test",
-      "umask"      : 022
-    }
-    action = { 
-      'clusterId' : 'abc', 
-      'role' : 'hdfs', 
-      'workDirComponent' : 'abc-hdfs',
-      'file' : configFile 
-    }
-    result = { }
-    result = writeFile(action, result)
-    self.assertEqual(result['exitCode'], 0, 'WriteFile test with uid/gid failed.')
-
-    # deleteStructure scenario, run inline after the writeFile check
-    result = { }
-    action = { 'clusterId' : 'abc', 'role' : 'hdfs', 'workDirComponent' : 'abc-hdfs' }
-    result = deleteStructure(action, result)
-    self.assertEqual(result['exitCode'], 0, 'Delete cluster structure failed.')
-
diff --git a/branch-1.2/ambari-agent/src/test/python/TestGrep.py b/branch-1.2/ambari-agent/src/test/python/TestGrep.py
deleted file mode 100644
index 1d162e6..0000000
--- a/branch-1.2/ambari-agent/src/test/python/TestGrep.py
+++ /dev/null
@@ -1,108 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-from unittest import TestCase
-from Grep import Grep
-import socket
-import os, sys
-import logging
-
-class TestGrep(TestCase):
-
-  logger = logging.getLogger()
-  string_good = None
-  string_bad = None
-  grep = Grep()
-
-  def setUp(self):
-    self.string_good = open('dummy_puppet_output_good.txt', 'r').read().replace("\n", os.linesep)
-    self.string_bad = open('dummy_puppet_output_error.txt', 'r').read().replace("\n", os.linesep)
-    pass
-
-  def test_grep_many_lines(self):
-    fragment = self.grep.grep(self.string_bad, "err", 1000, 1000)
-    desired = self.string_bad.strip()
-    self.assertEquals(fragment, desired, "Grep grep function should return all lines if there are less lines than n")
-
-
-  def test_grep_few_lines(self):
-    fragment = self.grep.grep(self.string_bad, "Err", 3, 3)
-    desired = """
-debug: /Schedule[never]: Skipping device resources because running on a host
-debug: Exec[command_good](provider=posix): Executing 'wget e432423423xample.com/badurl444111'
-debug: Executing 'wget e432423423xample.com/badurl444111'
-err: /Stage[main]//Exec[command_good]/returns: change from notrun to 0 failed: wget e432423423xample.com/badurl444111 returned 4 instead of one of [0] at /root/puppet-learn/2-bad.pp:5
-debug: /Schedule[weekly]: Skipping device resources because running on a host
-debug: /Schedule[puppet]: Skipping device resources because running on a host
-debug: Finishing transaction 70171639726240
-""".strip()
-    self.assertEquals(fragment, desired, "Grep grep function should return only last 3 lines of file")
-
-  def test_grep_no_result(self):
-    fragment = self.grep.grep(self.string_good, "Err", 3, 3)
-    desired = None
-    self.assertEquals(fragment, desired, 'Grep grep function should return None if result is not found')
-
-  def test_grep_empty_string(self):
-    fragment = self.grep.grep("", "Err", 1000, 1000)
-    desired = None
-    self.assertEquals(fragment, desired, 'Grep grep function should return None for empty string')
-
-  def test_grep_all(self):
-    fragment = self.grep.grep(self.string_bad, "Err", 35, 9)
-    desired = self.string_bad.strip()
-    self.assertEquals(fragment, desired, 'Grep grep function contains a bug in index arithmetic')
-
-
-  def test_tail_many_lines(self):
-    fragment = self.grep.tail(self.string_good, 1000)
-    desired = self.string_good.strip()
-    self.assertEquals(fragment, desired, "Grep tail function should return all lines if there are less lines than n")
-
-  def test_tail_few_lines(self):
-    fragment = self.grep.tail(self.string_good, 3)
-    desired = """
-debug: Finishing transaction 70060456663980
-debug: Received report to process from ambari-dmi
-debug: Processing report from ambari-dmi with processor Puppet::Reports::Store
-""".strip()
-    self.assertEquals(fragment, desired, "Grep tail function should return only last 3 lines of file")
-
-  def test_tail_no_lines(self):
-    fragment = self.grep.tail("", 3)
-    desired = ''
-    self.assertEquals(fragment, desired, 'Grep tail function should return "" for empty string')
-
-  def test_tail_all(self):
-    fragment = self.grep.tail("", 47)
-    desired = ''
-    self.assertEquals(fragment, desired, 'Grep tail function contains a bug in index arithmetic')
-
-  def test_filterMarkup(self):
-    string = """notice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Process_pkg[hadoop 64]/Package[hadoop-libhdfs]/ensure: created"""
-    desired="""notice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Process_pkg[hadoop 64]/Package[hadoop-libhdfs]/ensure: created"""
-    filtered = self.grep.filterMarkup(string)
-    #sys.stderr.write(filtered)
-    self.assertEquals(filtered, desired)
-
-  def tearDown(self):
-    pass
-
-
diff --git a/branch-1.2/ambari-agent/src/test/python/TestHardware.py b/branch-1.2/ambari-agent/src/test/python/TestHardware.py
deleted file mode 100644
index 89b4bdb..0000000
--- a/branch-1.2/ambari-agent/src/test/python/TestHardware.py
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-from unittest import TestCase
-from ambari_agent.Hardware import Hardware
-
-class TestHardware(TestCase):
-  def test_build(self):
-    hardware = Hardware()
-    result = hardware.get()
-    osdisks = hardware.osdisks()
-    for dev_item in result['mounts']:
-      self.assertTrue(dev_item['available'] >= 0)
-      self.assertTrue(dev_item['used'] >= 0)
-      self.assertTrue(dev_item['percent'] is not None)
-      self.assertTrue(dev_item['device'] is not None)
-      self.assertTrue(dev_item['mountpoint'] is not None)
-      self.assertTrue(dev_item['type'] is not None)
-      self.assertTrue(dev_item['size'] > 0)
-
-    for os_disk_item in osdisks:
-      self.assertTrue(os_disk_item['available'] >= 0)
-      self.assertTrue(os_disk_item['used'] >= 0)
-      self.assertTrue(os_disk_item['percent'] is not None)
-      self.assertTrue(os_disk_item['device'] is not None)
-      self.assertTrue(os_disk_item['mountpoint'] is not None)
-      self.assertTrue(os_disk_item['type'] is not None)
-      self.assertTrue(os_disk_item['size'] > 0)
-
-    self.assertTrue(len(result['mounts']) == len(osdisks))
-    
diff --git a/branch-1.2/ambari-agent/src/test/python/TestHeartbeat.py b/branch-1.2/ambari-agent/src/test/python/TestHeartbeat.py
deleted file mode 100644
index 8050674..0000000
--- a/branch-1.2/ambari-agent/src/test/python/TestHeartbeat.py
+++ /dev/null
@@ -1,122 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-from unittest import TestCase
-from ambari_agent.Heartbeat import Heartbeat
-from ambari_agent.ActionQueue import ActionQueue
-from ambari_agent.LiveStatus import LiveStatus
-from ambari_agent import AmbariConfig
-import socket
-import os
-import time
-
-class TestHeartbeat(TestCase):
-
-  def test_build(self):
-    actionQueue = ActionQueue(AmbariConfig.AmbariConfig().getConfig())
-    heartbeat = Heartbeat(actionQueue)
-    result = heartbeat.build(100)
-    print "Heartbeat: " + str(result)
-    self.assertEquals(result['hostname'] != '', True, "hostname should not be empty")
-    self.assertEquals(result['responseId'], 100)
-    self.assertEquals(result['componentStatus'] is not None, True, "Heartbeat should contain componentStatus")
-    self.assertEquals(result['reports'] is not None, True, "Heartbeat should contain reports")
-    self.assertEquals(result['timestamp'] >= 1353679373880L, True)
-    self.assertEquals(len(result['nodeStatus']), 2)
-    self.assertEquals(result['nodeStatus']['cause'], "NONE")
-    self.assertEquals(result['nodeStatus']['status'], "HEALTHY")
-    # result may or may NOT have an agentEnv structure in it
-    self.assertEquals(len(result) in (6, 7), True)
-    self.assertEquals(not heartbeat.reports, True, "Heartbeat should not contain task in progress")
-
-
-  def test_heartbeat_with_status(self):
-    actionQueue = ActionQueue(AmbariConfig.AmbariConfig().getConfig())
-    heartbeat = Heartbeat(actionQueue)
-    statusCommand = {
-      "serviceName" : 'HDFS',
-      "commandType" : "STATUS_COMMAND",
-      "clusterName" : "",
-      "componentName" : "DATANODE",
-      'configurations':{'global' : {}}
-    }
-    actionQueue.put(statusCommand)
-    actionQueue.start()
-    time.sleep(0.1)
-    actionQueue.stop()
-    actionQueue.join()
-    result = heartbeat.build(101)
-    self.assertEquals(len(result['componentStatus']) > 0, True, 'Heartbeat should contain status of HDFS components')
-
-  def test_heartbeat_with_status_multiple(self):
-    actionQueue = ActionQueue(AmbariConfig.AmbariConfig().getConfig())
-    actionQueue.IDLE_SLEEP_TIME = 0.01
-    heartbeat = Heartbeat(actionQueue)
-    actionQueue.start()
-    max_number_of_status_entries = 0
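-    # track the peak componentStatus count across repeated status commands;
-    # entries for the same component must not accumulate between heartbeats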
-    for i in range(1,5):
-      statusCommand = {
-        "serviceName" : 'HDFS',
-        "commandType" : "STATUS_COMMAND",
-        "clusterName" : "",
-        "componentName" : "DATANODE",
-        'configurations':{'global' : {}}
-      }
-      actionQueue.put(statusCommand)
-      time.sleep(0.1)
-      result = heartbeat.build(101)
-      number_of_status_entries = len(result['componentStatus'])
-#      print "Heartbeat with status: " + str(result) + " XXX " + str(number_of_status_entries)
-      if max_number_of_status_entries < number_of_status_entries:
-        max_number_of_status_entries = number_of_status_entries
-    actionQueue.stop()
-    actionQueue.join()
-
-    NUMBER_OF_COMPONENTS = 1
-    self.assertEquals(max_number_of_status_entries == NUMBER_OF_COMPONENTS, True)
-
-  def test_heartbeat_with_task_in_progress(self):
-    actionQueue = ActionQueue(AmbariConfig.AmbariConfig().getConfig())
-    actionQueue.commandInProgress= {
-      'role' : "role",
-      'actionId' : "actionId",
-      'taskId' : "taskId",
-      'stdout' : "stdout",
-      'clusterName' : "clusterName",
-      'stderr' : 'none',
-      'exitCode' : 777,
-      'serviceName' : "serviceName",
-      'status' : 'IN_PROGRESS',
-      'configurations':{'global' : {}}
-    }
-    heartbeat = Heartbeat(actionQueue)
-    result = heartbeat.build(100)
-    #print "Heartbeat: " + str(result)
-    self.assertEquals(len(result['reports']), 1)
-    self.assertEquals(result['reports'][0]['role'], "role")
-    self.assertEquals(result['reports'][0]['actionId'], "actionId")
-    self.assertEquals(result['reports'][0]['taskId'], "taskId")
-    self.assertEquals(result['reports'][0]['stdout'], "...")
-    self.assertEquals(result['reports'][0]['clusterName'], "clusterName")
-    self.assertEquals(result['reports'][0]['stderr'], "...")
-    self.assertEquals(result['reports'][0]['exitCode'], 777)
-    self.assertEquals(result['reports'][0]['serviceName'], "serviceName")
-    self.assertEquals(result['reports'][0]['status'], "IN_PROGRESS")
-    pass
diff --git a/branch-1.2/ambari-agent/src/test/python/TestHostname.py b/branch-1.2/ambari-agent/src/test/python/TestHostname.py
deleted file mode 100644
index 3198905..0000000
--- a/branch-1.2/ambari-agent/src/test/python/TestHostname.py
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-from unittest import TestCase
-import ambari_agent.hostname as hostname
-import ambari_agent.AmbariConfig as AmbariConfig
-import socket
-import tempfile
-import shutil
-import os, pprint, json,stat
-
-class TestHostname(TestCase):
-
-  def test_hostname(self):
-    self.assertEquals(hostname.hostname(), socket.getfqdn(), 
-                      "hostname should equal the socket-based hostname")
-    pass
-
-  def test_hostname_override(self):
-    fd = tempfile.mkstemp(text=True)
-    tmpname = fd[1]
-    os.close(fd[0])
-    os.chmod(tmpname, os.stat(tmpname).st_mode | stat.S_IXUSR)
-
-    tmpfile = file(tmpname, "w+")
-
-    config = AmbariConfig.config
-    try:
-      tmpfile.write("#!/bin/sh\n\necho 'test.example.com'")
-      tmpfile.close()
-
-      config.set('agent', 'hostname_script', tmpname)
-
-      self.assertEquals(hostname.hostname(), 'test.example.com', "expected hostname 'test.example.com'")
-    finally:
-      os.remove(tmpname)
-      config.remove_option('agent', 'hostname_script')
-
-    pass
-
-
-
diff --git a/branch-1.2/ambari-agent/src/test/python/TestLiveStatus.py b/branch-1.2/ambari-agent/src/test/python/TestLiveStatus.py
deleted file mode 100644
index e6eb2aa..0000000
--- a/branch-1.2/ambari-agent/src/test/python/TestLiveStatus.py
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-from unittest import TestCase
-from ambari_agent.LiveStatus import LiveStatus
-from ambari_agent import AmbariConfig
-import socket
-import os
-
-class TestLiveStatus(TestCase):
-  def test_build(self):
-    for component in LiveStatus.COMPONENTS:
-      livestatus = LiveStatus('', component['serviceName'], component['componentName'], {})
-      result = livestatus.build()
-      print "LiveStatus of {0}: {1}".format(component['serviceName'], str(result))
-      self.assertEquals(len(result) > 0, True, 'Livestatus should not be empty')
-  
diff --git a/branch-1.2/ambari-agent/src/test/python/TestManifestGenerator.py b/branch-1.2/ambari-agent/src/test/python/TestManifestGenerator.py
deleted file mode 100644
index 514de24..0000000
--- a/branch-1.2/ambari-agent/src/test/python/TestManifestGenerator.py
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-import os
-
-from unittest import TestCase
-from ambari_agent import manifestGenerator
-import ambari_agent.AmbariConfig
-import tempfile
-import json
-import shutil
-from ambari_agent.AmbariConfig import AmbariConfig
-from mock.mock import patch, MagicMock, call
-
-
-class TestManifestGenerator(TestCase):
-
-  def setUp(self):
-    self.dir = tempfile.mkdtemp()
-    self.config = AmbariConfig()
-    jsonCommand = file('../../main/python/ambari_agent/test.json').read()
-    self.parsedJson = json.loads(jsonCommand)
-
-    pass
-
-  def tearDown(self):
-    shutil.rmtree(self.dir)
-    pass
-
-
-  def testWriteImports(self):
-    tmpFileName = tempfile.mkstemp(dir=self.dir, text=True)[1]
-    print tmpFileName
-    tmpFile = file(tmpFileName, 'r+')
-
-    manifestGenerator.writeImports(tmpFile, '../../main/puppet/modules', self.config.getImports())
-    tmpFile.seek(0)
-    print tmpFile.read()
-    tmpFile.close()
-
-
-    pass
-
-  @patch.object(manifestGenerator, 'writeImports')
-  @patch.object(manifestGenerator, 'writeNodes')
-  @patch.object(manifestGenerator, 'writeParams')
-  @patch.object(manifestGenerator, 'writeTasks')
-  def testGenerateManifest(self, writeTasksMock, writeParamsMock, writeNodesMock, writeImportsMock):
-    tmpFileName = tempfile.mkstemp(dir=self.dir, text=True)[1]
-    manifestGenerator.generateManifest(self.parsedJson, tmpFileName, '../../main/puppet/modules', self.config.getConfig())
-
-    self.assertTrue(writeParamsMock.called)
-    self.assertTrue(writeNodesMock.called)
-    self.assertTrue(writeImportsMock.called)
-    self.assertTrue(writeTasksMock.called)
-
-    print file(tmpFileName).read()
-
-    pass
\ No newline at end of file
diff --git a/branch-1.2/ambari-agent/src/test/python/TestNetUtil.py b/branch-1.2/ambari-agent/src/test/python/TestNetUtil.py
deleted file mode 100644
index e1fe02d..0000000
--- a/branch-1.2/ambari-agent/src/test/python/TestNetUtil.py
+++ /dev/null
@@ -1,77 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-from ambari_agent import NetUtil
-from mock.mock import MagicMock, patch
-import unittest
-
-class TestNetUtil(unittest.TestCase):
-
-  @patch("urlparse.urlparse")
-  @patch("httplib.HTTPSConnection")
-  def test_checkURL(self, httpsConMock, parseMock):
-
-    NetUtil.logger = MagicMock()
-    parseMock.return_value = [1, 2]
-    ca_connection = MagicMock()
-    response = MagicMock()
-    response.status = 200
-    ca_connection.getresponse.return_value = response
-    httpsConMock.return_value = ca_connection
-
-    # test 200
-    netutil = NetUtil.NetUtil()
-    self.assertTrue(netutil.checkURL("url"))
-
-    # test fail
-    response.status = 404
-    self.assertFalse(netutil.checkURL("url"))
-
-    # test Exception
-    response.status = 200
-    httpsConMock.side_effect = Exception("test")
-    self.assertFalse(netutil.checkURL("url"))
-
-
-  @patch("time.sleep")
-  def test_try_to_connect(self, sleepMock):
-
-    netutil = NetUtil.NetUtil()
-    checkURL = MagicMock(name="checkURL")
-    checkURL.return_value = True
-    netutil.checkURL = checkURL
-
-    # one successful get
-    self.assertEqual(0, netutil.try_to_connect("url", 10))
-
-    # got successful after N retries
-    gets = [True, False, False]
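-    # side_effect pops from the end of the list: two failed checks, then a
-    # success, so try_to_connect reports 2 retries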
-    def side_effect(*args):
-      return gets.pop()
-    checkURL.side_effect = side_effect
-    self.assertEqual(2, netutil.try_to_connect("url", 10))
-
-    # max retries
-    checkURL.side_effect = None
-    checkURL.return_value = False
-    self.assertEqual(5, netutil.try_to_connect("url", 5))
-
-
diff --git a/branch-1.2/ambari-agent/src/test/python/TestProcessHelper.py b/branch-1.2/ambari-agent/src/test/python/TestProcessHelper.py
deleted file mode 100644
index c7a4261..0000000
--- a/branch-1.2/ambari-agent/src/test/python/TestProcessHelper.py
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/env python2.6
-# -*- coding: utf-8 -*-
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import os
-import tempfile
-import unittest
-from mock.mock import patch, MagicMock
-from ambari_agent import ProcessHelper
-
-
-class TestProcessHelper(unittest.TestCase):
-
-  @patch.object(ProcessHelper, "getTempFiles")
-  def test_clean(self, getTempFilesMock):
-
-    tf1 = tempfile.NamedTemporaryFile(delete=False)
-    tf2 = tempfile.NamedTemporaryFile(delete=False)
-    tf3 = tempfile.NamedTemporaryFile(delete=False)
-
-    getTempFilesMock.return_value = [tf2.name, tf3.name]
-    ProcessHelper.pidfile = tf1.name
-    ProcessHelper.logger = MagicMock()
-
-    ProcessHelper._clean()
-
-    self.assertFalse(os.path.exists(tf1.name))
-    self.assertFalse(os.path.exists(tf2.name))
-    self.assertFalse(os.path.exists(tf3.name))
-
-
-  @patch("os._exit")
-  @patch.object(ProcessHelper, "_clean")
-  def test_stopAgent(self, _clean_mock, exitMock):
-
-    ProcessHelper.stopAgent()
-    self.assertTrue(_clean_mock.called)
-    self.assertTrue(exitMock.called)
-
-
-  @patch("os.execvp")
-  @patch.object(ProcessHelper, "_clean")
-  def test_restartAgent(self, _clean_mock, execMock):
-
-    ProcessHelper.logger = MagicMock()
-    ProcessHelper.restartAgent()
-
-    self.assertTrue(_clean_mock.called)
-    self.assertTrue(execMock.called)
-    self.assertEqual(2, len(execMock.call_args_list[0]))
-
diff --git a/branch-1.2/ambari-agent/src/test/python/TestPuppetExecutor.py b/branch-1.2/ambari-agent/src/test/python/TestPuppetExecutor.py
deleted file mode 100644
index dcfe17d..0000000
--- a/branch-1.2/ambari-agent/src/test/python/TestPuppetExecutor.py
+++ /dev/null
@@ -1,177 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-from unittest import TestCase
-from puppetExecutor import puppetExecutor
-from Grep import Grep
-from pprint import pformat
-import socket, threading, tempfile
-import os, time
-import sys
-from AmbariConfig import AmbariConfig
-from threading import Thread
-
-grep = Grep()
-
-class TestPuppetExecutor(TestCase):
-
-
-  def test_build(self):
-    puppetexecutor = puppetExecutor("/tmp", "/x", "/y", "/z", AmbariConfig().getConfig())
-    command = puppetexecutor.puppetCommand("site.pp")
-    self.assertEquals("puppet", command[0], "puppet binary wrong")
-    self.assertEquals("apply", command[1], "local apply called")
-    self.assertEquals("--confdir=/tmp", command[2],"conf dir tmp")
-    self.assertEquals("--detailed-exitcodes", command[3], "make sure output \
-    correct")
-
-  def test_condense_bad2(self):
-    puppetexecutor = puppetExecutor("/tmp", "/x", "/y", "/z", AmbariConfig().getConfig())
-    puppetexecutor.ERROR_LAST_LINES_BEFORE = 2
-    puppetexecutor.ERROR_LAST_LINES_AFTER = 3
-    string_err = open('dummy_puppet_output_error2.txt', 'r').read().replace("\n", os.linesep)
-    result = puppetexecutor.condenseOutput(string_err, '', 1)
-    stripped_string = string_err.strip()
-    lines = stripped_string.splitlines(True)
-    d = lines[1:6]
-    result_check = True
-    for l in d:
-      result_check &= grep.filterMarkup(l) in result
-    self.assertEquals(result_check, True, "Failed to condence fail log")
-    self.assertEquals(len(result.splitlines(True)), 6, "Failed to condence fail log")
-
-  def test_condense_bad3(self):
-    puppetexecutor = puppetExecutor("/tmp", "/x", "/y", "/z", AmbariConfig().getConfig())
-    string_err = open('dummy_puppet_output_error3.txt', 'r').read().replace("\n", os.linesep)
-    result = puppetexecutor.condenseOutput(string_err, '', 1)
-    stripped_string = string_err.strip()
-    lines = stripped_string.splitlines(True)
-    #sys.stderr.write(result)
-    d = lines[0:31]
-    result_check = True
-    for l in d:
-      result_check &= grep.filterMarkup(l) in result
-    self.assertEquals(result_check, True, "Failed to condence fail log")
-    self.assertEquals(len(result.splitlines(True)), 33, "Failed to condence fail log")
-
-  def test_condense_good(self):
-    puppetexecutor = puppetExecutor("/tmp", "/x", "/y", "/z", AmbariConfig().getConfig())
-    puppetexecutor.OUTPUT_LAST_LINES = 2
-    string_good = open('dummy_puppet_output_good.txt', 'r').read().replace("\n", os.linesep)
-    result = puppetexecutor.condenseOutput(string_good, puppetExecutor.NO_ERROR, 0)
-    stripped_string = string_good.strip()
-    lines = stripped_string.splitlines(True)
-    result_check = lines[45].strip() in result and lines[46].strip() in result
-    self.assertEquals(result_check, True, "Failed to condence output log")
-    self.assertEquals(len(result.splitlines(True)), 2, "Failed to condence output log")
-
-  def test_watchdog_1(self):
-    """
-    Tests whether watchdog works
-    """
-    subproc_mock = self.Subprocess_mockup()
-    executor_mock = self.PuppetExecutor_mock("/home/centos/ambari_repo_info/ambari-agent/src/main/puppet/",
-      "/usr/",
-      "/root/workspace/puppet-install/facter-1.6.10/",
-      "/tmp", AmbariConfig().getConfig(), subproc_mock)
-    _, tmpoutfile = tempfile.mkstemp()
-    _, tmperrfile = tempfile.mkstemp()
-    result = {  }
-    puppetEnv = { "RUBYLIB" : ""}
-    executor_mock.PUPPET_TIMEOUT_SECONDS = 0.1
-    subproc_mock.returncode = None
-    thread = Thread(target=executor_mock.runPuppetFile, args=("fake_puppetFile", result, puppetEnv, tmpoutfile, tmperrfile))
-    thread.start()
-    time.sleep(0.1)
-    subproc_mock.finished_event.wait()
-    self.assertEquals(subproc_mock.was_terminated, True, "Subprocess should be terminated due to timeout")
-
-
-  def test_watchdog_2(self):
-    """
-    Tries to catch false positive watchdog invocations
-    """
-    subproc_mock = self.Subprocess_mockup()
-    executor_mock = self.PuppetExecutor_mock("/home/centos/ambari_repo_info/ambari-agent/src/main/puppet/",
-    "/usr/",
-    "/root/workspace/puppet-install/facter-1.6.10/",
-    "/tmp", AmbariConfig().getConfig(), subproc_mock)
-    _, tmpoutfile = tempfile.mkstemp()
-    _, tmperrfile = tempfile.mkstemp()
-    result = {  }
-    puppetEnv = { "RUBYLIB" : ""}
-    executor_mock.PUPPET_TIMEOUT_SECONDS = 5
-    subproc_mock.returncode = 0
-    thread = Thread(target=executor_mock.runPuppetFile, args=("fake_puppetFile", result, puppetEnv, tmpoutfile, tmperrfile))
-    thread.start()
-    time.sleep(0.1)
-    subproc_mock.should_finish_event.set()
-    subproc_mock.finished_event.wait()
-    print(subproc_mock.was_terminated)
-    self.assertEquals(subproc_mock.was_terminated, False, "Subprocess should not be terminated before timeout")
-    self.assertEquals(subproc_mock.returncode, 0, "Return code should be unchanged when the subprocess is not terminated")
-
-
-  class PuppetExecutor_mock(puppetExecutor):
-
-    def __init__(self, puppetModule, puppetInstall, facterInstall, tmpDir, config, subprocess_mockup):
-      self.subprocess_mockup = subprocess_mockup
-      puppetExecutor.__init__(self, puppetModule, puppetInstall, facterInstall, tmpDir, config)
-      pass
-
-    def lauch_puppet_subprocess(self, puppetcommand, tmpout, tmperr, puppetEnv):
-      self.subprocess_mockup.tmpout = tmpout
-      self.subprocess_mockup.tmperr = tmperr
-      return self.subprocess_mockup
-
-    def runShellKillPgrp(self, puppet):
-      puppet.terminate()  # note: In real code, subprocess.terminate() is not called
-      pass
-
-  class Subprocess_mockup():
-
-    returncode = 0
-
-    started_event = threading.Event()
-    should_finish_event = threading.Event()
-    finished_event = threading.Event()
-    was_terminated = False
-    tmpout = None
-    tmperr = None
-    pid=-1
-
-    def communicate(self):
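-      # emit dummy output, then block until the test or a terminate() call
-      # sets should_finish_event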
-      self.started_event.set()
-      self.tmpout.write("Dummy output")
-      self.tmpout.flush()
-
-      self.tmperr.write("Dummy err")
-      self.tmperr.flush()
-      self.should_finish_event.wait()
-      self.finished_event.set()
-      pass
-
-    def terminate(self):
-      self.was_terminated = True
-      self.returncode = 17
-      self.should_finish_event.set()
-
diff --git a/branch-1.2/ambari-agent/src/test/python/TestPuppetExecutorManually.py b/branch-1.2/ambari-agent/src/test/python/TestPuppetExecutorManually.py
deleted file mode 100644
index 4537989..0000000
--- a/branch-1.2/ambari-agent/src/test/python/TestPuppetExecutorManually.py
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-from unittest import TestCase
-from puppetExecutor import puppetExecutor
-from pprint import pformat
-import socket
-import os
-import sys
-import logging
-from AmbariConfig import AmbariConfig
-import tempfile
-
-FILEPATH="runme.pp"
-logger = logging.getLogger()
-
-class TestPuppetExecutor(TestCase):
-
-  def test_run(self):
-    """
-    Used to run arbitrary puppet manifest. Test tries to find puppet manifest 'runme.pp' and runs it.
-    Test does not make any assertions
-    """
-    if not os.path.isfile(FILEPATH):
-      return
-
-    logger.info("***** RUNNING " + FILEPATH + " *****")
-    cwd = os.getcwd()
-    puppetexecutor = puppetExecutor(cwd, "/x", "/y", "/tmp", AmbariConfig().getConfig())
-    result = {}
-    puppetEnv = os.environ
-    _, tmpoutfile = tempfile.mkstemp()
-    _, tmperrfile = tempfile.mkstemp()
-    result = puppetexecutor.runPuppetFile(FILEPATH, result, puppetEnv, tmpoutfile, tmperrfile)
-    logger.info("*** Puppet output: " + str(result['stdout']))
-    logger.info("*** Puppet errors: " + str(result['stderr']))
-    logger.info("*** Puppet retcode: " + str(result['exitcode']))
-    logger.info("****** DONE *****")
-
-
diff --git a/branch-1.2/ambari-agent/src/test/python/TestRegistration.py b/branch-1.2/ambari-agent/src/test/python/TestRegistration.py
deleted file mode 100644
index 76d638b..0000000
--- a/branch-1.2/ambari-agent/src/test/python/TestRegistration.py
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-from unittest import TestCase
-from ambari_agent.Register import Register
-import socket
-import os, pprint, json
-
-class TestRegistration(TestCase):
-
-  def test_registration_build(self):
-    register = Register()
-    data = register.build(1)
-    #print ("Register: " + pprint.pformat(data))
-    self.assertEquals(len(data['hardwareProfile']) > 0, True, "hardwareProfile should contain content")
-    self.assertEquals(data['hostname'] != "", True, "hostname should not be empty")
-    self.assertEquals(data['publicHostname'] != "", True, "publicHostname should not be empty")
-    self.assertEquals(data['responseId'], 1)
-    self.assertEquals(data['timestamp'] > 1353678475465L, True, "timestamp should be a recent epoch value")
-    self.assertEquals(len(data['agentEnv']) > 0, True, "agentEnv should not be empty")
-    self.assertEquals(len(data), 6)
diff --git a/branch-1.2/ambari-agent/src/test/python/TestRepoInstaller.py b/branch-1.2/ambari-agent/src/test/python/TestRepoInstaller.py
deleted file mode 100644
index 55e1b29..0000000
--- a/branch-1.2/ambari-agent/src/test/python/TestRepoInstaller.py
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-from unittest import TestCase
-from ambari_agent.RepoInstaller import RepoInstaller
-import tempfile
-import json
-import shutil
-from ambari_agent.AmbariConfig import AmbariConfig
-from mock.mock import patch, MagicMock, call
-
-class TestRepoInstaller(TestCase):
-
-  def setUp(self):
-    self.dir = tempfile.mkdtemp()
-    jsonCommand = file('../../main/python/ambari_agent/test.json').read()
-    self.parsedJson= json.loads(jsonCommand)
-    self.config = AmbariConfig().getConfig()
-    self.repoInstaller = RepoInstaller(self.parsedJson, self.dir, '../../main/puppet/modules', 1, self.config)
-
-    pass
-
-  def tearDown(self):
-    shutil.rmtree(self.dir)
-    pass
-
-
-  @patch.object(RepoInstaller, 'prepareReposInfo')
-  @patch.object(RepoInstaller, 'generateFiles')
-  def testInstallRepos(self, generateFilesMock, prepareReposInfoMock):
-    result = self.repoInstaller.installRepos()
-    self.assertTrue(prepareReposInfoMock.called)
-    self.assertTrue(generateFilesMock.called)
-    print('installRepos result: ' + result.__str__())
-    pass
diff --git a/branch-1.2/ambari-agent/src/test/python/TestServerStatus.py b/branch-1.2/ambari-agent/src/test/python/TestServerStatus.py
deleted file mode 100644
index 8d09037..0000000
--- a/branch-1.2/ambari-agent/src/test/python/TestServerStatus.py
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-from unittest import TestCase
-from ambari_agent.ServerStatus import ServerStatus
-
-class TestServerStatus(TestCase):
-  def test_build(self):
-    serverStatus = ServerStatus()
-    result = serverStatus.build()
-    self.assertEqual(result, [], 'List of running servers should be 0.')
-
diff --git a/branch-1.2/ambari-agent/src/test/python/TestStatusCheck.py b/branch-1.2/ambari-agent/src/test/python/TestStatusCheck.py
deleted file mode 100644
index 75360c1..0000000
--- a/branch-1.2/ambari-agent/src/test/python/TestStatusCheck.py
+++ /dev/null
@@ -1,94 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-import tempfile
-import shutil
-import os
-from unittest import TestCase
-from ambari_agent.StatusCheck import StatusCheck
-import subprocess
-import signal
-from shell import shellRunner
-
-
-MAPPING_FILE_NAME='map.dict'
-
-COMPONENT_LIVE = 'LIVE_COMPONENT'
-COMPONENT_LIVE_PID = 'live_comp.pid'
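-# endless shell loop that keeps a "live" process (and a valid pid) running
-# for the duration of the test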
-COMPONENT_LIVE_CMD='''
-while [ 1==1 ]
-do
-   echo ok
-done
-'''
-
-COMPONENT_DEAD = 'DEAD_COMPONENT'
-COMPONENT_DEAD_PID = 'dead_comp.pid'
-DEAD_PID=0
-
-
-class TestStatusCheck(TestCase):
-
-  def setUp(self):
-
-    self.tmpdir = tempfile.mkdtemp()
-    self.serviceToPidDict = {
-      COMPONENT_LIVE : COMPONENT_LIVE_PID,
-      COMPONENT_DEAD : COMPONENT_DEAD_PID
-    }
-
-    self.pidPathesVars = [
-      {'var' : '',
-      'defaultValue' : self.tmpdir}
-    ]
-
-    self.sh = shellRunner()
-    
-    #Launch eternal process
-    p = subprocess.Popen([COMPONENT_LIVE_CMD], stdout=subprocess.PIPE, 
-                         stderr=subprocess.PIPE, shell=True, close_fds=True)
-
-
-    #Write pid of live process to file
-    live_pid_file = open(self.tmpdir + os.sep + COMPONENT_LIVE_PID, 'w')
-    self.live_pid = p.pid
-    live_pid_file.write(str(self.live_pid))
-    live_pid_file.close()
-
-    #Write pid of dead process to file
-    dead_pid_file = open(self.tmpdir + os.sep + COMPONENT_DEAD_PID, 'w')
-    dead_pid_file.write(str(DEAD_PID))
-    dead_pid_file.close()
-
-    #Init status checker
-    self.statusCheck = StatusCheck(self.serviceToPidDict,self.pidPathesVars,{})
-
-  # Ensure that the status checker returns True for a running process
-  def test_live(self):
-    status = self.statusCheck.getStatus(COMPONENT_LIVE)
-    self.assertEqual(status, True)
-
-  # Ensure that the status checker returns False for a dead process
-  def test_dead(self):
-    status = self.statusCheck.getStatus(COMPONENT_DEAD)
-    self.assertEqual(status, False)
-
-  def tearDown(self):
-    os.kill(self.live_pid, signal.SIGKILL)
-    shutil.rmtree(self.tmpdir)
diff --git a/branch-1.2/ambari-agent/src/test/python/dummy_puppet_output_error.txt b/branch-1.2/ambari-agent/src/test/python/dummy_puppet_output_error.txt
deleted file mode 100644
index 5efa5af..0000000
--- a/branch-1.2/ambari-agent/src/test/python/dummy_puppet_output_error.txt
+++ /dev/null
@@ -1,45 +0,0 @@
-debug: Creating default schedules
-debug: Puppet::Type::User::ProviderDirectoryservice: file /usr/bin/dscl does not exist
-debug: Puppet::Type::User::ProviderUser_role_add: file roledel does not exist
-debug: Puppet::Type::User::ProviderPw: file pw does not exist
-debug: Failed to load library 'ldap' for feature 'ldap'
-debug: Puppet::Type::User::ProviderLdap: feature ldap is missing
-debug: /File[/var/lib/puppet/state/graphs]: Autorequiring File[/var/lib/puppet/state]
-debug: /File[/var/lib/puppet/ssl/crl.pem]: Autorequiring File[/var/lib/puppet/ssl]
-debug: /File[/var/lib/puppet/ssl/private_keys]: Autorequiring File[/var/lib/puppet/ssl]
-debug: /File[/var/lib/puppet/state/resources.txt]: Autorequiring File[/var/lib/puppet/state]
-debug: /File[/var/lib/puppet/ssl/certificate_requests]: Autorequiring File[/var/lib/puppet/ssl]
-debug: /File[/var/lib/puppet/ssl/certs]: Autorequiring File[/var/lib/puppet/ssl]
-debug: /File[/var/lib/puppet/state/state.yaml]: Autorequiring File[/var/lib/puppet/state]
-debug: /File[/var/lib/puppet/client_data]: Autorequiring File[/var/lib/puppet]
-debug: /File[/var/lib/puppet/facts]: Autorequiring File[/var/lib/puppet]
-debug: /File[/var/lib/puppet/ssl]: Autorequiring File[/var/lib/puppet]
-debug: /File[/var/lib/puppet/state]: Autorequiring File[/var/lib/puppet]
-debug: /File[/var/lib/puppet/ssl/private]: Autorequiring File[/var/lib/puppet/ssl]
-debug: /File[/var/lib/puppet/state/last_run_report.yaml]: Autorequiring File[/var/lib/puppet/state]
-debug: /File[/var/lib/puppet/lib]: Autorequiring File[/var/lib/puppet]
-debug: /File[/var/lib/puppet/ssl/public_keys]: Autorequiring File[/var/lib/puppet/ssl]
-debug: /File[/var/lib/puppet/client_yaml]: Autorequiring File[/var/lib/puppet]
-debug: /File[/var/lib/puppet/ssl/certs/ca.pem]: Autorequiring File[/var/lib/puppet/ssl/certs]
-debug: /File[/var/lib/puppet/clientbucket]: Autorequiring File[/var/lib/puppet]
-debug: /File[/var/lib/puppet/state/last_run_summary.yaml]: Autorequiring File[/var/lib/puppet/state]
-debug: Finishing transaction 70171638648540
-debug: Loaded state in 0.00 seconds
-debug: Loaded state in 0.00 seconds
-info: Applying configuration version '1352127563'
-debug: /Schedule[daily]: Skipping device resources because running on a host
-debug: /Schedule[monthly]: Skipping device resources because running on a host
-debug: /Schedule[hourly]: Skipping device resources because running on a host
-debug: /Schedule[never]: Skipping device resources because running on a host
-debug: Exec[command_good](provider=posix): Executing 'wget e432423423xample.com/badurl444111'
-debug: Executing 'wget e432423423xample.com/badurl444111'
-err: /Stage[main]//Exec[command_good]/returns: change from notrun to 0 failed: wget e432423423xample.com/badurl444111 returned 4 instead of one of [0] at /root/puppet-learn/2-bad.pp:5
-debug: /Schedule[weekly]: Skipping device resources because running on a host
-debug: /Schedule[puppet]: Skipping device resources because running on a host
-debug: Finishing transaction 70171639726240
-debug: Storing state
-debug: Stored state in 0.01 seconds
-notice: Finished catalog run in 0.23 seconds
-debug: Finishing transaction 70171638871060
-debug: Received report to process from ambari-dmi
-debug: Processing report from ambari-dmi with processor Puppet::Reports::Store
diff --git a/branch-1.2/ambari-agent/src/test/python/dummy_puppet_output_error2.txt b/branch-1.2/ambari-agent/src/test/python/dummy_puppet_output_error2.txt
deleted file mode 100644
index 19ae347..0000000
--- a/branch-1.2/ambari-agent/src/test/python/dummy_puppet_output_error2.txt
+++ /dev/null
@@ -1,40 +0,0 @@
-notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/returns: 12/11/10 08:57:57 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 7 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
-notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/returns: 12/11/10 08:57:57 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 7 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
-notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/returns: 12/11/10 08:57:57 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 7 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
-[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred]/Hdp-hadoop::Exec-hadoop[fs -mkdir /mapred]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred]/returns: Bad connection to FS. command aborted. exception: Call to dev.hortonworks.com/10.0.2.15:8020 failed on connection exception: java.net.ConnectException: Connection refused
-err: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred]/Hdp-hadoop::Exec-hadoop[fs -mkdir /mapred]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred]/returns: change from notrun to 0 failed: hadoop --config /etc/hadoop/conf fs -mkdir /mapred returned 255 instead of one of [0] at /var/lib/ambari-agent/puppet/modules/hdp/manifests/init.pp:267
-notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred]/Hdp-hadoop::Exec-hadoop[fs -mkdir /mapred]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred]/Anchor[hdp::exec::hadoop --config /etc/hadoop/conf fs -mkdir /mapred::end]: Dependency Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred] has failures: true
-warning: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred]/Hdp-hadoop::Exec-hadoop[fs -mkdir /mapred]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred]/Anchor[hdp::exec::hadoop --config /etc/hadoop/conf fs -mkdir /mapred::end]: Skipping because of failed dependencies
-notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred]/Hdp-hadoop::Exec-hadoop[fs -chown mapred /mapred]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred]/Anchor[hdp::exec::hadoop --config /etc/hadoop/conf fs -chown mapred /mapred::begin]: Dependency Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred] has failures: true
-warning: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred]/Hdp-hadoop::Exec-hadoop[fs -chown mapred /mapred]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred]/Anchor[hdp::exec::hadoop --config /etc/hadoop/conf fs -chown mapred /mapred::begin]: Skipping because of failed dependencies
-notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred]/Hdp-hadoop::Exec-hadoop[fs -chown mapred /mapred]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred]/Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred]: Dependency Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred] has failures: true
-warning: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred]/Hdp-hadoop::Exec-hadoop[fs -chown mapred /mapred]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred]/Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred]: Skipping because of failed dependencies
-notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred]/Hdp-hadoop::Exec-hadoop[fs -chown mapred /mapred]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred]/Anchor[hdp::exec::hadoop --config /etc/hadoop/conf fs -chown mapred /mapred::end]: Dependency Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred] has failures: true
-warning: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred]/Hdp-hadoop::Exec-hadoop[fs -chown mapred /mapred]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred]/Anchor[hdp::exec::hadoop --config /etc/hadoop/conf fs -chown mapred /mapred::end]: Skipping because of failed dependencies
-notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/returns: 12/11/10 08:57:50 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 0 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
-notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/returns: 12/11/10 08:57:51 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 1 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
-notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/returns: 12/11/10 08:57:52 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 2 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
-notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/returns: 12/11/10 08:57:53 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 3 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
-notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/returns: 12/11/10 08:57:54 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 4 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
-notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/returns: 12/11/10 08:57:55 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 5 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
-notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/returns: 12/11/10 08:57:56 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 6 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
-notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/returns: 12/11/10 08:57:57 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 7 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
-notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/returns: 12/11/10 08:57:58 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 8 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
-notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/returns: 12/11/10 08:57:59 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 9 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
-notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/returns: Bad connection to FS. command aborted. exception: Call to dev.hortonworks.com/10.0.2.15:8020 failed on connection exception: java.net.ConnectException: Connection refused
-err: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/returns: change from notrun to 0 failed: hadoop --config /etc/hadoop/conf fs -mkdir /tmp returned 255 instead of one of [0] at /var/lib/ambari-agent/puppet/modules/hdp/manifests/init.pp:267
-notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Anchor[hdp::exec::hadoop --config /etc/hadoop/conf fs -mkdir /tmp::end]: Dependency Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp] has failures: true
-warning: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Anchor[hdp::exec::hadoop --config /etc/hadoop/conf fs -mkdir /tmp::end]: Skipping because of failed dependencies
-notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -mkdir /mapred/system]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]/Anchor[hdp::exec::hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system::begin]: Dependency Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred] has failures: true
-warning: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -mkdir /mapred/system]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]/Anchor[hdp::exec::hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system::begin]: Skipping because of failed dependencies
-notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -mkdir /mapred/system]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]: Dependency Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred] has failures: true
-warning: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -mkdir /mapred/system]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]: Skipping because of failed dependencies
-notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -mkdir /mapred/system]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]/Anchor[hdp::exec::hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system::end]: Dependency Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred] has failures: true
-warning: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -mkdir /mapred/system]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]/Anchor[hdp::exec::hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system::end]: Skipping because of failed dependencies
-notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -mkdir /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/returns: 12/11/10 08:58:14 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 0 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
-notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -mkdir /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/returns: 12/11/10 08:58:15 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 1 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
-notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -mkdir /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/returns: 12/11/10 08:58:16 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 2 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
-notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -mkdir /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/returns: 12/11/10 08:58:17 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 3 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
-notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -mkdir /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/returns: 12/11/10 08:58:18 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 4 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
-notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -mkdir /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/returns: 12/11/10 08:58:19 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 5 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
-notice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -mkdir /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/returns: 12/11/10 08:58:20 INFO ipc.Client: Retrying connect to server: dev.hortonworks.com/10.0.2.15:8020. Already tried 6 time(s); retry policy is RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1 SECONDS)
diff --git a/branch-1.2/ambari-agent/src/test/python/dummy_puppet_output_error3.txt b/branch-1.2/ambari-agent/src/test/python/dummy_puppet_output_error3.txt
deleted file mode 100644
index 06b6094..0000000
--- a/branch-1.2/ambari-agent/src/test/python/dummy_puppet_output_error3.txt
+++ /dev/null
@@ -1,76 +0,0 @@
-ESC[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-plugins]/Hdp::Package[nagios-plugins]/Hdp:
-:Package::Process_pkg[nagios-plugins]/Package[nagios-plugins-1.4.9]/ensure: createdESC[0mESC[1;35merr: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-addons]/Hdp::Package[nagios-addons]/Hdp::Pack
-age::Process_pkg[nagios-addons]/Package[hdp_mon_nagios_addons]/ensure: change from absent to present failed: Execution of '/usr/bin/yum -d 0 -e 0 -y install hdp_mon_nagios_addons' returned 1:
-Error Downloading Packages:
-  hdp_mon_nagios_addons-0.0.2.15-1.noarch: failure: noarch/hdp_mon/hdp_mon_nagios_addons-0.0.2.15-1.noarch.rpm from AMBARI.dev-1.x: [Errno 256] No more mirrors to try.
-ESC[0m
-ESC[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-addons]/Hdp::Package[nagios-addons]/Hdp::Package::Process_pkg[nagios-addons]/Anchor[hdp::package::nagios-addons::end]: Dependency Package[hdp_mon_nagios_addons] has failures:
-trueESC[0mESC[0;33mwarning: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-addons]/Hdp::Package[nagios-addons]/Hdp::
-Package::Process_pkg[nagios-addons]/Anchor[hdp::package::nagios-addons::end]: Skipping because of failed dependenciesESC[0mESC[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Anchor[hdp-nagios::server::packages::end]: Dependency Package[hdp_mon
-\u001B[0;33mwarning: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Anchor[hdp::exec::htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin::begin]: Skipping because of failed dependencies\u001B[0m
-\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]: Dependency Package[hdp_mon_nagios_addons] has failures: true\u001B[0m
-\u001B[0;33mwarning: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]: Skipping because of failed dependencies\u001B[0m
-\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Anchor[hdp::exec::htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin::end]: Dependency Package[hdp_mon_nagios_addons] has failures: true\u001B[0m
-\u001B[0;33mwarning: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Anchor[hdp::exec::htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin::end]: Skipping because of failed dependencies\u001B[0m
-\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Anchor[hdp::exec::monitor webserver restart::begin]: Dependency Package[hdp_mon_nagios_addons] has failures: true\u001B[0m
-\u001B[0;33mwarning: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Anchor[hdp::exec::monitor webserver restart::begin]: Skipping because of failed dependencies\u001B[0m
-\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Exec[monitor webserver restart]: Dependency Package[hdp_mon_nagios_addons] has failures: true\u001B[0m
-\u001B[0;33mwarning: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Exec[monitor webserver restart]: Skipping because of failed dependencies\u001B[0m
-\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Anchor[hdp::exec::monitor webserver restart::end]: Dependency Package[hdp_mon_nagios_addons] has failures: true\u001B[0m
-\u001B[0;33mwarning: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Anchor[hdp::exec::monitor webserver restart::end]: Skipping because of failed dependencies\u001B[0m
-\u001B[0;33mwarning: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Anchor[hdp::exec::htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin::begin]: Skipping because of failed dependencies\u001B[0m
-\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]: Dependency Package[hdp_mon_nagios_addons] has failures: true\u001B[0m
-\u001B[0;33mwarning: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]: Skipping because of failed dependencies\u001B[0m
-\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Anchor[hdp::exec::htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin::end]: Dependency Package[hdp_mon_nagios_addons] has failures: true\u001B[0m
-\u001B[0;33mwarning: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Anchor[hdp::exec::htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin::end]: Skipping because of failed dependencies\u001B[0m
-\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Anchor[hdp::exec::monitor webserver restart::begin]: Dependency Package[hdp_mon_nagios_addons] has failures: true\u001B[0m
-\u001B[0;33mwarning: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Anchor[hdp::exec::monitor webserver restart::begin]: Skipping because of failed dependencies\u001B[0m
-\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Exec[monitor webserver restart]: Dependency Package[hdp_mon_nagios_addons] has failures: true\u001B[0m
-\u001B[0;33mwarning: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Exec[monitor webserver restart]: Skipping because of failed dependencies\u001B[0m
-\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Anchor[hdp::exec::monitor webserver restart::end]: Dependency Package[hdp_mon_nagios_addons] has failures: true\u001B[0m
-\u001B[0;33mwarning: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Anchor[hdp::exec::monitor webserver restart::end]: Skipping because of failed dependencies\u001B[0m
-\u001B[0;33mwarning: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Anchor[hdp::exec::htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin::begin]: Skipping because of failed dependencies\u001B[0m
-\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]: Dependency Package[hdp_mon_nagios_addons] has failures: true\u001B[0m
-\u001B[0;33mwarning: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]: Skipping because of failed dependencies\u001B[0m
-\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Anchor[hdp::exec::htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin::end]: Dependency Package[hdp_mon_nagios_addons] has failures: true\u001B[0m
-\u001B[0;33mwarning: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Anchor[hdp::exec::htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin::end]: Skipping because of failed dependencies\u001B[0m
-\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Anchor[hdp::exec::monitor webserver restart::begin]: Dependency Package[hdp_mon_nagios_addons] has failures: true\u001B[0m
-\u001B[0;33mwarning: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Anchor[hdp::exec::monitor webserver restart::begin]: Skipping because of failed dependencies\u001B[0m
-\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Exec[monitor webserver restart]: Dependency Package[hdp_mon_nagios_addons] has failures: true\u001B[0m
-\u001B[0;33mwarning: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Exec[monitor webserver restart]: Skipping because of failed dependencies\u001B[0m
-\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Anchor[hdp::exec::monitor webserver restart::end]: Dependency Package[hdp_mon_nagios_addons] has failures: true\u001B[0m
-\u001B[0;33mwarning: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Anchor[hdp::exec::monitor webserver restart::end]: Skipping because of failed dependencies\u001B[0m
-\u001B[0;33mwarning: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Anchor[hdp::exec::htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin::begin]: Skipping because of failed dependencies\u001B[0m
-\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]: Dependency Package[hdp_mon_nagios_addons] has failures: true\u001B[0m
-\u001B[0;33mwarning: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]: Skipping because of failed dependencies\u001B[0m
-\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Anchor[hdp::exec::htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin::end]: Dependency Package[hdp_mon_nagios_addons] has failures: true\u001B[0m
-\u001B[0;33mwarning: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Anchor[hdp::exec::htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin::end]: Skipping because of failed dependencies\u001B[0m
-\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Anchor[hdp::exec::monitor webserver restart::begin]: Dependency Package[hdp_mon_nagios_addons] has failures: true\u001B[0m
-\u001B[0;33mwarning: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Anchor[hdp::exec::monitor webserver restart::begin]: Skipping because of failed dependencies\u001B[0m
-\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Exec[monitor webserver restart]: Dependency Package[hdp_mon_nagios_addons] has failures: true\u001B[0m
-\u001B[0;33mwarning: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Exec[monitor webserver restart]: Skipping because of failed dependencies\u001B[0m
-\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Anchor[hdp::exec::monitor webserver restart::end]: Dependency Package[hdp_mon_nagios_addons] has failures: true\u001B[0m
-\u001B[0;33mwarning: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Anchor[hdp::exec::monitor webserver restart::end]: Skipping because of failed dependencies\u001B[0m
-\u001B[0;33mwarning: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Anchor[hdp::exec::htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin::begin]: Skipping because of failed dependencies\u001B[0m
-\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]: Dependency Package[hdp_mon_nagios_addons] has failures: true\u001B[0m
-\u001B[0;33mwarning: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]: Skipping because of failed dependencies\u001B[0m
-\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Anchor[hdp::exec::htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin::end]: Dependency Package[hdp_mon_nagios_addons] has failures: true\u001B[0m
-\u001B[0;33mwarning: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Anchor[hdp::exec::htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin::end]: Skipping because of failed dependencies\u001B[0m
-\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Anchor[hdp::exec::monitor webserver restart::begin]: Dependency Package[hdp_mon_nagios_addons] has failures: true\u001B[0m
-\u001B[0;33mwarning: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Anchor[hdp::exec::monitor webserver restart::begin]: Skipping because of failed dependencies\u001B[0m
-\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Exec[monitor webserver restart]: Dependency Package[hdp_mon_nagios_addons] has failures: true\u001B[0m
-\u001B[0;33mwarning: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Exec[monitor webserver restart]: Skipping because of failed dependencies\u001B[0m
-\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Anchor[hdp::exec::monitor webserver restart::end]: Dependency Package[hdp_mon_nagios_addons] has failures: true\u001B[0m
-\u001B[0;33mwarning: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Anchor[hdp::exec::monitor webserver restart::end]: Skipping because of failed dependencies\u001B[0m
-\u001B[0;33mwarning: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Anchor[hdp::exec::htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin::begin]: Skipping because of failed dependencies\u001B[0m
-\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]: Dependency Package[hdp_mon_nagios_addons] has failures: true\u001B[0m
-\u001B[0;33mwarning: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]: Skipping because of failed dependencies\u001B[0m
-\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Anchor[hdp::exec::htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin::end]: Dependency Package[hdp_mon_nagios_addons] has failures: true\u001B[0m
-\u001B[0;33mwarning: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Anchor[hdp::exec::htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin::end]: Skipping because of failed dependencies\u001B[0m
-\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Anchor[hdp::exec::monitor webserver restart::begin]: Dependency Package[hdp_mon_nagios_addons] has failures: true\u001B[0m
-\u001B[0;33mwarning: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Anchor[hdp::exec::monitor webserver restart::begin]: Skipping because of failed dependencies\u001B[0m
-\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Exec[monitor webserver restart]: Dependency Package[hdp_mon_nagios_addons] has failures: true\u001B[0m
-\u001B[0;33mwarning: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Exec[monitor webserver restart]: Skipping because of failed dependencies\u001B[0m
-\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Anchor[hdp::exec::monitor webserver restart::end]: Dependency Package[hdp_mon_nagios_addons] has failures: true\u001B[0m
-\u001B[0;33mwarning: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver restart]/Anchor[hdp::exec::monitor webserver restart::end]: Skipping because of failed dependencies\u001B[0m
-\u001B[0;36mnotice: Finished catalog run in 49.63
\ No newline at end of file
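This fixture embeds terminal color escapes both as raw bytes and as spelled-out ESC[ / \u001B[ markers, so any parser consuming it has to normalize those before matching err:/warning: prefixes. A minimal sketch, assuming both the raw 0x1B byte and the literal spellings in the checked-in fixture should be stripped:

    import re

    # Covers raw CSI color codes plus the literal "ESC[" and "\u001B["
    # spellings that appear in this fixture (an assumption about how
    # the fixture is consumed).
    ANSI_CSI_RE = re.compile(r'(?:\x1b|ESC|\\u001B)\[[0-9;]*m')

    def strip_ansi(text):
        return ANSI_CSI_RE.sub('', text)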
diff --git a/branch-1.2/ambari-agent/src/test/python/dummy_puppet_output_good.txt b/branch-1.2/ambari-agent/src/test/python/dummy_puppet_output_good.txt
deleted file mode 100644
index c6ecbbc..0000000
--- a/branch-1.2/ambari-agent/src/test/python/dummy_puppet_output_good.txt
+++ /dev/null
@@ -1,47 +0,0 @@
-debug: Creating default schedules
-debug: Puppet::Type::User::ProviderDirectoryservice: file /usr/bin/dscl does not exist
-debug: Puppet::Type::User::ProviderUser_role_add: file roledel does not exist
-debug: Puppet::Type::User::ProviderPw: file pw does not exist
-debug: Failed to load library 'ldap' for feature 'ldap'
-debug: Puppet::Type::User::ProviderLdap: feature ldap is missing
-debug: /File[/var/lib/puppet/ssl/certs/ca.pem]: Autorequiring File[/var/lib/puppet/ssl/certs]
-debug: /File[/var/lib/puppet/ssl/public_keys]: Autorequiring File[/var/lib/puppet/ssl]
-debug: /File[/var/lib/puppet/ssl/crl.pem]: Autorequiring File[/var/lib/puppet/ssl]
-debug: /File[/var/lib/puppet/state/last_run_report.yaml]: Autorequiring File[/var/lib/puppet/state]
-debug: /File[/var/lib/puppet/ssl/certificate_requests]: Autorequiring File[/var/lib/puppet/ssl]
-debug: /File[/var/lib/puppet/state/last_run_summary.yaml]: Autorequiring File[/var/lib/puppet/state]
-debug: /File[/var/lib/puppet/client_data]: Autorequiring File[/var/lib/puppet]
-debug: /File[/var/lib/puppet/state]: Autorequiring File[/var/lib/puppet]
-debug: /File[/var/lib/puppet/ssl/private]: Autorequiring File[/var/lib/puppet/ssl]
-debug: /File[/var/lib/puppet/state/graphs]: Autorequiring File[/var/lib/puppet/state]
-debug: /File[/var/lib/puppet/ssl]: Autorequiring File[/var/lib/puppet]
-debug: /File[/var/lib/puppet/state/state.yaml]: Autorequiring File[/var/lib/puppet/state]
-debug: /File[/var/lib/puppet/client_yaml]: Autorequiring File[/var/lib/puppet]
-debug: /File[/var/lib/puppet/facts]: Autorequiring File[/var/lib/puppet]
-debug: /File[/var/lib/puppet/ssl/private_keys]: Autorequiring File[/var/lib/puppet/ssl]
-debug: /File[/var/lib/puppet/state/resources.txt]: Autorequiring File[/var/lib/puppet/state]
-debug: /File[/var/lib/puppet/clientbucket]: Autorequiring File[/var/lib/puppet]
-debug: /File[/var/lib/puppet/lib]: Autorequiring File[/var/lib/puppet]
-debug: /File[/var/lib/puppet/ssl/certs]: Autorequiring File[/var/lib/puppet/ssl]
-debug: Finishing transaction 70060456464420
-debug: Loaded state in 0.00 seconds
-debug: Loaded state in 0.00 seconds
-info: Applying configuration version '1352127399'
-debug: /Schedule[daily]: Skipping device resources because running on a host
-debug: /Schedule[monthly]: Skipping device resources because running on a host
-debug: /Schedule[hourly]: Skipping device resources because running on a host
-debug: /Schedule[never]: Skipping device resources because running on a host
-debug: Exec[command_good](provider=posix): Executing 'wget example.com'
-debug: Executing 'wget example.com'
-notice: /Stage[main]//Exec[command_good]/returns: executed successfully
-debug: /Stage[main]//Exec[command_good]: The container Class[Main] will propagate my refresh event
-debug: /Schedule[weekly]: Skipping device resources because running on a host
-debug: /Schedule[puppet]: Skipping device resources because running on a host
-debug: Class[Main]: The container Stage[main] will propagate my refresh event
-debug: Finishing transaction 70060457541680
-debug: Storing state
-debug: Stored state in 0.01 seconds
-notice: Finished catalog run in 0.59 seconds
-debug: Finishing transaction 70060456663980
-debug: Received report to process from ambari-dmi
-debug: Processing report from ambari-dmi with processor Puppet::Reports::Store
\ No newline at end of file
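Taken together, the good and error fixtures suggest the classification rule a consumer applies: a run has failed exactly when an err: line appears. The real parsing logic is not part of this diff; a hedged sketch of such a classifier:

    def puppet_run_failed(output):
        # Treat the run as failed when any line is a Puppet error report.
        return any(line.startswith('err:') for line in output.splitlines())

    def puppet_error_lines(output):
        # Collect the error lines for reporting.
        return [line for line in output.splitlines()
                if line.startswith('err:')]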
diff --git a/branch-1.2/ambari-agent/src/test/python/examples/debug_testcase_example.py b/branch-1.2/ambari-agent/src/test/python/examples/debug_testcase_example.py
deleted file mode 100644
index 74bd817..0000000
--- a/branch-1.2/ambari-agent/src/test/python/examples/debug_testcase_example.py
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-from unittest import TestCase
-#from Register import Register
-from ambari_agent.Controller import Controller
-from ambari_agent.Heartbeat import Heartbeat
-from ambari_agent.ActionQueue import ActionQueue
-from ambari_agent import AmbariConfig
-from ambari_agent.NetUtil import NetUtil
-import socket, ConfigParser, logging
-import os, pprint, json, sys, unittest
-from threading import Thread
-import time
-import Queue
-
-logger = logging.getLogger()
-
-class TestController(TestCase):
-
-# This file should be placed at ambari-agent/src/main/python/ambari-agent/debug_testcase_example.py.
-# After installing the Python plugin and adjusting the test,
-# it can be run under the IntelliJ IDEA debugger.
-
-  def setUp(self):
-    #logger.disabled = True
-    self.defaulttimeout = -1.0
-    if hasattr(socket, 'getdefaulttimeout'):
-      # Set the default timeout on sockets
-      self.defaulttimeout = socket.getdefaulttimeout()
-
-  def tearDown(self):
-    if self.defaulttimeout is not None and self.defaulttimeout > 0 and hasattr(socket, 'setdefaulttimeout'):
-      # Set the default timeout on sockets
-      socket.setdefaulttimeout(self.defaulttimeout)
-      #logger.disabled = False
-
-  def test_custom(self):
-    '''
-      Test to make sure that when we receive a re-register command, we register with the server.
-    '''
-    pass
-
-def main(argv=None):
-  logger.setLevel(logging.INFO)
-  formatter = logging.Formatter("%(asctime)s %(filename)s:%(lineno)d - \
-      %(message)s")
-  stream_handler = logging.StreamHandler()
-  stream_handler.setFormatter(formatter)
-  logger.addHandler(stream_handler)
-
-  unittest.main()
-
-if __name__ == '__main__':
-  main()
-
-
diff --git a/branch-1.2/ambari-agent/src/test/python/unitTests.py b/branch-1.2/ambari-agent/src/test/python/unitTests.py
deleted file mode 100644
index 0ba2cea..0000000
--- a/branch-1.2/ambari-agent/src/test/python/unitTests.py
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import unittest
-import os, sys
-from os.path import dirname, isdir
-import logging.handlers
-import logging
-
-LOG_FILE_NAME='tests.log'
-SELECTED_PREFIX = "_"
-PY_EXT='.py'
-
-class TestAgent(unittest.TestSuite):
-  def run(self, result):
-    run = unittest.TestSuite.run
-    run(self, result)
-    return result
-
-
-def parent_dir(path):
-  if isdir(path):
-    if path.endswith(os.sep):
-      path = os.path.dirname(path)
-    parent_dir = os.path.dirname(path)
-  else:
-    parent_dir = os.path.dirname(os.path.dirname(path))
-
-  return parent_dir
-
-
-def all_tests_suite():
-  # Collect every test module in the current directory into one suite;
-  # a module prefixed with "_" short-circuits the list to just itself.
-  src_dir = os.getcwd()
-  files_list=os.listdir(src_dir)
-  tests_list = []
-
-  logger.info('------------------------TESTS LIST:-------------------------------------')
-  # If test with special name exists, run only this test
-  selected_test = None
-  for file_name in files_list:
-    if file_name.endswith(PY_EXT) and not file_name == __file__ and file_name.startswith(SELECTED_PREFIX):
-      logger.info("Running only selected test " + str(file_name))
-      selected_test = file_name
-  if selected_test is not None:
-      tests_list.append(selected_test.replace(PY_EXT, ''))
-  else:
-    for file_name in files_list:
-      if file_name.endswith(PY_EXT) and not file_name == __file__:
-        logger.info(file_name)
-        tests_list.append(file_name.replace(PY_EXT, ''))
-  logger.info('------------------------------------------------------------------------')
-
-  suite = unittest.TestLoader().loadTestsFromNames(tests_list)
-  return TestAgent([suite])
-
-def main():
-
-  logger.info('------------------------------------------------------------------------')
-  logger.info('PYTHON AGENT TESTS')
-  logger.info('------------------------------------------------------------------------')
-  runner = unittest.TextTestRunner(verbosity=2, stream=sys.stdout)
-  suite = all_tests_suite()
-  status = runner.run(suite).wasSuccessful()
-
-  if not status:
-    logger.error('-----------------------------------------------------------------------')
-    logger.error('Python unit tests failed')
-    logger.error('Find detailed logs in ' + path)
-    logger.error('-----------------------------------------------------------------------')
-    exit(1)
-  else:
-    logger.info('------------------------------------------------------------------------')
-    logger.info('Python unit tests finished successfully')
-    logger.info('------------------------------------------------------------------------')
-
-if __name__ == '__main__':
-  import os
-  import sys
-  import io
-  sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
-  sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + os.sep + 'main' + os.sep + 'python')
-  sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + os.sep + 'main' + os.sep + 'python' + os.sep + 'ambari_agent')
-  logger = logging.getLogger()
-  logger.setLevel(logging.INFO)
-  formatter = logging.Formatter("[%(levelname)s] %(message)s")
-  src_dir = os.getcwd()
-  target_dir = parent_dir(parent_dir(parent_dir(src_dir))) + os.sep + 'target'
-  if not os.path.exists(target_dir):
-    os.mkdir(target_dir)
-  path = target_dir + os.sep + LOG_FILE_NAME
-  file=open(path, "w")
-  consoleLog = logging.StreamHandler(file)
-  consoleLog.setFormatter(formatter)
-  logger.addHandler(consoleLog)
-  main()
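The runner above encodes a selection convention: prefixing one test module with an underscore runs only that module, otherwise every test module is collected. A simplified sketch of that filter in isolation (the file names are hypothetical, and unlike the runner above, which keeps only the last underscore-prefixed match, this version keeps all of them):

    PY_EXT = '.py'
    SELECTED_PREFIX = '_'

    def modules_to_run(file_names, runner_name='unitTests.py'):
        # Exclude the runner itself, then prefer any explicitly
        # selected (underscore-prefixed) modules over the full list.
        candidates = [f for f in file_names
                      if f.endswith(PY_EXT) and f != runner_name]
        selected = [f for f in candidates if f.startswith(SELECTED_PREFIX)]
        return [f[:-len(PY_EXT)] for f in (selected or candidates)]

    # modules_to_run(['TestHeartbeat.py', '_TestController.py', 'unitTests.py'])
    # returns ['_TestController']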
diff --git a/branch-1.2/ambari-common/src/test/python/mock/LICENSE.txt b/branch-1.2/ambari-common/src/test/python/mock/LICENSE.txt
deleted file mode 100644
index 7891703..0000000
--- a/branch-1.2/ambari-common/src/test/python/mock/LICENSE.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-Copyright (c) 2003-2012, Michael Foord
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-
-    * Redistributions in binary form must reproduce the above
-      copyright notice, this list of conditions and the following
-      disclaimer in the documentation and/or other materials provided
-      with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/branch-1.2/ambari-common/src/test/python/mock/MANIFEST.in b/branch-1.2/ambari-common/src/test/python/mock/MANIFEST.in
deleted file mode 100644
index d52b301..0000000
--- a/branch-1.2/ambari-common/src/test/python/mock/MANIFEST.in
+++ /dev/null
@@ -1,2 +0,0 @@
-include LICENSE.txt tox.ini tests/*.py
-recursive-include docs *.txt *.py *.png *.css *.html *.js
diff --git a/branch-1.2/ambari-common/src/test/python/mock/README.txt b/branch-1.2/ambari-common/src/test/python/mock/README.txt
deleted file mode 100644
index 677faf9..0000000
--- a/branch-1.2/ambari-common/src/test/python/mock/README.txt
+++ /dev/null
@@ -1,179 +0,0 @@
-mock is a library for testing in Python. It allows you to replace parts of
-your system under test with mock objects and make assertions about how they
-have been used.
-
-mock is now part of the Python standard library, available as `unittest.mock
-<http://docs.python.org/py3k/library/unittest.mock.html#module-unittest.mock>`_
-in Python 3.3 onwards.
-
-mock provides a core `MagicMock` class removing the need to create a host of
-stubs throughout your test suite. After performing an action, you can make
-assertions about which methods / attributes were used and arguments they were
-called with. You can also specify return values and set needed attributes in
-the normal way.
-
-mock is tested on Python versions 2.5-2.7 and Python 3. mock is also tested
-with the latest versions of Jython and pypy.
-
-The mock module also provides utility functions / objects to assist with
-testing, particularly monkey patching.
-
-* `PDF documentation for 1.0.1
-  <http://www.voidspace.org.uk/downloads/mock-1.0.1.pdf>`_
-* `mock on google code (repository and issue tracker)
-  <http://code.google.com/p/mock/>`_
-* `mock documentation
-  <http://www.voidspace.org.uk/python/mock/>`_
-* `mock on PyPI <http://pypi.python.org/pypi/mock/>`_
-* `Mailing list (testing-in-python@lists.idyll.org)
-  <http://lists.idyll.org/listinfo/testing-in-python>`_
-
-Mock is very easy to use and is designed for use with
-`unittest <http://pypi.python.org/pypi/unittest2>`_. Mock is based on
-the 'action -> assertion' pattern instead of 'record -> replay' used by many
-mocking frameworks. See the `mock documentation`_ for full details.
-
-Mock objects create all attributes and methods as you access them and store
-details of how they have been used. You can configure them, to specify return
-values or limit what attributes are available, and then make assertions about
-how they have been used::
-
-    >>> from mock import Mock
-    >>> real = ProductionClass()
-    >>> real.method = Mock(return_value=3)
-    >>> real.method(3, 4, 5, key='value')
-    3
-    >>> real.method.assert_called_with(3, 4, 5, key='value')
-
-`side_effect` allows you to perform side effects, return different values or
-raise an exception when a mock is called::
-
-   >>> mock = Mock(side_effect=KeyError('foo'))
-   >>> mock()
-   Traceback (most recent call last):
-    ...
-   KeyError: 'foo'
-   >>> values = {'a': 1, 'b': 2, 'c': 3}
-   >>> def side_effect(arg):
-   ...     return values[arg]
-   ...
-   >>> mock.side_effect = side_effect
-   >>> mock('a'), mock('b'), mock('c')
-   (3, 2, 1)
-   >>> mock.side_effect = [5, 4, 3, 2, 1]
-   >>> mock(), mock(), mock()
-   (5, 4, 3)
-
-Mock has many other ways you can configure it and control its behaviour. For
-example the `spec` argument configures the mock to take its specification from
-another object. Attempting to access attributes or methods on the mock that
-don't exist on the spec will fail with an `AttributeError`.
-
-The `patch` decorator / context manager makes it easy to mock classes or
-objects in a module under test. The object you specify will be replaced with a
-mock (or other object) during the test and restored when the test ends::
-
-    >>> from mock import patch
-    >>> @patch('test_module.ClassName1')
-    ... @patch('test_module.ClassName2')
-    ... def test(MockClass2, MockClass1):
-    ...     test_module.ClassName1()
-    ...     test_module.ClassName2()
-
-    ...     assert MockClass1.called
-    ...     assert MockClass2.called
-    ...
-    >>> test()
-
-.. note::
-
-   When you nest patch decorators the mocks are passed in to the decorated
-   function in the same order they were applied (the normal *Python* order in which
-   decorators are applied). This means from the bottom up, so in the example
-   above the mock for `test_module.ClassName2` is passed in first.
-
-   With `patch` it matters that you patch objects in the namespace where they
-   are looked up. This is normally straightforward, but for a quick guide
-   read `where to patch
-   <http://www.voidspace.org.uk/python/mock/patch.html#where-to-patch>`_.
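-
-   As a minimal sketch of "where to patch" (`mymodule` and its `report_cwd`
-   helper are hypothetical names, not part of mock): if `mymodule` does
-   ``from os import getcwd``, the name is looked up in `mymodule`, so that
-   is where it must be patched::
-
-      >>> with patch('mymodule.getcwd', return_value='/tmp'):
-      ...     assert mymodule.report_cwd() == '/tmp'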
-
-As well as a decorator `patch` can be used as a context manager in a with
-statement::
-
-    >>> with patch.object(ProductionClass, 'method') as mock_method:
-    ...     mock_method.return_value = None
-    ...     real = ProductionClass()
-    ...     real.method(1, 2, 3)
-    ...
-    >>> mock_method.assert_called_once_with(1, 2, 3)
-
-There is also `patch.dict` for setting values in a dictionary just during the
-scope of a test and restoring the dictionary to its original state when the
-test ends::
-
-   >>> foo = {'key': 'value'}
-   >>> original = foo.copy()
-   >>> with patch.dict(foo, {'newkey': 'newvalue'}, clear=True):
-   ...     assert foo == {'newkey': 'newvalue'}
-   ...
-   >>> assert foo == original
-
-Mock supports the mocking of Python magic methods. The easiest way of
-using magic methods is with the `MagicMock` class. It allows you to do
-things like::
-
-    >>> from mock import MagicMock
-    >>> mock = MagicMock()
-    >>> mock.__str__.return_value = 'foobarbaz'
-    >>> str(mock)
-    'foobarbaz'
-    >>> mock.__str__.assert_called_once_with()
-
-Mock allows you to assign functions (or other Mock instances) to magic methods
-and they will be called appropriately. The MagicMock class is just a Mock
-variant that has all of the magic methods pre-created for you (well - all the
-useful ones anyway).
-
-The following is an example of using magic methods with the ordinary Mock
-class::
-
-    >>> from mock import Mock
-    >>> mock = Mock()
-    >>> mock.__str__ = Mock(return_value='wheeeeee')
-    >>> str(mock)
-    'wheeeeee'
-
-To ensure that the mock objects your tests use have the same API as the
-objects they are replacing, you can use "auto-speccing". Auto-speccing can
-be done through the `autospec` argument to `patch`, or the `create_autospec`
-function. Auto-speccing creates mock objects that have the same attributes
-and methods as the objects they are replacing, and any functions and methods
-(including constructors) have the same call signature as the real object.
-
-This ensures that your mocks will fail in the same way as your production
-code if they are used incorrectly::
-
-   >>> from mock import create_autospec
-   >>> def function(a, b, c):
-   ...     pass
-   ...
-   >>> mock_function = create_autospec(function, return_value='fishy')
-   >>> mock_function(1, 2, 3)
-   'fishy'
-   >>> mock_function.assert_called_once_with(1, 2, 3)
-   >>> mock_function('wrong arguments')
-   Traceback (most recent call last):
-    ...
-   TypeError: <lambda>() takes exactly 3 arguments (1 given)
-
-`create_autospec` can also be used on classes, where it copies the signature of
-the `__init__` method, and on callable objects where it copies the signature of
-the `__call__` method.
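-
-A minimal sketch with a class (`Thing` is an illustrative stand-in, not part
-of mock); calling the autospecced class with arguments that don't match
-`__init__` raises a `TypeError`, just as in the function example above::
-
-   >>> from mock import create_autospec
-   >>> class Thing(object):
-   ...     def __init__(self, name):
-   ...         self.name = name
-   ...
-   >>> MockThing = create_autospec(Thing)
-   >>> instance = MockThing('fred')
-   >>> MockThing.assert_called_once_with('fred')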
-
-The distribution contains tests and documentation. The tests require
-`unittest2 <http://pypi.python.org/pypi/unittest2>`_ to run on Python 2.5, 2.6
-or 3.1. For Python 2.7 and 3.2 they can be run with
-`python -m unittest discover`.
-
-Docs from the in-development version of `mock` can be found at
-`mock.readthedocs.org <http://mock.readthedocs.org>`_.
diff --git a/branch-1.2/ambari-common/src/test/python/mock/__init__.py b/branch-1.2/ambari-common/src/test/python/mock/__init__.py
deleted file mode 100644
index 3c61c75..0000000
--- a/branch-1.2/ambari-common/src/test/python/mock/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-__author__ = 'Michael Foord'
diff --git a/branch-1.2/ambari-common/src/test/python/mock/docs/changelog.txt b/branch-1.2/ambari-common/src/test/python/mock/docs/changelog.txt
deleted file mode 100644
index 823341a..0000000
--- a/branch-1.2/ambari-common/src/test/python/mock/docs/changelog.txt
+++ /dev/null
@@ -1,737 +0,0 @@
-.. currentmodule:: mock
-
-
-CHANGELOG
-=========
-
-2012/11/05 Version 1.0.1
-------------------------
-
-* Functions decorated with `patch` variants have a `__wrapped__` attribute
-  pointing to the original function. This brings compatibility with the
-  default behaviour in Python 3.3 (due to a new feature in `functools.wraps`).
-
-Note that due to changes in `tox`, `mock` is no longer tested with Python 2.4.
-The compatibility code has not been removed so it probably still works, but
-tests are no longer run.
-
-
-2012/10/07 Version 1.0.0
-------------------------
-
-No changes since 1.0.0 beta 1. This version has feature parity with
-`unittest.mock
-<http://docs.python.org/py3k/library/unittest.mock.html#module-unittest.mock>`_
-in Python 3.3.
-
-Full list of changes since 0.8:
-
-* `mocksignature`, along with the `mocksignature` argument to `patch`, removed
-* Support for deleting attributes (accessing deleted attributes will raise an
-  `AttributeError`)
-* Added the `mock_open` helper function for mocking the builtin `open` (see
-  the sketch after this list)
-* `__class__` is assignable, so a mock can pass an `isinstance` check without
-  requiring a spec
-* Addition of `PropertyMock`, for mocking properties
-* `MagicMocks` made unorderable by default (in Python 3). The comparison
-  methods (other than equality and inequality) now return `NotImplemented`
-* Propagate traceback info to support subclassing of `_patch` by other
-  libraries
-* `create_autospec` works with attributes present in results of `dir` that
-  can't be fetched from the object's class. Contributed by Konstantine Rybnikov
-* Any exceptions in an iterable `side_effect` will be raised instead of
-  returned
-* In Python 3, `create_autospec` now supports keyword only arguments
-* Added `patch.stopall` method to stop all active patches created by `start`
-* BUGFIX: calling `MagicMock.reset_mock` wouldn't reset magic method mocks
-* BUGFIX: calling `reset_mock` on a `MagicMock` created with autospec could
-  raise an exception
-* BUGFIX: passing multiple spec arguments to patchers (`spec`, `spec_set` and
-  `autospec`) had unpredictable results, now it is an error
-* BUGFIX: using `spec=True` *and* `create=True` as arguments to patchers could
-  result in using `DEFAULT` as the spec. Now it is an error instead
-* BUGFIX: using `spec` or `autospec` arguments to patchers, along with
-  `spec_set=True` did not work correctly
-* BUGFIX: using an object that evaluates to False as a spec could be ignored
-* BUGFIX: a list as the `spec` argument to a patcher would always result in a
-  non-callable mock. Now if `__call__` is in the spec the mock is callable
-
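-As a quick illustration of the `mock_open` helper mentioned in the list above
-(a sketch; `notes.txt` is an arbitrary example name)::
-
-    >>> from mock import mock_open, patch
-    >>> m = mock_open(read_data='some text')
-    >>> with patch('__builtin__.open', m, create=True):
-    ...     with open('notes.txt') as f:
-    ...         contents = f.read()
-    ...
-    >>> contents
-    'some text'
-    >>> m.assert_called_once_with('notes.txt')
-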
-
-2012/07/13 Version 1.0.0 beta 1
---------------------------------
-
-* Added `patch.stopall` method to stop all active patches created by `start`
-* BUGFIX: calling `MagicMock.reset_mock` wouldn't reset magic method mocks
-* BUGFIX: calling `reset_mock` on a `MagicMock` created with autospec could
-  raise an exception
-
-
-2012/05/04 Version 1.0.0 alpha 2
---------------------------------
-
-* `PropertyMock` attributes are now standard `MagicMocks`
-* `create_autospec` works with attributes present in results of `dir` that
-  can't be fetched from the object's class. Contributed by Konstantine Rybnikov
-* Any exceptions in an iterable `side_effect` will be raised instead of
-  returned
-* In Python 3, `create_autospec` now supports keyword only arguments
-
-
-2012/03/25 Version 1.0.0 alpha 1
---------------------------------
-
-The standard library version!
-
-* `mocksignature`, along with the `mocksignature` argument to `patch`, removed
-* Support for deleting attributes (accessing deleted attributes will raise an
-  `AttributeError`)
-* Added the `mock_open` helper function for mocking the builtin `open`
-* `__class__` is assignable, so a mock can pass an `isinstance` check without
-  requiring a spec
-* Addition of `PropertyMock`, for mocking properties
-* `MagicMocks` made unorderable by default (in Python 3). The comparison
-  methods (other than equality and inequality) now return `NotImplemented`
-* Propagate traceback info to support subclassing of `_patch` by other
-  libraries
-* BUGFIX: passing multiple spec arguments to patchers (`spec`, `spec_set` and
-  `autospec`) had unpredictable results, now it is an error
-* BUGFIX: using `spec=True` *and* `create=True` as arguments to patchers could
-  result in using `DEFAULT` as the spec. Now it is an error instead
-* BUGFIX: using `spec` or `autospec` arguments to patchers, along with
-  `spec_set=True` did not work correctly
-* BUGFIX: using an object that evaluates to False as a spec could be ignored
-* BUGFIX: a list as the `spec` argument to a patcher would always result in a
-  non-callable mock. Now if `__call__` is in the spec the mock is callable
-
-
-2012/02/13 Version 0.8.0
-------------------------
-
-The only changes since 0.8rc2 are:
-
-* Improved repr of :data:`sentinel` objects
-* :data:`ANY` can be used for comparisons against :data:`call` objects
-* The return value of the `MagicMock.__iter__` method can be set to
-  any iterable and isn't required to be an iterator
-
-Full List of changes since 0.7:
-
-mock 0.8.0 is the last version that will support Python 2.4.
-
-* Addition of :attr:`~Mock.mock_calls` list for *all* calls (including magic
-  methods and chained calls)
-* :func:`patch` and :func:`patch.object` now create a :class:`MagicMock`
-  instead of a :class:`Mock` by default
-* The patchers (`patch`, `patch.object` and `patch.dict`), plus `Mock` and
-  `MagicMock`, take arbitrary keyword arguments for configuration
-* New mock method :meth:`~Mock.configure_mock` for setting attributes and
-  return values / side effects on the mock and its attributes
-* New mock assert methods :meth:`~Mock.assert_any_call` and
-  :meth:`~Mock.assert_has_calls`
-* Implemented :ref:`auto-speccing` (recursive, lazy speccing of mocks with
-  mocked signatures for functions/methods), as the `autospec` argument to
-  `patch`
-* Added the :func:`create_autospec` function for manually creating
-  'auto-specced' mocks
-* :func:`patch.multiple` for doing multiple patches in a single call, using
-  keyword arguments
-* Setting :attr:`~Mock.side_effect` to an iterable will cause calls to the mock
-  to return the next value from the iterable
-* New `new_callable` argument to `patch` and `patch.object` allowing you to
-  pass in a class or callable object (instead of `MagicMock`) that will be
-  called to replace the object being patched
-* Addition of :class:`NonCallableMock` and :class:`NonCallableMagicMock`, mocks
-  without a `__call__` method
-* Addition of :meth:`~Mock.mock_add_spec` method for adding (or changing) a
-  spec on an existing mock
-* Protocol methods on :class:`MagicMock` are magic mocks, and are created
-  lazily on first lookup. This means the result of calling a protocol method is
-  a `MagicMock` instead of a `Mock` as it was previously
-* Addition of :meth:`~Mock.attach_mock` method
-* Added :data:`ANY` for ignoring arguments in :meth:`~Mock.assert_called_with`
-  calls
-* Addition of :data:`call` helper object
-* Improved repr for mocks
-* Improved repr for :attr:`Mock.call_args` and entries in
-  :attr:`Mock.call_args_list`, :attr:`Mock.method_calls` and
-  :attr:`Mock.mock_calls`
-* Improved repr for :data:`sentinel` objects
-* `patch` lookup is done at use time not at decoration time
-* In Python 2.6 or more recent, `dir` on a mock will report all the dynamically
-  created attributes (or the full list of attributes if there is a spec) as
-  well as all the mock methods and attributes.
-* Module level :data:`FILTER_DIR` added to control whether `dir(mock)` filters
-  private attributes. `True` by default.
-* `patch.TEST_PREFIX` for controlling how patchers recognise test methods when
-  used to decorate a class
-* Support for using Java exceptions as a :attr:`~Mock.side_effect` on Jython
-* `Mock` call lists (`call_args_list`, `method_calls` & `mock_calls`) are now
-  custom list objects that allow membership tests for "sub lists" and have
-  a nicer representation if you `str` or `print` them
-* Mocks attached as attributes or return values to other mocks have calls
-  recorded in `method_calls` and `mock_calls` of the parent (unless a name is
-  already set on the child)
-* Improved failure messages for `assert_called_with` and
-  `assert_called_once_with`
-* The return value of the :class:`MagicMock` `__iter__` method can be set to
-  any iterable and isn't required to be an iterator
-* Added the Mock API (`assert_called_with` etc) to functions created by
-  :func:`mocksignature`
-* Tuples as well as lists can be used to specify allowed methods for `spec` &
-  `spec_set` arguments
-* Calling `stop` on an unstarted patcher fails with a more meaningful error
-  message
-* Renamed the internal classes `Sentinel` and `SentinelObject` to prevent abuse
-* BUGFIX: an error creating a patch, with nested patch decorators, won't leave
-  patches in place
-* BUGFIX: `__truediv__` and `__rtruediv__` not available as magic methods on
-  mocks in Python 3
-* BUGFIX: `assert_called_with` / `assert_called_once_with` can be used with
-  `self` as a keyword argument
-* BUGFIX: when patching a class with an explicit spec / spec_set (not a
-  boolean) it applies "spec inheritance" to the return value of the created
-  mock (the "instance")
-* BUGFIX: remove the `__unittest` marker causing traceback truncation
-* Removal of deprecated `patch_object`
-* Private attributes `_name`, `_methods`, `_children`, `_wraps` and `_parent`
-  (etc) renamed to reduce likelihood of clash with user attributes.
-* Added license file to the distribution
-
-
-2012/01/10 Version 0.8.0 release candidate 2
---------------------------------------------
-
-* Removed the `configure` keyword argument to `create_autospec` and allow
-  arbitrary keyword arguments (for the `Mock` constructor) instead
-* Fixed `ANY` equality with some types in `assert_called_with` calls
-* Switched to a standard Sphinx theme (compatible with
-  `readthedocs.org <http://mock.readthedocs.org>`_)
-
-
-2011/12/29 Version 0.8.0 release candidate 1
---------------------------------------------
-
-* `create_autospec` on the return value of a mocked class will use `__call__`
-  for the signature rather than `__init__`
-* Performance improvement instantiating `Mock` and `MagicMock`
-* Mocks used as magic methods have the same type as their parent instead of
-  being hardcoded to `MagicMock`
-
-Special thanks to Julian Berman for his help with diagnosing and improving
-performance in this release.
-
-
-2011/10/09 Version 0.8.0 beta 4
--------------------------------
-
-* `patch` lookup is done at use time not at decoration time
-* When attaching a Mock to another Mock as a magic method, calls are recorded
-  in mock_calls
-* Addition of `attach_mock` method
-* Renamed the internal classes `Sentinel` and `SentinelObject` to prevent abuse
-* BUGFIX: various issues around circular references with mocks (setting a mock
-  return value to be itself etc)
-
-
-2011/08/15 Version 0.8.0 beta 3
--------------------------------
-
-* Mocks attached as attributes or return values to other mocks have calls
-  recorded in `method_calls` and `mock_calls` of the parent (unless a name is
-  already set on the child)
-* Addition of `mock_add_spec` method for adding (or changing) a spec on an
-  existing mock
-* Improved repr for `Mock.call_args` and entries in `Mock.call_args_list`,
-  `Mock.method_calls` and `Mock.mock_calls`
-* Improved repr for mocks
-* BUGFIX: minor fixes in the way `mock_calls` is worked out,
-  especially for "intermediate" mocks in a call chain
-
-
-2011/08/05 Version 0.8.0 beta 2
--------------------------------
-
-* Setting `side_effect` to an iterable will cause calls to the mock to return
-  the next value from the iterable
-* Added `assert_any_call` method
-* Moved `assert_has_calls` from call lists onto mocks
-* BUGFIX: `call_args` and all members of `call_args_list` are two tuples of
-  `(args, kwargs)` again instead of three tuples of `(name, args, kwargs)`
-
-
-2011/07/25 Version 0.8.0 beta 1
--------------------------------
-
-* `patch.TEST_PREFIX` for controlling how patchers recognise test methods when
-  used to decorate a class
-* `Mock` call lists (`call_args_list`, `method_calls` & `mock_calls`) are now
-  custom list objects that allow membership tests for "sub lists" and have
-  an `assert_has_calls` method for unordered call checks
-* `callargs` changed to *always* be a three-tuple of `(name, args, kwargs)`
-* Addition of `mock_calls` list for *all* calls (including magic methods and
-  chained calls)
-* Extension of `call` object to support chained calls and `callargs` for better
-  comparisons with or without names. `call` object has a `call_list` method for
-  chained calls
-* Added the public `instance` argument to `create_autospec`
-* Support for using Java exceptions as a `side_effect` on Jython
-* Improved failure messages for `assert_called_with` and
-  `assert_called_once_with`
-* Tuples as well as lists can be used to specify allowed methods for `spec` &
-  `spec_set` arguments
-* BUGFIX: Fixed bug in `patch.multiple` for argument passing when creating
-  mocks
-* Added license file to the distribution
-
-
-2011/07/16 Version 0.8.0 alpha 2
---------------------------------
-
-* `patch.multiple` for doing multiple patches in a single call, using keyword
-  arguments
-* New `new_callable` argument to `patch` and `patch.object` allowing you to
-  pass in a class or callable object (instead of `MagicMock`) that will be
-  called to replace the object being patched
-* Addition of `NonCallableMock` and `NonCallableMagicMock`, mocks without a
-  `__call__` method
-* Mocks created by `patch` have a `MagicMock` as the `return_value` where a
-  class is being patched
-* `create_autospec` can create non-callable mocks for non-callable objects.
-  `return_value` mocks of classes will be non-callable unless the class has
-  a `__call__` method
-* `autospec` creates a `MagicMock` without a spec for properties and slot
-  descriptors, because we don't know the type of object they return
-* Removed the "inherit" argument from `create_autospec`
-* Calling `stop` on an unstarted patcher fails with a more meaningful error
-  message
-* BUGFIX: an error creating a patch, with nested patch decorators, won't leave
-  patches in place
-* BUGFIX: `__truediv__` and `__rtruediv__` not available as magic methods on
-  mocks in Python 3
-* BUGFIX: `assert_called_with` / `assert_called_once_with` can be used with
-  `self` as a keyword argument
-* BUGFIX: autospec for functions / methods with an argument named self that
-  isn't the first argument no longer broken
-* BUGFIX: when patching a class with an explicit spec / spec_set (not a
-  boolean) it applies "spec inheritance" to the return value of the created
-  mock (the "instance")
-* BUGFIX: remove the `__unittest` marker causing traceback truncation
-
-
-2011/06/14 Version 0.8.0 alpha 1
---------------------------------
-
-mock 0.8.0 is the last version that will support Python 2.4.
-
-* The patchers (`patch`, `patch.object` and `patch.dict`), plus `Mock` and
-  `MagicMock`, take arbitrary keyword arguments for configuration
-* New mock method `configure_mock` for setting attributes and return values /
-  side effects on the mock and its attributes
-* In Python 2.6 or more recent, `dir` on a mock will report all the dynamically
-  created attributes (or the full list of attributes if there is a spec) as
-  well as all the mock methods and attributes.
-* Module level `FILTER_DIR` added to control whether `dir(mock)` filters
-  private attributes. `True` by default. Note that `vars(Mock())` can still be
-  used to get all instance attributes and `dir(type(Mock()))` will still return
-  all the other attributes (irrespective of `FILTER_DIR`)
-* `patch` and `patch.object` now create a `MagicMock` instead of a `Mock` by
-  default
-* Added `ANY` for ignoring arguments in `assert_called_with` calls
-* Addition of `call` helper object
-* Protocol methods on `MagicMock` are magic mocks, and are created lazily on
-  first lookup. This means the result of calling a protocol method is a
-  MagicMock instead of a Mock as it was previously
-* Added the Mock API (`assert_called_with` etc) to functions created by
-  `mocksignature`
-* Private attributes `_name`, `_methods`, `_children`, `_wraps` and `_parent`
-  (etc) renamed to reduce likelihood of clash with user attributes.
-* Implemented auto-speccing (recursive, lazy speccing of mocks with mocked
-  signatures for functions/methods)
-
-  Limitations:
-
-  - Doesn't mock magic methods or attributes (it creates MagicMocks, so the
-    magic methods are *there*, they just don't have the signature mocked nor
-    are attributes followed)
-  - Doesn't mock function / method attributes
-  - Uses object traversal on the objects being mocked to determine types - so
-    properties etc may be triggered
-  - The return value of mocked classes (the 'instance') has the same call
-    signature as the class __init__ (as they share the same spec)
-
-  You create auto-specced mocks by passing `autospec=True` to `patch`.
-
-  Note that attributes that are None are special cased and mocked without a
-  spec (so any attribute / method can be used). This is because None is
-  typically used as a default value for attributes that may be of some other
-  type, and as we don't know what type that may be we allow all access.
-
-  Note that the `autospec` option to `patch` obsoletes the `mocksignature`
-  option.
-
-* Added the `create_autospec` function for manually creating 'auto-specced'
-  mocks
-* Removal of deprecated `patch_object`
-
-
-2011/05/30 Version 0.7.2
-------------------------
-
-* BUGFIX: instances of list subclasses can now be used as mock specs
-* BUGFIX: MagicMock equality / inequality protocol methods changed to use the
-  default equality / inequality. This is done through a `side_effect` on
-  the mocks used for `__eq__` / `__ne__`
-
-
-2011/05/06 Version 0.7.1
-------------------------
-
-Package fixes contributed by Michael Fladischer. No code changes.
-
-* Include template in package
-* Use isolated binaries for the tox tests
-* Unset executable bit on docs
-* Fix DOS line endings in getting-started.txt
-
-
-2011/03/05 Version 0.7.0
-------------------------
-
-No API changes since 0.7.0 rc1. Many documentation changes including a stylish
-new `Sphinx theme <https://github.com/coordt/ADCtheme/>`_.
-
-The full set of changes since 0.6.0 are:
-
-* Python 3 compatibility
-* Ability to mock magic methods with `Mock` and addition of `MagicMock`
-  with pre-created magic methods
-* Addition of `mocksignature` and `mocksignature` argument to `patch` and
-  `patch.object`
-* Addition of `patch.dict` for changing dictionaries during a test
-* Ability to use `patch`, `patch.object` and `patch.dict` as class decorators
-* Renamed ``patch_object`` to `patch.object` (``patch_object`` is
-  deprecated)
-* Addition of soft comparisons: `call_args`, `call_args_list` and `method_calls`
-  now return tuple-like objects which compare equal even when empty args
-  or kwargs are skipped
-* patchers (`patch`, `patch.object` and `patch.dict`) have start and stop
-  methods
-* Addition of `assert_called_once_with` method
-* Mocks can now be named (`name` argument to constructor) and the name is used
-  in the repr
-* repr of a mock with a spec includes the class name of the spec
-* `assert_called_with` works with `python -OO`
-* New `spec_set` keyword argument to `Mock` and `patch`. If used,
-  attempting to *set* an attribute on a mock not on the spec will raise an
-  `AttributeError`
-* Mocks created with a spec can now pass `isinstance` tests (`__class__`
-  returns the type of the spec)
-* Added docstrings to all objects
-* Improved failure message for `Mock.assert_called_with` when the mock
-  has not been called at all
-* Decorated functions / methods have their docstring and `__module__`
-  preserved on Python 2.4.
-* BUGFIX: `mock.patch` now works correctly with certain types of objects that
-  proxy attribute access, like the django settings object
-* BUGFIX: mocks are now copyable (thanks to Ned Batchelder for reporting and
-  diagnosing this)
-* BUGFIX: `spec=True` works with old style classes
-* BUGFIX: ``help(mock)`` works now (on the module). Can no longer use ``__bases__``
-  as a valid sentinel name (thanks to Stephen Emslie for reporting and
-  diagnosing this)
-* BUGFIX: ``side_effect`` now works with ``BaseException`` exceptions like
-  ``KeyboardInterrupt``
-* BUGFIX: `reset_mock` caused infinite recursion when a mock is set as its own
-  return value
-* BUGFIX: patching the same object twice now restores the patches correctly
-* with statement tests now skipped on Python 2.4
-* Tests require unittest2 (or unittest2-py3k) to run
-* Tested with `tox <http://pypi.python.org/pypi/tox>`_ on Python 2.4 - 3.2,
-  jython and pypy (excluding 3.0)
-* Added 'build_sphinx' command to setup.py (requires setuptools or distribute)
-  Thanks to Florian Bauer
-* Switched from subversion to mercurial for source code control
-* `Konrad Delong <http://konryd.blogspot.com/>`_ added as co-maintainer
-
-
-2011/02/16 Version 0.7.0 RC 1
------------------------------
-
-Changes since beta 4:
-
-* Tested with jython, pypy and Python 3.2 and 3.1
-* Decorated functions / methods have their docstring and `__module__`
-  preserved on Python 2.4
-* BUGFIX: `mock.patch` now works correctly with certain types of objects that
-  proxy attribute access, like the django settings object
-* BUGFIX: `reset_mock` caused infinite recursion when a mock is set as its own
-  return value
-
-
-2010/11/12 Version 0.7.0 beta 4
--------------------------------
-
-* patchers (`patch`, `patch.object` and `patch.dict`) have start and stop
-  methods
-* Addition of `assert_called_once_with` method
-* repr of a mock with a spec includes the class name of the spec
-* `assert_called_with` works with `python -OO`
-* New `spec_set` keyword argument to `Mock` and `patch`. If used,
-  attempting to *set* an attribute on a mock not on the spec will raise an
-  `AttributeError`
-* Attributes and return value of a `MagicMock` are `MagicMock` objects
-* Attempting to set an unsupported magic method now raises an `AttributeError`
-* `patch.dict` works as a class decorator
-* Switched from subversion to mercurial for source code control
-* BUGFIX: mocks are now copyable (thanks to Ned Batchelder for reporting and
-  diagnosing this)
-* BUGFIX: `spec=True` works with old style classes
-* BUGFIX: `mocksignature=True` can now patch instance methods via
-  `patch.object`
-
-
-2010/09/18 Version 0.7.0 beta 3
--------------------------------
-
-* Using spec with :class:`MagicMock` only pre-creates magic methods in the spec
-* Setting a magic method on a mock with a ``spec`` can only be done if the
-  spec has that method
-* Mocks can now be named (`name` argument to constructor) and the name is used
-  in the repr
-* `mocksignature` can now be used with classes (signature based on `__init__`)
-  and callable objects (signature based on `__call__`)
-* Mocks created with a spec can now pass `isinstance` tests (`__class__`
-  returns the type of the spec)
-* Default numeric value for MagicMock is 1 rather than zero (because the
-  MagicMock bool defaults to True and 0 is False)
-* Improved failure message for :meth:`~Mock.assert_called_with` when the mock
-  has not been called at all
-* Adding the following to the set of supported magic methods:
-
-  - ``__getformat__`` and ``__setformat__``
-  - pickle methods
-  - ``__trunc__``, ``__ceil__`` and ``__floor__``
-  - ``__sizeof__``
-
-* Added 'build_sphinx' command to setup.py (requires setuptools or distribute)
-  Thanks to Florian Bauer
-* with statement tests now skipped on Python 2.4
-* Tests require unittest2 to run on Python 2.7
-* Improved several docstrings and documentation
-
-
-2010/06/23 Version 0.7.0 beta 2
--------------------------------
-
-* :func:`patch.dict` works as a context manager as well as a decorator
-* ``patch.dict`` takes a string to specify a dictionary as well as a dictionary
-  object. If a string is supplied, the name specified is imported
-* BUGFIX: ``patch.dict`` restores dictionary even when an exception is raised
-
-
-2010/06/22 Version 0.7.0 beta 1
--------------------------------
-
-* Addition of :func:`mocksignature`
-* Ability to mock magic methods
-* Ability to use ``patch`` and ``patch.object`` as class decorators
-* Renamed ``patch_object`` to :func:`patch.object` (``patch_object`` is
-  deprecated)
-* Addition of :class:`MagicMock` class with all magic methods pre-created for you
-* Python 3 compatibility (tested with 3.2 but should work with 3.0 & 3.1 as
-  well)
-* Addition of :func:`patch.dict` for changing dictionaries during a test
-* Addition of ``mocksignature`` argument to ``patch`` and ``patch.object``
-* ``help(mock)`` works now (on the module). Can no longer use ``__bases__``
-  as a valid sentinel name (thanks to Stephen Emslie for reporting and
-  diagnosing this)
-* Addition of soft comparisons: `call_args`, `call_args_list` and `method_calls`
-  now return tuple-like objects which compare equal even when empty args
-  or kwargs are skipped
-* Added docstrings.
-* BUGFIX: ``side_effect`` now works with ``BaseException`` exceptions like
-  ``KeyboardInterrupt``
-* BUGFIX: patching the same object twice now restores the patches correctly
-* The tests now require `unittest2 <http://pypi.python.org/pypi/unittest2>`_
-  to run
-* `Konrad Delong <http://konryd.blogspot.com/>`_ added as co-maintainer
-
-
-2009/08/22 Version 0.6.0
-------------------------
-
-* New test layout compatible with test discovery
-* Descriptors (static methods / class methods etc) can now be patched and
-  restored correctly
-* Mocks can raise exceptions when called by setting ``side_effect`` to an
-  exception class or instance
-* Mocks that wrap objects will not pass on calls to the underlying object if
-  an explicit return_value is set
-
-
-2009/04/17 Version 0.5.0
-------------------------
-
-* Made DEFAULT part of the public api.
-* Documentation built with Sphinx.
-* ``side_effect`` is now called with the same arguments as the mock is called with and
-  if it returns a non-DEFAULT value, that is automatically set as the ``mock.return_value``.
-* ``wraps`` keyword argument used for wrapping objects (and passing calls through to the wrapped object).
-* ``Mock.reset`` renamed to ``Mock.reset_mock``, as reset is a common API name.
-* ``patch`` / ``patch_object`` are now context managers and can be used with ``with``.
-* A new 'create' keyword argument to patch and patch_object that allows them to patch
-  (and unpatch) attributes that don't exist. (Potentially unsafe to use - it can allow
-  you to have tests that pass when they are testing an API that doesn't exist - use at
-  your own risk!)
-* The methods keyword argument to Mock has been removed and merged with spec. The spec
-  argument can now be a list of methods or an object to take the spec from.
-* Nested patches may now be applied in a different order (created mocks passed
-  in the opposite order). This is actually a bugfix.
-* patch and patch_object now take a spec keyword argument. If spec is
-  passed in as 'True' then the Mock created will take the object it is replacing
-  as its spec object. If the object being replaced is a class, then the return
-  value for the mock will also use the class as a spec.
-* A Mock created without a spec will not attempt to mock any magic methods / attributes
-  (they will raise an ``AttributeError`` instead).
-
-
-2008/10/12 Version 0.4.0
-------------------------
-
-* Default return value is now a new mock rather than None
-* return_value added as a keyword argument to the constructor
-* New method 'assert_called_with'
-* Added 'side_effect' attribute / keyword argument called when mock is called
-* patch decorator split into two decorators:
-
-    - ``patch_object`` which takes an object and an attribute name to patch
-      (plus optionally a value to patch with which defaults to a mock object)
-    - ``patch`` which takes a string specifying a target to patch; in the form
-      'package.module.Class.attribute'. (plus optionally a value to
-      patch with which defaults to a mock object)
-
-* Can now patch objects with ``None``
-* Change to patch for nose compatibility with error reporting in wrapped functions
-* Reset no longer clears children / return value etc - it just resets
-  call count and call args. It also calls reset on all children (and
-  the return value if it is a mock).
-
-Thanks to Konrad Delong, Kevin Dangoor and others for patches and suggestions.
-
-
-2007/12/03  Version 0.3.1
--------------------------
-
-``patch`` maintains the name of decorated functions for compatibility with nose
-test autodiscovery.
-
-Tests decorated with ``patch`` that use the two argument form (implicit mock
-creation) will receive the mock(s) passed in as extra arguments.
-
-Thanks to Kevin Dangoor for these changes.
-
-
-2007/11/30  Version 0.3.0
--------------------------
-
-Removed ``patch_module``. ``patch`` can now take a string as the first
-argument for patching modules.
-
-The third argument to ``patch`` is optional - a mock will be created by
-default if it is not passed in.
-
-
-2007/11/21  Version 0.2.1
--------------------------
-
-Bug fix, allows reuse of functions decorated with ``patch`` and ``patch_module``.
-
-
-2007/11/20  Version 0.2.0
--------------------------
-
-Added ``spec`` keyword argument for creating ``Mock`` objects from a
-specification object.
-
-Added ``patch`` and ``patch_module`` monkey patching decorators.
-
-Added ``sentinel`` for convenient access to unique objects.
-
-Distribution includes unit tests.
-
-
-2007/11/19  Version 0.1.0
--------------------------
-
-Initial release.
-
-
-TODO and Limitations
-====================
-
-Contributions, bug reports and comments welcomed!
-
-Feature requests and bug reports are handled on the issue tracker:
-
- * `mock issue tracker <http://code.google.com/p/mock/issues/list>`_
-
-`wraps` is not integrated with magic methods.
-
-`patch` could auto-do the patching in the constructor and unpatch in the
-destructor. This would be useful in itself, but violates TOOWTDI and would be
-unsafe for IronPython & PyPy (non-deterministic calling of destructors).
-Destructors aren't called in CPython where there are cycles, but a weak
-reference with a callback can be used to get round this.
-
-`Mock` has several attributes. This makes it unsuitable for mocking objects
-that use these attribute names. A way round this would be to provide methods
-that *hide* these attributes when needed. In 0.8 many, but not all, of these
-attributes are renamed to gain a `_mock` prefix, making it less likely that
-they will clash. Any outstanding attributes that haven't been modified with
-the prefix should be changed.
-
-If a patch is started using `patch.start` and then not stopped correctly then
-the unpatching is not done. Using weak references it would be possible to
-detect and fix this when the patch object itself is garbage collected. This
-would be tricky to get right though.
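-
-The safe pattern today is to pair `start` with a guaranteed `stop`, for
-example in `setUp` / `tearDown` or a `try` / `finally` (a sketch;
-`package.module.Class` is a placeholder target)::
-
-   >>> patcher = patch('package.module.Class')
-   >>> MockClass = patcher.start()
-   >>> try:
-   ...     pass  # exercise the code under test here
-   ... finally:
-   ...     patcher.stop()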
-
-When a `Mock` is created by `patch`, arbitrary keywords can be used to set
-attributes. If `patch` is created with a `spec`, and is replacing a class, then
-a `return_value` mock is created. The keyword arguments are not applied to the
-child mock, but could be.
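-
-A sketch of the current behaviour (`package.module.Class` is again a
-placeholder target)::
-
-   >>> with patch('package.module.Class', some_attribute=3) as MockClass:
-   ...     instance = MockClass()
-   ...     assert MockClass.some_attribute == 3
-   ...     # `some_attribute` was *not* applied to the `instance` child mock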
-
-When mocking a class with `patch`, passing in `spec=True` or `autospec=True`,
-the mock class has an instance created from the same spec. Should this be the
-default behaviour for mocks anyway (mock return values inheriting the spec
-from their parent), or should it be controlled by an additional keyword
-argument (`inherit`) to the Mock constructor? `create_autospec` does this, so
-an additional keyword argument to Mock is probably unnecessary.
-
-The `mocksignature` argument to `patch` with a non `Mock` passed into
-`new_callable` will *probably* cause an error. Should it just be invalid?
-
-Note that `NonCallableMock` and `NonCallableMagicMock` still have the unused
-(and unusable) attributes: `return_value`, `side_effect`, `call_count`,
-`call_args` and `call_args_list`. These could be removed or raise errors on
-getting / setting. They also have the `assert_called_with` and
-`assert_called_once_with` methods. Removing these would be pointless as
-fetching them would create a mock (attribute) that could be called without
-error.
-
-Some outstanding technical debt. The way autospeccing mocks function
-signatures was copied and modified from `mocksignature`. This could all be
-refactored into one set of functions instead of two. The way we tell if
-patchers are started and if a patcher is being used for a `patch.multiple`
-call are both horrible. There are now a host of helper functions that should
-be rationalised. (Probably time to split mock into a package instead of a
-module.)
-
-Passing arbitrary keyword arguments to `create_autospec`, or `patch` with
-`autospec`, when mocking a *function* works fine. However, the arbitrary
-attributes are set on the created mock - but `create_autospec` returns a
-real function (which doesn't have those attributes). Then again, what is the
-use case for using autospec to create functions with attributes that don't
-exist on the original?
-
-`mocksignature`, plus the `call_args_list` and `method_calls` attributes of
-`Mock` could all be deprecated.
diff --git a/branch-1.2/ambari-common/src/test/python/mock/docs/compare.txt b/branch-1.2/ambari-common/src/test/python/mock/docs/compare.txt
deleted file mode 100644
index 4155530..0000000
--- a/branch-1.2/ambari-common/src/test/python/mock/docs/compare.txt
+++ /dev/null
@@ -1,628 +0,0 @@
-=========================
- Mock Library Comparison
-=========================
-
-
-.. testsetup::
-
-    # `sys` and `mock` are used directly in this setup block; import them so
-    # the doctests below are self-contained (an assumption of this sketch)
-    import sys
-    import mock
-
-    def assertEqual(a, b):
-        assert a == b, ("%r != %r" % (a, b))
-
-    def assertRaises(Exc, func):
-        try:
-            func()
-        except Exc:
-            return
-        assert False, ("%s not raised" % Exc)
-
-    sys.modules['somemodule'] = somemodule = mock.Mock(name='somemodule')
-    class SomeException(Exception):
-        some_method = method1 = method2 = None
-    some_other_object = SomeObject = SomeException
-
-
-A side-by-side comparison of how to accomplish some basic tasks with mock and
-some other popular Python mocking libraries and frameworks.
-
-These are:
-
-* `flexmock <http://pypi.python.org/pypi/flexmock>`_
-* `mox <http://pypi.python.org/pypi/mox>`_
-* `Mocker <http://niemeyer.net/mocker>`_
-* `dingus <http://pypi.python.org/pypi/dingus>`_
-* `fudge <http://pypi.python.org/pypi/fudge>`_
-
-Popular python mocking frameworks not yet represented here include
-`MiniMock <http://pypi.python.org/pypi/MiniMock>`_.
-
-`pMock <http://pmock.sourceforge.net/>`_ (last release 2004 and doesn't import
-in recent versions of Python) and
-`python-mock <http://python-mock.sourceforge.net/>`_ (last release 2005) are
-intentionally omitted.
-
-.. note::
-
-    A more up-to-date version of this comparison, tested against all the mock
-    libraries (only the mock examples on this page can be executed as
-    doctests), is maintained by Gary Bernhardt:
-
-    * `Python Mock Library Comparison
-      <http://garybernhardt.github.com/python-mock-comparison/>`_
-
-This comparison is by no means complete, and also may not be fully idiomatic
-for all the libraries represented. *Please* contribute corrections, missing
-comparisons, or comparisons for additional libraries to the `mock issue
-tracker <https://code.google.com/p/mock/issues/list>`_.
-
-This comparison page was originally created by the `Mox project
-<https://code.google.com/p/pymox/wiki/MoxComparison>`_ and then extended for
-`flexmock and mock <http://has207.github.com/flexmock/compare.html>`_ by
-Herman Sheremetyev. Dingus examples written by `Gary Bernhardt
-<http://garybernhardt.github.com/python-mock-comparison/>`_. fudge examples
-provided by `Kumar McMillan <http://farmdev.com/>`_.
-
-.. note::
-
-    The example tasks here were originally created by Mox, which is a mocking
-    *framework* rather than a library like mock. The tasks shown naturally
-    exemplify tasks that frameworks are good at and not the ones they make
-    harder. In particular you can take a `Mock` or `MagicMock` object and use
-    it in any way you want with no up-front configuration. The same is also
-    true for Dingus.
-
-    The examples for mock here assume version 0.7.0.
-
-
-Simple fake object
-~~~~~~~~~~~~~~~~~~
-
-.. doctest::
-
-    >>> # mock
-    >>> my_mock = mock.Mock()
-    >>> my_mock.some_method.return_value = "calculated value"
-    >>> my_mock.some_attribute = "value"
-    >>> assertEqual("calculated value", my_mock.some_method())
-    >>> assertEqual("value", my_mock.some_attribute)
-
-::
-
-    # Flexmock
-    mock = flexmock(some_method=lambda: "calculated value", some_attribute="value")
-    assertEqual("calculated value", mock.some_method())
-    assertEqual("value", mock.some_attribute)
-
-    # Mox
-    mock = mox.MockAnything()
-    mock.some_method().AndReturn("calculated value")
-    mock.some_attribute = "value"
-    mox.Replay(mock)
-    assertEqual("calculated value", mock.some_method())
-    assertEqual("value", mock.some_attribute)
-
-    # Mocker
-    mock = mocker.mock()
-    mock.some_method()
-    mocker.result("calculated value")
-    mocker.replay()
-    mock.some_attribute = "value"
-    assertEqual("calculated value", mock.some_method())
-    assertEqual("value", mock.some_attribute)
-
-::
-
-    >>> # Dingus
-    >>> my_dingus = dingus.Dingus(some_attribute="value",
-    ...                           some_method__returns="calculated value")
-    >>> assertEqual("calculated value", my_dingus.some_method())
-    >>> assertEqual("value", my_dingus.some_attribute)
-
-::
-
-    >>> # fudge
-    >>> my_fake = (fudge.Fake()
-    ...            .provides('some_method')
-    ...            .returns("calculated value")
-    ...            .has_attr(some_attribute="value"))
-    ...
-    >>> assertEqual("calculated value", my_fake.some_method())
-    >>> assertEqual("value", my_fake.some_attribute)
-
-
-Simple mock
-~~~~~~~~~~~
-
-.. doctest::
-
-    >>> # mock
-    >>> my_mock = mock.Mock()
-    >>> my_mock.some_method.return_value = "value"
-    >>> assertEqual("value", my_mock.some_method())
-    >>> my_mock.some_method.assert_called_once_with()
-
-::
-
-    # Flexmock
-    mock = flexmock()
-    mock.should_receive("some_method").and_return("value").once
-    assertEqual("value", mock.some_method())
-
-    # Mox
-    mock = mox.MockAnything()
-    mock.some_method().AndReturn("value")
-    mox.Replay(mock)
-    assertEqual("value", mock.some_method())
-    mox.Verify(mock)
-
-    # Mocker
-    mock = mocker.mock()
-    mock.some_method()
-    mocker.result("value")
-    mocker.replay()
-    assertEqual("value", mock.some_method())
-    mocker.verify()
-
-::
-
-    >>> # Dingus
-    >>> my_dingus = dingus.Dingus(some_method__returns="value")
-    >>> assertEqual("value", my_dingus.some_method())
-    >>> assert my_dingus.some_method.calls().once()
-
-::
-
-    >>> # fudge
-    >>> @fudge.test
-    ... def test():
-    ...     my_fake = (fudge.Fake()
-    ...                .expects('some_method')
-    ...                .returns("value")
-    ...                .times_called(1))
-    ...
-    >>> test()
-    Traceback (most recent call last):
-    ...
-    AssertionError: fake:my_fake.some_method() was not called
-
-
-Creating partial mocks
-~~~~~~~~~~~~~~~~~~~~~~
-
-.. doctest::
-
-    >>> # mock
-    >>> SomeObject.some_method = mock.Mock(return_value='value')
-    >>> assertEqual("value", SomeObject.some_method())
-
-::
-
-    # Flexmock
-    flexmock(SomeObject).should_receive("some_method").and_return('value')
-    assertEqual("value", mock.some_method())
-
-    # Mox
-    mock = mox.MockObject(SomeObject)
-    mock.some_method().AndReturn("value")
-    mox.Replay(mock)
-    assertEqual("value", mock.some_method())
-    mox.Verify(mock)
-
-    # Mocker
-    mock = mocker.mock(SomeObject)
-    mock.some_method()
-    mocker.result("value")
-    mocker.replay()
-    assertEqual("value", mock.some_method())
-    mocker.verify()
-
-::
-
-    >>> # Dingus
-    >>> object = SomeObject
-    >>> object.some_method = dingus.Dingus(return_value="value")
-    >>> assertEqual("value", object.some_method())
-
-::
-
-    >>> # fudge
-    >>> fake = fudge.Fake().is_callable().returns("<fudge-value>")
-    >>> with fudge.patched_context(SomeObject, 'some_method', fake):
-    ...     s = SomeObject()
-    ...     assertEqual("<fudge-value>", s.some_method())
-    ...
-
-
-Ensure calls are made in specific order
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. doctest::
-
-    >>> # mock
-    >>> my_mock = mock.Mock(spec=SomeObject)
-    >>> my_mock.method1()
-    <Mock name='mock.method1()' id='...'>
-    >>> my_mock.method2()
-    <Mock name='mock.method2()' id='...'>
-    >>> assertEqual(my_mock.mock_calls, [call.method1(), call.method2()])
-
-::
-
-    # Flexmock
-    mock = flexmock(SomeObject)
-    mock.should_receive('method1').once.ordered.and_return('first thing')
-    mock.should_receive('method2').once.ordered.and_return('second thing')
-
-    # Mox
-    mock = mox.MockObject(SomeObject)
-    mock.method1().AndReturn('first thing')
-    mock.method2().AndReturn('second thing')
-    mox.Replay(mock)
-    mox.Verify(mock)
-
-    # Mocker
-    mock = mocker.mock()
-    with mocker.order():
-        mock.method1()
-        mocker.result('first thing')
-        mock.method2()
-        mocker.result('second thing')
-        mocker.replay()
-        mocker.verify()
-
-::
-
-    >>> # Dingus
-    >>> my_dingus = dingus.Dingus()
-    >>> my_dingus.method1()
-    <Dingus ...>
-    >>> my_dingus.method2()
-    <Dingus ...>
-    >>> assertEqual(['method1', 'method2'], [call.name for call in my_dingus.calls])
-
-::
-
-    >>> # fudge
-    >>> @fudge.test
-    ... def test():
-    ...     my_fake = (fudge.Fake()
-    ...                .remember_order()
-    ...                .expects('method1')
-    ...                .expects('method2'))
-    ...     my_fake.method2()
-    ...     my_fake.method1()
-    ...
-    >>> test()
-    Traceback (most recent call last):
-    ...
-    AssertionError: Call #1 was fake:my_fake.method2(); Expected: #1 fake:my_fake.method1(), #2 fake:my_fake.method2(), end
-
-
-Raising exceptions
-~~~~~~~~~~~~~~~~~~
-
-.. doctest::
-
-    >>> # mock
-    >>> my_mock = mock.Mock()
-    >>> my_mock.some_method.side_effect = SomeException("message")
-    >>> assertRaises(SomeException, my_mock.some_method)
-
-::
-
-    # Flexmock
-    mock = flexmock()
-    mock.should_receive("some_method").and_raise(SomeException("message"))
-    assertRaises(SomeException, mock.some_method)
-
-    # Mox
-    mock = mox.MockAnything()
-    mock.some_method().AndRaise(SomeException("message"))
-    mox.Replay(mock)
-    assertRaises(SomeException, mock.some_method)
-    mox.Verify(mock)
-
-    # Mocker
-    mock = mocker.mock()
-    mock.some_method()
-    mocker.throw(SomeException("message"))
-    mocker.replay()
-    assertRaises(SomeException, mock.some_method)
-    mocker.verify()
-
-::
-
-    >>> # Dingus
-    >>> my_dingus = dingus.Dingus()
-    >>> my_dingus.some_method = dingus.exception_raiser(SomeException)
-    >>> assertRaises(SomeException, my_dingus.some_method)
-
-::
-
-    >>> # fudge
-    >>> my_fake = (fudge.Fake()
-    ...            .is_callable()
-    ...            .raises(SomeException("message")))
-    ...
-    >>> my_fake()
-    Traceback (most recent call last):
-    ...
-    SomeException: message
-
-
-Override new instances of a class
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. doctest::
-
-    >>> # mock
-    >>> with mock.patch('somemodule.Someclass') as MockClass:
-    ...     MockClass.return_value = some_other_object
-    ...     assertEqual(some_other_object, somemodule.Someclass())
-    ...
-
-
-::
-
-    # Flexmock
-    flexmock(some_module.SomeClass, new_instances=some_other_object)
-    assertEqual(some_other_object, some_module.SomeClass())
-
-    # Mox
-    # (you will probably have mox.Mox() available as self.mox in a real test)
-    mox.Mox().StubOutWithMock(some_module, 'SomeClass', use_mock_anything=True)
-    some_module.SomeClass().AndReturn(some_other_object)
-    mox.ReplayAll()
-    assertEqual(some_other_object, some_module.SomeClass())
-
-    # Mocker
-    instance = mocker.mock()
-    klass = mocker.replace(SomeClass, spec=None)
-    klass('expected', 'args')
-    mocker.result(instance)
-
-::
-
-    >>> # Dingus
-    >>> MockClass = dingus.Dingus(return_value=some_other_object)
-    >>> with dingus.patch('somemodule.SomeClass', MockClass):
-    ...     assertEqual(some_other_object, somemodule.SomeClass())
-    ...
-
-::
-
-    >>> # fudge
-    >>> @fudge.patch('somemodule.SomeClass')
-    ... def test(FakeClass):
-    ...     FakeClass.is_callable().returns(some_other_object)
-    ...     assertEqual(some_other_object, somemodule.SomeClass())
-    ...
-    >>> test()
-
-
-Call the same method multiple times
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. note::
-
-    You don't need to do *any* configuration to call `mock.Mock()` methods
-    multiple times. Attributes like `call_count`, `call_args_list` and
-    `method_calls` provide various different ways of making assertions about
-    how the mock was used.
-
-.. doctest::
-
-    >>> # mock
-    >>> my_mock = mock.Mock()
-    >>> my_mock.some_method()
-    <Mock name='mock.some_method()' id='...'>
-    >>> my_mock.some_method()
-    <Mock name='mock.some_method()' id='...'>
-    >>> assert my_mock.some_method.call_count >= 2
-
-::
-
-    # Flexmock
-    # (verifies that the method gets called at least twice)
-    flexmock(some_object).should_receive('some_method').at_least.twice
-
-    # Mox
-    # (does not support variable number of calls, so you need to create a new entry for each explicit call)
-    mock = mox.MockObject(some_object)
-    mock.some_method(mox.IgnoreArg(), mox.IgnoreArg())
-    mock.some_method(mox.IgnoreArg(), mox.IgnoreArg())
-    mox.Replay(mock)
-    mox.Verify(mock)
-
-    # Mocker
-    # (TODO)
-
-::
-
-    >>> # Dingus
-    >>> my_dingus = dingus.Dingus()
-    >>> my_dingus.some_method()
-    <Dingus ...>
-    >>> my_dingus.some_method()
-    <Dingus ...>
-    >>> assert len(my_dingus.calls('some_method')) == 2
-
-::
-
-    >>> # fudge
-    >>> @fudge.test
-    ... def test():
-    ...     my_fake = fudge.Fake().expects('some_method').times_called(2)
-    ...     my_fake.some_method()
-    ...
-    >>> test()
-    Traceback (most recent call last):
-    ...
-    AssertionError: fake:my_fake.some_method() was called 1 time(s). Expected 2.
-
-
-Mock chained methods
-~~~~~~~~~~~~~~~~~~~~
-
-.. doctest::
-
-    >>> # mock
-    >>> my_mock = mock.Mock()
-    >>> method3 = my_mock.method1.return_value.method2.return_value.method3
-    >>> method3.return_value = 'some value'
-    >>> assertEqual('some value', my_mock.method1().method2().method3(1, 2))
-    >>> method3.assert_called_once_with(1, 2)
-
-::
-
-    # Flexmock
-    # (intermediate method calls are automatically assigned to temporary fake objects
-    # and can be called with any arguments)
-    flexmock(some_object).should_receive(
-        'method1.method2.method3'
-    ).with_args(arg1, arg2).and_return('some value')
-    assertEqual('some value', some_object.method1().method2().method3(arg1, arg2))
-
-::
-
-    # Mox
-    mock = mox.MockObject(some_object)
-    mock2 = mox.MockAnything()
-    mock3 = mox.MockAnything()
-    mock.method1().AndReturn(mock2)
-    mock2.method2().AndReturn(mock3)
-    mock3.method3(arg1, arg2).AndReturn('some_value')
-    self.mox.ReplayAll()
-    assertEqual("some_value", some_object.method1().method2().method3(arg1, arg2))
-    self.mox.VerifyAll()
-
-    # Mocker
-    # (TODO)
-
-::
-
-    >>> # Dingus
-    >>> my_dingus = dingus.Dingus()
-    >>> method3 = my_dingus.method1.return_value.method2.return_value.method3
-    >>> method3.return_value = 'some value'
-    >>> assertEqual('some value', my_dingus.method1().method2().method3(1, 2))
-    >>> assert method3.calls('()', 1, 2).once()
-
-::
-
-    >>> # fudge
-    >>> @fudge.test
-    ... def test():
-    ...     my_fake = fudge.Fake()
-    ...     (my_fake
-    ...      .expects('method1')
-    ...      .returns_fake()
-    ...      .expects('method2')
-    ...      .returns_fake()
-    ...      .expects('method3')
-    ...      .with_args(1, 2)
-    ...      .returns('some value'))
-    ...     assertEqual('some value', my_fake.method1().method2().method3(1, 2))
-    ...
-    >>> test()
-
-
-Mocking a context manager
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Examples for mock, Dingus and fudge only (so far):
-
-.. doctest::
-
-    >>> # mock
-    >>> my_mock = mock.MagicMock()
-    >>> with my_mock:
-    ...     pass
-    ...
-    >>> my_mock.__enter__.assert_called_with()
-    >>> my_mock.__exit__.assert_called_with(None, None, None)
-
-::
-
-
-    >>> # Dingus (nothing special here; all dinguses are "magic mocks")
-    >>> my_dingus = dingus.Dingus()
-    >>> with my_dingus:
-    ...     pass
-    ...
-    >>> assert my_dingus.__enter__.calls()
-    >>> assert my_dingus.__exit__.calls('()', None, None, None)
-
-::
-
-    >>> # fudge
-    >>> my_fake = fudge.Fake().provides('__enter__').provides('__exit__')
-    >>> with my_fake:
-    ...     pass
-    ...
-
-
-Mocking the builtin open used as a context manager
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Example for mock only (so far):
-
-.. doctest::
-
-    >>> # mock
-    >>> my_mock = mock.MagicMock()
-    >>> with mock.patch('__builtin__.open', my_mock):
-    ...     manager = my_mock.return_value.__enter__.return_value
-    ...     manager.read.return_value = 'some data'
-    ...     with open('foo') as h:
-    ...         data = h.read()
-    ...
-    >>> data
-    'some data'
-    >>> my_mock.assert_called_once_with('foo')
-
-*or*:
-
-.. doctest::
-
-    >>> # mock
-    >>> with mock.patch('__builtin__.open') as my_mock:
-    ...     my_mock.return_value.__enter__ = lambda s: s
-    ...     my_mock.return_value.__exit__ = mock.Mock()
-    ...     my_mock.return_value.read.return_value = 'some data'
-    ...     with open('foo') as h:
-    ...         data = h.read()
-    ...
-    >>> data
-    'some data'
-    >>> my_mock.assert_called_once_with('foo')
-
-::
-
-    >>> # Dingus
-    >>> my_dingus = dingus.Dingus()
-    >>> with dingus.patch('__builtin__.open', my_dingus):
-    ...     file_ = open.return_value.__enter__.return_value
-    ...     file_.read.return_value = 'some data'
-    ...     with open('foo') as h:
-    ...         data = h.read()
-    ...
-    >>> data
-    'some data'
-    >>> assert my_dingus.calls('()', 'foo').once()
-
-::
-
-    >>> # fudge
-    >>> from contextlib import contextmanager
-    >>> from StringIO import StringIO
-    >>> @contextmanager
-    ... def fake_file(filename):
-    ...     yield StringIO('sekrets')
-    ...
-    >>> with fudge.patch('__builtin__.open') as fake_open:
-    ...     fake_open.is_callable().calls(fake_file)
-    ...     with open('/etc/password') as f:
-    ...         data = f.read()
-    ...
-    fake:__builtin__.open
-    >>> data
-    'sekrets'
\ No newline at end of file
diff --git a/branch-1.2/ambari-common/src/test/python/mock/docs/conf.py b/branch-1.2/ambari-common/src/test/python/mock/docs/conf.py
deleted file mode 100644
index 62f0491..0000000
--- a/branch-1.2/ambari-common/src/test/python/mock/docs/conf.py
+++ /dev/null
@@ -1,209 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Mock documentation build configuration file, created by
-# sphinx-quickstart on Mon Nov 17 18:12:00 2008.
-#
-# This file is execfile()d with the current directory set to its containing dir.
-#
-# The contents of this file are pickled, so don't put values in the namespace
-# that aren't pickleable (module imports are okay, they're removed automatically).
-#
-# All configuration values have a default value; values that are commented out
-# serve to show the default value.
-
-import sys, os
-sys.path.insert(0, os.path.abspath('..'))
-from mock import __version__
-
-# If your extensions are in another directory, add it here. If the directory
-# is relative to the documentation root, use os.path.abspath to make it
-# absolute, like shown here.
-#sys.path.append(os.path.abspath('some/directory'))
-
-# General configuration
-# ---------------------
-
-# Add any Sphinx extension module names here, as strings. They can be extensions
-# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.doctest']
-
-doctest_global_setup = """
-import os
-import sys
-import mock
-from mock import * # yeah, I know :-/
-import unittest2
-import __main__
-
-if os.getcwd() not in sys.path:
-    sys.path.append(os.getcwd())
-
-# keep a reference to __main__
-sys.modules['__main'] = __main__
-
-# Replace __main__ with a proxy object whose attribute lookups go to the
-# doctest globals, so examples can refer to names defined here via __main__.
-class ProxyModule(object):
-    def __init__(self):
-        self.__dict__ = globals()
-
-sys.modules['__main__'] = ProxyModule()
-"""
-
-doctest_global_cleanup = """
-sys.modules['__main__'] = sys.modules['__main']
-"""
-
-html_theme = 'nature'
-html_theme_options = {}
-
-# Add any paths that contain templates here, relative to this directory.
-#templates_path = ['_templates']
-
-# The suffix of source filenames.
-source_suffix = '.txt'
-
-# The master toctree document.
-master_doc = 'index'
-
-# General substitutions.
-project = u'Mock'
-copyright = u'2007-2012, Michael Foord & the mock team'
-
-# The default replacements for |version| and |release|, also used in various
-# other places throughout the built documents.
-#
-# The short X.Y version.
-version = __version__[:3]
-# The full version, including alpha/beta/rc tags.
-release = __version__
-
-# There are two options for replacing |today|: either, you set today to some
-# non-false value, then it is used:
-#today = ''
-# Else, today_fmt is used as the format for a strftime call.
-today_fmt = '%B %d, %Y'
-
-# List of documents that shouldn't be included in the build.
-#unused_docs = []
-
-# List of directories, relative to source directories, that shouldn't be searched
-# for source files.
-exclude_trees = []
-
-# The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
-
-# If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
-
-# If true, the current module name will be prepended to all description
-# unit titles (such as .. function::).
-add_module_names = False
-
-# If true, sectionauthor and moduleauthor directives will be shown in the
-# output. They are ignored by default.
-#show_authors = False
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'friendly'
-
-
-# Options for HTML output
-# -----------------------
-
-# The style sheet to use for HTML and HTML Help pages. A file of that name
-# must exist either in Sphinx' static/ path, or in one of the custom paths
-# given in html_static_path.
-#html_style = 'adctheme.css'
-
-# The name for this set of Sphinx documents.  If None, it defaults to
-# "<project> v<release> documentation".
-#html_title = None
-
-# A shorter title for the navigation bar.  Default is the same as html_title.
-#html_short_title = None
-
-# The name of an image file (relative to this directory) to place at the top
-# of the sidebar.
-#html_logo = None
-
-# The name of an image file (within the static path) to use as favicon of the
-# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
-# pixels large.
-#html_favicon = None
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-#html_static_path = ['_static']
-
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-html_last_updated_fmt = '%b %d, %Y'
-
-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-#html_use_smartypants = True
-
-# Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
-
-# Additional templates that should be rendered to pages, maps page names to
-# template names.
-#html_additional_pages = {}
-
-# If false, no module index is generated.
-html_use_modindex = False
-
-# If false, no index is generated.
-#html_use_index = True
-
-# If true, the index is split into individual pages for each letter.
-#html_split_index = False
-
-# If true, the reST sources are included in the HTML build as _sources/<name>.
-#html_copy_source = True
-
-# If true, an OpenSearch description file will be output, and all pages will
-# contain a <link> tag referring to it.  The value of this option must be the
-# base URL from which the finished HTML is served.
-#html_use_opensearch = ''
-
-# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = ''
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = 'Mockdoc'
-
-
-# Options for LaTeX output
-# ------------------------
-
-# The paper size ('letter' or 'a4').
-#latex_paper_size = 'letter'
-
-# The font size ('10pt', '11pt' or '12pt').
-latex_font_size = '12pt'
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title, author, document class [howto/manual]).
-latex_documents = [
-  ('index', 'Mock.tex', u'Mock Documentation',
-   u'Michael Foord', 'manual'),
-]
-
-# The name of an image file (relative to this directory) to place at the top of
-# the title page.
-#latex_logo = None
-
-# For "manual" documents, if this is true, then toplevel headings are parts,
-# not chapters.
-#latex_use_parts = False
-
-# Additional stuff for the LaTeX preamble.
-#latex_preamble = ''
-
-# Documents to append as an appendix to all manuals.
-#latex_appendices = []
-
-# If false, no module index is generated.
-latex_use_modindex = False
\ No newline at end of file
diff --git a/branch-1.2/ambari-common/src/test/python/mock/docs/examples.txt b/branch-1.2/ambari-common/src/test/python/mock/docs/examples.txt
deleted file mode 100644
index ecb994b..0000000
--- a/branch-1.2/ambari-common/src/test/python/mock/docs/examples.txt
+++ /dev/null
@@ -1,1063 +0,0 @@
-.. _further-examples:
-
-==================
- Further Examples
-==================
-
-.. currentmodule:: mock
-
-.. testsetup::
-
-    from datetime import date
-
-    BackendProvider = Mock()
-    sys.modules['mymodule'] = mymodule = Mock(name='mymodule')
-
-    def grob(val):
-        "First frob and then clear val"
-        mymodule.frob(val)
-        val.clear()
-
-    mymodule.frob = lambda val: val
-    mymodule.grob = grob
-    mymodule.date = date
-
-    class TestCase(unittest2.TestCase):
-        def run(self):
-            result = unittest2.TestResult()
-            out = unittest2.TestCase.run(self, result)
-            assert result.wasSuccessful()
-
-    from mock import inPy3k
-
-
-
-For comprehensive examples, see the unit tests included in the full source
-distribution.
-
-Here are some more examples for some slightly more advanced scenarios than in
-the :ref:`getting started <getting-started>` guide.
-
-
-Mocking chained calls
-=====================
-
-Mocking chained calls is actually straightforward with mock once you
-understand the :attr:`~Mock.return_value` attribute. When a mock is called for
-the first time, or you fetch its `return_value` before it has been called, a
-new `Mock` is created.
-
-This means that you can see how the object returned from a call to a mocked
-object has been used by interrogating the `return_value` mock:
-
-.. doctest::
-
-    >>> mock = Mock()
-    >>> mock().foo(a=2, b=3)
-    <Mock name='mock().foo()' id='...'>
-    >>> mock.return_value.foo.assert_called_with(a=2, b=3)
-
-From here it is a simple step to configure and then make assertions about
-chained calls. Of course another alternative is writing your code in a more
-testable way in the first place...
-
-So, suppose we have some code that looks a little bit like this:
-
-.. doctest::
-
-    >>> class Something(object):
-    ...     def __init__(self):
-    ...         self.backend = BackendProvider()
-    ...     def method(self):
-    ...         response = self.backend.get_endpoint('foobar').create_call('spam', 'eggs').start_call()
-    ...         # more code
-
-Assuming that `BackendProvider` is already well tested, how do we test
-`method()`? Specifically, we want to test that the code section `# more
-code` uses the response object in the correct way.
-
-As this chain of calls is made from an instance attribute we can monkey patch
-the `backend` attribute on a `Something` instance. In this particular case
-we are only interested in the return value from the final call to
-`start_call` so we don't have much configuration to do. Let's assume the
-object it returns is 'file-like', so we'll ensure that our response object
-uses the builtin `file` as its `spec`.
-
-To do this we create a mock instance as our mock backend and create a mock
-response object for it. To set the response as the return value for that final
-`start_call` we could do this:
-
-    `mock_backend.get_endpoint.return_value.create_call.return_value.start_call.return_value = mock_response`.
-
-We can do that in a slightly nicer way using the :meth:`~Mock.configure_mock`
-method to directly set the return value for us:
-
-.. doctest::
-
-    >>> something = Something()
-    >>> mock_response = Mock(spec=file)
-    >>> mock_backend = Mock()
-    >>> config = {'get_endpoint.return_value.create_call.return_value.start_call.return_value': mock_response}
-    >>> mock_backend.configure_mock(**config)
-
-With these we monkey patch the "mock backend" in place and can make the real
-call:
-
-.. doctest::
-
-    >>> something.backend = mock_backend
-    >>> something.method()
-
-Using :attr:`~Mock.mock_calls` we can check the chained call with a single
-assert. A chained call is several calls in one line of code, so there will be
-several entries in `mock_calls`. We can use :meth:`call.call_list` to create
-this list of calls for us:
-
-.. doctest::
-
-    >>> chained = call.get_endpoint('foobar').create_call('spam', 'eggs').start_call()
-    >>> call_list = chained.call_list()
-    >>> assert mock_backend.mock_calls == call_list
-
-
-Partial mocking
-===============
-
-In some tests I wanted to mock out a call to `datetime.date.today()
-<http://docs.python.org/library/datetime.html#datetime.date.today>`_ to return
-a known date, but I didn't want to prevent the code under test from
-creating new date objects. Unfortunately `datetime.date` is written in C, and
-so I couldn't just monkey-patch out the static `date.today` method.
-
-I found a simple way of doing this that involved effectively wrapping the date
-class with a mock, but passing through calls to the constructor to the real
-class (and returning real instances).
-
-The :func:`patch decorator <patch>` is used here to
-mock out the `date` class in the module under test. The :attr:`side_effect`
-attribute on the mock date class is then set to a lambda function that returns
-a real date. When the mock date class is called a real date will be
-constructed and returned by `side_effect`.
-
-.. doctest::
-
-    >>> from datetime import date
-    >>> with patch('mymodule.date') as mock_date:
-    ...     mock_date.today.return_value = date(2010, 10, 8)
-    ...     mock_date.side_effect = lambda *args, **kw: date(*args, **kw)
-    ...
-    ...     assert mymodule.date.today() == date(2010, 10, 8)
-    ...     assert mymodule.date(2009, 6, 8) == date(2009, 6, 8)
-    ...
-
-Note that we don't patch `datetime.date` globally, we patch `date` in the
-module that *uses* it. See :ref:`where to patch <where-to-patch>`.
-
-When `date.today()` is called a known date is returned, but calls to the
-`date(...)` constructor still return normal dates. Without this you can find
-yourself having to calculate an expected result using exactly the same
-algorithm as the code under test, which is a classic testing anti-pattern.
-
-Calls to the date constructor are recorded in the `mock_date` attributes
-(`call_count` and friends) which may also be useful for your tests.
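-
-For example, continuing the session above (only the `date(2009, 6, 8)` call
-went through the mock itself; `date.today()` is a call on a child mock):
-
-.. doctest::
-
-    >>> mock_date.call_count
-    1
-    >>> mock_date.call_args
-    call(2009, 6, 8)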
-
-An alternative way of dealing with mocking dates, or other builtin classes,
-is discussed in `this blog entry
-<http://williamjohnbert.com/2011/07/how-to-unit-testing-in-django-with-mocking-and-patching/>`_.
-
-
-Mocking a Generator Method
-==========================
-
-A Python generator is a function or method that uses the `yield statement
-<http://docs.python.org/reference/simple_stmts.html#the-yield-statement>`_ to
-return a series of values when iterated over [#]_.
-
-A generator method / function is called to return the generator object. It is
-the generator object that is then iterated over. The protocol method for
-iteration is `__iter__
-<http://docs.python.org/library/stdtypes.html#container.__iter__>`_, so we can
-mock this using a `MagicMock`.
-
-Here's an example class with an "iter" method implemented as a generator:
-
-.. doctest::
-
-    >>> class Foo(object):
-    ...     def iter(self):
-    ...         for i in [1, 2, 3]:
-    ...             yield i
-    ...
-    >>> foo = Foo()
-    >>> list(foo.iter())
-    [1, 2, 3]
-
-
-How would we mock this class, and in particular its "iter" method?
-
-To configure the values returned from the iteration (implicit in the call to
-`list`), we need to configure the object returned by the call to `foo.iter()`.
-
-.. doctest::
-
-    >>> mock_foo = MagicMock()
-    >>> mock_foo.iter.return_value = iter([1, 2, 3])
-    >>> list(mock_foo.iter())
-    [1, 2, 3]
-
-.. [#] There are also generator expressions and more `advanced uses
-    <http://www.dabeaz.com/coroutines/index.html>`_ of generators, but we aren't
-    concerned about them here. A very good introduction to generators and how
-    powerful they are is: `Generator Tricks for Systems Programmers
-    <http://www.dabeaz.com/generators/>`_.
-
-
-Applying the same patch to every test method
-============================================
-
-If you want several patches in place for multiple test methods the obvious way
-is to apply the patch decorators to every method. This can feel like unnecessary
-repetition. For Python 2.6 or more recent you can use `patch` (in all its
-various forms) as a class decorator. This applies the patches to all test
-methods on the class. A test method is any method whose name starts
-with `test`:
-
-.. doctest::
-
-    >>> @patch('mymodule.SomeClass')
-    ... class MyTest(TestCase):
-    ...
-    ...     def test_one(self, MockSomeClass):
-    ...         self.assertTrue(mymodule.SomeClass is MockSomeClass)
-    ...
-    ...     def test_two(self, MockSomeClass):
-    ...         self.assertTrue(mymodule.SomeClass is MockSomeClass)
-    ...
-    ...     def not_a_test(self):
-    ...         return 'something'
-    ...
-    >>> MyTest('test_one').test_one()
-    >>> MyTest('test_two').test_two()
-    >>> MyTest('test_two').not_a_test()
-    'something'
-
-An alternative way of managing patches is to use the :ref:`start-and-stop`.
-These allow you to move the patching into your `setUp` and `tearDown` methods.
-
-.. doctest::
-
-    >>> class MyTest(TestCase):
-    ...     def setUp(self):
-    ...         self.patcher = patch('mymodule.foo')
-    ...         self.mock_foo = self.patcher.start()
-    ...
-    ...     def test_foo(self):
-    ...         self.assertTrue(mymodule.foo is self.mock_foo)
-    ...
-    ...     def tearDown(self):
-    ...         self.patcher.stop()
-    ...
-    >>> MyTest('test_foo').run()
-
-If you use this technique you must ensure that the patching is "undone" by
-calling `stop`. This can be fiddlier than you might think, because if an
-exception is raised in the setUp then tearDown is not called. `unittest2
-<http://pypi.python.org/pypi/unittest2>`_ cleanup functions make this simpler:
-
-
-.. doctest::
-
-    >>> class MyTest(TestCase):
-    ...     def setUp(self):
-    ...         patcher = patch('mymodule.foo')
-    ...         self.addCleanup(patcher.stop)
-    ...         self.mock_foo = patcher.start()
-    ...
-    ...     def test_foo(self):
-    ...         self.assertTrue(mymodule.foo is self.mock_foo)
-    ...
-    >>> MyTest('test_foo').run()
-
-
-Mocking Unbound Methods
-=======================
-
-Whilst writing tests today I needed to patch an *unbound method* (patching the
-method on the class rather than on the instance). I needed self to be passed
-in as the first argument because I want to make asserts about which objects
-were calling this particular method. The issue is that you can't patch with a
-mock for this, because if you replace an unbound method with a mock it doesn't
-become a bound method when fetched from the instance, and so it doesn't get
-self passed in. The workaround is to patch the unbound method with a real
-function instead. The :func:`patch` decorator makes it so simple to
-patch out methods with a mock that having to create a real function becomes a
-nuisance.
-
-If you pass `autospec=True` to patch then it does the patching with a
-*real* function object. This function object has the same signature as the one
-it is replacing, but delegates to a mock under the hood. You still get your
-mock auto-created in exactly the same way as before. What it means, though, is
-that if you use it to patch out an unbound method on a class the mocked
-function will be turned into a bound method if it is fetched from an instance.
-It will have `self` passed in as the first argument, which is exactly what I
-wanted:
-
-.. doctest::
-
-    >>> class Foo(object):
-    ...   def foo(self):
-    ...     pass
-    ...
-    >>> with patch.object(Foo, 'foo', autospec=True) as mock_foo:
-    ...   mock_foo.return_value = 'foo'
-    ...   foo = Foo()
-    ...   foo.foo()
-    ...
-    'foo'
-    >>> mock_foo.assert_called_once_with(foo)
-
-If we don't use `autospec=True` then the unbound method is patched out
-with a Mock instance instead, and isn't called with `self`.
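-
-A minimal sketch of the difference, reusing the `Foo` class from above. The
-mock is called *without* `self`, so an assertion with no arguments passes:
-
-.. doctest::
-
-    >>> with patch.object(Foo, 'foo') as mock_foo:
-    ...   mock_foo.return_value = 'foo'
-    ...   foo = Foo()
-    ...   foo.foo()
-    ...
-    'foo'
-    >>> mock_foo.assert_called_once_with()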
-
-
-Checking multiple calls with mock
-=================================
-
-mock has a nice API for making assertions about how your mock objects are used.
-
-.. doctest::
-
-    >>> mock = Mock()
-    >>> mock.foo_bar.return_value = None
-    >>> mock.foo_bar('baz', spam='eggs')
-    >>> mock.foo_bar.assert_called_with('baz', spam='eggs')
-
-If your mock is only being called once you can use the
-:meth:`assert_called_once_with` method that also asserts that the
-:attr:`call_count` is one.
-
-.. doctest::
-
-    >>> mock.foo_bar.assert_called_once_with('baz', spam='eggs')
-    >>> mock.foo_bar()
-    >>> mock.foo_bar.assert_called_once_with('baz', spam='eggs')
-    Traceback (most recent call last):
-        ...
-    AssertionError: Expected to be called once. Called 2 times.
-
-Both `assert_called_with` and `assert_called_once_with` make assertions about
-the *most recent* call. If your mock is going to be called several times, and
-you want to make assertions about *all* those calls you can use
-:attr:`~Mock.call_args_list`:
-
-.. doctest::
-
-    >>> mock = Mock(return_value=None)
-    >>> mock(1, 2, 3)
-    >>> mock(4, 5, 6)
-    >>> mock()
-    >>> mock.call_args_list
-    [call(1, 2, 3), call(4, 5, 6), call()]
-
-The :data:`call` helper makes it easy to make assertions about these calls. You
-can build up a list of expected calls and compare it to `call_args_list`. This
-looks remarkably similar to the repr of the `call_args_list`:
-
-.. doctest::
-
-    >>> expected = [call(1, 2, 3), call(4, 5, 6), call()]
-    >>> mock.call_args_list == expected
-    True
-
-
-Coping with mutable arguments
-=============================
-
-Another situation that is rare, but can bite you, is when your mock is called with
-mutable arguments. `call_args` and `call_args_list` store *references* to the
-arguments. If the arguments are mutated by the code under test then you can no
-longer make assertions about what the values were when the mock was called.
-
-Here's some example code that shows the problem. Imagine the following functions
-defined in 'mymodule'::
-
-    def frob(val):
-        pass
-
-    def grob(val):
-        "First frob and then clear val"
-        frob(val)
-        val.clear()
-
-When we try to test that `grob` calls `frob` with the correct argument look
-what happens:
-
-.. doctest::
-
-    >>> with patch('mymodule.frob') as mock_frob:
-    ...     val = set([6])
-    ...     mymodule.grob(val)
-    ...
-    >>> val
-    set([])
-    >>> mock_frob.assert_called_with(set([6]))
-    Traceback (most recent call last):
-        ...
-    AssertionError: Expected: ((set([6]),), {})
-    Called with: ((set([]),), {})
-
-One possibility would be for mock to copy the arguments you pass in. This
-could then cause problems if you do assertions that rely on object identity
-for equality.
-
-Here's one solution that uses the :attr:`side_effect`
-functionality. If you provide a `side_effect` function for a mock then
-`side_effect` will be called with the same args as the mock. This gives us an
-opportunity to copy the arguments and store them for later assertions. In this
-example I'm using *another* mock to store the arguments so that I can use the
-mock methods for doing the assertion. Again a helper function sets this up for
-me.
-
-.. doctest::
-
-    >>> from copy import deepcopy
-    >>> from mock import Mock, patch, DEFAULT
-    >>> def copy_call_args(mock):
-    ...     new_mock = Mock()
-    ...     def side_effect(*args, **kwargs):
-    ...         args = deepcopy(args)
-    ...         kwargs = deepcopy(kwargs)
-    ...         new_mock(*args, **kwargs)
-    ...         return DEFAULT
-    ...     mock.side_effect = side_effect
-    ...     return new_mock
-    ...
-    >>> with patch('mymodule.frob') as mock_frob:
-    ...     new_mock = copy_call_args(mock_frob)
-    ...     val = set([6])
-    ...     mymodule.grob(val)
-    ...
-    >>> new_mock.assert_called_with(set([6]))
-    >>> new_mock.call_args
-    call(set([6]))
-
-`copy_call_args` is called with the mock that will be called. It returns a new
-mock that we do the assertion on. The `side_effect` function makes a copy of
-the args and calls our `new_mock` with the copy.
-
-.. note::
-
-    If your mock is only going to be used once there is an easier way of
-    checking arguments at the point they are called. You can simply do the
-    checking inside a `side_effect` function.
-
-    .. doctest::
-
-        >>> def side_effect(arg):
-        ...     assert arg == set([6])
-        ...
-        >>> mock = Mock(side_effect=side_effect)
-        >>> mock(set([6]))
-        >>> mock(set())
-        Traceback (most recent call last):
-            ...
-        AssertionError
-
-An alternative approach is to create a subclass of `Mock` or `MagicMock` that
-copies (using `copy.deepcopy
-<http://docs.python.org/library/copy.html#copy.deepcopy>`_) the arguments.
-Here's an example implementation:
-
-.. doctest::
-
-    >>> from copy import deepcopy
-    >>> class CopyingMock(MagicMock):
-    ...     def __call__(self, *args, **kwargs):
-    ...         args = deepcopy(args)
-    ...         kwargs = deepcopy(kwargs)
-    ...         return super(CopyingMock, self).__call__(*args, **kwargs)
-    ...
-    >>> c = CopyingMock(return_value=None)
-    >>> arg = set()
-    >>> c(arg)
-    >>> arg.add(1)
-    >>> c.assert_called_with(set())
-    >>> c.assert_called_with(arg)
-    Traceback (most recent call last):
-        ...
-    AssertionError: Expected call: mock(set([1]))
-    Actual call: mock(set([]))
-    >>> c.foo
-    <CopyingMock name='mock.foo' id='...'>
-
-When you subclass `Mock` or `MagicMock` all dynamically created attributes,
-and the `return_value` will use your subclass automatically. That means all
-children of a `CopyingMock` will also have the type `CopyingMock`.
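-
-A quick check with the `CopyingMock` class from above:
-
-.. doctest::
-
-    >>> cm = CopyingMock()
-    >>> assert isinstance(cm.child_attribute, CopyingMock)
-    >>> assert isinstance(cm(), CopyingMock)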
-
-
-Raising exceptions on attribute access
-======================================
-
-You can use :class:`PropertyMock` to mimic the behaviour of properties. This
-includes raising exceptions when an attribute is accessed.
-
-Here's an example raising a `ValueError` when the 'foo' attribute is accessed:
-
-.. doctest::
-
-    >>> m = MagicMock()
-    >>> p = PropertyMock(side_effect=ValueError)
-    >>> type(m).foo = p
-    >>> m.foo
-    Traceback (most recent call last):
-    ....
-    ValueError
-
-Because every mock object has its own type, a new subclass of whichever mock
-class you're using, all mock objects are isolated from each other. You can
-safely attach properties (or other descriptors or whatever you want in fact)
-to `type(mock)` without affecting other mock objects.
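-
-For example, a property attached to the type of one mock doesn't appear on
-another:
-
-.. doctest::
-
-    >>> m1, m2 = MagicMock(), MagicMock()
-    >>> type(m1).foo = PropertyMock(return_value='patched')
-    >>> m1.foo
-    'patched'
-    >>> m2.foo
-    <MagicMock name='mock.foo' id='...'>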
-
-
-Multiple calls with different effects
-=====================================
-
-.. note::
-
-    In mock 1.0 the handling of iterable `side_effect` was changed. Any
-    exceptions in the iterable will be raised instead of returned.
-
-Handling code that needs to behave differently on subsequent calls during the
-test can be tricky. For example you may have a function that needs to raise
-an exception the first time it is called but return a response on the second
-call (testing retry behaviour).
-
-One approach is to use a :attr:`side_effect` function that replaces itself. The
-first time it is called the `side_effect` sets a new `side_effect` that will
-be used for the second call. It then raises an exception:
-
-.. doctest::
-
-    >>> def side_effect(*args):
-    ...   def second_call(*args):
-    ...     return 'response'
-    ...   mock.side_effect = second_call
-    ...   raise Exception('boom')
-    ...
-    >>> mock = Mock(side_effect=side_effect)
-    >>> mock('first')
-    Traceback (most recent call last):
-        ...
-    Exception: boom
-    >>> mock('second')
-    'response'
-    >>> mock.assert_called_with('second')
-
-Another perfectly valid way would be to pop return values from a list. If the
-return value is an exception, raise it instead of returning it:
-
-.. doctest::
-
-    >>> returns = [Exception('boom'), 'response']
-    >>> def side_effect(*args):
-    ...   result = returns.pop(0)
-    ...   if isinstance(result, Exception):
-    ...     raise result
-    ...   return result
-    ...
-    >>> mock = Mock(side_effect=side_effect)
-    >>> mock('first')
-    Traceback (most recent call last):
-        ...
-    Exception: boom
-    >>> mock('second')
-    'response'
-    >>> mock.assert_called_with('second')
-
-Which approach you prefer is a matter of taste. The first approach is actually
-a line shorter but maybe the second approach is more readable.
-
-
-Nesting Patches
-===============
-
-Using patch as a context manager is nice, but if you do multiple patches you
-can end up with nested with statements indenting further and further to the
-right:
-
-.. doctest::
-
-    >>> class MyTest(TestCase):
-    ...
-    ...     def test_foo(self):
-    ...         with patch('mymodule.Foo') as mock_foo:
-    ...             with patch('mymodule.Bar') as mock_bar:
-    ...                 with patch('mymodule.Spam') as mock_spam:
-    ...                     assert mymodule.Foo is mock_foo
-    ...                     assert mymodule.Bar is mock_bar
-    ...                     assert mymodule.Spam is mock_spam
-    ...
-    >>> original = mymodule.Foo
-    >>> MyTest('test_foo').test_foo()
-    >>> assert mymodule.Foo is original
-
-With unittest2_ `cleanup` functions and the :ref:`start-and-stop` we can
-achieve the same effect without the nested indentation. A simple helper
-method, `create_patch`, puts the patch in place and returns the created mock
-for us:
-
-.. doctest::
-
-    >>> class MyTest(TestCase):
-    ...
-    ...     def create_patch(self, name):
-    ...         patcher = patch(name)
-    ...         thing = patcher.start()
-    ...         self.addCleanup(patcher.stop)
-    ...         return thing
-    ...
-    ...     def test_foo(self):
-    ...         mock_foo = self.create_patch('mymodule.Foo')
-    ...         mock_bar = self.create_patch('mymodule.Bar')
-    ...         mock_spam = self.create_patch('mymodule.Spam')
-    ...
-    ...         assert mymodule.Foo is mock_foo
-    ...         assert mymodule.Bar is mock_bar
-    ...         assert mymodule.Spam is mock_spam
-    ...
-    >>> original = mymodule.Foo
-    >>> MyTest('test_foo').run()
-    >>> assert mymodule.Foo is original
-
-
-Mocking a dictionary with MagicMock
-===================================
-
-You may want to mock a dictionary, or other container object, recording all
-access to it whilst having it still behave like a dictionary.
-
-We can do this with :class:`MagicMock`, which will behave like a dictionary,
-and using :data:`~Mock.side_effect` to delegate dictionary access to a real
-underlying dictionary that is under our control.
-
-When the `__getitem__` and `__setitem__` methods of our `MagicMock` are called
-(normal dictionary access) then `side_effect` is called with the key (and in
-the case of `__setitem__` the value too). We can also control what is returned.
-
-After the `MagicMock` has been used we can use attributes like
-:data:`~Mock.call_args_list` to assert about how the dictionary was used:
-
-.. doctest::
-
-    >>> my_dict = {'a': 1, 'b': 2, 'c': 3}
-    >>> def getitem(name):
-    ...      return my_dict[name]
-    ...
-    >>> def setitem(name, val):
-    ...     my_dict[name] = val
-    ...
-    >>> mock = MagicMock()
-    >>> mock.__getitem__.side_effect = getitem
-    >>> mock.__setitem__.side_effect = setitem
-
-.. note::
-
-    An alternative to using `MagicMock` is to use `Mock` and *only* provide
-    the magic methods you specifically want:
-
-    .. doctest::
-
-        >>> mock = Mock()
-        >>> mock.__setitem__ = Mock(side_effect=setitem)
-        >>> mock.__getitem__ = Mock(side_effect=getitem)
-
-    A *third* option is to use `MagicMock` but passing in `dict` as the `spec`
-    (or `spec_set`) argument so that the `MagicMock` created only has
-    dictionary magic methods available:
-
-    .. doctest::
-
-        >>> mock = MagicMock(spec_set=dict)
-        >>> mock.__getitem__.side_effect = getitem
-        >>> mock.__setitem__.side_effect = setitem
-
-With these side effect functions in place, the `mock` will behave like a normal
-dictionary while recording the access. It even raises a `KeyError` if you try
-to access a key that doesn't exist.
-
-.. doctest::
-
-    >>> mock['a']
-    1
-    >>> mock['c']
-    3
-    >>> mock['d']
-    Traceback (most recent call last):
-        ...
-    KeyError: 'd'
-    >>> mock['b'] = 'fish'
-    >>> mock['d'] = 'eggs'
-    >>> mock['b']
-    'fish'
-    >>> mock['d']
-    'eggs'
-
-After it has been used you can make assertions about the access using the normal
-mock methods and attributes:
-
-.. doctest::
-
-    >>> mock.__getitem__.call_args_list
-    [call('a'), call('c'), call('d'), call('b'), call('d')]
-    >>> mock.__setitem__.call_args_list
-    [call('b', 'fish'), call('d', 'eggs')]
-    >>> my_dict
-    {'a': 1, 'c': 3, 'b': 'fish', 'd': 'eggs'}
-
-
-Mock subclasses and their attributes
-====================================
-
-There are various reasons why you might want to subclass `Mock`. One reason
-might be to add helper methods. Here's a silly example:
-
-.. doctest::
-
-    >>> class MyMock(MagicMock):
-    ...     def has_been_called(self):
-    ...         return self.called
-    ...
-    >>> mymock = MyMock(return_value=None)
-    >>> mymock
-    <MyMock id='...'>
-    >>> mymock.has_been_called()
-    False
-    >>> mymock()
-    >>> mymock.has_been_called()
-    True
-
-The standard behaviour for `Mock` instances is that attributes and the return
-value mocks are of the same type as the mock they are accessed on. This ensures
-that `Mock` attributes are `Mocks` and `MagicMock` attributes are `MagicMocks`
-[#]_. So if you're subclassing to add helper methods then they'll also be
-available on the attributes and return value mock of instances of your
-subclass.
-
-.. doctest::
-
-    >>> mymock.foo
-    <MyMock name='mock.foo' id='...'>
-    >>> mymock.foo.has_been_called()
-    False
-    >>> mymock.foo()
-    <MyMock name='mock.foo()' id='...'>
-    >>> mymock.foo.has_been_called()
-    True
-
-Sometimes this is inconvenient. For example, `one user
-<https://code.google.com/p/mock/issues/detail?id=105>`_ is subclassing mock to
-create a `Twisted adaptor
-<http://twistedmatrix.com/documents/11.0.0/api/twisted.python.components.html>`_.
-Having this applied to attributes too actually causes errors.
-
-`Mock` (in all its flavours) uses a method called `_get_child_mock` to create
-these "sub-mocks" for attributes and return values. You can prevent your
-subclass being used for attributes by overriding this method. The signature is
-that it takes arbitrary keyword arguments (`**kwargs`) which are then passed
-onto the mock constructor:
-
-.. doctest::
-
-    >>> class Subclass(MagicMock):
-    ...     def _get_child_mock(self, **kwargs):
-    ...         return MagicMock(**kwargs)
-    ...
-    >>> mymock = Subclass()
-    >>> mymock.foo
-    <MagicMock name='mock.foo' id='...'>
-    >>> assert isinstance(mymock, Subclass)
-    >>> assert not isinstance(mymock.foo, Subclass)
-    >>> assert not isinstance(mymock(), Subclass)
-
-.. [#] An exception to this rule is the non-callable mocks. Attributes use the
-    callable variant because otherwise non-callable mocks couldn't have callable
-    methods.
-
-
-Mocking imports with patch.dict
-===============================
-
-One situation where mocking can be hard is where you have a local import inside
-a function. These are harder to mock because they aren't using an object from
-the module namespace that we can patch out.
-
-Generally local imports are to be avoided. They are sometimes done to prevent
-circular dependencies, for which there is *usually* a much better way to solve
-the problem (refactor the code) or to prevent "up front costs" by delaying the
-import. This can also be solved in better ways than an unconditional local
-import (store the module as a class or module attribute and only do the import
-on first use).
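-
-A minimal sketch of that last alternative (the `json` module is just an
-illustration, standing in for whatever is expensive to import)::
-
-    class Thing(object):
-        _json = None
-
-        def to_json(self):
-            # do the import once, on first use, instead of at module level
-            if Thing._json is None:
-                import json
-                Thing._json = json
-            return Thing._json.dumps(self.__dict__)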
-
-That aside there is a way to use `mock` to affect the results of an import.
-Importing fetches an *object* from the `sys.modules` dictionary. Note that it
-fetches an *object*, which need not be a module. Importing a module for the
-first time results in a module object being put in `sys.modules`, so usually
-when you import something you get a module back. This need not be the case,
-however.
-
-This means you can use :func:`patch.dict` to *temporarily* put a mock in place
-in `sys.modules`. Any imports whilst this patch is active will fetch the mock.
-When the patch is complete (the decorated function exits, the with statement
-body is complete or `patcher.stop()` is called) then whatever was there
-previously will be restored safely.
-
-Here's an example that mocks out the 'fooble' module.
-
-.. doctest::
-
-    >>> mock = Mock()
-    >>> with patch.dict('sys.modules', {'fooble': mock}):
-    ...    import fooble
-    ...    fooble.blob()
-    ...
-    <Mock name='mock.blob()' id='...'>
-    >>> assert 'fooble' not in sys.modules
-    >>> mock.blob.assert_called_once_with()
-
-As you can see the `import fooble` succeeds, but on exit there is no 'fooble'
-left in `sys.modules`.
-
-This also works for the `from module import name` form:
-
-.. doctest::
-
-    >>> mock = Mock()
-    >>> with patch.dict('sys.modules', {'fooble': mock}):
-    ...    from fooble import blob
-    ...    blob.blip()
-    ...
-    <Mock name='mock.blob.blip()' id='...'>
-    >>> mock.blob.blip.assert_called_once_with()
-
-With slightly more work you can also mock package imports:
-
-.. doctest::
-
-    >>> mock = Mock()
-    >>> modules = {'package': mock, 'package.module': mock.module}
-    >>> with patch.dict('sys.modules', modules):
-    ...    from package.module import fooble
-    ...    fooble()
-    ...
-    <Mock name='mock.module.fooble()' id='...'>
-    >>> mock.module.fooble.assert_called_once_with()
-
-
-Tracking order of calls and less verbose call assertions
-========================================================
-
-The :class:`Mock` class allows you to track the *order* of method calls on
-your mock objects through the :attr:`~Mock.method_calls` attribute. This
-doesn't allow you to track the order of calls between separate mock objects;
-however, we can use :attr:`~Mock.mock_calls` to achieve the same effect.
-
-Because mocks track calls to child mocks in `mock_calls`, and accessing an
-arbitrary attribute of a mock creates a child mock, we can create our separate
-mocks from a parent one. Calls to those child mocks will then all be recorded,
-in order, in the `mock_calls` of the parent:
-
-.. doctest::
-
-    >>> manager = Mock()
-    >>> mock_foo = manager.foo
-    >>> mock_bar = manager.bar
-
-    >>> mock_foo.something()
-    <Mock name='mock.foo.something()' id='...'>
-    >>> mock_bar.other.thing()
-    <Mock name='mock.bar.other.thing()' id='...'>
-
-    >>> manager.mock_calls
-    [call.foo.something(), call.bar.other.thing()]
-
-We can then assert about the calls, including the order, by comparing with
-the `mock_calls` attribute on the manager mock:
-
-.. doctest::
-
-    >>> expected_calls = [call.foo.something(), call.bar.other.thing()]
-    >>> manager.mock_calls == expected_calls
-    True
-
-If `patch` is creating, and putting in place, your mocks then you can attach
-them to a manager mock using the :meth:`~Mock.attach_mock` method. After
-attaching, calls will be recorded in `mock_calls` of the manager.
-
-.. doctest::
-
-    >>> manager = MagicMock()
-    >>> with patch('mymodule.Class1') as MockClass1:
-    ...     with patch('mymodule.Class2') as MockClass2:
-    ...         manager.attach_mock(MockClass1, 'MockClass1')
-    ...         manager.attach_mock(MockClass2, 'MockClass2')
-    ...         MockClass1().foo()
-    ...         MockClass2().bar()
-    ...
-    <MagicMock name='mock.MockClass1().foo()' id='...'>
-    <MagicMock name='mock.MockClass2().bar()' id='...'>
-    >>> manager.mock_calls
-    [call.MockClass1(),
-     call.MockClass1().foo(),
-     call.MockClass2(),
-     call.MockClass2().bar()]
-
-If many calls have been made, but you're only interested in a particular
-sequence of them then an alternative is to use the
-:meth:`~Mock.assert_has_calls` method. This takes a list of calls (constructed
-with the :data:`call` object). If that sequence of calls is in
-:attr:`~Mock.mock_calls` then the assert succeeds.
-
-.. doctest::
-
-    >>> m = MagicMock()
-    >>> m().foo().bar().baz()
-    <MagicMock name='mock().foo().bar().baz()' id='...'>
-    >>> m.one().two().three()
-    <MagicMock name='mock.one().two().three()' id='...'>
-    >>> calls = call.one().two().three().call_list()
-    >>> m.assert_has_calls(calls)
-
-Even though the chained call `m.one().two().three()` isn't the only call that
-has been made to the mock, the assert still succeeds.
-
-Sometimes a mock may have several calls made to it, and you are only interested
-in asserting about *some* of those calls. You may not even care about the
-order. In this case you can pass `any_order=True` to `assert_has_calls`:
-
-.. doctest::
-
-    >>> m = MagicMock()
-    >>> m(1), m.two(2, 3), m.seven(7), m.fifty('50')
-    (...)
-    >>> calls = [call.fifty('50'), call(1), call.seven(7)]
-    >>> m.assert_has_calls(calls, any_order=True)
-
-
-More complex argument matching
-==============================
-
-Using the same basic concept as `ANY` we can implement matchers to do more
-complex assertions on objects used as arguments to mocks.
-
-Suppose we expect some object to be passed to a mock that by default
-compares equal based on object identity (which is the Python default for user
-defined classes). To use :meth:`~Mock.assert_called_with` we would need to pass
-in the exact same object. If we are only interested in some of the attributes
-of this object then we can create a matcher that will check these attributes
-for us.
-
-You can see in this example how a 'standard' call to `assert_called_with` isn't
-sufficient:
-
-.. doctest::
-
-    >>> class Foo(object):
-    ...     def __init__(self, a, b):
-    ...         self.a, self.b = a, b
-    ...
-    >>> mock = Mock(return_value=None)
-    >>> mock(Foo(1, 2))
-    >>> mock.assert_called_with(Foo(1, 2))
-    Traceback (most recent call last):
-        ...
-    AssertionError: Expected: call(<__main__.Foo object at 0x...>)
-    Actual call: call(<__main__.Foo object at 0x...>)
-
-A comparison function for our `Foo` class might look something like this:
-
-.. doctest::
-
-    >>> def compare(self, other):
-    ...     if not type(self) == type(other):
-    ...         return False
-    ...     if self.a != other.a:
-    ...         return False
-    ...     if self.b != other.b:
-    ...         return False
-    ...     return True
-    ...
-
-And a matcher object that can use comparison functions like this for its
-equality operation would look something like this:
-
-.. doctest::
-
-    >>> class Matcher(object):
-    ...     def __init__(self, compare, some_obj):
-    ...         self.compare = compare
-    ...         self.some_obj = some_obj
-    ...     def __eq__(self, other):
-    ...         return self.compare(self.some_obj, other)
-    ...
-
-Putting all this together:
-
-.. doctest::
-
-    >>> match_foo = Matcher(compare, Foo(1, 2))
-    >>> mock.assert_called_with(match_foo)
-
-The `Matcher` is instantiated with our compare function and the `Foo` object
-we want to compare against. In `assert_called_with` the `Matcher` equality
-method will be called, which compares the object the mock was called with
-against the one we created our matcher with. If they match then
-`assert_called_with` passes, and if they don't an `AssertionError` is raised:
-
-.. doctest::
-
-    >>> match_wrong = Matcher(compare, Foo(3, 4))
-    >>> mock.assert_called_with(match_wrong)
-    Traceback (most recent call last):
-        ...
-    AssertionError: Expected: ((<Matcher object at 0x...>,), {})
-    Called with: ((<Foo object at 0x...>,), {})
-
-With a bit of tweaking you could have the comparison function raise the
-`AssertionError` directly and provide a more useful failure message.
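-
-For example, one possible tweak to the `compare` function above (the message
-formats here are just suggestions)::
-
-    def compare(self, other):
-        if not type(self) == type(other):
-            raise AssertionError('%r is not a %s' % (other, type(self).__name__))
-        if self.a != other.a:
-            raise AssertionError('a: %r != %r' % (self.a, other.a))
-        if self.b != other.b:
-            raise AssertionError('b: %r != %r' % (self.b, other.b))
-        return True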
-
-As of version 1.5, the Python testing library `PyHamcrest
-<http://pypi.python.org/pypi/PyHamcrest>`_ provides similar functionality
-that may be useful here, in the form of its equality matcher
-(`hamcrest.library.integration.match_equality
-<http://packages.python.org/PyHamcrest/integration.html#hamcrest.library.integration.match_equality>`_).
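-
-A sketch of how that might look, assuming PyHamcrest's `match_equality`,
-`all_of` and `has_property` matchers are available::
-
-    from hamcrest import all_of, has_property
-    from hamcrest.library.integration import match_equality
-
-    # equality is delegated to the wrapped hamcrest matcher
-    matcher = match_equality(all_of(has_property('a', 1), has_property('b', 2)))
-    mock.assert_called_with(matcher)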
-
-
-Less verbose configuration of mock objects
-==========================================
-
-This recipe, for easier configuration of mock objects, is now part of `Mock`.
-See the :meth:`~Mock.configure_mock` method.
-
-
-Matching any argument in assertions
-===================================
-
-This example is now built in to mock. See :data:`ANY`.
-
-
-Mocking Properties
-==================
-
-This example is now built in to mock. See :class:`PropertyMock`.
-
-
-Mocking open
-============
-
-This example is now built in to mock. See :func:`mock_open`.
-
-
-Mocks without some attributes
-=============================
-
-This example is now built in to mock. See :ref:`deleting-attributes`.
diff --git a/branch-1.2/ambari-common/src/test/python/mock/docs/getting-started.txt b/branch-1.2/ambari-common/src/test/python/mock/docs/getting-started.txt
deleted file mode 100644
index 1b5d289..0000000
--- a/branch-1.2/ambari-common/src/test/python/mock/docs/getting-started.txt
+++ /dev/null
@@ -1,479 +0,0 @@
-===========================
- Getting Started with Mock
-===========================
-
-.. _getting-started:
-
-.. index:: Getting Started
-
-.. testsetup::
-
-    class SomeClass(object):
-        static_method = None
-        class_method = None
-        attribute = None
-
-    sys.modules['package'] = package = Mock(name='package')
-    sys.modules['package.module'] = module = package.module
-    sys.modules['module'] = package.module
-
-
-Using Mock
-==========
-
-Mock Patching Methods
----------------------
-
-Common uses for :class:`Mock` objects include:
-
-* Patching methods
-* Recording method calls on objects
-
-You might want to replace a method on an object to check that
-it is called with the correct arguments by another part of the system:
-
-.. doctest::
-
-    >>> real = SomeClass()
-    >>> real.method = MagicMock(name='method')
-    >>> real.method(3, 4, 5, key='value')
-    <MagicMock name='method()' id='...'>
-
-Once our mock has been used (`real.method` in this example) it has methods
-and attributes that allow you to make assertions about how it has been used.
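-
-For example:
-
-.. doctest::
-
-    >>> real.method.assert_called_with(3, 4, 5, key='value')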
-
-.. note::
-
-    In most of these examples the :class:`Mock` and :class:`MagicMock` classes
-    are interchangeable. As the `MagicMock` is the more capable class it makes
-    a sensible one to use by default.
-
-Once the mock has been called its :attr:`~Mock.called` attribute is set to
-`True`. More importantly we can use the :meth:`~Mock.assert_called_with` or
-:meth:`~Mock.assert_called_once_with` method to check that it was called with
-the correct arguments.
-
-This example tests that calling `ProductionClass().method` results in a call to
-the `something` method:
-
-.. doctest::
-
-    >>> from mock import MagicMock
-    >>> class ProductionClass(object):
-    ...     def method(self):
-    ...         self.something(1, 2, 3)
-    ...     def something(self, a, b, c):
-    ...         pass
-    ...
-    >>> real = ProductionClass()
-    >>> real.something = MagicMock()
-    >>> real.method()
-    >>> real.something.assert_called_once_with(1, 2, 3)
-
-
-
-Mock for Method Calls on an Object
-----------------------------------
-
-In the last example we patched a method directly on an object to check that it
-was called correctly. Another common use case is to pass an object into a
-method (or some part of the system under test) and then check that it is used
-in the correct way.
-
-The simple `ProductionClass` below has a `closer` method. If it is called with
-an object then it calls `close` on it.
-
-.. doctest::
-
-    >>> class ProductionClass(object):
-    ...     def closer(self, something):
-    ...         something.close()
-    ...
-
-So to test it we need to pass in an object with a `close` method and check
-that it was called correctly.
-
-.. doctest::
-
-    >>> real = ProductionClass()
-    >>> mock = Mock()
-    >>> real.closer(mock)
-    >>> mock.close.assert_called_with()
-
-We don't have to do any work to provide the 'close' method on our mock.
-Accessing close creates it. So, if 'close' hasn't already been called then
-accessing it in the test will create it, but :meth:`~Mock.assert_called_with`
-will raise a failure exception.
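-
-For example (the exact failure message may vary between mock versions):
-
-.. doctest::
-
-    >>> other = Mock()
-    >>> other.close.assert_called_with()
-    Traceback (most recent call last):
-        ...
-    AssertionError: Expected call: close()
-    Not called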
-
-
-Mocking Classes
----------------
-
-A common use case is to mock out classes instantiated by your code under test.
-When you patch a class, then that class is replaced with a mock. Instances
-are created by *calling the class*. This means you access the "mock instance"
-by looking at the return value of the mocked class.
-
-In the example below we have a function `some_function` that instantiates `Foo`
-and calls a method on it. The call to `patch` replaces the class `Foo` with a
-mock. The `Foo` instance is the result of calling the mock, so it is configured
-by modifying the mock :attr:`~Mock.return_value`.
-
-.. doctest::
-
-    >>> def some_function():
-    ...     instance = module.Foo()
-    ...     return instance.method()
-    ...
-    >>> with patch('module.Foo') as mock:
-    ...     instance = mock.return_value
-    ...     instance.method.return_value = 'the result'
-    ...     result = some_function()
-    ...     assert result == 'the result'
-
-
-Naming your mocks
------------------
-
-It can be useful to give your mocks a name. The name is shown in the repr of
-the mock and can be helpful when the mock appears in test failure messages. The
-name is also propagated to attributes or methods of the mock:
-
-.. doctest::
-
-    >>> mock = MagicMock(name='foo')
-    >>> mock
-    <MagicMock name='foo' id='...'>
-    >>> mock.method
-    <MagicMock name='foo.method' id='...'>
-
-
-Tracking all Calls
-------------------
-
-Often you want to track more than a single call to a method. The
-:attr:`~Mock.mock_calls` attribute records all calls
-to child attributes of the mock, and also to their children.
-
-.. doctest::
-
-    >>> mock = MagicMock()
-    >>> mock.method()
-    <MagicMock name='mock.method()' id='...'>
-    >>> mock.attribute.method(10, x=53)
-    <MagicMock name='mock.attribute.method()' id='...'>
-    >>> mock.mock_calls
-    [call.method(), call.attribute.method(10, x=53)]
-
-If you make an assertion about `mock_calls` and any unexpected methods
-have been called, then the assertion will fail. This is useful because as well
-as asserting that the calls you expected have been made, you are also checking
-that they were made in the right order and with no additional calls.
-
-You use the :data:`call` object to construct lists for comparing with
-`mock_calls`:
-
-.. doctest::
-
-    >>> expected = [call.method(), call.attribute.method(10, x=53)]
-    >>> mock.mock_calls == expected
-    True
-
-
-Setting Return Values and Attributes
-------------------------------------
-
-Setting the return values on a mock object is trivially easy:
-
-.. doctest::
-
-    >>> mock = Mock()
-    >>> mock.return_value = 3
-    >>> mock()
-    3
-
-Of course you can do the same for methods on the mock:
-
-.. doctest::
-
-    >>> mock = Mock()
-    >>> mock.method.return_value = 3
-    >>> mock.method()
-    3
-
-The return value can also be set in the constructor:
-
-.. doctest::
-
-    >>> mock = Mock(return_value=3)
-    >>> mock()
-    3
-
-If you need an attribute setting on your mock, just do it:
-
-.. doctest::
-
-    >>> mock = Mock()
-    >>> mock.x = 3
-    >>> mock.x
-    3
-
-Sometimes you want to mock up a more complex situation, like for example
-`mock.connection.cursor().execute("SELECT 1")`. If we wanted this call to
-return a list, then we have to configure the result of the nested call.
-
-We can use :data:`call` to construct the set of calls in a "chained call" like
-this for easy assertion afterwards:
-
-
-.. doctest::
-
-    >>> mock = Mock()
-    >>> cursor = mock.connection.cursor.return_value
-    >>> cursor.execute.return_value = ['foo']
-    >>> mock.connection.cursor().execute("SELECT 1")
-    ['foo']
-    >>> expected = call.connection.cursor().execute("SELECT 1").call_list()
-    >>> mock.mock_calls
-    [call.connection.cursor(), call.connection.cursor().execute('SELECT 1')]
-    >>> mock.mock_calls == expected
-    True
-
-It is the call to `.call_list()` that turns our call object into a list of
-calls representing the chained calls.
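-
-You can see the intermediate calls it constructs:
-
-.. doctest::
-
-    >>> expected
-    [call.connection.cursor(), call.connection.cursor().execute('SELECT 1')]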
-
-
-
-Raising exceptions with mocks
------------------------------
-
-A useful attribute is :attr:`~Mock.side_effect`. If you set this to an
-exception class or instance then the exception will be raised when the mock
-is called.
-
-.. doctest::
-
-    >>> mock = Mock(side_effect=Exception('Boom!'))
-    >>> mock()
-    Traceback (most recent call last):
-      ...
-    Exception: Boom!
-
-
-Side effect functions and iterables
------------------------------------
-
-`side_effect` can also be set to a function or an iterable. The use case for
-`side_effect` as an iterable is where your mock is going to be called several
-times, and you want each call to return a different value. When you set
-`side_effect` to an iterable every call to the mock returns the next value
-from the iterable:
-
-.. doctest::
-
-    >>> mock = MagicMock(side_effect=[4, 5, 6])
-    >>> mock()
-    4
-    >>> mock()
-    5
-    >>> mock()
-    6
-
-
-For more advanced use cases, like dynamically varying the return values
-depending on what the mock is called with, `side_effect` can be a function.
-The function will be called with the same arguments as the mock. Whatever the
-function returns is what the call returns:
-
-.. doctest::
-
-    >>> vals = {(1, 2): 1, (2, 3): 2}
-    >>> def side_effect(*args):
-    ...     return vals[args]
-    ...
-    >>> mock = MagicMock(side_effect=side_effect)
-    >>> mock(1, 2)
-    1
-    >>> mock(2, 3)
-    2
-
-
-Creating a Mock from an Existing Object
----------------------------------------
-
-One problem with overuse of mocking is that it couples your tests to the
-implementation of your mocks rather than your real code. Suppose you have a
-class that implements `some_method`. In a test for another class, you
-provide a mock of this object that *also* provides `some_method`. If later
-you refactor the first class, so that it no longer has `some_method`, then
-your tests will continue to pass even though your code is now broken!
-
-`Mock` allows you to provide an object as a specification for the mock,
-using the `spec` keyword argument. Accessing methods / attributes on the
-mock that don't exist on your specification object will immediately raise an
-attribute error. If you change the implementation of your specification, then
-tests that use that class will start failing immediately without you having to
-instantiate the class in those tests.
-
-.. doctest::
-
-    >>> mock = Mock(spec=SomeClass)
-    >>> mock.old_method()
-    Traceback (most recent call last):
-       ...
-    AttributeError: object has no attribute 'old_method'
-
-If you want a stronger form of specification that prevents the setting
-of arbitrary attributes as well as the getting of them then you can use
-`spec_set` instead of `spec`.
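-
-A small sketch (the exact error message may vary):
-
-.. doctest::
-
-    >>> mock = Mock(spec_set=SomeClass)
-    >>> mock.old_attribute = 3
-    Traceback (most recent call last):
-        ...
-    AttributeError: Mock object has no attribute 'old_attribute'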
-
-
-
-Patch Decorators
-================
-
-.. note::
-
-   With `patch` it matters that you patch objects in the namespace where they
-   are looked up. This is normally straightforward, but for a quick guide
-   read :ref:`where to patch <where-to-patch>`.
-
-
-A common need in tests is to patch a class attribute or a module attribute,
-for example patching a builtin or patching a class in a module to test that it
-is instantiated. Modules and classes are effectively global, so patching on
-them has to be undone after the test or the patch will persist into other
-tests and cause hard to diagnose problems.
-
-mock provides three convenient decorators for this: `patch`, `patch.object` and
-`patch.dict`. `patch` takes a single string, of the form
-`package.module.Class.attribute` to specify the attribute you are patching. It
-also optionally takes a value that you want the attribute (or class or
-whatever) to be replaced with. `patch.object` takes an object and the name of
-the attribute you would like patched, plus optionally the value to patch it
-with.
-
-`patch.object`:
-
-.. doctest::
-
-    >>> original = SomeClass.attribute
-    >>> @patch.object(SomeClass, 'attribute', sentinel.attribute)
-    ... def test():
-    ...     assert SomeClass.attribute == sentinel.attribute
-    ...
-    >>> test()
-    >>> assert SomeClass.attribute == original
-
-    >>> @patch('package.module.attribute', sentinel.attribute)
-    ... def test():
-    ...     from package.module import attribute
-    ...     assert attribute is sentinel.attribute
-    ...
-    >>> test()
-
-If you are patching a module (including `__builtin__`) then use `patch`
-instead of `patch.object`:
-
-.. doctest::
-
-    >>> mock = MagicMock(return_value = sentinel.file_handle)
-    >>> with patch('__builtin__.open', mock):
-    ...     handle = open('filename', 'r')
-    ...
-    >>> mock.assert_called_with('filename', 'r')
-    >>> assert handle == sentinel.file_handle, "incorrect file handle returned"
-
-The module name can be 'dotted', in the form `package.module` if needed:
-
-.. doctest::
-
-    >>> @patch('package.module.ClassName.attribute', sentinel.attribute)
-    ... def test():
-    ...     from package.module import ClassName
-    ...     assert ClassName.attribute == sentinel.attribute
-    ...
-    >>> test()
-
-A nice pattern is to actually decorate test methods themselves:
-
-.. doctest::
-
-    >>> class MyTest(unittest2.TestCase):
-    ...     @patch.object(SomeClass, 'attribute', sentinel.attribute)
-    ...     def test_something(self):
-    ...         self.assertEqual(SomeClass.attribute, sentinel.attribute)
-    ...
-    >>> original = SomeClass.attribute
-    >>> MyTest('test_something').test_something()
-    >>> assert SomeClass.attribute == original
-
-If you want to patch with a Mock, you can use `patch` with only one argument
-(or `patch.object` with two arguments). The mock will be created for you and
-passed into the test function / method:
-
-.. doctest::
-
-    >>> class MyTest(unittest2.TestCase):
-    ...     @patch.object(SomeClass, 'static_method')
-    ...     def test_something(self, mock_method):
-    ...         SomeClass.static_method()
-    ...         mock_method.assert_called_with()
-    ...
-    >>> MyTest('test_something').test_something()
-
-You can stack up multiple patch decorators using this pattern:
-
-.. doctest::
-
-    >>> class MyTest(unittest2.TestCase):
-    ...     @patch('package.module.ClassName1')
-    ...     @patch('package.module.ClassName2')
-    ...     def test_something(self, MockClass2, MockClass1):
-    ...         self.assertTrue(package.module.ClassName1 is MockClass1)
-    ...         self.assertTrue(package.module.ClassName2 is MockClass2)
-    ...
-    >>> MyTest('test_something').test_something()
-
-When you nest patch decorators the mocks are passed in to the decorated
-function in the same order they are applied (the normal *python* order that
-decorators are applied). This means from the bottom up, so in the example
-above the mock for `package.module.ClassName2` is passed in first.
-
-There is also :func:`patch.dict` for setting values in a dictionary just
-during a scope and restoring the dictionary to its original state when the test
-ends:
-
-.. doctest::
-
-   >>> foo = {'key': 'value'}
-   >>> original = foo.copy()
-   >>> with patch.dict(foo, {'newkey': 'newvalue'}, clear=True):
-   ...     assert foo == {'newkey': 'newvalue'}
-   ...
-   >>> assert foo == original
-
-`patch`, `patch.object` and `patch.dict` can all be used as context managers.
-
-Where you use `patch` to create a mock for you, you can get a reference to the
-mock using the "as" form of the with statement:
-
-.. doctest::
-
-    >>> class ProductionClass(object):
-    ...     def method(self):
-    ...         pass
-    ...
-    >>> with patch.object(ProductionClass, 'method') as mock_method:
-    ...     mock_method.return_value = None
-    ...     real = ProductionClass()
-    ...     real.method(1, 2, 3)
-    ...
-    >>> mock_method.assert_called_with(1, 2, 3)
-
-
-As an alternative, `patch`, `patch.object` and `patch.dict` can be used as
-class decorators. When used in this way it is the same as applying the
-decorator individually to every method whose name starts with "test".
-
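-Here is a minimal sketch of `patch.object` used as a class decorator, reusing
-the `SomeClass` and `sentinel` names from the examples above:
-
-.. doctest::
-
-    >>> @patch.object(SomeClass, 'attribute', sentinel.attribute)
-    ... class MyTest(unittest2.TestCase):
-    ...     def test_one(self):
-    ...         self.assertEqual(SomeClass.attribute, sentinel.attribute)
-    ...     def test_two(self):
-    ...         self.assertEqual(SomeClass.attribute, sentinel.attribute)
-    ...
-    >>> MyTest('test_one').test_one()
-    >>> MyTest('test_two').test_two()
-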
-For some more advanced examples, see the :ref:`further-examples` page.
diff --git a/branch-1.2/ambari-common/src/test/python/mock/docs/helpers.txt b/branch-1.2/ambari-common/src/test/python/mock/docs/helpers.txt
deleted file mode 100644
index 571b71d..0000000
--- a/branch-1.2/ambari-common/src/test/python/mock/docs/helpers.txt
+++ /dev/null
@@ -1,583 +0,0 @@
-=========
- Helpers
-=========
-
-.. currentmodule:: mock
-
-.. testsetup::
-
-    mock.FILTER_DIR = True
-    from pprint import pprint as pp
-    original_dir = dir
-    def dir(obj):
-        pp(original_dir(obj))
-
-    import urllib2
-    __main__.urllib2 = urllib2
-
-.. testcleanup::
-
-    dir = original_dir
-    mock.FILTER_DIR = True
-
-
-
-call
-====
-
-.. function:: call(*args, **kwargs)
-
-    `call` is a helper object for making simpler assertions, for comparing
-    with :attr:`~Mock.call_args`, :attr:`~Mock.call_args_list`,
-    :attr:`~Mock.mock_calls` and :attr:`~Mock.method_calls`. `call` can also be
-    used with :meth:`~Mock.assert_has_calls`.
-
-    .. doctest::
-
-        >>> m = MagicMock(return_value=None)
-        >>> m(1, 2, a='foo', b='bar')
-        >>> m()
-        >>> m.call_args_list == [call(1, 2, a='foo', b='bar'), call()]
-        True
-
-.. method:: call.call_list()
-
-    For a call object that represents multiple calls, `call_list`
-    returns a list of all the intermediate calls as well as the
-    final call.
-
-`call_list` is particularly useful for making assertions on "chained calls". A
-chained call is multiple calls on a single line of code. This results in
-multiple entries in :attr:`~Mock.mock_calls` on a mock. Manually constructing
-the sequence of calls can be tedious.
-
-:meth:`~call.call_list` can construct the sequence of calls from the same
-chained call:
-
-.. doctest::
-
-    >>> m = MagicMock()
-    >>> m(1).method(arg='foo').other('bar')(2.0)
-    <MagicMock name='mock().method().other()()' id='...'>
-    >>> kall = call(1).method(arg='foo').other('bar')(2.0)
-    >>> kall.call_list()
-    [call(1),
-     call().method(arg='foo'),
-     call().method().other('bar'),
-     call().method().other()(2.0)]
-    >>> m.mock_calls == kall.call_list()
-    True
-
-.. _calls-as-tuples:
-
-A `call` object is either a tuple of (positional args, keyword args) or
-(name, positional args, keyword args) depending on how it was constructed. When
-you construct them yourself this isn't particularly interesting, but the `call`
-objects that are in the :attr:`Mock.call_args`, :attr:`Mock.call_args_list` and
-:attr:`Mock.mock_calls` attributes can be introspected to get at the individual
-arguments they contain.
-
-The `call` objects in :attr:`Mock.call_args` and :attr:`Mock.call_args_list`
-are two-tuples of (positional args, keyword args) whereas the `call` objects
-in :attr:`Mock.mock_calls`, along with ones you construct yourself, are
-three-tuples of (name, positional args, keyword args).
-
-You can use their "tupleness" to pull out the individual arguments for more
-complex introspection and assertions. The positional arguments are a tuple
-(an empty tuple if there are no positional arguments) and the keyword
-arguments are a dictionary:
-
-.. doctest::
-
-    >>> m = MagicMock(return_value=None)
-    >>> m(1, 2, 3, arg='one', arg2='two')
-    >>> kall = m.call_args
-    >>> args, kwargs = kall
-    >>> args
-    (1, 2, 3)
-    >>> kwargs
-    {'arg2': 'two', 'arg': 'one'}
-    >>> args is kall[0]
-    True
-    >>> kwargs is kall[1]
-    True
-
-    >>> m = MagicMock()
-    >>> m.foo(4, 5, 6, arg='two', arg2='three')
-    <MagicMock name='mock.foo()' id='...'>
-    >>> kall = m.mock_calls[0]
-    >>> name, args, kwargs = kall
-    >>> name
-    'foo'
-    >>> args
-    (4, 5, 6)
-    >>> kwargs
-    {'arg2': 'three', 'arg': 'two'}
-    >>> name is m.mock_calls[0][0]
-    True
-
-
-create_autospec
-===============
-
-.. function:: create_autospec(spec, spec_set=False, instance=False, **kwargs)
-
-    Create a mock object using another object as a spec. Attributes on the
-    mock will use the corresponding attribute on the `spec` object as their
-    spec.
-
-    Functions or methods being mocked will have their arguments checked to
-    ensure that they are called with the correct signature.
-
-    If `spec_set` is `True` then attempting to set attributes that don't exist
-    on the spec object will raise an `AttributeError`.
-
-    If a class is used as a spec then the return value of the mock (the
-    instance of the class) will have the same spec. You can use a class as the
-    spec for an instance object by passing `instance=True`. The returned mock
-    will only be callable if instances of the mock are callable.
-
-    `create_autospec` also takes arbitrary keyword arguments that are passed to
-    the constructor of the created mock.
-
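-As a small sketch of the signature checking this provides (the `add` function
-here is invented for illustration):
-
-.. doctest::
-
-    >>> def add(a, b):
-    ...     return a + b
-    ...
-    >>> mock_add = create_autospec(add, return_value=7)
-    >>> mock_add(1, 2)
-    7
-    >>> mock_add.assert_called_once_with(1, 2)
-    >>> mock_add(1)
-    Traceback (most recent call last):
-     ...
-    TypeError: <lambda>() takes exactly 2 arguments (1 given)
-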
-See :ref:`auto-speccing` for examples of how to use auto-speccing with
-`create_autospec` and the `autospec` argument to :func:`patch`.
-
-
-ANY
-===
-
-.. data:: ANY
-
-Sometimes you may need to make assertions about *some* of the arguments in a
-call to mock, but either not care about some of the arguments or want to pull
-them individually out of :attr:`~Mock.call_args` and make more complex
-assertions on them.
-
-To ignore certain arguments you can pass in objects that compare equal to
-*everything*. Calls to :meth:`~Mock.assert_called_with` and
-:meth:`~Mock.assert_called_once_with` will then succeed no matter what was
-passed in.
-
-.. doctest::
-
-    >>> mock = Mock(return_value=None)
-    >>> mock('foo', bar=object())
-    >>> mock.assert_called_once_with('foo', bar=ANY)
-
-`ANY` can also be used in comparisons with call lists like
-:attr:`~Mock.mock_calls`:
-
-.. doctest::
-
-    >>> m = MagicMock(return_value=None)
-    >>> m(1)
-    >>> m(1, 2)
-    >>> m(object())
-    >>> m.mock_calls == [call(1), call(1, 2), ANY]
-    True
-
-
-
-FILTER_DIR
-==========
-
-.. data:: FILTER_DIR
-
-`FILTER_DIR` is a module level variable that controls the way mock objects
-respond to `dir` (only for Python 2.6 or more recent). The default is `True`,
-which uses the filtering described below, to only show useful members. If you
-dislike this filtering, or need to switch it off for diagnostic purposes, then
-set `mock.FILTER_DIR = False`.
-
-With filtering on, `dir(some_mock)` shows only useful attributes and will
-include any dynamically created attributes that wouldn't normally be shown.
-If the mock was created with a `spec` (or `autospec` of course) then all the
-attributes from the original are shown, even if they haven't been accessed
-yet:
-
-.. doctest::
-
-    >>> dir(Mock())
-    ['assert_any_call',
-     'assert_called_once_with',
-     'assert_called_with',
-     'assert_has_calls',
-     'attach_mock',
-     ...
-    >>> import urllib2
-    >>> dir(Mock(spec=urllib2))
-    ['AbstractBasicAuthHandler',
-     'AbstractDigestAuthHandler',
-     'AbstractHTTPHandler',
-     'BaseHandler',
-     ...
-
-Many of the not-very-useful (private to `Mock` rather than the thing being
-mocked) underscore and double underscore prefixed attributes have been
-filtered from the result of calling `dir` on a `Mock`. If you dislike this
-behaviour you can switch it off by setting the module level switch
-`FILTER_DIR`:
-
-.. doctest::
-
-    >>> import mock
-    >>> mock.FILTER_DIR = False
-    >>> dir(mock.Mock())
-    ['_NonCallableMock__get_return_value',
-     '_NonCallableMock__get_side_effect',
-     '_NonCallableMock__return_value_doc',
-     '_NonCallableMock__set_return_value',
-     '_NonCallableMock__set_side_effect',
-     '__call__',
-     '__class__',
-     ...
-
-Alternatively you can just use `vars(my_mock)` (instance members) and
-`dir(type(my_mock))` (type members) to bypass the filtering irrespective of
-`mock.FILTER_DIR`.
-
-
-mock_open
-=========
-
-.. function:: mock_open(mock=None, read_data=None)
-
-    A helper function to create a mock to replace the use of `open`. It works
-    for `open` called directly or used as a context manager.
-
-    The `mock` argument is the mock object to configure. If `None` (the
-    default) then a `MagicMock` will be created for you, with the API limited
-    to methods or attributes available on standard file handles.
-
-    `read_data` is a string for the `read` method of the file handle to return.
-    This is an empty string by default.
-
-Using `open` as a context manager is a great way to ensure your file handles
-are closed properly and is becoming common::
-
-    with open('/some/path', 'w') as f:
-        f.write('something')
-
-The issue is that even if you mock out the call to `open` it is the
-*returned object* that is used as a context manager (and has `__enter__` and
-`__exit__` called).
-
-Mocking context managers with a :class:`MagicMock` is common enough and fiddly
-enough that a helper function is useful.
-
-.. doctest::
-
-    >>> from mock import mock_open
-    >>> m = mock_open()
-    >>> with patch('__main__.open', m, create=True):
-    ...     with open('foo', 'w') as h:
-    ...         h.write('some stuff')
-    ...
-    >>> m.mock_calls
-    [call('foo', 'w'),
-     call().__enter__(),
-     call().write('some stuff'),
-     call().__exit__(None, None, None)]
-    >>> m.assert_called_once_with('foo', 'w')
-    >>> handle = m()
-    >>> handle.write.assert_called_once_with('some stuff')
-
-And for reading files:
-
-.. doctest::
-
-    >>> with patch('__main__.open', mock_open(read_data='bibble'), create=True) as m:
-    ...     with open('foo') as h:
-    ...         result = h.read()
-    ...
-    >>> m.assert_called_once_with('foo')
-    >>> assert result == 'bibble'
-
-
-.. _auto-speccing:
-
-Autospeccing
-============
-
-Autospeccing is based on the existing `spec` feature of mock. It limits the
-api of mocks to the api of an original object (the spec), but it is recursive
-(implemented lazily) so that attributes of mocks only have the same api as
-the attributes of the spec. In addition mocked functions / methods have the
-same call signature as the original so they raise a `TypeError` if they are
-called incorrectly.
-
-Before I explain how auto-speccing works, here's why it is needed.
-
-`Mock` is a very powerful and flexible object, but it suffers from two flaws
-when used to mock out objects from a system under test. One of these flaws is
-specific to the `Mock` api and the other is a more general problem with using
-mock objects.
-
-First, the problem specific to `Mock`. `Mock` has two assert methods that are
-extremely handy: :meth:`~Mock.assert_called_with` and
-:meth:`~Mock.assert_called_once_with`.
-
-.. doctest::
-
-    >>> mock = Mock(name='Thing', return_value=None)
-    >>> mock(1, 2, 3)
-    >>> mock.assert_called_once_with(1, 2, 3)
-    >>> mock(1, 2, 3)
-    >>> mock.assert_called_once_with(1, 2, 3)
-    Traceback (most recent call last):
-     ...
-    AssertionError: Expected to be called once. Called 2 times.
-
-Because mocks auto-create attributes on demand, and allow you to call them
-with arbitrary arguments, if you misspell one of these assert methods then
-your assertion is gone:
-
-.. code-block:: pycon
-
-    >>> mock = Mock(name='Thing', return_value=None)
-    >>> mock(1, 2, 3)
-    >>> mock.assret_called_once_with(4, 5, 6)
-
-Your tests can pass silently and incorrectly because of the typo.
-
-The second issue is more general to mocking. If you refactor some of your
-code, rename members and so on, any tests for code that is still using the
-*old api* but uses mocks instead of the real objects will still pass. This
-means your tests can all pass even though your code is broken.
-
-Note that this is another reason why you need integration tests as well as
-unit tests. Testing everything in isolation is all fine and dandy, but if you
-don't test how your units are "wired together" there is still lots of room
-for bugs that tests might have caught.
-
-`mock` already provides a feature to help with this, called speccing. If you
-use a class or instance as the `spec` for a mock then you can only access
-attributes on the mock that exist on the real class:
-
-.. doctest::
-
-    >>> import urllib2
-    >>> mock = Mock(spec=urllib2.Request)
-    >>> mock.assret_called_with
-    Traceback (most recent call last):
-     ...
-    AttributeError: Mock object has no attribute 'assret_called_with'
-
-The spec only applies to the mock itself, so we still have the same issue
-with any methods on the mock:
-
-.. code-block:: pycon
-
-    >>> mock.has_data()
-    <mock.Mock object at 0x...>
-    >>> mock.has_data.assret_called_with()
-
-Auto-speccing solves this problem. You can either pass `autospec=True` to
-`patch` / `patch.object` or use the `create_autospec` function to create a
-mock with a spec. If you use the `autospec=True` argument to `patch` then the
-object that is being replaced will be used as the spec object. Because the
-speccing is done "lazily" (the spec is created as attributes on the mock are
-accessed) you can use it with very complex or deeply nested objects (like
-modules that import modules that import modules) without a big performance
-hit.
-
-Here's an example of it in use:
-
-.. doctest::
-
-    >>> import urllib2
-    >>> patcher = patch('__main__.urllib2', autospec=True)
-    >>> mock_urllib2 = patcher.start()
-    >>> urllib2 is mock_urllib2
-    True
-    >>> urllib2.Request
-    <MagicMock name='urllib2.Request' spec='Request' id='...'>
-
-You can see that `urllib2.Request` has a spec. `urllib2.Request` takes two
-arguments in the constructor (one of which is `self`). Here's what happens if
-we try to call it incorrectly:
-
-.. doctest::
-
-    >>> req = urllib2.Request()
-    Traceback (most recent call last):
-     ...
-    TypeError: <lambda>() takes at least 2 arguments (1 given)
-
-The spec also applies to instantiated classes (i.e. the return value of
-specced mocks):
-
-.. doctest::
-
-    >>> req = urllib2.Request('foo')
-    >>> req
-    <NonCallableMagicMock name='urllib2.Request()' spec='Request' id='...'>
-
-`Request` objects are not callable, so the return value of instantiating our
-mocked out `urllib2.Request` is a non-callable mock. With the spec in place
-any typos in our asserts will raise the correct error:
-
-.. doctest::
-
-    >>> req.add_header('spam', 'eggs')
-    <MagicMock name='urllib2.Request().add_header()' id='...'>
-    >>> req.add_header.assret_called_with
-    Traceback (most recent call last):
-     ...
-    AttributeError: Mock object has no attribute 'assret_called_with'
-    >>> req.add_header.assert_called_with('spam', 'eggs')
-
-In many cases you will just be able to add `autospec=True` to your existing
-`patch` calls and then be protected against bugs due to typos and api
-changes.
-
-As well as using `autospec` through `patch`, there is a
-:func:`create_autospec` function for creating autospecced mocks directly:
-
-.. doctest::
-
-    >>> import urllib2
-    >>> mock_urllib2 = create_autospec(urllib2)
-    >>> mock_urllib2.Request('foo', 'bar')
-    <NonCallableMagicMock name='mock.Request()' spec='Request' id='...'>
-
-This isn't without caveats and limitations however, which is why it is not
-the default behaviour. In order to know what attributes are available on the
-spec object, autospec has to introspect (access attributes) the spec. As you
-traverse attributes on the mock a corresponding traversal of the original
-object is happening under the hood. If any of your specced objects have
-properties or descriptors that can trigger code execution then you may not be
-able to use autospec. On the other hand it is much better to design your
-objects so that introspection is safe [#]_.
-
-A more serious problem is that it is common for instance attributes to be
-created in the `__init__` method and not to exist on the class at all.
-`autospec` can't know about any dynamically created attributes and restricts
-the api to visible attributes.
-
-.. doctest::
-
-    >>> class Something(object):
-    ...   def __init__(self):
-    ...     self.a = 33
-    ...
-    >>> with patch('__main__.Something', autospec=True):
-    ...   thing = Something()
-    ...   thing.a
-    ...
-    Traceback (most recent call last):
-      ...
-    AttributeError: Mock object has no attribute 'a'
-
-There are a few different ways of resolving this problem. The easiest, but
-not necessarily the least annoying, way is to simply set the required
-attributes on the mock after creation. Just because `autospec` doesn't allow
-you to fetch attributes that don't exist on the spec doesn't mean it prevents
-you from setting them:
-
-.. doctest::
-
-    >>> with patch('__main__.Something', autospec=True):
-    ...   thing = Something()
-    ...   thing.a = 33
-    ...
-
-There is a more aggressive version of both `spec` and `autospec` that *does*
-prevent you setting non-existent attributes. This is useful if you want to
-ensure your code only *sets* valid attributes too, but obviously it prevents
-this particular scenario:
-
-.. doctest::
-
-    >>> with patch('__main__.Something', autospec=True, spec_set=True):
-    ...   thing = Something()
-    ...   thing.a = 33
-    ...
-    Traceback (most recent call last):
-     ...
-    AttributeError: Mock object has no attribute 'a'
-
-Probably the best way of solving the problem is to add class attributes as
-default values for instance members initialised in `__init__`. Note that if
-you are only setting default attributes in `__init__` then providing them via
-class attributes (shared between instances, of course) is faster too, e.g.:
-
-.. code-block:: python
-
-    class Something(object):
-        a = 33
-
-This brings up another issue. It is relatively common to provide a default
-value of `None` for members that will later be an object of a different type.
-`None` would be useless as a spec because it wouldn't let you access *any*
-attributes or methods on it. As `None` is *never* going to be useful as a
-spec, and probably indicates a member that will normally be of some other type,
-`autospec` doesn't use a spec for members that are set to `None`. These will
-just be ordinary mocks (well - `MagicMocks`):
-
-.. doctest::
-
-    >>> class Something(object):
-    ...     member = None
-    ...
-    >>> mock = create_autospec(Something)
-    >>> mock.member.foo.bar.baz()
-    <MagicMock name='mock.member.foo.bar.baz()' id='...'>
-
-If modifying your production classes to add defaults isn't to your liking
-then there are more options. One of these is simply to use an instance as the
-spec rather than the class. The other is to create a subclass of the
-production class and add the defaults to the subclass without affecting the
-production class. Both of these require you to use an alternative object as
-the spec. Thankfully `patch` supports this - you can simply pass the
-alternative object as the `autospec` argument:
-
-.. doctest::
-
-    >>> class Something(object):
-    ...   def __init__(self):
-    ...     self.a = 33
-    ...
-    >>> class SomethingForTest(Something):
-    ...   a = 33
-    ...
-    >>> p = patch('__main__.Something', autospec=SomethingForTest)
-    >>> mock = p.start()
-    >>> mock.a
-    <NonCallableMagicMock name='Something.a' spec='int' id='...'>
-
-.. note::
-
-    An additional limitation (currently) with `autospec` is that unbound
-    methods on mocked classes *don't* take an "explicit self" as the first
-    argument - so this usage will fail with `autospec`.
-
-    .. doctest::
-
-        >>> class Foo(object):
-        ...   def foo(self):
-        ...     pass
-        ...
-        >>> Foo.foo(Foo())
-        >>> MockFoo = create_autospec(Foo)
-        >>> MockFoo.foo(MockFoo())
-        Traceback (most recent call last):
-          ...
-        TypeError: <lambda>() takes exactly 1 argument (2 given)
-
-    The reason is that it's very hard to tell the difference between functions,
-    unbound methods and staticmethods across Python 2 & 3 and the alternative
-    implementations. This restriction may be fixed in future versions.
-
-
-------
-
-.. [#] This only applies to classes or already instantiated objects. Calling
-   a mocked class to create a mock instance *does not* create a real instance.
-   It is only attribute lookups - along with calls to `dir` - that are done. A
-   way round this problem would have been to use `getattr_static
-   <http://docs.python.org/dev/library/inspect.html#inspect.getattr_static>`_,
-   which can fetch attributes without triggering code execution. Descriptors
-   like `classmethod` and `staticmethod` *need* to be fetched correctly though,
-   so that their signatures can be mocked correctly.
diff --git a/branch-1.2/ambari-common/src/test/python/mock/docs/index.txt b/branch-1.2/ambari-common/src/test/python/mock/docs/index.txt
deleted file mode 100644
index fe89925..0000000
--- a/branch-1.2/ambari-common/src/test/python/mock/docs/index.txt
+++ /dev/null
@@ -1,411 +0,0 @@
-====================================
- Mock - Mocking and Testing Library
-====================================
-
-.. currentmodule:: mock
-
-:Author: `Michael Foord
- <http://www.voidspace.org.uk/python/weblog/index.shtml>`_
-:Version: |release|
-:Date: 2012/10/07
-:Homepage: `Mock Homepage`_
-:Download: `Mock on PyPI`_
-:Documentation: `PDF Documentation
- <http://www.voidspace.org.uk/downloads/mock-1.0.1.pdf>`_
-:License: `BSD License`_
-:Support: `Mailing list (testing-in-python@lists.idyll.org)
- <http://lists.idyll.org/listinfo/testing-in-python>`_
-:Issue tracker: `Google code project
- <http://code.google.com/p/mock/issues/list>`_
-
-.. _Mock Homepage: http://www.voidspace.org.uk/python/mock/
-.. _BSD License: http://www.voidspace.org.uk/python/license.shtml
-
-
-.. currentmodule:: mock
-
-.. module:: mock
-   :synopsis: Mock object and testing library.
-
-.. index:: introduction
-
-mock is a library for testing in Python. It allows you to replace parts of
-your system under test with mock objects and make assertions about how they
-have been used.
-
-mock is now part of the Python standard library, available as `unittest.mock
-<http://docs.python.org/py3k/library/unittest.mock.html#module-unittest.mock>`_
-in Python 3.3 onwards.
-
-mock provides a core :class:`Mock` class removing the need to create a host
-of stubs throughout your test suite. After performing an action, you can make
-assertions about which methods / attributes were used and arguments they were
-called with. You can also specify return values and set needed attributes in
-the normal way.
-
-Additionally, mock provides a :func:`patch` decorator that handles patching
-module and class level attributes within the scope of a test, along with
-:const:`sentinel` for creating unique objects. See the `quick guide`_ for
-some examples of how to use :class:`Mock`, :class:`MagicMock` and
-:func:`patch`.
-
-Mock is very easy to use and is designed for use with
-`unittest2 <http://pypi.python.org/pypi/unittest2>`_. Mock is based on
-the 'action -> assertion' pattern instead of 'record -> replay' used by many
-mocking frameworks.
-
-mock is tested on Python versions 2.4-2.7 and Python 3, plus the latest
-versions of Jython and PyPy.
-
-
-.. testsetup::
-
-   import sys
-
-   class ProductionClass(object):
-      def method(self, *args):
-         pass
-
-   module = sys.modules['module'] = ProductionClass
-   ProductionClass.ClassName1 = ProductionClass
-   ProductionClass.ClassName2 = ProductionClass
-
-
-
-API Documentation
-=================
-
-.. toctree::
-   :maxdepth: 2
-
-   mock
-   patch
-   helpers
-   sentinel
-   magicmock
-
-
-User Guide
-==========
-
-.. toctree::
-   :maxdepth: 2
-
-   getting-started
-   examples
-   compare
-   changelog
-
-
-.. index:: installing
-
-Installing
-==========
-
-The current version is |release|. Mock is stable and widely used. If you do
-find any bugs, or have suggestions for improvements / extensions
-then please contact us.
-
-* `mock on PyPI <http://pypi.python.org/pypi/mock>`_
-* `mock documentation as PDF
-  <http://www.voidspace.org.uk/downloads/mock-1.0.1.pdf>`_
-* `Google Code Home & Mercurial Repository <http://code.google.com/p/mock/>`_
-
-.. index:: repository
-.. index:: hg
-
-You can check out the latest development version from the Google Code Mercurial
-repository with the following command:
-
-    ``hg clone https://mock.googlecode.com/hg/ mock``
-
-
-.. index:: pip
-.. index:: easy_install
-.. index:: setuptools
-
-If you have pip, setuptools or distribute you can install mock with:
-
-    | ``easy_install -U mock``
-    | ``pip install -U mock``
-
-Alternatively you can download the mock distribution from PyPI and after
-unpacking run:
-
-   ``python setup.py install``
-
-
-Quick Guide
-===========
-
-:class:`Mock` and :class:`MagicMock` objects create all attributes and
-methods as you access them and store details of how they have been used. You
-can configure them, to specify return values or limit what attributes are
-available, and then make assertions about how they have been used:
-
-.. doctest::
-
-    >>> from mock import MagicMock
-    >>> thing = ProductionClass()
-    >>> thing.method = MagicMock(return_value=3)
-    >>> thing.method(3, 4, 5, key='value')
-    3
-    >>> thing.method.assert_called_with(3, 4, 5, key='value')
-
-:attr:`side_effect` allows you to perform side effects, including raising an
-exception when a mock is called:
-
-.. doctest::
-
-   >>> mock = Mock(side_effect=KeyError('foo'))
-   >>> mock()
-   Traceback (most recent call last):
-    ...
-   KeyError: 'foo'
-
-   >>> values = {'a': 1, 'b': 2, 'c': 3}
-   >>> def side_effect(arg):
-   ...     return values[arg]
-   ...
-   >>> mock.side_effect = side_effect
-   >>> mock('a'), mock('b'), mock('c')
-   (1, 2, 3)
-   >>> mock.side_effect = [5, 4, 3, 2, 1]
-   >>> mock(), mock(), mock()
-   (5, 4, 3)
-
-Mock has many other ways you can configure it and control its behaviour. For
-example the `spec` argument configures the mock to take its specification
-from another object. Attempting to access attributes or methods on the mock
-that don't exist on the spec will fail with an `AttributeError`.
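-
-A brief sketch of `spec` in action, reusing the `ProductionClass` defined
-above:
-
-.. doctest::
-
-    >>> mock = Mock(spec=ProductionClass)
-    >>> mock.method()
-    <Mock name='mock.method()' id='...'>
-    >>> mock.no_such_method()
-    Traceback (most recent call last):
-     ...
-    AttributeError: Mock object has no attribute 'no_such_method'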
-
-The :func:`patch` decorator / context manager makes it easy to mock classes or
-objects in a module under test. The object you specify will be replaced with a
-mock (or other object) during the test and restored when the test ends:
-
-.. doctest::
-
-    >>> from mock import patch
-    >>> @patch('module.ClassName2')
-    ... @patch('module.ClassName1')
-    ... def test(MockClass1, MockClass2):
-    ...     module.ClassName1()
-    ...     module.ClassName2()
-    ...     assert MockClass1 is module.ClassName1
-    ...     assert MockClass2 is module.ClassName2
-    ...     assert MockClass1.called
-    ...     assert MockClass2.called
-    ...
-    >>> test()
-
-.. note::
-
-   When you nest patch decorators the mocks are passed in to the decorated
-   function in the same order they applied (the normal *python* order that
-   decorators are applied). This means from the bottom up, so in the example
-   above the mock for `module.ClassName1` is passed in first.
-
-   With `patch` it matters that you patch objects in the namespace where they
-   are looked up. This is normally straightforward, but for a quick guide
-   read :ref:`where to patch <where-to-patch>`.
-
-As well as a decorator, `patch` can be used as a context manager in a with
-statement:
-
-.. doctest::
-
-    >>> with patch.object(ProductionClass, 'method', return_value=None) as mock_method:
-    ...     thing = ProductionClass()
-    ...     thing.method(1, 2, 3)
-    ...
-    >>> mock_method.assert_called_once_with(1, 2, 3)
-
-
-There is also :func:`patch.dict` for setting values in a dictionary just
-during a scope and restoring the dictionary to its original state when the test
-ends:
-
-.. doctest::
-
-   >>> foo = {'key': 'value'}
-   >>> original = foo.copy()
-   >>> with patch.dict(foo, {'newkey': 'newvalue'}, clear=True):
-   ...     assert foo == {'newkey': 'newvalue'}
-   ...
-   >>> assert foo == original
-
-Mock supports the mocking of Python :ref:`magic methods <magic-methods>`. The
-easiest way of using magic methods is with the :class:`MagicMock` class. It
-allows you to do things like:
-
-.. doctest::
-
-    >>> mock = MagicMock()
-    >>> mock.__str__.return_value = 'foobarbaz'
-    >>> str(mock)
-    'foobarbaz'
-    >>> mock.__str__.assert_called_with()
-
-Mock allows you to assign functions (or other Mock instances) to magic methods
-and they will be called appropriately. The `MagicMock` class is just a Mock
-variant that has all of the magic methods pre-created for you (well, all the
-useful ones anyway).
-
-The following is an example of using magic methods with the ordinary Mock
-class:
-
-.. doctest::
-
-    >>> mock = Mock()
-    >>> mock.__str__ = Mock(return_value='wheeeeee')
-    >>> str(mock)
-    'wheeeeee'
-
-For ensuring that the mock objects in your tests have the same api as the
-objects they are replacing, you can use :ref:`auto-speccing <auto-speccing>`.
-Auto-speccing can be done through the `autospec` argument to patch, or the
-:func:`create_autospec` function. Auto-speccing creates mock objects that
-have the same attributes and methods as the objects they are replacing, and
-any functions and methods (including constructors) have the same call
-signature as the real object.
-
-This ensures that your mocks will fail in the same way as your production
-code if they are used incorrectly:
-
-.. doctest::
-
-   >>> from mock import create_autospec
-   >>> def function(a, b, c):
-   ...     pass
-   ...
-   >>> mock_function = create_autospec(function, return_value='fishy')
-   >>> mock_function(1, 2, 3)
-   'fishy'
-   >>> mock_function.assert_called_once_with(1, 2, 3)
-   >>> mock_function('wrong arguments')
-   Traceback (most recent call last):
-    ...
-   TypeError: <lambda>() takes exactly 3 arguments (1 given)
-
-`create_autospec` can also be used on classes, where it copies the signature of
-the `__init__` method, and on callable objects where it copies the signature of
-the `__call__` method.
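-
-For instance, a hypothetical sketch of class autospeccing (the `Greeter`
-class is invented for illustration):
-
-.. doctest::
-
-   >>> class Greeter(object):
-   ...     def __init__(self, name):
-   ...         self.name = name
-   ...
-   >>> MockGreeter = create_autospec(Greeter)
-   >>> greeter = MockGreeter('bob')
-   >>> MockGreeter.assert_called_once_with('bob')
-   >>> MockGreeter()
-   Traceback (most recent call last):
-    ...
-   TypeError: <lambda>() takes exactly 2 arguments (1 given)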
-
-
-.. index:: references
-.. index:: articles
-
-References
-==========
-
-Articles, blog entries and other stuff related to testing with Mock:
-
-* `Imposing a No DB Discipline on Django unit tests
-  <https://github.com/carljm/django-testing-slides/blob/master/models/30_no_database.md>`_
-* `mock-django: tools for mocking the Django ORM and models
-  <https://github.com/dcramer/mock-django>`_
-* `PyCon 2011 Video: Testing with mock <https://blip.tv/file/4881513>`_
-* `Mock objects in Python
-  <http://noopenblockers.com/2012/01/06/mock-objects-in-python/>`_
-* `Python: Injecting Mock Objects for Powerful Testing
-  <http://blueprintforge.com/blog/2012/01/08/python-injecting-mock-objects-for-powerful-testing/>`_
-* `Python Mock: How to assert a substring of logger output
-  <http://www.michaelpollmeier.com/python-mock-how-to-assert-a-substring-of-logger-output/>`_
-* `Mocking Django <http://www.mattjmorrison.com/2011/09/mocking-django.html>`_
-* `Mocking dates and other classes that can't be modified
-  <http://williamjohnbert.com/2011/07/how-to-unit-testing-in-django-with-mocking-and-patching/>`_
-* `Mock recipes <http://konryd.blogspot.com/2010/06/mock-recipies.html>`_
-* `Mockity mock mock - some love for the mock module
-  <http://konryd.blogspot.com/2010/05/mockity-mock-mock-some-love-for-mock.html>`_
-* `Coverage and Mock (with django)
-  <http://mattsnider.com/python/mock-and-coverage/>`_
-* `Python Unit Testing with Mock <http://www.insomnihack.com/?p=194>`_
-* `Getting started with Python Mock
-  <http://myadventuresincoding.wordpress.com/2011/02/26/python-python-mock-cheat-sheet/>`_
-* `Smart Parameter Checks with mock
-  <http://tobyho.com/2011/03/24/smart-parameter-checks-in/>`_
-* `Python mock testing techniques and tools
-  <http://agiletesting.blogspot.com/2009/07/python-mock-testing-techniques-and.html>`_
-* `How To Test Django Template Tags
-  <http://techblog.ironfroggy.com/2008/10/how-to-test.html>`_
-* `A presentation on Unit Testing with Mock
-  <http://pypap.blogspot.com/2008/10/newbie-nugget-unit-testing-with-mock.html>`_
-* `Mocking with Django and Google AppEngine
-  <http://michael-a-nelson.blogspot.com/2008/09/mocking-with-django-and-google-app.html>`_
-
-
-.. index:: tests
-.. index:: unittest2
-
-Tests
-=====
-
-Mock uses `unittest2 <http://pypi.python.org/pypi/unittest2>`_ for its own
-test suite. In order to run it, use the `unit2` script that comes with the
-`unittest2` module on a checkout of the source repository:
-
-   ``unit2 discover``
-
-If you have `setuptools <http://pypi.python.org/pypi/distribute>`_ as well as
-unittest2 you can run:
-
-   ``python setup.py test``
-
-On Python 3.2 you can use the ``unittest`` module from the standard library:
-
-   ``python3.2 -m unittest discover``
-
-.. index:: Python 3
-
-On Python 3 the tests for unicode are skipped as they are not relevant. On
-Python 2.4 tests that use the with statement are skipped, as the with
-statement is invalid syntax on Python 2.4.
-
-
-.. index:: older versions
-
-Older Versions
-==============
-
-Documentation for older versions of mock:
-
-* `mock 0.8 <http://www.voidspace.org.uk/python/mock/0.8/>`_
-* `mock 0.7 <http://www.voidspace.org.uk/python/mock/0.7/>`_
-* `mock 0.6 <http://www.voidspace.org.uk/python/mock/0.6.0/>`_
-
-Docs from the in-development version of `mock` can be found at
-`mock.readthedocs.org <http://mock.readthedocs.org>`_.
-
-
-Terminology
-===========
-
-Terminology for objects used to replace other ones can be confusing. Terms
-like double, fake, mock, stub, and spy are all used with varying meanings.
-
-In `classic mock terminology
-<http://xunitpatterns.com/Mocks,%20Fakes,%20Stubs%20and%20Dummies.html>`_
-:class:`mock.Mock` is a `spy <http://xunitpatterns.com/Test%20Spy.html>`_ that
-allows for *post-mortem* examination. This is what I call the "action ->
-assertion" [#]_ pattern of testing.
-
-I'm not however a fan of this "statically typed mocking terminology"
-promulgated by `Martin Fowler
-<http://martinfowler.com/articles/mocksArentStubs.html>`_. It confuses usage
-patterns with implementation and prevents you from using natural terminology
-when discussing mocking.
-
-I much prefer duck typing: if an object used in your test suite looks like a
-mock object and quacks like a mock object then it's fine to call it a mock, no
-matter what the implementation looks like.
-
-This terminology is perhaps more useful in less capable languages where
-different usage patterns will *require* different implementations.
-`mock.Mock()` is capable of being used in most of the different roles
-described by Fowler, except (annoyingly / frustratingly / ironically) a Mock
-itself!
-
-How about a simpler definition: a "mock object" is an object used to replace a
-real one in a system under test.
-
-.. [#] This pattern is called "AAA" by some members of the testing community;
-   "Arrange - Act - Assert".
diff --git a/branch-1.2/ambari-common/src/test/python/mock/docs/magicmock.txt b/branch-1.2/ambari-common/src/test/python/mock/docs/magicmock.txt
deleted file mode 100644
index 42b2ed9..0000000
--- a/branch-1.2/ambari-common/src/test/python/mock/docs/magicmock.txt
+++ /dev/null
@@ -1,258 +0,0 @@
-
-.. currentmodule:: mock
-
-
-.. _magic-methods:
-
-Mocking Magic Methods
-=====================
-
-.. currentmodule:: mock
-
-:class:`Mock` supports mocking `magic methods
-<http://www.ironpythoninaction.com/magic-methods.html>`_. This allows mock
-objects to replace containers or other objects that implement Python
-protocols.
-
-Because magic methods are looked up differently from normal methods [#]_, this
-support has been specially implemented. This means that only specific magic
-methods are supported. The supported list includes *almost* all of them. If
-there are any missing that you need please let us know!
-
-You mock magic methods by setting the method you are interested in to a function
-or a mock instance. If you are using a function then it *must* take ``self`` as
-the first argument [#]_.
-
-.. doctest::
-
-   >>> def __str__(self):
-   ...     return 'fooble'
-   ...
-   >>> mock = Mock()
-   >>> mock.__str__ = __str__
-   >>> str(mock)
-   'fooble'
-
-   >>> mock = Mock()
-   >>> mock.__str__ = Mock()
-   >>> mock.__str__.return_value = 'fooble'
-   >>> str(mock)
-   'fooble'
-
-   >>> mock = Mock()
-   >>> mock.__iter__ = Mock(return_value=iter([]))
-   >>> list(mock)
-   []
-
-One use case for this is for mocking objects used as context managers in a
-`with` statement:
-
-.. doctest::
-
-   >>> mock = Mock()
-   >>> mock.__enter__ = Mock(return_value='foo')
-   >>> mock.__exit__ = Mock(return_value=False)
-   >>> with mock as m:
-   ...     assert m == 'foo'
-   ...
-   >>> mock.__enter__.assert_called_with()
-   >>> mock.__exit__.assert_called_with(None, None, None)
-
-Calls to magic methods do not appear in :attr:`~Mock.method_calls`, but they
-are recorded in :attr:`~Mock.mock_calls`.
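-
-A small sketch of the difference:
-
-.. doctest::
-
-   >>> mock = MagicMock()
-   >>> int(mock)
-   1
-   >>> mock.method_calls
-   []
-   >>> mock.mock_calls
-   [call.__int__()]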
-
-.. note::
-
-   If you use the `spec` keyword argument to create a mock then attempting to
-   set a magic method that isn't in the spec will raise an `AttributeError`.
-
-The full list of supported magic methods is:
-
-* ``__hash__``, ``__sizeof__``, ``__repr__`` and ``__str__``
-* ``__dir__``, ``__format__`` and ``__subclasses__``
-* ``__floor__``, ``__trunc__`` and ``__ceil__``
-* Comparisons: ``__cmp__``, ``__lt__``, ``__gt__``, ``__le__``, ``__ge__``,
-  ``__eq__`` and ``__ne__``
-* Container methods: ``__getitem__``, ``__setitem__``, ``__delitem__``,
-  ``__contains__``, ``__len__``, ``__iter__``, ``__getslice__``,
-  ``__setslice__``, ``__reversed__`` and ``__missing__``
-* Context manager: ``__enter__`` and ``__exit__``
-* Unary numeric methods: ``__neg__``, ``__pos__`` and ``__invert__``
-* The numeric methods (including right hand and in-place variants):
-  ``__add__``, ``__sub__``, ``__mul__``, ``__div__``,
-  ``__floordiv__``, ``__mod__``, ``__divmod__``, ``__lshift__``,
-  ``__rshift__``, ``__and__``, ``__xor__``, ``__or__``, and ``__pow__``
-* Numeric conversion methods: ``__complex__``, ``__int__``, ``__float__``,
-  ``__index__`` and ``__coerce__``
-* Descriptor methods: ``__get__``, ``__set__`` and ``__delete__``
-* Pickling: ``__reduce__``, ``__reduce_ex__``, ``__getinitargs__``,
-  ``__getnewargs__``, ``__getstate__`` and ``__setstate__``
-
-
-The following methods are supported in Python 2 but don't exist in Python 3:
-
-* ``__unicode__``, ``__long__``, ``__oct__``, ``__hex__`` and ``__nonzero__``
-* ``__truediv__`` and ``__rtruediv__``
-
-The following methods are supported in Python 3 but don't exist in Python 2:
-
-* ``__bool__`` and ``__next__``
-
-The following methods exist but are *not* supported as they are either in use by
-mock, can't be set dynamically, or can cause problems:
-
-* ``__getattr__``, ``__setattr__``, ``__init__`` and ``__new__``
-* ``__prepare__``, ``__instancecheck__``, ``__subclasscheck__``, ``__del__``
-
-
-
-Magic Mock
-==========
-
-There are two `MagicMock` variants: `MagicMock` and `NonCallableMagicMock`.
-
-
-.. class:: MagicMock(*args, **kw)
-
-   ``MagicMock`` is a subclass of :class:`Mock` with default implementations
-   of most of the magic methods. You can use ``MagicMock`` without having to
-   configure the magic methods yourself.
-
-   The constructor parameters have the same meaning as for :class:`Mock`.
-
-   If you use the `spec` or `spec_set` arguments then *only* magic methods
-   that exist in the spec will be created.
-
-
-.. class:: NonCallableMagicMock(*args, **kw)
-
-    A non-callable version of `MagicMock`.
-
-    The constructor parameters have the same meaning as for
-    :class:`MagicMock`, with the exception of `return_value` and
-    `side_effect` which have no meaning on a non-callable mock.
-
-The magic methods are set up with `MagicMock` objects, so you can configure them
-and use them in the usual way:
-
-.. doctest::
-
-   >>> mock = MagicMock()
-   >>> mock[3] = 'fish'
-   >>> mock.__setitem__.assert_called_with(3, 'fish')
-   >>> mock.__getitem__.return_value = 'result'
-   >>> mock[2]
-   'result'
-
-By default many of the protocol methods are required to return objects of a
-specific type. These methods are preconfigured with a default return value, so
-that they can be used without you having to do anything if you aren't interested
-in the return value. You can still *set* the return value manually if you want
-to change the default.
-
-Methods and their defaults:
-
-* ``__lt__``: NotImplemented
-* ``__gt__``: NotImplemented
-* ``__le__``: NotImplemented
-* ``__ge__``: NotImplemented
-* ``__int__`` : 1
-* ``__contains__`` : False
-* ``__len__`` : 0
-* ``__iter__`` : iter([])
-* ``__exit__`` : False
-* ``__complex__`` : 1j
-* ``__float__`` : 1.0
-* ``__bool__`` : True
-* ``__nonzero__`` : True
-* ``__oct__`` : '1'
-* ``__hex__`` : '0x1'
-* ``__long__`` : long(1)
-* ``__index__`` : 1
-* ``__hash__`` : default hash for the mock
-* ``__str__`` : default str for the mock
-* ``__unicode__`` : default unicode for the mock
-* ``__sizeof__``: default sizeof for the mock
-
-For example:
-
-.. doctest::
-
-   >>> mock = MagicMock()
-   >>> int(mock)
-   1
-   >>> len(mock)
-   0
-   >>> hex(mock)
-   '0x1'
-   >>> list(mock)
-   []
-   >>> object() in mock
-   False
-
-The two equality methods, `__eq__` and `__ne__`, are special (changed in
-0.7.2). They do the default equality comparison on identity, using a side
-effect, unless you change their return value to return something else:
-
-.. doctest::
-
-   >>> MagicMock() == 3
-   False
-   >>> MagicMock() != 3
-   True
-   >>> mock = MagicMock()
-   >>> mock.__eq__.return_value = True
-   >>> mock == 3
-   True
-
-In `0.8` the `__iter__` method also gained special handling implemented with a
-side effect. The return value of `MagicMock.__iter__` can be any iterable
-object and isn't required to be an iterator:
-
-.. doctest::
-
-   >>> mock = MagicMock()
-   >>> mock.__iter__.return_value = ['a', 'b', 'c']
-   >>> list(mock)
-   ['a', 'b', 'c']
-   >>> list(mock)
-   ['a', 'b', 'c']
-
-If the return value *is* an iterator, then iterating over it once will consume
-it and subsequent iterations will result in an empty list:
-
-.. doctest::
-
-   >>> mock.__iter__.return_value = iter(['a', 'b', 'c'])
-   >>> list(mock)
-   ['a', 'b', 'c']
-   >>> list(mock)
-   []
-
-``MagicMock`` has all of the supported magic methods configured except for some
-of the obscure and obsolete ones. You can still set these up if you want.
-
-Magic methods that are supported but not set up by default in ``MagicMock`` are:
-
-* ``__cmp__``
-* ``__getslice__`` and ``__setslice__``
-* ``__coerce__``
-* ``__subclasses__``
-* ``__dir__``
-* ``__format__``
-* ``__get__``, ``__set__`` and ``__delete__``
-* ``__reversed__`` and ``__missing__``
-* ``__reduce__``, ``__reduce_ex__``, ``__getinitargs__``, ``__getnewargs__``,
-  ``__getstate__`` and ``__setstate__``
-* ``__getformat__`` and ``__setformat__``
-
-
-
-------------
-
-.. [#] Magic methods *should* be looked up on the class rather than the
-   instance. Different versions of Python are inconsistent about applying this
-   rule. The supported protocol methods should work with all supported versions
-   of Python.
-.. [#] The function is basically hooked up to the class, but each ``Mock``
-   instance is kept isolated from the others.
diff --git a/branch-1.2/ambari-common/src/test/python/mock/docs/mock.txt b/branch-1.2/ambari-common/src/test/python/mock/docs/mock.txt
deleted file mode 100644
index 58712b2..0000000
--- a/branch-1.2/ambari-common/src/test/python/mock/docs/mock.txt
+++ /dev/null
@@ -1,842 +0,0 @@
-The Mock Class
-==============
-
-.. currentmodule:: mock
-
-.. testsetup::
-
-    class SomeClass:
-        pass
-
-
-`Mock` is a flexible mock object intended to replace the use of stubs and
-test doubles throughout your code. Mocks are callable and create attributes as
-new mocks when you access them [#]_. Accessing the same attribute will always
-return the same mock. Mocks record how you use them, allowing you to make
-assertions about what your code has done to them.
-
-:class:`MagicMock` is a subclass of `Mock` with all the magic methods
-pre-created and ready to use. There are also non-callable variants, useful
-when you are mocking out objects that aren't callable:
-:class:`NonCallableMock` and :class:`NonCallableMagicMock`.
-
-The :func:`patch` decorator makes it easy to temporarily replace classes
-in a particular module with a `Mock` object. By default `patch` will create
-a `MagicMock` for you. You can specify an alternative class of `Mock` using
-the `new_callable` argument to `patch`.
-
-
-.. index:: side_effect
-.. index:: return_value
-.. index:: wraps
-.. index:: name
-.. index:: spec
-
-.. class:: Mock(spec=None, side_effect=None, return_value=DEFAULT, wraps=None, name=None, spec_set=None, **kwargs)
-
-    Create a new `Mock` object. `Mock` takes several optional arguments
-    that specify the behaviour of the Mock object:
-
-    * `spec`: This can be either a list of strings or an existing object (a
-      class or instance) that acts as the specification for the mock object. If
-      you pass in an object then a list of strings is formed by calling dir on
-      the object (excluding unsupported magic attributes and methods).
-      Accessing any attribute not in this list will raise an `AttributeError`.
-
-      If `spec` is an object (rather than a list of strings) then
-      :attr:`__class__` returns the class of the spec object. This allows mocks
-      to pass `isinstance` tests.
-
-    * `spec_set`: A stricter variant of `spec`. If used, attempting to *set*
-      or get an attribute on the mock that isn't on the object passed as
-      `spec_set` will raise an `AttributeError`.
-
-    * `side_effect`: A function to be called whenever the Mock is called. See
-      the :attr:`~Mock.side_effect` attribute. Useful for raising exceptions or
-      dynamically changing return values. The function is called with the same
-      arguments as the mock, and unless it returns :data:`DEFAULT`, the return
-      value of this function is used as the return value.
-
-      Alternatively `side_effect` can be an exception class or instance. In
-      this case the exception will be raised when the mock is called.
-
-      If `side_effect` is an iterable then each call to the mock will return
-      the next value from the iterable. If any of the members of the iterable
-      are exceptions they will be raised instead of returned.
-
-      A `side_effect` can be cleared by setting it to `None`.
-
-    * `return_value`: The value returned when the mock is called. By default
-      this is a new Mock (created on first access). See the
-      :attr:`return_value` attribute.
-
-    * `wraps`: Item for the mock object to wrap. If `wraps` is not None then
-      calling the Mock will pass the call through to the wrapped object
-      (returning the real result and ignoring `return_value`). Attribute access
-      on the mock will return a Mock object that wraps the corresponding
-      attribute of the wrapped object (so attempting to access an attribute
-      that doesn't exist will raise an `AttributeError`).
-
-      If the mock has an explicit `return_value` set then calls are not passed
-      to the wrapped object and the `return_value` is returned instead. A
-      short sketch of `wraps` follows this parameter list.
-
-    * `name`: If the mock has a name then it will be used in the repr of the
-      mock. This can be useful for debugging. The name is propagated to child
-      mocks.
-
-    Mocks can also be called with arbitrary keyword arguments. These will be
-    used to set attributes on the mock after it is created. See the
-    :meth:`configure_mock` method for details.
-
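-    The following is a small sketch of the `wraps` behaviour described in the
-    parameter list above (the `Example` class is invented for illustration):
-
-    .. doctest::
-
-        >>> class Example(object):
-        ...     def hello(self):
-        ...         return 'hello'
-        ...
-        >>> mock = Mock(wraps=Example())
-        >>> mock.hello()
-        'hello'
-        >>> mock.hello.assert_called_with()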
-
-    .. method:: assert_called_with(*args, **kwargs)
-
-        This method is a convenient way of asserting that calls are made in a
-        particular way:
-
-        .. doctest::
-
-            >>> mock = Mock()
-            >>> mock.method(1, 2, 3, test='wow')
-            <Mock name='mock.method()' id='...'>
-            >>> mock.method.assert_called_with(1, 2, 3, test='wow')
-
-
-    .. method:: assert_called_once_with(*args, **kwargs)
-
-       Assert that the mock was called exactly once and with the specified
-       arguments.
-
-       .. doctest::
-
-            >>> mock = Mock(return_value=None)
-            >>> mock('foo', bar='baz')
-            >>> mock.assert_called_once_with('foo', bar='baz')
-            >>> mock('foo', bar='baz')
-            >>> mock.assert_called_once_with('foo', bar='baz')
-            Traceback (most recent call last):
-              ...
-            AssertionError: Expected to be called once. Called 2 times.
-
-
-    .. method:: assert_any_call(*args, **kwargs)
-
-        Assert the mock has been called with the specified arguments.
-
-        The assert passes if the mock has *ever* been called, unlike
-        :meth:`assert_called_with` and :meth:`assert_called_once_with` that
-        only pass if the call is the most recent one.
-
-        .. doctest::
-
-            >>> mock = Mock(return_value=None)
-            >>> mock(1, 2, arg='thing')
-            >>> mock('some', 'thing', 'else')
-            >>> mock.assert_any_call(1, 2, arg='thing')
-
-
-    .. method:: assert_has_calls(calls, any_order=False)
-
-        Assert the mock has been called with the specified calls.
-        The `mock_calls` list is checked for the calls.
-
-        If `any_order` is False (the default) then the calls must be
-        sequential. There can be extra calls before or after the
-        specified calls.
-
-        If `any_order` is True then the calls can be in any order, but
-        they must all appear in :attr:`mock_calls`.
-
-        .. doctest::
-
-            >>> mock = Mock(return_value=None)
-            >>> mock(1)
-            >>> mock(2)
-            >>> mock(3)
-            >>> mock(4)
-            >>> calls = [call(2), call(3)]
-            >>> mock.assert_has_calls(calls)
-            >>> calls = [call(4), call(2), call(3)]
-            >>> mock.assert_has_calls(calls, any_order=True)
-
-
-    .. method:: reset_mock()
-
-        The `reset_mock` method resets all the call attributes on a mock object:
-
-        .. doctest::
-
-            >>> mock = Mock(return_value=None)
-            >>> mock('hello')
-            >>> mock.called
-            True
-            >>> mock.reset_mock()
-            >>> mock.called
-            False
-
-        This can be useful where you want to make a series of assertions that
-        reuse the same object. Note that `reset_mock` *doesn't* clear the
-        return value, :attr:`side_effect` or any child attributes you have
-        set using normal assignment. Child mocks and the return value mock
-        (if any) are reset as well.
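-
-        For example, a minimal sketch showing that a configured return value
-        survives `reset_mock`:
-
-        .. doctest::
-
-            >>> mock = Mock(return_value=3)
-            >>> mock()
-            3
-            >>> mock.reset_mock()
-            >>> mock.called
-            False
-            >>> mock()
-            3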
-
-
-    .. method:: mock_add_spec(spec, spec_set=False)
-
-        Add a spec to a mock. `spec` can either be an object or a
-        list of strings. Only attributes on the `spec` can be fetched as
-        attributes from the mock.
-
-        If `spec_set` is `True` then only attributes on the spec can be set.
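-
-        A minimal sketch (the attribute names here are illustrative):
-
-        .. doctest::
-
-            >>> mock = Mock()
-            >>> mock.mock_add_spec(['method'])
-            >>> mock.method
-            <Mock name='mock.method' id='...'>
-            >>> mock.other
-            Traceback (most recent call last):
-             ...
-            AttributeError: Mock object has no attribute 'other'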
-
-
-    .. method:: attach_mock(mock, attribute)
-
-        Attach a mock as an attribute of this one, replacing its name and
-        parent. Calls to the attached mock will be recorded in the
-        :attr:`method_calls` and :attr:`mock_calls` attributes of this one.
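-
-        A short sketch (the names `parent` and `child` are illustrative):
-
-        .. doctest::
-
-            >>> parent = Mock()
-            >>> child = Mock(return_value=None)
-            >>> parent.attach_mock(child, 'child')
-            >>> parent.child(1)
-            >>> parent.mock_calls
-            [call.child(1)]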
-
-
-    .. method:: configure_mock(**kwargs)
-
-        Set attributes on the mock through keyword arguments.
-
-        Attributes plus return values and side effects can be set on child
-        mocks using standard dot notation and unpacking a dictionary in the
-        method call:
-
-        .. doctest::
-
-            >>> mock = Mock()
-            >>> attrs = {'method.return_value': 3, 'other.side_effect': KeyError}
-            >>> mock.configure_mock(**attrs)
-            >>> mock.method()
-            3
-            >>> mock.other()
-            Traceback (most recent call last):
-              ...
-            KeyError
-
-        The same thing can be achieved in the constructor call to mocks:
-
-        .. doctest::
-
-            >>> attrs = {'method.return_value': 3, 'other.side_effect': KeyError}
-            >>> mock = Mock(some_attribute='eggs', **attrs)
-            >>> mock.some_attribute
-            'eggs'
-            >>> mock.method()
-            3
-            >>> mock.other()
-            Traceback (most recent call last):
-              ...
-            KeyError
-
-        `configure_mock` exists to make it easier to do configuration
-        after the mock has been created.
-
-
-    .. method:: __dir__()
-
-        `Mock` objects limit the results of `dir(some_mock)` to useful results.
-        For mocks with a `spec` this includes all the permitted attributes
-        for the mock.
-
-        See :data:`FILTER_DIR` for what this filtering does, and how to
-        switch it off.
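-
-        A brief sketch, assuming the default filtering is in effect:
-
-        .. doctest::
-
-            >>> mock = Mock(spec=['something'])
-            >>> 'something' in dir(mock)
-            True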
-
-
-    .. method:: _get_child_mock(**kw)
-
-        Create the child mocks for attributes and return value.
-        By default child mocks will be the same type as the parent.
-        Subclasses of Mock may want to override this to customize the way
-        child mocks are made.
-
-        For non-callable mocks the callable variant will be used (rather than
-        any custom subclass).
-
-
-    .. attribute:: called
-
-        A boolean representing whether or not the mock object has been called:
-
-        .. doctest::
-
-            >>> mock = Mock(return_value=None)
-            >>> mock.called
-            False
-            >>> mock()
-            >>> mock.called
-            True
-
-    .. attribute:: call_count
-
-        An integer telling you how many times the mock object has been called:
-
-        .. doctest::
-
-            >>> mock = Mock(return_value=None)
-            >>> mock.call_count
-            0
-            >>> mock()
-            >>> mock()
-            >>> mock.call_count
-            2
-
-
-    .. attribute:: return_value
-
-        Set this to configure the value returned by calling the mock:
-
-        .. doctest::
-
-            >>> mock = Mock()
-            >>> mock.return_value = 'fish'
-            >>> mock()
-            'fish'
-
-        The default return value is a mock object and you can configure it in
-        the normal way:
-
-        .. doctest::
-
-            >>> mock = Mock()
-            >>> mock.return_value.attribute = sentinel.Attribute
-            >>> mock.return_value()
-            <Mock name='mock()()' id='...'>
-            >>> mock.return_value.assert_called_with()
-
-        `return_value` can also be set in the constructor:
-
-        .. doctest::
-
-            >>> mock = Mock(return_value=3)
-            >>> mock.return_value
-            3
-            >>> mock()
-            3
-
-
-    .. attribute:: side_effect
-
-        This can either be a function to be called when the mock is called,
-        or an exception (class or instance) to be raised.
-
-        If you pass in a function it will be called with the same arguments as
-        the mock and, unless the function returns the :data:`DEFAULT` singleton,
-        the call to the mock will then return whatever the function returns. If
-        the function returns :data:`DEFAULT` then the mock will return its
-        normal value (from the :attr:`return_value`).
-
-        An example of a mock that raises an exception (to test exception
-        handling of an API):
-
-        .. doctest::
-
-            >>> mock = Mock()
-            >>> mock.side_effect = Exception('Boom!')
-            >>> mock()
-            Traceback (most recent call last):
-              ...
-            Exception: Boom!
-
-        Using `side_effect` to return a sequence of values:
-
-        .. doctest::
-
-            >>> mock = Mock()
-            >>> mock.side_effect = [3, 2, 1]
-            >>> mock(), mock(), mock()
-            (3, 2, 1)
-
-        The `side_effect` function is called with the same arguments as the
-        mock (so it is wise for it to take arbitrary args and keyword
-        arguments) and whatever it returns is used as the return value for
-        the call. The exception is if `side_effect` returns :data:`DEFAULT`,
-        in which case the normal :attr:`return_value` is used.
-
-        .. doctest::
-
-            >>> mock = Mock(return_value=3)
-            >>> def side_effect(*args, **kwargs):
-            ...     return DEFAULT
-            ...
-            >>> mock.side_effect = side_effect
-            >>> mock()
-            3
-
-        `side_effect` can be set in the constructor. Here's an example that
-        adds one to the value the mock is called with and returns it:
-
-        .. doctest::
-
-            >>> side_effect = lambda value: value + 1
-            >>> mock = Mock(side_effect=side_effect)
-            >>> mock(3)
-            4
-            >>> mock(-8)
-            -7
-
-        Setting `side_effect` to `None` clears it:
-
-        .. doctest::
-
-            >>> from mock import Mock
-            >>> m = Mock(side_effect=KeyError, return_value=3)
-            >>> m()
-            Traceback (most recent call last):
-             ...
-            KeyError
-            >>> m.side_effect = None
-            >>> m()
-            3
-
-
-    .. attribute:: call_args
-
-        This is either `None` (if the mock hasn't been called), or the
-        arguments that the mock was last called with. This will be in the
-        form of a tuple: the first member is any positional arguments the
-        mock was called with (or an empty tuple) and the second member is
-        any keyword arguments (or an empty dictionary).
-
-        .. doctest::
-
-            >>> mock = Mock(return_value=None)
-            >>> print mock.call_args
-            None
-            >>> mock()
-            >>> mock.call_args
-            call()
-            >>> mock.call_args == ()
-            True
-            >>> mock(3, 4)
-            >>> mock.call_args
-            call(3, 4)
-            >>> mock.call_args == ((3, 4),)
-            True
-            >>> mock(3, 4, 5, key='fish', next='w00t!')
-            >>> mock.call_args
-            call(3, 4, 5, key='fish', next='w00t!')
-
-        `call_args`, along with members of the lists :attr:`call_args_list`,
-        :attr:`method_calls` and :attr:`mock_calls` are :data:`call` objects.
-        These are tuples, so they can be unpacked to get at the individual
-        arguments and make more complex assertions. See
-        :ref:`calls as tuples <calls-as-tuples>`.
-
-
-    .. attribute:: call_args_list
-
-        This is a list of all the calls made to the mock object in sequence
-        (so the length of the list is the number of times it has been
-        called). Before any calls have been made it is an empty list. The
-        :data:`call` object can be used for conveniently constructing lists of
-        calls to compare with `call_args_list`.
-
-        .. doctest::
-
-            >>> mock = Mock(return_value=None)
-            >>> mock()
-            >>> mock(3, 4)
-            >>> mock(key='fish', next='w00t!')
-            >>> mock.call_args_list
-            [call(), call(3, 4), call(key='fish', next='w00t!')]
-            >>> expected = [(), ((3, 4),), ({'key': 'fish', 'next': 'w00t!'},)]
-            >>> mock.call_args_list == expected
-            True
-
-        Members of `call_args_list` are :data:`call` objects. These can be
-        unpacked as tuples to get at the individual arguments. See
-        :ref:`calls as tuples <calls-as-tuples>`.
-
-
-    .. attribute:: method_calls
-
-        As well as tracking calls to themselves, mocks also track calls to
-        methods and attributes, and *their* methods and attributes:
-
-        .. doctest::
-
-            >>> mock = Mock()
-            >>> mock.method()
-            <Mock name='mock.method()' id='...'>
-            >>> mock.property.method.attribute()
-            <Mock name='mock.property.method.attribute()' id='...'>
-            >>> mock.method_calls
-            [call.method(), call.property.method.attribute()]
-
-        Members of `method_calls` are :data:`call` objects. These can be
-        unpacked as tuples to get at the individual arguments. See
-        :ref:`calls as tuples <calls-as-tuples>`.
-
-
-    .. attribute:: mock_calls
-
-        `mock_calls` records *all* calls to the mock object, its methods, magic
-        methods *and* return value mocks.
-
-        .. doctest::
-
-            >>> mock = MagicMock()
-            >>> result = mock(1, 2, 3)
-            >>> mock.first(a=3)
-            <MagicMock name='mock.first()' id='...'>
-            >>> mock.second()
-            <MagicMock name='mock.second()' id='...'>
-            >>> int(mock)
-            1
-            >>> result(1)
-            <MagicMock name='mock()()' id='...'>
-            >>> expected = [call(1, 2, 3), call.first(a=3), call.second(),
-            ... call.__int__(), call()(1)]
-            >>> mock.mock_calls == expected
-            True
-
-        Members of `mock_calls` are :data:`call` objects. These can be
-        unpacked as tuples to get at the individual arguments. See
-        :ref:`calls as tuples <calls-as-tuples>`.
-
-
-    .. attribute:: __class__
-
-        Normally the `__class__` attribute of an object will return its type.
-        For a mock object with a `spec`, `__class__` returns the spec class
-        instead. This allows mock objects to pass `isinstance` tests for the
-        object they are replacing / masquerading as:
-
-        .. doctest::
-
-            >>> mock = Mock(spec=3)
-            >>> isinstance(mock, int)
-            True
-
-        `__class__` is assignable to; this allows a mock to pass an
-        `isinstance` check without forcing you to use a spec:
-
-        .. doctest::
-
-            >>> mock = Mock()
-            >>> mock.__class__ = dict
-            >>> isinstance(mock, dict)
-            True
-
-.. class:: NonCallableMock(spec=None, wraps=None, name=None, spec_set=None, **kwargs)
-
-    A non-callable version of `Mock`. The constructor parameters have the same
-    meaning as for `Mock`, with the exception of `return_value` and
-    `side_effect`, which have no meaning on a non-callable mock.
-
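-A minimal sketch, showing that attribute access on a `NonCallableMock` works
-as usual but calling it raises Python's normal "not callable" `TypeError`:
-
-.. doctest::
-
-    >>> mock = NonCallableMock()
-    >>> mock.attribute = 3
-    >>> mock.attribute
-    3
-    >>> mock()
-    Traceback (most recent call last):
-      ...
-    TypeError: 'NonCallableMock' object is not callable
-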
-Mock objects that use a class or an instance as a `spec` or `spec_set` are able
-to pass `isinstance` tests:
-
-.. doctest::
-
-    >>> mock = Mock(spec=SomeClass)
-    >>> isinstance(mock, SomeClass)
-    True
-    >>> mock = Mock(spec_set=SomeClass())
-    >>> isinstance(mock, SomeClass)
-    True
-
-The `Mock` classes have support for mocking magic methods. See :ref:`magic
-methods <magic-methods>` for the full details.
-
-The mock classes and the :func:`patch` decorators all take arbitrary keyword
-arguments for configuration. For the `patch` decorators the keywords are
-passed to the constructor of the mock being created. The keyword arguments
-are for configuring attributes of the mock:
-
-.. doctest::
-
-        >>> m = MagicMock(attribute=3, other='fish')
-        >>> m.attribute
-        3
-        >>> m.other
-        'fish'
-
-The return value and side effect of child mocks can be set in the same way,
-using dotted notation. As you can't use dotted names directly in a call you
-have to create a dictionary and unpack it using `**`:
-
-.. doctest::
-
-    >>> attrs = {'method.return_value': 3, 'other.side_effect': KeyError}
-    >>> mock = Mock(some_attribute='eggs', **attrs)
-    >>> mock.some_attribute
-    'eggs'
-    >>> mock.method()
-    3
-    >>> mock.other()
-    Traceback (most recent call last):
-      ...
-    KeyError
-
-
-.. class:: PropertyMock(*args, **kwargs)
-
-   A mock intended to be used as a property, or other descriptor, on a class.
-   `PropertyMock` provides `__get__` and `__set__` methods so you can specify
-   a return value when it is fetched.
-
-   Fetching a `PropertyMock` instance from an object calls the mock, with
-   no args. Setting it calls the mock with the value being set.
-
-   .. doctest::
-
-        >>> class Foo(object):
-        ...     @property
-        ...     def foo(self):
-        ...         return 'something'
-        ...     @foo.setter
-        ...     def foo(self, value):
-        ...         pass
-        ...
-        >>> with patch('__main__.Foo.foo', new_callable=PropertyMock) as mock_foo:
-        ...     mock_foo.return_value = 'mockity-mock'
-        ...     this_foo = Foo()
-        ...     print this_foo.foo
-        ...     this_foo.foo = 6
-        ...
-        mockity-mock
-        >>> mock_foo.mock_calls
-        [call(), call(6)]
-
-Because of the way mock attributes are stored you can't directly attach a
-`PropertyMock` to a mock object. Instead you can attach it to the mock type
-object:
-
-.. doctest::
-
-    >>> m = MagicMock()
-    >>> p = PropertyMock(return_value=3)
-    >>> type(m).foo = p
-    >>> m.foo
-    3
-    >>> p.assert_called_once_with()
-
-
-.. index:: __call__
-.. index:: calling
-
-Calling
-=======
-
-Mock objects are callable. The call will return the value set as the
-:attr:`~Mock.return_value` attribute. The default return value is a new Mock
-object; it is created the first time the return value is accessed (either
-explicitly or by calling the Mock) - but it is stored and the same one
-returned each time.
-
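-A minimal check of that caching behaviour:
-
-.. doctest::
-
-        >>> m = MagicMock()
-        >>> m() is m()
-        True
-        >>> m() is m.return_value
-        True
-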
-Calls made to the object will be recorded in the attributes
-like :attr:`~Mock.call_args` and :attr:`~Mock.call_args_list`.
-
-If :attr:`~Mock.side_effect` is set then it will be called after the call has
-been recorded, so if `side_effect` raises an exception the call is still
-recorded.
-
-The simplest way to make a mock raise an exception when called is to make
-:attr:`~Mock.side_effect` an exception class or instance:
-
-.. doctest::
-
-        >>> m = MagicMock(side_effect=IndexError)
-        >>> m(1, 2, 3)
-        Traceback (most recent call last):
-          ...
-        IndexError
-        >>> m.mock_calls
-        [call(1, 2, 3)]
-        >>> m.side_effect = KeyError('Bang!')
-        >>> m('two', 'three', 'four')
-        Traceback (most recent call last):
-          ...
-        KeyError: 'Bang!'
-        >>> m.mock_calls
-        [call(1, 2, 3), call('two', 'three', 'four')]
-
-If `side_effect` is a function then whatever that function returns is what
-calls to the mock return. The `side_effect` function is called with the
-same arguments as the mock. This allows you to vary the return value of the
-call dynamically, based on the input:
-
-.. doctest::
-
-        >>> def side_effect(value):
-        ...     return value + 1
-        ...
-        >>> m = MagicMock(side_effect=side_effect)
-        >>> m(1)
-        2
-        >>> m(2)
-        3
-        >>> m.mock_calls
-        [call(1), call(2)]
-
-If you want the mock to still return the default return value (a new mock), or
-any set return value, then there are two ways of doing this. Either return
-`mock.return_value` from inside `side_effect`, or return :data:`DEFAULT`:
-
-.. doctest::
-
-        >>> m = MagicMock()
-        >>> def side_effect(*args, **kwargs):
-        ...     return m.return_value
-        ...
-        >>> m.side_effect = side_effect
-        >>> m.return_value = 3
-        >>> m()
-        3
-        >>> def side_effect(*args, **kwargs):
-        ...     return DEFAULT
-        ...
-        >>> m.side_effect = side_effect
-        >>> m()
-        3
-
-To remove a `side_effect`, and return to the default behaviour, set the
-`side_effect` to `None`:
-
-.. doctest::
-
-        >>> m = MagicMock(return_value=6)
-        >>> def side_effect(*args, **kwargs):
-        ...     return 3
-        ...
-        >>> m.side_effect = side_effect
-        >>> m()
-        3
-        >>> m.side_effect = None
-        >>> m()
-        6
-
-The `side_effect` can also be any iterable object. Repeated calls to the mock
-will return values from the iterable (until the iterable is exhausted and
-a `StopIteration` is raised):
-
-.. doctest::
-
-        >>> m = MagicMock(side_effect=[1, 2, 3])
-        >>> m()
-        1
-        >>> m()
-        2
-        >>> m()
-        3
-        >>> m()
-        Traceback (most recent call last):
-          ...
-        StopIteration
-
-If any members of the iterable are exceptions they will be raised instead of
-returned:
-
-.. doctest::
-
-        >>> iterable = (33, ValueError, 66)
-        >>> m = MagicMock(side_effect=iterable)
-        >>> m()
-        33
-        >>> m()
-        Traceback (most recent call last):
-         ...
-        ValueError
-        >>> m()
-        66
-
-
-.. _deleting-attributes:
-
-Deleting Attributes
-===================
-
-Mock objects create attributes on demand. This allows them to pretend to be
-objects of any type.
-
-You may want a mock object to return `False` to a `hasattr` call, or raise an
-`AttributeError` when an attribute is fetched. You can do this by providing
-an object as a `spec` for a mock, but that isn't always convenient.
-
-You "block" attributes by deleting them. Once deleted, accessing an attribute
-will raise an `AttributeError`.
-
-.. doctest::
-
-    >>> mock = MagicMock()
-    >>> hasattr(mock, 'm')
-    True
-    >>> del mock.m
-    >>> hasattr(mock, 'm')
-    False
-    >>> del mock.f
-    >>> mock.f
-    Traceback (most recent call last):
-        ...
-    AttributeError: f
-
-
-Attaching Mocks as Attributes
-=============================
-
-When you attach a mock as an attribute of another mock (or as the return
-value) it becomes a "child" of that mock. Calls to the child are recorded in
-the :attr:`~Mock.method_calls` and :attr:`~Mock.mock_calls` attributes of the
-parent. This is useful for configuring child mocks and then attaching them to
-the parent, or for attaching mocks to a parent that records all calls to the
-children and allows you to make assertions about the order of calls between
-mocks:
-
-.. doctest::
-
-    >>> parent = MagicMock()
-    >>> child1 = MagicMock(return_value=None)
-    >>> child2 = MagicMock(return_value=None)
-    >>> parent.child1 = child1
-    >>> parent.child2 = child2
-    >>> child1(1)
-    >>> child2(2)
-    >>> parent.mock_calls
-    [call.child1(1), call.child2(2)]
-
-The exception to this is if the mock has a name. This allows you to prevent
-the "parenting" if for some reason you don't want it to happen.
-
-.. doctest::
-
-    >>> mock = MagicMock()
-    >>> not_a_child = MagicMock(name='not-a-child')
-    >>> mock.attribute = not_a_child
-    >>> mock.attribute()
-    <MagicMock name='not-a-child()' id='...'>
-    >>> mock.mock_calls
-    []
-
-Mocks created for you by :func:`patch` are automatically given names. To
-attach mocks that have names to a parent you use the :meth:`~Mock.attach_mock`
-method:
-
-.. doctest::
-
-    >>> thing1 = object()
-    >>> thing2 = object()
-    >>> parent = MagicMock()
-    >>> with patch('__main__.thing1', return_value=None) as child1:
-    ...     with patch('__main__.thing2', return_value=None) as child2:
-    ...         parent.attach_mock(child1, 'child1')
-    ...         parent.attach_mock(child2, 'child2')
-    ...         child1('one')
-    ...         child2('two')
-    ...
-    >>> parent.mock_calls
-    [call.child1('one'), call.child2('two')]
-
-
------
-
-.. [#] The only exceptions are magic methods and attributes (those that have
-       leading and trailing double underscores). Mock doesn't create these but
-       instead raises an ``AttributeError``. This is because the interpreter
-       will often implicitly request these methods, and gets *very* confused if
-       it receives a new Mock object when it expects a magic method. If you
-       need magic method support see :ref:`magic methods <magic-methods>`.
diff --git a/branch-1.2/ambari-common/src/test/python/mock/docs/patch.txt b/branch-1.2/ambari-common/src/test/python/mock/docs/patch.txt
deleted file mode 100644
index 3d56264..0000000
--- a/branch-1.2/ambari-common/src/test/python/mock/docs/patch.txt
+++ /dev/null
@@ -1,636 +0,0 @@
-==================
- Patch Decorators
-==================
-
-
-.. currentmodule:: mock
-
-.. testsetup::
-
-    class SomeClass(object):
-        static_method = None
-        class_method = None
-        attribute = None
-
-    sys.modules['package'] = package = Mock(name='package')
-    sys.modules['package.module'] = package.module
-
-    class TestCase(unittest2.TestCase):
-        def run(self):
-            result = unittest2.TestResult()
-            super(TestCase, self).run(result)
-            assert result.wasSuccessful()
-
-.. testcleanup::
-
-    patch.TEST_PREFIX = 'test'
-
-
-The patch decorators are used for patching objects only within the scope of
-the function they decorate. They automatically handle the unpatching for you,
-even if exceptions are raised. All of these functions can also be used in with
-statements or as class decorators.
-
-
-patch
-=====
-
-.. note::
-
-    `patch` is straightforward to use. The key is to do the patching in the
-    right namespace. See the section `where to patch`_.
-
-.. function:: patch(target, new=DEFAULT, spec=None, create=False, spec_set=None, autospec=None, new_callable=None, **kwargs)
-
-    `patch` acts as a function decorator, class decorator or a context
-    manager. Inside the body of the function or with statement, the `target`
-    is patched with a `new` object. When the function/with statement exits
-    the patch is undone.
-
-    If `new` is omitted, then the target is replaced with a
-    :class:`MagicMock`. If `patch` is used as a decorator and `new` is
-    omitted, the created mock is passed in as an extra argument to the
-    decorated function. If `patch` is used as a context manager the created
-    mock is returned by the context manager.
-
-    `target` should be a string in the form `'package.module.ClassName'`. The
-    `target` is imported and the specified object replaced with the `new`
-    object, so the `target` must be importable from the environment you are
-    calling `patch` from. The target is imported when the decorated function
-    is executed, not at decoration time.
-
-    The `spec` and `spec_set` keyword arguments are passed to the `MagicMock`
-    if patch is creating one for you.
-
-    In addition you can pass `spec=True` or `spec_set=True`, which causes
-    patch to pass in the object being mocked as the spec/spec_set object.
-
-    `new_callable` allows you to specify a different class, or callable object,
-    that will be called to create the `new` object. By default `MagicMock` is
-    used.
-
-    A more powerful form of `spec` is `autospec`. If you set `autospec=True`
-    then the mock will be created with a spec from the object being replaced.
-    All attributes of the mock will also have the spec of the corresponding
-    attribute of the object being replaced. Methods and functions being mocked
-    will have their arguments checked and will raise a `TypeError` if they are
-    called with the wrong signature. For mocks
-    replacing a class, their return value (the 'instance') will have the same
-    spec as the class. See the :func:`create_autospec` function and
-    :ref:`auto-speccing`.
-
-    Instead of `autospec=True` you can pass `autospec=some_object` to use an
-    arbitrary object as the spec instead of the one being replaced.
-
-    By default `patch` will fail to replace attributes that don't exist. If
-    you pass in `create=True`, and the attribute doesn't exist, patch will
-    create the attribute for you when the patched function is called, and
-    delete it again afterwards. This is useful for writing tests against
-    attributes that your production code creates at runtime. It is off by
-    default because it can be dangerous. With it switched on you can write
-    passing tests against APIs that don't actually exist!
-
-    Patch can be used as a `TestCase` class decorator. It works by
-    decorating each test method in the class. This reduces the boilerplate
-    code when your test methods share a common set of patches. `patch` finds
-    tests by looking for method names that start with `patch.TEST_PREFIX`.
-    By default this is `test`, which matches the way `unittest` finds tests.
-    You can specify an alternative prefix by setting `patch.TEST_PREFIX`.
-
-    Patch can be used as a context manager, with the with statement. Here the
-    patching applies to the indented block after the with statement. If you
-    use "as" then the patched object will be bound to the name after the
-    "as"; very useful if `patch` is creating a mock object for you.
-
-    `patch` takes arbitrary keyword arguments. These will be passed to
-    the `Mock` (or `new_callable`) on construction.
-
-    `patch.dict(...)`, `patch.multiple(...)` and `patch.object(...)` are
-    available for alternate use-cases.
-
-`patch` as function decorator, creating the mock for you and passing it into
-the decorated function:
-
-.. doctest::
-
-    >>> @patch('__main__.SomeClass')
-    ... def function(normal_argument, mock_class):
-    ...     print mock_class is SomeClass
-    ...
-    >>> function(None)
-    True
-
-
-Patching a class replaces the class with a `MagicMock` *instance*. If the
-class is instantiated in the code under test then it will be the
-:attr:`~Mock.return_value` of the mock that will be used.
-
-If the class is instantiated multiple times you could use
-:attr:`~Mock.side_effect` to return a new mock each time. Alternatively you
-can set the `return_value` to be anything you want.
-
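-A sketch of the `side_effect` approach, reusing the `SomeClass` defined in
-the test setup, so that each instantiation returns a fresh mock:
-
-.. doctest::
-
-    >>> instances = [MagicMock(), MagicMock()]
-    >>> with patch('__main__.SomeClass', side_effect=instances):
-    ...     assert SomeClass() is instances[0]
-    ...     assert SomeClass() is instances[1]
-    ...
-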
-To configure return values on methods of *instances* of the patched class
-you must do this on the `return_value`. For example:
-
-.. doctest::
-
-    >>> class Class(object):
-    ...     def method(self):
-    ...         pass
-    ...
-    >>> with patch('__main__.Class') as MockClass:
-    ...     instance = MockClass.return_value
-    ...     instance.method.return_value = 'foo'
-    ...     assert Class() is instance
-    ...     assert Class().method() == 'foo'
-    ...
-
-If you use `spec` or `spec_set` and `patch` is replacing a *class*, then the
-return value of the created mock will have the same spec.
-
-.. doctest::
-
-    >>> Original = Class
-    >>> patcher = patch('__main__.Class', spec=True)
-    >>> MockClass = patcher.start()
-    >>> instance = MockClass()
-    >>> assert isinstance(instance, Original)
-    >>> patcher.stop()
-
-The `new_callable` argument is useful where you want to use an alternative
-class to the default :class:`MagicMock` for the created mock. For example, if
-you wanted a :class:`NonCallableMock` to be used:
-
-.. doctest::
-
-    >>> thing = object()
-    >>> with patch('__main__.thing', new_callable=NonCallableMock) as mock_thing:
-    ...     assert thing is mock_thing
-    ...     thing()
-    ...
-    Traceback (most recent call last):
-      ...
-    TypeError: 'NonCallableMock' object is not callable
-
-Another use case might be to replace an object with a `StringIO` instance:
-
-.. doctest::
-
-    >>> from StringIO import StringIO
-    >>> def foo():
-    ...     print 'Something'
-    ...
-    >>> @patch('sys.stdout', new_callable=StringIO)
-    ... def test(mock_stdout):
-    ...     foo()
-    ...     assert mock_stdout.getvalue() == 'Something\n'
-    ...
-    >>> test()
-
-When `patch` is creating a mock for you, it is common that the first thing
-you need to do is to configure the mock. Some of that configuration can be done
-in the call to patch. Any arbitrary keywords you pass into the call will be
-used to set attributes on the created mock:
-
-.. doctest::
-
-    >>> patcher = patch('__main__.thing', first='one', second='two')
-    >>> mock_thing = patcher.start()
-    >>> mock_thing.first
-    'one'
-    >>> mock_thing.second
-    'two'
-
-As well as attributes on the created mock, attributes of child mocks, like
-the :attr:`~Mock.return_value` and :attr:`~Mock.side_effect`, can also be
-configured. These aren't syntactically valid to pass in directly as keyword
-arguments, but a dictionary with these as keys can still be expanded into a
-`patch` call using `**`:
-
-.. doctest::
-
-    >>> config = {'method.return_value': 3, 'other.side_effect': KeyError}
-    >>> patcher = patch('__main__.thing', **config)
-    >>> mock_thing = patcher.start()
-    >>> mock_thing.method()
-    3
-    >>> mock_thing.other()
-    Traceback (most recent call last):
-      ...
-    KeyError
-
-
-patch.object
-============
-
-.. function:: patch.object(target, attribute, new=DEFAULT, spec=None, create=False, spec_set=None, autospec=None, new_callable=None, **kwargs)
-
-    patch the named member (`attribute`) on an object (`target`) with a mock
-    object.
-
-    `patch.object` can be used as a decorator, class decorator or a context
-    manager. Arguments `new`, `spec`, `create`, `spec_set`, `autospec` and
-    `new_callable` have the same meaning as for `patch`. Like `patch`,
-    `patch.object` takes arbitrary keyword arguments for configuring the mock
-    object it creates.
-
-    When used as a class decorator `patch.object` honours `patch.TEST_PREFIX`
-    for choosing which methods to wrap.
-
-You can call `patch.object` with either three or two arguments. The three
-argument form takes the object to be patched, the attribute name and the
-object to replace the attribute with.
-
-When calling with the two argument form you omit the replacement object, and a
-mock is created for you and passed in as an extra argument to the decorated
-function:
-
-.. doctest::
-
-    >>> @patch.object(SomeClass, 'class_method')
-    ... def test(mock_method):
-    ...     SomeClass.class_method(3)
-    ...     mock_method.assert_called_with(3)
-    ...
-    >>> test()
-
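-The three argument form replaces the attribute with the object you supply
-directly. A short sketch using a `sentinel` object (assuming `sentinel` is
-available in this doctest namespace, as elsewhere in these docs):
-
-.. doctest::
-
-    >>> original = SomeClass.attribute
-    >>> @patch.object(SomeClass, 'attribute', sentinel.attribute)
-    ... def test():
-    ...     assert SomeClass.attribute == sentinel.attribute
-    ...
-    >>> test()
-    >>> assert SomeClass.attribute == original
-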
-`spec`, `create` and the other arguments to `patch.object` have the same
-meaning as they do for `patch`.
-
-
-patch.dict
-==========
-
-.. function:: patch.dict(in_dict, values=(), clear=False, **kwargs)
-
-    Patch a dictionary, or dictionary-like object, and restore the dictionary
-    to its original state after the test.
-
-    `in_dict` can be a dictionary or a mapping-like container. If it is a
-    mapping then it must at least support getting, setting and deleting items
-    plus iterating over keys.
-
-    `in_dict` can also be a string specifying the name of the dictionary, which
-    will then be fetched by importing it.
-
-    `values` can be a dictionary of values to set in the dictionary. `values`
-    can also be an iterable of `(key, value)` pairs.
-
-    If `clear` is True then the dictionary will be cleared before the new
-    values are set.
-
-    `patch.dict` can also be called with arbitrary keyword arguments to set
-    values in the dictionary.
-
-    `patch.dict` can be used as a context manager, decorator or class
-    decorator. When used as a class decorator `patch.dict` honours
-    `patch.TEST_PREFIX` for choosing which methods to wrap.
-
-`patch.dict` can be used to add members to a dictionary, or simply let a test
-change a dictionary, and ensure the dictionary is restored when the test
-ends.
-
-.. doctest::
-
-    >>> from mock import patch
-    >>> foo = {}
-    >>> with patch.dict(foo, {'newkey': 'newvalue'}):
-    ...     assert foo == {'newkey': 'newvalue'}
-    ...
-    >>> assert foo == {}
-
-    >>> import os
-    >>> with patch.dict('os.environ', {'newkey': 'newvalue'}):
-    ...     print os.environ['newkey']
-    ...
-    newvalue
-    >>> assert 'newkey' not in os.environ
-
-Keywords can be used in the `patch.dict` call to set values in the dictionary:
-
-.. doctest::
-
-    >>> mymodule = MagicMock()
-    >>> mymodule.function.return_value = 'fish'
-    >>> with patch.dict('sys.modules', mymodule=mymodule):
-    ...     import mymodule
-    ...     mymodule.function('some', 'args')
-    ...
-    'fish'
-
-`patch.dict` can be used with dictionary-like objects that aren't actually
-dictionaries. At the very minimum they must support item getting, setting,
-deleting and either iteration or membership testing. This corresponds to the
-magic methods `__getitem__`, `__setitem__`, `__delitem__` and either
-`__iter__` or `__contains__`.
-
-.. doctest::
-
-    >>> class Container(object):
-    ...     def __init__(self):
-    ...         self.values = {}
-    ...     def __getitem__(self, name):
-    ...         return self.values[name]
-    ...     def __setitem__(self, name, value):
-    ...         self.values[name] = value
-    ...     def __delitem__(self, name):
-    ...         del self.values[name]
-    ...     def __iter__(self):
-    ...         return iter(self.values)
-    ...
-    >>> thing = Container()
-    >>> thing['one'] = 1
-    >>> with patch.dict(thing, one=2, two=3):
-    ...     assert thing['one'] == 2
-    ...     assert thing['two'] == 3
-    ...
-    >>> assert thing['one'] == 1
-    >>> assert list(thing) == ['one']
-
-
-patch.multiple
-==============
-
-.. function:: patch.multiple(target, spec=None, create=False, spec_set=None, autospec=None, new_callable=None, **kwargs)
-
-    Perform multiple patches in a single call. It takes the object to be
-    patched (either as an object or a string to fetch the object by importing)
-    and keyword arguments for the patches::
-
-        with patch.multiple(settings, FIRST_PATCH='one', SECOND_PATCH='two'):
-            ...
-
-    Use :data:`DEFAULT` as the value if you want `patch.multiple` to create
-    mocks for you. In this case the created mocks are passed into a decorated
-    function by keyword, and a dictionary is returned when `patch.multiple` is
-    used as a context manager.
-
-    `patch.multiple` can be used as a decorator, class decorator or a context
-    manager. The arguments `spec`, `spec_set`, `create`, `autospec` and
-    `new_callable` have the same meaning as for `patch`. These arguments will
-    be applied to *all* patches done by `patch.multiple`.
-
-    When used as a class decorator `patch.multiple` honours `patch.TEST_PREFIX`
-    for choosing which methods to wrap.
-
-If you want `patch.multiple` to create mocks for you, then you can use
-:data:`DEFAULT` as the value. If you use `patch.multiple` as a decorator
-then the created mocks are passed into the decorated function by keyword.
-
-.. doctest::
-
-    >>> thing = object()
-    >>> other = object()
-
-    >>> @patch.multiple('__main__', thing=DEFAULT, other=DEFAULT)
-    ... def test_function(thing, other):
-    ...     assert isinstance(thing, MagicMock)
-    ...     assert isinstance(other, MagicMock)
-    ...
-    >>> test_function()
-
-`patch.multiple` can be nested with other `patch` decorators, but put arguments
-passed by keyword *after* any of the standard arguments created by `patch`:
-
-.. doctest::
-
-    >>> @patch('sys.exit')
-    ... @patch.multiple('__main__', thing=DEFAULT, other=DEFAULT)
-    ... def test_function(mock_exit, other, thing):
-    ...     assert 'other' in repr(other)
-    ...     assert 'thing' in repr(thing)
-    ...     assert 'exit' in repr(mock_exit)
-    ...
-    >>> test_function()
-
-If `patch.multiple` is used as a context manager, the value returned by the
-context manager is a dictionary where created mocks are keyed by name:
-
-.. doctest::
-
-    >>> with patch.multiple('__main__', thing=DEFAULT, other=DEFAULT) as values:
-    ...     assert 'other' in repr(values['other'])
-    ...     assert 'thing' in repr(values['thing'])
-    ...     assert values['thing'] is thing
-    ...     assert values['other'] is other
-    ...
-
-
-.. _start-and-stop:
-
-patch methods: start and stop
-=============================
-
-All the patchers have `start` and `stop` methods. These make it simpler to do
-patching in `setUp` methods or where you want to do multiple patches without
-nesting decorators or with statements.
-
-To use them call `patch`, `patch.object` or `patch.dict` as normal and keep a
-reference to the returned `patcher` object. You can then call `start` to put
-the patch in place and `stop` to undo it.
-
-If you are using `patch` to create a mock for you then it will be returned by
-the call to `patcher.start`.
-
-.. doctest::
-
-    >>> patcher = patch('package.module.ClassName')
-    >>> from package import module
-    >>> original = module.ClassName
-    >>> new_mock = patcher.start()
-    >>> assert module.ClassName is not original
-    >>> assert module.ClassName is new_mock
-    >>> patcher.stop()
-    >>> assert module.ClassName is original
-    >>> assert module.ClassName is not new_mock
-
-
-A typical use case for this might be for doing multiple patches in the `setUp`
-method of a `TestCase`:
-
-.. doctest::
-
-    >>> class MyTest(TestCase):
-    ...     def setUp(self):
-    ...         self.patcher1 = patch('package.module.Class1')
-    ...         self.patcher2 = patch('package.module.Class2')
-    ...         self.MockClass1 = self.patcher1.start()
-    ...         self.MockClass2 = self.patcher2.start()
-    ...
-    ...     def tearDown(self):
-    ...         self.patcher1.stop()
-    ...         self.patcher2.stop()
-    ...
-    ...     def test_something(self):
-    ...         assert package.module.Class1 is self.MockClass1
-    ...         assert package.module.Class2 is self.MockClass2
-    ...
-    >>> MyTest('test_something').run()
-
-.. caution::
-
-    If you use this technique you must ensure that the patching is "undone" by
-    calling `stop`. This can be fiddlier than you might think, because if an
-    exception is raised in the setUp then tearDown is not called. `unittest2
-    <http://pypi.python.org/pypi/unittest2>`_ cleanup functions make this
-    easier.
-
-    .. doctest::
-
-        >>> class MyTest(TestCase):
-        ...     def setUp(self):
-        ...         patcher = patch('package.module.Class')
-        ...         self.MockClass = patcher.start()
-        ...         self.addCleanup(patcher.stop)
-        ...
-        ...     def test_something(self):
-        ...         assert package.module.Class is self.MockClass
-        ...
-        >>> MyTest('test_something').run()
-
-    As an added bonus you no longer need to keep a reference to the `patcher`
-    object.
-
-It is also possible to stop all patches which have been started by using
-`patch.stopall`.
-
-.. function:: patch.stopall
-
-    Stop all active patches. Only stops patches started with `start`.
-
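-A minimal sketch, reusing the `package.module` mock from the test setup
-above:
-
-.. doctest::
-
-    >>> from package import module
-    >>> patcher1 = patch('package.module.Class1')
-    >>> patcher2 = patch('package.module.Class2')
-    >>> mock1 = patcher1.start()
-    >>> mock2 = patcher2.start()
-    >>> assert module.Class1 is mock1
-    >>> patch.stopall()
-    >>> assert module.Class1 is not mock1
-    >>> assert module.Class2 is not mock2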
-
-TEST_PREFIX
-===========
-
-All of the patchers can be used as class decorators. When used in this way
-they wrap every test method on the class. The patchers recognise methods that
-start with `test` as being test methods. This is the same way that the
-`unittest.TestLoader` finds test methods by default.
-
-If you want to use a different prefix for your tests, you can inform the
-patchers of the different prefix by setting `patch.TEST_PREFIX`:
-
-.. doctest::
-
-    >>> patch.TEST_PREFIX = 'foo'
-    >>> value = 3
-    >>>
-    >>> @patch('__main__.value', 'not three')
-    ... class Thing(object):
-    ...     def foo_one(self):
-    ...         print value
-    ...     def foo_two(self):
-    ...         print value
-    ...
-    >>>
-    >>> Thing().foo_one()
-    not three
-    >>> Thing().foo_two()
-    not three
-    >>> value
-    3
-
-
-Nesting Patch Decorators
-========================
-
-If you want to perform multiple patches then you can simply stack up the
-decorators, using this pattern:
-
-.. doctest::
-
-    >>> @patch.object(SomeClass, 'class_method')
-    ... @patch.object(SomeClass, 'static_method')
-    ... def test(mock1, mock2):
-    ...     assert SomeClass.static_method is mock1
-    ...     assert SomeClass.class_method is mock2
-    ...     SomeClass.static_method('foo')
-    ...     SomeClass.class_method('bar')
-    ...     return mock1, mock2
-    ...
-    >>> mock1, mock2 = test()
-    >>> mock1.assert_called_once_with('foo')
-    >>> mock2.assert_called_once_with('bar')
-
-
-Note that the decorators are applied from the bottom upwards. This is the
-standard way that Python applies decorators. The order of the created mocks
-passed into your test function matches this order.
-
-Like all context managers, patches can be nested using contextlib's `nested`
-function; *every* patching will appear in the tuple after "as":
-
-.. doctest::
-
-    >>> from contextlib import nested
-    >>> with nested(
-    ...         patch('package.module.ClassName1'),
-    ...         patch('package.module.ClassName2')
-    ...     ) as (MockClass1, MockClass2):
-    ...     assert package.module.ClassName1 is MockClass1
-    ...     assert package.module.ClassName2 is MockClass2
-    ...
-
-
-.. _where-to-patch:
-
-Where to patch
-==============
-
-`patch` works by (temporarily) replacing the object that a *name* points to
-with another one. There can be many names pointing to any individual object,
-so for patching to work you must ensure that you patch the name used by the
-system under test.
-
-The basic principle is that you patch where an object is *looked up*, which
-is not necessarily the same place as where it is defined. A couple of
-examples will help to clarify this.
-
-Imagine we have a project that we want to test with the following structure::
-
-    a.py
-        -> Defines SomeClass
-
-    b.py
-        -> from a import SomeClass
-        -> some_function instantiates SomeClass
-
-Now we want to test `some_function` but we want to mock out `SomeClass` using
-`patch`. The problem is that when we import module b, which we will have to
-do in order to test it, it imports `SomeClass` from module a. If we use
-`patch` to mock out `a.SomeClass` then it will have no effect on our test;
-module b already has a reference to the *real* `SomeClass` and it looks like
-our patching had no effect.
-
-The key is to patch out `SomeClass` where it is used (or where it is looked
-up). In this case `some_function` will actually look up `SomeClass` in
-module b, where we have imported it. The patching should look like:
-
-    `@patch('b.SomeClass')`
-
-However, consider the alternative scenario where instead of `from a import
-SomeClass` module b does `import a` and `some_function` uses `a.SomeClass`.
-Both of these import forms are common. In this case the class we want to
-patch is being looked up in module a, so we have to patch `a.SomeClass`
-instead:
-
-    `@patch('a.SomeClass')`
-
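-Putting the two cases side by side (a sketch only; modules `a` and `b` are
-the hypothetical ones described above)::
-
-    # b.py does: from a import SomeClass
-    @patch('b.SomeClass')       # patch the name that module b looked up
-    def test_function(MockSomeClass):
-        ...
-
-    # b.py does: import a, and uses a.SomeClass
-    @patch('a.SomeClass')       # patch the name on module a itself
-    def test_function(MockSomeClass):
-        ...
-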
-
-Patching Descriptors and Proxy Objects
-======================================
-
-Since version 0.6.0 both patch_ and patch.object_ have been able to correctly
-patch and restore descriptors: class methods, static methods and properties.
-You should patch these on the *class* rather than an instance.
-
-Since version 0.7.0 patch_ and patch.object_ work correctly with some objects
-that proxy attribute access, like the `django settings object
-<http://www.voidspace.org.uk/python/weblog/arch_d7_2010_12_04.shtml#e1198>`_.
-
-.. note::
-
-    In django `import settings` and `from django.conf import settings`
-    return different objects. If you are using libraries / apps that do both you
-    may have to patch both. Grrr...
diff --git a/branch-1.2/ambari-common/src/test/python/mock/docs/sentinel.txt b/branch-1.2/ambari-common/src/test/python/mock/docs/sentinel.txt
deleted file mode 100644
index 1c5223d..0000000
--- a/branch-1.2/ambari-common/src/test/python/mock/docs/sentinel.txt
+++ /dev/null
@@ -1,58 +0,0 @@
-==========
- Sentinel
-==========
-
-
-.. currentmodule:: mock
-
-.. testsetup::
-
-    class ProductionClass(object):
-        def something(self):
-            return self.method()
-
-    class Test(unittest2.TestCase):
-        def testSomething(self):
-            pass
-    self = Test('testSomething')
-
-
-.. data:: sentinel
-
-    The ``sentinel`` object provides a convenient way of providing unique
-    objects for your tests.
-
-    Attributes are created on demand when you access them by name. Accessing
-    the same attribute will always return the same object. The objects
-    returned have a sensible repr so that test failure messages are readable.
-
-
-.. data:: DEFAULT
-
-    The `DEFAULT` object is a pre-created sentinel (actually
-    `sentinel.DEFAULT`). It can be used by :attr:`~Mock.side_effect`
-    functions to indicate that the normal return value should be used.
-
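-A minimal doctest sketch of a `side_effect` function returning `DEFAULT`
-(imports shown explicitly, as an assumption about this doctest namespace):
-
-.. doctest::
-
-    >>> from mock import Mock, DEFAULT
-    >>> m = Mock(return_value=3)
-    >>> m.side_effect = lambda *args, **kwargs: DEFAULT
-    >>> m()
-    3
-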
-
-Sentinel Example
-================
-
-Sometimes when testing you need to test that a specific object is passed as an
-argument to another method, or returned. It can be common to create named
-sentinel objects to test this. `sentinel` provides a convenient way of
-creating and testing the identity of objects like this.
-
-In this example we monkey patch `method` to return
-`sentinel.some_object`:
-
-.. doctest::
-
-    >>> real = ProductionClass()
-    >>> real.method = Mock(name="method")
-    >>> real.method.return_value = sentinel.some_object
-    >>> result = real.method()
-    >>> assert result is sentinel.some_object
-    >>> sentinel.some_object
-    sentinel.some_object
-
-
diff --git a/branch-1.2/ambari-common/src/test/python/mock/extendmock.py b/branch-1.2/ambari-common/src/test/python/mock/extendmock.py
deleted file mode 100644
index 0550d9f..0000000
--- a/branch-1.2/ambari-common/src/test/python/mock/extendmock.py
+++ /dev/null
@@ -1 +0,0 @@
-# merged into mock.py in Mock 0.7
diff --git a/branch-1.2/ambari-common/src/test/python/mock/mock.py b/branch-1.2/ambari-common/src/test/python/mock/mock.py
deleted file mode 100644
index 2a3f869..0000000
--- a/branch-1.2/ambari-common/src/test/python/mock/mock.py
+++ /dev/null
@@ -1,2365 +0,0 @@
-# mock.py
-# Test tools for mocking and patching.
-# Copyright (C) 2007-2012 Michael Foord & the mock team
-# E-mail: fuzzyman AT voidspace DOT org DOT uk
-
-# mock 1.0.1
-# http://www.voidspace.org.uk/python/mock/
-
-# Released subject to the BSD License
-# Please see http://www.voidspace.org.uk/python/license.shtml
-
-__all__ = (
-    'Mock',
-    'MagicMock',
-    'patch',
-    'sentinel',
-    'DEFAULT',
-    'ANY',
-    'call',
-    'create_autospec',
-    'FILTER_DIR',
-    'NonCallableMock',
-    'NonCallableMagicMock',
-    'mock_open',
-    'PropertyMock',
-)
-
-
-__version__ = '1.0.1'
-
-
-import pprint
-import sys
-
-try:
-    import inspect
-except ImportError:
-    # for alternative platforms that
-    # may not have inspect
-    inspect = None
-
-try:
-    from functools import wraps as original_wraps
-except ImportError:
-    # Python 2.4 compatibility
-    def wraps(original):
-        def inner(f):
-            f.__name__ = original.__name__
-            f.__doc__ = original.__doc__
-            f.__module__ = original.__module__
-            wrapped = getattr(original, '__wrapped__', original)
-            f.__wrapped__ = wrapped
-            return f
-        return inner
-else:
-    if sys.version_info[:2] >= (3, 2):
-        wraps = original_wraps
-    else:
-        def wraps(func):
-            def inner(f):
-                f = original_wraps(func)(f)
-                wrapped = getattr(func, '__wrapped__', func)
-                f.__wrapped__ = wrapped
-                return f
-            return inner
-
-try:
-    unicode
-except NameError:
-    # Python 3
-    basestring = unicode = str
-
-try:
-    long
-except NameError:
-    # Python 3
-    long = int
-
-try:
-    BaseException
-except NameError:
-    # Python 2.4 compatibility
-    BaseException = Exception
-
-try:
-    next
-except NameError:
-    def next(obj):
-        return obj.next()
-
-
-BaseExceptions = (BaseException,)
-if 'java' in sys.platform:
-    # jython
-    import java
-    BaseExceptions = (BaseException, java.lang.Throwable)
-
-try:
-    _isidentifier = str.isidentifier
-except AttributeError:
-    # Python 2.X
-    import keyword
-    import re
-    regex = re.compile(r'^[a-z_][a-z0-9_]*$', re.I)
-    def _isidentifier(string):
-        if string in keyword.kwlist:
-            return False
-        return regex.match(string)
-
-
-inPy3k = sys.version_info[0] == 3
-
-# Needed to work around Python 3 bug where use of "super" interferes with
-# defining __class__ as a descriptor
-_super = super
-
-self = 'im_self'
-builtin = '__builtin__'
-if inPy3k:
-    self = '__self__'
-    builtin = 'builtins'
-
-FILTER_DIR = True
-
-
-def _is_instance_mock(obj):
-    # can't use isinstance on Mock objects because they override __class__
-    # The base class for all mocks is NonCallableMock
-    return issubclass(type(obj), NonCallableMock)
-
-
-def _is_exception(obj):
-    return (
-        isinstance(obj, BaseExceptions) or
-        isinstance(obj, ClassTypes) and issubclass(obj, BaseExceptions)
-    )
-
-
-class _slotted(object):
-    __slots__ = ['a']
-
-
-DescriptorTypes = (
-    type(_slotted.a),
-    property,
-)
-
-
-def _getsignature(func, skipfirst, instance=False):
-    if inspect is None:
-        raise ImportError('inspect module not available')
-
-    if isinstance(func, ClassTypes) and not instance:
-        try:
-            func = func.__init__
-        except AttributeError:
-            return
-        skipfirst = True
-    elif not isinstance(func, FunctionTypes):
-        # for classes where instance is True we end up here too
-        try:
-            func = func.__call__
-        except AttributeError:
-            return
-
-    if inPy3k:
-        try:
-            argspec = inspect.getfullargspec(func)
-        except TypeError:
-            # C function / method, possibly inherited object().__init__
-            return
-        regargs, varargs, varkw, defaults, kwonly, kwonlydef, ann = argspec
-    else:
-        try:
-            regargs, varargs, varkwargs, defaults = inspect.getargspec(func)
-        except TypeError:
-            # C function / method, possibly inherited object().__init__
-            return
-
-    # instance methods and classmethods need to lose the self argument
-    if getattr(func, self, None) is not None:
-        regargs = regargs[1:]
-    if skipfirst:
-        # this condition and the above one are never both True - why?
-        regargs = regargs[1:]
-
-    if inPy3k:
-        signature = inspect.formatargspec(
-            regargs, varargs, varkw, defaults,
-            kwonly, kwonlydef, ann, formatvalue=lambda value: "")
-    else:
-        signature = inspect.formatargspec(
-            regargs, varargs, varkwargs, defaults,
-            formatvalue=lambda value: "")
-    return signature[1:-1], func
-
-
-def _check_signature(func, mock, skipfirst, instance=False):
-    if not _callable(func):
-        return
-
-    result = _getsignature(func, skipfirst, instance)
-    if result is None:
-        return
-    signature, func = result
-
-    # can't use self because "self" is common as an argument name
-    # (and, unfortunately, not always in the first position)
-    src = "lambda _mock_self, %s: None" % signature
-    checksig = eval(src, {})
-    _copy_func_details(func, checksig)
-    type(mock)._mock_check_sig = checksig
-
-
-def _copy_func_details(func, funcopy):
-    funcopy.__name__ = func.__name__
-    funcopy.__doc__ = func.__doc__
-    #funcopy.__dict__.update(func.__dict__)
-    funcopy.__module__ = func.__module__
-    if not inPy3k:
-        funcopy.func_defaults = func.func_defaults
-        return
-    funcopy.__defaults__ = func.__defaults__
-    funcopy.__kwdefaults__ = func.__kwdefaults__
-
-
-def _callable(obj):
-    if isinstance(obj, ClassTypes):
-        return True
-    if getattr(obj, '__call__', None) is not None:
-        return True
-    return False
-
-
-def _is_list(obj):
-    # checks for list or tuples
-    # XXXX badly named!
-    return type(obj) in (list, tuple)
-
-
-def _instance_callable(obj):
-    """Given an object, return True if the object is callable.
-    For classes, return True if instances would be callable."""
-    if not isinstance(obj, ClassTypes):
-        # already an instance
-        return getattr(obj, '__call__', None) is not None
-
-    klass = obj
-    # uses __bases__ instead of __mro__ so that we work with old style classes
-    if klass.__dict__.get('__call__') is not None:
-        return True
-
-    for base in klass.__bases__:
-        if _instance_callable(base):
-            return True
-    return False
-
-
-def _set_signature(mock, original, instance=False):
-    # creates a function with signature (*args, **kwargs) that delegates to a
-    # mock. It still does signature checking by calling a lambda with the same
-    # signature as the original.
-    if not _callable(original):
-        return
-
-    skipfirst = isinstance(original, ClassTypes)
-    result = _getsignature(original, skipfirst, instance)
-    if result is None:
-        # was a C function (e.g. object().__init__ ) that can't be mocked
-        return
-
-    signature, func = result
-
-    src = "lambda %s: None" % signature
-    checksig = eval(src, {})
-    _copy_func_details(func, checksig)
-
-    name = original.__name__
-    if not _isidentifier(name):
-        name = 'funcopy'
-    context = {'_checksig_': checksig, 'mock': mock}
-    src = """def %s(*args, **kwargs):
-    _checksig_(*args, **kwargs)
-    return mock(*args, **kwargs)""" % name
-    exec (src, context)
-    funcopy = context[name]
-    _setup_func(funcopy, mock)
-    return funcopy
-
-
-def _setup_func(funcopy, mock):
-    funcopy.mock = mock
-
-    # can't use isinstance with mocks
-    if not _is_instance_mock(mock):
-        return
-
-    def assert_called_with(*args, **kwargs):
-        return mock.assert_called_with(*args, **kwargs)
-    def assert_called_once_with(*args, **kwargs):
-        return mock.assert_called_once_with(*args, **kwargs)
-    def assert_has_calls(*args, **kwargs):
-        return mock.assert_has_calls(*args, **kwargs)
-    def assert_any_call(*args, **kwargs):
-        return mock.assert_any_call(*args, **kwargs)
-    def reset_mock():
-        funcopy.method_calls = _CallList()
-        funcopy.mock_calls = _CallList()
-        mock.reset_mock()
-        ret = funcopy.return_value
-        if _is_instance_mock(ret) and not ret is mock:
-            ret.reset_mock()
-
-    funcopy.called = False
-    funcopy.call_count = 0
-    funcopy.call_args = None
-    funcopy.call_args_list = _CallList()
-    funcopy.method_calls = _CallList()
-    funcopy.mock_calls = _CallList()
-
-    funcopy.return_value = mock.return_value
-    funcopy.side_effect = mock.side_effect
-    funcopy._mock_children = mock._mock_children
-
-    funcopy.assert_called_with = assert_called_with
-    funcopy.assert_called_once_with = assert_called_once_with
-    funcopy.assert_has_calls = assert_has_calls
-    funcopy.assert_any_call = assert_any_call
-    funcopy.reset_mock = reset_mock
-
-    mock._mock_delegate = funcopy
-
-
-def _is_magic(name):
-    return '__%s__' % name[2:-2] == name
-
-
-class _SentinelObject(object):
-    "A unique, named, sentinel object."
-    def __init__(self, name):
-        self.name = name
-
-    def __repr__(self):
-        return 'sentinel.%s' % self.name
-
-
-class _Sentinel(object):
-    """Access attributes to return a named object, usable as a sentinel."""
-    def __init__(self):
-        self._sentinels = {}
-
-    def __getattr__(self, name):
-        if name == '__bases__':
-            # Without this help(mock) raises an exception
-            raise AttributeError
-        return self._sentinels.setdefault(name, _SentinelObject(name))
-
-
-sentinel = _Sentinel()
-
-DEFAULT = sentinel.DEFAULT
-_missing = sentinel.MISSING
-_deleted = sentinel.DELETED
-
-
-class OldStyleClass:
-    pass
-ClassType = type(OldStyleClass)
-
-
-def _copy(value):
-    if type(value) in (dict, list, tuple, set):
-        return type(value)(value)
-    return value
-
-
-ClassTypes = (type,)
-if not inPy3k:
-    ClassTypes = (type, ClassType)
-
-_allowed_names = set(
-    [
-        'return_value', '_mock_return_value', 'side_effect',
-        '_mock_side_effect', '_mock_parent', '_mock_new_parent',
-        '_mock_name', '_mock_new_name'
-    ]
-)
-
-
-def _delegating_property(name):
-    _allowed_names.add(name)
-    _the_name = '_mock_' + name
-    def _get(self, name=name, _the_name=_the_name):
-        sig = self._mock_delegate
-        if sig is None:
-            return getattr(self, _the_name)
-        return getattr(sig, name)
-    def _set(self, value, name=name, _the_name=_the_name):
-        sig = self._mock_delegate
-        if sig is None:
-            self.__dict__[_the_name] = value
-        else:
-            setattr(sig, name, value)
-
-    return property(_get, _set)
-
-
-
-class _CallList(list):
-
-    def __contains__(self, value):
-        if not isinstance(value, list):
-            return list.__contains__(self, value)
-        len_value = len(value)
-        len_self = len(self)
-        if len_value > len_self:
-            return False
-
-        for i in range(0, len_self - len_value + 1):
-            sub_list = self[i:i+len_value]
-            if sub_list == value:
-                return True
-        return False
-
-    def __repr__(self):
-        return pprint.pformat(list(self))
-
-
-def _check_and_set_parent(parent, value, name, new_name):
-    if not _is_instance_mock(value):
-        return False
-    if ((value._mock_name or value._mock_new_name) or
-        (value._mock_parent is not None) or
-        (value._mock_new_parent is not None)):
-        return False
-
-    _parent = parent
-    while _parent is not None:
-        # setting a mock (value) as a child or return value of itself
-        # should not modify the mock
-        if _parent is value:
-            return False
-        _parent = _parent._mock_new_parent
-
-    if new_name:
-        value._mock_new_parent = parent
-        value._mock_new_name = new_name
-    if name:
-        value._mock_parent = parent
-        value._mock_name = name
-    return True
-
-
-
-class Base(object):
-    _mock_return_value = DEFAULT
-    _mock_side_effect = None
-    def __init__(self, *args, **kwargs):
-        pass
-
-
-
-class NonCallableMock(Base):
-    """A non-callable version of `Mock`"""
-
-    def __new__(cls, *args, **kw):
-        # every instance has its own class
-        # so we can create magic methods on the
-        # class without stomping on other mocks
-        new = type(cls.__name__, (cls,), {'__doc__': cls.__doc__})
-        instance = object.__new__(new)
-        return instance
-
-
-    def __init__(
-            self, spec=None, wraps=None, name=None, spec_set=None,
-            parent=None, _spec_state=None, _new_name='', _new_parent=None,
-            **kwargs
-        ):
-        if _new_parent is None:
-            _new_parent = parent
-
-        __dict__ = self.__dict__
-        __dict__['_mock_parent'] = parent
-        __dict__['_mock_name'] = name
-        __dict__['_mock_new_name'] = _new_name
-        __dict__['_mock_new_parent'] = _new_parent
-
-        if spec_set is not None:
-            spec = spec_set
-            spec_set = True
-
-        self._mock_add_spec(spec, spec_set)
-
-        __dict__['_mock_children'] = {}
-        __dict__['_mock_wraps'] = wraps
-        __dict__['_mock_delegate'] = None
-
-        __dict__['_mock_called'] = False
-        __dict__['_mock_call_args'] = None
-        __dict__['_mock_call_count'] = 0
-        __dict__['_mock_call_args_list'] = _CallList()
-        __dict__['_mock_mock_calls'] = _CallList()
-
-        __dict__['method_calls'] = _CallList()
-
-        if kwargs:
-            self.configure_mock(**kwargs)
-
-        _super(NonCallableMock, self).__init__(
-            spec, wraps, name, spec_set, parent,
-            _spec_state
-        )
-
-
-    def attach_mock(self, mock, attribute):
-        """
-        Attach a mock as an attribute of this one, replacing its name and
-        parent. Calls to the attached mock will be recorded in the
-        `method_calls` and `mock_calls` attributes of this one."""
-        mock._mock_parent = None
-        mock._mock_new_parent = None
-        mock._mock_name = ''
-        mock._mock_new_name = None
-
-        setattr(self, attribute, mock)
-
-
-    def mock_add_spec(self, spec, spec_set=False):
-        """Add a spec to a mock. `spec` can either be an object or a
-        list of strings. Only attributes on the `spec` can be fetched as
-        attributes from the mock.
-
-        If `spec_set` is True then only attributes on the spec can be set."""
-        self._mock_add_spec(spec, spec_set)
-
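-    # Illustrative sketch: restricting an existing mock to a
-    # list-of-strings spec.
-    #
-    #     >>> m = Mock()
-    #     >>> m.mock_add_spec(['frobnicate'])
-    #     >>> _ = m.frobnicate     # allowed: on the spec
-    #     >>> m.other              # raises AttributeError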
-
-    def _mock_add_spec(self, spec, spec_set):
-        _spec_class = None
-
-        if spec is not None and not _is_list(spec):
-            if isinstance(spec, ClassTypes):
-                _spec_class = spec
-            else:
-                _spec_class = _get_class(spec)
-
-            spec = dir(spec)
-
-        __dict__ = self.__dict__
-        __dict__['_spec_class'] = _spec_class
-        __dict__['_spec_set'] = spec_set
-        __dict__['_mock_methods'] = spec
-
-
-    def __get_return_value(self):
-        ret = self._mock_return_value
-        if self._mock_delegate is not None:
-            ret = self._mock_delegate.return_value
-
-        if ret is DEFAULT:
-            ret = self._get_child_mock(
-                _new_parent=self, _new_name='()'
-            )
-            self.return_value = ret
-        return ret
-
-
-    def __set_return_value(self, value):
-        if self._mock_delegate is not None:
-            self._mock_delegate.return_value = value
-        else:
-            self._mock_return_value = value
-            _check_and_set_parent(self, value, None, '()')
-
-    __return_value_doc = "The value to be returned when the mock is called."
-    return_value = property(__get_return_value, __set_return_value,
-                            __return_value_doc)
-
-
-    @property
-    def __class__(self):
-        if self._spec_class is None:
-            return type(self)
-        return self._spec_class
-
-    called = _delegating_property('called')
-    call_count = _delegating_property('call_count')
-    call_args = _delegating_property('call_args')
-    call_args_list = _delegating_property('call_args_list')
-    mock_calls = _delegating_property('mock_calls')
-
-
-    def __get_side_effect(self):
-        sig = self._mock_delegate
-        if sig is None:
-            return self._mock_side_effect
-        return sig.side_effect
-
-    def __set_side_effect(self, value):
-        value = _try_iter(value)
-        sig = self._mock_delegate
-        if sig is None:
-            self._mock_side_effect = value
-        else:
-            sig.side_effect = value
-
-    side_effect = property(__get_side_effect, __set_side_effect)
-
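-    # Illustrative sketch of the three side_effect flavours handled above:
-    #
-    #     >>> m = Mock(side_effect=[1, 2])      # iterable: one value per call
-    #     >>> m(), m()
-    #     (1, 2)
-    #     >>> m.side_effect = KeyError('boom')  # exception: raised on call
-    #     >>> m.side_effect = lambda x: x * 2   # callable: its result is used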
-
-    def reset_mock(self):
-        "Restore the mock object to its initial state."
-        self.called = False
-        self.call_args = None
-        self.call_count = 0
-        self.mock_calls = _CallList()
-        self.call_args_list = _CallList()
-        self.method_calls = _CallList()
-
-        for child in self._mock_children.values():
-            if isinstance(child, _SpecState):
-                continue
-            child.reset_mock()
-
-        ret = self._mock_return_value
-        if _is_instance_mock(ret) and ret is not self:
-            ret.reset_mock()
-
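-    # Illustrative sketch: reset_mock clears call state but, by design,
-    # leaves return_value and side_effect configuration in place.
-    #
-    #     >>> m = Mock(return_value=3)
-    #     >>> _ = m()
-    #     >>> m.reset_mock()
-    #     >>> m.called, m()
-    #     (False, 3)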
-
-    def configure_mock(self, **kwargs):
-        """Set attributes on the mock through keyword arguments.
-
-        Attributes plus return values and side effects can be set on child
-        mocks using standard dot notation and unpacking a dictionary in the
-        method call:
-
-        >>> attrs = {'method.return_value': 3, 'other.side_effect': KeyError}
-        >>> mock.configure_mock(**attrs)"""
-        for arg, val in sorted(kwargs.items(),
-                               # we sort on the number of dots so that
-                               # attributes are set before we set attributes on
-                               # attributes
-                               key=lambda entry: entry[0].count('.')):
-            args = arg.split('.')
-            final = args.pop()
-            obj = self
-            for entry in args:
-                obj = getattr(obj, entry)
-            setattr(obj, final, val)
-
-
-    def __getattr__(self, name):
-        if name == '_mock_methods':
-            raise AttributeError(name)
-        elif self._mock_methods is not None:
-            if name not in self._mock_methods or name in _all_magics:
-                raise AttributeError("Mock object has no attribute %r" % name)
-        elif _is_magic(name):
-            raise AttributeError(name)
-
-        result = self._mock_children.get(name)
-        if result is _deleted:
-            raise AttributeError(name)
-        elif result is None:
-            wraps = None
-            if self._mock_wraps is not None:
-                # XXXX should we get the attribute without triggering code
-                # execution?
-                wraps = getattr(self._mock_wraps, name)
-
-            result = self._get_child_mock(
-                parent=self, name=name, wraps=wraps, _new_name=name,
-                _new_parent=self
-            )
-            self._mock_children[name] = result
-
-        elif isinstance(result, _SpecState):
-            result = create_autospec(
-                result.spec, result.spec_set, result.instance,
-                result.parent, result.name
-            )
-            self._mock_children[name] = result
-
-        return result
-
-
-    def __repr__(self):
-        _name_list = [self._mock_new_name]
-        _parent = self._mock_new_parent
-        last = self
-
-        dot = '.'
-        if _name_list == ['()']:
-            dot = ''
-        seen = set()
-        while _parent is not None:
-            last = _parent
-
-            _name_list.append(_parent._mock_new_name + dot)
-            dot = '.'
-            if _parent._mock_new_name == '()':
-                dot = ''
-
-            _parent = _parent._mock_new_parent
-
-            # use ids here so as not to call __hash__ on the mocks
-            if id(_parent) in seen:
-                break
-            seen.add(id(_parent))
-
-        _name_list = list(reversed(_name_list))
-        _first = last._mock_name or 'mock'
-        if len(_name_list) > 1:
-            if _name_list[1] not in ('()', '().'):
-                _first += '.'
-        _name_list[0] = _first
-        name = ''.join(_name_list)
-
-        name_string = ''
-        if name not in ('mock', 'mock.'):
-            name_string = ' name=%r' % name
-
-        spec_string = ''
-        if self._spec_class is not None:
-            spec_string = ' spec=%r'
-            if self._spec_set:
-                spec_string = ' spec_set=%r'
-            spec_string = spec_string % self._spec_class.__name__
-        return "<%s%s%s id='%s'>" % (
-            type(self).__name__,
-            name_string,
-            spec_string,
-            id(self)
-        )
-
-
-    def __dir__(self):
-        """Filter the output of `dir(mock)` to only useful members.
-        XXXX
-        """
-        extras = self._mock_methods or []
-        from_type = dir(type(self))
-        from_dict = list(self.__dict__)
-
-        if FILTER_DIR:
-            from_type = [e for e in from_type if not e.startswith('_')]
-            from_dict = [e for e in from_dict if not e.startswith('_') or
-                         _is_magic(e)]
-        return sorted(set(extras + from_type + from_dict +
-                          list(self._mock_children)))
-
-
-    def __setattr__(self, name, value):
-        if name in _allowed_names:
-            # property setters go through here
-            return object.__setattr__(self, name, value)
-        elif (self._spec_set and self._mock_methods is not None and
-            name not in self._mock_methods and
-            name not in self.__dict__):
-            raise AttributeError("Mock object has no attribute '%s'" % name)
-        elif name in _unsupported_magics:
-            msg = 'Attempting to set unsupported magic method %r.' % name
-            raise AttributeError(msg)
-        elif name in _all_magics:
-            if self._mock_methods is not None and name not in self._mock_methods:
-                raise AttributeError("Mock object has no attribute '%s'" % name)
-
-            if not _is_instance_mock(value):
-                setattr(type(self), name, _get_method(name, value))
-                original = value
-                value = lambda *args, **kw: original(self, *args, **kw)
-            else:
-                # only set _new_name and not name so that mock_calls is tracked
-                # but not method calls
-                _check_and_set_parent(self, value, None, name)
-                setattr(type(self), name, value)
-                self._mock_children[name] = value
-        elif name == '__class__':
-            self._spec_class = value
-            return
-        else:
-            if _check_and_set_parent(self, value, name, name):
-                self._mock_children[name] = value
-        return object.__setattr__(self, name, value)
-
-
-    def __delattr__(self, name):
-        if name in _all_magics and name in type(self).__dict__:
-            delattr(type(self), name)
-            if name not in self.__dict__:
-                # for magic methods that are still MagicProxy objects and
-                # not set on the instance itself
-                return
-
-        if name in self.__dict__:
-            object.__delattr__(self, name)
-
-        obj = self._mock_children.get(name, _missing)
-        if obj is _deleted:
-            raise AttributeError(name)
-        if obj is not _missing:
-            del self._mock_children[name]
-        self._mock_children[name] = _deleted
-
-
-
-    def _format_mock_call_signature(self, args, kwargs):
-        name = self._mock_name or 'mock'
-        return _format_call_signature(name, args, kwargs)
-
-
-    def _format_mock_failure_message(self, args, kwargs):
-        message = 'Expected call: %s\nActual call: %s'
-        expected_string = self._format_mock_call_signature(args, kwargs)
-        call_args = self.call_args
-        if len(call_args) == 3:
-            call_args = call_args[1:]
-        actual_string = self._format_mock_call_signature(*call_args)
-        return message % (expected_string, actual_string)
-
-
-    def assert_called_with(_mock_self, *args, **kwargs):
-        """assert that the mock was called with the specified arguments.
-
-        Raises an AssertionError if the args and keyword args passed in are
-        different to the last call to the mock."""
-        self = _mock_self
-        if self.call_args is None:
-            expected = self._format_mock_call_signature(args, kwargs)
-            raise AssertionError('Expected call: %s\nNot called' % (expected,))
-
-        if self.call_args != (args, kwargs):
-            msg = self._format_mock_failure_message(args, kwargs)
-            raise AssertionError(msg)
-
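-    # Illustrative sketch: only the *most recent* call is checked.
-    #
-    #     >>> m = Mock()
-    #     >>> _ = m(1, key='val')
-    #     >>> m.assert_called_with(1, key='val')  # passes
-    #     >>> m.assert_called_with(2)             # raises AssertionError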
-
-    def assert_called_once_with(_mock_self, *args, **kwargs):
-        """assert that the mock was called exactly once and with the specified
-        arguments."""
-        self = _mock_self
-        if not self.call_count == 1:
-            msg = ("Expected to be called once. Called %s times." %
-                   self.call_count)
-            raise AssertionError(msg)
-        return self.assert_called_with(*args, **kwargs)
-
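-    # Illustrative sketch: fails as soon as the call count is anything
-    # other than one.
-    #
-    #     >>> m = Mock()
-    #     >>> _ = m('a')
-    #     >>> m.assert_called_once_with('a')  # passes
-    #     >>> _ = m('a')
-    #     >>> m.assert_called_once_with('a')  # raises: called twice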
-
-    def assert_has_calls(self, calls, any_order=False):
-        """assert the mock has been called with the specified calls.
-        The `mock_calls` list is checked for the calls.
-
-        If `any_order` is False (the default) then the calls must be
-        sequential. There can be extra calls before or after the
-        specified calls.
-
-        If `any_order` is True then the calls can be in any order, but
-        they must all appear in `mock_calls`."""
-        if not any_order:
-            if calls not in self.mock_calls:
-                raise AssertionError(
-                    'Calls not found.\nExpected: %r\n'
-                    'Actual: %r' % (calls, self.mock_calls)
-                )
-            return
-
-        all_calls = list(self.mock_calls)
-
-        not_found = []
-        for kall in calls:
-            try:
-                all_calls.remove(kall)
-            except ValueError:
-                not_found.append(kall)
-        if not_found:
-            raise AssertionError(
-                '%r not all found in call list' % (tuple(not_found),)
-            )
-
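-    # Illustrative sketch: sequential matching requires a contiguous run of
-    # calls; any_order only requires membership.
-    #
-    #     >>> m = Mock()
-    #     >>> for i in (1, 2, 3): _ = m(i)
-    #     >>> m.assert_has_calls([call(2), call(3)])                  # contiguous
-    #     >>> m.assert_has_calls([call(3), call(1)], any_order=True)  # unordered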
-
-    def assert_any_call(self, *args, **kwargs):
-        """assert the mock has been called with the specified arguments.
-
-        The assert passes if the mock has *ever* been called, unlike
-        `assert_called_with` and `assert_called_once_with` that only pass if
-        the call is the most recent one."""
-        kall = call(*args, **kwargs)
-        if kall not in self.call_args_list:
-            expected_string = self._format_mock_call_signature(args, kwargs)
-            raise AssertionError(
-                '%s call not found' % expected_string
-            )
-
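-    # Illustrative sketch: passes for any recorded call, not just the last.
-    #
-    #     >>> m = Mock()
-    #     >>> _ = m(1)
-    #     >>> _ = m(2)
-    #     >>> m.assert_any_call(1)   # passes; assert_called_with(1) would fail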
-
-    def _get_child_mock(self, **kw):
-        """Create the child mocks for attributes and return value.
-        By default child mocks will be the same type as the parent.
-        Subclasses of Mock may want to override this to customize the way
-        child mocks are made.
-
-        For non-callable mocks the callable variant will be used (rather than
-        any custom subclass)."""
-        _type = type(self)
-        if not issubclass(_type, CallableMixin):
-            if issubclass(_type, NonCallableMagicMock):
-                klass = MagicMock
-            elif issubclass(_type, NonCallableMock):
-                klass = Mock
-        else:
-            klass = _type.__mro__[1]
-        return klass(**kw)
-
-
-
-def _try_iter(obj):
-    if obj is None:
-        return obj
-    if _is_exception(obj):
-        return obj
-    if _callable(obj):
-        return obj
-    try:
-        return iter(obj)
-    except TypeError:
-        # XXXX backwards compatibility
-        # but this will blow up on first call - so maybe we should fail early?
-        return obj
-
-
-
-class CallableMixin(Base):
-
-    def __init__(self, spec=None, side_effect=None, return_value=DEFAULT,
-                 wraps=None, name=None, spec_set=None, parent=None,
-                 _spec_state=None, _new_name='', _new_parent=None, **kwargs):
-        self.__dict__['_mock_return_value'] = return_value
-
-        _super(CallableMixin, self).__init__(
-            spec, wraps, name, spec_set, parent,
-            _spec_state, _new_name, _new_parent, **kwargs
-        )
-
-        self.side_effect = side_effect
-
-
-    def _mock_check_sig(self, *args, **kwargs):
-        # stub method that can be replaced with one with a specific signature
-        pass
-
-
-    def __call__(_mock_self, *args, **kwargs):
-        # can't use self in case a function / method we are mocking uses self
-        # in the signature
-        _mock_self._mock_check_sig(*args, **kwargs)
-        return _mock_self._mock_call(*args, **kwargs)
-
-
-    def _mock_call(_mock_self, *args, **kwargs):
-        self = _mock_self
-        self.called = True
-        self.call_count += 1
-        self.call_args = _Call((args, kwargs), two=True)
-        self.call_args_list.append(_Call((args, kwargs), two=True))
-
-        _new_name = self._mock_new_name
-        _new_parent = self._mock_new_parent
-        self.mock_calls.append(_Call(('', args, kwargs)))
-
-        seen = set()
-        skip_next_dot = _new_name == '()'
-        do_method_calls = self._mock_parent is not None
-        name = self._mock_name
-        while _new_parent is not None:
-            this_mock_call = _Call((_new_name, args, kwargs))
-            if _new_parent._mock_new_name:
-                dot = '.'
-                if skip_next_dot:
-                    dot = ''
-
-                skip_next_dot = False
-                if _new_parent._mock_new_name == '()':
-                    skip_next_dot = True
-
-                _new_name = _new_parent._mock_new_name + dot + _new_name
-
-            if do_method_calls:
-                if _new_name == name:
-                    this_method_call = this_mock_call
-                else:
-                    this_method_call = _Call((name, args, kwargs))
-                _new_parent.method_calls.append(this_method_call)
-
-                do_method_calls = _new_parent._mock_parent is not None
-                if do_method_calls:
-                    name = _new_parent._mock_name + '.' + name
-
-            _new_parent.mock_calls.append(this_mock_call)
-            _new_parent = _new_parent._mock_new_parent
-
-            # use ids here so as not to call __hash__ on the mocks
-            _new_parent_id = id(_new_parent)
-            if _new_parent_id in seen:
-                break
-            seen.add(_new_parent_id)
-
-        ret_val = DEFAULT
-        effect = self.side_effect
-        if effect is not None:
-            if _is_exception(effect):
-                raise effect
-
-            if not _callable(effect):
-                result = next(effect)
-                if _is_exception(result):
-                    raise result
-                return result
-
-            ret_val = effect(*args, **kwargs)
-            if ret_val is DEFAULT:
-                ret_val = self.return_value
-
-        if (self._mock_wraps is not None and
-             self._mock_return_value is DEFAULT):
-            return self._mock_wraps(*args, **kwargs)
-        if ret_val is DEFAULT:
-            ret_val = self.return_value
-        return ret_val
-
-
-
-class Mock(CallableMixin, NonCallableMock):
-    """
-    Create a new `Mock` object. `Mock` takes several optional arguments
-    that specify the behaviour of the Mock object:
-
-    * `spec`: This can be either a list of strings or an existing object (a
-      class or instance) that acts as the specification for the mock object. If
-      you pass in an object then a list of strings is formed by calling dir on
-      the object (excluding unsupported magic attributes and methods). Accessing
-      any attribute not in this list will raise an `AttributeError`.
-
-      If `spec` is an object (rather than a list of strings) then
-      `mock.__class__` returns the class of the spec object. This allows mocks
-      to pass `isinstance` tests.
-
-    * `spec_set`: A stricter variant of `spec`. If used, attempting to *set*
-      or get an attribute on the mock that isn't on the object passed as
-      `spec_set` will raise an `AttributeError`.
-
-    * `side_effect`: A function to be called whenever the Mock is called. See
-      the `side_effect` attribute. Useful for raising exceptions or
-      dynamically changing return values. The function is called with the same
-      arguments as the mock, and unless it returns `DEFAULT`, the return
-      value of this function is used as the return value.
-
-      Alternatively `side_effect` can be an exception class or instance. In
-      this case the exception will be raised when the mock is called.
-
-      If `side_effect` is an iterable then each call to the mock will return
-      the next value from the iterable. If any of the members of the iterable
-      are exceptions they will be raised instead of returned.
-
-    * `return_value`: The value returned when the mock is called. By default
-      this is a new Mock (created on first access). See the
-      `return_value` attribute.
-
-    * `wraps`: Item for the mock object to wrap. If `wraps` is not None then
-      calling the Mock will pass the call through to the wrapped object
-      (returning the real result). Attribute access on the mock will return a
-      Mock object that wraps the corresponding attribute of the wrapped object
-      (so attempting to access an attribute that doesn't exist will raise an
-      `AttributeError`).
-
-      If the mock has an explicit `return_value` set then calls are not passed
-      to the wrapped object and the `return_value` is returned instead.
-
-    * `name`: If the mock has a name then it will be used in the repr of the
-      mock. This can be useful for debugging. The name is propagated to child
-      mocks.
-
-    Mocks can also be called with arbitrary keyword arguments. These will be
-    used to set attributes on the mock after it is created.
-    """
-
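-# Illustrative sketch of the constructor arguments described above
-# (hypothetical names):
-#
-#     >>> m = Mock(name='db', return_value=3)
-#     >>> m()
-#     3
-#     >>> _ = m.cursor.execute('SELECT 1')   # child mocks appear on demand
-#     >>> m.mock_calls == [call(), call.cursor.execute('SELECT 1')]
-#     True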
-
-
-def _dot_lookup(thing, comp, import_path):
-    try:
-        return getattr(thing, comp)
-    except AttributeError:
-        __import__(import_path)
-        return getattr(thing, comp)
-
-
-def _importer(target):
-    components = target.split('.')
-    import_path = components.pop(0)
-    thing = __import__(import_path)
-
-    for comp in components:
-        import_path += ".%s" % comp
-        thing = _dot_lookup(thing, comp, import_path)
-    return thing
-
-
-def _is_started(patcher):
-    # XXXX horrible
-    return hasattr(patcher, 'is_local')
-
-
-class _patch(object):
-
-    attribute_name = None
-    _active_patches = set()
-
-    def __init__(
-            self, getter, attribute, new, spec, create,
-            spec_set, autospec, new_callable, kwargs
-        ):
-        if new_callable is not None:
-            if new is not DEFAULT:
-                raise ValueError(
-                    "Cannot use 'new' and 'new_callable' together"
-                )
-            if autospec is not None:
-                raise ValueError(
-                    "Cannot use 'autospec' and 'new_callable' together"
-                )
-
-        self.getter = getter
-        self.attribute = attribute
-        self.new = new
-        self.new_callable = new_callable
-        self.spec = spec
-        self.create = create
-        self.has_local = False
-        self.spec_set = spec_set
-        self.autospec = autospec
-        self.kwargs = kwargs
-        self.additional_patchers = []
-
-
-    def copy(self):
-        patcher = _patch(
-            self.getter, self.attribute, self.new, self.spec,
-            self.create, self.spec_set,
-            self.autospec, self.new_callable, self.kwargs
-        )
-        patcher.attribute_name = self.attribute_name
-        patcher.additional_patchers = [
-            p.copy() for p in self.additional_patchers
-        ]
-        return patcher
-
-
-    def __call__(self, func):
-        if isinstance(func, ClassTypes):
-            return self.decorate_class(func)
-        return self.decorate_callable(func)
-
-
-    def decorate_class(self, klass):
-        for attr in dir(klass):
-            if not attr.startswith(patch.TEST_PREFIX):
-                continue
-
-            attr_value = getattr(klass, attr)
-            if not hasattr(attr_value, "__call__"):
-                continue
-
-            patcher = self.copy()
-            setattr(klass, attr, patcher(attr_value))
-        return klass
-
-
-    def decorate_callable(self, func):
-        if hasattr(func, 'patchings'):
-            func.patchings.append(self)
-            return func
-
-        @wraps(func)
-        def patched(*args, **keywargs):
-            # don't use a with here (backwards compatibility with Python 2.4)
-            extra_args = []
-            entered_patchers = []
-
-            # can't use try...except...finally because of Python 2.4
-            # compatibility
-            exc_info = tuple()
-            try:
-                try:
-                    for patching in patched.patchings:
-                        arg = patching.__enter__()
-                        entered_patchers.append(patching)
-                        if patching.attribute_name is not None:
-                            keywargs.update(arg)
-                        elif patching.new is DEFAULT:
-                            extra_args.append(arg)
-
-                    args += tuple(extra_args)
-                    return func(*args, **keywargs)
-                except:
-                    if (patching not in entered_patchers and
-                        _is_started(patching)):
-                        # the patcher may have been started, but an exception
-                        # raised whilst entering one of its additional_patchers
-                        entered_patchers.append(patching)
-                    # Pass the exception to __exit__
-                    exc_info = sys.exc_info()
-                    # re-raise the exception
-                    raise
-            finally:
-                for patching in reversed(entered_patchers):
-                    patching.__exit__(*exc_info)
-
-        patched.patchings = [self]
-        if hasattr(func, 'func_code'):
-            # not in Python 3
-            patched.compat_co_firstlineno = getattr(
-                func, "compat_co_firstlineno",
-                func.func_code.co_firstlineno
-            )
-        return patched
-
-
-    def get_original(self):
-        target = self.getter()
-        name = self.attribute
-
-        original = DEFAULT
-        local = False
-
-        try:
-            original = target.__dict__[name]
-        except (AttributeError, KeyError):
-            original = getattr(target, name, DEFAULT)
-        else:
-            local = True
-
-        if not self.create and original is DEFAULT:
-            raise AttributeError(
-                "%s does not have the attribute %r" % (target, name)
-            )
-        return original, local
-
-
-    def __enter__(self):
-        """Perform the patch."""
-        new, spec, spec_set = self.new, self.spec, self.spec_set
-        autospec, kwargs = self.autospec, self.kwargs
-        new_callable = self.new_callable
-        self.target = self.getter()
-
-        # normalise False to None
-        if spec is False:
-            spec = None
-        if spec_set is False:
-            spec_set = None
-        if autospec is False:
-            autospec = None
-
-        if spec is not None and autospec is not None:
-            raise TypeError("Can't specify spec and autospec")
-        if ((spec is not None or autospec is not None) and
-            spec_set not in (True, None)):
-            raise TypeError("Can't provide explicit spec_set *and* spec or autospec")
-
-        original, local = self.get_original()
-
-        if new is DEFAULT and autospec is None:
-            inherit = False
-            if spec is True:
-                # set spec to the object we are replacing
-                spec = original
-                if spec_set is True:
-                    spec_set = original
-                    spec = None
-            elif spec is not None:
-                if spec_set is True:
-                    spec_set = spec
-                    spec = None
-            elif spec_set is True:
-                spec_set = original
-
-            if spec is not None or spec_set is not None:
-                if original is DEFAULT:
-                    raise TypeError("Can't use 'spec' with create=True")
-                if isinstance(original, ClassTypes):
-                    # If we're patching out a class and there is a spec
-                    inherit = True
-
-            Klass = MagicMock
-            _kwargs = {}
-            if new_callable is not None:
-                Klass = new_callable
-            elif spec is not None or spec_set is not None:
-                this_spec = spec
-                if spec_set is not None:
-                    this_spec = spec_set
-                if _is_list(this_spec):
-                    not_callable = '__call__' not in this_spec
-                else:
-                    not_callable = not _callable(this_spec)
-                if not_callable:
-                    Klass = NonCallableMagicMock
-
-            if spec is not None:
-                _kwargs['spec'] = spec
-            if spec_set is not None:
-                _kwargs['spec_set'] = spec_set
-
-            # add a name to mocks
-            if (isinstance(Klass, type) and
-                issubclass(Klass, NonCallableMock) and self.attribute):
-                _kwargs['name'] = self.attribute
-
-            _kwargs.update(kwargs)
-            new = Klass(**_kwargs)
-
-            if inherit and _is_instance_mock(new):
-                # we can only tell if the instance should be callable if the
-                # spec is not a list
-                this_spec = spec
-                if spec_set is not None:
-                    this_spec = spec_set
-                if (not _is_list(this_spec) and not
-                    _instance_callable(this_spec)):
-                    Klass = NonCallableMagicMock
-
-                _kwargs.pop('name')
-                new.return_value = Klass(_new_parent=new, _new_name='()',
-                                         **_kwargs)
-        elif autospec is not None:
-            # spec is ignored, new *must* be default, spec_set is treated
-            # as a boolean. Should we check spec is not None and that spec_set
-            # is a bool?
-            if new is not DEFAULT:
-                raise TypeError(
-                    "autospec creates the mock for you. Can't specify "
-                    "autospec and new."
-                )
-            if original is DEFAULT:
-                raise TypeError("Can't use 'autospec' with create=True")
-            spec_set = bool(spec_set)
-            if autospec is True:
-                autospec = original
-
-            new = create_autospec(autospec, spec_set=spec_set,
-                                  _name=self.attribute, **kwargs)
-        elif kwargs:
-            # can't set keyword args when we aren't creating the mock
-            # XXXX If new is a Mock we could call new.configure_mock(**kwargs)
-            raise TypeError("Can't pass kwargs to a mock we aren't creating")
-
-        new_attr = new
-
-        self.temp_original = original
-        self.is_local = local
-        setattr(self.target, self.attribute, new_attr)
-        if self.attribute_name is not None:
-            extra_args = {}
-            if self.new is DEFAULT:
-                extra_args[self.attribute_name] = new
-            for patching in self.additional_patchers:
-                arg = patching.__enter__()
-                if patching.new is DEFAULT:
-                    extra_args.update(arg)
-            return extra_args
-
-        return new
-
-
-    def __exit__(self, *exc_info):
-        """Undo the patch."""
-        if not _is_started(self):
-            raise RuntimeError('stop called on unstarted patcher')
-
-        if self.is_local and self.temp_original is not DEFAULT:
-            setattr(self.target, self.attribute, self.temp_original)
-        else:
-            delattr(self.target, self.attribute)
-            if not self.create and not hasattr(self.target, self.attribute):
-                # needed for proxy objects like django settings
-                setattr(self.target, self.attribute, self.temp_original)
-
-        del self.temp_original
-        del self.is_local
-        del self.target
-        for patcher in reversed(self.additional_patchers):
-            if _is_started(patcher):
-                patcher.__exit__(*exc_info)
-
-
-    def start(self):
-        """Activate a patch, returning any created mock."""
-        result = self.__enter__()
-        self._active_patches.add(self)
-        return result
-
-
-    def stop(self):
-        """Stop an active patch."""
-        self._active_patches.discard(self)
-        return self.__exit__()
-
-
-
-def _get_target(target):
-    try:
-        target, attribute = target.rsplit('.', 1)
-    except (TypeError, ValueError):
-        raise TypeError("Need a valid target to patch. You supplied: %r" %
-                        (target,))
-    getter = lambda: _importer(target)
-    return getter, attribute
-
-
-def _patch_object(
-        target, attribute, new=DEFAULT, spec=None,
-        create=False, spec_set=None, autospec=None,
-        new_callable=None, **kwargs
-    ):
-    """
-    patch.object(target, attribute, new=DEFAULT, spec=None, create=False,
-                 spec_set=None, autospec=None, new_callable=None, **kwargs)
-
-    patch the named member (`attribute`) on an object (`target`) with a mock
-    object.
-
-    `patch.object` can be used as a decorator, class decorator or a context
-    manager. Arguments `new`, `spec`, `create`, `spec_set`,
-    `autospec` and `new_callable` have the same meaning as for `patch`. Like
-    `patch`, `patch.object` takes arbitrary keyword arguments for configuring
-    the mock object it creates.
-
-    When used as a class decorator `patch.object` honours `patch.TEST_PREFIX`
-    for choosing which methods to wrap.
-    """
-    getter = lambda: target
-    return _patch(
-        getter, attribute, new, spec, create,
-        spec_set, autospec, new_callable, kwargs
-    )
-
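-# Illustrative sketch of patch.object as a context manager (the real os
-# module is used purely as an example target):
-#
-#     >>> import os
-#     >>> with patch.object(os, 'listdir', return_value=['a.txt']) as mocked:
-#     ...     os.listdir('/anywhere')
-#     ['a.txt']
-#     >>> # the original os.listdir is restored on exit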
-
-def _patch_multiple(target, spec=None, create=False, spec_set=None,
-                    autospec=None, new_callable=None, **kwargs):
-    """Perform multiple patches in a single call. It takes the object to be
-    patched (either as an object or a string to fetch the object by importing)
-    and keyword arguments for the patches::
-
-        with patch.multiple(settings, FIRST_PATCH='one', SECOND_PATCH='two'):
-            ...
-
-    Use `DEFAULT` as the value if you want `patch.multiple` to create
-    mocks for you. In this case the created mocks are passed into a decorated
-    function by keyword, and a dictionary is returned when `patch.multiple` is
-    used as a context manager.
-
-    `patch.multiple` can be used as a decorator, class decorator or a context
-    manager. The arguments `spec`, `spec_set`, `create`,
-    `autospec` and `new_callable` have the same meaning as for `patch`. These
-    arguments will be applied to *all* patches done by `patch.multiple`.
-
-    When used as a class decorator `patch.multiple` honours `patch.TEST_PREFIX`
-    for choosing which methods to wrap.
-    """
-    if type(target) in (unicode, str):
-        getter = lambda: _importer(target)
-    else:
-        getter = lambda: target
-
-    if not kwargs:
-        raise ValueError(
-            'Must supply at least one keyword argument with patch.multiple'
-        )
-    # need to wrap in a list for python 3, where items is a view
-    items = list(kwargs.items())
-    attribute, new = items[0]
-    patcher = _patch(
-        getter, attribute, new, spec, create, spec_set,
-        autospec, new_callable, {}
-    )
-    patcher.attribute_name = attribute
-    for attribute, new in items[1:]:
-        this_patcher = _patch(
-            getter, attribute, new, spec, create, spec_set,
-            autospec, new_callable, {}
-        )
-        this_patcher.attribute_name = attribute
-        patcher.additional_patchers.append(this_patcher)
-    return patcher
-
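-# Illustrative sketch: DEFAULT asks patch.multiple to create the mocks,
-# which the context manager returns in a dict keyed by attribute name:
-#
-#     >>> import os.path
-#     >>> with patch.multiple('os.path', exists=DEFAULT, isdir=DEFAULT) as mocks:
-#     ...     mocks['exists'].return_value = True
-#     ...     os.path.exists('nowhere')
-#     True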
-
-def patch(
-        target, new=DEFAULT, spec=None, create=False,
-        spec_set=None, autospec=None, new_callable=None, **kwargs
-    ):
-    """
-    `patch` acts as a function decorator, class decorator or a context
-    manager. Inside the body of the function or with statement, the `target`
-    is patched with a `new` object. When the function/with statement exits
-    the patch is undone.
-
-    If `new` is omitted, then the target is replaced with a
-    `MagicMock`. If `patch` is used as a decorator and `new` is
-    omitted, the created mock is passed in as an extra argument to the
-    decorated function. If `patch` is used as a context manager the created
-    mock is returned by the context manager.
-
-    `target` should be a string in the form `'package.module.ClassName'`. The
-    `target` is imported and the specified object replaced with the `new`
-    object, so the `target` must be importable from the environment you are
-    calling `patch` from. The target is imported when the decorated function
-    is executed, not at decoration time.
-
-    The `spec` and `spec_set` keyword arguments are passed to the `MagicMock`
-    if patch is creating one for you.
-
-    In addition you can pass `spec=True` or `spec_set=True`, which causes
-    patch to pass in the object being mocked as the spec/spec_set object.
-
-    `new_callable` allows you to specify a different class, or callable object,
-    that will be called to create the `new` object. By default `MagicMock` is
-    used.
-
-    A more powerful form of `spec` is `autospec`. If you set `autospec=True`
-    then the mock will be created with a spec from the object being replaced.
-    All attributes of the mock will also have the spec of the corresponding
-    attribute of the object being replaced. Methods and functions being
-    mocked will have their arguments checked and will raise a `TypeError` if
-    they are called with the wrong signature. For mocks replacing a class,
-    their return value (the 'instance') will have the same spec as the class.
-
-    Instead of `autospec=True` you can pass `autospec=some_object` to use an
-    arbitrary object as the spec instead of the one being replaced.
-
-    By default `patch` will fail to replace attributes that don't exist. If
-    you pass in `create=True`, and the attribute doesn't exist, patch will
-    create the attribute for you when the patched function is called, and
-    delete it again afterwards. This is useful for writing tests against
-    attributes that your production code creates at runtime. It is off by
-    default because it can be dangerous. With it switched on you can write
-    passing tests against APIs that don't actually exist!
-
-    Patch can be used as a `TestCase` class decorator. It works by
-    decorating each test method in the class. This reduces the boilerplate
-    code when your test methods share a common set of patches. `patch` finds
-    tests by looking for method names that start with `patch.TEST_PREFIX`.
-    By default this is `test`, which matches the way `unittest` finds tests.
-    You can specify an alternative prefix by setting `patch.TEST_PREFIX`.
-
-    Patch can be used as a context manager, with the with statement. Here the
-    patching applies to the indented block after the with statement. If you
-    use "as" then the patched object will be bound to the name after the
-    "as"; very useful if `patch` is creating a mock object for you.
-
-    `patch` takes arbitrary keyword arguments. These will be passed to
-    the `Mock` (or `new_callable`) on construction.
-
-    `patch.dict(...)`, `patch.multiple(...)` and `patch.object(...)` are
-    available for alternate use-cases.
-    """
-    getter, attribute = _get_target(target)
-    return _patch(
-        getter, attribute, new, spec, create,
-        spec_set, autospec, new_callable, kwargs
-    )
-
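-# Illustrative sketch of patch as a decorator; the created MagicMock is
-# passed to the decorated function as an extra argument:
-#
-#     >>> import os.path
-#     >>> @patch('os.path.exists')
-#     ... def test_missing(mock_exists):
-#     ...     mock_exists.return_value = False
-#     ...     assert not os.path.exists('anything')
-#     >>> test_missing()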
-
-class _patch_dict(object):
-    """
-    Patch a dictionary, or dictionary like object, and restore the dictionary
-    to its original state after the test.
-
-    `in_dict` can be a dictionary or a mapping like container. If it is a
-    mapping then it must at least support getting, setting and deleting items
-    plus iterating over keys.
-
-    `in_dict` can also be a string specifying the name of the dictionary, which
-    will then be fetched by importing it.
-
-    `values` can be a dictionary of values to set in the dictionary. `values`
-    can also be an iterable of `(key, value)` pairs.
-
-    If `clear` is True then the dictionary will be cleared before the new
-    values are set.
-
-    `patch.dict` can also be called with arbitrary keyword arguments to set
-    values in the dictionary::
-
-        with patch.dict('sys.modules', mymodule=Mock(), other_module=Mock()):
-            ...
-
-    `patch.dict` can be used as a context manager, decorator or class
-    decorator. When used as a class decorator `patch.dict` honours
-    `patch.TEST_PREFIX` for choosing which methods to wrap.
-    """
-
-    def __init__(self, in_dict, values=(), clear=False, **kwargs):
-        if isinstance(in_dict, basestring):
-            in_dict = _importer(in_dict)
-        self.in_dict = in_dict
-        # support any argument supported by dict(...) constructor
-        self.values = dict(values)
-        self.values.update(kwargs)
-        self.clear = clear
-        self._original = None
-
-
-    def __call__(self, f):
-        if isinstance(f, ClassTypes):
-            return self.decorate_class(f)
-        @wraps(f)
-        def _inner(*args, **kw):
-            self._patch_dict()
-            try:
-                return f(*args, **kw)
-            finally:
-                self._unpatch_dict()
-
-        return _inner
-
-
-    def decorate_class(self, klass):
-        for attr in dir(klass):
-            attr_value = getattr(klass, attr)
-            if (attr.startswith(patch.TEST_PREFIX) and
-                 hasattr(attr_value, "__call__")):
-                decorator = _patch_dict(self.in_dict, self.values, self.clear)
-                decorated = decorator(attr_value)
-                setattr(klass, attr, decorated)
-        return klass
-
-
-    def __enter__(self):
-        """Patch the dict."""
-        self._patch_dict()
-
-
-    def _patch_dict(self):
-        values = self.values
-        in_dict = self.in_dict
-        clear = self.clear
-
-        try:
-            original = in_dict.copy()
-        except AttributeError:
-            # dict like object with no copy method
-            # must support iteration over keys
-            original = {}
-            for key in in_dict:
-                original[key] = in_dict[key]
-        self._original = original
-
-        if clear:
-            _clear_dict(in_dict)
-
-        try:
-            in_dict.update(values)
-        except AttributeError:
-            # dict like object with no update method
-            for key in values:
-                in_dict[key] = values[key]
-
-
-    def _unpatch_dict(self):
-        in_dict = self.in_dict
-        original = self._original
-
-        _clear_dict(in_dict)
-
-        try:
-            in_dict.update(original)
-        except AttributeError:
-            for key in original:
-                in_dict[key] = original[key]
-
-
-    def __exit__(self, *args):
-        """Unpatch the dict."""
-        self._unpatch_dict()
-        return False
-
-    start = __enter__
-    stop = __exit__
-
-
-def _clear_dict(in_dict):
-    try:
-        in_dict.clear()
-    except AttributeError:
-        keys = list(in_dict)
-        for key in keys:
-            del in_dict[key]
-
-
-def _patch_stopall():
-    """Stop all active patches."""
-    for patch in list(_patch._active_patches):
-        patch.stop()
-
-
-patch.object = _patch_object
-patch.dict = _patch_dict
-patch.multiple = _patch_multiple
-patch.stopall = _patch_stopall
-patch.TEST_PREFIX = 'test'
-
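-# Illustrative sketch of patch.dict; the mapping is restored on exit:
-#
-#     >>> foo = {'key': 'value'}
-#     >>> with patch.dict(foo, {'new': 'item'}, clear=True):
-#     ...     foo
-#     {'new': 'item'}
-#     >>> foo
-#     {'key': 'value'}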
-magic_methods = (
-    "lt le gt ge eq ne "
-    "getitem setitem delitem "
-    "len contains iter "
-    "hash str sizeof "
-    "enter exit "
-    "divmod neg pos abs invert "
-    "complex int float index "
-    "trunc floor ceil "
-)
-
-numerics = "add sub mul div floordiv mod lshift rshift and xor or pow "
-inplace = ' '.join('i%s' % n for n in numerics.split())
-right = ' '.join('r%s' % n for n in numerics.split())
-extra = ''
-if inPy3k:
-    extra = 'bool next '
-else:
-    extra = 'unicode long nonzero oct hex truediv rtruediv '
-
-# not including __prepare__, __instancecheck__, __subclasscheck__
-# (as they are metaclass methods)
-# __del__ is not supported at all as it causes problems if it exists
-
-_non_defaults = set('__%s__' % method for method in [
-    'cmp', 'getslice', 'setslice', 'coerce', 'subclasses',
-    'format', 'get', 'set', 'delete', 'reversed',
-    'missing', 'reduce', 'reduce_ex', 'getinitargs',
-    'getnewargs', 'getstate', 'setstate', 'getformat',
-    'setformat', 'repr', 'dir'
-])
-
-
-def _get_method(name, func):
-    "Turns a callable object (like a mock) into a real function"
-    def method(self, *args, **kw):
-        return func(self, *args, **kw)
-    method.__name__ = name
-    return method
-
-
-_magics = set(
-    '__%s__' % method for method in
-    ' '.join([magic_methods, numerics, inplace, right, extra]).split()
-)
-
-_all_magics = _magics | _non_defaults
-
-_unsupported_magics = set([
-    '__getattr__', '__setattr__',
-    '__init__', '__new__', '__prepare__',
-    '__instancecheck__', '__subclasscheck__',
-    '__del__'
-])
-
-_calculate_return_value = {
-    '__hash__': lambda self: object.__hash__(self),
-    '__str__': lambda self: object.__str__(self),
-    '__sizeof__': lambda self: object.__sizeof__(self),
-    '__unicode__': lambda self: unicode(object.__str__(self)),
-}
-
-_return_values = {
-    '__lt__': NotImplemented,
-    '__gt__': NotImplemented,
-    '__le__': NotImplemented,
-    '__ge__': NotImplemented,
-    '__int__': 1,
-    '__contains__': False,
-    '__len__': 0,
-    '__exit__': False,
-    '__complex__': 1j,
-    '__float__': 1.0,
-    '__bool__': True,
-    '__nonzero__': True,
-    '__oct__': '1',
-    '__hex__': '0x1',
-    '__long__': long(1),
-    '__index__': 1,
-}
-
-
-def _get_eq(self):
-    def __eq__(other):
-        ret_val = self.__eq__._mock_return_value
-        if ret_val is not DEFAULT:
-            return ret_val
-        return self is other
-    return __eq__
-
-def _get_ne(self):
-    def __ne__(other):
-        # honour a configured return value, as _get_eq does above
-        ret_val = self.__ne__._mock_return_value
-        if ret_val is not DEFAULT:
-            return ret_val
-        return self is not other
-    return __ne__
-
-def _get_iter(self):
-    def __iter__():
-        ret_val = self.__iter__._mock_return_value
-        if ret_val is DEFAULT:
-            return iter([])
-        # if ret_val was already an iterator, then calling iter on it should
-        # return the iterator unchanged
-        return iter(ret_val)
-    return __iter__
-
-_side_effect_methods = {
-    '__eq__': _get_eq,
-    '__ne__': _get_ne,
-    '__iter__': _get_iter,
-}
-
-
-
-def _set_return_value(mock, method, name):
-    fixed = _return_values.get(name, DEFAULT)
-    if fixed is not DEFAULT:
-        method.return_value = fixed
-        return
-
-    return_calculator = _calculate_return_value.get(name)
-    if return_calculator is not None:
-        try:
-            return_value = return_calculator(mock)
-        except AttributeError:
-            # XXXX why do we return AttributeError here?
-            #      set it as a side_effect instead?
-            return_value = AttributeError(name)
-        method.return_value = return_value
-        return
-
-    side_effector = _side_effect_methods.get(name)
-    if side_effector is not None:
-        method.side_effect = side_effector(mock)
-
-
-
-class MagicMixin(object):
-    def __init__(self, *args, **kw):
-        _super(MagicMixin, self).__init__(*args, **kw)
-        self._mock_set_magics()
-
-
-    def _mock_set_magics(self):
-        these_magics = _magics
-
-        if self._mock_methods is not None:
-            these_magics = _magics.intersection(self._mock_methods)
-
-            remove_magics = set()
-            remove_magics = _magics - these_magics
-
-            for entry in remove_magics:
-                if entry in type(self).__dict__:
-                    # remove unneeded magic methods
-                    delattr(self, entry)
-
-        # don't overwrite existing attributes if called a second time
-        these_magics = these_magics - set(type(self).__dict__)
-
-        _type = type(self)
-        for entry in these_magics:
-            setattr(_type, entry, MagicProxy(entry, self))
-
-
-
-class NonCallableMagicMock(MagicMixin, NonCallableMock):
-    """A version of `MagicMock` that isn't callable."""
-    def mock_add_spec(self, spec, spec_set=False):
-        """Add a spec to a mock. `spec` can either be an object or a
-        list of strings. Only attributes on the `spec` can be fetched as
-        attributes from the mock.
-
-        If `spec_set` is True then only attributes on the spec can be set."""
-        self._mock_add_spec(spec, spec_set)
-        self._mock_set_magics()
-
-
-
-class MagicMock(MagicMixin, Mock):
-    """
-    MagicMock is a subclass of Mock with default implementations
-    of most of the magic methods. You can use MagicMock without having to
-    configure the magic methods yourself.
-
-    If you use the `spec` or `spec_set` arguments then *only* magic
-    methods that exist in the spec will be created.
-
-    Attributes and the return value of a `MagicMock` will also be `MagicMocks`.
-    """
-    def mock_add_spec(self, spec, spec_set=False):
-        """Add a spec to a mock. `spec` can either be an object or a
-        list of strings. Only attributes on the `spec` can be fetched as
-        attributes from the mock.
-
-        If `spec_set` is True then only attributes on the spec can be set."""
-        self._mock_add_spec(spec, spec_set)
-        self._mock_set_magics()
-
-
-
-class MagicProxy(object):
-    def __init__(self, name, parent):
-        self.name = name
-        self.parent = parent
-
-    def __call__(self, *args, **kwargs):
-        m = self.create_mock()
-        return m(*args, **kwargs)
-
-    def create_mock(self):
-        entry = self.name
-        parent = self.parent
-        m = parent._get_child_mock(name=entry, _new_name=entry,
-                                   _new_parent=parent)
-        setattr(parent, entry, m)
-        _set_return_value(parent, m, entry)
-        return m
-
-    def __get__(self, obj, _type=None):
-        return self.create_mock()
-
-
-
-class _ANY(object):
-    "A helper object that compares equal to everything."
-
-    def __eq__(self, other):
-        return True
-
-    def __ne__(self, other):
-        return False
-
-    def __repr__(self):
-        return '<ANY>'
-
-ANY = _ANY()
-
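-# Illustrative sketch: ANY stands in for arguments that are awkward to
-# reproduce in an assertion:
-#
-#     >>> m = Mock()
-#     >>> _ = m('x', 123)
-#     >>> m.assert_called_with('x', ANY)   # matches whatever was passed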
-
-
-def _format_call_signature(name, args, kwargs):
-    message = '%s(%%s)' % name
-    formatted_args = ''
-    args_string = ', '.join([repr(arg) for arg in args])
-    kwargs_string = ', '.join([
-        '%s=%r' % (key, value) for key, value in kwargs.items()
-    ])
-    if args_string:
-        formatted_args = args_string
-    if kwargs_string:
-        if formatted_args:
-            formatted_args += ', '
-        formatted_args += kwargs_string
-
-    return message % formatted_args
-
-
-
-class _Call(tuple):
-    """
-    A tuple for holding the results of a call to a mock, either in the form
-    `(args, kwargs)` or `(name, args, kwargs)`.
-
-    If args or kwargs are empty then a call tuple will compare equal to
-    a tuple without those values. This makes comparisons less verbose::
-
-        _Call(('name', (), {})) == ('name',)
-        _Call(('name', (1,), {})) == ('name', (1,))
-        _Call(((), {'a': 'b'})) == ({'a': 'b'},)
-
-    The `_Call` object provides a useful shortcut for comparing with call::
-
-        _Call(((1, 2), {'a': 3})) == call(1, 2, a=3)
-        _Call(('foo', (1, 2), {'a': 3})) == call.foo(1, 2, a=3)
-
-    If the _Call has no name then it will match any name.
-    """
-    def __new__(cls, value=(), name=None, parent=None, two=False,
-                from_kall=True):
-        name = ''
-        args = ()
-        kwargs = {}
-        _len = len(value)
-        if _len == 3:
-            name, args, kwargs = value
-        elif _len == 2:
-            first, second = value
-            if isinstance(first, basestring):
-                name = first
-                if isinstance(second, tuple):
-                    args = second
-                else:
-                    kwargs = second
-            else:
-                args, kwargs = first, second
-        elif _len == 1:
-            value, = value
-            if isinstance(value, basestring):
-                name = value
-            elif isinstance(value, tuple):
-                args = value
-            else:
-                kwargs = value
-
-        if two:
-            return tuple.__new__(cls, (args, kwargs))
-
-        return tuple.__new__(cls, (name, args, kwargs))
-
-
-    def __init__(self, value=(), name=None, parent=None, two=False,
-                 from_kall=True):
-        self.name = name
-        self.parent = parent
-        self.from_kall = from_kall
-
-
-    def __eq__(self, other):
-        if other is ANY:
-            return True
-        try:
-            len_other = len(other)
-        except TypeError:
-            return False
-
-        self_name = ''
-        if len(self) == 2:
-            self_args, self_kwargs = self
-        else:
-            self_name, self_args, self_kwargs = self
-
-        other_name = ''
-        if len_other == 0:
-            other_args, other_kwargs = (), {}
-        elif len_other == 3:
-            other_name, other_args, other_kwargs = other
-        elif len_other == 1:
-            value, = other
-            if isinstance(value, tuple):
-                other_args = value
-                other_kwargs = {}
-            elif isinstance(value, basestring):
-                other_name = value
-                other_args, other_kwargs = (), {}
-            else:
-                other_args = ()
-                other_kwargs = value
-        else:
-            # len 2
-            # could be (name, args) or (name, kwargs) or (args, kwargs)
-            first, second = other
-            if isinstance(first, basestring):
-                other_name = first
-                if isinstance(second, tuple):
-                    other_args, other_kwargs = second, {}
-                else:
-                    other_args, other_kwargs = (), second
-            else:
-                other_args, other_kwargs = first, second
-
-        if self_name and other_name != self_name:
-            return False
-
-        # this order is important for ANY to work!
-        return (other_args, other_kwargs) == (self_args, self_kwargs)
-
-
-    def __ne__(self, other):
-        return not self.__eq__(other)
-
-
-    def __call__(self, *args, **kwargs):
-        if self.name is None:
-            return _Call(('', args, kwargs), name='()')
-
-        name = self.name + '()'
-        return _Call((self.name, args, kwargs), name=name, parent=self)
-
-
-    def __getattr__(self, attr):
-        if self.name is None:
-            return _Call(name=attr, from_kall=False)
-        name = '%s.%s' % (self.name, attr)
-        return _Call(name=name, parent=self, from_kall=False)
-
-
-    def __repr__(self):
-        if not self.from_kall:
-            name = self.name or 'call'
-            if name.startswith('()'):
-                name = 'call%s' % name
-            return name
-
-        if len(self) == 2:
-            name = 'call'
-            args, kwargs = self
-        else:
-            name, args, kwargs = self
-            if not name:
-                name = 'call'
-            elif not name.startswith('()'):
-                name = 'call.%s' % name
-            else:
-                name = 'call%s' % name
-        return _format_call_signature(name, args, kwargs)
-
-
-    def call_list(self):
-        """For a call object that represents multiple calls, `call_list`
-        returns a list of all the intermediate calls as well as the
-        final call."""
-        vals = []
-        thing = self
-        while thing is not None:
-            if thing.from_kall:
-                vals.append(thing)
-            thing = thing.parent
-        return _CallList(reversed(vals))
-
-
-call = _Call(from_kall=False)
-
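-# Illustrative sketch of chained call construction, mirroring the pattern
-# from the mock documentation:
-#
-#     >>> m = MagicMock()
-#     >>> _ = m(1).method(arg='foo')
-#     >>> m.mock_calls == call(1).method(arg='foo').call_list()
-#     True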
-
-
-def create_autospec(spec, spec_set=False, instance=False, _parent=None,
-                    _name=None, **kwargs):
-    """Create a mock object using another object as a spec. Attributes on the
-    mock will use the corresponding attribute on the `spec` object as their
-    spec.
-
-    Functions or methods being mocked will have their arguments checked
-    to ensure that they are called with the correct signature.
-
-    If `spec_set` is True then attempting to set attributes that don't exist
-    on the spec object will raise an `AttributeError`.
-
-    If a class is used as a spec then the return value of the mock (the
-    instance of the class) will have the same spec. You can use a class as the
-    spec for an instance object by passing `instance=True`. The returned mock
-    will only be callable if instances of the mock are callable.
-
-    `create_autospec` also takes arbitrary keyword arguments that are passed to
-    the constructor of the created mock."""
-    if _is_list(spec):
-        # can't pass a list instance to the mock constructor as it will be
-        # interpreted as a list of strings
-        spec = type(spec)
-
-    is_type = isinstance(spec, ClassTypes)
-
-    _kwargs = {'spec': spec}
-    if spec_set:
-        _kwargs = {'spec_set': spec}
-    elif spec is None:
-        # spec is None: mock with a normal mock, without a spec
-        _kwargs = {}
-
-    _kwargs.update(kwargs)
-
-    Klass = MagicMock
-    if type(spec) in DescriptorTypes:
-        # descriptors don't have a spec
-        # because we don't know what type they return
-        _kwargs = {}
-    elif not _callable(spec):
-        Klass = NonCallableMagicMock
-    elif is_type and instance and not _instance_callable(spec):
-        Klass = NonCallableMagicMock
-
-    _new_name = _name
-    if _parent is None:
-        # for a top level object no _new_name should be set
-        _new_name = ''
-
-    mock = Klass(parent=_parent, _new_parent=_parent, _new_name=_new_name,
-                 name=_name, **_kwargs)
-
-    if isinstance(spec, FunctionTypes):
-        # should only happen at the top level because we don't
-        # recurse for functions
-        mock = _set_signature(mock, spec)
-    else:
-        _check_signature(spec, mock, is_type, instance)
-
-    if _parent is not None and not instance:
-        _parent._mock_children[_name] = mock
-
-    if is_type and not instance and 'return_value' not in kwargs:
-        mock.return_value = create_autospec(spec, spec_set, instance=True,
-                                            _name='()', _parent=mock)
-
-    for entry in dir(spec):
-        if _is_magic(entry):
-            # MagicMock already does the useful magic methods for us
-            continue
-
-        if isinstance(spec, FunctionTypes) and entry in FunctionAttributes:
-            # allow a mock to actually be a function
-            continue
-
-        # XXXX do we need a better way of getting attributes without
-        # triggering code execution (?) Probably not - we need the actual
-        # object to mock it so we would rather trigger a property than mock
-        # the property descriptor. Likewise we want to mock out dynamically
-        # provided attributes.
-        # XXXX what about attributes that raise exceptions other than
-        # AttributeError on being fetched?
-        # we could be resilient against it, or catch and propagate the
-        # exception when the attribute is fetched from the mock
-        try:
-            original = getattr(spec, entry)
-        except AttributeError:
-            continue
-
-        kwargs = {'spec': original}
-        if spec_set:
-            kwargs = {'spec_set': original}
-
-        if not isinstance(original, FunctionTypes):
-            new = _SpecState(original, spec_set, mock, entry, instance)
-            mock._mock_children[entry] = new
-        else:
-            parent = mock
-            if isinstance(spec, FunctionTypes):
-                parent = mock.mock
-
-            new = MagicMock(parent=parent, name=entry, _new_name=entry,
-                            _new_parent=parent, **kwargs)
-            mock._mock_children[entry] = new
-            skipfirst = _must_skip(spec, entry, is_type)
-            _check_signature(original, new, skipfirst=skipfirst)
-
-        # so functions created with _set_signature become instance attributes,
-        # *plus* their underlying mock exists in _mock_children of the parent
-        # mock. Adding to _mock_children may be unnecessary where we are also
-        # setting as an instance attribute?
-        if isinstance(new, FunctionTypes):
-            setattr(mock, entry, new)
-
-    return mock
-
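
A brief sketch of the behaviour implemented above, using a hypothetical `Database` class: mocked methods are signature-checked, and attributes that do not exist on the spec raise `AttributeError`.

    from mock import create_autospec

    class Database(object):  # hypothetical example class
        def query(self, sql, timeout=10):
            pass

    db = create_autospec(Database, instance=True)
    db.query('SELECT 1')
    db.query.assert_called_once_with('SELECT 1')

    # the spec'd signature is enforced: missing arguments raise TypeError
    try:
        db.query()
    except TypeError:
        pass

    # and attributes absent from the spec raise AttributeError
    try:
        db.missing
    except AttributeError:
        pass
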
-
-def _must_skip(spec, entry, is_type):
-    if not isinstance(spec, ClassTypes):
-        if entry in getattr(spec, '__dict__', {}):
-            # instance attribute - shouldn't skip
-            return False
-        spec = spec.__class__
-    if not hasattr(spec, '__mro__'):
-        # old style class: can't have descriptors anyway
-        return is_type
-
-    for klass in spec.__mro__:
-        result = klass.__dict__.get(entry, DEFAULT)
-        if result is DEFAULT:
-            continue
-        if isinstance(result, (staticmethod, classmethod)):
-            return False
-        return is_type
-
-    # shouldn't get here unless function is a dynamically provided attribute
-    # XXXX untested behaviour
-    return is_type
-
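
`_must_skip` above decides whether the implicit first argument (`self`, or `cls` for classmethods) should be dropped when a mocked method's signature is checked. The observable effect, sketched with a hypothetical class:

    from mock import create_autospec

    class Widget(object):  # hypothetical example class
        def resize(self, w, h):
            pass
        @staticmethod
        def version():
            pass

    instance = create_autospec(Widget)()

    # 'self' is skipped, so only (w, h) are checked
    instance.resize(3, 4)
    instance.resize.assert_called_with(3, 4)

    # staticmethods never had an implicit first argument to skip
    instance.version()
    instance.version.assert_called_with()
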
-
-def _get_class(obj):
-    try:
-        return obj.__class__
-    except AttributeError:
-        # in Python 2, _sre.SRE_Pattern objects have no __class__
-        return type(obj)
-
-
-class _SpecState(object):
-
-    def __init__(self, spec, spec_set=False, parent=None,
-                 name=None, ids=None, instance=False):
-        self.spec = spec
-        self.ids = ids
-        self.spec_set = spec_set
-        self.parent = parent
-        self.instance = instance
-        self.name = name
-
-
-FunctionTypes = (
-    # python function
-    type(create_autospec),
-    # instance method
-    type(ANY.__eq__),
-    # unbound method
-    type(_ANY.__eq__),
-)
-
-FunctionAttributes = set([
-    'func_closure',
-    'func_code',
-    'func_defaults',
-    'func_dict',
-    'func_doc',
-    'func_globals',
-    'func_name',
-])
-
-
-file_spec = None
-
-
-def mock_open(mock=None, read_data=''):
-    """
-    A helper function to create a mock to replace the use of `open`. It works
-    for `open` called directly or used as a context manager.
-
-    The `mock` argument is the mock object to configure. If `None` (the
-    default) then a `MagicMock` will be created for you, with the API limited
-    to methods or attributes available on standard file handles.
-
-    `read_data` is a string for the `read` method of the file handle to return.
-    This is an empty string by default.
-    """
-    global file_spec
-    if file_spec is None:
-        # set on first use
-        if inPy3k:
-            import _io
-            file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO))))
-        else:
-            file_spec = file
-
-    if mock is None:
-        mock = MagicMock(name='open', spec=open)
-
-    handle = MagicMock(spec=file_spec)
-    handle.write.return_value = None
-    handle.__enter__.return_value = handle
-    handle.read.return_value = read_data
-
-    mock.return_value = handle
-    return mock
-
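
A short usage sketch for `mock_open`: patching the module-level `open` (with `create=True`, since `open` is not otherwise an attribute of the module) returns the pre-configured handle both from a direct call and from a `with` block.

    from mock import mock_open, patch

    m = mock_open(read_data='contents')
    with patch('%s.open' % __name__, m, create=True):
        with open('config.txt') as f:  # no file is actually touched
            data = f.read()

    assert data == 'contents'
    m.assert_called_once_with('config.txt')
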
-
-class PropertyMock(Mock):
-    """
-    A mock intended to be used as a property, or other descriptor, on a class.
-    `PropertyMock` provides `__get__` and `__set__` methods so you can specify
-    a return value when it is fetched.
-
-    Fetching a `PropertyMock` instance from an object calls the mock, with
-    no args. Setting it calls the mock with the value being set.
-    """
-    def _get_child_mock(self, **kwargs):
-        return MagicMock(**kwargs)
-
-    def __get__(self, obj, obj_type):
-        return self()
-    def __set__(self, obj, val):
-        self(val)
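
A sketch of `PropertyMock` in use; it has to be attached to the type rather than the instance so that the descriptor protocol fires:

    from mock import MagicMock, PropertyMock

    m = MagicMock()
    prop = PropertyMock(return_value='fetched')
    type(m).foo = prop  # set on the type, not the instance

    assert m.foo == 'fetched'  # __get__ calls the mock with no args
    m.foo = 'stored'           # __set__ calls the mock with the value
    prop.assert_called_with('stored')
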
diff --git a/branch-1.2/ambari-common/src/test/python/mock/mock.wpr b/branch-1.2/ambari-common/src/test/python/mock/mock.wpr
deleted file mode 100644
index e1ded97..0000000
--- a/branch-1.2/ambari-common/src/test/python/mock/mock.wpr
+++ /dev/null
@@ -1,26 +0,0 @@
-#!wing
-#!version=4.0
-##################################################################
-# Wing IDE project file                                          #
-##################################################################
-[project attributes]
-proj.directory-list = [{'dirloc': loc('.'),
-                        'excludes': [u'latex',
-                                     u'.hg',
-                                     u'.tox',
-                                     u'dist',
-                                     u'htmlcov',
-                                     u'extendmock.py',
-                                     u'__pycache__',
-                                     u'html',
-                                     u'build',
-                                     u'mock.egg-info',
-                                     u'tests/__pycache__',
-                                     u'.hgignore',
-                                     u'.hgtags'],
-                        'filter': '*',
-                        'include_hidden': 0,
-                        'recursive': 1,
-                        'watch_for_changes': 1}]
-proj.file-type = 'shared'
-testing.auto-test-file-specs = ('test*.py',)
diff --git a/branch-1.2/ambari-common/src/test/python/mock/setup.cfg b/branch-1.2/ambari-common/src/test/python/mock/setup.cfg
deleted file mode 100644
index 566eb37..0000000
--- a/branch-1.2/ambari-common/src/test/python/mock/setup.cfg
+++ /dev/null
@@ -1,5 +0,0 @@
-[build_sphinx]
-source-dir=docs
-build-dir=html
-[sdist]
-force-manifest = 1
diff --git a/branch-1.2/ambari-common/src/test/python/mock/setup.py b/branch-1.2/ambari-common/src/test/python/mock/setup.py
deleted file mode 100644
index a6ee625..0000000
--- a/branch-1.2/ambari-common/src/test/python/mock/setup.py
+++ /dev/null
@@ -1,72 +0,0 @@
-#! /usr/bin/env python
-
-# Copyright (C) 2007-2012 Michael Foord & the mock team
-# E-mail: fuzzyman AT voidspace DOT org DOT uk
-# http://www.voidspace.org.uk/python/mock/
-
-from mock import __version__
-
-import os
-
-
-NAME = 'mock'
-MODULES = ['mock']
-DESCRIPTION = 'A Python Mocking and Patching Library for Testing'
-
-URL = "http://www.voidspace.org.uk/python/mock/"
-
-readme = os.path.join(os.path.dirname(__file__), 'README.txt')
-LONG_DESCRIPTION = open(readme).read()
-
-CLASSIFIERS = [
-    'Development Status :: 5 - Production/Stable',
-    'Environment :: Console',
-    'Intended Audience :: Developers',
-    'License :: OSI Approved :: BSD License',
-    'Programming Language :: Python',
-    'Programming Language :: Python :: 2',
-    'Programming Language :: Python :: 3',
-    'Programming Language :: Python :: 2.5',
-    'Programming Language :: Python :: 2.6',
-    'Programming Language :: Python :: 2.7',
-    'Programming Language :: Python :: 3.1',
-    'Programming Language :: Python :: 3.2',
-    'Programming Language :: Python :: 3.3',
-    'Programming Language :: Python :: Implementation :: CPython',
-    'Programming Language :: Python :: Implementation :: PyPy',
-    'Programming Language :: Python :: Implementation :: Jython',
-    'Operating System :: OS Independent',
-    'Topic :: Software Development :: Libraries',
-    'Topic :: Software Development :: Libraries :: Python Modules',
-    'Topic :: Software Development :: Testing',
-]
-
-AUTHOR = 'Michael Foord'
-AUTHOR_EMAIL = 'michael@voidspace.org.uk'
-KEYWORDS = ("testing test mock mocking unittest patching "
-            "stubs fakes doubles").split(' ')
-
-params = dict(
-    name=NAME,
-    version=__version__,
-    py_modules=MODULES,
-
-    # metadata for upload to PyPI
-    author=AUTHOR,
-    author_email=AUTHOR_EMAIL,
-    description=DESCRIPTION,
-    long_description=LONG_DESCRIPTION,
-    keywords=KEYWORDS,
-    url=URL,
-    classifiers=CLASSIFIERS,
-)
-
-try:
-    from setuptools import setup
-except ImportError:
-    from distutils.core import setup
-else:
-    params['tests_require'] = ['unittest2']
-    params['test_suite'] = 'unittest2.collector'
-
-setup(**params)
diff --git a/branch-1.2/ambari-common/src/test/python/mock/tests/__init__.py b/branch-1.2/ambari-common/src/test/python/mock/tests/__init__.py
deleted file mode 100644
index 54ddf2e..0000000
--- a/branch-1.2/ambari-common/src/test/python/mock/tests/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-# Copyright (C) 2007-2012 Michael Foord & the mock team
-# E-mail: fuzzyman AT voidspace DOT org DOT uk
-# http://www.voidspace.org.uk/python/mock/
diff --git a/branch-1.2/ambari-common/src/test/python/mock/tests/_testwith.py b/branch-1.2/ambari-common/src/test/python/mock/tests/_testwith.py
deleted file mode 100644
index 0b54780..0000000
--- a/branch-1.2/ambari-common/src/test/python/mock/tests/_testwith.py
+++ /dev/null
@@ -1,181 +0,0 @@
-# Copyright (C) 2007-2012 Michael Foord & the mock team
-# E-mail: fuzzyman AT voidspace DOT org DOT uk
-# http://www.voidspace.org.uk/python/mock/
-
-from __future__ import with_statement
-
-from tests.support import unittest2, is_instance
-
-from mock import MagicMock, Mock, patch, sentinel, mock_open, call
-
-from tests.support_with import catch_warnings, nested
-
-something = sentinel.Something
-something_else = sentinel.SomethingElse
-
-
-
-class WithTest(unittest2.TestCase):
-
-    def test_with_statement(self):
-        with patch('tests._testwith.something', sentinel.Something2):
-            self.assertEqual(something, sentinel.Something2, "unpatched")
-        self.assertEqual(something, sentinel.Something)
-
-
-    def test_with_statement_exception(self):
-        try:
-            with patch('tests._testwith.something', sentinel.Something2):
-                self.assertEqual(something, sentinel.Something2, "unpatched")
-                raise Exception('pow')
-        except Exception:
-            pass
-        else:
-            self.fail("patch swallowed exception")
-        self.assertEqual(something, sentinel.Something)
-
-
-    def test_with_statement_as(self):
-        with patch('tests._testwith.something') as mock_something:
-            self.assertEqual(something, mock_something, "unpatched")
-            self.assertTrue(is_instance(mock_something, MagicMock),
-                            "patching wrong type")
-        self.assertEqual(something, sentinel.Something)
-
-
-    def test_patch_object_with_statement(self):
-        class Foo(object):
-            something = 'foo'
-        original = Foo.something
-        with patch.object(Foo, 'something'):
-            self.assertNotEqual(Foo.something, original, "unpatched")
-        self.assertEqual(Foo.something, original)
-
-
-    def test_with_statement_nested(self):
-        with catch_warnings(record=True):
-            # nested is deprecated in Python 2.7
-            with nested(patch('tests._testwith.something'),
-                    patch('tests._testwith.something_else')) as (mock_something, mock_something_else):
-                self.assertEqual(something, mock_something, "unpatched")
-                self.assertEqual(something_else, mock_something_else,
-                                 "unpatched")
-        self.assertEqual(something, sentinel.Something)
-        self.assertEqual(something_else, sentinel.SomethingElse)
-
-
-    def test_with_statement_specified(self):
-        with patch('tests._testwith.something', sentinel.Patched) as mock_something:
-            self.assertEqual(something, mock_something, "unpatched")
-            self.assertEqual(mock_something, sentinel.Patched, "wrong patch")
-        self.assertEqual(something, sentinel.Something)
-
-
-    def testContextManagerMocking(self):
-        mock = Mock()
-        mock.__enter__ = Mock()
-        mock.__exit__ = Mock()
-        mock.__exit__.return_value = False
-
-        with mock as m:
-            self.assertEqual(m, mock.__enter__.return_value)
-        mock.__enter__.assert_called_with()
-        mock.__exit__.assert_called_with(None, None, None)
-
-
-    def test_context_manager_with_magic_mock(self):
-        mock = MagicMock()
-
-        with self.assertRaises(TypeError):
-            with mock:
-                'foo' + 3
-        mock.__enter__.assert_called_with()
-        self.assertTrue(mock.__exit__.called)
-
-
-    def test_with_statement_same_attribute(self):
-        with patch('tests._testwith.something', sentinel.Patched) as mock_something:
-            self.assertEqual(something, mock_something, "unpatched")
-
-            with patch('tests._testwith.something') as mock_again:
-                self.assertEqual(something, mock_again, "unpatched")
-
-            self.assertEqual(something, mock_something,
-                             "restored with wrong instance")
-
-        self.assertEqual(something, sentinel.Something, "not restored")
-
-
-    def test_with_statement_imbricated(self):
-        with patch('tests._testwith.something') as mock_something:
-            self.assertEqual(something, mock_something, "unpatched")
-
-            with patch('tests._testwith.something_else') as mock_something_else:
-                self.assertEqual(something_else, mock_something_else,
-                                 "unpatched")
-
-        self.assertEqual(something, sentinel.Something)
-        self.assertEqual(something_else, sentinel.SomethingElse)
-
-
-    def test_dict_context_manager(self):
-        foo = {}
-        with patch.dict(foo, {'a': 'b'}):
-            self.assertEqual(foo, {'a': 'b'})
-        self.assertEqual(foo, {})
-
-        with self.assertRaises(NameError):
-            with patch.dict(foo, {'a': 'b'}):
-                self.assertEqual(foo, {'a': 'b'})
-                raise NameError('Konrad')
-
-        self.assertEqual(foo, {})
-
-
-
-class TestMockOpen(unittest2.TestCase):
-
-    def test_mock_open(self):
-        mock = mock_open()
-        with patch('%s.open' % __name__, mock, create=True) as patched:
-            self.assertIs(patched, mock)
-            open('foo')
-
-        mock.assert_called_once_with('foo')
-
-
-    def test_mock_open_context_manager(self):
-        mock = mock_open()
-        handle = mock.return_value
-        with patch('%s.open' % __name__, mock, create=True):
-            with open('foo') as f:
-                f.read()
-
-        expected_calls = [call('foo'), call().__enter__(), call().read(),
-                          call().__exit__(None, None, None)]
-        self.assertEqual(mock.mock_calls, expected_calls)
-        self.assertIs(f, handle)
-
-
-    def test_explicit_mock(self):
-        mock = MagicMock()
-        mock_open(mock)
-
-        with patch('%s.open' % __name__, mock, create=True) as patched:
-            self.assertIs(patched, mock)
-            open('foo')
-
-        mock.assert_called_once_with('foo')
-
-
-    def test_read_data(self):
-        mock = mock_open(read_data='foo')
-        with patch('%s.open' % __name__, mock, create=True):
-            h = open('bar')
-            result = h.read()
-
-        self.assertEqual(result, 'foo')
-
-
-if __name__ == '__main__':
-    unittest2.main()
diff --git a/branch-1.2/ambari-common/src/test/python/mock/tests/support.py b/branch-1.2/ambari-common/src/test/python/mock/tests/support.py
deleted file mode 100644
index 1b10c34..0000000
--- a/branch-1.2/ambari-common/src/test/python/mock/tests/support.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import sys
-
-info = sys.version_info
-if info[:3] >= (3, 2, 0):
-    # for Python 3.2 ordinary unittest is fine
-    import unittest as unittest2
-else:
-    import unittest2
-
-
-try:
-    callable = callable
-except NameError:
-    def callable(obj):
-        return hasattr(obj, '__call__')
-
-
-inPy3k = sys.version_info[0] == 3
-with_available = sys.version_info[:2] >= (2, 5)
-
-
-def is_instance(obj, klass):
-    """Version of is_instance that doesn't access __class__"""
-    return issubclass(type(obj), klass)
-
-
-class SomeClass(object):
-    class_attribute = None
-
-    def wibble(self):
-        pass
-
-
-class X(object):
-    pass
-
-try:
-    next = next
-except NameError:
-    def next(obj):
-        return obj.next()
diff --git a/branch-1.2/ambari-common/src/test/python/mock/tests/support_with.py b/branch-1.2/ambari-common/src/test/python/mock/tests/support_with.py
deleted file mode 100644
index fa28612..0000000
--- a/branch-1.2/ambari-common/src/test/python/mock/tests/support_with.py
+++ /dev/null
@@ -1,93 +0,0 @@
-from __future__ import with_statement
-
-import sys
-
-__all__ = ['nested', 'catch_warnings', 'examine_warnings']
-
-
-try:
-    from contextlib import nested
-except ImportError:
-    from contextlib import contextmanager
-    @contextmanager
-    def nested(*managers):
-        exits = []
-        vars = []
-        exc = (None, None, None)
-        try:
-            for mgr in managers:
-                exit = mgr.__exit__
-                enter = mgr.__enter__
-                vars.append(enter())
-                exits.append(exit)
-            yield vars
-        except:
-            exc = sys.exc_info()
-        finally:
-            while exits:
-                exit = exits.pop()
-                try:
-                    if exit(*exc):
-                        exc = (None, None, None)
-                except:
-                    exc = sys.exc_info()
-            if exc != (None, None, None):
-                raise exc[1]
-
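
For reference, a sketch of the backported `nested` above in action (equivalent to stacked `with` blocks; exits run in reverse order). The `tag` helper is hypothetical:

    from contextlib import contextmanager

    @contextmanager
    def tag(name):  # hypothetical helper
        print('enter %s' % name)
        yield name
        print('exit %s' % name)

    with nested(tag('outer'), tag('inner')) as (outer, inner):
        assert (outer, inner) == ('outer', 'inner')
    # prints: enter outer, enter inner, exit inner, exit outer
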
-# copied from Python 2.6
-try:
-    from warnings import catch_warnings
-except ImportError:
-    class catch_warnings(object):
-        def __init__(self, record=False, module=None):
-            self._record = record
-            self._module = sys.modules['warnings']
-            self._entered = False
-
-        def __repr__(self):
-            args = []
-            if self._record:
-                args.append("record=True")
-            name = type(self).__name__
-            return "%s(%s)" % (name, ", ".join(args))
-
-        def __enter__(self):
-            if self._entered:
-                raise RuntimeError("Cannot enter %r twice" % self)
-            self._entered = True
-            self._filters = self._module.filters
-            self._module.filters = self._filters[:]
-            self._showwarning = self._module.showwarning
-            if self._record:
-                log = []
-                def showwarning(*args, **kwargs):
-                    log.append(WarningMessage(*args, **kwargs))
-                self._module.showwarning = showwarning
-                return log
-            else:
-                return None
-
-        def __exit__(self, *exc_info):
-            if not self._entered:
-                raise RuntimeError("Cannot exit %r without entering first" % self)
-            self._module.filters = self._filters
-            self._module.showwarning = self._showwarning
-
-    class WarningMessage(object):
-        _WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
-                            "line")
-        def __init__(self, message, category, filename, lineno, file=None,
-                        line=None):
-            local_values = locals()
-            for attr in self._WARNING_DETAILS:
-                setattr(self, attr, local_values[attr])
-            self._category_name = None
-            if category.__name__:
-                self._category_name = category.__name__
-
-
-def examine_warnings(func):
-    def wrapper():
-        with catch_warnings(record=True) as ws:
-            func(ws)
-    return wrapper
diff --git a/branch-1.2/ambari-common/src/test/python/mock/tests/testcallable.py b/branch-1.2/ambari-common/src/test/python/mock/tests/testcallable.py
deleted file mode 100644
index f7dcd5e..0000000
--- a/branch-1.2/ambari-common/src/test/python/mock/tests/testcallable.py
+++ /dev/null
@@ -1,158 +0,0 @@
-# Copyright (C) 2007-2012 Michael Foord & the mock team
-# E-mail: fuzzyman AT voidspace DOT org DOT uk
-# http://www.voidspace.org.uk/python/mock/
-
-from tests.support import is_instance, unittest2, X, SomeClass
-
-from mock import (
-    Mock, MagicMock, NonCallableMagicMock,
-    NonCallableMock, patch, create_autospec,
-    CallableMixin
-)
-
-
-
-class TestCallable(unittest2.TestCase):
-
-    def assertNotCallable(self, mock):
-        self.assertTrue(is_instance(mock, NonCallableMagicMock))
-        self.assertFalse(is_instance(mock, CallableMixin))
-
-
-    def test_non_callable(self):
-        for mock in NonCallableMagicMock(), NonCallableMock():
-            self.assertRaises(TypeError, mock)
-            self.assertFalse(hasattr(mock, '__call__'))
-            self.assertIn(mock.__class__.__name__, repr(mock))
-
-
-    def test_hierarchy(self):
-        self.assertTrue(issubclass(MagicMock, Mock))
-        self.assertTrue(issubclass(NonCallableMagicMock, NonCallableMock))
-
-
-    def test_attributes(self):
-        one = NonCallableMock()
-        self.assertTrue(issubclass(type(one.one), Mock))
-
-        two = NonCallableMagicMock()
-        self.assertTrue(issubclass(type(two.two), MagicMock))
-
-
-    def test_subclasses(self):
-        class MockSub(Mock):
-            pass
-
-        one = MockSub()
-        self.assertTrue(issubclass(type(one.one), MockSub))
-
-        class MagicSub(MagicMock):
-            pass
-
-        two = MagicSub()
-        self.assertTrue(issubclass(type(two.two), MagicSub))
-
-
-    def test_patch_spec(self):
-        patcher = patch('%s.X' % __name__, spec=True)
-        mock = patcher.start()
-        self.addCleanup(patcher.stop)
-
-        instance = mock()
-        mock.assert_called_once_with()
-
-        self.assertNotCallable(instance)
-        self.assertRaises(TypeError, instance)
-
-
-    def test_patch_spec_set(self):
-        patcher = patch('%s.X' % __name__, spec_set=True)
-        mock = patcher.start()
-        self.addCleanup(patcher.stop)
-
-        instance = mock()
-        mock.assert_called_once_with()
-
-        self.assertNotCallable(instance)
-        self.assertRaises(TypeError, instance)
-
-
-    def test_patch_spec_instance(self):
-        patcher = patch('%s.X' % __name__, spec=X())
-        mock = patcher.start()
-        self.addCleanup(patcher.stop)
-
-        self.assertNotCallable(mock)
-        self.assertRaises(TypeError, mock)
-
-
-    def test_patch_spec_set_instance(self):
-        patcher = patch('%s.X' % __name__, spec_set=X())
-        mock = patcher.start()
-        self.addCleanup(patcher.stop)
-
-        self.assertNotCallable(mock)
-        self.assertRaises(TypeError, mock)
-
-
-    def test_patch_spec_callable_class(self):
-        class CallableX(X):
-            def __call__(self):
-                pass
-
-        class Sub(CallableX):
-            pass
-
-        class Multi(SomeClass, Sub):
-            pass
-
-        class OldStyle:
-            def __call__(self):
-                pass
-
-        class OldStyleSub(OldStyle):
-            pass
-
-        for arg in 'spec', 'spec_set':
-            for Klass in CallableX, Sub, Multi, OldStyle, OldStyleSub:
-                patcher = patch('%s.X' % __name__, **{arg: Klass})
-                mock = patcher.start()
-
-                try:
-                    instance = mock()
-                    mock.assert_called_once_with()
-
-                    self.assertTrue(is_instance(instance, MagicMock))
-                    # inherited spec
-                    self.assertRaises(AttributeError, getattr, instance,
-                                      'foobarbaz')
-
-                    result = instance()
-                    # instance is callable, result has no spec
-                    instance.assert_called_once_with()
-
-                    result(3, 2, 1)
-                    result.assert_called_once_with(3, 2, 1)
-                    result.foo(3, 2, 1)
-                    result.foo.assert_called_once_with(3, 2, 1)
-                finally:
-                    patcher.stop()
-
-
-    def test_create_autospec(self):
-        mock = create_autospec(X)
-        instance = mock()
-        self.assertRaises(TypeError, instance)
-
-        mock = create_autospec(X())
-        self.assertRaises(TypeError, mock)
-
-
-    def test_create_autospec_instance(self):
-        mock = create_autospec(SomeClass, instance=True)
-
-        self.assertRaises(TypeError, mock)
-        mock.wibble()
-        mock.wibble.assert_called_once_with()
-
-        self.assertRaises(TypeError, mock.wibble, 'some', 'args')
diff --git a/branch-1.2/ambari-common/src/test/python/mock/tests/testhelpers.py b/branch-1.2/ambari-common/src/test/python/mock/tests/testhelpers.py
deleted file mode 100644
index e788da8..0000000
--- a/branch-1.2/ambari-common/src/test/python/mock/tests/testhelpers.py
+++ /dev/null
@@ -1,940 +0,0 @@
-# Copyright (C) 2007-2012 Michael Foord & the mock team
-# E-mail: fuzzyman AT voidspace DOT org DOT uk
-# http://www.voidspace.org.uk/python/mock/
-
-from tests.support import unittest2, inPy3k
-
-from mock import (
-    call, _Call, create_autospec, MagicMock,
-    Mock, ANY, _CallList, patch, PropertyMock
-)
-
-from datetime import datetime
-
-class SomeClass(object):
-    def one(self, a, b):
-        pass
-    def two(self):
-        pass
-    def three(self, a=None):
-        pass
-
-
-
-class AnyTest(unittest2.TestCase):
-
-    def test_any(self):
-        self.assertEqual(ANY, object())
-
-        mock = Mock()
-        mock(ANY)
-        mock.assert_called_with(ANY)
-
-        mock = Mock()
-        mock(foo=ANY)
-        mock.assert_called_with(foo=ANY)
-
-    def test_repr(self):
-        self.assertEqual(repr(ANY), '<ANY>')
-        self.assertEqual(str(ANY), '<ANY>')
-
-
-    def test_any_and_datetime(self):
-        mock = Mock()
-        mock(datetime.now(), foo=datetime.now())
-
-        mock.assert_called_with(ANY, foo=ANY)
-
-
-    def test_any_mock_calls_comparison_order(self):
-        mock = Mock()
-        d = datetime.now()
-        class Foo(object):
-            def __eq__(self, other):
-                return False
-            def __ne__(self, other):
-                return True
-
-        for d in datetime.now(), Foo():
-            mock.reset_mock()
-
-            mock(d, foo=d, bar=d)
-            mock.method(d, zinga=d, alpha=d)
-            mock().method(a1=d, z99=d)
-
-            expected = [
-                call(ANY, foo=ANY, bar=ANY),
-                call.method(ANY, zinga=ANY, alpha=ANY),
-                call(), call().method(a1=ANY, z99=ANY)
-            ]
-            self.assertEqual(expected, mock.mock_calls)
-            self.assertEqual(mock.mock_calls, expected)
-
-
-
-class CallTest(unittest2.TestCase):
-
-    def test_call_with_call(self):
-        kall = _Call()
-        self.assertEqual(kall, _Call())
-        self.assertEqual(kall, _Call(('',)))
-        self.assertEqual(kall, _Call(((),)))
-        self.assertEqual(kall, _Call(({},)))
-        self.assertEqual(kall, _Call(('', ())))
-        self.assertEqual(kall, _Call(('', {})))
-        self.assertEqual(kall, _Call(('', (), {})))
-        self.assertEqual(kall, _Call(('foo',)))
-        self.assertEqual(kall, _Call(('bar', ())))
-        self.assertEqual(kall, _Call(('baz', {})))
-        self.assertEqual(kall, _Call(('spam', (), {})))
-
-        kall = _Call(((1, 2, 3),))
-        self.assertEqual(kall, _Call(((1, 2, 3),)))
-        self.assertEqual(kall, _Call(('', (1, 2, 3))))
-        self.assertEqual(kall, _Call(((1, 2, 3), {})))
-        self.assertEqual(kall, _Call(('', (1, 2, 3), {})))
-
-        kall = _Call(((1, 2, 4),))
-        self.assertNotEqual(kall, _Call(('', (1, 2, 3))))
-        self.assertNotEqual(kall, _Call(('', (1, 2, 3), {})))
-
-        kall = _Call(('foo', (1, 2, 4),))
-        self.assertNotEqual(kall, _Call(('', (1, 2, 4))))
-        self.assertNotEqual(kall, _Call(('', (1, 2, 4), {})))
-        self.assertNotEqual(kall, _Call(('bar', (1, 2, 4))))
-        self.assertNotEqual(kall, _Call(('bar', (1, 2, 4), {})))
-
-        kall = _Call(({'a': 3},))
-        self.assertEqual(kall, _Call(('', (), {'a': 3})))
-        self.assertEqual(kall, _Call(('', {'a': 3})))
-        self.assertEqual(kall, _Call(((), {'a': 3})))
-        self.assertEqual(kall, _Call(({'a': 3},)))
-
-
-    def test_empty__Call(self):
-        args = _Call()
-
-        self.assertEqual(args, ())
-        self.assertEqual(args, ('foo',))
-        self.assertEqual(args, ((),))
-        self.assertEqual(args, ('foo', ()))
-        self.assertEqual(args, ('foo', (), {}))
-        self.assertEqual(args, ('foo', {}))
-        self.assertEqual(args, ({},))
-
-
-    def test_named_empty_call(self):
-        args = _Call(('foo', (), {}))
-
-        self.assertEqual(args, ('foo',))
-        self.assertEqual(args, ('foo', ()))
-        self.assertEqual(args, ('foo', (), {}))
-        self.assertEqual(args, ('foo', {}))
-
-        self.assertNotEqual(args, ((),))
-        self.assertNotEqual(args, ())
-        self.assertNotEqual(args, ({},))
-        self.assertNotEqual(args, ('bar',))
-        self.assertNotEqual(args, ('bar', ()))
-        self.assertNotEqual(args, ('bar', {}))
-
-
-    def test_call_with_args(self):
-        args = _Call(((1, 2, 3), {}))
-
-        self.assertEqual(args, ((1, 2, 3),))
-        self.assertEqual(args, ('foo', (1, 2, 3)))
-        self.assertEqual(args, ('foo', (1, 2, 3), {}))
-        self.assertEqual(args, ((1, 2, 3), {}))
-
-
-    def test_named_call_with_args(self):
-        args = _Call(('foo', (1, 2, 3), {}))
-
-        self.assertEqual(args, ('foo', (1, 2, 3)))
-        self.assertEqual(args, ('foo', (1, 2, 3), {}))
-
-        self.assertNotEqual(args, ((1, 2, 3),))
-        self.assertNotEqual(args, ((1, 2, 3), {}))
-
-
-    def test_call_with_kwargs(self):
-        args = _Call(((), dict(a=3, b=4)))
-
-        self.assertEqual(args, (dict(a=3, b=4),))
-        self.assertEqual(args, ('foo', dict(a=3, b=4)))
-        self.assertEqual(args, ('foo', (), dict(a=3, b=4)))
-        self.assertEqual(args, ((), dict(a=3, b=4)))
-
-
-    def test_named_call_with_kwargs(self):
-        args = _Call(('foo', (), dict(a=3, b=4)))
-
-        self.assertEqual(args, ('foo', dict(a=3, b=4)))
-        self.assertEqual(args, ('foo', (), dict(a=3, b=4)))
-
-        self.assertNotEqual(args, (dict(a=3, b=4),))
-        self.assertNotEqual(args, ((), dict(a=3, b=4)))
-
-
-    def test_call_with_args_call_empty_name(self):
-        args = _Call(((1, 2, 3), {}))
-        self.assertEqual(args, call(1, 2, 3))
-        self.assertEqual(call(1, 2, 3), args)
-        self.assertTrue(call(1, 2, 3) in [args])
-
-
-    def test_call_ne(self):
-        self.assertNotEqual(_Call(((1, 2, 3),)), call(1, 2))
-        self.assertFalse(_Call(((1, 2, 3),)) != call(1, 2, 3))
-        self.assertTrue(_Call(((1, 2), {})) != call(1, 2, 3))
-
-
-    def test_call_non_tuples(self):
-        kall = _Call(((1, 2, 3),))
-        for value in 1, None, self, int:
-            self.assertNotEqual(kall, value)
-            self.assertFalse(kall == value)
-
-
-    def test_repr(self):
-        self.assertEqual(repr(_Call()), 'call()')
-        self.assertEqual(repr(_Call(('foo',))), 'call.foo()')
-
-        self.assertEqual(repr(_Call(((1, 2, 3), {'a': 'b'}))),
-                         "call(1, 2, 3, a='b')")
-        self.assertEqual(repr(_Call(('bar', (1, 2, 3), {'a': 'b'}))),
-                         "call.bar(1, 2, 3, a='b')")
-
-        self.assertEqual(repr(call), 'call')
-        self.assertEqual(str(call), 'call')
-
-        self.assertEqual(repr(call()), 'call()')
-        self.assertEqual(repr(call(1)), 'call(1)')
-        self.assertEqual(repr(call(zz='thing')), "call(zz='thing')")
-
-        self.assertEqual(repr(call().foo), 'call().foo')
-        self.assertEqual(repr(call(1).foo.bar(a=3).bing),
-                         'call().foo.bar().bing')
-        self.assertEqual(
-            repr(call().foo(1, 2, a=3)),
-            "call().foo(1, 2, a=3)"
-        )
-        self.assertEqual(repr(call()()), "call()()")
-        self.assertEqual(repr(call(1)(2)), "call()(2)")
-        self.assertEqual(
-            repr(call()().bar().baz.beep(1)),
-            "call()().bar().baz.beep(1)"
-        )
-
-
-    def test_call(self):
-        self.assertEqual(call(), ('', (), {}))
-        self.assertEqual(call('foo', 'bar', one=3, two=4),
-                         ('', ('foo', 'bar'), {'one': 3, 'two': 4}))
-
-        mock = Mock()
-        mock(1, 2, 3)
-        mock(a=3, b=6)
-        self.assertEqual(mock.call_args_list,
-                         [call(1, 2, 3), call(a=3, b=6)])
-
-    def test_attribute_call(self):
-        self.assertEqual(call.foo(1), ('foo', (1,), {}))
-        self.assertEqual(call.bar.baz(fish='eggs'),
-                         ('bar.baz', (), {'fish': 'eggs'}))
-
-        mock = Mock()
-        mock.foo(1, 2, 3)
-        mock.bar.baz(a=3, b=6)
-        self.assertEqual(mock.method_calls,
-                         [call.foo(1, 2, 3), call.bar.baz(a=3, b=6)])
-
-
-    def test_extended_call(self):
-        result = call(1).foo(2).bar(3, a=4)
-        self.assertEqual(result, ('().foo().bar', (3,), dict(a=4)))
-
-        mock = MagicMock()
-        mock(1, 2, a=3, b=4)
-        self.assertEqual(mock.call_args, call(1, 2, a=3, b=4))
-        self.assertNotEqual(mock.call_args, call(1, 2, 3))
-
-        self.assertEqual(mock.call_args_list, [call(1, 2, a=3, b=4)])
-        self.assertEqual(mock.mock_calls, [call(1, 2, a=3, b=4)])
-
-        mock = MagicMock()
-        mock.foo(1).bar()().baz.beep(a=6)
-
-        last_call = call.foo(1).bar()().baz.beep(a=6)
-        self.assertEqual(mock.mock_calls[-1], last_call)
-        self.assertEqual(mock.mock_calls, last_call.call_list())
-
-
-    def test_call_list(self):
-        mock = MagicMock()
-        mock(1)
-        self.assertEqual(call(1).call_list(), mock.mock_calls)
-
-        mock = MagicMock()
-        mock(1).method(2)
-        self.assertEqual(call(1).method(2).call_list(),
-                         mock.mock_calls)
-
-        mock = MagicMock()
-        mock(1).method(2)(3)
-        self.assertEqual(call(1).method(2)(3).call_list(),
-                         mock.mock_calls)
-
-        mock = MagicMock()
-        int(mock(1).method(2)(3).foo.bar.baz(4)(5))
-        kall = call(1).method(2)(3).foo.bar.baz(4)(5).__int__()
-        self.assertEqual(kall.call_list(), mock.mock_calls)
-
-
-    def test_call_any(self):
-        self.assertEqual(call, ANY)
-
-        m = MagicMock()
-        int(m)
-        self.assertEqual(m.mock_calls, [ANY])
-        self.assertEqual([ANY], m.mock_calls)
-
-
-    def test_two_args_call(self):
-        args = _Call(((1, 2), {'a': 3}), two=True)
-        self.assertEqual(len(args), 2)
-        self.assertEqual(args[0], (1, 2))
-        self.assertEqual(args[1], {'a': 3})
-
-        other_args = _Call(((1, 2), {'a': 3}))
-        self.assertEqual(args, other_args)
-
-
-class SpecSignatureTest(unittest2.TestCase):
-
-    def _check_someclass_mock(self, mock):
-        self.assertRaises(AttributeError, getattr, mock, 'foo')
-        mock.one(1, 2)
-        mock.one.assert_called_with(1, 2)
-        self.assertRaises(AssertionError,
-                          mock.one.assert_called_with, 3, 4)
-        self.assertRaises(TypeError, mock.one, 1)
-
-        mock.two()
-        mock.two.assert_called_with()
-        self.assertRaises(AssertionError,
-                          mock.two.assert_called_with, 3)
-        self.assertRaises(TypeError, mock.two, 1)
-
-        mock.three()
-        mock.three.assert_called_with()
-        self.assertRaises(AssertionError,
-                          mock.three.assert_called_with, 3)
-        self.assertRaises(TypeError, mock.three, 3, 2)
-
-        mock.three(1)
-        mock.three.assert_called_with(1)
-
-        mock.three(a=1)
-        mock.three.assert_called_with(a=1)
-
-
-    def test_basic(self):
-        for spec in (SomeClass, SomeClass()):
-            mock = create_autospec(spec)
-            self._check_someclass_mock(mock)
-
-
-    def test_create_autospec_return_value(self):
-        def f():
-            pass
-        mock = create_autospec(f, return_value='foo')
-        self.assertEqual(mock(), 'foo')
-
-        class Foo(object):
-            pass
-
-        mock = create_autospec(Foo, return_value='foo')
-        self.assertEqual(mock(), 'foo')
-
-
-    def test_autospec_reset_mock(self):
-        m = create_autospec(int)
-        int(m)
-        m.reset_mock()
-        self.assertEqual(m.__int__.call_count, 0)
-
-
-    def test_mocking_unbound_methods(self):
-        class Foo(object):
-            def foo(self, foo):
-                pass
-        p = patch.object(Foo, 'foo')
-        mock_foo = p.start()
-        Foo().foo(1)
-
-        mock_foo.assert_called_with(1)
-
-
-    @unittest2.expectedFailure
-    def test_create_autospec_unbound_methods(self):
-        # see issue 128
-        class Foo(object):
-            def foo(self):
-                pass
-
-        klass = create_autospec(Foo)
-        instance = klass()
-        self.assertRaises(TypeError, instance.foo, 1)
-
-        # Note: no type checking on the "self" parameter
-        klass.foo(1)
-        klass.foo.assert_called_with(1)
-        self.assertRaises(TypeError, klass.foo)
-
-
-    def test_create_autospec_keyword_arguments(self):
-        class Foo(object):
-            a = 3
-        m = create_autospec(Foo, a='3')
-        self.assertEqual(m.a, '3')
-
-    @unittest2.skipUnless(inPy3k, "Keyword only arguments Python 3 specific")
-    def test_create_autospec_keyword_only_arguments(self):
-        func_def = "def foo(a, *, b=None):\n    pass\n"
-        namespace = {}
-        exec (func_def, namespace)
-        foo = namespace['foo']
-
-        m = create_autospec(foo)
-        m(1)
-        m.assert_called_with(1)
-        self.assertRaises(TypeError, m, 1, 2)
-
-        m(2, b=3)
-        m.assert_called_with(2, b=3)
-
-    def test_function_as_instance_attribute(self):
-        obj = SomeClass()
-        def f(a):
-            pass
-        obj.f = f
-
-        mock = create_autospec(obj)
-        mock.f('bing')
-        mock.f.assert_called_with('bing')
-
-
-    def test_spec_as_list(self):
-        # because a spec given as a list of strings in the mock constructor
-        # means something very different, we treat a list instance as its type.
-        mock = create_autospec([])
-        mock.append('foo')
-        mock.append.assert_called_with('foo')
-
-        self.assertRaises(AttributeError, getattr, mock, 'foo')
-
-        class Foo(object):
-            foo = []
-
-        mock = create_autospec(Foo)
-        mock.foo.append(3)
-        mock.foo.append.assert_called_with(3)
-        self.assertRaises(AttributeError, getattr, mock.foo, 'foo')
-
-
-    def test_attributes(self):
-        class Sub(SomeClass):
-            attr = SomeClass()
-
-        sub_mock = create_autospec(Sub)
-
-        for mock in (sub_mock, sub_mock.attr):
-            self._check_someclass_mock(mock)
-
-
-    def test_builtin_functions_types(self):
-        # we could replace builtin functions / methods with a function
-        # with *args / **kwargs signature. Using the builtin method type
-        # as a spec seems to work fairly well though.
-        class BuiltinSubclass(list):
-            def bar(self, arg):
-                pass
-            sorted = sorted
-            attr = {}
-
-        mock = create_autospec(BuiltinSubclass)
-        mock.append(3)
-        mock.append.assert_called_with(3)
-        self.assertRaises(AttributeError, getattr, mock.append, 'foo')
-
-        mock.bar('foo')
-        mock.bar.assert_called_with('foo')
-        self.assertRaises(TypeError, mock.bar, 'foo', 'bar')
-        self.assertRaises(AttributeError, getattr, mock.bar, 'foo')
-
-        mock.sorted([1, 2])
-        mock.sorted.assert_called_with([1, 2])
-        self.assertRaises(AttributeError, getattr, mock.sorted, 'foo')
-
-        mock.attr.pop(3)
-        mock.attr.pop.assert_called_with(3)
-        self.assertRaises(AttributeError, getattr, mock.attr, 'foo')
-
-
-    def test_method_calls(self):
-        class Sub(SomeClass):
-            attr = SomeClass()
-
-        mock = create_autospec(Sub)
-        mock.one(1, 2)
-        mock.two()
-        mock.three(3)
-
-        expected = [call.one(1, 2), call.two(), call.three(3)]
-        self.assertEqual(mock.method_calls, expected)
-
-        mock.attr.one(1, 2)
-        mock.attr.two()
-        mock.attr.three(3)
-
-        expected.extend(
-            [call.attr.one(1, 2), call.attr.two(), call.attr.three(3)]
-        )
-        self.assertEqual(mock.method_calls, expected)
-
-
-    def test_magic_methods(self):
-        class BuiltinSubclass(list):
-            attr = {}
-
-        mock = create_autospec(BuiltinSubclass)
-        self.assertEqual(list(mock), [])
-        self.assertRaises(TypeError, int, mock)
-        self.assertRaises(TypeError, int, mock.attr)
-        self.assertEqual(list(mock), [])
-
-        self.assertIsInstance(mock['foo'], MagicMock)
-        self.assertIsInstance(mock.attr['foo'], MagicMock)
-
-
-    def test_spec_set(self):
-        class Sub(SomeClass):
-            attr = SomeClass()
-
-        for spec in (Sub, Sub()):
-            mock = create_autospec(spec, spec_set=True)
-            self._check_someclass_mock(mock)
-
-            self.assertRaises(AttributeError, setattr, mock, 'foo', 'bar')
-            self.assertRaises(AttributeError, setattr, mock.attr, 'foo', 'bar')
-
-
-    def test_descriptors(self):
-        class Foo(object):
-            @classmethod
-            def f(cls, a, b):
-                pass
-            @staticmethod
-            def g(a, b):
-                pass
-
-        class Bar(Foo):
-            pass
-
-        class Baz(SomeClass, Bar):
-            pass
-
-        for spec in (Foo, Foo(), Bar, Bar(), Baz, Baz()):
-            mock = create_autospec(spec)
-            mock.f(1, 2)
-            mock.f.assert_called_once_with(1, 2)
-
-            mock.g(3, 4)
-            mock.g.assert_called_once_with(3, 4)
-
-
-    @unittest2.skipIf(inPy3k, "No old style classes in Python 3")
-    def test_old_style_classes(self):
-        class Foo:
-            def f(self, a, b):
-                pass
-
-        class Bar(Foo):
-            g = Foo()
-
-        for spec in (Foo, Foo(), Bar, Bar()):
-            mock = create_autospec(spec)
-            mock.f(1, 2)
-            mock.f.assert_called_once_with(1, 2)
-
-            self.assertRaises(AttributeError, getattr, mock, 'foo')
-            self.assertRaises(AttributeError, getattr, mock.f, 'foo')
-
-        mock.g.f(1, 2)
-        mock.g.f.assert_called_once_with(1, 2)
-        self.assertRaises(AttributeError, getattr, mock.g, 'foo')
-
-
-    def test_recursive(self):
-        class A(object):
-            def a(self):
-                pass
-            foo = 'foo bar baz'
-            bar = foo
-
-        A.B = A
-        mock = create_autospec(A)
-
-        mock()
-        self.assertFalse(mock.B.called)
-
-        mock.a()
-        mock.B.a()
-        self.assertEqual(mock.method_calls, [call.a(), call.B.a()])
-
-        self.assertIs(A.foo, A.bar)
-        self.assertIsNot(mock.foo, mock.bar)
-        mock.foo.lower()
-        self.assertRaises(AssertionError, mock.bar.lower.assert_called_with)
-
-
-    def test_spec_inheritance_for_classes(self):
-        class Foo(object):
-            def a(self):
-                pass
-            class Bar(object):
-                def f(self):
-                    pass
-
-        class_mock = create_autospec(Foo)
-
-        self.assertIsNot(class_mock, class_mock())
-
-        for this_mock in class_mock, class_mock():
-            this_mock.a()
-            this_mock.a.assert_called_with()
-            self.assertRaises(TypeError, this_mock.a, 'foo')
-            self.assertRaises(AttributeError, getattr, this_mock, 'b')
-
-        instance_mock = create_autospec(Foo())
-        instance_mock.a()
-        instance_mock.a.assert_called_with()
-        self.assertRaises(TypeError, instance_mock.a, 'foo')
-        self.assertRaises(AttributeError, getattr, instance_mock, 'b')
-
-        # The return value isn't callable
-        self.assertRaises(TypeError, instance_mock)
-
-        instance_mock.Bar.f()
-        instance_mock.Bar.f.assert_called_with()
-        self.assertRaises(AttributeError, getattr, instance_mock.Bar, 'g')
-
-        instance_mock.Bar().f()
-        instance_mock.Bar().f.assert_called_with()
-        self.assertRaises(AttributeError, getattr, instance_mock.Bar(), 'g')
-
-
-    def test_inherit(self):
-        class Foo(object):
-            a = 3
-
-        Foo.Foo = Foo
-
-        # class
-        mock = create_autospec(Foo)
-        instance = mock()
-        self.assertRaises(AttributeError, getattr, instance, 'b')
-
-        attr_instance = mock.Foo()
-        self.assertRaises(AttributeError, getattr, attr_instance, 'b')
-
-        # instance
-        mock = create_autospec(Foo())
-        self.assertRaises(AttributeError, getattr, mock, 'b')
-        self.assertRaises(TypeError, mock)
-
-        # attribute instance
-        call_result = mock.Foo()
-        self.assertRaises(AttributeError, getattr, call_result, 'b')
-
-
-    def test_builtins(self):
-        # used to fail with infinite recursion
-        create_autospec(1)
-
-        create_autospec(int)
-        create_autospec('foo')
-        create_autospec(str)
-        create_autospec({})
-        create_autospec(dict)
-        create_autospec([])
-        create_autospec(list)
-        create_autospec(set())
-        create_autospec(set)
-        create_autospec(1.0)
-        create_autospec(float)
-        create_autospec(1j)
-        create_autospec(complex)
-        create_autospec(False)
-        create_autospec(True)
-
-
-    def test_function(self):
-        def f(a, b):
-            pass
-
-        mock = create_autospec(f)
-        self.assertRaises(TypeError, mock)
-        mock(1, 2)
-        mock.assert_called_with(1, 2)
-
-        f.f = f
-        mock = create_autospec(f)
-        self.assertRaises(TypeError, mock.f)
-        mock.f(3, 4)
-        mock.f.assert_called_with(3, 4)
-
-
-    def test_skip_attributeerrors(self):
-        class Raiser(object):
-            def __get__(self, obj, type=None):
-                if obj is None:
-                    raise AttributeError('Can only be accessed via an instance')
-
-        class RaiserClass(object):
-            raiser = Raiser()
-
-            @staticmethod
-            def existing(a, b):
-                return a + b
-
-        s = create_autospec(RaiserClass)
-        self.assertRaises(TypeError, lambda: s.existing(1, 2, 3))
-        s.existing(1, 2)
-        self.assertRaises(AttributeError, lambda: s.nonexisting)
-
-        # check we can fetch the raiser attribute and it has no spec
-        obj = s.raiser
-        obj.foo, obj.bar
-
-
-    def test_signature_class(self):
-        class Foo(object):
-            def __init__(self, a, b=3):
-                pass
-
-        mock = create_autospec(Foo)
-
-        self.assertRaises(TypeError, mock)
-        mock(1)
-        mock.assert_called_once_with(1)
-
-        mock(4, 5)
-        mock.assert_called_with(4, 5)
-
-
-    @unittest2.skipIf(inPy3k, 'no old style classes in Python 3')
-    def test_signature_old_style_class(self):
-        class Foo:
-            def __init__(self, a, b=3):
-                pass
-
-        mock = create_autospec(Foo)
-
-        self.assertRaises(TypeError, mock)
-        mock(1)
-        mock.assert_called_once_with(1)
-
-        mock(4, 5)
-        mock.assert_called_with(4, 5)
-
-
-    def test_class_with_no_init(self):
-        # this used to raise an exception
-        # due to trying to get a signature from object.__init__
-        class Foo(object):
-            pass
-        create_autospec(Foo)
-
-
-    @unittest2.skipIf(inPy3k, 'no old style classes in Python 3')
-    def test_old_style_class_with_no_init(self):
-        # this used to raise an exception
-        # due to Foo.__init__ raising an AttributeError
-        class Foo:
-            pass
-        create_autospec(Foo)
-
-
-    def test_signature_callable(self):
-        class Callable(object):
-            def __init__(self):
-                pass
-            def __call__(self, a):
-                pass
-
-        mock = create_autospec(Callable)
-        mock()
-        mock.assert_called_once_with()
-        self.assertRaises(TypeError, mock, 'a')
-
-        instance = mock()
-        self.assertRaises(TypeError, instance)
-        instance(a='a')
-        instance.assert_called_once_with(a='a')
-        instance('a')
-        instance.assert_called_with('a')
-
-        mock = create_autospec(Callable())
-        mock(a='a')
-        mock.assert_called_once_with(a='a')
-        self.assertRaises(TypeError, mock)
-        mock('a')
-        mock.assert_called_with('a')
-
-
-    def test_signature_noncallable(self):
-        class NonCallable(object):
-            def __init__(self):
-                pass
-
-        mock = create_autospec(NonCallable)
-        instance = mock()
-        mock.assert_called_once_with()
-        self.assertRaises(TypeError, mock, 'a')
-        self.assertRaises(TypeError, instance)
-        self.assertRaises(TypeError, instance, 'a')
-
-        mock = create_autospec(NonCallable())
-        self.assertRaises(TypeError, mock)
-        self.assertRaises(TypeError, mock, 'a')
-
-
-    def test_create_autospec_none(self):
-        class Foo(object):
-            bar = None
-
-        mock = create_autospec(Foo)
-        none = mock.bar
-        self.assertNotIsInstance(none, type(None))
-
-        none.foo()
-        none.foo.assert_called_once_with()
-
-
-    def test_autospec_functions_with_self_in_odd_place(self):
-        class Foo(object):
-            def f(a, self):
-                pass
-
-        a = create_autospec(Foo)
-        a.f(self=10)
-        a.f.assert_called_with(self=10)
-
-
-    def test_autospec_property(self):
-        class Foo(object):
-            @property
-            def foo(self):
-                return 3
-
-        foo = create_autospec(Foo)
-        mock_property = foo.foo
-
-        # no spec on properties
-        self.assertTrue(isinstance(mock_property, MagicMock))
-        mock_property(1, 2, 3)
-        mock_property.abc(4, 5, 6)
-        mock_property.assert_called_once_with(1, 2, 3)
-        mock_property.abc.assert_called_once_with(4, 5, 6)
-
-
-    def test_autospec_slots(self):
-        class Foo(object):
-            __slots__ = ['a']
-
-        foo = create_autospec(Foo)
-        mock_slot = foo.a
-
-        # no spec on slots
-        mock_slot(1, 2, 3)
-        mock_slot.abc(4, 5, 6)
-        mock_slot.assert_called_once_with(1, 2, 3)
-        mock_slot.abc.assert_called_once_with(4, 5, 6)
-
-
-class TestCallList(unittest2.TestCase):
-
-    def test_args_list_contains_call_list(self):
-        mock = Mock()
-        self.assertIsInstance(mock.call_args_list, _CallList)
-
-        mock(1, 2)
-        mock(a=3)
-        mock(3, 4)
-        mock(b=6)
-
-        for kall in call(1, 2), call(a=3), call(3, 4), call(b=6):
-            self.assertTrue(kall in mock.call_args_list)
-
-        calls = [call(a=3), call(3, 4)]
-        self.assertTrue(calls in mock.call_args_list)
-        calls = [call(1, 2), call(a=3)]
-        self.assertTrue(calls in mock.call_args_list)
-        calls = [call(3, 4), call(b=6)]
-        self.assertTrue(calls in mock.call_args_list)
-        calls = [call(3, 4)]
-        self.assertTrue(calls in mock.call_args_list)
-
-        self.assertFalse(call('fish') in mock.call_args_list)
-        self.assertFalse([call('fish')] in mock.call_args_list)
-
-
-    def test_call_list_str(self):
-        mock = Mock()
-        mock(1, 2)
-        mock.foo(a=3)
-        mock.foo.bar().baz('fish', cat='dog')
-
-        expected = (
-            "[call(1, 2),\n"
-            " call.foo(a=3),\n"
-            " call.foo.bar(),\n"
-            " call.foo.bar().baz('fish', cat='dog')]"
-        )
-        self.assertEqual(str(mock.mock_calls), expected)
-
-
-    def test_propertymock(self):
-        p = patch('%s.SomeClass.one' % __name__, new_callable=PropertyMock)
-        mock = p.start()
-        try:
-            SomeClass.one
-            mock.assert_called_once_with()
-
-            s = SomeClass()
-            s.one
-            mock.assert_called_with()
-            self.assertEqual(mock.mock_calls, [call(), call()])
-
-            s.one = 3
-            self.assertEqual(mock.mock_calls, [call(), call(), call(3)])
-        finally:
-            p.stop()
-
-
-    def test_propertymock_returnvalue(self):
-        m = MagicMock()
-        p = PropertyMock()
-        type(m).foo = p
-
-        returned = m.foo
-        p.assert_called_once_with()
-        self.assertIsInstance(returned, MagicMock)
-        self.assertNotIsInstance(returned, PropertyMock)
-
-
-if __name__ == '__main__':
-    unittest2.main()
diff --git a/branch-1.2/ambari-common/src/test/python/mock/tests/testmagicmethods.py b/branch-1.2/ambari-common/src/test/python/mock/tests/testmagicmethods.py
deleted file mode 100644
index ef0f16d..0000000
--- a/branch-1.2/ambari-common/src/test/python/mock/tests/testmagicmethods.py
+++ /dev/null
@@ -1,486 +0,0 @@
-# Copyright (C) 2007-2012 Michael Foord & the mock team
-# E-mail: fuzzyman AT voidspace DOT org DOT uk
-# http://www.voidspace.org.uk/python/mock/
-
-from tests.support import unittest2, inPy3k
-
-try:
-    unicode
-except NameError:
-    # Python 3
-    unicode = str
-    long = int
-
-import inspect
-import sys
-from mock import Mock, MagicMock, _magics
-
-
-
-class TestMockingMagicMethods(unittest2.TestCase):
-
-    def test_deleting_magic_methods(self):
-        mock = Mock()
-        self.assertFalse(hasattr(mock, '__getitem__'))
-
-        mock.__getitem__ = Mock()
-        self.assertTrue(hasattr(mock, '__getitem__'))
-
-        del mock.__getitem__
-        self.assertFalse(hasattr(mock, '__getitem__'))
-
-
-    def test_magicmock_del(self):
-        mock = MagicMock()
-        # before using getitem
-        del mock.__getitem__
-        self.assertRaises(TypeError, lambda: mock['foo'])
-
-        mock = MagicMock()
-        # this time use it first
-        mock['foo']
-        del mock.__getitem__
-        self.assertRaises(TypeError, lambda: mock['foo'])
-
-
-    def test_magic_method_wrapping(self):
-        mock = Mock()
-        def f(self, name):
-            return self, 'fish'
-
-        mock.__getitem__ = f
-        self.assertFalse(mock.__getitem__ is f)
-        self.assertEqual(mock['foo'], (mock, 'fish'))
-        self.assertEqual(mock.__getitem__('foo'), (mock, 'fish'))
-
-        mock.__getitem__ = mock
-        self.assertTrue(mock.__getitem__ is mock)
-
-
-    def test_magic_methods_isolated_between_mocks(self):
-        mock1 = Mock()
-        mock2 = Mock()
-
-        mock1.__iter__ = Mock(return_value=iter([]))
-        self.assertEqual(list(mock1), [])
-        self.assertRaises(TypeError, lambda: list(mock2))
-
-
-    def test_repr(self):
-        mock = Mock()
-        self.assertEqual(repr(mock), "<Mock id='%s'>" % id(mock))
-        mock.__repr__ = lambda s: 'foo'
-        self.assertEqual(repr(mock), 'foo')
-
-
-    def test_str(self):
-        mock = Mock()
-        self.assertEqual(str(mock), object.__str__(mock))
-        mock.__str__ = lambda s: 'foo'
-        self.assertEqual(str(mock), 'foo')
-
-
-    @unittest2.skipIf(inPy3k, "no unicode in Python 3")
-    def test_unicode(self):
-        mock = Mock()
-        self.assertEqual(unicode(mock), unicode(str(mock)))
-
-        mock.__unicode__ = lambda s: unicode('foo')
-        self.assertEqual(unicode(mock), unicode('foo'))
-
-
-    def test_dict_methods(self):
-        mock = Mock()
-
-        self.assertRaises(TypeError, lambda: mock['foo'])
-        def _del():
-            del mock['foo']
-        def _set():
-            mock['foo'] = 3
-        self.assertRaises(TypeError, _del)
-        self.assertRaises(TypeError, _set)
-
-        _dict = {}
-        def getitem(s, name):
-            return _dict[name]
-        def setitem(s, name, value):
-            _dict[name] = value
-        def delitem(s, name):
-            del _dict[name]
-
-        mock.__setitem__ = setitem
-        mock.__getitem__ = getitem
-        mock.__delitem__ = delitem
-
-        self.assertRaises(KeyError, lambda: mock['foo'])
-        mock['foo'] = 'bar'
-        self.assertEqual(_dict, {'foo': 'bar'})
-        self.assertEqual(mock['foo'], 'bar')
-        del mock['foo']
-        self.assertEqual(_dict, {})
-
-
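-    def test_dict_backing_sketch(self):
-        # editor's sketch, not in the upstream mock test suite: with
-        # MagicMock the same effect is usually achieved by routing the item
-        # protocol to a real dict through side_effect.
-        backing = {}
-        mock = MagicMock()
-        mock.__getitem__.side_effect = backing.__getitem__
-        mock.__setitem__.side_effect = backing.__setitem__
-
-        mock['foo'] = 'bar'
-        self.assertEqual(mock['foo'], 'bar')
-        self.assertEqual(backing, {'foo': 'bar'})
-
-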
-    def test_numeric(self):
-        original = mock = Mock()
-        mock.value = 0
-
-        self.assertRaises(TypeError, lambda: mock + 3)
-
-        def add(self, other):
-            mock.value += other
-            return self
-        mock.__add__ = add
-        self.assertEqual(mock + 3, mock)
-        self.assertEqual(mock.value, 3)
-
-        del mock.__add__
-        def iadd(mock):
-            mock += 3
-        self.assertRaises(TypeError, iadd, mock)
-        mock.__iadd__ = add
-        mock += 6
-        self.assertEqual(mock, original)
-        self.assertEqual(mock.value, 9)
-
-        self.assertRaises(TypeError, lambda: 3 + mock)
-        mock.__radd__ = add
-        self.assertEqual(7 + mock, mock)
-        self.assertEqual(mock.value, 16)
-
-
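-    def test_magicmock_add_sketch(self):
-        # editor's sketch, not in the upstream mock test suite: MagicMock
-        # preconfigures the numeric protocol, so operators can be stubbed
-        # and asserted on directly (self is not recorded in the call args).
-        mock = MagicMock()
-        mock.__add__.return_value = 10
-        self.assertEqual(mock + 5, 10)
-        mock.__add__.assert_called_once_with(5)
-
-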
-    @unittest2.skipIf(inPy3k, 'no truediv in Python 3')
-    def test_truediv(self):
-        mock = MagicMock()
-        mock.__truediv__.return_value = 6
-
-        context = {'mock': mock}
-        code = 'from __future__ import division\nresult = mock / 7\n'
-        exec(code, context)
-        self.assertEqual(context['result'], 6)
-
-        mock.__rtruediv__.return_value = 3
-        code = 'from __future__ import division\nresult = 2 / mock\n'
-        exec(code, context)
-        self.assertEqual(context['result'], 3)
-
-
-    @unittest2.skipIf(not inPy3k, 'truediv is available in Python 2')
-    def test_no_truediv(self):
-        self.assertRaises(
-            AttributeError, getattr, MagicMock(), '__truediv__'
-        )
-        self.assertRaises(
-            AttributeError, getattr, MagicMock(), '__rtruediv__'
-        )
-
-
-    def test_hash(self):
-        mock = Mock()
-        # test delegation
-        self.assertEqual(hash(mock), Mock.__hash__(mock))
-
-        def _hash(s):
-            return 3
-        mock.__hash__ = _hash
-        self.assertEqual(hash(mock), 3)
-
-
-    def test_nonzero(self):
-        m = Mock()
-        self.assertTrue(bool(m))
-
-        nonzero = lambda s: False
-        if not inPy3k:
-            m.__nonzero__ = nonzero
-        else:
-            m.__bool__ = nonzero
-
-        self.assertFalse(bool(m))
-
-
-    def test_comparison(self):
-        # note: this test fails with Jython 2.5.1 due to a Jython bug;
-        #       it is fixed in Jython 2.5.2
-        if not inPy3k:
-            # incomparable in Python 3
-            self.assertEqual(Mock() < 3, object() < 3)
-            self.assertEqual(Mock() > 3, object() > 3)
-            self.assertEqual(Mock() <= 3, object() <= 3)
-            self.assertEqual(Mock() >= 3, object() >= 3)
-        else:
-            self.assertRaises(TypeError, lambda: MagicMock() < object())
-            self.assertRaises(TypeError, lambda: object() < MagicMock())
-            self.assertRaises(TypeError, lambda: MagicMock() < MagicMock())
-            self.assertRaises(TypeError, lambda: MagicMock() > object())
-            self.assertRaises(TypeError, lambda: object() > MagicMock())
-            self.assertRaises(TypeError, lambda: MagicMock() > MagicMock())
-            self.assertRaises(TypeError, lambda: MagicMock() <= object())
-            self.assertRaises(TypeError, lambda: object() <= MagicMock())
-            self.assertRaises(TypeError, lambda: MagicMock() <= MagicMock())
-            self.assertRaises(TypeError, lambda: MagicMock() >= object())
-            self.assertRaises(TypeError, lambda: object() >= MagicMock())
-            self.assertRaises(TypeError, lambda: MagicMock() >= MagicMock())
-
-        mock = Mock()
-        def comp(s, o):
-            return True
-        mock.__lt__ = mock.__gt__ = mock.__le__ = mock.__ge__ = comp
-        self.assertTrue(mock < 3)
-        self.assertTrue(mock > 3)
-        self.assertTrue(mock <= 3)
-        self.assertTrue(mock >= 3)
-
-
-    def test_equality(self):
-        for mock in Mock(), MagicMock():
-            self.assertEqual(mock == mock, True)
-            self.assertIsInstance(mock == mock, bool)
-            self.assertEqual(mock != mock, False)
-            self.assertIsInstance(mock != mock, bool)
-            self.assertEqual(mock == object(), False)
-            self.assertEqual(mock != object(), True)
-
-            def eq(self, other):
-                return other == 3
-            mock.__eq__ = eq
-            self.assertTrue(mock == 3)
-            self.assertFalse(mock == 4)
-
-            def ne(self, other):
-                return other == 3
-            mock.__ne__ = ne
-            self.assertTrue(mock != 3)
-            self.assertFalse(mock != 4)
-
-        mock = MagicMock()
-        mock.__eq__.return_value = True
-        self.assertIsInstance(mock == 3, bool)
-        self.assertEqual(mock == 3, True)
-
-        mock.__ne__.return_value = False
-        self.assertIsInstance(mock != 3, bool)
-        self.assertEqual(mock != 3, False)
-
-
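-    def test_equality_side_effect_sketch(self):
-        # editor's sketch, not in the upstream mock test suite: giving
-        # __eq__ a side_effect implements value-based comparison while the
-        # call is still recorded.
-        mock = MagicMock()
-        mock.__eq__.side_effect = lambda other: other == 'expected'
-
-        self.assertTrue(mock == 'expected')
-        self.assertFalse(mock == 'other')
-        mock.__eq__.assert_called_with('other')
-
-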
-    def test_len_contains_iter(self):
-        mock = Mock()
-
-        self.assertRaises(TypeError, len, mock)
-        self.assertRaises(TypeError, iter, mock)
-        self.assertRaises(TypeError, lambda: 'foo' in mock)
-
-        mock.__len__ = lambda s: 6
-        self.assertEqual(len(mock), 6)
-
-        mock.__contains__ = lambda s, o: o == 3
-        self.assertTrue(3 in mock)
-        self.assertFalse(6 in mock)
-
-        mock.__iter__ = lambda s: iter('foobarbaz')
-        self.assertEqual(list(mock), list('foobarbaz'))
-
-
-    def test_magicmock(self):
-        mock = MagicMock()
-
-        mock.__iter__.return_value = iter([1, 2, 3])
-        self.assertEqual(list(mock), [1, 2, 3])
-
-        name = '__nonzero__'
-        other = '__bool__'
-        if inPy3k:
-            name, other = other, name
-        getattr(mock, name).return_value = False
-        self.assertFalse(hasattr(mock, other))
-        self.assertFalse(bool(mock))
-
-        for entry in _magics:
-            self.assertTrue(hasattr(mock, entry))
-        self.assertFalse(hasattr(mock, '__imaginary__'))
-
-
-    def test_magic_mock_equality(self):
-        mock = MagicMock()
-        self.assertIsInstance(mock == object(), bool)
-        self.assertIsInstance(mock != object(), bool)
-
-        self.assertEqual(mock == object(), False)
-        self.assertEqual(mock != object(), True)
-        self.assertEqual(mock == mock, True)
-        self.assertEqual(mock != mock, False)
-
-
-    def test_magicmock_defaults(self):
-        mock = MagicMock()
-        self.assertEqual(int(mock), 1)
-        self.assertEqual(complex(mock), 1j)
-        self.assertEqual(float(mock), 1.0)
-        self.assertEqual(long(mock), long(1))
-        self.assertNotIn(object(), mock)
-        self.assertEqual(len(mock), 0)
-        self.assertEqual(list(mock), [])
-        self.assertEqual(hash(mock), object.__hash__(mock))
-        self.assertEqual(str(mock), object.__str__(mock))
-        self.assertEqual(unicode(mock), object.__str__(mock))
-        self.assertIsInstance(unicode(mock), unicode)
-        self.assertTrue(bool(mock))
-        if not inPy3k:
-            self.assertEqual(oct(mock), '1')
-        else:
-            # in Python 3 oct and hex use __index__
-            # so these tests are for __index__ in py3k
-            self.assertEqual(oct(mock), '0o1')
-        self.assertEqual(hex(mock), '0x1')
-        # how to test __sizeof__ ?
-
-
-    @unittest2.skipIf(inPy3k, "no __cmp__ in Python 3")
-    def test_non_default_magic_methods(self):
-        mock = MagicMock()
-        self.assertRaises(AttributeError, lambda: mock.__cmp__)
-
-        mock = Mock()
-        mock.__cmp__ = lambda s, o: 0
-
-        self.assertEqual(mock, object())
-
-
-    def test_magic_methods_and_spec(self):
-        class Iterable(object):
-            def __iter__(self):
-                pass
-
-        mock = Mock(spec=Iterable)
-        self.assertRaises(AttributeError, lambda: mock.__iter__)
-
-        mock.__iter__ = Mock(return_value=iter([]))
-        self.assertEqual(list(mock), [])
-
-        class NonIterable(object):
-            pass
-        mock = Mock(spec=NonIterable)
-        self.assertRaises(AttributeError, lambda: mock.__iter__)
-
-        def set_int():
-            mock.__int__ = Mock(return_value=iter([]))
-        self.assertRaises(AttributeError, set_int)
-
-        mock = MagicMock(spec=Iterable)
-        self.assertEqual(list(mock), [])
-        self.assertRaises(AttributeError, set_int)
-
-
-    def test_magic_methods_and_spec_set(self):
-        class Iterable(object):
-            def __iter__(self):
-                pass
-
-        mock = Mock(spec_set=Iterable)
-        self.assertRaises(AttributeError, lambda: mock.__iter__)
-
-        mock.__iter__ = Mock(return_value=iter([]))
-        self.assertEqual(list(mock), [])
-
-        class NonIterable(object):
-            pass
-        mock = Mock(spec_set=NonIterable)
-        self.assertRaises(AttributeError, lambda: mock.__iter__)
-
-        def set_int():
-            mock.__int__ = Mock(return_value=iter([]))
-        self.assertRaises(AttributeError, set_int)
-
-        mock = MagicMock(spec_set=Iterable)
-        self.assertEqual(list(mock), [])
-        self.assertRaises(AttributeError, set_int)
-
-
-    def test_setting_unsupported_magic_method(self):
-        mock = MagicMock()
-        def set_setattr():
-            mock.__setattr__ = lambda self, name: None
-        self.assertRaisesRegexp(AttributeError,
-            "Attempting to set unsupported magic method '__setattr__'.",
-            set_setattr
-        )
-
-
-    def test_attributes_and_return_value(self):
-        mock = MagicMock()
-        attr = mock.foo
-        def _get_type(obj):
-            # the type of every mock (or magicmock) is a custom subclass
-            # so the real type is the second in the mro
-            return type(obj).__mro__[1]
-        self.assertEqual(_get_type(attr), MagicMock)
-
-        returned = mock()
-        self.assertEqual(_get_type(returned), MagicMock)
-
-
-    def test_magic_methods_are_magic_mocks(self):
-        mock = MagicMock()
-        self.assertIsInstance(mock.__getitem__, MagicMock)
-
-        mock[1][2].__getitem__.return_value = 3
-        self.assertEqual(mock[1][2][3], 3)
-
-
-    def test_magic_method_reset_mock(self):
-        mock = MagicMock()
-        str(mock)
-        self.assertTrue(mock.__str__.called)
-        mock.reset_mock()
-        self.assertFalse(mock.__str__.called)
-
-
-    @unittest2.skipUnless(sys.version_info[:2] >= (2, 6),
-                          "__dir__ not available until Python 2.6 or later")
-    def test_dir(self):
-        # overriding the default implementation
-        for mock in Mock(), MagicMock():
-            def _dir(self):
-                return ['foo']
-            mock.__dir__ = _dir
-            self.assertEqual(dir(mock), ['foo'])
-
-
-    @unittest2.skipIf('PyPy' in sys.version, "This fails differently on pypy")
-    def test_bound_methods(self):
-        m = Mock()
-
-        # XXXX should this be an expected failure instead?
-
-        # this seems like it should work, but is hard to do without introducing
-        # other api inconsistencies. Failure message could be better though.
-        m.__iter__ = [3].__iter__
-        self.assertRaises(TypeError, iter, m)
-
-
-    def test_magic_method_type(self):
-        class Foo(MagicMock):
-            pass
-
-        foo = Foo()
-        self.assertIsInstance(foo.__int__, Foo)
-
-
-    def test_descriptor_from_class(self):
-        m = MagicMock()
-        type(m).__str__.return_value = 'foo'
-        self.assertEqual(str(m), 'foo')
-
-
-    def test_iterable_as_iter_return_value(self):
-        m = MagicMock()
-        m.__iter__.return_value = [1, 2, 3]
-        self.assertEqual(list(m), [1, 2, 3])
-        self.assertEqual(list(m), [1, 2, 3])
-
-        m.__iter__.return_value = iter([4, 5, 6])
-        self.assertEqual(list(m), [4, 5, 6])
-        self.assertEqual(list(m), [])
-
-
-if __name__ == '__main__':
-    unittest2.main()
diff --git a/branch-1.2/ambari-common/src/test/python/mock/tests/testmock.py b/branch-1.2/ambari-common/src/test/python/mock/tests/testmock.py
deleted file mode 100644
index f3ceea9..0000000
--- a/branch-1.2/ambari-common/src/test/python/mock/tests/testmock.py
+++ /dev/null
@@ -1,1351 +0,0 @@
-# Copyright (C) 2007-2012 Michael Foord & the mock team
-# E-mail: fuzzyman AT voidspace DOT org DOT uk
-# http://www.voidspace.org.uk/python/mock/
-
-from tests.support import (
-    callable, unittest2, inPy3k, is_instance, next
-)
-
-import copy
-import pickle
-import sys
-
-import mock
-from mock import (
-    call, DEFAULT, patch, sentinel,
-    MagicMock, Mock, NonCallableMock,
-    NonCallableMagicMock, _CallList,
-    create_autospec
-)
-
-
-try:
-    unicode
-except NameError:
-    unicode = str
-
-
-class Iter(object):
-    def __init__(self):
-        self.thing = iter(['this', 'is', 'an', 'iter'])
-
-    def __iter__(self):
-        return self
-
-    def next(self):
-        return next(self.thing)
-
-    __next__ = next
-
-
-class Subclass(MagicMock):
-    pass
-
-
-class Thing(object):
-    attribute = 6
-    foo = 'bar'
-
-
-
-class MockTest(unittest2.TestCase):
-
-    def test_all(self):
-        # if __all__ is badly defined then import * will raise an error
-        # We have to exec it because you can't import * inside a method
-        # in Python 3
-        exec("from mock import *")
-
-
-    def test_constructor(self):
-        mock = Mock()
-
-        self.assertFalse(mock.called, "called not initialised correctly")
-        self.assertEqual(mock.call_count, 0,
-                         "call_count not initialised correctly")
-        self.assertTrue(is_instance(mock.return_value, Mock),
-                        "return_value not initialised correctly")
-
-        self.assertEqual(mock.call_args, None,
-                         "call_args not initialised correctly")
-        self.assertEqual(mock.call_args_list, [],
-                         "call_args_list not initialised correctly")
-        self.assertEqual(mock.method_calls, [],
-                          "method_calls not initialised correctly")
-
-        # Can't use hasattr for this test as it always returns True on a mock
-        self.assertFalse('_items' in mock.__dict__,
-                         "default mock should not have '_items' attribute")
-
-        self.assertIsNone(mock._mock_parent,
-                          "parent not initialised correctly")
-        self.assertIsNone(mock._mock_methods,
-                          "methods not initialised correctly")
-        self.assertEqual(mock._mock_children, {},
-                         "children not initialised incorrectly")
-
-
-    def test_unicode_not_broken(self):
-        # This used to raise an exception with Python 2.5 and Mock 0.4
-        unicode(Mock())
-
-
-    def test_return_value_in_constructor(self):
-        mock = Mock(return_value=None)
-        self.assertIsNone(mock.return_value,
-                          "return value in constructor not honoured")
-
-
-    def test_repr(self):
-        mock = Mock(name='foo')
-        self.assertIn('foo', repr(mock))
-        self.assertIn("'%s'" % id(mock), repr(mock))
-
-        mocks = [(Mock(), 'mock'), (Mock(name='bar'), 'bar')]
-        for mock, name in mocks:
-            self.assertIn('%s.bar' % name, repr(mock.bar))
-            self.assertIn('%s.foo()' % name, repr(mock.foo()))
-            self.assertIn('%s.foo().bing' % name, repr(mock.foo().bing))
-            self.assertIn('%s()' % name, repr(mock()))
-            self.assertIn('%s()()' % name, repr(mock()()))
-            self.assertIn('%s()().foo.bar.baz().bing' % name,
-                          repr(mock()().foo.bar.baz().bing))
-
-
-    def test_repr_with_spec(self):
-        class X(object):
-            pass
-
-        mock = Mock(spec=X)
-        self.assertIn(" spec='X' ", repr(mock))
-
-        mock = Mock(spec=X())
-        self.assertIn(" spec='X' ", repr(mock))
-
-        mock = Mock(spec_set=X)
-        self.assertIn(" spec_set='X' ", repr(mock))
-
-        mock = Mock(spec_set=X())
-        self.assertIn(" spec_set='X' ", repr(mock))
-
-        mock = Mock(spec=X, name='foo')
-        self.assertIn(" spec='X' ", repr(mock))
-        self.assertIn(" name='foo' ", repr(mock))
-
-        mock = Mock(name='foo')
-        self.assertNotIn("spec", repr(mock))
-
-        mock = Mock()
-        self.assertNotIn("spec", repr(mock))
-
-        mock = Mock(spec=['foo'])
-        self.assertNotIn("spec", repr(mock))
-
-
-    def test_side_effect(self):
-        mock = Mock()
-
-        def effect(*args, **kwargs):
-            raise SystemError('kablooie')
-
-        mock.side_effect = effect
-        self.assertRaises(SystemError, mock, 1, 2, fish=3)
-        mock.assert_called_with(1, 2, fish=3)
-
-        results = [1, 2, 3]
-        def effect():
-            return results.pop()
-        mock.side_effect = effect
-
-        self.assertEqual([mock(), mock(), mock()], [3, 2, 1],
-                          "side effect not used correctly")
-
-        mock = Mock(side_effect=sentinel.SideEffect)
-        self.assertEqual(mock.side_effect, sentinel.SideEffect,
-                          "side effect in constructor not used")
-
-        def side_effect():
-            return DEFAULT
-        mock = Mock(side_effect=side_effect, return_value=sentinel.RETURN)
-        self.assertEqual(mock(), sentinel.RETURN)
-
-
-    @unittest2.skipUnless('java' in sys.platform,
-                          'This test only applies to Jython')
-    def test_java_exception_side_effect(self):
-        import java
-        mock = Mock(side_effect=java.lang.RuntimeException("Boom!"))
-
-        # can't use assertRaises with java exceptions
-        try:
-            mock(1, 2, fish=3)
-        except java.lang.RuntimeException:
-            pass
-        else:
-            self.fail('java exception not raised')
-        mock.assert_called_with(1, 2, fish=3)
-
-
-    def test_reset_mock(self):
-        parent = Mock()
-        spec = ["something"]
-        mock = Mock(name="child", parent=parent, spec=spec)
-        mock(sentinel.Something, something=sentinel.SomethingElse)
-        something = mock.something
-        mock.something()
-        mock.side_effect = sentinel.SideEffect
-        return_value = mock.return_value
-        return_value()
-
-        mock.reset_mock()
-
-        self.assertEqual(mock._mock_name, "child",
-                         "name incorrectly reset")
-        self.assertEqual(mock._mock_parent, parent,
-                         "parent incorrectly reset")
-        self.assertEqual(mock._mock_methods, spec,
-                         "methods incorrectly reset")
-
-        self.assertFalse(mock.called, "called not reset")
-        self.assertEqual(mock.call_count, 0, "call_count not reset")
-        self.assertEqual(mock.call_args, None, "call_args not reset")
-        self.assertEqual(mock.call_args_list, [], "call_args_list not reset")
-        self.assertEqual(mock.method_calls, [],
-                        "method_calls not initialised correctly: %r != %r" %
-                        (mock.method_calls, []))
-        self.assertEqual(mock.mock_calls, [])
-
-        self.assertEqual(mock.side_effect, sentinel.SideEffect,
-                          "side_effect incorrectly reset")
-        self.assertEqual(mock.return_value, return_value,
-                          "return_value incorrectly reset")
-        self.assertFalse(return_value.called, "return value mock not reset")
-        self.assertEqual(mock._mock_children, {'something': something},
-                          "children reset incorrectly")
-        self.assertEqual(mock.something, something,
-                          "children incorrectly cleared")
-        self.assertFalse(mock.something.called, "child not reset")
-
-
-    def test_reset_mock_recursion(self):
-        mock = Mock()
-        mock.return_value = mock
-
-        # used to cause recursion
-        mock.reset_mock()
-
-
-    def test_call(self):
-        mock = Mock()
-        self.assertTrue(is_instance(mock.return_value, Mock),
-                        "Default return_value should be a Mock")
-
-        result = mock()
-        self.assertEqual(mock(), result,
-                         "different result from consecutive calls")
-        mock.reset_mock()
-
-        ret_val = mock(sentinel.Arg)
-        self.assertTrue(mock.called, "called not set")
-        self.assertEqual(mock.call_count, 1, "call_count incoreect")
-        self.assertEqual(mock.call_args, ((sentinel.Arg,), {}),
-                         "call_args not set")
-        self.assertEqual(mock.call_args_list, [((sentinel.Arg,), {})],
-                         "call_args_list not initialised correctly")
-
-        mock.return_value = sentinel.ReturnValue
-        ret_val = mock(sentinel.Arg, key=sentinel.KeyArg)
-        self.assertEqual(ret_val, sentinel.ReturnValue,
-                         "incorrect return value")
-
-        self.assertEqual(mock.call_count, 2, "call_count incorrect")
-        self.assertEqual(mock.call_args,
-                         ((sentinel.Arg,), {'key': sentinel.KeyArg}),
-                         "call_args not set")
-        self.assertEqual(mock.call_args_list, [
-            ((sentinel.Arg,), {}),
-            ((sentinel.Arg,), {'key': sentinel.KeyArg})
-        ],
-            "call_args_list not set")
-
-
-    def test_call_args_comparison(self):
-        mock = Mock()
-        mock()
-        mock(sentinel.Arg)
-        mock(kw=sentinel.Kwarg)
-        mock(sentinel.Arg, kw=sentinel.Kwarg)
-        self.assertEqual(mock.call_args_list, [
-            (),
-            ((sentinel.Arg,),),
-            ({"kw": sentinel.Kwarg},),
-            ((sentinel.Arg,), {"kw": sentinel.Kwarg})
-        ])
-        self.assertEqual(mock.call_args,
-                         ((sentinel.Arg,), {"kw": sentinel.Kwarg}))
-
-
-    def test_assert_called_with(self):
-        mock = Mock()
-        mock()
-
-        # Will raise an exception if it fails
-        mock.assert_called_with()
-        self.assertRaises(AssertionError, mock.assert_called_with, 1)
-
-        mock.reset_mock()
-        self.assertRaises(AssertionError, mock.assert_called_with)
-
-        mock(1, 2, 3, a='fish', b='nothing')
-        mock.assert_called_with(1, 2, 3, a='fish', b='nothing')
-
-
-    def test_assert_called_once_with(self):
-        mock = Mock()
-        mock()
-
-        # Will raise an exception if it fails
-        mock.assert_called_once_with()
-
-        mock()
-        self.assertRaises(AssertionError, mock.assert_called_once_with)
-
-        mock.reset_mock()
-        self.assertRaises(AssertionError, mock.assert_called_once_with)
-
-        mock('foo', 'bar', baz=2)
-        mock.assert_called_once_with('foo', 'bar', baz=2)
-
-        mock.reset_mock()
-        mock('foo', 'bar', baz=2)
-        self.assertRaises(
-            AssertionError,
-            lambda: mock.assert_called_once_with('bob', 'bar', baz=2)
-        )
-
-
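-    def test_called_once_equivalence_sketch(self):
-        # editor's sketch, not in the upstream mock test suite:
-        # assert_called_once_with is equivalent to checking call_count
-        # together with the most recent call args.
-        mock = Mock()
-        mock('a', b=1)
-
-        self.assertEqual(mock.call_count, 1)
-        self.assertEqual(mock.call_args, call('a', b=1))
-        mock.assert_called_once_with('a', b=1)
-
-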
-    def test_attribute_access_returns_mocks(self):
-        mock = Mock()
-        something = mock.something
-        self.assertTrue(is_instance(something, Mock), "attribute isn't a mock")
-        self.assertEqual(mock.something, something,
-                         "different attributes returned for same name")
-
-        # Usage example
-        mock = Mock()
-        mock.something.return_value = 3
-
-        self.assertEqual(mock.something(), 3, "method returned wrong value")
-        self.assertTrue(mock.something.called,
-                        "method didn't record being called")
-
-
-    def test_attributes_have_name_and_parent_set(self):
-        mock = Mock()
-        something = mock.something
-
-        self.assertEqual(something._mock_name, "something",
-                         "attribute name not set correctly")
-        self.assertEqual(something._mock_parent, mock,
-                         "attribute parent not set correctly")
-
-
-    def test_method_calls_recorded(self):
-        mock = Mock()
-        mock.something(3, fish=None)
-        mock.something_else.something(6, cake=sentinel.Cake)
-
-        self.assertEqual(mock.something_else.method_calls,
-                          [("something", (6,), {'cake': sentinel.Cake})],
-                          "method calls not recorded correctly")
-        self.assertEqual(mock.method_calls, [
-            ("something", (3,), {'fish': None}),
-            ("something_else.something", (6,), {'cake': sentinel.Cake})
-        ],
-            "method calls not recorded correctly")
-
-
-    def test_method_calls_compare_easily(self):
-        mock = Mock()
-        mock.something()
-        self.assertEqual(mock.method_calls, [('something',)])
-        self.assertEqual(mock.method_calls, [('something', (), {})])
-
-        mock = Mock()
-        mock.something('different')
-        self.assertEqual(mock.method_calls, [('something', ('different',))])
-        self.assertEqual(mock.method_calls,
-                         [('something', ('different',), {})])
-
-        mock = Mock()
-        mock.something(x=1)
-        self.assertEqual(mock.method_calls, [('something', {'x': 1})])
-        self.assertEqual(mock.method_calls, [('something', (), {'x': 1})])
-
-        mock = Mock()
-        mock.something('different', some='more')
-        self.assertEqual(mock.method_calls, [
-            ('something', ('different',), {'some': 'more'})
-        ])
-
-
-    def test_only_allowed_methods_exist(self):
-        for spec in ['something'], ('something',):
-            for arg in 'spec', 'spec_set':
-                mock = Mock(**{arg: spec})
-
-                # this should be allowed
-                mock.something
-                self.assertRaisesRegexp(
-                    AttributeError,
-                    "Mock object has no attribute 'something_else'",
-                    getattr, mock, 'something_else'
-                )
-
-
-    def test_from_spec(self):
-        class Something(object):
-            x = 3
-            __something__ = None
-            def y(self):
-                pass
-
-        def test_attributes(mock):
-            # should work
-            mock.x
-            mock.y
-            mock.__something__
-            self.assertRaisesRegexp(
-                AttributeError,
-                "Mock object has no attribute 'z'",
-                getattr, mock, 'z'
-            )
-            self.assertRaisesRegexp(
-                AttributeError,
-                "Mock object has no attribute '__foobar__'",
-                getattr, mock, '__foobar__'
-            )
-
-        test_attributes(Mock(spec=Something))
-        test_attributes(Mock(spec=Something()))
-
-
-    def test_wraps_calls(self):
-        real = Mock()
-
-        mock = Mock(wraps=real)
-        self.assertEqual(mock(), real())
-
-        real.reset_mock()
-
-        mock(1, 2, fish=3)
-        real.assert_called_with(1, 2, fish=3)
-
-
-    def test_wraps_call_with_nondefault_return_value(self):
-        real = Mock()
-
-        mock = Mock(wraps=real)
-        mock.return_value = 3
-
-        self.assertEqual(mock(), 3)
-        self.assertFalse(real.called)
-
-
-    def test_wraps_attributes(self):
-        class Real(object):
-            attribute = Mock()
-
-        real = Real()
-
-        mock = Mock(wraps=real)
-        self.assertEqual(mock.attribute(), real.attribute())
-        self.assertRaises(AttributeError, lambda: mock.fish)
-
-        self.assertNotEqual(mock.attribute, real.attribute)
-        result = mock.attribute.frog(1, 2, fish=3)
-        Real.attribute.frog.assert_called_with(1, 2, fish=3)
-        self.assertEqual(result, Real.attribute.frog())
-
-
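-    def test_wraps_function_sketch(self):
-        # editor's sketch, not in the upstream mock test suite: wraps also
-        # accepts a plain function, whose return value is used as long as
-        # return_value is left at DEFAULT.
-        def real(a):
-            return a * 2
-
-        mock = Mock(wraps=real)
-        self.assertEqual(mock(3), 6)
-        mock.assert_called_once_with(3)
-
-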
-    def test_exceptional_side_effect(self):
-        mock = Mock(side_effect=AttributeError)
-        self.assertRaises(AttributeError, mock)
-
-        mock = Mock(side_effect=AttributeError('foo'))
-        self.assertRaises(AttributeError, mock)
-
-
-    def test_baseexceptional_side_effect(self):
-        mock = Mock(side_effect=KeyboardInterrupt)
-        self.assertRaises(KeyboardInterrupt, mock)
-
-        mock = Mock(side_effect=KeyboardInterrupt('foo'))
-        self.assertRaises(KeyboardInterrupt, mock)
-
-
-    def test_assert_called_with_message(self):
-        mock = Mock()
-        self.assertRaisesRegexp(AssertionError, 'Not called',
-                                mock.assert_called_with)
-
-
-    def test__name__(self):
-        mock = Mock()
-        self.assertRaises(AttributeError, lambda: mock.__name__)
-
-        mock.__name__ = 'foo'
-        self.assertEqual(mock.__name__, 'foo')
-
-
-    def test_spec_list_subclass(self):
-        class Sub(list):
-            pass
-        mock = Mock(spec=Sub(['foo']))
-
-        mock.append(3)
-        mock.append.assert_called_with(3)
-        self.assertRaises(AttributeError, getattr, mock, 'foo')
-
-
-    def test_spec_class(self):
-        class X(object):
-            pass
-
-        mock = Mock(spec=X)
-        self.assertTrue(isinstance(mock, X))
-
-        mock = Mock(spec=X())
-        self.assertTrue(isinstance(mock, X))
-
-        self.assertIs(mock.__class__, X)
-        self.assertEqual(Mock().__class__.__name__, 'Mock')
-
-        mock = Mock(spec_set=X)
-        self.assertTrue(isinstance(mock, X))
-
-        mock = Mock(spec_set=X())
-        self.assertTrue(isinstance(mock, X))
-
-
-    def test_setting_attribute_with_spec_set(self):
-        class X(object):
-            y = 3
-
-        mock = Mock(spec=X)
-        mock.x = 'foo'
-
-        mock = Mock(spec_set=X)
-        def set_attr():
-            mock.x = 'foo'
-
-        mock.y = 'foo'
-        self.assertRaises(AttributeError, set_attr)
-
-
-    def test_copy(self):
-        current = sys.getrecursionlimit()
-        self.addCleanup(sys.setrecursionlimit, current)
-
-        # can't use sys.maxint as this doesn't exist in Python 3
-        sys.setrecursionlimit(int(10e8))
-        # this segfaults without the fix in place
-        copy.copy(Mock())
-
-
-    @unittest2.skipIf(inPy3k, "no old style classes in Python 3")
-    def test_spec_old_style_classes(self):
-        class Foo:
-            bar = 7
-
-        mock = Mock(spec=Foo)
-        mock.bar = 6
-        self.assertRaises(AttributeError, lambda: mock.foo)
-
-        mock = Mock(spec=Foo())
-        mock.bar = 6
-        self.assertRaises(AttributeError, lambda: mock.foo)
-
-
-    @unittest2.skipIf(inPy3k, "no old style classes in Python 3")
-    def test_spec_set_old_style_classes(self):
-        class Foo:
-            bar = 7
-
-        mock = Mock(spec_set=Foo)
-        mock.bar = 6
-        self.assertRaises(AttributeError, lambda: mock.foo)
-
-        def _set():
-            mock.foo = 3
-        self.assertRaises(AttributeError, _set)
-
-        mock = Mock(spec_set=Foo())
-        mock.bar = 6
-        self.assertRaises(AttributeError, lambda: mock.foo)
-
-        def _set():
-            mock.foo = 3
-        self.assertRaises(AttributeError, _set)
-
-
-    def test_subclass_with_properties(self):
-        class SubClass(Mock):
-            def _get(self):
-                return 3
-            def _set(self, value):
-                raise NameError('strange error')
-            some_attribute = property(_get, _set)
-
-        s = SubClass(spec_set=SubClass)
-        self.assertEqual(s.some_attribute, 3)
-
-        def test():
-            s.some_attribute = 3
-        self.assertRaises(NameError, test)
-
-        def test():
-            s.foo = 'bar'
-        self.assertRaises(AttributeError, test)
-
-
-    def test_setting_call(self):
-        mock = Mock()
-        def __call__(self, a):
-            return self._mock_call(a)
-
-        type(mock).__call__ = __call__
-        mock('one')
-        mock.assert_called_with('one')
-
-        self.assertRaises(TypeError, mock, 'one', 'two')
-
-
-    @unittest2.skipUnless(sys.version_info[:2] >= (2, 6),
-                          "__dir__ not available until Python 2.6 or later")
-    def test_dir(self):
-        mock = Mock()
-        attrs = set(dir(mock))
-        type_attrs = set([m for m in dir(Mock) if not m.startswith('_')])
-
-        # all public attributes from the type are included
-        self.assertEqual(set(), type_attrs - attrs)
-
-        # merely accessing attributes creates them
-        mock.a, mock.b
-        self.assertIn('a', dir(mock))
-        self.assertIn('b', dir(mock))
-
-        # instance attributes
-        mock.c = mock.d = None
-        self.assertIn('c', dir(mock))
-        self.assertIn('d', dir(mock))
-
-        # magic methods
-        mock.__iter__ = lambda s: iter([])
-        self.assertIn('__iter__', dir(mock))
-
-
-    @unittest2.skipUnless(sys.version_info[:2] >= (2, 6),
-                          "__dir__ not available until Python 2.6 or later")
-    def test_dir_from_spec(self):
-        mock = Mock(spec=unittest2.TestCase)
-        testcase_attrs = set(dir(unittest2.TestCase))
-        attrs = set(dir(mock))
-
-        # all attributes from the spec are included
-        self.assertEqual(set(), testcase_attrs - attrs)
-
-        # an instance attribute should appear exactly once in dir()
-        mock.version = 3
-        self.assertEqual(dir(mock).count('version'), 1)
-
-
-    @unittest2.skipUnless(sys.version_info[:2] >= (2, 6),
-                          "__dir__ not available until Python 2.6 or later")
-    def test_filter_dir(self):
-        patcher = patch.object(mock, 'FILTER_DIR', False)
-        patcher.start()
-        try:
-            attrs = set(dir(Mock()))
-            type_attrs = set(dir(Mock))
-
-            # ALL attributes from the type are included
-            self.assertEqual(set(), type_attrs - attrs)
-        finally:
-            patcher.stop()
-
-
-    def test_configure_mock(self):
-        mock = Mock(foo='bar')
-        self.assertEqual(mock.foo, 'bar')
-
-        mock = MagicMock(foo='bar')
-        self.assertEqual(mock.foo, 'bar')
-
-        kwargs = {'side_effect': KeyError, 'foo.bar.return_value': 33,
-                  'foo': MagicMock()}
-        mock = Mock(**kwargs)
-        self.assertRaises(KeyError, mock)
-        self.assertEqual(mock.foo.bar(), 33)
-        self.assertIsInstance(mock.foo, MagicMock)
-
-        mock = Mock()
-        mock.configure_mock(**kwargs)
-        self.assertRaises(KeyError, mock)
-        self.assertEqual(mock.foo.bar(), 33)
-        self.assertIsInstance(mock.foo, MagicMock)
-
-
-    def assertRaisesWithMsg(self, exception, message, func, *args, **kwargs):
-        # needed because assertRaisesRegex doesn't work easily with newlines
-        try:
-            func(*args, **kwargs)
-        except:
-            instance = sys.exc_info()[1]
-            self.assertIsInstance(instance, exception)
-        else:
-            self.fail('Exception %r not raised' % (exception,))
-
-        msg = str(instance)
-        self.assertEqual(msg, message)
-
-
-    def test_assert_called_with_failure_message(self):
-        mock = NonCallableMock()
-
-        expected = "mock(1, '2', 3, bar='foo')"
-        message = 'Expected call: %s\nNot called'
-        self.assertRaisesWithMsg(
-            AssertionError, message % (expected,),
-            mock.assert_called_with, 1, '2', 3, bar='foo'
-        )
-
-        mock.foo(1, '2', 3, foo='foo')
-
-
-        asserters = [
-            mock.foo.assert_called_with, mock.foo.assert_called_once_with
-        ]
-        for meth in asserters:
-            actual = "foo(1, '2', 3, foo='foo')"
-            expected = "foo(1, '2', 3, bar='foo')"
-            message = 'Expected call: %s\nActual call: %s'
-            self.assertRaisesWithMsg(
-                AssertionError, message % (expected, actual),
-                meth, 1, '2', 3, bar='foo'
-            )
-
-        # just kwargs
-        for meth in asserters:
-            actual = "foo(1, '2', 3, foo='foo')"
-            expected = "foo(bar='foo')"
-            message = 'Expected call: %s\nActual call: %s'
-            self.assertRaisesWithMsg(
-                AssertionError, message % (expected, actual),
-                meth, bar='foo'
-            )
-
-        # just args
-        for meth in asserters:
-            actual = "foo(1, '2', 3, foo='foo')"
-            expected = "foo(1, 2, 3)"
-            message = 'Expected call: %s\nActual call: %s'
-            self.assertRaisesWithMsg(
-                AssertionError, message % (expected, actual),
-                meth, 1, 2, 3
-            )
-
-        # empty
-        for meth in asserters:
-            actual = "foo(1, '2', 3, foo='foo')"
-            expected = "foo()"
-            message = 'Expected call: %s\nActual call: %s'
-            self.assertRaisesWithMsg(
-                AssertionError, message % (expected, actual), meth
-            )
-
-
-    def test_mock_calls(self):
-        mock = MagicMock()
-
-        # need to do this because MagicMock.mock_calls used to just return
-        # a MagicMock which also returned a MagicMock when __eq__ was called
-        self.assertIs(mock.mock_calls == [], True)
-
-        mock = MagicMock()
-        mock()
-        expected = [('', (), {})]
-        self.assertEqual(mock.mock_calls, expected)
-
-        mock.foo()
-        expected.append(call.foo())
-        self.assertEqual(mock.mock_calls, expected)
-        # intermediate mock_calls work too
-        self.assertEqual(mock.foo.mock_calls, [('', (), {})])
-
-        mock = MagicMock()
-        mock().foo(1, 2, 3, a=4, b=5)
-        expected = [
-            ('', (), {}), ('().foo', (1, 2, 3), dict(a=4, b=5))
-        ]
-        self.assertEqual(mock.mock_calls, expected)
-        self.assertEqual(mock.return_value.foo.mock_calls,
-                         [('', (1, 2, 3), dict(a=4, b=5))])
-        self.assertEqual(mock.return_value.mock_calls,
-                         [('foo', (1, 2, 3), dict(a=4, b=5))])
-
-        mock = MagicMock()
-        mock().foo.bar().baz()
-        expected = [
-            ('', (), {}), ('().foo.bar', (), {}),
-            ('().foo.bar().baz', (), {})
-        ]
-        self.assertEqual(mock.mock_calls, expected)
-        self.assertEqual(mock().mock_calls,
-                         call.foo.bar().baz().call_list())
-
-        for kwargs in dict(), dict(name='bar'):
-            mock = MagicMock(**kwargs)
-            int(mock.foo)
-            expected = [('foo.__int__', (), {})]
-            self.assertEqual(mock.mock_calls, expected)
-
-            mock = MagicMock(**kwargs)
-            mock.a()()
-            expected = [('a', (), {}), ('a()', (), {})]
-            self.assertEqual(mock.mock_calls, expected)
-            self.assertEqual(mock.a().mock_calls, [call()])
-
-            mock = MagicMock(**kwargs)
-            mock(1)(2)(3)
-            self.assertEqual(mock.mock_calls, call(1)(2)(3).call_list())
-            self.assertEqual(mock().mock_calls, call(2)(3).call_list())
-            self.assertEqual(mock()().mock_calls, call(3).call_list())
-
-            mock = MagicMock(**kwargs)
-            mock(1)(2)(3).a.b.c(4)
-            self.assertEqual(mock.mock_calls,
-                             call(1)(2)(3).a.b.c(4).call_list())
-            self.assertEqual(mock().mock_calls,
-                             call(2)(3).a.b.c(4).call_list())
-            self.assertEqual(mock()().mock_calls,
-                             call(3).a.b.c(4).call_list())
-
-            mock = MagicMock(**kwargs)
-            int(mock().foo.bar().baz())
-            last_call = ('().foo.bar().baz().__int__', (), {})
-            self.assertEqual(mock.mock_calls[-1], last_call)
-            self.assertEqual(mock().mock_calls,
-                             call.foo.bar().baz().__int__().call_list())
-            self.assertEqual(mock().foo.bar().mock_calls,
-                             call.baz().__int__().call_list())
-            self.assertEqual(mock().foo.bar().baz.mock_calls,
-                             call().__int__().call_list())
-
-
-    def test_subclassing(self):
-        class Subclass(Mock):
-            pass
-
-        mock = Subclass()
-        self.assertIsInstance(mock.foo, Subclass)
-        self.assertIsInstance(mock(), Subclass)
-
-        class Subclass(Mock):
-            def _get_child_mock(self, **kwargs):
-                return Mock(**kwargs)
-
-        mock = Subclass()
-        self.assertNotIsInstance(mock.foo, Subclass)
-        self.assertNotIsInstance(mock(), Subclass)
-
-
-    def test_arg_lists(self):
-        mocks = [
-            Mock(),
-            MagicMock(),
-            NonCallableMock(),
-            NonCallableMagicMock()
-        ]
-
-        def assert_attrs(mock):
-            names = 'call_args_list', 'method_calls', 'mock_calls'
-            for name in names:
-                attr = getattr(mock, name)
-                self.assertIsInstance(attr, _CallList)
-                self.assertIsInstance(attr, list)
-                self.assertEqual(attr, [])
-
-        for mock in mocks:
-            assert_attrs(mock)
-
-            if callable(mock):
-                mock()
-                mock(1, 2)
-                mock(a=3)
-
-                mock.reset_mock()
-                assert_attrs(mock)
-
-            mock.foo()
-            mock.foo.bar(1, a=3)
-            mock.foo(1).bar().baz(3)
-
-            mock.reset_mock()
-            assert_attrs(mock)
-
-
-    def test_call_args_two_tuple(self):
-        mock = Mock()
-        mock(1, a=3)
-        mock(2, b=4)
-
-        self.assertEqual(len(mock.call_args), 2)
-        args, kwargs = mock.call_args
-        self.assertEqual(args, (2,))
-        self.assertEqual(kwargs, dict(b=4))
-
-        expected_list = [((1,), dict(a=3)), ((2,), dict(b=4))]
-        for expected, call_args in zip(expected_list, mock.call_args_list):
-            self.assertEqual(len(call_args), 2)
-            self.assertEqual(expected[0], call_args[0])
-            self.assertEqual(expected[1], call_args[1])
-
-
-    def test_side_effect_iterator(self):
-        mock = Mock(side_effect=iter([1, 2, 3]))
-        self.assertEqual([mock(), mock(), mock()], [1, 2, 3])
-        self.assertRaises(StopIteration, mock)
-
-        mock = MagicMock(side_effect=['a', 'b', 'c'])
-        self.assertEqual([mock(), mock(), mock()], ['a', 'b', 'c'])
-        self.assertRaises(StopIteration, mock)
-
-        mock = Mock(side_effect='ghi')
-        self.assertEqual([mock(), mock(), mock()], ['g', 'h', 'i'])
-        self.assertRaises(StopIteration, mock)
-
-        class Foo(object):
-            pass
-        mock = MagicMock(side_effect=Foo)
-        self.assertIsInstance(mock(), Foo)
-
-        mock = Mock(side_effect=Iter())
-        self.assertEqual([mock(), mock(), mock(), mock()],
-                         ['this', 'is', 'an', 'iter'])
-        self.assertRaises(StopIteration, mock)
-
-
-    def test_side_effect_setting_iterator(self):
-        mock = Mock()
-        mock.side_effect = iter([1, 2, 3])
-        self.assertEqual([mock(), mock(), mock()], [1, 2, 3])
-        self.assertRaises(StopIteration, mock)
-        side_effect = mock.side_effect
-        self.assertIsInstance(side_effect, type(iter([])))
-
-        mock.side_effect = ['a', 'b', 'c']
-        self.assertEqual([mock(), mock(), mock()], ['a', 'b', 'c'])
-        self.assertRaises(StopIteration, mock)
-        side_effect = mock.side_effect
-        self.assertIsInstance(side_effect, type(iter([])))
-
-        this_iter = Iter()
-        mock.side_effect = this_iter
-        self.assertEqual([mock(), mock(), mock(), mock()],
-                         ['this', 'is', 'an', 'iter'])
-        self.assertRaises(StopIteration, mock)
-        self.assertIs(mock.side_effect, this_iter)
-
-
-    def test_side_effect_iterator_exceptions(self):
-        for Klass in Mock, MagicMock:
-            iterable = (ValueError, 3, KeyError, 6)
-            m = Klass(side_effect=iterable)
-            self.assertRaises(ValueError, m)
-            self.assertEqual(m(), 3)
-            self.assertRaises(KeyError, m)
-            self.assertEqual(m(), 6)
-
-
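-    def test_side_effect_retry_sketch(self):
-        # editor's sketch, not in the upstream mock test suite: a common use
-        # of an iterable side_effect is simulating a call that fails once
-        # and then succeeds.
-        mock = Mock(side_effect=[IOError('transient'), 'ok'])
-
-        self.assertRaises(IOError, mock)
-        self.assertEqual(mock(), 'ok')
-        self.assertEqual(mock.call_count, 2)
-
-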
-    def test_assert_has_calls_any_order(self):
-        mock = Mock()
-        mock(1, 2)
-        mock(a=3)
-        mock(3, 4)
-        mock(b=6)
-        mock(b=6)
-
-        kalls = [
-            call(1, 2), ({'a': 3},),
-            ((3, 4),), ((), {'a': 3}),
-            ('', (1, 2)), ('', {'a': 3}),
-            ('', (1, 2), {}), ('', (), {'a': 3})
-        ]
-        for kall in kalls:
-            mock.assert_has_calls([kall], any_order=True)
-
-        for kall in call(1, '2'), call(b=3), call(), 3, None, 'foo':
-            self.assertRaises(
-                AssertionError, mock.assert_has_calls,
-                [kall], any_order=True
-            )
-
-        kall_lists = [
-            [call(1, 2), call(b=6)],
-            [call(3, 4), call(1, 2)],
-            [call(b=6), call(b=6)],
-        ]
-
-        for kall_list in kall_lists:
-            mock.assert_has_calls(kall_list, any_order=True)
-
-        kall_lists = [
-            [call(b=6), call(b=6), call(b=6)],
-            [call(1, 2), call(1, 2)],
-            [call(3, 4), call(1, 2), call(5, 7)],
-            [call(b=6), call(3, 4), call(b=6), call(1, 2), call(b=6)],
-        ]
-        for kall_list in kall_lists:
-            self.assertRaises(
-                AssertionError, mock.assert_has_calls,
-                kall_list, any_order=True
-            )
-
-
-    def test_assert_has_calls(self):
-        kalls1 = [
-                call(1, 2), ({'a': 3},),
-                ((3, 4),), call(b=6),
-                ('', (1,), {'b': 6}),
-        ]
-        kalls2 = [call.foo(), call.bar(1)]
-        kalls2.extend(call.spam().baz(a=3).call_list())
-        kalls2.extend(call.bam(set(), foo={}).fish([1]).call_list())
-
-        mocks = []
-        for mock in Mock(), MagicMock():
-            mock(1, 2)
-            mock(a=3)
-            mock(3, 4)
-            mock(b=6)
-            mock(1, b=6)
-            mocks.append((mock, kalls1))
-
-        mock = Mock()
-        mock.foo()
-        mock.bar(1)
-        mock.spam().baz(a=3)
-        mock.bam(set(), foo={}).fish([1])
-        mocks.append((mock, kalls2))
-
-        for mock, kalls in mocks:
-            for i in range(len(kalls)):
-                for step in 1, 2, 3:
-                    these = kalls[i:i+step]
-                    mock.assert_has_calls(these)
-
-                    if len(these) > 1:
-                        self.assertRaises(
-                            AssertionError,
-                            mock.assert_has_calls,
-                            list(reversed(these))
-                        )
-
-
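-    def test_assert_has_calls_subsequence_sketch(self):
-        # editor's sketch, not in the upstream mock test suite: without
-        # any_order, assert_has_calls checks for a *consecutive* run of
-        # calls within mock_calls, not an exact or scattered match.
-        mock = Mock()
-        mock(1)
-        mock(2)
-        mock(3)
-
-        mock.assert_has_calls([call(2), call(3)])
-        self.assertRaises(AssertionError,
-                          mock.assert_has_calls, [call(1), call(3)])
-
-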
-    def test_assert_any_call(self):
-        mock = Mock()
-        mock(1, 2)
-        mock(a=3)
-        mock(1, b=6)
-
-        mock.assert_any_call(1, 2)
-        mock.assert_any_call(a=3)
-        mock.assert_any_call(1, b=6)
-
-        self.assertRaises(
-            AssertionError,
-            mock.assert_any_call
-        )
-        self.assertRaises(
-            AssertionError,
-            mock.assert_any_call,
-            1, 3
-        )
-        self.assertRaises(
-            AssertionError,
-            mock.assert_any_call,
-            a=4
-        )
-
-
-    def test_mock_calls_create_autospec(self):
-        def f(a, b):
-            pass
-        obj = Iter()
-        obj.f = f
-
-        funcs = [
-            create_autospec(f),
-            create_autospec(obj).f
-        ]
-        for func in funcs:
-            func(1, 2)
-            func(3, 4)
-
-            self.assertEqual(
-                func.mock_calls, [call(1, 2), call(3, 4)]
-            )
-
-
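-    def test_autospec_signature_sketch(self):
-        # editor's sketch, not in the upstream mock test suite:
-        # create_autospec also enforces the signature of the spec'd
-        # callable, unlike a bare Mock.
-        def f(a, b):
-            pass
-
-        func = create_autospec(f)
-        func(1, 2)
-        func.assert_called_once_with(1, 2)
-        self.assertRaises(TypeError, func, 1, 2, 3)
-
-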
-    def test_mock_add_spec(self):
-        class _One(object):
-            one = 1
-        class _Two(object):
-            two = 2
-        class Anything(object):
-            one = two = three = 'four'
-
-        klasses = [
-            Mock, MagicMock, NonCallableMock, NonCallableMagicMock
-        ]
-        for Klass in list(klasses):
-            klasses.append(lambda K=Klass: K(spec=Anything))
-            klasses.append(lambda K=Klass: K(spec_set=Anything))
-
-        for Klass in klasses:
-            for kwargs in dict(), dict(spec_set=True):
-                mock = Klass()
-                # no error
-                mock.one, mock.two, mock.three
-
-                for One, Two in [(_One, _Two), (['one'], ['two'])]:
-                    for kwargs in dict(), dict(spec_set=True):
-                        mock.mock_add_spec(One, **kwargs)
-
-                        mock.one
-                        self.assertRaises(
-                            AttributeError, getattr, mock, 'two'
-                        )
-                        self.assertRaises(
-                            AttributeError, getattr, mock, 'three'
-                        )
-                        if 'spec_set' in kwargs:
-                            self.assertRaises(
-                                AttributeError, setattr, mock, 'three', None
-                            )
-
-                        mock.mock_add_spec(Two, **kwargs)
-                        self.assertRaises(
-                            AttributeError, getattr, mock, 'one'
-                        )
-                        mock.two
-                        self.assertRaises(
-                            AttributeError, getattr, mock, 'three'
-                        )
-                        if 'spec_set' in kwargs:
-                            self.assertRaises(
-                                AttributeError, setattr, mock, 'three', None
-                            )
-            # note that creating a mock, setting an instance attribute, and
-            # *then* setting a spec doesn't work. Not the intended use case
-
-
-    def test_mock_add_spec_magic_methods(self):
-        for Klass in MagicMock, NonCallableMagicMock:
-            mock = Klass()
-            int(mock)
-
-            mock.mock_add_spec(object)
-            self.assertRaises(TypeError, int, mock)
-
-            mock = Klass()
-            mock['foo']
-            mock.__int__.return_value = 4
-
-            mock.mock_add_spec(int)
-            self.assertEqual(int(mock), 4)
-            self.assertRaises(TypeError, lambda: mock['foo'])
-
-
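-    def test_spec_attribute_error_sketch(self):
-        # editor's sketch, not in the upstream mock test suite: a spec turns
-        # access to undeclared attributes into an immediate AttributeError
-        # instead of silently returning a new child mock.
-        mock = Mock(spec=Thing)
-        mock.attribute
-        self.assertRaises(AttributeError, getattr, mock, 'missing')
-
-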
-    def test_adding_child_mock(self):
-        for Klass in NonCallableMock, Mock, MagicMock, NonCallableMagicMock:
-            mock = Klass()
-
-            mock.foo = Mock()
-            mock.foo()
-
-            self.assertEqual(mock.method_calls, [call.foo()])
-            self.assertEqual(mock.mock_calls, [call.foo()])
-
-            mock = Klass()
-            mock.bar = Mock(name='name')
-            mock.bar()
-            self.assertEqual(mock.method_calls, [])
-            self.assertEqual(mock.mock_calls, [])
-
-            # mock with an existing _new_parent but no name
-            mock = Klass()
-            mock.baz = MagicMock()()
-            mock.baz()
-            self.assertEqual(mock.method_calls, [])
-            self.assertEqual(mock.mock_calls, [])
-
-
-    def test_adding_return_value_mock(self):
-        for Klass in Mock, MagicMock:
-            mock = Klass()
-            mock.return_value = MagicMock()
-
-            mock()()
-            self.assertEqual(mock.mock_calls, [call(), call()()])
-
-
-    def test_manager_mock(self):
-        class Foo(object):
-            one = 'one'
-            two = 'two'
-        manager = Mock()
-        p1 = patch.object(Foo, 'one')
-        p2 = patch.object(Foo, 'two')
-
-        mock_one = p1.start()
-        self.addCleanup(p1.stop)
-        mock_two = p2.start()
-        self.addCleanup(p2.stop)
-
-        manager.attach_mock(mock_one, 'one')
-        manager.attach_mock(mock_two, 'two')
-
-        Foo.two()
-        Foo.one()
-
-        self.assertEqual(manager.mock_calls, [call.two(), call.one()])
-
-
-    def test_magic_methods_mock_calls(self):
-        for Klass in Mock, MagicMock:
-            m = Klass()
-            m.__int__ = Mock(return_value=3)
-            m.__float__ = MagicMock(return_value=3.0)
-            int(m)
-            float(m)
-
-            self.assertEqual(m.mock_calls, [call.__int__(), call.__float__()])
-            self.assertEqual(m.method_calls, [])
-
-
-    def test_attribute_deletion(self):
-        # this behaviour isn't *useful*, but at least it's now tested...
-        for Klass in Mock, MagicMock, NonCallableMagicMock, NonCallableMock:
-            m = Klass()
-            original = m.foo
-            m.foo = 3
-            del m.foo
-            self.assertEqual(m.foo, original)
-
-            new = m.foo = Mock()
-            del m.foo
-            self.assertEqual(m.foo, new)
-
-
-    def test_mock_parents(self):
-        for Klass in Mock, MagicMock:
-            m = Klass()
-            original_repr = repr(m)
-            m.return_value = m
-            self.assertIs(m(), m)
-            self.assertEqual(repr(m), original_repr)
-
-            m.reset_mock()
-            self.assertIs(m(), m)
-            self.assertEqual(repr(m), original_repr)
-
-            m = Klass()
-            m.b = m.a
-            self.assertIn("name='mock.a'", repr(m.b))
-            self.assertIn("name='mock.a'", repr(m.a))
-            m.reset_mock()
-            self.assertIn("name='mock.a'", repr(m.b))
-            self.assertIn("name='mock.a'", repr(m.a))
-
-            m = Klass()
-            original_repr = repr(m)
-            m.a = m()
-            m.a.return_value = m
-
-            self.assertEqual(repr(m), original_repr)
-            self.assertEqual(repr(m.a()), original_repr)
-
-
-    def test_attach_mock(self):
-        classes = Mock, MagicMock, NonCallableMagicMock, NonCallableMock
-        for Klass in classes:
-            for Klass2 in classes:
-                m = Klass()
-
-                m2 = Klass2(name='foo')
-                m.attach_mock(m2, 'bar')
-
-                self.assertIs(m.bar, m2)
-                self.assertIn("name='mock.bar'", repr(m2))
-
-                m.bar.baz(1)
-                self.assertEqual(m.mock_calls, [call.bar.baz(1)])
-                self.assertEqual(m.method_calls, [call.bar.baz(1)])
-
-
-    def test_attach_mock_return_value(self):
-        classes = Mock, MagicMock, NonCallableMagicMock, NonCallableMock
-        for Klass in Mock, MagicMock:
-            for Klass2 in classes:
-                m = Klass()
-
-                m2 = Klass2(name='foo')
-                m.attach_mock(m2, 'return_value')
-
-                self.assertIs(m(), m2)
-                self.assertIn("name='mock()'", repr(m2))
-
-                m2.foo()
-                self.assertEqual(m.mock_calls, call().foo().call_list())
-
-
-    def test_attribute_deletion(self):
-        for mock in Mock(), MagicMock():
-            self.assertTrue(hasattr(mock, 'm'))
-
-            del mock.m
-            self.assertFalse(hasattr(mock, 'm'))
-
-            del mock.f
-            self.assertFalse(hasattr(mock, 'f'))
-            self.assertRaises(AttributeError, getattr, mock, 'f')
-
-
-    def test_class_assignable(self):
-        for mock in Mock(), MagicMock():
-            self.assertNotIsInstance(mock, int)
-
-            mock.__class__ = int
-            self.assertIsInstance(mock, int)
-
-
-    @unittest2.expectedFailure
-    def test_pickle(self):
-        for Klass in (MagicMock, Mock, Subclass, NonCallableMagicMock):
-            mock = Klass(name='foo', attribute=3)
-            mock.foo(1, 2, 3)
-            data = pickle.dumps(mock)
-            new = pickle.loads(data)
-
-            new.foo.assert_called_once_with(1, 2, 3)
-            self.assertFalse(new.called)
-            self.assertTrue(is_instance(new, Klass))
-            self.assertIsInstance(new, Thing)
-            self.assertIn('name="foo"', repr(new))
-            self.assertEqual(new.attribute, 3)
-
-
-if __name__ == '__main__':
-    unittest2.main()
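The tail of the removed testmock.py exercises call recording across attached
children (attach_mock, mock_calls, method_calls). A minimal sketch of that
pattern, assuming the mock API the tests import; the manager/db names are
made up for illustration:

    from mock import MagicMock, call

    # attach_mock() reparents a child so its calls are recorded on the manager
    manager = MagicMock()
    db = MagicMock()
    manager.attach_mock(db, 'db')

    manager.db.connect('host')
    assert manager.mock_calls == [call.db.connect('host')]
    assert manager.method_calls == [call.db.connect('host')]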
diff --git a/branch-1.2/ambari-common/src/test/python/mock/tests/testpatch.py b/branch-1.2/ambari-common/src/test/python/mock/tests/testpatch.py
deleted file mode 100644
index 8eb719b..0000000
--- a/branch-1.2/ambari-common/src/test/python/mock/tests/testpatch.py
+++ /dev/null
@@ -1,1815 +0,0 @@
-# Copyright (C) 2007-2012 Michael Foord & the mock team
-# E-mail: fuzzyman AT voidspace DOT org DOT uk
-# http://www.voidspace.org.uk/python/mock/
-
-import os
-import sys
-
-from tests import support
-from tests.support import unittest2, inPy3k, SomeClass, is_instance, callable
-
-from mock import (
-    NonCallableMock, CallableMixin, patch, sentinel,
-    MagicMock, Mock, NonCallableMagicMock, _patch,
-    DEFAULT, call, _get_target
-)
-
-builtin_string = '__builtin__'
-if inPy3k:
-    builtin_string = 'builtins'
-    unicode = str
-
-PTModule = sys.modules[__name__]
-MODNAME = '%s.PTModule' % __name__
-
-
-def _get_proxy(obj, get_only=True):
-    class Proxy(object):
-        def __getattr__(self, name):
-            return getattr(obj, name)
-    if not get_only:
-        def __setattr__(self, name, value):
-            setattr(obj, name, value)
-        def __delattr__(self, name):
-            delattr(obj, name)
-        Proxy.__setattr__ = __setattr__
-        Proxy.__delattr__ = __delattr__
-    return Proxy()
-
-
-# for use in the test
-something = sentinel.Something
-something_else = sentinel.SomethingElse
-
-
-class Foo(object):
-    def __init__(self, a):
-        pass
-    def f(self, a):
-        pass
-    def g(self):
-        pass
-    foo = 'bar'
-
-    class Bar(object):
-        def a(self):
-            pass
-
-foo_name = '%s.Foo' % __name__
-
-
-def function(a, b=Foo):
-    pass
-
-
-class Container(object):
-    def __init__(self):
-        self.values = {}
-
-    def __getitem__(self, name):
-        return self.values[name]
-
-    def __setitem__(self, name, value):
-        self.values[name] = value
-
-    def __delitem__(self, name):
-        del self.values[name]
-
-    def __iter__(self):
-        return iter(self.values)
-
-
-
-class PatchTest(unittest2.TestCase):
-
-    def assertNotCallable(self, obj, magic=True):
-        MockClass = NonCallableMagicMock
-        if not magic:
-            MockClass = NonCallableMock
-
-        self.assertRaises(TypeError, obj)
-        self.assertTrue(is_instance(obj, MockClass))
-        self.assertFalse(is_instance(obj, CallableMixin))
-
-
-    def test_single_patchobject(self):
-        class Something(object):
-            attribute = sentinel.Original
-
-        @patch.object(Something, 'attribute', sentinel.Patched)
-        def test():
-            self.assertEqual(Something.attribute, sentinel.Patched, "unpatched")
-
-        test()
-        self.assertEqual(Something.attribute, sentinel.Original,
-                         "patch not restored")
-
-
-    def test_patchobject_with_none(self):
-        class Something(object):
-            attribute = sentinel.Original
-
-        @patch.object(Something, 'attribute', None)
-        def test():
-            self.assertIsNone(Something.attribute, "unpatched")
-
-        test()
-        self.assertEqual(Something.attribute, sentinel.Original,
-                         "patch not restored")
-
-
-    def test_multiple_patchobject(self):
-        class Something(object):
-            attribute = sentinel.Original
-            next_attribute = sentinel.Original2
-
-        @patch.object(Something, 'attribute', sentinel.Patched)
-        @patch.object(Something, 'next_attribute', sentinel.Patched2)
-        def test():
-            self.assertEqual(Something.attribute, sentinel.Patched,
-                             "unpatched")
-            self.assertEqual(Something.next_attribute, sentinel.Patched2,
-                             "unpatched")
-
-        test()
-        self.assertEqual(Something.attribute, sentinel.Original,
-                         "patch not restored")
-        self.assertEqual(Something.next_attribute, sentinel.Original2,
-                         "patch not restored")
-
-
-    def test_object_lookup_is_quite_lazy(self):
-        global something
-        original = something
-        @patch('%s.something' % __name__, sentinel.Something2)
-        def test():
-            pass
-
-        try:
-            something = sentinel.replacement_value
-            test()
-            self.assertEqual(something, sentinel.replacement_value)
-        finally:
-            something = original
-
-
-    def test_patch(self):
-        @patch('%s.something' % __name__, sentinel.Something2)
-        def test():
-            self.assertEqual(PTModule.something, sentinel.Something2,
-                             "unpatched")
-
-        test()
-        self.assertEqual(PTModule.something, sentinel.Something,
-                         "patch not restored")
-
-        @patch('%s.something' % __name__, sentinel.Something2)
-        @patch('%s.something_else' % __name__, sentinel.SomethingElse)
-        def test():
-            self.assertEqual(PTModule.something, sentinel.Something2,
-                             "unpatched")
-            self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
-                             "unpatched")
-
-        self.assertEqual(PTModule.something, sentinel.Something,
-                         "patch not restored")
-        self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
-                         "patch not restored")
-
-        # Test the patching and restoring works a second time
-        test()
-
-        self.assertEqual(PTModule.something, sentinel.Something,
-                         "patch not restored")
-        self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
-                         "patch not restored")
-
-        mock = Mock()
-        mock.return_value = sentinel.Handle
-        @patch('%s.open' % builtin_string, mock)
-        def test():
-            self.assertEqual(open('filename', 'r'), sentinel.Handle,
-                             "open not patched")
-        test()
-        test()
-
-        self.assertNotEqual(open, mock, "patch not restored")
-
-
-    def test_patch_class_attribute(self):
-        @patch('%s.SomeClass.class_attribute' % __name__,
-               sentinel.ClassAttribute)
-        def test():
-            self.assertEqual(PTModule.SomeClass.class_attribute,
-                             sentinel.ClassAttribute, "unpatched")
-        test()
-
-        self.assertIsNone(PTModule.SomeClass.class_attribute,
-                          "patch not restored")
-
-
-    def test_patchobject_with_default_mock(self):
-        class Test(object):
-            something = sentinel.Original
-            something2 = sentinel.Original2
-
-        @patch.object(Test, 'something')
-        def test(mock):
-            self.assertEqual(mock, Test.something,
-                             "Mock not passed into test function")
-            self.assertIsInstance(mock, MagicMock,
-                            "patch with two arguments did not create a mock")
-
-        test()
-
-        @patch.object(Test, 'something')
-        @patch.object(Test, 'something2')
-        def test(this1, this2, mock1, mock2):
-            self.assertEqual(this1, sentinel.this1,
-                             "Patched function didn't receive initial argument")
-            self.assertEqual(this2, sentinel.this2,
-                             "Patched function didn't receive second argument")
-            self.assertEqual(mock1, Test.something2,
-                             "Mock not passed into test function")
-            self.assertEqual(mock2, Test.something,
-                             "Second Mock not passed into test function")
-            self.assertIsInstance(mock1, MagicMock,
-                            "patch with two arguments did not create a mock")
-            self.assertIsInstance(mock2, MagicMock,
-                            "patch with two arguments did not create a mock")
-
-            # A hack to test that new mocks are passed the second time
-            self.assertNotEqual(outerMock1, mock1, "unexpected value for mock1")
-            self.assertNotEqual(outerMock2, mock2, "unexpected value for mock2")
-            return mock1, mock2
-
-        outerMock1 = outerMock2 = None
-        outerMock1, outerMock2 = test(sentinel.this1, sentinel.this2)
-
-        # Test that executing a second time creates new mocks
-        test(sentinel.this1, sentinel.this2)
-
-
-    def test_patch_with_spec(self):
-        @patch('%s.SomeClass' % __name__, spec=SomeClass)
-        def test(MockSomeClass):
-            self.assertEqual(SomeClass, MockSomeClass)
-            self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
-            self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
-
-        test()
-
-
-    def test_patchobject_with_spec(self):
-        @patch.object(SomeClass, 'class_attribute', spec=SomeClass)
-        def test(MockAttribute):
-            self.assertEqual(SomeClass.class_attribute, MockAttribute)
-            self.assertTrue(is_instance(SomeClass.class_attribute.wibble,
-                                       MagicMock))
-            self.assertRaises(AttributeError,
-                              lambda: SomeClass.class_attribute.not_wibble)
-
-        test()
-
-
-    def test_patch_with_spec_as_list(self):
-        @patch('%s.SomeClass' % __name__, spec=['wibble'])
-        def test(MockSomeClass):
-            self.assertEqual(SomeClass, MockSomeClass)
-            self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
-            self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
-
-        test()
-
-
-    def test_patchobject_with_spec_as_list(self):
-        @patch.object(SomeClass, 'class_attribute', spec=['wibble'])
-        def test(MockAttribute):
-            self.assertEqual(SomeClass.class_attribute, MockAttribute)
-            self.assertTrue(is_instance(SomeClass.class_attribute.wibble,
-                                       MagicMock))
-            self.assertRaises(AttributeError,
-                              lambda: SomeClass.class_attribute.not_wibble)
-
-        test()
-
-
-    def test_nested_patch_with_spec_as_list(self):
-        # regression test for nested decorators
-        @patch('%s.open' % builtin_string)
-        @patch('%s.SomeClass' % __name__, spec=['wibble'])
-        def test(MockSomeClass, MockOpen):
-            self.assertEqual(SomeClass, MockSomeClass)
-            self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
-            self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
-        test()
-
-
-    def test_patch_with_spec_as_boolean(self):
-        @patch('%s.SomeClass' % __name__, spec=True)
-        def test(MockSomeClass):
-            self.assertEqual(SomeClass, MockSomeClass)
-            # Should not raise attribute error
-            MockSomeClass.wibble
-
-            self.assertRaises(AttributeError, lambda: MockSomeClass.not_wibble)
-
-        test()
-
-
-    def test_patch_object_with_spec_as_boolean(self):
-        @patch.object(PTModule, 'SomeClass', spec=True)
-        def test(MockSomeClass):
-            self.assertEqual(SomeClass, MockSomeClass)
-            # Should not raise attribute error
-            MockSomeClass.wibble
-
-            self.assertRaises(AttributeError, lambda: MockSomeClass.not_wibble)
-
-        test()
-
-
-    def test_patch_class_acts_with_spec_is_inherited(self):
-        @patch('%s.SomeClass' % __name__, spec=True)
-        def test(MockSomeClass):
-            self.assertTrue(is_instance(MockSomeClass, MagicMock))
-            instance = MockSomeClass()
-            self.assertNotCallable(instance)
-            # Should not raise attribute error
-            instance.wibble
-
-            self.assertRaises(AttributeError, lambda: instance.not_wibble)
-
-        test()
-
-
-    def test_patch_with_create_mocks_non_existent_attributes(self):
-        @patch('%s.frooble' % builtin_string, sentinel.Frooble, create=True)
-        def test():
-            self.assertEqual(frooble, sentinel.Frooble)
-
-        test()
-        self.assertRaises(NameError, lambda: frooble)
-
-
-    def test_patchobject_with_create_mocks_non_existent_attributes(self):
-        @patch.object(SomeClass, 'frooble', sentinel.Frooble, create=True)
-        def test():
-            self.assertEqual(SomeClass.frooble, sentinel.Frooble)
-
-        test()
-        self.assertFalse(hasattr(SomeClass, 'frooble'))
-
-
-    def test_patch_wont_create_by_default(self):
-        try:
-            @patch('%s.frooble' % builtin_string, sentinel.Frooble)
-            def test():
-                self.assertEqual(frooble, sentinel.Frooble)
-
-            test()
-        except AttributeError:
-            pass
-        else:
-            self.fail('Patching non existent attributes should fail')
-
-        self.assertRaises(NameError, lambda: frooble)
-
-
-    def test_patchobject_wont_create_by_default(self):
-        try:
-            @patch.object(SomeClass, 'frooble', sentinel.Frooble)
-            def test():
-                self.fail('Patching non existent attributes should fail')
-
-            test()
-        except AttributeError:
-            pass
-        else:
-            self.fail('Patching non existent attributes should fail')
-        self.assertFalse(hasattr(SomeClass, 'frooble'))
-
-
-    def test_patch_with_static_methods(self):
-        class Foo(object):
-            @staticmethod
-            def woot():
-                return sentinel.Static
-
-        @patch.object(Foo, 'woot', staticmethod(lambda: sentinel.Patched))
-        def anonymous():
-            self.assertEqual(Foo.woot(), sentinel.Patched)
-        anonymous()
-
-        self.assertEqual(Foo.woot(), sentinel.Static)
-
-
-    def test_patch_local(self):
-        foo = sentinel.Foo
-        @patch.object(sentinel, 'Foo', 'Foo')
-        def anonymous():
-            self.assertEqual(sentinel.Foo, 'Foo')
-        anonymous()
-
-        self.assertEqual(sentinel.Foo, foo)
-
-
-    def test_patch_slots(self):
-        class Foo(object):
-            __slots__ = ('Foo',)
-
-        foo = Foo()
-        foo.Foo = sentinel.Foo
-
-        @patch.object(foo, 'Foo', 'Foo')
-        def anonymous():
-            self.assertEqual(foo.Foo, 'Foo')
-        anonymous()
-
-        self.assertEqual(foo.Foo, sentinel.Foo)
-
-
-    def test_patchobject_class_decorator(self):
-        class Something(object):
-            attribute = sentinel.Original
-
-        class Foo(object):
-            def test_method(other_self):
-                self.assertEqual(Something.attribute, sentinel.Patched,
-                                 "unpatched")
-            def not_test_method(other_self):
-                self.assertEqual(Something.attribute, sentinel.Original,
-                                 "non-test method patched")
-
-        Foo = patch.object(Something, 'attribute', sentinel.Patched)(Foo)
-
-        f = Foo()
-        f.test_method()
-        f.not_test_method()
-
-        self.assertEqual(Something.attribute, sentinel.Original,
-                         "patch not restored")
-
-
-    def test_patch_class_decorator(self):
-        class Something(object):
-            attribute = sentinel.Original
-
-        class Foo(object):
-            def test_method(other_self, mock_something):
-                self.assertEqual(PTModule.something, mock_something,
-                                 "unpatched")
-            def not_test_method(other_self):
-                self.assertEqual(PTModule.something, sentinel.Something,
-                                 "non-test method patched")
-        Foo = patch('%s.something' % __name__)(Foo)
-
-        f = Foo()
-        f.test_method()
-        f.not_test_method()
-
-        self.assertEqual(Something.attribute, sentinel.Original,
-                         "patch not restored")
-        self.assertEqual(PTModule.something, sentinel.Something,
-                         "patch not restored")
-
-
-    def test_patchobject_twice(self):
-        class Something(object):
-            attribute = sentinel.Original
-            next_attribute = sentinel.Original2
-
-        @patch.object(Something, 'attribute', sentinel.Patched)
-        @patch.object(Something, 'attribute', sentinel.Patched)
-        def test():
-            self.assertEqual(Something.attribute, sentinel.Patched, "unpatched")
-
-        test()
-
-        self.assertEqual(Something.attribute, sentinel.Original,
-                         "patch not restored")
-
-
-    def test_patch_dict(self):
-        foo = {'initial': object(), 'other': 'something'}
-        original = foo.copy()
-
-        @patch.dict(foo)
-        def test():
-            foo['a'] = 3
-            del foo['initial']
-            foo['other'] = 'something else'
-
-        test()
-
-        self.assertEqual(foo, original)
-
-        @patch.dict(foo, {'a': 'b'})
-        def test():
-            self.assertEqual(len(foo), 3)
-            self.assertEqual(foo['a'], 'b')
-
-        test()
-
-        self.assertEqual(foo, original)
-
-        @patch.dict(foo, [('a', 'b')])
-        def test():
-            self.assertEqual(len(foo), 3)
-            self.assertEqual(foo['a'], 'b')
-
-        test()
-
-        self.assertEqual(foo, original)
-
-
-    def test_patch_dict_with_container_object(self):
-        foo = Container()
-        foo['initial'] = object()
-        foo['other'] = 'something'
-
-        original = foo.values.copy()
-
-        @patch.dict(foo)
-        def test():
-            foo['a'] = 3
-            del foo['initial']
-            foo['other'] = 'something else'
-
-        test()
-
-        self.assertEqual(foo.values, original)
-
-        @patch.dict(foo, {'a': 'b'})
-        def test():
-            self.assertEqual(len(foo.values), 3)
-            self.assertEqual(foo['a'], 'b')
-
-        test()
-
-        self.assertEqual(foo.values, original)
-
-
-    def test_patch_dict_with_clear(self):
-        foo = {'initial': object(), 'other': 'something'}
-        original = foo.copy()
-
-        @patch.dict(foo, clear=True)
-        def test():
-            self.assertEqual(foo, {})
-            foo['a'] = 3
-            foo['other'] = 'something else'
-
-        test()
-
-        self.assertEqual(foo, original)
-
-        @patch.dict(foo, {'a': 'b'}, clear=True)
-        def test():
-            self.assertEqual(foo, {'a': 'b'})
-
-        test()
-
-        self.assertEqual(foo, original)
-
-        @patch.dict(foo, [('a', 'b')], clear=True)
-        def test():
-            self.assertEqual(foo, {'a': 'b'})
-
-        test()
-
-        self.assertEqual(foo, original)
-
-
-    def test_patch_dict_with_container_object_and_clear(self):
-        foo = Container()
-        foo['initial'] = object()
-        foo['other'] = 'something'
-
-        original = foo.values.copy()
-
-        @patch.dict(foo, clear=True)
-        def test():
-            self.assertEqual(foo.values, {})
-            foo['a'] = 3
-            foo['other'] = 'something else'
-
-        test()
-
-        self.assertEqual(foo.values, original)
-
-        @patch.dict(foo, {'a': 'b'}, clear=True)
-        def test():
-            self.assertEqual(foo.values, {'a': 'b'})
-
-        test()
-
-        self.assertEqual(foo.values, original)
-
-
-    def test_name_preserved(self):
-        foo = {}
-
-        @patch('%s.SomeClass' % __name__, object())
-        @patch('%s.SomeClass' % __name__, object(), autospec=True)
-        @patch.object(SomeClass, object())
-        @patch.dict(foo)
-        def some_name():
-            pass
-
-        self.assertEqual(some_name.__name__, 'some_name')
-
-
-    def test_patch_with_exception(self):
-        foo = {}
-
-        @patch.dict(foo, {'a': 'b'})
-        def test():
-            raise NameError('Konrad')
-        try:
-            test()
-        except NameError:
-            pass
-        else:
-            self.fail('NameError not raised by test')
-
-        self.assertEqual(foo, {})
-
-
-    def test_patch_dict_with_string(self):
-        @patch.dict('os.environ', {'konrad_delong': 'some value'})
-        def test():
-            self.assertIn('konrad_delong', os.environ)
-
-        test()
-
-
-    @unittest2.expectedFailure
-    def test_patch_descriptor(self):
-        # would be some effort to fix this - we could special case the
-        # builtin descriptors: classmethod, property, staticmethod
-        class Nothing(object):
-            foo = None
-
-        class Something(object):
-            foo = {}
-
-            @patch.object(Nothing, 'foo', 2)
-            @classmethod
-            def klass(cls):
-                self.assertIs(cls, Something)
-
-            @patch.object(Nothing, 'foo', 2)
-            @staticmethod
-            def static(arg):
-                return arg
-
-            @patch.dict(foo)
-            @classmethod
-            def klass_dict(cls):
-                self.assertIs(cls, Something)
-
-            @patch.dict(foo)
-            @staticmethod
-            def static_dict(arg):
-                return arg
-
-        # these will raise exceptions if patching descriptors is broken
-        self.assertEqual(Something.static('f00'), 'f00')
-        Something.klass()
-        self.assertEqual(Something.static_dict('f00'), 'f00')
-        Something.klass_dict()
-
-        something = Something()
-        self.assertEqual(something.static('f00'), 'f00')
-        something.klass()
-        self.assertEqual(something.static_dict('f00'), 'f00')
-        something.klass_dict()
-
-
-    def test_patch_spec_set(self):
-        @patch('%s.SomeClass' % __name__, spec_set=SomeClass)
-        def test(MockClass):
-            MockClass.z = 'foo'
-
-        self.assertRaises(AttributeError, test)
-
-        @patch.object(support, 'SomeClass', spec_set=SomeClass)
-        def test(MockClass):
-            MockClass.z = 'foo'
-
-        self.assertRaises(AttributeError, test)
-        @patch('%s.SomeClass' % __name__, spec_set=True)
-        def test(MockClass):
-            MockClass.z = 'foo'
-
-        self.assertRaises(AttributeError, test)
-
-        @patch.object(support, 'SomeClass', spec_set=True)
-        def test(MockClass):
-            MockClass.z = 'foo'
-
-        self.assertRaises(AttributeError, test)
-
-
-    def test_spec_set_inherit(self):
-        @patch('%s.SomeClass' % __name__, spec_set=True)
-        def test(MockClass):
-            instance = MockClass()
-            instance.z = 'foo'
-
-        self.assertRaises(AttributeError, test)
-
-
-    def test_patch_start_stop(self):
-        original = something
-        patcher = patch('%s.something' % __name__)
-        self.assertIs(something, original)
-        mock = patcher.start()
-        try:
-            self.assertIsNot(mock, original)
-            self.assertIs(something, mock)
-        finally:
-            patcher.stop()
-        self.assertIs(something, original)
-
-
-    def test_stop_without_start(self):
-        patcher = patch(foo_name, 'bar', 3)
-
-        # calling stop without start used to produce a very obscure error
-        self.assertRaises(RuntimeError, patcher.stop)
-
-
-    def test_patchobject_start_stop(self):
-        original = something
-        patcher = patch.object(PTModule, 'something', 'foo')
-        self.assertIs(something, original)
-        replaced = patcher.start()
-        try:
-            self.assertEqual(replaced, 'foo')
-            self.assertIs(something, replaced)
-        finally:
-            patcher.stop()
-        self.assertIs(something, original)
-
-
-    def test_patch_dict_start_stop(self):
-        d = {'foo': 'bar'}
-        original = d.copy()
-        patcher = patch.dict(d, [('spam', 'eggs')], clear=True)
-        self.assertEqual(d, original)
-
-        patcher.start()
-        try:
-            self.assertEqual(d, {'spam': 'eggs'})
-        finally:
-            patcher.stop()
-        self.assertEqual(d, original)
-
-
-    def test_patch_dict_class_decorator(self):
-        this = self
-        d = {'spam': 'eggs'}
-        original = d.copy()
-
-        class Test(object):
-            def test_first(self):
-                this.assertEqual(d, {'foo': 'bar'})
-            def test_second(self):
-                this.assertEqual(d, {'foo': 'bar'})
-
-        Test = patch.dict(d, {'foo': 'bar'}, clear=True)(Test)
-        self.assertEqual(d, original)
-
-        test = Test()
-
-        test.test_first()
-        self.assertEqual(d, original)
-
-        test.test_second()
-        self.assertEqual(d, original)
-
-        test = Test()
-
-        test.test_first()
-        self.assertEqual(d, original)
-
-        test.test_second()
-        self.assertEqual(d, original)
-
-
-    def test_get_only_proxy(self):
-        class Something(object):
-            foo = 'foo'
-        class SomethingElse:
-            foo = 'foo'
-
-        for thing in Something, SomethingElse, Something(), SomethingElse():
-            proxy = _get_proxy(thing)
-
-            @patch.object(proxy, 'foo', 'bar')
-            def test():
-                self.assertEqual(proxy.foo, 'bar')
-            test()
-            self.assertEqual(proxy.foo, 'foo')
-            self.assertEqual(thing.foo, 'foo')
-            self.assertNotIn('foo', proxy.__dict__)
-
-
-    def test_get_set_delete_proxy(self):
-        class Something(object):
-            foo = 'foo'
-        class SomethingElse:
-            foo = 'foo'
-
-        for thing in Something, SomethingElse, Something(), SomethingElse():
-            proxy = _get_proxy(thing, get_only=False)
-
-            @patch.object(proxy, 'foo', 'bar')
-            def test():
-                self.assertEqual(proxy.foo, 'bar')
-            test()
-            self.assertEqual(proxy.foo, 'foo')
-            self.assertEqual(thing.foo, 'foo')
-            self.assertNotIn('foo', proxy.__dict__)
-
-
-    def test_patch_keyword_args(self):
-        kwargs = {'side_effect': KeyError, 'foo.bar.return_value': 33,
-                  'foo': MagicMock()}
-
-        patcher = patch(foo_name, **kwargs)
-        mock = patcher.start()
-        patcher.stop()
-
-        self.assertRaises(KeyError, mock)
-        self.assertEqual(mock.foo.bar(), 33)
-        self.assertIsInstance(mock.foo, MagicMock)
-
-
-    def test_patch_object_keyword_args(self):
-        kwargs = {'side_effect': KeyError, 'foo.bar.return_value': 33,
-                  'foo': MagicMock()}
-
-        patcher = patch.object(Foo, 'f', **kwargs)
-        mock = patcher.start()
-        patcher.stop()
-
-        self.assertRaises(KeyError, mock)
-        self.assertEqual(mock.foo.bar(), 33)
-        self.assertIsInstance(mock.foo, MagicMock)
-
-
-    def test_patch_dict_keyword_args(self):
-        original = {'foo': 'bar'}
-        copy = original.copy()
-
-        patcher = patch.dict(original, foo=3, bar=4, baz=5)
-        patcher.start()
-
-        try:
-            self.assertEqual(original, dict(foo=3, bar=4, baz=5))
-        finally:
-            patcher.stop()
-
-        self.assertEqual(original, copy)
-
-
-    def test_autospec(self):
-        class Boo(object):
-            def __init__(self, a):
-                pass
-            def f(self, a):
-                pass
-            def g(self):
-                pass
-            foo = 'bar'
-
-            class Bar(object):
-                def a(self):
-                    pass
-
-        def _test(mock):
-            mock(1)
-            mock.assert_called_with(1)
-            self.assertRaises(TypeError, mock)
-
-        def _test2(mock):
-            mock.f(1)
-            mock.f.assert_called_with(1)
-            self.assertRaises(TypeError, mock.f)
-
-            mock.g()
-            mock.g.assert_called_with()
-            self.assertRaises(TypeError, mock.g, 1)
-
-            self.assertRaises(AttributeError, getattr, mock, 'h')
-
-            mock.foo.lower()
-            mock.foo.lower.assert_called_with()
-            self.assertRaises(AttributeError, getattr, mock.foo, 'bar')
-
-            mock.Bar()
-            mock.Bar.assert_called_with()
-
-            mock.Bar.a()
-            mock.Bar.a.assert_called_with()
-            self.assertRaises(TypeError, mock.Bar.a, 1)
-
-            mock.Bar().a()
-            mock.Bar().a.assert_called_with()
-            self.assertRaises(TypeError, mock.Bar().a, 1)
-
-            self.assertRaises(AttributeError, getattr, mock.Bar, 'b')
-            self.assertRaises(AttributeError, getattr, mock.Bar(), 'b')
-
-        def function(mock):
-            _test(mock)
-            _test2(mock)
-            _test2(mock(1))
-            self.assertIs(mock, Foo)
-            return mock
-
-        test = patch(foo_name, autospec=True)(function)
-
-        mock = test()
-        self.assertIsNot(Foo, mock)
-        # test patching a second time works
-        test()
-
-        module = sys.modules[__name__]
-        test = patch.object(module, 'Foo', autospec=True)(function)
-
-        mock = test()
-        self.assertIsNot(Foo, mock)
-        # test patching a second time works
-        test()
-
-
-    def test_autospec_function(self):
-        @patch('%s.function' % __name__, autospec=True)
-        def test(mock):
-            function(1)
-            function.assert_called_with(1)
-            function(2, 3)
-            function.assert_called_with(2, 3)
-
-            self.assertRaises(TypeError, function)
-            self.assertRaises(AttributeError, getattr, function, 'foo')
-
-        test()
-
-
-    def test_autospec_keywords(self):
-        @patch('%s.function' % __name__, autospec=True,
-               return_value=3)
-        def test(mock_function):
-            #self.assertEqual(function.abc, 'foo')
-            return function(1, 2)
-
-        result = test()
-        self.assertEqual(result, 3)
-
-
-    def test_autospec_with_new(self):
-        patcher = patch('%s.function' % __name__, new=3, autospec=True)
-        self.assertRaises(TypeError, patcher.start)
-
-        module = sys.modules[__name__]
-        patcher = patch.object(module, 'function', new=3, autospec=True)
-        self.assertRaises(TypeError, patcher.start)
-
-
-    def test_autospec_with_object(self):
-        class Bar(Foo):
-            extra = []
-
-        patcher = patch(foo_name, autospec=Bar)
-        mock = patcher.start()
-        try:
-            self.assertIsInstance(mock, Bar)
-            self.assertIsInstance(mock.extra, list)
-        finally:
-            patcher.stop()
-
-
-    def test_autospec_inherits(self):
-        FooClass = Foo
-        patcher = patch(foo_name, autospec=True)
-        mock = patcher.start()
-        try:
-            self.assertIsInstance(mock, FooClass)
-            self.assertIsInstance(mock(3), FooClass)
-        finally:
-            patcher.stop()
-
-
-    def test_autospec_name(self):
-        patcher = patch(foo_name, autospec=True)
-        mock = patcher.start()
-
-        try:
-            self.assertIn(" name='Foo'", repr(mock))
-            self.assertIn(" name='Foo.f'", repr(mock.f))
-            self.assertIn(" name='Foo()'", repr(mock(None)))
-            self.assertIn(" name='Foo().f'", repr(mock(None).f))
-        finally:
-            patcher.stop()
-
-
-    def test_tracebacks(self):
-        @patch.object(Foo, 'f', object())
-        def test():
-            raise AssertionError
-        try:
-            test()
-        except:
-            err = sys.exc_info()
-
-        result = unittest2.TextTestResult(None, None, 0)
-        traceback = result._exc_info_to_string(err, self)
-        self.assertIn('raise AssertionError', traceback)
-
-
-    def test_new_callable_patch(self):
-        patcher = patch(foo_name, new_callable=NonCallableMagicMock)
-
-        m1 = patcher.start()
-        patcher.stop()
-        m2 = patcher.start()
-        patcher.stop()
-
-        self.assertIsNot(m1, m2)
-        for mock in m1, m2:
-            self.assertNotCallable(mock)
-
-
-    def test_new_callable_patch_object(self):
-        patcher = patch.object(Foo, 'f', new_callable=NonCallableMagicMock)
-
-        m1 = patcher.start()
-        patcher.stop()
-        m2 = patcher.start()
-        patcher.stop()
-
-        self.assertIsNot(m1, m2)
-        for mock in m1, m2:
-            self.assertNotCallable(mock)
-
-
-    def test_new_callable_keyword_arguments(self):
-        class Bar(object):
-            kwargs = None
-            def __init__(self, **kwargs):
-                Bar.kwargs = kwargs
-
-        patcher = patch(foo_name, new_callable=Bar, arg1=1, arg2=2)
-        m = patcher.start()
-        try:
-            self.assertIs(type(m), Bar)
-            self.assertEqual(Bar.kwargs, dict(arg1=1, arg2=2))
-        finally:
-            patcher.stop()
-
-
-    def test_new_callable_spec(self):
-        class Bar(object):
-            kwargs = None
-            def __init__(self, **kwargs):
-                Bar.kwargs = kwargs
-
-        patcher = patch(foo_name, new_callable=Bar, spec=Bar)
-        patcher.start()
-        try:
-            self.assertEqual(Bar.kwargs, dict(spec=Bar))
-        finally:
-            patcher.stop()
-
-        patcher = patch(foo_name, new_callable=Bar, spec_set=Bar)
-        patcher.start()
-        try:
-            self.assertEqual(Bar.kwargs, dict(spec_set=Bar))
-        finally:
-            patcher.stop()
-
-
-    def test_new_callable_create(self):
-        non_existent_attr = '%s.weeeee' % foo_name
-        p = patch(non_existent_attr, new_callable=NonCallableMock)
-        self.assertRaises(AttributeError, p.start)
-
-        p = patch(non_existent_attr, new_callable=NonCallableMock,
-                  create=True)
-        m = p.start()
-        try:
-            self.assertNotCallable(m, magic=False)
-        finally:
-            p.stop()
-
-
-    def test_new_callable_incompatible_with_new(self):
-        self.assertRaises(
-            ValueError, patch, foo_name, new=object(), new_callable=MagicMock
-        )
-        self.assertRaises(
-            ValueError, patch.object, Foo, 'f', new=object(),
-            new_callable=MagicMock
-        )
-
-
-    def test_new_callable_incompatible_with_autospec(self):
-        self.assertRaises(
-            ValueError, patch, foo_name, new_callable=MagicMock,
-            autospec=True
-        )
-        self.assertRaises(
-            ValueError, patch.object, Foo, 'f', new_callable=MagicMock,
-            autospec=True
-        )
-
-
-    def test_new_callable_inherit_for_mocks(self):
-        class MockSub(Mock):
-            pass
-
-        MockClasses = (
-            NonCallableMock, NonCallableMagicMock, MagicMock, Mock, MockSub
-        )
-        for Klass in MockClasses:
-            for arg in 'spec', 'spec_set':
-                kwargs = {arg: True}
-                p = patch(foo_name, new_callable=Klass, **kwargs)
-                m = p.start()
-                try:
-                    instance = m.return_value
-                    self.assertRaises(AttributeError, getattr, instance, 'x')
-                finally:
-                    p.stop()
-
-
-    def test_new_callable_inherit_non_mock(self):
-        class NotAMock(object):
-            def __init__(self, spec):
-                self.spec = spec
-
-        p = patch(foo_name, new_callable=NotAMock, spec=True)
-        m = p.start()
-        try:
-            self.assertTrue(is_instance(m, NotAMock))
-            self.assertRaises(AttributeError, getattr, m, 'return_value')
-        finally:
-            p.stop()
-
-        self.assertEqual(m.spec, Foo)
-
-
-    def test_new_callable_class_decorating(self):
-        test = self
-        original = Foo
-        class SomeTest(object):
-
-            def _test(self, mock_foo):
-                test.assertIsNot(Foo, original)
-                test.assertIs(Foo, mock_foo)
-                test.assertIsInstance(Foo, SomeClass)
-
-            def test_two(self, mock_foo):
-                self._test(mock_foo)
-            def test_one(self, mock_foo):
-                self._test(mock_foo)
-
-        SomeTest = patch(foo_name, new_callable=SomeClass)(SomeTest)
-        SomeTest().test_one()
-        SomeTest().test_two()
-        self.assertIs(Foo, original)
-
-
-    def test_patch_multiple(self):
-        original_foo = Foo
-        original_f = Foo.f
-        original_g = Foo.g
-
-        patcher1 = patch.multiple(foo_name, f=1, g=2)
-        patcher2 = patch.multiple(Foo, f=1, g=2)
-
-        for patcher in patcher1, patcher2:
-            patcher.start()
-            try:
-                self.assertIs(Foo, original_foo)
-                self.assertEqual(Foo.f, 1)
-                self.assertEqual(Foo.g, 2)
-            finally:
-                patcher.stop()
-
-            self.assertIs(Foo, original_foo)
-            self.assertEqual(Foo.f, original_f)
-            self.assertEqual(Foo.g, original_g)
-
-
-        @patch.multiple(foo_name, f=3, g=4)
-        def test():
-            self.assertIs(Foo, original_foo)
-            self.assertEqual(Foo.f, 3)
-            self.assertEqual(Foo.g, 4)
-
-        test()
-
-
-    def test_patch_multiple_no_kwargs(self):
-        self.assertRaises(ValueError, patch.multiple, foo_name)
-        self.assertRaises(ValueError, patch.multiple, Foo)
-
-
-    def test_patch_multiple_create_mocks(self):
-        original_foo = Foo
-        original_f = Foo.f
-        original_g = Foo.g
-
-        @patch.multiple(foo_name, f=DEFAULT, g=3, foo=DEFAULT)
-        def test(f, foo):
-            self.assertIs(Foo, original_foo)
-            self.assertIs(Foo.f, f)
-            self.assertEqual(Foo.g, 3)
-            self.assertIs(Foo.foo, foo)
-            self.assertTrue(is_instance(f, MagicMock))
-            self.assertTrue(is_instance(foo, MagicMock))
-
-        test()
-        self.assertEqual(Foo.f, original_f)
-        self.assertEqual(Foo.g, original_g)
-
-
-    def test_patch_multiple_create_mocks_different_order(self):
-        # bug revealed by Jython!
-        original_f = Foo.f
-        original_g = Foo.g
-
-        patcher = patch.object(Foo, 'f', 3)
-        patcher.attribute_name = 'f'
-
-        other = patch.object(Foo, 'g', DEFAULT)
-        other.attribute_name = 'g'
-        patcher.additional_patchers = [other]
-
-        @patcher
-        def test(g):
-            self.assertIs(Foo.g, g)
-            self.assertEqual(Foo.f, 3)
-
-        test()
-        self.assertEqual(Foo.f, original_f)
-        self.assertEqual(Foo.g, original_g)
-
-
-    def test_patch_multiple_stacked_decorators(self):
-        original_foo = Foo
-        original_f = Foo.f
-        original_g = Foo.g
-
-        @patch.multiple(foo_name, f=DEFAULT)
-        @patch.multiple(foo_name, foo=DEFAULT)
-        @patch(foo_name + '.g')
-        def test1(g, **kwargs):
-            _test(g, **kwargs)
-
-        @patch.multiple(foo_name, f=DEFAULT)
-        @patch(foo_name + '.g')
-        @patch.multiple(foo_name, foo=DEFAULT)
-        def test2(g, **kwargs):
-            _test(g, **kwargs)
-
-        @patch(foo_name + '.g')
-        @patch.multiple(foo_name, f=DEFAULT)
-        @patch.multiple(foo_name, foo=DEFAULT)
-        def test3(g, **kwargs):
-            _test(g, **kwargs)
-
-        def _test(g, **kwargs):
-            f = kwargs.pop('f')
-            foo = kwargs.pop('foo')
-            self.assertFalse(kwargs)
-
-            self.assertIs(Foo, original_foo)
-            self.assertIs(Foo.f, f)
-            self.assertIs(Foo.g, g)
-            self.assertIs(Foo.foo, foo)
-            self.assertTrue(is_instance(f, MagicMock))
-            self.assertTrue(is_instance(g, MagicMock))
-            self.assertTrue(is_instance(foo, MagicMock))
-
-        test1()
-        test2()
-        test3()
-        self.assertEqual(Foo.f, original_f)
-        self.assertEqual(Foo.g, original_g)
-
-
-    def test_patch_multiple_create_mocks_patcher(self):
-        original_foo = Foo
-        original_f = Foo.f
-        original_g = Foo.g
-
-        patcher = patch.multiple(foo_name, f=DEFAULT, g=3, foo=DEFAULT)
-
-        result = patcher.start()
-        try:
-            f = result['f']
-            foo = result['foo']
-            self.assertEqual(set(result), set(['f', 'foo']))
-
-            self.assertIs(Foo, original_foo)
-            self.assertIs(Foo.f, f)
-            self.assertIs(Foo.foo, foo)
-            self.assertTrue(is_instance(f, MagicMock))
-            self.assertTrue(is_instance(foo, MagicMock))
-        finally:
-            patcher.stop()
-
-        self.assertEqual(Foo.f, original_f)
-        self.assertEqual(Foo.g, original_g)
-
-
-    def test_patch_multiple_decorating_class(self):
-        test = self
-        original_foo = Foo
-        original_f = Foo.f
-        original_g = Foo.g
-
-        class SomeTest(object):
-
-            def _test(self, f, foo):
-                test.assertIs(Foo, original_foo)
-                test.assertIs(Foo.f, f)
-                test.assertEqual(Foo.g, 3)
-                test.assertIs(Foo.foo, foo)
-                test.assertTrue(is_instance(f, MagicMock))
-                test.assertTrue(is_instance(foo, MagicMock))
-
-            def test_two(self, f, foo):
-                self._test(f, foo)
-            def test_one(self, f, foo):
-                self._test(f, foo)
-
-        SomeTest = patch.multiple(
-            foo_name, f=DEFAULT, g=3, foo=DEFAULT
-        )(SomeTest)
-
-        thing = SomeTest()
-        thing.test_one()
-        thing.test_two()
-
-        self.assertEqual(Foo.f, original_f)
-        self.assertEqual(Foo.g, original_g)
-
-
-    def test_patch_multiple_create(self):
-        patcher = patch.multiple(Foo, blam='blam')
-        self.assertRaises(AttributeError, patcher.start)
-
-        patcher = patch.multiple(Foo, blam='blam', create=True)
-        patcher.start()
-        try:
-            self.assertEqual(Foo.blam, 'blam')
-        finally:
-            patcher.stop()
-
-        self.assertFalse(hasattr(Foo, 'blam'))
-
-
-    def test_patch_multiple_spec_set(self):
-        # if spec_set works then we can assume that spec and autospec also
-        # work as the underlying machinery is the same
-        patcher = patch.multiple(Foo, foo=DEFAULT, spec_set=['a', 'b'])
-        result = patcher.start()
-        try:
-            self.assertEqual(Foo.foo, result['foo'])
-            Foo.foo.a(1)
-            Foo.foo.b(2)
-            Foo.foo.a.assert_called_with(1)
-            Foo.foo.b.assert_called_with(2)
-            self.assertRaises(AttributeError, setattr, Foo.foo, 'c', None)
-        finally:
-            patcher.stop()
-
-
-    def test_patch_multiple_new_callable(self):
-        class Thing(object):
-            pass
-
-        patcher = patch.multiple(
-            Foo, f=DEFAULT, g=DEFAULT, new_callable=Thing
-        )
-        result = patcher.start()
-        try:
-            self.assertIs(Foo.f, result['f'])
-            self.assertIs(Foo.g, result['g'])
-            self.assertIsInstance(Foo.f, Thing)
-            self.assertIsInstance(Foo.g, Thing)
-            self.assertIsNot(Foo.f, Foo.g)
-        finally:
-            patcher.stop()
-
-
-    def test_nested_patch_failure(self):
-        original_f = Foo.f
-        original_g = Foo.g
-
-        @patch.object(Foo, 'g', 1)
-        @patch.object(Foo, 'missing', 1)
-        @patch.object(Foo, 'f', 1)
-        def thing1():
-            pass
-
-        @patch.object(Foo, 'missing', 1)
-        @patch.object(Foo, 'g', 1)
-        @patch.object(Foo, 'f', 1)
-        def thing2():
-            pass
-
-        @patch.object(Foo, 'g', 1)
-        @patch.object(Foo, 'f', 1)
-        @patch.object(Foo, 'missing', 1)
-        def thing3():
-            pass
-
-        for func in thing1, thing2, thing3:
-            self.assertRaises(AttributeError, func)
-            self.assertEqual(Foo.f, original_f)
-            self.assertEqual(Foo.g, original_g)
-
-
-    def test_new_callable_failure(self):
-        original_f = Foo.f
-        original_g = Foo.g
-        original_foo = Foo.foo
-
-        def crasher():
-            raise NameError('crasher')
-
-        @patch.object(Foo, 'g', 1)
-        @patch.object(Foo, 'foo', new_callable=crasher)
-        @patch.object(Foo, 'f', 1)
-        def thing1():
-            pass
-
-        @patch.object(Foo, 'foo', new_callable=crasher)
-        @patch.object(Foo, 'g', 1)
-        @patch.object(Foo, 'f', 1)
-        def thing2():
-            pass
-
-        @patch.object(Foo, 'g', 1)
-        @patch.object(Foo, 'f', 1)
-        @patch.object(Foo, 'foo', new_callable=crasher)
-        def thing3():
-            pass
-
-        for func in thing1, thing2, thing3:
-            self.assertRaises(NameError, func)
-            self.assertEqual(Foo.f, original_f)
-            self.assertEqual(Foo.g, original_g)
-            self.assertEqual(Foo.foo, original_foo)
-
-
-    def test_patch_multiple_failure(self):
-        original_f = Foo.f
-        original_g = Foo.g
-
-        patcher = patch.object(Foo, 'f', 1)
-        patcher.attribute_name = 'f'
-
-        good = patch.object(Foo, 'g', 1)
-        good.attribute_name = 'g'
-
-        bad = patch.object(Foo, 'missing', 1)
-        bad.attribute_name = 'missing'
-
-        for additionals in [good, bad], [bad, good]:
-            patcher.additional_patchers = additionals
-
-            @patcher
-            def func():
-                pass
-
-            self.assertRaises(AttributeError, func)
-            self.assertEqual(Foo.f, original_f)
-            self.assertEqual(Foo.g, original_g)
-
-
-    def test_patch_multiple_new_callable_failure(self):
-        original_f = Foo.f
-        original_g = Foo.g
-        original_foo = Foo.foo
-
-        def crasher():
-            raise NameError('crasher')
-
-        patcher = patch.object(Foo, 'f', 1)
-        patcher.attribute_name = 'f'
-
-        good = patch.object(Foo, 'g', 1)
-        good.attribute_name = 'g'
-
-        bad = patch.object(Foo, 'foo', new_callable=crasher)
-        bad.attribute_name = 'foo'
-
-        for additionals in [good, bad], [bad, good]:
-            patcher.additional_patchers = additionals
-
-            @patcher
-            def func():
-                pass
-
-            self.assertRaises(NameError, func)
-            self.assertEqual(Foo.f, original_f)
-            self.assertEqual(Foo.g, original_g)
-            self.assertEqual(Foo.foo, original_foo)
-
-
-    def test_patch_multiple_string_subclasses(self):
-        for base in (str, unicode):
-            Foo = type('Foo', (base,), {'fish': 'tasty'})
-            foo = Foo()
-            @patch.multiple(foo, fish='nearly gone')
-            def test():
-                self.assertEqual(foo.fish, 'nearly gone')
-
-            test()
-            self.assertEqual(foo.fish, 'tasty')
-
-
-    @patch('mock.patch.TEST_PREFIX', 'foo')
-    def test_patch_test_prefix(self):
-        class Foo(object):
-            thing = 'original'
-
-            def foo_one(self):
-                return self.thing
-            def foo_two(self):
-                return self.thing
-            def test_one(self):
-                return self.thing
-            def test_two(self):
-                return self.thing
-
-        Foo = patch.object(Foo, 'thing', 'changed')(Foo)
-
-        foo = Foo()
-        self.assertEqual(foo.foo_one(), 'changed')
-        self.assertEqual(foo.foo_two(), 'changed')
-        self.assertEqual(foo.test_one(), 'original')
-        self.assertEqual(foo.test_two(), 'original')
-
-
-    @patch('mock.patch.TEST_PREFIX', 'bar')
-    def test_patch_dict_test_prefix(self):
-        class Foo(object):
-            def bar_one(self):
-                return dict(the_dict)
-            def bar_two(self):
-                return dict(the_dict)
-            def test_one(self):
-                return dict(the_dict)
-            def test_two(self):
-                return dict(the_dict)
-
-        the_dict = {'key': 'original'}
-        Foo = patch.dict(the_dict, key='changed')(Foo)
-
-        foo = Foo()
-        self.assertEqual(foo.bar_one(), {'key': 'changed'})
-        self.assertEqual(foo.bar_two(), {'key': 'changed'})
-        self.assertEqual(foo.test_one(), {'key': 'original'})
-        self.assertEqual(foo.test_two(), {'key': 'original'})
-
-
-    def test_patch_with_spec_mock_repr(self):
-        for arg in ('spec', 'autospec', 'spec_set'):
-            p = patch('%s.SomeClass' % __name__, **{arg: True})
-            m = p.start()
-            try:
-                self.assertIn(" name='SomeClass'", repr(m))
-                self.assertIn(" name='SomeClass.class_attribute'",
-                              repr(m.class_attribute))
-                self.assertIn(" name='SomeClass()'", repr(m()))
-                self.assertIn(" name='SomeClass().class_attribute'",
-                              repr(m().class_attribute))
-            finally:
-                p.stop()
-
-
-    def test_patch_nested_autospec_repr(self):
-        p = patch('tests.support', autospec=True)
-        m = p.start()
-        try:
-            self.assertIn(" name='support.SomeClass.wibble()'",
-                          repr(m.SomeClass.wibble()))
-            self.assertIn(" name='support.SomeClass().wibble()'",
-                          repr(m.SomeClass().wibble()))
-        finally:
-            p.stop()
-
-
-    def test_mock_calls_with_patch(self):
-        for arg in ('spec', 'autospec', 'spec_set'):
-            p = patch('%s.SomeClass' % __name__, **{arg: True})
-            m = p.start()
-            try:
-                m.wibble()
-
-                kalls = [call.wibble()]
-                self.assertEqual(m.mock_calls, kalls)
-                self.assertEqual(m.method_calls, kalls)
-                self.assertEqual(m.wibble.mock_calls, [call()])
-
-                result = m()
-                kalls.append(call())
-                self.assertEqual(m.mock_calls, kalls)
-
-                result.wibble()
-                kalls.append(call().wibble())
-                self.assertEqual(m.mock_calls, kalls)
-
-                self.assertEqual(result.mock_calls, [call.wibble()])
-                self.assertEqual(result.wibble.mock_calls, [call()])
-                self.assertEqual(result.method_calls, [call.wibble()])
-            finally:
-                p.stop()
-
-
-    def test_patch_imports_lazily(self):
-        sys.modules.pop('squizz', None)
-
-        p1 = patch('squizz.squozz')
-        self.assertRaises(ImportError, p1.start)
-
-        squizz = Mock()
-        squizz.squozz = 6
-        sys.modules['squizz'] = squizz
-        p1 = patch('squizz.squozz')
-        squizz.squozz = 3
-        p1.start()
-        p1.stop()
-        self.assertEqual(squizz.squozz, 3)
-
-
-    def test_patch_propagates_exc_on_exit(self):
-        class holder:
-            exc_info = None, None, None
-
-        class custom_patch(_patch):
-            def __exit__(self, etype=None, val=None, tb=None):
-                _patch.__exit__(self, etype, val, tb)
-                holder.exc_info = etype, val, tb
-            stop = __exit__
-
-        def with_custom_patch(target):
-            getter, attribute = _get_target(target)
-            return custom_patch(
-                getter, attribute, DEFAULT, None, False, None,
-                None, None, {}
-            )
-
-        @with_custom_patch('squizz.squozz')
-        def test(mock):
-            raise RuntimeError
-
-        self.assertRaises(RuntimeError, test)
-        self.assertIs(holder.exc_info[0], RuntimeError)
-        self.assertIsNotNone(holder.exc_info[1],
-                            'exception value not propagated')
-        self.assertIsNotNone(holder.exc_info[2],
-                            'exception traceback not propagated')
-
-
-    def test_create_and_specs(self):
-        for kwarg in ('spec', 'spec_set', 'autospec'):
-            p = patch('%s.doesnotexist' % __name__, create=True,
-                      **{kwarg: True})
-            self.assertRaises(TypeError, p.start)
-            self.assertRaises(NameError, lambda: doesnotexist)
-
-            # check that spec with create is innocuous if the original exists
-            p = patch(MODNAME, create=True, **{kwarg: True})
-            p.start()
-            p.stop()
-
-
-    def test_multiple_specs(self):
-        original = PTModule
-        for kwarg in ('spec', 'spec_set'):
-            p = patch(MODNAME, autospec=0, **{kwarg: 0})
-            self.assertRaises(TypeError, p.start)
-            self.assertIs(PTModule, original)
-
-        for kwarg in ('spec', 'autospec'):
-            p = patch(MODNAME, spec_set=0, **{kwarg: 0})
-            self.assertRaises(TypeError, p.start)
-            self.assertIs(PTModule, original)
-
-        for kwarg in ('spec_set', 'autospec'):
-            p = patch(MODNAME, spec=0, **{kwarg: 0})
-            self.assertRaises(TypeError, p.start)
-            self.assertIs(PTModule, original)
-
-
-    def test_specs_false_instead_of_none(self):
-        p = patch(MODNAME, spec=False, spec_set=False, autospec=False)
-        mock = p.start()
-        try:
-            # no spec should have been set, so attribute access should not fail
-            mock.does_not_exist
-            mock.does_not_exist = 3
-        finally:
-            p.stop()
-
-
-    def test_falsey_spec(self):
-        for kwarg in ('spec', 'autospec', 'spec_set'):
-            p = patch(MODNAME, **{kwarg: 0})
-            m = p.start()
-            try:
-                self.assertRaises(AttributeError, getattr, m, 'doesnotexist')
-            finally:
-                p.stop()
-
-
-    def test_spec_set_true(self):
-        for kwarg in ('spec', 'autospec'):
-            p = patch(MODNAME, spec_set=True, **{kwarg: True})
-            m = p.start()
-            try:
-                self.assertRaises(AttributeError, setattr, m,
-                                  'doesnotexist', 'something')
-                self.assertRaises(AttributeError, getattr, m, 'doesnotexist')
-            finally:
-                p.stop()
-
-
-    def test_callable_spec_as_list(self):
-        spec = ('__call__',)
-        p = patch(MODNAME, spec=spec)
-        m = p.start()
-        try:
-            self.assertTrue(callable(m))
-        finally:
-            p.stop()
-
-
-    def test_not_callable_spec_as_list(self):
-        spec = ('foo', 'bar')
-        p = patch(MODNAME, spec=spec)
-        m = p.start()
-        try:
-            self.assertFalse(callable(m))
-        finally:
-            p.stop()
-
-
-    def test_patch_stopall(self):
-        unlink = os.unlink
-        chdir = os.chdir
-        path = os.path
-        patch('os.unlink', something).start()
-        patch('os.chdir', something_else).start()
-
-        @patch('os.path')
-        def patched(mock_path):
-            patch.stopall()
-            self.assertIs(os.path, mock_path)
-            self.assertIs(os.unlink, unlink)
-            self.assertIs(os.chdir, chdir)
-
-        patched()
-        self.assertIs(os.path, path)
-
-
-    def test_wrapped_patch(self):
-        decorated = patch('sys.modules')(function)
-        self.assertIs(decorated.__wrapped__, function)
-
-
-    def test_wrapped_several_times_patch(self):
-        decorated = patch('sys.modules')(function)
-        decorated = patch('sys.modules')(decorated)
-        self.assertIs(decorated.__wrapped__, function)
-
-
-    def test_wrapped_patch_object(self):
-        decorated = patch.object(sys, 'modules')(function)
-        self.assertIs(decorated.__wrapped__, function)
-
-
-    def test_wrapped_patch_dict(self):
-        decorated = patch.dict('sys.modules')(function)
-        self.assertIs(decorated.__wrapped__, function)
-
-
-    def test_wrapped_patch_multiple(self):
-        decorated = patch.multiple('sys', modules={})(function)
-        self.assertIs(decorated.__wrapped__, function)
-
-
-if __name__ == '__main__':
-    unittest2.main()
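The deleted patch tests above pin down how `patch()` treats `spec`, `spec_set`, and `autospec`, and how `patch.stopall()` unwinds every started patcher. A minimal sketch of that behaviour, assuming only the bundled `mock` library (the patched targets here are illustrative, not taken from the test suite):

```python
from mock import patch
import os

# spec=True limits the mock to the attributes of the real os.path,
# so a misspelled attribute raises AttributeError instead of passing.
with patch('os.path', spec=True) as mock_path:
    mock_path.join('a', 'b')       # ok: os.path.join exists on the spec
    try:
        mock_path.jion('a', 'b')   # typo is caught by the spec
    except AttributeError:
        pass

# patch.stopall() stops every patcher started via start(), which is
# what test_patch_stopall above asserts.
patcher = patch('os.sep', '!')
patcher.start()
assert os.sep == '!'
patch.stopall()
assert os.sep in ('/', '\\')       # original value restored
```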
diff --git a/branch-1.2/ambari-common/src/test/python/mock/tests/testsentinel.py b/branch-1.2/ambari-common/src/test/python/mock/tests/testsentinel.py
deleted file mode 100644
index 981171a..0000000
--- a/branch-1.2/ambari-common/src/test/python/mock/tests/testsentinel.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright (C) 2007-2012 Michael Foord & the mock team
-# E-mail: fuzzyman AT voidspace DOT org DOT uk
-# http://www.voidspace.org.uk/python/mock/
-
-from tests.support import unittest2
-
-from mock import sentinel, DEFAULT
-
-
-class SentinelTest(unittest2.TestCase):
-
-    def testSentinels(self):
-        self.assertEqual(sentinel.whatever, sentinel.whatever,
-                         'sentinel not stored')
-        self.assertNotEqual(sentinel.whatever, sentinel.whateverelse,
-                            'sentinel should be unique')
-
-
-    def testSentinelName(self):
-        self.assertEqual(str(sentinel.whatever), 'sentinel.whatever',
-                         'sentinel name incorrect')
-
-
-    def testDEFAULT(self):
-        self.assertTrue(DEFAULT is sentinel.DEFAULT)
-
-    def testBases(self):
-        # If this doesn't raise an AttributeError then help(mock) is broken
-        self.assertRaises(AttributeError, lambda: sentinel.__bases__)
-
-
-if __name__ == '__main__':
-    unittest2.main()
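testsentinel.py above verifies the guarantees of `mock.sentinel`: attributes are unique, stable, and self-describing. A hedged usage sketch (the `fetch`/connection names are made up for illustration):

```python
from mock import Mock, sentinel

def fetch(connection):
    return connection.read()

conn = Mock()
conn.read.return_value = sentinel.payload

# The same attribute always returns the same unique object ...
assert fetch(conn) is sentinel.payload
# ... and it prints as its own name, which keeps failures readable.
assert str(sentinel.payload) == 'sentinel.payload'
```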
diff --git a/branch-1.2/ambari-common/src/test/python/mock/tests/testwith.py b/branch-1.2/ambari-common/src/test/python/mock/tests/testwith.py
deleted file mode 100644
index 34529eb..0000000
--- a/branch-1.2/ambari-common/src/test/python/mock/tests/testwith.py
+++ /dev/null
@@ -1,16 +0,0 @@
-import sys
-
-if sys.version_info[:2] >= (2, 5):
-    from tests._testwith import *
-else:
-    from tests.support import unittest2
-
-    class TestWith(unittest2.TestCase):
-
-        @unittest2.skip('tests using with statement skipped on Python 2.4')
-        def testWith(self):
-            pass
-
-
-if __name__ == '__main__':
-    unittest2.main()
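The deleted testwith.py simply skips the with-statement tests on Python 2.4, where `with` is unavailable. For reference, a short sketch of the context-manager form of `patch` that `tests._testwith` exercises:

```python
import os
from mock import patch

# Inside the block os.listdir is replaced; on exit it is restored.
with patch('os.listdir', return_value=['a.txt']) as mock_listdir:
    assert os.listdir('/anywhere') == ['a.txt']
    mock_listdir.assert_called_once_with('/anywhere')
```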
diff --git a/branch-1.2/ambari-common/src/test/python/mock/tox.ini b/branch-1.2/ambari-common/src/test/python/mock/tox.ini
deleted file mode 100644
index 58e29d2..0000000
--- a/branch-1.2/ambari-common/src/test/python/mock/tox.ini
+++ /dev/null
@@ -1,40 +0,0 @@
-[tox]
-envlist = py25,py26,py27,py31,pypy,py32,py33,jython
-
-[testenv]
-deps=unittest2
-commands={envbindir}/unit2 discover []
-
-[testenv:py26]
-commands=
-    {envbindir}/unit2 discover []
-    {envbindir}/sphinx-build -E -b doctest docs html
-    {envbindir}/sphinx-build -E docs html
-deps =
-    unittest2
-    sphinx
-
-[testenv:py27]
-commands=
-    {envbindir}/unit2 discover []
-    {envbindir}/sphinx-build -E -b doctest docs html
-deps =
-    unittest2
-    sphinx
-
-[testenv:py31]
-deps =
-    unittest2py3k
-
-[testenv:py32]
-commands=
-    {envbindir}/python -m unittest discover []
-deps =
-
-[testenv:py33]
-commands=
-    {envbindir}/python -m unittest discover []
-deps =
-
-# note for jython. Execute in tests directory:
-# rm `find . -name '*$py.class'`
\ No newline at end of file
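Each tox environment above runs `unit2 discover`. Roughly the same thing can be done programmatically with the unittest2 API (a sketch; the `tests` start directory is an assumption):

```python
import unittest2

# Equivalent of `unit2 discover`: collect test*.py modules under
# ./tests and run them with a text runner.
loader = unittest2.TestLoader()
suite = loader.discover('tests')
unittest2.TextTestRunner(verbosity=2).run(suite)
```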
diff --git a/branch-1.2/ambari-common/src/test/python/mock/unittest.cfg b/branch-1.2/ambari-common/src/test/python/mock/unittest.cfg
deleted file mode 100644
index b2d6f67..0000000
--- a/branch-1.2/ambari-common/src/test/python/mock/unittest.cfg
+++ /dev/null
@@ -1,95 +0,0 @@
-
-[unittest]
-plugins = 
-    unittest2.plugins.debugger
-    unittest2.plugins.checker
-    unittest2.plugins.doctestloader
-    unittest2.plugins.matchregexp
-    unittest2.plugins.moduleloading
-    unittest2.plugins.testcoverage
-    unittest2.plugins.growl
-    unittest2.plugins.filtertests
-    unittest2.plugins.junitxml
-    unittest2.plugins.timed
-    unittest2.plugins.counttests
-    unittest2.plugins.logchannels
-
-excluded-plugins =
-
-# 0, 1 or 2 (default is 1)
-# quiet, normal or verbose
-# can be overridden at command line
-verbosity = normal
-
-# true or false
-# even if false can be switched on at command line
-catch =
-buffer =
-failfast =
-
-
-[matchregexp]
-always-on = False
-full-path = True
-
-[debugger]
-always-on = False
-errors-only = True
-
-[coverage]
-always-on = False
-config =
-report-html = False
-# only used if report-html is false
-annotate = False
-# defaults to './htmlcov/'
-html-directory =
-# if unset will output to console
-text-file =
-branch = False
-timid = False
-cover-pylib = False
-exclude-lines = 
-    # Have to re-enable the standard pragma
-    pragma: no cover
-
-    # Don't complain about missing debug-only code:
-    def __repr__
-    if self\.debug
-
-    # Don't complain if tests don't hit defensive assertion code:
-    raise AssertionError
-    raise NotImplementedError
-
-    # Don't complain if non-runnable code isn't run:
-    if 0:
-    if __name__ == .__main__.
-    
-ignore-errors = False
-modules =
-
-[growl]
-always-on = False
-
-[doctest]
-always-on = False
-
-[module-loading]
-always-on = False
-
-[checker]
-always-on = False
-pep8 = False
-pyflakes = True
-
-[junit-xml]
-always-on = False
-path = junit.xml
-
-[timed]
-always-on = True
-threshold = 0.01
-
-[count]
-always-on = True
-enhanced = False
diff --git a/branch-1.2/ambari-project/pom.xml b/branch-1.2/ambari-project/pom.xml
deleted file mode 100644
index 2ef0125..0000000
--- a/branch-1.2/ambari-project/pom.xml
+++ /dev/null
@@ -1,386 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0                       http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.ambari</groupId>
-    <artifactId>ambari</artifactId>
-    <version>1.2.2-SNAPSHOT</version>
-  </parent>
-  <groupId>org.apache.ambari</groupId>
-  <artifactId>ambari-project</artifactId>
-  <version>1.2.2-SNAPSHOT</version>
-  <description>Apache Ambari Project POM</description>
-  <name>Apache Ambari Project POM</name>
-  <packaging>pom</packaging>
-  <properties>
-    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
-  </properties>
-  <pluginRepositories>
-    <pluginRepository>
-      <id>maven2-repository.dev.java.net</id>
-      <name>Java.net Repository for Maven</name>
-      <url>http://download.java.net/maven/2/</url>
-      <layout>default</layout>
-    </pluginRepository>
-    <pluginRepository>
-      <id>maven2-glassfish-repository.dev.java.net</id>
-      <name>Java.net Repository for Maven</name>
-      <url>http://download.java.net/maven/glassfish/</url>
-    </pluginRepository>
-  </pluginRepositories>
-  <repositories>
-    <repository>
-      <id>EclipseLink</id>
-      <url>http://download.eclipse.org/rt/eclipselink/maven.repo</url>
-    </repository>
-  </repositories>
-  <profiles>
-    <profile>
-      <id>dist</id>
-      <build>
-        <plugins>
-          <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-javadoc-plugin</artifactId>
-            <executions>
-              <execution>
-                <id>module-javadocs</id>
-                <phase>package</phase>
-                <goals>
-                  <goal>jar</goal>
-                </goals>
-                <configuration>
-                  <destDir>${project.build.directory}</destDir>
-                </configuration>
-              </execution>
-            </executions>
-          </plugin>
-          <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-source-plugin</artifactId>
-            <executions>
-              <execution>
-                <id>ambari-java-sources</id>
-                <phase>package</phase>
-                <goals>
-                  <goal>jar-no-fork</goal>
-                </goals>
-              </execution>
-            </executions>
-          </plugin>
-        </plugins>
-      </build>
-    </profile>
-  </profiles>
-  <dependencyManagement>
-    <dependencies>
-      <dependency>
-        <groupId>org.apache.ambari</groupId>
-        <artifactId>ambari-api</artifactId>
-        <version>1.0.3-SNAPSHOT</version>
-      </dependency>
-      <dependency>
-        <groupId>commons-io</groupId>
-        <artifactId>commons-io</artifactId>
-        <version>2.1</version>
-      </dependency>
-      <dependency>
-        <groupId>com.google.inject.extensions</groupId>
-        <artifactId>guice-servlet</artifactId>
-        <version>3.0</version>
-      </dependency>
-       <dependency>
-        <groupId>org.codehaus.jettison</groupId>
-        <artifactId>jettison</artifactId>
-        <version>1.1</version>
-      </dependency>
-      <dependency>
-        <groupId>com.google.inject</groupId>
-        <artifactId>guice</artifactId>
-        <version>3.0</version>
-      </dependency>
-      <dependency>
-        <groupId>com.google.inject.extensions</groupId>
-        <artifactId>guice-assistedinject</artifactId>
-        <version>3.0</version>
-      </dependency>
-      <dependency>
-        <groupId>com.google.inject.extensions</groupId>
-        <artifactId>guice-persist</artifactId>
-        <version>3.0</version>
-      </dependency>
-      <dependency>
-        <groupId>com.google.code.gson</groupId>
-        <artifactId>gson</artifactId>
-        <version>2.2.2</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.derby</groupId>
-        <artifactId>derby</artifactId>
-        <version>10.9.1.0</version>
-      </dependency>
-      <dependency>
-        <groupId>org.springframework.security</groupId>
-        <artifactId>spring-security-core</artifactId>
-        <version>3.1.2.RELEASE</version>
-      </dependency>
-      <dependency>
-        <groupId>org.springframework.security</groupId>
-        <artifactId>spring-security-config</artifactId>
-        <version>3.1.2.RELEASE</version>
-      </dependency>
-      <dependency>
-        <groupId>org.springframework.security</groupId>
-        <artifactId>spring-security-web</artifactId>
-        <version>3.1.2.RELEASE</version>
-      </dependency>
-      <dependency>
-        <groupId>org.springframework.security</groupId>
-        <artifactId>spring-security-ldap</artifactId>
-        <version>3.1.2.RELEASE</version>
-      </dependency>
-      <dependency>
-        <groupId>org.springframework.ldap</groupId>
-        <artifactId>spring-ldap-core</artifactId>
-        <version>1.3.1.RELEASE</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.directory.server</groupId>
-        <artifactId>apacheds-core</artifactId>
-        <version>1.5.5</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.directory.server</groupId>
-        <artifactId>apacheds-protocol-ldap</artifactId>
-        <version>1.5.5</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.directory.shared</groupId>
-        <artifactId>shared-ldap</artifactId>
-        <version>0.9.17</version>
-      </dependency>
-      <dependency>
-        <groupId>org.slf4j</groupId>
-        <artifactId>slf4j-api</artifactId>
-        <version>1.7.2</version>
-      </dependency>
-      <dependency>
-        <groupId>org.slf4j</groupId>
-        <artifactId>slf4j-log4j12</artifactId>
-        <version>1.7.2</version>
-      </dependency>
-      <dependency>
-        <groupId>org.eclipse.persistence</groupId>
-        <artifactId>eclipselink</artifactId>
-        <version>2.4.0</version>
-      </dependency>
-      <dependency>
-        <groupId>postgresql</groupId>
-        <artifactId>postgresql</artifactId>
-        <version>9.1-901.jdbc4</version>
-      </dependency>
-      <dependency>
-        <groupId>org.mockito</groupId>
-        <artifactId>mockito-core</artifactId>
-        <version>1.8.5</version>
-      </dependency>
-       <dependency>
-        <groupId>org.eclipse.jetty</groupId>
-        <artifactId>jetty-server</artifactId>
-        <version>7.6.7.v20120910</version>
-      </dependency>
-      <dependency>
-        <groupId>org.eclipse.jetty</groupId>
-        <artifactId>jetty-security</artifactId>
-        <version>7.6.7.v20120910</version>
-      </dependency>
-      <dependency>
-        <groupId>org.eclipse.jetty</groupId>
-        <artifactId>jetty-servlet</artifactId>
-        <version>7.6.7.v20120910</version>
-      </dependency>
-      <dependency>
-        <groupId>org.eclipse.jetty</groupId>
-        <artifactId>jetty-webapp</artifactId>
-        <version>7.6.7.v20120910</version>
-      </dependency>
-      <dependency>
-        <groupId>commons-logging</groupId>
-        <artifactId>commons-logging</artifactId>
-        <version>1.1.1</version>
-      </dependency>
-      <dependency>
-        <groupId>commons-codec</groupId>
-        <artifactId>commons-codec</artifactId>
-        <version>1.4</version>
-      </dependency>
-      <dependency>
-        <groupId>commons-lang</groupId>
-        <artifactId>commons-lang</artifactId>
-        <version>2.5</version>
-      </dependency>
-      <dependency>
-        <groupId>commons-httpclient</groupId>
-        <artifactId>commons-httpclient</artifactId>
-        <version>3.1</version>
-      </dependency>
-      <dependency>
-        <groupId>javax.servlet</groupId>
-        <artifactId>servlet-api</artifactId>
-        <version>2.5</version>
-      </dependency>
-      <dependency>
-        <groupId>com.sun.jersey</groupId>
-        <artifactId>jersey-core</artifactId>
-        <version>1.11</version>
-      </dependency>
-      <dependency>
-        <groupId>com.sun.jersey</groupId>
-        <artifactId>jersey-grizzly</artifactId>
-        <version>1.11</version>
-      </dependency>
-      <dependency>
-        <groupId>org.codehaus.jackson</groupId>
-        <artifactId>jackson-core-asl</artifactId>
-        <version>1.9.9</version>
-      </dependency>
-      <dependency>
-        <groupId>org.codehaus.jackson</groupId>
-        <artifactId>jackson-jaxrs</artifactId>
-        <version>1.9.9</version>
-      </dependency>
-      <dependency>
-        <groupId>org.codehaus.jackson</groupId>
-        <artifactId>jackson-xc</artifactId>
-        <version>1.9.9</version>
-      </dependency>
-      <dependency>
-        <groupId>org.codehaus.jackson</groupId>
-        <artifactId>jackson-mapper-asl</artifactId>
-        <version>1.9.9</version>
-      </dependency>
-      <dependency>
-        <groupId>com.sun.grizzly</groupId>
-        <artifactId>grizzly-comet-webserver</artifactId>
-        <version>1.9.36</version>
-      </dependency>
-      <dependency>
-        <groupId>com.sun.jersey</groupId>
-        <artifactId>jersey-bundle</artifactId>
-        <version>1.11</version>
-      </dependency>
-      <dependency>
-        <groupId>com.sun.jersey</groupId>
-        <artifactId>jersey-json</artifactId>
-        <version>1.11</version>
-      </dependency>
-      <dependency>
-        <groupId>com.sun.jersey</groupId>
-        <artifactId>jersey-server</artifactId>
-        <version>1.11</version>
-      </dependency>
-      <dependency>
-        <groupId>com.sun.jersey</groupId>
-        <artifactId>jersey-client</artifactId>
-        <version>1.11</version>
-      </dependency>
-      <dependency>
-        <groupId>com.sun.jersey.contribs</groupId>
-        <artifactId>jersey-multipart</artifactId>
-        <version>1.11</version>
-      </dependency>
-      <dependency>
-        <groupId>com.sun.jersey.jersey-test-framework</groupId>
-        <artifactId>jersey-test-framework-core</artifactId>
-        <version>1.11</version>
-        <scope>test</scope>
-      </dependency>
-      <dependency>
-        <groupId>com.sun.jersey.jersey-test-framework</groupId>
-        <artifactId>jersey-test-framework-grizzly2</artifactId>
-        <version>1.11</version>
-      </dependency>
-      <dependency>
-        <groupId>com.sun.jersey.contribs</groupId>
-        <artifactId>jersey-guice</artifactId>
-        <version>1.11</version>
-      </dependency>
-      <dependency>
-        <groupId>log4j</groupId>
-        <artifactId>log4j</artifactId>
-        <version>1.2.16</version>
-        <exclusions>
-          <exclusion>
-            <groupId>com.sun.jdmk</groupId>
-            <artifactId>jmxtools</artifactId>
-          </exclusion>
-          <exclusion>
-            <groupId>com.sun.jmx</groupId>
-            <artifactId>jmxri</artifactId>
-          </exclusion>
-          <exclusion>
-            <groupId>javax.mail</groupId>
-            <artifactId>mail</artifactId>
-          </exclusion>
-          <exclusion>
-            <groupId>javax.jms</groupId>
-            <artifactId>jmx</artifactId>
-          </exclusion>
-          <exclusion>
-            <groupId>javax.jms</groupId>
-            <artifactId>jms</artifactId>
-          </exclusion>
-        </exclusions>
-      </dependency>
-      <dependency>
-        <groupId>junit</groupId>
-        <artifactId>junit</artifactId>
-        <version>4.10</version>
-      </dependency>
-    </dependencies>
-  </dependencyManagement>
-  <build>
-    <pluginManagement>
-      <plugins>
-        <plugin>
-          <groupId>org.apache.maven.plugins</groupId>
-          <artifactId>maven-surefire-plugin</artifactId>
-          <version>2.12</version>
-        </plugin>
-      </plugins>
-    </pluginManagement>
-    <plugins>
-      <plugin>
-        <artifactId>maven-compiler-plugin</artifactId>
-        <version>3.0</version>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.rat</groupId>
-        <artifactId>apache-rat-plugin</artifactId>
-        <configuration combine.self="override"/>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-surefire-plugin</artifactId>
-        <configuration>
-          <redirectTestOutputToFile>true</redirectTestOutputToFile>
-          <forkMode>always</forkMode>
-          <forkedProcessTimeoutInSeconds>900</forkedProcessTimeoutInSeconds>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
-</project>
diff --git a/branch-1.2/ambari-project/src/main/assemblies/empty.xml b/branch-1.2/ambari-project/src/main/assemblies/empty.xml
deleted file mode 100644
index 35738b1..0000000
--- a/branch-1.2/ambari-project/src/main/assemblies/empty.xml
+++ /dev/null
@@ -1,21 +0,0 @@
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-  
-       http://www.apache.org/licenses/LICENSE-2.0
-  
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<assembly>
-    <id>empty</id>
-    <formats/>
-</assembly>
diff --git a/branch-1.2/ambari-server/conf/ambari-env.sh b/branch-1.2/ambari-server/conf/ambari-env.sh
deleted file mode 100644
index fe2694a..0000000
--- a/branch-1.2/ambari-server/conf/ambari-env.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-AMBARI_PID_DIR=/var/run/ambari-server
-AMBARI_PASSPHRASE="DEV"
diff --git a/branch-1.2/ambari-server/conf/log4j.properties b/branch-1.2/ambari-server/conf/log4j.properties
deleted file mode 100644
index a1818ce..0000000
--- a/branch-1.2/ambari-server/conf/log4j.properties
+++ /dev/null
@@ -1,21 +0,0 @@
-#   Licensed under the Apache License, Version 2.0 (the "License");
-#   you may not use this file except in compliance with the License.
-#   You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-#   Unless required by applicable law or agreed to in writing, software
-#   distributed under the License is distributed on an "AS IS" BASIS,
-#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#   See the License for the specific language governing permissions and
-#   limitations under the License.
-
-# log4j configuration used during build and unit tests
-
-log4j.rootLogger=INFO,stdout
-log4j.threshold=ALL
-log4j.appender.stdout=org.apache.log4j.ConsoleAppender
-log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
-log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2} (%F:%M(%L)) - %m%n
-
-log4j.logger.org.apache.ambari=DEBUG
diff --git a/branch-1.2/ambari-server/conf/unix/ambari-env.sh b/branch-1.2/ambari-server/conf/unix/ambari-env.sh
deleted file mode 100644
index 42361cb..0000000
--- a/branch-1.2/ambari-server/conf/unix/ambari-env.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-AMBARI_PASSPHRASE="DEV"
diff --git a/branch-1.2/ambari-server/conf/unix/ambari.properties b/branch-1.2/ambari-server/conf/unix/ambari.properties
deleted file mode 100644
index 2836c76..0000000
--- a/branch-1.2/ambari-server/conf/unix/ambari.properties
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright 2011 The Apache Software Foundation
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-security.server.keys_dir = /var/lib/ambari-server/keys
-resources.dir = /var/lib/ambari-server/resources
-jdk.url=http://public-repo-1.hortonworks.com/ARTIFACTS/jdk-6u31-linux-x64.bin
-metadata.path=/var/lib/ambari-server/resources/stacks
-webapp.dir=/usr/lib/ambari-server/web
-bootstrap.dir=/var/run/ambari-server/bootstrap
-bootstrap.script=/usr/lib/python2.6/site-packages/ambari_server/bootstrap.py
-bootstrap.setup_agent.script=/usr/lib/python2.6/site-packages/ambari_server/setupAgent.py
-server.persistence.inMemory=false
-api.authenticate=true
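ambari.properties above is plain key=value text, so the server-side Python helpers referenced in it (bootstrap.py, setupAgent.py) can read it with a few lines like these. An illustrative sketch only; the parsing helper and the /etc/ambari-server/conf path are assumptions, not Ambari's actual code:

```python
# Minimal key=value parser for a properties file (editor's sketch).
def read_properties(path):
    props = {}
    for line in open(path):
        line = line.strip()
        if not line or line.startswith('#'):
            continue                      # skip blanks and comments
        key, _, value = line.partition('=')
        props[key.strip()] = value.strip()
    return props

props = read_properties('/etc/ambari-server/conf/ambari.properties')
print props.get('bootstrap.dir')          # /var/run/ambari-server/bootstrap
```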
diff --git a/branch-1.2/ambari-server/conf/unix/ca.config b/branch-1.2/ambari-server/conf/unix/ca.config
deleted file mode 100644
index d838131..0000000
--- a/branch-1.2/ambari-server/conf/unix/ca.config
+++ /dev/null
@@ -1,24 +0,0 @@
-[ ca ]
-default_ca             = CA_CLIENT
-[ CA_CLIENT ]
-dir		       = /var/lib/ambari-server/keys/db
-certs                  = $dir/certs
-new_certs_dir          = $dir/newcerts
-
-database               = $dir/index.txt
-serial                 = $dir/serial
-default_days           = 365    
-
-default_crl_days       = 7  
-default_md             = md5 
-
-policy                 = policy_anything 
-
-[ policy_anything ]
-countryName            = optional
-stateOrProvinceName    = optional 
-localityName           = optional
-organizationName       = optional
-organizationalUnitName = supplied 
-commonName             = optional   
-emailAddress           = optional       
diff --git a/branch-1.2/ambari-server/conf/unix/log4j.properties b/branch-1.2/ambari-server/conf/unix/log4j.properties
deleted file mode 100644
index 6d67159..0000000
--- a/branch-1.2/ambari-server/conf/unix/log4j.properties
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright 2011 The Apache Software Foundation
-# 
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Define some default values that can be overridden by system properties
-# Root logger option
-log4j.rootLogger=INFO,file
-
-# Direct log messages to a log file
-log4j.appender.file=org.apache.log4j.RollingFileAppender
-log4j.appender.file.File=/var/log/ambari-server/ambari-server.log
-log4j.appender.file.MaxFileSize=10MB
-log4j.appender.file.MaxBackupIndex=20
-log4j.appender.file.layout=org.apache.log4j.PatternLayout
-log4j.appender.file.layout.ConversionPattern=%d{ABSOLUTE} %5p %c{1}:%L - %m%n
diff --git a/branch-1.2/ambari-server/docs/api/v1/clusters-cluster.md b/branch-1.2/ambari-server/docs/api/v1/clusters-cluster.md
deleted file mode 100644
index d430909..0000000
--- a/branch-1.2/ambari-server/docs/api/v1/clusters-cluster.md
+++ /dev/null
@@ -1,125 +0,0 @@
-<!---
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-View Cluster Information
-=====
-
-[Back to Resources](index.md#resources)
-
-Returns information for the specified cluster identified by ":name"
-
-    GET /clusters/:name
-
-**Response**
-
-    200 OK
-    {
-    	"href" : "http://your.ambari.server/api/v1/clusters/c1",
-      	"Clusters" : {
-        	"cluster_name" : "c1",
-        	"cluster_id" : 1,
-        	"version" : "HDP-1.2.0"
-      	},
-      	"services" : [
-        	{
-        		"href" : "http://your.ambari.server/api/v1/clusters/c1/services/NAGIOS",
-        		"ServiceInfo" : {
-          			"cluster_name" : "c1",
-          			"service_name" : "NAGIOS"
-          		}
-        	},
-        	{
-        		"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HCATALOG",
-        		"ServiceInfo" : {
-          			"cluster_name" : "c1",
-          			"service_name" : "HCATALOG"
-          		}
-        	},
-        	{
-        		"href" : "http://your.ambari.server/api/v1/clusters/c1/services/PIG",
-        		"ServiceInfo" : {
-          			"cluster_name" : "c1",
-         			"service_name" : "PIG"
-          		}
-        	},
-        	{
-        		"href" : "http://your.ambari.server/api/v1/clusters/c1/services/MAPREDUCE",
-        		"ServiceInfo" : {
-          			"cluster_name" : "c1",
-          			"service_name" : "MAPREDUCE"
-          		}
-        	},
-        	{
-        		"href" : "http://your.ambari.server/api/v1/clusters/c1/services/GANGLIA",
-        		"ServiceInfo" : {
-          			"cluster_name" : "c1",
-          			"service_name" : "GANGLIA"
-          		}
-        	},
-        	{
-        		"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HIVE",
-        		"ServiceInfo" : {
-          			"cluster_name" : "c1",
-          			"service_name" : "HIVE"
-          		}
-        	},
-        	{
-        		"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HDFS",
-        		"ServiceInfo" : {
-          			"cluster_name" : "MyIE9",
-          			"service_name" : "HDFS"
-          		}
-        	},
-        	{
-        		"href" : "http://your.ambari.server/api/v1/clusters/c1/services/ZOOKEEPER",
-        		"ServiceInfo" : {
-          			"cluster_name" : "c1",
-         	 		"service_name" : "ZOOKEEPER"
-          		}
-        	},
-        	{
-        		"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HBASE",
-        		"ServiceInfo" : {
-          			"cluster_name" : "c1",
-          			"service_name" : "HBASE"
-          		}
-        	},
-        	{
-        		"href" : "http://your.ambari.server/api/v1/clusters/c1/services/OOZIE",
-        		"ServiceInfo" : {
-          			"cluster_name" : "c1",
-          			"service_name" : "OOZIE"
-          		}
-        	} 
-    	],
-    	"hosts" : [
-      		{
-      			"href" : "http://your.ambari.server/api/v1/clusters/c1/hosts/host1",
-      			"Hosts" : {
-        			"cluster_name" : "c1",
-        			"host_name" : "some.cluster.host"
-        		}
-      		},
-      		{
-      			"href" : "http://your.ambari.server/api/v1/clusters/c1/hosts/host2",
-      		"Hosts" : {
-        		"cluster_name" : "c1",
-        		"host_name" : "another.cluster.host"
-        	}
-        ]
-    }
-
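The markdown above documents `GET /clusters/:name`. A hedged Python 2 sketch of calling it; the server address, port 8080, and admin/admin credentials are illustrative assumptions, not values from this document:

```python
import base64
import json
import urllib2

# Build an authenticated request against the documented endpoint.
url = 'http://your.ambari.server:8080/api/v1/clusters/c1'
req = urllib2.Request(url)
req.add_header('Authorization', 'Basic ' + base64.b64encode('admin:admin'))

cluster = json.load(urllib2.urlopen(req))
print cluster['Clusters']['cluster_name']           # e.g. "c1"
for service in cluster['services']:
    print service['ServiceInfo']['service_name']    # NAGIOS, HDFS, ...
```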
diff --git a/branch-1.2/ambari-server/docs/api/v1/clusters.md b/branch-1.2/ambari-server/docs/api/v1/clusters.md
deleted file mode 100644
index dab58d9..0000000
--- a/branch-1.2/ambari-server/docs/api/v1/clusters.md
+++ /dev/null
@@ -1,42 +0,0 @@
-
-<!---
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-List Clusters
-=====
-
-[Back to Resources](index.md#resources)
-
-Returns a collection of the currently configured clusters.
-
-    GET /clusters
-
-**Response**
-
-    200 OK
-    {
-      "href" : "http://your.ambari.server/api/v1/clusters",
-      "items" : [ 
-      		{
-        		"href" : "http://your.ambari.server/api/v1/clusters/c1",
-        		"Clusters" : {
-          			"cluster_name" : "c1",
-          			"version" : "HDP-1.2.0"
-        		}
-      		} 	
-    	]
-	}
diff --git a/branch-1.2/ambari-server/docs/api/v1/components-component.md b/branch-1.2/ambari-server/docs/api/v1/components-component.md
deleted file mode 100644
index f462436..0000000
--- a/branch-1.2/ambari-server/docs/api/v1/components-component.md
+++ /dev/null
@@ -1,85 +0,0 @@
-<!---
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-View Component Information
-=====
-
-[Back to Resources](index.md#resources)
-
-Refers to a specific component identified by ":componentName" for a given service.
-
-    GET /clusters/:name/services/:serviceName/components/:componentName
-
-**Response**
-
-    200 OK
-    {
-    	"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HDFS/components/DATANODE",
-    	"metrics" : {
-    		"process" : {
-    			...    
-    		},
-      		"rpc" : {
-        		...
-      		},
-      		"ugi" : {
-      			...
-      		},
-      		"dfs" : {
-        		"datanode" : {
-          		...
-        		}
-      		},
-      		"disk" : {
-        		...
-      		},
-      		"cpu" : {
-        		...
-      		},
-      		"rpcdetailed" : {
-      			...
-      		},
-      		"jvm" : {
-        		...
-      		},
-      		"load" : {
-        		...
-      		},
-      		"memory" : {
-        		...
-      		},
-      		"network" : {
-        		...
-      		}
-    	},
-    	"ServiceComponentInfo" : {
-      		"cluster_name" : "c1",
-      		"component_name" : "DATANODE",
-      		"service_name" : "HDFS"
-      		"state" : "STARTED"
-    	},
-    	"host_components" : [
-      		{
-      			"href" : "http://your.ambari.server/api/v1/clusters/c1/hosts/host1/host_components/DATANODE",
-      			"HostRoles" : {
-        			"cluster_name" : "c1",
-        			"component_name" : "DATANODE",
-        			"host_name" : "host1"
-        		}
-      		}
-       	]
-    }
diff --git a/branch-1.2/ambari-server/docs/api/v1/components.md b/branch-1.2/ambari-server/docs/api/v1/components.md
deleted file mode 100644
index 2bfd4a9..0000000
--- a/branch-1.2/ambari-server/docs/api/v1/components.md
+++ /dev/null
@@ -1,66 +0,0 @@
-<!---
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-View Service Components
-=====
-
-[Back to Resources](index.md#resources)
-
-Refers to a collection of all components for a given service.
-
-    GET /clusters/:name/services/:serviceName/components
-
-**Response**
-
-    200 OK
-    {
-    	"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HDFS/components",
-    	"items" : [
-      		{
-      			"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HDFS/components/DATANODE",
-      			"ServiceComponentInfo" : {
-        			"cluster_name" : "c1",
-        			"component_name" : "DATANODE",
-        			"service_name" : "HDFS"
-        		}
-      		},
-      		{
-      			"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HDFS/components/SECONDARY_NAMENODE",
-      			"ServiceComponentInfo" : {
-        			"cluster_name" : "c1",
-        			"component_name" : "SECONDARY_NAMENODE",
-        			"service_name" : "HDFS"
-        		}
-      		},
-      		{
-      			"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HDFS/components/NAMENODE",
-      			"ServiceComponentInfo" : {
-        			"cluster_name" : "c1",
-        			"component_name" : "NAMENODE",
-        			"service_name" : "HDFS"
-        		}
-      		},
-      		{
-      			"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HDFS/components/HDFS_CLIENT",
-      			"ServiceComponentInfo" : {
-        			"cluster_name" : "c1",
-        			"component_name" : "HDFS_CLIENT",
-        			"service_name" : "HDFS"
-        		}
-      		}
-      	]
-    }
diff --git a/branch-1.2/ambari-server/docs/api/v1/host-component.md b/branch-1.2/ambari-server/docs/api/v1/host-component.md
deleted file mode 100644
index e256575..0000000
--- a/branch-1.2/ambari-server/docs/api/v1/host-component.md
+++ /dev/null
@@ -1,83 +0,0 @@
-<!---
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-View Host Component Information
-=====
-
-[Back to Resources](index.md#resources)
-
-Returns information for a specific role on the given host.
-
-    GET /clusters/:name/hosts/:hostName/host_components/:componentName
-
-**Response**
-
-    200 OK
-    {
-    	"href" : "http://your.ambari.server/api/v1/clusters/c1/hosts/host1/host_components/DATANODE",
-    	"HostRoles" : {
-    		"cluster_name" : "c1",
-      		"component_name" : "DATANODE",
-      		"host_name" : "host1",
-      		"state" : "STARTED"
-    	},
-    	"host" : {
-    		"href" : "http://localhost:8080/api/v1/clusters/c1/hosts/dev.hortonworks.com"
-    	},
-    	"metrics" : {
-    		"process" : {
-    			...    
-    		},
-      		"ugi" : {
-      			...
-      		},
-      		"dfs" : {
-        		"datanode" : {
-          		...
-        		}
-      		},
-      		"disk" : {
-        		...
-      		},
-      		"cpu" : {
-        		...
-      		},
-      		"jvm" : {
-        		...
-      		},
-      		"load" : {
-        		...
-      		},
-      		"memory" : {
-        		...
-      		},
-      		"network" : {
-        		...
-      		}
-    	},
-    	"component" : [
-      		{
-    	      	"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HDFS/components/DATANODE",
-      			"ServiceComponentInfo" : {
-        			"cluster_name" : "c1",
-        			"component_name" : "DATANODE",
-        			"service_name" : "HDFS"
-        		}
-      		}
-       	]
-    }
-
diff --git a/branch-1.2/ambari-server/docs/api/v1/host-components.md b/branch-1.2/ambari-server/docs/api/v1/host-components.md
deleted file mode 100644
index a5884f1..0000000
--- a/branch-1.2/ambari-server/docs/api/v1/host-components.md
+++ /dev/null
@@ -1,57 +0,0 @@
-<!---
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-List Host Components
-=====
-
-[Back to Resources](index.md#resources)
-
-Returns a collection of components running on a given host.
-
-    GET /clusters/:name/hosts/:hostName/host_components
-
-**Response**
-
-    200 OK
-    {
-    	"href" : "http://your.ambari.server/api/v1/clusters/c1/hosts/host1/host_components",
-    	items" : [
-    		{
-      			"href" : "your.ambari.server/api/v1/clusters/c1/hosts/host1/host_components/DATANODE",
-      			"HostRoles" : {
-        			"cluster_name" : "c1",
-        			"component_name" : "DATANODE",
-        			"host_name" : "host1"
-      			},
-      			"host" : {
-        			"href" : "http://your.ambari.server/api/v1/clusters/c1/hosts/host1"
-      			}
-    		},
-			{
-      			"href" : "your.ambari.server/api/v1/clusters/c1/hosts/host1/host_components/HBASE_CLIENT",
-      			"HostRoles" : {
-        			"cluster_name" : "c1",
-        			"component_name" : "HBASE_CLIENT",
-        			"host_name" : "host1"
-      			},
-      			"host" : {
-        			"href" : "http://your.ambari.server/api/v1/clusters/c1/hosts/host1"
-      			}
-    		},
-    		...
-		]
-	}
\ No newline at end of file
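Following the same pattern, a short sketch that walks the `items` collection returned by the host-components endpoint above (again with assumed host, port, and credentials):

```python
import base64
import json
import urllib2

url = ('http://your.ambari.server:8080/api/v1/'
       'clusters/c1/hosts/host1/host_components')
req = urllib2.Request(url)
req.add_header('Authorization', 'Basic ' + base64.b64encode('admin:admin'))

# Each item carries a HostRoles object naming the component on the host.
for item in json.load(urllib2.urlopen(req))['items']:
    print item['HostRoles']['component_name']   # DATANODE, HBASE_CLIENT, ...
```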
diff --git a/branch-1.2/ambari-server/docs/api/v1/hosts-host.md b/branch-1.2/ambari-server/docs/api/v1/hosts-host.md
deleted file mode 100644
index 578753b..0000000
--- a/branch-1.2/ambari-server/docs/api/v1/hosts-host.md
+++ /dev/null
@@ -1,100 +0,0 @@
-<!---
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-View Host Information
-=====
-
-[Back to Resources](index.md#resources)
-
-Returns information about a single host in a given cluster.
-
-    GET /clusters/:name/hosts/:hostName
-
-**Response**
-
-    200 OK
-    {
-    	"href" : "http://your.ambari.server/api/v1/clusters/c1/hosts/host1",
-    	"metrics" : {
-    		"process" : {
-    			...    
-    		},
-      		"rpc" : {
-        		...
-      		},
-      		"ugi" : {
-      			...
-      		},
-      		"disk" : {
-        		...
-      		},
-      		"cpu" : {
-        		...
-      		},
-      		"rpcdetailed" : {
-      			...
-      		},
-      		"jvm" : {
-        		...
-      		},
-      		"load" : {
-        		...
-      		},
-      		"memory" : {
-        		...
-      		},
-      		"network" : {
-        		...
-      		}
-    	},
-    	"Hosts" : {
-      		"cluster_name" : "c1",
-      		"host_name" : "host1",
-      		"host_state" : "HEALTHY",
-      		"public_host_name" : "host1.yourDomain.com",
-      		"cpu_count" : 1,
-      		"rack_info" : "rack-name",
-      		"os_arch" : "x86_64",
-      		"disk_info" : [
-      			{
-      				"available" : "41497444",
-        			"used" : "9584560",
-        			"percent" : "19%",
-        			"size" : "51606140",
-        			"type" : "ext4",
-       	 			"mountpoint" : "/"
-      			}
-      		],
-      		"ip" : "10.0.2.15",
-      		"os_type" : "rhel6",
-      		"total_mem" : 2055208,
-      		...        	      		
-    	},
-    	"host_components" : [
-      		{
-      			"href" : "http://your.ambari.server/api/v1/clusters/c1/hosts/host1/host_components/DATANODE",
-      			"HostRoles" : {
-        			"cluster_name" : "c1",
-        			"component_name" : "DATANODE",
-        			"host_name" : "host1"
-        		}
-      		},
-      		...
-       	]
-    }
-
-
diff --git a/branch-1.2/ambari-server/docs/api/v1/hosts.md b/branch-1.2/ambari-server/docs/api/v1/hosts.md
deleted file mode 100644
index 52751bf..0000000
--- a/branch-1.2/ambari-server/docs/api/v1/hosts.md
+++ /dev/null
@@ -1,48 +0,0 @@
-<!---
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-List Hosts
-=====
-
-[Back to Resources](index.md#resources)
-
-Returns a collection of all hosts in a given cluster.
-
-    GET /clusters/:name/hosts
-
-**Response**
-
-    200 OK
-    {
-    	"href" : "http://your.ambari.server/api/v1/clusters/c1/hosts/",
-    	"items" : [
-    		{
-      			"href" : "http://your.ambari.server/api/v1/clusters/c1/hosts/host1",
-      			"Hosts" : {
-        			"cluster_name" : "c1",
-        			"host_name" : "host1"
-      			}
-    		},
-    		{
-      			"href" : "http://your.ambari.server/api/v1/clusters/c1/hosts/host2",
-      			"Hosts" : {
-        			"cluster_name" : "c1",
-        			"host_name" : "host2"
-      			}
-    		}
-    	]
-	}  	
diff --git a/branch-1.2/ambari-server/docs/api/v1/index.md b/branch-1.2/ambari-server/docs/api/v1/index.md
deleted file mode 100644
index 3f34717..0000000
--- a/branch-1.2/ambari-server/docs/api/v1/index.md
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-server/docs/api/v1/services-service.md b/branch-1.2/ambari-server/docs/api/v1/services-service.md
deleted file mode 100644
index a503d1b..0000000
--- a/branch-1.2/ambari-server/docs/api/v1/services-service.md
+++ /dev/null
@@ -1,72 +0,0 @@
-<!---
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-View Service Information
-=====
-
-[Back to Resources](index.md#resources)
-
-Refers to a specific service identified by ":serviceName" for a given cluster.
-
-    GET /clusters/:name/services/:serviceName
-
-**Response**
-
-    200 OK
-    {
-    	"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HDFS",
-    	"ServiceInfo" : {
-      		"cluster_name" : "c1",
-      		"service_name" : "HDFS",
-      		"state" : "STARTED"      		
-      	},
-    	"components" : [
-      		{
-      			"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HDFS/components/NAMENODE",
-      			"ServiceComponentInfo" : {
-        			"cluster_name" : "c1",
-        			"component_name" : "NAMENODE",
-        			"service_name" : "HDFS"
-       			}
-      		},
-      		{
-      			"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HDFS/components/DATANODE",
-      			"ServiceComponentInfo" : {
-        			"cluster_name" : "c1",
-        			"component_name" : "DATANODE",
-        			"service_name" : "HDFS"
-        		}
-      		},
-      		{
-      			"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HDFS/components/HDFS_CLIENT",
-      			"ServiceComponentInfo" : {
-        			"cluster_name" : "c1",
-        			"component_name" : "HDFS_CLIENT",
-        			"service_name" : "HDFS"
-        		}
-      		},
-      		{
-      			"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HDFS/components/SECONDARY_NAMENODE",
-     			"ServiceComponentInfo" : {
-        			"cluster_name" : "c1",
-        			"component_name" : "SECONDARY_NAMENODE",
-        			"service_name" : "HDFS"
-        		}
-      		}
-      	]
-    }
-
diff --git a/branch-1.2/ambari-server/docs/api/v1/services.md b/branch-1.2/ambari-server/docs/api/v1/services.md
deleted file mode 100644
index 0ddd5ec..0000000
--- a/branch-1.2/ambari-server/docs/api/v1/services.md
+++ /dev/null
@@ -1,55 +0,0 @@
-<!---
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-List Services
-=====
-
-[Back to Resources](index.md#resources)
-
-Returns a collection of the services in a given cluster.
-
-    GET /clusters/:name/services
-
-**Response**
-
-    200 OK
-    {
-    	"href" : "http://your.ambari.server/api/v1/clusters/c1/services",
-    	"items" : [
-    		{
-        		"href" : "http://your.ambari.server/api/v1/clusters/c1/services/NAGIOS",
-        		"ServiceInfo" : {
-          			"cluster_name" : "c1",
-          			"service_name" : "NAGIOS"
-          		}
-        	},
-        	{
-        		"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HCATALOG",
-        		"ServiceInfo" : {
-        	  		"cluster_name" : "c1",
-        	  		"service_name" : "HCATALOG"
-        	  	}
-        	},
-        	{
-        		"href" : "http://your.ambari.server/api/v1/clusters/c1/services/PIG",
-        		"ServiceInfo" : {
-        	  		"cluster_name" : "c1",
-        	  		"service_name" : "PIG"
-        	  	}	
-        	}
-        ]
-    }    
\ No newline at end of file
diff --git a/branch-1.2/ambari-server/exclude.xml b/branch-1.2/ambari-server/exclude.xml
deleted file mode 100644
index 74f3c0f..0000000
--- a/branch-1.2/ambari-server/exclude.xml
+++ /dev/null
@@ -1,34 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-<FindBugsFilter>
-
-    <Match>
-        <Bug pattern="ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD"/>
-
-        <Class name="org.apache.ambari.server.controller.AmbariServer"/>
-        <Method name="run"/>
-    </Match>
-    <Match>
-        <Bug pattern="WMI_WRONG_MAP_ITERATOR"/>
-    </Match>
-    <Match>
-        <Bug pattern="EI_EXPOSE_REP"/>
-    </Match>
-    <Match>
-        <Bug pattern="EI_EXPOSE_REP2"/>
-    </Match>
-
-</FindBugsFilter>
diff --git a/branch-1.2/ambari-server/pom.xml b/branch-1.2/ambari-server/pom.xml
deleted file mode 100644
index 1705e9c..0000000
--- a/branch-1.2/ambari-server/pom.xml
+++ /dev/null
@@ -1,588 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-  <parent>
-    <groupId>org.apache.ambari</groupId>
-    <artifactId>ambari-project</artifactId>
-    <version>1.2.2-SNAPSHOT</version>
-    <relativePath>../ambari-project</relativePath>
-  </parent>
-  <modelVersion>4.0.0</modelVersion>
-  <groupId>org.apache.ambari</groupId>
-  <artifactId>ambari-server</artifactId>
-  <packaging>jar</packaging>
-  <name>Ambari Server</name>
-  <version>1.2.2-SNAPSHOT</version>
-  <description>Ambari Server</description>
-  <properties>
-    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
-    <python.ver>python &gt;= 2.6</python.ver>
-  </properties>
-  <build>
-    <plugins>
-      <plugin>
-        <artifactId>maven-compiler-plugin</artifactId>
-        <version>3.0</version>
-      </plugin>
-      <plugin>
-        <artifactId>maven-assembly-plugin</artifactId>
-        <configuration>
-          <descriptors>
-            <descriptor>src/main/assemblies/server.xml</descriptor>
-          </descriptors>
-        </configuration>
-        <executions>
-          <execution>
-            <id>build-tarball</id>
-            <phase>package</phase>
-            <goals>
-              <goal>single</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.rat</groupId>
-        <artifactId>apache-rat-plugin</artifactId>
-        <configuration>
-          <excludes>
-            <exclude>pass.txt</exclude>
-            <exclude>derby.log</exclude>
-            <exclude>src/test/resources/users.ldif</exclude>
-            <exclude>src/test/resources/gsInstaller-hosts.txt</exclude>
-            <exclude>src/test/resources/temporal_ganglia_data.txt</exclude>
-            <exclude>src/main/resources/ca.config</exclude>
-            <exclude>src/main/resources/db/serial</exclude>
-            <exclude>src/main/resources/db/index.txt</exclude>
-            <exclude>conf/unix/ca.config</exclude>
-            <exclude>**/*.json</exclude>
-
-            <!--gitignore content-->
-            <exclude>src/main/resources/db/newcerts/**</exclude>
-
-            <!--test samples-->
-            <exclude>src/test/resources/TestAmbaryServer.samples/**</exclude>
-          </excludes>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>rpm-maven-plugin</artifactId>
-        <version>2.0.1</version>
-        <executions>
-          <execution>
-            <!-- unbinds rpm creation from maven lifecycle -->
-            <phase>none</phase>
-            <goals>
-              <goal>rpm</goal>
-            </goals>
-          </execution>
-        </executions>
-        <configuration>
-          <!-- places rpm to specified folder -->
-          <!--
-          <workarea>
-            rpm-target
-          </workarea>
-          -->
-          <copyright>2012, Apache Software Foundation</copyright>
-          <version>${project.version}</version>
-          <group>Development</group>
-          <description>Ambari Server RPM package.</description>
-          <requires>
-            <require>postgresql-server &gt;= 8.1</require>
-            <require>openssl</require>
-            <require>${python.ver}</require>
-          </requires>
-          <preremoveScriptlet>
-            <script>mv /etc/ambari-server/conf /etc/ambari-server/conf.save</script>
-          </preremoveScriptlet>
-          <mappings>
-            <mapping>
-              <directory>/usr/lib/ambari-server</directory>
-              <dependency>
-              </dependency>
-              <filemode>755</filemode>
-              <username>root</username>
-              <groupname>root</groupname>
-            </mapping>
-            <mapping>
-              <directory>/usr/lib/ambari-server/web</directory>
-              <filemode>755</filemode>
-              <username>root</username>
-              <groupname>root</groupname>
-              <sources>
-                <source>
-                  <location>${basedir}/../ambari-web/public</location>
-                  <includes>
-                    <include>**</include>
-                  </includes>
-                </source>
-              </sources>
-            </mapping>
-            <mapping>
-              <directory>/usr/lib/ambari-server</directory>
-              <filemode>755</filemode>
-              <username>root</username>
-              <groupname>root</groupname>
-              <sources>
-                <source>
-                  <location>${project.build.directory}/${project.artifactId}-${project.version}.jar</location>
-                </source>
-              </sources>
-            </mapping>
-            <mapping>
-              <directory>/usr/sbin</directory>
-              <filemode>755</filemode>
-              <username>root</username>
-              <groupname>root</groupname>
-              <sources>
-                <source>
-                  <location>src/main/python/ambari-server.py</location>
-                </source>
-              </sources>
-            </mapping>
-            <mapping>
-              <directory>/usr/sbin</directory>
-              <filemode>755</filemode>
-              <username>root</username>
-              <groupname>root</groupname>
-              <sources>
-                <source>
-                  <location>sbin/ambari-server</location>
-                </source>
-              </sources>
-            </mapping>
-            <mapping>
-              <directory>/etc/ambari-server/conf</directory>
-              <filemode>755</filemode>
-              <username>root</username>
-              <groupname>root</groupname>
-              <configuration>true</configuration>
-              <sources>
-                <source>
-                  <location>conf/unix/ambari.properties</location>
-                </source>
-                <source>
-                  <location>conf/unix/log4j.properties</location>
-                </source>
-              </sources>
-            </mapping>
-            <mapping>
-              <directory>/var/lib/ambari-server/</directory>
-              <filemode>700</filemode>
-              <username>root</username>
-              <groupname>root</groupname>
-              <sources>
-                <source>
-                  <location>conf/unix/ambari-env.sh</location>
-                </source>
-              </sources>
-            </mapping>
-            <mapping>
-              <directory>/var/lib/ambari-server/keys</directory>
-              <filemode>755</filemode>
-              <username>root</username>
-              <groupname>root</groupname>
-              <sources>
-                <source>
-                  <location>conf/unix/ca.config</location>
-                </source>
-              </sources>
-            </mapping>
-            <mapping>
-              <directory>/var/lib/ambari-server/keys/db</directory>
-              <filemode>755</filemode>
-              <username>root</username>
-              <groupname>root</groupname>
-              <sources>
-                <source>
-                  <location>src/main/resources/db</location>
-                </source>
-              </sources>
-            </mapping>
-            <mapping>
-              <directory>/var/run/ambari-server/bootstrap</directory>
-              <filemode>755</filemode>
-              <username>root</username>
-              <groupname>root</groupname>
-            </mapping>
-            <mapping>
-              <directory>/var/log/ambari-server</directory>
-              <filemode>755</filemode>
-              <username>root</username>
-              <groupname>root</groupname>
-            </mapping>
-            <mapping>
-              <directory>/var/lib/ambari-server/resources</directory>
-              <filemode>755</filemode>
-              <username>root</username>
-              <groupname>root</groupname>
-              <sources>
-                <source>
-                  <location>src/main/resources/Ambari-DDL-Postgres-CREATE.sql</location>
-                </source>
-                <source>
-                  <location>src/main/resources/Ambari-DDL-Postgres-DROP.sql</location>
-                </source>
-              </sources>
-            </mapping>
-            <mapping>
-              <directory>/var/lib/ambari-server/resources/upgrade</directory>
-              <filemode>755</filemode>
-              <username>root</username>
-              <groupname>root</groupname>
-            </mapping>
-            <mapping>
-              <directory>/var/lib/ambari-server/resources/upgrade/ddl</directory>
-              <filemode>755</filemode>
-              <username>root</username>
-              <groupname>root</groupname>
-              <sources>
-                <source>
-                  <location>src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.2.2.sql</location>
-                </source>
-                <source>
-                  <location>src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.2.2.Fix.sql</location>
-                </source>
-                <source>
-                  <location>src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.2.2.Check.sql</location>
-                </source>
-              </sources>
-            </mapping>
-            <mapping>
-              <directory>/var/lib/ambari-server/resources/upgrade/dml</directory>
-              <filemode>755</filemode>
-              <username>root</username>
-              <groupname>root</groupname>
-              <sources>
-                <source>
-                  <location>src/main/resources/upgrade/dml/Ambari-DML-Postgres-UPGRADE_STACK.sql</location>
-                </source>
-              </sources>
-            </mapping>
-            <mapping>
-              <directory>/var/lib/ambari-server/resources/stacks</directory>
-              <filemode>755</filemode>
-              <username>root</username>
-              <groupname>root</groupname>
-              <sources>
-                <source>
-                  <location>src/main/resources/stacks</location>
-                </source>
-              </sources>
-            </mapping>
-            <mapping>
-              <directory>/usr/lib/python2.6/site-packages/ambari_server</directory>
-              <filemode>755</filemode>
-              <username>root</username>
-              <groupname>root</groupname>
-              <sources>
-                <source>
-                  <location>src/main/python/bootstrap.py</location>
-                </source>
-                <source>
-                  <location>src/main/python/setupAgent.py</location>
-                </source>
-              </sources>
-            </mapping>
-            <mapping>
-              <directory>/var/run/ambari-server</directory>
-              <filemode>755</filemode>
-              <username>root</username>
-              <groupname>root</groupname>
-            </mapping>
-          </mappings>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>findbugs-maven-plugin</artifactId>
-        <version>2.5.2</version>
-        <configuration>
-          <failOnError>false</failOnError>
-        </configuration>
-        <executions>
-          <execution>
-            <phase>verify</phase>
-            <goals>
-              <goal>check</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>exec-maven-plugin</artifactId>
-        <version>1.2</version>
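-        <!-- Runs the server's Python unit tests (unitTests.py) during the test phase -->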
-        <executions>
-          <execution>
-            <configuration>
-              <executable>python2.6</executable>
-              <workingDirectory>src/test/python</workingDirectory>
-              <arguments>
-                <argument>unitTests.py</argument>
-              </arguments>
-              <environmentVariables>
-                <PYTHONPATH>${project.basedir}/../ambari-common/src/test/python:${project.basedir}/src/main/python:${project.basedir}/src/main/python/ambari-server-state:${project.basedir}/src/test/python:$PYTHONPATH</PYTHONPATH>
-              </environmentVariables>
-              <skip>${skipTests}</skip>
-            </configuration>
-            <id>python-test</id>
-            <phase>test</phase>
-            <goals>
-              <goal>exec</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-    </plugins>
-  </build>
-  <profiles>
-  </profiles>
-  <dependencies>
-    <dependency>
-      <groupId>commons-io</groupId>
-      <artifactId>commons-io</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.google.inject.extensions</groupId>
-      <artifactId>guice-assistedinject</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.google.inject.extensions</groupId>
-      <artifactId>guice-persist</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.google.inject.extensions</groupId>
-      <artifactId>guice-servlet</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.derby</groupId>
-      <artifactId>derby</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.springframework.security</groupId>
-      <artifactId>spring-security-core</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.springframework.security</groupId>
-      <artifactId>spring-security-config</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.springframework.security</groupId>
-      <artifactId>spring-security-web</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.springframework.security</groupId>
-      <artifactId>spring-security-ldap</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.springframework.ldap</groupId>
-      <artifactId>spring-ldap-core</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.directory.server</groupId>
-      <artifactId>apacheds-core</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.directory.server</groupId>
-      <artifactId>apacheds-protocol-ldap</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.directory.shared</groupId>
-      <artifactId>shared-ldap</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-log4j12</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>log4j</groupId>
-      <artifactId>log4j</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.eclipse.persistence</groupId>
-      <artifactId>eclipselink</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-core</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.eclipse.jetty</groupId>
-      <artifactId>jetty-security</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.eclipse.jetty</groupId>
-      <artifactId>jetty-servlet</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.eclipse.jetty</groupId>
-      <artifactId>jetty-webapp</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.eclipse.jetty</groupId>
-      <artifactId>jetty-server</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>commons-logging</groupId>
-      <artifactId>commons-logging</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>commons-codec</groupId>
-      <artifactId>commons-codec</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>commons-lang</groupId>
-      <artifactId>commons-lang</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>commons-httpclient</groupId>
-      <artifactId>commons-httpclient</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>javax.servlet</groupId>
-      <artifactId>servlet-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.sun.jersey</groupId>
-      <artifactId>jersey-json</artifactId>
-      <exclusions>
-        <exclusion>
-          <groupId>org.codehaus.jackson</groupId>
-          <artifactId>jackson-xc</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.codehaus.jettison</groupId>
-          <artifactId>jettison</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.codehaus.jackson</groupId>
-          <artifactId>jackson-mapper-asl</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>com.sun.jersey</groupId>
-      <artifactId>jersey-server</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.sun.jersey</groupId>
-      <artifactId>jersey-client</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.sun.jersey.contribs</groupId>
-      <artifactId>jersey-multipart</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.sun.jersey.contribs</groupId>
-      <artifactId>jersey-guice</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.codehaus.jackson</groupId>
-      <artifactId>jackson-mapper-asl</artifactId>
-      <version>1.9.2</version>
-    </dependency>
-    <dependency>
-      <groupId>org.codehaus.jackson</groupId>
-      <artifactId>jackson-core-asl</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.codehaus.jackson</groupId>
-      <artifactId>jackson-jaxrs</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.codehaus.jackson</groupId>
-      <artifactId>jackson-xc</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.sun.jersey.jersey-test-framework</groupId>
-      <artifactId>jersey-test-framework-core</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>com.sun.jersey.jersey-test-framework</groupId>
-      <artifactId>jersey-test-framework-grizzly2</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.codehaus.jettison</groupId>
-      <artifactId>jettison</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.easymock</groupId>
-      <artifactId>easymock</artifactId>
-      <version>3.1</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.objenesis</groupId>
-      <artifactId>objenesis-tck</artifactId>
-      <version>1.2</version>
-    </dependency>
-    <dependency>
-      <groupId>cglib</groupId>
-      <artifactId>cglib</artifactId>
-      <version>2.2.2</version>
-    </dependency>
-    <dependency>
-      <groupId>asm</groupId>
-      <artifactId>asm</artifactId>
-      <version>3.3.1</version>
-    </dependency>
-    <dependency>
-      <groupId>org.xerial</groupId>
-      <artifactId>sqlite-jdbc</artifactId>
-      <version>3.7.2</version>
-    </dependency>
-    <dependency>
-      <groupId>com.google.inject</groupId>
-      <artifactId>guice</artifactId>
-      <version>3.0</version>
-    </dependency>
-    <dependency>
-      <groupId>com.google.code.gson</groupId>
-      <artifactId>gson</artifactId>
-      <version>2.2.2</version>
-    </dependency>
-    <dependency>
-      <groupId>postgresql</groupId>
-      <artifactId>postgresql</artifactId>
-      <version>9.1-901.jdbc4</version>
-    </dependency>
-  </dependencies>
-  <!--<reporting>
-        <plugins>
-            <plugin>
-                <groupId>org.codehaus.mojo</groupId>
-                <artifactId>findbugs-maven-plugin</artifactId>
-                <version>2.5.2</version>
-            </plugin>
-        </plugins>
-    </reporting>-->
-</project>
diff --git a/branch-1.2/ambari-server/sbin/ambari-server b/branch-1.2/ambari-server/sbin/ambari-server
deleted file mode 100644
index 795f638..0000000
--- a/branch-1.2/ambari-server/sbin/ambari-server
+++ /dev/null
@@ -1,100 +0,0 @@
-#!/usr/bin/env bash
-# description: ambari-server daemon
-# processname: ambari-server
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# /etc/init.d/ambari-server
-
-export PATH=/usr/lib/ambari-server/*:$PATH
-export AMBARI_CONF_DIR=/etc/ambari-server/conf
-
-if [ -a /usr/bin/python2.6 ]; then
-  PYTHON=/usr/bin/python2.6
-fi
-
-if [ -a /var/lib/ambari-server/ambari-env.sh ]; then
-  . /var/lib/ambari-server/ambari-env.sh
-fi
-
-if [ "x$PYTHON" == "x" ]; then
-  PYTHON=/usr/bin/python
-fi
-
-if [ "x$AMBARI_PASSPHRASE" == "x" ]; then
-  AMBARI_PASSPHRASE="DEV"
-fi
-
-if [ "x$JAVA_HOME" != "x" ]; then
-  export JAVA_HOME=$JAVA_HOME
-fi
-
-export AMBARI_PASSPHRASE=$AMBARI_PASSPHRASE
-
-# check for version
-majversion=`$PYTHON -V 2>&1 | awk '{print $2}' | cut -d'.' -f1`
-minversion=`$PYTHON -V 2>&1 | awk '{print $2}' | cut -d'.' -f2`
-numversion=$(( 10 * $majversion + $minversion))
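-# numversion encodes major.minor as a two-digit integer, e.g. Python 2.6 -> 26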
-if (( $numversion < 26 )); then
-  echo "Need python version > 2.6"
-  exit 1
-fi
-echo "Using python " $PYTHON
-
-case "$1" in
-  start)
-        echo -e "Starting ambari-server"
-        $PYTHON /usr/sbin/ambari-server.py $@
-        ;;
-  stop)
-        echo -e "Stopping ambari-server"
-        $PYTHON /usr/sbin/ambari-server.py $@
-        ;;
-  reset)
-        echo -e "Resetting ambari-server"
-        $PYTHON /usr/sbin/ambari-server.py $@
-        ;;
-  restart)
-        echo -e "Restarting ambari-server"
-        $0 stop
-        $0 start
-        ;;
-  upgrade)
-        echo -e "Upgrading ambari-server"
-        $PYTHON /usr/sbin/ambari-server.py $@
-        ;;
-  upgradestack)
-        echo -e "Upgrading stack of ambari-server"
-        $PYTHON /usr/sbin/ambari-server.py $@
-        ;;
-  setup)
-        echo -e "Run postgresql initdb"
-        initdb_res=`/sbin/service postgresql initdb`
-        if [ "0" == "$?" ]; then
-          echo -e "${initdb_res}"
-        fi
-        echo -e "Run postgresql start"
-        /sbin/service postgresql start
-        echo -e "Setup ambari-server"
-        $PYTHON /usr/sbin/ambari-server.py $@
-        ;;
-  *)
-        echo "Usage: /usr/sbin/ambari-server {start|stop|restart|setup|upgrade|upgradestack} [options]"
-        exit 1
-esac
-
-exit 0
diff --git a/branch-1.2/ambari-server/src/examples/spec.json b/branch-1.2/ambari-server/src/examples/spec.json
deleted file mode 100644
index 7863749..0000000
--- a/branch-1.2/ambari-server/src/examples/spec.json
+++ /dev/null
@@ -1,60 +0,0 @@
-{
-  "stack":{
-    "comment":"Stack Definition defines where the artifacts need to be fetched from.""name":"hdp-1.0.1",
-    "services":{
-      "hadoop":{
-        "version":1.0
-      },
-      "oozie":{
-        "version":1.1
-      }
-    },
-    "repository":{
-      "comment":"Could be winpkg/tarball/others""type":yum,
-      "info":{
-        "key_1":"value_1",
-        "url":"url"
-      }
-    }
-  },
-  "hosts":[
-    "comment":"This can also be thought of as representation of hosts eg. small/medium/large""host_1",
-    "host_2"
-  ],
-  "configuration":{
-    "hadoop-env":{
-      "HADOOP_CONF_DIR":"/etc/hadoop",
-      "HADOOP_NAMENODE_OPTS":"-Dsecurity.audit.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT",
-      "HADOOP_CLIENT_OPTS":"-Xmx128m"
-    },
-    "core-site":{
-      "fs.default.name":"hdfs://${namenode}:8020/",
-      "hadoop.tmp.dir":"/grid/0/hadoop/tmp",
-      "!hadoop.security.authentication":"kerberos",
-      
-    }
-  },
-  "services":{
-    "comment":"Spec for which services need to be configured for a cluster and also service config properties",
-    "hdfs":{
-      "enabled":true,
-      "user":"hdfs"
-    },
-    "mapred":{
-      "user":"mapred",
-      "enabled":false
-    }
-  },
-  "roles":{
-    "comment_1":"Configuration knobs that are host specific",
-    "comment_2":"Also the host role mapping here can just be mapping to ec2/azure instance of small/large",
-    "namenode":{
-      "host":"host_1",
-      "configuration":{
-        "hdfs-site":{
-          "dfs.https.enable":"true"
-        }
-      }
-    }
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-server/src/main/assemblies/empty.xml b/branch-1.2/ambari-server/src/main/assemblies/empty.xml
deleted file mode 100644
index 35738b1..0000000
--- a/branch-1.2/ambari-server/src/main/assemblies/empty.xml
+++ /dev/null
@@ -1,21 +0,0 @@
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-  
-       http://www.apache.org/licenses/LICENSE-2.0
-  
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<assembly>
-    <id>empty</id>
-    <formats/>
-</assembly>
diff --git a/branch-1.2/ambari-server/src/main/assemblies/server.xml b/branch-1.2/ambari-server/src/main/assemblies/server.xml
deleted file mode 100644
index 83800e4..0000000
--- a/branch-1.2/ambari-server/src/main/assemblies/server.xml
+++ /dev/null
@@ -1,107 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-  
-       http://www.apache.org/licenses/LICENSE-2.0
-  
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<assembly>
-  <id>dist</id>
-  <formats>
-    <format>dir</format>
-    <format>tar.gz</format>
-  </formats>
-  <includeBaseDirectory>false</includeBaseDirectory>
-  <files>
-    <file>
-      <source>${project.build.directory}/${artifact.artifactId}-${artifact.version}.jar</source>
-      <outputDirectory>ambari-server-${project.version}/lib/ambari-server</outputDirectory>
-    </file>
-   <file>
-      <source>${basedir}/src/main/python/ambari-server.py</source>
-      <outputDirectory>/ambari-server-${project.version}/sbin</outputDirectory>
-    </file>
-    <file>
-      <source>${basedir}/src/main/python/bootstrap.py</source>
-      <outputDirectory>/ambari-server-${project.version}/sbin</outputDirectory>
-    </file>
-    <file>
-      <source>${basedir}/src/main/python/setupAgent.py</source>
-      <outputDirectory>/ambari-server-${project.version}/sbin</outputDirectory>
-    </file>
-  </files>
-  <fileSets>
-    <!-- Distro files, readme, licenses, etc -->
-    <fileSet>
-      <directory>${basedir}/../</directory>
-      <outputDirectory>ambari-server-${project.version}/</outputDirectory>
-      <includes>
-        <include>*.txt</include>
-      </includes>
-    </fileSet>
-     <!--
-    <fileSet>
-      <directory>${project.build.directory}/web/</directory>
-      <outputDirectory>ambari-server-${project.version}/web/</outputDirectory>
-      <includes>
-        <include>*</include>
-      </includes>
-    </fileSet>
-    -->
-    <!--
-    <fileSet>
-      <directory>${basedir}/src/main/bin</directory>
-      <outputDirectory>ambari-server-${project.version}/bin</outputDirectory>
-      <includes>
-        <include>*</include>
-      </includes>
-      <fileMode>0755</fileMode>
-    </fileSet>
-    -->
-    <fileSet>
-      <directory>${basedir}/src/main/resources/</directory>
-      <outputDirectory>/ambari-server-${project.version}/keystore</outputDirectory>
-      <includes>
-        <include>db/*</include>
-        <include>ca.config</include>
-        <include>pass.txt</include>
-      </includes>
-    </fileSet>
-    <fileSet>
-      <directory>${basedir}/../ambari-web/public</directory>
-      <outputDirectory>ambari-server-${project.version}/web</outputDirectory>
-      <includes>
-        <include>**</include>
-      </includes>
-    </fileSet>
-    <fileSet>
-      <directory>src/main/conf</directory>
-      <outputDirectory>/ambari-server-${project.version}/etc/ambari-server/conf</outputDirectory>
-    </fileSet>
-    <fileSet>
-      <directory>src/main/resources</directory>
-      <outputDirectory>/ambari-server-${project.version}/var/lib/ambari-server/resources/</outputDirectory>
-      <includes>
-        <include>stacks/**</include>
-      </includes>
-    </fileSet>
-  </fileSets>
-  <dependencySets>
-    <dependencySet>
-      <outputDirectory>ambari-server-${project.version}/lib/ambari-server</outputDirectory>
-      <unpack>false</unpack>
-      <scope>compile</scope>
-    </dependencySet>
-  </dependencySets>
-</assembly>
diff --git a/branch-1.2/ambari-server/src/main/conf/ambari.properties b/branch-1.2/ambari-server/src/main/conf/ambari.properties
deleted file mode 100644
index 14b867b..0000000
--- a/branch-1.2/ambari-server/src/main/conf/ambari.properties
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright 2011 The Apache Software Foundation
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-metadata.path=src/main/resources/stacks
\ No newline at end of file
diff --git a/branch-1.2/ambari-server/src/main/conf/log4j.properties b/branch-1.2/ambari-server/src/main/conf/log4j.properties
deleted file mode 100644
index bd6c8cd..0000000
--- a/branch-1.2/ambari-server/src/main/conf/log4j.properties
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright 2011 The Apache Software Foundation
-# 
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Define some default values that can be overridden by system properties
-ambari.root.logger=INFO,DRFA
-ambari.log.dir=/var/log/ambari-server
-ambari.log.file=ambari.log
-
-
-# Define the root logger to the system property "ambari.root.logger".
-log4j.rootLogger=${ambari.root.logger}
-
-# Logging Threshold
-log4j.threshold=ALL
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${ambari.log.dir}/${ambari.log.file}
-
-# Roll over at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this 
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/eventdb/db/DBConnector.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/eventdb/db/DBConnector.java
deleted file mode 100644
index 6b648e4..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/eventdb/db/DBConnector.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.eventdb.db;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.ambari.eventdb.model.DataTable;
-import org.apache.ambari.eventdb.model.Jobs.JobDBEntry;
-import org.apache.ambari.eventdb.model.TaskAttempt;
-import org.apache.ambari.eventdb.model.WorkflowContext;
-import org.apache.ambari.eventdb.model.Workflows;
-import org.apache.ambari.eventdb.model.Workflows.WorkflowDBEntry.WorkflowFields;
-
-public interface DBConnector {
-  public void submitJob(JobDBEntry j, WorkflowContext context) throws IOException;
-  
-  public void updateJob(JobDBEntry j) throws IOException;
-  
-  public Workflows fetchWorkflows() throws IOException;
-  
-  public Workflows fetchWorkflows(WorkflowFields field, boolean sortAscending, int offset, int limit) throws IOException;
-  
-  public DataTable fetchWorkflows(int offset, int limit, String searchTerm, int echo, WorkflowFields field, boolean sortAscending, String searchWorkflowId,
-      String searchWorkflowName, String searchWorkflowType, String searchUserName, int minJobs, int maxJobs, long minInputBytes, long maxInputBytes,
-      long minOutputBytes, long maxOutputBytes, long minDuration, long maxDuration, long minStartTime, long maxStartTime) throws IOException;
-  
-  public List<JobDBEntry> fetchJobDetails(String workflowID) throws IOException;
-  
-  public long[] fetchJobStartStopTimes(String jobID) throws IOException;
-  
-  public List<TaskAttempt> fetchTaskAttempts(String jobID, String taskType) throws IOException;
-  
-  public void close();
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/eventdb/db/PostgresConnector.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/eventdb/db/PostgresConnector.java
deleted file mode 100644
index ece26c5..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/eventdb/db/PostgresConnector.java
+++ /dev/null
@@ -1,502 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.eventdb.db;
-
-import java.io.IOException;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.EnumMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.ambari.eventdb.model.DataTable;
-import org.apache.ambari.eventdb.model.DataTable.AvgData;
-import org.apache.ambari.eventdb.model.DataTable.Summary;
-import org.apache.ambari.eventdb.model.DataTable.Summary.SummaryFields;
-import org.apache.ambari.eventdb.model.DataTable.Times;
-import org.apache.ambari.eventdb.model.Jobs.JobDBEntry;
-import org.apache.ambari.eventdb.model.Jobs.JobDBEntry.JobFields;
-import org.apache.ambari.eventdb.model.TaskAttempt;
-import org.apache.ambari.eventdb.model.TaskAttempt.TaskAttemptFields;
-import org.apache.ambari.eventdb.model.WorkflowContext;
-import org.apache.ambari.eventdb.model.Workflows;
-import org.apache.ambari.eventdb.model.Workflows.WorkflowDBEntry;
-import org.apache.ambari.eventdb.model.Workflows.WorkflowDBEntry.WorkflowFields;
-import org.apache.commons.lang.NotImplementedException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.codehaus.jackson.JsonParseException;
-import org.codehaus.jackson.map.JsonMappingException;
-import org.codehaus.jackson.map.ObjectMapper;
-
-public class PostgresConnector implements DBConnector {
-  private static final Log LOG = LogFactory.getLog(PostgresConnector.class);
-  private static final String WORKFLOW_TABLE_NAME = "workflow";
-  private static final String JOB_TABLE_NAME = "job";
-  private static final String TASK_ATTEMPT_TABLE_NAME = "taskattempt";
-  public static final String SORT_ASC = "ASC";
-  public static final String SORT_DESC = "DESC";
-  
-  private static final ObjectMapper jsonMapper = new ObjectMapper();
-  
-  private Connection db;
-  
-  public static enum Statements {
-    SJ_INSERT_JOB_PS(""),
-    SJ_CHECK_WORKFLOW_PS(""),
-    SJ_INSERT_WORKFLOW_PS(""),
-    UJ_UPDATE_JOB_PS(""),
-    UJ_UPDATE_WORKFLOW_PS(""),
-    FW_PS("SELECT " + WorkflowDBEntry.WORKFLOW_FIELDS + " FROM " + WORKFLOW_TABLE_NAME),
-    FW_COUNT_PS("SELECT count(*) as " + SummaryFields.numRows + " FROM " + WORKFLOW_TABLE_NAME),
-    FW_SUMMARY_PS("SELECT count(*) as " + SummaryFields.numRows + ", "
-        + getAvg(WorkflowFields.NUMJOBSTOTAL, SummaryFields.avgJobs, SummaryFields.minJobs, SummaryFields.maxJobs) + ", "
-        + getAvg(WorkflowFields.INPUTBYTES, SummaryFields.avgInput, SummaryFields.minInput, SummaryFields.maxInput) + ", "
-        + getAvg(WorkflowFields.OUTPUTBYTES, SummaryFields.avgOutput, SummaryFields.minOutput, SummaryFields.maxOutput) + ", "
-        + getAvg(WorkflowFields.DURATION, SummaryFields.avgDuration, SummaryFields.minDuration, SummaryFields.maxDuration) + ", min("
-        + WorkflowFields.STARTTIME + ") as " + SummaryFields.youngest + ", max(" + WorkflowFields.STARTTIME + ") as " + SummaryFields.oldest + " FROM "
-        + WORKFLOW_TABLE_NAME),
-    FJD_PS("SELECT " + JobDBEntry.JOB_FIELDS + " FROM " + JOB_TABLE_NAME + " WHERE " + JobFields.WORKFLOWID.toString() + " = ?"),
-    FJSS_PS("SELECT " + JobFields.SUBMITTIME + ", " + JobFields.FINISHTIME + " FROM " + JOB_TABLE_NAME + " WHERE " + JobFields.JOBID + " = ?"),
-    FTA_PS("SELECT " + TaskAttempt.TASK_ATTEMPT_FIELDS + " FROM " + TASK_ATTEMPT_TABLE_NAME + " WHERE " + TaskAttemptFields.JOBID + " = ? AND "
-        + TaskAttemptFields.TASKTYPE + " = ? ORDER BY " + TaskAttemptFields.STARTTIME);
-    
-    private String statementString;
-    
-    Statements(String statementString) {
-      this.statementString = statementString;
-    }
-    
-    public String getStatementString() {
-      return statementString;
-    }
-    
-    private static String getAvg(WorkflowFields field, SummaryFields avg, SummaryFields min, SummaryFields max) {
-      return "avg(" + field + ") as " + avg + ", min(" + field + ") as " + min + ", max(" + field + ") as " + max;
-    }
-  }
-  
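-  // Cache of lazily-prepared statements, one per Statements key; populated by getPS().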
-  private Map<Statements,PreparedStatement> preparedStatements = new EnumMap<Statements,PreparedStatement>(Statements.class);
-  
-  public PostgresConnector(String hostname, String dbname, String username, String password) throws IOException {
-    String url = "jdbc:postgresql://" + hostname + "/" + dbname;
-    try {
-      Class.forName("org.postgresql.Driver");
-      db = DriverManager.getConnection(url, username, password);
-    } catch (ClassNotFoundException e) {
-      db = null;
-      throw new IOException(e);
-    } catch (SQLException e) {
-      db = null;
-      throw new IOException(e);
-    }
-  }
-  
-  @Override
-  public void submitJob(JobDBEntry j, WorkflowContext context) throws IOException {
-    // PreparedStatement insertJobPS = getPS(Statements.SJ_INSERT_JOB_PS);
-    // PreparedStatement checkWorkflowPS = getPS(Statements.SJ_CHECK_WORKFLOW_PS);
-    // PreparedStatement insertWorkflowPS = getPS(Statements.SJ_INSERT_WORKFLOW_PS);
-    throw new NotImplementedException();
-  }
-  
-  @Override
-  public void updateJob(JobDBEntry j) throws IOException {
-    // PreparedStatement updateJobPS = getPS(Statements.UJ_UPDATE_JOB_PS);
-    // PreparedStatement updateWorkflowPS = getPS(Statements.UJ_UPDATE_WORKFLOW_PS);
-    throw new NotImplementedException();
-  }
-  
-  @Override
-  public Workflows fetchWorkflows() throws IOException {
-    Workflows workflows = new Workflows();
-    workflows.setWorkflows(fetchWorkflows(getPS(Statements.FW_PS)));
-    workflows.setSummary(fetchSummary(getPS(Statements.FW_SUMMARY_PS)));
-    return workflows;
-  }
-  
-  @Override
-  public Workflows fetchWorkflows(WorkflowFields field, boolean sortAscending, int offset, int limit) throws IOException {
-    if (offset < 0)
-      offset = 0;
-    Workflows workflows = new Workflows();
-    workflows.setWorkflows(fetchWorkflows(getQualifiedPS(Statements.FW_PS, "", field, sortAscending, offset, limit)));
-    workflows.setSummary(fetchSummary(getPS(Statements.FW_SUMMARY_PS)));
-    return workflows;
-  }
-  
-  private List<WorkflowDBEntry> fetchWorkflows(PreparedStatement ps) throws IOException {
-    List<WorkflowDBEntry> workflows = new ArrayList<WorkflowDBEntry>();
-    ResultSet rs = null;
-    try {
-      rs = ps.executeQuery();
-      while (rs.next()) {
-        workflows.add(getWorkflowDBEntry(rs));
-      }
-    } catch (SQLException e) {
-      throw new IOException(e);
-    } finally {
-      try {
-        if (rs != null)
-          rs.close();
-      } catch (SQLException e) {
-        LOG.error("Exception while closing ResultSet", e);
-      }
-    }
-    return workflows;
-  }
-  
-  private Summary fetchSummary(PreparedStatement ps) throws IOException {
-    Summary summary = new Summary();
-    ResultSet rs = null;
-    try {
-      rs = ps.executeQuery();
-      if (rs.next()) {
-        summary.setNumRows(SummaryFields.numRows.getInt(rs));
-        summary.setJobs(getAvgData(rs, SummaryFields.avgJobs, SummaryFields.minJobs, SummaryFields.maxJobs));
-        summary.setInput(getAvgData(rs, SummaryFields.avgInput, SummaryFields.minInput, SummaryFields.maxInput));
-        summary.setOutput(getAvgData(rs, SummaryFields.avgOutput, SummaryFields.minOutput, SummaryFields.maxOutput));
-        summary.setDuration(getAvgData(rs, SummaryFields.avgDuration, SummaryFields.minDuration, SummaryFields.maxDuration));
-        Times times = new Times();
-        times.setYoungest(SummaryFields.youngest.getLong(rs));
-        times.setOldest(SummaryFields.oldest.getLong(rs));
-        summary.setTimes(times);
-      }
-    } catch (SQLException e) {
-      throw new IOException(e);
-    } finally {
-      try {
-        if (rs != null)
-          rs.close();
-      } catch (SQLException e) {
-        LOG.error("Exception while closing ResultSet", e);
-      }
-    }
-    return summary;
-  }
-  
-  private static WorkflowDBEntry getWorkflowDBEntry(ResultSet rs) throws SQLException, JsonParseException, JsonMappingException, IOException {
-    WorkflowDBEntry w = new WorkflowDBEntry();
-    w.setWorkflowId(WorkflowFields.WORKFLOWID.getString(rs));
-    w.setWorkflowName(WorkflowFields.WORKFLOWNAME.getString(rs));
-    w.setUserName(WorkflowFields.USERNAME.getString(rs));
-    w.setStartTime(WorkflowFields.STARTTIME.getLong(rs));
-    w.setElapsedTime(WorkflowFields.DURATION.getLong(rs));
-    w.setNumJobsTotal(WorkflowFields.NUMJOBSTOTAL.getInt(rs));
-    w.setInputBytes(WorkflowFields.INPUTBYTES.getLong(rs));
-    w.setOutputBytes(WorkflowFields.OUTPUTBYTES.getLong(rs));
-    w.setNumJobsCompleted(WorkflowFields.NUMJOBSCOMPLETED.getInt(rs));
-    w.setWorkflowContext(jsonMapper.readValue(WorkflowFields.WORKFLOWCONTEXT.getString(rs), WorkflowContext.class));
-    return w;
-  }
-  
-  private static AvgData getAvgData(ResultSet rs, SummaryFields avg, SummaryFields min, SummaryFields max) throws SQLException {
-    AvgData avgData = new AvgData();
-    avgData.setAvg(avg.getDouble(rs));
-    avgData.setMin(min.getLong(rs));
-    avgData.setMax(max.getLong(rs));
-    return avgData;
-  }
-  
-  @Override
-  public DataTable fetchWorkflows(int offset, int limit, String searchTerm, int echo, WorkflowFields col, boolean sortAscending, String searchWorkflowId,
-      String searchWorkflowName, String searchWorkflowType, String searchUserName, int minJobs, int maxJobs, long minInputBytes, long maxInputBytes,
-      long minOutputBytes, long maxOutputBytes, long minDuration, long maxDuration, long minStartTime, long maxStartTime) throws IOException {
-    int total = 0;
-    PreparedStatement ps = getPS(Statements.FW_COUNT_PS);
-    ResultSet rs = null;
-    try {
-      rs = ps.executeQuery();
-      if (rs.next())
-        total = SummaryFields.numRows.getInt(rs);
-    } catch (SQLException e) {
-      throw new IOException(e);
-    } finally {
-      try {
-        if (rs != null)
-          rs.close();
-      } catch (SQLException e) {
-        LOG.error("Exception while closing ResultSet", e);
-      }
-    }
-    
-    String searchClause = buildSearchClause(searchTerm, searchWorkflowId, searchWorkflowName, searchWorkflowType, searchUserName, minJobs, maxJobs,
-        minInputBytes, maxInputBytes, minOutputBytes, maxOutputBytes, minDuration, maxDuration, minStartTime, maxStartTime);
-    List<WorkflowDBEntry> workflows = fetchWorkflows(getQualifiedPS(Statements.FW_PS, searchClause, col, sortAscending, offset, limit));
-    Summary summary = fetchSummary(getQualifiedPS(Statements.FW_SUMMARY_PS, searchClause));
-    DataTable table = new DataTable();
-    table.setiTotalRecords(total);
-    table.setiTotalDisplayRecords(summary.getNumRows());
-    if (workflows.isEmpty()) {
-      table.setStartIndex(-1);
-      table.setEndIndex(-1);
-    } else {
-      table.setStartIndex(offset);
-      table.setEndIndex(offset + workflows.size() - 1);
-    }
-    table.setAaData(workflows);
-    table.setsEcho(echo);
-    table.setSummary(summary);
-    return table;
-  }
-  
-  @Override
-  public List<JobDBEntry> fetchJobDetails(String workflowId) throws IOException {
-    PreparedStatement ps = getPS(Statements.FJD_PS);
-    List<JobDBEntry> jobs = new ArrayList<JobDBEntry>();
-    ResultSet rs = null;
-    try {
-      ps.setString(1, workflowId);
-      rs = ps.executeQuery();
-      while (rs.next()) {
-        JobDBEntry j = new JobDBEntry();
-        j.setConfPath(JobFields.CONFPATH.getString(rs));
-        j.setSubmitTime(JobFields.SUBMITTIME.getLong(rs));
-        long finishTime = JobFields.FINISHTIME.getLong(rs);
-        if (finishTime > j.getSubmitTime())
-          j.setElapsedTime(finishTime - j.getSubmitTime());
-        else
-          j.setElapsedTime(0);
-        j.setInputBytes(JobFields.INPUTBYTES.getLong(rs));
-        j.setJobId(JobFields.JOBID.getString(rs));
-        j.setJobName(JobFields.JOBNAME.getString(rs));
-        j.setMaps(JobFields.MAPS.getInt(rs));
-        j.setOutputBytes(JobFields.OUTPUTBYTES.getLong(rs));
-        j.setReduces(JobFields.REDUCES.getInt(rs));
-        j.setStatus(JobFields.STATUS.getString(rs));
-        j.setUserName(JobFields.USERNAME.getString(rs));
-        j.setWorkflowEntityName(JobFields.WORKFLOWENTITYNAME.getString(rs));
-        j.setWorkflowId(JobFields.WORKFLOWID.getString(rs));
-        jobs.add(j);
-      }
-      rs.close();
-    } catch (SQLException e) {
-      throw new IOException(e);
-    } finally {
-      if (rs != null)
-        try {
-          rs.close();
-        } catch (SQLException e) {
-          LOG.error("Exception while closing ResultSet", e);
-        }
-      
-    }
-    return jobs;
-  }
-  
-  @Override
-  public long[] fetchJobStartStopTimes(String jobID) throws IOException {
-    PreparedStatement ps = getPS(Statements.FJSS_PS);
-    long[] times = new long[2];
-    ResultSet rs = null;
-    try {
-      ps.setString(1, jobID);
-      rs = ps.executeQuery();
-      if (!rs.next())
-        return null;
-      times[0] = JobFields.SUBMITTIME.getLong(rs);
-      times[1] = JobFields.FINISHTIME.getLong(rs);
-      rs.close();
-    } catch (SQLException e) {
-      throw new IOException(e);
-    } finally {
-      if (rs != null)
-        try {
-          rs.close();
-        } catch (SQLException e) {
-          LOG.error("Exception while closing ResultSet", e);
-        }
-    }
-    if (times[1] == 0)
-      times[1] = System.currentTimeMillis();
-    if (times[1] < times[0])
-      times[1] = times[0];
-    return times;
-  }
-  
-  @Override
-  public List<TaskAttempt> fetchTaskAttempts(String jobID, String taskType) throws IOException {
-    PreparedStatement ps = getPS(Statements.FTA_PS);
-    List<TaskAttempt> taskAttempts = new ArrayList<TaskAttempt>();
-    ResultSet rs = null;
-    try {
-      ps.setString(1, jobID);
-      ps.setString(2, taskType);
-      rs = ps.executeQuery();
-      while (rs.next()) {
-        TaskAttempt t = new TaskAttempt();
-        t.setFinishTime(TaskAttemptFields.FINISHTIME.getLong(rs));
-        t.setInputBytes(TaskAttemptFields.INPUTBYTES.getLong(rs));
-        t.setLocality(TaskAttemptFields.LOCALITY.getString(rs));
-        t.setMapFinishTime(TaskAttemptFields.MAPFINISHTIME.getLong(rs));
-        t.setOutputBytes(TaskAttemptFields.OUTPUTBYTES.getLong(rs));
-        t.setShuffleFinishTime(TaskAttemptFields.SHUFFLEFINISHTIME.getLong(rs));
-        t.setSortFinishTime(TaskAttemptFields.SORTFINISHTIME.getLong(rs));
-        t.setStartTime(TaskAttemptFields.STARTTIME.getLong(rs));
-        t.setStatus(TaskAttemptFields.STATUS.getString(rs));
-        t.setTaskAttemptId(TaskAttemptFields.TASKATTEMPTID.getString(rs));
-        t.setTaskType(TaskAttemptFields.TASKTYPE.getString(rs));
-        taskAttempts.add(t);
-      }
-      rs.close();
-    } catch (SQLException e) {
-      throw new IOException(e);
-    } finally {
-      if (rs != null)
-        try {
-          rs.close();
-        } catch (SQLException e) {
-          LOG.error("Exception while closing ResultSet", e);
-        }
-    }
-    return taskAttempts;
-  }
-  
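-  // Returns the cached PreparedStatement for the given key, preparing it on first
-  // use. The cache is synchronized so concurrent callers never prepare it twice.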
-  private PreparedStatement getPS(Statements statement) throws IOException {
-    if (db == null)
-      throw new IOException("postgres db not initialized");
-    
-    synchronized (preparedStatements) {
-      if (!preparedStatements.containsKey(statement)) {
-        try {
-          preparedStatements.put(statement, db.prepareStatement(statement.getStatementString()));
-        } catch (SQLException e) {
-          throw new IOException(e);
-        }
-      }
-    }
-    
-    return preparedStatements.get(statement);
-  }
-  
-  private PreparedStatement getQualifiedPS(Statements statement, String searchClause) throws IOException {
-    if (db == null)
-      throw new IOException("postgres db not initialized");
-    try {
-      // LOG.debug("preparing " + statement.getStatementString() + searchClause);
-      return db.prepareStatement(statement.getStatementString() + searchClause);
-    } catch (SQLException e) {
-      throw new IOException(e);
-    }
-  }
-  
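-  // Appends ORDER BY/OFFSET/LIMIT to the base statement; a negative limit means
-  // no LIMIT clause is added.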
-  private PreparedStatement getQualifiedPS(Statements statement, String searchClause, WorkflowFields field, boolean sortAscending, int offset, int limit)
-      throws IOException {
-    if (db == null)
-      throw new IOException("postgres db not initialized");
-    String limitClause = " ORDER BY " + field.toString() + " " + (sortAscending ? SORT_ASC : SORT_DESC) + " OFFSET " + offset
-        + (limit >= 0 ? " LIMIT " + limit : "");
-    return getQualifiedPS(statement, searchClause + limitClause);
-  }
-  
-  private static void addRangeSearch(StringBuilder sb, WorkflowFields field, int min, int max) {
-    if (min >= 0)
-      append(sb, greaterThan(field, Integer.toString(min)));
-    if (max >= 0)
-      append(sb, lessThan(field, Integer.toString(max)));
-  }
-  
-  private static void addRangeSearch(StringBuilder sb, WorkflowFields field, long min, long max) {
-    if (min >= 0)
-      append(sb, greaterThan(field, Long.toString(min)));
-    if (max >= 0)
-      append(sb, lessThan(field, Long.toString(max)));
-  }
-  
-  private static void append(StringBuilder sb, String s) {
-    if (sb.length() > WHERE.length())
-      sb.append(" and ");
-    sb.append(s);
-  }
-  
-  private static String like(WorkflowFields field, String s) {
-    return field.toString() + " like '%" + s + "%'";
-  }
-  
-  private static String startsWith(WorkflowFields field, String s) {
-    return field.toString() + " like '" + s + "%'";
-  }
-  
-  private static String equals(WorkflowFields field, String s) {
-    return field.toString() + " = '" + s + "'";
-  }
-  
-  private static String lessThan(WorkflowFields field, String s) {
-    return field.toString() + " <= " + s;
-  }
-  
-  private static String greaterThan(WorkflowFields field, String s) {
-    return field.toString() + " >= " + s;
-  }
-  
-  private static final String WHERE = " where ";
-  
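-  // Search values are concatenated directly into the SQL text rather than bound
-  // as parameters, so callers must only pass trusted input.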
-  private static String buildSearchClause(String searchTerm, String searchWorkflowId, String searchWorkflowName, String searchWorkflowType,
-      String searchUserName, int minJobs, int maxJobs, long minInputBytes, long maxInputBytes, long minOutputBytes, long maxOutputBytes, long minDuration,
-      long maxDuration, long minStartTime, long maxStartTime) {
-    StringBuilder sb = new StringBuilder();
-    sb.append(WHERE);
-    if (searchTerm != null && searchTerm.length() > 0) {
-      sb.append("(");
-      sb.append(like(WorkflowFields.WORKFLOWID, searchTerm));
-      sb.append(" or ");
-      sb.append(like(WorkflowFields.WORKFLOWNAME, searchTerm));
-      sb.append(" or ");
-      sb.append(like(WorkflowFields.USERNAME, searchTerm));
-      sb.append(")");
-    }
-    if (searchWorkflowId != null)
-      append(sb, like(WorkflowFields.WORKFLOWID, searchWorkflowId));
-    if (searchWorkflowName != null)
-      append(sb, like(WorkflowFields.WORKFLOWNAME, searchWorkflowName));
-    if (searchWorkflowType != null)
-      append(sb, startsWith(WorkflowFields.WORKFLOWID, searchWorkflowType));
-    if (searchUserName != null)
-      append(sb, equals(WorkflowFields.USERNAME, searchUserName));
-    addRangeSearch(sb, WorkflowFields.NUMJOBSTOTAL, minJobs, maxJobs);
-    addRangeSearch(sb, WorkflowFields.INPUTBYTES, minInputBytes, maxInputBytes);
-    addRangeSearch(sb, WorkflowFields.OUTPUTBYTES, minOutputBytes, maxOutputBytes);
-    addRangeSearch(sb, WorkflowFields.DURATION, minDuration, maxDuration);
-    addRangeSearch(sb, WorkflowFields.STARTTIME, minStartTime, maxStartTime);
-    
-    if (sb.length() == WHERE.length())
-      return "";
-    else
-      return sb.toString();
-  }
-  
-  @Override
-  public void close() {
-    if (db != null) {
-      try {
-        db.close();
-      } catch (SQLException e) {
-        LOG.error("Exception while closing connector", e);
-      }
-      db = null;
-    }
-  }
-  
-  @Override
-  protected void finalize() throws Throwable {
-    close();
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/eventdb/model/DataTable.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/eventdb/model/DataTable.java
deleted file mode 100644
index 4fcac8e..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/eventdb/model/DataTable.java
+++ /dev/null
@@ -1,242 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.eventdb.model;
-
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.util.List;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlRootElement;
-
-import org.apache.ambari.eventdb.model.Workflows.WorkflowDBEntry;
-
-@XmlRootElement
-@XmlAccessorType(XmlAccessType.FIELD)
-public class DataTable {
-  int sEcho;
-  int iTotalRecords;
-  int iTotalDisplayRecords;
-  int startIndex;
-  int endIndex;
-  List<WorkflowDBEntry> aaData;
-  Summary summary;
-  
-  @XmlRootElement
-  @XmlAccessorType(XmlAccessType.FIELD)
-  public static class Summary {
-    public static enum SummaryFields {
-      numRows,
-      avgJobs,
-      minJobs,
-      maxJobs,
-      avgInput,
-      minInput,
-      maxInput,
-      avgOutput,
-      minOutput,
-      maxOutput,
-      avgDuration,
-      minDuration,
-      maxDuration,
-      youngest,
-      oldest;
-      
-      public int getInt(ResultSet rs) throws SQLException {
-        return rs.getInt(this.toString());
-      }
-      
-      public long getLong(ResultSet rs) throws SQLException {
-        return rs.getLong(this.toString());
-      }
-      
-      public double getDouble(ResultSet rs) throws SQLException {
-        return rs.getDouble(this.toString());
-      }
-    }
-    
-    int numRows;
-    AvgData jobs;
-    AvgData input;
-    AvgData output;
-    AvgData duration;
-    Times times;
-    
-    public int getNumRows() {
-      return numRows;
-    }
-    
-    public void setNumRows(int numRows) {
-      this.numRows = numRows;
-    }
-    
-    public AvgData getJobs() {
-      return jobs;
-    }
-    
-    public void setJobs(AvgData jobs) {
-      this.jobs = jobs;
-    }
-    
-    public AvgData getInput() {
-      return input;
-    }
-    
-    public void setInput(AvgData input) {
-      this.input = input;
-    }
-    
-    public AvgData getOutput() {
-      return output;
-    }
-    
-    public void setOutput(AvgData output) {
-      this.output = output;
-    }
-    
-    public AvgData getDuration() {
-      return duration;
-    }
-    
-    public void setDuration(AvgData duration) {
-      this.duration = duration;
-    }
-    
-    public Times getTimes() {
-      return times;
-    }
-    
-    public void setTimes(Times times) {
-      this.times = times;
-    }
-  }
-  
-  @XmlRootElement
-  @XmlAccessorType(XmlAccessType.FIELD)
-  public static class AvgData {
-    double avg;
-    long min;
-    long max;
-    
-    public double getAvg() {
-      return avg;
-    }
-    
-    public void setAvg(double avg) {
-      this.avg = avg;
-    }
-    
-    public long getMin() {
-      return min;
-    }
-    
-    public void setMin(long min) {
-      this.min = min;
-    }
-    
-    public long getMax() {
-      return max;
-    }
-    
-    public void setMax(long max) {
-      this.max = max;
-    }
-  }
-  
-  @XmlRootElement
-  @XmlAccessorType(XmlAccessType.FIELD)
-  public static class Times {
-    long oldest;
-    long youngest;
-    
-    public long getOldest() {
-      return oldest;
-    }
-    
-    public void setOldest(long oldest) {
-      this.oldest = oldest;
-    }
-    
-    public long getYoungest() {
-      return youngest;
-    }
-    
-    public void setYoungest(long youngest) {
-      this.youngest = youngest;
-    }
-  }
-  
-  public DataTable() {}
-  
-  public int getsEcho() {
-    return sEcho;
-  }
-  
-  public void setsEcho(int sEcho) {
-    this.sEcho = sEcho;
-  }
-  
-  public int getiTotalRecords() {
-    return iTotalRecords;
-  }
-  
-  public void setiTotalRecords(int iTotalRecords) {
-    this.iTotalRecords = iTotalRecords;
-  }
-  
-  public int getiTotalDisplayRecords() {
-    return iTotalDisplayRecords;
-  }
-  
-  public void setiTotalDisplayRecords(int iTotalDisplayRecords) {
-    this.iTotalDisplayRecords = iTotalDisplayRecords;
-  }
-  
-  public int getStartIndex() {
-    return startIndex;
-  }
-  
-  public void setStartIndex(int startIndex) {
-    this.startIndex = startIndex;
-  }
-  
-  public int getEndIndex() {
-    return endIndex;
-  }
-  
-  public void setEndIndex(int endIndex) {
-    this.endIndex = endIndex;
-  }
-  
-  public List<WorkflowDBEntry> getAaData() {
-    return aaData;
-  }
-  
-  public void setAaData(List<WorkflowDBEntry> aaData) {
-    this.aaData = aaData;
-  }
-  
-  public Summary getSummary() {
-    return summary;
-  }
-  
-  public void setSummary(Summary summary) {
-    this.summary = summary;
-  }
-}
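
DataTable's field names are not arbitrary: sEcho, iTotalRecords, iTotalDisplayRecords and aaData are the server-side processing keys of the jQuery DataTables 1.9 protocol that the Ambari web UI's job-history table consumes. A sketch of shaping one page of results (counts and offsets are illustrative):

    // Assumes: import java.util.ArrayList; import java.util.List;
    // plus the deleted model classes DataTable and Workflows.WorkflowDBEntry.
    List<WorkflowDBEntry> page = new ArrayList<WorkflowDBEntry>();
    DataTable table = new DataTable();
    table.setsEcho(3);                  // echoed back so the client can drop
                                        // out-of-order responses
    table.setiTotalRecords(1575);       // rows before filtering
    table.setiTotalDisplayRecords(42);  // rows matching the search clause
    table.setStartIndex(0);
    table.setEndIndex(9);               // inclusive end of this page
    table.setAaData(page);              // the page itself
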
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/eventdb/model/Jobs.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/eventdb/model/Jobs.java
deleted file mode 100644
index 4f878b1..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/eventdb/model/Jobs.java
+++ /dev/null
@@ -1,205 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.eventdb.model;
-
-import org.apache.commons.lang.StringUtils;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlRootElement;
-import javax.xml.bind.annotation.XmlTransient;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.util.List;
-
-@XmlRootElement
-@XmlAccessorType(XmlAccessType.FIELD)
-public class Jobs {
-  List<JobDBEntry> jobs;
-  
-  public static class JobDBEntry {
-    public static enum JobFields {
-      JOBID,
-      JOBNAME,
-      STATUS,
-      USERNAME,
-      SUBMITTIME,
-      FINISHTIME,
-      MAPS,
-      REDUCES,
-      INPUTBYTES,
-      OUTPUTBYTES,
-      CONFPATH,
-      WORKFLOWID,
-      WORKFLOWENTITYNAME;
-      
-      public String getString(ResultSet rs) throws SQLException {
-        return rs.getString(this.toString());
-      }
-      
-      public int getInt(ResultSet rs) throws SQLException {
-        return rs.getInt(this.toString());
-      }
-      
-      public long getLong(ResultSet rs) throws SQLException {
-        return rs.getLong(this.toString());
-      }
-      
-      public static String join() {
-        String[] tmp = new String[JobFields.values().length];
-        for (int i = 0; i < tmp.length; i++)
-          tmp[i] = JobFields.values()[i].toString();
-        return StringUtils.join(tmp, ",");
-      }
-    }
-    
-    @XmlTransient
-    public static final String JOB_FIELDS = JobFields.join();
-    
-    private String jobId;
-    private String jobName;
-    private String status;
-    private String userName;
-    private long submitTime;
-    private long elapsedTime;
-    private int maps;
-    private int reduces;
-    private long inputBytes;
-    private long outputBytes;
-    private String confPath;
-    private String workflowId;
-    private String workflowEntityName;
-    
-    public JobDBEntry() {
-      /* Required by JAXB. */
-    }
-    
-    public String getJobId() {
-      return jobId;
-    }
-    
-    public String getJobName() {
-      return jobName;
-    }
-    
-    public String getStatus() {
-      return status;
-    }
-    
-    public String getUserName() {
-      return userName;
-    }
-    
-    public long getSubmitTime() {
-      return submitTime;
-    }
-    
-    public long getElapsedTime() {
-      return elapsedTime;
-    }
-    
-    public int getMaps() {
-      return maps;
-    }
-    
-    public int getReduces() {
-      return reduces;
-    }
-    
-    public long getInputBytes() {
-      return inputBytes;
-    }
-    
-    public long getOutputBytes() {
-      return outputBytes;
-    }
-    
-    public String getConfPath() {
-      return confPath;
-    }
-    
-    public String getWorkflowId() {
-      return workflowId;
-    }
-    
-    public String getWorkflowEntityName() {
-      return workflowEntityName;
-    }
-    
-    public void setJobId(String jobId) {
-      this.jobId = jobId;
-    }
-    
-    public void setJobName(String jobName) {
-      this.jobName = jobName;
-    }
-    
-    public void setStatus(String status) {
-      this.status = status;
-    }
-    
-    public void setUserName(String userName) {
-      this.userName = userName;
-    }
-    
-    public void setSubmitTime(long submitTime) {
-      this.submitTime = submitTime;
-    }
-    
-    public void setElapsedTime(long elapsedTime) {
-      this.elapsedTime = elapsedTime;
-    }
-    
-    public void setMaps(int maps) {
-      this.maps = maps;
-    }
-    
-    public void setReduces(int reduces) {
-      this.reduces = reduces;
-    }
-    
-    public void setInputBytes(long inputBytes) {
-      this.inputBytes = inputBytes;
-    }
-    
-    public void setOutputBytes(long outputBytes) {
-      this.outputBytes = outputBytes;
-    }
-    
-    public void setConfPath(String confPath) {
-      this.confPath = confPath;
-    }
-    
-    public void setWorkflowId(String workflowId) {
-      this.workflowId = workflowId;
-    }
-    
-    public void setWorkflowEntityName(String workflowEntityName) {
-      this.workflowEntityName = workflowEntityName;
-    }
-  }
-  
-  public Jobs() {}
-  
-  public List<JobDBEntry> getJobs() {
-    return jobs;
-  }
-  
-  public void setJobs(List<JobDBEntry> jobs) {
-    this.jobs = jobs;
-  }
-}
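
JobFields.join() exists so the SQL SELECT list and the per-column ResultSet reads share one definition; add a column to the enum and both sides stay in step. A sketch of the row-mapping side (the surrounding query and table name are assumptions, not from this file):

    // Assumes a ResultSet positioned on a row selected with
    // "SELECT " + JobDBEntry.JOB_FIELDS + " FROM ..." (table name not shown here).
    static JobDBEntry readRow(java.sql.ResultSet rs) throws java.sql.SQLException {
      JobDBEntry j = new JobDBEntry();
      j.setJobId(JobFields.JOBID.getString(rs));
      j.setStatus(JobFields.STATUS.getString(rs));
      j.setMaps(JobFields.MAPS.getInt(rs));
      j.setReduces(JobFields.REDUCES.getInt(rs));
      j.setSubmitTime(JobFields.SUBMITTIME.getLong(rs));
      j.setInputBytes(JobFields.INPUTBYTES.getLong(rs));
      return j;
    }
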
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/eventdb/model/TaskAttempt.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/eventdb/model/TaskAttempt.java
deleted file mode 100644
index 523a8d9..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/eventdb/model/TaskAttempt.java
+++ /dev/null
@@ -1,165 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.eventdb.model;
-
-import java.sql.ResultSet;
-import java.sql.SQLException;
-
-import org.apache.commons.lang.StringUtils;
-
-/**
- * One task attempt row, with per-phase timestamps for the task charts.
- */
-public class TaskAttempt {
-  public static enum TaskAttemptFields {
-    JOBID,
-    TASKATTEMPTID,
-    TASKTYPE,
-    STARTTIME,
-    FINISHTIME,
-    MAPFINISHTIME,
-    SHUFFLEFINISHTIME,
-    SORTFINISHTIME,
-    INPUTBYTES,
-    OUTPUTBYTES,
-    STATUS,
-    LOCALITY;
-    
-    public String getString(ResultSet rs) throws SQLException {
-      return rs.getString(this.toString());
-    }
-    
-    public int getInt(ResultSet rs) throws SQLException {
-      return rs.getInt(this.toString());
-    }
-    
-    public long getLong(ResultSet rs) throws SQLException {
-      return rs.getLong(this.toString());
-    }
-    
-    public static String join() {
-      String[] tmp = new String[TaskAttemptFields.values().length];
-      for (int i = 0; i < tmp.length; i++)
-        tmp[i] = TaskAttemptFields.values()[i].toString();
-      return StringUtils.join(tmp, ",");
-    }
-  }
-  
-  public static final String TASK_ATTEMPT_FIELDS = TaskAttemptFields.join();
-  
-  private String taskAttemptId;
-  private String taskType;
-  private long startTime;
-  private long finishTime;
-  private long mapFinishTime;
-  private long shuffleFinishTime;
-  private long sortFinishTime;
-  private long inputBytes;
-  private long outputBytes;
-  private String status;
-  private String locality;
-  
-  public TaskAttempt() {}
-  
-  public String getTaskAttemptId() {
-    return taskAttemptId;
-  }
-  
-  public void setTaskAttemptId(String taskAttemptId) {
-    this.taskAttemptId = taskAttemptId;
-  }
-  
-  public String getTaskType() {
-    return taskType;
-  }
-  
-  public void setTaskType(String taskType) {
-    this.taskType = taskType;
-  }
-  
-  public long getStartTime() {
-    return startTime;
-  }
-  
-  public void setStartTime(long startTime) {
-    this.startTime = startTime;
-  }
-  
-  public long getFinishTime() {
-    return finishTime;
-  }
-  
-  public void setFinishTime(long finishTime) {
-    this.finishTime = finishTime;
-  }
-  
-  public long getMapFinishTime() {
-    return mapFinishTime;
-  }
-  
-  public void setMapFinishTime(long mapFinishTime) {
-    this.mapFinishTime = mapFinishTime;
-  }
-  
-  public long getShuffleFinishTime() {
-    return shuffleFinishTime;
-  }
-  
-  public void setShuffleFinishTime(long shuffleFinishTime) {
-    this.shuffleFinishTime = shuffleFinishTime;
-  }
-  
-  public long getSortFinishTime() {
-    return sortFinishTime;
-  }
-  
-  public void setSortFinishTime(long sortFinishTime) {
-    this.sortFinishTime = sortFinishTime;
-  }
-  
-  public long getInputBytes() {
-    return inputBytes;
-  }
-
-  public long getOutputBytes() {
-    return outputBytes;
-  }
-
-  public void setInputBytes(long inputBytes) {
-    this.inputBytes = inputBytes;
-  }
-
-  public void setOutputBytes(long outputBytes) {
-    this.outputBytes = outputBytes;
-  }
-
-  public String getStatus() {
-    return status;
-  }
-  
-  public void setStatus(String status) {
-    this.status = status;
-  }
-  
-  public String getLocality() {
-    return locality;
-  }
-  
-  public void setLocality(String locality) {
-    this.locality = locality;
-  }
-}
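
The timestamps on TaskAttempt mark phase boundaries, which is what lets the charting endpoints derive shuffle, sort and reduce durations without extra columns. A worked sketch with made-up epoch-millisecond values:

    TaskAttempt t = new TaskAttempt();
    t.setStartTime(1000L);
    t.setShuffleFinishTime(4000L);
    t.setSortFinishTime(5000L);
    t.setFinishTime(9000L);
    long shuffleMs = t.getShuffleFinishTime() - t.getStartTime();      // 3000
    long sortMs    = t.getSortFinishTime() - t.getShuffleFinishTime(); // 1000
    long reduceMs  = t.getFinishTime() - t.getSortFinishTime();        // 4000
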
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/eventdb/model/TaskData.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/eventdb/model/TaskData.java
deleted file mode 100644
index 37007df..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/eventdb/model/TaskData.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.eventdb.model;
-
-import java.util.List;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlRootElement;
-
-@XmlRootElement
-@XmlAccessorType(XmlAccessType.FIELD)
-public class TaskData {
-  private List<Point> map;
-  private List<Point> shuffle;
-  private List<Point> reduce;
-  
-  public static class Point {
-    private long x;
-    private int y;
-    
-    public Point() {}
-    
-    public Point(long x, int y) {
-      this.x = x;
-      this.y = y;
-    }
-    
-    public long getX() {
-      return x;
-    }
-    
-    public int getY() {
-      return y;
-    }
-    
-    public void setX(long x) {
-      this.x = x;
-    }
-    
-    public void setY(int y) {
-      this.y = y;
-    }
-  }
-  
-  public TaskData() {}
-  
-  public List<Point> getMapData() {
-    return map;
-  }
-  
-  public void setMapData(List<Point> mapData) {
-    this.map = mapData;
-  }
-  
-  public List<Point> getShuffleData() {
-    return shuffle;
-  }
-  
-  public void setShuffleData(List<Point> shuffleData) {
-    this.shuffle = shuffleData;
-  }
-  
-  public List<Point> getReduceData() {
-    return reduce;
-  }
-  
-  public void setReduceData(List<Point> reduceData) {
-    this.reduce = reduceData;
-  }
-}
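
TaskData carries three parallel (x, y) series, map, shuffle and reduce, where x is a time in seconds and y the number of attempts running then; the /task endpoint later in this diff fills them. A short sketch (values illustrative):

    // Assumes: import java.util.ArrayList; import java.util.List;
    TaskData data = new TaskData();
    List<TaskData.Point> maps = new ArrayList<TaskData.Point>();
    maps.add(new TaskData.Point(1365000000L, 8));  // 8 map attempts running at t
    maps.add(new TaskData.Point(1365000030L, 3));  // 3 still running 30 s later
    data.setMapData(maps);
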
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/eventdb/model/TaskLocalityData.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/eventdb/model/TaskLocalityData.java
deleted file mode 100644
index e50a1a0..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/eventdb/model/TaskLocalityData.java
+++ /dev/null
@@ -1,156 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.eventdb.model;
-
-import java.util.List;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlRootElement;
-
-@XmlRootElement
-@XmlAccessorType(XmlAccessType.FIELD)
-public class TaskLocalityData {
-  private List<DataPoint> mapNodeLocal;
-  private List<DataPoint> mapRackLocal;
-  private List<DataPoint> mapOffSwitch;
-  private List<DataPoint> reduceOffSwitch;
-  private long submitTime;
-  private long finishTime;
-  
-  public static class DataPoint {
-    private long x;
-    private long y;
-    private long r;
-    private long io;
-    private String label;
-    private String status;
-    
-    public DataPoint() {}
-    
-    public DataPoint(long x) {
-      this(x, 0, 0, 0, null, null);
-    }
-    
-    public DataPoint(long x, long y, long r, long io, String taskAttemptId, String status) {
-      this.x = x;
-      this.y = y;
-      this.r = r;
-      this.io = io;
-      this.label = taskAttemptId;
-      this.status = status;
-    }
-    
-    public long getX() {
-      return x;
-    }
-    
-    public long getY() {
-      return y;
-    }
-    
-    public long getR() {
-      return r;
-    }
-    
-    public long getIO() {
-      return io;
-    }
-    
-    public String getLabel() {
-      return label;
-    }
-    
-    public String getStatus() {
-      return status;
-    }
-    
-    public void setX(long x) {
-      this.x = x;
-    }
-    
-    public void setY(long y) {
-      this.y = y;
-    }
-    
-    public void setR(long r) {
-      this.r = r;
-    }
-    
-    public void setIO(long io) {
-      this.io = io;
-    }
-    
-    public void setLabel(String label) {
-      this.label = label;
-    }
-    
-    public void setStatus(String status) {
-      this.status = status;
-    }
-  }
-  
-  public TaskLocalityData() {}
-  
-  public List<DataPoint> getMapNodeLocal() {
-    return mapNodeLocal;
-  }
-  
-  public void setMapNodeLocal(List<DataPoint> mapNodeLocal) {
-    this.mapNodeLocal = mapNodeLocal;
-  }
-  
-  public List<DataPoint> getMapRackLocal() {
-    return mapRackLocal;
-  }
-  
-  public void setMapRackLocal(List<DataPoint> mapRackLocal) {
-    this.mapRackLocal = mapRackLocal;
-  }
-  
-  public List<DataPoint> getMapOffSwitch() {
-    return mapOffSwitch;
-  }
-  
-  public void setMapOffSwitch(List<DataPoint> mapOffSwitch) {
-    this.mapOffSwitch = mapOffSwitch;
-  }
-  
-  public List<DataPoint> getReduceOffSwitch() {
-    return reduceOffSwitch;
-  }
-  
-  public void setReduceOffSwitch(List<DataPoint> reduceOffSwitch) {
-    this.reduceOffSwitch = reduceOffSwitch;
-  }
-  
-  public long getSubmitTime() {
-    return submitTime;
-  }
-  
-  public void setSubmitTime(long submitTime) {
-    this.submitTime = submitTime;
-  }
-  
-  public long getFinishTime() {
-    return finishTime;
-  }
-  
-  public void setFinishTime(long finishTime) {
-    this.finishTime = finishTime;
-  }
-}
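
Each DataPoint is one bubble on the locality chart: x is the attempt's start second, y its duration in milliseconds, io the bytes it moved, and r the radius that setRValues() in WorkflowJsonService later derives from io. A sketch of one bubble (the attempt id is hypothetical):

    DataPoint p = new DataPoint();
    p.setX(1365000000L);                 // start time, seconds
    p.setY(42000L);                      // duration, milliseconds
    p.setIO(128L * 1024 * 1024);         // 128 MB read + written
    p.setLabel("attempt_201304010000_0001_m_000000_0");
    p.setStatus("SUCCESS");
    // p.setR(...) is filled in later from io relative to the job's max IO
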
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/eventdb/model/WorkflowContext.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/eventdb/model/WorkflowContext.java
deleted file mode 100644
index c2b0468..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/eventdb/model/WorkflowContext.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.eventdb.model;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlRootElement;
-
-
-@XmlRootElement
-@XmlAccessorType(XmlAccessType.FIELD)
-public class WorkflowContext {
-  
-  private String workflowId;
-  private String workflowName;
-  private String workflowEntityName;
-  
-  private WorkflowDag workflowDag;
-  
-  private WorkflowContext parentWorkflowContext;
-  
-  public WorkflowContext() {
-    /* Required by JAXB. */
-  }
-  
-  /* Getters. */
-  public String getWorkflowId() {
-    return this.workflowId;
-  }
-  
-  public String getWorkflowName() {
-    return this.workflowName;
-  }
-  
-  public String getWorkflowEntityName() {
-    return this.workflowEntityName;
-  }
-  
-  public WorkflowDag getWorkflowDag() {
-    return this.workflowDag;
-  }
-  
-  public WorkflowContext getParentWorkflowContext() {
-    return this.parentWorkflowContext;
-  }
-  
-  /* Setters. */
-  public void setWorkflowId(String wfId) {
-    this.workflowId = wfId;
-  }
-  
-  public void setWorkflowName(String wfName) {
-    this.workflowName = wfName;
-  }
-  
-  public void setWorkflowEntityName(String wfEntityName) {
-    this.workflowEntityName = wfEntityName;
-  }
-  
-  public void setWorkflowDag(WorkflowDag wfDag) {
-    this.workflowDag = wfDag;
-  }
-  
-  public void setParentWorkflowContext(WorkflowContext pWfContext) {
-    this.parentWorkflowContext = pWfContext;
-  }
-}
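
WorkflowContext nests through parentWorkflowContext, so a sub-workflow (say, a Pig script launched from Oozie) can be walked back to its root. A sketch of the traversal (ids hypothetical):

    WorkflowContext parent = new WorkflowContext();
    parent.setWorkflowId("oozie_wf_0001");
    WorkflowContext child = new WorkflowContext();
    child.setWorkflowId("pig_script_0001");
    child.setParentWorkflowContext(parent);
    // Walk from the child up to the root context.
    for (WorkflowContext c = child; c != null; c = c.getParentWorkflowContext())
      System.out.println(c.getWorkflowId());  // pig_script_0001, then oozie_wf_0001
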
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/eventdb/model/WorkflowDag.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/eventdb/model/WorkflowDag.java
deleted file mode 100644
index 07056bf..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/eventdb/model/WorkflowDag.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.eventdb.model;
-
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlRootElement;
-
-@XmlRootElement
-@XmlAccessorType(XmlAccessType.FIELD)
-public class WorkflowDag {
-  
-  public static class WorkflowDagEntry {
-    
-    private String source;
-    private List<String> targets = new ArrayList<String>();
-    
-    public WorkflowDagEntry() {
-      /* Required by JAXB. */
-    }
-    
-    /* Getters. */
-    public String getSource() {
-      return this.source;
-    }
-    
-    public List<String> getTargets() {
-      return this.targets;
-    }
-    
-    /* Setters. */
-    public void setSource(String source) {
-      this.source = source;
-    }
-    
-    public void setTargets(List<String> targets) {
-      this.targets = targets;
-    }
-    
-    public void addTarget(String target) {
-      this.targets.add(target);
-    }
-  }
-  
-  List<WorkflowDagEntry> entries = new ArrayList<WorkflowDagEntry>();
-  
-  public WorkflowDag() {
-    /* Required by JAXB. */
-  }
-  
-  /* Getters. */
-  public List<WorkflowDagEntry> getEntries() {
-    return this.entries;
-  }
-  
-  /* Setters. */
-  public void setEntries(List<WorkflowDagEntry> entries) {
-    this.entries = entries;
-  }
-  
-  public void addEntry(WorkflowDag.WorkflowDagEntry entry) {
-    this.entries.add(entry);
-  }
-  
-  public int size() {
-    Set<String> nodes = new HashSet<String>();
-    for (WorkflowDagEntry entry : entries) {
-      nodes.add(entry.getSource());
-      nodes.addAll(entry.getTargets());
-    }
-    return nodes.size();
-  }
-}
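
The DAG is stored as per-source adjacency entries, and size() counts distinct node names appearing on either side of an edge rather than the number of entries. A sketch (node names illustrative):

    WorkflowDag dag = new WorkflowDag();
    WorkflowDag.WorkflowDagEntry e = new WorkflowDag.WorkflowDagEntry();
    e.setSource("load");
    e.addTarget("join");
    e.addTarget("filter");
    dag.addEntry(e);
    System.out.println(dag.size());  // 3: {load, join, filter}, from one entry
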
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/eventdb/model/Workflows.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/eventdb/model/Workflows.java
deleted file mode 100644
index 541ce79..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/eventdb/model/Workflows.java
+++ /dev/null
@@ -1,197 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.eventdb.model;
-
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.util.List;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlRootElement;
-import javax.xml.bind.annotation.XmlTransient;
-
-import org.apache.ambari.eventdb.model.DataTable.Summary;
-import org.apache.commons.lang.StringUtils;
-
-@XmlRootElement
-@XmlAccessorType(XmlAccessType.FIELD)
-public class Workflows {
-  List<WorkflowDBEntry> workflows;
-  Summary summary;
-  
-  public static class WorkflowDBEntry {
-    public static enum WorkflowFields {
-      WORKFLOWID,
-      WORKFLOWNAME,
-      USERNAME,
-      STARTTIME,
-      LASTUPDATETIME,
-      DURATION,
-      NUMJOBSTOTAL,
-      NUMJOBSCOMPLETED,
-      INPUTBYTES,
-      OUTPUTBYTES,
-      PARENTWORKFLOWID,
-      WORKFLOWCONTEXT;
-      
-      public String getString(ResultSet rs) throws SQLException {
-        return rs.getString(this.toString());
-      }
-      
-      public int getInt(ResultSet rs) throws SQLException {
-        return rs.getInt(this.toString());
-      }
-      
-      public long getLong(ResultSet rs) throws SQLException {
-        return rs.getLong(this.toString());
-      }
-      
-      public static String join() {
-        String[] tmp = new String[WorkflowFields.values().length];
-        for (int i = 0; i < tmp.length; i++)
-          tmp[i] = WorkflowFields.values()[i].toString();
-        return StringUtils.join(tmp, ",");
-      }
-    }
-    
-    @XmlTransient
-    public static final String WORKFLOW_FIELDS = WorkflowFields.join();
-    
-    private String workflowId;
-    private String workflowName;
-    private String userName;
-    private long startTime;
-    private long elapsedTime;
-    private long inputBytes;
-    private long outputBytes;
-    private int numJobsTotal;
-    private int numJobsCompleted;
-    private String parentWorkflowId;
-    private WorkflowContext workflowContext;
-    
-    public WorkflowDBEntry() {
-      /* Required by JAXB. */
-    }
-    
-    public String getWorkflowId() {
-      return workflowId;
-    }
-    
-    public String getWorkflowName() {
-      return workflowName;
-    }
-    
-    public String getUserName() {
-      return userName;
-    }
-    
-    public long getStartTime() {
-      return startTime;
-    }
-    
-    public long getElapsedTime() {
-      return elapsedTime;
-    }
-    
-    public int getNumJobsTotal() {
-      return numJobsTotal;
-    }
-    
-    public int getNumJobsCompleted() {
-      return numJobsCompleted;
-    }
-    
-    public String getParentWorkflowId() {
-      return parentWorkflowId;
-    }
-    
-    public WorkflowContext getWorkflowContext() {
-      return workflowContext;
-    }
-    
-    public void setWorkflowId(String workflowId) {
-      this.workflowId = workflowId;
-    }
-    
-    public void setWorkflowName(String workflowName) {
-      this.workflowName = workflowName;
-    }
-    
-    public void setUserName(String userName) {
-      this.userName = userName;
-    }
-    
-    public void setStartTime(long startTime) {
-      this.startTime = startTime;
-    }
-    
-    public void setElapsedTime(long elapsedTime) {
-      this.elapsedTime = elapsedTime;
-    }
-    
-    public void setNumJobsTotal(int numJobsTotal) {
-      this.numJobsTotal = numJobsTotal;
-    }
-    
-    public void setNumJobsCompleted(int numJobsCompleted) {
-      this.numJobsCompleted = numJobsCompleted;
-    }
-    
-    public void setParentWorkflowId(String parentWorkflowId) {
-      this.parentWorkflowId = parentWorkflowId;
-    }
-    
-    public void setWorkflowContext(WorkflowContext workflowContext) {
-      this.workflowContext = workflowContext;
-    }
-    
-    public long getInputBytes() {
-      return inputBytes;
-    }
-    
-    public void setInputBytes(long inputBytes) {
-      this.inputBytes = inputBytes;
-    }
-    
-    public long getOutputBytes() {
-      return outputBytes;
-    }
-    
-    public void setOutputBytes(long outputBytes) {
-      this.outputBytes = outputBytes;
-    }
-  }
-  
-  public Workflows() {}
-  
-  public List<WorkflowDBEntry> getWorkflows() {
-    return workflows;
-  }
-  
-  public void setWorkflows(List<WorkflowDBEntry> workflows) {
-    this.workflows = workflows;
-  }
-  
-  public Summary getSummary() {
-    return summary;
-  }
-  
-  public void setSummary(Summary summary) {
-    this.summary = summary;
-  }
-}
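
As with JobFields, WORKFLOW_FIELDS is the comma-joined enum and doubles as the SELECT list, so its order is exactly the declaration order above. Note the naming seam: the column is DURATION while the bean property is elapsedTime, which is why the REST layer later in this diff rewrites an ELAPSEDTIME sort key to DURATION. A sketch:

    System.out.println(WorkflowDBEntry.WORKFLOW_FIELDS);
    // WORKFLOWID,WORKFLOWNAME,USERNAME,STARTTIME,LASTUPDATETIME,DURATION,
    // NUMJOBSTOTAL,NUMJOBSCOMPLETED,INPUTBYTES,OUTPUTBYTES,PARENTWORKFLOWID,WORKFLOWCONTEXT
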
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/eventdb/webservice/JAXBContextResolver.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/eventdb/webservice/JAXBContextResolver.java
deleted file mode 100644
index 974155c..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/eventdb/webservice/JAXBContextResolver.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.eventdb.webservice;
-
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.ext.Provider;
-
-import org.codehaus.jackson.jaxrs.JacksonJaxbJsonProvider;
-import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jackson.map.annotate.JsonSerialize.Inclusion;
-
-@Provider
-public class JAXBContextResolver extends JacksonJaxbJsonProvider {
-  public JAXBContextResolver() {
-    super();
-  }
-  
-  @Override
-  public ObjectMapper locateMapper(Class<?> type, MediaType mediaType) {
-    ObjectMapper mapper = super.locateMapper(type, mediaType);
-    mapper.setSerializationInclusion(Inclusion.NON_NULL);
-    return mapper;
-  }
-}
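
This resolver swaps Jackson's default mapper for one with Inclusion.NON_NULL, so null bean properties simply vanish from JSON payloads (primitives such as long still serialize, as 0). A sketch of the effect, using the same codehaus Jackson 1.x API imported above:

    ObjectMapper mapper = new ObjectMapper();
    mapper.setSerializationInclusion(Inclusion.NON_NULL);
    // A WorkflowDBEntry with parentWorkflowId == null now serializes as
    // {"workflowId":"x","startTime":0,...} with no "parentWorkflowId" key.
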
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/eventdb/webservice/WorkflowJsonService.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/eventdb/webservice/WorkflowJsonService.java
deleted file mode 100644
index 2b76f69..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/eventdb/webservice/WorkflowJsonService.java
+++ /dev/null
@@ -1,398 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.eventdb.webservice;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.List;
-import java.util.Set;
-import java.util.TreeSet;
-
-import javax.servlet.ServletContext;
-import javax.ws.rs.DefaultValue;
-import javax.ws.rs.GET;
-import javax.ws.rs.Path;
-import javax.ws.rs.Produces;
-import javax.ws.rs.QueryParam;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.MediaType;
-
-import org.apache.ambari.eventdb.db.PostgresConnector;
-import org.apache.ambari.eventdb.model.DataTable;
-import org.apache.ambari.eventdb.model.Jobs;
-import org.apache.ambari.eventdb.model.Jobs.JobDBEntry;
-import org.apache.ambari.eventdb.model.TaskAttempt;
-import org.apache.ambari.eventdb.model.TaskData;
-import org.apache.ambari.eventdb.model.TaskData.Point;
-import org.apache.ambari.eventdb.model.TaskLocalityData;
-import org.apache.ambari.eventdb.model.TaskLocalityData.DataPoint;
-import org.apache.ambari.eventdb.model.Workflows;
-import org.apache.ambari.eventdb.model.Workflows.WorkflowDBEntry;
-import org.apache.ambari.eventdb.model.Workflows.WorkflowDBEntry.WorkflowFields;
-
-@Path("/jobhistory")
-public class WorkflowJsonService {
-  private static final String PREFIX = "eventdb.";
-  private static final String HOSTNAME = PREFIX + "db.hostname";
-  private static final String DBNAME = PREFIX + "db.name";
-  private static final String USERNAME = PREFIX + "db.user";
-  private static final String PASSWORD = PREFIX + "db.password";
-  
-  private static final String DEFAULT_HOSTNAME = "localhost";
-  private static final String DEFAULT_DBNAME = "ambarirca";
-  private static final String DEFAULT_USERNAME = "mapred";
-  private static final String DEFAULT_PASSWORD = "mapred";
-  
-  private static final Workflows EMPTY_WORKFLOWS = new Workflows();
-  private static final List<JobDBEntry> EMPTY_JOBS = Collections.emptyList();
-  {
-    List<WorkflowDBEntry> emptyWorkflows = Collections.emptyList();
-    EMPTY_WORKFLOWS.setWorkflows(emptyWorkflows);
-  }
-  
-  PostgresConnector getConnector() throws IOException {
-    return new PostgresConnector(DEFAULT_HOSTNAME, DEFAULT_DBNAME, DEFAULT_USERNAME, DEFAULT_PASSWORD);
-  }
-  
-  @Context
-  ServletContext servletContext;
-  
-  @GET
-  @Produces(MediaType.APPLICATION_JSON)
-  @Path("/workflow")
-  public Workflows getWorkflows(@QueryParam("orderBy") String field, @DefaultValue(PostgresConnector.SORT_ASC) @QueryParam("sortDir") String sortDir,
-      @DefaultValue("0") @QueryParam("offset") int offset, @DefaultValue("-1") @QueryParam("limit") int limit) {
-    Workflows workflows = EMPTY_WORKFLOWS;
-    PostgresConnector conn = null;
-    try {
-      conn = getConnector();
-      if (field == null)
-        workflows = conn.fetchWorkflows();
-      else {
-        field = field.toUpperCase();
-        if ("ELAPSEDTIME".equals(field))
-          field = "DURATION";
-        workflows = conn.fetchWorkflows(WorkflowFields.valueOf(field), sortDir.toUpperCase().equals(PostgresConnector.SORT_ASC), offset, limit);
-      }
-    } catch (IOException e) {
-      e.printStackTrace();
-      workflows = EMPTY_WORKFLOWS;
-    } finally {
-      if (conn != null) {
-        conn.close();
-      }
-    }
-    return workflows;
-  }
-  
-  @GET
-  @Produces(MediaType.APPLICATION_JSON)
-  @Path("/datatable")
-  public DataTable getWorkflowDataTable(@DefaultValue("0") @QueryParam("iDisplayStart") int start,
-      @DefaultValue("10") @QueryParam("iDisplayLength") int amount, @QueryParam("sSearch") String searchTerm, @DefaultValue("0") @QueryParam("sEcho") int echo,
-      @DefaultValue("0") @QueryParam("iSortCol_0") int col, @DefaultValue(PostgresConnector.SORT_ASC) @QueryParam("sSortDir_0") String sdir,
-      @QueryParam("sSearch_0") String workflowId, @QueryParam("sSearch_1") String workflowName, @QueryParam("sSearch_2") String workflowType,
-      @QueryParam("sSearch_3") String userName, @DefaultValue("-1") @QueryParam("minJobs") int minJobs, @DefaultValue("-1") @QueryParam("maxJobs") int maxJobs,
-      @DefaultValue("-1") @QueryParam("minInputBytes") long minInputBytes, @DefaultValue("-1") @QueryParam("maxInputBytes") long maxInputBytes,
-      @DefaultValue("-1") @QueryParam("minOutputBytes") long minOutputBytes, @DefaultValue("-1") @QueryParam("maxOutputBytes") long maxOutputBytes,
-      @DefaultValue("-1") @QueryParam("minDuration") long minDuration, @DefaultValue("-1") @QueryParam("maxDuration") long maxDuration,
-      @DefaultValue("-1") @QueryParam("minStartTime") long minStartTime, @DefaultValue("-1") @QueryParam("maxStartTime") long maxStartTime) {
-    
-    if (start < 0)
-      start = 0;
-    if (amount < 10 || amount > 100)
-      amount = 10;
-    
-    boolean sortAscending = true;
-    if (!sdir.toUpperCase().equals(PostgresConnector.SORT_ASC))
-      sortAscending = false;
-    
-    WorkflowFields field = null;
-    switch (col) {
-      case 0: // workflowId
-        field = WorkflowFields.WORKFLOWID;
-        break;
-      case 1: // workflowName
-        field = WorkflowFields.WORKFLOWNAME;
-        break;
-      case 2: // workflowType
-        field = WorkflowFields.WORKFLOWID;
-        break;
-      case 3: // userName
-        field = WorkflowFields.USERNAME;
-        break;
-      case 4: // numJobsTotal
-        field = WorkflowFields.NUMJOBSTOTAL;
-        break;
-      case 5: // inputBytes
-        field = WorkflowFields.INPUTBYTES;
-        break;
-      case 6: // outputBytes
-        field = WorkflowFields.OUTPUTBYTES;
-        break;
-      case 7: // duration
-        field = WorkflowFields.DURATION;
-        break;
-      case 8: // startTime
-        field = WorkflowFields.STARTTIME;
-        break;
-      default:
-        field = WorkflowFields.WORKFLOWID;
-    }
-    
-    DataTable table = null;
-    PostgresConnector conn = null;
-    try {
-      conn = getConnector();
-      table = conn.fetchWorkflows(start, amount, searchTerm, echo, field, sortAscending, workflowId, workflowName, workflowType, userName, minJobs, maxJobs,
-          minInputBytes, maxInputBytes, minOutputBytes, maxOutputBytes, minDuration, maxDuration, minStartTime, maxStartTime);
-    } catch (IOException e) {
-      e.printStackTrace();
-    } finally {
-      if (conn != null) {
-        conn.close();
-      }
-    }
-    return table;
-  }
-  
-  @GET
-  @Produces(MediaType.APPLICATION_JSON)
-  @Path("/job")
-  public Jobs getJobs(@QueryParam("workflowId") String workflowId) {
-    Jobs jobs = new Jobs();
-    PostgresConnector conn = null;
-    try {
-      conn = getConnector();
-      jobs.setJobs(conn.fetchJobDetails(workflowId));
-    } catch (IOException e) {
-      e.printStackTrace();
-      jobs.setJobs(EMPTY_JOBS);
-    } finally {
-      if (conn != null) {
-        conn.close();
-      }
-    }
-    return jobs;
-  }
-  
-  @GET
-  @Produces(MediaType.APPLICATION_JSON)
-  @Path("/task")
-  public TaskData getTaskDetails(@QueryParam("jobId") String jobId, @QueryParam("width") int steps) {
-    TaskData points = new TaskData();
-    PostgresConnector conn = null;
-    try {
-      conn = getConnector();
-      long[] times = conn.fetchJobStartStopTimes(jobId);
-      if (times != null) {
-        double submitTimeSecs = times[0] / 1000.0;
-        double finishTimeSecs = times[1] / 1000.0;
-        double step = (finishTimeSecs - submitTimeSecs) / steps;
-        if (step < 1)
-          step = 1;
-        getMapDetails(conn, points, jobId, submitTimeSecs, finishTimeSecs, step);
-        getReduceDetails(conn, points, jobId, submitTimeSecs, finishTimeSecs, step);
-      }
-    } catch (IOException e) {
-      e.printStackTrace();
-    } finally {
-      if (conn != null) {
-        conn.close();
-      }
-    }
-    return points;
-  }
-  
-  @GET
-  @Produces(MediaType.APPLICATION_JSON)
-  @Path("/tasklocality")
-  public TaskLocalityData getTaskLocalityDetails(@QueryParam("jobId") String jobId, @DefaultValue("4") @QueryParam("minr") int minr,
-      @DefaultValue("24") @QueryParam("maxr") int maxr) {
-    if (maxr < minr)
-      maxr = minr;
-    TaskLocalityData data = new TaskLocalityData();
-    PostgresConnector conn = null;
-    try {
-      conn = getConnector();
-      long[] times = conn.fetchJobStartStopTimes(jobId);
-      if (times != null) {
-        getTaskAttemptsByLocality(conn, jobId, times[0], times[1], data, minr, maxr);
-      }
-    } catch (IOException e) {
-      e.printStackTrace();
-    } finally {
-      if (conn != null) {
-        conn.close();
-      }
-    }
-    return data;
-  }
-  
-  private static void getMapDetails(PostgresConnector conn, TaskData points, String jobId, double submitTimeSecs, double finishTimeSecs, double step)
-      throws IOException {
-    List<TaskAttempt> taskAttempts = conn.fetchTaskAttempts(jobId, "MAP");
-    List<Point> mapPoints = new ArrayList<Point>();
-    for (double time = submitTimeSecs; time < finishTimeSecs; time += step) {
-      int numTasks = 0;
-      for (TaskAttempt taskAttempt : taskAttempts)
-        if ((taskAttempt.getStartTime() / 1000.0) <= (time + step) && (taskAttempt.getFinishTime() / 1000.0) >= time)
-          numTasks++;
-      mapPoints.add(new Point(Math.round(time), numTasks));
-    }
-    points.setMapData(mapPoints);
-  }
-  
-  private static void getReduceDetails(PostgresConnector conn, TaskData points, String jobId, double submitTimeSecs, double finishTimeSecs, double step)
-      throws IOException {
-    List<TaskAttempt> taskAttempts = conn.fetchTaskAttempts(jobId, "REDUCE");
-    List<Point> shufflePoints = new ArrayList<Point>();
-    List<Point> reducePoints = new ArrayList<Point>();
-    for (double time = submitTimeSecs; time < finishTimeSecs; time += step) {
-      int numShuffleTasks = 0;
-      int numReduceTasks = 0;
-      for (TaskAttempt taskAttempt : taskAttempts) {
-        if ((taskAttempt.getStartTime() / 1000.0) <= (time + step) && (taskAttempt.getShuffleFinishTime() / 1000.0) >= time) {
-          numShuffleTasks++;
-        } else if ((taskAttempt.getShuffleFinishTime() / 1000.0) < (time + step) && (taskAttempt.getFinishTime() / 1000.0) >= time) {
-          numReduceTasks++;
-        }
-      }
-      shufflePoints.add(new Point(Math.round(time), numShuffleTasks));
-      reducePoints.add(new Point(Math.round(time), numReduceTasks));
-    }
-    points.setShuffleData(shufflePoints);
-    points.setReduceData(reducePoints);
-  }
-  
-  private static void getTaskAttemptsByLocality(PostgresConnector conn, String jobId, long submitTime, long finishTime, TaskLocalityData data, int minr,
-      int maxr) throws IOException {
-    long submitTimeX = transformX(submitTime);
-    long finishTimeX = transformX(finishTime);
-    List<TaskAttempt> mapAttempts = conn.fetchTaskAttempts(jobId, "MAP");
-    List<TaskAttempt> reduceAttempts = conn.fetchTaskAttempts(jobId, "REDUCE");
-    Set<Long> xPoints = getXPoints(mapAttempts, reduceAttempts, submitTimeX, finishTimeX);
-    Long[] xList = xPoints.toArray(new Long[xPoints.size()]);
-    MinMax io = new MinMax();
-    data.setMapNodeLocal(processLocalityData(mapAttempts, "NODE_LOCAL", xList, io));
-    data.setMapRackLocal(processLocalityData(mapAttempts, "RACK_LOCAL", xList, io));
-    data.setMapOffSwitch(processLocalityData(mapAttempts, "OFF_SWITCH", xList, io));
-    data.setReduceOffSwitch(processLocalityData(reduceAttempts, "OFF_SWITCH", xList, io));
-    setRValues(data.getMapNodeLocal(), minr, maxr, io.max);
-    setRValues(data.getMapRackLocal(), minr, maxr, io.max);
-    setRValues(data.getMapOffSwitch(), minr, maxr, io.max);
-    setRValues(data.getReduceOffSwitch(), minr, maxr, io.max);
-    data.setSubmitTime(submitTimeX);
-    data.setFinishTime(finishTimeX);
-  }
-  
-  private static class MinMax {
-    private long min = Long.MAX_VALUE;
-    private long max = 0;
-  }
-  
-  private static long transformX(long time) {
-    return Math.round(time / 1000.0);
-  }
-  
-  private static long untransformX(long x) {
-    return x * 1000;
-  }
-  
-  private static long transformY(long time) {
-    return time;
-  }
-  
-  private static Set<Long> getXPoints(List<TaskAttempt> mapAttempts, List<TaskAttempt> reduceAttempts, long submitTimeX, long finishTimeX) {
-    TreeSet<Long> xPoints = new TreeSet<Long>();
-    TreeSet<TaskAttempt> sortedAttempts = new TreeSet<TaskAttempt>(new Comparator<TaskAttempt>() {
-      @Override
-      public int compare(TaskAttempt t1, TaskAttempt t2) {
-        if (t1.getStartTime() < t2.getStartTime())
-          return -1;
-        else if (t1.getStartTime() > t2.getStartTime())
-          return 1;
-        return t1.getTaskAttemptId().compareTo(t2.getTaskAttemptId());
-      }
-    });
-    sortedAttempts.addAll(mapAttempts);
-    sortedAttempts.addAll(reduceAttempts);
-    getXPoints(sortedAttempts, xPoints);
-    xPoints.add(submitTimeX);
-    xPoints.add(finishTimeX);
-    return xPoints;
-  }
-  
-  private static void getXPoints(Iterable<TaskAttempt> taskAttempts, Set<Long> xPoints) {
-    for (TaskAttempt taskAttempt : taskAttempts) {
-      long x = transformX(taskAttempt.getStartTime());
-      while (xPoints.contains(x))
-        x += 1;
-      xPoints.add(x);
-      taskAttempt.setStartTime(untransformX(x));
-    }
-  }
-  
-  private static int addDataPoint(List<DataPoint> data, DataPoint point, int index, Long[] xPoints) {
-    while (index < xPoints.length) {
-      if (point.getX() == xPoints[index]) {
-        index++;
-        break;
-      } else if (point.getX() > xPoints[index]) {
-        data.add(new DataPoint(xPoints[index++]));
-      }
-    }
-    data.add(point);
-    return index;
-  }
-  
-  private static List<DataPoint> processLocalityData(List<TaskAttempt> taskAttempts, String locality, Long[] xPoints, MinMax io) {
-    List<DataPoint> data = new ArrayList<DataPoint>();
-    int i = 0;
-    for (TaskAttempt taskAttempt : taskAttempts) {
-      if (locality.equals(taskAttempt.getLocality())) {
-        DataPoint point = new DataPoint();
-        point.setX(transformX(taskAttempt.getStartTime()));
-        point.setY(transformY(taskAttempt.getFinishTime() - taskAttempt.getStartTime()));
-        point.setIO(taskAttempt.getInputBytes() + taskAttempt.getOutputBytes());
-        point.setLabel(taskAttempt.getTaskAttemptId());
-        point.setStatus(taskAttempt.getStatus());
-        i = addDataPoint(data, point, i, xPoints);
-        io.max = Math.max(io.max, point.getIO());
-        io.min = Math.min(io.min, point.getIO());
-      }
-    }
-    while (i < xPoints.length)
-      data.add(new DataPoint(xPoints[i++]));
-    return data;
-  }
-  
-  private static void setRValues(List<DataPoint> data, int minr, int maxr, long maxIO) {
-    for (DataPoint point : data) {
-      if (point.getY() == 0) {
-        continue;
-      }
-      if (maxIO == 0 || maxr == minr) {
-        point.setR(minr);
-        continue;
-      }
-      point.setR(Math.round(Math.sqrt(point.getIO() * 1.0 / maxIO) * (maxr - minr) + minr));
-    }
-  }
-}
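
One detail worth recording from the service deleted above: setRValues() scales bubble radii with a square root, which keeps bubble area, not radius, roughly proportional to the attempt's bytes moved. Worked with the endpoint's defaults minr=4, maxr=24 (the IO figures are illustrative):

    int minr = 4, maxr = 24;
    long maxIO = 1L << 30;   // busiest attempt in the job: 1 GiB
    long io    = 1L << 28;   // this attempt: 256 MiB, a quarter of the max
    long r = Math.round(Math.sqrt(io * 1.0 / maxIO) * (maxr - minr) + minr);
    System.out.println(r);   // sqrt(0.25) * 20 + 4 = 14
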
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/AmbariException.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/AmbariException.java
deleted file mode 100644
index 2950533..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/AmbariException.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server;
-
-import java.io.IOException;
-
-@SuppressWarnings("serial")
-public class AmbariException extends IOException {
-
-  public AmbariException(String message) {
-    super(message);
-  }
-
-  public AmbariException(String message, Throwable cause) {
-    super(message, cause);
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/ClusterNotFoundException.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/ClusterNotFoundException.java
deleted file mode 100644
index cd338a0..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/ClusterNotFoundException.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server;
-
-@SuppressWarnings("serial")
-public class ClusterNotFoundException extends ObjectNotFoundException {
-
-  public ClusterNotFoundException(String clusterName) {
-    super("Cluster not found, clusterName=" + clusterName);
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/DuplicateResourceException.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/DuplicateResourceException.java
deleted file mode 100644
index 0e75a76..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/DuplicateResourceException.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server;
-
-/**
- * Thrown when an attempt is made to create an already existing resource.
- */
-public class DuplicateResourceException extends AmbariException {
-
-  /**
-   * Constructor.
-   *
-   * @param message  message
-   */
-  public DuplicateResourceException(String message) {
-    super(message);
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/HostNotFoundException.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/HostNotFoundException.java
deleted file mode 100644
index f5768c1..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/HostNotFoundException.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server;
-
-@SuppressWarnings("serial")
-public class HostNotFoundException extends ObjectNotFoundException {
-
-  public HostNotFoundException(String hostname) {
-    super("Host not found, hostname=" + hostname);
-  }
-
-  public HostNotFoundException(String clusterName, String hostname) {
-    super("Host not found, cluster=" + clusterName + ", hostname=" + hostname);
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/ObjectNotFoundException.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/ObjectNotFoundException.java
deleted file mode 100644
index 138774a..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/ObjectNotFoundException.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server;
-
-@SuppressWarnings("serial")
-public class ObjectNotFoundException extends AmbariException {
-
-  public ObjectNotFoundException(String message) {
-    super(message);
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/ParentObjectNotFoundException.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/ParentObjectNotFoundException.java
deleted file mode 100644
index 98d60d5..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/ParentObjectNotFoundException.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server;
-
-/**
- * Indicates that a parent of a resource doesn't exist.
- */
-public class ParentObjectNotFoundException extends AmbariException {
-
-  /**
-   * Constructor.
-   *
-   * @param msg    message
-   * @param cause  the root cause
-   */
-  public ParentObjectNotFoundException(String msg, ObjectNotFoundException cause) {
-    super(msg + ".  " + cause.getMessage(), cause);
-  }
-
-  /**
-   * Constructor.
-   *
-   * @param message message
-   */
-  public ParentObjectNotFoundException(String message) {
-    super(message);
-  }
-}
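
A minimal sketch of how the not-found hierarchy above composes: ClusterNotFoundException extends ObjectNotFoundException, so ParentObjectNotFoundException can wrap it when the missing object was the parent of the resource being operated on. The demo class and its main() harness are illustrative only; the types, constructors, and message format are the ones defined in the deleted sources above.

import org.apache.ambari.server.ClusterNotFoundException;
import org.apache.ambari.server.ParentObjectNotFoundException;

public class NotFoundExample {
  public static void main(String[] args) {
    try {
      // Simulate a failed cluster lookup.
      throw new ClusterNotFoundException("c1");
    } catch (ClusterNotFoundException e) {
      // Wrap it because the cluster was the parent of the requested resource.
      ParentObjectNotFoundException wrapped = new ParentObjectNotFoundException(
          "Attempted to add a service to a cluster which doesn't exist", e);
      // Prints the composed message:
      // Attempted to add a service to a cluster which doesn't exist.  Cluster not found, clusterName=c1
      System.out.println(wrapped.getMessage());
    }
  }
}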
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/Role.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/Role.java
deleted file mode 100644
index 00d4585..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/Role.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server;
-
-// This enumerates all the roles that the server can handle.
-// Each component or job maps to a particular role.
-public enum Role {
-  ZOOKEEPER_SERVER,
-  ZOOKEEPER_CLIENT,
-  NAMENODE,
-  NAMENODE_SERVICE_CHECK,
-  DATANODE,
-  HDFS_SERVICE_CHECK,
-  SECONDARY_NAMENODE,
-  HDFS_CLIENT,
-  HBASE_MASTER,
-  HBASE_REGIONSERVER,
-  HBASE_CLIENT,
-  JOBTRACKER,
-  TASKTRACKER,
-  MAPREDUCE_CLIENT,
-  JAVA_JCE,
-  KERBEROS_SERVER,
-  KERBEROS_CLIENT,
-  KERBEROS_ADMIN_CLIENT,
-  HADOOP_CLIENT,
-  JOBTRACKER_SERVICE_CHECK,
-  MAPREDUCE_SERVICE_CHECK,
-  ZOOKEEPER_SERVICE_CHECK,
-  ZOOKEEPER_QUORUM_SERVICE_CHECK,
-  HBASE_SERVICE_CHECK,
-  MYSQL_SERVER,
-  HIVE_SERVER,
-  HIVE_METASTORE,
-  HIVE_CLIENT,
-  HIVE_SERVICE_CHECK,
-  HCAT,
-  HCAT_SERVICE_CHECK,
-  OOZIE_CLIENT,
-  OOZIE_SERVER,
-  OOZIE_SERVICE_CHECK,
-  PIG,
-  PIG_SERVICE_CHECK,
-  SQOOP,
-  SQOOP_SERVICE_CHECK,
-  WEBHCAT_SERVER,
-  WEBHCAT_SERVICE_CHECK,
-  DASHBOARD,
-  DASHBOARD_SERVICE_CHECK,
-  NAGIOS_SERVER,
-  GANGLIA_SERVER,
-  GANGLIA_MONITOR,
-  GMOND_SERVICE_CHECK,
-  GMETAD_SERVICE_CHECK,
-  MONTOR_WEBSERVER,
-  DECOMMISSION_DATANODE
-}
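
Roles travel as plain strings in command reports and are mapped back with Role.valueOf, as ActionDBAccessorImpl.updateHostRoleState does later in this diff. A short sketch of that round trip (the demo class itself is hypothetical):

import org.apache.ambari.server.Role;

public class RoleRoundTrip {
  public static void main(String[] args) {
    String roleStr = Role.DATANODE.toString(); // "DATANODE"
    Role role = Role.valueOf(roleStr);         // back to the enum constant
    System.out.println(role == Role.DATANODE); // true
    // Role.valueOf throws IllegalArgumentException for unknown names,
    // so callers must pass the constant name exactly as declared above.
  }
}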
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/RoleCommand.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/RoleCommand.java
deleted file mode 100644
index 92fa5e0..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/RoleCommand.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server;
-
-public enum RoleCommand {
-  INSTALL,
-  UNINSTALL,
-  START,
-  STOP,
-  EXECUTE,
-  ABORT
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/ServiceComponentHostNotFoundException.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/ServiceComponentHostNotFoundException.java
deleted file mode 100644
index 4c59665..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/ServiceComponentHostNotFoundException.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server;
-
-@SuppressWarnings("serial")
-public class ServiceComponentHostNotFoundException
-    extends ObjectNotFoundException {
-
-  public ServiceComponentHostNotFoundException(String clusterName,
-      String serviceName, String serviceComponentName, String hostName) {
-    super("ServiceComponentHost not found"
-        + ", clusterName=" + clusterName
-        + ", serviceName=" + serviceName
-        + ", serviceComponentName=" + serviceComponentName
-        + ", hostName=" + hostName);
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/ServiceComponentNotFoundException.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/ServiceComponentNotFoundException.java
deleted file mode 100644
index 32f3505..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/ServiceComponentNotFoundException.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server;
-
-@SuppressWarnings("serial")
-public class ServiceComponentNotFoundException
-    extends ObjectNotFoundException {
-
-  public ServiceComponentNotFoundException(String clusterName,
-      String serviceName, String serviceComponentName) {
-    super("ServiceComponent not found"
-        + ", clusterName=" + clusterName
-        + ", serviceName=" + serviceName
-        + ", serviceComponentName=" + serviceComponentName);
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/ServiceNotFoundException.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/ServiceNotFoundException.java
deleted file mode 100644
index b98d733..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/ServiceNotFoundException.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server;
-
-@SuppressWarnings("serial")
-public class ServiceNotFoundException extends ObjectNotFoundException {
-
-  public ServiceNotFoundException(String clusterName, String serviceName) {
-    super("Service not found"
-        + ", clusterName=" + clusterName
-        + ", serviceName=" + serviceName);
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/StackNotFoundException.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/StackNotFoundException.java
deleted file mode 100644
index f8829a2..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/StackNotFoundException.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server;
-
-@SuppressWarnings("serial")
-public class StackNotFoundException extends ObjectNotFoundException {
-
-  public StackNotFoundException(String stackName,
-      String stackVersion) {
-    super("Stack Information not found"
-        + ", stackName=" + stackName
-        + ", stackVersion=" + stackVersion);
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionDBAccessor.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionDBAccessor.java
deleted file mode 100644
index 252e9b5..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionDBAccessor.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.actionmanager;
-
-import java.util.Collection;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.ambari.server.Role;
-import org.apache.ambari.server.agent.CommandReport;
-
-public interface ActionDBAccessor {
-
-  public Stage getAction(String actionId);
-
-  public List<Stage> getAllStages(long requestId);
-
-  public void abortOperation(long requestId);
-
-  public void timeoutHostRole(String host, long requestId, long stageId, Role role);
-
-  /**
-   * Returns all stages that are in progress, whether or not their commands
-   * have been queued. A stage is considered in progress if any of its host
-   * roles is still pending, queued, or in progress.
-   */
-  public List<Stage> getStagesInProgress();
-
-  public void persistActions(List<Stage> stages);
-
-  public void updateHostRoleState(String hostname, long requestId,
-      long stageId, String role, CommandReport report);
-
-  public void abortHostRole(String host, long requestId, long stageId,
-      Role role);
-
-  /**
-   * Return the last persisted Request ID as seen when the DBAccessor object
-   * was initialized.
-   * Value should remain unchanged through the lifetime of the object instance.
-   * @return Request Id seen at init time
-   */
-  public long getLastPersistedRequestIdWhenInitialized();
-
-  /**
-   * Persists the scheduling state of a host role after it has been queued.
-   * @param s         the stage containing the host role
-   * @param hostname  the host the role is scheduled on
-   * @param roleStr   the name of the scheduled role
-   */
-  public void hostRoleScheduled(Stage s, String hostname, String roleStr);
-
-  public List<HostRoleCommand> getRequestTasks(long requestId);
-
-  public List<HostRoleCommand> getAllTasksByRequestIds(Collection<Long> requestIds);
-
-  public List<HostRoleCommand> getTasksByRequestAndTaskIds(Collection<Long> requestIds, Collection<Long> taskIds);
-
-  public Collection<HostRoleCommand> getTasks(Collection<Long> taskIds);
-
-  public List<Stage> getStagesByHostRoleStatus(Set<HostRoleStatus> statuses);
-
-  public List<Long> getRequests();
-
-  public HostRoleCommand getTask(long taskId);
-
-  public List<Long> getRequestsByStatus(RequestStatus status);
-}
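
A hedged usage sketch of the ActionDBAccessor contract above: time out a stuck host role, then check whether any stages remain in progress. In real code the accessor is injected (ActionDBAccessorImpl or ActionDBInMemoryImpl below); here it is just a parameter, and the host and ids are placeholders.

import java.util.List;

import org.apache.ambari.server.Role;
import org.apache.ambari.server.actionmanager.ActionDBAccessor;
import org.apache.ambari.server.actionmanager.Stage;

public class AccessorUsage {
  static void expireRole(ActionDBAccessor db, String host,
      long requestId, long stageId) {
    // Mark the role TIMEDOUT (see ActionDBAccessorImpl.timeoutHostRole below).
    db.timeoutHostRole(host, requestId, stageId, Role.DATANODE);
    // A stage is in progress if any host role is pending, queued, or running.
    List<Stage> pending = db.getStagesInProgress();
    System.out.println("Stages still in progress: " + pending.size());
  }
}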
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionDBAccessorImpl.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionDBAccessorImpl.java
deleted file mode 100644
index 83c7cae..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionDBAccessorImpl.java
+++ /dev/null
@@ -1,346 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.actionmanager;
-
-import java.util.*;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.Role;
-import org.apache.ambari.server.agent.CommandReport;
-import org.apache.ambari.server.orm.dao.ClusterDAO;
-import org.apache.ambari.server.orm.dao.ExecutionCommandDAO;
-import org.apache.ambari.server.orm.dao.HostDAO;
-import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
-import org.apache.ambari.server.orm.dao.RoleSuccessCriteriaDAO;
-import org.apache.ambari.server.orm.dao.StageDAO;
-import org.apache.ambari.server.orm.entities.ClusterEntity;
-import org.apache.ambari.server.orm.entities.ExecutionCommandEntity;
-import org.apache.ambari.server.orm.entities.HostEntity;
-import org.apache.ambari.server.orm.entities.HostRoleCommandEntity;
-import org.apache.ambari.server.orm.entities.RoleSuccessCriteriaEntity;
-import org.apache.ambari.server.orm.entities.StageEntity;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.Singleton;
-import com.google.inject.persist.Transactional;
-
-@Singleton
-public class ActionDBAccessorImpl implements ActionDBAccessor {
-  private static final Logger LOG = LoggerFactory.getLogger(ActionDBAccessorImpl.class);
-
-  @Inject
-  private ClusterDAO clusterDAO;
-  @Inject
-  private HostDAO hostDAO;
-  @Inject
-  private StageDAO stageDAO;
-  @Inject
-  private HostRoleCommandDAO hostRoleCommandDAO;
-  @Inject
-  private ExecutionCommandDAO executionCommandDAO;
-  @Inject
-  private RoleSuccessCriteriaDAO roleSuccessCriteriaDAO;
-  @Inject
-  private StageFactory stageFactory;
-  @Inject
-  private HostRoleCommandFactory hostRoleCommandFactory;
-  @Inject
-  private Clusters clusters;
-
-  private final long requestId;
-
-  @Inject
-  public ActionDBAccessorImpl(Injector injector) {
-    injector.injectMembers(this);
-    requestId = stageDAO.getLastRequestId();
-  }
-
-  /* (non-Javadoc)
-   * @see org.apache.ambari.server.actionmanager.ActionDBAccessor#getAction(java.lang.String)
-   */
-  @Override
-  public Stage getAction(String actionId) {
-    return stageFactory.createExisting(actionId);
-  }
-
-  /* (non-Javadoc)
-   * @see org.apache.ambari.server.actionmanager.ActionDBAccessor#getAllStages(java.lang.String)
-   */
-  @Override
-  public List<Stage> getAllStages(long requestId) {
-    List<Stage> stages = new ArrayList<Stage>();
-    for (StageEntity stageEntity : stageDAO.findByRequestId(requestId)) {
-      stages.add(stageFactory.createExisting(stageEntity));
-    }
-    return stages;
-  }
-
-  /* (non-Javadoc)
-   * @see org.apache.ambari.server.actionmanager.ActionDBAccessor#abortOperation(long)
-   */
-  @Override
-  public void abortOperation(long requestId) {
-    Collection<HostRoleStatus> sourceStatuses =
-        Arrays.asList(HostRoleStatus.QUEUED, HostRoleStatus.IN_PROGRESS,
-            HostRoleStatus.PENDING);
-    int result = hostRoleCommandDAO.updateStatusByRequestId(requestId,
-        HostRoleStatus.ABORTED, sourceStatuses);
-    LOG.info("Aborted {} commands " + result);
-  }
-
-  /* (non-Javadoc)
-   * @see org.apache.ambari.server.actionmanager.ActionDBAccessor#timeoutHostRole(long, long, org.apache.ambari.server.Role)
-   */
-  @Override
-  @Transactional
-  public void timeoutHostRole(String host, long requestId, long stageId,
-      Role role) {
-    List<HostRoleCommandEntity> commands =
-        hostRoleCommandDAO.findByHostRole(host, requestId, stageId, role);
-    for (HostRoleCommandEntity command : commands) {
-      command.setStatus(HostRoleStatus.TIMEDOUT);
-      hostRoleCommandDAO.merge(command);
-    }
-  }
-
-  /* (non-Javadoc)
-   * @see org.apache.ambari.server.actionmanager.ActionDBAccessor#getPendingStages()
-   */
-  @Override
-  public List<Stage> getStagesInProgress() {
-    List<Stage> stages = new ArrayList<Stage>();
-    List<HostRoleStatus> statuses =
-        Arrays.asList(HostRoleStatus.QUEUED, HostRoleStatus.IN_PROGRESS,
-            HostRoleStatus.PENDING);
-    for (StageEntity stageEntity : stageDAO.findByCommandStatuses(statuses)) {
-      stages.add(stageFactory.createExisting(stageEntity));
-    }
-    return stages;
-  }
-
-  /* (non-Javadoc)
-   * @see org.apache.ambari.server.actionmanager.ActionDBAccessor#persistActions(java.util.List)
-   */
-  @Override
-  @Transactional
-  public void persistActions(List<Stage> stages) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Adding stages to DB, stageCount=" + stages.size());
-    }
-    for (Stage stage : stages) {
-      StageEntity stageEntity = stage.constructNewPersistenceEntity();
-      Cluster cluster;
-      try {
-        cluster = clusters.getCluster(stage.getClusterName());
-      } catch (AmbariException e) {
-        throw new RuntimeException(e);
-      }
-      ClusterEntity clusterEntity = clusterDAO.findById(cluster.getClusterId());
-
-      stageEntity.setCluster(clusterEntity);
-      clusterEntity.getStages().add(stageEntity);
-
-      for (HostRoleCommand hostRoleCommand : stage.getOrderedHostRoleCommands()) {
-        HostRoleCommandEntity hostRoleCommandEntity = hostRoleCommand.constructNewPersistenceEntity();
-        stageEntity.getHostRoleCommands().add(hostRoleCommandEntity);
-        hostRoleCommandEntity.setStage(stageEntity);
-
-        HostEntity hostEntity = hostDAO.findByName(hostRoleCommandEntity.getHostName());
-        if (hostEntity == null) {
-          LOG.error("Host {} doesn't exists in database" + hostRoleCommandEntity.getHostName());
-          throw new RuntimeException("Host '"+hostRoleCommandEntity.getHostName()+"' doesn't exists in database");
-        }
-        hostEntity.getHostRoleCommandEntities().add(hostRoleCommandEntity);
-        hostRoleCommandEntity.setHost(hostEntity);
-        hostRoleCommandDAO.create(hostRoleCommandEntity);
-
-        assert hostRoleCommandEntity.getTaskId() != null;
-
-        hostRoleCommand.setTaskId(hostRoleCommandEntity.getTaskId());
-        ExecutionCommandEntity executionCommandEntity = hostRoleCommand.constructExecutionCommandEntity();
-        executionCommandEntity.setHostRoleCommand(hostRoleCommandEntity);
-
-        executionCommandEntity.setTaskId(hostRoleCommandEntity.getTaskId());
-        hostRoleCommandEntity.setExecutionCommand(executionCommandEntity);
-
-        executionCommandDAO.create(hostRoleCommandEntity.getExecutionCommand());
-        hostRoleCommandDAO.merge(hostRoleCommandEntity);
-        hostDAO.merge(hostEntity);
-      }
-
-      for (RoleSuccessCriteriaEntity roleSuccessCriteriaEntity : stageEntity.getRoleSuccessCriterias()) {
-        roleSuccessCriteriaDAO.create(roleSuccessCriteriaEntity);
-      }
-
-      stageDAO.create(stageEntity);
-      clusterDAO.merge(clusterEntity);
-    }
-  }
-
-  @Override
-  @Transactional
-  public void updateHostRoleState(String hostname, long requestId,
-      long stageId, String role, CommandReport report) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Update HostRoleState: "
-          + "HostName " + hostname + " requestId " + requestId + " stageId "
-          + stageId + " role " + role + " report " + report);
-    }
-    List<HostRoleCommandEntity> commands = hostRoleCommandDAO.findByHostRole(
-        hostname, requestId, stageId, Role.valueOf(role));
-    for (HostRoleCommandEntity command : commands) {
-      command.setStatus(HostRoleStatus.valueOf(report.getStatus()));
-      command.setStdOut(report.getStdOut().getBytes());
-      command.setStdError(report.getStdErr().getBytes());
-      command.setExitcode(report.getExitCode());
-      hostRoleCommandDAO.merge(command);
-    }
-  }
-
-  @Override
-  public void abortHostRole(String host, long requestId, long stageId, Role role) {
-    CommandReport report = new CommandReport();
-    report.setExitCode(999);
-    report.setStdErr("Host Role in invalid state");
-    report.setStdOut("");
-    report.setStatus("ABORTED");
-    updateHostRoleState(host, requestId, stageId, role.toString(), report);
-  }
-
-  @Override
-  public long getLastPersistedRequestIdWhenInitialized() {
-    return requestId;
-  }
-
-  @Override
-  @Transactional
-  public void hostRoleScheduled(Stage s, String hostname, String roleStr) {
-    HostRoleCommand hostRoleCommand = s.getHostRoleCommand(hostname, roleStr);
-    HostRoleCommandEntity entity = hostRoleCommandDAO.findByPK(hostRoleCommand.getTaskId());
-    if (entity != null) {
-      entity.setStartTime(hostRoleCommand.getStartTime());
-      entity.setLastAttemptTime(hostRoleCommand.getLastAttemptTime());
-      entity.setStatus(hostRoleCommand.getStatus());
-      entity.setAttemptCount(hostRoleCommand.getAttemptCount());
-      hostRoleCommandDAO.merge(entity);
-    } else {
-      throw new RuntimeException("HostRoleCommand is not persisted, cannot update:\n" + hostRoleCommand);
-    }
-  }
-
-  @Override
-  public List<HostRoleCommand> getRequestTasks(long requestId) {
-    List<HostRoleCommand> tasks = new ArrayList<HostRoleCommand>();
-    for (HostRoleCommandEntity hostRoleCommandEntity : hostRoleCommandDAO.findByRequest(requestId)) {
-      tasks.add(hostRoleCommandFactory.createExisting(hostRoleCommandEntity));
-    }
-    return tasks;
-  }
-
-  @Override
-  public List<HostRoleCommand> getAllTasksByRequestIds(Collection<Long> requestIds) {
-    if (requestIds.isEmpty()) {
-      return Collections.emptyList();
-    }
-    List<HostRoleCommand> tasks = new ArrayList<HostRoleCommand>();
-    for (HostRoleCommandEntity hostRoleCommandEntity : hostRoleCommandDAO.findByRequestIds(requestIds)) {
-      tasks.add(hostRoleCommandFactory.createExisting(hostRoleCommandEntity));
-    }
-    return tasks;
-  }
-
-  @Override
-  public List<HostRoleCommand> getTasksByRequestAndTaskIds(Collection<Long> requestIds, Collection<Long> taskIds) {
-    if (!requestIds.isEmpty() && !taskIds.isEmpty()) {
-      List<HostRoleCommand> tasks = new ArrayList<HostRoleCommand>();
-      for (HostRoleCommandEntity hostRoleCommandEntity : hostRoleCommandDAO.findByRequestAndTaskIds(requestIds, taskIds)) {
-        tasks.add(hostRoleCommandFactory.createExisting(hostRoleCommandEntity));
-      }
-      return tasks;
-    } else if (requestIds.isEmpty()) {
-      return getTasks(taskIds);
-    } else if (taskIds.isEmpty()) {
-      return getAllTasksByRequestIds(requestIds);
-    } else {
-      return Collections.emptyList();
-    }
-  }
-
-  @Override
-  public List<HostRoleCommand> getTasks(Collection<Long> taskIds) {
-    if (taskIds.isEmpty()) {
-      return Collections.emptyList();
-    }
-    List<HostRoleCommand> commands = new ArrayList<HostRoleCommand>();
-    for (HostRoleCommandEntity commandEntity : hostRoleCommandDAO.findByPKs(taskIds)) {
-      commands.add(hostRoleCommandFactory.createExisting(commandEntity));
-    }
-    return commands;
-  }
-
-  @Override
-  public List<Stage> getStagesByHostRoleStatus(Set<HostRoleStatus> statuses) {
-    List<Stage> stages = new ArrayList<Stage>();
-    for (StageEntity stageEntity : stageDAO.findByCommandStatuses(statuses)) {
-      stages.add(stageFactory.createExisting(stageEntity));
-    }
-    return stages;
-  }
-
-  @Override
-  public List<Long> getRequests() {
-    return hostRoleCommandDAO.getRequests();
-  }
-
-  public HostRoleCommand getTask(long taskId) {
-    HostRoleCommandEntity commandEntity = hostRoleCommandDAO.findByPK((int)taskId);
-    if (commandEntity == null) {
-      return null;
-    }
-    return hostRoleCommandFactory.createExisting(commandEntity);
-  }
-
-  @Override
-  public List<Long> getRequestsByStatus(RequestStatus status) {
-    boolean match = true;
-    Set<HostRoleStatus> statuses = new HashSet<HostRoleStatus>();
-    if (status == RequestStatus.IN_PROGRESS) {
-      statuses.addAll(Arrays.asList(HostRoleStatus.PENDING,
-          HostRoleStatus.IN_PROGRESS, HostRoleStatus.QUEUED));
-    } else if (status == RequestStatus.COMPLETED) {
-      match = false;
-      statuses.addAll(Arrays.asList(HostRoleStatus.PENDING,
-          HostRoleStatus.IN_PROGRESS, HostRoleStatus.QUEUED,
-          HostRoleStatus.ABORTED, HostRoleStatus.FAILED,
-          HostRoleStatus.TIMEDOUT));
-    } else if (status == RequestStatus.FAILED) {
-      statuses.addAll(Arrays.asList(HostRoleStatus.ABORTED,
-          HostRoleStatus.FAILED, HostRoleStatus.TIMEDOUT));
-    }
-    return hostRoleCommandDAO.getRequestsByTaskStatus(statuses, match);
-  }
-}
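
The match flag in getRequestsByStatus above flips the query semantics: a request counts as IN_PROGRESS or FAILED when at least one of its tasks is in the listed statuses, while COMPLETED is queried with match = false, i.e. no task left in the listed set. That reading of getRequestsByTaskStatus is an assumption, since the DAO is not shown in this diff; the status sets themselves are taken verbatim from the method, and the sketch assumes RequestStatus and HostRoleStatus are enums, as their usage suggests.

import java.util.EnumSet;
import java.util.Set;

import org.apache.ambari.server.actionmanager.HostRoleStatus;
import org.apache.ambari.server.actionmanager.RequestStatus;

public class RequestStatusMapping {
  // Task statuses that getRequestsByStatus queries for each request status.
  static Set<HostRoleStatus> taskStatusesFor(RequestStatus status) {
    switch (status) {
      case IN_PROGRESS: // match = true: any task still not finished
        return EnumSet.of(HostRoleStatus.PENDING,
            HostRoleStatus.IN_PROGRESS, HostRoleStatus.QUEUED);
      case COMPLETED:   // match = false: no task remains in this set
        return EnumSet.of(HostRoleStatus.PENDING,
            HostRoleStatus.IN_PROGRESS, HostRoleStatus.QUEUED,
            HostRoleStatus.ABORTED, HostRoleStatus.FAILED,
            HostRoleStatus.TIMEDOUT);
      case FAILED:      // match = true: any task that ended badly
        return EnumSet.of(HostRoleStatus.ABORTED,
            HostRoleStatus.FAILED, HostRoleStatus.TIMEDOUT);
      default:
        return EnumSet.noneOf(HostRoleStatus.class);
    }
  }
}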
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionDBInMemoryImpl.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionDBInMemoryImpl.java
deleted file mode 100644
index 38984c1..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionDBInMemoryImpl.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.actionmanager;
-
-import java.util.*;
-
-import org.apache.ambari.server.Role;
-import org.apache.ambari.server.agent.CommandReport;
-import org.apache.ambari.server.agent.ExecutionCommand;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.inject.Singleton;
-
-@Singleton
-public class ActionDBInMemoryImpl implements ActionDBAccessor {
-
-  // for a persisted DB, this will be initialized in the ctor
-  // with the highest persisted requestId value in the DB
-  private final long lastRequestId = 0;
-  private static Logger LOG = LoggerFactory.getLogger(ActionDBInMemoryImpl.class);
-  List<Stage> stageList = new ArrayList<Stage>();
-
-  @Override
-  public synchronized Stage getAction(String actionId) {
-    for (Stage s: stageList) {
-      if (s.getActionId().equals(actionId)) {
-        return s;
-      }
-    }
-    return null;
-  }
-  @Override
-  public synchronized List<Stage> getAllStages(long requestId) {
-    List<Stage> l = new ArrayList<Stage>();
-    for (Stage s: stageList) {
-      if (s.getRequestId() == requestId) {
-        l.add(s);
-      }
-    }
-    return l;
-  }
-
-  @Override
-  public synchronized void abortOperation(long requestId) {
-    for (Stage s : stageList) {
-      if (s.getRequestId() == requestId) {
-        for (String host : s.getHostRoleCommands().keySet()) {
-          Map<String, HostRoleCommand> roleCommands = s.getHostRoleCommands().get(host);
-          for (String role : roleCommands.keySet()) {
-            HostRoleCommand cmd = roleCommands.get(role);
-            HostRoleStatus status = s.getHostRoleStatus(host, cmd.getRole()
-                .toString());
-            if (status.equals(HostRoleStatus.IN_PROGRESS)
-                || status.equals(HostRoleStatus.QUEUED)
-                || status.equals(HostRoleStatus.PENDING)) {
-              s.setHostRoleStatus(host, cmd.getRole().toString(),
-                  HostRoleStatus.ABORTED);
-            }
-          }
-        }
-      }
-    }
-  }
-
-  @Override
-  public synchronized void timeoutHostRole(String host, long requestId,
-      long stageId, Role role) {
-    for (Stage s : stageList) {
-      s.setHostRoleStatus(host, role.toString(), HostRoleStatus.TIMEDOUT);
-    }
-  }
-
-  @Override
-  public synchronized List<Stage> getStagesInProgress() {
-    List<Stage> l = new ArrayList<Stage>();
-    for (Stage s: stageList) {
-      if (s.isStageInProgress()) {
-        l.add(s);
-      }
-    }
-    return l;
-  }
-
-  @Override
-  public synchronized void persistActions(List<Stage> stages) {
-    for (Stage s: stages) {
-      stageList.add(s);
-    }
-  }
-  @Override
-  public synchronized void updateHostRoleState(String hostname, long requestId,
-      long stageId, String role, CommandReport report) {
-    LOG.info("DEBUG stages to iterate: "+stageList.size());
-    for (Stage s : stageList) {
-      if (s.getRequestId() == requestId && s.getStageId() == stageId) {
-        s.setHostRoleStatus(hostname, role,
-            HostRoleStatus.valueOf(report.getStatus()));
-        s.setExitCode(hostname, role, report.getExitCode());
-        s.setStderr(hostname, role, report.getStdErr());
-        s.setStdout(hostname, role, report.getStdOut());
-      }
-    }
-  }
-
-  @Override
-  public void abortHostRole(String host, long requestId, long stageId, Role role) {
-    CommandReport report = new CommandReport();
-    report.setExitCode(999);
-    report.setStdErr("Host Role in invalid state");
-    report.setStdOut("");
-    report.setStatus("ABORTED");
-    updateHostRoleState(host, requestId, stageId, role.toString(), report);
-  }
-
-  @Override
-  public synchronized long getLastPersistedRequestIdWhenInitialized() {
-    return lastRequestId;
-  }
-
-  @Override
-  public void hostRoleScheduled(Stage s, String hostname, String roleStr) {
-    //Nothing needed for in-memory implementation
-  }
-
-  @Override
-  public List<HostRoleCommand> getRequestTasks(long requestId) {
-    return null;
-  }
-
-  @Override
-  public List<HostRoleCommand> getAllTasksByRequestIds(Collection<Long> requestIds) {
-    //TODO not implemented
-    return null;
-  }
-
-  @Override
-  public List<HostRoleCommand> getTasksByRequestAndTaskIds(Collection<Long> requestIds, Collection<Long> taskIds) {
-    //TODO not implemented
-    return null;
-  }
-
-  @Override
-  public Collection<HostRoleCommand> getTasks(Collection<Long> taskIds) {
-    return null;
-  }
-
-  @Override
-  public List<Stage> getStagesByHostRoleStatus(Set<HostRoleStatus> statuses) {
-    List<Stage> l = new ArrayList<Stage>();
-    for (Stage s: stageList) {
-      if (s.doesStageHaveHostRoleStatus(statuses)) {
-        l.add(s);
-      }
-    }
-    return l;
-  }
-  @Override
-  public synchronized List<Long> getRequests() {
-    Set<Long> requestIds = new HashSet<Long>();
-    for (Stage s: stageList) {
-      requestIds.add(s.getRequestId());
-    }
-    List<Long> ids = new ArrayList<Long>();
-    ids.addAll(requestIds);
-    return ids;
-  }
-
-  public HostRoleCommand getTask(long taskId) {
-    for (Stage s : stageList) {
-      for (String host : s.getHostRoleCommands().keySet()) {
-        Map<String, HostRoleCommand> map = s.getHostRoleCommands().get(host);
-        for (HostRoleCommand hostRoleCommand : map.values()) {
-          if (hostRoleCommand.getTaskId() == taskId) {
-            return hostRoleCommand;
-          }
-        }
-      }
-    }
-    return null;
-  }
-  @Override
-  public List<Long> getRequestsByStatus(RequestStatus status) {
-    // TODO
-    throw new RuntimeException("Functionality not implemented");
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionManager.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionManager.java
deleted file mode 100644
index a925a86..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionManager.java
+++ /dev/null
@@ -1,167 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.actionmanager;
-
-import com.google.inject.Inject;
-import com.google.inject.Singleton;
-import com.google.inject.name.Named;
-import org.apache.ambari.server.agent.ActionQueue;
-import org.apache.ambari.server.agent.CommandReport;
-import org.apache.ambari.server.controller.HostsMap;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.utils.StageUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Collection;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicLong;
-
-
-/**
- * This class acts as the interface for action manager with other components.
- */
-@Singleton
-public class ActionManager {
-  private final ActionScheduler scheduler;
-  private final ActionDBAccessor db;
-  private final ActionQueue actionQueue;
-  private final HostsMap hostsMap;
-  private static Logger LOG = LoggerFactory.getLogger(ActionManager.class);
-  private final AtomicLong requestCounter;
-
-  @Inject
-  public ActionManager(@Named("schedulerSleeptime") long schedulerSleepTime,
-      @Named("actionTimeout") long actionTimeout,
-      ActionQueue aq, Clusters fsm, ActionDBAccessor db, HostsMap hostsMap) {
-    this.actionQueue = aq;
-    this.db = db;
-    this.hostsMap = hostsMap;
-    scheduler = new ActionScheduler(schedulerSleepTime, actionTimeout, db,
-        actionQueue, fsm, 2, hostsMap);
-    requestCounter = new AtomicLong(
-        db.getLastPersistedRequestIdWhenInitialized());
-  }
-
-  public void start() {
-    LOG.info("Starting scheduler thread");
-    scheduler.start();
-  }
-
-  public void shutdown() {
-    scheduler.stop();
-  }
-
-  public void sendActions(List<Stage> stages) {
-
-    for (Stage s: stages) {
-      LOG.info("Persisting stage into db: " + s.toString());
-    }
-    db.persistActions(stages);
-
-    // Now scheduler should process actions
-    scheduler.awake();
-  }
-
-  public List<Stage> getRequestStatus(long requestId) {
-    return db.getAllStages(requestId);
-  }
-
-  public Stage getAction(long requestId, long stageId) {
-    return db.getAction(StageUtils.getActionId(requestId, stageId));
-  }
-
-  public void processTaskResponse(String hostname, List<CommandReport> reports) {
-    if (reports == null) {
-      return;
-    }
-    //persist the action response into the db.
-    for (CommandReport report : reports) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Processing command report : " + report.toString());
-      }
-      String actionId = report.getActionId();
-      long [] requestStageIds = StageUtils.getRequestStage(actionId);
-      long requestId = requestStageIds[0];
-      long stageId = requestStageIds[1];
-      HostRoleCommand command = db.getTask(report.getTaskId());
-      if (command == null) {
-        LOG.warn("The task " + report.getTaskId()
-            + " is invalid");
-        continue;
-      }
-      if (!command.getStatus().equals(HostRoleStatus.IN_PROGRESS)
-          && !command.getStatus().equals(HostRoleStatus.QUEUED)) {
-        LOG.warn("The task " + command.getTaskId()
-            + " is not in progress, ignoring update");
-        continue;
-      }
-      db.updateHostRoleState(hostname, requestId, stageId, report.getRole(),
-          report);
-    }
-  }
-
-  public void handleLostHost(String host) {
-    //Do nothing, the task will timeout anyway.
-    //The actions can be failed faster as an optimization
-    //if action timeout happens to be much larger than
-    //heartbeat timeout.
-  }
-
-  public long getNextRequestId() {
-    return requestCounter.incrementAndGet();
-  }
-
-  public List<HostRoleCommand> getRequestTasks(long requestId) {
-    return db.getRequestTasks(requestId);
-  }
-
-  public List<HostRoleCommand> getAllTasksByRequestIds(Collection<Long> requestIds) {
-    return db.getAllTasksByRequestIds(requestIds);
-  }
-
-  public List<HostRoleCommand> getTasksByRequestAndTaskIds(Collection<Long> requestIds, Collection<Long> taskIds) {
-    return db.getTasksByRequestAndTaskIds(requestIds, taskIds);
-  }
-
-  public Collection<HostRoleCommand> getTasks(Collection<Long> taskIds) {
-    return db.getTasks(taskIds);
-  }
-
-  public List<Stage> getRequestsByHostRoleStatus(Set<HostRoleStatus> statuses) {
-    return db.getStagesByHostRoleStatus(statuses);
-  }
-
-  /**
-   * Returns the last 20 request ids.
-   * @return the most recent request ids
-   */
-  public List<Long> getRequests() {
-    return db.getRequests();
-  }
-
-  /**
-   * Returns the ids of requests that match the given status.
-   * @return the matching request ids
-   */
-  public List<Long> getRequestsByStatus(RequestStatus status) {
-    return db.getRequestsByStatus(status);
-  }
-
-}
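
A minimal sketch of the request-id scheme used by ActionManager above: the counter is seeded with the last request id persisted before startup, so getNextRequestId hands out strictly increasing ids within one server instance. The literal seed below stands in for db.getLastPersistedRequestIdWhenInitialized().

import java.util.concurrent.atomic.AtomicLong;

public class RequestIdDemo {
  public static void main(String[] args) {
    long lastPersisted = 41; // stand-in for the value read from the DB
    AtomicLong requestCounter = new AtomicLong(lastPersisted);
    System.out.println(requestCounter.incrementAndGet()); // 42
    System.out.println(requestCounter.incrementAndGet()); // 43
  }
}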
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionScheduler.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionScheduler.java
deleted file mode 100644
index ac18af5..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionScheduler.java
+++ /dev/null
@@ -1,404 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.actionmanager;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.Role;
-import org.apache.ambari.server.ServiceComponentNotFoundException;
-import org.apache.ambari.server.agent.ActionQueue;
-import org.apache.ambari.server.agent.ExecutionCommand;
-import org.apache.ambari.server.controller.HostsMap;
-import org.apache.ambari.server.state.*;
-import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
-import org.apache.ambari.server.state.svccomphost.ServiceComponentHostOpFailedEvent;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.*;
-
-/**
- * This class encapsulates the action scheduler thread.
- * Action schedule frequently looks at action database and determines if
- * there is an action that can be scheduled.
- */
-class ActionScheduler implements Runnable {
-
-  private static Logger LOG = LoggerFactory.getLogger(ActionScheduler.class);
-  private final long actionTimeout;
-  private final long sleepTime;
-  private volatile boolean shouldRun = true;
-  private Thread schedulerThread = null;
-  private final ActionDBAccessor db;
-  private final short maxAttempts;
-  private final ActionQueue actionQueue;
-  private final Clusters fsmObject;
-  private boolean taskTimeoutAdjustment = true;
-  private final HostsMap hostsMap;
-  private final Object wakeupSyncObject = new Object();
-
-  /**
-   * True if the scheduler should run ASAP.
-   * This flag avoids sleeping when an awake() request arrives while a
-   * scheduler iteration is already running.
-   */
-  private boolean activeAwakeRequest = false;
-
-  public ActionScheduler(long sleepTimeMilliSec, long actionTimeoutMilliSec,
-      ActionDBAccessor db, ActionQueue actionQueue, Clusters fsmObject,
-      int maxAttempts, HostsMap hostsMap) {
-    this.sleepTime = sleepTimeMilliSec;
-    this.hostsMap = hostsMap;
-    this.actionTimeout = actionTimeoutMilliSec;
-    this.db = db;
-    this.actionQueue = actionQueue;
-    this.fsmObject = fsmObject;
-    this.maxAttempts = (short) maxAttempts;
-  }
-
-  public void start() {
-    schedulerThread = new Thread(this);
-    schedulerThread.start();
-  }
-
-  public void stop() {
-    shouldRun = false;
-    schedulerThread.interrupt();
-  }
-
-  /**
-   * Should be called from another thread when we want scheduler to
-   * make a run ASAP (for example, to process desired configs of SCHs).
-   * The method is guaranteed to return quickly.
-   */
-  public void awake() {
-    synchronized (wakeupSyncObject) {
-      activeAwakeRequest = true;
-      wakeupSyncObject.notify();
-    }
-  }
-
-  @Override
-  public void run() {
-    while (shouldRun) {
-      try {
-        synchronized (wakeupSyncObject) {
-          if (!activeAwakeRequest) {
-            wakeupSyncObject.wait(sleepTime);
-          }
-          activeAwakeRequest = false;
-        }
-        doWork();
-      } catch (InterruptedException ex) {
-        LOG.warn("Scheduler thread is interrupted going to stop", ex);
-        shouldRun = false;
-      } catch (Exception ex) {
-        LOG.warn("Exception received", ex);
-      } catch (Throwable t) {
-        LOG.warn("ERROR", t);
-      }
-    }
-  }
-
-  private void doWork() throws AmbariException {
-    List<Stage> stages = db.getStagesInProgress();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Scheduler wakes up");
-    }
-    if (stages == null || stages.isEmpty()) {
-      //Nothing to do
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("No stage in progress..nothing to do");
-      }
-      return;
-    }
-
-    for (Stage s : stages) {
-      List<ExecutionCommand> commandsToSchedule = new ArrayList<ExecutionCommand>();
-      Map<String, RoleStats> roleStats = processInProgressStage(s, commandsToSchedule);
-      //Check if stage is failed
-      boolean failed = false;
-      for (String role : roleStats.keySet()) {
-        RoleStats stats = roleStats.get(role);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Stats for role:"+role+", stats="+stats);
-        }
-        if (stats.isRoleFailed()) {
-          failed = true;
-          break;
-        }
-      }
-      if (failed) {
-        LOG.warn("Operation completely failed, borting request id:"
-            + s.getRequestId());
-        db.abortOperation(s.getRequestId());
-        return;
-      }
-
-      //Schedule what we have so far
-      for (ExecutionCommand cmd : commandsToSchedule) {
-        try {
-          scheduleHostRole(s, cmd);
-        } catch (InvalidStateTransitionException e) {
-          LOG.warn("Could not schedule host role "+cmd.toString(), e);
-          db.abortHostRole(cmd.getHostname(), s.getRequestId(), s.getStageId(),
-              cmd.getRole());
-        }
-      }
-
-      //Check if ready to go to next stage
-      boolean goToNextStage = true;
-      for (String role: roleStats.keySet()) {
-        RoleStats stats = roleStats.get(role);
-        if (!stats.isSuccessFactorMet()) {
-          goToNextStage = false;
-          break;
-        }
-      }
-      if (!goToNextStage) {
-        return;
-      }
-    }
-  }
-
-  /**
-   * @param commandsToSchedule output list collecting the commands that are
-   * ready to be scheduled
-   * @return stats for the roles in the stage, used to determine whether the
-   * stage has succeeded or failed
-   */
-  private Map<String, RoleStats> processInProgressStage(Stage s,
-      List<ExecutionCommand> commandsToSchedule) {
-    // Map to track role status
-    Map<String, RoleStats> roleStats = initRoleStats(s);
-    long now = System.currentTimeMillis();
-    long taskTimeout = actionTimeout;
-    if (taskTimeoutAdjustment) {
-      taskTimeout = actionTimeout + s.getTaskTimeout();
-    }
-    for (String host : s.getHosts()) {
-      List<ExecutionCommandWrapper> commandWrappers = s.getExecutionCommands(host);
-      for(ExecutionCommandWrapper wrapper : commandWrappers) {
-        ExecutionCommand c = wrapper.getExecutionCommand();
-        String roleStr = c.getRole().toString();
-        HostRoleStatus status = s.getHostRoleStatus(host, roleStr);
-        if (timeOutActionNeeded(status, s, host, roleStr, now, taskTimeout)) {
-          LOG.info("Host:" + host + ", role:" + roleStr + ", actionId:"
-              + s.getActionId() + " timed out");
-          if (s.getAttemptCount(host, roleStr) >= maxAttempts) {
-            LOG.warn("Host:" + host + ", role:" + roleStr + ", actionId:"
-                + s.getActionId() + " expired");
-            db.timeoutHostRole(host, s.getRequestId(), s.getStageId(),
-                c.getRole());
-            //Reinitialize status
-            status = s.getHostRoleStatus(host, roleStr);
-            ServiceComponentHostOpFailedEvent timeoutEvent =
-                new ServiceComponentHostOpFailedEvent(roleStr,
-                    host, now);
-            try {
-              Cluster cluster = fsmObject.getCluster(s.getClusterName());
-              Service svc = cluster.getService(c.getServiceName());
-              ServiceComponent svcComp = svc.getServiceComponent(
-                  roleStr);
-              ServiceComponentHost svcCompHost =
-                  svcComp.getServiceComponentHost(host);
-              svcCompHost.handleEvent(timeoutEvent);
-            } catch (ServiceComponentNotFoundException scnex) {
-              LOG.info("Not a service component, assuming its an action", scnex);
-            } catch (InvalidStateTransitionException e) {
-              LOG.info("Transition failed for host: " + host + ", role: "
-                  + roleStr, e);
-            } catch (AmbariException ex) {
-              LOG.warn("Invalid live state", ex);
-            }
-          } else {
-            commandsToSchedule.add(c);
-          }
-        } else if (status.equals(HostRoleStatus.PENDING)) {
-          //Need to schedule first time
-          commandsToSchedule.add(c);
-        }
-        this.updateRoleStats(status, roleStats.get(roleStr));
-      }
-    }
-    return roleStats;
-  }
-
-  private Map<String, RoleStats> initRoleStats(Stage s) {
-    Map<Role, Integer> hostCountsForRoles = new HashMap<Role, Integer>();
-    Map<String, RoleStats> roleStats = new TreeMap<String, RoleStats>();
-
-    for (String host : s.getHostRoleCommands().keySet()) {
-      Map<String, HostRoleCommand> roleCommandMap = s.getHostRoleCommands().get(host);
-      for (String role : roleCommandMap.keySet()) {
-        HostRoleCommand c = roleCommandMap.get(role);
-        if (hostCountsForRoles.get(c.getRole()) == null) {
-          hostCountsForRoles.put(c.getRole(), 0);
-        }
-        int val = hostCountsForRoles.get(c.getRole());
-        hostCountsForRoles.put(c.getRole(), val + 1);
-      }
-    }
-
-    for (Role r : hostCountsForRoles.keySet()) {
-      RoleStats stats = new RoleStats(hostCountsForRoles.get(r),
-          s.getSuccessFactor(r));
-      roleStats.put(r.toString(), stats);
-    }
-    return roleStats;
-  }
-
-  private boolean timeOutActionNeeded(HostRoleStatus status, Stage stage,
-      String host, String role, long currentTime, long taskTimeout) {
-    if (!status.equals(HostRoleStatus.QUEUED)
-        && !status.equals(HostRoleStatus.IN_PROGRESS)) {
-      return false;
-    }
-    return currentTime > stage.getLastAttemptTime(host, role) + taskTimeout;
-  }
-
-  private void scheduleHostRole(Stage s, ExecutionCommand cmd)
-      throws InvalidStateTransitionException, AmbariException {
-    long now = System.currentTimeMillis();
-    String roleStr = cmd.getRole().toString();
-    String hostname = cmd.getHostname();
-    if (s.getStartTime(hostname, roleStr) < 0) {
-      try {
-        Cluster c = fsmObject.getCluster(s.getClusterName());
-        Service svc = c.getService(cmd.getServiceName());
-        ServiceComponent svcComp = svc.getServiceComponent(roleStr);
-        ServiceComponentHost svcCompHost =
-            svcComp.getServiceComponentHost(hostname);
-        svcCompHost.handleEvent(s.getFsmEvent(hostname, roleStr).getEvent());
-      } catch (ServiceComponentNotFoundException scnex) {
-        LOG.info("Not a service component, assuming its an action", scnex);
-      } catch (InvalidStateTransitionException e) {
-        LOG.info(
-            "Transition failed for host: " + hostname + ", role: "
-                + roleStr, e);
-        throw e;
-      } catch (AmbariException e) {
-        LOG.warn("Exception in fsm: " + hostname + ", role: " + roleStr,
-            e);
-        throw e;
-      }
-      s.setStartTime(hostname,roleStr, now);
-      s.setHostRoleStatus(hostname, roleStr, HostRoleStatus.QUEUED);
-    }
-    s.setLastAttemptTime(hostname, roleStr, now);
-    s.incrementAttemptCount(hostname, roleStr);
-    LOG.info("Scheduling command: "+cmd.toString()+" for host: "+hostname);
-    // change the hostname in the command to the one mapped for this host
-    cmd.setHostname(hostsMap.getHostMap(hostname));
-    actionQueue.enqueue(hostname, cmd);
-    db.hostRoleScheduled(s, hostname, roleStr);
-  }
-
-  private void updateRoleStats(HostRoleStatus status, RoleStats rs) {
-    switch (status) {
-    case COMPLETED:
-      rs.numSucceeded++;
-      break;
-    case FAILED:
-      rs.numFailed++;
-      break;
-    case QUEUED:
-      rs.numQueued++;
-      break;
-    case PENDING:
-      rs.numPending++;
-      break;
-    case TIMEDOUT:
-      rs.numTimedOut++;
-      break;
-    case ABORTED:
-      rs.numAborted++;
-      break;
-    case IN_PROGRESS:
-      rs.numInProgress++;
-      break;
-    default:
-      LOG.error("Unknown status " + status.name());
-    }
-  }
-
-  public void setTaskTimeoutAdjustment(boolean val) {
-    this.taskTimeoutAdjustment = val;
-  }
-
-  static class RoleStats {
-    int numInProgress = 0;
-    int numQueued = 0;
-    int numSucceeded = 0;
-    int numFailed = 0;
-    int numTimedOut = 0;
-    int numPending = 0;
-    int numAborted = 0;
-    final int totalHosts;
-    final float successFactor;
-
-    RoleStats(int total, float successFactor) {
-      this.totalHosts = total;
-      this.successFactor = successFactor;
-    }
-
-    /**
-     * A role is successful when enough of its hosts have succeeded to meet
-     * the role's success factor.
-     */
-    boolean isSuccessFactorMet() {
-      int minSuccessNeeded = (int) Math.ceil(successFactor * totalHosts);
-      return minSuccessNeeded <= numSucceeded;
-    }
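-
-    // Worked example: with totalHosts = 10 and successFactor = 0.5f,
-    // minSuccessNeeded = ceil(0.5 * 10) = 5, so the factor is met once
-    // numSucceeded reaches 5.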
-
-    private boolean isRoleInProgress() {
-      return (numPending+numQueued+numInProgress > 0);
-    }
-
-    /**
-     * Role failure means role is no longer in progress and success factor is
-     * not met.
-     */
-    boolean isRoleFailed() {
-      return !(isRoleInProgress() || isSuccessFactorMet());
-    }
-
-    public String toString() {
-      StringBuilder builder = new StringBuilder();
-      builder.append("numQueued="+numQueued);
-      builder.append(", numInProgress="+numInProgress);
-      builder.append(", numSucceeded="+numSucceeded);
-      builder.append(", numFailed="+numFailed);
-      builder.append(", numTimedOut="+numTimedOut);
-      builder.append(", numPending="+numPending);
-      builder.append(", numAborted="+numAborted);
-      builder.append(", totalHosts="+totalHosts);
-      builder.append(", successFactor="+successFactor);
-      return builder.toString();
-    }
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java
deleted file mode 100644
index 72caf2d..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.actionmanager;
-
-import org.apache.ambari.server.agent.ExecutionCommand;
-import org.apache.ambari.server.utils.StageUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import javax.xml.bind.JAXBException;
-import java.io.IOException;
-
-public class ExecutionCommandWrapper {
-  private static Log LOG = LogFactory.getLog(ExecutionCommandWrapper.class);
-  String jsonExecutionCommand = null;
-  ExecutionCommand executionCommand = null;
-
-  public ExecutionCommandWrapper(String jsonExecutionCommand) {
-    this.jsonExecutionCommand = jsonExecutionCommand;
-  }
-
-  public ExecutionCommandWrapper(ExecutionCommand executionCommand) {
-    this.executionCommand = executionCommand;
-  }
-
-  public ExecutionCommand getExecutionCommand() {
-    if (executionCommand != null) {
-      return executionCommand;
-    } else if (jsonExecutionCommand != null) {
-      try {
-        executionCommand = StageUtils.stringToExecutionCommand(jsonExecutionCommand);
-        return executionCommand;
-      } catch (IOException e) {
-        throw new RuntimeException(e);
-      }
-    } else {
-      throw new RuntimeException(
-          "Invalid ExecutionCommandWrapper, both object and string"
-              + " representations are null");
-    }
-  }
-
-  public String getJson() {
-    if (jsonExecutionCommand != null) {
-      return jsonExecutionCommand;
-    } else if (executionCommand != null) {
-      try {
-        jsonExecutionCommand = StageUtils.jaxbToString(executionCommand);
-        return jsonExecutionCommand;
-      } catch (JAXBException e) {
-        throw new RuntimeException(e);
-      } catch (IOException e) {
-        throw new RuntimeException(e);
-      }
-    } else {
-      throw new RuntimeException(
-          "Invalid ExecutionCommandWrapper, both object and string"
-              + " representations are null");
-    }
-  }
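-
-  // Illustrative usage, assuming an already-populated ExecutionCommand cmd:
-  //   ExecutionCommandWrapper wrapper = new ExecutionCommandWrapper(cmd);
-  //   String json = wrapper.getJson();        // serialized once, then cached
-  //   ExecutionCommand same = wrapper.getExecutionCommand();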
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o)
-      return true;
-    if (o == null || getClass() != o.getClass())
-      return false;
-
-    ExecutionCommandWrapper wrapper = (ExecutionCommandWrapper) o;
-    
-    if (executionCommand != null && wrapper.executionCommand != null) {
-      return executionCommand.equals(wrapper.executionCommand);
-    } else {
-      return getJson().equals(wrapper.getJson());
-    }
-  }
-
-  @Override
-  public int hashCode() {
-    if (executionCommand != null) {
-      return executionCommand.hashCode();
-    } else if (jsonExecutionCommand != null) {
-      return jsonExecutionCommand.hashCode();
-    }
-    throw new RuntimeException("Invalid Wrapper object");
-  }
-
-  void invalidateJson() {
-    if (executionCommand == null) {
-      throw new RuntimeException("Invalid Wrapper object");
-    }
-    jsonExecutionCommand = null;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/HostAction.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/HostAction.java
deleted file mode 100644
index 2327d07..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/HostAction.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.actionmanager;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.ambari.server.agent.ExecutionCommand;
-import org.apache.ambari.server.utils.StageUtils;
-
-/**
- * Encapsulates entire task for a host for a stage or action. This class
- * contains all the information to generate an
- * {@link org.apache.ambari.server.agent.ExecutionCommand} that will be
- * scheduled for a host.
- */
-public class HostAction {
-  private final String host;
-  private List<HostRoleCommand> roles;
-  private long startTime = -1;
-  private long lastAttemptTime = -1;
-  private short attemptCount = 0;
-
-  /**
-   * This object will be serialized and sent to the agent.
-   */
-  private ExecutionCommand commandToHost;
-
-  public String getManifest() {
-    //generate manifest
-    return null;
-  }
-
-  public HostAction(String host) {
-    this.host = host;
-    roles = new ArrayList<HostRoleCommand>();
-    commandToHost = new ExecutionCommand();
-    commandToHost.setHostname(host);
-  }
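-
-  // Illustrative construction (hypothetical host name):
-  //   HostAction action = new HostAction("host1.example.com");
-  //   action.addHostRoleCommand(hostRoleCommand);  // one command per role
-  //   action.setCommandId(requestId, stageId);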
-
-  public HostAction(HostAction ha) {
-    this.host = ha.host;
-    this.roles = ha.roles;
-    this.startTime = ha.startTime;
-    this.lastAttemptTime = ha.lastAttemptTime;
-    this.attemptCount = ha.attemptCount;
-    this.commandToHost = ha.commandToHost;
-  }
-
-  public void addHostRoleCommand(HostRoleCommand cmd) {
-    roles.add(cmd);
-  }
-
-  public List<HostRoleCommand> getRoleCommands() {
-    return roles;
-  }
-
-  public long getStartTime() {
-    return startTime;
-  }
-
-  public long getLastAttemptTime() {
-    return this.lastAttemptTime;
-  }
-
-  public void setLastAttemptTime(long t) {
-    this.lastAttemptTime = t;
-  }
-
-  public void incrementAttemptCount() {
-    this.attemptCount ++;
-  }
-
-  public short getAttemptCount() {
-    return this.attemptCount;
-  }
-
-  public ExecutionCommand getCommandToHost() {
-    return this.commandToHost;
-  }
-
-  public synchronized void setCommandId(long requestId, long stageId) {
-    commandToHost.setCommandId(StageUtils.getActionId(requestId, stageId));
-  }
-
-  public void setStartTime(long startTime) {
-    this.startTime = startTime;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/HostRoleCommand.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/HostRoleCommand.java
deleted file mode 100644
index 610cd57..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/HostRoleCommand.java
+++ /dev/null
@@ -1,256 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.actionmanager;
-
-import org.apache.ambari.server.Role;
-import org.apache.ambari.server.RoleCommand;
-import org.apache.ambari.server.orm.entities.ExecutionCommandEntity;
-import org.apache.ambari.server.orm.entities.HostRoleCommandEntity;
-import org.apache.ambari.server.state.ServiceComponentHostEvent;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.inject.Injector;
-import com.google.inject.assistedinject.Assisted;
-import com.google.inject.assistedinject.AssistedInject;
-
-/**
- * This class encapsulates the information for a task on a host for a
- * particular role that the action manager needs. It doesn't capture the
- * actual command and parameters, just enough for the action manager to
- * track the request.
- * For the actual command, refer to {@link HostAction#commandToHost}.
- */
-public class HostRoleCommand {
-  private static final Logger log = LoggerFactory.getLogger(HostRoleCommand.class);
-
-  private long taskId = -1;
-  private long stageId = -1;
-  private long requestId = -1;
-  private String hostName;
-  private final Role role;
-  private HostRoleStatus status = HostRoleStatus.PENDING;
-  private String stdout = "";
-  private String stderr = "";
-  private int exitCode = 999; //Default is unknown
-  private final ServiceComponentHostEventWrapper event;
-  private long startTime = -1;
-  private long lastAttemptTime = -1;
-  private short attemptCount = 0;
-  private RoleCommand roleCommand;
-
-  private ExecutionCommandWrapper executionCommandWrapper;
-
-  public HostRoleCommand(String host, Role role,
-      ServiceComponentHostEvent event, RoleCommand command) {
-    this.hostName = host;
-    this.role = role;
-    this.event = new ServiceComponentHostEventWrapper(event);
-    this.roleCommand = command;
-  }
-
-  @AssistedInject
-  public HostRoleCommand(@Assisted HostRoleCommandEntity hostRoleCommandEntity, Injector injector) {
-    taskId = hostRoleCommandEntity.getTaskId();
-    stageId = hostRoleCommandEntity.getStage().getStageId();
-    requestId = hostRoleCommandEntity.getStage().getRequestId();
-    this.hostName = hostRoleCommandEntity.getHostName();
-    role = hostRoleCommandEntity.getRole();
-    status = hostRoleCommandEntity.getStatus();
-    stdout = new String(hostRoleCommandEntity.getStdOut());
-    stderr = new String(hostRoleCommandEntity.getStdError());
-    exitCode = hostRoleCommandEntity.getExitcode();
-    startTime = hostRoleCommandEntity.getStartTime();
-    lastAttemptTime = hostRoleCommandEntity.getLastAttemptTime();
-    attemptCount = hostRoleCommandEntity.getAttemptCount();
-    roleCommand = hostRoleCommandEntity.getRoleCommand();
-    event = new ServiceComponentHostEventWrapper(hostRoleCommandEntity.getEvent());
-    executionCommandWrapper = new ExecutionCommandWrapper(new String(
-        hostRoleCommandEntity
-            .getExecutionCommand().getCommand()
-    ));
-  }
-
-  HostRoleCommandEntity constructNewPersistenceEntity() {
-    HostRoleCommandEntity hostRoleCommandEntity = new HostRoleCommandEntity();
-    hostRoleCommandEntity.setHostName(hostName);
-    hostRoleCommandEntity.setRole(role);
-    hostRoleCommandEntity.setStatus(status);
-    hostRoleCommandEntity.setStdError(stderr.getBytes());
-    hostRoleCommandEntity.setExitcode(exitCode);
-    hostRoleCommandEntity.setStdOut(stdout.getBytes());
-    hostRoleCommandEntity.setStartTime(startTime);
-    hostRoleCommandEntity.setLastAttemptTime(lastAttemptTime);
-    hostRoleCommandEntity.setAttemptCount(attemptCount);
-    hostRoleCommandEntity.setRoleCommand(roleCommand);
-
-    hostRoleCommandEntity.setEvent(event.getEventJson());
-    ExecutionCommandEntity executionCommandEntity = new ExecutionCommandEntity();
-    executionCommandEntity.setCommand(executionCommandWrapper.getJson().getBytes());
-    executionCommandEntity.setHostRoleCommand(hostRoleCommandEntity);
-    hostRoleCommandEntity.setExecutionCommand(executionCommandEntity);
-
-    return hostRoleCommandEntity;
-  }
-
-  ExecutionCommandEntity constructExecutionCommandEntity() {
-    ExecutionCommandEntity executionCommandEntity = new ExecutionCommandEntity();
-    executionCommandEntity.setCommand(executionCommandWrapper.getJson().getBytes());
-    return executionCommandEntity;
-  }
-
-
-  public long getTaskId() {
-    return taskId;
-  }
-
-  public void setTaskId(long taskId) {
-    if (this.taskId != -1) {
-      throw new RuntimeException("Attempt to set taskId again, not allowed");
-    }
-    this.taskId = taskId;
-    executionCommandWrapper.getExecutionCommand().setTaskId(taskId);
-    //Need to invalidate json because taskId is updated.
-    executionCommandWrapper.invalidateJson();
-  }
-
-  public String getHostName() {
-    return hostName;
-  }
-
-  public Role getRole() {
-    return role;
-  }
-
-  public HostRoleStatus getStatus() {
-    return status;
-  }
-
-  public ServiceComponentHostEventWrapper getEvent() {
-    return event;
-  }
-
-  public void setStatus(HostRoleStatus status) {
-    this.status = status;
-  }
-
-  public String getStdout() {
-    return stdout;
-  }
-
-  public void setStdout(String stdout) {
-    this.stdout = stdout;
-  }
-
-  public String getStderr() {
-    return stderr;
-  }
-
-  public void setStderr(String stderr) {
-    this.stderr = stderr;
-  }
-
-  public int getExitCode() {
-    return exitCode;
-  }
-
-  public void setExitCode(int exitCode) {
-    this.exitCode = exitCode;
-  }
-
-  public long getStartTime() {
-    return startTime;
-  }
-
-  public void setStartTime(long startTime) {
-    this.startTime = startTime;
-  }
-
-  public long getLastAttemptTime() {
-    return lastAttemptTime;
-  }
-
-  public void setLastAttemptTime(long lastAttemptTime) {
-    this.lastAttemptTime = lastAttemptTime;
-  }
-
-  public short getAttemptCount() {
-    return attemptCount;
-  }
-
-  public void incrementAttemptCount() {
-    this.attemptCount++;
-  }
-
-  public ExecutionCommandWrapper getExecutionCommandWrapper() {
-    return executionCommandWrapper;
-  }
-
-  public void setExecutionCommandWrapper(ExecutionCommandWrapper executionCommandWrapper) {
-    this.executionCommandWrapper = executionCommandWrapper;
-  }
-
-  public RoleCommand getRoleCommand() {
-    return roleCommand;
-  }
-
-  public void setRoleCommand(RoleCommand roleCommand) {
-    this.roleCommand = roleCommand;
-  }
-
-  public long getStageId() {
-    return stageId;
-  }
-
-  public long getRequestId() {
-    return requestId;
-  }
-
-  @Override
-  public int hashCode() {
-    return (hostName + role.toString() + roleCommand.toString()).hashCode();
-  }
-
-  @Override
-  public boolean equals(Object other) {
-    if (!(other instanceof HostRoleCommand)) {
-      return false;
-    }
-    HostRoleCommand o = (HostRoleCommand) other;
-    return (this.role.equals(o.role) && this.hostName.equals(o.hostName) && this.roleCommand
-        .equals(o.roleCommand));
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder builder = new StringBuilder();
-    builder.append("HostRoleCommand State:\n");
-    builder.append("  TaskId: ").append(taskId).append("\n");
-    builder.append("  Role: ").append(role).append("\n");
-    builder.append("  Status: ").append(status).append("\n");
-    builder.append("  Event: ").append(event).append("\n");
-    builder.append("  stdout: ").append(stdout).append("\n");
-    builder.append("  stderr: ").append(stderr).append("\n");
-    builder.append("  exitcode: ").append(exitCode).append("\n");
-    builder.append("  Start time: ").append(startTime).append("\n");
-    builder.append("  Last attempt time: ").append(lastAttemptTime).append("\n");
-    builder.append("  attempt count: ").append(attemptCount).append("\n");
-    return builder.toString();
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/HostRoleCommandFactory.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/HostRoleCommandFactory.java
deleted file mode 100644
index 1126666..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/HostRoleCommandFactory.java
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.actionmanager;
-
-import org.apache.ambari.server.orm.entities.HostRoleCommandEntity;
-
-public interface HostRoleCommandFactory {
-  HostRoleCommand createExisting(HostRoleCommandEntity hostRoleCommandEntity);
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/HostRoleStatus.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/HostRoleStatus.java
deleted file mode 100644
index 25ef6c3..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/HostRoleStatus.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.actionmanager;
-
-public enum HostRoleStatus {
-  PENDING, //Not queued for a host
-  QUEUED, //Queued for a host
-  IN_PROGRESS, //Host reported it is working
-  COMPLETED, //Host reported success
-  FAILED, //Failed
-  TIMEDOUT, //Host did not respond in time
-  ABORTED //Operation was abandoned
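-
-  // Typical lifecycle, as driven by the action scheduler:
-  //   PENDING -> QUEUED -> IN_PROGRESS -> COMPLETED,
-  // with FAILED, TIMEDOUT, and ABORTED as terminal alternatives.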
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/RequestStatus.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/RequestStatus.java
deleted file mode 100644
index 181ab25..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/RequestStatus.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.actionmanager;
-
-public enum RequestStatus {
-  // At least one task is pending, queued, or in progress
-  IN_PROGRESS,
-  // All tasks are completed
-  COMPLETED,
-  // At least one task has failed, timed out, or been aborted
-  FAILED
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ServiceComponentHostEventWrapper.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ServiceComponentHostEventWrapper.java
deleted file mode 100644
index 83bee65..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ServiceComponentHostEventWrapper.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.actionmanager;
-
-import java.io.IOException;
-
-import javax.xml.bind.JAXBException;
-
-import org.apache.ambari.server.state.ServiceComponentHostEvent;
-import org.apache.ambari.server.utils.StageUtils;
-
-public class ServiceComponentHostEventWrapper {
-
-  private ServiceComponentHostEvent event = null;
-  private String eventJson = null;
-
-  public ServiceComponentHostEventWrapper(ServiceComponentHostEvent event) {
-    this.event  = event;
-  }
-  
-  public ServiceComponentHostEventWrapper(String eventJson) {
-    this.eventJson = eventJson;
-  }
-
-  public ServiceComponentHostEvent getEvent() {
-    if (event != null) {
-      return event;
-    } else if (eventJson != null) {
-      try {
-        event = StageUtils.fromJson(eventJson, ServiceComponentHostEvent.class);
-        return event;
-      } catch (IOException e) {
-        throw new RuntimeException("Illegal Json for event", e);
-      }
-    }
-    return null;
-  }
-  
-  public String getEventJson() { 
-    if (eventJson != null) {
-      return eventJson;
-    } else if (event != null) {
-      try {
-        eventJson = StageUtils.jaxbToString(event);
-        return eventJson;
-      } catch (JAXBException e) {
-        throw new RuntimeException("Couldn't get json", e);
-      } catch (IOException e) {
-        throw new RuntimeException("Couldn't get json", e);
-      }
-    } else {
-      return null;
-    }
-  }
-  
-  public String toString() {
-    if (event != null) {
-      return event.toString();
-    } else if (eventJson != null) {
-      return eventJson;
-    }
-    return "null";
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Stage.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Stage.java
deleted file mode 100644
index 042dc28..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Stage.java
+++ /dev/null
@@ -1,427 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.actionmanager;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeMap;
-
-import org.apache.ambari.server.Role;
-import org.apache.ambari.server.RoleCommand;
-import org.apache.ambari.server.agent.ExecutionCommand;
-import org.apache.ambari.server.orm.dao.HostDAO;
-import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
-import org.apache.ambari.server.orm.dao.StageDAO;
-import org.apache.ambari.server.orm.entities.HostEntity;
-import org.apache.ambari.server.orm.entities.HostRoleCommandEntity;
-import org.apache.ambari.server.orm.entities.RoleSuccessCriteriaEntity;
-import org.apache.ambari.server.orm.entities.StageEntity;
-import org.apache.ambari.server.state.ServiceComponentHostEvent;
-import org.apache.ambari.server.utils.StageUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import com.google.inject.Injector;
-import com.google.inject.assistedinject.Assisted;
-import com.google.inject.assistedinject.AssistedInject;
-
-//This class encapsulates the stage. The stage encapsulates all the information
-//required to persist an action.
-public class Stage {
-  private static Logger LOG = LoggerFactory.getLogger(Stage.class);
-  private final long requestId;
-  private final String clusterName;
-  private long stageId = -1;
-  private final String logDir;
-  private int taskTimeout = -1;
-  private int perTaskTimeFactor = 60000;
-
-  //Map of roles to successFactors for this stage. Default is 1 i.e. 100%
-  private Map<Role, Float> successFactors = new HashMap<Role, Float>();
-
-  //Map of host to host-roles
-  Map<String, Map<String, HostRoleCommand>> hostRoleCommands =
-      new TreeMap<String, Map<String, HostRoleCommand>>();
-  private Map<String, List<ExecutionCommandWrapper>> commandsToSend =
-      new TreeMap<String, List<ExecutionCommandWrapper>>();
-
-  @AssistedInject
-  public Stage(@Assisted long requestId, @Assisted("logDir") String logDir, @Assisted("clusterName") String clusterName) {
-    this.requestId = requestId;
-    this.logDir = logDir;
-    this.clusterName = clusterName;
-  }
-
-  /**
-   * Creates a Stage that already exists in the database.
-   * @param actionId "requestId-stageId" string
-   */
-  @AssistedInject
-  public Stage(@Assisted String actionId, Injector injector) {
-    this(injector.getInstance(StageDAO.class).findByActionId(actionId), injector);
-  }
-
-  @AssistedInject
-  public Stage(@Assisted StageEntity stageEntity, Injector injector) {
-    HostRoleCommandDAO hostRoleCommandDAO = injector.getInstance(HostRoleCommandDAO.class);
-    HostDAO hostDAO = injector.getInstance(HostDAO.class);
-    HostRoleCommandFactory hostRoleCommandFactory = injector.getInstance(HostRoleCommandFactory.class);
-
-    requestId = stageEntity.getRequestId();
-    stageId = stageEntity.getStageId();
-    logDir = stageEntity.getLogInfo();
-    clusterName = stageEntity.getCluster().getClusterName();
-
-    for (HostEntity hostEntity : hostDAO.findByStage(stageEntity)) {
-      List<HostRoleCommandEntity> commands = hostRoleCommandDAO.findSortedCommandsByStageAndHost(stageEntity, hostEntity);
-      commandsToSend.put(hostEntity.getHostName(), new ArrayList<ExecutionCommandWrapper>());
-      hostRoleCommands.put(hostEntity.getHostName(), new TreeMap<String, HostRoleCommand>());
-      for (HostRoleCommandEntity command : commands) {
-        HostRoleCommand hostRoleCommand = hostRoleCommandFactory.createExisting(command);
-        hostRoleCommands.get(hostEntity.getHostName()).put(hostRoleCommand.getRole().toString(), hostRoleCommand);
-        commandsToSend.get(hostEntity.getHostName()).add(hostRoleCommand.getExecutionCommandWrapper());
-      }
-    }
-
-    for (RoleSuccessCriteriaEntity successCriteriaEntity : stageEntity.getRoleSuccessCriterias()) {
-      successFactors.put(successCriteriaEntity.getRole(), successCriteriaEntity.getSuccessFactor().floatValue());
-    }
-  }
-
-  /**
-   * Creates the object to be persisted in the database.
-   * @return StageEntity
-   */
-  public synchronized StageEntity constructNewPersistenceEntity() {
-    StageEntity stageEntity = new StageEntity();
-    stageEntity.setRequestId(requestId);
-    stageEntity.setStageId(getStageId());
-    stageEntity.setLogInfo(logDir);
-    stageEntity.setHostRoleCommands(new ArrayList<HostRoleCommandEntity>());
-    stageEntity.setRoleSuccessCriterias(new ArrayList<RoleSuccessCriteriaEntity>());
-
-    for (Role role : successFactors.keySet()) {
-      RoleSuccessCriteriaEntity roleSuccessCriteriaEntity = new RoleSuccessCriteriaEntity();
-      roleSuccessCriteriaEntity.setRole(role);
-      roleSuccessCriteriaEntity.setStage(stageEntity);
-      roleSuccessCriteriaEntity.setSuccessFactor(successFactors.get(role).doubleValue());
-      stageEntity.getRoleSuccessCriterias().add(roleSuccessCriteriaEntity);
-    }
-    return stageEntity;
-  }
-
-  public List<HostRoleCommand> getOrderedHostRoleCommands() {
-    List<HostRoleCommand> commands = new ArrayList<HostRoleCommand>();
-    //TODO trick for proper storing order, check it
-    for (String hostName : hostRoleCommands.keySet()) {
-      for (ExecutionCommandWrapper executionCommandWrapper : commandsToSend.get(hostName)) {
-        for (HostRoleCommand hostRoleCommand : hostRoleCommands.get(hostName).values()) {
-          if (hostRoleCommand.getExecutionCommandWrapper() == executionCommandWrapper) {
-            commands.add(hostRoleCommand);
-          }
-        }
-      }
-    }
-    return commands;
-  }
-
-  public synchronized void setStageId(long stageId) {
-    if (this.stageId != -1) {
-      throw new RuntimeException("Attempt to set stageId again! Not allowed.");
-    }
-    this.stageId = stageId;
-    for (String host: this.commandsToSend.keySet()) {
-      for (ExecutionCommandWrapper wrapper : this.commandsToSend.get(host)) {
-        ExecutionCommand cmd = wrapper.getExecutionCommand();
-        cmd.setCommandId(StageUtils.getActionId(requestId, stageId));
-      }
-    }
-  }
-
-  public synchronized long getStageId() {
-    return stageId;
-  }
-
-  public String getActionId() {
-    return StageUtils.getActionId(requestId, getStageId());
-  }
-
-  /**
-   * A new host role command is created for execution.
-   * Creates both ExecutionCommand and HostRoleCommand objects and
-   * adds them to the Stage. This should be called only once for a host-role
-   * for a given stage.
-   */
-  public synchronized void addHostRoleExecutionCommand(String host, Role role,  RoleCommand command,
-      ServiceComponentHostEvent event, String clusterName, String serviceName) {
-    HostRoleCommand hrc = new HostRoleCommand(host, role, event, command);
-    ExecutionCommand cmd = new ExecutionCommand();
-    ExecutionCommandWrapper wrapper = new ExecutionCommandWrapper(cmd);
-    hrc.setExecutionCommandWrapper(wrapper);
-    cmd.setHostname(host);
-    cmd.setClusterName(clusterName);
-    cmd.setServiceName(serviceName);
-    cmd.setCommandId(this.getActionId());
-    cmd.setRole(role);
-    cmd.setRoleCommand(command);
-    Map<String, HostRoleCommand> hrcMap = this.hostRoleCommands.get(host);
-    if (hrcMap == null) {
-      hrcMap = new TreeMap<String, HostRoleCommand>();
-      this.hostRoleCommands.put(host, hrcMap);
-    }
-    if (hrcMap.get(role.toString()) != null) {
-      throw new RuntimeException(
-          "Setting the host role command second time for same stage: stage="
-              + this.getActionId() + ", host=" + host + ", role=" + role);
-    }
-    hrcMap.put(role.toString(), hrc);
-    List<ExecutionCommandWrapper> execCmdList = this.commandsToSend.get(host);
-    if (execCmdList == null) {
-      execCmdList = new ArrayList<ExecutionCommandWrapper>();
-      this.commandsToSend.put(host, execCmdList);
-    }
-
-    if (execCmdList.contains(wrapper)) {
-      //todo: proper exception
-      throw new RuntimeException(
-          "Setting the execution command second time for same stage: stage="
-              + this.getActionId() + ", host=" + host + ", role=" + role);
-    }
-    execCmdList.add(wrapper);
-  }
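-
-  // Illustrative call (hypothetical argument values):
-  //   stage.addHostRoleExecutionCommand("host1.example.com", Role.DATANODE,
-  //       RoleCommand.START, event, "cluster1", "HDFS");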
-
-  /**
-   *
-   * @return list of hosts
-   */
-  public synchronized List<String> getHosts() { // TODO: Check whether method should be synchronized
-    List<String> hlist = new ArrayList<String>();
-    for (String h : this.hostRoleCommands.keySet()) {
-      hlist.add(h);
-    }
-    return hlist;
-  }
-
-  synchronized float getSuccessFactor(Role r) {
-    Float f = successFactors.get(r);
-    if (f == null) {
-      if (r.equals(Role.DATANODE) || r.equals(Role.TASKTRACKER) || r.equals(Role.GANGLIA_MONITOR) ||
-          r.equals(Role.HBASE_REGIONSERVER)) {
-        return (float) 0.5;
-      } else {
-        return 1;
-      }
-    } else {
-      return f;
-    }
-  }
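-
-  // Example with the defaults above: a DATANODE role spanning 20 hosts is
-  // considered successful once 10 hosts succeed (factor 0.5); roles without
-  // a default or configured factor require every host to succeed (factor 1).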
-
-  public synchronized void setSuccessFactors(Map<Role, Float> suc) {
-    successFactors = suc;
-  }
-
-  public synchronized Map<Role, Float> getSuccessFactors() {
-    return successFactors;
-  }
-
-  public long getRequestId() {
-    return requestId;
-  }
-
-  public String getClusterName() {
-    return clusterName;
-  }
-
-  public long getLastAttemptTime(String host, String role) {
-    return this.hostRoleCommands.get(host).get(role).getLastAttemptTime();
-  }
-
-  public short getAttemptCount(String host, String role) {
-    return this.hostRoleCommands.get(host).get(role).getAttemptCount();
-  }
-
-  public void incrementAttemptCount(String hostname, String role) {
-    this.hostRoleCommands.get(hostname).get(role).incrementAttemptCount();
-  }
-
-  public void setLastAttemptTime(String host, String role, long t) {
-    this.hostRoleCommands.get(host).get(role).setLastAttemptTime(t);
-  }
-
-  public ExecutionCommandWrapper getExecutionCommandWrapper(String hostname,
-      String role) {
-    HostRoleCommand hrc = hostRoleCommands.get(hostname).get(role);
-    if (hrc != null) {
-      return hrc.getExecutionCommandWrapper();
-    } else {
-      return null;
-    }
-  }
-
-  public List<ExecutionCommandWrapper> getExecutionCommands(String hostname) {
-    return commandsToSend.get(hostname);
-  }
-
-  public long getStartTime(String hostname, String role) {
-    return this.hostRoleCommands.get(hostname).get(role).getStartTime();
-  }
-
-  public void setStartTime(String hostname, String role, long startTime) {
-    this.hostRoleCommands.get(hostname).get(role).setStartTime(startTime);
-  }
-
-  public HostRoleStatus getHostRoleStatus(String hostname, String role) {
-    return this.hostRoleCommands.get(hostname).get(role).getStatus();
-  }
-
-  public void setHostRoleStatus(String host, String role,
-      HostRoleStatus status) {
-    this.hostRoleCommands.get(host).get(role).setStatus(status);
-  }
-
-  public ServiceComponentHostEventWrapper getFsmEvent(String hostname, String roleStr) {
-    return this.hostRoleCommands.get(hostname).get(roleStr).getEvent();
-  }
-
-
-  public void setExitCode(String hostname, String role, int exitCode) {
-    this.hostRoleCommands.get(hostname).get(role).setExitCode(exitCode);
-  }
-
-  public int getExitCode(String hostname, String role) {
-    return this.hostRoleCommands.get(hostname).get(role).getExitCode();
-  }
-
-  public void setStderr(String hostname, String role, String stdErr) {
-    this.hostRoleCommands.get(hostname).get(role).setStderr(stdErr);
-  }
-
-  public void setStdout(String hostname, String role, String stdOut) {
-    this.hostRoleCommands.get(hostname).get(role).setStdout(stdOut);
-  }
-
-  public synchronized boolean isStageInProgress() {
-    for(String host: hostRoleCommands.keySet()) {
-      for (String role : hostRoleCommands.get(host).keySet()) {
-        HostRoleCommand hrc = hostRoleCommands.get(host).get(role);
-        if (hrc == null) {
-          return false;
-        }
-        if (hrc.getStatus().equals(HostRoleStatus.PENDING) ||
-            hrc.getStatus().equals(HostRoleStatus.QUEUED) ||
-            hrc.getStatus().equals(HostRoleStatus.IN_PROGRESS)) {
-          return true;
-        }
-      }
-    }
-    return false;
-  }
-
-  public synchronized boolean doesStageHaveHostRoleStatus(
-      Set<HostRoleStatus> statuses) {
-    for(String host: hostRoleCommands.keySet()) {
-      for (String role : hostRoleCommands.get(host).keySet()) {
-        HostRoleCommand hrc = hostRoleCommands.get(host).get(role);
-        if (hrc == null) {
-          return false;
-        }
-        for (HostRoleStatus status : statuses) {
-          if (hrc.getStatus().equals(status)) {
-            return true;
-          }
-        }
-      }
-    }
-    return false;
-  }
-
-  public Map<String, List<ExecutionCommandWrapper>> getExecutionCommands() {
-    return this.commandsToSend;
-  }
-
-  public String getLogDir() {
-    return this.logDir;
-  }
-
-  public Map<String, Map<String, HostRoleCommand>> getHostRoleCommands() {
-    return hostRoleCommands;
-  }
-
-  /**
-   * This method should be used only in stage planner. To add
-   * a new execution command use
-   * {@link #addHostRoleExecutionCommand(String, Role, RoleCommand,
-   * ServiceComponentHostEvent, String, String)}
-   */
-  public synchronized void addExecutionCommandWrapper(Stage origStage,
-      String hostname, Role r) {
-    String role = r.toString();
-    if (commandsToSend.get(hostname) == null) {
-      commandsToSend.put(hostname, new ArrayList<ExecutionCommandWrapper>());
-    }
-    commandsToSend.get(hostname).add(
-        origStage.getExecutionCommandWrapper(hostname, role));
-    if (hostRoleCommands.get(hostname) == null) {
-      hostRoleCommands.put(hostname, new TreeMap<String, HostRoleCommand>());
-    }
-    // TODO add reference to ExecutionCommand into HostRoleCommand
-    hostRoleCommands.get(hostname).put(role,
-        origStage.getHostRoleCommand(hostname, role));
-  }
-
-  HostRoleCommand getHostRoleCommand(String hostname, String role) {
-    return hostRoleCommands.get(hostname).get(role);
-  }
-  
-  public synchronized int getTaskTimeout() {
-    if (taskTimeout == -1) {
-      int maxTasks = 0;
-      for (String host: commandsToSend.keySet()) {
-        if (commandsToSend.get(host).size() > maxTasks) {
-          maxTasks = commandsToSend.get(host).size();
-        }
-      }
-      taskTimeout = maxTasks * perTaskTimeFactor;
-    }  
-    return taskTimeout;
-  }
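-
-  // Example: if the busiest host in the stage has 5 commands, the timeout
-  // is 5 * 60000 ms = 300000 ms (5 minutes).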
-
-  @Override //Object
-  public synchronized String toString() {
-    StringBuilder builder = new StringBuilder();
-    builder.append("STAGE DESCRIPTION BEGIN\n");
-    builder.append("requestId="+requestId+"\n");
-    builder.append("stageId="+stageId+"\n");
-    builder.append("clusterName="+clusterName+"\n");
-    builder.append("logDir=" + logDir+"\n");
-    builder.append("Success Factors:\n");
-    for (Role r : successFactors.keySet()) {
-      builder.append("  role: "+r+", factor: "+successFactors.get(r)+"\n");
-    }
-    for (HostRoleCommand hostRoleCommand : getOrderedHostRoleCommands()) {
-      builder.append("HOST: ").append(hostRoleCommand.getHostName()).append(" :\n");
-      builder.append(hostRoleCommand.getExecutionCommandWrapper().getJson());
-      builder.append("\n");
-      builder.append(hostRoleCommand.toString());
-      builder.append("\n");
-    }
-    builder.append("STAGE DESCRIPTION END\n");
-    return builder.toString();
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/StageFactory.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/StageFactory.java
deleted file mode 100644
index 93994ad..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/StageFactory.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.actionmanager;
-
-import com.google.inject.assistedinject.Assisted;
-import org.apache.ambari.server.orm.entities.StageEntity;
-
-public interface StageFactory {
-
-  Stage createNew(long requestId, @Assisted("logDir") String logDir, @Assisted("clusterName") String clusterName);
-
-  Stage createExisting(String actionId);
-
-  Stage createExisting(StageEntity stageEntity);
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/ActionQueue.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/ActionQueue.java
deleted file mode 100644
index a606fb2..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/ActionQueue.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.agent;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.NoSuchElementException;
-import java.util.Queue;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.inject.Singleton;
-
-@Singleton
-public class ActionQueue {
-
-  private static Logger LOG = LoggerFactory.getLogger(ActionQueue.class);
-
-  Map<String, Queue<AgentCommand>> hostQueues;
-
-  public ActionQueue() {
-    hostQueues = new HashMap<String, Queue<AgentCommand>>();
-  }
-
-  private synchronized Queue<AgentCommand> getQueue(String hostname) {
-    return hostQueues.get(hostname);
-  }
-
-  private synchronized void addQueue(String hostname, Queue<AgentCommand> q) {
-    hostQueues.put(hostname, q);
-  }
-
-  public void enqueue(String hostname, AgentCommand cmd) {
-    Queue<AgentCommand> q;
-    synchronized (this) {
-      q = getQueue(hostname);
-      if (q == null) {
-        addQueue(hostname, new LinkedList<AgentCommand>());
-        q = getQueue(hostname);
-      }
-    }
-    synchronized (q) {
-      if (q.contains(cmd)) {
-        LOG.warn("cmd already exists in the queue, not adding again");
-        return;
-      }
-      q.add(cmd);
-    }
-  }
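-
-  // Illustrative usage (hypothetical host name):
-  //   ActionQueue queue = new ActionQueue();
-  //   queue.enqueue("host1.example.com", cmd);   // duplicates are dropped
-  //   AgentCommand next = queue.dequeue("host1.example.com"); // null if empty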
-
-  public AgentCommand dequeue(String hostname) {
-    Queue<AgentCommand> q = getQueue(hostname);
-    if (q == null) {
-      return null;
-    }
-    synchronized (q) {
-      if (q.isEmpty()) {
-        return null;
-      } else {
-        return q.remove();
-      }
-    }
-  }
-  
-  public int size(String hostname) {
-    Queue<AgentCommand> q = getQueue(hostname);
-    if (q == null) {
-      return 0;
-    }
-    synchronized(q) {
-      return q.size();
-    }
-  }
-
-  public List<AgentCommand> dequeueAll(String hostname) {
-    Queue<AgentCommand> q = getQueue(hostname);
-    if (q == null) {
-      return null;
-    }
-    List<AgentCommand> l = new ArrayList<AgentCommand>();
-    synchronized (q) {
-      while (true) {
-        try {
-          AgentCommand cmd = q.remove();
-          if (cmd != null) {
-            l.add(cmd);
-          }
-        } catch (NoSuchElementException ex) {
-          return l;
-        }
-      }
-    }
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/AgentCommand.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/AgentCommand.java
deleted file mode 100644
index ec7c58f..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/AgentCommand.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.agent;
-
-public abstract class AgentCommand {
-
-  private AgentCommandType commandType;
-
-  public AgentCommand() {
-    this.commandType = AgentCommandType.STATUS_COMMAND;
-  }
-
-  public AgentCommand(AgentCommandType type) {
-    this.commandType = type;
-  }
-
-  public enum AgentCommandType {
-    EXECUTION_COMMAND,
-    STATUS_COMMAND,
-    REGISTRATION_COMMAND
-  }
-
-  public AgentCommandType getCommandType() {
-    return commandType;
-  }
-  
-  public void setCommandType(AgentCommandType commandType) {
-    this.commandType = commandType;
-  }
-
-  @Override
-  public String toString() {
-    return "AgentCommand{" +
-            "commandType=" + commandType +
-            '}';
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/AgentEnv.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/AgentEnv.java
deleted file mode 100644
index 1dffbfe..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/AgentEnv.java
+++ /dev/null
@@ -1,242 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.agent;
-
-import org.codehaus.jackson.map.annotate.JsonSerialize;
-import org.codehaus.jackson.map.annotate.JsonSerialize.Inclusion;
-
-/**
- * Agent environment data.
- */
-public class AgentEnv {
-
-  /**
-   * Various directories, configurable in <code>ambari-agent.ini</code>
-   */
-  private Directory[] paths = new Directory[0];
-
-  /**
-   * Java processes running on the system.  Default empty array.
-   */
-  private JavaProc[] javaProcs = new JavaProc[0];
-  
-  /**
-   * Various RPM package versions.
-   */
-  private Rpm[] rpms = new Rpm[0];
-  
-  /**
-   * Number of pid files found in <code>/var/run/hadoop</code>
-   */
-  private int varRunHadoopPidCount = 0;
-  
-  /**
-   * Number of log files found in <code>/var/log/hadoop</code>
-   */
-  private int varLogHadoopLogCount = 0;
-
-  /**
-   * Directories that match name <code>/etc/alternatives/*conf</code>
-   */
-  private Alternative[] etcAlternativesConf = new Alternative[0];
-
-  /**
-   * Output for repo listing.  Command to do this varies, but for RHEL it is
-   * <code>yum -C repolist</code>
-   */
-  private String repoInfo;
-  
-
-  public Directory[] getPaths() {
-    return paths;
-  }
-  
-  public void setPaths(Directory[] dirs) {
-    paths = dirs;
-  }
-  
-  public void setVarRunHadoopPidCount(int count) {
-    varRunHadoopPidCount = count;
-  }
-  
-  public int getVarRunHadoopPidCount() {
-    return varRunHadoopPidCount;
-  }
-  
-  public void setVarLogHadoopLogCount(int count) {
-    varLogHadoopLogCount = count;
-  }
-  
-  public int getVarLogHadoopLogCount() {
-    return varLogHadoopLogCount;
-  }
-  
-  public void setJavaProcs(JavaProc[] procs) {
-    javaProcs = procs;
-  }
-  
-  public JavaProc[] getJavaProcs() {
-    return javaProcs;
-  }
-  
-  public void setRpms(Rpm[] rpm) {
-    rpms = rpm;
-  }
-  
-  public Rpm[] getRpms() {
-    return rpms;
-  }
-  
-  public void setEtcAlternativesConf(Alternative[] dirs) {
-    etcAlternativesConf = dirs;
-  }
-  
-  public Alternative[] getEtcAlternativesConf() {
-    return etcAlternativesConf;
-  }
-  
-  public void setRepoInfo(String info) {
-    repoInfo = info;
-  }
-  
-  public String getRepoInfo() {
-    return repoInfo;
-  }
-  
-  /**
-   * Represents information about rpm-installed packages
-   */
-  public static class Rpm {
-    private String rpmName;
-    private boolean rpmInstalled = false;
-    private String rpmVersion;
-    
-    public void setName(String name) {
-      rpmName = name;
-    }
-    
-    public String getName() {
-      return rpmName;
-    }
-    
-    public void setInstalled(boolean installed) {
-      rpmInstalled = installed;
-    }
-    
-    public boolean isInstalled() {
-      return rpmInstalled;
-    }
-    
-    public void setVersion(String version) {
-      rpmVersion = version;
-    }
-    
-    @JsonSerialize(include=Inclusion.NON_NULL)
-    public String getVersion() {
-      return rpmVersion;
-    }
-  }
-  
-  /**
-   * Represents information about a directory of interest.
-   */
-  public static class Directory {
-    private String dirName;
-    private String dirType;
-    
-    public void setName(String name) {
-      dirName = name;
-    }
-    
-    public String getName() {
-      return dirName;
-    }
-    
-    public void setType(String type) {
-      dirType = type;
-    }
-    
-    public String getType() {
-      return dirType;
-    }
-  }
-  
-  /**
-   * Represents information about running java processes.
-   */
-  public static class JavaProc {
-    private String user;
-    private int pid = 0;
-    private boolean is_hadoop = false;
-    private String command;
-    
-    public void setUser(String user) {
-      this.user = user;
-    }
-    
-    public String getUser() {
-      return user;
-    }
-    
-    public void setPid(int pid) {
-      this.pid = pid;
-    }
-    
-    public int getPid() {
-      return pid;
-    }
-    
-    public void setHadoop(boolean hadoop) {
-      is_hadoop = hadoop;
-    }
-    
-    public boolean isHadoop() {
-      return is_hadoop;
-    }
-    
-    public void setCommand(String cmd) {
-      command = cmd;
-    }
-    
-    public String getCommand() {
-      return command;
-    }
-  }
-  
-  public static class Alternative {
-    private String altName;
-    private String altTarget;
-    
-    public void setName(String name) {
-      altName = name;
-    }
-    
-    public String getName() {
-      return altName;
-    }
-    
-    public void setTarget(String target) {
-      altTarget = target;
-    }
-    
-    public String getTarget() {
-      return altTarget;
-    }
-  }
-  
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/CommandReport.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/CommandReport.java
deleted file mode 100644
index 8fab0db..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/CommandReport.java
+++ /dev/null
@@ -1,137 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.agent;
-
-import org.codehaus.jackson.annotate.JsonProperty;
-
-
-public class CommandReport {
-
-  String role;
-  String actionId;
-  String stdout;
-  String stderr;
-  String status;
-  int exitCode;
-  private String clusterName;
-  private String serviceName;
-  private long taskId;
-  
-  @JsonProperty("taskId")
-  public long getTaskId() {
-    return taskId;
-  }
-  
-  @JsonProperty("taskId")
-  public void setTaskId(long taskId) {
-    this.taskId = taskId;
-  }
-  
-  @JsonProperty("clusterName")
-  public void setClusterName(String clusterName) {
-    this.clusterName = clusterName;
-  }
-  
-  @JsonProperty("clusterName") 
-  public String getClusterName() {
-    return this.clusterName;
-  }
-
-  @JsonProperty("actionId")
-  public String getActionId() {
-    return this.actionId;
-  }
-  
-  @JsonProperty("actionId")
-  public void setActionId(String actionId) {
-    this.actionId = actionId;
-  }
-  
-  @JsonProperty("stderr")
-  public String getStdErr() {
-    return this.stderr;
-  }
-  
-  @JsonProperty("stderr")
-  public void setStdErr(String stderr) {
-    this.stderr = stderr;
-  }
-  
-  @JsonProperty("exitcode")
-  public int getExitCode() {
-    return this.exitCode;
-  }
-  
-  @JsonProperty("exitcode")
-  public void setExitCode(int exitCode) {
-    this.exitCode = exitCode;
-  }
-  
-  @JsonProperty("stdout")
-  public String getStdOut() {
-    return this.stdout;
-  }
-  
-  @JsonProperty("stdout")
-  public void setStdOut(String stdout) {
-    this.stdout = stdout;
-  }
-
-  @JsonProperty("role")
-  public String getRole() {
-    return role;
-  }
-  
-  @JsonProperty("role")
-  public void setRole(String role) {
-    this.role = role;
-  }
-  
-  @JsonProperty("status")
-  public String getStatus() {
-    return status;
-  }
-  
-  @JsonProperty("status")
-  public void setStatus(String status) {
-    this.status = status;
-  }
-  
-  @JsonProperty("serviceName")
-  public String getServiceName() {
-    return serviceName;
-  }
-  
-  @JsonProperty("serviceName")
-  public void setServiceName(String serviceName) {
-    this.serviceName = serviceName;
-  }
-
-  @Override
-  public String toString() {
-    return "CommandReport{" +
-            "role='" + role + '\'' +
-            ", actionId='" + actionId + '\'' +
-            ", status='" + status + '\'' +
-            ", exitCode=" + exitCode +
-            ", clusterName='" + clusterName + '\'' +
-            ", serviceName='" + serviceName + '\'' +
-            ", taskId=" + taskId +
-            '}';
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/ComponentStatus.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/ComponentStatus.java
deleted file mode 100644
index bfddd3e..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/ComponentStatus.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.agent;
-
-
-
-public class ComponentStatus {
-  String componentName;
-  String msg;
-  String status;
-  String serviceName;
-  String clusterName;
-
-  public String getComponentName() {
-    return this.componentName;
-  }
-
-  public void setComponentName(String componentName) {
-    this.componentName = componentName;
-  }
-
-  public String getMessage() {
-    return this.msg;
-  }
-
-  public void setMessage(String msg) {
-    this.msg = msg;
-  }
-
-  public String getStatus() {
-    return this.status;
-  }
-
-  public void setStatus(String status) {
-    this.status = status;
-  }
-
-  public String getMsg() {
-    return msg;
-  }
-
-  public void setMsg(String msg) {
-    this.msg = msg;
-  }
-
-  public String getServiceName() {
-    return serviceName;
-  }
-
-  public void setServiceName(String serviceName) {
-    this.serviceName = serviceName;
-  }
-
-  public String getClusterName() {
-    return clusterName;
-  }
-
-  public void setClusterName(String clusterName) {
-    this.clusterName = clusterName;
-  }
-
-  @Override
-  public String toString() {
-    return "ComponentStatus{" +
-            "componentName='" + componentName + '\'' +
-            ", msg='" + msg + '\'' +
-            ", status='" + status + '\'' +
-            ", serviceName='" + serviceName + '\'' +
-            ", clusterName='" + clusterName + '\'' +
-            '}';
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/DiskInfo.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/DiskInfo.java
deleted file mode 100644
index e3fb88e..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/DiskInfo.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.agent;
-
-import org.codehaus.jackson.annotate.JsonProperty;
-
-
-/**
- * Information about a mounted disk on a given node.
- */
-public class DiskInfo {
-  String available;
-  String mountpoint;
-  String device;
-  String used;
-  String percent;
-  String size;
-  String type;
-
-  /**
-   * Constructs a DiskInfo object that tracks information about a disk.
-   * @param device the device name
-   * @param mountpoint the mount point
-   * @param available space available
-   * @param used space used
-   * @param percent percentage of space used
-   * @param size total size
-   * @param type the filesystem type
-   */
-  public DiskInfo(String device, String mountpoint, String available,
-      String used, String percent, String size, String type) {
-    this.device = device;
-    this.mountpoint = mountpoint;
-    this.available = available;
-    this.used = used;
-    this.percent = percent;
-    this.size = size;
-    this.type = type;
-  }
-
-  /**
-   * Needed for Serialization
-   */
-  public DiskInfo() {}
-
-  @JsonProperty("available")
-  public void setAvailable(String available) {
-    this.available = available;
-  }
-  
-  @JsonProperty("available")
-  public String getAvailable() {
-    return this.available;
-  }
-
-  @JsonProperty("mountpoint")
-  public String getMountPoint() {
-    return this.mountpoint;
-  }
-  
-  @JsonProperty("mountpoint")
-  public void setMountPoint(String mountpoint) {
-    this.mountpoint = mountpoint;
-  }
-
-  @JsonProperty("type")
-  public String getType() {
-    return this.type;
-  }
-
-  @JsonProperty("type")
-  public void setType(String type) {
-    this.type = type;
-  }
-  
-  @JsonProperty("used")
-  public String getUsed() {
-    return this.used;
-  }
-
-  @JsonProperty("used")
-  public void setUsed(String used) {
-    this.used = used;
-  }
-  
-  @JsonProperty("percent")
-  public String getPercent() {
-    return this.percent;
-  }
-  
-  @JsonProperty("percent")
-  public void setPercent(String percent) {
-    this.percent = percent;
-  }
-  
-  @JsonProperty("size")
-  public String getSize() {
-    return this.size;
-  }
-  
-  @JsonProperty("size")
-  public void setSize(String size) {
-    this.size = size;
-  }
-  
-  @Override
-  public String toString() {
-    return "available=" + this.available + ",mountpoint=" + this.mountpoint
-         + ",used=" + this.used + ",percent=" + this.percent + ",size=" +
-        this.size + ",device=" + this.device +
-        ",type=" + this.type;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
deleted file mode 100644
index 4b2a61d..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
+++ /dev/null
@@ -1,193 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.agent;
-
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.ambari.server.Role;
-import org.apache.ambari.server.RoleCommand;
-import org.apache.ambari.server.utils.StageUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.codehaus.jackson.annotate.JsonProperty;
-
-
-/**
- * Execution commands are scheduled by the action manager and persisted in
- * the database for recovery.
- */
-public class ExecutionCommand extends AgentCommand {
-  
-  private static Log LOG = LogFactory.getLog(ExecutionCommand.class);
-  
-  public ExecutionCommand() {
-    super(AgentCommandType.EXECUTION_COMMAND);
-  }
-  private String clusterName;
-  private long taskId;
-  private String commandId;
-  private String hostname;
-  private Role role;
-  private Map<String, String> hostLevelParams = new HashMap<String, String>();
-  private Map<String, String> roleParams = null;
-  private RoleCommand roleCommand;
-  private Map<String, List<String>> clusterHostInfo = 
-      new HashMap<String, List<String>>();
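-  // Keyed by configuration type (e.g. "global"), each entry mapping
-  // property names to their values.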
-  private Map<String, Map<String, String>> configurations;
-  private String serviceName;
-  
-  @JsonProperty("commandId")
-  public String getCommandId() {
-    return this.commandId;
-  }
-  
-  @JsonProperty("commandId")
-  public void setCommandId(String commandId) {
-    this.commandId = commandId;
-  }
-  
-  @Override
-  public boolean equals(Object other) {
-    if (!(other instanceof ExecutionCommand)) {
-      return false;
-    }
-    ExecutionCommand o = (ExecutionCommand) other;
-    return (this.commandId.equals(o.commandId) &&
-            this.hostname.equals(o.hostname) &&
-            this.role.equals(o.role) &&
-            this.roleCommand.equals(o.roleCommand));
-  }
-  
-  @Override
-  public String toString() {
-    try {
-      return StageUtils.jaxbToString(this);
-    } catch (Exception ex) {
-      LOG.warn("Exception in json conversion", ex);
-      return "Exception in json conversion"; 
-    }
-  }
-
-  @Override
-  public int hashCode() {
-    return (hostname + commandId + role).hashCode();
-  }
-
-  @JsonProperty("taskId")
-  public long getTaskId() {
-    return taskId;
-  }
-
-  @JsonProperty("taskId")
-  public void setTaskId(long taskId) {
-    this.taskId = taskId;
-  }
-
-  @JsonProperty("role")
-  public Role getRole() {
-    return role;
-  }
-
-  @JsonProperty("role")
-  public void setRole(Role role) {
-    this.role = role;
-  }
-
-  @JsonProperty("roleParams")
-  public Map<String, String> getRoleParams() {
-    return roleParams;
-  }
-
-  @JsonProperty("roleParams")
-  public void setRoleParams(Map<String, String> roleParams) {
-    this.roleParams = roleParams;
-  }
-
-  @JsonProperty("roleCommand")
-  public RoleCommand getRoleCommand() {
-    return roleCommand;
-  }
-
-  @JsonProperty("roleCommand")
-  public void setRoleCommand(RoleCommand cmd) {
-    this.roleCommand = cmd;
-  }
-  
-  @JsonProperty("clusterName")
-  public String getClusterName() {
-    return clusterName;
-  }
-  
-  @JsonProperty("clusterName")
-  public void setClusterName(String clusterName) {
-    this.clusterName = clusterName;
-  }
-
-  @JsonProperty("hostname")
-  public String getHostname() {
-    return hostname;
-  }
-
-  @JsonProperty("hostname")
-  public void setHostname(String hostname) {
-    this.hostname = hostname;
-  }
-
-  @JsonProperty("hostLevelParams")
-  public Map<String, String> getHostLevelParams() {
-    return hostLevelParams;
-  }
-
-  @JsonProperty("hostLevelParams")
-  public void setHostLevelParams(Map<String, String> params) {
-    this.hostLevelParams = params;
-  }
-
-  @JsonProperty("clusterHostInfo")
-  public Map<String, List<String>> getClusterHostInfo() {
-    return clusterHostInfo;
-  }
-
-  @JsonProperty("clusterHostInfo")
-  public void setClusterHostInfo(Map<String, List<String>> clusterHostInfo) {
-    this.clusterHostInfo = clusterHostInfo;
-  }
-  
-  @JsonProperty("configurations")
-  public Map<String, Map<String, String>> getConfigurations() {
-    return configurations;
-  }
-
-  @JsonProperty("configurations")
-  public void setConfigurations(Map<String, Map<String, String>> configurations) {
-    this.configurations = configurations;
-  }
-
-  @JsonProperty("serviceName")
-  public String getServiceName() {
-    return serviceName;
-  }
-
-  @JsonProperty("serviceName")
-  public void setServiceName(String serviceName) {
-    this.serviceName = serviceName;
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeat.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeat.java
deleted file mode 100644
index 62ea7f3..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeat.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.agent;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.codehaus.jackson.annotate.JsonProperty;
-
-/**
- * Data model for the heartbeat an Ambari Agent sends to the Ambari Server.
- */
-
-public class HeartBeat {
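-  // -1 marks a heartbeat sent before the server has issued a response id;
-  // registration resets the sequence to 0.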
-  private long responseId = -1;
-  private long timestamp;
-  private String hostname;
-  List<CommandReport> reports = new ArrayList<CommandReport>();
-  List<ComponentStatus> componentStatus = new ArrayList<ComponentStatus>();
-  HostStatus nodeStatus;
-  private AgentEnv agentEnv = null;
-
-  public long getResponseId() {
-    return responseId;
-  }
-
-  public void setResponseId(long responseId) {
-    this.responseId=responseId;
-  }
-
-  public long getTimestamp() {
-    return timestamp;
-  }
-
-  public String getHostname() {
-    return hostname;
-  }
-
-  public void setTimestamp(long timestamp) {
-    this.timestamp = timestamp;
-  }
-
-  public void setHostname(String hostname) {
-    this.hostname = hostname;
-  }
-  
-  @JsonProperty("reports")
-  public List<CommandReport> getReports() {
-    return this.reports;
-  }
-  
-  @JsonProperty("reports")
-  public void setReports(List<CommandReport> reports) {
-    this.reports = reports;
-  }
-  
-  public HostStatus getNodeStatus() {
-    return nodeStatus;
-  }
-
-  public void setNodeStatus(HostStatus nodeStatus) {
-    this.nodeStatus = nodeStatus;
-  }
-  
-  public AgentEnv getAgentEnv() {
-    return agentEnv;
-  }
-  
-  public void setAgentEnv(AgentEnv env) {
-    agentEnv = env;
-  }
-
-  @JsonProperty("componentStatus")
-  public List<ComponentStatus> getComponentStatus() {
-    return componentStatus;
-  }
-
-  @JsonProperty("componentStatus")
-  public void setComponentStatus(List<ComponentStatus> componentStatus) {
-    this.componentStatus = componentStatus;
-  }
-
-  @Override
-  public String toString() {
-    return "HeartBeat{" +
-            "responseId=" + responseId +
-            ", timestamp=" + timestamp +
-            ", hostname='" + hostname + '\'' +
-            ", reports=" + reports +
-            ", componentStatus=" + componentStatus +
-            ", nodeStatus=" + nodeStatus +
-            '}';
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatHandler.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatHandler.java
deleted file mode 100644
index e7205b0..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatHandler.java
+++ /dev/null
@@ -1,370 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.agent;
-
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.Singleton;
-import org.apache.ambari.server.*;
-import org.apache.ambari.server.actionmanager.ActionManager;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.metadata.ActionMetadata;
-import org.apache.ambari.server.state.*;
-import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
-import org.apache.ambari.server.state.host.HostHealthyHeartbeatEvent;
-import org.apache.ambari.server.state.host.HostRegistrationRequestEvent;
-import org.apache.ambari.server.state.host.HostStatusUpdatesReceivedEvent;
-import org.apache.ambari.server.state.host.HostUnhealthyHeartbeatEvent;
-import org.apache.ambari.server.state.svccomphost.ServiceComponentHostOpFailedEvent;
-import org.apache.ambari.server.state.svccomphost.ServiceComponentHostOpInProgressEvent;
-import org.apache.ambari.server.state.svccomphost.ServiceComponentHostOpSucceededEvent;
-import org.apache.ambari.server.utils.StageUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-
-/**
- * Handles heartbeats coming from the agent, passes the information on to
- * other modules, and processes the action queue to build the heartbeat
- * response.
- */
-@Singleton
-public class HeartBeatHandler {
-  private static Log LOG = LogFactory.getLog(HeartBeatHandler.class);
-  private final Clusters clusterFsm;
-  private final ActionQueue actionQueue;
-  private final ActionManager actionManager;
-  private HeartbeatMonitor heartbeatMonitor;
-
-  @Inject
-  Injector injector;
-  @Inject
-  Configuration config;
-  @Inject
-  AmbariMetaInfo ambariMetaInfo;
-  @Inject
-  ActionMetadata actionMetadata;
-
-  private Map<String, Long> hostResponseIds = new HashMap<String, Long>();
-  private Map<String, HeartBeatResponse> hostResponses = new HashMap<String, HeartBeatResponse>();
-
-  @Inject
-  public HeartBeatHandler(Clusters fsm, ActionQueue aq, ActionManager am,
-      Injector injector) {
-    this.clusterFsm = fsm;
-    this.actionQueue = aq;
-    this.actionManager = am;
-    this.heartbeatMonitor = new HeartbeatMonitor(fsm, aq, am, 60000);
-    injector.injectMembers(this);
-  }
-
-  public void start() {
-    heartbeatMonitor.start();
-  }
-
-  void setHeartbeatMonitor(HeartbeatMonitor heartbeatMonitor) {
-    this.heartbeatMonitor = heartbeatMonitor;
-  }
-
-  public HeartBeatResponse handleHeartBeat(HeartBeat heartbeat)
-      throws AmbariException {
-    String hostname = heartbeat.getHostname();
-    Long currentResponseId = hostResponseIds.get(hostname);
-    HeartBeatResponse response;
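-    // responseId handshake: the server remembers the last id issued per host.
-    // An unknown host triggers re-registration, a stale id (current - 1) gets
-    // the cached response, and any other mismatch restarts the agent.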
-    if (currentResponseId == null) {
-      //Server restarted, or unknown host.
-      LOG.error("CurrentResponseId unknown - send register command");
-      response = new HeartBeatResponse();
-      RegistrationCommand regCmd = new RegistrationCommand();
-      response.setResponseId(0);
-      response.setRegistrationCommand(regCmd);
-      return response;
-    }
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Received heartbeat from host"
-          +  ", hostname=" + hostname
-          + ", currentResponseId=" + currentResponseId
-          + ", receivedResponseId=" + heartbeat.getResponseId());
-    }
-
-    if (heartbeat.getResponseId() == currentResponseId - 1) {
-      LOG.warn("Old responseId received - response was lost - returning cached response");
-      return hostResponses.get(hostname);
-    }else if (heartbeat.getResponseId() != currentResponseId) {
-      LOG.error("Error in responseId sequence - sending agent restart command");
-      response = new HeartBeatResponse();
-      response.setRestartAgent(true);
-      response.setResponseId(currentResponseId);
-      return response;
-    }
-
-    response = new HeartBeatResponse();
-    response.setResponseId(++currentResponseId);
-
-    Host hostObject = clusterFsm.getHost(hostname);
-
-    if (hostObject.getState().equals(HostState.HEARTBEAT_LOST)) {
-      // After losing its heartbeat the agent should re-register
-      LOG.warn("Host is in HEARTBEAT_LOST state - sending register command");
-      response = new HeartBeatResponse();
-      RegistrationCommand regCmd = new RegistrationCommand();
-      response.setResponseId(0);
-      response.setRegistrationCommand(regCmd);
-      return response;
-    }
-
-    hostResponseIds.put(hostname, currentResponseId);
-    hostResponses.put(hostname, response);
-
-    long now = System.currentTimeMillis();
-
-    // If the host is waiting for component status updates, notify it
-    if (heartbeat.componentStatus.size() > 0
-            && hostObject.getState().equals(HostState.WAITING_FOR_HOST_STATUS_UPDATES)) {
-      try {
-        LOG.debug("Got component status updates");
-        hostObject.handleEvent(new HostStatusUpdatesReceivedEvent(hostname, now));
-      } catch (InvalidStateTransitionException e) {
-        LOG.warn("Failed to notify the host about component status updates", e);
-      }
-    }
-
-    try {
-      if (heartbeat.getNodeStatus().getStatus().equals(HostStatus.Status.HEALTHY)) {
-        hostObject.handleEvent(new HostHealthyHeartbeatEvent(hostname, now,
-            heartbeat.getAgentEnv()));
-      } else {
-        hostObject.handleEvent(new HostUnhealthyHeartbeatEvent(hostname, now,
-            null));
-      }
-    } catch (InvalidStateTransitionException ex) {
-      LOG.warn("Asking agent to reregister due to " + ex.getMessage(),  ex);
-      hostObject.setState(HostState.INIT);
-      RegistrationCommand regCmd = new RegistrationCommand();
-      response.setRegistrationCommand(regCmd);
-      return response;
-    }
-
-    //Examine heartbeat for command reports
-    List<CommandReport> reports = heartbeat.getReports();
-    for (CommandReport report : reports) {
-      String clusterName = report.getClusterName();
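-      // Reports may arrive without a cluster name; fall back to "cluster1".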
-      if ((clusterName == null) || "".equals(clusterName)) {
-        clusterName = "cluster1";
-      }
-      Cluster cl = clusterFsm.getCluster(clusterName);
-      String service = report.getServiceName();
-      if (service == null || "".equals(service)) {
-        throw new AmbariException("Invalid command report, service: " + service);
-      }
-      if (actionMetadata.getActions(service.toLowerCase()).contains(report.getRole())) {
-        LOG.info(report.getRole() + " is an action - skip component lookup");
-      } else {
-        try {
-          Service svc = cl.getService(service);
-          ServiceComponent svcComp = svc.getServiceComponent(report.getRole());
-          ServiceComponentHost scHost = svcComp.getServiceComponentHost(hostname);
-          if (report.getStatus().equals("COMPLETED")) {
-            scHost.handleEvent(new ServiceComponentHostOpSucceededEvent(scHost
-                .getServiceComponentName(), hostname, now));
-          } else if (report.getStatus().equals("FAILED")) {
-            scHost.handleEvent(new ServiceComponentHostOpFailedEvent(scHost
-                .getServiceComponentName(), hostname, now));
-          } else if (report.getStatus().equals("IN_PROGRESS")) {
-            scHost.handleEvent(new ServiceComponentHostOpInProgressEvent(scHost
-                .getServiceComponentName(), hostname, now));
-          }
-        } catch (ServiceComponentNotFoundException scnex) {
-          LOG.info("Service component not found ", scnex);
-        } catch (InvalidStateTransitionException ex) {
-          LOG.warn("State machine exception", ex);
-        }
-      }
-    }
-    //Update state machines from reports
-    actionManager.processTaskResponse(hostname, reports);
-
-    // Examine heartbeat for component live status reports
-    Set<Cluster> clusters = clusterFsm.getClustersForHost(hostname);
-    for (Cluster cl : clusters) {
-      for (ComponentStatus status : heartbeat.componentStatus) {
-        if (status.getClusterName().equals(cl.getClusterName())) {
-          try {
-            Service svc = cl.getService(status.getServiceName());
-            String componentName = status.getComponentName();
-            if (svc.getServiceComponents().containsKey(componentName)) {
-              ServiceComponent svcComp = svc.getServiceComponent(
-                      componentName);
-              ServiceComponentHost scHost = svcComp.getServiceComponentHost(
-                      hostname);
-              State prevState = scHost.getState();
-              State liveState = State.valueOf(State.class, status.getStatus());
-              if (prevState.equals(State.INSTALLED)
-                  || prevState.equals(State.START_FAILED)
-                  || prevState.equals(State.STARTED)
-                  || prevState.equals(State.STARTING)
-                  || prevState.equals(State.STOPPING)
-                  || prevState.equals(State.STOP_FAILED)) {
-                scHost.setState(liveState);
-                if (!prevState.equals(liveState)) {
-                  LOG.info("State of service component " + componentName
-                      + " of service " + status.getServiceName()
-                      + " of cluster " + status.getClusterName()
-                      + " has changed from " + prevState + " to " + liveState
-                      + " at host " + hostname);
-                }
-              }
-              // TODO need to get config version and stack version from live state
-            } else {
-              // TODO: What should be done otherwise?
-            }
-          }
-          catch (ServiceNotFoundException e) {
-            LOG.warn("Received a live status update for a non-initialized"
-                + " service"
-                + ", clusterName=" + status.getClusterName()
-                + ", serviceName=" + status.getServiceName());
-            // FIXME ignore invalid live update and continue for now?
-            continue;
-          }
-          catch (ServiceComponentNotFoundException e) {
-            LOG.warn("Received a live status update for a non-initialized"
-                + " servicecomponent"
-                + ", clusterName=" + status.getClusterName()
-                + ", serviceName=" + status.getServiceName()
-                + ", componentName=" + status.getComponentName());
-            // FIXME ignore invalid live update and continue for now?
-            continue;
-          }
-          catch (ServiceComponentHostNotFoundException e) {
-            LOG.warn("Received a live status update for a non-initialized"
-                + " service"
-                + ", clusterName=" + status.getClusterName()
-                + ", serviceName=" + status.getServiceName()
-                + ", componentName=" + status.getComponentName()
-                + ", hostname=" + hostname);
-            // FIXME ignore invalid live update and continue for now?
-            continue;
-          }
-        }
-      }
-    }
-
-    // Send commands if node is active
-    if (hostObject.getState().equals(HostState.HEALTHY)) {
-      List<AgentCommand> cmds = actionQueue.dequeueAll(heartbeat.getHostname());
-      if (cmds != null && !cmds.isEmpty()) {
-        for (AgentCommand ac : cmds) {
-          try {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Sending command string = " + StageUtils.jaxbToString(ac));
-            }
-          } catch (Exception e) {
-            throw new AmbariException("Could not get jaxb string for command", e);
-          }
-          switch (ac.getCommandType()) {
-            case EXECUTION_COMMAND: {
-              response.addExecutionCommand((ExecutionCommand) ac);
-              break;
-            }
-            case STATUS_COMMAND: {
-              response.addStatusCommand((StatusCommand) ac);
-              break;
-            }
-            default:
-              LOG.error("There is no action for agent command = " + ac.getCommandType().name());
-          }
-        }
-      }
-    }
-    return response;
-  }
-
-  public String getOsType(String os, String osRelease) {
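-    // Combines the OS name with the major release number,
-    // e.g. os "centos" and osRelease "6.3" yield "centos6".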
-    String osType = "";
-    if (os != null) {
-      osType = os;
-    }
-    if (osRelease != null) {
-      String[] release = osRelease.split("\\.");
-      if (release.length > 0) {
-        osType += release[0];
-      }
-    }
-    return osType.toLowerCase();
-  }
-
-  public RegistrationResponse handleRegistration(Register register)
-    throws InvalidStateTransitionException, AmbariException {
-    String hostname = register.getHostname();
-    long now = System.currentTimeMillis();
-
-    String agentOsType = getOsType(register.getHardwareProfile().getOS(),
-        register.getHardwareProfile().getOSRelease());
-    if (!ambariMetaInfo.areOsTypesCompatible(
-        config.getServerOsType().toLowerCase(), agentOsType)) {
-      LOG.warn("Received registration request from host with non matching"
-          + " os type"
-          + ", hostname=" + hostname
-          + ", serverOsType=" + config.getServerOsType()
-          + ", agentOstype=" + agentOsType);
-      throw new AmbariException("Cannot register host as it does not match"
-          + " the server's OS type"
-          + ", hostname=" + hostname
-          + ", serverOsType=" + config.getServerOsType()
-          + ", agentOsType=" + agentOsType);
-    }
-
-    Host hostObject;
-    try {
-      hostObject = clusterFsm.getHost(hostname);
-    } catch (HostNotFoundException ex) {
-      clusterFsm.addHost(hostname);
-      hostObject = clusterFsm.getHost(hostname);
-    }
-    // Resetting host state
-    hostObject.setState(HostState.INIT);
-
-    // Get status of service components
-    List<StatusCommand> cmds = heartbeatMonitor.generateStatusCommands(hostname);
-
-    hostObject.handleEvent(new HostRegistrationRequestEvent(hostname,
-        null != register.getPublicHostname() ? register.getPublicHostname() : hostname,
-        new AgentVersion("v1"), now, register.getHardwareProfile(), register.getAgentEnv()));
-    RegistrationResponse response = new RegistrationResponse();
-    if (cmds.isEmpty()) {
-      //No status commands needed let the fsm know that status step is done
-      hostObject.handleEvent(new HostStatusUpdatesReceivedEvent(hostname,
-          now));
-    }
-    response.setStatusCommands(cmds);
-
-    response.setResponseStatus(RegistrationStatus.OK);
-
-    Long requestId = 0L;
-    hostResponseIds.put(hostname, requestId);
-    response.setResponseId(requestId);
-    return response;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatResponse.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatResponse.java
deleted file mode 100644
index c35e502..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatResponse.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.agent;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.codehaus.jackson.annotate.JsonProperty;
-
-/**
- * Server-to-Agent heartbeat response data model.
- */
-public class HeartBeatResponse {
-
-  private long responseId;
- 
-  List<ExecutionCommand> executionCommands = new ArrayList<ExecutionCommand>();
-  List<StatusCommand> statusCommands = new ArrayList<StatusCommand>();
-
-  RegistrationCommand registrationCommand;
-
-  boolean restartAgent = false;
-
-  @JsonProperty("responseId")
-  public long getResponseId() {
-    return responseId;
-  }
-
-  @JsonProperty("responseId")
-  public void setResponseId(long responseId) {
-    this.responseId=responseId;
-  }
-
-  @JsonProperty("executionCommands")
-  public List<ExecutionCommand> getExecutionCommands() {
-    return executionCommands;
-  }
-
-  @JsonProperty("executionCommands")
-  public void setExecutionCommands(List<ExecutionCommand> executionCommands) {
-    this.executionCommands = executionCommands;
-  }
-
-  @JsonProperty("statusCommands")
-  public List<StatusCommand> getStatusCommands() {
-    return statusCommands;
-  }
-
-  @JsonProperty("statusCommands")
-  public void setStatusCommands(List<StatusCommand> statusCommands) {
-    this.statusCommands = statusCommands;
-  }
-
-  @JsonProperty("registrationCommand")
-  public RegistrationCommand getRegistrationCommand() {
-    return registrationCommand;
-  }
-
-  @JsonProperty("registrationCommand")
-  public void setRegistrationCommand(RegistrationCommand registrationCommand) {
-    this.registrationCommand = registrationCommand;
-  }
-
-  @JsonProperty("restartAgent")
-  public boolean isRestartAgent() {
-    return restartAgent;
-  }
-
-  @JsonProperty("restartAgent")
-  public void setRestartAgent(boolean restartAgent) {
-    this.restartAgent = restartAgent;
-  }
-
-  public void addExecutionCommand(ExecutionCommand execCmd) {
-    executionCommands.add(execCmd);
-  }
-
-  public void addStatusCommand(StatusCommand statCmd) {
-    statusCommands.add(statCmd);
-  }
-
-  @Override
-  public String toString() {
-    return "HeartBeatResponse{" +
-            "responseId=" + responseId +
-            ", executionCommands=" + executionCommands +
-            ", statusCommands=" + statusCommands +
-            ", registrationCommand=" + registrationCommand +
-            ", restartAgent=" + restartAgent +
-            '}';
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
deleted file mode 100644
index cf50b86..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
+++ /dev/null
@@ -1,176 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.agent;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.actionmanager.ActionManager;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.Host;
-import org.apache.ambari.server.state.HostState;
-import org.apache.ambari.server.state.ServiceComponentHost;
-import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
-import org.apache.ambari.server.state.host.HostHeartbeatLostEvent;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-/**
- * Monitors the node state and heartbeats.
- */
-public class HeartbeatMonitor implements Runnable {
-  private static Log LOG = LogFactory.getLog(HeartbeatMonitor.class);
-  private Clusters fsm;
-  private ActionQueue actionQueue;
-  private ActionManager actionManager;
-  private final int threadWakeupInterval; // in milliseconds; HeartBeatHandler uses 60000 (1 minute)
-  private volatile boolean shouldRun = true;
-  private Thread monitorThread = null;
-
-  public HeartbeatMonitor(Clusters fsm, ActionQueue aq, ActionManager am,
-      int threadWakeupInterval) {
-    this.fsm = fsm;
-    this.actionQueue = aq;
-    this.actionManager = am;
-    this.threadWakeupInterval = threadWakeupInterval;
-  }
-
-  public void shutdown() {
-    shouldRun = false;
-  }
-
-  public void start() {
-    monitorThread = new Thread(this);
-    monitorThread.start();
-  }
-
-  void join(long millis) throws InterruptedException {
-    monitorThread.join(millis);
-  }
-
-  public boolean isAlive() {
-    return monitorThread.isAlive();
-  }
-
-  @Override
-  public void run() {
-    while (shouldRun) {
-      try {
-        Thread.sleep(threadWakeupInterval);
-        doWork();
-      } catch (InterruptedException ex) {
-        LOG.warn("Scheduler thread is interrupted going to stop", ex);
-        shouldRun = false;
-      } catch (Exception ex) {
-        LOG.warn("Exception received", ex);
-      } catch (Throwable t) {
-        LOG.warn("ERROR", t);
-      }
-    }
-  }
-
-  //Go through all the nodes, check for last heartbeat or any waiting state
-  //If heartbeat is lost, update node fsm state, purge the action queue
-  //notify action manager for node failure.
-  private void doWork() throws InvalidStateTransitionException, AmbariException {
-    List<Host> allHosts = fsm.getHosts();
-    long now = System.currentTimeMillis();
-    for (Host hostObj : allHosts) {
-      String host = hostObj.getHostName();
-      HostState hostState = hostObj.getState();
-      String hostname = hostObj.getHostName();
-
-      long lastHeartbeat = 0;
-      try {
-        lastHeartbeat = fsm.getHost(host).getLastHeartbeatTime();
-      } catch (AmbariException e) {
-        LOG.warn("Exception in getting host object; Is it fatal?", e);
-      }
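-      // A host is considered lost after five wakeup intervals without a
-      // heartbeat (five minutes with the default 60s interval).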
-      if (lastHeartbeat + 5*threadWakeupInterval < now) {
-        LOG.warn("Hearbeat lost from host "+host);
-        //Heartbeat is expired
-        hostObj.handleEvent(new HostHeartbeatLostEvent(host));
-        //Purge action queue
-        actionQueue.dequeueAll(host);
-        //notify action manager
-        actionManager.handleLostHost(host);
-      }
-      if (hostState == HostState.WAITING_FOR_HOST_STATUS_UPDATES) {
-        long timeSpentInState = hostObj.getTimeInState();
-        if (timeSpentInState + 5*threadWakeupInterval < now) {
-          //Go back to init, the agent will be asked to register again in the next heartbeat
-          LOG.warn("timeSpentInState + 5*threadWakeupInterval < now, Go back to init");
-          hostObj.setState(HostState.INIT);
-        }
-      }
-
-      // Get status of service components
-      List<StatusCommand> cmds = generateStatusCommands(hostname);
-      if (!cmds.isEmpty()) {
-        for (StatusCommand command : cmds) {
-          actionQueue.enqueue(hostname, command);
-        }
-      }
-    }
-  }
-
-  /**
-   * @param hostname the host to generate status commands for
-   * @return list of commands for retrieving the status of service components
-   *         on the given host
-   */
-  public List<StatusCommand> generateStatusCommands(String hostname) throws AmbariException {
-    List<StatusCommand> cmds = new ArrayList<StatusCommand>();
-    for (Cluster cl : fsm.getClustersForHost(hostname)) {
-      List<ServiceComponentHost> roleList = cl
-              .getServiceComponentHosts(hostname);
-      for (ServiceComponentHost sch : roleList) {
-        String serviceName = sch.getServiceName();
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Live status will include status of service " + serviceName +
-                " of cluster " + cl.getClusterName());
-        }
-        
-        Map<String, Config> configs = sch.getDesiredConfigs();
-        
-        Map<String, Map<String, String>> configurations =
-            new TreeMap<String, Map<String, String>>();
-        
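-        // Only the "global" configuration type is forwarded with status commands.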
-        for (Config config : configs.values()) {
-          if (config.getType().equals("global"))
-            configurations.put(config.getType(),
-              config.getProperties());
-        }
-        
-        StatusCommand statusCmd = new StatusCommand();
-        statusCmd.setClusterName(cl.getClusterName());
-        statusCmd.setServiceName(serviceName);
-        statusCmd.setComponentName(sch.getServiceComponentName());
-        statusCmd.setConfigurations(configurations);
-        cmds.add(statusCmd);
-      }
-    }
-    return cmds;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/HostInfo.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/HostInfo.java
deleted file mode 100644
index 988e78b..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/HostInfo.java
+++ /dev/null
@@ -1,401 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.agent;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.codehaus.jackson.annotate.JsonProperty;
-
-/**
- * Data model for the hardware profile an Ambari Agent sends to the Ambari Server.
- */
-public class HostInfo {
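-  // Field names appear to mirror the facts reported by the agent's system
-  // profiler (e.g. Facter-style names such as "osfamily" and "kernelrelease").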
-  private String architecture;
-  private String domain;
-  private String fqdn;
-  private String hardwareisa;
-  private String hardwaremodel;
-  private String hostname;
-  private String id;
-  private String interfaces;
-  private String ipaddress;
-  private String kernel;
-  private String kernelmajversion;
-  private String kernelrelease;
-  private String kernelversion;
-  private String macaddress;
-  private long memoryfree;
-  private long memorysize;
-  private List<DiskInfo> mounts = new ArrayList<DiskInfo>();
-  private long memorytotal;
-  private String netmask;
-  private String operatingsystem;
-  private String operatingsystemrelease;
-  private String osfamily;
-  private int physicalprocessorcount;
-  private int processorcount;
-  private boolean selinux;
-  private String swapfree;
-  private String swapsize;
-  private String timezone;
-  private String uptime;
-  private long uptime_days;
-  private long uptime_hours;
-
-
-  @JsonProperty("architecture")
-  public String getArchitecture() {
-    return this.architecture;
-  }
-
-  @JsonProperty("architecture")
-  public void setArchitecture(String architecture) {
-    this.architecture = architecture;
-  }
-
-  @JsonProperty("domain")
-  public String getDomain() {
-    return this.domain;
-  }
-
-  @JsonProperty("domain")
-  public void setDomain(String domain) {
-    this.domain = domain;
-  }
-
-  @JsonProperty("fqdn")
-  public String getFQDN() {
-    return this.fqdn;
-  }
-
-  @JsonProperty("fqdn")
-  public void setFQDN(String fqdn) {
-    this.fqdn = fqdn;
-  }
-
-  @JsonProperty("hardwareisa")
-  public String getHardwareIsa() {
-    return hardwareisa;
-  }
-
-  @JsonProperty("hardwareisa")
-  public void setHardwareIsa(String hardwareisa) {
-    this.hardwareisa = hardwareisa;
-  }
-
-  @JsonProperty("hardwaremodel")
-  public String getHardwareModel() {
-    return this.hardwaremodel;
-  }
-
-  @JsonProperty("hardwaremodel")
-  public void setHardwareModel(String hardwaremodel) {
-    this.hardwaremodel = hardwaremodel;
-  }
-
-  @JsonProperty("hostname")
-  public String getHostName() {
-    return this.hostname;
-  }
-
-  @JsonProperty("hostname")
-  public void setHostName(String hostname) {
-    this.hostname = hostname;
-  }
-
-  @JsonProperty("id")
-  public String getAgentUserId() {
-    return id;
-  }
-
-  @JsonProperty("id")
-  public void setAgentUserId(String id) {
-    this.id = id;
-  }
-
-  @JsonProperty("interfaces")
-  public String getInterfaces() {
-    return this.interfaces;
-  }
-
-  @JsonProperty("interfaces")
-  public void setInterfaces(String interfaces) {
-    this.interfaces = interfaces;
-  }
-
-  @JsonProperty("ipaddress")
-  public String getIPAddress() {
-    return this.ipaddress;
-  }
-
-  @JsonProperty("ipaddress")
-  public void setIPAddress(String ipaddress) {
-    this.ipaddress = ipaddress;
-  }
-
-  @JsonProperty("kernel")
-  public String getKernel() {
-    return this.kernel;
-  }
-
-  @JsonProperty("kernel")
-  public void setKernel(String kernel) {
-    this.kernel = kernel;
-  }
-
-  @JsonProperty("kernelmajversion")
-  public String getKernelMajVersion() {
-    return this.kernelmajversion;
-  }
-
-  @JsonProperty("kernelmajversion")
-  public void setKernelMajVersion(String kernelmajversion) {
-    this.kernelmajversion = kernelmajversion;
-  }
-
-  @JsonProperty("kernelrelease")
-  public String getKernelRelease() {
-    return this.kernelrelease;
-  }
-
-  @JsonProperty("kernelrelease")
-  public void setKernelRelease(String kernelrelease) {
-    this.kernelrelease = kernelrelease;
-  }
-
-  @JsonProperty("kernelversion")
-  public String getKernelVersion() {
-    return this.kernelversion;
-  }
-
-  @JsonProperty("kernelversion")
-  public void setKernelVersion(String kernelversion) {
-    this.kernelversion = kernelversion;
-  }
-
-  @JsonProperty("macaddress")
-  public String getMacAddress() {
-    return this.macaddress;
-  }
-
-  @JsonProperty("macaddress")
-  public void setMacAddress(String macaddress) {
-    this.macaddress = macaddress;
-  }
-
-  @JsonProperty("memoryfree")
-  public long getFreeMemory() {
-    return this.memoryfree;
-  }
-
-  @JsonProperty("memoryfree")
-  public void setFreeMemory(long memoryfree) {
-    this.memoryfree = memoryfree;
-  }
-
-  @JsonProperty("memorysize")
-  public long getMemorySize() {
-    return this.memorysize;
-  }
-
-  @JsonProperty("memorysize")
-  public void setMemorySize(long memorysize) {
-    this.memorysize = memorysize;
-  }
-
-  @JsonProperty("mounts")
-  public List<DiskInfo> getMounts() {
-    return this.mounts;
-  }
-
-  @JsonProperty("mounts")
-  public void setMounts(List<DiskInfo> mounts) {
-    this.mounts = mounts;
-  }
-
-  @JsonProperty("memorytotal")
-  public long getMemoryTotal() {
-    return this.memorytotal;
-  }
-
-  @JsonProperty("memorytotal")
-  public void setMemoryTotal(long memorytotal) {
-    this.memorytotal = memorytotal;
-  }
-
-  @JsonProperty("netmask")
-  public String getNetMask() {
-    return this.netmask;
-  }
-
-  @JsonProperty("netmask")
-  public void setNetMask(String netmask) {
-    this.netmask = netmask;
-  }
-
-  @JsonProperty("operatingsystem")
-  public String getOS() {
-    return this.operatingsystem;
-  }
-
-  @JsonProperty("operatingsystem")
-  public void setOS(String operatingsystem) {
-    this.operatingsystem = operatingsystem;
-  }
-
-  @JsonProperty("operatingsystemrelease")
-  public String getOSRelease() {
-    return this.operatingsystemrelease;
-  }
-
-  @JsonProperty("operatingsystemrelease")
-  public void setOSRelease(String operatingsystemrelease) {
-    this.operatingsystemrelease = operatingsystemrelease;
-  }
-
-  @JsonProperty("osfamily")
-  public String getOSFamily() {
-    return this.osfamily;
-  }
-
-  @JsonProperty("osfamily")
-  public void setOSFamily(String osfamily) {
-    this.osfamily = osfamily;
-  }
-
-  @JsonProperty("physicalprocessorcount")
-  public int getPhysicalProcessorCount() {
-    return this.physicalprocessorcount;
-  }
-
-  @JsonProperty("physicalprocessorcount")
-  public void setPhysicalProcessorCount(int physicalprocessorcount) {
-    this.physicalprocessorcount = physicalprocessorcount;
-  }
-
-  @JsonProperty("processorcount")
-  public int getProcessorCount() {
-    return this.processorcount;
-  }
-
-  @JsonProperty("processorcount")
-  public void setProcessorCount(int processorcount) {
-    this.processorcount = processorcount;
-  }
-
-  @JsonProperty("selinux")
-  public boolean getSeLinux() {
-    return selinux;
-  }
-
-  @JsonProperty("selinux")
-  public void setSeLinux(boolean selinux) {
-    this.selinux = selinux;
-  }
-
-  @JsonProperty("swapfree")
-  public String getSwapFree() {
-    return this.swapfree;
-  }
-
-  @JsonProperty("swapfree")
-  public void setSwapFree(String swapfree) {
-    this.swapfree = swapfree;
-  }
-
-  @JsonProperty("swapsize")
-  public String getSwapSize() {
-    return swapsize;
-  }
-
-  @JsonProperty("swapsize")
-  public void setSwapSize(String swapsize) {
-    this.swapsize = swapsize;
-  }
-
-  @JsonProperty("timezone")
-  public String getTimeZone() {
-    return this.timezone;
-  }
-
-  @JsonProperty("timezone")
-  public void setTimeZone(String timezone) {
-    this.timezone = timezone;
-  }
-
-  @JsonProperty("uptime")
-  public String getUptime() {
-    return this.uptime;
-  }
-
-  @JsonProperty("uptime")
-  public void setUpTime(String uptime) {
-    this.uptime = uptime;
-  }
-
-  @JsonProperty("uptime_hours")
-  public long getUptimeHours() {
-    return this.uptime_hours;
-  }
-
-  @JsonProperty("uptime_hours")
-  public void setUpTimeHours(long uptime_hours) {
-    this.uptime_hours = uptime_hours;
-  }
-
-  @JsonProperty("uptime_days")
-  public long getUpTimeDays() {
-    return this.uptime_days;
-  }
-
-  @JsonProperty("uptime_days")
-  public void setUpTimeDays(long uptime_days) {
-    this.uptime_days = uptime_days;
-  }
-
-  private String getDiskString() {
-    if (mounts == null) {
-      return null;
-    }
-    StringBuilder ret = new StringBuilder();
-    for (DiskInfo diskInfo : mounts) {
-      ret.append("(").append(diskInfo.toString()).append(")");
-    }
-    return ret.toString();
-  }
-
-  public String toString() {
-    return "[" +
-        "hostname=" + this.hostname + "," +
-        "fqdn=" + this.fqdn + "," +
-        "domain=" + this.domain + "," +
-        "architecture=" + this.architecture + "," +
-        "processorcount=" + this.processorcount + "," +
-        "physicalprocessorcount=" + this.physicalprocessorcount + "," +
-        "osname=" + this.operatingsystem + "," +
-        "osversion=" + this.operatingsystemrelease + "," +
-        "osfamily=" + this.osfamily + "," +
-        "memory=" + this.memorytotal + "," +
-        "uptime_hours=" + this.uptime_hours + "," +
-        "mounts=" + getDiskString() + "]\n";
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/HostStatus.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/HostStatus.java
deleted file mode 100644
index d609ecd..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/HostStatus.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.agent;
-
-
-/**
- * Status of the host as described by the agent.
- */
-public class HostStatus {
-  public HostStatus(Status status, String cause) {
-    super();
-    this.status = status;
-    this.cause = cause;
-  }
-  public HostStatus() {
-    super();
-  }
-  
-  public enum Status {
-    HEALTHY,
-    UNHEALTHY
-  }
-  Status status;
-  String cause;
-  public Status getStatus() {
-    return status;
-  }
-  public void setStatus(Status status) {
-    this.status = status;
-  }
-  public String getCause() {
-    return cause;
-  }
-  public void setCause(String cause) {
-    this.cause = cause;
-  }
-
-  @Override
-  public String toString() {
-    return "HostStatus{" +
-            "status=" + status +
-            ", cause='" + cause + '\'' +
-            '}';
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/Register.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/Register.java
deleted file mode 100644
index fa4b512..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/Register.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.agent;
-
-import org.codehaus.jackson.annotate.JsonProperty;
-
-/**
- *
- * Data model for the registration request an Ambari Agent sends to the Ambari Controller.
- *
- */
-public class Register {
-  private int responseId = -1;
-  private long timestamp;
-  private String hostname;
-  private HostInfo hardwareProfile;
-  private String publicHostname;
-  private AgentEnv agentEnv;
-
-  @JsonProperty("responseId")
-  public int getResponseId() {
-    return responseId;
-  }
-
-  @JsonProperty("responseId")
-  public void setResponseId(int responseId) {
-    this.responseId = responseId;
-  }
-
-  public long getTimestamp() {
-    return timestamp;
-  }
-
-  public String getHostname() {
-    return hostname;
-  }
-  
-  public void setHostname(String hostname) {
-    this.hostname = hostname;
-  }
-  
-  public HostInfo getHardwareProfile() {
-    return hardwareProfile;
-  }
-  
-  public void setHardwareProfile(HostInfo hardwareProfile) {
-    this.hardwareProfile = hardwareProfile;
-  }
-  
-  public void setTimestamp(long timestamp) {
-    this.timestamp = timestamp;
-  }
-
-  public String getPublicHostname() {
-    return publicHostname;
-  }
-  
-  public void setPublicHostname(String name) {
-    publicHostname = name;
-  }
-  
-  public AgentEnv getAgentEnv() {
-    return agentEnv;
-  }
-  
-  public void setAgentEnv(AgentEnv env) {
-    agentEnv = env;
-  }
-
-  @Override
-  public String toString() {
-    String ret = "responseId=" + responseId + "\n" +
-             "timestamp=" + timestamp + "\n" +
-             "hostname="  + hostname + "\n";
-
-    if (hardwareProfile != null)
-      ret = ret + "hardwareprofile=" + this.hardwareProfile.toString();
-    return ret;
-  }
-}
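
Registration is the first exchange in the agent protocol: the agent POSTs a Register body, with responseId still at its initial -1, and the server answers with a RegistrationResponse (see below). A hedged sketch of binding such a payload, with hardwareProfile and agentEnv elided and all values hypothetical:

    import org.codehaus.jackson.map.ObjectMapper;

    public class RegisterParseDemo {
      public static void main(String[] args) throws Exception {
        String json = "{\"responseId\":-1,"
                    + "\"timestamp\":1365000000000,"
                    + "\"hostname\":\"host1.example.com\","
                    + "\"publicHostname\":\"host1.example.com\"}";
        Register register = new ObjectMapper().readValue(json, Register.class);
        System.out.println(register.getHostname()); // host1.example.com
      }
    }
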
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/RegistrationCommand.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/RegistrationCommand.java
deleted file mode 100644
index c6aa28c..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/RegistrationCommand.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.agent;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlRootElement;
-import javax.xml.bind.annotation.XmlType;
-
-@XmlRootElement
-@XmlAccessorType(XmlAccessType.FIELD)
-@XmlType(name = "", propOrder = {})
-public class RegistrationCommand extends AgentCommand {
-
-  public RegistrationCommand() {
-    super(AgentCommandType.REGISTRATION_COMMAND);
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/RegistrationResponse.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/RegistrationResponse.java
deleted file mode 100644
index 5e466aa..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/RegistrationResponse.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.agent;
-
-import java.util.List;
-
-import org.codehaus.jackson.annotate.JsonProperty;
-
-/**
- *
- * Controller to Agent response data model.
- *
- */
-public class RegistrationResponse {
-  @JsonProperty("response")
-  private RegistrationStatus response;
-  
-  //Response id to start with, usually zero.
-  @JsonProperty("responseId")
-  private long responseId;
-  
-  @JsonProperty("statusCommands")
-  private List<StatusCommand> statusCommands = null;
-
-  public RegistrationStatus getResponseStatus() {
-    return response;
-  }
-
-  public void setResponseStatus(RegistrationStatus response) {
-    this.response = response;
-  }
-
-  public List<StatusCommand> getStatusCommands() {
-    return statusCommands;
-  }
-
-  public void setStatusCommands(List<StatusCommand> statusCommands) {
-    this.statusCommands = statusCommands;
-  }
-
-  public long getResponseId() {
-    return responseId;
-  }
-
-  public void setResponseId(long responseId) {
-    this.responseId = responseId;
-  }
-
-  @Override
-  public String toString() {
-    return "RegistrationResponse{" +
-            "response=" + response +
-            ", responseId=" + responseId +
-            ", statusCommands=" + statusCommands +
-            '}';
-  }
-}
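
The responseId returned here seeds the heartbeat sequence: the agent echoes it on its next heartbeat, which lets the server spot out-of-sync agents. A minimal sketch of assembling a response with the setters above; whether statusCommands is populated at registration time depends on server state:

    // Sketch of server-side assembly using the setters above.
    RegistrationResponse onRegister() {
      RegistrationResponse resp = new RegistrationResponse();
      resp.setResponseStatus(RegistrationStatus.OK);
      resp.setResponseId(0);        // the agent echoes this id on its next heartbeat
      resp.setStatusCommands(null); // nothing for the agent to check immediately
      return resp;
    }
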
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/RegistrationStatus.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/RegistrationStatus.java
deleted file mode 100644
index bc52827..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/RegistrationStatus.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.agent;
-
-
-public enum RegistrationStatus {
-  OK,
-  FAILED
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/StatusCommand.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/StatusCommand.java
deleted file mode 100644
index 54a3267..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/StatusCommand.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.agent;
-
-import java.util.List;
-import java.util.Map;
-
-import org.codehaus.jackson.annotate.JsonProperty;
-
-/**
- * Command asking the agent to report the status of a service component (role).
- */
-public class StatusCommand extends AgentCommand {
-
-  public StatusCommand() {
-    super(AgentCommandType.STATUS_COMMAND);
-  }
-
-  private String clusterName;
-  private String serviceName;
-  private String componentName;
-  private Map<String, Map<String, String>> configurations;
-
-  @JsonProperty("clusterName")
-  public String getClusterName() {
-    return clusterName;
-  }
-  
-  @JsonProperty("clusterName")
-  public void setClusterName(String clusterName) {
-    this.clusterName = clusterName;
-  }
-
-  @JsonProperty("serviceName")
-  public String getServiceName() {
-    return serviceName;
-  }
-
-  @JsonProperty("serviceName")
-  public void setServiceName(String serviceName) {
-    this.serviceName = serviceName;
-  }
-
-  @JsonProperty("componentName")
-  public String getComponentName() {
-    return componentName;
-  }
-
-  @JsonProperty("componentName")
-  public void setComponentName(String componentName) {
-    this.componentName = componentName;
-  }
-  
-  @JsonProperty("configurations")
-  public Map<String, Map<String, String>> getConfigurations() {
-    return configurations;
-  }
-
-  @JsonProperty("configurations")
-  public void setConfigurations(Map<String, Map<String, String>> configurations) {
-    this.configurations = configurations;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/rest/AgentJackSonJsonProvider.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/rest/AgentJackSonJsonProvider.java
deleted file mode 100644
index 4a3b5ff..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/rest/AgentJackSonJsonProvider.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.agent.rest;
-
-import javax.ws.rs.ext.ContextResolver;
-import javax.ws.rs.ext.Provider;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.codehaus.jackson.map.DeserializationConfig;
-import org.codehaus.jackson.map.ObjectMapper;
-
-
-@Provider
-public class AgentJackSonJsonProvider implements ContextResolver<ObjectMapper> {
-  private static Log LOG = LogFactory.getLog(AgentJackSonJsonProvider.class);
-  @Override
-  public ObjectMapper getContext(Class<?> type) {
-    ObjectMapper result = new ObjectMapper();
-    result.configure(DeserializationConfig.Feature.FAIL_ON_UNKNOWN_PROPERTIES, 
-        false);
-    return result;
-  }
-}
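
Disabling FAIL_ON_UNKNOWN_PROPERTIES makes the agent/server JSON binding tolerant of unknown keys, so a newer agent can add fields without breaking deserialization on an older server. A small demonstration, assuming the HostStatus class above is on the classpath:

    import org.apache.ambari.server.agent.HostStatus;
    import org.codehaus.jackson.map.DeserializationConfig;
    import org.codehaus.jackson.map.ObjectMapper;

    public class LenientParseDemo {
      public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        mapper.configure(
            DeserializationConfig.Feature.FAIL_ON_UNKNOWN_PROPERTIES, false);
        // "extra" is not a HostStatus property; with the feature disabled it is
        // skipped silently instead of raising an UnrecognizedPropertyException.
        HostStatus s = mapper.readValue(
            "{\"status\":\"HEALTHY\",\"cause\":\"ok\",\"extra\":1}",
            HostStatus.class);
        System.out.println(s); // HostStatus{status=HEALTHY, cause='ok'}
      }
    }
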
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/rest/AgentResource.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/rest/AgentResource.java
deleted file mode 100644
index a204bed..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/agent/rest/AgentResource.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.agent.rest;
-
-import javax.servlet.http.HttpServletRequest;
-import javax.ws.rs.Consumes;
-import javax.ws.rs.POST;
-import javax.ws.rs.Path;
-import javax.ws.rs.Produces;
-import javax.ws.rs.WebApplicationException;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.MediaType;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.agent.HeartBeat;
-import org.apache.ambari.server.agent.HeartBeatHandler;
-import org.apache.ambari.server.agent.HeartBeatResponse;
-import org.apache.ambari.server.agent.Register;
-import org.apache.ambari.server.agent.RegistrationResponse;
-import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import com.google.inject.Inject;
-
-/**
- * AgentResource represents the Ambari agent controller.
- * It provides the API that Ambari agents use to fetch cluster configuration
- * changes and to report node attributes and the state of services running on
- * the cluster nodes.
- */
-@Path("/")
-public class AgentResource {
-  private static HeartBeatHandler hh;
-  private static Log LOG = LogFactory.getLog(AgentResource.class);
-
-  @Inject
-  public static void init(HeartBeatHandler instance) {
-    hh = instance;
-    hh.start();
-  }
-
-  /**
-   * Register information about the host (internal API used by the
-   * Ambari Agent).
-   * @response.representation.200.doc This API is invoked by Ambari agent running
-   *  on a cluster to register with the server.
-   * @response.representation.200.mediaType application/json
-   * @response.representation.406.doc Error in register message format
-   * @response.representation.408.doc Request Timed out
-   * @param message Register message
-   * @throws InvalidStateTransitionException
-   * @throws AmbariException
-   * @throws WebApplicationException
-   */
-  @Path("register/{hostName}")
-  @POST
-  @Consumes(MediaType.APPLICATION_JSON)
-  @Produces({MediaType.APPLICATION_JSON})
-  public RegistrationResponse register(Register message,
-      @Context HttpServletRequest req)
-      throws WebApplicationException, AmbariException, InvalidStateTransitionException {
-    /* Call into the heartbeat handler */
-
-    RegistrationResponse response = hh.handleRegistration(message);
-    LOG.debug("Sending registration responce " + hh);
-    return response;
-  }
-
-  /**
-   * Update state of the node (Internal API to be used by Ambari agent).
-   *
-   * @response.representation.200.doc This API is invoked by Ambari agent running
-   *  on a cluster to update the state of various services running on the node.
-   * @response.representation.200.mediaType application/json
-   * @response.representation.406.doc Error in heartbeat message format
-   * @response.representation.408.doc Request Timed out
-   * @param message Heartbeat message
-   * @throws WebApplicationException
-   */
-  @Path("heartbeat/{hostName}")
-  @POST
-  @Consumes(MediaType.APPLICATION_JSON)
-  @Produces({MediaType.APPLICATION_JSON})
-  public HeartBeatResponse heartbeat(HeartBeat message)
-      throws WebApplicationException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Received Heartbeat message " + message);
-    }
-    HeartBeatResponse heartBeatResponse;
-    try {
-      heartBeatResponse = hh.handleHeartBeat(message);
-      LOG.debug("Sending heartbeat responce " + hh);
-    } catch (Exception e) {
-      LOG.info("Error in HeartBeat", e);
-      throw new WebApplicationException(500);
-    }
-    return heartBeatResponse;
-  }
-}
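
Both endpoints consume and produce JSON and are mounted under the agent's REST root. A hedged client-side sketch of the registration POST; the scheme, host, port, and path prefix below are deployment-specific assumptions, not values fixed by this class:

    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class RegisterCallDemo {
      public static void main(String[] args) throws Exception {
        // Hypothetical endpoint; real agents talk to a dedicated agent port over SSL.
        URL url = new URL(
            "https://ambari.example.com:8441/agent/v1/register/host1.example.com");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("POST");
        conn.setRequestProperty("Content-Type", "application/json");
        conn.setDoOutput(true);
        byte[] body = "{\"responseId\":-1,\"hostname\":\"host1.example.com\"}"
            .getBytes("UTF-8");
        OutputStream out = conn.getOutputStream();
        out.write(body);
        out.close();
        System.out.println("HTTP " + conn.getResponseCode());
      }
    }
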
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/handlers/BaseManagementHandler.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/handlers/BaseManagementHandler.java
deleted file mode 100644
index 3d2f063..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/handlers/BaseManagementHandler.java
+++ /dev/null
@@ -1,115 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.handlers;
-
-import org.apache.ambari.server.api.predicate.InvalidQueryException;
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.apache.ambari.server.api.services.Request;
-import org.apache.ambari.server.api.services.Result;
-import org.apache.ambari.server.api.services.ResultImpl;
-import org.apache.ambari.server.api.services.ResultStatus;
-import org.apache.ambari.server.api.services.persistence.PersistenceManager;
-import org.apache.ambari.server.api.services.persistence.PersistenceManagerImpl;
-import org.apache.ambari.server.api.util.TreeNode;
-import org.apache.ambari.server.controller.spi.ClusterController;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.RequestStatus;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.ClusterControllerHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Base handler for operations that persist state to the back-end.
- */
-public abstract class BaseManagementHandler implements RequestHandler {
-
-  /**
-   * Logger instance.
-   */
-  protected final static Logger LOG =
-      LoggerFactory.getLogger(BaseManagementHandler.class);
-
-  /**
-   * PersistenceManager implementation.
-   */
-  PersistenceManager m_pm = new PersistenceManagerImpl(getClusterController());
-
-  protected BaseManagementHandler() {
-  }
-
-  public Result handleRequest(Request request) {
-    ResourceInstance resource = request.getResource();
-    Predicate queryPredicate;
-    try {
-      queryPredicate = request.getQueryPredicate();
-    } catch (InvalidQueryException e) {
-      return new ResultImpl(new ResultStatus(ResultStatus.STATUS.BAD_REQUEST,
-          "Invalid Request: " + e.getMessage()));
-    }
-    if (queryPredicate != null) {
-      resource.getQuery().setUserPredicate(queryPredicate);
-    }
-
-    return handleRequest(resource, request.getHttpBodyProperties());
-  }
-
-  protected Result handleRequest(ResourceInstance resource, Set<Map<String, Object>> setProperties) {
-    return persist(resource, setProperties);
-  }
-
-  protected Result createResult(RequestStatus requestStatus) {
-
-    boolean            isSynchronous = requestStatus.getStatus() == RequestStatus.Status.Complete;
-    Result             result        = new ResultImpl(isSynchronous);
-    TreeNode<Resource> tree          = result.getResultTree();
-
-    if (! isSynchronous) {
-      tree.addChild(requestStatus.getRequestResource(), "request");
-    }
-
-    //todo: currently always empty
-    Set<Resource> setResources = requestStatus.getAssociatedResources();
-    if (! setResources.isEmpty()) {
-      TreeNode<Resource> resourcesNode = tree.addChild(null, "resources");
-
-      int count = 1;
-      for (Resource resource : setResources) {
-        //todo: provide a more meaningful node name
-        resourcesNode.addChild(resource, resource.getType() + ":" + count++);
-      }
-    }
-
-    return result;
-  }
-
-  //todo: controller should be injected
-  protected ClusterController getClusterController() {
-    return ClusterControllerHelper.getClusterController();
-  }
-
-  protected PersistenceManager getPersistenceManager() {
-    return m_pm;
-  }
-
-  protected abstract Result persist(ResourceInstance r, Set<Map<String, Object>> properties);
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/handlers/CreateHandler.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/handlers/CreateHandler.java
deleted file mode 100644
index 0c3eddf..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/handlers/CreateHandler.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.handlers;
-
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.apache.ambari.server.api.services.*;
-import org.apache.ambari.server.api.services.ResultStatus;
-import org.apache.ambari.server.controller.spi.*;
-
-import java.util.Map;
-import java.util.Set;
-
-
-/**
- * Responsible for create requests.
- */
-public class CreateHandler extends BaseManagementHandler {
-
-  @Override
-  protected Result persist(ResourceInstance r, Set<Map<String, Object>> properties) {
-    Result result;
-    try {
-      RequestStatus status = getPersistenceManager().create(r, properties);
-
-      result = createResult(status);
-
-      if (result.isSynchronous()) {
-        result.setResultStatus(new ResultStatus(ResultStatus.STATUS.CREATED));
-      } else {
-        result.setResultStatus(new ResultStatus(ResultStatus.STATUS.ACCEPTED));
-      }
-
-    } catch (UnsupportedPropertyException e) {
-      result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.BAD_REQUEST, e.getMessage()));
-    } catch (NoSuchParentResourceException e) {
-      //todo: is this the correct status code?
-      result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.NOT_FOUND, e.getMessage()));
-    } catch (SystemException e) {
-      if (LOG.isErrorEnabled()) {
-        LOG.error("Caught a system exception while attempting to create a resource", e.getMessage());
-      }
-      result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.SERVER_ERROR, e.getMessage()));
-    } catch (ResourceAlreadyExistsException e) {
-      result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.CONFLICT, e.getMessage()));
-    } catch(IllegalArgumentException e) {
-      result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.BAD_REQUEST, e.getMessage()));
-    } catch (RuntimeException e) {
-      if (LOG.isErrorEnabled()) {
-        LOG.error("Caught a runtime exception while attempting to create a resource", e);
-      }
-      //result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.SERVER_ERROR, e.getMessage()));
-      throw e;
-    }
-
-    return result;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/handlers/DeleteHandler.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/handlers/DeleteHandler.java
deleted file mode 100644
index 0d4f4ea..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/handlers/DeleteHandler.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.handlers;
-
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.apache.ambari.server.api.services.ResultStatus;
-import org.apache.ambari.server.api.services.Result;
-import org.apache.ambari.server.api.services.ResultImpl;
-import org.apache.ambari.server.controller.spi.*;
-
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Responsible for delete requests.
- */
-public class DeleteHandler extends BaseManagementHandler implements RequestHandler {
-
-  @Override
-  protected Result persist(ResourceInstance r, Set<Map<String, Object>> properties) {
-    Result result;
-      try {
-        RequestStatus status = getPersistenceManager().delete(r, properties);
-        result = createResult(status);
-
-        if (result.isSynchronous()) {
-          result.setResultStatus(new ResultStatus(ResultStatus.STATUS.OK));
-        } else {
-          result.setResultStatus(new ResultStatus(ResultStatus.STATUS.ACCEPTED));
-        }
-      } catch (SystemException e) {
-        result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.SERVER_ERROR, e));
-      } catch (NoSuchParentResourceException e) {
-        result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.NOT_FOUND, e));
-      } catch (NoSuchResourceException e) {
-        if (r.isCollectionResource()) {
-          //todo: The query didn't match any resource so no resources were updated.
-          //todo: 200 may be ok but we need to return a collection
-          //todo: of resources that were updated.
-          result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.OK, e));
-        } else {
-          result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.NOT_FOUND, e));
-        }
-      } catch (UnsupportedPropertyException e) {
-        result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.BAD_REQUEST, e));
-      }
-
-    return result;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/handlers/QueryCreateHandler.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/handlers/QueryCreateHandler.java
deleted file mode 100644
index eb0760e..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/handlers/QueryCreateHandler.java
+++ /dev/null
@@ -1,129 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-package org.apache.ambari.server.api.handlers;
-
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.apache.ambari.server.api.resources.ResourceInstanceFactory;
-import org.apache.ambari.server.api.resources.ResourceInstanceFactoryImpl;
-import org.apache.ambari.server.api.services.Request;
-import org.apache.ambari.server.api.services.ResultStatus;
-import org.apache.ambari.server.api.services.Result;
-import org.apache.ambari.server.api.services.ResultImpl;
-import org.apache.ambari.server.api.util.TreeNode;
-import org.apache.ambari.server.controller.spi.*;
-
-import java.util.*;
-
-/**
- * Handler for creates that are applied to the results of a query.
- */
-public class QueryCreateHandler extends BaseManagementHandler {
-
-  private RequestHandler m_readHandler = new ReadHandler();
-
-  @Override
-  public Result handleRequest(Request request) {
-    Result queryResult = getReadHandler().handleRequest(request);
-    if (queryResult.getStatus().isErrorState() ||
-        queryResult.getResultTree().getChildren().isEmpty()) {
-
-      //return the query result if result has error state or contains no resources
-      //todo: For case where no resources are returned, will return 200 ok.
-      //todo: What is the appropriate status code?
-      return queryResult;
-    }
-
-    ResourceInstance resource = request.getResource();
-    Resource.Type createType = getCreateType(request.getHttpBody(), resource);
-    Set<Map<String, Object>> setProperties = buildCreateSet(request, queryResult, createType);
-    ResourceInstance createResource = getResourceFactory().createResource(
-        createType, request.getResource().getIds());
-
-    return super.handleRequest(createResource, setProperties);
-  }
-
-  private Set<Map<String, Object>> buildCreateSet(Request request, Result queryResult, Resource.Type createType) {
-    Set<Map<String, Object>> setRequestProps = request.getHttpBodyProperties();
-    Set<Map<String, Object>> setCreateProps = new HashSet<Map<String, Object>>(setRequestProps.size());
-
-    ResourceInstance  resource            = request.getResource();
-    Resource.Type     type                = resource.getResourceDefinition().getType();
-    ClusterController controller          = getClusterController();
-    String            resourceKeyProperty = controller.getSchema(type).getKeyPropertyId(type);
-    String            createKeyProperty   = controller.getSchema(createType).getKeyPropertyId(type);
-
-    TreeNode<Resource> tree = queryResult.getResultTree();
-    Collection<TreeNode<Resource>> treeChildren = tree.getChildren();
-    for (TreeNode<Resource> node : treeChildren) {
-      Resource r = node.getObject();
-      Object keyVal = r.getPropertyValue(resourceKeyProperty);
-
-      for (Map<String, Object> mapProps : setRequestProps) {
-        Map<String, Object> mapResourceProps = new HashMap<String, Object>(mapProps);
-        mapResourceProps.put(createKeyProperty, keyVal);
-        setCreateProps.add(mapResourceProps);
-      }
-    }
-    return setCreateProps;
-  }
-
-  private Resource.Type getCreateType(String requestBody, ResourceInstance resource) {
-    int startIdx = requestBody.indexOf("\"") + 1;
-    int endIdx = requestBody.indexOf("\"", startIdx + 1);
-
-    ResourceInstance res =  resource.getSubResources().get(requestBody.substring(startIdx, endIdx));
-    return res == null ? null : res.getResourceDefinition().getType();
-  }
-
-  @Override
-  protected Result persist(ResourceInstance r, Set<Map<String, Object>> properties) {
-    Result result;
-    try {
-      RequestStatus status = getPersistenceManager().create(r, properties);
-
-      result = createResult(status);
-
-      if (result.isSynchronous()) {
-        result.setResultStatus(new ResultStatus(ResultStatus.STATUS.CREATED));
-      } else {
-        result.setResultStatus(new ResultStatus(ResultStatus.STATUS.ACCEPTED));
-      }
-
-    } catch (UnsupportedPropertyException e) {
-      result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.BAD_REQUEST, e));
-    } catch (ResourceAlreadyExistsException e) {
-      result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.CONFLICT, e));
-    } catch (NoSuchParentResourceException e) {
-      result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.NOT_FOUND, e));
-    } catch (SystemException e) {
-      result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.SERVER_ERROR, e));
-    }
-
-    return result;
-  }
-
-  protected ResourceInstanceFactory getResourceFactory() {
-    return new ResourceInstanceFactoryImpl();
-  }
-
-  protected RequestHandler getReadHandler() {
-    return m_readHandler;
-  }
-}
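
Note the deliberately naive getCreateType() above: it takes the first double-quoted string in the raw body as the sub-resource name, so a query-scoped create (POST against a collection query) fans the create out across every matched resource. A sketch of that extraction, with a hypothetical body:

    public class CreateTypeSniffDemo {
      public static void main(String[] args) {
        // Mirrors getCreateType()'s quote scanning; brittle if any quoted text
        // precedes the sub-resource key in the body.
        String body =
            "{ \"host_components\" : [ { \"HostRoles\" : "
          + "{ \"component_name\" : \"DATANODE\" } } ] }";
        int start = body.indexOf('"') + 1;
        int end = body.indexOf('"', start + 1);
        System.out.println(body.substring(start, end)); // host_components
      }
    }
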
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/handlers/ReadHandler.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/handlers/ReadHandler.java
deleted file mode 100644
index 9a24711..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/handlers/ReadHandler.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.handlers;
-
-import org.apache.ambari.server.api.predicate.InvalidQueryException;
-import org.apache.ambari.server.api.services.Request;
-import org.apache.ambari.server.api.services.ResultImpl;
-import org.apache.ambari.server.api.services.ResultStatus;
-import org.apache.ambari.server.api.services.Result;
-import org.apache.ambari.server.api.query.Query;
-import org.apache.ambari.server.controller.spi.*;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Map;
-
-/**
- * Responsible for read requests.
- */
-public class ReadHandler implements RequestHandler {
-
-  /**
-   * Logger instance.
-   */
-  private final static Logger LOG =
-      LoggerFactory.getLogger(ReadHandler.class);
-
-  @Override
-  public Result handleRequest(Request request) {
-    Query query = request.getResource().getQuery();
-
-    try {
-      addFieldsToQuery(request, query);
-    } catch (IllegalArgumentException e) {
-      return new ResultImpl(new ResultStatus(ResultStatus.STATUS.BAD_REQUEST, e.getMessage()));
-    }
-
-    Result result;
-    Predicate p = null;
-    try {
-      p = request.getQueryPredicate();
-      query.setUserPredicate(p);
-
-      result = query.execute();
-      result.setResultStatus(new ResultStatus(ResultStatus.STATUS.OK));
-    } catch (SystemException e) {
-      result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.SERVER_ERROR, e));
-    } catch (NoSuchParentResourceException e) {
-      result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.NOT_FOUND, e.getMessage()));
-    } catch (UnsupportedPropertyException e) {
-      result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.BAD_REQUEST, e.getMessage()));
-    } catch (NoSuchResourceException e) {
-      if (p == null) {
-        // no predicate specified, resource requested by id
-        result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.NOT_FOUND, e.getMessage()));
-      } else {
-        // resource(s) requested using predicate
-        result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.OK, e));
-        result.getResultTree().setProperty("isCollection", "true");
-      }
-    } catch (IllegalArgumentException e) {
-      result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.BAD_REQUEST,
-          "Invalid Request: " + e.getMessage()));
-    } catch (InvalidQueryException e) {
-      result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.BAD_REQUEST,
-          "Invalid Request: " + e.getMessage()));
-    } catch (RuntimeException e) {
-      if (LOG.isErrorEnabled()) {
-        LOG.error("Caught a runtime exception executing a query", e);
-      }
-      //result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.SERVER_ERROR, e));
-      throw e;
-    }
-    return result;
-  }
-
-  private void addFieldsToQuery(Request request, Query query) throws IllegalArgumentException {
-    //Partial response
-    for (Map.Entry<String, TemporalInfo> entry : request.getFields().entrySet()) {
-      // Iterate over map and add props/temporalInfo
-      String propertyId = entry.getKey();
-      query.addProperty(PropertyHelper.getPropertyCategory(propertyId),
-          PropertyHelper.getPropertyName(propertyId), entry.getValue());
-    }
-  }
-}
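
The addFieldsToQuery() loop implements partial response: each requested field is a slash-separated property id, optionally carrying temporal info for metrics, and is split into a category and a name before being added to the query. A simplified sketch of that split, under the assumption that PropertyHelper divides on the last slash:

    public class PropertyIdSplitDemo {
      public static void main(String[] args) {
        String propertyId = "metrics/cpu/cpu_user";  // illustrative id
        int lastSlash = propertyId.lastIndexOf('/');
        String category = lastSlash < 0 ? null : propertyId.substring(0, lastSlash);
        String name = propertyId.substring(lastSlash + 1);
        System.out.println(category + " | " + name);  // metrics/cpu | cpu_user
      }
    }
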
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/handlers/RequestHandler.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/handlers/RequestHandler.java
deleted file mode 100644
index 381dedb..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/handlers/RequestHandler.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.handlers;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.api.services.Request;
-import org.apache.ambari.server.api.services.Result;
-
-/**
- * Responsible for handling of requests and returning a result.
- */
-public interface RequestHandler {
-  /**
-   * Handle the given request and return a result.
-   *
-   * @param request the request to handle
-   * @return the result of the request
-   */
-  public Result handleRequest(Request request);
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/handlers/RequestHandlerFactory.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/handlers/RequestHandlerFactory.java
deleted file mode 100644
index 2186267..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/handlers/RequestHandlerFactory.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.handlers;
-
-import org.apache.ambari.server.api.services.Request;
-
-/**
- * Factory for {@link RequestHandler}
- * Returns the appropriate request handler based on the request.
- */
-public class RequestHandlerFactory {
-  /**
-   * Return an instance of the correct request handler based on the request type.
-   *
-   * @param requestType  the request type; one of {@link Request.Type}
-   * @return a request handler for the request
-   */
-  public RequestHandler getRequestHandler(Request.Type requestType) {
-    switch (requestType) {
-      case GET:
-        return new ReadHandler();
-      case POST:
-        return new CreateHandler();
-      case PUT:
-        return new UpdateHandler();
-      case DELETE:
-        return new DeleteHandler();
-      case QUERY_POST:
-        return new QueryCreateHandler();
-      default:
-        //todo:
-        throw new UnsupportedOperationException("Unsupported Request Type: " + requestType);
-    }
-  }
-}
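
End to end, the services layer resolves a handler from the HTTP verb and delegates to it. A dispatch sketch; getRequestType() is an assumption about the Request interface, named here only for illustration:

    Result dispatch(Request request) {
      // getRequestType() is a hypothetical accessor for the Request.Type value.
      RequestHandler handler =
          new RequestHandlerFactory().getRequestHandler(request.getRequestType());
      return handler.handleRequest(request);
    }
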
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/handlers/UpdateHandler.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/handlers/UpdateHandler.java
deleted file mode 100644
index 24c5480..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/handlers/UpdateHandler.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.handlers;
-
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.apache.ambari.server.api.services.*;
-import org.apache.ambari.server.controller.spi.*;
-
-import java.util.Map;
-import java.util.Set;
-
-
-/**
- * Responsible for update requests.
- */
-public class UpdateHandler extends BaseManagementHandler {
-
-  @Override
-  protected Result persist(ResourceInstance r, Set<Map<String, Object>> properties) {
-    Result result;
-    try {
-      RequestStatus status = getPersistenceManager().update(r, properties);
-
-      result = createResult(status);
-      if (result.isSynchronous()) {
-        result.setResultStatus(new ResultStatus(ResultStatus.STATUS.OK));
-      } else {
-        result.setResultStatus(new ResultStatus(ResultStatus.STATUS.ACCEPTED));
-      }
-
-    } catch (UnsupportedPropertyException e) {
-      result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.BAD_REQUEST, e));
-    } catch (NoSuchParentResourceException e) {
-      result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.NOT_FOUND, e));
-    } catch (NoSuchResourceException e) {
-      if (r.isCollectionResource()) {
-        //todo: what is the correct status code here.  The query didn't match any resource
-        //todo: so no resource were updated.  200 may be ok but we would need to return a collection
-        //todo: of resources that were updated.
-        result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.OK, e));
-      } else {
-        result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.NOT_FOUND, e));
-      }
-    } catch (SystemException e) {
-      result = new ResultImpl(new ResultStatus(ResultStatus.STATUS.SERVER_ERROR, e));
-    }
-
-    return result;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/InvalidQueryException.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/InvalidQueryException.java
deleted file mode 100644
index e2426d4..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/InvalidQueryException.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.predicate;
-
-/**
- * Exception indicating that a query compilation error occurred.
- */
-public class InvalidQueryException extends Exception {
-  /**
-   * Constructor.
-   *
-   * @param msg msg
-   */
-  public InvalidQueryException(String msg) {
-    super(msg);
-  }
-
-  /**
-   * Constructor.
-   *
-   * @param msg        msg
-   * @param throwable  root cause
-   */
-  public InvalidQueryException(String msg, Throwable throwable) {
-    super(msg, throwable);
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/PredicateCompiler.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/PredicateCompiler.java
deleted file mode 100644
index 7738d2c..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/PredicateCompiler.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.predicate;
-
-import org.apache.ambari.server.controller.spi.Predicate;
-
-/**
- * Compiler which takes a query expression as input and produces a predicate instance as output.
- */
-public class PredicateCompiler {
-
-  /**
-   * Lexer instance used to translate expressions into stream of tokens.
-   */
-  private QueryLexer lexer = new QueryLexer();
-
-  /**
-   * Parser instance used to produce a predicate instance from a stream of tokens.
-   */
-  private QueryParser parser = new QueryParser();
-
-  /**
-   * Generate a predicate from a query expression.
-   *
-   * @param exp  query expression
-   *
-   * @return a predicate instance
-   * @throws InvalidQueryException if unable to compile the expression
-   */
-  public Predicate compile(String exp) throws InvalidQueryException {
-    return parser.parse(lexer.tokens(exp));
-  }
-}
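
Taken together with the lexer below, the compiler turns an API query string into a Predicate tree in two passes: tokenize, then parse. A usage fragment; the property names are illustrative, and compile() throws InvalidQueryException on malformed input:

    PredicateCompiler compiler = new PredicateCompiler();
    // Expression syntax per QueryLexer below: relational operators, logical
    // & | !, brackets, and the .in(...) / .isEmpty(...) functions.
    Predicate predicate =
        compiler.compile("HostRoles/state=STARTED&HostRoles/host_name.in(h1,h2)");
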
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/QueryLexer.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/QueryLexer.java
deleted file mode 100644
index fc813a9..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/QueryLexer.java
+++ /dev/null
@@ -1,501 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.predicate;
-
-import java.util.*;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-/**
- * Scans a query expression and generates an array of tokens.
- * Each token contains type and value information.
- *
- * First, the query expression is broken down into string tokens using
- * a regular expression that splits on a set of delimiters, including
- * operators and brackets.
- *
- * Second, each string token is converted into a Token with type and value information.
- */
-public class QueryLexer {
-  /**
-   * All valid delimiters.
-   */
-  private static final String[] ALL_DELIMS =
-      {".in\\(",".isEmpty\\(","<=",">=","!=","=","<",">","&","|","!","(", ")"};
-
-  /**
-   * Map of token type to list of valid handlers for next token.
-   */
-  private static final Map<Token.TYPE, List<TokenHandler>> TOKEN_HANDLERS =
-      new HashMap<Token.TYPE, List<TokenHandler>>();
-
-  /**
-   * Set of property names to ignore.
-   */
-  private static final Set<String> SET_IGNORE = new HashSet<String>();
-
-  /**
-   * Constructor.
-   * Register token handlers.
-   */
-  public QueryLexer() {
-    //todo: refactor handler registration
-    List<TokenHandler> listHandlers = new ArrayList<TokenHandler>();
-    listHandlers.add(new LogicalUnaryOperatorTokenHandler());
-    listHandlers.add(new OpenBracketTokenHandler());
-    listHandlers.add(new PropertyOperandTokenHandler());
-
-    TOKEN_HANDLERS.put(Token.TYPE.BRACKET_OPEN, listHandlers);
-    TOKEN_HANDLERS.put(Token.TYPE.LOGICAL_OPERATOR, listHandlers);
-    TOKEN_HANDLERS.put(Token.TYPE.LOGICAL_UNARY_OPERATOR, listHandlers);
-
-    listHandlers= new ArrayList<TokenHandler>();
-    listHandlers.add(new RelationalOperatorTokenHandler());
-    listHandlers.add(new RelationalOperatorFuncTokenHandler());
-    TOKEN_HANDLERS.put(Token.TYPE.PROPERTY_OPERAND, listHandlers);
-
-    listHandlers = new ArrayList<TokenHandler>();
-    listHandlers.add(new ValueOperandTokenHandler());
-    TOKEN_HANDLERS.put(Token.TYPE.RELATIONAL_OPERATOR, listHandlers);
-
-    listHandlers = new ArrayList<TokenHandler>();
-    listHandlers.add(new CloseBracketTokenHandler());
-    listHandlers.add(new ValueOperandTokenHandler());
-    TOKEN_HANDLERS.put(Token.TYPE.RELATIONAL_OPERATOR_FUNC, listHandlers);
-
-    listHandlers = new ArrayList<TokenHandler>();
-    listHandlers.add(new CloseBracketTokenHandler());
-    listHandlers.add(new LogicalOperatorTokenHandler());
-    TOKEN_HANDLERS.put(Token.TYPE.VALUE_OPERAND, listHandlers);
-    TOKEN_HANDLERS.put(Token.TYPE.BRACKET_CLOSE, listHandlers);
-  }
-
-
-  /**
-   * Scan the provided query and generate a token stream to be used by the query parser.
-   *
-   * @param exp  the query expression to scan
-   *
-   * @return an array of tokens
-   * @throws InvalidQueryException if the query is invalid
-   */
-  public Token[] tokens(String exp) throws InvalidQueryException {
-
-    ScanContext ctx = new ScanContext();
-    for (String tok : parseStringTokens(exp)) {
-      List<TokenHandler> listHandlers = TOKEN_HANDLERS.get(ctx.getLastTokenType());
-      boolean            processed    = false;
-      int                idx          = 0;
-
-      while (!processed && idx < listHandlers.size()) {
-        processed = listHandlers.get(idx++).handleToken(tok, ctx);
-      }
-
-      if (! processed) {
-        throw new InvalidQueryException("Invalid Query Token: token='" +
-            tok + "\', previous token type=" + ctx.getLastTokenType());
-      }
-    }
-    return ctx.getTokenList().toArray(new Token[ctx.getTokenList().size()]);
-  }
-
-  /**
-   * Uses a regular expression to scan a query expression and produce a list of string tokens.
-   * These tokens are the exact strings that exist in the original syntax.
-   *
-   * @param exp  the query expression
-   *
-   * @return list of string tokens from the query expression
-   */
-  private List<String> parseStringTokens(String exp) {
-    Pattern      pattern       = generatePattern();
-    Matcher      matcher       = pattern.matcher(exp);
-    List<String> listStrTokens = new ArrayList<String>();
-    int pos = 0;
-
-    while (matcher.find()) { // while there's a delimiter in the string
-      if (pos != matcher.start()) {
-        // add anything between the current and previous delimiter to the tokens list
-        listStrTokens.add(exp.substring(pos, matcher.start()));
-      }
-      listStrTokens.add(matcher.group()); // add the delimiter
-      pos = matcher.end(); // Remember end of delimiter
-    }
-    if (pos != exp.length()) {
-      // Add any chars remaining in the string after last delimiter
-      listStrTokens.add(exp.substring(pos));
-    }
-    return listStrTokens;
-  }
-
-  /**
-   * Generate the regex pattern to tokenize the query expression.
-   *
-   * @return the regex pattern
-   */
-  private Pattern generatePattern() {
-    StringBuilder sb = new StringBuilder();
-    sb.append('(');
-    for (String delim : ALL_DELIMS) { // For each delimiter
-      if (sb.length() != 1) sb.append('|');
-      sb.append('\\');
-      sb.append(delim);
-    }
-    sb.append(')');
-
-    return Pattern.compile(sb.toString());
-  }
-
-  /**
-   * Add property names that the lexer should ignore.
-   */
-  static {
-    // ignore values
-    SET_IGNORE.add("fields");
-    SET_IGNORE.add("_");
-  }
-
-  /**
-   * Scan context.  Provides contextual information related to the current scan.
-   */
-  private class ScanContext {
-    /**
-     * The last token type scanned.
-     */
-    private Token.TYPE m_lastType;
-
-    /**
-     * The last property operand value
-     */
-    private String m_propertyName;
-
-    /**
-     * List of tokens generated by the scan
-     */
-    private List<Token> m_listTokens = new ArrayList<Token>();
-
-    /**
-     * Whether the current expression should be ignored.
-     * This is used to ignore portions of the query string that are
-     * not query specific.
-     */
-    private boolean m_ignore = false;
-
-    /**
-     * Constructor.
-     */
-    private ScanContext() {
-      //init last type to the logical op type
-      m_lastType = Token.TYPE.LOGICAL_OPERATOR;
-    }
-
-    /**
-     * Set the ignore tokens flag.
-     *
-     * @param ignore  true to ignore tokens; false otherwise
-     */
-    public void setIgnoreTokens(boolean ignore) {
-      m_ignore = ignore;
-    }
-
-    /**
-     * Get the type of the last token.
-     *
-     * @return the type of the last token
-     */
-    public Token.TYPE getLastTokenType() {
-      return m_lastType;
-    }
-
-    /**
-     * Set the type of the last token.
-     *
-     * @param lastType  the type of the last token
-     */
-    public void setLastTokenType(Token.TYPE lastType) {
-      m_lastType = lastType;
-    }
-
-    /**
-     * Get the current property operand value.
-     * This is used to hold the property operand name until it is added, since
-     * the following relational operator token is added first.
-     *
-     * @return the current property operand value
-     */
-    public String getPropertyOperand() {
-      return m_propertyName;
-    }
-
-    /**
-     * Set the current property operand value.
-     * This is used to hold the property operand name until it is added, since
-     * the following relational operator token is added first.
-     *
-     * @param prop  the property operand name
-     */
-    public void setPropertyOperand(String prop) {
-      m_propertyName = prop;
-    }
-
-    /**
-     * Add a token.
-     *
-     * @param token  the token to add
-     */
-    public void addToken(Token token) {
-      if (! m_ignore) {
-        m_listTokens.add(token);
-      }
-    }
-
-    /**
-     * Get the list of generated tokens.
-     *
-     * @return the list of generated tokens
-     */
-    public List<Token> getTokenList() {
-      return m_listTokens;
-    }
-  }
-
-  /**
-   * Token handler base class.
-   * Token handlers are responsible for processing a specific token type.
-   */
-  private abstract class TokenHandler {
-    /**
-     * Provides base token handler functionality, then delegates to the individual concrete handlers.
-     *
-     * @param token   the token to process
-     * @param ctx     the scan context
-     *
-     * @return true if this handler processed the token; false otherwise
-     * @throws InvalidQueryException  if an invalid token is encountered
-     */
-    public boolean handleToken(String token, ScanContext ctx) throws InvalidQueryException {
-      if (handles(token, ctx.getLastTokenType())) {
-        _handleToken(token, ctx);
-        ctx.setLastTokenType(getType());
-        return true;
-      } else {
-        return false;
-      }
-    }
-
-    /**
-     * Process a token.
-     *
-     * @param token  the token to process
-     * @param ctx    the current scan context
-     * @throws InvalidQueryException if an invalid token is encountered
-     */
-    public abstract void _handleToken(String token, ScanContext ctx) throws InvalidQueryException;
-
-    /**
-     * Get the token handler type.
-     *
-     * @return the token handler type
-     */
-    public abstract Token.TYPE getType();
-
-    /**
-     * Determine if this handler handles the given token.
-     *
-     * @param token              the token string
-     * @param previousTokenType  the previous token type
-     *
-     * @return true if the handler handles the specified type; false otherwise
-     */
-    public abstract boolean handles(String token, Token.TYPE previousTokenType);
-  }
-
-  /**
-   * Property Operand token handler.
-   */
-  private class PropertyOperandTokenHandler extends TokenHandler {
-
-    @Override
-    public void _handleToken(String token, ScanContext ctx) throws InvalidQueryException {
-      //don't add prop name token until after operator token
-      if (! SET_IGNORE.contains(token)) {
-        ctx.setPropertyOperand(token);
-      } else {
-        ctx.setIgnoreTokens(true);
-        if (!ctx.getTokenList().isEmpty()) {
-          // remove '&' token that separates ignored token and query
-          ctx.getTokenList().remove(ctx.getTokenList().size() - 1);
-        }
-      }
-    }
-
-    @Override
-    public Token.TYPE getType() {
-      return Token.TYPE.PROPERTY_OPERAND;
-    }
-
-    @Override
-    public boolean handles(String token, Token.TYPE previousTokenType) {
-      return token.matches("[^!&\\|<=|>=|!=|=|<|>\\(\\)]+");
-    }
-  }
-
-  /**
-   * Value Operand token handler.
-   */
-  private class ValueOperandTokenHandler extends TokenHandler {
-    @Override
-    public void _handleToken(String token, ScanContext ctx) throws InvalidQueryException {
-      ctx.addToken(new Token(Token.TYPE.VALUE_OPERAND, token));
-    }
-
-    @Override
-    public Token.TYPE getType() {
-      return Token.TYPE.VALUE_OPERAND;
-    }
-
-    @Override
-    public boolean handles(String token, Token.TYPE previousTokenType) {
-      return token.matches("[^!&\\|<=|>=|!=|=|<|>]+");
-    }
-  }
-
-  /**
-   * Open Bracket token handler.
-   */
-  private class OpenBracketTokenHandler extends TokenHandler {
-    @Override
-    public void _handleToken(String token, ScanContext ctx) throws InvalidQueryException {
-      ctx.addToken(new Token(Token.TYPE.BRACKET_OPEN, token));
-    }
-
-    @Override
-    public Token.TYPE getType() {
-      return Token.TYPE.BRACKET_OPEN;
-    }
-
-    @Override
-    public boolean handles(String token, Token.TYPE previousTokenType) {
-      return token.matches("\\(");
-    }
-  }
-
-  /**
-   * Close Bracket token handler.
-   */
-  private class CloseBracketTokenHandler extends TokenHandler {
-    @Override
-    public void _handleToken(String token, ScanContext ctx) throws InvalidQueryException {
-      ctx.addToken(new Token(Token.TYPE.BRACKET_CLOSE, token));
-    }
-
-    @Override
-    public Token.TYPE getType() {
-      return Token.TYPE.BRACKET_CLOSE;
-    }
-
-    @Override
-    public boolean handles(String token, Token.TYPE previousTokenType) {
-      return token.matches("\\)");
-    }
-  }
-
-  /**
-   * Relational Operator token handler.
-   */
-  private class RelationalOperatorTokenHandler extends TokenHandler {
-    @Override
-    public void _handleToken(String token, ScanContext ctx) throws InvalidQueryException {
-      ctx.addToken(new Token(Token.TYPE.RELATIONAL_OPERATOR, token));
-      ctx.addToken(new Token(Token.TYPE.PROPERTY_OPERAND, ctx.getPropertyOperand()));
-    }
-
-    @Override
-    public Token.TYPE getType() {
-      return Token.TYPE.RELATIONAL_OPERATOR;
-    }
-
-    @Override
-    public boolean handles(String token, Token.TYPE previousTokenType) {
-      return token.matches("<=|>=|!=|=|<|>");
-    }
-  }
-
-  /**
-   * Relational Operator function token handler.
-   */
-  private class RelationalOperatorFuncTokenHandler extends TokenHandler {
-    @Override
-    public void _handleToken(String token, ScanContext ctx) throws InvalidQueryException {
-      ctx.addToken(new Token(Token.TYPE.RELATIONAL_OPERATOR_FUNC, token));
-      ctx.addToken(new Token(Token.TYPE.PROPERTY_OPERAND, ctx.getPropertyOperand()));
-    }
-
-    @Override
-    public Token.TYPE getType() {
-      return Token.TYPE.RELATIONAL_OPERATOR_FUNC;
-    }
-
-    //todo: add a unary relational operator func
-    @Override
-    public boolean handles(String token, Token.TYPE previousTokenType) {
-      return token.matches("\\.[a-zA-Z]+\\(");
-    }
-  }
-
-
-  /**
-   * Logical Operator token handler.
-   */
-  private class LogicalOperatorTokenHandler extends TokenHandler {
-    @Override
-    public void _handleToken(String token, ScanContext ctx) throws InvalidQueryException {
-      ctx.addToken(new Token(Token.TYPE.LOGICAL_OPERATOR, token));
-      ctx.setIgnoreTokens(false);
-    }
-
-    @Override
-    public Token.TYPE getType() {
-      return Token.TYPE.LOGICAL_OPERATOR;
-    }
-
-    @Override
-    public boolean handles(String token, Token.TYPE previousTokenType) {
-      return token.matches("[!&\\|]");
-    }
-  }
-
-  /**
-   * Logical Unary Operator token handler.
-   */
-  private class LogicalUnaryOperatorTokenHandler extends TokenHandler {
-    @Override
-    public void _handleToken(String token, ScanContext ctx) throws InvalidQueryException {
-      ctx.addToken(new Token(Token.TYPE.LOGICAL_UNARY_OPERATOR, token));
-    }
-
-    @Override
-    public Token.TYPE getType() {
-      return Token.TYPE.LOGICAL_UNARY_OPERATOR;
-    }
-
-    @Override
-    public boolean handles(String token, Token.TYPE previousTokenType) {
-      return "!".equals(token);
-    }
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/QueryParser.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/QueryParser.java
deleted file mode 100644
index 629fcba..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/QueryParser.java
+++ /dev/null
@@ -1,514 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.predicate;
-
-import org.apache.ambari.server.api.predicate.expressions.Expression;
-import org.apache.ambari.server.api.predicate.expressions.LogicalExpressionFactory;
-import org.apache.ambari.server.api.predicate.expressions.RelationalExpression;
-import org.apache.ambari.server.api.predicate.operators.*;
-import org.apache.ambari.server.controller.spi.Predicate;
-
-import java.util.*;
-
-/**
- * Parser which produces a predicate instance from an array of tokens
- * generated by the lexer.
- */
-public class QueryParser {
-
-  /**
-   * Map of token type to token handlers.
-   */
-  private static final Map<Token.TYPE, TokenHandler> TOKEN_HANDLERS =
-      new HashMap<Token.TYPE, TokenHandler>();
-
-  /**
-   * Constructor.
-   * Register token handlers.
-   *
-   */
-  public QueryParser() {
-    TOKEN_HANDLERS.put(Token.TYPE.BRACKET_OPEN, new BracketOpenTokenHandler());
-    TOKEN_HANDLERS.put(Token.TYPE.BRACKET_CLOSE, new BracketCloseTokenHandler());
-    TOKEN_HANDLERS.put(Token.TYPE.RELATIONAL_OPERATOR, new RelationalOperatorTokenHandler());
-    TOKEN_HANDLERS.put(Token.TYPE.LOGICAL_OPERATOR, new LogicalOperatorTokenHandler());
-    TOKEN_HANDLERS.put(Token.TYPE.LOGICAL_UNARY_OPERATOR, new LogicalUnaryOperatorTokenHandler());
-    TOKEN_HANDLERS.put(Token.TYPE.PROPERTY_OPERAND, new PropertyOperandTokenHandler());
-    TOKEN_HANDLERS.put(Token.TYPE.VALUE_OPERAND, new ValueOperandTokenHandler());
-    TOKEN_HANDLERS.put(Token.TYPE.RELATIONAL_OPERATOR_FUNC, new RelationalOperatorFuncTokenHandler());
-  }
-
-  /**
-   * Generate a Predicate instance from an array of tokens.
-   * Each input token contains a type and a value.
-   *
-   * Based on the token type and location, the tokens are first translated into a list of
-   * expressions, both relational and logical.  These expressions are then merged into a tree
-   * of expressions with a single root following operator precedence and explicit grouping rules.
-   * Depending on the query, this merging of expressions into a tree of expressions may occur in
-   * several passes, one pass per level of precedence starting at the highest level of precedence.
-   *
-   * The predicate is built by traversing the expression tree in-order, with each node expressing itself
-   * as a predicate.
-   *
-   * @param tokens  an array of tokens which represent the query,
-   *                each token contains type and value information
-   *
-   * @return a new predicate instance based on the supplied tokens
-   * @throws InvalidQueryException if unable to parse the tokens and produce a predicate
-   */
-  public Predicate parse(Token[] tokens) throws InvalidQueryException {
-    ParseContext ctx = parseExpressions(tokens);
-
-    List<Expression> listExpressions       = ctx.getExpressions();
-    List<Expression> listMergedExpressions = mergeExpressions(listExpressions, ctx.getMaxPrecedence());
-
-    return listMergedExpressions.isEmpty() ? null :
-        listMergedExpressions.get(0).toPredicate();
-  }
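  // Editor's note: a hedged sketch of a parse, built directly from the Token
  // API (calls shown without the declared InvalidQueryException handling;
  // property and value names are illustrative, and "&" is assumed to map to
  // the AND operator in LogicalOperatorFactory):
  Predicate p = new QueryParser().parse(new Token[] {
      new Token(Token.TYPE.RELATIONAL_OPERATOR, "="),
      new Token(Token.TYPE.PROPERTY_OPERAND, "a"),
      new Token(Token.TYPE.VALUE_OPERAND, "1"),
      new Token(Token.TYPE.LOGICAL_OPERATOR, "&"),
      new Token(Token.TYPE.RELATIONAL_OPERATOR, "="),
      new Token(Token.TYPE.PROPERTY_OPERAND, "b"),
      new Token(Token.TYPE.VALUE_OPERAND, "2")
  });
  // p is the predicate for (a=1 AND b=2): two relational expressions merged
  // under the logical expression created for "&".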
-
-  /**
-   * Create parse context from an array of tokens. The parse context contains a list of expressions
-   * and other information about the expressions and parsed tokens.
-   *
-   * @param tokens  an array of tokens which represent the query,
-   *                each token contains type and value information
-   *
-   * @return a parse context which contains a list of expressions
-   * @throws InvalidQueryException if unable to properly parse the tokens into a parse context
-   */
-  private ParseContext parseExpressions(Token[] tokens) throws InvalidQueryException {
-    ParseContext ctx = new ParseContext(tokens);
-
-    while (ctx.getCurrentTokensIndex() < tokens.length) {
-      TOKEN_HANDLERS.get(tokens[ctx.getCurrentTokensIndex()].getType()).handleToken(ctx);
-    }
-
-    if (ctx.getPrecedenceLevel() != 0) {
-      throw new InvalidQueryException("Invalid query string: mismatched parentheses.");
-    }
-
-    return ctx;
-  }
-
-  /**
-   * Merge list of expressions into a tree of logical/relational expressions.
-   * This is done recursively in several passes, one per level of precedence starting at the
-   * highest precedence level. Recursion exits when a single expression remains.
-   *
-   * @param listExpressions  list of expressions to merge
-   * @param precedenceLevel  the precedence level that is to be merged
-   *
-   * @return  tree of expressions with a single root expression
-   */
-  private List<Expression> mergeExpressions(List<Expression> listExpressions, int precedenceLevel) {
-    if (listExpressions.size() > 1) {
-      Stack<Expression> stack = new Stack<Expression>();
-
-      stack.push(listExpressions.remove(0));
-      while (! listExpressions.isEmpty()) {
-        Expression exp = stack.pop();
-        Expression left = stack.empty() ? null : stack.pop();
-        Expression right = listExpressions.remove(0);
-        stack.addAll(exp.merge(left, right, precedenceLevel));
-      }
-      return mergeExpressions(new ArrayList<Expression>(stack), precedenceLevel - 1);
-    }
-    return listExpressions;
-  }
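  // Editor's walkthrough (hedged): for the expression list built from
  // "a=1|b=2&c=3" -- [a=1, OR, b=2, AND, c=3] -- and assuming the OR operator
  // (defined outside this excerpt) has a lower base precedence than
  // AndOperator's 2, merging runs one pass per precedence level:
  //   pass at level 2: AND takes its neighbours -> [a=1, OR, AND(b=2, c=3)]
  //   pass at level 1: OR takes its neighbours  -> [OR(a=1, AND(b=2, c=3))]
  // A single root remains, recursion stops, and parse() calls toPredicate()
  // on it.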
-
-  /**
-   * A parse context which contains information related to parsing the provided tokens into expressions.
-   */
-  private class ParseContext {
-    /**
-     * The current context precedence level.  This is dictated by bracket tokens.
-     */
-    private int m_precedence = 0;
-
-    /**
-     * Current position in tokens array
-     */
-    private int m_tokensIdx = 0;
-
-    /**
-     * Tokens
-     */
-    private Token[] m_tokens;
-
-    /**
-     * The type of the previous token used in validation.
-     */
-    private Token.TYPE m_previousTokenType = null;
-
-    /**
-     * The list of expressions which are generated from the tokens.
-     */
-    private List<Expression> m_listExpressions = new ArrayList<Expression>();
-
-    /**
-     * Highest precedence level in expression.
-     */
-    int m_maxPrecedence = 0;
-
-    public ParseContext(Token[] tokens) {
-      m_tokens = tokens;
-    }
-
-    /**
-     * Get array of all tokens.
-     * @return token array
-     */
-    public Token[] getTokens() {
-      return m_tokens;
-    }
-
-    /**
-     * Get the current position in the tokens array.
-     * @return the current tokens index
-     */
-    public int getCurrentTokensIndex() {
-      return m_tokensIdx;
-    }
-
-    /**
-     * Set the current position in the tokens array.
-     * Each handler should set this value after processing one or more tokens.
-     * @param idx  current tokens index
-     */
-    public void setCurrentTokensIndex(int idx) {
-      m_tokensIdx = idx;
-    }
-
-    /**
-     * Increment the context precedence level.
-     *
-     * @param val  how much the level is increased by
-     */
-    public void incPrecedenceLevel(int val) {
-      m_precedence += val;
-    }
-
-    /**
-     * Decrement the context precedence level.
-     *
-     * @param val  how much the level is decremented by
-     * @throws InvalidQueryException if the level is decremented below 0
-     */
-    public void decPrecedenceLevel(int val) throws InvalidQueryException {
-      m_precedence -= val;
-      if (m_precedence < 0) {
-        throw new InvalidQueryException("Invalid query string: mismatched parentheses.");
-      }
-    }
-
-    /**
-     * Get the current context precedence level.
-     *
-     * @return current context precedence level
-     */
-    public int getPrecedenceLevel() {
-      return m_precedence;
-    }
-
-    /**
-     * Get the list of generated expressions.
-     *
-     * @return the list of generated expressions
-     */
-    public List<Expression> getExpressions() {
-      return m_listExpressions;
-    }
-
-    /**
-     * Get the last expression.
-     *
-     * @return the last expression
-     */
-    public Expression getPrecedingExpression() {
-      return m_listExpressions == null ? null :
-          m_listExpressions.get(m_listExpressions.size() - 1);
-    }
-
-    /**
-     * Get the highest operator precedence in the list of generated expressions.
-     *
-     * @return the max operator precedence
-     */
-    public int getMaxPrecedence() {
-      return m_maxPrecedence;
-    }
-
-    /**
-     * Update the max precedence level.
-     * The max precedence level is only updated if the provided level is greater than the current level.
-     *
-     * @param precedenceLevel the new value
-     */
-    public void updateMaxPrecedence(int precedenceLevel) {
-      if (precedenceLevel > m_maxPrecedence) {
-        m_maxPrecedence = precedenceLevel;
-      }
-    }
-
-    /**
-     * Add a generated expression.
-     *
-     * @param exp  the expression to add
-     */
-    public void addExpression(Expression exp) {
-      m_listExpressions.add(exp);
-    }
-
-    /**
-     * Record the type of the current token.
-     * It is exposed as the previous token type when the next token is validated.
-     *
-     * @param type  type of the current token
-     */
-    private void setTokenType(Token.TYPE type) {
-      m_previousTokenType = type;
-    }
-
-    /**
-     * Get the last token type set.
-     *
-     * @return the last token type set
-     */
-    public Token.TYPE getPreviousTokenType() {
-      return m_previousTokenType;
-    }
-  }
-
-
-  /**
-   * Base token handler.
-   * Token handlers are responsible for processing a specific token type.
-   */
-  private abstract class TokenHandler {
-    /**
-     * Process a token. Handles common token processing functionality then delegates to the individual
-     * concrete handlers.
-     *
-     * @param ctx    the current parse context
-     * @throws InvalidQueryException if unable to process the token
-     */
-    public void handleToken(ParseContext ctx) throws InvalidQueryException {
-      Token token = ctx.getTokens()[ctx.getCurrentTokensIndex()];
-      if (! validate(ctx.getPreviousTokenType())) {
-        throw new InvalidQueryException("Unexpected token encountered in query string. Last Token Type=" +
-            ctx.getPreviousTokenType() + ", Current Token[type=" + token.getType() +
-            ", value='" + token.getValue() + "']");
-      }
-      ctx.setTokenType(token.getType());
-
-      int idxIncrement = _handleToken(ctx);
-      ctx.setCurrentTokensIndex(ctx.getCurrentTokensIndex() + idxIncrement);
-    }
-
-    /**
-     * Process a token.
-     *
-     * @param ctx    the current parse context
-     * @throws InvalidQueryException if unable to process the token
-     */
-    public abstract int _handleToken(ParseContext ctx) throws InvalidQueryException;
-
-    /**
-     * Validate the token based on the previous token.
-     *
-     * @param previousTokenType  the type of the previous token
-     * @return true if validation is successful, false otherwise
-     */
-    public abstract boolean validate(Token.TYPE previousTokenType);
-  }
-
-  /**
-   * Open Bracket token handler.
-   */
-  private class BracketOpenTokenHandler extends TokenHandler {
-
-    @Override
-    public int _handleToken(ParseContext ctx) {
-      ctx.incPrecedenceLevel(Operator.MAX_OP_PRECEDENCE);
-      return 1;
-    }
-
-    @Override
-    public boolean validate(Token.TYPE previousTokenType) {
-      return previousTokenType == null                     ||
-          previousTokenType == Token.TYPE.BRACKET_OPEN     ||
-          previousTokenType == Token.TYPE.LOGICAL_OPERATOR ||
-          previousTokenType == Token.TYPE.LOGICAL_UNARY_OPERATOR;
-    }
-  }
-
-  /**
-   * Close Bracket token handler
-   */
-  private class BracketCloseTokenHandler extends TokenHandler {
-    @Override
-    public int _handleToken(ParseContext ctx) throws InvalidQueryException {
-      ctx.decPrecedenceLevel(Operator.MAX_OP_PRECEDENCE);
-
-      return 1;
-    }
-
-    @Override
-    public boolean validate(Token.TYPE previousTokenType) {
-      return previousTokenType == Token.TYPE.VALUE_OPERAND ||
-             previousTokenType == Token.TYPE.BRACKET_CLOSE;
-    }
-  }
-
-  /**
-   * Relational Operator token handler
-   */
-  private class RelationalOperatorTokenHandler extends TokenHandler {
-    @Override
-    public int _handleToken(ParseContext ctx) throws InvalidQueryException {
-      Token token = ctx.getTokens()[ctx.getCurrentTokensIndex()];
-      RelationalOperator relationalOp = RelationalOperatorFactory.createOperator(token.getValue());
-      //todo: use factory to create expression
-      ctx.addExpression(new RelationalExpression(relationalOp));
-
-      return 1;
-    }
-
-    @Override
-    public boolean validate(Token.TYPE previousTokenType) {
-      return previousTokenType == null                     ||
-          previousTokenType == Token.TYPE.BRACKET_OPEN     ||
-          previousTokenType == Token.TYPE.LOGICAL_OPERATOR ||
-          previousTokenType == Token.TYPE.LOGICAL_UNARY_OPERATOR;
-    }
-  }
-
-  /**
-   * Relational Operator function token handler
-   */
-  private class RelationalOperatorFuncTokenHandler extends TokenHandler {
-    @Override
-    public int _handleToken(ParseContext ctx) throws InvalidQueryException {
-      Token[]            tokens       = ctx.getTokens();
-      int                idx          = ctx.getCurrentTokensIndex();
-      Token              token        = tokens[idx];
-      RelationalOperator relationalOp = RelationalOperatorFactory.createOperator(token.getValue());
-
-      ctx.addExpression(new RelationalExpression(relationalOp));
-      ctx.setCurrentTokensIndex(++idx);
-
-      TokenHandler propertyHandler = new PropertyOperandTokenHandler();
-      propertyHandler.handleToken(ctx);
-
-      // handle right operand if applicable to operator
-      idx = ctx.getCurrentTokensIndex();
-      if (ctx.getCurrentTokensIndex() < tokens.length &&
-          tokens[idx].getType().equals(Token.TYPE.VALUE_OPERAND)) {
-        TokenHandler valueHandler = new ValueOperandTokenHandler();
-        valueHandler.handleToken(ctx);
-      }
-
-      // skip closing bracket
-      idx = ctx.getCurrentTokensIndex();
-      if (idx >= tokens.length || tokens[idx].getType() != Token.TYPE.BRACKET_CLOSE) {
-        throw new InvalidQueryException("Missing closing bracket for in expression.") ;
-      }
-      return 1;
-    }
-
-    @Override
-    public boolean validate(Token.TYPE previousTokenType) {
-      return previousTokenType == null                     ||
-          previousTokenType == Token.TYPE.BRACKET_OPEN     ||
-          previousTokenType == Token.TYPE.LOGICAL_OPERATOR ||
-          previousTokenType == Token.TYPE.LOGICAL_UNARY_OPERATOR;
-    }
-  }
-
-  /**
-   * Logical Operator token handler
-   */
-  private class LogicalOperatorTokenHandler extends TokenHandler {
-    @Override
-    public int _handleToken(ParseContext ctx) throws InvalidQueryException {
-      Token token = ctx.getTokens()[ctx.getCurrentTokensIndex()];
-      LogicalOperator logicalOp = LogicalOperatorFactory.createOperator(token.getValue(), ctx.getPrecedenceLevel());
-      ctx.updateMaxPrecedence(logicalOp.getPrecedence());
-      ctx.addExpression(LogicalExpressionFactory.createLogicalExpression(logicalOp));
-
-      return 1;
-    }
-
-    @Override
-    public boolean validate(Token.TYPE previousTokenType) {
-      return previousTokenType == Token.TYPE.VALUE_OPERAND ||
-             previousTokenType == Token.TYPE.BRACKET_CLOSE;
-    }
-  }
-
-  /**
-   * Logical Unary Operator token handler
-   */
-  private class LogicalUnaryOperatorTokenHandler extends LogicalOperatorTokenHandler {
-    @Override
-    public boolean validate(Token.TYPE previousTokenType) {
-      return previousTokenType == null                 ||
-          previousTokenType == Token.TYPE.BRACKET_OPEN ||
-          previousTokenType == Token.TYPE.LOGICAL_OPERATOR;
-    }
-  }
-
-  /**
-   * Property Operand token handler
-   */
-  private class PropertyOperandTokenHandler extends TokenHandler {
-    @Override
-    public int _handleToken(ParseContext ctx) throws InvalidQueryException {
-      Token token = ctx.getTokens()[ctx.getCurrentTokensIndex()];
-      ctx.getPrecedingExpression().setLeftOperand(token.getValue());
-
-      return 1;
-    }
-
-    @Override
-    public boolean validate(Token.TYPE previousTokenType) {
-      return previousTokenType == Token.TYPE.RELATIONAL_OPERATOR ||
-          previousTokenType == Token.TYPE.RELATIONAL_OPERATOR_FUNC;
-    }
-  }
-
-  /**
-   * Value Operand token handler
-   */
-  private class ValueOperandTokenHandler extends TokenHandler {
-    @Override
-    public int _handleToken(ParseContext ctx) throws InvalidQueryException {
-      Token token = ctx.getTokens()[ctx.getCurrentTokensIndex()];
-      ctx.getPrecedingExpression().setRightOperand(token.getValue());
-
-      return 1;
-    }
-
-    @Override
-    public boolean validate(Token.TYPE previousTokenType) {
-      return previousTokenType == Token.TYPE.PROPERTY_OPERAND;
-    }
-  }
-}
-
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/Token.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/Token.java
deleted file mode 100644
index 3063f2b..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/Token.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.predicate;
-
-/**
- * Token representation which is generated by the lexer.
- * Contains type and value information.
- */
-public class Token {
-
-  /**
-   * Token types.
-   */
-  public enum TYPE {
-    /** Property name operand.  This is the left operand in relational expressions. */
-    PROPERTY_OPERAND,
-    /** Value operand.  This is the right operand in relational expressions. */
-    VALUE_OPERAND,
-    /** Relational operator */
-    RELATIONAL_OPERATOR,
-    /** Relational operator function */
-    RELATIONAL_OPERATOR_FUNC,
-    /** Logical operator */
-    LOGICAL_OPERATOR,
-    /** Logical unary operator, such as ! */
-    LOGICAL_UNARY_OPERATOR,
-    /** Opening bracket */
-    BRACKET_OPEN,
-    /** Closing bracket */
-    BRACKET_CLOSE
-  }
-
-  /**
-   * Token type.
-   */
-  private TYPE m_type;
-
-  /**
-   * Token value.
-   */
-  private String m_value;
-
-
-  /**
-   * Constructor.
-   *
-   * @param type   type
-   * @param value  value
-   */
-  public Token(TYPE type, String value) {
-    m_type = type;
-    m_value = value;
-  }
-
-  /**
-   * Get the token type.
-   * @return token type
-   */
-  public TYPE getType() {
-    return m_type;
-  }
-
-  /**
-   * Get the token value.
-   * @return token value
-   */
-  public String getValue() {
-    return m_value;
-  }
-
-  @Override
-  public String toString() {
-    return "Token{ type=" + m_type + ", value='" + m_value + "' }";
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    Token token = (Token) o;
-
-    return m_type == token.m_type &&
-        (m_value == null ? token.m_value == null : m_value.equals(token.m_value));
-  }
-
-  @Override
-  public int hashCode() {
-    int result = m_type.hashCode();
-    result = 31 * result + (m_value != null ? m_value.hashCode() : 0);
-
-    return result;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/expressions/AbstractExpression.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/expressions/AbstractExpression.java
deleted file mode 100644
index ddaaf7c..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/expressions/AbstractExpression.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.predicate.expressions;
-
-import org.apache.ambari.server.api.predicate.operators.Operator;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Base class for expressions.
- */
-public abstract class AbstractExpression<T> implements Expression<T> {
-
-  /**
-   * The operator.
-   */
-  private final Operator m_op;
-
-  /**
-   * The left operand.
-   * */
-  private T m_left = null;
-
-  /**
-   * The right operand.
-   */
-  private T m_right = null;
-
-  /**
-   * Constructor.
-   *
-   * @param op  the expressions operator
-   */
-  protected AbstractExpression(Operator op) {
-    m_op = op;
-  }
-
-  @Override
-  public void setLeftOperand(T left) {
-    m_left = left;
-  }
-
-  @Override
-  public void setRightOperand(T right) {
-    m_right = right;
-  }
-
-  @Override
-  public T getLeftOperand() {
-    return m_left;
-  }
-
-  @Override
-  public T getRightOperand() {
-    return m_right;
-  }
-
-  @Override
-  public Operator getOperator() {
-    return m_op;
-  }
-
-  @Override
-  public List<Expression> merge(Expression left, Expression right, int precedence) {
-    return defaultMerge(left, right);
-  }
-
-  /**
-   * Base merge implementation.
-   * No merging is done; it simply returns the left expression, this expression, and the right expression.
-   *
-   * @param left   the expression to the left of this expression
-   * @param right  the expression to the right of this expression
-   *
-   * @return a list containing the un-merged left expression, this expression, and the right expression
-   */
-  protected List<Expression> defaultMerge(Expression left, Expression right) {
-    List<Expression> listExpressions = new ArrayList<Expression>();
-    if (left != null) {
-      listExpressions.add(left);
-    }
-    listExpressions.add(this);
-    if (right != null) {
-      listExpressions.add(right);
-    }
-
-    return listExpressions;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/expressions/Expression.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/expressions/Expression.java
deleted file mode 100644
index 56cdba0..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/expressions/Expression.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.predicate.expressions;
-
-import org.apache.ambari.server.api.predicate.InvalidQueryException;
-import org.apache.ambari.server.api.predicate.operators.Operator;
-import org.apache.ambari.server.controller.spi.Predicate;
-
-import java.util.List;
-
-/**
- * Expression representation.
- * There are two types of expressions, relational and logical.
- * Each expression has an operator and either 2 operands for binary
- * expressions or 1 operand for unary expressions.
- */
-public interface Expression<T> {
-
-  /**
-   * Merge expression with surrounding expressions.
-   *
-   * @param left        the preceding expression
-   * @param right       the following expression
-   * @param precedence  the precedence level being merged.  Only expressions at this precedence level
-   *                    should be merged. Others should simply return the left expression, themselves and
-   *                    the right expression in that order.
-   *
-   * @return a list of expressions after merging.  Do not return any null elements.
-   */
-  public List<Expression> merge(Expression left, Expression right, int precedence);
-
-
-  /**
-   * Get the predicate representation of the expression.
-   * @return a predicate instance for the expression
-   */
-  public Predicate toPredicate() throws InvalidQueryException;
-
-  /**
-   * Set the expression's left operand.
-   *
-   * @param left  the left operand
-   */
-  public void setLeftOperand(T left);
-
-  /**
-   * Set the expression's right operand.
-   *
-   * @param right  the right operand
-   */
-  public void setRightOperand(T right);
-
-  /**
-   * Get the left operand expression.
-   *
-   * @return the left operand
-   */
-  public T getLeftOperand();
-
-  /**
-   * Get the right operand expression.
-   *
-   * @return the right operand.
-   */
-  public T getRightOperand();
-
-  /**
-   * Get the expression operator.
-   *
-   * @return the logical operator for the expression
-   */
-  public Operator getOperator();
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/expressions/LogicalExpression.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/expressions/LogicalExpression.java
deleted file mode 100644
index 465d151..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/expressions/LogicalExpression.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.predicate.expressions;
-
-import org.apache.ambari.server.api.predicate.InvalidQueryException;
-import org.apache.ambari.server.api.predicate.operators.LogicalOperator;
-import org.apache.ambari.server.controller.spi.Predicate;
-
-import java.util.Collections;
-import java.util.List;
-
-/**
- * Logical expression implementation.
- * Always a binary expression that consists of a logical operator and
- * expressions for the left and right operands.
- */
-public class LogicalExpression extends AbstractExpression<Expression> {
-
-  /**
-   * Constructor.
-   *
-   * @param op  the logical operator of the expression
-   */
-  public LogicalExpression(LogicalOperator op) {
-    super(op);
-  }
-
-
-  @Override
-  public Predicate toPredicate() throws InvalidQueryException {
-    return ((LogicalOperator) getOperator()).
-        toPredicate(getLeftOperand().toPredicate(), getRightOperand().toPredicate());
-  }
-
-  @Override
-  public List<Expression> merge(Expression left, Expression right, int precedence) {
-    if (getOperator().getPrecedence() == precedence && getLeftOperand() == null) {
-      setLeftOperand(left);
-      setRightOperand(right);
-      return Collections.<Expression>singletonList(this);
-    } else {
-      return defaultMerge(left, right);
-    }
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/expressions/LogicalExpressionFactory.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/expressions/LogicalExpressionFactory.java
deleted file mode 100644
index 283d135..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/expressions/LogicalExpressionFactory.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.predicate.expressions;
-
-import org.apache.ambari.server.api.predicate.InvalidQueryException;
-import org.apache.ambari.server.api.predicate.operators.LogicalOperator;
-
-/**
- * Factory of logical expression instances.
- */
-public class LogicalExpressionFactory {
-  /**
-   * Create a logical expression instance.
-   *
-   * @param op  the logical operator
-   *
-   * @return a new logical expression instance
-   * @throws InvalidQueryException if an expression cannot be created for the operator
-   */
-  public static LogicalExpression createLogicalExpression(LogicalOperator op) throws InvalidQueryException {
-    switch (op.getType()) {
-      case AND:
-      case OR:
-        return new LogicalExpression(op);
-      case NOT:
-        return new NotLogicalExpression(op);
-      default:
-        throw new RuntimeException("An invalid logical operator type was encountered: " + op);
-    }
-  }
-}
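A hedged sketch of the factory's dispatch (AndOperator appears later in this
diff; its constructor argument is the bracket-context precedence, and the
declared InvalidQueryException handling is omitted here):

    LogicalExpression andExp = LogicalExpressionFactory.createLogicalExpression(new AndOperator(0));
    // AND and OR both yield a plain LogicalExpression; a NOT operator yields
    // NotLogicalExpression, whose merge() consumes only a right operand.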
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/expressions/NotLogicalExpression.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/expressions/NotLogicalExpression.java
deleted file mode 100644
index ec8dc70..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/expressions/NotLogicalExpression.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.predicate.expressions;
-
-import org.apache.ambari.server.api.predicate.InvalidQueryException;
-import org.apache.ambari.server.api.predicate.operators.LogicalOperator;
-import org.apache.ambari.server.controller.predicate.BasePredicate;
-import org.apache.ambari.server.controller.predicate.NotPredicate;
-import org.apache.ambari.server.controller.spi.Predicate;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * A 'NOT' logical expression representation.
- * Negates its right operand.
- */
-public class NotLogicalExpression extends LogicalExpression {
-  /**
-   * Constructor.
-   *
-   * @param op  the logical operator
-   */
-  public NotLogicalExpression(LogicalOperator op) {
-    super(op);
-  }
-
-  @Override
-  public List<Expression> merge(Expression left, Expression right, int precedence) {
-    if (getOperator().getPrecedence() == precedence && getRightOperand() == null) {
-      List<Expression> listExpressions = new ArrayList<Expression>();
-      if (left != null) {
-        listExpressions.add(left);
-      }
-      setRightOperand(right);
-      listExpressions.add(this);
-      return listExpressions;
-    } else {
-      // not at this precedence level, or already merged; defer to the default merge
-      return defaultMerge(left, right);
-    }
-  }
-
-  @Override
-  public Predicate toPredicate() throws InvalidQueryException {
-    //todo: remove need to down cast to BasePredicate
-    return new NotPredicate((BasePredicate) getRightOperand().toPredicate());
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/expressions/RelationalExpression.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/expressions/RelationalExpression.java
deleted file mode 100644
index 09d57a0..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/expressions/RelationalExpression.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.predicate.expressions;
-
-import org.apache.ambari.server.api.predicate.InvalidQueryException;
-import org.apache.ambari.server.api.predicate.operators.RelationalOperator;
-import org.apache.ambari.server.controller.spi.Predicate;
-
-/**
- * Relational Expression.
- * Consists of a property name for the left operand, a relational operator
- * and a value as the right operand.
- */
-public class RelationalExpression extends AbstractExpression<String> {
-
-  /**
-   * Constructor.
-   *
-   * @param op  relational operator
-   */
-  public RelationalExpression(RelationalOperator op) {
-    super(op);
-  }
-
-  @Override
-  public Predicate toPredicate() throws InvalidQueryException {
-    return ((RelationalOperator) getOperator()).
-        toPredicate(getLeftOperand(), getRightOperand());
-  }
-
-  @Override
-  public String toString() {
-    return "RelationalExpression{ property='" + getLeftOperand() + "\', value='"
-        + getRightOperand() + "\', op=" + getOperator() + " }";
-  }
-}
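A hedged sketch of a relational expression's life cycle, using the
EqualsOperator shown later in this diff (the property and value are
illustrative; the declared InvalidQueryException handling is omitted):

    RelationalExpression exp = new RelationalExpression(new EqualsOperator());
    exp.setLeftOperand("Hosts/host_name");   // property name
    exp.setRightOperand("host1");            // value
    Predicate predicate = exp.toPredicate(); // an EqualsPredicate<String>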
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/AbstractOperator.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/AbstractOperator.java
deleted file mode 100644
index 82ad21b..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/AbstractOperator.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.predicate.operators;
-
-/**
- * Base operator implementation.
- */
-public abstract class AbstractOperator implements Operator {
-  /**
-   * The precedence value for the current context.
-   */
-  private final int m_ctxPrecedence;
-
-
-  /**
-   * Constructor.
-   *
-   * @param ctxPrecedence  the context precedence value
-   */
-  protected AbstractOperator(int ctxPrecedence) {
-    m_ctxPrecedence = ctxPrecedence;
-  }
-
-  /**
-   * Return the base precedence for this operator.
-   * This is the value that is specific to the operator
-   * type and doesn't take context into account.
-   *
-   * @return the base precedence for this operator type
-   */
-  public int getBasePrecedence() {
-    // this value is used for all relational operators
-    // logical operators override this value
-    return -1;
-  }
-
-  @Override
-  public int getPrecedence() {
-    return getBasePrecedence() + m_ctxPrecedence;
-  }
-
-  @Override
-  public String toString() {
-    return getName();
-  }
-
-  /**
-   * Get the name of the operator.
-   *
-   * @return the operator name
-   */
-  public abstract String getName();
-}
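A hedged illustration of how context precedence composes with base precedence
(AndOperator, shown next, has base precedence 2; MAX_OP_PRECEDENCE is declared
on the Operator interface, outside this excerpt):

    int topLevel  = new AndOperator(0).getPrecedence();                          // 2
    int bracketed = new AndOperator(Operator.MAX_OP_PRECEDENCE).getPrecedence(); // 2 + MAX_OP_PRECEDENCE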
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/AndOperator.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/AndOperator.java
deleted file mode 100644
index 85c60eb..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/AndOperator.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.predicate.operators;
-
-import org.apache.ambari.server.controller.predicate.AndPredicate;
-import org.apache.ambari.server.controller.predicate.BasePredicate;
-import org.apache.ambari.server.controller.spi.Predicate;
-
-/**
- * And operator implementation.
- */
-public class AndOperator extends AbstractOperator implements LogicalOperator {
-
-  /**
-   * Constructor.
-   *
-   * @param ctxPrecedence  precedence value for the current context
-   */
-  public AndOperator(int ctxPrecedence) {
-    super(ctxPrecedence);
-  }
-
-  @Override
-  public TYPE getType() {
-    return TYPE.AND;
-  }
-
-  @Override
-  public String getName() {
-    return "AndOperator";
-  }
-
-  @Override
-  public int getBasePrecedence() {
-    return 2;
-  }
-
-  @Override
-  public Predicate toPredicate(Predicate left, Predicate right) {
-    //todo: refactor to not need down casts
-    return new AndPredicate((BasePredicate) left, (BasePredicate) right);
-  }
-
-  @Override
-  public String toString() {
-    return getName() + "[precedence=" + getPrecedence() + "]";
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/EqualsOperator.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/EqualsOperator.java
deleted file mode 100644
index ddd9e0d..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/EqualsOperator.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.predicate.operators;
-
-import org.apache.ambari.server.controller.predicate.EqualsPredicate;
-import org.apache.ambari.server.controller.spi.Predicate;
-
-/**
- * Equals operator implementation.
- */
-public class EqualsOperator extends AbstractOperator implements RelationalOperator {
-
-  /**
-   * Constructor.
-   */
-  public EqualsOperator() {
-    super(0);
-  }
-
-  @Override
-  public TYPE getType() {
-    return TYPE.EQUAL;
-  }
-
-  @Override
-  public Predicate toPredicate(String prop, String val) {
-    return new EqualsPredicate<String>(prop, val);
-  }
-
-  @Override
-  public String getName() {
-    return "EqualsOperator";
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/GreaterEqualsOperator.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/GreaterEqualsOperator.java
deleted file mode 100644
index 175a255..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/GreaterEqualsOperator.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.predicate.operators;
-
-import org.apache.ambari.server.controller.predicate.GreaterEqualsPredicate;
-import org.apache.ambari.server.controller.spi.Predicate;
-
-/**
- * Greater Than Or Equals operator implementation.
- */
-public class GreaterEqualsOperator extends AbstractOperator implements RelationalOperator {
-
-  /**
-   * Constructor.
-   */
-  public GreaterEqualsOperator() {
-    super(0);
-  }
-
-  @Override
-  public TYPE getType() {
-    return TYPE.GREATER_EQUAL;
-  }
-
-  @Override
-  public Predicate toPredicate(String prop, String val) {
-    return new GreaterEqualsPredicate<String>(prop, val);
-  }
-
-  @Override
-  public String getName() {
-    return "GreaterEqualsOperator";
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/GreaterOperator.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/GreaterOperator.java
deleted file mode 100644
index 832507c..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/GreaterOperator.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.predicate.operators;
-
-import org.apache.ambari.server.controller.predicate.GreaterPredicate;
-import org.apache.ambari.server.controller.spi.Predicate;
-
-/**
- * Greater Than operator implementation.
- */
-public class GreaterOperator extends AbstractOperator implements RelationalOperator {
-
-  /**
-   * Constructor.
-   */
-  public GreaterOperator() {
-    super(0);
-  }
-
-  @Override
-  public TYPE getType() {
-    return TYPE.GREATER;
-  }
-
-  @Override
-  public Predicate toPredicate(String prop, String val) {
-    return new GreaterPredicate<String>(prop, val);
-  }
-
-  @Override
-  public String getName() {
-    return "GreaterOperator";
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/InOperator.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/InOperator.java
deleted file mode 100644
index a879d7f..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/InOperator.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.api.predicate.operators;
-
-import org.apache.ambari.server.api.predicate.InvalidQueryException;
-import org.apache.ambari.server.controller.predicate.BasePredicate;
-import org.apache.ambari.server.controller.predicate.EqualsPredicate;
-import org.apache.ambari.server.controller.predicate.OrPredicate;
-import org.apache.ambari.server.controller.spi.Predicate;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * IN relational operator.
- * This is a binary operator which takes a comma-delimited right operand and
- * creates an equals predicate from the left operand and each right operand token.
- * The equals predicates are combined with an OR predicate.
- *
- */
-public class InOperator extends AbstractOperator implements RelationalOperator {
-
-  public InOperator() {
-    super(0);
-  }
-
-  @Override
-  public String getName() {
-    return "InOperator";
-  }
-
-  @Override
-  public Predicate toPredicate(String prop, String val) throws InvalidQueryException {
-
-    if (val == null) {
-      throw new InvalidQueryException("IN operator is missing a required right operand.");
-    }
-
-    String[] tokens = val.split(",");
-    List<EqualsPredicate> listPredicates = new ArrayList<EqualsPredicate>();
-    for (String token : tokens) {
-      listPredicates.add(new EqualsPredicate(prop, token.trim()));
-    }
-    return listPredicates.size() == 1 ? listPredicates.get(0) :
-        buildOrPredicate(listPredicates);
-  }
-
-  private OrPredicate buildOrPredicate(List<EqualsPredicate> listPredicates) {
-    return new OrPredicate(listPredicates.toArray(new BasePredicate[listPredicates.size()]));
-  }
-
-  @Override
-  public TYPE getType() {
-    return TYPE.IN;
-  }
-}
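For orientation, a minimal sketch of what InOperator.toPredicate produces; the property name and values are illustrative, not taken from this patch:

    import org.apache.ambari.server.api.predicate.InvalidQueryException;
    import org.apache.ambari.server.api.predicate.operators.InOperator;
    import org.apache.ambari.server.controller.spi.Predicate;

    public class InOperatorSketch {
      public static void main(String[] args) throws InvalidQueryException {
        // "HostRoles/state" and the two values are hypothetical examples.
        Predicate p = new InOperator().toPredicate("HostRoles/state", "INSTALLED,STARTED");
        // p is OrPredicate(EqualsPredicate("HostRoles/state", "INSTALLED"),
        //                  EqualsPredicate("HostRoles/state", "STARTED")).
        // A single-token operand yields a bare EqualsPredicate instead,
        // and a null operand raises InvalidQueryException.
        System.out.println(p);
      }
    }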
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/IsEmptyOperator.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/IsEmptyOperator.java
deleted file mode 100644
index f43a619..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/IsEmptyOperator.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.predicate.operators;
-
-import org.apache.ambari.server.api.predicate.InvalidQueryException;
-import org.apache.ambari.server.controller.predicate.CategoryIsEmptyPredicate;
-import org.apache.ambari.server.controller.spi.Predicate;
-
-/**
- * Operator used to determine whether a category is empty, meaning that it
- * contains no properties.
- */
-public class IsEmptyOperator extends AbstractOperator implements RelationalOperator {
-  public IsEmptyOperator() {
-    super(0);
-  }
-
-  @Override
-  public String getName() {
-    return "IsEmptyOperator";
-  }
-
-  @Override
-  public Predicate toPredicate(String prop, String val) throws InvalidQueryException {
-    if (val != null) {
-      throw new InvalidQueryException("'isEmpty' operator shouldn't have a right operand but one exists: " + val);
-    }
-    return new CategoryIsEmptyPredicate(prop);
-  }
-
-  @Override
-  public TYPE getType() {
-    return TYPE.IS_EMPTY;
-  }
-}
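A short sketch of the operand contract above, using a hypothetical category name:

    import org.apache.ambari.server.api.predicate.InvalidQueryException;
    import org.apache.ambari.server.api.predicate.operators.IsEmptyOperator;
    import org.apache.ambari.server.controller.spi.Predicate;

    public class IsEmptyOperatorSketch {
      public static void main(String[] args) throws InvalidQueryException {
        IsEmptyOperator op = new IsEmptyOperator();
        // "ServiceInfo" is an illustrative category, not taken from the patch.
        Predicate p = op.toPredicate("ServiceInfo", null);  // CategoryIsEmptyPredicate
        try {
          op.toPredicate("ServiceInfo", "x");               // right operand not allowed
        } catch (InvalidQueryException expected) {
          System.out.println(expected.getMessage());
        }
        System.out.println(p);
      }
    }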
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/LessEqualsOperator.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/LessEqualsOperator.java
deleted file mode 100644
index bd6e568..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/LessEqualsOperator.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.predicate.operators;
-
-import org.apache.ambari.server.controller.predicate.LessEqualsPredicate;
-import org.apache.ambari.server.controller.spi.Predicate;
-
-/**
- * Less Than or Equals operator implementation.
- */
-public class LessEqualsOperator extends AbstractOperator implements RelationalOperator {
-
-  /**
-   * Constructor.
-   */
-  public LessEqualsOperator() {
-    super(0);
-  }
-
-  @Override
-  public TYPE getType() {
-    return TYPE.LESS_EQUAL;
-  }
-
-  @Override
-  public Predicate toPredicate(String prop, String val) {
-    return new LessEqualsPredicate<String>(prop, val);
-  }
-
-  @Override
-  public String getName() {
-    return "LessEqualsOperator";
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/LessOperator.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/LessOperator.java
deleted file mode 100644
index 8cf378e..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/LessOperator.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.predicate.operators;
-
-import org.apache.ambari.server.controller.predicate.LessPredicate;
-import org.apache.ambari.server.controller.spi.Predicate;
-
-/**
- * Less Than operator implementation.
- */
-public class LessOperator extends AbstractOperator implements RelationalOperator {
-
-  /**
-   * Constructor.
-   */
-  public LessOperator() {
-    super(0);
-  }
-
-  @Override
-  public TYPE getType() {
-    return TYPE.LESS;
-  }
-
-  @Override
-  public Predicate toPredicate(String prop, String val) {
-    return new LessPredicate<String>(prop, val);
-  }
-
-  @Override
-  public String getName() {
-    return "LessOperator";
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/LogicalOperator.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/LogicalOperator.java
deleted file mode 100644
index 8881aad..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/LogicalOperator.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.predicate.operators;
-
-import org.apache.ambari.server.controller.spi.Predicate;
-
-/**
- * Logical operator external representation.
- */
-public interface LogicalOperator extends Operator {
-  /**
-   * Create a predicate for this logical operator.
-   *
-   * @param left   left operand
-   * @param right  right operand
-   * @return a predicate instance for this operator
-   */
-  public Predicate toPredicate(Predicate left, Predicate right);
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/LogicalOperatorFactory.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/LogicalOperatorFactory.java
deleted file mode 100644
index fb12075..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/LogicalOperatorFactory.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.predicate.operators;
-
-import org.apache.ambari.server.api.predicate.InvalidQueryException;
-
-/**
- * Factory of Logical Operators.
- */
-public class LogicalOperatorFactory {
-  /**
-   * Creates a logical operator based on the operator token.
-   *
-   * @param operator      string representation of operator
-   * @param ctxPrecedence precedence value of current context
-   *
-   * @return a logical operator instance
-   * @throws InvalidQueryException if the operator string is invalid
-   */
-  public static LogicalOperator createOperator(String operator, int ctxPrecedence)
-      throws InvalidQueryException {
-    if ("&".equals(operator)) {
-      return new AndOperator(ctxPrecedence);
-    } else if ("|".equals(operator)) {
-      return new OrOperator(ctxPrecedence);
-    } else if ("!".equals(operator)) {
-      return new NotOperator(ctxPrecedence);
-    } else {
-      throw new InvalidQueryException("Invalid Logical Operator Type: " + operator);
-    }
-  }
-}
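A sketch of the token mapping performed by this factory, with an illustrative context precedence of 0:

    import org.apache.ambari.server.api.predicate.InvalidQueryException;
    import org.apache.ambari.server.api.predicate.operators.LogicalOperator;
    import org.apache.ambari.server.api.predicate.operators.LogicalOperatorFactory;

    public class LogicalOperatorFactorySketch {
      public static void main(String[] args) throws InvalidQueryException {
        LogicalOperator and = LogicalOperatorFactory.createOperator("&", 0); // AndOperator
        LogicalOperator or  = LogicalOperatorFactory.createOperator("|", 0); // OrOperator
        LogicalOperator not = LogicalOperatorFactory.createOperator("!", 0); // NotOperator
        System.out.println(and + " " + or + " " + not);
      }
    }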
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/NotEqualsOperator.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/NotEqualsOperator.java
deleted file mode 100644
index d78a8e0..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/NotEqualsOperator.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.predicate.operators;
-
-import org.apache.ambari.server.controller.predicate.EqualsPredicate;
-import org.apache.ambari.server.controller.predicate.NotPredicate;
-import org.apache.ambari.server.controller.spi.Predicate;
-
-/**
- * Not Equals relational operator implementation.
- */
-public class NotEqualsOperator extends AbstractOperator implements RelationalOperator {
-
-  /**
-   * Constructor.
-   */
-  public NotEqualsOperator() {
-    super(0);
-  }
-
-  @Override
-  public TYPE getType() {
-    return TYPE.NOT_EQUAL;
-  }
-
-  @Override
-  public Predicate toPredicate(String prop, String val) {
-    return new NotPredicate(new EqualsPredicate<String>(prop, val));
-  }
-
-  @Override
-  public String getName() {
-    return "NotEqualsOperator";
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/NotOperator.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/NotOperator.java
deleted file mode 100644
index 60cb832..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/NotOperator.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.predicate.operators;
-
-import org.apache.ambari.server.controller.predicate.BasePredicate;
-import org.apache.ambari.server.controller.predicate.NotPredicate;
-import org.apache.ambari.server.controller.spi.Predicate;
-
-/**
- * Not unary operator implementation.
- */
-public class NotOperator extends AbstractOperator implements LogicalOperator {
-
-  /**
-   * Constructor.
-   *
-   * @param ctxPrecedence  the precedence value of the current context
-   */
-  public NotOperator(int ctxPrecedence) {
-    super(ctxPrecedence);
-  }
-
-  @Override
-  public TYPE getType() {
-    return TYPE.NOT;
-  }
-
-  @Override
-  public String getName() {
-    return "NotOperator";
-  }
-
-  @Override
-  public int getBasePrecedence() {
-    return 3;
-  }
-
-  @Override
-  public Predicate toPredicate(Predicate left, Predicate right) {
-    return new NotPredicate((BasePredicate) right);
-  }
-
-  @Override
-  public String toString() {
-    return getName() + "[precedence=" + getPrecedence() + "]";
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/Operator.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/Operator.java
deleted file mode 100644
index b3ced7a..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/Operator.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.predicate.operators;
-
-/**
- * Operator representation.
- */
-public interface Operator {
-
-  /**
-   * Operator types.
-   */
-  public enum TYPE {
-    LESS,
-    LESS_EQUAL,
-    GREATER,
-    GREATER_EQUAL,
-    EQUAL,
-    NOT_EQUAL,
-    AND,
-    OR,
-    NOT,
-    IN,
-    IS_EMPTY
-  }
-
-  /**
-   * The highest base operator precedence level.
-   */
-  public static final int MAX_OP_PRECEDENCE = 3;
-
-  /**
-   * Get the operator type.
-   *
-   * @return the operator type
-   */
-  public TYPE getType();
-
-  /**
-   * Obtain the precedence of the operator.
-   * This value is calculated from the operator's base precedence and the context of the
-   * surrounding expressions.  Operators with higher precedence values bind more tightly.
-   *
-   * @return  the precedence of this operator in its current context
-   */
-  public int getPrecedence();
-}
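For orientation, the base precedence values visible in this section of the patch; AndOperator's hunk appears elsewhere, so its value here is an assumption:

    // Base precedences, as declared by the operators deleted in this section:
    //   relational operators (=, !=, <, <=, >, >=, .in(, .isEmpty()  -> 0
    //   OrOperator.getBasePrecedence()                               -> 1
    //   NotOperator.getBasePrecedence()                              -> 3 (MAX_OP_PRECEDENCE)
    // AndOperator is not shown here; given the ordering above it presumably
    // returns 2, but that is an assumption.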
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/OrOperator.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/OrOperator.java
deleted file mode 100644
index a55059b..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/OrOperator.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.predicate.operators;
-
-import org.apache.ambari.server.controller.predicate.BasePredicate;
-import org.apache.ambari.server.controller.predicate.OrPredicate;
-import org.apache.ambari.server.controller.spi.Predicate;
-
-/**
- * Or operator implementation.
- */
-public class OrOperator extends AbstractOperator implements LogicalOperator {
-
-  /**
-   * Constructor.
-   *
-   * @param ctxPrecedence  precedence value for the current context
-   */
-  public OrOperator(int ctxPrecedence) {
-    super(ctxPrecedence);
-  }
-
-  @Override
-  public TYPE getType() {
-    return TYPE.OR;
-  }
-
-  @Override
-  public String getName() {
-    return "OrOperator";
-  }
-
-  @Override
-  public int getBasePrecedence() {
-    return 1;
-  }
-
-  @Override
-  public Predicate toPredicate(Predicate left, Predicate right) {
-    //todo: refactor to remove down casts
-    return new OrPredicate((BasePredicate) left, (BasePredicate) right);
-  }
-
-  @Override
-  public String toString() {
-    return getName() + "[precedence=" + getPrecedence() + "]";
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/RelationalOperator.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/RelationalOperator.java
deleted file mode 100644
index cbcba0c..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/RelationalOperator.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.predicate.operators;
-
-import org.apache.ambari.server.api.predicate.InvalidQueryException;
-import org.apache.ambari.server.controller.spi.Predicate;
-
-/**
- * Relational operator external representation.
- */
-public interface RelationalOperator extends Operator {
-  /**
-   * Create a predicate for this relational operator.
-   *
-   * @param prop  left operand
-   * @param val   right operand
-   * @return  a predicate instance for this operator.
-   * @throws  InvalidQueryException if unable to build the predicate because of invalid operands
-   */
-  public Predicate toPredicate(String prop, String val) throws InvalidQueryException;
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/RelationalOperatorFactory.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/RelationalOperatorFactory.java
deleted file mode 100644
index 4004ec3..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/predicate/operators/RelationalOperatorFactory.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.predicate.operators;
-
-import org.apache.ambari.server.api.predicate.InvalidQueryException;
-
-/**
- * Factory of relational operators.
- */
-public class RelationalOperatorFactory {
-  /**
-   * Create a relational operator based on the string representation
-   * of the operator.
-   *
-   * @param operator  the string representation of the operator
-   *
-   * @return relational operator for the given string
-   * @throws InvalidQueryException if an invalid operator is passed in
-   */
-  public static RelationalOperator createOperator(String operator) throws InvalidQueryException {
-    if ("!=".equals(operator)) {
-      return new NotEqualsOperator();
-    } else if ("=".equals(operator)) {
-      return new EqualsOperator();
-    } else if ("<=".equals(operator)) {
-      return new LessEqualsOperator();
-    } else if ("<".equals(operator)) {
-      return new LessOperator();
-    } else if (">=".equals(operator)) {
-      return new GreaterEqualsOperator();
-    } else if (">".equals(operator)) {
-      return new GreaterOperator();
-    } else if (".in(".equals(operator)) {
-      return new InOperator();
-    } else if (".isEmpty(".equals(operator)) {
-      return new IsEmptyOperator();
-    } else {
-      throw new InvalidQueryException("Invalid Operator Type: " + operator);
-    }
-  }
-}
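A sketch of a few tokens this factory accepts; each call may throw InvalidQueryException per the signature above:

    import org.apache.ambari.server.api.predicate.InvalidQueryException;
    import org.apache.ambari.server.api.predicate.operators.RelationalOperator;
    import org.apache.ambari.server.api.predicate.operators.RelationalOperatorFactory;

    public class RelationalOperatorFactorySketch {
      public static void main(String[] args) throws InvalidQueryException {
        RelationalOperator ge    = RelationalOperatorFactory.createOperator(">=");        // GreaterEqualsOperator
        RelationalOperator in    = RelationalOperatorFactory.createOperator(".in(");      // InOperator
        RelationalOperator empty = RelationalOperatorFactory.createOperator(".isEmpty("); // IsEmptyOperator
        // getType() is on the Operator interface; prints GREATER_EQUAL IN IS_EMPTY
        System.out.println(ge.getType() + " " + in.getType() + " " + empty.getType());
      }
    }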
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/query/Query.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/query/Query.java
deleted file mode 100644
index 76b2368..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/query/Query.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.query;
-
-import org.apache.ambari.server.api.services.Result;
-import org.apache.ambari.server.controller.spi.*;
-
-import java.util.Set;
-
-
-/**
- * Responsible for querying the back end for read requests
- */
-public interface Query {
-
-  /**
-   * Add a property to the query.
-   * This is the select portion of the query.
-   *
-   * @param group         the group name that contains the property
-   * @param property      the property name
-   * @param temporalInfo  temporal information for the property
-   */
-  public void addProperty(String group, String property, TemporalInfo temporalInfo);
-
-  /**
-   * Add a local (not sub-resource) property to the query.
-   * This is the select portion of the query.
-   *
-   * @param property the property id, which contains the group and property name
-   *                 and indicates whether the property is temporal
-   */
-  public void addLocalProperty(String property);
-
-  /**
-   * Obtain the properties of the query.
-   * These are the properties that make up the select portion of the query for which
-   * values are to be retrieved.
-   *
-   * @return the query properties
-   */
-  public Set<String> getProperties();
-
-  /**
-   * Execute the query.
-   *
-   * @return the result of the query.
-   *
-   * @throws UnsupportedPropertyException if the query or query predicate contains invalid non-existent properties
-   * @throws SystemException if an internal error occurred
-   * @throws NoSuchResourceException if the query didn't match any resources
-   * @throws NoSuchParentResourceException if a specified parent resource doesn't exist
-   */
-  public Result execute()
-      throws UnsupportedPropertyException, SystemException, NoSuchResourceException, NoSuchParentResourceException;
-
-  /**
-   * Return the predicate used to identify the associated resource.  This includes the primary key and
-   * all parent ids.
-   *
-   * @return the predicate used to identify the associated resource
-   */
-  public Predicate getPredicate();
-
-  /**
-   * Set the user-provided predicate on this query.
-   * This predicate will be "AND'd" with the internal query to produce the final predicate.
-   *
-   * @param predicate  the user provided predicate
-   */
-  public void setUserPredicate(Predicate predicate);
-}
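A hedged sketch of the intended call sequence; resourceInstance and userPredicate are hypothetical placeholders for however those objects are obtained:

    // Fragment, not a complete program. ResourceInstance.getQuery() is used
    // the same way inside QueryImpl for sub-resources.
    Query query = resourceInstance.getQuery();
    query.addLocalProperty("Clusters/cluster_name");  // select a local property
    query.addProperty("metrics", "cpu", null);        // null TemporalInfo = current value
    query.setUserPredicate(userPredicate);            // AND'd with the internal predicate
    Result result = query.execute();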
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/query/QueryImpl.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/query/QueryImpl.java
deleted file mode 100644
index c4ef338..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/query/QueryImpl.java
+++ /dev/null
@@ -1,349 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.query;
-
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.apache.ambari.server.api.services.ResultImpl;
-import org.apache.ambari.server.controller.utilities.ClusterControllerHelper;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.apache.ambari.server.controller.predicate.AndPredicate;
-import org.apache.ambari.server.controller.predicate.BasePredicate;
-import org.apache.ambari.server.controller.predicate.EqualsPredicate;
-import org.apache.ambari.server.api.services.Result;
-import org.apache.ambari.server.controller.spi.*;
-import org.apache.ambari.server.api.util.TreeNode;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.*;
-
-/**
- * Default read query.
- */
-public class QueryImpl implements Query {
-
-  /**
-   * Resource instance.
-   */
-  private ResourceInstance m_resource;
-
-  /**
-   * Properties of the query which make up the select portion of the query.
-   */
-  private Set<String> m_setQueryProperties = new HashSet<String>();
-
-  /**
-   * Indicates that the query should include all available properties.
-   */
-  private boolean allProperties = false;
-
-  /**
-   * Map that associates each property set on the query to temporal data.
-   */
-  private Map<String, TemporalInfo> m_mapPropertyTemporalInfo = new HashMap<String, TemporalInfo>();
-
-  /**
-   * Map that associates categories with temporal data.
-   */
-  private Map<String, TemporalInfo> m_mapCategoryTemporalInfo = new HashMap<String, TemporalInfo>();
-
-  /**
-   * Sub-resources of the resource which is being operated on.
-   */
-  private Map<String, ResourceInstance> m_mapSubResources = new HashMap<String, ResourceInstance>();
-
-  /**
-   * The user supplied predicate.
-   */
-  private Predicate m_userPredicate;
-
-  /**
-   * The logger.
-   */
-  private final static Logger LOG =
-      LoggerFactory.getLogger(QueryImpl.class);
-
-  /**
-   * Constructor.
-   *
-   * @param resource the resource being operated on
-   */
-  public QueryImpl(ResourceInstance resource) {
-    m_resource = resource;
-  }
-
-  @Override
-  public void addProperty(String category, String name, TemporalInfo temporalInfo) {
-    if (category == null && name.equals("*")) {
-      // wildcard
-      addAllProperties(temporalInfo);
-    } else{
-      if (!addPropertyToSubResource(category, name, temporalInfo)){
-        String propertyId = PropertyHelper.getPropertyId(category, name.equals("*") ? null : name);
-        addLocalProperty(propertyId);
-        if (temporalInfo != null) {
-          m_mapCategoryTemporalInfo.put(propertyId, temporalInfo);
-        }
-      }
-    }
-  }
-
-  @Override
-  public void addLocalProperty(String property) {
-    m_setQueryProperties.add(property);
-  }
-
-  @Override
-  public Result execute()
-      throws UnsupportedPropertyException, SystemException, NoSuchResourceException, NoSuchParentResourceException {
-
-    Result result = createResult();
-    Resource.Type resourceType = m_resource.getResourceDefinition().getType();
-    if (m_resource.getIds().get(resourceType) == null) {
-      addCollectionProperties(resourceType);
-      result.getResultTree().setProperty("isCollection", "true");
-    }
-
-    if (m_setQueryProperties.isEmpty() && m_mapSubResources.isEmpty()) {
-      // Add sub-resource properties for the default case where no fields are specified.
-      m_mapSubResources.putAll(m_resource.getSubResources());
-    }
-
-    if (LOG.isInfoEnabled()) {
-      //todo: include predicate info.  Need to implement toString for all predicates.
-      LOG.info("Executing resource query: " + m_resource.getIds());
-    }
-
-    Predicate predicate = createPredicate(m_resource);
-    Iterable<Resource> iterResource = getClusterController().getResources(
-        resourceType, createRequest(), predicate);
-
-    TreeNode<Resource> tree = result.getResultTree();
-    int count = 1;
-    for (Resource resource : iterResource) {
-      // add a child node for the resource and provide a unique name.  The name is never used.
-      //todo: provide a more meaningful node name
-      TreeNode<Resource> node = tree.addChild(resource, resource.getType() + ":" + count++);
-      for (Map.Entry<String, ResourceInstance> entry : m_mapSubResources.entrySet()) {
-        String subResCategory = entry.getKey();
-        ResourceInstance r = entry.getValue();
-
-        setParentIdsOnSubResource(resource, r);
-
-        TreeNode<Resource> childResult = r.getQuery().execute().getResultTree();
-        childResult.setName(subResCategory);
-        childResult.setProperty("isCollection", "false");
-        node.addChild(childResult);
-      }
-    }
-    return result;
-  }
-
-  @Override
-  public Predicate getPredicate() {
-    //todo: create predicate once
-    return createPredicate(m_resource);
-  }
-
-  @Override
-  public Set<String> getProperties() {
-    return Collections.unmodifiableSet(m_setQueryProperties);
-  }
-
-  @Override
-  public void setUserPredicate(Predicate predicate) {
-    m_userPredicate = predicate;
-  }
-
-  ClusterController getClusterController() {
-    return ClusterControllerHelper.getClusterController();
-  }
-
-  private void addCollectionProperties(Resource.Type resourceType) {
-    Schema schema = getClusterController().getSchema(resourceType);
-    // add pk
-    String property = schema.getKeyPropertyId(resourceType);
-    addProperty(PropertyHelper.getPropertyCategory(property), PropertyHelper.getPropertyName(property), null);
-
-    for (Resource.Type type : m_resource.getIds().keySet()) {
-      // add fk's
-      String keyPropertyId = schema.getKeyPropertyId(type);
-      //todo: property id can be null in some cases such as host_component queries which obtain
-      //todo: component sub-resources.  Component will not have host fk.
-      //todo: refactor so that null check is not required.
-      if (keyPropertyId != null) {
-        addProperty(PropertyHelper.getPropertyCategory(keyPropertyId), PropertyHelper.getPropertyName(keyPropertyId), null);
-      }
-    }
-  }
-
-  private void addAllProperties(TemporalInfo temporalInfo) {
-    allProperties = true;
-    if (temporalInfo != null) {
-      m_mapCategoryTemporalInfo.put(null, temporalInfo);
-    }
-
-    for (Map.Entry<String, ResourceInstance> entry : m_resource.getSubResources().entrySet()) {
-      String name = entry.getKey();
-      if (! m_mapSubResources.containsKey(name)) {
-        m_mapSubResources.put(name, entry.getValue());
-      }
-    }
-  }
-
-  private boolean addPropertyToSubResource(String path, String property, TemporalInfo temporalInfo) {
-    // cases:
-    // - path is null, property is path (all sub-resource props will have a path)
-    // - path is single token and prop in non null
-    //      (path only will presented as above case with property only)
-    // - path is multi level and prop is non null
-
-    boolean resourceAdded = false;
-    if (path == null) {
-      path = property;
-      property = null;
-    }
-
-    int i = path.indexOf("/");
-    String p = i == -1 ? path : path.substring(0, i);
-
-    ResourceInstance subResource = m_resource.getSubResources().get(p);
-    if (subResource != null) {
-      m_mapSubResources.put(p, subResource);
-      //todo: handle case of trailing '/' (for example fields=subResource/)
-
-      if (property != null || !path.equals(p)) {
-        //only add if a sub property is set or if a sub category is specified
-        subResource.getQuery().addProperty(i == -1 ? null : path.substring(i + 1), property, temporalInfo);
-      }
-      resourceAdded = true;
-    }
-    return resourceAdded;
-  }
-
-  private BasePredicate createInternalPredicate(ResourceInstance resource) {
-    Resource.Type resourceType = resource.getResourceDefinition().getType();
-    Map<Resource.Type, String> mapResourceIds = resource.getIds();
-    Schema schema = getClusterController().getSchema(resourceType);
-
-    Set<BasePredicate> setPredicates = new HashSet<BasePredicate>();
-    for (Map.Entry<Resource.Type, String> entry : mapResourceIds.entrySet()) {
-      if (entry.getValue() != null) {
-        String keyPropertyId = schema.getKeyPropertyId(entry.getKey());
-        if (keyPropertyId != null) {
-          setPredicates.add(new EqualsPredicate<String>(keyPropertyId, entry.getValue()));
-        }
-      }
-    }
-
-    if (setPredicates.size() == 1) {
-      return setPredicates.iterator().next();
-    } else if (setPredicates.size() > 1) {
-      return new AndPredicate(setPredicates.toArray(new BasePredicate[setPredicates.size()]));
-    } else {
-      return null;
-    }
-  }
-
-  private Predicate createPredicate(ResourceInstance resource) {
-    Predicate predicate = null;
-    //todo: change reference type to Predicate when predicate hierarchy is fixed
-    BasePredicate internalPredicate = createInternalPredicate(resource);
-    if (internalPredicate == null) {
-      if (m_userPredicate != null) {
-        predicate = m_userPredicate;
-      }
-    } else {
-      predicate = (m_userPredicate == null ? internalPredicate :
-          new AndPredicate((BasePredicate) m_userPredicate, internalPredicate));
-    }
-    return predicate;
-  }
-
-  private Request createRequest() {
-    Set<String> setProperties = new HashSet<String>();
-
-    Map<String, TemporalInfo> mapTemporalInfo    = new HashMap<String, TemporalInfo>();
-    TemporalInfo              globalTemporalInfo = m_mapCategoryTemporalInfo.get(null);
-
-    for (String group : m_setQueryProperties) {
-      TemporalInfo temporalInfo = m_mapCategoryTemporalInfo.get(group);
-      if (temporalInfo != null) {
-        mapTemporalInfo.put(group, temporalInfo);
-      } else if (globalTemporalInfo != null) {
-        mapTemporalInfo.put(group, globalTemporalInfo);
-      }
-      setProperties.add(group);
-    }
-
-    return PropertyHelper.getReadRequest(allProperties ? Collections.<String>emptySet() : setProperties, mapTemporalInfo);
-  }
-
-  private void setParentIdsOnSubResource(Resource resource, ResourceInstance r) {
-    Map<Resource.Type, String> mapParentIds = m_resource.getIds();
-    Map<Resource.Type, String> mapResourceIds = new HashMap<Resource.Type, String>(mapParentIds.size());
-    for (Map.Entry<Resource.Type, String> resourceIdEntry : mapParentIds.entrySet()) {
-      Resource.Type type = resourceIdEntry.getKey();
-      String value = resourceIdEntry.getValue();
-
-      if (value == null) {
-        Object o = resource.getPropertyValue(getClusterController().getSchema(type).getKeyPropertyId(type));
-        value = o == null ? null : o.toString();
-      }
-      if (value != null) {
-        mapResourceIds.put(type, value);
-      }
-    }
-    String resourceKeyProp = getClusterController().getSchema(resource.getType()).
-        getKeyPropertyId(resource.getType());
-    //todo: shouldn't use toString here
-    mapResourceIds.put(resource.getType(), resource.getPropertyValue(resourceKeyProp).toString());
-    r.setIds(mapResourceIds);
-  }
-
-  Result createResult() {
-    return new ResultImpl(true);
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    QueryImpl that = (QueryImpl) o;
-
-    return m_mapCategoryTemporalInfo.equals(that.m_mapCategoryTemporalInfo) &&
-           m_mapPropertyTemporalInfo.equals(that.m_mapPropertyTemporalInfo) &&
-           m_setQueryProperties.equals(that.m_setQueryProperties) &&
-           m_mapSubResources.equals(that.m_mapSubResources) &&
-           m_resource.equals(that.m_resource) &&
-           (m_userPredicate == null ? that.m_userPredicate == null : m_userPredicate.equals(that.m_userPredicate));
-  }
-
-  @Override
-  public int hashCode() {
-    int result = m_resource.hashCode();
-    result = 31 * result + m_setQueryProperties.hashCode();
-    result = 31 * result + m_mapPropertyTemporalInfo.hashCode();
-    result = 31 * result + m_mapCategoryTemporalInfo.hashCode();
-    result = 31 * result + m_mapSubResources.hashCode();
-    result = 31 * result + (m_userPredicate != null ? m_userPredicate.hashCode() : 0);
-    return result;
-  }
-}
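For readers tracing createPredicate above, its combination rule in brief:

    // internal == null && user == null  -> null      (no row filtering)
    // internal != null && user == null  -> internal  (key-based predicate only)
    // internal == null && user != null  -> user
    // internal != null && user != null  -> new AndPredicate((BasePredicate) user, internal)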
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ActionResourceDefinition.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ActionResourceDefinition.java
deleted file mode 100644
index 2a28686..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ActionResourceDefinition.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.api.resources;
-
-
-import org.apache.ambari.server.controller.spi.Resource;
-
-
-public class ActionResourceDefinition extends BaseResourceDefinition {
-
-
-  public ActionResourceDefinition() {
-    super(Resource.Type.Action);
-  }
-  
-  @Override
-  public String getPluralName() {
-    return "actions";
-  }
-
-  @Override
-  public String getSingularName() {
-    return "action";
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/BaseResourceDefinition.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/BaseResourceDefinition.java
deleted file mode 100644
index 8076879..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/BaseResourceDefinition.java
+++ /dev/null
@@ -1,117 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.resources;
-
-
-import org.apache.ambari.server.api.services.Request;
-import org.apache.ambari.server.api.util.TreeNode;
-import org.apache.ambari.server.controller.spi.ClusterController;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.spi.Schema;
-import org.apache.ambari.server.controller.utilities.ClusterControllerHelper;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Set;
-
-/**
- * Base resource definition.  Contains behavior common to all resource types.
- */
-public abstract class BaseResourceDefinition implements ResourceDefinition {
-
-  /**
-   * Resource type.  One of {@link Resource.Type}
-   */
-  private Resource.Type m_type;
-
-
-  /**
-   * Constructor.
-   *
-   * @param resourceType resource type
-   */
-  public BaseResourceDefinition(Resource.Type resourceType) {
-    m_type = resourceType;
-  }
-
-  @Override
-  public Resource.Type getType() {
-    return m_type;
-  }
-
-  @Override
-  public Set<SubResourceDefinition> getSubResourceDefinitions() {
-    return Collections.emptySet();
-  }
-
-  @Override
-  public List<PostProcessor> getPostProcessors() {
-    List<PostProcessor> listProcessors = new ArrayList<PostProcessor>();
-    listProcessors.add(new BaseHrefPostProcessor());
-
-    return listProcessors;
-  }
-
-  ClusterController getClusterController() {
-    return ClusterControllerHelper.getClusterController();
-  }
-
-  @Override
-  public boolean equals(Object o) {
-      if (this == o) {
-          return true;
-      }
-      if (o instanceof BaseResourceDefinition) {
-          BaseResourceDefinition other = (BaseResourceDefinition) o;
-          return m_type == other.m_type;
-      }
-      return false;
-  }
-
-  @Override
-  public int hashCode() {
-    return m_type.hashCode();
-  }
-
-  class BaseHrefPostProcessor implements PostProcessor {
-    @Override
-    public void process(Request request, TreeNode<Resource> resultNode, String href) {
-      Resource r = resultNode.getObject();
-      TreeNode<Resource> parent = resultNode.getParent();
-
-      if (parent.getName() != null) {
-        Schema schema = getClusterController().getSchema(r.getType());
-        Object id = r.getPropertyValue(schema.getKeyPropertyId(r.getType()));
-
-        int i = href.indexOf("?");
-        if (i != -1) {
-          href = href.substring(0, i);
-        }
-
-        if (!href.endsWith("/")) {
-          href = href + '/';
-        }
-        href = "true".equals(parent.getProperty("isCollection")) ?
-            href + id : href + parent.getName() + '/' + id;
-      }
-      resultNode.setProperty("href", href);
-    }
-  }
-}
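A sketch of the href rule in BaseHrefPostProcessor, with illustrative URLs:

    // Any query string ("?...") is stripped and a trailing '/' ensured, then:
    //   parent marked isCollection=true:
    //     .../clusters        ->  .../clusters/c1
    //   parent named (e.g.) "services", not a collection:
    //     .../clusters/c1     ->  .../clusters/c1/services/HDFS
    // where the appended id is the resource's key property value; "c1" and
    // "HDFS" are hypothetical.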
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ClusterResourceDefinition.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ClusterResourceDefinition.java
deleted file mode 100644
index 127ef16..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ClusterResourceDefinition.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.resources;
-
-import java.util.HashSet;
-import java.util.Set;
-import org.apache.ambari.server.controller.spi.Resource;
-
-/**
- * Cluster resource definition.
- */
-public class ClusterResourceDefinition extends BaseResourceDefinition {
-
-
-  /**
-   * Constructor.
-   */
-  public ClusterResourceDefinition() {
-    super(Resource.Type.Cluster);
-  }
-
-
-  @Override
-  public String getPluralName() {
-    return "clusters";
-  }
-
-  @Override
-  public String getSingularName() {
-    return "cluster";
-  }
-
-  @Override
-  public Set<SubResourceDefinition> getSubResourceDefinitions() {
-    Set<SubResourceDefinition> setChildren = new HashSet<SubResourceDefinition>();
-    setChildren.add(new SubResourceDefinition(Resource.Type.Service));
-    setChildren.add(new SubResourceDefinition(Resource.Type.Host));
-    setChildren.add(new SubResourceDefinition(Resource.Type.Configuration));
-    setChildren.add(new SubResourceDefinition(Resource.Type.Request));
-
-    return setChildren;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ComponentResourceDefinition.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ComponentResourceDefinition.java
deleted file mode 100644
index 12673e8..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ComponentResourceDefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-package org.apache.ambari.server.api.resources;
-
-import org.apache.ambari.server.controller.utilities.ClusterControllerHelper;
-import org.apache.ambari.server.api.services.Request;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.spi.Schema;
-import org.apache.ambari.server.api.util.TreeNode;
-
-import java.util.*;
-
-/**
- * Component resource definition.
- */
-public class ComponentResourceDefinition extends BaseResourceDefinition {
-
-  /**
-   * Constructor.
-   */
-  public ComponentResourceDefinition() {
-    super(Resource.Type.Component);
-  }
-
-  @Override
-  public String getPluralName() {
-    return "components";
-  }
-
-  @Override
-  public String getSingularName() {
-    return "component";
-  }
-
-
-  @Override
-  public Set<SubResourceDefinition> getSubResourceDefinitions() {
-    return Collections.singleton(new SubResourceDefinition(
-        Resource.Type.HostComponent, Collections.singleton(Resource.Type.Host), true));
-  }
-
-  @Override
-  public List<PostProcessor> getPostProcessors() {
-    List<PostProcessor> listProcessors = super.getPostProcessors();
-    listProcessors.add(new ComponentHrefProcessor());
-
-    return listProcessors;
-  }
-
-  /**
-   * Resource processor which generates hrefs.  This is called by the
-   * {@link org.apache.ambari.server.api.services.ResultPostProcessor} during post-processing of a result.
-   */
-  private class ComponentHrefProcessor extends BaseHrefPostProcessor {
-    @Override
-    public void process(Request request, TreeNode<Resource> resultNode, String href) {
-      TreeNode<Resource> parent = resultNode.getParent();
-
-      if (parent.getParent() != null && parent.getParent().getObject().getType() == Resource.Type.HostComponent) {
-        Resource r = resultNode.getObject();
-        Schema schema = ClusterControllerHelper.getClusterController().getSchema(r.getType());
-        Object serviceId = r.getPropertyValue(schema.getKeyPropertyId(Resource.Type.Service));
-        Object componentId = r.getPropertyValue(schema.getKeyPropertyId(r.getType()));
-
-        href = href.substring(0, href.indexOf("/hosts/") + 1) +
-            "services/" + serviceId + "/components/" + componentId;
-
-        resultNode.setProperty("href", href);
-      } else {
-        super.process(request, resultNode, href);
-      }
-    }
-  }
-}
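A sketch of ComponentHrefProcessor's rewrite: when a component node sits under a host component, its href is rebased from the hosts tree onto the services tree. The ids below are hypothetical:

    //   .../clusters/c1/hosts/h1/host_components/NAMENODE/...
    //     becomes
    //   .../clusters/c1/services/HDFS/components/NAMENODE
    // serviceId ("HDFS") and componentId ("NAMENODE") are read from the
    // resource via its schema key properties.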
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ConfigurationResourceDefinition.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ConfigurationResourceDefinition.java
deleted file mode 100644
index 9d2495f..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ConfigurationResourceDefinition.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.resources;
-
-import java.util.*;
-
-import org.apache.ambari.server.api.services.Request;
-import org.apache.ambari.server.api.util.TreeNode;
-import org.apache.ambari.server.controller.spi.Resource;
-
-/**
- * Configuration resource definition.
- */
-public class ConfigurationResourceDefinition extends BaseResourceDefinition {
-
-  /**
-   * Constructor.
-   */
-  public ConfigurationResourceDefinition() {
-    super(Resource.Type.Configuration);
-  }
-
-  @Override
-  public List<PostProcessor> getPostProcessors() {
-    List<PostProcessor> listProcessors = super.getPostProcessors();
-    listProcessors.add(new HrefProcessor());
-
-    return listProcessors;
-  }
-
-  @Override
-  public String getPluralName() {
-    return "configurations";
-  }
-
-  @Override
-  public String getSingularName() {
-    return "configuration";
-  }
-
-  private class HrefProcessor extends BaseHrefPostProcessor {
-
-    @Override
-    public void process(Request request, TreeNode<Resource> resultNode, String href) {
-      if (resultNode.getObject().getType() == Resource.Type.Configuration) {
-
-        if (! href.endsWith("/")) {
-          href += '/';
-        }
-
-        String clustersToken = "/clusters";
-        int idx = href.indexOf(clustersToken) + clustersToken.length() + 1;
-        idx = href.indexOf("/", idx) + 1;
-
-        String type = (String) resultNode.getObject().getPropertyValue("type");
-        String tag = (String) resultNode.getObject().getPropertyValue("tag");
-        href = href.substring(0, idx) + "configurations?type=" + type + "&tag=" + tag;
-
-        resultNode.setProperty("href", href);
-      } else {
-        super.process(request, resultNode, href);
-      }
-
-    }
-  }
-}
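
For illustration, the deleted HrefProcessor above turns a configuration's positional href into a query-style one. A sketch of the rewrite, assuming a hypothetical cluster "c1" and a "core-site" configuration tagged "version1":

    String href = "http://server/api/v1/clusters/c1/configurations/";  // hypothetical
    String clustersToken = "/clusters";
    // skip past "/clusters/", then past the cluster name and its trailing '/'
    int idx = href.indexOf(clustersToken) + clustersToken.length() + 1;
    idx = href.indexOf("/", idx) + 1;
    String rewritten = href.substring(0, idx)
        + "configurations?type=" + "core-site" + "&tag=" + "version1";
    // rewritten => "http://server/api/v1/clusters/c1/configurations?type=core-site&tag=version1"
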
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/DetachedHostResourceDefinition.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/DetachedHostResourceDefinition.java
deleted file mode 100644
index 64801d7..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/DetachedHostResourceDefinition.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-package org.apache.ambari.server.api.resources;
-
-import org.apache.ambari.server.controller.spi.Resource;
-
-
-/**
- * Resource definition for /hosts resources.
- */
-public class DetachedHostResourceDefinition extends BaseResourceDefinition {
-
-  public DetachedHostResourceDefinition() {
-    super(Resource.Type.Host);
-  }
-
-  @Override
-  public String getPluralName() {
-    return "hosts";
-  }
-
-  @Override
-  public String getSingularName() {
-    return "host";
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/HostComponentResourceDefinition.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/HostComponentResourceDefinition.java
deleted file mode 100644
index a0e01f3..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/HostComponentResourceDefinition.java
+++ /dev/null
@@ -1,112 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.resources;
-
-import org.apache.ambari.server.api.services.Request;
-import org.apache.ambari.server.controller.utilities.ClusterControllerHelper;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.spi.Schema;
-import org.apache.ambari.server.api.util.TreeNode;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-
-import java.util.*;
-
-/**
- * Host_Component resource definition.
- */
-public class HostComponentResourceDefinition extends BaseResourceDefinition {
-
-  /**
-   * Constructor.
-   */
-  public HostComponentResourceDefinition() {
-    super(Resource.Type.HostComponent);
-  }
-
-  @Override
-  public String getPluralName() {
-    return "host_components";
-  }
-
-  @Override
-  public String getSingularName() {
-    return "host_component";
-  }
-
-
-  @Override
-  public Set<SubResourceDefinition> getSubResourceDefinitions() {
-    Set<SubResourceDefinition> setSubResources = new HashSet<SubResourceDefinition>();
-    setSubResources.add(new SubResourceDefinition(Resource.Type.Component,
-        Collections.singleton(Resource.Type.Service), false));
-
-    return setSubResources;
-  }
-
-  @Override
-  public List<PostProcessor> getPostProcessors() {
-    List<PostProcessor> listProcessors = new ArrayList<PostProcessor>();
-    listProcessors.add(new HostComponentHrefProcessor());
-    listProcessors.add(new HostComponentHostProcessor());
-
-    return listProcessors;
-  }
-  /**
-   * Host_Component resource processor which is responsible for generating hrefs for host components.
-   * This is called by the ResultPostProcessor during post processing of a result.
-   */
-  private class HostComponentHrefProcessor extends BaseHrefPostProcessor {
-    @Override
-    public void process(Request request, TreeNode<Resource> resultNode, String href) {
-      if (! href.contains("/hosts/")) {
-        Resource r = resultNode.getObject();
-        Schema schema = ClusterControllerHelper.getClusterController().getSchema(r.getType());
-        Object host = r.getPropertyValue(schema.getKeyPropertyId(Resource.Type.Host));
-        Object hostComponent = r.getPropertyValue(schema.getKeyPropertyId(r.getType()));
-
-        int idx = href.indexOf("clusters/") + "clusters/".length();
-        idx = href.indexOf("/", idx) + 1;
-
-        href = href.substring(0, idx) +
-            "hosts/" + host + "/host_components/" + hostComponent;
-
-        resultNode.setProperty("href", href);
-      } else {
-        super.process(request, resultNode, href);
-      }
-    }
-  }
-
-  /**
-   * Host_Component resource processor which is responsible for generating a host section for host components.
-   * This is called by the ResultPostProcessor during post processing of a result.
-   */
-  private class HostComponentHostProcessor implements PostProcessor {
-    @Override
-    public void process(Request request, TreeNode<Resource> resultNode, String href) {
-      //todo: look at partial request fields to ensure that hosts should be returned
-      if (request.getResource().getResourceDefinition().getType() == getType()) {
-        // only add the host section if the host_component resource was queried directly
-        String nodeHref = resultNode.getProperty("href");
-        resultNode.getObject().setProperty(PropertyHelper.getPropertyId("host", "href"),
-            nodeHref.substring(0, nodeHref.indexOf("/host_components/")));
-      }
-    }
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/HostResourceDefinition.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/HostResourceDefinition.java
deleted file mode 100644
index 8aba287..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/HostResourceDefinition.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.resources;
-
-
-import java.util.Collections;
-import java.util.Set;
-
-import org.apache.ambari.server.controller.spi.Resource;
-
-/**
- * Host resource definition.
- */
-public class HostResourceDefinition extends BaseResourceDefinition {
-
-  /**
-   * Constructor.
-   */
-  public HostResourceDefinition() {
-    super(Resource.Type.Host);
-  }
-
-  @Override
-  public String getPluralName() {
-    return "hosts";
-  }
-
-  @Override
-  public String getSingularName() {
-    return "host";
-  }
-
-  @Override
-  public Set<SubResourceDefinition> getSubResourceDefinitions() {
-    return Collections.singleton(new SubResourceDefinition(Resource.Type.HostComponent));
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/RequestResourceDefinition.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/RequestResourceDefinition.java
deleted file mode 100644
index a870c5e..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/RequestResourceDefinition.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.resources;
-
-
-import org.apache.ambari.server.api.services.Request;
-import org.apache.ambari.server.api.util.TreeNode;
-import org.apache.ambari.server.controller.spi.Resource;
-
-import java.util.Collections;
-import java.util.List;
-import java.util.Set;
-
-
-/**
- * Request resource definition.
- */
-public class RequestResourceDefinition extends BaseResourceDefinition {
-
-  /**
-   * Constructor.
-   */
-  public RequestResourceDefinition() {
-    super(Resource.Type.Request);
-  }
-
-  @Override
-  public String getPluralName() {
-    return "requests";
-  }
-
-  @Override
-  public String getSingularName() {
-    return "request";
-  }
-
-  @Override
-  public Set<SubResourceDefinition> getSubResourceDefinitions() {
-      return Collections.singleton(new SubResourceDefinition(Resource.Type.Task));
-  }
-
-  @Override
-  public List<PostProcessor> getPostProcessors() {
-    return Collections.<PostProcessor>singletonList(new RequestHrefPostProcessor());
-  }
-
-  private class RequestHrefPostProcessor implements PostProcessor {
-    @Override
-    public void process(Request request, TreeNode<Resource> resultNode, String href) {
-      StringBuilder sb = new StringBuilder();
-      String[] toks = href.split("/");
-
-      for (int i = 0; i < toks.length; ++i) {
-        String s = toks[i];
-        sb.append(s).append('/');
-        if ("clusters".equals(s)) {
-          sb.append(toks[i + 1]).append('/');
-          break;
-        }
-      }
-
-      Object requestId = resultNode.getObject().getPropertyValue(getClusterController().
-          getSchema(Resource.Type.Request).getKeyPropertyId(Resource.Type.Request));
-
-      sb.append("requests/").append(requestId);
-
-      resultNode.setProperty("href", sb.toString());
-    }
-  }
-}
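
For illustration, a minimal sketch of what the deleted RequestHrefPostProcessor does to an incoming href, using a hypothetical URL and request id:

    String href = "http://server/api/v1/clusters/c1/requests/2/tasks/5";  // hypothetical
    String[] toks = href.split("/");
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < toks.length; ++i) {
      sb.append(toks[i]).append('/');
      if ("clusters".equals(toks[i])) {
        // keep the cluster name too, then stop
        sb.append(toks[i + 1]).append('/');
        break;
      }
    }
    sb.append("requests/").append(2);
    // sb => "http://server/api/v1/clusters/c1/requests/2"
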
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ResourceDefinition.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ResourceDefinition.java
deleted file mode 100644
index ba69869..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ResourceDefinition.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.resources;
-
-import org.apache.ambari.server.api.services.Request;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.api.util.TreeNode;
-
-import org.apache.ambari.server.api.services.ResultPostProcessor;
-
-import java.util.List;
-import java.util.Set;
-
-/**
- * Resource Definition.
- * Provides information specific to each resource type.
- */
-public interface ResourceDefinition {
-  /**
-   * Obtain the plural name of the resource.
-   *
-   * @return the plural name of the resource
-   */
-  public String getPluralName();
-
-  /**
-   * Obtain the singular name of the resource.
-   *
-   * @return the singular name of the resource
-   */
-  public String getSingularName();
-
-  /**
-   * Obtain the type of resource.  Is one of {@link Resource.Type}.
-   *
-   * @return the type of resource
-   */
-  public Resource.Type getType();
-
-  /**
-   * Obtain a set of all child resource types.
-   *
-   * @return set of sub-resource definitions
-   */
-  public Set<SubResourceDefinition> getSubResourceDefinitions();
-
-  /**
-   * Obtain any resource post processors.  A resource processor is used to provide resource specific processing of
-   * results and is called by the {@link ResultPostProcessor} while post processing a result.
-   *
-   * @return list of resource specific result processors
-   */
-  public List<PostProcessor> getPostProcessors();
-
-  /**
-   * Resource specific result processor.
-   * Used to provide resource specific processing of a result.
-   */
-  public interface PostProcessor {
-    public void process(Request request, TreeNode<Resource> resultNode, String href);
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ResourceInstance.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ResourceInstance.java
deleted file mode 100644
index a3a8e3a..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ResourceInstance.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.resources;
-
-import org.apache.ambari.server.api.query.Query;
-import org.apache.ambari.server.controller.spi.Resource;
-
-import java.util.Map;
-
-/**
- * Resource instance which contains request specific state.
- */
-public interface ResourceInstance {
-
-  /**
-   * Set the values of the parent foreign keys.
-   *
-   * @param mapIds  map of all parent foreign keys. Map from resource type to id value.
-   */
-  public void setIds(Map<Resource.Type, String> mapIds);
-
-  /**
-   * Obtain the primary and foreign key properties for the resource.
-   *
-   * @return map of primary and foreign key values keyed by resource type
-   */
-  public Map<Resource.Type, String> getIds();
-
-  /**
-   * Return the query associated with the resource.
-   * Each resource has one query.
-   *
-   * @return the associated query
-   */
-  public Query getQuery();
-
-  /**
-   * Return the resource definition for this resource type.
-   * All information in the definition is static and is specific to the resource type,
-   * not the resource instance.
-   *
-   * @return  the associated resource definition
-   */
-  public ResourceDefinition getResourceDefinition();
-
-  /**
-   * Return all sub-resource instances.
-   * This will include all children of this resource as well
-   * as any other resources referred to via a foreign key property.
-   *
-   * @return all sub-resource instances
-   */
-  public Map<String, ResourceInstance> getSubResources();
-
-  /**
-   * Determine if resource is a collection resource.
-   *
-   * @return true if the resource is a collection resource; false otherwise
-   */
-  public boolean isCollectionResource();
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ResourceInstanceFactory.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ResourceInstanceFactory.java
deleted file mode 100644
index 23c581d..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ResourceInstanceFactory.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-package org.apache.ambari.server.api.resources;
-
-import org.apache.ambari.server.controller.spi.Resource;
-
-import java.util.Map;
-
-/**
- * Factory for creating resource instances.
- */
-public interface ResourceInstanceFactory {
-  /**
-   * Create a resource instance.
-   *
-   * @param type    the type of resource to create
-   * @param mapIds  the resource ids which identify the resource
-   *
-   * @return  a new resource instance of the specified type
-   */
-  public ResourceInstance createResource(Resource.Type type, Map<Resource.Type, String> mapIds);
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ResourceInstanceFactoryImpl.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ResourceInstanceFactoryImpl.java
deleted file mode 100644
index ef14006..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ResourceInstanceFactoryImpl.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-package org.apache.ambari.server.api.resources;
-
-import org.apache.ambari.server.controller.spi.Resource;
-
-import java.util.Map;
-
-/**
- * Factory for creating resource instances.
- */
-public class ResourceInstanceFactoryImpl implements ResourceInstanceFactory {
-
-  @Override
-  public ResourceInstance createResource(Resource.Type type, Map<Resource.Type, String> mapIds) {
-
-    // the resource definition for the specified type
-    ResourceDefinition resourceDefinition;
-
-    //todo: consider ResourceDependencyManager : Map<Resource.Type, ResourceDefinition>
-    switch (type) {
-      case Cluster:
-        resourceDefinition = new ClusterResourceDefinition();
-        break;
-
-      case Service:
-        resourceDefinition = new ServiceResourceDefinition();
-        break;
-
-      case Host:
-        resourceDefinition = mapIds.containsKey(Resource.Type.Cluster) ?
-            new HostResourceDefinition() : new DetachedHostResourceDefinition();
-        break;
-
-      case Component:
-        resourceDefinition = new ComponentResourceDefinition();
-        break;
-
-      case HostComponent:
-        resourceDefinition = new HostComponentResourceDefinition();
-        break;
-
-      case Action:
-        resourceDefinition = new ActionResourceDefinition();
-        break;
-
-      case Configuration:
-        resourceDefinition = new ConfigurationResourceDefinition();
-        break;
-
-      case Task:
-        resourceDefinition = new TaskResourceDefinition();
-        break;
-
-      case User:
-        resourceDefinition = new UserResourceDefinition();
-        break;
-
-      case Request:
-        resourceDefinition = new RequestResourceDefinition();
-        break;
-
-      default:
-        throw new IllegalArgumentException("Unsupported resource type: " + type);
-    }
-
-    return new ResourceInstanceImpl(mapIds, resourceDefinition, this);
-  }
-}
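
For illustration, a usage sketch of the deleted factory above. The types come from the deleted sources; the cluster and host id values are hypothetical, and imports are assumed to match the file's:

    ResourceInstanceFactory factory = new ResourceInstanceFactoryImpl();

    Map<Resource.Type, String> attachedIds = new HashMap<Resource.Type, String>();
    attachedIds.put(Resource.Type.Cluster, "c1");  // hypothetical cluster id
    attachedIds.put(Resource.Type.Host, "h1");     // hypothetical host id
    // a Cluster id is present, so HostResourceDefinition is selected
    ResourceInstance attached = factory.createResource(Resource.Type.Host, attachedIds);

    Map<Resource.Type, String> detachedIds = new HashMap<Resource.Type, String>();
    detachedIds.put(Resource.Type.Host, "h1");
    // no Cluster id, so DetachedHostResourceDefinition is selected
    ResourceInstance detached = factory.createResource(Resource.Type.Host, detachedIds);
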
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ResourceInstanceImpl.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ResourceInstanceImpl.java
deleted file mode 100644
index f75d5fd..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ResourceInstanceImpl.java
+++ /dev/null
@@ -1,156 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-package org.apache.ambari.server.api.resources;
-
-import org.apache.ambari.server.api.query.Query;
-import org.apache.ambari.server.api.query.QueryImpl;
-import org.apache.ambari.server.controller.spi.ClusterController;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.ClusterControllerHelper;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Resource instance which contains request specific state.
- */
-public class ResourceInstanceImpl implements ResourceInstance {
-
-  /**
-   * Query associated with the resource definition.
-   */
-  private Query m_query;
-
-  /**
-   * Map of primary and foreign keys and values necessary to identify the resource.
-   */
-  private Map<Resource.Type, String> m_mapResourceIds = new HashMap<Resource.Type, String>();
-
-  /**
-   * Definition for the resource type.  The definition contains all information specific to the
-   * resource type.
-   */
-  private ResourceDefinition m_resourceDefinition;
-
-  /**
-   * Sub-resource instances of this resource.
-   * Map of sub-resource name to resource instance.
-   */
-  private Map<String, ResourceInstance> m_mapSubResources;
-
-  /**
-   * Factory for creating resource instances.
-   * Used to create sub-resource instances.
-   */
-  private ResourceInstanceFactory m_resourceFactory;
-
-  /**
-   * Cluster controller reference.
-   */
-  //todo: should be injected.
-  private ClusterController m_controller = ClusterControllerHelper.getClusterController();
-
-
-  public ResourceInstanceImpl(Map<Resource.Type, String> mapIds, ResourceDefinition resourceDefinition,
-                              ResourceInstanceFactory resourceFactory) {
-
-    m_resourceDefinition = resourceDefinition;
-    m_query              = new QueryImpl(this);
-    m_resourceFactory    = resourceFactory;
-
-    setIds(mapIds);
-  }
-
-  @Override
-  public void setIds(Map<Resource.Type, String> mapIds) {
-    m_mapResourceIds.putAll(mapIds);
-  }
-
-  @Override
-  public Map<Resource.Type, String> getIds() {
-    return new HashMap<Resource.Type, String>(m_mapResourceIds);
-  }
-
-  @Override
-  public Query getQuery() {
-    return m_query;
-  }
-
-  @Override
-  public ResourceDefinition getResourceDefinition() {
-    return m_resourceDefinition;
-  }
-
-
-  @Override
-  public Map<String, ResourceInstance> getSubResources() {
-    if (m_mapSubResources == null) {
-      m_mapSubResources = new HashMap<String, ResourceInstance>();
-      Set<SubResourceDefinition> setSubResourceDefs = getResourceDefinition().getSubResourceDefinitions();
-
-      for (SubResourceDefinition subResDef : setSubResourceDefs) {
-        ResourceInstance resource = m_resourceFactory.createResource(subResDef.getType(), getIds());
-
-        // ensure pk is returned
-        resource.getQuery().addLocalProperty(m_controller.getSchema(
-            subResDef.getType()).getKeyPropertyId(subResDef.getType()));
-        // add additionally required fk properties
-        for (Resource.Type fkType : subResDef.getAdditionalForeignKeys()) {
-          resource.getQuery().addLocalProperty(m_controller.getSchema(subResDef.getType()).getKeyPropertyId(fkType));
-        }
-
-        String subResourceName = subResDef.isCollection() ? resource.getResourceDefinition().getPluralName() :
-            resource.getResourceDefinition().getSingularName();
-
-        m_mapSubResources.put(subResourceName, resource);
-      }
-    }
-    return m_mapSubResources;
-  }
-
-  @Override
-  public boolean isCollectionResource() {
-    return getIds().get(getResourceDefinition().getType()) == null;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    ResourceInstanceImpl that = (ResourceInstanceImpl) o;
-
-    // note: the parentheses around the ternary matter, since '&&' binds
-    // tighter than '?:'
-    return m_mapResourceIds.equals(that.m_mapResourceIds) &&
-           m_query == that.m_query &&
-           m_resourceDefinition.equals(that.m_resourceDefinition) &&
-           (m_mapSubResources == null ? that.m_mapSubResources == null :
-               m_mapSubResources.equals(that.m_mapSubResources));
-  }
-
-  @Override
-  public int hashCode() {
-    int result = 13;
-    result = 31 * result + m_mapResourceIds.hashCode();
-    result = 31 * result + m_resourceDefinition.hashCode();
-    result = 31 * result + (m_mapSubResources != null ? m_mapSubResources.hashCode() : 0);
-    return result;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ServiceResourceDefinition.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ServiceResourceDefinition.java
deleted file mode 100644
index e1355ad..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ServiceResourceDefinition.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.resources;
-
-import org.apache.ambari.server.controller.spi.Resource;
-
-import java.util.*;
-
-/**
- * Service resource definition.
- */
-public class ServiceResourceDefinition extends BaseResourceDefinition {
-
-  /**
-   * Constructor.
-   *
-   */
-  public ServiceResourceDefinition() {
-    super(Resource.Type.Service);
-  }
-
-  @Override
-  public String getPluralName() {
-    return "services";
-  }
-
-  @Override
-  public String getSingularName() {
-    return "service";
-  }
-
-  @Override
-  public Set<SubResourceDefinition> getSubResourceDefinitions() {
-    return Collections.singleton(new SubResourceDefinition(Resource.Type.Component));
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/SubResourceDefinition.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/SubResourceDefinition.java
deleted file mode 100644
index 6f01772..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/SubResourceDefinition.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-package org.apache.ambari.server.api.resources;
-
-import org.apache.ambari.server.controller.spi.Resource;
-
-import java.util.Collections;
-import java.util.Set;
-
-/**
- * Definition of a sub-resource.
- * Defines a resource instance that is added to another resource as a sub-resource.
- */
-public class SubResourceDefinition {
-
-  /**
-   * Resource type.
-   */
-  private Resource.Type m_type;
-
-  /**
-   * Additional foreign key properties to include by default in the sub-resource.
-   */
-  private Set<Resource.Type> m_setForeignKeys;
-
-  /**
-   * Whether the sub-resource is a collection or a single instance.
-   */
-  private boolean m_isCollection = true;
-
-
-  /**
-   * Constructor.
-   * Simple constructor which uses default state for everything except resource type.
-   *
-   * @param type  resource type
-   */
-  public SubResourceDefinition(Resource.Type type) {
-    m_type = type;
-  }
-
-  /**
-   * Constructor.
-   * This constructor allows all state to be set.
-   *
-   * @param type            resource type
-   * @param setForeignKeys  set of additional foreign keys to include in resource by default
-   * @param isCollection    whether the sub-resource is a collection
-   */
-  public SubResourceDefinition(Resource.Type type, Set<Resource.Type> setForeignKeys, boolean isCollection) {
-    m_type = type;
-    m_setForeignKeys = setForeignKeys;
-    m_isCollection = isCollection;
-  }
-
-  /**
-   * Obtain the sub-resource type.
-   *
-   * @return  the sub-resource type
-   */
-  public Resource.Type getType() {
-    return m_type;
-  }
-
-  /**
-   * Get the set of additional foreign key properties that are included in the resource by default.
-   *
-   * @return  set of additional foreign key properties
-   */
-  public Set<Resource.Type> getAdditionalForeignKeys() {
-    return m_setForeignKeys == null ? Collections.<Resource.Type>emptySet() : m_setForeignKeys;
-  }
-
-  /**
-   * Whether the sub-resource is a collection.
-   *
-   * @return  true if a collection, false if an instance
-   */
-  public boolean isCollection() {
-    return m_isCollection;
-  }
-}
-
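
For illustration, the two constructors above correspond to the two usages already visible in this diff (taken from the deleted ServiceResourceDefinition and ComponentResourceDefinition):

    // default form: a collection sub-resource with no extra foreign keys
    new SubResourceDefinition(Resource.Type.Component);

    // fully-specified form: also fetch the Host foreign key, expose as a collection
    new SubResourceDefinition(Resource.Type.HostComponent,
        Collections.singleton(Resource.Type.Host), true);
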
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/TaskResourceDefinition.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/TaskResourceDefinition.java
deleted file mode 100644
index 45adf2b..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/TaskResourceDefinition.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.resources;
-
-import org.apache.ambari.server.controller.spi.Resource;
-
-
-/**
- * Task resource definition.
- */
-public class TaskResourceDefinition extends BaseResourceDefinition {
-
-  /**
-   * Constructor.
-   */
-  public TaskResourceDefinition() {
-    super(Resource.Type.Task);
-  }
-
-  @Override
-  public String getPluralName() {
-    return "tasks";
-  }
-
-  @Override
-  public String getSingularName() {
-    return "task";
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/UserResourceDefinition.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/UserResourceDefinition.java
deleted file mode 100644
index 194284a..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/resources/UserResourceDefinition.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.api.resources;
-
-import java.util.Collections;
-import java.util.Set;
-
-import org.apache.ambari.server.controller.spi.Resource;
-
-/**
- * User Resource Definition
- */
-public class UserResourceDefinition extends BaseResourceDefinition {
-
-  public UserResourceDefinition() {
-    super(Resource.Type.User);
-  }
-  
-  @Override
-  public String getPluralName() {
-    return "users";
-  }
-
-  @Override
-  public String getSingularName() {
-    return "user";
-  }
-
-  @Override
-  public Set<SubResourceDefinition> getSubResourceDefinitions() {
-    return Collections.emptySet();
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/rest/BootStrapResource.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/rest/BootStrapResource.java
deleted file mode 100644
index 5672598..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/rest/BootStrapResource.java
+++ /dev/null
@@ -1,157 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.rest;
-
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-import java.util.ArrayList;
-import java.util.List;
-
-import javax.ws.rs.Consumes;
-import javax.ws.rs.GET;
-import javax.ws.rs.POST;
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.Produces;
-import javax.ws.rs.WebApplicationException;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
-
-import org.apache.ambari.server.bootstrap.BSHostStatus;
-import org.apache.ambari.server.bootstrap.BSResponse;
-import org.apache.ambari.server.bootstrap.BootStrapImpl;
-import org.apache.ambari.server.bootstrap.BootStrapStatus;
-import org.apache.ambari.server.bootstrap.SshHostInfo;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import com.google.inject.Inject;
-
-@Path("/bootstrap")
-public class BootStrapResource {
-
-  private static BootStrapImpl bsImpl;
-  private static Log LOG = LogFactory.getLog(BootStrapResource.class);
-
-  @Inject
-  public static void init(BootStrapImpl instance) {
-    bsImpl = instance;
-  }
-  /**
-   * Run bootstrap on a list of hosts.
-   * @response.representation.200.doc
-   *
-   * @response.representation.200.mediaType application/json
-   * @response.representation.406.doc Error in format
-   * @response.representation.408.doc Request Timed out
-   * @throws Exception
-   */
-  @POST
-  @Consumes(MediaType.APPLICATION_JSON)
-  @Produces({MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML})
-  public BSResponse bootStrap(SshHostInfo sshInfo, @Context UriInfo uriInfo) {
-    
-    normalizeHosts(sshInfo);
-
-    BSResponse resp = bsImpl.runBootStrap(sshInfo);
-
-    return resp;
-  }
-
-  /**
-   * Get the status of the bootstrap operation that is currently running.
-   * @response.representation.200.doc
-   *
-   * @response.representation.200.mediaType application/json
-   * @response.representation.406.doc Error in format
-   * @response.representation.408.doc Request Timed out
-   * @throws Exception
-   */
-  @GET
-  @Path("/{requestId}")
-  @Produces({MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML})
-  public BootStrapStatus getBootStrapStatus(@PathParam("requestId")
-    long requestId, @Context UriInfo info) {
-    BootStrapStatus status = bsImpl.getStatus(requestId);
-    if (status == null)
-      throw new WebApplicationException(Response.Status.NO_CONTENT);
-    return status;
-  }
-
-
-  /**
-   * Gets a list of bootstrapped hosts.
-   *
-   * @param uriInfo  the uri info
-   */
-  @GET
-  @Path("/hosts")
-  @Produces(MediaType.APPLICATION_JSON)
-  public List<BSHostStatus> getBootStrapHosts(@Context UriInfo uriInfo) {
-    List<BSHostStatus> allStatus = bsImpl.getHostInfo(null);
-
-    if (0 == allStatus.size())
-      throw new WebApplicationException(Response.Status.NO_CONTENT);
-
-    return allStatus;
-  }
-  /**
-   * Gets a list of bootstrapped hosts.
-   *
-   * @param info  the host info, with no SSL key information required
-   */
-  @POST
-  @Path("/hosts")
-  @Produces(MediaType.APPLICATION_JSON)
-  public List<BSHostStatus> getBootStrapHosts(SshHostInfo info, @Context UriInfo uriInfo) {
-
-    List<BSHostStatus> allStatus = bsImpl.getHostInfo(info.getHosts());
-
-    if (0 == allStatus.size())
-      throw new WebApplicationException(Response.Status.NO_CONTENT);
-
-    return allStatus;
-  }
-  
-  
-  private void normalizeHosts(SshHostInfo info) {
-    List<String> validHosts = new ArrayList<String>();
-    List<String> newHosts = new ArrayList<String>();
-    
-    for (String host: info.getHosts()) {
-      try {
-        InetAddress addr = InetAddress.getByName(host);
-        
-        if (!validHosts.contains(addr.getHostAddress())) {
-          validHosts.add(addr.getHostAddress());
-          newHosts.add(host);
-        } else {
-          LOG.warn("Host " + host + " has already been targeted to be bootstrapped.");
-        }
-      } catch (UnknownHostException e) {
-        LOG.warn("Host " + host + " cannot be determined.");
-      }
-    }
-    
-    info.setHosts(newHosts);
-  }
-  
-}
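
For illustration, how the normalizeHosts step above behaves, as a sketch assuming two hypothetical names that resolve to the same address:

    // hypothetical: "node1" and "node1.example.com" resolve to the same InetAddress
    SshHostInfo info = new SshHostInfo();
    info.setHosts(new ArrayList<String>(Arrays.asList("node1", "node1.example.com")));

    // after normalizeHosts(info):
    //   - "node1" is kept (first spelling seen for that address)
    //   - "node1.example.com" is dropped as a duplicate, with a warning logged
    //   - any name that fails DNS resolution is dropped as well
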
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/rest/HealthCheck.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/rest/HealthCheck.java
deleted file mode 100644
index 2f31611..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/rest/HealthCheck.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.rest;
-
-import javax.ws.rs.GET;
-import javax.ws.rs.Path;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.MediaType;
-
-/**
- * A simple POJO to do a health check on the server to see if it's running
- * or not.
- */
-
-@Path("/check")
-public class HealthCheck {
-  private static final String status = "RUNNING";
-  // This method is called if TEXT_PLAIN is requested
-
-  @GET
-  @Produces(MediaType.TEXT_PLAIN)
-  public String plainTextCheck() {
-    return status;
-  }
-
-  // This method is called if XML is requested
-  @GET
-  @Produces(MediaType.TEXT_XML)
-  public String xmlCheck() {
-    return "<?xml version=\"1.0\"?>" + "<status> " + status + "</status>";
-  }
-
-  // This method is called if HTML is requested
-  @GET
-  @Produces(MediaType.TEXT_HTML)
-  public String  htmlCheck() {
-    return "<html> " + "<title>" + "Status" + "</title>"
-        + "<body><h1>" + status + "</body></h1>" + "</html> ";
-  }
-}
-
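
For illustration, the three producers above are selected by the client's Accept header. A minimal client sketch, assuming a hypothetical host, port, and /api prefix (the real prefix depends on how the server is deployed):

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class CheckClient {
      public static void main(String[] args) throws Exception {
        URL url = new URL("http://localhost:8080/api/check");  // hypothetical address
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestProperty("Accept", "text/plain");       // selects plainTextCheck()
        BufferedReader in = new BufferedReader(
            new InputStreamReader(conn.getInputStream()));
        System.out.println(in.readLine());                     // prints "RUNNING"
        in.close();
      }
    }
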
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/ActionService.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/ActionService.java
deleted file mode 100644
index 8b8d0cc..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/ActionService.java
+++ /dev/null
@@ -1,134 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.api.services;
-
-import javax.ws.rs.GET;
-import javax.ws.rs.POST;
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
-
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.apache.ambari.server.controller.spi.Resource;
-
-import java.util.HashMap;
-import java.util.Map;
-
-public class ActionService extends BaseService {
-  /**
-   * Parent cluster name.
-   */
-  private String m_clusterName;
-
-  /**
-   * Parent service name.
-   */
-  private String m_serviceName;
-
-  /**
-   * Constructor.
-   *
-   * @param clusterName cluster id
-   * @param serviceName service
-   */
-  public ActionService(String clusterName, String serviceName) {
-    m_clusterName = clusterName;
-    m_serviceName = serviceName;
-  }
-
-  /**
-   * Handles URL: /clusters/{clusterId}/services/{serviceName}/actions
-   * Get all actions for a service in a cluster.
-   *
-   * @param headers http headers
-   * @param ui      uri info
-   * @return action collection resource representation
-   */
-  @GET
-  @Produces("text/plain")
-  public Response getActions(@Context HttpHeaders headers, @Context UriInfo ui) {
-    return handleRequest(headers, null, ui, Request.Type.GET,
-        createActionResource(m_clusterName, m_serviceName, null));
-  }
-
-  /**
-   * Handles URL: /clusters/{clusterId}/services/{serviceName}/actions.  
-   * The body should contain:
-   * <pre>
-   * {
-   *     "actionName":"name_string",
-   *     "parameters":
-   *     {
-   *         "key1":"value1",
-   *         // ...
-   *         "keyN":"valueN"
-   *     }
-   * }
-   * </pre>
-   * Create one or more actions for the service.
-   *
-   * @param body    http body
-   * @param headers http headers
-   * @param ui      uri info
-   * @return information regarding the created action(s)
-   */
-  @POST
-  @Produces("text/plain")
-  public Response createActions(String body, @Context HttpHeaders headers, @Context UriInfo ui) {
-    return handleRequest(headers, body, ui, Request.Type.POST,
-        createActionResource(m_clusterName, m_serviceName, null));
-  }
-  
-  /**
-   * Handles: POST /clusters/{clusterId}/services/{serviceName}/actions/{actionName}
-   * Create a specific action.
-   *
-   * @param body        http body
-   * @param headers     http headers
-   * @param ui          uri info
-   * @param actionName  action name
-   *
-   * @return information regarding the created action
-   */
-  @POST
-  @Path("{actionName}")
-  @Produces("text/plain")
-  public Response createAction(String body, @Context HttpHeaders headers, @Context UriInfo ui,
-                               @PathParam("actionName") String actionName) {
-    return handleRequest(headers, body, ui, Request.Type.POST,
-        createActionResource(m_clusterName, m_serviceName, actionName));
-  }
-
-  /**
-   * Create an action resource instance.
-   *
-   * @param clusterName cluster name
-   * @param serviceName service name
-   * @param actionName  action name
-   *
-   * @return an action resource instance
-   */
-  ResourceInstance createActionResource(String clusterName, String serviceName, String actionName) {
-    Map<Resource.Type,String> mapIds = new HashMap<Resource.Type, String>();
-    mapIds.put(Resource.Type.Cluster, clusterName);
-    mapIds.put(Resource.Type.Service, serviceName);
-    mapIds.put(Resource.Type.Action, actionName);
-
-    return createResource(Resource.Type.Action, mapIds);
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
deleted file mode 100644
index 3bb6b2c..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
+++ /dev/null
@@ -1,656 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-import com.google.inject.Inject;
-import com.google.inject.Singleton;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.state.*;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.w3c.dom.*;
-import org.xml.sax.SAXException;
-
-import javax.xml.parsers.DocumentBuilder;
-import javax.xml.parsers.DocumentBuilderFactory;
-import javax.xml.parsers.ParserConfigurationException;
-import java.io.File;
-import java.io.FilenameFilter;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-/**
- * AmbariMetaInfo is responsible for getting metadata (stacks, services, repositories) about the cluster.
- */
-@Singleton
-public class AmbariMetaInfo {
-
-    private List<StackInfo> stacksResult = new ArrayList<StackInfo>();
-    private File stackRoot;
-    private final static Logger LOG = LoggerFactory
-            .getLogger(AmbariMetaInfo.class);
-
-    private static final String SERVICES_FOLDER_NAME = "services";
-    private static final String SERVICE_METAINFO_FILE_NAME = "metainfo.xml";
-    private static final String SERVICE_CONFIG_FOLDER_NAME = "configuration";
-    private static final String SERVICE_CONFIG_FILE_NAME_POSTFIX = "-site.xml";
-
-    private static final String REPOSITORY_FILE_NAME = "repoinfo.xml";
-    private static final String REPOSITORY_FOLDER_NAME = "repos";
-    private static final String REPOSITORY_XML_MAIN_BLOCK_NAME = "os";
-    private static final String REPOSITORY_XML_ATTRIBUTE_OS_TYPE = "type";
-    private static final String REPOSITORY_XML_REPO_BLOCK_NAME = "repo";
-    private static final String REPOSITORY_XML_PROPERTY_BASEURL = "baseurl";
-    private static final String REPOSITORY_XML_PROPERTY_REPOID = "repoid";
-    private static final String REPOSITORY_XML_PROPERTY_REPONAME = "reponame";
-    private static final String REPOSITORY_XML_PROPERTY_MIRRORSLIST = "mirrorslist";
-
-    private static final String METAINFO_XML_MAIN_BLOCK_NAME = "metainfo";
-    private static final String METAINFO_XML_PROPERTY_VERSION = "version";
-    private static final String METAINFO_XML_PROPERTY_USER = "user";
-    private static final String METAINFO_XML_PROPERTY_COMMENT = "comment";
-    private static final String METAINFO_XML_PROPERTY_COMPONENT_MAIN = "component";
-    private static final String METAINFO_XML_PROPERTY_COMPONENT_NAME = "name";
-    private static final String METAINFO_XML_PROPERTY_COMPONENT_CATEGORY = "category";
-
-    private static final String PROPERTY_XML_MAIN_BLOCK_NAME = "property";
-    private static final String PROPERTY_XML_PROPERTY_NAME = "name";
-    private static final String PROPERTY_XML_PROPERTY_VALUE = "value";
-    private static final String PROPERTY_XML_PROPERTY_DESCRIPTION = "description";
-    private static final FilenameFilter FILENAME_FILTER = new FilenameFilter() {
-      @Override
-      public boolean accept(File dir, String s) {
-        if (s.equals(".svn") || s.equals(".git"))
-          return false;
-        return true;
-      }
-    };
-
-    /**
-     * Ambari Meta Info Object
-     *
-     * @param conf Configuration API to be used.
-     * @throws Exception
-     */
-    @Inject
-    public AmbariMetaInfo(Configuration conf) throws Exception {
-        String stackPath = conf.getMetadataPath();
-        this.stackRoot = new File(stackPath);
-    }
-
-    @Inject
-    public AmbariMetaInfo(File stackRoot) throws Exception {
-        this.stackRoot = stackRoot;
-    }
-
-
-    /**
-     * Initialize the Ambari Meta Info
-     *
-     * @throws Exception if the metadata cannot be parsed.
-     */
-    public void init() throws Exception {
-        getConfigurationInformation(stackRoot);
-    }
-
-
-    /**
-     * Get component category
-     *
-     * @param stackName     the stack name
-     * @param version       the stack version
-     * @param serviceName   the service name
-     * @param componentName the component name
-     * @return the component info, or null if not found
-     */
-    public ComponentInfo getComponentCategory(String stackName, String version,
-                                              String serviceName, String componentName) {
-        ComponentInfo component = null;
-        List<ComponentInfo> components = getComponentsByService(stackName, version,
-                serviceName);
-        if (components != null)
-            for (ComponentInfo cmp : components) {
-                if (cmp.getName().equals(componentName)) {
-                    component = cmp;
-                    break;
-                }
-            }
-        return component;
-    }
-
-
-    /**
-     * Get the components defined for a service.
-     *
-     * @param stackName   the stack name
-     * @param version     the stack version
-     * @param serviceName the service name
-     * @return the list of components, or null if the service is not found
-     */
-    public List<ComponentInfo> getComponentsByService(String stackName,
-                                                      String version, String serviceName) {
-        List<ComponentInfo> componentsResult = null;
-        ServiceInfo service = getServiceInfo(stackName, version, serviceName);
-        if (service != null)
-            componentsResult = service.getComponents();
-
-        return componentsResult;
-    }
-
-    public Map<String, List<RepositoryInfo>> getRepository(String stackName,
-                                                           String version) {
-        Map<String, List<RepositoryInfo>> reposResult = null;
-        StackInfo stack = getStackInfo(stackName, version);
-        if (stack != null) {
-            List<RepositoryInfo> repository = stack.getRepositories();
-            reposResult = new HashMap<String, List<RepositoryInfo>>();
-            for (RepositoryInfo repo : repository) {
-                if (!reposResult.containsKey(repo.getOsType())) {
-                    reposResult.put(repo.getOsType(),
-                            new ArrayList<RepositoryInfo>());
-                }
-                reposResult.get(repo.getOsType()).add(repo);
-            }
-        }
-        return reposResult;
-    }
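-    // Illustrative shape of the returned map (stack and URLs are hypothetical;
-    // entries are keyed by the OS type read from repoinfo.xml):
-    //   { "centos6" -> [ RepositoryInfo{repoId="HDP-1.2.0", baseUrl="http://..."} ],
-    //     "centos5" -> [ ... ] }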
-
-    /*
-     * Given a stack name and version, determine whether the stack is supported.
-     */
-    public boolean isSupportedStack(String stackName, String version) {
-        StackInfo stack = getStackInfo(stackName, version);
-        return (stack != null);
-    }
-
-    /*
-     * Determine whether the given service exists in the stack/version.
-     */
-    public boolean isValidService(String stackName, String version,
-                                  String serviceName) {
-        ServiceInfo service = getServiceInfo(stackName, version, serviceName);
-        return (service != null);
-    }
-
-    /*
-     * Determine whether the given component exists in the service for the stack/version.
-     */
-    public boolean isValidServiceComponent(String stackName, String version,
-                                           String serviceName, String componentName) {
-        ServiceInfo service = getServiceInfo(stackName, version, serviceName);
-        if (service == null) {
-            return false;
-        }
-        for (ComponentInfo compInfo : service.getComponents()) {
-            if (compInfo.getName().equals(componentName)) {
-                return true;
-            }
-        }
-        return false;
-    }
-
-
-    /**
-     * Get the name of a service given the component name.
-     *
-     * @param stackName     the stack name
-     * @param version       the stack version
-     * @param componentName the component name
-     * @return the service name
-     */
-    public String getComponentToService(String stackName, String version,
-                                        String componentName) {
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("Looking for service for component"
-                    + ", stackName=" + stackName
-                    + ", stackVersion=" + version
-                    + ", componentName=" + componentName);
-        }
-        Map<String, ServiceInfo> services = getServices(stackName, version);
-        String retService = null;
-        if (services == null
-                || services.isEmpty()) {
-            return retService;
-        }
-        boolean found = false;
-        for (Map.Entry<String, ServiceInfo> entry : services.entrySet()) {
-            for (ComponentInfo compInfo : entry.getValue().getComponents()) {
-                if (compInfo.getName().equals(componentName)) {
-                    retService = entry.getKey();
-                    found = true;
-                    break;
-                }
-            }
-            if (found)
-                break;
-        }
-        return retService;
-    }
-
-    /**
-     * Get the service configs supported for a service in a particular stack
-     *
-     * @param stackName   the stack name
-     * @param version     the version of the stack
-     * @param serviceName the name of the service in the stack
-     * @return the config knobs supported for the service
-     */
-    public Map<String, Map<String, String>> getSupportedConfigs(String stackName,
-                                                                String version, String serviceName) {
-        Map<String, Map<String, String>> propertiesResult = new HashMap<String, Map<String, String>>();
-
-        ServiceInfo service = getServiceInfo(stackName, version, serviceName);
-        if (service != null && serviceName.equals(service.getName())) {
-            List<PropertyInfo> properties = service.getProperties();
-            if (properties != null)
-                for (PropertyInfo propertyInfo : properties) {
-                    // Group property values by the file they are defined in.
-                    Map<String, String> fileProperties = propertiesResult
-                            .get(propertyInfo.getFilename());
-                    if (fileProperties == null) {
-                        fileProperties = new HashMap<String, String>();
-                        propertiesResult.put(propertyInfo.getFilename(), fileProperties);
-                    }
-                    fileProperties.put(propertyInfo.getName(), propertyInfo.getValue());
-                }
-        }
-
-        return propertiesResult;
-    }
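-    // Example of the returned structure, assuming a service that ships an
-    // hdfs-site.xml (file and property names here are hypothetical):
-    //   { "hdfs-site.xml" -> { "dfs.name.dir" -> "/hadoop/hdfs/namenode" } }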
-
-    /**
-     * Given a stack name and version, return all of its services with their info.
-     *
-     * @param stackName the stack name
-     * @param version   the version of the stack
-     * @return information about the various services supported in the stack,
-     *         keyed by service name
-     */
-    public Map<String, ServiceInfo> getServices(String stackName, String version) {
-
-        Map<String, ServiceInfo> servicesInfoResult = new HashMap<String, ServiceInfo>();
-
-        List<ServiceInfo> services = null;
-        StackInfo stack = getStackInfo(stackName, version);
-        if (stack == null)
-            return null;
-        services = stack.getServices();
-        if (services != null)
-            for (ServiceInfo service : services) {
-                servicesInfoResult.put(service.getName(), service);
-            }
-        return servicesInfoResult;
-    }
-
-    public ServiceInfo getServiceInfo(String stackName, String version,
-                                      String serviceName) {
-        ServiceInfo serviceInfoResult = null;
-        List<ServiceInfo> services = null;
-        StackInfo stack = getStackInfo(stackName, version);
-        if (stack == null)
-            return null;
-        services = stack.getServices();
-        if (services != null)
-            for (ServiceInfo service : services) {
-                if (serviceName.equals(service.getName())) {
-                    serviceInfoResult = service;
-                    break;
-                }
-            }
-        return serviceInfoResult;
-    }
-
-    public List<ServiceInfo> getSupportedServices(String stackName, String version) {
-        List<ServiceInfo> servicesResult = null;
-        StackInfo stack = getStackInfo(stackName, version);
-        if (stack != null)
-            servicesResult = stack.getServices();
-        return servicesResult;
-    }
-
-    public List<StackInfo> getSupportedStacks() {
-        return stacksResult;
-    }
-
-    public StackInfo getStackInfo(String stackName, String version) {
-        StackInfo stackInfoResult = null;
-
-        for (StackInfo stack : stacksResult) {
-            if (stackName.equals(stack.getName())
-                    && version.equals(stack.getVersion())) {
-                stackInfoResult = stack;
-                break;
-            }
-        }
-        return stackInfoResult;
-    }
-
-
-    private void getConfigurationInformation(File stackRoot) throws Exception {
-
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("Loading stack information"
-                    + ", stackRoot=" + stackRoot.getAbsolutePath());
-        }
-
-        if (!stackRoot.exists() || !stackRoot.isDirectory())
-            throw new IOException(Configuration.METADETA_DIR_PATH
-                    + " should be a directory with stacks"
-                    + ", stackRoot=" + stackRoot.getAbsolutePath());
-        File[] stacks = stackRoot.listFiles(FILENAME_FILTER);
-        for (File stackFolder : stacks) {
-            if (stackFolder.isFile())
-                continue;
-            File[] concretStacks = stackFolder.listFiles(FILENAME_FILTER);
-            for (File stack : concretStacks) {
-                if (stack.isFile())
-                    continue;
-                StackInfo stackInfo = new StackInfo();
-                stackInfo.setName(stackFolder.getName());
-                stackInfo.setVersion(stack.getName());
-
-                if (LOG.isDebugEnabled()) {
-                    LOG.debug("Adding new stack to known stacks"
-                            + ", stackName=" + stackFolder.getName()
-                            + ", stackVersion=" + stack.getName());
-                }
-
-                stacksResult.add(stackInfo);
-                // get repository data for current stack of techs
-                File repositoryFolder = new File(stack.getAbsolutePath()
-                        + File.separator + REPOSITORY_FOLDER_NAME + File.separator
-                        + REPOSITORY_FILE_NAME);
-
-                if (repositoryFolder.exists()) {
-                    if (LOG.isDebugEnabled()) {
-                        LOG.debug("Adding repositories to stack"
-                                + ", stackName=" + stackFolder.getName()
-                                + ", stackVersion=" + stack.getName()
-                                + ", repoFolder=" + repositoryFolder.getPath());
-                    }
-                    List<RepositoryInfo> repositoryInfoList = getRepository(repositoryFolder);
-                    stackInfo.getRepositories().addAll(repositoryInfoList);
-                }
-
-                // Get services for this stack
-                File servicesRootFolder = new File(stack.getAbsolutePath()
-                        + File.separator + SERVICES_FOLDER_NAME);
-                File[] servicesFolders = servicesRootFolder.listFiles(FILENAME_FILTER);
-
-                if (servicesFolders != null) {
-                    for (File serviceFolder : servicesFolders) {
-                        // Get information about service
-                        ServiceInfo serviceInfo = new ServiceInfo();
-                        serviceInfo.setName(serviceFolder.getName());
-                        stackInfo.getServices().add(serviceInfo);
-
-                        if (LOG.isDebugEnabled()) {
-                            LOG.debug("Adding new service to stack"
-                                    + ", stackName=" + stackFolder.getName()
-                                    + ", stackVersion=" + stack.getName()
-                                    + ", serviceName=" + serviceInfo.getName());
-                        }
-
-                        // Get metainfo data from metainfo.xml
-                        File metainfoFile = new File(serviceFolder.getAbsolutePath()
-                                + File.separator + SERVICE_METAINFO_FILE_NAME);
-                        if (metainfoFile.exists()) {
-                            setMetaInfo(metainfoFile, serviceInfo);
-                        }
-
-                        // Get all properties from all "configuration/*-site.xml" files
-                        File serviceConfigFolder = new File(serviceFolder.getAbsolutePath()
-                                + File.separator + SERVICE_CONFIG_FOLDER_NAME);
-                        File[] configFiles = serviceConfigFolder.listFiles(FILENAME_FILTER);
-                        if (configFiles != null) {
-                            for (File config : configFiles) {
-                                if (config.getName().endsWith(SERVICE_CONFIG_FILE_NAME_POSTFIX)) {
-                                    serviceInfo.getProperties().addAll(getProperties(config));
-                                }
-                            }
-                        }
-                    }
-                }
-            }
-        }
-    }
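-    // The walk above assumes a stack tree of this shape under stackRoot
-    // (stack and service names are illustrative):
-    //   <stackRoot>/HDP/1.2.0/repos/repoinfo.xml
-    //   <stackRoot>/HDP/1.2.0/services/HDFS/metainfo.xml
-    //   <stackRoot>/HDP/1.2.0/services/HDFS/configuration/hdfs-site.xml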
-
-    private List<RepositoryInfo> getRepository(File repositoryFile) throws ParserConfigurationException, IOException, SAXException {
-
-        List<RepositoryInfo> repositoriesInfo = new ArrayList<RepositoryInfo>();
-
-        DocumentBuilderFactory dbFactory = DocumentBuilderFactory.newInstance();
-        DocumentBuilder dBuilder = dbFactory.newDocumentBuilder();
-        Document doc = dBuilder.parse(repositoryFile);
-
-        NodeList osNodes = doc
-                .getElementsByTagName(REPOSITORY_XML_MAIN_BLOCK_NAME);
-
-        for (int index = 0; index < osNodes.getLength(); index++) {
-            Node osNode = osNodes.item(index);
-
-            if (osNode.getNodeType() == Node.ELEMENT_NODE) {
-                if (!osNode.getNodeName().equals(REPOSITORY_XML_MAIN_BLOCK_NAME)) {
-                    continue;
-                }
-                NamedNodeMap attrs = osNode.getAttributes();
-                Node osAttr = attrs.getNamedItem(REPOSITORY_XML_ATTRIBUTE_OS_TYPE);
-                if (osAttr == null) {
-                    continue;
-                }
-                String osType = osAttr.getNodeValue();
-
-                NodeList repoNodes = osNode.getChildNodes();
-                for (int j = 0; j < repoNodes.getLength(); j++) {
-                    Node repoNode = repoNodes.item(j);
-                    if (repoNode.getNodeType() != Node.ELEMENT_NODE
-                            || !repoNode.getNodeName().equals(
-                            REPOSITORY_XML_REPO_BLOCK_NAME)) {
-                        continue;
-                    }
-                    Element property = (Element) repoNode;
-                    String repoId = getTagValue(REPOSITORY_XML_PROPERTY_REPOID,
-                            property);
-                    String repoName = getTagValue(REPOSITORY_XML_PROPERTY_REPONAME,
-                            property);
-                    String baseUrl = getTagValue(
-                            REPOSITORY_XML_PROPERTY_BASEURL, property);
-                    String mirrorsList = getTagValue(
-                            REPOSITORY_XML_PROPERTY_MIRRORSLIST, property);
-
-                    String[] osTypes = osType.split(",");
-
-                    for (String os : osTypes) {
-                        RepositoryInfo repositoryInfo = new RepositoryInfo();
-                        repositoryInfo.setOsType(os.trim());
-                        repositoryInfo.setRepoId(repoId);
-                        repositoryInfo.setRepoName(repoName);
-                        repositoryInfo.setBaseUrl(baseUrl);
-                        repositoryInfo.setMirrorsList(mirrorsList);
-
-                        if (LOG.isDebugEnabled()) {
-                            LOG.debug("Adding repo to stack"
-                                    + ", repoInfo=" + repositoryInfo.toString());
-                        }
-                        repositoriesInfo.add(repositoryInfo);
-                    }
-                }
-            }
-        }
-
-        return repositoriesInfo;
-    }
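-    // A minimal repoinfo.xml fragment this parser accepts (values are
-    // illustrative; the name of the enclosing root element is not inspected,
-    // and <mirrorslist> is optional):
-    //   <reposinfo>
-    //     <os type="centos6,redhat6">
-    //       <repo>
-    //         <repoid>HDP-1.2.0</repoid>
-    //         <reponame>HDP</reponame>
-    //         <baseurl>http://example.com/hdp/centos6</baseurl>
-    //       </repo>
-    //     </os>
-    //   </reposinfo>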
-
-    private void setMetaInfo(File metainfoFile, ServiceInfo serviceInfo) {
-
-        DocumentBuilderFactory dbFactory = DocumentBuilderFactory.newInstance();
-
-        Document doc = null;
-        DocumentBuilder dBuilder = null;
-        try {
-            dBuilder = dbFactory.newDocumentBuilder();
-            doc = dBuilder.parse(metainfoFile);
-        } catch (SAXException e) {
-            LOG.error("Error while parsing metainfo.xml", e);
-        } catch (IOException e) {
-            LOG.error("Error while opening metainfo.xml", e);
-        } catch (ParserConfigurationException e) {
-            LOG.error("Error while parsing metainfo.xml", e);
-        }
-
-        if (doc == null) return;
-
-        doc.getDocumentElement().normalize();
-
-        NodeList metaInfoNodes = doc
-                .getElementsByTagName(METAINFO_XML_MAIN_BLOCK_NAME);
-
-        if (metaInfoNodes.getLength() > 0) {
-            Node metaInfoNode = metaInfoNodes.item(0);
-            if (metaInfoNode.getNodeType() == Node.ELEMENT_NODE) {
-
-                Element metaInfoElem = (Element) metaInfoNode;
-
-                serviceInfo.setVersion(getTagValue(METAINFO_XML_PROPERTY_VERSION,
-                        metaInfoElem));
-                serviceInfo.setUser(getTagValue(METAINFO_XML_PROPERTY_USER,
-                        metaInfoElem));
-                serviceInfo.setComment(getTagValue(METAINFO_XML_PROPERTY_COMMENT,
-                        metaInfoElem));
-            }
-        }
-
-        NodeList componentInfoNodes = doc
-                .getElementsByTagName(METAINFO_XML_PROPERTY_COMPONENT_MAIN);
-
-        if (componentInfoNodes.getLength() > 0) {
-            for (int index = 0; index < componentInfoNodes.getLength(); index++) {
-                Node componentInfoNode = componentInfoNodes.item(index);
-                if (componentInfoNode.getNodeType() == Node.ELEMENT_NODE) {
-                    Element componentInfoElem = (Element) componentInfoNode;
-
-                    ComponentInfo componentInfo = new ComponentInfo();
-                    componentInfo.setName(getTagValue(
-                            METAINFO_XML_PROPERTY_COMPONENT_NAME, componentInfoElem));
-                    componentInfo.setCategory(getTagValue(
-                            METAINFO_XML_PROPERTY_COMPONENT_CATEGORY, componentInfoElem));
-                    serviceInfo.getComponents().add(componentInfo);
-
-                }
-            }
-        }
-
-    }
-
-    private List<PropertyInfo> getProperties(File propertyFile) {
-
-        List<PropertyInfo> resultPropertyList = new ArrayList<PropertyInfo>();
-        try {
-            DocumentBuilderFactory dbFactory = DocumentBuilderFactory.newInstance();
-            DocumentBuilder dBuilder = dbFactory.newDocumentBuilder();
-            Document doc = dBuilder.parse(propertyFile);
-            doc.getDocumentElement().normalize();
-
-            NodeList propertyNodes = doc
-                    .getElementsByTagName(PROPERTY_XML_MAIN_BLOCK_NAME);
-
-            for (int index = 0; index < propertyNodes.getLength(); index++) {
-
-                Node node = propertyNodes.item(index);
-                if (node.getNodeType() == Node.ELEMENT_NODE) {
-                    Element property = (Element) node;
-                    PropertyInfo propertyInfo = new PropertyInfo();
-                    propertyInfo
-                            .setName(getTagValue(PROPERTY_XML_PROPERTY_NAME, property));
-                    propertyInfo.setValue(getTagValue(PROPERTY_XML_PROPERTY_VALUE,
-                            property));
-
-                    propertyInfo.setDescription(getTagValue(
-                            PROPERTY_XML_PROPERTY_DESCRIPTION, property));
-                    propertyInfo.setFilename(propertyFile.getName());
-
-                    if (propertyInfo.getName() == null || propertyInfo.getValue() == null)
-                        continue;
-
-                    resultPropertyList.add(propertyInfo);
-                }
-            }
-        } catch (Exception e) {
-            LOG.error("Error while parsing configuration file "
-                    + propertyFile.getName(), e);
-            return null;
-        }
-        return resultPropertyList;
-    }
-
-    private String getTagValue(String sTag, Element rawElement) {
-        String result = null;
-        NodeList tagList = rawElement.getElementsByTagName(sTag);
-
-        if (tagList != null && tagList.getLength() > 0 && tagList.item(0) != null) {
-            NodeList childNodes = tagList.item(0).getChildNodes();
-
-            if (childNodes != null && childNodes.item(0) != null) {
-                result = childNodes.item(0).getNodeValue();
-            }
-        }
-
-        return result;
-    }
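-    // For example, given the element <repo><repoid>HDP-1.2.0</repoid></repo>,
-    // getTagValue("repoid", repoElement) returns "HDP-1.2.0"; it returns null
-    // when the tag is absent or has no text content.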
-
-    public boolean areOsTypesCompatible(String type1, String type2) {
-        if (type1 == null || type2 == null) {
-            return false;
-        }
-        if (type1.equals(type2)) {
-            return true;
-        }
-        if (type1.equals("redhat5") || type1.equals("centos5")) {
-            if (type2.equals("centos5")
-                    || type2.equals("redhat5")) {
-                return true;
-            }
-        } else if (type1.equals("redhat6") || type1.equals("centos6")) {
-            if (type2.equals("centos6")
-                    || type2.equals("redhat6")) {
-                return true;
-            }
-        }
-        return false;
-    }
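-    // e.g. areOsTypesCompatible("redhat5", "centos5") == true, while
-    // areOsTypesCompatible("centos5", "centos6") == false.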
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaService.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaService.java
deleted file mode 100644
index 7a2d2e5..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaService.java
+++ /dev/null
@@ -1,115 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-import java.io.IOException;
-import java.util.List;
-
-import javax.ws.rs.GET;
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.Response;
-import javax.xml.bind.JAXBException;
-
-import org.apache.ambari.server.state.ServiceInfo;
-import org.apache.ambari.server.state.StackInfo;
-import org.codehaus.jackson.JsonGenerationException;
-import org.codehaus.jackson.map.JsonMappingException;
-import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jackson.map.SerializationConfig;
-import org.codehaus.jackson.map.ser.FilterProvider;
-import org.codehaus.jackson.map.ser.impl.SimpleBeanPropertyFilter;
-import org.codehaus.jackson.map.ser.impl.SimpleFilterProvider;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.inject.Inject;
-
-@Path("/stacks/")
-public class AmbariMetaService {
-  private static AmbariMetaInfo ambariMetainfo;
-  private static final Logger LOG = LoggerFactory.getLogger(AmbariMetaService.class);
-
-  @Inject
-  public static void init(AmbariMetaInfo instance) {
-    ambariMetainfo = instance;
-  }
-
-  /**
-   * Serialize the given object to JSON, optionally filtering out configuration properties.
-   *
-   * @param object        the object to serialize
-   * @param ignoreConfigs if true, exclude the "properties" field from the output
-   * @return the JSON representation of the object
-   * @throws IOException
-   * @throws JsonMappingException
-   * @throws JsonGenerationException
-   */
-  public String filterProperties(Object object, boolean ignoreConfigs) throws
-  JsonGenerationException, JsonMappingException, IOException {
-    ObjectMapper mapper = new ObjectMapper();
-    mapper.configure(SerializationConfig.Feature.INDENT_OUTPUT, true);
-    mapper.configure(SerializationConfig.Feature.USE_ANNOTATIONS, true);
-    if (ignoreConfigs) {
-      FilterProvider filters = new SimpleFilterProvider().addFilter(
-          "propertiesfilter",
-          SimpleBeanPropertyFilter.serializeAllExcept("properties"));
-      mapper.setFilters(filters);
-    } else {
-      FilterProvider filters = new SimpleFilterProvider().addFilter(
-          "propertiesfilter", SimpleBeanPropertyFilter.serializeAllExcept());
-      mapper.setFilters(filters);
-    }
-    String json = mapper.writeValueAsString(object);
-    return json;
-  }
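-  // Note: SimpleFilterProvider applies "propertiesfilter" only to beans
-  // annotated with @JsonFilter("propertiesfilter"); the state objects being
-  // serialized (e.g. ServiceInfo) are assumed to carry that annotation.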
-
-  @GET
-  @Produces("text/plain")
-  public Response getStacks() throws JsonGenerationException,
-  JsonMappingException, JAXBException, IOException {
-    List<StackInfo> stackInfos = ambariMetainfo.getSupportedStacks();
-    String output = filterProperties(stackInfos, true);
-    return Response.status(Response.Status.OK).entity(output).build();
-  }
-
-  @GET
-  @Path("{stackName}/version/{versionNumber}")
-  @Produces("text/plain")
-  public Response getStack(@PathParam("stackName") String stackName,
-      @PathParam("versionNumber") String versionNumber) throws
-      JsonGenerationException, JsonMappingException, JAXBException, IOException  {
-    StackInfo stackInfo = ambariMetainfo.getStackInfo(stackName, versionNumber);
-    String output = filterProperties(stackInfo, true);
-    return Response.status(Response.Status.OK).entity(output).build();
-  }
-
-  @GET
-  @Path("{stackName}/version/{versionNumber}/services/{serviceName}")
-  @Produces("text/plain")
-  public Response getServiceInfo(@PathParam("stackName") String stackName,
-      @PathParam("versionNumber") String versionNumber,
-      @PathParam("serviceName") String serviceName) throws
-      JsonGenerationException, JsonMappingException, JAXBException, IOException  {
-    ServiceInfo serviceInfo = ambariMetainfo.getServiceInfo(stackName,
-        versionNumber, serviceName);
-    String output = filterProperties(serviceInfo, false);
-    return Response.status(Response.Status.OK).entity(output).build();
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/BaseRequest.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/BaseRequest.java
deleted file mode 100644
index 1fef4ba..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/BaseRequest.java
+++ /dev/null
@@ -1,186 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-import org.apache.ambari.server.api.predicate.InvalidQueryException;
-import org.apache.ambari.server.api.predicate.PredicateCompiler;
-import org.apache.ambari.server.api.resources.*;
-import org.apache.ambari.server.api.services.parsers.JsonPropertyParser;
-import org.apache.ambari.server.api.services.parsers.RequestBodyParser;
-import org.apache.ambari.server.api.services.serializers.JsonSerializer;
-import org.apache.ambari.server.api.services.serializers.ResultSerializer;
-import org.apache.ambari.server.controller.internal.TemporalInfoImpl;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.TemporalInfo;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.UriInfo;
-import java.io.UnsupportedEncodingException;
-import java.net.URLDecoder;
-import java.util.*;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-/**
- * Request implementation.
- */
-public abstract class BaseRequest implements Request {
-
-  /**
-   * URI information
-   */
-  private UriInfo m_uriInfo;
-
-  /**
-   * Http headers
-   */
-  private HttpHeaders m_headers;
-
-  /**
-   * Http Body
-   */
-  private String m_body;
-
-
-  /**
-   * Associated resource definition
-   */
-  private ResourceInstance m_resource;
-
-
-  /**
-   * Constructor.
-   *
-   * @param headers      http headers
-   * @param body         http body
-   * @param uriInfo      uri information
-   * @param resource     associated resource definition
-   */
-  public BaseRequest(HttpHeaders headers, String body, UriInfo uriInfo, ResourceInstance resource) {
-
-    m_headers  = headers;
-    m_body     = body;
-    m_uriInfo  = uriInfo;
-    m_resource = resource;
-  }
-
-  @Override
-  public ResourceInstance getResource() {
-    return m_resource;
-  }
-
-  @Override
-  public String getURI() {
-    try {
-      return URLDecoder.decode(m_uriInfo.getRequestUri().toASCIIString(), "UTF-8");
-    } catch (UnsupportedEncodingException e) {
-      throw new RuntimeException("Unable to decode URI: " + e, e);
-    }
-  }
-
-  @Override
-  public int getAPIVersion() {
-    return 0;
-  }
-
-  @Override
-  public Predicate getQueryPredicate() throws InvalidQueryException {
-    String uri     = getURI();
-    int    qsBegin = uri.indexOf("?");
-
-    return (qsBegin == -1) ? null :
-        getPredicateCompiler().compile(uri.substring(qsBegin + 1));
-  }
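-  // For example (property names are illustrative), a request such as
-  //   GET /api/clusters/c1/hosts?Hosts/host_status=HEALTHY
-  // yields the query string "Hosts/host_status=HEALTHY", which the
-  // PredicateCompiler turns into an equality predicate on that property.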
-
-  @Override
-  public Map<String, TemporalInfo> getFields() {
-    Map<String, TemporalInfo> mapProperties;
-    String partialResponseFields = m_uriInfo.getQueryParameters().getFirst("fields");
-    if (partialResponseFields == null) {
-      mapProperties = Collections.emptyMap();
-    } else {
-      Set<String> setMatches = new HashSet<String>();
-      // The pattern splits the string on ',' except where the ',' falls between
-      // '[' and ']'; i.e. it captures either "name[temporal,args]" or a plain field name.
-      Pattern re = Pattern.compile("[^,\\[]*?\\[[^\\]]*?\\]|[^,]+");
-      Matcher m = re.matcher(partialResponseFields);
-      while (m.find()){
-        for (int groupIdx = 0; groupIdx < m.groupCount() + 1; groupIdx++) {
-          setMatches.add(m.group(groupIdx));
-        }
-      }
-
-      mapProperties = new HashMap<String, TemporalInfo>(setMatches.size());
-      for (String field : setMatches) {
-        TemporalInfo temporalInfo = null;
-        if (field.contains("[")) {
-          String[] temporalData = field.substring(field.indexOf('[') + 1,
-              field.indexOf(']')).split(",");
-          field = field.substring(0, field.indexOf('['));
-          long start = Long.parseLong(temporalData[0].trim());
-          long end   = -1;
-          long step  = -1;
-          if (temporalData.length >= 2) {
-            end = Long.parseLong(temporalData[1].trim());
-            if (temporalData.length == 3) {
-              step = Long.parseLong(temporalData[2].trim());
-            }
-          }
-          temporalInfo = new TemporalInfoImpl(start, end, step);
-        }
-        mapProperties.put(field, temporalInfo);
-      }
-    }
-
-    return mapProperties;
-  }
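-  // For example, "fields=Hosts/host_name,metrics/cpu[1360,1380,15]" (names
-  // illustrative) maps "Hosts/host_name" to a null TemporalInfo and
-  // "metrics/cpu" to TemporalInfoImpl(1360, 1380, 15).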
-
-  @Override
-  public Map<String, List<String>> getHttpHeaders() {
-    return m_headers.getRequestHeaders();
-  }
-
-  @Override
-  public String getHttpBody() {
-    return m_body;
-  }
-
-  @Override
-  public Set<Map<String, Object>> getHttpBodyProperties() {
-    return getHttpBodyParser().parse(getHttpBody());
-  }
-
-  @Override
-  public ResultSerializer getResultSerializer() {
-    return new JsonSerializer();
-  }
-
-  @Override
-  public ResultPostProcessor getResultPostProcessor() {
-    return new ResultPostProcessorImpl(this);
-  }
-
-  protected RequestBodyParser getHttpBodyParser() {
-    return new JsonPropertyParser();
-  }
-
-  protected PredicateCompiler getPredicateCompiler() {
-    return new PredicateCompiler();
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/BaseService.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/BaseService.java
deleted file mode 100644
index 38fdb49..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/BaseService.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-import org.apache.ambari.server.api.handlers.RequestHandler;
-import org.apache.ambari.server.api.handlers.RequestHandlerFactory;
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.apache.ambari.server.api.resources.ResourceInstanceFactory;
-import org.apache.ambari.server.api.resources.ResourceInstanceFactoryImpl;
-import org.apache.ambari.server.api.services.serializers.ResultSerializer;
-import org.apache.ambari.server.controller.spi.Resource;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
-import java.util.Map;
-
-/**
- * Provides common functionality to all services.
- */
-public abstract class BaseService {
-
-  /**
-   * Factory for creating resource instances.
-   */
-  private ResourceInstanceFactory m_resourceFactory = new ResourceInstanceFactoryImpl();
-
-  /**
-   * Factory for creating request handlers.
-   */
-  private RequestHandlerFactory m_handlerFactory = new RequestHandlerFactory();
-
-  /**
-   * All requests are funneled through this method so that common logic can be executed.
-   * This consists of creating a {@link Request} instance, invoking the correct {@link RequestHandler} and
-   * applying the proper {@link ResultSerializer} to the result.
-   *
-   * @param headers      http headers
-   * @param body         http body
-   * @param uriInfo      uri information
-   * @param requestType  http request type
-   * @param resource     resource instance that is being acted on
-   *
-   * @return the response of the operation in serialized form
-   */
-  protected Response handleRequest(HttpHeaders headers, String body, UriInfo uriInfo, Request.Type requestType,
-                                   ResourceInstance resource) {
-
-    Request request = getRequestFactory().createRequest(
-        headers, body, uriInfo, requestType, resource);
-
-    Result result = getRequestHandler(request.getRequestType()).handleRequest(request);
-    if (! result.getStatus().isErrorState()) {
-      request.getResultPostProcessor().process(result);
-    }
-
-    return Response.status(result.getStatus().getStatusCode()).entity(
-        request.getResultSerializer().serialize(result)).build();
-  }
-
-  /**
-   * Obtain the factory from which to create Request instances.
-   *
-   * @return the Request factory
-   */
-  RequestFactory getRequestFactory() {
-    return new RequestFactory();
-  }
-
-  /**
-   * Obtain the appropriate RequestHandler for the request.
-   *
-   * @param requestType  the request type
-   *
-   * @return the request handler to invoke
-   */
-  RequestHandler getRequestHandler(Request.Type requestType) {
-    return m_handlerFactory.getRequestHandler(requestType);
-  }
-
-  /**
-   * Create a resource instance.
-   *
-   * @param type    the resource type
-   * @param mapIds  all primary and foreign key properties and values
-   *
-   * @return a newly created resource instance
-   */
-  ResourceInstance createResource(Resource.Type type, Map<Resource.Type, String> mapIds) {
-    return m_resourceFactory.createResource(type, mapIds);
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/ClusterService.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/ClusterService.java
deleted file mode 100644
index 11d4536..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/ClusterService.java
+++ /dev/null
@@ -1,185 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.apache.ambari.server.controller.spi.Resource;
-
-import javax.ws.rs.*;
-import javax.ws.rs.core.*;
-import java.util.Collections;
-
-
-/**
- * Service responsible for cluster resource requests.
- */
-@Path("/clusters/")
-public class ClusterService extends BaseService {
-
-  /**
-   * Handles: GET /clusters/{clusterID}
-   * Get a specific cluster.
-   *
-   * @param headers     http headers
-   * @param ui          uri info
-   * @param clusterName cluster id
-   * @return cluster instance representation
-   */
-  @GET
-  @Path("{clusterName}")
-  @Produces("text/plain")
-  public Response getCluster(@Context HttpHeaders headers, @Context UriInfo ui,
-                             @PathParam("clusterName") String clusterName) {
-
-    return handleRequest(headers, null, ui, Request.Type.GET, createClusterResource(clusterName));
-  }
-
-  /**
-   * Handles: GET  /clusters
-   * Get all clusters.
-   *
-   * @param headers http headers
-   * @param ui      uri info
-   * @return cluster collection resource representation
-   */
-  @GET
-  @Produces("text/plain")
-  public Response getClusters(@Context HttpHeaders headers, @Context UriInfo ui) {
-    return handleRequest(headers, null, ui, Request.Type.GET, createClusterResource(null));
-  }
-
-  /**
-   * Handles: POST /clusters/{clusterID}
-   * Create a specific cluster.
-   *
-   * @param headers     http headers
-   * @param ui          uri info
-   * @param clusterName cluster id
-   * @return information regarding the created cluster
-   */
-   @POST
-   @Path("{clusterName}")
-   @Produces("text/plain")
-   public Response createCluster(String body, @Context HttpHeaders headers, @Context UriInfo ui,
-                                 @PathParam("clusterName") String clusterName) {
-
-    return handleRequest(headers, body, ui, Request.Type.POST, createClusterResource(clusterName));
-  }
-
-  /**
-   * Handles: PUT /clusters/{clusterID}
-   * Update a specific cluster.
-   *
-   * @param headers     http headers
-   * @param ui          uri info
-   * @param clusterName cluster id
-   * @return information regarding the updated cluster
-   */
-  @PUT
-  @Path("{clusterName}")
-  @Produces("text/plain")
-  public Response updateCluster(String body, @Context HttpHeaders headers, @Context UriInfo ui,
-                                @PathParam("clusterName") String clusterName) {
-
-    return handleRequest(headers, body, ui, Request.Type.PUT, createClusterResource(clusterName));
-  }
-
-  /**
-   * Handles: DELETE /clusters/{clusterID}
-   * Delete a specific cluster.
-   *
-   * @param headers     http headers
-   * @param ui          uri info
-   * @param clusterName cluster id
-   * @return information regarding the deleted cluster
-   */
-  @DELETE
-  @Path("{clusterName}")
-  @Produces("text/plain")
-  public Response deleteCluster(@Context HttpHeaders headers, @Context UriInfo ui,
-                                @PathParam("clusterName") String clusterName) {
-
-    return handleRequest(headers, null, ui, Request.Type.DELETE, createClusterResource(clusterName));
-  }
-
-  /**
-   * Get the hosts sub-resource
-   *
-   * @param clusterName cluster id
-   * @return the hosts service
-   */
-  @Path("{clusterName}/hosts")
-  public HostService getHostHandler(@PathParam("clusterName") String clusterName) {
-    return new HostService(clusterName);
-  }
-
-  /**
-   * Get the services sub-resource
-   *
-   * @param clusterName cluster id
-   * @return the services service
-   */
-  @Path("{clusterName}/services")
-  public ServiceService getServiceHandler(@PathParam("clusterName") String clusterName) {
-    return new ServiceService(clusterName);
-  }
-  
-  /**
-   * Gets the configurations sub-resource.
-   *
-   * @param clusterName  the cluster name
-   * @return the configuration service
-   */
-  @Path("{clusterName}/configurations")
-  public ConfigurationService getConfigurationHandler(@PathParam("clusterName") String clusterName) {
-    return new ConfigurationService(clusterName);
-  }
-
-  /**
-   * Gets the requests sub-resource.
-   */
-  @Path("{clusterName}/requests")
-  public RequestService getRequestHandler(@PathParam("clusterName") String clusterName) {
-    return new RequestService(clusterName);
-  }
-
-  /**
-   * Get the host component resource without specifying the parent host component.
-   * Allows accessing host component resources across hosts.
-   *
-   * @param clusterName the cluster name
-   * @return  the host component service with no parent set
-   */
-  @Path("{clusterName}/host_components")
-  public HostComponentService getHostComponentHandler(@PathParam("clusterName") String clusterName) {
-    return new HostComponentService(clusterName, null);
-  }
-
-  /**
-   * Create a cluster resource instance.
-   *
-   * @param clusterName cluster name
-   *
-   * @return a cluster resource instance
-   */
-  ResourceInstance createClusterResource(String clusterName) {
-    return createResource(Resource.Type.Cluster,
-        Collections.singletonMap(Resource.Type.Cluster, clusterName));
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/ComponentService.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/ComponentService.java
deleted file mode 100644
index f5a9f4e..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/ComponentService.java
+++ /dev/null
@@ -1,204 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.apache.ambari.server.controller.spi.Resource;
-
-import javax.ws.rs.*;
-import javax.ws.rs.core.*;
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * Service responsible for components resource requests.
- */
-public class ComponentService extends BaseService {
-  /**
-   * Parent cluster id.
-   */
-  private String m_clusterName;
-
-  /**
-   * Parent service id.
-   */
-  private String m_serviceName;
-
-  /**
-   * Constructor.
-   *
-   * @param clusterName cluster id
-   * @param serviceName service id
-   */
-  public ComponentService(String clusterName, String serviceName) {
-    m_clusterName = clusterName;
-    m_serviceName = serviceName;
-  }
-
-  /**
-   * Handles GET: /clusters/{clusterID}/services/{serviceID}/components/{componentID}
-   * Get a specific component.
-   *
-   * @param headers       http headers
-   * @param ui            uri info
-   * @param componentName component id
-   * @return a component resource representation
-   */
-  @GET
-  @Path("{componentName}")
-  @Produces("text/plain")
-  public Response getComponent(@Context HttpHeaders headers, @Context UriInfo ui,
-                               @PathParam("componentName") String componentName) {
-
-    return handleRequest(headers, null, ui, Request.Type.GET,
-        createComponentResource(m_clusterName, m_serviceName, componentName));
-  }
-
-  /**
-   * Handles GET: /clusters/{clusterID}/services/{serviceID}/components
-   * Get all components for a service.
-   *
-   * @param headers http headers
-   * @param ui      uri info
-   * @return component collection resource representation
-   */
-  @GET
-  @Produces("text/plain")
-  public Response getComponents(@Context HttpHeaders headers, @Context UriInfo ui) {
-    return handleRequest(headers, null, ui, Request.Type.GET,
-        createComponentResource(m_clusterName, m_serviceName, null));
-  }
-
-  /**
-   * Handles: POST /clusters/{clusterID}/services/{serviceID}/components
-   * Create components by specifying an array of components in the http body.
-   * This is used to create multiple components in a single request.
-   *
-   * @param body          http body
-   * @param headers       http headers
-   * @param ui            uri info
-   *
-   * @return status code only, 201 if successful
-   */
-  @POST
-  @Produces("text/plain")
-  public Response createComponents(String body, @Context HttpHeaders headers, @Context UriInfo ui) {
-
-    return handleRequest(headers, body, ui, Request.Type.POST,
-        createComponentResource(m_clusterName, m_serviceName, null));
-  }
-
-  /**
-   * Handles: POST /clusters/{clusterID}/services/{serviceID}/components/{componentID}
-   * Create a specific component.
-   *
-   * @param body          http body
-   * @param headers       http headers
-   * @param ui            uri info
-   * @param componentName component id
-   *
-   * @return information regarding the created component
-   */
-  @POST
-  @Path("{componentName}")
-  @Produces("text/plain")
-  public Response createComponent(String body, @Context HttpHeaders headers, @Context UriInfo ui,
-                                @PathParam("componentName") String componentName) {
-
-    return handleRequest(headers, body, ui, Request.Type.POST,
-        createComponentResource(m_clusterName, m_serviceName, componentName));
-  }
-
-  /**
-   * Handles: PUT /clusters/{clusterID}/services/{serviceID}/components/{componentID}
-   * Update a specific component.
-   *
-   * @param body          http body
-   * @param headers       http headers
-   * @param ui            uri info
-   * @param componentName component id
-   *
-   * @return information regarding the updated component
-   */
-  @PUT
-  @Path("{componentName}")
-  @Produces("text/plain")
-  public Response updateComponent(String body, @Context HttpHeaders headers, @Context UriInfo ui,
-                                @PathParam("componentName") String componentName) {
-
-    return handleRequest(headers, body, ui, Request.Type.PUT, createComponentResource(
-        m_clusterName, m_serviceName, componentName));
-  }
-
-  /**
-   * Handles: PUT /clusters/{clusterID}/services/{serviceID}/components
-   * Update multiple components.
-   *
-   * @param body          http body
-   * @param headers       http headers
-   * @param ui            uri info
-   *
-   * @return information regarding the updated component
-   */
-  @PUT
-  @Produces("text/plain")
-  public Response updateComponents(String body, @Context HttpHeaders headers, @Context UriInfo ui) {
-
-    return handleRequest(headers, body, ui, Request.Type.PUT, createComponentResource(
-        m_clusterName, m_serviceName, null));
-  }
-
-  /**
-   * Handles: DELETE /clusters/{clusterID}/services/{serviceID}/components/{componentID}
-   * Delete a specific component.
-   *
-   * @param headers     http headers
-   * @param ui          uri info
-   * @param componentName component id
-   * @return information regarding the deleted component
-   */
-  @DELETE
-  @Path("{componentName}")
-  @Produces("text/plain")
-  public Response deleteComponent(@Context HttpHeaders headers, @Context UriInfo ui,
-                                @PathParam("componentName") String componentName) {
-
-    return handleRequest(headers, null, ui, Request.Type.DELETE, createComponentResource(
-        m_clusterName, m_serviceName, componentName));
-  }
-
-  /**
-   * Create a component resource instance.
-   *
-   *
-   * @param clusterName   cluster name
-   * @param serviceName   service name
-   * @param componentName component name
-   *
-   * @return a component resource instance
-   */
-  ResourceInstance createComponentResource(String clusterName, String serviceName, String componentName) {
-    Map<Resource.Type,String> mapIds = new HashMap<Resource.Type, String>();
-    mapIds.put(Resource.Type.Cluster, clusterName);
-    mapIds.put(Resource.Type.Service, serviceName);
-    mapIds.put(Resource.Type.Component, componentName);
-
-    return createResource(Resource.Type.Component, mapIds);
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/ConfigurationService.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/ConfigurationService.java
deleted file mode 100644
index aef3423..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/ConfigurationService.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-import javax.ws.rs.GET;
-import javax.ws.rs.POST;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
-
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.apache.ambari.server.controller.spi.Resource;
-
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * Service responsible for configuration resource requests.
- */
-public class ConfigurationService extends BaseService {
-  /**
-   * Parent cluster name.
-   */
-  private String m_clusterName;
-
-  /**
-   * Constructor.
-   *
-   * @param clusterName cluster id
-   */
-  public ConfigurationService(String clusterName) {
-    m_clusterName = clusterName;
-  }
-
-  /**
-   * Handles URL: /clusters/{clusterId}/configurations
-   * Get all configurations for a cluster.
-   *
-   * @param headers http headers
-   * @param ui      uri info
-   * @return configuration collection resource representation
-   */
-  @GET
-  @Produces("text/plain")
-  public Response getConfigurations(@Context HttpHeaders headers, @Context UriInfo ui) {
-    return handleRequest(headers, null, ui, Request.Type.GET, createConfigurationResource(m_clusterName));
-  }
-
-  /**
-   * Handles URL: /clusters/{clusterId}/configurations
-   * The body should contain:
-   * <pre>
-   * {
-   *     "type":"type_string",
-   *     "tag":"version_tag",
-   *     "properties":
-   *     {
-   *         "key1":"value1",
-   *         // ...
-   *         "keyN":"valueN"
-   *     }
-   * }
-   * </pre>
-   *
-   * To create multiple configurations in a single request, provide an array of configuration objects.
-   *
-   * @param headers http headers
-   * @param ui      uri info
-   * @return status code only, 201 if successful
-   */
-  @POST
-  @Produces("text/plain")
-  public Response createConfigurations(String body,@Context HttpHeaders headers, @Context UriInfo ui) {
-
-    return handleRequest(headers, body, ui, Request.Type.POST, createConfigurationResource(m_clusterName));
-  }
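-  // A sketch of a body creating two configurations at once (type/tag/property
-  // values are illustrative):
-  //   [ { "type":"core-site", "tag":"version1", "properties":{ "k":"v" } },
-  //     { "type":"hdfs-site", "tag":"version1", "properties":{ "k":"v" } } ]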
-
-  /**
-   * Create a configuration resource instance.
-   *
-   * @param clusterName cluster name
-   *
-   * @return a configuration resource instance
-   */
-  ResourceInstance createConfigurationResource(String clusterName) {
-    Map<Resource.Type,String> mapIds = new HashMap<Resource.Type, String>();
-    mapIds.put(Resource.Type.Cluster, clusterName);
-    mapIds.put(Resource.Type.Configuration, null);
-
-    return createResource(Resource.Type.Configuration, mapIds);
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/DeleteRequest.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/DeleteRequest.java
deleted file mode 100644
index ab856fb..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/DeleteRequest.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-import org.apache.ambari.server.api.resources.ResourceInstance;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.UriInfo;
-
-/**
- * A DELETE request.
- */
-public class DeleteRequest extends BaseRequest {
-  /**
-   * Constructor.
-   *
-   * @param headers     http headers
-   * @param body        http body
-   * @param uriInfo     uri information
-   * @param resource    associated resource definition
-   */
-  public DeleteRequest(HttpHeaders headers, String body, UriInfo uriInfo, ResourceInstance resource) {
-    super(headers, body, uriInfo, resource);
-  }
-
-  @Override
-  public Type getRequestType() {
-    return Type.DELETE;
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/GetRequest.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/GetRequest.java
deleted file mode 100644
index 463fa1f..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/GetRequest.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-import org.apache.ambari.server.api.resources.ResourceInstance;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.UriInfo;
-
-/**
- * A GET request.
- */
-public class GetRequest extends BaseRequest {
-  /**
-   * Constructor.
-   *
-   * @param headers     http headers
-   * @param body        http body
-   * @param uriInfo     uri information
-   * @param resource    associated resource definition
-   */
-  public GetRequest(HttpHeaders headers, String body, UriInfo uriInfo, ResourceInstance resource) {
-    super(headers, body, uriInfo, resource);
-  }
-
-  @Override
-  public Type getRequestType() {
-    return Type.GET;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/HostComponentService.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/HostComponentService.java
deleted file mode 100644
index c7f6b5e..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/HostComponentService.java
+++ /dev/null
@@ -1,211 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.apache.ambari.server.controller.spi.Resource;
-
-import javax.ws.rs.*;
-import javax.ws.rs.core.*;
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * Service responsible for host_components resource requests.
- */
-public class HostComponentService extends BaseService {
-  /**
-   * Parent cluster id.
-   */
-  private String m_clusterName;
-
-  /**
-   * Parent host id.
-   */
-  private String m_hostName;
-
-  /**
-   * Constructor.
-   *
-   * @param clusterName cluster id
-   * @param hostName    host id
-   */
-  public HostComponentService(String clusterName, String hostName) {
-    m_clusterName = clusterName;
-    m_hostName = hostName;
-  }
-
-  /**
-   * Handles GET /clusters/{clusterID}/hosts/{hostID}/host_components/{hostComponentID}
-   * Get a specific host_component.
-   *
-   * @param headers           http headers
-   * @param ui                uri info
-   * @param hostComponentName host_component id
-   * @return host_component resource representation
-   */
-  @GET
-  @Path("{hostComponentName}")
-  @Produces("text/plain")
-  public Response getHostComponent(@Context HttpHeaders headers, @Context UriInfo ui,
-                                   @PathParam("hostComponentName") String hostComponentName) {
-
-    //todo: needs to be refactored when properly handling exceptions
-    if (m_hostName == null) {
-      // don't allow case where host is not in url but a host_component instance resource is requested
-      String s = "Invalid request. Must provide host information when requesting a host_resource instance resource.";
-      return Response.status(400).entity(s).build();
-    }
-
-    return handleRequest(headers, null, ui, Request.Type.GET,
-        createHostComponentResource(m_clusterName, m_hostName, hostComponentName));
-  }
-
-  /**
-   * Handles GET /clusters/{clusterID}/hosts/{hostID}/host_components/
-   * Get all host components for a host.
-   *
-   * @param headers http headers
-   * @param ui      uri info
-   * @return host_component collection resource representation
-   */
-  @GET
-  @Produces("text/plain")
-  public Response getHostComponents(@Context HttpHeaders headers, @Context UriInfo ui) {
-    return handleRequest(headers, null, ui, Request.Type.GET,
-        createHostComponentResource(m_clusterName, m_hostName, null));
-  }
-
-  /**
-   * Handles POST /clusters/{clusterID}/hosts/{hostID}/host_components
-   * Create host components by specifying an array of host components in the http body.
-   * This is used to create multiple host components in a single request.
-   *
-   * @param body              http body
-   * @param headers           http headers
-   * @param ui                uri info
-   *
-   * @return status code only, 201 if successful
-   */
-  @POST
-  @Produces("text/plain")
-  public Response createHostComponents(String body, @Context HttpHeaders headers, @Context UriInfo ui) {
-
-    return handleRequest(headers, body, ui, Request.Type.POST,
-        createHostComponentResource(m_clusterName, m_hostName, null));
-  }
-
-  /**
-   * Handles POST /clusters/{clusterID}/hosts/{hostID}/host_components/{hostComponentID}
-   * Create a specific host_component.
-   *
-   * @param body              http body
-   * @param headers           http headers
-   * @param ui                uri info
-   * @param hostComponentName host_component id
-   *
-   * @return host_component resource representation
-   */
-  @POST
-  @Path("{hostComponentName}")
-  @Produces("text/plain")
-  public Response createHostComponent(String body, @Context HttpHeaders headers, @Context UriInfo ui,
-                                   @PathParam("hostComponentName") String hostComponentName) {
-
-    return handleRequest(headers, body, ui, Request.Type.POST,
-        createHostComponentResource(m_clusterName, m_hostName, hostComponentName));
-  }
-
-  /**
-   * Handles PUT /clusters/{clusterID}/hosts/{hostID}/host_components/{hostComponentID}
-   * Updates a specific host_component.
-   *
-   * @param body              http body
-   * @param headers           http headers
-   * @param ui                uri info
-   * @param hostComponentName host_component id
-   *
-   * @return information regarding updated host_component
-   */
-  @PUT
-  @Path("{hostComponentName}")
-  @Produces("text/plain")
-  public Response updateHostComponent(String body, @Context HttpHeaders headers, @Context UriInfo ui,
-                                      @PathParam("hostComponentName") String hostComponentName) {
-
-    return handleRequest(headers, body, ui, Request.Type.PUT,
-        createHostComponentResource(m_clusterName, m_hostName, hostComponentName));
-  }
-
-  /**
-   * Handles PUT /clusters/{clusterID}/hosts/{hostID}/host_components
-   * Updates multiple host_component resources.
-   *
-   * @param body              http body
-   * @param headers           http headers
-   * @param ui                uri info
-   *
-   * @return information regarding updated host_component resources
-   */
-  @PUT
-  @Produces("text/plain")
-  public Response updateHostComponents(String body, @Context HttpHeaders headers, @Context UriInfo ui) {
-
-    return handleRequest(headers, body, ui, Request.Type.PUT,
-        createHostComponentResource(m_clusterName, m_hostName, null));
-  }
-
-  /**
-   * Handles DELETE /clusters/{clusterID}/hosts/{hostID}/host_components/{hostComponentID}
-   * Delete a specific host_component.
-   *
-   * @param headers           http headers
-   * @param ui                uri info
-   * @param hostComponentName host_component id
-   *
-   * @return host_component resource representation
-   */
-  @DELETE
-  @Path("{hostComponentName}")
-  @Produces("text/plain")
-  public Response deleteHostComponent(@Context HttpHeaders headers, @Context UriInfo ui,
-                                   @PathParam("hostComponentName") String hostComponentName) {
-
-    return handleRequest(headers, null, ui, Request.Type.DELETE,
-        createHostComponentResource(m_clusterName, m_hostName, hostComponentName));
-  }
-
-  /**
-   * Create a host_component resource instance.
-   *
-   * @param clusterName       cluster name
-   * @param hostName          host name
-   * @param hostComponentName host_component name
-   *
-   * @return a host_component resource instance
-   */
-  ResourceInstance createHostComponentResource(String clusterName, String hostName, String hostComponentName) {
-    Map<Resource.Type,String> mapIds = new HashMap<Resource.Type, String>();
-    mapIds.put(Resource.Type.Cluster, clusterName);
-    mapIds.put(Resource.Type.Host, hostName);
-    mapIds.put(Resource.Type.HostComponent, hostComponentName);
-
-    return createResource(Resource.Type.HostComponent, mapIds);
-  }
-}
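
Every handler above funnels into createHostComponentResource, which marks a collection request by mapping the target type to null. A self-contained sketch of that convention, using a stand-in enum rather than Ambari's Resource.Type:

```java
import java.util.EnumMap;
import java.util.Map;

public class IdMapExample {
  // Stand-in for Resource.Type; not the Ambari enum.
  enum Type { Cluster, Host, HostComponent }

  public static void main(String[] args) {
    // Instance request: every id is present.
    Map<Type, String> instanceIds = new EnumMap<Type, String>(Type.class);
    instanceIds.put(Type.Cluster, "c1");
    instanceIds.put(Type.Host, "host1.example.com");
    instanceIds.put(Type.HostComponent, "DATANODE");

    // Collection request: a null id for the target type.
    Map<Type, String> collectionIds = new EnumMap<Type, String>(Type.class);
    collectionIds.put(Type.Cluster, "c1");
    collectionIds.put(Type.Host, "host1.example.com");
    collectionIds.put(Type.HostComponent, null);

    System.out.println("instance:   " + instanceIds);
    System.out.println("collection: " + collectionIds);
  }
}
```
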
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/HostService.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/HostService.java
deleted file mode 100644
index f89ea40..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/HostService.java
+++ /dev/null
@@ -1,231 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-import javax.ws.rs.DELETE;
-import javax.ws.rs.GET;
-import javax.ws.rs.POST;
-import javax.ws.rs.PUT;
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
-
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.apache.ambari.server.controller.spi.Resource;
-
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * Service responsible for hosts resource requests.
- */
-@Path("/hosts/")
-public class HostService extends BaseService {
-
-  /**
-   * Parent cluster id.
-   */
-  private String m_clusterName;
-
-  /**
-   * Constructor.
-   */
-  public HostService() {
-  }
-
-  /**
-   * Constructor.
-   *
-   * @param clusterName cluster id
-   */
-  public HostService(String clusterName) {
-    m_clusterName = clusterName;
-  }
-
-  /**
-   * Handles GET /clusters/{clusterID}/hosts/{hostID} and /hosts/{hostID}
-   * Get a specific host.
-   *
-   * @param headers  http headers
-   * @param ui       uri info
-   * @param hostName host id
-   * @return host resource representation
-   */
-  @GET
-  @Path("{hostName}")
-  @Produces("text/plain")
-  public Response getHost(@Context HttpHeaders headers, @Context UriInfo ui,
-                          @PathParam("hostName") String hostName) {
-
-    return handleRequest(headers, null, ui, Request.Type.GET,
-        createHostResource(m_clusterName, hostName, ui));
-  }
-
-  /**
-   * Handles GET /clusters/{clusterID}/hosts and /hosts
-   * Get all hosts for a cluster.
-   *
-   * @param headers http headers
-   * @param ui      uri info
-   * @return host collection resource representation
-   */
-  @GET
-  @Produces("text/plain")
-  public Response getHosts(@Context HttpHeaders headers, @Context UriInfo ui) {
-    return handleRequest(headers, null, ui, Request.Type.GET,
-        createHostResource(m_clusterName, null, ui));
-  }
-
-  /**
-   * Handles POST /clusters/{clusterID}/hosts
-   * Create hosts by specifying an array of hosts in the http body.
-   * This is used to create multiple hosts in a single request.
-   *
-   * @param body     http body
-   * @param headers  http headers
-   * @param ui       uri info
-   *
-   * @return status code only, 201 if successful
-   */
-  @POST
-  @Produces("text/plain")
-  public Response createHosts(String body, @Context HttpHeaders headers, @Context UriInfo ui) {
-
-    return handleRequest(headers, body, ui, Request.Type.POST,
-        createHostResource(m_clusterName, null, ui));
-  }
-
-  /**
-   * Handles POST /clusters/{clusterID}/hosts/{hostID}
-   * Create a specific host.
-   *
-   * @param body     http body
-   * @param headers  http headers
-   * @param ui       uri info
-   * @param hostName host id
-   *
-   * @return host resource representation
-   */
-  @POST
-  @Path("{hostName}")
-  @Produces("text/plain")
-  public Response createHost(String body, @Context HttpHeaders headers, @Context UriInfo ui,
-                          @PathParam("hostName") String hostName) {
-
-    return handleRequest(headers, body, ui, Request.Type.POST,
-        createHostResource(m_clusterName, hostName, ui));
-  }
-
-  /**
-   * Handles PUT /clusters/{clusterID}/hosts/{hostID}
-   * Updates a specific host.
-   *
-   * @param body     http body
-   * @param headers  http headers
-   * @param ui       uri info
-   * @param hostName host id
-   *
-   * @return information regarding updated host
-   */
-  @PUT
-  @Path("{hostName}")
-  @Produces("text/plain")
-  public Response updateHost(String body, @Context HttpHeaders headers, @Context UriInfo ui,
-                          @PathParam("hostName") String hostName) {
-
-    return handleRequest(headers, body, ui, Request.Type.PUT,
-        createHostResource(m_clusterName, hostName, ui));
-  }
-
-  /**
-   * Handles PUT /clusters/{clusterID}/hosts
-   * Updates multiple hosts.
-   *
-   * @param body     http body
-   * @param headers  http headers
-   * @param ui       uri info
-   *
-   * @return information regarding updated hosts
-   */
-  @PUT
-  @Produces("text/plain")
-  public Response updateHosts(String body, @Context HttpHeaders headers, @Context UriInfo ui) {
-
-    return handleRequest(headers, body, ui, Request.Type.PUT,
-        createHostResource(m_clusterName, null, ui));
-  }
-
-  /**
-   * Handles DELETE /clusters/{clusterID}/hosts/{hostID}
-   * Deletes a specific host.
-   *
-   * @param headers  http headers
-   * @param ui       uri info
-   * @param hostName host id
-   *
-   * @return host resource representation
-   */
-  @DELETE
-  @Path("{hostName}")
-  @Produces("text/plain")
-  public Response deleteHost(@Context HttpHeaders headers, @Context UriInfo ui,
-                             @PathParam("hostName") String hostName) {
-
-    return handleRequest(headers, null, ui, Request.Type.DELETE,
-        createHostResource(m_clusterName, hostName, ui));
-  }
-
-  /**
-   * Get the host_components sub-resource.
-   *
-   * @param hostName host id
-   * @return the host_components service
-   */
-  @Path("{hostName}/host_components")
-  public HostComponentService getHostComponentHandler(@PathParam("hostName") String hostName) {
-    return new HostComponentService(m_clusterName, hostName);
-  }
-
-  /**
-   * Create a host resource instance.
-   *
-   * @param clusterName  cluster name
-   * @param hostName     host name
-   * @param ui           uri information
-   *
-   * @return a host resource instance
-   */
-  ResourceInstance createHostResource(String clusterName, String hostName, UriInfo ui) {
-    boolean isAttached = ui.getRequestUri().toString().contains("/clusters/");
-
-    Map<Resource.Type,String> mapIds = new HashMap<Resource.Type, String>();
-    mapIds.put(Resource.Type.Host, hostName);
-    if (isAttached) {
-      mapIds.put(Resource.Type.Cluster, clusterName);
-    }
-
-    return createResource(Resource.Type.Host, mapIds);
-  }
-}
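
createHostResource above decides whether a host request is cluster-scoped purely by looking for a "/clusters/" segment in the request URI. A minimal sketch of that check; the example URLs are hypothetical:

```java
import java.net.URI;

public class AttachmentCheckExample {
  static boolean isAttached(URI requestUri) {
    // Same substring test used by createHostResource.
    return requestUri.toString().contains("/clusters/");
  }

  public static void main(String[] args) {
    System.out.println(isAttached(URI.create(
        "http://ambari.example.com:8080/api/v1/clusters/c1/hosts/h1"))); // true
    System.out.println(isAttached(URI.create(
        "http://ambari.example.com:8080/api/v1/hosts/h1")));             // false
  }
}
```
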
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/KeyService.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/KeyService.java
deleted file mode 100644
index 6d2d22f..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/KeyService.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.api.services;
-
-import com.google.inject.Inject;
-import org.apache.ambari.server.utils.StageUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.ws.rs.GET;
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.Produces;
-import javax.xml.bind.JAXBException;
-import java.io.IOException;
-import java.util.Collection;
-
-@Path("/keys/")
-public class KeyService {
-  private static final Logger log = LoggerFactory.getLogger(KeyService.class);
-  private static PersistKeyValueImpl persistKeyVal;
-
-  @Inject
-  public static void init(PersistKeyValueImpl instance) {
-    persistKeyVal = instance;
-  }
-
-  @Path("{number}")
-  @GET
-  @Produces("text/plain")
-  public String getKeys(@PathParam("number") int number) throws IOException, JAXBException {
-    Collection<String> keys = persistKeyVal.generateKeys(number);
-    String result = StageUtils.jaxbToString(keys);
-    log.info("Returning keys {}", result);
-    return result;
-  }
-
-}
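
KeyService.getKeys delegates to PersistKeyValueImpl.generateKeys, which simply produces the requested number of random UUID strings. A minimal sketch of that generation step, leaving out the StageUtils JAXB serialization:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.UUID;

public class GenerateKeysExample {
  public static void main(String[] args) {
    int number = 3; // corresponds to GET /keys/3
    List<String> keys = new ArrayList<String>(number);
    for (int i = 0; i < number; i++) {
      keys.add(UUID.randomUUID().toString()); // same scheme as generateKey()
    }
    System.out.println(keys);
  }
}
```
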
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/LogoutService.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/LogoutService.java
deleted file mode 100644
index 62e9380..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/LogoutService.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.api.services;
-
-import org.springframework.security.core.context.SecurityContextHolder;
-
-import javax.ws.rs.GET;
-import javax.ws.rs.Path;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.Response;
-
-/**
- * Service performing logout of current user
- */
-@Path("/logout")
-public class LogoutService {
-
-  @GET
-  @Produces("text/plain")
-  public Response performLogout() {
-    SecurityContextHolder.clearContext();
-    return Response.status(Response.Status.OK).build();
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/PersistKeyValueImpl.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/PersistKeyValueImpl.java
deleted file mode 100644
index 1bcf67e..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/PersistKeyValueImpl.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-import com.google.inject.Inject;
-import com.google.inject.Singleton;
-import com.google.inject.persist.Transactional;
-import org.apache.ambari.server.orm.dao.KeyValueDAO;
-import org.apache.ambari.server.orm.entities.KeyValueEntity;
-
-import javax.ws.rs.WebApplicationException;
-import javax.ws.rs.core.Response;
-import java.util.*;
-
-@Singleton
-public class PersistKeyValueImpl {
-
-  @Inject
-  KeyValueDAO keyValueDAO;
-
-  public String generateKey() {
-    return UUID.randomUUID().toString();
-  }
-
-  public Collection<String> generateKeys(int number) {
-    List<String> keys = new ArrayList<String>(number);
-    for (int i = 0; i < number; i++) {
-      keys.add(generateKey());
-    }
-    return keys;
-  }
-
-  public synchronized String getValue(String key) {
-    KeyValueEntity keyValueEntity = keyValueDAO.findByKey(key);
-    if (keyValueEntity != null) {
-      return keyValueEntity.getValue();
-    }
-    throw new WebApplicationException(Response.Status.NOT_FOUND);
-  }
-
-  public synchronized String put(String value) {
-    String key = generateKey();
-    put(key, value);
-    return key;
-  }
-
-  @Transactional
-  public synchronized void put(String key, String value) {
-    KeyValueEntity keyValueEntity = keyValueDAO.findByKey(key);
-    if (keyValueEntity != null) {
-      keyValueEntity.setValue(value);
-      keyValueDAO.merge(keyValueEntity);
-    } else {
-      keyValueEntity = new KeyValueEntity();
-      keyValueEntity.setKey(key);
-      keyValueEntity.setValue(value);
-      keyValueDAO.create(keyValueEntity);
-    }
-  }
-  
-  public synchronized Map<String, String> getAllKeyValues() {
-    Map<String, String> map = new HashMap<String, String>();
-    for (KeyValueEntity keyValueEntity : keyValueDAO.findAll()) {
-      map.put(keyValueEntity.getKey(), keyValueEntity.getValue());
-    }
-    return map;
-  }
-}
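
put(key, value) above is a classic upsert: merge the entity when the key already exists, create it otherwise; getValue maps a missing key to an HTTP 404. A minimal in-memory sketch of the same contract, with a HashMap standing in for the KeyValueDAO/JPA layer:

```java
import java.util.HashMap;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.UUID;

public class InMemoryKeyValueStore {
  private final Map<String, String> store = new HashMap<String, String>();

  public synchronized String put(String value) {
    String key = UUID.randomUUID().toString(); // same key scheme as generateKey()
    put(key, value);
    return key;
  }

  public synchronized void put(String key, String value) {
    // Map.put is already an upsert; the DAO version needs the
    // find-then-merge-or-create split shown above.
    store.put(key, value);
  }

  public synchronized String getValue(String key) {
    String value = store.get(key);
    if (value == null) {
      // The service layer translates this condition into an HTTP 404.
      throw new NoSuchElementException("No value for key: " + key);
    }
    return value;
  }
}
```
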
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/PersistKeyValueService.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/PersistKeyValueService.java
deleted file mode 100644
index 2ca83c2..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/PersistKeyValueService.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Map;
-
-import javax.ws.rs.*;
-import javax.ws.rs.core.Response;
-import javax.xml.bind.JAXBException;
-
-import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
-import org.apache.ambari.server.utils.StageUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import com.google.inject.Inject;
-
-@Path("/persist/")
-public class PersistKeyValueService {
-  private static PersistKeyValueImpl persistKeyVal;
-  private static Log LOG = LogFactory.getLog(PersistKeyValueService.class);
-
-  @Inject
-  public static void init(PersistKeyValueImpl instance) {
-    persistKeyVal = instance;
-  }
-
-  @SuppressWarnings("unchecked")
-  @POST
-  @Produces("text/plain")
-  public Response update(String keyValues)
-      throws WebApplicationException, InvalidStateTransitionException,
-      JAXBException, IOException {
-    LOG.info("Received message from UI " + keyValues);
-    Map<String, String> keyValuesMap = StageUtils.fromJson(keyValues, Map.class);
-    /* Call into the heartbeat handler */
-
-    for (Map.Entry<String, String> keyValue: keyValuesMap.entrySet()) {
-      persistKeyVal.put(keyValue.getKey(), keyValue.getValue());
-    }
-    return Response.status(Response.Status.ACCEPTED).build();
-  }
-
-  @SuppressWarnings("unchecked")
-  @PUT
-  @Produces("text/plain")
-  public String store(String values) throws IOException, JAXBException {
-    LOG.info("Received message from UI " + values);
-    Collection<String> valueCollection = StageUtils.fromJson(values, Collection.class);
-    Collection<String> keys = new ArrayList<String>(valueCollection.size());
-    for (String s : valueCollection) {
-      keys.add(persistKeyVal.put(s));
-    }
-    String stringRet = StageUtils.jaxbToString(keys);
-    LOG.info("Returning " + stringRet);
-    return stringRet;
-  }
-  
-  @GET
-  @Produces("text/plain")
-  @Path("{keyName}")
-  public String getKey( @PathParam("keyName") String keyName) {
-    LOG.info("Looking for keyName " + keyName);
-    return persistKeyVal.getValue(keyName);
-  }
-  
-  @GET
-  @Produces("text/plain")
-  public String getAllKeyValues() throws JAXBException, IOException {
-    Map<String, String> ret = persistKeyVal.getAllKeyValues();
-    String stringRet = StageUtils.jaxbToString(ret);
-    LOG.info("Returning " + stringRet);
-    return stringRet;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/PostRequest.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/PostRequest.java
deleted file mode 100644
index c25b556..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/PostRequest.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-import org.apache.ambari.server.api.resources.ResourceInstance;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.UriInfo;
-
-/**
- * A POST request.
- */
-public class PostRequest extends BaseRequest {
-  /**
-   * Constructor.
-   *
-   * @param headers     http headers
-   * @param body        http body
-   * @param uriInfo     uri information
-   * @param resource    associated resource definition
-   */
-  public PostRequest(HttpHeaders headers, String body, UriInfo uriInfo, ResourceInstance resource) {
-    super(headers, body, uriInfo, resource);
-  }
-
-  @Override
-  public Type getRequestType() {
-    return Type.POST;
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/PutRequest.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/PutRequest.java
deleted file mode 100644
index d7efc5c..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/PutRequest.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-import org.apache.ambari.server.api.resources.ResourceInstance;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.UriInfo;
-
-/**
- * A PUT request.
- */
-public class PutRequest extends BaseRequest {
-  /**
-   * Constructor.
-   *
-   * @param headers     http headers
-   * @param body        http body
-   * @param uriInfo     uri information
-   * @param resource    associated resource definition
-   */
-  public PutRequest(HttpHeaders headers, String body, UriInfo uriInfo, ResourceInstance resource) {
-    super(headers, body, uriInfo, resource);
-  }
-
-  @Override
-  public Type getRequestType() {
-    return Type.PUT;
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/QueryPostRequest.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/QueryPostRequest.java
deleted file mode 100644
index c8ed76c..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/QueryPostRequest.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-package org.apache.ambari.server.api.services;
-
-import org.apache.ambari.server.api.resources.ResourceInstance;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.UriInfo;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Request for creating sub-resources of instances based on a query.
- */
-public class QueryPostRequest extends PostRequest {
-  /**
-   * Constructor.
-   *
-   * @param headers      http headers
-   * @param body         http body
-   * @param uriInfo      uri information
-   * @param resource     associated resource instance
-   */
-  public QueryPostRequest(HttpHeaders headers, String body, UriInfo uriInfo, ResourceInstance resource) {
-    super(headers, body, uriInfo, resource);
-  }
-
-  @Override
-  public Set<Map<String, Object>> getHttpBodyProperties() {
-    String httpBody = getHttpBody();
-    //strip array name
-    int startIdx = httpBody.indexOf("[");
-    int endIdx = httpBody.lastIndexOf("]");
-
-    return getHttpBodyParser().parse(httpBody.substring(startIdx, endIdx + 1));
-  }
-
-  @Override
-  public Type getRequestType() {
-    return Type.QUERY_POST;
-  }
-}
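
getHttpBodyProperties above strips the wrapping array name by slicing between the first '[' and the last ']' before handing the text to the body parser. A standalone sketch; the payload is illustrative and, like the original, assumes a well-formed body:

```java
public class StripArrayNameExample {
  public static void main(String[] args) {
    String httpBody =
        "{ \"host_components\": [ { \"HostRoles\": { \"state\": \"INSTALLED\" } } ] }";

    // Same slicing as QueryPostRequest.getHttpBodyProperties.
    int startIdx = httpBody.indexOf("[");
    int endIdx = httpBody.lastIndexOf("]");
    String arrayOnly = httpBody.substring(startIdx, endIdx + 1);

    System.out.println(arrayOnly);
    // -> [ { "HostRoles": { "state": "INSTALLED" } } ]
  }
}
```
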
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/Request.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/Request.java
deleted file mode 100644
index e07ee39..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/Request.java
+++ /dev/null
@@ -1,130 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-import org.apache.ambari.server.api.predicate.InvalidQueryException;
-import org.apache.ambari.server.api.resources.ResourceDefinition;
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.apache.ambari.server.api.services.serializers.ResultSerializer;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.TemporalInfo;
-
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Provides information on the current request.
- */
-public interface Request {
-
-  /**
-   * Enum of request types.
-   */
-  public enum Type {
-    GET,
-    POST,
-    PUT,
-    DELETE,
-    QUERY_POST
-  }
-
-  /**
-   * Obtain the resource definition which corresponds to the resource being operated on by the request.
-   * The resource definition provides information about the resource type;
-   *
-   * @return the associated {@link ResourceDefinition}
-   */
-  public ResourceInstance getResource();
-
-  /**
-   * Obtain the URI of this request.
-   *
-   * @return the request uri
-   */
-  public String getURI();
-
-  /**
-   * Obtain the http request type.  Type is one of {@link Type}.
-   *
-   * @return the http request type
-   */
-  public Type getRequestType();
-
-  /**
-   * Obtain the api version of the request.  The api version is specified in the request URI.
-   *
-   * @return the api version of the request
-   */
-  public int getAPIVersion();
-
-  /**
-   * Obtain the query predicate that was built from the user provided predicate fields in the query string.
-   * If multiple predicates are supplied, then they will be combined using the appropriate grouping predicate
-   * such as 'AND'.
-   *
-   * @return the user defined predicate
-   * @throws InvalidQueryException if the query syntax is invalid
-   */
-  public Predicate getQueryPredicate() throws InvalidQueryException;
-
-  /**
-   * Obtain the partial response fields and associated temporal information which were provided
-   * in the query string of the request uri.
-   *
-   * @return map of partial response propertyId to temporal information
-   */
-  public Map<String, TemporalInfo> getFields();
-
-  /**
-   * Obtain the result serializer for the request. The default serializer is of type JSON.
-   *
-   * @return the result serializer for the request
-   */
-  public ResultSerializer getResultSerializer();
-
-  /**
-   * Obtain the processor which processes the result returned from the request handler.
-   * The post processor adds additional information such as href fields to the result.
-   *
-   * @return the result processor associated with the request
-   */
-  public ResultPostProcessor getResultPostProcessor();
-
-  /**
-   * Obtain the http headers associated with the request.
-   *
-   * @return the http headers
-   */
-  public Map<String, List<String>> getHttpHeaders();
-
-  /**
-   * Obtain the http body associated with the request.
-   *
-   * @return the http body
-   */
-  public String getHttpBody();
-
-  /**
-   * Obtain the properties which have been parsed from the http body.
-   *
-   * @return a set of maps containing the properties contained in the http body
-   */
-  public Set<Map<String, Object>> getHttpBodyProperties();
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/RequestFactory.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/RequestFactory.java
deleted file mode 100644
index a8d13d3..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/RequestFactory.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-import org.apache.ambari.server.api.resources.ResourceInstance;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.UriInfo;
-
-/**
- * Factory for {@link Request} instances.
- */
-public class RequestFactory {
-  /**
-   * Create a request instance.
-   *
-   * @param headers      http headers
-   * @param uriInfo      uri information
-   * @param requestType  http request type
-   * @param resource     associated resource instance
-   *
-   * @return a new Request instance
-   */
-  public Request createRequest(HttpHeaders headers, String body, UriInfo uriInfo, Request.Type requestType,
-                               ResourceInstance resource) {
-    switch (requestType) {
-      case GET:
-        return new GetRequest(headers, body, uriInfo, resource);
-      case PUT:
-        return new PutRequest(headers, body, uriInfo, resource);
-      case DELETE:
-        return new DeleteRequest(headers, body, uriInfo, resource);
-      case POST:
-        return (uriInfo.getQueryParameters().isEmpty() || body == null) ?
-            new PostRequest(headers, body, uriInfo, resource) :
-            new QueryPostRequest(headers, body, uriInfo, resource);
-      default:
-        throw new IllegalArgumentException("Invalid request type: " + requestType);
-    }
-  }
-
-}
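
The only non-obvious branch in the factory is POST: a request is promoted to QUERY_POST only when the URI carries query parameters and a body is present. A sketch of just that decision rule:

```java
public class PostDispatchExample {
  enum Kind { POST, QUERY_POST } // stand-in for Request.Type

  static Kind dispatch(boolean queryParametersEmpty, String body) {
    // Mirrors the ternary in RequestFactory.createRequest.
    return (queryParametersEmpty || body == null) ? Kind.POST : Kind.QUERY_POST;
  }

  public static void main(String[] args) {
    System.out.println(dispatch(true, "{ }"));  // POST: no query parameters
    System.out.println(dispatch(false, null));  // POST: no body
    System.out.println(dispatch(false, "{ }")); // QUERY_POST
  }
}
```
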
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/RequestService.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/RequestService.java
deleted file mode 100644
index 5594050..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/RequestService.java
+++ /dev/null
@@ -1,115 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.apache.ambari.server.controller.spi.Resource;
-
-import javax.ws.rs.GET;
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
-import java.util.HashMap;
-import java.util.Map;
-
-
-/**
- * Service responsible for request resource requests.
- */
-public class RequestService extends BaseService {
-  /**
-   * Parent cluster name.
-   */
-  private String m_clusterName;
-
-
-  /**
-   * Constructor.
-   *
-   * @param clusterName cluster id
-   */
-  public RequestService(String clusterName) {
-    m_clusterName = clusterName;
-  }
-
-  /**
-   * Handles URL: /clusters/{clusterID}/requests/{requestID}
-   * Get a specific request.
-   *
-   * @param headers    http headers
-   * @param ui         uri info
-   * @param requestId  request id
-   *
-   * @return request resource representation
-   */
-  @GET
-  @Path("{requestId}")
-  @Produces("text/plain")
-  public Response getRequest(@Context HttpHeaders headers, @Context UriInfo ui,
-                             @PathParam("requestId") String requestId) {
-
-    return handleRequest(headers, null, ui, Request.Type.GET,
-        createRequestResource(m_clusterName, requestId));
-  }
-
-  /**
-   * Handles URL: /clusters/{clusterId}/requests
-   * Get all requests for a cluster.
-   *
-   * @param headers http headers
-   * @param ui      uri info
-   *
-   * @return request collection resource representation
-   */
-  @GET
-  @Produces("text/plain")
-  public Response getRequests(@Context HttpHeaders headers, @Context UriInfo ui) {
-    return handleRequest(headers, null, ui, Request.Type.GET,
-        createRequestResource(m_clusterName, null));
-  }
-
-  /**
-   * Gets the tasks sub-resource.
-   */
-  @Path("{requestId}/tasks")
-  public TaskService getTaskHandler(@PathParam("requestId") String requestId) {
-    return new TaskService(m_clusterName, requestId);
-  }
-
-  /**
-   * Create a request resource instance.
-   *
-   * @param clusterName  cluster name
-   * @param requestId    request id
-   *
-   * @return a request resource instance
-   */
-  ResourceInstance createRequestResource(String clusterName, String requestId) {
-    Map<Resource.Type,String> mapIds = new HashMap<Resource.Type, String>();
-    mapIds.put(Resource.Type.Cluster, clusterName);
-    mapIds.put(Resource.Type.Request, requestId);
-
-    return createResource(Resource.Type.Request, mapIds);
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/Result.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/Result.java
deleted file mode 100644
index c827ac4..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/Result.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.api.util.TreeNode;
-
-/**
- * Represents a result from a request handler invocation.
- */
-public interface Result {
-
-  public static enum STATUS { OK(200, "OK", false), CREATED(201, "Created", false), ACCEPTED(202, "Accepted", false),
-    CONFLICT(409, "Resource Conflict", true), NOT_FOUND(404, "Not Found", true), BAD_REQUEST(400, "Bad Request", true),
-    UNAUTHORIZED(401, "Unauthorized", true), FORBIDDEN(403, "Forbidden", true),
-    SERVER_ERROR(500, "Internal Server Error", true);
-
-    private int    m_code;
-    private String m_desc;
-    private boolean m_isErrorState;
-
-    private STATUS(int code, String description, boolean isErrorState) {
-      m_code = code;
-      m_desc = description;
-      m_isErrorState = isErrorState;
-    }
-
-    public int getStatus() {
-      return m_code;
-    }
-
-    public String getDescription() {
-      return m_desc;
-    }
-
-    public boolean isErrorState() {
-      return m_isErrorState;
-    }
-
-    @Override
-    public String toString() {
-      return getDescription();
-    }
-  };
-
-  /**
-   * Obtain the results of the request invocation as a Tree structure.
-   *
-   * @return the results of the request as a Tree structure
-   */
-  public TreeNode<Resource> getResultTree();
-
-  /**
-   * Determine whether the request was handled synchronously.
-   * If the request is synchronous, all work was completed prior to returning.
-   *
-   * @return true if the request was synchronous, false if it was asynchronous
-   */
-  public boolean isSynchronous();
-
-  /** Obtain the status of the result. */
-  public ResultStatus getStatus();
-
-  /** Set the status of the result. */
-  public void setResultStatus(ResultStatus status);
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/ResultImpl.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/ResultImpl.java
deleted file mode 100644
index adcbde1..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/ResultImpl.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.api.util.TreeNode;
-import org.apache.ambari.server.api.util.TreeNodeImpl;
-
-
-/**
- * Result implementation.
- */
-public class ResultImpl implements Result {
-
-  /**
-   * Whether the request was handled synchronously.
-   */
-  private boolean m_synchronous;
-
-  /**
-   * Result status.
-   */
-  private ResultStatus m_status;
-
-  /**
-   * Tree structure which holds the results
-   */
-  private TreeNode<Resource> m_tree = new TreeNodeImpl<Resource>(null, null, null);
-
-
-  /**
-   * Constructor.
-   *
-   * @param synchronous true if request was handled synchronously, false otherwise
-   */
-  public ResultImpl(boolean synchronous) {
-    m_synchronous = synchronous;
-  }
-
-  /**
-   * Constructor.
-   *
-   * @param status  result status
-   */
-  public ResultImpl(ResultStatus status) {
-    m_status = status;
-  }
-
-  @Override
-  public TreeNode<Resource> getResultTree() {
-    return m_tree;
-  }
-
-  @Override
-  public boolean isSynchronous() {
-    return m_synchronous;
-  }
-
-  @Override
-  public ResultStatus getStatus() {
-    return m_status;
-  }
-
-  @Override
-  public void setResultStatus(ResultStatus status) {
-    m_status = status;
-  }
-}
-
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/ResultPostProcessor.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/ResultPostProcessor.java
deleted file mode 100644
index ab25bb2..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/ResultPostProcessor.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-/**
- * Processor which processes result objects prior to them being returned to the service.
- * Processing can include adding additional data such as hrefs, or modifying/deleting existing data.
- */
-public interface ResultPostProcessor {
-  /**
-   * Process the given result.
-   * The passed-in result is modified directly.
-   *
-   * @param result the result to process.
-   */
-  public void process(Result result);
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/ResultPostProcessorImpl.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/ResultPostProcessorImpl.java
deleted file mode 100644
index c02e0a2..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/ResultPostProcessorImpl.java
+++ /dev/null
@@ -1,123 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-import org.apache.ambari.server.api.resources.RequestResourceDefinition;
-import org.apache.ambari.server.api.resources.ResourceDefinition;
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.api.util.TreeNode;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Processes returned results to add href's and other content.
- */
-public class ResultPostProcessorImpl implements ResultPostProcessor {
-  /**
-   * the associated request
-   */
-  private Request m_request;
-
-  /**
-   * Map of resource post processors keyed by resource type.
-   * These are used to act on specific resource types contained in the result.
-   */
-  Map<Resource.Type, List<ResourceDefinition.PostProcessor>>
-      m_mapPostProcessors = new HashMap<Resource.Type, List<ResourceDefinition.PostProcessor>>();
-
-
-  /**
-   * Constructor.
-   *
-   * @param request the associated request
-   */
-  public ResultPostProcessorImpl(Request request) {
-    m_request = request;
-
-    registerResourceProcessors(m_request.getResource());
-  }
-
-  @Override
-  public void process(Result result) {
-    processNode(result.getResultTree(), m_request.getURI());
-  }
-
-  /**
-   * Process a node of the result tree.  Recursively processes child nodes.
-   *
-   * @param node the node to process
-   * @param href the current href
-   */
-  private void processNode(TreeNode<Resource> node, String href) {
-    Resource r = node.getObject();
-    if (r != null) {
-      List<ResourceDefinition.PostProcessor> listProcessors =
-          m_mapPostProcessors.get(r.getType());
-      for (ResourceDefinition.PostProcessor processor : listProcessors) {
-        processor.process(m_request, node, href);
-      }
-      href = node.getProperty("href");
-      int i = href.indexOf('?');
-      if (i != -1) {
-        href = href.substring(0, i);
-      }
-    } else {
-      String isItemsCollection = node.getProperty("isCollection");
-      if (node.getName() == null && "true".equals(isItemsCollection)) {
-        node.setName("items");
-        node.setProperty("href", href);
-      }
-    }
-    for (TreeNode<Resource> child : node.getChildren()) {
-      processNode(child, href);
-    }
-  }
-
-  /**
-   * Registers the resource processors.
-   * Recursively registers child resource processors.
-   *
-   * @param resource the root resource
-   */
-  private void registerResourceProcessors(ResourceInstance resource) {
-    //todo: reconsider registration mechanism
-    Resource.Type type = resource.getResourceDefinition().getType();
-    List<ResourceDefinition.PostProcessor> listProcessors = m_mapPostProcessors.get(type);
-    if (listProcessors == null) {
-      listProcessors = new ArrayList<ResourceDefinition.PostProcessor>();
-      m_mapPostProcessors.put(type, listProcessors);
-    }
-    listProcessors.addAll(resource.getResourceDefinition().getPostProcessors());
-
-    for (ResourceInstance child : resource.getSubResources().values()) {
-      // avoid cycle
-      if (!m_mapPostProcessors.containsKey(child.getResourceDefinition().getType())) {
-        registerResourceProcessors(child);
-      }
-    }
-
-    // always add Request post processors since they may be returned but will not be a child
-    m_mapPostProcessors.put(Resource.Type.Request, new RequestResourceDefinition().getPostProcessors());
-  }
-
-}
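
For reference, a standalone sketch (not Ambari code) of the href handling in processNode() above: the query portion of a node's href is stripped before the href is propagated to child nodes.

    // Hypothetical illustration only; mirrors the indexOf('?') logic above.
    public class HrefTrimExample {
      static String baseHref(String href) {
        int i = href.indexOf('?');
        return i == -1 ? href : href.substring(0, i);
      }

      public static void main(String[] args) {
        // prints: http://host:8080/api/v1/clusters/c1/services
        System.out.println(baseHref("http://host:8080/api/v1/clusters/c1/services?fields=*"));
      }
    }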
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/ResultStatus.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/ResultStatus.java
deleted file mode 100644
index 6284879..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/ResultStatus.java
+++ /dev/null
@@ -1,176 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-/**
- * Result status information.
- */
-public class ResultStatus {
-
-  /**
-   * STATUS enum. Maps a status to a status code.
-   */
-  public enum STATUS {
-    OK(200, "OK", false),
-    CREATED(201, "Created", false),
-    ACCEPTED(202, "Accepted", false),
-    CONFLICT(409, "Resource Conflict", true),
-    NOT_FOUND(404, "Not Found", true),
-    BAD_REQUEST(400, "Bad Request", true),
-    UNAUTHORIZED(401, "Unauthorized", true),
-    FORBIDDEN(403, "Forbidden", true),
-    SERVER_ERROR(500, "Internal Server Error", true);
-
-    /**
-     * Status code
-     */
-    private int m_code;
-
-    /**
-     * Description
-     */
-    private String m_desc;
-
-    /**
-     * whether this is an error state
-     */
-    private boolean m_isErrorState;
-
-    /**
-     * Constructor.
-     *
-     * @param code         status code
-     * @param description  description
-     * @param isErrorState whether this is an error state
-     */
-    private STATUS(int code, String description, boolean isErrorState) {
-      m_code = code;
-      m_desc = description;
-      m_isErrorState = isErrorState;
-    }
-
-    /**
-     * Obtain the status code.
-     * This is an http response code.
-     *
-     * @return  the status code
-     */
-    public int getStatus() {
-      return m_code;
-    }
-
-    /**
-     * Obtain a brief description.
-     *
-     * @return the description
-     */
-    public String getDescription() {
-      return m_desc;
-    }
-
-    /**
-     * Whether this status is an error state
-     *
-     * @return true if this is an error state; false otherwise
-     */
-    public boolean isErrorState() {
-      return m_isErrorState;
-    }
-
-    @Override
-    public String toString() {
-      return getDescription();
-    }
-  }
-
-  /**
-   * Status instance
-   */
-  private STATUS m_status;
-
-  /**
-   * Result status message
-   */
-  private String m_msg;
-
-  /**
-   * Constructor.
-   *
-   * @param status result status
-   * @param msg    result msg.  Usually used in case of an error.
-   */
-  public ResultStatus(STATUS status, String msg) {
-    m_status       = status;
-    m_msg          = msg;
-  }
-
-  /**
-   * Constructor.
-   *
-   * @param status  result status
-   */
-  public ResultStatus(STATUS status) {
-    m_status = status;
-  }
-
-  /**
-   * Constructor.
-   *
-   * @param status  result status
-   * @param e       result exception
-   */
-  public ResultStatus(STATUS status, Exception e) {
-    m_status = status;
-    m_msg = e.toString();
-  }
-
-  /**
-   * Obtain the result status.
-   * The result status contains a status code and a description of the status.
-   *
-   * @return  the result status
-   */
-  public STATUS getStatus() {
-    return m_status;
-  }
-
-  /**
-   * Obtain the status code.
-   * This is a shortcut to obtaining the status code from the associated result status.
-   *
-   * @return the status code
-   */
-  public int getStatusCode() {
-    return m_status.getStatus();
-  }
-
-  /**
-   * Determine whether the status is an error state.
-   * This is a shortcut to getting this information from the associated result status.
-   *
-   * @return true if the status is an error state; false otherwise
-   */
-  public boolean isErrorState() {
-    return m_status.isErrorState();
-  }
-
-  /**
-   * Obtain the result message.
-   * This message is usually used when an exception occurred.
-   *
-   * @return the result message
-   */
-  public String getMessage() {
-    return m_msg;
-  }
-}
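
A minimal usage sketch, assuming the ResultStatus class above is on the classpath; the commented values follow directly from the STATUS enum definitions.

    public class ResultStatusExample {
      public static void main(String[] args) {
        ResultStatus status = new ResultStatus(ResultStatus.STATUS.NOT_FOUND, "no such cluster");
        System.out.println(status.getStatusCode()); // 404
        System.out.println(status.isErrorState());  // true
        System.out.println(status.getStatus());     // "Not Found" via toString()
      }
    }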
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/ServiceService.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/ServiceService.java
deleted file mode 100644
index da834a5..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/ServiceService.java
+++ /dev/null
@@ -1,210 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.apache.ambari.server.controller.spi.Resource;
-
-import javax.ws.rs.*;
-import javax.ws.rs.core.*;
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * Service responsible for services resource requests.
- */
-public class ServiceService extends BaseService {
-  /**
-   * Parent cluster name.
-   */
-  private String m_clusterName;
-
-  /**
-   * Constructor.
-   *
-   * @param clusterName cluster id
-   */
-  public ServiceService(String clusterName) {
-    m_clusterName = clusterName;
-  }
-
-  /**
-   * Handles URL: /clusters/{clusterID}/services/{serviceID}
-   * Get a specific service.
-   *
-   * @param headers     http headers
-   * @param ui          uri info
-   * @param serviceName service id
-   * @return service resource representation
-   */
-  @GET
-  @Path("{serviceName}")
-  @Produces("text/plain")
-  public Response getService(@Context HttpHeaders headers, @Context UriInfo ui,
-                             @PathParam("serviceName") String serviceName) {
-
-    return handleRequest(headers, null, ui, Request.Type.GET,
-        createServiceResource(m_clusterName, serviceName));
-  }
-
-  /**
-   * Handles URL: /clusters/{clusterId}/services
-   * Get all services for a cluster.
-   *
-   * @param headers http headers
-   * @param ui      uri info
-   * @return service collection resource representation
-   */
-  @GET
-  @Produces("text/plain")
-  public Response getServices(@Context HttpHeaders headers, @Context UriInfo ui) {
-    return handleRequest(headers, null, ui, Request.Type.GET,
-        createServiceResource(m_clusterName, null));
-  }
-
-  /**
-   * Handles: POST /clusters/{clusterId}/services/{serviceId}
-   * Create a specific service.
-   *
-   * @param body        http body
-   * @param headers     http headers
-   * @param ui          uri info
-   * @param serviceName service id
-   * @return information regarding the created service
-   */
-  @POST
-  @Path("{serviceName}")
-  @Produces("text/plain")
-  public Response createService(String body, @Context HttpHeaders headers, @Context UriInfo ui,
-                                @PathParam("serviceName") String serviceName) {
-
-    return handleRequest(headers, body, ui, Request.Type.POST,
-        createServiceResource(m_clusterName, serviceName));
-  }
-
-  /**
-   * Handles: POST /clusters/{clusterId}/services
-   * Create multiple services.
-   *
-   * @param body        http body
-   * @param headers     http headers
-   * @param ui          uri info
-   * @return information regarding the created services
-   */
-  @POST
-  @Produces("text/plain")
-  public Response createServices(String body, @Context HttpHeaders headers, @Context UriInfo ui) {
-
-    return handleRequest(headers, body, ui, Request.Type.POST,
-        createServiceResource(m_clusterName, null));
-  }
-
-  /**
-   * Handles: PUT /clusters/{clusterId}/services/{serviceId}
-   * Update a specific service.
-   *
-   * @param body        http body
-   * @param headers     http headers
-   * @param ui          uri info
-   * @param serviceName service id
-   * @return information regarding the updated service
-   */
-  @PUT
-  @Path("{serviceName}")
-  @Produces("text/plain")
-  public Response updateService(String body, @Context HttpHeaders headers, @Context UriInfo ui,
-                                @PathParam("serviceName") String serviceName) {
-
-    return handleRequest(headers, body, ui, Request.Type.PUT, createServiceResource(m_clusterName, serviceName));
-  }
-
-  /**
-   * Handles: PUT /clusters/{clusterId}/services
-   * Update multiple services.
-   *
-   * @param body        http body
-   * @param headers     http headers
-   * @param ui          uri info
-   * @return information regarding the updated services
-   */
-  @PUT
-  @Produces("text/plain")
-  public Response updateServices(String body, @Context HttpHeaders headers, @Context UriInfo ui) {
-
-    return handleRequest(headers, body, ui, Request.Type.PUT, createServiceResource(m_clusterName, null));
-  }
-
-  /**
-   * Handles: DELETE /clusters/{clusterId}/services/{serviceId}
-   * Delete a specific service.
-   *
-   * @param headers     http headers
-   * @param ui          uri info
-   * @param serviceName service id
-   * @return information regarding the deleted service
-   */
-  @DELETE
-  @Path("{serviceName}")
-  @Produces("text/plain")
-  public Response deleteService(@Context HttpHeaders headers, @Context UriInfo ui,
-                                @PathParam("serviceName") String serviceName) {
-
-    return handleRequest(headers, null, ui, Request.Type.DELETE, createServiceResource(m_clusterName, serviceName));
-  }
-
-  /**
-   * Get the components sub-resource.
-   *
-   * @param serviceName service id
-   * @return the components service
-   */
-  @Path("{serviceName}/components")
-  public ComponentService getComponentHandler(@PathParam("serviceName") String serviceName) {
-
-    return new ComponentService(m_clusterName, serviceName);
-  }
-  
-  /**
-   * Get the actions sub-resource.
-   *
-   * @param serviceName service id
-   * @return the action service
-   */
-  @Path("{serviceName}/actions")
-  public ActionService getActionHandler(@PathParam("serviceName") String serviceName) {
-    return new ActionService(m_clusterName, serviceName);
-  }
-
-  /**
-   * Create a service resource instance.
-   *
-   * @param clusterName  cluster name
-   * @param serviceName  service name
-   *
-   * @return a service resource instance
-   */
-  ResourceInstance createServiceResource(String clusterName, String serviceName) {
-    Map<Resource.Type,String> mapIds = new HashMap<Resource.Type, String>();
-    mapIds.put(Resource.Type.Cluster, clusterName);
-    mapIds.put(Resource.Type.Service, serviceName);
-
-    return createResource(Resource.Type.Service, mapIds);
-  }
-}
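
A hypothetical client-side call against the endpoint handled by getService() above, using the Jersey 1.x client API; the host, port, cluster, and service names are illustrative only.

    import com.sun.jersey.api.client.Client;
    import com.sun.jersey.api.client.WebResource;

    public class ServiceGetExample {
      public static void main(String[] args) {
        Client client = Client.create();
        WebResource resource =
            client.resource("http://localhost:8080/api/v1/clusters/c1/services/HDFS");
        // The resource declares @Produces("text/plain"), though the payload is JSON.
        String body = resource.accept("text/plain").get(String.class);
        System.out.println(body);
      }
    }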
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/TaskService.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/TaskService.java
deleted file mode 100644
index 3997375..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/TaskService.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.apache.ambari.server.controller.spi.Resource;
-
-import javax.ws.rs.GET;
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * Service responsible for task resource requests.
- */
-public class TaskService extends BaseService {
-  /**
-   * Parent cluster id.
-   */
-  private String m_clusterName;
-
-  /**
-   * Parent request id.
-   */
-  private String m_requestId;
-
-  /**
-   * Constructor.
-   *
-   * @param clusterName  cluster id
-   * @param requestId    request id
-   */
-  public TaskService(String clusterName, String requestId) {
-    m_clusterName = clusterName;
-    m_requestId = requestId;
-  }
-
-  /**
-   * Handles GET: /clusters/{clusterID}/requests/{requestID}/tasks/{taskID}
-   * Get a specific task.
-   *
-   * @param headers  http headers
-   * @param ui       uri info
-   * @param taskId   task id
-   *
-   * @return a task resource representation
-   */
-  @GET
-  @Path("{taskId}")
-  @Produces("text/plain")
-  public Response getTask(@Context HttpHeaders headers, @Context UriInfo ui,
-                          @PathParam("taskId") String taskId) {
-
-    return handleRequest(headers, null, ui, Request.Type.GET,
-        createTaskResource(m_clusterName, m_requestId, taskId));
-  }
-
-  /**
-   * Handles GET: /clusters/{clusterID}/requests/{requestID}/tasks
-   * Get all tasks for a request.
-   *
-   * @param headers http headers
-   * @param ui      uri info
-   *
-   * @return task collection resource representation
-   */
-  @GET
-  @Produces("text/plain")
-  public Response getTasks(@Context HttpHeaders headers, @Context UriInfo ui) {
-    return handleRequest(headers, null, ui, Request.Type.GET,
-        createTaskResource(m_clusterName, m_requestId, null));
-  }
-
-  /**
-   * Create a task resource instance.
-   *
-   * @param clusterName  cluster name
-   * @param requestId    request id
-   * @param taskId       task id
-   *
-   * @return a task resource instance
-   */
-  ResourceInstance createTaskResource(String clusterName, String requestId, String taskId) {
-    Map<Resource.Type,String> mapIds = new HashMap<Resource.Type, String>();
-    mapIds.put(Resource.Type.Cluster, clusterName);
-    mapIds.put(Resource.Type.Request, requestId);
-    mapIds.put(Resource.Type.Task, taskId);
-
-    return createResource(Resource.Type.Task, mapIds);
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserService.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserService.java
deleted file mode 100644
index c0a4976..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/UserService.java
+++ /dev/null
@@ -1,130 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.api.services;
-
-import javax.ws.rs.DELETE;
-import javax.ws.rs.GET;
-import javax.ws.rs.POST;
-import javax.ws.rs.PUT;
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
-
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.apache.ambari.server.controller.spi.Resource;
-
-import java.util.Collections;
-
-/**
- * Service responsible for user requests.
- */
-@Path("/users/")
-public class UserService extends BaseService {
-
-  /**
-   * Gets all users.
-   * Handles: GET /users requests.
-   *
-   * @param headers  http headers
-   * @param ui       uri info
-   * @return a collection of all users
-   */
-  @GET
-  @Produces("text/plain")
-  public Response getUsers(@Context HttpHeaders headers, @Context UriInfo ui) {
-    return handleRequest(headers, null, ui, Request.Type.GET, createUserResource(null));
-  }
-
-  /**
-   * Gets a single user.
-   * Handles: GET /users/{userName} requests
-   *
-   * @param headers     http headers
-   * @param ui          uri info
-   * @param userName    the username
-   * @return information regarding the requested user
-   */
-  @GET
-  @Path("{userName}")
-  @Produces("text/plain")
-  public Response getUser(@Context HttpHeaders headers, @Context UriInfo ui,
-      @PathParam("userName") String userName) {
-    return handleRequest(headers, null, ui, Request.Type.GET, createUserResource(userName));
-  }
-  
-  /**
-   * Creates a user.
-   * Handles: POST /users/{userName}
-   *
-   * @param body        http body
-   * @param headers     http headers
-   * @param ui          uri info
-   * @param userName    the username
-   * @return information regarding the created user
-   */
-  @POST
-  @Path("{userName}")
-  @Produces("text/plain")
-  public Response createUser(String body, @Context HttpHeaders headers, @Context UriInfo ui,
-                             @PathParam("userName") String userName) {
-
-    return handleRequest(headers, body, ui, Request.Type.POST, createUserResource(userName));
-  }
-
-  /**
-   * Updates a specific user.
-   * Handles: PUT /users/{userName}
-   *
-   * @param body        http body
-   * @param headers     http headers
-   * @param ui          uri info
-   * @param userName    the username
-   * @return information regarding the updated user
-   */
-  @PUT
-  @Path("{userName}")
-  @Produces("text/plain")
-  public Response updateUser(String body, @Context HttpHeaders headers, @Context UriInfo ui,
-                             @PathParam("userName") String userName) {
-
-    return handleRequest(headers, body, ui, Request.Type.PUT, createUserResource(userName));
-  }
-
-  /**
-   * Deletes a user.
-   * Handles: DELETE /users/{userName}
-   *
-   * @param headers   http headers
-   * @param ui        uri info
-   * @param userName  the username
-   * @return information regarding the deleted user
-   */
-  @DELETE
-  @Path("{userName}")
-  @Produces("text/plain")
-  public Response deleteUser(@Context HttpHeaders headers, @Context UriInfo ui,
-                             @PathParam("userName") String userName) {
-    return handleRequest(headers, null, ui, Request.Type.DELETE, createUserResource(userName));
-  }
-
-  /**
-   * Create a user resource instance.
-   *
-   * @param userName  user name
-   *
-   * @return a user resource instance
-   */
-  private ResourceInstance createUserResource(String userName) {
-    return createResource(Resource.Type.User,
-        Collections.singletonMap(Resource.Type.User, userName));
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/parsers/JsonPropertyParser.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/parsers/JsonPropertyParser.java
deleted file mode 100644
index 90fef73..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/parsers/JsonPropertyParser.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services.parsers;
-
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.codehaus.jackson.JsonNode;
-import org.codehaus.jackson.map.ObjectMapper;
-
-import java.io.IOException;
-import java.util.*;
-
-/**
- * JSON parser which parses a JSON request body into a set of property maps,
- * one map per resource in the body.
- */
-public class JsonPropertyParser implements RequestBodyParser {
-
-  @Override
-  public Set<Map<String, Object>> parse(String s) {
-    // Build the result locally so a reused parser instance doesn't
-    // accumulate properties from earlier parse() calls.
-    Set<Map<String, Object>> setProperties = new HashSet<Map<String, Object>>();
-    ObjectMapper mapper = new ObjectMapper();
-
-    if (s != null && !s.isEmpty()) {
-      s = ensureArrayFormat(s);
-      try {
-        JsonNode[] nodes = mapper.readValue(s, JsonNode[].class);
-        for (JsonNode node : nodes) {
-          Map<String, Object> mapProperties = new HashMap<String, Object>();
-          processNode(node, "", mapProperties);
-          setProperties.add(mapProperties);
-        }
-      } catch (IOException e) {
-        throw new RuntimeException("Unable to parse json: " + e, e);
-      }
-    }
-    return setProperties;
-  }
-
-  private void processNode(JsonNode node, String path, Map<String, Object> mapProperties) {
-    Iterator<String> iter = node.getFieldNames();
-    String name;
-    while (iter.hasNext()) {
-      name = iter.next();
-      JsonNode child = node.get(name);
-      if (child.isContainerNode()) {
-        processNode(child, path.isEmpty() ? name : path + '.' + name, mapProperties);
-      } else {
-        mapProperties.put(PropertyHelper.getPropertyId(path, name), child.asText());
-      }
-    }
-  }
-
-  private String ensureArrayFormat(String s) {
-    return s.startsWith("[") ? s : '[' + s + ']';
-  }
-}
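
A small sketch of the flattening behavior, assuming JsonPropertyParser above is on the classpath; the exact property-id format is whatever PropertyHelper.getPropertyId() produces.

    import java.util.Map;
    import java.util.Set;

    public class ParserExample {
      public static void main(String[] args) {
        String body = "{ \"Services\" : { \"state\" : \"INSTALLED\" } }";
        Set<Map<String, Object>> parsed = new JsonPropertyParser().parse(body);
        // One map per resource; the nested category and leaf name collapse
        // into a single PropertyHelper-style id mapped to "INSTALLED".
        System.out.println(parsed);
      }
    }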
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/parsers/RequestBodyParser.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/parsers/RequestBodyParser.java
deleted file mode 100644
index ceb22b8..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/parsers/RequestBodyParser.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services.parsers;
-
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Parse the provided String into a set of property maps.
- */
-public interface RequestBodyParser {
-  /**
-   * Parse the provided string into a set of property maps.
-   * Each key contains both the category hierarchy and the property name.
-   *
-   * @param s  the string body to be parsed
-   *
-   * @return a set of maps of properties or an empty set if no properties exist
-   */
-  public Set<Map<String, Object>> parse(String s);
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/persistence/PersistenceManager.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/persistence/PersistenceManager.java
deleted file mode 100644
index 1b84c99..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/persistence/PersistenceManager.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services.persistence;
-
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.apache.ambari.server.controller.spi.*;
-
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Persistence manager which is responsible for persisting a resource state to the back end.
- * This includes create, update and delete operations.
- */
-public interface PersistenceManager {
-
-  public RequestStatus create(ResourceInstance resource, Set<Map<String, Object>> setProperties)
-      throws UnsupportedPropertyException,
-             ResourceAlreadyExistsException,
-             NoSuchParentResourceException,
-             SystemException;
-
-  public RequestStatus update(ResourceInstance resource, Set<Map<String, Object>> setProperties)
-      throws UnsupportedPropertyException, SystemException, NoSuchParentResourceException, NoSuchResourceException;
-
-
-  public RequestStatus delete(ResourceInstance resource, Set<Map<String, Object>> setProperties)
-      throws UnsupportedPropertyException, SystemException, NoSuchParentResourceException, NoSuchResourceException;
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/persistence/PersistenceManagerImpl.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/persistence/PersistenceManagerImpl.java
deleted file mode 100644
index 5339ccf..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/persistence/PersistenceManagerImpl.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services.persistence;
-
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.apache.ambari.server.controller.spi.*;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Persistence Manager implementation.
- */
-public class PersistenceManagerImpl implements PersistenceManager {
-
-  /**
-   * Cluster Controller reference.
-   */
-  private ClusterController m_controller;
-
-  /**
-   * Constructor.
-   *
-   * @param controller  the cluster controller
-   */
-  public PersistenceManagerImpl(ClusterController controller) {
-    m_controller = controller;
-  }
-
-  @Override
-  public RequestStatus create(ResourceInstance resource, Set<Map<String, Object>> setProperties)
-      throws UnsupportedPropertyException,
-             SystemException,
-             ResourceAlreadyExistsException,
-             NoSuchParentResourceException {
-
-    Map<Resource.Type, String> mapResourceIds = resource.getIds();
-    Resource.Type type = resource.getResourceDefinition().getType();
-    Schema schema = m_controller.getSchema(type);
-
-    if (setProperties.size() == 0) {
-      setProperties.add(new HashMap<String, Object>());
-    }
-
-    for (Map<String, Object> mapProperties : setProperties) {
-      for (Map.Entry<Resource.Type, String> entry : mapResourceIds.entrySet()) {
-        String property = schema.getKeyPropertyId(entry.getKey());
-        if (! mapProperties.containsKey(property)) {
-          mapProperties.put(property, entry.getValue());
-        }
-      }
-    }
-    return m_controller.createResources(type, createControllerRequest(setProperties));
-  }
-
-  @Override
-  public RequestStatus update(ResourceInstance resource, Set<Map<String, Object>> setProperties)
-      throws UnsupportedPropertyException, SystemException, NoSuchParentResourceException, NoSuchResourceException {
-
-    return m_controller.updateResources(resource.getResourceDefinition().getType(),
-        createControllerRequest(setProperties), resource.getQuery().getPredicate());
-  }
-
-  @Override
-  public RequestStatus delete(ResourceInstance resource, Set<Map<String, Object>> setProperties)
-      throws UnsupportedPropertyException, SystemException, NoSuchParentResourceException, NoSuchResourceException {
-    //todo: need to account for multiple resources and user predicate
-    return m_controller.deleteResources(resource.getResourceDefinition().getType(),
-        resource.getQuery().getPredicate());
-
-  }
-
-  protected Request createControllerRequest(Set<Map<String, Object>> setProperties) {
-    return PropertyHelper.getCreateRequest(setProperties);
-  }
-}
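
A standalone illustration (not Ambari code) of the key-injection step in create() above: ids taken from the request URL are copied into each property map unless the body already supplies them. The property-id string is hypothetical.

    import java.util.HashMap;
    import java.util.Map;

    public class KeyInjectionExample {
      public static void main(String[] args) {
        Map<String, Object> body = new HashMap<String, Object>();
        Map<String, String> urlIds = new HashMap<String, String>();
        urlIds.put("Clusters/cluster_name", "c1"); // hypothetical key property id

        for (Map.Entry<String, String> entry : urlIds.entrySet()) {
          if (!body.containsKey(entry.getKey())) {
            body.put(entry.getKey(), entry.getValue());
          }
        }
        System.out.println(body); // {Clusters/cluster_name=c1}
      }
    }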
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/serializers/JsonSerializer.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/serializers/JsonSerializer.java
deleted file mode 100644
index 0db20e9..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/serializers/JsonSerializer.java
+++ /dev/null
@@ -1,175 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services.serializers;
-
-import org.apache.ambari.server.api.services.ResultStatus;
-import org.apache.ambari.server.api.services.Result;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.api.util.TreeNode;
-import org.codehaus.jackson.JsonFactory;
-import org.codehaus.jackson.JsonGenerator;
-import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jackson.util.DefaultPrettyPrinter;
-
-import java.io.*;
-import java.nio.charset.Charset;
-import java.util.Map;
-
-/**
- * JSON serializer.
- * Responsible for representing a result as JSON.
- */
-public class JsonSerializer implements ResultSerializer {
-
-  /**
-   * Factory used to create JSON generator.
-   */
-  JsonFactory m_factory = new JsonFactory();
-
-  ObjectMapper m_mapper = new ObjectMapper(m_factory);
-
-  /**
-   * Generator which writes JSON.
-   */
-  JsonGenerator m_generator;
-
-
-  @Override
-  public Object serialize(Result result) {
-    // Check for an error state first so no generator or stream is
-    // allocated when the result is delegated to serializeError().
-    if (result.getStatus().isErrorState()) {
-      return serializeError(result.getStatus());
-    }
-
-    try {
-      ByteArrayOutputStream bytesOut = init();
-
-      processNode(result.getResultTree());
-
-      m_generator.close();
-      return bytesOut.toString("UTF-8");
-    } catch (IOException e) {
-      //todo: exception handling.  Create ResultStatus 500 and call serializeError
-      throw new RuntimeException("Unable to serialize to json: " + e, e);
-    }
-  }
-
-  @Override
-  public Object serializeError(ResultStatus error) {
-    try {
-      ByteArrayOutputStream bytesOut = init();
-      m_generator.writeStartObject();
-      m_generator.writeNumberField("status", error.getStatus().getStatus());
-      m_generator.writeStringField("message", error.getMessage());
-      m_generator.writeEndObject();
-      m_generator.close();
-      return bytesOut.toString("UTF-8");
-
-    } catch (IOException e) {
-      //todo: exception handling
-      throw new RuntimeException("Unable to serialize to json: " + e, e);
-    }
-  }
-
-  private ByteArrayOutputStream init() throws IOException {
-    ByteArrayOutputStream bytesOut = new ByteArrayOutputStream();
-    // createJsonGenerator() already configures the pretty printer,
-    // so that setup is not repeated here.
-    m_generator = createJsonGenerator(bytesOut);
-
-    return bytesOut;
-  }
-
-  private void processNode(TreeNode<Resource> node) throws IOException {
-    String name = node.getName();
-    Resource r = node.getObject();
-
-    if (r == null) {
-      if (name != null) {
-        if (node.getParent() == null) {
-          m_generator.writeStartObject();
-          writeHref(node);
-        }
-        m_generator.writeArrayFieldStart(name);
-      }
-    } else {
-      m_generator.writeStartObject();
-      writeHref(node);
-      // resource props
-      handleResourceProperties(r.getProperties());
-    }
-
-    for (TreeNode<Resource> child : node.getChildren()) {
-      processNode(child);
-    }
-
-    if (r == null) {
-      if (name != null) {
-        m_generator.writeEndArray();
-        if (node.getParent() == null) {
-          m_generator.writeEndObject();
-        }
-      }
-    } else {
-      m_generator.writeEndObject();
-    }
-  }
-
-  private void handleResourceProperties(TreeNode<Map<String, Object>> node) throws IOException {
-    String category = node.getName();
-
-    if (category != null) {
-      m_generator.writeFieldName(category);
-      m_generator.writeStartObject();
-    }
-
-    for (Map.Entry<String, Object> entry : node.getObject().entrySet()) {
-      m_generator.writeFieldName(entry.getKey());
-      m_mapper.writeValue(m_generator, entry.getValue());
-    }
-
-    for (TreeNode<Map<String, Object>> n : node.getChildren()) {
-      handleResourceProperties(n);
-    }
-
-    if (category != null) {
-      m_generator.writeEndObject();
-    }
-  }
-
-  private JsonGenerator createJsonGenerator(ByteArrayOutputStream baos) throws IOException {
-    JsonGenerator generator = m_factory.createJsonGenerator(new OutputStreamWriter(baos,
-        Charset.forName("UTF-8").newEncoder()));
-
-    DefaultPrettyPrinter p = new DefaultPrettyPrinter();
-    p.indentArraysWith(new DefaultPrettyPrinter.Lf2SpacesIndenter());
-    generator.setPrettyPrinter(p);
-
-    return generator;
-  }
-
-  private void writeHref(TreeNode<Resource> node) throws IOException {
-    String hrefProp = node.getProperty("href");
-    if (hrefProp != null) {
-      m_generator.writeStringField("href", hrefProp);
-    }
-  }
-}
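
For reference, serializeError() above renders a ResultStatus as a small JSON object of roughly this shape (values illustrative):

    {
      "status" : 404,
      "message" : "no such resource"
    }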
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/serializers/ResultSerializer.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/serializers/ResultSerializer.java
deleted file mode 100644
index 8ff8994..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/services/serializers/ResultSerializer.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services.serializers;
-
-
-import org.apache.ambari.server.api.services.ResultStatus;
-import org.apache.ambari.server.api.services.Result;
-
-/**
- * Format internal result to format expected by client.
- */
-public interface ResultSerializer {
-  /**
-   * Serialize the given result to a format expected by client.
-   *
-   * @param result  internal result
-   * @return the serialized result
-   */
-  Object serialize(Result result);
-
-  /**
-   * Serialize an error result to the format expected by the client.
-   *
-   * @param error  the error result
-   *
-   * @return the serialized error result
-   */
-  Object serializeError(ResultStatus error);
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/util/TreeNode.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/util/TreeNode.java
deleted file mode 100644
index ffb41fa..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/util/TreeNode.java
+++ /dev/null
@@ -1,112 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.util;
-
-import java.util.Collection;
-
-/**
- * Tree where each node can have a name, properties and an associated object.
- */
-public interface TreeNode<T> {
-  /**
-   * Obtain the parent node or null if this node is the root.
-   *
-   * @return the parent node or null if this node is the root
-   */
-  public TreeNode<T> getParent();
-
-  /**
-   * Obtain the list of child nodes.
-   *
-   * @return a collection of child nodes or an empty collection if this is a leaf node
-   */
-  public Collection<TreeNode<T>> getChildren();
-
-  /**
-   * Obtain the object associated with this node.
-   *
-   * @return the object associated with this node or null
-   */
-  public T getObject();
-
-  /**
-   * Obtain the name of the node.
-   *
-   * @return the name of the node or null
-   */
-  public String getName();
-
-  /**
-   * Set the name of the node.
-   *
-   * @param name the name to set
-   */
-  public void setName(String name);
-
-  /**
-   * Set the parent node.
-   *
-   * @param parent the parent node to set
-   */
-  public void setParent(TreeNode<T> parent);
-
-  /**
-   * Add a child node for the provided object.
-   *
-   * @param child the object associated with the new child node
-   * @param name  the name of the child node
-   * @return the newly created child node
-   */
-  public TreeNode<T> addChild(T child, String name);
-
-  /**
-   * Add the specified child node.
-   *
-   * @param child the child node to add
-   * @return the added child node
-   */
-  public TreeNode<T> addChild(TreeNode<T> child);
-
-  /**
-   * Set a property on the node.
-   *
-   * @param name  the name of the property
-   * @param value the value of the property
-   */
-  public void setProperty(String name, String value);
-
-  /**
-   * Get the specified node property.
-   *
-   * @param name property name
-   * @return the requested property value or null
-   */
-  public String getProperty(String name);
-
-  /**
-   * Find a child node by name.
-   * The name may contain '/' to delimit names to find a child more than one level deep.
-   * To find a node named 'bar' that is a child of a child named 'foo', use the name 'foo/bar'.
-   *
-   * @param name  the name of the child.  May contain the '/' path separator.
-   *
-   * @return the requested node or null if the child was not found
-   */
-  public TreeNode<T> getChild(String name);
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/util/TreeNodeImpl.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/util/TreeNodeImpl.java
deleted file mode 100644
index 5883f5b..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/api/util/TreeNodeImpl.java
+++ /dev/null
@@ -1,138 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.util;
-
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * Basic implementation of TreeNode.
- */
-public class TreeNodeImpl<T> implements TreeNode<T> {
-
-  /**
-   * name of the node
-   */
-  private String m_name;
-
-  /**
-   * parent of the node
-   */
-  private TreeNode<T> m_parent;
-
-  /**
-   * child nodes
-   */
-  private Map<String, TreeNode<T>> m_mapChildren = new HashMap<String, TreeNode<T>>();
-
-  /**
-   * associated object
-   */
-  private T m_object;
-
-  /**
-   * properties
-   */
-  private Map<String, String> m_mapNodeProps;
-
-  /**
-   * Constructor.
-   *
-   * @param parent parent node
-   * @param object associated object
-   * @param name   node name
-   */
-  public TreeNodeImpl(TreeNode<T> parent, T object, String name) {
-    m_parent = parent;
-    m_object = object;
-    m_name = name;
-  }
-
-  @Override
-  public TreeNode<T> getParent() {
-    return m_parent;
-  }
-
-  @Override
-  public Collection<TreeNode<T>> getChildren() {
-    return m_mapChildren.values();
-  }
-
-  @Override
-  public T getObject() {
-    return m_object;
-  }
-
-  @Override
-  public void setName(String name) {
-    m_name = name;
-  }
-
-  @Override
-  public String getName() {
-    return m_name;
-  }
-
-  @Override
-  public void setParent(TreeNode<T> parent) {
-    m_parent = parent;
-  }
-
-  @Override
-  public TreeNode<T> addChild(T child, String name) {
-    TreeNodeImpl<T> node = new TreeNodeImpl<T>(this, child, name);
-    m_mapChildren.put(name, node);
-
-    return node;
-  }
-
-  @Override
-  public TreeNode<T> addChild(TreeNode<T> child) {
-    child.setParent(this);
-    m_mapChildren.put(child.getName(), child);
-
-    return child;
-  }
-
-  @Override
-  public void setProperty(String name, String value) {
-    if (m_mapNodeProps == null) {
-      m_mapNodeProps = new HashMap<String, String>();
-    }
-    m_mapNodeProps.put(name, value);
-  }
-
-  @Override
-  public String getProperty(String name) {
-    return m_mapNodeProps == null ? null : m_mapNodeProps.get(name);
-  }
-
-  @Override
-  public TreeNode<T> getChild(String name) {
-    if (name != null && name.contains("/")) {
-      int i = name.indexOf('/');
-      String s = name.substring(0, i);
-      TreeNode<T> node = m_mapChildren.get(s);
-      return node == null ? null : node.getChild(name.substring(i + 1));
-    } else {
-      return m_mapChildren.get(name);
-    }
-  }
-}
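
A minimal usage sketch, assuming the TreeNode/TreeNodeImpl classes above; it exercises the '/'-delimited lookup implemented by getChild().

    public class TreeExample {
      public static void main(String[] args) {
        TreeNode<String> root = new TreeNodeImpl<String>(null, "rootObj", "root");
        // addChild() returns the new node, so grandchildren can be chained.
        root.addChild("fooObj", "foo").addChild("barObj", "bar");

        TreeNode<String> bar = root.getChild("foo/bar");
        System.out.println(bar.getObject()); // barObj
      }
    }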
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BSHostStatus.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BSHostStatus.java
deleted file mode 100644
index f875541..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BSHostStatus.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.bootstrap;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-import javax.xml.bind.annotation.XmlType;
-
-/**
- *  BootStrap Status for a host.
- */
-@XmlRootElement
-@XmlAccessorType(XmlAccessType.FIELD)
-@XmlType(name = "", propOrder = {})
-public class BSHostStatus {
-  @XmlElement
-  private String hostName;
-  @XmlElement
-  private String status;
-  @XmlElement
-  private String statusCode;
-  @XmlElement
-  private String statusAction;
-  @XmlElement
-  private String log;
-
-
-  public void setStatus(String status) {
-    this.status = status;
-  }
-
-  public String getStatus() {
-    return this.status;
-  }
-
-  public void setHostName(String hostName) {
-    this.hostName = hostName;
-  }
-
-  public String getHostName() {
-    return this.hostName;
-  }
-
-  public String getLog() {
-    return this.log;
-  }
-
-  public void setLog(String log) {
-    this.log = log;
-  }
-  
-  public String getStatusCode() {
-    return statusCode;
-  }
-  
-  public void setStatusCode(String code) {
-    statusCode = code;
-  }
-  
-  public String getStatusAction() {
-    return statusAction;
-  }
-  
-  public void setStatusAction(String action) {
-    statusAction = action;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BSHostStatusCollector.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BSHostStatusCollector.java
deleted file mode 100644
index 8754ba9..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BSHostStatusCollector.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.bootstrap;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileReader;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-/**
- * Runnable class that gathers the host status output by looking at the files
- * in a given directory. Currently only meant for use by the bootstrap process.
- */
-class BSHostStatusCollector {
-  private File requestIdDir;
-  private List<BSHostStatus> hostStatus;
-  private static final String logFileFilter = ".log";
-  private static final String doneFileFilter = ".done";
-  private static Log LOG = LogFactory.getLog(BSHostStatusCollector.class);
-
-  private List<String> hosts;
-
-  public BSHostStatusCollector(File requestIdDir, List<String> hosts) {
-    this.requestIdDir = requestIdDir;
-    this.hosts = hosts;
-  }
-
-  public List<BSHostStatus> getHostStatus() {
-    return hostStatus;
-  }
-
-  public void run() {
-    LOG.info("Request directory " + requestIdDir);
-    hostStatus = new ArrayList<BSHostStatus>();
-    if (hosts == null) {
-      return;
-    }
-    File done;
-    File log;
-    LOG.info("HostList for polling on " + hosts);
-    for (String host : hosts) {
-      /* Read through the files and gather output */
-      BSHostStatus status = new BSHostStatus();
-      status.setHostName(host);
-      done = new File(requestIdDir, host + doneFileFilter);
-      log = new File(requestIdDir, host + logFileFilter);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Polling bootstrap status for host"
-            + ", requestDir=" + requestIdDir
-            + ", host=" + host
-            + ", doneFileExists=" + done.exists()
-            + ", logFileExists=" + log.exists());
-      }
-      if (!done.exists()) {
-        status.setStatus("RUNNING");
-      } else {
-        status.setStatus("FAILED");
-        try {
-          String statusCode = FileUtils.readFileToString(done).trim();
-          if (statusCode.equals("0")) {
-            status.setStatus("DONE");
-          }
-          
-          updateStatus(status, statusCode);
-        } catch (IOException e) {
-          LOG.info("Error reading done file " + done);
-        }
-      }
-      if (!log.exists()) {
-        status.setLog("");
-      } else {
-        String logString = "";
-        BufferedReader reader = null;
-        try {
-          StringBuilder sb = new StringBuilder();
-          reader = new BufferedReader(new FileReader(log));
-
-          String line = null;
-          while (null != (line = reader.readLine())) {
-            // Skip terminal-control noise emitted by the remote shell.
-            if (line.startsWith("tcgetattr:") || line.startsWith("tput:")) {
-              continue;
-            }
-
-            if (sb.length() != 0 || line.length() == 0) {
-              sb.append('\n');
-            }
-
-            // Expand literal "\n" sequences into real line breaks.
-            if (line.indexOf("\\n") != -1) {
-              sb.append(line.replace("\\n", "\n"));
-            } else {
-              sb.append(line);
-            }
-          }
-          
-          logString = sb.toString();
-        } catch (IOException e) {
-          LOG.info("Error reading log file " + log);
-        }
-        finally {
-          // reader is null if the FileReader constructor threw.
-          if (reader != null) {
-            try {
-              reader.close();
-            } catch (IOException e) {
-              // Ignore; the log content has already been read.
-            }
-          }
-        }
-        status.setLog(logString);
-      }
-      hostStatus.add(status);
-    }
-  }
-  
-  private void updateStatus(BSHostStatus status, String statusCode) {
-    
-    status.setStatusCode(statusCode);
-    
-    int reason = -1;
-    try {
-      reason = Integer.parseInt(statusCode);
-    } catch (NumberFormatException e) {
-      // Not a numeric exit code; fall through to the default advice below.
-    }
-    
-    switch (reason) {
-    // case X: (as we find them)
-    case 2:
-      status.setStatusAction("Processing could not continue because the file was not found.");
-      break;
-    case 255:
-    default:
-      if (null != status.getLog()) {
-        String lowerLog = status.getLog().toLowerCase();
-        if (lowerLog.contains("permission denied") && lowerLog.contains("publickey")) {
-          status.setStatusAction("Use correct SSH key");
-        } else if (lowerLog.contains("connect to host")) {
-          status.setStatusAction("Please verify that the hostname '" + status.getHostName() + "' is correct.");
-        }
-      }
-      break;
-    }
-    
-  }
-}
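
A sketch of the file convention the collector polls, assuming commons-io on the classpath; the directory and host name are hypothetical. A host is reported DONE only when its .done file holds exit code 0.

    import java.io.File;
    import org.apache.commons.io.FileUtils;

    public class DoneFileExample {
      public static void main(String[] args) throws Exception {
        File requestIdDir = new File("/var/run/ambari-server/bootstrap/1");
        File done = new File(requestIdDir, "host1.example.com.done");
        if (!done.exists()) {
          System.out.println("RUNNING");
        } else {
          String code = FileUtils.readFileToString(done).trim();
          System.out.println("0".equals(code) ? "DONE" : "FAILED (exit " + code + ")");
        }
      }
    }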
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BSResponse.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BSResponse.java
deleted file mode 100644
index 2258a5d..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BSResponse.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.bootstrap;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlEnum;
-import javax.xml.bind.annotation.XmlRootElement;
-import javax.xml.bind.annotation.XmlType;
-
-/**
- * Captures the immediate response to a bootstrap API call.
- * If the call was accepted, the response indicates success.
- */
-@XmlRootElement
-@XmlAccessorType(XmlAccessType.FIELD)
-@XmlType(name = "", propOrder = {})
-public class BSResponse {
-  @XmlType(name="status")
-  @XmlEnum
-  public enum BSRunStat {
-    OK,
-    ERROR
-  }
-
-  @XmlElement
-  private BSRunStat status;
-  @XmlElement
-  private String log;
-  @XmlElement
-  private long requestId;
-
-  public long getRequestId() {
-    return this.requestId;
-  }
-
-  public void setRequestId(long requestId) {
-    this.requestId = requestId;
-  }
-
-  public BSRunStat getStatus() {
-    return this.status;
-  }
-
-  public void setStatus(BSRunStat status) {
-    this.status  = status;
-  }
-
-  public String getLog() {
-    return this.log;
-  }
-
-  public void setLog(String log) {
-    this.log = log;
-  }
-}
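
Since BSResponse is a JAXB-annotated bean, the REST layer can hand it straight to a marshaller. A minimal sketch of that serialization under the standard javax.xml.bind API (BSResponseXmlDemo and the field values are hypothetical):

    import javax.xml.bind.JAXBContext;
    import javax.xml.bind.Marshaller;

    public class BSResponseXmlDemo {
      public static void main(String[] args) throws Exception {
        BSResponse response = new BSResponse();
        response.setStatus(BSResponse.BSRunStat.OK);
        response.setLog("Running Bootstrap now.");
        response.setRequestId(1);

        // Marshal the annotated bean to XML, as the API layer would.
        Marshaller m = JAXBContext.newInstance(BSResponse.class).createMarshaller();
        m.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
        m.marshal(response, System.out);
      }
    }
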
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BSRunner.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BSRunner.java
deleted file mode 100644
index 92e0faa..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BSRunner.java
+++ /dev/null
@@ -1,281 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.bootstrap;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.StringWriter;
-import java.util.List;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.ScheduledFuture;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.ambari.server.bootstrap.BootStrapStatus.BSStat;
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.io.IOUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-/**
- * @author ncole
- *
- */
-class BSRunner extends Thread {
-  private static Log LOG = LogFactory.getLog(BSRunner.class);
-
-  private  boolean finished = false;
-  private SshHostInfo sshHostInfo;
-  private File bootDir;
-  private String bsScript;
-  private File requestIdDir;
-  private File sshKeyFile;
-  private int requestId;
-  private String agentSetupScript;
-  private String agentSetupPassword;
-  private String ambariHostname;
-  private boolean verbose;
-  private BootStrapImpl bsImpl;
-
-  public BSRunner(BootStrapImpl impl, SshHostInfo sshHostInfo, String bootDir,
-      String bsScript, String agentSetupScript, String agentSetupPassword,
-      int requestId, long timeout, String hostName, boolean isVerbose)
-  {
-    this.requestId = requestId;
-    this.sshHostInfo = sshHostInfo;
-    this.bsScript = bsScript;
-    this.bootDir = new File(bootDir);
-    this.requestIdDir = new File(bootDir, Integer.toString(requestId));
-    this.sshKeyFile = new File(this.requestIdDir, "sshKey");
-    this.agentSetupScript = agentSetupScript;
-    this.agentSetupPassword = agentSetupPassword;
-    this.ambariHostname = hostName;
-    this.verbose = isVerbose;
-    this.bsImpl = impl;
-    BootStrapStatus status = new BootStrapStatus();
-    status.setLog("RUNNING");
-    status.setStatus(BSStat.RUNNING);
-    bsImpl.updateStatus(requestId, status);
-  }
-
-  /**
-   * Updates the gathered per-host status data by reading the bootstrap output.
-   *
-   */
-  private class BSStatusCollector implements Runnable {
-    @Override
-    public void run() {
-      BSHostStatusCollector collector = new BSHostStatusCollector(requestIdDir,
-          sshHostInfo.getHosts());
-      collector.run();
-      List<BSHostStatus> hostStatus = collector.getHostStatus();
-      BootStrapStatus status = new BootStrapStatus();
-      status.setHostsStatus(hostStatus);
-      status.setLog("");
-      status.setStatus(BSStat.RUNNING);
-      bsImpl.updateStatus(requestId, status);
-    }
-  }
-
-  private String createHostString(List<String> list) {
-    StringBuilder ret = new StringBuilder();
-    if (list == null) {
-      return "";
-    }
-
-    int i = 0;
-    for (String host: list) {
-      ret.append(host);
-      if (i++ != list.size()-1)
-        ret.append(",");
-    }
-    return ret.toString();
-  }
-
-  /** Create the request id directory for each bootstrap call. */
-  private void createRunDir() throws IOException {
-    if (!bootDir.exists()) {
-      // create the bootdir directory.
-      if (! bootDir.mkdirs()) {
-        throw new IOException("Cannot create " + bootDir);
-      }
-    }
-    /* create the request id directory */
-    if (requestIdDir.exists()) {
-      /* delete the directory so we start from a clean state */
-      FileUtils.deleteDirectory(requestIdDir);
-    }
-    /* create the directory for the run dir */
-    if (! requestIdDir.mkdirs()) {
-      throw new IOException("Cannot create " + requestIdDir);
-    }
-  }
-
-  private void writeSshKeyFile(String data) throws IOException {
-    FileUtils.writeStringToFile(sshKeyFile, data);
-  }
-
-  public synchronized void finished() {
-    this.finished = true;
-  }
-
-  @Override
-  public void run() {
-    String hostString = createHostString(sshHostInfo.getHosts());
-    String[] commands = new String[6];
-    String[] shellCommand = new String[3];
-    BSStat stat = BSStat.RUNNING;
-    String scriptlog = "";
-    try {
-      createRunDir();
-      if (LOG.isDebugEnabled()) {
-        // FIXME needs to be removed later
-        // security hole
-        LOG.debug("Using ssh key=\""
-            + sshHostInfo.getSshKey() + "\"");
-      }
-
-      writeSshKeyFile(sshHostInfo.getSshKey());
-      /* Running command:
-       * script hostlist bsdir sshkeyfile setupAgentScript ambariHostname
-       */
-      shellCommand[0] = "sh";
-      shellCommand[1] = "-c";
-      
-      commands[0] = this.bsScript;
-      commands[1] = hostString;
-      commands[2] = this.requestIdDir.toString();
-      commands[3] = this.sshKeyFile.toString();
-      commands[4] = this.agentSetupScript.toString();
-      commands[5] = this.ambariHostname;
-      LOG.info("Host= " + hostString + " bs=" + this.bsScript + " requestDir=" +
-          requestIdDir + " keyfile=" + this.sshKeyFile + " server=" + this.ambariHostname);
-
-      String[] env = new String[] { "AMBARI_PASSPHRASE=" + agentSetupPassword };
-      if (this.verbose)
-        env = new String[] { env[0], " BS_VERBOSE=\"-vvv\" " };
-
-      StringBuilder commandString = new StringBuilder();
-      for (String comm : commands) {
-        commandString.append(" " + comm);
-      }   
-     
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(commandString);
-      }
-      
-      String bootStrapOutputFile = requestIdDir + File.separator + "bootstrap.out";
-      String bootStrapErrorFile = requestIdDir + File.separator + "bootstrap.err";
-      commandString.append(
-          " 1> " + bootStrapOutputFile + " 2>" + bootStrapErrorFile);
-      
-      shellCommand[2] = commandString.toString();
-      Process process = Runtime.getRuntime().exec(shellCommand, env);
-
-      /* Start up a scheduled executor service to poll the logs. */
-      ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
-      BSStatusCollector statusCollector = new BSStatusCollector();
-      ScheduledFuture<?> handle = scheduler.scheduleWithFixedDelay(statusCollector,
-          0, 10, TimeUnit.SECONDS);
-      LOG.info("Kicking off the scheduler for polling on logs in " +
-          this.requestIdDir);
-      try {
-
-        LOG.info("Bootstrap output, log="
-              + bootStrapErrorFile + " " + bootStrapOutputFile);
-        int exitCode = process.waitFor();
-        String outMesg = "";
-        String errMesg = "";
-        try {
-          outMesg = FileUtils.readFileToString(new File(bootStrapOutputFile));
-          errMesg = FileUtils.readFileToString(new File(bootStrapErrorFile));
-        } catch(IOException io) {
-          LOG.info("Error in reading files ", io);
-        }
-        scriptlog = outMesg + "\n\n" + errMesg;
-        LOG.info("Script log Mesg " + scriptlog);
-        if (exitCode != 0) {
-          stat = BSStat.ERROR;
-        } else {
-          stat = BSStat.SUCCESS;
-        }
-
-        scheduler.schedule(new BSStatusCollector(), 0, TimeUnit.SECONDS);
-        long startTime = System.currentTimeMillis();
-        while (true) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Waiting for hosts status to be updated");
-          }
-          boolean pendingHosts = false;
-          BootStrapStatus tmpStatus = bsImpl.getStatus(requestId);
-          if (null != tmpStatus && null != tmpStatus.getHostsStatus()) {
-            for (BSHostStatus status : tmpStatus.getHostsStatus()) {
-              if (status.getStatus().equals("RUNNING")) {
-                pendingHosts = true;
-              }
-            }
-          }
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Whether hosts status yet to be updated, pending="
-                + pendingHosts);
-          }
-          if (!pendingHosts) {
-            break;
-          }
-          try {
-            Thread.sleep(1000);
-          } catch (InterruptedException e) {
-            // continue
-          }
-          long now = System.currentTimeMillis();
-          if (now >= (startTime+15000)) {
-            LOG.warn("Gave up waiting for hosts status to be updated");
-            break;
-          }
-        }
-      } catch (InterruptedException e) {
-        throw new IOException(e);
-      } finally {
-        handle.cancel(true);
-        /* schedule a last update */
-        scheduler.schedule(new BSStatusCollector(), 0, TimeUnit.SECONDS);
-        scheduler.shutdownNow();
-        try {
-          scheduler.awaitTermination(10, TimeUnit.SECONDS);
-        } catch (InterruptedException e) {
-          LOG.info("Interruped while waiting for scheduler");
-        }
-        process.destroy();
-      }
-    } catch(IOException io) {
-      LOG.info("Error executing bootstrap " + io.getMessage());
-      stat = BSStat.ERROR;
-    } finally {
-      /* get the bstatus */
-      BootStrapStatus tmpStatus = bsImpl.getStatus(requestId);
-      tmpStatus.setLog(scriptlog);
-      tmpStatus.setStatus(stat);
-      bsImpl.updateStatus(requestId, tmpStatus);
-      bsImpl.reset();
-      finished();
-    }
-  }
-
-  public synchronized boolean isRunning() {
-    return !this.finished;
-  }
-}
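
The lifecycle in run() above (schedule a fixed-delay collector, wait on the child process, then force one last collection before shutting the scheduler down) is a self-contained java.util.concurrent pattern. A stripped-down sketch of the same shape; PollingDemo and the one-second sleep standing in for process.waitFor() are hypothetical:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.ScheduledFuture;
    import java.util.concurrent.TimeUnit;

    public class PollingDemo {
      public static void main(String[] args) throws Exception {
        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
        Runnable collector = new Runnable() {
          public void run() {
            System.out.println("collecting status...");
          }
        };

        // Poll every 10 seconds while the long-running work proceeds.
        ScheduledFuture<?> handle =
            scheduler.scheduleWithFixedDelay(collector, 0, 10, TimeUnit.SECONDS);
        try {
          Thread.sleep(1000); // stand-in for process.waitFor()
        } finally {
          handle.cancel(true);
          scheduler.schedule(collector, 0, TimeUnit.SECONDS); // one final update
          scheduler.shutdown();
          scheduler.awaitTermination(10, TimeUnit.SECONDS);
        }
      }
    }

Note the sketch uses shutdown() rather than shutdownNow() so the final collection is guaranteed a chance to run; with shutdownNow(), as in the original, a still-queued final task can be dropped.
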
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BootStrapImpl.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BootStrapImpl.java
deleted file mode 100644
index 6e12ff2..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BootStrapImpl.java
+++ /dev/null
@@ -1,149 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.bootstrap;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.InetAddress;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.ambari.server.bootstrap.BSResponse.BSRunStat;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import com.google.inject.Inject;
-import com.google.inject.Singleton;
-
-@Singleton
-public class BootStrapImpl {
-  private File bootStrapDir;
-  private String bootScript;
-  private String bootSetupAgentScript;
-  private String bootSetupAgentPassword;
-  private BSRunner bsRunner;
-  private String masterHostname;
-  long timeout;
-
-  private static Log LOG = LogFactory.getLog(BootStrapImpl.class);
-
-  /* Monotonically increasing requestid for the bootstrap api to query on */
-  int requestId = 0;
-  private FifoLinkedHashMap<Long, BootStrapStatus> bsStatus;
-
-
-  @Inject
-  public BootStrapImpl(Configuration conf) throws IOException {
-    this.bootStrapDir = conf.getBootStrapDir();
-    this.bootScript = conf.getBootStrapScript();
-    this.bootSetupAgentScript = conf.getBootSetupAgentScript();
-    this.bootSetupAgentPassword = conf.getBootSetupAgentPassword();
-    this.bsStatus = new FifoLinkedHashMap<Long, BootStrapStatus>();
-    this.masterHostname = conf.getMasterHostname(
-        InetAddress.getLocalHost().getCanonicalHostName());
-  }
-
-  /**
-   * Return the {@link BootStrapStatus} for a given request id.
-   * @param requestId the request id for which the status needs to be returned.
-   * @return status for a specific request id. A request id of -1 means the
-   * latest request.
-   */
-  public synchronized BootStrapStatus getStatus(long requestId) {
-    if (! bsStatus.containsKey(Long.valueOf(requestId))) {
-      return null;
-    }
-    return bsStatus.get(Long.valueOf(requestId));
-  }
-
-  /**
-   * Update the status of a request. Mostly called by the status collector thread.
-   * @param requestId the request id.
-   * @param status the status of the update.
-   */
-  synchronized void updateStatus(long requestId, BootStrapStatus status) {
-    bsStatus.put(Long.valueOf(requestId), status);
-  }
-
-
-  public synchronized void init() throws IOException {
-    if (!bootStrapDir.exists()) {
-      boolean mkdirs = bootStrapDir.mkdirs();
-      if (!mkdirs) throw new IOException("Unable to make directory for " +
-          "bootstrap " + bootStrapDir);
-    }
-  }
-
-  public  synchronized BSResponse runBootStrap(SshHostInfo info) {
-    BSResponse response = new BSResponse();
-    /* Run some checks for ssh host */
-    LOG.info("BootStrapping hosts " + info.hostListAsString());
-    if (bsRunner != null) {
-      response.setLog("BootStrap in Progress: Cannot Run more than one.");
-      response.setStatus(BSRunStat.ERROR);
-
-      return response;
-    }
-    requestId++;
-
-    bsRunner = new BSRunner(this, info, bootStrapDir.toString(),
-        bootScript, bootSetupAgentScript, bootSetupAgentPassword, requestId, 0L,
-        this.masterHostname, info.isVerbose());
-    bsRunner.start();
-    response.setStatus(BSRunStat.OK);
-    response.setLog("Running Bootstrap now.");
-    response.setRequestId(requestId);
-    return response;
-  }
-
-  /**
-   * @param hosts the host names to filter by; null, empty, or a single "*"
-   *              entry selects all hosts
-   * @return the collected statuses for the matching hosts
-   */
-  public synchronized List<BSHostStatus> getHostInfo(List<String> hosts) {
-    List<BSHostStatus> statuses = new ArrayList<BSHostStatus>();
-
-    if (null == hosts || 0 == hosts.size() || (hosts.size() == 1 && hosts.get(0).equals("*"))) {
-      for (BootStrapStatus status : bsStatus.values()) {
-        if (null != status.getHostsStatus())
-          statuses.addAll(status.getHostsStatus());
-      }
-    } else {
-      // TODO make bootstrapping a bit more robust, then stop looping
-      for (BootStrapStatus status : bsStatus.values()) {
-        if (null == status.getHostsStatus())
-          continue;
-        for (BSHostStatus hostStatus : status.getHostsStatus()) {
-          if (-1 != hosts.indexOf(hostStatus.getHostName())) {
-            statuses.add(hostStatus);
-          }
-        }
-      }
-    }
-
-    return statuses;
-  }
-
-  /**
-   * Reset the runner so a new bootstrap request can be started.
-   */
-  public synchronized void reset() {
-    bsRunner = null;
-  }
-
-}
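
A sketch of how a caller might drive this class end to end: construct it, kick off a run, then poll the FIFO status map until the runner leaves the RUNNING state. BootStrapDriverDemo, the host names, and the key contents are hypothetical, and ambari.properties is assumed to be on the classpath for the Configuration constructor:

    import java.util.Arrays;

    import org.apache.ambari.server.bootstrap.*;
    import org.apache.ambari.server.configuration.Configuration;

    public class BootStrapDriverDemo {
      public static void main(String[] args) throws Exception {
        BootStrapImpl bootStrap = new BootStrapImpl(new Configuration());
        bootStrap.init();

        SshHostInfo info = new SshHostInfo();
        info.setSshKey("hypothetical-private-key-contents");
        info.setHosts(Arrays.asList("host1.example.com", "host2.example.com"));

        BSResponse response = bootStrap.runBootStrap(info);
        if (response.getStatus() == BSResponse.BSRunStat.OK) {
          BootStrapStatus status;
          do {
            Thread.sleep(1000); // poll once per second
            status = bootStrap.getStatus(response.getRequestId());
          } while (status != null
              && status.getStatus() == BootStrapStatus.BSStat.RUNNING);
        }
      }
    }
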
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BootStrapPostStatus.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BootStrapPostStatus.java
deleted file mode 100644
index b862b5e..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BootStrapPostStatus.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.bootstrap;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlEnum;
-import javax.xml.bind.annotation.XmlRootElement;
-import javax.xml.bind.annotation.XmlType;
-
-/**
- * Captures the immediate response to a bootstrap API call.
- * If the call was accepted, the response indicates success.
- */
-@XmlRootElement
-@XmlAccessorType(XmlAccessType.FIELD)
-@XmlType(name = "", propOrder = {})
-public class BootStrapPostStatus {
-  @XmlType(name="status")
-  @XmlEnum
-  public enum BSPostStat {
-    OK,
-    ERROR
-  }
-
-  @XmlElement
-  private BSPostStat postStatus;
-  @XmlElement
-  private String log;
-  @XmlElement
-  private long requestId;
-
-  public long getRequestId() {
-    return this.requestId;
-  }
-
-  public void setRequestId(long requestId) {
-    this.requestId = requestId;
-  }
-
-  public BSPostStat getStatus() {
-    return this.postStatus;
-  }
-
-  public void setStatus(BSPostStat status) {
-    this.postStatus  = status;
-  }
-
-  public String getLog() {
-    return this.log;
-  }
-
-  public void setLog(String log) {
-    this.log = log;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BootStrapStatus.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BootStrapStatus.java
deleted file mode 100644
index 76f3fba..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/BootStrapStatus.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.bootstrap;
-
-import java.util.List;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlEnum;
-import javax.xml.bind.annotation.XmlRootElement;
-import javax.xml.bind.annotation.XmlType;
-
-
-/**
- * Status of a bootstrap operation: whether it is running, succeeded, or
- * failed, along with detailed per-host information.
- *
- */
-@XmlRootElement
-@XmlAccessorType(XmlAccessType.FIELD)
-@XmlType(name = "", propOrder = {})
-public class BootStrapStatus {
-  @XmlType(name="status")
-  @XmlEnum
-  public enum BSStat {
-    RUNNING,
-    SUCCESS,
-    ERROR
-  }
-
-  @XmlElement
-  private BSStat status;
-
-  @XmlElement
-  private List<BSHostStatus> hostsStatus;
-
-  @XmlElement
-  private String log;
-
-  public void setStatus(BSStat status) {
-    this.status = status;
-  }
-
-  public BSStat getStatus() {
-    return this.status;
-  }
-
-  public void setHostsStatus(List<BSHostStatus> hostsStatus) {
-    this.hostsStatus = hostsStatus;
-  }
-
-  public List<BSHostStatus> getHostsStatus() {
-    return this.hostsStatus;
-  }
-
-  public void setLog(String log) {
-    this.log = log;
-  }
-
-  public String getLog() {
-    return this.log;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/FifoLinkedHashMap.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/FifoLinkedHashMap.java
deleted file mode 100644
index 2eb7d04..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/FifoLinkedHashMap.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.bootstrap;
-
-import java.util.LinkedHashMap;
-import java.util.Map;
-
-/**
- * Only stores the most recent 100 (MAX_ENTRIES) key-value pairs.
- *
- */
-@SuppressWarnings("serial")
-public class FifoLinkedHashMap<K, V> extends LinkedHashMap<K, V> {
-  public static final int MAX_ENTRIES = 100;
-
-  @Override
-  protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
-    return size() > MAX_ENTRIES;
-  }
-
-}
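
The eviction hook relies on documented LinkedHashMap behavior: removeEldestEntry is consulted after every put, and returning true drops the oldest entry in insertion order. A quick demonstration with a cap of 3 instead of 100 so the eviction is visible (FifoMapDemo is hypothetical):

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class FifoMapDemo {
      // Same idea as FifoLinkedHashMap above, but with a tiny cap.
      static class CappedMap<K, V> extends LinkedHashMap<K, V> {
        @Override
        protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
          return size() > 3;
        }
      }

      public static void main(String[] args) {
        Map<Integer, String> m = new CappedMap<Integer, String>();
        for (int i = 1; i <= 5; i++) {
          m.put(i, "request-" + i);
        }
        System.out.println(m.keySet()); // prints [3, 4, 5]; 1 and 2 were evicted
      }
    }
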
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/SshHostInfo.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/SshHostInfo.java
deleted file mode 100644
index d5f3816..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/bootstrap/SshHostInfo.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.bootstrap;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-import javax.xml.bind.annotation.XmlType;
-
-/**
- * Information that the API needs to provide to run bootstrap on hosts.
- *
- */
-@XmlRootElement
-@XmlAccessorType(XmlAccessType.FIELD)
-@XmlType(name = "", propOrder = {})
-public class SshHostInfo {
-
-  @XmlElement
-  private String sshKey;
-
-  @XmlElement
-  private List<String>  hosts = new ArrayList<String>();
-  
-  @XmlElement
-  private boolean verbose = false;
-
-  public String getSshKey() {
-    return sshKey;
-  }
-
-  public void setSshKey(String sshKey) {
-    this.sshKey = sshKey;
-  }
-
-  public void setHosts(List<String> hosts) {
-    this.hosts = hosts;
-  }
-
-  public List<String> getHosts() {
-    return this.hosts;
-  }
-  
-  public boolean isVerbose() {
-    return verbose;
-  }
-  
-  public void setVerbose(boolean verbose) {
-    this.verbose = verbose;
-  }
-
-  public String hostListAsString() {
-    StringBuilder ret = new StringBuilder();
-    if (this.hosts == null) {
-      return "";
-    }
-    for (String host : this.hosts) {
-      ret.append(host).append(":");
-    }
-    return ret.toString();
-  }
-}
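
With field access and no explicit names on the annotations, JAXB derives the XML shape from the class itself: a decapitalized root element (sshHostInfo) and one element per list entry. A sketch of unmarshalling a request payload under that assumption; SshHostInfoXmlDemo and the sample values are hypothetical:

    import java.io.StringReader;

    import javax.xml.bind.JAXBContext;

    public class SshHostInfoXmlDemo {
      public static void main(String[] args) throws Exception {
        String xml =
            "<sshHostInfo>"
          + "<sshKey>hypothetical-key</sshKey>"
          + "<hosts>host1</hosts>"
          + "<hosts>host2</hosts>"
          + "<verbose>true</verbose>"
          + "</sshHostInfo>";

        SshHostInfo info = (SshHostInfo) JAXBContext
            .newInstance(SshHostInfo.class)
            .createUnmarshaller()
            .unmarshal(new StringReader(xml));

        // Prints "host1:host2: verbose=true" given hostListAsString() above.
        System.out.println(info.hostListAsString() + " verbose=" + info.isVerbose());
      }
    }
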
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
deleted file mode 100644
index d4f95d0..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
+++ /dev/null
@@ -1,425 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.configuration;
-
-import com.google.inject.Singleton;
-import org.apache.ambari.server.orm.PersistenceType;
-import org.apache.ambari.server.security.ClientSecurityType;
-import org.apache.ambari.server.security.authorization.LdapServerProperties;
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang.RandomStringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Properties;
-
-
-/**
- * Ambari configuration.
- * Reads properties from ambari.properties
- */
-@Singleton
-public class Configuration {
-
-  public static final String CONFIG_FILE = "ambari.properties";
-  public static final String BOOTSTRAP_DIR = "bootstrap.dir";
-  public static final String BOOTSTRAP_DIR_DEFAULT = "/var/run/ambari-server/bootstrap";
-  public static final String WEBAPP_DIR = "webapp.dir";
-  public static final String BOOTSTRAP_SCRIPT = "bootstrap.script";
-  public static final String BOOTSTRAP_SCRIPT_DEFAULT =  "/usr/bin/ambari_bootstrap";
-  public static final String BOOTSTRAP_SETUP_AGENT_SCRIPT = "bootstrap.setup_agent.script";
-  public static final String BOOTSTRAP_SETUP_AGENT_PASSWORD = "bootstrap.setup_agent.password";
-  public static final String BOOTSTRAP_MASTER_HOSTNAME = "bootstrap.master_host_name";
-  public static final String API_AUTHENTICATE = "api.authenticate";
-  public static final String API_USE_SSL = "api.ssl";
-  public static final String SRVR_KSTR_DIR_KEY = "security.server.keys_dir";
-  public static final String SRVR_CRT_NAME_KEY = "security.server.cert_name";
-  public static final String SRVR_KEY_NAME_KEY = "security.server.key_name";
-  public static final String KSTR_NAME_KEY =
-      "security.server.keystore_name";
-  public static final String SRVR_CRT_PASS_FILE_KEY =
-      "security.server.crt_pass_file";
-  public static final String SRVR_CRT_PASS_KEY = "security.server.crt_pass";
-  public static final String SRVR_CRT_PASS_LEN_KEY = "security.server.crt_pass.len";
-  public static final String PASSPHRASE_ENV_KEY =
-      "security.server.passphrase_env_var";
-  public static final String PASSPHRASE_KEY = "security.server.passphrase";
-  public static final String RESOURCES_DIR_KEY = "resources.dir";
-  public static final String METADETA_DIR_PATH = "metadata.path";
-
-
-  public static final String CLIENT_SECURITY_KEY = "client.security";
-  public static final String CLIENT_API_PORT_KEY = "client.api.port";
-  public static final String LDAP_USE_SSL_KEY = "authentication.ldap.useSSL";
-  public static final String LDAP_PRIMARY_URL_KEY =
-      "authentication.ldap.primaryUrl";
-  public static final String LDAP_SECONDARY_URL_KEY =
-      "authentication.ldap.secondaryUrl";
-  public static final String LDAP_BASE_DN_KEY =
-      "authentication.ldap.baseDn";
-  public static final String LDAP_BIND_ANONYMOUSLY_KEY =
-      "authentication.ldap.bindAnonymously";
-  public static final String LDAP_MANAGER_DN_KEY =
-      "authentication.ldap.managerDn";
-  public static final String LDAP_MANAGER_PASSWORD_KEY =
-      "authentication.ldap.managerPassword";
-  public static final String LDAP_USERNAME_ATTRIBUTE_KEY =
-      "authentication.ldap.usernameAttribute";
-
-  public static final String USER_ROLE_NAME_KEY =
-      "authorization.userRoleName";
-  public static final String ADMIN_ROLE_NAME_KEY =
-      "authorization.adminRoleName";
-
-  public static final String PERSISTENCE_IN_MEMORY_KEY =
-      "server.persistence.inMemory";
-  public static final String SERVER_JDBC_USER_NAME_KEY =
-      "server.jdbc.user.name";
-  private static final String SERVER_JDBC_USER_NAME_DEFAULT =
-      "ambari-server";
-  public static final String SERVER_JDBC_USER_PASSWD_KEY =
-      "server.jdbc.user.passwd";
-  private static final String SERVER_JDBC_USER_PASSWD_DEFAULT =
-      "bigdata";
-
-  public static final String OS_VERSION_KEY =
-      "server.os_type";
-
-  public static final String SRVR_HOSTS_MAPPING = 
-      "server.hosts.mapping";
-
-  public static final String SSL_TRUSTSTORE_PATH_KEY = "ssl.trustStore.path";
-  public static final String SSL_TRUSTSTORE_PASSWORD_KEY = "ssl.trustStore.password";
-  public static final String SSL_TRUSTSTORE_TYPE_KEY = "ssl.trustStore.type";
-
-  private static final String SRVR_KSTR_DIR_DEFAULT = ".";
-  public static final String SRVR_CRT_NAME_DEFAULT = "ca.crt";
-  public static final String SRVR_KEY_NAME_DEFAULT = "ca.key";
-  public static final String KSTR_NAME_DEFAULT = "keystore.p12";
-  private static final String SRVR_CRT_PASS_FILE_DEFAULT ="pass.txt";
-  private static final String SRVR_CRT_PASS_LEN_DEFAULT = "50";
-  private static final String PASSPHRASE_ENV_DEFAULT = "AMBARI_PASSPHRASE";
-  private static final String RESOURCES_DIR_DEFAULT =
-      "/var/share/ambari/resources/";
-
-  private static final String CLIENT_SECURITY_DEFAULT = "local";
-  private static final int CLIENT_API_PORT_DEFAULT = 8080;
-
-  private static final String USER_ROLE_NAME_DEFAULT = "user";
-  private static final String ADMIN_ROLE_NAME_DEFAULT = "admin";
-  private static final String LDAP_BIND_ANONYMOUSLY_DEFAULT = "true";
-
-  //TODO For embedded server only - should be removed later
-  private static final String LDAP_PRIMARY_URL_DEFAULT = "localhost:33389";
-  private static final String LDAP_BASE_DN_DEFAULT = "dc=ambari,dc=apache,dc=org";
-  private static final String LDAP_USERNAME_ATTRIBUTE_DEFAULT = "uid";
-
-  //TODO for development purposes only, should be changed to 'false'
-  private static final String PERSISTENCE_IN_MEMORY_DEFAULT = "true";
-
-
-
-
-  private static final Logger LOG = LoggerFactory.getLogger(
-      Configuration.class);
-
-  private Properties properties;
-
-
-  private Map<String, String> configsMap;
-
-
-  public Configuration() {
-    this(readConfigFile());
-  }
-
-  /**
-   * For testing only. Allows creating a Configuration object with an
-   * explicit set of properties.
-   * @param properties the properties to use for the test configuration.
-   */
-  public Configuration(Properties properties) {
-    this.properties = properties;
-
-    configsMap = new HashMap<String, String>();
-    configsMap.put(SRVR_KSTR_DIR_KEY, properties.getProperty(
-        SRVR_KSTR_DIR_KEY, SRVR_KSTR_DIR_DEFAULT));
-    configsMap.put(SRVR_CRT_NAME_KEY, properties.getProperty(
-        SRVR_CRT_NAME_KEY, SRVR_CRT_NAME_DEFAULT));
-    configsMap.put(SRVR_KEY_NAME_KEY, properties.getProperty(
-        SRVR_KEY_NAME_KEY, SRVR_KEY_NAME_DEFAULT));
-    configsMap.put(KSTR_NAME_KEY, properties.getProperty(
-        KSTR_NAME_KEY, KSTR_NAME_DEFAULT));
-    configsMap.put(SRVR_CRT_PASS_FILE_KEY, properties.getProperty(
-        SRVR_CRT_PASS_FILE_KEY, SRVR_CRT_PASS_FILE_DEFAULT));
-    configsMap.put(PASSPHRASE_ENV_KEY, properties.getProperty(
-        PASSPHRASE_ENV_KEY, PASSPHRASE_ENV_DEFAULT));
-    configsMap.put(PASSPHRASE_KEY, System.getenv(configsMap.get(
-        PASSPHRASE_ENV_KEY)));
-    configsMap.put(USER_ROLE_NAME_KEY, properties.getProperty(
-        USER_ROLE_NAME_KEY, USER_ROLE_NAME_DEFAULT));
-    configsMap.put(ADMIN_ROLE_NAME_KEY, properties.getProperty(
-        ADMIN_ROLE_NAME_KEY, ADMIN_ROLE_NAME_DEFAULT));
-    configsMap.put(RESOURCES_DIR_KEY, properties.getProperty(
-        RESOURCES_DIR_KEY, RESOURCES_DIR_DEFAULT));
-    configsMap.put(SRVR_CRT_PASS_LEN_KEY, properties.getProperty(
-        SRVR_CRT_PASS_LEN_KEY, SRVR_CRT_PASS_LEN_DEFAULT));
-
-    File passFile = new File(configsMap.get(SRVR_KSTR_DIR_KEY) + File.separator
-        + configsMap.get(SRVR_CRT_PASS_FILE_KEY));
-    String randStr = null;
-
-    if (!passFile.exists()) {
-      LOG.info("Generation of file with password");
-      try {
-        randStr = RandomStringUtils.randomAlphanumeric(Integer
-            .parseInt(configsMap.get(SRVR_CRT_PASS_LEN_KEY)));
-        FileUtils.writeStringToFile(passFile, randStr);
-
-      } catch (IOException e) {
-        e.printStackTrace();
-        throw new RuntimeException(
-            "Error creating certificate password file");
-      }
-    } else {
-      LOG.info("Reading password from existing file");
-      try {
-        randStr = FileUtils.readFileToString(passFile);
-      } catch (IOException e) {
-        e.printStackTrace();
-      }
-    }
-    configsMap.put(SRVR_CRT_PASS_KEY, randStr);
-
-    loadSSLParams();
-  }
-
-  /**
-   * Loads trusted certificates store properties
-   */
-  private void loadSSLParams(){
-    if (properties.getProperty(SSL_TRUSTSTORE_PATH_KEY) != null) {
-      System.setProperty("javax.net.ssl.trustStore", properties.getProperty(SSL_TRUSTSTORE_PATH_KEY));
-    }
-    if (properties.getProperty(SSL_TRUSTSTORE_PASSWORD_KEY) != null) {
-      System.setProperty("javax.net.ssl.trustStorePassword", properties.getProperty(SSL_TRUSTSTORE_PASSWORD_KEY));
-    }
-    if (properties.getProperty(SSL_TRUSTSTORE_TYPE_KEY) != null) {
-      System.setProperty("javax.net.ssl.trustStoreType", properties.getProperty(SSL_TRUSTSTORE_TYPE_KEY));
-    }
-  }
-
-
-  /**
-   * Find, read, and parse the configuration file.
-   * @return the properties that were found or empty if no file was found
-   */
-  private static Properties readConfigFile() {
-    Properties properties = new Properties();
-
-    //Get property file stream from classpath
-    InputStream inputStream = Configuration.class.getClassLoader().getResourceAsStream(CONFIG_FILE);
-
-    if (inputStream == null)
-      throw new RuntimeException(CONFIG_FILE + " not found in classpath");
-
-
-    // load the properties
-    try {
-      properties.load(inputStream);
-    } catch (FileNotFoundException fnf) {
-      LOG.info("No configuration file " + CONFIG_FILE + " found in classpath.", fnf);
-    } catch (IOException ie) {
-      throw new IllegalArgumentException("Can't read configuration file " +
-          CONFIG_FILE, ie);
-    }
-
-    return properties;
-  }
-
-  public File getBootStrapDir() {
-    String fileName = properties.getProperty(BOOTSTRAP_DIR);
-    if (fileName == null) {
-      fileName = BOOTSTRAP_DIR_DEFAULT;
-    }
-    return new File(fileName);
-  }
-
-  public String getBootStrapScript() {
-    String bootscript = properties.getProperty(BOOTSTRAP_SCRIPT);
-    if (bootscript == null) {
-      return BOOTSTRAP_SCRIPT_DEFAULT;
-    }
-    return bootscript;
-  }
-
-  public String getBootSetupAgentScript() {
-    return properties.getProperty(BOOTSTRAP_SETUP_AGENT_SCRIPT,
-        "/usr/lib/python2.6/site-packages/ambari_server/setupAgent.py");
-  }
-
-  public String getBootSetupAgentPassword() {
-    String pass = configsMap.get(PASSPHRASE_KEY);
-
-    if (null != pass)
-      return pass;
-
-    // fallback
-    return properties.getProperty(BOOTSTRAP_SETUP_AGENT_PASSWORD, "password");
-  }
-
-  /**
-   * Get the map with server config parameters.
-   * Keys - public constants of this class
-   * @return the map with server config parameters
-   */
-  public Map<String, String> getConfigsMap() {
-    return configsMap;
-  }
-
-  /**
-   * Gets client security type
-   * @return appropriate ClientSecurityType
-   */
-  public ClientSecurityType getClientSecurityType() {
-    return ClientSecurityType.fromString(properties.getProperty(CLIENT_SECURITY_KEY));
-  }
-
-  public void setClientSecurityType(ClientSecurityType type) {
-    properties.setProperty(CLIENT_SECURITY_KEY, type.toString());
-  }
-
-  public String getWebAppDir() {
-    LOG.info("Web App DIR test " + properties.getProperty(WEBAPP_DIR));
-    return properties.getProperty(WEBAPP_DIR, "web");
-  }
-
-  /**
-   * Get the file that will be used for host mapping.
-   * @return null if such a file is not present, value if present.
-   */
-  public String getHostsMapFile() {
-    LOG.info("Hosts Mapping File " +  properties.getProperty(SRVR_HOSTS_MAPPING));
-    return properties.getProperty(SRVR_HOSTS_MAPPING);
-  }
-
-  /**
-   * Gets ambari stack-path
-   * @return String
-   */
-  public String getMetadataPath() {
-    return properties.getProperty(METADETA_DIR_PATH);
-  }
-
-  /**
-   * Check to see if the API should be authenticated or not
-   * @return false if not, true if the authentication is enabled.
-   */
-  public boolean getApiAuthentication() {
-    return ("true".equals(properties.getProperty(API_AUTHENTICATE, "false")));
-  }
-
-  /**
-   * Check to see if the API should be authenticated via ssl or not
-   * @return false if not, true if ssl needs to be used.
-   */
-  public boolean getApiSSLAuthentication() {
-    return ("true".equals(properties.getProperty(API_USE_SSL, "false")));
-  }
-
-
-  public PersistenceType getPersistenceType() {
-    String value = properties.getProperty(PERSISTENCE_IN_MEMORY_KEY, PERSISTENCE_IN_MEMORY_DEFAULT);
-    if ("true".equalsIgnoreCase(value)) {
-      return PersistenceType.IN_MEMORY;
-    } else {
-      return PersistenceType.POSTGRES;
-    }
-  }
-
-  public String getDatabaseUser() {
-    return properties.getProperty(SERVER_JDBC_USER_NAME_KEY, SERVER_JDBC_USER_NAME_DEFAULT);
-  }
-
-  public String getDatabasePassword() {
-    String filePath = properties.getProperty(SERVER_JDBC_USER_PASSWD_KEY);
-    if (filePath == null) {
-      LOG.debug("DB password file not specified - using default");
-      return SERVER_JDBC_USER_PASSWD_DEFAULT;
-    } else {
-      LOG.debug("Reading password from file {}", filePath);
-      String password;
-      try {
-        password = FileUtils.readFileToString(new File(filePath));
-      } catch (IOException e) {
-        throw new RuntimeException("Unable to read database password", e);
-      }
-      return password;
-    }
-  }
-
-  /**
-   * Gets parameters of LDAP server to connect to
-   * @return LdapServerProperties object representing connection parameters
-   */
-  public LdapServerProperties getLdapServerProperties() {
-    LdapServerProperties ldapServerProperties = new LdapServerProperties();
-
-    ldapServerProperties.setPrimaryUrl(properties.getProperty(
-        LDAP_PRIMARY_URL_KEY, LDAP_PRIMARY_URL_DEFAULT));
-    ldapServerProperties.setSecondaryUrl(properties.getProperty(
-        LDAP_SECONDARY_URL_KEY));
-    ldapServerProperties.setUseSsl("true".equalsIgnoreCase(properties.
-        getProperty(LDAP_USE_SSL_KEY)));
-    ldapServerProperties.setAnonymousBind("true".
-        equalsIgnoreCase(properties.getProperty(LDAP_BIND_ANONYMOUSLY_KEY,
-            LDAP_BIND_ANONYMOUSLY_DEFAULT)));
-    ldapServerProperties.setManagerDn(properties.getProperty(
-        LDAP_MANAGER_DN_KEY));
-    ldapServerProperties.setManagerPassword(properties.getProperty(
-        LDAP_MANAGER_PASSWORD_KEY));
-    ldapServerProperties.setBaseDN(properties.getProperty
-        (LDAP_BASE_DN_KEY, LDAP_BASE_DN_DEFAULT));
-    ldapServerProperties.setUsernameAttribute(properties.
-        getProperty(LDAP_USERNAME_ATTRIBUTE_KEY, LDAP_USERNAME_ATTRIBUTE_DEFAULT));
-
-    return ldapServerProperties;
-  }
-
-  public String getServerOsType() {
-    return properties.getProperty(OS_VERSION_KEY, "");
-  }
-
-  public String getMasterHostname(String defaultValue) {
-    return properties.getProperty(BOOTSTRAP_MASTER_HOSTNAME, defaultValue);
-  }
-
-
-  public int getClientApiPort() {
-    return Integer.parseInt(properties.getProperty(CLIENT_API_PORT_KEY, String.valueOf(CLIENT_API_PORT_DEFAULT)));
-  }
-
-}
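
The generate-or-reuse handling of the certificate password file above is the part worth noting: the password is created once with RandomStringUtils and read back on every later start, so server restarts keep the same keystore password. A condensed sketch of just that convention (PassFileDemo and the file name are hypothetical; commons-io and commons-lang as already imported above):

    import java.io.File;

    import org.apache.commons.io.FileUtils;
    import org.apache.commons.lang.RandomStringUtils;

    public class PassFileDemo {
      public static void main(String[] args) throws Exception {
        File passFile = new File("pass.txt");
        String pass;
        if (!passFile.exists()) {
          // First start: generate and persist a random password.
          pass = RandomStringUtils.randomAlphanumeric(50);
          FileUtils.writeStringToFile(passFile, pass);
        } else {
          // Subsequent starts: reuse the persisted password.
          pass = FileUtils.readFileToString(passFile);
        }
        System.out.println("password length=" + pass.length());
      }
    }
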
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionRequest.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionRequest.java
deleted file mode 100644
index 775382d..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionRequest.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller;
-
-import java.util.Map;
-
-public class ActionRequest {
-  private String clusterName; 
-
-  private String serviceName;
-  
-  private String actionName; //for CREATE only
-
-  private Map<String, String> parameters; //for CREATE only
-
-  public ActionRequest(String clusterName, String serviceName,
-      String actionName, Map<String, String> params) {
-    this.clusterName = clusterName;
-    this.serviceName = serviceName;
-    this.actionName = actionName;
-    this.parameters = params;
-  }
-
-  public String getClusterName() {
-    return clusterName;
-  }
-
-  public void setClusterName(String clusterName) {
-    this.clusterName = clusterName;
-  }
-
-  public String getServiceName() {
-    return serviceName;
-  }
-
-  public void setServiceName(String serviceName) {
-    this.serviceName = serviceName;
-  }
-
-  public String getActionName() {
-    return actionName;
-  }
-
-  public void setActionName(String actionName) {
-    this.actionName = actionName;
-  }
-
-  public Map<String, String> getParameters() {
-    return parameters;
-  }
-
-  public void setParameters(Map<String, String> parameters) {
-    this.parameters = parameters;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionResponse.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionResponse.java
deleted file mode 100644
index 83d1a71..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionResponse.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller;
-
-
-public class ActionResponse {
-  private String clusterName; 
-
-  private String serviceName;
-  
-  private String actionName;
-
-  public String getClusterName() {
-    return clusterName;
-  }
-
-  public void setClusterName(String clusterName) {
-    this.clusterName = clusterName;
-  }
-
-  public String getServiceName() {
-    return serviceName;
-  }
-
-  public void setServiceName(String serviceName) {
-    this.serviceName = serviceName;
-  }
-
-  public String getActionName() {
-    return actionName;
-  }
-
-  public void setActionName(String actionName) {
-    this.actionName = actionName;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
deleted file mode 100644
index 75f392a..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
+++ /dev/null
@@ -1,381 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.ObjectNotFoundException;
-import org.apache.ambari.server.ParentObjectNotFoundException;
-
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Management controller interface.
- */
-public interface AmbariManagementController {
-
-  // ----- Create -----------------------------------------------------------
-
-  /**
-   * Create the cluster defined by the attributes in the given request object.
-   *
-   * @param request  the request object which defines the cluster to be created
-   *
-   * @throws AmbariException thrown if the cluster cannot be created
-   */
-  public void createCluster(ClusterRequest request) throws AmbariException;
-
-  /**
-   * Create the services defined by the attributes in the given request objects.
-   *
-   * @param requests  the request objects which define the services to be created
-   *
-   * @throws AmbariException thrown if a service cannot be created
-   */
-  public void createServices(Set<ServiceRequest> requests)
-      throws AmbariException, ParentObjectNotFoundException;
-
-  /**
-   * Create the components defined by the attributes in the given request objects.
-   *
-   * @param requests  the request objects which define the components to be created
-   *
-   * @throws AmbariException thrown if a component cannot be created
-   */
-  public void createComponents(Set<ServiceComponentRequest> requests)
-      throws AmbariException;
-
-  /**
-   * Create the hosts defined by the attributes in the given request objects.
-   *
-   * @param requests  the request objects which define the hosts to be created
-   *
-   * @throws AmbariException thrown if a host cannot be created
-   */
-  public void createHosts(Set<HostRequest> requests)
-      throws AmbariException;
-
-  /**
-   * Create the host components defined by the attributes in the given request objects.
-   *
-   * @param requests  the request objects which define the host components to be created
-   *
-   * @throws AmbariException thrown if a host component cannot be created
-   */
-  public void createHostComponents(
-      Set<ServiceComponentHostRequest> requests) throws AmbariException;
-
-  /**
-   * Creates a configuration.
-   *
-   * @param request the request object which defines the configuration.
-   *
-   * @throws AmbariException when the configuration cannot be created.
-   */
-  public void createConfiguration(ConfigurationRequest request)
-      throws AmbariException;
-  
-  /**
-   * Creates users.
-   * 
-   * @param requests the request objects which define the users.
-   *
-   * @throws AmbariException when a user cannot be created.
-   */
-  public void createUsers(Set<UserRequest> requests) throws AmbariException;
-
-
-  // ----- Read -------------------------------------------------------------
-
-  /**
-   * Get the clusters identified by the given request objects.
-   *
-   * @param requests  the request objects which identify the clusters to be returned
-   *
-   * @return a set of cluster responses
-   *
-   * @throws AmbariException thrown if the resource cannot be read
-   */
-  public Set<ClusterResponse> getClusters(Set<ClusterRequest> requests)
-      throws AmbariException;
-
-  /**
-   * Get the services identified by the given request objects.
-   *
-   * @param requests  the request objects which identify the services
-   * to be returned
-   *
-   * @return a set of service responses
-   *
-   * @throws AmbariException thrown if the resource cannot be read
-   */
-  public Set<ServiceResponse> getServices(Set<ServiceRequest> requests)
-      throws AmbariException;
-
-  /**
-   * Get the components identified by the given request objects.
-   *
-   * @param requests  the request objects which identify the components to be returned
-   *
-   * @return a set of component responses
-   *
-   * @throws AmbariException thrown if the resource cannot be read
-   */
-  public Set<ServiceComponentResponse> getComponents(
-      Set<ServiceComponentRequest> requests) throws AmbariException;
-
-  /**
-   * Get the hosts identified by the given request objects.
-   *
-   * @param requests  the request objects which identify the hosts to be returned
-   *
-   * @return a set of host responses
-   *
-   * @throws AmbariException thrown if the resource cannot be read
-   */
-  public Set<HostResponse> getHosts(Set<HostRequest> requests)
-      throws AmbariException;
-
-  /**
-   * Get the host components identified by the given request objects.
-   *
-   * @param requests  the request objects which identify the host components
-   * to be returned
-   *
-   * @return a set of host component responses
-   *
-   * @throws AmbariException thrown if the resource cannot be read
-   */
-  public Set<ServiceComponentHostResponse> getHostComponents(
-      Set<ServiceComponentHostRequest> requests) throws AmbariException;
-
-  /**
-   * Gets the configurations identified by the given request objects.
-   *
-   * @param requests   the request objects
-   *
-   * @return  a set of configuration responses
-   *
-   * @throws AmbariException if the configurations could not be read
-   */
-  public Set<ConfigurationResponse> getConfigurations(
-      Set<ConfigurationRequest> requests) throws AmbariException;
-
-  /**
-   * Gets the request status identified by the given request object.
-   *
-   * @param request   the request object
-   *
-   * @return  a set of request status responses
-   *
-   * @throws AmbariException if the request status could not be read
-   */
-  public Set<RequestStatusResponse> getRequestStatus(RequestStatusRequest request)
-      throws AmbariException;
-
-  /**
-   * Gets the task status identified by the given request objects.
-   *
-   * @param requests   the request objects
-   *
-   * @return  a set of task status responses
-   *
-   * @throws AmbariException if the task statuses could not be read
-   */
-  public Set<TaskStatusResponse> getTaskStatus(Set<TaskStatusRequest> requests)
-      throws AmbariException;
-
-  /**
-   * Gets the users identified by the given request objects.
-   *
-   * @param requests  the request objects
-   * 
-   * @return  a set of user responses
-   * 
-   * @throws AmbariException if the users could not be read
-   */
-  public Set<UserResponse> getUsers(Set<UserRequest> requests)
-      throws AmbariException;
-  
-  /**
-   * Gets the host component config mappings
-   * 
-   * @param request the host component request
-   * 
-   * @return the configuration mappings
-   * 
-   * @throws AmbariException
-   */
-  public Map<String, String> getHostComponentDesiredConfigMapping(
-      ServiceComponentHostRequest request) throws AmbariException;
-
-  // ----- Update -----------------------------------------------------------
-
-  /**
-   * Update the cluster identified by the given request object with the
-   * values it carries.
-   *
-   * @param request    the request object which defines which cluster to
-   *                   update and the values to set
-   *
-   * @return a track action response
-   *
-   * @throws AmbariException thrown if the resource cannot be updated
-   */
-  public RequestStatusResponse updateCluster(ClusterRequest request)
-      throws AmbariException;
-
-  /**
-   * Update the services identified by the given request objects with the
-   * values they carry.
-   *
-   * @param requests    the request objects which define which services to
-   *                   update and the values to set
-   *
-   * @return a track action response
-   *
-   * @throws AmbariException thrown if the resource cannot be updated
-   */
-  public RequestStatusResponse updateServices(Set<ServiceRequest> requests)
-      throws AmbariException;
-
-  /**
-   * Update the components identified by the given request objects with the
-   * values they carry.
-   *
-   * @param requests    the request objects which define which components to
-   *                   update and the values to set
-   *
-   * @return a track action response
-   *
-   * @throws AmbariException thrown if the resource cannot be updated
-   */
-  public RequestStatusResponse updateComponents(
-      Set<ServiceComponentRequest> requests) throws AmbariException;
-
-  /**
-   * Update the hosts identified by the given request objects with the
-   * values carried by those request objects.
-   *
-   * @param requests   the request objects which define which hosts to
-   *                   update and the values to set
-   *
-   * @throws AmbariException thrown if the resource cannot be updated
-   */
-  public void updateHosts(Set<HostRequest> requests)
-      throws AmbariException;
-
-  /**
-   * Update the host components identified by the given request objects with
-   * the values carried by those request objects.
-   *
-   * @param requests   the request objects which define which host components
-   *                   to update and the values to set
-   *
-   * @return a track action response
-   *
-   * @throws AmbariException thrown if the resource cannot be updated
-   */
-  public RequestStatusResponse updateHostComponents(
-      Set<ServiceComponentHostRequest> requests) throws AmbariException;
-  
-  /**
-   * Updates the users specified.
-   * 
-   * @param requests  the users to modify
-   * 
-   * @throws  AmbariException if the resources cannot be updated
-   */
-  public void updateUsers(Set<UserRequest> requests) throws AmbariException;
-
-
-  // ----- Delete -----------------------------------------------------------
-
-  /**
-   * Delete the cluster identified by the given request object.
-   *
-   * @param request  the request object which identifies which cluster to delete
-   *
-   * @throws AmbariException thrown if the resource cannot be deleted
-   */
-  public void deleteCluster(ClusterRequest request) throws AmbariException;
-
-  /**
-   * Delete the services identified by the given request objects.
-   *
-   * @param requests  the request objects which identify the services to delete
-   *
-   * @return a track action response
-   *
-   * @throws AmbariException thrown if the resource cannot be deleted
-   */
-  public RequestStatusResponse deleteServices(Set<ServiceRequest> requests)
-      throws AmbariException;
-
-  /**
-   * Delete the components identified by the given request objects.
-   *
-   * @param requests  the request objects which identify the components to delete
-   *
-   * @return a track action response
-   *
-   * @throws AmbariException thrown if the resource cannot be deleted
-   */
-  public RequestStatusResponse deleteComponents(
-      Set<ServiceComponentRequest> requests) throws AmbariException;
-
-  /**
-   * Delete the hosts identified by the given request objects.
-   *
-   * @param requests  the request objects which identify the hosts to delete
-   *
-   * @throws AmbariException thrown if the resource cannot be deleted
-   */
-  public void deleteHosts(Set<HostRequest> requests)
-      throws AmbariException;
-
-  /**
-   * Delete the host components identified by the given request objects.
-   *
-   * @param requests  the request objects which identify the host components to delete
-   *
-   * @return a track action response
-   *
-   * @throws AmbariException thrown if the resource cannot be deleted
-   */
-  public RequestStatusResponse deleteHostComponents(
-      Set<ServiceComponentHostRequest> requests) throws AmbariException;
-  
-  /**
-   * Deletes the users specified.
-   * 
-   * @param requests  the users to delete
-   * 
-   * @throws  AmbariException if the resources cannot be deleted
-   */
-  public void deleteUsers(Set<UserRequest> requests) throws AmbariException;  
-
-  /**
-   * Creates the actions identified by the given request objects.
-   *
-   * @param request  the request objects
-   *
-   * @return a track action response
-   *
-   * @throws AmbariException if the actions cannot be created
-   */
-  public RequestStatusResponse createActions(Set<ActionRequest> request)
-      throws AmbariException;
-
-  /**
-   * Gets the actions identified by the given request objects.
-   *
-   * @param request  the request objects
-   *
-   * @return a set of action responses
-   *
-   * @throws AmbariException if the actions could not be read
-   */
-  public Set<ActionResponse> getActions(Set<ActionRequest> request)
-      throws AmbariException;
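-
-  /*
-   * Illustrative usage sketch (hypothetical caller code, not part of this
-   * interface): reading every configuration of one type for a cluster. The
-   * ConfigurationRequest constructor order of (clusterName, type,
-   * versionTag, configs) is an assumption inferred from the getters used by
-   * the implementation, not confirmed by this file:
-   *
-   *   ConfigurationRequest req =
-   *       new ConfigurationRequest("c1", "core-site", null, null);
-   *   Set<ConfigurationResponse> configs =
-   *       controller.getConfigurations(Collections.singleton(req));
-   */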
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
deleted file mode 100644
index b205a8c..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ /dev/null
@@ -1,3274 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller;
-
-import java.net.InetAddress;
-import java.util.*;
-import java.util.Map.Entry;
-
-import org.apache.ambari.server.*;
-import org.apache.ambari.server.actionmanager.ActionManager;
-import org.apache.ambari.server.actionmanager.HostRoleCommand;
-import org.apache.ambari.server.actionmanager.RequestStatus;
-import org.apache.ambari.server.actionmanager.Stage;
-import org.apache.ambari.server.actionmanager.StageFactory;
-import org.apache.ambari.server.agent.ExecutionCommand;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.metadata.ActionMetadata;
-import org.apache.ambari.server.metadata.RoleCommandOrder;
-import org.apache.ambari.server.security.authorization.User;
-import org.apache.ambari.server.security.authorization.Users;
-import org.apache.ambari.server.stageplanner.RoleGraph;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.ComponentInfo;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigFactory;
-import org.apache.ambari.server.state.Host;
-import org.apache.ambari.server.state.RepositoryInfo;
-import org.apache.ambari.server.state.Service;
-import org.apache.ambari.server.state.ServiceComponent;
-import org.apache.ambari.server.state.ServiceComponentFactory;
-import org.apache.ambari.server.state.ServiceComponentHost;
-import org.apache.ambari.server.state.ServiceComponentHostEvent;
-import org.apache.ambari.server.state.ServiceComponentHostFactory;
-import org.apache.ambari.server.state.ServiceFactory;
-import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.state.StackInfo;
-import org.apache.ambari.server.state.State;
-import org.apache.ambari.server.state.svccomphost.ServiceComponentHostInstallEvent;
-import org.apache.ambari.server.state.svccomphost.ServiceComponentHostOpInProgressEvent;
-import org.apache.ambari.server.state.svccomphost.ServiceComponentHostStartEvent;
-import org.apache.ambari.server.state.svccomphost.ServiceComponentHostStopEvent;
-import org.apache.ambari.server.utils.StageUtils;
-import org.apache.commons.lang.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.gson.Gson;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.Singleton;
-
-@Singleton
-public class AmbariManagementControllerImpl implements
-    AmbariManagementController {
-
-  private final static Logger LOG =
-      LoggerFactory.getLogger(AmbariManagementControllerImpl.class);
-
-  private final Clusters clusters;
-
-  private String baseLogDir = "/tmp/ambari/";
-
-  private final ActionManager actionManager;
-
-  @SuppressWarnings("unused")
-  private final Injector injector;
-
-  private final Gson gson;
-
-  private static RoleCommandOrder rco;
-  static {
-    rco = new RoleCommandOrder();
-    RoleCommandOrder.initialize();
-  }
-
-  @Inject
-  private ServiceFactory serviceFactory;
-  @Inject
-  private ServiceComponentFactory serviceComponentFactory;
-  @Inject
-  private ServiceComponentHostFactory serviceComponentHostFactory;
-  @Inject
-  private ConfigFactory configFactory;
-  @Inject
-  private StageFactory stageFactory;
-  @Inject
-  private ActionMetadata actionMetadata;
-  @Inject
-  private AmbariMetaInfo ambariMetaInfo;
-  @Inject
-  private Users users;
-  @Inject
-  private HostsMap hostsMap;
-  @Inject
-  private Configuration configs;
-
-  private final String masterHostname;
-
-  private static final String JDK_RESOURCE_LOCATION =
-      "/resources/";
-
-  private final String jdkResourceUrl;
-
-  @Inject
-  public AmbariManagementControllerImpl(ActionManager actionManager,
-      Clusters clusters, Injector injector) throws Exception {
-    this.clusters = clusters;
-    this.actionManager = actionManager;
-    this.injector = injector;
-    injector.injectMembers(this);
-    this.gson = injector.getInstance(Gson.class);
-    LOG.info("Initializing the AmbariManagementControllerImpl");
-    this.masterHostname = InetAddress.getLocalHost().getCanonicalHostName();
-
-    if (configs != null) {
-      this.jdkResourceUrl = "http://" + masterHostname + ":"
-          + configs.getClientApiPort()
-          + JDK_RESOURCE_LOCATION;
-    } else {
-      this.jdkResourceUrl = null;
-    }
-  }
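-
-  /*
-   * For illustration: with the client API port returned by
-   * configs.getClientApiPort() (typically 8080 unless overridden in the
-   * server configuration), the computed resource URL takes the shape:
-   *
-   *   http://<master-fqdn>:8080/resources/
-   */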
-
-  @Override
-  public void createCluster(ClusterRequest request)
-      throws AmbariException {
-    if (request.getClusterName() == null
-        || request.getClusterName().isEmpty()
-        || request.getClusterId() != null) {
-      throw new IllegalArgumentException("Cluster name should be provided" +
-          " and clusterId should be null");
-    }
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Received a createCluster request"
-          + ", clusterName=" + request.getClusterName()
-          + ", request=" + request);
-    }
-
-    if (request.getStackVersion() == null
-        || request.getStackVersion().isEmpty()) {
-      throw new IllegalArgumentException("Stack information should be"
-          + " provided when creating a cluster");
-    }
-    StackId stackId = new StackId(request.getStackVersion());
-    StackInfo stackInfo = ambariMetaInfo.getStackInfo(stackId.getStackName(),
-        stackId.getStackVersion());
-    if (stackInfo == null) {
-      throw new StackNotFoundException(stackId.getStackName(),
-          stackId.getStackVersion());
-    }
-
-    // FIXME add support for desired configs at cluster level
-
-    boolean foundInvalidHosts = false;
-    StringBuilder invalidHostsStr = new StringBuilder();
-    if (request.getHostNames() != null) {
-      for (String hostname : request.getHostNames()) {
-        try {
-          clusters.getHost(hostname);
-        } catch (HostNotFoundException e) {
-          if (foundInvalidHosts) {
-            invalidHostsStr.append(",");
-          }
-          foundInvalidHosts = true;
-          invalidHostsStr.append(hostname);
-        }
-      }
-    }
-    if (foundInvalidHosts) {
-      throw new HostNotFoundException(invalidHostsStr.toString());
-    }
-
-    clusters.addCluster(request.getClusterName());
-    Cluster c = clusters.getCluster(request.getClusterName());
-    if (request.getStackVersion() != null) {
-      c.setDesiredStackVersion(
-          new StackId(request.getStackVersion()));
-    }
-
-    if (request.getHostNames() != null) {
-      clusters.mapHostsToCluster(request.getHostNames(),
-          request.getClusterName());
-    }
-
-  }
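-
-  /*
-   * Illustrative sketch (hypothetical caller; the ClusterRequest constructor
-   * order of (clusterId, clusterName, stackVersion, hostNames) is assumed
-   * from the getters used above): creating a named cluster on a specific
-   * stack with one pre-registered host. Note that clusterId must be null on
-   * creation:
-   *
-   *   controller.createCluster(new ClusterRequest(
-   *       null, "c1", "HDP-1.2.0", Collections.singleton("host1")));
-   */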
-
-  @Override
-  public synchronized void createServices(Set<ServiceRequest> requests)
-      throws AmbariException {
-
-    if (requests.isEmpty()) {
-      LOG.warn("Received an empty requests set");
-      return;
-    }
-
-    // do all validation checks
-    Map<String, Set<String>> serviceNames = new HashMap<String, Set<String>>();
-    Set<String> duplicates = new HashSet<String>();
-    for (ServiceRequest request : requests) {
-      if (request.getClusterName() == null
-          || request.getClusterName().isEmpty()
-          || request.getServiceName() == null
-          || request.getServiceName().isEmpty()) {
-        throw new IllegalArgumentException("Cluster name and service name"
-            + " should be provided when creating a service");
-      }
-
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Received a createService request"
-            + ", clusterName=" + request.getClusterName()
-            + ", serviceName=" + request.getServiceName()
-            + ", request=" + request);
-      }
-
-      if (!serviceNames.containsKey(request.getClusterName())) {
-        serviceNames.put(request.getClusterName(), new HashSet<String>());
-      }
-      if (serviceNames.get(request.getClusterName())
-          .contains(request.getServiceName())) {
-        // throw error later for dup
-        duplicates.add(request.getServiceName());
-        continue;
-      }
-      serviceNames.get(request.getClusterName()).add(request.getServiceName());
-
-      if (request.getDesiredState() != null
-          && !request.getDesiredState().isEmpty()) {
-        State state = State.valueOf(request.getDesiredState());
-        if (!state.isValidDesiredState()
-            || state != State.INIT) {
-          throw new IllegalArgumentException("Invalid desired state"
-              + " only INIT state allowed during creation"
-              + ", providedDesiredState=" + request.getDesiredState());
-        }
-      }
-
-      Cluster cluster;
-      try {
-        cluster = clusters.getCluster(request.getClusterName());
-      } catch (ClusterNotFoundException e) {
-        throw new ParentObjectNotFoundException("Attempted to add a service to a cluster which doesn't exist", e);
-      }
-      try {
-        Service s = cluster.getService(request.getServiceName());
-        if (s != null) {
-          // throw error later for dup
-          duplicates.add(request.getServiceName());
-          continue;
-        }
-      } catch (ServiceNotFoundException e) {
-        // Expected
-      }
-
-      StackId stackId = cluster.getDesiredStackVersion();
-      if (!ambariMetaInfo.isValidService(stackId.getStackName(),
-          stackId.getStackVersion(), request.getServiceName())) {
-        throw new IllegalArgumentException("Unsupported or invalid service"
-            + " in stack"
-            + ", clusterName=" + request.getClusterName()
-            + ", serviceName=" + request.getServiceName()
-            + ", stackInfo=" + stackId.getStackId());
-      }
-    }
-
-    // ensure only a single cluster update
-    if (serviceNames.size() != 1) {
-      throw new IllegalArgumentException("Invalid arguments, updates allowed"
-          + "on only one cluster at a time");
-    }
-
-    // Validate dups
-    if (!duplicates.isEmpty()) {
-      String svcNames = StringUtils.join(duplicates, ",");
-      String clusterName = requests.iterator().next().getClusterName();
-      String msg;
-      if (duplicates.size() == 1) {
-        msg = "Attempted to create a service which already exists: "
-            + "clusterName=" + clusterName + ", serviceName=" + svcNames;
-      } else {
-        msg = "Attempted to create services which already exist: "
-            + "clusterName=" + clusterName + ", serviceNames=" + svcNames;
-      }
-      throw new DuplicateResourceException(msg);
-    }
-
-    // now to the real work
-    for (ServiceRequest request : requests) {
-      Cluster cluster = clusters.getCluster(request.getClusterName());
-
-      // FIXME initialize configs based off service.configVersions
-      Map<String, Config> configs = new HashMap<String, Config>();
-
-      State state = State.INIT;
-
-      // Already checked that service does not exist
-      Service s = serviceFactory.createNew(cluster, request.getServiceName());
-
-      s.setDesiredState(state);
-      s.updateDesiredConfigs(configs);
-      s.setDesiredStackVersion(cluster.getDesiredStackVersion());
-      cluster.addService(s);
-      s.persist();
-    }
-
-  }
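-
-  /*
-   * Illustrative sketch (hypothetical caller; a ServiceRequest carrying
-   * (clusterName, serviceName, configVersions, desiredState) is assumed from
-   * the getters used above). Newly created services always begin in the INIT
-   * desired state, so the state argument may simply be left null:
-   *
-   *   controller.createServices(Collections.singleton(
-   *       new ServiceRequest("c1", "HDFS", null, null)));
-   */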
-
-  @Override
-  public synchronized void createComponents(
-      Set<ServiceComponentRequest> requests) throws AmbariException {
-
-    if (requests.isEmpty()) {
-      LOG.warn("Received an empty requests set");
-      return;
-    }
-
-    // do all validation checks
-    Map<String, Map<String, Set<String>>> componentNames =
-        new HashMap<String, Map<String,Set<String>>>();
-    Set<String> duplicates = new HashSet<String>();
-
-    for (ServiceComponentRequest request : requests) {
-      if (request.getClusterName() == null
-          || request.getClusterName().isEmpty()
-          || request.getComponentName() == null
-          || request.getComponentName().isEmpty()) {
-        throw new IllegalArgumentException("Invalid arguments"
-            + ", clustername and componentname should be"
-            + " non-null and non-empty when trying to create a"
-            + " component");
-      }
-
-      Cluster cluster;
-      try {
-        cluster = clusters.getCluster(request.getClusterName());
-      } catch (ClusterNotFoundException e) {
-        throw new ParentObjectNotFoundException(
-            "Attempted to add a component to a cluster which doesn't exist:", e);
-      }
-
-      if (request.getServiceName() == null
-          || request.getServiceName().isEmpty()) {
-        StackId stackId = cluster.getDesiredStackVersion();
-        String serviceName =
-            ambariMetaInfo.getComponentToService(stackId.getStackName(),
-                stackId.getStackVersion(), request.getComponentName());
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Looking up service name for component"
-              + ", componentName=" + request.getComponentName()
-              + ", serviceName=" + serviceName);
-        }
-
-        if (serviceName == null
-            || serviceName.isEmpty()) {
-          throw new AmbariException("Could not find service for component"
-              + ", componentName=" + request.getComponentName()
-              + ", clusterName=" + cluster.getClusterName()
-              + ", stackInfo=" + stackId.getStackId());
-        }
-        request.setServiceName(serviceName);
-      }
-
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Received a createComponent request"
-            + ", clusterName=" + request.getClusterName()
-            + ", serviceName=" + request.getServiceName()
-            + ", componentName=" + request.getComponentName()
-            + ", request=" + request);
-      }
-
-      if (!componentNames.containsKey(request.getClusterName())) {
-        componentNames.put(request.getClusterName(),
-            new HashMap<String, Set<String>>());
-      }
-      if (!componentNames.get(request.getClusterName())
-          .containsKey(request.getServiceName())) {
-        componentNames.get(request.getClusterName()).put(
-            request.getServiceName(), new HashSet<String>());
-      }
-      if (componentNames.get(request.getClusterName())
-          .get(request.getServiceName()).contains(request.getComponentName())){
-        // throw error later for dup
-        duplicates.add("[clusterName=" + request.getClusterName() + ", serviceName=" + request.getServiceName() +
-            ", componentName=" + request.getComponentName() + "]");
-        continue;
-      }
-      componentNames.get(request.getClusterName())
-          .get(request.getServiceName()).add(request.getComponentName());
-
-      if (request.getDesiredState() != null
-          && !request.getDesiredState().isEmpty()) {
-        State state = State.valueOf(request.getDesiredState());
-        if (!state.isValidDesiredState()
-            || state != State.INIT) {
-          throw new IllegalArgumentException("Invalid desired state"
-              + " only INIT state allowed during creation"
-              + ", providedDesiredState=" + request.getDesiredState());
-        }
-      }
-
-      Service s;
-      try {
-        s = cluster.getService(request.getServiceName());
-      } catch (ServiceNotFoundException e) {
-        throw new ParentObjectNotFoundException(
-            "Attempted to add a component to a service which doesn't exist:", e);
-      }
-      try {
-        ServiceComponent sc = s.getServiceComponent(request.getComponentName());
-        if (sc != null) {
-          // throw error later for dup
-          duplicates.add("[clusterName=" + request.getClusterName() + ", serviceName=" + request.getServiceName() +
-              ", componentName=" + request.getComponentName() + "]");
-          continue;
-        }
-      } catch (AmbariException e) {
-        // Expected
-      }
-
-      StackId stackId = s.getDesiredStackVersion();
-      if (!ambariMetaInfo.isValidServiceComponent(stackId.getStackName(),
-          stackId.getStackVersion(), s.getName(), request.getComponentName())) {
-        throw new IllegalArgumentException("Unsupported or invalid component"
-            + " in stack"
-            + ", clusterName=" + request.getClusterName()
-            + ", serviceName=" + request.getServiceName()
-            + ", componentName=" + request.getComponentName()
-            + ", stackInfo=" + stackId.getStackId());
-      }
-    }
-
-    // ensure only a single cluster update
-    if (componentNames.size() != 1) {
-      throw new IllegalArgumentException("Invalid arguments, updates allowed"
-          + "on only one cluster at a time");
-    }
-
-    // Validate dups
-    if (!duplicates.isEmpty()) {
-      String names = StringUtils.join(duplicates, ",");
-      String msg;
-      if (duplicates.size() == 1) {
-        msg = "Attempted to create a component which already exists: ";
-      } else {
-        msg = "Attempted to create components which already exist: ";
-      }
-      throw new DuplicateResourceException(msg + names);
-    }
-
-
-    // now doing actual work
-    for (ServiceComponentRequest request : requests) {
-      Cluster cluster = clusters.getCluster(request.getClusterName());
-      Service s = cluster.getService(request.getServiceName());
-      ServiceComponent sc = serviceComponentFactory.createNew(s,
-          request.getComponentName());
-      sc.setDesiredStackVersion(s.getDesiredStackVersion());
-
-      if (request.getDesiredState() != null
-          && !request.getDesiredState().isEmpty()) {
-        State state = State.valueOf(request.getDesiredState());
-        sc.setDesiredState(state);
-      } else {
-        sc.setDesiredState(s.getDesiredState());
-      }
-
-      // FIXME fix config versions to configs conversion
-      Map<String, Config> configs = new HashMap<String, Config>();
-      if (request.getConfigVersions() != null) {
-      }
-
-      sc.updateDesiredConfigs(configs);
-      s.addServiceComponent(sc);
-      sc.persist();
-    }
-
-  }
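-
-  /*
-   * Illustrative sketch (hypothetical caller; a ServiceComponentRequest of
-   * (clusterName, serviceName, componentName, configVersions, desiredState)
-   * is assumed from the getters used above). The service name may be left
-   * null, in which case it is resolved from the stack metadata:
-   *
-   *   controller.createComponents(Collections.singleton(
-   *       new ServiceComponentRequest("c1", null, "NAMENODE", null, null)));
-   */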
-
-  @Override
-  public synchronized void createHosts(Set<HostRequest> requests)
-      throws AmbariException {
-
-    if (requests.isEmpty()) {
-      LOG.warn("Received an empty requests set");
-      return;
-    }
-
-    Set<String> duplicates = new HashSet<String>();
-    Set<String> unknowns = new HashSet<String>();
-    Set<String> allHosts = new HashSet<String>();
-    for (HostRequest request : requests) {
-      if (request.getHostname() == null
-          || request.getHostname().isEmpty()) {
-        throw new IllegalArgumentException("Invalid arguments, hostname"
-            + " cannot be null");
-      }
-
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Received a createHost request"
-            + ", hostname=" + request.getHostname()
-            + ", request=" + request);
-      }
-
-      if (allHosts.contains(request.getHostname())) {
-        // throw dup error later
-        duplicates.add(request.getHostname());
-        continue;
-      }
-      allHosts.add(request.getHostname());
-
-      try {
-        // ensure host is registered
-        clusters.getHost(request.getHostname());
-      }
-      catch (HostNotFoundException e) {
-        unknowns.add(request.getHostname());
-        continue;
-      }
-
-      if (request.getClusterName() != null) {
-        try {
-          // validate that cluster_name is valid
-          clusters.getCluster(request.getClusterName());
-        } catch (ClusterNotFoundException e) {
-          throw new ParentObjectNotFoundException("Attempted to add a host to a cluster which doesn't exist: "
-              + " clusterName=" + request.getClusterName());
-        }
-      }
-    }
-
-    if (!duplicates.isEmpty()) {
-      throw new IllegalArgumentException("Invalid request contains"
-          + " duplicate hostnames"
-          + ", hostnames=" + StringUtils.join(duplicates, ","));
-    }
-
-    if (!unknowns.isEmpty()) {
-      throw new IllegalArgumentException("Attempted to add unknown hosts to a cluster. "
-          + "These hosts have not been registered with the server: "
-          + StringUtils.join(unknowns, ","));
-    }
-
-    for (HostRequest request : requests) {
-      if (request.getClusterName() != null) {
-        clusters.mapHostToCluster(request.getHostname(), request.getClusterName());
-      }
-
-      if (request.getHostAttributes() != null) {
-        clusters.getHost(request.getHostname()).
-            setHostAttributes(request.getHostAttributes());
-      }
-    }
-  }
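-
-  /*
-   * Illustrative sketch (hypothetical caller; a HostRequest of
-   * (hostname, clusterName, hostAttributes) is assumed from the getters used
-   * above). The host must already have registered with the server through
-   * its agent, or the request is rejected as unknown:
-   *
-   *   controller.createHosts(Collections.singleton(
-   *       new HostRequest("host1", "c1", null)));
-   */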
-
-  @Override
-  public synchronized void createHostComponents(Set<ServiceComponentHostRequest> requests)
-      throws AmbariException {
-
-    if (requests.isEmpty()) {
-      LOG.warn("Received an empty requests set");
-      return;
-    }
-
-    // do all validation checks
-    Map<String, Map<String, Map<String, Set<String>>>> hostComponentNames =
-        new HashMap<String, Map<String, Map<String, Set<String>>>>();
-    Set<String> duplicates = new HashSet<String>();
-    for (ServiceComponentHostRequest request : requests) {
-      if (request.getClusterName() == null
-          || request.getClusterName().isEmpty()
-          || request.getComponentName() == null
-          || request.getComponentName().isEmpty()
-          || request.getHostname() == null
-          || request.getHostname().isEmpty()) {
-        throw new IllegalArgumentException("Invalid arguments,"
-            + " clustername, componentname and hostname should not be null"
-            + " when trying to create a hostcomponent");
-      }
-
-      Cluster cluster;
-      try {
-        cluster = clusters.getCluster(request.getClusterName());
-      } catch (ClusterNotFoundException e) {
-        throw new ParentObjectNotFoundException(
-            "Attempted to add a host_component to a cluster which doesn't exist: ", e);
-      }
-
-      if (request.getServiceName() == null
-          || request.getServiceName().isEmpty()) {
-        StackId stackId = cluster.getDesiredStackVersion();
-        String serviceName =
-            ambariMetaInfo.getComponentToService(stackId.getStackName(),
-                stackId.getStackVersion(), request.getComponentName());
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Looking up service name for component"
-              + ", componentName=" + request.getComponentName()
-              + ", serviceName=" + serviceName);
-        }
-        if (serviceName == null
-            || serviceName.isEmpty()) {
-          throw new AmbariException("Could not find service for component"
-              + ", componentName=" + request.getComponentName()
-              + ", clusterName=" + cluster.getClusterName()
-              + ", stackInfo=" + stackId.getStackId());
-        }
-        request.setServiceName(serviceName);
-      }
-
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Received a createHostComponent request"
-            + ", clusterName=" + request.getClusterName()
-            + ", serviceName=" + request.getServiceName()
-            + ", componentName=" + request.getComponentName()
-            + ", hostname=" + request.getHostname()
-            + ", request=" + request);
-      }
-
-      if (!hostComponentNames.containsKey(request.getClusterName())) {
-        hostComponentNames.put(request.getClusterName(),
-            new HashMap<String, Map<String,Set<String>>>());
-      }
-      if (!hostComponentNames.get(request.getClusterName())
-          .containsKey(request.getServiceName())) {
-        hostComponentNames.get(request.getClusterName()).put(
-            request.getServiceName(), new HashMap<String, Set<String>>());
-      }
-      if (!hostComponentNames.get(request.getClusterName())
-          .get(request.getServiceName())
-          .containsKey(request.getComponentName())) {
-        hostComponentNames.get(request.getClusterName())
-            .get(request.getServiceName()).put(request.getComponentName(),
-                new HashSet<String>());
-      }
-      if (hostComponentNames.get(request.getClusterName())
-          .get(request.getServiceName()).get(request.getComponentName())
-          .contains(request.getHostname())) {
-        duplicates.add("[clusterName=" + request.getClusterName() + ", hostName=" + request.getHostname() +
-            ", componentName=" +request.getComponentName() +']');
-        continue;
-      }
-      hostComponentNames.get(request.getClusterName())
-          .get(request.getServiceName()).get(request.getComponentName())
-          .add(request.getHostname());
-
-      if (request.getDesiredState() != null
-          && !request.getDesiredState().isEmpty()) {
-        State state = State.valueOf(request.getDesiredState());
-        if (!state.isValidDesiredState()
-            || state != State.INIT) {
-          throw new IllegalArgumentException("Invalid desired state"
-              + " only INIT state allowed during creation"
-              + ", providedDesiredState=" + request.getDesiredState());
-        }
-      }
-
-      Service s;
-      try {
-        s = cluster.getService(request.getServiceName());
-      } catch (ServiceNotFoundException e) {
-        throw new IllegalArgumentException(
-            "The service[" + request.getServiceName() + "] associated with the component[" +
-            request.getComponentName() + "] doesn't exist for the cluster[" + request.getClusterName() + "]");
-      }
-      ServiceComponent sc = s.getServiceComponent(
-          request.getComponentName());
-
-      Host host;
-      try {
-        host = clusters.getHost(request.getHostname());
-      } catch (HostNotFoundException e) {
-        throw new ParentObjectNotFoundException(
-            "Attempted to add a host_component to a host that doesn't exist: ", e);
-      }
-      Set<Cluster> mappedClusters =
-          clusters.getClustersForHost(request.getHostname());
-      boolean validCluster = false;
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Looking to match host to cluster"
-            + ", hostnameViaReg=" + host.getHostName()
-            + ", hostname=" + request.getHostname()
-            + ", clusterName=" + request.getClusterName()
-            + ", hostClusterMapCount=" + mappedClusters.size());
-      }
-      for (Cluster mappedCluster : mappedClusters) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Host belongs to cluster"
-              + ", hostname=" + request.getHostname()
-              + ", clusterName=" + mappedCluster.getClusterName());
-        }
-        if (mappedCluster.getClusterName().equals(
-            request.getClusterName())) {
-          validCluster = true;
-          break;
-        }
-      }
-      if (!validCluster) {
-        throw new ParentObjectNotFoundException("Attempted to add a host_component to a host that doesn't exist: " +
-            "clusterName=" + request.getClusterName() + ", hostName=" + request.getHostname());
-      }
-      try {
-        ServiceComponentHost sch = sc.getServiceComponentHost(
-            request.getHostname());
-        if (sch != null) {
-          duplicates.add("[clusterName=" + request.getClusterName() + ", hostName=" + request.getHostname() +
-              ", componentName=" +request.getComponentName() +']');
-          continue;
-        }
-      } catch (AmbariException e) {
-        // Expected
-      }
-    }
-
-    // ensure only a single cluster update
-    if (hostComponentNames.size() != 1) {
-      throw new IllegalArgumentException("Invalid arguments - updates allowed"
-          + " on only one cluster at a time");
-    }
-
-    if (!duplicates.isEmpty()) {
-      String names = StringUtils.join(duplicates, ",");
-      String msg;
-      if (duplicates.size() == 1) {
-        msg = "Attempted to create a host_component which already exists: ";
-      } else {
-        msg = "Attempted to create host_components which already exist: ";
-      }
-      throw new DuplicateResourceException(msg + names);
-    }
-
-    // now doing actual work
-    for (ServiceComponentHostRequest request : requests) {
-      Cluster cluster = clusters.getCluster(request.getClusterName());
-      Service s = cluster.getService(request.getServiceName());
-      ServiceComponent sc = s.getServiceComponent(
-          request.getComponentName());
-
-      StackId stackId = sc.getDesiredStackVersion();
-      ComponentInfo compInfo = ambariMetaInfo.getComponentCategory(
-          stackId.getStackName(), stackId.getStackVersion(),
-          s.getName(), sc.getName());
-      boolean isClient = compInfo.isClient();
-
-      ServiceComponentHost sch =
-          serviceComponentHostFactory.createNew(sc, request.getHostname(),
-              isClient);
-
-      if (request.getDesiredState() != null
-          && !request.getDesiredState().isEmpty()) {
-        State state = State.valueOf(request.getDesiredState());
-        sch.setDesiredState(state);
-      } 
-
-      sch.setDesiredStackVersion(sc.getDesiredStackVersion());
-
-      // TODO fix config versions to configs conversion
-      Map<String, Config> configs = new HashMap<String, Config>();
-      if (request.getConfigVersions() != null) {
-      }
-
-      sch.updateDesiredConfigs(configs);
-      sc.addServiceComponentHost(sch);
-      sch.persist();
-    }
-
-  }
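-
-  /*
-   * Illustrative sketch (hypothetical caller; a ServiceComponentHostRequest
-   * of (clusterName, serviceName, componentName, hostname, configVersions,
-   * desiredState) is assumed from the getters used above). The host must be
-   * registered and mapped to the cluster beforehand:
-   *
-   *   controller.createHostComponents(Collections.singleton(
-   *       new ServiceComponentHostRequest(
-   *           "c1", "HDFS", "DATANODE", "host1", null, null)));
-   */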
-
-  public synchronized void createConfiguration(
-      ConfigurationRequest request) throws AmbariException {
-    if (null == request.getClusterName() || request.getClusterName().isEmpty()
-        || null == request.getType() || request.getType().isEmpty()
-        || null == request.getVersionTag() || request.getVersionTag().isEmpty()
-        || null == request.getConfigs() || request.getConfigs().isEmpty()) {
-      throw new IllegalArgumentException("Invalid Arguments,"
-          + " clustername, config type, config version and configs should not"
-          + " be null or empty");
-    }
-
-    Cluster cluster = clusters.getCluster(request.getClusterName());
-
-    Map<String, Config> configs = cluster.getDesiredConfigsByType(
-        request.getType());
-    if (null == configs) {
-      configs = new HashMap<String, Config>();
-    }
-
-    if (configs.containsKey(request.getVersionTag())) {
-      throw new AmbariException("Configuration with that tag exists for '"
-          + request.getType() + "'");
-    }
-
-    Config config = configFactory.createNew(cluster, request.getType(),
-        request.getConfigs());
-    config.setVersionTag(request.getVersionTag());
-
-    config.persist();
-
-    cluster.addDesiredConfig(config);
-  }
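-
-  /*
-   * Illustrative sketch (hypothetical caller): registering a new tagged
-   * configuration, assuming the (clusterName, type, versionTag, configs)
-   * constructor order suggested by the getters used above:
-   *
-   *   Map<String, String> props = new HashMap<String, String>();
-   *   props.put("fs.default.name", "hdfs://host1:8020");
-   *   controller.createConfiguration(
-   *       new ConfigurationRequest("c1", "core-site", "version1", props));
-   */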
-
-  @Override
-  public void createUsers(Set<UserRequest> requests) throws AmbariException {
-
-    for (UserRequest request : requests) {
-
-      if (null == request.getUsername() || request.getUsername().isEmpty() ||
-          null == request.getPassword() || request.getPassword().isEmpty()) {
-        throw new AmbariException("Username and password must be supplied.");
-      }
-
-      User user = users.getAnyUser(request.getUsername());
-      if (null != user) {
-        throw new AmbariException("User already exists.");
-      }
-
-      users.createUser(request.getUsername(), request.getPassword());
-
-      if (!request.getRoles().isEmpty()) {
-        user = users.getAnyUser(request.getUsername());
-        if (null != user) {
-          for (String role : request.getRoles()) {
-            if (!user.getRoles().contains(role)) {
-              users.addRoleToUser(user, role);
-            }
-          }
-        }
-      }
-    }
-
-  }
-
-  private Stage createNewStage(Cluster cluster, long requestId) {
-    // baseLogDir already ends with '/', so append the request id directly
-    // to avoid a double slash in the path
-    String logDir = baseLogDir + requestId;
-    return new Stage(requestId, logDir, cluster.getClusterName());
-  }
-
-  private void createHostAction(Cluster cluster,
-      Stage stage, ServiceComponentHost scHost,
-      Map<String, Map<String, String>> configurations,
-      RoleCommand command,
-      long nowTimestamp,
-      ServiceComponentHostEvent event) throws AmbariException {
-
-    stage.addHostRoleExecutionCommand(scHost.getHostName(), Role.valueOf(scHost
-        .getServiceComponentName()), command,
-        event, scHost.getClusterName(),
-        scHost.getServiceName());
-    ExecutionCommand execCmd = stage.getExecutionCommandWrapper(scHost.getHostName(),
-        scHost.getServiceComponentName()).getExecutionCommand();
-
-    // Generate cluster host info
-    execCmd.setClusterHostInfo(
-        StageUtils.getClusterHostInfo(cluster, hostsMap));
-
-    Host host = clusters.getHost(scHost.getHostName());
-
-    execCmd.setConfigurations(configurations);
-
-    // send stack info to agent
-    StackId stackId = scHost.getDesiredStackVersion();
-    Map<String, List<RepositoryInfo>> repos = ambariMetaInfo.getRepository(
-        stackId.getStackName(), stackId.getStackVersion());
-    String repoInfo = "";
-    if (!repos.containsKey(host.getOsType())) {
-      // FIXME should this be an error?
-      LOG.warn("Could not retrieve repo information for host"
-          + ", hostname=" + scHost.getHostName()
-          + ", clusterName=" + cluster.getClusterName()
-          + ", stackInfo=" + stackId.getStackId());
-    } else {
-      repoInfo = gson.toJson(repos.get(host.getOsType()));
-    }
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Sending repo information to agent"
-          + ", hostname=" + scHost.getHostName()
-          + ", clusterName=" + cluster.getClusterName()
-          + ", stackInfo=" + stackId.getStackId()
-          + ", repoInfo=" + repoInfo);
-    }
-
-    Map<String, String> params = new TreeMap<String, String>();
-    params.put("repo_info", repoInfo);
-    params.put("jdk_location", this.jdkResourceUrl);
-    execCmd.setHostLevelParams(params);
-
-    Map<String, String> roleParams = new TreeMap<String, String>();
-    execCmd.setRoleParams(roleParams);
-  }
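-
-  /*
-   * For illustration, the host-level parameters attached to each execution
-   * command take roughly this shape (values vary per host, stack and server
-   * configuration):
-   *
-   *   repo_info    -> JSON array of RepositoryInfo entries for the host's OS
-   *   jdk_location -> http://<master-fqdn>:8080/resources/
-   */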
-
-  private synchronized Set<ClusterResponse> getClusters(ClusterRequest request)
-      throws AmbariException {
-
-    Set<ClusterResponse> response = new HashSet<ClusterResponse>();
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Received a getClusters request"
-          + ", clusterName=" + request.getClusterName()
-          + ", clusterId=" + request.getClusterId()
-          + ", stackInfo=" + request.getStackVersion());
-    }
-
-    if (request.getClusterName() != null) {
-      Cluster c = clusters.getCluster(request.getClusterName());
-      response.add(c.convertToResponse());
-      return response;
-    } else if (request.getClusterId() != null) {
-      Cluster c = clusters.getClusterById(request.getClusterId());
-      response.add(c.convertToResponse());
-      return response;
-    }
-
-    Map<String, Cluster> allClusters = clusters.getClusters();
-    for (Cluster c : allClusters.values()) {
-      if (request.getStackVersion() != null) {
-        if (!request.getStackVersion().equals(
-            c.getDesiredStackVersion().getStackId())) {
-          // skip non matching stack versions
-          continue;
-        }
-      }
-      response.add(c.convertToResponse());
-    }
-    if (LOG.isDebugEnabled()) {
-      StringBuilder builder = new StringBuilder();
-      clusters.debugDump(builder);
-      LOG.debug("Cluster State for cluster " + builder.toString());
-    }
-    return response;
-  }
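-
-  /*
-   * For illustration: a ClusterRequest that names neither a clusterName nor
-   * a clusterId but carries stackVersion "HDP-1.2.0" returns a response for
-   * every cluster whose desired stack id equals that string; all other
-   * clusters are skipped.
-   */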
-
-  private synchronized Set<ServiceResponse> getServices(ServiceRequest request)
-      throws AmbariException {
-    if (request.getClusterName() == null
-        || request.getClusterName().isEmpty()) {
-      throw new AmbariException("Invalid arguments, cluster name"
-          + " cannot be null");
-    }
-    String clusterName = request.getClusterName();
-    final Cluster cluster;
-    try {
-      cluster = clusters.getCluster(clusterName);
-    } catch (ObjectNotFoundException e) {
-      throw new ParentObjectNotFoundException("Parent Cluster resource doesn't exist", e);
-    }
-
-    Set<ServiceResponse> response = new HashSet<ServiceResponse>();
-    if (request.getServiceName() != null) {
-      Service s = cluster.getService(request.getServiceName());
-      response.add(s.convertToResponse());
-      return response;
-    }
-
-    // TODO support search on predicates?
-
-    boolean checkDesiredState = false;
-    State desiredStateToCheck = null;
-    if (request.getDesiredState() != null
-        && !request.getDesiredState().isEmpty()) {
-      desiredStateToCheck = State.valueOf(request.getDesiredState());
-      if (!desiredStateToCheck.isValidDesiredState()) {
-        throw new IllegalArgumentException("Invalid arguments, invalid desired"
-            + " state, desiredState=" + desiredStateToCheck);
-      }
-      checkDesiredState = true;
-    }
-
-    for (Service s : cluster.getServices().values()) {
-      if (checkDesiredState
-          && (desiredStateToCheck != s.getDesiredState())) {
-        // skip non matching state
-        continue;
-      }
-      response.add(s.convertToResponse());
-    }
-    return response;
-
-  }
-
-  private synchronized Set<ServiceComponentResponse> getComponents(
-      ServiceComponentRequest request) throws AmbariException {
-    if (request.getClusterName() == null
-        || request.getClusterName().isEmpty()) {
-      throw new IllegalArgumentException("Invalid arguments, cluster name"
-          + " should be non-null");
-    }
-
-    final Cluster cluster;
-    try {
-      cluster = clusters.getCluster(request.getClusterName());
-    } catch (ObjectNotFoundException e) {
-      throw new ParentObjectNotFoundException("Parent Cluster resource doesn't exist", e);
-    }
-
-    Set<ServiceComponentResponse> response =
-        new HashSet<ServiceComponentResponse>();
-
-    if (request.getComponentName() != null) {
-      if (request.getServiceName() == null) {
-        StackId stackId = cluster.getDesiredStackVersion();
-        String serviceName =
-            ambariMetaInfo.getComponentToService(stackId.getStackName(),
-                stackId.getStackVersion(), request.getComponentName());
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Looking up service name for component"
-              + ", componentName=" + request.getComponentName()
-              + ", serviceName=" + serviceName);
-        }
-        if (serviceName == null
-            || serviceName.isEmpty()) {
-          throw new AmbariException("Could not find service for component"
-              + ", componentName=" + request.getComponentName()
-              + ", clusterName=" + cluster.getClusterName()
-              + ", stackInfo=" + stackId.getStackId());
-        }
-        request.setServiceName(serviceName);
-      }
-
-      final Service s;
-      try {
-        s = cluster.getService(request.getServiceName());
-      } catch (ObjectNotFoundException e) {
-        throw new ParentObjectNotFoundException("Parent Service resource doesn't exist", e);
-      }
-
-      ServiceComponent sc = s.getServiceComponent(request.getComponentName());
-      response.add(sc.convertToResponse());
-      return response;
-    }
-
-    boolean checkDesiredState = false;
-    State desiredStateToCheck = null;
-    if (request.getDesiredState() != null
-        && !request.getDesiredState().isEmpty()) {
-      desiredStateToCheck = State.valueOf(request.getDesiredState());
-      if (!desiredStateToCheck.isValidDesiredState()) {
-        throw new IllegalArgumentException("Invalid arguments, invalid desired"
-            + " state, desiredState=" + desiredStateToCheck);
-      }
-      checkDesiredState = true;
-    }
-
-    Set<Service> services = new HashSet<Service>();
-    if (request.getServiceName() != null
-        && !request.getServiceName().isEmpty()) {
-      services.add(cluster.getService(request.getServiceName()));
-    } else {
-      services.addAll(cluster.getServices().values());
-    }
-
-    for (Service s : services) {
-      // filter on request.getDesiredState()
-      for (ServiceComponent sc : s.getServiceComponents().values()) {
-        if (checkDesiredState
-            && (desiredStateToCheck != sc.getDesiredState())) {
-          // skip non matching state
-          continue;
-        }
-        response.add(sc.convertToResponse());
-      }
-    }
-    return response;
-  }
-
-  private synchronized Set<HostResponse> getHosts(HostRequest request)
-      throws AmbariException {
-
-    //TODO/FIXME host can only belong to a single cluster so get host directly from Cluster
-    //TODO/FIXME what is the requirement for filtering on host attributes?
-
-    List<Host>        hosts;
-    Set<HostResponse> response = new HashSet<HostResponse>();
-    Cluster           cluster  = null;
-
-    String clusterName = request.getClusterName();
-    String hostName    = request.getHostname();
-
-    if (clusterName != null) {
-      //validate that cluster exists, throws exception if it doesn't.
-      try {
-        cluster = clusters.getCluster(clusterName);
-      } catch (ObjectNotFoundException e) {
-        throw new ParentObjectNotFoundException("Parent Cluster resource doesn't exist", e);
-      }
-    }
-
-    if (hostName == null) {
-      hosts = clusters.getHosts();
-    } else {
-      hosts = new ArrayList<Host>();
-      try {
-        hosts.add(clusters.getHost(request.getHostname()));
-      } catch (HostNotFoundException e) {
-        // add cluster name
-        throw new HostNotFoundException(clusterName, hostName);
-      }
-    }
-
-    for (Host h : hosts) {
-      if (clusterName != null) {
-        if (clusters.getClustersForHost(h.getHostName()).contains(cluster)) {
-          HostResponse r = h.convertToResponse();
-          r.setClusterName(clusterName);
-          response.add(r);
-        } else if (hostName != null) {
-          throw new HostNotFoundException(clusterName, hostName);
-        }
-      } else {
-        HostResponse r = h.convertToResponse();
-
-        Set<Cluster> clustersForHost = clusters.getClustersForHost(h.getHostName());
-        //todo: host can only belong to a single cluster
-        if (clustersForHost != null && !clustersForHost.isEmpty()) {
-          r.setClusterName(clustersForHost.iterator().next().getClusterName());
-        }
-        response.add(r);
-      }
-    }
-    return response;
-  }
-
-  private synchronized Set<ServiceComponentHostResponse> getHostComponents(
-      ServiceComponentHostRequest request) throws AmbariException {
-    if (request.getClusterName() == null
-        || request.getClusterName().isEmpty()) {
-      throw new IllegalArgumentException("Invalid arguments, cluster name should not be null");
-    }
-
-    final Cluster cluster;
-    try {
-      cluster = clusters.getCluster(request.getClusterName());
-    } catch (ClusterNotFoundException e) {
-      throw new ParentObjectNotFoundException("Parent Cluster resource doesn't exist", e);
-    }
-
-    if (request.getHostname() != null) {
-      try {
-        if (!clusters.getClustersForHost(request.getHostname()).contains(cluster)) {
-          // case where host exists but not associated with given cluster
-          throw new ParentObjectNotFoundException("Parent Host resource doesn't exist",
-              new HostNotFoundException(request.getClusterName(), request.getHostname()));
-        }
-      } catch (HostNotFoundException e) {
-        // creating new HostNotFoundException to add cluster name
-        throw new ParentObjectNotFoundException("Parent Host resource doesn't exist",
-            new HostNotFoundException(request.getClusterName(), request.getHostname()));
-      }
-    }
-
-    if (request.getComponentName() != null) {
-      if (request.getServiceName() == null
-          || request.getServiceName().isEmpty()) {
-        StackId stackId = cluster.getDesiredStackVersion();
-        String serviceName =
-            ambariMetaInfo.getComponentToService(stackId.getStackName(),
-                stackId.getStackVersion(), request.getComponentName());
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Looking up service name for component"
-              + ", componentName=" + request.getComponentName()
-              + ", serviceName=" + serviceName
-              + ", stackInfo=" + stackId.getStackId());
-        }
-        if (serviceName == null
-            || serviceName.isEmpty()) {
-          throw new ServiceComponentHostNotFoundException(
-              cluster.getClusterName(), null, request.getComponentName(),request.getHostname());
-        }
-        request.setServiceName(serviceName);
-      }
-    }
-
-    Set<Service> services = new HashSet<Service>();
-    if (request.getServiceName() != null && !request.getServiceName().isEmpty()) {
-      services.add(cluster.getService(request.getServiceName()));
-    } else {
-      services.addAll(cluster.getServices().values());
-    }
-
-    Set<ServiceComponentHostResponse> response =
-        new HashSet<ServiceComponentHostResponse>();
-
-    boolean checkDesiredState = false;
-    State desiredStateToCheck = null;
-    if (request.getDesiredState() != null
-        && !request.getDesiredState().isEmpty()) {
-      desiredStateToCheck = State.valueOf(request.getDesiredState());
-      if (!desiredStateToCheck.isValidDesiredState()) {
-        throw new IllegalArgumentException("Invalid arguments, invalid desired"
-            + " state, desiredState=" + desiredStateToCheck);
-      }
-      checkDesiredState = true;
-    }
-
-    for (Service s : services) {
-      // filter on component name if provided
-      Set<ServiceComponent> components = new HashSet<ServiceComponent>();
-      if (request.getComponentName() != null) {
-        components.add(s.getServiceComponent(request.getComponentName()));
-      } else {
-        components.addAll(s.getServiceComponents().values());
-      }
-      for (ServiceComponent sc : components) {
-        if (request.getComponentName() != null) {
-          if (!sc.getName().equals(request.getComponentName())) {
-            continue;
-          }
-        }
-
-        // filter on hostname if provided
-        // filter on desired state if provided
-
-        if (request.getHostname() != null) {
-          try {
-            ServiceComponentHost sch = sc.getServiceComponentHost(
-                request.getHostname());
-            if (checkDesiredState
-                && (desiredStateToCheck != sch.getDesiredState())) {
-              continue;
-            }
-            ServiceComponentHostResponse r = sch.convertToResponse();
-            response.add(r);
-          } catch (ServiceComponentHostNotFoundException e) {
-            if (request.getServiceName() != null && request.getComponentName() != null) {
-              throw new ServiceComponentHostNotFoundException(cluster.getClusterName(),
-                  request.getServiceName(), request.getComponentName(),request.getHostname());
-            } else {
-              // ignore this since host_component was not specified
-              // this is an artifact of how we get host_components and can happen
-              // in case where we get all host_components for a host
-            }
-
-          }
-        } else {
-          for (ServiceComponentHost sch :
-              sc.getServiceComponentHosts().values()) {
-            if (checkDesiredState
-                && (desiredStateToCheck != sch.getDesiredState())) {
-              continue;
-            }
-            ServiceComponentHostResponse r = sch.convertToResponse();
-            response.add(r);
-          }
-        }
-      }
-    }
-    return response;
-  }
-
-
-  private synchronized Set<ConfigurationResponse> getConfigurations(
-      ConfigurationRequest request) throws AmbariException {
-    if (request.getClusterName() == null) {
-      throw new IllegalArgumentException("Invalid arguments, cluster name"
-          + " should not be null");
-    }
-
-    Cluster cluster = clusters.getCluster(request.getClusterName());
-
-    Set<ConfigurationResponse> responses = new HashSet<ConfigurationResponse>();
-
-    // !!! if only one, then we need full properties
-    if (null != request.getType() && null != request.getVersionTag()) {
-      Config config = cluster.getDesiredConfig(request.getType(),
-          request.getVersionTag());
-      if (null != config) {
-        ConfigurationResponse response = new ConfigurationResponse(
-            cluster.getClusterName(), config.getType(), config.getVersionTag(),
-            config.getProperties());
-        responses.add(response);
-      }
-    }
-    else {
-      if (null != request.getType()) {
-        Map<String, Config> configs = cluster.getDesiredConfigsByType(
-            request.getType());
-
-        if (null != configs) {
-          for (Entry<String, Config> entry : configs.entrySet()) {
-            ConfigurationResponse response = new ConfigurationResponse(
-                cluster.getClusterName(), request.getType(),
-                entry.getValue().getVersionTag(), new HashMap<String, String>());
-            responses.add(response);
-          }
-        }
-      } else {
-        // !!! all configuration
-        Collection<Config> all = cluster.getAllConfigs();
-
-        for (Config config : all) {
-          ConfigurationResponse response = new ConfigurationResponse(
-             cluster.getClusterName(), config.getType(), config.getVersionTag(),
-             new HashMap<String, String>());
-
-          responses.add(response);
-        }
-      }
-    }
-
-    return responses;
-
-  }
-
-
-  @Override
-  public synchronized RequestStatusResponse updateCluster(ClusterRequest request)
-      throws AmbariException {
-    // for now only update host list supported
-    if (request.getClusterName() == null
-        || request.getClusterName().isEmpty()) {
-      throw new IllegalArgumentException("Invalid arguments, cluster name"
-          + " should not be null");
-    }
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Received a updateCluster request"
-          + ", clusterName=" + request.getClusterName()
-          + ", request=" + request);
-    }
-
-    final Cluster c = clusters.getCluster(request.getClusterName());
-    clusters.mapHostsToCluster(request.getHostNames(),
-        request.getClusterName());
-
-    if (request.getStackVersion() != null
-        && !request.getStackVersion().equals(
-            c.getDesiredStackVersion().getStackId())) {
-      throw new IllegalArgumentException("Update of desired stack version"
-          + " not supported");
-    }
-
-    return null;
-  }
-
-  // FIXME refactor code out of all update functions
-  /*
-  private TrackActionResponse triggerStateChange(State newState, Service s,
-      ServiceComponent sc, ServiceComponentHost sch) {
-    return null;
-  }
-  */
-  
-  private String getJobTrackerHost(Cluster cluster) {
-    try {
-      Service svc = cluster.getService("MAPREDUCE");
-      ServiceComponent sc = svc.getServiceComponent(Role.JOBTRACKER.toString());
-      if (sc.getServiceComponentHosts() != null
-          && !sc.getServiceComponentHosts().isEmpty()) {
-        return sc.getServiceComponentHosts().keySet().iterator().next();
-      }
-    } catch (AmbariException ex) {
-      return null;
-    }
-    return null;
-  }
-
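-  /**
-   * Builds the stages needed to drive the requested state changes, submits
-   * them to the action manager, and persists the new desired states.
-   * Returns null when there is nothing to do.
-   */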
-  private RequestStatusResponse doStageCreation(Cluster cluster,
-      Map<State, List<Service>> changedServices,
-      Map<State, List<ServiceComponent>> changedComps,
-      Map<String, Map<State, List<ServiceComponentHost>>> changedScHosts)
-          throws AmbariException {
-
-    // TODO handle different transitions?
-    // Say HDFS to stopped and MR to started, what order should actions be done
-    // in?
-
-    // TODO additional validation?
-    // verify all configs
-    // verify all required components
-
-    if ((changedServices == null || changedServices.isEmpty())
-        && (changedComps == null || changedComps.isEmpty())
-        && (changedScHosts == null || changedScHosts.isEmpty())) {
-      return null;
-    }
-
-    Long requestId = null;
-    List<Stage> stages = null;
-
-    Set<String> smokeTestServices =
-        new HashSet<String>();
-
-    // smoke test any service that goes from installed to started
-    if (changedServices != null) {
-      for (Entry<State, List<Service>> entry : changedServices.entrySet()) {
-        if (State.STARTED != entry.getKey()) {
-          continue;
-        }
-        for (Service s : entry.getValue()) {
-          if (State.INSTALLED == s.getDesiredState()) {
-            smokeTestServices.add(s.getName());
-          }
-        }
-      }
-    }
-
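-    // Count, per service, how many components are entering STARTED from
-    // INSTALLED or START_FAILED; a service that starts more than one
-    // component, or starts a master, is also smoke tested.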
-    Map<String, Map<String, Integer>> changedComponentCount =
-        new HashMap<String, Map<String, Integer>>();
-    for (Map<State, List<ServiceComponentHost>> stateScHostMap :
-      changedScHosts.values()) {
-      for (Entry<State, List<ServiceComponentHost>> entry :
-          stateScHostMap.entrySet()) {
-        if (State.STARTED != entry.getKey()) {
-          continue;
-        }
-        for (ServiceComponentHost sch : entry.getValue()) {
-          if (State.START_FAILED != sch.getState()
-              && State.INSTALLED != sch.getState()) {
-            continue;
-          }
-          if (!changedComponentCount.containsKey(sch.getServiceName())) {
-            changedComponentCount.put(sch.getServiceName(),
-                new HashMap<String, Integer>());
-          }
-          if (!changedComponentCount.get(sch.getServiceName())
-              .containsKey(sch.getServiceComponentName())) {
-            changedComponentCount.get(sch.getServiceName())
-                .put(sch.getServiceComponentName(), 1);
-          } else {
-            Integer i = changedComponentCount.get(sch.getServiceName())
-                .get(sch.getServiceComponentName());
-            changedComponentCount.get(sch.getServiceName())
-              .put(sch.getServiceComponentName(), ++i);
-          }
-        }
-      }
-    }
-
-    for (Entry<String, Map<String, Integer>> entry :
-        changedComponentCount.entrySet()) {
-      String serviceName = entry.getKey();
-      // smoke test service if more than one component is started
-      if (entry.getValue().size() > 1) {
-        smokeTestServices.add(serviceName);
-        continue;
-      }
-      for (String componentName :
-        changedComponentCount.get(serviceName).keySet()) {
-        ServiceComponent sc = cluster.getService(serviceName)
-            .getServiceComponent(componentName);
-        StackId stackId = sc.getDesiredStackVersion();
-        ComponentInfo compInfo = ambariMetaInfo.getComponentCategory(
-            stackId.getStackName(), stackId.getStackVersion(), serviceName,
-            componentName);
-        if (compInfo.isMaster()) {
-          smokeTestServices.add(serviceName);
-        }
-
-        // FIXME if master check if we need to run a smoke test for the master
-      }
-    }
-
-    if (!changedScHosts.isEmpty()
-        || !smokeTestServices.isEmpty()) {
-      long nowTimestamp = System.currentTimeMillis();
-      requestId = Long.valueOf(actionManager.getNextRequestId());
-
-      // FIXME cannot work with a single stage
-      // multiple stages may be needed for reconfigure
-      long stageId = 0;
-      Stage stage = createNewStage(cluster, requestId.longValue());
-      stage.setStageId(stageId);
-      // HACK: find the JobTracker host so that rca_enabled can be forced off
-      // on all other hosts when configs are cloned below
-      String jobtrackerHost = this.getJobTrackerHost(cluster);
-      for (String compName : changedScHosts.keySet()) {
-        for (State newState : changedScHosts.get(compName).keySet()) {
-          for (ServiceComponentHost scHost :
-              changedScHosts.get(compName).get(newState)) {
-            RoleCommand roleCommand;
-            State oldSchState = scHost.getState();
-            ServiceComponentHostEvent event;
-            switch(newState) {
-              case INSTALLED:
-                if (oldSchState == State.INIT
-                    || oldSchState == State.UNINSTALLED
-                    || oldSchState == State.INSTALLED
-                    || oldSchState == State.INSTALLING
-                    || oldSchState == State.INSTALL_FAILED) {
-                  roleCommand = RoleCommand.INSTALL;
-                  event = new ServiceComponentHostInstallEvent(
-                      scHost.getServiceComponentName(), scHost.getHostName(),
-                      nowTimestamp,
-                      scHost.getDesiredStackVersion().getStackId());
-                } else if (oldSchState == State.STARTED
-                    || oldSchState == State.START_FAILED
-                    || oldSchState == State.INSTALLED
-                    || oldSchState == State.STOP_FAILED) {
-                  roleCommand = RoleCommand.STOP;
-                  event = new ServiceComponentHostStopEvent(
-                      scHost.getServiceComponentName(), scHost.getHostName(),
-                      nowTimestamp);
-                } else {
-                  throw new AmbariException("Invalid transition for"
-                      + " servicecomponenthost"
-                      + ", clusterName=" + cluster.getClusterName()
-                      + ", clusterId=" + cluster.getClusterId()
-                      + ", serviceName=" + scHost.getServiceName()
-                      + ", componentName=" + scHost.getServiceComponentName()
-                      + ", hostname=" + scHost.getHostName()
-                      + ", currentState=" + oldSchState
-                      + ", newDesiredState=" + newState);
-                }
-                break;
-              case STARTED:
-                StackId stackId = scHost.getDesiredStackVersion();
-                ComponentInfo compInfo = ambariMetaInfo.getComponentCategory(
-                    stackId.getStackName(), stackId.getStackVersion(), scHost.getServiceName(),
-                    scHost.getServiceComponentName());
-                if (oldSchState == State.INSTALLED
-                    || oldSchState == State.START_FAILED || oldSchState == State.STARTING) {
-                  roleCommand = RoleCommand.START;
-                  event = new ServiceComponentHostStartEvent(
-                      scHost.getServiceComponentName(), scHost.getHostName(),
-                      nowTimestamp, scHost.getDesiredConfigVersionsRecursive());
-                } else {
-                  String error = "Invalid transition for"
-                      + " servicecomponenthost"
-                      + ", clusterName=" + cluster.getClusterName()
-                      + ", clusterId=" + cluster.getClusterId()
-                      + ", serviceName=" + scHost.getServiceName()
-                      + ", componentName=" + scHost.getServiceComponentName()
-                      + ", hostname=" + scHost.getHostName()
-                      + ", currentState=" + oldSchState
-                      + ", newDesiredState=" + newState;
-                  if (compInfo.isMaster()) {
-                    throw new AmbariException(error);
-                  } else {
-                    LOG.info("Ignoring: " + error);
-                    continue;
-                  }
-                }
-                break;
-              case UNINSTALLED:
-                if (oldSchState == State.INSTALLED
-                    || oldSchState == State.UNINSTALL_FAILED) {
-                  roleCommand = RoleCommand.UNINSTALL;
-                  event = new ServiceComponentHostUninstallEvent(
-                      scHost.getServiceComponentName(), scHost.getHostName(),
-                      nowTimestamp);
-                } else {
-                  throw new AmbariException("Invalid transition for"
-                      + " servicecomponenthost"
-                      + ", clusterName=" + cluster.getClusterName()
-                      + ", clusterId=" + cluster.getClusterId()
-                      + ", serviceName=" + scHost.getServiceName()
-                      + ", componentName=" + scHost.getServiceComponentName()
-                      + ", hostname=" + scHost.getHostName()
-                      + ", currentState=" + oldSchState
-                      + ", newDesiredState=" + newState);
-                }
-                break;
-              case INIT:
-                throw new AmbariException("Unsupported transition to INIT for"
-                    + " servicecomponenthost"
-                    + ", clusterName=" + cluster.getClusterName()
-                    + ", clusterId=" + cluster.getClusterId()
-                    + ", serviceName=" + scHost.getServiceName()
-                    + ", componentName=" + scHost.getServiceComponentName()
-                    + ", hostname=" + scHost.getHostName()
-                    + ", currentState=" + oldSchState
-                    + ", newDesiredState=" + newState);
-              default:
-                throw new AmbariException("Unsupported state change operation"
-                    + ", newState=" + newState.toString());
-            }
-
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Create a new host action"
-                  + ", requestId=" + requestId.longValue()
-                  + ", componentName=" + scHost.getServiceComponentName()
-                  + ", hostname=" + scHost.getHostName()
-                  + ", roleCommand=" + roleCommand.name());
-            }
-
-            Map<String, Config> configs = scHost.getDesiredConfigs();
-            // Clone configurations for the command
-            Map<String, Map<String, String>> configurations =
-                new TreeMap<String, Map<String, String>>();
-            for (Config config : configs.values()) {
-              if (LOG.isDebugEnabled()) {
-                LOG.debug("Cloning configs for execution command"
-                    + ", configType=" + config.getType()
-                    + ", configVersionTag=" + config.getVersionTag()
-                    + ", clusterName=" + scHost.getClusterName()
-                    + ", serviceName=" + scHost.getServiceName()
-                    + ", componentName=" + scHost.getServiceComponentName()
-                    + ", hostname=" + scHost.getHostName());
-              }
-              configurations.put(config.getType(),
-                  config.getProperties());
-            }
-            // HACK: root cause analysis should only run on the JobTracker
-            // host, so disable it everywhere else
-            if ((!scHost.getHostName().equals(jobtrackerHost))
-                && configurations.get("global") != null) {
-              if (LOG.isDebugEnabled()) {
-                LOG.debug("Setting rca_enabled to false for host "
-                    + scHost.getHostName());
-              }
-              configurations.get("global").put("rca_enabled", "false");
-            }
-            createHostAction(cluster, stage, scHost, configurations,
-                roleCommand, nowTimestamp, event);
-          }
-        }
-      }
-
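-      // Add a service check (smoke test) command for each flagged service,
-      // executed from a client host that hosts a suitable component.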
-      for (String serviceName : smokeTestServices) {
-        Service s = cluster.getService(serviceName);
-
-        // find service component host
-        String clientHost = getClientHostForRunningAction(cluster, s);
-        String smokeTestRole =
-            actionMetadata.getServiceCheckAction(serviceName);
-
-        if (clientHost == null || smokeTestRole == null) {
-          LOG.info("Nothing to do for service check as could not find role or"
-              + " or host to run check on"
-              + ", clusterName=" + cluster.getClusterName()
-              + ", serviceName=" + serviceName
-              + ", clientHost=" + clientHost
-              + ", serviceCheckRole=" + smokeTestRole);
-          continue;
-        }
-
-        stage.addHostRoleExecutionCommand(clientHost,
-            Role.valueOf(smokeTestRole),
-            RoleCommand.EXECUTE,
-            new ServiceComponentHostOpInProgressEvent(null, clientHost,
-                nowTimestamp), cluster.getClusterName(), serviceName);
-
-        Map<String, Map<String, String>> configurations =
-            new TreeMap<String, Map<String, String>>();
-        Map<String, Config> allConfigs = cluster.getService(serviceName).getDesiredConfigs();
-        if (allConfigs != null) {
-          for (Map.Entry<String, Config> entry: allConfigs.entrySet()) {
-            configurations.put(entry.getValue().getType(), entry.getValue().getProperties());
-          }
-        }
-        
-        stage.getExecutionCommandWrapper(clientHost,
-            smokeTestRole).getExecutionCommand()
-            .setConfigurations(configurations);
-
-        // Generate cluster host info
-        stage.getExecutionCommandWrapper(clientHost, smokeTestRole)
-            .getExecutionCommand()
-            .setClusterHostInfo(StageUtils.getClusterHostInfo(cluster, hostsMap));
-      }
-
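-      // Let the role graph reorder the single stage into dependency-ordered
-      // stages before they are submitted to the action manager.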
-      RoleGraph rg = new RoleGraph(rco);
-      rg.build(stage);
-      stages = rg.getStages();
-
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Triggering Action Manager"
-            + ", clusterName=" + cluster.getClusterName()
-            + ", requestId=" + requestId.longValue()
-            + ", stagesCount=" + stages.size());
-      }
-      actionManager.sendActions(stages);
-    }
-
-    if (changedServices != null) {
-      for (Entry<State, List<Service>> entry : changedServices.entrySet()) {
-        State newState = entry.getKey();
-        for (Service s : entry.getValue()) {
-          if (s.isClientOnlyService()
-              && newState == State.STARTED) {
-            continue;
-          }
-          s.setDesiredState(newState);
-        }
-      }
-    }
-
-    if (changedComps != null) {
-      for (Entry<State, List<ServiceComponent>> entry :
-          changedComps.entrySet()){
-        State newState = entry.getKey();
-        for (ServiceComponent sc : entry.getValue()) {
-          sc.setDesiredState(newState);
-        }
-      }
-    }
-
-    for (Map<State, List<ServiceComponentHost>> stateScHostMap :
-        changedScHosts.values()) {
-      for (Entry<State, List<ServiceComponentHost>> entry :
-          stateScHostMap.entrySet()) {
-        State newState = entry.getKey();
-        for (ServiceComponentHost sch : entry.getValue()) {
-          sch.setDesiredState(newState);
-        }
-      }
-    }
-
-    if (stages == null || stages.isEmpty()
-        || requestId == null) {
-      return null;
-    }
-    return getRequestStatusResponse(requestId.longValue());
-  }
-
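-  /**
-   * Whether a host component may move from its current live state to the
-   * given new desired state.
-   */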
-  private boolean isValidStateTransition(State oldState,
-      State newState) {
-    switch(newState) {
-      case INSTALLED:
-        if (oldState == State.INIT
-            || oldState == State.UNINSTALLED
-            || oldState == State.INSTALLED
-            || oldState == State.INSTALLING
-            || oldState == State.STARTED
-            || oldState == State.START_FAILED
-            || oldState == State.INSTALL_FAILED
-            || oldState == State.STOP_FAILED) {
-          return true;
-        }
-        break;
-      case STARTED:
-        if (oldState == State.INSTALLED
-            || oldState == State.STARTING
-            || oldState == State.STARTED
-            || oldState == State.START_FAILED) {
-          return true;
-        }
-        break;
-      case UNINSTALLED:
-        if (oldState == State.INSTALLED
-            || oldState == State.UNINSTALLED
-            || oldState == State.UNINSTALL_FAILED) {
-          return true;
-        }
-        break;
-      case INIT:
-        if (oldState == State.UNINSTALLED
-            || oldState == State.INIT
-            || oldState == State.WIPEOUT_FAILED) {
-          return true;
-        }
-        break;
-    }
-    return false;
-  }
-
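-  /**
-   * Whether a service or component may move from its current desired state
-   * to the given new desired state.
-   */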
-  private boolean isValidDesiredStateTransition(State oldState,
-      State newState) {
-    switch(newState) {
-      case INSTALLED:
-        if (oldState == State.INIT
-            || oldState == State.UNINSTALLED
-            || oldState == State.INSTALLED
-            || oldState == State.STARTED) {
-          return true;
-        }
-        break;
-      case STARTED:
-        if (oldState == State.INSTALLED
-            || oldState == State.STARTED) {
-          return true;
-        }
-        break;
-    }
-    return false;
-  }
-
-  private void safeToUpdateConfigsForServiceComponentHost(
-      ServiceComponentHost sch,
-      State currentState, State newDesiredState)
-          throws AmbariException {
-
-    if (newDesiredState != null) {
-      if (!(newDesiredState == State.INIT
-          || newDesiredState == State.INSTALLED
-          || newDesiredState == State.STARTED)) {
-        throw new AmbariException("Changing of configs not supported"
-            + " for this transition"
-            + ", clusterName=" + sch.getClusterName()
-            + ", serviceName=" + sch.getServiceName()
-            + ", componentName=" + sch.getServiceComponentName()
-            + ", hostname=" + sch.getHostName()
-            + ", currentState=" + currentState
-            + ", newDesiredState=" + newDesiredState);
-      }
-    }
-  }
-
-  private void safeToUpdateConfigsForServiceComponent(
-      ServiceComponent sc,
-      State currentDesiredState, State newDesiredState)
-          throws AmbariException {
-    for (ServiceComponentHost sch :
-      sc.getServiceComponentHosts().values()) {
-      safeToUpdateConfigsForServiceComponentHost(sch,
-        sch.getState(), newDesiredState);
-    }
-  }
-
-  private void safeToUpdateConfigsForService(Service service,
-      State currentDesiredState, State newDesiredState)
-          throws AmbariException {
-    for (ServiceComponent component :
-        service.getServiceComponents().values()) {
-      safeToUpdateConfigsForServiceComponent(component,
-          component.getDesiredState(), newDesiredState);
-    }
-  }
-
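-  /**
-   * First pass validates every request and collects the implied service,
-   * component, and host component state changes; a second pass attaches any
-   * new configs before the changes are handed off to doStageCreation.
-   */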
-  @Override
-  public synchronized RequestStatusResponse updateServices(
-      Set<ServiceRequest> requests) throws AmbariException {
-
-    if (requests.isEmpty()) {
-      LOG.warn("Received an empty requests set");
-      return null;
-    }
-
-    Map<State, List<Service>> changedServices
-      = new HashMap<State, List<Service>>();
-    Map<State, List<ServiceComponent>> changedComps =
-        new HashMap<State, List<ServiceComponent>>();
-    Map<String, Map<State, List<ServiceComponentHost>>> changedScHosts =
-        new HashMap<String, Map<State, List<ServiceComponentHost>>>();
-
-    Set<String> clusterNames = new HashSet<String>();
-    Map<String, Set<String>> serviceNames = new HashMap<String, Set<String>>();
-    Set<State> seenNewStates = new HashSet<State>();
-
-    for (ServiceRequest request : requests) {
-      if (request.getClusterName() == null
-          || request.getClusterName().isEmpty()
-          || request.getServiceName() == null
-          || request.getServiceName().isEmpty()) {
-        throw new IllegalArgumentException("Invalid arguments, cluster name"
-            + " and service name should be provided to update services");
-      }
-
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Received a updateService request"
-            + ", clusterName=" + request.getClusterName()
-            + ", serviceName=" + request.getServiceName()
-            + ", request=" + request.toString());
-      }
-
-      clusterNames.add(request.getClusterName());
-
-      if (clusterNames.size() > 1) {
-        throw new IllegalArgumentException("Updates to multiple clusters is not"
-            + " supported");
-      }
-
-      if (!serviceNames.containsKey(request.getClusterName())) {
-        serviceNames.put(request.getClusterName(), new HashSet<String>());
-      }
-      if (serviceNames.get(request.getClusterName())
-          .contains(request.getServiceName())) {
-        // TODO throw single exception
-        throw new IllegalArgumentException("Invalid request contains duplicate"
-            + " service names");
-      }
-      serviceNames.get(request.getClusterName()).add(request.getServiceName());
-
-      Cluster cluster = clusters.getCluster(request.getClusterName());
-      Service s = cluster.getService(request.getServiceName());
-      State oldState = s.getDesiredState();
-      State newState = null;
-      if (request.getDesiredState() != null) {
-        newState = State.valueOf(request.getDesiredState());
-        if (!newState.isValidDesiredState()) {
-          throw new IllegalArgumentException("Invalid arguments, invalid"
-              + " desired state, desiredState=" + newState);
-        }
-      }
-
-      if (request.getConfigVersions() != null) {
-        safeToUpdateConfigsForService(s, oldState, newState);
-
-        for (Entry<String,String> entry :
-            request.getConfigVersions().entrySet()) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Attaching config to service"
-                + ", clusterName=" + cluster.getClusterName()
-                + ", serviceName=" + s.getName()
-                + ", configType=" + entry.getKey()
-                + ", configTag=" + entry.getValue());
-          }
-          Config config = cluster.getDesiredConfig(
-              entry.getKey(), entry.getValue());
-          if (null == config) {
-            // throw error for invalid config
-            throw new AmbariException("Trying to update service with"
-                + " invalid configs"
-                + ", clusterName=" + cluster.getClusterName()
-                + ", clusterId=" + cluster.getClusterId()
-                + ", serviceName=" + s.getName()
-                + ", invalidConfigType=" + entry.getKey()
-                + ", invalidConfigTag=" + entry.getValue());
-          }
-        }
-      }
-
-      if (newState == null) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Nothing to do for new updateService request"
-              + ", clusterName=" + request.getClusterName()
-              + ", serviceName=" + request.getServiceName()
-              + ", newDesiredState=null");
-        }
-        continue;
-      }
-
-      seenNewStates.add(newState);
-
-      if (newState != oldState) {
-        if (!isValidDesiredStateTransition(oldState, newState)) {
-          throw new AmbariException("Invalid transition for"
-              + " service"
-              + ", clusterName=" + cluster.getClusterName()
-              + ", clusterId=" + cluster.getClusterId()
-              + ", serviceName=" + s.getName()
-              + ", currentDesiredState=" + oldState
-              + ", newDesiredState=" + newState);
-
-        }
-        if (!changedServices.containsKey(newState)) {
-          changedServices.put(newState, new ArrayList<Service>());
-        }
-        changedServices.get(newState).add(s);
-      }
-
-      // TODO should we check whether all servicecomponents and
-      // servicecomponenthosts are in the required desired state?
-
-      for (ServiceComponent sc : s.getServiceComponents().values()) {
-        State oldScState = sc.getDesiredState();
-        if (newState != oldScState) {
-          if (sc.isClientComponent() &&
-              !newState.isValidClientComponentState()) {
-            continue;
-          }
-          if (!isValidDesiredStateTransition(oldScState, newState)) {
-            throw new AmbariException("Invalid transition for"
-                + " servicecomponent"
-                + ", clusterName=" + cluster.getClusterName()
-                + ", clusterId=" + cluster.getClusterId()
-                + ", serviceName=" + sc.getServiceName()
-                + ", componentName=" + sc.getName()
-                + ", currentDesiredState=" + oldScState
-                + ", newDesiredState=" + newState);
-          }
-          if (!changedComps.containsKey(newState)) {
-            changedComps.put(newState, new ArrayList<ServiceComponent>());
-          }
-          changedComps.get(newState).add(sc);
-        }
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Handling update to ServiceComponent"
-              + ", clusterName=" + request.getClusterName()
-              + ", serviceName=" + s.getName()
-              + ", componentName=" + sc.getName()
-              + ", currentDesiredState=" + oldScState
-              + ", newDesiredState=" + newState);
-        }
-        for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()){
-          State oldSchState = sch.getState();
-          if (newState == oldSchState) {
-            sch.setDesiredState(newState);
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Ignoring ServiceComponentHost"
-                  + ", clusterName=" + request.getClusterName()
-                  + ", serviceName=" + s.getName()
-                  + ", componentName=" + sc.getName()
-                  + ", hostname=" + sch.getHostName()
-                  + ", currentState=" + oldSchState
-                  + ", newDesiredState=" + newState);
-            }
-            continue;
-          }
-          if (sc.isClientComponent() &&
-              !newState.isValidClientComponentState()) {
-            continue;
-          }
-          /*
-           * This is a hack for now wherein we don't fail if the
-           * sch is in INSTALL_FAILED.
-           */
-          if (!isValidStateTransition(oldSchState, newState)) {
-            String error = "Invalid transition for"
-                + " servicecomponenthost"
-                + ", clusterName=" + cluster.getClusterName()
-                + ", clusterId=" + cluster.getClusterId()
-                + ", serviceName=" + sch.getServiceName()
-                + ", componentName=" + sch.getServiceComponentName()
-                + ", hostname=" + sch.getHostName()
-                + ", currentState=" + oldSchState
-                + ", newDesiredState=" + newState;
-            StackId sid = cluster.getDesiredStackVersion();
-            
-            if ( ambariMetaInfo.getComponentCategory(
-                sid.getStackName(), sid.getStackVersion(), sc.getServiceName(),
-                sch.getServiceComponentName()).isMaster()) {
-              throw new AmbariException(error);
-            } else {
-              LOG.warn("Ignoring: " + error);
-              continue;
-            }
-          }
-          if (!changedScHosts.containsKey(sc.getName())) {
-            changedScHosts.put(sc.getName(),
-                new HashMap<State, List<ServiceComponentHost>>());
-          }
-          if (!changedScHosts.get(sc.getName()).containsKey(newState)) {
-            changedScHosts.get(sc.getName()).put(newState,
-                new ArrayList<ServiceComponentHost>());
-          }
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Handling update to ServiceComponentHost"
-                + ", clusterName=" + request.getClusterName()
-                + ", serviceName=" + s.getName()
-                + ", componentName=" + sc.getName()
-                + ", hostname=" + sch.getHostName()
-                + ", currentState=" + oldSchState
-                + ", newDesiredState=" + newState);
-          }
-          changedScHosts.get(sc.getName()).get(newState).add(sch);
-        }
-      }
-    }
-
-    if (seenNewStates.size() > 1) {
-      // TODO should we handle this scenario
-      throw new IllegalArgumentException("Cannot handle different desired state"
-          + " changes for a set of services at the same time");
-    }
-
-    for (ServiceRequest request : requests) {
-      Cluster cluster = clusters.getCluster(request.getClusterName());
-      Service s = cluster.getService(request.getServiceName());
-      if (request.getConfigVersions() != null) {
-        Map<String, Config> updated = new HashMap<String, Config>();
-
-        for (Entry<String,String> entry : request.getConfigVersions().entrySet()) {
-          Config config = cluster.getDesiredConfig(entry.getKey(), entry.getValue());
-          updated.put(config.getType(), config);
-        }
-
-        if (!updated.isEmpty()) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Updating service configs, attaching configs"
-                + ", clusterName=" + request.getClusterName()
-                + ", serviceName=" + s.getName()
-                + ", configCount=" + updated.size());
-          }
-          s.updateDesiredConfigs(updated);
-          s.persist();
-        }
-
-        for (ServiceComponent sc : s.getServiceComponents().values()) {
-          sc.deleteDesiredConfigs(updated.keySet());
-          for (ServiceComponentHost sch :
-            sc.getServiceComponentHosts().values()) {
-            sch.deleteDesiredConfigs(updated.keySet());
-            sch.persist();
-          }
-          sc.persist();
-        }
-      }
-    }
-
-    Cluster cluster = clusters.getCluster(clusterNames.iterator().next());
-
-    return doStageCreation(cluster, changedServices,
-        changedComps, changedScHosts);
-  }
-
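-  /**
-   * Component-level variant of updateServices: validates each request,
-   * collects component and host component state changes and config updates,
-   * then delegates to doStageCreation.
-   */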
-  @Override
-  public synchronized RequestStatusResponse updateComponents(
-      Set<ServiceComponentRequest> requests) throws AmbariException {
-
-    if (requests.isEmpty()) {
-      LOG.warn("Received an empty requests set");
-      return null;
-    }
-
-    Map<State, List<ServiceComponent>> changedComps =
-        new HashMap<State, List<ServiceComponent>>();
-    Map<String, Map<State, List<ServiceComponentHost>>> changedScHosts =
-        new HashMap<String, Map<State, List<ServiceComponentHost>>>();
-
-    Set<String> clusterNames = new HashSet<String>();
-    Map<String, Map<String, Set<String>>> componentNames =
-        new HashMap<String, Map<String,Set<String>>>();
-    Set<State> seenNewStates = new HashSet<State>();
-
-    for (ServiceComponentRequest request : requests) {
-      if (request.getClusterName() == null
-          || request.getClusterName().isEmpty()
-          || request.getComponentName() == null
-          || request.getComponentName().isEmpty()) {
-        throw new IllegalArgumentException("Invalid arguments, cluster name"
-            + ", service name and component name should be provided to"
-            + " update components");
-      }
-
-      Cluster cluster = clusters.getCluster(request.getClusterName());
-
-      if (request.getServiceName() == null
-          || request.getServiceName().isEmpty()) {
-        StackId stackId = cluster.getDesiredStackVersion();
-        String serviceName =
-            ambariMetaInfo.getComponentToService(stackId.getStackName(),
-                stackId.getStackVersion(), request.getComponentName());
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Looking up service name for component"
-              + ", componentName=" + request.getComponentName()
-              + ", serviceName=" + serviceName);
-        }
-
-        if (serviceName == null
-            || serviceName.isEmpty()) {
-          throw new AmbariException("Could not find service for component"
-              + ", componentName=" + request.getComponentName()
-              + ", clusterName=" + cluster.getClusterName()
-              + ", stackInfo=" + stackId.getStackId());
-        }
-        request.setServiceName(serviceName);
-      }
-
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Received a updateComponent request"
-            + ", clusterName=" + request.getClusterName()
-            + ", serviceName=" + request.getServiceName()
-            + ", componentName=" + request.getComponentName()
-            + ", request=" + request);
-      }
-
-      clusterNames.add(request.getClusterName());
-
-      if (clusterNames.size() > 1) {
-        // FIXME throw correct error
-        throw new IllegalArgumentException("Updates to multiple clusters is not"
-            + " supported");
-      }
-
-      if (!componentNames.containsKey(request.getClusterName())) {
-        componentNames.put(request.getClusterName(),
-            new HashMap<String, Set<String>>());
-      }
-      if (!componentNames.get(request.getClusterName())
-          .containsKey(request.getServiceName())) {
-        componentNames.get(request.getClusterName()).put(
-            request.getServiceName(), new HashSet<String>());
-      }
-      if (componentNames.get(request.getClusterName())
-          .get(request.getServiceName()).contains(request.getComponentName())){
-        // duplicate component within the same request
-        throw new IllegalArgumentException("Invalid request contains duplicate"
-            + " service components");
-      }
-      componentNames.get(request.getClusterName())
-          .get(request.getServiceName()).add(request.getComponentName());
-
-      Service s = cluster.getService(request.getServiceName());
-      ServiceComponent sc = s.getServiceComponent(
-        request.getComponentName());
-      State oldState = sc.getDesiredState();
-      State newState = null;
-      if (request.getDesiredState() != null) {
-        newState = State.valueOf(request.getDesiredState());
-        if (!newState.isValidDesiredState()) {
-          throw new IllegalArgumentException("Invalid arguments, invalid"
-              + " desired state, desiredState=" + newState.toString());
-        }
-      }
-
-      if (request.getConfigVersions() != null) {
-        safeToUpdateConfigsForServiceComponent(sc, oldState, newState);
-
-        for (Entry<String,String> entry :
-            request.getConfigVersions().entrySet()) {
-          Config config = cluster.getDesiredConfig(
-              entry.getKey(), entry.getValue());
-          if (null == config) {
-            // throw error for invalid config
-            throw new AmbariException("Trying to update servicecomponent with"
-                + " invalid configs"
-                + ", clusterName=" + cluster.getClusterName()
-                + ", clusterId=" + cluster.getClusterId()
-                + ", serviceName=" + s.getName()
-                + ", componentName=" + sc.getName()
-                + ", invalidConfigType=" + entry.getKey()
-                + ", invalidConfigTag=" + entry.getValue());
-          }
-        }
-      }
-
-      if (newState == null) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Nothing to do for new updateServiceComponent request"
-              + ", clusterName=" + request.getClusterName()
-              + ", serviceName=" + request.getServiceName()
-              + ", componentName=" + request.getComponentName()
-              + ", newDesiredState=null");
-        }
-        continue;
-      }
-
-      if (sc.isClientComponent() &&
-          !newState.isValidClientComponentState()) {
-        throw new AmbariException("Invalid desired state for a client"
-            + " component");
-      }
-
-      seenNewStates.add(newState);
-
-      State oldScState = sc.getDesiredState();
-      if (newState != oldScState) {
-        if (!isValidDesiredStateTransition(oldScState, newState)) {
-          // FIXME throw correct error
-          throw new AmbariException("Invalid transition for"
-              + " servicecomponent"
-              + ", clusterName=" + cluster.getClusterName()
-              + ", clusterId=" + cluster.getClusterId()
-              + ", serviceName=" + sc.getServiceName()
-              + ", componentName=" + sc.getName()
-              + ", currentDesiredState=" + oldScState
-              + ", newDesiredState=" + newState);
-        }
-        if (!changedComps.containsKey(newState)) {
-          changedComps.put(newState, new ArrayList<ServiceComponent>());
-        }
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Handling update to ServiceComponent"
-              + ", clusterName=" + request.getClusterName()
-              + ", serviceName=" + s.getName()
-              + ", componentName=" + sc.getName()
-              + ", currentDesiredState=" + oldScState
-              + ", newDesiredState=" + newState);
-        }
-        changedComps.get(newState).add(sc);
-      }
-
-      for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
-        State oldSchState = sch.getState();
-        if (newState == oldSchState) {
-          sch.setDesiredState(newState);
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Ignoring ServiceComponentHost"
-                + ", clusterName=" + request.getClusterName()
-                + ", serviceName=" + s.getName()
-                + ", componentName=" + sc.getName()
-                + ", hostname=" + sch.getHostName()
-                + ", currentState=" + oldSchState
-                + ", newDesiredState=" + newState);
-          }
-          continue;
-        }
-        if (!isValidStateTransition(oldSchState, newState)) {
-          // FIXME throw correct error
-          throw new AmbariException("Invalid transition for"
-              + " servicecomponenthost"
-              + ", clusterName=" + cluster.getClusterName()
-              + ", clusterId=" + cluster.getClusterId()
-              + ", serviceName=" + sch.getServiceName()
-              + ", componentName=" + sch.getServiceComponentName()
-              + ", hostname=" + sch.getHostName()
-              + ", currentState=" + oldSchState
-              + ", newDesiredState=" + newState);
-        }
-        if (!changedScHosts.containsKey(sc.getName())) {
-          changedScHosts.put(sc.getName(),
-              new HashMap<State, List<ServiceComponentHost>>());
-        }
-        if (!changedScHosts.get(sc.getName()).containsKey(newState)) {
-          changedScHosts.get(sc.getName()).put(newState,
-              new ArrayList<ServiceComponentHost>());
-        }
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Handling update to ServiceComponentHost"
-              + ", clusterName=" + request.getClusterName()
-              + ", serviceName=" + s.getName()
-              + ", componentName=" + sc.getName()
-              + ", hostname=" + sch.getHostName()
-              + ", currentState=" + oldSchState
-              + ", newDesiredState=" + newState);
-        }
-        changedScHosts.get(sc.getName()).get(newState).add(sch);
-      }
-    }
-
-    if (seenNewStates.size() > 1) {
-      // FIXME should we handle this scenario
-      throw new IllegalArgumentException("Cannot handle different desired"
-          + " state changes for a set of service components at the same time");
-    }
-
-    // TODO additional validation?
-
-    // TODO if all components reach a common state, should service state be
-    // modified?
-
-    for (ServiceComponentRequest request : requests) {
-      Cluster cluster = clusters.getCluster(request.getClusterName());
-      Service s = cluster.getService(request.getServiceName());
-      ServiceComponent sc = s.getServiceComponent(
-          request.getComponentName());
-      if (request.getConfigVersions() != null) {
-        Map<String, Config> updated = new HashMap<String, Config>();
-
-        for (Entry<String,String> entry :
-          request.getConfigVersions().entrySet()) {
-          Config config = cluster.getDesiredConfig(
-              entry.getKey(), entry.getValue());
-          updated.put(config.getType(), config);
-        }
-
-        if (!updated.isEmpty()) {
-          sc.updateDesiredConfigs(updated);
-          for (ServiceComponentHost sch :
-              sc.getServiceComponentHosts().values()) {
-            sch.deleteDesiredConfigs(updated.keySet());
-            sch.persist();
-          }
-          sc.persist();
-        }
-      }
-    }
-
-    Cluster cluster = clusters.getCluster(clusterNames.iterator().next());
-
-    return doStageCreation(cluster, null,
-        changedComps, changedScHosts);
-  }
-
-  @Override
-  public synchronized void updateHosts(Set<HostRequest> requests)
-      throws AmbariException {
-
-    if (requests.isEmpty()) {
-      LOG.warn("Received an empty requests set");
-      return;
-    }
-
-    for (HostRequest request : requests) {
-      if (request.getHostname() == null
-          || request.getHostname().isEmpty()) {
-        throw new IllegalArgumentException("Invalid arguments, hostname should"
-            + " be provided");
-      }
-
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Received a updateHost request"
-            + ", hostname=" + request.getHostname()
-            + ", request=" + request);
-      }
-
-      Host h = clusters.getHost(request.getHostname());
-
-      try {
-        //todo: the below method throws an exception when trying to create a duplicate mapping.
-        //todo: this is done to detect duplicates during host create.  Unless it is allowable to
-        //todo: add a host to a cluster by modifying the cluster_name prop, we should not do this mapping here.
-        //todo: Determine if it is allowable to associate a host to a cluster via this mechanism.
-        clusters.mapHostToCluster(request.getHostname(), request.getClusterName());
-      } catch (DuplicateResourceException e) {
-        // do nothing
-      }
-
-      if (null != request.getHostAttributes())
-        h.setHostAttributes(request.getHostAttributes());
-
-      if (null != request.getRackInfo()) {
-        h.setRackInfo(request.getRackInfo());
-      }
-      
-      if (null != request.getPublicHostName()) {
-        h.setPublicHostName(request.getPublicHostName());
-      }
-
-      //todo: if attempt was made to update a property other than those
-      //todo: that are allowed above, should throw exception
-    }
-  }
-
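-  /**
-   * Host component-level variant of updateServices: validates each request,
-   * collects host component state changes and config updates, then delegates
-   * to doStageCreation.
-   */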
-  @Override
-  public synchronized RequestStatusResponse updateHostComponents(
-      Set<ServiceComponentHostRequest> requests) throws AmbariException {
-
-    if (requests.isEmpty()) {
-      LOG.warn("Received an empty requests set");
-      return null;
-    }
-
-    Map<String, Map<State, List<ServiceComponentHost>>> changedScHosts =
-        new HashMap<String, Map<State, List<ServiceComponentHost>>>();
-
-    Set<String> clusterNames = new HashSet<String>();
-    Map<String, Map<String, Map<String, Set<String>>>> hostComponentNames =
-        new HashMap<String, Map<String, Map<String, Set<String>>>>();
-    Set<State> seenNewStates = new HashSet<State>();
-
-    for (ServiceComponentHostRequest request : requests) {
-      if (request.getClusterName() == null
-          || request.getClusterName().isEmpty()
-          || request.getComponentName() == null
-          || request.getComponentName().isEmpty()
-          || request.getHostname() == null
-          || request.getHostname().isEmpty()) {
-        throw new IllegalArgumentException("Invalid arguments"
-            + ", cluster name, component name and host name should be"
-            + " provided to update host components");
-      }
-
-      Cluster cluster = clusters.getCluster(request.getClusterName());
-
-      if (request.getServiceName() == null
-          || request.getServiceName().isEmpty()) {
-        StackId stackId = cluster.getDesiredStackVersion();
-        String serviceName =
-            ambariMetaInfo.getComponentToService(stackId.getStackName(),
-                stackId.getStackVersion(), request.getComponentName());
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Looking up service name for component"
-              + ", componentName=" + request.getComponentName()
-              + ", serviceName=" + serviceName);
-        }
-
-        if (serviceName == null
-            || serviceName.isEmpty()) {
-          throw new AmbariException("Could not find service for component"
-              + ", componentName=" + request.getComponentName()
-              + ", clusterName=" + cluster.getClusterName()
-              + ", stackInfo=" + stackId.getStackId());
-        }
-        request.setServiceName(serviceName);
-      }
-
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Received a createHostComponent request"
-            + ", clusterName=" + request.getClusterName()
-            + ", serviceName=" + request.getServiceName()
-            + ", componentName=" + request.getComponentName()
-            + ", hostname=" + request.getHostname()
-            + ", request=" + request);
-      }
-
-      clusterNames.add(request.getClusterName());
-
-      if (clusterNames.size() > 1) {
-        throw new IllegalArgumentException("Updates to multiple clusters is not"
-            + " supported");
-      }
-
-      if (!hostComponentNames.containsKey(request.getClusterName())) {
-        hostComponentNames.put(request.getClusterName(),
-            new HashMap<String, Map<String,Set<String>>>());
-      }
-      if (!hostComponentNames.get(request.getClusterName())
-          .containsKey(request.getServiceName())) {
-        hostComponentNames.get(request.getClusterName()).put(
-            request.getServiceName(), new HashMap<String, Set<String>>());
-      }
-      if (!hostComponentNames.get(request.getClusterName())
-          .get(request.getServiceName())
-          .containsKey(request.getComponentName())) {
-        hostComponentNames.get(request.getClusterName())
-            .get(request.getServiceName()).put(request.getComponentName(),
-                new HashSet<String>());
-      }
-      if (hostComponentNames.get(request.getClusterName())
-          .get(request.getServiceName()).get(request.getComponentName())
-          .contains(request.getHostname())) {
-        throw new IllegalArgumentException("Invalid request contains duplicate"
-            + " hostcomponents");
-      }
-      hostComponentNames.get(request.getClusterName())
-          .get(request.getServiceName()).get(request.getComponentName())
-          .add(request.getHostname());
-
-      Service s = cluster.getService(request.getServiceName());
-      ServiceComponent sc = s.getServiceComponent(
-        request.getComponentName());
-      ServiceComponentHost sch = sc.getServiceComponentHost(
-        request.getHostname());
-      State oldState = sch.getState();
-      State newState = null;
-      if (request.getDesiredState() != null) {
-        newState = State.valueOf(request.getDesiredState());
-        if (!newState.isValidDesiredState()) {
-          throw new IllegalArgumentException("Invalid arguments, invalid"
-              + " desired state, desiredState=" + newState.toString());
-        }
-      }
-
-      if (request.getConfigVersions() != null) {
-        safeToUpdateConfigsForServiceComponentHost(sch, oldState, newState);
-
-        for (Entry<String,String> entry :
-            request.getConfigVersions().entrySet()) {
-          Config config = cluster.getDesiredConfig(
-              entry.getKey(), entry.getValue());
-          if (null == config) {
-            throw new AmbariException("Trying to update servicecomponenthost"
-                + " with invalid configs"
-                + ", clusterName=" + cluster.getClusterName()
-                + ", clusterId=" + cluster.getClusterId()
-                + ", serviceName=" + s.getName()
-                + ", componentName=" + sc.getName()
-                + ", hostname=" + sch.getHostName()
-                + ", invalidConfigType=" + entry.getKey()
-                + ", invalidConfigTag=" + entry.getValue());
-          }
-        }
-      }
-
-      if (newState == null) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Nothing to do for new updateServiceComponentHost request"
-              + ", clusterName=" + request.getClusterName()
-              + ", serviceName=" + request.getServiceName()
-              + ", componentName=" + request.getComponentName()
-              + ", hostname=" + request.getHostname()
-              + ", newDesiredState=null");
-        }
-        continue;
-      }
-
-      if (sc.isClientComponent() &&
-          !newState.isValidClientComponentState()) {
-        throw new IllegalArgumentException("Invalid desired state for a client"
-            + " component");
-      }
-
-      seenNewStates.add(newState);
-
-      State oldSchState = sch.getState();
-      if (newState == oldSchState) {
-        sch.setDesiredState(newState);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Ignoring ServiceComponentHost"
-              + ", clusterName=" + request.getClusterName()
-              + ", serviceName=" + s.getName()
-              + ", componentName=" + sc.getName()
-              + ", hostname=" + sch.getHostName()
-              + ", currentState=" + oldSchState
-              + ", newDesiredState=" + newState);
-        }
-        continue;
-      } 
-      
-      if (!isValidStateTransition(oldSchState, newState)) {
-        throw new AmbariException("Invalid transition for"
-            + " servicecomponenthost"
-            + ", clusterName=" + cluster.getClusterName()
-            + ", clusterId=" + cluster.getClusterId()
-            + ", serviceName=" + sch.getServiceName()
-            + ", componentName=" + sch.getServiceComponentName()
-            + ", hostname=" + sch.getHostName()
-            + ", currentState=" + oldSchState
-            + ", newDesiredState=" + newState);
-      }
-      if (!changedScHosts.containsKey(sc.getName())) {
-        changedScHosts.put(sc.getName(),
-            new HashMap<State, List<ServiceComponentHost>>());
-      }
-      if (!changedScHosts.get(sc.getName()).containsKey(newState)) {
-        changedScHosts.get(sc.getName()).put(newState,
-            new ArrayList<ServiceComponentHost>());
-      }
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Handling update to ServiceComponentHost"
-            + ", clusterName=" + request.getClusterName()
-            + ", serviceName=" + s.getName()
-            + ", componentName=" + sc.getName()
-            + ", hostname=" + sch.getHostName()
-            + ", currentState=" + oldSchState
-            + ", newDesiredState=" + newState);
-      }
-      changedScHosts.get(sc.getName()).get(newState).add(sch);
-    }
-
-    if (seenNewStates.size() > 1) {
-      // FIXME should we handle this scenario
-      throw new IllegalArgumentException("Cannot handle different desired"
-          + " state changes for a set of service components at the same time");
-    }
-
-    // TODO additional validation?
-    for (ServiceComponentHostRequest request : requests) {
-      Cluster cluster = clusters.getCluster(request.getClusterName());
-      Service s = cluster.getService(request.getServiceName());
-      ServiceComponent sc = s.getServiceComponent(
-          request.getComponentName());
-      ServiceComponentHost sch = sc.getServiceComponentHost(
-          request.getHostname());
-      if (request.getConfigVersions() != null) {
-        Map<String, Config> updated = new HashMap<String, Config>();
-
-        for (Entry<String,String> entry : request.getConfigVersions().entrySet()) {
-          Config config = cluster.getDesiredConfig(
-              entry.getKey(), entry.getValue());
-          updated.put(config.getType(), config);
-
-          if (!updated.isEmpty()) {
-            sch.updateDesiredConfigs(updated);
-            sch.persist();
-          }
-        }
-      }
-    }
-
-    Cluster cluster = clusters.getCluster(clusterNames.iterator().next());
-
-    return doStageCreation(cluster, null,
-        null, changedScHosts);
-  }
-
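-  /**
-   * Applies password changes and role replacements for each named user;
-   * requests for unknown users are silently skipped.
-   */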
-  @Override
-  public synchronized void updateUsers(Set<UserRequest> requests) throws AmbariException {
-    for (UserRequest request : requests) {
-      User u = users.getAnyUser(request.getUsername());
-      if (null == u)
-        continue;
-
-      if (null != request.getOldPassword() && null != request.getPassword()) {
-        users.modifyPassword(u.getUserName(), request.getOldPassword(),
-            request.getPassword());
-      }
-
-      if (request.getRoles().size() > 0) {
-        for (String role : u.getRoles()) {
-          users.removeRoleFromUser(u, role);
-        }
-
-        for (String role : request.getRoles()) {
-          users.addRoleToUser(u, role);
-        }
-      }
-
-    }
-  }
-
-  @Override
-  public synchronized void deleteCluster(ClusterRequest request)
-      throws AmbariException {
-
-    if (request.getClusterName() == null
-        || request.getClusterName().isEmpty()) {
-      // FIXME throw correct error
-      throw new AmbariException("Invalid arguments");
-    }
-    LOG.info("Received a delete cluster request"
-        + ", clusterName=" + request.getClusterName());
-    if (request.getHostNames() != null) {
-      // FIXME treat this as removing a host from a cluster?
-    } else {
-      // deleting whole cluster
-      clusters.deleteCluster(request.getClusterName());
-    }
-  }
-
-  @Override
-  public RequestStatusResponse deleteServices(Set<ServiceRequest> request)
-      throws AmbariException {
-
-    for (ServiceRequest serviceRequest : request) {
-      if (StringUtils.isEmpty(serviceRequest.getClusterName()) || StringUtils.isEmpty(serviceRequest.getServiceName())) {
-        // FIXME throw correct error
-        throw new AmbariException("invalid arguments");
-      } else {
-        clusters.getCluster(serviceRequest.getClusterName()).deleteService(serviceRequest.getServiceName());
-      }
-    }
-    return null;
-
-  }
-
-  @Override
-  public RequestStatusResponse deleteComponents(
-      Set<ServiceComponentRequest> request) throws AmbariException {
-    throw new AmbariException("Delete components not supported");
-  }
-
-  @Override
-  public void deleteHosts(Set<HostRequest> request)
-      throws AmbariException {
-    throw new AmbariException("Delete hosts not supported");
-  }
-
-  @Override
-  public RequestStatusResponse deleteHostComponents(
-      Set<ServiceComponentHostRequest> request) throws AmbariException {
-    throw new AmbariException("Delete host components not supported");
-  }
-
-  @Override
-  public void deleteUsers(Set<UserRequest> requests)
-    throws AmbariException {
-
-    for (UserRequest r : requests) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Received a delete user request"
-            + ", username=" + r.getUsername());
-      }
-      User u = users.getAnyUser(r.getUsername());
-      if (null != u)
-        users.removeUser(u);
-    }
-  }
-
-  @Override
-  public Set<ActionResponse> getActions(Set<ActionRequest> request)
-      throws AmbariException {
-    Set<ActionResponse> responses = new HashSet<ActionResponse>();
-
-    for (ActionRequest actionRequest : request) {
-      if (actionRequest.getServiceName() == null) {
-        LOG.warn("No service name specified - skipping request");
-        //TODO throw error?
-        continue;
-      }
-      ActionResponse actionResponse = new ActionResponse();
-      actionResponse.setClusterName(actionRequest.getClusterName());
-      actionResponse.setServiceName(actionRequest.getServiceName());
-      if (actionMetadata.getActions(actionRequest.getServiceName()) != null
-          && !actionMetadata.getActions(actionRequest.getServiceName())
-              .isEmpty()) {
-        actionResponse.setActionName(actionMetadata.getActions(
-            actionRequest.getServiceName()).get(0));
-      }
-      responses.add(actionResponse);
-    }
-
-    return responses;
-  }
-
-  public Set<RequestStatusResponse> getRequestsByStatus(RequestsByStatusesRequest request) {
-
-    //TODO implement.  Throw UnsupportedOperationException if it is not supported.
-    return Collections.emptySet();
-  }
-
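-  /**
-   * Builds a response containing the short status of every task belonging
-   * to the given request id.
-   */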
-  private RequestStatusResponse getRequestStatusResponse(long requestId) {
-    RequestStatusResponse response = new RequestStatusResponse(requestId);
-    List<HostRoleCommand> hostRoleCommands =
-        actionManager.getRequestTasks(requestId);
-    List<ShortTaskStatus> tasks = new ArrayList<ShortTaskStatus>();
-
-    for (HostRoleCommand hostRoleCommand : hostRoleCommands) {
-      tasks.add(new ShortTaskStatus(hostRoleCommand));
-    }
-    response.setTasks(tasks);
-
-    return response;
-  }
-
-  @Override
-  public Set<RequestStatusResponse> getRequestStatus(
-      RequestStatusRequest request) throws AmbariException{
-    Set<RequestStatusResponse> response = new HashSet<RequestStatusResponse>();
-    if (request.getRequestId() == null) {
-      RequestStatus requestStatus = RequestStatus.IN_PROGRESS;
-      if (request.getRequestStatus() != null) {
-        requestStatus = RequestStatus.valueOf(request.getRequestStatus());
-      }
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Received a Get Request Status request"
-            + ", requestId=null"
-            + ", requestStatus=" + requestStatus);
-      }
-      List<Long> requestIds = actionManager.getRequestsByStatus(requestStatus);
-      for (Long requestId : requestIds) {
-        response.add(getRequestStatusResponse(requestId.longValue()));
-      }
-    } else {
-      RequestStatusResponse requestStatusResponse = getRequestStatusResponse(
-          request.getRequestId().longValue());
-
-      //todo: correlate request with cluster
-      if (requestStatusResponse.getTasks().size() == 0) {
-        //todo: should be thrown lower in stack but we only want to throw if id was specified
-        //todo: and we currently iterate over all id's and invoke for each if id is not specified
-        throw new ObjectNotFoundException("Request resource doesn't exist.");
-      } else {
-        response.add(requestStatusResponse);
-      }
-    }
-    return response;
-  }
-
-  @Override
-  public Set<TaskStatusResponse> getTaskStatus(Set<TaskStatusRequest> requests)
-      throws AmbariException {
-
-    Collection<Long> requestIds = new ArrayList<Long>();
-    Collection<Long> taskIds = new ArrayList<Long>();
-
-    for (TaskStatusRequest request : requests) {
-      if (request.getTaskId() != null) {
-        taskIds.add(request.getTaskId());
-      } else {
-        requestIds.add(request.getRequestId());
-      }
-    }
-
-    Set<TaskStatusResponse> responses = new HashSet<TaskStatusResponse>();
-    for (HostRoleCommand command : actionManager.getTasksByRequestAndTaskIds(requestIds, taskIds)) {
-      responses.add(new TaskStatusResponse(command));
-    }
-
-    return responses;
-  }
-
-  @Override
-  public Set<ClusterResponse> getClusters(Set<ClusterRequest> requests) throws AmbariException {
-    Set<ClusterResponse> response = new HashSet<ClusterResponse>();
-    for (ClusterRequest request : requests) {
-      try {
-        response.addAll(getClusters(request));
-      } catch (ClusterNotFoundException e) {
-        if (requests.size() == 1) {
-          // only throw exception if 1 request.
-          // there will be > 1 request in case of OR predicate
-          throw e;
-        }
-      }
-    }
-    return response;
-  }
-
-  @Override
-  public Set<ServiceResponse> getServices(Set<ServiceRequest> requests)
-      throws AmbariException {
-    Set<ServiceResponse> response = new HashSet<ServiceResponse>();
-    for (ServiceRequest request : requests) {
-      try {
-        response.addAll(getServices(request));
-      } catch (ServiceNotFoundException e) {
-        if (requests.size() == 1) {
-          // only throw exception if 1 request.
-          // there will be > 1 request in case of OR predicate
-          throw e;
-        }
-      }
-    }
-    return response;
-  }
-
-  @Override
-  public Set<ServiceComponentResponse> getComponents(
-      Set<ServiceComponentRequest> requests) throws AmbariException {
-    Set<ServiceComponentResponse> response =
-        new HashSet<ServiceComponentResponse>();
-    for (ServiceComponentRequest request : requests) {
-      try {
-        response.addAll(getComponents(request));
-      } catch (ServiceComponentNotFoundException e) {
-        if (requests.size() == 1) {
-          // only throw exception if 1 request.
-          // there will be > 1 request in case of OR predicate
-          throw e;
-        }
-      }
-    }
-    return response;
-  }
-
-  @Override
-  public Set<HostResponse> getHosts(Set<HostRequest> requests)
-      throws AmbariException {
-    Set<HostResponse> response = new HashSet<HostResponse>();
-    for (HostRequest request : requests) {
-      try {
-        response.addAll(getHosts(request));
-      } catch (HostNotFoundException e) {
-        if (requests.size() == 1) {
-          // only throw exception if 1 request.
-          // there will be > 1 request in case of OR predicate
-          throw e;
-        }
-      }
-    }
-    return response;
-  }
-
-  @Override
-  public Set<ServiceComponentHostResponse> getHostComponents(
-      Set<ServiceComponentHostRequest> requests) throws AmbariException {
-    Set<ServiceComponentHostResponse> response =
-        new HashSet<ServiceComponentHostResponse>();
-    for (ServiceComponentHostRequest request : requests) {
-      try {
-        response.addAll(getHostComponents(request));
-      } catch (ServiceComponentHostNotFoundException e) {
-        if (requests.size() == 1) {
-          // only throw exception if 1 request.
-          // there will be > 1 request in case of OR predicate
-          throw e;
-        }
-      } catch (ServiceNotFoundException e) {
-        if (requests.size() == 1) {
-          // only throw exception if 1 request.
-          // there will be > 1 request in case of OR predicate
-          // In 'OR' case, a host_component may be included in predicate
-          // that has no corresponding service
-          throw e;
-        }
-      } catch (ServiceComponentNotFoundException e) {
-        if (requests.size() == 1) {
-          // only throw exception if 1 request.
-          // there will be > 1 request in case of OR predicate
-          // In 'OR' case, a host_component may be included in predicate
-          // that has no corresponding component
-          throw e;
-        }
-      } catch (ParentObjectNotFoundException e) {
-        // If there is only one request, always throw exception.
-        // There will be > 1 request in case of OR predicate.
-
-        // For HostNotFoundException, only throw exception if host_name is
-        // provided in URL.  If host_name is part of query, don't throw exception.
-        boolean throwException = true;
-        if (requests.size() > 1 && HostNotFoundException.class.isInstance(e.getCause())) {
-          for (ServiceComponentHostRequest r : requests) {
-            if (r.getHostname() == null) {
-              // host_name was provided in the query, since not all requests have host_name set
-              throwException = false;
-              break;
-            }
-          }
-        }
-        if (throwException) throw e;
-      }
-    }
-    return response;
-  }
-
-  @Override
-  public Set<ConfigurationResponse> getConfigurations(
-      Set<ConfigurationRequest> requests) throws AmbariException {
-    Set<ConfigurationResponse> response =
-        new HashSet<ConfigurationResponse>();
-    for (ConfigurationRequest request : requests) {
-      response.addAll(getConfigurations(request));
-    }
-    return response;
-  }
-
-  @Override
-  public Set<UserResponse> getUsers(Set<UserRequest> requests)
-      throws AmbariException {
-
-    Set<UserResponse> responses = new HashSet<UserResponse>();
-
-    for (UserRequest r : requests) {
-
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Received a getUsers request"
-            + ", userRequest=" + r.toString());
-      }
-      // get them all
-      if (null == r.getUsername()) {
-        for (User u : users.getAllUsers()) {
-          UserResponse resp = new UserResponse(u.getUserName(), u.isLdapUser());
-          resp.setRoles(new HashSet<String>(u.getRoles()));
-          responses.add(resp);
-        }
-      } else {
-
-        User u = users.getAnyUser(r.getUsername());
-        if (null == u) {
-          if (requests.size() == 1) {
-            // only throw exception if there is a single request
-            // if there are multiple requests, this indicates an OR predicate
-            throw new ObjectNotFoundException("Cannot find user '"
-                + r.getUsername() + "'");
-          }
-        } else {
-          UserResponse resp = new UserResponse(u.getUserName(), u.isLdapUser());
-          resp.setRoles(new HashSet<String>(u.getRoles()));
-          responses.add(resp);
-        }
-      }
-    }
-
-    return responses;
-  }
-
-  @Override
-  public Map<String, String> getHostComponentDesiredConfigMapping(ServiceComponentHostRequest request)
-    throws AmbariException {
-
-    Map<String, String> map = new HashMap<String, String>();
-
-    for (ServiceComponentHostResponse r : getHostComponents(request)) {
-      map.putAll(r.getDesiredConfigs());
-    }
-
-    return map;
-  }
-
-  private String getClientHostForRunningAction(Cluster cluster,
-      Service service) throws AmbariException {
-    StackId stackId = service.getDesiredStackVersion();
-    ComponentInfo compInfo =
-        ambariMetaInfo.getServiceInfo(stackId.getStackName(),
-            stackId.getStackVersion(), service.getName()).getClientComponent();
-    if (compInfo != null) {
-      try {
-        ServiceComponent serviceComponent =
-            service.getServiceComponent(compInfo.getName());
-        if (!serviceComponent.getServiceComponentHosts().isEmpty()) {
-          return serviceComponent.getServiceComponentHosts()
-              .keySet().iterator().next();
-        }
-      } catch (ServiceComponentNotFoundException e) {
-        LOG.warn("Could not find required component to run action"
-            + ", clusterName=" + cluster.getClusterName()
-            + ", serviceName=" + service.getName()
-            + ", componentName=" + compInfo.getName());
-
-      }
-    }
-
-    // any component will do
-    Map<String, ServiceComponent> components = service.getServiceComponents();
-    if (components.isEmpty()) {
-      return null;
-    }
-
-    for (ServiceComponent serviceComponent : components.values()) {
-      if (serviceComponent.getServiceComponentHosts().isEmpty()) {
-        continue;
-      }
-      return serviceComponent.getServiceComponentHosts()
-          .keySet().iterator().next();
-    }
-    return null;
-  }
-
-  private void addServiceCheckAction(ActionRequest actionRequest, Stage stage)
-      throws AmbariException {
-    String clusterName = actionRequest.getClusterName();
-    String componentName = actionMetadata.getClient(actionRequest
-        .getServiceName());
-
-    String hostName;
-    if (componentName != null) {
-      Map<String, ServiceComponentHost> components = clusters
-          .getCluster(clusterName).getService(actionRequest.getServiceName())
-          .getServiceComponent(componentName).getServiceComponentHosts();
-
-      if (components.isEmpty()) {
-        throw new AmbariException("Hosts not found, component="
-            + componentName + ", service=" + actionRequest.getServiceName()
-            + ", cluster=" + clusterName);
-      }
-
-      hostName = components.keySet().iterator().next();
-    } else {
-      Map<String, ServiceComponent> components = clusters
-          .getCluster(clusterName).getService(actionRequest.getServiceName())
-          .getServiceComponents();
-
-      if (components.isEmpty()) {
-        throw new AmbariException("Components not found, service="
-            + actionRequest.getServiceName() + ", cluster=" + clusterName);
-      }
-
-      ServiceComponent serviceComponent = components.values().iterator()
-          .next();
-
-      if (serviceComponent.getServiceComponentHosts().isEmpty()) {
-        throw new AmbariException("Hosts not found, component="
-            + serviceComponent.getName() + ", service="
-            + actionRequest.getServiceName() + ", cluster=" + clusterName);
-      }
-
-      hostName = serviceComponent.getServiceComponentHosts().keySet()
-          .iterator().next();
-    }
-
-    stage.addHostRoleExecutionCommand(hostName, Role.valueOf(actionRequest
-        .getActionName()), RoleCommand.EXECUTE,
-        new ServiceComponentHostOpInProgressEvent(componentName, hostName,
-            System.currentTimeMillis()), clusterName, actionRequest
-            .getServiceName());
-
-    stage.getExecutionCommandWrapper(hostName, actionRequest.getActionName()).getExecutionCommand()
-        .setRoleParams(actionRequest.getParameters());
-
-    Map<String, Map<String, String>> configurations = new TreeMap<String, Map<String, String>>();
-    Map<String, Config> allConfigs = clusters.getCluster(clusterName)
-        .getService(actionRequest.getServiceName()).getDesiredConfigs();
-    if (allConfigs != null) {
-      for (Map.Entry<String, Config> entry: allConfigs.entrySet()) {
-        configurations.put(entry.getValue().getType(), entry.getValue().getProperties());
-      }
-    }
-    
-    stage.getExecutionCommandWrapper(hostName,
-        actionRequest.getActionName()).getExecutionCommand()
-        .setConfigurations(configurations); 
-    
-    // Generate cluster host info
-    stage
-        .getExecutionCommandWrapper(hostName, actionRequest.getActionName())
-        .getExecutionCommand()
-        .setClusterHostInfo(
-            StageUtils.getClusterHostInfo(clusters.getCluster(clusterName), hostsMap));
-  }
-
-  private void addDecommissionDatanodeAction(
-      ActionRequest decommissionRequest, Stage stage)
-      throws AmbariException {
-    // Find the namenode host; decommissioning is driven from the namenode only.
-    String clusterName = decommissionRequest.getClusterName();
-    String serviceName = decommissionRequest.getServiceName();
-    String namenodeHost = clusters.getCluster(clusterName)
-        .getService(serviceName).getServiceComponent(Role.NAMENODE.toString())
-        .getServiceComponentHosts().keySet().iterator().next();
-
-    String excludeFileTag = null;
-    if (decommissionRequest.getParameters() != null
-        && (decommissionRequest.getParameters().get("excludeFileTag") != null)) {
-      excludeFileTag = decommissionRequest.getParameters()
-          .get("excludeFileTag");
-    }
-
-    if (excludeFileTag == null) {
-      throw new IllegalArgumentException("No exclude file specified"
-          + " when decommissioning datanodes");
-    }
-
-    Config config = clusters.getCluster(clusterName).getDesiredConfig(
-        "hdfs-exclude-file", excludeFileTag);
-
-    Map<String, Map<String, String>> configurations =
-        new TreeMap<String, Map<String, String>>();
-    configurations.put(config.getType(), config.getProperties());
-
-    Map<String, Config> hdfsSiteConfig = clusters.getCluster(clusterName).getService("HDFS")
-        .getDesiredConfigs();
-    if (hdfsSiteConfig != null) {
-      for (Map.Entry<String, Config> entry: hdfsSiteConfig.entrySet()) {
-        configurations
-          .put(entry.getValue().getType(), entry.getValue().getProperties());
-      }
-    }
-    
-    stage.addHostRoleExecutionCommand(
-        namenodeHost,
-        Role.DECOMMISSION_DATANODE,
-        RoleCommand.EXECUTE,
-        new ServiceComponentHostOpInProgressEvent(Role.DECOMMISSION_DATANODE
-            .toString(), namenodeHost, System.currentTimeMillis()),
-        clusterName, serviceName);
-    stage.getExecutionCommandWrapper(namenodeHost,
-        Role.DECOMMISSION_DATANODE.toString()).getExecutionCommand()
-        .setConfigurations(configurations);
-    
-  }
-
-  @Override
-  public RequestStatusResponse createActions(Set<ActionRequest> request)
-      throws AmbariException {
-    String clusterName = null;
-
-    String logDir = ""; //TODO empty for now
-
-    for (ActionRequest actionRequest : request) {
-      if (actionRequest.getClusterName() == null
-          || actionRequest.getClusterName().isEmpty()
-          || actionRequest.getServiceName() == null
-          || actionRequest.getServiceName().isEmpty()
-          || actionRequest.getActionName() == null
-          || actionRequest.getActionName().isEmpty()) {
-        throw new AmbariException("Invalid action request : " + "cluster="
-            + actionRequest.getClusterName() + ", service="
-            + actionRequest.getServiceName() + ", action="
-            + actionRequest.getActionName());
-      } else if (clusterName == null) {
-        clusterName = actionRequest.getClusterName();
-      } else if (!clusterName.equals(actionRequest.getClusterName())) {
-        throw new AmbariException("Requests for different clusters found");
-      }
-    }
-
-    Stage stage = stageFactory.createNew(actionManager.getNextRequestId(),
-        logDir, clusterName);
-    stage.setStageId(0);
-    for (ActionRequest actionRequest : request) {
-      if (actionRequest.getActionName().contains("SERVICE_CHECK")) {
-        addServiceCheckAction(actionRequest, stage);
-      } else if (actionRequest.getActionName().equals("DECOMMISSION_DATANODE")) {
-        addDecommissionDatanodeAction(actionRequest, stage);
-      } else {
-        throw new AmbariException("Unsupported action");
-      }
-    }
-    RoleGraph rg = new RoleGraph(rco);
-    rg.build(stage);
-    List<Stage> stages = rg.getStages();
-    if (stages != null && !stages.isEmpty()) {
-      actionManager.sendActions(stages);
-      return getRequestStatusResponse(stage.getRequestId());
-    } else {
-      throw new AmbariException("Stage was not created");
-    }
-  }
-}
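
The batch getters above (getClusters, getServices, getComponents, getHosts, getHostComponents, getUsers) all repeat one idiom: iterate the request set, collect responses, and swallow not-found errors unless the set holds exactly one request, because multiple requests encode an OR predicate in which partial misses are expected. A minimal, self-contained sketch of that idiom follows; the names are illustrative, not Ambari's API.

    import java.util.HashSet;
    import java.util.Set;

    public class BatchReadSketch {
      static class NotFoundException extends RuntimeException {}

      interface Reader<Q, R> { Set<R> read(Q query) throws NotFoundException; }

      // Swallow not-found errors unless the caller issued exactly one query;
      // multiple queries model an OR predicate, where partial misses are fine.
      static <Q, R> Set<R> readAll(Set<Q> queries, Reader<Q, R> reader) {
        Set<R> results = new HashSet<R>();
        for (Q query : queries) {
          try {
            results.addAll(reader.read(query));
          } catch (NotFoundException e) {
            if (queries.size() == 1) {
              throw e; // a single explicit query must fail loudly
            }
          }
        }
        return results;
      }
    }
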
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
deleted file mode 100644
index 563e1bd..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
+++ /dev/null
@@ -1,403 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller;
-
-
-import java.io.File;
-import java.net.BindException;
-import java.util.Map;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.actionmanager.ActionManager;
-import org.apache.ambari.server.agent.HeartBeatHandler;
-import org.apache.ambari.server.agent.rest.AgentResource;
-import org.apache.ambari.server.api.rest.BootStrapResource;
-import org.apache.ambari.server.api.services.*;
-import org.apache.ambari.server.bootstrap.BootStrapImpl;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.orm.GuiceJpaInitializer;
-import org.apache.ambari.server.orm.PersistenceType;
-import org.apache.ambari.server.resources.ResourceManager;
-import org.apache.ambari.server.resources.api.rest.GetResource;
-import org.apache.ambari.server.security.CertificateManager;
-import org.apache.ambari.server.security.authorization.AmbariLdapAuthenticationProvider;
-import org.apache.ambari.server.security.authorization.AmbariLocalUserDetailsService;
-import org.apache.ambari.server.security.authorization.Users;
-import org.apache.ambari.server.security.unsecured.rest.CertificateDownload;
-import org.apache.ambari.server.security.unsecured.rest.CertificateSign;
-import org.apache.ambari.server.state.Clusters;
-import org.eclipse.jetty.server.Connector;
-import org.eclipse.jetty.server.Server;
-import org.eclipse.jetty.server.nio.SelectChannelConnector;
-import org.eclipse.jetty.server.ssl.SslSelectChannelConnector;
-import org.eclipse.jetty.servlet.DefaultServlet;
-import org.eclipse.jetty.servlet.FilterHolder;
-import org.eclipse.jetty.servlet.ServletContextHandler;
-import org.eclipse.jetty.servlet.ServletHolder;
-import org.eclipse.jetty.util.ssl.SslContextFactory;
-import org.eclipse.jetty.util.thread.QueuedThreadPool;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.springframework.beans.factory.config.ConfigurableListableBeanFactory;
-import org.springframework.context.support.ClassPathXmlApplicationContext;
-import org.springframework.security.crypto.password.PasswordEncoder;
-import org.springframework.web.context.WebApplicationContext;
-import org.springframework.web.context.support.GenericWebApplicationContext;
-import org.springframework.web.filter.DelegatingFilterProxy;
-
-import com.google.inject.Guice;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.Singleton;
-import com.google.inject.persist.Transactional;
-import com.sun.jersey.spi.container.servlet.ServletContainer;
-
-@Singleton
-public class AmbariServer {
-  private static Logger LOG = LoggerFactory.getLogger(AmbariServer.class);
-  public static final int AGENT_ONE_WAY_AUTH = 8440;
-  public static final int AGENT_TWO_WAY_AUTH = 8441;
-  public static final int CLIENT_SSL_API_PORT = 8443;
-
-  private Server server = null;
-  private Server serverForAgent = null;
-
-  public volatile boolean running = true; // true while controller runs
-
-  final String CONTEXT_PATH = "/";
-  final String SPRING_CONTEXT_LOCATION =
-      "classpath:/webapp/WEB-INF/spring-security.xml";
-
-  @Inject
-  Configuration configs;
-  @Inject
-  CertificateManager certMan;
-  @Inject
-  Injector injector;
-  @Inject
-  AmbariMetaInfo ambariMetaInfo;
-
-  public String getServerOsType() {
-    return configs.getServerOsType();
-  }
-
-
-  private static AmbariManagementController clusterController = null;
-
-  public static AmbariManagementController getController() {
-    return clusterController;
-  }
-
-  public void run() throws Exception {
-    performStaticInjection();
-    addInMemoryUsers();
-    server = new Server();
-    serverForAgent = new Server();
-
-    try {
-      ClassPathXmlApplicationContext parentSpringAppContext =
-          new ClassPathXmlApplicationContext();
-      parentSpringAppContext.refresh();
-      ConfigurableListableBeanFactory factory = parentSpringAppContext.
-          getBeanFactory();
-      factory.registerSingleton("guiceInjector",
-          injector);
-      factory.registerSingleton("passwordEncoder",
-          injector.getInstance(PasswordEncoder.class));
-      factory.registerSingleton("ambariLocalUserService",
-          injector.getInstance(AmbariLocalUserDetailsService.class));
-      factory.registerSingleton("ambariLdapAuthenticationProvider",
-          injector.getInstance(AmbariLdapAuthenticationProvider.class));
-      //Spring Security xml config depends on this Bean
-
-      String[] contextLocations = {SPRING_CONTEXT_LOCATION};
-      ClassPathXmlApplicationContext springAppContext = new
-          ClassPathXmlApplicationContext(contextLocations, parentSpringAppContext);
-      //setting ambari web context
-
-      ServletContextHandler root = new ServletContextHandler(server, CONTEXT_PATH,
-          ServletContextHandler.SECURITY | ServletContextHandler.SESSIONS);
-
-      //Changing session cookie name to avoid conflicts
-      root.getSessionHandler().getSessionManager().setSessionCookie("AMBARISESSIONID");
-
-      GenericWebApplicationContext springWebAppContext = new GenericWebApplicationContext();
-      springWebAppContext.setServletContext(root.getServletContext());
-      springWebAppContext.setParent(springAppContext);
-      /* Configure web app context */
-      root.setResourceBase(configs.getWebAppDir());
-
-      root.getServletContext().setAttribute(
-          WebApplicationContext.ROOT_WEB_APPLICATION_CONTEXT_ATTRIBUTE,
-          springWebAppContext);
-
-      certMan.initRootCert();
-
-      ServletContextHandler agentroot = new ServletContextHandler(serverForAgent,
-          "/", ServletContextHandler.SESSIONS );
-
-      ServletHolder rootServlet = root.addServlet(DefaultServlet.class, "/");
-      rootServlet.setInitOrder(1);
-
-      /* Configure default servlet for agent server */
-      rootServlet = agentroot.addServlet(DefaultServlet.class, "/");
-      rootServlet.setInitOrder(1);
-
-      //Spring Security Filter initialization
-      DelegatingFilterProxy springSecurityFilter = new DelegatingFilterProxy();
-      springSecurityFilter.setTargetBeanName("springSecurityFilterChain");
-
-      if (configs.getApiAuthentication()) {
-        root.addFilter(new FilterHolder(springSecurityFilter), "/api/*", 1);
-      }
-
-
-      //Secured connector for 2-way auth
-      SslSelectChannelConnector sslConnectorTwoWay = new SslSelectChannelConnector();
-      sslConnectorTwoWay.setPort(AGENT_TWO_WAY_AUTH);
-
-      Map<String, String> configsMap = configs.getConfigsMap();
-      String keystore = configsMap.get(Configuration.SRVR_KSTR_DIR_KEY) +
-          File.separator + configsMap.get(Configuration.KSTR_NAME_KEY);
-      String srvrCrtPass = configsMap.get(Configuration.SRVR_CRT_PASS_KEY);
-      sslConnectorTwoWay.setKeystore(keystore);
-      sslConnectorTwoWay.setTruststore(keystore);
-      sslConnectorTwoWay.setPassword(srvrCrtPass);
-      sslConnectorTwoWay.setKeyPassword(srvrCrtPass);
-      sslConnectorTwoWay.setTrustPassword(srvrCrtPass);
-      sslConnectorTwoWay.setKeystoreType("PKCS12");
-      sslConnectorTwoWay.setTruststoreType("PKCS12");
-      sslConnectorTwoWay.setNeedClientAuth(true);
-
-      //Secured connector for 1-way auth
-      SslContextFactory contextFactory = new SslContextFactory(true);
-      contextFactory.setKeyStorePath(keystore);
-      contextFactory.setTrustStore(keystore);
-      contextFactory.setKeyStorePassword(srvrCrtPass);
-      contextFactory.setKeyManagerPassword(srvrCrtPass);
-      contextFactory.setTrustStorePassword(srvrCrtPass);
-      contextFactory.setKeyStoreType("PKCS12");
-      contextFactory.setTrustStoreType("PKCS12");
-      contextFactory.setNeedClientAuth(false);
-      SslSelectChannelConnector sslConnectorOneWay = new SslSelectChannelConnector(contextFactory);
-      sslConnectorOneWay.setPort(AGENT_ONE_WAY_AUTH);
-      sslConnectorOneWay.setAcceptors(2);
-      sslConnectorTwoWay.setAcceptors(2);
-      serverForAgent.setConnectors(new Connector[]{ sslConnectorOneWay, sslConnectorTwoWay});
-
-      ServletHolder sh = new ServletHolder(ServletContainer.class);
-      sh.setInitParameter("com.sun.jersey.config.property.resourceConfigClass",
-          "com.sun.jersey.api.core.PackagesResourceConfig");
-      sh.setInitParameter("com.sun.jersey.config.property.packages",
-          "org.apache.ambari.server.api.rest;" +
-              "org.apache.ambari.server.api.services;" +
-          "org.apache.ambari.eventdb.webservice");
-      root.addServlet(sh, "/api/v1/*");
-      sh.setInitOrder(2);
-
-      ServletHolder agent = new ServletHolder(ServletContainer.class);
-      agent.setInitParameter("com.sun.jersey.config.property.resourceConfigClass",
-          "com.sun.jersey.api.core.PackagesResourceConfig");
-      agent.setInitParameter("com.sun.jersey.config.property.packages",
-          "org.apache.ambari.server.agent.rest");
-      agent.setInitParameter("com.sun.jersey.api.json.POJOMappingFeature",
-          "true");
-      agentroot.addServlet(agent, "/agent/v1/*");
-      agent.setInitOrder(3);
-
-      ServletHolder cert = new ServletHolder(ServletContainer.class);
-      cert.setInitParameter("com.sun.jersey.config.property.resourceConfigClass",
-          "com.sun.jersey.api.core.PackagesResourceConfig");
-      cert.setInitParameter("com.sun.jersey.config.property.packages",
-          "org.apache.ambari.server.security.unsecured.rest");
-      agentroot.addServlet(cert, "/*");
-      cert.setInitOrder(4);
-
-      ServletHolder resources = new ServletHolder(ServletContainer.class);
-      resources.setInitParameter("com.sun.jersey.config.property.resourceConfigClass",
-          "com.sun.jersey.api.core.PackagesResourceConfig");
-      resources.setInitParameter("com.sun.jersey.config.property.packages",
-          "org.apache.ambari.server.resources.api.rest");
-      root.addServlet(resources, "/resources/*");
-      resources.setInitOrder(6);
-
-      //Set jetty thread pool
-      serverForAgent.setThreadPool(new QueuedThreadPool(25));
-      server.setThreadPool(new QueuedThreadPool(25));
-
-      /* Configure the API server to use the NIO connectors */
-      SelectChannelConnector apiConnector;
-
-      if (configs.getApiSSLAuthentication()) {
-        SslSelectChannelConnector sapiConnector = new SslSelectChannelConnector();
-        sapiConnector.setPort(CLIENT_SSL_API_PORT);
-        sapiConnector.setKeystore(keystore);
-        sapiConnector.setTruststore(keystore);
-        sapiConnector.setPassword(srvrCrtPass);
-        sapiConnector.setKeyPassword(srvrCrtPass);
-        sapiConnector.setTrustPassword(srvrCrtPass);
-        sapiConnector.setKeystoreType("PKCS12");
-        sapiConnector.setTruststoreType("PKCS12");
-        apiConnector = sapiConnector;
-      } else {
-        apiConnector = new SelectChannelConnector();
-        apiConnector.setPort(configs.getClientApiPort());
-      }
-
-      server.addConnector(apiConnector);
-
-      server.setStopAtShutdown(true);
-      serverForAgent.setStopAtShutdown(true);
-      springAppContext.start();
-
-      LOG.info("********* Initializing Meta Info **********");
-      ambariMetaInfo.init();
-
-      String osType = getServerOsType();
-      if (osType == null || osType.isEmpty()) {
-        throw new RuntimeException(Configuration.OS_VERSION_KEY + " is not "
-            + " set in the ambari.properties file");
-      }
-
-      //Start action scheduler
-      LOG.info("********* Initializing Clusters **********");
-      Clusters clusters = injector.getInstance(Clusters.class);
-      StringBuilder clusterDump = new StringBuilder();
-      clusters.debugDump(clusterDump);
-      LOG.info("********* Current Clusters State *********");
-      LOG.info(clusterDump.toString());
-
-      LOG.info("********* Initializing ActionManager **********");
-      ActionManager manager = injector.getInstance(ActionManager.class);
-      LOG.info("********* Initializing Controller **********");
-      AmbariManagementController controller = injector.getInstance(
-          AmbariManagementController.class);
-
-      clusterController = controller;
-
-      // FIXME need to figure out correct order of starting things to
-      // handle restart-recovery correctly
-
-      /*
-       * Start the server after controller state is recovered.
-       */
-      server.start();
-
-      serverForAgent.start();
-      LOG.info("********* Started Server **********");
-
-      manager.start();
-      LOG.info("********* Started ActionManager **********");
-
-      //TODO: Remove this code when APIs are ready for testing.
-      //      RequestInjectorForTest testInjector = new RequestInjectorForTest(controller, clusters);
-      //      Thread testInjectorThread = new Thread(testInjector);
-      //      testInjectorThread.start();
-
-      server.join();
-      LOG.info("Joined the Server");
-    } catch(BindException bindException) {
-      LOG.error("Could not bind to server port - instance may already be running. " +
-          "Terminating this instance.", bindException);
-      throw bindException;
-    }
-  }
-
-  /**
-   * Creates default users and roles if in-memory database is used
-   */
-  @Transactional
-  protected void addInMemoryUsers() {
-    if (configs.getPersistenceType() == PersistenceType.IN_MEMORY) {
-      LOG.info("In-memory database is used - creating default users");
-      Users users = injector.getInstance(Users.class);
-
-      users.createDefaultRoles();
-      users.createUser("admin", "admin");
-      users.createUser("user", "user");
-      try {
-        users.promoteToAdmin(users.getLocalUser("admin"));
-      } catch (AmbariException e) {
-        throw new RuntimeException(e);
-      }
-    }
-  }
-
-  public void stop() throws Exception {
-    try {
-      server.stop();
-    } catch (Exception e) {
-      LOG.error("Error stopping the server", e);
-    }
-  }
-
-  /**
-   * Static injection replacement to wait for the Persistence Service to start
-   */
-  public void performStaticInjection() {
-    AgentResource.init(injector.getInstance(HeartBeatHandler.class));
-    CertificateDownload.init(injector.getInstance(CertificateManager.class));
-    CertificateSign.init(injector.getInstance(CertificateManager.class));
-    GetResource.init(injector.getInstance(ResourceManager.class));
-    PersistKeyValueService.init(injector.getInstance(PersistKeyValueImpl.class));
-    KeyService.init(injector.getInstance(PersistKeyValueImpl.class));
-    AmbariMetaService.init(injector.getInstance(AmbariMetaInfo.class));
-    BootStrapResource.init(injector.getInstance(BootStrapImpl.class));
-  }
-
-  public static void main(String[] args) throws Exception {
-
-    Injector injector = Guice.createInjector(new ControllerModule());
-    AmbariServer server = null;
-    try {
-      LOG.info("Getting the controller");
-      injector.getInstance(GuiceJpaInitializer.class);
-      server = injector.getInstance(AmbariServer.class);
-      CertificateManager certMan = injector.getInstance(CertificateManager.class);
-      certMan.initRootCert();
-      if (server != null) {
-        server.run();
-      }
-    } catch (Throwable t) {
-      LOG.error("Failed to run the Ambari Server", t);
-      if (server != null) {
-        server.stop();
-      }
-      System.exit(-1);
-    }
-  }
-}
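
The run() method above is a conventional embedded-Jetty bootstrap: build a Server, attach connectors and a ServletContextHandler, then start and join. A stripped-down sketch of the same pattern against the Jetty 7/8-era API used above; the port and thread-pool size are placeholders, and the SSL connectors are omitted.

    import org.eclipse.jetty.server.Server;
    import org.eclipse.jetty.server.nio.SelectChannelConnector;
    import org.eclipse.jetty.servlet.DefaultServlet;
    import org.eclipse.jetty.servlet.ServletContextHandler;
    import org.eclipse.jetty.util.thread.QueuedThreadPool;

    public class EmbeddedJettySketch {
      public static void main(String[] args) throws Exception {
        Server server = new Server();
        server.setThreadPool(new QueuedThreadPool(25));

        // Plain NIO connector; the real server swaps in
        // SslSelectChannelConnector variants when SSL is enabled.
        SelectChannelConnector connector = new SelectChannelConnector();
        connector.setPort(8080); // placeholder port
        server.addConnector(connector);

        ServletContextHandler root =
            new ServletContextHandler(server, "/", ServletContextHandler.SESSIONS);
        root.addServlet(DefaultServlet.class, "/");

        server.setStopAtShutdown(true);
        server.start();
        server.join();
      }
    }
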
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ClusterRequest.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ClusterRequest.java
deleted file mode 100644
index 2a313a8..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ClusterRequest.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller;
-
-import java.util.Set;
-
-/**
- * Used for creating a Cluster
- */
-public class ClusterRequest {
-
-  private Long clusterId; // for GET
-
-  private String clusterName; // for GET/CREATE/UPDATE
-
-  private String stackVersion; // for CREATE/UPDATE
-
-  Set<String> hostNames; // CREATE/UPDATE
-
-  public ClusterRequest(Long clusterId, String clusterName,
-      String stackVersion, Set<String> hostNames) {
-    super();
-    this.clusterId = clusterId;
-    this.clusterName = clusterName;
-    this.stackVersion = stackVersion;
-    this.hostNames = hostNames;
-  }
-
-  /**
-   * @return the clusterId
-   */
-  public Long getClusterId() {
-    return clusterId;
-  }
-
-  /**
-   * @return the clusterName
-   */
-  public String getClusterName() {
-    return clusterName;
-  }
-
-  /**
-   * @return the stackVersion
-   */
-  public String getStackVersion() {
-    return stackVersion;
-  }
-
-  /**
-   * @param clusterId the clusterId to set
-   */
-  public void setClusterId(Long clusterId) {
-    this.clusterId = clusterId;
-  }
-
-  /**
-   * @param clusterName the clusterName to set
-   */
-  public void setClusterName(String clusterName) {
-    this.clusterName = clusterName;
-  }
-
-  /**
-   * @param stackVersion the stackVersion to set
-   */
-  public void setStackVersion(String stackVersion) {
-    this.stackVersion = stackVersion;
-  }
-
-  public Set<String> getHostNames() {
-    return hostNames;
-  }
-
-  public void setHostNames(Set<String> hostNames) {
-    this.hostNames = hostNames;
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder();
-    sb.append("{"
-        + " clusterName=" + clusterName
-        + ", clusterId=" + clusterId
-        + ", hosts=[");
-    if (hostNames != null) {
-      int i = 0;
-      for (String hostName : hostNames) {
-        if (i != 0) {
-          sb.append(",");
-        }
-        ++i;
-        sb.append(hostName);
-      }
-    }
-    sb.append("] }");
-    return sb.toString();
-  }
-
-}
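
A short usage sketch for the request bean above, assuming the class is on the classpath; all values are made up.

    import java.util.Arrays;
    import java.util.HashSet;

    public class ClusterRequestDemo {
      public static void main(String[] args) {
        ClusterRequest request = new ClusterRequest(
            null,                 // clusterId: only populated for GET
            "c1",                 // clusterName (made-up value)
            "HDP-1.2.0",          // stackVersion (made-up value)
            new HashSet<String>(Arrays.asList("h1", "h2")));
        // Prints something like { clusterName=c1, clusterId=null, hosts=[h1,h2] };
        // host order may vary since hostNames is a HashSet.
        System.out.println(request);
      }
    }
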
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ClusterResponse.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ClusterResponse.java
deleted file mode 100644
index 9c27fa9..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ClusterResponse.java
+++ /dev/null
@@ -1,118 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller;
-
-import java.util.Set;
-
-public class ClusterResponse {
-
-  private final Long clusterId;
-
-  private final String clusterName;
-
-  private final Set<String> hostNames;
-
-  private final String desiredStackVersion;
-
-  public ClusterResponse(Long clusterId, String clusterName,
-      Set<String> hostNames, String desiredStackVersion) {
-    super();
-    this.clusterId = clusterId;
-    this.clusterName = clusterName;
-    this.hostNames = hostNames;
-    this.desiredStackVersion = desiredStackVersion;
-  }
-
-  /**
-   * @return the clusterId
-   */
-  public Long getClusterId() {
-    return clusterId;
-  }
-
-  /**
-   * @return the clusterName
-   */
-  public String getClusterName() {
-    return clusterName;
-  }
-
-  /**
-   * @return the host names
-   */
-  public Set<String> getHostNames() {
-    return hostNames;
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder();
-    sb.append("{"
-        + " clusterName=" + clusterName
-        + ", clusterId=" + clusterId
-        + ", desiredStackVersion=" + desiredStackVersion
-        + ", hosts=[");
-    if (hostNames != null) {
-      int i = 0;
-      for (String hostName : hostNames) {
-        if (i != 0) {
-          sb.append(",");
-        }
-        ++i;
-        sb.append(hostName);
-      }
-    }
-    sb.append("] }");
-    return sb.toString();
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    ClusterResponse that = (ClusterResponse) o;
-
-    if (clusterId != null ?
-        !clusterId.equals(that.clusterId) : that.clusterId != null) {
-      return false;
-    }
-    if (clusterName != null ?
-        !clusterName.equals(that.clusterName) : that.clusterName != null) {
-      return false;
-    }
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = clusterId != null ? clusterId.intValue() : 0;
-    result = 71 * result + (clusterName != null ? clusterName.hashCode() : 0);
-    return result;
-  }
-
-  /**
-   * @return the desiredStackVersion
-   */
-  public String getDesiredStackVersion() {
-    return desiredStackVersion;
-  }
-
-}
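
equals() and hashCode() above identify a cluster by (clusterId, clusterName) only, so two responses for the same cluster collapse to one entry in a HashSet even when their host lists or stack versions differ. A sketch, assuming the class above is available:

    import java.util.Collections;
    import java.util.HashSet;
    import java.util.Set;

    public class ClusterResponseDemo {
      public static void main(String[] args) {
        Set<String> noHosts = Collections.emptySet();
        ClusterResponse a = new ClusterResponse(1L, "c1", noHosts, "HDP-1.2.0");
        ClusterResponse b = new ClusterResponse(1L, "c1", noHosts, "HDP-1.2.1");

        Set<ClusterResponse> set = new HashSet<ClusterResponse>();
        set.add(a);
        set.add(b);
        // Identity is (clusterId, clusterName) only, so the set holds one entry.
        System.out.println(set.size()); // 1
      }
    }
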
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ConfigurationRequest.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ConfigurationRequest.java
deleted file mode 100644
index 3ac3f9f..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ConfigurationRequest.java
+++ /dev/null
@@ -1,105 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller;
-
-import java.util.Map;
-
-/**
- * This class encapsulates a configuration update request.
- * The configuration properties are grouped at the service level. It is assumed that
- * different components of a service don't overload the same property name.
- */
-public class ConfigurationRequest {
-
-  private String clusterName;
-
-  private String type;
-
-  private String tag;
-
-  private Map<String, String> configs;
-
-  public ConfigurationRequest(String clusterName,
-                              String type,
-                              String tag,
-                              Map<String, String> configs) {
-    super();
-    this.clusterName = clusterName;
-    this.configs = configs;
-    this.type = type;
-    this.tag = tag;
-  }
-
-  /**
-   * @return the type
-   */
-  public String getType() {
-    return type;
-  }
-
-  /**
-   * @param type the type to set
-   */
-  public void setType(String type) {
-    this.type = type;
-  }
-
-  /**
-   * @return the versionTag
-   */
-  public String getVersionTag() {
-    return tag;
-  }
-
-  /**
-   * @param versionTag the versionTag to set
-   */
-  public void setVersionTag(String versionTag) {
-    this.tag = versionTag;
-  }
-
-  /**
-   * @return the configs
-   */
-  public Map<String, String> getConfigs() {
-    return configs;
-  }
-
-  /**
-   * @param configs the configs to set
-   */
-  public void setConfigs(Map<String, String> configs) {
-    this.configs = configs;
-  }
-
-  /**
-   * @return the clusterName
-   */
-  public String getClusterName() {
-    return clusterName;
-  }
-
-
-  /**
-   * @param clusterName the clusterName to set
-   */
-  public void setClusterName(String clusterName) {
-    this.clusterName = clusterName;
-  }
-}
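
A usage sketch for the request bean above; the cluster, type, tag, and property values are illustrative.

    import java.util.HashMap;
    import java.util.Map;

    public class ConfigurationRequestDemo {
      public static void main(String[] args) {
        Map<String, String> props = new HashMap<String, String>();
        props.put("dfs.replication", "3"); // illustrative property

        // type identifies the config group ("hdfs-site"); the tag versions it.
        ConfigurationRequest request =
            new ConfigurationRequest("c1", "hdfs-site", "version1", props);
        System.out.println(request.getType() + "/" + request.getVersionTag());
      }
    }
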
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ConfigurationResponse.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ConfigurationResponse.java
deleted file mode 100644
index 4edc9f3..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ConfigurationResponse.java
+++ /dev/null
@@ -1,124 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller;
-
-import java.util.Map;
-
-/**
- * This class encapsulates a configuration response.
- * The configuration properties are grouped at the service level. It is assumed that
- * different components of a service don't overload the same property name.
- */
-public class ConfigurationResponse {
-
-  private final String clusterName;
-
-  private final String type;
-
-  private String versionTag;
-
-  private Map<String, String> configs;
-
-
-  public ConfigurationResponse(String clusterName,
-                               String type, String versionTag,
-                               Map<String, String> configs) {
-    super();
-    this.clusterName = clusterName;
-    this.configs = configs;
-    this.type = type;
-    this.versionTag = versionTag;
-  }
-
-
-  /**
-   * @return the versionTag
-   */
-  public String getVersionTag() {
-    return versionTag;
-  }
-
-  /**
-   * @param versionTag the versionTag to set
-   */
-  public void setVersionTag(String versionTag) {
-    this.versionTag = versionTag;
-  }
-
-  /**
-   * @return the configs
-   */
-  public Map<String, String> getConfigs() {
-    return configs;
-  }
-
-  /**
-   * @param configs the configs to set
-   */
-  public void setConfigs(Map<String, String> configs) {
-    this.configs = configs;
-  }
-
-  /**
-   * @return the type
-   */
-  public String getType() {
-    return type;
-  }
-
-  /**
-   * @return the clusterName
-   */
-  public String getClusterName() {
-    return clusterName;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    ConfigurationResponse that =
-        (ConfigurationResponse) o;
-
-    if (clusterName != null ?
-        !clusterName.equals(that.clusterName) : that.clusterName != null) {
-      return false;
-    }
-    if (type != null ?
-        !type.equals(that.type) : that.type != null) {
-      return false;
-    }
-    if (versionTag != null ?
-        !versionTag.equals(that.versionTag) : that.versionTag != null){
-      return false;
-    }
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = clusterName != null ? clusterName.hashCode() : 0;
-    result = 71 * result + (type != null ? type.hashCode() : 0);
-    result = 71 * result + (versionTag != null ? versionTag.hashCode():0);
-    return result;
-  }
-
-}
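
As with ClusterResponse, equality here is structural on (clusterName, type, versionTag) and ignores the property map, so responses carrying the same tag deduplicate in a set. A sketch, assuming the class above is available:

    import java.util.Collections;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    public class ConfigurationResponseDemo {
      public static void main(String[] args) {
        Map<String, String> none = Collections.emptyMap();
        ConfigurationResponse a =
            new ConfigurationResponse("c1", "hdfs-site", "version1", none);
        ConfigurationResponse b =
            new ConfigurationResponse("c1", "hdfs-site", "version1", none);

        Set<ConfigurationResponse> set = new HashSet<ConfigurationResponse>();
        set.add(a);
        set.add(b);
        // Equality ignores the property map; only (cluster, type, tag) count.
        System.out.println(set.size()); // 1
      }
    }
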
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java
deleted file mode 100644
index 3529da7..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller;
-
-import com.google.gson.Gson;
-import com.google.inject.Scopes;
-import com.google.inject.assistedinject.FactoryModuleBuilder;
-import com.google.inject.persist.jpa.JpaPersistModule;
-import org.apache.ambari.server.actionmanager.*;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.orm.PersistenceType;
-import org.apache.ambari.server.state.*;
-import org.apache.ambari.server.state.cluster.ClusterFactory;
-import org.apache.ambari.server.state.cluster.ClusterImpl;
-import org.apache.ambari.server.state.cluster.ClustersImpl;
-import org.apache.ambari.server.state.host.HostFactory;
-import org.apache.ambari.server.state.host.HostImpl;
-
-import com.google.inject.AbstractModule;
-import com.google.inject.name.Names;
-import org.apache.ambari.server.state.svccomphost.ServiceComponentHostImpl;
-import org.springframework.security.crypto.password.PasswordEncoder;
-import org.springframework.security.crypto.password.StandardPasswordEncoder;
-
-import java.util.Properties;
-
-/**
- * Used for injection purposes.
- */
-public class ControllerModule extends AbstractModule {
-
-  private final Configuration configuration;
-  private final AmbariMetaInfo ambariMetaInfo;
-  private final HostsMap hostsMap;
-
-  public ControllerModule() throws Exception {
-    configuration = new Configuration();
-    ambariMetaInfo = new AmbariMetaInfo(configuration);
-    hostsMap = new HostsMap(configuration);
-  }
-
-  public ControllerModule(Properties properties) throws Exception {
-    configuration = new Configuration(properties);
-    ambariMetaInfo = new AmbariMetaInfo(configuration);
-    hostsMap = new HostsMap(configuration);
-  }
-
-  @Override
-  protected void configure() {
-    installFactories();
-
-    bind(Configuration.class).toInstance(configuration);
-    bind(AmbariMetaInfo.class).toInstance(ambariMetaInfo);
-    bind(HostsMap.class).toInstance(hostsMap);
-    bind(PasswordEncoder.class).toInstance(new StandardPasswordEncoder());
-
-    JpaPersistModule jpaPersistModule = new JpaPersistModule(configuration.getPersistenceType().getUnitName());
-    if (configuration.getPersistenceType() == PersistenceType.POSTGRES) {
-      Properties properties = new Properties();
-      properties.setProperty("javax.persistence.jdbc.user", configuration.getDatabaseUser());
-      properties.setProperty("javax.persistence.jdbc.password", configuration.getDatabasePassword());
-      jpaPersistModule.properties(properties);
-    }
-
-    install(jpaPersistModule);
-
-
-    bind(Gson.class).in(Scopes.SINGLETON);
-    bind(Clusters.class).to(ClustersImpl.class);
-    bind(ActionDBAccessor.class).to(ActionDBAccessorImpl.class);
-    bindConstant().annotatedWith(Names.named("schedulerSleeptime")).to(10000L);
-    bindConstant().annotatedWith(Names.named("actionTimeout")).to(300000L);
-    bind(AmbariManagementController.class)
-        .to(AmbariManagementControllerImpl.class);
-  }
-
-  private void installFactories() {
-    install(new FactoryModuleBuilder().implement(
-        Cluster.class, ClusterImpl.class).build(ClusterFactory.class));
-    install(new FactoryModuleBuilder().implement(
-        Host.class, HostImpl.class).build(HostFactory.class));
-    install(new FactoryModuleBuilder().implement(
-        Service.class, ServiceImpl.class).build(ServiceFactory.class));
-    install(new FactoryModuleBuilder().implement(
-        ServiceComponent.class, ServiceComponentImpl.class).build(
-        ServiceComponentFactory.class));
-    install(new FactoryModuleBuilder().implement(
-        ServiceComponentHost.class, ServiceComponentHostImpl.class).build(
-        ServiceComponentHostFactory.class));
-    install(new FactoryModuleBuilder().implement(
-        Config.class, ConfigImpl.class).build(ConfigFactory.class));
-    install(new FactoryModuleBuilder().build(StageFactory.class));
-    install(new FactoryModuleBuilder().build(HostRoleCommandFactory.class));
-  }
-
-}
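
The module above leans on Guice's FactoryModuleBuilder: each implement(...).build(FactoryType.class) call generates a factory whose create(...) arguments are injected via @Assisted. A self-contained sketch of that mechanism with made-up types; it assumes guice and guice-assistedinject on the classpath.

    import com.google.inject.AbstractModule;
    import com.google.inject.Guice;
    import com.google.inject.Inject;
    import com.google.inject.Injector;
    import com.google.inject.assistedinject.Assisted;
    import com.google.inject.assistedinject.FactoryModuleBuilder;

    public class AssistedInjectSketch {
      interface Widget { String describe(); }

      static class WidgetImpl implements Widget {
        private final String name;
        @Inject
        WidgetImpl(@Assisted String name) { this.name = name; }
        public String describe() { return "widget:" + name; }
      }

      // Guice generates the implementation of this factory.
      interface WidgetFactory { Widget create(String name); }

      static class DemoModule extends AbstractModule {
        @Override
        protected void configure() {
          install(new FactoryModuleBuilder()
              .implement(Widget.class, WidgetImpl.class)
              .build(WidgetFactory.class));
        }
      }

      public static void main(String[] args) {
        Injector injector = Guice.createInjector(new DemoModule());
        Widget w = injector.getInstance(WidgetFactory.class).create("demo");
        System.out.println(w.describe()); // widget:demo
      }
    }
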
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/HostRequest.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/HostRequest.java
deleted file mode 100644
index 2fd5a83..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/HostRequest.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller;
-
-import java.util.Map;
-import java.util.Map.Entry;
-
-public class HostRequest {
-
-  private String hostname;
-  private String publicHostname;
-  private String clusterName; // CREATE/UPDATE
-  private Map<String, String> hostAttributes; // CREATE/UPDATE
-  private String rackInfo;
-
-  public HostRequest(String hostname, String clusterName, Map<String, String> hostAttributes) {
-    this.hostname = hostname;
-    this.clusterName = clusterName;
-    this.hostAttributes = hostAttributes;
-  }
-
-  public String getHostname() {
-    return hostname;
-  }
-
-  public void setHostname(String hostname) {
-    this.hostname = hostname;
-  }
-
-  public String getClusterName() {
-    return clusterName;
-  }
-
-  public void setClusterName(String clusterName) {
-    this.clusterName = clusterName;
-  }
-
-  public Map<String, String> getHostAttributes() {
-    return hostAttributes;
-  }
-
-  public void setHostAttributes(Map<String, String> hostAttributes) {
-    this.hostAttributes = hostAttributes;
-  }
-  
-  public String getRackInfo() {
-    return rackInfo;
-  }
-  
-  public void setRackInfo(String info) {
-    rackInfo = info;
-  }
-  
-  public String getPublicHostName() {
-    return publicHostname;
-  }
-  
-  public void setPublicHostName(String name) {
-    publicHostname = name;
-  }
-
-  public String toString() {
-    StringBuilder sb = new StringBuilder();
-    sb.append("{ hostname=").append(hostname).append(", clusterName=").append(clusterName);
-    if (hostAttributes != null) {
-      sb.append(", hostAttributes=[");
-      int i = 0;
-      for (Entry<String, String> attr : hostAttributes.entrySet()) {
-        if (i != 0) {
-          sb.append(",");
-        }
-        ++i;
-        sb.append(attr.getKey() + "=" + attr.getValue());
-      }
-      sb.append(']');
-    }
-    sb.append(" }");
-    return sb.toString();
-  }
-}
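
A usage sketch for the bean above, assuming the class is on the classpath; the hostname, cluster, and attribute values are made up.

    import java.util.HashMap;
    import java.util.Map;

    public class HostRequestDemo {
      public static void main(String[] args) {
        Map<String, String> attrs = new HashMap<String, String>();
        attrs.put("os", "centos6"); // illustrative attribute

        HostRequest request = new HostRequest("h1", "c1", attrs);
        request.setRackInfo("/rack-01");
        // Prints { hostname=h1, clusterName=c1, hostAttributes=[os=centos6] };
        // note that rackInfo is not part of toString() above.
        System.out.println(request);
      }
    }
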
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/HostResponse.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/HostResponse.java
deleted file mode 100644
index 9bbf363..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/HostResponse.java
+++ /dev/null
@@ -1,459 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.ambari.server.agent.AgentEnv;
-import org.apache.ambari.server.agent.DiskInfo;
-import org.apache.ambari.server.state.AgentVersion;
-import org.apache.ambari.server.state.HostHealthStatus;
-
-public class HostResponse {
-
-  private String hostname;
-
-  private String clusterName;
-
-  /**
-   * Host IP if ipv4 interface available
-   */
-  private String ipv4;
-
-  /**
-   * Host IP if ipv6 interface available
-   */
-  private String ipv6;
-
-  /**
-   * Count of cores on Host
-   */
-  private int cpuCount;
-
-  /**
-   * Os Architecture
-   */
-  private String osArch;
-
-  /**
-   * OS Type
-   */
-  private String osType;
-
-  /**
-   * OS Information
-   */
-  private String osInfo;
-
-  /**
-   * Amount of available memory for the Host
-   */
-  private long availableMemBytes;
-
-  /**
-   * Amount of physical memory for the Host
-   */
-  private long totalMemBytes;
-
-  /**
-   * Disks mounted on the Host
-   */
-  private List<DiskInfo> disksInfo;
-
-  /**
-   * Last heartbeat timestamp from the Host
-   */
-  private long lastHeartbeatTime;
-  
-  /**
-   * Last environment information
-   */
-  private AgentEnv lastAgentEnv;
-
-  /**
-   * Last registration timestamp for the Host
-   */
-  private long lastRegistrationTime;
-
-  /**
-   * Rack to which the Host belongs
-   */
-  private String rackInfo;
-
-  /**
-   * Additional Host attributes
-   */
-  private Map<String, String> hostAttributes;
-
-  /**
-   * Version of agent running on the Host
-   */
-  private AgentVersion agentVersion;
-
-  /**
-   * Host Health Status
-   */
-  private HostHealthStatus healthStatus;
-  
-  /**
-   * Public name.
-   */
-  private String publicHostname = null;
-
-  /**
-   * Host State
-   */
-  private String hostState;
-
-  public HostResponse(String hostname, String clusterName,
-                      String ipv4, String ipv6, int cpuCount, String osArch, String osType,
-                      String osInfo, long availableMemBytes, long totalMemBytes,
-                      List<DiskInfo> disksInfo, long lastHeartbeatTime,
-                      long lastRegistrationTime, String rackInfo,
-                      Map<String, String> hostAttributes, AgentVersion agentVersion,
-                      HostHealthStatus healthStatus, String hostState) {
-    super();
-    this.hostname = hostname;
-    this.clusterName = clusterName;
-    this.ipv4 = ipv4;
-    this.ipv6 = ipv6;
-    this.cpuCount = cpuCount;
-    this.osArch = osArch;
-    this.osType = osType;
-    this.osInfo = osInfo;
-    this.availableMemBytes = availableMemBytes;
-    this.totalMemBytes = totalMemBytes;
-    this.disksInfo = disksInfo;
-    this.lastHeartbeatTime = lastHeartbeatTime;
-    this.lastRegistrationTime = lastRegistrationTime;
-    this.rackInfo = rackInfo;
-    this.hostAttributes = hostAttributes;
-    this.agentVersion = agentVersion;
-    this.healthStatus = healthStatus;
-    this.setHostState(hostState);
-  }
-
-  //todo: why are we passing in empty strings for host/cluster name instead of null?
-  public HostResponse(String hostname) {
-    this(hostname, "", "", "",
-        0, "", "",
-        "", 0, 0, new ArrayList<DiskInfo>(),
-        0, 0, "",
-        new HashMap<String, String>(),
-        null, null, null);
-  }
-
-  /**
-   * @return the hostname
-   */
-  public String getHostname() {
-    return hostname;
-  }
-
-  /**
-   * @param hostname the hostname to set
-   */
-  public void setHostname(String hostname) {
-    this.hostname = hostname;
-  }
-
-  /**
-   * @return the clusterName
-   */
-  public String getClusterName() {
-    return clusterName;
-  }
-
-  /**
-   * @param clusterName the name of the associated cluster
-   */
-  public void setClusterName(String clusterName) {
-    this.clusterName = clusterName;
-  }
-
-  /**
-   * @return the ipv4
-   */
-  public String getIpv4() {
-    return ipv4;
-  }
-
-  /**
-   * @param ipv4 the ipv4 to set
-   */
-  public void setIpv4(String ipv4) {
-    this.ipv4 = ipv4;
-  }
-
-  /**
-   * @return the ipv6
-   */
-  public String getIpv6() {
-    return ipv6;
-  }
-
-  /**
-   * @param ipv6 the ipv6 to set
-   */
-  public void setIpv6(String ipv6) {
-    this.ipv6 = ipv6;
-  }
-
-  /**
-   * @return the cpuCount
-   */
-  public int getCpuCount() {
-    return cpuCount;
-  }
-
-  /**
-   * @param cpuCount the cpuCount to set
-   */
-  public void setCpuCount(int cpuCount) {
-    this.cpuCount = cpuCount;
-  }
-
-  /**
-   * @return the osArch
-   */
-  public String getOsArch() {
-    return osArch;
-  }
-
-  /**
-   * @param osArch the osArch to set
-   */
-  public void setOsArch(String osArch) {
-    this.osArch = osArch;
-  }
-
-  /**
-   * @return the osType
-   */
-  public String getOsType() {
-    return osType;
-  }
-
-  /**
-   * @param osType the osType to set
-   */
-  public void setOsType(String osType) {
-    this.osType = osType;
-  }
-
-  /**
-   * @return the osInfo
-   */
-  public String getOsInfo() {
-    return osInfo;
-  }
-
-  /**
-   * @param osInfo the osInfo to set
-   */
-  public void setOsInfo(String osInfo) {
-    this.osInfo = osInfo;
-  }
-
-  /**
-   * @return the availableMemBytes
-   */
-  public long getAvailableMemBytes() {
-    return availableMemBytes;
-  }
-
-  /**
-   * @param availableMemBytes the availableMemBytes to set
-   */
-  public void setAvailableMemBytes(long availableMemBytes) {
-    this.availableMemBytes = availableMemBytes;
-  }
-
-  /**
-   * @return the totalMemBytes
-   */
-  public long getTotalMemBytes() {
-    return totalMemBytes;
-  }
-
-  /**
-   * @param totalMemBytes the totalMemBytes to set
-   */
-  public void setTotalMemBytes(long totalMemBytes) {
-    this.totalMemBytes = totalMemBytes;
-  }
-
-  /**
-   * @return the disksInfo
-   */
-  public List<DiskInfo> getDisksInfo() {
-    return disksInfo;
-  }
-
-  /**
-   * @param disksInfo the disksInfo to set
-   */
-  public void setDisksInfo(List<DiskInfo> disksInfo) {
-    this.disksInfo = disksInfo;
-  }
-
-  /**
-   * @return the lastHeartbeatTime
-   */
-  public long getLastHeartbeatTime() {
-    return lastHeartbeatTime;
-  }
-
-  /**
-   * @param lastHeartbeatTime the lastHeartbeatTime to set
-   */
-  public void setLastHeartbeatTime(long lastHeartbeatTime) {
-    this.lastHeartbeatTime = lastHeartbeatTime;
-  }
-
-  /**
-   * @return the lastRegistrationTime
-   */
-  public long getLastRegistrationTime() {
-    return lastRegistrationTime;
-  }
-
-  /**
-   * @param lastRegistrationTime the lastRegistrationTime to set
-   */
-  public void setLastRegistrationTime(long lastRegistrationTime) {
-    this.lastRegistrationTime = lastRegistrationTime;
-  }
-
-  /**
-   * @return the rackInfo
-   */
-  public String getRackInfo() {
-    return rackInfo;
-  }
-
-  /**
-   * @param rackInfo the rackInfo to set
-   */
-  public void setRackInfo(String rackInfo) {
-    this.rackInfo = rackInfo;
-  }
-
-  /**
-   * @return the hostAttributes
-   */
-  public Map<String, String> getHostAttributes() {
-    return hostAttributes;
-  }
-
-  /**
-   * @param hostAttributes the hostAttributes to set
-   */
-  public void setHostAttributes(Map<String, String> hostAttributes) {
-    this.hostAttributes = hostAttributes;
-  }
-
-  /**
-   * @return the agentVersion
-   */
-  public AgentVersion getAgentVersion() {
-    return agentVersion;
-  }
-
-  /**
-   * @param agentVersion the agentVersion to set
-   */
-  public void setAgentVersion(AgentVersion agentVersion) {
-    this.agentVersion = agentVersion;
-  }
-
-  /**
-   * @return the healthStatus
-   */
-  public HostHealthStatus getHealthStatus() {
-    return healthStatus;
-  }
-
-  /**
-   * @param healthStatus the healthStatus to set
-   */
-  public void setHealthStatus(HostHealthStatus healthStatus) {
-    this.healthStatus = healthStatus;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    HostResponse that = (HostResponse) o;
-
-    if (hostname != null ?
-        !hostname.equals(that.hostname) : that.hostname != null) {
-      return false;
-    }
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = hostname != null ? hostname.hashCode() : 0;
-    return result;
-  }
-
-  public String getPublicHostName() {
-    return publicHostname;
-  }
-  
-  public void setPublicHostName(String name) {
-    publicHostname = name;
-  }
-
-  /**
-   * @return the hostState
-   */
-  public String getHostState() {
-    return hostState;
-  }
-
-  /**
-   * @param hostState the hostState to set
-   */
-  public void setHostState(String hostState) {
-    this.hostState = hostState;
-  }
-
-
-  /**
-   * @return the last environment information reported by the agent
-   */
-  public AgentEnv getLastAgentEnv() {
-    return lastAgentEnv;
-  }
-
-  /**
-   * @param agentEnv the last agent environment to set
-   */
-  public void setLastAgentEnv(AgentEnv agentEnv) {
-    lastAgentEnv = agentEnv;
-  }
-
-}
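
Note that HostResponse bases equals() and hashCode() on the hostname alone, so hash-based collections deduplicate responses per host even when other fields differ. A small sketch, assuming the class is on the classpath:

import java.util.HashSet;
import java.util.Set;

import org.apache.ambari.server.controller.HostResponse;

// Sketch: identity is the hostname only; differing state does not affect equality.
public class HostResponseEqualityDemo {
  public static void main(String[] args) {
    HostResponse a = new HostResponse("h1.example.com");
    HostResponse b = new HostResponse("h1.example.com");
    b.setRackInfo("/rack-42"); // extra state, ignored by equals()/hashCode()
    Set<HostResponse> hosts = new HashSet<HostResponse>();
    hosts.add(a);
    hosts.add(b);
    System.out.println(hosts.size()); // 1
  }
}
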
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/HostsMap.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/HostsMap.java
deleted file mode 100644
index f7e5876..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/HostsMap.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.Properties;
-
-import org.apache.ambari.server.configuration.Configuration;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.inject.Inject;
-import com.google.inject.Singleton;
-
-/**
- * Stores the mapping of hostnames to be used in any configuration
- * on the server.
- */
-@Singleton
-public class HostsMap {
-  private final static Logger LOG = LoggerFactory
-      .getLogger(HostsMap.class);
-
-  private String hostsMapFile;
-  private Properties hostsMap;
-
-  @Inject
-  public HostsMap(Configuration conf) {
-    hostsMapFile = conf.getHostsMapFile();
-    setupMap();
-  }
-  
-  public HostsMap(String file) {
-    hostsMapFile = file;
-  }
-
-  public void setupMap() {
-    InputStream inputStream = null;
-    LOG.info("Using hostsmap file " + this.hostsMapFile);
-    try {
-      if (hostsMapFile != null) {
-        hostsMap = new Properties();
-        inputStream = new FileInputStream(new File(hostsMapFile));
-        // load the properties
-        hostsMap.load(inputStream);
-      }
-    } catch (FileNotFoundException fnf) {
-      LOG.info("No configuration file " + hostsMapFile + " found in classpath.", fnf);
-    } catch (IOException ie) {
-      throw new IllegalArgumentException("Can't read configuration file " +
-          hostsMapFile, ie);
-    } finally {
-      if (inputStream != null) {
-        try {
-          inputStream.close();
-        } catch(IOException io) {
-          //ignore 
-        }
-      }
-    }
-  }
-
-  /**
-   * Returns the mapped name for a host, if a mapping is available.
-   * @param hostName the hostname to look up
-   * @return the mapped hostname, or the input hostname if no mapping exists
-   */
-  public String getHostMap(String hostName) {
-    if (hostsMapFile == null || hostsMap == null) {
-      return hostName;
-    }
-    return hostsMap.getProperty(hostName, hostName);
-  }
-
-}
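
HostsMap is a thin wrapper over java.util.Properties: each line of the hostsmap file maps one name to another, and unmapped names fall through unchanged via getProperty(key, default). A sketch, assuming the class is on the classpath; the file path is a placeholder:

import java.io.FileWriter;
import java.io.IOException;

import org.apache.ambari.server.controller.HostsMap;

// Sketch: write a one-line properties file and resolve names through it.
public class HostsMapDemo {
  public static void main(String[] args) throws IOException {
    FileWriter writer = new FileWriter("/tmp/hostsmap.properties"); // placeholder path
    writer.write("internal.example.com=public.example.com\n");
    writer.close();

    HostsMap map = new HostsMap("/tmp/hostsmap.properties");
    map.setupMap(); // the String constructor does not load the file by itself
    System.out.println(map.getHostMap("internal.example.com")); // public.example.com
    System.out.println(map.getHostMap("other.example.com"));    // unchanged
  }
}
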
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/RequestStatusRequest.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/RequestStatusRequest.java
deleted file mode 100644
index a13154c..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/RequestStatusRequest.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller;
-
-public class RequestStatusRequest {
-
-  private final Long requestId;
-  private final String requestStatus;
-
-  public RequestStatusRequest(Long requestId, String requestStatus) {
-    super();
-    this.requestId = requestId;
-    this.requestStatus = requestStatus;
-  }
-
-  /**
-   * @return the requestId
-   */
-  public Long getRequestId() {
-    return requestId;
-  }
-
-  /**
-   * @return the requestStatus
-   */
-  public String getRequestStatus() {
-    return requestStatus;
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/RequestStatusResponse.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/RequestStatusResponse.java
deleted file mode 100644
index 775517e..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/RequestStatusResponse.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller;
-
-import java.util.List;
-
-public class RequestStatusResponse {
-
-  // Request ID for tracking async operations
-  private final Long requestId;
-
-  List<ShortTaskStatus> tasks;
-
-  // TODO how are logs to be sent back?
-  private String logs;
-
-  // TODO stage specific information
-
-  public RequestStatusResponse(Long requestId) {
-    super();
-    this.requestId = requestId;
-  }
-
-  /**
-   * @return the logs
-   */
-  public String getLogs() {
-    return logs;
-  }
-
-  /**
-   * @param logs the logs to set
-   */
-  public void setLogs(String logs) {
-    this.logs = logs;
-  }
-
-  /**
-   * @return the requestId
-   */
-  public long getRequestId() {
-    return requestId;
-  }
-
-  public List<ShortTaskStatus> getTasks() {
-    return tasks;
-  }
-
-  public void setTasks(List<ShortTaskStatus> tasks) {
-    this.tasks = tasks;
-  }
-
-
-
-}
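
RequestStatusResponse pairs an async request id with the short statuses of its tasks; note that getRequestId() unboxes the Long field, so constructing with a null id would make that call throw a NullPointerException. A sketch, assuming both classes are on the classpath:

import java.util.Collections;

import org.apache.ambari.server.controller.RequestStatusResponse;
import org.apache.ambari.server.controller.ShortTaskStatus;

// Sketch: attach one short task status to a response for async request 5.
public class RequestStatusDemo {
  public static void main(String[] args) {
    RequestStatusResponse response = new RequestStatusResponse(Long.valueOf(5));
    ShortTaskStatus task = new ShortTaskStatus();
    task.setTaskId(1L);
    task.setStatus("QUEUED");
    response.setTasks(Collections.singletonList(task));
    System.out.println(response.getRequestId() + " -> "
        + response.getTasks().size() + " task(s)");
  }
}
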
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/RequestsByStatusesRequest.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/RequestsByStatusesRequest.java
deleted file mode 100644
index e86432d..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/RequestsByStatusesRequest.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller;
-
-import java.util.HashSet;
-import java.util.Set;
-
-import org.apache.ambari.server.actionmanager.HostRoleStatus;
-
-public class RequestsByStatusesRequest {
-  Set<String> statuses;
-
-  public RequestsByStatusesRequest() {
-    statuses = new HashSet<String>();
-    statuses.add(HostRoleStatus.PENDING.toString());
-    statuses.add(HostRoleStatus.QUEUED.toString());
-    statuses.add(HostRoleStatus.IN_PROGRESS.toString());
-  }
-
-  public RequestsByStatusesRequest(Set<String> statuses) {
-    this.statuses = statuses;
-  }
-
-  public Set<String> getStatuses() {
-    return statuses;
-  }
-
-  public void setStatuses(Set<String> statuses) {
-    this.statuses = statuses;
-  }
-}
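
The no-arg constructor above pre-seeds the three unfinished states (PENDING, QUEUED, IN_PROGRESS), so callers get in-flight requests by default. A sketch, assuming the class is on the classpath:

import org.apache.ambari.server.controller.RequestsByStatusesRequest;

// Sketch: the default constructor already filters to still-running requests.
public class RequestsByStatusesDemo {
  public static void main(String[] args) {
    RequestsByStatusesRequest request = new RequestsByStatusesRequest();
    System.out.println(request.getStatuses()); // PENDING, QUEUED, IN_PROGRESS in some order
  }
}
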
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostRequest.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostRequest.java
deleted file mode 100644
index 77e5e01..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostRequest.java
+++ /dev/null
@@ -1,147 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller;
-
-import java.util.Map;
-
-public class ServiceComponentHostRequest {
-
-  private String clusterName; // REF
-
-  private String serviceName;
-
-  private String componentName;
-
-  private String hostname;
-
-  // Config type -> version mapping
-  private Map<String, String> configVersions; // CREATE/UPDATE
-
-  private String desiredState; // CREATE/UPDATE
-
-  public ServiceComponentHostRequest(String clusterName,
-                                     String serviceName,
-                                     String componentName, String hostname,
-                                     Map<String, String> configVersions, String desiredState) {
-    super();
-    this.clusterName = clusterName;
-    this.serviceName = serviceName;
-    this.componentName = componentName;
-    this.hostname = hostname;
-    this.configVersions = configVersions;
-    this.desiredState = desiredState;
-  }
-
-  /**
-   * @return the serviceName
-   */
-  public String getServiceName() {
-    return serviceName;
-  }
-
-  /**
-   * @param serviceName the serviceName to set
-   */
-  public void setServiceName(String serviceName) {
-    this.serviceName = serviceName;
-  }
-
-  /**
-   * @return the componentName
-   */
-  public String getComponentName() {
-    return componentName;
-  }
-
-  /**
-   * @param componentName the componentName to set
-   */
-  public void setComponentName(String componentName) {
-    this.componentName = componentName;
-  }
-
-  /**
-   * @return the hostname
-   */
-  public String getHostname() {
-    return hostname;
-  }
-
-  /**
-   * @param hostname the hostname to set
-   */
-  public void setHostname(String hostname) {
-    this.hostname = hostname;
-  }
-
-  /**
-   * @return the configVersions
-   */
-  public Map<String, String> getConfigVersions() {
-    return configVersions;
-  }
-
-  /**
-   * @param configVersions the configVersions to set
-   */
-  public void setConfigVersions(Map<String, String> configVersions) {
-    this.configVersions = configVersions;
-  }
-
-  /**
-   * @return the desiredState
-   */
-  public String getDesiredState() {
-    return desiredState;
-  }
-
-  /**
-   * @param desiredState the desiredState to set
-   */
-  public void setDesiredState(String desiredState) {
-    this.desiredState = desiredState;
-  }
-
-  /**
-   * @return the clusterName
-   */
-  public String getClusterName() {
-    return clusterName;
-  }
-
-  /**
-   * @param clusterName the clusterName to set
-   */
-  public void setClusterName(String clusterName) {
-    this.clusterName = clusterName;
-  }
-
-
-  public String toString() {
-    StringBuilder sb = new StringBuilder();
-    sb.append("{"
-        + " clusterName=" + clusterName
-        + ", serviceName=" + serviceName
-        + ", componentName=" + componentName
-        + ", hostname=" + hostname
-        + ", desiredState=" + desiredState
-        + "}");
-    return sb.toString();
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java
deleted file mode 100644
index 0d9b4ed..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java
+++ /dev/null
@@ -1,215 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller;
-
-import java.util.Map;
-
-public class ServiceComponentHostResponse {
-
-  private String clusterName; // REF
-
-  private String serviceName;
-
-  private String componentName;
-
-  private String hostname;
-
-  // Config type -> version mapping
-  private Map<String, String> configs;
-
-  private Map<String, String> desiredConfigs;
-
-  private String liveState;
-
-  private String stackVersion;
-
-  private String desiredState;
-
-  public ServiceComponentHostResponse(String clusterName, String serviceName,
-                                      String componentName, String hostname,
-                                      Map<String, String> configVersions,
-                                      Map<String, String> desiredConfigs,
-                                      String liveState,
-                                      String stackVersion, String desiredState) {
-    super();
-    this.clusterName = clusterName;
-    this.serviceName = serviceName;
-    this.componentName = componentName;
-    this.hostname = hostname;
-    this.configs = configVersions;
-    this.desiredConfigs = desiredConfigs;
-    this.liveState = liveState;
-    this.stackVersion = stackVersion;
-    this.desiredState = desiredState;
-  }
-
-  /**
-   * @return the serviceName
-   */
-  public String getServiceName() {
-    return serviceName;
-  }
-
-  /**
-   * @param serviceName the serviceName to set
-   */
-  public void setServiceName(String serviceName) {
-    this.serviceName = serviceName;
-  }
-
-  /**
-   * @return the componentName
-   */
-  public String getComponentName() {
-    return componentName;
-  }
-
-  /**
-   * @param componentName the componentName to set
-   */
-  public void setComponentName(String componentName) {
-    this.componentName = componentName;
-  }
-
-  /**
-   * @return the hostname
-   */
-  public String getHostname() {
-    return hostname;
-  }
-
-  /**
-   * @param hostname the hostname to set
-   */
-  public void setHostname(String hostname) {
-    this.hostname = hostname;
-  }
-
-  /**
-   * @return the configs
-   */
-  public Map<String, String> getConfigs() {
-    return configs;
-  }
-
-  /**
-   * @param configVersions the configVersions to set
-   */
-  public void setConfigs(Map<String, String> configVersions) {
-    this.configs = configVersions;
-  }
-
-  /**
-   * @return the liveState
-   */
-  public String getLiveState() {
-    return liveState;
-  }
-
-  /**
-   * @param liveState the liveState to set
-   */
-  public void setLiveState(String liveState) {
-    this.liveState = liveState;
-  }
-
-  /**
-   * @return the stackVersion
-   */
-  public String getStackVersion() {
-    return stackVersion;
-  }
-
-  /**
-   * @param stackVersion the stackVersion to set
-   */
-  public void setStackVersion(String stackVersion) {
-    this.stackVersion = stackVersion;
-  }
-
-  /**
-   * @return the desiredState
-   */
-  public String getDesiredState() {
-    return desiredState;
-  }
-
-  /**
-   * @param desiredState the desiredState to set
-   */
-  public void setDesiredState(String desiredState) {
-    this.desiredState = desiredState;
-  }
-
-  /**
-   * @return the clusterName
-   */
-  public String getClusterName() {
-    return clusterName;
-  }
-
-  /**
-   * @param clusterName the clusterName to set
-   */
-  public void setClusterName(String clusterName) {
-    this.clusterName = clusterName;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    ServiceComponentHostResponse that =
-        (ServiceComponentHostResponse) o;
-
-    if (clusterName != null ?
-        !clusterName.equals(that.clusterName) : that.clusterName != null) {
-      return false;
-    }
-    if (serviceName != null ?
-        !serviceName.equals(that.serviceName) : that.serviceName != null) {
-      return false;
-    }
-    if (componentName != null ?
-        !componentName.equals(that.componentName) : that.componentName != null){
-      return false;
-    }
-    if (hostname != null ?
-        !hostname.equals(that.hostname) : that.hostname != null) {
-      return false;
-    }
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = clusterName != null ? clusterName.hashCode() : 0;
-    result = 71 * result + (serviceName != null ? serviceName.hashCode() : 0);
-    result = 71 * result + (componentName != null ? componentName.hashCode():0);
-    result = 71 * result + (hostname != null ? hostname.hashCode() : 0);
-    return result;
-  }
-
-  public Map<String, String> getDesiredConfigs() {
-    return desiredConfigs;
-  }
-
-}
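
Here equality is the (cluster, service, component, host) tuple; live state, desired state, and config versions are ignored by equals() and hashCode(). A sketch, assuming the class is on the classpath; the stack version string is a placeholder:

import org.apache.ambari.server.controller.ServiceComponentHostResponse;

// Sketch: two responses for the same component placement compare equal even
// though their live states differ.
public class SchResponseIdentityDemo {
  public static void main(String[] args) {
    ServiceComponentHostResponse a = new ServiceComponentHostResponse(
        "c1", "HDFS", "DATANODE", "h1.example.com", null, null,
        "STARTED", "HDP-1.2.0", "STARTED");
    ServiceComponentHostResponse b = new ServiceComponentHostResponse(
        "c1", "HDFS", "DATANODE", "h1.example.com", null, null,
        "INSTALLED", "HDP-1.2.0", "STARTED"); // different live state
    System.out.println(a.equals(b)); // true
  }
}
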
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentRequest.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentRequest.java
deleted file mode 100644
index 277a00f..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentRequest.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller;
-
-import java.util.Map;
-
-public class ServiceComponentRequest {
-
-  private String clusterName; // REF
-
-  private String serviceName; // GET/CREATE/UPDATE/DELETE
-
-  private String componentName; // GET/CREATE/UPDATE/DELETE
-
-  // Config type -> version mapping
-  private Map<String, String> configVersions; // CREATE/UPDATE
-
-  private String desiredState; // CREATE/UPDATE
-
-  public ServiceComponentRequest(String clusterName,
-                                 String serviceName, String componentName,
-                                 Map<String, String> configVersions, String desiredState) {
-    super();
-    this.clusterName = clusterName;
-    this.serviceName = serviceName;
-    this.componentName = componentName;
-    this.configVersions = configVersions;
-    this.desiredState = desiredState;
-  }
-
-  /**
-   * @return the serviceName
-   */
-  public String getServiceName() {
-    return serviceName;
-  }
-
-  /**
-   * @param serviceName the serviceName to set
-   */
-  public void setServiceName(String serviceName) {
-    this.serviceName = serviceName;
-  }
-
-  /**
-   * @return the componentName
-   */
-  public String getComponentName() {
-    return componentName;
-  }
-
-  /**
-   * @param componentName the componentName to set
-   */
-  public void setComponentName(String componentName) {
-    this.componentName = componentName;
-  }
-
-  /**
-   * @return the configVersions
-   */
-  public Map<String, String> getConfigVersions() {
-    return configVersions;
-  }
-
-  /**
-   * @param configVersions the configVersions to set
-   */
-  public void setConfigVersions(Map<String, String> configVersions) {
-    this.configVersions = configVersions;
-  }
-
-  /**
-   * @return the desiredState
-   */
-  public String getDesiredState() {
-    return desiredState;
-  }
-
-  /**
-   * @param desiredState the desiredState to set
-   */
-  public void setDesiredState(String desiredState) {
-    this.desiredState = desiredState;
-  }
-
-  /**
-   * @return the clusterName
-   */
-  public String getClusterName() {
-    return clusterName;
-  }
-
-  /**
-   * @param clusterName the clusterName to set
-   */
-  public void setClusterName(String clusterName) {
-    this.clusterName = clusterName;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentResponse.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentResponse.java
deleted file mode 100644
index f2c1f88..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentResponse.java
+++ /dev/null
@@ -1,187 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller;
-
-import java.util.Map;
-
-public class ServiceComponentResponse {
-
-  private Long clusterId; // REF
-
-  private String clusterName; // REF
-
-  private String serviceName;
-
-  private String componentName;
-
-  // Config type -> version mapping
-  private Map<String, String> configVersions;
-
-  private String desiredStackVersion;
-
-  private String desiredState;
-
-  public ServiceComponentResponse(Long clusterId, String clusterName,
-                                  String serviceName,
-                                  String componentName,
-                                  Map<String, String> configVersions,
-                                  String desiredStackVersion,
-                                  String desiredState) {
-    super();
-    this.clusterId = clusterId;
-    this.clusterName = clusterName;
-    this.serviceName = serviceName;
-    this.componentName = componentName;
-    this.configVersions = configVersions;
-    this.desiredStackVersion = desiredStackVersion;
-    this.desiredState = desiredState;
-  }
-
-  /**
-   * @return the serviceName
-   */
-  public String getServiceName() {
-    return serviceName;
-  }
-
-  /**
-   * @param serviceName the serviceName to set
-   */
-  public void setServiceName(String serviceName) {
-    this.serviceName = serviceName;
-  }
-
-  /**
-   * @return the componentName
-   */
-  public String getComponentName() {
-    return componentName;
-  }
-
-  /**
-   * @param componentName the componentName to set
-   */
-  public void setComponentName(String componentName) {
-    this.componentName = componentName;
-  }
-
-  /**
-   * @return the configVersions
-   */
-  public Map<String, String> getConfigVersions() {
-    return configVersions;
-  }
-
-  /**
-   * @param configVersions the configVersions to set
-   */
-  public void setConfigVersions(Map<String, String> configVersions) {
-    this.configVersions = configVersions;
-  }
-
-  /**
-   * @return the clusterId
-   */
-  public Long getClusterId() {
-    return clusterId;
-  }
-
-  /**
-   * @param clusterId the clusterId to set
-   */
-  public void setClusterId(Long clusterId) {
-    this.clusterId = clusterId;
-  }
-
-  /**
-   * @return the clusterName
-   */
-  public String getClusterName() {
-    return clusterName;
-  }
-
-  /**
-   * @param clusterName the clusterName to set
-   */
-  public void setClusterName(String clusterName) {
-    this.clusterName = clusterName;
-  }
-
-  /**
-   * @return the desiredState
-   */
-  public String getDesiredState() {
-    return desiredState;
-  }
-
-  /**
-   * @param desiredState the desiredState to set
-   */
-  public void setDesiredState(String desiredState) {
-    this.desiredState = desiredState;
-  }
-
-  /**
-   * @return the desiredStackVersion
-   */
-  public String getDesiredStackVersion() {
-    return desiredStackVersion;
-  }
-
-  /**
-   * @param desiredStackVersion the desiredStackVersion to set
-   */
-  public void setDesiredStackVersion(String desiredStackVersion) {
-    this.desiredStackVersion = desiredStackVersion;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    ServiceComponentResponse that =
-        (ServiceComponentResponse) o;
-
-    if (clusterName != null ?
-        !clusterName.equals(that.clusterName) : that.clusterName != null) {
-      return false;
-    }
-    if (serviceName != null ?
-        !serviceName.equals(that.serviceName) : that.serviceName != null) {
-      return false;
-    }
-    if (componentName != null ?
-        !componentName.equals(that.componentName) : that.componentName != null){
-      return false;
-    }
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = clusterId != null? clusterId.intValue() : 0;
-    result = 71 * result + (clusterName != null ? clusterName.hashCode() : 0);
-    result = 71 * result + (serviceName != null ? serviceName.hashCode() : 0);
-    result = 71 * result + (componentName != null ? componentName.hashCode():0);
-    return result;
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceRequest.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceRequest.java
deleted file mode 100644
index b2c8c73..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceRequest.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller;
-
-import java.util.Map;
-import java.util.Map.Entry;
-
-public class ServiceRequest {
-
-  private String clusterName; // REF
-
-  private String serviceName; // GET/CREATE/UPDATE/DELETE
-
-  // Config type -> version mapping
-  private Map<String, String> configVersions; // CREATE/UPDATE
-
-  private String desiredState; // CREATE/UPDATE
-
-  public ServiceRequest(String clusterName, String serviceName,
-                        Map<String, String> configVersions, String desiredState) {
-    super();
-    this.clusterName = clusterName;
-    this.serviceName = serviceName;
-    this.configVersions = configVersions;
-    this.desiredState = desiredState;
-  }
-
-  /**
-   * @return the serviceName
-   */
-  public String getServiceName() {
-    return serviceName;
-  }
-
-  /**
-   * @param serviceName the serviceName to set
-   */
-  public void setServiceName(String serviceName) {
-    this.serviceName = serviceName;
-  }
-
-  /**
-   * @return the configVersions
-   */
-  public Map<String, String> getConfigVersions() {
-    return configVersions;
-  }
-
-  /**
-   * @param configVersions the configVersions to set
-   */
-  public void setConfigVersions(Map<String, String> configVersions) {
-    this.configVersions = configVersions;
-  }
-
-  /**
-   * @return the desiredState
-   */
-  public String getDesiredState() {
-    return desiredState;
-  }
-
-  /**
-   * @param desiredState the desiredState to set
-   */
-  public void setDesiredState(String desiredState) {
-    this.desiredState = desiredState;
-  }
-
-  /**
-   * @return the clusterName
-   */
-  public String getClusterName() {
-    return clusterName;
-  }
-
-  /**
-   * @param clusterName the clusterName to set
-   */
-  public void setClusterName(String clusterName) {
-    this.clusterName = clusterName;
-  }
-
-  public String toString() {
-    StringBuilder sb = new StringBuilder();
-    sb.append("clusterName=" + clusterName
-        + ", serviceName=" + serviceName
-        + ", desiredState=" + desiredState
-        + ", configs=[ ");
-    if (configVersions != null) {
-      for (Entry<String, String> entry : configVersions.entrySet()) {
-        sb.append("{ type=" + entry.getKey()
-            + ", versionTag=" + entry.getValue() + "}, ");
-      }
-    }
-    sb.append(" ]");
-    return sb.toString();
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceResponse.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceResponse.java
deleted file mode 100644
index 64dc8e7..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceResponse.java
+++ /dev/null
@@ -1,166 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller;
-
-import java.util.Map;
-
-public class ServiceResponse {
-
-  private Long clusterId;
-
-  private String clusterName;
-
-  private String serviceName;
-
-  private String desiredStackVersion;
-
-  private String desiredState;
-
-  // Config type -> version mapping
-  private Map<String, String> configVersions;
-
-  public ServiceResponse(Long clusterId, String clusterName,
-                         String serviceName,
-                         Map<String, String> configVersions,
-                         String desiredStackVersion, String desiredState) {
-    super();
-    this.clusterId = clusterId;
-    this.clusterName = clusterName;
-    this.serviceName = serviceName;
-    this.configVersions = configVersions;
-    this.setDesiredStackVersion(desiredStackVersion);
-    this.setDesiredState(desiredState);
-  }
-
-  /**
-   * @return the serviceName
-   */
-  public String getServiceName() {
-    return serviceName;
-  }
-
-  /**
-   * @param serviceName the serviceName to set
-   */
-  public void setServiceName(String serviceName) {
-    this.serviceName = serviceName;
-  }
-
-  /**
-   * @return the configVersions
-   */
-  public Map<String, String> getConfigVersions() {
-    return configVersions;
-  }
-
-  /**
-   * @param configVersions the configVersions to set
-   */
-  public void setConfigVersions(Map<String, String> configVersions) {
-    this.configVersions = configVersions;
-  }
-
-  /**
-   * @return the clusterId
-   */
-  public Long getClusterId() {
-    return clusterId;
-  }
-
-  /**
-   * @param clusterId the clusterId to set
-   */
-  public void setClusterId(Long clusterId) {
-    this.clusterId = clusterId;
-  }
-
-  /**
-   * @return the clusterName
-   */
-  public String getClusterName() {
-    return clusterName;
-  }
-
-  /**
-   * @param clusterName the clusterName to set
-   */
-  public void setClusterName(String clusterName) {
-    this.clusterName = clusterName;
-  }
-
-  /**
-   * @return the desiredState
-   */
-  public String getDesiredState() {
-    return desiredState;
-  }
-
-  /**
-   * @param desiredState the desiredState to set
-   */
-  public void setDesiredState(String desiredState) {
-    this.desiredState = desiredState;
-  }
-
-  /**
-   * @return the desiredStackVersion
-   */
-  public String getDesiredStackVersion() {
-    return desiredStackVersion;
-  }
-
-  /**
-   * @param desiredStackVersion the desiredStackVersion to set
-   */
-  public void setDesiredStackVersion(String desiredStackVersion) {
-    this.desiredStackVersion = desiredStackVersion;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    ServiceResponse that = (ServiceResponse) o;
-
-    if (clusterId != null ?
-        !clusterId.equals(that.clusterId) : that.clusterId != null) {
-      return false;
-    }
-    if (clusterName != null ?
-        !clusterName.equals(that.clusterName) : that.clusterName != null) {
-      return false;
-    }
-    if (serviceName != null ?
-        !serviceName.equals(that.serviceName) : that.serviceName != null) {
-      return false;
-    }
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = clusterId != null? clusterId.intValue() : 0;
-    result = 71 * result + (clusterName != null ? clusterName.hashCode() : 0);
-    result = 71 * result + (serviceName != null ? serviceName.hashCode() : 0);
-    return result;
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ShortTaskStatus.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ShortTaskStatus.java
deleted file mode 100644
index 66a65d3..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ShortTaskStatus.java
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller;
-
-import org.apache.ambari.server.actionmanager.HostRoleCommand;
-
-public class ShortTaskStatus {
-  protected long taskId;
-  protected long stageId;
-  protected String hostName;
-  protected String role;
-  protected String command;
-  protected String status;
-
-  public ShortTaskStatus() {
-  }
-
-  public ShortTaskStatus(int taskId, long stageId, String hostName, String role, String command, String status) {
-    this.taskId = taskId;
-    this.stageId = stageId;
-    this.hostName = hostName;
-    this.role = role;
-    this.command = command;
-    this.status = status;
-  }
-
-  public ShortTaskStatus(HostRoleCommand hostRoleCommand) {
-    this.taskId = hostRoleCommand.getTaskId();
-    this.stageId = hostRoleCommand.getStageId();
-    this.command = hostRoleCommand.getRoleCommand().toString();
-    this.hostName = hostRoleCommand.getHostName();
-    this.role = hostRoleCommand.getRole().toString();
-    this.status = hostRoleCommand.getStatus().toString();
-  }
-
-  public long getTaskId() {
-    return taskId;
-  }
-
-  public void setTaskId(long taskId) {
-    this.taskId = taskId;
-  }
-
-  public long getStageId() {
-    return stageId;
-  }
-
-  public void setStageId(long stageId) {
-    this.stageId = stageId;
-  }
-
-  public String getHostName() {
-    return hostName;
-  }
-
-  public void setHostName(String hostName) {
-    this.hostName = hostName;
-  }
-
-  public String getRole() {
-    return role;
-  }
-
-  public void setRole(String role) {
-    this.role = role;
-  }
-
-  public String getCommand() {
-    return command;
-  }
-
-  public void setCommand(String command) {
-    this.command = command;
-  }
-
-  public String getStatus() {
-    return status;
-  }
-
-  public void setStatus(String status) {
-    this.status = status;
-  }
-
-  public String toString() {
-    StringBuilder sb = new StringBuilder();
-    sb.append("ShortTaskStatusDump "
-        + ", stageId=" + stageId
-        + ", taskId=" + taskId
-        + ", hostname=" + hostName
-        + ", role=" + role
-        + ", command=" + command
-        + ", status=" + status);
-    return sb.toString();
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/TaskStatusRequest.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/TaskStatusRequest.java
deleted file mode 100644
index c966e7f..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/TaskStatusRequest.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller;
-
-public class TaskStatusRequest {
-  protected Long requestId;
-  protected Long taskId;
-
-  public TaskStatusRequest() {
-  }
-
-  public TaskStatusRequest(Long requestId, Long taskId) {
-    this.requestId = requestId;
-    this.taskId = taskId;
-  }
-
-  public Long getRequestId() {
-    return requestId;
-  }
-
-  public void setRequestId(Long requestId) {
-    this.requestId = requestId;
-  }
-
-  public Long getTaskId() {
-    return taskId;
-  }
-
-  public void setTaskId(Long taskId) {
-    this.taskId = taskId;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/TaskStatusResponse.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/TaskStatusResponse.java
deleted file mode 100644
index 7f068e2..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/TaskStatusResponse.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller;
-
-import org.apache.ambari.server.actionmanager.HostRoleCommand;
-
-public class TaskStatusResponse extends ShortTaskStatus {
-  private long requestId;
-  private int exitCode;
-  private String stderr;
-  private String stdout;
-  private long startTime;
-  private short attemptCount;
-
-  public TaskStatusResponse() {
-  }
-
-  public TaskStatusResponse(long requestId,
-                            int taskId, long stageId, String hostName, String role, String command, String status,
-                            int exitCode, String stderr, String stdout, long startTime, short attemptCount) {
-    super(taskId, stageId, hostName, role, command, status);
-    this.requestId = requestId;
-    this.exitCode = exitCode;
-    this.stderr = stderr;
-    this.stdout = stdout;
-    this.startTime = startTime;
-    this.attemptCount = attemptCount;
-  }
-
-  public TaskStatusResponse(HostRoleCommand hostRoleCommand) {
-    super(hostRoleCommand);
-    this.requestId = hostRoleCommand.getRequestId();
-    this.exitCode = hostRoleCommand.getExitCode();
-    this.stderr = hostRoleCommand.getStderr();
-    this.stdout = hostRoleCommand.getStdout();
-    this.startTime = hostRoleCommand.getStartTime();
-    this.attemptCount = hostRoleCommand.getAttemptCount();
-  }
-
-  public long getRequestId() {
-    return requestId;
-  }
-
-  public void setRequestId(long requestId) {
-    this.requestId = requestId;
-  }
-
-  public int getExitCode() {
-    return exitCode;
-  }
-
-  public void setExitCode(int exitCode) {
-    this.exitCode = exitCode;
-  }
-
-  public String getStderr() {
-    return stderr;
-  }
-
-  public void setStderr(String stderr) {
-    this.stderr = stderr;
-  }
-
-  public String getStdout() {
-    return stdout;
-  }
-
-  public void setStdout(String stdout) {
-    this.stdout = stdout;
-  }
-
-  public long getStartTime() {
-    return startTime;
-  }
-
-  public void setStartTime(long startTime) {
-    this.startTime = startTime;
-  }
-
-  public short getAttemptCount() {
-    return attemptCount;
-  }
-
-  public void setAttemptCount(short attemptCount) {
-    this.attemptCount = attemptCount;
-  }
-  
-  @Override
-  public String toString() {
-      return super.toString();
-  }
-}
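
TaskStatusResponse extends ShortTaskStatus, so the short-status setters sit alongside the execution-detail fields. A sketch using the no-arg constructor, assuming both classes are on the classpath:

import org.apache.ambari.server.controller.TaskStatusResponse;

// Sketch: populate a task status through inherited and local setters.
public class TaskStatusDemo {
  public static void main(String[] args) {
    TaskStatusResponse task = new TaskStatusResponse();
    task.setTaskId(42L);         // inherited from ShortTaskStatus
    task.setStatus("COMPLETED"); // inherited
    task.setRequestId(7L);
    task.setExitCode(0);
    task.setStdout("done");
    System.out.println(task.getRequestId() + ": " + task.getStatus());
  }
}
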
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/UserRequest.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/UserRequest.java
deleted file mode 100644
index 3ef8495..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/UserRequest.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller;
-
-import java.util.HashSet;
-import java.util.Set;
-
-/**
- * Represents a user maintenance request.
- */
-public class UserRequest {
-  private String userName;
-  private String password;
-  private String oldPassword;
-  private Set<String> roles = new HashSet<String>();
-
-  public UserRequest(String name) {
-    this.userName = name;
-  }
-
-  public String getUsername() {
-    return userName;
-  }
-
-  public Set<String> getRoles() {
-    return roles;
-  }
-
-  public void setRoles(Set<String> userRoles) {
-    roles = userRoles;
-  }
-
-  public String getPassword() {
-    return password;
-  }
-
-  public void setPassword(String userPass) {
-    password = userPass;
-  }
-
-  public String getOldPassword() {
-    return oldPassword;
-  }
-
-  public void setOldPassword(String oldUserPass) {
-    oldPassword = oldUserPass;
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder();
-    sb.append("User"
-        + ", username=" + userName
-        + ", roles=[ ");
-    if (roles != null && !roles.isEmpty()) {
-      boolean first = true;
-      for (String role : roles) {
-        if (!first) {
-          sb.append(",");
-        }
-        first = false;
-        sb.append(role);
-      }
-    }
-    sb.append(" ]");
-    return sb.toString();
-  }
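-
-  // e.g. a request for user "admin" holding the single role "admin" renders as:
-  //   User, username=admin, roles=[ admin ]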
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/UserResponse.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/UserResponse.java
deleted file mode 100644
index 4bf3a48..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/UserResponse.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller;
-
-import java.util.Collections;
-import java.util.Set;
-
-/**
- * Represents a user maintenance response.
- */
-public class UserResponse {
-
-  private Set<String> roles = Collections.emptySet();
-  private final String userName;
-  private final boolean isLdapUser;
-
-  public UserResponse(String name, boolean isLdapUser) {
-    this.userName = name;
-    this.isLdapUser = isLdapUser;
-  }
-
-  public String getUsername() {
-    return userName;
-  }
-
-  public Set<String> getRoles() {
-    return roles;
-  }
-
-  public void setRoles(Set<String> userRoles) {
-    roles = userRoles;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    UserResponse that = (UserResponse) o;
-
-    if (userName != null ?
-        !userName.equals(that.userName) : that.userName != null) {
-      return false;
-    }
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = userName != null ? userName.hashCode() : 0;
-    return result;
-  }
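-
-  // Note: equals() and hashCode() are based on the user name alone, so two
-  // responses for the same user compare equal regardless of their roles.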
-
-  /**
-   * @return the isLdapUser
-   */
-  public boolean isLdapUser() {
-    return isLdapUser;
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ganglia/GangliaComponentPropertyProvider.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ganglia/GangliaComponentPropertyProvider.java
deleted file mode 100644
index c7df3ac..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ganglia/GangliaComponentPropertyProvider.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.ganglia;
-
-import org.apache.ambari.server.controller.internal.PropertyInfo;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.StreamProvider;
-
-import java.util.Collections;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Ganglia property provider implementation for component resources.
- */
-public class GangliaComponentPropertyProvider extends GangliaPropertyProvider {
-
-
-  // ----- Constructors ------------------------------------------------------
-
-  public GangliaComponentPropertyProvider(Map<String, Map<String, PropertyInfo>> componentMetrics,
-                                          StreamProvider streamProvider,
-                                          GangliaHostProvider hostProvider,
-                                          String clusterNamePropertyId,
-                                          String componentNamePropertyId) {
-
-    super(componentMetrics, streamProvider, hostProvider,
-        clusterNamePropertyId, null, componentNamePropertyId);
-  }
-
-
-  // ----- GangliaPropertyProvider -------------------------------------------
-
-  @Override
-  protected String getHostName(Resource resource) {
-    return "__SummaryInfo__";
-  }
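-
-  // "__SummaryInfo__" is the pseudo host under which Ganglia rolls up
-  // cluster-wide summary data, so component resources report aggregate
-  // metrics rather than any single host's values.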
-
-  @Override
-  protected String getComponentName(Resource resource) {
-    return (String) resource.getPropertyValue(getComponentNamePropertyId());
-  }
-
-  @Override
-  protected Set<String> getGangliaClusterNames(Resource resource, String clusterName) {
-    return Collections.singleton(GANGLIA_CLUSTER_NAME_MAP.get(getComponentName(resource)));
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ganglia/GangliaHostComponentPropertyProvider.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ganglia/GangliaHostComponentPropertyProvider.java
deleted file mode 100644
index b8484fd..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ganglia/GangliaHostComponentPropertyProvider.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.ganglia;
-
-import org.apache.ambari.server.controller.internal.PropertyInfo;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.StreamProvider;
-
-import java.util.Collections;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Ganglia property provider implementation for host component resources.
- */
-public class GangliaHostComponentPropertyProvider extends GangliaPropertyProvider {
-
-
-  // ----- Constructors ------------------------------------------------------
-
-  public GangliaHostComponentPropertyProvider(Map<String, Map<String, PropertyInfo>> componentPropertyInfoMap,
-                                              StreamProvider streamProvider,
-                                              GangliaHostProvider hostProvider,
-                                              String clusterNamePropertyId,
-                                              String hostNamePropertyId,
-                                              String componentNamePropertyId) {
-
-    super(componentPropertyInfoMap, streamProvider, hostProvider,
-        clusterNamePropertyId, hostNamePropertyId, componentNamePropertyId);
-  }
-
-
-  // ----- GangliaPropertyProvider -------------------------------------------
-
-  @Override
-  protected String getHostName(Resource resource) {
-    return (String) resource.getPropertyValue(getHostNamePropertyId());
-  }
-
-  @Override
-  protected String getComponentName(Resource resource) {
-    return (String) resource.getPropertyValue(getComponentNamePropertyId());
-  }
-
-  @Override
-  protected Set<String> getGangliaClusterNames(Resource resource, String clusterName) {
-    return Collections.singleton(GANGLIA_CLUSTER_NAME_MAP.get(getComponentName(resource)));
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ganglia/GangliaHostPropertyProvider.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ganglia/GangliaHostPropertyProvider.java
deleted file mode 100644
index d37b777..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ganglia/GangliaHostPropertyProvider.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.ganglia;
-
-import org.apache.ambari.server.controller.internal.PropertyInfo;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.StreamProvider;
-
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Ganglia property provider implementation for host resources.
- */
-public class GangliaHostPropertyProvider extends GangliaPropertyProvider {
-
-  /**
-   * Set of Ganglia cluster names.
-   */
-  private static final Set<String> GANGLIA_CLUSTER_NAMES = new HashSet<String>();
-
-  static {
-    GANGLIA_CLUSTER_NAMES.add("HDPNameNode");
-    GANGLIA_CLUSTER_NAMES.add("HDPSlaves");
-    GANGLIA_CLUSTER_NAMES.add("HDPJobTracker");
-    GANGLIA_CLUSTER_NAMES.add("HDPHBaseMaster");
-  }
-
-  // ----- Constructors ------------------------------------------------------
-
-  public GangliaHostPropertyProvider(Map<String, Map<String, PropertyInfo>> componentPropertyInfoMap,
-                                     StreamProvider streamProvider,
-                                     GangliaHostProvider hostProvider,
-                                     String clusterNamePropertyId,
-                                     String hostNamePropertyId) {
-
-    super(componentPropertyInfoMap, streamProvider, hostProvider,
-        clusterNamePropertyId, hostNamePropertyId, null);
-  }
-
-
-  // ----- GangliaPropertyProvider -------------------------------------------
-
-  @Override
-  protected String getHostName(Resource resource) {
-    return (String) resource.getPropertyValue(getHostNamePropertyId());
-  }
-
-  @Override
-  protected String getComponentName(Resource resource) {
-    return "*";
-  }
-
-  @Override
-  protected Set<String> getGangliaClusterNames(Resource resource, String clusterName) {
-    return GANGLIA_CLUSTER_NAMES;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ganglia/GangliaHostProvider.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ganglia/GangliaHostProvider.java
deleted file mode 100644
index 37e1e05..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ganglia/GangliaHostProvider.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.ganglia;
-
-import org.apache.ambari.server.controller.spi.SystemException;
-
-/**
- *  Provider of Ganglia host information.
- */
-public interface GangliaHostProvider {
-
-  /**
-   * Get the Ganglia server host name for the given cluster name.
-   *
-   * @param clusterName  the cluster name
-   *
- * @return the Ganglia server host name
-   *
-   * @throws SystemException if unable to get the Ganglia server host name
-   */
-  public String getGangliaCollectorHostName(String clusterName) throws SystemException;
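-
-  // A minimal sketch of an implementation (hypothetical; real providers
-  // typically resolve the host from the cluster's GANGLIA_SERVER component):
-  //
-  //   public class FixedGangliaHostProvider implements GangliaHostProvider {
-  //     private final String host;
-  //     public FixedGangliaHostProvider(String host) { this.host = host; }
-  //     @Override
-  //     public String getGangliaCollectorHostName(String clusterName) {
-  //       return host;
-  //     }
-  //   }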
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ganglia/GangliaMetric.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ganglia/GangliaMetric.java
deleted file mode 100644
index e8b6d29..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ganglia/GangliaMetric.java
+++ /dev/null
@@ -1,173 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.ganglia;
-
-
-/**
- * Data structure for temporal data returned from Ganglia Web.
- */
-public class GangliaMetric {
-
-  // Note that the member names correspond to the names in the JSON returned from Ganglia Web.
-
-  /**
-   * The name.
-   */
-  private String ds_name;
-
-  /**
-   * The ganglia cluster name.
-   */
-  private String cluster_name;
-
-  /**
-   * The graph type.
-   */
-  private String graph_type;
-
-  /**
-   * The host name.
-   */
-  private String host_name;
-
-  /**
-   * The metric name.
-   */
-  private String metric_name;
-
-  /**
-   * The temporal data points.
-   */
-  private Number[][] datapoints;
-
-
-  // ----- GangliaMetric -----------------------------------------------------
-
-  public String getDs_name() {
-    return ds_name;
-  }
-
-  public void setDs_name(String ds_name) {
-    this.ds_name = ds_name;
-  }
-
-  public String getCluster_name() {
-    return cluster_name;
-  }
-
-  public void setCluster_name(String cluster_name) {
-    this.cluster_name = cluster_name;
-  }
-
-  public String getGraph_type() {
-    return graph_type;
-  }
-
-  public void setGraph_type(String graph_type) {
-    this.graph_type = graph_type;
-  }
-
-  public String getHost_name() {
-    return host_name;
-  }
-
-  public void setHost_name(String host_name) {
-    this.host_name = host_name;
-  }
-
-  public String getMetric_name() {
-    return metric_name;
-  }
-
-  public void setMetric_name(String metric_name) {
-    this.metric_name = metric_name;
-  }
-
-  public Number[][] getDatapoints() {
-    return datapoints;
-  }
-
-  public void setDatapoints(Number[][] datapoints) {
-    this.datapoints = datapoints;
-  }
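-
-  // Each datapoint is a two-element array: index 0 holds the metric value and
-  // index 1 the timestamp (see toString below), e.g. a hypothetical payload:
-  //   "datapoints": [[0.25, 1360000000], [0.5, 1360000015]]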
-
-
-  // ----- Object overrides --------------------------------------------------
-
-  @Override
-  public String toString() {
-    StringBuilder stringBuilder = new StringBuilder();
-
-    stringBuilder.append("\n");
-    stringBuilder.append("name=");
-    stringBuilder.append(ds_name);
-    stringBuilder.append("\n");
-    stringBuilder.append("cluster name=");
-    stringBuilder.append(cluster_name);
-    stringBuilder.append("\n");
-    stringBuilder.append("graph type=");
-    stringBuilder.append(graph_type);
-    stringBuilder.append("\n");
-    stringBuilder.append("host name=");
-    stringBuilder.append(host_name);
-    stringBuilder.append("\n");
-    stringBuilder.append("api name=");
-    stringBuilder.append(metric_name);
-    stringBuilder.append("\n");
-
-    stringBuilder.append("datapoints (value/timestamp):");
-    stringBuilder.append("\n");
-
-
-    boolean first = true;
-    stringBuilder.append("[");
-    for (Number[] m : datapoints) {
-      if (!first) {
-        stringBuilder.append(",");
-      }
-      stringBuilder.append("[");
-      stringBuilder.append(m[0]);
-      stringBuilder.append(",");
-      stringBuilder.append(m[1].longValue());
-      stringBuilder.append("]");
-      first = false;
-    }
-    stringBuilder.append("]");
-
-    return stringBuilder.toString();
-  }
-
-  public static class TemporalMetric {
-    private Number m_value;
-    private Number m_time;
-
-    public TemporalMetric(Number value, Number time) {
-      m_value = value;
-      m_time = time;
-    }
-
-    public Number getValue() {
-      return m_value;
-    }
-
-    public Number getTime() {
-      return m_time;
-    }
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ganglia/GangliaPropertyProvider.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ganglia/GangliaPropertyProvider.java
deleted file mode 100644
index 8f2adad..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ganglia/GangliaPropertyProvider.java
+++ /dev/null
@@ -1,535 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.ganglia;
-
-import org.apache.ambari.server.controller.internal.AbstractPropertyProvider;
-import org.apache.ambari.server.controller.internal.PropertyInfo;
-import org.apache.ambari.server.controller.spi.*;
-import org.apache.ambari.server.controller.utilities.StreamProvider;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.util.*;
-
-/**
- * Abstract property provider implementation for a Ganglia source.
- */
-public abstract class GangliaPropertyProvider extends AbstractPropertyProvider {
-
-  private final StreamProvider streamProvider;
-
-  private final GangliaHostProvider hostProvider;
-
-  private final String clusterNamePropertyId;
-
-  private final String hostNamePropertyId;
-
-  private final String componentNamePropertyId;
-
-  /**
-   * Map of Ganglia cluster names keyed by component type.
-   */
-  public static final Map<String, String> GANGLIA_CLUSTER_NAME_MAP = new HashMap<String, String>();
-
-  static {
-    GANGLIA_CLUSTER_NAME_MAP.put("NAMENODE", "HDPNameNode");
-    GANGLIA_CLUSTER_NAME_MAP.put("DATANODE", "HDPSlaves");
-    GANGLIA_CLUSTER_NAME_MAP.put("JOBTRACKER", "HDPJobTracker");
-    GANGLIA_CLUSTER_NAME_MAP.put("TASKTRACKER", "HDPSlaves");
-    GANGLIA_CLUSTER_NAME_MAP.put("HBASE_MASTER", "HDPHBaseMaster");
-    GANGLIA_CLUSTER_NAME_MAP.put("HBASE_CLIENT", "HDPSlaves");
-    GANGLIA_CLUSTER_NAME_MAP.put("HBASE_REGIONSERVER", "HDPSlaves");
-  }
-
-  protected final static Logger LOG =
-      LoggerFactory.getLogger(GangliaPropertyProvider.class);
-
-  // ----- Constructors ------------------------------------------------------
-
-  public GangliaPropertyProvider(Map<String, Map<String, PropertyInfo>> componentPropertyInfoMap,
-                                 StreamProvider streamProvider,
-                                 GangliaHostProvider hostProvider,
-                                 String clusterNamePropertyId,
-                                 String hostNamePropertyId,
-                                 String componentNamePropertyId) {
-
-    super(componentPropertyInfoMap);
-
-    this.streamProvider           = streamProvider;
-    this.hostProvider             = hostProvider;
-    this.clusterNamePropertyId    = clusterNamePropertyId;
-    this.hostNamePropertyId       = hostNamePropertyId;
-    this.componentNamePropertyId  = componentNamePropertyId;
-  }
-
-
-  // ----- PropertyProvider --------------------------------------------------
-
-  @Override
-  public Set<Resource> populateResources(Set<Resource> resources, Request request, Predicate predicate)
-      throws SystemException {
-
-    Set<String> ids = getRequestPropertyIds(request, predicate);
-    if (ids.isEmpty()) {
-      return resources;
-    }
-
-    Set<Resource> keepers = new HashSet<Resource>();
-
-    Map<String, Map<TemporalInfo, RRDRequest>> requestMap = getRRDRequests(resources, request, ids);
-
-    // For each cluster...
-    for (Map.Entry<String, Map<TemporalInfo, RRDRequest>> clusterEntry : requestMap.entrySet()) {
-      // For each request ...
-      for (RRDRequest rrdRequest : clusterEntry.getValue().values() ) {
-        //todo: property provider can reduce set of resources
-        keepers.addAll(rrdRequest.populateResources());
-      }
-    }
-    //todo: ignoring keepers returned by the provider
-    return resources;
-  }
-
-
-  // ----- GangliaPropertyProvider -------------------------------------------
-
-  /**
-   * Get the host name for the given resource.
-   *
-   * @param resource  the resource
-   *
-   * @return the host name
-   */
-  protected abstract String getHostName(Resource resource);
-
-  /**
-   * Get the component name for the given resource.
-   *
-   * @param resource  the resource
-   *
-   * @return the component name
-   */
-  protected abstract String getComponentName(Resource resource);
-
-  /**
-   * Get the Ganglia cluster names for the given resource.
-   *
-   * @param resource     the resource
-   * @param clusterName  the cluster name
-   *
-   * @return the set of Ganglia cluster names
-   */
-  protected abstract Set<String> getGangliaClusterNames(Resource resource, String clusterName);
-
-
-  /**
-   * Get the component name property id.
-   *
-   * @return the component name property id
-   */
-  protected String getComponentNamePropertyId() {
-    return componentNamePropertyId;
-  }
-
-  /**
-   * Get the host name property id.
-   *
-   * @return the host name property id
-   */
-  protected String getHostNamePropertyId() {
-    return hostNamePropertyId;
-  }
-
-  /**
-   * Get the stream provider.
-   *
-   * @return the stream provider
-   */
-  public StreamProvider getStreamProvider() {
-    return streamProvider;
-  }
-
-
-  // ----- helper methods ----------------------------------------------------
-
-  /**
-   * Get the request objects containing all the information required to
-   * make single requests to the Ganglia rrd script.
-   * Requests are created per cluster name / temporal information but
-   * can span multiple resources and metrics.
-   *
-   * @param resources  the resources being populated
-   * @param request    the request
-   * @param ids        the relevant property ids
-   *
-   * @return a map of maps of rrd requests keyed by cluster name / temporal info
-   */
-  private Map<String, Map<TemporalInfo, RRDRequest>> getRRDRequests(Set<Resource> resources,
-                                                                    Request request,
-                                                                    Set<String> ids) {
-
-    Map<String, Map<TemporalInfo, RRDRequest>> requestMap =
-        new HashMap<String, Map<TemporalInfo, RRDRequest>>();
-
-    for (Resource resource : resources) {
-      String clusterName = (String) resource.getPropertyValue(clusterNamePropertyId);
-      Map<TemporalInfo, RRDRequest> requests = requestMap.get(clusterName);
-      if (requests == null) {
-        requests = new HashMap<TemporalInfo, RRDRequest>();
-        requestMap.put(clusterName, requests);
-      }
-
-      Set<String> gangliaClusterNames = getGangliaClusterNames(resource, clusterName);
-
-      for (String gangliaClusterName : gangliaClusterNames) {
-        ResourceKey key =
-            new ResourceKey(getHostName(resource), gangliaClusterName);
-
-        for (String id : ids) {
-          Map<String, PropertyInfo> propertyInfoMap = getPropertyInfoMap(getComponentName(resource), id);
-
-          for (Map.Entry<String, PropertyInfo> entry : propertyInfoMap.entrySet()) {
-            String propertyId = entry.getKey();
-            PropertyInfo propertyInfo = entry.getValue();
-
-            TemporalInfo temporalInfo = request.getTemporalInfo(id);
-
-            if ((temporalInfo == null && propertyInfo.isPointInTime()) || (temporalInfo != null && propertyInfo.isTemporal())) {
-              RRDRequest rrdRequest = requests.get(temporalInfo);
-              if (rrdRequest == null) {
-                rrdRequest = new RRDRequest(clusterName, temporalInfo);
-                requests.put(temporalInfo, rrdRequest);
-              }
-              rrdRequest.putResource(key, resource);
-              rrdRequest.putPropertyId(propertyInfo.getPropertyId(), propertyId);
-            }
-          }
-        }
-      }
-    }
-    return requestMap;
-  }
-
-  /**
-   * Get the spec to locate the Ganglia stream from the given
-   * request info.
-   *
-   * @param clusterName   the cluster name
-   * @param clusterSet    the set of ganglia cluster names
-   * @param hostSet       the set of host names
-   * @param metricSet     the set of metric names
-   * @param temporalInfo  the temporal information
-   *
-   * @return the spec
-   *
-   * @throws SystemException if unable to get the Ganglia Collector host name
-   */
-  private String getSpec(String clusterName,
-                         Set<String> clusterSet,
-                         Set<String> hostSet,
-                         Set<String> metricSet,
-                         TemporalInfo temporalInfo) throws SystemException {
-
-    String clusters = getSetString(clusterSet, -1);
-    String hosts    = getSetString(hostSet, 100);
-    String metrics  = getSetString(metricSet, 50);
-
-    StringBuilder sb = new StringBuilder();
-
-    sb.append("http://").
-        append(hostProvider.getGangliaCollectorHostName(clusterName)).
-        append("/cgi-bin/rrd.py?c=").
-        append(clusters);
-
-    if (hosts.length() > 0) {
-      sb.append("&h=").append(hosts);
-    }
-
-    if (metrics.length() > 0) {
-      sb.append("&m=").append(metrics);
-    }
-
-    if (temporalInfo != null) {
-      long startTime = temporalInfo.getStartTime();
-      if (startTime != -1) {
-        sb.append("&s=").append(startTime);
-      }
-
-      long endTime = temporalInfo.getEndTime();
-      if (endTime != -1) {
-        sb.append("&e=").append(endTime);
-      }
-
-      long step = temporalInfo.getStep();
-      if (step != -1) {
-        sb.append("&r=").append(step);
-      }
-    }
-    else {
-      sb.append("&e=now");
-      sb.append("&pt=true");
-    }
-
-    return sb.toString();
-  }
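-
-  // For example (hypothetical host and values), a temporal request yields a
-  // spec such as:
-  //   http://ganglia.example.com/cgi-bin/rrd.py?c=HDPSlaves&h=host1&m=cpu_user&s=1360000000&e=1360003600&r=15
-  // whereas a point-in-time request (no temporal info) ends with "&e=now&pt=true".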
-
-  /**
-   * Get value from the given metric.
-   *
-   * @param metric      the metric
-   * @param isTemporal  indicates whether or not this is a temporal metric
-   *
-   * @return a range of temporal data or a point in time value if not temporal
-   */
-  private static Object getValue(GangliaMetric metric, boolean isTemporal) {
-    Number[][] dataPoints = metric.getDatapoints();
-
-    if (isTemporal) {
-      return dataPoints;
-    } else {
-      // return the value of the last data point
-      int length = dataPoints.length;
-      return length > 0 ? dataPoints[length - 1][0] : 0;
-    }
-  }
-
-  /**
-   * Get a comma delimited string from the given set of strings or
-   * an empty string if the size of the given set is greater than
-   * the given limit.
-   *
-   * @param set    the set of strings
-   * @param limit  the upper size limit for the list
-   *
-   * @return a comma delimited string of strings
-   */
-  private static String getSetString(Set<String> set, int limit) {
-    StringBuilder sb = new StringBuilder();
-
-    if (limit == -1 || set.size() <= limit) {
-      for (String cluster : set) {
-        if (sb.length() > 0) {
-          sb.append(",");
-        }
-        sb.append(cluster);
-      }
-    }
-    return sb.toString();
-  }
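-
-  // e.g. getSetString({"a","b","c"}, -1) yields "a,b,c", while a set larger
-  // than the limit yields "" so that the parameter is omitted from the URL.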
-
-
-  // ----- inner classes -----------------------------------------------------
-
-
-  // ----- RRDRequest ----------------------------------------------------
-
-  /**
-   * The information required to make a single RRD request.
-   */
-  private class RRDRequest {
-    private final String clusterName;
-    private final TemporalInfo temporalInfo;
-    private final Map<ResourceKey, Set<Resource>> resources = new HashMap<ResourceKey, Set<Resource>>();
-    private final Map<String, Set<String>> metrics = new HashMap<String, Set<String>>();
-    private final Set<String> clusterSet = new HashSet<String>();
-    private final Set<String> hostSet = new HashSet<String>();
-
-
-    private RRDRequest(String clusterName, TemporalInfo temporalInfo) {
-      this.clusterName  = clusterName;
-      this.temporalInfo = temporalInfo;
-    }
-
-    public void putResource(ResourceKey key, Resource resource) {
-      clusterSet.add(key.getClusterName());
-      hostSet.add(key.getHostName());
-      Set<Resource> resourceSet = resources.get(key);
-      if (resourceSet == null) {
-        resourceSet = new HashSet<Resource>();
-        resources.put(key, resourceSet);
-      }
-      resourceSet.add(resource);
-    }
-
-    public void putPropertyId(String metric, String id) {
-      Set<String> propertyIds = metrics.get(metric);
-
-      if (propertyIds == null) {
-        propertyIds = new HashSet<String>();
-        metrics.put(metric, propertyIds);
-      }
-      propertyIds.add(id);
-    }
-
-    /**
-     * Populate the associated resources by making the rrd request.
-     *
-     * @return a collection of populated resources
-     *
-     * @throws SystemException if unable to populate the resources
-     */
-    public Collection<Resource> populateResources() throws SystemException {
-
-      String spec = getSpec(clusterName, clusterSet, hostSet, metrics.keySet(), temporalInfo);
-      BufferedReader reader = null;
-      try {
-        reader = new BufferedReader(new InputStreamReader(
-            getStreamProvider().readFrom(spec)));
-
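-        // The rrd.py response is a plain text stream, one token per line:
-        // a start time, then for each metric its ds_name, cluster_name,
-        // host_name, metric_name, first timestamp and step, followed by one
-        // value per line up to "[AMBARI_DP_END]"; "[AMBARI_END]" and a final
-        // end time terminate the stream.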
-        int startTime = convertToNumber(reader.readLine()).intValue();
-
-        String dsName = reader.readLine();
-        while (!dsName.equals("[AMBARI_END]")) {
-          GangliaMetric metric = new GangliaMetric();
-          List<GangliaMetric.TemporalMetric> listTemporalMetrics =
-              new ArrayList<GangliaMetric.TemporalMetric>();
-
-          metric.setDs_name(dsName);
-          metric.setCluster_name(reader.readLine());
-          metric.setHost_name(reader.readLine());
-          metric.setMetric_name(reader.readLine());
-
-          int time = convertToNumber(reader.readLine()).intValue();
-          int step = convertToNumber(reader.readLine()).intValue();
-
-          String val = reader.readLine();
-          while (!val.equals("[AMBARI_DP_END]")) {
-            listTemporalMetrics.add(
-                new GangliaMetric.TemporalMetric(convertToNumber(val), time));
-            time += step;
-            val = reader.readLine();
-          }
-
-          //todo: change setter in GangliaMetric to take collection
-          Number[][] datapointsArray = new Number[listTemporalMetrics.size()][2];
-          for (int i = 0; i < listTemporalMetrics.size(); ++i) {
-            GangliaMetric.TemporalMetric m = listTemporalMetrics.get(i);
-            datapointsArray[i][0] = m.getValue();
-            datapointsArray[i][1] = m.getTime();
-          }
-          metric.setDatapoints(datapointsArray);
-
-          ResourceKey key = new ResourceKey(metric.getHost_name(), metric.getCluster_name());
-          Set<Resource> resourceSet = resources.get(key);
-          if (resourceSet != null) {
-            for (Resource resource : resourceSet) {
-              populateResource(resource, metric);
-            }
-          }
-
-          dsName = reader.readLine();
-        }
-        int endTime = convertToNumber(reader.readLine()).intValue();
-
-        if (LOG.isInfoEnabled()) {
-          LOG.info("Ganglia resource population time: " + (endTime - startTime));
-        }
-      } catch (IOException e) {
-        if (LOG.isErrorEnabled()) {
-          LOG.error("Caught exception getting Ganglia metrics : spec=" + spec, e);
-        }
-      } finally {
-        if (reader != null) {
-          try {
-            reader.close();
-          } catch (IOException e) {
-            if (LOG.isWarnEnabled()) {
-              LOG.warn("Unable to close http input steam : spec=" + spec, e);
-            }
-          }
-        }
-      }
-      //todo: filter out resources and return keepers
-      return Collections.emptySet();
-    }
-
-    /**
-     * Populate the given resource with the given Ganglia metric.
-     *
-     * @param resource       the resource
-     * @param gangliaMetric  the Ganglia metrics
-     */
-    private void populateResource(Resource resource, GangliaMetric gangliaMetric) {
-      Set<String> propertyIdSet = metrics.get(gangliaMetric.getMetric_name());
-      if (propertyIdSet != null) {
-        Map<String, PropertyInfo> metricsMap = getComponentMetrics().get(getComponentName(resource));
-        if (metricsMap != null) {
-          for (String propertyId : propertyIdSet) {
-            if (propertyId != null) {
-              if (metricsMap.containsKey(propertyId)){
-                resource.setProperty(propertyId, getValue(gangliaMetric, temporalInfo != null));
-              }
-            }
-          }
-        }
-      }
-    }
-
-    private Number convertToNumber(String s) {
-      return s.contains(".") ? Double.parseDouble(s) : Long.parseLong(s);
-    }
-  }
-
-
-  // ----- ResourceKey ---------------------------------------------------
-
-  /**
-   * Key used to associate information from a Ganglia metric with a resource.
-   */
-  private static class ResourceKey {
-    private final String hostName;
-    private final String gangliaClusterName;
-
-    private ResourceKey(String hostName, String gangliaClusterName) {
-      this.hostName           = hostName;
-      this.gangliaClusterName = gangliaClusterName;
-    }
-
-    public String getHostName() {
-      return hostName;
-    }
-
-    public String getClusterName() {
-      return gangliaClusterName;
-    }
-
-    @Override
-    public boolean equals(Object o) {
-      if (this == o) return true;
-      if (o == null || getClass() != o.getClass()) return false;
-
-      ResourceKey that = (ResourceKey) o;
-
-      return (gangliaClusterName == null ? that.gangliaClusterName == null :
-                  gangliaClusterName.equals(that.gangliaClusterName)) &&
-             (hostName == null ? that.hostName == null :
-                  hostName.equals(that.hostName));
-
-    }
-
-    @Override
-    public int hashCode() {
-      int result = hostName != null ? hostName.hashCode() : 0;
-      result = 31 * result + (gangliaClusterName != null ? gangliaClusterName.hashCode() : 0);
-      return result;
-    }
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ganglia/GangliaReportPropertyProvider.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ganglia/GangliaReportPropertyProvider.java
deleted file mode 100644
index e325d86..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/ganglia/GangliaReportPropertyProvider.java
+++ /dev/null
@@ -1,228 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.ganglia;
-
-import org.apache.ambari.server.controller.internal.AbstractPropertyProvider;
-import org.apache.ambari.server.controller.internal.PropertyInfo;
-import org.apache.ambari.server.controller.spi.*;
-import org.apache.ambari.server.controller.utilities.StreamProvider;
-import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jackson.type.TypeReference;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Property provider implementation for a Ganglia source. This provider is specialized
- * to pull metrics from existing Ganglia reports.
- */
-public class GangliaReportPropertyProvider extends AbstractPropertyProvider {
-
-  private final StreamProvider streamProvider;
-
-  private final GangliaHostProvider hostProvider;
-
-  private final String clusterNamePropertyId;
-
-
-  // ----- Constants --------------------------------------------------------
-
-  protected final static Logger LOG =
-      LoggerFactory.getLogger(GangliaReportPropertyProvider.class);
-
-  private static final String GANGLIA_CLUSTER_NAME = "HDPSlaves";
-
-
-  // ----- Constructors ------------------------------------------------------
-
-  public GangliaReportPropertyProvider(Map<String, Map<String, PropertyInfo>> componentPropertyInfoMap,
-                                       StreamProvider streamProvider,
-                                       GangliaHostProvider hostProvider,
-                                       String clusterNamePropertyId) {
-    super(componentPropertyInfoMap);
-
-    this.streamProvider        = streamProvider;
-    this.hostProvider          = hostProvider;
-    this.clusterNamePropertyId = clusterNamePropertyId;
-  }
-
-
-  // ----- PropertyProvider --------------------------------------------------
-
-  @Override
-  public Set<Resource> populateResources(Set<Resource> resources, Request request, Predicate predicate)
-      throws SystemException {
-
-    Set<Resource> keepers = new HashSet<Resource>();
-    for (Resource resource : resources) {
-      if (populateResource(resource, request, predicate)) {
-        keepers.add(resource);
-      }
-    }
-    return keepers;
-  }
-
-
-  // ----- helper methods ----------------------------------------------------
-
-  /**
-   * Populate a resource by obtaining the requested Ganglia resource metrics.
-   *
-   * @param resource  the resource to be populated
-   * @param request   the request
-   * @param predicate the predicate
-   *
-   * @return true if the resource was successfully populated with the requested properties
-   *
-   * @throws SystemException if unable to populate the resource
-   */
-  private boolean populateResource(Resource resource, Request request, Predicate predicate)
-      throws SystemException {
-
-    Set<String> propertyIds = getPropertyIds();
-
-    if (propertyIds.isEmpty()) {
-      return true;
-    }
-    String clusterName = (String) resource.getPropertyValue(clusterNamePropertyId);
-
-    if (hostProvider.getGangliaCollectorHostName(clusterName) == null) {
-      if (LOG.isWarnEnabled()) {
-        LOG.warn("Attempting to get metrics but the Ganglia server is unknown. Resource=" + resource +
-            " : Cluster=" + clusterName);
-      }
-      return true;
-    }
-
-    setProperties(resource, clusterName, request, getRequestPropertyIds(request, predicate));
-
-    return true;
-  }
-
-  private boolean setProperties(Resource resource, String clusterName, Request request, Set<String> ids)
-      throws SystemException {
-
-    Map<String, Map<String, String>> propertyIdMaps = getPropertyIdMaps(request, ids);
-
-    for (Map.Entry<String, Map<String, String>> entry : propertyIdMaps.entrySet()) {
-      Map<String, String>  map = entry.getValue();
-      String report = entry.getKey();
-
-      String spec = getSpec(clusterName, report);
-
-      try {
-        List<GangliaMetric> gangliaMetrics = new ObjectMapper().readValue(streamProvider.readFrom(spec),
-            new TypeReference<List<GangliaMetric>>() {});
-
-        if (gangliaMetrics != null) {
-          for (GangliaMetric gangliaMetric : gangliaMetrics) {
-
-            String propertyId = map.get(gangliaMetric.getMetric_name());
-            if (propertyId != null) {
-              resource.setProperty(propertyId, getValue(gangliaMetric));
-            }
-          }
-        }
-      } catch (IOException e) {
-        if (LOG.isErrorEnabled()) {
-          LOG.error("Caught exception getting Ganglia metrics : " + e + " : spec=" + spec);
-        }
-        return false;
-      }
-    }
-    return true;
-  }
-
-  private Map<String, Map<String, String>> getPropertyIdMaps(Request request, Set<String> ids) {
-    Map<String, Map<String, String>> propertyMap = new HashMap<String, Map<String, String>>();
-
-    for (String id : ids) {
-      Map<String, PropertyInfo> propertyInfoMap = getPropertyInfoMap("*", id);
-
-      for (Map.Entry<String, PropertyInfo> entry : propertyInfoMap.entrySet()) {
-        String propertyId = entry.getKey();
-        PropertyInfo propertyInfo = entry.getValue();
-
-        TemporalInfo temporalInfo = request.getTemporalInfo(id);
-
-        if (temporalInfo != null && propertyInfo.isTemporal()) {
-          String propertyName = propertyInfo.getPropertyId();
-          String report = null;
-          // format : report_name.metric_name
-          int dotIndex = propertyName.lastIndexOf('.');
-          if (dotIndex != -1){
-            report = propertyName.substring(0, dotIndex);
-            propertyName = propertyName.substring(dotIndex + 1);
-          }
-          if (report !=  null) {
-            Map<String, String> map = propertyMap.get(report);
-            if (map == null) {
-              map = new HashMap<String, String>();
-              propertyMap.put(report, map);
-            }
-            map.put(propertyName, propertyId);
-          }
-        }
-      }
-    }
-    return propertyMap;
-  }
-
-  /**
-   * Get the value from the given metric.
-   *
-   * @param metric  the metric
-   *
-   * @return the metric's data points
-   */
-  private Object getValue(GangliaMetric metric) {
-      return metric.getDatapoints();
-  }
-
-  /**
-   * Get the spec to locate the Ganglia stream from the given
-   * request info.
-   *
-   * @param clusterName     the cluster name
-   * @param report          the report
-   *
-   * @return the spec
-   *
-   * @throws SystemException if unable to get the Ganglia Collector host name
-   */
-  protected String getSpec(String clusterName, String report) throws SystemException {
-
-    StringBuilder sb = new StringBuilder();
-
-    sb.append("http://").
-        append(hostProvider.getGangliaCollectorHostName(clusterName)).
-        append("/ganglia/graph.php?c=").
-        append(GANGLIA_CLUSTER_NAME).
-        append("&g=").
-        append(report).
-        append("&json=1");
-
-    return sb.toString();
-  }
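-
-  // For example (hypothetical host and report), the spec looks like:
-  //   http://ganglia.example.com/ganglia/graph.php?c=HDPSlaves&g=cpu_report&json=1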
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/ClusterDefinition.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/ClusterDefinition.java
deleted file mode 100644
index b6dfa30..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/ClusterDefinition.java
+++ /dev/null
@@ -1,429 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.gsinstaller;
-
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Defines the cluster created by gsInstaller.
- */
-public class ClusterDefinition {
-
-  private static final String CLUSTER_DEFINITION_FILE = "gsInstaller-hosts.txt";
-  private static final String DEFAULT_CLUSTER_NAME    = "ambari";
-  private static final String CLUSTER_NAME_TAG        = "CLUSTER=";
-  private static final String DEFAULT_VERSION_ID      = "HDP-1.2.0";
-  private static final String VERSION_ID_TAG          = "VERSION=";
-
-  private final Set<String> services = new HashSet<String>();
-  private final Set<String> hosts = new HashSet<String>();
-  private final Map<String, Set<String>> components = new HashMap<String, Set<String>>();
-  private final Map<String, Map<String, Set<String>>> hostComponents = new HashMap<String, Map<String, Set<String>>>();
-
-  private final GSInstallerStateProvider stateProvider;
-  private String clusterName;
-  private String versionId;
-
-  /**
-   * Index of host names to host component state.
-   */
-  private final Map<String, Set<HostComponentState>> hostStateMap = new HashMap<String, Set<HostComponentState>>();
-
-  /**
-   * Index of service names to host component state.
-   */
-  private final Map<String, Set<HostComponentState>> serviceStateMap = new HashMap<String, Set<HostComponentState>>();
-
-  /**
-   * Index of component names to host component state.
-   */
-  private final Map<String, Set<HostComponentState>> componentStateMap = new HashMap<String, Set<HostComponentState>>();
-
-  /**
-   * Index of host component names to host component state.
-   */
-  private final Map<String, HostComponentState> hostComponentStateMap = new HashMap<String, HostComponentState>();
-
-  /**
-   * Expiry for the health value, in milliseconds.
-   */
-  private static final int DEFAULT_STATE_EXPIRY = 15000;
-
-  /**
-   * Component name mapping to account for differences in what is provided by the gsInstaller
-   * and what is expected by the Ambari providers.
-   */
-  private static final Map<String, String> componentNameMap = new HashMap<String, String>();
-
-  static {
-    componentNameMap.put("GANGLIA", "GANGLIA_SERVER");
-  }
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Create a cluster definition.
-   *
-   * @param stateProvider  the state provider
-   */
-  public ClusterDefinition(GSInstallerStateProvider stateProvider) {
-    this(stateProvider, DEFAULT_STATE_EXPIRY);
-  }
-
-  /**
-   * Create a cluster definition.
-   *
-   * @param stateProvider  the state provider
-   * @param stateExpiry    the state expiry
-   */
-  public ClusterDefinition(GSInstallerStateProvider stateProvider, int stateExpiry) {
-    this.stateProvider = stateProvider;
-    this.clusterName   = DEFAULT_CLUSTER_NAME;
-    this.versionId     = DEFAULT_VERSION_ID;
-    readClusterDefinition();
-    setHostComponentState(stateExpiry);
-  }
-
-  // ----- ClusterDefinition -------------------------------------------------
-
-  /**
-   * Get the name of the cluster.
-   *
-   * @return the cluster name
-   */
-  public String getClusterName() {
-    return clusterName;
-  }
-
-  /**
-   * Get the version id of the cluster.
-   *
-   * @return the version id
-   */
-  public String getVersionId() {
-    return versionId;
-  }
-
-  /**
-   * Get the services for the cluster.
-   *
-   * @return the set of service names
-   */
-  public Set<String> getServices() {
-    return services;
-  }
-
-  /**
-   * Get the hosts for the cluster.
-   *
-   * @return the set of host names
-   */
-  public Set<String> getHosts() {
-    return hosts;
-  }
-
-  /**
-   * Get the components for the given service.
-   *
-   * @param service  the service name
-   *
-   * @return the set of component names for the given service name
-   */
-  public Set<String> getComponents(String service) {
-    return components.get(service);
-  }
-
-  /**
-   * Get the host components for the given service and host.
-   *
-   * @param service  the service name
-   * @param host     the host name
-   *
-   * @return the set of host component names for the given service and host names
-   */
-  public Set<String> getHostComponents(String service, String host) {
-    Set<String> resultSet = null;
-    Map<String, Set<String>> serviceHostComponents = hostComponents.get(service);
-    if (serviceHostComponents != null) {
-      resultSet = serviceHostComponents.get(host);
-    }
-    return resultSet == null ? Collections.<String>emptySet() : resultSet;
-  }
-
-  /**
-   * Get the host state from the given host name.
-   *
-   * @param hostName  the host name
-   *
-   * @return the host state
-   */
-  public String getHostState(String hostName) {
-    return isHealthy(hostStateMap.get(hostName)) ? "HEALTHY" : "INIT";
-  }
-
-  /**
-   * Get the service state from the given service name.
-   *
-   * @param serviceName  the service name
-   *
-   * @return the service state
-   */
-  public String getServiceState(String serviceName) {
-    return isHealthy(serviceStateMap.get(serviceName)) ? "STARTED" : "INIT";
-  }
-
-  /**
-   * Get the component state from the given service name and component name.
-   *
-   * @param serviceName    the service name
-   * @param componentName  the component name
-   *
-   * @return the component state
-   */
-  public String getComponentState(String serviceName, String componentName) {
-    return isHealthy(componentStateMap.get(getComponentKey(serviceName, componentName))) ? "STARTED" : "INIT";
-  }
-
-  /**
-   * Get the host component state from the given host name, service name and component name.
-   *
-   * @param hostName       the host name
-   * @param serviceName    the service name
-   * @param componentName  the component name
-   *
-   * @return the host component state
-   */
-  public String getHostComponentState(String hostName, String serviceName, String componentName) {
-    return isHealthy(hostComponentStateMap.get(getHostComponentKey(hostName, serviceName, componentName))) ? "STARTED" : "INIT";
-  }
-
-
-  // ----- helper methods ----------------------------------------------------
-
-  /**
-   * Read the gsInstaller cluster definition file.
-   */
-  private void readClusterDefinition() {
-    try {
-      InputStream    is = this.getClass().getClassLoader().getResourceAsStream(CLUSTER_DEFINITION_FILE);
-      BufferedReader br = new BufferedReader(new InputStreamReader(is));
-
-      String line;
-      while ((line = br.readLine()) != null) {
-        line = line.trim();
-        if (line.startsWith(CLUSTER_NAME_TAG)) {
-          clusterName = line.substring(CLUSTER_NAME_TAG.length());
-        }
-        else if (line.startsWith(VERSION_ID_TAG)) {
-          versionId = line.substring(VERSION_ID_TAG.length());
-        }
-        else {
-          String[] parts = line.split("\\s+");
-          assert(parts.length == 3);
-
-          String serviceName   = parts[0];
-          String componentName = parts[1];
-          String hostName      = parts[2];
-
-          // translate the component name if required
-          if (componentNameMap.containsKey(componentName)) {
-            componentName = componentNameMap.get(componentName);
-          }
-
-          services.add(serviceName);
-          Set<String> serviceComponents = components.get(serviceName);
-          if (serviceComponents == null) {
-            serviceComponents = new HashSet<String>();
-            components.put(serviceName, serviceComponents);
-          }
-          serviceComponents.add(componentName);
-
-          Map<String, Set<String>> serviceHostComponents = hostComponents.get(serviceName);
-          if (serviceHostComponents == null) {
-            serviceHostComponents = new HashMap<String, Set<String>>();
-            hostComponents.put(serviceName, serviceHostComponents);
-          }
-
-          Set<String> hostHostComponents = serviceHostComponents.get(hostName);
-          if (hostHostComponents == null) {
-            hostHostComponents = new HashSet<String>();
-            serviceHostComponents.put(hostName, hostHostComponents);
-          }
-          hostHostComponents.add(componentName);
-          hosts.add(hostName);
-        }
-      }
-    } catch (IOException e) {
-      String msg = "Caught exception reading " + CLUSTER_DEFINITION_FILE + ".";
-      throw new IllegalStateException(msg, e);
-    }
-  }
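-
-  // The definition file consists of optional CLUSTER= and VERSION= tag lines
-  // plus one whitespace-separated "service component host" triple per line,
-  // e.g. (hypothetical hosts):
-  //
-  //   CLUSTER=mycluster
-  //   VERSION=HDP-1.2.0
-  //   HDFS NAMENODE host1.example.com
-  //   HDFS DATANODE host2.example.com
-  //   GANGLIA GANGLIA host1.example.com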
-
-  /**
-   * Set the host component state maps.
-   */
-  private void setHostComponentState(int stateExpiry) {
-    for (Map.Entry<String, Map<String, Set<String>>> serviceEntry : hostComponents.entrySet()) {
-      String serviceName = serviceEntry.getKey();
-
-      for (Map.Entry<String, Set<String>> hostEntry : serviceEntry.getValue().entrySet()) {
-        String hostName = hostEntry.getKey();
-
-        for (String componentName : hostEntry.getValue()) {
-
-          HostComponentState state = new HostComponentState(hostName, componentName, stateExpiry);
-
-          // add state to hosts
-          addState(hostName, hostStateMap, state);
-
-          // add state to services
-          addState(serviceName, serviceStateMap, state);
-
-          // add state to components
-          addState(getComponentKey(serviceName, componentName), componentStateMap, state);
-
-          // add state to host components
-          hostComponentStateMap.put(getHostComponentKey(hostName, serviceName, componentName), state);
-        }
-      }
-    }
-  }
-
-  /**
-   * Add the given host component state object to the given map of state objects.
-   *
-   * @param key       the map key (a host name, service name or component key)
-   * @param stateMap  the map of state objects
-   * @param state     the state
-   */
-  private static void addState(String key, Map<String, Set<HostComponentState>> stateMap, HostComponentState state) {
-    Set<HostComponentState> states = stateMap.get(key);
-    if (states == null) {
-      states = new HashSet<HostComponentState>();
-      stateMap.put(key, states);
-    }
-    states.add(state);
-  }
-
-  /**
-   * Get a key from the given service name and component name.
-   *
-   * @param serviceName    the service name
-   * @param componentName  the component name
-   *
-   * @return the key
-   */
-  private String getComponentKey(String serviceName, String componentName) {
-    return serviceName + "." + componentName;
-  }
-
-  /**
-   * Get a key from the given host name, service name and component name.
-   *
-   * @param hostName       the host name
-   * @param serviceName    the service name
-   * @param componentName  the component name
-   *
-   * @return the key
-   */
-  private String getHostComponentKey(String hostName, String serviceName, String componentName) {
-    return hostName + "." + serviceName + "." + componentName;
-  }
-
-  /**
-   * Determine whether or not the host components associated
-   * with the given states are healthy.
-   *
-   * @param states  the states
-   *
-   * @return true if the associated host components are healthy
-   */
-  private boolean isHealthy(Set<HostComponentState> states) {
-    if (states != null) {
-      for (HostComponentState state : states) {
-        if (!state.isHealthy()) {
-          return false;
-        }
-      }
-    }
-    return true;
-  }
-
-  /**
-   * Determine whether or not the host component associated
-   * with the given state is healthy.
-   *
-   * @param state  the state
-   *
-   * @return true if the associated host component is healthy
-   */
-  private boolean isHealthy(HostComponentState state) {
-    return state == null || state.isHealthy();
-  }
-
-
-  // ----- inner classes -----------------------------------------------------
-
-  /**
-   * A state object used to check the health of a host component.
-   */
-  private class HostComponentState {
-    private final String hostName;
-    private final String componentName;
-    private final int expiry;
-    private boolean healthy = true;
-    private long lastAccess;
-
-    // ----- Constructor -----------------------------------------------------
-
-    /**
-     * Constructor.
-     *
-     * @param hostName       the host name
-     * @param componentName  the component name
-     * @param expiry         the time, in milliseconds, for which a fetched health value stays valid
-     */
-    HostComponentState(String hostName, String componentName, int expiry) {
-      this.hostName      = hostName;
-      this.componentName = componentName;
-      this.expiry        = expiry;
-    }
-
-    /**
-     * Determine whether or not the associated host component is healthy.
-     *
-     * @return true if the associated host component is healthy
-     */
-    public boolean isHealthy() {
-      if (System.currentTimeMillis() - lastAccess > expiry) {
-        // health value has expired... get it again
-        healthy = stateProvider.isHealthy(hostName, componentName);
-        this.lastAccess = System.currentTimeMillis();
-      }
-      return healthy;
-    }
-  }
-}
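Note on the deleted class above: HostComponentState implements a small time-based cache, so a component's health is re-queried from the state provider at most once per expiry window rather than on every read. A minimal standalone sketch of that pattern (the CachedHealth and StateProvider names here are illustrative, not from the Ambari sources):

    /** Minimal sketch of the expiry-based health cache in HostComponentState. */
    interface StateProvider {
      boolean isHealthy(String hostName, String componentName);
    }

    class CachedHealth {
      private final StateProvider provider;
      private final long expiryMs;   // how long a fetched health value stays valid
      private boolean healthy = true;
      private long lastAccess;       // 0 forces a real probe on first use

      CachedHealth(StateProvider provider, long expiryMs) {
        this.provider = provider;
        this.expiryMs = expiryMs;
      }

      boolean isHealthy(String hostName, String componentName) {
        long now = System.currentTimeMillis();
        if (now - lastAccess > expiryMs) {
          // value has expired... refresh it from the live provider
          healthy = provider.isHealthy(hostName, componentName);
          lastAccess = now;
        }
        return healthy;
      }
    }

With the expiry set to 0 the cache degenerates to a probe per call; the deleted code wires one expiry value through setHostComponentState, so all states in a cluster share the same setting.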
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerClusterProvider.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerClusterProvider.java
deleted file mode 100644
index 784f51c..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerClusterProvider.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.gsinstaller;
-
-import org.apache.ambari.server.controller.internal.ResourceImpl;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-
-/**
- * A cluster resource provider for a gsInstaller defined cluster.
- */
-public class GSInstallerClusterProvider extends GSInstallerResourceProvider {
-
-  // Clusters
-  protected static final String CLUSTER_NAME_PROPERTY_ID    = PropertyHelper.getPropertyId("Clusters", "cluster_name");
-  protected static final String CLUSTER_VERSION_PROPERTY_ID = PropertyHelper.getPropertyId("Clusters", "version");
-
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Construct a resource provider based on the given cluster definition.
-   *
-   * @param clusterDefinition  the cluster definition
-   */
-  public GSInstallerClusterProvider(ClusterDefinition clusterDefinition) {
-    super(Resource.Type.Cluster, clusterDefinition);
-    initClusterResources();
-  }
-
-
-  // ----- GSInstallerResourceProvider ---------------------------------------
-
-  @Override
-  public void updateProperties(Resource resource, Request request, Predicate predicate) {
-    // Do nothing
-  }
-
-
-  // ----- helper methods ----------------------------------------------------
-
-  /**
-   * Create the resources based on the cluster definition.
-   */
-  private void initClusterResources() {
-    Resource cluster = new ResourceImpl(Resource.Type.Cluster);
-    ClusterDefinition clusterDefinition = getClusterDefinition();
-    cluster.setProperty(CLUSTER_NAME_PROPERTY_ID, clusterDefinition.getClusterName());
-    cluster.setProperty(CLUSTER_VERSION_PROPERTY_ID, clusterDefinition.getVersionId());
-
-    addResource(cluster);
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerComponentProvider.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerComponentProvider.java
deleted file mode 100644
index 8680026..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerComponentProvider.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.gsinstaller;
-
-import org.apache.ambari.server.controller.internal.ResourceImpl;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-
-import java.util.Set;
-
-/**
- * A component resource provider for a gsInstaller defined cluster.
- */
-public class GSInstallerComponentProvider extends GSInstallerResourceProvider {
-
-  // Components
-  protected static final String COMPONENT_CLUSTER_NAME_PROPERTY_ID    = PropertyHelper.getPropertyId("ServiceComponentInfo", "cluster_name");
-  protected static final String COMPONENT_SERVICE_NAME_PROPERTY_ID    = PropertyHelper.getPropertyId("ServiceComponentInfo", "service_name");
-  protected static final String COMPONENT_COMPONENT_NAME_PROPERTY_ID  = PropertyHelper.getPropertyId("ServiceComponentInfo", "component_name");
-  protected static final String COMPONENT_STATE_PROPERTY_ID           = PropertyHelper.getPropertyId("ServiceComponentInfo", "state");
-
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Construct a resource provider based on the given cluster definition.
-   *
-   * @param clusterDefinition  the cluster definition
-   */
-  public GSInstallerComponentProvider(ClusterDefinition clusterDefinition) {
-    super(Resource.Type.Component, clusterDefinition);
-    initComponentResources();
-  }
-
-
-  // ----- GSInstallerResourceProvider ---------------------------------------
-
-  @Override
-  public void updateProperties(Resource resource, Request request, Predicate predicate) {
-
-    Set<String> propertyIds = getRequestPropertyIds(request, predicate);
-    if (contains(propertyIds, COMPONENT_STATE_PROPERTY_ID)) {
-      String serviceName   = (String) resource.getPropertyValue(COMPONENT_SERVICE_NAME_PROPERTY_ID);
-      String componentName = (String) resource.getPropertyValue(COMPONENT_COMPONENT_NAME_PROPERTY_ID);
-      resource.setProperty(COMPONENT_STATE_PROPERTY_ID, getClusterDefinition().getComponentState(serviceName, componentName));
-    }
-  }
-
-
-  // ----- helper methods ----------------------------------------------------
-
-  /**
-   * Create the resources based on the cluster definition.
-   */
-  private void initComponentResources() {
-    String      clusterName = getClusterDefinition().getClusterName();
-    Set<String> services    = getClusterDefinition().getServices();
-    for (String serviceName : services) {
-      Set<String> components = getClusterDefinition().getComponents(serviceName);
-      for (String componentName : components) {
-        Resource component = new ResourceImpl(Resource.Type.Component);
-        component.setProperty(COMPONENT_CLUSTER_NAME_PROPERTY_ID, clusterName);
-        component.setProperty(COMPONENT_SERVICE_NAME_PROPERTY_ID, serviceName);
-        component.setProperty(COMPONENT_COMPONENT_NAME_PROPERTY_ID, componentName);
-
-        addResource(component);
-      }
-    }
-  }
-}
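A detail of updateProperties above worth calling out: the component state is computed only when the request or predicate actually asks for the state property, so the (potentially remote) health check is skipped for plain listing requests. A rough sketch of that guard, using a plain map in place of Resource and hypothetical property-id strings:

    import java.util.Map;
    import java.util.Set;

    class LazyStateSketch {
      // Hypothetical ids; the real ones come from PropertyHelper.getPropertyId(...).
      static final String STATE_ID = "ServiceComponentInfo/state";
      static final String NAME_ID  = "ServiceComponentInfo/component_name";

      /** Pay for the health check only when the caller asked for "state". */
      static void updateProperties(Map<String, Object> resource, Set<String> requestedIds) {
        if (requestedIds.contains(STATE_ID)) {
          String componentName = (String) resource.get(NAME_ID);
          resource.put(STATE_ID, isHealthy(componentName) ? "STARTED" : "INIT");
        }
      }

      // Stand-in for ClusterDefinition.getComponentState(...).
      static boolean isHealthy(String componentName) {
        return true;
      }
    }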
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerHostComponentProvider.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerHostComponentProvider.java
deleted file mode 100644
index e04a258..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerHostComponentProvider.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.gsinstaller;
-
-import org.apache.ambari.server.controller.internal.ResourceImpl;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-
-import java.util.Set;
-
-/**
- * A host component resource provider for a gsInstaller defined cluster.
- */
-public class GSInstallerHostComponentProvider extends GSInstallerResourceProvider {
-
-  // Host Components
-  protected static final String HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID   = PropertyHelper.getPropertyId("HostRoles", "cluster_name");
-  protected static final String HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID   = PropertyHelper.getPropertyId("HostRoles", "service_name");
-  protected static final String HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID = PropertyHelper.getPropertyId("HostRoles", "component_name");
-  protected static final String HOST_COMPONENT_HOST_NAME_PROPERTY_ID      = PropertyHelper.getPropertyId("HostRoles", "host_name");
-  protected static final String HOST_COMPONENT_STATE_PROPERTY_ID          = PropertyHelper.getPropertyId("HostRoles", "state");
-  protected static final String HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID  = PropertyHelper.getPropertyId("HostRoles", "desired_state");
-
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Construct a resource provider based on the given cluster definition.
-   *
-   * @param clusterDefinition  the cluster definition
-   */
-  public GSInstallerHostComponentProvider(ClusterDefinition clusterDefinition) {
-    super(Resource.Type.HostComponent, clusterDefinition);
-    initHostComponentResources();
-  }
-
-
-  // ----- GSInstallerResourceProvider ---------------------------------------
-
-  @Override
-  public void updateProperties(Resource resource, Request request, Predicate predicate) {
-    Set<String> propertyIds = getRequestPropertyIds(request, predicate);
-    if (contains(propertyIds, HOST_COMPONENT_STATE_PROPERTY_ID) ||
-        contains(propertyIds, HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID)) {
-      String serviceName   = (String) resource.getPropertyValue(HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID);
-      String componentName = (String) resource.getPropertyValue(HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID);
-      String hostName      = (String) resource.getPropertyValue(HOST_COMPONENT_HOST_NAME_PROPERTY_ID);
-
-      String hostComponentState = getClusterDefinition().getHostComponentState(hostName, serviceName, componentName);
-
-      resource.setProperty(HOST_COMPONENT_STATE_PROPERTY_ID, hostComponentState);
-      resource.setProperty(HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID, hostComponentState);
-    }
-  }
-
-
-  // ----- helper methods ----------------------------------------------------
-
-  /**
-   * Create the resources based on the cluster definition.
-   */
-  private void initHostComponentResources() {
-    String      clusterName = getClusterDefinition().getClusterName();
-    Set<String> services    = getClusterDefinition().getServices();
-    for (String serviceName : services) {
-      Set<String> hosts = getClusterDefinition().getHosts();
-      for (String hostName : hosts) {
-        Set<String> hostComponents = getClusterDefinition().getHostComponents(serviceName, hostName);
-        for (String componentName : hostComponents) {
-          Resource hostComponent = new ResourceImpl(Resource.Type.HostComponent);
-          hostComponent.setProperty(HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID, clusterName);
-          hostComponent.setProperty(HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID, serviceName);
-          hostComponent.setProperty(HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID, componentName);
-          hostComponent.setProperty(HOST_COMPONENT_HOST_NAME_PROPERTY_ID, hostName);
-
-          addResource(hostComponent);
-        }
-      }
-    }
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerHostProvider.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerHostProvider.java
deleted file mode 100644
index cab9811..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerHostProvider.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.gsinstaller;
-
-import org.apache.ambari.server.controller.internal.ResourceImpl;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-
-import java.util.Set;
-
-/**
- * A host resource provider for a gsInstaller defined cluster.
- */
-public class GSInstallerHostProvider extends GSInstallerResourceProvider {
-
-  // Hosts
-  protected static final String HOST_CLUSTER_NAME_PROPERTY_ID =
-      PropertyHelper.getPropertyId("Hosts", "cluster_name");
-  protected static final String HOST_NAME_PROPERTY_ID =
-      PropertyHelper.getPropertyId("Hosts", "host_name");
-  protected static final String HOST_STATE_PROPERTY_ID =
-      PropertyHelper.getPropertyId("Hosts", "host_state");
-
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Construct a resource provider based on the given cluster definition.
-   *
-   * @param clusterDefinition  the cluster definition
-   */
-  public GSInstallerHostProvider(ClusterDefinition clusterDefinition) {
-    super(Resource.Type.Host, clusterDefinition);
-    initHostResources();
-  }
-
-
-  // ----- GSInstallerResourceProvider ---------------------------------------
-
-  @Override
-  public void updateProperties(Resource resource, Request request, Predicate predicate) {
-    Set<String> propertyIds = getRequestPropertyIds(request, predicate);
-    if (contains(propertyIds, HOST_STATE_PROPERTY_ID)) {
-      String hostName = (String) resource.getPropertyValue(HOST_NAME_PROPERTY_ID);
-      resource.setProperty(HOST_STATE_PROPERTY_ID, getClusterDefinition().getHostState(hostName));
-    }
-  }
-
-
-  // ----- helper methods ----------------------------------------------------
-
-  /**
-   * Create the resources based on the cluster definition.
-   */
-  private void initHostResources() {
-    ClusterDefinition clusterDefinition = getClusterDefinition();
-    String            clusterName       = clusterDefinition.getClusterName();
-    Set<String>       hosts             = clusterDefinition.getHosts();
-
-    for (String hostName : hosts) {
-      Resource host = new ResourceImpl(Resource.Type.Host);
-      host.setProperty(HOST_CLUSTER_NAME_PROPERTY_ID, clusterName);
-      host.setProperty(HOST_NAME_PROPERTY_ID, hostName);
-
-      addResource(host);
-    }
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerNoOpProvider.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerNoOpProvider.java
deleted file mode 100644
index 0272d76..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerNoOpProvider.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.gsinstaller;
-
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.Resource;
-
-/**
- * A NO-OP resource provider for a gsInstaller defined cluster.
- */
-public class GSInstallerNoOpProvider extends GSInstallerResourceProvider {
-
-  // ----- GSInstallerResourceProvider ---------------------------------------
-
-  @Override
-  public void updateProperties(Resource resource, Request request, Predicate predicate) {
-    // Do nothing
-  }
-
-  // ----- Constructors ------------------------------------------------------
-
-  public GSInstallerNoOpProvider(Resource.Type type, ClusterDefinition clusterDefinition) {
-    super(type, clusterDefinition);
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerProviderModule.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerProviderModule.java
deleted file mode 100644
index 2e85ba5..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerProviderModule.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.gsinstaller;
-
-import org.apache.ambari.server.controller.internal.AbstractProviderModule;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.spi.ResourceProvider;
-
-import java.io.IOException;
-import java.net.HttpURLConnection;
-import java.net.URL;
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * A provider module implementation that uses the GSInstaller resource provider.
- */
-public class GSInstallerProviderModule extends AbstractProviderModule implements GSInstallerStateProvider {
-
-  private final ClusterDefinition clusterDefinition;
-
-  private static final Map<String, String> PORTS = new HashMap<String, String>();
-
-  static {
-    PORTS.put("NAMENODE",           "50070");
-    PORTS.put("DATANODE",           "50075");
-    PORTS.put("JOBTRACKER",         "50030");
-    PORTS.put("TASKTRACKER",        "50060");
-    PORTS.put("HBASE_MASTER",       "60010");
-    PORTS.put("HBASE_REGIONSERVER", "60030");
-  }
-
-  private static final int TIMEOUT = 5000;
-
-
-  // ----- Constructors ------------------------------------------------------
-
-  public GSInstallerProviderModule() {
-    clusterDefinition = new ClusterDefinition(this);
-  }
-
-
-  // ----- GSInstallerStateProvider ------------------------------------------
-
-  @Override
-  public boolean isHealthy(String hostName, String componentName) {
-    String port = PORTS.get(componentName);
-    if (port != null) {
-      StringBuilder sb = new StringBuilder();
-      sb.append("http://").append(hostName);
-      sb.append(":").append(port);
-
-      try {
-        HttpURLConnection connection = (HttpURLConnection) new URL(sb.toString()).openConnection();
-
-        connection.setRequestMethod("HEAD");
-        connection.setConnectTimeout(TIMEOUT);
-        connection.setReadTimeout(TIMEOUT);
-
-        int code = connection.getResponseCode();
-
-        return code >= 200 && code <= 399;
-      } catch (IOException exception) {
-        return false;
-      }
-    }
-    return true;
-  }
-
-
-  // ----- utility methods ---------------------------------------------------
-
-  @Override
-  protected ResourceProvider createResourceProvider(Resource.Type type) {
-    return GSInstallerResourceProvider.getResourceProvider(type, clusterDefinition);
-  }
-}
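The isHealthy implementation above is an HTTP HEAD probe against a component's well-known web UI port: any 2xx/3xx status counts as healthy, any connect or read failure as unhealthy, and components without a known port are assumed healthy. A self-contained sketch of the probe, reusing the 5000 ms timeout from the deleted code:

    import java.io.IOException;
    import java.net.HttpURLConnection;
    import java.net.URL;

    class HeadProbe {
      private static final int TIMEOUT_MS = 5000;  // same value as the deleted TIMEOUT

      /** True if http://host:port answers an HTTP HEAD with a 2xx/3xx status. */
      static boolean probe(String hostName, int port) {
        try {
          URL url = new URL("http://" + hostName + ":" + port);
          HttpURLConnection connection = (HttpURLConnection) url.openConnection();
          connection.setRequestMethod("HEAD");
          connection.setConnectTimeout(TIMEOUT_MS);
          connection.setReadTimeout(TIMEOUT_MS);
          int code = connection.getResponseCode();
          return code >= 200 && code <= 399;
        } catch (IOException e) {
          return false;  // unreachable or misbehaving: treat as unhealthy
        }
      }
    }

For example, probe("nn-host", 50070) would check a NameNode web UI per the PORTS table above (the hostname here is hypothetical).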
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerResourceProvider.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerResourceProvider.java
deleted file mode 100644
index c612c02..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerResourceProvider.java
+++ /dev/null
@@ -1,225 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.gsinstaller;
-
-import org.apache.ambari.server.controller.internal.ResourceImpl;
-import org.apache.ambari.server.controller.spi.NoSuchParentResourceException;
-import org.apache.ambari.server.controller.spi.NoSuchResourceException;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.RequestStatus;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException;
-import org.apache.ambari.server.controller.spi.ResourceProvider;
-import org.apache.ambari.server.controller.spi.SystemException;
-import org.apache.ambari.server.controller.spi.UnsupportedPropertyException;
-import org.apache.ambari.server.controller.utilities.PredicateHelper;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-
-/**
- * An abstract resource provider for a gsInstaller defined cluster.
- */
-public abstract class GSInstallerResourceProvider implements ResourceProvider {
-
-  private final ClusterDefinition clusterDefinition;
-
-  private final Set<Resource> resources = new HashSet<Resource>();
-
-  private final Resource.Type type;
-
-  private final Set<String> propertyIds;
-
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Construct a resource provider based on the given cluster definition.
-   *
-   * @param type               the resource type
-   * @param clusterDefinition  the cluster definition
-   */
-  public GSInstallerResourceProvider(Resource.Type type, ClusterDefinition clusterDefinition) {
-    this.type              = type;
-    this.clusterDefinition = clusterDefinition;
-
-    Set<String> propertyIds = PropertyHelper.getPropertyIds(type);
-    this.propertyIds = new HashSet<String>(propertyIds);
-    this.propertyIds.addAll(PropertyHelper.getCategories(propertyIds));
-  }
-
-
-  // ----- ResourceProvider --------------------------------------------------
-
-  @Override
-  public RequestStatus createResources(Request request)
-      throws SystemException, UnsupportedPropertyException, ResourceAlreadyExistsException, NoSuchParentResourceException {
-    throw new UnsupportedOperationException("Management operations are not supported");
-  }
-
-  @Override
-  public Set<Resource> getResources(Request request, Predicate predicate)
-      throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-
-    Set<Resource> resultSet = new HashSet<Resource>();
-
-    for (Resource resource : resources) {
-      if (predicate == null || predicate.evaluate(resource)) {
-        ResourceImpl newResource = new ResourceImpl(resource);
-        updateProperties(newResource, request, predicate);
-        resultSet.add(newResource);
-      }
-    }
-    return resultSet;
-  }
-
-  @Override
-  public RequestStatus updateResources(Request request, Predicate predicate)
-      throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-    throw new UnsupportedOperationException("Management operations are not supported");
-  }
-
-  @Override
-  public RequestStatus deleteResources(Predicate predicate)
-      throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-    throw new UnsupportedOperationException("Management operations are not supported");
-  }
-
-  @Override
-  public Map<Resource.Type, String> getKeyPropertyIds() {
-    return PropertyHelper.getKeyPropertyIds(type);
-  }
-
-  @Override
-  public Set<String> checkPropertyIds(Set<String> propertyIds) {
-    propertyIds = new HashSet<String>(propertyIds);
-    propertyIds.removeAll(this.propertyIds);
-    return propertyIds;
-  }
-
-
-  // ----- GSInstallerResourceProvider ---------------------------------------
-
-  /**
-   * Update the resource with any properties handled by the resource provider.
-   *
-   * @param resource   the resource to update
-   * @param request    the request
-   * @param predicate  the predicate
-   */
-  public abstract void updateProperties(Resource resource, Request request, Predicate predicate);
-
-
-  // ----- accessors ---------------------------------------------------------
-
-  /**
-   * Get the cluster definition.
-   *
-   * @return the cluster definition
-   */
-  protected ClusterDefinition getClusterDefinition() {
-    return clusterDefinition;
-  }
-
-
-  // ----- helper methods ----------------------------------------------------
-
-  /**
-   * Get the set of property ids required to satisfy the given request.
-   *
-   * @param request              the request
-   * @param predicate            the predicate
-   *
-   * @return the set of property ids needed to satisfy the request
-   */
-  protected Set<String> getRequestPropertyIds(Request request, Predicate predicate) {
-    Set<String> propertyIds  = request.getPropertyIds();
-
-    // if no properties are specified, then return them all
-    if (propertyIds == null || propertyIds.isEmpty()) {
-      return new HashSet<String>(this.propertyIds);
-    }
-
-    propertyIds = new HashSet<String>(propertyIds);
-
-    if (predicate != null) {
-      propertyIds.addAll(PredicateHelper.getPropertyIds(predicate));
-    }
-    return propertyIds;
-  }
-
-  /**
-   * Check to see if the given set contains a property or category id that matches the given property id.
-   *
-   * @param ids         the set of property/category ids
-   * @param propertyId  the property id
-   *
-   * @return true if the given set contains a property id or category that matches the given property id
-   */
-  protected static boolean contains(Set<String> ids, String propertyId) {
-    boolean contains = ids.contains(propertyId);
-
-    if (!contains) {
-      String category = PropertyHelper.getPropertyCategory(propertyId);
-      while (category != null && !contains) {
-        contains = ids.contains(category);
-        category = PropertyHelper.getPropertyCategory(category);
-      }
-    }
-    return contains;
-  }
-
-  /**
-   * Add a resource to the set of resources provided by this provider.
-   *
-   * @param resource  the resource to add
-   */
-  protected void addResource(Resource resource) {
-    resources.add(resource);
-  }
-
-  /**
-   * Factory method for obtaining a resource provider based on a given type.
-   *
-   * @param type               the resource type
-   * @param clusterDefinition  the cluster definition
-   *
-   * @return a new resource provider
-   */
-  public static ResourceProvider getResourceProvider(Resource.Type type,
-                                                     ClusterDefinition clusterDefinition) {
-    switch (type) {
-      case Cluster:
-        return new GSInstallerClusterProvider(clusterDefinition);
-      case Service:
-        return new GSInstallerServiceProvider(clusterDefinition);
-      case Component:
-        return new GSInstallerComponentProvider(clusterDefinition);
-      case Host:
-        return new GSInstallerHostProvider(clusterDefinition);
-      case HostComponent:
-        return new GSInstallerHostComponentProvider(clusterDefinition);
-      default:
-        return new GSInstallerNoOpProvider(type, clusterDefinition);
-    }
-  }
-}
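Two mechanics of this abstract base recur elsewhere in the codebase: every write path (createResources, updateResources, deleteResources) throws UnsupportedOperationException, making gsInstaller clusters strictly read-only, and contains() walks property categories upward so that requesting a category implies every property beneath it. Since property ids in this era are "/"-separated paths (see the endsWith("/") handling in AbstractPropertyProvider below), the walk can be sketched on plain strings; getCategory here is a stand-in for PropertyHelper.getPropertyCategory:

    import java.util.Set;

    class CategoryMatch {
      /** Parent category of "a/b/c" is "a/b"; null once the root is passed. */
      static String getCategory(String propertyId) {
        int idx = propertyId.lastIndexOf('/');
        return idx == -1 ? null : propertyId.substring(0, idx);
      }

      /** True if ids names the property itself or any category above it. */
      static boolean contains(Set<String> ids, String propertyId) {
        boolean contains = ids.contains(propertyId);
        String category = getCategory(propertyId);
        while (category != null && !contains) {
          contains = ids.contains(category);
          category = getCategory(category);
        }
        return contains;
      }
    }

For instance, contains(Collections.singleton("HostRoles"), "HostRoles/state") is true because the walk reaches the "HostRoles" category.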
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerServiceProvider.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerServiceProvider.java
deleted file mode 100644
index 89b1c86..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerServiceProvider.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.gsinstaller;
-
-import org.apache.ambari.server.controller.internal.ResourceImpl;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-
-import java.util.Set;
-
-/**
- * A service resource provider for a gsInstaller defined cluster.
- */
-public class GSInstallerServiceProvider extends GSInstallerResourceProvider {
-
-  // Services
-  protected static final String SERVICE_CLUSTER_NAME_PROPERTY_ID    = PropertyHelper.getPropertyId("ServiceInfo", "cluster_name");
-  protected static final String SERVICE_SERVICE_NAME_PROPERTY_ID    = PropertyHelper.getPropertyId("ServiceInfo", "service_name");
-  protected static final String SERVICE_SERVICE_STATE_PROPERTY_ID   = PropertyHelper.getPropertyId("ServiceInfo", "state");
-
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Construct a resource provider based on the given cluster definition.
-   *
-   * @param clusterDefinition  the cluster definition
-   */
-  public GSInstallerServiceProvider(ClusterDefinition clusterDefinition) {
-    super(Resource.Type.Service, clusterDefinition);
-    initServiceResources();
-  }
-
-
-  // ----- GSInstallerResourceProvider ---------------------------------------
-
-  @Override
-  public void updateProperties(Resource resource, Request request, Predicate predicate) {
-    Set<String> propertyIds = getRequestPropertyIds(request, predicate);
-    if (contains(propertyIds, SERVICE_SERVICE_STATE_PROPERTY_ID)) {
-      String serviceName = (String) resource.getPropertyValue(SERVICE_SERVICE_NAME_PROPERTY_ID);
-      resource.setProperty(SERVICE_SERVICE_STATE_PROPERTY_ID, getClusterDefinition().getServiceState(serviceName));
-    }
-  }
-
-
-  // ----- helper methods ----------------------------------------------------
-
-  /**
-   * Create the resources based on the cluster definition.
-   */
-  private void initServiceResources() {
-    String      clusterName = getClusterDefinition().getClusterName();
-    Set<String> services    = getClusterDefinition().getServices();
-
-    for (String serviceName : services) {
-      Resource service = new ResourceImpl(Resource.Type.Service);
-      service.setProperty(SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
-      service.setProperty(SERVICE_SERVICE_NAME_PROPERTY_ID, serviceName);
-
-      addResource(service);
-    }
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerStateProvider.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerStateProvider.java
deleted file mode 100644
index aef907c..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerStateProvider.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.gsinstaller;
-
-/**
- * Interface to provide component state to the gsInstaller resource provider.
- */
-public interface GSInstallerStateProvider {
-  /**
-   * Determine whether or not the host component identified by the given host name
-   * and component name is healthy.
-   *
-   * @param hostName       the host name
-   * @param componentName  the component name
-   *
-   * @return true if the host component is healthy
-   */
-  public boolean isHealthy(String hostName, String componentName);
-}
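This interface is the single seam between the static cluster definition and live health data; GSInstallerProviderModule above supplies the real implementation via its HTTP probe. A hypothetical in-memory stub, of the kind a unit test might use (assuming it sits in the same package as the interface):

    import java.util.HashSet;
    import java.util.Set;

    /** Hypothetical test stub: healthy unless a host component is explicitly failed. */
    class StubStateProvider implements GSInstallerStateProvider {
      private final Set<String> failed = new HashSet<String>();

      void fail(String hostName, String componentName) {
        failed.add(hostName + "." + componentName);
      }

      @Override
      public boolean isHealthy(String hostName, String componentName) {
        return !failed.contains(hostName + "." + componentName);
      }
    }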
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractPropertyProvider.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractPropertyProvider.java
deleted file mode 100644
index a6c6902..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractPropertyProvider.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.internal;
-
-import org.apache.ambari.server.controller.spi.PropertyProvider;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- *  Abstract property provider implementation.
- */
-public abstract class AbstractPropertyProvider extends BaseProvider implements PropertyProvider {
-
-  /**
-   * The property/metric information for this provider keyed by component name / property id.
-   */
-  private final Map<String, Map<String, PropertyInfo>> componentMetrics;
-
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Construct a provider.
-   *
-   * @param componentMetrics map of metrics for this provider
-   */
-  public AbstractPropertyProvider(Map<String, Map<String, PropertyInfo>> componentMetrics) {
-    super(PropertyHelper.getPropertyIds(componentMetrics));
-    this.componentMetrics = componentMetrics;
-  }
-
-
-  // ----- accessors ---------------------------------------------------------
-
-  /**
-   * Get the map of metrics for this provider.
-   *
-   * @return the map of metric / property info.
-   */
-  public Map<String, Map<String, PropertyInfo>> getComponentMetrics() {
-    return componentMetrics;
-  }
-
-
-  // ----- helper methods ----------------------------------------------------
-
-  /**
-   * Get a map of metric / property info based on the given component name and property id.
-   * Note that the property id may map to multiple metrics if the property id is a category.
-   *
-   * @param componentName  the component name
-   * @param propertyId     the property id; may be a category
-   *
-   * @return a map of metrics
-   */
-  protected Map<String, PropertyInfo> getPropertyInfoMap(String componentName, String propertyId) {
-    Map<String, PropertyInfo> componentMetricMap = componentMetrics.get(componentName);
-    if (componentMetricMap == null) {
-      return Collections.emptyMap();
-    }
-
-    PropertyInfo propertyInfo = componentMetricMap.get(propertyId);
-    if (propertyInfo != null) {
-      return Collections.singletonMap(propertyId, propertyInfo);
-    }
-
-    if (!propertyId.endsWith("/")) {
-      propertyId += "/";
-    }
-    Map<String, PropertyInfo> propertyInfoMap = new HashMap<String, PropertyInfo>();
-    for (Map.Entry<String, PropertyInfo> entry : componentMetricMap.entrySet()) {
-      if (entry.getKey().startsWith(propertyId)) {
-        propertyInfoMap.put(entry.getKey(), entry.getValue());
-      }
-    }
-    return propertyInfoMap;
-  }
-}
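getPropertyInfoMap above resolves a property id in two steps: an exact match first, then a prefix scan so that a category id such as "metrics/jvm" expands to every metric beneath it; appending the trailing "/" before scanning prevents "metrics/jvm" from also matching a sibling like "metrics/jvmx". A condensed sketch of the same lookup with plain strings standing in for PropertyInfo (the metric ids are illustrative):

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    class MetricLookup {
      /** Exact match wins; otherwise expand a category id by prefix scan. */
      static Map<String, String> lookup(Map<String, String> metrics, String propertyId) {
        String exact = metrics.get(propertyId);
        if (exact != null) {
          return Collections.singletonMap(propertyId, exact);
        }
        String prefix = propertyId.endsWith("/") ? propertyId : propertyId + "/";
        Map<String, String> result = new HashMap<String, String>();
        for (Map.Entry<String, String> entry : metrics.entrySet()) {
          if (entry.getKey().startsWith(prefix)) {
            result.put(entry.getKey(), entry.getValue());
          }
        }
        return result;
      }
    }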
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java
deleted file mode 100644
index 1a3cf9c..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java
+++ /dev/null
@@ -1,465 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.internal;
-
-import org.apache.ambari.server.controller.AmbariServer;
-import org.apache.ambari.server.controller.ganglia.GangliaComponentPropertyProvider;
-import org.apache.ambari.server.controller.ganglia.GangliaHostComponentPropertyProvider;
-import org.apache.ambari.server.controller.ganglia.GangliaHostPropertyProvider;
-import org.apache.ambari.server.controller.ganglia.GangliaReportPropertyProvider;
-import org.apache.ambari.server.controller.ganglia.GangliaHostProvider;
-import org.apache.ambari.server.controller.jmx.JMXHostProvider;
-import org.apache.ambari.server.controller.jmx.JMXPropertyProvider;
-import org.apache.ambari.server.controller.spi.*;
-import org.apache.ambari.server.controller.utilities.PredicateBuilder;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import com.google.inject.Inject;
-import org.apache.ambari.server.state.Service;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.*;
-
-/**
- * An abstract provider module implementation.
- */
-public abstract class AbstractProviderModule implements ProviderModule, ResourceProviderObserver, JMXHostProvider, GangliaHostProvider {
-
-  private static final String CLUSTER_NAME_PROPERTY_ID                  = PropertyHelper.getPropertyId("Clusters", "cluster_name");
-  private static final String HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID   = PropertyHelper.getPropertyId("HostRoles", "cluster_name");
-  private static final String HOST_COMPONENT_HOST_NAME_PROPERTY_ID      = PropertyHelper.getPropertyId("HostRoles", "host_name");
-  private static final String HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID = PropertyHelper.getPropertyId("HostRoles", "component_name");
-  private static final String GANGLIA_SERVER                            = "GANGLIA_SERVER";
-  private static final String PROPERTIES_CATEGORY = "properties";
-  private static final Map<Service.Type, String> serviceConfigVersions =
-    Collections.synchronizedMap(new HashMap<Service.Type, String>());
-  private static final Map<Service.Type, String> serviceConfigTypes = new HashMap<Service.Type, String>();
-  private static final Map<Service.Type, Map<String, String>> serviceDesiredProperties = new
-    HashMap<Service.Type, Map<String, String>>();
-  private static final Map<String, Service.Type> componentServiceMap = new
-    HashMap<String, Service.Type>();
-
-  static {
-    serviceConfigTypes.put(Service.Type.HDFS, "hdfs-site");
-    serviceConfigTypes.put(Service.Type.MAPREDUCE, "mapred-site");
-    serviceConfigTypes.put(Service.Type.HBASE, "hbase-site");
-
-    componentServiceMap.put("NAMENODE", Service.Type.HDFS);
-    componentServiceMap.put("DATANODE", Service.Type.HDFS);
-    componentServiceMap.put("JOBTRACKER", Service.Type.MAPREDUCE);
-    componentServiceMap.put("TASKTRACKER", Service.Type.MAPREDUCE);
-    componentServiceMap.put("HBASE_MASTER", Service.Type.HBASE);
-
-    Map<String, String> initPropMap = new HashMap<String, String>();
-    initPropMap.put("NAMENODE", "dfs.http.address");
-    initPropMap.put("DATANODE", "dfs.datanode.http.address");
-    serviceDesiredProperties.put(Service.Type.HDFS, initPropMap);
-    initPropMap = new HashMap<String, String>();
-    initPropMap.put("JOBTRACKER", "mapred.job.tracker.http.address");
-    initPropMap.put("TASKTRACKER", "mapred.task.tracker.http.address");
-    serviceDesiredProperties.put(Service.Type.MAPREDUCE, initPropMap);
-    initPropMap = new HashMap<String, String>();
-    initPropMap.put("HBASE_MASTER", "hbase.master.info.port");
-    serviceDesiredProperties.put(Service.Type.HBASE, initPropMap);
-  }
-
-  /**
-   * The map of resource providers.
-   */
-  private final Map<Resource.Type, ResourceProvider> resourceProviders = new HashMap<Resource.Type, ResourceProvider>();
-
-  /**
-   * The map of lists of property providers.
-   */
-  private final Map<Resource.Type,List<PropertyProvider>> propertyProviders = new HashMap<Resource.Type, List<PropertyProvider>>();
-
-  @Inject
-  private AmbariManagementController managementController;
-
-  /**
-   * The map of host components.
-   */
-  private Map<String, Map<String, String>> clusterHostComponentMap;
-
-  /**
-   * The host name of the Ganglia collector.
-   */
-  private Map<String, String> clusterGangliaCollectorMap;
-
-  /**
-   * JMX ports read from the configs
-   */
-  private Map<String, Map<String, String>> jmxPortMap = Collections
-    .synchronizedMap(new HashMap<String, Map<String, String>>());
-
-  private volatile boolean initialized = false;
-
-  protected final static Logger LOG =
-      LoggerFactory.getLogger(AbstractProviderModule.class);
-
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Create a default provider module.
-   */
-  public AbstractProviderModule() {
-    if (managementController == null) {
-      managementController = AmbariServer.getController();
-    }
-  }
-
-
-  // ----- ProviderModule ----------------------------------------------------
-
-  @Override
-  public ResourceProvider getResourceProvider(Resource.Type type) {
-    if (!propertyProviders.containsKey(type)) {
-      registerResourceProvider(type);
-    }
-    return resourceProviders.get(type);
-  }
-
-  @Override
-  public List<PropertyProvider> getPropertyProviders(Resource.Type type) {
-
-    if (!propertyProviders.containsKey(type)) {
-      createPropertyProviders(type);
-    }
-    return propertyProviders.get(type);
-  }
-
-
-  // ----- ResourceProviderObserver ------------------------------------------
-
-  @Override
-  public void update(ResourceProviderEvent event) {
-    Resource.Type type = event.getResourceType();
-
-    if (type == Resource.Type.Cluster ||
-        type == Resource.Type.Host ||
-        type == Resource.Type.HostComponent) {
-      resetInit();
-    }
-  }
-
-
-  // ----- JMXHostProvider ---------------------------------------------------
-
-  @Override
-  public String getHostName(String clusterName, String componentName) throws SystemException {
-    checkInit();
-    return clusterHostComponentMap.get(clusterName).get(componentName);
-  }
-
-  @Override
-  public String getPort(String clusterName, String componentName) throws
-    SystemException {
-    Map<String,String> clusterJmxPorts = jmxPortMap.get(clusterName);
-    if (clusterJmxPorts == null) {
-      synchronized (jmxPortMap) {
-        // re-read under the lock; the earlier read may be stale and a map
-        // created by a concurrent caller must not be clobbered
-        clusterJmxPorts = jmxPortMap.get(clusterName);
-        if (clusterJmxPorts == null) {
-          clusterJmxPorts = new HashMap<String, String>();
-          jmxPortMap.put(clusterName, clusterJmxPorts);
-        }
-      }
-    }
-    Service.Type service = componentServiceMap.get(componentName);
-    if (service != null) {
-      try {
-        String currVersion = getDesiredConfigVersion(clusterName, service.name(),
-          serviceConfigTypes.get(service));
-
-        String oldVersion = serviceConfigVersions.get(service);
-        if (!currVersion.equals(oldVersion)) {
-          serviceConfigVersions.put(service, currVersion);
-          Map<String, String> portMap = getDesiredConfigMap(clusterName,
-            currVersion, serviceConfigTypes.get(service),
-            serviceDesiredProperties.get(service));
-          for (String compName : portMap.keySet()) {
-            clusterJmxPorts.put(compName, getPortString(portMap.get
-              (compName)));
-          }
-        }
-      } catch (Exception e) {
-        LOG.error("Exception initializing jmx port maps. " + e);
-      }
-    }
-
-    LOG.debug("jmxPortMap -> " + jmxPortMap);
-    return clusterJmxPorts.get(componentName);
-  }
-
-  // ----- GangliaHostProvider -----------------------------------------------
-
-  @Override
-  public String getGangliaCollectorHostName(String clusterName) throws SystemException {
-    checkInit();
-    return clusterGangliaCollectorMap.get(clusterName);
-  }
-
-
-  // ----- utility methods ---------------------------------------------------
-
-  protected abstract ResourceProvider createResourceProvider(Resource.Type type);
-
-  protected void registerResourceProvider(Resource.Type type) {
-    ResourceProvider resourceProvider = createResourceProvider(type);
-
-    if (resourceProvider instanceof ObservableResourceProvider) {
-      ((ObservableResourceProvider)resourceProvider).addObserver(this);
-    }
-
-    putResourceProvider(type, resourceProvider);
-  }
-
-  protected void putResourceProvider(Resource.Type type, ResourceProvider resourceProvider) {
-    resourceProviders.put( type , resourceProvider);
-  }
-
-  protected void putPropertyProviders(Resource.Type type, List<PropertyProvider> providers) {
-    propertyProviders.put(type, providers);
-  }
-
-  protected void createPropertyProviders(Resource.Type type) {
-
-    List<PropertyProvider> providers = new LinkedList<PropertyProvider>();
-
-    URLStreamProvider streamProvider = new URLStreamProvider();
-
-    switch (type){
-      case Cluster :
-        providers.add(new GangliaReportPropertyProvider(
-            PropertyHelper.getGangliaPropertyIds(type),
-            streamProvider,
-            this,
-            PropertyHelper.getPropertyId("Clusters", "cluster_name")));
-        break;
-      case Host :
-        providers.add(new GangliaHostPropertyProvider(
-            PropertyHelper.getGangliaPropertyIds(type),
-            streamProvider,
-            this,
-            PropertyHelper.getPropertyId("Hosts", "cluster_name"),
-            PropertyHelper.getPropertyId("Hosts", "host_name")
-        ));
-        break;
-      case Component :
-        providers.add(new JMXPropertyProvider(
-            PropertyHelper.getJMXPropertyIds(type),
-            streamProvider,
-            this,
-            PropertyHelper.getPropertyId("ServiceComponentInfo", "cluster_name"),
-            null,
-            PropertyHelper.getPropertyId("ServiceComponentInfo", "component_name")));
-
-        providers.add(new GangliaComponentPropertyProvider(
-            PropertyHelper.getGangliaPropertyIds(type),
-            streamProvider,
-            this,
-            PropertyHelper.getPropertyId("ServiceComponentInfo", "cluster_name"),
-            PropertyHelper.getPropertyId("ServiceComponentInfo", "component_name")));
-        break;
-      case HostComponent:
-        providers.add(new JMXPropertyProvider(
-            PropertyHelper.getJMXPropertyIds(type),
-            streamProvider,
-            this,
-            PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
-            PropertyHelper.getPropertyId("HostRoles", "host_name"),
-            PropertyHelper.getPropertyId("HostRoles", "component_name")));
-
-        providers.add(new GangliaHostComponentPropertyProvider(
-            PropertyHelper.getGangliaPropertyIds(type),
-            streamProvider,
-            this,
-            PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
-            PropertyHelper.getPropertyId("HostRoles", "host_name"),
-            PropertyHelper.getPropertyId("HostRoles", "component_name")));
-
-        providers.add(new HttpProxyPropertyProvider(
-            new URLStreamProvider(1500),
-            PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
-            PropertyHelper.getPropertyId("HostRoles", "host_name"),
-            PropertyHelper.getPropertyId("HostRoles", "component_name")));
-        
-        break;
-      default:
-        break;
-    }
-    putPropertyProviders(type, providers);
-  }
-
-  private void checkInit() throws SystemException {
-    if (!initialized) {
-      synchronized (this) {
-        if (!initialized) {
-          initProviderMaps();
-          initialized = true;
-        }
-      }
-    }
-  }
-
-  private void resetInit() {
-    if (initialized) {
-      synchronized (this) {
-        initialized = false;
-      }
-    }
-  }
-
-  private void initProviderMaps() throws SystemException {
-    ResourceProvider provider = getResourceProvider(Resource.Type.Cluster);
-    Request          request  = PropertyHelper.getReadRequest(CLUSTER_NAME_PROPERTY_ID);
-
-    try {
-      jmxPortMap = new HashMap<String, Map<String, String>>();
-      Set<Resource> clusters = provider.getResources(request, null);
-
-      clusterHostComponentMap    = new HashMap<String, Map<String, String>>();
-      clusterGangliaCollectorMap = new HashMap<String, String>();
-
-      for (Resource cluster : clusters) {
-
-        String clusterName = (String) cluster.getPropertyValue(CLUSTER_NAME_PROPERTY_ID);
-
-        // initialize the host component map and Ganglia server from the known host components...
-        provider = getResourceProvider(Resource.Type.HostComponent);
-
-        request = PropertyHelper.getReadRequest(HOST_COMPONENT_HOST_NAME_PROPERTY_ID,
-            HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID);
-
-        Predicate predicate = new PredicateBuilder().property(HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID).
-          equals(clusterName).toPredicate();
-
-        Set<Resource>       hostComponents   = provider.getResources(request, predicate);
-        Map<String, String> hostComponentMap = clusterHostComponentMap.get(clusterName);
-
-        if (hostComponentMap == null) {
-          hostComponentMap = new HashMap<String, String>();
-          clusterHostComponentMap.put(clusterName, hostComponentMap);
-        }
-
-        for (Resource hostComponent : hostComponents) {
-          String componentName = (String) hostComponent.getPropertyValue(HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID);
-          String hostName      = (String) hostComponent.getPropertyValue(HOST_COMPONENT_HOST_NAME_PROPERTY_ID);
-
-          hostComponentMap.put(componentName, hostName);
-
-          // record the Ganglia server for the current cluster
-          if (componentName.equals(GANGLIA_SERVER)) {
-            clusterGangliaCollectorMap.put(clusterName, hostName);
-          }
-        }
-      }
-    } catch (UnsupportedPropertyException e) {
-      if (LOG.isErrorEnabled()) {
-        LOG.error("Caught UnsupportedPropertyException while trying to get the host mappings.", e);
-      }
-      throw new SystemException("An exception occurred while initializing the host mappings: " + e, e);
-    } catch (NoSuchResourceException e) {
-      if (LOG.isErrorEnabled()) {
-        LOG.error("Caught NoSuchResourceException exception while trying to get the host mappings.", e);
-      }
-      throw new SystemException("An exception occurred while initializing the host mappings: " + e, e);
-    } catch (NoSuchParentResourceException e) {
-      if (LOG.isErrorEnabled()) {
-        LOG.error("Caught NoSuchParentResourceException exception while trying to get the host mappings.", e);
-      }
-      throw new SystemException("An exception occurred while initializing the host mappings: " + e, e);
-    }
-  }
-
-  private String getPortString(String value) {
-    return value != null && value.contains(":") ? value.substring
-      (value.lastIndexOf(":") + 1, value.length()) : value;
-  }
-
-  private String getDesiredConfigVersion(String clusterName,
-      String serviceName, String configType) throws
-      NoSuchParentResourceException, UnsupportedPropertyException,
-      SystemException {
-
-    // Get config version tag
-    ResourceProvider serviceResourceProvider = getResourceProvider(Resource.Type.Service);
-    Predicate basePredicate = new PredicateBuilder()
-      .property(ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID).equals(clusterName).and()
-      .property(ServiceResourceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID).equals(serviceName).toPredicate();
-
-    Set<Resource> serviceResource = null;
-    try {
-      serviceResource = serviceResourceProvider.getResources(
-        PropertyHelper.getReadRequest(ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID,
-          ServiceResourceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID,
-          ServiceResourceProvider.SERVICE_DESIRED_CONFIGS_PROPERTY_ID), basePredicate);
-    } catch (NoSuchResourceException e) {
-      LOG.error("Resource for the desired config not found. " + e);
-    }
-
-    String versionTag = "version1";
-    if (serviceResource != null) {
-      for (Resource res : serviceResource) {
-        Map<String, String> configs = (Map<String,
-          String>) res.getPropertyValue(ServiceResourceProvider.SERVICE_DESIRED_CONFIGS_PROPERTY_ID);
-        if (configs != null) {
-          versionTag = configs.get(configType);
-        }
-      }
-    }
-    return versionTag;
-  }
-
-  private Map<String, String> getDesiredConfigMap(String clusterName,
-      String versionTag, String configType, Map<String, String> keys) throws
-        NoSuchParentResourceException, UnsupportedPropertyException,
-        SystemException {
-    // Get desired configs based on the tag
-    ResourceProvider configResourceProvider = getResourceProvider(Resource.Type.Configuration);
-    Predicate configPredicate = new PredicateBuilder()
-      .property(ConfigurationResourceProvider.CONFIGURATION_CLUSTER_NAME_PROPERTY_ID).equals(clusterName).and()
-      .property(ConfigurationResourceProvider.CONFIGURATION_CONFIG_TYPE_PROPERTY_ID).equals(configType).and()
-      .property(ConfigurationResourceProvider.CONFIGURATION_CONFIG_TAG_PROPERTY_ID).equals(versionTag).toPredicate();
-    Set<Resource> configResources = null;
-    try {
-      configResources = configResourceProvider.getResources(
-        PropertyHelper.getReadRequest(ConfigurationResourceProvider.CONFIGURATION_CLUSTER_NAME_PROPERTY_ID,
-          ConfigurationResourceProvider.CONFIGURATION_CONFIG_TYPE_PROPERTY_ID,
-          ConfigurationResourceProvider.CONFIGURATION_CONFIG_TAG_PROPERTY_ID), configPredicate);
-    } catch (NoSuchResourceException e) {
-      LOG.info("Resource for the desired config not found. " + e);
-      return Collections.emptyMap();
-    }
-    Map<String, String> mConfigs = new HashMap<String, String>();
-    if (configResources != null) {
-      for (Resource res : configResources) {
-        for (String key : keys.keySet()) {
-          String value = (String) res.getPropertyValue(
-            PropertyHelper.getPropertyId(PROPERTIES_CATEGORY, keys.get(key)));
-          LOG.debug("PROPERTY -> key: " + keys.get(key) + ", value: " + value);
-
-          mConfigs.put(key, value);
-        }
-      }
-    }
-    return mConfigs;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractResourceProvider.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractResourceProvider.java
deleted file mode 100644
index 751b012..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractResourceProvider.java
+++ /dev/null
@@ -1,378 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.internal;
-
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.DuplicateResourceException;
-import org.apache.ambari.server.ObjectNotFoundException;
-import org.apache.ambari.server.ParentObjectNotFoundException;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.RequestStatusResponse;
-import org.apache.ambari.server.controller.predicate.BasePredicate;
-import org.apache.ambari.server.controller.spi.*;
-import org.apache.ambari.server.controller.utilities.PredicateHelper;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Abstract resource provider implementation that maps to an Ambari management controller.
- */
-public abstract class AbstractResourceProvider extends BaseProvider implements ResourceProvider, ObservableResourceProvider {
-
-  /**
-   * The management controller to delegate to.
-   */
-  private final AmbariManagementController managementController;
-
-  /**
-   * Key property mapping by resource type.
-   */
-  private final Map<Resource.Type, String> keyPropertyIds;
-
-  /**
-   * Observers of this observable resource provider.
-   */
-  private final Set<ResourceProviderObserver> observers = new HashSet<ResourceProviderObserver>();
-
-
-  protected final static Logger LOG =
-      LoggerFactory.getLogger(AbstractResourceProvider.class);
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Create a new resource provider for the given management controller.
-   *
-   * @param propertyIds           the property ids
-   * @param keyPropertyIds        the key property ids
-   * @param managementController  the management controller
-   */
-  protected AbstractResourceProvider(Set<String> propertyIds,
-                                     Map<Resource.Type, String> keyPropertyIds,
-                                     AmbariManagementController managementController) {
-    super(propertyIds);
-    this.keyPropertyIds       = keyPropertyIds;
-    this.managementController = managementController;
-  }
-
-
-  // ----- ResourceProvider --------------------------------------------------
-
-  @Override
-  public Map<Resource.Type, String> getKeyPropertyIds() {
-    return keyPropertyIds;
-  }
-
-
-  // ----- ObservableResourceProvider ----------------------------------------
-
-  @Override
-  public void updateObservers(ResourceProviderEvent event) {
-    for (ResourceProviderObserver observer : observers) {
-      observer.update(event);
-    }
-  }
-
-  @Override
-  public void addObserver(ResourceProviderObserver observer) {
-    observers.add(observer);
-  }
-
-
-  // ----- accessors ---------------------------------------------------------
-
-  /**
-   * Get the associated management controller.
-   *
-   * @return the associated management controller
-   */
-  protected AmbariManagementController getManagementController() {
-    return managementController;
-  }
-
-
-  // ----- utility methods ---------------------------------------------------
-
-  /**
-   * Get the set of property ids that uniquely identify the resources
-   * of this provider.
-   *
-   * @return the set of primary key properties
-   */
-  protected abstract Set<String> getPKPropertyIds();
-
-  /**
-   * Notify all listeners of a creation event.
-   *
-   * @param type     the type of the resources being created
-   * @param request  the request used to create the resources
-   */
-  protected void notifyCreate(Resource.Type type, Request request) {
-    updateObservers(new ResourceProviderEvent(type, ResourceProviderEvent.Type.Create, request, null));
-  }
-
-  /**
-   * Notify all listeners of an update event.
-   *
-   * @param type       the type of the resources being updated
-   * @param request    the request used to update the resources
-   * @param predicate  the predicate used to update the resources
-   */
-  protected void notifyUpdate(Resource.Type type, Request request, Predicate predicate) {
-    updateObservers(new ResourceProviderEvent(type, ResourceProviderEvent.Type.Update, request, predicate));
-  }
-
-  /**
-   * Notify all listeners of a delete event.
-   *
-   * @param type       the type of the resources being deleted
-   * @param predicate  the predicate used to delete the resources
-   */
-  protected void notifyDelete(Resource.Type type, Predicate predicate) {
-    updateObservers(new ResourceProviderEvent(type, ResourceProviderEvent.Type.Delete, null, predicate));
-  }
-
-  /**
-   * Get a set of properties from the given predicate.  The returned set of
-   * property/value mappings is required to generate delete or get requests
-   * to the back end, which does not deal with predicates.  Note that a
-   * single predicate can result in multiple backend requests.
-   *
-   * @param givenPredicate           the predicate
-   *
-   * @return the set of properties used to build request objects
-   */
-  protected Set<Map<String, Object>> getPropertyMaps(Predicate givenPredicate)
-    throws UnsupportedPropertyException, SystemException, NoSuchResourceException, NoSuchParentResourceException {
-
-    SimplifyingPredicateVisitor visitor = new SimplifyingPredicateVisitor(getPropertyIds());
-    PredicateHelper.visit(givenPredicate, visitor);
-    List<BasePredicate> predicates = visitor.getSimplifiedPredicates();
-
-    Set<Map<String, Object>> propertyMaps = new HashSet<Map<String, Object>>();
-
-    for (BasePredicate predicate : predicates) {
-      propertyMaps.add(PredicateHelper.getProperties(predicate));
-    }
-    return propertyMaps;
-  }
-
-  /**
-   * Get a set of properties from the given property map and predicate.  The
-   * returned set of property/value mappings is required to generate update or create
-   * requests to the back end, which does not deal with predicates.  Note that
-   * a single property map and predicate can result in multiple backend requests.
-   *
-   * @param requestPropertyMap  the request properties (for update)
-   * @param givenPredicate           the predicate
-   *
-   * @return the set of properties used to build request objects
-   */
-  protected Set<Map<String, Object>> getPropertyMaps(Map<String, Object> requestPropertyMap,
-                                                         Predicate givenPredicate)
-      throws UnsupportedPropertyException, SystemException, NoSuchResourceException, NoSuchParentResourceException {
-
-    Set<Map<String, Object>> propertyMaps = new HashSet<Map<String, Object>>();
-
-    Set<String> pkPropertyIds = getPKPropertyIds();
-    if (requestPropertyMap != null && !pkPropertyIds.equals(PredicateHelper.getPropertyIds(givenPredicate))) {
-
-      for (Resource resource : getResources(PropertyHelper.getReadRequest(pkPropertyIds), givenPredicate)) {
-        Map<String, Object> propertyMap = new HashMap<String, Object>(PropertyHelper.getProperties(resource));
-        propertyMap.putAll(requestPropertyMap);
-        propertyMaps.add(propertyMap);
-      }
-    }
-    else {
-      Map<String, Object> propertyMap = new HashMap<String, Object>(PredicateHelper.getProperties(givenPredicate));
-      propertyMap.putAll(requestPropertyMap);
-      propertyMaps.add(propertyMap);
-    }
-
-    return propertyMaps;
-  }
-
-  /**
-   * Get a request status from the given response.
-   *
-   * @param response  the request status response
-   *
-   * @return the request status
-   */
-  protected RequestStatus getRequestStatus(RequestStatusResponse response) {
-    if (response != null){
-      Resource requestResource = new ResourceImpl(Resource.Type.Request);
-      requestResource.setProperty(PropertyHelper.getPropertyId("Requests", "id"), response.getRequestId());
-      requestResource.setProperty(PropertyHelper.getPropertyId("Requests", "status"), "InProgress");
-      return new RequestStatusImpl(requestResource);
-    }
-    return new RequestStatusImpl(null);
-  }
-
-  /**
-   * Invoke a command against the Ambari backend to create resources and map
-   * any {@link AmbariException} to the types appropriate for the
-   * {@link ResourceProvider} interface.
-   *
-   * @param command  the command to invoke
-   * @param <T>      the type of the response
-   *
-   * @return the response
-   *
-   * @throws SystemException                thrown if a system exception occurred
-   * @throws ResourceAlreadyExistsException thrown if a resource already exists
-   * @throws NoSuchParentResourceException  thrown if a parent of a resource doesn't exist
-   */
-  protected <T> T createResources(Command<T> command)
-      throws SystemException, ResourceAlreadyExistsException, NoSuchParentResourceException {
-    try {
-      return command.invoke();
-    } catch (ParentObjectNotFoundException e) {
-      throw new NoSuchParentResourceException(e.getMessage(), e);
-    } catch (DuplicateResourceException e) {
-      throw new ResourceAlreadyExistsException(e.getMessage());
-    } catch (AmbariException e) {
-      if (LOG.isErrorEnabled()) {
-        LOG.error("Caught AmbariException when creating a resource", e);
-      }
-      throw new SystemException("An internal system exception occurred: " + e.getMessage(), e);
-    }
-  }
-
-  /**
-   * Invoke a command against the Ambari backend to get resources and map
-   * any {@link AmbariException} to the types appropriate for the
-   * {@link ResourceProvider} interface.
-   *
-   * @param command  the command to invoke
-   * @param <T>      the type of the response
-   *
-   * @return the response
-   *
-   * @throws SystemException                thrown if a system exception occurred
-   * @throws NoSuchResourceException        thrown if the requested resource doesn't exist
-   * @throws NoSuchParentResourceException  thrown if a parent of a resource doesn't exist
-   */
-  protected <T> T getResources (Command<T> command)
-      throws SystemException, NoSuchResourceException, NoSuchParentResourceException {
-    try {
-      return command.invoke();
-    } catch (ObjectNotFoundException e) {
-      throw new NoSuchResourceException("The requested resource doesn't exist: " + e.getMessage(), e);
-    } catch (ParentObjectNotFoundException e) {
-      throw new NoSuchParentResourceException(e.getMessage(), e);
-    } catch (AmbariException e) {
-      if (LOG.isErrorEnabled()) {
-        LOG.error("Caught AmbariException when getting a resource", e);
-      }
-      throw new SystemException("An internal system exception occurred: " + e.getMessage(), e);
-    }
-  }
-
-  /**
-   * Invoke a command against the Ambari backend to modify resources and map
-   * any {@link AmbariException} to the types appropriate for the
-   * {@link ResourceProvider} interface.
-   *
-   * @param command  the command to invoke
-   * @param <T>      the type of the response
-   *
-   * @return the response
-   *
-   * @throws SystemException                thrown if a system exception occurred
-   * @throws NoSuchResourceException        thrown if the specified resource doesn't exist
-   * @throws NoSuchParentResourceException  thrown if a parent of a resource doesn't exist
-   */
-  protected <T> T modifyResources (Command<T> command)
-      throws SystemException, NoSuchResourceException, NoSuchParentResourceException {
-    try {
-      return command.invoke();
-    } catch (ObjectNotFoundException e) {
-      throw new NoSuchResourceException("The specified resource doesn't exist: " + e.getMessage(), e);
-    } catch (ParentObjectNotFoundException e) {
-      throw new NoSuchParentResourceException(e.getMessage(), e);
-    } catch (AmbariException e) {
-      if (LOG.isErrorEnabled()) {
-        LOG.error("Caught AmbariException when modifying a resource", e);
-      }
-      throw new SystemException("An internal system exception occurred: " + e.getMessage(), e);
-    }
-  }
-
-  /**
-   * Factory method for obtaining a resource provider based on a given type and management controller.
-   *
-   * @param type                  the resource type
-   * @param propertyIds           the property ids
-   * @param keyPropertyIds        the key property ids
-   * @param managementController  the management controller
-   *
-   * @return a new resource provider
-   */
-  public static ResourceProvider getResourceProvider(Resource.Type type,
-                                                     Set<String> propertyIds,
-                                                     Map<Resource.Type, String> keyPropertyIds,
-                                                     AmbariManagementController managementController) {
-    switch (type) {
-      case Cluster:
-        return new ClusterResourceProvider(propertyIds, keyPropertyIds, managementController);
-      case Service:
-        return new ServiceResourceProvider(propertyIds, keyPropertyIds, managementController);
-      case Component:
-        return new ComponentResourceProvider(propertyIds, keyPropertyIds, managementController);
-      case Host:
-        return new HostResourceProvider(propertyIds, keyPropertyIds, managementController);
-      case HostComponent:
-        return new HostComponentResourceProvider(propertyIds, keyPropertyIds, managementController);
-      case Configuration:
-        return new ConfigurationResourceProvider(propertyIds, keyPropertyIds, managementController);
-      case Action:
-        return new ActionResourceProvider(propertyIds, keyPropertyIds, managementController);
-      case Request:
-        return new RequestResourceProvider(propertyIds, keyPropertyIds, managementController);
-      case Task:
-        return new TaskResourceProvider(propertyIds, keyPropertyIds, managementController);
-      case User:
-        return new UserResourceProvider(propertyIds, keyPropertyIds, managementController);
-      default:
-        throw new IllegalArgumentException("Unknown type " + type);
-    }
-  }
-
-
-  // ----- Inner interface ---------------------------------------------------
-
-  /**
-   * Command to invoke against the Ambari backend.
-   *
-   * @param <T>  the response type
-   */
-  protected interface Command<T> {
-    /**
-     * Invoke this command.
-     *
-     * @return  the response
-     *
-     * @throws AmbariException thrown if a problem occurred during invocation
-     */
-    public T invoke() throws AmbariException;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ActionResourceProvider.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ActionResourceProvider.java
deleted file mode 100644
index 7f0891f..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ActionResourceProvider.java
+++ /dev/null
@@ -1,137 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.internal;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.controller.ActionRequest;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.spi.*;
-import org.apache.ambari.server.controller.RequestStatusResponse;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-
-/**
- * Resource provider for action resources.
- */
-class ActionResourceProvider extends AbstractResourceProvider {
-
-  // ----- Property ID constants ---------------------------------------------
-
-  // Actions
-  protected static final String ACTION_CLUSTER_NAME_PROPERTY_ID = PropertyHelper.getPropertyId("Actions", "cluster_name");
-  protected static final String ACTION_SERVICE_NAME_PROPERTY_ID = PropertyHelper.getPropertyId("Actions", "service_name");
-  protected static final String ACTION_ACTION_NAME_PROPERTY_ID = PropertyHelper.getPropertyId("Actions", "action_name");
-
-
-  private static Set<String> pkPropertyIds =
-      new HashSet<String>(Arrays.asList(new String[]{
-          ACTION_CLUSTER_NAME_PROPERTY_ID,
-          ACTION_SERVICE_NAME_PROPERTY_ID}));
-
-  ActionResourceProvider(Set<String> propertyIds,
-                         Map<Resource.Type, String> keyPropertyIds,
-                         AmbariManagementController managementController) {
-
-    super(propertyIds, keyPropertyIds, managementController);
-  }
-
-  @Override
-  public RequestStatus createResources(Request request)
-      throws SystemException,
-             UnsupportedPropertyException,
-             ResourceAlreadyExistsException,
-             NoSuchParentResourceException {
-
-    final Set<ActionRequest> requests = new HashSet<ActionRequest>();
-    for (Map<String, Object> propertyMap : request.getProperties()) {
-      requests.add(getRequest(propertyMap));
-    }
-    return getRequestStatus(createResources(new Command<RequestStatusResponse>() {
-      @Override
-      public RequestStatusResponse invoke() throws AmbariException {
-        return getManagementController().createActions(requests);
-      }
-    }));
-  }
-
-  @Override
-  public Set<Resource> getResources(Request request, Predicate predicate)
-      throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-    throw new UnsupportedOperationException("Not currently supported.");
-  }
-
-  @Override
-  public RequestStatus updateResources(Request request, Predicate predicate)
-      throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-    throw new UnsupportedOperationException("Not currently supported.");
-  }
-
-  @Override
-  public RequestStatus deleteResources(Predicate predicate)
-      throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-    throw new UnsupportedOperationException("Not currently supported.");
-  }
-
-  @Override
-  public Set<String> checkPropertyIds(Set<String> propertyIds) {
-    propertyIds = super.checkPropertyIds(propertyIds);
-
-    if (propertyIds.isEmpty()) {
-      return propertyIds;
-    }
-    Set<String> unsupportedProperties = new HashSet<String>();
-
-    for (String propertyId : propertyIds) {
-      String propertyCategory = PropertyHelper.getPropertyCategory(propertyId);
-      if (propertyCategory == null || !propertyCategory.equals("parameters")) {
-        unsupportedProperties.add(propertyId);
-      }
-    }
-    return unsupportedProperties;
-  }
-
-  @Override
-  protected Set<String> getPKPropertyIds() {
-    return pkPropertyIds;
-  }
-
-  private ActionRequest getRequest(Map<String, Object> properties) {
-    Map<String, String> params = new HashMap<String, String>();
-    for (Entry<String, Object> entry : properties.entrySet()) {
-      String propertyId = entry.getKey();
-
-      String propertyCategory = PropertyHelper.getPropertyCategory(propertyId);
-      if (propertyCategory != null &&
-          propertyCategory.equals("parameters") &&
-          entry.getValue() != null) {
-        params.put(PropertyHelper.getPropertyName(propertyId), entry.getValue().toString());
-      }
-    }
-    return new ActionRequest(
-        (String)  properties.get(ACTION_CLUSTER_NAME_PROPERTY_ID),
-        (String)  properties.get(ACTION_SERVICE_NAME_PROPERTY_ID),
-        (String)  properties.get(ACTION_ACTION_NAME_PROPERTY_ID),
-        params);
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BaseProvider.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BaseProvider.java
deleted file mode 100644
index 1b7f49f..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BaseProvider.java
+++ /dev/null
@@ -1,245 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.internal;
-
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.PredicateHelper;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Base provider implementation for both property and resource providers.
- */
-public abstract class BaseProvider {
-
-  /**
-   * Set of property ids supported by this provider.
-   */
-  private final Set<String> propertyIds;
-
-  /**
-   * Set of category ids supported by this provider.
-   */
-  private final Set<String> categoryIds;
-
-  /**
-   * Combined property and category ids.
-   */
-  private final Set<String> combinedIds;
-
-  /**
-   * The logger.
-   */
-  protected final static Logger LOG =
-      LoggerFactory.getLogger(BaseProvider.class);
-
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Construct a provider.
-   *
-   * @param propertyIds  the properties associated with this provider
-   */
-  public BaseProvider(Set<String> propertyIds) {
-    this.propertyIds = new HashSet<String>(propertyIds);
-    this.categoryIds = PropertyHelper.getCategories(propertyIds);
-    this.combinedIds = new HashSet<String>(propertyIds);
-    this.combinedIds.addAll(this.categoryIds);
-  }
-
-
-  // ----- BaseProvider --------------------------------------------------
-
-  public Set<String> checkPropertyIds(Set<String> propertyIds) {
-    if (!this.propertyIds.containsAll(propertyIds)) {
-      Set<String> unsupportedPropertyIds = new HashSet<String>(propertyIds);
-      unsupportedPropertyIds.removeAll(this.combinedIds);
-
-      // If the property id is not in the set of known property ids, we may still allow it if
-      // its parent category is a known property. This allows for Map type properties where
-      // we want to treat the property as a category and its entries as individual properties.
-      Set<String> categoryProperties = new HashSet<String>();
-      for (String unsupportedPropertyId : unsupportedPropertyIds) {
-        String category = PropertyHelper.getPropertyCategory(unsupportedPropertyId);
-        while (category != null) {
-          if (this.propertyIds.contains(category)) {
-            categoryProperties.add(unsupportedPropertyId);
-          }
-          category = PropertyHelper.getPropertyCategory(category);
-        }
-      }
-      unsupportedPropertyIds.removeAll(categoryProperties);
-
-      return unsupportedPropertyIds;
-    }
-    return Collections.emptySet();
-  }
-
-  /**
-   * Get the set of property ids required to satisfy the given request.
-   *
-   * @param request              the request
-   * @param predicate            the predicate
-   *
-   * @return the set of property ids needed to satisfy the request
-   */
-  protected Set<String> getRequestPropertyIds(Request request, Predicate predicate) {
-    Set<String> propertyIds  = request.getPropertyIds();
-
-    // if no properties are specified, then return them all
-    if (propertyIds == null || propertyIds.isEmpty()) {
-      return new HashSet<String>(this.propertyIds);
-    }
-
-    propertyIds = new HashSet<String>(propertyIds);
-
-    if (predicate != null) {
-      propertyIds.addAll(PredicateHelper.getPropertyIds(predicate));
-    }
-
-    if (!this.combinedIds.containsAll(propertyIds)) {
-      Set<String> keepers = new HashSet<String>();
-      Set<String> unsupportedPropertyIds = new HashSet<String>(propertyIds);
-      unsupportedPropertyIds.removeAll(this.combinedIds);
-
-      // Add the categories to account for map properties where the entries will not be
-      // in the provider property list ids but the map (category) might be.
-      for (String unsupportedPropertyId : unsupportedPropertyIds) {
-        String category = PropertyHelper.getPropertyCategory(unsupportedPropertyId);
-        while (category != null) {
-          if (this.propertyIds.contains(category)) {
-            keepers.add(unsupportedPropertyId);
-            break;
-          }
-          category = PropertyHelper.getPropertyCategory(category);
-        }
-      }
-      propertyIds.retainAll(this.combinedIds);
-      propertyIds.addAll(keepers);
-    }
-    return propertyIds;
-  }
-
-  /**
-   * Set a property value on the given resource for the given id and value.
-   * Make sure that the id is in the given set of requested ids.
-   *
-   * @param resource      the resource
-   * @param propertyId    the property id
-   * @param value         the value to set
-   * @param requestedIds  the requested set of property ids
-   */
-  protected static boolean setResourceProperty(Resource resource, String propertyId, Object value, Set<String> requestedIds) {
-    boolean contains = requestedIds.contains(propertyId);
-
-    if (!contains) {
-      String category = PropertyHelper.getPropertyCategory(propertyId);
-      while (category != null && !contains) {
-        contains = requestedIds.contains(category);
-        category = PropertyHelper.getPropertyCategory(category);
-      }
-    }
-
-    if (contains) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Setting property for resource"
-            + ", resourceType=" + resource.getType()
-            + ", propertyId=" + propertyId
-            + ", value=" + value);
-      }
-
-      // If the value is a Map then set all of its entries as properties
-      if (!setResourceMapProperty(resource, propertyId, value)){
-        resource.setProperty(propertyId, value);
-      }
-    }
-    else {
-
-      if (value instanceof Map<?, ?>) {
-        // This map wasn't requested, but maybe some of its entries were...
-        Map<?, ?> mapValue = (Map) value;
-
-        for (Map.Entry entry : mapValue.entrySet()) {
-          String entryPropertyId = PropertyHelper.getPropertyId(propertyId, entry.getKey().toString());
-          Object entryValue      = entry.getValue();
-
-          contains = setResourceProperty(resource, entryPropertyId, entryValue, requestedIds) || contains;
-        }
-      }
-
-      if (!contains && LOG.isDebugEnabled()) {
-        LOG.debug("Skipping property for resource as not in requestedIds"
-            + ", resourceType=" + resource.getType()
-            + ", propertyId=" + propertyId
-            + ", value=" + value);
-      }
-    }
-    return contains;
-  }
-
-  /**
-   * If the given value is a Map then add its entries to the resource as properties.
-   *
-   * @param resource    the resource
-   * @param propertyId  the property id of the given value
-   * @param value       the property value
-   */
-  private static boolean setResourceMapProperty(Resource resource, String propertyId, Object value) {
-    if (value instanceof Map<?, ?>) {
-      Map<?, ?> mapValue = (Map) value;
-
-      if (mapValue.isEmpty()) {
-        resource.addCategory(propertyId);
-      } else {
-        for (Map.Entry entry : mapValue.entrySet()) {
-          String entryPropertyId = PropertyHelper.getPropertyId(propertyId, entry.getKey().toString());
-          Object entryValue      = entry.getValue();
-
-          // If the value is a Map then set all of its entries as properties
-          if (!setResourceMapProperty(resource, entryPropertyId, entryValue)){
-            resource.setProperty(entryPropertyId, entryValue);
-          }
-        }
-      }
-      return true;
-    }
-    return false;
-  }
-
-
-  // ----- accessors ---------------------------------------------------------
-
-  /**
-   * Get the property ids supported by this provider.
-   *
-   * @return the property ids supported by this provider
-   */
-  public Set<String> getPropertyIds() {
-    return propertyIds;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterControllerImpl.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterControllerImpl.java
deleted file mode 100644
index 6091169..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterControllerImpl.java
+++ /dev/null
@@ -1,479 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.internal;
-
-import org.apache.ambari.server.controller.spi.ProviderModule;
-import org.apache.ambari.server.controller.spi.*;
-import org.apache.ambari.server.controller.utilities.PredicateBuilder;
-import org.apache.ambari.server.controller.utilities.PredicateHelper;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.NoSuchElementException;
-import java.util.Set;
-
-/**
- * Default cluster controller implementation.
- */
-public class ClusterControllerImpl implements ClusterController {
-  private final static Logger LOG =
-      LoggerFactory.getLogger(ClusterControllerImpl.class);
-  
-  /**
-   * Module of providers for this controller.
-   */
-  private final ProviderModule providerModule;
-
-  /**
-   * Map of resource providers keyed by resource type.
-   */
-  private final Map<Resource.Type, ResourceProvider> resourceProviders =
-      new HashMap<Resource.Type, ResourceProvider>();
-
-  /**
-   * Map of property provider lists keyed by resource type.
-   */
-  private final Map<Resource.Type, List<PropertyProvider>> propertyProviders =
-      new HashMap<Resource.Type, List<PropertyProvider>>();
-
-  /**
-   * Map of schemas keyed by resource type.
-   */
-  private final Map<Resource.Type, Schema> schemas =
-      new HashMap<Resource.Type, Schema>();
-
-
-  // ----- Constructors ------------------------------------------------------
-
-  public ClusterControllerImpl(ProviderModule providerModule) {
-    this.providerModule = providerModule;
-  }
-
-
-  // ----- ClusterController -------------------------------------------------
-
-  @Override
-  public Iterable<Resource> getResources(Resource.Type type, Request request, Predicate predicate)
-      throws UnsupportedPropertyException,
-             SystemException,
-             NoSuchParentResourceException,
-             NoSuchResourceException {
-
-    ResourceProvider provider = ensureResourceProvider(type);
-    ensurePropertyProviders(type);
-    Set<Resource> resources;
-
-    if (provider == null) {
-      resources = Collections.emptySet();
-    } else {
-      LOG.info("Using resource provider "
-          + provider.getClass().getName()
-          + " for request type " + type.toString());
-
-      checkProperties(type, request, predicate);
-
-      resources = provider.getResources(request, predicate);
-      resources = populateResources(type, resources, request, predicate);
-    }
-    
-    return new ResourceIterable(resources, predicate);
-  }
-
-  @Override
-  public Schema getSchema(Resource.Type type) {
-    Schema schema;
-
-    synchronized (schemas) {
-      schema = schemas.get(type);
-      if (schema == null) {
-        schema = new SchemaImpl(ensureResourceProvider(type));
-        schemas.put(type, schema);
-      }
-    }
-    return schema;
-  }
-
-  @Override
-  public RequestStatus createResources(Resource.Type type, Request request)
-      throws UnsupportedPropertyException,
-             SystemException,
-             ResourceAlreadyExistsException,
-             NoSuchParentResourceException {
-
-    ResourceProvider provider = ensureResourceProvider(type);
-    if (provider != null) {
-
-      checkProperties(type, request, null);
-
-      return provider.createResources(request);
-    }
-    return null;
-  }
-
-  @Override
-  public RequestStatus updateResources(Resource.Type type, Request request, Predicate predicate)
-      throws UnsupportedPropertyException,
-             SystemException,
-             NoSuchResourceException,
-             NoSuchParentResourceException {
-
-    ResourceProvider provider = ensureResourceProvider(type);
-    if (provider != null) {
-
-      if (!checkProperties(type, request, predicate)) {
-        predicate = resolvePredicate(type, predicate);
-        if (predicate == null) {
-          return null;
-        }
-      }
-      return provider.updateResources(request, predicate);
-    }
-    return null;
-  }
-
-  @Override
-  public RequestStatus deleteResources(Resource.Type type, Predicate predicate)
-      throws UnsupportedPropertyException,
-             SystemException,
-             NoSuchResourceException,
-             NoSuchParentResourceException {
-
-    ResourceProvider provider = ensureResourceProvider(type);
-    if (provider != null) {
-      if (!checkProperties(type, null, predicate)) {
-        predicate = resolvePredicate(type, predicate);
-        if (predicate == null) {
-          return null;
-        }
-      }
-      return provider.deleteResources(predicate);
-    }
-    return null;
-  }
-
-
-  // ----- helper methods ----------------------------------------------------
-
-  /**
-   * Check to make sure that all the property ids specified in the given request and
-   * predicate are supported by the resource provider or property providers for the
-   * given type.
-   *
-   * @param type       the resource type
-   * @param request    the request
-   * @param predicate  the predicate
-   *
-   * @return true if all of the properties specified in the request and predicate are supported by
-   *         the resource provider for the given type; false if any of the properties specified in
-   *         the request and predicate are not supported by the resource provider but are supported
-   *         by a property provider for the given type.
-   *
-   * @throws UnsupportedPropertyException thrown if any of the properties specified in the request
-   *                                      and predicate are not supported by either the resource
-   *                                      provider or a property provider for the given type
-   */
-  private boolean checkProperties(Resource.Type type, Request request, Predicate predicate)
-      throws UnsupportedPropertyException {
-    Set<String> requestPropertyIds = request == null ? new HashSet<String>() :
-        PropertyHelper.getAssociatedPropertyIds(request);
-
-    if (predicate != null) {
-      requestPropertyIds.addAll(PredicateHelper.getPropertyIds(predicate));
-    }
-
-    if (requestPropertyIds.size() > 0) {
-      ResourceProvider provider = ensureResourceProvider(type);
-      requestPropertyIds = provider.checkPropertyIds(requestPropertyIds);
-
-      if (requestPropertyIds.size() > 0) {
-        List<PropertyProvider> propertyProviders = ensurePropertyProviders(type);
-        for (PropertyProvider propertyProvider : propertyProviders) {
-          requestPropertyIds = propertyProvider.checkPropertyIds(requestPropertyIds);
-          if (requestPropertyIds.size() == 0) {
-            return false;
-          }
-        }
-        throw new UnsupportedPropertyException(type, requestPropertyIds);
-      }
-    }
-    return true;
-  }
-
-  /**
-   * Check to see if any of the property ids specified in the given request and
-   * predicate are handled by an associated property provider.  If so, then use
-   * the given predicate to obtain a new predicate that can be completely
-   * processed by an update or delete operation on a resource provider for
-   * the given resource type.  This means that the new predicate should only
-   * reference the key property ids for this type.
-   *
-   * @param type       the resource type
-   * @param predicate  the predicate
-   *
-   * @return the given predicate if a new one is not required; a new predicate if required
-   *
-   * @throws UnsupportedPropertyException thrown if any of the properties specified in the request
-   *                                      and predicate are not supported by either the resource
-   *                                      provider or a property provider for the given type
-   *
-   * @throws SystemException thrown for internal exceptions
-   * @throws NoSuchResourceException if the resource that is requested doesn't exist
-   * @throws NoSuchParentResourceException if a parent resource of the requested resource doesn't exist
-   */
-  private Predicate resolvePredicate(Resource.Type type, Predicate predicate)
-    throws UnsupportedPropertyException,
-        SystemException,
-        NoSuchResourceException,
-        NoSuchParentResourceException {
-
-    ResourceProvider provider = ensureResourceProvider(type);
-
-    Set<String>  keyPropertyIds = new HashSet<String>(provider.getKeyPropertyIds().values());
-    Request      readRequest    = PropertyHelper.getReadRequest(keyPropertyIds);
-
-    Iterable<Resource> resources = getResources(type, readRequest, predicate);
-
-    PredicateBuilder pb = new PredicateBuilder();
-    PredicateBuilder.PredicateBuilderWithPredicate pbWithPredicate = null;
-
-    for (Resource resource : resources) {
-      if (pbWithPredicate != null) {
-        pb = pbWithPredicate.or();
-      }
-
-      pb              = pb.begin();
-      pbWithPredicate = null;
-
-      for (String keyPropertyId : keyPropertyIds) {
-        if (pbWithPredicate != null) {
-          pb = pbWithPredicate.and();
-        }
-        pbWithPredicate =
-            pb.property(keyPropertyId).equals((Comparable) resource.getPropertyValue(keyPropertyId));
-      }
-      if (pbWithPredicate != null) {
-        pbWithPredicate = pbWithPredicate.end();
-      }
-    }
-    return pbWithPredicate == null ? null : pbWithPredicate.toPredicate();
-  }
-
-  /**
-   * Populate the given resources from the associated property providers.  This
-   * method may filter the resources based on the predicate and return a subset
-   * of the given resources.
-   *
-   * @param type       the resource type
-   * @param resources  the resources to be populated
-   * @param request    the request
-   * @param predicate  the predicate
-   *
-   * @return the set of resources that were successfully populated
-   *
-   * @throws SystemException if unable to populate the resources
-   */
-  private Set<Resource> populateResources(Resource.Type type,
-                                          Set<Resource> resources,
-                                          Request request,
-                                          Predicate predicate) throws SystemException {
-    Set<Resource> keepers = resources;
-
-    for (PropertyProvider propertyProvider : propertyProviders.get(type)) {
-      if (providesRequestProperties(propertyProvider, request, predicate)) {
-        keepers = propertyProvider.populateResources(keepers, request, predicate);
-      }
-    }
-    return keepers;
-  }
-
-  /**
-   * Indicates whether or not the given property provider can service the given request.
-   *
-   * @param provider   the property provider
-   * @param request    the request
-   * @param predicate  the predicate
-   *
-   * @return true if the given provider can service the request
-   */
-  private boolean providesRequestProperties(PropertyProvider provider, Request request, Predicate predicate) {
-    Set<String> requestPropertyIds = new HashSet<String>(request.getPropertyIds());
-
-    if (requestPropertyIds.size() == 0) {
-      return true;
-    }
-    requestPropertyIds.addAll(PredicateHelper.getPropertyIds(predicate));
-
-    int size = requestPropertyIds.size();
-
-    return size > provider.checkPropertyIds(requestPropertyIds).size();
-  }
-
-  /**
-   * Get the resource provider for the given type, creating it if required.
-   *
-   * @param type  the resource type
-   *
-   * @return the resource provider
-   */
-  private ResourceProvider ensureResourceProvider(Resource.Type type) {
-    synchronized (resourceProviders) {
-      if (!resourceProviders.containsKey(type)) {
-        resourceProviders.put(type, providerModule.getResourceProvider(type));
-      }
-    }
-    return resourceProviders.get(type);
-  }
-
-  /**
-   * Get the list of property providers for the given type.
-   *
-   * @param type  the resource type
-   *
-   * @return the list of property providers
-   */
-  private List<PropertyProvider> ensurePropertyProviders(Resource.Type type) {
-    synchronized (propertyProviders) {
-      if (!propertyProviders.containsKey(type)) {
-        propertyProviders.put(type, providerModule.getPropertyProviders(type));
-      }
-    }
-    return propertyProviders.get(type);
-  }
-
-
-  // ----- ResourceIterable inner class --------------------------------------
-
-  private static class ResourceIterable implements Iterable<Resource> {
-
-    /**
-     * The resources to iterate over.
-     */
-    private final Set<Resource> resources;
-
-    /**
-     * The predicate used to filter the set.
-     */
-    private final Predicate predicate;
-
-    // ----- Constructors ----------------------------------------------------
-
-    /**
-     * Create a ResourceIterable.
-     *
-     * @param resources  the set of resources to iterate over
-     * @param predicate  the predicate used to filter the set of resources
-     */
-    private ResourceIterable(Set<Resource> resources, Predicate predicate) {
-      this.resources = resources;
-      this.predicate = predicate;
-    }
-
-    // ----- Iterable --------------------------------------------------------
-
-    @Override
-    public Iterator<Resource> iterator() {
-      return new ResourceIterator(resources, predicate);
-    }
-  }
-
-
-  // ----- ResourceIterator inner class --------------------------------------
-
-  private static class ResourceIterator implements Iterator<Resource> {
-
-    /**
-     * The underlying iterator.
-     */
-    private final Iterator<Resource> iterator;
-
-    /**
-     * The predicate used to filter the resource being iterated over.
-     */
-    private final Predicate predicate;
-
-    /**
-     * The next resource.
-     */
-    private Resource nextResource;
-
-
-    // ----- Constructors ----------------------------------------------------
-
-    /**
-     * Create a new ResourceIterator.
-     *
-     * @param resources  the set of resources to iterate over
-     * @param predicate  the predicate used to filter the set of resources
-     */
-    private ResourceIterator(Set<Resource> resources, Predicate predicate) {
-      this.iterator     = resources.iterator();
-      this.predicate    = predicate;
-      this.nextResource = getNextResource();
-    }
-
-    // ----- Iterator --------------------------------------------------------
-
-    @Override
-    public boolean hasNext() {
-      return nextResource != null;
-    }
-
-    @Override
-    public Resource next() {
-      if (nextResource == null) {
-        throw new NoSuchElementException("Iterator has no more elements.");
-      }
-
-      Resource currentResource = nextResource;
-      this.nextResource = getNextResource();
-
-      return currentResource;
-    }
-
-    @Override
-    public void remove() {
-      throw new UnsupportedOperationException("Remove not supported.");
-    }
-
-    // ----- helper methods --------------------------------------------------
-
-    /**
-     * Get the next resource.
-     *
-     * @return the next resource.
-     */
-    private Resource getNextResource() {
-      while (iterator.hasNext()) {
-        Resource next = iterator.next();
-        
-        if (predicate == null || predicate.evaluate(next)) {
-          return next;
-        }
-      }
-      return null;
-    }
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterResourceProvider.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterResourceProvider.java
deleted file mode 100644
index 4d807e6..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterResourceProvider.java
+++ /dev/null
@@ -1,193 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.internal;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.ClusterRequest;
-import org.apache.ambari.server.controller.ClusterResponse;
-import org.apache.ambari.server.controller.RequestStatusResponse;
-import org.apache.ambari.server.controller.spi.NoSuchParentResourceException;
-import org.apache.ambari.server.controller.spi.NoSuchResourceException;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.RequestStatus;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException;
-import org.apache.ambari.server.controller.spi.SystemException;
-import org.apache.ambari.server.controller.spi.UnsupportedPropertyException;
-import org.apache.ambari.server.controller.utilities.PredicateHelper;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Resource provider for cluster resources.
- */
-class ClusterResourceProvider extends AbstractResourceProvider {
-
-  // ----- Property ID constants ---------------------------------------------
-
-  // Clusters
-  protected static final String CLUSTER_ID_PROPERTY_ID      = PropertyHelper.getPropertyId("Clusters", "cluster_id");
-  protected static final String CLUSTER_NAME_PROPERTY_ID    = PropertyHelper.getPropertyId("Clusters", "cluster_name");
-  protected static final String CLUSTER_VERSION_PROPERTY_ID = PropertyHelper.getPropertyId("Clusters", "version");
-
-
-  private static Set<String> pkPropertyIds =
-      new HashSet<String>(Arrays.asList(new String[]{
-          CLUSTER_ID_PROPERTY_ID}));
-
-  // ----- Constructors ----------------------------------------------------
-
-  /**
-   * Create a new resource provider for the given management controller.
-   *
-   * @param propertyIds           the property ids
-   * @param keyPropertyIds        the key property ids
-   * @param managementController  the management controller
-   */
-  ClusterResourceProvider(Set<String> propertyIds,
-                          Map<Resource.Type, String> keyPropertyIds,
-                          AmbariManagementController managementController) {
-    super(propertyIds, keyPropertyIds, managementController);
-  }
-
-  // ----- ResourceProvider ------------------------------------------------
-
-  @Override
-  public RequestStatus createResources(Request request)
-      throws SystemException,
-             UnsupportedPropertyException,
-             ResourceAlreadyExistsException,
-             NoSuchParentResourceException {
-
-    for (final Map<String, Object> properties : request.getProperties()) {
-      createResources(new Command<Void>() {
-        @Override
-        public Void invoke() throws AmbariException {
-          getManagementController().createCluster(getRequest(properties));
-          return null;
-        }
-      });
-    }
-    notifyCreate(Resource.Type.Cluster, request);
-
-    return getRequestStatus(null);
-  }
-
-  @Override
-  public Set<Resource> getResources(Request request, Predicate predicate)
-      throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-
-    final ClusterRequest clusterRequest = getRequest(PredicateHelper.getProperties(predicate));
-    Set<String> requestedIds = getRequestPropertyIds(request, predicate);
-
-    // TODO : handle multiple requests
-    Set<ClusterResponse> responses = getResources(new Command<Set<ClusterResponse>>() {
-      @Override
-      public Set<ClusterResponse> invoke() throws AmbariException {
-        return getManagementController().getClusters(Collections.singleton(clusterRequest));
-      }
-    });
-
-    Set<Resource> resources = new HashSet<Resource>();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Found clusters matching getClusters request"
-          + ", clusterResponseCount=" + responses.size());
-    }
-    for (ClusterResponse response : responses) {
-      Resource resource = new ResourceImpl(Resource.Type.Cluster);
-      setResourceProperty(resource, CLUSTER_ID_PROPERTY_ID, response.getClusterId(), requestedIds);
-      setResourceProperty(resource, CLUSTER_NAME_PROPERTY_ID, response.getClusterName(), requestedIds);
-
-      resource.setProperty(CLUSTER_VERSION_PROPERTY_ID,
-          response.getDesiredStackVersion());
-
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Adding ClusterResponse to resource"
-            + ", clusterResponse=" + response.toString());
-      }
-
-      resources.add(resource);
-    }
-    return resources;
-  }
-
-  @Override
-  public RequestStatus updateResources(Request request, Predicate predicate)
-      throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-
-    for (Map<String, Object> propertyMap : getPropertyMaps(request.getProperties().iterator().next(), predicate)) {
-      final ClusterRequest clusterRequest = getRequest(propertyMap);
-
-      modifyResources(new Command<RequestStatusResponse>() {
-        @Override
-        public RequestStatusResponse invoke() throws AmbariException {
-          return getManagementController().updateCluster(clusterRequest);
-        }
-      });
-    }
-    notifyUpdate(Resource.Type.Cluster, request, predicate);
-    return getRequestStatus(null);
-  }
-
-  @Override
-  public RequestStatus deleteResources(Predicate predicate)
-      throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-
-    for (Map<String, Object> propertyMap : getPropertyMaps(predicate)) {
-      final ClusterRequest clusterRequest = getRequest(propertyMap);
-      modifyResources(new Command<Void>() {
-        @Override
-        public Void invoke() throws AmbariException {
-          getManagementController().deleteCluster(clusterRequest);
-          return null;
-        }
-      });
-    }
-    notifyDelete(Resource.Type.Cluster, predicate);
-    return getRequestStatus(null);
-  }
-
-  // ----- utility methods -------------------------------------------------
-
-  @Override
-  protected Set<String> getPKPropertyIds() {
-    return pkPropertyIds;
-  }
-
-  /**
-   * Get a cluster request object from a map of property values.
-   *
-   * @param properties  the map of property values
-   *
-   * @return the cluster request object
-   */
-  private ClusterRequest getRequest(Map<String, Object> properties) {
-    return new ClusterRequest(
-        (Long) properties.get(CLUSTER_ID_PROPERTY_ID),
-        (String) properties.get(CLUSTER_NAME_PROPERTY_ID),
-        (String) properties.get(CLUSTER_VERSION_PROPERTY_ID),
-        null);
-  }
-}
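
Aside: every CRUD method in these providers wraps its controller call in an anonymous Command so that exception translation (AmbariException into the spi exception types) happens once in the base class rather than in each method. A hedged sketch of the shape of that pattern follows; these are stand-in names, since the real Command and translation logic are defined elsewhere in the codebase.

// Stand-in types; the real ones live in AbstractResourceProvider and the spi package.
interface Command<T> {
  T invoke() throws Exception;
}

class SystemException extends Exception {
  SystemException(String message, Throwable cause) { super(message, cause); }
}

class CommandRunner {
  // One central place to translate checked controller failures.
  static <T> T run(Command<T> command) throws SystemException {
    try {
      return command.invoke();
    } catch (Exception e) {
      throw new SystemException("Command failed.", e);  // stand-in for the real mapping
    }
  }

  public static void main(String[] args) throws SystemException {
    String result = CommandRunner.run(new Command<String>() {
      @Override
      public String invoke() {
        return "created";   // in the providers this is a management controller call
      }
    });
    System.out.println(result);
  }
}
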
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java
deleted file mode 100644
index 3050b73..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java
+++ /dev/null
@@ -1,208 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.internal;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.RequestStatusResponse;
-import org.apache.ambari.server.controller.ServiceComponentRequest;
-import org.apache.ambari.server.controller.ServiceComponentResponse;
-import org.apache.ambari.server.controller.spi.*;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Resource provider for component resources.
- */
-class ComponentResourceProvider extends AbstractResourceProvider {
-
-
-  // ----- Property ID constants ---------------------------------------------
-
-  // Components
-  protected static final String COMPONENT_CLUSTER_NAME_PROPERTY_ID    = PropertyHelper.getPropertyId("ServiceComponentInfo", "cluster_name");
-  protected static final String COMPONENT_SERVICE_NAME_PROPERTY_ID    = PropertyHelper.getPropertyId("ServiceComponentInfo", "service_name");
-  protected static final String COMPONENT_COMPONENT_NAME_PROPERTY_ID  = PropertyHelper.getPropertyId("ServiceComponentInfo", "component_name");
-  protected static final String COMPONENT_STATE_PROPERTY_ID           = PropertyHelper.getPropertyId("ServiceComponentInfo", "state");
-  protected static final String COMPONENT_DESIRED_CONFIGS_PROPERTY_ID = PropertyHelper.getPropertyId("ServiceComponentInfo", "desired_configs");
-
-
-  private static Set<String> pkPropertyIds =
-      new HashSet<String>(Arrays.asList(new String[]{
-          COMPONENT_CLUSTER_NAME_PROPERTY_ID,
-          COMPONENT_SERVICE_NAME_PROPERTY_ID,
-          COMPONENT_COMPONENT_NAME_PROPERTY_ID}));
-
-  // ----- Constructors ----------------------------------------------------
-
-  /**
-   * Create a new resource provider for the given management controller.
-   *
-   * @param propertyIds           the property ids
-   * @param keyPropertyIds        the key property ids
-   * @param managementController  the management controller
-   */
-  ComponentResourceProvider(Set<String> propertyIds,
-                            Map<Resource.Type, String> keyPropertyIds,
-                            AmbariManagementController managementController) {
-    super(propertyIds, keyPropertyIds, managementController);
-  }
-
-  // ----- ResourceProvider ------------------------------------------------
-
-  @Override
-  public RequestStatus createResources(Request request)
-      throws SystemException,
-             UnsupportedPropertyException,
-             ResourceAlreadyExistsException,
-             NoSuchParentResourceException {
-
-    final Set<ServiceComponentRequest> requests = new HashSet<ServiceComponentRequest>();
-    for (Map<String, Object> propertyMap : request.getProperties()) {
-      requests.add(getRequest(propertyMap));
-    }
-
-    createResources(new Command<Void>() {
-      @Override
-      public Void invoke() throws AmbariException {
-        getManagementController().createComponents(requests);
-        return null;
-      }
-    });
-
-    notifyCreate(Resource.Type.Component, request);
-
-    return getRequestStatus(null);
-  }
-
-  @Override
-  public Set<Resource> getResources(Request request, Predicate predicate)
-      throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-
-    final Set<ServiceComponentRequest> requests = new HashSet<ServiceComponentRequest>();
-
-    for (Map<String, Object> propertyMap : getPropertyMaps(predicate)) {
-      requests.add(getRequest(propertyMap));
-    }
-
-    Set<ServiceComponentResponse> responses = getResources(new Command<Set<ServiceComponentResponse>>() {
-      @Override
-      public Set<ServiceComponentResponse> invoke() throws AmbariException {
-        return getManagementController().getComponents(requests);
-      }
-    });
-
-    Set<String>   requestedIds = getRequestPropertyIds(request, predicate);
-    Set<Resource> resources    = new HashSet<Resource>();
-
-    for (ServiceComponentResponse response : responses) {
-      Resource resource = new ResourceImpl(Resource.Type.Component);
-      setResourceProperty(resource, COMPONENT_CLUSTER_NAME_PROPERTY_ID, response.getClusterName(), requestedIds);
-      setResourceProperty(resource, COMPONENT_SERVICE_NAME_PROPERTY_ID, response.getServiceName(), requestedIds);
-      setResourceProperty(resource, COMPONENT_COMPONENT_NAME_PROPERTY_ID, response.getComponentName(), requestedIds);
-      setResourceProperty(resource, COMPONENT_STATE_PROPERTY_ID,
-          response.getDesiredState(), requestedIds);
-      setResourceProperty(resource, COMPONENT_DESIRED_CONFIGS_PROPERTY_ID,
-          response.getConfigVersions(), requestedIds);
-      resources.add(resource);
-    }
-    return resources;
-  }
-
-  @Override
-  public RequestStatus updateResources(Request request, Predicate predicate)
-      throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-
-    final Set<ServiceComponentRequest> requests = new HashSet<ServiceComponentRequest>();
-    for (Map<String, Object> propertyMap : getPropertyMaps(request.getProperties().iterator().next(), predicate)) {
-      ServiceComponentRequest compRequest = getRequest(propertyMap);
-      Map<String, String>     configMap   = new HashMap<String,String>();
-
-      for (Map.Entry<String,Object> entry : propertyMap.entrySet()) {
-        String propertyCategory = PropertyHelper.getPropertyCategory(entry.getKey());
-        if (propertyCategory != null && propertyCategory.equals("config")) {
-          configMap.put(PropertyHelper.getPropertyName(entry.getKey()), (String) entry.getValue());
-        }
-      }
-
-      if (0 != configMap.size())
-        compRequest.setConfigVersions(configMap);
-
-      requests.add(compRequest);
-    }
-
-    RequestStatusResponse response = modifyResources(new Command<RequestStatusResponse>() {
-      @Override
-      public RequestStatusResponse invoke() throws AmbariException {
-        return getManagementController().updateComponents(requests);
-      }
-    });
-
-    notifyUpdate(Resource.Type.Component, request, predicate);
-
-    return getRequestStatus(response);
-  }
-
-  @Override
-  public RequestStatus deleteResources(Predicate predicate)
-      throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-
-    final Set<ServiceComponentRequest> requests = new HashSet<ServiceComponentRequest>();
-    for (Map<String, Object> propertyMap : getPropertyMaps(predicate)) {
-      requests.add(getRequest(propertyMap));
-    }
-
-    RequestStatusResponse response = modifyResources(new Command<RequestStatusResponse>() {
-      @Override
-      public RequestStatusResponse invoke() throws AmbariException {
-        return getManagementController().deleteComponents(requests);
-      }
-    });
-
-    notifyDelete(Resource.Type.Component, predicate);
-    return getRequestStatus(response);
-  }
-
-  // ----- utility methods -------------------------------------------------
-
-  @Override
-  protected Set<String> getPKPropertyIds() {
-    return pkPropertyIds;
-  }
-
-  /**
-   * Get a component request object from a map of property values.
-   *
-   * @param properties  the map of property values
-   *
-   * @return the component request object
-   */
-  private ServiceComponentRequest getRequest(Map<String, Object> properties) {
-    return new ServiceComponentRequest(
-        (String) properties.get(COMPONENT_CLUSTER_NAME_PROPERTY_ID),
-        (String) properties.get(COMPONENT_SERVICE_NAME_PROPERTY_ID),
-        (String) properties.get(COMPONENT_COMPONENT_NAME_PROPERTY_ID),
-        null,
-        (String) properties.get(COMPONENT_STATE_PROPERTY_ID));
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigurationResourceProvider.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigurationResourceProvider.java
deleted file mode 100644
index 9428009..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigurationResourceProvider.java
+++ /dev/null
@@ -1,285 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.internal;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.ConfigurationRequest;
-import org.apache.ambari.server.controller.ConfigurationResponse;
-import org.apache.ambari.server.controller.ServiceComponentHostRequest;
-import org.apache.ambari.server.controller.spi.NoSuchParentResourceException;
-import org.apache.ambari.server.controller.spi.NoSuchResourceException;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.RequestStatus;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException;
-import org.apache.ambari.server.controller.spi.SystemException;
-import org.apache.ambari.server.controller.spi.UnsupportedPropertyException;
-import org.apache.ambari.server.controller.utilities.PredicateHelper;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.Map.Entry;
-
-/**
- * Resource provider for configuration resources.
- */
-class ConfigurationResourceProvider extends AbstractResourceProvider {
-
-  // ----- Property ID constants ---------------------------------------------
-
-  // Configurations (values appear in query strings and request bodies, so they don't have defined categories)
-  protected static final String CONFIGURATION_CLUSTER_NAME_PROPERTY_ID = PropertyHelper.getPropertyId("Config", "cluster_name");
-  // TODO : should these be Config/type and Config/tag to be consistent?
-  protected static final String CONFIGURATION_CONFIG_TYPE_PROPERTY_ID  = PropertyHelper.getPropertyId(null, "type");
-  protected static final String CONFIGURATION_CONFIG_TAG_PROPERTY_ID   = PropertyHelper.getPropertyId(null, "tag");
-
-  private static final String CONFIG_HOST_NAME      = PropertyHelper.getPropertyId("Config", "host_name");
-  private static final String CONFIG_COMPONENT_NAME = PropertyHelper.getPropertyId("Config", "component_name");
-
-  /**
-   * The primary key property ids for the configuration resource type.
-   */
-  private static Set<String> pkPropertyIds =
-      new HashSet<String>(Arrays.asList(new String[]{
-          CONFIGURATION_CLUSTER_NAME_PROPERTY_ID,
-          CONFIGURATION_CONFIG_TYPE_PROPERTY_ID}));
-
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Constructor
-   *
-   * @param propertyIds           the property ids supported by this provider
-   * @param keyPropertyIds        the key properties for this provider
-   * @param managementController  the associated management controller
-   */
-  ConfigurationResourceProvider(Set<String> propertyIds,
-                                Map<Resource.Type, String> keyPropertyIds,
-                                AmbariManagementController managementController) {
-
-    super(propertyIds, keyPropertyIds, managementController);
-  }
-
-
-  // ----- ResourceProvider --------------------------------------------------
-
-  @Override
-  public RequestStatus createResources(Request request)
-      throws SystemException,
-             UnsupportedPropertyException,
-             ResourceAlreadyExistsException,
-             NoSuchParentResourceException {
-
-    for (Map<String, Object> map : request.getProperties()) {
-
-      String cluster = (String) map.get(CONFIGURATION_CLUSTER_NAME_PROPERTY_ID);
-      String type = (String) map.get(CONFIGURATION_CONFIG_TYPE_PROPERTY_ID);
-      String tag  = (String) map.get(CONFIGURATION_CONFIG_TAG_PROPERTY_ID);
-
-      Map<String, String> configMap = new HashMap<String, String>();
-
-      for (Entry<String, Object> entry : map.entrySet()) {
-        String propertyCategory = PropertyHelper.getPropertyCategory(entry.getKey());
-        if (propertyCategory != null && propertyCategory.equals("properties") && null != entry.getValue()) {
-          configMap.put(PropertyHelper.getPropertyName(entry.getKey()), entry.getValue().toString());
-        }
-      }
-
-      final ConfigurationRequest configRequest = new ConfigurationRequest(cluster, type, tag, configMap);
-
-      createResources(new Command<Void>() {
-        @Override
-        public Void invoke() throws AmbariException {
-          getManagementController().createConfiguration(configRequest);
-          return null;
-        }
-      });
-
-    }
-    return getRequestStatus(null);
-  }
-
-  @Override
-  public Set<Resource> getResources(Request request, Predicate predicate)
-    throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-    Map<String, Object> map = PredicateHelper.getProperties(predicate);
-    
-    if (map.containsKey(CONFIG_HOST_NAME) && map.containsKey(CONFIG_COMPONENT_NAME)) {
-      final ServiceComponentHostRequest hostComponentRequest = new ServiceComponentHostRequest(
-          (String) map.get(CONFIGURATION_CLUSTER_NAME_PROPERTY_ID),
-          null,
-          (String) map.get(CONFIG_COMPONENT_NAME),
-          (String) map.get(CONFIG_HOST_NAME),
-          null, null);
-      
-      Map<String, String> mappings = getResources(new Command<Map<String, String>>() {
-        @Override
-        public Map<String, String> invoke() throws AmbariException {
-          return getManagementController().getHostComponentDesiredConfigMapping(hostComponentRequest);
-        }
-      });
-
-      Set<Resource> resources = new HashSet<Resource>();
-      
-      for (Entry<String, String> entry : mappings.entrySet()) {
-      
-        Resource resource = new ResourceImpl(Resource.Type.Configuration);
-        
-        resource.setProperty(CONFIGURATION_CLUSTER_NAME_PROPERTY_ID, map.get(CONFIGURATION_CLUSTER_NAME_PROPERTY_ID));
-        resource.setProperty(CONFIG_COMPONENT_NAME, map.get(CONFIG_COMPONENT_NAME));
-        resource.setProperty(CONFIG_HOST_NAME, map.get(CONFIG_HOST_NAME));
-
-        resource.setProperty(CONFIGURATION_CONFIG_TYPE_PROPERTY_ID, entry.getKey());
-        resource.setProperty(CONFIGURATION_CONFIG_TAG_PROPERTY_ID, entry.getValue());
-        
-        resources.add(resource);
-      }
-      return resources;
-      
-    } else {
-      // TODO : handle multiple requests
-      final ConfigurationRequest configRequest = getRequest(map);
-      
-      Set<ConfigurationResponse> responses = getResources(new Command<Set<ConfigurationResponse>>() {
-        @Override
-        public Set<ConfigurationResponse> invoke() throws AmbariException {
-          return getManagementController().getConfigurations(Collections.singleton(configRequest));
-        }
-      });
-
-      Set<Resource> resources = new HashSet<Resource>();
-      for (ConfigurationResponse response : responses) {
-        Resource resource = new ResourceImpl(Resource.Type.Configuration);
-        resource.setProperty(CONFIGURATION_CLUSTER_NAME_PROPERTY_ID, response.getClusterName());
-        resource.setProperty(CONFIGURATION_CONFIG_TYPE_PROPERTY_ID, response.getType());
-        resource.setProperty(CONFIGURATION_CONFIG_TAG_PROPERTY_ID, response.getVersionTag());
-        
-        if (null != response.getConfigs() && response.getConfigs().size() > 0) {
-          Map<String, String> configs = response.getConfigs();
-
-          for (Entry<String, String> entry : configs.entrySet()) {
-            String id = PropertyHelper.getPropertyId("properties", entry.getKey());
-            resource.setProperty(id, entry.getValue());
-          }
-        }
-        resources.add(resource);
-      }
-      return resources;
-    }
-  }
-
-  /**
-   * Throws an exception, as Configurations cannot be updated.
-   */
-  @Override
-  public RequestStatus updateResources(Request request, Predicate predicate)
-      throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-    throw new UnsupportedOperationException("Cannot update a Configuration resource.");
-  }
-
-  /**
-   * Throws an exception, as Configurations cannot be deleted.
-   */
-  @Override
-  public RequestStatus deleteResources(Predicate predicate) throws SystemException,
-      UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-    throw new UnsupportedOperationException("Cannot delete a Configuration resource.");
-  }
-
-  @Override
-  public Set<String> checkPropertyIds(Set<String> propertyIds) {
-    propertyIds = super.checkPropertyIds(propertyIds);
-
-    if (propertyIds.isEmpty()) {
-      return propertyIds;
-    }
-    Set<String> unsupportedProperties = new HashSet<String>();
-
-    for (String propertyId : propertyIds) {
-
-      // TODO : hack to allow for inconsistent property names
-      // for example, the tag property can come here as Config/tag, /tag or tag
-      if (!propertyId.equals("tag") && !propertyId.equals("type") &&
-          !propertyId.equals("/tag") && !propertyId.equals("/type") &&
-          !propertyId.equals("properties")) {
-
-        String propertyCategory = PropertyHelper.getPropertyCategory(propertyId);
-
-        if (propertyCategory == null || !propertyCategory.equals("properties")) {
-          unsupportedProperties.add(propertyId);
-        }
-      }
-    }
-    return unsupportedProperties;
-  }
-
-  // ----- AbstractResourceProvider ------------------------------------------
-
-  @Override
-  protected Set<String> getPKPropertyIds() {
-    return pkPropertyIds;
-  }
-
-
-  // ----- utility methods ---------------------------------------------------
-
-  /**
-   * Get the config-related property values from the given map of properties.
-   *
-   * @param propertyIdMap  the map of properties
-   *
-   * @return  a map of property name to value containing only the entries whose
-   *          property id has a category of "config"
-   */
-  public static Map<String, String> getConfigPropertyValues(Map<String, Object> propertyIdMap) {
-    Map<String, String> configMap = new HashMap<String, String>();
-
-    for (Map.Entry<String,Object> entry : propertyIdMap.entrySet()) {
-      String propertyId = entry.getKey();
-      if ("config".equals(PropertyHelper.getPropertyCategory(propertyId))) {
-        configMap.put(PropertyHelper.getPropertyName(propertyId), (String) entry.getValue());
-      }
-    }
-    return configMap;
-  }
-
-  /**
-   * Get a configuration request object from the given map of properties.
-   *
-   * @param properties  the map of properties
-   *
-   * @return a configuration request
-   */
-  private ConfigurationRequest getRequest(Map<String, Object> properties) {
-    String type = (String) properties.get(CONFIGURATION_CONFIG_TYPE_PROPERTY_ID);
-    String tag  = (String) properties.get(CONFIGURATION_CONFIG_TAG_PROPERTY_ID);
-
-    return new ConfigurationRequest(
-        (String) properties.get(CONFIGURATION_CLUSTER_NAME_PROPERTY_ID),
-        type, tag, new HashMap<String, String>());
-  }
-}
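
Aside: the tag/type special-casing in checkPropertyIds above exists because configuration property ids arrive both with and without a category. The category/name split these providers rely on can be sketched as below; the "/" separator is an assumption inferred from ids like "Clusters/cluster_id", not a quote of PropertyHelper's implementation.

/** Hedged sketch of a category/name split over "/"-separated property ids. */
final class PropertyIds {
  // "Clusters/cluster_id" -> "Clusters"; a bare id like "tag" has no category.
  static String getCategory(String propertyId) {
    int index = propertyId.lastIndexOf('/');
    return index == -1 ? null : propertyId.substring(0, index);
  }

  // "Clusters/cluster_id" -> "cluster_id"; a bare id is its own name.
  static String getName(String propertyId) {
    int index = propertyId.lastIndexOf('/');
    return index == -1 ? propertyId : propertyId.substring(index + 1);
  }
}
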
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/DefaultProviderModule.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/DefaultProviderModule.java
deleted file mode 100644
index 71dabd9..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/DefaultProviderModule.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.internal;
-
-import com.google.inject.Inject;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.AmbariServer;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.spi.ResourceProvider;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-
-/**
- * The default provider module implementation.
- */
-public class DefaultProviderModule extends AbstractProviderModule {
-  @Inject
-  private AmbariManagementController managementController;
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Create a default provider module.
-   */
-  public DefaultProviderModule() {
-    if (managementController == null) {
-      managementController = AmbariServer.getController();
-    }
-  }
-
-
-  // ----- utility methods ---------------------------------------------------
-
-  @Override
-  protected ResourceProvider createResourceProvider(Resource.Type type) {
-    return AbstractResourceProvider.getResourceProvider(type, PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type), managementController);
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
deleted file mode 100644
index 2c2f282..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
+++ /dev/null
@@ -1,231 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.internal;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.RequestStatusResponse;
-import org.apache.ambari.server.controller.ServiceComponentHostRequest;
-import org.apache.ambari.server.controller.ServiceComponentHostResponse;
-import org.apache.ambari.server.controller.spi.*;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Resource provider for host component resources.
- */
-class HostComponentResourceProvider extends AbstractResourceProvider {
-
-  // ----- Property ID constants ---------------------------------------------
-
-  // Host Components
-  protected static final String HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID   = PropertyHelper.getPropertyId("HostRoles", "cluster_name");
-  protected static final String HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID   = PropertyHelper.getPropertyId("HostRoles", "service_name");
-  protected static final String HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID = PropertyHelper.getPropertyId("HostRoles", "component_name");
-  protected static final String HOST_COMPONENT_HOST_NAME_PROPERTY_ID      = PropertyHelper.getPropertyId("HostRoles", "host_name");
-  protected static final String HOST_COMPONENT_STATE_PROPERTY_ID          = PropertyHelper.getPropertyId("HostRoles", "state");
-  protected static final String HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID  = PropertyHelper.getPropertyId("HostRoles", "desired_state");
-  protected static final String HOST_COMPONENT_CONFIGS_PROPERTY_ID          = PropertyHelper.getPropertyId("HostRoles", "configs");
-  protected static final String HOST_COMPONENT_DESIRED_CONFIGS_PROPERTY_ID  = PropertyHelper.getPropertyId("HostRoles", "desired_configs");
-
-  private static Set<String> pkPropertyIds =
-      new HashSet<String>(Arrays.asList(new String[]{
-          HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID,
-          HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID,
-          HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID,
-          HOST_COMPONENT_HOST_NAME_PROPERTY_ID}));
-
-  // ----- Constructors ----------------------------------------------------
-
-  /**
-   * Create a new resource provider for the given management controller.
-   *
-   * @param propertyIds           the property ids
-   * @param keyPropertyIds        the key property ids
-   * @param managementController  the management controller
-   */
-  HostComponentResourceProvider(Set<String> propertyIds,
-                                Map<Resource.Type, String> keyPropertyIds,
-                                AmbariManagementController managementController) {
-    super(propertyIds, keyPropertyIds, managementController);
-  }
-
-  // ----- ResourceProvider ------------------------------------------------
-
-  @Override
-  public RequestStatus createResources(Request request)
-      throws SystemException,
-             UnsupportedPropertyException,
-             ResourceAlreadyExistsException,
-             NoSuchParentResourceException {
-
-    final Set<ServiceComponentHostRequest> requests = new HashSet<ServiceComponentHostRequest>();
-    for (Map<String, Object> propertyMap : request.getProperties()) {
-      requests.add(getRequest(propertyMap));
-    }
-
-    createResources(new Command<Void>() {
-      @Override
-      public Void invoke() throws AmbariException {
-        getManagementController().createHostComponents(requests);
-        return null;
-      }
-    });
-
-    notifyCreate(Resource.Type.HostComponent, request);
-
-    return getRequestStatus(null);
-  }
-
-  @Override
-  public Set<Resource> getResources(Request request, Predicate predicate)
-      throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-
-    final Set<ServiceComponentHostRequest> requests = new HashSet<ServiceComponentHostRequest>();
-
-    for (Map<String, Object> propertyMap : getPropertyMaps(predicate)) {
-      requests.add(getRequest(propertyMap));
-    }
-
-    Set<Resource> resources    = new HashSet<Resource>();
-    Set<String>   requestedIds = getRequestPropertyIds(request, predicate);
-
-    Set<ServiceComponentHostResponse> responses = getResources(new Command<Set<ServiceComponentHostResponse>>() {
-      @Override
-      public Set<ServiceComponentHostResponse> invoke() throws AmbariException {
-        return getManagementController().getHostComponents(requests);
-      }
-    });
-
-    for (ServiceComponentHostResponse response : responses) {
-      Resource resource = new ResourceImpl(Resource.Type.HostComponent);
-      setResourceProperty(resource, HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID, response.getClusterName(), requestedIds);
-      setResourceProperty(resource, HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID, response.getServiceName(), requestedIds);
-      setResourceProperty(resource, HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID, response.getComponentName(), requestedIds);
-      setResourceProperty(resource, HOST_COMPONENT_HOST_NAME_PROPERTY_ID, response.getHostname(), requestedIds);
-      setResourceProperty(resource, HOST_COMPONENT_STATE_PROPERTY_ID, response.getLiveState(), requestedIds);
-      setResourceProperty(resource, HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID, response.getDesiredState(), requestedIds);
-      setResourceProperty(resource, HOST_COMPONENT_CONFIGS_PROPERTY_ID,
-          response.getConfigs(), requestedIds);
-      setResourceProperty(resource, HOST_COMPONENT_DESIRED_CONFIGS_PROPERTY_ID,
-          response.getDesiredConfigs(), requestedIds);
-      resources.add(resource);
-    }
-    return resources;
-  }
-
-  @Override
-  public RequestStatus updateResources(Request request, Predicate predicate)
-        throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-    final Set<ServiceComponentHostRequest> requests = new HashSet<ServiceComponentHostRequest>();
-    RequestStatusResponse response = null;
-
-    Iterator<Map<String,Object>> iterator = request.getProperties().iterator();
-    if (iterator.hasNext()) {
-      for (Map<String, Object> propertyMap : getPropertyMaps(iterator.next(), predicate)) {
-        requests.add(getRequest(propertyMap));
-      }
-      response = modifyResources(new Command<RequestStatusResponse>() {
-        @Override
-        public RequestStatusResponse invoke() throws AmbariException {
-          return getManagementController().updateHostComponents(requests);
-        }
-      });
-
-      notifyUpdate(Resource.Type.HostComponent, request, predicate);
-    }
-    return getRequestStatus(response);
-  }
-
-  @Override
-  public RequestStatus deleteResources(Predicate predicate)
-      throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-    final Set<ServiceComponentHostRequest> requests = new HashSet<ServiceComponentHostRequest>();
-    for (Map<String, Object> propertyMap : getPropertyMaps(predicate)) {
-      requests.add(getRequest(propertyMap));
-    }
-    RequestStatusResponse response = modifyResources(new Command<RequestStatusResponse>() {
-      @Override
-      public RequestStatusResponse invoke() throws AmbariException {
-        return getManagementController().deleteHostComponents(requests);
-      }
-    });
-
-    notifyDelete(Resource.Type.HostComponent, predicate);
-
-    return getRequestStatus(response);
-  }
-
-  @Override
-  public Set<String> checkPropertyIds(Set<String> propertyIds) {
-    propertyIds = super.checkPropertyIds(propertyIds);
-
-    if (propertyIds.isEmpty()) {
-      return propertyIds;
-    }
-    Set<String> unsupportedProperties = new HashSet<String>();
-
-    for (String propertyId : propertyIds) {
-      if (!propertyId.equals("config")) {
-        String propertyCategory = PropertyHelper.getPropertyCategory(propertyId);
-        if (propertyCategory == null || !propertyCategory.equals("config")) {
-          unsupportedProperties.add(propertyId);
-        }
-      }
-    }
-    return unsupportedProperties;
-  }
-
-
-  // ----- utility methods -------------------------------------------------
-
-  @Override
-  protected Set<String> getPKPropertyIds() {
-    return pkPropertyIds;
-  }
-
-  /**
-   * Get a host component request object from a map of property values.
-   *
-   * @param properties  the map of property values
-   *
-   * @return the host component request object
-   */
-  private ServiceComponentHostRequest getRequest(Map<String, Object> properties) {
-    ServiceComponentHostRequest serviceComponentHostRequest = new ServiceComponentHostRequest(
-        (String) properties.get(HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID),
-        (String) properties.get(HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID),
-        (String) properties.get(HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID),
-        (String) properties.get(HOST_COMPONENT_HOST_NAME_PROPERTY_ID),
-        null,
-        (String) properties.get(HOST_COMPONENT_STATE_PROPERTY_ID));
-
-    Map<String, String> configMappings =
-        ConfigurationResourceProvider.getConfigPropertyValues(properties);
-
-    if (configMappings.size() > 0) {
-      serviceComponentHostRequest.setConfigVersions(configMappings);
-    }
-    return serviceComponentHostRequest;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java
deleted file mode 100644
index c156baf..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java
+++ /dev/null
@@ -1,262 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.internal;
-
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.HostRequest;
-import org.apache.ambari.server.controller.HostResponse;
-import org.apache.ambari.server.controller.spi.*;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-
-
-/**
- * Resource provider for host resources.
- */
-class HostResourceProvider extends AbstractResourceProvider {
-
-  // ----- Property ID constants ---------------------------------------------
-
-  // Hosts
-  protected static final String HOST_CLUSTER_NAME_PROPERTY_ID =
-      PropertyHelper.getPropertyId("Hosts", "cluster_name");
-  protected static final String HOST_NAME_PROPERTY_ID =
-      PropertyHelper.getPropertyId("Hosts", "host_name");
-  protected static final String HOST_PUBLIC_NAME_PROPERTY_ID =
-      PropertyHelper.getPropertyId("Hosts", "public_host_name");
-  protected static final String HOST_IP_PROPERTY_ID =
-      PropertyHelper.getPropertyId("Hosts", "ip");
-  protected static final String HOST_TOTAL_MEM_PROPERTY_ID =
-      PropertyHelper.getPropertyId("Hosts", "total_mem");
-  protected static final String HOST_CPU_COUNT_PROPERTY_ID =
-      PropertyHelper.getPropertyId("Hosts", "cpu_count");
-  protected static final String HOST_OS_ARCH_PROPERTY_ID =
-      PropertyHelper.getPropertyId("Hosts", "os_arch");
-  protected static final String HOST_OS_TYPE_PROPERTY_ID =
-      PropertyHelper.getPropertyId("Hosts", "os_type");
-  protected static final String HOST_RACK_INFO_PROPERTY_ID =
-      PropertyHelper.getPropertyId("Hosts", "rack_info");
-  protected static final String HOST_LAST_HEARTBEAT_TIME_PROPERTY_ID =
-      PropertyHelper.getPropertyId("Hosts", "last_heartbeat_time");
-  protected static final String HOST_LAST_REGISTRATION_TIME_PROPERTY_ID =
-      PropertyHelper.getPropertyId("Hosts", "last_registration_time");
-  protected static final String HOST_DISK_INFO_PROPERTY_ID =
-      PropertyHelper.getPropertyId("Hosts", "disk_info");
-  protected static final String HOST_HOST_STATUS_PROPERTY_ID =
-      PropertyHelper.getPropertyId("Hosts", "host_status");
-  protected static final String HOST_HOST_HEALTH_REPORT_PROPERTY_ID =
-      PropertyHelper.getPropertyId("Hosts", "host_health_report");
-  protected static final String HOST_STATE_PROPERTY_ID =
-      PropertyHelper.getPropertyId("Hosts", "host_state");
-  protected static final String HOST_LAST_AGENT_ENV_PROPERTY_ID =
-      PropertyHelper.getPropertyId("Hosts", "last_agent_env");
-
-  private static Set<String> pkPropertyIds =
-      new HashSet<String>(Arrays.asList(new String[]{
-          HOST_NAME_PROPERTY_ID}));
-
-  // ----- Constructors ----------------------------------------------------
-
-  /**
-   * Create a new resource provider for the given management controller.
-   *
-   * @param propertyIds           the property ids
-   * @param keyPropertyIds        the key property ids
-   * @param managementController  the management controller
-   */
-  HostResourceProvider(Set<String> propertyIds,
-                       Map<Resource.Type, String> keyPropertyIds,
-                       AmbariManagementController managementController) {
-    super(propertyIds, keyPropertyIds, managementController);
-  }
-
-  // ----- ResourceProvider ------------------------------------------------
-
-  @Override
-  public RequestStatus createResources(Request request)
-      throws SystemException,
-          UnsupportedPropertyException,
-          ResourceAlreadyExistsException,
-          NoSuchParentResourceException {
-
-    final Set<HostRequest> requests = new HashSet<HostRequest>();
-    for (Map<String, Object> propertyMap : request.getProperties()) {
-      requests.add(getRequest(propertyMap));
-    }
-    createResources(new Command<Void>() {
-      @Override
-      public Void invoke() throws AmbariException {
-        getManagementController().createHosts(requests);
-        return null;
-      }
-    });
-
-    notifyCreate(Resource.Type.Host, request);
-
-    return getRequestStatus(null);
-  }
-
-  @Override
-  public Set<Resource> getResources(Request request, Predicate predicate)
-      throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-
-    final Set<HostRequest> requests = new HashSet<HostRequest>();
-
-    if (predicate == null) {
-      requests.add(getRequest(null));
-    }
-    else {
-      for (Map<String, Object> propertyMap : getPropertyMaps(predicate)) {
-        requests.add(getRequest(propertyMap));
-      }
-    }
-
-    Set<HostResponse> responses = getResources(new Command<Set<HostResponse>>() {
-      @Override
-      public Set<HostResponse> invoke() throws AmbariException {
-        return getManagementController().getHosts(requests);
-      }
-    });
-
-    Set<String>   requestedIds = getRequestPropertyIds(request, predicate);
-    Set<Resource> resources    = new HashSet<Resource>();
-
-    for (HostResponse response : responses) {
-      Resource resource = new ResourceImpl(Resource.Type.Host);
-
-      // TODO : properly handle more than one cluster
-      if (response.getClusterName() != null
-          && !response.getClusterName().isEmpty()) {
-        setResourceProperty(resource, HOST_CLUSTER_NAME_PROPERTY_ID,
-            response.getClusterName(), requestedIds);
-      }
-
-      setResourceProperty(resource, HOST_NAME_PROPERTY_ID,
-          response.getHostname(), requestedIds);
-      setResourceProperty(resource, HOST_PUBLIC_NAME_PROPERTY_ID,
-          response.getPublicHostName(), requestedIds);
-      setResourceProperty(resource, HOST_IP_PROPERTY_ID,
-          response.getIpv4(), requestedIds);
-      setResourceProperty(resource, HOST_TOTAL_MEM_PROPERTY_ID,
-          response.getTotalMemBytes(), requestedIds);
-      setResourceProperty(resource, HOST_CPU_COUNT_PROPERTY_ID,
-          (long) response.getCpuCount(), requestedIds);
-      setResourceProperty(resource, HOST_OS_ARCH_PROPERTY_ID,
-          response.getOsArch(), requestedIds);
-      setResourceProperty(resource, HOST_OS_TYPE_PROPERTY_ID,
-          response.getOsType(), requestedIds);
-      setResourceProperty(resource, HOST_RACK_INFO_PROPERTY_ID,
-          response.getRackInfo(), requestedIds);
-      setResourceProperty(resource, HOST_LAST_HEARTBEAT_TIME_PROPERTY_ID,
-          response.getLastHeartbeatTime(), requestedIds);
-      setResourceProperty(resource, HOST_LAST_AGENT_ENV_PROPERTY_ID,
-          response.getLastAgentEnv(), requestedIds);
-      setResourceProperty(resource, HOST_LAST_REGISTRATION_TIME_PROPERTY_ID,
-          response.getLastRegistrationTime(), requestedIds);
-      setResourceProperty(resource, HOST_HOST_STATUS_PROPERTY_ID,
-          response.getHealthStatus().getHealthStatus().toString(), requestedIds);
-      setResourceProperty(resource, HOST_HOST_HEALTH_REPORT_PROPERTY_ID,
-          response.getHealthStatus().getHealthReport(), requestedIds);
-      setResourceProperty(resource, HOST_DISK_INFO_PROPERTY_ID,
-          response.getDisksInfo(), requestedIds);
-      setResourceProperty(resource, HOST_STATE_PROPERTY_ID,
-          response.getHostState(), requestedIds);
-      resources.add(resource);
-    }
-    return resources;
-  }
-
-  @Override
-  public RequestStatus updateResources(Request request, Predicate predicate)
-      throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-
-    final Set<HostRequest> requests = new HashSet<HostRequest>();
-    for (Map<String, Object> propertyMap : getPropertyMaps(request.getProperties().iterator().next(), predicate)) {
-      requests.add(getRequest(propertyMap));
-    }
-
-    modifyResources(new Command<Void>() {
-      @Override
-      public Void invoke() throws AmbariException {
-        getManagementController().updateHosts(requests);
-        return null;
-      }
-    });
-
-    notifyUpdate(Resource.Type.Host, request, predicate);
-
-    return getRequestStatus(null);
-  }
-
-  @Override
-  public RequestStatus deleteResources(Predicate predicate)
-      throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-
-    final Set<HostRequest> requests = new HashSet<HostRequest>();
-    for (Map<String, Object> propertyMap : getPropertyMaps(predicate)) {
-      requests.add(getRequest(propertyMap));
-    }
-
-    modifyResources(new Command<Void>() {
-      @Override
-      public Void invoke() throws AmbariException {
-        getManagementController().deleteHosts(requests);
-        return null;
-      }
-    });
-
-    notifyDelete(Resource.Type.Host, predicate);
-
-    return getRequestStatus(null);
-  }
-
-  // ----- utility methods -------------------------------------------------
-
-  @Override
-  protected Set<String> getPKPropertyIds() {
-    return pkPropertyIds;
-  }
-
-  /**
-   * Get a host request object from a map of property values.
-   *
-   * @param properties  the map of property values
-   *
-   * @return the host request object
-   */
-  private HostRequest getRequest(Map<String, Object> properties) {
-
-    if (properties == null) {
-      return new HostRequest(null, null, null);
-    }
-
-    HostRequest hostRequest = new HostRequest(
-        (String) properties.get(HOST_NAME_PROPERTY_ID),
-        (String) properties.get(HOST_CLUSTER_NAME_PROPERTY_ID),
-        null);
-    hostRequest.setPublicHostName((String) properties.get(HOST_PUBLIC_NAME_PROPERTY_ID));
-    hostRequest.setRackInfo((String) properties.get(HOST_RACK_INFO_PROPERTY_ID));
-
-    return hostRequest;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HttpProxyPropertyProvider.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HttpProxyPropertyProvider.java
deleted file mode 100644
index 6e69ff4..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HttpProxyPropertyProvider.java
+++ /dev/null
@@ -1,133 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.internal;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.PropertyProvider;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.spi.SystemException;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.apache.ambari.server.controller.utilities.StreamProvider;
-import org.apache.commons.io.IOUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Property provider that is used to read HTTP data from another server.
- */
-public class HttpProxyPropertyProvider extends BaseProvider implements PropertyProvider {
-
-  protected final static Logger LOG =
-      LoggerFactory.getLogger(HttpProxyPropertyProvider.class);
-
-  private static final Map<String, String> URL_TEMPLATES = new HashMap<String, String>();
-  private static final Map<String, String> MAPPINGS = new HashMap<String, String>();
-  
-  static {
-    URL_TEMPLATES.put("NAGIOS_SERVER", "http://%s/ambarinagios/nagios/nagios_alerts.php?q1=alerts&alert_type=all");
-    
-    MAPPINGS.put("NAGIOS_SERVER", PropertyHelper.getPropertyId("HostRoles", "nagios_alerts"));
-  }
-  
-  private StreamProvider streamProvider = null;
-  // !!! not yet used, but make consistent
-  private String clusterNamePropertyId = null;
-  private String hostNamePropertyId = null;
-  private String componentNamePropertyId = null;
-  
-  public HttpProxyPropertyProvider(
-      StreamProvider stream,
-      String clusterNamePropertyId,
-      String hostNamePropertyId,
-      String componentNamePropertyId) {
-
-    super(new HashSet<String>(MAPPINGS.values()));
-    this.streamProvider = stream;
-    this.clusterNamePropertyId = clusterNamePropertyId;
-    this.hostNamePropertyId = hostNamePropertyId;
-    this.componentNamePropertyId = componentNamePropertyId;
-  }
-
-  /**
-   * Populates each resource for which an HTTP-type property should be
-   * fulfilled by setting the proxied HTTP response on the resource.
-   */
-  @Override
-  public Set<Resource> populateResources(Set<Resource> resources,
-      Request request, Predicate predicate) throws SystemException {
-    
-    Set<String> ids = getRequestPropertyIds(request, predicate);
-    
-    if (0 == ids.size())
-      return resources;
-
-    for (Resource resource : resources) {
-      
-      Object hostName = resource.getPropertyValue(hostNamePropertyId);
-      Object componentName = resource.getPropertyValue(componentNamePropertyId);
-      
-      if (null != hostName && null != componentName &&
-          MAPPINGS.containsKey(componentName.toString()) &&
-          URL_TEMPLATES.containsKey(componentName.toString())) {
-        
-        String template = URL_TEMPLATES.get(componentName.toString());
-        String propertyId = MAPPINGS.get(componentName.toString());
-        String url = String.format(template, hostName);
-        
-        getHttpResponse(resource, url, propertyId);
-      }
-    }
-    
-    return resources;
-  }
-
-  private void getHttpResponse(Resource r, String url, String propertyIdToSet) {
-    
-    InputStream in = null;
-    try {
-      in = streamProvider.readFrom(url);
-      
-      String str = IOUtils.toString(in, "UTF-8");
-      
-      r.setProperty(propertyIdToSet, str);
-    }
-    catch (IOException ioe) {
-      LOG.error("Error reading HTTP response from " + url, ioe);
-    }
-    finally {
-      if (null != in) {
-        try {
-          in.close();
-        }
-        catch (IOException ioe) {
-          // ignore; the response has already been read
-        }
-      }
-    }
-    
-  }
-
-}
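
The NAGIOS_SERVER entry above is the only registered endpoint, and the proxying itself is plain String.format templating. A standalone sketch (hypothetical class and host name) of the substitution that populateResources performs:

    import java.util.HashMap;
    import java.util.Map;

    public class ProxyTemplateSketch {
      public static void main(String[] args) {
        Map<String, String> urlTemplates = new HashMap<String, String>();
        urlTemplates.put("NAGIOS_SERVER",
            "http://%s/ambarinagios/nagios/nagios_alerts.php?q1=alerts&alert_type=all");

        String componentName = "NAGIOS_SERVER";      // from the resource's component property
        String hostName      = "nagios.example.com"; // hypothetical host value

        // The substitution performed by populateResources above.
        String url = String.format(urlTemplates.get(componentName), hostName);
        System.out.println(url);
      }
    }
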
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ObservableResourceProvider.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ObservableResourceProvider.java
deleted file mode 100644
index c0c1dc4..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ObservableResourceProvider.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.internal;
-
-/**
- * A resource provider that accepts observers that listen for resource provider events.
- */
-public interface ObservableResourceProvider {
-
-  /**
-   * Update all registered observers with the given event.
-   *
-   * @param event  the event
-   */
-  public void updateObservers(ResourceProviderEvent event);
-
-  /**
-   * Add an observer.
-   *
-   * @param observer  the observer
-   */
-  public void addObserver(ResourceProviderObserver observer);
-}
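
For reference, a minimal sketch of how a provider might satisfy this interface, using a copy-on-write list for the observer bookkeeping (hypothetical class, not part of the removed tree):

    package org.apache.ambari.server.controller.internal;

    import java.util.List;
    import java.util.concurrent.CopyOnWriteArrayList;

    // Hypothetical base class showing the expected observer bookkeeping.
    public abstract class BaseObservableResourceProvider implements ObservableResourceProvider {

      // Copy-on-write list so notification can proceed while observers register.
      private final List<ResourceProviderObserver> observers =
          new CopyOnWriteArrayList<ResourceProviderObserver>();

      @Override
      public void addObserver(ResourceProviderObserver observer) {
        observers.add(observer);
      }

      @Override
      public void updateObservers(ResourceProviderEvent event) {
        for (ResourceProviderObserver observer : observers) {
          observer.update(event);
        }
      }
    }
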
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PropertyInfo.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PropertyInfo.java
deleted file mode 100644
index 82cdc9d..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PropertyInfo.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.internal;
-
-/**
- * Property identifier information.
- */
-public class PropertyInfo {
-  private final String propertyId;
-  private final boolean temporal;
-  private final boolean pointInTime;
-
-  public PropertyInfo(String propertyId, boolean temporal, boolean pointInTime) {
-    this.propertyId = propertyId;
-    this.temporal = temporal;
-    this.pointInTime = pointInTime;
-  }
-
-  public String getPropertyId() {
-    return propertyId;
-  }
-
-  public boolean isTemporal() {
-    return temporal;
-  }
-
-  public boolean isPointInTime() {
-    return pointInTime;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PropertyPredicateVisitor.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PropertyPredicateVisitor.java
deleted file mode 100644
index a27d5da..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/PropertyPredicateVisitor.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.internal;
-
-import org.apache.ambari.server.controller.predicate.AlwaysPredicate;
-import org.apache.ambari.server.controller.predicate.ArrayPredicate;
-import org.apache.ambari.server.controller.predicate.BasePredicate;
-import org.apache.ambari.server.controller.predicate.CategoryPredicate;
-import org.apache.ambari.server.controller.predicate.ComparisonPredicate;
-import org.apache.ambari.server.controller.predicate.PropertyPredicate;
-import org.apache.ambari.server.controller.predicate.PredicateVisitor;
-import org.apache.ambari.server.controller.predicate.UnaryPredicate;
-
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * Predicate visitor for extracting property values from the {@link PropertyPredicate}s of a predicate graph.
- */
-public class PropertyPredicateVisitor implements PredicateVisitor {
-  private final Map<String, Object> properties = new HashMap<String, Object>();
-
-  @Override
-  public void acceptComparisonPredicate(ComparisonPredicate predicate) {
-    properties.put(predicate.getPropertyId(), predicate.getValue());
-  }
-
-  @Override
-  public void acceptArrayPredicate(ArrayPredicate predicate) {
-    BasePredicate[] predicates = predicate.getPredicates();
-    for (BasePredicate predicate1 : predicates) {
-      predicate1.accept(this);
-    }
-  }
-
-  @Override
-  public void acceptUnaryPredicate(UnaryPredicate predicate) {
-    //Do nothing
-  }
-
-  @Override
-  public void acceptAlwaysPredicate(AlwaysPredicate predicate) {
-    //Do nothing
-  }
-
-  @Override
-  public void acceptCategoryPredicate(CategoryPredicate predicate) {
-    // Do nothing
-  }
-
-
-  // ----- accessors ---------------------------------------------------------
-
-  /**
-   * Get the properties.
-   *
-   * @return the properties
-   */
-  public Map<String, Object> getProperties() {
-    return properties;
-  }
-}
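
A usage sketch of the visitor, assuming the EqualsPredicate (propertyId, value) constructor and the two-argument AndPredicate.instance factory that SimplifyingPredicateVisitor uses later in this diff:

    package org.apache.ambari.server.controller.internal;

    import java.util.Map;

    import org.apache.ambari.server.controller.predicate.AndPredicate;
    import org.apache.ambari.server.controller.predicate.BasePredicate;
    import org.apache.ambari.server.controller.predicate.EqualsPredicate;

    public class PropertyPredicateVisitorSketch {
      public static void main(String[] args) {
        // Assumed: EqualsPredicate(propertyId, value) constructor and the
        // two-argument AndPredicate.instance factory used later in this diff.
        BasePredicate predicate = AndPredicate.instance(
            new EqualsPredicate("ServiceInfo/cluster_name", "c1"),
            new EqualsPredicate("ServiceInfo/service_name", "HDFS"));

        PropertyPredicateVisitor visitor = new PropertyPredicateVisitor();
        predicate.accept(visitor);

        // {ServiceInfo/cluster_name=c1, ServiceInfo/service_name=HDFS}
        Map<String, Object> properties = visitor.getProperties();
        System.out.println(properties);
      }
    }
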
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestImpl.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestImpl.java
deleted file mode 100644
index 23bc483..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestImpl.java
+++ /dev/null
@@ -1,140 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.internal;
-
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.TemporalInfo;
-
-import java.util.*;
-import java.util.Map.Entry;
-
-/**
- * Default request implementation.
- */
-public class RequestImpl implements Request {
-
-  /**
-   * The property ids associated with this request.  Used for requests that
-   * get resource values.
-   */
-  private final Set<String> propertyIds;
-
-  /**
-   * The properties associated with this request.  Used for requests that create
-   * resources or update resource values.
-   */
-  private final Set<Map<String, Object>> properties;
-
-  /**
-   * Map of property to temporal info.
-   */
-  private Map<String, TemporalInfo> m_mapTemporalInfo = new HashMap<String, TemporalInfo>();
-
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Create a request.
-   *
-   * @param propertyIds      the property ids associated with the request; may be null
-   * @param properties       the properties associated with the request; may be null
-   * @param mapTemporalInfo  the temporal info
-   */
-  public RequestImpl(Set<String> propertyIds, Set<Map<String, Object>> properties,
-                     Map<String, TemporalInfo> mapTemporalInfo) {
-    this.propertyIds = propertyIds == null ?
-        Collections.unmodifiableSet(new HashSet<String>()) :
-        Collections.unmodifiableSet(propertyIds);
-
-    this.properties = properties == null ?
-        Collections.unmodifiableSet(new HashSet<Map<String, Object>>()) :
-        Collections.unmodifiableSet(properties);
-
-    setTemporalInfo(mapTemporalInfo);
-  }
-
-
-  // ----- Request -----------------------------------------------------------
-
-  @Override
-  public Set<String> getPropertyIds() {
-    return propertyIds;
-  }
-
-  @Override
-  public Set<Map<String, Object>> getProperties() {
-    return properties;
-  }
-
-  @Override
-  public TemporalInfo getTemporalInfo(String id) {
-    return m_mapTemporalInfo.get(id);
-  }
-
-  private void setTemporalInfo(Map<String, TemporalInfo> mapTemporalInfo) {
-    // guard against null so that getTemporalInfo never throws
-    m_mapTemporalInfo = mapTemporalInfo == null ?
-        new HashMap<String, TemporalInfo>() : mapTemporalInfo;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    RequestImpl request = (RequestImpl) o;
-
-    return !(properties == null ? request.properties != null : !properties.equals(request.properties)) &&
-        !(propertyIds == null ? request.propertyIds != null : !propertyIds.equals(request.propertyIds));
-  }
-
-  @Override
-  public int hashCode() {
-    int result = propertyIds != null ? propertyIds.hashCode() : 0;
-    result = 31 * result + (properties != null ? properties.hashCode() : 0);
-    return result;
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder();
-    sb.append("Request:"
-        + ", propertyIds=[");
-    for (String pId : propertyIds) {
-      sb.append(" { propertyName=").append(pId).append(" }, ");
-    }
-    sb.append(" ], properties=[ ");
-    for (Map<String, Object> map : properties) {
-      for (Entry<String, Object> entry : map.entrySet()) {
-        sb.append(" { propertyName=").append(entry.getKey()).append(", propertyValue=").
-            append(entry.getValue().toString()).append(" }, ");
-      }
-    }
-    sb.append(" ], temporalInfo=[");
-    if (m_mapTemporalInfo == null) {
-      sb.append("null");
-    } else {
-      for (Entry<String, TemporalInfo> entry : m_mapTemporalInfo.entrySet()) {
-        sb.append(" { propertyName=").append(entry.getKey()).append(", temporalInfo=").
-            append(entry.getValue().toString()).append(" }, ");
-      }
-    }
-    sb.append(" ]");
-    return sb.toString();
-  }
-
-}
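
A construction sketch: property ids drive read requests, the property-map set drives create/update requests, and either argument may be null (hypothetical values):

    package org.apache.ambari.server.controller.internal;

    import java.util.Collections;
    import java.util.HashSet;
    import java.util.Set;

    import org.apache.ambari.server.controller.spi.Request;
    import org.apache.ambari.server.controller.spi.TemporalInfo;

    public class RequestImplSketch {
      public static void main(String[] args) {
        Set<String> propertyIds = new HashSet<String>();
        propertyIds.add("ServiceInfo/service_name");
        propertyIds.add("ServiceInfo/state");

        // A read request: no create/update property maps, no temporal info.
        Request readRequest = new RequestImpl(propertyIds, null,
            Collections.<String, TemporalInfo>emptyMap());

        // Unmodifiable set containing the two ids above.
        System.out.println(readRequest.getPropertyIds());
      }
    }
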
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java
deleted file mode 100644
index 6fa7515..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java
+++ /dev/null
@@ -1,144 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.internal;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.RequestStatusRequest;
-import org.apache.ambari.server.controller.RequestStatusResponse;
-import org.apache.ambari.server.controller.spi.NoSuchParentResourceException;
-import org.apache.ambari.server.controller.spi.NoSuchResourceException;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.RequestStatus;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.spi.SystemException;
-import org.apache.ambari.server.controller.spi.UnsupportedPropertyException;
-import org.apache.ambari.server.controller.utilities.PredicateHelper;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Resource provider for request resources.
- */
-class RequestResourceProvider extends AbstractResourceProvider {
-
-  // ----- Property ID constants ---------------------------------------------
-  // Requests
-  protected static final String REQUEST_CLUSTER_NAME_PROPERTY_ID = PropertyHelper.getPropertyId("Requests", "cluster_name");
-  protected static final String REQUEST_ID_PROPERTY_ID           = PropertyHelper.getPropertyId("Requests", "id");
-  protected static final String REQUEST_STATUS_PROPERTY_ID       = PropertyHelper.getPropertyId("Requests", "request_status");
-
-  private static Set<String> pkPropertyIds =
-      new HashSet<String>(Arrays.asList(new String[]{
-          REQUEST_ID_PROPERTY_ID}));
-
-  // ----- Constructors ----------------------------------------------------
-
-  /**
-   * Create a new resource provider for the given management controller.
-   *
-   * @param propertyIds           the property ids
-   * @param keyPropertyIds        the key property ids
-   * @param managementController  the management controller
-   */
-  RequestResourceProvider(Set<String> propertyIds,
-                          Map<Resource.Type, String> keyPropertyIds,
-                          AmbariManagementController managementController) {
-    super(propertyIds, keyPropertyIds, managementController);
-  }
-
-  // ----- ResourceProvider ------------------------------------------------
-
-  @Override
-  public RequestStatus createResources(Request request) {
-    throw new UnsupportedOperationException("Not currently supported.");
-  }
-
-  @Override
-  public Set<Resource> getResources(Request request, Predicate predicate)
-    throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-
-    Set<String>         requestedIds         = getRequestPropertyIds(request, predicate);
-    Map<String, Object> predicateProperties  = PredicateHelper.getProperties(predicate);
-
-    final RequestStatusRequest requestStatusRequest = getRequest(predicateProperties);
-
-    String clusterName = (String) predicateProperties.get(REQUEST_CLUSTER_NAME_PROPERTY_ID);
-
-    Set<RequestStatusResponse> responses = getResources(new Command<Set<RequestStatusResponse>>() {
-      @Override
-      public Set<RequestStatusResponse> invoke() throws AmbariException {
-        return getManagementController().getRequestStatus(requestStatusRequest);
-      }
-    });
-
-
-    Set<Resource> resources = new HashSet<Resource>();
-    for (RequestStatusResponse response : responses) {
-      Resource resource = new ResourceImpl(Resource.Type.Request);
-      setResourceProperty(resource, REQUEST_CLUSTER_NAME_PROPERTY_ID, clusterName, requestedIds);
-      setResourceProperty(resource, REQUEST_ID_PROPERTY_ID, response.getRequestId(), requestedIds);
-      resources.add(resource);
-    }
-    return resources;
-  }
-
-  @Override
-  public RequestStatus updateResources(Request request, Predicate predicate)
-      throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-    throw new UnsupportedOperationException("Not currently supported.");
-  }
-
-  @Override
-  public RequestStatus deleteResources(Predicate predicate)
-      throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-    throw new UnsupportedOperationException("Not currently supported.");
-  }
-
-  // ----- utility methods -------------------------------------------------
-
-  @Override
-  protected Set<String> getPKPropertyIds() {
-    return pkPropertyIds;
-  }
-
-  /**
-   * Get a request status request object from a map of property values.
-   *
-   * @param properties  the map of property values (extracted from the predicate)
-   *
-   * @return the request status request object
-   */
-  private RequestStatusRequest getRequest(Map<String, Object> properties) {
-    Long requestId = null;
-    if (properties.get(REQUEST_ID_PROPERTY_ID) != null) {
-      requestId = Long.valueOf((String) properties
-          .get(REQUEST_ID_PROPERTY_ID));
-    }
-    String requestStatus = null;
-    if (properties.get(REQUEST_STATUS_PROPERTY_ID) != null) {
-      requestStatus = (String)properties.get(REQUEST_STATUS_PROPERTY_ID);
-    }
-    return new RequestStatusRequest(requestId, requestStatus);
-  }
-}
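
The provider takes its inputs from the predicate rather than the request body. A sketch of the flattening step that getResources performs, assuming the EqualsPredicate constructor used elsewhere in this diff:

    package org.apache.ambari.server.controller.internal;

    import java.util.Map;

    import org.apache.ambari.server.controller.predicate.EqualsPredicate;
    import org.apache.ambari.server.controller.utilities.PredicateHelper;

    public class RequestPredicateSketch {
      public static void main(String[] args) {
        // A query such as GET .../requests/2 arrives as an equals predicate.
        EqualsPredicate predicate = new EqualsPredicate("Requests/id", "2");

        // Same flattening call used by getResources above.
        Map<String, Object> properties = PredicateHelper.getProperties(predicate);
        System.out.println(properties); // {Requests/id=2}
      }
    }
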
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestStatusImpl.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestStatusImpl.java
deleted file mode 100644
index eb2f0da..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestStatusImpl.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.internal;
-
-import org.apache.ambari.server.controller.spi.RequestStatus;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-
-import java.util.Collections;
-import java.util.Set;
-
-/**
- * Default request status implementation.
- */
-public class RequestStatusImpl implements RequestStatus {
-
-  private final Resource requestResource;
-
-  public RequestStatusImpl(Resource requestResource) {
-    this.requestResource = requestResource;
-  }
-
-  @Override
-  public Set<Resource> getAssociatedResources() {
-    return Collections.emptySet();  // TODO : handle in M4
-  }
-
-  @Override
-  public Resource getRequestResource() {
-    return requestResource;
-  }
-
-  @Override
-  public Status getStatus() {
-
-    return requestResource == null ? Status.Complete :
-        Status.valueOf((String) requestResource.getPropertyValue(PropertyHelper.getPropertyId("Requests", "status")));
-  }
-}
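
A small sketch of the status contract above: a null request resource reports Complete, which is how synchronous operations are represented:

    package org.apache.ambari.server.controller.internal;

    import org.apache.ambari.server.controller.spi.RequestStatus;

    public class RequestStatusSketch {
      public static void main(String[] args) {
        // A null request resource represents a synchronously completed operation.
        RequestStatus status = new RequestStatusImpl(null);
        System.out.println(status.getStatus());              // Complete
        System.out.println(status.getAssociatedResources()); // []
      }
    }
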
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ResourceImpl.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ResourceImpl.java
deleted file mode 100644
index b6e0318..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ResourceImpl.java
+++ /dev/null
@@ -1,208 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.internal;
-
-import org.apache.ambari.server.api.util.TreeNode;
-import org.apache.ambari.server.api.util.TreeNodeImpl;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * Simple resource implementation.
- */
-public class ResourceImpl implements Resource {
-
-  /**
-   * The resource type.
-   */
-  private final Type type;
-
-  /**
-   * Tree of categories/properties.
-   * Each category is a sub-node, and each node contains a map of properties (name/value pairs).
-   */
-  private final TreeNode<Map<String, Object>> m_treeProperties =
-      new TreeNodeImpl<Map<String, Object>>(null, new HashMap<String, Object>(), null);
-
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Create a resource of the given type.
-   *
-   * @param type  the resource type
-   */
-  public ResourceImpl(Type type) {
-    this.type = type;
-  }
-
-  /**
-   * Copy constructor
-   *
-   * @param resource  the resource to copy
-   */
-  public ResourceImpl(Resource resource) {
-    this.type = resource.getType();
-
-    for (Map.Entry<String, Map<String, Object>> categoryEntry : resource.getPropertiesMap().entrySet()) {
-      String category = categoryEntry.getKey();
-      Map<String, Object> propertyMap = categoryEntry.getValue();
-      if (propertyMap != null) {
-        for (Map.Entry<String, Object> propertyEntry : propertyMap.entrySet()) {
-          String propertyId    = (category == null ? "" : category + "/") + propertyEntry.getKey();
-          Object propertyValue = propertyEntry.getValue();
-          setProperty(propertyId, propertyValue);
-        }
-      }
-    }
-  }
-
-
-  // ----- Resource ----------------------------------------------------------
-
-  @Override
-  public Type getType() {
-    return type;
-  }
-
-  @Override
-  public TreeNode<Map<String, Object>> getProperties() {
-    return m_treeProperties;
-  }
-
-  @Override
-  public Map<String, Map<String, Object>> getPropertiesMap() {
-    Map<String, Map<String, Object>> mapProps = new HashMap<String, Map<String, Object>>();
-    addNodeToMap(m_treeProperties, mapProps, null);
-
-    return mapProps;
-  }
-
-  @Override
-  public void setProperty(String id, Object value) {
-    String category = PropertyHelper.getPropertyCategory(id);
-    TreeNode<Map<String, Object>> node;
-    if (category == null) {
-      node = m_treeProperties;
-    } else {
-      node = m_treeProperties.getChild(category);
-      if (node == null) {
-        String[] tokens = category.split("/");
-        node = m_treeProperties;
-        for (String t : tokens) {
-          TreeNode<Map<String, Object>> child = node.getChild(t);
-          if (child == null) {
-            child = node.addChild(new HashMap<String, Object>(), t);
-          }
-          node = child;
-        }
-      }
-    }
-    node.getObject().put(PropertyHelper.getPropertyName(id), value);
-  }
-
-  @Override
-  public void addCategory(String id) {
-    TreeNode<Map<String, Object>> node;
-    if (id != null) {
-      node = m_treeProperties.getChild(id);
-      if (node == null) {
-        String[] tokens = id.split("/");
-        node = m_treeProperties;
-        for (String t : tokens) {
-          TreeNode<Map<String, Object>> child = node.getChild(t);
-          if (child == null) {
-            child = node.addChild(new HashMap<String, Object>(), t);
-          }
-          node = child;
-        }
-      }
-    }
-  }
-
-  @Override
-  public Object getPropertyValue(String id) {
-    String category = PropertyHelper.getPropertyCategory(id);
-    TreeNode<Map<String, Object>> node = (category == null) ? m_treeProperties :
-        m_treeProperties.getChild(category);
-
-    return node == null ? null : node.getObject().get(PropertyHelper.getPropertyName(id));
-  }
-
-
-  // ----- Object overrides --------------------------------------------------
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder();
-
-    sb.append("Resource : ").append(type).append("\n");
-    sb.append("Properties:\n");
-
-    printPropertyNode(m_treeProperties, sb, null, "  ");
-
-    return sb.toString();
-  }
-
-
-  // ----- class private methods ---------------------------------------------
-
-  /**
-   * Recursively prints the properties for a given node and its children to a StringBuilder.
-   *
-   * @param node      the node to print properties for
-   * @param sb        the StringBuilder to print to
-   * @param category  the absolute category name
-   * @param indent    the indent to be used
-   */
-  private void printPropertyNode(TreeNode<Map<String, Object>> node, StringBuilder sb, String category, String indent) {
-    if (node.getParent() != null) {
-      category = category == null ? node.getName() : category + '/' + node.getName();
-      sb.append(indent).append("Category: ").append(category).append('\n');
-      indent += "  ";
-    }
-    for (Map.Entry<String, Object> entry : node.getObject().entrySet()) {
-      sb.append(indent).append(entry.getKey()).append('=').append(entry.getValue()).append('\n');
-    }
-
-    for (TreeNode<Map<String, Object>> n : node.getChildren()) {
-      printPropertyNode(n, sb, category, indent);
-    }
-  }
-
-  /**
-   * Add the node properties to the specified map.
-   * Makes recursive calls for each child node.
-   *
-   * @param node      the node whose properties are to be added
-   * @param mapProps  the map that the props are to be added to
-   * @param path      the current category hierarchy
-   */
-  private void addNodeToMap(TreeNode<Map<String, Object>> node, Map<String, Map<String, Object>> mapProps, String path) {
-    path = path == null ? node.getName() : path + "/" + node.getName();
-    mapProps.put(path, node.getObject());
-
-    for (TreeNode<Map<String, Object>> child : node.getChildren()) {
-      addNodeToMap(child, mapProps, path);
-    }
-  }
-}
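
A usage sketch of the category tree: slash-separated property ids are split into a category path and a property name, and intermediate nodes are created on demand (hypothetical values):

    package org.apache.ambari.server.controller.internal;

    import java.util.Map;

    import org.apache.ambari.server.controller.spi.Resource;

    public class ResourceImplSketch {
      public static void main(String[] args) {
        Resource resource = new ResourceImpl(Resource.Type.Service);

        // Creates the "ServiceInfo" node and stores state=STARTED in it.
        resource.setProperty("ServiceInfo/state", "STARTED");
        // Nested categories create intermediate nodes on demand.
        resource.setProperty("ServiceInfo/desired_configs/global", "version1");

        System.out.println(resource.getPropertyValue("ServiceInfo/state")); // STARTED

        // Category paths become keys of the flattened map.
        Map<String, Map<String, Object>> map = resource.getPropertiesMap();
        System.out.println(map.keySet()); // includes ServiceInfo and ServiceInfo/desired_configs
      }
    }
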
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ResourceProviderEvent.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ResourceProviderEvent.java
deleted file mode 100644
index 950f03a..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ResourceProviderEvent.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.internal;
-
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.Resource;
-
-/**
- * Resource provider event used to update resource provider observers.
- */
-public class ResourceProviderEvent {
-  private final Resource.Type resourceType;
-  private final Type type;
-  private final Request request;
-  private final Predicate predicate;
-
-
-  // ----- Constructors ------------------------------------------------------
-
-  public ResourceProviderEvent(Resource.Type resourceType, Type type, Request request, Predicate predicate) {
-    this.resourceType = resourceType;
-    this.type = type;
-    this.request = request;
-    this.predicate = predicate;
-  }
-
-  // ----- ResourceProviderEvent ---------------------------------------------
-
-  /**
-   * Get the associated resource type.
-   *
-   * @return the resource type
-   */
-  public Resource.Type getResourceType() {
-    return resourceType;
-  }
-
-  /**
-   * Get the event type.
-   *
-   * @return the event type
-   */
-  public Type getType() {
-    return type;
-  }
-
-  /**
-   * Get the request object that was used for the operation that generated this event.
-   *
-   * @return the request object
-   */
-  public Request getRequest() {
-    return request;
-  }
-
-  /**
-   * Get the predicate object that was used for the operation that generated this event.
-   *
-   * @return the predicate object
-   */
-  public Predicate getPredicate() {
-    return predicate;
-  }
-
-
-  // ----- event type enumeration --------------------------------------------
-
-  public enum Type {
-    Create,
-    Update,
-    Delete
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ResourceProviderObserver.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ResourceProviderObserver.java
deleted file mode 100644
index a5dd3df..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ResourceProviderObserver.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.internal;
-
-/**
- * An object that listens for events from a resource provider.
- */
-public interface ResourceProviderObserver {
-  /**
-   * Update this observer with an event from a resource provider.
-   *
-   * @param event the event
-   */
-  public void update(ResourceProviderEvent event);
-}
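
An observer is just a callback. A logging sketch (hypothetical class) that reacts only to create events:

    package org.apache.ambari.server.controller.internal;

    // Hypothetical observer that reacts only to create events.
    public class LoggingResourceProviderObserver implements ResourceProviderObserver {

      @Override
      public void update(ResourceProviderEvent event) {
        if (event.getType() == ResourceProviderEvent.Type.Create) {
          System.out.println("Created resource(s) of type " + event.getResourceType());
        }
      }
    }
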
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/SchemaImpl.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/SchemaImpl.java
deleted file mode 100644
index 4227dbf..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/SchemaImpl.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.internal;
-
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.spi.ResourceProvider;
-import org.apache.ambari.server.controller.spi.Schema;
-
-import java.util.Collections;
-
-
-/**
- * Simple schema implementation.
- */
-public class SchemaImpl implements Schema {
-  /**
-   * The associated resource provider.
-   */
-  private final ResourceProvider resourceProvider;
-
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Create a new schema for the given resource provider.
-   *
-   * @param resourceProvider  the resource provider
-   */
-  public SchemaImpl(ResourceProvider resourceProvider) {
-    this.resourceProvider   = resourceProvider;
-  }
-
-
-  // ----- Schema ------------------------------------------------------------
-
-  @Override
-  public String getKeyPropertyId(Resource.Type type) {
-    return resourceProvider.getKeyPropertyIds().get(type);
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
deleted file mode 100644
index 56774bb..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
+++ /dev/null
@@ -1,222 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.internal;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.RequestStatusResponse;
-import org.apache.ambari.server.controller.ServiceRequest;
-import org.apache.ambari.server.controller.ServiceResponse;
-import org.apache.ambari.server.controller.spi.*;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Resource provider for service resources.
- */
-class ServiceResourceProvider extends AbstractResourceProvider {
-
-
-  // ----- Property ID constants ---------------------------------------------
-
-  // Services
-  protected static final String SERVICE_CLUSTER_NAME_PROPERTY_ID    = PropertyHelper.getPropertyId("ServiceInfo", "cluster_name");
-  protected static final String SERVICE_SERVICE_NAME_PROPERTY_ID    = PropertyHelper.getPropertyId("ServiceInfo", "service_name");
-  protected static final String SERVICE_SERVICE_STATE_PROPERTY_ID   = PropertyHelper.getPropertyId("ServiceInfo", "state");
-  protected static final String SERVICE_DESIRED_CONFIGS_PROPERTY_ID = PropertyHelper.getPropertyId("ServiceInfo", "desired_configs");
-
-
-  private static Set<String> pkPropertyIds =
-      new HashSet<String>(Arrays.asList(new String[]{
-          SERVICE_CLUSTER_NAME_PROPERTY_ID,
-          SERVICE_SERVICE_NAME_PROPERTY_ID}));
-
-  // ----- Constructors ----------------------------------------------------
-
-  /**
-   * Create a new resource provider for the given management controller.
-   *
-   * @param propertyIds           the property ids
-   * @param keyPropertyIds        the key property ids
-   * @param managementController  the management controller
-   */
-  ServiceResourceProvider(Set<String> propertyIds,
-                          Map<Resource.Type, String> keyPropertyIds,
-                          AmbariManagementController managementController) {
-    super(propertyIds, keyPropertyIds, managementController);
-  }
-
-  // ----- ResourceProvider ------------------------------------------------
-
-  @Override
-  public RequestStatus createResources(Request request)
-      throws SystemException,
-             UnsupportedPropertyException,
-             ResourceAlreadyExistsException,
-             NoSuchParentResourceException {
-
-    final Set<ServiceRequest> requests = new HashSet<ServiceRequest>();
-    for (Map<String, Object> propertyMap : request.getProperties()) {
-      requests.add(getRequest(propertyMap));
-    }
-    createResources(new Command<Void>() {
-      @Override
-      public Void invoke() throws AmbariException {
-        getManagementController().createServices(requests);
-        return null;
-      }
-    });
-    notifyCreate(Resource.Type.Service, request);
-
-    return getRequestStatus(null);
-  }
-
-  @Override
-  public Set<Resource> getResources(Request request, Predicate predicate) throws
-      SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-
-    final Set<ServiceRequest> requests = new HashSet<ServiceRequest>();
-
-    for (Map<String, Object> propertyMap : getPropertyMaps(predicate)) {
-      requests.add(getRequest(propertyMap));
-    }
-
-    Set<ServiceResponse> responses = getResources(new Command<Set<ServiceResponse>>() {
-      @Override
-      public Set<ServiceResponse> invoke() throws AmbariException {
-        return getManagementController().getServices(requests);
-      }
-    });
-
-    Set<String>   requestedIds = getRequestPropertyIds(request, predicate);
-    Set<Resource> resources    = new HashSet<Resource>();
-
-    for (ServiceResponse response : responses) {
-      Resource resource = new ResourceImpl(Resource.Type.Service);
-      setResourceProperty(resource, SERVICE_CLUSTER_NAME_PROPERTY_ID,
-          response.getClusterName(), requestedIds);
-      setResourceProperty(resource, SERVICE_SERVICE_NAME_PROPERTY_ID,
-          response.getServiceName(), requestedIds);
-      setResourceProperty(resource, SERVICE_DESIRED_CONFIGS_PROPERTY_ID,
-          response.getConfigVersions(), requestedIds);
-      setResourceProperty(resource, SERVICE_SERVICE_STATE_PROPERTY_ID,
-          response.getDesiredState(), requestedIds);
-      resources.add(resource);
-    }
-    return resources;
-  }
-
-  @Override
-  public RequestStatus updateResources(Request request, Predicate predicate)
-      throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-
-    final Set<ServiceRequest> requests = new HashSet<ServiceRequest>();
-    RequestStatusResponse     response = null;
-
-    Iterator<Map<String,Object>> iterator = request.getProperties().iterator();
-    if (iterator.hasNext()) {
-      for (Map<String, Object> propertyMap : getPropertyMaps(iterator.next(), predicate)) {
-        requests.add(getRequest(propertyMap));
-      }
-      response = modifyResources(new Command<RequestStatusResponse>() {
-        @Override
-        public RequestStatusResponse invoke() throws AmbariException {
-          return getManagementController().updateServices(requests);
-        }
-      });
-    }
-    notifyUpdate(Resource.Type.Service, request, predicate);
-
-    return getRequestStatus(response);
-  }
-
-  @Override
-  public RequestStatus deleteResources(Predicate predicate)
-      throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-
-    final Set<ServiceRequest> requests = new HashSet<ServiceRequest>();
-    for (Map<String, Object> propertyMap : getPropertyMaps(predicate)) {
-      requests.add(getRequest(propertyMap));
-    }
-    RequestStatusResponse response = modifyResources(new Command<RequestStatusResponse>() {
-      @Override
-      public RequestStatusResponse invoke() throws AmbariException {
-        return getManagementController().deleteServices(requests);
-      }
-    });
-
-    notifyDelete(Resource.Type.Service, predicate);
-    return getRequestStatus(response);
-  }
-
-  @Override
-  public Set<String> checkPropertyIds(Set<String> propertyIds) {
-    propertyIds = super.checkPropertyIds(propertyIds);
-
-    if (propertyIds.isEmpty()) {
-      return propertyIds;
-    }
-    Set<String> unsupportedProperties = new HashSet<String>();
-
-    for (String propertyId : propertyIds) {
-      if (!propertyId.equals("config")) {
-        String propertyCategory = PropertyHelper.getPropertyCategory(propertyId);
-        if (propertyCategory == null || !propertyCategory.equals("config")) {
-          unsupportedProperties.add(propertyId);
-        }
-      }
-    }
-    return unsupportedProperties;
-  }
-
-
-  // ----- utility methods -------------------------------------------------
-
-  @Override
-  protected Set<String> getPKPropertyIds() {
-    return pkPropertyIds;
-  }
-
-  /**
-   * Get a service request object from a map of property values.
-   *
-   * @param properties  the map of property values (extracted from the predicate)
-   *
-   * @return the service request object
-   */
-  private ServiceRequest getRequest(Map<String, Object> properties) {
-    ServiceRequest svcRequest = new ServiceRequest(
-        (String) properties.get(SERVICE_CLUSTER_NAME_PROPERTY_ID),
-        (String) properties.get(SERVICE_SERVICE_NAME_PROPERTY_ID),
-        null,
-        (String) properties.get(SERVICE_SERVICE_STATE_PROPERTY_ID));
-
-    Map<String, String> configMappings =
-        ConfigurationResourceProvider.getConfigPropertyValues(properties);
-
-    if (configMappings.size() > 0) {
-      svcRequest.setConfigVersions(configMappings);
-    }
-    return svcRequest;
-  }
-}
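
The SERVICE_* constants above follow the slash-separated property-id convention used throughout these providers. A small sketch of the PropertyHelper round trip (output values assumed from the convention shown in ResourceImpl earlier in this diff):

    import org.apache.ambari.server.controller.utilities.PropertyHelper;

    public class PropertyIdSketch {
      public static void main(String[] args) {
        // Category and name are joined into the slash-separated ids used above.
        String id = PropertyHelper.getPropertyId("ServiceInfo", "state");
        System.out.println(id);                                     // ServiceInfo/state
        System.out.println(PropertyHelper.getPropertyCategory(id)); // ServiceInfo
        System.out.println(PropertyHelper.getPropertyName(id));     // state
      }
    }
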
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/SimplifyingPredicateVisitor.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/SimplifyingPredicateVisitor.java
deleted file mode 100644
index df907c8..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/SimplifyingPredicateVisitor.java
+++ /dev/null
@@ -1,170 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.internal;
-
-import org.apache.ambari.server.controller.predicate.AlwaysPredicate;
-import org.apache.ambari.server.controller.predicate.AndPredicate;
-import org.apache.ambari.server.controller.predicate.ArrayPredicate;
-import org.apache.ambari.server.controller.predicate.BasePredicate;
-import org.apache.ambari.server.controller.predicate.CategoryPredicate;
-import org.apache.ambari.server.controller.predicate.ComparisonPredicate;
-import org.apache.ambari.server.controller.predicate.EqualsPredicate;
-import org.apache.ambari.server.controller.predicate.OrPredicate;
-import org.apache.ambari.server.controller.predicate.PredicateVisitor;
-import org.apache.ambari.server.controller.predicate.UnaryPredicate;
-
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Set;
-
-/**
- * A predicate visitor used to simplify a predicate by doing the following:
- *
- * 1) Distribute across OR (e.g. A && ( B || C ) becomes ( A && B ) || ( A && C )).
- * This is done because an individual back-end request object cannot handle OR.  Instead
- * we need to break up the predicate to form multiple requests and take the union of all
- * the responses.
- * 2) Convert predicates based on unsupported properties to AlwaysPredicate.
- * Unsupported properties are those not returned by the resource provider.  For these
- * properties we need to wait until the property provider that handles the property has
- * been called.
- * 3) Convert predicates based on any operator other than == to AlwaysPredicate.
- * The back-end requests cannot handle any operator other than equals.  The complete predicate
- * will be applied further down the line if necessary.
- *
- * After visiting a predicate, the visitor can supply a list of predicates that can be
- * used to generate requests to the back end, working around the restrictions above.
- *
- * Note that the results acquired using the generated predicates may be a superset of what is
- * actually desired given the original predicate, but the original predicate will be applied
- * at a suitable time down the line if required.
- */
-public class SimplifyingPredicateVisitor implements PredicateVisitor {
-
-  private final Set<String> supportedProperties;
-  private BasePredicate lastVisited = null;
-
-  public SimplifyingPredicateVisitor(Set<String> supportedProperties) {
-    this.supportedProperties = supportedProperties;
-  }
-
-  public List<BasePredicate> getSimplifiedPredicates() {
-    if (lastVisited == null) {
-      return Collections.emptyList();
-    }
-    if (lastVisited instanceof OrPredicate) {
-      return Arrays.asList(((OrPredicate) lastVisited).getPredicates());
-    }
-    return Collections.singletonList(lastVisited);
-  }
-
-  @Override
-  public void acceptComparisonPredicate(ComparisonPredicate predicate) {
-    if (predicate instanceof EqualsPredicate &&
-        supportedProperties.contains(predicate.getPropertyId())) {
-      lastVisited = predicate;
-    }
-    else {
-      lastVisited = AlwaysPredicate.INSTANCE;
-    }
-  }
-
-  @Override
-  public void acceptArrayPredicate(ArrayPredicate arrayPredicate) {
-    List<BasePredicate> predicateList = new LinkedList<BasePredicate>();
-    boolean hasOrs = false;
-
-    BasePredicate[] predicates = arrayPredicate.getPredicates();
-    if (predicates.length > 0) {
-      for (BasePredicate predicate : predicates) {
-        predicate.accept(this);
-        predicateList.add(lastVisited);
-        if (lastVisited instanceof OrPredicate) {
-          hasOrs = true;
-        }
-      }
-    }
-    // distribute so that A && ( B || C ) becomes ( A && B ) || ( A && C )
-    if (hasOrs && arrayPredicate instanceof AndPredicate) {
-      int size = predicateList.size();
-      List<BasePredicate> andPredicateList = new LinkedList<BasePredicate>();
-
-      for (int i = 0; i < size; ++i) {
-        for (int j = i + 1; j < size; ++j) {
-          andPredicateList.addAll(distribute(predicateList.get(i), predicateList.get(j)));
-        }
-      }
-      lastVisited = OrPredicate.instance(andPredicateList.toArray(new BasePredicate[andPredicateList.size()]));
-    }
-    else {
-      lastVisited = arrayPredicate.create(predicateList.toArray(new BasePredicate[predicateList.size()]));
-    }
-  }
-
-  @Override
-  public void acceptUnaryPredicate(UnaryPredicate predicate) {
-    lastVisited = predicate;
-  }
-
-  @Override
-  public void acceptAlwaysPredicate(AlwaysPredicate predicate) {
-    lastVisited = predicate;
-  }
-
-  private static List<BasePredicate> distribute(BasePredicate left, BasePredicate right) {
-
-    if (left instanceof OrPredicate) {
-      return distributeOr((OrPredicate) left, right);
-    }
-
-    if (right instanceof OrPredicate) {
-      return distributeOr((OrPredicate) right, left);
-    }
-    return Collections.singletonList(left.equals(right) ?
-        left : AndPredicate.instance(left, right));
-  }
-
-  private static List<BasePredicate> distributeOr(OrPredicate orPredicate, BasePredicate other) {
-    List<BasePredicate> andPredicateList = new LinkedList<BasePredicate>();
-    OrPredicate otherOr = null;
-
-    if (other instanceof OrPredicate) {
-      otherOr = (OrPredicate) other;
-    }
-
-    for (BasePredicate basePredicate : orPredicate.getPredicates()) {
-
-      if (otherOr != null) {
-        andPredicateList.addAll(distributeOr(otherOr, basePredicate));
-      }
-      else {
-        andPredicateList.add(basePredicate.equals(other) ?
-            basePredicate : AndPredicate.instance(basePredicate, other));
-      }
-    }
-    return andPredicateList;
-  }
-
-  @Override
-  public void acceptCategoryPredicate(CategoryPredicate predicate) {
-    lastVisited = predicate;
-  }
-}
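
A worked sketch of the distribution described in the class comment, assuming the raw EqualsPredicate constructor and the array form of OrPredicate.instance used above: A && ( B || C ) simplifies to the two back-end predicates ( A && B ) and ( A && C ):

    package org.apache.ambari.server.controller.internal;

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    import org.apache.ambari.server.controller.predicate.AndPredicate;
    import org.apache.ambari.server.controller.predicate.BasePredicate;
    import org.apache.ambari.server.controller.predicate.EqualsPredicate;
    import org.apache.ambari.server.controller.predicate.OrPredicate;

    public class SimplifyingVisitorSketch {
      public static void main(String[] args) {
        BasePredicate a = new EqualsPredicate("ServiceInfo/cluster_name", "c1");
        BasePredicate b = new EqualsPredicate("ServiceInfo/service_name", "HDFS");
        BasePredicate c = new EqualsPredicate("ServiceInfo/service_name", "MAPREDUCE");

        // A && ( B || C ); the array form of OrPredicate.instance is used above.
        BasePredicate predicate = AndPredicate.instance(a,
            OrPredicate.instance(new BasePredicate[]{b, c}));

        Set<String> supported = new HashSet<String>(Arrays.asList(
            "ServiceInfo/cluster_name", "ServiceInfo/service_name"));

        SimplifyingPredicateVisitor visitor = new SimplifyingPredicateVisitor(supported);
        predicate.accept(visitor);

        // Distribution yields ( A && B ) and ( A && C ): one back-end request each.
        List<BasePredicate> simplified = visitor.getSimplifiedPredicates();
        System.out.println(simplified.size()); // 2
      }
    }
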
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/TaskResourceProvider.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/TaskResourceProvider.java
deleted file mode 100644
index e51ac85..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/TaskResourceProvider.java
+++ /dev/null
@@ -1,172 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.internal;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.TaskStatusRequest;
-import org.apache.ambari.server.controller.TaskStatusResponse;
-import org.apache.ambari.server.controller.spi.NoSuchParentResourceException;
-import org.apache.ambari.server.controller.spi.NoSuchResourceException;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.RequestStatus;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.spi.SystemException;
-import org.apache.ambari.server.controller.spi.UnsupportedPropertyException;
-import org.apache.ambari.server.controller.utilities.PredicateHelper;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Resource provider for task resources.
- */
-class TaskResourceProvider extends AbstractResourceProvider {
-
-  // ----- Property ID constants ---------------------------------------------
-
-  // Tasks
-  protected static final String TASK_CLUSTER_NAME_PROPERTY_ID = PropertyHelper.getPropertyId("Tasks", "cluster_name");
-  protected static final String TASK_REQUEST_ID_PROPERTY_ID   = PropertyHelper.getPropertyId("Tasks", "request_id");
-  protected static final String TASK_ID_PROPERTY_ID           = PropertyHelper.getPropertyId("Tasks", "id");
-  protected static final String TASK_STAGE_ID_PROPERTY_ID     = PropertyHelper.getPropertyId("Tasks", "stage_id");
-  protected static final String TASK_HOST_NAME_PROPERTY_ID    = PropertyHelper.getPropertyId("Tasks", "host_name");
-  protected static final String TASK_ROLE_PROPERTY_ID         = PropertyHelper.getPropertyId("Tasks", "role");
-  protected static final String TASK_COMMAND_PROPERTY_ID      = PropertyHelper.getPropertyId("Tasks", "command");
-  protected static final String TASK_STATUS_PROPERTY_ID       = PropertyHelper.getPropertyId("Tasks", "status");
-  protected static final String TASK_EXIT_CODE_PROPERTY_ID    = PropertyHelper.getPropertyId("Tasks", "exit_code");
-  protected static final String TASK_STDERR_PROPERTY_ID       = PropertyHelper.getPropertyId("Tasks", "stderr");
-  protected static final String TASK_STOUT_PROPERTY_ID        = PropertyHelper.getPropertyId("Tasks", "stdout");
-  protected static final String TASK_START_TIME_PROPERTY_ID   = PropertyHelper.getPropertyId("Tasks", "start_time");
-  protected static final String TASK_ATTEMPT_CNT_PROPERTY_ID  = PropertyHelper.getPropertyId("Tasks", "attempt_cnt");
-
-
-  private static Set<String> pkPropertyIds =
-      new HashSet<String>(Arrays.asList(new String[]{
-          TASK_ID_PROPERTY_ID}));
-
-  // ----- Constructors ----------------------------------------------------
-
-  /**
-   * Create a new resource provider for the given management controller.
-   *
-   * @param propertyIds           the property ids
-   * @param keyPropertyIds        the key property ids
-   * @param managementController  the management controller
-   */
-  TaskResourceProvider(Set<String> propertyIds,
-                       Map<Resource.Type, String> keyPropertyIds,
-                       AmbariManagementController managementController) {
-    super(propertyIds, keyPropertyIds, managementController);
-  }
-
-  // ----- ResourceProvider ------------------------------------------------
-
-  @Override
-  public RequestStatus createResources(Request request) {
-    throw new UnsupportedOperationException("Not currently supported.");
-  }
-
-  @Override
-  public Set<Resource> getResources(Request request, Predicate predicate)
-      throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-
-    Set<String> requestedIds = getRequestPropertyIds(request, predicate);
-    final Map<String, Object> predicateProperties = PredicateHelper.getProperties(predicate);
-
-    String clusterName = (String) predicateProperties.get(TASK_CLUSTER_NAME_PROPERTY_ID);
-    Long   requestId   = Long.valueOf((String) predicateProperties.get(TASK_REQUEST_ID_PROPERTY_ID));
-
-    // TODO : handle multiple requests
-
-    Set<TaskStatusResponse> responses = getResources(new Command<Set<TaskStatusResponse>>() {
-      @Override
-      public Set<TaskStatusResponse> invoke() throws AmbariException {
-        return getManagementController().getTaskStatus(Collections.singleton(getRequest(predicateProperties)));
-      }
-    });
-    
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Printing size of responses " + responses.size());
-      for (TaskStatusResponse response : responses) {
-        LOG.debug("Printing response from management controller "
-            + response.toString());
-      }
-    }
-
-    Set<Resource> resources = new HashSet<Resource>();
-    for (TaskStatusResponse response : responses) {
-      Resource resource = new ResourceImpl(Resource.Type.Task);
-
-      setResourceProperty(resource, TASK_CLUSTER_NAME_PROPERTY_ID, clusterName, requestedIds);
-      setResourceProperty(resource, TASK_REQUEST_ID_PROPERTY_ID, requestId, requestedIds);
-      setResourceProperty(resource, TASK_ID_PROPERTY_ID, response.getTaskId(), requestedIds);
-      setResourceProperty(resource, TASK_STAGE_ID_PROPERTY_ID, response.getStageId(), requestedIds);
-      setResourceProperty(resource, TASK_HOST_NAME_PROPERTY_ID, response.getHostName(), requestedIds);
-      setResourceProperty(resource, TASK_ROLE_PROPERTY_ID, response.getRole(), requestedIds);
-      setResourceProperty(resource, TASK_COMMAND_PROPERTY_ID, response.getCommand(), requestedIds);
-      setResourceProperty(resource, TASK_STATUS_PROPERTY_ID, response.getStatus(), requestedIds);
-      setResourceProperty(resource, TASK_EXIT_CODE_PROPERTY_ID, response.getExitCode(), requestedIds);
-      setResourceProperty(resource, TASK_STDERR_PROPERTY_ID, response.getStderr(), requestedIds);
-      setResourceProperty(resource, TASK_STDOUT_PROPERTY_ID, response.getStdout(), requestedIds);
-      setResourceProperty(resource, TASK_START_TIME_PROPERTY_ID, response.getStartTime(), requestedIds);
-      setResourceProperty(resource, TASK_ATTEMPT_CNT_PROPERTY_ID, response.getAttemptCount(), requestedIds);
-      resources.add(resource);
-    }
-    return resources;
-  }
-
-  @Override
-  public RequestStatus updateResources(Request request, Predicate predicate)
-      throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-    throw new UnsupportedOperationException("Not currently supported.");
-  }
-
-  @Override
-  public RequestStatus deleteResources(Predicate predicate)
-      throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-    throw new UnsupportedOperationException("Not currently supported.");
-  }
-
-  // ----- utility methods -------------------------------------------------
-
-  @Override
-  protected Set<String> getPKPropertyIds() {
-    return pkPropertyIds;
-  }
-
-  /**
-   * Get a task status request object from a map of property values.
-   *
-   * @param properties  the predicate properties
-   *
-   * @return the task status request object
-   */
-  private TaskStatusRequest getRequest(Map<String, Object> properties) {
-    String taskId = (String) properties.get(TASK_ID_PROPERTY_ID);
-    Long taskIdValue = (taskId == null ? null : Long.valueOf(taskId));
-    return new TaskStatusRequest(
-        Long.valueOf((String) properties.get(TASK_REQUEST_ID_PROPERTY_ID)),
-        taskIdValue);
-  }
-}
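
The getResources()/getRequest() pair above reduces to translating predicate
properties into a TaskStatusRequest keyed by request id plus an optional task
id. A minimal sketch of that translation, assuming TaskStatusRequest lives in
org.apache.ambari.server.controller (by analogy with the UserRequest import
further below) and using sample property values:

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.ambari.server.controller.TaskStatusRequest;
    import org.apache.ambari.server.controller.utilities.PropertyHelper;

    public class TaskRequestSketch {
      public static void main(String[] args) {
        // Predicate properties as they would arrive from a query such as
        // GET /api/v1/clusters/c1/requests/2/tasks/5 (sample values).
        Map<String, Object> props = new HashMap<String, Object>();
        props.put(PropertyHelper.getPropertyId("Tasks", "request_id"), "2");
        props.put(PropertyHelper.getPropertyId("Tasks", "id"), "5");

        String taskId = (String) props.get(PropertyHelper.getPropertyId("Tasks", "id"));
        TaskStatusRequest request = new TaskStatusRequest(
            Long.valueOf((String) props.get(PropertyHelper.getPropertyId("Tasks", "request_id"))),
            taskId == null ? null : Long.valueOf(taskId));
        System.out.println("built request for request_id=2, task_id=5");
      }
    }
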
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/TemporalInfoImpl.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/TemporalInfoImpl.java
deleted file mode 100644
index bce228f..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/TemporalInfoImpl.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.internal;
-
-import org.apache.ambari.server.controller.spi.TemporalInfo;
-
-/**
- * Temporal query data.
- */
-public class TemporalInfoImpl implements TemporalInfo {
-  private long m_startTime;
-  private long m_endTime;
-  private long m_step;
-
-  public TemporalInfoImpl(long startTime, long endTime, long step) {
-    m_startTime = startTime;
-    m_endTime = endTime;
-    m_step = step;
-  }
-
-  @Override
-  public Long getStartTime() {
-    return m_startTime;
-  }
-
-  @Override
-  public Long getEndTime() {
-    return m_endTime;
-  }
-
-  @Override
-  public Long getStep() {
-    return m_step;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    TemporalInfoImpl that = (TemporalInfoImpl) o;
-    return m_endTime == that.m_endTime &&
-           m_startTime == that.m_startTime &&
-           m_step == that.m_step;
-
-  }
-
-  @Override
-  public int hashCode() {
-    int result = (int) (m_startTime ^ (m_startTime >>> 32));
-    result = 31 * result + (int) (m_endTime ^ (m_endTime >>> 32));
-    result = 31 * result + (int) (m_step ^ (m_step >>> 32));
-    return result;
-  }
-}
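
Because TemporalInfoImpl overrides equals() and hashCode() over all three
fields, two instances describing the same window are interchangeable, for
example as map keys. A small sketch using sample values:

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.ambari.server.controller.internal.TemporalInfoImpl;
    import org.apache.ambari.server.controller.spi.TemporalInfo;

    public class TemporalInfoSketch {
      public static void main(String[] args) {
        TemporalInfo a = new TemporalInfoImpl(1000L, 2000L, 15L);
        TemporalInfo b = new TemporalInfoImpl(1000L, 2000L, 15L);

        Map<TemporalInfo, String> byWindow = new HashMap<TemporalInfo, String>();
        byWindow.put(a, "cpu_user");             // key by the query window
        System.out.println(byWindow.get(b));     // prints cpu_user: a.equals(b)
      }
    }
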
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/URLStreamProvider.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/URLStreamProvider.java
deleted file mode 100644
index 7c8dee5..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/URLStreamProvider.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.internal;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.net.URL;
-import java.net.URLConnection;
-
-import org.apache.ambari.server.controller.utilities.StreamProvider;
-
-/**
- * URL based implementation of a stream provider.
- */
-public class URLStreamProvider implements StreamProvider {
-  
-  private int connTimeout = -1;
-  
-  public URLStreamProvider() {
-  }
-  
-  /**
-   * Create a stream provider with the given connection timeout.
-   * 
-   * @param connectionTimeout  the time, in milliseconds, to wait when attempting a connection
-   */
-  public URLStreamProvider(int connectionTimeout) {
-    connTimeout = connectionTimeout;
-  }
-  
-  @Override
-  public InputStream readFrom(String spec) throws IOException {
-    URLConnection connection = new URL(spec).openConnection();
-    if (connTimeout > 0) {
-      connection.setConnectTimeout(connTimeout);
-    }
-    connection.setDoOutput(true);
-    return connection.getInputStream();
-  }
-}
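
A usage sketch for the provider above; the URL and timeout are placeholders:

    import java.io.BufferedReader;
    import java.io.IOException;
    import java.io.InputStreamReader;

    import org.apache.ambari.server.controller.internal.URLStreamProvider;

    public class StreamProviderSketch {
      public static void main(String[] args) throws IOException {
        // A non-positive timeout falls through to the JDK default.
        URLStreamProvider provider = new URLStreamProvider(5000);
        BufferedReader reader = new BufferedReader(
            new InputStreamReader(provider.readFrom("http://example.com/jmx")));
        try {
          System.out.println(reader.readLine()); // first line of the response
        } finally {
          reader.close();
        }
      }
    }
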
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UserResourceProvider.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UserResourceProvider.java
deleted file mode 100644
index 042ecad..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UserResourceProvider.java
+++ /dev/null
@@ -1,200 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.internal;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.UserRequest;
-import org.apache.ambari.server.controller.UserResponse;
-import org.apache.ambari.server.controller.spi.*;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Resource provider for user resources.
- */
-class UserResourceProvider extends AbstractResourceProvider {
-
-  // ----- Property ID constants ---------------------------------------------
-
-  // Users
-  protected static final String USER_USERNAME_PROPERTY_ID     = PropertyHelper.getPropertyId("Users", "user_name");
-  protected static final String USER_ROLES_PROPERTY_ID        = PropertyHelper.getPropertyId("Users", "roles");
-  protected static final String USER_PASSWORD_PROPERTY_ID     = PropertyHelper.getPropertyId("Users", "password");
-  protected static final String USER_OLD_PASSWORD_PROPERTY_ID = PropertyHelper.getPropertyId("Users", "old_password");
-  protected static final String USER_LDAP_USER_PROPERTY_ID    = PropertyHelper.getPropertyId("Users", "ldap_user");
-
-  private static Set<String> pkPropertyIds =
-      new HashSet<String>(Arrays.asList(new String[]{
-          USER_USERNAME_PROPERTY_ID}));
-
-  /**
-   * Create a new resource provider for the given management controller.
-   */
-  UserResourceProvider(Set<String> propertyIds,
-                       Map<Resource.Type, String> keyPropertyIds,
-                       AmbariManagementController managementController) {
-    super(propertyIds, keyPropertyIds, managementController);
-  }
-
-  @Override
-  public RequestStatus createResources(Request request)
-      throws SystemException,
-      UnsupportedPropertyException,
-      ResourceAlreadyExistsException,
-      NoSuchParentResourceException {
-    final Set<UserRequest> requests = new HashSet<UserRequest>();
-    for (Map<String, Object> propertyMap : request.getProperties()) {
-      requests.add(getRequest(propertyMap));
-    }
-
-    createResources(new Command<Void>() {
-      @Override
-      public Void invoke() throws AmbariException {
-        getManagementController().createUsers(requests);
-        return null;
-      }
-    });
-
-    return getRequestStatus(null);
-  }
-
-  @Override
-  public Set<Resource> getResources(Request request, Predicate predicate)
-      throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-
-    final Set<UserRequest> requests = new HashSet<UserRequest>();
-
-    if (predicate == null) {
-      requests.add(getRequest(null));
-    } else {
-      for (Map<String, Object> propertyMap : getPropertyMaps(predicate)) {
-        requests.add(getRequest(propertyMap));
-      }
-    }
-
-    Set<UserResponse> responses = getResources(new Command<Set<UserResponse>>() {
-      @Override
-      public Set<UserResponse> invoke() throws AmbariException {
-        return getManagementController().getUsers(requests);
-      }
-    });
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Found user responses matching get user request"
-          + ", userRequestSize=" + requests.size()
-          + ", userResponseSize=" + responses.size());
-    }
-
-    Set<String>   requestedIds = getRequestPropertyIds(request, predicate);
-    Set<Resource> resources    = new HashSet<Resource>();
-
-    for (UserResponse userResponse : responses) {
-      ResourceImpl resource = new ResourceImpl(Resource.Type.User);
-
-      setResourceProperty(resource, USER_USERNAME_PROPERTY_ID,
-          userResponse.getUsername(), requestedIds);
-
-      setResourceProperty(resource, USER_ROLES_PROPERTY_ID,
-          userResponse.getRoles(), requestedIds);
-
-      setResourceProperty(resource, USER_LDAP_USER_PROPERTY_ID,
-          userResponse.isLdapUser(), requestedIds);
-
-      resources.add(resource);
-    }
-
-    return resources;
-  }
-
-  @Override
-  public RequestStatus updateResources(Request request, Predicate predicate)
-    throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-    final Set<UserRequest> requests = new HashSet<UserRequest>();
-
-    for (Map<String, Object> propertyMap : getPropertyMaps(request.getProperties().iterator().next(), predicate)) {
-      UserRequest req = getRequest(propertyMap);
-
-      requests.add(req);
-    }
-
-    modifyResources(new Command<Void>() {
-      @Override
-      public Void invoke() throws AmbariException {
-        getManagementController().updateUsers(requests);
-        return null;
-      }
-    });
-
-    return getRequestStatus(null);
-  }
-
-  @Override
-  public RequestStatus deleteResources(Predicate predicate)
-      throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-    final Set<UserRequest> requests = new HashSet<UserRequest>();
-
-    for (Map<String, Object> propertyMap : getPropertyMaps(predicate)) {
-      UserRequest req = getRequest(propertyMap);
-
-      requests.add(req);
-    }
-
-    modifyResources(new Command<Void>() {
-      @Override
-      public Void invoke() throws AmbariException {
-        getManagementController().deleteUsers(requests);
-        return null;
-      }
-    });
-
-    return getRequestStatus(null);
-  }
-
-  @Override
-  protected Set<String> getPKPropertyIds() {
-    return pkPropertyIds;
-  }
-
-  private UserRequest getRequest(Map<String, Object> properties) {
-    if (properties == null) {
-      return new UserRequest(null);
-    }
-
-    UserRequest request = new UserRequest((String) properties.get(USER_USERNAME_PROPERTY_ID));
-
-    request.setPassword((String) properties.get(USER_PASSWORD_PROPERTY_ID));
-    request.setOldPassword((String) properties.get(USER_OLD_PASSWORD_PROPERTY_ID));
-
-    // TODO - support array/sets directly out of the request
-    if (null != properties.get(USER_ROLES_PROPERTY_ID)) {
-      HashSet<String> roles = new HashSet<String>();
-
-      Collections.addAll(roles, ((String) properties.get(USER_ROLES_PROPERTY_ID)).split(","));
-
-      request.setRoles(roles);
-    }
-
-    return request;
-  }
-}
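
Roles travel through the API as a single comma-separated string and are split
into a set in getRequest() above; the convention in isolation (pure JDK,
sample values):

    import java.util.Collections;
    import java.util.HashSet;
    import java.util.Set;

    public class RoleParsingSketch {
      public static void main(String[] args) {
        // One property value carries every role for the user.
        String rolesProperty = "admin,user";

        Set<String> roles = new HashSet<String>();
        Collections.addAll(roles, rolesProperty.split(","));

        System.out.println(roles.contains("admin")); // true
      }
    }
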
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/jdbc/ConnectionFactory.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/jdbc/ConnectionFactory.java
deleted file mode 100644
index c8444b1..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/jdbc/ConnectionFactory.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.jdbc;
-
-import java.sql.Connection;
-import java.sql.SQLException;
-
-/**
- * Simple JDBC connection factory interface.
- */
-public interface ConnectionFactory {
-  /**
-   * Get a connection.
-   *
-   * @return the connection
-   *
-   * @throws SQLException thrown if the connection cannot be obtained
-   */
-  public Connection getConnection() throws SQLException;
-}
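
An implementation only has to hand back a JDBC Connection; a minimal sketch
backed by DriverManager with a placeholder URL (the SQLiteConnectionFactory
deleted further below follows the same shape):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;

    import org.apache.ambari.server.controller.jdbc.ConnectionFactory;

    public class DriverManagerConnectionFactory implements ConnectionFactory {

      private final String url; // e.g. "jdbc:sqlite:/tmp/sample.db" (placeholder)

      public DriverManagerConnectionFactory(String url) {
        this.url = url;
      }

      @Override
      public Connection getConnection() throws SQLException {
        // Each call hands back a fresh connection; the caller closes it.
        return DriverManager.getConnection(url);
      }
    }
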
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/jdbc/JDBCProviderModule.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/jdbc/JDBCProviderModule.java
deleted file mode 100644
index e0275f7..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/jdbc/JDBCProviderModule.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.jdbc;
-
-import org.apache.ambari.server.controller.internal.AbstractProviderModule;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.spi.ResourceProvider;
-import org.apache.ambari.server.controller.utilities.DBHelper;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-
-/**
- * A provider module implementation that uses the JDBC resource provider.
- */
-public class JDBCProviderModule extends AbstractProviderModule {
-  // ----- utility methods ---------------------------------------------------
-
-  @Override
-  protected ResourceProvider createResourceProvider(Resource.Type type) {
-    return new JDBCResourceProvider(DBHelper.CONNECTION_FACTORY, type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type));
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/jdbc/JDBCResourceProvider.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/jdbc/JDBCResourceProvider.java
deleted file mode 100644
index 2f2eab1..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/jdbc/JDBCResourceProvider.java
+++ /dev/null
@@ -1,457 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.jdbc;
-
-import org.apache.ambari.server.controller.internal.BaseProvider;
-import org.apache.ambari.server.controller.internal.RequestStatusImpl;
-import org.apache.ambari.server.controller.internal.ResourceImpl;
-import org.apache.ambari.server.controller.predicate.BasePredicate;
-import org.apache.ambari.server.controller.predicate.PredicateVisitorAcceptor;
-import org.apache.ambari.server.controller.spi.*;
-import org.apache.ambari.server.controller.utilities.PredicateHelper;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.sql.Connection;
-import java.sql.DatabaseMetaData;
-import java.sql.ResultSet;
-import java.sql.ResultSetMetaData;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Generic JDBC based resource provider.
- * TODO : Not used. Move to Test for API integration testing.
- */
-public class JDBCResourceProvider extends BaseProvider implements ResourceProvider {
-
-    private final Resource.Type type;
-
-    private final ConnectionFactory connectionFactory;
-
-    /**
-     * The key property ids for this provider's resource type.
-     */
-    private final Map<Resource.Type, String> keyPropertyIds;
-
-    /**
-     * Key mappings used for joins.
-     */
-    private final Map<String, Map<String, String>> importedKeys = new HashMap<String, Map<String, String>>();
-
-    protected final static Logger LOG =
-            LoggerFactory.getLogger(JDBCResourceProvider.class);
-
-    public JDBCResourceProvider(ConnectionFactory connectionFactory,
-                                Resource.Type type,
-                                Set<String> propertyIds,
-                                Map<Resource.Type, String> keyPropertyIds) {
-        super(propertyIds);
-        this.connectionFactory = connectionFactory;
-        this.type = type;
-        this.keyPropertyIds = keyPropertyIds;
-    }
-
-    @Override
-    public Set<Resource> getResources(Request request, Predicate predicate)
-        throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-
-        Set<Resource> resources = new HashSet<Resource>();
-        Set<String> propertyIds = getRequestPropertyIds(request, predicate);
-
-        // Can't allow these properties with the old schema...
-        propertyIds.remove(PropertyHelper.getPropertyId("Clusters", "cluster_id"));
-        propertyIds.remove(PropertyHelper.getPropertyId("Hosts", "disk_info"));
-        propertyIds.remove(PropertyHelper.getPropertyId("Hosts", "public_host_name"));
-        propertyIds.remove(PropertyHelper.getPropertyId("Hosts", "last_registration_time"));
-        propertyIds.remove(PropertyHelper.getPropertyId("Hosts", "host_state"));
-        propertyIds.remove(PropertyHelper.getPropertyId("Hosts", "last_heartbeat_time"));
-        propertyIds.remove(PropertyHelper.getPropertyId("Hosts", "host_health_report"));
-        propertyIds.remove(PropertyHelper.getPropertyId("Hosts", "host_status"));
-        propertyIds.remove(PropertyHelper.getPropertyId("ServiceInfo", "desired_configs"));
-        propertyIds.remove(PropertyHelper.getPropertyId("ServiceComponentInfo", "desired_configs"));
-        propertyIds.remove(PropertyHelper.getPropertyId("HostRoles", "configs"));
-        propertyIds.remove(PropertyHelper.getPropertyId("HostRoles", "desired_configs"));
-
-        Connection connection = null;
-        Statement statement = null;
-        ResultSet rs = null;
-        try {
-            connection = connectionFactory.getConnection();
-
-
-            for (String table : getTables(propertyIds)) {
-                getImportedKeys(connection, table);
-            }
-
-            String sql = getSelectSQL(propertyIds, predicate);
-            statement = connection.createStatement();
-
-            rs = statement.executeQuery(sql);
-
-            while (rs.next()) {
-                ResultSetMetaData metaData = rs.getMetaData();
-                int columnCount = metaData.getColumnCount();
-
-                final ResourceImpl resource = new ResourceImpl(type);
-                for (int i = 1; i <= columnCount; ++i) {
-                    String propertyId = PropertyHelper.getPropertyId(metaData.getTableName(i), metaData.getColumnName(i));
-                    if (propertyIds.contains(propertyId)) {
-                        resource.setProperty(propertyId, rs.getString(i));
-                    }
-                }
-                resources.add(resource);
-            }
-            statement.close();
-
-        } catch (SQLException e) {
-            if (LOG.isDebugEnabled()) {
-                LOG.debug("Caught exception getting resource.", e);
-            }
-            return Collections.emptySet();
-        } finally {
-            try {
-                if (rs != null) rs.close();
-            } catch (SQLException e) {
-                LOG.error("Exception while closing ResultSet", e);
-            }
-
-            try {
-                if (statement != null) statement.close();
-            } catch (SQLException e) {
-                LOG.error("Exception while closing statment", e);
-            }
-
-            try {
-                if (connection != null) connection.close();
-            } catch (SQLException e) {
-                LOG.error("Exception while closing statment", e);
-            }
-
-        }
-
-
-        return resources;
-    }
-
-    @Override
-    public RequestStatus createResources(Request request)
-        throws SystemException,
-               UnsupportedPropertyException,
-               ResourceAlreadyExistsException,
-               NoSuchParentResourceException {
-
-        try {
-            Connection connection = connectionFactory.getConnection();
-
-            try {
-
-                Set<Map<String, Object>> propertySet = request.getProperties();
-
-                for (Map<String, Object> properties : propertySet) {
-                    String sql = getInsertSQL(properties);
-
-                    Statement statement = connection.createStatement();
-
-                    statement.execute(sql);
-
-                    statement.close();
-                }
-            } finally {
-                connection.close();
-            }
-
-        } catch (SQLException e) {
-            throw new IllegalStateException("DB error : ", e);
-        }
-
-        return getRequestStatus();
-    }
-
-    @Override
-    public RequestStatus updateResources(Request request, Predicate predicate)
-        throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-
-        try {
-            Connection connection = connectionFactory.getConnection();
-            try {
-                Set<Map<String, Object>> propertySet = request.getProperties();
-
-                Map<String, Object> properties = propertySet.iterator().next();
-
-                String sql = getUpdateSQL(properties, predicate);
-
-                Statement statement = connection.createStatement();
-
-                statement.execute(sql);
-
-                statement.close();
-            } finally {
-                connection.close();
-            }
-
-        } catch (SQLException e) {
-            throw new IllegalStateException("DB error : ", e);
-        }
-
-        return getRequestStatus();
-    }
-
-    @Override
-    public RequestStatus deleteResources(Predicate predicate)
-        throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-
-        try {
-            Connection connection = connectionFactory.getConnection();
-            try {
-                String sql = getDeleteSQL(predicate);
-
-                Statement statement = connection.createStatement();
-                statement.execute(sql);
-                statement.close();
-            } finally {
-                connection.close();
-            }
-
-        } catch (SQLException e) {
-            throw new IllegalStateException("DB error : ", e);
-        }
-
-        return getRequestStatus();
-    }
-
-
-    private String getInsertSQL(Map<String, Object> properties) {
-
-        StringBuilder columns = new StringBuilder();
-        StringBuilder values = new StringBuilder();
-        String table = null;
-
-
-        for (Map.Entry<String, Object> entry : properties.entrySet()) {
-            String propertyId = entry.getKey();
-            Object propertyValue = entry.getValue();
-
-            table = PropertyHelper.getPropertyCategory(propertyId);
-
-
-            if (columns.length() > 0) {
-                columns.append(", ");
-            }
-            columns.append(PropertyHelper.getPropertyName(propertyId));
-
-            if (values.length() > 0) {
-                values.append(", ");
-            }
-            values.append("'");
-            values.append(propertyValue);
-            values.append("'");
-        }
-
-        return "insert into " + table + " (" +
-                columns + ") values (" + values + ")";
-    }
-
-    private String getSelectSQL(Set<String> propertyIds, Predicate predicate) {
-
-        StringBuilder columns = new StringBuilder();
-        Set<String> tableSet = new HashSet<String>();
-
-        for (String propertyId : propertyIds) {
-            if (columns.length() > 0) {
-                columns.append(", ");
-            }
-            String propertyCategory = PropertyHelper.getPropertyCategory(propertyId);
-            columns.append(propertyCategory).append(".").append(PropertyHelper.getPropertyName(propertyId));
-            tableSet.add(propertyCategory);
-        }
-
-
-        boolean haveWhereClause = false;
-        StringBuilder whereClause = new StringBuilder();
-        if (predicate != null &&
-                propertyIds.containsAll(PredicateHelper.getPropertyIds(predicate)) &&
-                predicate instanceof PredicateVisitorAcceptor) {
-
-            SQLPredicateVisitor visitor = new SQLPredicateVisitor();
-            ((PredicateVisitorAcceptor) predicate).accept(visitor);
-            whereClause.append(visitor.getSQL());
-            haveWhereClause = true;
-        }
-
-        StringBuilder joinClause = new StringBuilder();
-
-        if (tableSet.size() > 1) {
-
-            for (String table : tableSet) {
-                Map<String, String> joinKeys = importedKeys.get(table);
-                if (joinKeys != null) {
-                    for (Map.Entry<String, String> entry : joinKeys.entrySet()) {
-                        String category1 = PropertyHelper.getPropertyCategory(entry.getKey());
-                        String category2 = PropertyHelper.getPropertyCategory(entry.getValue());
-                        if (tableSet.contains(category1) && tableSet.contains(category2)) {
-                            if (haveWhereClause) {
-                                joinClause.append(" AND ");
-                            }
-                            joinClause.append(category1).append(".").append(PropertyHelper.getPropertyName(entry.getKey()));
-                            joinClause.append(" = ");
-                            joinClause.append(category2).append(".").append(PropertyHelper.getPropertyName(entry.getValue()));
-                            // category1 and category2 are already members of
-                            // tableSet, per the contains() checks above
-
-                            haveWhereClause = true;
-                        }
-                    }
-                }
-            }
-        }
-
-        StringBuilder tables = new StringBuilder();
-
-        for (String table : tableSet) {
-            if (tables.length() > 0) {
-                tables.append(", ");
-            }
-            tables.append(table);
-        }
-
-        String sql = "select " + columns + " from " + tables;
-
-        if (haveWhereClause) {
-            sql = sql + " where " + whereClause + joinClause;
-        }
-
-        return sql;
-    }
-
-    private String getDeleteSQL(Predicate predicate) {
-
-        StringBuilder whereClause = new StringBuilder();
-        if (predicate instanceof BasePredicate) {
-
-            BasePredicate basePredicate = (BasePredicate) predicate;
-
-            SQLPredicateVisitor visitor = new SQLPredicateVisitor();
-            basePredicate.accept(visitor);
-            whereClause.append(visitor.getSQL());
-
-            String table = PropertyHelper.getPropertyCategory(basePredicate.getPropertyIds().iterator().next());
-
-            return "delete from " + table + " where " + whereClause;
-        }
-        throw new IllegalStateException("Can't generate SQL.");
-    }
-
-    private String getUpdateSQL(Map<String, Object> properties, Predicate predicate) {
-
-        if (predicate instanceof BasePredicate) {
-
-            StringBuilder whereClause = new StringBuilder();
-
-            BasePredicate basePredicate = (BasePredicate) predicate;
-
-            SQLPredicateVisitor visitor = new SQLPredicateVisitor();
-            basePredicate.accept(visitor);
-            whereClause.append(visitor.getSQL());
-
-            String table = PropertyHelper.getPropertyCategory(basePredicate.getPropertyIds().iterator().next());
-
-
-            StringBuilder setClause = new StringBuilder();
-            for (Map.Entry<String, Object> entry : properties.entrySet()) {
-
-                if (setClause.length() > 0) {
-                    setClause.append(", ");
-                }
-                setClause.append(PropertyHelper.getPropertyName(entry.getKey()));
-                setClause.append(" = ");
-                setClause.append("'");
-                setClause.append(entry.getValue());
-                setClause.append("'");
-            }
-
-            return "update " + table + " set " + setClause + " where " + whereClause;
-        }
-        throw new IllegalStateException("Can't generate SQL.");
-    }
-
-    @Override
-    public Map<Resource.Type, String> getKeyPropertyIds() {
-        return keyPropertyIds;
-    }
-
-    /**
-     * Lazily populate the imported key mappings for the given table.
-     *
-     * @param connection the connection to use to obtain the database meta data
-     * @param table      the table
-     * @throws SQLException thrown if the meta data for the given connection cannot be obtained
-     */
-    private void getImportedKeys(Connection connection, String table) throws SQLException {
-        if (!this.importedKeys.containsKey(table)) {
-
-            Map<String, String> importedKeys = new HashMap<String, String>();
-            this.importedKeys.put(table, importedKeys);
-
-            DatabaseMetaData metaData = connection.getMetaData();
-
-            ResultSet rs = metaData.getImportedKeys(connection.getCatalog(), null, table);
-
-            while (rs.next()) {
-
-                String pkPropertyId = PropertyHelper.getPropertyId(
-                    rs.getString("PKTABLE_NAME"), rs.getString("PKCOLUMN_NAME"));
-
-                String fkPropertyId = PropertyHelper.getPropertyId(
-                    rs.getString("FKTABLE_NAME"), rs.getString("FKCOLUMN_NAME"));
-
-                importedKeys.put(pkPropertyId, fkPropertyId);
-            }
-        }
-    }
-
-    /**
-     * Get a request status
-     *
-     * @return the request status
-     */
-    private RequestStatus getRequestStatus() {
-        return new RequestStatusImpl(null);
-    }
-
-    /**
-     * Get the set of tables associated with the given property ids.
-     *
-     * @param propertyIds the property ids
-     * @return the set of tables
-     */
-    private static Set<String> getTables(Set<String> propertyIds) {
-        Set<String> tables = new HashSet<String>();
-        for (String propertyId : propertyIds) {
-            tables.add(PropertyHelper.getPropertyCategory(propertyId));
-        }
-        return tables;
-    }
-}
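
The SQL builders above concatenate property values straight into the statement
text, which is only safe for trusted input; a sketch of the equivalent insert
with a PreparedStatement (table and column names are illustrative only):

    import java.sql.Connection;
    import java.sql.PreparedStatement;
    import java.sql.SQLException;

    public class ParameterizedInsertSketch {

      // Hypothetical table/columns; the real provider derives them from property ids.
      static void insertUser(Connection connection, String userName, String roles)
          throws SQLException {
        PreparedStatement statement = connection.prepareStatement(
            "insert into Users (user_name, roles) values (?, ?)");
        try {
          statement.setString(1, userName); // values are bound, not concatenated
          statement.setString(2, roles);
          statement.executeUpdate();
        } finally {
          statement.close();
        }
      }
    }
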
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/jdbc/SQLPredicateVisitor.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/jdbc/SQLPredicateVisitor.java
deleted file mode 100644
index 5961af7..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/jdbc/SQLPredicateVisitor.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.jdbc;
-
-import org.apache.ambari.server.controller.predicate.AlwaysPredicate;
-import org.apache.ambari.server.controller.predicate.ArrayPredicate;
-import org.apache.ambari.server.controller.predicate.BasePredicate;
-import org.apache.ambari.server.controller.predicate.CategoryPredicate;
-import org.apache.ambari.server.controller.predicate.ComparisonPredicate;
-import org.apache.ambari.server.controller.predicate.PredicateVisitor;
-import org.apache.ambari.server.controller.predicate.UnaryPredicate;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-
-/**
- * Predicate visitor used to generate a SQL where clause from a predicate graph.
- */
-public class SQLPredicateVisitor implements PredicateVisitor {
-
-  /**
-   * The string builder.
-   */
-  private final StringBuilder stringBuilder = new StringBuilder();
-
-
-  // ----- PredicateVisitor --------------------------------------------------
-
-  @Override
-  public void acceptComparisonPredicate(ComparisonPredicate predicate) {
-    String propertyId = predicate.getPropertyId();
-
-    String propertyCategory = PropertyHelper.getPropertyCategory(propertyId);
-    if (propertyCategory != null) {
-      stringBuilder.append(propertyCategory).append(".");
-    }
-    stringBuilder.append(PropertyHelper.getPropertyName(propertyId));
-
-    stringBuilder.append(" ").append(predicate.getOperator()).append(" \"");
-    stringBuilder.append(predicate.getValue());
-    stringBuilder.append("\"");
-
-  }
-
-  @Override
-  public void acceptArrayPredicate(ArrayPredicate predicate) {
-    BasePredicate[] predicates = predicate.getPredicates();
-    if (predicates.length > 0) {
-
-      stringBuilder.append("(");
-      for (int i = 0; i < predicates.length; i++) {
-        if (i > 0) {
-          stringBuilder.append(" ").append(predicate.getOperator()).append(" ");
-        }
-        predicates[i].accept(this);
-      }
-      stringBuilder.append(")");
-    }
-  }
-
-  @Override
-  public void acceptUnaryPredicate(UnaryPredicate predicate) {
-    stringBuilder.append(predicate.getOperator()).append("(");
-    predicate.getPredicate().accept(this);
-    stringBuilder.append(")");
-  }
-
-  @Override
-  public void acceptAlwaysPredicate(AlwaysPredicate predicate) {
-    stringBuilder.append("TRUE");
-  }
-
-  @Override
-  public void acceptCategoryPredicate(CategoryPredicate predicate) {
-    // Do nothing
-  }
-
-
-  // ----- SQLPredicateVisitor -----------------------------------------------
-
-  public String getSQL() {
-    return stringBuilder.toString();
-  }
-}
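
A usage sketch for the visitor, assuming the EqualsPredicate and AndPredicate
implementations that accompany these classes in the predicate package:

    import org.apache.ambari.server.controller.jdbc.SQLPredicateVisitor;
    import org.apache.ambari.server.controller.predicate.AndPredicate;
    import org.apache.ambari.server.controller.predicate.BasePredicate;
    import org.apache.ambari.server.controller.predicate.EqualsPredicate;

    public class VisitorSketch {
      public static void main(String[] args) {
        BasePredicate predicate = new AndPredicate(
            new EqualsPredicate("Users.user_name", "admin"),
            new EqualsPredicate("Users.ldap_user", "false"));

        SQLPredicateVisitor visitor = new SQLPredicateVisitor();
        predicate.accept(visitor);

        // Roughly: (Users.user_name = 'admin' AND Users.ldap_user = 'false')
        System.out.println(visitor.getSQL());
      }
    }
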
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/jdbc/SQLiteConnectionFactory.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/jdbc/SQLiteConnectionFactory.java
deleted file mode 100644
index 205e358..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/jdbc/SQLiteConnectionFactory.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.jdbc;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.SQLException;
-
-/**
- * Connection factory implementation for SQLite.
- */
-public class SQLiteConnectionFactory implements ConnectionFactory {
-
-  /**
-   * The connection URL minus the db file.
-   */
-  private static final String CONNECTION_URL = "jdbc:sqlite:";
-
-  /**
-   * The filename of the SQLite db file.
-   */
-  private final String dbFile;
-
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Create a connection factory.
-   *
-   * @param dbFile  the SQLite DB filename
-   */
-  public SQLiteConnectionFactory(String dbFile) {
-    this.dbFile = dbFile;
-    try {
-      Class.forName("org.sqlite.JDBC");
-    } catch (ClassNotFoundException e) {
-      throw new IllegalStateException("Can't load SQLite.", e);
-    }
-  }
-
-
-  // ----- ConnectionFactory -------------------------------------------------
-
-  @Override
-  public Connection getConnection() throws SQLException {
-    return DriverManager.getConnection(CONNECTION_URL + dbFile);
-  }
-}
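
Usage sketch for the factory above; the database file path is a placeholder:

    import java.sql.Connection;
    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.sql.Statement;

    import org.apache.ambari.server.controller.jdbc.SQLiteConnectionFactory;

    public class SQLiteSketch {
      public static void main(String[] args) throws SQLException {
        // The constructor registers org.sqlite.JDBC before any connection is made.
        SQLiteConnectionFactory factory = new SQLiteConnectionFactory("/tmp/sample.db");

        Connection connection = factory.getConnection();
        try {
          Statement statement = connection.createStatement();
          ResultSet rs = statement.executeQuery("select 1");
          rs.next();
          System.out.println(rs.getInt(1)); // prints 1
          rs.close();
          statement.close();
        } finally {
          connection.close();
        }
      }
    }
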
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXHostProvider.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXHostProvider.java
deleted file mode 100644
index b14733f..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXHostProvider.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.jmx;
-
-import org.apache.ambari.server.controller.spi.SystemException;
-
-/**
- * Provider of JMX host information.
- */
-public interface JMXHostProvider {
-
-  /**
-   * Get the JMX host name for the given cluster name and component name.
-   *
-   * @param clusterName    the cluster name
-   * @param componentName  the component name
-   *
-   * @return the JMX host name
-   *
-   * @throws SystemException if unable to get the JMX host name
-   */
-  public String getHostName(String clusterName, String componentName)
-      throws SystemException;
-
-  /**
-   * Get the port for the specified cluster name and component.
-   *
-   * @param clusterName    the cluster name
-   * @param componentName  the component name
-   *
-   * @return the port for the specified cluster name and component
-   *
-   * @throws SystemException if unable to get the JMX port
-   */
-  public String getPort(String clusterName, String componentName)
-      throws SystemException;
-}
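
For tests a fixed-value implementation is often enough; a sketch with
placeholder values:

    import org.apache.ambari.server.controller.jmx.JMXHostProvider;
    import org.apache.ambari.server.controller.spi.SystemException;

    public class StaticJMXHostProvider implements JMXHostProvider {

      @Override
      public String getHostName(String clusterName, String componentName)
          throws SystemException {
        return "localhost"; // placeholder: always resolve to the local host
      }

      @Override
      public String getPort(String clusterName, String componentName)
          throws SystemException {
        // Returning null lets JMXPropertyProvider fall back to its default port table.
        return null;
      }
    }
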
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXMetricHolder.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXMetricHolder.java
deleted file mode 100644
index 4f5b747..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXMetricHolder.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.jmx;
-
-import java.util.List;
-import java.util.Map;
-
-/**
- * Holder for the list of bean maps returned by a component's /jmx endpoint.
- */
-public final class JMXMetricHolder {
-
-  private List<Map<String, Object>> beans;
-
-  public List<Map<String, Object>> getBeans() {
-    return beans;
-  }
-
-  public void setBeans(List<Map<String, Object>> beans) {
-    this.beans = beans;
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder stringBuilder = new StringBuilder();
-
-    for (Map<String, Object> map : beans) {
-      for (Map.Entry<String, Object> entry : map.entrySet()) {
-        stringBuilder.append("    ").append(entry.toString()).append("\n");
-      }
-    }
-    return stringBuilder.toString();
-  }
-}
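
The JMXPropertyProvider deleted below reads the /jmx payload into this holder
via Jackson; a standalone sketch of that mapping with a small inline sample:

    import org.apache.ambari.server.controller.jmx.JMXMetricHolder;
    import org.codehaus.jackson.map.DeserializationConfig;
    import org.codehaus.jackson.map.ObjectMapper;

    public class MetricHolderSketch {
      public static void main(String[] args) throws Exception {
        // Minimal sample of the {"beans":[...]} shape served by a /jmx endpoint.
        String json = "{\"beans\":[{\"name\":\"java.lang:type=Memory\"}]}";

        ObjectMapper mapper = new ObjectMapper();
        mapper.configure(DeserializationConfig.Feature.USE_ANNOTATIONS, false);

        JMXMetricHolder holder = mapper.reader(JMXMetricHolder.class).readValue(json);
        System.out.println(holder.getBeans().size()); // 1
      }
    }
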
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXPropertyProvider.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXPropertyProvider.java
deleted file mode 100644
index 6914670..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXPropertyProvider.java
+++ /dev/null
@@ -1,296 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.jmx;
-
-import org.apache.ambari.server.controller.internal.AbstractPropertyProvider;
-import org.apache.ambari.server.controller.internal.PropertyInfo;
-import org.apache.ambari.server.controller.spi.*;
-import org.apache.ambari.server.controller.utilities.StreamProvider;
-import org.codehaus.jackson.map.DeserializationConfig;
-import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jackson.map.ObjectReader;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Property provider implementation for JMX sources.
- */
-public class JMXPropertyProvider extends AbstractPropertyProvider {
-
-  private static final String NAME_KEY = "name";
-  private static final String PORT_KEY = "tag.port";
-
-  private final StreamProvider streamProvider;
-
-  private final JMXHostProvider jmxHostProvider;
-
-  private static final Map<String, String> DEFAULT_JMX_PORTS = new HashMap<String, String>();
-
-  private final String clusterNamePropertyId;
-
-  private final String hostNamePropertyId;
-
-  private final String componentNamePropertyId;
-
-  private final static ObjectReader objectReader;
-
-
-  static {
-    DEFAULT_JMX_PORTS.put("NAMENODE",           "50070");
-    DEFAULT_JMX_PORTS.put("DATANODE",           "50075");
-    DEFAULT_JMX_PORTS.put("JOBTRACKER",         "50030");
-    DEFAULT_JMX_PORTS.put("TASKTRACKER",        "50060");
-    DEFAULT_JMX_PORTS.put("HBASE_MASTER",       "60010");
-    DEFAULT_JMX_PORTS.put("HBASE_REGIONSERVER", "60030");
-
-    ObjectMapper objectMapper = new ObjectMapper();
-    objectMapper.configure(DeserializationConfig.Feature.USE_ANNOTATIONS, false);
-    objectReader = objectMapper.reader(JMXMetricHolder.class);
-  }
-
-  protected final static Logger LOG =
-      LoggerFactory.getLogger(JMXPropertyProvider.class);
-
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Create a JMX property provider.
-   *
-   * @param componentMetrics         the map of supported metrics
-   * @param streamProvider           the stream provider
-   * @param jmxHostProvider          the host mapping
-   * @param clusterNamePropertyId    the cluster name property id
-   * @param hostNamePropertyId       the host name property id
-   * @param componentNamePropertyId  the component name property id
-   */
-  public JMXPropertyProvider(Map<String, Map<String, PropertyInfo>> componentMetrics,
-                             StreamProvider streamProvider,
-                             JMXHostProvider jmxHostProvider,
-                             String clusterNamePropertyId,
-                             String hostNamePropertyId,
-                             String componentNamePropertyId) {
-
-    super(componentMetrics);
-
-    this.streamProvider           = streamProvider;
-    this.jmxHostProvider          = jmxHostProvider;
-    this.clusterNamePropertyId    = clusterNamePropertyId;
-    this.hostNamePropertyId       = hostNamePropertyId;
-    this.componentNamePropertyId  = componentNamePropertyId;
-  }
-
-
-  // ----- PropertyProvider --------------------------------------------------
-
-  @Override
-  public Set<Resource> populateResources(Set<Resource> resources, Request request, Predicate predicate)
-      throws SystemException {
-
-
-    Set<Resource> keepers = new HashSet<Resource>();
-    for (Resource resource : resources) {
-      if (populateResource(resource, request, predicate)) {
-        keepers.add(resource);
-      }
-    }
-    return keepers;
-  }
-
-
-  // ----- helper methods ----------------------------------------------------
-
-  /**
-   * Get the spec to locate the JMX stream from the given host and port
-   *
-   * @param hostName  the host name
-   * @param port      the port
-   *
-   * @return the spec
-   */
-  protected String getSpec(String hostName, String port) {
-    return "http://" + hostName + ":" + port + "/jmx";
-  }
-
-  /**
-   * Populate a resource by obtaining the requested JMX properties.
-   *
-   * @param resource  the resource to be populated
-   * @param request   the request
-   * @param predicate the predicate
-   *
-   * @return true if the resource was successfully populated with the requested properties
-   */
-  private boolean populateResource(Resource resource, Request request, Predicate predicate)
-      throws SystemException {
-
-    Set<String> ids = getRequestPropertyIds(request, predicate);
-    if (ids.isEmpty()) {
-      return true;
-    }
-
-    String componentName = (String) resource.getPropertyValue(componentNamePropertyId);
-
-    if (getComponentMetrics().get(componentName) == null) {
-      // If there are no metrics defined for the given component then there is nothing to do.
-      return true;
-    }
-
-    String clusterName = (String) resource.getPropertyValue(clusterNamePropertyId);
-
-    String port = getPort(clusterName, componentName);
-    if (port == null) {
-      String error = "Unable to get JMX metrics.  No port value for " + componentName;
-      logError(error, null);
-      throw new SystemException(error, null);
-    }
-
-    String hostName = getHost(resource, clusterName, componentName);
-    if (hostName == null) {
-      String error = "Unable to get JMX metrics.  No host name for " + componentName;
-      logError(error, null);
-      throw new SystemException(error, null);
-    }
-
-    String      spec = getSpec(hostName, port);
-    InputStream in   = null;
-    try {
-      in = streamProvider.readFrom(spec);
-      JMXMetricHolder metricHolder = objectReader.readValue(in);
-
-      Map<String, Map<String, Object>> categories = new HashMap<String, Map<String, Object>>();
-
-      for (Map<String, Object> bean : metricHolder.getBeans()) {
-        String category = getCategory(bean);
-        if (category != null) {
-          categories.put(category, bean);
-        }
-      }
-
-      for (String propertyId : ids) {
-        Map<String, PropertyInfo> propertyInfoMap = getPropertyInfoMap(componentName, propertyId);
-
-        for (Map.Entry<String, PropertyInfo> entry : propertyInfoMap.entrySet()) {
-
-          PropertyInfo propertyInfo = entry.getValue();
-          propertyId = entry.getKey();
-
-          if (propertyInfo.isPointInTime()) {
-
-            String property = propertyInfo.getPropertyId();
-            String category = "";
-
-            List<String> keyList = new LinkedList<String>();
-            int keyStartIndex = property.indexOf('[', 0);
-            int firstKeyIndex = keyStartIndex > -1 ? keyStartIndex : property.length();
-            while (keyStartIndex > -1) {
-              int keyEndIndex = property.indexOf(']', keyStartIndex);
-              if (keyEndIndex > -1 & keyEndIndex > keyStartIndex) {
-                keyList.add(property.substring(keyStartIndex + 1, keyEndIndex));
-                keyStartIndex = property.indexOf('[', keyEndIndex);
-              }
-              else {
-                keyStartIndex = -1;
-              }
-            }
-
-            int dotIndex = property.lastIndexOf('.', firstKeyIndex - 1);
-            if (dotIndex != -1){
-              category = property.substring(0, dotIndex);
-              property = property.substring(dotIndex + 1, firstKeyIndex);
-            }
-
-            Map<String, Object> properties = categories.get(category);
-            if (properties != null && properties.containsKey(property)) {
-              Object value = properties.get(property);
-              if (keyList.size() > 0 && value instanceof Map) {
-                Map map = (Map) value;
-                for (String key : keyList) {
-                  value = map.get(key);
-                  if (value instanceof Map) {
-                    map = (Map) value;
-                  }
-                  else {
-                    break;
-                  }
-                }
-              }
-              resource.setProperty(propertyId, value);
-            }
-          }
-        }
-      }
-    } catch (IOException e) {
-      logError(spec, e);
-    } finally {
-      if (in != null) {
-        try {
-          in.close();
-        } catch (IOException e) {
-            logError("Unable to close http input steam : spec=" + spec, e);
-        }
-      }
-    }
-
-    return true;
-  }
-
-  private String getPort(String clusterName, String componentName) throws SystemException {
-    String port = jmxHostProvider.getPort(clusterName, componentName);
-    return port == null ? DEFAULT_JMX_PORTS.get(componentName) : port;
-  }
-
-  private String getHost(Resource resource, String clusterName, String componentName) throws SystemException {
-    return hostNamePropertyId == null ?
-        jmxHostProvider.getHostName(clusterName, componentName) :
-        (String) resource.getPropertyValue(hostNamePropertyId);
-  }
-
-  private String getCategory(Map<String, Object> bean) {
-    if (bean.containsKey(NAME_KEY)) {
-      String name = (String) bean.get(NAME_KEY);
-
-      if (bean.containsKey(PORT_KEY)) {
-        String port = (String) bean.get(PORT_KEY);
-        name = name.replace("ForPort" + port, "");
-      }
-      return name;
-    }
-    return null;
-  }
-
-  private static void logError(String error, IOException e) {
-    if (LOG.isErrorEnabled()) {
-      if (e == null) {
-        LOG.error("Caught exception getting JMX metrics : spec=" + error);
-      } else {
-        LOG.error("Caught exception getting JMX metrics : spec=" + error, e);
-      }
-    }
-  }
-}
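
The bracketed-key walk in populateResource() above is easier to follow in isolation. Below is a minimal, self-contained sketch of the same lookup, assuming only the JDK; the class name and sample data are hypothetical, and the category-prefix handling (the lastIndexOf('.') split) is omitted for brevity. It shows how a property such as "HeapMemoryUsage[used]" is split into a base name plus a chain of map keys and resolved against a JMX bean.

    import java.util.HashMap;
    import java.util.LinkedList;
    import java.util.List;
    import java.util.Map;

    public class JmxKeyLookupSketch {

      // Resolve a property such as "HeapMemoryUsage[used]" against a bean map,
      // mirroring the key-extraction loop in populateResource() above.
      static Object resolve(String property, Map<String, Object> bean) {
        List<String> keys = new LinkedList<String>();
        int start = property.indexOf('[');
        int baseEnd = start > -1 ? start : property.length();
        while (start > -1) {
          int end = property.indexOf(']', start);
          if (end > start) {
            keys.add(property.substring(start + 1, end));  // collect each [key]
            start = property.indexOf('[', end);
          } else {
            start = -1;  // unbalanced bracket: stop collecting
          }
        }
        Object value = bean.get(property.substring(0, baseEnd));
        for (String key : keys) {
          if (!(value instanceof Map)) {
            break;  // ran out of nested maps before running out of keys
          }
          value = ((Map<?, ?>) value).get(key);
        }
        return value;
      }

      public static void main(String[] args) {
        Map<String, Object> usage = new HashMap<String, Object>();
        usage.put("used", 42L);
        Map<String, Object> bean = new HashMap<String, Object>();
        bean.put("HeapMemoryUsage", usage);
        System.out.println(resolve("HeapMemoryUsage[used]", bean));  // prints 42
      }
    }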
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/AlwaysPredicate.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/AlwaysPredicate.java
deleted file mode 100644
index fd2be16..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/AlwaysPredicate.java
+++ /dev/null
@@ -1,46 +0,0 @@
-package org.apache.ambari.server.controller.predicate;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.ambari.server.controller.spi.Resource;
-
-import java.util.Collections;
-import java.util.Set;
-
-/**
- * A predicate that always evaluates to true.
- */
-public class AlwaysPredicate implements BasePredicate {
-  public static final AlwaysPredicate INSTANCE = new AlwaysPredicate();
-
-  @Override
-  public boolean evaluate(Resource resource) {
-    return true;
-  }
-
-  @Override
-  public Set<String> getPropertyIds() {
-    return Collections.emptySet();
-  }
-
-  @Override
-  public void accept(PredicateVisitor visitor) {
-    visitor.acceptAlwaysPredicate(this);
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/AndPredicate.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/AndPredicate.java
deleted file mode 100644
index 700228a..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/AndPredicate.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.predicate;
-
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Resource;
-
-import java.util.Arrays;
-import java.util.LinkedList;
-import java.util.List;
-
-
-/**
- * Predicate which evaluates to true if all of the predicates in a predicate
- * array evaluate to true.
- */
-public class AndPredicate extends ArrayPredicate {
-
-  public AndPredicate(BasePredicate... predicates) {
-    super(predicates);
-  }
-
-  @Override
-  public BasePredicate create(BasePredicate... predicates) {
-    return instance(predicates);
-  }
-
-  public static BasePredicate instance(BasePredicate... predicates) {
-    List<BasePredicate> predicateList = new LinkedList<BasePredicate>();
-
-    // Simplify the predicate array
-    for (BasePredicate predicate : predicates) {
-      if (!(predicate instanceof AlwaysPredicate)) {
-        if (predicate instanceof AndPredicate) {
-          predicateList.addAll(Arrays.asList(((AndPredicate) predicate).getPredicates()));
-        }
-        else {
-          predicateList.add(predicate);
-        }
-      }
-    }
-
-    return predicateList.size() == 1 ?
-        predicateList.get(0) :
-        new AndPredicate(predicateList.toArray(new BasePredicate[predicateList.size()]));
-  }
-
-  @Override
-  public boolean evaluate(Resource resource) {
-    Predicate[] predicates = getPredicates();
-    for (Predicate predicate : predicates) {
-      if (!predicate.evaluate(resource)) {
-        return false;
-      }
-    }
-    return true;
-  }
-
-  @Override
-  public String getOperator() {
-    return "AND";
-  }
-}
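
The simplification in AndPredicate.instance() above flattens nested ANDs and drops always-true operands, so a single surviving operand is returned unwrapped. A short illustrative use, assuming the predicate classes removed in this patch are on the classpath (the property ids are made up):

    import org.apache.ambari.server.controller.predicate.*;

    public class AndSimplificationExample {
      public static void main(String[] args) {
        BasePredicate state = new EqualsPredicate<String>("HostRoles/state", "STARTED");

        // AND with an always-true operand collapses to the other operand.
        BasePredicate p = AndPredicate.instance(state, AlwaysPredicate.INSTANCE);
        System.out.println(p == state);  // true: no wrapper is created

        // Nested ANDs are flattened into a single operand array.
        BasePredicate cpu = new GreaterPredicate<Double>("metrics/cpu_user", 80.0);
        AndPredicate flat = (AndPredicate) AndPredicate.instance(
            new AndPredicate(state, cpu), AlwaysPredicate.INSTANCE);
        System.out.println(flat.getPredicates().length);  // 2
      }
    }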
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/ArrayPredicate.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/ArrayPredicate.java
deleted file mode 100644
index 4bf7f49..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/ArrayPredicate.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.predicate;
-
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.Set;
-
-/**
- * Predicate which evaluates an array of predicates.
- */
-public abstract class ArrayPredicate implements BasePredicate {
-  private final BasePredicate[] predicates;
-  private final Set<String> propertyIds = new HashSet<String>();
-
-  public ArrayPredicate(BasePredicate... predicates) {
-    this.predicates = predicates;
-    for (BasePredicate predicate : predicates) {
-      propertyIds.addAll(predicate.getPropertyIds());
-    }
-  }
-
-  /**
-   * Factory method.
-   *
-   * @param predicates  the predicate array
-   *
-   * @return a new ArrayPredicate
-   */
-  public abstract BasePredicate create(BasePredicate... predicates);
-
-
-  public BasePredicate[] getPredicates() {
-    return predicates;
-  }
-
-  @Override
-  public Set<String> getPropertyIds() {
-    return propertyIds;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (!(o instanceof ArrayPredicate)) return false;
-
-    ArrayPredicate that = (ArrayPredicate) o;
-
-    if (propertyIds != null ? !propertyIds.equals(that.propertyIds) : that.propertyIds != null) return false;
-
-    // don't care about array order
-    Set<BasePredicate> setThisPredicates = new HashSet<BasePredicate>(Arrays.asList(predicates));
-    Set<BasePredicate> setThatPredicates = new HashSet<BasePredicate>(Arrays.asList(that.predicates));
-    return setThisPredicates.equals(setThatPredicates);
-  }
-
-  @Override
-  public int hashCode() {
-    // don't care about array order
-    int result = predicates != null ? new HashSet<BasePredicate>(Arrays.asList(predicates)).hashCode() : 0;
-    result = 31 * result + (propertyIds != null ? propertyIds.hashCode() : 0);
-    return result;
-  }
-
-  @Override
-  public void accept(PredicateVisitor visitor) {
-    visitor.acceptArrayPredicate(this);
-  }
-
-  public abstract String getOperator();
-}
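
One consequence of the equals()/hashCode() pair above is worth spelling out: the operand arrays are compared as sets, and the type check is only instanceof ArrayPredicate, so operand order is ignored and an AND can even compare equal to an OR over the same operands. A quick sketch against the classes in this patch:

    import org.apache.ambari.server.controller.predicate.*;

    public class ArrayPredicateEqualityExample {
      public static void main(String[] args) {
        BasePredicate a = new EqualsPredicate<String>("p1", "x");
        BasePredicate b = new EqualsPredicate<String>("p2", "y");

        // Operand order does not matter.
        System.out.println(new AndPredicate(a, b).equals(new AndPredicate(b, a)));  // true

        // The instanceof check also makes AND and OR over the same operands equal.
        System.out.println(new AndPredicate(a, b).equals(new OrPredicate(a, b)));   // true
      }
    }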
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/BasePredicate.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/BasePredicate.java
deleted file mode 100644
index 51748a2..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/BasePredicate.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.predicate;
-
-import org.apache.ambari.server.controller.spi.Predicate;
-
-import java.util.Set;
-
-/**
- * An extended predicate interface which allows for the retrieval of any
- * associated property ids.
- */
-public interface BasePredicate extends Predicate, PredicateVisitorAcceptor {
-  public Set<String> getPropertyIds();
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/CategoryIsEmptyPredicate.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/CategoryIsEmptyPredicate.java
deleted file mode 100644
index 69d90cb..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/CategoryIsEmptyPredicate.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.predicate;
-
-import org.apache.ambari.server.controller.spi.Resource;
-
-import java.util.Map;
-
-/**
- * Predicate that checks if the associated property category is empty.  If the associated
- * property id references a Map property, the Map itself is treated as the category.
- */
-public class CategoryIsEmptyPredicate extends CategoryPredicate {
-
-  public CategoryIsEmptyPredicate(String propertyId) {
-    super(propertyId);
-  }
-
-  @Override
-  public boolean evaluate(Resource resource) {
-    String propertyId = getPropertyId();
-
-    // If the property exists as a Map then check isEmpty
-    Object value = resource.getPropertyValue(propertyId);
-    if (value instanceof Map) {
-      Map<?, ?> mapValue = (Map<?, ?>) value;
-      return mapValue.isEmpty();
-    }
-    // Get the category
-    Map<String, Object> properties = resource.getPropertiesMap().get(propertyId);
-    return properties == null || properties.isEmpty();
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/CategoryPredicate.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/CategoryPredicate.java
deleted file mode 100644
index d6771d4..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/CategoryPredicate.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.predicate;
-
-/**
- * Base class for predicates based on property categories.
- */
-public abstract class CategoryPredicate extends PropertyPredicate implements BasePredicate {
-  public CategoryPredicate(String propertyId) {
-    super(propertyId);
-  }
-
-  @Override
-  public void accept(PredicateVisitor visitor) {
-    visitor.acceptCategoryPredicate(this);
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/ComparisonPredicate.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/ComparisonPredicate.java
deleted file mode 100644
index c98e7e6..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/ComparisonPredicate.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.predicate;
-
-import org.apache.ambari.server.controller.spi.Resource;
-
-import java.text.NumberFormat;
-import java.text.ParsePosition;
-
-/**
- * Predicate that compares a given value to a {@link Resource} property.
- */
-public abstract class ComparisonPredicate<T> extends PropertyPredicate implements BasePredicate {
-  private final Comparable<T> value;
-  private final String stringValue;
-  private final Double doubleValue;
-
-  protected ComparisonPredicate(String propertyId, Comparable<T> value) {
-    super(propertyId);
-    this.value = value;
-
-    if (value instanceof Number) {
-      stringValue = null;
-      doubleValue = ((Number) value).doubleValue();
-    }
-    else if (value instanceof String) {
-      stringValue = (String) value;
-      doubleValue = stringToDouble(stringValue);
-    }
-    else {
-      stringValue = null;
-      doubleValue = null;
-    }
-  }
-
-  public Comparable<T> getValue() {
-    return value;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (!(o instanceof ComparisonPredicate)) return false;
-    if (!super.equals(o)) return false;
-
-    ComparisonPredicate that = (ComparisonPredicate) o;
-
-    return !(value != null ? !value.equals(that.value) : that.value != null);
-  }
-
-  @Override
-  public int hashCode() {
-    int result = super.hashCode();
-    result = 31 * result + (value != null ? value.hashCode() : 0);
-    return result;
-  }
-
-  @Override
-  public void accept(PredicateVisitor visitor) {
-    visitor.acceptComparisonPredicate(this);
-  }
-
-  protected int compareValueTo(Object propertyValue) throws ClassCastException {
-
-    if (doubleValue != null) {
-      if (propertyValue instanceof Number) {
-        return Double.compare(doubleValue, ((Number) propertyValue).doubleValue());
-      }
-      else if (propertyValue instanceof String) {
-        Double doubleFromString = stringToDouble((String) propertyValue);
-        if (doubleFromString != null) {
-          return Double.compare(doubleValue, doubleFromString);
-        }
-      }
-    }
-    if (stringValue != null) {
-      return stringValue.compareTo(propertyValue.toString());
-    }
-
-    return getValue().compareTo((T) propertyValue);
-  }
-
-  private Double stringToDouble(String stringValue) {
-    ParsePosition parsePosition = new ParsePosition(0);
-    NumberFormat  numberFormat  = NumberFormat.getInstance();
-    Number        parsedNumber  = numberFormat.parse(stringValue, parsePosition);
-
-    return parsePosition.getIndex() == stringValue.length() ? parsedNumber.doubleValue() : null;
-  }
-
-  public abstract String getOperator();
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/EqualsPredicate.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/EqualsPredicate.java
deleted file mode 100644
index e0b59d4..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/EqualsPredicate.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.predicate;
-
-import org.apache.ambari.server.controller.spi.Resource;
-
-/**
- * Predicate that checks equality of a given value to a {@link Resource} property.
- */
-public class EqualsPredicate<T> extends ComparisonPredicate<T> {
-
-  public EqualsPredicate(String propertyId, Comparable<T> value) {
-    super(propertyId, value);
-  }
-
-  @Override
-  public boolean evaluate(Resource resource) {
-    Object propertyValue = resource.getPropertyValue(getPropertyId());
-    return propertyValue != null && compareValueTo(propertyValue) == 0;
-  }
-
-  @Override
-  public String getOperator() {
-    return "=";
-  }
-}
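
ComparisonPredicate above prefers numeric comparison: a String operand that parses completely as a number is compared as a double, so an equality check can match across types. A hedged sketch follows; the subclass is a hypothetical test double used only to expose the protected compareValueTo(), the property id is illustrative, and the numeric cases assume a locale whose decimal separator is '.' (NumberFormat is locale-sensitive):

    import org.apache.ambari.server.controller.predicate.EqualsPredicate;

    public class CoercionProbe<T> extends EqualsPredicate<T> {
      public CoercionProbe(String propertyId, Comparable<T> value) {
        super(propertyId, value);
      }

      // Expose the protected comparison for demonstration purposes.
      public int probe(Object propertyValue) {
        return compareValueTo(propertyValue);
      }

      public static void main(String[] args) {
        CoercionProbe<String> p = new CoercionProbe<String>("metrics/load", "1.50");
        System.out.println(p.probe(1.5));     // 0: "1.50" parses, compared numerically
        System.out.println(p.probe("1.5"));   // 0: both sides parse to the same double
        System.out.println(p.probe("x1.5"));  // non-zero: falls back to string compare
      }
    }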
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/GreaterEqualsPredicate.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/GreaterEqualsPredicate.java
deleted file mode 100644
index 2e048b5..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/GreaterEqualsPredicate.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.predicate;
-
-import org.apache.ambari.server.controller.spi.Resource;
-
-/**
- * Predicate that checks if a {@link Resource} property is greater than or equal to a given value.
- */
-public class GreaterEqualsPredicate<T> extends ComparisonPredicate<T> {
-
-  public GreaterEqualsPredicate(String propertyId, Comparable<T> value) {
-    super(propertyId, value);
-  }
-
-  @Override
-  public boolean evaluate(Resource resource) {
-    Object propertyValue = resource.getPropertyValue(getPropertyId());
-    return propertyValue != null && compareValueTo(propertyValue) <= 0;
-  }
-
-  @Override
-  public String getOperator() {
-    return ">=";
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/GreaterPredicate.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/GreaterPredicate.java
deleted file mode 100644
index db3e8c8..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/GreaterPredicate.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.predicate;
-
-import org.apache.ambari.server.controller.spi.Resource;
-
-/**
- * Predicate that checks if a {@link Resource} property is greater than a given value.
- */
-public class GreaterPredicate<T> extends ComparisonPredicate<T> {
-
-  public GreaterPredicate(String propertyId, Comparable<T> value) {
-    super(propertyId, value);
-  }
-
-  @Override
-  public boolean evaluate(Resource resource) {
-    Object propertyValue = resource.getPropertyValue(getPropertyId());
-    return propertyValue != null && compareValueTo(propertyValue) < 0;
-  }
-
-  @Override
-  public String getOperator() {
-    return ">";
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/LessEqualsPredicate.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/LessEqualsPredicate.java
deleted file mode 100644
index 560078c..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/LessEqualsPredicate.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.predicate;
-
-import org.apache.ambari.server.controller.spi.Resource;
-
-
-/**
- * Predicate that checks if a {@link Resource} property is less than or equal to a given value.
- */
-public class LessEqualsPredicate<T> extends ComparisonPredicate<T> {
-
-  public LessEqualsPredicate(String propertyId, Comparable<T> value) {
-    super(propertyId, value);
-  }
-
-  @Override
-  public boolean evaluate(Resource resource) {
-    Object propertyValue = resource.getPropertyValue(getPropertyId());
-    return propertyValue != null && compareValueTo(propertyValue) >= 0;
-  }
-
-  @Override
-  public String getOperator() {
-    return "<=";
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/LessPredicate.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/LessPredicate.java
deleted file mode 100644
index bdbe08d..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/LessPredicate.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.predicate;
-
-import org.apache.ambari.server.controller.spi.Resource;
-
-/**
- * Predicate that checks if a {@link Resource} property is less than a given value.
- */
-public class LessPredicate<T> extends ComparisonPredicate<T> {
-
-  public LessPredicate(String propertyId, Comparable<T> value) {
-    super(propertyId, value);
-  }
-
-  @Override
-  public boolean evaluate(Resource resource) {
-    Object propertyValue = resource.getPropertyValue(getPropertyId());
-    return propertyValue != null && compareValueTo(propertyValue) > 0;
-  }
-
-  @Override
-  public String getOperator() {
-    return "<";
-  }
-}
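
Taken together, the four subclasses above supply the ordering operators of the API's query language, each one pairing its getOperator() symbol with the matching sign of compareValueTo(). A hypothetical construction of the predicate tree for a query such as "metrics/cpu_user>80 and metrics/load<=5" (property ids are illustrative):

    import org.apache.ambari.server.controller.predicate.*;

    public class QueryTreeExample {
      public static void main(String[] args) {
        BasePredicate cpu   = new GreaterPredicate<Double>("metrics/cpu_user", 80.0);
        BasePredicate load  = new LessEqualsPredicate<Double>("metrics/load", 5.0);
        BasePredicate query = AndPredicate.instance(cpu, load);

        // The composed predicate reports the union of its operand property ids.
        System.out.println(query.getPropertyIds());
        // e.g. [metrics/cpu_user, metrics/load] (set order may vary)
      }
    }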
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/NotPredicate.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/NotPredicate.java
deleted file mode 100644
index 6c9c1f6..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/NotPredicate.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.predicate;
-
-import org.apache.ambari.server.controller.spi.Resource;
-
-/**
- * Predicate that negates the evaluation of another predicate.
- */
-public class NotPredicate extends UnaryPredicate {
-
-  public NotPredicate(BasePredicate predicate) {
-    super(predicate);
-  }
-
-  @Override
-  public boolean evaluate(Resource resource) {
-    return !getPredicate().evaluate(resource);
-  }
-
-  @Override
-  public String getOperator() {
-    return "NOT";
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/OrPredicate.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/OrPredicate.java
deleted file mode 100644
index b090282..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/OrPredicate.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.predicate;
-
-import org.apache.ambari.server.controller.spi.Resource;
-
-import java.util.Arrays;
-import java.util.LinkedList;
-import java.util.List;
-
-/**
- * Predicate which evaluates to true if any of the predicates in a predicate
- * array evaluate to true.
- */
-public class OrPredicate extends ArrayPredicate {
-
-  public OrPredicate(BasePredicate... predicates) {
-    super(predicates);
-  }
-
-  @Override
-  public BasePredicate create(BasePredicate... predicates) {
-    return instance(predicates);
-  }
-
-  public static BasePredicate instance(BasePredicate... predicates) {
-    List<BasePredicate> predicateList = new LinkedList<BasePredicate>();
-
-    // Simplify the predicate array
-    for (BasePredicate predicate : predicates) {
-      if (predicate instanceof AlwaysPredicate) {
-        return predicate;
-      }
-      else if (predicate instanceof OrPredicate) {
-        predicateList.addAll(Arrays.asList(((OrPredicate) predicate).getPredicates()));
-      }
-      else {
-        predicateList.add(predicate);
-      }
-    }
-    return predicateList.size() == 1 ?
-        predicateList.get(0) :
-        new OrPredicate(predicateList.toArray(new BasePredicate[predicateList.size()]));
-  }
-
-  @Override
-  public boolean evaluate(Resource resource) {
-    BasePredicate[] predicates = getPredicates();
-    for (BasePredicate predicate : predicates) {
-      if (predicate.evaluate(resource)) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  @Override
-  public String getOperator() {
-    return "OR";
-  }
-}
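
OrPredicate.instance() above simplifies differently from its AND counterpart: an always-true operand short-circuits the whole expression instead of being dropped. A quick sketch (the property id is made up):

    import org.apache.ambari.server.controller.predicate.*;

    public class OrSimplificationExample {
      public static void main(String[] args) {
        BasePredicate state = new EqualsPredicate<String>("HostRoles/state", "STARTED");

        // OR with an always-true operand is always true, so the factory
        // returns the AlwaysPredicate itself rather than building a wrapper.
        BasePredicate p = OrPredicate.instance(state, AlwaysPredicate.INSTANCE);
        System.out.println(p == AlwaysPredicate.INSTANCE);  // true
      }
    }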
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/PredicateVisitor.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/PredicateVisitor.java
deleted file mode 100644
index 3316a00..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/PredicateVisitor.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.predicate;
-
-/**
- * A visitor of predicates.
- */
-public interface PredicateVisitor {
-
-  public void acceptComparisonPredicate(ComparisonPredicate predicate);
-
-  public void acceptArrayPredicate(ArrayPredicate predicate);
-
-  public void acceptUnaryPredicate(UnaryPredicate predicate);
-
-  public void acceptAlwaysPredicate(AlwaysPredicate predicate);
-
-  public void acceptCategoryPredicate(CategoryPredicate predicate);
-}
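
A concrete visitor makes the interface above easier to picture. The sketch below is hypothetical (not part of Ambari): it walks a predicate tree and collects the operator of every comparison it contains, recursing through AND/OR and NOT nodes.

    import org.apache.ambari.server.controller.predicate.*;

    import java.util.ArrayList;
    import java.util.List;

    public class OperatorCollector implements PredicateVisitor {
      private final List<String> operators = new ArrayList<String>();

      @Override
      public void acceptComparisonPredicate(ComparisonPredicate predicate) {
        operators.add(predicate.getOperator());  // a leaf: record its operator
      }

      @Override
      public void acceptArrayPredicate(ArrayPredicate predicate) {
        for (BasePredicate child : predicate.getPredicates()) {
          child.accept(this);  // recurse into AND/OR operands
        }
      }

      @Override
      public void acceptUnaryPredicate(UnaryPredicate predicate) {
        predicate.getPredicate().accept(this);  // recurse through NOT
      }

      @Override
      public void acceptAlwaysPredicate(AlwaysPredicate predicate) {
        // always-true: nothing to record
      }

      @Override
      public void acceptCategoryPredicate(CategoryPredicate predicate) {
        // category predicates carry no comparison operator
      }

      public List<String> getOperators() {
        return operators;
      }
    }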
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/PredicateVisitorAcceptor.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/PredicateVisitorAcceptor.java
deleted file mode 100644
index 6af2cb8..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/PredicateVisitorAcceptor.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.predicate;
-
-/**
- * An acceptor of predicate visitors.
- */
-public interface PredicateVisitorAcceptor {
-
-  /**
-   * Accept the given visitor.
-   *
-   * @param visitor  the visitor
-   */
-  public void accept(PredicateVisitor visitor);
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/PropertyPredicate.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/PropertyPredicate.java
deleted file mode 100644
index 5715d2a..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/PropertyPredicate.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.predicate;
-
-import java.util.Collections;
-import java.util.Set;
-
-/**
- * Predicate that is associated with a resource property.
- */
-public abstract class PropertyPredicate implements BasePredicate {
-  private final String propertyId;
-
-  public PropertyPredicate(String propertyId) {
-    assert (propertyId != null);
-    this.propertyId = propertyId;
-  }
-
-  @Override
-  public Set<String> getPropertyIds() {
-    return Collections.singleton(propertyId);
-  }
-
-  public String getPropertyId() {
-    return propertyId;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-
-    if (this == o) {
-      return true;
-    }
-
-    if (!(o instanceof PropertyPredicate)) {
-      return false;
-    }
-
-    PropertyPredicate that = (PropertyPredicate) o;
-
-    return propertyId == null ? that.propertyId == null : propertyId.equals(that.propertyId);
-  }
-
-  @Override
-  public int hashCode() {
-    return propertyId != null ? propertyId.hashCode() : 0;
-  }
-}
-
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/UnaryPredicate.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/UnaryPredicate.java
deleted file mode 100644
index 6f708b0..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/predicate/UnaryPredicate.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.predicate;
-
-import java.util.Set;
-
-/**
- * Predicate that operates on one other predicate.
- */
-public abstract class UnaryPredicate implements BasePredicate {
-  private final BasePredicate predicate;
-
-  public UnaryPredicate(BasePredicate predicate) {
-    assert(predicate != null);
-    this.predicate = predicate;
-  }
-
-  public BasePredicate getPredicate() {
-    return predicate;
-  }
-
-  @Override
-  public Set<String> getPropertyIds() {
-    return predicate.getPropertyIds();
-  }
-
-  @Override
-  public void accept(PredicateVisitor visitor) {
-    visitor.acceptUnaryPredicate(this);
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (!(o instanceof UnaryPredicate)) return false;
-
-    UnaryPredicate that = (UnaryPredicate) o;
-
-    return predicate.equals(that.predicate);
-  }
-
-  @Override
-  public int hashCode() {
-    return predicate.hashCode();
-  }
-
-  public abstract String getOperator();
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/ClusterController.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/ClusterController.java
deleted file mode 100644
index 45acae5..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/ClusterController.java
+++ /dev/null
@@ -1,129 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.spi;
-
-
-/**
- * The cluster controller is the main access point for accessing resources
- * from the backend sources.  A cluster controller maintains a mapping of
- * resource providers keyed by resource types.
- */
-public interface ClusterController {
-
-  // ----- Monitoring ------------------------------------------------------
-
-  /**
-   * Get the resources of the given type filtered by the given request and
-   * predicate objects.
-   *
-   * @param type      the type of the requested resources
-   * @param request   the request object which defines the desired set of properties
-   * @param predicate the predicate object which filters which resources are returned
-   *
-   * @return an iterable object of the requested resources
-   *
-   * @throws UnsupportedPropertyException thrown if the request or predicate contain
-   *                                      unsupported property ids
-   * @throws SystemException an internal exception occurred
-   * @throws NoSuchResourceException no matching resource(s) found
-   * @throws NoSuchParentResourceException a specified parent resource doesn't exist
-   */
-  public Iterable<Resource> getResources(Resource.Type type,
-                                         Request request,
-                                         Predicate predicate)
-      throws UnsupportedPropertyException,
-             SystemException,
-             NoSuchResourceException,
-             NoSuchParentResourceException;
-
-  /**
-   * Get the {@link Schema schema} for the given resource type.  The schema
-   * for a given resource type describes the properties and categories provided
-   * by that type of resource.
-   *
-   * @param type the resource type
-   * @return the schema object for the given resource
-   */
-  public Schema getSchema(Resource.Type type);
-
-
-  // ----- Management -------------------------------------------------------
-
-  /**
-   * Create the resources defined by the properties in the given request object.
-   *
-   * @param type     the type of the resources
-   * @param request  the request object which defines the set of properties
-   *                 for the resources to be created
-   *
-   * @throws UnsupportedPropertyException thrown if the request contains
-   *                                      unsupported property ids
-   * @throws SystemException an internal exception occurred
-   * @throws ResourceAlreadyExistsException attempted to create a resource that already exists
-   * @throws NoSuchParentResourceException a specified parent resource doesn't exist
-   */
-  public RequestStatus createResources(Resource.Type type, Request request)
-      throws UnsupportedPropertyException,
-             SystemException,
-             ResourceAlreadyExistsException,
-             NoSuchParentResourceException;
-
-  /**
-   * Update the resources selected by the given predicate with the properties
-   * from the given request object.
-   *
-   *
-   * @param type       the type of the resources
-   * @param request    the request object which defines the set of properties
-   *                   for the resources to be updated
-   * @param predicate  the predicate object which can be used to filter which
-   *                   resources are updated
-   *
-   * @throws UnsupportedPropertyException thrown if the request or predicate
-   *                                      contain unsupported property ids
-   * @throws SystemException an internal exception occurred
-   * @throws NoSuchResourceException no matching resource(s) found
-   * @throws NoSuchParentResourceException a specified parent resource doesn't exist
-   */
-  public RequestStatus updateResources(Resource.Type type,
-                                       Request request,
-                                       Predicate predicate)
-      throws UnsupportedPropertyException,
-             SystemException,
-             NoSuchResourceException,
-             NoSuchParentResourceException;
-
-  /**
-   * Delete the resources selected by the given predicate.
-   *
-   * @param type      the type of the resources
-   * @param predicate the predicate object which can be used to filter which
-   *                  resources are deleted
-   *
-   * @throws UnsupportedPropertyException thrown if the predicate contains
-   *                                      unsupported property ids
-   * @throws SystemException an internal exception occurred
-   * @throws NoSuchResourceException no matching resource(s) found
-   * @throws NoSuchParentResourceException a specified parent resource doesn't exist
-   */
-  public RequestStatus deleteResources(Resource.Type type, Predicate predicate)
-      throws UnsupportedPropertyException,
-             SystemException,
-             NoSuchResourceException,
-             NoSuchParentResourceException;
-}
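
A hedged usage sketch of the monitoring half of the interface above. The controller and request instances are assumed to be supplied by the surrounding framework, Resource.Type.Host is assumed to be one of the defined resource types, and the property ids are illustrative:

    import org.apache.ambari.server.controller.predicate.EqualsPredicate;
    import org.apache.ambari.server.controller.spi.*;

    public final class HealthyHostLister {

      // Print the name of every healthy host visible to the given controller.
      public static void printHealthyHosts(ClusterController controller, Request request)
          throws UnsupportedPropertyException, SystemException,
                 NoSuchResourceException, NoSuchParentResourceException {
        Predicate healthy = new EqualsPredicate<String>("Hosts/host_status", "HEALTHY");
        for (Resource host : controller.getResources(Resource.Type.Host, request, healthy)) {
          System.out.println(host.getPropertyValue("Hosts/host_name"));
        }
      }
    }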
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/NoSuchParentResourceException.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/NoSuchParentResourceException.java
deleted file mode 100644
index 308841f..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/NoSuchParentResourceException.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.spi;
-
-/**
- * Indicates that a parent of a resource doesn't exist.
- */
-public class NoSuchParentResourceException extends Exception {
-
-  /**
-   * Constructor.
-   *
-   * @param msg        the message
-   * @param throwable  the root exception
-   */
-  public NoSuchParentResourceException(String msg, Throwable throwable) {
-    super(msg, throwable);
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/NoSuchResourceException.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/NoSuchResourceException.java
deleted file mode 100644
index 5cc2d84..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/NoSuchResourceException.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.spi;
-
-/**
- * Indicates that a resource doesn't exist.
- */
-public class NoSuchResourceException extends Exception {
-
-  /**
-   * Constructor.
-   *
-   * @param msg        message
-   * @param throwable  root exception
-   */
-  public NoSuchResourceException(String msg, Throwable throwable) {
-    super(msg, throwable);
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/Predicate.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/Predicate.java
deleted file mode 100644
index ae24b06..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/Predicate.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.spi;
-
-/**
- * The predicate is used to filter the resources returned from the cluster
- * controller.  The predicate can examine a resource object and determine
- * whether or not it should be included in the returned results.
- */
-public interface Predicate {
-  /**
-   * Evaluate the predicate for the given resource.
-   *
-   * @param resource the resource to evaluate the predicate against
-   * @return the result of applying the predicate to the given resource
-   */
-  public boolean evaluate(Resource resource);
-}
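
Since this is a plain Java 6-era SPI, a custom filter can be hand-rolled with an anonymous class. A minimal sketch (the property id is illustrative, not mandated by the interface):

    import org.apache.ambari.server.controller.spi.Predicate;
    import org.apache.ambari.server.controller.spi.Resource;

    public class CustomPredicateExample {

      // A filter that keeps only resources with a non-empty host name.
      public static Predicate nonEmptyHostName() {
        return new Predicate() {
          @Override
          public boolean evaluate(Resource resource) {
            Object name = resource.getPropertyValue("Hosts/host_name");
            return name != null && name.toString().length() > 0;
          }
        };
      }
    }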
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/PropertyProvider.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/PropertyProvider.java
deleted file mode 100644
index 6829c1b..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/PropertyProvider.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.spi;
-
-import java.util.Set;
-
-/**
- * The property provider is used to plug in various property sources into a
- * resource provider.  The property provider is able to populate, or partially
- * populate, a given resource object with property values.
- */
-public interface PropertyProvider {
-
-  /**
-   * Populate the given set of resource with any properties that this property
-   * provider can provide and return a populated set of resources.  The provider
- * may drop resources from the original set if it determines that they don't
-   * meet the conditions of the predicate.
-   *
-   * @param resources  the resources to be populated
-   * @param request    the request object which defines the desired set of properties
-   * @param predicate  the predicate object which filters which resources are returned
-   *
-   * @return the populated set of resources
-   *
-   * @throws SystemException thrown if resources cannot be populated
-   */
-  public Set<Resource> populateResources(Set<Resource> resources, Request request, Predicate predicate)
-      throws SystemException;
-
-  /**
-   * Check whether the set of given property ids is supported by this resource
-   * provider.
-   *
-   * @return a subset of the given property id set containing any property ids not
-   *         supported by this resource provider.  An empty return set indicates
-   *         that all of the given property ids are supported.
-   */
-  public Set<String> checkPropertyIds(Set<String> propertyIds);
-}
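
The checkPropertyIds() contract above is easy to invert by accident, so a small sketch may help: the returned set holds the ids the provider does NOT support, which means an empty result signals full support. The helper below is hypothetical; the provider instance is assumed to come from elsewhere.

    import org.apache.ambari.server.controller.spi.PropertyProvider;

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    public class PropertyIdCheckExample {

      // True when the provider supports every one of the given property ids.
      public static boolean supportsAll(PropertyProvider provider, String... ids) {
        Set<String> unsupported =
            provider.checkPropertyIds(new HashSet<String>(Arrays.asList(ids)));
        return unsupported.isEmpty();
      }
    }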
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/ProviderModule.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/ProviderModule.java
deleted file mode 100644
index 33dfc24..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/ProviderModule.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.spi;
-
-import java.util.List;
-
-/**
- * Interface to allow the plugging in of resource and property providers.
- */
-public interface ProviderModule {
-  /**
-   * Get the resource provider for the given resource type.
-   *
-   * @param type  the resource type
-   *
-   * @return the resource provider
-   */
-  public ResourceProvider getResourceProvider(Resource.Type type);
-
-  /**
-   * Get the list of property providers for the given resource type.
-   *
-   * @param type  the resource type
-   *
-   * @return the list of property providers
-   */
-  public List<PropertyProvider> getPropertyProviders(Resource.Type type);
-}
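A module sketch wiring the SPI together. It assumes a single resource provider instance supplied by the caller and reuses the hypothetical ExamplePropertyProvider sketched earlier; a real module dispatches on the resource type.

    package org.apache.ambari.server.controller.spi;

    import java.util.Collections;
    import java.util.List;

    // Sketch only: serves the same resource provider for every type.
    public class ExampleProviderModule implements ProviderModule {

      private final ResourceProvider resourceProvider;

      public ExampleProviderModule(ResourceProvider resourceProvider) {
        this.resourceProvider = resourceProvider;
      }

      @Override
      public ResourceProvider getResourceProvider(Resource.Type type) {
        // A real module would return a type-specific provider here.
        return resourceProvider;
      }

      @Override
      public List<PropertyProvider> getPropertyProviders(Resource.Type type) {
        return Collections.<PropertyProvider>singletonList(new ExamplePropertyProvider());
      }
    }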
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/Request.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/Request.java
deleted file mode 100644
index 05ccb65..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/Request.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.spi;
-
-import java.util.Map;
-import java.util.Set;
-
-/**
- * The request object carries the properties or property ids required to
- * satisfy a resource request.  The request object also carries temporal
- * (date range) information, if any, for each requested property.
- */
-public interface Request {
-
-  /**
-   * Get the set of property ids being requested.  Used for requests to get
-   * resources.  An empty set signifies that all supported properties should
-   * be returned (i.e. select *).
-   *
-   * @return the set of property ids being requested
-   */
-  public Set<String> getPropertyIds();
-
-  /**
-   * Get the property values of the request.  Used
-   * for requests to update or create resources.  Each value
-   * in the set is a map of properties for a resource being
-   * created/updated.  Each map contains property values keyed
-   * by property ids.
-   *
-   * @return the set of properties being requested
-   */
-  public Set<Map<String, Object>> getProperties();
-
-  /**
-   * Get the {@link TemporalInfo temporal information} for the given property
-   * id for this request, if any.
-   *
-   * @param id the property id
-   * @return the temporal information for the given property id; null if none exists
-   */
-  public TemporalInfo getTemporalInfo(String id);
-}
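Two checks a provider might perform against this contract, sketched below; the defensive null guard is an assumption, in case a caller supplies no id set at all.

    package org.apache.ambari.server.controller.spi;

    import java.util.Set;

    // Sketch only: interpreting a Request the way a provider might.
    public final class RequestInspector {

      // True when the property should be returned: an empty (or absent)
      // id set means "select *".
      public static boolean wantsProperty(Request request, String propertyId) {
        Set<String> ids = request.getPropertyIds();
        return ids == null || ids.isEmpty() || ids.contains(propertyId);
      }

      // True when a date range rather than a point-in-time value was requested.
      public static boolean isTemporal(Request request, String propertyId) {
        return request.getTemporalInfo(propertyId) != null;
      }
    }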
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/RequestStatus.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/RequestStatus.java
deleted file mode 100644
index f9847a9..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/RequestStatus.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.spi;
-
-import java.util.Set;
-
-/**
- * A RequestStatus represents the result of an asynchronous operation on resources. Methods are
- * provided to check the status of the operation and to retrieve the set of resources involved
- * in the operation.
- */
-public interface RequestStatus {
-
-  /**
-   * Get the resources involved in the operation initiated by the request.
-   *
-   * @return the set of resources
-   */
-  public Set<Resource> getAssociatedResources();
-
-  /**
-   * Get the resource of type Request associated with the asynchronous operation.
-   *
-   * @return the request resource
-   */
-  public Resource getRequestResource();
-
-  /**
-   * Get the status of the operation initiated by the request.
-   *
-   * @return the status
-   */
-  public Status getStatus();
-
-  /**
-   * Request status.
-   */
-  public enum Status {
-    Accepted,
-    InProgress,
-    Complete
-  }
-}
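A small sketch of how a caller might branch on the returned status:

    package org.apache.ambari.server.controller.spi;

    // Sketch only: Accepted and InProgress both mean the operation is still
    // running, so the request resource should be polled for progress.
    public final class RequestStatusExample {

      public static boolean needsPolling(RequestStatus status) {
        return status.getStatus() != RequestStatus.Status.Complete;
      }

      public static Resource progressResource(RequestStatus status) {
        // The resource of type Request that tracks the asynchronous operation.
        return status.getRequestResource();
      }
    }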
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/Resource.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/Resource.java
deleted file mode 100644
index e2bb857..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/Resource.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.spi;
-
-
-import org.apache.ambari.server.api.util.TreeNode;
-
-import java.util.Map;
-
-/**
- * The resource object represents a requested resource.  The resource
- * contains a collection of values for the requested properties.
- */
-public interface Resource {
-  /**
-   * Get the resource type.
-   *
-   * @return the resource type
-   */
-  public Type getType();
-
-  /**
-   * Get the properties contained by this resource.
-   * Each category is contained in a sub-node.
-   *
-   * @return resource properties tree
-   */
-  public TreeNode<Map<String, Object>> getProperties();
-
-  /**
-   * Obtain the properties contained by this resource in a map structure.
-   * The category/property hierarchy is flattened into a map where
-   * each key is the absolute category name and the corresponding
-   * value is a map of properties (name/value pairs) for that category.
-   *
-   * @return  resource properties map
-   */
-  public Map<String, Map<String, Object>> getPropertiesMap();
-
-  /**
-   * Set a property value for the given property id on this resource.
-   *
-   * @param id    the property id
-   * @param value the value
-   */
-  public void setProperty(String id, Object value);
-
-  /**
-   * Add an empty category to this resource.
-   *
-   * @param id    the category id
-   */
-  public void addCategory(String id);
-
-  /**
-   * Get a property value for the given property id from this resource.
-   *
-   * @param id the property id
-   * @return the property value
-   */
-  public Object getPropertyValue(String id);
-
-  /**
-   * Resource types.
-   */
-  public enum Type {
-    Cluster,
-    Service,
-    Host,
-    Component,
-    HostComponent,
-    Configuration,
-    Action,
-    Request,
-    Task,
-    User
-  }
-}
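An illustrative interaction with a Resource; the "Hosts/..." property ids follow the category/name scheme but are examples only.

    package org.apache.ambari.server.controller.spi;

    // Sketch only: property ids embed their category path, so setting
    // "Hosts/host_name" places the value in the "Hosts" category.
    public final class ResourceExample {

      public static void fill(Resource resource) {
        resource.setProperty("Hosts/host_name", "host1.example.com");
        resource.setProperty("Hosts/total_mem", Long.valueOf(16384));

        // Point lookup by the same id.
        System.out.println(resource.getPropertyValue("Hosts/host_name"));

        // Flattened view keyed by absolute category name, e.g.
        // "Hosts" -> {host_name=host1.example.com, total_mem=16384}.
        System.out.println(resource.getPropertiesMap());
      }
    }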
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/ResourceAlreadyExistsException.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/ResourceAlreadyExistsException.java
deleted file mode 100644
index 9f5da16..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/ResourceAlreadyExistsException.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.spi;
-
-/**
- * Indicates that a resource already exists.
- */
-public class ResourceAlreadyExistsException extends Exception {
-  /**
-   * Constructor.
-   *
-   * @param msg  the detail message
-   */
-  public ResourceAlreadyExistsException(String msg) {
-    super(msg);
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/ResourceProvider.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/ResourceProvider.java
deleted file mode 100644
index de1c9dd..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/ResourceProvider.java
+++ /dev/null
@@ -1,153 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.spi;
-
-import java.util.Map;
-import java.util.Set;
-
-/**
- * The resource provider allows for the plugging in of a back-end data store
- * for a resource type.  The resource provider is associated with a specific
- * resource type and can be queried for a list of resources of that type.
- * The resource provider plugs into and is used by the
- * {@link ClusterController cluster controller} to obtain a list of resources
- * for a given request.
- */
-public interface ResourceProvider {
-
-  /**
-   * Create the resources defined by the properties in the given request object.
-   *
-   *
-   * @param request  the request object which defines the set of properties
-   *                 for the resources to be created
-   *
-   * @return the request status
-   *
-   * @throws SystemException an internal system exception occurred
-   * @throws UnsupportedPropertyException the request contains unsupported property ids
-   * @throws ResourceAlreadyExistsException attempted to create a resource which already exists
-   * @throws NoSuchParentResourceException a parent resource of the resource to create doesn't exist
-   */
-  public RequestStatus createResources(Request request)
-      throws SystemException,
-      UnsupportedPropertyException,
-      ResourceAlreadyExistsException,
-      NoSuchParentResourceException;
-
-  /**
-   * Get a set of {@link Resource resources} based on the given request and predicate
-   * information.
-   * <p>
-   * Note that it is not required for this resource provider to completely filter
-   * the set of resources based on the given predicate.  It may not be possible
-   * since some of the properties involved may be provided by another
-   * {@link PropertyProvider provider}.  This partial filtering is allowed because
-   * the predicate will always be applied by the calling cluster controller.  The
-   * predicate is made available at this level so that some pre-filtering can be done
-   * as an optimization.
-   * <p>
-   * A simple implementation of a resource provider may choose to just return all of
-   * the resources of a given type and allow the calling cluster controller to filter
-   * based on the predicate.
-   *
-   * @param request    the request object which defines the desired set of properties
-   * @param predicate  the predicate object which can be used to filter which
-   *                   resources are returned
-   * @return a set of resources based on the given request and predicate information
-   *
-   * @throws SystemException an internal system exception occurred
-   * @throws UnsupportedPropertyException the request contains unsupported property ids
-   * @throws NoSuchResourceException the requested resource instance doesn't exist
-   * @throws NoSuchParentResourceException a parent resource of the requested resource doesn't exist
-   */
-  public Set<Resource> getResources(Request request, Predicate predicate)
-      throws SystemException,
-      UnsupportedPropertyException,
-      NoSuchResourceException,
-      NoSuchParentResourceException;
-
-  /**
-   * Update the resources selected by the given predicate with the properties
-   * from the given request object.
-   *
-   * @param request    the request object which defines the set of properties
-   *                   for the resources to be updated
-   * @param predicate  the predicate object which can be used to filter which
-   *                   resources are updated
-   *
-   * @return the request status
-   *
-   * @throws SystemException an internal system exception occurred
-   * @throws UnsupportedPropertyException the request contains unsupported property ids
-   * @throws NoSuchResourceException the resource instance to be updated doesn't exist
-   * @throws NoSuchParentResourceException a parent resource of the resource doesn't exist
-   */
-  public RequestStatus updateResources(Request request, Predicate predicate)
-      throws SystemException,
-      UnsupportedPropertyException,
-      NoSuchResourceException,
-      NoSuchParentResourceException;
-
-  /**
-   * Delete the resources selected by the given predicate.
-   *
-   * @param predicate the predicate object which can be used to filter which
-   *                  resources are deleted
-   *
-   * @return the request status
-   *
-   * @throws SystemException an internal system exception occurred
-   * @throws UnsupportedPropertyException the request contains unsupported property ids
-   * @throws NoSuchResourceException the resource instance to be deleted doesn't exist
-   * @throws NoSuchParentResourceException a parent resource of the resource doesn't exist
-   */
-  public RequestStatus deleteResources(Predicate predicate)
-      throws SystemException,
-      UnsupportedPropertyException,
-      NoSuchResourceException,
-      NoSuchParentResourceException;
-
-  /**
-   * Get the key property ids for the resource type associated with this resource
-   * provider.  The key properties are those that uniquely identify the resource.
-   * <p>
-   * For example, the resource 'HostComponent' is uniquely identified by
-   * its associated 'Cluster', 'Host' and 'Component' resources.  The key property ids
-   * for a 'HostComponent' resource include the property ids of the foreign key
-   * references from the 'HostComponent' to 'Cluster', 'Host' and 'Component' resources.
-   *
-   * @return a map of key property ids
-   */
-  public Map<Resource.Type, String> getKeyPropertyIds();
-
-  /**
-   * Check whether the given set of property ids is supported by this resource
-   * provider.
-   *
-   * @param propertyIds  the property ids to check
-   *
-   * @return a subset of the given property id set containing any property ids not
-   *         supported by this resource provider.  An empty return set indicates
-   *         that all of the given property ids are supported.
-   */
-  public Set<String> checkPropertyIds(Set<String> propertyIds);
-}
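The "simple implementation" that the getResources javadoc allows, sketched against the SPI above: it returns everything it holds and leaves all predicate filtering to the calling cluster controller. The in-memory backing store and the unsupported write operations are assumptions for illustration.

    package org.apache.ambari.server.controller.spi;

    import java.util.Collections;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    // Sketch only: performs no pre-filtering and relies on the calling
    // cluster controller to apply the predicate, as permitted above.
    public class InMemoryResourceProvider implements ResourceProvider {

      // Hypothetical backing store; a real provider would query a back end.
      private final Set<Resource> store = new HashSet<Resource>();

      @Override
      public Set<Resource> getResources(Request request, Predicate predicate)
          throws SystemException, UnsupportedPropertyException,
                 NoSuchResourceException, NoSuchParentResourceException {
        // Return all resources of this type; the controller filters them.
        return new HashSet<Resource>(store);
      }

      @Override
      public RequestStatus createResources(Request request)
          throws SystemException, UnsupportedPropertyException,
                 ResourceAlreadyExistsException, NoSuchParentResourceException {
        throw new SystemException("create is not supported by this sketch", null);
      }

      @Override
      public RequestStatus updateResources(Request request, Predicate predicate)
          throws SystemException, UnsupportedPropertyException,
                 NoSuchResourceException, NoSuchParentResourceException {
        throw new SystemException("update is not supported by this sketch", null);
      }

      @Override
      public RequestStatus deleteResources(Predicate predicate)
          throws SystemException, UnsupportedPropertyException,
                 NoSuchResourceException, NoSuchParentResourceException {
        throw new SystemException("delete is not supported by this sketch", null);
      }

      @Override
      public Map<Resource.Type, String> getKeyPropertyIds() {
        return Collections.emptyMap();
      }

      @Override
      public Set<String> checkPropertyIds(Set<String> propertyIds) {
        // Claim support for everything; a real provider would subtract its
        // supported ids from the given set and return the remainder.
        return Collections.emptySet();
      }
    }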
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/Schema.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/Schema.java
deleted file mode 100644
index b2d7517..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/Schema.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.spi;
-
-
-/**
- * The schema is used to describe all of the properties that a resource type
- * supports.
- */
-public interface Schema {
-
-  /**
-   * Get the property id for the property that uniquely identifies
-   * the given resource type for the resource described by this schema.
-   * <p>
-   * For example, the resource 'HostComponent' is uniquely identified by
-   * its associated 'Cluster', 'Host' and 'Component' resources.  Passing
-   * the 'Host' resource type to this method on a schema object of a 'HostComponent'
-   * resource will return the id of the property of the foreign key reference from
-   * the 'HostComponent' to the 'Host'.
-   *
-   * @param type the resource type
-   * @return the key property id for the given resource type
-   */
-  public String getKeyPropertyId(Resource.Type type);
-}
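A usage sketch for the HostComponent example in the javadoc, assuming `hostComponentSchema` was obtained from the cluster controller:

    package org.apache.ambari.server.controller.spi;

    // Sketch only: resolving the foreign-key property ids of a HostComponent.
    public final class SchemaExample {

      public static void printKeys(Schema hostComponentSchema) {
        // Property that links a HostComponent to its Host ...
        System.out.println(hostComponentSchema.getKeyPropertyId(Resource.Type.Host));
        // ... and to its Cluster.
        System.out.println(hostComponentSchema.getKeyPropertyId(Resource.Type.Cluster));
      }
    }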
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/SystemException.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/SystemException.java
deleted file mode 100644
index 0426657..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/SystemException.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.spi;
-
-/**
- * Indicates that a system exception occurred.
- */
-public class SystemException extends Exception {
-  /**
-   * Constructor.
-   *
-   * @param msg        message
-   * @param throwable  root exception
-   */
-  public SystemException(String msg, Throwable throwable) {
-    super(msg, throwable);
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/TemporalInfo.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/TemporalInfo.java
deleted file mode 100644
index dca61be..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/TemporalInfo.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.spi;
-
-/**
- * Temporal query data.
- */
-public interface TemporalInfo {
-  /**
-   * Get the start of the requested time range.  The time is given in
-   * seconds since the Unix epoch.
-   *
-   * @return the start time in seconds
-   */
-  Long getStartTime();
-
-  /**
-   * Get the end of the requested time range.  The time is given in
-   * seconds since the Unix epoch.
-   *
-   * @return the end time in seconds
-   */
-  Long getEndTime();
-
-  /**
-   * Get the requested time between each data point of the temporal
-   * data.  The time is given in seconds.
-   *
-   * @return the step time in seconds
-   */
-  Long getStep();
-}
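Because all three values are expressed in seconds, the expected sample count of a temporal query falls out directly; a sketch, assuming an inclusive range:

    package org.apache.ambari.server.controller.spi;

    // Sketch only: a one-hour range (3600 s) at a 15 s step yields
    // 3600 / 15 + 1 = 241 points, counting both endpoints.
    public final class TemporalInfoExample {

      public static long expectedPoints(TemporalInfo info) {
        long span = info.getEndTime() - info.getStartTime();
        return span / info.getStep() + 1;
      }
    }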
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/UnsupportedPropertyException.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/UnsupportedPropertyException.java
deleted file mode 100644
index 5ace50c..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/spi/UnsupportedPropertyException.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.spi;
-
-import java.util.Set;
-
-/**
- * Thrown to indicate that the requested properties are not supported for the
- * associated resource type.
- */
-public class UnsupportedPropertyException extends Exception {
-  /**
-   * The resource type.
-   */
-  private final Resource.Type type;
-
-  /**
-   * The unsupported property ids.
-   */
-  private final Set<String> propertyIds;
-
-  /**
-   * Construct an UnsupportedPropertyException.
-   *
-   * @param type         the resource type
-   * @param propertyIds  the unsupported property ids
-   */
-  public UnsupportedPropertyException(Resource.Type type, Set<String> propertyIds) {
-    super("The properties " + propertyIds +
-        " specified in the request or predicate are not supported for the resource type " +
-        type + ".");
-    this.type = type;
-    this.propertyIds = propertyIds;
-  }
-
-  /**
-   * Get the resource type.
-   *
-   * @return the resource type
-   */
-  public Resource.Type getType() {
-    return type;
-  }
-
-  /**
-   * Get the unsupported property ids.
-   *
-   * @return the unsupported property ids
-   */
-  public Set<String> getPropertyIds() {
-    return propertyIds;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/ClusterControllerHelper.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/ClusterControllerHelper.java
deleted file mode 100644
index 3b0c12a..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/ClusterControllerHelper.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.utilities;
-
-import org.apache.ambari.server.controller.spi.ProviderModule;
-import org.apache.ambari.server.controller.internal.ClusterControllerImpl;
-import org.apache.ambari.server.controller.spi.ClusterController;
-
-/**
- * Temporary class to bootstrap a cluster controller.  TODO: Replace this global state with injection.
- */
-public class ClusterControllerHelper {
-
-  private static final String PROVIDER_MODULE_CLASS = System.getProperty("provider.module.class",
-      "org.apache.ambari.server.controller.internal.DefaultProviderModule");
-
-  private static ClusterController controller;
-
-  public static synchronized ClusterController getClusterController() {
-    if (controller == null) {
-      try {
-        Class<?> implClass = Class.forName(PROVIDER_MODULE_CLASS);
-        ProviderModule providerModule = (ProviderModule) implClass.newInstance();
-        controller = new ClusterControllerImpl(providerModule);
-
-      } catch (Exception e) {
-        throw new IllegalStateException("Can't create provider module " + PROVIDER_MODULE_CLASS, e);
-      }
-    }
-    return controller;
-  }
-}
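Since PROVIDER_MODULE_CLASS is read when the class initializes, a custom module must be configured before the first use, either with -Dprovider.module.class=... on the JVM command line or programmatically as below; the module class name here is hypothetical.

    package org.apache.ambari.server.controller.utilities;

    import org.apache.ambari.server.controller.spi.ClusterController;

    // Sketch only: the property must be set before ClusterControllerHelper is
    // initialized, because PROVIDER_MODULE_CLASS is read at class load time.
    public final class BootstrapExample {
      public static void main(String[] args) {
        System.setProperty("provider.module.class",
            "com.example.CustomProviderModule"); // hypothetical module class
        ClusterController controller = ClusterControllerHelper.getClusterController();
        System.out.println(controller);
      }
    }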
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/DBHelper.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/DBHelper.java
deleted file mode 100644
index e23de5e..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/DBHelper.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.utilities;
-
-import org.apache.ambari.server.controller.jdbc.ConnectionFactory;
-import org.apache.ambari.server.controller.jdbc.SQLiteConnectionFactory;
-import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jackson.type.TypeReference;
-
-import java.io.IOException;
-import java.sql.Connection;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * Helper for reading host information from the test SQLite database.
- */
-public class DBHelper {
-  private static final String DB_FILE_NAME = System.getProperty("ambariapi.dbfile", "src/test/resources/data.db");
-
-  public static final ConnectionFactory CONNECTION_FACTORY = new SQLiteConnectionFactory(DB_FILE_NAME);
-
-  private static final Map<String, String> HOSTS = readHosts();
-
-  public static Map<String, String> getHosts() {
-    return HOSTS;
-  }
-
-  private static Map<String, String> readHosts() {
-    Map<String, String> hosts = new HashMap<String, String>();
-
-    try {
-      Connection connection = CONNECTION_FACTORY.getConnection();
-
-      try {
-        String sql = "select attributes from hosts";
-
-        Statement statement = connection.createStatement();
-
-        ResultSet rs = statement.executeQuery(sql);
-
-        ObjectMapper mapper = new ObjectMapper();
-
-        while (rs.next()) {
-          String attributes = rs.getString(1);
-
-          if (!attributes.startsWith("[]")) {
-            try {
-              Map<String, String> attributeMap = mapper.readValue(attributes, new TypeReference<Map<String, String>>() {
-              });
-              hosts.put(attributeMap.get("privateFQDN"), attributeMap.get("publicFQDN"));
-            } catch (IOException e) {
-              throw new IllegalStateException("Can't read hosts " + attributes, e);
-            }
-          }
-        }
-
-        statement.close();
-      } finally {
-        connection.close();
-      }
-
-    } catch (SQLException e) {
-      throw new IllegalStateException("Can't access DB.", e);
-    }
-
-    return hosts;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/PredicateBuilder.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/PredicateBuilder.java
deleted file mode 100644
index dc8771b..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/PredicateBuilder.java
+++ /dev/null
@@ -1,218 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.utilities;
-
-import org.apache.ambari.server.controller.predicate.AndPredicate;
-import org.apache.ambari.server.controller.predicate.BasePredicate;
-import org.apache.ambari.server.controller.predicate.EqualsPredicate;
-import org.apache.ambari.server.controller.predicate.GreaterEqualsPredicate;
-import org.apache.ambari.server.controller.predicate.GreaterPredicate;
-import org.apache.ambari.server.controller.predicate.LessEqualsPredicate;
-import org.apache.ambari.server.controller.predicate.LessPredicate;
-import org.apache.ambari.server.controller.predicate.NotPredicate;
-import org.apache.ambari.server.controller.predicate.OrPredicate;
-
-import java.util.LinkedList;
-import java.util.List;
-
-/**
- * Builder for predicates.
- */
-public class PredicateBuilder {
-
-  private String propertyId;
-  private List<BasePredicate> predicates = new LinkedList<BasePredicate>();
-  private Operator operator = null;
-  private final PredicateBuilder outer;
-  private boolean done = false;
-  private boolean not = false;
-
-  public PredicateBuilder() {
-    this.outer = null;
-  }
-
-  private PredicateBuilder(PredicateBuilder outer) {
-    this.outer = outer;
-  }
-
-  private enum Operator {
-    And,
-    Or
-  }
-
-  public PredicateBuilderWithProperty property(String id) {
-    checkDone();
-    propertyId = id;
-    return new PredicateBuilderWithProperty();
-  }
-
-  public PredicateBuilder not() {
-    not = true;
-    return this;
-  }
-
-  public PredicateBuilder begin() {
-    checkDone();
-    return new PredicateBuilder(this);
-  }
-
-  public BasePredicate toPredicate() {
-    return getPredicate();
-  }
-
-  private void checkDone() {
-    if (done) {
-      throw new IllegalStateException("Can't reuse a predicate builder.");
-    }
-  }
-
-  private PredicateBuilderWithPredicate getPredicateBuilderWithPredicate() {
-    return new PredicateBuilderWithPredicate();
-  }
-
-  private void addPredicate(BasePredicate predicate) {
-    predicates.add(predicate);
-  }
-
-  private void handleComparator() {
-    if (operator == null) {
-      return;
-    }
-
-    if (predicates.size() == 0) {
-      throw new IllegalStateException("No left operand.");
-    }
-    BasePredicate predicate;
-
-    switch (operator) {
-      case And:
-        predicate = new AndPredicate(predicates.toArray(new BasePredicate[predicates.size()]));
-        break;
-      case Or:
-        predicate = new OrPredicate(predicates.toArray(new BasePredicate[predicates.size()]));
-        break;
-      default:
-        throw new IllegalStateException("Unknown operator " + this.operator);
-    }
-    predicates.clear();
-    addPredicate(predicate);
-  }
-
-  private BasePredicate getPredicate() {
-    handleComparator();
-
-    if (predicates.size() == 1) {
-      BasePredicate predicate = predicates.get(0);
-      if (not) {
-        predicate = new NotPredicate(predicate);
-        not = false;
-      }
-      return predicate;
-    }
-    throw new IllegalStateException("Can't return a predicate.");
-  }
-
-  public class PredicateBuilderWithProperty {
-
-    // ----- Equals -----
-    public <T> PredicateBuilderWithPredicate equals(Comparable<T> value) {
-      if (propertyId == null) {
-        throw new IllegalStateException("No property.");
-      }
-      addPredicate(new EqualsPredicate<T>(propertyId, value));
-
-      return new PredicateBuilderWithPredicate();
-    }
-
-    // ----- Greater than -----
-    public <T> PredicateBuilderWithPredicate greaterThan(Comparable<T> value) {
-      if (propertyId == null) {
-        throw new IllegalStateException("No property.");
-      }
-      addPredicate(new GreaterPredicate<T>(propertyId, value));
-
-      return new PredicateBuilderWithPredicate();
-    }
-
-    // ----- Greater than equal to -----
-    public <T> PredicateBuilderWithPredicate greaterThanEqualTo(Comparable<T> value) {
-      if (propertyId == null) {
-        throw new IllegalStateException("No property.");
-      }
-      addPredicate(new GreaterEqualsPredicate<T>(propertyId, value));
-
-      return new PredicateBuilderWithPredicate();
-    }
-
-    // ----- Less than -----
-    public <T> PredicateBuilderWithPredicate lessThan(Comparable<T> value) {
-      if (propertyId == null) {
-        throw new IllegalStateException("No property.");
-      }
-      addPredicate(new LessPredicate<T>(propertyId, value));
-
-      return new PredicateBuilderWithPredicate();
-    }
-
-    // ----- Less than equal to -----
-    public <T> PredicateBuilderWithPredicate lessThanEqualTo(Comparable<T> value) {
-      if (propertyId == null) {
-        throw new IllegalStateException("No property.");
-      }
-      addPredicate(new LessEqualsPredicate<T>(propertyId, value));
-
-      return new PredicateBuilderWithPredicate();
-    }
-  }
-
-  public class PredicateBuilderWithPredicate {
-    public PredicateBuilder and() {
-
-      if (operator != Operator.And) {
-        handleComparator();
-        operator = Operator.And;
-      }
-      return PredicateBuilder.this;
-    }
-
-    public PredicateBuilder or() {
-
-      if (operator != Operator.Or) {
-        handleComparator();
-        operator = Operator.Or;
-      }
-      return PredicateBuilder.this;
-    }
-
-    public BasePredicate toPredicate() {
-      if (outer != null) {
-        throw new IllegalStateException("Unbalanced block - missing end.");
-      }
-      done = true;
-      return getPredicate();
-    }
-
-    public PredicateBuilderWithPredicate end() {
-      if (outer == null) {
-        throw new IllegalStateException("Unbalanced block - missing begin.");
-      }
-      outer.addPredicate(getPredicate());
-      return outer.getPredicateBuilderWithPredicate();
-    }
-  }
-}
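A usage sketch of the fluent API, building the equivalent of a == "foo" && (b > 5 || c < 10); the property ids are placeholders. Note that checkDone() prevents a builder from being reused once toPredicate() has been called.

    package org.apache.ambari.server.controller.utilities;

    import org.apache.ambari.server.controller.predicate.BasePredicate;

    // Sketch only: begin()/end() delimit the parenthesized sub-expression.
    public final class PredicateBuilderExample {

      public static BasePredicate build() {
        return new PredicateBuilder()
            .property("a").equals("foo")
            .and()
            .begin()
              .property("b").greaterThan(5)
              .or()
              .property("c").lessThan(10)
            .end()
            .toPredicate();
      }
    }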
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/PredicateHelper.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/PredicateHelper.java
deleted file mode 100644
index 381fcac..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/PredicateHelper.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.utilities;
-
-import org.apache.ambari.server.controller.internal.PropertyPredicateVisitor;
-import org.apache.ambari.server.controller.predicate.BasePredicate;
-import org.apache.ambari.server.controller.predicate.PredicateVisitor;
-import org.apache.ambari.server.controller.predicate.PredicateVisitorAcceptor;
-import org.apache.ambari.server.controller.spi.Predicate;
-
-import java.util.Collections;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Utility methods for working with predicates.
- */
-public class PredicateHelper {
-
-  public static Set<String> getPropertyIds(Predicate predicate) {
-    if (predicate instanceof BasePredicate) {
-      return ((BasePredicate) predicate).getPropertyIds();
-    }
-    return Collections.emptySet();
-  }
-
-  public static void visit(Predicate predicate, PredicateVisitor visitor) {
-    if (predicate instanceof PredicateVisitorAcceptor) {
-      ((PredicateVisitorAcceptor) predicate).accept(visitor);
-    }
-  }
-
-  /**
-   * Get a map of property values from a given predicate.
-   *
-   * @param predicate  the predicate
-   *
-   * @return the map of properties
-   */
-  public static Map<String, Object> getProperties(Predicate predicate) {
-    if (predicate == null) {
-      return Collections.emptyMap();
-    }
-    PropertyPredicateVisitor visitor = new PropertyPredicateVisitor();
-    visit(predicate, visitor);
-    return visitor.getProperties();
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/PropertyHelper.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/PropertyHelper.java
deleted file mode 100644
index ab772b3..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/PropertyHelper.java
+++ /dev/null
@@ -1,332 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.utilities;
-
-import org.apache.ambari.server.controller.internal.PropertyInfo;
-import org.apache.ambari.server.controller.internal.RequestImpl;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.spi.TemporalInfo;
-import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jackson.type.TypeReference;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Utility methods for working with property ids, requests and resources.
- */
-public class PropertyHelper {
-
-  private static final String PROPERTIES_FILE = "properties.json";
-  private static final String GANGLIA_PROPERTIES_FILE = "ganglia_properties.json";
-  private static final String JMX_PROPERTIES_FILE = "jmx_properties.json";
-  private static final String KEY_PROPERTIES_FILE = "key_properties.json";
-  private static final char EXTERNAL_PATH_SEP = '/';
-
-  private static final Map<Resource.Type, Set<String>> PROPERTY_IDS = readPropertyIds(PROPERTIES_FILE);
-  private static final Map<Resource.Type, Map<String, Map<String, PropertyInfo>>> JMX_PROPERTY_IDS = readPropertyProviderIds(JMX_PROPERTIES_FILE);
-  private static final Map<Resource.Type, Map<String, Map<String, PropertyInfo>>> GANGLIA_PROPERTY_IDS = readPropertyProviderIds(GANGLIA_PROPERTIES_FILE);
-  private static final Map<Resource.Type, Map<Resource.Type, String>> KEY_PROPERTY_IDS = readKeyPropertyIds(KEY_PROPERTIES_FILE);
-
-  public static String getPropertyId(String category, String name) {
-    String propertyId = (category == null || category.isEmpty()) ? name :
-        (name == null || name.isEmpty()) ? category : category + EXTERNAL_PATH_SEP + name;
-
-    if (propertyId.endsWith("/")) {
-      propertyId = propertyId.substring(0, propertyId.length() - 1);
-    }
-    return propertyId;
-  }
-
-
-  public static Set<String> getPropertyIds(Resource.Type resourceType) {
-    Set<String> propertyIds = PROPERTY_IDS.get(resourceType);
-    return propertyIds == null ? Collections.<String>emptySet() : propertyIds;
-  }
-
-  /**
-   * Extract the set of property ids from a component PropertyInfo map.
-   *
-   * @param componentPropertyInfoMap  the map
-   *
-   * @return the set of property ids
-   */
-  public static Set<String> getPropertyIds(Map<String, Map<String, PropertyInfo>> componentPropertyInfoMap ) {
-    Set<String> propertyIds = new HashSet<String>();
-
-    for (Map.Entry<String, Map<String, PropertyInfo>> entry : componentPropertyInfoMap.entrySet()) {
-      propertyIds.addAll(entry.getValue().keySet());
-    }
-    return propertyIds;
-  }
-
-  public static Map<String, Map<String, PropertyInfo>> getGangliaPropertyIds(Resource.Type resourceType) {
-    return GANGLIA_PROPERTY_IDS.get(resourceType);
-  }
-
-  public static Map<String, Map<String, PropertyInfo>> getJMXPropertyIds(Resource.Type resourceType) {
-    return JMX_PROPERTY_IDS.get(resourceType);
-  }
-
-  public static Map<Resource.Type, String> getKeyPropertyIds(Resource.Type resourceType) {
-    return KEY_PROPERTY_IDS.get(resourceType);
-  }
-
-  /**
-   * Helper to get a property name from a string.
-   *
-   * @param absProperty  the fully qualified property
-   *
-   * @return the property name
-   */
-  public static String getPropertyName(String absProperty) {
-    int lastPathSep = absProperty.lastIndexOf(EXTERNAL_PATH_SEP);
-
-    return lastPathSep == -1 ? absProperty : absProperty.substring(lastPathSep + 1);
-  }
-
-  /**
-   * Helper to get a property category from a string.
-   *
-   * @param absProperty  the fully qualified property
-   *
-   * @return the property category; null if there is no category
-   */
-  public static String getPropertyCategory(String absProperty) {
-    int lastPathSep = absProperty.lastIndexOf(EXTERNAL_PATH_SEP);
-    return lastPathSep == -1 ? null : absProperty.substring(0, lastPathSep);
-  }
-
-  /**
-   * Get the set of categories for the given property ids.
-   *
-   * @param propertyIds  the property ids
-   *
-   * @return the set of categories
-   */
-  public static Set<String> getCategories(Set<String> propertyIds) {
-    Set<String> categories = new HashSet<String>();
-    for (String property : propertyIds) {
-      String category = PropertyHelper.getPropertyCategory(property);
-      while (category != null) {
-        categories.add(category);
-        category = PropertyHelper.getPropertyCategory(category);
-      }
-    }
-    return categories;
-  }
-
-  /**
-   * Get all of the property ids associated with the given request.
-   *
-   * @param request  the request
-   *
-   * @return the associated properties
-   */
-  public static Set<String> getAssociatedPropertyIds(Request request) {
-    Set<String> ids = request.getPropertyIds();
-
-    if (ids != null) {
-      ids = new HashSet<String>(ids);
-    } else {
-      ids = new HashSet<String>();
-    }
-
-    Set<Map<String, Object>> properties = request.getProperties();
-    if (properties != null) {
-      for (Map<String, Object> propertyMap : properties) {
-        ids.addAll(propertyMap.keySet());
-      }
-    }
-    return ids;
-  }
-
-  /**
-   * Get a map of all the property values keyed by property id for the given resource.
-   *
-   * @param resource  the resource
-   *
-   * @return the map of properties for the given resource
-   */
-  public static Map<String, Object> getProperties(Resource resource) {
-    Map<String, Object> properties = new HashMap<String, Object>();
-
-    Map<String, Map<String, Object>> categories = resource.getPropertiesMap();
-
-    for (Map.Entry<String, Map<String, Object>> categoryEntry : categories.entrySet()) {
-      for (Map.Entry<String, Object>  propertyEntry : categoryEntry.getValue().entrySet()) {
-        properties.put(getPropertyId(categoryEntry.getKey(), propertyEntry.getKey()), propertyEntry.getValue());
-      }
-    }
-    return properties;
-  }
-
-  /**
-   * Factory method to create a create request from the given set of property maps.
-   * Each map contains the properties to be used to create a resource.  Multiple maps in the
-   * set should result in multiple creates.
-   *
-   * @param properties   the properties associated with the request; may be null
-   */
-  public static Request getCreateRequest(Set<Map<String, Object>> properties) {
-    return new RequestImpl(null,  properties, null);
-  }
-
-  /**
-   * Factory method to create a read request from the given set of property ids.  The set of
-   * property ids represents the properties of interest for the query.
-   *
-   * @param propertyIds  the property ids associated with the request; may be null
-   */
-  public static Request getReadRequest(Set<String> propertyIds) {
-    return new RequestImpl(propertyIds,  null, null);
-  }
-
-  /**
-   * Factory method to create a read request from the given set of property ids.  The set of
-   * property ids represents the properties of interest for the query.
-   *
-   * @param propertyIds      the property ids associated with the request; may be null
-   * @param mapTemporalInfo  the temporal info
-   */
-  public static Request getReadRequest(Set<String> propertyIds, Map<String,
-      TemporalInfo> mapTemporalInfo) {
-    return new RequestImpl(propertyIds,  null, mapTemporalInfo);
-  }
-
-  /**
-   * Factory method to create a read request from the given set of property ids.  The set of
-   * property ids represents the properties of interest for the query.
-   *
-   * @param propertyIds  the property ids associated with the request; may be null
-   */
-  public static Request getReadRequest(String ... propertyIds) {
-    return new RequestImpl(new HashSet<String>(Arrays.asList(propertyIds)),  null, null);
-  }
-
-  /**
-   * Factory method to create an update request from the given map of properties.
-   * The property values in the given map are used to update the resource.
-   *
-   * @param properties   the properties associated with the request; may be null
-   */
-  public static Request getUpdateRequest(Map<String, Object> properties) {
-    return new RequestImpl(null,  Collections.singleton(properties), null);
-  }
-
-  private static Map<Resource.Type, Map<String, Map<String, PropertyInfo>>> readPropertyProviderIds(String filename) {
-    ObjectMapper mapper = new ObjectMapper();
-
-    try {
-      Map<Resource.Type, Map<String, Map<String, Metric>>> resourceMetricMap =
-          mapper.readValue(ClassLoader.getSystemResourceAsStream(filename),
-              new TypeReference<Map<Resource.Type, Map<String, Map<String, Metric>>>>() {});
-
-      Map<Resource.Type, Map<String, Map<String, PropertyInfo>>> resourceMetrics =
-          new HashMap<Resource.Type, Map<String, Map<String, PropertyInfo>>>();
-
-      for (Map.Entry<Resource.Type, Map<String, Map<String, Metric>>> resourceEntry : resourceMetricMap.entrySet()) {
-        Map<String, Map<String, PropertyInfo>> componentMetrics = new HashMap<String, Map<String, PropertyInfo>>();
-
-        for (Map.Entry<String, Map<String, Metric>> componentEntry : resourceEntry.getValue().entrySet()) {
-          Map<String, PropertyInfo> metrics = new HashMap<String, PropertyInfo>();
-
-          for (Map.Entry<String, Metric> metricEntry : componentEntry.getValue().entrySet()) {
-            String property = metricEntry.getKey();
-            Metric metric   = metricEntry.getValue();
-
-            metrics.put(property, new PropertyInfo(metric.getMetric(), metric.isTemporal(), metric.isPointInTime()));
-          }
-          componentMetrics.put(componentEntry.getKey(), metrics);
-        }
-        resourceMetrics.put(resourceEntry.getKey(), componentMetrics);
-      }
-      return resourceMetrics;
-    } catch (IOException e) {
-      throw new IllegalStateException("Can't read properties file " + filename, e);
-    }
-  }
-
-  private static Map<Resource.Type, Set<String>> readPropertyIds(String filename) {
-    ObjectMapper mapper = new ObjectMapper();
-
-    try {
-      return mapper.readValue(ClassLoader.getSystemResourceAsStream(filename), new TypeReference<Map<Resource.Type, Set<String>>>() {
-      });
-    } catch (IOException e) {
-      throw new IllegalStateException("Can't read properties file " + filename, e);
-    }
-  }
-
-  private static Map<Resource.Type, Map<Resource.Type, String>> readKeyPropertyIds(String filename) {
-    ObjectMapper mapper = new ObjectMapper();
-
-    try {
-      return mapper.readValue(ClassLoader.getSystemResourceAsStream(filename), new TypeReference<Map<Resource.Type, Map<Resource.Type, String>>>() {
-      });
-    } catch (IOException e) {
-      throw new IllegalStateException("Can't read properties file " + filename, e);
-    }
-  }
-
-  protected static class Metric {
-    private String metric;
-    private boolean pointInTime;
-    private boolean temporal;
-
-    private Metric() {
-    }
-
-    protected Metric(String metric, boolean pointInTime, boolean temporal) {
-      this.metric = metric;
-      this.pointInTime = pointInTime;
-      this.temporal = temporal;
-    }
-
-    public String getMetric() {
-      return metric;
-    }
-
-    public void setMetric(String metric) {
-      this.metric = metric;
-    }
-
-    public boolean isPointInTime() {
-      return pointInTime;
-    }
-
-    public void setPointInTime(boolean pointInTime) {
-      this.pointInTime = pointInTime;
-    }
-
-    public boolean isTemporal() {
-      return temporal;
-    }
-
-    public void setTemporal(boolean temporal) {
-      this.temporal = temporal;
-    }
-  }
-}
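How the path helpers compose and decompose property ids, sketched with a made-up metric id. Note that PropertyHelper loads its JSON definition files in static initializers, so this only runs with those resources on the classpath.

    package org.apache.ambari.server.controller.utilities;

    import java.util.Collections;

    // Sketch only: the external path separator is '/'.
    public final class PropertyHelperExample {
      public static void main(String[] args) {
        String id = PropertyHelper.getPropertyId("metrics/cpu", "load");
        System.out.println(id);                                     // metrics/cpu/load
        System.out.println(PropertyHelper.getPropertyCategory(id)); // metrics/cpu
        System.out.println(PropertyHelper.getPropertyName(id));     // load
        // getCategories walks every ancestor: [metrics/cpu, metrics]
        System.out.println(PropertyHelper.getCategories(Collections.singleton(id)));
      }
    }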
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/StreamProvider.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/StreamProvider.java
deleted file mode 100644
index 1a5d59e..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/StreamProvider.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.utilities;
-
-import java.io.IOException;
-import java.io.InputStream;
-
-/**
- * A provider of input streams from a property source.
- */
-public interface StreamProvider {
-  public InputStream readFrom(String spec) throws IOException;
-}
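One conceivable implementation treats the spec as a URL; this is a sketch for illustration, not the provider Ambari actually ships.

    package org.apache.ambari.server.controller.utilities;

    import java.io.IOException;
    import java.io.InputStream;
    import java.net.URL;

    // Sketch only: interprets the spec as a URL and opens a stream to it.
    public class ExampleUrlStreamProvider implements StreamProvider {
      @Override
      public InputStream readFrom(String spec) throws IOException {
        return new URL(spec).openStream();
      }
    }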
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/metadata/ActionMetadata.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/metadata/ActionMetadata.java
deleted file mode 100644
index de04dd4..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/metadata/ActionMetadata.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.metadata;
-
-import com.google.inject.Singleton;
-import org.apache.ambari.server.Role;
-
-import java.util.*;
-
-/**
- * Contains metadata about actions supported by services
- */
-@Singleton
-public class ActionMetadata {
-  private final Map<String, List<String>> serviceActions = new HashMap<String, List<String>>();
-  private final Map<String, String> serviceClients = new HashMap<String, String>();
-  private final Map<String, String> serviceCheckActions =
-      new HashMap<String, String>();
-
-  public ActionMetadata() {
-    fillServiceActions();
-    fillServiceClients();
-    fillServiceCheckActions();
-  }
-
-  private void fillServiceClients() {
-    serviceClients.put("hdfs"       , Role.HDFS_CLIENT.toString());
-    serviceClients.put("hbase"      , Role.HBASE_CLIENT.toString());
-    serviceClients.put("mapreduce"  , Role.MAPREDUCE_CLIENT.toString());
-    serviceClients.put("zookeeper"  , Role.ZOOKEEPER_CLIENT.toString());
-    serviceClients.put("hive"       , Role.HIVE_CLIENT.toString());
-    serviceClients.put("hcat"       , Role.HCAT.toString());
-    serviceClients.put("oozie"      , Role.OOZIE_CLIENT.toString());
-    serviceClients.put("pig"        , Role.PIG.toString());
-    serviceClients.put("sqoop"      , Role.SQOOP.toString());
-  }
-
-  private void fillServiceActions() {
-    serviceActions.put("hdfs"       , Arrays.asList(Role.HDFS_SERVICE_CHECK.toString()));
-    serviceActions.put("hbase"      , Arrays.asList(Role.HBASE_SERVICE_CHECK.toString()));
-    serviceActions.put("mapreduce"  , Arrays.asList(Role.MAPREDUCE_SERVICE_CHECK.toString()));
-    serviceActions.put("zookeeper"  , Arrays.asList(Role.ZOOKEEPER_QUORUM_SERVICE_CHECK.toString()));
-    serviceActions.put("hive"       , Arrays.asList(Role.HIVE_SERVICE_CHECK.toString()));
-    serviceActions.put("hcat"       , Arrays.asList(Role.HCAT_SERVICE_CHECK.toString()));
-    serviceActions.put("oozie"      , Arrays.asList(Role.OOZIE_SERVICE_CHECK.toString()));
-    serviceActions.put("pig"        , Arrays.asList(Role.PIG_SERVICE_CHECK.toString()));
-    serviceActions.put("sqoop"      , Arrays.asList(Role.SQOOP_SERVICE_CHECK.toString()));
-    serviceActions.put("webhcat"  , Arrays.asList(Role.WEBHCAT_SERVICE_CHECK.toString()));
-  }
-
-  private void fillServiceCheckActions() {
-    serviceCheckActions.put("hdfs", Role.HDFS_SERVICE_CHECK.toString());
-    serviceCheckActions.put("hbase", Role.HBASE_SERVICE_CHECK.toString());
-    serviceCheckActions.put("mapreduce",
-        Role.MAPREDUCE_SERVICE_CHECK.toString());
-    serviceCheckActions.put("zookeeper",
-        Role.ZOOKEEPER_QUORUM_SERVICE_CHECK.toString());
-    serviceCheckActions.put("hive", Role.HIVE_SERVICE_CHECK.toString());
-    serviceCheckActions.put("hcat", Role.HCAT_SERVICE_CHECK.toString());
-    serviceCheckActions.put("oozie", Role.OOZIE_SERVICE_CHECK.toString());
-    serviceCheckActions.put("pig", Role.PIG_SERVICE_CHECK.toString());
-    serviceCheckActions.put("sqoop", Role.SQOOP_SERVICE_CHECK.toString());
-    serviceCheckActions.put("webhcat",
-        Role.WEBHCAT_SERVICE_CHECK.toString());
-  }
-
-  public List<String> getActions(String serviceName) {
-    List<String> result = serviceActions.get(serviceName.toLowerCase());
-    if (result != null) {
-      return result;
-    } else {
-      return Collections.emptyList();
-    }
-  }
-
-  public String getClient(String serviceName) {
-    return serviceClients.get(serviceName.toLowerCase());
-  }
-
-  public String getServiceCheckAction(String serviceName) {
-    return serviceCheckActions.get(serviceName.toLowerCase());
-  }
-}
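
The getters above lower-case the service name before the map lookup, so callers may pass any casing; unknown services fall back to an empty list for actions but to null for clients and service checks. A short usage sketch (surrounding setup assumed):

ActionMetadata metadata = new ActionMetadata();

List<String> actions = metadata.getActions("HDFS");          // [HDFS_SERVICE_CHECK]
String client = metadata.getClient("MapReduce");             // MAPREDUCE_CLIENT
String check  = metadata.getServiceCheckAction("hive");      // HIVE_SERVICE_CHECK

// Unknown service: empty list for actions, null for the other two lookups.
List<String> none = metadata.getActions("no-such-service");  // []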
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/metadata/RoleCommandOrder.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/metadata/RoleCommandOrder.java
deleted file mode 100644
index 9f66f4c..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/metadata/RoleCommandOrder.java
+++ /dev/null
@@ -1,225 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.metadata;
-
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.ambari.server.Role;
-import org.apache.ambari.server.RoleCommand;
-import org.apache.ambari.server.stageplanner.RoleGraphNode;
-
-/**
- * Establishes the execution order between two role commands. This class
- * should not be used to determine the dependencies themselves.
- */
-public class RoleCommandOrder {
-
-  private static class RoleCommandPair {
-    Role role;
-    RoleCommand cmd;
-
-    public RoleCommandPair(Role _role, RoleCommand _cmd) {
-      if (_role == null || _cmd == null) {
-        throw new IllegalArgumentException("role = "+_role+", cmd = "+_cmd);
-      }
-      this.role = _role;
-      this.cmd = _cmd;
-    }
-
-    @Override
-    public int hashCode() {
-      return (role.toString() + cmd.toString()).hashCode();
-    }
-
-    @Override
-    public boolean equals(Object other) {
-      // instanceof already rejects null, so no separate null check is needed.
-      return (other instanceof RoleCommandPair)
-          && ((RoleCommandPair) other).role.equals(role)
-          && ((RoleCommandPair) other).cmd.equals(cmd);
-    }
-  }
-
-  /**
-   * Maps a blocked role command (key) to the set of role commands that
-   * must run before it (its blockers).
-   */
-  private static Map<RoleCommandPair, Set<RoleCommandPair>> dependencies = new HashMap<RoleCommandPair, Set<RoleCommandPair>>();
-
-  private static void addDependency(Role blockedRole,
-      RoleCommand blockedCommand, Role blockerRole, RoleCommand blockerCommand) {
-    RoleCommandPair rcp1 = new RoleCommandPair(blockedRole, blockedCommand);
-    RoleCommandPair rcp2 = new RoleCommandPair(blockerRole, blockerCommand);
-    if (dependencies.get(rcp1) == null) {
-      dependencies.put(rcp1, new HashSet<RoleCommandPair>());
-    }
-    dependencies.get(rcp1).add(rcp2);
-  }
-
-  public static void initialize() {
-    addDependency(Role.SECONDARY_NAMENODE, RoleCommand.START, Role.NAMENODE,
-        RoleCommand.START);
-    addDependency(Role.HBASE_MASTER, RoleCommand.START, Role.ZOOKEEPER_SERVER,
-        RoleCommand.START);
-    addDependency(Role.HBASE_MASTER, RoleCommand.START, Role.NAMENODE,
-        RoleCommand.START);
-    addDependency(Role.HBASE_MASTER, RoleCommand.START, Role.DATANODE,
-        RoleCommand.START);
-    addDependency(Role.HBASE_REGIONSERVER, RoleCommand.START,
-        Role.HBASE_MASTER, RoleCommand.START);
-    addDependency(Role.JOBTRACKER, RoleCommand.START, Role.NAMENODE,
-        RoleCommand.START);
-    addDependency(Role.JOBTRACKER, RoleCommand.START, Role.DATANODE,
-        RoleCommand.START);
-    addDependency(Role.TASKTRACKER, RoleCommand.START, Role.NAMENODE,
-        RoleCommand.START);
-    addDependency(Role.TASKTRACKER, RoleCommand.START, Role.DATANODE,
-        RoleCommand.START);
-    addDependency(Role.OOZIE_SERVER, RoleCommand.START, Role.JOBTRACKER,
-        RoleCommand.START);
-    addDependency(Role.OOZIE_SERVER, RoleCommand.START, Role.TASKTRACKER,
-        RoleCommand.START);
-    addDependency(Role.HIVE_SERVER, RoleCommand.START, Role.TASKTRACKER,
-        RoleCommand.START);
-    addDependency(Role.HIVE_SERVER, RoleCommand.START, Role.DATANODE,
-        RoleCommand.START);
-    addDependency(Role.WEBHCAT_SERVER, RoleCommand.START, Role.TASKTRACKER,
-        RoleCommand.START);
-    addDependency(Role.WEBHCAT_SERVER, RoleCommand.START, Role.DATANODE,
-        RoleCommand.START);
-    addDependency(Role.WEBHCAT_SERVER, RoleCommand.START, Role.HIVE_SERVER,
-        RoleCommand.START);
-    addDependency(Role.HIVE_METASTORE, RoleCommand.START, Role.MYSQL_SERVER,
-        RoleCommand.START);
-    addDependency(Role.HIVE_SERVER, RoleCommand.START, Role.MYSQL_SERVER,
-        RoleCommand.START);
-
-    // Service checks
-    addDependency(Role.HDFS_SERVICE_CHECK, RoleCommand.EXECUTE, Role.NAMENODE,
-        RoleCommand.START);
-    addDependency(Role.HDFS_SERVICE_CHECK, RoleCommand.EXECUTE, Role.DATANODE,
-        RoleCommand.START);
-    addDependency(Role.MAPREDUCE_SERVICE_CHECK, RoleCommand.EXECUTE,
-        Role.JOBTRACKER, RoleCommand.START);
-    addDependency(Role.MAPREDUCE_SERVICE_CHECK, RoleCommand.EXECUTE,
-        Role.TASKTRACKER, RoleCommand.START);
-    addDependency(Role.OOZIE_SERVICE_CHECK, RoleCommand.EXECUTE,
-        Role.OOZIE_SERVER, RoleCommand.START);
-    addDependency(Role.WEBHCAT_SERVICE_CHECK, RoleCommand.EXECUTE,
-        Role.WEBHCAT_SERVER, RoleCommand.START);
-    addDependency(Role.HBASE_SERVICE_CHECK, RoleCommand.EXECUTE,
-        Role.HBASE_MASTER, RoleCommand.START);
-    addDependency(Role.HBASE_SERVICE_CHECK, RoleCommand.EXECUTE,
-        Role.HBASE_REGIONSERVER, RoleCommand.START);
-    addDependency(Role.HIVE_SERVICE_CHECK, RoleCommand.EXECUTE,
-        Role.HIVE_SERVER, RoleCommand.START);
-    addDependency(Role.HIVE_SERVICE_CHECK, RoleCommand.EXECUTE,
-        Role.HIVE_METASTORE, RoleCommand.START);
-    addDependency(Role.HCAT_SERVICE_CHECK, RoleCommand.EXECUTE,
-        Role.HIVE_SERVER, RoleCommand.START);
-    addDependency(Role.PIG_SERVICE_CHECK, RoleCommand.EXECUTE,
-        Role.JOBTRACKER, RoleCommand.START);
-    addDependency(Role.PIG_SERVICE_CHECK, RoleCommand.EXECUTE,
-        Role.TASKTRACKER, RoleCommand.START);
-    addDependency(Role.SQOOP_SERVICE_CHECK, RoleCommand.EXECUTE,
-        Role.JOBTRACKER, RoleCommand.START);
-    addDependency(Role.SQOOP_SERVICE_CHECK, RoleCommand.EXECUTE,
-        Role.TASKTRACKER, RoleCommand.START);
-    addDependency(Role.ZOOKEEPER_SERVICE_CHECK, RoleCommand.EXECUTE,
-        Role.ZOOKEEPER_SERVER, RoleCommand.START);
-    addDependency(Role.ZOOKEEPER_QUORUM_SERVICE_CHECK, RoleCommand.EXECUTE,
-        Role.ZOOKEEPER_SERVER, RoleCommand.START);
-    
-    addDependency(Role.ZOOKEEPER_SERVER, RoleCommand.STOP,
-        Role.HBASE_MASTER, RoleCommand.STOP);
-    addDependency(Role.ZOOKEEPER_SERVER, RoleCommand.STOP,
-        Role.HBASE_REGIONSERVER, RoleCommand.STOP);
-    addDependency(Role.NAMENODE, RoleCommand.STOP,
-        Role.HBASE_MASTER, RoleCommand.STOP);
-    addDependency(Role.DATANODE, RoleCommand.STOP,
-        Role.HBASE_MASTER, RoleCommand.STOP);
-    addDependency(Role.HBASE_MASTER, RoleCommand.STOP,
-        Role.HBASE_REGIONSERVER, RoleCommand.STOP);
-    addDependency(Role.NAMENODE, RoleCommand.STOP,
-        Role.JOBTRACKER, RoleCommand.STOP);
-    addDependency(Role.NAMENODE, RoleCommand.STOP,
-        Role.TASKTRACKER, RoleCommand.STOP);
-    addDependency(Role.DATANODE, RoleCommand.STOP,
-        Role.JOBTRACKER, RoleCommand.STOP);
-    addDependency(Role.DATANODE, RoleCommand.STOP,
-        Role.TASKTRACKER, RoleCommand.STOP);
-  }
-
-  /**
-   * Returns the dependency order: -1 => rgn1 before rgn2, 0 => they can run
-   * in parallel, 1 => rgn2 before rgn1.
-   *
-   * @param rgn1 the first role graph node
-   * @param rgn2 the second role graph node
-   */
-  public int order(RoleGraphNode rgn1, RoleGraphNode rgn2) {
-    RoleCommandPair rcp1 = new RoleCommandPair(rgn1.getRole(),
-        rgn1.getCommand());
-    RoleCommandPair rcp2 = new RoleCommandPair(rgn2.getRole(),
-        rgn2.getCommand());
-    if ((dependencies.get(rcp1) != null)
-        && (dependencies.get(rcp1).contains(rcp2))) {
-      return 1;
-    } else if ((dependencies.get(rcp2) != null)
-        && (dependencies.get(rcp2).contains(rcp1))) {
-      return -1;
-    } else if (!rgn2.getCommand().equals(rgn1.getCommand())) {
-      return compareCommands(rgn1, rgn2);
-    }
-    return 0;
-  }
-
-  private int compareCommands(RoleGraphNode rgn1, RoleGraphNode rgn2) {
-    RoleCommand rc1 = rgn1.getCommand();
-    RoleCommand rc2 = rgn2.getCommand();
-    if (rc1.equals(rc2)) {
-      // Reaching this point means the roles have no dependencies on each other.
-      return 0;
-    }
-   
-    if ((rc1.equals(RoleCommand.START) && rc2.equals(RoleCommand.EXECUTE)) ||
-        (rc2.equals(RoleCommand.START) && rc1.equals(RoleCommand.EXECUTE))) {
-      // START and EXECUTE are independent of each other; ordering is left to role dependencies.
-      return 0;
-    }
-    
-    if (rc1.equals(RoleCommand.INSTALL)) {
-      return -1;
-    } else if (rc2.equals(RoleCommand.INSTALL)) {
-      return 1;
-    } else if (rc1.equals(RoleCommand.START) || rc1.equals(RoleCommand.EXECUTE)) {
-      return -1;
-    } else if (rc2.equals(RoleCommand.START) || rc2.equals(RoleCommand.EXECUTE)) {
-      return 1;
-    } else if (rc1.equals(RoleCommand.STOP)) {
-      return -1;
-    } else if (rc2.equals(RoleCommand.STOP)) {
-      return 1;
-    }
-    return 0;
-  }
-}
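
order() follows Comparator conventions (-1, 0, 1), so a stage planner could wrap it as below; note that the relation is not guaranteed to be transitive, so it supports pairwise ordering decisions rather than a plain sort. A sketch, assuming initialize() has been called once at startup:

final RoleCommandOrder rco = new RoleCommandOrder();
RoleCommandOrder.initialize(); // populate the static dependency map

java.util.Comparator<RoleGraphNode> byDependency =
    new java.util.Comparator<RoleGraphNode>() {
      @Override
      public int compare(RoleGraphNode a, RoleGraphNode b) {
        // -1: a runs first, 0: may run in parallel, 1: b runs first
        return rco.order(a, b);
      }
    };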
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/GuiceJpaInitializer.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/GuiceJpaInitializer.java
deleted file mode 100644
index f547b97..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/GuiceJpaInitializer.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm;
-
-import com.google.inject.Inject;
-import com.google.inject.persist.PersistService;
-
-/**
- * Must be instantiated by Guice so that the Guice-persist service is started.
- */
-public class GuiceJpaInitializer {
-
-  @Inject
-  public GuiceJpaInitializer(PersistService service) {
-    service.start();
-  }
-
-}
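
Because starting the PersistService is a side effect of construction, the class only does its job if Guice instantiates it eagerly. A wiring sketch (the module name here is for illustration; "ambari-postgres" matches PersistenceType below):

import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.persist.jpa.JpaPersistModule;

public class ExamplePersistenceModule extends AbstractModule {
  @Override
  protected void configure() {
    // Eager singleton: constructed at injector creation, which calls
    // PersistService.start() exactly once.
    bind(GuiceJpaInitializer.class).asEagerSingleton();
  }
}

// Guice.createInjector(new JpaPersistModule("ambari-postgres"),
//                      new ExamplePersistenceModule());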
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/PersistenceType.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/PersistenceType.java
deleted file mode 100644
index 39a761b..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/PersistenceType.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.orm;
-
-public enum PersistenceType {
-  IN_MEMORY("ambari-javadb"),
-  POSTGRES("ambari-postgres");
-
-  String unitName;
-
-  PersistenceType(String unitName) {
-    this.unitName = unitName;
-  }
-
-  public String getUnitName() {
-    return unitName;
-  }
-}
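
The enum ties each persistence mode to a JPA persistence-unit name defined in persistence.xml. A sketch of how a configured mode might select the unit (the property name is assumed for illustration):

PersistenceType type =
    "in-memory".equals(System.getProperty("persistence.type"))
        ? PersistenceType.IN_MEMORY
        : PersistenceType.POSTGRES;
String unitName = type.getUnitName(); // "ambari-javadb" or "ambari-postgres"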
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java
deleted file mode 100644
index 95ece0f..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.dao;
-
-import java.util.List;
-
-import javax.persistence.EntityManager;
-import javax.persistence.NoResultException;
-import javax.persistence.TypedQuery;
-
-import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
-import org.apache.ambari.server.orm.entities.ClusterEntity;
-
-import com.google.inject.Inject;
-import com.google.inject.Provider;
-import com.google.inject.persist.Transactional;
-
-public class ClusterDAO {
-
-  @Inject
-  Provider<EntityManager> entityManagerProvider;
-
-  /**
-   * Looks up a cluster by ID.
-   * @param id ID of the cluster
-   * @return the found entity, or null if none exists
-   */
-  @Transactional
-  public ClusterEntity findById(long id) {
-    return entityManagerProvider.get().find(ClusterEntity.class, id);
-  }
-
-  @Transactional
-  public ClusterEntity findByName(String clusterName) {
-    TypedQuery<ClusterEntity> query = entityManagerProvider.get().createNamedQuery("clusterByName", ClusterEntity.class);
-    query.setParameter("clusterName", clusterName);
-    try {
-      return query.getSingleResult();
-    } catch (NoResultException ignored) {
-      return null;
-    }
-  }
-
-  @Transactional
-  public List<ClusterEntity> findAll() {
-    TypedQuery<ClusterEntity> query = entityManagerProvider.get().createNamedQuery("allClusters", ClusterEntity.class);
-    // getResultList() returns an empty list rather than throwing
-    // NoResultException, so the result can be returned directly and the
-    // caller never sees null.
-    return query.getResultList();
-  }
-
-  /**
-   * Create Cluster entity in Database
-   * @param clusterEntity entity to create
-   */
-  @Transactional
-  public void create(ClusterEntity clusterEntity) {
-    entityManagerProvider.get().persist(clusterEntity);
-  }
-
-  /**
-   * Creates a cluster configuration in the DB.
-   */
-  @Transactional
-  public void createConfig(ClusterConfigEntity entity) {
-    entityManagerProvider.get().persist(entity);
-  }
-
-  /**
-   * Retrieve entity data from DB
-   * @param clusterEntity entity to refresh
-   */
-  @Transactional
-  public void refresh(ClusterEntity clusterEntity) {
-    entityManagerProvider.get().refresh(clusterEntity);
-  }
-
-  @Transactional
-  public ClusterEntity merge(ClusterEntity clusterEntity) {
-    return entityManagerProvider.get().merge(clusterEntity);
-  }
-
-  @Transactional
-  public void remove(ClusterEntity clusterEntity) {
-    entityManagerProvider.get().remove(merge(clusterEntity));
-  }
-
-  @Transactional
-  public void removeByName(String clusterName) {
-    remove(findByName(clusterName));
-  }
-
-  @Transactional
-  public void removeByPK(long id) {
-    remove(findById(id));
-  }
-
-}
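
All of the DAOs in this package share the same shape: a Provider<EntityManager> injected by Guice, @Transactional methods, and NoResultException converted to null on single-result lookups. A usage sketch (injector setup assumed elsewhere):

ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);

ClusterEntity cluster = clusterDAO.findByName("c1");
if (cluster == null) {
  // findByName swallows NoResultException and returns null, so absence
  // must be handled explicitly by the caller.
}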
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterServiceDAO.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterServiceDAO.java
deleted file mode 100644
index cc05517..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterServiceDAO.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.dao;
-
-import com.google.inject.Inject;
-import com.google.inject.Provider;
-import com.google.inject.persist.Transactional;
-import org.apache.ambari.server.orm.entities.ClusterServiceEntity;
-import org.apache.ambari.server.orm.entities.ClusterServiceEntityPK;
-
-import javax.persistence.EntityManager;
-import javax.persistence.NoResultException;
-import javax.persistence.TypedQuery;
-
-public class ClusterServiceDAO {
-  @Inject
-  Provider<EntityManager> entityManagerProvider;
-
-  @Transactional
-  public ClusterServiceEntity findByPK(ClusterServiceEntityPK clusterServiceEntityPK) {
-    return entityManagerProvider.get().find(ClusterServiceEntity.class, clusterServiceEntityPK);
-  }
-
-  @Transactional
-  public ClusterServiceEntity findByClusterAndServiceNames(String clusterName, String serviceName) {
-    TypedQuery<ClusterServiceEntity> query = entityManagerProvider.get()
-            .createNamedQuery("clusterServiceByClusterAndServiceNames", ClusterServiceEntity.class);
-    query.setParameter("clusterName", clusterName);
-    query.setParameter("serviceName", serviceName);
-
-    try {
-      return query.getSingleResult();
-    } catch (NoResultException ignored) {
-      return null;
-    }
-  }
-
-  @Transactional
-  public void refresh(ClusterServiceEntity clusterServiceEntity) {
-    entityManagerProvider.get().refresh(clusterServiceEntity);
-  }
-
-  @Transactional
-  public void create(ClusterServiceEntity clusterServiceEntity) {
-    entityManagerProvider.get().persist(clusterServiceEntity);
-  }
-
-  @Transactional
-  public ClusterServiceEntity merge(ClusterServiceEntity clusterServiceEntity) {
-    return entityManagerProvider.get().merge(clusterServiceEntity);
-  }
-
-  @Transactional
-  public void remove(ClusterServiceEntity clusterServiceEntity) {
-    entityManagerProvider.get().remove(merge(clusterServiceEntity));
-  }
-
-  @Transactional
-  public void removeByPK(ClusterServiceEntityPK clusterServiceEntityPK) {
-    remove(findByPK(clusterServiceEntityPK));
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterStateDAO.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterStateDAO.java
deleted file mode 100644
index b22e47f..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterStateDAO.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.dao;
-
-import com.google.inject.Inject;
-import com.google.inject.Provider;
-import com.google.inject.persist.Transactional;
-import org.apache.ambari.server.orm.entities.ClusterStateEntity;
-
-import javax.persistence.EntityManager;
-
-public class ClusterStateDAO {
-  @Inject
-  Provider<EntityManager> entityManagerProvider;
-
-  @Transactional
-  public ClusterStateEntity findByPK(String clusterName) {
-    return entityManagerProvider.get().find(ClusterStateEntity.class, clusterName);
-  }
-
-  @Transactional
-  public void refresh(ClusterStateEntity clusterStateEntity) {
-    entityManagerProvider.get().refresh(clusterStateEntity);
-  }
-
-  @Transactional
-  public void create(ClusterStateEntity clusterStateEntity) {
-    entityManagerProvider.get().persist(clusterStateEntity);
-  }
-
-  @Transactional
-  public ClusterStateEntity merge(ClusterStateEntity clusterStateEntity) {
-    return entityManagerProvider.get().merge(clusterStateEntity);
-  }
-
-  @Transactional
-  public void remove(ClusterStateEntity clusterStateEntity) {
-    entityManagerProvider.get().remove(merge(clusterStateEntity));
-  }
-
-  @Transactional
-  public void removeByPK(String clusterName) {
-    remove(findByPK(clusterName));
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ComponentConfigMappingDAO.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ComponentConfigMappingDAO.java
deleted file mode 100644
index 1b6d067..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ComponentConfigMappingDAO.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.dao;
-
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-
-import com.google.inject.Inject;
-import com.google.inject.Provider;
-import com.google.inject.persist.Transactional;
-
-import javax.persistence.EntityManager;
-import javax.persistence.TypedQuery;
-
-import org.apache.ambari.server.orm.entities.ComponentConfigMappingEntity;
-
-public class ComponentConfigMappingDAO {
-
-  @Inject
-  Provider<EntityManager> entityManagerProvider;
-  @Inject
-  DaoUtils daoUtils;
-
-  @Transactional
-  public List<ComponentConfigMappingEntity> findByType(
-      Collection<String> configTypes) {
-    TypedQuery<ComponentConfigMappingEntity> query =
-        entityManagerProvider.get().createQuery(
-            "SELECT config FROM ComponentConfigMappingEntity config"
-            + " WHERE config.configType IN ?1",
-        ComponentConfigMappingEntity.class);
-    return daoUtils.selectList(query, configTypes);
-  }
-
-  @Transactional
-  public List<ComponentConfigMappingEntity> findByComponentAndType(long clusterId, String serviceName, String componentName,
-                                                                   Collection<String> configTypes) {
-    if (configTypes.isEmpty()) {
-      return Collections.emptyList();
-    }
-    TypedQuery<ComponentConfigMappingEntity> query = entityManagerProvider.get().createQuery("SELECT config " +
-        "FROM ComponentConfigMappingEntity config " +
-        "WHERE " +
-        "config.clusterId = ?1 " +
-        "AND config.serviceName = ?2 " +
-        "AND config.componentName = ?3 " +
-        "AND config.configType IN ?4",
-        ComponentConfigMappingEntity.class);
-    return daoUtils.selectList(query, clusterId, serviceName, componentName, configTypes);
-  }
-
-  @Transactional
-  public void refresh(
-      ComponentConfigMappingEntity componentConfigMappingEntity) {
-    entityManagerProvider.get().refresh(componentConfigMappingEntity);
-  }
-
-  @Transactional
-  public ComponentConfigMappingEntity merge(
-      ComponentConfigMappingEntity componentConfigMappingEntity) {
-    return entityManagerProvider.get().merge(
-        componentConfigMappingEntity);
-  }
-
-  @Transactional
-  public void remove(
-      ComponentConfigMappingEntity componentConfigMappingEntity) {
-    entityManagerProvider.get().remove(merge(componentConfigMappingEntity));
-  }
-
-  @Transactional
-  public void removeByType(Collection<String> configTypes) {
-    for (ComponentConfigMappingEntity entity : findByType(configTypes)) {
-      remove(entity);
-    }
-  }
-
-}
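
The empty-collection guard in findByComponentAndType matters: a JPQL IN predicate over an empty list is rejected by most JPA providers, so the method short-circuits instead of issuing the query. Sketch:

// Returns an empty list without touching the database.
List<ComponentConfigMappingEntity> none =
    dao.findByComponentAndType(1L, "HDFS", "NAMENODE",
        Collections.<String>emptyList());

Note that findByType has no such guard, so its callers are expected to pass a non-empty collection.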
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/DaoUtils.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/DaoUtils.java
deleted file mode 100644
index 9e68418..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/DaoUtils.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.dao;
-
-import com.google.inject.Singleton;
-
-import javax.persistence.NoResultException;
-import javax.persistence.Query;
-import javax.persistence.TypedQuery;
-import java.util.Collections;
-import java.util.List;
-
-@Singleton
-class DaoUtils {
-
-  public <T> List<T> selectList(TypedQuery<T> query, Object... parameters) {
-    setParameters(query, parameters);
-    try {
-      return query.getResultList();
-    } catch (NoResultException ignored) {
-      return Collections.emptyList();
-    }
-  }
-
-  public <T> T selectSingle(TypedQuery<T> query, Object... parameters) {
-    setParameters(query, parameters);
-    try {
-      return query.getSingleResult();
-    } catch (NoResultException ignored) {
-      return null;
-    }
-  }
-
-  public int executeUpdate(Query query, Object... parameters) {
-    setParameters(query, parameters);
-    return query.executeUpdate();
-  }
-
-  public void setParameters(Query query, Object... parameters) {
-    for (int i = 0; i < parameters.length; i++) {
-      query.setParameter(i+1, parameters[i]);
-    }
-  }
-}
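
DaoUtils centralizes positional-parameter binding: setParameters assigns parameters[0] to ?1, parameters[1] to ?2, and so on, which is why the JPQL strings throughout this package use ?1-style placeholders. A sketch (entity and field names assumed from the surrounding DAOs):

TypedQuery<HostEntity> q = entityManager.createQuery(
    "SELECT h FROM HostEntity h WHERE h.hostName = ?1", HostEntity.class);
// "host1" is bound to ?1; extra varargs would bind to ?2, ?3, ...
HostEntity host = daoUtils.selectSingle(q, "host1");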
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExecutionCommandDAO.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExecutionCommandDAO.java
deleted file mode 100644
index 12f56e2..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExecutionCommandDAO.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.dao;
-
-import com.google.inject.Inject;
-import com.google.inject.Provider;
-import com.google.inject.persist.Transactional;
-import org.apache.ambari.server.orm.entities.ExecutionCommandEntity;
-
-import javax.persistence.EntityManager;
-
-public class ExecutionCommandDAO {
-
-  @Inject
-  Provider<EntityManager> entityManagerProvider;
-
-  @Transactional
-  public ExecutionCommandEntity findByPK(long taskId) {
-    return entityManagerProvider.get().find(ExecutionCommandEntity.class, taskId);
-  }
-
-  @Transactional
-  public void create(ExecutionCommandEntity executionCommandEntity) {
-    entityManagerProvider.get().persist(executionCommandEntity);
-  }
-
-  @Transactional
-  public ExecutionCommandEntity merge(ExecutionCommandEntity executionCommandEntity) {
-    return entityManagerProvider.get().merge(executionCommandEntity);
-  }
-
-  @Transactional
-  public void remove(ExecutionCommandEntity executionCommandEntity) {
-    entityManagerProvider.get().remove(merge(executionCommandEntity));
-  }
-
-  @Transactional
-  public void removeByPK(long taskId) {
-    remove(findByPK(taskId));
-  }
-}
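
remove(merge(entity)) is the recurring deletion idiom in these DAOs: EntityManager.remove() accepts only managed instances, so a detached entity is first re-attached with merge() and the managed copy is removed. Sketch:

ExecutionCommandEntity cmd = dao.findByPK(42L);
// ... the transaction that loaded cmd has ended; cmd is now detached ...
dao.remove(cmd); // internally: em.remove(em.merge(cmd))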
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentConfigMappingDAO.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentConfigMappingDAO.java
deleted file mode 100644
index 7ed22bf..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentConfigMappingDAO.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.dao;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-
-import com.google.inject.Inject;
-import com.google.inject.Provider;
-import com.google.inject.persist.Transactional;
-
-import javax.persistence.EntityManager;
-import javax.persistence.TypedQuery;
-
-import org.apache.ambari.server.orm.entities.HostComponentConfigMappingEntity;
-
-public class HostComponentConfigMappingDAO {
-
-  @Inject
-  Provider<EntityManager> entityManagerProvider;
-  @Inject
-  DaoUtils daoUtils;
-
-  @Transactional
-  public List<HostComponentConfigMappingEntity> findByType(
-      Collection<String> configTypes) {
-    TypedQuery<HostComponentConfigMappingEntity> query =
-        entityManagerProvider.get().createQuery(
-            "SELECT config FROM HostComponentConfigMappingEntity config"
-            + " WHERE config.configType IN ?1",
-        HostComponentConfigMappingEntity.class);
-    return daoUtils.selectList(query, configTypes);
-  }
-
-  @Transactional
-  public void refresh(
-      HostComponentConfigMappingEntity componentConfigMappingEntity) {
-    entityManagerProvider.get().refresh(componentConfigMappingEntity);
-  }
-
-  @Transactional
-  public HostComponentConfigMappingEntity merge(
-      HostComponentConfigMappingEntity componentConfigMappingEntity) {
-    return entityManagerProvider.get().merge(
-        componentConfigMappingEntity);
-  }
-
-  @Transactional
-  public void remove(
-      HostComponentConfigMappingEntity componentConfigMappingEntity) {
-    entityManagerProvider.get().remove(merge(componentConfigMappingEntity));
-  }
-
-  @Transactional
-  public void removeByType(Collection<String> configTypes) {
-    for (HostComponentConfigMappingEntity entity : findByType(configTypes)) {
-      remove(entity);
-    }
-  }
-
-  @Transactional
-  public List<HostComponentConfigMappingEntity> findByHostComponentAndType(
-      long clusterId, String serviceName, String componentName,
-      String hostname,
-      Collection<String> configTypes) {
-    if (configTypes.isEmpty()) {
-      return new ArrayList<HostComponentConfigMappingEntity>();
-    }    
-    TypedQuery<HostComponentConfigMappingEntity> query =
-        entityManagerProvider.get().createQuery(
-            "SELECT config FROM HostComponentConfigMappingEntity config"
-            + " WHERE "
-            + " config.clusterId = ?1"
-            + " AND config.serviceName = ?2"
-            + " AND config.componentName = ?3"
-            + " AND config.hostName = ?4"
-            + " AND config.configType IN ?5",
-        HostComponentConfigMappingEntity.class);
-    return daoUtils.selectList(query, clusterId, serviceName,
-        componentName, hostname, configTypes);
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentDesiredConfigMappingDAO.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentDesiredConfigMappingDAO.java
deleted file mode 100644
index 1921ed3..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentDesiredConfigMappingDAO.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.dao;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-
-import com.google.inject.Inject;
-import com.google.inject.Provider;
-import com.google.inject.persist.Transactional;
-
-import javax.persistence.EntityManager;
-import javax.persistence.TypedQuery;
-
-import org.apache.ambari.server.orm.entities.HostComponentDesiredConfigMappingEntity;
-
-public class HostComponentDesiredConfigMappingDAO {
-
-  @Inject
-  Provider<EntityManager> entityManagerProvider;
-  @Inject
-  DaoUtils daoUtils;
-
-  @Transactional
-  public List<HostComponentDesiredConfigMappingEntity> findByType(
-      Collection<String> configTypes) {
-    TypedQuery<HostComponentDesiredConfigMappingEntity> query =
-        entityManagerProvider.get().createQuery(
-            "SELECT config FROM HostComponentDesiredConfigMappingEntity config"
-            + " WHERE config.configType IN ?1",
-        HostComponentDesiredConfigMappingEntity.class);
-    return daoUtils.selectList(query, configTypes);
-  }
-
-  @Transactional
-  public List<HostComponentDesiredConfigMappingEntity> findByHostComponentAndType(
-      long clusterId, String serviceName, String componentName,
-      String hostname,
-      Collection<String> configTypes) {
-    if (configTypes.isEmpty()) {
-      return new ArrayList<HostComponentDesiredConfigMappingEntity>();
-    }
-    TypedQuery<HostComponentDesiredConfigMappingEntity> query =
-        entityManagerProvider.get().createQuery(
-            "SELECT config FROM HostComponentDesiredConfigMappingEntity config"
-                + " WHERE "
-                + " config.clusterId = ?1"
-                + " AND config.serviceName = ?2"
-                + " AND config.componentName = ?3"
-                + " AND config.hostName = ?4"
-                + " AND config.configType IN ?5",
-            HostComponentDesiredConfigMappingEntity.class);
-    return daoUtils.selectList(query, clusterId, serviceName,
-        componentName, hostname, configTypes);
-  }
-
-  @Transactional
-  public void refresh(
-      HostComponentDesiredConfigMappingEntity componentConfigMappingEntity) {
-    entityManagerProvider.get().refresh(componentConfigMappingEntity);
-  }
-
-  @Transactional
-  public HostComponentDesiredConfigMappingEntity merge(
-      HostComponentDesiredConfigMappingEntity componentConfigMappingEntity) {
-    return entityManagerProvider.get().merge(
-        componentConfigMappingEntity);
-  }
-
-  @Transactional
-  public void remove(
-      HostComponentDesiredConfigMappingEntity componentConfigMappingEntity) {
-    entityManagerProvider.get().remove(merge(componentConfigMappingEntity));
-  }
-
-  @Transactional
-  public void removeByType(Collection<String> configTypes) {
-    for (HostComponentDesiredConfigMappingEntity entity : findByType(configTypes)) {
-      remove(entity);
-    }
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentDesiredStateDAO.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentDesiredStateDAO.java
deleted file mode 100644
index 399ecc6..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentDesiredStateDAO.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.dao;
-
-import com.google.inject.Inject;
-import com.google.inject.Provider;
-import com.google.inject.persist.Transactional;
-import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
-import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntityPK;
-
-import javax.persistence.EntityManager;
-
-public class HostComponentDesiredStateDAO {
-  @Inject
-  Provider<EntityManager> entityManagerProvider;
-
-  @Transactional
-  public HostComponentDesiredStateEntity findByPK(HostComponentDesiredStateEntityPK primaryKey) {
-    return entityManagerProvider.get().find(HostComponentDesiredStateEntity.class, primaryKey);
-  }
-
-  @Transactional
-  public void refresh(HostComponentDesiredStateEntity hostComponentDesiredStateEntity) {
-    entityManagerProvider.get().refresh(hostComponentDesiredStateEntity);
-  }
-
-  @Transactional
-  public void create(HostComponentDesiredStateEntity hostComponentDesiredStateEntity) {
-    entityManagerProvider.get().persist(hostComponentDesiredStateEntity);
-  }
-
-  @Transactional
-  public HostComponentDesiredStateEntity merge(HostComponentDesiredStateEntity hostComponentDesiredStateEntity) {
-    return entityManagerProvider.get().merge(hostComponentDesiredStateEntity);
-  }
-
-  @Transactional
-  public void remove(HostComponentDesiredStateEntity hostComponentDesiredStateEntity) {
-    entityManagerProvider.get().remove(merge(hostComponentDesiredStateEntity));
-  }
-
-  @Transactional
-  public void removeByPK(HostComponentDesiredStateEntityPK primaryKey) {
-    remove(findByPK(primaryKey));
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentStateDAO.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentStateDAO.java
deleted file mode 100644
index dc23476..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostComponentStateDAO.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.dao;
-
-import com.google.inject.Inject;
-import com.google.inject.Provider;
-import com.google.inject.persist.Transactional;
-import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
-import org.apache.ambari.server.orm.entities.HostComponentStateEntityPK;
-
-import javax.persistence.EntityManager;
-
-public class HostComponentStateDAO {
-  @Inject
-  Provider<EntityManager> entityManagerProvider;
-
-  @Transactional
-  public HostComponentStateEntity findByPK(HostComponentStateEntityPK primaryKey) {
-    return entityManagerProvider.get().find(HostComponentStateEntity.class, primaryKey);
-  }
-
-  @Transactional
-  public void refresh(HostComponentStateEntity hostComponentStateEntity) {
-    entityManagerProvider.get().refresh(hostComponentStateEntity);
-  }
-
-  @Transactional
-  public void create(HostComponentStateEntity hostComponentStateEntity) {
-    entityManagerProvider.get().persist(hostComponentStateEntity);
-  }
-
-  @Transactional
-  public HostComponentStateEntity merge(HostComponentStateEntity hostComponentStateEntity) {
-    return entityManagerProvider.get().merge(hostComponentStateEntity);
-  }
-
-  @Transactional
-  public void remove(HostComponentStateEntity hostComponentStateEntity) {
-    entityManagerProvider.get().remove(merge(hostComponentStateEntity));
-  }
-
-  @Transactional
-  public void removeByPK(HostComponentStateEntityPK primaryKey) {
-    remove(findByPK(primaryKey));
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostDAO.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostDAO.java
deleted file mode 100644
index 154388d..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostDAO.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.dao;
-
-import com.google.inject.Inject;
-import com.google.inject.Provider;
-import com.google.inject.persist.Transactional;
-import org.apache.ambari.server.orm.entities.HostEntity;
-import org.apache.ambari.server.orm.entities.StageEntity;
-
-import javax.persistence.EntityManager;
-import javax.persistence.NoResultException;
-import javax.persistence.TypedQuery;
-import java.util.Collections;
-import java.util.List;
-
-public class HostDAO {
-
-  @Inject
-  Provider<EntityManager> entityManagerProvider;
-
-  @Transactional
-  public HostEntity findByName(String hostName) {
-    return entityManagerProvider.get().find(HostEntity.class, hostName);
-  }
-
-  @Transactional
-  public List<HostEntity> findAll() {
-    TypedQuery<HostEntity> query = entityManagerProvider.get().createQuery("SELECT host FROM HostEntity host", HostEntity.class);
-    try {
-      return query.getResultList();
-    } catch (NoResultException e) {
-      return Collections.emptyList();
-    }
-  }
-
-  @Transactional
-  public List<HostEntity> findByStage(StageEntity stageEntity) {
-    TypedQuery<HostEntity> query = entityManagerProvider.get().createQuery(
-        "SELECT DISTINCT host FROM HostEntity host JOIN host.hostRoleCommandEntities command JOIN command.stage stage " +
-            "WHERE stage=:stageEntity", HostEntity.class);
-    query.setParameter("stageEntity", stageEntity);
-    try {
-      return query.getResultList();
-    } catch (NoResultException e) {
-      return Collections.emptyList();
-    }
-  }
-
-  /**
-   * Refreshes entity state from database
-   * @param hostEntity entity to refresh
-   */
-  @Transactional
-  public void refresh(HostEntity hostEntity) {
-    entityManagerProvider.get().refresh(hostEntity);
-  }
-
-  @Transactional
-  public void create(HostEntity hostEntity) {
-    entityManagerProvider.get().persist(hostEntity);
-  }
-
-  @Transactional
-  public HostEntity merge(HostEntity hostEntity) {
-    return entityManagerProvider.get().merge(hostEntity);
-  }
-
-  @Transactional
-  public void remove(HostEntity hostEntity) {
-    entityManagerProvider.get().remove(merge(hostEntity));
-  }
-
-  @Transactional
-  public void removeByName(String hostName) {
-    remove(findByName(hostName));
-  }
-
-}
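
findByStage is the one query in this file written with a named parameter and relationship joins rather than the positional (?1) style used with DaoUtils elsewhere. A usage sketch (how the StageEntity is obtained is assumed):

List<HostEntity> hosts = hostDAO.findByStage(stage);
// Each returned host has at least one HostRoleCommand in the given stage,
// and the DISTINCT keyword ensures a host appears only once.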
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostRoleCommandDAO.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostRoleCommandDAO.java
deleted file mode 100644
index d9c5ee9..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostRoleCommandDAO.java
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.dao;
-
-import com.google.inject.Inject;
-import com.google.inject.Provider;
-import com.google.inject.persist.Transactional;
-import org.apache.ambari.server.Role;
-import org.apache.ambari.server.actionmanager.HostRoleStatus;
-import org.apache.ambari.server.orm.entities.HostEntity;
-import org.apache.ambari.server.orm.entities.HostRoleCommandEntity;
-import org.apache.ambari.server.orm.entities.StageEntity;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.persistence.EntityManager;
-import javax.persistence.Query;
-import javax.persistence.TypedQuery;
-
-import java.util.Collection;
-import java.util.List;
-
-public class HostRoleCommandDAO {
-
-  @Inject
-  Provider<EntityManager> entityManagerProvider;
-  @Inject
-  DaoUtils daoUtils;
-  private static final Logger LOG = LoggerFactory.getLogger(HostRoleCommandDAO.class);
-
-  @Transactional
-  public HostRoleCommandEntity findByPK(long taskId) {
-    return entityManagerProvider.get().find(HostRoleCommandEntity.class, taskId);
-  }
-
-  @Transactional
-  public List<HostRoleCommandEntity> findByPKs(Collection<Long> taskIds) {
-    TypedQuery<HostRoleCommandEntity> query = entityManagerProvider.get().createQuery(
-        "SELECT task FROM HostRoleCommandEntity task WHERE task.taskId IN ?1 " +
-            "ORDER BY task.taskId",
-        HostRoleCommandEntity.class);
-    return daoUtils.selectList(query, taskIds);
-  }
-
-  @Transactional
-  public List<HostRoleCommandEntity> findByRequestIds(Collection<Long> requestIds) {
-    TypedQuery<HostRoleCommandEntity> query = entityManagerProvider.get().createQuery(
-        "SELECT task FROM HostRoleCommandEntity task " +
-            "WHERE task.requestId IN ?1 " +
-            "ORDER BY task.taskId", HostRoleCommandEntity.class);
-    return daoUtils.selectList(query, requestIds);
-  }
-
-  @Transactional
-  public List<HostRoleCommandEntity> findByRequestAndTaskIds(Collection<Long> requestIds, Collection<Long> taskIds) {
-    TypedQuery<HostRoleCommandEntity> query = entityManagerProvider.get().createQuery(
-        "SELECT DISTINCT task FROM HostRoleCommandEntity task " +
-            "WHERE task.requestId IN ?1 OR task.taskId IN ?2 " +
-            "ORDER BY task.taskId", HostRoleCommandEntity.class
-    );
-    return daoUtils.selectList(query, requestIds, taskIds);
-  }
-
-  @Transactional
-  public List<HostRoleCommandEntity> findSortedCommandsByStageAndHost(StageEntity stageEntity, HostEntity hostEntity) {
-    TypedQuery<HostRoleCommandEntity> query = entityManagerProvider.get().createQuery("SELECT hostRoleCommand " +
-        "FROM HostRoleCommandEntity hostRoleCommand " +
-        "WHERE hostRoleCommand.stage=?1 AND hostRoleCommand.host=?2 " +
-        "ORDER BY hostRoleCommand.taskId", HostRoleCommandEntity.class);
-    return daoUtils.selectList(query, stageEntity, hostEntity);
-  }
-
-  @Transactional
-  public List<HostRoleCommandEntity> findByHostRole(String hostName, long requestId, long stageId, Role role) {
-    TypedQuery<HostRoleCommandEntity> query = entityManagerProvider.get().createQuery("SELECT command " +
-        "FROM HostRoleCommandEntity command " +
-        "WHERE command.hostName=?1 AND command.requestId=?2 " +
-        "AND command.stageId=?3 AND command.role=?4 " +
-        "ORDER BY command.taskId", HostRoleCommandEntity.class);
-
-    return daoUtils.selectList(query, hostName, requestId, stageId, role);
-  }
-
-  @Transactional
-  public List<Long> getRequests() {
-    String queryStr = "SELECT DISTINCT command.requestId " +
-        "FROM HostRoleCommandEntity command ORDER BY command.requestId DESC";
-    TypedQuery<Long> query = entityManagerProvider.get().createQuery(queryStr,
-        Long.class);
-    query.setMaxResults(20);
-    return daoUtils.selectList(query);
-  }
-
-  @Transactional
-  public int updateStatusByRequestId(long requestId, HostRoleStatus target, Collection<HostRoleStatus> sources) {
-    Query query = entityManagerProvider.get().createQuery("UPDATE HostRoleCommandEntity command " +
-        "SET command.status=?1 " +
-        "WHERE command.requestId=?2 AND command.status IN ?3");
-
-    return daoUtils.executeUpdate(query, target, requestId, sources);
-  }
-
-  @Transactional
-  public List<HostRoleCommandEntity> findByRequest(long requestId) {
-    TypedQuery<HostRoleCommandEntity> query = entityManagerProvider.get().createQuery("SELECT command " +
-        "FROM HostRoleCommandEntity command " +
-        "WHERE command.requestId=?1 ORDER BY command.taskId", HostRoleCommandEntity.class);
-    return daoUtils.selectList(query, requestId);
-  }
-
-  @Transactional
-  public void create(HostRoleCommandEntity entity) {
-    entityManagerProvider.get().persist(entity);
-  }
-
-  @Transactional
-  public HostRoleCommandEntity merge(HostRoleCommandEntity entity) {
-    return entityManagerProvider.get().merge(entity);
-  }
-
-  @Transactional
-  public void remove(HostRoleCommandEntity entity) {
-    entityManagerProvider.get().remove(merge(entity));
-  }
-
-  @Transactional
-  public void removeByPK(int taskId) {
-    remove(findByPK(taskId));
-  }
-
-  @Transactional
-  public List<Long> getRequestsByTaskStatus(
-      Collection<HostRoleStatus> statuses, boolean match) {
-    String queryStr = "SELECT DISTINCT command.requestId "
-        + " FROM HostRoleCommandEntity command WHERE "
-        + " command.status";
-    if (!match) {
-      queryStr += " NOT";
-    }
-    queryStr += " IN ?1"
-        + " ORDER BY command.requestId DESC";
-    TypedQuery<Long> query = entityManagerProvider.get().createQuery(queryStr,
-        Long.class);
-    return daoUtils.selectList(query, statuses);
-  }
-
-}
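
Note: the DAO methods above hand query execution to a DaoUtils helper that binds JPQL positional parameters (?1, ?2, ...) in argument order; its source is not part of this hunk. A minimal sketch of what such a helper plausibly looks like, assuming the selectList(query, params...) signature used throughout:

    import java.util.List;
    import javax.persistence.TypedQuery;

    // Hypothetical reconstruction of the DaoUtils.selectList helper invoked
    // above; the real implementation is not shown in this diff.
    public class DaoUtilsSketch {
      public <T> List<T> selectList(TypedQuery<T> query, Object... params) {
        int position = 1; // JPQL positional parameters are 1-based
        for (Object param : params) {
          query.setParameter(position++, param);
        }
        return query.getResultList();
      }
    }
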
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostStateDAO.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostStateDAO.java
deleted file mode 100644
index 6a53724..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostStateDAO.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.dao;
-
-import com.google.inject.Inject;
-import com.google.inject.Provider;
-import com.google.inject.persist.Transactional;
-import org.apache.ambari.server.orm.entities.HostStateEntity;
-
-import javax.persistence.EntityManager;
-
-public class HostStateDAO {
-  @Inject
-  Provider<EntityManager> entityManagerProvider;
-
-  @Transactional
-  public HostStateEntity findByHostName(String hostName) {
-    return entityManagerProvider.get().find(HostStateEntity.class, hostName);
-  }
-
-  @Transactional
-  public void refresh(HostStateEntity hostStateEntity) {
-    entityManagerProvider.get().refresh(hostStateEntity);
-  }
-
-  @Transactional
-  public void create(HostStateEntity hostStateEntity) {
-    entityManagerProvider.get().persist(hostStateEntity);
-  }
-
-  @Transactional
-  public HostStateEntity merge(HostStateEntity hostStateEntity) {
-    return entityManagerProvider.get().merge(hostStateEntity);
-  }
-
-  @Transactional
-  public void remove(HostStateEntity hostStateEntity) {
-    entityManagerProvider.get().remove(merge(hostStateEntity));
-  }
-
-  @Transactional
-  public void removeByHostName(String hostName) {
-    remove(findByHostName(hostName));
-  }
-
-}
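
Note: every remove(...) in these DAOs first calls merge(...). EntityManager.remove only accepts managed instances, so merging reattaches a possibly detached entity to the current persistence context before deletion. The idiom, isolated as a sketch:

    import javax.persistence.EntityManager;

    // The merge-then-remove idiom used by the DAOs in this diff: remove()
    // throws IllegalArgumentException for detached instances, so the entity
    // is reattached first and the managed copy is deleted.
    public class RemoveDetachedExample {
      public static <T> void removeDetached(EntityManager em, T detached) {
        T managed = em.merge(detached); // reattach to the persistence context
        em.remove(managed);
      }
    }
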
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/KeyValueDAO.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/KeyValueDAO.java
deleted file mode 100644
index dfc4863..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/KeyValueDAO.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.dao;
-
-import com.google.inject.Inject;
-import com.google.inject.Provider;
-import com.google.inject.persist.Transactional;
-import org.apache.ambari.server.orm.entities.KeyValueEntity;
-
-import javax.persistence.EntityManager;
-import javax.persistence.TypedQuery;
-import java.util.Collection;
-
-public class KeyValueDAO {
-
-  @Inject
-  Provider<EntityManager> entityManagerProvider;
-  @Inject
-  DaoUtils daoUtils;
-
-  @Transactional
-  public KeyValueEntity findByKey(String key) {
-    return entityManagerProvider.get().find(KeyValueEntity.class, key);
-  }
-
-  @Transactional
-  public Collection<KeyValueEntity> findAll() {
-    TypedQuery<KeyValueEntity> query =
-        entityManagerProvider.get().createQuery("SELECT keyValue FROM KeyValueEntity keyValue", KeyValueEntity.class);
-    return daoUtils.selectList(query);
-  }
-
-  @Transactional
-  public void refresh(KeyValueEntity keyValueEntity) {
-    entityManagerProvider.get().refresh(keyValueEntity);
-  }
-
-  @Transactional
-  public void create(KeyValueEntity keyValueEntity) {
-    entityManagerProvider.get().persist(keyValueEntity);
-  }
-
-  @Transactional
-  public KeyValueEntity merge(KeyValueEntity keyValueEntity) {
-    return entityManagerProvider.get().merge(keyValueEntity);
-  }
-
-  @Transactional
-  public void remove(KeyValueEntity keyValueEntity) {
-    entityManagerProvider.get().remove(merge(keyValueEntity));
-  }
-
-  @Transactional
-  public void removeByKey(String key) {
-    remove(findByKey(key));
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RoleDAO.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RoleDAO.java
deleted file mode 100644
index 03a0b79..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RoleDAO.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.orm.dao;
-
-import com.google.inject.Inject;
-import com.google.inject.Provider;
-import com.google.inject.persist.Transactional;
-import org.apache.ambari.server.orm.entities.RoleEntity;
-
-import javax.persistence.EntityManager;
-
-public class RoleDAO {
-
-  @Inject
-  Provider<EntityManager> entityManagerProvider;
-
-  @Transactional
-  public RoleEntity findByName(String roleName) {
-    return entityManagerProvider.get().find(RoleEntity.class, roleName.toLowerCase());
-  }
-
-  @Transactional
-  public void create(RoleEntity role) {
-    role.setRoleName(role.getRoleName().toLowerCase());
-    entityManagerProvider.get().persist(role);
-  }
-
-  @Transactional
-  public RoleEntity merge(RoleEntity role) {
-    return entityManagerProvider.get().merge(role);
-  }
-
-  @Transactional
-  public void remove(RoleEntity role) {
-    entityManagerProvider.get().remove(merge(role));
-  }
-
-  @Transactional
-  public void removeByName(String roleName) {
-    remove(findByName(roleName));
-  }
-
-}
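
Note: RoleDAO lowercases role names on both write (create) and lookup (findByName), so lookups are case-insensitive as long as every row is written through the DAO. A usage sketch; the example class and method names around the DAO call are illustrative:

    import com.google.inject.Inject;
    import org.apache.ambari.server.orm.entities.RoleEntity;

    // Case-insensitive round trip through RoleDAO: both create() and
    // findByName() normalize the name to lower case.
    public class RoleLookupExample {
      @Inject
      RoleDAO roleDAO;

      public RoleEntity createAndFind() {
        RoleEntity admin = new RoleEntity();
        admin.setRoleName("ADMIN");          // persisted as "admin"
        roleDAO.create(admin);
        return roleDAO.findByName("Admin");  // looked up as "admin"
      }
    }
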
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RoleSuccessCriteriaDAO.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RoleSuccessCriteriaDAO.java
deleted file mode 100644
index da29578..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/RoleSuccessCriteriaDAO.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.dao;
-
-import com.google.inject.Inject;
-import com.google.inject.Provider;
-import com.google.inject.persist.Transactional;
-import org.apache.ambari.server.orm.entities.RoleSuccessCriteriaEntity;
-import org.apache.ambari.server.orm.entities.RoleSuccessCriteriaEntityPK;
-
-import javax.persistence.EntityManager;
-
-public class RoleSuccessCriteriaDAO {
-
-  @Inject
-  Provider<EntityManager> entityManagerProvider;
-
-  @Transactional
-  public RoleSuccessCriteriaEntity findByPK(RoleSuccessCriteriaEntityPK roleSuccessCriteriaEntityPK) {
-    entityManagerProvider.get().clear();
-    return entityManagerProvider.get().find(RoleSuccessCriteriaEntity.class, roleSuccessCriteriaEntityPK);
-  }
-
-  @Transactional
-  public void create(RoleSuccessCriteriaEntity entity) {
-    entityManagerProvider.get().persist(entity);
-  }
-
-  @Transactional
-  public RoleSuccessCriteriaEntity merge(RoleSuccessCriteriaEntity entity) {
-    return entityManagerProvider.get().merge(entity);
-  }
-
-  @Transactional
-  public void remove(RoleSuccessCriteriaEntity entity) {
-    entityManagerProvider.get().remove(merge(entity));
-  }
-
-  @Transactional
-  public void removeByPK(RoleSuccessCriteriaEntityPK roleSuccessCriteriaEntityPK) {
-    remove(findByPK(roleSuccessCriteriaEntityPK));
-  }
-}
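
Note: unlike the other finders in this diff, findByPK above clears the persistence context before calling find, forcing a reload from the data source instead of returning a possibly stale first-level-cache copy; the side effect is that every entity previously managed by that EntityManager becomes detached. The idiom as a standalone sketch:

    import javax.persistence.EntityManager;

    // Cache-bypassing lookup: clear() detaches everything in the persistence
    // context, so the following find() reloads current state from the
    // database (barring a second-level cache) rather than reusing a managed
    // instance. Use with care: detaching all entities is a broad side effect.
    public class FreshReadExample {
      public <T> T findFresh(EntityManager em, Class<T> type, Object pk) {
        em.clear();
        return em.find(type, pk);
      }
    }
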
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceComponentDesiredStateDAO.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceComponentDesiredStateDAO.java
deleted file mode 100644
index 4a855d2..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceComponentDesiredStateDAO.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.dao;
-
-import com.google.inject.Inject;
-import com.google.inject.Provider;
-import com.google.inject.persist.Transactional;
-import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntityPK;
-import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity;
-
-import javax.persistence.EntityManager;
-
-public class ServiceComponentDesiredStateDAO {
-  @Inject
-  Provider<EntityManager> entityManagerProvider;
-
-  @Transactional
-  public ServiceComponentDesiredStateEntity findByPK(ServiceComponentDesiredStateEntityPK primaryKey) {
-    return entityManagerProvider.get().find(ServiceComponentDesiredStateEntity.class, primaryKey);
-  }
-
-  @Transactional
-  public void refresh(ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity) {
-    entityManagerProvider.get().refresh(serviceComponentDesiredStateEntity);
-  }
-
-  @Transactional
-  public void create(ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity) {
-    entityManagerProvider.get().persist(serviceComponentDesiredStateEntity);
-  }
-
-  @Transactional
-  public ServiceComponentDesiredStateEntity merge(ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity) {
-    return entityManagerProvider.get().merge(serviceComponentDesiredStateEntity);
-  }
-
-  @Transactional
-  public void remove(ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity) {
-    entityManagerProvider.get().remove(merge(serviceComponentDesiredStateEntity));
-  }
-
-  @Transactional
-  public void removeByPK(ServiceComponentDesiredStateEntityPK primaryKey) {
-    remove(findByPK(primaryKey));
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceConfigMappingDAO.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceConfigMappingDAO.java
deleted file mode 100644
index 35178b5..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceConfigMappingDAO.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.orm.dao;
-
-import com.google.inject.Inject;
-import com.google.inject.Provider;
-import com.google.inject.persist.Transactional;
-import org.apache.ambari.server.orm.entities.ServiceConfigMappingEntity;
-
-import javax.persistence.EntityManager;
-import javax.persistence.TypedQuery;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-
-public class ServiceConfigMappingDAO {
-  @Inject
-  Provider<EntityManager> entityManagerProvider;
-  @Inject
-  DaoUtils daoUtils;
-
-  @Transactional
-  public List<ServiceConfigMappingEntity> findByType(
-      Collection<String> configTypes) {
-    TypedQuery<ServiceConfigMappingEntity> query =
-        entityManagerProvider.get().createQuery(
-            "SELECT config FROM ServiceConfigMappingEntity config"
-                + " WHERE config.configType IN ?1",
-            ServiceConfigMappingEntity.class);
-    return daoUtils.selectList(query, configTypes);
-  }
-
-  @Transactional
-  public List<ServiceConfigMappingEntity> findByServiceAndType(
-      long clusterId, String serviceName,
-      Collection<String> configTypes) {
-    if (configTypes.isEmpty()) {
-      return Collections.emptyList();
-    }
-    TypedQuery<ServiceConfigMappingEntity> query =
-        entityManagerProvider.get().createQuery(
-            "SELECT config FROM ServiceConfigMappingEntity config"
-                + " WHERE "
-                + " config.clusterId = ?1"
-                + " AND config.serviceName = ?2"
-                + " AND config.configType IN ?3",
-            ServiceConfigMappingEntity.class);
-    return daoUtils.selectList(query, clusterId, serviceName, configTypes);
-  }
-
-  @Transactional
-  public void refresh(
-      ServiceConfigMappingEntity serviceConfigMappingEntity) {
-    entityManagerProvider.get().refresh(serviceConfigMappingEntity);
-  }
-
-  @Transactional
-  public ServiceConfigMappingEntity merge(
-      ServiceConfigMappingEntity serviceConfigMappingEntity) {
-    return entityManagerProvider.get().merge(
-        serviceConfigMappingEntity);
-  }
-
-  @Transactional
-  public void remove(
-      ServiceConfigMappingEntity serviceConfigMappingEntity) {
-    entityManagerProvider.get().remove(merge(serviceConfigMappingEntity));
-  }
-
-  @Transactional
-  public void removeByType(Collection<String> configTypes) {
-    for (ServiceConfigMappingEntity entity : findByType(configTypes)) {
-      remove(entity);
-    }
-  }
-}
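
Note: findByServiceAndType short-circuits on an empty configTypes collection because an "IN ?1" predicate bound to an empty collection is rejected at runtime by typical JPA providers; findByType lacks the same guard and would fail on empty input. The guard, extracted as a sketch:

    import java.util.Collection;
    import java.util.Collections;
    import java.util.List;
    import javax.persistence.TypedQuery;

    // Empty-IN guard: nothing can match an empty IN list, and most providers
    // reject the query outright, so skip execution entirely.
    public class EmptyInGuardExample {
      public <T> List<T> selectIn(TypedQuery<T> query, Collection<?> values) {
        if (values.isEmpty()) {
          return Collections.emptyList();
        }
        query.setParameter(1, values);
        return query.getResultList();
      }
    }
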
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceDesiredStateDAO.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceDesiredStateDAO.java
deleted file mode 100644
index cc04280..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceDesiredStateDAO.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.dao;
-
-import com.google.inject.Inject;
-import com.google.inject.Provider;
-import com.google.inject.persist.Transactional;
-import org.apache.ambari.server.orm.entities.ServiceDesiredStateEntity;
-import org.apache.ambari.server.orm.entities.ServiceDesiredStateEntityPK;
-
-import javax.persistence.EntityManager;
-
-public class ServiceDesiredStateDAO {
-  @Inject
-  Provider<EntityManager> entityManagerProvider;
-
-  @Transactional
-  public ServiceDesiredStateEntity findByPK(ServiceDesiredStateEntityPK primaryKey) {
-    return entityManagerProvider.get().find(ServiceDesiredStateEntity.class, primaryKey);
-  }
-
-  @Transactional
-  public void refresh(ServiceDesiredStateEntity serviceDesiredStateEntity) {
-    entityManagerProvider.get().refresh(serviceDesiredStateEntity);
-  }
-
-  @Transactional
-  public void create(ServiceDesiredStateEntity serviceDesiredStateEntity) {
-    entityManagerProvider.get().persist(serviceDesiredStateEntity);
-  }
-
-  @Transactional
-  public ServiceDesiredStateEntity merge(ServiceDesiredStateEntity serviceDesiredStateEntity) {
-    return entityManagerProvider.get().merge(serviceDesiredStateEntity);
-  }
-
-  @Transactional
-  public void remove(ServiceDesiredStateEntity serviceDesiredStateEntity) {
-    entityManagerProvider.get().remove(merge(serviceDesiredStateEntity));
-  }
-
-  @Transactional
-  public void removeByPK(ServiceDesiredStateEntityPK primaryKey) {
-    remove(findByPK(primaryKey));
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/StageDAO.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/StageDAO.java
deleted file mode 100644
index aa861cd..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/StageDAO.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.dao;
-
-import com.google.inject.Inject;
-import com.google.inject.Provider;
-import com.google.inject.persist.Transactional;
-import org.apache.ambari.server.actionmanager.HostRoleStatus;
-import org.apache.ambari.server.orm.entities.StageEntity;
-import org.apache.ambari.server.orm.entities.StageEntityPK;
-import org.apache.ambari.server.utils.StageUtils;
-
-import javax.persistence.EntityManager;
-import javax.persistence.TypedQuery;
-import java.util.Collection;
-import java.util.List;
-
-public class StageDAO {
-
-  @Inject
-  Provider<EntityManager> entityManagerProvider;
-  @Inject
-  DaoUtils daoUtils;
-
-  @Transactional
-  public StageEntity findByPK(StageEntityPK stageEntityPK) {
-    return entityManagerProvider.get().find(StageEntity.class, stageEntityPK);
-  }
-
-  @Transactional
-  public long getLastRequestId() {
-    TypedQuery<Long> query = entityManagerProvider.get().createQuery("SELECT max(stage.requestId) FROM StageEntity stage", Long.class);
-    Long result = daoUtils.selectSingle(query);
-    if (result != null) {
-      return result;
-    } else {
-      return 0;
-    }
-  }
-
-  @Transactional
-  public StageEntity findByActionId(String actionId) {
-    long[] ids = StageUtils.getRequestStage(actionId);
-    StageEntityPK pk = new StageEntityPK();
-    pk.setRequestId(ids[0]);
-    pk.setStageId(ids[1]);
-    return findByPK(pk);
-  }
-
-  @Transactional
-  public List<StageEntity> findByRequestId(long requestId) {
-    TypedQuery<StageEntity> query = entityManagerProvider.get().createQuery("SELECT stage " +
-        "FROM StageEntity stage " +
-        "WHERE stage.requestId=?1 " +
-        "ORDER BY stage.stageId", StageEntity.class);
-    return daoUtils.selectList(query, requestId);
-  }
-
-  @Transactional
-  public List<StageEntity> findByCommandStatuses(Collection<HostRoleStatus> statuses) {
-//    TypedQuery<StageEntity> query = entityManagerProvider.get().createQuery("SELECT stage " +
-//        "FROM StageEntity stage JOIN stage.hostRoleCommands command " +
-//        "WHERE command.status IN ?1 " +
-//        "ORDER BY stage.requestId, stage.stageId", StageEntity.class);
-    TypedQuery<StageEntity> query = entityManagerProvider.get().createQuery("SELECT stage " +
-          "FROM StageEntity stage WHERE stage.stageId IN (SELECT hrce.stageId FROM " +
-          "HostRoleCommandEntity hrce WHERE stage.requestId = hrce.requestId and hrce.status IN ?1 ) " +
-          "ORDER BY stage.requestId, stage.stageId", StageEntity.class);
-    return daoUtils.selectList(query, statuses);
-  }
-
-  @Transactional
-  public void create(StageEntity stageEntity) {
-    entityManagerProvider.get().persist(stageEntity);
-  }
-
-  @Transactional
-  public StageEntity merge(StageEntity stageEntity) {
-    return entityManagerProvider.get().merge(stageEntity);
-  }
-
-  @Transactional
-  public void remove(StageEntity stageEntity) {
-    entityManagerProvider.get().remove(merge(stageEntity));
-  }
-
-  @Transactional
-  public void removeByPK(StageEntityPK stageEntityPK) {
-    remove(findByPK(stageEntityPK));
-  }
-
-}
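
Note: findByCommandStatuses keeps the earlier join-based query as a comment and ships a correlated subquery instead; one observable difference is that the join form, lacking DISTINCT, can return the same stage once per matching command. For comparison only, a duplicate-safe join variant, assuming the stage.hostRoleCommands mapping that the commented-out query referenced:

    import java.util.Collection;
    import java.util.List;
    import javax.persistence.EntityManager;
    import javax.persistence.TypedQuery;
    import org.apache.ambari.server.actionmanager.HostRoleStatus;
    import org.apache.ambari.server.orm.entities.StageEntity;

    // Join formulation with DISTINCT, sketched for comparison with the
    // shipped subquery; not the form this DAO actually uses.
    public class StageJoinQueryExample {
      public List<StageEntity> findByCommandStatuses(
          EntityManager em, Collection<HostRoleStatus> statuses) {
        TypedQuery<StageEntity> query = em.createQuery(
            "SELECT DISTINCT stage " +
            "FROM StageEntity stage JOIN stage.hostRoleCommands command " +
            "WHERE command.status IN ?1 " +
            "ORDER BY stage.requestId, stage.stageId", StageEntity.class);
        query.setParameter(1, statuses);
        return query.getResultList();
      }
    }
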
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/UserDAO.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/UserDAO.java
deleted file mode 100644
index 220507c..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/UserDAO.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.orm.dao;
-
-import com.google.inject.Inject;
-import com.google.inject.Provider;
-import com.google.inject.persist.Transactional;
-import org.apache.ambari.server.orm.entities.UserEntity;
-
-import javax.persistence.EntityManager;
-import javax.persistence.NoResultException;
-import javax.persistence.TypedQuery;
-import java.util.List;
-
-public class UserDAO {
-
-  @Inject
-  Provider<EntityManager> entityManagerProvider;
-  @Inject
-  DaoUtils daoUtils;
-
-  @Transactional
-  public UserEntity findByPK(Integer userPK) {
-    return entityManagerProvider.get().find(UserEntity.class, userPK);
-  }
-
-  @Transactional
-  public List<UserEntity> findAll() {
-    TypedQuery<UserEntity> query = entityManagerProvider.get().createQuery("SELECT user FROM UserEntity user", UserEntity.class);
-    return daoUtils.selectList(query);
-  }
-
-  @Transactional
-  public UserEntity findLocalUserByName(String userName) {
-    TypedQuery<UserEntity> query = entityManagerProvider.get().createNamedQuery("localUserByName", UserEntity.class);
-    query.setParameter("username", userName.toLowerCase());
-    try {
-      return query.getSingleResult();
-    } catch (NoResultException e) {
-      return null;
-    }
-  }
-
-  @Transactional
-  public UserEntity findLdapUserByName(String userName) {
-    TypedQuery<UserEntity> query = entityManagerProvider.get().createNamedQuery("ldapUserByName", UserEntity.class);
-    query.setParameter("username", userName.toLowerCase());
-    try {
-      return query.getSingleResult();
-    } catch (NoResultException e) {
-      return null;
-    }
-  }
-
-  @Transactional
-  public void create(UserEntity user) {
-    user.setUserName(user.getUserName().toLowerCase());
-    entityManagerProvider.get().persist(user);
-  }
-
-  @Transactional
-  public UserEntity merge(UserEntity user) {
-    user.setUserName(user.getUserName().toLowerCase());
-    return entityManagerProvider.get().merge(user);
-  }
-
-  @Transactional
-  public void remove(UserEntity user) {
-    entityManagerProvider.get().remove(merge(user));
-  }
-
-  @Transactional
-  public void removeByPK(Integer userPK) {
-    remove(findByPK(userPK));
-  }
-
-}
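
Note: findLocalUserByName and findLdapUserByName both run named queries (localUserByName and ldapUserByName, declared on UserEntity and not shown in this hunk) and translate JPA's NoResultException into a null return, treating "no such user" as an expected outcome rather than an error. The repeated try/catch could be factored out, sketched here:

    import javax.persistence.NoResultException;
    import javax.persistence.TypedQuery;

    // Single-result-or-null idiom shared by the two name lookups above.
    public class SingleResultHelper {
      public static <T> T singleOrNull(TypedQuery<T> query) {
        try {
          return query.getSingleResult();
        } catch (NoResultException e) {
          return null; // absence is expected, not exceptional
        }
      }
    }
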
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntity.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntity.java
deleted file mode 100644
index bcbee2c..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntity.java
+++ /dev/null
@@ -1,155 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.entities;
-
-import javax.persistence.*;
-import java.util.Collection;
-
-@IdClass(ClusterConfigEntityPK.class)
-@Table(name = "clusterconfig", schema = "ambari", catalog = "")
-@Entity
-public class ClusterConfigEntity {
-  private Long clusterId;
-  private String configJson;
-  private String type;
-  private String tag;
-  private long timestamp;
-  private Collection<HostComponentConfigMappingEntity> hostComponentConfigMappingEntities;
-  private Collection<ServiceConfigMappingEntity> serviceConfigMappingEntities;
-  private Collection<HostComponentDesiredConfigMappingEntity> hostComponentDesiredConfigMappingEntities;
-  private Collection<ComponentConfigMappingEntity> componentConfigMappingEntities;
-  
-  @Column(name = "cluster_id", nullable = false, insertable = false, updatable = false, length = 10)
-  @Id
-  public Long getClusterId() {
-    return clusterId;
-  }
-
-  public void setClusterId(Long clusterId) {
-    this.clusterId = clusterId;
-  }
-  
-  @Column(name = "type_name")
-  @Id
-  public String getType() {
-    return type;
-  }
-  
-  public void setType(String typeName) {
-    type = typeName;
-  }
-  
-  @Column(name = "version_tag")
-  @Id
-  public String getTag() {
-    return tag;
-  }
-  
-  public void setTag(String versionTag) {
-    tag = versionTag;
-  }
-
-  @Column(name = "config_data", nullable = false, insertable = true, updatable = false, length=32000)
-  @Basic(fetch=FetchType.LAZY)
-  public String getData() {
-    return configJson;
-  }
-
-  public void setData(String data) {
-    this.configJson = data;
-  }
-  
-  @Column(name = "create_timestamp", nullable=false, insertable=true, updatable=false)
-  public long getTimestamp() {
-    return timestamp;
-  }
-  
-  public void setTimestamp(long stamp) {
-    timestamp = stamp;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    ClusterConfigEntity that = (ClusterConfigEntity) o;
-
-    if (clusterId != null ? !clusterId.equals(that.clusterId) : that.clusterId != null) return false;
-    if (configJson != null ? !configJson.equals(that.configJson) : that.configJson != null)
-      return false;
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = clusterId != null ? clusterId.intValue() : 0;
-    result = 31 * result + (configJson != null ? configJson.hashCode() : 0);
-    return result;
-  }
-
-  private ClusterEntity clusterEntity;
-
-  @ManyToOne
-  @JoinColumn(name = "cluster_id", referencedColumnName = "cluster_id", nullable = false)
-  public ClusterEntity getClusterEntity() {
-    return clusterEntity;
-  }
-
-  public void setClusterEntity(ClusterEntity clusterEntity) {
-    this.clusterEntity = clusterEntity;
-  }
-
-  @OneToMany(mappedBy = "clusterConfigEntity")
-  public Collection<HostComponentConfigMappingEntity> getHostComponentConfigMappingEntities() {
-    return hostComponentConfigMappingEntities;
-  }
-
-  public void setHostComponentConfigMappingEntities(Collection<HostComponentConfigMappingEntity> hostComponentConfigMappingEntities) {
-    this.hostComponentConfigMappingEntities = hostComponentConfigMappingEntities;
-  }
-
-  @OneToMany(mappedBy = "clusterConfigEntity")
-  public Collection<ServiceConfigMappingEntity> getServiceConfigMappingEntities() {
-    return serviceConfigMappingEntities;
-  }
-
-  public void setServiceConfigMappingEntities(Collection<ServiceConfigMappingEntity> serviceConfigMappingEntities) {
-    this.serviceConfigMappingEntities = serviceConfigMappingEntities;
-  }
-
-  @OneToMany(mappedBy = "clusterConfigEntity")
-  public Collection<HostComponentDesiredConfigMappingEntity> getHostComponentDesiredConfigMappingEntities() {
-    return hostComponentDesiredConfigMappingEntities;
-  }
-
-  public void setHostComponentDesiredConfigMappingEntities(Collection<HostComponentDesiredConfigMappingEntity> hostComponentDesiredConfigMappingEntities) {
-    this.hostComponentDesiredConfigMappingEntities = hostComponentDesiredConfigMappingEntities;
-  }
-
-  @OneToMany(mappedBy = "clusterConfigEntity")
-  public Collection<ComponentConfigMappingEntity> getComponentConfigMappingEntities() {
-    return componentConfigMappingEntities;
-  }
-
-  public void setComponentConfigMappingEntities(Collection<ComponentConfigMappingEntity> componentConfigMappingEntities) {
-    this.componentConfigMappingEntities = componentConfigMappingEntities;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntityPK.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntityPK.java
deleted file mode 100644
index aee7dd8..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntityPK.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.entities;
-
-import javax.persistence.Column;
-import javax.persistence.Id;
-import java.io.Serializable;
-
-@SuppressWarnings("serial")
-public class ClusterConfigEntityPK implements Serializable {
-  private Long clusterId;
-
-  @Id
-  @Column(name = "cluster_id", nullable = false, insertable = true, updatable = true, length = 10)
-  public Long getClusterId() {
-    return clusterId;
-  }
-
-  public void setClusterId(Long clusterId) {
-    this.clusterId = clusterId;
-  }
-
-  private String type;
-  @Id
-  @Column(name = "type_name", nullable = false, insertable = true, updatable = false)
-  public String getType() {
-    return type;
-  }
-
-  public void setType(String typeName) {
-    type = typeName;
-  }
-
-  private String tag;
-  @Id
-  @Column(name="version_tag", nullable = false, insertable = true, updatable = false)
-  public String getTag() {
-    return tag;
-  }
-
-  public void setTag(String configTag) {
-    tag = configTag;
-  }
-
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    ClusterConfigEntityPK that = (ClusterConfigEntityPK) o;
-
-    if (clusterId != null ? !clusterId.equals(that.clusterId) : that.clusterId != null) return false;
-    if (type != null ? !type.equals(that.type) : that.type != null) return false;
-    if (tag != null ? !tag.equals(that.tag) : that.tag != null) return false;
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = clusterId != null ? clusterId.intValue() : 0;
-    result = 31 * result + (type != null ? type.hashCode() : 0);
-    result = 31 * result + (tag != null ? tag.hashCode() : 0);
-    return result;
-  }
-}
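
Note: ClusterConfigEntity uses @IdClass with this PK class, so the three @Id properties (clusterId, type, tag) must be mirrored field-for-field here, and em.find takes a populated PK instance. Note also that the entity's equals/hashCode compare only clusterId and configJson rather than the full three-part key, unlike the PK class. A lookup sketch:

    import javax.persistence.EntityManager;

    // Composite-key lookup with @IdClass: em.find receives an instance of
    // the PK class whose fields mirror the entity's @Id properties. Whether
    // the LAZY hint on config_data actually defers loading depends on the
    // JPA provider's bytecode weaving being enabled.
    public class ClusterConfigLookupExample {
      public ClusterConfigEntity find(EntityManager em, Long clusterId,
                                      String type, String tag) {
        ClusterConfigEntityPK pk = new ClusterConfigEntityPK();
        pk.setClusterId(clusterId);
        pk.setType(type);
        pk.setTag(tag);
        return em.find(ClusterConfigEntity.class, pk);
      }
    }
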
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterEntity.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterEntity.java
deleted file mode 100644
index ac5e81d..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterEntity.java
+++ /dev/null
@@ -1,180 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.entities;
-
-import javax.persistence.*;
-import java.util.Collection;
-
-@javax.persistence.Table(name = "clusters", schema = "ambari", catalog = "")
-@NamedQueries({
-    @NamedQuery(name = "clusterByName", query =
-        "SELECT cluster " +
-            "FROM ClusterEntity cluster " +
-            "WHERE cluster.clusterName=:clusterName"),
-    @NamedQuery(name = "allClusters", query =
-        "SELECT clusters " +
-            "FROM ClusterEntity clusters")
-})
-@Entity
-@SequenceGenerator(name = "ambari.clusters_cluster_id_seq", allocationSize = 1)
-public class ClusterEntity {
-  private Long clusterId;
-
-  @javax.persistence.Column(name = "cluster_id", nullable = false, insertable = true, updatable = true)
-  @Id
-  @GeneratedValue(strategy = GenerationType.SEQUENCE, generator = "ambari.clusters_cluster_id_seq")
-  public Long getClusterId() {
-    return clusterId;
-  }
-
-  public void setClusterId(Long clusterId) {
-    this.clusterId = clusterId;
-  }
-
-  private String clusterName;
-
-  @javax.persistence.Column(name = "cluster_name", nullable = false, insertable = true,
-          updatable = true, unique = true, length = 100)
-  @Basic
-  public String getClusterName() {
-    return clusterName;
-  }
-
-  public void setClusterName(String clusterName) {
-    this.clusterName = clusterName;
-  }
-
-  private String desiredClusterState = "";
-
-  @javax.persistence.Column(name = "desired_cluster_state", nullable = false, insertable = true, updatable = true)
-  @Basic
-  public String getDesiredClusterState() {
-    return desiredClusterState;
-  }
-
-  public void setDesiredClusterState(String desiredClusterState) {
-    this.desiredClusterState = desiredClusterState;
-  }
-
-  private String clusterInfo = "";
-
-  @javax.persistence.Column(name = "cluster_info", nullable = false, insertable = true, updatable = true)
-  @Basic
-  public String getClusterInfo() {
-    return clusterInfo;
-  }
-
-  public void setClusterInfo(String clusterInfo) {
-    this.clusterInfo = clusterInfo;
-  }
-
-  private String desiredStackVersion = "";
-
-  @javax.persistence.Column(name = "desired_stack_version", nullable = false, insertable = true, updatable = true)
-  @Basic
-  public String getDesiredStackVersion() {
-    return desiredStackVersion;
-  }
-
-  public void setDesiredStackVersion(String desiredStackVersion) {
-    this.desiredStackVersion = desiredStackVersion;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    ClusterEntity that = (ClusterEntity) o;
-
-    if (clusterId != null ? !clusterId.equals(that.clusterId) : that.clusterId != null) return false;
-    if (clusterInfo != null ? !clusterInfo.equals(that.clusterInfo) : that.clusterInfo != null) return false;
-    if (clusterName != null ? !clusterName.equals(that.clusterName) : that.clusterName != null) return false;
-    if (desiredClusterState != null ? !desiredClusterState.equals(that.desiredClusterState) : that.desiredClusterState != null)
-      return false;
-    if (desiredStackVersion != null ? !desiredStackVersion.equals(that.desiredStackVersion) : that.desiredStackVersion != null)
-      return false;
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = clusterId != null ? clusterId.intValue() : 0;
-    result = 31 * result + (clusterName != null ? clusterName.hashCode() : 0);
-    result = 31 * result + (desiredClusterState != null ? desiredClusterState.hashCode() : 0);
-    result = 31 * result + (clusterInfo != null ? clusterInfo.hashCode() : 0);
-    result = 31 * result + (desiredStackVersion != null ? desiredStackVersion.hashCode() : 0);
-    return result;
-  }
-
-  private Collection<ClusterServiceEntity> clusterServiceEntities;
-
-  @OneToMany(mappedBy = "clusterEntity")
-  public Collection<ClusterServiceEntity> getClusterServiceEntities() {
-    return clusterServiceEntities;
-  }
-
-  public void setClusterServiceEntities(Collection<ClusterServiceEntity> clusterServiceEntities) {
-    this.clusterServiceEntities = clusterServiceEntities;
-  }
-
-  private ClusterStateEntity clusterStateEntity;
-
-  @OneToOne(mappedBy = "clusterEntity")
-  public ClusterStateEntity getClusterStateEntity() {
-    return clusterStateEntity;
-  }
-
-  public void setClusterStateEntity(ClusterStateEntity clusterStateEntity) {
-    this.clusterStateEntity = clusterStateEntity;
-  }
-
-  private Collection<HostEntity> hostEntities;
-
-  @ManyToMany(mappedBy = "clusterEntities")
-  public Collection<HostEntity> getHostEntities() {
-    return hostEntities;
-  }
-
-  public void setHostEntities(Collection<HostEntity> hostEntities) {
-    this.hostEntities = hostEntities;
-  }
-
-  private Collection<StageEntity> stages;
-
-  @OneToMany(mappedBy = "cluster", cascade = CascadeType.REMOVE)
-  public Collection<StageEntity> getStages() {
-    return stages;
-  }
-
-  public void setStages(Collection<StageEntity> stages) {
-    this.stages = stages;
-  }
-  
-  private Collection<ClusterConfigEntity> configEntities;
-  @OneToMany(mappedBy = "clusterEntity", cascade = CascadeType.ALL)
-  public Collection<ClusterConfigEntity> getClusterConfigEntities() {
-    return configEntities;
-  }
-  
-  public void setClusterConfigEntities(Collection<ClusterConfigEntity> entities) {
-    configEntities = entities;
-  }
-}
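
Note: cluster_id values come from the ambari.clusters_cluster_id_seq sequence with allocationSize = 1, i.e. one database round trip per generated id; with SEQUENCE generation the id is typically assigned at persist() time, before the transaction commits. Sketch:

    import javax.persistence.EntityManager;

    // With GenerationType.SEQUENCE the id is allocated at persist() time,
    // so it can be read back right away.
    public class CreateClusterExample {
      public Long create(EntityManager em, String name) {
        ClusterEntity cluster = new ClusterEntity();
        cluster.setClusterName(name);
        em.persist(cluster); // id drawn from ambari.clusters_cluster_id_seq
        return cluster.getClusterId();
      }
    }
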
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterServiceEntity.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterServiceEntity.java
deleted file mode 100644
index 9d18c3d..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterServiceEntity.java
+++ /dev/null
@@ -1,140 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.entities;
-
-import javax.persistence.*;
-import java.util.Collection;
-
-@javax.persistence.IdClass(ClusterServiceEntityPK.class)
-@javax.persistence.Table(name = "clusterservices", schema = "ambari", catalog = "")
-@NamedQueries({
-        @NamedQuery(name = "clusterServiceByClusterAndServiceNames", query =
-                "SELECT clusterService " +
-                        "FROM ClusterServiceEntity clusterService " +
-                        "JOIN clusterService.clusterEntity cluster " +
-                        "WHERE clusterService.serviceName=:serviceName AND cluster.clusterName=:clusterName")
-})
-@Entity
-public class ClusterServiceEntity {
-  private Long clusterId;
-
-  @javax.persistence.Column(name = "cluster_id", nullable = false, insertable = false, updatable = false, length = 10)
-  @Id
-  public Long getClusterId() {
-    return clusterId;
-  }
-
-  public void setClusterId(Long clusterId) {
-    this.clusterId = clusterId;
-  }
-
-  private String serviceName;
-
-  @javax.persistence.Column(name = "service_name", nullable = false, insertable = true, updatable = true)
-  @Id
-  public String getServiceName() {
-    return serviceName;
-  }
-
-  public void setServiceName(String serviceName) {
-    this.serviceName = serviceName;
-  }
-
-  private Integer serviceEnabled = 0;
-
-  @javax.persistence.Column(name = "service_enabled", nullable = false, insertable = true, updatable = true, length = 10)
-  @Basic
-  public int getServiceEnabled() {
-    return serviceEnabled;
-  }
-
-  public void setServiceEnabled(int serviceEnabled) {
-    this.serviceEnabled = serviceEnabled;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    ClusterServiceEntity that = (ClusterServiceEntity) o;
-
-    if (clusterId != null ? !clusterId.equals(that.clusterId) : that.clusterId != null) return false;
-    if (serviceEnabled != null ? !serviceEnabled.equals(that.serviceEnabled) : that.serviceEnabled != null)
-      return false;
-    if (serviceName != null ? !serviceName.equals(that.serviceName) : that.serviceName != null) return false;
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = clusterId != null ? clusterId.intValue() : 0;
-    result = 31 * result + (serviceName != null ? serviceName.hashCode() : 0);
-    result = 31 * result + serviceEnabled;
-    return result;
-  }
-
-  private ClusterEntity clusterEntity;
-
-  @ManyToOne
-  @javax.persistence.JoinColumn(name = "cluster_id", referencedColumnName = "cluster_id", nullable = false)
-  public ClusterEntity getClusterEntity() {
-    return clusterEntity;
-  }
-
-  public void setClusterEntity(ClusterEntity clusterEntity) {
-    this.clusterEntity = clusterEntity;
-  }
-
-  private ServiceDesiredStateEntity serviceDesiredStateEntity;
-
-  @OneToOne(mappedBy = "clusterServiceEntity", cascade = CascadeType.ALL)
-  public ServiceDesiredStateEntity getServiceDesiredStateEntity() {
-    return serviceDesiredStateEntity;
-  }
-
-  public void setServiceDesiredStateEntity(ServiceDesiredStateEntity serviceDesiredStateEntity) {
-    this.serviceDesiredStateEntity = serviceDesiredStateEntity;
-  }
-
-  private Collection<ServiceComponentDesiredStateEntity> serviceComponentDesiredStateEntities;
-
-  @OneToMany(mappedBy = "clusterServiceEntity")
-  public Collection<ServiceComponentDesiredStateEntity> getServiceComponentDesiredStateEntities() {
-    return serviceComponentDesiredStateEntities;
-  }
-
-  public void setServiceComponentDesiredStateEntities(Collection<ServiceComponentDesiredStateEntity> serviceComponentDesiredStateEntities) {
-    this.serviceComponentDesiredStateEntities = serviceComponentDesiredStateEntities;
-  }
-
-  private Collection<ServiceConfigMappingEntity> serviceConfigMappings;
-  @OneToMany(mappedBy = "serviceEntity", cascade = CascadeType.ALL)
-  public Collection<ServiceConfigMappingEntity> getServiceConfigMappings() {
-    return serviceConfigMappings;
-  }
-
-  public void setServiceConfigMappings(Collection<ServiceConfigMappingEntity> entities) {
-    serviceConfigMappings = entities;
-  }
-
-
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterServiceEntityPK.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterServiceEntityPK.java
deleted file mode 100644
index e50374d..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterServiceEntityPK.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.entities;
-
-import javax.persistence.Column;
-import javax.persistence.Id;
-import java.io.Serializable;
-
-@SuppressWarnings("serial")
-public class ClusterServiceEntityPK implements Serializable {
-  private Long clusterId;
-
-  @Id
-  @Column(name = "cluster_id", nullable = false, insertable = true, updatable = true, length = 10)
-  public Long getClusterId() {
-    return clusterId;
-  }
-
-  public void setClusterId(Long clusterId) {
-    this.clusterId = clusterId;
-  }
-
-  private String serviceName;
-
-  @Id
-  @Column(name = "service_name", nullable = false, insertable = true, updatable = true)
-  public String getServiceName() {
-    return serviceName;
-  }
-
-  public void setServiceName(String serviceName) {
-    this.serviceName = serviceName;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    ClusterServiceEntityPK that = (ClusterServiceEntityPK) o;
-
-    if (clusterId != null ? !clusterId.equals(that.clusterId) : that.clusterId != null) return false;
-    if (serviceName != null ? !serviceName.equals(that.serviceName) : that.serviceName != null) return false;
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = clusterId != null ? clusterId.intValue() : 0;
-    result = 31 * result + (serviceName != null ? serviceName.hashCode() : 0);
-    return result;
-  }
-}
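
Note: every *EntityPK class in this diff is Serializable and overrides equals/hashCode, as JPA requires for id classes: two PK instances with equal fields must be equal and hash alike, or identity lookups and persistence-context bookkeeping silently break. A quick contract check (run with assertions enabled, -ea; the field values are arbitrary):

    // Value-equality contract that @IdClass types must honor.
    public class PkContractExample {
      public static void main(String[] args) {
        ClusterServiceEntityPK a = new ClusterServiceEntityPK();
        a.setClusterId(1L);
        a.setServiceName("HDFS");

        ClusterServiceEntityPK b = new ClusterServiceEntityPK();
        b.setClusterId(1L);
        b.setServiceName("HDFS");

        assert a.equals(b) && a.hashCode() == b.hashCode();
      }
    }
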
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterStateEntity.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterStateEntity.java
deleted file mode 100644
index 07429d3..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterStateEntity.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.entities;
-
-import javax.persistence.Basic;
-import javax.persistence.Entity;
-import javax.persistence.Id;
-import javax.persistence.OneToOne;
-
-@javax.persistence.Table(name = "clusterstate", schema = "ambari", catalog = "")
-@Entity
-public class ClusterStateEntity {
-  private Long clusterId;
-
-  @javax.persistence.Column(name = "cluster_id", nullable = false, insertable = false, updatable = false, length = 10)
-  @Id
-  public Long getClusterId() {
-    return clusterId;
-  }
-
-  public void setClusterId(Long clusterId) {
-    this.clusterId = clusterId;
-  }
-
-  private String currentClusterState = "";
-
-  @javax.persistence.Column(name = "current_cluster_state", nullable = false, insertable = true, updatable = true)
-  @Basic
-  public String getCurrentClusterState() {
-    return currentClusterState;
-  }
-
-  public void setCurrentClusterState(String currentClusterState) {
-    this.currentClusterState = currentClusterState;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    ClusterStateEntity that = (ClusterStateEntity) o;
-
-    if (clusterId != null ? !clusterId.equals(that.clusterId) : that.clusterId != null) return false;
-    if (currentClusterState != null ? !currentClusterState.equals(that.currentClusterState) : that.currentClusterState != null)
-      return false;
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = clusterId != null ? clusterId.intValue() : 0;
-    result = 31 * result + (currentClusterState != null ? currentClusterState.hashCode() : 0);
-    return result;
-  }
-
-  private ClusterEntity clusterEntity;
-
-  @OneToOne
-  @javax.persistence.JoinColumn(name = "cluster_id", referencedColumnName = "cluster_id", nullable = false)
-  public ClusterEntity getClusterEntity() {
-    return clusterEntity;
-  }
-
-  public void setClusterEntity(ClusterEntity clusterEntity) {
-    this.clusterEntity = clusterEntity;
-  }
-}
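
Note: ClusterStateEntity owns the one-to-one (it carries the cluster_id join column), while ClusterEntity.getClusterStateEntity() is the mappedBy inverse; JPA persists only the owning side, so both in-memory references should be set when linking. A wiring sketch; the "INSTALLED" state string is illustrative:

    import javax.persistence.EntityManager;

    // Wiring the bidirectional one-to-one: the owning side (state) is what
    // JPA writes; the inverse reference is maintained for navigation only.
    public class LinkClusterStateExample {
      public void link(EntityManager em, ClusterEntity cluster) {
        ClusterStateEntity state = new ClusterStateEntity();
        state.setClusterId(cluster.getClusterId()); // shares the cluster PK
        state.setCurrentClusterState("INSTALLED");  // illustrative value
        state.setClusterEntity(cluster);            // owning side
        cluster.setClusterStateEntity(state);       // inverse, in-memory
        em.persist(state);
      }
    }
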
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ComponentConfigMappingEntity.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ComponentConfigMappingEntity.java
deleted file mode 100644
index 1ac96a0..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ComponentConfigMappingEntity.java
+++ /dev/null
@@ -1,153 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.orm.entities;
-
-import javax.persistence.Column;
-import javax.persistence.Entity;
-import javax.persistence.Id;
-import javax.persistence.IdClass;
-import javax.persistence.JoinColumn;
-import javax.persistence.JoinColumns;
-import javax.persistence.ManyToOne;
-import javax.persistence.Table;
-
-@IdClass(ComponentConfigMappingEntityPK.class)
-@Entity
-@Table(name = "componentconfigmapping", schema = "ambari", catalog = "")
-public class ComponentConfigMappingEntity {
-  private Long clusterId;
-  private String serviceName;
-  private String componentName;
-  private String configType;
-  private String configTag;
-  private Long timestamp;
-  private ServiceComponentDesiredStateEntity componentEntity;
-  private ClusterConfigEntity clusterConfigEntity;
-
-  @Column(name = "cluster_id", insertable = false, updatable = false, nullable = false)
-  @Id
-  public Long getClusterId() {
-    return clusterId;
-  }
-
-  public void setClusterId(Long id) {
-    clusterId = id;
-  }
-
-  @Column(name = "service_name", insertable = false, updatable = false, nullable = false)
-  @Id
-  public String getServiceName() {
-    return serviceName;
-  }
-
-  public void setServiceName(String name) {
-    serviceName = name;
-  }
-
-  @Column(name = "component_name", insertable = false, updatable = false, nullable = false)
-  @Id
-  public String getComponentName() {
-    return componentName;
-  }
-
-  public void setComponentName(String name) {
-    componentName = name;
-  }
-
-  @Column(name = "config_type", insertable = true, updatable = false, nullable = false)
-  @Id
-  public String getConfigType() {
-    return configType;
-  }
-
-  public void setConfigType(String type) {
-    configType = type;
-  }
-
-  @Column(name = "config_tag", nullable = false, insertable = true, updatable = true)
-  public String getVersionTag() {
-    return configTag;
-  }
-
-  public void setVersionTag(String tag) {
-    configTag = tag;
-  }
-
-  @Column(name="timestamp", nullable = false, insertable = true, updatable = true)
-  public Long getTimestamp() {
-    return timestamp;
-  }
-
-  public void setTimestamp(Long stamp) {
-    timestamp = stamp;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    ComponentConfigMappingEntity that = (ComponentConfigMappingEntity) o;
-
-    if (clusterId != null ? !clusterId.equals(that.clusterId) : that.clusterId != null) return false;
-    if (serviceName != null ? !serviceName.equals(that.serviceName) : that.serviceName != null) return false;
-    if (componentName != null ? !componentName.equals(that.componentName) : that.componentName != null) return false;
-    if (configType != null ? !configType.equals(that.configType) : that.configType != null) return false;
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = clusterId != null ? clusterId.intValue() : 0;
-    result = 31 * result + (serviceName != null ? serviceName.hashCode() : 0);
-    result = 31 * result + (componentName != null ? componentName.hashCode() : 0);
-    result = 31 * result + (configType != null ? configType.hashCode() : 0);
-    return result;
-  }
-
-  @ManyToOne
-  @JoinColumns({
-    @JoinColumn(name = "cluster_id", referencedColumnName = "cluster_id", nullable = false),
-    @JoinColumn(name = "service_name", referencedColumnName = "service_name", nullable = false),
-    @JoinColumn(name = "component_name", referencedColumnName = "component_name", nullable = false)
-  })
-  public ServiceComponentDesiredStateEntity getServiceComponentDesiredStateEntity() {
-    return componentEntity;
-  }
-
-  public void setServiceComponentDesiredStateEntity(ServiceComponentDesiredStateEntity entity) {
-    componentEntity = entity;
-  }
-
-  @ManyToOne
-  @JoinColumns({
-      @JoinColumn(name = "cluster_id", referencedColumnName = "cluster_id", nullable = false, insertable = false, updatable = false),
-      @JoinColumn(name = "config_type", referencedColumnName = "type_name", nullable = false, insertable = false, updatable = false),
-      @JoinColumn(name = "config_tag", referencedColumnName = "version_tag", nullable = false, insertable = false, updatable = false)
-  })
-  public ClusterConfigEntity getClusterConfigEntity() {
-    return clusterConfigEntity;
-  }
-
-  public void setClusterConfigEntity(ClusterConfigEntity clusterConfigEntity) {
-    this.clusterConfigEntity = clusterConfigEntity;
-  }
-
-}
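
Because the entity above uses @IdClass, a row is addressed with an instance of
the PK class that follows. A lookup sketch (em is an assumed EntityManager;
the key values are illustrative only):

    ComponentConfigMappingEntityPK pk = new ComponentConfigMappingEntityPK();
    pk.setClusterId(1L);
    pk.setServiceName("HDFS");
    pk.setComponentName("NAMENODE");
    pk.setConfigType("core-site");
    ComponentConfigMappingEntity mapping =
        em.find(ComponentConfigMappingEntity.class, pk);
    if (mapping != null) {
      mapping.setVersionTag("version2");               // repoint to another config
      mapping.setTimestamp(System.currentTimeMillis());
    }
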
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ComponentConfigMappingEntityPK.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ComponentConfigMappingEntityPK.java
deleted file mode 100644
index 9eeef26..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ComponentConfigMappingEntityPK.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.orm.entities;
-
-import java.io.Serializable;
-
-import javax.persistence.Column;
-import javax.persistence.Id;
-
-/**
- * @author ncole
- *
- */
-@SuppressWarnings("serial")
-public class ComponentConfigMappingEntityPK implements Serializable {
-  private Long clusterId;
-  private String serviceName;
-  private String componentName;
-  private String configType;
-
-  @Id
-  @Column(name = "cluster_id", insertable = true, updatable = true, nullable = false, length = 10)
-  public Long getClusterId() {
-    return clusterId;
-  }
-
-  public void setClusterId(Long clusterId) {
-    this.clusterId = clusterId;
-  }
-
-  @Id
-  @Column(name = "service_name", insertable = true, updatable = true, nullable = false)
-  public String getServiceName() {
-    return serviceName;
-  }
-
-  public void setServiceName(String serviceName) {
-    this.serviceName = serviceName;
-  }
-
-  @Column(name = "component_name", insertable = true, updatable = true, nullable = false)
-  @Id
-  public String getComponentName() {
-    return componentName;
-  }
-
-  public void setComponentName(String name) {
-    componentName = name;
-  }
-
-  @Column(name = "config_type", insertable = true, updatable = false, nullable = false)
-  @Id
-  public String getConfigType() {
-    return configType;
-  }
-
-  public void setConfigType(String type) {
-    configType = type;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    ComponentConfigMappingEntityPK that = (ComponentConfigMappingEntityPK) o;
-
-    if (clusterId != null ? !clusterId.equals(that.clusterId) : that.clusterId != null) return false;
-    if (serviceName != null ? !serviceName.equals(that.serviceName) : that.serviceName != null) return false;
-    if (componentName != null ? !componentName.equals(that.componentName) : that.componentName != null) return false;
-    if (configType != null ? !configType.equals(that.configType) : that.configType != null) return false;
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = clusterId != null ? clusterId.intValue() : 0;
-    result = 31 * result + (serviceName != null ? serviceName.hashCode() : 0);
-    result = 31 * result + (componentName != null ? componentName.hashCode() : 0);
-    result = 31 * result + (configType != null ? configType.hashCode() : 0);
-    return result;
-  }
-}
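
A note that applies to every *EntityPK class in this patch: JPA requires a
composite primary-key class to be Serializable and to implement equals() and
hashCode() across all fields of the key; omitting a key field (configType,
serviceName, and so on) from equals() allows two distinct keys to compare
equal inside the persistence context.
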
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ExecutionCommandEntity.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ExecutionCommandEntity.java
deleted file mode 100644
index e8eaca5..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ExecutionCommandEntity.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.entities;
-
-import javax.persistence.*;
-import java.util.Arrays;
-
-@Table(name = "execution_command", schema = "ambari", catalog = "")
-@Entity
-public class ExecutionCommandEntity {
-  private Long taskId;
-
-  @Column(name = "task_id")
-  @Id
-  public Long getTaskId() {
-    return taskId;
-  }
-
-  public void setTaskId(Long taskId) {
-    this.taskId = taskId;
-  }
-
-  private byte[] command;
-
-  @Column(name = "command")
-  @Lob
-  @Basic
-  public byte[] getCommand() {
-    return command;
-  }
-
-  public void setCommand(byte[] command) {
-    this.command = command;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    ExecutionCommandEntity that = (ExecutionCommandEntity) o;
-
-    if (command != null ? !Arrays.equals(command, that.command) : that.command != null) return false;
-    if (taskId != null ? !taskId.equals(that.taskId) : that.taskId != null) return false;
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = taskId != null ? taskId.hashCode() : 0;
-    result = 31 * result + (command != null ? Arrays.hashCode(command) : 0);
-    return result;
-  }
-
-  private HostRoleCommandEntity hostRoleCommand;
-
-  @OneToOne
-  @JoinColumn(name = "task_id", referencedColumnName = "task_id", nullable = false, insertable = false, updatable = false)
-  public HostRoleCommandEntity getHostRoleCommand() {
-    return hostRoleCommand;
-  }
-
-  public void setHostRoleCommand(HostRoleCommandEntity hostRoleCommandByTaskId) {
-    this.hostRoleCommand = hostRoleCommandByTaskId;
-  }
-}
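
The command payload is carried as a @Lob byte array keyed by task id. A sketch
of attaching a serialized command to its task row (the task id and JSON body
are illustrative; em is an assumed EntityManager):

    String json = "{\"commandType\":\"EXECUTION_COMMAND\"}"; // hypothetical payload
    ExecutionCommandEntity cmd = new ExecutionCommandEntity();
    cmd.setTaskId(42L);
    cmd.setCommand(json.getBytes(java.nio.charset.Charset.forName("UTF-8")));
    em.persist(cmd);
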
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentConfigMappingEntity.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentConfigMappingEntity.java
deleted file mode 100644
index df72439..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentConfigMappingEntity.java
+++ /dev/null
@@ -1,166 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.orm.entities;
-
-import javax.persistence.Column;
-import javax.persistence.Entity;
-import javax.persistence.Id;
-import javax.persistence.IdClass;
-import javax.persistence.JoinColumn;
-import javax.persistence.JoinColumns;
-import javax.persistence.ManyToOne;
-import javax.persistence.Table;
-
-@IdClass(HostComponentConfigMappingEntityPK.class)
-@Table(name = "hostcomponentconfigmapping", schema = "ambari", catalog = "")
-@Entity
-public class HostComponentConfigMappingEntity {
-  private Long clusterId;
-  private String serviceName;
-  private String componentName;
-  private String hostName;
-  private String configType;
-  private String configTag;
-  private Long timestamp;
-  private HostComponentStateEntity hostComponentStateEntity;
-  private ClusterConfigEntity clusterConfigEntity;
-
-  @Column(name = "cluster_id", insertable = false, updatable = false, nullable = false)
-  @Id
-  public Long getClusterId() {
-    return clusterId;
-  }
-
-  public void setClusterId(Long id) {
-    clusterId = id;
-  }
-
-  @Column(name = "service_name", insertable = false, updatable = false, nullable = false)
-  @Id
-  public String getServiceName() {
-    return serviceName;
-  }
-
-  public void setServiceName(String name) {
-    serviceName = name;
-  }
-
-  @Column(name = "component_name", insertable = false, updatable = false, nullable = false)
-  @Id
-  public String getComponentName() {
-    return componentName;
-  }
-
-  public void setComponentName(String name) {
-    componentName = name;
-  }
-
-  @Column(name = "host_name", insertable = false, updatable = false, nullable = false)
-  @Id
-  public String getHostName() {
-    return hostName;
-  }
-
-  public void setHostName(String name) {
-    hostName = name;
-  }
-
-  @Column(name = "config_type", insertable = true, updatable = false, nullable = false)
-  @Id
-  public String getConfigType() {
-    return configType;
-  }
-
-  public void setConfigType(String type) {
-    configType = type;
-  }
-
-  @Column(name = "config_tag", nullable = false, insertable = true, updatable = true)
-  public String getVersionTag() {
-    return configTag;
-  }
-
-  public void setVersionTag(String tag) {
-    configTag = tag;
-  }
-
-  @Column(name="timestamp", nullable = false, insertable = true, updatable = true)
-  public Long getTimestamp() {
-    return timestamp;
-  }
-
-  public void setTimestamp(Long stamp) {
-    timestamp = stamp;
-  }
-
-  @ManyToOne
-  @JoinColumns({
-    @JoinColumn(name = "cluster_id", referencedColumnName = "cluster_id", nullable = false),
-    @JoinColumn(name = "service_name", referencedColumnName = "service_name", nullable = false),
-    @JoinColumn(name = "component_name", referencedColumnName = "component_name", nullable = false),
-    @JoinColumn(name = "host_name", referencedColumnName = "host_name", nullable = false) })
-  public HostComponentStateEntity getHostComponentStateEntity() {
-    return hostComponentStateEntity;
-  }
-
-  public void setHostComponentStateEntity(HostComponentStateEntity entity) {
-    hostComponentStateEntity = entity;
-  }
-
-  @ManyToOne
-  @JoinColumns({
-      @JoinColumn(name = "cluster_id", referencedColumnName = "cluster_id", nullable = false, insertable = false, updatable = false),
-      @JoinColumn(name = "config_type", referencedColumnName = "type_name", nullable = false, insertable = false, updatable = false),
-      @JoinColumn(name = "config_tag", referencedColumnName = "version_tag", nullable = false, insertable = false, updatable = false)
-  })
-  public ClusterConfigEntity getClusterConfigEntity() {
-    return clusterConfigEntity;
-  }
-
-  public void setClusterConfigEntity(ClusterConfigEntity clusterConfigEntity) {
-    this.clusterConfigEntity = clusterConfigEntity;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    HostComponentConfigMappingEntity that = (HostComponentConfigMappingEntity) o;
-
-    if (clusterId != null ? !clusterId.equals(that.clusterId) : that.clusterId != null) return false;
-    if (serviceName != null ? !serviceName.equals(that.serviceName) : that.serviceName != null) return false;
-    if (componentName != null ? !componentName.equals(that.componentName) : that.componentName != null) return false;
-    if (hostName != null ? !hostName.equals(that.hostName) : that.hostName != null) return false;
-    if (configType != null ? !configType.equals(that.configType) : that.configType != null) return false;
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = clusterId != null ? clusterId.intValue() : 0;
-    result = 31 * result + (serviceName != null ? serviceName.hashCode() : 0);
-    result = 31 * result + (componentName != null ? componentName.hashCode() : 0);
-    result = 31 * result + (hostName != null ? hostName.hashCode() : 0);
-    result = 31 * result + (configType != null ? configType.hashCode() : 0);
-    return result;
-  }
-
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentConfigMappingEntityPK.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentConfigMappingEntityPK.java
deleted file mode 100644
index a267039..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentConfigMappingEntityPK.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.orm.entities;
-
-import javax.persistence.Column;
-import javax.persistence.Id;
-import java.io.Serializable;
-
-public class HostComponentConfigMappingEntityPK implements Serializable {
-  private Long clusterId;
-  private String serviceName;
-  private String componentName;
-  private String hostName;
-  private String configType;
-
-  @Id
-  @Column(name = "cluster_id", insertable = true, updatable = true, nullable = false, length = 10)
-  public Long getClusterId() {
-    return clusterId;
-  }
-
-  public void setClusterId(Long clusterId) {
-    this.clusterId = clusterId;
-  }
-
-  @Id
-  @Column(name = "service_name", insertable = true, updatable = true, nullable = false)
-  public String getServiceName() {
-    return serviceName;
-  }
-
-  public void setServiceName(String serviceName) {
-    this.serviceName = serviceName;
-  }
-
-  @Column(name = "component_name", insertable = true, updatable = true, nullable = false)
-  @Id
-  public String getComponentName() {
-    return componentName;
-  }
-
-  public void setComponentName(String name) {
-    componentName = name;
-  }
-
-  @Column(name = "host_name", insertable = true, updatable = true, nullable = false)
-  @Id
-  public String getHostName() {
-    return hostName;
-  }
-
-  public void setHostName(String name) {
-    hostName = name;
-  }
-
-  @Column(name = "config_type", insertable = true, updatable = false, nullable = false)
-  @Id
-  public String getConfigType() {
-    return configType;
-  }
-
-  public void setConfigType(String configType) {
-    this.configType = configType;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    HostComponentConfigMappingEntityPK that = (HostComponentConfigMappingEntityPK) o;
-
-    if (clusterId != null ? !clusterId.equals(that.clusterId) : that.clusterId != null) return false;
-    if (componentName != null ? !componentName.equals(that.componentName) : that.componentName != null) return false;
-    if (serviceName != null ? !serviceName.equals(that.serviceName) : that.serviceName != null) return false;
-    if (hostName != null ? !hostName.equals(that.hostName) : that.hostName != null) return false;
-    if (configType != null ? !configType.equals(that.configType) : that.configType != null) return false;
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = clusterId != null ? clusterId.intValue() : 0;
-    result = 31 * result + (serviceName != null ? serviceName.hashCode() : 0);
-    result = 31 * result + (componentName != null ? componentName.hashCode() : 0);
-    result = 31 * result + (hostName != null ? hostName.hashCode() : 0);
-    result = 31 * result + (configType != null ? configType.hashCode() : 0);
-    return result;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentDesiredConfigMappingEntity.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentDesiredConfigMappingEntity.java
deleted file mode 100644
index f308193..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentDesiredConfigMappingEntity.java
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.orm.entities;
-
-import javax.persistence.Column;
-import javax.persistence.Entity;
-import javax.persistence.Id;
-import javax.persistence.IdClass;
-import javax.persistence.JoinColumn;
-import javax.persistence.JoinColumns;
-import javax.persistence.ManyToOne;
-import javax.persistence.Table;
-
-@IdClass(HostComponentDesiredConfigMappingEntityPK.class)
-@Table(name = "hostcomponentdesiredconfigmapping", schema = "ambari", catalog = "")
-@Entity
-public class HostComponentDesiredConfigMappingEntity {
-  private Long clusterId;
-  private String serviceName;
-  private String componentName;
-  private String hostName;
-  private String configType;
-  private String configTag;
-  private Long timestamp;
-  private HostComponentDesiredStateEntity hostComponentEntity;
-  private ClusterConfigEntity clusterConfigEntity;
-
-  @Column(name = "cluster_id", insertable = false, updatable = false, nullable = false)
-  @Id
-  public Long getClusterId() {
-    return clusterId;
-  }
-  
-  public void setClusterId(Long id) {
-    clusterId = id;
-  }
-  
-  @Column(name = "service_name", insertable = false, updatable = false, nullable = false)
-  @Id
-  public String getServiceName() {
-    return serviceName;
-  }
-  
-  public void setServiceName(String name) {
-    serviceName = name;
-  }
-  
-  @Column(name = "component_name", insertable = false, updatable = false, nullable = false)
-  @Id
-  public String getComponentName() {
-    return componentName;
-  }
-  
-  public void setComponentName(String name) {
-    componentName = name;
-  }
-  
-  @Column(name = "host_name", insertable = false, updatable = false, nullable = false)
-  @Id
-  public String getHostName() {
-    return hostName;
-  }
-  
-  public void setHostName(String name) {
-    hostName = name;
-  }
-
-  @Column(name = "config_type", insertable = true, updatable = false, nullable = false)
-  @Id
-  public String getConfigType() {
-    return configType;
-  }
-  
-  public void setConfigType(String type) {
-    configType = type;
-  }
-  
-  @Column(name = "config_tag", nullable = false, insertable = true, updatable = true)
-  public String getVersionTag() {
-    return configTag;
-  }
-  
-  public void setVersionTag(String tag) {
-    configTag = tag;
-  }
-  
-  @Column(name="timestamp", nullable = false, insertable = true, updatable = true)
-  public Long getTimestamp() {
-    return timestamp;
-  }
-  
-  public void setTimestamp(Long stamp) {
-    timestamp = stamp;
-  }
-  
-  @ManyToOne
-  @JoinColumns({
-    @JoinColumn(name = "cluster_id", referencedColumnName = "cluster_id", nullable = false),
-    @JoinColumn(name = "service_name", referencedColumnName = "service_name", nullable = false),
-    @JoinColumn(name = "component_name", referencedColumnName = "component_name", nullable = false),
-    @JoinColumn(name = "host_name", referencedColumnName = "host_name", nullable = false) })
-  public HostComponentDesiredStateEntity getHostComponentDesiredStateEntity() {
-    return hostComponentEntity;
-  }
-  
-  public void setHostComponentDesiredStateEntity(HostComponentDesiredStateEntity entity) {
-    hostComponentEntity = entity;
-  }
-
-  @ManyToOne
-  @JoinColumns({
-      @JoinColumn(name = "cluster_id", referencedColumnName = "cluster_id", nullable = false, insertable = false, updatable = false),
-      @JoinColumn(name = "config_type", referencedColumnName = "type_name", nullable = false, insertable = false, updatable = false),
-      @JoinColumn(name = "config_tag", referencedColumnName = "version_tag", nullable = false, insertable = false, updatable = false)
-  })
-  public ClusterConfigEntity getClusterConfigEntity() {
-    return clusterConfigEntity;
-  }
-
-  public void setClusterConfigEntity(ClusterConfigEntity clusterConfigEntity) {
-    this.clusterConfigEntity = clusterConfigEntity;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    HostComponentDesiredConfigMappingEntity that = (HostComponentDesiredConfigMappingEntity) o;
-
-    if (clusterId != null ? !clusterId.equals(that.clusterId) : that.clusterId != null) return false;
-    if (serviceName != null ? !serviceName.equals(that.serviceName) : that.serviceName != null) return false;
-    if (componentName != null ? !componentName.equals(that.componentName) : that.componentName != null) return false;
-    if (hostName != null ? !hostName.equals(that.hostName) : that.hostName != null) return false;
-    if (configType != null ? !configType.equals(that.configType) : that.configType != null) return false;
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = clusterId != null ? clusterId.intValue() : 0;
-    result = 31 * result + (serviceName != null ? serviceName.hashCode() : 0);
-    result = 31 * result + (componentName != null ? componentName.hashCode() : 0);
-    result = 31 * result + (hostName != null ? hostName.hashCode() : 0);
-    result = 31 * result + (configType != null ? configType.hashCode() : 0);
-    return result;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentDesiredConfigMappingEntityPK.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentDesiredConfigMappingEntityPK.java
deleted file mode 100644
index 165f466..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentDesiredConfigMappingEntityPK.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.orm.entities;
-
-import javax.persistence.Column;
-import javax.persistence.Id;
-import java.io.Serializable;
-
-public class HostComponentDesiredConfigMappingEntityPK implements Serializable {
-  private Long clusterId;
-  private String serviceName;
-  private String componentName;
-  private String hostName;
-  private String configType;
-
-  @Id
-  @Column(name = "cluster_id", insertable = true, updatable = true, nullable = false, length = 10)
-  public Long getClusterId() {
-    return clusterId;
-  }
-
-  public void setClusterId(Long clusterId) {
-    this.clusterId = clusterId;
-  }
-
-  @Id
-  @Column(name = "service_name", insertable = true, updatable = true, nullable = false)
-  public String getServiceName() {
-    return serviceName;
-  }
-
-  public void setServiceName(String serviceName) {
-    this.serviceName = serviceName;
-  }
-
-  @Column(name = "component_name", insertable = true, updatable = true, nullable = false)
-  @Id
-  public String getComponentName() {
-    return componentName;
-  }
-
-  public void setComponentName(String name) {
-    componentName = name;
-  }
-
-  @Column(name = "host_name", insertable = true, updatable = true, nullable = false)
-  @Id
-  public String getHostName() {
-    return hostName;
-  }
-
-  public void setHostName(String name) {
-    hostName = name;
-  }
-
-  @Column(name = "config_type", insertable = true, updatable = false, nullable = false)
-  @Id
-  public String getConfigType() {
-    return configType;
-  }
-
-  public void setConfigType(String configType) {
-    this.configType = configType;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    HostComponentDesiredConfigMappingEntityPK that = (HostComponentDesiredConfigMappingEntityPK) o;
-
-    if (clusterId != null ? !clusterId.equals(that.clusterId) : that.clusterId != null) return false;
-    if (componentName != null ? !componentName.equals(that.componentName) : that.componentName != null) return false;
-    if (serviceName != null ? !serviceName.equals(that.serviceName) : that.serviceName != null) return false;
-    if (hostName != null ? !hostName.equals(that.hostName) : that.hostName != null) return false;
-    if (configType != null ? !configType.equals(that.configType) : that.configType != null) return false;
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = clusterId != null ? clusterId.intValue() : 0;
-    result = 31 * result + (serviceName != null ? serviceName.hashCode() : 0);
-    result = 31 * result + (componentName != null ? componentName.hashCode() : 0);
-    result = 31 * result + (hostName != null ? hostName.hashCode() : 0);
-    result = 31 * result + (configType != null ? configType.hashCode() : 0);
-    return result;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentDesiredStateEntity.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentDesiredStateEntity.java
deleted file mode 100644
index b9453b7..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentDesiredStateEntity.java
+++ /dev/null
@@ -1,169 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.orm.entities;
-
-import java.util.Collection;
-
-import javax.persistence.*;
-
-import org.apache.ambari.server.state.State;
-
-@javax.persistence.IdClass(HostComponentDesiredStateEntityPK.class)
-@javax.persistence.Table(name = "hostcomponentdesiredstate", schema = "ambari", catalog = "")
-@Entity
-public class HostComponentDesiredStateEntity {
-  private Long clusterId;
-
-  @javax.persistence.Column(name = "cluster_id", nullable = false, insertable = false, updatable = false, length = 10)
-  @Id
-  public Long getClusterId() {
-    return clusterId;
-  }
-
-  public void setClusterId(Long clusterId) {
-    this.clusterId = clusterId;
-  }
-
-  private String serviceName;
-
-  @javax.persistence.Column(name = "service_name", nullable = false, insertable = false, updatable = false)
-  @Id
-  public String getServiceName() {
-    return serviceName;
-  }
-
-  public void setServiceName(String serviceName) {
-    this.serviceName = serviceName;
-  }
-
-  private String hostName = "";
-
-  @javax.persistence.Column(name = "host_name", nullable = false, insertable = false, updatable = false)
-  @Id
-  public String getHostName() {
-    return hostName;
-  }
-
-  public void setHostName(String hostName) {
-    this.hostName = hostName;
-  }
-
-  private String componentName = "";
-
-  @javax.persistence.Column(name = "component_name", nullable = false, insertable = false, updatable = false)
-  @Id
-  public String getComponentName() {
-    return componentName;
-  }
-
-  public void setComponentName(String componentName) {
-    this.componentName = componentName;
-  }
-
-  private State desiredState = State.INIT;
-
-  @javax.persistence.Column(name = "desired_state", nullable = false, insertable = true, updatable = true)
-  @Enumerated(value = EnumType.STRING)
-  @Basic
-  public State getDesiredState() {
-    return desiredState;
-  }
-
-  public void setDesiredState(State desiredState) {
-    this.desiredState = desiredState;
-  }
-
-  private String desiredStackVersion = "";
-
-  @javax.persistence.Column(name = "desired_stack_version", nullable = false, insertable = true, updatable = true)
-  @Basic
-  public String getDesiredStackVersion() {
-    return desiredStackVersion;
-  }
-
-  public void setDesiredStackVersion(String desiredStackVersion) {
-    this.desiredStackVersion = desiredStackVersion;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    HostComponentDesiredStateEntity that = (HostComponentDesiredStateEntity) o;
-
-    if (clusterId != null ? !clusterId.equals(that.clusterId) : that.clusterId != null) return false;
-    if (componentName != null ? !componentName.equals(that.componentName) : that.componentName != null) return false;
-    if (desiredStackVersion != null ? !desiredStackVersion.equals(that.desiredStackVersion) : that.desiredStackVersion != null)
-      return false;
-    if (desiredState != null ? !desiredState.equals(that.desiredState) : that.desiredState != null) return false;
-    if (hostName != null ? !hostName.equals(that.hostName) : that.hostName != null) return false;
-    if (serviceName != null ? !serviceName.equals(that.serviceName) : that.serviceName != null) return false;
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = clusterId != null ? clusterId.intValue() : 0;
-    result = 31 * result + (hostName != null ? hostName.hashCode() : 0);
-    result = 31 * result + (componentName != null ? componentName.hashCode() : 0);
-    result = 31 * result + (desiredState != null ? desiredState.hashCode() : 0);
-    result = 31 * result + (desiredStackVersion != null ? desiredStackVersion.hashCode() : 0);
-    result = 31 * result + (serviceName != null ? serviceName.hashCode() : 0);
-    return result;
-  }
-
-  private ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity;
-
-  @ManyToOne
-  @JoinColumns({
-      @JoinColumn(name = "cluster_id", referencedColumnName = "cluster_id", nullable = false),
-      @JoinColumn(name = "service_name", referencedColumnName = "service_name", nullable = false),
-      @JoinColumn(name = "component_name", referencedColumnName = "component_name", nullable = false)})
-  public ServiceComponentDesiredStateEntity getServiceComponentDesiredStateEntity() {
-    return serviceComponentDesiredStateEntity;
-  }
-
-  public void setServiceComponentDesiredStateEntity(ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity) {
-    this.serviceComponentDesiredStateEntity = serviceComponentDesiredStateEntity;
-  }
-
-  private HostEntity hostEntity;
-
-  @ManyToOne
-  @javax.persistence.JoinColumn(name = "host_name", referencedColumnName = "host_name", nullable = false)
-  public HostEntity getHostEntity() {
-    return hostEntity;
-  }
-
-  public void setHostEntity(HostEntity hostEntity) {
-    this.hostEntity = hostEntity;
-  }
-
-  private Collection<HostComponentDesiredConfigMappingEntity> desiredConfigMappingEntities;
-  @OneToMany(mappedBy = "hostComponentDesiredStateEntity", cascade = CascadeType.ALL)
-  public Collection<HostComponentDesiredConfigMappingEntity> getHostComponentDesiredConfigMappingEntities() {
-    return desiredConfigMappingEntities;
-  }
-
-  public void setHostComponentDesiredConfigMappingEntities(Collection<HostComponentDesiredConfigMappingEntity> entities) {
-    desiredConfigMappingEntities = entities;
-  }
-
-}
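
Since desired_state is mapped with EnumType.STRING, the column stores enum
constant names ("INIT", "STARTED", ...), so renaming a State constant is
effectively a schema change. A sketch of moving one component's desired state
through the composite key (values and em are illustrative; assumes State
defines STARTED):

    HostComponentDesiredStateEntityPK pk = new HostComponentDesiredStateEntityPK();
    pk.setClusterId(1L);
    pk.setServiceName("HDFS");
    pk.setHostName("host1.example.com");
    pk.setComponentName("DATANODE");

    em.getTransaction().begin();
    HostComponentDesiredStateEntity desired =
        em.find(HostComponentDesiredStateEntity.class, pk);
    desired.setDesiredState(State.STARTED); // persisted as the string "STARTED"
    em.getTransaction().commit();
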
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentDesiredStateEntityPK.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentDesiredStateEntityPK.java
deleted file mode 100644
index fc92858..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentDesiredStateEntityPK.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.orm.entities;
-
-import javax.persistence.Column;
-import javax.persistence.Id;
-import java.io.Serializable;
-
-@SuppressWarnings("serial")
-public class HostComponentDesiredStateEntityPK implements Serializable {
-  private Long clusterId;
-
-  @Id
-  @Column(name = "cluster_id", nullable = false, insertable = true, updatable = true, length = 10)
-  public Long getClusterId() {
-    return clusterId;
-  }
-
-  public void setClusterId(Long clusterId) {
-    this.clusterId = clusterId;
-  }
-
-  private String serviceName;
-
-  @Id
-  @Column(name = "service_name", nullable = false, insertable = true, updatable = true)
-  public String getServiceName() {
-    return serviceName;
-  }
-
-  public void setServiceName(String serviceName) {
-    this.serviceName = serviceName;
-  }
-
-  private String hostName;
-
-  @Id
-  @Column(name = "host_name", nullable = false, insertable = true, updatable = true)
-  public String getHostName() {
-    return hostName;
-  }
-
-  public void setHostName(String hostName) {
-    this.hostName = hostName;
-  }
-
-  private String componentName;
-
-  @Id
-  @Column(name = "component_name", nullable = false, insertable = true, updatable = true)
-  public String getComponentName() {
-    return componentName;
-  }
-
-  public void setComponentName(String componentName) {
-    this.componentName = componentName;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    HostComponentDesiredStateEntityPK that = (HostComponentDesiredStateEntityPK) o;
-
-    if (clusterId != null ? !clusterId.equals(that.clusterId) : that.clusterId != null) return false;
-    if (serviceName != null ? !serviceName.equals(that.serviceName) : that.serviceName != null) return false;
-    if (componentName != null ? !componentName.equals(that.componentName) : that.componentName != null) return false;
-    if (hostName != null ? !hostName.equals(that.hostName) : that.hostName != null) return false;
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = clusterId != null ? clusterId.intValue() : 0;
-    result = 31 * result + (serviceName != null ? serviceName.hashCode() : 0);
-    result = 31 * result + (hostName != null ? hostName.hashCode() : 0);
-    result = 31 * result + (componentName != null ? componentName.hashCode() : 0);
-    return result;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentStateEntity.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentStateEntity.java
deleted file mode 100644
index 8eaf827..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentStateEntity.java
+++ /dev/null
@@ -1,168 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.entities;
-
-import java.util.Collection;
-
-import org.apache.ambari.server.state.State;
-
-import javax.persistence.*;
-
-@javax.persistence.IdClass(HostComponentStateEntityPK.class)
-@javax.persistence.Table(name = "hostcomponentstate", schema = "ambari", catalog = "")
-@Entity
-public class HostComponentStateEntity {
-  private Long clusterId;
-
-  @javax.persistence.Column(name = "cluster_id", nullable = false, insertable = false, updatable = false, length = 10)
-  @Id
-  public Long getClusterId() {
-    return clusterId;
-  }
-
-  public void setClusterId(Long clusterId) {
-    this.clusterId = clusterId;
-  }
-
-  private String serviceName;
-
-  @javax.persistence.Column(name = "service_name", nullable = false, insertable = false, updatable = false)
-  @Id
-  public String getServiceName() {
-    return serviceName;
-  }
-
-  public void setServiceName(String serviceName) {
-    this.serviceName = serviceName;
-  }
-
-  private String hostName = "";
-
-  @javax.persistence.Column(name = "host_name", nullable = false, insertable = false, updatable = false)
-  @Id
-  public String getHostName() {
-    return hostName;
-  }
-
-  public void setHostName(String hostName) {
-    this.hostName = hostName;
-  }
-
-  private String componentName;
-
-  @javax.persistence.Column(name = "component_name", nullable = false, insertable = false, updatable = false)
-  @Id
-  public String getComponentName() {
-    return componentName;
-  }
-
-  public void setComponentName(String componentName) {
-    this.componentName = componentName;
-  }
-
-  private State currentState = State.INIT;
-
-  @javax.persistence.Column(name = "current_state", nullable = false, insertable = true, updatable = true)
-  @Enumerated(value = EnumType.STRING)
-  public State getCurrentState() {
-    return currentState;
-  }
-
-  public void setCurrentState(State currentState) {
-    this.currentState = currentState;
-  }
-
-  private String currentStackVersion;
-
-  @javax.persistence.Column(name = "current_stack_version", nullable = false, insertable = true, updatable = true)
-  @Basic
-  public String getCurrentStackVersion() {
-    return currentStackVersion;
-  }
-
-  public void setCurrentStackVersion(String currentStackVersion) {
-    this.currentStackVersion = currentStackVersion;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    HostComponentStateEntity that = (HostComponentStateEntity) o;
-
-    if (clusterId != null ? !clusterId.equals(that.clusterId) : that.clusterId != null) return false;
-    if (componentName != null ? !componentName.equals(that.componentName) : that.componentName != null) return false;
-    if (currentStackVersion != null ? !currentStackVersion.equals(that.currentStackVersion) : that.currentStackVersion != null)
-      return false;
-    if (currentState != null ? !currentState.equals(that.currentState) : that.currentState != null) return false;
-    if (hostName != null ? !hostName.equals(that.hostName) : that.hostName != null) return false;
-    if (serviceName != null ? !serviceName.equals(that.serviceName) : that.serviceName != null) return false;
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = clusterId != null ? clusterId.intValue() : 0;
-    result = 31 * result + (hostName != null ? hostName.hashCode() : 0);
-    result = 31 * result + (componentName != null ? componentName.hashCode() : 0);
-    result = 31 * result + (currentState != null ? currentState.hashCode() : 0);
-    result = 31 * result + (currentStackVersion != null ? currentStackVersion.hashCode() : 0);
-    result = 31 * result + (serviceName != null ? serviceName.hashCode() : 0);
-    return result;
-  }
-
-  private ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity;
-
-  @ManyToOne
-  @JoinColumns({
-      @JoinColumn(name = "cluster_id", referencedColumnName = "cluster_id", nullable = false),
-      @JoinColumn(name = "service_name", referencedColumnName = "service_name", nullable = false),
-      @JoinColumn(name = "component_name", referencedColumnName = "component_name", nullable = false)})
-  public ServiceComponentDesiredStateEntity getServiceComponentDesiredStateEntity() {
-    return serviceComponentDesiredStateEntity;
-  }
-
-  public void setServiceComponentDesiredStateEntity(ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity) {
-    this.serviceComponentDesiredStateEntity = serviceComponentDesiredStateEntity;
-  }
-
-  private HostEntity hostEntity;
-
-  @ManyToOne
-  @JoinColumn(name = "host_name", referencedColumnName = "host_name", nullable = false)
-  public HostEntity getHostEntity() {
-    return hostEntity;
-  }
-
-  public void setHostEntity(HostEntity hostEntity) {
-    this.hostEntity = hostEntity;
-  }
-
-  private Collection<HostComponentConfigMappingEntity> configMappingEntities;
-  @OneToMany(mappedBy = "hostComponentStateEntity", cascade = CascadeType.ALL)
-  public Collection<HostComponentConfigMappingEntity> getHostComponentConfigMappingEntities() {
-    return configMappingEntities;
-  }
-
-  public void setHostComponentConfigMappingEntities(Collection<HostComponentConfigMappingEntity> entities) {
-    configMappingEntities = entities;
-  }
-}
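
The live-state table can be queried through any mapped property; for example,
a JPQL sketch listing everything reported on a single host (host name and em
are illustrative):

    java.util.List<HostComponentStateEntity> onHost = em.createQuery(
        "SELECT hcs FROM HostComponentStateEntity hcs"
            + " WHERE hcs.hostName = :host",
        HostComponentStateEntity.class)
      .setParameter("host", "host1.example.com")
      .getResultList();
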
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentStateEntityPK.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentStateEntityPK.java
deleted file mode 100644
index bc103a1..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentStateEntityPK.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.entities;
-
-import javax.persistence.Column;
-import javax.persistence.Id;
-import java.io.Serializable;
-
-@SuppressWarnings("serial")
-public class HostComponentStateEntityPK implements Serializable {
-  private Long clusterId;
-
-  @Id
-  @Column(name = "cluster_id", nullable = false, insertable = true, updatable = true, length = 10)
-  public Long getClusterId() {
-    return clusterId;
-  }
-
-  public void setClusterId(Long clusterId) {
-    this.clusterId = clusterId;
-  }
-
-  private String serviceName;
-
-  @Id
-  @Column(name = "service_name", nullable = false, insertable = true, updatable = true)
-  public String getServiceName() {
-    return serviceName;
-  }
-
-  public void setServiceName(String serviceName) {
-    this.serviceName = serviceName;
-  }
-
-  private String hostName;
-
-  @Id
-  @Column(name = "host_name", nullable = false, insertable = true, updatable = true)
-  public String getHostName() {
-    return hostName;
-  }
-
-  public void setHostName(String hostName) {
-    this.hostName = hostName;
-  }
-
-  private String componentName;
-
-  @Id
-  @Column(name = "component_name", nullable = false, insertable = true, updatable = true)
-  public String getComponentName() {
-    return componentName;
-  }
-
-  public void setComponentName(String componentName) {
-    this.componentName = componentName;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    HostComponentStateEntityPK that = (HostComponentStateEntityPK) o;
-
-    if (clusterId != null ? !clusterId.equals(that.clusterId) : that.clusterId != null) return false;
-    if (serviceName != null ? !serviceName.equals(that.serviceName) : that.serviceName != null) return false;
-    if (componentName != null ? !componentName.equals(that.componentName) : that.componentName != null) return false;
-    if (hostName != null ? !hostName.equals(that.hostName) : that.hostName != null) return false;
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = clusterId != null ? clusterId.intValue() : 0;
-    result = 31 * result + (serviceName != null ? serviceName.hashCode() : 0);
-    result = 31 * result + (hostName != null ? hostName.hashCode() : 0);
-    result = 31 * result + (componentName != null ? componentName.hashCode() : 0);
-    return result;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostEntity.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostEntity.java
deleted file mode 100644
index bae7f03..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostEntity.java
+++ /dev/null
@@ -1,338 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.entities;
-
-import javax.persistence.*;
-import java.util.Collection;
-
-@javax.persistence.Table(name = "hosts", schema = "ambari", catalog = "")
-@Entity
-public class HostEntity {
-  private String hostName;
-
-  @javax.persistence.Column(name = "host_name", nullable = false, insertable = true, updatable = true)
-  @Id
-  public String getHostName() {
-    return hostName;
-  }
-
-  public void setHostName(String hostName) {
-    this.hostName = hostName;
-  }
-
-  private String ipv4;
-
-  @javax.persistence.Column(name = "ipv4", nullable = true, insertable = true, updatable = true)
-  @Basic
-  public String getIpv4() {
-    return ipv4;
-  }
-
-  public void setIpv4(String ipv4) {
-    this.ipv4 = ipv4;
-  }
-
-  private String ipv6;
-
-  @javax.persistence.Column(name = "ipv6", nullable = true, insertable = true, updatable = true)
-  @Basic
-  public String getIpv6() {
-    return ipv6;
-  }
-
-  public void setIpv6(String ipv6) {
-    this.ipv6 = ipv6;
-  }
-  
-  private String publicHostName;
-  @Column(name="public_host_name", nullable = true, insertable = true, updatable = true)
-  @Basic
-  public String getPublicHostName() {
-    return publicHostName;
-  }
-  
-  public void setPublicHostName(String name) {
-    publicHostName = name;
-  }
-
-  private Long totalMem = 0L;
-
-  @javax.persistence.Column(name = "total_mem", nullable = false, insertable = true, updatable = true, length = 10)
-  @Basic
-  public Long getTotalMem() {
-    return totalMem;
-  }
-
-  public void setTotalMem(Long totalMem) {
-    this.totalMem = totalMem;
-  }
-
-  private Integer cpuCount = 0;
-
-  @javax.persistence.Column(name = "cpu_count", nullable = false, insertable = true, updatable = true, length = 10)
-  @Basic
-  public Integer getCpuCount() {
-    return cpuCount;
-  }
-
-  public void setCpuCount(Integer cpuCount) {
-    this.cpuCount = cpuCount;
-  }
-
-  private String cpuInfo = "";
-
-  @javax.persistence.Column(name = "cpu_info", nullable = false, insertable = true, updatable = true)
-  @Basic
-  public String getCpuInfo() {
-    return cpuInfo;
-  }
-
-  public void setCpuInfo(String cpuInfo) {
-    this.cpuInfo = cpuInfo;
-  }
-
-  private String osArch = "";
-
-  @javax.persistence.Column(name = "os_arch", nullable = false, insertable = true, updatable = true)
-  @Basic
-  public String getOsArch() {
-    return osArch;
-  }
-
-  public void setOsArch(String osArch) {
-    this.osArch = osArch;
-  }
-
-  private String disksInfo = "";
-
-  @javax.persistence.Column(name = "disks_info", nullable = false, insertable = true,
-      updatable = true, length = 2000)
-  @Basic
-  public String getDisksInfo() {
-    return disksInfo;
-  }
-
-  public void setDisksInfo(String disksInfo) {
-    this.disksInfo = disksInfo;
-  }
-
-  private String osInfo = "";
-
-  @javax.persistence.Column(name = "os_info", nullable = false, insertable = true, updatable = true,
-      length = 1000)
-  @Basic
-  public String getOsInfo() {
-    return osInfo;
-  }
-
-  public void setOsInfo(String osInfo) {
-    this.osInfo = osInfo;
-  }
-
-  private String osType = "";
-
-  @javax.persistence.Column(name = "os_type", nullable = false, insertable = true, updatable = true)
-  @Basic
-  public String getOsType() {
-    return osType;
-  }
-
-  public void setOsType(String osType) {
-    this.osType = osType;
-  }
-
-  private String discoveryStatus = "";
-
-  @javax.persistence.Column(name = "discovery_status", nullable = false, insertable = true, updatable = true,
-      length = 2000)
-  @Basic
-  public String getDiscoveryStatus() {
-    return discoveryStatus;
-  }
-
-  public void setDiscoveryStatus(String discoveryStatus) {
-    this.discoveryStatus = discoveryStatus;
-  }
-
-  private Long lastRegistrationTime = 0L;
-
-  @javax.persistence.Column(name = "last_registration_time", nullable = false, insertable = true, updatable = true, length = 10)
-  @Basic
-  public Long getLastRegistrationTime() {
-    return lastRegistrationTime;
-  }
-
-  public void setLastRegistrationTime(Long lastRegistrationTime) {
-    this.lastRegistrationTime = lastRegistrationTime;
-  }
-
-  private String rackInfo = "/default-rack";
-
-  @javax.persistence.Column(name = "rack_info", nullable = false, insertable = true, updatable = true)
-  @Basic
-  public String getRackInfo() {
-    return rackInfo;
-  }
-
-  public void setRackInfo(String rackInfo) {
-    this.rackInfo = rackInfo;
-  }
-
-  private String hostAttributes = "";
-
-  @javax.persistence.Column(name = "host_attributes", nullable = false, insertable = true, updatable = true,
-      length = 20000)
-  @Basic
-  public String getHostAttributes() {
-    return hostAttributes;
-  }
-
-  public void setHostAttributes(String hostAttributes) {
-    this.hostAttributes = hostAttributes;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    HostEntity that = (HostEntity) o;
-
-    if (cpuCount != null ? !cpuCount.equals(that.cpuCount) : that.cpuCount != null) return false;
-    if (lastRegistrationTime != null ? !lastRegistrationTime.equals(that.lastRegistrationTime) : that.lastRegistrationTime != null) return false;
-    if (totalMem != null ? !totalMem.equals(that.totalMem) : that.totalMem != null) return false;
-    if (cpuInfo != null ? !cpuInfo.equals(that.cpuInfo) : that.cpuInfo != null) return false;
-    if (discoveryStatus != null ? !discoveryStatus.equals(that.discoveryStatus) : that.discoveryStatus != null)
-      return false;
-    if (disksInfo != null ? !disksInfo.equals(that.disksInfo) : that.disksInfo != null) return false;
-    if (hostAttributes != null ? !hostAttributes.equals(that.hostAttributes) : that.hostAttributes != null)
-      return false;
-    if (hostName != null ? !hostName.equals(that.hostName) : that.hostName != null) return false;
-    if (ipv4 != null ? !ipv4.equals(that.ipv4) : that.ipv4 != null) return false;
-    if (osArch != null ? !osArch.equals(that.osArch) : that.osArch != null) return false;
-    if (osInfo != null ? !osInfo.equals(that.osInfo) : that.osInfo != null) return false;
-    if (osType != null ? !osType.equals(that.osType) : that.osType != null) return false;
-    if (rackInfo != null ? !rackInfo.equals(that.rackInfo) : that.rackInfo != null) return false;
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = hostName != null ? hostName.hashCode() : 0;
-    result = 31 * result + (ipv4 != null ? ipv4.hashCode() : 0);
-    result = 31 * result + (totalMem != null ? totalMem.intValue() : 0);
-    result = 31 * result + (cpuCount != null ? cpuCount.hashCode() : 0);
-    result = 31 * result + (cpuInfo != null ? cpuInfo.hashCode() : 0);
-    result = 31 * result + (osArch != null ? osArch.hashCode() : 0);
-    result = 31 * result + (disksInfo != null ? disksInfo.hashCode() : 0);
-    result = 31 * result + (osInfo != null ? osInfo.hashCode() : 0);
-    result = 31 * result + (osType != null ? osType.hashCode() : 0);
-    result = 31 * result + (discoveryStatus != null ? discoveryStatus.hashCode() : 0);
-    result = 31 * result + (lastRegistrationTime != null ? lastRegistrationTime.intValue() : 0);
-    result = 31 * result + (rackInfo != null ? rackInfo.hashCode() : 0);
-    result = 31 * result + (hostAttributes != null ? hostAttributes.hashCode() : 0);
-    return result;
-  }
-
-  private Collection<HostComponentDesiredStateEntity> hostComponentDesiredStateEntities;
-
-  @OneToMany(mappedBy = "hostEntity")
-  public Collection<HostComponentDesiredStateEntity> getHostComponentDesiredStateEntities() {
-    return hostComponentDesiredStateEntities;
-  }
-
-  public void setHostComponentDesiredStateEntities(Collection<HostComponentDesiredStateEntity> hostComponentDesiredStateEntities) {
-    this.hostComponentDesiredStateEntities = hostComponentDesiredStateEntities;
-  }
-
-  private Collection<HostComponentStateEntity> hostComponentStateEntities;
-
-  @OneToMany(mappedBy = "hostEntity")
-  public Collection<HostComponentStateEntity> getHostComponentStateEntities() {
-    return hostComponentStateEntities;
-  }
-
-  public void setHostComponentStateEntities(Collection<HostComponentStateEntity> hostComponentStateEntities) {
-    this.hostComponentStateEntities = hostComponentStateEntities;
-  }
-
-  private Collection<ClusterEntity> clusterEntities;
-
-  @ManyToMany
-//  @JoinColumn(name = "cluster_id", referencedColumnName = "cluster_id")
-  @JoinTable(name = "ClusterHostMapping", catalog = "", schema = "ambari",
-          joinColumns = {@JoinColumn(name = "host_name", referencedColumnName = "host_name")},
-          inverseJoinColumns = {@JoinColumn(name = "cluster_id", referencedColumnName = "cluster_id")}
-
-  )
-  public Collection<ClusterEntity> getClusterEntities() {
-    return clusterEntities;
-  }
-
-  public void setClusterEntities(Collection<ClusterEntity> clusterEntities) {
-    this.clusterEntities = clusterEntities;
-  }
-
-  private HostStateEntity hostStateEntity;
-
-  @OneToOne(mappedBy = "hostEntity")
-  public HostStateEntity getHostStateEntity() {
-    return hostStateEntity;
-  }
-
-  public void setHostStateEntity(HostStateEntity hostStateEntity) {
-    this.hostStateEntity = hostStateEntity;
-  }
-
-  private Collection<HostRoleCommandEntity> hostRoleCommandEntities;
-
-  @OneToMany(mappedBy = "host")
-  public Collection<HostRoleCommandEntity> getHostRoleCommandEntities() {
-    return hostRoleCommandEntities;
-  }
-
-  public void setHostRoleCommandEntities(Collection<HostRoleCommandEntity> hostRoleCommandEntities) {
-    this.hostRoleCommandEntities = hostRoleCommandEntities;
-  }
-
-//  private Collection<ServiceComponentStateEntity> serviceComponentStateEntities;
-//
-//  @OneToMany(mappedBy = "hostEntity")
-//  public Collection<ServiceComponentStateEntity> getServiceComponentStateEntities() {
-//    return serviceComponentStateEntities;
-//  }
-//
-//  public void setServiceComponentStateEntities(Collection<ServiceComponentStateEntity> serviceComponentStateEntities) {
-//    this.serviceComponentStateEntities = serviceComponentStateEntities;
-//  }
-
-//  private Collection<ServiceStateEntity> serviceStateEntities;
-//
-//  @OneToMany(mappedBy = "hostEntity")
-//  public Collection<ServiceStateEntity> getServiceStateEntities() {
-//    return serviceStateEntities;
-//  }
-//
-//  public void setServiceStateEntities(Collection<ServiceStateEntity> serviceStateEntities) {
-//    this.serviceStateEntities = serviceStateEntities;
-//  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostRoleCommandEntity.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostRoleCommandEntity.java
deleted file mode 100644
index 4dfeb5e..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostRoleCommandEntity.java
+++ /dev/null
@@ -1,281 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.entities;
-
-import org.apache.ambari.server.Role;
-import org.apache.ambari.server.RoleCommand;
-import org.apache.ambari.server.actionmanager.HostRoleStatus;
-
-import javax.persistence.*;
-import java.util.Arrays;
-
-@Table(name = "host_role_command", schema = "ambari", catalog = "")
-@Entity
-@Cacheable(false)
-@SequenceGenerator(name = "ambari.host_role_command_task_id_seq", allocationSize = 1)
-public class HostRoleCommandEntity {
-  private Long taskId;
-
-  @Column(name = "task_id")
-  @Id
-  @GeneratedValue(strategy = GenerationType.SEQUENCE, generator = "ambari.host_role_command_task_id_seq")
-  public Long getTaskId() {
-    return taskId;
-  }
-
-  public void setTaskId(Long taskId) {
-    this.taskId = taskId;
-  }
-
-  private Long requestId;
-
-  @Column(name = "request_id", insertable = false, updatable = false, nullable = false)
-  @Basic
-  public Long getRequestId() {
-    return requestId;
-  }
-
-  public void setRequestId(Long requestId) {
-    this.requestId = requestId;
-  }
-
-  private Long stageId;
-
-  @Column(name = "stage_id", insertable = false, updatable = false, nullable = false)
-  @Basic
-  public Long getStageId() {
-    return stageId;
-  }
-
-  public void setStageId(Long stageId) {
-    this.stageId = stageId;
-  }
-
-  private String hostName;
-
-  @Column(name = "host_name", insertable = false, updatable = false, nullable = false)
-  @Basic
-  public String getHostName() {
-    return hostName;
-  }
-
-  public void setHostName(String hostName) {
-    this.hostName = hostName;
-  }
-
-  private Role role;
-
-  @Column(name = "role")
-  @Enumerated(EnumType.STRING)
-  public Role getRole() {
-    return role;
-  }
-
-  public void setRole(Role role) {
-    this.role = role;
-  }
-
-  private String event = "";
-
-  @Column(name = "event", nullable = false, length = 32000)
-  @Basic
-  public String getEvent() {
-    return event;
-  }
-
-  public void setEvent(String event) {
-    this.event = event;
-  }
-
-  private Integer exitcode = 0;
-
-  @Column(name = "exitcode", nullable = false)
-  @Basic
-  public Integer getExitcode() {
-    return exitcode;
-  }
-
-  public void setExitcode(Integer exitcode) {
-    this.exitcode = exitcode;
-  }
-
-  private HostRoleStatus status;
-
-  @Column(name = "status")
-  @Enumerated(EnumType.STRING)
-  public HostRoleStatus getStatus() {
-    return status;
-  }
-
-  public void setStatus(HostRoleStatus status) {
-    this.status = status;
-  }
-
-  private byte[] stdError = new byte[0];
-
-  @Column(name = "std_error", nullable = false)
-  @Lob
-  @Basic
-  public byte[] getStdError() {
-    return stdError;
-  }
-
-  public void setStdError(byte[] stdError) {
-    this.stdError = stdError;
-  }
-
-  private byte[] stdOut = new byte[0];
-
-  @Column(name = "std_out", nullable = false)
-  @Lob
-  @Basic
-  public byte[] getStdOut() {
-    return stdOut;
-  }
-
-  public void setStdOut(byte[] stdOut) {
-    this.stdOut = stdOut;
-  }
-
-  private Long startTime = -1L;
-
-  @Column(name = "start_time", nullable = false)
-  @Basic
-  public Long getStartTime() {
-    return startTime;
-  }
-
-  public void setStartTime(Long startTime) {
-    this.startTime = startTime;
-  }
-
-  private Long lastAttemptTime = -1L;
-
-  @Column(name = "last_attempt_time", nullable = false)
-  @Basic
-  public Long getLastAttemptTime() {
-    return lastAttemptTime;
-  }
-
-  public void setLastAttemptTime(Long lastAttemptTime) {
-    this.lastAttemptTime = lastAttemptTime;
-  }
-
-  private Short attemptCount = 0;
-
-  @Column(name = "attempt_count", nullable = false)
-  @Basic
-  public Short getAttemptCount() {
-    return attemptCount;
-  }
-
-  public void setAttemptCount(Short attemptCount) {
-    this.attemptCount = attemptCount;
-  }
-
-  private RoleCommand roleCommand;
-
-  @Column(name = "role_command")
-  @Enumerated(EnumType.STRING)
-  public RoleCommand getRoleCommand() {
-    return roleCommand;
-  }
-
-  public void setRoleCommand(RoleCommand roleCommand) {
-    this.roleCommand = roleCommand;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    HostRoleCommandEntity that = (HostRoleCommandEntity) o;
-
-    if (attemptCount != null ? !attemptCount.equals(that.attemptCount) : that.attemptCount != null) return false;
-    if (event != null ? !event.equals(that.event) : that.event != null) return false;
-    if (exitcode != null ? !exitcode.equals(that.exitcode) : that.exitcode != null) return false;
-    if (hostName != null ? !hostName.equals(that.hostName) : that.hostName != null) return false;
-    if (lastAttemptTime != null ? !lastAttemptTime.equals(that.lastAttemptTime) : that.lastAttemptTime != null)
-      return false;
-    if (requestId != null ? !requestId.equals(that.requestId) : that.requestId != null) return false;
-    if (role != null ? !role.equals(that.role) : that.role != null) return false;
-    if (stageId != null ? !stageId.equals(that.stageId) : that.stageId != null) return false;
-    if (startTime != null ? !startTime.equals(that.startTime) : that.startTime != null) return false;
-    if (status != null ? !status.equals(that.status) : that.status != null) return false;
-    if (stdError != null ? !Arrays.equals(stdError, that.stdError) : that.stdError != null) return false;
-    if (stdOut != null ? !Arrays.equals(stdOut, that.stdOut) : that.stdOut != null) return false;
-    if (taskId != null ? !taskId.equals(that.taskId) : that.taskId != null) return false;
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = taskId != null ? taskId.hashCode() : 0;
-    result = 31 * result + (requestId != null ? requestId.hashCode() : 0);
-    result = 31 * result + (stageId != null ? stageId.hashCode() : 0);
-    result = 31 * result + (hostName != null ? hostName.hashCode() : 0);
-    result = 31 * result + (role != null ? role.hashCode() : 0);
-    result = 31 * result + (event != null ? event.hashCode() : 0);
-    result = 31 * result + (exitcode != null ? exitcode.hashCode() : 0);
-    result = 31 * result + (status != null ? status.hashCode() : 0);
-    result = 31 * result + (stdError != null ? Arrays.hashCode(stdError) : 0);
-    result = 31 * result + (stdOut != null ? Arrays.hashCode(stdOut) : 0);
-    result = 31 * result + (startTime != null ? startTime.hashCode() : 0);
-    result = 31 * result + (lastAttemptTime != null ? lastAttemptTime.hashCode() : 0);
-    result = 31 * result + (attemptCount != null ? attemptCount.hashCode() : 0);
-    return result;
-  }
-
-  private ExecutionCommandEntity executionCommand;
-
-  @OneToOne(mappedBy = "hostRoleCommand", cascade = CascadeType.REMOVE)
-  public ExecutionCommandEntity getExecutionCommand() {
-    return executionCommand;
-  }
-
-  public void setExecutionCommand(ExecutionCommandEntity executionCommandsByTaskId) {
-    this.executionCommand = executionCommandsByTaskId;
-  }
-
-  private StageEntity stage;
-
-  @ManyToOne
-  @JoinColumns({@JoinColumn(name = "request_id", referencedColumnName = "request_id", nullable = false), @JoinColumn(name = "stage_id", referencedColumnName = "stage_id", nullable = false)})
-  public StageEntity getStage() {
-    return stage;
-  }
-
-  public void setStage(StageEntity stage) {
-    this.stage = stage;
-  }
-
-  private HostEntity host;
-
-  @ManyToOne
-  @JoinColumn(name = "host_name", referencedColumnName = "host_name", nullable = false)
-  public HostEntity getHost() {
-    return host;
-  }
-
-  public void setHost(HostEntity host) {
-    this.host = host;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostStateEntity.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostStateEntity.java
deleted file mode 100644
index 8cb00cf..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostStateEntity.java
+++ /dev/null
@@ -1,150 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.entities;
-
-import org.apache.ambari.server.state.HostState;
-
-import javax.persistence.*;
-
-@javax.persistence.Table(name = "hoststate", schema = "ambari", catalog = "")
-@Entity
-public class HostStateEntity {
-  private String hostName;
-
-  @javax.persistence.Column(name = "host_name", nullable = false, insertable = false, updatable = false)
-  @Id
-  public String getHostName() {
-    return hostName;
-  }
-
-  public void setHostName(String hostName) {
-    this.hostName = hostName;
-  }
-
-  private Long availableMem = 0L;
-
-  @Column(name = "available_mem", nullable = false, insertable = true, updatable = true)
-  @Basic
-  public Long getAvailableMem() {
-    return availableMem;
-  }
-
-  public void setAvailableMem(Long availableMem) {
-    this.availableMem = availableMem;
-  }
-
-  private Long timeInState = 0L;
-
-  @javax.persistence.Column(name = "time_in_state", nullable = false, insertable = true, updatable = true, length = 10)
-  @Basic
-  public Long getTimeInState() {
-    return timeInState;
-  }
-
-  public void setTimeInState(Long timeInState) {
-    this.timeInState = timeInState;
-  }
-
-  private String healthStatus;
-
-  @Column(name = "health_status", insertable = true, updatable = true)
-  @Basic
-  public String getHealthStatus() {
-    return healthStatus;
-  }
-
-  public void setHealthStatus(String healthStatus) {
-    this.healthStatus = healthStatus;
-  }
-
-  private String agentVersion = "";
-
-  @javax.persistence.Column(name = "agent_version", nullable = false, insertable = true, updatable = true)
-  @Basic
-  public String getAgentVersion() {
-    return agentVersion;
-  }
-
-  public void setAgentVersion(String agentVersion) {
-    this.agentVersion = agentVersion;
-  }
-
-  private HostState currentState = HostState.INIT;
-
-  @javax.persistence.Column(name = "current_state", nullable = false, insertable = true, updatable = true)
-  @Enumerated(value = EnumType.STRING)
-  public HostState getCurrentState() {
-    return currentState;
-  }
-
-  public void setCurrentState(HostState currentState) {
-    this.currentState = currentState;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    HostStateEntity that = (HostStateEntity) o;
-
-    if (availableMem != null ? !availableMem.equals(that.availableMem) : that.availableMem != null) return false;
-    if (timeInState != null ? !timeInState.equals(that.timeInState) : that.timeInState != null) return false;
-    if (agentVersion != null ? !agentVersion.equals(that.agentVersion) : that.agentVersion != null) return false;
-    if (currentState != null ? !currentState.equals(that.currentState) : that.currentState != null) return false;
-    if (hostName != null ? !hostName.equals(that.hostName) : that.hostName != null) return false;
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = hostName != null ? hostName.hashCode() : 0;
-    result = 31 * result + (availableMem != null ? availableMem.intValue() : 0);
-    result = 31 * result + (timeInState != null ? timeInState.intValue() : 0);
-    result = 31 * result + (agentVersion != null ? agentVersion.hashCode() : 0);
-    result = 31 * result + (currentState != null ? currentState.hashCode() : 0);
-    return result;
-  }
-
-//  private ClusterEntity clusterEntity;
-//
-//  @ManyToOne
-//  @javax.persistence.JoinColumn(name = "cluster_id", referencedColumnName = "cluster_id")
-//  public ClusterEntity getClusterEntity() {
-//    return clusterEntity;
-//  }
-//
-//  public void setClusterEntity(ClusterEntity clusterEntity) {
-//    this.clusterEntity = clusterEntity;
-//  }
-
-  private HostEntity hostEntity;
-
-  @OneToOne
-  @JoinColumn(name = "host_name", referencedColumnName = "host_name", nullable = false)
-  public HostEntity getHostEntity() {
-    return hostEntity;
-  }
-
-  public void setHostEntity(HostEntity hostEntity) {
-    this.hostEntity = hostEntity;
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/KeyValueEntity.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/KeyValueEntity.java
deleted file mode 100644
index e11fd6b..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/KeyValueEntity.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.entities;
-
-
-import javax.persistence.Column;
-import javax.persistence.Entity;
-import javax.persistence.Id;
-import javax.persistence.Table;
-
-@Table(name = "key_value_store", schema = "ambari", catalog = "")
-@Entity
-public class KeyValueEntity {
-
-  private String key;
-  private String value;
-
-  @Column(name = "\"key\"", length = 255)
-  @Id
-  public String getKey() {
-    return key;
-  }
-
-  public void setKey(String key) {
-    this.key = key;
-  }
-
-  @Column(name = "\"value\"", length = 32000)
-  public String getValue() {
-    return value;
-  }
-
-  public void setValue(String value) {
-    this.value = value;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    KeyValueEntity that = (KeyValueEntity) o;
-
-    if (key != null ? !key.equals(that.key) : that.key != null) return false;
-    if (value != null ? !value.equals(that.value) : that.value != null) return false;
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = key != null ? key.hashCode() : 0;
-    result = 31 * result + (value != null ? value.hashCode() : 0);
-    return result;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RoleEntity.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RoleEntity.java
deleted file mode 100644
index 992641e..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RoleEntity.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.entities;
-
-import javax.persistence.CascadeType;
-import javax.persistence.Entity;
-import javax.persistence.Id;
-import javax.persistence.JoinColumn;
-import javax.persistence.JoinTable;
-import javax.persistence.ManyToMany;
-import java.util.Set;
-
-@javax.persistence.Table(name = "roles", schema = "ambari", catalog = "")
-@Entity
-public class RoleEntity {
-
-  private String roleName;
-
-  @javax.persistence.Column(name = "role_name")
-  @Id
-  public String getRoleName() {
-    return roleName;
-  }
-
-  public void setRoleName(String roleName) {
-    this.roleName = roleName;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    RoleEntity that = (RoleEntity) o;
-
-    if (roleName != null ? !roleName.equals(that.roleName) : that.roleName != null) return false;
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    return roleName != null ? roleName.hashCode() : 0;
-  }
-
-  private Set<org.apache.ambari.server.orm.entities.UserEntity> userEntities;
-
-  @JoinTable(name = "user_roles", catalog = "", schema = "ambari",
-      joinColumns = {@JoinColumn(name = "role_name", referencedColumnName = "role_name")},
-      inverseJoinColumns = {@JoinColumn(name = "user_id", referencedColumnName = "user_id")})
-  @ManyToMany(cascade = CascadeType.ALL)
-  public Set<org.apache.ambari.server.orm.entities.UserEntity> getUserEntities() {
-    return userEntities;
-  }
-
-  public void setUserEntities(Set<org.apache.ambari.server.orm.entities.UserEntity> userEntities) {
-    this.userEntities = userEntities;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RoleSuccessCriteriaEntity.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RoleSuccessCriteriaEntity.java
deleted file mode 100644
index 8d20cfd..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RoleSuccessCriteriaEntity.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.entities;
-
-import org.apache.ambari.server.Role;
-
-import javax.persistence.*;
-
-@IdClass(org.apache.ambari.server.orm.entities.RoleSuccessCriteriaEntityPK.class)
-@Table(name = "role_success_criteria", schema = "ambari", catalog = "")
-@Entity
-public class RoleSuccessCriteriaEntity {
-  private Long requestId;
-
-  @Column(name = "request_id", insertable = false, updatable = false, nullable = false)
-  @Id
-  public Long getRequestId() {
-    return requestId;
-  }
-
-  public void setRequestId(Long requestId) {
-    this.requestId = requestId;
-  }
-
-  private Long stageId;
-
-  @Column(name = "stage_id", insertable = false, updatable = false, nullable = false)
-  @Id
-  public Long getStageId() {
-    return stageId;
-  }
-
-  public void setStageId(Long stageId) {
-    this.stageId = stageId;
-  }
-
-  private Role role;
-
-  @Column(name = "role")
-  @Enumerated(EnumType.STRING)
-  @Id
-  public Role getRole() {
-    return role;
-  }
-
-  public void setRole(Role role) {
-    this.role = role;
-  }
-
-  private Double successFactor = 1d;
-
-  @Column(name = "success_factor", nullable = false)
-  @Basic
-  public Double getSuccessFactor() {
-    return successFactor;
-  }
-
-  public void setSuccessFactor(Double successFactor) {
-    this.successFactor = successFactor;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    RoleSuccessCriteriaEntity that = (RoleSuccessCriteriaEntity) o;
-
-    if (requestId != null ? !requestId.equals(that.requestId) : that.requestId != null) return false;
-    if (role != null ? !role.equals(that.role) : that.role != null) return false;
-    if (stageId != null ? !stageId.equals(that.stageId) : that.stageId != null) return false;
-    if (successFactor != null ? !successFactor.equals(that.successFactor) : that.successFactor != null) return false;
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = requestId != null ? requestId.hashCode() : 0;
-    result = 31 * result + (stageId != null ? stageId.hashCode() : 0);
-    result = 31 * result + (role != null ? role.hashCode() : 0);
-    result = 31 * result + (successFactor != null ? successFactor.hashCode() : 0);
-    return result;
-  }
-
-  private StageEntity stage;
-
-  @ManyToOne
-  @JoinColumns({@JoinColumn(name = "request_id", referencedColumnName = "request_id", nullable = false), @JoinColumn(name = "stage_id", referencedColumnName = "stage_id", nullable = false)})
-  public StageEntity getStage() {
-    return stage;
-  }
-
-  public void setStage(StageEntity stage) {
-    this.stage = stage;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RoleSuccessCriteriaEntityPK.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RoleSuccessCriteriaEntityPK.java
deleted file mode 100644
index f62094e..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RoleSuccessCriteriaEntityPK.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.entities;
-
-import org.apache.ambari.server.Role;
-
-import javax.persistence.Column;
-import javax.persistence.EnumType;
-import javax.persistence.Enumerated;
-import javax.persistence.Id;
-import java.io.Serializable;
-
-@SuppressWarnings("serial")
-public class RoleSuccessCriteriaEntityPK implements Serializable {
-  private Long requestId;
-
-  @Id
-  @Column(name = "request_id")
-  public Long getRequestId() {
-    return requestId;
-  }
-
-  public void setRequestId(Long requestId) {
-    this.requestId = requestId;
-  }
-
-  private Long stageId;
-
-  @Id
-  @Column(name = "stage_id")
-  public Long getStageId() {
-    return stageId;
-  }
-
-  public void setStageId(Long stageId) {
-    this.stageId = stageId;
-  }
-
-  private Role role;
-
-  @Column(name = "role")
-  @Enumerated(EnumType.STRING)
-  @Id
-  public Role getRole() {
-    return role;
-  }
-
-  public void setRole(Role role) {
-    this.role = role;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    RoleSuccessCriteriaEntityPK that = (RoleSuccessCriteriaEntityPK) o;
-
-    if (requestId != null ? !requestId.equals(that.requestId) : that.requestId != null) return false;
-    if (role != null ? !role.equals(that.role) : that.role != null) return false;
-    if (stageId != null ? !stageId.equals(that.stageId) : that.stageId != null) return false;
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = requestId != null ? requestId.hashCode() : 0;
-    result = 31 * result + (stageId != null ? stageId.hashCode() : 0);
-    result = 31 * result + (role != null ? role.hashCode() : 0);
-    return result;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceComponentDesiredStateEntity.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceComponentDesiredStateEntity.java
deleted file mode 100644
index 1a31fe3..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceComponentDesiredStateEntity.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.entities;
-
-import org.apache.ambari.server.state.State;
-
-import javax.persistence.*;
-import java.util.Collection;
-
-@javax.persistence.IdClass(ServiceComponentDesiredStateEntityPK.class)
-@javax.persistence.Table(name = "servicecomponentdesiredstate", schema = "ambari", catalog = "")
-@Entity
-public class ServiceComponentDesiredStateEntity {
-  private Long clusterId;
-
-  @javax.persistence.Column(name = "cluster_id", nullable = false, insertable = false, updatable = false, length = 10)
-  @Id
-  public Long getClusterId() {
-    return clusterId;
-  }
-
-  public void setClusterId(Long clusterId) {
-    this.clusterId = clusterId;
-  }
-
-  private String serviceName;
-
-  @javax.persistence.Column(name = "service_name", nullable = false, insertable = false, updatable = false)
-  @Id
-  public String getServiceName() {
-    return serviceName;
-  }
-
-  public void setServiceName(String serviceName) {
-    this.serviceName = serviceName;
-  }
-
-  private String componentName;
-
-  @javax.persistence.Column(name = "component_name", nullable = false, insertable = true, updatable = true)
-  @Id
-  public String getComponentName() {
-    return componentName;
-  }
-
-  public void setComponentName(String componentName) {
-    this.componentName = componentName;
-  }
-
-  private State desiredState = State.INIT;
-
-  @javax.persistence.Column(name = "desired_state", nullable = false, insertable = true, updatable = true)
-  @Enumerated(EnumType.STRING)
-  public State getDesiredState() {
-    return desiredState;
-  }
-
-  public void setDesiredState(State desiredState) {
-    this.desiredState = desiredState;
-  }
-
-  private String desiredStackVersion = "";
-
-  @javax.persistence.Column(name = "desired_stack_version", nullable = false, insertable = true, updatable = true)
-  @Basic
-  public String getDesiredStackVersion() {
-    return desiredStackVersion;
-  }
-
-  public void setDesiredStackVersion(String desiredStackVersion) {
-    this.desiredStackVersion = desiredStackVersion;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    ServiceComponentDesiredStateEntity that = (ServiceComponentDesiredStateEntity) o;
-
-    if (clusterId != null ? !clusterId.equals(that.clusterId) : that.clusterId != null) return false;
-    if (componentName != null ? !componentName.equals(that.componentName) : that.componentName != null) return false;
-    if (desiredState != null ? !desiredState.equals(that.desiredState) : that.desiredState != null) return false;
-    if (serviceName != null ? !serviceName.equals(that.serviceName) : that.serviceName != null) return false;
-    if (desiredStackVersion != null ? !desiredStackVersion.equals(that.desiredStackVersion) : that.desiredStackVersion != null)
-      return false;
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = clusterId != null ? clusterId.intValue() : 0;
-    result = 31 * result + (serviceName != null ? serviceName.hashCode() : 0);
-    result = 31 * result + (componentName != null ? componentName.hashCode() : 0);
-    result = 31 * result + (desiredState != null ? desiredState.hashCode() : 0);
-    result = 31 * result + (desiredStackVersion != null ? desiredStackVersion.hashCode() : 0);
-
-    return result;
-  }
-
-  private ClusterServiceEntity clusterServiceEntity;
-
-  @ManyToOne
-  @javax.persistence.JoinColumns({@javax.persistence.JoinColumn(name = "cluster_id", referencedColumnName = "cluster_id", nullable = false), @javax.persistence.JoinColumn(name = "service_name", referencedColumnName = "service_name", nullable = false)})
-  public ClusterServiceEntity getClusterServiceEntity() {
-    return clusterServiceEntity;
-  }
-
-  public void setClusterServiceEntity(ClusterServiceEntity clusterServiceEntity) {
-    this.clusterServiceEntity = clusterServiceEntity;
-  }
-
-  private Collection<HostComponentStateEntity> hostComponentStateEntities;
-
-  @OneToMany(mappedBy = "serviceComponentDesiredStateEntity")
-  public Collection<HostComponentStateEntity> getHostComponentStateEntities() {
-    return hostComponentStateEntities;
-  }
-
-  public void setHostComponentStateEntities(Collection<HostComponentStateEntity> hostComponentStateEntities) {
-    this.hostComponentStateEntities = hostComponentStateEntities;
-  }
-
-  private Collection<HostComponentDesiredStateEntity> hostComponentDesiredStateEntities;
-
-  @OneToMany(mappedBy = "serviceComponentDesiredStateEntity")
-  public Collection<HostComponentDesiredStateEntity> getHostComponentDesiredStateEntities() {
-    return hostComponentDesiredStateEntities;
-  }
-
-  public void setHostComponentDesiredStateEntities(Collection<HostComponentDesiredStateEntity> hostComponentDesiredStateEntities) {
-    this.hostComponentDesiredStateEntities = hostComponentDesiredStateEntities;
-  }
-
-  private Collection<ComponentConfigMappingEntity> configMappingEntities;
-  @OneToMany(mappedBy = "serviceComponentDesiredStateEntity", cascade = CascadeType.ALL)
-  public Collection<ComponentConfigMappingEntity> getComponentConfigMappingEntities() {
-    return configMappingEntities;
-  }
-
-  public void setComponentConfigMappingEntities(Collection<ComponentConfigMappingEntity> entities) {
-    configMappingEntities = entities;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceComponentDesiredStateEntityPK.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceComponentDesiredStateEntityPK.java
deleted file mode 100644
index d56e555..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceComponentDesiredStateEntityPK.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.entities;
-
-import javax.persistence.Column;
-import javax.persistence.Id;
-import java.io.Serializable;
-
-@SuppressWarnings("serial")
-public class ServiceComponentDesiredStateEntityPK implements Serializable {
-  private Long clusterId;
-
-  @Column(name = "cluster_id", nullable = false, insertable = true, updatable = true, length = 10)
-  @Id
-  public Long getClusterId() {
-    return clusterId;
-  }
-
-  public void setClusterId(Long clusterId) {
-    this.clusterId = clusterId;
-  }
-
-  private String serviceName;
-
-  @javax.persistence.Column(name = "service_name", nullable = false, insertable = false, updatable = false)
-  @Id
-  public String getServiceName() {
-    return serviceName;
-  }
-
-  public void setServiceName(String serviceName) {
-    this.serviceName = serviceName;
-  }
-
-  private String componentName;
-
-  @Id
-  @Column(name = "component_name", nullable = false, insertable = true, updatable = true)
-  public String getComponentName() {
-    return componentName;
-  }
-
-  public void setComponentName(String componentName) {
-    this.componentName = componentName;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    ServiceComponentDesiredStateEntityPK that = (ServiceComponentDesiredStateEntityPK) o;
-
-    if (clusterId != null ? !clusterId.equals(that.clusterId) : that.clusterId != null) return false;
-    if (componentName != null ? !componentName.equals(that.componentName) : that.componentName != null) return false;
-    if (serviceName != null ? !serviceName.equals(that.serviceName) : that.serviceName != null) return false;
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = clusterId != null ? clusterId.intValue() : 0;
-    result = 31 * result + (serviceName != null ? serviceName.hashCode() : 0);
-    result = 31 * result + (componentName != null ? componentName.hashCode() : 0);
-    return result;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceConfigMappingEntity.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceConfigMappingEntity.java
deleted file mode 100644
index b2e53ba..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceConfigMappingEntity.java
+++ /dev/null
@@ -1,140 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.orm.entities;
-
-import javax.persistence.Column;
-import javax.persistence.Entity;
-import javax.persistence.Id;
-import javax.persistence.IdClass;
-import javax.persistence.JoinColumn;
-import javax.persistence.JoinColumns;
-import javax.persistence.ManyToOne;
-import javax.persistence.Table;
-
-@IdClass(ServiceConfigMappingEntityPK.class)
-@Entity
-@Table(name="serviceconfigmapping", schema="ambari", catalog="")
-public class ServiceConfigMappingEntity {
-  private Long clusterId;
-  private String serviceName;
-  private String configType;
-  private String configVersion;
-  private Long timestamp;
-  private ClusterServiceEntity serviceEntity;
-  private ClusterConfigEntity clusterConfigEntity;
-
-  @Column(name = "cluster_id", nullable = false, insertable = false, updatable = false)
-  @Id
-  public Long getClusterId() {
-    return clusterId;
-  }
-
-  public void setClusterId(Long id) {
-    clusterId = id;
-  }
-
-  @Column(name = "service_name", nullable = false, insertable = false, updatable = false)
-  @Id
-  public String getServiceName() {
-    return serviceName;
-  }
-
-  public void setServiceName(String name) {
-    serviceName = name;
-  }
-
-  @Column(name = "config_type", nullable = false, insertable = true, updatable = false)
-  @Id
-  public String getConfigType() {
-    return configType;
-  }
-
-  public void setConfigType(String type) {
-    configType = type;
-  }
-
-  @Column(name = "config_tag", nullable = false, insertable = true, updatable = true)
-  public String getVersionTag() {
-    return configVersion;
-  }
-
-  public void setVersionTag(String tag) {
-    configVersion = tag;
-  }
-
-  @Column(name = "timestamp", nullable = false, insertable = true, updatable = true)
-  public Long getTimestamp() {
-    return timestamp;
-  }
-
-  public void setTimestamp(Long stamp) {
-    timestamp = stamp;
-  }
-
-  @ManyToOne
-  @JoinColumns({
-      @JoinColumn(name = "cluster_id", referencedColumnName = "cluster_id", nullable = false),
-      @JoinColumn(name = "service_name", referencedColumnName = "service_name", nullable = false) })
-  public ClusterServiceEntity getServiceEntity() {
-    return serviceEntity;
-  }
-
-  public void setServiceEntity(ClusterServiceEntity entity) {
-    serviceEntity = entity;
-  }
-
-  @ManyToOne
-  @JoinColumns({
-      @JoinColumn(name = "cluster_id", referencedColumnName = "cluster_id", nullable = false, insertable = false, updatable = false),
-      @JoinColumn(name = "config_type", referencedColumnName = "type_name", nullable = false, insertable = false, updatable = false),
-      @JoinColumn(name = "config_tag", referencedColumnName = "version_tag", nullable = false, insertable = false, updatable = false)
-  })
-  public ClusterConfigEntity getClusterConfigEntity() {
-    return clusterConfigEntity;
-  }
-
-  public void setClusterConfigEntity(ClusterConfigEntity clusterConfigEntity) {
-    this.clusterConfigEntity = clusterConfigEntity;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    ServiceConfigMappingEntity that = (ServiceConfigMappingEntity) o;
-
-    if (clusterId != null ? !clusterId.equals(that.clusterId) : that.clusterId != null) return false;
-    if (serviceName != null ? !serviceName.equals(that.serviceName) : that.serviceName != null) return false;
-    if (configType != null ? !configType.equals(that.configType) : that.configType != null) return false;
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = clusterId != null ? clusterId.intValue() : 0;
-    result = 31 * result + (serviceName != null ? serviceName.hashCode() : 0);
-    result = 31 * result + (configType != null ? configType.hashCode() : 0);
-    return result;
-  }
-
-
-
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceConfigMappingEntityPK.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceConfigMappingEntityPK.java
deleted file mode 100644
index 6e30064..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceConfigMappingEntityPK.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.entities;
-
-import javax.persistence.Column;
-import javax.persistence.Id;
-import java.io.Serializable;
-
-@SuppressWarnings("serial")
-public class ServiceConfigMappingEntityPK implements Serializable {
-  private Long clusterId;
-  private String serviceName;
-  private String configType;
-
-  @Id
-  @Column(name = "cluster_id", nullable = false, insertable = true, updatable = true, length = 10)
-  public Long getClusterId() {
-    return clusterId;
-  }
-
-  public void setClusterId(Long clusterId) {
-    this.clusterId = clusterId;
-  }
-
-  @Id
-  @Column(name = "service_name", nullable = false, insertable = true, updatable = true)
-  public String getServiceName() {
-    return serviceName;
-  }
-
-  public void setServiceName(String serviceName) {
-    this.serviceName = serviceName;
-  }
-
-  @Id
-  @Column(name = "config_type", nullable = false, insertable = true, updatable = false)
-  public String getConfigType() {
-    return configType;
-  }
-
-  public void setConfigType(String type) {
-    configType = type;
-  }
-
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    ServiceConfigMappingEntityPK that = (ServiceConfigMappingEntityPK) o;
-
-    if (clusterId != null ? !clusterId.equals(that.clusterId) : that.clusterId != null) return false;
-    if (serviceName != null ? !serviceName.equals(that.serviceName) : that.serviceName != null) return false;
-    if (configType != null ? !configType.equals(that.configType) : that.configType != null) return false;
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = clusterId != null ? clusterId.intValue() : 0;
-    result = 31 * result + (serviceName != null ? serviceName.hashCode() : 0);
-    result = 31 * result + (configType != null ? configType.hashCode() : 0);
-    return result;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceDesiredStateEntity.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceDesiredStateEntity.java
deleted file mode 100644
index f443dea..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceDesiredStateEntity.java
+++ /dev/null
@@ -1,130 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.entities;
-
-import org.apache.ambari.server.state.State;
-
-import javax.persistence.*;
-
-@javax.persistence.IdClass(ServiceDesiredStateEntityPK.class)
-@javax.persistence.Table(name = "servicedesiredstate", schema = "ambari", catalog = "")
-@Entity
-public class ServiceDesiredStateEntity {
-  private Long clusterId;
-
-  @javax.persistence.Column(name = "cluster_id", nullable = false, insertable = false, updatable = false, length = 10)
-  @Id
-  public Long getClusterId() {
-    return clusterId;
-  }
-
-  public void setClusterId(Long clusterId) {
-    this.clusterId = clusterId;
-  }
-
-  private String serviceName;
-
-  @javax.persistence.Column(name = "service_name", nullable = false, insertable = false, updatable = false)
-  @Id
-  public String getServiceName() {
-    return serviceName;
-  }
-
-  public void setServiceName(String serviceName) {
-    this.serviceName = serviceName;
-  }
-
-  private State desiredState = State.INIT;
-
-  @javax.persistence.Column(name = "desired_state", nullable = false, insertable = true, updatable = true)
-  @Enumerated(value = EnumType.STRING)
-  public State getDesiredState() {
-    return desiredState;
-  }
-
-  public void setDesiredState(State desiredState) {
-    this.desiredState = desiredState;
-  }
-
-  private int desiredHostRoleMapping = 0;
-
-  @javax.persistence.Column(name = "desired_host_role_mapping", nullable = false, insertable = true, updatable = true, length = 10)
-  @Basic
-  public int getDesiredHostRoleMapping() {
-    return desiredHostRoleMapping;
-  }
-
-  public void setDesiredHostRoleMapping(int desiredHostRoleMapping) {
-    this.desiredHostRoleMapping = desiredHostRoleMapping;
-  }
-
-  private String desiredStackVersion = "";
-
-  @javax.persistence.Column(name = "desired_stack_version", nullable = false, insertable = true, updatable = true)
-  @Basic
-  public String getDesiredStackVersion() {
-    return desiredStackVersion;
-  }
-
-  public void setDesiredStackVersion(String desiredStackVersion) {
-    this.desiredStackVersion = desiredStackVersion;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    ServiceDesiredStateEntity that = (ServiceDesiredStateEntity) o;
-
-    if (clusterId != null ? !clusterId.equals(that.clusterId) : that.clusterId != null) return false;
-    if (desiredState != null ? !desiredState.equals(that.desiredState) : that.desiredState != null) return false;
-    if (desiredHostRoleMapping != that.desiredHostRoleMapping) return false;
-    if (serviceName != null ? !serviceName.equals(that.serviceName) : that.serviceName != null) return false;
-    if (desiredStackVersion != null ? !desiredStackVersion.equals(that.desiredStackVersion) : that.desiredStackVersion != null)
-      return false;
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = clusterId != null ? clusterId.intValue() : 0;
-    result = 31 * result + (serviceName != null ? serviceName.hashCode() : 0);
-    result = 31 * result + (desiredState != null ? desiredState.hashCode() : 0);
-    result = 31 * result + desiredHostRoleMapping;
-    result = 31 * result + (desiredStackVersion != null ? desiredStackVersion.hashCode() : 0);
-    return result;
-  }
-
-  private ClusterServiceEntity clusterServiceEntity;
-
-  @OneToOne
-  @javax.persistence.JoinColumns(
-      {
-          @JoinColumn(name = "cluster_id", referencedColumnName = "cluster_id", nullable = false),
-          @JoinColumn(name = "service_name", referencedColumnName = "service_name", nullable = false)
-      })
-  public ClusterServiceEntity getClusterServiceEntity() {
-    return clusterServiceEntity;
-  }
-
-  public void setClusterServiceEntity(ClusterServiceEntity clusterServiceEntity) {
-    this.clusterServiceEntity = clusterServiceEntity;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceDesiredStateEntityPK.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceDesiredStateEntityPK.java
deleted file mode 100644
index 8fce36a..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceDesiredStateEntityPK.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.entities;
-
-import javax.persistence.Column;
-import javax.persistence.Id;
-import java.io.Serializable;
-
-@SuppressWarnings("serial")
-public class ServiceDesiredStateEntityPK implements Serializable {
-  private Long clusterId;
-
-  @javax.persistence.Column(name = "cluster_id", nullable = false, insertable = true, updatable = true, length = 10)
-  @Id
-  public Long getClusterId() {
-    return clusterId;
-  }
-
-  public void setClusterId(Long clusterId) {
-    this.clusterId = clusterId;
-  }
-
-  private String serviceName;
-
-  @Id
-  @Column(name = "service_name", nullable = false, insertable = true, updatable = true, length = 2147483647, precision = 0)
-  public String getServiceName() {
-    return serviceName;
-  }
-
-  public void setServiceName(String serviceName) {
-    this.serviceName = serviceName;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    ServiceDesiredStateEntityPK that = (ServiceDesiredStateEntityPK) o;
-
-    if (clusterId != null ? !clusterId.equals(that.clusterId) : that.clusterId != null) return false;
-    if (serviceName != null ? !serviceName.equals(that.serviceName) : that.serviceName != null) return false;
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = clusterId != null ? clusterId.intValue() : 0;
-    result = 31 * result + (serviceName != null ? serviceName.hashCode() : 0);
-    return result;
-  }
-}
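
Note: the PK class above pairs with the entity's two @Id properties, and it is what
EntityManager.find() expects as the key argument for a composite-key mapping. A minimal
lookup sketch, assuming a JPA EntityManager is available (the helper class name is
illustrative, not part of this codebase):

    import javax.persistence.EntityManager;

    public class ServiceDesiredStateLookupSketch {
      // Build the composite key and fetch the row; returns null if absent.
      public static ServiceDesiredStateEntity find(EntityManager em,
          Long clusterId, String serviceName) {
        ServiceDesiredStateEntityPK pk = new ServiceDesiredStateEntityPK();
        pk.setClusterId(clusterId);
        pk.setServiceName(serviceName);
        return em.find(ServiceDesiredStateEntity.class, pk);
      }
    }
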
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/StageEntity.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/StageEntity.java
deleted file mode 100644
index 5dc8cd7..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/StageEntity.java
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.entities;
-
-import javax.persistence.*;
-import java.util.Collection;
-
-@IdClass(org.apache.ambari.server.orm.entities.StageEntityPK.class)
-@Table(name = "stage", schema = "ambari", catalog = "")
-@Entity
-public class StageEntity {
-  private Long clusterId;
-
-  @Column(name = "cluster_id", insertable = false, updatable = false, nullable = false)
-  @Basic
-  public Long getClusterId() {
-    return clusterId;
-  }
-
-  public void setClusterId(Long clusterId) {
-    this.clusterId = clusterId;
-  }
-
-  private Long requestId;
-
-  @Column(name = "request_id")
-  @Id
-  public Long getRequestId() {
-    return requestId;
-  }
-
-  public void setRequestId(Long requestId) {
-    this.requestId = requestId;
-  }
-
-  private Long stageId = 0L;
-
-  @Column(name = "stage_id", nullable = false)
-  @Id
-  public Long getStageId() {
-    return stageId;
-  }
-
-  public void setStageId(Long stageId) {
-    this.stageId = stageId;
-  }
-
-  private String logInfo = "";
-
-  @Column(name = "log_info", nullable = false)
-  @Basic
-  public String getLogInfo() {
-    return logInfo;
-  }
-
-  public void setLogInfo(String logInfo) {
-    this.logInfo = logInfo;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    StageEntity that = (StageEntity) o;
-
-    if (clusterId != null ? !clusterId.equals(that.clusterId) : that.clusterId != null) return false;
-    if (logInfo != null ? !logInfo.equals(that.logInfo) : that.logInfo != null) return false;
-    if (requestId != null ? !requestId.equals(that.requestId) : that.requestId != null) return false;
-    if (stageId != null ? !stageId.equals(that.stageId) : that.stageId != null) return false;
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = clusterId != null ? clusterId.hashCode() : 0;
-    result = 31 * result + (requestId != null ? requestId.hashCode() : 0);
-    result = 31 * result + (stageId != null ? stageId.hashCode() : 0);
-    result = 31 * result + (logInfo != null ? logInfo.hashCode() : 0);
-    return result;
-  }
-
-  private ClusterEntity cluster;
-
-  @ManyToOne
-  @JoinColumn(name = "cluster_id", referencedColumnName = "cluster_id")
-  public ClusterEntity getCluster() {
-    return cluster;
-  }
-
-  public void setCluster(ClusterEntity cluster) {
-    this.cluster = cluster;
-  }
-
-  private Collection<HostRoleCommandEntity> hostRoleCommands;
-
-  @OneToMany(mappedBy = "stage", cascade = CascadeType.REMOVE)
-  public Collection<HostRoleCommandEntity> getHostRoleCommands() {
-    return hostRoleCommands;
-  }
-
-  public void setHostRoleCommands(Collection<HostRoleCommandEntity> hostRoleCommands) {
-    this.hostRoleCommands = hostRoleCommands;
-  }
-
-  private Collection<RoleSuccessCriteriaEntity> roleSuccessCriterias;
-
-  @OneToMany(mappedBy = "stage", cascade = CascadeType.REMOVE)
-  public Collection<RoleSuccessCriteriaEntity> getRoleSuccessCriterias() {
-    return roleSuccessCriterias;
-  }
-
-  public void setRoleSuccessCriterias(Collection<RoleSuccessCriteriaEntity> roleSuccessCriterias) {
-    this.roleSuccessCriterias = roleSuccessCriterias;
-  }
-}
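
Note: with the composite @IdClass key above, the stages of a single request are naturally
fetched by the request_id half of the key. A JPQL sketch (the query string is illustrative;
no such named query appears in this file):

    import java.util.List;
    import javax.persistence.EntityManager;

    public class StageQuerySketch {
      // Fetch all stages of a request, ordered by stage id.
      public static List<StageEntity> stagesOfRequest(EntityManager em, Long requestId) {
        return em.createQuery(
            "SELECT s FROM StageEntity s WHERE s.requestId = :requestId ORDER BY s.stageId",
            StageEntity.class)
            .setParameter("requestId", requestId)
            .getResultList();
      }
    }
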
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/StageEntityPK.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/StageEntityPK.java
deleted file mode 100644
index 0c227bc..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/StageEntityPK.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.entities;
-
-import javax.persistence.Column;
-import javax.persistence.Id;
-import java.io.Serializable;
-
-@SuppressWarnings("serial")
-public class StageEntityPK implements Serializable {
-  private Long requestId;
-
-  @Id
-  @Column(name = "request_id")
-  public Long getRequestId() {
-    return requestId;
-  }
-
-  public void setRequestId(Long requestId) {
-    this.requestId = requestId;
-  }
-
-  private Long stageId;
-
-  @Id
-  @Column(name = "stage_id")
-  public Long getStageId() {
-    return stageId;
-  }
-
-  public void setStageId(Long stageId) {
-    this.stageId = stageId;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    StageEntityPK that = (StageEntityPK) o;
-
-    if (requestId != null ? !requestId.equals(that.requestId) : that.requestId != null) return false;
-    if (stageId != null ? !stageId.equals(that.stageId) : that.stageId != null) return false;
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = requestId != null ? requestId.hashCode() : 0;
-    result = 31 * result + (stageId != null ? stageId.hashCode() : 0);
-    return result;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UserEntity.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UserEntity.java
deleted file mode 100644
index db258c1..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UserEntity.java
+++ /dev/null
@@ -1,130 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.orm.entities;
-
-import javax.persistence.*;
-
-import java.util.Date;
-import java.util.Set;
-
-@Table(name = "users", schema = "ambari", catalog = "", uniqueConstraints = {@UniqueConstraint(columnNames = {"user_name", "ldap_user"})})
-@Entity
-@NamedQueries({
-    @NamedQuery(name = "localUserByName", query = "SELECT user FROM UserEntity user where lower(user.userName)=:username AND user.ldapUser=false"),
-    @NamedQuery(name = "ldapUserByName", query = "SELECT user FROM UserEntity user where lower(user.userName)=:username AND user.ldapUser=true")
-})
-@SequenceGenerator(name = "ambari.users_user_id_seq", allocationSize = 1)
-public class UserEntity {
-
-  private Integer userId;
-
-  @Id
-  @Column(name = "user_id")
-  @GeneratedValue(strategy = GenerationType.SEQUENCE, generator = "ambari.users_user_id_seq")
-  public Integer getUserId() {
-    return userId;
-  }
-
-  public void setUserId(Integer userId) {
-    this.userId = userId;
-  }
-
-  private String userName;
-
-  @Column(name = "user_name")
-  public String getUserName() {
-    return userName;
-  }
-
-  public void setUserName(String userName) {
-    this.userName = userName;
-  }
-
-  private Boolean ldapUser = false;
-
-  @Column(name = "ldap_user")
-  public Boolean getLdapUser() {
-    return ldapUser;
-  }
-
-  public void setLdapUser(Boolean ldapUser) {
-    this.ldapUser = ldapUser;
-  }
-
-  private String userPassword;
-
-  @Column(name = "user_password")
-  @Basic
-  public String getUserPassword() {
-    return userPassword;
-  }
-
-  public void setUserPassword(String userPassword) {
-    this.userPassword = userPassword;
-  }
-
-  private Date createTime = new Date();
-
-  @Column(name = "create_time")
-  @Basic
-  @Temporal(value = TemporalType.TIMESTAMP)
-  public Date getCreateTime() {
-    return createTime;
-  }
-
-  public void setCreateTime(Date createTime) {
-    this.createTime = createTime;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    UserEntity that = (UserEntity) o;
-
-    if (userId != null ? !userId.equals(that.userId) : that.userId != null) return false;
-    if (createTime != null ? !createTime.equals(that.createTime) : that.createTime != null) return false;
-    if (ldapUser != null ? !ldapUser.equals(that.ldapUser) : that.ldapUser != null) return false;
-    if (userName != null ? !userName.equals(that.userName) : that.userName != null) return false;
-    if (userPassword != null ? !userPassword.equals(that.userPassword) : that.userPassword != null) return false;
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = userId != null ? userId.hashCode() : 0;
-    result = 31 * result + (userName != null ? userName.hashCode() : 0);
-    result = 31 * result + (userPassword != null ? userPassword.hashCode() : 0);
-    result = 31 * result + (ldapUser != null ? ldapUser.hashCode() : 0);
-    result = 31 * result + (createTime != null ? createTime.hashCode() : 0);
-    return result;
-  }
-
-  private Set<RoleEntity> roleEntities;
-
-  @ManyToMany(mappedBy = "userEntities")
-  public Set<RoleEntity> getRoleEntities() {
-    return roleEntities;
-  }
-
-  public void setRoleEntities(Set<RoleEntity> roleEntities) {
-    this.roleEntities = roleEntities;
-  }
-}
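
Note: both named queries above compare lower(user.userName) against the bound :username
parameter, so callers must lower-case the name before binding or the match silently fails.
A minimal usage sketch, assuming an injected EntityManager (the helper name is illustrative):

    import javax.persistence.EntityManager;
    import javax.persistence.NoResultException;

    public class UserLookupSketch {
      // Resolve a local (non-LDAP) user; returns null when no row matches.
      public static UserEntity findLocalUser(EntityManager em, String name) {
        try {
          return em.createNamedQuery("localUserByName", UserEntity.class)
              .setParameter("username", name.toLowerCase())
              .getSingleResult();
        } catch (NoResultException e) {
          return null;
        }
      }
    }
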
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/resources/ResourceManager.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/resources/ResourceManager.java
deleted file mode 100644
index f17564f..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/resources/ResourceManager.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.resources;
-
-import java.io.File;
-
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import com.google.inject.Inject;
-import com.google.inject.Singleton;
-
-/**
- * Resource manager.
- */
-@Singleton
-public class ResourceManager {
-  private static Log LOG = LogFactory.getLog(ResourceManager.class);
-
-  @Inject Configuration configs;
-
-  /**
-   * Returns the requested resource file.
-   * @param resourcePath path to the file, relative to the resources directory
-   * @return resource file
-   */
-  public File getResource(String resourcePath) {
-    String resDir = configs.getConfigsMap().get(Configuration.RESOURCES_DIR_KEY);
-    String resourcePathIndep = resourcePath.replaceAll("/", File.separator);
-    File resourceFile = new File(resDir + File.separator + resourcePathIndep);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Resource requested from ResourceManager"
-          + ", resourceDir=" + resDir
-          + ", resourcePath=" + resourcePathIndep
-          + ", fileExists=" + resourceFile.exists());
-    }
-    return resourceFile;
-  }
-}
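
Note: getResource() concatenates the caller-supplied path onto the configured resources
directory, and replaceAll("/", File.separator) can also misbehave on Windows, where the
backslash replacement string needs escaping. A hardened variant would canonicalize the
result and confine it to the base directory; a sketch of that idea (the confinement policy
is an assumption, not this class's behavior):

    import java.io.File;
    import java.io.IOException;

    public class SafeResourceLookupSketch {
      // Resolve resourcePath under resDir, rejecting ".." escapes.
      public static File resolve(String resDir, String resourcePath) throws IOException {
        File base = new File(resDir).getCanonicalFile();
        File candidate = new File(base, resourcePath).getCanonicalFile();
        if (!candidate.getPath().startsWith(base.getPath() + File.separator)) {
          throw new IOException("Path escapes resource directory: " + resourcePath);
        }
        return candidate;
      }
    }

new File(base, resourcePath) also makes the separator translation unnecessary, since
File normalizes '/' on construction.
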
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/resources/api/rest/GetResource.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/resources/api/rest/GetResource.java
deleted file mode 100644
index 729cb2d..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/resources/api/rest/GetResource.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.resources.api.rest;
-
-
-import java.io.File;
-
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import javax.ws.rs.Consumes;
-import javax.ws.rs.GET;
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.Response;
-
-import org.apache.ambari.server.resources.ResourceManager;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import com.google.inject.Inject;
-
-/**
- * Resource api.
- */
-@Path("/")
-public class GetResource {
-  private static Log LOG = LogFactory.getLog(GetResource.class);
-
-  private static ResourceManager resourceManager;
-
-  @Inject
-  public static void init(ResourceManager instance) {
-    resourceManager = instance;
-  }
-
-  @GET
-  @Path("{resourcePath:.*}")
-  @Consumes(MediaType.TEXT_PLAIN)
-  @Produces(MediaType.APPLICATION_OCTET_STREAM)
-  public Response getResource(@PathParam("resourcePath") String resourcePath,
-      @Context HttpServletRequest req) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Received a resource request from agent"
-          + ", resourcePath=" + resourcePath);
-    }
-    File resourceFile = resourceManager.getResource(resourcePath);
-
-    if (!resourceFile.exists()) {
-      return Response.status(HttpServletResponse.SC_NOT_FOUND).build();
-    }
-
-    return Response.ok(resourceFile).build();
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/AmbariEntryPoint.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/AmbariEntryPoint.java
deleted file mode 100644
index 2028f46..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/AmbariEntryPoint.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.security;
-
-import org.springframework.security.core.AuthenticationException;
-import org.springframework.security.web.AuthenticationEntryPoint;
-
-import javax.servlet.ServletException;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import java.io.IOException;
-
-public class AmbariEntryPoint implements AuthenticationEntryPoint {
-  @Override
-  public void commence(HttpServletRequest request, HttpServletResponse response, AuthenticationException authException) throws IOException, ServletException {
-    response.sendError(HttpServletResponse.SC_FORBIDDEN, authException.getMessage());
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/CertificateManager.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/CertificateManager.java
deleted file mode 100644
index 349b610..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/CertificateManager.java
+++ /dev/null
@@ -1,252 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.security;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.nio.charset.Charset;
-import java.text.MessageFormat;
-import java.util.Map;
-
-import org.apache.ambari.server.utils.ShellCommandUtil;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import com.google.inject.Inject;
-import com.google.inject.Singleton;
-
-/**
- * Ambari security.
- * Manages server and agent certificates
- */
-@Singleton
-public class CertificateManager {
-
-  @Inject Configuration configs;
-
-  private static final Log LOG = LogFactory.getLog(CertificateManager.class);
-
-
-  private static final String GEN_SRVR_KEY = "openssl genrsa -des3 " +
-      "-passout pass:{0} -out {1}/{2} 4096 ";
-  private static final String GEN_SRVR_REQ = "openssl req -passin pass:{0} " +
-      "-new -key {1}/{2} -out {1}/{3} -batch";
-  private static final String SIGN_SRVR_CRT = "openssl x509 " +
-      "-passin pass:{0} -req -days 365 -in {1}/{3} -signkey {1}/{2} " +
-      "-out {1}/{3} \n";
-  private static final String EXPRT_KSTR = "openssl pkcs12 -export" +
-      " -in {1}/{3} -inkey {1}/{2} -certfile {1}/{3} -out {1}/{4} " +
-      "-password pass:{0} -passin pass:{0} \n";
-  private static final String SIGN_AGENT_CRT = "openssl ca -config " +
-      "{0}/ca.config -in {0}/{1} -out {0}/{2} -batch -passin pass:{3} " +
-      "-keyfile {0}/{4} -cert {0}/{5}";
-
-  /**
-   * Verify that root certificate exists, generate it otherwise.
-   */
-  public void initRootCert() {
-    LOG.info("Initialization of root certificate");
-
-    boolean certExists = isCertExists();
-
-    LOG.info("Certificate exists:" + certExists);
-
-    if (!certExists) {
-      generateServerCertificate();
-    }
-  }
-
-  /**
-   * Checks root certificate state.
-   * @return "true" if certificate exists
-   */
-  private boolean isCertExists() {
-
-    Map<String, String> configsMap = configs.getConfigsMap();
-    String srvrKstrDir = configsMap.get(Configuration.SRVR_KSTR_DIR_KEY);
-    String srvrCrtName = configsMap.get(Configuration.SRVR_CRT_NAME_KEY);
-    File certFile = new File(srvrKstrDir + File.separator + srvrCrtName);
-    LOG.debug("srvrKstrDir = " + srvrKstrDir);
-    LOG.debug("srvrCrtName = " + srvrCrtName);
-    LOG.debug("certFile = " + certFile.getAbsolutePath());
-
-    return certFile.exists();
-  }
-
-
-  /**
-   * Runs os command
-   *
-   * @return command execution exit code
-   */
-  private int runCommand(String command) {
-    String line = null;
-    Process process = null;
-    BufferedReader br = null;
-    try {
-      process = Runtime.getRuntime().exec(command);
-      br = new BufferedReader(new InputStreamReader(
-          process.getInputStream(), Charset.forName("UTF-8")));
-
-      while ((line = br.readLine()) != null) {
-        LOG.info(line);
-      }
-
-      try {
-        process.waitFor();
-        ShellCommandUtil.logOpenSslExitCode(command, process.exitValue());
-        return process.exitValue(); // command completed normally
-      } catch (InterruptedException e) {
-        e.printStackTrace();
-      }
-    } catch (IOException e) {
-      e.printStackTrace();
-    } finally {
-      if (br != null) {
-        try {
-          br.close();
-        } catch (IOException ioe) {
-          ioe.printStackTrace();
-        }
-      }
-    }
-
-    return -1; // some exception occurred
-
-  }
-
-  private void generateServerCertificate() {
-    LOG.info("Generation of server certificate");
-
-    Map<String, String> configsMap = configs.getConfigsMap();
-    String srvrKstrDir = configsMap.get(Configuration.SRVR_KSTR_DIR_KEY);
-    String srvrCrtName = configsMap.get(Configuration.SRVR_CRT_NAME_KEY);
-    String srvrKeyName = configsMap.get(Configuration.SRVR_KEY_NAME_KEY);
-    String kstrName = configsMap.get(Configuration.KSTR_NAME_KEY);
-    String srvrCrtPass = configsMap.get(Configuration.SRVR_CRT_PASS_KEY);
-
-    Object[] scriptArgs = {srvrCrtPass, srvrKstrDir, srvrKeyName,
-        srvrCrtName, kstrName};
-
-    String command = MessageFormat.format(GEN_SRVR_KEY, scriptArgs);
-    runCommand(command);
-
-    command = MessageFormat.format(GEN_SRVR_REQ, scriptArgs);
-    runCommand(command);
-
-    command = MessageFormat.format(SIGN_SRVR_CRT, scriptArgs);
-    runCommand(command);
-
-    command = MessageFormat.format(EXPRT_KSTR, scriptArgs);
-    runCommand(command);
-  }
-
-  /**
-   * Returns server certificate content
-   * @return string with server certificate content
-   */
-  public String getServerCert() {
-    Map<String, String> configsMap = configs.getConfigsMap();
-    File certFile = new File(configsMap.get(Configuration.SRVR_KSTR_DIR_KEY) +
-        File.separator + configsMap.get(Configuration.SRVR_CRT_NAME_KEY));
-    String srvrCrtContent = null;
-    try {
-      srvrCrtContent = FileUtils.readFileToString(certFile);
-    } catch (IOException e) {
-      LOG.error(e.getMessage());
-    }
-    return srvrCrtContent;
-  }
-
-  /**
-   * Signs agent certificate
-   * Adds agent certificate to server keystore
-   * @return string with agent signed certificate content
-   */
-  public synchronized SignCertResponse signAgentCrt(String agentHostname, String agentCrtReqContent, String passphraseAgent) {
-    SignCertResponse response = new SignCertResponse();
-    LOG.info("Signing of agent certificate");
-    LOG.info("Verifying passphrase");
-
-    String passphraseSrvr = configs.getConfigsMap().get(Configuration.
-        PASSPHRASE_KEY).trim();
-
-    // Do not log the passphrases themselves; they are shared secrets.
-
-    if (!passphraseSrvr.equals(passphraseAgent.trim())) {
-      LOG.warn("Incorrect passphrase from the agent");
-      response.setResult(SignCertResponse.ERROR_STATUS);
-      response.setMessage("Incorrect passphrase from the agent");
-      return response;
-    }
-
-    Map<String, String> configsMap = configs.getConfigsMap();
-    String srvrKstrDir = configsMap.get(Configuration.SRVR_KSTR_DIR_KEY);
-    String srvrCrtPass = configsMap.get(Configuration.SRVR_CRT_PASS_KEY);
-    String srvrCrtName = configsMap.get(Configuration.SRVR_CRT_NAME_KEY);
-    String srvrKeyName = configsMap.get(Configuration.SRVR_KEY_NAME_KEY);
-    String agentCrtReqName = agentHostname + ".csr";
-    String agentCrtName = agentHostname + ".crt";
-
-    File agentCrtReqFile = new File(srvrKstrDir + File.separator +
-        agentCrtReqName);
-    try {
-      FileUtils.writeStringToFile(agentCrtReqFile, agentCrtReqContent);
-    } catch (IOException e1) {
-      // A failed CSR write will surface as an openssl error below.
-      e1.printStackTrace();
-    }
-    Object[] scriptArgs = {srvrKstrDir, agentCrtReqName, agentCrtName,
-        srvrCrtPass, srvrKeyName, srvrCrtName};
-
-    String command = MessageFormat.format(SIGN_AGENT_CRT, scriptArgs);
-
-    LOG.debug(command);
-
-    int commandExitCode = runCommand(command); // ssl command execution
-    if(commandExitCode != 0) {
-      response.setResult(SignCertResponse.ERROR_STATUS);
-      response.setMessage(ShellCommandUtil.getOpenSslCommandResult(command, commandExitCode));
-      //LOG.warn(ShellCommandUtil.getOpenSslCommandResult(command, commandExitCode));
-      return response;
-    }
-
-    File agentCrtFile = new File(srvrKstrDir + File.separator + agentCrtName);
-    String agentCrtContent = "";
-    try {
-      agentCrtContent = FileUtils.readFileToString(agentCrtFile);
-    } catch (IOException e) {
-      e.printStackTrace();
-      LOG.error("Error reading signed agent certificate");
-      response.setResult(SignCertResponse.ERROR_STATUS);
-      response.setMessage("Error reading signed agent certificate");
-      return response;
-    }
-    response.setResult(SignCertResponse.OK_STATUS);
-    response.setSignedCa(agentCrtContent);
-    //LOG.info(ShellCommandUtil.getOpenSslCommandResult(command, commandExitCode));
-    return response;
-  }
-}
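
Note: runCommand() above drains only the child's stdout; if openssl writes enough to
stderr, the unread pipe can fill and the process blocks before waitFor() returns. The
usual fix is to merge the two streams; a generic JDK sketch (not code from this release):

    import java.io.BufferedReader;
    import java.io.IOException;
    import java.io.InputStreamReader;
    import java.nio.charset.Charset;

    public class RunCommandSketch {
      // Merge stderr into stdout so a single reader can drain both streams.
      public static int run(String... command) throws IOException, InterruptedException {
        ProcessBuilder pb = new ProcessBuilder(command);
        pb.redirectErrorStream(true);
        Process process = pb.start();
        BufferedReader br = new BufferedReader(new InputStreamReader(
            process.getInputStream(), Charset.forName("UTF-8")));
        String line;
        while ((line = br.readLine()) != null) {
          System.out.println(line); // production code would log this instead
        }
        return process.waitFor();
      }
    }

ProcessBuilder also takes the command as an argument list, which avoids the whitespace
tokenization Runtime.exec(String) applies to the openssl command line.
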
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/ClientSecurityType.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/ClientSecurityType.java
deleted file mode 100644
index 26d4da7..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/ClientSecurityType.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.security;
-
-public enum ClientSecurityType {
-  LOCAL("local"),
-  LDAP("ldap");
-
-  private String value;
-  ClientSecurityType(String value) {
-    this.value = value;
-  }
-
-  /**
-   * Constructs enum object from string representation
-   * @param value string representation of client security type
-   * @return ClientSecurityType (defaults to LOCAL if not recognized)
-   */
-  public static ClientSecurityType fromString(String value) {
-    for (ClientSecurityType securityType : ClientSecurityType.values()) {
-      if (securityType.toString().equalsIgnoreCase(value)) {
-        return securityType;
-      }
-    }
-    return LOCAL;
-  }
-
-  @Override
-  public String toString() {
-    return value;
-  }
-}
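
Note: fromString() deliberately falls back to LOCAL for unrecognized input, so callers can
parse a raw configuration property without a null check. For example:

    public class ClientSecurityTypeDemo {
      public static void main(String[] args) {
        System.out.println(ClientSecurityType.fromString("LDAP"));     // prints "ldap"
        System.out.println(ClientSecurityType.fromString("kerberos")); // prints "local"
      }
    }
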
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/SecurityFilter.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/SecurityFilter.java
deleted file mode 100644
index 400b2e3..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/SecurityFilter.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-package org.apache.ambari.server.security;
-
-import java.io.IOException;
-import java.util.regex.Pattern;
-
-import javax.servlet.Filter;
-import javax.servlet.FilterChain;
-import javax.servlet.FilterConfig;
-import javax.servlet.ServletException;
-import javax.servlet.ServletRequest;
-import javax.servlet.ServletResponse;
-import javax.servlet.http.HttpServletRequest;
-
-import org.apache.ambari.server.controller.AmbariServer;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-public class SecurityFilter implements Filter {
-
-  // Allowed paths for one-way auth HTTPS
-  private static String CA = "/ca";
-  private final static Log LOG = LogFactory.getLog(SecurityFilter.class);
-
-  @Override
-  public void destroy() {
-  }
-
-  @Override
-  public void doFilter(ServletRequest serReq, ServletResponse serResp,
-      FilterChain filtCh) throws IOException, ServletException {
-
-    HttpServletRequest req = (HttpServletRequest) serReq;
-    String reqUrl = req.getRequestURL().toString();
-
-    if (serReq.getLocalPort() == AmbariServer.AGENT_ONE_WAY_AUTH) {
-      if (isRequestAllowed(reqUrl)) {
-        filtCh.doFilter(serReq, serResp);
-      } else {
-        LOG.warn("This request is not allowed on this port");
-      }
-    } else {
-      filtCh.doFilter(serReq, serResp);
-    }
-  }
-
-  @Override
-  public void init(FilterConfig arg0) throws ServletException {
-  }
-
-  private boolean isRequestAllowed(String reqUrl) {
-    try {
-      if (Pattern.matches("https://[A-z]*:[0-9]*/cert/ca[/]*", reqUrl)) {
-        return true;
-      }
-      if (Pattern.matches("https://[A-z]*:[0-9]*/certs/[A-z0-9-.]*", reqUrl)) {
-        return true;
-      }
-      if (Pattern.matches("https://[A-z]*:[0-9]*/resources/.*", reqUrl)) {
-        return true;
-      }
-    } catch (Exception e) {
-      // A malformed URL simply fails the check below.
-    }
-    LOG.warn("Request " + reqUrl + " doesn't match any pattern.");
-    return false;
-  }
-}
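
Note: the patterns in isRequestAllowed() use [A-z], which as a regex character class also
admits the ASCII characters between 'Z' and 'a' ('[', '\', ']', '^', '_' and the backtick),
so it is looser than a letters-only match. A tighter host class, sketched for the /cert/ca
route (the exact allowed character set is an assumption):

    import java.util.regex.Pattern;

    public class AllowedUrlSketch {
      // [A-Za-z0-9.-] matches hostname characters without the [A-z] gap.
      private static final Pattern CA_CERT =
          Pattern.compile("https://[A-Za-z0-9.-]*:[0-9]*/cert/ca/?");

      public static boolean isCaRequest(String url) {
        return CA_CERT.matcher(url).matches();
      }
    }
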
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/SignCertResponse.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/SignCertResponse.java
deleted file mode 100644
index 374bb02..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/SignCertResponse.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.security;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-import javax.xml.bind.annotation.XmlType;
-
-/**
- *
- * Sign certificate response data model.
- *
- */
-@XmlRootElement
-@XmlAccessorType(XmlAccessType.FIELD)
-@XmlType(name = "", propOrder = {})
-public class SignCertResponse {
-
-  public static final String ERROR_STATUS = "ERROR";
-  public static final String OK_STATUS = "OK";
-
-  @XmlElement
-  private String result;
-  @XmlElement
-  private String signedCa;
-  @XmlElement
-  private String message;
-
-  public String getResult() {
-    return result;
-  }
-  public void setResult(String result) {
-    this.result = result;
-  }
-  public String getSignedCa() {
-    return signedCa;
-  }
-  public void setSignedCa(String signedCa) {
-    this.signedCa = signedCa;
-  }
-
-  public String getMessage() {
-    return message;
-  }
-  public void setMessage(String message) {
-    this.message = message;
-  }
-}
-
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/SignMessage.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/SignMessage.java
deleted file mode 100644
index 6217c6c..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/SignMessage.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.security;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-import javax.xml.bind.annotation.XmlType;
-
-/**
- *
- * Sign certificate request data model.
- *
- */
-@XmlRootElement
-@XmlAccessorType(XmlAccessType.FIELD)
-@XmlType(name = "", propOrder = {})
-public class SignMessage {
-
-  @XmlElement
-  private String csr;
-  @XmlElement
-  private String passphrase;
-  public String getCsr() {
-    return csr;
-  }
-  public void setCsr(String csr) {
-    this.csr = csr;
-  }
-  public String getPassphrase() {
-    return passphrase;
-  }
-  public void setPassphrase(String passphrase) {
-    this.passphrase = passphrase;
-  }
-}
-
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/AmbariLdapAuthenticationProvider.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/AmbariLdapAuthenticationProvider.java
deleted file mode 100644
index 9167237..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/AmbariLdapAuthenticationProvider.java
+++ /dev/null
@@ -1,135 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.security.authorization;
-
-import com.google.inject.Inject;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.security.ClientSecurityType;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.springframework.security.authentication.AuthenticationProvider;
-import org.springframework.security.authentication.UsernamePasswordAuthenticationToken;
-import org.springframework.security.core.Authentication;
-import org.springframework.security.core.AuthenticationException;
-import org.springframework.security.core.userdetails.UsernameNotFoundException;
-import org.springframework.security.ldap.DefaultSpringSecurityContextSource;
-import org.springframework.security.ldap.authentication.BindAuthenticator;
-import org.springframework.security.ldap.authentication.LdapAuthenticationProvider;
-import org.springframework.security.ldap.search.FilterBasedLdapUserSearch;
-
-
-/**
- * Provides LDAP user authorization logic for Ambari Server
- */
-public class AmbariLdapAuthenticationProvider implements AuthenticationProvider {
-  private static final Logger log = LoggerFactory.getLogger(AmbariLdapAuthenticationProvider.class);
-
-  Configuration configuration;
-
-  private AmbariLdapAuthoritiesPopulator authoritiesPopulator;
-
-  private ThreadLocal<LdapServerProperties> ldapServerProperties = new ThreadLocal<LdapServerProperties>();
-  private ThreadLocal<LdapAuthenticationProvider> providerThreadLocal = new ThreadLocal<LdapAuthenticationProvider>();
-
-  @Inject
-  public AmbariLdapAuthenticationProvider(Configuration configuration, AmbariLdapAuthoritiesPopulator authoritiesPopulator) {
-    this.configuration = configuration;
-    this.authoritiesPopulator = authoritiesPopulator;
-  }
-
-  @Override
-  public Authentication authenticate(Authentication authentication) throws AuthenticationException {
-
-    if (isLdapEnabled()) {
-
-      return loadLdapAuthenticationProvider().authenticate(authentication);
-
-    } else {
-      return null;
-    }
-
-  }
-
-  @Override
-  public boolean supports(Class<?> authentication) {
-    return UsernamePasswordAuthenticationToken.class.isAssignableFrom(authentication);
-  }
-
-  /**
-   * Reloads LDAP Context Source and depending objects if properties were changed
-   * @return corresponding LDAP authentication provider
-   */
-  private LdapAuthenticationProvider loadLdapAuthenticationProvider() {
-    if (reloadLdapServerProperties()) {
-      log.info("LDAP Properties changed - rebuilding Context");
-      DefaultSpringSecurityContextSource springSecurityContextSource =
-              new DefaultSpringSecurityContextSource(ldapServerProperties.get().getLdapUrls(), ldapServerProperties.get().getBaseDN());
-
-      if (!ldapServerProperties.get().isAnonymousBind()) {
-        springSecurityContextSource.setUserDn(ldapServerProperties.get().getManagerDn());
-        springSecurityContextSource.setPassword(ldapServerProperties.get().getManagerPassword());
-      }
-
-      try {
-        springSecurityContextSource.afterPropertiesSet();
-      } catch (Exception e) {
-        log.error("LDAP Context Source not loaded ", e);
-        throw new UsernameNotFoundException("LDAP Context Source not loaded", e);
-      }
-
-      //TODO change properties
-      String userSearchBase = ldapServerProperties.get().getUserSearchBase();
-      String userSearchFilter = ldapServerProperties.get().getUserSearchFilter();
-
-      FilterBasedLdapUserSearch userSearch = new FilterBasedLdapUserSearch(userSearchBase, userSearchFilter, springSecurityContextSource);
-
-      BindAuthenticator bindAuthenticator = new BindAuthenticator(springSecurityContextSource);
-      bindAuthenticator.setUserSearch(userSearch);
-
-      LdapAuthenticationProvider authenticationProvider = new LdapAuthenticationProvider(bindAuthenticator, authoritiesPopulator);
-
-      providerThreadLocal.set(authenticationProvider);
-    }
-
-    return providerThreadLocal.get();
-  }
-
-
-  /**
-   * Check if LDAP authentication is enabled in server properties
-   * @return true if enabled
-   */
-  private boolean isLdapEnabled() {
-    return configuration.getClientSecurityType() == ClientSecurityType.LDAP;
-  }
-
-  /**
-   * Reloads LDAP Server properties from configuration
-   *
-   * @return true if properties were reloaded
-   */
-  private boolean reloadLdapServerProperties() {
-    LdapServerProperties properties = configuration.getLdapServerProperties();
-    if (!properties.equals(ldapServerProperties.get())) {
-      log.info("Reloading properties");
-      ldapServerProperties.set(properties);
-      return true;
-    }
-    return false;
-  }
-}
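
Note: loadLdapAuthenticationProvider() rebuilds the Spring LDAP context source only when a
freshly read LdapServerProperties compares unequal to the last snapshot, then caches the
provider per thread. The same compare-and-rebuild pattern in isolation (a generic sketch,
not this class):

    public class ReloadOnChangeSketch<P, T> {
      public interface Factory<A, B> {
        B create(A props);
      }

      private P lastProps;
      private T cached;

      // Rebuild the cached delegate only when the properties snapshot changes.
      public synchronized T get(P currentProps, Factory<P, T> factory) {
        if (cached == null || !currentProps.equals(lastProps)) {
          cached = factory.create(currentProps);
          lastProps = currentProps;
        }
        return cached;
      }
    }
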
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/AmbariLdapAuthoritiesPopulator.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/AmbariLdapAuthoritiesPopulator.java
deleted file mode 100644
index 8109ca5..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/AmbariLdapAuthoritiesPopulator.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.security.authorization;
-
-import com.google.inject.Inject;
-import com.google.inject.persist.Transactional;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.orm.dao.RoleDAO;
-import org.apache.ambari.server.orm.dao.UserDAO;
-import org.apache.ambari.server.orm.entities.RoleEntity;
-import org.apache.ambari.server.orm.entities.UserEntity;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.springframework.ldap.core.DirContextOperations;
-import org.springframework.security.core.GrantedAuthority;
-import org.springframework.security.ldap.userdetails.LdapAuthoritiesPopulator;
-
-import java.util.Collection;
-
-/**
- * Provides authorities population for LDAP user from local DB
- */
-public class AmbariLdapAuthoritiesPopulator implements LdapAuthoritiesPopulator {
-  private static final Logger log = LoggerFactory.getLogger(AmbariLdapAuthoritiesPopulator.class);
-
-  Configuration configuration;
-  private AuthorizationHelper authorizationHelper;
-  UserDAO userDAO;
-  RoleDAO roleDAO;
-
-  @Inject
-  public AmbariLdapAuthoritiesPopulator(Configuration configuration, AuthorizationHelper authorizationHelper,
-                                        UserDAO userDAO, RoleDAO roleDAO) {
-    this.configuration = configuration;
-    this.authorizationHelper = authorizationHelper;
-    this.userDAO = userDAO;
-    this.roleDAO = roleDAO;
-  }
-
-  @Override
-  @Transactional
-  public Collection<? extends GrantedAuthority> getGrantedAuthorities(DirContextOperations userData, String username) {
-    log.info("Get roles for user " + username + " from local DB");
-
-    UserEntity user = userDAO.findLdapUserByName(username);
-
-    if (user == null) {
-      log.info("User " + username + " not present in local DB - creating");
-
-      UserEntity newUser = new UserEntity();
-      newUser.setLdapUser(true);
-      newUser.setUserName(username);
-
-      String roleName = (configuration.getConfigsMap().get(Configuration.USER_ROLE_NAME_KEY));
-      log.info("Using default role name " + roleName);
-
-      RoleEntity role = roleDAO.findByName(roleName);
-
-      if (role == null) {
-        log.info("Role " + roleName + " not present in local DB - creating");
-        role = new RoleEntity();
-        role.setRoleName(roleName);
-        roleDAO.create(role);
-        role = roleDAO.findByName(role.getRoleName());
-      }
-
-      userDAO.create(newUser);
-
-      user = userDAO.findLdapUserByName(newUser.getUserName());
-
-      user.getRoleEntities().add(role);
-      role.getUserEntities().add(user);
-      roleDAO.merge(role);
-      userDAO.merge(user);
-    }
-
-    return authorizationHelper.convertRolesToAuthorities(user.getRoleEntities());
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/AmbariLocalUserDetailsService.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/AmbariLocalUserDetailsService.java
deleted file mode 100644
index 34ebbd8..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/AmbariLocalUserDetailsService.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.security.authorization;
-
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.orm.dao.RoleDAO;
-import org.apache.ambari.server.orm.dao.UserDAO;
-import org.apache.ambari.server.orm.entities.UserEntity;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.springframework.security.core.userdetails.User;
-import org.springframework.security.core.userdetails.UserDetails;
-import org.springframework.security.core.userdetails.UserDetailsService;
-import org.springframework.security.core.userdetails.UsernameNotFoundException;
-
-
-public class AmbariLocalUserDetailsService implements UserDetailsService {
-  private static final Logger log = LoggerFactory.getLogger(AmbariLocalUserDetailsService.class);
-
-  Injector injector;
-  Configuration configuration;
-  private AuthorizationHelper authorizationHelper;
-  UserDAO userDAO;
-  RoleDAO roleDAO;
-
-  @Inject
-  public AmbariLocalUserDetailsService(Injector injector, Configuration configuration,
-                                       AuthorizationHelper authorizationHelper, UserDAO userDAO, RoleDAO roleDAO) {
-    this.injector = injector;
-    this.configuration = configuration;
-    this.authorizationHelper = authorizationHelper;
-    this.userDAO = userDAO;
-    this.roleDAO = roleDAO;
-  }
-
-  /**
-   * Loads Spring Security UserDetails from identity storage according to Configuration
-   *
-   * @param username username
-   * @return UserDetails
-   * @throws UsernameNotFoundException when the user is not found or has no roles
-   */
-  @Override
-  public UserDetails loadUserByUsername(String username) throws UsernameNotFoundException {
-    log.info("Loading user by name: " + username);
-
-    UserEntity user = userDAO.findLocalUserByName(username);
-
-    if (user == null) {
-      log.info("user not found ");
-      throw new UsernameNotFoundException("Username " + username + " not found");
-    }else if (user.getRoleEntities().isEmpty()) {
-      log.info("No authorities for user");
-      throw new UsernameNotFoundException("Username " + username + " has no roles");
-    }
-
-    return new User(user.getUserName(), user.getUserPassword(),
-            authorizationHelper.convertRolesToAuthorities(user.getRoleEntities()));
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/AuthorizationHelper.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/AuthorizationHelper.java
deleted file mode 100644
index f9b0b0f..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/AuthorizationHelper.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.security.authorization;
-
-import com.google.inject.Singleton;
-import org.apache.ambari.server.orm.entities.RoleEntity;
-import org.springframework.security.core.GrantedAuthority;
-import org.springframework.security.core.authority.SimpleGrantedAuthority;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-
-/**
- * Provides utility methods for authorization functionality
- */
-@Singleton
-public class AuthorizationHelper {
-
-  /**
-   * Converts collection of RoleEntities to collection of GrantedAuthorities
-   */
-  public Collection<GrantedAuthority> convertRolesToAuthorities(Collection<RoleEntity> roleEntities) {
-    List<GrantedAuthority> authorities = new ArrayList<GrantedAuthority>(roleEntities.size());
-
-    for (RoleEntity roleEntity : roleEntities) {
-      authorities.add(new SimpleGrantedAuthority(roleEntity.getRoleName().toUpperCase()));
-    }
-
-    return authorities;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/LdapServerProperties.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/LdapServerProperties.java
deleted file mode 100644
index a578fbd..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/LdapServerProperties.java
+++ /dev/null
@@ -1,171 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.security.authorization;
-
-import org.apache.commons.lang.StringUtils;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
-/**
- * Describes LDAP Server connection parameters
- */
-public class LdapServerProperties {
-
-  private String primaryUrl;
-  private String secondaryUrl;
-  private boolean useSsl;
-  private boolean anonymousBind;
-  private String managerDn;
-  private String managerPassword;
-  private String baseDN;
-  private String userSearchBase = "";
-  private String usernameAttribute;
-
-  private static final String userSearchFilter = "({attribute}={0})";
-
-  public List<String> getLdapUrls() {
-    String protocol = useSsl ? "ldaps://" : "ldap://";
-
-    if (StringUtils.isEmpty(primaryUrl)) {
-      return Collections.emptyList();
-    } else {
-      List<String> list = new ArrayList<String>();
-      list.add(protocol + primaryUrl);
-      if (!StringUtils.isEmpty(secondaryUrl)) {
-        list.add(protocol + secondaryUrl);
-      }
-      return list;
-    }
-  }
-
-  public String getPrimaryUrl() {
-    return primaryUrl;
-  }
-
-  public void setPrimaryUrl(String primaryUrl) {
-    this.primaryUrl = primaryUrl;
-  }
-
-  public String getSecondaryUrl() {
-    return secondaryUrl;
-  }
-
-  public void setSecondaryUrl(String secondaryUrl) {
-    this.secondaryUrl = secondaryUrl;
-  }
-
-  public boolean isUseSsl() {
-    return useSsl;
-  }
-
-  public void setUseSsl(boolean useSsl) {
-    this.useSsl = useSsl;
-  }
-
-  public boolean isAnonymousBind() {
-    return anonymousBind;
-  }
-
-  public void setAnonymousBind(boolean anonymousBind) {
-    this.anonymousBind = anonymousBind;
-  }
-
-  public String getManagerDn() {
-    return managerDn;
-  }
-
-  public void setManagerDn(String managerDn) {
-    this.managerDn = managerDn;
-  }
-
-  public String getManagerPassword() {
-    return managerPassword;
-  }
-
-  public void setManagerPassword(String managerPassword) {
-    this.managerPassword = managerPassword;
-  }
-
-  public String getBaseDN() {
-    return baseDN;
-  }
-
-  public void setBaseDN(String baseDN) {
-    this.baseDN = baseDN;
-  }
-
-  public String getUserSearchBase() {
-    return userSearchBase;
-  }
-
-  public void setUserSearchBase(String userSearchBase) {
-    this.userSearchBase = userSearchBase;
-  }
-
-  public String getUserSearchFilter() {
-    return userSearchFilter.replace("{attribute}", usernameAttribute);
-  }
-
-  public String getUsernameAttribute() {
-    return usernameAttribute;
-  }
-
-  public void setUsernameAttribute(String usernameAttribute) {
-    this.usernameAttribute = usernameAttribute;
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (this == obj) return true;
-    if (obj == null || getClass() != obj.getClass()) return false;
-
-    LdapServerProperties that = (LdapServerProperties) obj;
-
-    if (primaryUrl != null ? !primaryUrl.equals(that.primaryUrl) : that.primaryUrl != null) return false;
-    if (secondaryUrl != null ? !secondaryUrl.equals(that.secondaryUrl) : that.secondaryUrl != null) return false;
-    if (useSsl != that.useSsl) return false;
-    if (anonymousBind != that.anonymousBind) return false;
-    if (managerDn != null ? !managerDn.equals(that.managerDn) : that.managerDn != null) return false;
-    if (managerPassword != null ? !managerPassword.equals(that.managerPassword) : that.managerPassword != null)
-      return false;
-    if (baseDN != null ? !baseDN.equals(that.baseDN) : that.baseDN != null) return false;
-    if (userSearchBase != null ? !userSearchBase.equals(that.userSearchBase) : that.userSearchBase != null)
-      return false;
-    if (usernameAttribute != null ? !usernameAttribute.equals(that.usernameAttribute) : that.usernameAttribute != null)
-      return false;
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = primaryUrl != null ? primaryUrl.hashCode() : 0;
-    result = 31 * result + (secondaryUrl != null ? secondaryUrl.hashCode() : 0);
-    result = 31 * result + (useSsl ? 1 : 0);
-    result = 31 * result + (anonymousBind ? 1 : 0);
-    result = 31 * result + (managerDn != null ? managerDn.hashCode() : 0);
-    result = 31 * result + (managerPassword != null ? managerPassword.hashCode() : 0);
-    result = 31 * result + (baseDN != null ? baseDN.hashCode() : 0);
-    result = 31 * result + (userSearchBase != null ? userSearchBase.hashCode() : 0);
-    result = 31 * result + (usernameAttribute != null ? usernameAttribute.hashCode() : 0);
-    return result;
-  }
-
-}
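
To make the URL assembly above concrete, a short sketch (the host names are placeholders):

```java
public class LdapUrlSketch {
  static void demo() {
    LdapServerProperties props = new LdapServerProperties();
    props.setPrimaryUrl("ldap1.example.com:389");   // placeholder host
    props.setSecondaryUrl("ldap2.example.com:389"); // placeholder host
    props.setUseSsl(false);
    props.setUsernameAttribute("uid");

    // ["ldap://ldap1.example.com:389", "ldap://ldap2.example.com:389"]
    System.out.println(props.getLdapUrls());
    // "(uid={0})" -- the {0} placeholder is later substituted with the login name
    System.out.println(props.getUserSearchFilter());
  }
}
```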
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/User.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/User.java
deleted file mode 100644
index 6693a8a..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/User.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.security.authorization;
-
-import org.apache.ambari.server.orm.entities.RoleEntity;
-import org.apache.ambari.server.orm.entities.UserEntity;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Date;
-
-/**
- * Describes user of web-services
- */
-public class User {
-  final int userId;
-  final String userName;
-  final boolean ldapUser;
-  final Date createTime;
-  final Collection<String> roles = new ArrayList<String>();
-
-  User(UserEntity userEntity) {
-    userId = userEntity.getUserId();
-    userName = userEntity.getUserName();
-    createTime = userEntity.getCreateTime();
-    ldapUser = userEntity.getLdapUser();
-    for (RoleEntity roleEntity : userEntity.getRoleEntities()) {
-      roles.add(roleEntity.getRoleName());
-    }
-  }
-
-  public int getUserId() {
-    return userId;
-  }
-
-  public String getUserName() {
-    return userName;
-  }
-
-  public boolean isLdapUser() {
-    return ldapUser;
-  }
-
-  public Date getCreateTime() {
-    return createTime;
-  }
-
-  public Collection<String> getRoles() {
-    return roles;
-  }
-
-  @Override
-  public String toString() {
-    return (ldapUser ? "[LDAP]" : "[LOCAL]") + userName;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java
deleted file mode 100644
index e649266..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java
+++ /dev/null
@@ -1,254 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.security.authorization;
-
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.List;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.orm.dao.RoleDAO;
-import org.apache.ambari.server.orm.dao.UserDAO;
-import org.apache.ambari.server.orm.entities.RoleEntity;
-import org.apache.ambari.server.orm.entities.UserEntity;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.springframework.security.crypto.password.PasswordEncoder;
-
-import com.google.inject.Inject;
-import com.google.inject.Singleton;
-import com.google.inject.persist.Transactional;
-
-/**
- * Provides high-level access to Users and Roles in database
- */
-@Singleton
-public class Users {
-
-  private final static Logger LOG = LoggerFactory.getLogger(Users.class);
-
-  @Inject
-  protected UserDAO userDAO;
-  @Inject
-  protected RoleDAO roleDAO;
-  @Inject
-  protected PasswordEncoder passwordEncoder;
-  @Inject
-  protected Configuration configuration;
-
-
-  public List<User> getAllUsers() {
-    List<UserEntity> userEntities = userDAO.findAll();
-    List<User> users = new ArrayList<User>(userEntities.size());
-
-    for (UserEntity userEntity : userEntities) {
-      users.add(new User(userEntity));
-    }
-
-    return users;
-  }
-
-  public User getUser(int userId) throws AmbariException {
-    UserEntity userEntity = userDAO.findByPK(userId);
-    if (userEntity != null) {
-      return new User(userEntity);
-    } else {
-      throw new AmbariException("User with id '" + userId + " not found");
-    }
-  }
-
-  public User getAnyUser(String userName) {
-    UserEntity userEntity = userDAO.findLdapUserByName(userName);
-    if (null == userEntity) {
-      userEntity = userDAO.findLocalUserByName(userName);
-    }
-
-    return (null == userEntity) ? null : new User(userEntity);
-  }
-
-  public User getLocalUser(String userName) throws AmbariException{
-    UserEntity userEntity = userDAO.findLocalUserByName(userName);
-    if (userEntity == null) {
-      throw new AmbariException("User doesn't exist");
-    }
-    return new User(userEntity);
-  }
-
-  public User getLdapUser(String userName) throws AmbariException{
-    UserEntity userEntity = userDAO.findLdapUserByName(userName);
-    if (userEntity == null) {
-      throw new AmbariException("User doesn't exist");
-    }
-    return new User(userEntity);
-  }
-
-  /**
-   * Modifies password of local user
-   * @throws AmbariException
-   */
-  public synchronized void modifyPassword(String userName, String oldPassword, String newPassword) throws AmbariException {
-    UserEntity userEntity = userDAO.findLocalUserByName(userName);
-    if (userEntity != null) {
-      if (passwordEncoder.matches(oldPassword, userEntity.getUserPassword())) {
-        userEntity.setUserPassword(passwordEncoder.encode(newPassword));
-        userDAO.merge(userEntity);
-      } else {
-        throw new AmbariException("Wrong password provided");
-      }
-
-    } else {
-      userEntity = userDAO.findLdapUserByName(userName);
-      if (userEntity != null) {
-        throw new AmbariException("Password of LDAP user cannot be modified");
-      } else {
-        throw new AmbariException("User " + userName + " not found");
-      }
-    }
-  }
-
-  /**
-   * Creates a new local user with the provided userName and password
-   */
-  @Transactional
-  public synchronized void createUser(String userName, String password) {
-    UserEntity userEntity = new UserEntity();
-    userEntity.setUserName(userName);
-    userEntity.setUserPassword(passwordEncoder.encode(password));
-    userEntity.setRoleEntities(new HashSet<RoleEntity>());
-
-    RoleEntity roleEntity = roleDAO.findByName(getUserRole());
-    if (roleEntity == null) {
-      createRole(getUserRole());
-    }
-    roleEntity = roleDAO.findByName(getUserRole());
-
-    userEntity.getRoleEntities().add(roleEntity);
-    userDAO.create(userEntity);
-
-    roleEntity.getUserEntities().add(userEntity);
-    roleDAO.merge(roleEntity);
-  }
-
-  @Transactional
-  public synchronized void removeUser(User user) throws AmbariException {
-    UserEntity userEntity = userDAO.findByPK(user.getUserId());
-    if (userEntity != null) {
-      userDAO.remove(userEntity);
-    } else {
-      throw new AmbariException("User " + user + " doesn't exist");
-    }
-  }
-
-  /**
-   * Grants the ADMIN role to the provided user
-   * @throws AmbariException
-   */
-  public synchronized void promoteToAdmin(User user) throws AmbariException{
-    addRoleToUser(user, getAdminRole());
-  }
-
-  /**
-   * Removes the ADMIN role from the provided user
-   * @throws AmbariException
-   */
-  public synchronized void demoteAdmin(User user) throws AmbariException {
-    removeRoleFromUser(user, getAdminRole());
-  }
-
-  @Transactional
-  public synchronized void addRoleToUser(User user, String role)
-      throws AmbariException {
-
-    UserEntity userEntity = userDAO.findByPK(user.getUserId());
-    if (userEntity == null) {
-      throw new AmbariException("User " + user + " doesn't exist");
-    }
-
-    RoleEntity roleEntity = roleDAO.findByName(role);
-    if (roleEntity == null) {
-      LOG.warn("Trying to add user to non-existent role"
-          + ", user=" + user.getUserName()
-          + ", role=" + role);
-      throw new AmbariException("Role " + role + " doesn't exist");
-    }
-
-    if (!userEntity.getRoleEntities().contains(roleEntity)) {
-      userEntity.getRoleEntities().add(roleEntity);
-      roleEntity.getUserEntities().add(userEntity);
-      userDAO.merge(userEntity);
-      roleDAO.merge(roleEntity);
-    } else {
-      throw new AmbariException("User " + user + " already owns role " + role);
-    }
-
-  }
-
-  @Transactional
-  public synchronized void removeRoleFromUser(User user, String role)
-      throws AmbariException {
-    UserEntity userEntity = userDAO.findByPK(user.getUserId());
-    if (userEntity == null) {
-      throw new AmbariException("User " + user + " doesn't exist");
-    }
-
-    RoleEntity roleEntity = roleDAO.findByName(role);
-    if (roleEntity == null) {
-      throw new AmbariException("Role " + role + " doesn't exist");
-    }
-
-    if (userEntity.getRoleEntities().contains(roleEntity)) {
-      userEntity.getRoleEntities().remove(roleEntity);
-      roleEntity.getUserEntities().remove(userEntity);
-      userDAO.merge(userEntity);
-      roleDAO.merge(roleEntity);
-    } else {
-      throw new AmbariException("User " + user + " doesn't own role " + role);
-    }
-
-  }
-
-  public String getUserRole() {
-    return configuration.getConfigsMap().get(Configuration.USER_ROLE_NAME_KEY);
-  }
-
-  public String getAdminRole() {
-    return configuration.getConfigsMap().get(Configuration.ADMIN_ROLE_NAME_KEY);
-  }
-
-  /**
-   * Creates a new role
-   */
-  public void createRole(String role) {
-    RoleEntity roleEntity = new RoleEntity();
-    roleEntity.setRoleName(role);
-    roleDAO.create(roleEntity);
-  }
-
-  /**
-   * Creates ADMIN and USER roles if not present
-   */
-  public synchronized void createDefaultRoles() {
-    if (roleDAO.findByName(getUserRole()) == null) {
-      createRole(getUserRole());
-    }
-    if (roleDAO.findByName(getAdminRole()) == null) {
-      createRole(getAdminRole());
-    }
-  }
-}
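
A hedged sketch of the intended call sequence against the Users facade above, assuming a Guice-injected instance and the default role configuration; the user name and passwords are placeholders:

```java
import org.apache.ambari.server.AmbariException;

public class UsersFlowSketch {
  static void demo(Users users) throws AmbariException {
    users.createUser("alice", "s3cr3t");            // encodes the password, grants the default USER role
    User alice = users.getLocalUser("alice");
    users.promoteToAdmin(alice);                    // adds the configured ADMIN role
    users.modifyPassword("alice", "s3cr3t", "n3w"); // checks the old password before re-encoding
    users.demoteAdmin(users.getLocalUser("alice")); // removes the ADMIN role again
  }
}
```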
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/unsecured/rest/CertificateDownload.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/unsecured/rest/CertificateDownload.java
deleted file mode 100644
index 2a9a313..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/unsecured/rest/CertificateDownload.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.security.unsecured.rest;
-
-
-import javax.ws.rs.GET;
-import javax.ws.rs.Path;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.MediaType;
-
-import org.apache.ambari.server.security.CertificateManager;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import com.google.inject.Inject;
-
-@Path("/cert/ca")
-public class CertificateDownload {
-  private static Log LOG = LogFactory.getLog(CertificateDownload.class);
-  private static CertificateManager certMan;
-
-  @Inject
-  public static void init(CertificateManager instance) {
-    certMan = instance;
-  }
-
-  @GET
-  @Produces({MediaType.TEXT_PLAIN})
-  public String downloadSrvrCrt() {
-    return certMan.getServerCert();
-  }
-
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/unsecured/rest/CertificateSign.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/unsecured/rest/CertificateSign.java
deleted file mode 100644
index 296886c..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/security/unsecured/rest/CertificateSign.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.security.unsecured.rest;
-
-
-import javax.servlet.http.HttpServletRequest;
-import javax.ws.rs.Consumes;
-import javax.ws.rs.POST;
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.MediaType;
-
-import org.apache.ambari.server.security.CertificateManager;
-import org.apache.ambari.server.security.SignCertResponse;
-import org.apache.ambari.server.security.SignMessage;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import com.google.inject.Inject;
-@Path("/certs")
-public class CertificateSign {
-  private static Log LOG = LogFactory.getLog(CertificateSign.class);
-  private static CertificateManager certMan;
-
-  @Inject
-  public static void init(CertificateManager instance) {
-    certMan = instance;
-  }
-
-  /**
-   * Signs agent certificate
-   * @response.representation.200.doc This API is invoked by Ambari agent running
-   *  on a cluster to register with the server.
-   * @response.representation.200.mediaType application/json
-   * @response.representation.406.doc Error in register message format
-   * @response.representation.408.doc Request Timed out
-   * @param message Register message
-   * @throws Exception
-   */
-  @Path("{hostName}")
-  @POST
-  @Consumes(MediaType.APPLICATION_JSON)
-  @Produces({MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML})
-  public SignCertResponse signAgentCrt(@PathParam("hostName") String hostname,
-                                       SignMessage message, @Context HttpServletRequest req) {
-    return certMan.signAgentCrt(hostname, message.getCsr(), message.getPassphrase());
-  }
-}
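
The real caller of this endpoint is the Ambari agent, but the request shape can be sketched with plain JDK classes. The port and the JSON field names below are assumptions inferred from SignMessage's getCsr()/getPassphrase(), not a verified wire format, and the naive string concatenation does no JSON escaping:

```java
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;

public class CsrSignSketch {
  public static int requestSignedCert(String server, String hostName,
                                      String csrPem, String passphrase) throws Exception {
    URL url = new URL("https://" + server + ":8440/certs/" + hostName); // port is illustrative
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("POST");
    conn.setRequestProperty("Content-Type", "application/json");
    conn.setDoOutput(true);
    String body = "{\"csr\":\"" + csrPem + "\",\"passphrase\":\"" + passphrase + "\"}";
    OutputStream out = conn.getOutputStream();
    out.write(body.getBytes("UTF-8"));
    out.close();
    return conn.getResponseCode(); // a 200 body carries a SignCertResponse (JSON or XML)
  }
}
```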
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/stageplanner/RoleGraph.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/stageplanner/RoleGraph.java
deleted file mode 100644
index 501afd6..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/stageplanner/RoleGraph.java
+++ /dev/null
@@ -1,159 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.stageplanner;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-
-import org.apache.ambari.server.actionmanager.HostRoleCommand;
-import org.apache.ambari.server.actionmanager.Stage;
-import org.apache.ambari.server.metadata.RoleCommandOrder;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-public class RoleGraph {
-
-  private static Log LOG = LogFactory.getLog(RoleGraph.class);
-
-  Map<String, RoleGraphNode> graph = null;
-  private RoleCommandOrder roleDependencies;
-  private Stage initialStage = null;
-  private boolean sameHostOptimization = true;
-
-  public RoleGraph(RoleCommandOrder rd) {
-    this.roleDependencies = rd;
-  }
-
-  /**
-   * Given a stage, builds a DAG of all execution commands within the stage.
-   */
-  public void build(Stage stage) {
-    if (stage == null) {
-      throw new IllegalArgumentException("Null stage");
-    }
-    graph = new TreeMap<String, RoleGraphNode>();
-    initialStage = stage;
-
-    Map<String, Map<String, HostRoleCommand>> hostRoleCommands = stage.getHostRoleCommands();
-    for (String host : hostRoleCommands.keySet()) {
-      for (String role : hostRoleCommands.get(host).keySet()) {
-        HostRoleCommand hostRoleCommand = hostRoleCommands.get(host).get(role);
-        RoleGraphNode rgn;
-        if (graph.get(role) == null) {
-          rgn = new RoleGraphNode(hostRoleCommand.getRole(),
-              hostRoleCommand.getRoleCommand());
-          graph.put(role, rgn);
-        }
-        rgn = graph.get(role);
-        rgn.addHost(host);
-      }
-    }
-
-    //Add edges
-    for (String roleI : graph.keySet()) {
-      for (String roleJ : graph.keySet()) {
-        if (roleI.equals(roleJ)) {
-          continue;
-        } else {
-          RoleGraphNode rgnI = graph.get(roleI);
-          RoleGraphNode rgnJ = graph.get(roleJ);
-          int order = roleDependencies.order(rgnI, rgnJ);
-          if (order == -1) {
-            rgnI.addEdge(rgnJ);
-          } else if (order == 1) {
-            rgnJ.addEdge(rgnI);
-          }
-        }
-      }
-    }
-  }
-
-  /**
-   * Returns a list of stages that need to be executed one after another
-   * to execute the DAG generated in the last {@link #build(Stage)} call.
-   */
-  public List<Stage> getStages() {
-    long initialStageId = initialStage.getStageId();
-    List<Stage> stageList = new ArrayList<Stage>();
-    List<RoleGraphNode> firstStageNodes = new ArrayList<RoleGraphNode>();
-    while (!graph.isEmpty()) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(this.stringifyGraph());
-      }
-
-      for (String role: graph.keySet()) {
-        RoleGraphNode rgn = graph.get(role);
-        if (rgn.getInDegree() == 0) {
-          firstStageNodes.add(rgn);
-        }
-      }
-      Stage aStage = getStageFromGraphNodes(initialStage, firstStageNodes);
-      aStage.setStageId(++initialStageId);
-      stageList.add(aStage);
-      // Remove first-stage nodes from the graph; we know that none of
-      // these nodes have incoming edges.
-      for (RoleGraphNode rgn : firstStageNodes) {
-        if (this.sameHostOptimization) {
-          //Perform optimization
-        }
-        removeZeroInDegreeNode(rgn.getRole().toString());
-      }
-      firstStageNodes.clear();
-    }
-    return stageList;
-  }
-
-  /**
-   * Assumes there are no incoming edges.
-   */
-  private synchronized void removeZeroInDegreeNode(String role) {
-    RoleGraphNode nodeToRemove = graph.remove(role);
-    for (RoleGraphNode edgeNode: nodeToRemove.getEdges()) {
-      edgeNode.decrementInDegree();
-    }
-  }
-
-  private Stage getStageFromGraphNodes(Stage origStage,
-      List<RoleGraphNode> stageGraphNodes) {
-    Stage newStage = new Stage(origStage.getRequestId(),
-        origStage.getLogDir(), origStage.getClusterName());
-    newStage.setSuccessFactors(origStage.getSuccessFactors());
-    for (RoleGraphNode rgn : stageGraphNodes) {
-      for (String host : rgn.getHosts()) {
-        newStage.addExecutionCommandWrapper(origStage, host, rgn.getRole());
-      }
-    }
-    return newStage;
-  }
-
-  public String stringifyGraph() {
-    StringBuilder builder = new StringBuilder();
-    builder.append("Graph:\n");
-    for (String role : graph.keySet()) {
-      builder.append(graph.get(role));
-      for (RoleGraphNode rgn : graph.get(role).getEdges()) {
-        builder.append(" --> ");
-        builder.append(rgn);
-      }
-      builder.append("\n");
-    }
-    return builder.toString();
-  }
-}
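
getStages() above repeatedly peels the zero-in-degree nodes off the DAG, one layer per stage. A self-contained sketch of that layering follows; the role names and edges are made up (RoleCommandOrder decides the real edges):

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class StagePlanDemo {
  public static void main(String[] args) {
    // role -> roles that must run in a later stage, mirroring rgnI.addEdge(rgnJ)
    Map<String, List<String>> edges = new HashMap<String, List<String>>();
    edges.put("NAMENODE", Arrays.asList("DATANODE", "SECONDARY_NAMENODE"));
    edges.put("DATANODE", Arrays.asList("HDFS_CLIENT"));
    edges.put("SECONDARY_NAMENODE", new ArrayList<String>());
    edges.put("HDFS_CLIENT", new ArrayList<String>());

    // compute in-degrees, like RoleGraphNode.incrementInDegree()
    Map<String, Integer> inDegree = new HashMap<String, Integer>();
    for (String node : edges.keySet()) inDegree.put(node, 0);
    for (List<String> targets : edges.values())
      for (String t : targets) inDegree.put(t, inDegree.get(t) + 1);

    int stage = 1;
    while (!inDegree.isEmpty()) {
      List<String> ready = new ArrayList<String>();
      for (Map.Entry<String, Integer> e : inDegree.entrySet())
        if (e.getValue() == 0) ready.add(e.getKey());
      System.out.println("Stage " + stage++ + ": " + ready); // order within a stage may vary
      for (String node : ready) {                    // removeZeroInDegreeNode() equivalent
        inDegree.remove(node);
        for (String t : edges.get(node)) inDegree.put(t, inDegree.get(t) - 1);
      }
      // NOTE: a cycle would leave no zero-in-degree node and loop forever,
      // which is also the failure mode of RoleGraph.getStages().
    }
  }
}
```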
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/stageplanner/RoleGraphNode.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/stageplanner/RoleGraphNode.java
deleted file mode 100644
index 3c733a2..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/stageplanner/RoleGraphNode.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.stageplanner;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-
-import org.apache.ambari.server.Role;
-import org.apache.ambari.server.RoleCommand;
-
-public class RoleGraphNode {
-  public RoleGraphNode(Role role, RoleCommand command) {
-    this.role = role;
-    this.command = command;
-  }
-  private Role role;
-  private RoleCommand command;
-  private int inDegree = 0;
-  private List<String> hosts = new ArrayList<String>();
-  private Map<String, RoleGraphNode> edges = new TreeMap<String, RoleGraphNode>();
-  public synchronized void addHost(String host) {
-    hosts.add(host);
-  }
-  public synchronized void addEdge(RoleGraphNode rgn) {
-    if (edges.containsKey(rgn.getRole().toString())) {
-      return;
-    }
-    edges.put(rgn.getRole().toString(), rgn);
-    rgn.incrementInDegree();
-  }
-  private synchronized void incrementInDegree() {
-    inDegree++;
-  }
-  public Role getRole() {
-    return role;
-  }
-  public RoleCommand getCommand() {
-    return command;
-  }
-  public List<String> getHosts() {
-    return hosts;
-  }
-  public int getInDegree() {
-    return inDegree;
-  }
-  
-  Collection<RoleGraphNode> getEdges() {
-    return edges.values();
-  }
-  public synchronized void decrementInDegree() {
-    inDegree--;
-  }
-  
-  @Override
-  public String toString() {
-    StringBuilder builder = new StringBuilder();
-    builder.append("("+role+", "+command +", "+inDegree+")");
-    return builder.toString();
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/AgentVersion.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/AgentVersion.java
deleted file mode 100644
index 6d7dd60..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/AgentVersion.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state;
-
-/**
- * Agent Version representation
- */
-public class AgentVersion {
-
-  private final String version;
-
-  public AgentVersion(String version) {
-    this.version = version;
-  }
-
-  /**
-   * @return the version
-   */
-  public String getVersion() {
-    return version;
-  }
-
-  @Override
-  public boolean equals(Object object) {
-    if (!(object instanceof AgentVersion)) {
-      return false;
-    }
-    if (this == object) {
-      return true;
-    }
-    AgentVersion a = (AgentVersion) object;
-    return version == null ? a.version == null : version.equals(a.version);
-  }
-
-  @Override
-  public int hashCode() {
-    int result = version != null ? version.hashCode() : 0;
-    return result;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
deleted file mode 100644
index e4396cb..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state;
-
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.controller.ClusterResponse;
-
-public interface Cluster {
-
-  /**
-   * Get the cluster ID
-   */
-  public long getClusterId();
-
-  /**
-   * Get the Cluster Name
-   */
-  public String getClusterName();
-
-  /**
-   * Set the Cluster Name
-   */
-  public void setClusterName(String clusterName);
-
-  /**
-   * Add a service to a cluster
-   * @param service
-   */
-  public void addService(Service service) throws AmbariException;
-
-  /**
-   * Get a service
-   * @param serviceName
-   * @return
-   */
-  public Service getService(String serviceName) throws AmbariException;
-
-  /**
-   * Get all services
-   * @return
-   */
-  public Map<String, Service> getServices();
-
-  /**
-   * Get all ServiceComponentHosts on a given host
-   * @param hostname
-   * @return
-   */
-  public List<ServiceComponentHost> getServiceComponentHosts(String hostname);
-
-  /**
-   * Get Stack Version
-   * @return
-   */
-  public StackId getDesiredStackVersion();
-
-  /**
-   * Set stack version
-   * @param stackVersion
-   */
-  public void setDesiredStackVersion(StackId stackVersion);
-
-  public Map<String, Config> getDesiredConfigsByType(String configType);
-
-  public Config getDesiredConfig(String configType, String versionTag);
-
-  public void addDesiredConfig(Config config);
-
-  public Collection<Config> getAllConfigs();
-
-  public ClusterResponse convertToResponse() throws AmbariException;
-
-  public void refresh();
-
-  public void debugDump(StringBuilder sb);
-
-  Service addService(String serviceName) throws AmbariException;
-
-  public void deleteAllServices() throws AmbariException;
-
-  public void deleteService(String serviceName) throws AmbariException;
-
-  public boolean canBeRemoved();
-
-  public void delete() throws AmbariException;
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/Clusters.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/Clusters.java
deleted file mode 100644
index b246b8b..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/Clusters.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-package org.apache.ambari.server.state;
-
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.ambari.server.AmbariException;
-
-/**
- * Single entity that tracks all clusters and hosts that are managed
- * by the Ambari server
- */
-public interface Clusters {
-
-  /**
-   * Add a new Cluster
-   * @param clusterName
-   */
-  public void addCluster(String clusterName) throws AmbariException;
-
-  /**
-   * Get the Cluster given the cluster name
-   * @param clusterName Name of the Cluster to retrieve
-   * @return
-   */
-  public Cluster getCluster(String clusterName) throws AmbariException;
-
-  /**
-   * Get all known clusters
-   * @return
-   */
-  public Map<String, Cluster> getClusters();
-
-  /**
-   * Get all hosts being tracked by the Ambari server
-   * @return
-   */
-  public List<Host> getHosts();
-
-  /**
-   * Returns all clusters that the given host belongs to.
-   * @param hostname
-   * @return Set of clusters
-   * @throws AmbariException
-   */
-  public Set<Cluster> getClustersForHost(String hostname)
-      throws AmbariException;
-
-
-  /**
-   * Get a Host object managed by this server
-   * @param hostname Name of the host requested
-   * @return Host object
-   * @throws AmbariException
-   */
-  public Host getHost(String hostname) throws AmbariException;
-
-  /**
-   * Add a Host object to be managed by this server
-   * @param hostname Host to be added
-   * @throws AmbariException
-   */
-  public void addHost(String hostname) throws AmbariException;
-
-  /**
-   * Map host to the given cluster.
-   * A host can belong to multiple clusters.
-   * @param hostname
-   * @param clusterName
-   * @throws AmbariException
-   */
-  public void mapHostToCluster(String hostname, String clusterName)
-      throws AmbariException;
-
-
-  public void mapHostsToCluster(Set<String> hostnames, String clusterName)
-      throws AmbariException;
-
-  public void updateClusterName(String oldName, String newName);
-
-  public Cluster getClusterById(long id) throws AmbariException;
-
-  public void debugDump(StringBuilder sb);
-
-  public Map<String, Host> getHostsForCluster(String clusterName)
-      throws AmbariException;
-
-  public void deleteCluster(String clusterName) throws AmbariException;
-
-}
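
A short sketch of the host-to-cluster mapping contract above; the names are placeholders and the Clusters instance would normally be Guice-injected:

```java
import java.util.Map;
import java.util.Set;

import org.apache.ambari.server.AmbariException;

public class ClustersSketch {
  static void demo(Clusters clusters) throws AmbariException {
    clusters.addCluster("c1");
    clusters.addHost("host1.example.com");
    clusters.mapHostToCluster("host1.example.com", "c1"); // a host may join several clusters

    Set<Cluster> memberships = clusters.getClustersForHost("host1.example.com");
    Map<String, Host> hosts = clusters.getHostsForCluster("c1");
  }
}
```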
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ComponentInfo.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ComponentInfo.java
deleted file mode 100644
index 0b8498b..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ComponentInfo.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state;
-
-public class ComponentInfo {
-  private String name;
-  private String category;
-
-  public String getName() {
-    return name;
-  }
-
-  public void setName(String name) {
-    this.name = name;
-  }
-
-  public String getCategory() {
-    return category;
-  }
-
-  public void setCategory(String category) {
-    this.category = category;
-  }
-
-  public boolean isClient() {
-    return "CLIENT".equals(category);
-  }
-
-  public boolean isMaster() {
-    return "MASTER".equals(category);
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/Config.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/Config.java
deleted file mode 100644
index 9a39bf0..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/Config.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state;
-
-import java.util.List;
-import java.util.Map;
-
-/**
- * Represents a single instance of a 'Config Type'
- */
-public interface Config {
-
-  /**
-   * @return Config Type
-   */
-  public String getType();
-
-  /**
-   * @return Version Tag this config instance is mapped to
-   */
-  public String getVersionTag();
-
-  /**
-   * @return Properties that define this config instance
-   */
-  public Map<String, String> getProperties();
-
-  /**
-   * Change the version tag
-   * @param versionTag
-   */
-  public void setVersionTag(String versionTag);
-
-  /**
-   * Replace properties with new provided set
-   * @param properties Property Map to replace existing one
-   */
-  public void setProperties(Map<String, String> properties);
-
-  /**
-   * Update provided properties' values.
-   * @param properties Property Map with updated values
-   */
-  public void updateProperties(Map<String, String> properties);
-
-  /**
-   * Delete certain properties
-   * @param properties Property keys to be deleted
-   */
-  public void deleteProperties(List<String> properties);
-  
-  /**
-   * Persist the configuration.
-   */
-  public void persist();
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigFactory.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigFactory.java
deleted file mode 100644
index 4678da7..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigFactory.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.state;
-
-import java.util.Map;
-
-import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
-
-/**
- * @author ncole
- *
- */
-public interface ConfigFactory {
-  
-  Config createNew(Cluster cluster, String type, Map<String, String> map);
-  
-  Config createExisting(Cluster cluster, ClusterConfigEntity entity);
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
deleted file mode 100644
index fe6dc0f..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
+++ /dev/null
@@ -1,136 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state;
-
-import java.util.Collections;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.ambari.server.orm.dao.ClusterDAO;
-import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
-import org.apache.ambari.server.orm.entities.ClusterEntity;
-
-import com.google.gson.Gson;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.assistedinject.Assisted;
-import com.google.inject.assistedinject.AssistedInject;
-import com.google.inject.persist.Transactional;
-
-public class ConfigImpl implements Config {
-
-  private Cluster cluster;
-  private String type;
-  private String versionTag;
-  private Map<String, String> properties;
-  private ClusterConfigEntity entity;
-
-  @Inject
-  private ClusterDAO clusterDAO;
-  @Inject
-  private Gson gson;
-
-  @AssistedInject
-  public ConfigImpl(@Assisted Cluster cluster, @Assisted String type, @Assisted Map<String, String> properties, Injector injector) {
-    this.cluster = cluster;
-    this.type = type;
-    this.properties = properties;
-    injector.injectMembers(this);
-    
-  }
-  
-  @AssistedInject
-  public ConfigImpl(@Assisted Cluster cluster, @Assisted ClusterConfigEntity entity, Injector injector) {
-    this.cluster = cluster;
-    this.type = entity.getType();
-    this.versionTag = entity.getTag();
-    this.entity = entity;
-    injector.injectMembers(this);
-  }
-  
-  @Override
-  public String getType() {
-    return type;
-  }
-
-  @Override
-  public synchronized String getVersionTag() {
-    return versionTag;
-  }
-
-  @Override
-  public synchronized Map<String, String> getProperties() {
-    if (null != entity && null == properties) {
-      
-      properties = gson.<Map<String, String>>fromJson(entity.getData(), Map.class);
-      
-    }
-    return null == properties ? new HashMap<String, String>()
-        : new HashMap<String, String>(properties);
-  }
-
-  @Override
-  public synchronized void setVersionTag(String versionTag) {
-    this.versionTag = versionTag;
-  }
-
-  @Override
-  public synchronized void setProperties(Map<String, String> properties) {
-    this.properties = properties;
-  }
-
-  @Override
-  public synchronized void updateProperties(Map<String, String> properties) {
-    this.properties.putAll(properties);
-  }
-
-  @Override
-  public synchronized void deleteProperties(List<String> properties) {
-    for (String key : properties) {
-      this.properties.remove(key);
-    }
-  }
-  
-  @Transactional
-  @Override
-  public synchronized void persist() {
-    
-    ClusterEntity clusterEntity = clusterDAO.findById(cluster.getClusterId());
-    
-    ClusterConfigEntity entity = new ClusterConfigEntity();
-    entity.setClusterEntity(clusterEntity);
-    entity.setClusterId(Long.valueOf(cluster.getClusterId()));
-    entity.setType(type);
-    entity.setTag(getVersionTag());
-    entity.setTimestamp(new Date().getTime());
-    
-    entity.setData(gson.toJson(getProperties()));
-    clusterDAO.createConfig(entity);
-
-    clusterEntity.getClusterConfigEntities().add(entity);
-    clusterDAO.merge(clusterEntity);
-    cluster.refresh();
-
-  }
-
-
-
-}
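
A hedged sketch of how a config instance is created and persisted through the assisted-inject factory above; the injector/cluster wiring is assumed, and the property value and version tag are placeholders:

```java
import java.util.HashMap;
import java.util.Map;

public class ConfigPersistSketch {
  static void demo(ConfigFactory factory, Cluster cluster) {
    Map<String, String> props = new HashMap<String, String>();
    props.put("dfs.replication", "3"); // illustrative property

    Config hdfsSite = factory.createNew(cluster, "hdfs-site", props);
    hdfsSite.setVersionTag("version1");
    hdfsSite.persist();                 // writes a ClusterConfigEntity and refreshes the cluster
    cluster.addDesiredConfig(hdfsSite); // make it the desired config for its type
  }
}
```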
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/Host.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/Host.java
deleted file mode 100644
index 11636ee..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/Host.java
+++ /dev/null
@@ -1,274 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state;
-
-import java.util.List;
-import java.util.Map;
-
-import org.apache.ambari.server.agent.AgentEnv;
-import org.apache.ambari.server.agent.DiskInfo;
-import org.apache.ambari.server.agent.HostInfo;
-import org.apache.ambari.server.controller.HostResponse;
-import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
-
-public interface Host {
-
-  /**
-   * @return the hostName
-   */
-  public String getHostName();
-
-  /**
-   * @param hostName the hostName to set
-   */
-  public void setHostName(String hostName);
-
-  /**
-   * Sets the public-facing host name.
-   */
-  public void setPublicHostName(String hostName);
-  
-  /**
-   * Gets the public-facing host name.
-   */
-  public String getPublicHostName();
-  
-  /**
-   * IPv4 assigned to the Host
-   * @return the ip or null if no IPv4 interface
-   */
-  public String getIPv4();
-
-  /**
-   * @param ip the ip to set
-   */
-  public void setIPv4(String ip);
-
-  /**
-   * IPv6 assigned to the Host
-   * @return the ip or null if no IPv6 interface
-   */
-  public String getIPv6();
-
-  /**
-   * @param ip the ip to set
-   */
-  public void setIPv6(String ip);
-
-  /**
-   * @return the cpuCount
-   */
-  public int getCpuCount();
-
-  /**
-   * @param cpuCount the cpuCount to set
-   */
-  public void setCpuCount(int cpuCount);
-
-  /**
-   * Get the Amount of physical memory for the Host.
-   * @return the totalMemBytes
-   */
-  public long getTotalMemBytes();
-
-  /**
-   * Set the Amount of physical memory for the Host.
-   * @param totalMemBytes the totalMemBytes to set
-   */
-  public void setTotalMemBytes(long totalMemBytes);
-
-  /**
-   * Get the Amount of available memory for the Host.
-   * In most cases, available is the same as total unless the
-   * agent on the host is configured not to use all available
-   * memory.
-   * @return the availableMemBytes
-   */
-  public long getAvailableMemBytes();
-
-  /**
-   * Set the Amount of available memory for the Host.
-   * @param availableMemBytes the availableMemBytes to set
-   */
-  public void setAvailableMemBytes(long availableMemBytes);
-
-  /**
-   * Get the OS Architecture.
-   * i386, x86_64, etc.
-   * @return the osArch
-   */
-  public String getOsArch();
-
-  /**
-   * @param osArch the osArch to set
-   */
-  public void setOsArch(String osArch);
-
-  /**
-   * Get the General OS information.
-   * uname -a, /etc/*-release dump
-   * @return the osInfo
-   */
-  public String getOsInfo();
-
-  /**
-   * @param osInfo the osInfo to set
-   */
-  public void setOsInfo(String osInfo);
-
-  /**
-   * Get the OS Type: RHEL5/RHEL6/CentOS5/...
-   * Defined and match-able OS type
-   * @return the osType
-   */
-  public String getOsType();
-
-  /**
-   * @param osType the osType to set
-   */
-  public void setOsType(String osType);
-
-  /**
-   * Get information on disks available on the host.
-   * @return the disksInfo
-   */
-  public List<DiskInfo> getDisksInfo();
-
-  /**
-   * @param disksInfo the disksInfo to set
-   */
-  public void setDisksInfo(List<DiskInfo> disksInfo);
-
-  /**
-   * @return the healthStatus
-   */
-  public HostHealthStatus getHealthStatus();
-
-  /**
-   * @param healthStatus the healthStatus to set
-   */
-  public void setHealthStatus(HostHealthStatus healthStatus);
-
-  /**
-   * Get additional host attributes
-   * For example, public/hostname/IP for AWS
-   * @return the hostAttributes
-   */
-  public Map<String, String> getHostAttributes();
-
-  /**
-   * @param hostAttributes the hostAttributes to set
-   */
-  public void setHostAttributes(Map<String, String> hostAttributes);
-  /**
-   * @return the rackInfo
-   */
-  public String getRackInfo();
-
-  /**
-   * @param rackInfo the rackInfo to set
-   */
-  public void setRackInfo(String rackInfo);
-
-  /**
-   * Last time the host registered with the Ambari Server
-   * ( Unix timestamp )
-   * @return the lastRegistrationTime
-   */
-  public long getLastRegistrationTime();
-
-  /**
-   * @param lastRegistrationTime the lastRegistrationTime to set
-   */
-  public void setLastRegistrationTime(long lastRegistrationTime);
-
-  /**
-   * Last time the Ambari Server received a heartbeat from the Host
-   * ( Unix timestamp )
-   * @return the lastHeartbeatTime
-   */
-  public long getLastHeartbeatTime();
-
-  /**
-   * @param lastHeartbeatTime the lastHeartbeatTime to set
-   */
-  public void setLastHeartbeatTime(long lastHeartbeatTime);
-
-  /**
-   * Sets the latest agent environment that arrived in a heartbeat.
-   */
-  public void setLastAgentEnv(AgentEnv env);
-  
-  /**
-   * Gets the latest agent environment that arrived in a heartbeat.
-   */
-  public AgentEnv getLastAgentEnv();
-  
-  /**
-   * Version of the Ambari Agent running on the host
-   * @return the agentVersion
-   */
-  public AgentVersion getAgentVersion();
-
-  /**
-   * @param agentVersion the agentVersion to set
-   */
-  public void setAgentVersion(AgentVersion agentVersion);
-
-  /**
-   * Get Current Host State
-   * @return HostState
-   */
-  public HostState getState();
-
-  /**
-   * Set the State of the Host
-   * @param state Host State
-   */
-  public void setState(HostState state);
-
-  /**
-   * Send an event to the Host's StateMachine
-   * @param event HostEvent
-   * @throws InvalidStateTransitionException
-   */
-  public void handleEvent(HostEvent event)
-      throws InvalidStateTransitionException;
-
-  /**
-   * Get time spent in the current state i.e. the time since last state change.
-   * @return Time spent in current state.
-   */
-  public long getTimeInState();
-
-  /**
-   * @param timeInState the timeInState to set
-   */
-  public void setTimeInState(long timeInState);
-
-  public HostResponse convertToResponse();
-
-  boolean isPersisted();
-
-  void persist();
-
-  void refresh();
-
-  void importHostInfo(HostInfo hostInfo);
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/HostEvent.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/HostEvent.java
deleted file mode 100644
index 539e090..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/HostEvent.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state;
-
-import org.apache.ambari.server.state.fsm.event.AbstractEvent;
-
-/**
- * Base class for all events that affect the Host FSM
- */
-public abstract class HostEvent extends AbstractEvent<HostEventType> {
-
-  /**
-   * Hostname of the Host
-   */
-  private final String hostName;
-
-  public HostEvent(String hostName, HostEventType type) {
-    super(type);
-    this.hostName = hostName;
-  }
-
-  /**
-   * @return the hostName
-   */
-  public String getHostName() {
-    return hostName;
-  }
-
-}
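
Concrete events pair a hostname with one of the HostEventType constants defined
in the next file. A minimal sketch of such a subclass (the class name here is
hypothetical, not taken from this diff):

  public class HostRegistrationRequestEvent extends HostEvent {
    public HostRegistrationRequestEvent(String hostName) {
      super(hostName, HostEventType.HOST_REGISTRATION_REQUEST);
    }
  }
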
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/HostEventType.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/HostEventType.java
deleted file mode 100644
index 06ad88f..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/HostEventType.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-package org.apache.ambari.server.state;
-
-public enum HostEventType {
-  /**
-   * Event to denote when a registration request is received from a Host
-   */
-  HOST_REGISTRATION_REQUEST,
-  /**
-   * Host status check response received.
-   */
-  HOST_STATUS_UPDATES_RECEIVED,
-  /**
-   * A healthy heartbeat event received from the Host.
-   */
-  HOST_HEARTBEAT_HEALTHY,
-  /**
-   * No heartbeat received from the Host within the defined expiry interval.
-   */
-  HOST_HEARTBEAT_LOST,
-  /**
-   * A non-healthy heartbeat event received from the Host.
-   */
-  HOST_HEARTBEAT_UNHEALTHY
-}
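
A sketch of how a heartbeat handler might select between the two heartbeat
event types; the healthy flag is an assumed input, not part of this enum:

  HostEventType type = healthy
      ? HostEventType.HOST_HEARTBEAT_HEALTHY
      : HostEventType.HOST_HEARTBEAT_UNHEALTHY;
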
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/HostHealthStatus.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/HostHealthStatus.java
deleted file mode 100644
index 5e202b6..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/HostHealthStatus.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state;
-
-public class HostHealthStatus {
-
-  private HealthStatus healthStatus;
-
-  private String healthReport;
-
-  public HostHealthStatus(HealthStatus healthStatus, String healthReport) {
-    super();
-    this.healthStatus = healthStatus;
-    this.healthReport = healthReport;
-  }
-
-  public synchronized HealthStatus getHealthStatus() {
-    return healthStatus;
-  }
-
-  public synchronized void setHealthStatus(HealthStatus healthStatus) {
-    this.healthStatus = healthStatus;
-  }
-
-  public synchronized void setHealthReport(String healthReport) {
-    this.healthReport = healthReport;
-  }
-
-  public synchronized String getHealthReport() {
-    return healthReport;
-  }
-
-  public static enum HealthStatus {
-    UNKNOWN,
-    HEALTHY,
-    UNHEALTHY
-  }
-
-}
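
Each setter above is synchronized, so the status and report fields update
atomically as individual fields, though the pair is not swapped as a single
unit. A small usage sketch with illustrative values:

  HostHealthStatus status =
      new HostHealthStatus(HostHealthStatus.HealthStatus.UNKNOWN, "");
  status.setHealthStatus(HostHealthStatus.HealthStatus.HEALTHY);
  status.setHealthReport("agent heartbeat received");   // assumed report text
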
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/HostState.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/HostState.java
deleted file mode 100644
index 53052a6..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/HostState.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state;
-
-public enum HostState {
-  /**
-   * New host state
-   */
-  INIT,
-  /**
-   * State when a registration request is received from the Host but
-   * the host has not responded to its status update check.
-   */
-  WAITING_FOR_HOST_STATUS_UPDATES,
-  /**
-   * State when the server is receiving heartbeats regularly from the Host
-   * and the state of the Host is healthy
-   */
-  HEALTHY,
-  /**
-   * State when the server has not received a heartbeat from the Host in the
-   * configured heartbeat expiry window.
-   */
-  HEARTBEAT_LOST,
-  /**
-   * Host is in unhealthy state as reported either by the Host itself or via
-   * any other additional means ( monitoring layer )
-   */
-  UNHEALTHY;
-}
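
The constants imply the usual lifecycle INIT -> WAITING_FOR_HOST_STATUS_UPDATES
(on registration) -> HEALTHY (once status updates arrive), with HEALTHY,
UNHEALTHY, and HEARTBEAT_LOST interchanging as heartbeats come and go; the
authoritative transition table lives in the Host FSM, not in this enum. A tiny
helper one might write against it (the liveness notion is an assumption):

  // Treat a host as reachable while the server still receives heartbeats
  // from it, healthy or not. Illustrative only.
  public static boolean isHeartbeating(HostState state) {
    return state == HostState.HEALTHY || state == HostState.UNHEALTHY;
  }
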
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
deleted file mode 100644
index 069d613..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state;
-
-public class PropertyInfo {
-  private String name;
-  private String value;
-  private String description;
-  private String filename;
-
-  public String getName() {
-    return name;
-  }
-
-  public void setName(String name) {
-    this.name = name;
-  }
-
-  public String getValue() {
-    return value;
-  }
-
-  public void setValue(String value) {
-    this.value = value;
-  }
-
-  public String getDescription() {
-    return description;
-  }
-
-  public void setDescription(String description) {
-    this.description = description;
-  }
-
-  public String getFilename() {
-    return filename;
-  }
-
-  public void setFilename(String filename) {
-    this.filename = filename;
-  }
-}
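
A sketch of how a stack-definition loader might populate this bean; all values
are illustrative:

  PropertyInfo prop = new PropertyInfo();
  prop.setName("fs.default.name");
  prop.setValue("hdfs://namenode.example.com:8020");
  prop.setDescription("Default file system URI");
  prop.setFilename("core-site.xml");
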
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/RepositoryInfo.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/RepositoryInfo.java
deleted file mode 100644
index d826370..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/RepositoryInfo.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state;
-
-public class RepositoryInfo {
-  private String baseUrl;
-  private String osType;
-  private String repoId;
-  private String repoName;
-  private String mirrorsList;
-
-  /**
-   * @return the baseUrl
-   */
-  public String getBaseUrl() {
-    return baseUrl;
-  }
-
-  /**
-   * @param baseUrl the baseUrl to set
-   */
-  public void setBaseUrl(String baseUrl) {
-    this.baseUrl = baseUrl;
-  }
-
-  /**
-   * @return the osType
-   */
-  public String getOsType() {
-    return osType;
-  }
-
-  /**
-   * @param osType the osType to set
-   */
-  public void setOsType(String osType) {
-    this.osType = osType;
-  }
-
-  /**
-   * @return the repoId
-   */
-  public String getRepoId() {
-    return repoId;
-  }
-
-  /**
-   * @param repoId the repoId to set
-   */
-  public void setRepoId(String repoId) {
-    this.repoId = repoId;
-  }
-
-  /**
-   * @return the repoName
-   */
-  public String getRepoName() {
-    return repoName;
-  }
-
-  /**
-   * @param repoName the repoName to set
-   */
-  public void setRepoName(String repoName) {
-    this.repoName = repoName;
-  }
-
-  /**
-   * @return the mirrorsList
-   */
-  public String getMirrorsList() {
-    return mirrorsList;
-  }
-
-  /**
-   * @param mirrorsList the mirrorsList to set
-   */
-  public void setMirrorsList(String mirrorsList) {
-    this.mirrorsList = mirrorsList;
-  }
-
-  @Override
-  public String toString() {
-    return "[ repoInfo: "
-        + ", osType=" + osType
-        + ", repoId=" + repoId
-        + ", baseUrl=" + baseUrl
-        + ", repoName=" + repoName
-        + ", mirrorsList=" + mirrorsList
-        + " ]";
-  }
-}
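
A sketch of building a repository entry as a stack-metadata parser might; the
values are illustrative, not taken from this diff:

  RepositoryInfo repo = new RepositoryInfo();
  repo.setOsType("centos6");
  repo.setRepoId("HDP-1.x");
  repo.setRepoName("HDP");
  repo.setBaseUrl("http://example.com/hdp/centos6/1.x/GA");
  System.out.println(repo);   // rendered by the toString() above
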
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/Service.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/Service.java
deleted file mode 100644
index d07f9b5..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/Service.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state;
-
-import java.util.Map;
-
-import com.google.inject.persist.Transactional;
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.controller.ServiceResponse;
-
-public interface Service {
-
-  public String getName();
-
-  public long getClusterId();
-
-  public Cluster getCluster();
-
-  public ServiceComponent getServiceComponent(String componentName)
-      throws AmbariException;
-
-  public Map<String, ServiceComponent> getServiceComponents();
-
-  public void addServiceComponents(Map<String, ServiceComponent> components)
-      throws AmbariException;
-
-  public void addServiceComponent(ServiceComponent component)
-      throws AmbariException;
-
-  public State getDesiredState();
-
-  public void setDesiredState(State state);
-
-  public Map<String, Config> getDesiredConfigs();
-
-  public void updateDesiredConfigs(Map<String, Config> configs);
-
-  public StackId getDesiredStackVersion();
-
-  public void setDesiredStackVersion(StackId stackVersion);
-
-  public ServiceResponse convertToResponse();
-
-  public void debugDump(StringBuilder sb);
-
-  boolean isPersisted();
-
-  @Transactional
-  void persist();
-
-  void refresh();
-
-  ServiceComponent addServiceComponent(String serviceComponentName)
-      throws AmbariException;
-
-  /**
-   * Find out whether the service and its components
-   * are in a state that allows the service to be removed from the cluster
-   * @return true if the service and all of its components can be removed
-   */
-  public boolean canBeRemoved();
-
-  public void deleteAllComponents() throws AmbariException;
-
-  public void deleteServiceComponent(String componentName)
-      throws AmbariException;
-
-  public boolean isClientOnlyService();
-
-  public void delete() throws AmbariException;
-
-  public enum Type {
-    HDFS,
-    MAPREDUCE,
-    HBASE,
-    HIVE,
-    OOZIE,
-    WEBHCAT,
-    SQOOP,
-    NAGIOS,
-    GANGLIA,
-    ZOOKEEPER
-  }
-}
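
A minimal sketch of wiring a component into a service through this interface;
the service reference is assumed, and exception handling is omitted:

  ServiceComponent nn = service.addServiceComponent("NAMENODE");
  nn.setDesiredState(State.INIT);
  service.persist();
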
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java
deleted file mode 100644
index ccc647d..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state;
-
-import java.util.Map;
-import java.util.Set;
-
-import com.google.inject.persist.Transactional;
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.controller.ServiceComponentResponse;
-
-public interface ServiceComponent {
-
-  public String getName();
-
-  public String getServiceName();
-
-  public long getClusterId();
-
-  public String getClusterName();
-
-  public State getDesiredState();
-
-  public void setDesiredState(State state);
-
-  public Map<String, Config> getDesiredConfigs();
-
-  public void updateDesiredConfigs(Map<String, Config> configs);
-
-  public void deleteDesiredConfigs(Set<String> configTypes);
-
-  public StackId getDesiredStackVersion();
-
-  public void setDesiredStackVersion(StackId stackVersion);
-
-  public Map<String, ServiceComponentHost> getServiceComponentHosts();
-
-  public ServiceComponentHost getServiceComponentHost(String hostname)
-      throws AmbariException;
-
-  public void addServiceComponentHosts(Map<String, ServiceComponentHost>
-      hostComponents) throws AmbariException;
-
-  public void addServiceComponentHost(ServiceComponentHost hostComponent)
-      throws AmbariException;
-
-  public ServiceComponentResponse convertToResponse();
-
-  public void refresh();
-
-  boolean isPersisted();
-
-  @Transactional
-  void persist();
-
-  public void debugDump(StringBuilder sb);
-
-  public boolean isClientComponent();
-
-  public boolean canBeRemoved();
-
-  public void deleteAllServiceComponentHosts() throws AmbariException;
-
-  public void deleteServiceComponentHosts(String hostname)
-      throws AmbariException;
-
-  ServiceComponentHost addServiceComponentHost(
-      String hostName) throws AmbariException;
-
-  public void delete() throws AmbariException;
-}
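
A sketch of placing the component on a host; the hostname is illustrative and
the host is assumed to be registered with the cluster already:

  ServiceComponentHost sch =
      component.addServiceComponentHost("host1.example.com");
  component.persist();
  sch.persist();
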
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentFactory.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentFactory.java
deleted file mode 100644
index 4b0ccd2..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentFactory.java
+++ /dev/null
@@ -1,28 +0,0 @@
-package org.apache.ambari.server.state;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity;
-
-public interface ServiceComponentFactory {
-
-  ServiceComponent createNew(Service service, String componentName);
-
-  ServiceComponent createExisting(Service service, ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity);
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHost.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHost.java
deleted file mode 100644
index 09266e3..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHost.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state;
-
-import java.util.Map;
-import java.util.Set;
-
-import com.google.inject.persist.Transactional;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.controller.ServiceComponentHostResponse;
-import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
-
-
-public interface ServiceComponentHost {
-
-  /**
-   * Get the ID of the Cluster that this object maps to
-   */
-  public long getClusterId();
-
-  /**
-   * Get the name of the Cluster that this object maps to
-   */
-  public String getClusterName();
-
-  /**
-   * Get the Service this object maps to
-   * @return Name of the Service
-   */
-  public String getServiceName();
-
-  /**
-   * Get the ServiceComponent this object maps to
-   * @return Name of the ServiceComponent
-   */
-  public String getServiceComponentName();
-
-  /**
-   * Get the Host this object maps to
-   * @return Host's hostname
-   */
-  public String getHostName();
-
-  /**
-   * Send a ServiceComponentHostEvent to the StateMachine
-   * @param event Event to handle
-   * @throws InvalidStateTransitionException
-   */
-  public void handleEvent(ServiceComponentHostEvent event)
-      throws InvalidStateTransitionException;
-
-  public State getDesiredState();
-
-  public void setDesiredState(State state);
-
-  public Map<String, Config> getDesiredConfigs();
-
-  public Map<String, String> getDesiredConfigVersionsRecursive();
-
-  public void updateDesiredConfigs(Map<String, Config> configs);
-
-  public void deleteDesiredConfigs(Set<String> configTypes);
-
-  public StackId getDesiredStackVersion();
-
-  public void setDesiredStackVersion(StackId stackVersion);
-
-  public State getState();
-
-  public void setState(State state);
-
-  public Map<String, Config> getConfigs() throws AmbariException;
-
-  public StackId getStackVersion();
-
-  public void setStackVersion(StackId stackVersion);
-
-  public ServiceComponentHostResponse convertToResponse();
-
-  boolean isPersisted();
-
-  @Transactional
-  void persist();
-
-  void refresh();
-
-  public void debugDump(StringBuilder sb);
-
-  public boolean canBeRemoved();
-
-  public void delete() throws AmbariException;
-}
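
The interface separates desired state (what the user requested) from live
state (what the agent last reported), which is what drives reconciliation. A
sketch of a convergence check one might write against it; the sch reference
is assumed:

  if (sch.getState() != sch.getDesiredState()) {
    // schedule an operation (install, start, stop, ...) to converge
  }
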
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHostEvent.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHostEvent.java
deleted file mode 100644
index 00a5ce2..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHostEvent.java
+++ /dev/null
@@ -1,154 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.ambari.server.state.fsm.event.AbstractEvent;
-import org.apache.ambari.server.state.svccomphost.*;
-import org.codehaus.jackson.annotate.JsonCreator;
-import org.codehaus.jackson.annotate.JsonProperty;
-
-/**
- * Base class for all events that affect the ServiceComponentHost FSM
- */
-public abstract class ServiceComponentHostEvent
-    extends AbstractEvent<ServiceComponentHostEventType> {
-
-  /**
-   * ServiceComponent that this event relates to
-   */
-  private final String serviceComponentName;
-
-  /**
-   * Hostname of the Host that this event relates to
-   */
-  private final String hostName;
-
-  /**
-   * Time when the event was triggered
-   */
-  private final long opTimestamp;
-
-  // FIXME hack alert!!!
-  // This belongs to start event only
-  private final Map<String, String> configs;
-
-  // FIXME hack alert
-  // this belongs to install event only
-  private final String stackId;
-
-  public ServiceComponentHostEvent(ServiceComponentHostEventType type,
-      String serviceComponentName, String hostName, long opTimestamp,
-      Map<String, String> configs) {
-    this(type, serviceComponentName, hostName, opTimestamp,
-        configs, "");
-  }
-
-  public ServiceComponentHostEvent(ServiceComponentHostEventType type,
-      String serviceComponentName, String hostName, long opTimestamp) {
-    this(type, serviceComponentName, hostName, opTimestamp,
-        new HashMap<String, String>(), "");
-  }
-
-  public ServiceComponentHostEvent(ServiceComponentHostEventType type,
-      String serviceComponentName, String hostName, long opTimestamp,
-      String stackId) {
-    this(type, serviceComponentName, hostName, opTimestamp,
-        new HashMap<String, String>(), stackId);
-  }
-
-  public ServiceComponentHostEvent(ServiceComponentHostEventType type,
-      String serviceComponentName, String hostName, long opTimestamp,
-      Map<String, String> configs, String stackId) {
-    super(type);
-    this.serviceComponentName = serviceComponentName;
-    this.hostName = hostName;
-    this.opTimestamp = opTimestamp;
-    this.configs = configs;
-    this.stackId = stackId;
-  }
-
-  /**
-   * @return the serviceComponentName
-   */
-  public String getServiceComponentName() {
-    return serviceComponentName;
-  }
-
-  /**
-   * @return the hostName
-   */
-  public String getHostName() {
-    return hostName;
-  }
-
-  /**
-   * @return the opTimestamp
-   */
-  public long getOpTimestamp() {
-    return opTimestamp;
-  }
-
-  @JsonCreator
-  public static ServiceComponentHostEvent create(@JsonProperty("type") ServiceComponentHostEventType type,
-                                                 @JsonProperty("serviceComponentName") String serviceComponentName,
-                                                 @JsonProperty("hostName") String hostName,
-                                                 @JsonProperty("opTimestamp") long opTimestamp,
-                                                 @JsonProperty("configs") Map<String, String> configs,
-                                                 @JsonProperty("stackId") String stackId) {
-    switch (type) {
-      case HOST_SVCCOMP_INSTALL:
-        return new ServiceComponentHostInstallEvent(serviceComponentName, hostName, opTimestamp, stackId);
-      case HOST_SVCCOMP_OP_FAILED:
-        return new ServiceComponentHostOpFailedEvent(serviceComponentName, hostName, opTimestamp);
-      case HOST_SVCCOMP_OP_IN_PROGRESS:
-        return new ServiceComponentHostOpInProgressEvent(serviceComponentName, hostName, opTimestamp);
-      case HOST_SVCCOMP_OP_RESTART:
-        return new ServiceComponentHostOpRestartedEvent(serviceComponentName, hostName, opTimestamp);
-      case HOST_SVCCOMP_OP_SUCCEEDED:
-        return new ServiceComponentHostOpSucceededEvent(serviceComponentName, hostName, opTimestamp);
-      case HOST_SVCCOMP_START:
-        return new ServiceComponentHostStartEvent(serviceComponentName, hostName, opTimestamp, configs);
-      case HOST_SVCCOMP_STOP:
-        return new ServiceComponentHostStopEvent(serviceComponentName, hostName, opTimestamp);
-      case HOST_SVCCOMP_UNINSTALL:
-        return new ServiceComponentHostUninstallEvent(serviceComponentName, hostName, opTimestamp);
-      case HOST_SVCCOMP_WIPEOUT:
-        return new ServiceComponentHostWipeoutEvent(serviceComponentName, hostName, opTimestamp);
-    }
-    return null;
-  }
-
-  /**
-   * @return the configs
-   */
-  public Map<String, String> getConfigs() {
-    return configs;
-  }
-
-  /**
-   * @return the stackId
-   */
-  public String getStackId() {
-    return stackId;
-  }
-
-}
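
The @JsonCreator factory above lets Jackson rebuild the correct concrete
subtype when an event is deserialized. A round-trip sketch using the Codehaus
Jackson ObjectMapper this code imports; it assumes the serialized form carries
the "type" property, as the @JsonProperty annotations suggest, and omits
IOException handling:

  ObjectMapper mapper = new ObjectMapper();  // org.codehaus.jackson.map.ObjectMapper
  ServiceComponentHostEvent event = new ServiceComponentHostOpSucceededEvent(
      "NAMENODE", "host1.example.com", System.currentTimeMillis());
  String json = mapper.writeValueAsString(event);
  ServiceComponentHostEvent copy =
      mapper.readValue(json, ServiceComponentHostEvent.class);
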
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHostEventType.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHostEventType.java
deleted file mode 100644
index 0560b6c..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHostEventType.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state;
-
-public enum ServiceComponentHostEventType {
-  /**
-   * Operation in progress
-   */
-  HOST_SVCCOMP_OP_IN_PROGRESS,
-  /**
-   * Operation succeeded
-   */
-  HOST_SVCCOMP_OP_SUCCEEDED,
-  /**
-   * Operation failed.
-   */
-  HOST_SVCCOMP_OP_FAILED,
-  /**
-   * Re-starting a failed operation.
-   */
-  HOST_SVCCOMP_OP_RESTART,
-  /**
-   * Triggering an install.
-   */
-  HOST_SVCCOMP_INSTALL,
-  /**
-   * Triggering a start.
-   */
-  HOST_SVCCOMP_START,
-  /**
-   * Triggering a stop.
-   */
-  HOST_SVCCOMP_STOP,
-  /**
-   * Triggering an uninstall.
-   */
-  HOST_SVCCOMP_UNINSTALL,
-  /**
-   * Triggering a wipe-out ( restore to clean state ).
-   */
-  HOST_SVCCOMP_WIPEOUT
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHostFactory.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHostFactory.java
deleted file mode 100644
index d535fd9..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHostFactory.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state;
-
-import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
-import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
-
-public interface ServiceComponentHostFactory {
-
-  ServiceComponentHost createNew(ServiceComponent serviceComponent,
-                                 String hostName, boolean isClient);
-
-  ServiceComponentHost createExisting(ServiceComponent serviceComponent,
-                                      HostComponentStateEntity stateEntity,
-                                      HostComponentDesiredStateEntity desiredStateEntity);
-}
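
Factories like this one are typically bound with Guice's assisted-inject
support, so that the assisted parameters combine with injected dependencies
(as the @AssistedInject constructors in the next file show). A sketch of such
a binding inside a Guice module; the implementation class name is an
assumption:

  install(new FactoryModuleBuilder()
      .implement(ServiceComponentHost.class, ServiceComponentHostImpl.class)
      .build(ServiceComponentHostFactory.class));
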
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java
deleted file mode 100644
index 1cf0bee..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java
+++ /dev/null
@@ -1,552 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state;
-
-import java.util.*;
-import java.util.Map.Entry;
-
-import com.google.gson.Gson;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.assistedinject.Assisted;
-import com.google.inject.assistedinject.AssistedInject;
-import com.google.inject.persist.Transactional;
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.ServiceComponentHostNotFoundException;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.controller.ServiceComponentResponse;
-import org.apache.ambari.server.orm.dao.*;
-import org.apache.ambari.server.orm.entities.*;
-import org.apache.ambari.server.state.cluster.ClusterImpl;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class ServiceComponentImpl implements ServiceComponent {
-
-  private final static Logger LOG =
-      LoggerFactory.getLogger(ServiceComponentImpl.class);
-
-  private final Service service;
-
-  @Inject
-  private Gson gson;
-  @Inject
-  private ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO;
-  @Inject
-  private ClusterServiceDAO clusterServiceDAO;
-  @Inject
-  private HostComponentStateDAO hostComponentStateDAO;
-  @Inject
-  private HostComponentDesiredStateDAO hostComponentDesiredStateDAO;
-  @Inject
-  private ServiceComponentHostFactory serviceComponentHostFactory;
-  @Inject
-  private AmbariMetaInfo ambariMetaInfo;
-  @Inject
-  private ComponentConfigMappingDAO componentConfigMappingDAO;
-
-  boolean persisted = false;
-  private ServiceComponentDesiredStateEntity desiredStateEntity;
-
-  // [ type -> versionTag ]
-  private Map<String, String>  desiredConfigs;
-
-  private Map<String, ServiceComponentHost> hostComponents;
-
-  private final boolean isClientComponent;
-
-
-
-  private void init() {
-    // TODO load during restart
-    // initialize from DB
-  }
-
-  @AssistedInject
-  public ServiceComponentImpl(@Assisted Service service,
-      @Assisted String componentName, Injector injector) {
-    injector.injectMembers(this);
-    this.service = service;
-    this.desiredStateEntity = new ServiceComponentDesiredStateEntity();
-    desiredStateEntity.setComponentName(componentName);
-    desiredStateEntity.setDesiredState(State.INIT);
-
-    this.desiredConfigs = new HashMap<String, String>();
-    setDesiredStackVersion(service.getDesiredStackVersion());
-
-    this.hostComponents = new HashMap<String, ServiceComponentHost>();
-
-    StackId stackId = service.getDesiredStackVersion();
-    ComponentInfo compInfo = ambariMetaInfo.getComponentCategory(
-        stackId.getStackName(), stackId.getStackVersion(), service.getName(),
-        componentName);
-    if (compInfo == null) {
-      throw new RuntimeException("Trying to create a ServiceComponent"
-          + " not recognized in stack info"
-          + ", clusterName=" + service.getCluster().getClusterName()
-          + ", serviceName=" + service.getName()
-          + ", componentName=" + componentName
-          + ", stackInfo=" + stackId.getStackId());
-    }
-    this.isClientComponent = compInfo.isClient();
-
-    init();
-  }
-
-  @AssistedInject
-  public ServiceComponentImpl(@Assisted Service service,
-                              @Assisted ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity,
-                              Injector injector) {
-    injector.injectMembers(this);
-    this.service = service;
-    this.desiredStateEntity = serviceComponentDesiredStateEntity;
-
-
-    this.desiredConfigs = new HashMap<String, String>();
-
-    this.hostComponents = new HashMap<String, ServiceComponentHost>();
-    for (HostComponentStateEntity hostComponentStateEntity : desiredStateEntity.getHostComponentStateEntities()) {
-      HostComponentDesiredStateEntityPK pk = new HostComponentDesiredStateEntityPK();
-      pk.setClusterId(hostComponentStateEntity.getClusterId());
-      pk.setServiceName(hostComponentStateEntity.getServiceName());
-      pk.setComponentName(hostComponentStateEntity.getComponentName());
-      pk.setHostName(hostComponentStateEntity.getHostName());
-
-      HostComponentDesiredStateEntity hostComponentDesiredStateEntity = hostComponentDesiredStateDAO.findByPK(pk);
-
-      hostComponents.put(hostComponentStateEntity.getHostName(),
-          serviceComponentHostFactory.createExisting(this,
-              hostComponentStateEntity, hostComponentDesiredStateEntity));
-    }
-
-    for (ComponentConfigMappingEntity entity : desiredStateEntity.getComponentConfigMappingEntities()) {
-      desiredConfigs.put(entity.getConfigType(), entity.getVersionTag());
-    }
-
-    StackId stackId = service.getDesiredStackVersion();
-    ComponentInfo compInfo = ambariMetaInfo.getComponentCategory(
-        stackId.getStackName(), stackId.getStackVersion(), service.getName(),
-        getName());
-    if (compInfo == null) {
-      throw new RuntimeException("Trying to create a ServiceComponent"
-          + " not recognized in stack info"
-          + ", clusterName=" + service.getCluster().getClusterName()
-          + ", serviceName=" + service.getName()
-          + ", componentName=" + getName()
-          + ", stackInfo=" + stackId.getStackId());
-    }
-    this.isClientComponent = compInfo.isClient();
-
-    persisted = true;
-  }
-
-  @Override
-  public synchronized String getName() {
-    return desiredStateEntity.getComponentName();
-  }
-
-  @Override
-  public synchronized String getServiceName() {
-    return service.getName();
-  }
-
-  @Override
-  public synchronized long getClusterId() {
-    return this.service.getClusterId();
-  }
-
-  @Override
-  public synchronized Map<String, ServiceComponentHost>
-      getServiceComponentHosts() {
-    return Collections.unmodifiableMap(hostComponents);
-  }
-
-  @Override
-  public synchronized void addServiceComponentHosts(
-      Map<String, ServiceComponentHost> hostComponents) throws AmbariException {
-    // TODO validation
-    for (Entry<String, ServiceComponentHost> entry :
-      hostComponents.entrySet()) {
-      if (!entry.getKey().equals(entry.getValue().getHostName())) {
-        throw new AmbariException("Invalid arguments in map"
-            + ", hostname does not match the key in map");
-      }
-    }
-    for (ServiceComponentHost sch : hostComponents.values()) {
-      addServiceComponentHost(sch);
-    }
-  }
-
-  @Override
-  public synchronized void addServiceComponentHost(
-      ServiceComponentHost hostComponent) throws AmbariException {
-    // TODO validation
-    // TODO ensure host belongs to cluster
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Adding a ServiceComponentHost to ServiceComponent"
-          + ", clusterName=" + service.getCluster().getClusterName()
-          + ", clusterId=" + service.getCluster().getClusterId()
-          + ", serviceName=" + service.getName()
-          + ", serviceComponentName=" + getName()
-          + ", hostname=" + hostComponent.getHostName());
-    }
-    if (hostComponents.containsKey(hostComponent.getHostName())) {
-      throw new AmbariException("Cannot add duplicate ServiceComponentHost"
-          + ", clusterName=" + service.getCluster().getClusterName()
-          + ", clusterId=" + service.getCluster().getClusterId()
-          + ", serviceName=" + service.getName()
-          + ", serviceComponentName=" + getName()
-          + ", hostname=" + hostComponent.getHostName());
-    }
-    // FIXME need a better approach of caching components by host
-    ClusterImpl clusterImpl = (ClusterImpl) service.getCluster();
-    clusterImpl.addServiceComponentHost(hostComponent);
-    this.hostComponents.put(hostComponent.getHostName(), hostComponent);
-  }
-
-  @Override
-  public synchronized ServiceComponentHost addServiceComponentHost(
-      String hostName) throws AmbariException {
-    // TODO validation
-    // TODO ensure host belongs to cluster
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Adding a ServiceComponentHost to ServiceComponent"
-          + ", clusterName=" + service.getCluster().getClusterName()
-          + ", clusterId=" + service.getCluster().getClusterId()
-          + ", serviceName=" + service.getName()
-          + ", serviceComponentName=" + getName()
-          + ", hostname=" + hostName);
-    }
-    if (hostComponents.containsKey(hostName)) {
-      throw new AmbariException("Cannot add duplicate ServiceComponentHost"
-          + ", clusterName=" + service.getCluster().getClusterName()
-          + ", clusterId=" + service.getCluster().getClusterId()
-          + ", serviceName=" + service.getName()
-          + ", serviceComponentName=" + getName()
-          + ", hostname=" + hostName);
-    }
-    ServiceComponentHost hostComponent =
-        serviceComponentHostFactory.createNew(this, hostName, true);
-    // FIXME need a better approach of caching components by host
-    ClusterImpl clusterImpl = (ClusterImpl) service.getCluster();
-    clusterImpl.addServiceComponentHost(hostComponent);
-
-    this.hostComponents.put(hostComponent.getHostName(), hostComponent);
-
-    return hostComponent;
-  }
-
-  @Override
-  public ServiceComponentHost getServiceComponentHost(String hostname)
-    throws AmbariException {
-    if (!hostComponents.containsKey(hostname)) {
-      throw new ServiceComponentHostNotFoundException(getClusterName(),
-          getServiceName(), getName(), hostname);
-    }
-    return this.hostComponents.get(hostname);
-  }
-
-  @Override
-  public synchronized State getDesiredState() {
-    return desiredStateEntity.getDesiredState();
-  }
-
-  @Override
-  public synchronized void setDesiredState(State state) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Setting DesiredState of Service"
-          + ", clusterName=" + service.getCluster().getClusterName()
-          + ", clusterId=" + service.getCluster().getClusterId()
-          + ", serviceName=" + service.getName()
-          + ", serviceComponentName=" + getName()
-          + ", oldDesiredState=" + getDesiredState()
-          + ", newDesiredState=" + state);
-    }
-    desiredStateEntity.setDesiredState(state);
-    saveIfPersisted();
-  }
-
-  @Override
-  public synchronized Map<String, Config> getDesiredConfigs() {
-    Map<String, Config> map = new HashMap<String, Config>();
-    for (Entry<String, String> entry : desiredConfigs.entrySet()) {
-      Config config = service.getCluster().getDesiredConfig(entry.getKey(), entry.getValue());
-      if (null != config) {
-        map.put(entry.getKey(), config);
-      }
-    }
-
-    Map<String, Config> svcConfigs = service.getDesiredConfigs();
-    for (Entry<String, Config> entry : svcConfigs.entrySet()) {
-      if (!map.containsKey(entry.getKey())) {
-        map.put(entry.getKey(), entry.getValue());
-      }
-    }
-
-    return Collections.unmodifiableMap(map);
-  }
-
-  @Override
-  public synchronized void updateDesiredConfigs(Map<String, Config> configs) {
-
-    for (Entry<String,Config> entry : configs.entrySet()) {
-      boolean contains = false;
-
-      for (ComponentConfigMappingEntity componentConfigMappingEntity : desiredStateEntity.getComponentConfigMappingEntities()) {
-        if (entry.getKey().equals(componentConfigMappingEntity.getConfigType())) {
-          contains = true;
-          componentConfigMappingEntity.setTimestamp(new Date().getTime());
-          componentConfigMappingEntity.setVersionTag(entry.getValue().getVersionTag());
-          if (persisted) {
-            componentConfigMappingDAO.merge(componentConfigMappingEntity);
-          }
-        }
-      }
-
-      if (!contains) {
-        ComponentConfigMappingEntity newEntity = new ComponentConfigMappingEntity();
-        newEntity.setClusterId(desiredStateEntity.getClusterId());
-        newEntity.setServiceName(desiredStateEntity.getServiceName());
-        newEntity.setComponentName(desiredStateEntity.getComponentName());
-        newEntity.setConfigType(entry.getKey());
-        newEntity.setVersionTag(entry.getValue().getVersionTag());
-        newEntity.setTimestamp(new Date().getTime());
-        newEntity.setServiceComponentDesiredStateEntity(desiredStateEntity);
-        desiredStateEntity.getComponentConfigMappingEntities().add(newEntity);
-
-      }
-
-
-      this.desiredConfigs.put(entry.getKey(), entry.getValue().getVersionTag());
-    }
-
-    saveIfPersisted();
-  }
-
-  @Override
-  public synchronized StackId getDesiredStackVersion() {
-    return gson.fromJson(desiredStateEntity.getDesiredStackVersion(), StackId.class);
-  }
-
-  @Override
-  public synchronized void setDesiredStackVersion(StackId stackVersion) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Setting DesiredStackVersion of Service"
-          + ", clusterName=" + service.getCluster().getClusterName()
-          + ", clusterId=" + service.getCluster().getClusterId()
-          + ", serviceName=" + service.getName()
-          + ", serviceComponentName=" + getName()
-          + ", oldDesiredStackVersion=" + getDesiredStackVersion()
-          + ", newDesiredStackVersion=" + stackVersion);
-    }
-    desiredStateEntity.setDesiredStackVersion(gson.toJson(stackVersion));
-    saveIfPersisted();
-  }
-
-  @Override
-  public synchronized ServiceComponentResponse convertToResponse() {
-    ServiceComponentResponse r = new ServiceComponentResponse(
-        getClusterId(), service.getCluster().getClusterName(),
-        service.getName(), getName(), this.desiredConfigs,
-        getDesiredStackVersion().getStackId(),
-        getDesiredState().toString());
-    return r;
-  }
-
-  @Override
-  public String getClusterName() {
-    return service.getCluster().getClusterName();
-  }
-
-  @Override
-  public synchronized void debugDump(StringBuilder sb) {
-    sb.append("ServiceComponent={ serviceComponentName=" + getName()
-        + ", clusterName=" + service.getCluster().getClusterName()
-        + ", clusterId=" + service.getCluster().getClusterId()
-        + ", serviceName=" + service.getName()
-        + ", desiredStackVersion=" + getDesiredStackVersion()
-        + ", desiredState=" + getDesiredState().toString()
-        + ", hostcomponents=[ ");
-    boolean first = true;
-    for (ServiceComponentHost sch : hostComponents.values()) {
-      if (!first) {
-        sb.append(" , ");
-      }
-      first = false;
-      sb.append("\n        ");
-      sch.debugDump(sb);
-      sb.append(" ");
-    }
-    sb.append(" ] }");
-  }
-
-  @Override
-  public synchronized boolean isPersisted() {
-    return persisted;
-  }
-
-  @Override
-  public synchronized void persist() {
-    if (!persisted) {
-      persistEntities();
-      refresh();
-      service.refresh();
-      persisted = true;
-    } else {
-      saveIfPersisted();
-    }
-  }
-
-  @Transactional
-  protected void persistEntities() {
-    ClusterServiceEntityPK pk = new ClusterServiceEntityPK();
-    pk.setClusterId(service.getClusterId());
-    pk.setServiceName(service.getName());
-    ClusterServiceEntity serviceEntity = clusterServiceDAO.findByPK(pk);
-
-    desiredStateEntity.setClusterServiceEntity(serviceEntity);
-    serviceComponentDesiredStateDAO.create(desiredStateEntity);
-    clusterServiceDAO.merge(serviceEntity);
-  }
-
-  @Override
-  @Transactional
-  public synchronized void refresh() {
-    if (isPersisted()) {
-      ServiceComponentDesiredStateEntityPK pk = new ServiceComponentDesiredStateEntityPK();
-      pk.setComponentName(getName());
-      pk.setClusterId(getClusterId());
-      pk.setServiceName(getServiceName());
-      // TODO: desiredStateEntity is assigned in unsynchronized way, may be a bug
-      desiredStateEntity = serviceComponentDesiredStateDAO.findByPK(pk);
-      serviceComponentDesiredStateDAO.refresh(desiredStateEntity);
-    }
-  }
-
-  @Transactional
-  private synchronized void saveIfPersisted() {
-    if (isPersisted()) {
-      serviceComponentDesiredStateDAO.merge(desiredStateEntity);
-    }
-  }
-
-  @Override
-  public boolean isClientComponent() {
-    return this.isClientComponent;
-  }
-
-  @Override
-  public synchronized boolean canBeRemoved() {
-    if (!getDesiredState().isRemovableState()) {
-      return false;
-    }
-
-    for (ServiceComponentHost sch : hostComponents.values()) {
-      if (!sch.canBeRemoved()) {
-        LOG.warn("Found non removable hostcomponent when trying to"
-            + " delete service component"
-            + ", clusterName=" + getClusterName()
-            + ", serviceName=" + getServiceName()
-            + ", componentName=" + getName()
-            + ", hostname=" + sch.getHostName());
-        return false;
-      }
-    }
-    return true;
-  }
-
-  @Override
-  @Transactional
-  public synchronized void deleteAllServiceComponentHosts()
-      throws AmbariException {
-    LOG.info("Deleting all servicecomponenthosts for component"
-        + ", clusterName=" + getClusterName()
-        + ", serviceName=" + getServiceName()
-        + ", componentName=" + getName());
-    for (ServiceComponentHost sch : hostComponents.values()) {
-      if (!sch.canBeRemoved()) {
-        throw new AmbariException("Found non removable hostcomponent "
-            + " when trying to delete"
-            + " all hostcomponents from servicecomponent"
-            + ", clusterName=" + getClusterName()
-            + ", serviceName=" + getServiceName()
-            + ", componentName=" + getName()
-            + ", hostname=" + sch.getHostName());
-      }
-    }
-
-    for (ServiceComponentHost serviceComponentHost : hostComponents.values()) {
-      serviceComponentHost.delete();
-    }
-
-    hostComponents.clear();
-  }
-
-  @Override
-  public synchronized void deleteServiceComponentHosts(String hostname)
-      throws AmbariException {
-    ServiceComponentHost sch = getServiceComponentHost(hostname);
-    LOG.info("Deleting servicecomponenthost for cluster"
-        + ", clusterName=" + getClusterName()
-        + ", serviceName=" + getServiceName()
-        + ", componentName=" + getName()
-        + ", hostname=" + sch.getHostName());
-    if (!sch.canBeRemoved()) {
-      throw new AmbariException("Could not delete hostcomponent from cluster"
-          + ", clusterName=" + getClusterName()
-          + ", serviceName=" + getServiceName()
-          + ", componentName=" + getName()
-          + ", hostname=" + sch.getHostName());
-    }
-    sch.delete();
-    hostComponents.remove(hostname);
-  }
-
-  @Override
-  public synchronized void deleteDesiredConfigs(Set<String> configTypes) {
-    componentConfigMappingDAO.removeByType(configTypes);
-    for (String configType : configTypes) {
-      desiredConfigs.remove(configType);
-    }
-  }
-
-  @Override
-  @Transactional
-  public synchronized void delete() throws AmbariException {
-    deleteAllServiceComponentHosts();
-
-    if (persisted) {
-      removeEntities();
-      persisted = false;
-    }
-
-    desiredConfigs.clear();
-  }
-
-  @Transactional
-  protected void removeEntities() throws AmbariException {
-    ServiceComponentDesiredStateEntityPK pk = new ServiceComponentDesiredStateEntityPK();
-    pk.setClusterId(getClusterId());
-    pk.setComponentName(getName());
-    pk.setServiceName(getServiceName());
-
-    serviceComponentDesiredStateDAO.removeByPK(pk);
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceFactory.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceFactory.java
deleted file mode 100644
index a3a041b..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceFactory.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state;
-
-import org.apache.ambari.server.orm.entities.ClusterServiceEntity;
-
-public interface ServiceFactory {
-
-  Service createNew(Cluster cluster, String serviceName);
-
-  Service createExisting(Cluster cluster, ClusterServiceEntity serviceEntity);
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
deleted file mode 100644
index 128cc29..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
+++ /dev/null
@@ -1,502 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state;
-
-import java.util.*;
-import java.util.Map.Entry;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.ServiceComponentNotFoundException;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.controller.ServiceResponse;
-import org.apache.ambari.server.orm.dao.*;
-import org.apache.ambari.server.orm.entities.*;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.gson.Gson;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.assistedinject.Assisted;
-import com.google.inject.assistedinject.AssistedInject;
-import com.google.inject.persist.Transactional;
-
-
-public class ServiceImpl implements Service {
-
-  private ClusterServiceEntity serviceEntity;
-  private ServiceDesiredStateEntity serviceDesiredStateEntity;
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ServiceImpl.class);
-
-  private boolean persisted = false;
-  private final Cluster cluster;
-  // [ String type -> Config Tag ], no need to hold the direct reference to the config
-  private Map<String, String> desiredConfigs;
-  private Map<String, ServiceComponent> components;
-  private final boolean isClientOnlyService;
-
-  @Inject
-  Gson gson;
-  @Inject
-  private ClusterServiceDAO clusterServiceDAO;
-  @Inject
-  private Clusters clusters;
-  @Inject
-  private ServiceDesiredStateDAO serviceDesiredStateDAO;
-  @Inject
-  private ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO;
-  @Inject
-  private ClusterDAO clusterDAO;
-  @Inject
-  private ServiceConfigMappingDAO serviceConfigMappingDAO;
-  @Inject
-  private ServiceComponentFactory serviceComponentFactory;
-  @Inject
-  private AmbariMetaInfo ambariMetaInfo;
-
-  private void init() {
-    // TODO load from DB during restart?
-  }
-
-  @AssistedInject
-  public ServiceImpl(@Assisted Cluster cluster, @Assisted String serviceName,
-      Injector injector) {
-    injector.injectMembers(this);
-    serviceEntity = new ClusterServiceEntity();
-    serviceEntity.setServiceName(serviceName);
-    serviceDesiredStateEntity = new ServiceDesiredStateEntity();
-
-    serviceDesiredStateEntity.setClusterServiceEntity(serviceEntity);
-    serviceEntity.setServiceDesiredStateEntity(serviceDesiredStateEntity);
-
-    this.cluster = cluster;
-    this.desiredConfigs = new HashMap<String, String>();
-
-    this.components = new HashMap<String, ServiceComponent>();
-
-    StackId stackId = cluster.getDesiredStackVersion();
-    setDesiredStackVersion(stackId);
-
-    ServiceInfo sInfo = ambariMetaInfo.getServiceInfo(stackId.getStackName(),
-        stackId.getStackVersion(), serviceName);
-    this.isClientOnlyService = sInfo.isClientOnlyService();
-
-    init();
-  }
-
-  @AssistedInject
-  public ServiceImpl(@Assisted Cluster cluster, @Assisted ClusterServiceEntity
-      serviceEntity, Injector injector) {
-    injector.injectMembers(this);
-    this.serviceEntity = serviceEntity;
-    this.cluster = cluster;
-
-    //TODO check for null states?
-    this.serviceDesiredStateEntity = serviceEntity.getServiceDesiredStateEntity();
-
-    this.desiredConfigs = new HashMap<String, String>();
-
-    this.components = new HashMap<String, ServiceComponent>();
-
-    if (!serviceEntity.getServiceComponentDesiredStateEntities().isEmpty()) {
-      for (ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity
-          : serviceEntity.getServiceComponentDesiredStateEntities()) {
-        components.put(serviceComponentDesiredStateEntity.getComponentName(),
-            serviceComponentFactory.createExisting(this,
-                serviceComponentDesiredStateEntity));
-      }
-    }
-
-    for (ServiceConfigMappingEntity mappingEntity :
-        serviceEntity.getServiceConfigMappings()) {
-      desiredConfigs.put(mappingEntity.getConfigType(),
-          mappingEntity.getVersionTag());
-    }
-
-    StackId stackId = getDesiredStackVersion();
-    ServiceInfo sInfo = ambariMetaInfo.getServiceInfo(stackId.getStackName(),
-        stackId.getStackVersion(), getName());
-    this.isClientOnlyService = sInfo.isClientOnlyService();
-
-    persisted = true;
-  }
-
-  @Override
-  public String getName() {
-    return serviceEntity.getServiceName();
-  }
-
-  @Override
-  public long getClusterId() {
-    return cluster.getClusterId();
-  }
-
-  @Override
-  public synchronized Map<String, ServiceComponent> getServiceComponents() {
-    return Collections.unmodifiableMap(components);
-  }
-
-  @Override
-  public synchronized void addServiceComponents(
-      Map<String, ServiceComponent> components) throws AmbariException {
-    for (ServiceComponent sc : components.values()) {
-      addServiceComponent(sc);
-    }
-  }
-
-  @Override
-  public synchronized void addServiceComponent(ServiceComponent component)
-      throws AmbariException {
-    // TODO validation
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Adding a ServiceComponent to Service"
-          + ", clusterName=" + cluster.getClusterName()
-          + ", clusterId=" + cluster.getClusterId()
-          + ", serviceName=" + getName()
-          + ", serviceComponentName=" + component.getName());
-    }
-    if (components.containsKey(component.getName())) {
-      throw new AmbariException("Cannot add duplicate ServiceComponent"
-          + ", clusterName=" + cluster.getClusterName()
-          + ", clusterId=" + cluster.getClusterId()
-          + ", serviceName=" + getName()
-          + ", serviceComponentName=" + component.getName());
-    }
-    this.components.put(component.getName(), component);
-  }
-
-  @Override
-  public synchronized ServiceComponent addServiceComponent(
-      String serviceComponentName) throws AmbariException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Adding a ServiceComponent to Service"
-          + ", clusterName=" + cluster.getClusterName()
-          + ", clusterId=" + cluster.getClusterId()
-          + ", serviceName=" + getName()
-          + ", serviceComponentName=" + serviceComponentName);
-    }
-    if (components.containsKey(serviceComponentName)) {
-      throw new AmbariException("Cannot add duplicate ServiceComponent"
-          + ", clusterName=" + cluster.getClusterName()
-          + ", clusterId=" + cluster.getClusterId()
-          + ", serviceName=" + getName()
-          + ", serviceComponentName=" + serviceComponentName);
-    }
-    ServiceComponent component = serviceComponentFactory.createNew(this, serviceComponentName);
-    this.components.put(component.getName(), component);
-    return component;
-  }
-
-  @Override
-  public ServiceComponent getServiceComponent(String componentName)
-      throws AmbariException {
-    if (!components.containsKey(componentName)) {
-      throw new ServiceComponentNotFoundException(cluster.getClusterName(),
-          getName(),
-          componentName);
-    }
-    return this.components.get(componentName);
-  }
-
-  @Override
-  public synchronized State getDesiredState() {
-    return this.serviceDesiredStateEntity.getDesiredState();
-  }
-
-  @Override
-  public synchronized void setDesiredState(State state) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Setting DesiredState of Service"
-          + ", clusterName=" + cluster.getClusterName()
-          + ", clusterId=" + cluster.getClusterId()
-          + ", serviceName=" + getName()
-          + ", oldDesiredState=" + this.getDesiredState()
-          + ", newDesiredState=" + state);
-    }
-    this.serviceDesiredStateEntity.setDesiredState(state);
-    saveIfPersisted();
-  }
-
-  @Override
-  public synchronized Map<String, Config> getDesiredConfigs() {
-    Map<String, Config> map = new HashMap<String, Config>();
-    for (Entry<String, String> entry : desiredConfigs.entrySet()) {
-      Config config = cluster.getDesiredConfig(entry.getKey(), entry.getValue());
-      if (null != config) {
-        map.put(entry.getKey(), config);
-      } else {
-        // FIXME this is an error - should throw a proper exception
-        throw new RuntimeException("Found an invalid config"
-            + ", clusterName=" + getCluster().getClusterName()
-            + ", serviceName=" + getName()
-            + ", configType=" + entry.getKey()
-            + ", configVersionTag=" + entry.getValue());
-      }
-    }
-    return Collections.unmodifiableMap(map);
-  }
-
-  @Override
-  public synchronized void updateDesiredConfigs(Map<String, Config> configs) {
-
-    for (Entry<String,Config> entry : configs.entrySet()) {
-      boolean contains = false;
-
-      for (ServiceConfigMappingEntity serviceConfigMappingEntity : serviceEntity.getServiceConfigMappings()) {
-        if (entry.getKey().equals(serviceConfigMappingEntity.getConfigType())) {
-          contains = true;
-          serviceConfigMappingEntity.setTimestamp(new Date().getTime());
-          serviceConfigMappingEntity.setVersionTag(entry.getValue().getVersionTag());
-        }
-      }
-
-      if (!contains) {
-        ServiceConfigMappingEntity newEntity = new ServiceConfigMappingEntity();
-        newEntity.setClusterId(serviceEntity.getClusterId());
-        newEntity.setServiceName(serviceEntity.getServiceName());
-        newEntity.setConfigType(entry.getKey());
-        newEntity.setVersionTag(entry.getValue().getVersionTag());
-        newEntity.setTimestamp(new Date().getTime());
-        newEntity.setServiceEntity(serviceEntity);
-        serviceEntity.getServiceConfigMappings().add(newEntity);
-      }
-
-      this.desiredConfigs.put(entry.getKey(), entry.getValue().getVersionTag());
-    }
-
-    saveIfPersisted();
-
-  }
-
-  @Override
-  public synchronized StackId getDesiredStackVersion() {
-    return gson.fromJson(serviceDesiredStateEntity.getDesiredStackVersion(), StackId.class);
-  }
-
-  @Override
-  public synchronized void setDesiredStackVersion(StackId stackVersion) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Setting DesiredStackVersion of Service"
-          + ", clusterName=" + cluster.getClusterName()
-          + ", clusterId=" + cluster.getClusterId()
-          + ", serviceName=" + getName()
-          + ", oldDesiredStackVersion=" + getDesiredStackVersion()
-          + ", newDesiredStackVersion=" + stackVersion);
-    }
-    serviceDesiredStateEntity.setDesiredStackVersion(gson.toJson(stackVersion));
-    saveIfPersisted();
-  }
-
-  @Override
-  public synchronized ServiceResponse convertToResponse() {
-    ServiceResponse r = new ServiceResponse(cluster.getClusterId(),
-        cluster.getClusterName(),
-        getName(),
-        desiredConfigs,
-        getDesiredStackVersion().getStackId(),
-        getDesiredState().toString());
-    return r;
-  }
-
-  @Override
-  public Cluster getCluster() {
-    return cluster;
-  }
-
-  @Override
-  public synchronized void debugDump(StringBuilder sb) {
-    sb.append("Service={ serviceName=" + getName()
-        + ", clusterName=" + cluster.getClusterName()
-        + ", clusterId=" + cluster.getClusterId()
-        + ", desiredStackVersion=" + getDesiredStackVersion()
-        + ", desiredState=" + getDesiredState().toString()
-        + ", configs=[");
-    boolean first = true;
-    if (desiredConfigs != null) {
-      for (Entry<String, String> entry : desiredConfigs.entrySet()) {
-        if (!first) {
-          sb.append(" , ");
-        }
-        first = false;
-        sb.append("{ Config type=" + entry.getKey()
-            + ", versionTag=" + entry.getValue() + "}");
-      }
-    }
-    sb.append("], components=[ ");
-
-    first = true;
-    for(ServiceComponent sc : components.values()) {
-      if (!first) {
-        sb.append(" , ");
-      }
-      first = false;
-      sb.append("\n      ");
-      sc.debugDump(sb);
-      sb.append(" ");
-    }
-    sb.append(" ] }");
-  }
-
-  @Override
-  public synchronized boolean isPersisted() {
-    return persisted;
-  }
-
-  @Override
-  public synchronized void persist() {
-    if (!persisted) {
-      persistEntities();
-      refresh();
-      cluster.refresh();
-      persisted = true;
-    } else {
-      saveIfPersisted();
-    }
-  }
-
-  @Transactional
-  protected void persistEntities() {
-    ClusterEntity clusterEntity = clusterDAO.findById(cluster.getClusterId());
-    serviceEntity.setClusterEntity(clusterEntity);
-    clusterServiceDAO.create(serviceEntity);
-    serviceDesiredStateDAO.create(serviceDesiredStateEntity);
-    clusterEntity.getClusterServiceEntities().add(serviceEntity);
-    clusterDAO.merge(clusterEntity);
-    // merge() returns managed copies; the results are deliberately not
-    // reassigned, since refresh() reloads both entities after persist()
-    clusterServiceDAO.merge(serviceEntity);
-    serviceDesiredStateDAO.merge(serviceDesiredStateEntity);
-  }
-
-  // non-private so that Guice's @Transactional interception can apply;
-  // on a private method the annotation would be a silent no-op
-  @Transactional
-  protected void saveIfPersisted() {
-    if (isPersisted()) {
-      clusterServiceDAO.merge(serviceEntity);
-      serviceDesiredStateDAO.merge(serviceDesiredStateEntity);
-    }
-  }
-
-  @Override
-  @Transactional
-  public synchronized void refresh() {
-    if (isPersisted()) {
-      ClusterServiceEntityPK pk = new ClusterServiceEntityPK();
-      pk.setClusterId(getClusterId());
-      pk.setServiceName(getName());
-      serviceEntity = clusterServiceDAO.findByPK(pk);
-      serviceDesiredStateEntity = serviceEntity.getServiceDesiredStateEntity();
-      clusterServiceDAO.refresh(serviceEntity);
-      serviceDesiredStateDAO.refresh(serviceDesiredStateEntity);
-    }
-  }
-
-  @Override
-  public synchronized boolean canBeRemoved() {
-    if (!getDesiredState().isRemovableState()) {
-      return false;
-    }
-
-    for (ServiceComponent sc : components.values()) {
-      if (!sc.canBeRemoved()) {
-        LOG.warn("Found non removable component when trying to delete service"
-            + ", clusterName=" + cluster.getClusterName()
-            + ", serviceName=" + getName()
-            + ", componentName=" + sc.getName());
-        return false;
-      }
-    }
-    return true;
-  }
-
-  @Override
-  @Transactional
-  public synchronized void deleteAllComponents() throws AmbariException {
-    LOG.info("Deleting all components for service"
-        + ", clusterName=" + cluster.getClusterName()
-        + ", serviceName=" + getName());
-    // FIXME check dependencies from meta layer
-    for (ServiceComponent component : components.values()) {
-      if (!component.canBeRemoved()) {
-        throw new AmbariException("Found non removable component when trying to"
-            + " delete all components from service"
-            + ", clusterName=" + cluster.getClusterName()
-            + ", serviceName=" + getName()
-            + ", componentName=" + component.getName());
-      }
-    }
-
-    for (ServiceComponent serviceComponent : components.values()) {
-      serviceComponent.delete();
-    }
-
-    components.clear();
-  }
-
-  @Override
-  public synchronized void deleteServiceComponent(String componentName)
-      throws AmbariException {
-    ServiceComponent component = getServiceComponent(componentName);
-    LOG.info("Deleting servicecomponent for cluster"
-        + ", clusterName=" + cluster.getClusterName()
-        + ", serviceName=" + getName()
-        + ", componentName=" + componentName);
-    // FIXME check dependencies from meta layer
-    if (!component.canBeRemoved()) {
-      throw new AmbariException("Could not delete component from cluster"
-          + ", clusterName=" + cluster.getClusterName()
-          + ", serviceName=" + getName()
-          + ", componentName=" + componentName);
-    }
-
-    component.delete();
-    components.remove(componentName);
-  }
-
-  @Override
-  public boolean isClientOnlyService() {
-    return isClientOnlyService;
-  }
-
-  @Override
-  @Transactional
-  public synchronized void delete() throws AmbariException {
-    deleteAllComponents();
-
-    if (persisted) {
-      removeEntities();
-      persisted = false;
-    }
-
-    desiredConfigs.clear();
-  }
-
-  @Transactional
-  protected void removeEntities() throws AmbariException {
-    ClusterServiceEntityPK pk = new ClusterServiceEntityPK();
-    pk.setClusterId(getClusterId());
-    pk.setServiceName(getName());
-
-    clusterServiceDAO.removeByPK(pk);
-  }
-
-}
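A short sketch of the persistence lifecycle implemented above (the call sequence is illustrative; serviceFactory and cluster are assumed to come from the surrounding container, and checked AmbariExceptions are elided):

    Service hdfs = serviceFactory.createNew(cluster, "HDFS");  // in-memory only
    hdfs.addServiceComponent("NAMENODE");
    hdfs.persist();                          // first call: persistEntities() + refresh()
    hdfs.setDesiredState(State.INSTALLED);   // later mutations merge via saveIfPersisted()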
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceInfo.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceInfo.java
deleted file mode 100644
index 3a2cbc0..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceInfo.java
+++ /dev/null
@@ -1,120 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.codehaus.jackson.annotate.JsonIgnore;
-import org.codehaus.jackson.map.annotate.JsonFilter;
-
-@JsonFilter("propertiesfilter")
-public class ServiceInfo {
-  private String name;
-  private String version;
-  private String user;
-  private String comment;
-  private List<PropertyInfo> properties;
-  private List<ComponentInfo> components;
-
-  public String getName() {
-    return name;
-  }
-
-  public void setName(String name) {
-    this.name = name;
-  }
-
-  public String getVersion() {
-    return version;
-  }
-
-  public void setVersion(String version) {
-    this.version = version;
-  }
-
-  public String getUser() {
-    return user;
-  }
-
-  public void setUser(String user) {
-    this.user = user;
-  }
-
-  public String getComment() {
-    return comment;
-  }
-
-  public void setComment(String comment) {
-    this.comment = comment;
-  }
-
-  public List<PropertyInfo> getProperties() {
-    if (properties == null) properties = new ArrayList<PropertyInfo>();
-    return properties;
-  }
-
-  public List<ComponentInfo> getComponents() {
-    if (components == null) components = new ArrayList<ComponentInfo>();
-    return components;
-  }
-
-  public boolean isClientOnlyService() {
-    if (components == null || components.isEmpty()) {
-      return false;
-    }
-    for (ComponentInfo compInfo : components) {
-      if (!compInfo.isClient()) {
-        return false;
-      }
-    }
-    return true;
-  }
-
-  public ComponentInfo getClientComponent() {
-    if (components == null || components.isEmpty()) {
-      return null;
-    }
-    for (ComponentInfo compInfo : components) {
-      if (compInfo.isClient()) {
-        return compInfo;
-      }
-    }
-    return components.get(0);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder();
-    sb.append("Service name:" + name + "\nversion:" + version
-        + "\nuser:" + user + "\ncomment:" + comment);
-    // property dump intentionally disabled:
-    // for (PropertyInfo property : getProperties()) {
-    //   sb.append("\tProperty name=" + property.getName()
-    //       + "\nproperty value=" + property.getValue()
-    //       + "\ndescription=" + property.getDescription());
-    // }
-    for (ComponentInfo component : getComponents()) {
-      sb.append("\n\n\nComponent:\n");
-      sb.append("name=" + component.getName());
-      sb.append("\tcategory=" + component.getCategory());
-    }
-
-    return sb.toString();
-  }
-}
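The client-only rule above deserves a worked example: a service is client-only exactly when it declares at least one component and every component reports isClient(). Illustrative sketch (assumes ComponentInfo instances configured so isClient() returns the indicated values):

    ServiceInfo info = new ServiceInfo();
    boolean a = info.isClientOnlyService();   // false: no components declared

    info.getComponents().add(pigClient);      // assume isClient() == true
    boolean b = info.isClientOnlyService();   // true: all components are clients

    info.getComponents().add(jobTracker);     // assume isClient() == false
    boolean c = info.isClientOnlyService();   // false: one non-client breaks the rule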
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/StackId.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/StackId.java
deleted file mode 100644
index 683721b..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/StackId.java
+++ /dev/null
@@ -1,112 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state;
-
-public class StackId {
-  private String stackName;
-  private String stackVersion;
-
-  public StackId() {
-    super();
-    this.stackName = "";
-    this.stackVersion = "";
-  }
-
-  public StackId(String stackId) {
-    super();
-    parseStackIdHelper(this, stackId);
-  }
-
-  public StackId(StackInfo stackInfo) {
-    this.stackName = stackInfo.getName();
-    this.stackVersion = stackInfo.getVersion();
-  }
-
-  /**
-   * @return the stackName
-   */
-  public String getStackName() {
-    return stackName;
-  }
-
-  /**
-   * @return the stackVersion
-   */
-  public String getStackVersion() {
-    return stackVersion;
-  }
-
-  /**
-   * @return the stackVersion
-   */
-  public String getStackId() {
-    if (stackName.isEmpty()
-        && stackVersion.isEmpty()) {
-      return "";
-    }
-    return stackName + "-" + stackVersion;
-  }
-
-  /**
-   * @param stackId the stackVersion to set
-   */
-  public void setStackId(String stackId) {
-    parseStackIdHelper(this, stackId);
-  }
-
-  @Override
-  public boolean equals(Object object) {
-    if (this == object) {
-      return true;
-    }
-    if (!(object instanceof StackId)) {
-      return false;
-    }
-    StackId s = (StackId) object;
-    // compare both fields; matching on the version alone would make
-    // unrelated stacks that share a version number compare equal
-    return stackName.equals(s.stackName)
-        && stackVersion.equals(s.stackVersion);
-  }
-
-  @Override
-  public int hashCode() {
-    int result = stackName != null ? stackName.hashCode() : 0;
-    result = 31 * result + (stackVersion != null ? stackVersion.hashCode() : 0);
-    return result;
-  }
-
-  public String toString() {
-    return getStackId();
-  }
-
-  public static void parseStackIdHelper(StackId stackVersion,
-      String stackId) {
-    if (stackId.isEmpty()) {
-      stackVersion.stackName = "";
-      stackVersion.stackVersion = "";
-      return;
-    }
-    int pos = stackId.indexOf('-');
-    if (pos == -1
-        || (stackId.length() <= (pos+1))) {
-      throw new RuntimeException("Could not parse invalid Stack Id"
-          + ", stackId=" + stackId);
-    }
-    stackVersion.stackName = stackId.substring(0, pos);
-    stackVersion.stackVersion = stackId.substring(pos+1);
-  }
-
-}
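The id format is NAME-VERSION, split on the first '-'; everything after it, further dashes included, belongs to the version. Worked cases (values are illustrative):

    StackId id = new StackId("HDP-1.2.0");
    id.getStackName();      // "HDP"
    id.getStackVersion();   // "1.2.0"
    id.getStackId();        // "HDP-1.2.0"

    new StackId("");        // allowed: both fields become ""
    new StackId("HDP");     // no separator      -> RuntimeException
    new StackId("HDP-");    // nothing after '-' -> RuntimeException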
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
deleted file mode 100644
index 5f074c1..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state;
-
-import java.util.ArrayList;
-import java.util.List;
-
-public class StackInfo {
-  private String name;
-  private String version;
-  private List<RepositoryInfo> repositories;
-  private List<ServiceInfo> services;
-
-  public String getName() {
-    return name;
-  }
-
-  public void setName(String name) {
-    this.name = name;
-  }
-
-  public String getVersion() {
-    return version;
-  }
-
-  public void setVersion(String version) {
-    this.version = version;
-  }
-
-  public List<RepositoryInfo> getRepositories() {
-    if (repositories == null) repositories = new ArrayList<RepositoryInfo>();
-    return repositories;
-  }
-
-  public void setRepositories(List<RepositoryInfo> repositories) {
-    this.repositories = repositories;
-  }
-
-  public synchronized List<ServiceInfo> getServices() {
-    if (services == null) services = new ArrayList<ServiceInfo>();
-    return services;
-  }
-
-  public synchronized void setServices(List<ServiceInfo> services) {
-    this.services = services;
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder("Stack name:" + name + "\nversion:" + version);
-    if (services != null) {
-      sb.append("\n\t\tService:");
-      for (ServiceInfo service : services) {
-        sb.append("\t\t" + service.toString());
-      }
-    }
-
-    if (repositories != null) {
-      sb.append("\n\t\tRepositories:");
-      for (RepositoryInfo repository : repositories) {
-        sb.append("\t\t" + repository.toString());
-      }
-    }
-
-    return sb.toString();
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/State.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/State.java
deleted file mode 100644
index 4beb15a..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/State.java
+++ /dev/null
@@ -1,142 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state;
-
-public enum State {
-  /**
-   * Initial/Clean state
-   */
-  INIT(0),
-  /**
-   * In the process of installing.
-   */
-  INSTALLING(1),
-  /**
-   * Install failed
-   */
-  INSTALL_FAILED(2),
-  /**
-   * State when install completed successfully
-   */
-  INSTALLED(3),
-  /**
-   * In the process of starting.
-   */
-  STARTING(4),
-  /**
-   * Start failed.
-   */
-  START_FAILED(5),
-  /**
-   * State when start completed successfully.
-   */
-  STARTED(6),
-  /**
-   * In the process of stopping.
-   */
-  STOPPING(7),
-  /**
-   * Stop failed
-   */
-  STOP_FAILED(8),
-
-  /**
-   * In the process of uninstalling.
-   */
-  UNINSTALLING(9),
-  /**
-   * Uninstall failed.
-   */
-  UNINSTALL_FAILED(10),
-  /**
-   * State when uninstall completed successfully.
-   */
-  UNINSTALLED(11),
-  /**
-   * In the process of wiping out the install
-   */
-  WIPING_OUT(12),
-  /**
-   * State when wipeout fails
-   */
-  WIPEOUT_FAILED(13);
-
-  private final int state;
-
-  private State(int state) {
-    this.state = state;
-  }
-
-  public boolean isValidDesiredState() {
-    switch (this) {
-      case INIT:
-      case INSTALLED:
-      case STARTED:
-      case UNINSTALLED:
-        return true;
-      default:
-        return false;
-    }
-  }
-
-  public boolean isInProgressState() {
-    switch (this) {
-      case INSTALLING:
-      case STARTING:
-      case STOPPING:
-      case UNINSTALLING:
-      case WIPING_OUT:
-        return true;
-      default:
-        return false;
-    }
-  }
-
-  public boolean isValidClientComponentState() {
-    switch (this) {
-      case STARTING:
-      case STARTED:
-      case START_FAILED:
-      case STOP_FAILED:
-      case STOPPING:
-        return false;
-      default:
-        return true;
-    }
-  }
-
-  /**
-   * Indicates whether or not the resource with this state
-   * can be removed.
-   *
-   * @return true if this is a removable state
-   */
-  public boolean isRemovableState() {
-    switch (this) {
-      case INIT:
-      case INSTALLING:
-      case INSTALLED:
-      case INSTALL_FAILED:
-      case UNINSTALLED:
-        return true;
-      default:
-        return false;
-    }
-  }
-}
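Reading the switches above, the four predicates slice the lifecycle as in this sketch:

    State.INSTALLING.isInProgressState();        // true  - transient state
    State.INSTALLING.isRemovableState();         // true  - may be cleaned up mid-install
    State.STARTED.isValidDesiredState();         // true  - a legal target state
    State.STARTED.isRemovableState();            // false - must be stopped first
    State.STARTED.isValidClientComponentState(); // false - client components never start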
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/action/Action.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/action/Action.java
deleted file mode 100644
index 6d00813..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/action/Action.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.action;
-
-import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
-
-public interface Action {
-
-  /**
-   * Get the Action ID for the action
-   * @return ActionId
-   */
-  public ActionId getId();
-
-  // TODO requires some form of ActionType to ensure only one running
-  // action per action type.
-  // There are gotchas: decommissioning, for example, should be allowed to
-  // happen on more than one host at a time.
-
-
-  /**
-   * Get Start Time of the action
-   * @return Start time as a unix timestamp
-   */
-  public long getStartTime();
-
-  /**
-   * Get the time at which the Action's progress status was last updated
-   * @return Last Update Time as a unix timestamp
-   */
-  public long getLastUpdateTime();
-
-  /**
-   * Time when the Action completed
-   * @return Completion Time as a unix timestamp
-   */
-  public long getCompletionTime();
-
-  /**
-   * Get the current state of the Action
-   * @return ActionState
-   */
-  public ActionState getState();
-
-  /**
-   * Set the State of the Action
-   * @param state ActionState
-   */
-  public void setState(ActionState state);
-
-  /**
-   * Send an ActionEvent to the Action's StateMachine
-   * @param event ActionEvent
-   * @throws InvalidStateTransitionException
-   */
-  public void handleEvent(ActionEvent event)
-      throws InvalidStateTransitionException;
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/action/ActionCompletedEvent.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/action/ActionCompletedEvent.java
deleted file mode 100644
index f39fc3a..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/action/ActionCompletedEvent.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.action;
-
-public class ActionCompletedEvent extends ActionEvent {
-
-  private final long completionTime;
-
-  // TODO
-  // need to add action report
-
-  public ActionCompletedEvent(ActionId actionId, long completionTime) {
-    super(ActionEventType.ACTION_COMPLETED, actionId);
-    this.completionTime = completionTime;
-  }
-
-  /**
-   * @return the completionTime
-   */
-  public long getCompletionTime() {
-    return completionTime;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/action/ActionEvent.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/action/ActionEvent.java
deleted file mode 100644
index cd8eacf..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/action/ActionEvent.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.action;
-
-import org.apache.ambari.server.state.fsm.event.AbstractEvent;
-
-/**
- * Base class for all events that affect the Action FSM
- */
-public abstract class ActionEvent extends AbstractEvent<ActionEventType> {
-
-  /**
-   * ActionId identifying the action
-   */
-  private final ActionId actionId;
-
-  public ActionEvent(ActionEventType type, ActionId actionId) {
-    super(type);
-    this.actionId = actionId;
-  }
-
-  /**
-   * @return the actionId
-   */
-  public ActionId getActionId() {
-    return actionId;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/action/ActionEventType.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/action/ActionEventType.java
deleted file mode 100644
index 6b27203..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/action/ActionEventType.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.action;
-
-public enum ActionEventType {
-  /**
-   * Event sent when a new Action is triggered.
-   */
-  ACTION_INIT,
-  /**
-   * Action still in progress.
-   */
-  ACTION_IN_PROGRESS,
-  /**
-   * Action completed successfully.
-   */
-  ACTION_COMPLETED,
-  /**
-   * Action failed to complete successfully.
-   */
-  ACTION_FAILED
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/action/ActionFailedEvent.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/action/ActionFailedEvent.java
deleted file mode 100644
index 6696743..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/action/ActionFailedEvent.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.action;
-
-public class ActionFailedEvent extends ActionEvent {
-
-  private final long completionTime;
-
-  // TODO
-  // need to add action report
-
-  public ActionFailedEvent(ActionId actionId, long completionTime) {
-    super(ActionEventType.ACTION_FAILED, actionId);
-    this.completionTime = completionTime;
-  }
-
-  /**
-   * @return the completionTime
-   */
-  public long getCompletionTime() {
-    return completionTime;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/action/ActionId.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/action/ActionId.java
deleted file mode 100644
index 18d46b9..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/action/ActionId.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.action;
-
-// TODO
-public class ActionId {
-
-  final long actionId;
-
-  final ActionType actionType;
-
-  public ActionId(long actionId, ActionType actionType) {
-    super();
-    this.actionId = actionId;
-    this.actionType = actionType;
-  }
-
-  public String toString() {
-    return "[ actionId=" + actionId
-        + ", actionType=" + actionType + "]";
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/action/ActionImpl.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/action/ActionImpl.java
deleted file mode 100644
index e0fc515..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/action/ActionImpl.java
+++ /dev/null
@@ -1,314 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.action;
-
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
-import org.apache.ambari.server.state.fsm.SingleArcTransition;
-import org.apache.ambari.server.state.fsm.StateMachine;
-import org.apache.ambari.server.state.fsm.StateMachineFactory;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-public class ActionImpl implements Action {
-
-  private static final Log LOG = LogFactory.getLog(ActionImpl.class);
-
-  private final Lock readLock;
-  private final Lock writeLock;
-
-  private ActionId id;
-
-  private long startTime;
-  private long lastUpdateTime;
-  private long completionTime;
-
-  // TODO
-  // need to add action report
-
-  private static final StateMachineFactory
-    <ActionImpl, ActionState, ActionEventType, ActionEvent>
-      stateMachineFactory
-        = new StateMachineFactory<ActionImpl, ActionState,
-          ActionEventType, ActionEvent>
-            (ActionState.INIT)
-
-    // define the state machine of an Action
-
-    .addTransition(ActionState.INIT, ActionState.IN_PROGRESS,
-        ActionEventType.ACTION_IN_PROGRESS, new ActionProgressUpdateTransition())
-    .addTransition(ActionState.INIT, ActionState.COMPLETED,
-        ActionEventType.ACTION_COMPLETED, new ActionCompletedTransition())
-    .addTransition(ActionState.INIT, ActionState.FAILED,
-        ActionEventType.ACTION_FAILED, new ActionFailedTransition())
-    .addTransition(ActionState.IN_PROGRESS, ActionState.IN_PROGRESS,
-        ActionEventType.ACTION_IN_PROGRESS, new ActionProgressUpdateTransition())
-    .addTransition(ActionState.IN_PROGRESS, ActionState.COMPLETED,
-        ActionEventType.ACTION_COMPLETED, new ActionCompletedTransition())
-    .addTransition(ActionState.IN_PROGRESS, ActionState.FAILED,
-        ActionEventType.ACTION_FAILED, new ActionFailedTransition())
-    .addTransition(ActionState.COMPLETED, ActionState.INIT,
-        ActionEventType.ACTION_INIT, new NewActionTransition())
-    .addTransition(ActionState.FAILED, ActionState.INIT,
-        ActionEventType.ACTION_INIT, new NewActionTransition())
-    .installTopology();
-
-  private final StateMachine<ActionState, ActionEventType, ActionEvent>
-      stateMachine;
-
-  public ActionImpl(ActionId id, long startTime) {
-    super();
-    this.id = id;
-    this.stateMachine = stateMachineFactory.make(this);
-    ReadWriteLock rwLock = new ReentrantReadWriteLock();
-    this.readLock = rwLock.readLock();
-    this.writeLock = rwLock.writeLock();
-    this.startTime = startTime;
-    this.lastUpdateTime = -1;
-    this.completionTime = -1;
-  }
-
-  private void reset() {
-    try {
-      writeLock.lock();
-      this.startTime = -1;
-      this.lastUpdateTime = -1;
-      this.completionTime = -1;
-    }
-    finally {
-      writeLock.unlock();
-    }
-  }
-
-  static class NewActionTransition
-     implements SingleArcTransition<ActionImpl, ActionEvent> {
-
-    @Override
-    public void transition(ActionImpl action, ActionEvent event) {
-      ActionInitEvent e = (ActionInitEvent) event;
-      // TODO audit logs
-      action.reset();
-      action.setId(e.getActionId());
-      action.setStartTime(e.getStartTime());
-      LOG.info("Launching a new Action"
-          + ", actionId=" + action.getId()
-          + ", startTime=" + action.getStartTime());
-    }
-  }
-
-  static class ActionProgressUpdateTransition
-      implements SingleArcTransition<ActionImpl, ActionEvent> {
-
-    @Override
-    public void transition(ActionImpl action, ActionEvent event) {
-      ActionProgressUpdateEvent e = (ActionProgressUpdateEvent) event;
-      action.setLastUpdateTime(e.getProgressUpdateTime());
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Progress update for Action"
-            + ", actionId=" + action.getId()
-            + ", startTime=" + action.getStartTime()
-            + ", lastUpdateTime=" + action.getLastUpdateTime());
-      }
-    }
-  }
-
-  static class ActionCompletedTransition
-     implements SingleArcTransition<ActionImpl, ActionEvent> {
-
-    @Override
-    public void transition(ActionImpl action, ActionEvent event) {
-      // TODO audit logs
-      ActionCompletedEvent e = (ActionCompletedEvent) event;
-      action.setCompletionTime(e.getCompletionTime());
-      action.setLastUpdateTime(e.getCompletionTime());
-
-      LOG.info("Action completed successfully"
-          + ", actionId=" + action.getId()
-          + ", startTime=" + action.getStartTime()
-          + ", completionTime=" + action.getCompletionTime());
-    }
-  }
-
-  static class ActionFailedTransition
-      implements SingleArcTransition<ActionImpl, ActionEvent> {
-
-    @Override
-    public void transition(ActionImpl action, ActionEvent event) {
-      // TODO audit logs
-      ActionFailedEvent e = (ActionFailedEvent) event;
-      action.setCompletionTime(e.getCompletionTime());
-      action.setLastUpdateTime(e.getCompletionTime());
-      LOG.info("Action failed to complete"
-          + ", actionId=" + action.getId()
-          + ", startTime=" + action.getStartTime()
-          + ", completionTime=" + action.getCompletionTime());
-    }
-  }
-
-
-  @Override
-  public ActionState getState() {
-    try {
-      readLock.lock();
-      return stateMachine.getCurrentState();
-    }
-    finally {
-      readLock.unlock();
-    }
-  }
-
-  @Override
-  public void setState(ActionState state) {
-    try {
-      writeLock.lock();
-      stateMachine.setCurrentState(state);
-    }
-    finally {
-      writeLock.unlock();
-    }
-  }
-
-  @Override
-  public void handleEvent(ActionEvent event)
-      throws InvalidStateTransitionException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Handling Action event, eventType=" + event.getType().name()
-          + ", event=" + event.toString());
-    }
-    ActionState oldState = getState();
-    try {
-      writeLock.lock();
-      try {
-        stateMachine.doTransition(event.getType(), event);
-      } catch (InvalidStateTransitionException e) {
-        LOG.error("Can't handle Action event at current state"
-            + ", actionId=" + this.getId()
-            + ", currentState=" + oldState
-            + ", eventType=" + event.getType()
-            + ", event=" + event);
-        throw e;
-      }
-    }
-    finally {
-      writeLock.unlock();
-    }
-    if (oldState != getState()) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Action transitioned to a new state"
-            + ", actionId=" + this.getId()
-            + ", oldState=" + oldState
-            + ", currentState=" + getState()
-            + ", eventType=" + event.getType().name()
-            + ", event=" + event);
-      }
-    }
-  }
-
-  @Override
-  public ActionId getId() {
-    try {
-      readLock.lock();
-      return id;
-    }
-    finally {
-      readLock.unlock();
-    }
-  }
-
-  private void setId(ActionId id) {
-    try {
-      writeLock.lock();
-      this.id = id;
-    }
-    finally {
-      writeLock.unlock();
-    }
-  }
-
-  @Override
-  public long getStartTime() {
-    try {
-      readLock.lock();
-      return startTime;
-    }
-    finally {
-      readLock.unlock();
-    }
-  }
-
-  public void setStartTime(long startTime) {
-    try {
-      writeLock.lock();
-      this.startTime = startTime;
-    }
-    finally {
-      writeLock.unlock();
-    }
-  }
-
-  @Override
-  public long getLastUpdateTime() {
-    try {
-      readLock.lock();
-      return lastUpdateTime;
-    }
-    finally {
-      readLock.unlock();
-    }
-  }
-
-  public void setLastUpdateTime(long lastUpdateTime) {
-    try {
-      writeLock.lock();
-      this.lastUpdateTime = lastUpdateTime;
-    }
-    finally {
-      writeLock.unlock();
-    }
-
-  }
-
-  @Override
-  public long getCompletionTime() {
-    try {
-      readLock.lock();
-      return completionTime;
-    }
-    finally {
-      readLock.unlock();
-    }
-  }
-
-  public void setCompletionTime(long completionTime) {
-    try {
-      writeLock.lock();
-      this.completionTime = completionTime;
-    }
-    finally {
-      writeLock.unlock();
-    }
-  }
-
-
-}
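Driving the state machine above end to end (the id and timestamps are made up; the checked InvalidStateTransitionException is elided):

    ActionId id = new ActionId(1L, new ActionType("DECOMMISSION"));
    Action action = new ActionImpl(id, System.currentTimeMillis());

    action.getState();   // INIT
    action.handleEvent(new ActionProgressUpdateEvent(id, System.currentTimeMillis()));
    action.getState();   // IN_PROGRESS
    action.handleEvent(new ActionCompletedEvent(id, System.currentTimeMillis()));
    action.getState();   // COMPLETED
    // Any event other than ACTION_INIT now throws; ACTION_INIT resets to INIT.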
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/action/ActionInitEvent.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/action/ActionInitEvent.java
deleted file mode 100644
index ec81491..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/action/ActionInitEvent.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.action;
-
-public class ActionInitEvent extends ActionEvent {
-
-  private final long startTime;
-
-  public ActionInitEvent(ActionId actionId, long startTime) {
-    super(ActionEventType.ACTION_INIT, actionId);
-    this.startTime = startTime;
-  }
-
-  /**
-   * @return the start time of the Action
-   */
-  public long getStartTime() {
-    return startTime;
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/action/ActionProgressUpdateEvent.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/action/ActionProgressUpdateEvent.java
deleted file mode 100644
index ae4b19a..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/action/ActionProgressUpdateEvent.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.action;
-
-public class ActionProgressUpdateEvent extends ActionEvent {
-
-  private final long progressUpdateTime;
-
-  public ActionProgressUpdateEvent(ActionId actionId, long progressUpdateTime) {
-    super(ActionEventType.ACTION_IN_PROGRESS, actionId);
-    this.progressUpdateTime = progressUpdateTime;
-  }
-
-  /**
-   * @return the progressUpdateTime
-   */
-  public long getProgressUpdateTime() {
-    return progressUpdateTime;
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/action/ActionState.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/action/ActionState.java
deleted file mode 100644
index b988c87..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/action/ActionState.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.action;
-
-public enum ActionState {
-  /**
-   * Initial state for the Action, entered when it is first triggered
-   * or reset.
-   */
-  INIT,
-  /**
-   * State when the Action is executing on the cluster.
-   */
-  IN_PROGRESS,
-  /**
-   * State of successful completion
-   */
-  COMPLETED,
-  /**
-   * Action failed to complete successfully
-   */
-  FAILED
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/action/ActionType.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/action/ActionType.java
deleted file mode 100644
index 7960f74..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/action/ActionType.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.action;
-
-public class ActionType {
-
-  public final String actionName;
-
-  public ActionType(String actionName) {
-    super();
-    this.actionName = actionName;
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterFactory.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterFactory.java
deleted file mode 100644
index 27d0c06..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterFactory.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.cluster;
-
-import org.apache.ambari.server.orm.entities.ClusterEntity;
-import org.apache.ambari.server.state.Cluster;
-
-/**
- * Factory interface for Guice injections
- */
-public interface ClusterFactory {
-  Cluster create(ClusterEntity clusterEntity);
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
deleted file mode 100644
index 38f2cc4..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
+++ /dev/null
@@ -1,695 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.cluster;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.ServiceComponentHostNotFoundException;
-import org.apache.ambari.server.ServiceNotFoundException;
-import org.apache.ambari.server.controller.ClusterResponse;
-import org.apache.ambari.server.orm.dao.ClusterDAO;
-import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
-import org.apache.ambari.server.orm.entities.ClusterEntity;
-import org.apache.ambari.server.orm.entities.ClusterServiceEntity;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigFactory;
-import org.apache.ambari.server.state.Service;
-import org.apache.ambari.server.state.ServiceComponent;
-import org.apache.ambari.server.state.ServiceComponentHost;
-import org.apache.ambari.server.state.ServiceFactory;
-import org.apache.ambari.server.state.StackId;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.gson.Gson;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.assistedinject.Assisted;
-import com.google.inject.persist.Transactional;
-
-public class ClusterImpl implements Cluster {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ClusterImpl.class);
-
-  @Inject
-  private Clusters clusters;
-
-  private StackId desiredStackVersion;
-
-  private Map<String, Service> services = null;
-
-  /**
-   * [ Config Type -> [ Config Version Tag -> Config ] ]
-   */
-  private Map<String, Map<String, Config>> configs;
-
-  /**
-   * [ ServiceName -> [ ServiceComponentName -> [ HostName -> [ ... ] ] ] ]
-   */
-  private Map<String, Map<String, Map<String, ServiceComponentHost>>>
-      serviceComponentHosts;
-
-  /**
-   * [ HostName -> [ ... ] ]
-   */
-  private Map<String, List<ServiceComponentHost>>
-      serviceComponentHostsByHost;
-
-  private ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
-  private Lock readLock = readWriteLock.readLock();
-  private Lock writeLock = readWriteLock.writeLock();
-
-  private ClusterEntity clusterEntity;
-
-  @Inject
-  private ClusterDAO clusterDAO;
-//  @Inject
-//  private ClusterServiceDAO clusterServiceDAO;
-  @Inject
-  private ServiceFactory serviceFactory;
-  @Inject
-  private ConfigFactory configFactory;
-  @Inject
-  private Gson gson;
-  
-  private volatile boolean svcHostsLoaded = false;
-
-  @Inject
-  public ClusterImpl(@Assisted ClusterEntity clusterEntity,
-                     Injector injector) {
-    injector.injectMembers(this);
-    this.clusterEntity = clusterEntity;
-
-    this.serviceComponentHosts = new HashMap<String,
-        Map<String, Map<String, ServiceComponentHost>>>();
-    this.serviceComponentHostsByHost = new HashMap<String,
-        List<ServiceComponentHost>>();
-    this.desiredStackVersion = gson.fromJson(
-        clusterEntity.getDesiredStackVersion(), StackId.class);
-    configs = new HashMap<String, Map<String, Config>>();
-    if (!clusterEntity.getClusterConfigEntities().isEmpty()) {
-      for (ClusterConfigEntity entity : clusterEntity.getClusterConfigEntities()) {
-
-        if (!configs.containsKey(entity.getType())) {
-          configs.put(entity.getType(), new HashMap<String, Config>());
-        }
-
-        Config config = configFactory.createExisting(this, entity);
-
-        configs.get(entity.getType()).put(entity.getTag(), config);
-      }
-    }
-  }
-  
-  
-  /**
-   * Make sure we load all the service host components.
-   * We need this for live status checks.
-   */
-  public void loadServiceHostComponents() {
-    loadServices();
-    if (svcHostsLoaded) return;
-    writeLock.lock();
-    try {
-      if (svcHostsLoaded) return;
-      LOG.info("Loading Service Host Components");
-      if (svcHostsLoaded) return;
-      if (services != null) {
-        for (Entry<String, Service> serviceKV: services.entrySet()) {
-          /* get all the service component hosts */
-          Service service = serviceKV.getValue();
-          if (!serviceComponentHosts.containsKey(service.getName())) {
-            serviceComponentHosts.put(service.getName(), new HashMap<String,
-                Map<String, ServiceComponentHost>>());
-          }
-          for (Entry<String, ServiceComponent> svcComponent:
-              service.getServiceComponents().entrySet()) {
-            ServiceComponent comp = svcComponent.getValue();
-            String componentName = svcComponent.getKey();
-            if (!serviceComponentHosts.get(service.getName()).containsKey(componentName)) {
-              serviceComponentHosts.get(service.getName()).put(componentName,
-                  new HashMap<String, ServiceComponentHost>());
-            }
-            /* Get Service Host Components */
-            for (Entry<String, ServiceComponentHost> svchost:
-                comp.getServiceComponentHosts().entrySet()) {
-                String hostname = svchost.getKey();
-                ServiceComponentHost svcHostComponent = svchost.getValue();
-                if (!serviceComponentHostsByHost.containsKey(hostname)) {
-                  serviceComponentHostsByHost.put(hostname,
-                      new ArrayList<ServiceComponentHost>());
-                }
-                List<ServiceComponentHost> compList =  serviceComponentHostsByHost.get(hostname);
-                compList.add(svcHostComponent);
-
-                if (!serviceComponentHosts.get(service.getName()).get(componentName)
-                    .containsKey(hostname)) {
-                  serviceComponentHosts.get(service.getName()).get(componentName)
-                  .put(hostname, svcHostComponent);
-                }
-            }
-          }
-        }
-      }
-      svcHostsLoaded = true;
-    } finally {
-      writeLock.unlock();
-    }
-  }
-
-  private void loadServices() {
-    LOG.info("clusterEntity " + clusterEntity.getClusterServiceEntities() );
-    if (services == null) {
-      writeLock.lock();
-      try {
-        if (services == null) {
-          services = new TreeMap<String, Service>();
-          if (!clusterEntity.getClusterServiceEntities().isEmpty()) {
-            for (ClusterServiceEntity serviceEntity : clusterEntity.getClusterServiceEntities()) {
-              services.put(serviceEntity.getServiceName(), serviceFactory.createExisting(this, serviceEntity));
-            }
-          }
-        }
-      } finally {
-        writeLock.unlock();
-      }
-    }
-  }
-
-  public ServiceComponentHost getServiceComponentHost(String serviceName,
-      String serviceComponentName, String hostname) throws AmbariException {
-    loadServiceHostComponents();
-    readLock.lock();
-    try {
-      if (!serviceComponentHosts.containsKey(serviceName)
-          || !serviceComponentHosts.get(serviceName)
-              .containsKey(serviceComponentName)
-          || !serviceComponentHosts.get(serviceName).get(serviceComponentName)
-              .containsKey(hostname)) {
-        throw new ServiceComponentHostNotFoundException(getClusterName(), serviceName,
-            serviceComponentName, hostname);
-      }
-      return serviceComponentHosts.get(serviceName).get(serviceComponentName)
-          .get(hostname);
-    } finally {
-      readLock.unlock();
-    }
-  }
-
-  @Override
-  public String getClusterName() {
-    readLock.lock();
-    try {
-      return clusterEntity.getClusterName();
-    } finally {
-      readLock.unlock();
-    }
-  }
-
-  @Override
-  public void setClusterName(String clusterName) {
-    writeLock.lock();
-    try {
-      String oldName = clusterEntity.getClusterName();
-      clusterEntity.setClusterName(clusterName);
-      clusterDAO.merge(clusterEntity); //RollbackException possibility if UNIQUE constraint violated
-      clusters.updateClusterName(oldName, clusterName);
-    } finally {
-      writeLock.unlock();
-    }
-  }
-
-  public void addServiceComponentHost(
-      ServiceComponentHost svcCompHost) throws AmbariException {
-    loadServiceHostComponents();
-    writeLock.lock();
-    try {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Trying to add ServiceComponentHost to ClusterHostMap cache"
-            + ", serviceName=" + svcCompHost.getServiceName()
-            + ", componentName=" + svcCompHost.getServiceComponentName()
-            + ", hostname=" + svcCompHost.getHostName());
-      }
-
-      final String hostname = svcCompHost.getHostName();
-      final String serviceName = svcCompHost.getServiceName();
-      final String componentName = svcCompHost.getServiceComponentName();
-      Set<Cluster> cs = clusters.getClustersForHost(hostname);
-      boolean clusterFound = false;
-      Iterator<Cluster> iter = cs.iterator();
-      while (iter.hasNext()) {
-        Cluster c = iter.next();
-        if (c.getClusterId() == this.getClusterId()) {
-          clusterFound = true;
-          break;
-        }
-      }
-      if (!clusterFound) {
-        throw new AmbariException("Host does not belong this cluster"
-                + ", hostname=" + hostname
-                + ", clusterName=" + getClusterName()
-                + ", clusterId=" + getClusterId());
-      }
-
-      if (!serviceComponentHosts.containsKey(serviceName)) {
-        serviceComponentHosts.put(serviceName,
-            new HashMap<String, Map<String, ServiceComponentHost>>());
-      }
-      if (!serviceComponentHosts.get(serviceName).containsKey(componentName)) {
-        serviceComponentHosts.get(serviceName).put(componentName,
-            new HashMap<String, ServiceComponentHost>());
-      }
-
-      if (serviceComponentHosts.get(serviceName).get(componentName).
-          containsKey(hostname)) {
-        throw new AmbariException("Duplicate entry for ServiceComponentHost"
-            + ", serviceName=" + serviceName
-            + ", serviceComponentName" + componentName
-            + ", hostname= " + hostname);
-      }
-
-      if (!serviceComponentHostsByHost.containsKey(hostname)) {
-        serviceComponentHostsByHost.put(hostname,
-            new ArrayList<ServiceComponentHost>());
-      }
-
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Adding a new ServiceComponentHost"
-            + ", clusterName=" + getClusterName()
-            + ", clusterId=" + getClusterId()
-            + ", serviceName=" + serviceName
-            + ", serviceComponentName" + componentName
-            + ", hostname= " + hostname);
-      }
-
-      serviceComponentHosts.get(serviceName).get(componentName).put(hostname,
-          svcCompHost);
-      serviceComponentHostsByHost.get(hostname).add(svcCompHost);
-    } finally {
-      writeLock.unlock();
-    }
-  }
-
-  @Override
-  public long getClusterId() {
-    readLock.lock();
-    try {
-      return clusterEntity.getClusterId();
-    } finally {
-      readLock.unlock();
-    }
-  }
-
-  @Override
-  public List<ServiceComponentHost> getServiceComponentHosts(
-      String hostname) {
-    loadServiceHostComponents();
-    readLock.lock();
-    try {
-      if (serviceComponentHostsByHost.containsKey(hostname)) {
-        return Collections.unmodifiableList(
-            serviceComponentHostsByHost.get(hostname));
-      }
-      return new ArrayList<ServiceComponentHost>();
-    } finally {
-      readLock.unlock();
-    }
-  }
-
-  @Override
-  public void addService(Service service)
-      throws AmbariException {
-    loadServices();
-    writeLock.lock();
-    try {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Adding a new Service"
-            + ", clusterName=" + getClusterName()
-            + ", clusterId=" + getClusterId()
-            + ", serviceName=" + service.getName());
-      }
-      if (services.containsKey(service.getName())) {
-        throw new AmbariException("Service already exists"
-            + ", clusterName=" + getClusterName()
-            + ", clusterId=" + getClusterId()
-            + ", serviceName=" + service.getName());
-      }
-      this.services.put(service.getName(), service);
-    } finally {
-      writeLock.unlock();
-    }
-  }
-
-  @Override
-  public Service addService(String serviceName) throws AmbariException{
-    loadServices();
-    writeLock.lock();
-    try {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Adding a new Service"
-            + ", clusterName=" + getClusterName()
-            + ", clusterId=" + getClusterId()
-            + ", serviceName=" + serviceName);
-      }
-      if (services.containsKey(serviceName)) {
-        throw new AmbariException("Service already exists"
-            + ", clusterName=" + getClusterName()
-            + ", clusterId=" + getClusterId()
-            + ", serviceName=" + serviceName);
-      }
-      Service s = serviceFactory.createNew(this, serviceName);
-      this.services.put(s.getName(), s);
-      return s;
-    } finally {
-      writeLock.unlock();
-    }
-  }
-
-  @Override
-  public Service getService(String serviceName)
-      throws AmbariException {
-    loadServices();
-    readLock.lock();
-    try {
-      if (!services.containsKey(serviceName)) {
-        throw new ServiceNotFoundException(getClusterName(), serviceName);
-      }
-      return services.get(serviceName);
-    } finally {
-      readLock.unlock();
-    }
-  }
-
-  @Override
-  public Map<String, Service> getServices() {
-    loadServices();
-    readLock.lock();
-    try {
-      return Collections.unmodifiableMap(services);
-    } finally {
-      readLock.unlock();
-    }
-  }
-
-  @Override
-  public StackId getDesiredStackVersion() {
-    readLock.lock();
-    try {
-      return desiredStackVersion;
-    } finally {
-      readLock.unlock();
-    }
-  }
-
-  @Override
-  public void setDesiredStackVersion(StackId stackVersion) {
-    readWriteLock.writeLock().lock();
-    try {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Changing DesiredStackVersion of Cluster"
-            + ", clusterName=" + getClusterName()
-            + ", clusterId=" + getClusterId()
-            + ", currentDesiredStackVersion=" + this.desiredStackVersion
-            + ", newDesiredStackVersion=" + stackVersion);
-      }
-      this.desiredStackVersion = stackVersion;
-      clusterEntity.setDesiredStackVersion(gson.toJson(stackVersion));
-      clusterDAO.merge(clusterEntity);
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-
-  }
-
-  public StackId getDesiredState() {
-    //TODO separate implementation, mapped to StackVersion for now
-//    return desiredState; for separate implementation
-    readWriteLock.readLock().lock();
-    try {
-      return getDesiredStackVersion();
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
-  }
-
-  public void setDesiredState(StackId desiredState) {
-    //TODO separate implementation, mapped to StackVersion for now
-//    LOG.debug("Changing desired state of cluster, clusterName={}, clusterId={}, oldState={}, newState={}",
-//        getClusterName(), getClusterId(), this.desiredState, desiredState);
-//    clusterEntity.setDesiredClusterState(gson.toJson(desiredState));
-//    clusterDAO.merge(clusterEntity);
-//    this.desiredState = desiredState;
-    readWriteLock.writeLock().lock();
-    try {
-      setDesiredStackVersion(desiredState);
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-
-  }
-
-
-  @Override
-  public Map<String, Config> getDesiredConfigsByType(String configType) {
-    readWriteLock.writeLock().lock();
-    try {
-      if (!configs.containsKey(configType))
-        return null;
-
-      return Collections.unmodifiableMap(configs.get(configType));
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-
-  }
-
-  @Override
-  public Config getDesiredConfig(String configType, String versionTag) {
-    readWriteLock.readLock().lock();
-    try {
-      if (!configs.containsKey(configType)
-          || !configs.get(configType).containsKey(versionTag)) {
-        return null;
-      }
-      return configs.get(configType).get(versionTag);
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
-  }
-
-  @Override
-  public void addDesiredConfig(Config config) {
-    readWriteLock.writeLock().lock();
-    try {
-      if (config.getType() == null
-          || config.getType().isEmpty()
-          || config.getVersionTag() == null
-          || config.getVersionTag().isEmpty()) {
-        // TODO throw error
-      }
-      if (!configs.containsKey(config.getType())) {
-        configs.put(config.getType(), new HashMap<String, Config>());
-      }
-
-      configs.get(config.getType()).put(config.getVersionTag(), config);
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-
-  }
-
-  public Collection<Config> getAllConfigs() {
-    readWriteLock.readLock().lock();
-    try {
-      List<Config> list = new ArrayList<Config>();
-      for (Entry<String, Map<String, Config>> entry : configs.entrySet()) {
-        for (Config config : entry.getValue().values()) {
-          list.add(config);
-        }
-      }
-      return Collections.unmodifiableList(list);
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
-  }
-
-  @Override
-  public ClusterResponse convertToResponse()
-      throws AmbariException {
-    readWriteLock.readLock().lock();
-    try {
-      ClusterResponse r = new ClusterResponse(getClusterId(), getClusterName(),
-          clusters.getHostsForCluster(getClusterName()).keySet(),
-          getDesiredStackVersion().getStackId());
-      return r;
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
-  }
-
-  public void debugDump(StringBuilder sb) {
-    loadServices();
-    readWriteLock.readLock().lock();
-    try {
-      sb.append("Cluster={ clusterName=" + getClusterName()
-          + ", clusterId=" + getClusterId()
-          + ", desiredStackVersion=" + desiredStackVersion.getStackId()
-          + ", services=[ ");
-      boolean first = true;
-      for (Service s : services.values()) {
-        if (!first) {
-          sb.append(" , ");
-        }
-        first = false;
-        sb.append("\n    ");
-        s.debugDump(sb);
-        sb.append(" ");
-      }
-      sb.append(" ] }");
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
-  }
-
-  @Override
-  @Transactional
-  public void refresh() {
-    readWriteLock.writeLock().lock();
-    try {
-      clusterEntity = clusterDAO.findById(clusterEntity.getClusterId());
-      clusterDAO.refresh(clusterEntity);
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-
-  }
-
-  @Override
-  @Transactional
-  public void deleteAllServices() throws AmbariException {
-    loadServices();
-    readWriteLock.writeLock().lock();
-    try {
-      LOG.info("Deleting all services for cluster"
-          + ", clusterName=" + getClusterName());
-      for (Service service : services.values()) {
-        if (!service.canBeRemoved()) {
-          throw new AmbariException("Found non removable service when trying to"
-              + " all services from cluster"
-              + ", clusterName=" + getClusterName()
-              + ", serviceName=" + service.getName());
-        }
-      }
-
-      for (Service service : services.values()) {
-        service.delete();
-      }
-
-      services.clear();
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-
-  }
-
-  @Override
-  public void deleteService(String serviceName)
-      throws AmbariException {
-    loadServices();
-    readWriteLock.writeLock().lock();
-    try {
-      Service service = getService(serviceName);
-      LOG.info("Deleting service for cluster"
-          + ", clusterName=" + getClusterName()
-          + ", serviceName=" + service.getName());
-      // FIXME check dependencies from meta layer
-      if (!service.canBeRemoved()) {
-        throw new AmbariException("Could not delete service from cluster"
-            + ", clusterName=" + getClusterName()
-            + ", serviceName=" + service.getName());
-      }
-      service.delete();
-      services.remove(serviceName);
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-
-  }
-
-  @Override
-  public boolean canBeRemoved() {
-    loadServices();
-    readWriteLock.readLock().lock();
-    try {
-      boolean safeToRemove = true;
-      for (Service service : services.values()) {
-        if (!service.canBeRemoved()) {
-          safeToRemove = false;
-          LOG.warn("Found non removable service"
-              + ", clusterName=" + getClusterName()
-              + ", serviceName=" + service.getName());
-        }
-      }
-      return safeToRemove;
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
-  }
-
-  @Override
-  @Transactional
-  public void delete() throws AmbariException {
-    readWriteLock.writeLock().lock();
-    try {
-      deleteAllServices();
-      removeEntities();
-      configs.clear();
-    } finally {
-      readWriteLock.writeLock().unlock();
-    }
-
-  }
-
-  @Transactional
-  protected void removeEntities() throws AmbariException {
-    clusterDAO.removeByPK(getClusterId());
-  }
-}
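loadServiceHostComponents() and loadServices() above share one pattern: a cheap volatile/null check outside the lock, then a re-check under the write lock so only the first thread populates the cache. Distilled into a standalone sketch (all names illustrative):

    import java.util.concurrent.locks.Lock;
    import java.util.concurrent.locks.ReadWriteLock;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    // Double-checked lazy load, as used by ClusterImpl.
    final class LazyCache<T> {
      interface Loader<T> { T load(); }

      private final ReadWriteLock rwl = new ReentrantReadWriteLock();
      private final Lock writeLock = rwl.writeLock();
      private volatile T value;

      T get(Loader<T> loader) {
        T v = value;
        if (v != null) {
          return v;              // fast path: already loaded, no lock taken
        }
        writeLock.lock();
        try {
          if (value == null) {   // re-check: another thread may have loaded it
            value = loader.load();
          }
          return value;
        } finally {
          writeLock.unlock();
        }
      }
    }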
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
deleted file mode 100644
index 6022695..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
+++ /dev/null
@@ -1,449 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.cluster;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-import javax.persistence.RollbackException;
-
-import com.google.gson.Gson;
-import com.google.inject.persist.Transactional;
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.ClusterNotFoundException;
-import org.apache.ambari.server.DuplicateResourceException;
-import org.apache.ambari.server.HostNotFoundException;
-import org.apache.ambari.server.agent.DiskInfo;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.orm.dao.ClusterDAO;
-import org.apache.ambari.server.orm.dao.HostDAO;
-import org.apache.ambari.server.orm.entities.ClusterEntity;
-import org.apache.ambari.server.orm.entities.HostEntity;
-import org.apache.ambari.server.state.*;
-import org.apache.ambari.server.state.HostHealthStatus.HealthStatus;
-import org.apache.ambari.server.state.host.HostFactory;
-import org.apache.ambari.server.state.host.HostImpl;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.inject.Inject;
-import com.google.inject.Singleton;
-
-@Singleton
-public class ClustersImpl implements Clusters {
-
-  private static final Logger LOG = LoggerFactory.getLogger(
-      ClustersImpl.class);
-
-  private ConcurrentHashMap<String, Cluster> clusters;
-  private ConcurrentHashMap<Long, Cluster> clustersById;
-  private ConcurrentHashMap<String, Host> hosts;
-  private ConcurrentHashMap<String, Set<Cluster>> hostClusterMap;
-  private ConcurrentHashMap<String, Set<Host>> clusterHostMap;
-
-  private final ReentrantReadWriteLock rwl = new ReentrantReadWriteLock();
-  private final Lock r = rwl.readLock();
-  private final Lock w = rwl.writeLock();
-
-  volatile boolean clustersLoaded = false;
-
-  @Inject
-  ClusterDAO clusterDAO;
-  @Inject
-  HostDAO hostDAO;
-  @Inject
-  ClusterFactory clusterFactory;
-  @Inject
-  HostFactory hostFactory;
-  @Inject
-  AmbariMetaInfo ambariMetaInfo;
-  @Inject
-  Gson gson;
-
-  @Inject
-  public ClustersImpl() {
-    clusters = new ConcurrentHashMap<String, Cluster>();
-    clustersById = new ConcurrentHashMap<Long, Cluster>();
-    hosts = new ConcurrentHashMap<String, Host>();
-    hostClusterMap = new ConcurrentHashMap<String, Set<Cluster>>();
-    clusterHostMap = new ConcurrentHashMap<String, Set<Host>>();
-
-    LOG.info("Initializing the ClustersImpl");
-  }
-
-  @Transactional
-  void loadClustersAndHosts() {
-    if (!clustersLoaded) {
-      w.lock();
-      try {
-        if (!clustersLoaded) {
-          for (ClusterEntity clusterEntity : clusterDAO.findAll()) {
-            Cluster currentCluster = clusterFactory.create(clusterEntity);
-            clusters.put(clusterEntity.getClusterName(), currentCluster);
-            clustersById.put(currentCluster.getClusterId(), currentCluster);
-            clusterHostMap.put(currentCluster.getClusterName(), Collections.newSetFromMap(new ConcurrentHashMap<Host, Boolean>()));
-          }
-
-          for (HostEntity hostEntity : hostDAO.findAll()) {
-            Host host = hostFactory.create(hostEntity, true);
-            hosts.put(hostEntity.getHostName(), host);
-            Set<Cluster> cSet = Collections.newSetFromMap(new ConcurrentHashMap<Cluster, Boolean>());
-            hostClusterMap.put(hostEntity.getHostName(), cSet);
-
-            for (ClusterEntity clusterEntity : hostEntity.getClusterEntities()) {
-              clusterHostMap.get(clusterEntity.getClusterName()).add(host);
-              cSet.add(clusters.get(clusterEntity.getClusterName()));
-            }
-          }
-        }
-        clustersLoaded = true;
-      } finally {
-        w.unlock();
-      }
-    }
-  }
-
-  @Override
-  public void addCluster(String clusterName)
-      throws AmbariException {
-    loadClustersAndHosts();
-
-    if (clusters.containsKey(clusterName)) {
-      throw new DuplicateResourceException("Attempted to create a Cluster which already exists"
-          + ", clusterName=" + clusterName);
-    }
-
-    w.lock();
-    try {
-      if (clusters.containsKey(clusterName)) {
-        throw new DuplicateResourceException("Attempted to create a Cluster which already exists"
-            + ", clusterName=" + clusterName);
-      }
-      // retrieve new cluster id
-      // add cluster id -> cluster mapping into clustersById
-      ClusterEntity clusterEntity = new ClusterEntity();
-      clusterEntity.setClusterName(clusterName);
-      clusterEntity.setDesiredStackVersion(gson.toJson(new StackId()));
-
-      try {
-        clusterDAO.create(clusterEntity);
-        clusterEntity = clusterDAO.merge(clusterEntity);
-      } catch (RollbackException e) {
-        LOG.warn("Unable to create cluster " + clusterName, e);
-        throw new AmbariException("Unable to create cluster " + clusterName, e);
-      }
-
-      Cluster cluster = clusterFactory.create(clusterEntity);
-      clusters.put(clusterName, cluster);
-      clustersById.put(cluster.getClusterId(), cluster);
-      clusterHostMap.put(clusterName, Collections.newSetFromMap(new ConcurrentHashMap<Host, Boolean>()));
-    } finally {
-      w.unlock();
-    }
-  }
-
-  @Override
-  public Cluster getCluster(String clusterName)
-      throws AmbariException {
-    loadClustersAndHosts();
-    r.lock();
-    try {
-      if (!clusters.containsKey(clusterName)) {
-        throw new ClusterNotFoundException(clusterName);
-      }
-      return clusters.get(clusterName);
-    } finally {
-      r.unlock();
-    }
-  }
-
-  @Override
-  public Cluster getClusterById(long id) throws AmbariException {
-    loadClustersAndHosts();
-    r.lock();
-    try {
-      if (!clustersById.containsKey(id)) {
-        throw new ClusterNotFoundException("clusterID=" + id);
-      }
-      return clustersById.get(id);
-    } finally {
-      r.unlock();
-    }
-  }
-
-  @Override
-  @Transactional
-  public List<Host> getHosts() {
-    loadClustersAndHosts();
-    r.lock();
-
-    try {
-      List<Host> hostList = new ArrayList<Host>(hosts.size());
-      hostList.addAll(hosts.values());
-      return hostList;
-    } finally {
-      r.unlock();
-    }
-  }
-
-  @Override
-  public Set<Cluster> getClustersForHost(String hostname)
-      throws AmbariException {
-    loadClustersAndHosts();
-    r.lock();
-    try {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Looking up clusters for hostname"
-            + ", hostname=" + hostname
-            + ", mappedClusters=" + hostClusterMap.get(hostname).size());
-      }
-      return Collections.unmodifiableSet(hostClusterMap.get(hostname));
-    } finally {
-      r.unlock();
-    }
-  }
-
-  @Override
-  @Transactional
-  public Host getHost(String hostname) throws AmbariException {
-    loadClustersAndHosts();
-    r.lock();
-    try {
-      if (!hosts.containsKey(hostname)) {
-        throw new HostNotFoundException(hostname);
-      }
-      return hosts.get(hostname);
-    } finally {
-      r.unlock();
-    }
-  }
-
-  @Override
-  public void addHost(String hostname) throws AmbariException {
-    loadClustersAndHosts();
-    String duplicateMessage = "Duplicate entry for Host"
-        + ", hostName= " + hostname;
-
-    if (hosts.containsKey(hostname)) {
-      throw new AmbariException(duplicateMessage);
-    }
-    w.lock();
-
-    try {
-      HostEntity hostEntity = new HostEntity();
-      hostEntity.setHostName(hostname);
-      hostEntity.setClusterEntities(new ArrayList<ClusterEntity>());
-      //not stored to DB
-      Host host = hostFactory.create(hostEntity, false);
-      host.setAgentVersion(new AgentVersion(""));
-      List<DiskInfo> emptyDiskList = new ArrayList<DiskInfo>();
-      host.setDisksInfo(emptyDiskList);
-      host.setHealthStatus(new HostHealthStatus(HealthStatus.UNKNOWN, ""));
-      host.setHostAttributes(new HashMap<String, String>());
-      host.setState(HostState.INIT);
-      hosts.put(hostname, host);
-      hostClusterMap.put(hostname, Collections.newSetFromMap(new ConcurrentHashMap<Cluster, Boolean>()));
-
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Adding a host to Clusters"
-            + ", hostname=" + hostname);
-      }
-    } finally {
-      w.unlock();
-    }
-  }
-
-  private boolean isOsSupportedByClusterStack(Cluster c, Host h) {
-    Map<String, List<RepositoryInfo>> repos =
-        ambariMetaInfo.getRepository(c.getDesiredStackVersion().getStackName(),
-            c.getDesiredStackVersion().getStackVersion());
-    if (repos == null || repos.isEmpty()) {
-      return false;
-    }
-    return repos.containsKey(h.getOsType());
-  }
-
-  @Override
-  public void mapHostToCluster(String hostname,
-                               String clusterName) throws AmbariException {
-    loadClustersAndHosts();
-    w.lock();
-
-    try {
-      Host host = getHost(hostname);
-      Cluster cluster = getCluster(clusterName);
-
-      for (Cluster c : hostClusterMap.get(hostname)) {
-        if (c.getClusterName().equals(clusterName)) {
-          throw new DuplicateResourceException("Attempted to create a host which already exists: clusterName=" +
-              clusterName + ", hostName=" + hostname);
-        }
-      }
-
-      if (!isOsSupportedByClusterStack(cluster, host)) {
-        String message = "Trying to map host to cluster where stack does not"
-            + " support host's os type"
-            + ", clusterName=" + clusterName
-            + ", clusterStackId=" + cluster.getDesiredStackVersion().getStackId()
-            + ", hostname=" + hostname
-            + ", hostOsType=" + host.getOsType();
-        LOG.warn(message);
-        throw new AmbariException(message);
-      }
-
-      mapHostClusterEntities(hostname, cluster.getClusterId());
-
-      hostClusterMap.get(hostname).add(cluster);
-      clusterHostMap.get(clusterName).add(host);
-
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Mapping a host to a cluster"
-            + ", clusterName=" + clusterName
-            + ", clusterId=" + cluster.getClusterId()
-            + ", hostname=" + hostname);
-      }
-    } finally {
-      w.unlock();
-    }
-  }
-
-  @Transactional
-  void mapHostClusterEntities(String hostName, Long clusterId) {
-    HostEntity hostEntity = hostDAO.findByName(hostName);
-    ClusterEntity clusterEntity = clusterDAO.findById(clusterId);
-
-    hostEntity.getClusterEntities().add(clusterEntity);
-    clusterEntity.getHostEntities().add(hostEntity);
-
-    clusterDAO.merge(clusterEntity);
-    hostDAO.merge(hostEntity);
-  }
-
-  @Override
-  @Transactional
-  public Map<String, Cluster> getClusters() {
-    loadClustersAndHosts();
-    r.lock();
-    try {
-      return Collections.unmodifiableMap(clusters);
-    } finally {
-      r.unlock();
-    }
-  }
-
-  @Override
-  public void mapHostsToCluster(Set<String> hostnames,
-                                             String clusterName) throws AmbariException {
-    loadClustersAndHosts();
-    w.lock();
-    try {
-      for (String hostname : hostnames) {
-        mapHostToCluster(hostname, clusterName);
-      }
-    } finally {
-      w.unlock();
-    }
-  }
-
-  @Override
-  public void updateClusterName(String oldName, String newName) {
-    w.lock();
-    try {
-      clusters.put(newName, clusters.remove(oldName));
-      clusterHostMap.put(newName, clusterHostMap.remove(oldName));
-    } finally {
-      w.unlock();
-    }
-  }
-
-
-  public void debugDump(StringBuilder sb) {
-    r.lock();
-    try {
-      sb.append("Clusters=[ ");
-      boolean first = true;
-      for (Cluster c : clusters.values()) {
-        if (!first) {
-          sb.append(" , ");
-        }
-        first = false;
-        sb.append("\n  ");
-        c.debugDump(sb);
-        sb.append(" ");
-      }
-      sb.append(" ]");
-    } finally {
-      r.unlock();
-    }
-  }
-
-  @Override
-  @Transactional
-  public Map<String, Host> getHostsForCluster(String clusterName)
-      throws AmbariException {
-    loadClustersAndHosts();
-    r.lock();
-
-    try {
-      Map<String, Host> hosts = new HashMap<String, Host>();
-
-      for (Host h : clusterHostMap.get(clusterName)) {
-        hosts.put(h.getHostName(), h);
-      }
-
-      return hosts;
-    } finally {
-      r.unlock();
-    }
-  }
-
-  @Override
-  public synchronized void deleteCluster(String clusterName)
-      throws AmbariException {
-    loadClustersAndHosts();
-    w.lock();
-    try {
-      Cluster cluster = getCluster(clusterName);
-      if (!cluster.canBeRemoved()) {
-        throw new AmbariException("Could not delete cluster"
-            + ", clusterName=" + clusterName);
-      }
-      LOG.info("Deleting cluster " + cluster.getClusterName());
-      cluster.delete();
-
-      //clear maps
-      for (Set<Cluster> clusterSet : hostClusterMap.values()) {
-        clusterSet.remove(cluster);
-      }
-      clusterHostMap.remove(cluster.getClusterName());
-      clusters.remove(clusterName);
-    } finally {
-      w.unlock();
-    }
-  }
-
-}
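ClustersImpl indexes hosts and clusters in ConcurrentHashMaps and builds thread-safe Set values with Collections.newSetFromMap. That idiom in isolation (the demo class is illustrative):

    import java.util.Collections;
    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;

    // A concurrent Set backed by a ConcurrentHashMap, the same idiom used
    // for the values of hostClusterMap and clusterHostMap above.
    public class ConcurrentSetDemo {
      public static void main(String[] args) {
        Set<String> hosts =
            Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
        hosts.add("host1.example.com");
        hosts.add("host1.example.com");   // duplicate adds are no-ops
        System.out.println(hosts.size()); // prints 1
      }
    }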
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/InvalidStateTransitionException.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/InvalidStateTransitionException.java
deleted file mode 100644
index 074912c..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/InvalidStateTransitionException.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-package org.apache.ambari.server.state.fsm;
-
-/**
- * Exception thrown when a StateMachine encounters an invalid
- * event at its current state.
- */
-@SuppressWarnings("serial")
-public class InvalidStateTransitionException extends Exception {
-
-  private Enum<?> currentState;
-  private Enum<?> event;
-
-  public InvalidStateTransitionException(Enum<?> currentState, Enum<?> event) {
-    super("Invalid event: " + event + " at " + currentState);
-    this.currentState = currentState;
-    this.event = event;
-  }
-
-  public Enum<?> getCurrentState() {
-    return currentState;
-  }
-
-  public Enum<?> getEvent() {
-    return event;
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/MultipleArcTransition.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/MultipleArcTransition.java
deleted file mode 100644
index b4c688f..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/MultipleArcTransition.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-package org.apache.ambari.server.state.fsm;
-
-/**
- * Hook for Transition.
- * Post state is decided by Transition hook. Post state must be one of the
- * valid post states registered in StateMachine.
- */
-public interface MultipleArcTransition
-        <OPERAND, EVENT, STATE extends Enum<STATE>> {
-
-  /**
-   * Transition hook.
-   * @return the postState. Post state must be one of the
-   *                      valid post states registered in StateMachine.
-   * @param operand the entity attached to the FSM, whose internal
-   *                state may change.
-   * @param event causal event
-   */
-  public STATE transition(OPERAND operand, EVENT event);
-
-}
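A MultipleArcTransition hook picks the post state at transition time, and the state machine then verifies the choice against the registered set of valid post states. A hedged sketch against the interface above; Job, JobEvent, and JobState are invented for illustration:

    import org.apache.ambari.server.state.fsm.MultipleArcTransition;

    enum JobState { RUNNING, SUCCEEDED, FAILED }

    class JobEvent {
      private final boolean error;
      JobEvent(boolean error) { this.error = error; }
      boolean isError() { return error; }
    }

    class Job { }

    // Routes the operand to SUCCEEDED or FAILED depending on the event.
    class CompletionArc implements MultipleArcTransition<Job, JobEvent, JobState> {
      public JobState transition(Job job, JobEvent event) {
        return event.isError() ? JobState.FAILED : JobState.SUCCEEDED;
      }
    }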
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/SingleArcTransition.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/SingleArcTransition.java
deleted file mode 100644
index c802e2a..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/SingleArcTransition.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-package org.apache.ambari.server.state.fsm;
-
-/**
- * Hook for Transition. This leads the state machine to move to
- * the post state registered in the state machine.
- */
-public interface SingleArcTransition<OPERAND, EVENT> {
-  /**
-   * Transition hook.
-   *
-   * @param operand the entity attached to the FSM, whose internal
-   *                state may change.
-   * @param event causal event
-   */
-  public void transition(OPERAND operand, EVENT event);
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/StateMachine.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/StateMachine.java
deleted file mode 100644
index e8e2813..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/StateMachine.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-package org.apache.ambari.server.state.fsm;
-
-public interface StateMachine
-                 <STATE extends Enum<STATE>,
-                  EVENTTYPE extends Enum<EVENTTYPE>, EVENT> {
-  public STATE getCurrentState();
-  public void setCurrentState(STATE state);
-  public STATE doTransition(EVENTTYPE eventType, EVENT event)
-        throws InvalidStateTransitionException;
-}
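The StateMachineFactory removed next ties these interfaces together: transitions are registered on an immutable factory, installTopology() precomputes the table, and make() stamps out machines. A hedged usage sketch against that API; Task, TaskState, TaskEventType, and the transitions are assumptions for the example:

    import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
    import org.apache.ambari.server.state.fsm.StateMachine;
    import org.apache.ambari.server.state.fsm.StateMachineFactory;

    enum TaskState { INIT, RUNNING, DONE }
    enum TaskEventType { START, FINISH }
    class TaskEvent { }
    class Task { }

    class TaskFsmDemo {
      // Each addTransition returns a new factory; the final installTopology()
      // precomputes the transition table so machines build without locking.
      private static final StateMachineFactory<Task, TaskState, TaskEventType, TaskEvent>
          FACTORY = new StateMachineFactory<Task, TaskState, TaskEventType, TaskEvent>(
                TaskState.INIT)
              .addTransition(TaskState.INIT, TaskState.RUNNING, TaskEventType.START)
              .addTransition(TaskState.RUNNING, TaskState.DONE, TaskEventType.FINISH)
              .installTopology();

      public static void main(String[] args) throws InvalidStateTransitionException {
        StateMachine<TaskState, TaskEventType, TaskEvent> sm = FACTORY.make(new Task());
        sm.doTransition(TaskEventType.START, new TaskEvent());
        System.out.println(sm.getCurrentState()); // RUNNING
      }
    }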
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/StateMachineFactory.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/StateMachineFactory.java
deleted file mode 100644
index 4f6c54b..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/StateMachineFactory.java
+++ /dev/null
@@ -1,486 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-package org.apache.ambari.server.state.fsm;
-
-import java.util.EnumMap;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-import java.util.Stack;
-
-/**
- * State machine topology.
- * This object is semantically immutable.  If you have a
- * StateMachineFactory there's no operation in the API that changes
- * its semantic properties.
- *
- * @param <OPERAND> The object type on which this state machine operates.
- * @param <STATE> The state of the entity.
- * @param <EVENTTYPE> The external eventType to be handled.
- * @param <EVENT> The event object.
- *
- */
-final public class StateMachineFactory
-             <OPERAND, STATE extends Enum<STATE>,
-              EVENTTYPE extends Enum<EVENTTYPE>, EVENT> {
-
-  private final TransitionsListNode transitionsListNode;
-
-  private Map<STATE, Map<EVENTTYPE,
-    Transition<OPERAND, STATE, EVENTTYPE, EVENT>>> stateMachineTable;
-
-  private STATE defaultInitialState;
-
-  private final boolean optimized;
-
-  /**
-   * Constructor
-   *
-   * This is the only constructor in the API.
-   *
-   */
-  public StateMachineFactory(STATE defaultInitialState) {
-    this.transitionsListNode = null;
-    this.defaultInitialState = defaultInitialState;
-    this.optimized = false;
-    this.stateMachineTable = null;
-  }
-
-  private StateMachineFactory
-      (StateMachineFactory<OPERAND, STATE, EVENTTYPE, EVENT> that,
-       ApplicableTransition t) {
-    this.defaultInitialState = that.defaultInitialState;
-    this.transitionsListNode
-        = new TransitionsListNode(t, that.transitionsListNode);
-    this.optimized = false;
-    this.stateMachineTable = null;
-  }
-
-  private StateMachineFactory
-      (StateMachineFactory<OPERAND, STATE, EVENTTYPE, EVENT> that,
-       boolean optimized) {
-    this.defaultInitialState = that.defaultInitialState;
-    this.transitionsListNode = that.transitionsListNode;
-    this.optimized = optimized;
-    if (optimized) {
-      makeStateMachineTable();
-    } else {
-      stateMachineTable = null;
-    }
-  }
-
-  private interface ApplicableTransition
-             <OPERAND, STATE extends Enum<STATE>,
-              EVENTTYPE extends Enum<EVENTTYPE>, EVENT> {
-    void apply(StateMachineFactory<OPERAND, STATE, EVENTTYPE, EVENT> subject);
-  }
-
-  private class TransitionsListNode {
-    final ApplicableTransition transition;
-    final TransitionsListNode next;
-
-    TransitionsListNode
-        (ApplicableTransition transition, TransitionsListNode next) {
-      this.transition = transition;
-      this.next = next;
-    }
-  }
-
-  static private class ApplicableSingleOrMultipleTransition
-             <OPERAND, STATE extends Enum<STATE>,
-              EVENTTYPE extends Enum<EVENTTYPE>, EVENT>
-          implements ApplicableTransition<OPERAND, STATE, EVENTTYPE, EVENT> {
-    final STATE preState;
-    final EVENTTYPE eventType;
-    final Transition<OPERAND, STATE, EVENTTYPE, EVENT> transition;
-
-    ApplicableSingleOrMultipleTransition
-        (STATE preState, EVENTTYPE eventType,
-         Transition<OPERAND, STATE, EVENTTYPE, EVENT> transition) {
-      this.preState = preState;
-      this.eventType = eventType;
-      this.transition = transition;
-    }
-
-    @Override
-    public void apply
-             (StateMachineFactory<OPERAND, STATE, EVENTTYPE, EVENT> subject) {
-      Map<EVENTTYPE, Transition<OPERAND, STATE, EVENTTYPE, EVENT>> transitionMap
-        = subject.stateMachineTable.get(preState);
-      if (transitionMap == null) {
-        // I use HashMap here because I would expect most EVENTTYPE's to not
-        //  apply out of a particular state, so FSM sizes would be
-        //  quadratic if I use EnumMap's here as I do at the top level.
-        transitionMap = new HashMap<EVENTTYPE,
-          Transition<OPERAND, STATE, EVENTTYPE, EVENT>>();
-        subject.stateMachineTable.put(preState, transitionMap);
-      }
-      transitionMap.put(eventType, transition);
-    }
-  }
-
-  /**
-   * @return a NEW StateMachineFactory just like {@code this} with the current
-   *          transition added as a new legal transition.  This overload
-   *          has no hook object.
-   *
-   *         Note that the returned StateMachineFactory is a distinct
-   *         object.
-   *
-   *         This method is part of the API.
-   *
-   * @param preState pre-transition state
-   * @param postState post-transition state
-   * @param eventType stimulus for the transition
-   */
-  public StateMachineFactory
-             <OPERAND, STATE, EVENTTYPE, EVENT>
-          addTransition(STATE preState, STATE postState, EVENTTYPE eventType) {
-    return addTransition(preState, postState, eventType, null);
-  }
-
-  /**
-   * @return a NEW StateMachineFactory just like {@code this} with the current
-   *          transition added as a new legal transition.  This overload
-   *          has no hook object.
-   *
-   *         Note that the returned StateMachineFactory is a distinct
-   *         object.
-   *
-   *         This method is part of the API.
-   *
-   * @param preState pre-transition state
-   * @param postState post-transition state
-   * @param eventTypes List of stimuli for the transitions
-   */
-  public StateMachineFactory<OPERAND, STATE, EVENTTYPE, EVENT> addTransition(
-      STATE preState, STATE postState, Set<EVENTTYPE> eventTypes) {
-    return addTransition(preState, postState, eventTypes, null);
-  }
-
-  /**
-   * @return a NEW StateMachineFactory just like {@code this} with the current
-   *          transition added as a new legal transition
-   *
-   *         Note that the returned StateMachineFactory is a distinct
-   *         object.
-   *
-   *         This method is part of the API.
-   *
-   * @param preState pre-transition state
-   * @param postState post-transition state
-   * @param eventTypes List of stimuli for the transitions
-   * @param hook transition hook
-   */
-  public StateMachineFactory<OPERAND, STATE, EVENTTYPE, EVENT> addTransition(
-      STATE preState, STATE postState, Set<EVENTTYPE> eventTypes,
-      SingleArcTransition<OPERAND, EVENT> hook) {
-    StateMachineFactory<OPERAND, STATE, EVENTTYPE, EVENT> factory = null;
-    for (EVENTTYPE event : eventTypes) {
-      if (factory == null) {
-        factory = addTransition(preState, postState, event, hook);
-      } else {
-        factory = factory.addTransition(preState, postState, event, hook);
-      }
-    }
-    return factory;
-  }
-
-  /**
-   * @return a NEW StateMachineFactory just like {@code this} with the current
-   *          transition added as a new legal transition
-   *
-   *         Note that the returned StateMachineFactory is a distinct object.
-   *
-   *         This method is part of the API.
-   *
-   * @param preState pre-transition state
-   * @param postState post-transition state
-   * @param eventType stimulus for the transition
-   * @param hook transition hook
-   */
-  public StateMachineFactory
-             <OPERAND, STATE, EVENTTYPE, EVENT>
-          addTransition(STATE preState, STATE postState,
-                        EVENTTYPE eventType,
-                        SingleArcTransition<OPERAND, EVENT> hook){
-    return new StateMachineFactory
-        (this, new ApplicableSingleOrMultipleTransition
-           (preState, eventType, new SingleInternalArc(postState, hook)));
-  }
-
-  /**
-   * @return a NEW StateMachineFactory just like {@code this} with the current
-   *          transition added as a new legal transition
-   *
-   *         Note that the returned StateMachineFactory is a distinct object.
-   *
-   *         This method is part of the API.
-   *
-   * @param preState pre-transition state
-   * @param postStates valid post-transition states
-   * @param eventType stimulus for the transition
-   * @param hook transition hook
-   */
-  public StateMachineFactory
-             <OPERAND, STATE, EVENTTYPE, EVENT>
-          addTransition(STATE preState, Set<STATE> postStates,
-                        EVENTTYPE eventType,
-                        MultipleArcTransition<OPERAND, EVENT, STATE> hook){
-    return new StateMachineFactory
-        (this,
-         new ApplicableSingleOrMultipleTransition
-           (preState, eventType, new MultipleInternalArc(postStates, hook)));
-  }
-
-  /**
-   * @return a StateMachineFactory just like {@code this}, except that if
-   *         you won't need any synchronization to build a state machine
-   *
-   *         Note that the returned StateMachineFactory is a distinct object.
-   *
-   *         This method is part of the API.
-   *
-   *         The only way you could distinguish the returned
-   *         StateMachineFactory from {@code this} would be by
-   *         measuring the performance of the derived
-   *         {@code StateMachine} you can get from it.
-   *
-   * Calling this is optional.  It doesn't change the semantics of the
-   *   factory; if you call it, building machines from the factory needs
-   *   no synchronization.
-   */
-  public StateMachineFactory
-             <OPERAND, STATE, EVENTTYPE, EVENT>
-          installTopology() {
-    return new StateMachineFactory(this, true);
-  }
-
-  /**
-   * Effect a transition in response to a stimulus.
-   * @param operand the entity attached to the FSM
-   * @param oldState current state
-   * @param eventType trigger to initiate the transition
-   * @param event causal event context
-   * @return transitioned state
-   */
-  private STATE doTransition
-           (OPERAND operand, STATE oldState, EVENTTYPE eventType, EVENT event)
-      throws InvalidStateTransitionException {
-    // We can assume that stateMachineTable is non-null because we call
-    //  maybeMakeStateMachineTable() when we build an InternalStateMachine,
-    //  and this code only gets called from inside a working InternalStateMachine.
-    Map<EVENTTYPE, Transition<OPERAND, STATE, EVENTTYPE, EVENT>> transitionMap
-      = stateMachineTable.get(oldState);
-    if (transitionMap != null) {
-      Transition<OPERAND, STATE, EVENTTYPE, EVENT> transition
-          = transitionMap.get(eventType);
-      if (transition != null) {
-        return transition.doTransition(operand, oldState, event, eventType);
-      }
-    }
-    throw new InvalidStateTransitionException(oldState, eventType);
-  }
-
-  private synchronized void maybeMakeStateMachineTable() {
-    if (stateMachineTable == null) {
-      makeStateMachineTable();
-    }
-  }
-
-  private void makeStateMachineTable() {
-    Stack<ApplicableTransition> stack = new Stack<ApplicableTransition>();
-
-    Map<STATE, Map<EVENTTYPE, Transition<OPERAND, STATE, EVENTTYPE, EVENT>>>
-      prototype = new HashMap<STATE, Map<EVENTTYPE, Transition<OPERAND, STATE, EVENTTYPE, EVENT>>>();
-
-    prototype.put(defaultInitialState, null);
-
-    // I use EnumMap here because it'll be faster and denser.  I would
-    //  expect most of the states to have at least one transition.
-    stateMachineTable
-       = new EnumMap<STATE, Map<EVENTTYPE,
-                           Transition<OPERAND, STATE, EVENTTYPE, EVENT>>>(prototype);
-
-    for (TransitionsListNode cursor = transitionsListNode;
-         cursor != null;
-         cursor = cursor.next) {
-      stack.push(cursor.transition);
-    }
-
-    while (!stack.isEmpty()) {
-      stack.pop().apply(this);
-    }
-  }
-
-  private interface Transition<OPERAND, STATE extends Enum<STATE>,
-          EVENTTYPE extends Enum<EVENTTYPE>, EVENT> {
-    STATE doTransition(OPERAND operand, STATE oldState,
-                       EVENT event, EVENTTYPE eventType)
-       throws InvalidStateTransitionException;
-  }
-
-  private class SingleInternalArc
-                    implements Transition<OPERAND, STATE, EVENTTYPE, EVENT> {
-
-    private STATE postState;
-    private SingleArcTransition<OPERAND, EVENT> hook; // transition hook
-
-    SingleInternalArc(STATE postState,
-        SingleArcTransition<OPERAND, EVENT> hook) {
-      this.postState = postState;
-      this.hook = hook;
-    }
-
-    @Override
-    public STATE doTransition(OPERAND operand, STATE oldState,
-                              EVENT event, EVENTTYPE eventType) {
-      if (hook != null) {
-        hook.transition(operand, event);
-      }
-      return postState;
-    }
-  }
-
-  private class MultipleInternalArc
-              implements Transition<OPERAND, STATE, EVENTTYPE, EVENT>{
-
-    // Fields
-    private Set<STATE> validPostStates;
-    private MultipleArcTransition<OPERAND, EVENT, STATE> hook;  // transition hook
-
-    MultipleInternalArc(Set<STATE> postStates,
-                   MultipleArcTransition<OPERAND, EVENT, STATE> hook) {
-      this.validPostStates = postStates;
-      this.hook = hook;
-    }
-
-    @Override
-    public STATE doTransition(OPERAND operand, STATE oldState,
-                              EVENT event, EVENTTYPE eventType)
-        throws InvalidStateTransitionException {
-      STATE postState = hook.transition(operand, event);
-
-      if (!validPostStates.contains(postState)) {
-        throw new InvalidStateTransitionException(oldState, eventType);
-      }
-      return postState;
-    }
-  }
-
-  /*
-   * @return a {@link StateMachine} that starts in
-   *         {@code initialState} and whose {@link Transition}s are
-   *         applied to {@code operand}.
-   *
-   *         This is part of the API.
-   *
-   * @param operand the object upon which the returned
-   *                {@link StateMachine} will operate.
-   * @param initialState the state in which the returned
-   *                {@link StateMachine} will start.
-   */
-  public StateMachine<STATE, EVENTTYPE, EVENT>
-        make(OPERAND operand, STATE initialState) {
-    return new InternalStateMachine(operand, initialState);
-  }
-
-  /*
-   * @return a {@link StateMachine} that starts in the default initial
-   *          state and whose {@link Transition}s are applied to
-   *          {@code operand}.
-   *
-   *         This is part of the API.
-   *
-   * @param operand the object upon which the returned
-   *                {@link StateMachine} will operate.
-   */
-  public StateMachine<STATE, EVENTTYPE, EVENT> make(OPERAND operand) {
-    return new InternalStateMachine(operand, defaultInitialState);
-  }
-
-  private class InternalStateMachine
-        implements StateMachine<STATE, EVENTTYPE, EVENT> {
-    private final OPERAND operand;
-    private STATE currentState;
-
-    InternalStateMachine(OPERAND operand, STATE initialState) {
-      this.operand = operand;
-      this.currentState = initialState;
-      if (!optimized) {
-        maybeMakeStateMachineTable();
-      }
-    }
-
-    @Override
-    public synchronized STATE getCurrentState() {
-      return currentState;
-    }
-
-    @Override
-    public synchronized STATE doTransition(EVENTTYPE eventType, EVENT event)
-         throws InvalidStateTransitionException  {
-      currentState = StateMachineFactory.this.doTransition
-          (operand, currentState, eventType, event);
-      return currentState;
-    }
-
-    @Override
-    public synchronized void setCurrentState(STATE state) {
-      currentState = state;
-    }
-
-  }
-
-  /*
-   * Generate a graph representing the state graph of this StateMachine
-   * @param name graph name
-   * @return Graph object generated
-  public Graph generateStateGraph(String name) {
-    maybeMakeStateMachineTable();
-    Graph g = new Graph(name);
-    for (STATE startState : stateMachineTable.keySet()) {
-      Map<EVENTTYPE, Transition<OPERAND, STATE, EVENTTYPE, EVENT>> transitions
-          = stateMachineTable.get(startState);
-      for (Entry<EVENTTYPE, Transition<OPERAND, STATE, EVENTTYPE, EVENT>> entry :
-         transitions.entrySet()) {
-        Transition<OPERAND, STATE, EVENTTYPE, EVENT> transition = entry.getValue();
-        if (transition instanceof StateMachineFactory.SingleInternalArc) {
-          StateMachineFactory.SingleInternalArc sa
-              = (StateMachineFactory.SingleInternalArc) transition;
-          Graph.Node fromNode = g.getNode(startState.toString());
-          Graph.Node toNode = g.getNode(sa.postState.toString());
-          fromNode.addEdge(toNode, entry.getKey().toString());
-        } else if (transition instanceof StateMachineFactory.MultipleInternalArc) {
-          StateMachineFactory.MultipleInternalArc ma
-              = (StateMachineFactory.MultipleInternalArc) transition;
-          Iterator<STATE> iter = ma.validPostStates.iterator();
-          while (iter.hasNext()) {
-            Graph.Node fromNode = g.getNode(startState.toString());
-            Graph.Node toNode = g.getNode(iter.next().toString());
-            fromNode.addEdge(toNode, entry.getKey().toString());
-          }
-        }
-      }
-    }
-    return g;
-  }
-  */
-}
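
For reference, a minimal sketch of how the factory API above is driven. The
Job, JobState, and JobEventType names are illustrative stand-ins (not Ambari
classes), and the single-arc addTransition overload is assumed from earlier in
this file:

  enum JobState { NEW, RUNNING, DONE }
  enum JobEventType { START, FINISH }

  class Job {
    // Build the topology once; installTopology() pre-builds the transition
    // table so later make() calls need no synchronization.
    private static final StateMachineFactory<Job, JobState, JobEventType, String>
        factory = new StateMachineFactory<Job, JobState, JobEventType, String>(JobState.NEW)
            .addTransition(JobState.NEW, JobState.RUNNING, JobEventType.START,
                new SingleArcTransition<Job, String>() {
                  @Override
                  public void transition(Job job, String event) {
                    // side effect executed while the arc is taken
                  }
                })
            .addTransition(JobState.RUNNING, JobState.DONE, JobEventType.FINISH)
            .installTopology();

    private final StateMachine<JobState, JobEventType, String> fsm =
        factory.make(this);  // starts in the default initial state, NEW

    void start() throws InvalidStateTransitionException {
      fsm.doTransition(JobEventType.START, "start");  // NEW -> RUNNING
    }
  }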
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/event/AbstractEvent.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/event/AbstractEvent.java
deleted file mode 100644
index e837ae7..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/event/AbstractEvent.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-package org.apache.ambari.server.state.fsm.event;
-
-/**
- * Parent class of all events; every concrete event extends this class.
- */
-public abstract class AbstractEvent<TYPE extends Enum<TYPE>>
-    implements Event<TYPE> {
-
-  private final TYPE type;
-  private final long timestamp;
-
-  // use this if you DON'T care about the timestamp
-  public AbstractEvent(TYPE type) {
-    this.type = type;
-    // We're not generating a real timestamp here.  It's too expensive.
-    timestamp = -1L;
-  }
-
-  // use this if you care about the timestamp
-  public AbstractEvent(TYPE type, long timestamp) {
-    this.type = type;
-    this.timestamp = timestamp;
-  }
-
-  @Override
-  public long getTimestamp() {
-    return timestamp;
-  }
-
-  @Override
-  public TYPE getType() {
-    return type;
-  }
-
-  @Override
-  public String toString() {
-    return "EventType: " + getType();
-  }
-}
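
A hedged sketch of subclassing AbstractEvent; JobEventType below is an
illustrative enum, not an Ambari type:

  enum JobEventType { START, FINISH }

  class JobStartedEvent extends AbstractEvent<JobEventType> {
    JobStartedEvent(long timestamp) {
      super(JobEventType.START, timestamp);  // timestamped constructor
    }
  }

  // new JobStartedEvent(System.currentTimeMillis()).getType() returns START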
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/event/Event.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/event/Event.java
deleted file mode 100644
index e97d3b0..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/event/Event.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-package org.apache.ambari.server.state.fsm.event;
-
-/**
- * Interface defining the events API.
- *
- */
-public interface Event<TYPE extends Enum<TYPE>> {
-
-  TYPE getType();
-  long getTimestamp();
-  String toString();
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/event/EventHandler.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/event/EventHandler.java
deleted file mode 100644
index 07ae32e..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/fsm/event/EventHandler.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-package org.apache.ambari.server.state.fsm.event;
-
-/**
- * Interface for handling events of type T
- *
- * @param <T> parameterized event of type T
- */
-public interface EventHandler<T extends Event<?>> {
-
-  void handle(T event);
-
-}
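
Implementations of this interface route events to whatever owns the state; a
minimal sketch, reusing the hypothetical JobStartedEvent shown above:

  class JobStartedHandler implements EventHandler<JobStartedEvent> {
    @Override
    public void handle(JobStartedEvent event) {
      // e.g. feed the event into the owning object's state machine
    }
  }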
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostFactory.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostFactory.java
deleted file mode 100644
index 5cfbbef..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostFactory.java
+++ /dev/null
@@ -1,25 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.state.host;
-
-import org.apache.ambari.server.orm.entities.HostEntity;
-import org.apache.ambari.server.state.Host;
-
-public interface HostFactory {
-  Host create(HostEntity hostEntity, boolean persisted);
-}
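
This factory is wired through Guice assisted injection: HostImpl's @Assisted
constructor parameters line up with create(...)'s arguments. A sketch of the
binding (the module name is hypothetical):

  import com.google.inject.AbstractModule;
  import com.google.inject.assistedinject.FactoryModuleBuilder;

  public class HostModule extends AbstractModule {
    @Override
    protected void configure() {
      install(new FactoryModuleBuilder()
          .implement(Host.class, HostImpl.class)   // create(...) returns Host
          .build(HostFactory.class));
    }
  }

  // Callers then inject HostFactory and call create(hostEntity, persisted).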
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostHealthyHeartbeatEvent.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostHealthyHeartbeatEvent.java
deleted file mode 100644
index 9afb433..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostHealthyHeartbeatEvent.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-package org.apache.ambari.server.state.host;
-
-import org.apache.ambari.server.agent.AgentEnv;
-import org.apache.ambari.server.state.HostEvent;
-import org.apache.ambari.server.state.HostEventType;
-
-public class HostHealthyHeartbeatEvent extends HostEvent {
-
-  private final long heartbeatTime;
-  private AgentEnv agentEnv = null;
-
-  public HostHealthyHeartbeatEvent(String hostName, long heartbeatTime, AgentEnv env) {
-    super(hostName, HostEventType.HOST_HEARTBEAT_HEALTHY);
-    this.heartbeatTime = heartbeatTime;
-    agentEnv = env;
-  }
-
-  /**
-   * @return the heartbeatTime
-   */
-  public long getHeartbeatTime() {
-    return heartbeatTime;
-  }
-  
-  /**
-   * @return the agent environment reported with the heartbeat, if present.
-   * Can return <code>null</code> if there was no new status.
-   */
-  public AgentEnv getAgentEnv() {
-    return agentEnv;
-  }
-
-}
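
A sketch of typical use, assuming a heartbeat handler with host, hostName,
heartbeatTime, and a possibly-null AgentEnv in scope:

  // env may be null when the agent reported no new environment data;
  // handleEvent throws InvalidStateTransitionException on an illegal arc
  host.handleEvent(new HostHealthyHeartbeatEvent(hostName, heartbeatTime, env));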
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostHeartbeatLostEvent.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostHeartbeatLostEvent.java
deleted file mode 100644
index 7f9a3e8..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostHeartbeatLostEvent.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-package org.apache.ambari.server.state.host;
-
-import org.apache.ambari.server.state.HostEvent;
-import org.apache.ambari.server.state.HostEventType;
-
-public class HostHeartbeatLostEvent extends HostEvent {
-
-  public HostHeartbeatLostEvent(String hostName) {
-    super(hostName, HostEventType.HOST_HEARTBEAT_LOST);
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
deleted file mode 100644
index 673f1a2..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
+++ /dev/null
@@ -1,1022 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-package org.apache.ambari.server.state.host;
-
-import java.lang.reflect.Type;
-import java.util.*;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-import com.google.gson.Gson;
-import com.google.gson.reflect.TypeToken;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.assistedinject.Assisted;
-import com.google.inject.persist.Transactional;
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.agent.DiskInfo;
-import org.apache.ambari.server.agent.AgentEnv;
-import org.apache.ambari.server.agent.HostInfo;
-import org.apache.ambari.server.controller.HostResponse;
-import org.apache.ambari.server.orm.dao.ClusterDAO;
-import org.apache.ambari.server.orm.dao.HostStateDAO;
-import org.apache.ambari.server.orm.entities.ClusterEntity;
-import org.apache.ambari.server.orm.entities.HostStateEntity;
-import org.apache.ambari.server.state.*;
-import org.apache.ambari.server.state.HostHealthStatus.HealthStatus;
-import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
-import org.apache.ambari.server.state.fsm.SingleArcTransition;
-import org.apache.ambari.server.state.fsm.StateMachine;
-import org.apache.ambari.server.state.fsm.StateMachineFactory;
-import org.apache.ambari.server.orm.dao.HostDAO;
-import org.apache.ambari.server.orm.entities.HostEntity;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-public class HostImpl implements Host {
-
-  private static final Log LOG = LogFactory.getLog(HostImpl.class);
-  private final Gson gson;
-
-  private static final Type diskInfoType =
-      new TypeToken<List<DiskInfo>>() {}.getType();
-  private static final Type hostAttributesType =
-      new TypeToken<Map<String, String>>() {}.getType();
-
-  ReadWriteLock rwLock;
-  private final Lock readLock;
-  private final Lock writeLock;
-
-  private HostEntity hostEntity;
-  private HostStateEntity hostStateEntity;
-  private Injector injector;
-  private HostDAO hostDAO;
-  private HostStateDAO hostStateDAO;
-  private ClusterDAO clusterDAO;
-  private Clusters clusters;
-
-  private long lastHeartbeatTime = 0L;
-  private AgentEnv lastAgentEnv = null;
-  private boolean persisted = false;
-
-  private static final String HARDWAREISA = "hardware_isa";
-  private static final String HARDWAREMODEL = "hardware_model";
-  private static final String INTERFACES = "interfaces";
-  private static final String KERNEL = "kernel";
-  private static final String KERNELMAJORVERSION = "kernel_majorversion";
-  private static final String KERNELRELEASE = "kernel_release";
-  private static final String KERNELVERSION = "kernel_version";
-  private static final String MACADDRESS = "mac_address";
-  private static final String NETMASK = "netmask";
-  private static final String OSFAMILY = "os_family";
-  private static final String PHYSICALPROCESSORCOUNT =
-      "physicalprocessors_count";
-  private static final String PROCESSORCOUNT = "processors_count";
-  private static final String SELINUXENABLED = "selinux_enabled";
-  private static final String SWAPSIZE = "swap_size";
-  private static final String SWAPFREE = "swap_free";
-  private static final String TIMEZONE = "timezone";
-
-  private static final StateMachineFactory
-    <HostImpl, HostState, HostEventType, HostEvent>
-      stateMachineFactory
-        = new StateMachineFactory<HostImpl, HostState, HostEventType, HostEvent>
-        (HostState.INIT)
-
-   // define the state machine of a Host
-
-   // Transition from INIT state
-   // when the initial registration request is received
-   .addTransition(HostState.INIT, HostState.WAITING_FOR_HOST_STATUS_UPDATES,
-       HostEventType.HOST_REGISTRATION_REQUEST, new HostRegistrationReceived())
-   // when a heartbeat is lost right after registration
-   .addTransition(HostState.INIT, HostState.HEARTBEAT_LOST,
-       HostEventType.HOST_HEARTBEAT_LOST, new HostHeartbeatLostTransition())
-
-   // Transitions from WAITING_FOR_HOST_STATUS_UPDATES state
-   // when the host has responded to its status update requests
-   // TODO this will create problems if the host is not healthy
-   // TODO Based on discussion with Jitendra, ignoring this for now
-   .addTransition(HostState.WAITING_FOR_HOST_STATUS_UPDATES, HostState.HEALTHY,
-       HostEventType.HOST_STATUS_UPDATES_RECEIVED,
-       new HostStatusUpdatesReceivedTransition())
-   // when a normal heartbeat is received
-   .addTransition(HostState.WAITING_FOR_HOST_STATUS_UPDATES,
-       HostState.WAITING_FOR_HOST_STATUS_UPDATES,
-       HostEventType.HOST_HEARTBEAT_HEALTHY)   // TODO: Heartbeat is ignored here
-   // when a heartbeat denoting host as unhealthy is received
-   .addTransition(HostState.WAITING_FOR_HOST_STATUS_UPDATES,
-       HostState.WAITING_FOR_HOST_STATUS_UPDATES, // Still waiting for component status
-       HostEventType.HOST_HEARTBEAT_UNHEALTHY,
-       new HostBecameUnhealthyTransition()) // TODO: Not sure
-  // when a heartbeat is lost and status update is not received
-   .addTransition(HostState.WAITING_FOR_HOST_STATUS_UPDATES,
-       HostState.HEARTBEAT_LOST,
-       HostEventType.HOST_HEARTBEAT_LOST,
-       new HostHeartbeatLostTransition())
-
-   // Transitions from HEALTHY state
-   // when a normal heartbeat is received
-   .addTransition(HostState.HEALTHY, HostState.HEALTHY,
-       HostEventType.HOST_HEARTBEAT_HEALTHY,
-       new HostHeartbeatReceivedTransition())
-   // when a heartbeat is not received within the configured timeout period
-   .addTransition(HostState.HEALTHY, HostState.HEARTBEAT_LOST,
-       HostEventType.HOST_HEARTBEAT_LOST,
-       new HostHeartbeatLostTransition())
-   // when a heartbeat denoting host as unhealthy is received
-   .addTransition(HostState.HEALTHY, HostState.UNHEALTHY,
-       HostEventType.HOST_HEARTBEAT_UNHEALTHY,
-       new HostBecameUnhealthyTransition())
-   // if a new registration request is received
-   .addTransition(HostState.HEALTHY,
-       HostState.WAITING_FOR_HOST_STATUS_UPDATES,
-       HostEventType.HOST_REGISTRATION_REQUEST, new HostRegistrationReceived())
-
-   // Transitions from UNHEALTHY state
-   // when a normal heartbeat is received
-   .addTransition(HostState.UNHEALTHY, HostState.HEALTHY,
-       HostEventType.HOST_HEARTBEAT_HEALTHY,
-       new HostBecameHealthyTransition())
-   // when a heartbeat denoting host as unhealthy is received
-   .addTransition(HostState.UNHEALTHY, HostState.UNHEALTHY,
-       HostEventType.HOST_HEARTBEAT_UNHEALTHY,
-       new HostHeartbeatReceivedTransition())
-   // when a heartbeat is not received within the configured timeout period
-   .addTransition(HostState.UNHEALTHY, HostState.HEARTBEAT_LOST,
-       HostEventType.HOST_HEARTBEAT_LOST,
-       new HostHeartbeatLostTransition())
-   // if a new registration request is received
-   .addTransition(HostState.UNHEALTHY,
-       HostState.WAITING_FOR_HOST_STATUS_UPDATES,
-       HostEventType.HOST_REGISTRATION_REQUEST, new HostRegistrationReceived())
-
-   // Transitions from HEARTBEAT_LOST state
-   // when a heartbeat is not received within the configured timeout period
-   .addTransition(HostState.HEARTBEAT_LOST, HostState.HEARTBEAT_LOST,
-       HostEventType.HOST_HEARTBEAT_LOST)
-   // if a new registration request is received
-   .addTransition(HostState.HEARTBEAT_LOST,
-       HostState.WAITING_FOR_HOST_STATUS_UPDATES,
-       HostEventType.HOST_REGISTRATION_REQUEST, new HostRegistrationReceived())
-
-   .installTopology();
-
-  private final StateMachine<HostState, HostEventType, HostEvent> stateMachine;
-
-  @Inject
-  public HostImpl(@Assisted HostEntity hostEntity,
-      @Assisted boolean persisted, Injector injector) {
-    this.stateMachine = stateMachineFactory.make(this);
-    rwLock = new ReentrantReadWriteLock();
-    this.readLock = rwLock.readLock();
-    this.writeLock = rwLock.writeLock();
-
-    this.hostEntity = hostEntity;
-    this.injector = injector;
-    this.persisted = persisted;
-    this.hostDAO = injector.getInstance(HostDAO.class);
-    this.hostStateDAO = injector.getInstance(HostStateDAO.class);
-    this.gson = injector.getInstance(Gson.class);
-    this.clusterDAO = injector.getInstance(ClusterDAO.class);
-    this.clusters = injector.getInstance(Clusters.class);
-
-    hostStateEntity = hostEntity.getHostStateEntity();
-    if (hostStateEntity == null) {
-      hostStateEntity = new HostStateEntity();
-      hostStateEntity.setHostEntity(hostEntity);
-      hostEntity.setHostStateEntity(hostStateEntity);
-      setHealthStatus(new HostHealthStatus(HealthStatus.UNKNOWN, ""));
-      if (persisted) {
-        persist();
-      }
-    } else {
-      this.stateMachine.setCurrentState(hostStateEntity.getCurrentState());
-    }
-
-  }
-
-//  //TODO delete
-//  public HostImpl(String hostname) {
-//    this.stateMachine = stateMachineFactory.make(this);
-//    ReadWriteLock rwLock = new ReentrantReadWriteLock();
-//    this.readLock = rwLock.readLock();
-//    this.writeLock = rwLock.writeLock();
-//    setHostName(hostname);
-//    setHealthStatus(new HostHealthStatus(HealthStatus.UNKNOWN, ""));
-//  }
-
-  static class HostRegistrationReceived
-      implements SingleArcTransition<HostImpl, HostEvent> {
-
-    @Override
-    public void transition(HostImpl host, HostEvent event) {
-      HostRegistrationRequestEvent e = (HostRegistrationRequestEvent) event;
-      host.importHostInfo(e.hostInfo);
-      host.setLastRegistrationTime(e.registrationTime);
-      //Initialize heartbeat time and timeInState with registration time.
-      host.setLastHeartbeatTime(e.registrationTime);
-      host.setLastAgentEnv(e.agentEnv);
-      host.setTimeInState(e.registrationTime);
-      host.setAgentVersion(e.agentVersion);
-      host.setPublicHostName(e.publicHostName);
-
-      String agentVersion = null;
-      if (e.agentVersion != null) {
-        agentVersion = e.agentVersion.getVersion();
-      }
-      LOG.info("Received host registration, host="
-          + e.hostInfo.toString()
-          + ", registrationTime=" + e.registrationTime
-          + ", agentVersion=" + agentVersion);
-      host.persist();
-    }
-  }
-
-  static class HostStatusUpdatesReceivedTransition
-      implements SingleArcTransition<HostImpl, HostEvent> {
-
-    @Override
-    public void transition(HostImpl host, HostEvent event) {
-      HostStatusUpdatesReceivedEvent e = (HostStatusUpdatesReceivedEvent)event;
-      // TODO Audit logs
-      LOG.debug("Host transition to host status updates received state"
-          + ", host=" + e.getHostName()
-          + ", heartbeatTime=" + e.getTimestamp());
-      host.setHealthStatus(new HostHealthStatus(HealthStatus.HEALTHY,
-          host.getHealthStatus().getHealthReport()));
-    }
-  }
-
-  static class HostHeartbeatReceivedTransition
-    implements SingleArcTransition<HostImpl, HostEvent> {
-
-    @Override
-    public void transition(HostImpl host, HostEvent event) {
-      long heartbeatTime = 0;
-      switch (event.getType()) {
-        case HOST_HEARTBEAT_HEALTHY:
-          HostHealthyHeartbeatEvent hhevent = (HostHealthyHeartbeatEvent) event;
-          heartbeatTime = hhevent.getHeartbeatTime();
-          if (null != hhevent.getAgentEnv()) {
-            host.setLastAgentEnv(hhevent.getAgentEnv());
-          }
-          break;
-        case HOST_HEARTBEAT_UNHEALTHY:
-          heartbeatTime =
-            ((HostUnhealthyHeartbeatEvent)event).getHeartbeatTime();
-          break;
-        default:
-          break;
-      }
-      if (0 == heartbeatTime) {
-        LOG.error("heartbeatTime = 0 !!!");
-        // TODO handle error
-      }
-      // host.setLastHeartbeatState(new Object());
-      host.setLastHeartbeatTime(heartbeatTime);
-    }
-  }
-
-  static class HostBecameHealthyTransition
-      implements SingleArcTransition<HostImpl, HostEvent> {
-
-    @Override
-    public void transition(HostImpl host, HostEvent event) {
-      HostHealthyHeartbeatEvent e = (HostHealthyHeartbeatEvent) event;
-      host.setLastHeartbeatTime(e.getHeartbeatTime());
-      // TODO Audit logs
-      LOG.debug("Host transitioned to a healthy state"
-              + ", host=" + e.getHostName()
-              + ", heartbeatTime=" + e.getHeartbeatTime());
-      host.setHealthStatus(new HostHealthStatus(HealthStatus.HEALTHY, host.getHealthStatus().getHealthReport()));
-    }
-  }
-
-  static class HostBecameUnhealthyTransition
-      implements SingleArcTransition<HostImpl, HostEvent> {
-
-    @Override
-    public void transition(HostImpl host, HostEvent event) {
-      HostUnhealthyHeartbeatEvent e = (HostUnhealthyHeartbeatEvent) event;
-      host.setLastHeartbeatTime(e.getHeartbeatTime());
-      // TODO Audit logs
-      LOG.debug("Host transitioned to an unhealthy state"
-          + ", host=" + e.getHostName()
-          + ", heartbeatTime=" + e.getHeartbeatTime()
-          + ", healthStatus=" + e.getHealthStatus());
-      host.setHealthStatus(e.getHealthStatus());
-    }
-  }
-
-  static class HostHeartbeatLostTransition
-      implements SingleArcTransition<HostImpl, HostEvent> {
-
-    @Override
-    public void transition(HostImpl host, HostEvent event) {
-      HostHeartbeatLostEvent e = (HostHeartbeatLostEvent) event;
-      // TODO Audit logs
-      LOG.debug("Host transitioned to heartbeat lost state"
-          + ", host=" + e.getHostName()
-          + ", lastHeartbeatTime=" + host.getLastHeartbeatTime());
-      host.setHealthStatus(new HostHealthStatus(HealthStatus.UNKNOWN, host.getHealthStatus().getHealthReport()));
-    }
-  }
-
-  @Override
-  public void importHostInfo(HostInfo hostInfo) {
-    try {
-      writeLock.lock();
-
-      if (hostInfo.getIPAddress() != null
-          && !hostInfo.getIPAddress().isEmpty()) {
-        setIPv4(hostInfo.getIPAddress());
-        setIPv6(hostInfo.getIPAddress());
-      }
-
-      setCpuCount(hostInfo.getPhysicalProcessorCount());
-      setTotalMemBytes(hostInfo.getMemoryTotal());
-      setAvailableMemBytes(hostInfo.getFreeMemory());
-
-      if (hostInfo.getArchitecture() != null
-          && !hostInfo.getArchitecture().isEmpty()) {
-        setOsArch(hostInfo.getArchitecture());
-      }
-
-      if (hostInfo.getOS() != null
-          && !hostInfo.getOS().isEmpty()) {
-        String osType = hostInfo.getOS();
-        if (hostInfo.getOSRelease() != null) {
-          String[] release = hostInfo.getOSRelease().split("\\.");
-          if (release.length > 0) {
-            osType += release[0];
-          }
-        }
-        setOsType(osType.toLowerCase());
-      }
-
-      if (hostInfo.getMounts() != null
-          && !hostInfo.getMounts().isEmpty()) {
-        setDisksInfo(hostInfo.getMounts());
-      }
-
-      // FIXME add all other information into host attributes
-      this.setAgentVersion(new AgentVersion(
-          hostInfo.getAgentUserId()));
-
-      Map<String, String> attrs = new HashMap<String, String>();
-      if (hostInfo.getHardwareIsa() != null) {
-        attrs.put(HARDWAREISA, hostInfo.getHardwareIsa());
-      }
-      if (hostInfo.getHardwareModel() != null) {
-        attrs.put(HARDWAREMODEL, hostInfo.getHardwareModel());
-      }
-      if (hostInfo.getInterfaces() != null) {
-        attrs.put(INTERFACES, hostInfo.getInterfaces());
-      }
-      if (hostInfo.getKernel() != null) {
-        attrs.put(KERNEL, hostInfo.getKernel());
-      }
-      if (hostInfo.getKernelMajVersion() != null) {
-        attrs.put(KERNELMAJORVERSION, hostInfo.getKernelMajVersion());
-      }
-      if (hostInfo.getKernelRelease() != null) {
-        attrs.put(KERNELRELEASE, hostInfo.getKernelRelease());
-      }
-      if (hostInfo.getKernelVersion() != null) {
-        attrs.put(KERNELVERSION, hostInfo.getKernelVersion());
-      }
-      if (hostInfo.getMacAddress() != null) {
-        attrs.put(MACADDRESS, hostInfo.getMacAddress());
-      }
-      if (hostInfo.getNetMask() != null) {
-        attrs.put(NETMASK, hostInfo.getNetMask());
-      }
-      if (hostInfo.getOSFamily() != null) {
-        attrs.put(OSFAMILY, hostInfo.getOSFamily());
-      }
-      if (hostInfo.getPhysicalProcessorCount() != 0) {
-        attrs.put(PHYSICALPROCESSORCOUNT,
-          Long.toString(hostInfo.getPhysicalProcessorCount()));
-      }
-      if (hostInfo.getProcessorCount() != 0) {
-        attrs.put(PROCESSORCOUNT,
-          Long.toString(hostInfo.getProcessorCount()));
-      }
-      // Boolean.toString(boolean) never returns null, so no guard is needed
-      attrs.put(SELINUXENABLED, Boolean.toString(hostInfo.getSeLinux()));
-      if (hostInfo.getSwapSize() != null) {
-        attrs.put(SWAPSIZE, hostInfo.getSwapSize());
-      }
-      if (hostInfo.getSwapFree() != null) {
-        attrs.put(SWAPFREE, hostInfo.getSwapFree());
-      }
-      if (hostInfo.getTimeZone() != null) {
-        attrs.put(TIMEZONE, hostInfo.getTimeZone());
-      }
-      setHostAttributes(attrs);
-
-      saveIfPersisted();
-    }
-    finally {
-      writeLock.unlock();
-    }
-  }
-
-  /**
-   * @param env the most recent agent environment reported for this host
-   */
-  @Override
-  public void setLastAgentEnv(AgentEnv env) {
-    lastAgentEnv = env;
-  }
-  
-  @Override
-  public AgentEnv getLastAgentEnv() {
-    return lastAgentEnv;
-  }
-
-  @Override
-  public HostState getState() {
-    try {
-      readLock.lock();
-      return stateMachine.getCurrentState();
-    }
-    finally {
-      readLock.unlock();
-    }
-  }
-
-  @Override
-  public void setState(HostState state) {
-    try {
-      writeLock.lock();
-      stateMachine.setCurrentState(state);
-      hostStateEntity.setCurrentState(state);
-      hostStateEntity.setTimeInState(System.currentTimeMillis());
-      saveIfPersisted();
-    }
-    finally {
-      writeLock.unlock();
-    }
-  }
-
-  @Override
-  public void handleEvent(HostEvent event)
-      throws InvalidStateTransitionException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Handling Host event, eventType=" + event.getType().name()
-          + ", event=" + event.toString());
-    }
-    HostState oldState = getState();
-    try {
-      writeLock.lock();
-      try {
-        stateMachine.doTransition(event.getType(), event);
-      } catch (InvalidStateTransitionException e) {
-        LOG.error("Can't handle Host event at current state"
-            + ", host=" + this.getHostName()
-            + ", currentState=" + oldState
-            + ", eventType=" + event.getType()
-            + ", event=" + event);
-        throw e;
-      }
-    }
-    finally {
-      writeLock.unlock();
-    }
-    if (oldState != getState()) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Host transitioned to a new state"
-            + ", host=" + this.getHostName()
-            + ", oldState=" + oldState
-            + ", currentState=" + getState()
-            + ", eventType=" + event.getType().name()
-            + ", event=" + event);
-      }
-    }
-  }
-
-  @Override
-  public String getHostName() {
-    try {
-      readLock.lock();
-      return hostEntity.getHostName();
-    } finally {
-      readLock.unlock();
-    }
-  }
-
-  @Override
-  public void setHostName(String hostName) {
-    try {
-      writeLock.lock();
-      if (!isPersisted()) {
-        hostEntity.setHostName(hostName);
-      } else {
-        throw new UnsupportedOperationException("PK of persisted entity cannot be modified");
-      }
-    } finally {
-      writeLock.unlock();
-    }
-  }
-  
-  @Override
-  public void setPublicHostName(String hostName) {
-    try {
-      writeLock.lock();
-      hostEntity.setPublicHostName(hostName);
-      saveIfPersisted();
-    }
-    finally {
-      writeLock.unlock();
-    }
-  }
-  
-  @Override
-  public String getPublicHostName() {
-    try {
-      readLock.lock();
-      return hostEntity.getPublicHostName();
-    }
-    finally {
-      readLock.unlock();
-    }
-  }
-
-  @Override
-  public String getIPv4() {
-    try {
-      readLock.lock();
-      return hostEntity.getIpv4();
-    } finally {
-      readLock.unlock();
-    }
-  }
-
-  @Override
-  public void setIPv4(String ip) {
-    try {
-      writeLock.lock();
-      hostEntity.setIpv4(ip);
-      saveIfPersisted();
-    } finally {
-      writeLock.unlock();
-    }
-  }
-
-  @Override
-  public String getIPv6() {
-    try {
-      readLock.lock();
-      return hostEntity.getIpv6();
-    } finally {
-      readLock.unlock();
-    }
-  }
-
-  @Override
-  public void setIPv6(String ip) {
-    try {
-      writeLock.lock();
-      hostEntity.setIpv6(ip);
-      saveIfPersisted();
-    } finally {
-      writeLock.unlock();
-    }
-  }
-
-  @Override
-  public int getCpuCount() {
-    try {
-      readLock.lock();
-      return hostEntity.getCpuCount();
-    } finally {
-      readLock.unlock();
-    }
-  }
-
-  @Override
-  public void setCpuCount(int cpuCount) {
-    try {
-      writeLock.lock();
-      hostEntity.setCpuCount(cpuCount);
-      saveIfPersisted();
-    } finally {
-      writeLock.unlock();
-    }
-  }
-
-  @Override
-  public long getTotalMemBytes() {
-    try {
-      readLock.lock();
-      return hostEntity.getTotalMem();
-    } finally {
-      readLock.unlock();
-    }
-  }
-
-  @Override
-  public void setTotalMemBytes(long totalMemBytes) {
-    try {
-      writeLock.lock();
-      hostEntity.setTotalMem(totalMemBytes);
-      saveIfPersisted();
-    } finally {
-      writeLock.unlock();
-    }
-  }
-
-  @Override
-  public long getAvailableMemBytes() {
-    try {
-      readLock.lock();
-      return hostStateEntity.getAvailableMem();
-    }
-    finally {
-      readLock.unlock();
-    }
-  }
-
-  @Override
-  public void setAvailableMemBytes(long availableMemBytes) {
-    try {
-      writeLock.lock();
-      hostStateEntity.setAvailableMem(availableMemBytes);
-      saveIfPersisted();
-    }
-    finally {
-      writeLock.unlock();
-    }
-  }
-
-  @Override
-  public String getOsArch() {
-    try {
-      readLock.lock();
-      return hostEntity.getOsArch();
-    } finally {
-      readLock.unlock();
-    }
-  }
-
-  @Override
-  public void setOsArch(String osArch) {
-    try {
-      writeLock.lock();
-      hostEntity.setOsArch(osArch);
-      saveIfPersisted();
-    } finally {
-      writeLock.unlock();
-    }
-  }
-
-  @Override
-  public String getOsInfo() {
-    try {
-      readLock.lock();
-      return hostEntity.getOsInfo();
-    } finally {
-      readLock.unlock();
-    }
-  }
-
-  @Override
-  public void setOsInfo(String osInfo) {
-    try {
-      writeLock.lock();
-      hostEntity.setOsInfo(osInfo);
-      saveIfPersisted();
-    } finally {
-      writeLock.unlock();
-    }
-  }
-
-  @Override
-  public String getOsType() {
-    try {
-      readLock.lock();
-      return hostEntity.getOsType();
-    } finally {
-      readLock.unlock();
-    }
-  }
-
-  @Override
-  public void setOsType(String osType) {
-    try {
-      writeLock.lock();
-      hostEntity.setOsType(osType);
-      saveIfPersisted();
-    } finally {
-      writeLock.unlock();
-    }
-  }
-
-  @Override
-  public List<DiskInfo> getDisksInfo() {
-    try {
-      readLock.lock();
-      return gson.<List<DiskInfo>>fromJson(
-                hostEntity.getDisksInfo(), diskInfoType);
-    } finally {
-      readLock.unlock();
-    }
-  }
-
-  @Override
-  public void setDisksInfo(List<DiskInfo> disksInfo) {
-    try {
-      writeLock.lock();
-      hostEntity.setDisksInfo(gson.toJson(disksInfo, diskInfoType));
-      saveIfPersisted();
-    } finally {
-      writeLock.unlock();
-    }
-  }
-
-  @Override
-  public HostHealthStatus getHealthStatus() {
-    try {
-      readLock.lock();
-      return gson.fromJson(hostStateEntity.getHealthStatus(),
-          HostHealthStatus.class);
-    } finally {
-      readLock.unlock();
-    }
-  }
-
-  @Override
-  public void setHealthStatus(HostHealthStatus healthStatus) {
-    try {
-      writeLock.lock();
-      hostStateEntity.setHealthStatus(gson.toJson(healthStatus));
-      saveIfPersisted();
-    } finally {
-      writeLock.unlock();
-    }
-  }
-
-  @Override
-  public Map<String, String> getHostAttributes() {
-    try {
-      readLock.lock();
-      return gson.<Map<String, String>>fromJson(hostEntity.getHostAttributes(),
-          hostAttributesType);
-    } finally {
-      readLock.unlock();
-    }
-  }
-
-  @Override
-  public void setHostAttributes(Map<String, String> hostAttributes) {
-    try {
-      writeLock.lock();
-      Map<String, String> hostAttrs = gson.<Map<String, String>>
-          fromJson(hostEntity.getHostAttributes(), hostAttributesType);
-      if (hostAttrs == null) {
-        hostAttrs = new HashMap<String, String>();
-      }
-      hostAttrs.putAll(hostAttributes);
-      hostEntity.setHostAttributes(gson.toJson(hostAttrs,
-          hostAttributesType));
-      saveIfPersisted();
-    } finally {
-      writeLock.unlock();
-    }
-  }
-
-  @Override
-  public String getRackInfo() {
-    try {
-      readLock.lock();
-      return hostEntity.getRackInfo();
-    } finally {
-      readLock.unlock();
-    }
-  }
-
-  @Override
-  public void setRackInfo(String rackInfo) {
-    try {
-      writeLock.lock();
-      hostEntity.setRackInfo(rackInfo);
-      saveIfPersisted();
-    } finally {
-      writeLock.unlock();
-    }
-  }
-
-  @Override
-  public long getLastRegistrationTime() {
-    try {
-      readLock.lock();
-      return hostEntity.getLastRegistrationTime();
-    } finally {
-      readLock.unlock();
-    }
-  }
-
-  @Override
-  public void setLastRegistrationTime(long lastRegistrationTime) {
-    try {
-      writeLock.lock();
-      this.hostEntity.setLastRegistrationTime(lastRegistrationTime);
-      saveIfPersisted();
-    } finally {
-      writeLock.unlock();
-    }
-  }
-
-  @Override
-  public long getLastHeartbeatTime() {
-    try {
-      readLock.lock();
-      return lastHeartbeatTime;
-    }
-    finally {
-      readLock.unlock();
-    }
-  }
-
-  @Override
-  public void setLastHeartbeatTime(long lastHeartbeatTime) {
-    try {
-      writeLock.lock();
-      this.lastHeartbeatTime = lastHeartbeatTime;
-    }
-    finally {
-      writeLock.unlock();
-    }
-  }
-
-  @Override
-  public AgentVersion getAgentVersion() {
-    try {
-      readLock.lock();
-      return gson.fromJson(hostStateEntity.getAgentVersion(),
-          AgentVersion.class);
-    }
-    finally {
-      readLock.unlock();
-    }
-  }
-
-  @Override
-  public void setAgentVersion(AgentVersion agentVersion) {
-    try {
-      writeLock.lock();
-      hostStateEntity.setAgentVersion(gson.toJson(agentVersion));
-      saveIfPersisted();
-    }
-    finally {
-      writeLock.unlock();
-    }
-  }
-
-  @Override
-  public long getTimeInState() {
-    return hostStateEntity.getTimeInState();
-  }
-
-  @Override
-  public void setTimeInState(long timeInState) {
-    try {
-      writeLock.lock();
-      hostStateEntity.setTimeInState(timeInState);
-      saveIfPersisted();
-    }
-    finally {
-      writeLock.unlock();
-    }
-  }
-
-  @Override
-  public HostResponse convertToResponse() {
-    try {
-      readLock.lock();
-      HostResponse r = new HostResponse(getHostName());
-
-      r.setAgentVersion(getAgentVersion());
-      r.setAvailableMemBytes(getAvailableMemBytes());
-      r.setCpuCount(getCpuCount());
-      r.setDisksInfo(getDisksInfo());
-      r.setHealthStatus(getHealthStatus());
-      r.setHostAttributes(getHostAttributes());
-      r.setIpv4(getIPv4());
-      r.setIpv6(getIPv6());
-      r.setLastHeartbeatTime(getLastHeartbeatTime());
-      r.setLastAgentEnv(lastAgentEnv);
-      r.setLastRegistrationTime(getLastRegistrationTime());
-      r.setOsArch(getOsArch());
-      r.setOsInfo(getOsInfo());
-      r.setOsType(getOsType());
-      r.setRackInfo(getRackInfo());
-      r.setTotalMemBytes(getTotalMemBytes());
-      r.setPublicHostName(getPublicHostName());
-      r.setHostState(getState().toString());
-
-      return r;
-    }
-    finally {
-      readLock.unlock();
-    }
-  }
-
-  /**
-   * Indicates whether this host has been persisted to the database.
-   *
-   * @return true if persisted
-   */
-  @Override
-  public boolean isPersisted() {
-    try {
-      readLock.lock();
-      return persisted;
-    } finally {
-      readLock.unlock();
-    }
-  }
-
-  /**
-   * Persist the host to the database; all subsequent changes are saved automatically.
-   */
-  @Override
-  public void persist() {
-    writeLock.lock();
-    try {
-      if (!persisted) {
-        persistEntities();
-        refresh();
-        for (ClusterEntity clusterEntity : hostEntity.getClusterEntities()) {
-          try {
-            clusters.getClusterById(clusterEntity.getClusterId()).refresh();
-          } catch (AmbariException e) {
-            LOG.error(e);
-            throw new RuntimeException("Cluster '" + clusterEntity.getClusterId() + "' was removed", e);
-          }
-        }
-        persisted = true;
-      } else {
-        saveIfPersisted();
-      }
-    } finally {
-      writeLock.unlock();
-    }
-  }
-
-  @Transactional
-  void persistEntities() {
-    hostDAO.create(hostEntity);
-    hostStateDAO.create(hostStateEntity);
-    if (!hostEntity.getClusterEntities().isEmpty()) {
-      for (ClusterEntity clusterEntity : hostEntity.getClusterEntities()) {
-        clusterEntity.getHostEntities().add(hostEntity);
-        clusterDAO.merge(clusterEntity);
-      }
-    }
-  }
-
-  @Override
-  @Transactional
-  public void refresh() {
-    writeLock.lock();
-    try {
-      if (isPersisted()) {
-        hostEntity = hostDAO.findByName(hostEntity.getHostName());
-        hostStateEntity = hostEntity.getHostStateEntity();
-        hostDAO.refresh(hostEntity);
-        hostStateDAO.refresh(hostStateEntity);
-      }
-    } finally {
-      writeLock.unlock();
-    }
-  }
-
-  @Transactional
-  void saveIfPersisted() {
-    if (isPersisted()) {
-      hostDAO.merge(hostEntity);
-      hostStateDAO.merge(hostStateEntity);
-    }
-  }
-}
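
A hedged walkthrough of the host state machine defined above, assuming the
event constructor arguments (hostName, agentVersion, registrationTime,
hostInfo, agentEnv, now) are in scope:

  host.handleEvent(new HostRegistrationRequestEvent(
      hostName, agentVersion, registrationTime, hostInfo, agentEnv));
  // INIT -> WAITING_FOR_HOST_STATUS_UPDATES

  host.handleEvent(new HostStatusUpdatesReceivedEvent(hostName, now));
  // WAITING_FOR_HOST_STATUS_UPDATES -> HEALTHY

  host.handleEvent(new HostHeartbeatLostEvent(hostName));
  // HEALTHY -> HEARTBEAT_LOST; health status is reset to UNKNOWN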
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostRegistrationRequestEvent.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostRegistrationRequestEvent.java
deleted file mode 100644
index 2a6abfd..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostRegistrationRequestEvent.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.host;
-
-import org.apache.ambari.server.agent.AgentEnv;
-import org.apache.ambari.server.agent.HostInfo;
-import org.apache.ambari.server.state.AgentVersion;
-import org.apache.ambari.server.state.HostEvent;
-import org.apache.ambari.server.state.HostEventType;
-
-public class HostRegistrationRequestEvent extends HostEvent {
-
-  final long registrationTime;
-  final HostInfo hostInfo;
-  final AgentVersion agentVersion;
-  final String publicHostName;
-  final AgentEnv agentEnv;
-
-  public HostRegistrationRequestEvent(String hostName,
-      AgentVersion agentVersion, long registrationTime, HostInfo hostInfo, AgentEnv env) {
-    this(hostName, hostName, agentVersion, registrationTime, hostInfo, env);
-  }
-  
-  public HostRegistrationRequestEvent(String hostName, String publicName,
-      AgentVersion agentVersion, long registrationTime, HostInfo hostInfo, AgentEnv env) {
-    super(hostName, HostEventType.HOST_REGISTRATION_REQUEST);
-    this.registrationTime = registrationTime;
-    this.hostInfo = hostInfo;
-    this.agentVersion = agentVersion;
-    this.publicHostName = (null == publicName) ? hostName : publicName;
-    this.agentEnv = env;
-  }
-
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostStatusUpdatesReceivedEvent.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostStatusUpdatesReceivedEvent.java
deleted file mode 100644
index b73bdaa..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostStatusUpdatesReceivedEvent.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.host;
-
-import org.apache.ambari.server.state.HostEvent;
-import org.apache.ambari.server.state.HostEventType;
-
-public class HostStatusUpdatesReceivedEvent extends HostEvent {
-
-  private final long timestamp;
-
-  // TODO need to add any additional information required for verification
-  // tracking
-  public HostStatusUpdatesReceivedEvent(String hostName,
-      long timestamp) {
-    super(hostName, HostEventType.HOST_STATUS_UPDATES_RECEIVED);
-    this.timestamp = timestamp;
-  }
-
-  /**
-   * @return the timestamp
-   */
-  public long getTimestamp() {
-    return timestamp;
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostUnhealthyHeartbeatEvent.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostUnhealthyHeartbeatEvent.java
deleted file mode 100644
index 8cd10b1..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostUnhealthyHeartbeatEvent.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.host;
-
-import org.apache.ambari.server.state.HostEvent;
-import org.apache.ambari.server.state.HostEventType;
-import org.apache.ambari.server.state.HostHealthStatus;
-
-public class HostUnhealthyHeartbeatEvent extends HostEvent {
-
-  private final long heartbeatTime;
-
-  private final HostHealthStatus healthStatus;
-
-  public HostUnhealthyHeartbeatEvent(String hostName, long heartbeatTime,
-      HostHealthStatus healthStatus) {
-    super(hostName, HostEventType.HOST_HEARTBEAT_UNHEALTHY);
-    this.heartbeatTime = heartbeatTime;
-    this.healthStatus = healthStatus;
-  }
-
-  /**
-   * @return the heartbeatTime
-   */
-  public long getHeartbeatTime() {
-    return heartbeatTime;
-  }
-
-  /**
-   * @return the healthStatus
-   */
-  public HostHealthStatus getHealthStatus() {
-    return healthStatus;
-  }
-
-}
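
The unhealthy/healthy heartbeat pair drives the HEALTHY <-> UNHEALTHY arcs in
HostImpl; a sketch, assuming HealthStatus defines an UNHEALTHY member:

  host.handleEvent(new HostUnhealthyHeartbeatEvent(hostName, now,
      new HostHealthStatus(HealthStatus.UNHEALTHY, "disk full")));
  // HEALTHY -> UNHEALTHY

  host.handleEvent(new HostHealthyHeartbeatEvent(hostName, later, null));
  // UNHEALTHY -> HEALTHY via HostBecameHealthyTransition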
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
deleted file mode 100644
index 36a1500..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
+++ /dev/null
@@ -1,1236 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.svccomphost;
-
-import java.util.*;
-import java.util.Map.Entry;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.controller.ServiceComponentHostResponse;
-import org.apache.ambari.server.orm.dao.HostComponentConfigMappingDAO;
-import org.apache.ambari.server.orm.dao.HostComponentDesiredConfigMappingDAO;
-import org.apache.ambari.server.orm.dao.HostComponentDesiredStateDAO;
-import org.apache.ambari.server.orm.dao.HostComponentStateDAO;
-import org.apache.ambari.server.orm.dao.HostDAO;
-import org.apache.ambari.server.orm.dao.ServiceComponentDesiredStateDAO;
-import org.apache.ambari.server.orm.entities.HostComponentConfigMappingEntity;
-import org.apache.ambari.server.orm.entities.HostComponentDesiredConfigMappingEntity;
-import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
-import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntityPK;
-import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
-import org.apache.ambari.server.orm.entities.HostComponentStateEntityPK;
-import org.apache.ambari.server.orm.entities.HostEntity;
-import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity;
-import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntityPK;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.Host;
-import org.apache.ambari.server.state.ServiceComponent;
-import org.apache.ambari.server.state.ServiceComponentHost;
-import org.apache.ambari.server.state.ServiceComponentHostEvent;
-import org.apache.ambari.server.state.ServiceComponentHostEventType;
-import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.state.State;
-import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
-import org.apache.ambari.server.state.fsm.SingleArcTransition;
-import org.apache.ambari.server.state.fsm.StateMachine;
-import org.apache.ambari.server.state.fsm.StateMachineFactory;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.gson.Gson;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.assistedinject.Assisted;
-import com.google.inject.assistedinject.AssistedInject;
-import com.google.inject.persist.Transactional;
-
-public class ServiceComponentHostImpl implements ServiceComponentHost {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ServiceComponentHostImpl.class);
-
-  // FIXME need more debug logs
-
-  private final Lock readLock;
-  private final Lock writeLock;
-
-  private final ServiceComponent serviceComponent;
-  private final Host host;
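-  // Set to true once the backing state entities have been saved to the database.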
-  private boolean persisted = false;
-
-  @Inject
-  Gson gson;
-  @Inject
-  HostComponentStateDAO hostComponentStateDAO;
-  @Inject
-  HostComponentDesiredStateDAO hostComponentDesiredStateDAO;
-  @Inject
-  HostDAO hostDAO;
-  @Inject
-  ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO;
-  @Inject
-  Clusters clusters;
-  @Inject
-  HostComponentDesiredConfigMappingDAO
-      hostComponentDesiredConfigMappingDAO;
-  @Inject
-  HostComponentConfigMappingDAO
-      hostComponentConfigMappingDAO;
-
-  private HostComponentStateEntity stateEntity;
-  private HostComponentDesiredStateEntity desiredStateEntity;
-
-  private Map<String, String> configs;
-  private Map<String, String> desiredConfigs;
-
-  private long lastOpStartTime;
-  private long lastOpEndTime;
-  private long lastOpLastUpdateTime;
-
-  private static final StateMachineFactory
-  <ServiceComponentHostImpl, State,
-  ServiceComponentHostEventType, ServiceComponentHostEvent>
-    daemonStateMachineFactory
-      = new StateMachineFactory<ServiceComponentHostImpl,
-          State, ServiceComponentHostEventType,
-          ServiceComponentHostEvent>
-          (State.INIT)
-
-  // define the state machine of a HostServiceComponent for runnable
-  // components
-
-     .addTransition(State.INIT,
-         State.INSTALLING,
-         ServiceComponentHostEventType.HOST_SVCCOMP_INSTALL,
-         new ServiceComponentHostOpStartedTransition())
-     .addTransition(State.INSTALLING,
-         State.INSTALLED,
-         ServiceComponentHostEventType.HOST_SVCCOMP_OP_SUCCEEDED,
-         new ServiceComponentHostOpCompletedTransition())
-
-     .addTransition(State.INSTALLED,
-         State.INSTALLED,
-         ServiceComponentHostEventType.HOST_SVCCOMP_OP_SUCCEEDED,
-         new ServiceComponentHostOpCompletedTransition())
-
-     .addTransition(State.INSTALLING,
-         State.INSTALLING,
-         ServiceComponentHostEventType.HOST_SVCCOMP_OP_IN_PROGRESS,
-         new ServiceComponentHostOpInProgressTransition())
-     .addTransition(State.INSTALLING,
-         State.INSTALLING,
-         ServiceComponentHostEventType.HOST_SVCCOMP_INSTALL,
-         new ServiceComponentHostOpStartedTransition())
-
-     .addTransition(State.INSTALLING,
-         State.INSTALL_FAILED,
-         ServiceComponentHostEventType.HOST_SVCCOMP_OP_FAILED,
-         new ServiceComponentHostOpCompletedTransition())
-
-     .addTransition(State.INSTALL_FAILED,
-         State.INSTALLING,
-         ServiceComponentHostEventType.HOST_SVCCOMP_OP_RESTART,
-         new ServiceComponentHostOpStartedTransition())
-     .addTransition(State.INSTALL_FAILED,
-         State.INSTALLING,
-         ServiceComponentHostEventType.HOST_SVCCOMP_INSTALL,
-         new ServiceComponentHostOpStartedTransition())
-
-     .addTransition(State.INSTALLED,
-         State.STARTING,
-         ServiceComponentHostEventType.HOST_SVCCOMP_START,
-         new ServiceComponentHostOpStartedTransition())
-     .addTransition(State.INSTALLED,
-         State.UNINSTALLING,
-         ServiceComponentHostEventType.HOST_SVCCOMP_UNINSTALL,
-         new ServiceComponentHostOpStartedTransition())
-     .addTransition(State.INSTALLED,
-         State.INSTALLING,
-         ServiceComponentHostEventType.HOST_SVCCOMP_INSTALL,
-         new ServiceComponentHostOpStartedTransition())
-     .addTransition(State.INSTALLED,
-         State.STOPPING,
-         ServiceComponentHostEventType.HOST_SVCCOMP_STOP,
-         new ServiceComponentHostOpStartedTransition())
-
-     .addTransition(State.STARTING,
-         State.STARTING,
-         ServiceComponentHostEventType.HOST_SVCCOMP_OP_IN_PROGRESS,
-         new ServiceComponentHostOpInProgressTransition())
-
-     .addTransition(State.STARTING,
-         State.STARTING,
-         ServiceComponentHostEventType.HOST_SVCCOMP_START,
-         new ServiceComponentHostOpStartedTransition())
-
-     .addTransition(State.STARTING,
-         State.STARTED,
-         ServiceComponentHostEventType.HOST_SVCCOMP_OP_SUCCEEDED,
-         new ServiceComponentHostOpCompletedTransition())
-
-     .addTransition(State.STARTING,
-         State.START_FAILED,
-         ServiceComponentHostEventType.HOST_SVCCOMP_OP_FAILED,
-         new ServiceComponentHostOpCompletedTransition())
-
-     .addTransition(State.START_FAILED,
-         State.STARTING,
-         ServiceComponentHostEventType.HOST_SVCCOMP_OP_RESTART,
-         new ServiceComponentHostOpStartedTransition())
-     .addTransition(State.START_FAILED,
-         State.STARTING,
-         ServiceComponentHostEventType.HOST_SVCCOMP_START,
-         new ServiceComponentHostOpStartedTransition())
-     .addTransition(State.START_FAILED,
-         State.STOPPING,
-         ServiceComponentHostEventType.HOST_SVCCOMP_STOP,
-         new ServiceComponentHostOpStartedTransition())
-
-     .addTransition(State.STARTED,
-         State.STOPPING,
-         ServiceComponentHostEventType.HOST_SVCCOMP_STOP,
-         new ServiceComponentHostOpStartedTransition())
-
-     .addTransition(State.STOPPING,
-         State.STOPPING,
-         ServiceComponentHostEventType.HOST_SVCCOMP_OP_IN_PROGRESS,
-         new ServiceComponentHostOpInProgressTransition())
-     .addTransition(State.STOPPING,
-         State.INSTALLED,
-         ServiceComponentHostEventType.HOST_SVCCOMP_OP_SUCCEEDED,
-         new ServiceComponentHostOpCompletedTransition())
-     .addTransition(State.STOPPING,
-         State.STOP_FAILED,
-         ServiceComponentHostEventType.HOST_SVCCOMP_OP_FAILED,
-         new ServiceComponentHostOpCompletedTransition())
-
-     .addTransition(State.STOP_FAILED,
-         State.STOPPING,
-         ServiceComponentHostEventType.HOST_SVCCOMP_OP_RESTART,
-         new ServiceComponentHostOpStartedTransition())
-     .addTransition(State.STOP_FAILED,
-         State.STOPPING,
-         ServiceComponentHostEventType.HOST_SVCCOMP_STOP,
-         new ServiceComponentHostOpStartedTransition())
-
-     .addTransition(State.UNINSTALLING,
-         State.UNINSTALLING,
-         ServiceComponentHostEventType.HOST_SVCCOMP_OP_IN_PROGRESS,
-         new ServiceComponentHostOpInProgressTransition())
-     .addTransition(State.UNINSTALLING,
-         State.UNINSTALLED,
-         ServiceComponentHostEventType.HOST_SVCCOMP_OP_SUCCEEDED,
-         new ServiceComponentHostOpCompletedTransition())
-     .addTransition(State.UNINSTALLING,
-         State.UNINSTALL_FAILED,
-         ServiceComponentHostEventType.HOST_SVCCOMP_OP_FAILED,
-         new ServiceComponentHostOpCompletedTransition())
-
-     .addTransition(State.UNINSTALL_FAILED,
-         State.UNINSTALLING,
-         ServiceComponentHostEventType.HOST_SVCCOMP_OP_RESTART,
-         new ServiceComponentHostOpStartedTransition())
-     .addTransition(State.UNINSTALL_FAILED,
-         State.UNINSTALLING,
-         ServiceComponentHostEventType.HOST_SVCCOMP_UNINSTALL,
-         new ServiceComponentHostOpStartedTransition())
-
-     .addTransition(State.UNINSTALLED,
-         State.INSTALLING,
-         ServiceComponentHostEventType.HOST_SVCCOMP_INSTALL,
-         new ServiceComponentHostOpStartedTransition())
-
-     .addTransition(State.UNINSTALLED,
-         State.WIPING_OUT,
-         ServiceComponentHostEventType.HOST_SVCCOMP_WIPEOUT,
-         new ServiceComponentHostOpStartedTransition())
-
-     .addTransition(State.WIPING_OUT,
-         State.WIPING_OUT,
-         ServiceComponentHostEventType.HOST_SVCCOMP_OP_IN_PROGRESS,
-         new ServiceComponentHostOpInProgressTransition())
-     .addTransition(State.WIPING_OUT,
-         State.INIT,
-         ServiceComponentHostEventType.HOST_SVCCOMP_OP_SUCCEEDED,
-         new ServiceComponentHostOpCompletedTransition())
-     .addTransition(State.WIPING_OUT,
-         State.WIPEOUT_FAILED,
-         ServiceComponentHostEventType.HOST_SVCCOMP_OP_FAILED,
-         new ServiceComponentHostOpCompletedTransition())
-
-     .addTransition(State.WIPEOUT_FAILED,
-         State.WIPING_OUT,
-         ServiceComponentHostEventType.HOST_SVCCOMP_OP_RESTART,
-         new ServiceComponentHostOpStartedTransition())
-     .addTransition(State.WIPEOUT_FAILED,
-         State.WIPING_OUT,
-         ServiceComponentHostEventType.HOST_SVCCOMP_WIPEOUT,
-         new ServiceComponentHostOpStartedTransition())
-
-     .installTopology();
-
-  private static final StateMachineFactory
-  <ServiceComponentHostImpl, State,
-  ServiceComponentHostEventType, ServiceComponentHostEvent>
-    clientStateMachineFactory
-      = new StateMachineFactory<ServiceComponentHostImpl,
-          State, ServiceComponentHostEventType,
-          ServiceComponentHostEvent>
-          (State.INIT)
-
-  // define the state machine of a HostServiceComponent for client only
-  // components
-
-     .addTransition(State.INIT,
-         State.INSTALLING,
-         ServiceComponentHostEventType.HOST_SVCCOMP_INSTALL,
-         new ServiceComponentHostOpStartedTransition())
-
-     .addTransition(State.INSTALLING,
-         State.INSTALLED,
-         ServiceComponentHostEventType.HOST_SVCCOMP_OP_SUCCEEDED,
-         new ServiceComponentHostOpCompletedTransition())
-
-     .addTransition(State.INSTALLED,
-         State.INSTALLED,
-         ServiceComponentHostEventType.HOST_SVCCOMP_OP_SUCCEEDED,
-         new ServiceComponentHostOpCompletedTransition())
-
-     .addTransition(State.INSTALLING,
-         State.INSTALLING,
-         ServiceComponentHostEventType.HOST_SVCCOMP_INSTALL,
-         new ServiceComponentHostOpStartedTransition())
-
-     .addTransition(State.INSTALLING,
-         State.INSTALLING,
-         ServiceComponentHostEventType.HOST_SVCCOMP_OP_IN_PROGRESS,
-         new ServiceComponentHostOpInProgressTransition())
-     .addTransition(State.INSTALLING,
-         State.INSTALL_FAILED,
-         ServiceComponentHostEventType.HOST_SVCCOMP_OP_FAILED,
-         new ServiceComponentHostOpCompletedTransition())
-
-     .addTransition(State.INSTALL_FAILED,
-         State.INSTALLING,
-         ServiceComponentHostEventType.HOST_SVCCOMP_OP_RESTART,
-         new ServiceComponentHostOpStartedTransition())
-     .addTransition(State.INSTALL_FAILED,
-         State.INSTALLING,
-         ServiceComponentHostEventType.HOST_SVCCOMP_INSTALL,
-         new ServiceComponentHostOpStartedTransition())
-
-     .addTransition(State.INSTALLED,
-         State.UNINSTALLING,
-         ServiceComponentHostEventType.HOST_SVCCOMP_UNINSTALL,
-         new ServiceComponentHostOpStartedTransition())
-     .addTransition(State.INSTALLED,
-         State.INSTALLING,
-         ServiceComponentHostEventType.HOST_SVCCOMP_INSTALL,
-         new ServiceComponentHostOpStartedTransition())
-
-     .addTransition(State.UNINSTALLING,
-         State.UNINSTALLING,
-         ServiceComponentHostEventType.HOST_SVCCOMP_OP_IN_PROGRESS,
-         new ServiceComponentHostOpInProgressTransition())
-     .addTransition(State.UNINSTALLING,
-         State.UNINSTALLED,
-         ServiceComponentHostEventType.HOST_SVCCOMP_OP_SUCCEEDED,
-         new ServiceComponentHostOpCompletedTransition())
-     .addTransition(State.UNINSTALLING,
-         State.UNINSTALL_FAILED,
-         ServiceComponentHostEventType.HOST_SVCCOMP_OP_FAILED,
-         new ServiceComponentHostOpCompletedTransition())
-
-     .addTransition(State.UNINSTALL_FAILED,
-         State.UNINSTALLING,
-         ServiceComponentHostEventType.HOST_SVCCOMP_OP_RESTART,
-         new ServiceComponentHostOpStartedTransition())
-     .addTransition(State.UNINSTALL_FAILED,
-         State.UNINSTALLING,
-         ServiceComponentHostEventType.HOST_SVCCOMP_UNINSTALL,
-         new ServiceComponentHostOpStartedTransition())
-
-     .addTransition(State.UNINSTALLED,
-         State.INSTALLING,
-         ServiceComponentHostEventType.HOST_SVCCOMP_INSTALL,
-         new ServiceComponentHostOpStartedTransition())
-
-     .addTransition(State.UNINSTALLED,
-         State.WIPING_OUT,
-         ServiceComponentHostEventType.HOST_SVCCOMP_WIPEOUT,
-         new ServiceComponentHostOpStartedTransition())
-
-     .addTransition(State.WIPING_OUT,
-         State.WIPING_OUT,
-         ServiceComponentHostEventType.HOST_SVCCOMP_OP_IN_PROGRESS,
-         new ServiceComponentHostOpInProgressTransition())
-     .addTransition(State.WIPING_OUT,
-         State.INIT,
-         ServiceComponentHostEventType.HOST_SVCCOMP_OP_SUCCEEDED,
-         new ServiceComponentHostOpCompletedTransition())
-     .addTransition(State.WIPING_OUT,
-         State.WIPEOUT_FAILED,
-         ServiceComponentHostEventType.HOST_SVCCOMP_OP_FAILED,
-         new ServiceComponentHostOpCompletedTransition())
-
-     .addTransition(State.WIPEOUT_FAILED,
-         State.WIPING_OUT,
-         ServiceComponentHostEventType.HOST_SVCCOMP_OP_RESTART,
-         new ServiceComponentHostOpStartedTransition())
-     .addTransition(State.WIPEOUT_FAILED,
-         State.WIPING_OUT,
-         ServiceComponentHostEventType.HOST_SVCCOMP_WIPEOUT,
-         new ServiceComponentHostOpStartedTransition())
-
-     .installTopology();
-
-  private final StateMachine<State,
-      ServiceComponentHostEventType, ServiceComponentHostEvent> stateMachine;
-
-  static class ServiceComponentHostOpCompletedTransition
-     implements SingleArcTransition<ServiceComponentHostImpl,
-         ServiceComponentHostEvent> {
-
-    @Override
-    public void transition(ServiceComponentHostImpl impl,
-        ServiceComponentHostEvent event) {
-      // TODO Audit logs
-      impl.updateLastOpInfo(event.getType(), event.getOpTimestamp());
-    }
-
-  }
-
-  static class ServiceComponentHostOpStartedTransition
-    implements SingleArcTransition<ServiceComponentHostImpl,
-        ServiceComponentHostEvent> {
-
-    @Override
-    public void transition(ServiceComponentHostImpl impl,
-        ServiceComponentHostEvent event) {
-      // TODO Audit logs
-      // FIXME handle restartOp event
-      impl.updateLastOpInfo(event.getType(), event.getOpTimestamp());
-      if (event.getType() == ServiceComponentHostEventType.HOST_SVCCOMP_START) {
-        ServiceComponentHostStartEvent e =
-            (ServiceComponentHostStartEvent) event;
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Updating live state configs during START event"
-              + ", updated configs set size=" + e.getConfigs().size());
-        }
-        impl.setConfigs(e.getConfigs());
-      } else if (event.getType() ==
-          ServiceComponentHostEventType.HOST_SVCCOMP_INSTALL) {
-        ServiceComponentHostInstallEvent e =
-            (ServiceComponentHostInstallEvent) event;
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Updating live stack version during INSTALL event"
-              + ", new stack verion=" + e.getStackId());
-        }
-        impl.setStackVersion(new StackId(e.getStackId()));
-      }
-    }
-  }
-
-  static class ServiceComponentHostOpInProgressTransition
-    implements SingleArcTransition<ServiceComponentHostImpl,
-        ServiceComponentHostEvent> {
-
-    @Override
-    public void transition(ServiceComponentHostImpl impl,
-        ServiceComponentHostEvent event) {
-      // TODO Audit logs
-      impl.updateLastOpInfo(event.getType(), event.getOpTimestamp());
-    }
-  }
-
-  private void resetLastOpInfo() {
-    try {
-      writeLock.lock();
-      setLastOpStartTime(-1);
-      setLastOpLastUpdateTime(-1);
-      setLastOpEndTime(-1);
-    }
-    finally {
-      writeLock.unlock();
-    }
-  }
-
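-  // Bookkeeping for operation timing: lifecycle-start events reset the
-  // window and record a new start time, while in-progress and terminal
-  // events only advance the update/end times.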
-  private void updateLastOpInfo(ServiceComponentHostEventType eventType,
-      long time) {
-    try {
-      writeLock.lock();
-      switch (eventType) {
-        case HOST_SVCCOMP_INSTALL:
-        case HOST_SVCCOMP_START:
-        case HOST_SVCCOMP_STOP:
-        case HOST_SVCCOMP_UNINSTALL:
-        case HOST_SVCCOMP_WIPEOUT:
-        case HOST_SVCCOMP_OP_RESTART:
-          resetLastOpInfo();
-          setLastOpStartTime(time);
-          break;
-        case HOST_SVCCOMP_OP_FAILED:
-        case HOST_SVCCOMP_OP_SUCCEEDED:
-          setLastOpLastUpdateTime(time);
-          setLastOpEndTime(time);
-          break;
-        case HOST_SVCCOMP_OP_IN_PROGRESS:
-          setLastOpLastUpdateTime(time);
-          break;
-      }
-    }
-    finally {
-      writeLock.unlock();
-    }
-  }
-
-  @AssistedInject
-  public ServiceComponentHostImpl(@Assisted ServiceComponent serviceComponent,
-                                  @Assisted String hostName, @Assisted boolean isClient, Injector injector) {
-    injector.injectMembers(this);
-
-    if (isClient) {
-      this.stateMachine = clientStateMachineFactory.make(this);
-    } else {
-      this.stateMachine = daemonStateMachineFactory.make(this);
-    }
-
-    ReadWriteLock rwLock = new ReentrantReadWriteLock();
-    this.readLock = rwLock.readLock();
-    this.writeLock = rwLock.writeLock();
-    this.serviceComponent = serviceComponent;
-
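-    // New, not-yet-persisted host component: seed fresh state entities from the parent component.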
-    stateEntity = new HostComponentStateEntity();
-    stateEntity.setClusterId(serviceComponent.getClusterId());
-    stateEntity.setComponentName(serviceComponent.getName());
-    stateEntity.setServiceName(serviceComponent.getServiceName());
-    stateEntity.setHostName(hostName);
-    stateEntity.setCurrentState(stateMachine.getCurrentState());
-    stateEntity.setCurrentStackVersion(gson.toJson(new StackId()));
-
-    desiredStateEntity = new HostComponentDesiredStateEntity();
-    desiredStateEntity.setClusterId(serviceComponent.getClusterId());
-    desiredStateEntity.setComponentName(serviceComponent.getName());
-    desiredStateEntity.setServiceName(serviceComponent.getServiceName());
-    desiredStateEntity.setHostName(hostName);
-    desiredStateEntity.setDesiredState(State.INIT);
-    desiredStateEntity.setDesiredStackVersion(
-        gson.toJson(serviceComponent.getDesiredStackVersion()));
-
-    try {
-      this.host = clusters.getHost(hostName);
-    } catch (AmbariException e) {
-      //TODO exception?
-      LOG.error("Host '{}' was not found" + hostName);
-      throw new RuntimeException(e);
-    }
-
-    this.resetLastOpInfo();
-    this.desiredConfigs = new HashMap<String, String>();
-    this.configs = new HashMap<String, String>();
-  }
-
-  @AssistedInject
-  public ServiceComponentHostImpl(@Assisted ServiceComponent serviceComponent,
-                                  @Assisted HostComponentStateEntity stateEntity,
-                                  @Assisted HostComponentDesiredStateEntity desiredStateEntity,
-                                  Injector injector) {
-    injector.injectMembers(this);
-    ReadWriteLock rwLock = new ReentrantReadWriteLock();
-    this.readLock = rwLock.readLock();
-    this.writeLock = rwLock.writeLock();
-    this.serviceComponent = serviceComponent;
-
-    this.desiredStateEntity = desiredStateEntity;
-    this.stateEntity = stateEntity;
-    //TODO implement proper state machine init; the client/daemon type choice is hardcoded here just as in the constructor above
-    if (serviceComponent.isClientComponent()) {
-      this.stateMachine = clientStateMachineFactory.make(this);
-    } else {
-      this.stateMachine = daemonStateMachineFactory.make(this);
-    }
-    this.stateMachine.setCurrentState(stateEntity.getCurrentState());
-
-    try {
-      this.host = clusters.getHost(stateEntity.getHostName());
-    } catch (AmbariException e) {
-      //TODO exception? impossible due to database restrictions
-      LOG.error("Host '{}' was not found " + stateEntity.getHostName());
-      throw new RuntimeException(e);
-    }
-
-    desiredConfigs = new HashMap<String, String>();
-    configs = new HashMap<String, String>();
-
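-    // Rehydrate desired config versions from the persisted mapping entities.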
-    for (HostComponentDesiredConfigMappingEntity entity : desiredStateEntity.getHostComponentDesiredConfigMappingEntities()) {
-      desiredConfigs.put(entity.getConfigType(), entity.getVersionTag());
-    }
-    // FIXME are live-state configs ever persisted to the DB? none are loaded here
-
-    persisted = true;
-  }
-
-  @Override
-  public State getState() {
-    try {
-      readLock.lock();
-      return stateMachine.getCurrentState();
-    }
-    finally {
-      readLock.unlock();
-    }
-  }
-
-  @Override
-  public void setState(State state) {
-    try {
-      writeLock.lock();
-      stateMachine.setCurrentState(state);
-      stateEntity.setCurrentState(state);
-      saveIfPersisted();
-    }
-    finally {
-      writeLock.unlock();
-    }
-  }
-
-  @Override
-  @Transactional
-  public void handleEvent(ServiceComponentHostEvent event)
-      throws InvalidStateTransitionException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Handling ServiceComponentHostEvent event,"
-          + " eventType=" + event.getType().name()
-          + ", event=" + event.toString());
-    }
-    State oldState = getState();
-    try {
-      writeLock.lock();
-      try {
-        stateMachine.doTransition(event.getType(), event);
-        stateEntity.setCurrentState(stateMachine.getCurrentState());
-        saveIfPersisted();
-        // TODO Audit logs
-      } catch (InvalidStateTransitionException e) {
-        LOG.error("Can't handle ServiceComponentHostEvent event at"
-            + " current state"
-            + ", serviceComponentName=" + this.getServiceComponentName()
-            + ", hostName=" + this.getHostName()
-            + ", currentState=" + oldState
-            + ", eventType=" + event.getType()
-            + ", event=" + event);
-        throw e;
-      }
-    }
-    finally {
-      writeLock.unlock();
-    }
-    if (!oldState.equals(getState())) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("ServiceComponentHost transitioned to a new state"
-            + ", serviceComponentName=" + this.getServiceComponentName()
-            + ", hostName=" + this.getHostName()
-            + ", oldState=" + oldState
-            + ", currentState=" + getState()
-            + ", eventType=" + event.getType().name()
-            + ", event=" + event);
-      }
-    }
-  }
-
-  @Override
-  public String getServiceComponentName() {
-    return serviceComponent.getName();
-  }
-
-  @Override
-  public String getHostName() {
-    return host.getHostName();
-  }
-
-  /**
-   * @return the lastOpStartTime
-   */
-  public long getLastOpStartTime() {
-    try {
-      readLock.lock();
-      return lastOpStartTime;
-    }
-    finally {
-      readLock.unlock();
-    }
-  }
-
-  /**
-   * @param lastOpStartTime the lastOpStartTime to set
-   */
-  public void setLastOpStartTime(long lastOpStartTime) {
-    try {
-      writeLock.lock();
-      this.lastOpStartTime = lastOpStartTime;
-    }
-    finally {
-      writeLock.unlock();
-    }
-  }
-
-  /**
-   * @return the lastOpEndTime
-   */
-  public long getLastOpEndTime() {
-    try {
-      readLock.lock();
-      return lastOpEndTime;
-    }
-    finally {
-      readLock.unlock();
-    }
-  }
-
-  /**
-   * @param lastOpEndTime the lastOpEndTime to set
-   */
-  public void setLastOpEndTime(long lastOpEndTime) {
-    try {
-      writeLock.lock();
-      this.lastOpEndTime = lastOpEndTime;
-    }
-    finally {
-      writeLock.unlock();
-    }
-  }
-
-  /**
-   * @return the lastOpLastUpdateTime
-   */
-  public long getLastOpLastUpdateTime() {
-    try {
-      readLock.lock();
-      return lastOpLastUpdateTime;
-    }
-    finally {
-      readLock.unlock();
-    }
-  }
-
-  /**
-   * @param lastOpLastUpdateTime the lastOpLastUpdateTime to set
-   */
-  public void setLastOpLastUpdateTime(long lastOpLastUpdateTime) {
-    try {
-      writeLock.lock();
-      this.lastOpLastUpdateTime = lastOpLastUpdateTime;
-    }
-    finally {
-      writeLock.unlock();
-    }
-  }
-
-  @Override
-  public long getClusterId() {
-    return serviceComponent.getClusterId();
-  }
-
-  @Override
-  public String getServiceName() {
-    return serviceComponent.getServiceName();
-  }
-
-  Map<String, String> getConfigVersions() {
-    try {
-      readLock.lock();
-      if (this.configs != null) {
-        return Collections.unmodifiableMap(configs);
-      } else {
-        return new HashMap<String, String>();
-      }
-    }
-    finally {
-      readLock.unlock();
-    }
-  }
-
-  @Override
-  public Map<String, Config> getConfigs() throws AmbariException {
-    try {
-      readLock.lock();
-      Map<String, Config> map = new HashMap<String, Config>();
-      Cluster cluster = clusters.getClusterById(getClusterId());
-      for (Entry<String, String> entry : configs.entrySet()) {
-        Config config = cluster.getDesiredConfig(
-            entry.getKey(), entry.getValue());
-        if (null != config) {
-          map.put(entry.getKey(), config);
-        }
-      }
-      return map;
-    }
-    finally {
-      readLock.unlock();
-    }
-  }
-
-  @Transactional
-  void setConfigs(Map<String, String> configs) {
-    try {
-      writeLock.lock();
-
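-      // Collect config types that are no longer present so their mapping entities can be removed below.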
-      Set<String> deletedTypes = new HashSet<String>();
-      for (String type : this.configs.keySet()) {
-        if (!configs.containsKey(type)) {
-          deletedTypes.add(type);
-        }
-      }
-
-      long now = System.currentTimeMillis();
-
-      for (Entry<String,String> entry : configs.entrySet()) {
-
-        boolean contains = false;
-        for (HostComponentConfigMappingEntity mappingEntity : stateEntity.getHostComponentConfigMappingEntities()) {
-          if (entry.getKey().equals(mappingEntity.getConfigType())) {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Updating live config to ServiceComponentHost"
-                  + ", clusterId=" + stateEntity.getClusterId()
-                  + ", serviceName=" + stateEntity.getServiceName()
-                  + ", componentName=" + stateEntity.getComponentName()
-                  + ", hostname=" + stateEntity.getHostName()
-                  + ", configType=" + entry.getKey()
-                  + ", configVersionTag=" + entry.getValue());
-            }
-            contains = true;
-            mappingEntity.setVersionTag(entry.getValue());
-            mappingEntity.setTimestamp(now);
-            break;
-          }
-        }
-
-        if (!contains) {
-          HostComponentConfigMappingEntity newEntity =
-              new HostComponentConfigMappingEntity();
-          newEntity.setClusterId(stateEntity.getClusterId());
-          newEntity.setServiceName(stateEntity.getServiceName());
-          newEntity.setComponentName(stateEntity.getComponentName());
-          newEntity.setHostName(stateEntity.getHostName());
-          newEntity.setConfigType(entry.getKey());
-          newEntity.setVersionTag(entry.getValue());
-          newEntity.setTimestamp(now);
-
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Adding new live config to ServiceComponentHost"
-                + ", clusterId=" + stateEntity.getClusterId()
-                + ", serviceName=" + stateEntity.getServiceName()
-                + ", componentName=" + stateEntity.getComponentName()
-                + ", hostname=" + stateEntity.getHostName()
-                + ", configType=" + entry.getKey()
-                + ", configVersionTag=" + entry.getValue());
-          }
-          stateEntity.getHostComponentConfigMappingEntities().add(newEntity);
-          newEntity.setHostComponentStateEntity(stateEntity);
-
-        }
-      }
-
-      if (!deletedTypes.isEmpty()) {
-        List<HostComponentConfigMappingEntity> deleteEntities =
-            hostComponentConfigMappingDAO.findByHostComponentAndType(
-                stateEntity.getClusterId(), stateEntity.getServiceName(),
-                stateEntity.getComponentName(),
-                stateEntity.getHostName(), deletedTypes);
-        for (HostComponentConfigMappingEntity deleteEntity : deleteEntities) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Deleting live config to ServiceComponentHost"
-                + ", clusterId="  + stateEntity.getClusterId()
-                + ", serviceName=" + stateEntity.getServiceName()
-                + ", componentName=" + stateEntity.getComponentName()
-                + ", hostname=" + stateEntity.getHostName()
-                + ", configType=" + deleteEntity.getConfigType()
-                + ", configVersionTag=" + deleteEntity.getVersionTag());
-          }
-          stateEntity.getHostComponentConfigMappingEntities().remove(
-              deleteEntity);
-          if (persisted) {
-            hostComponentConfigMappingDAO.remove(deleteEntity);
-          }
-        }
-      }
-      this.configs = configs;
-      saveIfPersisted();
-    } finally {
-      writeLock.unlock();
-    }
-  }
-
-  @Override
-  public StackId getStackVersion() {
-    try {
-      readLock.lock();
-      return gson.fromJson(stateEntity.getCurrentStackVersion(), StackId.class);
-    }
-    finally {
-      readLock.unlock();
-    }
-  }
-
-  @Override
-  public void setStackVersion(StackId stackVersion) {
-    try {
-      writeLock.lock();
-      stateEntity.setCurrentStackVersion(gson.toJson(stackVersion));
-      saveIfPersisted();
-    }
-    finally {
-      writeLock.unlock();
-    }
-  }
-
-  @Override
-  public State getDesiredState() {
-    try {
-      readLock.lock();
-      return desiredStateEntity.getDesiredState();
-    }
-    finally {
-      readLock.unlock();
-    }
-  }
-
-  @Override
-  public void setDesiredState(State state) {
-    try {
-      writeLock.lock();
-      desiredStateEntity.setDesiredState(state);
-      saveIfPersisted();
-    }
-    finally {
-      writeLock.unlock();
-    }
-  }
-
-  @Override
-  public Map<String, String> getDesiredConfigVersionsRecursive() {
-    try {
-      readLock.lock();
-      Map<String, String> fullDesiredConfigVersions =
-          new HashMap<String, String>();
-      Map<String, Config> desiredConfs = getDesiredConfigs();
-      for (Config c : desiredConfs.values()) {
-        fullDesiredConfigVersions.put(c.getType(), c.getVersionTag());
-      }
-      return fullDesiredConfigVersions;
-    }
-    finally {
-      readLock.unlock();
-    }
-  }
-
-  @Override
-  public Map<String, Config> getDesiredConfigs() {
-    Map<String, Config> map = new HashMap<String, Config>();
-    try {
-      readLock.lock();
-      for (Entry<String, String> entry : desiredConfigs.entrySet()) {
-        Config config = clusters.getClusterById(getClusterId()).getDesiredConfig(
-            entry.getKey(), entry.getValue());
-        if (null != config) {
-          map.put(entry.getKey(), config);
-        }
-      }
-    }
-    catch (AmbariException e) {
-      // TODO proper error handling; returning null here masks a missing cluster
-      return null;
-    }
-    finally {
-      readLock.unlock();
-    }
-    // do a union with component level configs
-    Map<String, Config> compConfigs = serviceComponent.getDesiredConfigs();
-    for (Entry<String, Config> entry : compConfigs.entrySet()) {
-      if (!map.containsKey(entry.getKey())) {
-        map.put(entry.getKey(), entry.getValue());
-      }
-    }
-    return Collections.unmodifiableMap(map);
-  }
-
-  @Override
-  @Transactional
-  public void updateDesiredConfigs(Map<String, Config> configs) {
-    try {
-      writeLock.lock();
-
-      for (Entry<String,Config> entry : configs.entrySet()) {
-
-        boolean contains = false;
-        for (HostComponentDesiredConfigMappingEntity desiredConfigMappingEntity : desiredStateEntity.getHostComponentDesiredConfigMappingEntities()) {
-          if (entry.getKey().equals(desiredConfigMappingEntity.getConfigType())) {
-            contains = true;
-            desiredConfigMappingEntity.setVersionTag(entry.getValue().getVersionTag());
-            desiredConfigMappingEntity.setTimestamp(System.currentTimeMillis());
-            break;
-          }
-        }
-
-        if (!contains) {
-          HostComponentDesiredConfigMappingEntity newEntity = new HostComponentDesiredConfigMappingEntity();
-          newEntity.setClusterId(desiredStateEntity.getClusterId());
-          newEntity.setServiceName(desiredStateEntity.getServiceName());
-          newEntity.setComponentName(desiredStateEntity.getComponentName());
-          newEntity.setHostName(desiredStateEntity.getHostName());
-          newEntity.setConfigType(entry.getKey());
-          newEntity.setVersionTag(entry.getValue().getVersionTag());
-          newEntity.setTimestamp(System.currentTimeMillis());
-          newEntity.setHostComponentDesiredStateEntity(desiredStateEntity);
-          desiredStateEntity.getHostComponentDesiredConfigMappingEntities().add(newEntity);
-        }
-
-        this.desiredConfigs.put(entry.getKey(), entry.getValue().getVersionTag());
-      }
-
-      saveIfPersisted();
-    }
-    finally {
-      writeLock.unlock();
-    }
-  }
-
-  @Override
-  public StackId getDesiredStackVersion() {
-    try {
-      readLock.lock();
-      return gson.fromJson(desiredStateEntity.getDesiredStackVersion(), StackId.class);
-    }
-    finally {
-      readLock.unlock();
-    }
-  }
-
-  @Override
-  public void setDesiredStackVersion(StackId stackVersion) {
-    try {
-      writeLock.lock();
-      desiredStateEntity.setDesiredStackVersion(gson.toJson(stackVersion));
-      saveIfPersisted();
-    }
-    finally {
-      writeLock.unlock();
-    }
-  }
-
-  @Override
-  public ServiceComponentHostResponse convertToResponse() {
-    try {
-      readLock.lock();
-      ServiceComponentHostResponse r = new ServiceComponentHostResponse(
-          serviceComponent.getClusterName(),
-          serviceComponent.getServiceName(),
-          serviceComponent.getName(),
-          getHostName(),
-          configs,
-          desiredConfigs,
-          getState().toString(),
-          getStackVersion().getStackId(),
-          getDesiredState().toString());
-      return r;
-    }
-    finally {
-      readLock.unlock();
-    }
-  }
-
-  @Override
-  public String getClusterName() {
-    return serviceComponent.getClusterName();
-  }
-
-  @Override
-  public void debugDump(StringBuilder sb) {
-    try {
-      readLock.lock();
-      sb.append("ServiceComponentHost={ hostname=" + getHostName()
-          + ", serviceComponentName=" + serviceComponent.getName()
-          + ", clusterName=" + serviceComponent.getClusterName()
-          + ", serviceName=" + serviceComponent.getServiceName()
-          + ", desiredStackVersion=" + getDesiredStackVersion()
-          + ", desiredState=" + getDesiredState()
-          + ", stackVersion=" + getStackVersion()
-          + ", state=" + getState()
-          + " }");
-    }
-    finally {
-      readLock.unlock();
-    }
-  }
-
-  @Override
-  public boolean isPersisted() {
-    try {
-      readLock.lock();
-      return persisted;
-    } finally {
-      readLock.unlock();
-    }
-  }
-
-  @Override
-  public void persist() {
-    try {
-      writeLock.lock();
-      if (!persisted) {
-        persistEntities();
-        refresh();
-        host.refresh();
-        serviceComponent.refresh();
-        persisted = true;
-      } else {
-        saveIfPersisted();
-      }
-    } finally {
-      writeLock.unlock();
-    }
-  }
-
-  @Transactional
-  protected void persistEntities() {
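-    // Wire the new state entities into the owning host and service component rows before creating them.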
-    HostEntity hostEntity = hostDAO.findByName(getHostName());
-    hostEntity.getHostComponentStateEntities().add(stateEntity);
-    hostEntity.getHostComponentDesiredStateEntities().add(desiredStateEntity);
-
-    ServiceComponentDesiredStateEntityPK dpk = new ServiceComponentDesiredStateEntityPK();
-    dpk.setClusterId(serviceComponent.getClusterId());
-    dpk.setServiceName(serviceComponent.getServiceName());
-    dpk.setComponentName(serviceComponent.getName());
-
-    ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity = serviceComponentDesiredStateDAO.findByPK(dpk);
-    serviceComponentDesiredStateEntity.getHostComponentDesiredStateEntities().add(desiredStateEntity);
-
-    desiredStateEntity.setServiceComponentDesiredStateEntity(serviceComponentDesiredStateEntity);
-    desiredStateEntity.setHostEntity(hostEntity);
-    stateEntity.setServiceComponentDesiredStateEntity(serviceComponentDesiredStateEntity);
-    stateEntity.setHostEntity(hostEntity);
-
-    hostComponentStateDAO.create(stateEntity);
-    hostComponentDesiredStateDAO.create(desiredStateEntity);
-
-    serviceComponentDesiredStateDAO.merge(serviceComponentDesiredStateEntity);
-    hostDAO.merge(hostEntity);
-  }
-
-  @Override
-  @Transactional
-  public synchronized void refresh() {
-    if (isPersisted()) {
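-      // Re-read both state entities by primary key so the in-memory view matches the database.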
-      HostComponentStateEntityPK pk = new HostComponentStateEntityPK();
-      HostComponentDesiredStateEntityPK dpk = new HostComponentDesiredStateEntityPK();
-      pk.setClusterId(getClusterId());
-      pk.setComponentName(getServiceComponentName());
-      pk.setServiceName(getServiceName());
-      pk.setHostName(getHostName());
-      dpk.setClusterId(getClusterId());
-      dpk.setComponentName(getServiceComponentName());
-      dpk.setServiceName(getServiceName());
-      dpk.setHostName(getHostName());
-      stateEntity = hostComponentStateDAO.findByPK(pk);
-      desiredStateEntity = hostComponentDesiredStateDAO.findByPK(dpk);
-      hostComponentStateDAO.refresh(stateEntity);
-      hostComponentDesiredStateDAO.refresh(desiredStateEntity);
-    }
-  }
-
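-  // Merge in-memory entity changes back to the database, but only after persist() has run.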
-  @Transactional
-  private void saveIfPersisted() {
-    if (isPersisted()) {
-      hostComponentStateDAO.merge(stateEntity);
-      hostComponentDesiredStateDAO.merge(desiredStateEntity);
-    }
-  }
-
-  @Override
-  public synchronized boolean canBeRemoved() {
-    try {
-      readLock.lock();
-
-      return (getDesiredState().isRemovableState() &&
-              getState().isRemovableState());
-
-    } finally {
-      readLock.unlock();
-    }
-  }
-
-  @Override
-  public void deleteDesiredConfigs(Set<String> configTypes) {
-    try {
-      writeLock.lock();
-      hostComponentDesiredConfigMappingDAO.removeByType(configTypes);
-      for (String configType : configTypes) {
-        desiredConfigs.remove(configType);
-      }
-    } finally {
-      writeLock.unlock();
-    }
-  }
-
-  @Override
-  public void delete() throws AmbariException {
-    try {
-      writeLock.lock();
-      if (persisted) {
-        removeEntities();
-        persisted = false;
-      }
-      desiredConfigs.clear();
-    } finally {
-      writeLock.unlock();
-    }
-  }
-
-  @Transactional
-  protected void removeEntities() {
-    HostComponentStateEntityPK pk = new HostComponentStateEntityPK();
-    pk.setClusterId(stateEntity.getClusterId());
-    pk.setComponentName(stateEntity.getComponentName());
-    pk.setServiceName(stateEntity.getServiceName());
-    pk.setHostName(stateEntity.getHostName());
-
-    hostComponentStateDAO.removeByPK(pk);
-
-    HostComponentDesiredStateEntityPK desiredPK = new HostComponentDesiredStateEntityPK();
-    desiredPK.setClusterId(desiredStateEntity.getClusterId());
-    desiredPK.setComponentName(desiredStateEntity.getComponentName());
-    desiredPK.setServiceName(desiredStateEntity.getServiceName());
-    desiredPK.setHostName(desiredStateEntity.getHostName());
-
-    hostComponentDesiredStateDAO.removeByPK(desiredPK);
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostInstallEvent.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostInstallEvent.java
deleted file mode 100644
index ff10789..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostInstallEvent.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.svccomphost;
-
-import org.apache.ambari.server.state.ServiceComponentHostEvent;
-import org.apache.ambari.server.state.ServiceComponentHostEventType;
-
-public class ServiceComponentHostInstallEvent extends
-    ServiceComponentHostEvent {
-
-  public ServiceComponentHostInstallEvent(String serviceComponentName,
-      String hostName, long opTimestamp, String stackId) {
-    super(ServiceComponentHostEventType.HOST_SVCCOMP_INSTALL,
-        serviceComponentName, hostName, opTimestamp, stackId);
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostOpFailedEvent.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostOpFailedEvent.java
deleted file mode 100644
index f6cc79f..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostOpFailedEvent.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.svccomphost;
-
-import org.apache.ambari.server.state.ServiceComponentHostEvent;
-import org.apache.ambari.server.state.ServiceComponentHostEventType;
-
-public class ServiceComponentHostOpFailedEvent extends
-    ServiceComponentHostEvent {
-
-  public ServiceComponentHostOpFailedEvent(String serviceComponentName,
-      String hostName, long opTimestamp) {
-    super(ServiceComponentHostEventType.HOST_SVCCOMP_OP_FAILED,
-        serviceComponentName, hostName, opTimestamp);
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostOpInProgressEvent.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostOpInProgressEvent.java
deleted file mode 100644
index eac49d6..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostOpInProgressEvent.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.svccomphost;
-
-import org.apache.ambari.server.state.ServiceComponentHostEvent;
-import org.apache.ambari.server.state.ServiceComponentHostEventType;
-
-public class ServiceComponentHostOpInProgressEvent extends
-    ServiceComponentHostEvent {
-
-  public ServiceComponentHostOpInProgressEvent(String serviceComponentName,
-      String hostName, long opTimestamp) {
-    super(ServiceComponentHostEventType.HOST_SVCCOMP_OP_IN_PROGRESS,
-        serviceComponentName, hostName, opTimestamp);
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostOpRestartedEvent.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostOpRestartedEvent.java
deleted file mode 100644
index 60d046c..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostOpRestartedEvent.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.svccomphost;
-
-import org.apache.ambari.server.state.ServiceComponentHostEvent;
-import org.apache.ambari.server.state.ServiceComponentHostEventType;
-
-public class ServiceComponentHostOpRestartedEvent extends
-    ServiceComponentHostEvent {
-
-  public ServiceComponentHostOpRestartedEvent(String serviceComponentName,
-      String hostName, long opTimestamp) {
-    super(ServiceComponentHostEventType.HOST_SVCCOMP_OP_RESTART,
-        serviceComponentName, hostName, opTimestamp);
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostOpSucceededEvent.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostOpSucceededEvent.java
deleted file mode 100644
index 690204e..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostOpSucceededEvent.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.svccomphost;
-
-import org.apache.ambari.server.state.ServiceComponentHostEvent;
-import org.apache.ambari.server.state.ServiceComponentHostEventType;
-
-public class ServiceComponentHostOpSucceededEvent extends
-    ServiceComponentHostEvent {
-
-  public ServiceComponentHostOpSucceededEvent(String serviceComponentName,
-      String hostName, long opTimestamp) {
-    super(ServiceComponentHostEventType.HOST_SVCCOMP_OP_SUCCEEDED,
-        serviceComponentName, hostName, opTimestamp);
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostStartEvent.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostStartEvent.java
deleted file mode 100644
index 84febf8..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostStartEvent.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.svccomphost;
-
-import java.util.Map;
-
-import org.apache.ambari.server.state.ServiceComponentHostEvent;
-import org.apache.ambari.server.state.ServiceComponentHostEventType;
-
-public class ServiceComponentHostStartEvent extends
-    ServiceComponentHostEvent {
-
-  public ServiceComponentHostStartEvent(String serviceComponentName,
-      String hostName, long opTimestamp, Map<String, String> configs) {
-    super(ServiceComponentHostEventType.HOST_SVCCOMP_START,
-        serviceComponentName, hostName, opTimestamp, configs);
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostStopEvent.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostStopEvent.java
deleted file mode 100644
index 55d9017..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostStopEvent.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.svccomphost;
-
-import org.apache.ambari.server.state.ServiceComponentHostEvent;
-import org.apache.ambari.server.state.ServiceComponentHostEventType;
-
-public class ServiceComponentHostStopEvent extends
-    ServiceComponentHostEvent {
-
-  public ServiceComponentHostStopEvent(String serviceComponentName,
-      String hostName, long opTimestamp) {
-    super(ServiceComponentHostEventType.HOST_SVCCOMP_STOP,
-        serviceComponentName, hostName, opTimestamp);
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostUninstallEvent.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostUninstallEvent.java
deleted file mode 100644
index b224ea8..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostUninstallEvent.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.svccomphost;
-
-import org.apache.ambari.server.state.ServiceComponentHostEvent;
-import org.apache.ambari.server.state.ServiceComponentHostEventType;
-
-public class ServiceComponentHostUninstallEvent extends
-    ServiceComponentHostEvent {
-
-  public ServiceComponentHostUninstallEvent(String serviceComponentName,
-      String hostName, long opTimestamp) {
-    super(ServiceComponentHostEventType.HOST_SVCCOMP_UNINSTALL,
-        serviceComponentName, hostName, opTimestamp);
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostWipeoutEvent.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostWipeoutEvent.java
deleted file mode 100644
index 198597d..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostWipeoutEvent.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.svccomphost;
-
-import org.apache.ambari.server.state.ServiceComponentHostEvent;
-import org.apache.ambari.server.state.ServiceComponentHostEventType;
-
-public class ServiceComponentHostWipeoutEvent extends
-    ServiceComponentHostEvent {
-
-  public ServiceComponentHostWipeoutEvent(String serviceComponentName,
-      String hostName, long opTimestamp) {
-    super(ServiceComponentHostEventType.HOST_SVCCOMP_WIPEOUT,
-        serviceComponentName, hostName, opTimestamp);
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/utils/JaxbMapKeyList.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/utils/JaxbMapKeyList.java
deleted file mode 100644
index edb0681..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/utils/JaxbMapKeyList.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.utils;
-
-import java.util.List;
-
-import javax.xml.bind.annotation.XmlElement;
-
-public class JaxbMapKeyList {
-  @XmlElement public String  key;
-  @XmlElement public List<String> value;
-
-  private JaxbMapKeyList() {}
-
-  public JaxbMapKeyList(String key, List<String> value)
-  {
-    this.key   = key;
-    this.value = value;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/utils/JaxbMapKeyListAdapter.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/utils/JaxbMapKeyListAdapter.java
deleted file mode 100644
index 4589948..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/utils/JaxbMapKeyListAdapter.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.utils;
-
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-
-import javax.xml.bind.annotation.adapters.XmlAdapter;
-
-public class JaxbMapKeyListAdapter extends
-    XmlAdapter<JaxbMapKeyList[], Map<String, List<String>>> {
-
-  @Override
-  public JaxbMapKeyList[] marshal(Map<String, List<String>> map)
-      throws Exception {
-    if (map==null) {
-      return null;
-    }
-    JaxbMapKeyList[] list = new JaxbMapKeyList[map.size()] ;
-    int index = 0;
-    for (String key : map.keySet()) {
-      JaxbMapKeyList jaxbMap = new JaxbMapKeyList(key, map.get(key));
-      list[index++] = jaxbMap;
-    }
-    return list;
-  }
-
-  @Override
-  public Map<String, List<String>> unmarshal(JaxbMapKeyList[] list)
-      throws Exception {
-    if (list == null) {
-      return null;
-    }
-    Map<String, List<String>> m = new TreeMap<String, List<String>>();
-    for (JaxbMapKeyList jaxbMap : list) {
-      m.put(jaxbMap.key, jaxbMap.value);
-    }
-    return m;
-  }
-}
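
For orientation: JaxbMapKeyListAdapter flattens a Map<String, List<String>> into an array of JaxbMapKeyList entries that JAXB can serialize, and rebuilds the map on the way back. A minimal round-trip sketch (the KeyListRoundTrip class name is illustrative, not part of the codebase):

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

import org.apache.ambari.server.utils.JaxbMapKeyList;
import org.apache.ambari.server.utils.JaxbMapKeyListAdapter;

public class KeyListRoundTrip {
  public static void main(String[] args) throws Exception {
    // The same shape of map that cluster host info uses: key -> list of hostnames
    Map<String, List<String>> hostInfo = new TreeMap<String, List<String>>();
    hostInfo.put("slave_hosts", Arrays.asList("host1", "host2"));

    JaxbMapKeyListAdapter adapter = new JaxbMapKeyListAdapter();
    JaxbMapKeyList[] wire = adapter.marshal(hostInfo);        // map -> JAXB-friendly array
    Map<String, List<String>> back = adapter.unmarshal(wire); // array -> map
    System.out.println(back);                                 // {slave_hosts=[host1, host2]}
  }
}
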
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/utils/JaxbMapKeyMap.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/utils/JaxbMapKeyMap.java
deleted file mode 100644
index becc3d9..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/utils/JaxbMapKeyMap.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.utils;
-
-import javax.xml.bind.annotation.XmlElement;
-
-public class JaxbMapKeyMap {
-  @XmlElement public String  key;
-  @XmlElement public JaxbMapKeyVal[] value;
-
-  private JaxbMapKeyMap() {}
-
-  public JaxbMapKeyMap(String key, JaxbMapKeyVal[] value)
-  {
-    this.key   = key;
-    this.value = value;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/utils/JaxbMapKeyMapAdapter.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/utils/JaxbMapKeyMapAdapter.java
deleted file mode 100644
index 8419d73..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/utils/JaxbMapKeyMapAdapter.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.utils;
-
-import java.util.Map;
-import java.util.TreeMap;
-
-import javax.xml.bind.annotation.adapters.XmlAdapter;
-
-public class JaxbMapKeyMapAdapter extends
-    XmlAdapter<JaxbMapKeyMap[], Map<String, Map<String, String>>> {
-
-  private static JaxbMapKeyValAdapter mapAdapter = new JaxbMapKeyValAdapter();
-
-  @Override
-  public JaxbMapKeyMap[] marshal(Map<String, Map<String, String>> map)
-      throws Exception {
-    if (map == null) {
-      return null;
-    }
-    JaxbMapKeyMap[] list = new JaxbMapKeyMap[map.size()];
-    int index=0;
-    for (String key : map.keySet()) {
-      Map<String, String> value = map.get(key);
-      JaxbMapKeyVal[] keyValList = mapAdapter.marshal(value);
-      list[index++] = new JaxbMapKeyMap(key, keyValList);
-    }
-    return list;
-  }
-
-  @Override
-  public Map<String, Map<String, String>> unmarshal(JaxbMapKeyMap[] list)
-      throws Exception {
-    if (list == null) {
-      return null;
-    }
-    Map<String, Map<String, String>> map = new TreeMap<String, Map<String, String>>();
-    for (JaxbMapKeyMap jaxbkeyMap : list) {
-      map.put(jaxbkeyMap.key, mapAdapter.unmarshal(jaxbkeyMap.value));
-    }
-    return map;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/utils/JaxbMapKeyVal.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/utils/JaxbMapKeyVal.java
deleted file mode 100644
index b162174..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/utils/JaxbMapKeyVal.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.utils;
-
-import javax.xml.bind.annotation.XmlElement;
-
-public class JaxbMapKeyVal {
-  @XmlElement public String  key;
-  @XmlElement public String value;
-
-  public JaxbMapKeyVal() {}
-
-  public JaxbMapKeyVal(String key, String value)
-  {
-    this.key   = key;
-    this.value = value;
-  }
-}
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/utils/JaxbMapKeyValAdapter.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/utils/JaxbMapKeyValAdapter.java
deleted file mode 100644
index e3531cd..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/utils/JaxbMapKeyValAdapter.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.utils;
-
-import java.util.Map;
-import java.util.TreeMap;
-
-import javax.xml.bind.annotation.adapters.XmlAdapter;
-
-public class JaxbMapKeyValAdapter extends
-    XmlAdapter<JaxbMapKeyVal[], Map<String, String>> {
-
-  @Override
-  public JaxbMapKeyVal[] marshal(Map<String, String> m) throws Exception {
-    if (m==null) {
-      return null;
-    }
-    JaxbMapKeyVal[] list = new JaxbMapKeyVal[m.size()] ;
-    int index = 0;
-    for (String key : m.keySet()) {
-      JaxbMapKeyVal jaxbMap = new JaxbMapKeyVal(key, m.get(key));
-      list[index++] = jaxbMap;
-    }
-    return list;
-  }
-
-  @Override
-  public Map<String, String> unmarshal(JaxbMapKeyVal[] jm) throws Exception {
-    if (jm == null) {
-      return null;
-    }
-    Map<String, String> m = new TreeMap<String, String>();
-    for (JaxbMapKeyVal jaxbMap : jm) {
-      m.put(jaxbMap.key, jaxbMap.value);
-    }
-    return m;
-  }
-
-}
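
In practice these adapter classes are wired in declaratively rather than called by hand. A minimal sketch, assuming a JAXB runtime on the classpath (the ConfigSnapshot class is hypothetical, not an Ambari type):

import java.util.Map;
import java.util.TreeMap;

import javax.xml.bind.JAXBContext;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;

import org.apache.ambari.server.utils.JaxbMapKeyValAdapter;

@XmlRootElement
public class ConfigSnapshot {
  // JAXB cannot marshal a bare Map; the adapter turns it into a JaxbMapKeyVal[] on the wire
  @XmlJavaTypeAdapter(JaxbMapKeyValAdapter.class)
  public Map<String, String> properties = new TreeMap<String, String>();

  public static void main(String[] args) throws Exception {
    ConfigSnapshot snap = new ConfigSnapshot();
    snap.properties.put("dfs.block.size", "2560000000");
    JAXBContext.newInstance(ConfigSnapshot.class).createMarshaller().marshal(snap, System.out);
  }
}
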
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/utils/ShellCommandUtil.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/utils/ShellCommandUtil.java
deleted file mode 100644
index 2c5e1b8..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/utils/ShellCommandUtil.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.utils;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-/**
- * Logs OpenSsl command exit code with description
- */
-public class ShellCommandUtil {
-  private static final Log LOG = LogFactory.getLog(ShellCommandUtil.class);
-  public static void logOpenSslExitCode(String command, int exitCode) {
-    if (exitCode == 0) {
-      LOG.info(getOpenSslCommandResult(command, exitCode));
-    } else {
-      LOG.warn(getOpenSslCommandResult(command, exitCode));
-    }
-
-  }
-
-  public static String getOpenSslCommandResult(String command, int exitCode) {
-    return new StringBuilder().append("Command ").append(command).append(" finished with exit code: ")
-            .append(exitCode).append(" - ").append(getOpenSslExitCodeDescription(exitCode)).toString();
-  }
-
-  private static String getOpenSslExitCodeDescription(int exitCode) {
-    switch (exitCode) {
-      case 0: {
-        return "the operation was completely successfully.";
-      }
-      case 1: {
-        return "an error occurred parsing the command options.";
-      }
-      case 2: {
-        return "one of the input files could not be read.";
-      }
-      case 3: {
-        return "an error occurred creating the PKCS#7 file or when reading the MIME message.";
-      }
-      case 4: {
-        return "an error occurred decrypting or verifying the message.";
-      }
-      case 5: {
-        return "the message was verified correctly but an error occurred writing out the signers certificates.";
-      }
-      default:
-        return "unsupported code";
-    }
-  }
-}
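
A hypothetical caller of the utility above, showing the intended flow: run an openssl command, capture its exit status, and let the utility log a human-readable description (the OpenSslLogDemo class is illustrative only):

import org.apache.ambari.server.utils.ShellCommandUtil;

public class OpenSslLogDemo {
  public static void main(String[] args) throws Exception {
    // Run a trivial openssl command and hand its exit status to the utility
    Process p = Runtime.getRuntime().exec(new String[]{"openssl", "version"});
    int exitCode = p.waitFor();
    // Logged at INFO when exitCode == 0, at WARN otherwise
    ShellCommandUtil.logOpenSslExitCode("openssl version", exitCode);
  }
}
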
diff --git a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java b/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
deleted file mode 100644
index 7931f31..0000000
--- a/branch-1.2/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
+++ /dev/null
@@ -1,219 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.utils;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-import java.nio.charset.Charset;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-
-import javax.xml.bind.JAXBException;
-
-import org.apache.ambari.server.Role;
-import org.apache.ambari.server.RoleCommand;
-import org.apache.ambari.server.actionmanager.Stage;
-import org.apache.ambari.server.agent.ExecutionCommand;
-import org.apache.ambari.server.controller.HostsMap;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.ServiceComponent;
-import org.apache.ambari.server.state.svccomphost.ServiceComponentHostInstallEvent;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.codehaus.jackson.JsonGenerationException;
-import org.codehaus.jackson.JsonParseException;
-import org.codehaus.jackson.map.JsonMappingException;
-import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jackson.map.SerializationConfig;
-
-public class StageUtils {
-  private static Log LOG = LogFactory.getLog(StageUtils.class);
-  
-  private static Map<String, String> componentToClusterInfoKeyMap = 
-      new HashMap<String, String>();
-  
-  static {
-    componentToClusterInfoKeyMap.put("NAMENODE", "namenode_host");
-    componentToClusterInfoKeyMap.put("JOBTRACKER", "jtnode_host");
-    componentToClusterInfoKeyMap.put("SNAMENODE", "snamenode_host");
-    componentToClusterInfoKeyMap.put("ZOOKEEPER_SERVER", "zookeeper_hosts");
-    componentToClusterInfoKeyMap.put("HBASE_MASTER", "hbase_master_host");
-    componentToClusterInfoKeyMap.put("HBASE_REGIONSERVER", "hbase_rs_hosts");
-    componentToClusterInfoKeyMap.put("HIVE_SERVER", "hive_server_host");
-    componentToClusterInfoKeyMap.put("OOZIE_SERVER", "oozie_server");
-    componentToClusterInfoKeyMap.put("WEBHCAT_SERVER",
-        "webhcat_server_host");
-    componentToClusterInfoKeyMap.put(Role.MYSQL_SERVER.toString(),
-        "hive_mysql_host");
-    componentToClusterInfoKeyMap.put("DASHBOARD", "dashboard_host");
-    componentToClusterInfoKeyMap.put("NAGIOS_SERVER", "nagios_server_host");
-    componentToClusterInfoKeyMap.put("GANGLIA_SERVER",
-        "ganglia_server_host");
-    componentToClusterInfoKeyMap.put("DATANODE", "slave_hosts");
-    componentToClusterInfoKeyMap.put("TASKTRACKER", "slave_hosts");
-    componentToClusterInfoKeyMap.put("HBASE_REGIONSERVER", "hbase_rs_hosts");
-    componentToClusterInfoKeyMap.put("KERBEROS_SERVER", "kdc_host");
-    componentToClusterInfoKeyMap.put("KERBEROS_ADMIN_CLIENT",
-        "kerberos_adminclient_host");
-  }
-  
-  public static String getActionId(long requestId, long stageId) {
-    return requestId + "-" + stageId;
-  }
-
-  public static long[] getRequestStage(String actionId) {
-    String [] fields = actionId.split("-");
-    long[] requestStageIds = new long[2];
-    requestStageIds[0] = Long.parseLong(fields[0]);
-    requestStageIds[1] = Long.parseLong(fields[1]);
-    return requestStageIds;
-  }
-
-  public static Stage getATestStage(long requestId, long stageId) {
-    String hostname;
-    try {
-      hostname = InetAddress.getLocalHost().getHostName();
-    } catch (UnknownHostException e) {
-      hostname = "host-dummy";
-    }
-    return getATestStage(requestId, stageId, hostname);
-  }
-  
-  //For testing only
-  public static Stage getATestStage(long requestId, long stageId, String hostname) {
-    Stage s = new Stage(requestId, "/tmp", "cluster1");
-    s.setStageId(stageId);
-    long now = System.currentTimeMillis();
-    String filename = null;
-    s.addHostRoleExecutionCommand(hostname, Role.NAMENODE, RoleCommand.INSTALL,
-        new ServiceComponentHostInstallEvent("NAMENODE", hostname, now, "HDP-1.2.0"),
-        "cluster1", "HDFS");
-    ExecutionCommand execCmd = s.getExecutionCommandWrapper(hostname, "NAMENODE").getExecutionCommand();
-    execCmd.setCommandId(s.getActionId());
-    Map<String, List<String>> clusterHostInfo = new TreeMap<String, List<String>>();
-    List<String> slaveHostList = new ArrayList<String>();
-    slaveHostList.add(hostname);
-    slaveHostList.add("host2");
-    clusterHostInfo.put("slave_hosts", slaveHostList);
-    execCmd.setClusterHostInfo(clusterHostInfo);
-    Map<String, String> hdfsSite = new TreeMap<String, String>();
-    hdfsSite.put("dfs.block.size", "2560000000");
-    Map<String, Map<String, String>> configurations =
-        new TreeMap<String, Map<String, String>>();
-    configurations.put("hdfs-site", hdfsSite);
-    execCmd.setConfigurations(configurations);
-    Map<String, String> params = new TreeMap<String, String>();
-    params.put("jdklocation", "/x/y/z");
-    execCmd.setHostLevelParams(params);
-    Map<String, String> roleParams = new TreeMap<String, String>();
-    roleParams.put("format", "false");
-    execCmd.setRoleParams(roleParams);
-    return s;
-  }
-  
-  public static String jaxbToString(Object jaxbObj) throws JAXBException,
-  JsonGenerationException, JsonMappingException, IOException {
-    ObjectMapper mapper = new ObjectMapper();
-    mapper.configure(SerializationConfig.Feature.INDENT_OUTPUT, true);
-    mapper.configure(SerializationConfig.Feature.USE_ANNOTATIONS, true);
-    return mapper.writeValueAsString(jaxbObj);
-  }
-  
-  public static ExecutionCommand stringToExecutionCommand(String json)
-      throws JsonParseException, JsonMappingException, IOException {
-    ObjectMapper mapper = new ObjectMapper();
-    mapper.configure(SerializationConfig.Feature.INDENT_OUTPUT, true);
-    mapper.configure(SerializationConfig.Feature.USE_ANNOTATIONS, true);
-    InputStream is = new ByteArrayInputStream(json.getBytes(Charset.forName("UTF8")));
-    return mapper.readValue(is, ExecutionCommand.class);
-  }
-
-  public static <T> T fromJson(String json, Class<T> clazz) throws IOException {
-    ObjectMapper mapper = new ObjectMapper();
-    mapper.configure(SerializationConfig.Feature.INDENT_OUTPUT, true);
-    mapper.configure(SerializationConfig.Feature.USE_ANNOTATIONS, true);
-    InputStream is = new ByteArrayInputStream(json.getBytes(Charset.forName("UTF8")));
-    return mapper.readValue(is, clazz);
-  }
-  
-  
-  public static Map<String, List<String>> getClusterHostInfo(Cluster cluster, HostsMap hostsMap) {
-    Map<String, List<String>> info = new HashMap<String, List<String>>();
-    if (cluster.getServices() != null) {
-      for (String serviceName : cluster.getServices().keySet()) {
-        if (cluster.getServices().get(serviceName) != null) {
-          for (String componentName : cluster.getServices().get(serviceName)
-              .getServiceComponents().keySet()) {
-            String clusterInfoKey = componentToClusterInfoKeyMap
-                .get(componentName);
-            if (clusterInfoKey == null) {
-              continue;
-            }
-            ServiceComponent scomp = cluster.getServices().get(serviceName)
-                .getServiceComponents().get(componentName);
-            if (scomp.getServiceComponentHosts() != null
-                && !scomp.getServiceComponentHosts().isEmpty()) {
-              List<String> hostList = new ArrayList<String>();
-              for (String host: scomp.getServiceComponentHosts().keySet()) {
-                String mappedHost = hostsMap.getHostMap(host);
-                hostList.add(mappedHost);
-              }
-              info.put(clusterInfoKey, hostList);
-            }
-          }
-        }
-      }
-    }
-    //Add ambari db server host once, outside the per-component loop
-    info.put("ambari_db_server_host", Arrays.asList(hostsMap.getHostMap(getHostName())));
-    return info;
-  }
-
-  public static String getHostName() {
-    try {
-      return InetAddress.getLocalHost().getCanonicalHostName();
-    } catch (UnknownHostException e) {
-      LOG.warn("Could not find canonical hostname ", e);
-      return "localhost";
-    }
-  }
-  
-  public static String getHostsToDecommission(List<String> hosts) {
-    StringBuilder builder = new StringBuilder();
-    builder.append("[");
-    boolean first = true;
-    for (String host : hosts) {
-      if (!first) {
-        builder.append(",");
-      } else {
-        first = false;
-      }
-      builder.append("'");
-      builder.append(host);
-      builder.append("'");
-    }
-    builder.append("]");
-    return builder.toString();
-  }
-}
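
Of the helpers above, getActionId and getRequestStage form a simple encode/decode pair: an action id is the string "requestId-stageId". A small sketch (DemoActionId is an illustrative name):

import org.apache.ambari.server.utils.StageUtils;

public class DemoActionId {
  public static void main(String[] args) {
    String actionId = StageUtils.getActionId(5, 2);    // "5-2"
    long[] ids = StageUtils.getRequestStage(actionId); // {5, 2}
    System.out.println(ids[0] + " / " + ids[1]);
  }
}

Note that the parse splits on "-", so it assumes both ids are non-negative.
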
diff --git a/branch-1.2/ambari-server/src/main/python/ambari-server-state/Configurator.py b/branch-1.2/ambari-server/src/main/python/ambari-server-state/Configurator.py
deleted file mode 100644
index 3dc0991..0000000
--- a/branch-1.2/ambari-server/src/main/python/ambari-server-state/Configurator.py
+++ /dev/null
@@ -1,178 +0,0 @@
-from Entities import *
-from os.path import exists
-import xml.dom.minidom
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-class Configurator:
-
-  servicesPath = {"Core": "core-site.xml",
-                  "Configuration": "initProperties.xml"
-                  #                  "NameNode": "resources/hdfs-site.xml",
-                  #                  "JobTracker": "resources/mapred-site.xml",
-                  #                  "OOZIE": "resources/oozie-site.xml",
-                  #                  "HBASE": "resources/hbase-site.xml",
-
-                  #                 "ZooKeeper": "zk.properties",
-  }
-
-  CONFIG_INIT_TYPE = ("CURRENT_DIR", "USER_PATH_INPUT")
-  RESULT_FILE_PATH = "ambari-server-state.xml"
-  configurationType = 0
-
-  configurations = Configurations()  # root XML element for the resulting file
-
-  def __init__(self):
-    "Init class with all required information about resources"
-    self.configurationType = self.chooseConfInitType()
-    self.initPaths()
-    self.isFilesExist()
-    self.getServicesFromUserInputConfigFile()
-    self.createResultFile()
-
-
-  def getServicesFromUserInputConfigFile(self):
-    "Get all configeration data from resources(files)"
-    self.getConfiguration()
-    self.getStackInformation()
-    self.getHosts()
-
-  def getStackInformation(self):
-    "Init data for Stack block of the result configuration file"
-    DOMTree = xml.dom.minidom.parse(self.servicesPath["Configuration"])
-    rootElement = DOMTree.documentElement
-    stack = rootElement.getElementsByTagName("stack")[0]
-    self.configurations.stack.comment = stack.getElementsByTagName('comment')[0].childNodes[0].data
-    services = stack.getElementsByTagName("service")
-    servicesResultStructure = self.configurations.stack.services
-    for service in services:
-      serviceStructure = Service()
-      serviceStructure.name = service.getElementsByTagName('name')[0].childNodes[0].data
-      serviceStructure.version = service.getElementsByTagName('version')[0].childNodes[0].data
-      serviceStructure.comment = service.getElementsByTagName('comment')[0].childNodes[0].data
-      serviceStructure.user = service.getElementsByTagName('user')[0].childNodes[0].data
-      serviceStructure.enabled = service.getElementsByTagName('enabled')[0].childNodes[0].data
-      servicesResultStructure.addService(serviceStructure)
-
-    repositories = rootElement.getElementsByTagName("repository")
-    for repository in repositories:
-      self.configurations.stack.repository.comment = repository.getElementsByTagName('comment')[0].childNodes[0].data
-      infos = repository.getElementsByTagName("info")
-      for info in infos:
-        for key in info.childNodes:
-          if key.nodeType == 1:
-            repositoryStructure = self.configurations.stack.repository
-            repositoryStructure.info.addKey(key.nodeName, key.childNodes[0].data)
-
-  def getHosts(self):
-    "Init data for Hosts block of the result configuration file"
-  ###########################Get Hosts information##########################
-    DOMTree = xml.dom.minidom.parse(self.servicesPath["Configuration"])
-    rootElement = DOMTree.documentElement
-    hostsElement = rootElement.getElementsByTagName("hosts")[0]
-    hosts = hostsElement.getElementsByTagName("host")
-    for host in hosts:
-      if host.nodeType == 1:
-        hostsStructure = self.configurations.hosts
-        hostElement = Host()
-        hostElement.name = host.childNodes[0].data
-        hostsStructure.addHost(hostElement)
-
-
-  def getConfiguration(self):
-    "Init Configuration block of result configuration file"
-    DOMTree = xml.dom.minidom.parse(self.servicesPath["Configuration"])
-    rootElement = DOMTree.documentElement
-
-    hadoopEnvResultStructure = self.configurations.configuration.hadoopEnv
-    coreSiteResultStructure = self.configurations.configuration.coreSite
-
-    ###########################Get Configuration information##########################
-    env = rootElement.getElementsByTagName("hadoop-env")[0]
-    confDir = env.getElementsByTagName('conf-dir')[0].childNodes[0].data
-    hadoopEnvResultStructure.confDir = confDir
-    namenodeJvmOpts = env.getElementsByTagName('namenode-jvm-opts')[0].childNodes[0].data
-    hadoopEnvResultStructure.namenodeJvmOpts = namenodeJvmOpts
-    clientOpts = env.getElementsByTagName('client-opts')[0].childNodes[0].data
-    hadoopEnvResultStructure.clientOpts = clientOpts
-
-    coreSite = rootElement.getElementsByTagName("core-site")
-    fsDefaultName = coreSite[0].getElementsByTagName('fs-default-name')[0].childNodes[0].data
-    coreSiteResultStructure.fsDefaultName = fsDefaultName
-
-    hadoopSecurityAuthentication = coreSite[0].getElementsByTagName('hadoop-security-authentication')[0].childNodes[0].data
-    coreSiteResultStructure.hadoopSecurityAuthentication = hadoopSecurityAuthentication
-
-    #########################Get configuration.coreSite#########################
-    DOMTree = xml.dom.minidom.parse(self.servicesPath["Core"])
-    rootElement = DOMTree.documentElement
-    properties = rootElement.getElementsByTagName("property")
-    for property in properties:
-      name = property.getElementsByTagName('name')[0].childNodes[0].data
-      if name == "fs.default.name":
-        self.configurations.configuration.coreSite.fsDefaultName = property.getElementsByTagName('value')[0].childNodes[0].data
-      if name == "hadoop.security.authentication":
-        self.configurations.configuration.coreSite.hadoopSecurityAuthentication = property.getElementsByTagName('value')[0].childNodes[0].data
-
-
-  def chooseConfInitType(self):
-    "Ask how the paths to the configuration files should be obtained"
-    "The choices are based on the Configurator.CONFIG_INIT_TYPE tuple"
-    return int(raw_input("\tInput configuration type:\n" +
-                         "0) Current path contains all required configuration files.\n" +
-                         "1) Enter path for each conf file manually.\n" +
-                         "Choose:"))
-
-
-  def initPaths(self):
-    "Input alternative file paths for resources"
-    if self.configurationType != 0:
-      for service in self.servicesPath.keys():
-        path = raw_input("Please enter path for " + service + "(if there is no such service type \"no\") :")
-        if len(path) > 0 and not path == "no":
-          self.servicesPath[service] = path
-        elif path == "no":
-          del self.servicesPath[service]
-          print(self.servicesPath)
-        else:
-          raise ValueError(
-            "Path to the configuration file can't be empty.") #Catch it layter and start input mode automatically
-
-
-  def isFilesExist(self):
-    "Check that every resource file exists"
-    for service in self.servicesPath.keys():
-      path = self.servicesPath[service]
-      if not exists(path):
-        raise IOError("File " + path + " doesn't exist! (" + service + " service)")
-
-  def createResultFile(self):
-    resultFile = open(self.RESULT_FILE_PATH, "w")
-    resultFile.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>")
-    resultFile.write( str(self.configurations) )
-    resultFile.flush()
-    resultFile.close()
-    print("\t\t Result configuration file( "+self.RESULT_FILE_PATH+") was generate successfully.")
-
-
-
-
-
-
-
-
diff --git a/branch-1.2/ambari-server/src/main/python/ambari-server-state/Entities.py b/branch-1.2/ambari-server/src/main/python/ambari-server-state/Entities.py
deleted file mode 100644
index 379c95f..0000000
--- a/branch-1.2/ambari-server/src/main/python/ambari-server-state/Entities.py
+++ /dev/null
@@ -1,203 +0,0 @@
-import inspect
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-class ConvertToXml:
-  "Template class, allow to output fields in xml format"
-  def getField(self):
-    return [name for name, obj in inspect.getmembers(self)
-            if not name.startswith("__") and not inspect.isroutine(obj)]
-
-  def attributesToXml(self):
-    result = ""
-    listOfAttr = self.getField()
-    for attrName in listOfAttr:
-      result += "<" + attrName + ">"
-      result += str(getattr(self, attrName))
-      result += "</" + attrName + ">"
-    return result
-
-
-class Configurations(ConvertToXml):
-  "Root element for resulting file, incluge all exported data"
-  stack = "" #Stack entuty
-  configuration = "" #Configuration entity
-  hosts = ""  #Hosts entity
-
-  def __init__(self):
-    self.stack = Stack()
-    self.hosts = Hosts()
-    self.configuration = ConfigurationEntity()
-
-  def __str__(self):
-    result = "<configurations>"
-    result += str(self.stack)
-    result += str(self.hosts)
-    result += str(self.configuration)
-    result += "</configurations>"
-    return result
-
-
-###Stack structure {
-class Stack(ConvertToXml):
-  comment = ""
-  services = "" #Services object
-  repository = "" #Repository object
-
-  def __init__(self):
-    self.services = Services()
-    self.repository = Repository()
-
-  def __str__(self):
-    result = "<stack>"
-    result += "<comment>" + self.comment + "</comment>"
-    result += str(self.services)
-    result += str(self.repository)
-    result += "</stack>"
-    return result
-
-
-class Services(ConvertToXml):
-  def __init__(self):
-    self.service = [] #Service objects; instance attribute so the list is not shared across instances
-
-  def __str__(self):
-    result = "<services>"
-    for serv in self.service:
-      result += str(serv)
-    result += "</services>"
-    return result
-
-  def addService(self, service):
-    self.service.append(service)
-
-
-class Service(ConvertToXml):
-  name = ""
-  version = ""
-  comment = ""
-  user = ""
-  enabled = ""
-
-  def __str__(self):
-    result = "<service>"
-    result += self.attributesToXml()
-    result += "</service>"
-    return result
-
-
-class Repository(ConvertToXml):
-  comment = ""
-  info = "" #Info object
-
-  def __init__(self):
-    self.info = Info()
-
-  def __str__(self):
-    result = "<repository>"
-    result += "<comment>" + self.comment + "</comment>"
-    result += str(self.info)
-    result += "</repository>"
-    return result
-
-
-class Info(ConvertToXml):
-  def __init__(self):
-    self.keys = {} #instance attribute so the dict is not shared across instances
-
-  def __str__(self):
-    result = "<info>"
-    for key in self.keys.keys():
-      result += "<" + key + ">"
-      result += self.keys.get(key)
-      result += "</" + key + ">"
-    result += "</info>"
-    return result
-
-  def addKey(self, key, value):
-    self.keys[key] = value
-
-###Stack structure }
-
-
-###Configuration structure {
-class ConfigurationEntity(ConvertToXml):
-  hadoopEnv = "" #HadoopEnv object
-  coreSite = ""   #CoreSite
-
-  def __init__(self):
-    self.hadoopEnv = HadoopEnv()
-    self.coreSite = CoreSite()
-
-  def __str__(self):
-    result = "<configuration>"
-    result += str(self.hadoopEnv)
-    result += str(self.coreSite)
-    result += "</configuration>"
-    return result
-
-
-class Hosts:
-  comment = ""
-
-  def __init__(self):
-    self.hosts = []  #Host collection; instance attribute so the list is not shared across instances
-
-  def addHost(self, host):
-    self.hosts.append(host)
-
-  def __str__(self):
-    result = "<hosts>"
-    for host in self.hosts:
-      result += str(host)
-    result += "</hosts>"
-    return result
-
-class Host(ConvertToXml):
-  name = ""
-
-  def __str__(self):
-    result = "<host>"
-    result += self.name
-    result += "</host>"
-    return result
-
-
-class HadoopEnv(ConvertToXml):
-  confDir = ""
-  namenodeJvmOpts = ""
-  clientOpts = ""
-
-  def __str__(self):
-    result = "<hadoop-env>"
-    result += self.attributesToXml()
-    result += "</hadoop-env>"
-    return result
-
-
-class CoreSite(ConvertToXml):
-  fsDefaultName = ""
-  #hadoopTmpDir = ""
-  hadoopSecurityAuthentication = ""
-
-  def __str__(self):
-    result = "<core-site>"
-    result += self.attributesToXml()
-    result += "</core-site>"
-    return result
-
-###Configuration structure }
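
The ConvertToXml base class above reflects over an instance's attributes and wraps each value in an element named after the attribute. For readers more at home in the server's Java code, a comparable sketch of the same reflection-to-XML technique (all names here are illustrative, not part of Ambari):

import java.lang.reflect.Field;

public class ReflectToXml {
  // Emit <fieldName>value</fieldName> for each public field, mirroring ConvertToXml.attributesToXml()
  public static String toXml(Object bean) throws IllegalAccessException {
    StringBuilder sb = new StringBuilder();
    for (Field f : bean.getClass().getFields()) {
      sb.append('<').append(f.getName()).append('>')
        .append(f.get(bean))
        .append("</").append(f.getName()).append('>');
    }
    return sb.toString();
  }

  public static class Host { public String name = "host_1"; }

  public static void main(String[] args) throws Exception {
    System.out.println(toXml(new Host())); // prints <name>host_1</name>
  }
}
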
diff --git a/branch-1.2/ambari-server/src/main/python/ambari-server-state/Main.py b/branch-1.2/ambari-server/src/main/python/ambari-server-state/Main.py
deleted file mode 100644
index 8bc1146..0000000
--- a/branch-1.2/ambari-server/src/main/python/ambari-server-state/Main.py
+++ /dev/null
@@ -1,20 +0,0 @@
-from Configurator import *
-
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-Configurator()
\ No newline at end of file
diff --git a/branch-1.2/ambari-server/src/main/python/ambari-server-state/core-site.xml b/branch-1.2/ambari-server/src/main/python/ambari-server-state/core-site.xml
deleted file mode 100644
index b6547b4..0000000
--- a/branch-1.2/ambari-server/src/main/python/ambari-server-state/core-site.xml
+++ /dev/null
@@ -1,264 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-    <!-- i/o properties -->
-
-    <property>
-        <name>io.file.buffer.size</name>
-        <value>131072</value>
-        <description>The size of buffer for use in sequence files.
-            The size of this buffer should probably be a multiple of hardware
-            page size (4096 on Intel x86), and it determines how much data is
-            buffered during read and write operations.
-        </description>
-    </property>
-
-    <property>
-        <name>io.serializations</name>
-        <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
-    </property>
-
-    <property>
-        <name>io.compression.codecs</name>
-        <value>
-            org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec
-        </value>
-        <description>A list of the compression codec classes that can be used
-            for compression/decompression.
-        </description>
-    </property>
-
-    <property>
-        <name>io.compression.codec.lzo.class</name>
-        <value>com.hadoop.compression.lzo.LzoCodec</value>
-        <description>The implementation for lzo codec.</description>
-    </property>
-
-    <!-- file system properties -->
-
-    <property>
-        <name>fs.default.name</name>
-        <!-- cluster variant -->
-        <value>hdfs://hdp1:8020</value>
-        <description>The name of the default file system. Either the
-            literal string "local" or a host:port for NDFS.
-        </description>
-        <final>true</final>
-    </property>
-
-    <property>
-        <name>fs.trash.interval</name>
-        <value>360</value>
-        <description>Number of minutes between trash checkpoints.
-            If zero, the trash feature is disabled.
-        </description>
-    </property>
-
-    <property>
-        <name>fs.checkpoint.dir</name>
-        <value>/mnt/hmc/hadoop/hdfs/namesecondary</value>
-        <description>Determines where on the local filesystem the DFS secondary
-            name node should store the temporary images to merge.
-            If this is a comma-delimited list of directories then the image is
-            replicated in all of the directories for redundancy.
-        </description>
-    </property>
-
-    <property>
-        <name>fs.checkpoint.edits.dir</name>
-        <value>${fs.checkpoint.dir}</value>
-        <description>Determines where on the local filesystem the DFS secondary
-            name node should store the temporary edits to merge.
-            If this is a comma-delimited list of directories then the edits are
-            replicated in all of the directories for redundancy.
-            The default value is the same as fs.checkpoint.dir.
-        </description>
-    </property>
-
-    <property>
-        <name>fs.checkpoint.period</name>
-        <value>21600</value>
-        <description>The number of seconds between two periodic checkpoints.
-        </description>
-    </property>
-
-    <property>
-        <name>fs.checkpoint.size</name>
-        <value>536870912</value>
-        <description>The size of the current edit log (in bytes) that triggers
-            a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
-        </description>
-    </property>
-
-    <!-- ipc properties: copied from kryptonite configuration -->
-    <property>
-        <name>ipc.client.idlethreshold</name>
-        <value>8000</value>
-        <description>Defines the threshold number of connections after which
-            connections will be inspected for idleness.
-        </description>
-    </property>
-
-    <property>
-        <name>ipc.client.connection.maxidletime</name>
-        <value>30000</value>
-        <description>The maximum time after which a client will bring down the
-            connection to the server.
-        </description>
-    </property>
-
-    <property>
-        <name>ipc.client.connect.max.retries</name>
-        <value>50</value>
-        <description>Defines the maximum number of retries for IPC connections.</description>
-    </property>
-
-    <!-- Web Interface Configuration -->
-    <property>
-        <name>webinterface.private.actions</name>
-        <value>false</value>
-        <description>If set to true, the web interfaces of JT and NN may contain
-            actions, such as kill job, delete file, etc., that should
-            not be exposed to public. Enable this option if the interfaces
-            are only reachable by those who have the right authorization.
-        </description>
-    </property>
-
-    <property>
-        <name>hadoop.security.authentication</name>
-        <value>simple</value>
-        <description>
-            Set the authentication for the cluster. Valid values are: simple or
-            kerberos.
-        </description>
-    </property>
-    <property>
-        <name>hadoop.security.authorization</name>
-        <value>false</value>
-        <description>
-            Enable authorization for different protocols.
-        </description>
-    </property>
-
-    <property>
-        <name>hadoop.security.auth_to_local</name>
-        <value>
-            RULE:[2:$1@$0]([jt]t@.*)s/.*/mapred/
-            RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/
-            RULE:[2:$1@$0](hm@.*)s/.*/hbase/
-            RULE:[2:$1@$0](rs@.*)s/.*/hbase/
-            DEFAULT
-        </value>
-        <description>The mapping from kerberos principal names to local OS user names.
-            So the default rule is just "DEFAULT" which takes all principals in your default domain to their first
-            component.
-            "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" to "omalley", if your default domain is APACHE.ORG.
-            The translations rules have 3 sections:
-            base filter substitution
-            The base consists of a number that represents the number of components in the principal name excluding the
-            realm and the pattern for building the name from the sections of the principal name. The base uses $0 to
-            mean the realm, $1 to mean the first component and $2 to mean the second component.
-
-            [1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
-            [2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
-            [2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
-
-            The filter is a regex in parens that must match the generated string for the rule to apply.
-
-            "(.*%admin)" will take any string that ends in "%admin"
-            "(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
-
-            Finally, the substitution is a sed rule to translate a regex into a fixed string.
-
-            "s/@ACME\.COM//" removes the first instance of "@ACME.COM".
-            "s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
-            "s/X/Y/g" replaces all of the "X" in the name with "Y"
-
-            So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had
-            a single component "joe@ACME.COM", you'd do:
-
-            RULE:[1:$1@$0](.*@ACME\.COM)s/@.*//
-            DEFAULT
-
-            To also translate the names with a second component, you'd make the rules:
-
-            RULE:[1:$1@$0](.*@ACME\.COM)s/@.*//
-            RULE:[2:$1@$0](.*@ACME\.COM)s/@.*//
-            DEFAULT
-
-            If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
-
-            RULE:[2:$1%$2@$0](.*%admin@APACHE\.ORG)s/.*/admin/
-            DEFAULT
-        </description>
-    </property>
-
-    <property>
-        <name>hadoop.proxyuser.hcat.groups</name>
-        <value>users</value>
-        <description>
-            Proxy group for Hadoop.
-        </description>
-    </property>
-
-    <property>
-        <name>hadoop.proxyuser.hcat.hosts</name>
-        <value></value>
-        <description>
-            Proxy host for Hadoop.
-        </description>
-    </property>
-
-    <property>
-        <name>hadoop.proxyuser.oozie.groups</name>
-        <value>users</value>
-        <description>
-            Proxy group for Hadoop.
-        </description>
-    </property>
-
-    <property>
-        <name>hadoop.proxyuser.oozie.hosts</name>
-        <value>hdp1</value>
-        <description>
-            Proxy host for Hadoop.
-        </description>
-    </property>
-
-    <property>
-        <name>hadoop.proxyuser.hcat.groups</name>
-        <value>users</value>
-        <description>
-            Proxy group for templeton.
-        </description>
-    </property>
-
-    <property>
-        <name>hadoop.proxyuser.hcat.hosts</name>
-        <value>hdp1</value>
-        <description>
-            Proxy host for templeton.
-        </description>
-    </property>
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/python/ambari-server-state/initProperties.xml b/branch-1.2/ambari-server/src/main/python/ambari-server-state/initProperties.xml
deleted file mode 100644
index 217a84a..0000000
--- a/branch-1.2/ambari-server/src/main/python/ambari-server-state/initProperties.xml
+++ /dev/null
@@ -1,76 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<configurations>
-    <configuration>
-        <stack>
-            <comment>stack comment</comment>
-            <repository>
-                <comment>Could be winpkg/tarball/others</comment>
-                <info>
-                    <key1>value for key1</key1>
-                    <key2>value for key2</key2>
-                    <key3>value for key3</key3>
-                </info>
-            </repository>
-
-            <services>
-                <service>
-                    <name>service1</name>
-                    <version>1.0.1</version>
-                    <enabled>true</enabled>
-                    <user>root</user>
-                    <comment>some hadoop service</comment>
-                </service>
-                <service>
-                    <name>service2</name>
-                    <version>1.0.2</version>
-                    <enabled>false</enabled>
-                    <user>toor</user>
-                    <comment>some hadoop service2</comment>
-                </service>
-            </services>
-        </stack>
-
-        <hadoop-env>
-            <conf-dir>/etc/hadoop</conf-dir>
-            <namenode-jvm-opts>-Dsecurity.audit.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT</namenode-jvm-opts>
-            <client-opts>-Xmx128m</client-opts>
-        </hadoop-env>
-        <core-site>
-            <fs-default-name>hdfs://${namenode}:8020/</fs-default-name>
-            <!-- hadoop-tmp-dir>/grid/0/hadoop/tmp</hadoop-tmp-dir -->
-            <hadoop-security-authentication>kerberos</hadoop-security-authentication>
-        </core-site>
-
-        <hosts>
-            <comment>This can also be thought of as representation of hosts eg. small/medium/large</comment>
-            <host>host_1</host>
-            <host>host_2</host>
-        </hosts>
-
-        <!-- TODO Roles block -->
-        <!--<roles>-->
-            <!--<comment></comment>-->
-
-        <!--</roles>-->
-
-    </configuration>
-
-
-</configurations>
\ No newline at end of file
diff --git a/branch-1.2/ambari-server/src/main/python/ambari-server.py b/branch-1.2/ambari-server/src/main/python/ambari-server.py
deleted file mode 100755
index 8a435a3..0000000
--- a/branch-1.2/ambari-server/src/main/python/ambari-server.py
+++ /dev/null
@@ -1,1201 +0,0 @@
-#!/usr/bin/env python2.6
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import optparse
-import shlex
-import sys
-import os
-import signal
-import subprocess
-import re
-import string
-import glob
-import platform
-import shutil
-import stat
-import fileinput
-import urllib2
-import time
-import getpass
-# debug settings
-VERBOSE = False
-SILENT = False
-SERVER_START_DEBUG = False
-
-# action commands
-SETUP_ACTION = "setup"
-START_ACTION = "start"
-STOP_ACTION = "stop"
-RESET_ACTION = "reset"
-UPGRADE_ACTION = "upgrade"
-UPGRADE_STACK_ACTION = "upgradestack"
-
-# selinux commands
-GET_SE_LINUX_ST_CMD = "/usr/sbin/sestatus"
-SE_SETENFORCE_CMD = "setenforce 0"
-SE_STATUS_DISABLED = "disabled"
-SE_STATUS_ENABLED = "enabled"
-SE_MODE_ENFORCING = "enforcing"
-SE_MODE_PERMISSIVE = "permissive"
-
-# iptables commands
-IP_TBLS_ST_CMD = "/sbin/service iptables status"
-IP_TBLS_STOP_CMD = "/sbin/service iptables stop"
-IP_TBLS_ENABLED = "Firewall is running"
-IP_TBLS_DISABLED = "Firewall is stopped.\n"
-IP_TBLS_SRVC_NT_FND = "iptables: unrecognized service"
-
-# server commands
-ambari_provider_module_option = ""
-ambari_provider_module = os.environ.get('AMBARI_PROVIDER_MODULE')
-
-# constants
-STACK_NAME_VER_SEP = "-"
-
-if ambari_provider_module is not None:
-  ambari_provider_module_option = "-Dprovider.module.class=" +\
-                                  ambari_provider_module + " "
-
-SERVER_START_CMD="{0}" + os.sep + "bin" + os.sep +\
-                 "java -server -XX:NewRatio=3 "\
-                 "-XX:+UseConcMarkSweepGC " +\
-                 "-XX:-UseGCOverheadLimit -XX:CMSInitiatingOccupancyFraction=60 " +\
-                 ambari_provider_module_option +\
-                 os.getenv('AMBARI_JVM_ARGS','-Xms512m -Xmx2048m') +\
-                 " -cp {1}"+ os.pathsep + "{2}" +\
-                 "/* org.apache.ambari.server.controller.AmbariServer "\
-                 ">/var/log/ambari-server/ambari-server.out 2>&1"
-SERVER_START_CMD_DEBUG="{0}" + os.sep + "bin" + os.sep +\
-                       "java -server -XX:NewRatio=2 -XX:+UseConcMarkSweepGC " +\
-                       ambari_provider_module_option +\
-                       os.getenv('AMBARI_JVM_ARGS','-Xms512m -Xmx2048m') +\
-                       " -Xdebug -Xrunjdwp:transport=dt_socket,address=5005,"\
-                       "server=y,suspend=n -cp {1}"+ os.pathsep + ".." +\
-                       os.sep + "lib" + os.sep + "ambari-server" +\
-                       os.sep +\
-                       "* org.apache.ambari.server.controller.AmbariServer"
-
-AMBARI_CONF_VAR="AMBARI_CONF_DIR"
-AMBARI_SERVER_LIB="AMBARI_SERVER_LIB"
-JAVA_HOME="JAVA_HOME"
-PID_DIR="/var/run/ambari-server"
-PID_NAME="ambari-server.pid"
-AMBARI_PROPERTIES_FILE="ambari.properties"
-
-SETUP_DB_CMD = ['su', 'postgres',
-        '--command=psql -f {0} -v username=\'"{1}"\' -v password="\'{2}\'"']
-UPGRADE_STACK_CMD = ['su', 'postgres',
-        '--command=psql -f {0} -v stack_name="\'{1}\'"  -v stack_version="\'{2}\'"']
-PG_ST_CMD = "/sbin/service postgresql status"
-PG_START_CMD = "/sbin/service postgresql start"
-PG_RESTART_CMD = "/sbin/service postgresql restart"
-PG_STATUS_RUNNING = "running"
-PG_HBA_DIR = "/var/lib/pgsql/data/"
-PG_HBA_CONF_FILE = PG_HBA_DIR + "pg_hba.conf"
-PG_HBA_CONF_FILE_BACKUP = PG_HBA_DIR + "pg_hba_bak.conf.old"
-POSTGRESQL_CONF_FILE = PG_HBA_DIR + "postgresql.conf"
-PG_HBA_RELOAD_CMD = "su postgres --command='pg_ctl -D {0} reload'"
-PG_DEFAULT_PASSWORD = "bigdata"
-JDBC_USER_NAME_PROPERTY = "server.jdbc.user.name"
-JDBC_PASSWORD_FILE_PROPERTY = "server.jdbc.user.passwd"
-JDBC_PASSWORD_FILENAME = "password.dat"
-
-# jdk commands
-JDK_LOCAL_FILENAME = "jdk-6u31-linux-x64.bin"
-JDK_MIN_FILESIZE = 5000
-JDK_INSTALL_DIR = "/usr/jdk64"
-CREATE_JDK_DIR_CMD = "/bin/mkdir -p " + JDK_INSTALL_DIR
-MAKE_FILE_EXECUTABLE_CMD = "chmod a+x {0}"
-JAVA_HOME_PROPERTY = "java.home"
-OS_TYPE_PROPERTY = "server.os_type"
-
-JDK_DOWNLOAD_CMD = "curl --create-dirs -o {0} {1}"
-JDK_DOWNLOAD_SIZE_CMD = "curl -I {0}"
-
-
-
-def configure_pg_hba_ambaridb_users():
-  args = optparse.Values()
-  configure_postgres_username_password(args)
-
-  with open(PG_HBA_CONF_FILE, "a") as pgHbaConf:
-    pgHbaConf.write("\n")
-    pgHbaConf.write("local  all  " + args.postgres_username +
-                    ",mapred md5")
-    pgHbaConf.write("\n")
-    pgHbaConf.write("host  all   " + args.postgres_username +
-                    ",mapred 0.0.0.0/0  md5")
-    pgHbaConf.write("\n")
-    pgHbaConf.write("host  all   " + args.postgres_username +
-                    ",mapred ::/0 md5")
-    pgHbaConf.write("\n")
-  command = PG_HBA_RELOAD_CMD.format(PG_HBA_DIR)
-  retcode, out, err = run_os_command(command)
-  if not retcode == 0:
-    print err
-    sys.exit(retcode)
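-
-# With the default username, the entries appended to pg_hba.conf look like:
-#   local  all  ambari-server,mapred md5
-#   host  all   ambari-server,mapred 0.0.0.0/0  md5
-#   host  all   ambari-server,mapred ::/0 md5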
-
-
-
-def configure_pg_hba_postgres_user():
-  postgresString = "all   postgres"
-  for line in fileinput.input(PG_HBA_CONF_FILE, inplace=1):
-    print re.sub('all\s*all', postgresString, line),
-  os.chmod(PG_HBA_CONF_FILE, 0644)
-
-
-
-def configure_postgresql_conf():
-  listenAddress = "listen_addresses = '*'        #"
-  for line in fileinput.input(POSTGRESQL_CONF_FILE, inplace=1):
-    print re.sub('#+listen_addresses.*?(#|$)', listenAddress, line),
-  os.chmod(POSTGRESQL_CONF_FILE, 0644)
-
-
-
-def configure_postgres():
-  if os.path.isfile(PG_HBA_CONF_FILE):
-    if not os.path.isfile(PG_HBA_CONF_FILE_BACKUP):
-      shutil.copyfile(PG_HBA_CONF_FILE, PG_HBA_CONF_FILE_BACKUP)
-    else:
-      # Postgres has been configured before; do not overwrite the backup
-      print "Backup for pg_hba found, reconfiguration not required"
-      return 0
-  configure_pg_hba_postgres_user()
-  configure_pg_hba_ambaridb_users()
-  os.chmod(PG_HBA_CONF_FILE, 0644)
-  configure_postgresql_conf()
-  #restart postgresql if already running
-  pg_status = get_postgre_status()
-  if pg_status == PG_STATUS_RUNNING:
-    retcode = restart_postgres()
-    return retcode
-  return 0
-
-
-
-def restart_postgres():
-  print "Restarting PostgreSQL"
-  process = subprocess.Popen(PG_RESTART_CMD.split(' '),
-    stdout=subprocess.PIPE,
-    stdin=subprocess.PIPE,
-    stderr=subprocess.PIPE
-  )
-  time.sleep(5)
-  result = process.poll()
-  if result is None:
-    print_info_msg("Killing restart PostgresSQL process")
-    process.kill()
-    pg_status = get_postgre_status()
-    # SUSE Linux sets the status of a stopped postgresql service to "unused"
-    if pg_status == "unused" or pg_status == "stopped":
-      print_info_msg("PostgreSQL is stopped. Restarting ...")
-      retcode, out, err = run_os_command(PG_START_CMD)
-      return retcode
-  return 0
-
-
-
-def run_os_command(cmd):
-  print_info_msg('about to run command: ' + str(cmd))
-  if type(cmd) == str:
-    cmd = shlex.split(cmd)
-  process = subprocess.Popen(cmd,
-    stdout=subprocess.PIPE,
-    stdin=subprocess.PIPE,
-    stderr=subprocess.PIPE
-  )
-  (stdoutdata, stderrdata) = process.communicate()
-  return process.returncode, stdoutdata, stderrdata
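-
-# run_os_command accepts either a command string (split via shlex) or an
-# argv list, e.g.:
-#   retcode, out, err = run_os_command("/sbin/service postgresql status")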
-
-
-
-# todo: check whether the property already exists before appending
-
-def write_property(key, value):
-  conf_file = search_file(AMBARI_PROPERTIES_FILE, get_conf_dir())
-  with open(conf_file, 'a') as ambariConf:
-    ambariConf.write(key + "=" + value)
-    ambariConf.write("\n")
-  return 0
-
-
-def setup_db(args):
-  #password access to ambari-server and mapred
-  configure_postgres_username_password(args)
-  dbname = args.postgredbname
-  file = args.init_script_file
-  username = args.postgres_username
-  password = args.postgres_password
-  command = SETUP_DB_CMD[:]
-  command[-1] = command[-1].format(file, username, password)
-  retcode, outdata, errdata = run_os_command(command)
-  if not retcode == 0:
-    print errdata
-  return retcode
-
-
-def execute_db_script(args, file):
-  #password access to ambari-server and mapred
-  configure_postgres_username_password(args)
-  dbname = args.postgredbname
-  username = args.postgres_username
-  password = args.postgres_password
-  command = SETUP_DB_CMD[:]
-  command[-1] = command[-1].format(file, username, password)
-  retcode, outdata, errdata = run_os_command(command)
-  if not retcode == 0:
-    print errdata
-  return retcode
-
-
-def check_db_consistency(args, file):
-  #password access to ambari-server and mapred
-  configure_postgres_username_password(args)
-  dbname = args.postgredbname
-  username = args.postgres_username
-  password = args.postgres_password
-  command = SETUP_DB_CMD[:]
-  command[-1] = command[-1].format(file, username, password)
-  retcode, outdata, errdata = run_os_command(command)
-  if not retcode == 0:
-    print errdata
-    return retcode
-  else:
-    # Assumes that the output is of the form ...\n<count>
-    print_info_msg("Parsing output: " + outdata)
-    lines = outdata.splitlines()
-    if (lines[-1] == '3'):
-      return 0
-  return -1
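-
-# Note: the check script's last output line must equal '3' for the database
-# to be considered consistent; any other count is treated as an inconsistency.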
-
-
-def upgrade_stack(args, stack_id):
-  #password access to ambari-server and mapred
-  configure_postgres_username_password(args)
-  dbname = args.postgredbname
-  file = args.upgrade_stack_script_file
-  stack_name, stack_version = stack_id.split(STACK_NAME_VER_SEP)
-  command = UPGRADE_STACK_CMD[:]
-  command[-1] = command[-1].format(file, stack_name, stack_version)
-  retcode, outdata, errdata = run_os_command(command)
-  if not retcode == 0:
-    print errdata
-  return retcode
-
-#
-# Checks SELinux
-#
-def check_selinux():
-  try:
-    retcode, out, err = run_os_command(GET_SE_LINUX_ST_CMD)
-    se_status = re.search('(disabled|enabled)', out).group(0)
-    print "SELinux status is '" + se_status + "'"
-    if se_status == SE_STATUS_DISABLED:
-      return 0
-    else:
-      se_mode = ''
-      try:
-        se_mode = re.search('(enforcing|permissive)', out).group(0)
-      except AttributeError:
-        print_error_msg("Error determining SELinux mode. Exiting.")
-        sys.exit(0)
-      print "SELinux mode is '" + se_mode + "'"
-      if se_mode == SE_MODE_ENFORCING:
-        print "Temporarily disabling SELinux"
-        run_os_command(SE_SETENFORCE_CMD)
-      print_warning_msg(
-        "SELinux is set to 'permissive' mode and temporarily disabled."
-        " You should disable SELinux permanently.")
-      ok = get_YN_input("OK to continue [y/n] (y)? ", True)
-      if ok == False:
-        sys.exit(0)
-      return 0
-  except OSError:
-    print_warning_msg("Could not run {0}: OK".format(GET_SE_LINUX_ST_CMD))
-  return 0
-
-
-
-def get_ambari_jars():
-  try:
-    conf_dir = os.environ[AMBARI_SERVER_LIB]
-    return conf_dir
-  except KeyError:
-    default_jar_location = "/usr/lib/ambari-server"
-    print_info_msg(AMBARI_SERVER_LIB + " is not set, using default "
-                 + default_jar_location)
-    return default_jar_location
-
-
-
-def get_conf_dir():
-  try:
-    conf_dir = os.environ[AMBARI_CONF_VAR]
-    return conf_dir
-  except KeyError:
-    default_conf_dir = "/etc/ambari-server/conf"
-    print_info_msg(AMBARI_CONF_VAR + " is not set, using default "
-                 + default_conf_dir)
-    return default_conf_dir
-
-
-
-def search_file(filename, search_path, pathsep=os.pathsep):
-  """ Given a search path, find file with requested name """
-  for path in string.split(search_path, pathsep):
-    candidate = os.path.join(path, filename)
-    if os.path.exists(candidate): return os.path.abspath(candidate)
-  return None
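-
-# Example: locating ambari.properties on the configuration search path,
-# as the other functions here do:
-#   conf_file = search_file(AMBARI_PROPERTIES_FILE, get_conf_dir())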
-
-
-
-#
-# Checks iptables
-#
-def check_iptables():
-  # not used: this check doesn't work on CentOS 6.2 when both the firewall
-  # and the iptables service are running
-  # retcode, out, err = run_os_command(IP_TBLS_ST_CMD)
-  # if out == IP_TBLS_ENABLED:
-  #   print 'iptables is enabled now'
-  #   print 'Stopping iptables service'
-  retcode, out, err = run_os_command(IP_TBLS_STOP_CMD)
-  print 'iptables is disabled now'
-
-  if not retcode == 0 and err and len(err) > 0:
-    print err
-
-  if err.strip() == IP_TBLS_SRVC_NT_FND:
-    # keep the return shape consistent; callers unpack (retcode, out)
-    return 0, out
-  else:
-    return retcode, out
-
-
-
-def dlprogress(base_name, count, blockSize, totalSize):
-  percent = int(count * blockSize * 100 / totalSize)
-
-  if (totalSize < blockSize):
-    sys.stdout.write("\r" + base_name + "... %d%%" % (100))
-  else:
-    sys.stdout.write("\r" + base_name + "... %d%% (%.1f MB of %.1f MB)" % (
-      percent, count * blockSize / 1024 / 1024.0, totalSize / 1024 / 1024.0))
-
-  if (percent == 100 or totalSize < blockSize):
-    sys.stdout.write("\n")
-  sys.stdout.flush()
-
-
-
-def track_jdk(base_name, url, local_name):
-  u = urllib2.urlopen(url)
-  h = u.info()
-  totalSize = int(h["Content-Length"])
-  fp = open(local_name, "wb")
-  blockSize = 8192
-  count = 0
-  percent = 0
-  while True:
-    chunk = u.read(blockSize)
-    if not chunk:
-      break
-    fp.write(chunk)
-    count += 1
-
-    dlprogress(base_name, count, blockSize, totalSize)
-
-  fp.flush()
-  fp.close()
-
-
-
-#
-# Downloads the JDK
-#
-def download_jdk(args):
-  conf_file = search_file(AMBARI_PROPERTIES_FILE, get_conf_dir())
-  if conf_file is None:
-    print 'File %s not found in search path $%s: %s' %\
-          (AMBARI_PROPERTIES_FILE, AMBARI_CONF_VAR, get_conf_dir())
-    return -1
-  print_info_msg('Loading properties from ' + conf_file)
-
-  if get_JAVA_HOME():
-    return 0
-
-  if args.java_home and os.path.exists(args.java_home):
-    print_warning_msg("JAVA_HOME " + args.java_home
-                    + " must be valid on ALL hosts")
-    write_property(JAVA_HOME_PROPERTY, args.java_home)
-    return 0
-
-  properties = None
-  try:
-    properties = Properties()
-    properties.load(open(conf_file))
-  except (Exception), e:
-    print 'Could not read "%s": %s' % (conf_file, e)
-    return -1
-
-  try:
-    jdk_url = properties['jdk.url']
-    resources_dir = properties['resources.dir']  
-  except (KeyError), e:
-    print 'Property ' + str(e) + ' is not defined at ' + conf_file
-    return -1
-  dest_file = resources_dir + os.sep + JDK_LOCAL_FILENAME
-  if not os.path.exists(dest_file):
-    print 'Downloading JDK from ' + jdk_url + ' to ' + dest_file
-    try:
-      size_command = JDK_DOWNLOAD_SIZE_CMD.format(jdk_url)
-      # fetch the HTTP headers first to learn the expected file size
-      retcode, out, err = run_os_command(size_command)
-      if out.find("Content-Length") == -1:
-        print "Response headers don't contain Content-Length"
-        return -1
-      start_with = int(out.find("Content-Length") + len("Content-Length") + 2)
-      end_with = out.find("\r\n", start_with)
-      src_size = int(out[start_with:end_with])
-      print 'JDK distribution size is ' + str(src_size) + ' bytes'
-      file_exists = os.path.isfile(dest_file)
-      file_size = -1
-      if file_exists:
-        file_size = os.stat(dest_file).st_size
-      if file_exists and file_size == src_size:
-        print_info_msg("File already exists")
-      else:
-        track_jdk(JDK_LOCAL_FILENAME, jdk_url, dest_file)
-        print 'Successfully downloaded JDK distribution to ' + dest_file
-    except Exception, e:
-      print_error_msg('Failed to download JDK: ' + str(e))
-      return -1
-    downloaded_size = os.stat(dest_file).st_size
-    if downloaded_size != src_size or downloaded_size < JDK_MIN_FILESIZE:
-      print_error_msg('Size of downloaded JDK distribution file is '
-                    + str(downloaded_size) + ' bytes, it is probably '
-                      'damaged or incomplete')
-      return -1
-  else:
-    print "JDK already exists using " + dest_file
-  
-  try:
-     out = install_jdk(dest_file)
-     jdk_version = re.search('Creating (jdk.*)/jre', out).group(1)
-  except Exception, e:
-     print "Installation of JDK was failed: %s\n" % e.message
-     file_exists = os.path.isfile(dest_file)
-     if file_exists:
-        ok = get_YN_input("JDK found at "+dest_file+". "
-                    "Would you like to re-download the JDK [y/n] (y)? ", True)
-        if (ok == False):
-           print "Unable to install JDK. Please remove JDK file found at "+ dest_file +" and re-run Ambari Server setup" 
-           return -1
-        else:
-           track_jdk(JDK_LOCAL_FILENAME, jdk_url, dest_file)
-           print 'Successfully re-downloaded JDK distribution to ' + dest_file 
-           try:
-               out = install_jdk(dest_file)
-               jdk_version = re.search('Creating (jdk.*)/jre', out).group(1)
-           except Exception, e:
-               print "Installation of JDK was failed: %s\n" % e.message
-               print "Unable to install JDK. Please remove JDK, file found at "+ dest_file +" and re-run Ambari Server setup" 
-               return -1              
-  
-     else:
-         print "Unable to install JDK. File "+ dest_file +"does not exist, please re-run Ambari Server setup"
-         return -1
-  
-  print "Successfully installed JDK to {0}/{1}".\
-      format(JDK_INSTALL_DIR, jdk_version)
-  write_property(JAVA_HOME_PROPERTY, "{0}/{1}".
-      format(JDK_INSTALL_DIR, jdk_version))
-  return 0
-
-class RetCodeException(Exception): pass
-
-def install_jdk(dest_file):
-  ok = get_YN_input("To install the Oracle JDK you must accept the "
-                    "license terms found at "
-                    "http://www.oracle.com/technetwork/java/javase/"
-                  "downloads/jdk-6u21-license-159167.txt. Not accepting will "
-                  "cancel the Ambari Server setup.\nDo you accept the "
-                  "Oracle Binary Code License Agreement [y/n] (y)? ", True)
-  if (ok == False):
-    return -1
-
-  print "Installing JDK to {0}".format(JDK_INSTALL_DIR)
-  retcode, out, err = run_os_command(CREATE_JDK_DIR_CMD)
-  savedPath = os.getcwd()
-  os.chdir(JDK_INSTALL_DIR)
-  retcode, out, err = run_os_command(MAKE_FILE_EXECUTABLE_CMD.format(dest_file))
-  retcode, out, err = run_os_command(dest_file + ' -noregister')
-  os.chdir(savedPath)
-  if retcode != 0:
-    raise RetCodeException("JDK installation returned exit code %s" % retcode)
-  return out
-
-def get_postgre_status():
-  retcode, out, err = run_os_command(PG_ST_CMD)
-  try:
-    pg_status = re.search('(stopped|running)', out).group(0)
-  except AttributeError:
-    pg_status = None
-  return pg_status 
-
-
-
-def check_postgre_up():
-  pg_status = get_postgre_status()
-  if pg_status == PG_STATUS_RUNNING:
-    print_info_msg ("PostgreSQL is running")
-    return 0
-  else:
-    print "About to start PostgreSQL"
-    retcode, out, err = run_os_command(PG_START_CMD)
-    return retcode
-
-
-
-#
-# Configures the OS settings in ambari properties.
-#
-def configure_os_settings():
-  conf_file = search_file(AMBARI_PROPERTIES_FILE, get_conf_dir())
-  if conf_file is None:
-    print_error_msg ('File %s not found in search path $%s: %s'
-                   % (AMBARI_PROPERTIES_FILE, AMBARI_CONF_VAR, get_conf_dir()))
-    return -1
-  print_info_msg ('Loading properties from ' + conf_file)
-  properties = None
-  try:
-    properties = Properties()
-    properties.load(open(conf_file))
-  except (Exception), e:
-    print_error_msg ('Could not read "%s": %s' % (conf_file, e))
-    return -1
-  try:
-    conf_os_type = properties[OS_TYPE_PROPERTY]
-    if conf_os_type != '':
-      print_info_msg ("os_type already setting in properties file")
-      return 0
-  except (KeyError), e:
-    print_error_msg ("os_type is not set in properties file")
-
-  os_system = platform.system()
-  if os_system != 'Linux':
-    print_error_msg ("Non-Linux systems are not supported")
-    return -1
-
-  os_info = platform.linux_distribution(
-    None, None, None, ['SuSE', 'redhat' ], 0
-  )
-  os_name = os_info[0].lower()
-  if os_name == 'suse':
-    os_name = 'sles'
-  os_version = os_info[1].split('.', 1)[0]
-  master_os_type = os_name + os_version
-  with open(conf_file, "a") as ambariConf:
-    ambariConf.write(OS_TYPE_PROPERTY + "=" + master_os_type)
-    ambariConf.write("\n")
-  return 0
-
-
-
-def get_JAVA_HOME():
-  conf_file = search_file(AMBARI_PROPERTIES_FILE, get_conf_dir())
-  properties = Properties()
-  
-  try:
-    properties.load(open(conf_file))
-    java_home = properties[JAVA_HOME_PROPERTY]
-    if (not 0 == len(java_home)) and (os.path.exists(java_home)):
-      return java_home
-  except (Exception), e:
-    print 'Could not read "%s": %s' % (conf_file, e)
-  
-  return None
-
-
-
-#
-# Finds the available JDKs.
-#
-def find_jdk():
-  if get_JAVA_HOME():
-    return get_JAVA_HOME()
-  print "Looking for available JDKs at " + JDK_INSTALL_DIR
-  jdks = glob.glob(JDK_INSTALL_DIR + os.sep + "jdk*")
-  jdks.sort()
-  print "Found: " + str(jdks)
-  count = len(jdks)
-  if count == 0:
-    return
-  jdkPath = jdks[count - 1]
-  print "Selected JDK {0}".format(jdkPath)
-  return jdkPath
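-
-# The lexicographic sort means the last glob entry, i.e. the highest-versioned
-# directory, wins: e.g. /usr/jdk64/jdk1.6.0_31 over /usr/jdk64/jdk1.6.0_21
-# (illustrative paths).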
-
-
-#
-# Setup the Ambari Server.
-#
-def setup(args):
-
-  print 'Checking SELinux...'
-  retcode = check_selinux()
-  if not retcode == 0:
-    print_error_msg ('Failed to disable SELinux. Exiting.')
-    sys.exit(retcode)
-   
-  print 'Checking iptables...'
-  retcode, out = check_iptables()
-  if not retcode == 0 and out == IP_TBLS_ENABLED:
-    print_error_msg ('Failed to stop iptables. Exiting.')
-    sys.exit(retcode)
-
-  print 'Checking PostgreSQL...'
-  retcode = check_postgre_up()
-  if not retcode == 0:
-    print_error_msg ('Unable to start PostgreSQL server. Exiting')
-    sys.exit(retcode)
-
-  print 'Configuring database...'
-  retcode = setup_db(args)
-  if not retcode == 0:
-    print_error_msg ('Running the database init script failed. Exiting.')
-    sys.exit(retcode)
-    
-  print 'Configuring PostgreSQL...'
-  retcode = configure_postgres()
-  if not retcode == 0:
-    print_error_msg ('Unable to configure PostgreSQL server. Exiting')
-    sys.exit(retcode)
-  
-  print 'Checking JDK...'
-  retcode = download_jdk(args)
-  if not retcode == 0:
-    print_error_msg ('Downloading or installing JDK failed. Exiting.')
-    sys.exit(retcode)
-
-  print 'Completing setup...'
-  retcode = configure_os_settings()
-  if not retcode == 0:
-    print_error_msg ('Configure of OS settings in '
-                   'ambari.properties failed. Exiting.')
-    sys.exit(retcode)
-
-  print "Ambari Server 'setup' finished successfully"
-
-
-
-#
-# Resets the Ambari Server.
-#
-def reset(args):
-  okToRun = False
-  choice = raw_input("**** WARNING **** You are about to reset and clear the "
-                     "Ambari Server database. This will remove all cluster "
-                     "host and configuration information from the database. "
-                     "You will be required to re-configure the Ambari server "
-                     "and re-run the cluster wizard. \n"
-                     "Are you SURE you want to perform the reset "
-                     "[yes/no]? ").lower()
-  if choice in set(['yes']):
-    okToRun = True
-
-  if not okToRun:
-    print "Ambari Server 'reset' cancelled"
-    return -1
-
-  okToRun = False
-  choice = raw_input("Confirm server reset [yes/no]? ").lower()
-  if choice in set(['yes']):
-    okToRun = True
-
-  if not okToRun:
-    print "Ambari Server 'reset' cancelled"
-    return -1
-
-  print "Reseting the Server database..."
-
-  configure_postgres_username_password(args)
-  dbname = args.postgredbname
-  filename = args.drop_script_file
-  username = args.postgres_username
-  password = args.postgres_password
-  command = SETUP_DB_CMD[:]
-  command[-1] = command[-1].format(filename, username, password)
-  retcode, outdata, errdata = run_os_command(command)
-  if not retcode == 0:
-    print errdata
-    return retcode
-
-  print_info_msg ("About to run database setup")
-  setup_db(args)
-
-  print "Ambari Server 'reset' complete"
-
-
-
-#
-# Starts the Ambari Server.
-#
-def start(args):
-  if os.path.exists(PID_DIR + os.sep + PID_NAME):
-    f = open(PID_DIR + os.sep + PID_NAME, "r")
-    pid = int(f.readline())
-    f.close()
-    try:
-      os.kill(pid, 0)
-      print("Server is already running.")
-      return
-    except OSError, e:
-      print_info_msg("Server is not running, continue.")
-
-  conf_dir = get_conf_dir()
-  jdk_path = find_jdk()
-  if jdk_path is None:
-    print_error_msg("No JDK found, please run the \"setup\" "
-                    "command to install a JDK automatically or install any "
-                    "JDK manually to " + JDK_INSTALL_DIR)
-    return -1
-  retcode = check_postgre_up()
-  if not retcode == 0:
-    print_error_msg("Unable to start PostgreSQL server. Exiting")
-    sys.exit(retcode)
-
-  print 'Checking iptables...'
-  retcode, out = check_iptables()
-  if not retcode == 0 and out == IP_TBLS_ENABLED:
-    print_error_msg ("Failed to stop iptables. Exiting")
-    sys.exit(retcode)
-
-  command = SERVER_START_CMD.format(jdk_path, conf_dir, get_ambari_jars())
-  print "Running server: " + command
-  server_process = subprocess.Popen(["/bin/sh", "-c", command])
-  f = open(PID_DIR + os.sep + PID_NAME, "w")
-  f.write(str(server_process.pid))
-  f.close()
-
-
-
-#
-# Stops the Ambari Server.
-#
-def stop(args):
-  if os.path.exists(PID_DIR + os.sep + PID_NAME):
-    f = open(PID_DIR + os.sep + PID_NAME, "r")
-    pid = int(f.readline())
-    try:
-      os.killpg(os.getpgid(pid), signal.SIGKILL)
-    except OSError, e:
-      print_info_msg( "Unable to stop Ambari Server - " + str(e) )
-      return
-    f.close()
-    os.remove(f.name)
-    print "Ambari Server stopped"
-  else:
-    print "Ambari Server not running"
-
-
-
-#
-# Upgrades the Ambari Server.
-#
-def upgrade(args):
-  print 'Checking PostgreSQL...'
-  retcode = check_postgre_up()
-  if not retcode == 0:
-    print_error_msg('PostgreSQL server not running. Exiting')
-    sys.exit(retcode)
-
-  file = args.upgrade_script_file
-  print 'Upgrading database...'
-  retcode = execute_db_script(args, file)
-  if not retcode == 0:
-    print_error_msg('Database upgrade script has failed. Exiting.')
-    sys.exit(retcode)
-
-  print 'Checking database integrity...'
-  check_file = file[:-3] + "Check" + file[-4:]
-  retcode = check_db_consistency(args, check_file)
-
-  if not retcode == 0:
-    print 'Found inconsistency. Trying to fix...'
-    fix_file = file[:-3] + "Fix" + file[-4:]
-    retcode = execute_db_script(args, fix_file)
-
-    if not retcode == 0:
-      print_error_msg('Database cannot be fixed. Exiting.')
-      sys.exit(retcode)
-  else:
-    print 'Database is consistent.'
-  print "Ambari Server 'upgrade' finished successfully"
-
-#
-# Prints an "info" messsage.
-#
-def print_info_msg(msg):
-  if VERBOSE:
-    print("INFO: " + msg)
-
-
-
-#
-# Prints an "error" messsage.
-#
-def print_error_msg(msg):
-  print("ERROR: " + msg)
-
-
-
-#
-# Prints a "warning" messsage.
-#
-def print_warning_msg(msg):
-  print("WARNING: " + msg)
-
-
-
-#
-# Gets the y/n input.
-#
-# return True if 'y' or False if 'n'
-#
-def get_YN_input(prompt,default):
-  yes = set(['yes','ye', 'y'])
-  no = set(['no','n'])
-  return get_choice_string_input(prompt,default,yes,no)
-
-
-
-def get_choice_string_input(prompt,default,firstChoice,secondChoice):
-  if SILENT:
-    print(prompt)
-    return default
-  choice = raw_input(prompt).lower()
-  if choice in firstChoice:
-    return True
-  elif choice in secondChoice:
-    return False
-  elif choice is "": # Just enter pressed
-    return default
-  else:
-    print "input not recognized, please try again: "
-    return get_choice_string_input(prompt,default,firstChoice,secondChoice)
-
-
-
-def get_validated_string_input(prompt, default, pattern, description, is_pass):
-  input =""
-  while not input:
-    if SILENT:
-      print (prompt)
-      input = default
-    elif is_pass:
-      input = getpass.getpass(prompt)
-    else:
-      input = raw_input(prompt)
-    if not input.strip():
-      input = default
-      break #done here and picking up default
-    else:
-      if not re.search(pattern,input.strip()):
-        print description
-        input=""
-  return input
-
-
-
-def configure_postgres_password():
-  # setup password
-  passwordDefault = PG_DEFAULT_PASSWORD
-  passwordPrompt = 'Password [' + passwordDefault + ']: '
-  passwordPattern = "^[a-zA-Z0-9_-]*$"
-  passwordDescr = "Invalid characters in password. Use only alphanumeric or " \
-                  "_ or - characters"
-
-  password = get_validated_string_input(passwordPrompt, passwordDefault,
-      passwordPattern, passwordDescr, True)
-  if password != passwordDefault:
-    password1 = get_validated_string_input("Re-enter password: ",
-      passwordDefault, passwordPattern, passwordDescr, True)
-    if password != password1:
-      print "Passwords do not match"
-      password = configure_postgres_password()
-
-  return password
-
-
-
-def get_pass_file_path(conf_file):
-  return os.path.join(os.path.dirname(conf_file),
-    JDBC_PASSWORD_FILENAME)
-
-
-
-def configure_postgres_username_password(args):
-  conf_file = search_file(AMBARI_PROPERTIES_FILE, get_conf_dir())
-  properties = Properties()
-
-  try:
-    properties.load(open(conf_file))
-  except Exception, e:
-    print 'Could not read ambari config file "%s": %s' % (conf_file, e)
-    return -1
-
-  username = properties[JDBC_USER_NAME_PROPERTY]
-  passFilePath = properties[JDBC_PASSWORD_FILE_PROPERTY]
-
-  if username and passFilePath:
-    print_info_msg("Database username + password already configured - skipping")
-    args.postgres_username=username
-    args.postgres_password = open(passFilePath).read()
-    return 1
-
-  # setup username
-  usernameDefault = 'ambari-server'
-  usernamePrompt = 'Username [' + usernameDefault + ']: '
-  usernamePattern = "^[a-zA-Z_][a-zA-Z0-9_\-]*$"
-  usernameDescr = "Invalid characters in username. Start with _ or alpha " \
-                  "followed by alphanumeric or _ or - characters"
-  username = usernameDefault
-
-  # setup password
-  password = PG_DEFAULT_PASSWORD
-
-  ok = get_YN_input("Enter advanced database configuration [y/n] (n)? ", False)
-  if ok:
-    username = get_validated_string_input(usernamePrompt, usernameDefault,
-        usernamePattern, usernameDescr, False)
-    print "Database username set to: " + username
-    password = configure_postgres_password()
-        
-  passFilePath = get_pass_file_path(conf_file)
-  
-  print_info_msg ("Database username set to: " + username)
-  print_info_msg ("Database password set to: " + password)
-    
-  with open(passFilePath, 'w+') as passFile:
-    passFile.write(password)
-    pass
-  os.chmod(passFilePath, stat.S_IREAD | stat.S_IWRITE)
-
-  write_property(JDBC_USER_NAME_PROPERTY, username)
-  write_property(JDBC_PASSWORD_FILE_PROPERTY,passFilePath)
-  args.postgres_username=username
-  args.postgres_password=password
-
-
-
-#
-# Main.
-#
-def main():
-  parser = optparse.OptionParser(usage="usage: %prog [options] action [stack_id]",)
-  parser.add_option('-d', '--postgredbname', default='ambari',
-                      help="Database name in postgresql")
-  parser.add_option('-f', '--init-script-file',
-                      default='/var/lib/ambari-server/'
-                              'resources/Ambari-DDL-Postgres-CREATE.sql',
-                      help="File with setup script")
-  parser.add_option('-r', '--drop-script-file', default="/var/lib/"
-                              "ambari-server/resources/"
-                              "Ambari-DDL-Postgres-DROP.sql",
-                      help="File with drop script")
-  parser.add_option('-u', '--upgrade-script-file', default="/var/lib/"
-                              "ambari-server/resources/upgrade/ddl/"
-                              "Ambari-DDL-Postgres-UPGRADE-1.2.2.sql",
-                      help="File with upgrade script")
-  parser.add_option('-t', '--upgrade-stack-script-file', default="/var/lib/"
-                              "ambari-server/resources/upgrade/dml/"
-                              "Ambari-DML-Postgres-UPGRADE_STACK.sql",
-                      help="File with stack upgrade script")
-  parser.add_option('-j', '--java-home', default=None,
-                  help="Use specified java_home.  Must be valid on all hosts")
-  parser.add_option("-v", "--verbose",
-                  action="store_true", dest="verbose", default=False,
-                  help="Print verbose status messages")
-  parser.add_option("-s", "--silent",
-                  action="store_true", dest="silent", default=False,
-                  help="Silently accepts default prompt values")
-
-  (options, args) = parser.parse_args()
-
-  # set verbose
-  global VERBOSE
-  VERBOSE = options.verbose
-
-  # set silent
-  global SILENT
-  SILENT = options.silent
-
-
-  
-  if len(args) == 0:
-    parser.print_help()
-    parser.error("No action entered")
-	
-  action = args[0]
-
-  if action == UPGRADE_STACK_ACTION:
-    args_number_required = 2
-  else:
-    args_number_required = 1
-	
-  if len(args) < args_number_required:
-    parser.print_help()
-    parser.error("Invalid number of arguments. Entered: " + str(len(args)) + ", required: " + str(args_number_required))
- 
-  if action == SETUP_ACTION:
-    setup(options)
-  elif action == START_ACTION:
-    start(options)
-  elif action == STOP_ACTION:
-    stop(options)
-  elif action == RESET_ACTION:
-    reset(options)
-  elif action == UPGRADE_ACTION:
-    upgrade(options)
-  elif action == UPGRADE_STACK_ACTION:
-    stack_id = args[1]
-    upgrade_stack(options, stack_id)
-  else:
-    parser.error("Invalid action")
-
-
-
-
-# A Python replacement for java.util.Properties
-# Based on http://code.activestate.com/recipes
-# /496795-a-python-replacement-for-javautilproperties/
-class Properties(object):
-  def __init__(self, props=None):
-    self._props = {}
-    self._origprops = {}
-    self._keymap = {}
-
-    self.othercharre = re.compile(r'(?<!\\)(\s*\=)|(?<!\\)(\s*\:)')
-    self.othercharre2 = re.compile(r'(\s*\=)|(\s*\:)')
-    self.bspacere = re.compile(r'\\(?!\s$)')
-
-  def __parse(self, lines):
-    lineno = 0
-    i = iter(lines)
-    for line in i:
-      lineno += 1
-      line = line.strip()
-      if not line: continue
-      if line[0] == '#': continue
-      escaped = False
-      sepidx = -1
-      flag = 0
-      m = self.othercharre.search(line)
-      if m:
-        first, last = m.span()
-        start, end = 0, first
-        flag = 1
-        wspacere = re.compile(r'(?<![\\\=\:])(\s)')
-      else:
-        # whitespace alone may be the separator here, so always prepare the
-        # whitespace regex (guards against it being unbound in the search below)
-        wspacere = re.compile(r'(?<![\\])(\s)')
-        start, end = 0, len(line)
-      m2 = wspacere.search(line, start, end)
-      if m2:
-        first, last = m2.span()
-        sepidx = first
-      elif m:
-        first, last = m.span()
-        sepidx = last - 1
-      while line[-1] == '\\':
-        nextline = i.next()
-        nextline = nextline.strip()
-        lineno += 1
-        line = line[:-1] + nextline
-      if sepidx != -1:
-        key, value = line[:sepidx], line[sepidx + 1:]
-      else:
-        key, value = line, ''
-      self.process_pair(key, value)
-
-  def process_pair(self, key, value):
-    oldkey = key
-    oldvalue = value
-    keyparts = self.bspacere.split(key)
-    strippable = False
-    lastpart = keyparts[-1]
-    if lastpart.find('\\ ') != -1:
-      keyparts[-1] = lastpart.replace('\\', '')
-    elif lastpart and lastpart[-1] == ' ':
-      strippable = True
-    key = ''.join(keyparts)
-    if strippable:
-      key = key.strip()
-      oldkey = oldkey.strip()
-    oldvalue = self.unescape(oldvalue)
-    value = self.unescape(value)
-    self._props[key] = value.strip()
-    if self._keymap.has_key(key):
-      oldkey = self._keymap.get(key)
-      self._origprops[oldkey] = oldvalue.strip()
-    else:
-      self._origprops[oldkey] = oldvalue.strip()
-      self._keymap[key] = oldkey
-
-  def unescape(self, value):
-    newvalue = value.replace('\:', ':')
-    newvalue = newvalue.replace('\=', '=')
-    return newvalue
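-
-  # Example of the parse/unescape pipeline (illustrative property): the line
-  #   jdbc.url = jdbc\:postgresql\://localhost/ambari
-  # yields key 'jdbc.url' and value 'jdbc:postgresql://localhost/ambari'.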
-
-  def load(self, stream):
-    if type(stream) is not file:
-      raise TypeError, 'Argument should be a file object!'
-    if stream.mode != 'r':
-      raise ValueError, 'Stream should be opened in read-only mode!'
-    try:
-      lines = stream.readlines()
-      self.__parse(lines)
-    except IOError, e:
-      raise
-
-  def get_property(self, key):
-    return self._props.get(key, '')
-
-  def propertyNames(self):
-    return self._props.keys()
-
-  def getPropertyDict(self):
-    return self._props
-
-  def __getitem__(self, name):
-    return self.get_property(name)
-
-  def __getattr__(self, name):
-    try:
-      return self.__dict__[name]
-    except KeyError:
-      if hasattr(self._props, name):
-        return getattr(self._props, name)
-
-
-if __name__ == "__main__":
-  main()
diff --git a/branch-1.2/ambari-server/src/main/python/bootstrap.py b/branch-1.2/ambari-server/src/main/python/bootstrap.py
deleted file mode 100644
index 2d93aef..0000000
--- a/branch-1.2/ambari-server/src/main/python/bootstrap.py
+++ /dev/null
@@ -1,363 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import socket
-import time
-import sys
-import logging
-import pprint
-import os
-import subprocess
-import threading
-import traceback
-from pprint import pformat
-
-AMBARI_PASSPHRASE_VAR_NAME = "AMBARI_PASSPHRASE"
-
-class SCP(threading.Thread):
-  """ SCP implementation that is thread based. The status can be returned using
-   status val """
-  def __init__(self, sshKeyFile, host, inputFile, remote, bootdir):
-    self.sshKeyFile = sshKeyFile
-    self.host = host
-    self.inputFile = inputFile
-    self.remote = remote
-    self.bootdir = bootdir
-    self.ret = {"exitstatus" : -1, "log" : "FAILED"}
-    threading.Thread.__init__(self)
-    pass
-  
-  def getStatus(self):
-    return self.ret
-    pass
-  
-  def getHost(self):
-    return self.host
-  
-  def run(self):
-    scpcommand = ["scp",
-                  "-o", "ConnectTimeout=60",
-                  "-o", "BatchMode=yes",
-                  "-o", "StrictHostKeyChecking=no",
-                  "-i", self.sshKeyFile, self.inputFile, "root@" +
-                   self.host + ":" + self.remote]
-    logging.info("Running scp command " + ' '.join(scpcommand))
-    scpstat = subprocess.Popen(scpcommand, stdout=subprocess.PIPE,
-                                  stderr=subprocess.PIPE)
-    log = scpstat.communicate()
-    self.ret["exitstatus"] = scpstat.returncode
-    self.ret["log"] = "STDOUT\n" + log[0] + "\nSTDERR\n" + log[1]
-    logFilePath = os.path.join(self.bootdir, self.host + ".log")
-    logFile = open(logFilePath, "a+")
-    logFile.write(self.ret["log"])
-    logFile.close()
-    logging.info("scp " + self.inputFile + " done for host " + self.host + ", exitcode=" + str(scpstat.returncode))
-    pass
-
-class SSH(threading.Thread):
-  """ Ssh implementation of this """
-  def __init__(self, sshKeyFile, host, command, bootdir):
-    self.sshKeyFile = sshKeyFile
-    self.host = host
-    self.command = command
-    self.bootdir = bootdir
-    self.ret = {"exitstatus" : -1, "log": "FAILED"}
-    threading.Thread.__init__(self)
-    pass
-  
-  def getHost(self):
-    return self.host
-  
-  def getStatus(self):
-    return self.ret
-  
-  def run(self):
-    sshcommand = ["ssh",
-                  "-o", "ConnectTimeOut=60",
-                  "-o", "StrictHostKeyChecking=no",
-                  "-o", "BatchMode=yes",
-                  "-tt", # Should prevent "tput: No value for $TERM and no -T specified" warning
-                  "-i", self.sshKeyFile,
-                  "root@" + self.host, self.command]
-    logging.info("Running ssh command " + ' '.join(sshcommand))
-    sshstat = subprocess.Popen(sshcommand, stdout=subprocess.PIPE,
-                                  stderr=subprocess.PIPE)
-    log = sshstat.communicate()
-    self.ret["exitstatus"] = sshstat.returncode
-    self.ret["log"] = "STDOUT\n" + log[0] + "\nSTDERR\n" + log[1]
-    logFilePath = os.path.join(self.bootdir, self.host + ".log")
-    logFile = open(logFilePath, "a+")
-    logFile.write(self.ret["log"])
-    logFile.close()
-
-    doneFilePath = os.path.join(self.bootdir, self.host + ".done")
-    doneFile = open(doneFilePath, "w+")
-    doneFile.write(str(sshstat.returncode))
-    doneFile.close()
-
-    logging.info("Setup agent done for host " + self.host + ", exitcode=" + str(sshstat.returncode))
-    pass
-pass
-
-
-def splitlist(hosts, n):
-  return [hosts[i:i+n] for i in range(0, len(hosts), n)]
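-# e.g. splitlist(['h1', 'h2', 'h3'], 2) -> [['h1', 'h2'], ['h3']]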
-
-def skip_failed_hosts(statuses):
-  """ Takes a dictionary <hostname, hoststatus> and returns list of hosts whose status is SUCCESS"""
-  res = list(key for key, value in statuses.iteritems() if value["exitstatus"] == 0)
-  return res
-
-def unite_statuses(statuses, update):
-  """ Takes two dictionaries <hostname, hoststatus> and returns dictionary with united entries (returncode is set
-  to the max value per host, logs per host are concatenated)"""
-  result = {}
-  for key, value in statuses.iteritems():
-    if key in update:
-      upd_status = update[key]
-      res_status = {
-        "exitstatus" : max(value["exitstatus"], upd_status["exitstatus"]),
-        "log" : value["log"] + "\n" + upd_status["log"]
-      }
-      result[key] = res_status
-    else:
-      result[key] = value
-  return result
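-
-# Example: uniting {"h1": {"exitstatus": 0, "log": "a"}} with
-# {"h1": {"exitstatus": 1, "log": "b"}} yields exitstatus 1 and log "a\nb",
-# so a failure at any step is preserved per host.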
-
-def get_difference(list1, list2):
-  """Takes two lists and returns list filled by elements of list1 that are absent at list2.
-  Duplicates are removed too"""
-  #res =
-  s1 = set(list1)
-  s2 = set(list2)
-  return list(s1- s2)
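-
-# e.g. get_difference(['h1', 'h2', 'h2'], ['h2']) -> ['h1']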
-
-class PSSH:
-  """Run SSH in parallel for a given list of hosts"""
-  def __init__(self, hosts, sshKeyFile, command, bootdir):
-    self.hosts = hosts
-    self.sshKeyFile = sshKeyFile
-    self.command = command
-    self.bootdir = bootdir
-    self.ret = {}
-    pass
-    
-  def getstatus(self):
-    return self.ret
-    pass
-  
-  def run(self):
-    """ Run 20 at a time in parallel """
-    for chunk in splitlist(self.hosts, 20):
-      chunkstats = []
-      for host in chunk:
-        ssh = SSH(self.sshKeyFile, host, self.command, self.bootdir)
-        ssh.start()
-        chunkstats.append(ssh)
-        pass
-      # wait for the ssh's to complete
-      for chunkstat in chunkstats:
-        chunkstat.join()
-        self.ret[chunkstat.getHost()] = chunkstat.getStatus()
-      pass
-    pass
-pass    
-
-class PSCP:
-  """Run SCP in parallel for a given list of hosts"""
-  def __init__(self, hosts, sshKeyFile, inputfile, remote, bootdir):
-    self.hosts = hosts
-    self.sshKeyFile = sshKeyFile
-    self.inputfile = inputfile
-    self.remote = remote
-    self.bootdir = bootdir
-    self.ret = {}
-    pass
-    
-  def getstatus(self):
-    return self.ret
-    pass
-  
-  def run(self):
-    """ Run 20 at a time in parallel """
-    for chunk in splitlist(self.hosts, 20):
-      chunkstats = []
-      for host in chunk:
-        scp = SCP(self.sshKeyFile, host, self.inputfile, self.remote, self.bootdir)
-        scp.start()
-        chunkstats.append(scp)
-        pass
-      # wait for the scp's to complete
-      for chunkstat in chunkstats:
-        chunkstat.join()
-        self.ret[chunkstat.getHost()] = chunkstat.getStatus()
-      pass
-    
-    pass
-pass    
-    
-class BootStrap:
-  """ BootStrapping the agents on a list of hosts"""
-  def __init__(self, hosts, sshkeyFile, scriptDir, boottmpdir, setupAgentFile, ambariServer):
-    self.hostlist = hosts
-    self.successive_hostlist = hosts
-    self.sshkeyFile = sshkeyFile
-    self.bootdir = boottmpdir
-    self.scriptDir = scriptDir
-    self.setupAgentFile = setupAgentFile
-    self.ambariServer = ambariServer
-    self.statuses = None
-    pass
-
-  def is_suse(self):
-    if os.path.isfile("/etc/issue"):
-      if "suse" in open("/etc/issue").read().lower():
-        return True
-    return False
-
-  def getRepoDir(self):
-    """ Ambari repo file for Ambari."""
-    if self.is_suse():
-      return "/etc/zypp/repos.d"
-    else:
-      return "/etc/yum.repos.d"
-  
-  def getRepoFile(self):
-    """ Ambari repo file for Ambari."""
-    return os.path.join(self.getRepoDir(), "ambari.repo")
-
-  def getSetupScript(self):
-    return os.path.join(self.scriptDir, "setupAgent.py")
-
-  def copyNeededFiles(self):
-    try:
-      # Copying the files
-      fileToCopy = self.getRepoFile()
-      targetDir = self.getRepoDir()
-      pscp = PSCP(self.hostlist, self.sshkeyFile, fileToCopy, targetDir, self.bootdir)
-      pscp.run()
-      out = pscp.getstatus()
-      # Preparing a report about failed hosts
-      self.successive_hostlist = skip_failed_hosts(out)
-      failed = get_difference(self.hostlist, self.successive_hostlist)
-      logging.info("Parallel scp returns for repo file. Failed hosts are: " + str(failed))
-      #updating statuses
-      self.statuses = out
-
-      pscp = PSCP(self.successive_hostlist, self.sshkeyFile, self.setupAgentFile, "/tmp", self.bootdir)
-      pscp.run()
-      out = pscp.getstatus()
-      # Preparing a report about failed hosts
-      failed_current = get_difference(self.successive_hostlist, skip_failed_hosts(out))
-      self.successive_hostlist = skip_failed_hosts(out)
-      failed = get_difference(self.hostlist, self.successive_hostlist)
-      logging.info("Parallel scp returns for agent script. All failed hosts are: " + str(failed) +
-                   ". Failed on last step: " + str(failed_current))
-      #updating statuses
-      self.statuses = unite_statuses(self.statuses, out)
-      retstatus = 0
-      if not failed: 
-        retstatus = 0
-      else:
-        retstatus = 1
-      return retstatus
-
-    except Exception, e:
-      logging.info("Traceback " + traceback.format_exc())
-      pass
-
-    pass
-
-  def runSetupAgent(self):
-    logging.info("Running setup agent...")
-    command = "python /tmp/setupAgent.py " + os.environ[AMBARI_PASSPHRASE_VAR_NAME] + " " + self.ambariServer
-    pssh = PSSH(self.successive_hostlist, self.sshkeyFile, command, self.bootdir)
-    pssh.run()
-    out = pssh.getstatus()
-
-    # Preparing a report about failed hosts
-    failed_current = get_difference(self.successive_hostlist, skip_failed_hosts(out))
-    self.successive_hostlist = skip_failed_hosts(out)
-    failed = get_difference(self.hostlist, self.successive_hostlist)
-    logging.info("Parallel ssh returns for setup agent. All failed hosts are: " + str(failed) +
-                 ". Failed on last step: " + str(failed_current))
-
-    #updating statuses
-    self.statuses = unite_statuses(self.statuses, out)
-    retstatus = 0
-    if not failed:
-      retstatus = 0
-    else:
-      retstatus = 1
-    # the status was previously computed but never returned, so run()
-    # silently ignored agent setup failures
-    return retstatus
-
-  def createDoneFiles(self):
-    """ Creates .done files for every host. These files are later read from Java code.
-    If .done file for any host is not created, the bootstrap will hang or fail due to timeout"""
-    for key, value in self.statuses.iteritems():
-      doneFilePath = os.path.join(self.bootdir, key + ".done")
-      if not os.path.exists(doneFilePath):
-        doneFile = open(doneFilePath, "w+")
-        doneFile.write(str(value["exitstatus"]))
-        doneFile.close()
-    pass
-
-  def run(self):
-    """ Copy files and run commands on remote hosts """
-    ret1 = self.copyNeededFiles()
-    logging.info("Copying files finished")
-    ret2 = self.runSetupAgent()
-    logging.info("Running ssh command finished")
-    retcode = max(ret1, ret2)
-    self.createDoneFiles()
-    return retcode
-    pass
-  pass
-  
-  
-def main(argv=None):
-  scriptDir = os.path.realpath(os.path.dirname(argv[0]))
-  onlyargs = argv[1:]
-  if len(onlyargs) < 5:  # all five positional arguments below are required
-    sys.stderr.write("Usage: <comma separated hosts> "
-                     "<tmpdir for storage> <sshkeyFile> <agent setup script> <ambari-server name>\n")
-    sys.exit(2)
-    pass
-  #Parse the input
-  hostList = onlyargs[0].split(",")
-  bootdir =  onlyargs[1]
-  sshKeyFile = onlyargs[2]
-  setupAgentFile = onlyargs[3]
-  ambariServer = onlyargs[4]
-
-  # ssh rejects private keys with open (group/world-readable) permissions
-  stat = subprocess.Popen(["chmod", "600", sshKeyFile], stdout=subprocess.PIPE)
-  
-  logging.info("BootStrapping hosts " + pprint.pformat(hostList) +
-               "using " + scriptDir + 
-              " with sshKey File " + sshKeyFile + " using tmp dir " + bootdir + " ambari: " + ambariServer)
-  bootstrap = BootStrap(hostList, sshKeyFile, scriptDir, bootdir, setupAgentFile, ambariServer)
-  ret = bootstrap.run()
-  #return  ret
-  return 0 # Hack to comply with current usage
-  
-if __name__ == '__main__':
-  logging.basicConfig(level=logging.DEBUG)
-  main(sys.argv)
diff --git a/branch-1.2/ambari-server/src/main/python/rrd.py b/branch-1.2/ambari-server/src/main/python/rrd.py
deleted file mode 100644
index d7ee49f..0000000
--- a/branch-1.2/ambari-server/src/main/python/rrd.py
+++ /dev/null
@@ -1,139 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import cgi
-#import cgitb
-import os
-import rrdtool
-import sys
-
-# place this script in /var/www/cgi-bin of the Ganglia collector
-# requires 'yum install rrdtool-python' on the Ganglia collector
-
-#cgitb.enable()
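-
-# Supported query-string parameters (parsed below): m = metric names,
-# h = host names, c = cluster names, p = rrd base path, s/e = start/end
-# time, r = resolution, cf = consolidation function (default AVERAGE).
-# Illustrative request:
-#   /cgi-bin/rrd.py?c=MyCluster&h=host1&m=cpu_user&cf=AVERAGE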
-
-def printMetric(clusterName, hostName, metricName, file, cf, start, end, resolution):
-  if clusterName.endswith("rrds"):
-    clusterName = ""
-
-  args = [file, cf]
-
-  if start is not None:
-    args.extend(["-s", start])
-
-  if end is not None:
-    args.extend(["-e", end])
-
-  if resolution is not None:
-    args.extend(["-r", resolution])
-
-  rrdMetric = rrdtool.fetch(args)
-
-  time = rrdMetric[0][0]
-  step = rrdMetric[0][2]
-
-  sys.stdout.write("  {\n    \"ds_name\":\"" + rrdMetric[1][0] +\
-                   "\",\n    \"cluster_name\":\"" + clusterName +\
-                   "\",\n    \"host_name\":\"" + hostName +\
-                   "\",\n    \"metric_name\":\"" + metricName + "\",\n")
-
-  firstDP = True
-  sys.stdout.write("    \"datapoints\":[\n")
-  for tuple in rrdMetric[2]:
-    if tuple[0] is not None:
-      if not firstDP:
-        sys.stdout.write(",\n")
-      firstDP = False
-      sys.stdout.write("      [")
-      sys.stdout.write(str(tuple[0]))
-      sys.stdout.write(",")
-      sys.stdout.write(str(time))
-      sys.stdout.write("]")
-    time = time + step
-  sys.stdout.write("\n    ]\n  }")
-  return
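-
-# printMetric emits one JSON object of the form (sketch):
-#   { "ds_name": ..., "cluster_name": ..., "host_name": ...,
-#     "metric_name": ..., "datapoints": [[value, timestamp], ...] }
-# where timestamps advance by the RRD step between datapoints.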
-
-def stripList(l):
-  return([x.strip() for x in l])
-
-sys.stdout.write("Content-type: application/json\n\n")
-
-queryString = dict(cgi.parse_qsl(os.environ['QUERY_STRING']))
-
-sys.stdout.write("[\n")
-
-firstMetric = True
-
-if "m" in queryString:
-  metricParts = queryString["m"].split(",")
-else:
-  metricParts = [""]
-metricParts = stripList(metricParts)
-
-hostParts = []
-if "h" in queryString:
-  hostParts = queryString["h"].split(",")
-hostParts = stripList(hostParts)
-
-if "c" in queryString:
-  clusterParts = queryString["c"].split(",")
-else:
-  clusterParts = [""]
-clusterParts = stripList(clusterParts)
-
-if "p" in queryString:
-  rrdPath = queryString["p"]
-else:
-  rrdPath = "/var/lib/ganglia/rrds/"
-
-start = None
-if "s" in queryString:
-  start = queryString["s"]
-
-end = None
-if "e" in queryString:
-  end = queryString["e"]
-
-resolution = None
-if "r" in queryString:
-  resolution = queryString["r"]
-
-if "cf" in queryString:
-  cf = queryString["cf"]
-else:
-  cf = "AVERAGE"
-
-for cluster in clusterParts:
-  for path, dirs, files in os.walk(rrdPath + cluster):
-    pathParts = path.split("/")
-    if len(hostParts) == 0 or pathParts[-1] in hostParts:
-      for file in files:
-        for metric in metricParts:
-          if file.endswith(metric + ".rrd"):
-            if not firstMetric:
-              sys.stdout.write(",\n")
-
-            printMetric(pathParts[-2], pathParts[-1], file[:-4], os.path.join(path, file), cf, start, end, resolution)
-
-            firstMetric = False
-
-sys.stdout.write("\n]\n")
-sys.stdout.flush()
-
diff --git a/branch-1.2/ambari-server/src/main/python/setupAgent.py b/branch-1.2/ambari-server/src/main/python/setupAgent.py
deleted file mode 100644
index 14e12a4..0000000
--- a/branch-1.2/ambari-server/src/main/python/setupAgent.py
+++ /dev/null
@@ -1,118 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import socket
-import time
-import sys
-import logging
-import pprint
-import os
-import subprocess
-import threading
-import traceback
-import stat
-from pprint import pformat
-import re
-
-AMBARI_PASSPHRASE_VAR = "AMBARI_PASSPHRASE"
-
-
-def execOsCommand(osCommand):
-  osStat = subprocess.Popen(osCommand, stdout=subprocess.PIPE)
-  log = osStat.communicate()
-  ret = {}
-  ret["exitstatus"] = osStat.returncode
-  ret["log"] = log
-  return ret
-
-def is_suse():
-  if os.path.isfile("/etc/issue"):
-    if "suse" in open("/etc/issue").read().lower():
-      return True
-  return False
-
-def installAgentSuse():
-  """ Run zypper install and make sure the agent install alright """
-  zypperCommand = ["zypper", "install", "-y", "ambari-agent"]
-  return execOsCommand(zypperCommand)
-
-def installPreReq():
-  """ required for ruby deps """
-  checkepel = ["yum", "repolist", "enabled"]
-  retval = execOsCommand(checkepel)
-  logval = str(retval["log"])
-  if not "epel" in logval:
-    yumCommand = ["yum", "-y", "install", "epel-release"]
-  else:
-    yumCommand = ["echo", "Epel already exists"]
-  return execOsCommand(yumCommand)
-
-def installAgent():
-  """ Run yum install and make sure the agent install alright """
-  # The command doesn't work with file mask ambari-agent*.rpm, so rename it on agent host
-  rpmCommand = ["yum", "-y", "install", "--nogpgcheck", "ambari-agent"]
-  return execOsCommand(rpmCommand)
-
-def configureAgent(host):
-  """ Configure the agent so that it has all the configs knobs properly installed """
-  osCommand = ["sed", "-i.bak", "s/hostname=localhost/hostname=" + host + "/g", "/etc/ambari-agent/conf/ambari-agent.ini"]
-  execOsCommand(osCommand)
-
-  return
-
-def runAgent(passPhrase):
-  os.environ[AMBARI_PASSPHRASE_VAR] = passPhrase
-  subprocess.call("/usr/sbin/ambari-agent start", shell=True)
-  try:
-    # print this to the log.  despite the directory, machine.py works with Python 2.4
-    ret = execOsCommand(["python", "/usr/lib/python2.6/site-packages/ambari_agent/machine.py"])
-    if not 0 == ret['exitstatus']:
-      return ret['exitstatus']
-    print ret['log']
-
-    ret = execOsCommand(["tail", "-20", "/var/log/ambari-agent/ambari-agent.log"])
-    if not 0 == ret['exitstatus']:
-      return ret['exitstatus']
-    print ret['log']
-
-    return 0
-  except (Exception), e:
-    return 1
-
-def main(argv=None):
-  scriptDir = os.path.realpath(os.path.dirname(argv[0]))
-  # Parse the input
-  onlyargs = argv[1:]
-  passPhrase = onlyargs[0]
-  hostName = onlyargs[1]
-
-  if is_suse():
-    installAgentSuse()
-  else:
-    installPreReq()
-    installAgent()
-
-  configureAgent(hostName)
-  sys.exit(runAgent(passPhrase))
-  
-if __name__ == '__main__':
-  logging.basicConfig(level=logging.DEBUG)
-  main(sys.argv)
-
diff --git a/branch-1.2/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql b/branch-1.2/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
deleted file mode 100644
index 3f941a6..0000000
--- a/branch-1.2/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
+++ /dev/null
@@ -1,261 +0,0 @@
---
--- Licensed to the Apache Software Foundation (ASF) under one
--- or more contributor license agreements.  See the NOTICE file
--- distributed with this work for additional information
--- regarding copyright ownership.  The ASF licenses this file
--- to you under the Apache License, Version 2.0 (the
--- "License"); you may not use this file except in compliance
--- with the License.  You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
---
-CREATE DATABASE ambari;
-
-\connect ambari;
-
-CREATE ROLE :username LOGIN ENCRYPTED PASSWORD :password;
-
-CREATE SCHEMA ambari
-  AUTHORIZATION :username;
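-
--- NOTE: :username and :password are psql variables; ambari-server.py binds
--- them via SETUP_DB_CMD's "-v username=..." and "-v password=..." flags.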
-
-CREATE TABLE ambari.clusters (cluster_id BIGINT NOT NULL, cluster_info VARCHAR(255) NOT NULL, cluster_name VARCHAR(100) NOT NULL UNIQUE, desired_cluster_state VARCHAR(255) NOT NULL, desired_stack_version VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id));
-
-GRANT ALL PRIVILEGES ON TABLE ambari.clusters TO :username;
-
-CREATE TABLE ambari.clusterconfig (version_tag VARCHAR(255) NOT NULL, type_name VARCHAR(255) NOT NULL, cluster_id BIGINT NOT NULL, config_data VARCHAR(32000) NOT NULL, create_timestamp BIGINT NOT NULL, PRIMARY KEY (cluster_id, type_name, version_tag));
-
-GRANT ALL PRIVILEGES ON TABLE ambari.clusterconfig TO :username;
-
-CREATE TABLE ambari.clusterservices (service_name VARCHAR(255) NOT NULL, cluster_id BIGINT NOT NULL, service_enabled INTEGER NOT NULL, PRIMARY KEY (service_name, cluster_id));
-
-GRANT ALL PRIVILEGES ON TABLE ambari.clusterservices TO :username;
-
-CREATE TABLE ambari.clusterstate (cluster_id BIGINT NOT NULL, current_cluster_state VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id));
-
-GRANT ALL PRIVILEGES ON TABLE ambari.clusterstate TO :username;
-
-CREATE TABLE ambari.componentconfigmapping (cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, config_type VARCHAR(255) NOT NULL, timestamp BIGINT NOT NULL, config_tag VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id, component_name, service_name, config_type));
-
-GRANT ALL PRIVILEGES ON TABLE ambari.componentconfigmapping TO :username;
-
-CREATE TABLE ambari.hostcomponentconfigmapping (cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, host_name VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, config_type VARCHAR(255) NOT NULL, timestamp BIGINT NOT NULL, config_tag VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id, component_name, host_name, service_name, config_type));
-
-GRANT ALL PRIVILEGES ON TABLE ambari.hostcomponentconfigmapping TO :username;
-
-CREATE TABLE ambari.hostcomponentdesiredconfigmapping (cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, host_name VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, config_type VARCHAR(255) NOT NULL, timestamp BIGINT NOT NULL, config_tag VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id, component_name, host_name, service_name, config_type));
-
-GRANT ALL PRIVILEGES ON TABLE ambari.hostcomponentdesiredconfigmapping TO :username;
-
-CREATE TABLE ambari.hostcomponentdesiredstate (cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, desired_stack_version VARCHAR(255) NOT NULL, desired_state VARCHAR(255) NOT NULL, host_name VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id, component_name, host_name, service_name));
-
-GRANT ALL PRIVILEGES ON TABLE ambari.hostcomponentdesiredstate TO :username;
-
-CREATE TABLE ambari.hostcomponentstate (cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, current_stack_version VARCHAR(255) NOT NULL, current_state VARCHAR(255) NOT NULL, host_name VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id, component_name, host_name, service_name));
-
-GRANT ALL PRIVILEGES ON TABLE ambari.hostcomponentstate TO :username;
-
-CREATE TABLE ambari.hosts (host_name VARCHAR(255) NOT NULL, cpu_count INTEGER NOT NULL, cpu_info VARCHAR(255) NOT NULL, discovery_status VARCHAR(2000) NOT NULL, disks_info VARCHAR(10000) NOT NULL, host_attributes VARCHAR(20000) NOT NULL, ipv4 VARCHAR(255), ipv6 VARCHAR(255), public_host_name VARCHAR(255), last_registration_time BIGINT NOT NULL, os_arch VARCHAR(255) NOT NULL, os_info VARCHAR(1000) NOT NULL, os_type VARCHAR(255) NOT NULL, rack_info VARCHAR(255) NOT NULL, total_mem BIGINT NOT NULL, PRIMARY KEY (host_name));
-
-GRANT ALL PRIVILEGES ON TABLE ambari.hosts TO :username;
-
-CREATE TABLE ambari.hoststate (agent_version VARCHAR(255) NOT NULL, available_mem BIGINT NOT NULL, current_state VARCHAR(255) NOT NULL, health_status VARCHAR(255), host_name VARCHAR(255) NOT NULL, time_in_state BIGINT NOT NULL,  PRIMARY KEY (host_name));
-
-GRANT ALL PRIVILEGES ON TABLE ambari.hoststate TO :username;
-
-CREATE TABLE ambari.servicecomponentdesiredstate (component_name VARCHAR(255) NOT NULL, cluster_id BIGINT NOT NULL, desired_stack_version VARCHAR(255) NOT NULL, desired_state VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, PRIMARY KEY (component_name, cluster_id, service_name));
-
-GRANT ALL PRIVILEGES ON TABLE ambari.servicecomponentdesiredstate TO :username;
-
-CREATE TABLE ambari.serviceconfigmapping (cluster_id BIGINT NOT NULL, service_name VARCHAR(255) NOT NULL, config_type VARCHAR(255) NOT NULL, timestamp BIGINT NOT NULL, config_tag VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id, service_name, config_type));
-
-GRANT ALL PRIVILEGES ON TABLE ambari.serviceconfigmapping TO :username;
-
-CREATE TABLE ambari.servicedesiredstate (cluster_id BIGINT NOT NULL, desired_host_role_mapping INTEGER NOT NULL, desired_stack_version VARCHAR(255) NOT NULL, desired_state VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id, service_name));
-
-GRANT ALL PRIVILEGES ON TABLE ambari.servicedesiredstate TO :username;
-
-CREATE TABLE ambari.roles (role_name VARCHAR(255) NOT NULL, PRIMARY KEY (role_name));
-
-GRANT ALL PRIVILEGES ON TABLE ambari.roles TO :username;
-
-CREATE TABLE ambari.users (user_id SERIAL, ldap_user BOOLEAN NOT NULL, user_name VARCHAR(255) NOT NULL, create_time TIMESTAMP DEFAULT NOW(), user_password VARCHAR(255), PRIMARY KEY (user_id), UNIQUE (ldap_user, user_name));
-GRANT ALL PRIVILEGES ON TABLE ambari.users TO :username;
-
-CREATE TABLE ambari.execution_command (command bytea, task_id BIGINT NOT NULL, PRIMARY KEY (task_id));
-GRANT ALL PRIVILEGES ON TABLE ambari.execution_command TO :username;
-
-CREATE TABLE ambari.host_role_command (task_id BIGINT NOT NULL, attempt_count SMALLINT NOT NULL, event VARCHAR(32000) NOT NULL, exitcode INTEGER NOT NULL, host_name VARCHAR(255) NOT NULL, last_attempt_time BIGINT NOT NULL, request_id BIGINT NOT NULL, role VARCHAR(255), stage_id BIGINT NOT NULL, start_time BIGINT NOT NULL, status VARCHAR(255), std_error bytea, std_out bytea, role_command VARCHAR(255), PRIMARY KEY (task_id));
-GRANT ALL PRIVILEGES ON TABLE ambari.host_role_command TO :username;
-
-CREATE TABLE ambari.role_success_criteria (role VARCHAR(255) NOT NULL, request_id BIGINT NOT NULL, stage_id BIGINT NOT NULL, success_factor FLOAT NOT NULL, PRIMARY KEY (role, request_id, stage_id));
-GRANT ALL PRIVILEGES ON TABLE ambari.role_success_criteria TO :username;
-
-CREATE TABLE ambari.stage (stage_id BIGINT NOT NULL, request_id BIGINT NOT NULL, cluster_id BIGINT NOT NULL, log_info VARCHAR(255) NOT NULL, PRIMARY KEY (stage_id, request_id));
-GRANT ALL PRIVILEGES ON TABLE ambari.stage TO :username;
-
-CREATE TABLE ambari.ClusterHostMapping (cluster_id BIGINT NOT NULL, host_name VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id, host_name));
-GRANT ALL PRIVILEGES ON TABLE ambari.ClusterHostMapping TO :username;
-
-CREATE TABLE ambari.user_roles (role_name VARCHAR(255) NOT NULL, user_id INTEGER NOT NULL, PRIMARY KEY (role_name, user_id));
-GRANT ALL PRIVILEGES ON TABLE ambari.user_roles TO :username;
-
-CREATE TABLE ambari.key_value_store ("key" VARCHAR(255), "value" VARCHAR, PRIMARY KEY("key"));
-GRANT ALL PRIVILEGES ON TABLE ambari.key_value_store TO :username;
-
-ALTER TABLE ambari.clusterconfig ADD CONSTRAINT FK_clusterconfig_cluster_id FOREIGN KEY (cluster_id) REFERENCES ambari.clusters (cluster_id);
-ALTER TABLE ambari.clusterservices ADD CONSTRAINT FK_clusterservices_cluster_id FOREIGN KEY (cluster_id) REFERENCES ambari.clusters (cluster_id);
-ALTER TABLE ambari.clusterstate ADD CONSTRAINT FK_clusterstate_cluster_id FOREIGN KEY (cluster_id) REFERENCES ambari.clusters (cluster_id);
-ALTER TABLE ambari.componentconfigmapping ADD CONSTRAINT FK_componentconfigmapping_config_tag FOREIGN KEY (config_tag, config_type, cluster_id) REFERENCES ambari.clusterconfig (version_tag, type_name, cluster_id);
-ALTER TABLE ambari.componentconfigmapping ADD CONSTRAINT FK_componentconfigmapping_component_name FOREIGN KEY (component_name, cluster_id, service_name) REFERENCES ambari.servicecomponentdesiredstate (component_name, cluster_id, service_name);
-ALTER TABLE ambari.hostcomponentconfigmapping ADD CONSTRAINT FK_hostcomponentconfigmapping_config_tag FOREIGN KEY (config_tag, config_type, cluster_id) REFERENCES ambari.clusterconfig (version_tag, type_name, cluster_id);
-ALTER TABLE ambari.hostcomponentconfigmapping ADD CONSTRAINT FK_hostcomponentconfigmapping_cluster_id FOREIGN KEY (cluster_id, component_name, host_name, service_name) REFERENCES ambari.hostcomponentstate (cluster_id, component_name, host_name, service_name);
-ALTER TABLE ambari.hostcomponentdesiredconfigmapping ADD CONSTRAINT FK_hostcomponentdesiredconfigmapping_config_tag FOREIGN KEY (config_tag, config_type, cluster_id) REFERENCES ambari.clusterconfig (version_tag, type_name, cluster_id);
-ALTER TABLE ambari.hostcomponentdesiredconfigmapping ADD CONSTRAINT FK_hostcomponentdesiredconfigmapping_cluster_id FOREIGN KEY (cluster_id, component_name, host_name, service_name) REFERENCES ambari.hostcomponentdesiredstate (cluster_id, component_name, host_name, service_name);
-ALTER TABLE ambari.hostcomponentdesiredstate ADD CONSTRAINT FK_hostcomponentdesiredstate_host_name FOREIGN KEY (host_name) REFERENCES ambari.hosts (host_name);
-ALTER TABLE ambari.hostcomponentdesiredstate ADD CONSTRAINT FK_hostcomponentdesiredstate_component_name FOREIGN KEY (component_name, cluster_id, service_name) REFERENCES ambari.servicecomponentdesiredstate (component_name, cluster_id, service_name);
-ALTER TABLE ambari.hostcomponentstate ADD CONSTRAINT FK_hostcomponentstate_component_name FOREIGN KEY (component_name, cluster_id, service_name) REFERENCES ambari.servicecomponentdesiredstate (component_name, cluster_id, service_name);
-ALTER TABLE ambari.hostcomponentstate ADD CONSTRAINT FK_hostcomponentstate_host_name FOREIGN KEY (host_name) REFERENCES ambari.hosts (host_name);
-ALTER TABLE ambari.hoststate ADD CONSTRAINT FK_hoststate_host_name FOREIGN KEY (host_name) REFERENCES ambari.hosts (host_name);
-ALTER TABLE ambari.servicecomponentdesiredstate ADD CONSTRAINT FK_servicecomponentdesiredstate_service_name FOREIGN KEY (service_name, cluster_id) REFERENCES ambari.clusterservices (service_name, cluster_id);
-ALTER TABLE ambari.serviceconfigmapping ADD CONSTRAINT FK_serviceconfigmapping_config_tag FOREIGN KEY (config_tag, config_type, cluster_id) REFERENCES ambari.clusterconfig (version_tag, type_name, cluster_id);
-ALTER TABLE ambari.serviceconfigmapping ADD CONSTRAINT FK_serviceconfigmapping_service_name FOREIGN KEY (service_name, cluster_id) REFERENCES ambari.clusterservices (service_name, cluster_id);
-ALTER TABLE ambari.servicedesiredstate ADD CONSTRAINT FK_servicedesiredstate_service_name FOREIGN KEY (service_name, cluster_id) REFERENCES ambari.clusterservices (service_name, cluster_id);
-ALTER TABLE ambari.execution_command ADD CONSTRAINT FK_execution_command_task_id FOREIGN KEY (task_id) REFERENCES ambari.host_role_command (task_id);
-ALTER TABLE ambari.host_role_command ADD CONSTRAINT FK_host_role_command_stage_id FOREIGN KEY (stage_id, request_id) REFERENCES ambari.stage (stage_id, request_id);
-ALTER TABLE ambari.host_role_command ADD CONSTRAINT FK_host_role_command_host_name FOREIGN KEY (host_name) REFERENCES ambari.hosts (host_name);
-ALTER TABLE ambari.role_success_criteria ADD CONSTRAINT FK_role_success_criteria_stage_id FOREIGN KEY (stage_id, request_id) REFERENCES ambari.stage (stage_id, request_id);
-ALTER TABLE ambari.stage ADD CONSTRAINT FK_stage_cluster_id FOREIGN KEY (cluster_id) REFERENCES ambari.clusters (cluster_id);
-ALTER TABLE ambari.ClusterHostMapping ADD CONSTRAINT FK_ClusterHostMapping_host_name FOREIGN KEY (host_name) REFERENCES ambari.hosts (host_name);
-ALTER TABLE ambari.ClusterHostMapping ADD CONSTRAINT FK_ClusterHostMapping_cluster_id FOREIGN KEY (cluster_id) REFERENCES ambari.clusters (cluster_id);
-ALTER TABLE ambari.user_roles ADD CONSTRAINT FK_user_roles_user_id FOREIGN KEY (user_id) REFERENCES ambari.users (user_id);
-ALTER TABLE ambari.user_roles ADD CONSTRAINT FK_user_roles_role_name FOREIGN KEY (role_name) REFERENCES ambari.roles (role_name);
-
-CREATE SEQUENCE ambari.host_role_command_task_id_seq START WITH 1;
-
-GRANT ALL PRIVILEGES ON TABLE ambari.host_role_command_task_id_seq TO :username;
-
-GRANT ALL PRIVILEGES ON TABLE ambari.users_user_id_seq TO :username;
-
-CREATE SEQUENCE ambari.clusters_cluster_id_seq START WITH 1;
-
-GRANT ALL PRIVILEGES ON TABLE ambari.clusters_cluster_id_seq TO :username;
-
-BEGIN;
-
-insert into ambari.Roles(role_name)
-select 'admin'
-union all
-select 'user';
-
-insert into ambari.Users(user_name, user_password, ldap_user)
-select 'admin','538916f8943ec225d97a9a86a2c6ec0818c1cd400e09e03b660fdaaec4af29ddbb6f2b1033b81b00', FALSE;
-
-insert into ambari.user_roles(role_name, user_id)
-select 'admin',(select user_id from ambari.users where user_name='admin' and ldap_user=false);
-
-COMMIT;
-
--- ambari log4j DDL
-
-CREATE DATABASE ambarirca;
-
-\connect ambarirca;
-
-CREATE USER "mapred" WITH PASSWORD 'mapred';
-
-GRANT ALL PRIVILEGES ON DATABASE ambarirca TO "mapred";
-
-CREATE TABLE workflow (
-  workflowId TEXT, workflowName TEXT,
-  parentWorkflowId TEXT,  
-  workflowContext TEXT, userName TEXT,
-  startTime BIGINT, lastUpdateTime BIGINT,
-  numJobsTotal INTEGER, numJobsCompleted INTEGER,
-  inputBytes BIGINT, outputBytes BIGINT,
-  duration BIGINT,
-  PRIMARY KEY (workflowId),
-  FOREIGN KEY (parentWorkflowId) REFERENCES workflow(workflowId)
-);
-
-GRANT ALL PRIVILEGES ON TABLE workflow TO "mapred";
-
-CREATE TABLE job (
-  jobId TEXT, workflowId TEXT, jobName TEXT, workflowEntityName TEXT,
-  userName TEXT, queue TEXT, acls TEXT, confPath TEXT, 
-  submitTime BIGINT, launchTime BIGINT, finishTime BIGINT, 
-  maps INTEGER, reduces INTEGER, status TEXT, priority TEXT, 
-  finishedMaps INTEGER, finishedReduces INTEGER, 
-  failedMaps INTEGER, failedReduces INTEGER, 
-  mapsRuntime BIGINT, reducesRuntime BIGINT,
-  mapCounters TEXT, reduceCounters TEXT, jobCounters TEXT, 
-  inputBytes BIGINT, outputBytes BIGINT,
-  PRIMARY KEY(jobId),
-  FOREIGN KEY(workflowId) REFERENCES workflow(workflowId)
-);
-
-GRANT ALL PRIVILEGES ON TABLE job TO "mapred";
-
-CREATE TABLE task (
-  taskId TEXT, jobId TEXT, taskType TEXT, splits TEXT, 
-  startTime BIGINT, finishTime BIGINT, status TEXT, error TEXT, counters TEXT, 
-  failedAttempt TEXT, 
-  PRIMARY KEY(taskId), 
-  FOREIGN KEY(jobId) REFERENCES job(jobId)
-);
-
-GRANT ALL PRIVILEGES ON TABLE task TO "mapred";
-
-CREATE TABLE taskAttempt (
-  taskAttemptId TEXT, taskId TEXT, jobId TEXT, taskType TEXT, taskTracker TEXT, 
-  startTime BIGINT, finishTime BIGINT, 
-  mapFinishTime BIGINT, shuffleFinishTime BIGINT, sortFinishTime BIGINT, 
-  locality TEXT, avataar TEXT, 
-  status TEXT, error TEXT, counters TEXT, 
-  inputBytes BIGINT, outputBytes BIGINT,
-  PRIMARY KEY(taskAttemptId), 
-  FOREIGN KEY(jobId) REFERENCES job(jobId), 
-  FOREIGN KEY(taskId) REFERENCES task(taskId)
-); 
-
-GRANT ALL PRIVILEGES ON TABLE taskAttempt TO "mapred";
-
-CREATE TABLE hdfsEvent (
-  timestamp BIGINT,
-  userName TEXT,
-  clientIP TEXT,
-  operation TEXT,
-  srcPath TEXT,
-  dstPath TEXT,
-  permissions TEXT
-);
-
-GRANT ALL PRIVILEGES ON TABLE hdfsEvent TO "mapred";
-
-CREATE TABLE mapreduceEvent (
-  timestamp BIGINT,
-  userName TEXT,
-  clientIP TEXT,
-  operation TEXT,
-  target TEXT,
-  result TEXT,
-  description TEXT,
-  permissions TEXT
-);
-
-GRANT ALL PRIVILEGES ON TABLE mapreduceEvent TO "mapred";
-
-CREATE TABLE clusterEvent (
-  timestamp BIGINT, 
-  service TEXT, status TEXT, 
-  error TEXT, data TEXT , 
-  host TEXT, rack TEXT
-);
-
-GRANT ALL PRIVILEGES ON TABLE clusterEvent TO "mapred";
-
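The CREATE script just removed is parameterized with the psql variables `:username` and `:password`, so it is meant to be fed to psql with `-v` definitions. A minimal sketch, assuming psql is on PATH; note the password value must carry its own single quotes because the DDL references the bare `:password` token (credentials below are illustrative).

```python
# Minimal sketch of applying the deleted Ambari-DDL-Postgres-CREATE.sql.
# The DDL substitutes :username (used as an identifier) and :password
# (used as a quoted string literal), so the password value passed with
# -v must include the single quotes itself. Credentials are illustrative.
import subprocess

subprocess.run(
    ["psql", "-U", "postgres",
     "-f", "Ambari-DDL-Postgres-CREATE.sql",
     "-v", "username=ambari",
     "-v", "password='bigdata'"],
    check=True,  # raise CalledProcessError if psql exits non-zero
)
```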
diff --git a/branch-1.2/ambari-server/src/main/resources/Ambari-DDL-Postgres-DROP.sql b/branch-1.2/ambari-server/src/main/resources/Ambari-DDL-Postgres-DROP.sql
deleted file mode 100644
index 3483494..0000000
--- a/branch-1.2/ambari-server/src/main/resources/Ambari-DDL-Postgres-DROP.sql
+++ /dev/null
@@ -1,80 +0,0 @@
---
--- Licensed to the Apache Software Foundation (ASF) under one
--- or more contributor license agreements.  See the NOTICE file
--- distributed with this work for additional information
--- regarding copyright ownership.  The ASF licenses this file
--- to you under the Apache License, Version 2.0 (the
--- "License"); you may not use this file except in compliance
--- with the License.  You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
---
-\connect ambari
-ALTER TABLE ambari.clusterconfig DROP CONSTRAINT FK_clusterconfig_cluster_id;
-ALTER TABLE ambari.clusterservices DROP CONSTRAINT FK_clusterservices_cluster_id;
-ALTER TABLE ambari.clusterstate DROP CONSTRAINT FK_clusterstate_cluster_id;
-ALTER TABLE ambari.componentconfigmapping DROP CONSTRAINT FK_componentconfigmapping_component_name;
-ALTER TABLE ambari.hostcomponentconfigmapping DROP CONSTRAINT FK_hostcomponentconfigmapping_cluster_id;
-ALTER TABLE ambari.hostcomponentdesiredconfigmapping DROP CONSTRAINT FK_hostcomponentdesiredconfigmapping_config_tag;
-ALTER TABLE ambari.hostcomponentdesiredconfigmapping DROP CONSTRAINT FK_hostcomponentdesiredconfigmapping_cluster_id;
-ALTER TABLE ambari.hostcomponentdesiredstate DROP CONSTRAINT FK_hostcomponentdesiredstate_host_name;
-ALTER TABLE ambari.hostcomponentdesiredstate DROP CONSTRAINT FK_hostcomponentdesiredstate_component_name;
-ALTER TABLE ambari.hostcomponentstate DROP CONSTRAINT FK_hostcomponentstate_component_name;
-ALTER TABLE ambari.hostcomponentstate DROP CONSTRAINT FK_hostcomponentstate_host_name;
-ALTER TABLE ambari.hoststate DROP CONSTRAINT FK_hoststate_host_name;
-ALTER TABLE ambari.servicecomponentdesiredstate DROP CONSTRAINT FK_servicecomponentdesiredstate_service_name;
-ALTER TABLE ambari.serviceconfigmapping DROP CONSTRAINT FK_serviceconfigmapping_service_name;
-ALTER TABLE ambari.servicedesiredstate DROP CONSTRAINT FK_servicedesiredstate_service_name;
-ALTER TABLE ambari.execution_command DROP CONSTRAINT FK_execution_command_task_id;
-ALTER TABLE ambari.host_role_command DROP CONSTRAINT FK_host_role_command_stage_id;
-ALTER TABLE ambari.host_role_command DROP CONSTRAINT FK_host_role_command_host_name;
-ALTER TABLE ambari.role_success_criteria DROP CONSTRAINT FK_role_success_criteria_stage_id;
-ALTER TABLE ambari.stage DROP CONSTRAINT FK_stage_cluster_id;
-ALTER TABLE ambari.ClusterHostMapping DROP CONSTRAINT FK_ClusterHostMapping_host_name;
-ALTER TABLE ambari.ClusterHostMapping DROP CONSTRAINT FK_ClusterHostMapping_cluster_id;
-ALTER TABLE ambari.user_roles DROP CONSTRAINT FK_user_roles_user_id;
-ALTER TABLE ambari.user_roles DROP CONSTRAINT FK_user_roles_role_name;
-DROP TABLE ambari.clusters CASCADE;
-DROP TABLE ambari.clusterservices CASCADE;
-DROP TABLE ambari.clusterstate CASCADE;
-DROP TABLE ambari.componentconfigmapping CASCADE;
-DROP TABLE ambari.hostcomponentconfigmapping CASCADE;
-DROP TABLE ambari.hostcomponentdesiredconfigmapping CASCADE;
-DROP TABLE ambari.hostcomponentdesiredstate CASCADE;
-DROP TABLE ambari.hostcomponentstate CASCADE;
-DROP TABLE ambari.hosts CASCADE;
-DROP TABLE ambari.hoststate CASCADE;
-DROP TABLE ambari.servicecomponentdesiredstate CASCADE;
-DROP TABLE ambari.serviceconfigmapping CASCADE;
-DROP TABLE ambari.servicedesiredstate CASCADE;
-DROP TABLE ambari.roles CASCADE;
-DROP TABLE ambari.users CASCADE;
-DROP TABLE ambari.execution_command CASCADE;
-DROP TABLE ambari.host_role_command CASCADE;
-DROP TABLE ambari.role_success_criteria CASCADE;
-DROP TABLE ambari.stage CASCADE;
-DROP TABLE ambari.ClusterHostMapping CASCADE;
-DROP TABLE ambari.clusterconfig CASCADE;
-DROP TABLE ambari.user_roles CASCADE;
-DROP TABLE ambari.key_value_store CASCADE;
-DROP SEQUENCE ambari.host_role_command_task_id_seq;
-DROP SEQUENCE ambari.clusters_cluster_id_seq;
-
-\connect ambarirca
-ALTER TABLE job DROP CONSTRAINT job_workflowid_fkey;
-ALTER TABLE task DROP CONSTRAINT task_jobid_fkey;
-ALTER TABLE taskattempt DROP CONSTRAINT taskattempt_jobid_fkey;
-ALTER TABLE taskattempt DROP CONSTRAINT taskattempt_taskid_fkey;
-DROP TABLE workflow;
-DROP TABLE job;
-DROP TABLE task;
-DROP TABLE taskAttempt;
-DROP TABLE hdfsEvent;
-DROP TABLE mapreduceEvent;
-DROP TABLE clusterEvent;
diff --git a/branch-1.2/ambari-server/src/main/resources/Ambari-DDL.sql b/branch-1.2/ambari-server/src/main/resources/Ambari-DDL.sql
deleted file mode 100644
index 1041771..0000000
--- a/branch-1.2/ambari-server/src/main/resources/Ambari-DDL.sql
+++ /dev/null
@@ -1,314 +0,0 @@
---
--- Licensed to the Apache Software Foundation (ASF) under one
--- or more contributor license agreements.  See the NOTICE file
--- distributed with this work for additional information
--- regarding copyright ownership.  The ASF licenses this file
--- to you under the Apache License, Version 2.0 (the
--- "License"); you may not use this file except in compliance
--- with the License.  You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
---
-
-DROP SCHEMA IF EXISTS ambari CASCADE;
-
-DROP ROLE IF EXISTS "ambari-server";
-
-CREATE ROLE "ambari-server" LOGIN ENCRYPTED PASSWORD 'bigdata';
-
-CREATE SCHEMA ambari
-  AUTHORIZATION "ambari-server";
-
-COMMENT ON SCHEMA ambari
-  IS 'test schema';
-
-SET search_path TO ambari;
-
-/* Table for storing user information */
-CREATE TABLE Users
-(
-user_name VARCHAR,
-user_password VARCHAR,
-ldap_user boolean DEFAULT FALSE NOT NULL,
-create_time TIMESTAMP DEFAULT now() NOT NULL,
-PRIMARY KEY(user_name, ldap_user)
-);
-
-/* Table for storing the roles list - can be dropped if the list of roles is predefined and limited at an upper layer */
-CREATE TABLE Roles
-(
-role_name VARCHAR PRIMARY KEY
-);
-
-/* Users - Roles mapping table */
-CREATE TABLE user_roles
-(
-user_name VARCHAR,
-ldap_user boolean default false,
-role_name VARCHAR references Roles(role_name),
-PRIMARY KEY(user_name, ldap_user, role_name),
-FOREIGN KEY(user_name, ldap_user) REFERENCES Users(user_name, ldap_user)
-);
-
-/* Overall clusters table - all created/managed clusters */
-CREATE TABLE Clusters
-(
-cluster_id BIGSERIAL,
-cluster_name VARCHAR(100) UNIQUE NOT NULL,
-desired_cluster_state VARCHAR DEFAULT '' NOT NULL,
-cluster_info VARCHAR DEFAULT '' NOT NULL,
-PRIMARY KEY (cluster_id)
-);
-
-/* All hosts for all clusters */
-CREATE TABLE Hosts
-(
-host_name VARCHAR NOT NULL,
-ipv4 VARCHAR UNIQUE,
-ipv6 VARCHAR UNIQUE,
-total_mem BIGINT DEFAULT '0' NOT NULL,
-cpu_count INTEGER DEFAULT '0' NOT NULL,
-cpu_info VARCHAR DEFAULT '' NOT NULL,
-os_arch VARCHAR DEFAULT '' NOT NULL,
-disks_info VARCHAR DEFAULT '' NOT NULL,
-os_info VARCHAR DEFAULT '' NOT NULL,
-os_type VARCHAR DEFAULT '' NOT NULL,
-discovery_status VARCHAR DEFAULT '' NOT NULL,
-last_registration_time BIGINT DEFAULT '0' NOT NULL,
-rack_info VARCHAR DEFAULT '/default-rack' NOT NULL,
-host_attributes VARCHAR DEFAULT '' NOT NULL,
-PRIMARY KEY (host_name)
-);
-
-/* Cluster Hosts mapping table */
-CREATE TABLE ClusterHostMapping
-(
-  cluster_id BIGINT references Clusters(cluster_id),
-  host_name VARCHAR references Hosts(host_name),
-  PRIMARY KEY(cluster_id, host_name)
-);
-
-CREATE TABLE ClusterServices
-(
-cluster_id BIGINT NOT NULL references Clusters(cluster_id),
-service_name VARCHAR,
-service_enabled INTEGER DEFAULT '0' NOT NULL,
-PRIMARY KEY (cluster_id,service_name)
-);
-
-/* Configs at a service level */
-/* This will be used in most scenarios for homogeneous clusters */
-/* Snapshot is a blob for all properties and their values. There is no separate row for each property */
-/* A special service called AMBARI or GLOBAL can be leveraged for global level configs */
-CREATE TABLE ServiceConfig
-(
-config_version SERIAL /*INTEGER NOT NULL AUTO_INCREMENT*/,
-cluster_id BIGINT NOT NULL,
-service_name VARCHAR NOT NULL,
-config_snapshot VARCHAR DEFAULT '' NOT NULL,
-config_snapshot_time timestamp NOT NULL,
-PRIMARY KEY (config_version),
-FOREIGN KEY (cluster_id, service_name) REFERENCES ClusterServices(cluster_id, service_name)
-);
-
-/* Configs that are overridden at the component level */
-/* The combination of the ServiceConfig and ServiceComponentConfig tables
-    defines the config for a given component.
-    Absence of an entry implies the component's configs are the same as the overall service config. */
-CREATE TABLE ServiceComponentConfig
-(
-config_version SERIAL /*INTEGER NOT NULL AUTO_INCREMENT*/,
-cluster_id BIGINT NOT NULL,
-service_name VARCHAR NOT NULL,
-component_name VARCHAR NOT NULL,
-config_snapshot VARCHAR DEFAULT '' NOT NULL,
-config_snapshot_time timestamp NOT NULL,
-PRIMARY KEY (config_version),
-FOREIGN KEY (cluster_id, service_name) REFERENCES ClusterServices(cluster_id, service_name)
-);
-
-/* For overriding configs on a per-host level for heterogeneous clusters */
-CREATE TABLE ServiceComponentHostConfig
-(
-config_version SERIAL /*INTEGER NOT NULL AUTO_INCREMENT*/,
-cluster_id BIGINT NOT NULL,
-service_name VARCHAR NOT NULL,
-component_name VARCHAR NOT NULL,
-host_name VARCHAR NOT NULL references Hosts(host_name),
-config_snapshot VARCHAR DEFAULT '' NOT NULL,
-config_snapshot_time timestamp NOT NULL,
-PRIMARY KEY (config_version),
-FOREIGN KEY (cluster_id, service_name) REFERENCES ClusterServices(cluster_id, service_name)
-);
-
-CREATE TABLE ServiceDesiredState
-(
-cluster_id BIGINT,
-service_name VARCHAR DEFAULT '' NOT NULL,
-desired_state VARCHAR DEFAULT '' NOT NULL,
-desired_host_role_mapping INTEGER DEFAULT '0' NOT NULL,
-desired_stack_version VARCHAR DEFAULT '' NOT NULL,
-PRIMARY KEY (cluster_id, service_name),
-FOREIGN KEY (cluster_id, service_name) REFERENCES ClusterServices(cluster_id, service_name)
-);
-
-CREATE TABLE HostComponentMapping /*HostRoleMapping*/
-(
-cluster_id BIGINT,
-service_name VARCHAR DEFAULT '' NOT NULL,
-host_component_mapping_id SERIAL /*INTEGER NOT NULL AUTO_INCREMENT*/,
-host_component_mapping_snapshot VARCHAR DEFAULT '' NOT NULL,
-PRIMARY KEY (cluster_id, service_name, host_component_mapping_id),
-FOREIGN KEY (cluster_id, service_name) REFERENCES ClusterServices(cluster_id, service_name)
-);
-
-
-CREATE TABLE ClusterState
-(
-cluster_id BIGINT NOT NULL references Clusters(cluster_id),
-current_cluster_state VARCHAR DEFAULT '' NOT NULL,
-PRIMARY KEY (cluster_id)
-);
-
-CREATE TABLE HostState
-(
-/*cluster_id INTEGER references Clusters(cluster_id),*/
-host_name VARCHAR NOT NULL references Hosts(host_name),
-available_mem INTEGER DEFAULT '0' NOT NULL,
-last_heartbeat_time INTEGER DEFAULT '0' NOT NULL,
-time_in_state INTEGER DEFAULT '0' NOT NULL,
-agent_version VARCHAR DEFAULT '' NOT NULL,
-health_status VARCHAR,
-current_state VARCHAR DEFAULT '' NOT NULL,
-PRIMARY KEY (host_name)
-);
-
-
-CREATE TABLE ServiceComponentDesiredState
-(
-cluster_id BIGINT references Clusters(cluster_id),
-service_name VARCHAR DEFAULT '' NOT NULL,
-component_name VARCHAR DEFAULT '' NOT NULL,
-desired_state VARCHAR DEFAULT '' NOT NULL,
-desired_stack_version VARCHAR DEFAULT '' NOT NULL,
-PRIMARY KEY (cluster_id,service_name,component_name),
-FOREIGN KEY (cluster_id, service_name) REFERENCES ClusterServices(cluster_id, service_name)
-);
-
-
-CREATE TABLE HostComponentState
-(
-cluster_id BIGINT,
-service_name VARCHAR DEFAULT '' NOT NULL,
-host_name VARCHAR DEFAULT '' NOT NULL references Hosts(host_name),
-component_name VARCHAR DEFAULT '' NOT NULL,
-current_state VARCHAR DEFAULT '' NOT NULL,
-current_config_version VARCHAR DEFAULT '' NOT NULL,
-current_stack_version VARCHAR DEFAULT '' NOT NULL,
-PRIMARY KEY (cluster_id, service_name, host_name, component_name),
-FOREIGN KEY (cluster_id, service_name, component_name) REFERENCES ServiceComponentDesiredState(cluster_id, service_name, component_name)
-);
-
-CREATE TABLE HostComponentDesiredState
-(
-cluster_id BIGINT,
-service_name VARCHAR DEFAULT '' NOT NULL,
-host_name VARCHAR NOT NULL references Hosts(host_name),
-component_name VARCHAR DEFAULT '' NOT NULL,
-desired_state VARCHAR DEFAULT '' NOT NULL,
-desired_config_version VARCHAR DEFAULT '' NOT NULL, /* desired config version defines a combined version of service/component/node-component config versions */
-desired_stack_version VARCHAR DEFAULT '' NOT NULL,
-PRIMARY KEY (cluster_id,host_name,component_name),
-FOREIGN KEY (cluster_id, service_name, component_name) REFERENCES ServiceComponentDesiredState(cluster_id, service_name, component_name)
-);
-
-CREATE TABLE STAGE
-(
-   cluster_id BIGINT references Clusters(cluster_id),
-   request_id BIGINT DEFAULT '0',
-   stage_id BIGINT DEFAULT '0' NOT NULL,
-   log_info VARCHAR DEFAULT '' NOT NULL,
-   PRIMARY KEY (request_id, stage_id)
-);
-
-CREATE TABLE HOST_ROLE_COMMAND
-(
-   task_id BIGSERIAL NOT NULL,
-   request_id BIGINT NOT NULL,
-   stage_id BIGINT NOT NULL,
-   host_name VARCHAR DEFAULT '' NOT NULL references Hosts(host_name),
-   role VARCHAR DEFAULT '' NOT NULL,
-   command VARCHAR DEFAULT '' NOT NULL,
-   event VARCHAR DEFAULT '' NOT NULL, /** Refer to ServiceComponentHostEventType.java */
-   exitCode INTEGER DEFAULT '0' NOT NULL,
-   status VARCHAR DEFAULT '' NOT NULL, /** PENDING, QUEUED, IN_PROGRESS, COMPLETED, FAILED, TIMEDOUT, ABORTED **/
-   std_error VARCHAR DEFAULT '' NOT NULL,
-   std_out VARCHAR DEFAULT '' NOT NULL,
-   start_time BIGINT DEFAULT -1 NOT NULL,
-   last_attempt_time BIGINT DEFAULT -1 NOT NULL,
-   attempt_count SMALLINT DEFAULT 0 NOT NULL,
-   PRIMARY KEY (task_id),
-   FOREIGN KEY (request_id, stage_id) REFERENCES STAGE(request_id, stage_id)
-);
-
-CREATE TABLE EXECUTION_COMMAND
-(
-   task_id BIGINT DEFAULT '0' NOT NULL references HOST_ROLE_COMMAND(task_id),
-   command VARCHAR NOT NULL, /** Serialized ExecutionCommand **/
-   PRIMARY KEY(task_id)
-);
-
-
-CREATE TABLE ROLE_SUCCESS_CRITERIA
-(
-   request_id BIGINT NOT NULL,
-   stage_id BIGINT NOT NULL,
-   role VARCHAR DEFAULT '' NOT NULL,
-   success_factor FLOAT DEFAULT 1,
-   PRIMARY KEY(role, request_id, stage_id),
-   FOREIGN KEY (request_id, stage_id) REFERENCES STAGE(request_id, stage_id)
-);
-
---CREATE TABLE ActionStatus 
---(
---cluster_id INTEGER references Clusters(cluster_id),
---host_name VARCHAR DEFAULT '' NOT NULL references Hosts(host_name),
---role VARCHAR DEFAULT '' NOT NULL,
---request_id INTEGER DEFAULT '0' NOT NULL,
---stage_id INTEGER DEFAULT '0' NOT NULL,
---event VARCHAR DEFAULT '' NOT NULL,
---task_id INTEGER DEFAULT '0' NOT NULL,
---status VARCHAR DEFAULT '' NOT NULL, /* PENDING, QUEUED, COMPLETED, FAILED, ABORTED */
---log_info VARCHAR DEFAULT '' NOT NULL,
---continue_criteria bytea /*BLOB*/ DEFAULT '' NOT NULL, /* Define continuation criteria for moving to next stage */
---PRIMARY KEY (cluster_id, host_name, role, request_id, stage_id)
---);
-
-
-GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA ambari TO "ambari-server";
-GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA ambari TO "ambari-server";
-
-BEGIN;
-
-insert into Roles(role_name) 
-select 'admin'
-union all
-select 'user';
-
-insert into Users(user_name, user_password)
-select 'administrator','538916f8943ec225d97a9a86a2c6ec0818c1cd400e09e03b660fdaaec4af29ddbb6f2b1033b81b00'
-union all
-select 'test','d2f5da28bf8353e836fbae0a7f586b9cbda03f590910998957383371fbacba7e4088394991305ef8';
-
-insert into user_roles(user_name,role_name)
-select 'test','user'
-union all
-select 'administrator','admin';
-
-COMMIT;
\ No newline at end of file
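Since the test schema above seeds 'administrator' and 'test' users plus their role mappings, here is a quick sketch for sanity-checking the seed data as the "ambari-server" role the script creates. The target database name is an assumption: the DDL installs the schema into whatever database it is run against.

```python
# Minimal sketch querying the rows seeded by the deleted test DDL above,
# connecting as the "ambari-server" role it creates (password 'bigdata'
# per the script). The target database name is an assumption.
import os
import subprocess

env = dict(os.environ, PGPASSWORD="bigdata")  # matches the DDL's CREATE ROLE
subprocess.run(
    ["psql", "-U", "ambari-server", "-d", "postgres",
     "-c", "SET search_path TO ambari; "
           "SELECT user_name, role_name FROM user_roles;"],
    env=env,
    check=True,
)
```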
diff --git a/branch-1.2/ambari-server/src/main/resources/META-INF/persistence.xml b/branch-1.2/ambari-server/src/main/resources/META-INF/persistence.xml
deleted file mode 100644
index 9367e88..0000000
--- a/branch-1.2/ambari-server/src/main/resources/META-INF/persistence.xml
+++ /dev/null
@@ -1,80 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor 
-  license agreements. See the NOTICE file distributed with this work for additional 
-  information regarding copyright ownership. The ASF licenses this file to 
-  You under the Apache License, Version 2.0 (the "License"); you may not use 
-  this file except in compliance with the License. You may obtain a copy of 
-  the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required 
-  by applicable law or agreed to in writing, software distributed under the 
-  License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS 
-  OF ANY KIND, either express or implied. See the License for the specific 
-  language governing permissions and limitations under the License. -->
-<persistence xmlns="http://java.sun.com/xml/ns/persistence"
-  version="2.0">
-
-  <persistence-unit name="ambari-postgres" transaction-type="RESOURCE_LOCAL">
-    <provider>org.eclipse.persistence.jpa.PersistenceProvider</provider>
-    <class>org.apache.ambari.server.orm.entities.ClusterEntity</class>
-    <class>org.apache.ambari.server.orm.entities.ClusterConfigEntity</class>
-    <class>org.apache.ambari.server.orm.entities.ClusterServiceEntity</class>
-    <class>org.apache.ambari.server.orm.entities.ClusterStateEntity</class>
-    <class>org.apache.ambari.server.orm.entities.ComponentConfigMappingEntity</class>
-    <class>org.apache.ambari.server.orm.entities.HostComponentConfigMappingEntity</class>
-    <class>org.apache.ambari.server.orm.entities.HostComponentDesiredConfigMappingEntity</class>
-    <class>org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity</class>
-    <class>org.apache.ambari.server.orm.entities.HostComponentStateEntity</class>
-    <class>org.apache.ambari.server.orm.entities.HostEntity</class>
-    <class>org.apache.ambari.server.orm.entities.HostStateEntity</class>
-    <class>org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity</class>
-    <class>org.apache.ambari.server.orm.entities.ServiceConfigMappingEntity</class>
-    <class>org.apache.ambari.server.orm.entities.ServiceDesiredStateEntity</class>
-    <class>org.apache.ambari.server.orm.entities.RoleEntity</class>
-    <class>org.apache.ambari.server.orm.entities.UserEntity</class>
-    <class>org.apache.ambari.server.orm.entities.ExecutionCommandEntity</class>
-    <class>org.apache.ambari.server.orm.entities.HostRoleCommandEntity</class>
-    <class>org.apache.ambari.server.orm.entities.RoleSuccessCriteriaEntity</class>
-    <class>org.apache.ambari.server.orm.entities.StageEntity</class>
-    <class>org.apache.ambari.server.orm.entities.KeyValueEntity</class>
-
-    <properties>
-      <property name="javax.persistence.jdbc.url" value="jdbc:postgresql://localhost/ambari" />
-      <property name="javax.persistence.jdbc.driver" value="org.postgresql.Driver" />
-      <property name="eclipselink.cache.size.default" value="3000" />
-    </properties>
-  </persistence-unit>
-
-  <persistence-unit name="ambari-javadb" transaction-type="RESOURCE_LOCAL">
-    <provider>org.eclipse.persistence.jpa.PersistenceProvider</provider>
-    <class>org.apache.ambari.server.orm.entities.ClusterEntity</class>
-    <class>org.apache.ambari.server.orm.entities.ClusterConfigEntity</class>
-    <class>org.apache.ambari.server.orm.entities.ClusterServiceEntity</class>
-    <class>org.apache.ambari.server.orm.entities.ClusterStateEntity</class>
-    <class>org.apache.ambari.server.orm.entities.ComponentConfigMappingEntity</class>
-    <class>org.apache.ambari.server.orm.entities.HostComponentConfigMappingEntity</class>
-    <class>org.apache.ambari.server.orm.entities.HostComponentDesiredConfigMappingEntity</class>
-    <class>org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity</class>
-    <class>org.apache.ambari.server.orm.entities.HostComponentStateEntity</class>
-    <class>org.apache.ambari.server.orm.entities.HostEntity</class>
-    <class>org.apache.ambari.server.orm.entities.HostStateEntity</class>
-    <class>org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity</class>
-    <class>org.apache.ambari.server.orm.entities.ServiceConfigMappingEntity</class>
-    <class>org.apache.ambari.server.orm.entities.ServiceDesiredStateEntity</class>
-    <class>org.apache.ambari.server.orm.entities.RoleEntity</class>
-    <class>org.apache.ambari.server.orm.entities.UserEntity</class>
-    <class>org.apache.ambari.server.orm.entities.ExecutionCommandEntity</class>
-    <class>org.apache.ambari.server.orm.entities.HostRoleCommandEntity</class>
-    <class>org.apache.ambari.server.orm.entities.RoleSuccessCriteriaEntity</class>
-    <class>org.apache.ambari.server.orm.entities.StageEntity</class>
-    <class>org.apache.ambari.server.orm.entities.KeyValueEntity</class>
-
-
-    <properties>
-      <property name="javax.persistence.jdbc.url" value="jdbc:derby:memory:myDB;create=true" />
-      <property name="javax.persistence.jdbc.driver" value="org.apache.derby.jdbc.EmbeddedDriver" />
-      <property name="eclipselink.ddl-generation" value="drop-and-create-tables" />
-      <property name="eclipselink.orm.throw.exceptions" value="true" />
-      <property name="eclipselink.cache.size.default" value="3000" />
-      <!--<property name="eclipselink.logging.level" value="FINEST"/>-->
-    </properties>
-  </persistence-unit>
-</persistence>
diff --git a/branch-1.2/ambari-server/src/main/resources/ca.config b/branch-1.2/ambari-server/src/main/resources/ca.config
deleted file mode 100644
index 7324275..0000000
--- a/branch-1.2/ambari-server/src/main/resources/ca.config
+++ /dev/null
@@ -1,24 +0,0 @@
-[ ca ]
-default_ca             = CA_CLIENT
-[ CA_CLIENT ]
-dir                    = keystore/db
-certs                  = $dir/certs
-new_certs_dir          = $dir/newcerts
-
-database               = $dir/index.txt
-serial                 = $dir/serial
-default_days           = 365    
-
-default_crl_days       = 7  
-default_md             = md5 
-
-policy                 = policy_anything 
-
-[ policy_anything ]
-countryName            = optional
-stateOrProvinceName    = optional 
-localityName           = optional
-organizationName       = optional
-organizationalUnitName = supplied 
-commonName             = optional   
-emailAddress           = optional       
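The ca.config just removed is an `openssl ca` configuration: its `dir = keystore/db` directory is a CA database whose index.txt and serial files must be seeded before use (the deleted db/index.txt and db/serial resources below are exactly such seeds). A minimal signing sketch, with hypothetical CSR and certificate file names:

```python
# Minimal sketch of signing a request with the deleted ca.config above,
# assuming its keystore/db directory exists and holds an empty index.txt
# plus a serial file. CSR and output file names are hypothetical.
import subprocess

subprocess.run(
    ["openssl", "ca",
     "-config", "ca.config",
     "-in", "agent.csr",     # hypothetical certificate request
     "-out", "agent.crt",    # hypothetical signed certificate
     "-batch"],              # non-interactive; policy_anything only requires an OU in the subject
    check=True,
)
```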
diff --git a/branch-1.2/ambari-server/src/main/resources/db/index.txt b/branch-1.2/ambari-server/src/main/resources/db/index.txt
deleted file mode 100644
index e69de29..0000000
--- a/branch-1.2/ambari-server/src/main/resources/db/index.txt
+++ /dev/null
diff --git a/branch-1.2/ambari-server/src/main/resources/db/newcerts/.gitignore b/branch-1.2/ambari-server/src/main/resources/db/newcerts/.gitignore
deleted file mode 100644
index 5e7d273..0000000
--- a/branch-1.2/ambari-server/src/main/resources/db/newcerts/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-# Ignore everything in this directory
-*
-# Except this file
-!.gitignore
diff --git a/branch-1.2/ambari-server/src/main/resources/db/serial b/branch-1.2/ambari-server/src/main/resources/db/serial
deleted file mode 100644
index 8a0f05e..0000000
--- a/branch-1.2/ambari-server/src/main/resources/db/serial
+++ /dev/null
@@ -1 +0,0 @@
-01
diff --git a/branch-1.2/ambari-server/src/main/resources/ganglia_properties.json b/branch-1.2/ambari-server/src/main/resources/ganglia_properties.json
deleted file mode 100644
index d0781b5..0000000
--- a/branch-1.2/ambari-server/src/main/resources/ganglia_properties.json
+++ /dev/null
@@ -1,19565 +0,0 @@
-{
-  "Cluster":{
-    "*":{
-      "metrics/cpu/Idle":{
-        "metric":"cpu_report.Idle\\g",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/cpu/Nice":{
-        "metric":"cpu_report.Nice\\g",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/cpu/System":{
-        "metric":"cpu_report.System\\g",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/cpu/User":{
-        "metric":"cpu_report.User\\g",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/cpu/Wait":{
-        "metric":"cpu_report.Wait\\g",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/load/1-min":{
-        "metric":"load_report.1-min",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/load/CPUs":{
-        "metric":"load_report.CPUs ",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/load/Nodes":{
-        "metric":"load_report.Nodes",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/load/Procs":{
-        "metric":"load_report.Procs",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/memory/Buffer":{
-        "metric":"mem_report.Buffer\\g",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/memory/Cache":{
-        "metric":"mem_report.Cache\\g",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/memory/Share":{
-        "metric":"mem_report.Share\\g",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/memory/Swap":{
-        "metric":"mem_report.Swap\\g",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/memory/Total":{
-        "metric":"mem_report.Total\\g",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/memory/Use":{
-        "metric":"mem_report.Use\\g",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/network/In":{
-        "metric":"network_report.In ",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/network/Out":{
-        "metric":"network_report.Out",
-        "pointInTime":false,
-        "temporal":true
-      }
-    }
-  },
-  "Host":{
-    "*":{
-      "metrics/boottime":{
-        "metric":"boottime",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_aidle":{
-        "metric":"cpu_aidle",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_idle":{
-        "metric":"cpu_idle",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_nice":{
-        "metric":"cpu_nice",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_num":{
-        "metric":"cpu_num",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_speed":{
-        "metric":"cpu_speed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_system":{
-        "metric":"cpu_system",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_user":{
-        "metric":"cpu_user",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_wio":{
-        "metric":"cpu_wio",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/disk_free":{
-        "metric":"disk_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/disk_total":{
-        "metric":"disk_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/gcCount":{
-        "metric":"jvm.metrics.gcCount",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/gcTimeMillis":{
-        "metric":"jvm.metrics.gcTimeMillis",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/logError":{
-        "metric":"jvm.metrics.logError",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/logFatal":{
-        "metric":"jvm.metrics.logFatal",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/logInfo":{
-        "metric":"jvm.metrics.logInfo",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/logWarn":{
-        "metric":"jvm.metrics.logWarn",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/maxMemoryM":{
-        "metric":"jvm.metrics.maxMemoryM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/memHeapCommittedM":{
-        "metric":"jvm.metrics.memHeapCommittedM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/memHeapUsedM":{
-        "metric":"jvm.metrics.memHeapUsedM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/memNonHeapCommittedM":{
-        "metric":"jvm.metrics.memNonHeapCommittedM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/memNonHeapUsedM":{
-        "metric":"jvm.metrics.memNonHeapUsedM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsBlocked":{
-        "metric":"jvm.metrics.threadsBlocked",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsNew":{
-        "metric":"jvm.metrics.threadsNew",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsRunnable":{
-        "metric":"jvm.metrics.threadsRunnable",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsTerminated":{
-        "metric":"jvm.metrics.threadsTerminated",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsTimedWaiting":{
-        "metric":"jvm.metrics.threadsTimedWaiting",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsWaiting":{
-        "metric":"jvm.metrics.threadsWaiting",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_fifteen":{
-        "metric":"load_fifteen",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_five":{
-        "metric":"load_five",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_one":{
-        "metric":"load_one",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_buffers":{
-        "metric":"mem_buffers",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_cached":{
-        "metric":"mem_cached",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_free":{
-        "metric":"mem_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_shared":{
-        "metric":"mem_shared",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_total":{
-        "metric":"mem_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/swap_free":{
-        "metric":"swap_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/swap_total":{
-        "metric":"swap_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/bytes_in":{
-        "metric":"bytes_in",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/bytes_out":{
-        "metric":"bytes_out",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/pkts_in":{
-        "metric":"pkts_in",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/pkts_out":{
-        "metric":"pkts_out",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/part_max_used":{
-        "metric":"part_max_used",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/process/proc_run":{
-        "metric":"proc_run",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/process/proc_total":{
-        "metric":"proc_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/NumOpenConnections":{
-        "metric":"rpc.metrics.NumOpenConnections",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/ReceivedBytes":{
-        "metric":"rpc.rpc.ReceivedBytes",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcProcessingTime_avg_time":{
-        "metric":"rpc.rpc.RpcProcessingTime_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcProcessingTime_num_ops":{
-        "metric":"rpc.rpc.RpcProcessingTime_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcQueueTime_avg_time":{
-        "metric":"rpc.metrics.RpcQueueTime_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcQueueTime_num_ops":{
-        "metric":"rpc.rpc.RpcQueueTime_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcSlowResponse_avg_time":{
-        "metric":"rpc.metrics.RpcSlowResponse_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcSlowResponse_num_ops":{
-        "metric":"rpc.metrics.RpcSlowResponse_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/SentBytes":{
-        "metric":"rpc.rpc.SentBytes",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/abort/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.abort.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/abort/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.abort.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/abort_avg_time":{
-        "metric":"rpc.metrics.abort_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/abort_num_ops":{
-        "metric":"rpc.metrics.abort_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addColumn_avg_time":{
-        "metric":"rpc.metrics.addColumn_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addColumn_num_ops":{
-        "metric":"rpc.metrics.addColumn_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addToOnlineRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.addToOnlineRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addToOnlineRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.addToOnlineRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addToOnlineRegions_avg_time":{
-        "metric":"rpc.metrics.addToOnlineRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addToOnlineRegions_num_ops":{
-        "metric":"rpc.metrics.addToOnlineRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/assign_avg_time":{
-        "metric":"rpc.metrics.assign_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/assign_num_ops":{
-        "metric":"rpc.metrics.assign_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balanceSwitch_avg_time":{
-        "metric":"rpc.metrics.balanceSwitch_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balanceSwitch_num_ops":{
-        "metric":"rpc.metrics.balanceSwitch_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balance_avg_time":{
-        "metric":"rpc.metrics.balance_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balance_num_ops":{
-        "metric":"rpc.metrics.balance_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.bulkLoadHFiles.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.bulkLoadHFiles.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles_avg_time":{
-        "metric":"rpc.metrics.bulkLoadHFiles_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles_num_ops":{
-        "metric":"rpc.metrics.bulkLoadHFiles_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/callQueueLen":{
-        "metric":"rpc.metrics.callQueueLen",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.checkAndDelete.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.checkAndDelete.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete_avg_time":{
-        "metric":"rpc.metrics.checkAndDelete_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete_num_ops":{
-        "metric":"rpc.metrics.checkAndDelete_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.checkAndPut.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.checkAndPut.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut_avg_time":{
-        "metric":"rpc.metrics.checkAndPut_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut_num_ops":{
-        "metric":"rpc.metrics.checkAndPut_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkOOME/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.checkOOME.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkOOME/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.checkOOME.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkOOME_avg_time":{
-        "metric":"rpc.metrics.checkOOME_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkOOME_num_ops":{
-        "metric":"rpc.metrics.checkOOME_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.close.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.close.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.closeRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.closeRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion_avg_time":{
-        "metric":"rpc.metrics.closeRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion_num_ops":{
-        "metric":"rpc.metrics.closeRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close_avg_time":{
-        "metric":"rpc.metrics.close_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close_num_ops":{
-        "metric":"rpc.metrics.close_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.compactRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.compactRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion_avg_time":{
-        "metric":"rpc.metrics.compactRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion_num_ops":{
-        "metric":"rpc.metrics.compactRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/createTable_avg_time":{
-        "metric":"rpc.metrics.createTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/createTable_num_ops":{
-        "metric":"rpc.metrics.createTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.delete.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.delete.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteColumn_avg_time":{
-        "metric":"rpc.metrics.deleteColumn_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteColumn_num_ops":{
-        "metric":"rpc.metrics.deleteColumn_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteTable_avg_time":{
-        "metric":"rpc.metrics.deleteTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteTable_num_ops":{
-        "metric":"rpc.metrics.deleteTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete_avg_time":{
-        "metric":"rpc.metrics.delete_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete_num_ops":{
-        "metric":"rpc.metrics.delete_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/disableTable_avg_time":{
-        "metric":"rpc.metrics.disableTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/disableTable_num_ops":{
-        "metric":"rpc.metrics.disableTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/enableTable_avg_time":{
-        "metric":"rpc.metrics.enableTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/enableTable_num_ops":{
-        "metric":"rpc.metrics.enableTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.execCoprocessor.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.execCoprocessor.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor_avg_time":{
-        "metric":"rpc.metrics.execCoprocessor_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor_num_ops":{
-        "metric":"rpc.metrics.execCoprocessor_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.exists.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.exists.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists_avg_time":{
-        "metric":"rpc.metrics.exists_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists_num_ops":{
-        "metric":"rpc.metrics.exists_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.flushRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.flushRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion_avg_time":{
-        "metric":"rpc.metrics.flushRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion_num_ops":{
-        "metric":"rpc.metrics.flushRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.get.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.get.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getAlterStatus_avg_time":{
-        "metric":"rpc.metrics.getAlterStatus_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getAlterStatus_num_ops":{
-        "metric":"rpc.metrics.getAlterStatus_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.getBlockCacheColumnFamilySummaries.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.getBlockCacheColumnFamilySummaries.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries_avg_time":{
-        "metric":"rpc.metrics.getBlockCacheColumnFamilySummaries_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries_num_ops":{
-        "metric":"rpc.metrics.getBlockCacheColumnFamilySummaries_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getCatalogTracker/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.getCatalogTracker.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getCatalogTracker/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.getCatalogTracker.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getCatalogTracker_avg_time":{
-        "metric":"rpc.metrics.getCatalogTracker_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getCatalogTracker_num_ops":{
-        "metric":"rpc.metrics.getCatalogTracker_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.getClosestRowBefore.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.getClosestRowBefore.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore_avg_time":{
-        "metric":"rpc.metrics.getClosestRowBefore_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore_num_ops":{
-        "metric":"rpc.metrics.getClosestRowBefore_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClusterStatus_avg_time":{
-        "metric":"rpc.metrics.getClusterStatus_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClusterStatus_num_ops":{
-        "metric":"rpc.metrics.getClusterStatus_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getConfiguration/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.getConfiguration.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getConfiguration/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.getConfiguration.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getConfiguration_avg_time":{
-        "metric":"rpc.metrics.getConfiguration_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getConfiguration_num_ops":{
-        "metric":"rpc.metrics.getConfiguration_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getFromOnlineRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.getFromOnlineRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getFromOnlineRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.getFromOnlineRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getFromOnlineRegions_avg_time":{
-        "metric":"rpc.metrics.getFromOnlineRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getFromOnlineRegions_num_ops":{
-        "metric":"rpc.metrics.getFromOnlineRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.getHServerInfo.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.getHServerInfo.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo_avg_time":{
-        "metric":"rpc.metrics.getHServerInfo_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo_num_ops":{
-        "metric":"rpc.metrics.getHServerInfo_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHTableDescriptors_avg_time":{
-        "metric":"rpc.metrics.getHTableDescriptors_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHTableDescriptors_num_ops":{
-        "metric":"rpc.metrics.getHTableDescriptors_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.getOnlineRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.getOnlineRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions_avg_time":{
-        "metric":"rpc.metrics.getOnlineRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions_num_ops":{
-        "metric":"rpc.metrics.getOnlineRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.getProtocolSignature.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.getProtocolSignature.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature_avg_time":{
-        "metric":"rpc.metrics.getProtocolSignature_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature_num_ops":{
-        "metric":"rpc.metrics.getProtocolSignature_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.getProtocolVersion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.getProtocolVersion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion_avg_time":{
-        "metric":"rpc.metrics.getProtocolVersion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion_num_ops":{
-        "metric":"rpc.metrics.getProtocolVersion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.getRegionInfo.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.getRegionInfo.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo_avg_time":{
-        "metric":"rpc.metrics.getRegionInfo_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo_num_ops":{
-        "metric":"rpc.metrics.getRegionInfo_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getServerName/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.getServerName.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getServerName/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.getServerName.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getServerName_avg_time":{
-        "metric":"rpc.metrics.getServerName_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getServerName_num_ops":{
-        "metric":"rpc.metrics.getServerName_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getZooKeeper/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.getZooKeeper.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getZooKeeper/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.getZooKeeper.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getZooKeeper_avg_time":{
-        "metric":"rpc.metrics.getZooKeeper_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getZooKeeper_num_ops":{
-        "metric":"rpc.metrics.getZooKeeper_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get_avg_time":{
-        "metric":"rpc.metrics.get_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get_num_ops":{
-        "metric":"rpc.metrics.get_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.increment.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.increment.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.incrementColumnValue.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.incrementColumnValue.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue_avg_time":{
-        "metric":"rpc.metrics.incrementColumnValue_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue_num_ops":{
-        "metric":"rpc.metrics.incrementColumnValue_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment_avg_time":{
-        "metric":"rpc.metrics.increment_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment_num_ops":{
-        "metric":"rpc.metrics.increment_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isAborted/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.isAborted.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isAborted/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.isAborted.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isAborted_avg_time":{
-        "metric":"rpc.metrics.isAborted_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isAborted_num_ops":{
-        "metric":"rpc.metrics.isAborted_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isMasterRunning_avg_time":{
-        "metric":"rpc.metrics.isMasterRunning_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isMasterRunning_num_ops":{
-        "metric":"rpc.metrics.isMasterRunning_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isStopped/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.isStopped.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isStopped/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.isStopped.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isStopped_avg_time":{
-        "metric":"rpc.metrics.isStopped_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isStopped_num_ops":{
-        "metric":"rpc.metrics.isStopped_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.lockRow.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.lockRow.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow_avg_time":{
-        "metric":"rpc.metrics.lockRow_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow_num_ops":{
-        "metric":"rpc.metrics.lockRow_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyColumn_avg_time":{
-        "metric":"rpc.metrics.modifyColumn_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyColumn_num_ops":{
-        "metric":"rpc.metrics.modifyColumn_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyTable_avg_time":{
-        "metric":"rpc.metrics.modifyTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyTable_num_ops":{
-        "metric":"rpc.metrics.modifyTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/move_avg_time":{
-        "metric":"rpc.metrics.move_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/move_num_ops":{
-        "metric":"rpc.metrics.move_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.multi.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.multi.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi_avg_time":{
-        "metric":"rpc.metrics.multi_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi_num_ops":{
-        "metric":"rpc.metrics.multi_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.next.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.next.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next_avg_time":{
-        "metric":"rpc.metrics.next_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next_num_ops":{
-        "metric":"rpc.metrics.next_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/offline_avg_time":{
-        "metric":"rpc.metrics.offline_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/offline_num_ops":{
-        "metric":"rpc.metrics.offline_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.openRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.openRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion_avg_time":{
-        "metric":"rpc.metrics.openRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion_num_ops":{
-        "metric":"rpc.metrics.openRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.openRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.openRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions_avg_time":{
-        "metric":"rpc.metrics.openRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions_num_ops":{
-        "metric":"rpc.metrics.openRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.openScanner.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.openScanner.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner_avg_time":{
-        "metric":"rpc.metrics.openScanner_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner_num_ops":{
-        "metric":"rpc.metrics.openScanner_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.put.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.put.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put_avg_time":{
-        "metric":"rpc.metrics.put_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put_num_ops":{
-        "metric":"rpc.metrics.put_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerReport_avg_time":{
-        "metric":"rpc.metrics.regionServerReport_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerReport_num_ops":{
-        "metric":"rpc.metrics.regionServerReport_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerStartup_avg_time":{
-        "metric":"rpc.metrics.regionServerStartup_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerStartup_num_ops":{
-        "metric":"rpc.metrics.regionServerStartup_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.removeFromOnlineRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.removeFromOnlineRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/removeFromOnlineRegions_avg_time":{
-        "metric":"rpc.metrics.removeFromOnlineRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/removeFromOnlineRegions_num_ops":{
-        "metric":"rpc.metrics.removeFromOnlineRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.replicateLogEntries.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.replicateLogEntries.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries_avg_time":{
-        "metric":"rpc.metrics.replicateLogEntries_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries_num_ops":{
-        "metric":"rpc.metrics.replicateLogEntries_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/reportRSFatalError_avg_time":{
-        "metric":"rpc.metrics.reportRSFatalError_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/reportRSFatalError_num_ops":{
-        "metric":"rpc.metrics.reportRSFatalError_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.rollHLogWriter.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.rollHLogWriter.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter_avg_time":{
-        "metric":"rpc.metrics.rollHLogWriter_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter_num_ops":{
-        "metric":"rpc.metrics.rollHLogWriter_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthenticationFailures":{
-        "metric":"rpc.metrics.rpcAuthenticationFailures",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthenticationSuccesses":{
-        "metric":"rpc.metrics.rpcAuthenticationSuccesses",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthorizationFailures":{
-        "metric":"rpc.rpc.rpcAuthorizationFailures",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthorizationSuccesses":{
-        "metric":"rpc.metrics.rpcAuthorizationSuccesses",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/shutdown_avg_time":{
-        "metric":"rpc.metrics.shutdown_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/shutdown_num_ops":{
-        "metric":"rpc.metrics.shutdown_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.splitRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.splitRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion_avg_time":{
-        "metric":"rpc.metrics.splitRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion_num_ops":{
-        "metric":"rpc.metrics.splitRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.stop.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.stop.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stopMaster_avg_time":{
-        "metric":"rpc.metrics.stopMaster_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stopMaster_num_ops":{
-        "metric":"rpc.metrics.stopMaster_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop_avg_time":{
-        "metric":"rpc.metrics.stop_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop_num_ops":{
-        "metric":"rpc.metrics.stop_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unassign_avg_time":{
-        "metric":"rpc.metrics.unassign_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unassign_num_ops":{
-        "metric":"rpc.metrics.unassign_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow/aboveOneSec/_avg_time":{
-        "metric":"rpc.metrics.unlockRow.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow/aboveOneSec/_num_ops":{
-        "metric":"rpc.metrics.unlockRow.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow_avg_time":{
-        "metric":"rpc.metrics.unlockRow_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow_num_ops":{
-        "metric":"rpc.metrics.unlockRow_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/canCommit_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.canCommit_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/canCommit_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.canCommit_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/commitPending_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.commitPending_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/commitPending_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.commitPending_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/done_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.done_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/done_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.done_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getBlockLocalPathInfo_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getBlockLocalPathInfo_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getBlockLocalPathInfo_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getBlockLocalPathInfo_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getProtocolVersion_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getProtocolVersion_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getTask_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getTask_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getTask_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getTask_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/ping_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.ping_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/ping_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.ping_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/statusUpdate_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.statusUpdate_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/statusUpdate_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.statusUpdate_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/ugi/loginFailure_avg_time":{
-        "metric":"ugi.ugi.loginFailure_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/ugi/loginFailure_num_ops":{
-        "metric":"ugi.ugi.loginFailure_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/ugi/loginSuccess_avg_time":{
-        "metric":"ugi.ugi.loginSuccess_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/ugi/loginSuccess_num_ops":{
-        "metric":"ugi.ugi.loginSuccess_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      }
-    }
-  },
-  "Component":{
-    "NAMENODE":{
-      "metrics/boottime":{
-        "metric":"boottime",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_aidle":{
-        "metric":"cpu_aidle",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_idle":{
-        "metric":"cpu_idle",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_nice":{
-        "metric":"cpu_nice",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_num":{
-        "metric":"cpu_num",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_speed":{
-        "metric":"cpu_speed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_system":{
-        "metric":"cpu_system",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_user":{
-        "metric":"cpu_user",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_wio":{
-        "metric":"cpu_wio",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/FSNamesystem/BlockCapacity":{
-        "metric":"dfs.FSNamesystem.BlockCapacity",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/FSNamesystem/BlocksTotal":{
-        "metric":"dfs.FSNamesystem.BlocksTotal",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/FSNamesystem/CapacityRemainingGB":{
-        "metric":"dfs.FSNamesystem.CapacityRemainingGB",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/FSNamesystem/CapacityTotalGB":{
-        "metric":"dfs.FSNamesystem.CapacityTotalGB",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/FSNamesystem/CapacityUsedGB":{
-        "metric":"dfs.FSNamesystem.CapacityUsedGB",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/FSNamesystem/CorruptBlocks":{
-        "metric":"dfs.FSNamesystem.CorruptBlocks",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/FSNamesystem/ExcessBlocks":{
-        "metric":"dfs.FSNamesystem.ExcessBlocks",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/FSNamesystem/FilesTotal":{
-        "metric":"dfs.FSNamesystem.FilesTotal",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/FSNamesystem/MissingBlocks":{
-        "metric":"dfs.FSNamesystem.MissingBlocks",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/FSNamesystem/PendingDeletionBlocks":{
-        "metric":"dfs.FSNamesystem.PendingDeletionBlocks",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/FSNamesystem/PendingReplicationBlocks":{
-        "metric":"dfs.FSNamesystem.PendingReplicationBlocks",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks":{
-        "metric":"dfs.FSNamesystem.ScheduledReplicationBlocks",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/FSNamesystem/TotalLoad":{
-        "metric":"dfs.FSNamesystem.TotalLoad",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/FSNamesystem/UnderReplicatedBlocks":{
-        "metric":"dfs.FSNamesystem.UnderReplicatedBlocks",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/AddBlockOps":{
-        "metric":"dfs.namenode.AddBlockOps",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/CreateFileOps":{
-        "metric":"dfs.namenode.CreateFileOps",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/DeleteFileOps":{
-        "metric":"dfs.namenode.DeleteFileOps",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/FileInfoOps":{
-        "metric":"dfs.namenode.FileInfoOps",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/FilesAppended":{
-        "metric":"dfs.namenode.FilesAppended",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/FilesCreated":{
-        "metric":"dfs.namenode.FilesCreated",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/FilesDeleted":{
-        "metric":"dfs.namenode.FilesDeleted",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/FilesInGetListingOps":{
-        "metric":"dfs.namenode.FilesInGetListingOps",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/FilesRenamed":{
-        "metric":"dfs.namenode.FilesRenamed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/GetBlockLocations":{
-        "metric":"dfs.namenode.GetBlockLocations",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/GetListingOps":{
-        "metric":"dfs.namenode.GetListingOps",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/JournalTransactionsBatchedInSync":{
-        "metric":"dfs.namenode.JournalTransactionsBatchedInSync",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/SafemodeTime":{
-        "metric":"dfs.namenode.SafemodeTime",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/Syncs_avg_time":{
-        "metric":"dfs.namenode.Syncs_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/Syncs_num_ops":{
-        "metric":"dfs.namenode.Syncs_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/Transactions_avg_time":{
-        "metric":"dfs.namenode.Transactions_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/Transactions_num_ops":{
-        "metric":"dfs.namenode.Transactions_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/blockReport_avg_time":{
-        "metric":"dfs.namenode.blockReport_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/blockReport_num_ops":{
-        "metric":"dfs.namenode.blockReport_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/fsImageLoadTime":{
-        "metric":"dfs.namenode.fsImageLoadTime",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/disk_free":{
-        "metric":"disk_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/disk_total":{
-        "metric":"disk_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/part_max_used":{
-        "metric":"part_max_used",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/gcCount":{
-        "metric":"jvm.metrics.gcCount",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/gcTimeMillis":{
-        "metric":"jvm.metrics.gcTimeMillis",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/logError":{
-        "metric":"jvm.metrics.logError",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/logFatal":{
-        "metric":"jvm.metrics.logFatal",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/logInfo":{
-        "metric":"jvm.metrics.logInfo",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/logWarn":{
-        "metric":"jvm.metrics.logWarn",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/memHeapCommittedM":{
-        "metric":"jvm.metrics.memHeapCommittedM",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/memHeapUsedM":{
-        "metric":"jvm.metrics.memHeapUsedM",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/memNonHeapCommittedM":{
-        "metric":"jvm.metrics.memNonHeapCommittedM",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/memNonHeapUsedM":{
-        "metric":"jvm.metrics.memNonHeapUsedM",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/threadsBlocked":{
-        "metric":"jvm.metrics.threadsBlocked",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/threadsNew":{
-        "metric":"jvm.metrics.threadsNew",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/threadsRunnable":{
-        "metric":"jvm.metrics.threadsRunnable",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/threadsTerminated":{
-        "metric":"jvm.metrics.threadsTerminated",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/threadsTimedWaiting":{
-        "metric":"jvm.metrics.threadsTimedWaiting",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/threadsWaiting":{
-        "metric":"jvm.metrics.threadsWaiting",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/load/load_fifteen":{
-        "metric":"load_fifteen",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_five":{
-        "metric":"load_five",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_one":{
-        "metric":"load_one",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_buffers":{
-        "metric":"mem_buffers",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_cached":{
-        "metric":"mem_cached",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_free":{
-        "metric":"mem_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_shared":{
-        "metric":"mem_shared",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_total":{
-        "metric":"mem_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/swap_free":{
-        "metric":"swap_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/swap_total":{
-        "metric":"swap_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/bytes_in":{
-        "metric":"bytes_in",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/bytes_out":{
-        "metric":"bytes_out",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/pkts_in":{
-        "metric":"pkts_in",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/pkts_out":{
-        "metric":"pkts_out",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/process/proc_run":{
-        "metric":"proc_run",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/process/proc_total":{
-        "metric":"proc_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/NumOpenConnections":{
-        "metric":"rpc.rpc.NumOpenConnections",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/ReceivedBytes":{
-        "metric":"rpc.rpc.ReceivedBytes",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcProcessingTime_avg_time":{
-        "metric":"rpc.rpc.RpcProcessingTime_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcProcessingTime_num_ops":{
-        "metric":"rpc.rpc.RpcProcessingTime_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcQueueTime_avg_time":{
-        "metric":"rpc.rpc.RpcQueueTime_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcQueueTime_num_ops":{
-        "metric":"rpc.rpc.RpcQueueTime_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/SentBytes":{
-        "metric":"rpc.rpc.SentBytes",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/callQueueLen":{
-        "metric":"rpc.rpc.callQueueLen",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthenticationFailures":{
-        "metric":"rpc.rpc.rpcAuthenticationFailures",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthenticationSuccesses":{
-        "metric":"rpc.rpc.rpcAuthenticationSuccesses",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthorizationFailures":{
-        "metric":"rpc.rpc.rpcAuthorizationFailures",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthorizationSuccesses":{
-        "metric":"rpc.rpc.rpcAuthorizationSuccesses",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/addBlock_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.addBlock_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/addBlock_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.addBlock_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/blockReceived_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.blockReceived_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/blockReceived_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.blockReceived_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/blockReport_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.blockReport_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/blockReport_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.blockReport_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/blocksBeingWrittenReport_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.blocksBeingWrittenReport_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/blocksBeingWrittenReport_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.blocksBeingWrittenReport_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/complete_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.complete_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/complete_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.complete_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/create_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.create_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/create_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.create_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/delete_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.delete_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/delete_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.delete_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/fsync_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.fsync_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/fsync_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.fsync_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getBlockLocations_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getBlockLocations_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getBlockLocations_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getBlockLocations_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getEditLogSize_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getEditLogSize_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getEditLogSize_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getEditLogSize_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getFileInfo_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getFileInfo_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getFileInfo_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getFileInfo_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getListing_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getListing_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getListing_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getListing_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getProtocolVersion_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getProtocolVersion_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/mkdirs_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.mkdirs_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/mkdirs_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.mkdirs_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/register_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.register_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/register_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.register_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/rename_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.rename_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/rename_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.rename_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/renewLease_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.renewLease_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/renewLease_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.renewLease_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/rollEditLog_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.rollEditLog_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/rollEditLog_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.rollEditLog_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/rollFsImage_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.rollFsImage_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/rollFsImage_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.rollFsImage_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/sendHeartbeat_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.sendHeartbeat_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/sendHeartbeat_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.sendHeartbeat_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/setOwner_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.setOwner_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/setOwner_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.setOwner_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/setPermission_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.setPermission_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/setPermission_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.setPermission_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/setReplication_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.setReplication_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/setReplication_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.setReplication_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/setSafeMode_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.setSafeMode_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/setSafeMode_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.setSafeMode_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/versionRequest_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.versionRequest_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/versionRequest_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.versionRequest_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/ugi/loginFailure_avg_time":{
-        "metric":"ugi.ugi.loginFailure_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/ugi/loginFailure_num_ops":{
-        "metric":"ugi.ugi.loginFailure_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/ugi/loginSuccess_avg_time":{
-        "metric":"ugi.ugi.loginSuccess_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/ugi/loginSuccess_num_ops":{
-        "metric":"ugi.ugi.loginSuccess_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      }
-    },
-
-    "DATANODE":{
-
-      "metrics/boottime":{
-        "metric":"boottime",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_aidle":{
-        "metric":"cpu_aidle",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_idle":{
-        "metric":"cpu_idle",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_nice":{
-        "metric":"cpu_nice",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_num":{
-        "metric":"cpu_num",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_speed":{
-        "metric":"cpu_speed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_system":{
-        "metric":"cpu_system",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_user":{
-        "metric":"cpu_user",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_wio":{
-        "metric":"cpu_wio",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/blockChecksumOp_avg_time":{
-        "metric":"dfs.datanode.blockChecksumOp_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/blockChecksumOp_num_ops":{
-        "metric":"dfs.datanode.blockChecksumOp_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/blockReports_avg_time":{
-        "metric":"dfs.datanode.blockReports_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/blockReports_num_ops":{
-        "metric":"dfs.datanode.blockReports_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/block_verification_failures":{
-        "metric":"dfs.datanode.block_verification_failures",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/blocks_get_local_pathinfo":{
-        "metric":"dfs.datanode.blocks_get_local_pathinfo",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/blocks_read":{
-        "metric":"dfs.datanode.blocks_read",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/blocks_removed":{
-        "metric":"dfs.datanode.blocks_removed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/blocks_replicated":{
-        "metric":"dfs.datanode.blocks_replicated",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/blocks_verified":{
-        "metric":"dfs.datanode.blocks_verified",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/blocks_written":{
-        "metric":"dfs.datanode.blocks_written",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/bytes_read":{
-        "metric":"dfs.datanode.bytes_read",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/bytes_written":{
-        "metric":"dfs.datanode.bytes_written",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/copyBlockOp_avg_time":{
-        "metric":"dfs.datanode.copyBlockOp_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/copyBlockOp_num_ops":{
-        "metric":"dfs.datanode.copyBlockOp_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/heartBeats_avg_time":{
-        "metric":"dfs.datanode.heartBeats_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/heartBeats_num_ops":{
-        "metric":"dfs.datanode.heartBeats_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/readBlockOp_avg_time":{
-        "metric":"dfs.datanode.readBlockOp_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/readBlockOp_num_ops":{
-        "metric":"dfs.datanode.readBlockOp_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/reads_from_local_client":{
-        "metric":"dfs.datanode.reads_from_local_client",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/reads_from_remote_client":{
-        "metric":"dfs.datanode.reads_from_remote_client",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/replaceBlockOp_avg_time":{
-        "metric":"dfs.datanode.replaceBlockOp_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/replaceBlockOp_num_ops":{
-        "metric":"dfs.datanode.replaceBlockOp_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/writeBlockOp_avg_time":{
-        "metric":"dfs.datanode.writeBlockOp_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/writeBlockOp_num_ops":{
-        "metric":"dfs.datanode.writeBlockOp_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/writes_from_local_client":{
-        "metric":"dfs.datanode.writes_from_local_client",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/writes_from_remote_client":{
-        "metric":"dfs.datanode.writes_from_remote_client",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/disk_free":{
-        "metric":"disk_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/disk_total":{
-        "metric":"disk_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/part_max_used":{
-        "metric":"part_max_used",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/gcCount":{
-        "metric":"jvm.metrics.gcCount",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/gcTimeMillis":{
-        "metric":"jvm.metrics.gcTimeMillis",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/logError":{
-        "metric":"jvm.metrics.logError",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/logFatal":{
-        "metric":"jvm.metrics.logFatal",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/logInfo":{
-        "metric":"jvm.metrics.logInfo",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/logWarn":{
-        "metric":"jvm.metrics.logWarn",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/maxMemoryM":{
-        "metric":"jvm.metrics.maxMemoryM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/memHeapCommittedM":{
-        "metric":"jvm.metrics.memHeapCommittedM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/memHeapUsedM":{
-        "metric":"jvm.metrics.memHeapUsedM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/memNonHeapCommittedM":{
-        "metric":"jvm.metrics.memNonHeapCommittedM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/memNonHeapUsedM":{
-        "metric":"jvm.metrics.memNonHeapUsedM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsBlocked":{
-        "metric":"jvm.metrics.threadsBlocked",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsNew":{
-        "metric":"jvm.metrics.threadsNew",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsRunnable":{
-        "metric":"jvm.metrics.threadsRunnable",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsTerminated":{
-        "metric":"jvm.metrics.threadsTerminated",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsTimedWaiting":{
-        "metric":"jvm.metrics.threadsTimedWaiting",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsWaiting":{
-        "metric":"jvm.metrics.threadsWaiting",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_fifteen":{
-        "metric":"load_fifteen",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_five":{
-        "metric":"load_five",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_one":{
-        "metric":"load_one",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_buffers":{
-        "metric":"mem_buffers",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_cached":{
-        "metric":"mem_cached",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_free":{
-        "metric":"mem_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_shared":{
-        "metric":"mem_shared",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_total":{
-        "metric":"mem_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/swap_free":{
-        "metric":"swap_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/swap_total":{
-        "metric":"swap_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/bytes_in":{
-        "metric":"bytes_in",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/bytes_out":{
-        "metric":"bytes_out",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/pkts_in":{
-        "metric":"pkts_in",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/pkts_out":{
-        "metric":"pkts_out",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/process/proc_run":{
-        "metric":"proc_run",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/process/proc_total":{
-        "metric":"proc_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/NumOpenConnections":{
-        "metric":"rpc.rpc.NumOpenConnections",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/ReceivedBytes":{
-        "metric":"rpc.rpc.ReceivedBytes",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcProcessingTime_avg_time":{
-        "metric":"rpc.rpc.RpcProcessingTime_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcProcessingTime_num_ops":{
-        "metric":"rpc.rpc.RpcProcessingTime_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcQueueTime_avg_time":{
-        "metric":"rpc.rpc.RpcQueueTime_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcQueueTime_num_ops":{
-        "metric":"rpc.rpc.RpcQueueTime_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcSlowResponse_avg_time":{
-        "metric":"rpc.rpc.RpcSlowResponse_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcSlowResponse_num_ops":{
-        "metric":"rpc.rpc.RpcSlowResponse_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/SentBytes":{
-        "metric":"rpc.rpc.SentBytes",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/abort/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.abort.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/abort/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.abort.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/abort_avg_time":{
-        "metric":"rpc.rpc.abort_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/abort_num_ops":{
-        "metric":"rpc.rpc.abort_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addColumn_avg_time":{
-        "metric":"rpc.rpc.addColumn_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addColumn_num_ops":{
-        "metric":"rpc.rpc.addColumn_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addToOnlineRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.addToOnlineRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addToOnlineRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.addToOnlineRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addToOnlineRegions_avg_time":{
-        "metric":"rpc.rpc.addToOnlineRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addToOnlineRegions_num_ops":{
-        "metric":"rpc.rpc.addToOnlineRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/assign_avg_time":{
-        "metric":"rpc.rpc.assign_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/assign_num_ops":{
-        "metric":"rpc.rpc.assign_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balanceSwitch_avg_time":{
-        "metric":"rpc.rpc.balanceSwitch_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balanceSwitch_num_ops":{
-        "metric":"rpc.rpc.balanceSwitch_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balance_avg_time":{
-        "metric":"rpc.rpc.balance_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balance_num_ops":{
-        "metric":"rpc.rpc.balance_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.bulkLoadHFiles.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.bulkLoadHFiles.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles_avg_time":{
-        "metric":"rpc.rpc.bulkLoadHFiles_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles_num_ops":{
-        "metric":"rpc.rpc.bulkLoadHFiles_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/callQueueLen":{
-        "metric":"rpc.rpc.callQueueLen",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.checkAndDelete.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.checkAndDelete.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete_avg_time":{
-        "metric":"rpc.rpc.checkAndDelete_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete_num_ops":{
-        "metric":"rpc.rpc.checkAndDelete_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.checkAndPut.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.checkAndPut.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut_avg_time":{
-        "metric":"rpc.rpc.checkAndPut_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut_num_ops":{
-        "metric":"rpc.rpc.checkAndPut_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkOOME/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.checkOOME.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkOOME/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.checkOOME.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkOOME_avg_time":{
-        "metric":"rpc.rpc.checkOOME_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkOOME_num_ops":{
-        "metric":"rpc.rpc.checkOOME_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.close.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.close.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.closeRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.closeRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion_avg_time":{
-        "metric":"rpc.rpc.closeRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion_num_ops":{
-        "metric":"rpc.rpc.closeRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close_avg_time":{
-        "metric":"rpc.rpc.close_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close_num_ops":{
-        "metric":"rpc.rpc.close_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.compactRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.compactRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion_avg_time":{
-        "metric":"rpc.rpc.compactRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion_num_ops":{
-        "metric":"rpc.rpc.compactRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/createTable_avg_time":{
-        "metric":"rpc.rpc.createTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/createTable_num_ops":{
-        "metric":"rpc.rpc.createTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.delete.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.delete.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteColumn_avg_time":{
-        "metric":"rpc.rpc.deleteColumn_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteColumn_num_ops":{
-        "metric":"rpc.rpc.deleteColumn_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteTable_avg_time":{
-        "metric":"rpc.rpc.deleteTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteTable_num_ops":{
-        "metric":"rpc.rpc.deleteTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete_avg_time":{
-        "metric":"rpc.rpc.delete_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete_num_ops":{
-        "metric":"rpc.rpc.delete_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/disableTable_avg_time":{
-        "metric":"rpc.rpc.disableTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/disableTable_num_ops":{
-        "metric":"rpc.rpc.disableTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/enableTable_avg_time":{
-        "metric":"rpc.rpc.enableTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/enableTable_num_ops":{
-        "metric":"rpc.rpc.enableTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.execCoprocessor.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.execCoprocessor.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor_avg_time":{
-        "metric":"rpc.rpc.execCoprocessor_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor_num_ops":{
-        "metric":"rpc.rpc.execCoprocessor_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.exists.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.exists.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists_avg_time":{
-        "metric":"rpc.rpc.exists_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists_num_ops":{
-        "metric":"rpc.rpc.exists_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.flushRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.flushRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion_avg_time":{
-        "metric":"rpc.rpc.flushRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion_num_ops":{
-        "metric":"rpc.rpc.flushRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.get.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.get.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getAlterStatus_avg_time":{
-        "metric":"rpc.rpc.getAlterStatus_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getAlterStatus_num_ops":{
-        "metric":"rpc.rpc.getAlterStatus_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries_avg_time":{
-        "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries_num_ops":{
-        "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getCatalogTracker/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getCatalogTracker.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getCatalogTracker/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getCatalogTracker.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getCatalogTracker_avg_time":{
-        "metric":"rpc.rpc.getCatalogTracker_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getCatalogTracker_num_ops":{
-        "metric":"rpc.rpc.getCatalogTracker_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getClosestRowBefore.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getClosestRowBefore.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore_avg_time":{
-        "metric":"rpc.rpc.getClosestRowBefore_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore_num_ops":{
-        "metric":"rpc.rpc.getClosestRowBefore_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClusterStatus_avg_time":{
-        "metric":"rpc.rpc.getClusterStatus_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClusterStatus_num_ops":{
-        "metric":"rpc.rpc.getClusterStatus_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getConfiguration/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getConfiguration.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getConfiguration/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getConfiguration.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getConfiguration_avg_time":{
-        "metric":"rpc.rpc.getConfiguration_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getConfiguration_num_ops":{
-        "metric":"rpc.rpc.getConfiguration_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getFromOnlineRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getFromOnlineRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getFromOnlineRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getFromOnlineRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getFromOnlineRegions_avg_time":{
-        "metric":"rpc.rpc.getFromOnlineRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getFromOnlineRegions_num_ops":{
-        "metric":"rpc.rpc.getFromOnlineRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getHServerInfo.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getHServerInfo.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo_avg_time":{
-        "metric":"rpc.rpc.getHServerInfo_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo_num_ops":{
-        "metric":"rpc.rpc.getHServerInfo_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHTableDescriptors_avg_time":{
-        "metric":"rpc.rpc.getHTableDescriptors_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHTableDescriptors_num_ops":{
-        "metric":"rpc.rpc.getHTableDescriptors_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getOnlineRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getOnlineRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions_avg_time":{
-        "metric":"rpc.rpc.getOnlineRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions_num_ops":{
-        "metric":"rpc.rpc.getOnlineRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getProtocolSignature.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getProtocolSignature.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature_avg_time":{
-        "metric":"rpc.rpc.getProtocolSignature_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature_num_ops":{
-        "metric":"rpc.rpc.getProtocolSignature_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getProtocolVersion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getProtocolVersion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion_avg_time":{
-        "metric":"rpc.rpc.getProtocolVersion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion_num_ops":{
-        "metric":"rpc.rpc.getProtocolVersion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getRegionInfo.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getRegionInfo.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo_avg_time":{
-        "metric":"rpc.rpc.getRegionInfo_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo_num_ops":{
-        "metric":"rpc.rpc.getRegionInfo_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getServerName/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getServerName.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getServerName/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getServerName.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getServerName_avg_time":{
-        "metric":"rpc.rpc.getServerName_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getServerName_num_ops":{
-        "metric":"rpc.rpc.getServerName_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getZooKeeper/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getZooKeeper.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getZooKeeper/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getZooKeeper.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getZooKeeper_avg_time":{
-        "metric":"rpc.rpc.getZooKeeper_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getZooKeeper_num_ops":{
-        "metric":"rpc.rpc.getZooKeeper_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get_avg_time":{
-        "metric":"rpc.rpc.get_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get_num_ops":{
-        "metric":"rpc.rpc.get_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.increment.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.increment.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.incrementColumnValue.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.incrementColumnValue.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue_avg_time":{
-        "metric":"rpc.rpc.incrementColumnValue_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue_num_ops":{
-        "metric":"rpc.rpc.incrementColumnValue_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment_avg_time":{
-        "metric":"rpc.rpc.increment_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment_num_ops":{
-        "metric":"rpc.rpc.increment_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isAborted/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.isAborted.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isAborted/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.isAborted.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isAborted_avg_time":{
-        "metric":"rpc.rpc.isAborted_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isAborted_num_ops":{
-        "metric":"rpc.rpc.isAborted_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isMasterRunning_avg_time":{
-        "metric":"rpc.rpc.isMasterRunning_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isMasterRunning_num_ops":{
-        "metric":"rpc.rpc.isMasterRunning_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isStopped/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.isStopped.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isStopped/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.isStopped.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isStopped_avg_time":{
-        "metric":"rpc.rpc.isStopped_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isStopped_num_ops":{
-        "metric":"rpc.rpc.isStopped_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.lockRow.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.lockRow.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow_avg_time":{
-        "metric":"rpc.rpc.lockRow_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow_num_ops":{
-        "metric":"rpc.rpc.lockRow_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyColumn_avg_time":{
-        "metric":"rpc.rpc.modifyColumn_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyColumn_num_ops":{
-        "metric":"rpc.rpc.modifyColumn_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyTable_avg_time":{
-        "metric":"rpc.rpc.modifyTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyTable_num_ops":{
-        "metric":"rpc.rpc.modifyTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/move_avg_time":{
-        "metric":"rpc.rpc.move_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/move_num_ops":{
-        "metric":"rpc.rpc.move_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.multi.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.multi.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi_avg_time":{
-        "metric":"rpc.rpc.multi_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi_num_ops":{
-        "metric":"rpc.rpc.multi_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.next.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.next.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next_avg_time":{
-        "metric":"rpc.rpc.next_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next_num_ops":{
-        "metric":"rpc.rpc.next_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/offline_avg_time":{
-        "metric":"rpc.rpc.offline_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/offline_num_ops":{
-        "metric":"rpc.rpc.offline_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.openRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.openRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion_avg_time":{
-        "metric":"rpc.rpc.openRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion_num_ops":{
-        "metric":"rpc.rpc.openRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.openRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.openRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions_avg_time":{
-        "metric":"rpc.rpc.openRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions_num_ops":{
-        "metric":"rpc.rpc.openRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.openScanner.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.openScanner.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner_avg_time":{
-        "metric":"rpc.rpc.openScanner_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner_num_ops":{
-        "metric":"rpc.rpc.openScanner_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.put.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.put.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put_avg_time":{
-        "metric":"rpc.rpc.put_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put_num_ops":{
-        "metric":"rpc.rpc.put_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerReport_avg_time":{
-        "metric":"rpc.rpc.regionServerReport_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerReport_num_ops":{
-        "metric":"rpc.rpc.regionServerReport_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerStartup_avg_time":{
-        "metric":"rpc.rpc.regionServerStartup_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerStartup_num_ops":{
-        "metric":"rpc.rpc.regionServerStartup_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.removeFromOnlineRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.removeFromOnlineRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/removeFromOnlineRegions_avg_time":{
-        "metric":"rpc.rpc.removeFromOnlineRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/removeFromOnlineRegions_num_ops":{
-        "metric":"rpc.rpc.removeFromOnlineRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.replicateLogEntries.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.replicateLogEntries.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries_avg_time":{
-        "metric":"rpc.rpc.replicateLogEntries_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries_num_ops":{
-        "metric":"rpc.rpc.replicateLogEntries_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/reportRSFatalError_avg_time":{
-        "metric":"rpc.rpc.reportRSFatalError_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/reportRSFatalError_num_ops":{
-        "metric":"rpc.rpc.reportRSFatalError_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.rollHLogWriter.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.rollHLogWriter.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter_avg_time":{
-        "metric":"rpc.rpc.rollHLogWriter_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter_num_ops":{
-        "metric":"rpc.rpc.rollHLogWriter_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthenticationFailures":{
-        "metric":"rpc.rpc.rpcAuthenticationFailures",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthenticationSuccesses":{
-        "metric":"rpc.rpc.rpcAuthenticationSuccesses",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthorizationFailures":{
-        "metric":"rpc.rpc.rpcAuthorizationFailures",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthorizationSuccesses":{
-        "metric":"rpc.rpc.rpcAuthorizationSuccesses",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/shutdown_avg_time":{
-        "metric":"rpc.rpc.shutdown_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/shutdown_num_ops":{
-        "metric":"rpc.rpc.shutdown_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.splitRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.splitRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion_avg_time":{
-        "metric":"rpc.rpc.splitRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion_num_ops":{
-        "metric":"rpc.rpc.splitRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.stop.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.stop.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stopMaster_avg_time":{
-        "metric":"rpc.rpc.stopMaster_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stopMaster_num_ops":{
-        "metric":"rpc.rpc.stopMaster_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop_avg_time":{
-        "metric":"rpc.rpc.stop_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop_num_ops":{
-        "metric":"rpc.rpc.stop_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unassign_avg_time":{
-        "metric":"rpc.rpc.unassign_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unassign_num_ops":{
-        "metric":"rpc.rpc.unassign_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.unlockRow.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.unlockRow.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow_avg_time":{
-        "metric":"rpc.rpc.unlockRow_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow_num_ops":{
-        "metric":"rpc.rpc.unlockRow_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/canCommit_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.canCommit_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/canCommit_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.canCommit_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/commitPending_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.commitPending_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/commitPending_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.commitPending_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/done_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.done_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/done_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.done_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getBlockLocalPathInfo_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getBlockLocalPathInfo_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getBlockLocalPathInfo_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getBlockLocalPathInfo_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getMapCompletionEvents_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getMapCompletionEvents_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getMapCompletionEvents_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getMapCompletionEvents_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getProtocolVersion_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getProtocolVersion_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getTask_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getTask_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getTask_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getTask_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/ping_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.ping_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/ping_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.ping_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/statusUpdate_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.statusUpdate_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/statusUpdate_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.statusUpdate_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/ugi/loginFailure_avg_time":{
-        "metric":"ugi.ugi.loginFailure_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/ugi/loginFailure_num_ops":{
-        "metric":"ugi.ugi.loginFailure_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/ugi/loginSuccess_avg_time":{
-        "metric":"ugi.ugi.loginSuccess_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/ugi/loginSuccess_num_ops":{
-        "metric":"ugi.ugi.loginSuccess_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      }
-    },
-
-    "JOBTRACKER":{
-
-      "metrics/boottime":{
-        "metric":"boottime",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_aidle":{
-        "metric":"cpu_aidle",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_idle":{
-        "metric":"cpu_idle",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_nice":{
-        "metric":"cpu_nice",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_num":{
-        "metric":"cpu_num",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_speed":{
-        "metric":"cpu_speed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_system":{
-        "metric":"cpu_system",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_user":{
-        "metric":"cpu_user",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_wio":{
-        "metric":"cpu_wio",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/disk_free":{
-        "metric":"disk_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/disk_total":{
-        "metric":"disk_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/part_max_used":{
-        "metric":"part_max_used",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/gcCount":{
-        "metric":"jvm.metrics.gcCount",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/gcTimeMillis":{
-        "metric":"jvm.metrics.gcTimeMillis",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/logError":{
-        "metric":"jvm.metrics.logError",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/logFatal":{
-        "metric":"jvm.metrics.logFatal",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/logInfo":{
-        "metric":"jvm.metrics.logInfo",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/logWarn":{
-        "metric":"jvm.metrics.logWarn",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/memHeapCommittedM":{
-        "metric":"jvm.metrics.memHeapCommittedM",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/memHeapUsedM":{
-        "metric":"jvm.metrics.memHeapUsedM",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/memNonHeapCommittedM":{
-        "metric":"jvm.metrics.memNonHeapCommittedM",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/memNonHeapUsedM":{
-        "metric":"jvm.metrics.memNonHeapUsedM",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/threadsBlocked":{
-        "metric":"jvm.metrics.threadsBlocked",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/threadsNew":{
-        "metric":"jvm.metrics.threadsNew",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/threadsRunnable":{
-        "metric":"jvm.metrics.threadsRunnable",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/threadsTerminated":{
-        "metric":"jvm.metrics.threadsTerminated",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/threadsTimedWaiting":{
-        "metric":"jvm.metrics.threadsTimedWaiting",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/threadsWaiting":{
-        "metric":"jvm.metrics.threadsWaiting",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/load/load_fifteen":{
-        "metric":"load_fifteen",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_five":{
-        "metric":"load_five",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_one":{
-        "metric":"load_one",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/jobs_completed":{
-        "metric":"mapred.Queue.jobs_completed",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/jobs_failed":{
-        "metric":"mapred.Queue.jobs_failed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/jobs_killed":{
-        "metric":"mapred.Queue.jobs_killed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/jobs_preparing":{
-        "metric":"mapred.Queue.jobs_preparing",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/jobs_running":{
-        "metric":"mapred.Queue.jobs_running",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/jobs_submitted":{
-        "metric":"mapred.Queue.jobs_submitted",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/maps_completed":{
-        "metric":"mapred.Queue.maps_completed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/maps_failed":{
-        "metric":"mapred.Queue.maps_failed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/maps_killed":{
-        "metric":"mapred.Queue.maps_killed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/maps_launched":{
-        "metric":"mapred.Queue.maps_launched",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/reduces_completed":{
-        "metric":"mapred.Queue.reduces_completed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/reduces_failed":{
-        "metric":"mapred.Queue.reduces_failed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/reduces_killed":{
-        "metric":"mapred.Queue.reduces_killed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/reduces_launched":{
-        "metric":"mapred.Queue.reduces_launched",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/reserved_map_slots":{
-        "metric":"mapred.Queue.reserved_map_slots",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/reserved_reduce_slots":{
-        "metric":"mapred.Queue.reserved_reduce_slots",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/running_0":{
-        "metric":"mapred.Queue.running_0",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/running_1440":{
-        "metric":"mapred.Queue.running_1440",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/running_300":{
-        "metric":"mapred.Queue.running_300",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/running_60":{
-        "metric":"mapred.Queue.running_60",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/waiting_maps":{
-        "metric":"mapred.Queue.waiting_maps",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/waiting_reduces":{
-        "metric":"mapred.Queue.waiting_reduces",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/blacklisted_maps":{
-        "metric":"mapred.jobtracker.blacklisted_maps",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/blacklisted_reduces":{
-        "metric":"mapred.jobtracker.blacklisted_reduces",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/heartbeats":{
-        "metric":"mapred.jobtracker.heartbeats",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/jobs_completed":{
-        "metric":"mapred.jobtracker.jobs_completed",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/jobs_failed":{
-        "metric":"mapred.jobtracker.jobs_failed",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/jobs_killed":{
-        "metric":"mapred.jobtracker.jobs_killed",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/jobs_preparing":{
-        "metric":"mapred.jobtracker.jobs_preparing",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/jobs_running":{
-        "metric":"mapred.jobtracker.jobs_running",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/jobs_submitted":{
-        "metric":"mapred.jobtracker.jobs_submitted",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/map_slots":{
-        "metric":"mapred.jobtracker.map_slots",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/maps_completed":{
-        "metric":"mapred.jobtracker.maps_completed",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/maps_failed":{
-        "metric":"mapred.jobtracker.maps_failed",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/maps_killed":{
-        "metric":"mapred.jobtracker.maps_killed",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/maps_launched":{
-        "metric":"mapred.jobtracker.maps_launched",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/occupied_map_slots":{
-        "metric":"mapred.jobtracker.occupied_map_slots",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/occupied_reduce_slots":{
-        "metric":"mapred.jobtracker.occupied_reduce_slots",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/reduce_slots":{
-        "metric":"mapred.jobtracker.reduce_slots",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/reduces_completed":{
-        "metric":"mapred.jobtracker.reduces_completed",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/reduces_failed":{
-        "metric":"mapred.jobtracker.reduces_failed",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/reduces_killed":{
-        "metric":"mapred.jobtracker.reduces_killed",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/reduces_launched":{
-        "metric":"mapred.jobtracker.reduces_launched",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/reserved_map_slots":{
-        "metric":"mapred.jobtracker.reserved_map_slots",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/reserved_reduce_slots":{
-        "metric":"mapred.jobtracker.reserved_reduce_slots",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/running_maps":{
-        "metric":"mapred.jobtracker.running_maps",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/running_reduces":{
-        "metric":"mapred.jobtracker.running_reduces",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/trackers":{
-        "metric":"mapred.jobtracker.trackers",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/trackers_blacklisted":{
-        "metric":"mapred.jobtracker.trackers_blacklisted",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/trackers_decommissioned":{
-        "metric":"mapred.jobtracker.trackers_decommissioned",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/trackers_graylisted":{
-        "metric":"mapred.jobtracker.trackers_graylisted",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/waiting_maps":{
-        "metric":"mapred.jobtracker.waiting_maps",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/waiting_reduces":{
-        "metric":"mapred.jobtracker.waiting_reduces",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/memory/mem_buffers":{
-        "metric":"mem_buffers",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_cached":{
-        "metric":"mem_cached",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_free":{
-        "metric":"mem_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_shared":{
-        "metric":"mem_shared",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_total":{
-        "metric":"mem_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/swap_free":{
-        "metric":"swap_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/swap_total":{
-        "metric":"swap_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/bytes_in":{
-        "metric":"bytes_in",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/bytes_out":{
-        "metric":"bytes_out",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/pkts_in":{
-        "metric":"pkts_in",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/pkts_out":{
-        "metric":"pkts_out",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/process/proc_run":{
-        "metric":"proc_run",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/process/proc_total":{
-        "metric":"proc_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/NumOpenConnections":{
-        "metric":"rpc.rpc.NumOpenConnections",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/ReceivedBytes":{
-        "metric":"rpc.rpc.ReceivedBytes",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcProcessingTime_avg_time":{
-        "metric":"rpc.rpc.RpcProcessingTime_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcProcessingTime_num_ops":{
-        "metric":"rpc.rpc.RpcProcessingTime_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcQueueTime_avg_time":{
-        "metric":"rpc.rpc.RpcQueueTime_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcQueueTime_num_ops":{
-        "metric":"rpc.rpc.RpcQueueTime_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/SentBytes":{
-        "metric":"rpc.rpc.SentBytes",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/callQueueLen":{
-        "metric":"rpc.rpc.callQueueLen",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthenticationFailures":{
-        "metric":"rpc.rpc.rpcAuthenticationFailures",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthenticationSuccesses":{
-        "metric":"rpc.rpc.rpcAuthenticationSuccesses",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthorizationFailures":{
-        "metric":"rpc.rpc.rpcAuthorizationFailures",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthorizationSuccesses":{
-        "metric":"rpc.rpc.rpcAuthorizationSuccesses",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getBuildVersion_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getBuildVersion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getBuildVersion_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getBuildVersion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getDelegationToken_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getDelegationToken_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getDelegationToken_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getDelegationToken_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getJobCounters_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getJobCounters_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getJobCounters_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getJobCounters_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getJobProfile_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getJobProfile_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getJobProfile_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getJobProfile_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getJobStatus_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getJobStatus_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getJobStatus_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getJobStatus_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getMapTaskReports_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getMapTaskReports_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getMapTaskReports_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getMapTaskReports_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getNewJobId_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getNewJobId_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getNewJobId_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getNewJobId_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getProtocolVersion_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getProtocolVersion_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getQueueAdmins_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getQueueAdmins_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getQueueAdmins_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getQueueAdmins_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getReduceTaskReports_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getReduceTaskReports_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getReduceTaskReports_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getReduceTaskReports_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getStagingAreaDir_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getStagingAreaDir_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getStagingAreaDir_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getStagingAreaDir_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getSystemDir_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getSystemDir_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getSystemDir_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getSystemDir_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getTaskCompletionEvents_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getTaskCompletionEvents_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getTaskCompletionEvents_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getTaskCompletionEvents_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/heartbeat_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.heartbeat_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/heartbeat_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.heartbeat_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/jobsToComplete_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.jobsToComplete_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/jobsToComplete_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.jobsToComplete_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/submitJob_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.submitJob_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/submitJob_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.submitJob_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/ugi/loginFailure_avg_time":{
-        "metric":"ugi.ugi.loginFailure_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/ugi/loginFailure_num_ops":{
-        "metric":"ugi.ugi.loginFailure_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/ugi/loginSuccess_avg_time":{
-        "metric":"ugi.ugi.loginSuccess_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/ugi/loginSuccess_num_ops":{
-        "metric":"ugi.ugi.loginSuccess_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      }
-    },
-
-    "TASKTRACKER":{
-
-      "metrics/boottime":{
-        "metric":"boottime",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_aidle":{
-        "metric":"cpu_aidle",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_idle":{
-        "metric":"cpu_idle",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_nice":{
-        "metric":"cpu_nice",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_num":{
-        "metric":"cpu_num",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_speed":{
-        "metric":"cpu_speed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_system":{
-        "metric":"cpu_system",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_user":{
-        "metric":"cpu_user",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_wio":{
-        "metric":"cpu_wio",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/disk_free":{
-        "metric":"disk_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/disk_total":{
-        "metric":"disk_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/part_max_used":{
-        "metric":"part_max_used",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/gcCount":{
-        "metric":"jvm.metrics.gcCount",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/gcTimeMillis":{
-        "metric":"jvm.metrics.gcTimeMillis",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/logError":{
-        "metric":"jvm.metrics.logError",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/logFatal":{
-        "metric":"jvm.metrics.logFatal",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/logInfo":{
-        "metric":"jvm.metrics.logInfo",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/logWarn":{
-        "metric":"jvm.metrics.logWarn",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/maxMemoryM":{
-        "metric":"jvm.metrics.maxMemoryM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/memHeapCommittedM":{
-        "metric":"jvm.metrics.memHeapCommittedM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/memHeapUsedM":{
-        "metric":"jvm.metrics.memHeapUsedM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/memNonHeapCommittedM":{
-        "metric":"jvm.metrics.memNonHeapCommittedM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/memNonHeapUsedM":{
-        "metric":"jvm.metrics.memNonHeapUsedM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsBlocked":{
-        "metric":"jvm.metrics.threadsBlocked",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsNew":{
-        "metric":"jvm.metrics.threadsNew",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsRunnable":{
-        "metric":"jvm.metrics.threadsRunnable",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsTerminated":{
-        "metric":"jvm.metrics.threadsTerminated",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsTimedWaiting":{
-        "metric":"jvm.metrics.threadsTimedWaiting",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsWaiting":{
-        "metric":"jvm.metrics.threadsWaiting",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_fifteen":{
-        "metric":"load_fifteen",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_five":{
-        "metric":"load_five",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_one":{
-        "metric":"load_one",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/shuffleOutput/shuffle_exceptions_caught":{
-        "metric":"mapred.shuffleOutput.shuffle_exceptions_caught",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/shuffleOutput/shuffle_failed_outputs":{
-        "metric":"mapred.shuffleOutput.shuffle_failed_outputs",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/shuffleOutput/shuffle_handler_busy_percent":{
-        "metric":"mapred.shuffleOutput.shuffle_handler_busy_percent",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/shuffleOutput/shuffle_output_bytes":{
-        "metric":"mapred.shuffleOutput.shuffle_output_bytes",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/shuffleOutput/shuffle_success_outputs":{
-        "metric":"mapred.shuffleOutput.shuffle_success_outputs",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/tasktracker/mapTaskSlots":{
-        "metric":"mapred.tasktracker.mapTaskSlots",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/tasktracker/maps_running":{
-        "metric":"mapred.tasktracker.maps_running",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/tasktracker/reduceTaskSlots":{
-        "metric":"mapred.tasktracker.reduceTaskSlots",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/tasktracker/reduces_running":{
-        "metric":"mapred.tasktracker.reduces_running",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/tasktracker/tasks_completed":{
-        "metric":"mapred.tasktracker.tasks_completed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/tasktracker/tasks_failed_ping":{
-        "metric":"mapred.tasktracker.tasks_failed_ping",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/tasktracker/tasks_failed_timeout":{
-        "metric":"mapred.tasktracker.tasks_failed_timeout",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_buffers":{
-        "metric":"mem_buffers",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_cached":{
-        "metric":"mem_cached",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_free":{
-        "metric":"mem_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_shared":{
-        "metric":"mem_shared",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_total":{
-        "metric":"mem_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/swap_free":{
-        "metric":"swap_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/swap_total":{
-        "metric":"swap_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/bytes_in":{
-        "metric":"bytes_in",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/bytes_out":{
-        "metric":"bytes_out",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/pkts_in":{
-        "metric":"pkts_in",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/pkts_out":{
-        "metric":"pkts_out",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/process/proc_run":{
-        "metric":"proc_run",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/process/proc_total":{
-        "metric":"proc_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/NumOpenConnections":{
-        "metric":"rpc.rpc.NumOpenConnections",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/ReceivedBytes":{
-        "metric":"rpc.rpc.ReceivedBytes",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcProcessingTime_avg_time":{
-        "metric":"rpc.rpc.RpcProcessingTime_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcProcessingTime_num_ops":{
-        "metric":"rpc.rpc.RpcProcessingTime_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcQueueTime_avg_time":{
-        "metric":"rpc.rpc.RpcQueueTime_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcQueueTime_num_ops":{
-        "metric":"rpc.rpc.RpcQueueTime_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcSlowResponse_avg_time":{
-        "metric":"rpc.rpc.RpcSlowResponse_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcSlowResponse_num_ops":{
-        "metric":"rpc.rpc.RpcSlowResponse_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/SentBytes":{
-        "metric":"rpc.rpc.SentBytes",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/abort/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.abort.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/abort/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.abort.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/abort_avg_time":{
-        "metric":"rpc.rpc.abort_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/abort_num_ops":{
-        "metric":"rpc.rpc.abort_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addColumn_avg_time":{
-        "metric":"rpc.rpc.addColumn_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addColumn_num_ops":{
-        "metric":"rpc.rpc.addColumn_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addToOnlineRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.addToOnlineRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addToOnlineRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.addToOnlineRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addToOnlineRegions_avg_time":{
-        "metric":"rpc.rpc.addToOnlineRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addToOnlineRegions_num_ops":{
-        "metric":"rpc.rpc.addToOnlineRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/assign_avg_time":{
-        "metric":"rpc.rpc.assign_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/assign_num_ops":{
-        "metric":"rpc.rpc.assign_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balanceSwitch_avg_time":{
-        "metric":"rpc.rpc.balanceSwitch_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balanceSwitch_num_ops":{
-        "metric":"rpc.rpc.balanceSwitch_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balance_avg_time":{
-        "metric":"rpc.rpc.balance_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balance_num_ops":{
-        "metric":"rpc.rpc.balance_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.bulkLoadHFiles.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.bulkLoadHFiles.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles_avg_time":{
-        "metric":"rpc.rpc.bulkLoadHFiles_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles_num_ops":{
-        "metric":"rpc.rpc.bulkLoadHFiles_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/callQueueLen":{
-        "metric":"rpc.rpc.callQueueLen",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.checkAndDelete.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.checkAndDelete.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete_avg_time":{
-        "metric":"rpc.rpc.checkAndDelete_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete_num_ops":{
-        "metric":"rpc.rpc.checkAndDelete_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.checkAndPut.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.checkAndPut.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut_avg_time":{
-        "metric":"rpc.rpc.checkAndPut_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut_num_ops":{
-        "metric":"rpc.rpc.checkAndPut_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkOOME/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.checkOOME.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkOOME/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.checkOOME.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkOOME_avg_time":{
-        "metric":"rpc.rpc.checkOOME_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkOOME_num_ops":{
-        "metric":"rpc.rpc.checkOOME_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.close.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.close.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.closeRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.closeRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion_avg_time":{
-        "metric":"rpc.rpc.closeRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion_num_ops":{
-        "metric":"rpc.rpc.closeRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close_avg_time":{
-        "metric":"rpc.rpc.close_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close_num_ops":{
-        "metric":"rpc.rpc.close_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.compactRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.compactRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion_avg_time":{
-        "metric":"rpc.rpc.compactRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion_num_ops":{
-        "metric":"rpc.rpc.compactRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/createTable_avg_time":{
-        "metric":"rpc.rpc.createTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/createTable_num_ops":{
-        "metric":"rpc.rpc.createTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.delete.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.delete.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteColumn_avg_time":{
-        "metric":"rpc.rpc.deleteColumn_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteColumn_num_ops":{
-        "metric":"rpc.rpc.deleteColumn_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteTable_avg_time":{
-        "metric":"rpc.rpc.deleteTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteTable_num_ops":{
-        "metric":"rpc.rpc.deleteTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete_avg_time":{
-        "metric":"rpc.rpc.delete_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete_num_ops":{
-        "metric":"rpc.rpc.delete_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/disableTable_avg_time":{
-        "metric":"rpc.rpc.disableTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/disableTable_num_ops":{
-        "metric":"rpc.rpc.disableTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/enableTable_avg_time":{
-        "metric":"rpc.rpc.enableTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/enableTable_num_ops":{
-        "metric":"rpc.rpc.enableTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.execCoprocessor.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.execCoprocessor.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor_avg_time":{
-        "metric":"rpc.rpc.execCoprocessor_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor_num_ops":{
-        "metric":"rpc.rpc.execCoprocessor_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.exists.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.exists.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists_avg_time":{
-        "metric":"rpc.rpc.exists_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists_num_ops":{
-        "metric":"rpc.rpc.exists_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.flushRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.flushRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion_avg_time":{
-        "metric":"rpc.rpc.flushRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion_num_ops":{
-        "metric":"rpc.rpc.flushRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.get.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.get.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getAlterStatus_avg_time":{
-        "metric":"rpc.rpc.getAlterStatus_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getAlterStatus_num_ops":{
-        "metric":"rpc.rpc.getAlterStatus_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries_avg_time":{
-        "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries_num_ops":{
-        "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getCatalogTracker/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getCatalogTracker.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getCatalogTracker/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getCatalogTracker.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getCatalogTracker_avg_time":{
-        "metric":"rpc.rpc.getCatalogTracker_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getCatalogTracker_num_ops":{
-        "metric":"rpc.rpc.getCatalogTracker_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getClosestRowBefore.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getClosestRowBefore.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore_avg_time":{
-        "metric":"rpc.rpc.getClosestRowBefore_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore_num_ops":{
-        "metric":"rpc.rpc.getClosestRowBefore_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClusterStatus_avg_time":{
-        "metric":"rpc.rpc.getClusterStatus_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClusterStatus_num_ops":{
-        "metric":"rpc.rpc.getClusterStatus_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getConfiguration/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getConfiguration.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getConfiguration/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getConfiguration.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getConfiguration_avg_time":{
-        "metric":"rpc.rpc.getConfiguration_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getConfiguration_num_ops":{
-        "metric":"rpc.rpc.getConfiguration_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getFromOnlineRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getFromOnlineRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getFromOnlineRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getFromOnlineRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getFromOnlineRegions_avg_time":{
-        "metric":"rpc.rpc.getFromOnlineRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getFromOnlineRegions_num_ops":{
-        "metric":"rpc.rpc.getFromOnlineRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getHServerInfo.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getHServerInfo.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo_avg_time":{
-        "metric":"rpc.rpc.getHServerInfo_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo_num_ops":{
-        "metric":"rpc.rpc.getHServerInfo_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHTableDescriptors_avg_time":{
-        "metric":"rpc.rpc.getHTableDescriptors_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHTableDescriptors_num_ops":{
-        "metric":"rpc.rpc.getHTableDescriptors_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getOnlineRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getOnlineRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions_avg_time":{
-        "metric":"rpc.rpc.getOnlineRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions_num_ops":{
-        "metric":"rpc.rpc.getOnlineRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getProtocolSignature.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getProtocolSignature.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature_avg_time":{
-        "metric":"rpc.rpc.getProtocolSignature_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature_num_ops":{
-        "metric":"rpc.rpc.getProtocolSignature_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getProtocolVersion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getProtocolVersion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion_avg_time":{
-        "metric":"rpc.rpc.getProtocolVersion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion_num_ops":{
-        "metric":"rpc.rpc.getProtocolVersion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getRegionInfo.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getRegionInfo.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo_avg_time":{
-        "metric":"rpc.rpc.getRegionInfo_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo_num_ops":{
-        "metric":"rpc.rpc.getRegionInfo_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getServerName/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getServerName.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getServerName/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getServerName.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getServerName_avg_time":{
-        "metric":"rpc.rpc.getServerName_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getServerName_num_ops":{
-        "metric":"rpc.rpc.getServerName_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getZooKeeper/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getZooKeeper.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getZooKeeper/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getZooKeeper.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getZooKeeper_avg_time":{
-        "metric":"rpc.rpc.getZooKeeper_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getZooKeeper_num_ops":{
-        "metric":"rpc.rpc.getZooKeeper_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get_avg_time":{
-        "metric":"rpc.rpc.get_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get_num_ops":{
-        "metric":"rpc.rpc.get_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.increment.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.increment.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.incrementColumnValue.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.incrementColumnValue.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue_avg_time":{
-        "metric":"rpc.rpc.incrementColumnValue_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue_num_ops":{
-        "metric":"rpc.rpc.incrementColumnValue_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment_avg_time":{
-        "metric":"rpc.rpc.increment_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment_num_ops":{
-        "metric":"rpc.rpc.increment_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isAborted/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.isAborted.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isAborted/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.isAborted.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isAborted_avg_time":{
-        "metric":"rpc.rpc.isAborted_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isAborted_num_ops":{
-        "metric":"rpc.rpc.isAborted_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isMasterRunning_avg_time":{
-        "metric":"rpc.rpc.isMasterRunning_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isMasterRunning_num_ops":{
-        "metric":"rpc.rpc.isMasterRunning_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isStopped/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.isStopped.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isStopped/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.isStopped.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isStopped_avg_time":{
-        "metric":"rpc.rpc.isStopped_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isStopped_num_ops":{
-        "metric":"rpc.rpc.isStopped_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.lockRow.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.lockRow.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow_avg_time":{
-        "metric":"rpc.rpc.lockRow_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow_num_ops":{
-        "metric":"rpc.rpc.lockRow_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyColumn_avg_time":{
-        "metric":"rpc.rpc.modifyColumn_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyColumn_num_ops":{
-        "metric":"rpc.rpc.modifyColumn_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyTable_avg_time":{
-        "metric":"rpc.rpc.modifyTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyTable_num_ops":{
-        "metric":"rpc.rpc.modifyTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/move_avg_time":{
-        "metric":"rpc.rpc.move_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/move_num_ops":{
-        "metric":"rpc.rpc.move_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.multi.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.multi.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi_avg_time":{
-        "metric":"rpc.rpc.multi_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi_num_ops":{
-        "metric":"rpc.rpc.multi_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.next.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.next.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next_avg_time":{
-        "metric":"rpc.rpc.next_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next_num_ops":{
-        "metric":"rpc.rpc.next_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/offline_avg_time":{
-        "metric":"rpc.rpc.offline_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/offline_num_ops":{
-        "metric":"rpc.rpc.offline_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.openRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.openRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion_avg_time":{
-        "metric":"rpc.rpc.openRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion_num_ops":{
-        "metric":"rpc.rpc.openRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.openRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.openRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions_avg_time":{
-        "metric":"rpc.rpc.openRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions_num_ops":{
-        "metric":"rpc.rpc.openRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.openScanner.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.openScanner.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner_avg_time":{
-        "metric":"rpc.rpc.openScanner_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner_num_ops":{
-        "metric":"rpc.rpc.openScanner_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.put.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.put.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put_avg_time":{
-        "metric":"rpc.rpc.put_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put_num_ops":{
-        "metric":"rpc.rpc.put_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerReport_avg_time":{
-        "metric":"rpc.rpc.regionServerReport_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerReport_num_ops":{
-        "metric":"rpc.rpc.regionServerReport_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerStartup_avg_time":{
-        "metric":"rpc.rpc.regionServerStartup_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerStartup_num_ops":{
-        "metric":"rpc.rpc.regionServerStartup_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.removeFromOnlineRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.removeFromOnlineRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/removeFromOnlineRegions_avg_time":{
-        "metric":"rpc.rpc.removeFromOnlineRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/removeFromOnlineRegions_num_ops":{
-        "metric":"rpc.rpc.removeFromOnlineRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.replicateLogEntries.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.replicateLogEntries.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries_avg_time":{
-        "metric":"rpc.rpc.replicateLogEntries_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries_num_ops":{
-        "metric":"rpc.rpc.replicateLogEntries_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/reportRSFatalError_avg_time":{
-        "metric":"rpc.rpc.reportRSFatalError_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/reportRSFatalError_num_ops":{
-        "metric":"rpc.rpc.reportRSFatalError_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.rollHLogWriter.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.rollHLogWriter.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter_avg_time":{
-        "metric":"rpc.rpc.rollHLogWriter_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter_num_ops":{
-        "metric":"rpc.rpc.rollHLogWriter_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthenticationFailures":{
-        "metric":"rpc.rpc.rpcAuthenticationFailures",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthenticationSuccesses":{
-        "metric":"rpc.rpc.rpcAuthenticationSuccesses",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthorizationFailures":{
-        "metric":"rpc.rpc.rpcAuthorizationFailures",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthorizationSuccesses":{
-        "metric":"rpc.rpc.rpcAuthorizationSuccesses",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/shutdown_avg_time":{
-        "metric":"rpc.rpc.shutdown_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/shutdown_num_ops":{
-        "metric":"rpc.rpc.shutdown_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.splitRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.splitRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion_avg_time":{
-        "metric":"rpc.rpc.splitRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion_num_ops":{
-        "metric":"rpc.rpc.splitRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.stop.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.stop.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stopMaster_avg_time":{
-        "metric":"rpc.rpc.stopMaster_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stopMaster_num_ops":{
-        "metric":"rpc.rpc.stopMaster_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop_avg_time":{
-        "metric":"rpc.rpc.stop_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop_num_ops":{
-        "metric":"rpc.rpc.stop_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unassign_avg_time":{
-        "metric":"rpc.rpc.unassign_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unassign_num_ops":{
-        "metric":"rpc.rpc.unassign_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.unlockRow.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.unlockRow.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow_avg_time":{
-        "metric":"rpc.rpc.unlockRow_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow_num_ops":{
-        "metric":"rpc.rpc.unlockRow_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/canCommit_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.canCommit_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/canCommit_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.canCommit_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/commitPending_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.commitPending_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/commitPending_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.commitPending_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/done_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.done_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/done_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.done_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getBlockLocalPathInfo_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getBlockLocalPathInfo_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getBlockLocalPathInfo_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getBlockLocalPathInfo_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getMapCompletionEvents_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getMapCompletionEvents_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getMapCompletionEvents_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getMapCompletionEvents_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getProtocolVersion_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getProtocolVersion_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getTask_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getTask_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getTask_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getTask_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/ping_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.ping_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/ping_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.ping_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/statusUpdate_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.statusUpdate_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/statusUpdate_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.statusUpdate_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/ugi/loginFailure_avg_time":{
-        "metric":"ugi.ugi.loginFailure_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/ugi/loginFailure_num_ops":{
-        "metric":"ugi.ugi.loginFailure_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/ugi/loginSuccess_avg_time":{
-        "metric":"ugi.ugi.loginSuccess_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/ugi/loginSuccess_num_ops":{
-        "metric":"ugi.ugi.loginSuccess_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      }
-    },
-
-    "HBASE_MASTER":{
-
-      "metrics/boottime":{
-        "metric":"boottime",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_aidle":{
-        "metric":"cpu_aidle",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_idle":{
-        "metric":"cpu_idle",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_nice":{
-        "metric":"cpu_nice",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_num":{
-        "metric":"cpu_num",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_speed":{
-        "metric":"cpu_speed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_system":{
-        "metric":"cpu_system",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_user":{
-        "metric":"cpu_user",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_wio":{
-        "metric":"cpu_wio",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/disk_free":{
-        "metric":"disk_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/disk_total":{
-        "metric":"disk_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/part_max_used":{
-        "metric":"part_max_used",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/master/cluster_requests":{
-        "metric":"hbase.master.cluster_requests",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/hbase/master/splitSize_avg_time":{
-        "metric":"hbase.master.splitSize_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/master/splitSize_num_ops":{
-        "metric":"hbase.master.splitSize_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/master/splitTime_avg_time":{
-        "metric":"hbase.master.splitTime_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/master/splitTime_num_ops":{
-        "metric":"hbase.master.splitTime_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/gcCount":{
-        "metric":"jvm.metrics.gcCount",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/gcTimeMillis":{
-        "metric":"jvm.metrics.gcTimeMillis",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/logError":{
-        "metric":"jvm.metrics.logError",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/logFatal":{
-        "metric":"jvm.metrics.logFatal",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/logInfo":{
-        "metric":"jvm.metrics.logInfo",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/logWarn":{
-        "metric":"jvm.metrics.logWarn",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/maxMemoryM":{
-        "metric":"jvm.metrics.maxMemoryM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/memHeapCommittedM":{
-        "metric":"jvm.metrics.memHeapCommittedM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/memHeapUsedM":{
-        "metric":"jvm.metrics.memHeapUsedM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/memNonHeapCommittedM":{
-        "metric":"jvm.metrics.memNonHeapCommittedM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/memNonHeapUsedM":{
-        "metric":"jvm.metrics.memNonHeapUsedM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsBlocked":{
-        "metric":"jvm.metrics.threadsBlocked",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsNew":{
-        "metric":"jvm.metrics.threadsNew",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsRunnable":{
-        "metric":"jvm.metrics.threadsRunnable",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsTerminated":{
-        "metric":"jvm.metrics.threadsTerminated",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsTimedWaiting":{
-        "metric":"jvm.metrics.threadsTimedWaiting",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsWaiting":{
-        "metric":"jvm.metrics.threadsWaiting",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_fifteen":{
-        "metric":"load_fifteen",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_five":{
-        "metric":"load_five",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_one":{
-        "metric":"load_one",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_buffers":{
-        "metric":"mem_buffers",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_cached":{
-        "metric":"mem_cached",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_free":{
-        "metric":"mem_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_shared":{
-        "metric":"mem_shared",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_total":{
-        "metric":"mem_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/swap_free":{
-        "metric":"swap_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/swap_total":{
-        "metric":"swap_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/bytes_in":{
-        "metric":"bytes_in",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/bytes_out":{
-        "metric":"bytes_out",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/pkts_in":{
-        "metric":"pkts_in",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/pkts_out":{
-        "metric":"pkts_out",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/process/proc_run":{
-        "metric":"proc_run",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/process/proc_total":{
-        "metric":"proc_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/NumOpenConnections":{
-        "metric":"rpc.rpc.NumOpenConnections",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/ReceivedBytes":{
-        "metric":"rpc.rpc.ReceivedBytes",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcProcessingTime_avg_time":{
-        "metric":"rpc.rpc.RpcProcessingTime_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcProcessingTime_num_ops":{
-        "metric":"rpc.rpc.RpcProcessingTime_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcQueueTime_avg_time":{
-        "metric":"rpc.rpc.RpcQueueTime_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcQueueTime_num_ops":{
-        "metric":"rpc.rpc.RpcQueueTime_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcSlowResponse_avg_time":{
-        "metric":"rpc.rpc.RpcSlowResponse_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcSlowResponse_num_ops":{
-        "metric":"rpc.rpc.RpcSlowResponse_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/SentBytes":{
-        "metric":"rpc.rpc.SentBytes",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/addColumn/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.addColumn.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addColumn/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.addColumn.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addColumn_avg_time":{
-        "metric":"rpc.rpc.addColumn_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addColumn_num_ops":{
-        "metric":"rpc.rpc.addColumn_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/assign/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.assign.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/assign/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.assign.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/assign_avg_time":{
-        "metric":"rpc.rpc.assign_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/assign_num_ops":{
-        "metric":"rpc.rpc.assign_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balance/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.balance.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balance/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.balance.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balanceSwitch/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.balanceSwitch.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balanceSwitch/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.balanceSwitch.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balanceSwitch_avg_time":{
-        "metric":"rpc.rpc.balanceSwitch_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balanceSwitch_num_ops":{
-        "metric":"rpc.rpc.balanceSwitch_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balance_avg_time":{
-        "metric":"rpc.rpc.balance_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balance_num_ops":{
-        "metric":"rpc.rpc.balance_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles_avg_time":{
-        "metric":"rpc.rpc.bulkLoadHFiles_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles_num_ops":{
-        "metric":"rpc.rpc.bulkLoadHFiles_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/callQueueLen":{
-        "metric":"rpc.rpc.callQueueLen",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete_avg_time":{
-        "metric":"rpc.rpc.checkAndDelete_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete_num_ops":{
-        "metric":"rpc.rpc.checkAndDelete_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut_avg_time":{
-        "metric":"rpc.rpc.checkAndPut_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut_num_ops":{
-        "metric":"rpc.rpc.checkAndPut_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion_avg_time":{
-        "metric":"rpc.rpc.closeRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion_num_ops":{
-        "metric":"rpc.rpc.closeRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close_avg_time":{
-        "metric":"rpc.rpc.close_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close_num_ops":{
-        "metric":"rpc.rpc.close_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion_avg_time":{
-        "metric":"rpc.rpc.compactRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion_num_ops":{
-        "metric":"rpc.rpc.compactRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/createTable/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.createTable.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/createTable/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.createTable.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/createTable_avg_time":{
-        "metric":"rpc.rpc.createTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/createTable_num_ops":{
-        "metric":"rpc.rpc.createTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteColumn/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.deleteColumn.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteColumn/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.deleteColumn.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteColumn_avg_time":{
-        "metric":"rpc.rpc.deleteColumn_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteColumn_num_ops":{
-        "metric":"rpc.rpc.deleteColumn_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteTable/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.deleteTable.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteTable/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.deleteTable.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteTable_avg_time":{
-        "metric":"rpc.rpc.deleteTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteTable_num_ops":{
-        "metric":"rpc.rpc.deleteTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete_avg_time":{
-        "metric":"rpc.rpc.delete_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete_num_ops":{
-        "metric":"rpc.rpc.delete_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/disableTable/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.disableTable.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/disableTable/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.disableTable.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/disableTable_avg_time":{
-        "metric":"rpc.rpc.disableTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/disableTable_num_ops":{
-        "metric":"rpc.rpc.disableTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/enableTable/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.enableTable.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/enableTable/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.enableTable.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/enableTable_avg_time":{
-        "metric":"rpc.rpc.enableTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/enableTable_num_ops":{
-        "metric":"rpc.rpc.enableTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor_avg_time":{
-        "metric":"rpc.rpc.execCoprocessor_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor_num_ops":{
-        "metric":"rpc.rpc.execCoprocessor_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists_avg_time":{
-        "metric":"rpc.rpc.exists_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists_num_ops":{
-        "metric":"rpc.rpc.exists_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion_avg_time":{
-        "metric":"rpc.rpc.flushRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion_num_ops":{
-        "metric":"rpc.rpc.flushRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getAlterStatus/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getAlterStatus.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getAlterStatus/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getAlterStatus.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getAlterStatus_avg_time":{
-        "metric":"rpc.rpc.getAlterStatus_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getAlterStatus_num_ops":{
-        "metric":"rpc.rpc.getAlterStatus_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries_avg_time":{
-        "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries_num_ops":{
-        "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore_avg_time":{
-        "metric":"rpc.rpc.getClosestRowBefore_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore_num_ops":{
-        "metric":"rpc.rpc.getClosestRowBefore_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClusterStatus/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getClusterStatus.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClusterStatus/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getClusterStatus.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClusterStatus_avg_time":{
-        "metric":"rpc.rpc.getClusterStatus_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClusterStatus_num_ops":{
-        "metric":"rpc.rpc.getClusterStatus_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo_avg_time":{
-        "metric":"rpc.rpc.getHServerInfo_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo_num_ops":{
-        "metric":"rpc.rpc.getHServerInfo_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHTableDescriptors/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getHTableDescriptors.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHTableDescriptors/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getHTableDescriptors.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHTableDescriptors_avg_time":{
-        "metric":"rpc.rpc.getHTableDescriptors_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHTableDescriptors_num_ops":{
-        "metric":"rpc.rpc.getHTableDescriptors_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions_avg_time":{
-        "metric":"rpc.rpc.getOnlineRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions_num_ops":{
-        "metric":"rpc.rpc.getOnlineRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getProtocolSignature.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getProtocolSignature.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature_avg_time":{
-        "metric":"rpc.rpc.getProtocolSignature_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature_num_ops":{
-        "metric":"rpc.rpc.getProtocolSignature_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getProtocolVersion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getProtocolVersion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion_avg_time":{
-        "metric":"rpc.rpc.getProtocolVersion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion_num_ops":{
-        "metric":"rpc.rpc.getProtocolVersion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo_avg_time":{
-        "metric":"rpc.rpc.getRegionInfo_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo_num_ops":{
-        "metric":"rpc.rpc.getRegionInfo_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get_avg_time":{
-        "metric":"rpc.rpc.get_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get_num_ops":{
-        "metric":"rpc.rpc.get_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue_avg_time":{
-        "metric":"rpc.rpc.incrementColumnValue_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue_num_ops":{
-        "metric":"rpc.rpc.incrementColumnValue_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment_avg_time":{
-        "metric":"rpc.rpc.increment_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment_num_ops":{
-        "metric":"rpc.rpc.increment_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isMasterRunning/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.isMasterRunning.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isMasterRunning/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.isMasterRunning.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isMasterRunning_avg_time":{
-        "metric":"rpc.rpc.isMasterRunning_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isMasterRunning_num_ops":{
-        "metric":"rpc.rpc.isMasterRunning_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow_avg_time":{
-        "metric":"rpc.rpc.lockRow_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow_num_ops":{
-        "metric":"rpc.rpc.lockRow_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyColumn/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.modifyColumn.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyColumn/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.modifyColumn.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyColumn_avg_time":{
-        "metric":"rpc.rpc.modifyColumn_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyColumn_num_ops":{
-        "metric":"rpc.rpc.modifyColumn_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyTable/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.modifyTable.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyTable/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.modifyTable.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyTable_avg_time":{
-        "metric":"rpc.rpc.modifyTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyTable_num_ops":{
-        "metric":"rpc.rpc.modifyTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/move/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.move.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/move/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.move.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/move_avg_time":{
-        "metric":"rpc.rpc.move_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/move_num_ops":{
-        "metric":"rpc.rpc.move_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi_avg_time":{
-        "metric":"rpc.rpc.multi_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi_num_ops":{
-        "metric":"rpc.rpc.multi_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next_avg_time":{
-        "metric":"rpc.rpc.next_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next_num_ops":{
-        "metric":"rpc.rpc.next_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/offline/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.offline.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/offline/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.offline.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/offline_avg_time":{
-        "metric":"rpc.rpc.offline_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/offline_num_ops":{
-        "metric":"rpc.rpc.offline_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion_avg_time":{
-        "metric":"rpc.rpc.openRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion_num_ops":{
-        "metric":"rpc.rpc.openRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions_avg_time":{
-        "metric":"rpc.rpc.openRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions_num_ops":{
-        "metric":"rpc.rpc.openRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner_avg_time":{
-        "metric":"rpc.rpc.openScanner_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner_num_ops":{
-        "metric":"rpc.rpc.openScanner_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put_avg_time":{
-        "metric":"rpc.rpc.put_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put_num_ops":{
-        "metric":"rpc.rpc.put_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerReport/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.regionServerReport.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerReport/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.regionServerReport.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerReport_avg_time":{
-        "metric":"rpc.rpc.regionServerReport_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerReport_num_ops":{
-        "metric":"rpc.rpc.regionServerReport_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerStartup/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.regionServerStartup.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerStartup/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.regionServerStartup.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerStartup_avg_time":{
-        "metric":"rpc.rpc.regionServerStartup_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerStartup_num_ops":{
-        "metric":"rpc.rpc.regionServerStartup_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries_avg_time":{
-        "metric":"rpc.rpc.replicateLogEntries_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries_num_ops":{
-        "metric":"rpc.rpc.replicateLogEntries_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/reportRSFatalError/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.reportRSFatalError.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/reportRSFatalError/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.reportRSFatalError.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/reportRSFatalError_avg_time":{
-        "metric":"rpc.rpc.reportRSFatalError_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/reportRSFatalError_num_ops":{
-        "metric":"rpc.rpc.reportRSFatalError_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter_avg_time":{
-        "metric":"rpc.rpc.rollHLogWriter_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter_num_ops":{
-        "metric":"rpc.rpc.rollHLogWriter_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthenticationFailures":{
-        "metric":"rpc.rpc.rpcAuthenticationFailures",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthenticationSuccesses":{
-        "metric":"rpc.rpc.rpcAuthenticationSuccesses",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthorizationFailures":{
-        "metric":"rpc.rpc.rpcAuthorizationFailures",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthorizationSuccesses":{
-        "metric":"rpc.rpc.rpcAuthorizationSuccesses",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/shutdown/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.shutdown.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/shutdown/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.shutdown.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/shutdown_avg_time":{
-        "metric":"rpc.rpc.shutdown_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/shutdown_num_ops":{
-        "metric":"rpc.rpc.shutdown_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion_avg_time":{
-        "metric":"rpc.rpc.splitRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion_num_ops":{
-        "metric":"rpc.rpc.splitRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stopMaster/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.stopMaster.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stopMaster/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.stopMaster.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stopMaster_avg_time":{
-        "metric":"rpc.rpc.stopMaster_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stopMaster_num_ops":{
-        "metric":"rpc.rpc.stopMaster_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop_avg_time":{
-        "metric":"rpc.rpc.stop_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop_num_ops":{
-        "metric":"rpc.rpc.stop_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unassign/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.unassign.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unassign/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.unassign.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unassign_avg_time":{
-        "metric":"rpc.rpc.unassign_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unassign_num_ops":{
-        "metric":"rpc.rpc.unassign_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow_avg_time":{
-        "metric":"rpc.rpc.unlockRow_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow_num_ops":{
-        "metric":"rpc.rpc.unlockRow_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      }
-    },
-
-    "HBASE_CLIENT":{
-
-      "metrics/boottime":{
-        "metric":"boottime",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_aidle":{
-        "metric":"cpu_aidle",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_idle":{
-        "metric":"cpu_idle",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_nice":{
-        "metric":"cpu_nice",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_num":{
-        "metric":"cpu_num",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_speed":{
-        "metric":"cpu_speed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_system":{
-        "metric":"cpu_system",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_user":{
-        "metric":"cpu_user",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_wio":{
-        "metric":"cpu_wio",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/disk_free":{
-        "metric":"disk_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/disk_total":{
-        "metric":"disk_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/part_max_used":{
-        "metric":"part_max_used",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/gcCount":{
-        "metric":"jvm.metrics.gcCount",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/gcTimeMillis":{
-        "metric":"jvm.metrics.gcTimeMillis",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/logError":{
-        "metric":"jvm.metrics.logError",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/logFatal":{
-        "metric":"jvm.metrics.logFatal",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/logInfo":{
-        "metric":"jvm.metrics.logInfo",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/logWarn":{
-        "metric":"jvm.metrics.logWarn",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/maxMemoryM":{
-        "metric":"jvm.metrics.maxMemoryM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/memHeapCommittedM":{
-        "metric":"jvm.metrics.memHeapCommittedM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/memHeapUsedM":{
-        "metric":"jvm.metrics.memHeapUsedM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/memNonHeapCommittedM":{
-        "metric":"jvm.metrics.memNonHeapCommittedM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/memNonHeapUsedM":{
-        "metric":"jvm.metrics.memNonHeapUsedM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsBlocked":{
-        "metric":"jvm.metrics.threadsBlocked",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsNew":{
-        "metric":"jvm.metrics.threadsNew",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsRunnable":{
-        "metric":"jvm.metrics.threadsRunnable",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsTerminated":{
-        "metric":"jvm.metrics.threadsTerminated",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsTimedWaiting":{
-        "metric":"jvm.metrics.threadsTimedWaiting",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsWaiting":{
-        "metric":"jvm.metrics.threadsWaiting",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_fifteen":{
-        "metric":"load_fifteen",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_five":{
-        "metric":"load_five",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_one":{
-        "metric":"load_one",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_buffers":{
-        "metric":"mem_buffers",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_cached":{
-        "metric":"mem_cached",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_free":{
-        "metric":"mem_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_shared":{
-        "metric":"mem_shared",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_total":{
-        "metric":"mem_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/swap_free":{
-        "metric":"swap_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/swap_total":{
-        "metric":"swap_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/bytes_in":{
-        "metric":"bytes_in",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/bytes_out":{
-        "metric":"bytes_out",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/pkts_in":{
-        "metric":"pkts_in",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/pkts_out":{
-        "metric":"pkts_out",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/process/proc_run":{
-        "metric":"proc_run",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/process/proc_total":{
-        "metric":"proc_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/NumOpenConnections":{
-        "metric":"rpc.rpc.NumOpenConnections",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/ReceivedBytes":{
-        "metric":"rpc.rpc.ReceivedBytes",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcProcessingTime_avg_time":{
-        "metric":"rpc.rpc.RpcProcessingTime_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcProcessingTime_num_ops":{
-        "metric":"rpc.rpc.RpcProcessingTime_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcQueueTime_avg_time":{
-        "metric":"rpc.rpc.RpcQueueTime_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcQueueTime_num_ops":{
-        "metric":"rpc.rpc.RpcQueueTime_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcSlowResponse_avg_time":{
-        "metric":"rpc.rpc.RpcSlowResponse_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcSlowResponse_num_ops":{
-        "metric":"rpc.rpc.RpcSlowResponse_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/SentBytes":{
-        "metric":"rpc.rpc.SentBytes",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/abort/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.abort.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/abort/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.abort.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/abort_avg_time":{
-        "metric":"rpc.rpc.abort_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/abort_num_ops":{
-        "metric":"rpc.rpc.abort_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addColumn_avg_time":{
-        "metric":"rpc.rpc.addColumn_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addColumn_num_ops":{
-        "metric":"rpc.rpc.addColumn_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addToOnlineRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.addToOnlineRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addToOnlineRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.addToOnlineRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addToOnlineRegions_avg_time":{
-        "metric":"rpc.rpc.addToOnlineRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addToOnlineRegions_num_ops":{
-        "metric":"rpc.rpc.addToOnlineRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/assign_avg_time":{
-        "metric":"rpc.rpc.assign_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/assign_num_ops":{
-        "metric":"rpc.rpc.assign_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balanceSwitch_avg_time":{
-        "metric":"rpc.rpc.balanceSwitch_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balanceSwitch_num_ops":{
-        "metric":"rpc.rpc.balanceSwitch_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balance_avg_time":{
-        "metric":"rpc.rpc.balance_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balance_num_ops":{
-        "metric":"rpc.rpc.balance_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.bulkLoadHFiles.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.bulkLoadHFiles.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles_avg_time":{
-        "metric":"rpc.rpc.bulkLoadHFiles_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles_num_ops":{
-        "metric":"rpc.rpc.bulkLoadHFiles_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/callQueueLen":{
-        "metric":"rpc.rpc.callQueueLen",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.checkAndDelete.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.checkAndDelete.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete_avg_time":{
-        "metric":"rpc.rpc.checkAndDelete_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete_num_ops":{
-        "metric":"rpc.rpc.checkAndDelete_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.checkAndPut.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.checkAndPut.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut_avg_time":{
-        "metric":"rpc.rpc.checkAndPut_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut_num_ops":{
-        "metric":"rpc.rpc.checkAndPut_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkOOME/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.checkOOME.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkOOME/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.checkOOME.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkOOME_avg_time":{
-        "metric":"rpc.rpc.checkOOME_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkOOME_num_ops":{
-        "metric":"rpc.rpc.checkOOME_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.close.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.close.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.closeRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.closeRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion_avg_time":{
-        "metric":"rpc.rpc.closeRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion_num_ops":{
-        "metric":"rpc.rpc.closeRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close_avg_time":{
-        "metric":"rpc.rpc.close_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close_num_ops":{
-        "metric":"rpc.rpc.close_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.compactRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.compactRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion_avg_time":{
-        "metric":"rpc.rpc.compactRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion_num_ops":{
-        "metric":"rpc.rpc.compactRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/createTable_avg_time":{
-        "metric":"rpc.rpc.createTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/createTable_num_ops":{
-        "metric":"rpc.rpc.createTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.delete.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.delete.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteColumn_avg_time":{
-        "metric":"rpc.rpc.deleteColumn_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteColumn_num_ops":{
-        "metric":"rpc.rpc.deleteColumn_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteTable_avg_time":{
-        "metric":"rpc.rpc.deleteTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteTable_num_ops":{
-        "metric":"rpc.rpc.deleteTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete_avg_time":{
-        "metric":"rpc.rpc.delete_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete_num_ops":{
-        "metric":"rpc.rpc.delete_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/disableTable_avg_time":{
-        "metric":"rpc.rpc.disableTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/disableTable_num_ops":{
-        "metric":"rpc.rpc.disableTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/enableTable_avg_time":{
-        "metric":"rpc.rpc.enableTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/enableTable_num_ops":{
-        "metric":"rpc.rpc.enableTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.execCoprocessor.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.execCoprocessor.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor_avg_time":{
-        "metric":"rpc.rpc.execCoprocessor_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor_num_ops":{
-        "metric":"rpc.rpc.execCoprocessor_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.exists.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.exists.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists_avg_time":{
-        "metric":"rpc.rpc.exists_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists_num_ops":{
-        "metric":"rpc.rpc.exists_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.flushRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.flushRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion_avg_time":{
-        "metric":"rpc.rpc.flushRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion_num_ops":{
-        "metric":"rpc.rpc.flushRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.get.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.get.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getAlterStatus_avg_time":{
-        "metric":"rpc.rpc.getAlterStatus_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getAlterStatus_num_ops":{
-        "metric":"rpc.rpc.getAlterStatus_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries_avg_time":{
-        "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries_num_ops":{
-        "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getCatalogTracker/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getCatalogTracker.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getCatalogTracker/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getCatalogTracker.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getCatalogTracker_avg_time":{
-        "metric":"rpc.rpc.getCatalogTracker_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getCatalogTracker_num_ops":{
-        "metric":"rpc.rpc.getCatalogTracker_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getClosestRowBefore.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getClosestRowBefore.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore_avg_time":{
-        "metric":"rpc.rpc.getClosestRowBefore_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore_num_ops":{
-        "metric":"rpc.rpc.getClosestRowBefore_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClusterStatus_avg_time":{
-        "metric":"rpc.rpc.getClusterStatus_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClusterStatus_num_ops":{
-        "metric":"rpc.rpc.getClusterStatus_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getConfiguration/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getConfiguration.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getConfiguration/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getConfiguration.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getConfiguration_avg_time":{
-        "metric":"rpc.rpc.getConfiguration_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getConfiguration_num_ops":{
-        "metric":"rpc.rpc.getConfiguration_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getFromOnlineRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getFromOnlineRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getFromOnlineRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getFromOnlineRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getFromOnlineRegions_avg_time":{
-        "metric":"rpc.rpc.getFromOnlineRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getFromOnlineRegions_num_ops":{
-        "metric":"rpc.rpc.getFromOnlineRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getHServerInfo.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getHServerInfo.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo_avg_time":{
-        "metric":"rpc.rpc.getHServerInfo_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo_num_ops":{
-        "metric":"rpc.rpc.getHServerInfo_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHTableDescriptors_avg_time":{
-        "metric":"rpc.rpc.getHTableDescriptors_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHTableDescriptors_num_ops":{
-        "metric":"rpc.rpc.getHTableDescriptors_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getOnlineRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getOnlineRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions_avg_time":{
-        "metric":"rpc.rpc.getOnlineRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions_num_ops":{
-        "metric":"rpc.rpc.getOnlineRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getProtocolSignature.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getProtocolSignature.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature_avg_time":{
-        "metric":"rpc.rpc.getProtocolSignature_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature_num_ops":{
-        "metric":"rpc.rpc.getProtocolSignature_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getProtocolVersion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getProtocolVersion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion_avg_time":{
-        "metric":"rpc.rpc.getProtocolVersion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion_num_ops":{
-        "metric":"rpc.rpc.getProtocolVersion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getRegionInfo.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getRegionInfo.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo_avg_time":{
-        "metric":"rpc.rpc.getRegionInfo_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo_num_ops":{
-        "metric":"rpc.rpc.getRegionInfo_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getServerName/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getServerName.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getServerName/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getServerName.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getServerName_avg_time":{
-        "metric":"rpc.rpc.getServerName_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getServerName_num_ops":{
-        "metric":"rpc.rpc.getServerName_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getZooKeeper/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getZooKeeper.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getZooKeeper/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getZooKeeper.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getZooKeeper_avg_time":{
-        "metric":"rpc.rpc.getZooKeeper_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getZooKeeper_num_ops":{
-        "metric":"rpc.rpc.getZooKeeper_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get_avg_time":{
-        "metric":"rpc.rpc.get_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get_num_ops":{
-        "metric":"rpc.rpc.get_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.increment.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.increment.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.incrementColumnValue.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.incrementColumnValue.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue_avg_time":{
-        "metric":"rpc.rpc.incrementColumnValue_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue_num_ops":{
-        "metric":"rpc.rpc.incrementColumnValue_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment_avg_time":{
-        "metric":"rpc.rpc.increment_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment_num_ops":{
-        "metric":"rpc.rpc.increment_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isAborted/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.isAborted.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isAborted/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.isAborted.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isAborted_avg_time":{
-        "metric":"rpc.rpc.isAborted_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isAborted_num_ops":{
-        "metric":"rpc.rpc.isAborted_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isMasterRunning_avg_time":{
-        "metric":"rpc.rpc.isMasterRunning_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isMasterRunning_num_ops":{
-        "metric":"rpc.rpc.isMasterRunning_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isStopped/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.isStopped.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isStopped/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.isStopped.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isStopped_avg_time":{
-        "metric":"rpc.rpc.isStopped_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isStopped_num_ops":{
-        "metric":"rpc.rpc.isStopped_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.lockRow.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.lockRow.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow_avg_time":{
-        "metric":"rpc.rpc.lockRow_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow_num_ops":{
-        "metric":"rpc.rpc.lockRow_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyColumn_avg_time":{
-        "metric":"rpc.rpc.modifyColumn_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyColumn_num_ops":{
-        "metric":"rpc.rpc.modifyColumn_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyTable_avg_time":{
-        "metric":"rpc.rpc.modifyTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyTable_num_ops":{
-        "metric":"rpc.rpc.modifyTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/move_avg_time":{
-        "metric":"rpc.rpc.move_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/move_num_ops":{
-        "metric":"rpc.rpc.move_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.multi.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.multi.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi_avg_time":{
-        "metric":"rpc.rpc.multi_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi_num_ops":{
-        "metric":"rpc.rpc.multi_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.next.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.next.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next_avg_time":{
-        "metric":"rpc.rpc.next_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next_num_ops":{
-        "metric":"rpc.rpc.next_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/offline_avg_time":{
-        "metric":"rpc.rpc.offline_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/offline_num_ops":{
-        "metric":"rpc.rpc.offline_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.openRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.openRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion_avg_time":{
-        "metric":"rpc.rpc.openRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion_num_ops":{
-        "metric":"rpc.rpc.openRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.openRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.openRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions_avg_time":{
-        "metric":"rpc.rpc.openRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions_num_ops":{
-        "metric":"rpc.rpc.openRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.openScanner.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.openScanner.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner_avg_time":{
-        "metric":"rpc.rpc.openScanner_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner_num_ops":{
-        "metric":"rpc.rpc.openScanner_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.put.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.put.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put_avg_time":{
-        "metric":"rpc.rpc.put_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put_num_ops":{
-        "metric":"rpc.rpc.put_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerReport_avg_time":{
-        "metric":"rpc.rpc.regionServerReport_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerReport_num_ops":{
-        "metric":"rpc.rpc.regionServerReport_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerStartup_avg_time":{
-        "metric":"rpc.rpc.regionServerStartup_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerStartup_num_ops":{
-        "metric":"rpc.rpc.regionServerStartup_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.removeFromOnlineRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.removeFromOnlineRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/removeFromOnlineRegions_avg_time":{
-        "metric":"rpc.rpc.removeFromOnlineRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/removeFromOnlineRegions_num_ops":{
-        "metric":"rpc.rpc.removeFromOnlineRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.replicateLogEntries.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.replicateLogEntries.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries_avg_time":{
-        "metric":"rpc.rpc.replicateLogEntries_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries_num_ops":{
-        "metric":"rpc.rpc.replicateLogEntries_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/reportRSFatalError_avg_time":{
-        "metric":"rpc.rpc.reportRSFatalError_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/reportRSFatalError_num_ops":{
-        "metric":"rpc.rpc.reportRSFatalError_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.rollHLogWriter.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.rollHLogWriter.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter_avg_time":{
-        "metric":"rpc.rpc.rollHLogWriter_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter_num_ops":{
-        "metric":"rpc.rpc.rollHLogWriter_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthenticationFailures":{
-        "metric":"rpc.rpc.rpcAuthenticationFailures",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthenticationSuccesses":{
-        "metric":"rpc.rpc.rpcAuthenticationSuccesses",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthorizationFailures":{
-        "metric":"rpc.rpc.rpcAuthorizationFailures",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthorizationSuccesses":{
-        "metric":"rpc.rpc.rpcAuthorizationSuccesses",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/shutdown_avg_time":{
-        "metric":"rpc.rpc.shutdown_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/shutdown_num_ops":{
-        "metric":"rpc.rpc.shutdown_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.splitRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.splitRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion_avg_time":{
-        "metric":"rpc.rpc.splitRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion_num_ops":{
-        "metric":"rpc.rpc.splitRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.stop.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.stop.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stopMaster_avg_time":{
-        "metric":"rpc.rpc.stopMaster_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stopMaster_num_ops":{
-        "metric":"rpc.rpc.stopMaster_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop_avg_time":{
-        "metric":"rpc.rpc.stop_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop_num_ops":{
-        "metric":"rpc.rpc.stop_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unassign_avg_time":{
-        "metric":"rpc.rpc.unassign_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unassign_num_ops":{
-        "metric":"rpc.rpc.unassign_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.unlockRow.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.unlockRow.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow_avg_time":{
-        "metric":"rpc.rpc.unlockRow_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow_num_ops":{
-        "metric":"rpc.rpc.unlockRow_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/canCommit_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.canCommit_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/canCommit_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.canCommit_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/commitPending_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.commitPending_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/commitPending_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.commitPending_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/done_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.done_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/done_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.done_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getBlockLocalPathInfo_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getBlockLocalPathInfo_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getBlockLocalPathInfo_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getBlockLocalPathInfo_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getMapCompletionEvents_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getMapCompletionEvents_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getMapCompletionEvents_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getMapCompletionEvents_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getProtocolVersion_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getProtocolVersion_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getTask_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getTask_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getTask_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getTask_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/ping_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.ping_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/ping_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.ping_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/statusUpdate_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.statusUpdate_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/statusUpdate_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.statusUpdate_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/ugi/loginFailure_avg_time":{
-        "metric":"ugi.ugi.loginFailure_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/ugi/loginFailure_num_ops":{
-        "metric":"ugi.ugi.loginFailure_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/ugi/loginSuccess_avg_time":{
-        "metric":"ugi.ugi.loginSuccess_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/ugi/loginSuccess_num_ops":{
-        "metric":"ugi.ugi.loginSuccess_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      }
-    },
-
-    "HBASE_REGIONSERVER":{
-
-      "metrics/boottime":{
-        "metric":"boottime",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_aidle":{
-        "metric":"cpu_aidle",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_idle":{
-        "metric":"cpu_idle",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_nice":{
-        "metric":"cpu_nice",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_num":{
-        "metric":"cpu_num",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_speed":{
-        "metric":"cpu_speed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_system":{
-        "metric":"cpu_system",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_user":{
-        "metric":"cpu_user",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_wio":{
-        "metric":"cpu_wio",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/disk_free":{
-        "metric":"disk_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/disk_total":{
-        "metric":"disk_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/part_max_used":{
-        "metric":"part_max_used",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/blockCacheCount":{
-        "metric":"hbase.regionserver.blockCacheCount",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/blockCacheEvictedCount":{
-        "metric":"hbase.regionserver.blockCacheEvictedCount",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/blockCacheFree":{
-        "metric":"hbase.regionserver.blockCacheFree",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/blockCacheHitCachingRatio":{
-        "metric":"hbase.regionserver.blockCacheHitCachingRatio",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/blockCacheHitCount":{
-        "metric":"hbase.regionserver.blockCacheHitCount",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/blockCacheHitRatio":{
-        "metric":"hbase.regionserver.blockCacheHitRatio",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/blockCacheMissCount":{
-        "metric":"hbase.regionserver.blockCacheMissCount",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/blockCacheSize":{
-        "metric":"hbase.regionserver.blockCacheSize",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/compactionQueueSize":{
-        "metric":"hbase.regionserver.compactionQueueSize",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/compactionSize_avg_time":{
-        "metric":"hbase.regionserver.compactionSize_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/compactionSize_num_ops":{
-        "metric":"hbase.regionserver.compactionSize_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/compactionTime_avg_time":{
-        "metric":"hbase.regionserver.compactionTime_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/compactionTime_num_ops":{
-        "metric":"hbase.regionserver.compactionTime_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/deleteRequestLatency_75th_percentile":{
-        "metric":"hbase.regionserver.deleteRequestLatency_75th_percentile",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/deleteRequestLatency_95th_percentile":{
-        "metric":"hbase.regionserver.deleteRequestLatency_95th_percentile",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/deleteRequestLatency_99th_percentile":{
-        "metric":"hbase.regionserver.deleteRequestLatency_99th_percentile",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/deleteRequestLatency_max":{
-        "metric":"hbase.regionserver.deleteRequestLatency_max",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/deleteRequestLatency_mean":{
-        "metric":"hbase.regionserver.deleteRequestLatency_mean",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/deleteRequestLatency_median":{
-        "metric":"hbase.regionserver.deleteRequestLatency_median",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/deleteRequestLatency_min":{
-        "metric":"hbase.regionserver.deleteRequestLatency_min",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/deleteRequestLatency_num_ops":{
-        "metric":"hbase.regionserver.deleteRequestLatency_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/deleteRequestLatency_std_dev":{
-        "metric":"hbase.regionserver.deleteRequestLatency_std_dev",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/flushQueueSize":{
-        "metric":"hbase.regionserver.flushQueueSize",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/flushSize_avg_time":{
-        "metric":"hbase.regionserver.flushSize_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/flushSize_num_ops":{
-        "metric":"hbase.regionserver.flushSize_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/flushTime_avg_time":{
-        "metric":"hbase.regionserver.flushTime_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/flushTime_num_ops":{
-        "metric":"hbase.regionserver.flushTime_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsReadLatencyHistogram_75th_percentile":{
-        "metric":"hbase.regionserver.fsReadLatencyHistogram_75th_percentile",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsReadLatencyHistogram_95th_percentile":{
-        "metric":"hbase.regionserver.fsReadLatencyHistogram_95th_percentile",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsReadLatencyHistogram_99th_percentile":{
-        "metric":"hbase.regionserver.fsReadLatencyHistogram_99th_percentile",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsReadLatencyHistogram_max":{
-        "metric":"hbase.regionserver.fsReadLatencyHistogram_max",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsReadLatencyHistogram_mean":{
-        "metric":"hbase.regionserver.fsReadLatencyHistogram_mean",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsReadLatencyHistogram_median":{
-        "metric":"hbase.regionserver.fsReadLatencyHistogram_median",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsReadLatencyHistogram_min":{
-        "metric":"hbase.regionserver.fsReadLatencyHistogram_min",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsReadLatencyHistogram_num_ops":{
-        "metric":"hbase.regionserver.fsReadLatencyHistogram_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsReadLatencyHistogram_std_dev":{
-        "metric":"hbase.regionserver.fsReadLatencyHistogram_std_dev",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsReadLatency_avg_time":{
-        "metric":"hbase.regionserver.fsReadLatency_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsReadLatency_num_ops":{
-        "metric":"hbase.regionserver.fsReadLatency_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsSyncLatency_avg_time":{
-        "metric":"hbase.regionserver.fsSyncLatency_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsSyncLatency_num_ops":{
-        "metric":"hbase.regionserver.fsSyncLatency_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsWriteLatencyHistogram_75th_percentile":{
-        "metric":"hbase.regionserver.fsWriteLatencyHistogram_75th_percentile",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsWriteLatencyHistogram_95th_percentile":{
-        "metric":"hbase.regionserver.fsWriteLatencyHistogram_95th_percentile",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsWriteLatencyHistogram_99th_percentile":{
-        "metric":"hbase.regionserver.fsWriteLatencyHistogram_99th_percentile",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsWriteLatencyHistogram_max":{
-        "metric":"hbase.regionserver.fsWriteLatencyHistogram_max",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsWriteLatencyHistogram_mean":{
-        "metric":"hbase.regionserver.fsWriteLatencyHistogram_mean",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsWriteLatencyHistogram_median":{
-        "metric":"hbase.regionserver.fsWriteLatencyHistogram_median",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsWriteLatencyHistogram_min":{
-        "metric":"hbase.regionserver.fsWriteLatencyHistogram_min",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsWriteLatencyHistogram_num_ops":{
-        "metric":"hbase.regionserver.fsWriteLatencyHistogram_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsWriteLatencyHistogram_std_dev":{
-        "metric":"hbase.regionserver.fsWriteLatencyHistogram_std_dev",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsWriteLatency_avg_time":{
-        "metric":"hbase.regionserver.fsWriteLatency_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsWriteLatency_num_ops":{
-        "metric":"hbase.regionserver.fsWriteLatency_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/getRequestLatency_75th_percentile":{
-        "metric":"hbase.regionserver.getRequestLatency_75th_percentile",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/getRequestLatency_95th_percentile":{
-        "metric":"hbase.regionserver.getRequestLatency_95th_percentile",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/getRequestLatency_99th_percentile":{
-        "metric":"hbase.regionserver.getRequestLatency_99th_percentile",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/getRequestLatency_max":{
-        "metric":"hbase.regionserver.getRequestLatency_max",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/getRequestLatency_mean":{
-        "metric":"hbase.regionserver.getRequestLatency_mean",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/getRequestLatency_median":{
-        "metric":"hbase.regionserver.getRequestLatency_median",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/getRequestLatency_min":{
-        "metric":"hbase.regionserver.getRequestLatency_min",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/getRequestLatency_num_ops":{
-        "metric":"hbase.regionserver.getRequestLatency_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/getRequestLatency_std_dev":{
-        "metric":"hbase.regionserver.getRequestLatency_std_dev",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/hdfsBlocksLocalityIndex":{
-        "metric":"hbase.regionserver.hdfsBlocksLocalityIndex",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/hlogFileCount":{
-        "metric":"hbase.regionserver.hlogFileCount",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/memstoreSizeMB":{
-        "metric":"hbase.regionserver.memstoreSizeMB",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/putRequestLatency_75th_percentile":{
-        "metric":"hbase.regionserver.putRequestLatency_75th_percentile",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/putRequestLatency_95th_percentile":{
-        "metric":"hbase.regionserver.putRequestLatency_95th_percentile",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/putRequestLatency_99th_percentile":{
-        "metric":"hbase.regionserver.putRequestLatency_99th_percentile",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/putRequestLatency_max":{
-        "metric":"hbase.regionserver.putRequestLatency_max",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/putRequestLatency_mean":{
-        "metric":"hbase.regionserver.putRequestLatency_mean",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/putRequestLatency_median":{
-        "metric":"hbase.regionserver.putRequestLatency_median",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/putRequestLatency_min":{
-        "metric":"hbase.regionserver.putRequestLatency_min",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/putRequestLatency_num_ops":{
-        "metric":"hbase.regionserver.putRequestLatency_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/putRequestLatency_std_dev":{
-        "metric":"hbase.regionserver.putRequestLatency_std_dev",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/readRequestsCount":{
-        "metric":"hbase.regionserver.readRequestsCount",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/regions":{
-        "metric":"hbase.regionserver.regions",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/requests":{
-        "metric":"hbase.regionserver.requests",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/rootIndexSizeKB":{
-        "metric":"hbase.regionserver.rootIndexSizeKB",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/storefileIndexSizeMB":{
-        "metric":"hbase.regionserver.storefileIndexSizeMB",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/storefiles":{
-        "metric":"hbase.regionserver.storefiles",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/stores":{
-        "metric":"hbase.regionserver.stores",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/totalStaticBloomSizeKB":{
-        "metric":"hbase.regionserver.totalStaticBloomSizeKB",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/totalStaticIndexSizeKB":{
-        "metric":"hbase.regionserver.totalStaticIndexSizeKB",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/writeRequestsCount":{
-        "metric":"hbase.regionserver.writeRequestsCount",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/gcCount":{
-        "metric":"jvm.metrics.gcCount",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/gcTimeMillis":{
-        "metric":"jvm.metrics.gcTimeMillis",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/logError":{
-        "metric":"jvm.metrics.logError",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/logFatal":{
-        "metric":"jvm.metrics.logFatal",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/logInfo":{
-        "metric":"jvm.metrics.logInfo",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/logWarn":{
-        "metric":"jvm.metrics.logWarn",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/maxMemoryM":{
-        "metric":"jvm.metrics.maxMemoryM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/memHeapCommittedM":{
-        "metric":"jvm.metrics.memHeapCommittedM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/memHeapUsedM":{
-        "metric":"jvm.metrics.memHeapUsedM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/memNonHeapCommittedM":{
-        "metric":"jvm.metrics.memNonHeapCommittedM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/memNonHeapUsedM":{
-        "metric":"jvm.metrics.memNonHeapUsedM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsBlocked":{
-        "metric":"jvm.metrics.threadsBlocked",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsNew":{
-        "metric":"jvm.metrics.threadsNew",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsRunnable":{
-        "metric":"jvm.metrics.threadsRunnable",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsTerminated":{
-        "metric":"jvm.metrics.threadsTerminated",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsTimedWaiting":{
-        "metric":"jvm.metrics.threadsTimedWaiting",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsWaiting":{
-        "metric":"jvm.metrics.threadsWaiting",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_fifteen":{
-        "metric":"load_fifteen",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_five":{
-        "metric":"load_five",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_one":{
-        "metric":"load_one",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_buffers":{
-        "metric":"mem_buffers",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_cached":{
-        "metric":"mem_cached",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_free":{
-        "metric":"mem_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_shared":{
-        "metric":"mem_shared",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_total":{
-        "metric":"mem_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/swap_free":{
-        "metric":"swap_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/swap_total":{
-        "metric":"swap_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/bytes_in":{
-        "metric":"bytes_in",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/bytes_out":{
-        "metric":"bytes_out",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/pkts_in":{
-        "metric":"pkts_in",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/pkts_out":{
-        "metric":"pkts_out",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/process/proc_run":{
-        "metric":"proc_run",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/process/proc_total":{
-        "metric":"proc_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/NumOpenConnections":{
-        "metric":"rpc.rpc.NumOpenConnections",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/ReceivedBytes":{
-        "metric":"rpc.rpc.ReceivedBytes",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcProcessingTime_avg_time":{
-        "metric":"rpc.rpc.RpcProcessingTime_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcProcessingTime_num_ops":{
-        "metric":"rpc.rpc.RpcProcessingTime_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcQueueTime_avg_time":{
-        "metric":"rpc.rpc.RpcQueueTime_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcQueueTime_num_ops":{
-        "metric":"rpc.rpc.RpcQueueTime_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcSlowResponse_avg_time":{
-        "metric":"rpc.rpc.RpcSlowResponse_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcSlowResponse_num_ops":{
-        "metric":"rpc.rpc.RpcSlowResponse_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/SentBytes":{
-        "metric":"rpc.rpc.SentBytes",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/abort/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.abort.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/abort/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.abort.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/abort_avg_time":{
-        "metric":"rpc.rpc.abort_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/abort_num_ops":{
-        "metric":"rpc.rpc.abort_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addColumn_avg_time":{
-        "metric":"rpc.rpc.addColumn_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addColumn_num_ops":{
-        "metric":"rpc.rpc.addColumn_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addToOnlineRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.addToOnlineRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addToOnlineRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.addToOnlineRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addToOnlineRegions_avg_time":{
-        "metric":"rpc.rpc.addToOnlineRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addToOnlineRegions_num_ops":{
-        "metric":"rpc.rpc.addToOnlineRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/assign_avg_time":{
-        "metric":"rpc.rpc.assign_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/assign_num_ops":{
-        "metric":"rpc.rpc.assign_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balanceSwitch_avg_time":{
-        "metric":"rpc.rpc.balanceSwitch_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balanceSwitch_num_ops":{
-        "metric":"rpc.rpc.balanceSwitch_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balance_avg_time":{
-        "metric":"rpc.rpc.balance_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balance_num_ops":{
-        "metric":"rpc.rpc.balance_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.bulkLoadHFiles.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.bulkLoadHFiles.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles_avg_time":{
-        "metric":"rpc.rpc.bulkLoadHFiles_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles_num_ops":{
-        "metric":"rpc.rpc.bulkLoadHFiles_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/callQueueLen":{
-        "metric":"rpc.rpc.callQueueLen",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.checkAndDelete.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.checkAndDelete.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete_avg_time":{
-        "metric":"rpc.rpc.checkAndDelete_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete_num_ops":{
-        "metric":"rpc.rpc.checkAndDelete_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.checkAndPut.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.checkAndPut.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut_avg_time":{
-        "metric":"rpc.rpc.checkAndPut_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut_num_ops":{
-        "metric":"rpc.rpc.checkAndPut_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkOOME/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.checkOOME.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkOOME/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.checkOOME.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkOOME_avg_time":{
-        "metric":"rpc.rpc.checkOOME_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkOOME_num_ops":{
-        "metric":"rpc.rpc.checkOOME_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.close.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.close.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.closeRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.closeRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion_avg_time":{
-        "metric":"rpc.rpc.closeRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion_num_ops":{
-        "metric":"rpc.rpc.closeRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close_avg_time":{
-        "metric":"rpc.rpc.close_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close_num_ops":{
-        "metric":"rpc.rpc.close_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.compactRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.compactRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion_avg_time":{
-        "metric":"rpc.rpc.compactRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion_num_ops":{
-        "metric":"rpc.rpc.compactRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/createTable_avg_time":{
-        "metric":"rpc.rpc.createTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/createTable_num_ops":{
-        "metric":"rpc.rpc.createTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.delete.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.delete.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteColumn_avg_time":{
-        "metric":"rpc.rpc.deleteColumn_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteColumn_num_ops":{
-        "metric":"rpc.rpc.deleteColumn_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteTable_avg_time":{
-        "metric":"rpc.rpc.deleteTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteTable_num_ops":{
-        "metric":"rpc.rpc.deleteTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete_avg_time":{
-        "metric":"rpc.rpc.delete_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete_num_ops":{
-        "metric":"rpc.rpc.delete_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/disableTable_avg_time":{
-        "metric":"rpc.rpc.disableTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/disableTable_num_ops":{
-        "metric":"rpc.rpc.disableTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/enableTable_avg_time":{
-        "metric":"rpc.rpc.enableTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/enableTable_num_ops":{
-        "metric":"rpc.rpc.enableTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.execCoprocessor.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.execCoprocessor.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor_avg_time":{
-        "metric":"rpc.rpc.execCoprocessor_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor_num_ops":{
-        "metric":"rpc.rpc.execCoprocessor_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.exists.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.exists.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists_avg_time":{
-        "metric":"rpc.rpc.exists_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists_num_ops":{
-        "metric":"rpc.rpc.exists_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.flushRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.flushRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion_avg_time":{
-        "metric":"rpc.rpc.flushRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion_num_ops":{
-        "metric":"rpc.rpc.flushRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.get.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.get.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getAlterStatus_avg_time":{
-        "metric":"rpc.rpc.getAlterStatus_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getAlterStatus_num_ops":{
-        "metric":"rpc.rpc.getAlterStatus_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries_avg_time":{
-        "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries_num_ops":{
-        "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getCatalogTracker/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getCatalogTracker.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getCatalogTracker/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getCatalogTracker.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getCatalogTracker_avg_time":{
-        "metric":"rpc.rpc.getCatalogTracker_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getCatalogTracker_num_ops":{
-        "metric":"rpc.rpc.getCatalogTracker_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getClosestRowBefore.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getClosestRowBefore.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore_avg_time":{
-        "metric":"rpc.rpc.getClosestRowBefore_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore_num_ops":{
-        "metric":"rpc.rpc.getClosestRowBefore_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClusterStatus_avg_time":{
-        "metric":"rpc.rpc.getClusterStatus_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClusterStatus_num_ops":{
-        "metric":"rpc.rpc.getClusterStatus_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getConfiguration/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getConfiguration.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getConfiguration/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getConfiguration.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getConfiguration_avg_time":{
-        "metric":"rpc.rpc.getConfiguration_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getConfiguration_num_ops":{
-        "metric":"rpc.rpc.getConfiguration_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getFromOnlineRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getFromOnlineRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getFromOnlineRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getFromOnlineRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getFromOnlineRegions_avg_time":{
-        "metric":"rpc.rpc.getFromOnlineRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getFromOnlineRegions_num_ops":{
-        "metric":"rpc.rpc.getFromOnlineRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getHServerInfo.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getHServerInfo.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo_avg_time":{
-        "metric":"rpc.rpc.getHServerInfo_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo_num_ops":{
-        "metric":"rpc.rpc.getHServerInfo_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHTableDescriptors_avg_time":{
-        "metric":"rpc.rpc.getHTableDescriptors_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHTableDescriptors_num_ops":{
-        "metric":"rpc.rpc.getHTableDescriptors_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getOnlineRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getOnlineRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions_avg_time":{
-        "metric":"rpc.rpc.getOnlineRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions_num_ops":{
-        "metric":"rpc.rpc.getOnlineRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getProtocolSignature.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getProtocolSignature.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature_avg_time":{
-        "metric":"rpc.rpc.getProtocolSignature_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature_num_ops":{
-        "metric":"rpc.rpc.getProtocolSignature_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getProtocolVersion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getProtocolVersion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion_avg_time":{
-        "metric":"rpc.rpc.getProtocolVersion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion_num_ops":{
-        "metric":"rpc.rpc.getProtocolVersion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getRegionInfo.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getRegionInfo.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo_avg_time":{
-        "metric":"rpc.rpc.getRegionInfo_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo_num_ops":{
-        "metric":"rpc.rpc.getRegionInfo_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getServerName/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getServerName.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getServerName/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getServerName.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getServerName_avg_time":{
-        "metric":"rpc.rpc.getServerName_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getServerName_num_ops":{
-        "metric":"rpc.rpc.getServerName_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getZooKeeper/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getZooKeeper.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getZooKeeper/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getZooKeeper.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getZooKeeper_avg_time":{
-        "metric":"rpc.rpc.getZooKeeper_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getZooKeeper_num_ops":{
-        "metric":"rpc.rpc.getZooKeeper_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get_avg_time":{
-        "metric":"rpc.rpc.get_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get_num_ops":{
-        "metric":"rpc.rpc.get_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.increment.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.increment.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.incrementColumnValue.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.incrementColumnValue.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue_avg_time":{
-        "metric":"rpc.rpc.incrementColumnValue_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue_num_ops":{
-        "metric":"rpc.rpc.incrementColumnValue_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment_avg_time":{
-        "metric":"rpc.rpc.increment_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment_num_ops":{
-        "metric":"rpc.rpc.increment_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isAborted/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.isAborted.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isAborted/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.isAborted.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isAborted_avg_time":{
-        "metric":"rpc.rpc.isAborted_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isAborted_num_ops":{
-        "metric":"rpc.rpc.isAborted_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isMasterRunning_avg_time":{
-        "metric":"rpc.rpc.isMasterRunning_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isMasterRunning_num_ops":{
-        "metric":"rpc.rpc.isMasterRunning_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isStopped/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.isStopped.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isStopped/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.isStopped.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isStopped_avg_time":{
-        "metric":"rpc.rpc.isStopped_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isStopped_num_ops":{
-        "metric":"rpc.rpc.isStopped_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.lockRow.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.lockRow.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow_avg_time":{
-        "metric":"rpc.rpc.lockRow_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow_num_ops":{
-        "metric":"rpc.rpc.lockRow_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyColumn_avg_time":{
-        "metric":"rpc.rpc.modifyColumn_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyColumn_num_ops":{
-        "metric":"rpc.rpc.modifyColumn_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyTable_avg_time":{
-        "metric":"rpc.rpc.modifyTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyTable_num_ops":{
-        "metric":"rpc.rpc.modifyTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/move_avg_time":{
-        "metric":"rpc.rpc.move_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/move_num_ops":{
-        "metric":"rpc.rpc.move_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.multi.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.multi.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi_avg_time":{
-        "metric":"rpc.rpc.multi_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi_num_ops":{
-        "metric":"rpc.rpc.multi_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.next.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.next.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next_avg_time":{
-        "metric":"rpc.rpc.next_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next_num_ops":{
-        "metric":"rpc.rpc.next_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/offline_avg_time":{
-        "metric":"rpc.rpc.offline_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/offline_num_ops":{
-        "metric":"rpc.rpc.offline_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.openRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.openRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion_avg_time":{
-        "metric":"rpc.rpc.openRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion_num_ops":{
-        "metric":"rpc.rpc.openRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.openRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.openRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions_avg_time":{
-        "metric":"rpc.rpc.openRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions_num_ops":{
-        "metric":"rpc.rpc.openRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.openScanner.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.openScanner.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner_avg_time":{
-        "metric":"rpc.rpc.openScanner_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner_num_ops":{
-        "metric":"rpc.rpc.openScanner_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.put.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.put.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put_avg_time":{
-        "metric":"rpc.rpc.put_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put_num_ops":{
-        "metric":"rpc.rpc.put_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerReport_avg_time":{
-        "metric":"rpc.rpc.regionServerReport_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerReport_num_ops":{
-        "metric":"rpc.rpc.regionServerReport_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerStartup_avg_time":{
-        "metric":"rpc.rpc.regionServerStartup_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerStartup_num_ops":{
-        "metric":"rpc.rpc.regionServerStartup_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.removeFromOnlineRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.removeFromOnlineRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/removeFromOnlineRegions_avg_time":{
-        "metric":"rpc.rpc.removeFromOnlineRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/removeFromOnlineRegions_num_ops":{
-        "metric":"rpc.rpc.removeFromOnlineRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.replicateLogEntries.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.replicateLogEntries.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries_avg_time":{
-        "metric":"rpc.rpc.replicateLogEntries_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries_num_ops":{
-        "metric":"rpc.rpc.replicateLogEntries_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/reportRSFatalError_avg_time":{
-        "metric":"rpc.rpc.reportRSFatalError_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/reportRSFatalError_num_ops":{
-        "metric":"rpc.rpc.reportRSFatalError_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.rollHLogWriter.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.rollHLogWriter.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter_avg_time":{
-        "metric":"rpc.rpc.rollHLogWriter_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter_num_ops":{
-        "metric":"rpc.rpc.rollHLogWriter_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthenticationFailures":{
-        "metric":"rpc.rpc.rpcAuthenticationFailures",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthenticationSuccesses":{
-        "metric":"rpc.rpc.rpcAuthenticationSuccesses",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthorizationFailures":{
-        "metric":"rpc.rpc.rpcAuthorizationFailures",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthorizationSuccesses":{
-        "metric":"rpc.rpc.rpcAuthorizationSuccesses",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/shutdown_avg_time":{
-        "metric":"rpc.rpc.shutdown_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/shutdown_num_ops":{
-        "metric":"rpc.rpc.shutdown_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.splitRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.splitRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion_avg_time":{
-        "metric":"rpc.rpc.splitRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion_num_ops":{
-        "metric":"rpc.rpc.splitRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.stop.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.stop.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stopMaster_avg_time":{
-        "metric":"rpc.rpc.stopMaster_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stopMaster_num_ops":{
-        "metric":"rpc.rpc.stopMaster_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop_avg_time":{
-        "metric":"rpc.rpc.stop_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop_num_ops":{
-        "metric":"rpc.rpc.stop_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unassign_avg_time":{
-        "metric":"rpc.rpc.unassign_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unassign_num_ops":{
-        "metric":"rpc.rpc.unassign_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.unlockRow.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.unlockRow.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow_avg_time":{
-        "metric":"rpc.rpc.unlockRow_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow_num_ops":{
-        "metric":"rpc.rpc.unlockRow_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/canCommit_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.canCommit_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/canCommit_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.canCommit_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/commitPending_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.commitPending_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/commitPending_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.commitPending_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/done_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.done_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/done_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.done_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getBlockLocalPathInfo_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getBlockLocalPathInfo_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getBlockLocalPathInfo_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getBlockLocalPathInfo_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getMapCompletionEvents_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getMapCompletionEvents_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getMapCompletionEvents_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getMapCompletionEvents_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getProtocolVersion_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getProtocolVersion_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getTask_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getTask_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getTask_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getTask_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/ping_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.ping_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/ping_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.ping_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/statusUpdate_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.statusUpdate_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/statusUpdate_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.statusUpdate_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/ugi/loginFailure_avg_time":{
-        "metric":"ugi.ugi.loginFailure_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/ugi/loginFailure_num_ops":{
-        "metric":"ugi.ugi.loginFailure_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/ugi/loginSuccess_avg_time":{
-        "metric":"ugi.ugi.loginSuccess_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/ugi/loginSuccess_num_ops":{
-        "metric":"ugi.ugi.loginSuccess_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      }
-    }
-
-  },
-
-  "HostComponent":{
-
-    "NAMENODE":{
-
-      "metrics/boottime":{
-        "metric":"boottime",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_aidle":{
-        "metric":"cpu_aidle",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_idle":{
-        "metric":"cpu_idle",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_nice":{
-        "metric":"cpu_nice",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_num":{
-        "metric":"cpu_num",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_speed":{
-        "metric":"cpu_speed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_system":{
-        "metric":"cpu_system",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_user":{
-        "metric":"cpu_user",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_wio":{
-        "metric":"cpu_wio",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/FSNamesystem/BlockCapacity":{
-        "metric":"dfs.FSNamesystem.BlockCapacity",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/FSNamesystem/BlocksTotal":{
-        "metric":"dfs.FSNamesystem.BlocksTotal",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/FSNamesystem/CapacityRemainingGB":{
-        "metric":"dfs.FSNamesystem.CapacityRemainingGB",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/FSNamesystem/CapacityTotalGB":{
-        "metric":"dfs.FSNamesystem.CapacityTotalGB",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/FSNamesystem/CapacityUsedGB":{
-        "metric":"dfs.FSNamesystem.CapacityUsedGB",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/FSNamesystem/CorruptBlocks":{
-        "metric":"dfs.FSNamesystem.CorruptBlocks",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/FSNamesystem/ExcessBlocks":{
-        "metric":"dfs.FSNamesystem.ExcessBlocks",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/FSNamesystem/FilesTotal":{
-        "metric":"dfs.FSNamesystem.FilesTotal",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/FSNamesystem/MissingBlocks":{
-        "metric":"dfs.FSNamesystem.MissingBlocks",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/FSNamesystem/PendingDeletionBlocks":{
-        "metric":"dfs.FSNamesystem.PendingDeletionBlocks",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/FSNamesystem/PendingReplicationBlocks":{
-        "metric":"dfs.FSNamesystem.PendingReplicationBlocks",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks":{
-        "metric":"dfs.FSNamesystem.ScheduledReplicationBlocks",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/FSNamesystem/TotalLoad":{
-        "metric":"dfs.FSNamesystem.TotalLoad",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/FSNamesystem/UnderReplicatedBlocks":{
-        "metric":"dfs.FSNamesystem.UnderReplicatedBlocks",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/AddBlockOps":{
-        "metric":"dfs.namenode.AddBlockOps",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/CreateFileOps":{
-        "metric":"dfs.namenode.CreateFileOps",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/DeleteFileOps":{
-        "metric":"dfs.namenode.DeleteFileOps",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/FileInfoOps":{
-        "metric":"dfs.namenode.FileInfoOps",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/FilesAppended":{
-        "metric":"dfs.namenode.FilesAppended",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/FilesCreated":{
-        "metric":"dfs.namenode.FilesCreated",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/FilesDeleted":{
-        "metric":"dfs.namenode.FilesDeleted",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/FilesInGetListingOps":{
-        "metric":"dfs.namenode.FilesInGetListingOps",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/FilesRenamed":{
-        "metric":"dfs.namenode.FilesRenamed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/GetBlockLocations":{
-        "metric":"dfs.namenode.GetBlockLocations",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/GetListingOps":{
-        "metric":"dfs.namenode.GetListingOps",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/JournalTransactionsBatchedInSync":{
-        "metric":"dfs.namenode.JournalTransactionsBatchedInSync",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/SafemodeTime":{
-        "metric":"dfs.namenode.SafemodeTime",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/Syncs_avg_time":{
-        "metric":"dfs.namenode.Syncs_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/Syncs_num_ops":{
-        "metric":"dfs.namenode.Syncs_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/Transactions_avg_time":{
-        "metric":"dfs.namenode.Transactions_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/Transactions_num_ops":{
-        "metric":"dfs.namenode.Transactions_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/blockReport_avg_time":{
-        "metric":"dfs.namenode.blockReport_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/blockReport_num_ops":{
-        "metric":"dfs.namenode.blockReport_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/dfs/namenode/fsImageLoadTime":{
-        "metric":"dfs.namenode.fsImageLoadTime",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/disk_free":{
-        "metric":"disk_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/disk_total":{
-        "metric":"disk_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/part_max_used":{
-        "metric":"part_max_used",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/gcCount":{
-        "metric":"jvm.metrics.gcCount",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/gcTimeMillis":{
-        "metric":"jvm.metrics.gcTimeMillis",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/logError":{
-        "metric":"jvm.metrics.logError",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/logFatal":{
-        "metric":"jvm.metrics.logFatal",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/logInfo":{
-        "metric":"jvm.metrics.logInfo",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/logWarn":{
-        "metric":"jvm.metrics.logWarn",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/memHeapCommittedM":{
-        "metric":"jvm.metrics.memHeapCommittedM",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/memHeapUsedM":{
-        "metric":"jvm.metrics.memHeapUsedM",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/memNonHeapCommittedM":{
-        "metric":"jvm.metrics.memNonHeapCommittedM",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/memNonHeapUsedM":{
-        "metric":"jvm.metrics.memNonHeapUsedM",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/threadsBlocked":{
-        "metric":"jvm.metrics.threadsBlocked",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/threadsNew":{
-        "metric":"jvm.metrics.threadsNew",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/threadsRunnable":{
-        "metric":"jvm.metrics.threadsRunnable",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/threadsTerminated":{
-        "metric":"jvm.metrics.threadsTerminated",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/threadsTimedWaiting":{
-        "metric":"jvm.metrics.threadsTimedWaiting",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/threadsWaiting":{
-        "metric":"jvm.metrics.threadsWaiting",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/load/load_fifteen":{
-        "metric":"load_fifteen",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_five":{
-        "metric":"load_five",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_one":{
-        "metric":"load_one",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_buffers":{
-        "metric":"mem_buffers",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_cached":{
-        "metric":"mem_cached",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_free":{
-        "metric":"mem_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_shared":{
-        "metric":"mem_shared",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_total":{
-        "metric":"mem_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/swap_free":{
-        "metric":"swap_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/swap_total":{
-        "metric":"swap_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/bytes_in":{
-        "metric":"bytes_in",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/bytes_out":{
-        "metric":"bytes_out",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/pkts_in":{
-        "metric":"pkts_in",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/pkts_out":{
-        "metric":"pkts_out",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/process/proc_run":{
-        "metric":"proc_run",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/process/proc_total":{
-        "metric":"proc_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/NumOpenConnections":{
-        "metric":"rpc.rpc.NumOpenConnections",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/ReceivedBytes":{
-        "metric":"rpc.rpc.ReceivedBytes",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcProcessingTime_avg_time":{
-        "metric":"rpc.rpc.RpcProcessingTime_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcProcessingTime_num_ops":{
-        "metric":"rpc.rpc.RpcProcessingTime_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcQueueTime_avg_time":{
-        "metric":"rpc.rpc.RpcQueueTime_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcQueueTime_num_ops":{
-        "metric":"rpc.rpc.RpcQueueTime_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/SentBytes":{
-        "metric":"rpc.rpc.SentBytes",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/callQueueLen":{
-        "metric":"rpc.rpc.callQueueLen",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthenticationFailures":{
-        "metric":"rpc.rpc.rpcAuthenticationFailures",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthenticationSuccesses":{
-        "metric":"rpc.rpc.rpcAuthenticationSuccesses",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthorizationFailures":{
-        "metric":"rpc.rpc.rpcAuthorizationFailures",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthorizationSuccesses":{
-        "metric":"rpc.rpc.rpcAuthorizationSuccesses",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/addBlock_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.addBlock_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/addBlock_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.addBlock_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/blockReceived_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.blockReceived_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/blockReceived_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.blockReceived_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/blockReport_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.blockReport_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/blockReport_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.blockReport_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/blocksBeingWrittenReport_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.blocksBeingWrittenReport_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/blocksBeingWrittenReport_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.blocksBeingWrittenReport_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/complete_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.complete_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/complete_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.complete_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/create_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.create_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/create_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.create_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/delete_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.delete_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/delete_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.delete_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/fsync_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.fsync_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/fsync_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.fsync_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getBlockLocations_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getBlockLocations_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getBlockLocations_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getBlockLocations_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getEditLogSize_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getEditLogSize_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getEditLogSize_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getEditLogSize_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getFileInfo_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getFileInfo_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getFileInfo_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getFileInfo_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getListing_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getListing_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getListing_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getListing_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getProtocolVersion_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getProtocolVersion_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/mkdirs_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.mkdirs_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/mkdirs_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.mkdirs_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/register_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.register_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/register_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.register_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/rename_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.rename_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/rename_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.rename_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/renewLease_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.renewLease_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/renewLease_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.renewLease_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/rollEditLog_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.rollEditLog_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/rollEditLog_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.rollEditLog_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/rollFsImage_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.rollFsImage_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/rollFsImage_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.rollFsImage_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/sendHeartbeat_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.sendHeartbeat_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/sendHeartbeat_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.sendHeartbeat_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/setOwner_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.setOwner_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/setOwner_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.setOwner_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/setPermission_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.setPermission_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/setPermission_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.setPermission_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/setReplication_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.setReplication_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/setReplication_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.setReplication_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/setSafeMode_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.setSafeMode_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/setSafeMode_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.setSafeMode_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/versionRequest_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.versionRequest_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/versionRequest_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.versionRequest_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/ugi/loginFailure_avg_time":{
-        "metric":"ugi.ugi.loginFailure_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/ugi/loginFailure_num_ops":{
-        "metric":"ugi.ugi.loginFailure_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/ugi/loginSuccess_avg_time":{
-        "metric":"ugi.ugi.loginSuccess_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/ugi/loginSuccess_num_ops":{
-        "metric":"ugi.ugi.loginSuccess_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      }
-    },
-
-    "DATANODE":{
-
-      "metrics/boottime":{
-        "metric":"boottime",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_aidle":{
-        "metric":"cpu_aidle",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_idle":{
-        "metric":"cpu_idle",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_nice":{
-        "metric":"cpu_nice",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_num":{
-        "metric":"cpu_num",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_speed":{
-        "metric":"cpu_speed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_system":{
-        "metric":"cpu_system",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_user":{
-        "metric":"cpu_user",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_wio":{
-        "metric":"cpu_wio",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/blockChecksumOp_avg_time":{
-        "metric":"dfs.datanode.blockChecksumOp_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/blockChecksumOp_num_ops":{
-        "metric":"dfs.datanode.blockChecksumOp_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/blockReports_avg_time":{
-        "metric":"dfs.datanode.blockReports_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/blockReports_num_ops":{
-        "metric":"dfs.datanode.blockReports_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/block_verification_failures":{
-        "metric":"dfs.datanode.block_verification_failures",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/blocks_get_local_pathinfo":{
-        "metric":"dfs.datanode.blocks_get_local_pathinfo",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/blocks_read":{
-        "metric":"dfs.datanode.blocks_read",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/blocks_removed":{
-        "metric":"dfs.datanode.blocks_removed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/blocks_replicated":{
-        "metric":"dfs.datanode.blocks_replicated",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/blocks_verified":{
-        "metric":"dfs.datanode.blocks_verified",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/blocks_written":{
-        "metric":"dfs.datanode.blocks_written",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/bytes_read":{
-        "metric":"dfs.datanode.bytes_read",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/bytes_written":{
-        "metric":"dfs.datanode.bytes_written",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/copyBlockOp_avg_time":{
-        "metric":"dfs.datanode.copyBlockOp_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/copyBlockOp_num_ops":{
-        "metric":"dfs.datanode.copyBlockOp_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/heartBeats_avg_time":{
-        "metric":"dfs.datanode.heartBeats_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/heartBeats_num_ops":{
-        "metric":"dfs.datanode.heartBeats_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/readBlockOp_avg_time":{
-        "metric":"dfs.datanode.readBlockOp_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/readBlockOp_num_ops":{
-        "metric":"dfs.datanode.readBlockOp_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/reads_from_local_client":{
-        "metric":"dfs.datanode.reads_from_local_client",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/reads_from_remote_client":{
-        "metric":"dfs.datanode.reads_from_remote_client",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/replaceBlockOp_avg_time":{
-        "metric":"dfs.datanode.replaceBlockOp_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/replaceBlockOp_num_ops":{
-        "metric":"dfs.datanode.replaceBlockOp_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/writeBlockOp_avg_time":{
-        "metric":"dfs.datanode.writeBlockOp_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/writeBlockOp_num_ops":{
-        "metric":"dfs.datanode.writeBlockOp_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/writes_from_local_client":{
-        "metric":"dfs.datanode.writes_from_local_client",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/dfs/datanode/writes_from_remote_client":{
-        "metric":"dfs.datanode.writes_from_remote_client",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/disk_free":{
-        "metric":"disk_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/disk_total":{
-        "metric":"disk_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/part_max_used":{
-        "metric":"part_max_used",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/gcCount":{
-        "metric":"jvm.metrics.gcCount",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/gcTimeMillis":{
-        "metric":"jvm.metrics.gcTimeMillis",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/logError":{
-        "metric":"jvm.metrics.logError",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/logFatal":{
-        "metric":"jvm.metrics.logFatal",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/logInfo":{
-        "metric":"jvm.metrics.logInfo",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/logWarn":{
-        "metric":"jvm.metrics.logWarn",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/maxMemoryM":{
-        "metric":"jvm.metrics.maxMemoryM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/memHeapCommittedM":{
-        "metric":"jvm.metrics.memHeapCommittedM",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/memHeapUsedM":{
-        "metric":"jvm.metrics.memHeapUsedM",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/memNonHeapCommittedM":{
-        "metric":"jvm.metrics.memNonHeapCommittedM",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/memNonHeapUsedM":{
-        "metric":"jvm.metrics.memNonHeapUsedM",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/threadsBlocked":{
-        "metric":"jvm.metrics.threadsBlocked",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/threadsNew":{
-        "metric":"jvm.metrics.threadsNew",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/threadsRunnable":{
-        "metric":"jvm.metrics.threadsRunnable",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/threadsTerminated":{
-        "metric":"jvm.metrics.threadsTerminated",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/threadsTimedWaiting":{
-        "metric":"jvm.metrics.threadsTimedWaiting",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/threadsWaiting":{
-        "metric":"jvm.metrics.threadsWaiting",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/load/load_fifteen":{
-        "metric":"load_fifteen",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_five":{
-        "metric":"load_five",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_one":{
-        "metric":"load_one",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_buffers":{
-        "metric":"mem_buffers",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_cached":{
-        "metric":"mem_cached",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_free":{
-        "metric":"mem_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_shared":{
-        "metric":"mem_shared",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_total":{
-        "metric":"mem_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/swap_free":{
-        "metric":"swap_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/swap_total":{
-        "metric":"swap_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/bytes_in":{
-        "metric":"bytes_in",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/bytes_out":{
-        "metric":"bytes_out",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/pkts_in":{
-        "metric":"pkts_in",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/pkts_out":{
-        "metric":"pkts_out",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/process/proc_run":{
-        "metric":"proc_run",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/process/proc_total":{
-        "metric":"proc_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/NumOpenConnections":{
-        "metric":"rpc.rpc.NumOpenConnections",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/ReceivedBytes":{
-        "metric":"rpc.rpc.ReceivedBytes",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcProcessingTime_avg_time":{
-        "metric":"rpc.rpc.RpcProcessingTime_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcProcessingTime_num_ops":{
-        "metric":"rpc.rpc.RpcProcessingTime_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcQueueTime_avg_time":{
-        "metric":"rpc.rpc.RpcQueueTime_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcQueueTime_num_ops":{
-        "metric":"rpc.rpc.RpcQueueTime_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcSlowResponse_avg_time":{
-        "metric":"rpc.rpc.RpcSlowResponse_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcSlowResponse_num_ops":{
-        "metric":"rpc.rpc.RpcSlowResponse_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/SentBytes":{
-        "metric":"rpc.rpc.SentBytes",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/abort/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.abort.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/abort/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.abort.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/abort_avg_time":{
-        "metric":"rpc.rpc.abort_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/abort_num_ops":{
-        "metric":"rpc.rpc.abort_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addColumn_avg_time":{
-        "metric":"rpc.rpc.addColumn_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addColumn_num_ops":{
-        "metric":"rpc.rpc.addColumn_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addToOnlineRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.addToOnlineRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addToOnlineRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.addToOnlineRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addToOnlineRegions_avg_time":{
-        "metric":"rpc.rpc.addToOnlineRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addToOnlineRegions_num_ops":{
-        "metric":"rpc.rpc.addToOnlineRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/assign_avg_time":{
-        "metric":"rpc.rpc.assign_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/assign_num_ops":{
-        "metric":"rpc.rpc.assign_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balanceSwitch_avg_time":{
-        "metric":"rpc.rpc.balanceSwitch_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balanceSwitch_num_ops":{
-        "metric":"rpc.rpc.balanceSwitch_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balance_avg_time":{
-        "metric":"rpc.rpc.balance_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balance_num_ops":{
-        "metric":"rpc.rpc.balance_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.bulkLoadHFiles.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.bulkLoadHFiles.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles_avg_time":{
-        "metric":"rpc.rpc.bulkLoadHFiles_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles_num_ops":{
-        "metric":"rpc.rpc.bulkLoadHFiles_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/callQueueLen":{
-        "metric":"rpc.rpc.callQueueLen",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.checkAndDelete.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.checkAndDelete.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete_avg_time":{
-        "metric":"rpc.rpc.checkAndDelete_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete_num_ops":{
-        "metric":"rpc.rpc.checkAndDelete_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.checkAndPut.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.checkAndPut.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut_avg_time":{
-        "metric":"rpc.rpc.checkAndPut_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut_num_ops":{
-        "metric":"rpc.rpc.checkAndPut_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkOOME/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.checkOOME.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkOOME/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.checkOOME.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkOOME_avg_time":{
-        "metric":"rpc.rpc.checkOOME_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkOOME_num_ops":{
-        "metric":"rpc.rpc.checkOOME_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.close.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.close.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.closeRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.closeRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion_avg_time":{
-        "metric":"rpc.rpc.closeRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion_num_ops":{
-        "metric":"rpc.rpc.closeRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close_avg_time":{
-        "metric":"rpc.rpc.close_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close_num_ops":{
-        "metric":"rpc.rpc.close_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.compactRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.compactRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion_avg_time":{
-        "metric":"rpc.rpc.compactRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion_num_ops":{
-        "metric":"rpc.rpc.compactRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/createTable_avg_time":{
-        "metric":"rpc.rpc.createTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/createTable_num_ops":{
-        "metric":"rpc.rpc.createTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.delete.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.delete.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteColumn_avg_time":{
-        "metric":"rpc.rpc.deleteColumn_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteColumn_num_ops":{
-        "metric":"rpc.rpc.deleteColumn_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteTable_avg_time":{
-        "metric":"rpc.rpc.deleteTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteTable_num_ops":{
-        "metric":"rpc.rpc.deleteTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete_avg_time":{
-        "metric":"rpc.rpc.delete_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete_num_ops":{
-        "metric":"rpc.rpc.delete_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/disableTable_avg_time":{
-        "metric":"rpc.rpc.disableTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/disableTable_num_ops":{
-        "metric":"rpc.rpc.disableTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/enableTable_avg_time":{
-        "metric":"rpc.rpc.enableTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/enableTable_num_ops":{
-        "metric":"rpc.rpc.enableTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.execCoprocessor.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.execCoprocessor.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor_avg_time":{
-        "metric":"rpc.rpc.execCoprocessor_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor_num_ops":{
-        "metric":"rpc.rpc.execCoprocessor_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.exists.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.exists.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists_avg_time":{
-        "metric":"rpc.rpc.exists_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists_num_ops":{
-        "metric":"rpc.rpc.exists_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.flushRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.flushRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion_avg_time":{
-        "metric":"rpc.rpc.flushRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion_num_ops":{
-        "metric":"rpc.rpc.flushRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.get.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.get.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getAlterStatus_avg_time":{
-        "metric":"rpc.rpc.getAlterStatus_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getAlterStatus_num_ops":{
-        "metric":"rpc.rpc.getAlterStatus_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries_avg_time":{
-        "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries_num_ops":{
-        "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getCatalogTracker/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getCatalogTracker.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getCatalogTracker/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getCatalogTracker.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getCatalogTracker_avg_time":{
-        "metric":"rpc.rpc.getCatalogTracker_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getCatalogTracker_num_ops":{
-        "metric":"rpc.rpc.getCatalogTracker_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getClosestRowBefore.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getClosestRowBefore.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore_avg_time":{
-        "metric":"rpc.rpc.getClosestRowBefore_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore_num_ops":{
-        "metric":"rpc.rpc.getClosestRowBefore_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClusterStatus_avg_time":{
-        "metric":"rpc.rpc.getClusterStatus_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClusterStatus_num_ops":{
-        "metric":"rpc.rpc.getClusterStatus_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getConfiguration/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getConfiguration.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getConfiguration/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getConfiguration.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getConfiguration_avg_time":{
-        "metric":"rpc.rpc.getConfiguration_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getConfiguration_num_ops":{
-        "metric":"rpc.rpc.getConfiguration_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getFromOnlineRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getFromOnlineRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getFromOnlineRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getFromOnlineRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getFromOnlineRegions_avg_time":{
-        "metric":"rpc.rpc.getFromOnlineRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getFromOnlineRegions_num_ops":{
-        "metric":"rpc.rpc.getFromOnlineRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getHServerInfo.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getHServerInfo.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo_avg_time":{
-        "metric":"rpc.rpc.getHServerInfo_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo_num_ops":{
-        "metric":"rpc.rpc.getHServerInfo_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHTableDescriptors_avg_time":{
-        "metric":"rpc.rpc.getHTableDescriptors_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHTableDescriptors_num_ops":{
-        "metric":"rpc.rpc.getHTableDescriptors_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getOnlineRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getOnlineRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions_avg_time":{
-        "metric":"rpc.rpc.getOnlineRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions_num_ops":{
-        "metric":"rpc.rpc.getOnlineRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getProtocolSignature.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getProtocolSignature.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature_avg_time":{
-        "metric":"rpc.rpc.getProtocolSignature_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature_num_ops":{
-        "metric":"rpc.rpc.getProtocolSignature_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getProtocolVersion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getProtocolVersion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion_avg_time":{
-        "metric":"rpc.rpc.getProtocolVersion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion_num_ops":{
-        "metric":"rpc.rpc.getProtocolVersion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getRegionInfo.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getRegionInfo.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo_avg_time":{
-        "metric":"rpc.rpc.getRegionInfo_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo_num_ops":{
-        "metric":"rpc.rpc.getRegionInfo_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getServerName/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getServerName.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getServerName/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getServerName.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getServerName_avg_time":{
-        "metric":"rpc.rpc.getServerName_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getServerName_num_ops":{
-        "metric":"rpc.rpc.getServerName_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getZooKeeper/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getZooKeeper.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getZooKeeper/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getZooKeeper.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getZooKeeper_avg_time":{
-        "metric":"rpc.rpc.getZooKeeper_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getZooKeeper_num_ops":{
-        "metric":"rpc.rpc.getZooKeeper_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get_avg_time":{
-        "metric":"rpc.rpc.get_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get_num_ops":{
-        "metric":"rpc.rpc.get_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.increment.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.increment.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.incrementColumnValue.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.incrementColumnValue.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue_avg_time":{
-        "metric":"rpc.rpc.incrementColumnValue_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue_num_ops":{
-        "metric":"rpc.rpc.incrementColumnValue_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment_avg_time":{
-        "metric":"rpc.rpc.increment_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment_num_ops":{
-        "metric":"rpc.rpc.increment_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isAborted/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.isAborted.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isAborted/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.isAborted.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isAborted_avg_time":{
-        "metric":"rpc.rpc.isAborted_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isAborted_num_ops":{
-        "metric":"rpc.rpc.isAborted_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isMasterRunning_avg_time":{
-        "metric":"rpc.rpc.isMasterRunning_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isMasterRunning_num_ops":{
-        "metric":"rpc.rpc.isMasterRunning_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isStopped/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.isStopped.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isStopped/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.isStopped.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isStopped_avg_time":{
-        "metric":"rpc.rpc.isStopped_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isStopped_num_ops":{
-        "metric":"rpc.rpc.isStopped_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.lockRow.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.lockRow.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow_avg_time":{
-        "metric":"rpc.rpc.lockRow_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow_num_ops":{
-        "metric":"rpc.rpc.lockRow_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyColumn_avg_time":{
-        "metric":"rpc.rpc.modifyColumn_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyColumn_num_ops":{
-        "metric":"rpc.rpc.modifyColumn_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyTable_avg_time":{
-        "metric":"rpc.rpc.modifyTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyTable_num_ops":{
-        "metric":"rpc.rpc.modifyTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/move_avg_time":{
-        "metric":"rpc.rpc.move_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/move_num_ops":{
-        "metric":"rpc.rpc.move_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.multi.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.multi.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi_avg_time":{
-        "metric":"rpc.rpc.multi_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi_num_ops":{
-        "metric":"rpc.rpc.multi_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.next.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.next.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next_avg_time":{
-        "metric":"rpc.rpc.next_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next_num_ops":{
-        "metric":"rpc.rpc.next_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/offline_avg_time":{
-        "metric":"rpc.rpc.offline_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/offline_num_ops":{
-        "metric":"rpc.rpc.offline_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.openRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.openRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion_avg_time":{
-        "metric":"rpc.rpc.openRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion_num_ops":{
-        "metric":"rpc.rpc.openRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.openRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.openRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions_avg_time":{
-        "metric":"rpc.rpc.openRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions_num_ops":{
-        "metric":"rpc.rpc.openRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.openScanner.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.openScanner.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner_avg_time":{
-        "metric":"rpc.rpc.openScanner_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner_num_ops":{
-        "metric":"rpc.rpc.openScanner_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.put.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.put.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put_avg_time":{
-        "metric":"rpc.rpc.put_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put_num_ops":{
-        "metric":"rpc.rpc.put_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerReport_avg_time":{
-        "metric":"rpc.rpc.regionServerReport_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerReport_num_ops":{
-        "metric":"rpc.rpc.regionServerReport_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerStartup_avg_time":{
-        "metric":"rpc.rpc.regionServerStartup_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerStartup_num_ops":{
-        "metric":"rpc.rpc.regionServerStartup_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.removeFromOnlineRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.removeFromOnlineRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/removeFromOnlineRegions_avg_time":{
-        "metric":"rpc.rpc.removeFromOnlineRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/removeFromOnlineRegions_num_ops":{
-        "metric":"rpc.rpc.removeFromOnlineRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.replicateLogEntries.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.replicateLogEntries.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries_avg_time":{
-        "metric":"rpc.rpc.replicateLogEntries_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries_num_ops":{
-        "metric":"rpc.rpc.replicateLogEntries_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/reportRSFatalError_avg_time":{
-        "metric":"rpc.rpc.reportRSFatalError_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/reportRSFatalError_num_ops":{
-        "metric":"rpc.rpc.reportRSFatalError_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.rollHLogWriter.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.rollHLogWriter.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter_avg_time":{
-        "metric":"rpc.rpc.rollHLogWriter_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter_num_ops":{
-        "metric":"rpc.rpc.rollHLogWriter_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthenticationFailures":{
-        "metric":"rpc.rpc.rpcAuthenticationFailures",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthenticationSuccesses":{
-        "metric":"rpc.rpc.rpcAuthenticationSuccesses",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthorizationFailures":{
-        "metric":"rpc.rpc.rpcAuthorizationFailures",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthorizationSuccesses":{
-        "metric":"rpc.rpc.rpcAuthorizationSuccesses",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/shutdown_avg_time":{
-        "metric":"rpc.rpc.shutdown_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/shutdown_num_ops":{
-        "metric":"rpc.rpc.shutdown_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.splitRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.splitRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion_avg_time":{
-        "metric":"rpc.rpc.splitRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion_num_ops":{
-        "metric":"rpc.rpc.splitRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.stop.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.stop.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stopMaster_avg_time":{
-        "metric":"rpc.rpc.stopMaster_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stopMaster_num_ops":{
-        "metric":"rpc.rpc.stopMaster_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop_avg_time":{
-        "metric":"rpc.rpc.stop_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop_num_ops":{
-        "metric":"rpc.rpc.stop_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unassign_avg_time":{
-        "metric":"rpc.rpc.unassign_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unassign_num_ops":{
-        "metric":"rpc.rpc.unassign_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.unlockRow.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.unlockRow.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow_avg_time":{
-        "metric":"rpc.rpc.unlockRow_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow_num_ops":{
-        "metric":"rpc.rpc.unlockRow_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/ugi/loginFailure_avg_time":{
-        "metric":"ugi.ugi.loginFailure_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/ugi/loginFailure_num_ops":{
-        "metric":"ugi.ugi.loginFailure_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/ugi/loginSuccess_avg_time":{
-        "metric":"ugi.ugi.loginSuccess_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/ugi/loginSuccess_num_ops":{
-        "metric":"ugi.ugi.loginSuccess_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      }
-    },
-
-    "JOBTRACKER":{
-
-      "metrics/boottime":{
-        "metric":"boottime",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_aidle":{
-        "metric":"cpu_aidle",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_idle":{
-        "metric":"cpu_idle",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_nice":{
-        "metric":"cpu_nice",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_num":{
-        "metric":"cpu_num",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_speed":{
-        "metric":"cpu_speed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_system":{
-        "metric":"cpu_system",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_user":{
-        "metric":"cpu_user",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_wio":{
-        "metric":"cpu_wio",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/disk_free":{
-        "metric":"disk_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/disk_total":{
-        "metric":"disk_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/part_max_used":{
-        "metric":"part_max_used",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/gcCount":{
-        "metric":"jvm.metrics.gcCount",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/gcTimeMillis":{
-        "metric":"jvm.metrics.gcTimeMillis",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/logError":{
-        "metric":"jvm.metrics.logError",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/logFatal":{
-        "metric":"jvm.metrics.logFatal",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/logInfo":{
-        "metric":"jvm.metrics.logInfo",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/logWarn":{
-        "metric":"jvm.metrics.logWarn",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/memHeapCommittedM":{
-        "metric":"jvm.metrics.memHeapCommittedM",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/memHeapUsedM":{
-        "metric":"jvm.metrics.memHeapUsedM",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/memNonHeapCommittedM":{
-        "metric":"jvm.metrics.memNonHeapCommittedM",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/memNonHeapUsedM":{
-        "metric":"jvm.metrics.memNonHeapUsedM",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/threadsBlocked":{
-        "metric":"jvm.metrics.threadsBlocked",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/threadsNew":{
-        "metric":"jvm.metrics.threadsNew",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/threadsRunnable":{
-        "metric":"jvm.metrics.threadsRunnable",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/threadsTerminated":{
-        "metric":"jvm.metrics.threadsTerminated",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/threadsTimedWaiting":{
-        "metric":"jvm.metrics.threadsTimedWaiting",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/threadsWaiting":{
-        "metric":"jvm.metrics.threadsWaiting",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/load/load_fifteen":{
-        "metric":"load_fifteen",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_five":{
-        "metric":"load_five",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_one":{
-        "metric":"load_one",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/jobs_completed":{
-        "metric":"mapred.Queue.jobs_completed",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/jobs_failed":{
-        "metric":"mapred.Queue.jobs_failed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/jobs_killed":{
-        "metric":"mapred.Queue.jobs_killed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/jobs_preparing":{
-        "metric":"mapred.Queue.jobs_preparing",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/jobs_running":{
-        "metric":"mapred.Queue.jobs_running",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/jobs_submitted":{
-        "metric":"mapred.Queue.jobs_submitted",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/maps_completed":{
-        "metric":"mapred.Queue.maps_completed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/maps_failed":{
-        "metric":"mapred.Queue.maps_failed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/maps_killed":{
-        "metric":"mapred.Queue.maps_killed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/maps_launched":{
-        "metric":"mapred.Queue.maps_launched",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/reduces_completed":{
-        "metric":"mapred.Queue.reduces_completed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/reduces_failed":{
-        "metric":"mapred.Queue.reduces_failed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/reduces_killed":{
-        "metric":"mapred.Queue.reduces_killed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/reduces_launched":{
-        "metric":"mapred.Queue.reduces_launched",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/reserved_map_slots":{
-        "metric":"mapred.Queue.reserved_map_slots",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/reserved_reduce_slots":{
-        "metric":"mapred.Queue.reserved_reduce_slots",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/running_0":{
-        "metric":"mapred.Queue.running_0",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/running_1440":{
-        "metric":"mapred.Queue.running_1440",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/running_300":{
-        "metric":"mapred.Queue.running_300",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/running_60":{
-        "metric":"mapred.Queue.running_60",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/waiting_maps":{
-        "metric":"mapred.Queue.waiting_maps",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/Queue/waiting_reduces":{
-        "metric":"mapred.Queue.waiting_reduces",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/blacklisted_maps":{
-        "metric":"mapred.jobtracker.blacklisted_maps",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/blacklisted_reduces":{
-        "metric":"mapred.jobtracker.blacklisted_reduces",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/heartbeats":{
-        "metric":"mapred.jobtracker.heartbeats",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/jobs_completed":{
-        "metric":"mapred.jobtracker.jobs_completed",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/jobs_failed":{
-        "metric":"mapred.jobtracker.jobs_failed",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/jobs_killed":{
-        "metric":"mapred.jobtracker.jobs_killed",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/jobs_preparing":{
-        "metric":"mapred.jobtracker.jobs_preparing",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/jobs_running":{
-        "metric":"mapred.jobtracker.jobs_running",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/jobs_submitted":{
-        "metric":"mapred.jobtracker.jobs_submitted",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/map_slots":{
-        "metric":"mapred.jobtracker.map_slots",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/maps_completed":{
-        "metric":"mapred.jobtracker.maps_completed",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/maps_failed":{
-        "metric":"mapred.jobtracker.maps_failed",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/maps_killed":{
-        "metric":"mapred.jobtracker.maps_killed",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/maps_launched":{
-        "metric":"mapred.jobtracker.maps_launched",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/occupied_map_slots":{
-        "metric":"mapred.jobtracker.occupied_map_slots",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/occupied_reduce_slots":{
-        "metric":"mapred.jobtracker.occupied_reduce_slots",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/reduce_slots":{
-        "metric":"mapred.jobtracker.reduce_slots",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/reduces_completed":{
-        "metric":"mapred.jobtracker.reduces_completed",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/reduces_failed":{
-        "metric":"mapred.jobtracker.reduces_failed",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/reduces_killed":{
-        "metric":"mapred.jobtracker.reduces_killed",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/reduces_launched":{
-        "metric":"mapred.jobtracker.reduces_launched",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/reserved_map_slots":{
-        "metric":"mapred.jobtracker.reserved_map_slots",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/reserved_reduce_slots":{
-        "metric":"mapred.jobtracker.reserved_reduce_slots",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/running_maps":{
-        "metric":"mapred.jobtracker.running_maps",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/running_reduces":{
-        "metric":"mapred.jobtracker.running_reduces",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/trackers":{
-        "metric":"mapred.jobtracker.trackers",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/trackers_blacklisted":{
-        "metric":"mapred.jobtracker.trackers_blacklisted",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/trackers_decommissioned":{
-        "metric":"mapred.jobtracker.trackers_decommissioned",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/trackers_graylisted":{
-        "metric":"mapred.jobtracker.trackers_graylisted",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/waiting_maps":{
-        "metric":"mapred.jobtracker.waiting_maps",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/jobtracker/waiting_reduces":{
-        "metric":"mapred.jobtracker.waiting_reduces",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/memory/mem_buffers":{
-        "metric":"mem_buffers",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_cached":{
-        "metric":"mem_cached",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_free":{
-        "metric":"mem_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_shared":{
-        "metric":"mem_shared",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_total":{
-        "metric":"mem_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/swap_free":{
-        "metric":"swap_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/swap_total":{
-        "metric":"swap_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/bytes_in":{
-        "metric":"bytes_in",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/bytes_out":{
-        "metric":"bytes_out",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/pkts_in":{
-        "metric":"pkts_in",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/pkts_out":{
-        "metric":"pkts_out",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/process/proc_run":{
-        "metric":"proc_run",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/process/proc_total":{
-        "metric":"proc_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/NumOpenConnections":{
-        "metric":"rpc.rpc.NumOpenConnections",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/ReceivedBytes":{
-        "metric":"rpc.rpc.ReceivedBytes",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcProcessingTime_avg_time":{
-        "metric":"rpc.rpc.RpcProcessingTime_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcProcessingTime_num_ops":{
-        "metric":"rpc.rpc.RpcProcessingTime_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcQueueTime_avg_time":{
-        "metric":"rpc.rpc.RpcQueueTime_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcQueueTime_num_ops":{
-        "metric":"rpc.rpc.RpcQueueTime_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/SentBytes":{
-        "metric":"rpc.rpc.SentBytes",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/callQueueLen":{
-        "metric":"rpc.rpc.callQueueLen",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthenticationFailures":{
-        "metric":"rpc.rpc.rpcAuthenticationFailures",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthenticationSuccesses":{
-        "metric":"rpc.rpc.rpcAuthenticationSuccesses",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthorizationFailures":{
-        "metric":"rpc.rpc.rpcAuthorizationFailures",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthorizationSuccesses":{
-        "metric":"rpc.rpc.rpcAuthorizationSuccesses",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getBuildVersion_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getBuildVersion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getBuildVersion_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getBuildVersion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getDelegationToken_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getDelegationToken_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getDelegationToken_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getDelegationToken_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getJobCounters_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getJobCounters_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getJobCounters_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getJobCounters_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getJobProfile_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getJobProfile_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getJobProfile_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getJobProfile_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getJobStatus_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getJobStatus_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getJobStatus_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getJobStatus_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getMapTaskReports_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getMapTaskReports_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getMapTaskReports_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getMapTaskReports_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getNewJobId_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getNewJobId_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getNewJobId_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getNewJobId_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getProtocolVersion_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getProtocolVersion_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getQueueAdmins_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getQueueAdmins_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getQueueAdmins_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getQueueAdmins_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getReduceTaskReports_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getReduceTaskReports_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getReduceTaskReports_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getReduceTaskReports_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getStagingAreaDir_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getStagingAreaDir_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getStagingAreaDir_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getStagingAreaDir_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getSystemDir_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getSystemDir_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getSystemDir_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getSystemDir_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getTaskCompletionEvents_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.getTaskCompletionEvents_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/getTaskCompletionEvents_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.getTaskCompletionEvents_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/heartbeat_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.heartbeat_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/heartbeat_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.heartbeat_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/jobsToComplete_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.jobsToComplete_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/jobsToComplete_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.jobsToComplete_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/submitJob_avg_time":{
-        "metric":"rpcdetailed.rpcdetailed.submitJob_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpcdetailed/submitJob_num_ops":{
-        "metric":"rpcdetailed.rpcdetailed.submitJob_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/ugi/loginFailure_avg_time":{
-        "metric":"ugi.ugi.loginFailure_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/ugi/loginFailure_num_ops":{
-        "metric":"ugi.ugi.loginFailure_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/ugi/loginSuccess_avg_time":{
-        "metric":"ugi.ugi.loginSuccess_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/ugi/loginSuccess_num_ops":{
-        "metric":"ugi.ugi.loginSuccess_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      }
-    },
-
-    "TASKTRACKER":{
-
-      "metrics/boottime":{
-        "metric":"boottime",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_aidle":{
-        "metric":"cpu_aidle",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_idle":{
-        "metric":"cpu_idle",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_nice":{
-        "metric":"cpu_nice",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_num":{
-        "metric":"cpu_num",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_speed":{
-        "metric":"cpu_speed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_system":{
-        "metric":"cpu_system",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_user":{
-        "metric":"cpu_user",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_wio":{
-        "metric":"cpu_wio",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/disk_free":{
-        "metric":"disk_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/disk_total":{
-        "metric":"disk_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/part_max_used":{
-        "metric":"part_max_used",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/gcCount":{
-        "metric":"jvm.metrics.gcCount",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/gcTimeMillis":{
-        "metric":"jvm.metrics.gcTimeMillis",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/logError":{
-        "metric":"jvm.metrics.logError",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/logFatal":{
-        "metric":"jvm.metrics.logFatal",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/logInfo":{
-        "metric":"jvm.metrics.logInfo",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/logWarn":{
-        "metric":"jvm.metrics.logWarn",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/maxMemoryM":{
-        "metric":"jvm.metrics.maxMemoryM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/memHeapCommittedM":{
-        "metric":"jvm.metrics.memHeapCommittedM",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/memHeapUsedM":{
-        "metric":"jvm.metrics.memHeapUsedM",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/memNonHeapCommittedM":{
-        "metric":"jvm.metrics.memNonHeapCommittedM",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/memNonHeapUsedM":{
-        "metric":"jvm.metrics.memNonHeapUsedM",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/threadsBlocked":{
-        "metric":"jvm.metrics.threadsBlocked",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/threadsNew":{
-        "metric":"jvm.metrics.threadsNew",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/threadsRunnable":{
-        "metric":"jvm.metrics.threadsRunnable",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/threadsTerminated":{
-        "metric":"jvm.metrics.threadsTerminated",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/threadsTimedWaiting":{
-        "metric":"jvm.metrics.threadsTimedWaiting",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/threadsWaiting":{
-        "metric":"jvm.metrics.threadsWaiting",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/load/load_fifteen":{
-        "metric":"load_fifteen",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_five":{
-        "metric":"load_five",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_one":{
-        "metric":"load_one",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/shuffleOutput/shuffle_exceptions_caught":{
-        "metric":"mapred.shuffleOutput.shuffle_exceptions_caught",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/shuffleOutput/shuffle_failed_outputs":{
-        "metric":"mapred.shuffleOutput.shuffle_failed_outputs",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/shuffleOutput/shuffle_handler_busy_percent":{
-        "metric":"mapred.shuffleOutput.shuffle_handler_busy_percent",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/shuffleOutput/shuffle_output_bytes":{
-        "metric":"mapred.shuffleOutput.shuffle_output_bytes",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/shuffleOutput/shuffle_success_outputs":{
-        "metric":"mapred.shuffleOutput.shuffle_success_outputs",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/tasktracker/mapTaskSlots":{
-        "metric":"mapred.tasktracker.mapTaskSlots",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/tasktracker/maps_running":{
-        "metric":"mapred.tasktracker.maps_running",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/tasktracker/reduceTaskSlots":{
-        "metric":"mapred.tasktracker.reduceTaskSlots",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/mapred/tasktracker/reduces_running":{
-        "metric":"mapred.tasktracker.reduces_running",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/memory/mem_buffers":{
-        "metric":"mem_buffers",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_cached":{
-        "metric":"mem_cached",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_free":{
-        "metric":"mem_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_shared":{
-        "metric":"mem_shared",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_total":{
-        "metric":"mem_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/swap_free":{
-        "metric":"swap_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/swap_total":{
-        "metric":"swap_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/bytes_in":{
-        "metric":"bytes_in",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/bytes_out":{
-        "metric":"bytes_out",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/pkts_in":{
-        "metric":"pkts_in",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/pkts_out":{
-        "metric":"pkts_out",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/process/proc_run":{
-        "metric":"proc_run",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/process/proc_total":{
-        "metric":"proc_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/NumOpenConnections":{
-        "metric":"rpc.rpc.NumOpenConnections",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/ReceivedBytes":{
-        "metric":"rpc.rpc.ReceivedBytes",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcProcessingTime_avg_time":{
-        "metric":"rpc.rpc.RpcProcessingTime_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcProcessingTime_num_ops":{
-        "metric":"rpc.rpc.RpcProcessingTime_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcQueueTime_avg_time":{
-        "metric":"rpc.rpc.RpcQueueTime_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcQueueTime_num_ops":{
-        "metric":"rpc.rpc.RpcQueueTime_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcSlowResponse_avg_time":{
-        "metric":"rpc.rpc.RpcSlowResponse_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcSlowResponse_num_ops":{
-        "metric":"rpc.rpc.RpcSlowResponse_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/SentBytes":{
-        "metric":"rpc.rpc.SentBytes",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/abort/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.abort.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/abort/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.abort.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/abort_avg_time":{
-        "metric":"rpc.rpc.abort_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/abort_num_ops":{
-        "metric":"rpc.rpc.abort_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addColumn_avg_time":{
-        "metric":"rpc.rpc.addColumn_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addColumn_num_ops":{
-        "metric":"rpc.rpc.addColumn_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addToOnlineRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.addToOnlineRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addToOnlineRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.addToOnlineRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addToOnlineRegions_avg_time":{
-        "metric":"rpc.rpc.addToOnlineRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addToOnlineRegions_num_ops":{
-        "metric":"rpc.rpc.addToOnlineRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/assign_avg_time":{
-        "metric":"rpc.rpc.assign_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/assign_num_ops":{
-        "metric":"rpc.rpc.assign_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balanceSwitch_avg_time":{
-        "metric":"rpc.rpc.balanceSwitch_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balanceSwitch_num_ops":{
-        "metric":"rpc.rpc.balanceSwitch_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balance_avg_time":{
-        "metric":"rpc.rpc.balance_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balance_num_ops":{
-        "metric":"rpc.rpc.balance_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.bulkLoadHFiles.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.bulkLoadHFiles.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles_avg_time":{
-        "metric":"rpc.rpc.bulkLoadHFiles_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles_num_ops":{
-        "metric":"rpc.rpc.bulkLoadHFiles_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/callQueueLen":{
-        "metric":"rpc.rpc.callQueueLen",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.checkAndDelete.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.checkAndDelete.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete_avg_time":{
-        "metric":"rpc.rpc.checkAndDelete_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete_num_ops":{
-        "metric":"rpc.rpc.checkAndDelete_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.checkAndPut.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.checkAndPut.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut_avg_time":{
-        "metric":"rpc.rpc.checkAndPut_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut_num_ops":{
-        "metric":"rpc.rpc.checkAndPut_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkOOME/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.checkOOME.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkOOME/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.checkOOME.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkOOME_avg_time":{
-        "metric":"rpc.rpc.checkOOME_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkOOME_num_ops":{
-        "metric":"rpc.rpc.checkOOME_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.close.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.close.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.closeRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.closeRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion_avg_time":{
-        "metric":"rpc.rpc.closeRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion_num_ops":{
-        "metric":"rpc.rpc.closeRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close_avg_time":{
-        "metric":"rpc.rpc.close_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close_num_ops":{
-        "metric":"rpc.rpc.close_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.compactRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.compactRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion_avg_time":{
-        "metric":"rpc.rpc.compactRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion_num_ops":{
-        "metric":"rpc.rpc.compactRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/createTable_avg_time":{
-        "metric":"rpc.rpc.createTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/createTable_num_ops":{
-        "metric":"rpc.rpc.createTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.delete.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.delete.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteColumn_avg_time":{
-        "metric":"rpc.rpc.deleteColumn_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteColumn_num_ops":{
-        "metric":"rpc.rpc.deleteColumn_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteTable_avg_time":{
-        "metric":"rpc.rpc.deleteTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteTable_num_ops":{
-        "metric":"rpc.rpc.deleteTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete_avg_time":{
-        "metric":"rpc.rpc.delete_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete_num_ops":{
-        "metric":"rpc.rpc.delete_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/disableTable_avg_time":{
-        "metric":"rpc.rpc.disableTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/disableTable_num_ops":{
-        "metric":"rpc.rpc.disableTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/enableTable_avg_time":{
-        "metric":"rpc.rpc.enableTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/enableTable_num_ops":{
-        "metric":"rpc.rpc.enableTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.execCoprocessor.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.execCoprocessor.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor_avg_time":{
-        "metric":"rpc.rpc.execCoprocessor_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor_num_ops":{
-        "metric":"rpc.rpc.execCoprocessor_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.exists.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.exists.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists_avg_time":{
-        "metric":"rpc.rpc.exists_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists_num_ops":{
-        "metric":"rpc.rpc.exists_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.flushRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.flushRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion_avg_time":{
-        "metric":"rpc.rpc.flushRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion_num_ops":{
-        "metric":"rpc.rpc.flushRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.get.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.get.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getAlterStatus_avg_time":{
-        "metric":"rpc.rpc.getAlterStatus_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getAlterStatus_num_ops":{
-        "metric":"rpc.rpc.getAlterStatus_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries_avg_time":{
-        "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries_num_ops":{
-        "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getCatalogTracker/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getCatalogTracker.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getCatalogTracker/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getCatalogTracker.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getCatalogTracker_avg_time":{
-        "metric":"rpc.rpc.getCatalogTracker_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getCatalogTracker_num_ops":{
-        "metric":"rpc.rpc.getCatalogTracker_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getClosestRowBefore.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getClosestRowBefore.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore_avg_time":{
-        "metric":"rpc.rpc.getClosestRowBefore_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore_num_ops":{
-        "metric":"rpc.rpc.getClosestRowBefore_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClusterStatus_avg_time":{
-        "metric":"rpc.rpc.getClusterStatus_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClusterStatus_num_ops":{
-        "metric":"rpc.rpc.getClusterStatus_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getConfiguration/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getConfiguration.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getConfiguration/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getConfiguration.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getConfiguration_avg_time":{
-        "metric":"rpc.rpc.getConfiguration_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getConfiguration_num_ops":{
-        "metric":"rpc.rpc.getConfiguration_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getFromOnlineRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getFromOnlineRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getFromOnlineRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getFromOnlineRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getFromOnlineRegions_avg_time":{
-        "metric":"rpc.rpc.getFromOnlineRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getFromOnlineRegions_num_ops":{
-        "metric":"rpc.rpc.getFromOnlineRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getHServerInfo.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getHServerInfo.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo_avg_time":{
-        "metric":"rpc.rpc.getHServerInfo_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo_num_ops":{
-        "metric":"rpc.rpc.getHServerInfo_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHTableDescriptors_avg_time":{
-        "metric":"rpc.rpc.getHTableDescriptors_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHTableDescriptors_num_ops":{
-        "metric":"rpc.rpc.getHTableDescriptors_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getOnlineRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getOnlineRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions_avg_time":{
-        "metric":"rpc.rpc.getOnlineRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions_num_ops":{
-        "metric":"rpc.rpc.getOnlineRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getProtocolSignature.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getProtocolSignature.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature_avg_time":{
-        "metric":"rpc.rpc.getProtocolSignature_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature_num_ops":{
-        "metric":"rpc.rpc.getProtocolSignature_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getProtocolVersion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getProtocolVersion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion_avg_time":{
-        "metric":"rpc.rpc.getProtocolVersion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion_num_ops":{
-        "metric":"rpc.rpc.getProtocolVersion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getRegionInfo.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getRegionInfo.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo_avg_time":{
-        "metric":"rpc.rpc.getRegionInfo_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo_num_ops":{
-        "metric":"rpc.rpc.getRegionInfo_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getServerName/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getServerName.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getServerName/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getServerName.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getServerName_avg_time":{
-        "metric":"rpc.rpc.getServerName_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getServerName_num_ops":{
-        "metric":"rpc.rpc.getServerName_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getZooKeeper/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getZooKeeper.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getZooKeeper/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getZooKeeper.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getZooKeeper_avg_time":{
-        "metric":"rpc.rpc.getZooKeeper_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getZooKeeper_num_ops":{
-        "metric":"rpc.rpc.getZooKeeper_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get_avg_time":{
-        "metric":"rpc.rpc.get_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get_num_ops":{
-        "metric":"rpc.rpc.get_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.increment.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.increment.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.incrementColumnValue.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.incrementColumnValue.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue_avg_time":{
-        "metric":"rpc.rpc.incrementColumnValue_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue_num_ops":{
-        "metric":"rpc.rpc.incrementColumnValue_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment_avg_time":{
-        "metric":"rpc.rpc.increment_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment_num_ops":{
-        "metric":"rpc.rpc.increment_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isAborted/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.isAborted.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isAborted/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.isAborted.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isAborted_avg_time":{
-        "metric":"rpc.rpc.isAborted_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isAborted_num_ops":{
-        "metric":"rpc.rpc.isAborted_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isMasterRunning_avg_time":{
-        "metric":"rpc.rpc.isMasterRunning_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isMasterRunning_num_ops":{
-        "metric":"rpc.rpc.isMasterRunning_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isStopped/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.isStopped.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isStopped/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.isStopped.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isStopped_avg_time":{
-        "metric":"rpc.rpc.isStopped_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isStopped_num_ops":{
-        "metric":"rpc.rpc.isStopped_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.lockRow.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.lockRow.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow_avg_time":{
-        "metric":"rpc.rpc.lockRow_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow_num_ops":{
-        "metric":"rpc.rpc.lockRow_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyColumn_avg_time":{
-        "metric":"rpc.rpc.modifyColumn_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyColumn_num_ops":{
-        "metric":"rpc.rpc.modifyColumn_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyTable_avg_time":{
-        "metric":"rpc.rpc.modifyTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyTable_num_ops":{
-        "metric":"rpc.rpc.modifyTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/move_avg_time":{
-        "metric":"rpc.rpc.move_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/move_num_ops":{
-        "metric":"rpc.rpc.move_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.multi.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.multi.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi_avg_time":{
-        "metric":"rpc.rpc.multi_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi_num_ops":{
-        "metric":"rpc.rpc.multi_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.next.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.next.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next_avg_time":{
-        "metric":"rpc.rpc.next_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next_num_ops":{
-        "metric":"rpc.rpc.next_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/offline_avg_time":{
-        "metric":"rpc.rpc.offline_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/offline_num_ops":{
-        "metric":"rpc.rpc.offline_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.openRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.openRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion_avg_time":{
-        "metric":"rpc.rpc.openRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion_num_ops":{
-        "metric":"rpc.rpc.openRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.openRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.openRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions_avg_time":{
-        "metric":"rpc.rpc.openRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions_num_ops":{
-        "metric":"rpc.rpc.openRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.openScanner.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.openScanner.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner_avg_time":{
-        "metric":"rpc.rpc.openScanner_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner_num_ops":{
-        "metric":"rpc.rpc.openScanner_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.put.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.put.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put_avg_time":{
-        "metric":"rpc.rpc.put_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put_num_ops":{
-        "metric":"rpc.rpc.put_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerReport_avg_time":{
-        "metric":"rpc.rpc.regionServerReport_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerReport_num_ops":{
-        "metric":"rpc.rpc.regionServerReport_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerStartup_avg_time":{
-        "metric":"rpc.rpc.regionServerStartup_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerStartup_num_ops":{
-        "metric":"rpc.rpc.regionServerStartup_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.removeFromOnlineRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.removeFromOnlineRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/removeFromOnlineRegions_avg_time":{
-        "metric":"rpc.rpc.removeFromOnlineRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/removeFromOnlineRegions_num_ops":{
-        "metric":"rpc.rpc.removeFromOnlineRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.replicateLogEntries.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.replicateLogEntries.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries_avg_time":{
-        "metric":"rpc.rpc.replicateLogEntries_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries_num_ops":{
-        "metric":"rpc.rpc.replicateLogEntries_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/reportRSFatalError_avg_time":{
-        "metric":"rpc.rpc.reportRSFatalError_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/reportRSFatalError_num_ops":{
-        "metric":"rpc.rpc.reportRSFatalError_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.rollHLogWriter.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.rollHLogWriter.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter_avg_time":{
-        "metric":"rpc.rpc.rollHLogWriter_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter_num_ops":{
-        "metric":"rpc.rpc.rollHLogWriter_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthenticationFailures":{
-        "metric":"rpc.rpc.rpcAuthenticationFailures",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthenticationSuccesses":{
-        "metric":"rpc.rpc.rpcAuthenticationSuccesses",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthorizationFailures":{
-        "metric":"rpc.rpc.rpcAuthorizationFailures",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthorizationSuccesses":{
-        "metric":"rpc.rpc.rpcAuthorizationSuccesses",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/shutdown_avg_time":{
-        "metric":"rpc.rpc.shutdown_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/shutdown_num_ops":{
-        "metric":"rpc.rpc.shutdown_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.splitRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.splitRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion_avg_time":{
-        "metric":"rpc.rpc.splitRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion_num_ops":{
-        "metric":"rpc.rpc.splitRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.stop.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.stop.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stopMaster_avg_time":{
-        "metric":"rpc.rpc.stopMaster_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stopMaster_num_ops":{
-        "metric":"rpc.rpc.stopMaster_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop_avg_time":{
-        "metric":"rpc.rpc.stop_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop_num_ops":{
-        "metric":"rpc.rpc.stop_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unassign_avg_time":{
-        "metric":"rpc.rpc.unassign_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unassign_num_ops":{
-        "metric":"rpc.rpc.unassign_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.unlockRow.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.unlockRow.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow_avg_time":{
-        "metric":"rpc.rpc.unlockRow_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow_num_ops":{
-        "metric":"rpc.rpc.unlockRow_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/ugi/loginFailure_avg_time":{
-        "metric":"ugi.ugi.loginFailure_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/ugi/loginFailure_num_ops":{
-        "metric":"ugi.ugi.loginFailure_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/ugi/loginSuccess_avg_time":{
-        "metric":"ugi.ugi.loginSuccess_avg_time",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/ugi/loginSuccess_num_ops":{
-        "metric":"ugi.ugi.loginSuccess_num_ops",
-        "pointInTime":false,
-        "temporal":true
-      }
-    },
-
-    "HBASE_MASTER":{
-
-      "metrics/boottime":{
-        "metric":"boottime",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_aidle":{
-        "metric":"cpu_aidle",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_idle":{
-        "metric":"cpu_idle",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_nice":{
-        "metric":"cpu_nice",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_num":{
-        "metric":"cpu_num",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_speed":{
-        "metric":"cpu_speed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_system":{
-        "metric":"cpu_system",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_user":{
-        "metric":"cpu_user",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_wio":{
-        "metric":"cpu_wio",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/disk_free":{
-        "metric":"disk_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/disk_total":{
-        "metric":"disk_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/part_max_used":{
-        "metric":"part_max_used",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/master/cluster_requests":{
-        "metric":"hbase.master.cluster_requests",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/hbase/master/splitSize_avg_time":{
-        "metric":"hbase.master.splitSize_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/master/splitSize_num_ops":{
-        "metric":"hbase.master.splitSize_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/master/splitTime_avg_time":{
-        "metric":"hbase.master.splitTime_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/master/splitTime_num_ops":{
-        "metric":"hbase.master.splitTime_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/gcCount":{
-        "metric":"jvm.metrics.gcCount",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/gcTimeMillis":{
-        "metric":"jvm.metrics.gcTimeMillis",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/logError":{
-        "metric":"jvm.metrics.logError",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/logFatal":{
-        "metric":"jvm.metrics.logFatal",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/logInfo":{
-        "metric":"jvm.metrics.logInfo",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/logWarn":{
-        "metric":"jvm.metrics.logWarn",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/maxMemoryM":{
-        "metric":"jvm.metrics.maxMemoryM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/memHeapCommittedM":{
-        "metric":"jvm.metrics.memHeapCommittedM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/memHeapUsedM":{
-        "metric":"jvm.metrics.memHeapUsedM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/memNonHeapCommittedM":{
-        "metric":"jvm.metrics.memNonHeapCommittedM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/memNonHeapUsedM":{
-        "metric":"jvm.metrics.memNonHeapUsedM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsBlocked":{
-        "metric":"jvm.metrics.threadsBlocked",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsNew":{
-        "metric":"jvm.metrics.threadsNew",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsRunnable":{
-        "metric":"jvm.metrics.threadsRunnable",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsTerminated":{
-        "metric":"jvm.metrics.threadsTerminated",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsTimedWaiting":{
-        "metric":"jvm.metrics.threadsTimedWaiting",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsWaiting":{
-        "metric":"jvm.metrics.threadsWaiting",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_fifteen":{
-        "metric":"load_fifteen",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_five":{
-        "metric":"load_five",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_one":{
-        "metric":"load_one",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_buffers":{
-        "metric":"mem_buffers",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_cached":{
-        "metric":"mem_cached",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_free":{
-        "metric":"mem_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_shared":{
-        "metric":"mem_shared",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_total":{
-        "metric":"mem_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/swap_free":{
-        "metric":"swap_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/swap_total":{
-        "metric":"swap_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/bytes_in":{
-        "metric":"bytes_in",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/bytes_out":{
-        "metric":"bytes_out",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/pkts_in":{
-        "metric":"pkts_in",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/pkts_out":{
-        "metric":"pkts_out",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/process/proc_run":{
-        "metric":"proc_run",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/process/proc_total":{
-        "metric":"proc_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/NumOpenConnections":{
-        "metric":"rpc.rpc.NumOpenConnections",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/ReceivedBytes":{
-        "metric":"rpc.rpc.ReceivedBytes",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcProcessingTime_avg_time":{
-        "metric":"rpc.rpc.RpcProcessingTime_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcProcessingTime_num_ops":{
-        "metric":"rpc.rpc.RpcProcessingTime_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcQueueTime_avg_time":{
-        "metric":"rpc.rpc.RpcQueueTime_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcQueueTime_num_ops":{
-        "metric":"rpc.rpc.RpcQueueTime_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcSlowResponse_avg_time":{
-        "metric":"rpc.rpc.RpcSlowResponse_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcSlowResponse_num_ops":{
-        "metric":"rpc.rpc.RpcSlowResponse_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/SentBytes":{
-        "metric":"rpc.rpc.SentBytes",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/addColumn/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.addColumn.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addColumn/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.addColumn.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addColumn_avg_time":{
-        "metric":"rpc.rpc.addColumn_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addColumn_num_ops":{
-        "metric":"rpc.rpc.addColumn_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/assign/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.assign.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/assign/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.assign.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/assign_avg_time":{
-        "metric":"rpc.rpc.assign_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/assign_num_ops":{
-        "metric":"rpc.rpc.assign_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balance/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.balance.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balance/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.balance.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balanceSwitch/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.balanceSwitch.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balanceSwitch/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.balanceSwitch.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balanceSwitch_avg_time":{
-        "metric":"rpc.rpc.balanceSwitch_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balanceSwitch_num_ops":{
-        "metric":"rpc.rpc.balanceSwitch_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balance_avg_time":{
-        "metric":"rpc.rpc.balance_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balance_num_ops":{
-        "metric":"rpc.rpc.balance_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles_avg_time":{
-        "metric":"rpc.rpc.bulkLoadHFiles_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles_num_ops":{
-        "metric":"rpc.rpc.bulkLoadHFiles_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/callQueueLen":{
-        "metric":"rpc.rpc.callQueueLen",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete_avg_time":{
-        "metric":"rpc.rpc.checkAndDelete_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete_num_ops":{
-        "metric":"rpc.rpc.checkAndDelete_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut_avg_time":{
-        "metric":"rpc.rpc.checkAndPut_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut_num_ops":{
-        "metric":"rpc.rpc.checkAndPut_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion_avg_time":{
-        "metric":"rpc.rpc.closeRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion_num_ops":{
-        "metric":"rpc.rpc.closeRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close_avg_time":{
-        "metric":"rpc.rpc.close_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close_num_ops":{
-        "metric":"rpc.rpc.close_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion_avg_time":{
-        "metric":"rpc.rpc.compactRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion_num_ops":{
-        "metric":"rpc.rpc.compactRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/createTable/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.createTable.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/createTable/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.createTable.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/createTable_avg_time":{
-        "metric":"rpc.rpc.createTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/createTable_num_ops":{
-        "metric":"rpc.rpc.createTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteColumn/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.deleteColumn.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteColumn/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.deleteColumn.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteColumn_avg_time":{
-        "metric":"rpc.rpc.deleteColumn_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteColumn_num_ops":{
-        "metric":"rpc.rpc.deleteColumn_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteTable/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.deleteTable.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteTable/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.deleteTable.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteTable_avg_time":{
-        "metric":"rpc.rpc.deleteTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteTable_num_ops":{
-        "metric":"rpc.rpc.deleteTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete_avg_time":{
-        "metric":"rpc.rpc.delete_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete_num_ops":{
-        "metric":"rpc.rpc.delete_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/disableTable/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.disableTable.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/disableTable/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.disableTable.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/disableTable_avg_time":{
-        "metric":"rpc.rpc.disableTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/disableTable_num_ops":{
-        "metric":"rpc.rpc.disableTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/enableTable/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.enableTable.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/enableTable/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.enableTable.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/enableTable_avg_time":{
-        "metric":"rpc.rpc.enableTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/enableTable_num_ops":{
-        "metric":"rpc.rpc.enableTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor_avg_time":{
-        "metric":"rpc.rpc.execCoprocessor_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor_num_ops":{
-        "metric":"rpc.rpc.execCoprocessor_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists_avg_time":{
-        "metric":"rpc.rpc.exists_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists_num_ops":{
-        "metric":"rpc.rpc.exists_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion_avg_time":{
-        "metric":"rpc.rpc.flushRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion_num_ops":{
-        "metric":"rpc.rpc.flushRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getAlterStatus/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getAlterStatus.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getAlterStatus/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getAlterStatus.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getAlterStatus_avg_time":{
-        "metric":"rpc.rpc.getAlterStatus_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getAlterStatus_num_ops":{
-        "metric":"rpc.rpc.getAlterStatus_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries_avg_time":{
-        "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries_num_ops":{
-        "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore_avg_time":{
-        "metric":"rpc.rpc.getClosestRowBefore_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore_num_ops":{
-        "metric":"rpc.rpc.getClosestRowBefore_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClusterStatus/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getClusterStatus.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClusterStatus/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getClusterStatus.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClusterStatus_avg_time":{
-        "metric":"rpc.rpc.getClusterStatus_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClusterStatus_num_ops":{
-        "metric":"rpc.rpc.getClusterStatus_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo_avg_time":{
-        "metric":"rpc.rpc.getHServerInfo_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo_num_ops":{
-        "metric":"rpc.rpc.getHServerInfo_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHTableDescriptors/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getHTableDescriptors.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHTableDescriptors/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getHTableDescriptors.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHTableDescriptors_avg_time":{
-        "metric":"rpc.rpc.getHTableDescriptors_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHTableDescriptors_num_ops":{
-        "metric":"rpc.rpc.getHTableDescriptors_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions_avg_time":{
-        "metric":"rpc.rpc.getOnlineRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions_num_ops":{
-        "metric":"rpc.rpc.getOnlineRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getProtocolSignature.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getProtocolSignature.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature_avg_time":{
-        "metric":"rpc.rpc.getProtocolSignature_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature_num_ops":{
-        "metric":"rpc.rpc.getProtocolSignature_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getProtocolVersion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getProtocolVersion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion_avg_time":{
-        "metric":"rpc.rpc.getProtocolVersion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion_num_ops":{
-        "metric":"rpc.rpc.getProtocolVersion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo_avg_time":{
-        "metric":"rpc.rpc.getRegionInfo_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo_num_ops":{
-        "metric":"rpc.rpc.getRegionInfo_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get_avg_time":{
-        "metric":"rpc.rpc.get_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get_num_ops":{
-        "metric":"rpc.rpc.get_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue_avg_time":{
-        "metric":"rpc.rpc.incrementColumnValue_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue_num_ops":{
-        "metric":"rpc.rpc.incrementColumnValue_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment_avg_time":{
-        "metric":"rpc.rpc.increment_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment_num_ops":{
-        "metric":"rpc.rpc.increment_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isMasterRunning/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.isMasterRunning.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isMasterRunning/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.isMasterRunning.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isMasterRunning_avg_time":{
-        "metric":"rpc.rpc.isMasterRunning_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isMasterRunning_num_ops":{
-        "metric":"rpc.rpc.isMasterRunning_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow_avg_time":{
-        "metric":"rpc.rpc.lockRow_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow_num_ops":{
-        "metric":"rpc.rpc.lockRow_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyColumn/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.modifyColumn.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyColumn/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.modifyColumn.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyColumn_avg_time":{
-        "metric":"rpc.rpc.modifyColumn_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyColumn_num_ops":{
-        "metric":"rpc.rpc.modifyColumn_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyTable/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.modifyTable.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyTable/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.modifyTable.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyTable_avg_time":{
-        "metric":"rpc.rpc.modifyTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyTable_num_ops":{
-        "metric":"rpc.rpc.modifyTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/move/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.move.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/move/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.move.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/move_avg_time":{
-        "metric":"rpc.rpc.move_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/move_num_ops":{
-        "metric":"rpc.rpc.move_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi_avg_time":{
-        "metric":"rpc.rpc.multi_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi_num_ops":{
-        "metric":"rpc.rpc.multi_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next_avg_time":{
-        "metric":"rpc.rpc.next_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next_num_ops":{
-        "metric":"rpc.rpc.next_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/offline/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.offline.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/offline/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.offline.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/offline_avg_time":{
-        "metric":"rpc.rpc.offline_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/offline_num_ops":{
-        "metric":"rpc.rpc.offline_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion_avg_time":{
-        "metric":"rpc.rpc.openRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion_num_ops":{
-        "metric":"rpc.rpc.openRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions_avg_time":{
-        "metric":"rpc.rpc.openRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions_num_ops":{
-        "metric":"rpc.rpc.openRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner_avg_time":{
-        "metric":"rpc.rpc.openScanner_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner_num_ops":{
-        "metric":"rpc.rpc.openScanner_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put_avg_time":{
-        "metric":"rpc.rpc.put_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put_num_ops":{
-        "metric":"rpc.rpc.put_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerReport/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.regionServerReport.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerReport/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.regionServerReport.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerReport_avg_time":{
-        "metric":"rpc.rpc.regionServerReport_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerReport_num_ops":{
-        "metric":"rpc.rpc.regionServerReport_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerStartup/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.regionServerStartup.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerStartup/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.regionServerStartup.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerStartup_avg_time":{
-        "metric":"rpc.rpc.regionServerStartup_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerStartup_num_ops":{
-        "metric":"rpc.rpc.regionServerStartup_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries_avg_time":{
-        "metric":"rpc.rpc.replicateLogEntries_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries_num_ops":{
-        "metric":"rpc.rpc.replicateLogEntries_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/reportRSFatalError/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.reportRSFatalError.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/reportRSFatalError/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.reportRSFatalError.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/reportRSFatalError_avg_time":{
-        "metric":"rpc.rpc.reportRSFatalError_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/reportRSFatalError_num_ops":{
-        "metric":"rpc.rpc.reportRSFatalError_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter_avg_time":{
-        "metric":"rpc.rpc.rollHLogWriter_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter_num_ops":{
-        "metric":"rpc.rpc.rollHLogWriter_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthenticationFailures":{
-        "metric":"rpc.rpc.rpcAuthenticationFailures",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthenticationSuccesses":{
-        "metric":"rpc.rpc.rpcAuthenticationSuccesses",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthorizationFailures":{
-        "metric":"rpc.rpc.rpcAuthorizationFailures",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthorizationSuccesses":{
-        "metric":"rpc.rpc.rpcAuthorizationSuccesses",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/shutdown/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.shutdown.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/shutdown/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.shutdown.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/shutdown_avg_time":{
-        "metric":"rpc.rpc.shutdown_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/shutdown_num_ops":{
-        "metric":"rpc.rpc.shutdown_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion_avg_time":{
-        "metric":"rpc.rpc.splitRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion_num_ops":{
-        "metric":"rpc.rpc.splitRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stopMaster/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.stopMaster.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stopMaster/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.stopMaster.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stopMaster_avg_time":{
-        "metric":"rpc.rpc.stopMaster_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stopMaster_num_ops":{
-        "metric":"rpc.rpc.stopMaster_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop_avg_time":{
-        "metric":"rpc.rpc.stop_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop_num_ops":{
-        "metric":"rpc.rpc.stop_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unassign/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.unassign.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unassign/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.unassign.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unassign_avg_time":{
-        "metric":"rpc.rpc.unassign_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unassign_num_ops":{
-        "metric":"rpc.rpc.unassign_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow_avg_time":{
-        "metric":"rpc.rpc.unlockRow_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow_num_ops":{
-        "metric":"rpc.rpc.unlockRow_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      }
-    },
-
-    "HBASE_CLIENT":{
-
-      "metrics/boottime":{
-        "metric":"boottime",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_aidle":{
-        "metric":"cpu_aidle",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_idle":{
-        "metric":"cpu_idle",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_nice":{
-        "metric":"cpu_nice",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_num":{
-        "metric":"cpu_num",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_speed":{
-        "metric":"cpu_speed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_system":{
-        "metric":"cpu_system",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_user":{
-        "metric":"cpu_user",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_wio":{
-        "metric":"cpu_wio",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/disk_free":{
-        "metric":"disk_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/disk_total":{
-        "metric":"disk_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/part_max_used":{
-        "metric":"part_max_used",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/gcCount":{
-        "metric":"jvm.metrics.gcCount",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/gcTimeMillis":{
-        "metric":"jvm.metrics.gcTimeMillis",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/logError":{
-        "metric":"jvm.metrics.logError",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/logFatal":{
-        "metric":"jvm.metrics.logFatal",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/logInfo":{
-        "metric":"jvm.metrics.logInfo",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/logWarn":{
-        "metric":"jvm.metrics.logWarn",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/maxMemoryM":{
-        "metric":"jvm.metrics.maxMemoryM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/memHeapCommittedM":{
-        "metric":"jvm.metrics.memHeapCommittedM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/memHeapUsedM":{
-        "metric":"jvm.metrics.memHeapUsedM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/memNonHeapCommittedM":{
-        "metric":"jvm.metrics.memNonHeapCommittedM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/memNonHeapUsedM":{
-        "metric":"jvm.metrics.memNonHeapUsedM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsBlocked":{
-        "metric":"jvm.metrics.threadsBlocked",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsNew":{
-        "metric":"jvm.metrics.threadsNew",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsRunnable":{
-        "metric":"jvm.metrics.threadsRunnable",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsTerminated":{
-        "metric":"jvm.metrics.threadsTerminated",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsTimedWaiting":{
-        "metric":"jvm.metrics.threadsTimedWaiting",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsWaiting":{
-        "metric":"jvm.metrics.threadsWaiting",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_fifteen":{
-        "metric":"load_fifteen",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_five":{
-        "metric":"load_five",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_one":{
-        "metric":"load_one",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_buffers":{
-        "metric":"mem_buffers",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_cached":{
-        "metric":"mem_cached",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_free":{
-        "metric":"mem_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_shared":{
-        "metric":"mem_shared",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_total":{
-        "metric":"mem_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/swap_free":{
-        "metric":"swap_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/swap_total":{
-        "metric":"swap_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/bytes_in":{
-        "metric":"bytes_in",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/bytes_out":{
-        "metric":"bytes_out",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/pkts_in":{
-        "metric":"pkts_in",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/pkts_out":{
-        "metric":"pkts_out",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/process/proc_run":{
-        "metric":"proc_run",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/process/proc_total":{
-        "metric":"proc_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/NumOpenConnections":{
-        "metric":"rpc.rpc.NumOpenConnections",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/ReceivedBytes":{
-        "metric":"rpc.rpc.ReceivedBytes",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcProcessingTime_avg_time":{
-        "metric":"rpc.rpc.RpcProcessingTime_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcProcessingTime_num_ops":{
-        "metric":"rpc.rpc.RpcProcessingTime_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcQueueTime_avg_time":{
-        "metric":"rpc.rpc.RpcQueueTime_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcQueueTime_num_ops":{
-        "metric":"rpc.rpc.RpcQueueTime_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcSlowResponse_avg_time":{
-        "metric":"rpc.rpc.RpcSlowResponse_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcSlowResponse_num_ops":{
-        "metric":"rpc.rpc.RpcSlowResponse_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/SentBytes":{
-        "metric":"rpc.rpc.SentBytes",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/abort/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.abort.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/abort/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.abort.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/abort_avg_time":{
-        "metric":"rpc.rpc.abort_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/abort_num_ops":{
-        "metric":"rpc.rpc.abort_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addColumn_avg_time":{
-        "metric":"rpc.rpc.addColumn_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addColumn_num_ops":{
-        "metric":"rpc.rpc.addColumn_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addToOnlineRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.addToOnlineRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addToOnlineRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.addToOnlineRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addToOnlineRegions_avg_time":{
-        "metric":"rpc.rpc.addToOnlineRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addToOnlineRegions_num_ops":{
-        "metric":"rpc.rpc.addToOnlineRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/assign_avg_time":{
-        "metric":"rpc.rpc.assign_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/assign_num_ops":{
-        "metric":"rpc.rpc.assign_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balanceSwitch_avg_time":{
-        "metric":"rpc.rpc.balanceSwitch_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balanceSwitch_num_ops":{
-        "metric":"rpc.rpc.balanceSwitch_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balance_avg_time":{
-        "metric":"rpc.rpc.balance_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balance_num_ops":{
-        "metric":"rpc.rpc.balance_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.bulkLoadHFiles.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.bulkLoadHFiles.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles_avg_time":{
-        "metric":"rpc.rpc.bulkLoadHFiles_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles_num_ops":{
-        "metric":"rpc.rpc.bulkLoadHFiles_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/callQueueLen":{
-        "metric":"rpc.rpc.callQueueLen",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.checkAndDelete.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.checkAndDelete.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete_avg_time":{
-        "metric":"rpc.rpc.checkAndDelete_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete_num_ops":{
-        "metric":"rpc.rpc.checkAndDelete_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.checkAndPut.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.checkAndPut.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut_avg_time":{
-        "metric":"rpc.rpc.checkAndPut_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut_num_ops":{
-        "metric":"rpc.rpc.checkAndPut_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkOOME/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.checkOOME.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkOOME/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.checkOOME.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkOOME_avg_time":{
-        "metric":"rpc.rpc.checkOOME_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkOOME_num_ops":{
-        "metric":"rpc.rpc.checkOOME_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.close.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.close.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.closeRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.closeRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion_avg_time":{
-        "metric":"rpc.rpc.closeRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion_num_ops":{
-        "metric":"rpc.rpc.closeRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close_avg_time":{
-        "metric":"rpc.rpc.close_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close_num_ops":{
-        "metric":"rpc.rpc.close_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.compactRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.compactRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion_avg_time":{
-        "metric":"rpc.rpc.compactRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion_num_ops":{
-        "metric":"rpc.rpc.compactRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/createTable_avg_time":{
-        "metric":"rpc.rpc.createTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/createTable_num_ops":{
-        "metric":"rpc.rpc.createTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.delete.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.delete.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteColumn_avg_time":{
-        "metric":"rpc.rpc.deleteColumn_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteColumn_num_ops":{
-        "metric":"rpc.rpc.deleteColumn_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteTable_avg_time":{
-        "metric":"rpc.rpc.deleteTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteTable_num_ops":{
-        "metric":"rpc.rpc.deleteTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete_avg_time":{
-        "metric":"rpc.rpc.delete_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete_num_ops":{
-        "metric":"rpc.rpc.delete_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/disableTable_avg_time":{
-        "metric":"rpc.rpc.disableTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/disableTable_num_ops":{
-        "metric":"rpc.rpc.disableTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/enableTable_avg_time":{
-        "metric":"rpc.rpc.enableTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/enableTable_num_ops":{
-        "metric":"rpc.rpc.enableTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.execCoprocessor.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.execCoprocessor.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor_avg_time":{
-        "metric":"rpc.rpc.execCoprocessor_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor_num_ops":{
-        "metric":"rpc.rpc.execCoprocessor_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.exists.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.exists.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists_avg_time":{
-        "metric":"rpc.rpc.exists_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists_num_ops":{
-        "metric":"rpc.rpc.exists_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.flushRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.flushRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion_avg_time":{
-        "metric":"rpc.rpc.flushRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion_num_ops":{
-        "metric":"rpc.rpc.flushRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.get.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.get.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getAlterStatus_avg_time":{
-        "metric":"rpc.rpc.getAlterStatus_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getAlterStatus_num_ops":{
-        "metric":"rpc.rpc.getAlterStatus_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries_avg_time":{
-        "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries_num_ops":{
-        "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getCatalogTracker/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getCatalogTracker.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getCatalogTracker/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getCatalogTracker.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getCatalogTracker_avg_time":{
-        "metric":"rpc.rpc.getCatalogTracker_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getCatalogTracker_num_ops":{
-        "metric":"rpc.rpc.getCatalogTracker_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getClosestRowBefore.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getClosestRowBefore.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore_avg_time":{
-        "metric":"rpc.rpc.getClosestRowBefore_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore_num_ops":{
-        "metric":"rpc.rpc.getClosestRowBefore_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClusterStatus_avg_time":{
-        "metric":"rpc.rpc.getClusterStatus_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClusterStatus_num_ops":{
-        "metric":"rpc.rpc.getClusterStatus_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getConfiguration/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getConfiguration.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getConfiguration/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getConfiguration.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getConfiguration_avg_time":{
-        "metric":"rpc.rpc.getConfiguration_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getConfiguration_num_ops":{
-        "metric":"rpc.rpc.getConfiguration_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getFromOnlineRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getFromOnlineRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getFromOnlineRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getFromOnlineRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getFromOnlineRegions_avg_time":{
-        "metric":"rpc.rpc.getFromOnlineRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getFromOnlineRegions_num_ops":{
-        "metric":"rpc.rpc.getFromOnlineRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getHServerInfo.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getHServerInfo.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo_avg_time":{
-        "metric":"rpc.rpc.getHServerInfo_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo_num_ops":{
-        "metric":"rpc.rpc.getHServerInfo_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHTableDescriptors_avg_time":{
-        "metric":"rpc.rpc.getHTableDescriptors_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHTableDescriptors_num_ops":{
-        "metric":"rpc.rpc.getHTableDescriptors_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getOnlineRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getOnlineRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions_avg_time":{
-        "metric":"rpc.rpc.getOnlineRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions_num_ops":{
-        "metric":"rpc.rpc.getOnlineRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getProtocolSignature.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getProtocolSignature.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature_avg_time":{
-        "metric":"rpc.rpc.getProtocolSignature_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature_num_ops":{
-        "metric":"rpc.rpc.getProtocolSignature_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getProtocolVersion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getProtocolVersion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion_avg_time":{
-        "metric":"rpc.rpc.getProtocolVersion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion_num_ops":{
-        "metric":"rpc.rpc.getProtocolVersion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getRegionInfo.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getRegionInfo.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo_avg_time":{
-        "metric":"rpc.rpc.getRegionInfo_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo_num_ops":{
-        "metric":"rpc.rpc.getRegionInfo_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getServerName/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getServerName.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getServerName/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getServerName.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getServerName_avg_time":{
-        "metric":"rpc.rpc.getServerName_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getServerName_num_ops":{
-        "metric":"rpc.rpc.getServerName_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getZooKeeper/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getZooKeeper.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getZooKeeper/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getZooKeeper.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getZooKeeper_avg_time":{
-        "metric":"rpc.rpc.getZooKeeper_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getZooKeeper_num_ops":{
-        "metric":"rpc.rpc.getZooKeeper_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get_avg_time":{
-        "metric":"rpc.rpc.get_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get_num_ops":{
-        "metric":"rpc.rpc.get_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.increment.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.increment.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.incrementColumnValue.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.incrementColumnValue.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue_avg_time":{
-        "metric":"rpc.rpc.incrementColumnValue_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue_num_ops":{
-        "metric":"rpc.rpc.incrementColumnValue_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment_avg_time":{
-        "metric":"rpc.rpc.increment_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment_num_ops":{
-        "metric":"rpc.rpc.increment_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isAborted/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.isAborted.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isAborted/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.isAborted.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isAborted_avg_time":{
-        "metric":"rpc.rpc.isAborted_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isAborted_num_ops":{
-        "metric":"rpc.rpc.isAborted_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isMasterRunning_avg_time":{
-        "metric":"rpc.rpc.isMasterRunning_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isMasterRunning_num_ops":{
-        "metric":"rpc.rpc.isMasterRunning_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isStopped/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.isStopped.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isStopped/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.isStopped.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isStopped_avg_time":{
-        "metric":"rpc.rpc.isStopped_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isStopped_num_ops":{
-        "metric":"rpc.rpc.isStopped_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.lockRow.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.lockRow.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow_avg_time":{
-        "metric":"rpc.rpc.lockRow_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow_num_ops":{
-        "metric":"rpc.rpc.lockRow_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyColumn_avg_time":{
-        "metric":"rpc.rpc.modifyColumn_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyColumn_num_ops":{
-        "metric":"rpc.rpc.modifyColumn_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyTable_avg_time":{
-        "metric":"rpc.rpc.modifyTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyTable_num_ops":{
-        "metric":"rpc.rpc.modifyTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/move_avg_time":{
-        "metric":"rpc.rpc.move_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/move_num_ops":{
-        "metric":"rpc.rpc.move_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.multi.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.multi.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi_avg_time":{
-        "metric":"rpc.rpc.multi_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi_num_ops":{
-        "metric":"rpc.rpc.multi_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.next.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.next.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next_avg_time":{
-        "metric":"rpc.rpc.next_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next_num_ops":{
-        "metric":"rpc.rpc.next_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/offline_avg_time":{
-        "metric":"rpc.rpc.offline_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/offline_num_ops":{
-        "metric":"rpc.rpc.offline_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.openRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.openRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion_avg_time":{
-        "metric":"rpc.rpc.openRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion_num_ops":{
-        "metric":"rpc.rpc.openRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.openRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.openRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions_avg_time":{
-        "metric":"rpc.rpc.openRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions_num_ops":{
-        "metric":"rpc.rpc.openRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.openScanner.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.openScanner.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner_avg_time":{
-        "metric":"rpc.rpc.openScanner_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner_num_ops":{
-        "metric":"rpc.rpc.openScanner_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.put.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.put.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put_avg_time":{
-        "metric":"rpc.rpc.put_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put_num_ops":{
-        "metric":"rpc.rpc.put_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerReport_avg_time":{
-        "metric":"rpc.rpc.regionServerReport_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerReport_num_ops":{
-        "metric":"rpc.rpc.regionServerReport_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerStartup_avg_time":{
-        "metric":"rpc.rpc.regionServerStartup_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerStartup_num_ops":{
-        "metric":"rpc.rpc.regionServerStartup_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.removeFromOnlineRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.removeFromOnlineRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/removeFromOnlineRegions_avg_time":{
-        "metric":"rpc.rpc.removeFromOnlineRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/removeFromOnlineRegions_num_ops":{
-        "metric":"rpc.rpc.removeFromOnlineRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.replicateLogEntries.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.replicateLogEntries.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries_avg_time":{
-        "metric":"rpc.rpc.replicateLogEntries_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries_num_ops":{
-        "metric":"rpc.rpc.replicateLogEntries_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/reportRSFatalError_avg_time":{
-        "metric":"rpc.rpc.reportRSFatalError_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/reportRSFatalError_num_ops":{
-        "metric":"rpc.rpc.reportRSFatalError_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.rollHLogWriter.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.rollHLogWriter.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter_avg_time":{
-        "metric":"rpc.rpc.rollHLogWriter_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter_num_ops":{
-        "metric":"rpc.rpc.rollHLogWriter_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthenticationFailures":{
-        "metric":"rpc.rpc.rpcAuthenticationFailures",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthenticationSuccesses":{
-        "metric":"rpc.rpc.rpcAuthenticationSuccesses",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthorizationFailures":{
-        "metric":"rpc.rpc.rpcAuthorizationFailures",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthorizationSuccesses":{
-        "metric":"rpc.rpc.rpcAuthorizationSuccesses",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/shutdown_avg_time":{
-        "metric":"rpc.rpc.shutdown_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/shutdown_num_ops":{
-        "metric":"rpc.rpc.shutdown_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.splitRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.splitRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion_avg_time":{
-        "metric":"rpc.rpc.splitRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion_num_ops":{
-        "metric":"rpc.rpc.splitRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.stop.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.stop.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stopMaster_avg_time":{
-        "metric":"rpc.rpc.stopMaster_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stopMaster_num_ops":{
-        "metric":"rpc.rpc.stopMaster_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop_avg_time":{
-        "metric":"rpc.rpc.stop_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop_num_ops":{
-        "metric":"rpc.rpc.stop_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unassign_avg_time":{
-        "metric":"rpc.rpc.unassign_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unassign_num_ops":{
-        "metric":"rpc.rpc.unassign_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.unlockRow.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.unlockRow.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow_avg_time":{
-        "metric":"rpc.rpc.unlockRow_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow_num_ops":{
-        "metric":"rpc.rpc.unlockRow_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/ugi/loginFailure_avg_time":{
-        "metric":"ugi.ugi.loginFailure_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/ugi/loginFailure_num_ops":{
-        "metric":"ugi.ugi.loginFailure_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/ugi/loginSuccess_avg_time":{
-        "metric":"ugi.ugi.loginSuccess_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/ugi/loginSuccess_num_ops":{
-        "metric":"ugi.ugi.loginSuccess_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      }
-    },
-
-    "HBASE_REGIONSERVER":{
-
-      "metrics/boottime":{
-        "metric":"boottime",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_aidle":{
-        "metric":"cpu_aidle",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_idle":{
-        "metric":"cpu_idle",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_nice":{
-        "metric":"cpu_nice",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_num":{
-        "metric":"cpu_num",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_speed":{
-        "metric":"cpu_speed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_system":{
-        "metric":"cpu_system",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_user":{
-        "metric":"cpu_user",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_wio":{
-        "metric":"cpu_wio",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/disk_free":{
-        "metric":"disk_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/disk_total":{
-        "metric":"disk_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/part_max_used":{
-        "metric":"part_max_used",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/blockCacheCount":{
-        "metric":"hbase.regionserver.blockCacheCount",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/blockCacheEvictedCount":{
-        "metric":"hbase.regionserver.blockCacheEvictedCount",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/blockCacheFree":{
-        "metric":"hbase.regionserver.blockCacheFree",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/blockCacheHitCachingRatio":{
-        "metric":"hbase.regionserver.blockCacheHitCachingRatio",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/blockCacheHitCount":{
-        "metric":"hbase.regionserver.blockCacheHitCount",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/blockCacheHitRatio":{
-        "metric":"hbase.regionserver.blockCacheHitRatio",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/blockCacheMissCount":{
-        "metric":"hbase.regionserver.blockCacheMissCount",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/blockCacheSize":{
-        "metric":"hbase.regionserver.blockCacheSize",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/compactionQueueSize":{
-        "metric":"hbase.regionserver.compactionQueueSize",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/compactionSize_avg_time":{
-        "metric":"hbase.regionserver.compactionSize_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/compactionSize_num_ops":{
-        "metric":"hbase.regionserver.compactionSize_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/compactionTime_avg_time":{
-        "metric":"hbase.regionserver.compactionTime_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/compactionTime_num_ops":{
-        "metric":"hbase.regionserver.compactionTime_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/deleteRequestLatency_75th_percentile":{
-        "metric":"hbase.regionserver.deleteRequestLatency_75th_percentile",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/deleteRequestLatency_95th_percentile":{
-        "metric":"hbase.regionserver.deleteRequestLatency_95th_percentile",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/deleteRequestLatency_99th_percentile":{
-        "metric":"hbase.regionserver.deleteRequestLatency_99th_percentile",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/deleteRequestLatency_max":{
-        "metric":"hbase.regionserver.deleteRequestLatency_max",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/deleteRequestLatency_mean":{
-        "metric":"hbase.regionserver.deleteRequestLatency_mean",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/deleteRequestLatency_median":{
-        "metric":"hbase.regionserver.deleteRequestLatency_median",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/deleteRequestLatency_min":{
-        "metric":"hbase.regionserver.deleteRequestLatency_min",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/deleteRequestLatency_num_ops":{
-        "metric":"hbase.regionserver.deleteRequestLatency_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/deleteRequestLatency_std_dev":{
-        "metric":"hbase.regionserver.deleteRequestLatency_std_dev",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/flushQueueSize":{
-        "metric":"hbase.regionserver.flushQueueSize",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/flushSize_avg_time":{
-        "metric":"hbase.regionserver.flushSize_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/flushSize_num_ops":{
-        "metric":"hbase.regionserver.flushSize_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/flushTime_avg_time":{
-        "metric":"hbase.regionserver.flushTime_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/flushTime_num_ops":{
-        "metric":"hbase.regionserver.flushTime_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsReadLatencyHistogram_75th_percentile":{
-        "metric":"hbase.regionserver.fsReadLatencyHistogram_75th_percentile",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsReadLatencyHistogram_95th_percentile":{
-        "metric":"hbase.regionserver.fsReadLatencyHistogram_95th_percentile",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsReadLatencyHistogram_99th_percentile":{
-        "metric":"hbase.regionserver.fsReadLatencyHistogram_99th_percentile",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsReadLatencyHistogram_max":{
-        "metric":"hbase.regionserver.fsReadLatencyHistogram_max",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsReadLatencyHistogram_mean":{
-        "metric":"hbase.regionserver.fsReadLatencyHistogram_mean",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsReadLatencyHistogram_median":{
-        "metric":"hbase.regionserver.fsReadLatencyHistogram_median",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsReadLatencyHistogram_min":{
-        "metric":"hbase.regionserver.fsReadLatencyHistogram_min",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsReadLatencyHistogram_num_ops":{
-        "metric":"hbase.regionserver.fsReadLatencyHistogram_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsReadLatencyHistogram_std_dev":{
-        "metric":"hbase.regionserver.fsReadLatencyHistogram_std_dev",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsReadLatency_avg_time":{
-        "metric":"hbase.regionserver.fsReadLatency_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsReadLatency_num_ops":{
-        "metric":"hbase.regionserver.fsReadLatency_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsSyncLatency_avg_time":{
-        "metric":"hbase.regionserver.fsSyncLatency_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsSyncLatency_num_ops":{
-        "metric":"hbase.regionserver.fsSyncLatency_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsWriteLatencyHistogram_75th_percentile":{
-        "metric":"hbase.regionserver.fsWriteLatencyHistogram_75th_percentile",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsWriteLatencyHistogram_95th_percentile":{
-        "metric":"hbase.regionserver.fsWriteLatencyHistogram_95th_percentile",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsWriteLatencyHistogram_99th_percentile":{
-        "metric":"hbase.regionserver.fsWriteLatencyHistogram_99th_percentile",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsWriteLatencyHistogram_max":{
-        "metric":"hbase.regionserver.fsWriteLatencyHistogram_max",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsWriteLatencyHistogram_mean":{
-        "metric":"hbase.regionserver.fsWriteLatencyHistogram_mean",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsWriteLatencyHistogram_median":{
-        "metric":"hbase.regionserver.fsWriteLatencyHistogram_median",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsWriteLatencyHistogram_min":{
-        "metric":"hbase.regionserver.fsWriteLatencyHistogram_min",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsWriteLatencyHistogram_num_ops":{
-        "metric":"hbase.regionserver.fsWriteLatencyHistogram_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsWriteLatencyHistogram_std_dev":{
-        "metric":"hbase.regionserver.fsWriteLatencyHistogram_std_dev",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsWriteLatency_avg_time":{
-        "metric":"hbase.regionserver.fsWriteLatency_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/fsWriteLatency_num_ops":{
-        "metric":"hbase.regionserver.fsWriteLatency_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/getRequestLatency_75th_percentile":{
-        "metric":"hbase.regionserver.getRequestLatency_75th_percentile",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/getRequestLatency_95th_percentile":{
-        "metric":"hbase.regionserver.getRequestLatency_95th_percentile",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/getRequestLatency_99th_percentile":{
-        "metric":"hbase.regionserver.getRequestLatency_99th_percentile",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/getRequestLatency_max":{
-        "metric":"hbase.regionserver.getRequestLatency_max",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/getRequestLatency_mean":{
-        "metric":"hbase.regionserver.getRequestLatency_mean",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/getRequestLatency_median":{
-        "metric":"hbase.regionserver.getRequestLatency_median",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/getRequestLatency_min":{
-        "metric":"hbase.regionserver.getRequestLatency_min",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/getRequestLatency_num_ops":{
-        "metric":"hbase.regionserver.getRequestLatency_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/getRequestLatency_std_dev":{
-        "metric":"hbase.regionserver.getRequestLatency_std_dev",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/hdfsBlocksLocalityIndex":{
-        "metric":"hbase.regionserver.hdfsBlocksLocalityIndex",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/hlogFileCount":{
-        "metric":"hbase.regionserver.hlogFileCount",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/memstoreSizeMB":{
-        "metric":"hbase.regionserver.memstoreSizeMB",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/putRequestLatency_75th_percentile":{
-        "metric":"hbase.regionserver.putRequestLatency_75th_percentile",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/putRequestLatency_95th_percentile":{
-        "metric":"hbase.regionserver.putRequestLatency_95th_percentile",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/putRequestLatency_99th_percentile":{
-        "metric":"hbase.regionserver.putRequestLatency_99th_percentile",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/putRequestLatency_max":{
-        "metric":"hbase.regionserver.putRequestLatency_max",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/putRequestLatency_mean":{
-        "metric":"hbase.regionserver.putRequestLatency_mean",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/putRequestLatency_median":{
-        "metric":"hbase.regionserver.putRequestLatency_median",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/putRequestLatency_min":{
-        "metric":"hbase.regionserver.putRequestLatency_min",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/putRequestLatency_num_ops":{
-        "metric":"hbase.regionserver.putRequestLatency_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/putRequestLatency_std_dev":{
-        "metric":"hbase.regionserver.putRequestLatency_std_dev",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/readRequestsCount":{
-        "metric":"hbase.regionserver.readRequestsCount",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/regions":{
-        "metric":"hbase.regionserver.regions",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/requests":{
-        "metric":"hbase.regionserver.requests",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/rootIndexSizeKB":{
-        "metric":"hbase.regionserver.rootIndexSizeKB",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/storefileIndexSizeMB":{
-        "metric":"hbase.regionserver.storefileIndexSizeMB",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/storefiles":{
-        "metric":"hbase.regionserver.storefiles",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/stores":{
-        "metric":"hbase.regionserver.stores",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/totalStaticBloomSizeKB":{
-        "metric":"hbase.regionserver.totalStaticBloomSizeKB",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/totalStaticIndexSizeKB":{
-        "metric":"hbase.regionserver.totalStaticIndexSizeKB",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/hbase/regionserver/writeRequestsCount":{
-        "metric":"hbase.regionserver.writeRequestsCount",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/gcCount":{
-        "metric":"jvm.metrics.gcCount",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/gcTimeMillis":{
-        "metric":"jvm.metrics.gcTimeMillis",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/logError":{
-        "metric":"jvm.metrics.logError",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/logFatal":{
-        "metric":"jvm.metrics.logFatal",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/logInfo":{
-        "metric":"jvm.metrics.logInfo",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/logWarn":{
-        "metric":"jvm.metrics.logWarn",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/maxMemoryM":{
-        "metric":"jvm.metrics.maxMemoryM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/memHeapCommittedM":{
-        "metric":"jvm.metrics.memHeapCommittedM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/memHeapUsedM":{
-        "metric":"jvm.metrics.memHeapUsedM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/memNonHeapCommittedM":{
-        "metric":"jvm.metrics.memNonHeapCommittedM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/memNonHeapUsedM":{
-        "metric":"jvm.metrics.memNonHeapUsedM",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsBlocked":{
-        "metric":"jvm.metrics.threadsBlocked",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsNew":{
-        "metric":"jvm.metrics.threadsNew",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsRunnable":{
-        "metric":"jvm.metrics.threadsRunnable",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsTerminated":{
-        "metric":"jvm.metrics.threadsTerminated",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsTimedWaiting":{
-        "metric":"jvm.metrics.threadsTimedWaiting",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/jvm/threadsWaiting":{
-        "metric":"jvm.metrics.threadsWaiting",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_fifteen":{
-        "metric":"load_fifteen",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_five":{
-        "metric":"load_five",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/load/load_one":{
-        "metric":"load_one",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_buffers":{
-        "metric":"mem_buffers",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_cached":{
-        "metric":"mem_cached",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_free":{
-        "metric":"mem_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_shared":{
-        "metric":"mem_shared",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_total":{
-        "metric":"mem_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/swap_free":{
-        "metric":"swap_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/swap_total":{
-        "metric":"swap_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/bytes_in":{
-        "metric":"bytes_in",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/bytes_out":{
-        "metric":"bytes_out",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/pkts_in":{
-        "metric":"pkts_in",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/pkts_out":{
-        "metric":"pkts_out",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/process/proc_run":{
-        "metric":"proc_run",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/process/proc_total":{
-        "metric":"proc_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/NumOpenConnections":{
-        "metric":"rpc.rpc.NumOpenConnections",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/ReceivedBytes":{
-        "metric":"rpc.rpc.ReceivedBytes",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcProcessingTime_avg_time":{
-        "metric":"rpc.rpc.RpcProcessingTime_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcProcessingTime_num_ops":{
-        "metric":"rpc.rpc.RpcProcessingTime_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcQueueTime_avg_time":{
-        "metric":"rpc.rpc.RpcQueueTime_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcQueueTime_num_ops":{
-        "metric":"rpc.rpc.RpcQueueTime_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcSlowResponse_avg_time":{
-        "metric":"rpc.rpc.RpcSlowResponse_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/RpcSlowResponse_num_ops":{
-        "metric":"rpc.rpc.RpcSlowResponse_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/SentBytes":{
-        "metric":"rpc.rpc.SentBytes",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/abort/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.abort.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/abort/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.abort.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/abort_avg_time":{
-        "metric":"rpc.rpc.abort_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/abort_num_ops":{
-        "metric":"rpc.rpc.abort_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addColumn_avg_time":{
-        "metric":"rpc.rpc.addColumn_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addColumn_num_ops":{
-        "metric":"rpc.rpc.addColumn_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addToOnlineRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.addToOnlineRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addToOnlineRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.addToOnlineRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addToOnlineRegions_avg_time":{
-        "metric":"rpc.rpc.addToOnlineRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/addToOnlineRegions_num_ops":{
-        "metric":"rpc.rpc.addToOnlineRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/assign_avg_time":{
-        "metric":"rpc.rpc.assign_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/assign_num_ops":{
-        "metric":"rpc.rpc.assign_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balanceSwitch_avg_time":{
-        "metric":"rpc.rpc.balanceSwitch_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balanceSwitch_num_ops":{
-        "metric":"rpc.rpc.balanceSwitch_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balance_avg_time":{
-        "metric":"rpc.rpc.balance_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/balance_num_ops":{
-        "metric":"rpc.rpc.balance_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.bulkLoadHFiles.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.bulkLoadHFiles.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles_avg_time":{
-        "metric":"rpc.rpc.bulkLoadHFiles_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/bulkLoadHFiles_num_ops":{
-        "metric":"rpc.rpc.bulkLoadHFiles_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/callQueueLen":{
-        "metric":"rpc.rpc.callQueueLen",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.checkAndDelete.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.checkAndDelete.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete_avg_time":{
-        "metric":"rpc.rpc.checkAndDelete_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndDelete_num_ops":{
-        "metric":"rpc.rpc.checkAndDelete_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.checkAndPut.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.checkAndPut.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut_avg_time":{
-        "metric":"rpc.rpc.checkAndPut_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkAndPut_num_ops":{
-        "metric":"rpc.rpc.checkAndPut_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkOOME/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.checkOOME.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkOOME/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.checkOOME.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkOOME_avg_time":{
-        "metric":"rpc.rpc.checkOOME_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/checkOOME_num_ops":{
-        "metric":"rpc.rpc.checkOOME_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.close.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.close.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.closeRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.closeRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion_avg_time":{
-        "metric":"rpc.rpc.closeRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/closeRegion_num_ops":{
-        "metric":"rpc.rpc.closeRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close_avg_time":{
-        "metric":"rpc.rpc.close_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/close_num_ops":{
-        "metric":"rpc.rpc.close_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.compactRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.compactRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion_avg_time":{
-        "metric":"rpc.rpc.compactRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/compactRegion_num_ops":{
-        "metric":"rpc.rpc.compactRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/createTable_avg_time":{
-        "metric":"rpc.rpc.createTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/createTable_num_ops":{
-        "metric":"rpc.rpc.createTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.delete.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.delete.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteColumn_avg_time":{
-        "metric":"rpc.rpc.deleteColumn_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteColumn_num_ops":{
-        "metric":"rpc.rpc.deleteColumn_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteTable_avg_time":{
-        "metric":"rpc.rpc.deleteTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/deleteTable_num_ops":{
-        "metric":"rpc.rpc.deleteTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete_avg_time":{
-        "metric":"rpc.rpc.delete_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/delete_num_ops":{
-        "metric":"rpc.rpc.delete_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/disableTable_avg_time":{
-        "metric":"rpc.rpc.disableTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/disableTable_num_ops":{
-        "metric":"rpc.rpc.disableTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/enableTable_avg_time":{
-        "metric":"rpc.rpc.enableTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/enableTable_num_ops":{
-        "metric":"rpc.rpc.enableTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.execCoprocessor.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.execCoprocessor.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor_avg_time":{
-        "metric":"rpc.rpc.execCoprocessor_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/execCoprocessor_num_ops":{
-        "metric":"rpc.rpc.execCoprocessor_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.exists.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.exists.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists_avg_time":{
-        "metric":"rpc.rpc.exists_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/exists_num_ops":{
-        "metric":"rpc.rpc.exists_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.flushRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.flushRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion_avg_time":{
-        "metric":"rpc.rpc.flushRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/flushRegion_num_ops":{
-        "metric":"rpc.rpc.flushRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.get.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.get.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getAlterStatus_avg_time":{
-        "metric":"rpc.rpc.getAlterStatus_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getAlterStatus_num_ops":{
-        "metric":"rpc.rpc.getAlterStatus_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries_avg_time":{
-        "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummaries_num_ops":{
-        "metric":"rpc.rpc.getBlockCacheColumnFamilySummaries_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getCatalogTracker/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getCatalogTracker.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getCatalogTracker/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getCatalogTracker.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getCatalogTracker_avg_time":{
-        "metric":"rpc.rpc.getCatalogTracker_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getCatalogTracker_num_ops":{
-        "metric":"rpc.rpc.getCatalogTracker_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getClosestRowBefore.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getClosestRowBefore.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore_avg_time":{
-        "metric":"rpc.rpc.getClosestRowBefore_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClosestRowBefore_num_ops":{
-        "metric":"rpc.rpc.getClosestRowBefore_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClusterStatus_avg_time":{
-        "metric":"rpc.rpc.getClusterStatus_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getClusterStatus_num_ops":{
-        "metric":"rpc.rpc.getClusterStatus_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getConfiguration/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getConfiguration.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getConfiguration/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getConfiguration.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getConfiguration_avg_time":{
-        "metric":"rpc.rpc.getConfiguration_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getConfiguration_num_ops":{
-        "metric":"rpc.rpc.getConfiguration_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getFromOnlineRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getFromOnlineRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getFromOnlineRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getFromOnlineRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getFromOnlineRegions_avg_time":{
-        "metric":"rpc.rpc.getFromOnlineRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getFromOnlineRegions_num_ops":{
-        "metric":"rpc.rpc.getFromOnlineRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getHServerInfo.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getHServerInfo.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo_avg_time":{
-        "metric":"rpc.rpc.getHServerInfo_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHServerInfo_num_ops":{
-        "metric":"rpc.rpc.getHServerInfo_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHTableDescriptors_avg_time":{
-        "metric":"rpc.rpc.getHTableDescriptors_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getHTableDescriptors_num_ops":{
-        "metric":"rpc.rpc.getHTableDescriptors_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getOnlineRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getOnlineRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions_avg_time":{
-        "metric":"rpc.rpc.getOnlineRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getOnlineRegions_num_ops":{
-        "metric":"rpc.rpc.getOnlineRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getProtocolSignature.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getProtocolSignature.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature_avg_time":{
-        "metric":"rpc.rpc.getProtocolSignature_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolSignature_num_ops":{
-        "metric":"rpc.rpc.getProtocolSignature_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getProtocolVersion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getProtocolVersion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion_avg_time":{
-        "metric":"rpc.rpc.getProtocolVersion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getProtocolVersion_num_ops":{
-        "metric":"rpc.rpc.getProtocolVersion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getRegionInfo.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getRegionInfo.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo_avg_time":{
-        "metric":"rpc.rpc.getRegionInfo_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getRegionInfo_num_ops":{
-        "metric":"rpc.rpc.getRegionInfo_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getServerName/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getServerName.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getServerName/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getServerName.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getServerName_avg_time":{
-        "metric":"rpc.rpc.getServerName_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getServerName_num_ops":{
-        "metric":"rpc.rpc.getServerName_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getZooKeeper/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.getZooKeeper.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getZooKeeper/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.getZooKeeper.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getZooKeeper_avg_time":{
-        "metric":"rpc.rpc.getZooKeeper_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/getZooKeeper_num_ops":{
-        "metric":"rpc.rpc.getZooKeeper_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get_avg_time":{
-        "metric":"rpc.rpc.get_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/get_num_ops":{
-        "metric":"rpc.rpc.get_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.increment.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.increment.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.incrementColumnValue.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.incrementColumnValue.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue_avg_time":{
-        "metric":"rpc.rpc.incrementColumnValue_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/incrementColumnValue_num_ops":{
-        "metric":"rpc.rpc.incrementColumnValue_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment_avg_time":{
-        "metric":"rpc.rpc.increment_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/increment_num_ops":{
-        "metric":"rpc.rpc.increment_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isAborted/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.isAborted.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isAborted/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.isAborted.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isAborted_avg_time":{
-        "metric":"rpc.rpc.isAborted_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isAborted_num_ops":{
-        "metric":"rpc.rpc.isAborted_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isMasterRunning_avg_time":{
-        "metric":"rpc.rpc.isMasterRunning_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isMasterRunning_num_ops":{
-        "metric":"rpc.rpc.isMasterRunning_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isStopped/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.isStopped.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isStopped/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.isStopped.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isStopped_avg_time":{
-        "metric":"rpc.rpc.isStopped_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/isStopped_num_ops":{
-        "metric":"rpc.rpc.isStopped_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.lockRow.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.lockRow.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow_avg_time":{
-        "metric":"rpc.rpc.lockRow_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/lockRow_num_ops":{
-        "metric":"rpc.rpc.lockRow_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyColumn_avg_time":{
-        "metric":"rpc.rpc.modifyColumn_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyColumn_num_ops":{
-        "metric":"rpc.rpc.modifyColumn_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyTable_avg_time":{
-        "metric":"rpc.rpc.modifyTable_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/modifyTable_num_ops":{
-        "metric":"rpc.rpc.modifyTable_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/move_avg_time":{
-        "metric":"rpc.rpc.move_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/move_num_ops":{
-        "metric":"rpc.rpc.move_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.multi.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.multi.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi_avg_time":{
-        "metric":"rpc.rpc.multi_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/multi_num_ops":{
-        "metric":"rpc.rpc.multi_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.next.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.next.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next_avg_time":{
-        "metric":"rpc.rpc.next_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/next_num_ops":{
-        "metric":"rpc.rpc.next_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/offline_avg_time":{
-        "metric":"rpc.rpc.offline_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/offline_num_ops":{
-        "metric":"rpc.rpc.offline_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.openRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.openRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion_avg_time":{
-        "metric":"rpc.rpc.openRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegion_num_ops":{
-        "metric":"rpc.rpc.openRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.openRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.openRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions_avg_time":{
-        "metric":"rpc.rpc.openRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openRegions_num_ops":{
-        "metric":"rpc.rpc.openRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.openScanner.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.openScanner.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner_avg_time":{
-        "metric":"rpc.rpc.openScanner_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/openScanner_num_ops":{
-        "metric":"rpc.rpc.openScanner_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.put.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.put.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put_avg_time":{
-        "metric":"rpc.rpc.put_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/put_num_ops":{
-        "metric":"rpc.rpc.put_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerReport_avg_time":{
-        "metric":"rpc.rpc.regionServerReport_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerReport_num_ops":{
-        "metric":"rpc.rpc.regionServerReport_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerStartup_avg_time":{
-        "metric":"rpc.rpc.regionServerStartup_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/regionServerStartup_num_ops":{
-        "metric":"rpc.rpc.regionServerStartup_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.removeFromOnlineRegions.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.removeFromOnlineRegions.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/removeFromOnlineRegions_avg_time":{
-        "metric":"rpc.rpc.removeFromOnlineRegions_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/removeFromOnlineRegions_num_ops":{
-        "metric":"rpc.rpc.removeFromOnlineRegions_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.replicateLogEntries.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.replicateLogEntries.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries_avg_time":{
-        "metric":"rpc.rpc.replicateLogEntries_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/replicateLogEntries_num_ops":{
-        "metric":"rpc.rpc.replicateLogEntries_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/reportRSFatalError_avg_time":{
-        "metric":"rpc.rpc.reportRSFatalError_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/reportRSFatalError_num_ops":{
-        "metric":"rpc.rpc.reportRSFatalError_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.rollHLogWriter.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.rollHLogWriter.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter_avg_time":{
-        "metric":"rpc.rpc.rollHLogWriter_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rollHLogWriter_num_ops":{
-        "metric":"rpc.rpc.rollHLogWriter_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthenticationFailures":{
-        "metric":"rpc.rpc.rpcAuthenticationFailures",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthenticationSuccesses":{
-        "metric":"rpc.rpc.rpcAuthenticationSuccesses",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthorizationFailures":{
-        "metric":"rpc.rpc.rpcAuthorizationFailures",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/rpcAuthorizationSuccesses":{
-        "metric":"rpc.rpc.rpcAuthorizationSuccesses",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/shutdown_avg_time":{
-        "metric":"rpc.rpc.shutdown_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/shutdown_num_ops":{
-        "metric":"rpc.rpc.shutdown_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.splitRegion.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.splitRegion.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion_avg_time":{
-        "metric":"rpc.rpc.splitRegion_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/splitRegion_num_ops":{
-        "metric":"rpc.rpc.splitRegion_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.stop.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.stop.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stopMaster_avg_time":{
-        "metric":"rpc.rpc.stopMaster_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stopMaster_num_ops":{
-        "metric":"rpc.rpc.stopMaster_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop_avg_time":{
-        "metric":"rpc.rpc.stop_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/stop_num_ops":{
-        "metric":"rpc.rpc.stop_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unassign_avg_time":{
-        "metric":"rpc.rpc.unassign_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unassign_num_ops":{
-        "metric":"rpc.rpc.unassign_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow/aboveOneSec/_avg_time":{
-        "metric":"rpc.rpc.unlockRow.aboveOneSec._avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow/aboveOneSec/_num_ops":{
-        "metric":"rpc.rpc.unlockRow.aboveOneSec._num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow_avg_time":{
-        "metric":"rpc.rpc.unlockRow_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/rpc/unlockRow_num_ops":{
-        "metric":"rpc.rpc.unlockRow_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/ugi/loginFailure_avg_time":{
-        "metric":"ugi.ugi.loginFailure_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/ugi/loginFailure_num_ops":{
-        "metric":"ugi.ugi.loginFailure_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/ugi/loginSuccess_avg_time":{
-        "metric":"ugi.ugi.loginSuccess_avg_time",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/ugi/loginSuccess_num_ops":{
-        "metric":"ugi.ugi.loginSuccess_num_ops",
-        "pointInTime":true,
-        "temporal":true
-      }
-    }
-  }
-}
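
The deleted mapping above ties each Ambari API property path (e.g. "metrics/hbase/regionserver/requests") to an underlying metric name plus two flags: "pointInTime" (the value can be read as a current snapshot) and "temporal" (the value can be queried over a time range). A minimal sketch of loading such a definition map into a lookup table follows; it assumes Jackson databind on the classpath, and the class and field names are illustrative only, not Ambari's actual property-provider code.

// Editorial sketch (not Ambari source): parse one component's metric
// definitions from a JSON fragment shaped like the file deleted above.
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.Map;

public class MetricDefinitionSketch {

  /** One JSON entry: the backing metric name plus its two capability flags. */
  public static class PropertyInfo {
    public String metric;        // e.g. "hbase.regionserver.requests"
    public boolean pointInTime;  // supports current-value (snapshot) reads
    public boolean temporal;     // supports time-range (historical) reads
  }

  public static void main(String[] args) throws Exception {
    // A single entry copied from the deleted file, inlined for the example.
    String json =
        "{\"metrics/hbase/regionserver/requests\":"
      + "{\"metric\":\"hbase.regionserver.requests\","
      + "\"pointInTime\":true,\"temporal\":true}}";

    // property-id path -> definition
    Map<String, PropertyInfo> defs = new ObjectMapper().readValue(
        json, new TypeReference<Map<String, PropertyInfo>>() {});

    PropertyInfo info = defs.get("metrics/hbase/regionserver/requests");
    System.out.println(info.metric + " temporal=" + info.temporal);
  }
}

The same shape, nested one level deeper per component (as in the jmx_properties.json deletion that follows), would bind to Map<String, Map<String, PropertyInfo>> instead.
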
diff --git a/branch-1.2/ambari-server/src/main/resources/jmx_properties.json b/branch-1.2/ambari-server/src/main/resources/jmx_properties.json
deleted file mode 100644
index fa41d86..0000000
--- a/branch-1.2/ambari-server/src/main/resources/jmx_properties.json
+++ /dev/null
@@ -1,7392 +0,0 @@
-{
-
-  "Component":{
-
-    "NAMENODE":{
-
-      "ServiceComponentInfo/BlocksTotal":{
-        "metric" : "Hadoop:service=NameNode,name=FSNamesystemMetrics.BlocksTotal",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/CapacityRemaining":{
-        "metric" : "Hadoop:service=NameNode,name=FSNamesystemState.CapacityRemaining",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/CapacityTotal":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.Total",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/CapacityUsed":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.Used",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/CorruptBlocks":{
-        "metric" : "Hadoop:service=NameNode,name=FSNamesystemMetrics.CorruptBlocks",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/DeadNodes":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.DeadNodes",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/DecomNodes":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.DecomNodes",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/HeapMemoryMax":{
-        "metric" : "java.lang:type=Memory.HeapMemoryUsage[max]",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/HeapMemoryUsed":{
-        "metric" : "java.lang:type=Memory.HeapMemoryUsage[used]",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/NonHeapMemoryMax":{
-        "metric" : "java.lang:type=Memory.NonHeapMemoryUsage[max]",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/NonHeapMemoryUsed":{
-        "metric" : "java.lang:type=Memory.NonHeapMemoryUsage[used]",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/LiveNodes":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.LiveNodes",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/MissingBlocks":{
-        "metric" : "Hadoop:service=NameNode,name=FSNamesystemMetrics.MissingBlocks",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/NonDfsUsedSpace":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.NonDfsUsedSpace",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/PercentUsed":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.PercentUsed",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/PercentRemaining":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.PercentRemaining",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/Safemode":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.Safemode",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/StartTime":{
-        "metric" : "java.lang:type=Runtime.StartTime",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/TotalFiles":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.TotalFiles",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/UnderReplicatedBlocks":{
-        "metric" : "Hadoop:service=NameNode,name=FSNamesystemMetrics.UnderReplicatedBlocks",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/UpgradeFinalized":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.UpgradeFinalized",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/Version":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.Version",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/BlockCapacity":{
-        "metric" : "Hadoop:service=NameNode,name=FSNamesystemMetrics.BlockCapacity",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/BlocksTotal":{
-        "metric" : "Hadoop:service=NameNode,name=FSNamesystemMetrics.BlocksTotal",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/CapacityRemainingGB":{
-        "metric" : "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityRemainingGB",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/CapacityTotalGB":{
-        "metric" : "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityTotalGB",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/CapacityUsedGB":{
-        "metric" : "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityUsedGB",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/CorruptBlocks":{
-        "metric" : "Hadoop:service=NameNode,name=FSNamesystemMetrics.CorruptBlocks",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/ExcessBlocks":{
-        "metric" : "Hadoop:service=NameNode,name=FSNamesystemMetrics.ExcessBlocks",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/FilesTotal":{
-        "metric" : "Hadoop:service=NameNode,name=FSNamesystemMetrics.FilesTotal",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/MissingBlocks":{
-        "metric" : "Hadoop:service=NameNode,name=FSNamesystemMetrics.MissingBlocks",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/PendingDeletionBlocks":{
-        "metric" : "Hadoop:service=NameNode,name=FSNamesystemMetrics.PendingDeletionBlocks",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/PendingReplicationBlocks":{
-        "metric" : "Hadoop:service=NameNode,name=FSNamesystemMetrics.PendingReplicationBlocks",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks":{
-        "metric" : "Hadoop:service=NameNode,name=FSNamesystemMetrics.ScheduledReplicationBlocks",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/TotalLoad":{
-        "metric" : "Hadoop:service=NameNode,name=FSNamesystemMetrics.TotalLoad",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/UnderReplicatedBlocks":{
-        "metric" : "Hadoop:service=NameNode,name=FSNamesystemMetrics.UnderReplicatedBlocks",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/AddBlockOps":{
-        "metric" : "Hadoop:service=NameNode,name=NameNode.AddBlockOps",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/CreateFileOps":{
-        "metric" : "Hadoop:service=NameNode,name=NameNode.CreateFileOps",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/FileInfoOps":{
-        "metric" : "Hadoop:service=NameNode,name=NameNode.FileInfoOps",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/FilesCreated":{
-        "metric" : "Hadoop:service=NameNode,name=NameNode.FilesCreated",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/FilesInGetListingOps":{
-        "metric" : "Hadoop:service=NameNode,name=NameNode.FilesInGetListingOps",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/GetBlockLocations":{
-        "metric" : "Hadoop:service=NameNode,name=NameNode.GetBlockLocations",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/GetListingOps":{
-        "metric" : "Hadoop:service=NameNode,name=NameNode.GetListingOps",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/Syncs_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=NameNode.Syncs_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/Syncs_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=NameNode.Syncs_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/Transactions_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=NameNode.Transactions_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/Transactions_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=NameNode.Transactions_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/blockReport_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=NameNode.blockReport_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/blockReport_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=NameNode.blockReport_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/Threads":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.Threads",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/HostName":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.HostName",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/Total":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.Total",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/Version":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.Version",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/UpgradeFinalized":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.UpgradeFinalized",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/Used":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.Used",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/Free":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.Free",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/Safemode":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.Safemode",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/NonDfsUsedSpace":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.NonDfsUsedSpace",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/PercentUsed":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.PercentUsed",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/PercentRemaining":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.PercentRemaining",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/TotalBlocks":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.TotalBlocks",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/TotalFiles":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.TotalFiles",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/LiveNodes":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.LiveNodes",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/DeadNodes":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.DeadNodes",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/DecomNodes":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.DecomNodes",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/NameDirStatuses":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.NameDirStatuses",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/gcCount":{
-        "metric" : "Hadoop:service=NameNode,name=jvm.gcCount",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/gcTimeMillis":{
-        "metric" : "Hadoop:service=NameNode,name=jvm.gcTimeMillis",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/logError":{
-        "metric" : "Hadoop:service=NameNode,name=jvm.logError",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/logFatal":{
-        "metric" : "Hadoop:service=NameNode,name=jvm.logFatal",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/logInfo":{
-        "metric" : "Hadoop:service=NameNode,name=jvm.logInfo",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/logWarn":{
-        "metric" : "Hadoop:service=NameNode,name=jvm.logWarn",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/memHeapCommittedM":{
-        "metric" : "Hadoop:service=NameNode,name=jvm.memHeapCommittedM",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/memHeapUsedM":{
-        "metric" : "Hadoop:service=NameNode,name=jvm.memHeapUsedM",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/memNonHeapCommittedM":{
-        "metric" : "Hadoop:service=NameNode,name=jvm.memNonHeapCommittedM",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/memNonHeapUsedM":{
-        "metric" : "Hadoop:service=NameNode,name=jvm.memNonHeapUsedM",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/threadsBlocked":{
-        "metric" : "Hadoop:service=NameNode,name=jvm.threadsBlocked",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/threadsNew":{
-        "metric" : "Hadoop:service=NameNode,name=jvm.threadsNew",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/threadsRunnable":{
-        "metric" : "Hadoop:service=NameNode,name=jvm.threadsRunnable",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/threadsTerminated":{
-        "metric" : "Hadoop:service=NameNode,name=jvm.threadsTerminated",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/threadsTimedWaiting":{
-        "metric" : "Hadoop:service=NameNode,name=jvm.threadsTimedWaiting",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/threadsWaiting":{
-        "metric" : "Hadoop:service=NameNode,name=jvm.threadsWaiting",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/NumOpenConnections":{
-        "metric" : "Hadoop:service=NameNode,name=RpcActivity.NumOpenConnections",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/ReceivedBytes":{
-        "metric" : "Hadoop:service=NameNode,name=RpcActivity.ReceivedBytes",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/RpcProcessingTime_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=RpcActivity.RpcProcessingTime_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/RpcProcessingTime_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=RpcActivity.RpcProcessingTime_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/RpcQueueTime_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTime_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/RpcQueueTime_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTime_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/SentBytes":{
-        "metric" : "Hadoop:service=NameNode,name=RpcActivity.SentBytes",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/callQueueLen":{
-        "metric" : "Hadoop:service=NameNode,name=RpcActivity.callQueueLen",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/rpcAuthenticationFailures":{
-        "metric" : "Hadoop:service=NameNode,name=RpcActivity.rpcAuthenticationFailures",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/rpcAuthenticationSuccesses":{
-        "metric" : "Hadoop:service=NameNode,name=RpcActivity.rpcAuthenticationSuccesses",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/rpcAuthorizationFailures":{
-        "metric" : "Hadoop:service=NameNode,name=RpcActivity.rpcAuthorizationFailures",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/rpcAuthorizationSuccesses":{
-        "metric" : "Hadoop:service=NameNode,name=RpcActivity.rpcAuthorizationSuccesses",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/addBlock_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.addBlock_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/addBlock_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.addBlock_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/blockReceived_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReceived_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/blockReceived_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReceived_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/blockReport_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReport_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/blockReport_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReport_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/complete_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.complete_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/complete_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.complete_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/create_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.create_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/create_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.create_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/fsync_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.fsync_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/fsync_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.fsync_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getBlockLocations_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.getBlockLocations_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getBlockLocations_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.getBlockLocations_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getEditLogSize_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.getEditLogSize_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getEditLogSize_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.getEditLogSize_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getFileInfo_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.getFileInfo_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getFileInfo_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.getFileInfo_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getListing_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.getListing_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getListing_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.getListing_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getProtocolVersion_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.getProtocolVersion_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getProtocolVersion_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.getProtocolVersion_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/renewLease_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/renewLease_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/rollEditLog_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.rollEditLog_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/rollEditLog_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.rollEditLog_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/rollFsImage_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.rollFsImage_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/rollFsImage_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.rollFsImage_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/sendHeartbeat_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.sendHeartbeat_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/sendHeartbeat_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.sendHeartbeat_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/ugi/loginFailure_avg_time":{
-        "metric":"Hadoop:service=NameNode,name=ugi.loginFailure_avg_time",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/ugi/loginFailure_num_ops":{
-        "metric":"Hadoop:service=NameNode,name=ugi.loginFailure_num_ops",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/ugi/loginSuccess_avg_time":{
-        "metric":"Hadoop:service=NameNode,name=ugi.loginSuccess_avg_time",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/ugi/loginSuccess_num_ops":{
-        "metric":"Hadoop:service=NameNode,name=ugi.loginSuccess_num_ops",
-        "pointInTime":true,
-        "temporal":false
-      }
-    },
-    "JOBTRACKER":{
-
-      "ServiceComponentInfo/AliveNodes":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerInfo.AliveNodesInfoJson",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/BlackListedNodes":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerInfo.BlacklistedNodesInfoJson",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/GrayListedNodes":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerInfo.GraylistedNodesInfoJson",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/HeapMemoryMax":{
-        "metric" : "java.lang:type=Memory.HeapMemoryUsage[max]",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/HeapMemoryUsed":{
-        "metric" : "java.lang:type=Memory.HeapMemoryUsage[used]",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/NonHeapMemoryMax":{
-        "metric" : "java.lang:type=Memory.NonHeapMemoryUsage[max]",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/NonHeapMemoryUsed":{
-        "metric" : "java.lang:type=Memory.NonHeapMemoryUsage[used]",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/Queue/jobs_completed":{
-        "metric" : "Hadoop:service=JobTracker,name=QueueMetrics,q=default.jobs_completed",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/Queue/jobs_submitted":{
-        "metric" : "Hadoop:service=JobTracker,name=QueueMetrics,q=default.jobs_submitted",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/Queue/reserved_map_slots":{
-        "metric" : "Hadoop:service=JobTracker,name=QueueMetrics,q=default.reserved_map_slots",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/Queue/reserved_reduce_slots":{
-        "metric" : "Hadoop:service=JobTracker,name=QueueMetrics,q=default.reserved_reduce_slots",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/Queue/running_0":{
-        "metric" : "Hadoop:service=JobTracker,name=QueueMetrics,q=default.running_0",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/Queue/running_1440":{
-        "metric" : "Hadoop:service=JobTracker,name=QueueMetrics,q=default.running_1440",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/Queue/running_300":{
-        "metric" : "Hadoop:service=JobTracker,name=QueueMetrics,q=default.running_300",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/Queue/running_60":{
-        "metric" : "Hadoop:service=JobTracker,name=QueueMetrics,q=default.running_60",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/Queue/waiting_maps":{
-        "metric" : "Hadoop:service=JobTracker,name=QueueMetrics,q=default.waiting_maps",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/Queue/waiting_reduces":{
-        "metric" : "Hadoop:service=JobTracker,name=QueueMetrics,q=default.waiting_reduces",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/StartTime":{
-        "metric" : "java.lang:type=Runtime.StartTime",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/Version":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerInfo.Version",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/jobs_running":{
-        "metric" : "Hadoop:service=JobTracker,name=QueueMetrics,q=default.jobs_running",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/jobtracker/jobs_completed":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.jobs_completed",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/jobtracker/jobs_running":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.jobs_running",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/jobtracker/jobs_submitted":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.jobs_submitted",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/jobtracker/occupied_map_slots":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.occupied_map_slots",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/jobtracker/occupied_reduce_slots":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.occupied_reduce_slots",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/jobtracker/reserved_map_slots":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.reserved_map_slots",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/jobtracker/reserved_reduce_slots":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.reserved_reduce_slots",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/jobtracker/running_maps":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.running_maps",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/jobtracker/running_reduces":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.running_reduces",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/jobtracker/waiting_maps":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.waiting_maps",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/jobtracker/waiting_reduces":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.waiting_reduces",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/gcCount":{
-        "metric" : "Hadoop:service=JobTracker,name=jvm.gcCount",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/gcTimeMillis":{
-        "metric" : "Hadoop:service=JobTracker,name=jvm.gcTimeMillis",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/logError":{
-        "metric" : "Hadoop:service=JobTracker,name=jvm.logError",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/logFatal":{
-        "metric" : "Hadoop:service=JobTracker,name=jvm.logFatal",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/logInfo":{
-        "metric" : "Hadoop:service=JobTracker,name=jvm.logInfo",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/logWarn":{
-        "metric" : "Hadoop:service=JobTracker,name=jvm.logWarn",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/memHeapCommittedM":{
-        "metric" : "Hadoop:service=JobTracker,name=jvm.memHeapCommittedM",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/memHeapUsedM":{
-        "metric" : "Hadoop:service=JobTracker,name=jvm.memHeapUsedM",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/memNonHeapCommittedM":{
-        "metric" : "Hadoop:service=JobTracker,name=jvm.memNonHeapCommittedM",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/memNonHeapUsedM":{
-        "metric" : "Hadoop:service=JobTracker,name=jvm.memNonHeapUsedM",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/threadsBlocked":{
-        "metric" : "Hadoop:service=JobTracker,name=jvm.threadsBlocked",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/threadsNew":{
-        "metric" : "Hadoop:service=JobTracker,name=jvm.threadsNew",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/threadsRunnable":{
-        "metric" : "Hadoop:service=JobTracker,name=jvm.threadsRunnable",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/threadsTerminated":{
-        "metric" : "Hadoop:service=JobTracker,name=jvm.threadsTerminated",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/threadsTimedWaiting":{
-        "metric" : "Hadoop:service=JobTracker,name=jvm.threadsTimedWaiting",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/threadsWaiting":{
-        "metric" : "Hadoop:service=JobTracker,name=jvm.threadsWaiting",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/Queue/jobs_completed":{
-        "metric" : "Hadoop:service=JobTracker,name=QueueMetrics,q=default.jobs_completed",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/Queue/jobs_submitted":{
-        "metric" : "Hadoop:service=JobTracker,name=QueueMetrics,q=default.jobs_submitted",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/Queue/reserved_map_slots":{
-        "metric" : "Hadoop:service=JobTracker,name=QueueMetrics,q=default.reserved_map_slots",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/Queue/reserved_reduce_slots":{
-        "metric" : "Hadoop:service=JobTracker,name=QueueMetrics,q=default.reserved_reduce_slots",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/Queue/running_0":{
-        "metric" : "Hadoop:service=JobTracker,name=QueueMetrics,q=default.running_0",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/Queue/running_1440":{
-        "metric" : "Hadoop:service=JobTracker,name=QueueMetrics,q=default.running_1440",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/Queue/running_300":{
-        "metric" : "Hadoop:service=JobTracker,name=QueueMetrics,q=default.running_300",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/Queue/running_60":{
-        "metric" : "Hadoop:service=JobTracker,name=QueueMetrics,q=default.running_60",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/Queue/waiting_maps":{
-        "metric" : "Hadoop:service=JobTracker,name=QueueMetrics,q=default.waiting_maps",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/Queue/waiting_reduces":{
-        "metric" : "Hadoop:service=JobTracker,name=QueueMetrics,q=default.waiting_reduces",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/Queue/jobs_running":{
-        "metric" : "Hadoop:service=JobTracker,name=QueueMetrics,q=default.jobs_running",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/jobs_completed":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.jobs_completed",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/jobs_running":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.jobs_running",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/jobs_submitted":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.jobs_submitted",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/map_slots":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.map_slots",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/reduce_slots":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.reduce_slots",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/blacklisted_maps":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.blacklisted_maps",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/blacklisted_reduces":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.blacklisted_reduces",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/maps_launched":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.maps_launched",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/maps_completed":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.maps_launched",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/maps_failed":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.maps_failed",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/reduces_launched":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.reduces_launched",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/reduces_completed":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.reduces_completed",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/reduces_failed":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.reduces_failed",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/jobs_failed":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.jobs_failed",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/jobs_killed":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.jobs_killed",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/jobs_preparing":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.jobs_preparing",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/maps_killed":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.maps_killed",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/reduces_killed":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.reduces_killed",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/trackers":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.trackers",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/trackers_blacklisted":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.trackers_blacklisted",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/trackers_graylisted":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.trackers_graylisted",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/trackers_decommissioned":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.trackers_decommissioned",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/heartbeats":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.heartbeats",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/occupied_map_slots":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.occupied_map_slots",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/occupied_reduce_slots":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.occupied_reduce_slots",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/reserved_map_slots":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.reserved_map_slots",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/reserved_reduce_slots":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.reserved_reduce_slots",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/running_maps":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.running_maps",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/running_reduces":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.running_reduces",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/waiting_maps":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.waiting_maps",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/waiting_reduces":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.waiting_reduces",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/Hostname":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerInfo.Hostname",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/Version":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerInfo.Version",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/ConfigVersion":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerInfo.ConfigVersion",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/ThreadCount":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerInfo.ThreadCount",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/SummaryJson":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerInfo.SummaryJson",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/AliveNodesInfoJson":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerInfo.AliveNodesInfoJson",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/BlacklistedNodesInfoJson":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerInfo.BlacklistedNodesInfoJson",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/GraylistedNodesInfoJson":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerInfo.GraylistedNodesInfoJson",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/QueueInfoJson":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerInfo.QueueInfoJson",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/NumOpenConnections":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcActivity.NumOpenConnections",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/ReceivedBytes":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcActivity.ReceivedBytes",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/RpcProcessingTime_avg_time":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcActivity.RpcProcessingTime_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/RpcProcessingTime_num_ops":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcActivity.RpcProcessingTime_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/RpcQueueTime_avg_time":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcActivity.RpcQueueTime_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/RpcQueueTime_num_ops":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcActivity.RpcQueueTime_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/SentBytes":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcActivity.SentBytes",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/callQueueLen":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcActivity.callQueueLen",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/rpcAuthenticationFailures":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcActivity.rpcAuthenticationFailures",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/rpcAuthenticationSuccesses":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcActivity.rpcAuthenticationSuccesses",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/rpcAuthorizationFailures":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcActivity.rpcAuthorizationFailures",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/rpcAuthorizationSuccesses":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcActivity.rpcAuthorizationSuccesses",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/addBlock_avg_time":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.addBlock_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/addBlock_num_ops":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.addBlock_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/blockReceived_avg_time":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.blockReceived_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/blockReceived_num_ops":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.blockReceived_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/blockReport_avg_time":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.blockReport_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/blockReport_num_ops":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.blockReport_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/complete_avg_time":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.complete_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/complete_num_ops":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.complete_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/create_avg_time":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.create_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/create_num_ops":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.create_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/fsync_avg_time":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.fsync_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/fsync_num_ops":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.fsync_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getBlockLocations_avg_time":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.getBlockLocations_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getBlockLocations_num_ops":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.getBlockLocations_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getEditLogSize_avg_time":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.getEditLogSize_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getEditLogSize_num_ops":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.getEditLogSize_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getFileInfo_avg_time":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.getFileInfo_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getFileInfo_num_ops":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.getFileInfo_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getListing_avg_time":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.getListing_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getListing_num_ops":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.getListing_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getProtocolVersion_avg_time":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.getProtocolVersion_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getProtocolVersion_num_ops":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.getProtocolVersion_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/renewLease_avg_time":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.renewLease_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/renewLease_num_ops":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.renewLease_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/rollEditLog_avg_time":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.rollEditLog_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/rollEditLog_num_ops":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.rollEditLog_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/rollFsImage_avg_time":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.rollFsImage_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/rollFsImage_num_ops":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.rollFsImage_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/sendHeartbeat_avg_time":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.sendHeartbeat_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/sendHeartbeat_num_ops":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.sendHeartbeat_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/ugi/loginFailure_avg_time":{
-        "metric":"Hadoop:service=JobTracker,name=ugi.loginFailure_avg_time",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/ugi/loginFailure_num_ops":{
-        "metric":"Hadoop:service=JobTracker,name=ugi.loginFailure_num_ops",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/ugi/loginSuccess_avg_time":{
-        "metric":"Hadoop:service=JobTracker,name=ugi.loginSuccess_avg_time",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/ugi/loginSuccess_num_ops":{
-        "metric":"Hadoop:service=JobTracker,name=ugi.loginSuccess_num_ops",
-        "pointInTime":true,
-        "temporal":false
-      }
-    },
-
-    "HBASE_MASTER":{
-      "ServiceComponentInfo/AverageLoad":{
-        "metric" : "hadoop:service=Master,name=Master.AverageLoad",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/HeapMemoryMax":{
-        "metric" : "java.lang:type=Memory.HeapMemoryUsage[max]",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/HeapMemoryUsed":{
-        "metric" : "java.lang:type=Memory.HeapMemoryUsage[used]",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/NonHeapMemoryMax":{
-        "metric" : "java.lang:type=Memory.NonHeapMemoryUsage[max]",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/NonHeapMemoryUsed":{
-        "metric" : "java.lang:type=Memory.NonHeapMemoryUsage[used]",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/MasterActiveTime":{
-        "metric" : "hadoop:service=Master,name=Master.MasterActiveTime",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/MasterStartTime":{
-        "metric" : "hadoop:service=Master,name=Master.MasterStartTime",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/RegionsInTransition":{
-        "metric" : "hadoop:service=Master,name=Master.RegionsInTransition",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/Revision":{
-        "metric" : "hadoop:service=HBase,name=Info.revision",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "ServiceComponentInfo/Version":{
-        "metric" : "hadoop:service=HBase,name=Info.version",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/hbase/master/ClusterId":{
-        "metric":"hadoop:service=Master,name=Master.ClusterId",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/MasterStartTime":{
-        "metric":"hadoop:service=Master,name=Master.MasterStartTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/MasterActiveTime":{
-        "metric":"hadoop:service=Master,name=Master.MasterActiveTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/Coprocessors":{
-        "metric":"hadoop:service=Master,name=Master.Coprocessors",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/ServerName":{
-        "metric":"hadoop:service=Master,name=Master.ServerName",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/AverageLoad":{
-        "metric":"hadoop:service=Master,name=Master.AverageLoad",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/RegionsInTransition":{
-        "metric":"hadoop:service=Master,name=Master.RegionsInTransition",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/RegionServers":{
-        "metric":"hadoop:service=Master,name=Master.RegionServers",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/ZookeeperQuorum":{
-        "metric":"hadoop:service=Master,name=Master.ZookeeperQuorum",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/DeadRegionServers":{
-        "metric":"hadoop:service=Master,name=Master.DeadRegionServers",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/IsActiveMaster":{
-        "metric":"hadoop:service=Master,name=Master.IsActiveMaster",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/splitTimeNumOps":{
-        "metric":"hadoop:service=Master,name=MasterStatistics.splitTimeNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/splitTimeAvgTime":{
-        "metric":"hadoop:service=Master,name=MasterStatistics.splitTimeAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/splitTimeMinTime":{
-        "metric":"hadoop:service=Master,name=MasterStatistics.splitTimeMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/splitTimeMaxTime":{
-        "metric":"hadoop:service=Master,name=MasterStatistics.splitTimeMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/splitSizeNumOps":{
-        "metric":"hadoop:service=Master,name=MasterStatistics.splitSizeNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/splitSizeAvgTime":{
-        "metric":"hadoop:service=Master,name=MasterStatistics.splitSizeAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/splitSizeMinTime":{
-        "metric":"hadoop:service=Master,name=MasterStatistics.splitSizeMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/splitSizeMaxTime":{
-        "metric":"hadoop:service=Master,name=MasterStatistics.splitSizeMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/cluster_requests":{
-        "metric":"hadoop:service=Master,name=MasterStatistics.cluster_requests",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/revision":{
-        "metric":"hadoop:service=HBase,name=Info.revision",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/hdfsUser":{
-        "metric":"hadoop:service=HBase,name=Info.hdfsUser",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/hdfsDate":{
-        "metric":"hadoop:service=HBase,name=Info.hdfsDate",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/hdfsUrl":{
-        "metric":"hadoop:service=HBase,name=Info.hdfsUrl",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/date":{
-        "metric":"hadoop:service=HBase,name=Info.date",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/hdfsRevision":{
-        "metric":"hadoop:service=HBase,name=Info.hdfsRevision",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/user":{
-        "metric":"hadoop:service=HBase,name=Info.user",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/hdfsVersion":{
-        "metric":"hadoop:service=HBase,name=Info.hdfsVersion",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/url":{
-        "metric":"hadoop:service=HBase,name=Info.url",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/version":{
-        "metric":"hadoop:service=HBase,name=Info.version",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/enableTableNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.enableTableNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/enableTableAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.enableTableAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/enableTableMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.enableTableMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/enableTableMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.enableTableMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/assignNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.assignNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/assignAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.assignAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/assignMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.assignMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/assignMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.assignMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/enableTable.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.enableTable.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/enableTable.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.enableTable.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/enableTable.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.enableTable.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/enableTable.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.enableTable.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/RpcSlowResponseNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.RpcSlowResponseNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/RpcSlowResponseAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.RpcSlowResponseAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/RpcSlowResponseMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.RpcSlowResponseMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/RpcSlowResponseMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.RpcSlowResponseMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getStoreFileListNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getStoreFileListNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getStoreFileListAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getStoreFileListAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getStoreFileListMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getStoreFileListMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getStoreFileListMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getStoreFileListMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getProtocolVersion.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getProtocolVersion.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getProtocolVersion.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getProtocolVersion.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getProtocolVersion.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getProtocolVersion.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getProtocolVersion.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getProtocolVersion.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getAlterStatus.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getAlterStatus.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getAlterStatus.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getAlterStatus.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getAlterStatus.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getAlterStatus.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getAlterStatus.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getAlterStatus.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/moveNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.moveNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/moveAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.moveAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/moveMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.moveMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/moveMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.moveMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/openRegionNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.openRegionNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/openRegionAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.openRegionAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/openRegionMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.openRegionMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/openRegionMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.openRegionMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/incrementNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.incrementNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/incrementAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.incrementAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/incrementMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.incrementMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/incrementMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.incrementMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/regionServerStartup.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.regionServerStartup.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/regionServerStartup.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.regionServerStartup.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/regionServerStartup.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.regionServerStartup.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/regionServerStartup.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.regionServerStartup.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteTableNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteTableNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteTableAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteTableAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteTableMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteTableMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteTableMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteTableMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/balance.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.balance.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/balance.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.balance.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/balance.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.balance.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/balance.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.balance.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/execCoprocessorNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.execCoprocessorNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/execCoprocessorAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.execCoprocessorAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/execCoprocessorMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.execCoprocessorMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/execCoprocessorMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.execCoprocessorMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getHTableDescriptorsNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getHTableDescriptorsNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getHTableDescriptorsAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getHTableDescriptorsAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getHTableDescriptorsMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getHTableDescriptorsMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getHTableDescriptorsMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getHTableDescriptorsMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/addColumnNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.addColumnNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/addColumnAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.addColumnAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/addColumnMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.addColumnMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/addColumnMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.addColumnMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/offline.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.offline.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/offline.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.offline.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/offline.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.offline.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/offline.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.offline.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/multiNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.multiNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/multiAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.multiAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/multiMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.multiMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/multiMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.multiMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/closeRegionNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.closeRegionNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/closeRegionAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.closeRegionAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/closeRegionMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.closeRegionMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/closeRegionMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.closeRegionMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/disableTableNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.disableTableNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/disableTableAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.disableTableAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/disableTableMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.disableTableMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/disableTableMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.disableTableMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/bulkLoadHFilesNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.bulkLoadHFilesNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/bulkLoadHFilesAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.bulkLoadHFilesAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/bulkLoadHFilesMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.bulkLoadHFilesMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/bulkLoadHFilesMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.bulkLoadHFilesMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/putNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.putNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/putAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.putAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/putMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.putMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/putMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.putMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/createTableNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.createTableNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/createTableAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.createTableAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/createTableMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.createTableMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/createTableMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.createTableMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/nextNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.nextNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/nextAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.nextAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/nextMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.nextMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/nextMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.nextMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/unlockRowNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.unlockRowNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/unlockRowAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.unlockRowAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/unlockRowMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.unlockRowMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/unlockRowMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.unlockRowMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/reportRSFatalErrorNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.reportRSFatalErrorNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/reportRSFatalErrorAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.reportRSFatalErrorAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/reportRSFatalErrorMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.reportRSFatalErrorMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/reportRSFatalErrorMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.reportRSFatalErrorMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/rpcAuthenticationFailures":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.rpcAuthenticationFailures",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getRegionInfoNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getRegionInfoNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getRegionInfoAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getRegionInfoAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getRegionInfoMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getRegionInfoMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getRegionInfoMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getRegionInfoMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/openScannerNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.openScannerNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/openScannerAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.openScannerAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/openScannerMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.openScannerMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/openScannerMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.openScannerMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/offlineNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.offlineNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/offlineAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.offlineAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/offlineMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.offlineMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/offlineMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.offlineMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getAlterStatusNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getAlterStatusNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getAlterStatusAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getAlterStatusAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getAlterStatusMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getAlterStatusMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getAlterStatusMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getAlterStatusMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/RpcProcessingTimeNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.RpcProcessingTimeNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/RpcProcessingTimeAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.RpcProcessingTimeAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/RpcProcessingTimeMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.RpcProcessingTimeMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/RpcProcessingTimeMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.RpcProcessingTimeMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/move.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.move.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/move.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.move.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/move.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.move.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/move.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.move.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getLastFlushTimeNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getLastFlushTimeNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getLastFlushTimeAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getLastFlushTimeAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getLastFlushTimeMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getLastFlushTimeMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getLastFlushTimeMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getLastFlushTimeMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/shutdownNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.shutdownNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/shutdownAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.shutdownAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/shutdownMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.shutdownMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/shutdownMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.shutdownMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/openRegionsNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.openRegionsNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/openRegionsAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.openRegionsAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/openRegionsMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.openRegionsMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/openRegionsMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.openRegionsMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getClosestRowBeforeNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getClosestRowBeforeNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getClosestRowBeforeAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getClosestRowBeforeAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getClosestRowBeforeMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getClosestRowBeforeMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getClosestRowBeforeMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getClosestRowBeforeMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getHServerInfoNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getHServerInfoNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getHServerInfoAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getHServerInfoAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getHServerInfoMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getHServerInfoMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getHServerInfoMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getHServerInfoMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getProtocolSignatureNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getProtocolSignatureNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getProtocolSignatureAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getProtocolSignatureAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getProtocolSignatureMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getProtocolSignatureMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getProtocolSignatureMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getProtocolSignatureMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/replicationCallQueueLen":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.replicationCallQueueLen",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/SentBytes":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.SentBytes",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/existsNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.existsNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/existsAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.existsAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/existsMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.existsMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/existsMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.existsMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/shutdown.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.shutdown.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/shutdown.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.shutdown.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/shutdown.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.shutdown.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/shutdown.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.shutdown.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/regionServerStartupNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.regionServerStartupNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/regionServerStartupAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.regionServerStartupAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/regionServerStartupMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.regionServerStartupMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/regionServerStartupMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.regionServerStartupMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/compactRegionNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.compactRegionNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/compactRegionAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.compactRegionAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/compactRegionMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.compactRegionMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/compactRegionMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.compactRegionMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/unassign.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.unassign.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/unassign.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.unassign.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/unassign.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.unassign.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/unassign.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.unassign.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/balanceSwitchNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.balanceSwitchNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/balanceSwitchAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.balanceSwitchAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/balanceSwitchMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.balanceSwitchMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/balanceSwitchMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.balanceSwitchMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/rollHLogWriterNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.rollHLogWriterNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/rollHLogWriterAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.rollHLogWriterAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/rollHLogWriterMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.rollHLogWriterMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/rollHLogWriterMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.rollHLogWriterMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/splitRegionNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.splitRegionNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/splitRegionAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.splitRegionAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/splitRegionMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.splitRegionMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/splitRegionMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.splitRegionMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/ReceivedBytes":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.ReceivedBytes",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/isMasterRunning.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.isMasterRunning.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/isMasterRunning.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.isMasterRunning.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/isMasterRunning.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.isMasterRunning.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/isMasterRunning.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.isMasterRunning.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/addColumn.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.addColumn.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/addColumn.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.addColumn.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/addColumn.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.addColumn.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/addColumn.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.addColumn.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/createTable.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.createTable.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/createTable.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.createTable.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/createTable.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.createTable.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/createTable.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.createTable.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getOnlineRegionsNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getOnlineRegionsNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getOnlineRegionsAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getOnlineRegionsAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getOnlineRegionsMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getOnlineRegionsMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getOnlineRegionsMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getOnlineRegionsMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/closeNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.closeNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/closeAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.closeAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/closeMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.closeMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/closeMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.closeMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/balanceNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.balanceNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/balanceAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.balanceAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/balanceMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.balanceMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/balanceMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.balanceMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getProtocolSignature.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getProtocolSignature.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getProtocolSignature.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getProtocolSignature.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getProtocolSignature.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getProtocolSignature.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getProtocolSignature.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getProtocolSignature.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getClusterStatusNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getClusterStatusNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getClusterStatusAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getClusterStatusAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getClusterStatusMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getClusterStatusMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getClusterStatusMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getClusterStatusMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/balanceSwitch.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.balanceSwitch.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/balanceSwitch.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.balanceSwitch.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/balanceSwitch.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.balanceSwitch.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/balanceSwitch.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.balanceSwitch.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/modifyTable.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.modifyTable.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/modifyTable.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.modifyTable.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/modifyTable.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.modifyTable.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/modifyTable.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.modifyTable.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/appendNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.appendNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/appendAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.appendAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/appendMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.appendMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/appendMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.appendMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummariesNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getBlockCacheColumnFamilySummariesNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummariesAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getBlockCacheColumnFamilySummariesAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummariesMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getBlockCacheColumnFamilySummariesMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummariesMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getBlockCacheColumnFamilySummariesMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/synchronousBalanceSwitchNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitchNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/synchronousBalanceSwitchAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitchAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/synchronousBalanceSwitchMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitchMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/synchronousBalanceSwitchMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitchMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/stopMasterNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.stopMasterNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/stopMasterAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.stopMasterAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/stopMasterMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.stopMasterMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/stopMasterMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.stopMasterMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/priorityCallQueueLen":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.priorityCallQueueLen",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/checkAndPutNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.checkAndPutNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/checkAndPutAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.checkAndPutAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/checkAndPutMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.checkAndPutMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/checkAndPutMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.checkAndPutMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteColumnNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteColumnNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteColumnAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteColumnAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteColumnMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteColumnMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteColumnMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteColumnMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/disableTable.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.disableTable.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/disableTable.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.disableTable.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/disableTable.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.disableTable.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/disableTable.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.disableTable.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/stopMaster.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.stopMaster.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/stopMaster.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.stopMaster.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/stopMaster.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.stopMaster.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/stopMaster.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.stopMaster.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/callQueueLen":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.callQueueLen",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/replicateLogEntriesNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.replicateLogEntriesNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/replicateLogEntriesAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.replicateLogEntriesAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/replicateLogEntriesMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.replicateLogEntriesMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/replicateLogEntriesMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.replicateLogEntriesMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/rpcAuthorizationSuccesses":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.rpcAuthorizationSuccesses",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/stopNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.stopNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/stopAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.stopAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/stopMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.stopMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/stopMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.stopMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/incrementColumnValueNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.incrementColumnValueNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/incrementColumnValueAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.incrementColumnValueAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/incrementColumnValueMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.incrementColumnValueMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/incrementColumnValueMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.incrementColumnValueMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/flushRegionNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.flushRegionNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/flushRegionAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.flushRegionAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/flushRegionMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.flushRegionMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/flushRegionMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.flushRegionMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/unassignNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.unassignNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/unassignAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.unassignAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/unassignMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.unassignMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/unassignMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.unassignMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getClusterStatus.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getClusterStatus.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getClusterStatus.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getClusterStatus.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getClusterStatus.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getClusterStatus.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getClusterStatus.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getClusterStatus.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/reportRSFatalError.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.reportRSFatalError.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/reportRSFatalError.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.reportRSFatalError.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/reportRSFatalError.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.reportRSFatalError.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/reportRSFatalError.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.reportRSFatalError.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/NumOpenConnections":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.NumOpenConnections",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/rpcAuthenticationSuccesses":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.rpcAuthenticationSuccesses",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/mutateRowNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.mutateRowNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/mutateRowAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.mutateRowAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/mutateRowMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.mutateRowMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/mutateRowMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.mutateRowMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/modifyTableNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.modifyTableNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/modifyTableAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.modifyTableAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/modifyTableMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.modifyTableMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/modifyTableMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.modifyTableMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/synchronousBalanceSwitch.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitch.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/synchronousBalanceSwitch.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitch.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/synchronousBalanceSwitch.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitch.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/synchronousBalanceSwitch.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitch.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/rpcAuthorizationFailures":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.rpcAuthorizationFailures",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getProtocolVersionNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getProtocolVersionNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getProtocolVersionAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getProtocolVersionAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getProtocolVersionMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getProtocolVersionMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getProtocolVersionMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getProtocolVersionMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/RpcQueueTimeNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.RpcQueueTimeNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/RpcQueueTimeAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.RpcQueueTimeAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/RpcQueueTimeMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.RpcQueueTimeMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/RpcQueueTimeMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.RpcQueueTimeMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/checkAndDeleteNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.checkAndDeleteNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/checkAndDeleteAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.checkAndDeleteAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/checkAndDeleteMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.checkAndDeleteMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/checkAndDeleteMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.checkAndDeleteMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteTable.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteTable.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteTable.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteTable.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteTable.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteTable.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteTable.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteTable.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/isMasterRunningNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.isMasterRunningNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/isMasterRunningAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.isMasterRunningAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/isMasterRunningMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.isMasterRunningMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/isMasterRunningMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.isMasterRunningMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/modifyColumnNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.modifyColumnNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/modifyColumnAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.modifyColumnAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/modifyColumnMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.modifyColumnMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/modifyColumnMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.modifyColumnMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/lockRowNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.lockRowNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/lockRowAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.lockRowAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/lockRowMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.lockRowMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/lockRowMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.lockRowMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/modifyColumn.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.modifyColumn.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/modifyColumn.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.modifyColumn.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/modifyColumn.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.modifyColumn.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/modifyColumn.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.modifyColumn.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/regionServerReport.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.regionServerReport.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/regionServerReport.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.regionServerReport.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/regionServerReport.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.regionServerReport.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/regionServerReport.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.regionServerReport.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getCompactionStateNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getCompactionStateNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getCompactionStateAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getCompactionStateAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getCompactionStateMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getCompactionStateMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getCompactionStateMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getCompactionStateMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/assign.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.assign.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/assign.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.assign.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/assign.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.assign.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/assign.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.assign.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/regionServerReportNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.regionServerReportNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/regionServerReportAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.regionServerReportAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/regionServerReportMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.regionServerReportMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/regionServerReportMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.regionServerReportMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteColumn.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteColumn.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteColumn.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteColumn.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteColumn.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteColumn.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteColumn.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteColumn.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getHTableDescriptors.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getHTableDescriptors.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getHTableDescriptors.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getHTableDescriptors.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getHTableDescriptors.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getHTableDescriptors.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getHTableDescriptors.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getHTableDescriptors.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      }
-    }
-  },
-  "HostComponent":{
-    "NAMENODE":{
-      "metrics/dfs/FSNamesystem/BlockCapacity":{
-        "metric" : "Hadoop:service=NameNode,name=FSNamesystemMetrics.BlockCapacity",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/BlocksTotal":{
-        "metric" : "Hadoop:service=NameNode,name=FSNamesystemMetrics.BlocksTotal",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/CapacityRemainingGB":{
-        "metric" : "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityRemainingGB",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/CapacityTotalGB":{
-        "metric" : "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityTotalGB",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/CapacityUsedGB":{
-        "metric" : "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityUsedGB",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/CorruptBlocks":{
-        "metric" : "Hadoop:service=NameNode,name=FSNamesystemMetrics.CorruptBlocks",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/ExcessBlocks":{
-        "metric" : "Hadoop:service=NameNode,name=FSNamesystemMetrics.ExcessBlocks",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/FilesTotal":{
-        "metric" : "Hadoop:service=NameNode,name=FSNamesystemMetrics.FilesTotal",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/MissingBlocks":{
-        "metric" : "Hadoop:service=NameNode,name=FSNamesystemMetrics.MissingBlocks",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/PendingDeletionBlocks":{
-        "metric" : "Hadoop:service=NameNode,name=FSNamesystemMetrics.PendingDeletionBlocks",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/PendingReplicationBlocks":{
-        "metric" : "Hadoop:service=NameNode,name=FSNamesystemMetrics.PendingReplicationBlocks",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks":{
-        "metric" : "Hadoop:service=NameNode,name=FSNamesystemMetrics.ScheduledReplicationBlocks",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/TotalLoad":{
-        "metric" : "Hadoop:service=NameNode,name=FSNamesystemMetrics.TotalLoad",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/UnderReplicatedBlocks":{
-        "metric" : "Hadoop:service=NameNode,name=FSNamesystemMetrics.UnderReplicatedBlocks",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/AddBlockOps":{
-        "metric" : "Hadoop:service=NameNode,name=NameNode.AddBlockOps",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/CreateFileOps":{
-        "metric" : "Hadoop:service=NameNode,name=NameNode.CreateFileOps",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/FileInfoOps":{
-        "metric" : "Hadoop:service=NameNode,name=NameNode.FileInfoOps",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/FilesCreated":{
-        "metric" : "Hadoop:service=NameNode,name=NameNode.FilesCreated",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/FilesInGetListingOps":{
-        "metric" : "Hadoop:service=NameNode,name=NameNode.FilesInGetListingOps",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/GetBlockLocations":{
-        "metric" : "Hadoop:service=NameNode,name=NameNode.GetBlockLocations",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/GetListingOps":{
-        "metric" : "Hadoop:service=NameNode,name=NameNode.GetListingOps",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/Syncs_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=NameNode.Syncs_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/Syncs_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=NameNode.Syncs_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/Transactions_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=NameNode.Transactions_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/Transactions_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=NameNode.Transactions_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/blockReport_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=NameNode.blockReport_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/blockReport_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=NameNode.blockReport_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/Threads":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.Threads",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/HostName":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.HostName",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/Total":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.Total",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/Version":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.Version",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/UpgradeFinalized":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.UpgradeFinalized",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/Used":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.Used",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/Free":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.Free",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/Safemode":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.Safemode",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/NonDfsUsedSpace":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.NonDfsUsedSpace",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/PercentUsed":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.PercentUsed",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/PercentRemaining":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.PercentRemaining",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/TotalBlocks":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.TotalBlocks",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/TotalFiles":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.TotalFiles",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/LiveNodes":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.LiveNodes",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/DeadNodes":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.DeadNodes",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/DecomNodes":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.DecomNodes",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/namenode/NameDirStatuses":{
-        "metric" : "Hadoop:service=NameNode,name=NameNodeInfo.NameDirStatuses",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/HeapMemoryMax":{
-        "metric" : "java.lang:type=Memory.HeapMemoryUsage[max]",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/HeapMemoryUsed":{
-        "metric" : "java.lang:type=Memory.HeapMemoryUsage[used]",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/NonHeapMemoryMax":{
-        "metric" : "java.lang:type=Memory.NonHeapMemoryUsage[max]",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/NonHeapMemoryUsed":{
-        "metric" : "java.lang:type=Memory.NonHeapMemoryUsage[used]",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/gcCount":{
-        "metric" : "Hadoop:service=NameNode,name=jvm.gcCount",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/gcTimeMillis":{
-        "metric" : "Hadoop:service=NameNode,name=jvm.gcTimeMillis",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/logError":{
-        "metric" : "Hadoop:service=NameNode,name=jvm.logError",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/logFatal":{
-        "metric" : "Hadoop:service=NameNode,name=jvm.logFatal",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/logInfo":{
-        "metric" : "Hadoop:service=NameNode,name=jvm.logInfo",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/logWarn":{
-        "metric" : "Hadoop:service=NameNode,name=jvm.logWarn",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/memHeapCommittedM":{
-        "metric" : "Hadoop:service=NameNode,name=jvm.memHeapCommittedM",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/memHeapUsedM":{
-        "metric" : "Hadoop:service=NameNode,name=jvm.memHeapUsedM",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/memNonHeapCommittedM":{
-        "metric" : "Hadoop:service=NameNode,name=jvm.memNonHeapCommittedM",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/memNonHeapUsedM":{
-        "metric" : "Hadoop:service=NameNode,name=jvm.memNonHeapUsedM",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/threadsBlocked":{
-        "metric" : "Hadoop:service=NameNode,name=jvm.threadsBlocked",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/threadsNew":{
-        "metric" : "Hadoop:service=NameNode,name=jvm.threadsNew",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/threadsRunnable":{
-        "metric" : "Hadoop:service=NameNode,name=jvm.threadsRunnable",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/threadsTerminated":{
-        "metric" : "Hadoop:service=NameNode,name=jvm.threadsTerminated",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/threadsTimedWaiting":{
-        "metric" : "Hadoop:service=NameNode,name=jvm.threadsTimedWaiting",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/threadsWaiting":{
-        "metric" : "Hadoop:service=NameNode,name=jvm.threadsWaiting",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/NumOpenConnections":{
-        "metric" : "Hadoop:service=NameNode,name=RpcActivity.NumOpenConnections",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/ReceivedBytes":{
-        "metric" : "Hadoop:service=NameNode,name=RpcActivity.ReceivedBytes",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/RpcProcessingTime_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=RpcActivity.RpcProcessingTime_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/RpcProcessingTime_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=RpcActivity.RpcProcessingTime_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/RpcQueueTime_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTime_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/RpcQueueTime_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTime_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/SentBytes":{
-        "metric" : "Hadoop:service=NameNode,name=RpcActivity.SentBytes",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/callQueueLen":{
-        "metric" : "Hadoop:service=NameNode,name=RpcActivity.callQueueLen",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/rpcAuthenticationFailures":{
-        "metric" : "Hadoop:service=NameNode,name=RpcActivity.rpcAuthenticationFailures",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/rpcAuthenticationSuccesses":{
-        "metric" : "Hadoop:service=NameNode,name=RpcActivity.rpcAuthenticationSuccesses",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/rpcAuthorizationFailures":{
-        "metric" : "Hadoop:service=NameNode,name=RpcActivity.rpcAuthorizationFailures",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/rpcAuthorizationSuccesses":{
-        "metric" : "Hadoop:service=NameNode,name=RpcActivity.rpcAuthorizationSuccesses",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/addBlock_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.addBlock_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/addBlock_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.addBlock_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/blockReceived_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReceived_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/blockReceived_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReceived_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/blockReport_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReport_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/blockReport_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReport_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/complete_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.complete_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/complete_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.complete_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/create_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.create_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/create_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.create_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/fsync_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.fsync_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/fsync_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.fsync_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getBlockLocations_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.getBlockLocations_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getBlockLocations_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.getBlockLocations_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getEditLogSize_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.getEditLogSize_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getEditLogSize_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.getEditLogSize_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getFileInfo_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.getFileInfo_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getFileInfo_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.getFileInfo_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getListing_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.getListing_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getListing_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.getListing_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getProtocolVersion_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.getProtocolVersion_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getProtocolVersion_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.getProtocolVersion_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/renewLease_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/renewLease_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/rollEditLog_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.rollEditLog_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/rollEditLog_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.rollEditLog_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/rollFsImage_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.rollFsImage_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/rollFsImage_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.rollFsImage_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/sendHeartbeat_avg_time":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.sendHeartbeat_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/sendHeartbeat_num_ops":{
-        "metric" : "Hadoop:service=NameNode,name=RpcDetailedActivity.sendHeartbeat_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/ugi/loginFailure_avg_time":{
-        "metric":"Hadoop:service=NameNode,name=ugi.loginFailure_avg_time",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/ugi/loginFailure_num_ops":{
-        "metric":"Hadoop:service=NameNode,name=ugi.loginFailure_num_ops",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/ugi/loginSuccess_avg_time":{
-        "metric":"Hadoop:service=NameNode,name=ugi.loginSuccess_avg_time",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/ugi/loginSuccess_num_ops":{
-        "metric":"Hadoop:service=NameNode,name=ugi.loginSuccess_num_ops",
-        "pointInTime":true,
-        "temporal":false
-      }
-    },
-    "DATANODE":{
-      "metrics/dfs/FSNamesystem/BlockCapacity":{
-        "metric" : "Hadoop:service=DataNode,name=DataNode.BlockCapacity",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/BlocksTotal":{
-        "metric" : "Hadoop:service=DataNode,name=DataNode.BlocksTotal",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/CapacityRemainingGB":{
-        "metric" : "Hadoop:service=DataNode,name=DataNode.CapacityRemainingGB",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/CapacityTotalGB":{
-        "metric" : "Hadoop:service=DataNode,name=DataNode.CapacityTotalGB",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/CapacityUsedGB":{
-        "metric" : "Hadoop:service=DataNode,name=DataNode.CapacityUsedGB",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/CorruptBlocks":{
-        "metric" : "Hadoop:service=DataNode,name=DataNode.CorruptBlocks",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/ExcessBlocks":{
-        "metric" : "Hadoop:service=DataNode,name=DataNode.ExcessBlocks",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/FilesTotal":{
-        "metric" : "Hadoop:service=DataNode,name=DataNode.FilesTotal",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/MissingBlocks":{
-        "metric" : "Hadoop:service=DataNode,name=DataNode.MissingBlocks",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/PendingDeletionBlocks":{
-        "metric" : "Hadoop:service=DataNode,name=DataNode.PendingDeletionBlocks",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/PendingReplicationBlocks":{
-        "metric" : "Hadoop:service=DataNode,name=DataNode.PendingReplicationBlocks",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks":{
-        "metric" : "Hadoop:service=DataNode,name=DataNode.ScheduledReplicationBlocks",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/TotalLoad":{
-        "metric" : "Hadoop:service=DataNode,name=DataNode.TotalLoad",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/UnderReplicatedBlocks":{
-        "metric" : "Hadoop:service=DataNode,name=DataNode.UnderReplicatedBlocks",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/HostName":{
-        "metric" : "Hadoop:service=DataNode,name=DataNodeInfo.HostName",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/Version":{
-        "metric" : "Hadoop:service=DataNode,name=DataNodeInfo.Version",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/RpcPort":{
-        "metric" : "Hadoop:service=DataNode,name=DataNodeInfo.RpcPort",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/HttpPort":{
-        "metric" : "Hadoop:service=DataNode,name=DataNodeInfo.HttpPort",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/NamenodeAddress":{
-        "metric" : "Hadoop:service=DataNode,name=DataNodeInfo.NamenodeAddress",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/dfs/FSNamesystem/VolumeInfo":{
-        "metric" : "Hadoop:service=DataNode,name=DataNodeInfo.VolumeInfo",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/HeapMemoryMax":{
-        "metric" : "java.lang:type=Memory.HeapMemoryUsage[max]",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/HeapMemoryUsed":{
-        "metric" : "java.lang:type=Memory.HeapMemoryUsage[used]",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/NonHeapMemoryMax":{
-        "metric" : "java.lang:type=Memory.NonHeapMemoryUsage[max]",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/NonHeapMemoryUsed":{
-        "metric" : "java.lang:type=Memory.NonHeapMemoryUsage[used]",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/gcCount":{
-        "metric" : "Hadoop:service=DataNode,name=jvm.gcCount",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/gcTimeMillis":{
-        "metric" : "Hadoop:service=DataNode,name=jvm.gcTimeMillis",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/logError":{
-        "metric" : "Hadoop:service=DataNode,name=jvm.logError",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/logFatal":{
-        "metric" : "Hadoop:service=DataNode,name=jvm.logFatal",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/logInfo":{
-        "metric" : "Hadoop:service=DataNode,name=jvm.logInfo",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/logWarn":{
-        "metric" : "Hadoop:service=DataNode,name=jvm.logWarn",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/memHeapCommittedM":{
-        "metric" : "Hadoop:service=DataNode,name=jvm.memHeapCommittedM",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/memHeapUsedM":{
-        "metric" : "Hadoop:service=DataNode,name=jvm.memHeapUsedM",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/memNonHeapCommittedM":{
-        "metric" : "Hadoop:service=DataNode,name=jvm.memNonHeapCommittedM",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/memNonHeapUsedM":{
-        "metric" : "Hadoop:service=DataNode,name=jvm.memNonHeapUsedM",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/threadsBlocked":{
-        "metric" : "Hadoop:service=DataNode,name=jvm.threadsBlocked",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/threadsNew":{
-        "metric" : "Hadoop:service=DataNode,name=jvm.threadsNew",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/threadsRunnable":{
-        "metric" : "Hadoop:service=DataNode,name=jvm.threadsRunnable",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/threadsTerminated":{
-        "metric" : "Hadoop:service=DataNode,name=jvm.threadsTerminated",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/threadsTimedWaiting":{
-        "metric" : "Hadoop:service=DataNode,name=jvm.threadsTimedWaiting",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/threadsWaiting":{
-        "metric" : "Hadoop:service=DataNode,name=jvm.threadsWaiting",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/NumOpenConnections":{
-        "metric" : "Hadoop:service=DataNode,name=RpcActivity.NumOpenConnections",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/ReceivedBytes":{
-        "metric" : "Hadoop:service=DataNode,name=RpcActivity.ReceivedBytes",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/RpcProcessingTime_avg_time":{
-        "metric" : "Hadoop:service=DataNode,name=RpcActivity.RpcProcessingTime_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/RpcProcessingTime_num_ops":{
-        "metric" : "Hadoop:service=DataNode,name=RpcActivity.RpcProcessingTime_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/RpcQueueTime_avg_time":{
-        "metric" : "Hadoop:service=DataNode,name=RpcActivity.RpcQueueTime_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/RpcQueueTime_num_ops":{
-        "metric" : "Hadoop:service=DataNode,name=RpcActivity.RpcQueueTime_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/SentBytes":{
-        "metric" : "Hadoop:service=DataNode,name=RpcActivity.SentBytes",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/callQueueLen":{
-        "metric" : "Hadoop:service=DataNode,name=RpcActivity.callQueueLen",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/rpcAuthenticationFailures":{
-        "metric" : "Hadoop:service=DataNode,name=RpcActivity.rpcAuthenticationFailures",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/rpcAuthenticationSuccesses":{
-        "metric" : "Hadoop:service=DataNode,name=RpcActivity.rpcAuthenticationSuccesses",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/rpcAuthorizationFailures":{
-        "metric" : "Hadoop:service=DataNode,name=RpcActivity.rpcAuthorizationFailures",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/rpcAuthorizationSuccesses":{
-        "metric" : "Hadoop:service=DataNode,name=RpcActivity.rpcAuthorizationSuccesses",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/addBlock_avg_time":{
-        "metric" : "Hadoop:service=DataNode,name=RpcDetailedActivity.addBlock_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/addBlock_num_ops":{
-        "metric" : "Hadoop:service=DataNode,name=RpcDetailedActivity.addBlock_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/blockReceived_avg_time":{
-        "metric" : "Hadoop:service=DataNode,name=RpcDetailedActivity.blockReceived_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/blockReceived_num_ops":{
-        "metric" : "Hadoop:service=DataNode,name=RpcDetailedActivity.blockReceived_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/blockReport_avg_time":{
-        "metric" : "Hadoop:service=DataNode,name=RpcDetailedActivity.blockReport_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/blockReport_num_ops":{
-        "metric" : "Hadoop:service=DataNode,name=RpcDetailedActivity.blockReport_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/complete_avg_time":{
-        "metric" : "Hadoop:service=DataNode,name=RpcDetailedActivity.complete_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/complete_num_ops":{
-        "metric" : "Hadoop:service=DataNode,name=RpcDetailedActivity.complete_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/create_avg_time":{
-        "metric" : "Hadoop:service=DataNode,name=RpcDetailedActivity.create_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/create_num_ops":{
-        "metric" : "Hadoop:service=DataNode,name=RpcDetailedActivity.create_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/fsync_avg_time":{
-        "metric" : "Hadoop:service=DataNode,name=RpcDetailedActivity.fsync_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/fsync_num_ops":{
-        "metric" : "Hadoop:service=DataNode,name=RpcDetailedActivity.fsync_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getBlockLocations_avg_time":{
-        "metric" : "Hadoop:service=DataNode,name=RpcDetailedActivity.getBlockLocations_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getBlockLocations_num_ops":{
-        "metric" : "Hadoop:service=DataNode,name=RpcDetailedActivity.getBlockLocations_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getEditLogSize_avg_time":{
-        "metric" : "Hadoop:service=DataNode,name=RpcDetailedActivity.getEditLogSize_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getEditLogSize_num_ops":{
-        "metric" : "Hadoop:service=DataNode,name=RpcDetailedActivity.getEditLogSize_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getFileInfo_avg_time":{
-        "metric" : "Hadoop:service=DataNode,name=RpcDetailedActivity.getFileInfo_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getFileInfo_num_ops":{
-        "metric" : "Hadoop:service=DataNode,name=RpcDetailedActivity.getFileInfo_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getListing_avg_time":{
-        "metric" : "Hadoop:service=DataNode,name=RpcDetailedActivity.getListing_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getListing_num_ops":{
-        "metric" : "Hadoop:service=DataNode,name=RpcDetailedActivity.getListing_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getProtocolVersion_avg_time":{
-        "metric" : "Hadoop:service=DataNode,name=RpcDetailedActivity.getProtocolVersion_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getProtocolVersion_num_ops":{
-        "metric" : "Hadoop:service=DataNode,name=RpcDetailedActivity.getProtocolVersion_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/renewLease_avg_time":{
-        "metric" : "Hadoop:service=DataNode,name=RpcDetailedActivity.renewLease_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/renewLease_num_ops":{
-        "metric" : "Hadoop:service=DataNode,name=RpcDetailedActivity.renewLease_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/rollEditLog_avg_time":{
-        "metric" : "Hadoop:service=DataNode,name=RpcDetailedActivity.rollEditLog_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/rollEditLog_num_ops":{
-        "metric" : "Hadoop:service=DataNode,name=RpcDetailedActivity.rollEditLog_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/rollFsImage_avg_time":{
-        "metric" : "Hadoop:service=DataNode,name=RpcDetailedActivity.rollFsImage_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/rollFsImage_num_ops":{
-        "metric" : "Hadoop:service=DataNode,name=RpcDetailedActivity.rollFsImage_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/sendHeartbeat_avg_time":{
-        "metric" : "Hadoop:service=DataNode,name=RpcDetailedActivity.sendHeartbeat_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/sendHeartbeat_num_ops":{
-        "metric" : "Hadoop:service=DataNode,name=RpcDetailedActivity.sendHeartbeat_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/ugi/loginFailure_avg_time":{
-        "metric":"Hadoop:service=DataNode,name=ugi.loginFailure_avg_time",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/ugi/loginFailure_num_ops":{
-        "metric":"Hadoop:service=DataNode,name=ugi.loginFailure_num_ops",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/ugi/loginSuccess_avg_time":{
-        "metric":"Hadoop:service=DataNode,name=ugi.loginSuccess_avg_time",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/ugi/loginSuccess_num_ops":{
-        "metric":"Hadoop:service=DataNode,name=ugi.loginSuccess_num_ops",
-        "pointInTime":true,
-        "temporal":false
-      }
-    },
-    "JOBTRACKER":{
-      "metrics/jvm/HeapMemoryMax":{
-        "metric" : "java.lang:type=Memory.HeapMemoryUsage[max]",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/HeapMemoryUsed":{
-        "metric" : "java.lang:type=Memory.HeapMemoryUsage[used]",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/NonHeapMemoryMax":{
-        "metric" : "java.lang:type=Memory.NonHeapMemoryUsage[max]",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/NonHeapMemoryUsed":{
-        "metric" : "java.lang:type=Memory.NonHeapMemoryUsage[used]",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/gcCount":{
-        "metric" : "Hadoop:service=JobTracker,name=jvm.gcCount",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/gcTimeMillis":{
-        "metric" : "Hadoop:service=JobTracker,name=jvm.gcTimeMillis",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/logError":{
-        "metric" : "Hadoop:service=JobTracker,name=jvm.logError",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/logFatal":{
-        "metric" : "Hadoop:service=JobTracker,name=jvm.logFatal",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/logInfo":{
-        "metric" : "Hadoop:service=JobTracker,name=jvm.logInfo",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/logWarn":{
-        "metric" : "Hadoop:service=JobTracker,name=jvm.logWarn",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/memHeapCommittedM":{
-        "metric" : "Hadoop:service=JobTracker,name=jvm.memHeapCommittedM",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/memHeapUsedM":{
-        "metric" : "Hadoop:service=JobTracker,name=jvm.memHeapUsedM",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/memNonHeapCommittedM":{
-        "metric" : "Hadoop:service=JobTracker,name=jvm.memNonHeapCommittedM",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/memNonHeapUsedM":{
-        "metric" : "Hadoop:service=JobTracker,name=jvm.memNonHeapUsedM",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/threadsBlocked":{
-        "metric" : "Hadoop:service=JobTracker,name=jvm.threadsBlocked",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/threadsNew":{
-        "metric" : "Hadoop:service=JobTracker,name=jvm.threadsNew",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/threadsRunnable":{
-        "metric" : "Hadoop:service=JobTracker,name=jvm.threadsRunnable",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/threadsTerminated":{
-        "metric" : "Hadoop:service=JobTracker,name=jvm.threadsTerminated",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/threadsTimedWaiting":{
-        "metric" : "Hadoop:service=JobTracker,name=jvm.threadsTimedWaiting",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/threadsWaiting":{
-        "metric" : "Hadoop:service=JobTracker,name=jvm.threadsWaiting",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/Queue/jobs_completed":{
-        "metric" : "Hadoop:service=JobTracker,name=QueueMetrics,q=default.jobs_completed",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/Queue/jobs_submitted":{
-        "metric" : "Hadoop:service=JobTracker,name=QueueMetrics,q=default.jobs_submitted",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/Queue/reserved_map_slots":{
-        "metric" : "Hadoop:service=JobTracker,name=QueueMetrics,q=default.reserved_map_slots",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/Queue/reserved_reduce_slots":{
-        "metric" : "Hadoop:service=JobTracker,name=QueueMetrics,q=default.reserved_reduce_slots",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/Queue/running_0":{
-        "metric" : "Hadoop:service=JobTracker,name=QueueMetrics,q=default.running_0",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/Queue/running_1440":{
-        "metric" : "Hadoop:service=JobTracker,name=QueueMetrics,q=default.running_1440",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/Queue/running_300":{
-        "metric" : "Hadoop:service=JobTracker,name=QueueMetrics,q=default.running_300",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/Queue/running_60":{
-        "metric" : "Hadoop:service=JobTracker,name=QueueMetrics,q=default.running_60",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/Queue/waiting_maps":{
-        "metric" : "Hadoop:service=JobTracker,name=QueueMetrics,q=default.waiting_maps",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/Queue/waiting_reduces":{
-        "metric" : "Hadoop:service=JobTracker,name=QueueMetrics,q=default.waiting_reduces",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/Queue/jobs_running":{
-        "metric" : "Hadoop:service=JobTracker,name=QueueMetrics,q=default.jobs_running",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/jobs_completed":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.jobs_completed",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/jobs_running":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.jobs_running",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/jobs_submitted":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.jobs_submitted",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/map_slots":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.map_slots",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/reduce_slots":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.reduce_slots",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/blacklisted_maps":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.blacklisted_maps",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/blacklisted_reduces":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.blacklisted_reduces",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/maps_launched":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.maps_launched",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/maps_completed":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.maps_launched",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/maps_failed":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.maps_failed",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/reduces_launched":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.reduces_launched",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/reduces_completed":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.reduces_completed",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/reduces_failed":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.reduces_failed",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/jobs_failed":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.jobs_failed",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/jobs_killed":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.jobs_killed",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/jobs_preparing":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.jobs_preparing",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/maps_killed":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.maps_killed",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/reduces_killed":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.reduces_killed",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/trackers":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.trackers",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/trackers_blacklisted":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.trackers_blacklisted",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/trackers_graylisted":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.trackers_graylisted",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/trackers_decommissioned":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.trackers_decommissioned",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/heartbeats":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.heartbeats",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/occupied_map_slots":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.occupied_map_slots",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/occupied_reduce_slots":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.occupied_reduce_slots",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/reserved_map_slots":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.reserved_map_slots",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/reserved_reduce_slots":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.reserved_reduce_slots",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/running_maps":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.running_maps",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/running_reduces":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.running_reduces",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/waiting_maps":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.waiting_maps",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/waiting_reduces":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerMetrics.waiting_reduces",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/Hostname":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerInfo.Hostname",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/Version":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerInfo.Version",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/ConfigVersion":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerInfo.ConfigVersion",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/ThreadCount":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerInfo.ThreadCount",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/SummaryJson":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerInfo.SummaryJson",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/AliveNodesInfoJson":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerInfo.AliveNodesInfoJson",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/BlacklistedNodesInfoJson":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerInfo.BlacklistedNodesInfoJson",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/GraylistedNodesInfoJson":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerInfo.GraylistedNodesInfoJson",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/jobtracker/QueueInfoJson":{
-        "metric" : "Hadoop:service=JobTracker,name=JobTrackerInfo.QueueInfoJson",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/NumOpenConnections":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcActivity.NumOpenConnections",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/ReceivedBytes":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcActivity.ReceivedBytes",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/RpcProcessingTime_avg_time":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcActivity.RpcProcessingTime_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/RpcProcessingTime_num_ops":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcActivity.RpcProcessingTime_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/RpcQueueTime_avg_time":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcActivity.RpcQueueTime_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/RpcQueueTime_num_ops":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcActivity.RpcQueueTime_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/SentBytes":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcActivity.SentBytes",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/callQueueLen":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcActivity.callQueueLen",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/rpcAuthenticationFailures":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcActivity.rpcAuthenticationFailures",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/rpcAuthenticationSuccesses":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcActivity.rpcAuthenticationSuccesses",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/rpcAuthorizationFailures":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcActivity.rpcAuthorizationFailures",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/rpcAuthorizationSuccesses":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcActivity.rpcAuthorizationSuccesses",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/addBlock_avg_time":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.addBlock_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/addBlock_num_ops":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.addBlock_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/blockReceived_avg_time":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.blockReceived_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/blockReceived_num_ops":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.blockReceived_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/blockReport_avg_time":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.blockReport_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/blockReport_num_ops":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.blockReport_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/complete_avg_time":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.complete_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/complete_num_ops":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.complete_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/create_avg_time":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.create_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/create_num_ops":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.create_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/fsync_avg_time":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.fsync_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/fsync_num_ops":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.fsync_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getBlockLocations_avg_time":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.getBlockLocations_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getBlockLocations_num_ops":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.getBlockLocations_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getEditLogSize_avg_time":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.getEditLogSize_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getEditLogSize_num_ops":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.getEditLogSize_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getFileInfo_avg_time":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.getFileInfo_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getFileInfo_num_ops":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.getFileInfo_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getListing_avg_time":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.getListing_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getListing_num_ops":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.getListing_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getProtocolVersion_avg_time":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.getProtocolVersion_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getProtocolVersion_num_ops":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.getProtocolVersion_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/renewLease_avg_time":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.renewLease_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/renewLease_num_ops":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.renewLease_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/rollEditLog_avg_time":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.rollEditLog_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/rollEditLog_num_ops":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.rollEditLog_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/rollFsImage_avg_time":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.rollFsImage_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/rollFsImage_num_ops":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.rollFsImage_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/sendHeartbeat_avg_time":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.sendHeartbeat_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/sendHeartbeat_num_ops":{
-        "metric" : "Hadoop:service=JobTracker,name=RpcDetailedActivity.sendHeartbeat_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/ugi/loginFailure_avg_time":{
-        "metric":"Hadoop:service=JobTracker,name=ugi.loginFailure_avg_time",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/ugi/loginFailure_num_ops":{
-        "metric":"Hadoop:service=JobTracker,name=ugi.loginFailure_num_ops",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/ugi/loginSuccess_avg_time":{
-        "metric":"Hadoop:service=JobTracker,name=ugi.loginSuccess_avg_time",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/ugi/loginSuccess_num_ops":{
-        "metric":"Hadoop:service=JobTracker,name=ugi.loginSuccess_num_ops",
-        "pointInTime":true,
-        "temporal":false
-      }
-    },
-    "TASKTRACKER":{
-      "metrics/jvm/HeapMemoryMax":{
-        "metric" : "java.lang:type=Memory.HeapMemoryUsage[max]",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/HeapMemoryUsed":{
-        "metric" : "java.lang:type=Memory.HeapMemoryUsage[used]",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/NonHeapMemoryMax":{
-        "metric" : "java.lang:type=Memory.NonHeapMemoryUsage[max]",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/NonHeapMemoryUsed":{
-        "metric" : "java.lang:type=Memory.NonHeapMemoryUsage[used]",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/gcCount":{
-        "metric" : "Hadoop:service=TaskTracker,name=jvm.gcCount",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/gcTimeMillis":{
-        "metric" : "Hadoop:service=TaskTracker,name=jvm.gcTimeMillis",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/logError":{
-        "metric" : "Hadoop:service=TaskTracker,name=jvm.logError",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/logFatal":{
-        "metric" : "Hadoop:service=TaskTracker,name=jvm.logFatal",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/logInfo":{
-        "metric" : "Hadoop:service=TaskTracker,name=jvm.logInfo",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/logWarn":{
-        "metric" : "Hadoop:service=TaskTracker,name=jvm.logWarn",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/memHeapCommittedM":{
-        "metric" : "Hadoop:service=TaskTracker,name=jvm.memHeapCommittedM",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/memHeapUsedM":{
-        "metric" : "Hadoop:service=TaskTracker,name=jvm.memHeapUsedM",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/memNonHeapCommittedM":{
-        "metric" : "Hadoop:service=TaskTracker,name=jvm.memNonHeapCommittedM",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/memNonHeapUsedM":{
-        "metric" : "Hadoop:service=TaskTracker,name=jvm.memNonHeapUsedM",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/threadsBlocked":{
-        "metric" : "Hadoop:service=TaskTracker,name=jvm.threadsBlocked",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/threadsNew":{
-        "metric" : "Hadoop:service=TaskTracker,name=jvm.threadsNew",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/threadsRunnable":{
-        "metric" : "Hadoop:service=TaskTracker,name=jvm.threadsRunnable",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/threadsTerminated":{
-        "metric" : "Hadoop:service=TaskTracker,name=jvm.threadsTerminated",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/threadsTimedWaiting":{
-        "metric" : "Hadoop:service=TaskTracker,name=jvm.threadsTimedWaiting",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/threadsWaiting":{
-        "metric" : "Hadoop:service=TaskTracker,name=jvm.threadsWaiting",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/shuffleOutput/shuffle_exceptions_caught":{
-        "metric":"Hadoop:service=TaskTracker,name=ShuffleServerMetrics.shuffle_exceptions_caught",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/mapred/shuffleOutput/shuffle_failed_outputs":{
-        "metric":"Hadoop:service=TaskTracker,name=ShuffleServerMetrics.shuffle_failed_outputs",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/mapred/shuffleOutput/shuffle_handler_busy_percent":{
-        "metric":"Hadoop:service=TaskTracker,name=ShuffleServerMetrics.shuffle_handler_busy_percent",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/mapred/shuffleOutput/shuffle_output_bytes":{
-        "metric":"Hadoop:service=TaskTracker,name=ShuffleServerMetrics.shuffle_output_bytes",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/mapred/shuffleOutput/shuffle_success_outputs":{
-        "metric":"Hadoop:service=TaskTracker,name=ShuffleServerMetrics.shuffle_success_outputs",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/mapred/tasktracker/maps_running":{
-        "metric" : "Hadoop:service=TaskTracker,name=TaskTrackerMetrics.maps_running",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/tasktracker/reduces_running":{
-        "metric" : "Hadoop:service=TaskTracker,name=TaskTrackerMetrics.reduces_running",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/tasktracker/mapTaskSlots":{
-        "metric" : "Hadoop:service=TaskTracker,name=TaskTrackerMetrics.mapTaskSlots",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/tasktracker/reduceTaskSlots":{
-        "metric" : "Hadoop:service=TaskTracker,name=TaskTrackerMetrics.reduceTaskSlots",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/tasktracker/failedDirs":{
-        "metric" : "Hadoop:service=TaskTracker,name=TaskTrackerMetrics.failedDirs",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/tasktracker/tasks_completed":{
-        "metric" : "Hadoop:service=TaskTracker,name=TaskTrackerMetrics.tasks_completed",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/tasktracker/tasks_failed_timeout":{
-        "metric" : "Hadoop:service=TaskTracker,name=TaskTrackerMetrics.tasks_failed_timeout",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/tasktracker/tasks_failed_ping":{
-        "metric" : "Hadoop:service=TaskTracker,name=TaskTrackerMetrics.tasks_failed_ping",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/tasktracker/Hostname":{
-        "metric" : "Hadoop:service=TaskTracker,name=TaskTrackerInfo.Hostname",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/tasktracker/Version":{
-        "metric" : "Hadoop:service=TaskTracker,name=TaskTrackerInfo.Version",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/tasktracker/ConfigVersion":{
-        "metric" : "Hadoop:service=TaskTracker,name=TaskTrackerInfo.ConfigVersion",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/tasktracker/JobTrackerUrl":{
-        "metric" : "Hadoop:service=TaskTracker,name=TaskTrackerInfo.JobTrackerUrl",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/tasktracker/RpcPort":{
-        "metric" : "Hadoop:service=TaskTracker,name=TaskTrackerInfo.RpcPort",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/tasktracker/HttpPort":{
-        "metric" : "Hadoop:service=TaskTracker,name=TaskTrackerInfo.HttpPort",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/tasktracker/Healthy":{
-        "metric" : "Hadoop:service=TaskTracker,name=TaskTrackerInfo.Healthy",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/mapred/tasktracker/TasksInfoJson":{
-        "metric" : "Hadoop:service=TaskTracker,name=TaskTrackerInfo.TasksInfoJson",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/NumOpenConnections":{
-        "metric" : "Hadoop:service=TaskTracker,name=RpcActivity.NumOpenConnections",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/ReceivedBytes":{
-        "metric" : "Hadoop:service=TaskTracker,name=RpcActivity.ReceivedBytes",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/RpcProcessingTime_avg_time":{
-        "metric" : "Hadoop:service=TaskTracker,name=RpcActivity.RpcProcessingTime_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/RpcProcessingTime_num_ops":{
-        "metric" : "Hadoop:service=TaskTracker,name=RpcActivity.RpcProcessingTime_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/RpcQueueTime_avg_time":{
-        "metric" : "Hadoop:service=TaskTracker,name=RpcActivity.RpcQueueTime_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/RpcQueueTime_num_ops":{
-        "metric" : "Hadoop:service=TaskTracker,name=RpcActivity.RpcQueueTime_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/SentBytes":{
-        "metric" : "Hadoop:service=TaskTracker,name=RpcActivity.SentBytes",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/callQueueLen":{
-        "metric" : "Hadoop:service=TaskTracker,name=RpcActivity.callQueueLen",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/rpcAuthenticationFailures":{
-        "metric" : "Hadoop:service=TaskTracker,name=RpcActivity.rpcAuthenticationFailures",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/rpcAuthenticationSuccesses":{
-        "metric" : "Hadoop:service=TaskTracker,name=RpcActivity.rpcAuthenticationSuccesses",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/rpcAuthorizationFailures":{
-        "metric" : "Hadoop:service=TaskTracker,name=RpcActivity.rpcAuthorizationFailures",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/rpcAuthorizationSuccesses":{
-        "metric" : "Hadoop:service=TaskTracker,name=RpcActivity.rpcAuthorizationSuccesses",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getProtocolVersion_num_ops":{
-        "metric" : "Hadoop:service=TaskTracker,name=RpcDetailedActivity.getProtocolVersion_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getProtocolVersion_avg_time":{
-        "metric" : "Hadoop:service=TaskTracker,name=RpcDetailedActivity.getProtocolVersion_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getTask_num_ops":{
-        "metric" : "Hadoop:service=TaskTracker,name=RpcDetailedActivity.getTask_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getTask_avg_time":{
-        "metric" : "Hadoop:service=TaskTracker,name=RpcDetailedActivity.getTask_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/statusUpdate_num_ops":{
-        "metric" : "Hadoop:service=TaskTracker,name=RpcDetailedActivity.statusUpdate_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/statusUpdate_avg_time":{
-        "metric" : "Hadoop:service=TaskTracker,name=RpcDetailedActivity.statusUpdate_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/done_num_ops":{
-        "metric" : "Hadoop:service=TaskTracker,name=RpcDetailedActivity.done_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/done_avg_time":{
-        "metric" : "Hadoop:service=TaskTracker,name=RpcDetailedActivity.done_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getMapCompletionEvents_num_ops":{
-        "metric" : "Hadoop:service=TaskTracker,name=RpcDetailedActivity.getMapCompletionEvents_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/getMapCompletionEvents_avg_time":{
-        "metric" : "Hadoop:service=TaskTracker,name=RpcDetailedActivity.getMapCompletionEvents_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/ping_num_ops":{
-        "metric" : "Hadoop:service=TaskTracker,name=RpcDetailedActivity.ping_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/ping_avg_time":{
-        "metric" : "Hadoop:service=TaskTracker,name=RpcDetailedActivity.ping_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/commitPending_num_ops":{
-        "metric" : "Hadoop:service=TaskTracker,name=RpcDetailedActivity.commitPending_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/commitPending_avg_time":{
-        "metric" : "Hadoop:service=TaskTracker,name=RpcDetailedActivity.commitPending_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/canCommit_num_ops":{
-        "metric" : "Hadoop:service=TaskTracker,name=RpcDetailedActivity.canCommit_num_ops",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpcdetailed/canCommit_avg_time":{
-        "metric" : "Hadoop:service=TaskTracker,name=RpcDetailedActivity.canCommit_avg_time",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/ugi/loginFailure_avg_time":{
-        "metric":"Hadoop:service=TaskTracker,name=ugi.loginFailure_avg_time",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/ugi/loginFailure_num_ops":{
-        "metric":"Hadoop:service=TaskTracker,name=ugi.loginFailure_num_ops",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/ugi/loginSuccess_avg_time":{
-        "metric":"Hadoop:service=TaskTracker,name=ugi.loginSuccess_avg_time",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/ugi/loginSuccess_num_ops":{
-        "metric":"Hadoop:service=TaskTracker,name=ugi.loginSuccess_num_ops",
-        "pointInTime":true,
-        "temporal":false
-      }
-    },
-    "HBASE_MASTER":{
-      "metrics/hbase/master/ClusterId":{
-        "metric":"hadoop:service=Master,name=Master.ClusterId",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/MasterStartTime":{
-        "metric":"hadoop:service=Master,name=Master.MasterStartTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/MasterActiveTime":{
-        "metric":"hadoop:service=Master,name=Master.MasterActiveTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/Coprocessors":{
-        "metric":"hadoop:service=Master,name=Master.Coprocessors",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/ServerName":{
-        "metric":"hadoop:service=Master,name=Master.ServerName",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/AverageLoad":{
-        "metric":"hadoop:service=Master,name=Master.AverageLoad",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/RegionsInTransition":{
-        "metric":"hadoop:service=Master,name=Master.RegionsInTransition",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/RegionServers":{
-        "metric":"hadoop:service=Master,name=Master.RegionServers",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/ZookeeperQuorum":{
-        "metric":"hadoop:service=Master,name=Master.ZookeeperQuorum",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/DeadRegionServers":{
-        "metric":"hadoop:service=Master,name=Master.DeadRegionServers",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/IsActiveMaster":{
-        "metric":"hadoop:service=Master,name=Master.IsActiveMaster",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/splitTimeNumOps":{
-        "metric":"hadoop:service=Master,name=MasterStatistics.splitTimeNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/splitTimeAvgTime":{
-        "metric":"hadoop:service=Master,name=MasterStatistics.splitTimeAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/splitTimeMinTime":{
-        "metric":"hadoop:service=Master,name=MasterStatistics.splitTimeMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/splitTimeMaxTime":{
-        "metric":"hadoop:service=Master,name=MasterStatistics.splitTimeMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/splitSizeNumOps":{
-        "metric":"hadoop:service=Master,name=MasterStatistics.splitSizeNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/splitSizeAvgTime":{
-        "metric":"hadoop:service=Master,name=MasterStatistics.splitSizeAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/splitSizeMinTime":{
-        "metric":"hadoop:service=Master,name=MasterStatistics.splitSizeMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/splitSizeMaxTime":{
-        "metric":"hadoop:service=Master,name=MasterStatistics.splitSizeMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/cluster_requests":{
-        "metric":"hadoop:service=Master,name=MasterStatistics.cluster_requests",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/revision":{
-        "metric":"hadoop:service=HBase,name=Info.revision",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/hdfsUser":{
-        "metric":"hadoop:service=HBase,name=Info.hdfsUser",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/hdfsDate":{
-        "metric":"hadoop:service=HBase,name=Info.hdfsDate",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/hdfsUrl":{
-        "metric":"hadoop:service=HBase,name=Info.hdfsUrl",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/date":{
-        "metric":"hadoop:service=HBase,name=Info.date",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/hdfsRevision":{
-        "metric":"hadoop:service=HBase,name=Info.hdfsRevision",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/user":{
-        "metric":"hadoop:service=HBase,name=Info.user",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/hdfsVersion":{
-        "metric":"hadoop:service=HBase,name=Info.hdfsVersion",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/url":{
-        "metric":"hadoop:service=HBase,name=Info.url",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/hbase/master/version":{
-        "metric":"hadoop:service=HBase,name=Info.version",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/load/AverageLoad":{
-        "metric" : "hadoop:service=Master,name=Master.AverageLoad",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/HeapMemoryMax":{
-        "metric" : "java.lang:type=Memory.HeapMemoryUsage[max]",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/HeapMemoryUsed":{
-        "metric" : "java.lang:type=Memory.HeapMemoryUsage[used]",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/NonHeapMemoryMax":{
-        "metric" : "java.lang:type=Memory.NonHeapMemoryUsage[max]",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/jvm/NonHeapMemoryUsed":{
-        "metric" : "java.lang:type=Memory.NonHeapMemoryUsage[used]",
-        "pointInTime" : true,
-        "temporal" : false
-      },
-      "metrics/rpc/enableTableNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.enableTableNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/enableTableAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.enableTableAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/enableTableMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.enableTableMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/enableTableMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.enableTableMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/assignNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.assignNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/assignAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.assignAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/assignMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.assignMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/assignMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.assignMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/enableTable.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.enableTable.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/enableTable.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.enableTable.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/enableTable.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.enableTable.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/enableTable.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.enableTable.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/RpcSlowResponseNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.RpcSlowResponseNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/RpcSlowResponseAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.RpcSlowResponseAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/RpcSlowResponseMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.RpcSlowResponseMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/RpcSlowResponseMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.RpcSlowResponseMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getStoreFileListNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getStoreFileListNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getStoreFileListAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getStoreFileListAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getStoreFileListMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getStoreFileListMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getStoreFileListMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getStoreFileListMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getProtocolVersion.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getProtocolVersion.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getProtocolVersion.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getProtocolVersion.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getProtocolVersion.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getProtocolVersion.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getProtocolVersion.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getProtocolVersion.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getAlterStatus.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getAlterStatus.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getAlterStatus.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getAlterStatus.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getAlterStatus.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getAlterStatus.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getAlterStatus.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getAlterStatus.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/moveNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.moveNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/moveAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.moveAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/moveMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.moveMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/moveMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.moveMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/openRegionNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.openRegionNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/openRegionAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.openRegionAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/openRegionMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.openRegionMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/openRegionMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.openRegionMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/incrementNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.incrementNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/incrementAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.incrementAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/incrementMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.incrementMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/incrementMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.incrementMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/regionServerStartup.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.regionServerStartup.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/regionServerStartup.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.regionServerStartup.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/regionServerStartup.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.regionServerStartup.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/regionServerStartup.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.regionServerStartup.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteTableNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteTableNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteTableAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteTableAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteTableMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteTableMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteTableMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteTableMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/balance.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.balance.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/balance.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.balance.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/balance.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.balance.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/balance.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.balance.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/execCoprocessorNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.execCoprocessorNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/execCoprocessorAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.execCoprocessorAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/execCoprocessorMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.execCoprocessorMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/execCoprocessorMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.execCoprocessorMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getHTableDescriptorsNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getHTableDescriptorsNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getHTableDescriptorsAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getHTableDescriptorsAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getHTableDescriptorsMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getHTableDescriptorsMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getHTableDescriptorsMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getHTableDescriptorsMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/addColumnNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.addColumnNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/addColumnAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.addColumnAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/addColumnMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.addColumnMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/addColumnMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.addColumnMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/offline.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.offline.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/offline.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.offline.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/offline.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.offline.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/offline.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.offline.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/multiNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.multiNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/multiAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.multiAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/multiMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.multiMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/multiMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.multiMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/closeRegionNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.closeRegionNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/closeRegionAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.closeRegionAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/closeRegionMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.closeRegionMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/closeRegionMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.closeRegionMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/disableTableNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.disableTableNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/disableTableAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.disableTableAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/disableTableMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.disableTableMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/disableTableMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.disableTableMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/bulkLoadHFilesNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.bulkLoadHFilesNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/bulkLoadHFilesAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.bulkLoadHFilesAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/bulkLoadHFilesMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.bulkLoadHFilesMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/bulkLoadHFilesMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.bulkLoadHFilesMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/putNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.putNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/putAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.putAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/putMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.putMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/putMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.putMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/createTableNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.createTableNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/createTableAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.createTableAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/createTableMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.createTableMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/createTableMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.createTableMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/nextNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.nextNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/nextAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.nextAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/nextMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.nextMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/nextMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.nextMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/unlockRowNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.unlockRowNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/unlockRowAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.unlockRowAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/unlockRowMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.unlockRowMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/unlockRowMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.unlockRowMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/reportRSFatalErrorNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.reportRSFatalErrorNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/reportRSFatalErrorAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.reportRSFatalErrorAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/reportRSFatalErrorMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.reportRSFatalErrorMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/reportRSFatalErrorMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.reportRSFatalErrorMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/rpcAuthenticationFailures":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.rpcAuthenticationFailures",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getRegionInfoNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getRegionInfoNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getRegionInfoAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getRegionInfoAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getRegionInfoMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getRegionInfoMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getRegionInfoMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getRegionInfoMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/openScannerNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.openScannerNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/openScannerAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.openScannerAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/openScannerMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.openScannerMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/openScannerMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.openScannerMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/offlineNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.offlineNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/offlineAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.offlineAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/offlineMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.offlineMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/offlineMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.offlineMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getAlterStatusNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getAlterStatusNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getAlterStatusAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getAlterStatusAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getAlterStatusMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getAlterStatusMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getAlterStatusMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getAlterStatusMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/RpcProcessingTimeNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.RpcProcessingTimeNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/RpcProcessingTimeAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.RpcProcessingTimeAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/RpcProcessingTimeMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.RpcProcessingTimeMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/RpcProcessingTimeMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.RpcProcessingTimeMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/move.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.move.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/move.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.move.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/move.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.move.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/move.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.move.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getLastFlushTimeNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getLastFlushTimeNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getLastFlushTimeAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getLastFlushTimeAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getLastFlushTimeMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getLastFlushTimeMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getLastFlushTimeMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getLastFlushTimeMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/shutdownNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.shutdownNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/shutdownAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.shutdownAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/shutdownMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.shutdownMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/shutdownMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.shutdownMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/openRegionsNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.openRegionsNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/openRegionsAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.openRegionsAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/openRegionsMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.openRegionsMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/openRegionsMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.openRegionsMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getClosestRowBeforeNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getClosestRowBeforeNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getClosestRowBeforeAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getClosestRowBeforeAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getClosestRowBeforeMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getClosestRowBeforeMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getClosestRowBeforeMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getClosestRowBeforeMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getHServerInfoNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getHServerInfoNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getHServerInfoAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getHServerInfoAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getHServerInfoMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getHServerInfoMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getHServerInfoMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getHServerInfoMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getProtocolSignatureNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getProtocolSignatureNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getProtocolSignatureAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getProtocolSignatureAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getProtocolSignatureMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getProtocolSignatureMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getProtocolSignatureMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getProtocolSignatureMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/replicationCallQueueLen":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.replicationCallQueueLen",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/SentBytes":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.SentBytes",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/existsNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.existsNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/existsAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.existsAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/existsMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.existsMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/existsMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.existsMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/shutdown.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.shutdown.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/shutdown.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.shutdown.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/shutdown.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.shutdown.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/shutdown.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.shutdown.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/regionServerStartupNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.regionServerStartupNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/regionServerStartupAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.regionServerStartupAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/regionServerStartupMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.regionServerStartupMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/regionServerStartupMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.regionServerStartupMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/compactRegionNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.compactRegionNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/compactRegionAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.compactRegionAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/compactRegionMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.compactRegionMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/compactRegionMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.compactRegionMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/unassign.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.unassign.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/unassign.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.unassign.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/unassign.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.unassign.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/unassign.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.unassign.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/balanceSwitchNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.balanceSwitchNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/balanceSwitchAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.balanceSwitchAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/balanceSwitchMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.balanceSwitchMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/balanceSwitchMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.balanceSwitchMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/rollHLogWriterNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.rollHLogWriterNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/rollHLogWriterAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.rollHLogWriterAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/rollHLogWriterMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.rollHLogWriterMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/rollHLogWriterMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.rollHLogWriterMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/splitRegionNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.splitRegionNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/splitRegionAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.splitRegionAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/splitRegionMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.splitRegionMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/splitRegionMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.splitRegionMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/ReceivedBytes":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.ReceivedBytes",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/isMasterRunning.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.isMasterRunning.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/isMasterRunning.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.isMasterRunning.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/isMasterRunning.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.isMasterRunning.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/isMasterRunning.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.isMasterRunning.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/addColumn.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.addColumn.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/addColumn.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.addColumn.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/addColumn.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.addColumn.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/addColumn.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.addColumn.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/createTable.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.createTable.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/createTable.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.createTable.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/createTable.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.createTable.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/createTable.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.createTable.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getOnlineRegionsNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getOnlineRegionsNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getOnlineRegionsAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getOnlineRegionsAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getOnlineRegionsMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getOnlineRegionsMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getOnlineRegionsMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getOnlineRegionsMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/closeNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.closeNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/closeAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.closeAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/closeMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.closeMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/closeMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.closeMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/balanceNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.balanceNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/balanceAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.balanceAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/balanceMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.balanceMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/balanceMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.balanceMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getProtocolSignature.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getProtocolSignature.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getProtocolSignature.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getProtocolSignature.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getProtocolSignature.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getProtocolSignature.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getProtocolSignature.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getProtocolSignature.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getClusterStatusNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getClusterStatusNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getClusterStatusAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getClusterStatusAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getClusterStatusMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getClusterStatusMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getClusterStatusMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getClusterStatusMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/balanceSwitch.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.balanceSwitch.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/balanceSwitch.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.balanceSwitch.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/balanceSwitch.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.balanceSwitch.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/balanceSwitch.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.balanceSwitch.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/modifyTable.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.modifyTable.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/modifyTable.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.modifyTable.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/modifyTable.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.modifyTable.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/modifyTable.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.modifyTable.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/appendNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.appendNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/appendAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.appendAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/appendMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.appendMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/appendMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.appendMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummariesNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getBlockCacheColumnFamilySummariesNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummariesAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getBlockCacheColumnFamilySummariesAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummariesMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getBlockCacheColumnFamilySummariesMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getBlockCacheColumnFamilySummariesMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getBlockCacheColumnFamilySummariesMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/synchronousBalanceSwitchNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitchNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/synchronousBalanceSwitchAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitchAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/synchronousBalanceSwitchMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitchMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/synchronousBalanceSwitchMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitchMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/stopMasterNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.stopMasterNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/stopMasterAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.stopMasterAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/stopMasterMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.stopMasterMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/stopMasterMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.stopMasterMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/priorityCallQueueLen":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.priorityCallQueueLen",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/checkAndPutNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.checkAndPutNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/checkAndPutAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.checkAndPutAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/checkAndPutMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.checkAndPutMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/checkAndPutMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.checkAndPutMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteColumnNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteColumnNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteColumnAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteColumnAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteColumnMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteColumnMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteColumnMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteColumnMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/disableTable.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.disableTable.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/disableTable.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.disableTable.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/disableTable.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.disableTable.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/disableTable.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.disableTable.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/stopMaster.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.stopMaster.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/stopMaster.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.stopMaster.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/stopMaster.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.stopMaster.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/stopMaster.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.stopMaster.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/callQueueLen":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.callQueueLen",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/replicateLogEntriesNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.replicateLogEntriesNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/replicateLogEntriesAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.replicateLogEntriesAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/replicateLogEntriesMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.replicateLogEntriesMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/replicateLogEntriesMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.replicateLogEntriesMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/rpcAuthorizationSuccesses":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.rpcAuthorizationSuccesses",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/stopNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.stopNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/stopAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.stopAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/stopMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.stopMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/stopMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.stopMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/incrementColumnValueNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.incrementColumnValueNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/incrementColumnValueAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.incrementColumnValueAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/incrementColumnValueMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.incrementColumnValueMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/incrementColumnValueMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.incrementColumnValueMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/flushRegionNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.flushRegionNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/flushRegionAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.flushRegionAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/flushRegionMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.flushRegionMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/flushRegionMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.flushRegionMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/unassignNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.unassignNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/unassignAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.unassignAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/unassignMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.unassignMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/unassignMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.unassignMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getClusterStatus.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getClusterStatus.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getClusterStatus.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getClusterStatus.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getClusterStatus.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getClusterStatus.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getClusterStatus.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getClusterStatus.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/reportRSFatalError.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.reportRSFatalError.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/reportRSFatalError.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.reportRSFatalError.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/reportRSFatalError.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.reportRSFatalError.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/reportRSFatalError.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.reportRSFatalError.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/NumOpenConnections":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.NumOpenConnections",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/rpcAuthenticationSuccesses":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.rpcAuthenticationSuccesses",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/mutateRowNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.mutateRowNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/mutateRowAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.mutateRowAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/mutateRowMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.mutateRowMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/mutateRowMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.mutateRowMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/modifyTableNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.modifyTableNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/modifyTableAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.modifyTableAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/modifyTableMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.modifyTableMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/modifyTableMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.modifyTableMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/synchronousBalanceSwitch.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitch.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/synchronousBalanceSwitch.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitch.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/synchronousBalanceSwitch.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitch.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/synchronousBalanceSwitch.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.synchronousBalanceSwitch.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/rpcAuthorizationFailures":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.rpcAuthorizationFailures",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getProtocolVersionNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getProtocolVersionNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getProtocolVersionAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getProtocolVersionAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getProtocolVersionMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getProtocolVersionMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getProtocolVersionMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getProtocolVersionMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/RpcQueueTimeNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.RpcQueueTimeNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/RpcQueueTimeAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.RpcQueueTimeAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/RpcQueueTimeMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.RpcQueueTimeMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/RpcQueueTimeMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.RpcQueueTimeMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/checkAndDeleteNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.checkAndDeleteNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/checkAndDeleteAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.checkAndDeleteAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/checkAndDeleteMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.checkAndDeleteMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/checkAndDeleteMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.checkAndDeleteMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteTable.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteTable.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteTable.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteTable.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteTable.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteTable.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteTable.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteTable.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/isMasterRunningNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.isMasterRunningNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/isMasterRunningAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.isMasterRunningAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/isMasterRunningMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.isMasterRunningMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/isMasterRunningMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.isMasterRunningMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/modifyColumnNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.modifyColumnNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/modifyColumnAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.modifyColumnAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/modifyColumnMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.modifyColumnMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/modifyColumnMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.modifyColumnMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/lockRowNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.lockRowNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/lockRowAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.lockRowAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/lockRowMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.lockRowMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/lockRowMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.lockRowMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/modifyColumn.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.modifyColumn.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/modifyColumn.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.modifyColumn.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/modifyColumn.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.modifyColumn.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/modifyColumn.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.modifyColumn.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/regionServerReport.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.regionServerReport.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/regionServerReport.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.regionServerReport.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/regionServerReport.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.regionServerReport.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/regionServerReport.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.regionServerReport.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getCompactionStateNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getCompactionStateNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getCompactionStateAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getCompactionStateAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getCompactionStateMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getCompactionStateMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getCompactionStateMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getCompactionStateMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/assign.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.assign.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/assign.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.assign.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/assign.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.assign.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/assign.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.assign.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/regionServerReportNumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.regionServerReportNumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/regionServerReportAvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.regionServerReportAvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/regionServerReportMinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.regionServerReportMinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/regionServerReportMaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.regionServerReportMaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteColumn.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteColumn.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteColumn.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteColumn.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteColumn.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteColumn.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/deleteColumn.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.deleteColumn.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getHTableDescriptors.aboveOneSec.NumOps":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getHTableDescriptors.aboveOneSec.NumOps",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getHTableDescriptors.aboveOneSec.AvgTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getHTableDescriptors.aboveOneSec.AvgTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getHTableDescriptors.aboveOneSec.MinTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getHTableDescriptors.aboveOneSec.MinTime",
-        "pointInTime":true,
-        "temporal":false
-      },
-      "metrics/rpc/getHTableDescriptors.aboveOneSec.MaxTime":{
-        "metric":"hadoop:service=HBase,name=RPCStatistics.getHTableDescriptors.aboveOneSec.MaxTime",
-        "pointInTime":true,
-        "temporal":false
-      }
-    }
-  }
-}
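
Each entry in the mapping deleted above ties an Ambari API property id (the
"metrics/rpc/..." key) to a Hadoop JMX source; the "pointInTime" and
"temporal" flags mark whether the value can be served as a live snapshot or
from a time-series store such as Ganglia. The "metric" string appears to pack
a JMX ObjectName and an attribute name into one value. A minimal sketch of one
plausible way to split it, assuming the attribute is everything after the
first '.' (an inference from the entries in this file, not Ambari's actual
provider code):

    import javax.management.ObjectName;

    // Illustrative sketch only; the class and its split rule are hypothetical.
    public final class JmxMetricName {
        final ObjectName bean;   // e.g. hadoop:service=HBase,name=RPCStatistics
        final String attribute;  // e.g. checkAndDeleteNumOps

        JmxMetricName(String metric) throws Exception {
            int dot = metric.indexOf('.');
            if (dot < 0) {
                throw new IllegalArgumentException("no attribute in: " + metric);
            }
            this.bean = new ObjectName(metric.substring(0, dot));
            this.attribute = metric.substring(dot + 1);
        }

        public static void main(String[] args) throws Exception {
            JmxMetricName m = new JmxMetricName(
                "hadoop:service=HBase,name=RPCStatistics.checkAndDeleteNumOps");
            // Prints: hadoop:service=HBase,name=RPCStatistics -> checkAndDeleteNumOps
            System.out.println(m.bean + " -> " + m.attribute);
        }
    }
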
diff --git a/branch-1.2/ambari-server/src/main/resources/key_properties.json b/branch-1.2/ambari-server/src/main/resources/key_properties.json
deleted file mode 100644
index 5841b7c..0000000
--- a/branch-1.2/ambari-server/src/main/resources/key_properties.json
+++ /dev/null
@@ -1,46 +0,0 @@
-{
-  "Cluster":{
-    "Cluster":"Clusters/cluster_name"
-  },
-  "Service":{
-    "Cluster":"ServiceInfo/cluster_name",
-    "Service":"ServiceInfo/service_name"
-  },
-  "Host":{
-    "Cluster":"Hosts/cluster_name",
-    "Host":"Hosts/host_name"
-  },
-  "Component":{
-    "Cluster":"ServiceComponentInfo/cluster_name",
-    "Service":"ServiceComponentInfo/service_name",
-    "Component":"ServiceComponentInfo/component_name",
-    "HostComponent":"ServiceComponentInfo/component_name"
-  },
-  "HostComponent":{
-    "Cluster":"HostRoles/cluster_name",
-    "Host":"HostRoles/host_name",
-    "HostComponent":"HostRoles/component_name",
-    "Component":"HostRoles/component_name"
-  },
-  "Configuration":{
-    "Cluster":"Config/cluster_name",
-    "Configuration":"Config/type"
-  },
-  "Action":{
-    "Cluster":"Actions/cluster_name",
-    "Service":"Actions/service_name",
-    "Action":"Actions/action_name"
-  },
-  "Request":{
-    "Cluster":"Requests/cluster_name",
-    "Request":"Requests/id"
-  },
-  "Task":{
-    "Cluster":"Tasks/cluster_name",
-    "Request":"Tasks/request_id",
-    "Task":"Tasks/id"
-  },
-  "User":{
-    "User":"Users/user_name"
-  }
-}
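
key_properties.json declares, for each resource type, which property id acts
as that resource's key when it is addressed from a given context: a Service,
for example, is identified by ServiceInfo/cluster_name plus
ServiceInfo/service_name. A hedged sketch of consulting such a table; the map
literals mirror two entries from the file, but the helper itself is
illustrative, not Ambari's API:

    import java.util.Map;

    public class KeyProperties {
        // Two entries copied from the deleted file above.
        static final Map<String, Map<String, String>> KEYS = Map.of(
            "Service", Map.of(
                "Cluster", "ServiceInfo/cluster_name",
                "Service", "ServiceInfo/service_name"),
            "Host", Map.of(
                "Cluster", "Hosts/cluster_name",
                "Host", "Hosts/host_name"));

        // Hypothetical helper: which property keys resourceType in keyType's scope?
        static String keyFor(String resourceType, String keyType) {
            return KEYS.getOrDefault(resourceType, Map.of()).get(keyType);
        }

        public static void main(String[] args) {
            System.out.println(keyFor("Service", "Cluster")); // ServiceInfo/cluster_name
            System.out.println(keyFor("Service", "Service")); // ServiceInfo/service_name
        }
    }
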
diff --git a/branch-1.2/ambari-server/src/main/resources/properties.json b/branch-1.2/ambari-server/src/main/resources/properties.json
deleted file mode 100644
index 448bea0..0000000
--- a/branch-1.2/ambari-server/src/main/resources/properties.json
+++ /dev/null
@@ -1,101 +0,0 @@
-{
-    "Cluster":[
-        "Clusters/cluster_id",
-        "Clusters/cluster_name",
-        "Clusters/version",
-        "Clusters/state",
-        "_"
-    ],
-    "Service":[
-        "ServiceInfo/service_name",
-        "ServiceInfo/cluster_name",
-        "ServiceInfo/state",
-        "Services/description",
-        "Services/display_name",
-        "Services/attributes",
-        "ServiceInfo/desired_configs",
-        "_"
-    ],
-    "Host":[
-        "Hosts/cluster_name",
-        "Hosts/host_name",
-        "Hosts/ip",
-        "Hosts/attributes",
-        "Hosts/total_mem",
-        "Hosts/cpu_count",
-        "Hosts/os_arch",
-        "Hosts/os_type",
-        "Hosts/rack_info",
-        "Hosts/last_heartbeat_time",
-        "Hosts/last_agent_env",
-        "Hosts/last_registration_time",
-        "Hosts/disk_info",
-        "Hosts/host_status",
-        "Hosts/host_health_report",
-        "Hosts/public_host_name",
-        "Hosts/host_state",
-        "_"
-    ],
-    "Component":[
-        "ServiceComponentInfo/service_name",
-        "ServiceComponentInfo/component_name",
-        "ServiceComponentInfo/cluster_name",
-        "ServiceComponentInfo/state",
-        "ServiceComponents/display_name",
-        "ServiceComponents/description",
-        "ServiceComponentInfo/desired_configs",
-        "_"
-    ],
-    "HostComponent":[
-        "HostRoles/role_id",
-        "HostRoles/cluster_name",
-        "HostRoles/host_name",
-        "HostRoles/component_name",
-        "HostRoles/state",
-        "HostRoles/desired_state",
-        "HostRoles/configs",
-        "HostRoles/desired_configs",
-        "_"
-    ],
-    "Configuration":[
-        "Config/tag",
-        "Config/type",
-        "Config/cluster_name"
-    ],
-    "Action":[
-        "Actions/cluster_name",
-        "Actions/service_name",
-        "Actions/action_name",
-        "_"
-    ],
-    "Request":[
-        "Requests/id",
-        "Requests/cluster_name",
-        "Requests/request_status",
-        "_"
-    ],
-    "Task":[
-        "Tasks/id",
-        "Tasks/request_id",
-        "Tasks/cluster_name",
-        "Tasks/stage_id",
-        "Tasks/host_name",
-        "Tasks/role",
-        "Tasks/command",
-        "Tasks/status",
-        "Tasks/exit_code",
-        "Tasks/stderr",
-        "Tasks/stdout",
-        "Tasks/start_time",
-        "Tasks/attempt_cnt",
-        "_"
-    ],
-    "User":[
-        "Users/user_name",
-        "Users/roles",
-        "Users/password",
-        "Users/old_password",
-        "Users/ldap_user",
-        "_"
-    ]
-}
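
properties.json enumerates the property ids each resource type exposes; the
trailing "_" entry looks like a catch-all marker. A small sketch of field
validation under that assumption (the pass-through semantics of "_" are
inferred here, not documented in the file):

    import java.util.Set;

    public class SupportedProperties {
        // The Request entry from the deleted file above.
        static final Set<String> REQUEST_PROPS = Set.of(
            "Requests/id", "Requests/cluster_name", "Requests/request_status", "_");

        // Assumed semantics: "_" lets otherwise-unknown ids pass through.
        static boolean isSupported(String propertyId) {
            return REQUEST_PROPS.contains(propertyId) || REQUEST_PROPS.contains("_");
        }

        public static void main(String[] args) {
            System.out.println(isSupported("Requests/request_status")); // true
            System.out.println(isSupported("Requests/no_such_field"));  // true, via "_"
        }
    }
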
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/repos/repoinfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/repos/repoinfo.xml
deleted file mode 100644
index 2d1a8ff..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/repos/repoinfo.xml
+++ /dev/null
@@ -1,95 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<reposinfo>
-  <os type="centos6">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos6</baseurl>
-      <repoid>HDP-1.1.1.16</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos6</baseurl>
-      <repoid>HDP-UTILS-1.1.0.15</repoid>
-      <reponame>HDP-UTILS</reponame>
-      <mirrorslist></mirrorslist>
-    </repo>
-    <repo>
-      <baseurl></baseurl>
-      <repoid>epel</repoid>
-      <reponame>epel</reponame>
-      <mirrorslist><![CDATA[https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch]]></mirrorslist>
-    </repo>
-  </os>
-  <os type="centos5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos5</baseurl>
-      <repoid>HDP-1.1.1.16</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos5</baseurl>
-      <repoid>HDP-UTILS-1.1.0.15</repoid>
-      <reponame>HDP-UTILS</reponame>
-      <mirrorslist></mirrorslist>
-    </repo>
-    <repo>
-      <baseurl></baseurl>
-      <repoid>epel</repoid>
-      <reponame>epel</reponame>
-      <mirrorslist><![CDATA[https://mirrors.fedoraproject.org/metalink?repo=epel-5&arch=$basearch]]></mirrorslist>
-    </repo>
-  </os>
-  <os type="redhat5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos5</baseurl>
-      <repoid>HDP-1.1.1.16</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos5</baseurl>
-      <repoid>HDP-UTILS-1.1.0.15</repoid>
-      <reponame>HDP-UTILS</reponame>
-      <mirrorslist></mirrorslist>
-    </repo>
-    <repo>
-      <baseurl></baseurl>
-      <repoid>epel</repoid>
-      <reponame>epel</reponame>
-      <mirrorslist><![CDATA[https://mirrors.fedoraproject.org/metalink?repo=epel-5&arch=$basearch]]></mirrorslist>
-    </repo>
-  </os>
-  <os type="redhat6">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos6</baseurl>
-      <repoid>HDP-1.1.1.16</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos6</baseurl>
-      <repoid>HDP-UTILS-1.1.0.15</repoid>
-      <reponame>HDP-UTILS</reponame>
-      <mirrorslist></mirrorslist>
-    </repo>
-    <repo>
-      <baseurl></baseurl>
-      <repoid>epel</repoid>
-      <reponame>epel</reponame>
-      <mirrorslist><![CDATA[https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch]]></mirrorslist>
-    </repo>
-  </os>
-</reposinfo>
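
repoinfo.xml groups package repositories by OS family; each <repo> carries a
repo id, a display name, and either a base URL or a mirrors list. A
self-contained DOM sketch that lists them (plain JDK code with an assumed
local file path, not Ambari's loader):

    import java.io.File;
    import javax.xml.parsers.DocumentBuilderFactory;
    import org.w3c.dom.Document;
    import org.w3c.dom.Element;
    import org.w3c.dom.NodeList;

    public class RepoInfoReader {
        public static void main(String[] args) throws Exception {
            Document doc = DocumentBuilderFactory.newInstance()
                .newDocumentBuilder().parse(new File("repoinfo.xml"));
            NodeList osNodes = doc.getElementsByTagName("os");
            for (int i = 0; i < osNodes.getLength(); i++) {
                Element os = (Element) osNodes.item(i);
                NodeList repos = os.getElementsByTagName("repo");
                for (int j = 0; j < repos.getLength(); j++) {
                    Element repo = (Element) repos.item(j);
                    System.out.println(os.getAttribute("type") + ": "
                        + text(repo, "repoid") + " -> " + text(repo, "baseurl"));
                }
            }
        }

        // First matching child's text, or "" when the tag is absent.
        private static String text(Element parent, String tag) {
            NodeList nl = parent.getElementsByTagName(tag);
            return nl.getLength() == 0 ? "" : nl.item(0).getTextContent();
        }
    }
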
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/GANGLIA/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/GANGLIA/metainfo.xml
deleted file mode 100644
index 667cdc1..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/GANGLIA/metainfo.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is comment for GANGLIA service</comment>
-    <version>1.0</version>
-
-
-    <components>
-        <component>
-            <name>GANGLIA_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>GANGLIA_MONITOR</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>MONITOR_WEBSERVER</name>
-            <category>MASTER</category>
-        </component>
-
-    </components>
-
-
-</metainfo>
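
A stack metainfo.xml declares a service's components and classifies each as
MASTER, SLAVE, or CLIENT; the HBASE, HCATALOG, and HDFS files deleted below
follow the same shape. A sketch of the implied model (an illustrative class,
not an Ambari type; the names mirror the GANGLIA entries above):

    public class ServiceComponent {
        enum Category { MASTER, SLAVE, CLIENT }

        final String name;
        final Category category;

        ServiceComponent(String name, Category category) {
            this.name = name;
            this.category = category;
        }

        public static void main(String[] args) {
            ServiceComponent[] ganglia = {
                new ServiceComponent("GANGLIA_SERVER", Category.MASTER),
                new ServiceComponent("GANGLIA_MONITOR", Category.SLAVE),
                new ServiceComponent("MONITOR_WEBSERVER", Category.MASTER),
            };
            for (ServiceComponent c : ganglia) {
                System.out.println(c.name + " is a " + c.category + " component");
            }
        }
    }
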
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/HBASE/configuration/hbase-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/HBASE/configuration/hbase-site.xml
deleted file mode 100644
index f7e9e58..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/HBASE/configuration/hbase-site.xml
+++ /dev/null
@@ -1,145 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>hbase.regionserver.msginterval</name>
-    <value>1000</value>
-    <description>Interval, in milliseconds, between messages from the
-    RegionServer to the HMaster.  Default is 15. Set this value low if
-    you want unit tests to be responsive.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.pause</name>
-    <value>5000</value>
-    <description>General client pause value.  Used mostly as value to wait
-    before running a retry of a failed get, region lookup, etc.</description>
-  </property>
-  <property>
-    <name>hbase.master.meta.thread.rescanfrequency</name>
-    <value>10000</value>
-    <description>How long the HMaster sleeps (in milliseconds) between scans of
-    the root and meta tables.
-    </description>
-  </property>
-  <property>
-    <name>hbase.server.thread.wakefrequency</name>
-    <value>1000</value>
-    <description>Time to sleep in between searches for work (in milliseconds).
-    Used as sleep interval by service threads such as META scanner and log roller.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.handler.count</name>
-    <value>5</value>
-    <description>Count of RPC Server instances spun up on RegionServers.
-    The same property is used by the HMaster for the count of master
-    handlers.  Default is 10.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.lease.period</name>
-    <value>6000</value>
-    <description>Length of time the master will wait before timing out a region
-    server lease. Since region servers report in every second (see above), this
-    value has been reduced so that the master will notice a dead region server
-    sooner. The default is 30 seconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.port</name>
-    <value>-1</value>
-    <description>The port for the hbase master web UI
-    Set to -1 if you do not want the info server to run.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.info.port</name>
-    <value>-1</value>
-    <description>The port for the hbase regionserver web UI
-    Set to -1 if you do not want the info server to run.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.info.port.auto</name>
-    <value>true</value>
-    <description>Info server auto port bind. Enables automatic port
-    search if hbase.regionserver.info.port is already in use.
-    Enabled for testing to run multiple tests on one machine.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.lease.thread.wakefrequency</name>
-    <value>3000</value>
-    <description>The interval between checks for expired region server leases.
-    This value has been reduced due to the other reduced values above so that
-    the master will notice a dead region server sooner. The default is 15 seconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.optionalcacheflushinterval</name>
-    <value>10000</value>
-    <description>
-    Amount of time to wait since the last time a region was flushed before
-    invoking an optional cache flush. Default 60,000.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.safemode</name>
-    <value>false</value>
-    <description>
-    Turn on/off safe mode in region server. Always on for production, always off
-    for tests.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.max.filesize</name>
-    <value>67108864</value>
-    <description>
-    Maximum desired file size for an HRegion.  If filesize exceeds
-    value + (value / 2), the HRegion is split in two.  Default: 256M.
-
-    Keep the maximum filesize small so we split more often in tests.
-    </description>
-  </property>
-  <property>
-    <name>hadoop.log.dir</name>
-    <value>${user.dir}/../logs</value>
-  </property>
-  <property>
-    <name>hbase.cluster.distributed</name>
-    <value>true</value>
-    <description>
-    The mode the cluster will be in. Possible values are false for standalone mode and true for distributed mode.
-    If false, startup will run all HBase and ZooKeeper daemons together in the one JVM.
-    </description>
-  </property>
-  <property>
-    <name>hbase.zookeeper.property.clientPort</name>
-    <value>21818</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-    The port at which the clients will connect.
-    </description>
-  </property>
-</configuration>
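
This hbase-site.xml uses the standard Hadoop configuration format
(name/value/description properties), so it can be read with the stock
Configuration class. A hedged sketch, assuming a Hadoop 1.x-era hadoop-core
jar on the classpath and the file in the working directory:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;

    public class ReadHBaseSite {
        public static void main(String[] args) {
            Configuration conf = new Configuration(false); // skip Hadoop defaults
            conf.addResource(new Path("hbase-site.xml"));
            int handlers = conf.getInt("hbase.regionserver.handler.count", 10);
            boolean distributed = conf.getBoolean("hbase.cluster.distributed", false);
            System.out.println("handlers=" + handlers + ", distributed=" + distributed);
        }
    }
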
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/HBASE/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/HBASE/metainfo.xml
deleted file mode 100644
index d584d7f..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/HBASE/metainfo.xml
+++ /dev/null
@@ -1,42 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>mapred</user>
-    <comment>This is comment for HBASE service</comment>
-    <version>1.0</version>
-
-
-    <components>
-        <component>
-            <name>HBASE_MASTER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>HBASE_REGIONSERVER</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>HBASE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/HCATALOG/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/HCATALOG/metainfo.xml
deleted file mode 100644
index 33218b6..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/HCATALOG/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is comment for HCATALOG service</comment>
-    <version>1.0</version>
-
-    <components>
-        <component>
-            <name>HCAT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/HDFS/configuration/core-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/HDFS/configuration/core-site.xml
deleted file mode 100644
index 73942a5..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/HDFS/configuration/core-site.xml
+++ /dev/null
@@ -1,596 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Do not modify this file directly.  Instead, copy entries that you -->
-<!-- wish to modify from this file into core-site.xml and change them -->
-<!-- there.  If core-site.xml does not already exist, create it.      -->
-
-<configuration>
-
-<!--- global properties -->
-
-<property>
-  <name>hadoop.tmp.dir</name>
-  <value>/tmp/hadoop-${user.name}</value>
-  <description>A base for other temporary directories.</description>
-</property>
-
-<property>
-  <name>hadoop.native.lib</name>
-  <value>true</value>
-  <description>Should native hadoop libraries, if present, be used.</description>
-</property>
-
-<property>
-  <name>hadoop.http.filter.initializers</name>
-  <value></value>
-  <description>A comma separated list of class names. Each class in the list
-  must extend org.apache.hadoop.http.FilterInitializer. The corresponding
-  Filter will be initialized. Then, the Filter will be applied to all user
-  facing jsp and servlet web pages.  The ordering of the list defines the
-  ordering of the filters.</description>
-</property>
-
- <property>
-  <name>hadoop.security.group.mapping</name>
-  <value>org.apache.hadoop.security.ShellBasedUnixGroupsMapping</value>
-  <description>Class for user to group mapping (get groups for a given user)
-  </description>
-</property>
-
-<property>
-  <name>hadoop.security.authorization</name>
-  <value>false</value>
-  <description>Is service-level authorization enabled?</description>
-</property>
-
-<property>
-  <name>hadoop.security.authentication</name>
-  <value>simple</value>
-  <description>Possible values are simple (no authentication), and kerberos
-  </description>
-</property>
-
-<property>
-  <name>hadoop.security.token.service.use_ip</name>
-  <value>true</value>
-  <description>Controls whether tokens always use IP addresses.  DNS changes
-  will not be detected if this option is enabled.  Existing client connections
-  that break will always reconnect to the IP of the original host.  New clients
-  will connect to the host's new IP but fail to locate a token.  Disabling
-  this option will allow existing and new clients to detect an IP change and
-  continue to locate the new host's token.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.security.use-weak-http-crypto</name>
-  <value>false</value>
-  <description>If enabled, use KSSL to authenticate HTTP connections to the
-  NameNode. Due to a bug in JDK6, using KSSL requires one to configure
-  Kerberos tickets to use encryption types that are known to be
-  cryptographically weak. If disabled, SPNEGO will be used for HTTP
-  authentication, which supports stronger encryption types.
-  </description>
-</property>
-
-<!--
-<property>
-  <name>hadoop.security.service.user.name.key</name>
-  <value></value>
-  <description>Name of the kerberos principal of the user that owns
-  a given service daemon
-  </description>
-</property>
--->
-
-<!--- logging properties -->
-
-<property>
-  <name>hadoop.logfile.size</name>
-  <value>10000000</value>
-  <description>The max size of each log file</description>
-</property>
-
-<property>
-  <name>hadoop.logfile.count</name>
-  <value>10</value>
-  <description>The max number of log files</description>
-</property>
-
-<!-- i/o properties -->
-<property>
-  <name>io.file.buffer.size</name>
-  <value>4096</value>
-  <description>The size of buffer for use in sequence files.
-  The size of this buffer should probably be a multiple of hardware
-  page size (4096 on Intel x86), and it determines how much data is
-  buffered during read and write operations.</description>
-</property>
-
-<property>
-  <name>io.bytes.per.checksum</name>
-  <value>512</value>
-  <description>The number of bytes per checksum.  Must not be larger than
-  io.file.buffer.size.</description>
-</property>
-
-<property>
-  <name>io.skip.checksum.errors</name>
-  <value>false</value>
-  <description>If true, when a checksum error is encountered while
-  reading a sequence file, entries are skipped, instead of throwing an
-  exception.</description>
-</property>
-
-<property>
-  <name>io.compression.codecs</name>
-  <value>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec,org.apache.hadoop.io.compress.SnappyCodec</value>
-  <description>A list of the compression codec classes that can be used
-               for compression/decompression.</description>
-</property>
-
-<property>
-  <name>io.serializations</name>
-  <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
-  <description>A list of serialization classes that can be used for
-  obtaining serializers and deserializers.</description>
-</property>
-
-<!-- file system properties -->
-
-<property>
-  <name>fs.default.name</name>
-  <value>file:///</value>
-  <description>The name of the default file system.  A URI whose
-  scheme and authority determine the FileSystem implementation.  The
-  uri's scheme determines the config property (fs.SCHEME.impl) naming
-  the FileSystem implementation class.  The uri's authority is used to
-  determine the host, port, etc. for a filesystem.</description>
-</property>
-
-<property>
-  <name>fs.trash.interval</name>
-  <value>0</value>
-  <description>Number of minutes between trash checkpoints.
-  If zero, the trash feature is disabled.
-  </description>
-</property>
-
-<property>
-  <name>fs.file.impl</name>
-  <value>org.apache.hadoop.fs.LocalFileSystem</value>
-  <description>The FileSystem for file: uris.</description>
-</property>
-
-<property>
-  <name>fs.hdfs.impl</name>
-  <value>org.apache.hadoop.hdfs.DistributedFileSystem</value>
-  <description>The FileSystem for hdfs: uris.</description>
-</property>
-
-<property>
-  <name>fs.s3.impl</name>
-  <value>org.apache.hadoop.fs.s3.S3FileSystem</value>
-  <description>The FileSystem for s3: uris.</description>
-</property>
-
-<property>
-  <name>fs.s3n.impl</name>
-  <value>org.apache.hadoop.fs.s3native.NativeS3FileSystem</value>
-  <description>The FileSystem for s3n: (Native S3) uris.</description>
-</property>
-
-<property>
-  <name>fs.kfs.impl</name>
-  <value>org.apache.hadoop.fs.kfs.KosmosFileSystem</value>
-  <description>The FileSystem for kfs: uris.</description>
-</property>
-
-<property>
-  <name>fs.hftp.impl</name>
-  <value>org.apache.hadoop.hdfs.HftpFileSystem</value>
-</property>
-
-<property>
-  <name>fs.hsftp.impl</name>
-  <value>org.apache.hadoop.hdfs.HsftpFileSystem</value>
-</property>
-
-<property>
-  <name>fs.webhdfs.impl</name>
-  <value>org.apache.hadoop.hdfs.web.WebHdfsFileSystem</value>
-</property>
-
-<property>
-  <name>fs.ftp.impl</name>
-  <value>org.apache.hadoop.fs.ftp.FTPFileSystem</value>
-  <description>The FileSystem for ftp: uris.</description>
-</property>
-
-<property>
-  <name>fs.ramfs.impl</name>
-  <value>org.apache.hadoop.fs.InMemoryFileSystem</value>
-  <description>The FileSystem for ramfs: uris.</description>
-</property>
-
-<property>
-  <name>fs.har.impl</name>
-  <value>org.apache.hadoop.fs.HarFileSystem</value>
-  <description>The filesystem for Hadoop archives. </description>
-</property>
-
-<property>
-  <name>fs.har.impl.disable.cache</name>
-  <value>true</value>
-  <description>Don't cache 'har' filesystem instances.</description>
-</property>
-
-<property>
-  <name>fs.checkpoint.dir</name>
-  <value>${hadoop.tmp.dir}/dfs/namesecondary</value>
-  <description>Determines where on the local filesystem the DFS secondary
-      name node should store the temporary images to merge.
-      If this is a comma-delimited list of directories then the image is
-      replicated in all of the directories for redundancy.
-  </description>
-</property>
-
-<property>
-  <name>fs.checkpoint.edits.dir</name>
-  <value>${fs.checkpoint.dir}</value>
-  <description>Determines where on the local filesystem the DFS secondary
-      name node should store the temporary edits to merge.
-      If this is a comma-delimited list of directories then the edits are
-      replicated in all of the directories for redundancy.
-      The default value is the same as fs.checkpoint.dir.
-  </description>
-</property>
-
-<property>
-  <name>fs.checkpoint.period</name>
-  <value>3600</value>
-  <description>The number of seconds between two periodic checkpoints.
-  </description>
-</property>
-
-<property>
-  <name>fs.checkpoint.size</name>
-  <value>67108864</value>
-  <description>The size of the current edit log (in bytes) that triggers
-       a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
-  </description>
-</property>
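
fs.checkpoint.period and fs.checkpoint.size act as an either/or trigger: the
secondary namenode checkpoints when the period elapses or when the edit log
outgrows the size threshold, whichever comes first. A toy sketch of that
decision (illustrative logic, not the HDFS source):

    public class CheckpointTrigger {
        static boolean shouldCheckpoint(long secondsSinceLast, long editLogBytes,
                                        long periodSeconds, long sizeBytes) {
            return secondsSinceLast >= periodSeconds || editLogBytes >= sizeBytes;
        }

        public static void main(String[] args) {
            // Values from this file: period 3600 s, size 64 MB.
            System.out.println(shouldCheckpoint(1800, 80L << 20, 3600, 64L << 20)); // true: log too big
            System.out.println(shouldCheckpoint(1800, 10L << 20, 3600, 64L << 20)); // false
        }
    }
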
-
-
-
-<property>
-  <name>fs.s3.block.size</name>
-  <value>67108864</value>
-  <description>Block size to use when writing files to S3.</description>
-</property>
-
-<property>
-  <name>fs.s3.buffer.dir</name>
-  <value>${hadoop.tmp.dir}/s3</value>
-  <description>Determines where on the local filesystem the S3 filesystem
-  should store files before sending them to S3
-  (or after retrieving them from S3).
-  </description>
-</property>
-
-<property>
-  <name>fs.s3.maxRetries</name>
-  <value>4</value>
-  <description>The maximum number of retries for reading or writing files to S3,
-  before we signal failure to the application.
-  </description>
-</property>
-
-<property>
-  <name>fs.s3.sleepTimeSeconds</name>
-  <value>10</value>
-  <description>The number of seconds to sleep between each S3 retry.
-  </description>
-</property>
-
-
-<property>
-  <name>local.cache.size</name>
-  <value>10737418240</value>
-  <description>The limit on the size of cache you want to keep, set by default
-  to 10GB. This will act as a soft limit on the cache directory for out of band data.
-  </description>
-</property>
-
-<property>
-  <name>io.seqfile.compress.blocksize</name>
-  <value>1000000</value>
-  <description>The minimum block size for compression in block compressed
-          SequenceFiles.
-  </description>
-</property>
-
-<property>
-  <name>io.seqfile.lazydecompress</name>
-  <value>true</value>
-  <description>Should values of block-compressed SequenceFiles be decompressed
-          only when necessary.
-  </description>
-</property>
-
-<property>
-  <name>io.seqfile.sorter.recordlimit</name>
-  <value>1000000</value>
-  <description>The limit on number of records to be kept in memory in a spill
-          in SequenceFiles.Sorter
-  </description>
-</property>
-
- <property>
-  <name>io.mapfile.bloom.size</name>
-  <value>1048576</value>
-  <description>The size of BloomFilter-s used in BloomMapFile. Each time this many
-  keys is appended the next BloomFilter will be created (inside a DynamicBloomFilter).
-  Larger values minimize the number of filters, which slightly increases the performance,
-  but may waste too much space if the total number of keys is usually much smaller
-  than this number.
-  </description>
-</property>
-
-<property>
-  <name>io.mapfile.bloom.error.rate</name>
-  <value>0.005</value>
-  <description>The rate of false positives in BloomFilter-s used in BloomMapFile.
-  As this value decreases, the size of BloomFilter-s increases exponentially. This
-  value is the probability of encountering false positives (default is 0.5%).
-  </description>
-</property>
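
For context on the io.mapfile.bloom.* settings: the textbook Bloom filter
sizing relation is m = -n * ln(p) / (ln 2)^2 bits for n keys at
false-positive rate p, which is why tightening the error rate costs more
space per key. A sketch of that arithmetic at the defaults above (the
standard formula, not Hadoop's implementation):

    public class BloomSizing {
        // Bits needed for `keys` entries at the given false-positive rate.
        static long bits(long keys, double errorRate) {
            double ln2 = Math.log(2);
            return (long) Math.ceil(-keys * Math.log(errorRate) / (ln2 * ln2));
        }

        public static void main(String[] args) {
            long n = 1_048_576; // io.mapfile.bloom.size keys per filter
            System.out.println(bits(n, 0.005)  + " bits at p=0.005 (the default above)");
            System.out.println(bits(n, 0.0005) + " bits at p=0.0005");
        }
    }
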
-
-<property>
-  <name>hadoop.util.hash.type</name>
-  <value>murmur</value>
-  <description>The default implementation of Hash. Currently this can take one of the
-  two values: 'murmur' to select MurmurHash and 'jenkins' to select JenkinsHash.
-  </description>
-</property>
-
-
-<!-- ipc properties -->
-
-<property>
-  <name>ipc.client.idlethreshold</name>
-  <value>4000</value>
-  <description>Defines the threshold number of connections after which
-               connections will be inspected for idleness.
-  </description>
-</property>
-
-<property>
-  <name>ipc.client.kill.max</name>
-  <value>10</value>
-  <description>Defines the maximum number of clients to disconnect in one go.
-  </description>
-</property>
-
-<property>
-  <name>ipc.client.connection.maxidletime</name>
-  <value>10000</value>
-  <description>The maximum time in msec after which a client will bring down the
-               connection to the server.
-  </description>
-</property>
-
-<property>
-  <name>ipc.client.connect.max.retries</name>
-  <value>10</value>
-  <description>Indicates the number of retries a client will make to establish
-               a server connection.
-  </description>
-</property>
-
-<property>
-  <name>ipc.server.listen.queue.size</name>
-  <value>128</value>
-  <description>Indicates the length of the listen queue for servers accepting
-               client connections.
-  </description>
-</property>
-
-<property>
-  <name>ipc.server.tcpnodelay</name>
-  <value>false</value>
-  <description>Turn on/off Nagle's algorithm for the TCP socket connection on
-  the server. Setting to true disables the algorithm and may decrease latency
-  with a cost of more/smaller packets.
-  </description>
-</property>
-
-<property>
-  <name>ipc.client.tcpnodelay</name>
-  <value>false</value>
-  <description>Turn on/off Nagle's algorithm for the TCP socket connection on
-  the client. Setting to true disables the algorithm and may decrease latency
-  with a cost of more/smaller packets.
-  </description>
-</property>
-
-
-<!-- Web Interface Configuration -->
-
-<property>
-  <name>webinterface.private.actions</name>
-  <value>false</value>
-  <description> If set to true, the web interfaces of JT and NN may contain
-                actions, such as kill job, delete file, etc., that should
-                not be exposed to the public. Enable this option if the interfaces
-                are only reachable by those who have the right authorization.
-  </description>
-</property>
-
-<!-- Proxy Configuration -->
-
-<property>
-  <name>hadoop.rpc.socket.factory.class.default</name>
-  <value>org.apache.hadoop.net.StandardSocketFactory</value>
-  <description> Default SocketFactory to use. This parameter is expected to be
-    formatted as "package.FactoryClassName".
-  </description>
-</property>
-
-<property>
-  <name>hadoop.rpc.socket.factory.class.ClientProtocol</name>
-  <value></value>
-  <description> SocketFactory to use to connect to a DFS. If null or empty, use
-    hadoop.rpc.socket.class.default. This socket factory is also used by
-    DFSClient to create sockets to DataNodes.
-  </description>
-</property>
-
-
-
-<property>
-  <name>hadoop.socks.server</name>
-  <value></value>
-  <description> Address (host:port) of the SOCKS server to be used by the
-    SocksSocketFactory.
-  </description>
-</property>
-
-<!-- Rack Configuration -->
-
-<property>
-  <name>topology.node.switch.mapping.impl</name>
-  <value>org.apache.hadoop.net.ScriptBasedMapping</value>
-  <description> The default implementation of the DNSToSwitchMapping. It
-    invokes a script specified in topology.script.file.name to resolve
-    node names. If the value for topology.script.file.name is not set, the
-    default value of DEFAULT_RACK is returned for all node names.
-  </description>
-</property>
-
-<property>
-  <name>topology.script.file.name</name>
-  <value></value>
-  <description> The script name that should be invoked to resolve DNS names to
-    NetworkTopology names. Example: the script would take host.foo.bar as an
-    argument, and return /rack1 as the output.
-  </description>
-</property>
-
-<property>
-  <name>topology.script.number.args</name>
-  <value>100</value>
-  <description> The max number of args that the script configured with
-    topology.script.file.name should be run with. Each arg is an
-    IP address.
-  </description>
-</property>
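
Taken together, topology.node.switch.mapping.impl, topology.script.file.name,
and topology.script.number.args implement script-based rack awareness: Hadoop
runs the configured script with batches of up to the configured number of
addresses and reads one rack path (e.g. /rack1) per line of output. A
conceptual sketch of that invocation (the script path is illustrative, and
this is not the ScriptBasedMapping source):

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.util.ArrayList;
    import java.util.List;

    public class RackResolver {
        static List<String> resolve(String script, List<String> hosts) throws Exception {
            List<String> cmd = new ArrayList<>();
            cmd.add(script);
            cmd.addAll(hosts); // batch size capped by topology.script.number.args
            Process p = new ProcessBuilder(cmd).start();
            List<String> racks = new ArrayList<>();
            try (BufferedReader r = new BufferedReader(
                    new InputStreamReader(p.getInputStream()))) {
                String line;
                while ((line = r.readLine()) != null) {
                    racks.add(line.trim()); // one rack path per host, e.g. /rack1
                }
            }
            p.waitFor();
            return racks;
        }

        public static void main(String[] args) throws Exception {
            System.out.println(resolve("/etc/hadoop/conf/topology.sh",
                List.of("10.0.0.1", "10.0.0.2")));
        }
    }
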
-
-<property>
-  <name>hadoop.security.uid.cache.secs</name>
-  <value>14400</value>
-  <description> NativeIO maintains a cache from UID to UserName. This is
-  the timeout for an entry in that cache. </description>
-</property>
-
-<!-- HTTP web-consoles Authentication -->
-
-<property>
-  <name>hadoop.http.authentication.type</name>
-  <value>simple</value>
-  <description>
-    Defines authentication used for the Hadoop HTTP web consoles.
-    Supported values are: simple | kerberos | #AUTHENTICATION_HANDLER_CLASSNAME#
-  </description>
-</property>
-
-<property>
-  <name>hadoop.http.authentication.token.validity</name>
-  <value>36000</value>
-  <description>
-    Indicates how long (in seconds) an authentication token is valid before it has
-    to be renewed.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.http.authentication.signature.secret.file</name>
-  <value>${user.home}/hadoop-http-auth-signature-secret</value>
-  <description>
-    The signature secret for signing the authentication tokens.
-    If not set a random secret is generated at startup time.
-    The same secret should be used for JT/NN/DN/TT configurations.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.http.authentication.cookie.domain</name>
-  <value></value>
-  <description>
-    The domain to use for the HTTP cookie that stores the authentication token.
-    In order for authentication to work correctly across the web consoles
-    of all Hadoop nodes, the domain must be correctly set.
-    IMPORTANT: when using IP addresses, browsers ignore cookies with domain settings.
-    For this setting to work properly, all nodes in the cluster must be
-    configured to generate URLs with hostname.domain names in them.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.http.authentication.simple.anonymous.allowed</name>
-  <value>true</value>
-  <description>
-    Indicates if anonymous requests are allowed when using 'simple' authentication.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.http.authentication.kerberos.principal</name>
-  <value>HTTP/localhost@LOCALHOST</value>
-  <description>
-    Indicates the Kerberos principal to be used for HTTP endpoint.
-    The principal MUST start with 'HTTP/' as per Kerberos HTTP SPNEGO specification.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.http.authentication.kerberos.keytab</name>
-  <value>${user.home}/hadoop.keytab</value>
-  <description>
-    Location of the keytab file with the credentials for the principal.
-    The keytab must contain the HTTP Kerberos principal configured above.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.relaxed.worker.version.check</name>
-  <value>false</value>
-  <description>
-    By default datanodes refuse to connect to namenodes if their build
-    revision (svn revision) do not match, and tasktrackers refuse to
-    connect to jobtrackers if their build version (version, revision,
-    user, and source checksum) do not match. This option changes the
-    behavior of hadoop workers to only check for a version match (eg
-    "1.0.2") but ignore the other build fields (revision, user, and
-    source checksum).
-  </description>
-</property>
-
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/HDFS/configuration/hdfs-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/HDFS/configuration/hdfs-site.xml
deleted file mode 100644
index 240068b..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/HDFS/configuration/hdfs-site.xml
+++ /dev/null
@@ -1,403 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-<!-- file system properties -->
-
-  <property>
-    <name>dfs.name.dir</name>
-    <!-- cluster variant -->
-    <value>/mnt/hmc/hadoop/hdfs/namenode</value>
-    <description>Determines where on the local filesystem the DFS name node
-      should store the name table.  If this is a comma-delimited list
-      of directories then the name table is replicated in all of the
-      directories, for redundancy. </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.support.append</name>
-    <value>true</value>
-    <description>to enable dfs append</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.webhdfs.enabled</name>
-    <value>false</value>
-    <description>to enable webhdfs</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value>0</value>
-    <description>The number of failed disks a DataNode will tolerate.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.block.local-path-access.user</name>
-    <value>hbase</value>
-    <description>the user who is allowed to perform short
-    circuit reads.
-    </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.data.dir</name>
-    <value>/mnt/hmc/hadoop/hdfs/data</value>
-    <description>Determines where on the local filesystem a DFS data node
-  should store its blocks.  If this is a comma-delimited
-  list of directories, then data will be stored in all named
-  directories, typically on different devices.
-  Directories that do not exist are ignored.
-  </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.hosts.exclude</name>
-    <value>/etc/hadoop/conf/dfs.exclude</value>
-    <description>Names a file that contains a list of hosts that are
-    not permitted to connect to the namenode.  The full pathname of the
-    file must be specified.  If the value is empty, no hosts are
-    excluded.</description>
-  </property>
-
-  <property>
-    <name>dfs.hosts</name>
-    <value>/etc/hadoop/conf/dfs.include</value>
-    <description>Names a file that contains a list of hosts that are
-    permitted to connect to the namenode. The full pathname of the file
-    must be specified.  If the value is empty, all hosts are
-    permitted.</description>
-  </property>
-
-  <property>
-    <name>dfs.replication.max</name>
-    <value>50</value>
-    <description>Maximal block replication.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.replication</name>
-    <value>3</value>
-    <description>Default block replication.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.heartbeat.interval</name>
-    <value>3</value>
-    <description>Determines datanode heartbeat interval in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.safemode.threshold.pct</name>
-    <value>1.0f</value>
-    <description>
-        Specifies the percentage of blocks that should satisfy
-        the minimal replication requirement defined by dfs.replication.min.
-        Values less than or equal to 0 mean not to start in safe mode.
-        Values greater than 1 will make safe mode permanent.
-        </description>
-  </property>
-
-  <property>
-    <name>dfs.balance.bandwidthPerSec</name>
-    <value>6250000</value>
-    <description>
-        Specifies the maximum amount of bandwidth that each datanode
-        can utilize for the balancing purpose in term of
-        the number of bytes per second.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.address</name>
-    <value>0.0.0.0:50010</value>
-  </property>
-
-  <property>
-    <name>dfs.datanode.http.address</name>
-    <value>0.0.0.0:50075</value>
-  </property>
-
-  <property>
-    <name>dfs.block.size</name>
-    <value>134217728</value>
-    <description>The default block size for new files.</description>
-  </property>
-
-  <property>
-    <name>dfs.http.address</name>
-    <value>hdp1.cybervisiontech.com.ua:50070</value>
-<description>The address and the base port where the dfs namenode
-web ui will listen on.</description>
-<final>true</final>
-</property>
-
-<property>
-<name>dfs.datanode.du.reserved</name>
-<!-- cluster variant -->
-<value>1073741824</value>
-<description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
-</description>
-</property>
-
-<property>
-<name>dfs.datanode.ipc.address</name>
-<value>0.0.0.0:8010</value>
-<description>
-The datanode ipc server address and port.
-If the port is 0 then the server will start on a free port.
-</description>
-</property>
-
-<property>
-<name>dfs.blockreport.initialDelay</name>
-<value>120</value>
-<description>Delay for first block report in seconds.</description>
-</property>
-
-<property>
-<name>dfs.datanode.du.pct</name>
-<value>0.85f</value>
-<description>When calculating remaining space, only use this percentage of the real available space
-</description>
-</property>
-
-<property>
-<name>dfs.namenode.handler.count</name>
-<value>40</value>
-<description>The number of server threads for the namenode.</description>
-</property>
-
-<property>
-<name>dfs.datanode.max.xcievers</name>
-<value>1024</value>
-<description>PRIVATE CONFIG VARIABLE</description>
-</property>
-
-<!-- Permissions configuration -->
-
-<property>
-<name>dfs.umaskmode</name>
-<value>077</value>
-<description>
-The octal umask used when creating files and directories.
-</description>
-</property>
-
-<property>
-<name>dfs.web.ugi</name>
-<!-- cluster variant -->
-<value>gopher,gopher</value>
-<description>The user account used by the web interface.
-Syntax: USERNAME,GROUP1,GROUP2, ...
-</description>
-</property>
-
-<property>
-<name>dfs.permissions</name>
-<value>true</value>
-<description>
-If "true", enable permission checking in HDFS.
-If "false", permission checking is turned off,
-but all other behavior is unchanged.
-Switching from one parameter value to the other does not change the mode,
-owner or group of files or directories.
-</description>
-</property>
-
-<property>
-<name>dfs.permissions.supergroup</name>
-<value>hdfs</value>
-<description>The name of the group of super-users.</description>
-</property>
-
-<property>
-<name>dfs.namenode.handler.count</name>
-<value>100</value>
-<description>Added to grow the queue size so that more client connections are allowed.</description>
-</property>
-
-<property>
-<name>ipc.server.max.response.size</name>
-<value>5242880</value>
-</property>
-<property>
-<name>dfs.block.access.token.enable</name>
-<value>true</value>
-<description>
-If "true", access tokens are used as capabilities for accessing datanodes.
-If "false", no access tokens are checked on accessing datanodes.
-</description>
-</property>
-
-<property>
-<name>dfs.namenode.kerberos.principal</name>
-<value>nn/_HOST@</value>
-<description>
-Kerberos principal name for the NameNode
-</description>
-</property>
-
-<property>
-<name>dfs.secondary.namenode.kerberos.principal</name>
-<value>nn/_HOST@</value>
-    <description>
-        Kerberos principal name for the secondary NameNode.
-    </description>
-  </property>
-
-
-<!--
-  This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
--->
-  <property>
-    <name>dfs.namenode.kerberos.https.principal</name>
-    <value>host/_HOST@</value>
-     <description>The Kerberos principal for the host that the NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.https.principal</name>
-    <value>host/_HOST@</value>
-    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <!-- cluster variant -->
-    <name>dfs.secondary.http.address</name>
-    <value>hdp2.cybervisiontech.com.ua:50090</value>
-    <description>Address of secondary namenode web server</description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.https.port</name>
-    <value>50490</value>
-    <description>The https port where secondary-namenode binds</description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.principal</name>
-    <value>HTTP/_HOST@</value>
-    <description>
-      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
-      HTTP SPNEGO specification.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.keytab</name>
-    <value>/nn.service.keytab</value>
-    <description>
-      The Kerberos keytab file with the credentials for the
-      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.kerberos.principal</name>
-    <value>dn/_HOST@</value>
- <description>
-        The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.keytab.file</name>
-    <value>/nn.service.keytab</value>
- <description>
-        Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.keytab.file</name>
-    <value>/nn.service.keytab</value>
-  <description>
-        Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.keytab.file</name>
-    <value>/dn.service.keytab</value>
- <description>
-        The filename of the keytab file for the DataNode.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.https.port</name>
-    <value>50470</value>
- <description>The https port where namenode binds</description>
-
-  </property>
-
-  <property>
-    <name>dfs.https.address</name>
-    <value>hdp1.cybervisiontech.com.ua:50470</value>
-  <description>The https address where namenode binds</description>
-
-  </property>
-
-  <property>
-    <name>dfs.datanode.data.dir.perm</name>
-    <value>750</value>
-<description>The permissions that should be set on dfs.data.dir
-directories. The datanode will not come up if the permissions are
-different on existing dfs.data.dir directories. If the directories
-don't exist, they will be created with this permission.</description>
-  </property>
-
-  <property>
-  <name>dfs.access.time.precision</name>
-  <value>0</value>
-  <description>The access time for an HDFS file is precise up to this value.
-               The default value is 1 hour. Setting a value of 0 disables
-               access times for HDFS.
-  </description>
-</property>
-
-<property>
- <name>dfs.cluster.administrators</name>
- <value> hdfs</value>
- <description>ACL specifying who can view the default servlets in HDFS</description>
-</property>
-
-<property>
-  <name>ipc.server.read.threadpool.size</name>
-  <value>5</value>
-  <description></description>
-</property>
-
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/HDFS/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/HDFS/metainfo.xml
deleted file mode 100644
index e21e357..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/HDFS/metainfo.xml
+++ /dev/null
@@ -1,47 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is comment for HDFS service</comment>
-    <version>1.0</version>
-
-
-    <components>
-        <component>
-            <name>NAMENODE</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>DATANODE</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>SECONDARY_NAMENODE</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>HDFS_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/HIVE/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/HIVE/metainfo.xml
deleted file mode 100644
index 03205a3..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/HIVE/metainfo.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is comment for HIVE service</comment>
-    <version>1.0</version>
-
-
-    <components>
-        <component>
-            <name>HIVE_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-<!--
-        <component>
-            <name>MYSQL_SERVER</name>
-            <category>MASTER</category>
-        </component>
--->
-        <component>
-            <name>HIVE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/core-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/core-site.xml
deleted file mode 100644
index 3a2af49..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/core-site.xml
+++ /dev/null
@@ -1,20 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<configuration>
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/mapred-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/mapred-site.xml
deleted file mode 100644
index 347766c..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/mapred-site.xml
+++ /dev/null
@@ -1,1298 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Do not modify this file directly.  Instead, copy entries that you -->
-<!-- wish to modify from this file into mapred-site.xml and change them -->
-<!-- there.  If mapred-site.xml does not already exist, create it.      -->
-
-<configuration>
-
-<property>
-  <name>hadoop.job.history.location</name>
-  <value></value>
-  <description> If the job tracker is static, the history files are stored
-  in this single well-known place. If no value is set here, by default
-  it is in the local file system at ${hadoop.log.dir}/history.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.job.history.user.location</name>
-  <value></value>
-  <description> User can specify a location to store the history files of
-  a particular job. If nothing is specified, the logs are stored in
-  the output directory. The files are stored in "_logs/history/" in the directory.
-  User can stop logging by giving the value "none".
-  </description>
-</property>
-
-<property>
-  <name>mapred.job.tracker.history.completed.location</name>
-  <value></value>
-  <description> The completed job history files are stored at this single well
-  known location. If nothing is specified, the files are stored at
-  ${hadoop.job.history.location}/done.
-  </description>
-</property>
-
-<!-- i/o properties -->
-
-<property>
-  <name>io.sort.factor</name>
-  <value>10</value>
-  <description>The number of streams to merge at once while sorting
-  files.  This determines the number of open file handles.</description>
-</property>
-
-<property>
-  <name>io.sort.mb</name>
-  <value>100</value>
-  <description>The total amount of buffer memory to use while sorting
-  files, in megabytes.  By default, gives each merge stream 1MB, which
-  should minimize seeks.</description>
-</property>
-
-<property>
-  <name>io.sort.record.percent</name>
-  <value>0.05</value>
-  <description>The percentage of io.sort.mb dedicated to tracking record
-  boundaries. Let this value be r, io.sort.mb be x. The maximum number
-  of records collected before the collection thread must block is equal
-  to (r * x) / 4</description>
-</property>
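
Taken together with io.sort.mb above, the (r * x) / 4 bound works out to about 1.3 million records at the defaults. A minimal sketch of the arithmetic, assuming the default values shown in this file:

    public class SortRecordLimit {
        public static void main(String[] args) {
            double r = 0.05;                      // io.sort.record.percent
            long xBytes = 100L * 1024 * 1024;     // io.sort.mb = 100, in bytes
            // Maximum records collected before the collection thread blocks:
            long maxRecords = (long) (r * xBytes) / 4;
            System.out.println(maxRecords);       // prints 1310720
        }
    }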
-
-<property>
-  <name>io.sort.spill.percent</name>
-  <value>0.80</value>
-  <description>The soft limit in either the buffer or record collection
-  buffers. Once reached, a thread will begin to spill the contents to disk
-  in the background. Note that this does not imply any chunking of data to
-  the spill. A value less than 0.5 is not recommended.</description>
-</property>
-
-<property>
-  <name>io.map.index.skip</name>
-  <value>0</value>
-  <description>Number of index entries to skip between each entry.
-  Zero by default. Setting this to values larger than zero can
-  facilitate opening large map files using less memory.</description>
-</property>
-
-<property>
-  <name>mapred.job.tracker</name>
-  <value>local</value>
-  <description>The host and port that the MapReduce job tracker runs
-  at.  If "local", then jobs are run in-process as a single map
-  and reduce task.
-  </description>
-</property>
-
-<property>
-  <name>mapred.job.tracker.http.address</name>
-  <value>0.0.0.0:50030</value>
-  <description>
-    The job tracker http server address and port the server will listen on.
-    If the port is 0 then the server will start on a free port.
-  </description>
-</property>
-
-<property>
-  <name>mapred.job.tracker.handler.count</name>
-  <value>10</value>
-  <description>
-    The number of server threads for the JobTracker. This should be roughly
-    4% of the number of tasktracker nodes.
-  </description>
-</property>
-
-<property>
-  <name>mapred.task.tracker.report.address</name>
-  <value>127.0.0.1:0</value>
-  <description>The interface and port that task tracker server listens on.
-  Since it is only connected to by the tasks, it uses the local interface.
-  EXPERT ONLY. Should only be changed if your host does not have the loopback
-  interface.</description>
-</property>
-
-<property>
-  <name>mapred.local.dir</name>
-  <value>${hadoop.tmp.dir}/mapred/local</value>
-  <description>The local directory where MapReduce stores intermediate
-  data files.  May be a comma-separated list of
-  directories on different devices in order to spread disk i/o.
-  Directories that do not exist are ignored.
-  </description>
-</property>
-
-<property>
-  <name>mapred.system.dir</name>
-  <value>${hadoop.tmp.dir}/mapred/system</value>
-  <description>The directory where MapReduce stores control files.
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.jobtracker.staging.root.dir</name>
-  <value>${hadoop.tmp.dir}/mapred/staging</value>
-  <description>The root of the staging area for users' job files.
-  In practice, this should be the directory where users' home
-  directories are located (usually /user).
-  </description>
-</property>
-
-<property>
-  <name>mapred.temp.dir</name>
-  <value>${hadoop.tmp.dir}/mapred/temp</value>
-  <description>A shared directory for temporary files.
-  </description>
-</property>
-
-<property>
-  <name>mapred.local.dir.minspacestart</name>
-  <value>0</value>
-  <description>If the space in mapred.local.dir drops under this,
-  do not ask for more tasks.
-  Value in bytes.
-  </description>
-</property>
-
-<property>
-  <name>mapred.local.dir.minspacekill</name>
-  <value>0</value>
-  <description>If the space in mapred.local.dir drops under this,
-    do not ask for more tasks until all the current ones have finished and
-    cleaned up. Also, to save the rest of the tasks we have running,
-    kill one of them, to clean up some space. Start with the reduce tasks,
-    then go with the ones that have finished the least.
-    Value in bytes.
-  </description>
-</property>
-
-<property>
-  <name>mapred.tasktracker.expiry.interval</name>
-  <value>600000</value>
-  <description>Expert: The time-interval, in milliseconds, after which
-  a tasktracker is declared 'lost' if it doesn't send heartbeats.
-  </description>
-</property>
-
-<!--
-<property>
-  <name>mapred.tasktracker.instrumentation</name>
-  <value>com.example.hadoop.TaskTrackerInstrumentation</value>
-  <description>Expert: The instrumentation class to associate with each TaskTracker.
-  </description>
-</property>
--->
-
-<property>
-  <name>mapred.tasktracker.resourcecalculatorplugin</name>
-  <value></value>
-  <description>
-   Name of the class whose instance will be used to query resource information
-   on the tasktracker.
-
-   The class must be an instance of
-   org.apache.hadoop.util.ResourceCalculatorPlugin. If the value is null, the
-   tasktracker attempts to use a class appropriate to the platform.
-   Currently, the only platform supported is Linux.
-  </description>
-</property>
-
-<property>
-  <name>mapred.tasktracker.taskmemorymanager.monitoring-interval</name>
-  <value>5000</value>
-  <description>The interval, in milliseconds, for which the tasktracker waits
-   between two cycles of monitoring its tasks' memory usage. Used only if
-   tasks' memory management is enabled via mapred.tasktracker.tasks.maxmemory.
-   </description>
-</property>
-
-<property>
-  <name>mapred.tasktracker.tasks.sleeptime-before-sigkill</name>
-  <value>5000</value>
-  <description>The time, in milliseconds, the tasktracker waits for sending a
-  SIGKILL to a process, after it has been sent a SIGTERM.</description>
-</property>
-
-<property>
-  <name>mapred.map.tasks</name>
-  <value>2</value>
-  <description>The default number of map tasks per job.
-  Ignored when mapred.job.tracker is "local".
-  </description>
-</property>
-
-<property>
-  <name>mapred.reduce.tasks</name>
-  <value>1</value>
-  <description>The default number of reduce tasks per job. Typically set to 99%
-  of the cluster's reduce capacity, so that if a node fails the reduces can
-  still be executed in a single wave.
-  Ignored when mapred.job.tracker is "local".
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.tasktracker.outofband.heartbeat</name>
-  <value>false</value>
-  <description>Expert: Set this to true to let the tasktracker send an
-  out-of-band heartbeat on task-completion for better latency.
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.tasktracker.outofband.heartbeat.damper</name>
-  <value>1000000</value>
-  <description>When out-of-band heartbeats are enabled, provides
-  damping to avoid overwhelming the JobTracker if too many out-of-band
-  heartbeats would occur. The damping is calculated such that the
-  heartbeat interval is divided by (T*D + 1) where T is the number
-  of completed tasks and D is the damper value.
-
-  Setting this to a high value like the default provides no damping --
-  as soon as any task finishes, a heartbeat will be sent. Setting this
-  parameter to 0 is equivalent to disabling the out-of-band heartbeat feature.
-  A value of 1 would indicate that, after one task has completed, the
-  time to wait before the next heartbeat would be 1/2 the usual time.
-  After two tasks have finished, it would be 1/3 the usual time, etc.
-  </description>
-</property>
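
A minimal sketch of the damping formula described above, the heartbeat interval divided by (T*D + 1); the base interval used here is a hypothetical value, not a setting from this file:

    public class HeartbeatDamper {
        // Damped interval per the description: base / (completedTasks * damper + 1).
        static double dampedInterval(double baseMillis, long completedTasks, long damper) {
            return baseMillis / (completedTasks * damper + 1);
        }

        public static void main(String[] args) {
            double base = 3000.0; // hypothetical base heartbeat interval, ms
            System.out.println(dampedInterval(base, 1, 1));       // 1500.0 -- half the usual time
            System.out.println(dampedInterval(base, 2, 1));       // 1000.0 -- a third of the usual time
            System.out.println(dampedInterval(base, 1, 1000000)); // ~0.003 -- effectively immediate
        }
    }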
-
-<property>
-  <name>mapred.jobtracker.restart.recover</name>
-  <value>false</value>
-  <description>"true" to enable (job) recovery upon restart,
-               "false" to start afresh
-  </description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.job.history.block.size</name>
-  <value>3145728</value>
-  <description>The block size of the job history file. Since the job recovery
-               uses job history, it's important to dump job history to disk as
-               soon as possible. Note that this is an expert level parameter.
-               The default value is set to 3 MB.
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.job.split.metainfo.maxsize</name>
-  <value>10000000</value>
-  <description>The maximum permissible size of the split metainfo file.
-  The JobTracker won't attempt to read split metainfo files bigger than
-  the configured value.
-  No limits if set to -1.
-  </description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.taskScheduler</name>
-  <value>org.apache.hadoop.mapred.JobQueueTaskScheduler</value>
-  <description>The class responsible for scheduling the tasks.</description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.taskScheduler.maxRunningTasksPerJob</name>
-  <value></value>
-  <description>The maximum number of running tasks for a job before
-  it gets preempted. No limits if undefined.
-  </description>
-</property>
-
-<property>
-  <name>mapred.map.max.attempts</name>
-  <value>4</value>
-  <description>Expert: The maximum number of attempts per map task.
-  In other words, the framework will try to execute a map task this many
-  times before giving up on it.
-  </description>
-</property>
-
-<property>
-  <name>mapred.reduce.max.attempts</name>
-  <value>4</value>
-  <description>Expert: The maximum number of attempts per reduce task.
-  In other words, the framework will try to execute a reduce task this many
-  times before giving up on it.
-  </description>
-</property>
-
-<property>
-  <name>mapred.reduce.parallel.copies</name>
-  <value>5</value>
-  <description>The default number of parallel transfers run by reduce
-  during the copy(shuffle) phase.
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.reduce.shuffle.maxfetchfailures</name>
-  <value>10</value>
-  <description>The maximum number of times a reducer tries to
-  fetch a map output before it reports the failure.
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.reduce.shuffle.connect.timeout</name>
-  <value>180000</value>
-  <description>Expert: The maximum amount of time (in milliseconds) a reduce
-  task spends in trying to connect to a tasktracker for getting map output.
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.reduce.shuffle.read.timeout</name>
-  <value>180000</value>
-  <description>Expert: The maximum amount of time (in milliseconds) a reduce
-  task waits for map output data to be available for reading after obtaining
-  connection.
-  </description>
-</property>
-
-<property>
-  <name>mapred.task.timeout</name>
-  <value>600000</value>
-  <description>The number of milliseconds before a task will be
-  terminated if it neither reads an input, writes an output, nor
-  updates its status string.
-  </description>
-</property>
-
-<property>
-  <name>mapred.tasktracker.map.tasks.maximum</name>
-  <value>2</value>
-  <description>The maximum number of map tasks that will be run
-  simultaneously by a task tracker.
-  </description>
-</property>
-
-<property>
-  <name>mapred.tasktracker.reduce.tasks.maximum</name>
-  <value>2</value>
-  <description>The maximum number of reduce tasks that will be run
-  simultaneously by a task tracker.
-  </description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.completeuserjobs.maximum</name>
-  <value>100</value>
-  <description>The maximum number of complete jobs per user to keep around
-  before delegating them to the job history.</description>
-</property>
-
-<property>
-  <name>mapreduce.reduce.input.limit</name>
-  <value>-1</value>
-  <description>The limit on the input size of the reduce. If the estimated
-  input size of the reduce is greater than this value, job is failed. A
-  value of -1 means that there is no limit set. </description>
-</property>
-
-<property>
-  <name>mapred.job.tracker.retiredjobs.cache.size</name>
-  <value>1000</value>
-  <description>The number of retired job statuses to keep in the cache.
-  </description>
-</property>
-
-<property>
-  <name>mapred.job.tracker.jobhistory.lru.cache.size</name>
-  <value>5</value>
-  <description>The number of job history files loaded in memory. The jobs are
-  loaded when they are first accessed. The cache is cleared based on LRU.
-  </description>
-</property>
-
-<!--
-<property>
-  <name>mapred.jobtracker.instrumentation</name>
-  <value>com.example.hadoop.JobTrackerInstrumentation</value>
-  <description>Expert: The instrumentation class to associate with each JobTracker.
-  </description>
-</property>
--->
-
-<property>
-  <name>mapred.child.java.opts</name>
-  <value>-Xmx200m</value>
-  <description>Java opts for the task tracker child processes.
-  The following symbol, if present, will be interpolated: @taskid@ is replaced
-  by the current TaskID. Any other occurrences of '@' will go unchanged.
-  For example, to enable verbose gc logging to a file named for the taskid in
-  /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of:
-        -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc
-
-  The configuration variable mapred.child.ulimit can be used to control the
-  maximum virtual memory of the child processes.
-  </description>
-</property>
-
-<property>
-  <name>mapred.child.env</name>
-  <value></value>
-  <description>User added environment variables for the task tracker child
-  processes. Example :
-  1) A=foo  This will set the env variable A to foo
-  2) B=$B:c This will inherit the tasktracker's B env variable.
-  </description>
-</property>
-
-<property>
-  <name>mapred.child.ulimit</name>
-  <value></value>
-  <description>The maximum virtual memory, in KB, of a process launched by the
-  Map-Reduce framework. This can be used to control both the Mapper/Reducer
-  tasks and applications using Hadoop Pipes, Hadoop Streaming etc.
-  By default it is left unspecified to let cluster admins control it via
-  limits.conf and other such relevant mechanisms.
-
-  Note: mapred.child.ulimit must be greater than or equal to the -Xmx passed to
-  JavaVM, else the VM might not start.
-  </description>
-</property>
-
-<property>
-  <name>mapred.cluster.map.memory.mb</name>
-  <value>-1</value>
-  <description>The size, in terms of virtual memory, of a single map slot
-  in the Map-Reduce framework, used by the scheduler.
-  A job can ask for multiple slots for a single map task via
-  mapred.job.map.memory.mb, up to the limit specified by
-  mapred.cluster.max.map.memory.mb, if the scheduler supports the feature.
-  The value of -1 indicates that this feature is turned off.
-  </description>
-</property>
-
-<property>
-  <name>mapred.cluster.reduce.memory.mb</name>
-  <value>-1</value>
-  <description>The size, in terms of virtual memory, of a single reduce slot
-  in the Map-Reduce framework, used by the scheduler.
-  A job can ask for multiple slots for a single reduce task via
-  mapred.job.reduce.memory.mb, up to the limit specified by
-  mapred.cluster.max.reduce.memory.mb, if the scheduler supports the feature.
-  The value of -1 indicates that this feature is turned off.
-  </description>
-</property>
-
-<property>
-  <name>mapred.cluster.max.map.memory.mb</name>
-  <value>-1</value>
-  <description>The maximum size, in terms of virtual memory, of a single map
-  task launched by the Map-Reduce framework, used by the scheduler.
-  A job can ask for multiple slots for a single map task via
-  mapred.job.map.memory.mb, up to the limit specified by
-  mapred.cluster.max.map.memory.mb, if the scheduler supports the feature.
-  The value of -1 indicates that this feature is turned off.
-  </description>
-</property>
-
-<property>
-  <name>mapred.cluster.max.reduce.memory.mb</name>
-  <value>-1</value>
-  <description>The maximum size, in terms of virtual memory, of a single reduce
-  task launched by the Map-Reduce framework, used by the scheduler.
-  A job can ask for multiple slots for a single reduce task via
-  mapred.job.reduce.memory.mb, up to the limit specified by
-  mapred.cluster.max.reduce.memory.mb, if the scheduler supports the feature.
-  The value of -1 indicates that this feature is turned off.
-  </description>
-</property>
-
-<property>
-  <name>mapred.job.map.memory.mb</name>
-  <value>-1</value>
-  <description>The size, in terms of virtual memory, of a single map task
-  for the job.
-  A job can ask for multiple slots for a single map task, rounded up to the
-  next multiple of mapred.cluster.map.memory.mb and up to the limit
-  specified by mapred.cluster.max.map.memory.mb, if the scheduler supports
-  the feature.
-  The value of -1 indicates that this feature is turned off iff
-  mapred.cluster.map.memory.mb is also turned off (-1).
-  </description>
-</property>
-
-<property>
-  <name>mapred.job.reduce.memory.mb</name>
-  <value>-1</value>
-  <description>The size, in terms of virtual memory, of a single reduce task
-  for the job.
-  A job can ask for multiple slots for a single reduce task, rounded up to the
-  next multiple of mapred.cluster.reduce.memory.mb and up to the limit
-  specified by mapred.cluster.max.reduce.memory.mb, if the scheduler supports
-  the feature.
-  The value of -1 indicates that this feature is turned off iff
-  mapred.cluster.reduce.memory.mb is also turned off (-1).
-  </description>
-</property>
-
-<property>
-  <name>mapred.child.tmp</name>
-  <value>./tmp</value>
-  <description> To set the value of tmp directory for map and reduce tasks.
-  If the value is an absolute path, it is directly assigned. Otherwise, it is
-  prepended with the task's working directory. The java tasks are executed with
-  option -Djava.io.tmpdir='the absolute path of the tmp dir'. Pipes and
-  streaming are set with environment variable,
-   TMPDIR='the absolute path of the tmp dir'
-  </description>
-</property>
-
-<property>
-  <name>mapred.inmem.merge.threshold</name>
-  <value>1000</value>
-  <description>The threshold, in terms of the number of files
-  for the in-memory merge process. When we accumulate the threshold number of
-  files, we initiate the in-memory merge and spill to disk. A value of 0 or
-  less indicates there is no threshold, and the merge is instead triggered
-  only by the ramfs's memory consumption.
-  </description>
-</property>
-
-<property>
-  <name>mapred.job.shuffle.merge.percent</name>
-  <value>0.66</value>
-  <description>The usage threshold at which an in-memory merge will be
-  initiated, expressed as a percentage of the total memory allocated to
-  storing in-memory map outputs, as defined by
-  mapred.job.shuffle.input.buffer.percent.
-  </description>
-</property>
-
-<property>
-  <name>mapred.job.shuffle.input.buffer.percent</name>
-  <value>0.70</value>
-  <description>The percentage of memory to be allocated from the maximum heap
-  size to storing map outputs during the shuffle.
-  </description>
-</property>
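
Together with mapred.job.shuffle.merge.percent above, these two percentages bound the in-memory shuffle buffer. A minimal sketch of the arithmetic, assuming the 200 MB child heap set by mapred.child.java.opts earlier in this file:

    public class ShuffleBufferSizing {
        public static void main(String[] args) {
            long heapBytes = 200L * 1024 * 1024;  // from -Xmx200m above
            double inputBufferPct = 0.70;         // mapred.job.shuffle.input.buffer.percent
            double mergePct = 0.66;               // mapred.job.shuffle.merge.percent
            long bufferBytes = (long) (heapBytes * inputBufferPct);
            long mergeAtBytes = (long) (bufferBytes * mergePct);
            // ~140 MB is reserved for map outputs; the in-memory merge
            // begins once roughly ~92 MB of it is in use.
            System.out.println(bufferBytes + " / " + mergeAtBytes);
        }
    }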
-
-<property>
-  <name>mapred.job.reduce.input.buffer.percent</name>
-  <value>0.0</value>
-  <description>The percentage of memory- relative to the maximum heap size- to
-  retain map outputs during the reduce. When the shuffle is concluded, any
-  remaining map outputs in memory must consume less than this threshold before
-  the reduce can begin.
-  </description>
-</property>
-
-<property>
-  <name>mapred.map.tasks.speculative.execution</name>
-  <value>true</value>
-  <description>If true, then multiple instances of some map tasks
-               may be executed in parallel.</description>
-</property>
-
-<property>
-  <name>mapred.reduce.tasks.speculative.execution</name>
-  <value>true</value>
-  <description>If true, then multiple instances of some reduce tasks
-               may be executed in parallel.</description>
-</property>
-
-<property>
-  <name>mapred.job.reuse.jvm.num.tasks</name>
-  <value>1</value>
-  <description>How many tasks to run per jvm. If set to -1, there is
-  no limit.
-  </description>
-</property>
-
-<property>
-  <name>mapred.min.split.size</name>
-  <value>0</value>
-  <description>The minimum size chunk that map input should be split
-  into.  Note that some file formats may have minimum split sizes that
-  take priority over this setting.</description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.maxtasks.per.job</name>
-  <value>-1</value>
-  <description>The maximum number of tasks for a single job.
-  A value of -1 indicates that there is no maximum.  </description>
-</property>
-
-<property>
-  <name>mapred.submit.replication</name>
-  <value>10</value>
-  <description>The replication level for submitted job files.  This
-  should be around the square root of the number of nodes.
-  </description>
-</property>
-
-
-<property>
-  <name>mapred.tasktracker.dns.interface</name>
-  <value>default</value>
-  <description>The name of the Network Interface from which a task
-  tracker should report its IP address.
-  </description>
- </property>
-
-<property>
-  <name>mapred.tasktracker.dns.nameserver</name>
-  <value>default</value>
-  <description>The host name or IP address of the name server (DNS)
-  which a TaskTracker should use to determine the host name used by
-  the JobTracker for communication and display purposes.
-  </description>
- </property>
-
-<property>
-  <name>tasktracker.http.threads</name>
-  <value>40</value>
-  <description>The number of worker threads for the http server. This is
-               used for map output fetching.
-  </description>
-</property>
-
-<property>
-  <name>mapred.task.tracker.http.address</name>
-  <value>0.0.0.0:50060</value>
-  <description>
-    The task tracker http server address and port.
-    If the port is 0 then the server will start on a free port.
-  </description>
-</property>
-
-<property>
-  <name>keep.failed.task.files</name>
-  <value>false</value>
-  <description>Should the files for failed tasks be kept? This should only be
-               used on jobs that are failing, because the storage is never
-               reclaimed. It also prevents the map outputs from being erased
-               from the reduce directory as they are consumed.</description>
-</property>
-
-
-<!--
-  <property>
-  <name>keep.task.files.pattern</name>
-  <value>.*_m_123456_0</value>
-  <description>Keep all files from tasks whose task names match the given
-               regular expression. Defaults to none.</description>
-  </property>
--->
-
-<property>
-  <name>mapred.output.compress</name>
-  <value>false</value>
-  <description>Should the job outputs be compressed?
-  </description>
-</property>
-
-<property>
-  <name>mapred.output.compression.type</name>
-  <value>RECORD</value>
-  <description>If the job outputs are to be compressed as SequenceFiles, how should
-               they be compressed? Should be one of NONE, RECORD or BLOCK.
-  </description>
-</property>
-
-<property>
-  <name>mapred.output.compression.codec</name>
-  <value>org.apache.hadoop.io.compress.DefaultCodec</value>
-  <description>If the job outputs are compressed, how should they be compressed?
-  </description>
-</property>
-
-<property>
-  <name>mapred.compress.map.output</name>
-  <value>false</value>
-  <description>Should the outputs of the maps be compressed before being
-               sent across the network? Uses SequenceFile compression.
-  </description>
-</property>
-
-<property>
-  <name>mapred.map.output.compression.codec</name>
-  <value>org.apache.hadoop.io.compress.DefaultCodec</value>
-  <description>If the map outputs are compressed, how should they be
-               compressed?
-  </description>
-</property>
-
-<property>
-  <name>map.sort.class</name>
-  <value>org.apache.hadoop.util.QuickSort</value>
-  <description>The default sort class for sorting keys.
-  </description>
-</property>
-
-<property>
-  <name>mapred.userlog.limit.kb</name>
-  <value>0</value>
-  <description>The maximum size of user-logs of each task in KB. 0 disables the cap.
-  </description>
-</property>
-
-<property>
-  <name>mapred.userlog.retain.hours</name>
-  <value>24</value>
-  <description>The maximum time, in hours, for which the user-logs are to be
-               retained after the job completion.
-  </description>
-</property>
-
-<property>
-  <name>mapred.user.jobconf.limit</name>
-  <value>5242880</value>
-  <description>The maximum allowed size of the user jobconf. The
-  default is set to 5 MB</description>
-</property>
-
-<property>
-  <name>mapred.hosts</name>
-  <value></value>
-  <description>Names a file that contains the list of nodes that may
-  connect to the jobtracker.  If the value is empty, all hosts are
-  permitted.</description>
-</property>
-
-<property>
-  <name>mapred.hosts.exclude</name>
-  <value></value>
-  <description>Names a file that contains the list of hosts that
-  should be excluded by the jobtracker.  If the value is empty, no
-  hosts are excluded.</description>
-</property>
-
-<property>
-  <name>mapred.heartbeats.in.second</name>
-  <value>100</value>
-  <description>Expert: Approximate number of heart-beats that could arrive
-               at the JobTracker in a second. Assuming each RPC can be processed
-               in 10 msec, the default is set to 100 RPCs per second.
-  </description>
-</property>
-
-<property>
-  <name>mapred.max.tracker.blacklists</name>
-  <value>4</value>
-  <description>The number of times a tasktracker is blacklisted by various jobs
-               after which the tasktracker will be marked as potentially
-               faulty and is a candidate for graylisting across all jobs.
-               (Unlike blacklisting, this is advisory; the tracker remains
-               active.  However, it is reported as graylisted in the web UI,
-               with the expectation that chronically graylisted trackers
-               will be manually decommissioned.)  This value is tied to
-               mapred.jobtracker.blacklist.fault-timeout-window; faults
-               older than the window width are forgiven, so the tracker
-               will recover from transient problems.  It will also become
-               healthy after a restart.
-  </description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.blacklist.fault-timeout-window</name>
-  <value>180</value>
-  <description>The timeout (in minutes) after which per-job tasktracker
-               faults are forgiven.  The window is logically a circular
-               buffer of time-interval buckets whose width is defined by
-               mapred.jobtracker.blacklist.fault-bucket-width; when the
-               "now" pointer moves across a bucket boundary, the previous
-               contents (faults) of the new bucket are cleared.  In other
-               words, the timeout's granularity is determined by the bucket
-               width.
-  </description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.blacklist.fault-bucket-width</name>
-  <value>15</value>
-  <description>The width (in minutes) of each bucket in the tasktracker
-               fault timeout window.  Each bucket is reused in a circular
-               manner after a full timeout-window interval (defined by
-               mapred.jobtracker.blacklist.fault-timeout-window).
-  </description>
-</property>
-
-<property>
-  <name>mapred.max.tracker.failures</name>
-  <value>4</value>
-  <description>The number of task-failures on a tasktracker of a given job
-               after which new tasks of that job aren't assigned to it.
-  </description>
-</property>
-
-<property>
-  <name>jobclient.output.filter</name>
-  <value>FAILED</value>
-  <description>The filter for controlling the output of the task's userlogs sent
-               to the console of the JobClient.
-               The permissible options are: NONE, KILLED, FAILED, SUCCEEDED and
-               ALL.
-  </description>
-</property>
-
-  <property>
-    <name>mapred.job.tracker.persist.jobstatus.active</name>
-    <value>false</value>
-    <description>Indicates if persistency of job status information is
-      active or not.
-    </description>
-  </property>
-
-  <property>
-  <name>mapred.job.tracker.persist.jobstatus.hours</name>
-  <value>0</value>
-  <description>The number of hours job status information is persisted in DFS.
-    The job status information will be available after it drops out of the memory
-    queue and between jobtracker restarts. With a zero value the job status
-    information is not persisted at all in DFS.
-  </description>
-</property>
-
-  <property>
-    <name>mapred.job.tracker.persist.jobstatus.dir</name>
-    <value>/jobtracker/jobsInfo</value>
-    <description>The directory where the job status information is persisted
-      in a file system to be available after it drops out of the memory queue and
-      between jobtracker restarts.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.job.complete.cancel.delegation.tokens</name>
-    <value>true</value>
-    <description>If false, do not unregister/cancel delegation tokens from
-    renewal, because the same tokens may be used by spawned jobs.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.task.profile</name>
-    <value>false</value>
-    <description>Whether the system should collect profiler
-     information for some of the tasks in this job. The information is stored
-     in the user log directory. The value is "true" if task profiling
-     is enabled.</description>
-  </property>
-
-  <property>
-    <name>mapred.task.profile.maps</name>
-    <value>0-2</value>
-    <description> To set the ranges of map tasks to profile.
-    mapred.task.profile has to be set to true for this value to take effect.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.task.profile.reduces</name>
-    <value>0-2</value>
-    <description> To set the ranges of reduce tasks to profile.
-    mapred.task.profile has to be set to true for this value to take effect.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.line.input.format.linespermap</name>
-    <value>1</value>
-    <description> Number of lines per split in NLineInputFormat.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.skip.attempts.to.start.skipping</name>
-    <value>2</value>
-    <description> The number of task attempts AFTER which skip mode
-    will be kicked off. When skip mode is kicked off, the
-    task reports the range of records which it will process
-    next to the TaskTracker, so that on failures the TaskTracker knows
-    which records are possibly bad. On further executions,
-    those are skipped.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.skip.map.auto.incr.proc.count</name>
-    <value>true</value>
-    <description> The flag which if set to true,
-    SkipBadRecords.COUNTER_MAP_PROCESSED_RECORDS is incremented
-    by MapRunner after invoking the map function. This value must be set to
-    false for applications which process the records asynchronously
-    or buffer the input records. For example streaming.
-    In such cases applications should increment this counter on their own.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.skip.reduce.auto.incr.proc.count</name>
-    <value>true</value>
-    <description> The flag which if set to true,
-    SkipBadRecords.COUNTER_REDUCE_PROCESSED_GROUPS is incremented
-    by framework after invoking the reduce function. This value must be set to
-    false for applications which process the records asynchronously
-    or buffer the input records. For example streaming.
-    In such cases applications should increment this counter on their own.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.skip.out.dir</name>
-    <value></value>
-    <description> If no value is specified here, the skipped records are
-    written to the output directory at _logs/skip.
-    User can stop writing skipped records by giving the value "none".
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.skip.map.max.skip.records</name>
-    <value>0</value>
-    <description> The number of acceptable skip records surrounding the bad
-    record PER bad record in mapper. The number includes the bad record as well.
-    To turn the feature of detection/skipping of bad records off, set the
-    value to 0.
-    The framework tries to narrow down the skipped range by retrying
-    until this threshold is met OR all attempts get exhausted for this task.
-    Set the value to Long.MAX_VALUE to indicate that framework need not try to
-    narrow down. Whatever records (depending on the application) get skipped are
-    acceptable.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.skip.reduce.max.skip.groups</name>
-    <value>0</value>
-    <description> The number of acceptable skip groups surrounding the bad
-    group PER bad group in reducer. The number includes the bad group as well.
-    To turn the feature of detection/skipping of bad groups off, set the
-    value to 0.
-    The framework tries to narrow down the skipped range by retrying
-    until this threshold is met OR all attempts get exhausted for this task.
-    Set the value to Long.MAX_VALUE to indicate that framework need not try to
-    narrow down. Whatever groups (depending on the application) get skipped are
-    acceptable.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.ifile.readahead</name>
-    <value>true</value>
-    <description>Configuration key to enable/disable IFile readahead.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.ifile.readahead.bytes</name>
-    <value>4194304</value>
-    <description>Configuration key to set the IFile readahead length in bytes.
-    </description>
-  </property>
-
-<!-- Job Notification Configuration -->
-
-<!--
-<property>
- <name>job.end.notification.url</name>
- <value>http://localhost:8080/jobstatus.php?jobId=$jobId&amp;jobStatus=$jobStatus</value>
- <description>Indicates url which will be called on completion of job to inform
-              end status of job.
-              User can give at most 2 variables with URI : $jobId and $jobStatus.
-              If they are present in URI, then they will be replaced by their
-              respective values.
-</description>
-</property>
--->
-
-<property>
-  <name>job.end.retry.attempts</name>
-  <value>0</value>
-  <description>Indicates how many times hadoop should attempt to contact the
-               notification URL </description>
-</property>
-
-<property>
-  <name>job.end.retry.interval</name>
-   <value>30000</value>
-   <description>Indicates time in milliseconds between notification URL retry
-                calls</description>
-</property>
-
-<!-- Proxy Configuration -->
-<property>
-  <name>hadoop.rpc.socket.factory.class.JobSubmissionProtocol</name>
-  <value></value>
-  <description> SocketFactory to use to connect to a Map/Reduce master
-    (JobTracker). If null or empty, then use hadoop.rpc.socket.class.default.
-  </description>
-</property>
-
-<property>
-  <name>mapred.task.cache.levels</name>
-  <value>2</value>
-  <description> This is the max level of the task cache. For example, if
-    the level is 2, the tasks cached are at the host level and at the rack
-    level.
-  </description>
-</property>
-
-<property>
-  <name>mapred.queue.names</name>
-  <value>default</value>
-  <description> Comma separated list of queues configured for this jobtracker.
-    Jobs are added to queues and schedulers can configure different
-    scheduling properties for the various queues. To configure a property
-    for a queue, the name of the queue must match the name specified in this
-    value. Queue properties that are common to all schedulers are configured
-    here with the naming convention, mapred.queue.$QUEUE-NAME.$PROPERTY-NAME,
-    for e.g. mapred.queue.default.submit-job-acl.
-    The number of queues configured in this parameter could depend on the
-    type of scheduler being used, as specified in
-    mapred.jobtracker.taskScheduler. For example, the JobQueueTaskScheduler
-    supports only a single queue, which is the default configured here.
-    Before adding more queues, ensure that the scheduler you've configured
-    supports multiple queues.
-  </description>
-</property>
-
-<property>
-  <name>mapred.acls.enabled</name>
-  <value>false</value>
-  <description> Specifies whether ACLs should be checked
-    for authorization of users for doing various queue and job level operations.
-    ACLs are disabled by default. If enabled, access control checks are made by
-    JobTracker and TaskTracker when requests are made by users for queue
-    operations like submit job to a queue and kill a job in the queue and job
-    operations like viewing the job-details (See mapreduce.job.acl-view-job)
-    or for modifying the job (See mapreduce.job.acl-modify-job) using
-    Map/Reduce APIs, RPCs or via the console and web user interfaces.
-  </description>
-</property>
-
-<property>
-  <name>mapred.queue.default.state</name>
-  <value>RUNNING</value>
-  <description>
-   This value defines the state the default queue is in.
-   The value can be either "STOPPED" or "RUNNING".
-   This value can be changed at runtime.
-  </description>
-</property>
-
-<property>
-  <name>mapred.job.queue.name</name>
-  <value>default</value>
-  <description> Queue to which a job is submitted. This must match one of the
-    queues defined in mapred.queue.names for the system. Also, the ACL setup
-    for the queue must allow the current user to submit a job to the queue.
-    Before specifying a queue, ensure that the system is configured with
-    the queue, and access is allowed for submitting jobs to the queue.
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.job.acl-modify-job</name>
-  <value> </value>
-  <description> Job specific access-control list for 'modifying' the job. It
-    is only used if authorization is enabled in Map/Reduce by setting the
-    configuration property mapred.acls.enabled to true.
-    This specifies the list of users and/or groups who can do modification
-    operations on the job. For specifying a list of users and groups the
-    format to use is "user1,user2 group1,group2". If set to '*', it allows all
-    users/groups to modify this job. If set to ' '(i.e. space), it allows
-    none. This configuration is used to guard all the modifications with respect
-    to this job and takes care of all the following operations:
-      o killing this job
-      o killing a task of this job, failing a task of this job
-      o setting the priority of this job
-    Each of these operations is also protected by the per-queue level ACL
-    "acl-administer-jobs" configured via mapred-queues.xml. So a caller should
-    have the authorization to satisfy either the queue-level ACL or the
-    job-level ACL.
-
-    Irrespective of this ACL configuration, job-owner, the user who started the
-    cluster, cluster administrators configured via
-    mapreduce.cluster.administrators and queue administrators of the queue to
-    which this job is submitted, configured via
-    mapred.queue.queue-name.acl-administer-jobs in mapred-queue-acls.xml, can
-    do all the modification operations on a job.
-
-    By default, nobody else besides job-owner, the user who started the cluster,
-    cluster administrators and queue administrators can perform modification
-    operations on a job.
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.job.acl-view-job</name>
-  <value> </value>
-  <description> Job specific access-control list for 'viewing' the job. It is
-    only used if authorization is enabled in Map/Reduce by setting the
-    configuration property mapred.acls.enabled to true.
-    This specifies the list of users and/or groups who can view private details
-    about the job. For specifying a list of users and groups the
-    format to use is "user1,user2 group1,group2". If set to '*', it allows all
-    users/groups to view this job. If set to ' '(i.e. space), it allows
-    none. This configuration is used to guard some of the job-views and at
-    present only protects APIs that can return possibly sensitive information
-    of the job-owner like
-      o job-level counters
-      o task-level counters
-      o tasks' diagnostic information
-      o task-logs displayed on the TaskTracker web-UI and
-      o job.xml showed by the JobTracker's web-UI
-    Every other piece of information of jobs is still accessible by any other
-    user, for e.g., JobStatus, JobProfile, list of jobs in the queue, etc.
-
-    Irrespective of this ACL configuration, job-owner, the user who started the
-    cluster, cluster administrators configured via
-    mapreduce.cluster.administrators and queue administrators of the queue to
-    which this job is submitted, configured via
-    mapred.queue.queue-name.acl-administer-jobs in mapred-queue-acls.xml can do
-    all the view operations on a job.
-
-    By default, nobody else besides job-owner, the user who started the
-    cluster, cluster administrators and queue administrators can perform
-    view operations on a job.
-  </description>
-</property>
-
-<property>
-  <name>mapred.tasktracker.indexcache.mb</name>
-  <value>10</value>
-  <description> The maximum memory that a task tracker allows for the
-    index cache that is used when serving map outputs to reducers.
-  </description>
-</property>
-
-<property>
-  <name>mapred.combine.recordsBeforeProgress</name>
-  <value>10000</value>
-  <description> The number of records to process during combine output collection
-   before sending a progress notification to the TaskTracker.
-  </description>
-</property>
-
-<property>
-  <name>mapred.merge.recordsBeforeProgress</name>
-  <value>10000</value>
-  <description> The number of records to process during merge before
-   sending a progress notification to the TaskTracker.
-  </description>
-</property>
-
-<property>
-  <name>mapred.reduce.slowstart.completed.maps</name>
-  <value>0.05</value>
-  <description>Fraction of the number of maps in the job which should be
-  complete before reduces are scheduled for the job.
-  </description>
-</property>
-
-<property>
-  <name>mapred.task.tracker.task-controller</name>
-  <value>org.apache.hadoop.mapred.DefaultTaskController</value>
-  <description>TaskController which is used to launch and manage task execution
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.tasktracker.group</name>
-  <value></value>
-  <description>Expert: Group to which TaskTracker belongs. If
-   LinuxTaskController is configured via mapreduce.tasktracker.taskcontroller,
-   the group owner of the task-controller binary should be the same as this group.
-  </description>
-</property>
-
-<property>
-  <name>mapred.disk.healthChecker.interval</name>
-  <value>60000</value>
-  <description>How often the TaskTracker checks the health of its
-  local directories. Configuring this to a value smaller than the
-  heartbeat interval is equivalent to setting this to the heartbeat
-  interval value.
-  </description>
-</property>
-
-<!--  Node health script variables -->
-
-<property>
-  <name>mapred.healthChecker.script.path</name>
-  <value></value>
-  <description>Absolute path to the script which is
-  periodically run by the node health monitoring service to determine if
-  the node is healthy or not. If the value of this key is empty or the
-  file does not exist in the location configured here, the node health
-  monitoring service is not started.</description>
-</property>
-
-<property>
-  <name>mapred.healthChecker.interval</name>
-  <value>60000</value>
-  <description>Frequency of the node health script to be run,
-  in milliseconds</description>
-</property>
-
-<property>
-  <name>mapred.healthChecker.script.timeout</name>
-  <value>600000</value>
-  <description>Time after which the node health script is killed if
-  unresponsive, and the script is considered to have failed.</description>
-</property>
-
-<property>
-  <name>mapred.healthChecker.script.args</name>
-  <value></value>
-  <description>Comma-separated list of arguments to be passed to the
-  node health script when it is launched.
-  </description>
-</property>
-
-<!--  end of node health script variables -->
-
-<property>
-  <name>mapreduce.job.counters.max</name>
-  <value>120</value>
-  <description>Limit on the number of counters allowed per job.
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.job.counters.groups.max</name>
-  <value>50</value>
-  <description>Limit on the number of counter groups allowed per job.
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.job.counters.counter.name.max</name>
-  <value>64</value>
-  <description>Limit on the length of counter names in jobs. Names
-  exceeding this limit will be truncated.
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.job.counters.group.name.max</name>
-  <value>128</value>
-  <description>Limit on the length of counter group names in jobs. Names
-  exceeding this limit will be truncated.
-  </description>
-</property>
-
-</configuration>
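
For reference, a site file like the one above is consumed through Hadoop's Configuration API. A minimal sketch; the defaults passed to the getters mirror values in the file above, while the resource path is a hypothetical placeholder:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;

    public class ReadMapredSite {
        public static void main(String[] args) {
            Configuration conf = new Configuration(false); // skip built-in default resources
            conf.addResource(new Path("/etc/hadoop/conf/mapred-site.xml")); // hypothetical path
            // Second argument is the fallback if the property is absent:
            int handlers = conf.getInt("mapred.job.tracker.handler.count", 10);
            boolean speculative = conf.getBoolean("mapred.map.tasks.speculative.execution", true);
            System.out.println(handlers + " " + speculative);
        }
    }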
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/MAPREDUCE/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/MAPREDUCE/metainfo.xml
deleted file mode 100644
index 9b5c1a0..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/MAPREDUCE/metainfo.xml
+++ /dev/null
@@ -1,42 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>mapred</user>
-    <comment>This is comment for MAPREDUCE service</comment>
-    <version>1.0</version>
-
-
-    <components>
-        <component>
-            <name>JOBTRACKER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>TASKTRACKER</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>MAPREDUCE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/NAGIOS/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/NAGIOS/metainfo.xml
deleted file mode 100644
index c419c9f..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/NAGIOS/metainfo.xml
+++ /dev/null
@@ -1,32 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is comment for NAGIOS service</comment>
-    <version>1.0</version>
-
-
-    <components>
-        <component>
-            <name>NAGIOS_SERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-
-</metainfo>
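
The deleted metainfo.xml files all follow the same simple stack-definition shape: a service user, a comment, a version, and a list of components, each categorized as MASTER, SLAVE, or CLIENT. A minimal sketch of the same schema for a hypothetical single-master service:

    <metainfo>
        <user>root</user>
        <comment>Hypothetical single-master example service</comment>
        <version>1.0</version>
        <components>
            <component>
                <name>EXAMPLE_SERVER</name>
                <category>MASTER</category>
            </component>
        </components>
    </metainfo>
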
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/OOZIE/configuration/oozie-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/OOZIE/configuration/oozie-site.xml
deleted file mode 100644
index 37e14ab..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/OOZIE/configuration/oozie-site.xml
+++ /dev/null
@@ -1,1601 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<configuration>
-
-    <!-- ************************** VERY IMPORTANT  ************************** -->
-    <!-- This file is in the Oozie configuration directory only for reference. -->
-    <!-- It is not loaded by Oozie; Oozie uses its own private copy.          -->
-    <!-- ************************** VERY IMPORTANT  ************************** -->
-
-    <!-- Base Oozie URL: <SCHEME>://<HOST>:<PORT>/<CONTEXT> -->
-
-    <property>
-        <name>oozie.base.url</name>
-        <value>http://localhost:8080/oozie</value>
-        <description>
-             Base Oozie URL.
-        </description>
-    </property>
-
-    <!-- Services -->
-
-    <property>
-        <name>oozie.system.id</name>
-        <value>oozie-${user.name}</value>
-        <description>
-            The Oozie system ID.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.systemmode</name>
-        <value>NORMAL</value>
-        <description>
-            System mode for Oozie at startup.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.delete.runtime.dir.on.shutdown</name>
-        <value>true</value>
-        <description>
-            Whether the runtime directory should be deleted when Oozie shuts down.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.services</name>
-        <value>
-            org.apache.oozie.service.SchedulerService,
-            org.apache.oozie.service.InstrumentationService,
-            org.apache.oozie.service.CallableQueueService,
-            org.apache.oozie.service.UUIDService,
-            org.apache.oozie.service.ELService,
-            org.apache.oozie.service.AuthorizationService,
-            org.apache.oozie.service.HadoopAccessorService,
-            org.apache.oozie.service.MemoryLocksService,
-            org.apache.oozie.service.DagXLogInfoService,
-            org.apache.oozie.service.SchemaService,
-            org.apache.oozie.service.LiteWorkflowAppService,
-            org.apache.oozie.service.JPAService,
-            org.apache.oozie.service.StoreService,
-            org.apache.oozie.service.CoordinatorStoreService,
-            org.apache.oozie.service.SLAStoreService,
-            org.apache.oozie.service.DBLiteWorkflowStoreService,
-            org.apache.oozie.service.CallbackService,
-            org.apache.oozie.service.ActionService,
-            org.apache.oozie.service.ActionCheckerService,
-            org.apache.oozie.service.RecoveryService,
-            org.apache.oozie.service.PurgeService,
-            org.apache.oozie.service.CoordinatorEngineService,
-            org.apache.oozie.service.BundleEngineService,
-            org.apache.oozie.service.DagEngineService,
-            org.apache.oozie.service.CoordMaterializeTriggerService,
-            org.apache.oozie.service.StatusTransitService,
-            org.apache.oozie.service.PauseTransitService,
-            org.apache.oozie.service.GroupsService,
-            org.apache.oozie.service.ProxyUserService
-        </value>
-        <description>
-            All services to be created and managed by Oozie Services singleton.
-            Class names must be separated by commas.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.services.ext</name>
-        <value> </value>
-        <description>
-            To add/replace services defined in 'oozie.services' with custom implementations.
-            Class names must be separated by commas.
-        </description>
-    </property>
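
As the description above says, 'oozie.services.ext' exists so a deployment can add or replace services without restating the full 'oozie.services' list. A minimal sketch, where org.example.oozie.MyCustomService is a hypothetical service class:

    <property>
        <name>oozie.services.ext</name>
        <value>org.example.oozie.MyCustomService</value>
    </property>
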
-
-    <!-- ConfigurationService -->
-
-    <property>
-        <name>oozie.service.ConfigurationService.ignore.system.properties</name>
-        <value>
-            oozie.service.AuthorizationService.security.enabled
-        </value>
-        <description>
-            Specifies "oozie.*" properties that cannot be overridden via Java system properties.
-            Property names must be separated by commas.
-        </description>
-    </property>
-
-    <!-- SchedulerService -->
-
-    <property>
-        <name>oozie.service.SchedulerService.threads</name>
-        <value>5</value>
-        <description>
-            The number of threads to be used by the SchedulerService to run daemon tasks.
-            If maxed out, scheduled daemon tasks will be queued up and delayed until threads become available.
-        </description>
-    </property>
-
-    <!--  AuthorizationService -->
-
-    <property>
-        <name>oozie.service.AuthorizationService.authorization.enabled</name>
-        <value>false</value>
-        <description>
-            Specifies whether security (user name/admin role) is enabled or not.
-            If disabled any user can manage Oozie system and manage any job.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.AuthorizationService.default.group.as.acl</name>
-        <value>false</value>
-        <description>
-            Enables old behavior where the User's default group is the job's ACL.
-        </description>
-    </property>
-
-    <!-- InstrumentationService -->
-
-    <property>
-        <name>oozie.service.InstrumentationService.logging.interval</name>
-        <value>60</value>
-        <description>
-            Interval, in seconds, at which instrumentation should be logged by the InstrumentationService.
-            If set to 0 it will not log instrumentation data.
-        </description>
-    </property>
-
-    <!-- PurgeService -->
-    <property>
-        <name>oozie.service.PurgeService.older.than</name>
-        <value>30</value>
-        <description>
-            Completed workflow jobs older than this value, in days, will be purged by the PurgeService.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.PurgeService.coord.older.than</name>
-        <value>7</value>
-        <description>
-            Completed coordinator jobs older than this value, in days, will be purged by the PurgeService.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.PurgeService.bundle.older.than</name>
-        <value>7</value>
-        <description>
-            Completed bundle jobs older than this value, in days, will be purged by the PurgeService.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.PurgeService.purge.limit</name>
-        <value>100</value>
-        <description>
-            Completed actions purge - each purge run is limited to this number of actions.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.PurgeService.purge.interval</name>
-        <value>3600</value>
-        <description>
-            Interval at which the purge service will run, in seconds.
-        </description>
-    </property>
-
-    <!-- RecoveryService -->
-
-    <property>
-        <name>oozie.service.RecoveryService.wf.actions.older.than</name>
-        <value>120</value>
-        <description>
-            Age of the actions which are eligible to be queued for recovery, in seconds.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.RecoveryService.callable.batch.size</name>
-        <value>10</value>
-        <description>
-            This value determines the number of callables that will be batched together
-            to be executed by a single thread.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.RecoveryService.interval</name>
-        <value>60</value>
-        <description>
-            Interval at which the RecoverService will run, in seconds.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.RecoveryService.coord.older.than</name>
-        <value>600</value>
-        <description>
-            Age of the Coordinator jobs or actions which are eligible to be queued for recovery, in seconds.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.RecoveryService.bundle.older.than</name>
-        <value>600</value>
-        <description>
-            Age of the Bundle jobs which are eligible to be queued for recovery, in seconds.
-        </description>
-    </property>
-
-    <!-- CallableQueueService -->
-
-    <property>
-        <name>oozie.service.CallableQueueService.queue.size</name>
-        <value>10000</value>
-        <description>Max callable queue size</description>
-    </property>
-
-    <property>
-        <name>oozie.service.CallableQueueService.threads</name>
-        <value>10</value>
-        <description>Number of threads used for executing callables</description>
-    </property>
-
-    <property>
-        <name>oozie.service.CallableQueueService.callable.concurrency</name>
-        <value>3</value>
-        <description>
-            Maximum concurrency for a given callable type.
-            Each command is a callable type (submit, start, run, signal, job, jobs, suspend,resume, etc).
-            Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
-            All commands that use action executors (action-start, action-end, action-kill and action-check) use
-            the action type as the callable type.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.CallableQueueService.callable.next.eligible</name>
-        <value>true</value>
-        <description>
-            If true, when a callable in the queue has already reached max concurrency,
-            Oozie continuously finds the next one that has not yet reached max concurrency.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.CallableQueueService.InterruptMapMaxSize</name>
-        <value>500</value>
-        <description>
-            Maximum size of the interrupt map; an interrupt element will not be inserted into the map if the size is exceeded.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.CallableQueueService.InterruptTypes</name>
-        <value>kill,resume,suspend,bundle_kill,bundle_resume,bundle_suspend,coord_kill,coord_change,coord_resume,coord_suspend</value>
-        <description>
-            The types of XCommands that are considered to be of interrupt type.
-        </description>
-    </property>
-
-    <!--  CoordMaterializeTriggerService -->
-
-    <property>
-        <name>oozie.service.CoordMaterializeTriggerService.lookup.interval</name>
-        <value>300</value>
-        <description>
-            The coordinator job lookup trigger command is scheduled at this interval (in seconds).
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.CoordMaterializeTriggerService.materialization.window</name>
-        <value>3600</value>
-        <description>
-            The coordinator job lookup command materializes each job for the next "window" duration.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.CoordMaterializeTriggerService.callable.batch.size</name>
-        <value>10</value>
-        <description>
-            This value determines the number of callables that will be batched together
-            to be executed by a single thread.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.CoordMaterializeTriggerService.materialization.system.limit</name>
-        <value>50</value>
-        <description>
-            This value determines the number of coordinator jobs to be materialized at a given time.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.coord.normal.default.timeout</name>
-        <value>10080</value>
-        <description>
-            Default timeout for a coordinator action input check (in minutes) for a normal job.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.coord.default.max.timeout</name>
-        <value>86400</value>
-        <description>
-            Default maximum timeout for a coordinator action input check (in minutes). 86400 minutes = 60 days.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.coord.input.check.requeue.interval</name>
-        <value>60000</value>
-        <description>
-            Command re-queue interval for coordinator data input checks (in milliseconds).
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.coord.default.concurrency</name>
-        <value>1</value>
-        <description>
-            Default concurrency for a coordinator job, determining the maximum number of actions
-            that should be executed at the same time. -1 means infinite concurrency.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.coord.default.throttle</name>
-        <value>12</value>
-        <description>
-            Default throttle for a coordinator job, determining the maximum number of actions
-            that should be in WAITING state at the same time.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.coord.materialization.throttling.factor</name>
-        <value>0.05</value>
-        <description>
-            Determines the maximum number of actions that should be in WAITING state for a single
-            job at any time. The value is calculated as this factor multiplied by the total queue size.
-        </description>
-    </property>
-
-	<!-- ELService -->
-    <!--  List of supported groups for ELService -->
-    <property>
-        <name>oozie.service.ELService.groups</name>
-        <value>workflow,wf-sla-submit,coord-job-submit-freq,coord-job-submit-nofuncs,coord-job-submit-data,coord-job-submit-instances,coord-sla-submit,coord-action-create,coord-action-create-inst,coord-sla-create,coord-action-start</value>
-        <description>List of groups for different ELServices</description>
-    </property>
-
-<!-- Workflow specifics -->
-    <property>
-        <name>oozie.service.ELService.constants.workflow</name>
-        <value>
-            KB=org.apache.oozie.util.ELConstantsFunctions#KB,
-            MB=org.apache.oozie.util.ELConstantsFunctions#MB,
-            GB=org.apache.oozie.util.ELConstantsFunctions#GB,
-            TB=org.apache.oozie.util.ELConstantsFunctions#TB,
-            PB=org.apache.oozie.util.ELConstantsFunctions#PB,
-            RECORDS=org.apache.oozie.action.hadoop.HadoopELFunctions#RECORDS,
-            MAP_IN=org.apache.oozie.action.hadoop.HadoopELFunctions#MAP_IN,
-            MAP_OUT=org.apache.oozie.action.hadoop.HadoopELFunctions#MAP_OUT,
-            REDUCE_IN=org.apache.oozie.action.hadoop.HadoopELFunctions#REDUCE_IN,
-            REDUCE_OUT=org.apache.oozie.action.hadoop.HadoopELFunctions#REDUCE_OUT,
-            GROUPS=org.apache.oozie.action.hadoop.HadoopELFunctions#GROUPS
-        </value>
-        <description>
-            EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.ELService.ext.constants.workflow</name>
-        <value> </value>
-        <description>
-            EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT.
-            This property is a convenience property to add extensions to the built in executors without having to
-            include all the built in ones.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.ELService.functions.workflow</name>
-        <value>
-            firstNotNull=org.apache.oozie.util.ELConstantsFunctions#firstNotNull,
-            concat=org.apache.oozie.util.ELConstantsFunctions#concat,
-            trim=org.apache.oozie.util.ELConstantsFunctions#trim,
-            timestamp=org.apache.oozie.util.ELConstantsFunctions#timestamp,
-            urlEncode=org.apache.oozie.util.ELConstantsFunctions#urlEncode,
-            toJsonStr=org.apache.oozie.util.ELConstantsFunctions#toJsonStr,
-            toPropertiesStr=org.apache.oozie.util.ELConstantsFunctions#toPropertiesStr,
-            toConfigurationStr=org.apache.oozie.util.ELConstantsFunctions#toConfigurationStr,
-            wf:id=org.apache.oozie.DagELFunctions#wf_id,
-            wf:name=org.apache.oozie.DagELFunctions#wf_name,
-            wf:appPath=org.apache.oozie.DagELFunctions#wf_appPath,
-            wf:conf=org.apache.oozie.DagELFunctions#wf_conf,
-            wf:user=org.apache.oozie.DagELFunctions#wf_user,
-            wf:group=org.apache.oozie.DagELFunctions#wf_group,
-            wf:callback=org.apache.oozie.DagELFunctions#wf_callback,
-            wf:transition=org.apache.oozie.DagELFunctions#wf_transition,
-            wf:lastErrorNode=org.apache.oozie.DagELFunctions#wf_lastErrorNode,
-            wf:errorCode=org.apache.oozie.DagELFunctions#wf_errorCode,
-            wf:errorMessage=org.apache.oozie.DagELFunctions#wf_errorMessage,
-            wf:run=org.apache.oozie.DagELFunctions#wf_run,
-            wf:actionData=org.apache.oozie.DagELFunctions#wf_actionData,
-            wf:actionExternalId=org.apache.oozie.DagELFunctions#wf_actionExternalId,
-            wf:actionTrackerUri=org.apache.oozie.DagELFunctions#wf_actionTrackerUri,
-            wf:actionExternalStatus=org.apache.oozie.DagELFunctions#wf_actionExternalStatus,
-            hadoop:counters=org.apache.oozie.action.hadoop.HadoopELFunctions#hadoop_counters,
-            fs:exists=org.apache.oozie.action.hadoop.FsELFunctions#fs_exists,
-            fs:isDir=org.apache.oozie.action.hadoop.FsELFunctions#fs_isDir,
-            fs:dirSize=org.apache.oozie.action.hadoop.FsELFunctions#fs_dirSize,
-            fs:fileSize=org.apache.oozie.action.hadoop.FsELFunctions#fs_fileSize,
-            fs:blockSize=org.apache.oozie.action.hadoop.FsELFunctions#fs_blockSize
-        </value>
-        <description>
-            EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.WorkflowAppService.WorkflowDefinitionMaxLength</name>
-        <value>100000</value>
-        <description>
-            The maximum length of the workflow definition, in bytes.
-            An error will be reported if the length exceeds the given maximum.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.ELService.ext.functions.workflow</name>
-        <value>
-        </value>
-        <description>
-            EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-            This property is a convenience property to add extensions to the built in executors without having to
-            include all the built in ones.
-        </description>
-    </property>
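
The ext.functions property above is the hook for registering additional workflow EL functions, using the same [PREFIX:]NAME=CLASS#METHOD format as the built-in list. A minimal sketch, where the prefix, class, and method are hypothetical:

    <property>
        <name>oozie.service.ELService.ext.functions.workflow</name>
        <value>
            myns:reverse=org.example.oozie.MyELFunctions#reverse
        </value>
    </property>
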
-
-    <!-- Resolve SLA information during Workflow job submission -->
-    <property>
-        <name>oozie.service.ELService.constants.wf-sla-submit</name>
-        <value>
-            MINUTES=org.apache.oozie.util.ELConstantsFunctions#SUBMIT_MINUTES,
-            HOURS=org.apache.oozie.util.ELConstantsFunctions#SUBMIT_HOURS,
-            DAYS=org.apache.oozie.util.ELConstantsFunctions#SUBMIT_DAYS
-        </value>
-        <description>
-            EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.ELService.ext.constants.wf-sla-submit</name>
-        <value> </value>
-        <description>
-            EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT.
-            This property is a convenience property to add extensions to the built in executors without having to
-            include all the built in ones.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.ELService.functions.wf-sla-submit</name>
-        <value> </value>
-        <description>
-            EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-        </description>
-    </property>
-    <property>
-        <name>oozie.service.ELService.ext.functions.wf-sla-submit</name>
-        <value>
-        </value>
-        <description>
-            EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-            This property is a convenience property to add extensions to the built in executors without having to
-            include all the built in ones.
-        </description>
-    </property>
-
-<!-- Coordinator specifics -->
-<!-- Phase 1 resolution during job submission -->
-<!-- EL evaluator setup to resolve mainly frequency tags -->
-    <property>
-        <name>oozie.service.ELService.constants.coord-job-submit-freq</name>
-        <value> </value>
-        <description>
-            EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.ELService.ext.constants.coord-job-submit-freq</name>
-        <value> </value>
-        <description>
-            EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT.
-            This property is a convenience property to add extensions to the built in executors without having to
-            include all the built in ones.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.ELService.functions.coord-job-submit-freq</name>
-        <value>
-            coord:days=org.apache.oozie.coord.CoordELFunctions#ph1_coord_days,
-            coord:months=org.apache.oozie.coord.CoordELFunctions#ph1_coord_months,
-            coord:hours=org.apache.oozie.coord.CoordELFunctions#ph1_coord_hours,
-            coord:minutes=org.apache.oozie.coord.CoordELFunctions#ph1_coord_minutes,
-            coord:endOfDays=org.apache.oozie.coord.CoordELFunctions#ph1_coord_endOfDays,
-            coord:endOfMonths=org.apache.oozie.coord.CoordELFunctions#ph1_coord_endOfMonths,
-            coord:conf=org.apache.oozie.coord.CoordELFunctions#coord_conf,
-            coord:user=org.apache.oozie.coord.CoordELFunctions#coord_user
-        </value>
-        <description>
-            EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.ELService.ext.functions.coord-job-submit-freq</name>
-        <value>
-        </value>
-        <description>
-            EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-            This property is a convenience property to add extensions to the built in executors without having to
-            include all the built in ones.
-        </description>
-    </property>
-
-<!-- EL evaluator setup to resolve mainly all constants/variables - no EL functions are resolved -->
-    <property>
-        <name>oozie.service.ELService.constants.coord-job-submit-nofuncs</name>
-        <value>
-            MINUTE=org.apache.oozie.coord.CoordELConstants#SUBMIT_MINUTE,
-            HOUR=org.apache.oozie.coord.CoordELConstants#SUBMIT_HOUR,
-            DAY=org.apache.oozie.coord.CoordELConstants#SUBMIT_DAY,
-            MONTH=org.apache.oozie.coord.CoordELConstants#SUBMIT_MONTH,
-            YEAR=org.apache.oozie.coord.CoordELConstants#SUBMIT_YEAR
-        </value>
-        <description>
-            EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.ELService.ext.constants.coord-job-submit-nofuncs</name>
-        <value> </value>
-        <description>
-            EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT.
-            This property is a convenience property to add extensions to the built in executors without having to
-            include all the built in ones.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.ELService.functions.coord-job-submit-nofuncs</name>
-        <value>
-            coord:conf=org.apache.oozie.coord.CoordELFunctions#coord_conf,
-            coord:user=org.apache.oozie.coord.CoordELFunctions#coord_user
-        </value>
-        <description>
-            EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.ELService.ext.functions.coord-job-submit-nofuncs</name>
-        <value> </value>
-        <description>
-            EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-            This property is a convenience property to add extensions to the built in executors without having to
-            include all the built in ones.
-        </description>
-    </property>
-
-<!-- EL evaluator setup to **check** whether instances/start-instance/end-instances are valid;
- no EL functions will be resolved -->
-    <property>
-        <name>oozie.service.ELService.constants.coord-job-submit-instances</name>
-        <value> </value>
-        <description>
-            EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.ELService.ext.constants.coord-job-submit-instances</name>
-        <value> </value>
-        <description>
-            EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT.
-            This property is a convenience property to add extensions to the built in executors without having to
-            include all the built in ones.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.ELService.functions.coord-job-submit-instances</name>
-        <value>
-            coord:hoursInDay=org.apache.oozie.coord.CoordELFunctions#ph1_coord_hoursInDay_echo,
-            coord:daysInMonth=org.apache.oozie.coord.CoordELFunctions#ph1_coord_daysInMonth_echo,
-            coord:tzOffset=org.apache.oozie.coord.CoordELFunctions#ph1_coord_tzOffset_echo,
-            coord:current=org.apache.oozie.coord.CoordELFunctions#ph1_coord_current_echo,
-            coord:latest=org.apache.oozie.coord.CoordELFunctions#ph1_coord_latest_echo,
-            coord:future=org.apache.oozie.coord.CoordELFunctions#ph1_coord_future_echo,
-            coord:formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo,
-            coord:conf=org.apache.oozie.coord.CoordELFunctions#coord_conf,
-            coord:user=org.apache.oozie.coord.CoordELFunctions#coord_user
-        </value>
-        <description>
-            EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.ELService.ext.functions.coord-job-submit-instances</name>
-        <value>
-        </value>
-        <description>
-            EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-            This property is a convenience property to add extensions to the built in executors without having to
-            include all the built in ones.
-        </description>
-    </property>
-
-<!-- EL evaluator setup to **check** whether dataIn and dataOut are valid;
- no EL functions will be resolved -->
-
-    <property>
-        <name>oozie.service.ELService.constants.coord-job-submit-data</name>
-        <value> </value>
-        <description>
-            EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.ELService.ext.constants.coord-job-submit-data</name>
-        <value> </value>
-        <description>
-            EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT.
-            This property is a convenience property to add extensions to the built in executors without having to
-            include all the built in ones.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.ELService.functions.coord-job-submit-data</name>
-        <value>
-            coord:dataIn=org.apache.oozie.coord.CoordELFunctions#ph1_coord_dataIn_echo,
-            coord:dataOut=org.apache.oozie.coord.CoordELFunctions#ph1_coord_dataOut_echo,
-            coord:nominalTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_wrap,
-            coord:actualTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_actualTime_echo_wrap,
-            coord:dateOffset=org.apache.oozie.coord.CoordELFunctions#ph1_coord_dateOffset_echo,
-            coord:formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo,
-            coord:actionId=org.apache.oozie.coord.CoordELFunctions#ph1_coord_actionId_echo,
-            coord:name=org.apache.oozie.coord.CoordELFunctions#ph1_coord_name_echo,
-            coord:conf=org.apache.oozie.coord.CoordELFunctions#coord_conf,
-            coord:user=org.apache.oozie.coord.CoordELFunctions#coord_user
-        </value>
-        <description>
-            EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.ELService.ext.functions.coord-job-submit-data</name>
-        <value>
-        </value>
-        <description>
-            EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-            This property is a convenience property to add extensions to the built in executors without having to
-            include all the built in ones.
-        </description>
-    </property>
-
-    <!-- Resolve SLA information during Coordinator job submission -->
-    <property>
-        <name>oozie.service.ELService.constants.coord-sla-submit</name>
-        <value>
-            MINUTES=org.apache.oozie.coord.CoordELConstants#SUBMIT_MINUTES,
-            HOURS=org.apache.oozie.coord.CoordELConstants#SUBMIT_HOURS,
-            DAYS=org.apache.oozie.coord.CoordELConstants#SUBMIT_DAYS
-        </value>
-        <description>
-            EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.ELService.ext.constants.coord-sla-submit</name>
-        <value> </value>
-        <description>
-            EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT.
-            This property is a convenience property to add extensions to the built in executors without having to
-            include all the built in ones.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.ELService.functions.coord-sla-submit</name>
-        <value>
-            coord:nominalTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_fixed,
-            coord:conf=org.apache.oozie.coord.CoordELFunctions#coord_conf,
-            coord:user=org.apache.oozie.coord.CoordELFunctions#coord_user
-        </value>
-        <description>
-            EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-        </description>
-    </property>
-    <property>
-        <name>oozie.service.ELService.ext.functions.coord-sla-submit</name>
-        <value>
-        </value>
-        <description>
-            EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-            This property is a convenience property to add extensions to the built in executors without having to
-            include all the built in ones.
-        </description>
-    </property>
-
-<!-- Action creation for coordinator -->
-    <property>
-        <name>oozie.service.ELService.constants.coord-action-create</name>
-        <value>
-        </value>
-        <description>
-            EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.ELService.ext.constants.coord-action-create</name>
-        <value> </value>
-        <description>
-            EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT.
-            This property is a convenience property to add extensions to the built in executors without having to
-            include all the built in ones.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.ELService.functions.coord-action-create</name>
-        <value>
-            coord:hoursInDay=org.apache.oozie.coord.CoordELFunctions#ph2_coord_hoursInDay,
-            coord:daysInMonth=org.apache.oozie.coord.CoordELFunctions#ph2_coord_daysInMonth,
-            coord:tzOffset=org.apache.oozie.coord.CoordELFunctions#ph2_coord_tzOffset,
-            coord:current=org.apache.oozie.coord.CoordELFunctions#ph2_coord_current,
-            coord:latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,
-            coord:future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo,
-            coord:actionId=org.apache.oozie.coord.CoordELFunctions#ph2_coord_actionId,
-            coord:name=org.apache.oozie.coord.CoordELFunctions#ph2_coord_name,
-            coord:formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime,
-            coord:conf=org.apache.oozie.coord.CoordELFunctions#coord_conf,
-            coord:user=org.apache.oozie.coord.CoordELFunctions#coord_user
-        </value>
-        <description>
-            EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.ELService.ext.functions.coord-action-create</name>
-        <value>
-        </value>
-        <description>
-            EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-            This property is a convenience property to add extensions to the built in executors without having to
-            include all the built in ones.
-        </description>
-    </property>
-
-
-<!-- Action creation for coordinator, used only to evaluate an instance number such as
-     ${current(daysInMonth())}; 'current' will be echoed. -->
-    <property>
-        <name>oozie.service.ELService.constants.coord-action-create-inst</name>
-        <value>
-        </value>
-        <description>
-            EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.ELService.ext.constants.coord-action-create-inst</name>
-        <value> </value>
-        <description>
-            EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT.
-            This property is a convenience property to add extensions to the built in executors without having to
-            include all the built in ones.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.ELService.functions.coord-action-create-inst</name>
-        <value>
-            coord:hoursInDay=org.apache.oozie.coord.CoordELFunctions#ph2_coord_hoursInDay,
-            coord:daysInMonth=org.apache.oozie.coord.CoordELFunctions#ph2_coord_daysInMonth,
-            coord:tzOffset=org.apache.oozie.coord.CoordELFunctions#ph2_coord_tzOffset,
-            coord:current=org.apache.oozie.coord.CoordELFunctions#ph2_coord_current_echo,
-            coord:latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,
-            coord:future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo,
-            coord:formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime,
-            coord:conf=org.apache.oozie.coord.CoordELFunctions#coord_conf,
-            coord:user=org.apache.oozie.coord.CoordELFunctions#coord_user
-        </value>
-        <description>
-            EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.ELService.ext.functions.coord-action-create-inst</name>
-        <value>
-        </value>
-        <description>
-            EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-            This property is a convenience property to add extensions to the built in executors without having to
-            include all the built in ones.
-        </description>
-    </property>
-
-    <!-- Resolve SLA information during action creation/materialization -->
-    <property>
-        <name>oozie.service.ELService.constants.coord-sla-create</name>
-        <value> </value>
-        <description>
-            EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.ELService.ext.constants.coord-sla-create</name>
-        <value>
-            MINUTES=org.apache.oozie.coord.CoordELConstants#SUBMIT_MINUTES,
-            HOURS=org.apache.oozie.coord.CoordELConstants#SUBMIT_HOURS,
-            DAYS=org.apache.oozie.coord.CoordELConstants#SUBMIT_DAYS</value>
-        <description>
-            EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT.
-            This property is a convenience property to add extensions to the built in executors without having to
-            include all the built in ones.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.ELService.functions.coord-sla-create</name>
-        <value>
-            coord:nominalTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_nominalTime,
-            coord:conf=org.apache.oozie.coord.CoordELFunctions#coord_conf,
-            coord:user=org.apache.oozie.coord.CoordELFunctions#coord_user
-        </value>
-        <description>
-            EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-        </description>
-    </property>
-    <property>
-        <name>oozie.service.ELService.ext.functions.coord-sla-create</name>
-        <value>
-        </value>
-        <description>
-            EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-            This property is a convenience property to add extensions to the built in executors without having to
-            include all the built in ones.
-        </description>
-    </property>
-
-<!-- Action start for coordinator -->
-    <property>
-        <name>oozie.service.ELService.constants.coord-action-start</name>
-        <value>
-        </value>
-        <description>
-            EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.ELService.ext.constants.coord-action-start</name>
-        <value> </value>
-        <description>
-            EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT.
-            This property is a convenience property to add extensions to the built in executors without having to
-            include all the built in ones.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.ELService.functions.coord-action-start</name>
-        <value>
-            coord:hoursInDay=org.apache.oozie.coord.CoordELFunctions#ph3_coord_hoursInDay,
-            coord:daysInMonth=org.apache.oozie.coord.CoordELFunctions#ph3_coord_daysInMonth,
-            coord:tzOffset=org.apache.oozie.coord.CoordELFunctions#ph3_coord_tzOffset,
-            coord:latest=org.apache.oozie.coord.CoordELFunctions#ph3_coord_latest,
-            coord:future=org.apache.oozie.coord.CoordELFunctions#ph3_coord_future,
-            coord:dataIn=org.apache.oozie.coord.CoordELFunctions#ph3_coord_dataIn,
-            coord:dataOut=org.apache.oozie.coord.CoordELFunctions#ph3_coord_dataOut,
-            coord:nominalTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_nominalTime,
-            coord:actualTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_actualTime,
-            coord:dateOffset=org.apache.oozie.coord.CoordELFunctions#ph3_coord_dateOffset,
-            coord:formatTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_formatTime,
-            coord:actionId=org.apache.oozie.coord.CoordELFunctions#ph3_coord_actionId,
-            coord:name=org.apache.oozie.coord.CoordELFunctions#ph3_coord_name,
-            coord:conf=org.apache.oozie.coord.CoordELFunctions#coord_conf,
-            coord:user=org.apache.oozie.coord.CoordELFunctions#coord_user
-        </value>
-        <description>
-            EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.ELService.ext.functions.coord-action-start</name>
-        <value>
-        </value>
-        <description>
-            EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-            This property is a convenience property to add extensions to the built in executors without having to
-            include all the built in ones.
-        </description>
-    </property>
-
-
-    <!-- UUIDService -->
-
-    <property>
-        <name>oozie.service.UUIDService.generator</name>
-        <value>counter</value>
-        <description>
-            random : generated UUIDs will be random strings.
-            counter: generated UUIDs will be a counter postfixed with the system startup time.
-        </description>
-    </property>
-
-    <!-- DBLiteWorkflowStoreService -->
-
-    <property>
-        <name>oozie.service.DBLiteWorkflowStoreService.status.metrics.collection.interval</name>
-        <value>5</value>
-        <description> Workflow Status metrics collection interval in minutes.</description>
-    </property>
-
-    <property>
-        <name>oozie.service.DBLiteWorkflowStoreService.status.metrics.window</name>
-        <value>3600</value>
-        <description>
-            Workflow Status metrics collection window in seconds. Workflow status will be instrumented for the window.
-        </description>
-    </property>
-
-    <!-- DB Schema Info, used by DBLiteWorkflowStoreService -->
-
-    <property>
-        <name>oozie.db.schema.name</name>
-        <value>oozie</value>
-        <description>
-            Oozie database name.
-        </description>
-    </property>
-
-   <!-- StoreService -->
-
-    <property>
-        <name>oozie.service.JPAService.create.db.schema</name>
-        <value>true</value>
-        <description>
-            Creates Oozie DB.
-
-            If set to true, it creates the DB schema if it does not exist; if the DB schema exists, it is a NOP.
-            If set to false, it does not create the DB schema; if the DB schema does not exist, startup fails.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.validate.db.connection</name>
-        <value>false</value>
-        <description>
-            Validates DB connections from the DB connection pool.
-            If the 'oozie.service.JPAService.create.db.schema' property is set to true, this property is ignored.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.validate.db.connection.eviction.interval</name>
-        <value>300000</value>
-        <description>
-            Validates DB connections from the DB connection pool.
-            When validate db connection 'TestWhileIdle' is true, the number of milliseconds to sleep
-            between runs of the idle object evictor thread.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.validate.db.connection.eviction.num</name>
-        <value>10</value>
-        <description>
-            Validates DB connections from the DB connection pool.
-            When validate db connection 'TestWhileIdle' is true, the number of objects to examine during
-            each run of the idle object evictor thread.
-        </description>
-    </property>
-
-
-    <property>
-        <name>oozie.service.JPAService.connection.data.source</name>
-        <value>org.apache.commons.dbcp.BasicDataSource</value>
-        <description>
-            DataSource to be used for connection pooling.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.driver</name>
-        <value>org.apache.derby.jdbc.EmbeddedDriver</value>
-        <description>
-            JDBC driver class.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.url</name>
-        <value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
-        <description>
-            JDBC URL.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.username</name>
-        <value>sa</value>
-        <description>
-            DB user name.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.password</name>
-        <value> </value>
-        <description>
-            DB user password.
-
-            IMPORTANT: if the password is empty, leave a 1-space string; the service trims the value,
-                       and if it is empty, Configuration assumes it is NULL.
-
-            IMPORTANT: if the StoreServicePasswordService is active, it will reset this value with the value given in
-                       the console.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.pool.max.active.conn</name>
-        <value>10</value>
-        <description>
-             Max number of connections.
-        </description>
-    </property>
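
The JPAService properties above default to an embedded Derby database; pointing Oozie at an external database is a matter of swapping the driver, URL, and credentials. A minimal sketch for MySQL, where the host, port, and user are illustrative and the Connector/J JAR is assumed to be on the classpath:

    <property>
        <name>oozie.service.JPAService.jdbc.driver</name>
        <value>com.mysql.jdbc.Driver</value>
    </property>
    <property>
        <name>oozie.service.JPAService.jdbc.url</name>
        <value>jdbc:mysql://db.example.com:3306/oozie</value>
    </property>
    <property>
        <name>oozie.service.JPAService.jdbc.username</name>
        <value>oozie</value>
    </property>
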
-
-   <!-- SchemaService -->
-
-    <property>
-        <name>oozie.service.SchemaService.wf.ext.schemas</name>
-        <value>oozie-sla-0.1.xsd</value>
-        <description>
-            Schemas for additional actions types.
-
-            IMPORTANT: if there are no schemas, leave a 1-space string; the service trims the value,
-                       and if it is empty, Configuration assumes it is NULL.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.SchemaService.coord.ext.schemas</name>
-        <value>oozie-sla-0.1.xsd</value>
-        <description>
-            Schemas for additional actions types.
-
-            IMPORTANT: if there are no schemas, leave a 1-space string; the service trims the value,
-                       and if it is empty, Configuration assumes it is NULL.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.SchemaService.sla.ext.schemas</name>
-        <value> </value>
-        <description>
-            Schemas for semantic validation for GMS SLA.
-
-            IMPORTANT: if there are no schemas, leave a 1-space string; the service trims the value,
-                       and if it is empty, Configuration assumes it is NULL.
-        </description>
-    </property>
-    <!-- CallbackService -->
-
-    <property>
-        <name>oozie.service.CallbackService.base.url</name>
-        <value>${oozie.base.url}/callback</value>
-        <description>
-             Base callback URL used by ActionExecutors.
-        </description>
-    </property>
-
-    <!-- CallbackServlet -->
-
-    <property>
-        <name>oozie.servlet.CallbackServlet.max.data.len</name>
-        <value>2048</value>
-        <description>
-            Max size in characters for the action completion data output.
-        </description>
-    </property>
-
-    <!-- External stats-->
-
-    <property>
-        <name>oozie.external.stats.max.size</name>
-        <value>-1</value>
-        <description>
-            Max size in bytes for action stats. -1 means infinite value.
-        </description>
-    </property>
-
-    <!-- JobCommand -->
-
-    <property>
-        <name>oozie.JobCommand.job.console.url</name>
-        <value>${oozie.base.url}?job=</value>
-        <description>
-             Base console URL for a workflow job.
-        </description>
-    </property>
-
-
-    <!-- ActionService -->
-
-    <property>
-        <name>oozie.service.ActionService.executor.classes</name>
-        <value>
-            org.apache.oozie.action.decision.DecisionActionExecutor,
-            org.apache.oozie.action.hadoop.JavaActionExecutor,
-            org.apache.oozie.action.hadoop.FsActionExecutor,
-            org.apache.oozie.action.hadoop.MapReduceActionExecutor,
-            org.apache.oozie.action.hadoop.PigActionExecutor,
-            org.apache.oozie.action.ssh.SshActionExecutor,
-            org.apache.oozie.action.oozie.SubWorkflowActionExecutor
-        </value>
-        <description>
-            List of ActionExecutors classes (separated by commas).
-            Only action types with associated executors can be used in workflows.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.ActionService.executor.ext.classes</name>
-        <value> </value>
-        <description>
-            List of ActionExecutors extension classes (separated by commas). Only action types with associated
-            executors can be used in workflows. This property is a convenience property to add extensions to the built
-            in executors without having to include all the built in ones.
-        </description>
-    </property>
-
-    <!-- ActionCheckerService -->
-
-    <property>
-        <name>oozie.service.ActionCheckerService.action.check.interval</name>
-        <value>60</value>
-        <description>
-            The frequency at which the ActionCheckService will run.
-        </description>
-    </property>
-
-     <property>
-        <name>oozie.service.ActionCheckerService.action.check.delay</name>
-        <value>600</value>
-        <description>
-            The time, in seconds, between an ActionCheck for the same action.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.ActionCheckerService.callable.batch.size</name>
-        <value>10</value>
-        <description>
-            This value determines the number of actions which will be batched together
-            to be executed by a single thread.
-        </description>
-    </property>
-
-    <!-- StatusTransitService -->
-    <property>
-        <name>oozie.service.StatusTransitService.statusTransit.interval</name>
-        <value>60</value>
-        <description>
-            The frequency in seconds at which the StatusTransitService will run.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.StatusTransitService.backward.support.for.coord.status</name>
-        <value>false</value>
-        <description>
-            Set to true if coordinator jobs are submitted using the 'uri:oozie:coordinator:0.1'
-            namespace and the Oozie 2.x status transitions should be kept. If set to true:
-            1. SUCCEEDED state in a coordinator job means materialization is done.
-            2. No DONEWITHERROR state in a coordinator job.
-            3. No PAUSED or PREPPAUSED state in a coordinator job.
-            4. PREPSUSPENDED becomes SUSPENDED in a coordinator job.
-        </description>
-    </property>
-
-    <!-- PauseTransitService -->
-    <property>
-        <name>oozie.service.PauseTransitService.PauseTransit.interval</name>
-        <value>60</value>
-        <description>
-            The frequency in seconds at which the PauseTransitService will run.
-        </description>
-    </property>
-
-    <!-- HadoopActionExecutor -->
-    <!-- This is common to the subclasses action executors for map-reduce and pig -->
-
-    <property>
-        <name>oozie.action.retries.max</name>
-        <value>3</value>
-        <description>
-           The number of retries for executing an action in case of failure
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.action.hadoop.delete.hdfs.tmp.dir</name>
-        <value>false</value>
-        <description>
-            If set to true, it will delete temporary directory at the end of execution of map reduce action.
-        </description>
-    </property>
-
-    <!-- PigActionExecutor -->
-
-    <property>
-        <name>oozie.action.pig.delete.hdfs.tmp.dir</name>
-        <value>false</value>
-        <description>
-            If set to true, it will delete temporary directory at the end of execution of pig action.
-        </description>
-    </property>
-
-    <!-- SshActionExecutor -->
-
-    <property>
-        <name>oozie.action.ssh.delete.remote.tmp.dir</name>
-        <value>false</value>
-        <description>
-            If set to true, it will delete temporary directory at the end of execution of ssh action.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.action.ssh.http.command</name>
-        <value>curl</value>
-        <description>
-            Command to use for the callback to Oozie, normally 'curl' or 'wget'.
-            The command must be available in the PATH environment variable of the USER@HOST box shell.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.action.ssh.http.command.post.options</name>
-        <value>--data-binary @#stdout --request POST --header "content-type:text/plain"</value>
-        <description>
-            The callback command POST options.
-            Used when the output of the ssh action is captured.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.action.ssh.allow.user.at.host</name>
-        <value>true</value>
-        <description>
-            Specifies whether the user specified by the ssh action is allowed or is to be replaced
-            by the job user.
-        </description>
-    </property>
-
-    <!-- HadoopAccessorService -->
-
-    <property>
-        <name>oozie.service.HadoopAccessorService.kerberos.enabled</name>
-        <value>false</value>
-        <description>
-            Indicates if Oozie is configured to use Kerberos.
-        </description>
-    </property>
-
-    <property>
-        <name>local.realm</name>
-        <value>LOCALHOST</value>
-        <description>
-            Kerberos realm used by Oozie and Hadoop. 'local.realm' is used to stay aligned with the Hadoop configuration.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.HadoopAccessorService.keytab.file</name>
-        <value>${user.home}/oozie.keytab</value>
-        <description>
-            Location of the Oozie user keytab file.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.HadoopAccessorService.kerberos.principal</name>
-        <value>${user.name}/localhost@${local.realm}</value>
-        <description>
-            Kerberos principal for Oozie service.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
-        <value> </value>
-        <description>
-            Whitelisted job tracker for Oozie service.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
-        <value> </value>
-        <description>
-            Whitelisted name node for Oozie service.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
-        <value>*=hadoop-conf</value>
-        <description>
-            Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
-            the Hadoop service (JobTracker, YARN, HDFS). The wildcard '*' configuration is
-            used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
-            the relevant Hadoop *-site.xml files. If the path is relative, it is looked up
-            within the Oozie configuration directory; the path can also be absolute (e.g. to
-            point to Hadoop client conf/ directories in the local filesystem).
-        </description>
-    </property>
-
-
-    <property>
-        <name>oozie.service.HadoopAccessorService.action.configurations</name>
-        <value>*=action-conf</value>
-        <description>
-            Comma separated AUTHORITY=ACTION_CONF_DIR, where AUTHORITY is the HOST:PORT of
-            the Hadoop MapReduce service (JobTracker, YARN). The wildcard '*' configuration is
-            used when there is no exact match for an authority. The ACTION_CONF_DIR may contain
-            ACTION.xml files where ACTION is the action type ('java', 'map-reduce', 'pig',
-            'hive', 'sqoop', etc.). If the ACTION.xml file exists, its properties will be used
-            as default properties for the action. If the path is relative, it is looked up
-            within the Oozie configuration directory; the path can also be absolute (e.g. to
-            point to Hadoop client conf/ directories in the local filesystem).
-        </description>
-    </property>
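The two properties above share one lookup rule: an exact AUTHORITY (HOST:PORT) match
wins, and the '*' wildcard is the fallback. A minimal sketch of that lookup in Java
(illustrative only, not Oozie's actual implementation; class and host names are
hypothetical):

    import java.util.HashMap;
    import java.util.Map;

    public class AuthorityConfResolver {
        private final Map<String, String> mapping = new HashMap<String, String>();

        // Parses a value such as "*=hadoop-conf,nn1.example.com:8020=conf-nn1".
        public AuthorityConfResolver(String propertyValue) {
            for (String entry : propertyValue.split(",")) {
                String[] kv = entry.trim().split("=", 2);
                if (kv.length == 2) {
                    mapping.put(kv[0].trim(), kv[1].trim());
                }
            }
        }

        // Exact authority match first, then the '*' wildcard fallback.
        public String resolve(String authority) {
            String dir = mapping.get(authority);
            return (dir != null) ? dir : mapping.get("*");
        }

        public static void main(String[] args) {
            AuthorityConfResolver r =
                new AuthorityConfResolver("*=hadoop-conf,nn1.example.com:8020=conf-nn1");
            System.out.println(r.resolve("nn1.example.com:8020")); // conf-nn1
            System.out.println(r.resolve("jt1.example.com:8021")); // hadoop-conf
        }
    }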
-
-    <!-- Credentials -->
-    <property>
-        <name>oozie.credentials.credentialclasses</name>
-        <value> </value>
-        <description>
-            A list of credential class mapping for CredentialsProvider
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.actions.main.classnames</name>
-        <value>distcp=org.apache.hadoop.tools.DistCp</value>
-        <description>
-            A list of class name mappings for Action classes
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.WorkflowAppService.system.libpath</name>
-        <value>/user/${user.name}/share/lib</value>
-        <description>
-            System library path to use for workflow applications.
-            This path is added to workflow application if their job properties sets
-            the property 'oozie.use.system.libpath' to true.
-        </description>
-    </property>
-
-    <property>
-        <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
-        <value>false</value>
-        <description>
-            If set to true, submissions of MapReduce and Pig jobs will automatically
-            include the system library path, so users do not need to specify where
-            the Pig JAR files are. Instead, the ones from the system library path
-            are used.
-        </description>
-    </property>
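As the descriptions above note, a job opts into the system library path through the
'oozie.use.system.libpath' job property. A hedged sketch of a submission that sets it,
assuming the standard org.apache.oozie.client.OozieClient API; the Oozie URL and HDFS
paths are placeholders:

    import java.util.Properties;
    import org.apache.oozie.client.OozieClient;

    public class SubmitWithSystemLibpath {
        public static void main(String[] args) throws Exception {
            OozieClient oozie = new OozieClient("http://oozie-host:11000/oozie");
            Properties conf = oozie.createConfiguration();
            // Application path plus the libpath switch named in the description above.
            conf.setProperty(OozieClient.APP_PATH, "hdfs://namenode:8020/user/joe/app");
            conf.setProperty("oozie.use.system.libpath", "true");
            String jobId = oozie.run(conf); // submits and starts the workflow
            System.out.println("Workflow job id: " + jobId);
        }
    }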
-
-    <property>
-        <name>oozie.command.default.lock.timeout</name>
-        <value>5000</value>
-        <description>
-            Default timeout (in milliseconds) for commands for acquiring an exclusive lock on an entity.
-        </description>
-    </property>
-
-   <!-- LiteWorkflowStoreService, Workflow Action Automatic Retry -->
-
-    <property>
-        <name>oozie.service.LiteWorkflowStoreService.user.retry.max</name>
-        <value>3</value>
-        <description>
-            The default maximum automatic retry count for a workflow action is 3.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.LiteWorkflowStoreService.user.retry.inteval</name>
-        <value>10</value>
-        <description>
-            The automatic retry interval for a workflow action, in minutes. The default is 10 minutes.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.LiteWorkflowStoreService.user.retry.error.code</name>
-        <value>JA008,JA009,JA017,JA018,JA019,FS009,FS008</value>
-        <description>
-            Automatic retry for a workflow action is attempted for these specified error codes:
-            FS009 and FS008 are file-exists errors when using chmod in the fs action.
-            JA018 is an output-directory-exists error in the workflow map-reduce action.
-            JA019 is an error while executing the distcp action.
-            JA017 is a job-does-not-exist error in the action executor.
-            JA008 is a FileNotFoundException in the action executor.
-            JA009 is an IOException in the action executor.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.LiteWorkflowStoreService.user.retry.error.code.ext</name>
-        <value> </value>
-        <description>
-            Automatic retry for a workflow action is also attempted for these additional error codes.
-        </description>
-    </property>
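Taken together, the retry properties above amount to a simple policy: a bounded number
of attempts, a fixed interval in minutes, and a set of retryable error codes. A minimal
illustrative sketch of such a check (not Oozie's internal implementation):

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    public class ActionRetryPolicy {
        private final Set<String> retryableCodes;
        private final int maxRetries;       // ...user.retry.max
        private final long intervalMinutes; // ...user.retry.inteval (upstream spelling)

        public ActionRetryPolicy(String codesCsv, int maxRetries, long intervalMinutes) {
            this.retryableCodes = new HashSet<String>(Arrays.asList(codesCsv.split(",")));
            this.maxRetries = maxRetries;
            this.intervalMinutes = intervalMinutes;
        }

        // Retry only while under the attempt cap and the code is in the configured set.
        public boolean shouldRetry(String errorCode, int attemptsSoFar) {
            return attemptsSoFar < maxRetries && retryableCodes.contains(errorCode.trim());
        }

        public long nextDelayMillis() {
            return intervalMinutes * 60L * 1000L;
        }
    }

For the defaults above, the policy would be constructed as
new ActionRetryPolicy("JA008,JA009,JA017,JA018,JA019,FS009,FS008", 3, 10).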
-
-    <property>
-        <name>oozie.service.LiteWorkflowStoreService.node.def.version</name>
-        <value>_oozie_inst_v_1</value>
-        <description>
-            NodeDef default version, _oozie_inst_v_0 or _oozie_inst_v_1
-        </description>
-    </property>
-
-    <!-- Oozie Authentication -->
-
-    <property>
-        <name>oozie.authentication.type</name>
-        <value>simple</value>
-        <description>
-            Defines authentication used for Oozie HTTP endpoint.
-            Supported values are: simple | kerberos | #AUTHENTICATION_HANDLER_CLASSNAME#
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.authentication.token.validity</name>
-        <value>36000</value>
-        <description>
-            Indicates how long (in seconds) an authentication token is valid before it has
-            to be renewed.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.authentication.signature.secret</name>
-        <value>oozie</value>
-        <description>
-            The signature secret for signing the authentication tokens.
-            If not set, a random secret is generated at startup time.
-            For authentication to work correctly across multiple hosts,
-            the secret must be the same across all the hosts.
-        </description>
-    </property>
-
-    <property>
-      <name>oozie.authentication.cookie.domain</name>
-      <value></value>
-      <description>
-        The domain to use for the HTTP cookie that stores the authentication token.
-        For authentication to work correctly across multiple hosts,
-        the domain must be set correctly.
-      </description>
-    </property>
-
-    <property>
-        <name>oozie.authentication.simple.anonymous.allowed</name>
-        <value>true</value>
-        <description>
-            Indicates if anonymous requests are allowed when using 'simple' authentication.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.authentication.kerberos.principal</name>
-        <value>HTTP/localhost@${local.realm}</value>
-        <description>
-            Indicates the Kerberos principal to be used for HTTP endpoint.
-            The principal MUST start with 'HTTP/' as per Kerberos HTTP SPNEGO specification.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.authentication.kerberos.keytab</name>
-        <value>${oozie.service.HadoopAccessorService.keytab.file}</value>
-        <description>
-            Location of the keytab file with the credentials for the principal.
-            Referring to the same keytab file Oozie uses for its Kerberos credentials for Hadoop.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.authentication.kerberos.name.rules</name>
-        <value>DEFAULT</value>
-        <description>
-            The kerberos name rules are used to resolve kerberos principal names; refer to Hadoop's
-            KerberosName for more details.
-        </description>
-    </property>
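With oozie.authentication.type=simple and anonymous requests allowed, the HTTP endpoint
can be probed without credentials. A hedged sketch, assuming the standard Oozie admin
status endpoint; the host and port are placeholders:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class OozieStatusCheck {
        public static void main(String[] args) throws Exception {
            // No credentials needed under 'simple' auth with anonymous access allowed.
            URL url = new URL("http://oozie-host:11000/oozie/v1/admin/status");
            HttpURLConnection conn = (HttpURLConnection) url.openConnection();
            conn.setRequestMethod("GET");
            BufferedReader in =
                new BufferedReader(new InputStreamReader(conn.getInputStream(), "UTF-8"));
            String line;
            while ((line = in.readLine()) != null) {
                System.out.println(line); // e.g. {"systemMode":"NORMAL"}
            }
            in.close();
        }
    }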
-
-    <!-- Coordinator Actions default length -->
-    <property>
-        <name>oozie.coord.actions.default.length</name>
-        <value>1000</value>
-        <description>
-            Default number of coordinator actions to be retrieved by the info command
-        </description>
-    </property>
-
-    <!-- ForkJoin validation -->
-    <property>
-        <name>oozie.validate.ForkJoin</name>
-        <value>true</value>
-        <description>
-            If true, fork and join should be validated at workflow submission time.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.coord.action.get.all.attributes</name>
-        <value>false</value>
-        <description>
-            Setting this to true is not recommended, as coordinator job/action info will bring
-            all columns of the action into memory. Set it to true only if backward compatibility
-            for action/job info is required.
-        </description>
-    </property>
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/OOZIE/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/OOZIE/metainfo.xml
deleted file mode 100644
index 91cac6a..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/OOZIE/metainfo.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is comment for OOZIE service</comment>
-    <version>1.0</version>
-
-    <components>
-        <component>
-            <name>OOZIE_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>OOZIE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/PIG/configuration/pig.properties b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/PIG/configuration/pig.properties
deleted file mode 100644
index 01000b5..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/PIG/configuration/pig.properties
+++ /dev/null
@@ -1,52 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Pig default configuration file. All values can be overwritten by pig.properties and command line arguments.
-# see bin/pig -help
-
-# brief logging (no timestamps)
-brief=false
-
-#debug level, INFO is default
-debug=INFO
-
-#verbose print all log messages to screen (default to print only INFO and above to screen)
-verbose=false
-
-#exectype local|mapreduce, mapreduce is default
-exectype=mapreduce
-
-#Enable insertion of information about script into hadoop job conf 
-pig.script.info.enabled=true
-
-#Do not spill temp files smaller than this size (bytes)
-pig.spill.size.threshold=5000000
-#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
-#This should help reduce the number of files being spilled.
-pig.spill.gc.activation.size=40000000
-
-#the following two parameters are to help estimate the reducer number
-pig.exec.reducers.bytes.per.reducer=1000000000
-pig.exec.reducers.max=999
-
-#Temporary location to store the intermediate data.
-pig.temp.dir=/tmp/
-
-#Threshold for merging FRJoin fragment files
-pig.files.concatenation.threshold=100
-pig.optimistic.files.concatenation=false
-
-pig.disable.counter=false
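These defaults can be overridden per run; one way is to pass a Properties object when
embedding Pig. A sketch using the standard org.apache.pig.PigServer API (the overridden
values and HDFS paths are placeholders):

    import java.util.Properties;
    import org.apache.pig.ExecType;
    import org.apache.pig.PigServer;

    public class PigPropsOverride {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            // Override two of the defaults listed above.
            props.setProperty("pig.exec.reducers.max", "500");
            props.setProperty("pig.temp.dir", "/user/joe/pigtmp");

            PigServer pig = new PigServer(ExecType.MAPREDUCE, props);
            pig.registerQuery("A = LOAD '/data/input' USING PigStorage();");
            pig.store("A", "/data/output"); // runs the job with the overridden settings
        }
    }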
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/PIG/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/PIG/metainfo.xml
deleted file mode 100644
index c89afa7..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/PIG/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is comment for PIG service</comment>
-    <version>1.0</version>
-
-    <components>
-        <component>
-            <name>PIG</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/SQOOP/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/SQOOP/metainfo.xml
deleted file mode 100644
index 4d80811..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/SQOOP/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is comment for SQOOP service</comment>
-    <version>1.0</version>
-
-    <components>
-        <component>
-            <name>SQOOP</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/TEMPLETON/configuration/templeton-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/TEMPLETON/configuration/templeton-site.xml
deleted file mode 100644
index 2db7605..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/TEMPLETON/configuration/templeton-site.xml
+++ /dev/null
@@ -1,217 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<!-- The default settings for Templeton. -->
-<!-- Edit templeton-site.xml to change settings for your local -->
-<!-- install. -->
-
-<configuration>
-
-  <property>
-    <name>templeton.port</name>
-    <value>50111</value>
-    <description>The HTTP port for the main server.</description>
-  </property>
-
-  <property>
-    <name>templeton.hadoop.conf.dir</name>
-    <value>${env.HADOOP_CONF_DIR}</value>
-    <description>The path to the Hadoop configuration.</description>
-  </property>
-
-  <property>
-    <name>templeton.jar</name>
-    <value>${env.TEMPLETON_HOME}/templeton-0.1.0-dev.jar</value>
-    <description>The path to the Templeton jar file.</description>
-  </property>
-
-  <property>
-    <name>templeton.libjars</name>
-    <value>${env.TEMPLETON_HOME}/lib/zookeeper-3.3.4.jar</value>
-    <description>Jars to add to the classpath.</description>
-  </property>
-
-  <property>
-    <name>templeton.override.jars</name>
-    <value>hdfs:///user/templeton/ugi.jar</value>
-    <description>
-        Jars to add to the HADOOP_CLASSPATH for all Map Reduce jobs.
-        This is a list of jars that must exist on hdfs; they are added
-        to the Distributed Cache.
-    </description>
-  </property>
-
-  <property>
-    <name>templeton.override.enabled</name>
-    <value>true</value>
-    <description>
-      Enable the override path in templeton.override.jars
-    </description>
-  </property>
-
-  <property>
-    <name>templeton.streaming.jar</name>
-    <value>hdfs:///user/templeton/hadoop-streaming.jar</value>
-    <description>The hdfs path to the Hadoop streaming jar file.</description>
-  </property>
-
-  <property>
-    <name>templeton.hadoop</name>
-    <value>${env.HADOOP_PREFIX}/bin/hadoop</value>
-    <description>The path to the Hadoop executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.python</name>
-    <value>${env.PYTHON_CMD}</value>
-    <description>The path to the python executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.archive</name>
-    <value>hdfs:///user/templeton/pig-0.9.2.tar.gz</value>
-    <description>The path to the Pig archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.path</name>
-    <value>pig-0.9.2.tar.gz/pig-0.9.2/bin/pig</value>
-    <description>The path to the Pig executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hcat</name>
-    <value>${env.HCAT_PREFIX}/bin/hcat</value>
-    <description>The path to the hcatalog executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.archive</name>
-    <value>hdfs:///user/templeton/hcatalog-0.3.0.tar.gz</value>
-    <description>The path to the Hive archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.path</name>
-    <value>hcatalog-0.3.0.tar.gz/hcatalog-0.3.0/bin/hive</value>
-    <description>The path to the Hive executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.properties</name>
-    <value>hive.metastore.local=false,hive.metastore.uris=thrift://localhost:9933,hive.metastore.sasl.enabled=false</value>
-    <description>Properties to set when running hive.</description>
-  </property>
-
-  <property>
-    <name>templeton.exec.encoding</name>
-    <value>UTF-8</value>
-    <description>The encoding of the stdout and stderr data.</description>
-  </property>
-
-  <property>
-    <name>templeton.exec.timeout</name>
-    <value>10000</value>
-    <description>
-      How long in milliseconds a program is allowed to run on the
-      Templeton box.
-    </description>
-  </property>
-
-  <property>
-    <name>templeton.exec.max-procs</name>
-    <value>16</value>
-    <description>The maximum number of processes allowed to run at once.</description>
-  </property>
-
-  <property>
-    <name>templeton.exec.max-output-bytes</name>
-    <value>1048576</value>
-    <description>
-      The maximum number of bytes from stdout or stderr stored in RAM.
-    </description>
-  </property>
-
-  <property>
-    <name>templeton.exec.envs</name>
-    <value>HADOOP_PREFIX,HADOOP_HOME,JAVA_HOME</value>
-    <description>The environment variables passed through to exec.</description>
-  </property>
-
-  <property>
-    <name>templeton.zookeeper.hosts</name>
-    <value>127.0.0.1:2181</value>
-    <description>ZooKeeper servers, as comma separated host:port pairs</description>
-  </property>
-
-  <property>
-    <name>templeton.zookeeper.session-timeout</name>
-    <value>30000</value>
-    <description>ZooKeeper session timeout in milliseconds</description>
-  </property>
-
-  <property>
-    <name>templeton.callback.retry.interval</name>
-    <value>10000</value>
-    <description>How long to wait between callback retry attempts in milliseconds</description>
-  </property>
-
-  <property>
-    <name>templeton.callback.retry.attempts</name>
-    <value>5</value>
-    <description>How many times to retry the callback</description>
-  </property>
-
-  <property>
-    <name>templeton.storage.class</name>
-    <value>org.apache.hcatalog.templeton.tool.HDFSStorage</value>
-    <description>The class to use as storage</description>
-  </property>
-
-  <property>
-    <name>templeton.storage.root</name>
-    <value>/templeton-hadoop</value>
-    <description>The path to the directory to use for storage</description>
-  </property>
-
-  <property>
-    <name>templeton.hdfs.cleanup.interval</name>
-    <value>43200000</value>
-    <description>The maximum delay between a thread's cleanup checks</description>
-  </property>
-
-  <property>
-    <name>templeton.hdfs.cleanup.maxage</name>
-    <value>604800000</value>
-    <description>The maximum age of a templeton job</description>
-  </property>
-
-  <property>
-    <name>templeton.zookeeper.cleanup.interval</name>
-    <value>43200000</value>
-    <description>The maximum delay between a thread's cleanup checks</description>
-  </property>
-
-  <property>
-    <name>templeton.zookeeper.cleanup.maxage</name>
-    <value>604800000</value>
-    <description>The maximum age of a templeton job</description>
-  </property>
-
-</configuration>
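Since templeton.port above exposes a plain HTTP REST server, a quick liveness probe
needs no client library. A hedged sketch, assuming the standard /templeton/v1/status
endpoint; the host name is a placeholder:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class TempletonStatusCheck {
        public static void main(String[] args) throws Exception {
            URL url = new URL("http://templeton-host:50111/templeton/v1/status");
            HttpURLConnection conn = (HttpURLConnection) url.openConnection();
            conn.setRequestMethod("GET");
            BufferedReader in =
                new BufferedReader(new InputStreamReader(conn.getInputStream(), "UTF-8"));
            String line;
            while ((line = in.readLine()) != null) {
                System.out.println(line); // expected: {"status":"ok","version":"v1"}
            }
            in.close();
        }
    }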
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/TEMPLETON/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/TEMPLETON/metainfo.xml
deleted file mode 100644
index 8df57b6..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/TEMPLETON/metainfo.xml
+++ /dev/null
@@ -1,37 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is comment for TEMPLETON service</comment>
-    <version>1.0</version>
-
-
-    <components>
-        <component>
-            <name>TEMPLETON_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>TEMPLETON_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/ZOOKEEPER/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/ZOOKEEPER/metainfo.xml
deleted file mode 100644
index 7115199..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/0.1/services/ZOOKEEPER/metainfo.xml
+++ /dev/null
@@ -1,37 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is comment for ZOOKEEPER service</comment>
-    <version>1.0</version>
-
-
-    <components>
-        <component>
-            <name>ZOOKEEPER_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>ZOOKEEPER_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/repos/repoinfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/repos/repoinfo.xml
deleted file mode 100644
index 54b0644..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/repos/repoinfo.xml
+++ /dev/null
@@ -1,97 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<reposinfo>
-  <os type="centos6">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.2.0/repos/centos6</baseurl>
-      <repoid>HDP-1.2.0</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-    <repo>
-      <baseurl></baseurl>
-      <repoid>HDP-epel</repoid>
-      <reponame>HDP-epel</reponame>
-      <mirrorslist><![CDATA[http://mirrors.fedoraproject.org/mirrorlist?repo=epel-6&arch=$basearch]]></mirrorslist>
-    </repo>
-  </os>
-  <os type="centos5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.2.0/repos/centos5</baseurl>
-      <repoid>HDP-1.2.0</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-    <repo>
-      <baseurl></baseurl>
-      <repoid>HDP-epel</repoid>
-      <reponame>HDP-epel</reponame>
-      <mirrorslist><![CDATA[http://mirrors.fedoraproject.org/mirrorlist?repo=epel-5&arch=$basearch]]></mirrorslist>
-    </repo>
-  </os>
-  <os type="redhat6">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.2.0/repos/centos6</baseurl>
-      <repoid>HDP-1.2.0</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-    <repo>
-      <baseurl></baseurl>
-      <repoid>HDP-epel</repoid>
-      <reponame>HDP-epel</reponame>
-      <mirrorslist><![CDATA[http://mirrors.fedoraproject.org/mirrorlist?repo=epel-6&arch=$basearch]]></mirrorslist>
-    </repo>
-  </os>
-  <os type="redhat5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.2.0/repos/centos5</baseurl>
-      <repoid>HDP-1.2.0</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-    <repo>
-      <baseurl></baseurl>
-      <repoid>HDP-epel</repoid>
-      <reponame>HDP-epel</reponame>
-      <mirrorslist><![CDATA[http://mirrors.fedoraproject.org/mirrorlist?repo=epel-5&arch=$basearch]]></mirrorslist>
-    </repo>
-  </os>
-  <os type="suse11">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.2.0/repos/suse11</baseurl>
-      <repoid>HDP-1.2.0</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/suse11</baseurl>
-      <repoid>HDP-UTILS-1.1.0.15</repoid>
-      <reponame>HDP-UTILS</reponame>
-      <mirrorslist></mirrorslist>
-    </repo>
-  </os>
-    <os type="sles11">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.2.0/repos/suse11</baseurl>
-      <repoid>HDP-1.2.0</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/suse11</baseurl>
-      <repoid>HDP-UTILS-1.1.0.15</repoid>
-      <reponame>HDP-UTILS</reponame>
-      <mirrorslist></mirrorslist>
-    </repo>
-  </os>
-</reposinfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/GANGLIA/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/GANGLIA/metainfo.xml
deleted file mode 100644
index 0b21f0f..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/GANGLIA/metainfo.xml
+++ /dev/null
@@ -1,40 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Ganglia Metrics Collection system</comment>
-    <version>3.2.0</version>
-
-    <components>
-        <component>
-            <name>GANGLIA_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>GANGLIA_MONITOR</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>MONITOR_WEBSERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/HBASE/configuration/hbase-policy.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/HBASE/configuration/hbase-policy.xml
deleted file mode 100644
index e45f23c..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/HBASE/configuration/hbase-policy.xml
+++ /dev/null
@@ -1,53 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HRegionInterface protocol implementations (i.e.
-    clients talking to HRegionServers).
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For example, "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.admin.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterInterface protocol implementation (i.e.
-    clients talking to HMaster for admin operations).
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For example, "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.masterregion.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterRegionInterface protocol implementations
-    (for HRegionServers communicating with HMaster).
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For example, "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/HBASE/configuration/hbase-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/HBASE/configuration/hbase-site.xml
deleted file mode 100644
index c4b3651..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/HBASE/configuration/hbase-site.xml
+++ /dev/null
@@ -1,334 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>hbase.rootdir</name>
-    <value></value>
-    <description>The directory shared by region servers and into
-    which HBase persists.  The URL should be 'fully-qualified'
-    to include the filesystem scheme.  For example, to specify the
-    HDFS directory '/hbase' where the HDFS instance's namenode is
-    running at namenode.example.org on port 9000, set this value to:
-    hdfs://namenode.example.org:9000/hbase.  By default HBase writes
-    into /tmp.  Change this configuration, or all data will be lost
-    on machine restart.
-    </description>
-  </property>
-  <property>
-    <name>hbase.cluster.distributed</name>
-    <value>true</value>
-    <description>The mode the cluster will be in. Possible values are
-      false for standalone mode and true for distributed mode.  If
-      false, startup will run all HBase and ZooKeeper daemons together
-      in the one JVM.
-    </description>
-  </property>
-  <property>
-    <name>hbase.tmp.dir</name>
-    <value></value>
-    <description>Temporary directory on the local filesystem.
-    Change this setting to point to a location more permanent
-    than '/tmp' (The '/tmp' directory is often cleared on
-    machine restart).
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.bindAddress</name>
-    <value></value>
-    <description>The bind address for the HBase Master web UI
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.global.memstore.upperLimit</name>
-    <value></value>
-    <description>Maximum size of all memstores in a region server before new
-      updates are blocked and flushes are forced. Defaults to 40% of heap
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.handler.count</name>
-    <value></value>
-    <description>Count of RPC Listener instances spun up on RegionServers.
-    Same property is used by the Master for count of master handlers.
-    Default is 10.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.majorcompaction</name>
-    <value></value>
-    <description>The time (in milliseconds) between 'major' compactions of all
-    HStoreFiles in a region.  Default: 1 day.
-    Set to 0 to disable automated major compactions.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.lease.thread.wakefrequency</name>
-    <value>3000</value>
-    <description>The interval between checks for expired region server leases.
-    This value has been reduced due to the other reduced values above so that
-    the master will notice a dead region server sooner. The default is 15 seconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.global.memstore.lowerLimit</name>
-    <value></value>
-    <description>When memstores are being forced to flush to make room in
-      memory, keep flushing until we hit this mark. Defaults to 35% of heap.
-      This value equal to hbase.regionserver.global.memstore.upperLimit causes
-      the minimum possible flushing to occur when updates are blocked due to
-      memstore limiting.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.block.multiplier</name>
-    <value></value>
-    <description>Block updates if the memstore reaches hbase.hregion.memstore.block.multiplier
-    times hbase.hregion.flush.size bytes (for example, a multiplier of 2 with a
-    128 MB flush size blocks updates at 256 MB).  Useful for preventing
-    runaway memstore growth during spikes in update traffic.  Without an
-    upper bound, the memstore fills such that when it flushes, the
-    resultant flush files take a long time to compact or split, or,
-    worse, we OOME.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.flush.size</name>
-    <value></value>
-    <description>
-    Memstore will be flushed to disk if size of the memstore
-    exceeds this number of bytes.  Value is checked by a thread that runs
-    every hbase.server.thread.wakefrequency.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.mslab.enabled</name>
-    <value></value>
-    <description>
-      Enables the MemStore-Local Allocation Buffer,
-      a feature which works to prevent heap fragmentation under
-      heavy write loads. This can reduce the frequency of stop-the-world
-      GC pauses on large heaps.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.max.filesize</name>
-    <value></value>
-    <description>
-    Maximum HStoreFile size. If any one of a column families' HStoreFiles has
-    grown to exceed this value, the hosting HRegion is split in two.
-    Default: 1G.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.scanner.caching</name>
-    <value></value>
-    <description>Number of rows that will be fetched when calling next
-    on a scanner if it is not served from (local, client) memory. Higher
-    caching values will enable faster scanners but will eat up more memory,
-    and some calls of next may take longer when the cache is empty.
-    Do not set this value such that the time between invocations is greater
-    than the scanner timeout; i.e. hbase.regionserver.lease.period
-    </description>
-  </property>
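The trade-off described above (rows per RPC versus memory and the scanner lease) is set
per scan in the client API. A sketch against the 0.94-era HBase client; the table name
and caching value are placeholders:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;

    public class ScannerCachingExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            HTable table = new HTable(conf, "mytable");
            Scan scan = new Scan();
            // Fetch 100 rows per RPC instead of the cluster-wide default; keep the
            // per-batch processing time below the region server lease period.
            scan.setCaching(100);
            ResultScanner scanner = table.getScanner(scan);
            try {
                for (Result row : scanner) {
                    System.out.println(row);
                }
            } finally {
                scanner.close();
                table.close();
            }
        }
    }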
-  <property>
-    <name>zookeeper.session.timeout</name>
-    <value></value>
-    <description>ZooKeeper session timeout.
-      HBase passes this to the zk quorum as suggested maximum time for a
-      session (This setting becomes zookeeper's 'maxSessionTimeout').  See
-      http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
-      "The client sends a requested timeout, the server responds with the
-      timeout that it can give the client. " In milliseconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.keyvalue.maxsize</name>
-    <value></value>
-    <description>Specifies the combined maximum allowed size of a KeyValue
-    instance. This is to set an upper boundary for a single entry saved in a
-    storage file. Since entries cannot be split, this helps avoid a situation where
-    a region cannot be split any further because a single entry is too large. It
-    seems wise to set this to a fraction of the maximum region size. Setting it to
-    zero or less disables the check.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.compactionThreshold</name>
-    <value></value>
-    <description>
-    If more than this number of HStoreFiles in any one HStore
-    (one HStoreFile is written per flush of memstore) then a compaction
-    is run to rewrite all HStoreFiles as one.  Larger numbers
-    put off compaction but when it runs, it takes longer to complete.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.blockingStoreFiles</name>
-    <value></value>
-    <description>
-    If more than this number of StoreFiles in any one Store
-    (one StoreFile is written per flush of MemStore) then updates are
-    blocked for this HRegion until a compaction is completed, or
-    until hbase.hstore.blockingWaitTime has been exceeded.
-    </description>
-  </property>
-  <property>
-    <name>hfile.block.cache.size</name>
-    <value></value>
-    <description>
-        Percentage of maximum heap (-Xmx setting) to allocate to block cache
-        used by HFile/StoreFile. The default of 0.25 means allocate 25%.
-        Set to 0 to disable, but this is not recommended.
-    </description>
-  </property>
-
-  <!-- The following properties configure authentication information for
-       HBase processes when using Kerberos security.  There are no default
-       values, included here for documentation purposes -->
-  <property>
-    <name>hbase.master.keytab.file</name>
-    <value></value>
-    <description>Full path to the kerberos keytab file to use for logging in
-    the configured HMaster server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.kerberos.principal</name>
-    <value></value>
-    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
-    that should be used to run the HMaster process.  The principal name should
-    be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the hostname
-    portion, it will be replaced with the actual hostname of the running
-    instance.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.keytab.file</name>
-    <value></value>
-    <description>Full path to the kerberos keytab file to use for logging in
-    the configured HRegionServer server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.kerberos.principal</name>
-    <value></value>
-    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
-    that should be used to run the HRegionServer process.  The principal name
-    should be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the
-    hostname portion, it will be replaced with the actual hostname of the
-    running instance.  An entry for this principal must exist in the file
-    specified in hbase.regionserver.keytab.file
-    </description>
-  </property>
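Both principal descriptions above rely on the same '_HOST' convention. A minimal
illustrative sketch of that substitution (not the actual Hadoop/HBase implementation):

    import java.net.InetAddress;

    public class PrincipalHostSubstitution {
        // Expands the "_HOST" token as the descriptions above explain, e.g.
        // "hbase/_HOST@EXAMPLE.COM" -> "hbase/node1.example.com@EXAMPLE.COM".
        public static String expand(String principal) throws Exception {
            String fqdn = InetAddress.getLocalHost().getCanonicalHostName();
            return principal.replace("_HOST", fqdn.toLowerCase());
        }

        public static void main(String[] args) throws Exception {
            System.out.println(expand("hbase/_HOST@EXAMPLE.COM"));
        }
    }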
-
-  <!-- Additional configuration specific to HBase security -->
-  <property>
-    <name>hbase.superuser</name>
-    <value>hbase</value>
-    <description>List of users or groups (comma-separated), who are allowed
-    full privileges, regardless of stored ACLs, across the cluster.
-    Only used when HBase security is enabled.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.region.classes</name>
-    <value></value>
-    <description>A comma-separated list of Coprocessors that are loaded by
-    default on all tables. For any override coprocessor method, these classes
-    will be called in order. After implementing your own Coprocessor, just put
-    it in HBase's classpath and add the fully qualified class name here.
-    A coprocessor can also be loaded on demand by setting HTableDescriptor.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.master.classes</name>
-    <value></value>
-    <description>A comma-separated list of
-      org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
-      loaded by default on the active HMaster process. For any implemented
-      coprocessor methods, the listed classes will be called in order. After
-      implementing your own MasterObserver, just put it in HBase's classpath
-      and add the fully qualified class name here.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.zookeeper.property.clientPort</name>
-    <value>2181</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-    The port at which the clients will connect.
-    </description>
-  </property>
-
-  <!--
-  The following three properties are used together to create the list of
-  host:peer_port:leader_port quorum servers for ZooKeeper.
-  -->
-  <property>
-    <name>hbase.zookeeper.quorum</name>
-    <value></value>
-    <description>Comma separated list of servers in the ZooKeeper Quorum.
-    For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
-    By default this is set to localhost for local and pseudo-distributed modes
-    of operation. For a fully-distributed setup, this should be set to a full
-    list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
-    this is the list of servers which we will start/stop ZooKeeper on.
-    </description>
-  </property>
-  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
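On the client side, the quorum and client port above are the two settings needed to
reach the cluster. A sketch using HBaseConfiguration; the host names mirror the example
in the description:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuorumClientConfig {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.set("hbase.zookeeper.quorum",
                "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com");
            conf.set("hbase.zookeeper.property.clientPort", "2181");
            System.out.println("Quorum: " + conf.get("hbase.zookeeper.quorum"));
        }
    }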
-
-  <property>
-    <name>dfs.support.append</name>
-    <value></value>
-    <description>Does HDFS allow appends to files?
-    This is an hdfs config, set here so the hdfs client will do append support.
-    You must ensure that this config is true server-side too when running hbase
-    (you will have to restart your cluster after setting it).
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.client.read.shortcircuit</name>
-    <value></value>
-    <description>Enable/Disable short circuit read for your client.
-    Hadoop servers should be configured to allow short circuit read
-    for the hbase user for this to take effect
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.client.read.shortcircuit.skip.checksum</name>
-    <value></value>
-    <description>Enable/disable skipping the checksum check</description>
-  </property>
-
-  <property>
-    <name>hbase.regionserver.optionalcacheflushinterval</name>
-    <value>10000</value>
-    <description>
-      Amount of time (in milliseconds) to wait since the last time a region was
-      flushed before invoking an optional cache flush. The default is 60,000.
-    </description>
-  </property>
-
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/HBASE/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/HBASE/metainfo.xml
deleted file mode 100644
index c91d9f0..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/HBASE/metainfo.xml
+++ /dev/null
@@ -1,40 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>mapred</user>
-    <comment>Non-relational distributed database and centralized service for configuration management &amp; synchronization</comment>
-    <version>0.94.2</version>
-
-    <components>
-        <component>
-            <name>HBASE_MASTER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>HBASE_REGIONSERVER</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>HBASE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/HCATALOG/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/HCATALOG/metainfo.xml
deleted file mode 100644
index 1951a5d..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/HCATALOG/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is comment for HCATALOG service</comment>
-    <version>0.5.0</version>
-
-    <components>
-        <component>
-            <name>HCAT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/HDFS/configuration/core-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/HDFS/configuration/core-site.xml
deleted file mode 100644
index a312e68..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/HDFS/configuration/core-site.xml
+++ /dev/null
@@ -1,251 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
- <!--
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
- 
-        http://www.apache.org/licenses/LICENSE-2.0
- 
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
- -->
- 
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-<!-- i/o properties -->
-
-  <property>
-    <name>io.file.buffer.size</name>
-    <value>131072</value>
-    <description>The size of buffer for use in sequence files.
-  The size of this buffer should probably be a multiple of hardware
-  page size (4096 on Intel x86), and it determines how much data is
-  buffered during read and write operations.</description>
-  </property>
-
-  <property>
-    <name>io.serializations</name>
-    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
-  </property>
-
-  <property>
-    <name>io.compression.codecs</name>
-    <value></value>
-    <description>A list of the compression codec classes that can be used
-                 for compression/decompression.</description>
-  </property>
-
-  <property>
-    <name>io.compression.codec.lzo.class</name>
-    <value>com.hadoop.compression.lzo.LzoCodec</value>
-    <description>The implementation for lzo codec.</description>
-  </property>
-
-<!-- file system properties -->
-
-  <property>
-    <name>fs.default.name</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>The name of the default file system.  Either the
-  literal string "local" or a host:port for NDFS.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>fs.trash.interval</name>
-    <value>360</value>
-    <description>Number of minutes between trash checkpoints.
-  If zero, the trash feature is disabled.
-  </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.dir</name>
-    <value></value>
-    <description>Determines where on the local filesystem the DFS secondary
-        name node should store the temporary images to merge.
-        If this is a comma-delimited list of directories then the image is
-        replicated in all of the directories for redundancy.
-    </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.edits.dir</name>
-    <value>${fs.checkpoint.dir}</value>
-    <description>Determines where on the local filesystem the DFS secondary
-        name node should store the temporary edits to merge.
-        If this is a comma-delimited list of directoires then teh edits is
-        replicated in all of the directoires for redundancy.
-        Default value is same as fs.checkpoint.dir
-    </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.period</name>
-    <value>21600</value>
-    <description>The number of seconds between two periodic checkpoints.
-  </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.size</name>
-    <value>536870912</value>
-    <description>The size of the current edit log (in bytes) that triggers
-       a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
-  </description>
-  </property>
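
Read together, fs.checkpoint.period and fs.checkpoint.size mean the secondary namenode checkpoints when either threshold is crossed, whichever comes first. A minimal sketch of a tuned pair of values (the numbers below are illustrative assumptions, not the stack's values above):

    <!-- Illustrative sketch only: checkpoint every hour (3600 s), or sooner
         if the edit log reaches 256 MB (268435456 bytes), whichever comes first. -->
    <property>
      <name>fs.checkpoint.period</name>
      <value>3600</value>
    </property>
    <property>
      <name>fs.checkpoint.size</name>
      <value>268435456</value>
    </property>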
-
-  <!-- ipc properties: copied from kryptonite configuration -->
-  <property>
-    <name>ipc.client.idlethreshold</name>
-    <value>8000</value>
-    <description>Defines the threshold number of connections after which
-               connections will be inspected for idleness.
-  </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connection.maxidletime</name>
-    <value>30000</value>
-    <description>The maximum time after which a client will bring down the
-               connection to the server.
-  </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connect.max.retries</name>
-    <value>50</value>
-    <description>Defines the maximum number of retries for IPC connections.</description>
-  </property>
-
-  <!-- Web Interface Configuration -->
-  <property>
-    <name>webinterface.private.actions</name>
-    <value>false</value>
-    <description> If set to true, the web interfaces of JT and NN may contain
-                actions, such as kill job, delete file, etc., that should
-                not be exposed to the public. Enable this option if the interfaces
-                are only reachable by those who have the right authorization.
-  </description>
-  </property>
-
- <property>
-   <name>hadoop.security.authentication</name>
-   <value></value>
-   <description>
-   Set the authentication for the cluster. Valid values are: simple or
-   kerberos.
-   </description>
- </property>
-<property>
-  <name>hadoop.security.authorization</name>
-  <value></value>
-  <description>
-     Enable authorization for different protocols.
-  </description>
-</property>
-
-  <property>
-    <name>hadoop.security.auth_to_local</name>
-    <value></value>
-<description>The mapping from Kerberos principal names to local OS user names.
-  The default rule is just "DEFAULT", which takes all principals in your default domain to their first component:
-  both "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" become "omalley", if your default domain is APACHE.ORG.
-The translations rules have 3 sections:
-      base     filter    substitution
-The base consists of a number that represents the number of components in the principal name excluding the realm and the pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
-
-[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
-[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
-[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
-
-The filter is a regex in parens that must match the generated string for the rule to apply.
-
-"(.*%admin)" will take any string that ends in "%admin"
-"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
-
-Finally, the substitution is a sed rule to translate a regex into a fixed string.
-
-"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
-"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
-"s/X/Y/g" replaces all of the "X" in the name with "Y"
-
-So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
-
-RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
-DEFAULT
-
-To also translate the names with a second component, you'd make the rules:
-
-RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
-RULE:[2:$1@$0](.*@ACME.COM)s/@.*//
-DEFAULT
-
-If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
-
-RULE:[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/
-DEFAULT
-    </description>
-  </property>
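
As a minimal sketch of the rule syntax described above (the EXAMPLE.COM realm and the service principals are assumptions for illustration, not values from this file), a populated hadoop.security.auth_to_local could look like:

    <property>
      <name>hadoop.security.auth_to_local</name>
      <value>
        RULE:[2:$1@$0](jt@EXAMPLE.COM)s/.*/mapred/
        RULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/
        DEFAULT
      </value>
    </property>
    <!-- Under these rules, "nn/host1.example.com@EXAMPLE.COM" builds the string
         "nn@EXAMPLE.COM", matches the second filter, and maps to "hdfs";
         "alice@EXAMPLE.COM" matches no RULE and falls through to DEFAULT,
         which strips the realm and yields "alice". -->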
-
-<!--
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("hcat_user").groups</name>
-  <value></value>
-  <description>
-     Proxy group for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("hcat_user").hosts</name>
-  <value></value>
-  <description>
-     Proxy host for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("oozie_user").groups</name>
-  <value></value>
-  <description>
-     Proxy group for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("oozie_user").hosts</name>
-  <value></value>
-  <description>
-     Proxy host for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("templeton_user").groups</name>
-  <value></value>
-  <description>
-    Proxy group for templeton.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("templeton_user").hosts</name>
-  <value></value>
-  <description>
-    Proxy host for templeton.
-  </description>
-</property>
--->
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/HDFS/configuration/hadoop-policy.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/HDFS/configuration/hadoop-policy.xml
deleted file mode 100644
index 900da99..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/HDFS/configuration/hadoop-policy.xml
+++ /dev/null
@@ -1,134 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientProtocol, which is used by user code
-    via the DistributedFileSystem.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
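
The ACL syntax described above can be made concrete with a short sketch; the user and group names below are hypothetical, borrowed from the description's own example:

    <!-- Illustrative sketch only: allow the users "alice" and "bob" plus the
         groups "users" and "wheel". Users and groups are comma-separated, and
         the two lists are separated by a single blank. -->
    <property>
      <name>security.client.protocol.acl</name>
      <value>alice,bob users,wheel</value>
    </property>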
-
-  <property>
-    <name>security.client.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
-    for block recovery.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for DatanodeProtocol, which is used by datanodes to
-    communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
-    for updating generation timestamp.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.namenode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for NamenodeProtocol, the protocol used by the secondary
-    namenode to communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.tracker.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterTrackerProtocol, used by the tasktrackers to
-    communicate with the jobtracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.submission.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for JobSubmissionProtocol, used by job clients to
-    communicate with the jobtracker for job submission, querying job status etc.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.task.umbilical.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
-    tasks to communicate with the parent tasktracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
- <property>
-    <name>security.admin.operations.protocol.acl</name>
-    <value></value>
-    <description>ACL for AdminOperationsProtocol. Used for admin commands.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
-    <value></value>
-    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
-    users mappings. The ACL is a comma-separated list of user and
-    group names. The user and group list is separated by a blank. For
-    e.g. "alice,bob users,wheel".  A special value of "*" means all
-    users are allowed.</description>
-  </property>
-
-<property>
-    <name>security.refresh.policy.protocol.acl</name>
-    <value></value>
-    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
-    dfsadmin and mradmin commands to refresh the security policy in-effect.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/HDFS/configuration/hdfs-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/HDFS/configuration/hdfs-site.xml
deleted file mode 100644
index db92d4b..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/HDFS/configuration/hdfs-site.xml
+++ /dev/null
@@ -1,415 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-<!-- file system properties -->
-
-  <property>
-    <name>dfs.name.dir</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>Determines where on the local filesystem the DFS name node
-      should store the name table.  If this is a comma-delimited list
-      of directories then the name table is replicated in all of the
-      directories, for redundancy. </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.support.append</name>
-    <value></value>
-    <description>to enable dfs append</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.webhdfs.enabled</name>
-    <value></value>
-    <description>to enable webhdfs</description>
-    <final>true</final>
-  </property>
-
- <property>
-    <name>dfs.datanode.socket.write.timeout</name>
-    <value>0</value>
-    <description>DFS Client write socket timeout</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value></value>
-    <description>Number of failed disks a DataNode would tolerate</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.block.local-path-access.user</name>
-    <value></value>
-    <description>the user who is allowed to perform short
-    circuit reads.
-    </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.data.dir</name>
-    <value></value>
-    <description>Determines where on the local filesystem a DFS data node
-  should store its blocks.  If this is a comma-delimited
-  list of directories, then data will be stored in all named
-  directories, typically on different devices.
-  Directories that do not exist are ignored.
-  </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.hosts.exclude</name>
-    <value></value>
-    <description>Names a file that contains a list of hosts that are
-    not permitted to connect to the namenode.  The full pathname of the
-    file must be specified.  If the value is empty, no hosts are
-    excluded.</description>
-  </property>
-
-  <property>
-    <name>dfs.hosts</name>
-    <value></value>
-    <description>Names a file that contains a list of hosts that are
-    permitted to connect to the namenode. The full pathname of the file
-    must be specified.  If the value is empty, all hosts are
-    permitted.</description>
-  </property>
-
-  <property>
-    <name>dfs.replication.max</name>
-    <value>50</value>
-    <description>Maximal block replication.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.replication</name>
-    <value></value>
-    <description>Default block replication.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.heartbeat.interval</name>
-    <value>3</value>
-    <description>Determines datanode heartbeat interval in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.safemode.threshold.pct</name>
-    <value>1.0f</value>
-    <description>
-        Specifies the percentage of blocks that should satisfy
-        the minimal replication requirement defined by dfs.replication.min.
-        Values less than or equal to 0 mean not to start in safe mode.
-        Values greater than 1 will make safe mode permanent.
-        </description>
-  </property>
-
-  <property>
-    <name>dfs.balance.bandwidthPerSec</name>
-    <value>6250000</value>
-    <description>
-        Specifies the maximum amount of bandwidth that each datanode
-        can utilize for the balancing purpose in terms of
-        the number of bytes per second.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.address</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>dfs.datanode.http.address</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>dfs.block.size</name>
-    <value>134217728</value>
-    <description>The default block size for new files.</description>
-  </property>
-
-  <property>
-    <name>dfs.http.address</name>
-    <value></value>
-    <description>The address and the base port where the dfs namenode
-    web UI will listen on.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.du.reserved</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.ipc.address</name>
-    <value>0.0.0.0:8010</value>
-    <description>
-    The datanode ipc server address and port.
-    If the port is 0 then the server will start on a free port.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.blockreport.initialDelay</name>
-    <value>120</value>
-    <description>Delay for first block report in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.du.pct</name>
-    <value>0.85f</value>
-    <description>When calculating remaining space, only use this percentage of the real available space
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>40</value>
-    <description>The number of server threads for the namenode.</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.max.xcievers</name>
-    <value>4096</value>
-    <description>PRIVATE CONFIG VARIABLE</description>
-  </property>
-
-<!-- Permissions configuration -->
-
-  <property>
-    <name>dfs.umaskmode</name>
-    <value>077</value>
-    <description>
-    The octal umask used when creating files and directories.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.web.ugi</name>
-    <!-- cluster variant -->
-    <value>gopher,gopher</value>
-    <description>The user account used by the web interface.
-    Syntax: USERNAME,GROUP1,GROUP2, ...
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.permissions</name>
-    <value>true</value>
-    <description>
-    If "true", enable permission checking in HDFS.
-    If "false", permission checking is turned off,
-    but all other behavior is unchanged.
-    Switching from one parameter value to the other does not change the mode,
-    owner or group of files or directories.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.permissions.supergroup</name>
-    <value>hdfs</value>
-    <description>The name of the group of super-users.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>100</value>
-    <description>Added to grow Queue size so that more client connections are allowed</description>
-  </property>
-
-  <property>
-    <name>ipc.server.max.response.size</name>
-    <value>5242880</value>
-  </property>
-
-  <property>
-    <name>dfs.block.access.token.enable</name>
-    <value>true</value>
-    <description>
-    If "true", access tokens are used as capabilities for accessing datanodes.
-    If "false", no access tokens are checked on accessing datanodes.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.kerberos.principal</name>
-    <value></value>
-    <description>
-    Kerberos principal name for the NameNode
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.principal</name>
-    <value></value>
-    <description>
-        Kerberos principal name for the secondary NameNode.
-    </description>
-  </property>
-
-
-<!--
-  This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
--->
-  <property>
-    <name>dfs.namenode.kerberos.https.principal</name>
-    <value></value>
-    <description>The Kerberos principal for the host that the NameNode runs on.</description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.https.principal</name>
-    <value></value>
-    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
-  </property>
-
-  <property>
-    <!-- cluster variant -->
-    <name>dfs.secondary.http.address</name>
-    <value></value>
-    <description>Address of secondary namenode web server</description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.https.port</name>
-    <value>50490</value>
-    <description>The https port where secondary-namenode binds</description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.principal</name>
-    <value></value>
-    <description>
-      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
-      HTTP SPNEGO specification.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.keytab</name>
-    <value></value>
-    <description>
-      The Kerberos keytab file with the credentials for the
-      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.kerberos.principal</name>
-    <value></value>
-    <description>
-      The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
-    </description>
-  </property>
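
A minimal sketch of the _HOST substitution described above; the realm and hostname are assumptions for illustration, not values from this file:

    <!-- Illustrative sketch only: with this value, a DataNode running on
         dn3.example.com logs in as "dn/dn3.example.com@EXAMPLE.COM". -->
    <property>
      <name>dfs.datanode.kerberos.principal</name>
      <value>dn/_HOST@EXAMPLE.COM</value>
    </property>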
-
-  <property>
-    <name>dfs.namenode.keytab.file</name>
-    <value></value>
-    <description>
-        Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.keytab.file</name>
-    <value></value>
-    <description>
-        Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.keytab.file</name>
-    <value></value>
-    <description>
-        The filename of the keytab file for the DataNode.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.https.port</name>
-    <value>50470</value>
-    <description>The https port where namenode binds</description>
-  </property>
-
-  <property>
-    <name>dfs.https.address</name>
-    <value></value>
-    <description>The https address where namenode binds</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.data.dir.perm</name>
-    <value></value>
-    <description>The permissions that should be set on dfs.data.dir
-    directories. The datanode will not come up if the permissions are
-    different on existing dfs.data.dir directories. If the directories
-    don't exist, they will be created with this permission.</description>
-  </property>
-
-  <property>
-    <name>dfs.access.time.precision</name>
-    <value>0</value>
-    <description>The access time for an HDFS file is precise up to this value.
-                 The default value is 1 hour. Setting a value of 0 disables
-                 access times for HDFS.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.cluster.administrators</name>
-    <value> hdfs</value>
-    <description>ACL for the users and groups who can view the default servlets in HDFS</description>
-  </property>
-
-  <property>
-    <name>ipc.server.read.threadpool.size</name>
-    <value>5</value>
-    <description></description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value>0</value>
-    <description>Number of failed disks a datanode would tolerate</description>
-  </property>
-
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/HDFS/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/HDFS/metainfo.xml
deleted file mode 100644
index 1b185e1..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/HDFS/metainfo.xml
+++ /dev/null
@@ -1,46 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Apache Hadoop Distributed File System</comment>
-    <version>1.1.2</version>
-
-    <components>
-        <component>
-            <name>NAMENODE</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>DATANODE</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>SECONDARY_NAMENODE</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>HDFS_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/HIVE/configuration/hive-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/HIVE/configuration/hive-site.xml
deleted file mode 100644
index 7d35558..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/HIVE/configuration/hive-site.xml
+++ /dev/null
@@ -1,138 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<configuration>
-  <property>
-    <name>hive.metastore.local</name>
-    <value>false</value>
-    <description>controls whether to connect to a remote metastore server or
-    open a new metastore server in the Hive Client JVM</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionURL</name>
-    <value></value>
-    <description>JDBC connect string for a JDBC metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionDriverName</name>
-    <value>com.mysql.jdbc.Driver</value>
-    <description>Driver class name for a JDBC metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionUserName</name>
-    <value></value>
-    <description>username to use against metastore database</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionPassword</name>
-    <value></value>
-    <description>password to use against metastore database</description>
-  </property>
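
To show how the metastore connection properties above fit together, here is a minimal sketch for a MySQL-backed metastore; the host, database, and user names are assumptions for illustration, not values shipped with the stack:

    <!-- Illustrative sketch only: hypothetical MySQL metastore connection. -->
    <property>
      <name>javax.jdo.option.ConnectionURL</name>
      <value>jdbc:mysql://db1.example.com/hive?createDatabaseIfNotExist=true</value>
    </property>
    <property>
      <name>javax.jdo.option.ConnectionUserName</name>
      <value>hive</value>
    </property>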
-
-  <property>
-    <name>hive.metastore.warehouse.dir</name>
-    <value>/apps/hive/warehouse</value>
-    <description>location of default database for the warehouse</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.sasl.enabled</name>
-    <value></value>
-    <description>If true, the metastore thrift interface will be secured with SASL.
-     Clients must authenticate with Kerberos.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.kerberos.keytab.file</name>
-    <value></value>
-    <description>The path to the Kerberos Keytab file containing the metastore
-     thrift server's service principal.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.kerberos.principal</name>
-    <value></value>
-    <description>The service principal for the metastore thrift server. The special
-    string _HOST will be replaced automatically with the correct host name.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.cache.pinobjtypes</name>
-    <value>Table,Database,Type,FieldSchema,Order</value>
-    <description>List of comma separated metastore object types that should be pinned in the cache</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.uris</name>
-    <value></value>
-    <description>URI for client to contact metastore server</description>
-  </property>
-
-  <property>
-    <name>hive.semantic.analyzer.factory.impl</name>
-    <value>org.apache.hivealog.cli.HCatSemanticAnalyzerFactory</value>
-    <description>controls which SemanticAnalyzerFactory implementation class is used by the CLI</description>
-  </property>
-
-  <property>
-    <name>hadoop.clientside.fs.operations</name>
-    <value>true</value>
-    <description>FS operations are owned by client</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.client.socket.timeout</name>
-    <value>60</value>
-    <description>MetaStore Client socket timeout in seconds</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.execute.setugi</name>
-    <value>true</value>
-    <description>In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that it is best effort: if the client sets it to true and the server sets it to false, the client setting will be ignored.</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.enabled</name>
-    <value>true</value>
-    <description>enable or disable the hive client authorization</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.manager</name>
-    <value>org.apache.hcatalog.security.HdfsAuthorizationProvider</value>
-    <description>the hive client authorization manager class name.
-    The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  </description>
-  </property>
-
-  <property>
-    <name>hive.server2.enable.doAs</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>fs.hdfs.impl.disable.cache</name>
-    <value>true</value>
-  </property>
-
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/HIVE/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/HIVE/metainfo.xml
deleted file mode 100644
index 6a52064..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/HIVE/metainfo.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
-    <version>0.10.0</version>
-
-    <components>        
-        <component>
-            <name>HIVE_METASTORE</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>HIVE_SERVER</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>MYSQL_SERVER</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>HIVE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/MAPREDUCE/configuration/capacity-scheduler.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/MAPREDUCE/configuration/capacity-scheduler.xml
deleted file mode 100644
index 8034d19..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/MAPREDUCE/configuration/capacity-scheduler.xml
+++ /dev/null
@@ -1,195 +0,0 @@
-<?xml version="1.0"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- This is the configuration file for the resource manager in Hadoop. -->
-<!-- You can configure various scheduling parameters related to queues. -->
-<!-- The properties for a queue follow a naming convention, such as -->
-<!-- mapred.capacity-scheduler.queue.<queue-name>.property-name. -->
-
-<configuration>
-
-  <property>
-    <name>mapred.capacity-scheduler.maximum-system-jobs</name>
-    <value>3000</value>
-    <description>Maximum number of jobs in the system which can be initialized
-     concurrently by the CapacityScheduler.
-    </description>    
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.capacity</name>
-    <value>100</value>
-    <description>Percentage of the number of slots in the cluster that are
-      to be available for jobs in this queue.
-    </description>    
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.maximum-capacity</name>
-    <value>-1</value>
-    <description>
-        maximum-capacity defines a limit beyond which a queue cannot use the capacity of the cluster.
-        This provides a means to limit how much excess capacity a queue can use. By default, there is no limit.
-        The maximum-capacity of a queue can only be greater than or equal to its minimum capacity.
-        The default value of -1 implies a queue can use the complete capacity of the cluster.
-
-        This property can be used to curtail certain long-running jobs from occupying more than a
-        certain percentage of the cluster, which, in the absence of pre-emption, could affect the
-        capacity guarantees of other queues.
-
-        One important thing to note is that maximum-capacity is a percentage, so based on the cluster's capacity
-        the max capacity would change. So if a large number of nodes or racks get added to the cluster, the max
-        capacity in absolute terms would increase accordingly.
-    </description>
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.supports-priority</name>
-    <value>false</value>
-    <description>If true, priorities of jobs will be taken into 
-      account in scheduling decisions.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.minimum-user-limit-percent</name>
-    <value>100</value>
-    <description> Each queue enforces a limit on the percentage of resources 
-    allocated to a user at any given time, if there is competition for them. 
-    This user limit can vary between a minimum and maximum value. The former
-    depends on the number of users who have submitted jobs, and the latter is
-    set to this property value. For example, suppose the value of this 
-    property is 25. If two users have submitted jobs to a queue, no single 
-    user can use more than 50% of the queue resources. If a third user submits
-    a job, no single user can use more than 33% of the queue resources. With 4 
-    or more users, no user can use more than 25% of the queue's resources. A 
-    value of 100 implies no user limits are imposed. 
-    </description>
-  </property>
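
The arithmetic in the description above is worth spelling out once; the value 25 is taken from the description's own example:

    <!-- Worked example for minimum-user-limit-percent = 25:
         1 user   -> may use up to 100% of the queue's resources
         2 users  -> no single user may use more than 50%
         3 users  -> no single user may use more than 33%
         4+ users -> no single user may use more than 25% (the configured floor) -->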
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.user-limit-factor</name>
-    <value>1</value>
-    <description>The multiple of the queue capacity which can be configured to 
-    allow a single user to acquire more slots. 
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks</name>
-    <value>200000</value>
-    <description>The maximum number of tasks, across all jobs in the queue, 
-    which can be initialized concurrently. Once the queue's jobs exceed this 
-    limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks-per-user</name>
-    <value>100000</value>
-    <description>The maximum number of tasks per-user, across all of the
-    user's jobs in the queue, which can be initialized concurrently. Once the 
-    user's jobs exceed this limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.init-accept-jobs-factor</name>
-    <value>10</value>
-    <description>The multiple of (maximum-system-jobs * queue-capacity) used to
-    determine the number of jobs which are accepted by the scheduler.  
-    </description>
-  </property>
-
-  <!-- The default configuration settings for the capacity task scheduler -->
-  <!-- The default values would be applied to all the queues which don't have -->
-  <!-- the appropriate property for the particular queue -->
-  <property>
-    <name>mapred.capacity-scheduler.default-supports-priority</name>
-    <value>false</value>
-    <description>If true, priorities of jobs will be taken into 
-      account in scheduling decisions by default in a job queue.
-    </description>
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.default-minimum-user-limit-percent</name>
-    <value>100</value>
-    <description>The percentage of the resources limited to a particular user
-      for the job queue at any given point of time by default.
-    </description>
-  </property>
-
-
-  <property>
-    <name>mapred.capacity-scheduler.default-user-limit-factor</name>
-    <value>1</value>
-    <description>The default multiple of queue-capacity which is used to 
-    determine the amount of slots a single user can consume concurrently.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.default-maximum-active-tasks-per-queue</name>
-    <value>200000</value>
-    <description>The default maximum number of tasks, across all jobs in the 
-    queue, which can be initialized concurrently. Once the queue's jobs exceed 
-    this limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.default-maximum-active-tasks-per-user</name>
-    <value>100000</value>
-    <description>The default maximum number of tasks per-user, across all of
-    the user's jobs in the queue, which can be initialized concurrently. Once 
-    the user's jobs exceed this limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.default-init-accept-jobs-factor</name>
-    <value>10</value>
-    <description>The default multiple of (maximum-system-jobs * queue-capacity)
-    used to determine the number of jobs which are accepted by the scheduler.  
-    </description>
-  </property>
-
-  <!-- Capacity scheduler Job Initialization configuration parameters -->
-  <property>
-    <name>mapred.capacity-scheduler.init-poll-interval</name>
-    <value>5000</value>
-    <description>The amount of time in milliseconds which is used to poll
-    the job queues for jobs to initialize.
-    </description>
-  </property>
-  <property>
-    <name>mapred.capacity-scheduler.init-worker-threads</name>
-    <value>5</value>
-    <description>Number of worker threads used by the
-    initialization poller to initialize jobs in a set of queues.
-    If the number is equal to the number of job queues, a single
-    thread initializes the jobs in one queue. If it is smaller,
-    each thread is assigned a set of queues. If it is greater,
-    the number of threads is capped at the number of job queues.
-    </description>
-  </property>
-
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/MAPREDUCE/configuration/core-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/MAPREDUCE/configuration/core-site.xml
deleted file mode 100644
index 3a2af49..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/MAPREDUCE/configuration/core-site.xml
+++ /dev/null
@@ -1,20 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<configuration>
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/MAPREDUCE/configuration/mapred-queue-acls.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/MAPREDUCE/configuration/mapred-queue-acls.xml
deleted file mode 100644
index ce12380..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/MAPREDUCE/configuration/mapred-queue-acls.xml
+++ /dev/null
@@ -1,39 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- mapred-queue-acls.xml -->
-<configuration>
-
-
-<!-- queue default -->
-
-  <property>
-    <name>mapred.queue.default.acl-submit-job</name>
-    <value>*</value>
-  </property>
-
-  <property>
-    <name>mapred.queue.default.acl-administer-jobs</name>
-    <value>*</value>
-  </property>
-
-  <!-- END ACLs -->
-
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/MAPREDUCE/configuration/mapred-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/MAPREDUCE/configuration/mapred-site.xml
deleted file mode 100644
index 11a72b1..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/MAPREDUCE/configuration/mapred-site.xml
+++ /dev/null
@@ -1,531 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-<!-- i/o properties -->
-
-  <property>
-    <name>io.sort.mb</name>
-    <value></value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>io.sort.record.percent</name>
-    <value>.2</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>io.sort.spill.percent</name>
-    <value></value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>io.sort.factor</name>
-    <value>100</value>
-    <description>No description</description>
-  </property>
-
-<!-- map/reduce properties -->
-
-<property>
-  <name>mapred.tasktracker.tasks.sleeptime-before-sigkill</name>
-  <value>250</value>
-  <description>Normally, this is the amount of time before killing
-  processes, and the recommended default is 5 seconds - a value of
-  5000 here.  In this case, we are using it solely to blast tasks before
-  killing them, and killing them very quickly (1/4 second) to guarantee
-  that we do not leave VMs around for later jobs.
-  </description>
-</property>
-
-  <property>
-    <name>mapred.job.tracker.handler.count</name>
-    <value>50</value>
-    <description>
-    The number of server threads for the JobTracker. This should be roughly
-    4% of the number of tasktracker nodes.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.system.dir</name>
-    <value>/mapred/system</value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker.http.address</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <!-- cluster specific -->
-    <name>mapred.local.dir</name>
-    <value></value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-
-  <property>
-  <name>mapreduce.cluster.administrators</name>
-  <value> hadoop</value>
-  </property>
-
-  <property>
-    <name>mapred.reduce.parallel.copies</name>
-    <value>30</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.tasktracker.map.tasks.maximum</name>
-    <value></value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.tasktracker.reduce.tasks.maximum</name>
-    <value></value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>tasktracker.http.threads</name>
-    <value>50</value>
-  </property>
-
-  <property>
-    <name>mapred.map.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>If true, then multiple instances of some map tasks
-               may be executed in parallel.</description>
-  </property>
-
-  <property>
-    <name>mapred.reduce.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>If true, then multiple instances of some reduce tasks
-               may be executed in parallel.</description>
-  </property>
-
-  <property>
-    <name>mapred.reduce.slowstart.completed.maps</name>
-    <value>0.05</value>
-  </property>
-
-  <property>
-    <name>mapred.inmem.merge.threshold</name>
-    <value>1000</value>
-    <description>The threshold, in terms of the number of files,
-  for the in-memory merge process. When we accumulate the threshold number of files
-  we initiate the in-memory merge and spill to disk. A value of 0 or less
-  indicates that we don't want any threshold and instead depend only on
-  the ramfs's memory consumption to trigger the merge.
-  </description>
-  </property>
-
-  <property>
-    <name>mapred.job.shuffle.merge.percent</name>
-    <value>0.66</value>
-    <description>The usage threshold at which an in-memory merge will be
-  initiated, expressed as a percentage of the total memory allocated to
-  storing in-memory map outputs, as defined by
-  mapred.job.shuffle.input.buffer.percent.
-  </description>
-  </property>
-
-  <property>
-    <name>mapred.job.shuffle.input.buffer.percent</name>
-    <value>0.7</value>
-    <description>The percentage of memory to be allocated from the maximum heap
-  size to storing map outputs during the shuffle.
-  </description>
-  </property>
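
A short worked example ties the two shuffle thresholds together; the 1024 MB reduce-task heap below is an assumption for illustration:

    <!-- Worked example, assuming a 1024 MB reduce-task heap:
         shuffle buffer  = 1024 MB * 0.7  = ~717 MB reserved for map outputs;
         in-memory merge starts once that buffer is 66% full, i.e. at ~473 MB. -->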
-
-  <property>
-    <name>mapred.map.output.compression.codec</name>
-    <value></value>
-    <description>If the map outputs are compressed, how should they be
-      compressed
-    </description>
-  </property>
-
-<property>
-  <name>mapred.output.compression.type</name>
-  <value>BLOCK</value>
-  <description>If the job outputs are to be compressed as SequenceFiles, how should
-               they be compressed? Should be one of NONE, RECORD or BLOCK.
-  </description>
-</property>
-
-
-  <property>
-    <name>mapred.jobtracker.completeuserjobs.maximum</name>
-    <value>0</value>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.taskScheduler</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.restart.recover</name>
-    <value>false</value>
-    <description>"true" to enable (job) recovery upon restart,
-               "false" to start afresh
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.reduce.input.buffer.percent</name>
-    <value>0.0</value>
-    <description>The percentage of memory, relative to the maximum heap size, to
-  retain map outputs during the reduce. When the shuffle is concluded, any
-  remaining map outputs in memory must consume less than this threshold before
-  the reduce can begin.
-  </description>
-  </property>
-
- <property>
-  <name>mapreduce.reduce.input.limit</name>
-  <value>10737418240</value>
-  <description>The limit on the input size of the reduce. (This value
-  is 10 GB.)  If the estimated input size of the reduce is greater than
-  this value, the job fails. A value of -1 means that there is no limit
-  set. </description>
-</property>
-
-
-  <!-- copied from kryptonite configuration -->
-  <property>
-    <name>mapred.compress.map.output</name>
-    <value></value>
-  </property>
-
-
-  <property>
-    <name>mapred.task.timeout</name>
-    <value>600000</value>
-    <description>The number of milliseconds before a task will be
-  terminated if it neither reads an input, writes an output, nor
-  updates its status string.
-  </description>
-  </property>
-
-  <property>
-    <name>jetty.connector</name>
-    <value>org.mortbay.jetty.nio.SelectChannelConnector</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.task.tracker.task-controller</name>
-    <value></value>
-   <description>
-     TaskController which is used to launch and manage task execution.
-  </description>
-  </property>
-
-  <property>
-    <name>mapred.child.root.logger</name>
-    <value>INFO,TLA</value>
-  </property>
-
-  <property>
-    <name>mapred.child.java.opts</name>
-    <value></value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.cluster.map.memory.mb</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.cluster.reduce.memory.mb</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.job.map.memory.mb</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.job.reduce.memory.mb</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.cluster.max.map.memory.mb</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.cluster.max.reduce.memory.mb</name>
-    <value></value>
-  </property>
-
-<property>
-  <name>mapred.hosts</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.hosts.exclude</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.max.tracker.blacklists</name>
-  <value>16</value>
-  <description>
-    If a node is reported blacklisted by 16 successful jobs within the timeout window, it will be graylisted
-  </description>
-</property>
-
-<property>
-  <name>mapred.healthChecker.script.path</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.healthChecker.interval</name>
-  <value>135000</value>
-</property>
-
-<property>
-  <name>mapred.healthChecker.script.timeout</name>
-  <value>60000</value>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.active</name>
-  <value>false</value>
-  <description>Indicates whether persistence of job status information is
-  active or not.
-  </description>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.hours</name>
-  <value>1</value>
-  <description>The number of hours job status information is persisted in DFS.
-    The job status information will be available after it drops off the memory
-    queue and between jobtracker restarts. With a zero value the job status
-    information is not persisted at all in DFS.
-  </description>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.dir</name>
-  <value></value>
-  <description>The directory where the job status information is persisted
-   in a file system to be available after it drops off the memory queue and
-   between jobtracker restarts.
-  </description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.retirejob.check</name>
-  <value>10000</value>
-</property>
-
-<property>
-  <name>mapred.jobtracker.retirejob.interval</name>
-  <value>0</value>
-</property>
-
-<property>
-  <name>mapred.job.tracker.history.completed.location</name>
-  <value>/mapred/history/done</value>
-  <description>No description</description>
-</property>
-
-<property>
-  <name>mapred.task.maxvmem</name>
-  <value></value>
-  <final>true</final>
-   <description>No description</description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.maxtasks.per.job</name>
-  <value></value>
-  <final>true</final>
-  <description>The maximum number of tasks for a single job.
-  A value of -1 indicates that there is no maximum.  </description>
-</property>
-
-<property>
-  <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
-  <value>false</value>
-</property>
-
-<property>
-  <name>mapred.userlog.retain.hours</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.job.reuse.jvm.num.tasks</name>
-  <value>1</value>
-  <description>
-    How many tasks to run per JVM. If set to -1, there is no limit.
-  </description>
-  <final>true</final>
-</property>
-
-<property>
-  <name>mapreduce.jobtracker.kerberos.principal</name>
-  <value></value>
-  <description>
-      JT user name key.
- </description>
-</property>
-
-<property>
-  <name>mapreduce.tasktracker.kerberos.principal</name>
-   <value></value>
-  <description>
-       TaskTracker user name key. "_HOST" is replaced by the host name of the task tracker.
-   </description>
-</property>
-
-
-  <property>
-    <name>hadoop.job.history.user.location</name>
-    <value>none</value>
-    <final>true</final>
-  </property>
-
-
- <property>
-   <name>mapreduce.jobtracker.keytab.file</name>
-   <value></value>
-   <description>
-       The keytab for the jobtracker principal.
-   </description>
-
-</property>
-
- <property>
-   <name>mapreduce.tasktracker.keytab.file</name>
-   <value></value>
-    <description>The filename of the keytab for the task tracker</description>
- </property>
-
- <property>
-   <name>mapreduce.jobtracker.staging.root.dir</name>
-   <value>/user</value>
- <description>The path prefix for where the staging directories should be placed. The next level is always the user's
-   name. It is a path in the default file system.</description>
- </property>
-
- <property>
-      <name>mapreduce.tasktracker.group</name>
-      <value>hadoop</value>
-      <description>The group used for accessing the task controller. The mapred user must be a member, and regular users should *not* be members.</description>
-
- </property>
-
-  <property>
-    <name>mapreduce.jobtracker.split.metainfo.maxsize</name>
-    <value>50000000</value>
-    <final>true</final>
-     <description>If the size of the split metainfo file is larger than this, the JobTracker will fail the job during
-    initialization.
-   </description>
-  </property>
-  <property>
-    <name>mapreduce.history.server.embedded</name>
-    <value>false</value>
-    <description>Whether the job history server should be embedded within the JobTracker process.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapreduce.history.server.http.address</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>HTTP address of the history server.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.kerberos.principal</name>
-    <!-- cluster variant -->
-  <value></value>
-    <description>Job history user name key (must map to the same user as the JT user).</description>
-  </property>
-
- <property>
-   <name>mapreduce.jobhistory.keytab.file</name>
-    <!-- cluster variant -->
-   <value></value>
-   <description>The keytab for the job history server principal.</description>
- </property>
-
-<property>
-  <name>mapred.jobtracker.blacklist.fault-timeout-window</name>
-  <value>180</value>
-  <description>
-    3-hour sliding window (value is in minutes)
-  </description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.blacklist.fault-bucket-width</name>
-  <value>15</value>
-  <description>
-    15-minute bucket size (value is in minutes)
-  </description>
-</property>
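-<!--
-  Illustrative note on the two blacklist properties above: the 180-minute
-  fault window is divided into 180 / 15 = 12 buckets, so per-tracker fault
-  counts age out one 15-minute bucket at a time rather than all at once.
--->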
-
-<property>
-  <name>mapred.queue.names</name>
-  <value>default</value>
-  <description>Comma-separated list of queues configured for this JobTracker.</description>
-</property>
-
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/MAPREDUCE/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/MAPREDUCE/metainfo.xml
deleted file mode 100644
index 79d219b..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/MAPREDUCE/metainfo.xml
+++ /dev/null
@@ -1,41 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>mapred</user>
-    <comment>Apache Hadoop Distributed Processing Framework</comment>
-    <version>1.1.2</version>
-
-    <components>
-        <component>
-            <name>JOBTRACKER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>TASKTRACKER</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>MAPREDUCE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/NAGIOS/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/NAGIOS/metainfo.xml
deleted file mode 100644
index bd7de07..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/NAGIOS/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Nagios Monitoring and Alerting system</comment>
-    <version>3.2.3</version>
-
-    <components>
-        <component>
-            <name>NAGIOS_SERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/OOZIE/configuration/oozie-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/OOZIE/configuration/oozie-site.xml
deleted file mode 100644
index 1665ba8..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/OOZIE/configuration/oozie-site.xml
+++ /dev/null
@@ -1,245 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-        
-       http://www.apache.org/licenses/LICENSE-2.0
-  
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->     
-
-<configuration>
-
-<!--
-    Refer to the oozie-default.xml file for the complete list of
-    Oozie configuration properties and their default values.
--->
-  <property>
-    <name>oozie.base.url</name>
-    <value>http://localhost:11000/oozie</value>
-    <description>Base Oozie URL.</description>
-   </property>
-
-  <property>
-    <name>oozie.system.id</name>
-    <value>oozie-${user.name}</value>
-    <description>
-    The Oozie system ID.
-    </description>
-   </property>
-
-   <property>
-     <name>oozie.systemmode</name>
-     <value>NORMAL</value>
-     <description>
-     System mode for Oozie at startup.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.AuthorizationService.security.enabled</name>
-     <value>true</value>
-     <description>
-     Specifies whether security (user name/admin role) is enabled or not.
-     If disabled any user can manage Oozie system and manage any job.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.PurgeService.older.than</name>
-     <value>30</value>
-     <description>
-     Jobs older than this value, in days, will be purged by the PurgeService.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.PurgeService.purge.interval</name>
-     <value>3600</value>
-     <description>
-     Interval at which the purge service will run, in seconds.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.queue.size</name>
-     <value>1000</value>
-     <description>Max callable queue size</description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.threads</name>
-     <value>10</value>
-     <description>Number of threads used for executing callables</description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.callable.concurrency</name>
-     <value>3</value>
-     <description>
-     Maximum concurrency for a given callable type.
-     Each command is a callable type (submit, start, run, signal, job, jobs, suspend, resume, etc).
-     Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
-     All commands that use action executors (action-start, action-end, action-kill and action-check) use
-     the action type as the callable type.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.coord.normal.default.timeout</name>
-     <value>120</value>
-     <description>Default timeout for a coordinator action input check (in minutes) for a normal job.
-      -1 means an infinite timeout.</description>
-   </property>
-
-   <property>
-     <name>oozie.db.schema.name</name>
-     <value>oozie</value>
-     <description>
-      Oozie database name.
-     </description>
-   </property>
-
-    <property>
-      <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
-      <value> </value>
-      <description>
-      Whitelisted job tracker for Oozie service.
-      </description>
-    </property>
-   
-    <property>
-      <name>oozie.authentication.type</name>
-      <value>simple</value>
-      <description>
-      </description>
-    </property>
-   
-    <property>
-      <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
-      <value> </value>
-      <description>
-      </description>
-    </property>
-
-    <property>
-      <name>oozie.service.WorkflowAppService.system.libpath</name>
-      <value>/user/${user.name}/share/lib</value>
-      <description>
-      System library path to use for workflow applications.
-      This path is added to workflow application if their job properties sets
-      the property 'oozie.use.system.libpath' to true.
-      </description>
-    </property>
-
-    <property>
-      <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
-      <value>false</value>
-      <description>
-      If set to true, submissions of MapReduce and Pig jobs will automatically
-      include the system library path, thus not requiring users to specify
-      where the Pig JAR files are. Instead, the ones from the system library
-      path are used.
-      </description>
-    </property>
-    <property>
-      <name>oozie.authentication.kerberos.name.rules</name>
-      <value>
-        RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/
-        RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/
-        RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
-        RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
-        DEFAULT
-        </value>
-      <description>The mapping from kerberos principal names to local OS user names.</description>
-    </property>
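-    <!--
-      Illustrative example (hypothetical realm and users): once the TODO
-      placeholders above are filled in, a rule such as
-        RULE:[2:$1@$0]([jt]t@.*EXAMPLE.COM)s/.*/mapred/
-      maps the two-component principal jt/host1.example.com@EXAMPLE.COM to
-      the local OS user "mapred"; principals matching no rule fall through
-      to DEFAULT.
-    -->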
-    <property>
-      <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
-      <value>*=/etc/hadoop/conf</value>
-      <description>
-          Comma-separated list of AUTHORITY=HADOOP_CONF_DIR pairs, where AUTHORITY is the HOST:PORT of
-          the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
-          used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
-          the relevant Hadoop *-site.xml files. If the path is relative, it is looked up within
-          the Oozie configuration directory; the path can also be absolute (i.e. pointing
-          to Hadoop client conf/ directories in the local filesystem).
-      </description>
-    </property>
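-    <!--
-      Illustrative example (hypothetical hosts): a multi-cluster value could be
-        nn1.example.com:8020=/etc/hadoop/conf-cluster1,*=/etc/hadoop/conf
-      so that jobs submitted against nn1.example.com:8020 pick up that
-      cluster's *-site.xml files while every other authority falls back to
-      the wildcard entry.
-    -->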
-    <property>
-        <name>oozie.service.ActionService.executor.ext.classes</name>
-        <value>
-            org.apache.oozie.action.email.EmailActionExecutor,
-            org.apache.oozie.action.hadoop.HiveActionExecutor,
-            org.apache.oozie.action.hadoop.ShellActionExecutor,
-            org.apache.oozie.action.hadoop.SqoopActionExecutor,
-            org.apache.oozie.action.hadoop.DistcpActionExecutor
-        </value>
-    </property>
-
-    <property>
-        <name>oozie.service.SchemaService.wf.ext.schemas</name>
-        <value>shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd</value>
-    </property>
-    <property>
-        <name>oozie.service.JPAService.create.db.schema</name>
-        <value>false</value>
-        <description>
-            Creates the Oozie DB.
-
-            If set to true, it creates the DB schema if it does not exist; if the DB schema exists, it is a NOP.
-            If set to false, it does not create the DB schema; if the DB schema does not exist, startup fails.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.driver</name>
-        <value>org.apache.derby.jdbc.EmbeddedDriver</value>
-        <description>
-            JDBC driver class.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.url</name>
-        <value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
-        <description>
-            JDBC URL.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.username</name>
-        <value>sa</value>
-        <description>
-            DB user name.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.password</name>
-        <value> </value>
-        <description>
-            DB user password.
-
-            IMPORTANT: if the password is empty, leave a one-space string; the service trims the value,
-                       and if it is empty, Configuration assumes it is NULL.
-        </description>
-    </property>
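-    <!--
-      Illustrative example (hypothetical host and credentials): to back Oozie
-      with MySQL instead of the embedded Derby database, the JPAService JDBC
-      properties above would instead look like
-        oozie.service.JPAService.jdbc.driver   = com.mysql.jdbc.Driver
-        oozie.service.JPAService.jdbc.url      = jdbc:mysql://db.example.com:3306/${oozie.db.schema.name}
-        oozie.service.JPAService.jdbc.username = oozie
-    -->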
-
-    <property>
-        <name>oozie.service.JPAService.pool.max.active.conn</name>
-        <value>10</value>
-        <description>
-             Max number of connections.
-        </description>
-    </property>
-</configuration>
\ No newline at end of file
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/OOZIE/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/OOZIE/metainfo.xml
deleted file mode 100644
index 83ccb06..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/OOZIE/metainfo.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>System for workflow coordination and execution of Apache Hadoop jobs</comment>
-    <version>3.2.0</version>
-
-    <components>
-        <component>
-            <name>OOZIE_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>OOZIE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/PIG/configuration/pig.properties b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/PIG/configuration/pig.properties
deleted file mode 100644
index 01000b5..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/PIG/configuration/pig.properties
+++ /dev/null
@@ -1,52 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Pig default configuration file. All values can be overwritten by pig.properties and command line arguments.
-# see bin/pig -help
-
-# brief logging (no timestamps)
-brief=false
-
-#debug level, INFO is default
-debug=INFO
-
-#verbose print all log messages to screen (default to print only INFO and above to screen)
-verbose=false
-
-#exectype local|mapreduce, mapreduce is default
-exectype=mapreduce
-
-#Enable insertion of information about script into hadoop job conf 
-pig.script.info.enabled=true
-
-#Do not spill temp files smaller than this size (bytes)
-pig.spill.size.threshold=5000000
-#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
-#This should help reduce the number of files being spilled.
-pig.spill.gc.activation.size=40000000
-
-#the following two parameters are to help estimate the reducer number
-pig.exec.reducers.bytes.per.reducer=1000000000
-pig.exec.reducers.max=999
-
-#Temporary location to store the intermediate data.
-pig.temp.dir=/tmp/
-
-#Threshold for merging FRJoin fragment files
-pig.files.concatenation.threshold=100
-pig.optimistic.files.concatenation=false;
-
-pig.disable.counter=false
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/PIG/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/PIG/metainfo.xml
deleted file mode 100644
index 4982fd2..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/PIG/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Scripting platform for analyzing large datasets</comment>
-    <version>0.10.1</version>
-
-    <components>
-        <component>
-            <name>PIG</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/SQOOP/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/SQOOP/metainfo.xml
deleted file mode 100644
index ae0e68b..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/SQOOP/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Tool for transferring bulk data between Apache Hadoop and structured data stores such as relational databases</comment>
-    <version>1.4.2</version>
-
-    <components>
-        <component>
-            <name>SQOOP</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/WEBHCAT/configuration/webhcat-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/WEBHCAT/configuration/webhcat-site.xml
deleted file mode 100644
index 31d0113..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/WEBHCAT/configuration/webhcat-site.xml
+++ /dev/null
@@ -1,126 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- 
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<!-- The default settings for Templeton. -->
-<!-- Edit templeton-site.xml to change settings for your local -->
-<!-- install. -->
-
-<configuration>
-
-  <property>
-    <name>templeton.port</name>
-      <value>50111</value>
-    <description>The HTTP port for the main server.</description>
-  </property>
-
-  <property>
-    <name>templeton.hadoop.conf.dir</name>
-    <value>/etc/hadoop/conf</value>
-    <description>The path to the Hadoop configuration.</description>
-  </property>
-
-  <property>
-    <name>templeton.jar</name>
-    <value>/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar</value>
-    <description>The path to the Templeton jar file.</description>
-  </property>
-
-  <property>
-    <name>templeton.libjars</name>
-    <value>/usr/lib/zookeeper/zookeeper.jar</value>
-    <description>Jars to add to the classpath.</description>
-  </property>
-
-
-  <property>
-    <name>templeton.hadoop</name>
-    <value>/usr/bin/hadoop</value>
-    <description>The path to the Hadoop executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.archive</name>
-    <value>hdfs:///apps/webhcat/pig.tar.gz</value>
-    <description>The path to the Pig archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.path</name>
-    <value>pig.tar.gz/pig/bin/pig</value>
-    <description>The path to the Pig executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hcat</name>
-    <value>/usr/bin/hcat</value>
-    <description>The path to the hcatalog executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.archive</name>
-    <value>hdfs:///apps/webhcat/hive.tar.gz</value>
-    <description>The path to the Hive archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.path</name>
-    <value>hive.tar.gz/hive/bin/hive</value>
-    <description>The path to the Hive executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.properties</name>
-    <value></value>
-    <description>Properties to set when running hive.</description>
-  </property>
-
-
-  <property>
-    <name>templeton.zookeeper.hosts</name>
-    <value></value>
-    <description>ZooKeeper servers, as comma-separated host:port pairs.</description>
-  </property>
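-  <!--
-    Illustrative example (hypothetical hosts): a three-node ZooKeeper
-    ensemble would be listed as
-      zk1.example.com:2181,zk2.example.com:2181,zk3.example.com:2181
-  -->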
-
-  <property>
-    <name>templeton.storage.class</name>
-    <value>org.apache.hcatalog.templeton.tool.ZooKeeperStorage</value>
-    <description>The class to use as storage</description>
-  </property>
-
-  <property>
-   <name>templeton.override.enabled</name>
-   <value>false</value>
-   <description>
-     Enable the override path in templeton.override.jars
-   </description>
- </property>
-
- <property>
-    <name>templeton.streaming.jar</name>
-    <value>hdfs:///apps/webhcat/hadoop-streaming.jar</value>
-    <description>The hdfs path to the Hadoop streaming jar file.</description>
-  </property> 
-
-  <property>
-    <name>templeton.exec.timeout</name>
-    <value>60000</value>
-    <description>Timeout for the Templeton API.</description>
-  </property>
-
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/WEBHCAT/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/WEBHCAT/metainfo.xml
deleted file mode 100644
index e65992f..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/WEBHCAT/metainfo.xml
+++ /dev/null
@@ -1,31 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is a comment for the WEBHCAT service</comment>
-    <version>0.5.0</version>
-
-    <components>
-        <component>
-            <name>WEBHCAT_SERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/ZOOKEEPER/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/ZOOKEEPER/metainfo.xml
deleted file mode 100644
index 0e21f4f..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/ZOOKEEPER/metainfo.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is a comment for the ZOOKEEPER service</comment>
-    <version>3.4.5</version>
-
-    <components>
-        <component>
-            <name>ZOOKEEPER_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>ZOOKEEPER_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/repos/repoinfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/repos/repoinfo.xml
deleted file mode 100644
index f5c0fee..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/repos/repoinfo.xml
+++ /dev/null
@@ -1,97 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<reposinfo>
-  <os type="centos6">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/1.x/updates/1.2.1</baseurl>
-      <repoid>HDP-1.2.1</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-    <repo>
-      <baseurl></baseurl>
-      <repoid>HDP-epel</repoid>
-      <reponame>HDP-epel</reponame>
-      <mirrorslist><![CDATA[http://mirrors.fedoraproject.org/mirrorlist?repo=epel-6&arch=$basearch]]></mirrorslist>
-    </repo>
-  </os>
-  <os type="centos5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos5/1.x/updates/1.2.1</baseurl>
-      <repoid>HDP-1.2.1</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-    <repo>
-      <baseurl></baseurl>
-      <repoid>HDP-epel</repoid>
-      <reponame>HDP-epel</reponame>
-      <mirrorslist><![CDATA[http://mirrors.fedoraproject.org/mirrorlist?repo=epel-5&arch=$basearch]]></mirrorslist>
-    </repo>
-  </os>
-  <os type="redhat6">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/1.x/updates/1.2.1</baseurl>
-      <repoid>HDP-1.2.1</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-    <repo>
-      <baseurl></baseurl>
-      <repoid>HDP-epel</repoid>
-      <reponame>HDP-epel</reponame>
-      <mirrorslist><![CDATA[http://mirrors.fedoraproject.org/mirrorlist?repo=epel-6&arch=$basearch]]></mirrorslist>
-    </repo>
-  </os>
-  <os type="redhat5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos5/1.x/updates/1.2.1</baseurl>
-      <repoid>HDP-1.2.1</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-    <repo>
-      <baseurl></baseurl>
-      <repoid>HDP-epel</repoid>
-      <reponame>HDP-epel</reponame>
-      <mirrorslist><![CDATA[http://mirrors.fedoraproject.org/mirrorlist?repo=epel-5&arch=$basearch]]></mirrorslist>
-    </repo>
-  </os>
-  <os type="suse11">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/suse11/1.x/updates/1.2.1</baseurl>
-      <repoid>HDP-1.2.1</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/suse11</baseurl>
-      <repoid>HDP-UTILS-1.1.0.15</repoid>
-      <reponame>HDP-UTILS</reponame>
-      <mirrorslist></mirrorslist>
-    </repo>
-  </os>
-    <os type="sles11">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/suse11/1.x/updates/1.2.1</baseurl>
-      <repoid>HDP-1.2.1</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/suse11</baseurl>
-      <repoid>HDP-UTILS-1.1.0.15</repoid>
-      <reponame>HDP-UTILS</reponame>
-      <mirrorslist></mirrorslist>
-    </repo>
-  </os>
-</reposinfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/GANGLIA/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/GANGLIA/metainfo.xml
deleted file mode 100644
index 0b21f0f..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/GANGLIA/metainfo.xml
+++ /dev/null
@@ -1,40 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Ganglia Metrics Collection system</comment>
-    <version>3.2.0</version>
-
-    <components>
-        <component>
-            <name>GANGLIA_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>GANGLIA_MONITOR</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>MONITOR_WEBSERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/HBASE/configuration/hbase-policy.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/HBASE/configuration/hbase-policy.xml
deleted file mode 100644
index e45f23c..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/HBASE/configuration/hbase-policy.xml
+++ /dev/null
@@ -1,53 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HRegionInterface protocol implementations (i.e.
-    clients talking to HRegionServers).
-    The ACL is a comma-separated list of user and group names. The user and
-    group lists are separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.admin.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterInterface protocol implementations (i.e.
-    clients talking to the HMaster for admin operations).
-    The ACL is a comma-separated list of user and group names. The user and
-    group lists are separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.masterregion.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterRegionInterface protocol implementations
-    (for HRegionServers communicating with the HMaster).
-    The ACL is a comma-separated list of user and group names. The user and
-    group lists are separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/HBASE/configuration/hbase-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/HBASE/configuration/hbase-site.xml
deleted file mode 100644
index 149751e..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/HBASE/configuration/hbase-site.xml
+++ /dev/null
@@ -1,345 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>hbase.rootdir</name>
-    <value></value>
-    <description>The directory shared by region servers and into
-    which HBase persists.  The URL should be 'fully-qualified'
-    to include the filesystem scheme.  For example, to specify the
-    HDFS directory '/hbase' where the HDFS instance's namenode is
-    running at namenode.example.org on port 9000, set this value to:
-    hdfs://namenode.example.org:9000/hbase.  By default HBase writes
-    into /tmp.  Change this configuration, or else all data will be lost
-    on machine restart.
-    </description>
-  </property>
-  <property>
-    <name>hbase.cluster.distributed</name>
-    <value>true</value>
-    <description>The mode the cluster will be in. Possible values are
-      false for standalone mode and true for distributed mode.  If
-      false, startup will run all HBase and ZooKeeper daemons together
-      in the one JVM.
-    </description>
-  </property>
-  <property>
-    <name>hbase.tmp.dir</name>
-    <value></value>
-    <description>Temporary directory on the local filesystem.
-    Change this setting to point to a location more permanent
-    than '/tmp' (The '/tmp' directory is often cleared on
-    machine restart).
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.bindAddress</name>
-    <value></value>
-    <description>The bind address for the HBase Master web UI
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.global.memstore.upperLimit</name>
-    <value></value>
-    <description>Maximum size of all memstores in a region server before new
-      updates are blocked and flushes are forced. Defaults to 40% of heap
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.handler.count</name>
-    <value></value>
-    <description>Count of RPC Listener instances spun up on RegionServers.
-    Same property is used by the Master for count of master handlers.
-    Default is 10.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.majorcompaction</name>
-    <value></value>
-    <description>The time (in milliseconds) between 'major' compactions of all
-    HStoreFiles in a region.  Default: 1 day.
-    Set to 0 to disable automated major compactions.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.lease.thread.wakefrequency</name>
-    <value>3000</value>
-    <description>The interval between checks for expired region server leases.
-    This value has been reduced due to the other reduced values above so that
-    the master will notice a dead region server sooner. The default is 15 seconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.global.memstore.lowerLimit</name>
-    <value></value>
-    <description>When memstores are being forced to flush to make room in
-      memory, keep flushing until we hit this mark. Defaults to 35% of heap.
-      Setting this value equal to hbase.regionserver.global.memstore.upperLimit causes
-      the minimum possible flushing to occur when updates are blocked due to
-      memstore limiting.
-    </description>
-  </property>
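-  <!--
-    Illustrative arithmetic (assumed, not default, values): with
-    upperLimit=0.4 and lowerLimit=0.35 on a 4 GB region server heap, updates
-    are blocked once all memstores together reach ~1.6 GB, and forced
-    flushing continues until they drop back below ~1.4 GB.
-  -->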
-  <property>
-    <name>hbase.hregion.memstore.block.multiplier</name>
-    <value></value>
-    <description>Block updates if the memstore has hbase.hregion.memstore.block.multiplier
-    times hbase.hregion.flush.size bytes.  Useful for preventing
-    runaway memstore growth during spikes in update traffic.  Without an
-    upper bound, the memstore fills such that when it flushes, the
-    resultant flush files take a long time to compact or split, or
-    worse, we OOME.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.flush.size</name>
-    <value></value>
-    <description>
-    Memstore will be flushed to disk if size of the memstore
-    exceeds this number of bytes.  Value is checked by a thread that runs
-    every hbase.server.thread.wakefrequency.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.mslab.enabled</name>
-    <value></value>
-    <description>
-      Enables the MemStore-Local Allocation Buffer,
-      a feature which works to prevent heap fragmentation under
-      heavy write loads. This can reduce the frequency of stop-the-world
-      GC pauses on large heaps.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.max.filesize</name>
-    <value></value>
-    <description>
-    Maximum HStoreFile size. If any one of a column families' HStoreFiles has
-    grown to exceed this value, the hosting HRegion is split in two.
-    Default: 1G.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.scanner.caching</name>
-    <value></value>
-    <description>Number of rows that will be fetched when calling next
-    on a scanner if it is not served from (local, client) memory. Higher
-    caching values will enable faster scanners but will eat up more memory,
-    and some calls to next may take longer and longer when the cache is empty.
-    Do not set this value such that the time between invocations is greater
-    than the scanner timeout; i.e. hbase.regionserver.lease.period
-    </description>
-  </property>
-  <property>
-    <name>zookeeper.session.timeout</name>
-    <value></value>
-    <description>ZooKeeper session timeout.
-      HBase passes this to the zk quorum as suggested maximum time for a
-      session (This setting becomes zookeeper's 'maxSessionTimeout').  See
-      http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
-      "The client sends a requested timeout, the server responds with the
-      timeout that it can give the client. " In milliseconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.keyvalue.maxsize</name>
-    <value></value>
-    <description>Specifies the combined maximum allowed size of a KeyValue
-    instance. This is to set an upper boundary for a single entry saved in a
-    storage file. Since entries cannot be split, this helps avoid a situation where a region
-    cannot be split any further because the data is too large. It seems wise
-    to set this to a fraction of the maximum region size. Setting it to zero
-    or less disables the check.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.compactionThreshold</name>
-    <value></value>
-    <description>
-    If more than this number of HStoreFiles in any one HStore
-    (one HStoreFile is written per flush of memstore) then a compaction
-    is run to rewrite all HStoreFiles files as one.  Larger numbers
-    put off compaction but when it runs, it takes longer to complete.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.blockingStoreFiles</name>
-    <value></value>
-    <description>
-    If more than this number of StoreFiles in any one Store
-    (one StoreFile is written per flush of MemStore) then updates are
-    blocked for this HRegion until a compaction is completed, or
-    until hbase.hstore.blockingWaitTime has been exceeded.
-    </description>
-  </property>
-  <property>
-    <name>hfile.block.cache.size</name>
-    <value></value>
-    <description>
-        Percentage of maximum heap (-Xmx setting) to allocate to block cache
-        used by HFile/StoreFile. Default of 0.25 means allocate 25%.
-        Set to 0 to disable but it's not recommended.
-    </description>
-  </property>
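-  <!--
-    Illustrative arithmetic: at the default of 0.25, a region server started
-    with -Xmx8g would reserve roughly 2 GB of heap for the HFile block cache.
-  -->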
-
-  <!-- The following properties configure authentication information for
-       HBase processes when using Kerberos security.  There are no default
-       values, included here for documentation purposes -->
-  <property>
-    <name>hbase.master.keytab.file</name>
-    <value></value>
-    <description>Full path to the kerberos keytab file to use for logging in
-    the configured HMaster server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.kerberos.principal</name>
-    <value></value>
-    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
-    that should be used to run the HMaster process.  The principal name should
-    be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the hostname
-    portion, it will be replaced with the actual hostname of the running
-    instance.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.keytab.file</name>
-    <value></value>
-    <description>Full path to the kerberos keytab file to use for logging in
-    the configured HRegionServer server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.kerberos.principal</name>
-    <value></value>
-    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
-    that should be used to run the HRegionServer process.  The principal name
-    should be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the
-    hostname portion, it will be replaced with the actual hostname of the
-    running instance.  An entry for this principal must exist in the file
-    specified in hbase.regionserver.keytab.file
-    </description>
-  </property>
-
-  <!-- Additional configuration specific to HBase security -->
-  <property>
-    <name>hbase.superuser</name>
-    <value>hbase</value>
-    <description>List of users or groups (comma-separated), who are allowed
-    full privileges, regardless of stored ACLs, across the cluster.
-    Only used when HBase security is enabled.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.region.classes</name>
-    <value></value>
-    <description>A comma-separated list of Coprocessors that are loaded by
-    default on all tables. For any override coprocessor method, these classes
-    will be called in order. After implementing your own Coprocessor, just put
-    it in HBase's classpath and add the fully qualified class name here.
-    A coprocessor can also be loaded on demand by setting HTableDescriptor.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.master.classes</name>
-    <value></value>
-    <description>A comma-separated list of
-      org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
-      loaded by default on the active HMaster process. For any implemented
-      coprocessor methods, the listed classes will be called in order. After
-      implementing your own MasterObserver, just put it in HBase's classpath
-      and add the fully qualified class name here.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.zookeeper.property.clientPort</name>
-    <value>2181</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-    The port at which the clients will connect.
-    </description>
-  </property>
-
-  <!--
-  The following three properties are used together to create the list of
-  host:peer_port:leader_port quorum servers for ZooKeeper.
-  -->
-  <property>
-    <name>hbase.zookeeper.quorum</name>
-    <value></value>
-    <description>Comma separated list of servers in the ZooKeeper Quorum.
-    For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
-    By default this is set to localhost for local and pseudo-distributed modes
-    of operation. For a fully-distributed setup, this should be set to a full
-    list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
-    this is the list of servers which we will start/stop ZooKeeper on.
-    </description>
-  </property>
-  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
-
-  <property>
-    <name>dfs.support.append</name>
-    <value></value>
-    <description>Does HDFS allow appends to files?
-    This is an HDFS configuration, set here so the HDFS client will enable append support.
-    You must ensure that this configuration is also true server-side when running HBase
-    (you will have to restart your cluster after setting it).
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.client.read.shortcircuit</name>
-    <value></value>
-    <description>Enable/Disable short circuit read for your client.
-    Hadoop servers should be configured to allow short circuit read
-    for the hbase user for this to take effect
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.client.read.shortcircuit.skip.checksum</name>
-    <value></value>
-    <description>Enable/disable skipping the checksum check.</description>
-  </property>
-
-  <property>
-    <name>hbase.regionserver.optionalcacheflushinterval</name>
-    <value>10000</value>
-    <description>
-      Amount of time to wait since the last time a region was flushed before
-      invoking an optional cache flush. Default 60,000.
-    </description>
-  </property>
-  <property>
-    <name>hbase.zookeeper.useMulti</name>
-    <value>true</value>
-    <description>Instructs HBase to make use of ZooKeeper's multi-update functionality.
-    This allows certain ZooKeeper operations to complete more quickly and prevents some issues
-    with rare replication failure scenarios (see the release note of HBASE-2611 for an example).
-    IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+
-    and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and will
-    not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
-    </description>
-  </property>
-  
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/HBASE/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/HBASE/metainfo.xml
deleted file mode 100644
index 553fa2b..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/HBASE/metainfo.xml
+++ /dev/null
@@ -1,40 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>mapred</user>
-    <comment>Non-relational distributed database and centralized service for configuration management &amp; synchronization</comment>
-    <version>0.94.5</version>
-
-    <components>
-        <component>
-            <name>HBASE_MASTER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>HBASE_REGIONSERVER</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>HBASE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/HCATALOG/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/HCATALOG/metainfo.xml
deleted file mode 100644
index 1951a5d..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/HCATALOG/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is a comment for the HCATALOG service</comment>
-    <version>0.5.0</version>
-
-    <components>
-        <component>
-            <name>HCAT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/HDFS/configuration/core-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/HDFS/configuration/core-site.xml
deleted file mode 100644
index a312e68..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/HDFS/configuration/core-site.xml
+++ /dev/null
@@ -1,251 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
- <!--
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
- 
-        http://www.apache.org/licenses/LICENSE-2.0
- 
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
- -->
- 
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-<!-- i/o properties -->
-
-  <property>
-    <name>io.file.buffer.size</name>
-    <value>131072</value>
-    <description>The size of buffer for use in sequence files.
-  The size of this buffer should probably be a multiple of hardware
-  page size (4096 on Intel x86), and it determines how much data is
-  buffered during read and write operations.</description>
-  </property>
-
-  <property>
-    <name>io.serializations</name>
-    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
-  </property>
-
-  <property>
-    <name>io.compression.codecs</name>
-    <value></value>
-    <description>A list of the compression codec classes that can be used
-                 for compression/decompression.</description>
-  </property>
-
-  <property>
-    <name>io.compression.codec.lzo.class</name>
-    <value>com.hadoop.compression.lzo.LzoCodec</value>
-    <description>The implementation for lzo codec.</description>
-  </property>
-
-<!-- file system properties -->
-
-  <property>
-    <name>fs.default.name</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>The name of the default file system.  Either the
-  literal string "local" or a host:port for NDFS.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>fs.trash.interval</name>
-    <value>360</value>
-    <description>Number of minutes between trash checkpoints.
-  If zero, the trash feature is disabled.
-  </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.dir</name>
-    <value></value>
-    <description>Determines where on the local filesystem the DFS secondary
-        name node should store the temporary images to merge.
-        If this is a comma-delimited list of directories then the image is
-        replicated in all of the directories for redundancy.
-    </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.edits.dir</name>
-    <value>${fs.checkpoint.dir}</value>
-    <description>Determines where on the local filesystem the DFS secondary
-        name node should store the temporary edits to merge.
-        If this is a comma-delimited list of directories then the edits are
-        replicated in all of the directories for redundancy.
-        The default value is the same as fs.checkpoint.dir.
-    </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.period</name>
-    <value>21600</value>
-    <description>The number of seconds between two periodic checkpoints.
-  </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.size</name>
-    <value>536870912</value>
-    <description>The size of the current edit log (in bytes) that triggers
-       a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
-  </description>
-  </property>
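
fs.checkpoint.period and fs.checkpoint.size form an either/or trigger: the secondary namenode checkpoints when the period elapses or when the edit log grows past the size threshold, whichever comes first. A hypothetical sketch of that decision (not the actual SecondaryNameNode code):

```java
public class CheckpointTriggerSketch {
  static final long PERIOD_MS = 21600L * 1000L; // fs.checkpoint.period, in ms
  static final long SIZE_BYTES = 536870912L;    // fs.checkpoint.size

  // A checkpoint starts as soon as either condition holds.
  static boolean shouldCheckpoint(long lastCheckpointMs, long nowMs,
                                  long editLogBytes) {
    return (nowMs - lastCheckpointMs) >= PERIOD_MS || editLogBytes >= SIZE_BYTES;
  }
}
```
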
-
-  <!-- ipc properties: copied from kryptonite configuration -->
-  <property>
-    <name>ipc.client.idlethreshold</name>
-    <value>8000</value>
-    <description>Defines the threshold number of connections after which
-               connections will be inspected for idleness.
-  </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connection.maxidletime</name>
-    <value>30000</value>
-    <description>The maximum time after which a client will bring down the
-               connection to the server.
-  </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connect.max.retries</name>
-    <value>50</value>
-    <description>Defines the maximum number of retries for IPC connections.</description>
-  </property>
-
-  <!-- Web Interface Configuration -->
-  <property>
-    <name>webinterface.private.actions</name>
-    <value>false</value>
-    <description> If set to true, the web interfaces of JT and NN may contain
-                actions, such as kill job, delete file, etc., that should
-                not be exposed to the public. Enable this option only if the
-                interfaces are reachable by those who have the right authorization.
-  </description>
-  </property>
-
- <property>
-   <name>hadoop.security.authentication</name>
-   <value></value>
-   <description>
-   Set the authentication for the cluster. Valid values are: simple or
-   kerberos.
-   </description>
- </property>
-<property>
-  <name>hadoop.security.authorization</name>
-  <value></value>
-  <description>
-     Enable authorization for different protocols.
-  </description>
-</property>
-
-  <property>
-    <name>hadoop.security.auth_to_local</name>
-    <value></value>
-<description>The mapping from kerberos principal names to local OS user names.
-  The default rule is just "DEFAULT", which maps all principals in your default domain to their first component:
-  both "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" become "omalley" if your default domain is APACHE.ORG.
-The translation rules have 3 sections:
-      base     filter    substitution
-The base consists of a number, which is the number of components in the principal name (excluding the realm), and a pattern for building the name from the sections of the principal name. The pattern uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
-
-[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
-[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
-[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
-
-The filter is a regex in parens that must match the generated string for the rule to apply.
-
-"(.*%admin)" will take any string that ends in "%admin"
-"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
-
-Finally, the substitution is a sed rule to translate a regex into a fixed string.
-
-"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
-"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
-"s/X/Y/g" replaces all of the "X" in the name with "Y"
-
-So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
-
-RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
-DEFAULT
-
-To also translate the names with a second component, you'd make the rules:
-
-RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
-RULE:[2:$1@$0](.*@ACME.COM)s/@.*//
-DEFAULT
-
-If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
-
-RULE:[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/
-DEFAULT
-    </description>
-  </property>
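
The base/filter/substitution pipeline above can be seen end to end by hand-evaluating one rule. The sketch below hard-codes RULE:[2:$1@$0](.*@ACME.COM)s/@.*// and is illustrative only; Hadoop's real evaluator is its KerberosName class.

```java
import java.util.regex.Pattern;

public class AuthToLocalSketch {
  // Hand-evaluates RULE:[2:$1@$0](.*@ACME.COM)s/@.*// for one principal.
  public static String apply(String principal) {
    String[] atSplit = principal.split("@", 2);
    String realm = atSplit[1];                       // $0
    String[] parts = atSplit[0].split("/");          // $1, $2, ...
    if (parts.length != 2) {
      return null;               // base "2" matches only 2-component names
    }
    String candidate = parts[0] + "@" + realm;       // base pattern $1@$0
    if (!Pattern.matches(".*@ACME\\.COM", candidate)) {
      return null;               // filter did not match; rule does not fire
    }
    return candidate.replaceFirst("@.*", "");        // substitution s/@.*//
  }

  public static void main(String[] args) {
    System.out.println(apply("omalley/admin@ACME.COM")); // prints "omalley"
  }
}
```
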
-
-<!--
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("hcat_user").groups</name>
-  <value></value>
-  <description>
-     Proxy group for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("hcat_user").hosts</name>
-  <value></value>
-  <description>
-     Proxy host for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("oozie_user").groups</name>
-  <value></value>
-  <description>
-     Proxy group for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("oozie_user").hosts</name>
-  <value></value>
-  <description>
-     Proxy host for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("templeton_user").groups</name>
-  <value></value>
-  <description>
-    Proxy group for templeton.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("templeton_user").hosts</name>
-  <value></value>
-  <description>
-    Proxy host for templeton.
-  </description>
-</property>
--->
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/HDFS/configuration/hadoop-policy.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/HDFS/configuration/hadoop-policy.xml
deleted file mode 100644
index 900da99..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/HDFS/configuration/hadoop-policy.xml
+++ /dev/null
@@ -1,134 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientProtocol, which is used by user code
-    via the DistributedFileSystem.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
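
Every ACL in this file uses the same "users blank groups" format, so one parsing sketch covers them all. This is a hedged illustration, not Hadoop's actual AccessControlList implementation:

```java
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class ServiceAclSketch {
  private final boolean allowAll;
  private final Set<String> users = new HashSet<>();
  private final Set<String> groups = new HashSet<>();

  public ServiceAclSketch(String acl) {
    allowAll = acl.trim().equals("*");               // "*" authorizes everyone
    if (!allowAll) {
      String[] halves = acl.trim().split(" ", 2);    // users <blank> groups
      if (!halves[0].isEmpty()) {
        users.addAll(Arrays.asList(halves[0].split(",")));
      }
      if (halves.length > 1) {
        groups.addAll(Arrays.asList(halves[1].split(",")));
      }
    }
  }

  public boolean isAuthorized(String user, Set<String> userGroups) {
    if (allowAll || users.contains(user)) {
      return true;
    }
    for (String g : userGroups) {
      if (groups.contains(g)) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) {
    ServiceAclSketch acl = new ServiceAclSketch("alice,bob users,wheel");
    // carol is not listed as a user, but her "wheel" group is authorized.
    System.out.println(acl.isAuthorized("carol",
        new HashSet<>(Arrays.asList("wheel"))));     // prints "true"
  }
}
```
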
-
-  <property>
-    <name>security.client.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
-    for block recovery.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for DatanodeProtocol, which is used by datanodes to
-    communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
-    for updating generation timestamp.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.namenode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for NamenodeProtocol, the protocol used by the secondary
-    namenode to communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.tracker.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterTrackerProtocol, used by the tasktrackers to
-    communicate with the jobtracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.submission.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for JobSubmissionProtocol, used by job clients to
-    communicate with the jobtracker for job submission, querying job status, etc.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.task.umbilical.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
-    tasks to communicate with the parent tasktracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
- <property>
-    <name>security.admin.operations.protocol.acl</name>
-    <value></value>
-    <description>ACL for AdminOperationsProtocol. Used for admin commands.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
-    <value></value>
-    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
-    user-to-groups mappings. The ACL is a comma-separated list of user and
-    group names. The user and group list is separated by a blank,
-    e.g. "alice,bob users,wheel".  A special value of "*" means all
-    users are allowed.</description>
-  </property>
-
-<property>
-    <name>security.refresh.policy.protocol.acl</name>
-    <value></value>
-    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
-    dfsadmin and mradmin commands to refresh the security policy in-effect.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/HDFS/configuration/hdfs-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/HDFS/configuration/hdfs-site.xml
deleted file mode 100644
index db92d4b..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/HDFS/configuration/hdfs-site.xml
+++ /dev/null
@@ -1,415 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-<!-- file system properties -->
-
-  <property>
-    <name>dfs.name.dir</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>Determines where on the local filesystem the DFS name node
-      should store the name table.  If this is a comma-delimited list
-      of directories then the name table is replicated in all of the
-      directories, for redundancy. </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.support.append</name>
-    <value></value>
-    <description>to enable dfs append</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.webhdfs.enabled</name>
-    <value></value>
-    <description>to enable webhdfs</description>
-    <final>true</final>
-  </property>
-
- <property>
-    <name>dfs.datanode.socket.write.timeout</name>
-    <value>0</value>
-    <description>DFS Client write socket timeout</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value></value>
-    <description>Number of failed disks a datanode would tolerate</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.block.local-path-access.user</name>
-    <value></value>
-    <description>the user who is allowed to perform short
-    circuit reads.
-    </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.data.dir</name>
-    <value></value>
-    <description>Determines where on the local filesystem a DFS data node
-  should store its blocks.  If this is a comma-delimited
-  list of directories, then data will be stored in all named
-  directories, typically on different devices.
-  Directories that do not exist are ignored.
-  </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.hosts.exclude</name>
-    <value></value>
-    <description>Names a file that contains a list of hosts that are
-    not permitted to connect to the namenode.  The full pathname of the
-    file must be specified.  If the value is empty, no hosts are
-    excluded.</description>
-  </property>
-
-  <property>
-    <name>dfs.hosts</name>
-    <value></value>
-    <description>Names a file that contains a list of hosts that are
-    permitted to connect to the namenode. The full pathname of the file
-    must be specified.  If the value is empty, all hosts are
-    permitted.</description>
-  </property>
-
-  <property>
-    <name>dfs.replication.max</name>
-    <value>50</value>
-    <description>Maximal block replication.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.replication</name>
-    <value></value>
-    <description>Default block replication.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.heartbeat.interval</name>
-    <value>3</value>
-    <description>Determines datanode heartbeat interval in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.safemode.threshold.pct</name>
-    <value>1.0f</value>
-    <description>
-        Specifies the percentage of blocks that should satisfy
-        the minimal replication requirement defined by dfs.replication.min.
-        Values less than or equal to 0 mean not to start in safe mode.
-        Values greater than 1 will make safe mode permanent.
-        </description>
-  </property>
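
A small sketch of the threshold semantics just described (assumed behavior, not the NameNode's actual safemode code): a value at or below 0 means the namenode does not wait in safe mode, while a value above 1 can never be satisfied, making safe mode permanent until an operator leaves it manually.

```java
public class SafemodeThresholdSketch {
  static boolean thresholdReached(double thresholdPct,
                                  long blocksMeetingMinReplication,
                                  long totalBlocks) {
    if (thresholdPct <= 0) {
      return true;                       // <= 0: never wait in safe mode
    }
    double fraction = (totalBlocks == 0)
        ? 1.0
        : (double) blocksMeetingMinReplication / totalBlocks;
    return fraction >= thresholdPct;     // > 1.0 is unreachable: permanent
  }
}
```
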
-
-  <property>
-    <name>dfs.balance.bandwidthPerSec</name>
-    <value>6250000</value>
-    <description>
-        Specifies the maximum amount of bandwidth that each datanode
-        can utilize for the balancing purpose in term of
-        the number of bytes per second.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.address</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>dfs.datanode.http.address</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>dfs.block.size</name>
-    <value>134217728</value>
-    <description>The default block size for new files.</description>
-  </property>
-
-  <property>
-    <name>dfs.http.address</name>
-    <value></value>
-<description>The address and the base port where the dfs namenode web ui will
-listen on. If the port is 0 then the server will start on a free port.</description>
-<final>true</final>
-</property>
-
-<property>
-<name>dfs.datanode.du.reserved</name>
-<!-- cluster variant -->
-<value></value>
-<description>Reserved space in bytes per volume. Always leave this much space free for non-dfs use.
-</description>
-</property>
-
-<property>
-<name>dfs.datanode.ipc.address</name>
-<value>0.0.0.0:8010</value>
-<description>
-The datanode ipc server address and port.
-If the port is 0 then the server will start on a free port.
-</description>
-</property>
-
-<property>
-<name>dfs.blockreport.initialDelay</name>
-<value>120</value>
-<description>Delay for first block report in seconds.</description>
-</property>
-
-<property>
-<name>dfs.datanode.du.pct</name>
-<value>0.85f</value>
-<description>When calculating remaining space, only use this percentage of the real available space
-</description>
-</property>
-
-<property>
-<name>dfs.namenode.handler.count</name>
-<value>40</value>
-<description>The number of server threads for the namenode.</description>
-</property>
-
-<property>
-<name>dfs.datanode.max.xcievers</name>
-<value>4096</value>
-<description>PRIVATE CONFIG VARIABLE</description>
-</property>
-
-<!-- Permissions configuration -->
-
-<property>
-<name>dfs.umaskmode</name>
-<value>077</value>
-<description>
-The octal umask used when creating files and directories.
-</description>
-</property>
-
-<property>
-<name>dfs.web.ugi</name>
-<!-- cluster variant -->
-<value>gopher,gopher</value>
-<description>The user account used by the web interface.
-Syntax: USERNAME,GROUP1,GROUP2, ...
-</description>
-</property>
-
-<property>
-<name>dfs.permissions</name>
-<value>true</value>
-<description>
-If "true", enable permission checking in HDFS.
-If "false", permission checking is turned off,
-but all other behavior is unchanged.
-Switching from one parameter value to the other does not change the mode,
-owner or group of files or directories.
-</description>
-</property>
-
-<property>
-<name>dfs.permissions.supergroup</name>
-<value>hdfs</value>
-<description>The name of the group of super-users.</description>
-</property>
-
-<property>
-<name>dfs.namenode.handler.count</name>
-<value>100</value>
-<description>Added to grow Queue size so that more client connections are allowed</description>
-</property>
-
-<property>
-<name>ipc.server.max.response.size</name>
-<value>5242880</value>
-</property>
-<property>
-<name>dfs.block.access.token.enable</name>
-<value>true</value>
-<description>
-If "true", access tokens are used as capabilities for accessing datanodes.
-If "false", no access tokens are checked on accessing datanodes.
-</description>
-</property>
-
-<property>
-<name>dfs.namenode.kerberos.principal</name>
-<value></value>
-<description>
-Kerberos principal name for the NameNode
-</description>
-</property>
-
-<property>
-<name>dfs.secondary.namenode.kerberos.principal</name>
-<value></value>
-    <description>
-        Kerberos principal name for the secondary NameNode.
-    </description>
-  </property>
-
-
-<!--
-  This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
--->
-  <property>
-    <name>dfs.namenode.kerberos.https.principal</name>
-    <value></value>
-     <description>The Kerberos principal for the host that the NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.https.principal</name>
-    <value></value>
-    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <!-- cluster variant -->
-    <name>dfs.secondary.http.address</name>
-    <value></value>
-    <description>Address of secondary namenode web server</description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.https.port</name>
-    <value>50490</value>
-    <description>The https port where secondary-namenode binds</description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.principal</name>
-    <value></value>
-    <description>
-      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
-      HTTP SPNEGO specification.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.keytab</name>
-    <value></value>
-    <description>
-      The Kerberos keytab file with the credentials for the
-      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.kerberos.principal</name>
-    <value></value>
- <description>
-        The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.keytab.file</name>
-    <value></value>
- <description>
-        Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.keytab.file</name>
-    <value></value>
-  <description>
-        Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.keytab.file</name>
-    <value></value>
- <description>
-        The filename of the keytab file for the DataNode.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.https.port</name>
-    <value>50470</value>
- <description>The https port where namenode binds</description>
-
-  </property>
-
-  <property>
-    <name>dfs.https.address</name>
-    <value></value>
-  <description>The https address where namenode binds</description>
-
-  </property>
-
-  <property>
-    <name>dfs.datanode.data.dir.perm</name>
-    <value></value>
-<description>The permissions that should be set on dfs.data.dir
-directories. The datanode will not come up if the permissions are
-different on existing dfs.data.dir directories. If the directories
-don't exist, they will be created with this permission.</description>
-  </property>
-
-  <property>
-  <name>dfs.access.time.precision</name>
-  <value>0</value>
-  <description>The access time for an HDFS file is precise up to this value.
-               The default value is 1 hour. Setting a value of 0 disables
-               access times for HDFS.
-  </description>
-</property>
-
-<property>
- <name>dfs.cluster.administrators</name>
- <value> hdfs</value>
- <description>ACL specifying who can view the default servlets in HDFS</description>
-</property>
-
-<property>
-  <name>ipc.server.read.threadpool.size</name>
-  <value>5</value>
-  <description></description>
-</property>
-
-<property>
-  <name>dfs.datanode.failed.volumes.tolerated</name>
-  <value>0</value>
-  <description>Number of failed disks a datanode would tolerate</description>
-</property>
-
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/HDFS/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/HDFS/metainfo.xml
deleted file mode 100644
index 1b185e1..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/HDFS/metainfo.xml
+++ /dev/null
@@ -1,46 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Apache Hadoop Distributed File System</comment>
-    <version>1.1.2</version>
-
-    <components>
-        <component>
-            <name>NAMENODE</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>DATANODE</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>SECONDARY_NAMENODE</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>HDFS_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/HIVE/configuration/hive-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/HIVE/configuration/hive-site.xml
deleted file mode 100644
index 7d35558..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/HIVE/configuration/hive-site.xml
+++ /dev/null
@@ -1,138 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<configuration>
-  <property>
-    <name>hive.metastore.local</name>
-    <value>false</value>
-    <description>controls whether to connect to a remote metastore server or
-    open a new metastore server in the Hive Client JVM</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionURL</name>
-    <value></value>
-    <description>JDBC connect string for a JDBC metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionDriverName</name>
-    <value>com.mysql.jdbc.Driver</value>
-    <description>Driver class name for a JDBC metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionUserName</name>
-    <value></value>
-    <description>username to use against metastore database</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionPassword</name>
-    <value></value>
-    <description>password to use against metastore database</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.warehouse.dir</name>
-    <value>/apps/hive/warehouse</value>
-    <description>location of default database for the warehouse</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.sasl.enabled</name>
-    <value></value>
-    <description>If true, the metastore thrift interface will be secured with SASL.
-     Clients must authenticate with Kerberos.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.kerberos.keytab.file</name>
-    <value></value>
-    <description>The path to the Kerberos Keytab file containing the metastore
-     thrift server's service principal.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.kerberos.principal</name>
-    <value></value>
-    <description>The service principal for the metastore thrift server. The special
-    string _HOST will be replaced automatically with the correct host name.</description>
-  </property>
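
The _HOST convention in the description above can be shown in a few lines. A minimal sketch of the assumed behavior (mirroring what Hadoop's SecurityUtil.getServerPrincipal does for its own principals):

```java
import java.net.InetAddress;

public class HostPrincipalSketch {
  static String resolvePrincipal(String configured) throws Exception {
    // Replace the literal "_HOST" with this machine's canonical host name.
    String fqdn = InetAddress.getLocalHost().getCanonicalHostName();
    return configured.replace("_HOST", fqdn.toLowerCase());
  }

  public static void main(String[] args) throws Exception {
    // e.g. "hive/_HOST@EXAMPLE.COM" -> "hive/node1.example.com@EXAMPLE.COM"
    System.out.println(resolvePrincipal("hive/_HOST@EXAMPLE.COM"));
  }
}
```
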
-
-  <property>
-    <name>hive.metastore.cache.pinobjtypes</name>
-    <value>Table,Database,Type,FieldSchema,Order</value>
-    <description>List of comma separated metastore object types that should be pinned in the cache</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.uris</name>
-    <value></value>
-    <description>URI for client to contact metastore server</description>
-  </property>
-
-  <property>
-    <name>hive.semantic.analyzer.factory.impl</name>
-    <value>org.apache.hivealog.cli.HCatSemanticAnalyzerFactory</value>
-    <description>controls which SemanticAnalyzerFactory implementation class is used by the CLI</description>
-  </property>
-
-  <property>
-    <name>hadoop.clientside.fs.operations</name>
-    <value>true</value>
-    <description>FS operations are owned by client</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.client.socket.timeout</name>
-    <value>60</value>
-    <description>MetaStore Client socket timeout in seconds</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.execute.setugi</name>
-    <value>true</value>
-    <description>In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that it is best effort: if the client sets it to true and the server sets it to false, the client setting will be ignored.</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.enabled</name>
-    <value>true</value>
-    <description>enable or disable the hive client authorization</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.manager</name>
-    <value>org.apache.hcatalog.security.HdfsAuthorizationProvider</value>
-    <description>the hive client authorization manager class name.
-    The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  </description>
-  </property>
-
-  <property>
-    <name>hive.server2.enable.doAs</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>fs.hdfs.impl.disable.cache</name>
-    <value>true</value>
-  </property>
-
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/HIVE/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/HIVE/metainfo.xml
deleted file mode 100644
index 6a52064..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/HIVE/metainfo.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
-    <version>0.10.0</version>
-
-    <components>        
-        <component>
-            <name>HIVE_METASTORE</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>HIVE_SERVER</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>MYSQL_SERVER</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>HIVE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/MAPREDUCE/configuration/capacity-scheduler.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/MAPREDUCE/configuration/capacity-scheduler.xml
deleted file mode 100644
index 8034d19..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/MAPREDUCE/configuration/capacity-scheduler.xml
+++ /dev/null
@@ -1,195 +0,0 @@
-<?xml version="1.0"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- This is the configuration file for the resource manager in Hadoop. -->
-<!-- You can configure various scheduling parameters related to queues. -->
-<!-- The properties for a queue follow a naming convention,such as, -->
-<!-- mapred.capacity-scheduler.queue.<queue-name>.property-name. -->
-
-<configuration>
-
-  <property>
-    <name>mapred.capacity-scheduler.maximum-system-jobs</name>
-    <value>3000</value>
-    <description>Maximum number of jobs in the system which can be initialized
-     concurrently by the CapacityScheduler.
-    </description>    
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.capacity</name>
-    <value>100</value>
-    <description>Percentage of the number of slots in the cluster that are
-      to be available for jobs in this queue.
-    </description>    
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.maximum-capacity</name>
-    <value>-1</value>
-    <description>
-	maximum-capacity defines a limit beyond which a queue cannot use the capacity of the cluster.
-	This provides a means to limit how much excess capacity a queue can use. By default, there is no limit.
-	The maximum-capacity of a queue can only be greater than or equal to its minimum capacity.
-        Default value of -1 implies a queue can use complete capacity of the cluster.
-
-        This property can be used to curtail certain jobs which are long running in nature from occupying more than a
-        certain percentage of the cluster, which, in the absence of pre-emption, could lead to the capacity guarantees of
-        other queues being affected.
-        
-        One important thing to note is that maximum-capacity is a percentage, so based on the cluster's capacity
-        the max capacity would change. So if a large number of nodes or racks get added to the cluster, the max capacity in
-        absolute terms would increase accordingly.
-    </description>    
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.supports-priority</name>
-    <value>false</value>
-    <description>If true, priorities of jobs will be taken into 
-      account in scheduling decisions.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.minimum-user-limit-percent</name>
-    <value>100</value>
-    <description> Each queue enforces a limit on the percentage of resources 
-    allocated to a user at any given time, if there is competition for them. 
-    This user limit can vary between a minimum and maximum value. The former
-    depends on the number of users who have submitted jobs, and the latter is
-    set to this property value. For example, suppose the value of this 
-    property is 25. If two users have submitted jobs to a queue, no single 
-    user can use more than 50% of the queue resources. If a third user submits
-    a job, no single user can use more than 33% of the queue resources. With 4 
-    or more users, no user can use more than 25% of the queue's resources. A 
-    value of 100 implies no user limits are imposed. 
-    </description>
-  </property>
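
The worked example in this description reduces to taking the larger of an even split and the configured floor. A simplified sketch of that arithmetic (the real CapacityScheduler computes limits in slots, not raw percentages):

```java
public class UserLimitSketch {
  // With a minimum-user-limit-percent of 25: 2 users -> 50%, 3 -> 33%, 4+ -> 25%.
  static int userLimitPercent(int activeUsers, int minUserLimitPercent) {
    // Each user gets an even share of the queue, but never less than the
    // configured floor; a floor of 100 imposes no per-user limit at all.
    return Math.max(100 / activeUsers, minUserLimitPercent);
  }

  public static void main(String[] args) {
    for (int users = 1; users <= 5; users++) {
      System.out.println(users + " users -> " + userLimitPercent(users, 25) + "%");
    }
  }
}
```
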
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.user-limit-factor</name>
-    <value>1</value>
-    <description>The multiple of the queue capacity which can be configured to 
-    allow a single user to acquire more slots. 
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks</name>
-    <value>200000</value>
-    <description>The maximum number of tasks, across all jobs in the queue, 
-    which can be initialized concurrently. Once the queue's jobs exceed this 
-    limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks-per-user</name>
-    <value>100000</value>
-    <description>The maximum number of tasks per-user, across all of the 
-    user's jobs in the queue, which can be initialized concurrently. Once the 
-    user's jobs exceed this limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.init-accept-jobs-factor</name>
-    <value>10</value>
-    <description>The multiple of (maximum-system-jobs * queue-capacity) used to 
-    determine the number of jobs which are accepted by the scheduler.  
-    </description>
-  </property>
-
-  <!-- The default configuration settings for the capacity task scheduler -->
-  <!-- The default values would be applied to all the queues which don't have -->
-  <!-- the appropriate property for the particular queue -->
-  <property>
-    <name>mapred.capacity-scheduler.default-supports-priority</name>
-    <value>false</value>
-    <description>If true, priorities of jobs will be taken into 
-      account in scheduling decisions by default in a job queue.
-    </description>
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.default-minimum-user-limit-percent</name>
-    <value>100</value>
-    <description>The percentage of the resources limited to a particular user
-      for the job queue at any given point of time by default.
-    </description>
-  </property>
-
-
-  <property>
-    <name>mapred.capacity-scheduler.default-user-limit-factor</name>
-    <value>1</value>
-    <description>The default multiple of queue-capacity which is used to 
-    determine the amount of slots a single user can consume concurrently.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.default-maximum-active-tasks-per-queue</name>
-    <value>200000</value>
-    <description>The default maximum number of tasks, across all jobs in the 
-    queue, which can be initialized concurrently. Once the queue's jobs exceed 
-    this limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.default-maximum-active-tasks-per-user</name>
-    <value>100000</value>
-    <description>The default maximum number of tasks per-user, across all of 
-    the user's jobs in the queue, which can be initialized concurrently. Once 
-    the user's jobs exceed this limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.default-init-accept-jobs-factor</name>
-    <value>10</value>
-    <description>The default multiple of (maximum-system-jobs * queue-capacity) 
-    used to determine the number of jobs which are accepted by the scheduler.  
-    </description>
-  </property>
-
-  <!-- Capacity scheduler Job Initialization configuration parameters -->
-  <property>
-    <name>mapred.capacity-scheduler.init-poll-interval</name>
-    <value>5000</value>
-    <description>The amount of time in milliseconds which is used to poll 
-    the job queues for jobs to initialize.
-    </description>
-  </property>
-  <property>
-    <name>mapred.capacity-scheduler.init-worker-threads</name>
-    <value>5</value>
-    <description>Number of worker threads used by the initialization
-    poller to initialize jobs in a set of queues. If the number is equal
-    to the number of job queues, each thread initializes jobs in exactly
-    one queue. If it is smaller, each thread is assigned a set of queues.
-    If it is larger, the number of threads is capped at the number of
-    job queues.
-    </description>
-  </property>
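
The three cases in this description amount to capping the thread count at the queue count and dealing queues out across threads. A hypothetical sketch (not the JobInitializationPoller itself):

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class InitPollerSketch {
  // Returns, for each worker thread, the queues it will initialize.
  static List<List<String>> assign(List<String> queues, int configuredThreads) {
    int threads = Math.min(configuredThreads, queues.size()); // cap at #queues
    List<List<String>> perThread = new ArrayList<>();
    for (int i = 0; i < threads; i++) {
      perThread.add(new ArrayList<>());
    }
    for (int q = 0; q < queues.size(); q++) {
      perThread.get(q % threads).add(queues.get(q));          // round-robin
    }
    return perThread;
  }

  public static void main(String[] args) {
    System.out.println(assign(Arrays.asList("default", "research", "ops"), 2));
    // prints [[default, ops], [research]]
  }
}
```
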
-
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/MAPREDUCE/configuration/core-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/MAPREDUCE/configuration/core-site.xml
deleted file mode 100644
index 3a2af49..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/MAPREDUCE/configuration/core-site.xml
+++ /dev/null
@@ -1,20 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<configuration>
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/MAPREDUCE/configuration/mapred-queue-acls.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/MAPREDUCE/configuration/mapred-queue-acls.xml
deleted file mode 100644
index ce12380..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/MAPREDUCE/configuration/mapred-queue-acls.xml
+++ /dev/null
@@ -1,39 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- mapred-queue-acls.xml -->
-<configuration>
-
-
-<!-- queue default -->
-
-  <property>
-    <name>mapred.queue.default.acl-submit-job</name>
-    <value>*</value>
-  </property>
-
-  <property>
-    <name>mapred.queue.default.acl-administer-jobs</name>
-    <value>*</value>
-  </property>
-
-  <!-- END ACLs -->
-
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/MAPREDUCE/configuration/mapred-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/MAPREDUCE/configuration/mapred-site.xml
deleted file mode 100644
index 11a72b1..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/MAPREDUCE/configuration/mapred-site.xml
+++ /dev/null
@@ -1,531 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-<!-- i/o properties -->
-
-  <property>
-    <name>io.sort.mb</name>
-    <value></value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>io.sort.record.percent</name>
-    <value>.2</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>io.sort.spill.percent</name>
-    <value></value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>io.sort.factor</name>
-    <value>100</value>
-    <description>No description</description>
-  </property>
-
-<!-- map/reduce properties -->
-
-<property>
-  <name>mapred.tasktracker.tasks.sleeptime-before-sigkill</name>
-  <value>250</value>
-  <description>Normally, this is the amount of time before killing
-  processes, and the recommended default is 5 seconds, i.e. a value of
-  5000 here.  In this case, we are using it solely to blast tasks before
-  killing them, and killing them very quickly (1/4 second) to guarantee
-  that we do not leave VMs around for later jobs.
-  </description>
-</property>
-
-  <property>
-    <name>mapred.job.tracker.handler.count</name>
-    <value>50</value>
-    <description>
-    The number of server threads for the JobTracker. This should be roughly
-    4% of the number of tasktracker nodes.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.system.dir</name>
-    <value>/mapred/system</value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker.http.address</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <!-- cluster specific -->
-    <name>mapred.local.dir</name>
-    <value></value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-
-  <property>
-  <name>mapreduce.cluster.administrators</name>
-  <value> hadoop</value>
-  </property>
-
-  <property>
-    <name>mapred.reduce.parallel.copies</name>
-    <value>30</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.tasktracker.map.tasks.maximum</name>
-    <value></value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.tasktracker.reduce.tasks.maximum</name>
-    <value></value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>tasktracker.http.threads</name>
-    <value>50</value>
-  </property>
-
-  <property>
-    <name>mapred.map.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>If true, then multiple instances of some map tasks
-               may be executed in parallel.</description>
-  </property>
-
-  <property>
-    <name>mapred.reduce.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>If true, then multiple instances of some reduce tasks
-               may be executed in parallel.</description>
-  </property>
-
-  <property>
-    <name>mapred.reduce.slowstart.completed.maps</name>
-    <value>0.05</value>
-  </property>
-
-  <property>
-    <name>mapred.inmem.merge.threshold</name>
-    <value>1000</value>
-    <description>The threshold, in terms of the number of files
-  for the in-memory merge process. When we accumulate the threshold number of
-  files, we initiate the in-memory merge and spill to disk. A value of 0 or
-  less indicates that no threshold applies, and the merge instead depends only
-  on the ramfs's memory consumption.
-  </description>
-  </property>
-
-  <property>
-    <name>mapred.job.shuffle.merge.percent</name>
-    <value>0.66</value>
-    <description>The usage threshold at which an in-memory merge will be
-  initiated, expressed as a percentage of the total memory allocated to
-  storing in-memory map outputs, as defined by
-  mapred.job.shuffle.input.buffer.percent.
-  </description>
-  </property>
-
-  <property>
-    <name>mapred.job.shuffle.input.buffer.percent</name>
-    <value>0.7</value>
-    <description>The percentage of memory to be allocated from the maximum heap
-  size to storing map outputs during the shuffle.
-  </description>
-  </property>
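
mapred.inmem.merge.threshold and the two shuffle percentages above cooperate: the in-memory merge starts when either the file-count threshold or the memory-usage threshold is crossed. A hypothetical sketch of that trigger (not the actual ReduceTask shuffle code):

```java
public class ShuffleMergeSketch {
  static final int INMEM_MERGE_THRESHOLD = 1000; // mapred.inmem.merge.threshold
  static final float MERGE_PERCENT = 0.66f;      // mapred.job.shuffle.merge.percent
  static final float BUFFER_PERCENT = 0.7f;      // mapred.job.shuffle.input.buffer.percent

  static boolean shouldMerge(long maxHeapBytes, long inMemoryBytes,
                             int inMemoryMapOutputs) {
    long shuffleBuffer = (long) (maxHeapBytes * BUFFER_PERCENT);
    boolean countTrigger = INMEM_MERGE_THRESHOLD > 0
        && inMemoryMapOutputs >= INMEM_MERGE_THRESHOLD;
    boolean memoryTrigger = inMemoryBytes >= (long) (shuffleBuffer * MERGE_PERCENT);
    return countTrigger || memoryTrigger;
  }
}
```
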
-
-  <property>
-    <name>mapred.map.output.compression.codec</name>
-    <value></value>
-    <description>If the map outputs are compressed, how should they be
-      compressed?
-    </description>
-  </property>
-
-<property>
-  <name>mapred.output.compression.type</name>
-  <value>BLOCK</value>
-  <description>If the job outputs are to be compressed as SequenceFiles, how should
-               they be compressed? Should be one of NONE, RECORD or BLOCK.
-  </description>
-</property>
-
-
-  <property>
-    <name>mapred.jobtracker.completeuserjobs.maximum</name>
-    <value>0</value>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.taskScheduler</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.restart.recover</name>
-    <value>false</value>
-    <description>"true" to enable (job) recovery upon restart,
-               "false" to start afresh
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.reduce.input.buffer.percent</name>
-    <value>0.0</value>
-    <description>The percentage of memory, relative to the maximum heap size, to
-  retain map outputs during the reduce. When the shuffle is concluded, any
-  remaining map outputs in memory must consume less than this threshold before
-  the reduce can begin.
-  </description>
-  </property>
-
- <property>
-  <name>mapreduce.reduce.input.limit</name>
-  <value>10737418240</value>
-  <description>The limit on the input size of the reduce. (This value
-  is 10 Gb.)  If the estimated input size of the reduce is greater than
-  this value, job is failed. A value of -1 means that there is no limit
-  set. </description>
-</property>
-
-
-  <!-- copied from kryptonite configuration -->
-  <property>
-    <name>mapred.compress.map.output</name>
-    <value></value>
-  </property>
-
-
-  <property>
-    <name>mapred.task.timeout</name>
-    <value>600000</value>
-    <description>The number of milliseconds before a task will be
-  terminated if it neither reads an input, writes an output, nor
-  updates its status string.
-  </description>
-  </property>
-
-  <property>
-    <name>jetty.connector</name>
-    <value>org.mortbay.jetty.nio.SelectChannelConnector</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.task.tracker.task-controller</name>
-    <value></value>
-   <description>
-     TaskController which is used to launch and manage task execution.
-  </description>
-  </property>
-
-  <property>
-    <name>mapred.child.root.logger</name>
-    <value>INFO,TLA</value>
-  </property>
-
-  <property>
-    <name>mapred.child.java.opts</name>
-    <value></value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.cluster.map.memory.mb</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.cluster.reduce.memory.mb</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.job.map.memory.mb</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.job.reduce.memory.mb</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.cluster.max.map.memory.mb</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.cluster.max.reduce.memory.mb</name>
-    <value></value>
-  </property>
-
-<property>
-  <name>mapred.hosts</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.hosts.exclude</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.max.tracker.blacklists</name>
-  <value>16</value>
-  <description>
-    If a node is reported as blacklisted by 16 successful jobs within the timeout window, it will be graylisted.
-  </description>
-</property>
-
-<property>
-  <name>mapred.healthChecker.script.path</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.healthChecker.interval</name>
-  <value>135000</value>
-</property>
-
-<property>
-  <name>mapred.healthChecker.script.timeout</name>
-  <value>60000</value>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.active</name>
-  <value>false</value>
-  <description>Indicates if persistency of job status information is
-  active or not.
-  </description>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.hours</name>
-  <value>1</value>
-  <description>The number of hours job status information is persisted in DFS.
-    The job status information will be available after it drops out of the
-    memory queue and between jobtracker restarts. With a zero value the job
-    status information is not persisted at all in DFS.
-  </description>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.dir</name>
-  <value></value>
-  <description>The directory where the job status information is persisted
-   in a file system, to be available after it drops out of the memory queue
-   and between jobtracker restarts.
-  </description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.retirejob.check</name>
-  <value>10000</value>
-</property>
-
-<property>
-  <name>mapred.jobtracker.retirejob.interval</name>
-  <value>0</value>
-</property>
-
-<property>
-  <name>mapred.job.tracker.history.completed.location</name>
-  <value>/mapred/history/done</value>
-  <description>No description</description>
-</property>
-
-<property>
-  <name>mapred.task.maxvmem</name>
-  <value></value>
-  <final>true</final>
-   <description>No description</description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.maxtasks.per.job</name>
-  <value></value>
-  <final>true</final>
-  <description>The maximum number of tasks for a single job.
-  A value of -1 indicates that there is no maximum.  </description>
-</property>
-
-<property>
-  <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
-  <value>false</value>
-</property>
-
-<property>
-  <name>mapred.userlog.retain.hours</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.job.reuse.jvm.num.tasks</name>
-  <value>1</value>
-  <description>
-    How many tasks to run per JVM. If set to -1, there is no limit.
-  </description>
-  <final>true</final>
-</property>
-
-<property>
-  <name>mapreduce.jobtracker.kerberos.principal</name>
-  <value></value>
-  <description>
-      JobTracker user name key.
- </description>
-</property>
-
-<property>
-  <name>mapreduce.tasktracker.kerberos.principal</name>
-   <value></value>
-  <description>
-       TaskTracker user name key. "_HOST" is replaced by the host name of the task tracker.
-   </description>
-</property>
-
-
-  <property>
-    <name>hadoop.job.history.user.location</name>
-    <value>none</value>
-    <final>true</final>
-  </property>
-
-
- <property>
-   <name>mapreduce.jobtracker.keytab.file</name>
-   <value></value>
-   <description>
-       The keytab for the jobtracker principal.
-   </description>
-
-</property>
-
- <property>
-   <name>mapreduce.tasktracker.keytab.file</name>
-   <value></value>
-    <description>The filename of the keytab for the task tracker</description>
- </property>
-
- <property>
-   <name>mapreduce.jobtracker.staging.root.dir</name>
-   <value>/user</value>
- <description>The path prefix for where the staging directories should be placed. The next level is always the user's
-   name. It is a path in the default file system.</description>
- </property>
-
- <property>
-      <name>mapreduce.tasktracker.group</name>
-      <value>hadoop</value>
-      <description>The group that the task controller uses for accessing the task controller binary. The mapred user must be a member, and regular users should *not* be members.</description>
-
- </property>
-
-  <property>
-    <name>mapreduce.jobtracker.split.metainfo.maxsize</name>
-    <value>50000000</value>
-    <final>true</final>
-     <description>If the size of the split metainfo file is larger than this, the JobTracker will fail the job during
-    initialization.
-   </description>
-  </property>
-  <property>
-    <name>mapreduce.history.server.embedded</name>
-    <value>false</value>
-    <description>Should the job history server be embedded within the
-    JobTracker process?</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapreduce.history.server.http.address</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>Http address of the history server</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.kerberos.principal</name>
-    <!-- cluster variant -->
-  <value></value>
-    <description>Job history user name key (must map to the same user as the
-    JT user).</description>
-  </property>
-
- <property>
-   <name>mapreduce.jobhistory.keytab.file</name>
-    <!-- cluster variant -->
-   <value></value>
-   <description>The keytab for the job history server principal.</description>
- </property>
-
-<property>
-  <name>mapred.jobtracker.blacklist.fault-timeout-window</name>
-  <value>180</value>
-  <description>
-    3-hour sliding window (value is in minutes)
-  </description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.blacklist.fault-bucket-width</name>
-  <value>15</value>
-  <description>
-    15-minute bucket size (value is in minutes)
-  </description>
-</property>
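-
Together, the two blacklist settings above define a sliding fault window; a quick check with the values from this file shows how many buckets the window holds:

    # Fault-window arithmetic implied by the two properties above.
    window_minutes = 180   # mapred.jobtracker.blacklist.fault-timeout-window
    bucket_minutes = 15    # mapred.jobtracker.blacklist.fault-bucket-width
    print(window_minutes // bucket_minutes)  # 12 buckets tracked per TaskTracker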
-
-<property>
-  <name>mapred.queue.names</name>
-  <value>default</value>
-  <description> Comma separated list of queues configured for this jobtracker.</description>
-</property>
-
-</configuration>
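Files such as the mapred-site.xml deleted above are plain Hadoop configuration XML, so their name/value pairs can be inspected with a few lines of Python. The sketch below uses only the standard library; the file path in the usage comment is a placeholder:

    # Minimal reader for Hadoop-style *-site.xml files (name/value pairs).
    import xml.etree.ElementTree as ET

    def read_site_xml(path):
        props = {}
        for prop in ET.parse(path).getroot().findall("property"):
            props[prop.findtext("name")] = prop.findtext("value") or ""
        return props

    # Usage (hypothetical path):
    # print(read_site_xml("mapred-site.xml").get("mapred.queue.names"))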
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/MAPREDUCE/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/MAPREDUCE/metainfo.xml
deleted file mode 100644
index 79d219b..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/MAPREDUCE/metainfo.xml
+++ /dev/null
@@ -1,41 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>mapred</user>
-    <comment>Apache Hadoop Distributed Processing Framework</comment>
-    <version>1.1.2</version>
-
-    <components>
-        <component>
-            <name>JOBTRACKER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>TASKTRACKER</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>MAPREDUCE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/NAGIOS/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/NAGIOS/metainfo.xml
deleted file mode 100644
index bd7de07..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/NAGIOS/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Nagios Monitoring and Alerting system</comment>
-    <version>3.2.3</version>
-
-    <components>
-        <component>
-            <name>NAGIOS_SERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/OOZIE/configuration/oozie-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/OOZIE/configuration/oozie-site.xml
deleted file mode 100644
index 1665ba8..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/OOZIE/configuration/oozie-site.xml
+++ /dev/null
@@ -1,245 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-        
-       http://www.apache.org/licenses/LICENSE-2.0
-  
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->     
-
-<configuration>
-
-<!--
-    Refer to the oozie-default.xml file for the complete list of
-    Oozie configuration properties and their default values.
--->
-  <property>
-    <name>oozie.base.url</name>
-    <value>http://localhost:11000/oozie</value>
-    <description>Base Oozie URL.</description>
-   </property>
-
-  <property>
-    <name>oozie.system.id</name>
-    <value>oozie-${user.name}</value>
-    <description>
-    The Oozie system ID.
-    </description>
-   </property>
-
-   <property>
-     <name>oozie.systemmode</name>
-     <value>NORMAL</value>
-     <description>
-     System mode for Oozie at startup.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.AuthorizationService.security.enabled</name>
-     <value>true</value>
-     <description>
-     Specifies whether security (user name/admin role) is enabled or not.
-     If disabled, any user can manage the Oozie system and manage any job.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.PurgeService.older.than</name>
-     <value>30</value>
-     <description>
-     Jobs older than this value, in days, will be purged by the PurgeService.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.PurgeService.purge.interval</name>
-     <value>3600</value>
-     <description>
-     Interval at which the purge service will run, in seconds.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.queue.size</name>
-     <value>1000</value>
-     <description>Max callable queue size</description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.threads</name>
-     <value>10</value>
-     <description>Number of threads used for executing callables</description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.callable.concurrency</name>
-     <value>3</value>
-     <description>
-     Maximum concurrency for a given callable type.
-     Each command is a callable type (submit, start, run, signal, job, jobs, suspend, resume, etc.).
-     Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
-     All commands that use action executors (action-start, action-end, action-kill and action-check) use
-     the action type as the callable type.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.coord.normal.default.timeout</name>
-     <value>120</value>
-     <description>Default timeout for a coordinator action input check (in minutes) for a normal job.
-      -1 means an infinite timeout.</description>
-   </property>
-
-   <property>
-     <name>oozie.db.schema.name</name>
-     <value>oozie</value>
-     <description>
-      Oozie database name.
-     </description>
-   </property>
-
-    <property>
-      <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
-      <value> </value>
-      <description>
-      Whitelisted job tracker for Oozie service.
-      </description>
-    </property>
-   
-    <property>
-      <name>oozie.authentication.type</name>
-      <value>simple</value>
-      <description>
-      </description>
-    </property>
-   
-    <property>
-      <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
-      <value> </value>
-      <description>
-      </description>
-    </property>
-
-    <property>
-      <name>oozie.service.WorkflowAppService.system.libpath</name>
-      <value>/user/${user.name}/share/lib</value>
-      <description>
-      System library path to use for workflow applications.
-      This path is added to workflow application if their job properties sets
-      the property 'oozie.use.system.libpath' to true.
-      </description>
-    </property>
-
-    <property>
-      <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
-      <value>false</value>
-      <description>
-      If set to true, submissions of MapReduce and Pig jobs will include
-      automatically the system library path, thus not requiring users to
-      specify where the Pig JAR files are. Instead, the ones from the system
-      library path are used.
-      </description>
-    </property>
-    <property>
-      <name>oozie.authentication.kerberos.name.rules</name>
-      <value>
-        RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/
-        RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/
-        RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
-        RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
-        DEFAULT
-        </value>
-      <description>The mapping from kerberos principal names to local OS user names.</description>
-    </property>
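-
The RULE lines above use Hadoop's auth_to_local syntax: [2:$1@$0] rewrites a two-component principal such as tt/host@REALM into tt@REALM, the result is matched against the regex in parentheses, and the s/.../.../ substitution produces the local user. Below is a rough Python approximation of one rule, with a concrete realm and target user standing in for the TODO placeholders (the principal, realm, and user are illustrative assumptions):

    # Approximation of: RULE:[2:$1@$0]([jt]t@.*EXAMPLE.COM)s/.*/mapred/
    import re

    def apply_rule(principal, pattern, replacement):
        user_part, realm = principal.split("@", 1)
        components = user_part.split("/")
        if len(components) != 2:      # this rule only applies to 2-part principals
            return None
        short = components[0] + "@" + realm       # the [2:$1@$0] format string
        if re.match(pattern, short):
            return re.sub(r".*", replacement, short, count=1)
        return None

    print(apply_rule("tt/worker01@EXAMPLE.COM", r"[jt]t@.*EXAMPLE\.COM", "mapred"))
    # -> mapred: the TaskTracker principal maps to the local mapred user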
-    <property>
-      <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
-      <value>*=/etc/hadoop/conf</value>
-      <description>
-          Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
-          the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
-          used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
-          the relevant Hadoop *-site.xml files. If the path is relative, it is
-          looked up within the Oozie configuration directory; the path can also
-          be absolute (i.e. pointing to Hadoop client conf/ directories in the
-          local filesystem).
-      </description>
-    </property>
-    <property>
-        <name>oozie.service.ActionService.executor.ext.classes</name>
-        <value>
-            org.apache.oozie.action.email.EmailActionExecutor,
-            org.apache.oozie.action.hadoop.HiveActionExecutor,
-            org.apache.oozie.action.hadoop.ShellActionExecutor,
-            org.apache.oozie.action.hadoop.SqoopActionExecutor,
-            org.apache.oozie.action.hadoop.DistcpActionExecutor
-        </value>
-    </property>
-
-    <property>
-        <name>oozie.service.SchemaService.wf.ext.schemas</name>
-        <value>shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd</value>
-    </property>
-    <property>
-        <name>oozie.service.JPAService.create.db.schema</name>
-        <value>false</value>
-        <description>
-            Creates Oozie DB.
-
-            If set to true, it creates the DB schema if it does not exist; if the DB schema exists, this is a NOP.
-            If set to false, it does not create the DB schema; if the DB schema does not exist, startup fails.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.driver</name>
-        <value>org.apache.derby.jdbc.EmbeddedDriver</value>
-        <description>
-            JDBC driver class.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.url</name>
-        <value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
-        <description>
-            JDBC URL.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.username</name>
-        <value>sa</value>
-        <description>
-            DB user name.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.password</name>
-        <value> </value>
-        <description>
-            DB user password.
-
-            IMPORTANT: if the password is empty, leave a one-space string; the service trims the value,
-                       and if it is empty, Configuration assumes it is NULL.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.pool.max.active.conn</name>
-        <value>10</value>
-        <description>
-             Max number of connections.
-        </description>
-    </property>
-</configuration>
\ No newline at end of file
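After variable expansion, the JPAService values above yield an embedded Derby database under oozie.data.dir. A short sketch of the ${...} expansion, with an assumed data directory (Oozie itself resolves these from its configuration):

    # How the Derby JDBC URL above expands, with an assumed oozie.data.dir.
    oozie_conf = {
        "oozie.data.dir": "/var/lib/oozie/data",   # hypothetical
        "oozie.db.schema.name": "oozie",           # from this file
    }
    url = "jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true"
    for key, val in oozie_conf.items():
        url = url.replace("${" + key + "}", val)
    print(url)  # jdbc:derby:/var/lib/oozie/data/oozie-db;create=true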
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/OOZIE/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/OOZIE/metainfo.xml
deleted file mode 100644
index 83ccb06..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/OOZIE/metainfo.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>System for workflow coordination and execution of Apache Hadoop jobs</comment>
-    <version>3.2.0</version>
-
-    <components>
-        <component>
-            <name>OOZIE_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>OOZIE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/PIG/configuration/pig.properties b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/PIG/configuration/pig.properties
deleted file mode 100644
index 01000b5..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/PIG/configuration/pig.properties
+++ /dev/null
@@ -1,52 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Pig default configuration file. All values can be overwritten by pig.properties and command line arguments.
-# see bin/pig -help
-
-# brief logging (no timestamps)
-brief=false
-
-#debug level, INFO is default
-debug=INFO
-
-#verbose print all log messages to screen (default to print only INFO and above to screen)
-verbose=false
-
-#exectype local|mapreduce, mapreduce is default
-exectype=mapreduce
-
-#Enable insertion of information about the script into the hadoop job conf
-pig.script.info.enabled=true
-
-#Do not spill temp files smaller than this size (bytes)
-pig.spill.size.threshold=5000000
-#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
-#This should help reduce the number of files being spilled.
-pig.spill.gc.activation.size=40000000
-
-#the following two parameters are to help estimate the reducer number
-pig.exec.reducers.bytes.per.reducer=1000000000
-pig.exec.reducers.max=999
-
-#Temporary location to store the intermediate data.
-pig.temp.dir=/tmp/
-
-#Threshold for merging FRJoin fragment files
-pig.files.concatenation.threshold=100
-pig.optimistic.files.concatenation=false;
-
-pig.disable.counter=false
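The two reducer-estimation properties near the end of the file above implement a simple heuristic: roughly one reducer per bytes.per.reducer of input, capped at reducers.max. A sketch with an assumed input size:

    # Pig's default reducer estimate, from the two properties above.
    import math

    bytes_per_reducer = 1_000_000_000   # pig.exec.reducers.bytes.per.reducer
    reducers_max = 999                  # pig.exec.reducers.max

    def estimate_reducers(input_bytes):
        return max(1, min(reducers_max, math.ceil(input_bytes / bytes_per_reducer)))

    print(estimate_reducers(250_000_000_000))  # assumed 250 GB input -> 250 reducers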
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/PIG/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/PIG/metainfo.xml
deleted file mode 100644
index 4982fd2..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/PIG/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Scripting platform for analyzing large datasets</comment>
-    <version>0.10.1</version>
-
-    <components>
-        <component>
-            <name>PIG</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/SQOOP/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/SQOOP/metainfo.xml
deleted file mode 100644
index ae0e68b..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/SQOOP/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Tool for transferring bulk data between Apache Hadoop and structured data stores such as relational databases</comment>
-    <version>1.4.2</version>
-
-    <components>
-        <component>
-            <name>SQOOP</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/WEBHCAT/configuration/webhcat-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/WEBHCAT/configuration/webhcat-site.xml
deleted file mode 100644
index 31d0113..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/WEBHCAT/configuration/webhcat-site.xml
+++ /dev/null
@@ -1,126 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- 
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<!-- The default settings for Templeton. -->
-<!-- Edit templeton-site.xml to change settings for your local -->
-<!-- install. -->
-
-<configuration>
-
-  <property>
-    <name>templeton.port</name>
-      <value>50111</value>
-    <description>The HTTP port for the main server.</description>
-  </property>
-
-  <property>
-    <name>templeton.hadoop.conf.dir</name>
-    <value>/etc/hadoop/conf</value>
-    <description>The path to the Hadoop configuration.</description>
-  </property>
-
-  <property>
-    <name>templeton.jar</name>
-    <value>/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar</value>
-    <description>The path to the Templeton jar file.</description>
-  </property>
-
-  <property>
-    <name>templeton.libjars</name>
-    <value>/usr/lib/zookeeper/zookeeper.jar</value>
-    <description>Jars to add to the classpath.</description>
-  </property>
-
-
-  <property>
-    <name>templeton.hadoop</name>
-    <value>/usr/bin/hadoop</value>
-    <description>The path to the Hadoop executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.archive</name>
-    <value>hdfs:///apps/webhcat/pig.tar.gz</value>
-    <description>The path to the Pig archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.path</name>
-    <value>pig.tar.gz/pig/bin/pig</value>
-    <description>The path to the Pig executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hcat</name>
-    <value>/usr/bin/hcat</value>
-    <description>The path to the hcatalog executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.archive</name>
-    <value>hdfs:///apps/webhcat/hive.tar.gz</value>
-    <description>The path to the Hive archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.path</name>
-    <value>hive.tar.gz/hive/bin/hive</value>
-    <description>The path to the Hive executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.properties</name>
-    <value></value>
-    <description>Properties to set when running hive.</description>
-  </property>
-
-
-  <property>
-    <name>templeton.zookeeper.hosts</name>
-    <value></value>
-    <description>ZooKeeper servers, as comma separated host:port pairs</description>
-  </property>
-
-  <property>
-    <name>templeton.storage.class</name>
-    <value>org.apache.hcatalog.templeton.tool.ZooKeeperStorage</value>
-    <description>The class to use as storage</description>
-  </property>
-
-  <property>
-   <name>templeton.override.enabled</name>
-   <value>false</value>
-   <description>
-     Enable the override path in templeton.override.jars
-   </description>
- </property>
-
- <property>
-    <name>templeton.streaming.jar</name>
-    <value>hdfs:///apps/webhcat/hadoop-streaming.jar</value>
-    <description>The hdfs path to the Hadoop streaming jar file.</description>
-  </property> 
-
-  <property>
-    <name>templeton.exec.timeout</name>
-    <value>60000</value>
-    <description>Timeout for the Templeton API.</description>
-  </property>
-
-</configuration>
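With templeton.port set to 50111 above, a WebHCat server can be probed over plain HTTP. A sketch using the standard /templeton/v1/status endpoint against an assumed host name:

    # Probe a WebHCat (Templeton) server on the port configured above.
    import json
    import urllib.request

    url = "http://webhcat-host.example.com:50111/templeton/v1/status"  # host assumed
    with urllib.request.urlopen(url, timeout=60) as resp:
        print(json.load(resp))  # expected: {"status": "ok", "version": "v1"}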
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/WEBHCAT/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/WEBHCAT/metainfo.xml
deleted file mode 100644
index e65992f..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/WEBHCAT/metainfo.xml
+++ /dev/null
@@ -1,31 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is a comment for the WEBHCAT service</comment>
-    <version>0.5.0</version>
-
-    <components>
-        <component>
-            <name>WEBHCAT_SERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/ZOOKEEPER/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/ZOOKEEPER/metainfo.xml
deleted file mode 100644
index 0e21f4f..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.1/services/ZOOKEEPER/metainfo.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is a comment for the ZOOKEEPER service</comment>
-    <version>3.4.5</version>
-
-    <components>
-        <component>
-            <name>ZOOKEEPER_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>ZOOKEEPER_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/repos/repoinfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/repos/repoinfo.xml
deleted file mode 100644
index 54b0644..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/repos/repoinfo.xml
+++ /dev/null
@@ -1,97 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<reposinfo>
-  <os type="centos6">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.2.0/repos/centos6</baseurl>
-      <repoid>HDP-1.2.0</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-    <repo>
-      <baseurl></baseurl>
-      <repoid>HDP-epel</repoid>
-      <reponame>HDP-epel</reponame>
-      <mirrorslist><![CDATA[http://mirrors.fedoraproject.org/mirrorlist?repo=epel-6&arch=$basearch]]></mirrorslist>
-    </repo>
-  </os>
-  <os type="centos5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.2.0/repos/centos5</baseurl>
-      <repoid>HDP-1.2.0</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-    <repo>
-      <baseurl></baseurl>
-      <repoid>HDP-epel</repoid>
-      <reponame>HDP-epel</reponame>
-      <mirrorslist><![CDATA[http://mirrors.fedoraproject.org/mirrorlist?repo=epel-5&arch=$basearch]]></mirrorslist>
-    </repo>
-  </os>
-  <os type="redhat6">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.2.0/repos/centos6</baseurl>
-      <repoid>HDP-1.2.0</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-    <repo>
-      <baseurl></baseurl>
-      <repoid>HDP-epel</repoid>
-      <reponame>HDP-epel</reponame>
-      <mirrorslist><![CDATA[http://mirrors.fedoraproject.org/mirrorlist?repo=epel-6&arch=$basearch]]></mirrorslist>
-    </repo>
-  </os>
-  <os type="redhat5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.2.0/repos/centos5</baseurl>
-      <repoid>HDP-1.2.0</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-    <repo>
-      <baseurl></baseurl>
-      <repoid>HDP-epel</repoid>
-      <reponame>HDP-epel</reponame>
-      <mirrorslist><![CDATA[http://mirrors.fedoraproject.org/mirrorlist?repo=epel-5&arch=$basearch]]></mirrorslist>
-    </repo>
-  </os>
-  <os type="suse11">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.2.0/repos/suse11</baseurl>
-      <repoid>HDP-1.2.0</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/suse11</baseurl>
-      <repoid>HDP-UTILS-1.1.0.15</repoid>
-      <reponame>HDP-UTILS</reponame>
-      <mirrorslist></mirrorslist>
-    </repo>
-  </os>
-    <os type="sles11">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.2.0/repos/suse11</baseurl>
-      <repoid>HDP-1.2.0</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/suse11</baseurl>
-      <repoid>HDP-UTILS-1.1.0.15</repoid>
-      <reponame>HDP-UTILS</reponame>
-      <mirrorslist></mirrorslist>
-    </repo>
-  </os>
-</reposinfo>
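A repoinfo.xml like the one deleted above is easy to audit mechanically. The sketch below lists each OS family's repoid/baseurl pairs, falling back to a marker when only a mirror list is given (the file path is a placeholder):

    # List configured repositories per OS family from a repoinfo.xml file.
    import xml.etree.ElementTree as ET

    def list_repos(path):
        for os_el in ET.parse(path).getroot().findall("os"):
            for repo in os_el.findall("repo"):
                yield (os_el.get("type"),
                       repo.findtext("repoid"),
                       repo.findtext("baseurl") or "(mirrorlist only)")

    # Usage (hypothetical path):
    # for os_type, repoid, url in list_repos("repoinfo.xml"):
    #     print(os_type, repoid, url)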
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/GANGLIA/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/GANGLIA/metainfo.xml
deleted file mode 100644
index 0b21f0f..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/GANGLIA/metainfo.xml
+++ /dev/null
@@ -1,40 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Ganglia Metrics Collection system</comment>
-    <version>3.2.0</version>
-
-    <components>
-        <component>
-            <name>GANGLIA_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>GANGLIA_MONITOR</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>MONITOR_WEBSERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HBASE/configuration/hbase-policy.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HBASE/configuration/hbase-policy.xml
deleted file mode 100644
index e45f23c..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HBASE/configuration/hbase-policy.xml
+++ /dev/null
@@ -1,53 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HRegionInterface protocol implementations (i.e.
-    clients talking to HRegionServers).
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.admin.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterInterface protocol implementation (i.e.
-    clients talking to HMaster for admin operations).
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.masterregion.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterRegionInterface protocol implementations
-    (for HRegionServers communicating with HMaster).
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HBASE/configuration/hbase-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HBASE/configuration/hbase-site.xml
deleted file mode 100644
index c4b3651..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HBASE/configuration/hbase-site.xml
+++ /dev/null
@@ -1,334 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>hbase.rootdir</name>
-    <value></value>
-    <description>The directory shared by region servers and into
-    which HBase persists.  The URL should be 'fully-qualified'
-    to include the filesystem scheme.  For example, to specify the
-    HDFS directory '/hbase' where the HDFS instance's namenode is
-    running at namenode.example.org on port 9000, set this value to:
-    hdfs://namenode.example.org:9000/hbase.  By default HBase writes
-    into /tmp.  Change this configuration, or else all data will be lost
-    on machine restart.
-    </description>
-  </property>
-  <property>
-    <name>hbase.cluster.distributed</name>
-    <value>true</value>
-    <description>The mode the cluster will be in. Possible values are
-      false for standalone mode and true for distributed mode.  If
-      false, startup will run all HBase and ZooKeeper daemons together
-      in the one JVM.
-    </description>
-  </property>
-  <property>
-    <name>hbase.tmp.dir</name>
-    <value></value>
-    <description>Temporary directory on the local filesystem.
-    Change this setting to point to a location more permanent
-    than '/tmp' (The '/tmp' directory is often cleared on
-    machine restart).
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.bindAddress</name>
-    <value></value>
-    <description>The bind address for the HBase Master web UI
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.global.memstore.upperLimit</name>
-    <value></value>
-    <description>Maximum size of all memstores in a region server before new
-      updates are blocked and flushes are forced. Defaults to 40% of heap
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.handler.count</name>
-    <value></value>
-    <description>Count of RPC Listener instances spun up on RegionServers.
-    Same property is used by the Master for count of master handlers.
-    Default is 10.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.majorcompaction</name>
-    <value></value>
-    <description>The time (in milliseconds) between 'major' compactions of all
-    HStoreFiles in a region.  Default: 1 day.
-    Set to 0 to disable automated major compactions.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.lease.thread.wakefrequency</name>
-    <value>3000</value>
-    <description>The interval between checks for expired region server leases.
-    This value has been reduced due to the other reduced values above so that
-    the master will notice a dead region server sooner. The default is 15 seconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.global.memstore.lowerLimit</name>
-    <value></value>
-    <description>When memstores are being forced to flush to make room in
-      memory, keep flushing until we hit this mark. Defaults to 35% of heap.
-      This value equal to hbase.regionserver.global.memstore.upperLimit causes
-      the minimum possible flushing to occur when updates are blocked due to
-      memstore limiting.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.block.multiplier</name>
-    <value></value>
-    <description>Block updates if the memstore has hbase.hregion.memstore.block.multiplier
-    times hbase.hregion.flush.size bytes.  Useful for preventing runaway
-    memstore growth during spikes in update traffic.  Without an
-    upper bound, the memstore fills such that when it flushes, the
-    resultant flush files take a long time to compact or split, or,
-    worse, we OOM.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.flush.size</name>
-    <value></value>
-    <description>
-    Memstore will be flushed to disk if size of the memstore
-    exceeds this number of bytes.  Value is checked by a thread that runs
-    every hbase.server.thread.wakefrequency.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.mslab.enabled</name>
-    <value></value>
-    <description>
-      Enables the MemStore-Local Allocation Buffer,
-      a feature which works to prevent heap fragmentation under
-      heavy write loads. This can reduce the frequency of stop-the-world
-      GC pauses on large heaps.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.max.filesize</name>
-    <value></value>
-    <description>
-    Maximum HStoreFile size. If any one of a column families' HStoreFiles has
-    grown to exceed this value, the hosting HRegion is split in two.
-    Default: 1G.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.scanner.caching</name>
-    <value></value>
-    <description>Number of rows that will be fetched when calling next
-    on a scanner if it is not served from (local, client) memory. Higher
-    caching values will enable faster scanners but will eat up more memory
-    and some calls of next may take longer and longer times when the cache is empty.
-    Do not set this value such that the time between invocations is greater
-    than the scanner timeout; i.e. hbase.regionserver.lease.period
-    </description>
-  </property>
-  <property>
-    <name>zookeeper.session.timeout</name>
-    <value></value>
-    <description>ZooKeeper session timeout.
-      HBase passes this to the zk quorum as suggested maximum time for a
-      session (This setting becomes zookeeper's 'maxSessionTimeout').  See
-      http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
-      "The client sends a requested timeout, the server responds with the
-      timeout that it can give the client. " In milliseconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.keyvalue.maxsize</name>
-    <value></value>
-    <description>Specifies the combined maximum allowed size of a KeyValue
-    instance. This is to set an upper boundary for a single entry saved in a
-    storage file. Since they cannot be split it helps avoiding that a region
-    cannot be split any further because the data is too large. It seems wise
-    to set this to a fraction of the maximum region size. Setting it to zero
-    or less disables the check.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.compactionThreshold</name>
-    <value></value>
-    <description>
-    If more than this number of HStoreFiles in any one HStore
-    (one HStoreFile is written per flush of memstore) then a compaction
-    is run to rewrite all HStoreFiles files as one.  Larger numbers
-    put off compaction but when it runs, it takes longer to complete.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.blockingStoreFiles</name>
-    <value></value>
-    <description>
-    If more than this number of StoreFiles in any one Store
-    (one StoreFile is written per flush of MemStore) then updates are
-    blocked for this HRegion until a compaction is completed, or
-    until hbase.hstore.blockingWaitTime has been exceeded.
-    </description>
-  </property>
-  <property>
-    <name>hfile.block.cache.size</name>
-    <value></value>
-    <description>
-        Percentage of maximum heap (-Xmx setting) to allocate to block cache
-        used by HFile/StoreFile. Default of 0.25 means allocate 25%.
-        Set to 0 to disable but it's not recommended.
-    </description>
-  </property>
-
-  <!-- The following properties configure authentication information for
-       HBase processes when using Kerberos security.  There are no default
-       values, included here for documentation purposes -->
-  <property>
-    <name>hbase.master.keytab.file</name>
-    <value></value>
-    <description>Full path to the kerberos keytab file to use for logging in
-    the configured HMaster server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.kerberos.principal</name>
-    <value></value>
-    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
-    that should be used to run the HMaster process.  The principal name should
-    be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the hostname
-    portion, it will be replaced with the actual hostname of the running
-    instance.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.keytab.file</name>
-    <value></value>
-    <description>Full path to the kerberos keytab file to use for logging in
-    the configured HRegionServer server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.kerberos.principal</name>
-    <value></value>
-    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
-    that should be used to run the HRegionServer process.  The principal name
-    should be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the
-    hostname portion, it will be replaced with the actual hostname of the
-    running instance.  An entry for this principal must exist in the file
-    specified in hbase.regionserver.keytab.file
-    </description>
-  </property>
-
-  <!-- Additional configuration specific to HBase security -->
-  <property>
-    <name>hbase.superuser</name>
-    <value>hbase</value>
-    <description>List of users or groups (comma-separated), who are allowed
-    full privileges, regardless of stored ACLs, across the cluster.
-    Only used when HBase security is enabled.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.region.classes</name>
-    <value></value>
-    <description>A comma-separated list of Coprocessors that are loaded by
-    default on all tables. For any override coprocessor method, these classes
-    will be called in order. After implementing your own Coprocessor, just put
-    it in HBase's classpath and add the fully qualified class name here.
-    A coprocessor can also be loaded on demand by setting HTableDescriptor.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.master.classes</name>
-    <value></value>
-    <description>A comma-separated list of
-      org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
-      loaded by default on the active HMaster process. For any implemented
-      coprocessor methods, the listed classes will be called in order. After
-      implementing your own MasterObserver, just put it in HBase's classpath
-      and add the fully qualified class name here.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.zookeeper.property.clientPort</name>
-    <value>2181</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-    The port at which the clients will connect.
-    </description>
-  </property>
-
-  <!--
-  The following three properties are used together to create the list of
-  host:peer_port:leader_port quorum servers for ZooKeeper.
-  -->
-  <property>
-    <name>hbase.zookeeper.quorum</name>
-    <value></value>
-    <description>Comma separated list of servers in the ZooKeeper Quorum.
-    For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
-    By default this is set to localhost for local and pseudo-distributed modes
-    of operation. For a fully-distributed setup, this should be set to a full
-    list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
-    this is the list of servers which we will start/stop ZooKeeper on.
-    </description>
-  </property>
-  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
-
-  <property>
-    <name>dfs.support.append</name>
-    <value></value>
-    <description>Does HDFS allow appends to files?
-    This is an HDFS config, set here so the HDFS client will enable append
-    support. You must ensure that this config is also true server-side when
-    running HBase (you will have to restart your cluster after setting it).
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.client.read.shortcircuit</name>
-    <value></value>
-    <description>Enable/Disable short circuit read for your client.
-    Hadoop servers should be configured to allow short circuit read
-    for the hbase user for this to take effect
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.client.read.shortcircuit.skip.checksum</name>
-    <value></value>
-    <description>Enable/disable skipping the checksum check.</description>
-  </property>
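[Editor's note] A sketch of the client/server pairing the two descriptions above imply; the values are illustrative, and both sides must agree before short-circuit reads take effect:

```xml
<!-- Client side (hbase-site.xml) -->
<property>
  <name>dfs.client.read.shortcircuit</name>
  <value>true</value>
</property>
<!-- Server side (hdfs-site.xml): grant the hbase user local block access -->
<property>
  <name>dfs.block.local-path-access.user</name>
  <value>hbase</value>
</property>
```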
-
-  <property>
-    <name>hbase.regionserver.optionalcacheflushinterval</name>
-    <value>10000</value>
-    <description>
-      Amount of time, in milliseconds, to wait since the last time a region
-      was flushed before invoking an optional cache flush. Default 60,000.
-    </description>
-  </property>
-
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HBASE/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HBASE/metainfo.xml
deleted file mode 100644
index c91d9f0..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HBASE/metainfo.xml
+++ /dev/null
@@ -1,40 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>mapred</user>
-    <comment>Non-relational distributed database and centralized service for configuration management &amp; synchronization</comment>
-    <version>0.94.2</version>
-
-    <components>
-        <component>
-            <name>HBASE_MASTER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>HBASE_REGIONSERVER</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>HBASE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HCATALOG/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HCATALOG/metainfo.xml
deleted file mode 100644
index 1951a5d..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HCATALOG/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is a comment for the HCATALOG service</comment>
-    <version>0.5.0</version>
-
-    <components>
-        <component>
-            <name>HCAT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HDFS/configuration/core-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HDFS/configuration/core-site.xml
deleted file mode 100644
index a312e68..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HDFS/configuration/core-site.xml
+++ /dev/null
@@ -1,251 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
- <!--
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
- 
-        http://www.apache.org/licenses/LICENSE-2.0
- 
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
- -->
- 
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-<!-- i/o properties -->
-
-  <property>
-    <name>io.file.buffer.size</name>
-    <value>131072</value>
-    <description>The size of buffer for use in sequence files.
-  The size of this buffer should probably be a multiple of hardware
-  page size (4096 on Intel x86), and it determines how much data is
-  buffered during read and write operations.</description>
-  </property>
-
-  <property>
-    <name>io.serializations</name>
-    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
-  </property>
-
-  <property>
-    <name>io.compression.codecs</name>
-    <value></value>
-    <description>A list of the compression codec classes that can be used
-                 for compression/decompression.</description>
-  </property>
-
-  <property>
-    <name>io.compression.codec.lzo.class</name>
-    <value>com.hadoop.compression.lzo.LzoCodec</value>
-    <description>The implementation for lzo codec.</description>
-  </property>
-
-<!-- file system properties -->
-
-  <property>
-    <name>fs.default.name</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>The name of the default file system.  Either the
-  literal string "local" or a host:port for NDFS.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>fs.trash.interval</name>
-    <value>360</value>
-    <description>Number of minutes between trash checkpoints.
-  If zero, the trash feature is disabled.
-  </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.dir</name>
-    <value></value>
-    <description>Determines where on the local filesystem the DFS secondary
-        name node should store the temporary images to merge.
-        If this is a comma-delimited list of directories then the image is
-        replicated in all of the directories for redundancy.
-    </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.edits.dir</name>
-    <value>${fs.checkpoint.dir}</value>
-    <description>Determines where on the local filesystem the DFS secondary
-        name node should store the temporary edits to merge.
-        If this is a comma-delimited list of directories then the edits are
-        replicated in all of the directories for redundancy.
-        The default value is the same as fs.checkpoint.dir.
-    </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.period</name>
-    <value>21600</value>
-    <description>The number of seconds between two periodic checkpoints.
-  </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.size</name>
-    <value>536870912</value>
-    <description>The size of the current edit log (in bytes) that triggers
-       a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
-  </description>
-  </property>
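[Editor's note] Putting the checkpoint properties together, a sketch with hypothetical redundant checkpoint directories; a checkpoint is triggered either when fs.checkpoint.period elapses or when the edit log reaches fs.checkpoint.size bytes, whichever comes first:

```xml
<!-- Placeholder paths; checkpoint every 21600 s or at 512 MB of edits -->
<property>
  <name>fs.checkpoint.dir</name>
  <value>/hadoop/hdfs/namesecondary,/mnt/backup/namesecondary</value>
</property>
<property>
  <name>fs.checkpoint.period</name>
  <value>21600</value>
</property>
<property>
  <name>fs.checkpoint.size</name>
  <value>536870912</value>
</property>
```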
-
-  <!-- ipc properties: copied from kryptonite configuration -->
-  <property>
-    <name>ipc.client.idlethreshold</name>
-    <value>8000</value>
-    <description>Defines the threshold number of connections after which
-               connections will be inspected for idleness.
-  </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connection.maxidletime</name>
-    <value>30000</value>
-    <description>The maximum time after which a client will bring down the
-               connection to the server.
-  </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connect.max.retries</name>
-    <value>50</value>
-    <description>Defines the maximum number of retries for IPC connections.</description>
-  </property>
-
-  <!-- Web Interface Configuration -->
-  <property>
-    <name>webinterface.private.actions</name>
-    <value>false</value>
-    <description> If set to true, the web interfaces of JT and NN may contain
-                actions, such as kill job, delete file, etc., that should
-                not be exposed to the public. Enable this option if the interfaces
-                are only reachable by those who have the right authorization.
-  </description>
-  </property>
-
- <property>
-   <name>hadoop.security.authentication</name>
-   <value></value>
-   <description>
-   Set the authentication for the cluster. Valid values are: simple or
-   kerberos.
-   </description>
- </property>
-<property>
-  <name>hadoop.security.authorization</name>
-  <value></value>
-  <description>
-     Enable authorization for different protocols.
-  </description>
-</property>
-
-  <property>
-    <name>hadoop.security.auth_to_local</name>
-    <value></value>
-<description>The mapping from kerberos principal names to local OS user names.
-  So the default rule is just "DEFAULT", which takes all principals in your default domain to their first component:
-  it maps "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" to "omalley", if your default domain is APACHE.ORG.
-The translation rules have 3 sections:
-      base     filter    substitution
-The base consists of a number, which is the number of components in the principal name excluding the realm, and a pattern for building the name from the sections of the principal name. In the pattern, $0 means the realm, $1 the first component and $2 the second component.
-
-[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
-[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
-[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
-
-The filter is a regex in parens that must match the generated string for the rule to apply.
-
-"(.*%admin)" will take any string that ends in "%admin"
-"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
-
-Finally, the substitution is a sed rule to translate a regex into a fixed string.
-
-"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
-"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
-"s/X/Y/g" replaces all of the "X" in the name with "Y"
-
-So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
-
-RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
-DEFAULT
-
-To also translate the names with a second component, you'd make the rules:
-
-RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
-RULE:[2:$1@$0](.*@ACME.COM)s/@.*//
-DEFAULT
-
-If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
-
-RULE:[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/
-DEFAULT
-    </description>
-  </property>
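[Editor's note] As a concrete sketch of the rule syntax described above; the EXAMPLE.COM realm and the nn/jt service principals are placeholders:

```xml
<property>
  <name>hadoop.security.auth_to_local</name>
  <value>
<!-- Map the JobTracker and NameNode service principals to their local users,
     map any */admin principal to "admin", and fall back to the default rule -->
RULE:[2:$1@$0](jt@EXAMPLE.COM)s/.*/mapred/
RULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/
RULE:[2:$1%$2@$0](.*%admin@EXAMPLE.COM)s/.*/admin/
DEFAULT
  </value>
</property>
```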
-
-<!--
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("hcat_user").groups</name>
-  <value></value>
-  <description>
-     Proxy group for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("hcat_user").hosts</name>
-  <value></value>
-  <description>
-     Proxy host for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("oozie_user").groups</name>
-  <value></value>
-  <description>
-     Proxy group for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("oozie_user").hosts</name>
-  <value></value>
-  <description>
-     Proxy host for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("templeton_user").groups</name>
-  <value></value>
-  <description>
-    Proxy group for templeton.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("templeton_user").hosts</name>
-  <value></value>
-  <description>
-    Proxy host for templeton.
-  </description>
-</property>
--->
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HDFS/configuration/hadoop-policy.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HDFS/configuration/hadoop-policy.xml
deleted file mode 100644
index 900da99..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HDFS/configuration/hadoop-policy.xml
+++ /dev/null
@@ -1,134 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientProtocol, which is used by user code
-    via the DistributedFileSystem.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.client.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
-    for block recovery.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for DatanodeProtocol, which is used by datanodes to
-    communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
-    for updating generation timestamp.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.namenode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for NamenodeProtocol, the protocol used by the secondary
-    namenode to communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.tracker.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterTrackerProtocol, used by the tasktrackers to
-    communicate with the jobtracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.submission.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for JobSubmissionProtocol, used by job clients to
-    communicate with the jobtracker for job submission, querying job status etc.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.task.umbilical.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
-    tasks to communicate with the parent tasktracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
- <property>
-    <name>security.admin.operations.protocol.acl</name>
-    <value></value>
-    <description>ACL for AdminOperationsProtocol. Used for admin commands.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
-    <value></value>
-    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
-    users mappings. The ACL is a comma-separated list of user and
-    group names. The user and group list is separated by a blank. For
-    e.g. "alice,bob users,wheel".  A special value of "*" means all
-    users are allowed.</description>
-  </property>
-
-<property>
-    <name>security.refresh.policy.protocol.acl</name>
-    <value></value>
-    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
-    dfsadmin and mradmin commands to refresh the security policy in-effect.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
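[Editor's note] For contrast with the wide-open "*" values above, a sketch of a restricted ACL in the documented "users groups" format (the names are hypothetical):

```xml
<!-- Only alice, bob, or members of the wheel group may act as HDFS clients -->
<property>
  <name>security.client.protocol.acl</name>
  <value>alice,bob wheel</value>
</property>
```

After editing the policy, it can typically be reloaded without a restart via `hadoop dfsadmin -refreshServiceAcl`.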
-
-
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HDFS/configuration/hdfs-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HDFS/configuration/hdfs-site.xml
deleted file mode 100644
index db92d4b..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HDFS/configuration/hdfs-site.xml
+++ /dev/null
@@ -1,415 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-<!-- file system properties -->
-
-  <property>
-    <name>dfs.name.dir</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>Determines where on the local filesystem the DFS name node
-      should store the name table.  If this is a comma-delimited list
-      of directories then the name table is replicated in all of the
-      directories, for redundancy. </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.support.append</name>
-    <value></value>
-    <description>Whether to enable HDFS append support.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.webhdfs.enabled</name>
-    <value></value>
-    <description>Whether to enable WebHDFS.</description>
-    <final>true</final>
-  </property>
-
- <property>
-    <name>dfs.datanode.socket.write.timeout</name>
-    <value>0</value>
-    <description>DFS Client write socket timeout</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value></value>
-    <description>Number of failed disks a datanode would tolerate.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.block.local-path-access.user</name>
-    <value></value>
-    <description>the user who is allowed to perform short
-    circuit reads.
-    </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.data.dir</name>
-    <value></value>
-    <description>Determines where on the local filesystem a DFS data node
-  should store its blocks.  If this is a comma-delimited
-  list of directories, then data will be stored in all named
-  directories, typically on different devices.
-  Directories that do not exist are ignored.
-  </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.hosts.exclude</name>
-    <value></value>
-    <description>Names a file that contains a list of hosts that are
-    not permitted to connect to the namenode.  The full pathname of the
-    file must be specified.  If the value is empty, no hosts are
-    excluded.</description>
-  </property>
-
-  <property>
-    <name>dfs.hosts</name>
-    <value></value>
-    <description>Names a file that contains a list of hosts that are
-    permitted to connect to the namenode. The full pathname of the file
-    must be specified.  If the value is empty, all hosts are
-    permitted.</description>
-  </property>
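[Editor's note] A sketch of the include/exclude pairing described above; the full pathnames are placeholders, and each file lists one hostname per line:

```xml
<property>
  <name>dfs.hosts</name>
  <value>/etc/hadoop/conf/dfs.include</value>
</property>
<property>
  <name>dfs.hosts.exclude</name>
  <value>/etc/hadoop/conf/dfs.exclude</value>
</property>
```

Changes to either file take effect after running `hadoop dfsadmin -refreshNodes`.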
-
-  <property>
-    <name>dfs.replication.max</name>
-    <value>50</value>
-    <description>Maximal block replication.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.replication</name>
-    <value></value>
-    <description>Default block replication.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.heartbeat.interval</name>
-    <value>3</value>
-    <description>Determines datanode heartbeat interval in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.safemode.threshold.pct</name>
-    <value>1.0f</value>
-    <description>
-        Specifies the percentage of blocks that should satisfy
-        the minimal replication requirement defined by dfs.replication.min.
-        Values less than or equal to 0 mean not to start in safe mode.
-        Values greater than 1 will make safe mode permanent.
-        </description>
-  </property>
-
-  <property>
-    <name>dfs.balance.bandwidthPerSec</name>
-    <value>6250000</value>
-    <description>
-        Specifies the maximum amount of bandwidth that each datanode
-        can utilize for the balancing purpose in term of
-        the number of bytes per second.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.address</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>dfs.datanode.http.address</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>dfs.block.size</name>
-    <value>134217728</value>
-    <description>The default block size for new files.</description>
-  </property>
-
-  <property>
-    <name>dfs.http.address</name>
-    <value></value>
-<description>The address and port on which the NameNode web UI listens.</description>
-<final>true</final>
-</property>
-
-<property>
-<name>dfs.datanode.du.reserved</name>
-<!-- cluster variant -->
-<value></value>
-<description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
-</description>
-</property>
-
-<property>
-<name>dfs.datanode.ipc.address</name>
-<value>0.0.0.0:8010</value>
-<description>
-The datanode ipc server address and port.
-If the port is 0 then the server will start on a free port.
-</description>
-</property>
-
-<property>
-<name>dfs.blockreport.initialDelay</name>
-<value>120</value>
-<description>Delay for first block report in seconds.</description>
-</property>
-
-<property>
-<name>dfs.datanode.du.pct</name>
-<value>0.85f</value>
-<description>When calculating remaining space, only use this percentage of the real available space
-</description>
-</property>
-
-<property>
-<name>dfs.namenode.handler.count</name>
-<value>40</value>
-<description>The number of server threads for the namenode.</description>
-</property>
-
-<property>
-<name>dfs.datanode.max.xcievers</name>
-<value>4096</value>
-<description>PRIVATE CONFIG VARIABLE</description>
-</property>
-
-<!-- Permissions configuration -->
-
-<property>
-<name>dfs.umaskmode</name>
-<value>077</value>
-<description>
-The octal umask used when creating files and directories.
-</description>
-</property>
-
-<property>
-<name>dfs.web.ugi</name>
-<!-- cluster variant -->
-<value>gopher,gopher</value>
-<description>The user account used by the web interface.
-Syntax: USERNAME,GROUP1,GROUP2, ...
-</description>
-</property>
-
-<property>
-<name>dfs.permissions</name>
-<value>true</value>
-<description>
-If "true", enable permission checking in HDFS.
-If "false", permission checking is turned off,
-but all other behavior is unchanged.
-Switching from one parameter value to the other does not change the mode,
-owner or group of files or directories.
-</description>
-</property>
-
-<property>
-<name>dfs.permissions.supergroup</name>
-<value>hdfs</value>
-<description>The name of the group of super-users.</description>
-</property>
-
-<property>
-<name>dfs.namenode.handler.count</name>
-<value>100</value>
-<description>Added to grow Queue size so that more client connections are allowed</description>
-</property>
-
-<property>
-<name>ipc.server.max.response.size</name>
-<value>5242880</value>
-</property>
-<property>
-<name>dfs.block.access.token.enable</name>
-<value>true</value>
-<description>
-If "true", access tokens are used as capabilities for accessing datanodes.
-If "false", no access tokens are checked on accessing datanodes.
-</description>
-</property>
-
-<property>
-<name>dfs.namenode.kerberos.principal</name>
-<value></value>
-<description>
-Kerberos principal name for the NameNode
-</description>
-</property>
-
-<property>
-<name>dfs.secondary.namenode.kerberos.principal</name>
-<value></value>
-    <description>
-        Kerberos principal name for the secondary NameNode.
-    </description>
-  </property>
-
-
-<!--
-  This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
--->
-  <property>
-    <name>dfs.namenode.kerberos.https.principal</name>
-    <value></value>
-     <description>The Kerberos principal for the host that the NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.https.principal</name>
-    <value></value>
-    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <!-- cluster variant -->
-    <name>dfs.secondary.http.address</name>
-    <value></value>
-    <description>Address of secondary namenode web server</description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.https.port</name>
-    <value>50490</value>
-    <description>The https port where secondary-namenode binds</description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.principal</name>
-    <value></value>
-    <description>
-      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
-      HTTP SPNEGO specification.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.keytab</name>
-    <value></value>
-    <description>
-      The Kerberos keytab file with the credentials for the
-      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.kerberos.principal</name>
-    <value></value>
- <description>
-        The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.keytab.file</name>
-    <value></value>
- <description>
-        Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.keytab.file</name>
-    <value></value>
-  <description>
-        Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.keytab.file</name>
-    <value></value>
- <description>
-        The filename of the keytab file for the DataNode.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.https.port</name>
-    <value>50470</value>
- <description>The https port where namenode binds</description>
-
-  </property>
-
-  <property>
-    <name>dfs.https.address</name>
-    <value></value>
-  <description>The https address where namenode binds</description>
-
-  </property>
-
-  <property>
-    <name>dfs.datanode.data.dir.perm</name>
-    <value></value>
-<description>The permissions that should be there on dfs.data.dir
-directories. The datanode will not come up if the permissions are
-different on existing dfs.data.dir directories. If the directories
-don't exist, they will be created with this permission.</description>
-  </property>
-
-  <property>
-  <name>dfs.access.time.precision</name>
-  <value>0</value>
-  <description>The access time for an HDFS file is precise up to this value.
-               The default value is 1 hour. Setting a value of 0 disables
-               access times for HDFS.
-  </description>
-</property>
-
-<property>
- <name>dfs.cluster.administrators</name>
- <value> hdfs</value>
- <description>ACL for who all can view the default servlets in the HDFS</description>
-</property>
-
-<property>
-  <name>ipc.server.read.threadpool.size</name>
-  <value>5</value>
-  <description></description>
-</property>
-
-<property>
-  <name>dfs.datanode.failed.volumes.tolerated</name>
-  <value>0</value>
-  <description>Number of failed disks a datanode would tolerate.</description>
-</property>
-
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HDFS/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HDFS/metainfo.xml
deleted file mode 100644
index 1b185e1..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HDFS/metainfo.xml
+++ /dev/null
@@ -1,46 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Apache Hadoop Distributed File System</comment>
-    <version>1.1.2</version>
-
-    <components>
-        <component>
-            <name>NAMENODE</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>DATANODE</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>SECONDARY_NAMENODE</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>HDFS_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HIVE/configuration/hive-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HIVE/configuration/hive-site.xml
deleted file mode 100644
index 7d35558..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HIVE/configuration/hive-site.xml
+++ /dev/null
@@ -1,138 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<configuration>
-  <property>
-    <name>hive.metastore.local</name>
-    <value>false</value>
-    <description>Controls whether to connect to a remote metastore server or
-    open a new metastore server in the Hive Client JVM.</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionURL</name>
-    <value></value>
-    <description>JDBC connect string for a JDBC metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionDriverName</name>
-    <value>com.mysql.jdbc.Driver</value>
-    <description>Driver class name for a JDBC metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionUserName</name>
-    <value></value>
-    <description>username to use against metastore database</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionPassword</name>
-    <value></value>
-    <description>password to use against metastore database</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.warehouse.dir</name>
-    <value>/apps/hive/warehouse</value>
-    <description>location of default database for the warehouse</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.sasl.enabled</name>
-    <value></value>
-    <description>If true, the metastore thrift interface will be secured with SASL.
-     Clients must authenticate with Kerberos.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.kerberos.keytab.file</name>
-    <value></value>
-    <description>The path to the Kerberos Keytab file containing the metastore
-     thrift server's service principal.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.kerberos.principal</name>
-    <value></value>
-    <description>The service principal for the metastore thrift server. The special
-    string _HOST will be replaced automatically with the correct host name.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.cache.pinobjtypes</name>
-    <value>Table,Database,Type,FieldSchema,Order</value>
-    <description>List of comma separated metastore object types that should be pinned in the cache</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.uris</name>
-    <value></value>
-    <description>URI for client to contact metastore server</description>
-  </property>
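[Editor's note] A sketch of a remote-metastore client configuration consistent with hive.metastore.local=false above; the host names and MySQL URL are placeholders:

```xml
<!-- Placeholder hosts: thrift URI for clients, JDBC URL for the metastore itself -->
<property>
  <name>hive.metastore.uris</name>
  <value>thrift://metastore.example.com:9083</value>
</property>
<property>
  <name>javax.jdo.option.ConnectionURL</name>
  <value>jdbc:mysql://db.example.com/hive?createDatabaseIfNotExist=true</value>
</property>
```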
-
-  <property>
-    <name>hive.semantic.analyzer.factory.impl</name>
-    <value>org.apache.hcatalog.cli.HCatSemanticAnalyzerFactory</value>
-    <description>Controls which SemanticAnalyzerFactory implementation class is used by the CLI.</description>
-  </property>
-
-  <property>
-    <name>hadoop.clientside.fs.operations</name>
-    <value>true</value>
-    <description>FS operations are owned by client</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.client.socket.timeout</name>
-    <value>60</value>
-    <description>MetaStore Client socket timeout in seconds</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.execute.setugi</name>
-    <value>true</value>
-    <description>In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that it is best effort: if the client sets it to true and the server sets it to false, the client setting will be ignored.</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.enabled</name>
-    <value>true</value>
-    <description>enable or disable the hive client authorization</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.manager</name>
-    <value>org.apache.hcatalog.security.HdfsAuthorizationProvider</value>
-    <description>the hive client authorization manager class name.
-    The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  </description>
-  </property>
-
-  <property>
-    <name>hive.server2.enable.doAs</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>fs.hdfs.impl.disable.cache</name>
-    <value>true</value>
-  </property>
-
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HIVE/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HIVE/metainfo.xml
deleted file mode 100644
index 6a52064..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HIVE/metainfo.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
-    <version>0.10.0</version>
-
-    <components>        
-        <component>
-            <name>HIVE_METASTORE</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>HIVE_SERVER</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>MYSQL_SERVER</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>HIVE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/capacity-scheduler.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/capacity-scheduler.xml
deleted file mode 100644
index 8034d19..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/capacity-scheduler.xml
+++ /dev/null
@@ -1,195 +0,0 @@
-<?xml version="1.0"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- This is the configuration file for the resource manager in Hadoop. -->
-<!-- You can configure various scheduling parameters related to queues. -->
-<!-- The properties for a queue follow a naming convention,such as, -->
-<!-- mapred.capacity-scheduler.queue.<queue-name>.property-name. -->
-
-<configuration>
-
-  <property>
-    <name>mapred.capacity-scheduler.maximum-system-jobs</name>
-    <value>3000</value>
-    <description>Maximum number of jobs in the system which can be initialized,
-     concurrently, by the CapacityScheduler.
-    </description>    
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.capacity</name>
-    <value>100</value>
-    <description>Percentage of the number of slots in the cluster that are
-      to be available for jobs in this queue.
-    </description>    
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.maximum-capacity</name>
-    <value>-1</value>
-    <description>
-	maximum-capacity defines a limit beyond which a queue cannot use the capacity of the cluster.
-	This provides a means to limit how much excess capacity a queue can use. By default, there is no limit.
-	The maximum-capacity of a queue can only be greater than or equal to its minimum capacity.
-        Default value of -1 implies a queue can use complete capacity of the cluster.
-
-        This property could be used to keep long-running jobs from occupying more than a
-        certain percentage of the cluster, which, in the absence of pre-emption, could affect
-        the capacity guarantees of other queues.
-
-        One important thing to note is that maximum-capacity is a percentage, so the maximum
-        capacity changes with the cluster's capacity. If a large number of nodes or racks is
-        added to the cluster, the maximum capacity in absolute terms increases accordingly.
-    </description>    
-  </property>
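[Editor's note] A sketch of how capacity and maximum-capacity combine for a second, hypothetical "batch" queue, which would also need to be declared in mapred.queue.names in mapred-site.xml: it is guaranteed 20% of the slots but can never exceed 40%, even when the cluster is otherwise idle.

```xml
<!-- "batch" is a placeholder queue name -->
<property>
  <name>mapred.capacity-scheduler.queue.batch.capacity</name>
  <value>20</value>
</property>
<property>
  <name>mapred.capacity-scheduler.queue.batch.maximum-capacity</name>
  <value>40</value>
</property>
```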
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.supports-priority</name>
-    <value>false</value>
-    <description>If true, priorities of jobs will be taken into 
-      account in scheduling decisions.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.minimum-user-limit-percent</name>
-    <value>100</value>
-    <description> Each queue enforces a limit on the percentage of resources 
-    allocated to a user at any given time, if there is competition for them. 
-    This user limit can vary between a minimum and maximum value. The former
-    depends on the number of users who have submitted jobs, and the latter is
-    set to this property value. For example, suppose the value of this 
-    property is 25. If two users have submitted jobs to a queue, no single 
-    user can use more than 50% of the queue resources. If a third user submits
-    a job, no single user can use more than 33% of the queue resources. With 4 
-    or more users, no user can use more than 25% of the queue's resources. A 
-    value of 100 implies no user limits are imposed. 
-    </description>
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.user-limit-factor</name>
-    <value>1</value>
-    <description>The multiple of the queue capacity which can be configured to 
-    allow a single user to acquire more slots. 
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks</name>
-    <value>200000</value>
-    <description>The maximum number of tasks, across all jobs in the queue, 
-    which can be initialized concurrently. Once the queue's jobs exceed this 
-    limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks-per-user</name>
-    <value>100000</value>
-    <description>The maximum number of tasks per-user, across all the of the 
-    user's jobs in the queue, which can be initialized concurrently. Once the 
-    user's jobs exceed this limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.init-accept-jobs-factor</name>
-    <value>10</value>
-    <description>The multiple of (maximum-system-jobs * queue-capacity) used to
-    determine the number of jobs which are accepted by the scheduler.  
-    </description>
-  </property>
-
-  <!-- The default configuration settings for the capacity task scheduler -->
-  <!-- The default values would be applied to all the queues which don't have -->
-  <!-- the appropriate property for the particular queue -->
-  <property>
-    <name>mapred.capacity-scheduler.default-supports-priority</name>
-    <value>false</value>
-    <description>If true, priorities of jobs will be taken into 
-      account in scheduling decisions by default in a job queue.
-    </description>
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.default-minimum-user-limit-percent</name>
-    <value>100</value>
-    <description>The percentage of the resources limited to a particular user
-      for the job queue at any given point of time by default.
-    </description>
-  </property>
-
-
-  <property>
-    <name>mapred.capacity-scheduler.default-user-limit-factor</name>
-    <value>1</value>
-    <description>The default multiple of queue-capacity which is used to 
-    determine the amount of slots a single user can consume concurrently.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.default-maximum-active-tasks-per-queue</name>
-    <value>200000</value>
-    <description>The default maximum number of tasks, across all jobs in the 
-    queue, which can be initialized concurrently. Once the queue's jobs exceed 
-    this limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.default-maximum-active-tasks-per-user</name>
-    <value>100000</value>
-    <description>The default maximum number of tasks per-user, across all the of 
-    the user's jobs in the queue, which can be initialized concurrently. Once 
-    the user's jobs exceed this limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.default-init-accept-jobs-factor</name>
-    <value>10</value>
-    <description>The default multiple of (maximum-system-jobs * queue-capacity)
-    used to determine the number of jobs which are accepted by the scheduler.  
-    </description>
-  </property>
-
-  <!-- Capacity scheduler Job Initialization configuration parameters -->
-  <property>
-    <name>mapred.capacity-scheduler.init-poll-interval</name>
-    <value>5000</value>
-    <description>The amount of time in milliseconds which is used to poll
-    the job queues for jobs to initialize.
-    </description>
-  </property>
-  <property>
-    <name>mapred.capacity-scheduler.init-worker-threads</name>
-    <value>5</value>
-    <description>Number of worker threads used by the initialization
-    poller to initialize jobs in a set of queues. If this number equals
-    the number of job queues, a single thread initializes the jobs in one
-    queue. If it is smaller, each thread is assigned a set of queues. If it
-    is greater, the number of threads used equals the number of job queues.
-    </description>
-  </property>
-
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/core-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/core-site.xml
deleted file mode 100644
index 3a2af49..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/core-site.xml
+++ /dev/null
@@ -1,20 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<configuration>
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/mapred-queue-acls.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/mapred-queue-acls.xml
deleted file mode 100644
index ce12380..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/mapred-queue-acls.xml
+++ /dev/null
@@ -1,39 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- mapred-queue-acls.xml -->
-<configuration>
-
-
-<!-- queue default -->
-
-  <property>
-    <name>mapred.queue.default.acl-submit-job</name>
-    <value>*</value>
-  </property>
-
-  <property>
-    <name>mapred.queue.default.acl-administer-jobs</name>
-    <value>*</value>
-  </property>
-
-  <!-- END ACLs -->
-
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/mapred-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/mapred-site.xml
deleted file mode 100644
index af4aa53..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/mapred-site.xml
+++ /dev/null
@@ -1,531 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-<!-- i/o properties -->
-
-  <property>
-    <name>io.sort.mb</name>
-    <value></value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>io.sort.record.percent</name>
-    <value>.2</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>io.sort.spill.percent</name>
-    <value></value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>io.sort.factor</name>
-    <value>100</value>
-    <description>No description</description>
-  </property>
-
-<!-- map/reduce properties -->
-
-<property>
-  <name>mapred.tasktracker.tasks.sleeptime-before-sigkill</name>
-  <value>250</value>
-  <description>Normally, this is the amount of time before killing
-  processes, and the recommended default is 5 seconds (a value of
-  5000 here).  In this case, we are using it solely to blast tasks before
-  killing them, and killing them very quickly (1/4 second) to guarantee
-  that we do not leave VMs around for later jobs.
-  </description>
-</property>
-
-  <property>
-    <name>mapred.job.tracker.handler.count</name>
-    <value>50</value>
-    <description>
-    The number of server threads for the JobTracker. This should be roughly
-    4% of the number of tasktracker nodes.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.system.dir</name>
-    <value></value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker.http.address</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <!-- cluster specific -->
-    <name>mapred.local.dir</name>
-    <value></value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-
-  <property>
-  <name>mapreduce.cluster.administrators</name>
-  <value> hadoop</value>
-  </property>
-
-  <property>
-    <name>mapred.reduce.parallel.copies</name>
-    <value>30</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.tasktracker.map.tasks.maximum</name>
-    <value></value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.tasktracker.reduce.tasks.maximum</name>
-    <value></value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>tasktracker.http.threads</name>
-    <value>50</value>
-  </property>
-
-  <property>
-    <name>mapred.map.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>If true, then multiple instances of some map tasks
-               may be executed in parallel.</description>
-  </property>
-
-  <property>
-    <name>mapred.reduce.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>If true, then multiple instances of some reduce tasks
-               may be executed in parallel.</description>
-  </property>
-
-  <property>
-    <name>mapred.reduce.slowstart.completed.maps</name>
-    <value>0.05</value>
-  </property>
-
-  <property>
-    <name>mapred.inmem.merge.threshold</name>
-    <value>1000</value>
-    <description>The threshold, in terms of the number of files,
-  for the in-memory merge process. When we accumulate the threshold number of
-  files we initiate the in-memory merge and spill to disk. A value of 0 or
-  less indicates that no threshold is used and that we instead depend only on
-  the ramfs's memory consumption to trigger the merge.
-  </description>
-  </property>
-
-  <property>
-    <name>mapred.job.shuffle.merge.percent</name>
-    <value>0.66</value>
-    <description>The usage threshold at which an in-memory merge will be
-  initiated, expressed as a percentage of the total memory allocated to
-  storing in-memory map outputs, as defined by
-  mapred.job.shuffle.input.buffer.percent.
-  </description>
-  </property>
-
-  <property>
-    <name>mapred.job.shuffle.input.buffer.percent</name>
-    <value>0.7</value>
-    <description>The percentage of memory to be allocated from the maximum heap
-  size to storing map outputs during the shuffle.
-  </description>
-  </property>
-
-  <property>
-    <name>mapred.map.output.compression.codec</name>
-    <value></value>
-    <description>If the map outputs are compressed, how should they be
-      compressed
-    </description>
-  </property>
-
-<property>
-  <name>mapred.output.compression.type</name>
-  <value>BLOCK</value>
-  <description>If the job outputs are to be compressed as SequenceFiles, how
-               should they be compressed? Should be one of NONE, RECORD or BLOCK.
-  </description>
-</property>
-
-
-  <property>
-    <name>mapred.jobtracker.completeuserjobs.maximum</name>
-    <value>0</value>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.taskScheduler</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.restart.recover</name>
-    <value>false</value>
-    <description>"true" to enable (job) recovery upon restart,
-               "false" to start afresh
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.reduce.input.buffer.percent</name>
-    <value>0.0</value>
-    <description>The percentage of memory, relative to the maximum heap size, to
-  retain map outputs during the reduce. When the shuffle is concluded, any
-  remaining map outputs in memory must consume less than this threshold before
-  the reduce can begin.
-  </description>
-  </property>
-
- <property>
-  <name>mapreduce.reduce.input.limit</name>
-  <value>10737418240</value>
-  <description>The limit on the input size of the reduce. (This value
-  is 10 GB.)  If the estimated input size of the reduce is greater than
-  this value, the job is failed. A value of -1 means that there is no
-  limit set. </description>
-</property>
-
-
-  <!-- copied from kryptonite configuration -->
-  <property>
-    <name>mapred.compress.map.output</name>
-    <value></value>
-  </property>
-
-
-  <property>
-    <name>mapred.task.timeout</name>
-    <value>600000</value>
-    <description>The number of milliseconds before a task will be
-  terminated if it neither reads an input, writes an output, nor
-  updates its status string.
-  </description>
-  </property>
-
-  <property>
-    <name>jetty.connector</name>
-    <value>org.mortbay.jetty.nio.SelectChannelConnector</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.task.tracker.task-controller</name>
-    <value></value>
-   <description>
-     TaskController which is used to launch and manage task execution.
-  </description>
-  </property>
-
-  <property>
-    <name>mapred.child.root.logger</name>
-    <value>INFO,TLA</value>
-  </property>
-
-  <property>
-    <name>mapred.child.java.opts</name>
-    <value></value>
-
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.cluster.map.memory.mb</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.cluster.reduce.memory.mb</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.job.map.memory.mb</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.job.reduce.memory.mb</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.cluster.max.map.memory.mb</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.cluster.max.reduce.memory.mb</name>
-    <value></value>
-  </property>
-
-<property>
-  <name>mapred.hosts</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.hosts.exclude</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.max.tracker.blacklists</name>
-  <value>16</value>
-  <description>
-    If a node is reported blacklisted by 16 successful jobs within the timeout window, it will be graylisted.
-  </description>
-</property>
-
-<property>
-  <name>mapred.healthChecker.script.path</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.healthChecker.interval</name>
-  <value>135000</value>
-</property>
-
-<property>
-  <name>mapred.healthChecker.script.timeout</name>
-  <value>60000</value>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.active</name>
-  <value>false</value>
-  <description>Indicates whether persistence of job status information is
-  active or not.
-  </description>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.hours</name>
-  <value>1</value>
-  <description>The number of hours job status information is persisted in DFS.
-    The job status information will be available after it drops out of the
-    memory queue and between jobtracker restarts. With a zero value the job
-    status information is not persisted at all in DFS.
-  </description>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.dir</name>
-  <value></value>
-  <description>The directory where the job status information is persisted
-   in a file system to be available after it drops out of the memory queue
-   and between jobtracker restarts.
-  </description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.retirejob.check</name>
-  <value>10000</value>
-</property>
-
-<property>
-  <name>mapred.jobtracker.retirejob.interval</name>
-  <value>0</value>
-</property>
-
-<property>
-  <name>mapred.job.tracker.history.completed.location</name>
-  <value>/mapred/history/done</value>
-  <description>No description</description>
-</property>
-
-<property>
-  <name>mapred.task.maxvmem</name>
-  <value></value>
-  <final>true</final>
-   <description>No description</description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.maxtasks.per.job</name>
-  <value></value>
-  <final>true</final>
-  <description>The maximum number of tasks for a single job.
-  A value of -1 indicates that there is no maximum.  </description>
-</property>
-
-<property>
-  <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
-  <value>false</value>
-</property>
-
-<property>
-  <name>mapred.userlog.retain.hours</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.job.reuse.jvm.num.tasks</name>
-  <value>1</value>
-  <description>
-    How many tasks to run per JVM. If set to -1, there is no limit.
-  </description>
-  <final>true</final>
-</property>
-
-<property>
-  <name>mapreduce.jobtracker.kerberos.principal</name>
-  <value></value>
-  <description>
-      JT user name key.
- </description>
-</property>
-
-<property>
-  <name>mapreduce.tasktracker.kerberos.principal</name>
-   <value></value>
-  <description>
-       TT user name key. "_HOST" is replaced by the host name of the task tracker.
-   </description>
-</property>
-
-
-  <property>
-    <name>hadoop.job.history.user.location</name>
-    <value>none</value>
-    <final>true</final>
-  </property>
-
-
- <property>
-   <name>mapreduce.jobtracker.keytab.file</name>
-   <value></value>
-   <description>
-       The keytab for the jobtracker principal.
-   </description>
-
-</property>
-
- <property>
-   <name>mapreduce.tasktracker.keytab.file</name>
-   <value></value>
-    <description>The filename of the keytab for the task tracker</description>
- </property>
-
- <property>
-   <name>mapreduce.jobtracker.staging.root.dir</name>
-   <value>/user</value>
- <description>The Path prefix for where the staging directories should be placed. The next level is always the user's
-   name. It is a path in the default file system.</description>
- </property>
-
- <property>
-      <name>mapreduce.tasktracker.group</name>
-      <value>hadoop</value>
-      <description>The group that the task controller uses for accessing the task controller. The mapred user must be a member, and ordinary users should *not* be members.</description>
-
- </property>
-
-  <property>
-    <name>mapreduce.jobtracker.split.metainfo.maxsize</name>
-    <value>50000000</value>
-    <final>true</final>
-     <description>If the size of the split metainfo file is larger than this,
-    the JobTracker will fail the job during initialization.
-   </description>
-  </property>
-  <property>
-    <name>mapreduce.history.server.embedded</name>
-    <value>false</value>
-    <description>Should the job history server be embedded within the
-    JobTracker process</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapreduce.history.server.http.address</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>Http address of the history server</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.kerberos.principal</name>
-    <!-- cluster variant -->
-  <value></value>
-    <description>Job history user name key (must map to the same user as the
-    JT user).</description>
-  </property>
-
- <property>
-   <name>mapreduce.jobhistory.keytab.file</name>
-    <!-- cluster variant -->
-   <value></value>
-   <description>The keytab for the job history server principal.</description>
- </property>
-
-<property>
-  <name>mapred.jobtracker.blacklist.fault-timeout-window</name>
-  <value>180</value>
-  <description>
-    3-hour sliding window (value is in minutes)
-  </description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.blacklist.fault-bucket-width</name>
-  <value>15</value>
-  <description>
-    15-minute bucket size (value is in minutes)
-  </description>
-</property>
-
-<property>
-  <name>mapred.queue.names</name>
-  <value>default</value>
-  <description> Comma separated list of queues configured for this jobtracker.</description>
-</property>
-
-</configuration>
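
Many of the empty <value></value> entries in the mapred-site.xml deleted above are placeholders that Ambari fills in per cluster; until they are populated they read back as empty strings. A hedged Java sketch (file location assumed) of how that distinction surfaces through Hadoop's Configuration API:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;

    public class MapredSiteProbe {
        public static void main(String[] args) {
            Configuration conf = new Configuration(false);
            conf.addResource(new Path("mapred-site.xml")); // assumed local copy
            // Placeholder values come back empty until Ambari templates them.
            String jobTracker = conf.get("mapred.job.tracker", "");
            System.out.println(jobTracker.isEmpty()
                    ? "mapred.job.tracker is still a placeholder"
                    : "JobTracker at " + jobTracker);
            // Populated numeric values parse normally, e.g. the 50 handler threads above.
            System.out.println("handlers: "
                    + conf.getInt("mapred.job.tracker.handler.count", 10));
        }
    }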
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/metainfo.xml
deleted file mode 100644
index 79d219b..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/metainfo.xml
+++ /dev/null
@@ -1,41 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>mapred</user>
-    <comment>Apache Hadoop Distributed Processing Framework</comment>
-    <version>1.1.2</version>
-
-    <components>
-        <component>
-            <name>JOBTRACKER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>TASKTRACKER</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>MAPREDUCE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>
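
The stack metainfo.xml files removed in this diff all share the shape above: a service user, comment, version, and a list of components tagged MASTER, SLAVE, or CLIENT. As a hypothetical sketch (the local file path is an assumption, and this is not Ambari's actual loader), the JDK's DOM parser is enough to pull the component list out:

    import javax.xml.parsers.DocumentBuilderFactory;
    import org.w3c.dom.Document;
    import org.w3c.dom.Element;
    import org.w3c.dom.NodeList;

    public class MetainfoReader {
        public static void main(String[] args) throws Exception {
            Document doc = DocumentBuilderFactory.newInstance()
                    .newDocumentBuilder()
                    .parse("metainfo.xml"); // assumed local copy
            NodeList components = doc.getElementsByTagName("component");
            for (int i = 0; i < components.getLength(); i++) {
                Element c = (Element) components.item(i);
                String name = c.getElementsByTagName("name").item(0).getTextContent();
                String category = c.getElementsByTagName("category").item(0).getTextContent();
                System.out.println(category + ": " + name); // e.g. MASTER: JOBTRACKER
            }
        }
    }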
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/NAGIOS/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/NAGIOS/metainfo.xml
deleted file mode 100644
index bd7de07..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/NAGIOS/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Nagios Monitoring and Alerting system</comment>
-    <version>3.2.3</version>
-
-    <components>
-        <component>
-            <name>NAGIOS_SERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/OOZIE/configuration/oozie-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/OOZIE/configuration/oozie-site.xml
deleted file mode 100644
index 1665ba8..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/OOZIE/configuration/oozie-site.xml
+++ /dev/null
@@ -1,245 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-        
-       http://www.apache.org/licenses/LICENSE-2.0
-  
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->     
-
-<configuration>
-
-<!--
-    Refer to the oozie-default.xml file for the complete list of
-    Oozie configuration properties and their default values.
--->
-  <property>
-    <name>oozie.base.url</name>
-    <value>http://localhost:11000/oozie</value>
-    <description>Base Oozie URL.</description>
-   </property>
-
-  <property>
-    <name>oozie.system.id</name>
-    <value>oozie-${user.name}</value>
-    <description>
-    The Oozie system ID.
-    </description>
-   </property>
-
-   <property>
-     <name>oozie.systemmode</name>
-     <value>NORMAL</value>
-     <description>
-     System mode for Oozie at startup.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.AuthorizationService.security.enabled</name>
-     <value>true</value>
-     <description>
-     Specifies whether security (user name/admin role) is enabled or not.
-     If disabled, any user can manage the Oozie system and manage any job.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.PurgeService.older.than</name>
-     <value>30</value>
-     <description>
-     Jobs older than this value, in days, will be purged by the PurgeService.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.PurgeService.purge.interval</name>
-     <value>3600</value>
-     <description>
-     Interval at which the purge service will run, in seconds.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.queue.size</name>
-     <value>1000</value>
-     <description>Max callable queue size</description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.threads</name>
-     <value>10</value>
-     <description>Number of threads used for executing callables</description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.callable.concurrency</name>
-     <value>3</value>
-     <description>
-     Maximum concurrency for a given callable type.
-     Each command is a callable type (submit, start, run, signal, job, jobs, suspend,resume, etc).
-     Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
-     All commands that use action executors (action-start, action-end, action-kill and action-check) use
-     the action type as the callable type.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.coord.normal.default.timeout</name>
-     <value>120</value>
-     <description>Default timeout for a coordinator action input check (in minutes) for a normal job.
-      -1 means an infinite timeout.</description>
-   </property>
-
-   <property>
-     <name>oozie.db.schema.name</name>
-     <value>oozie</value>
-     <description>
-      Oozie database name.
-     </description>
-   </property>
-
-    <property>
-      <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
-      <value> </value>
-      <description>
-      Whitelisted job tracker for Oozie service.
-      </description>
-    </property>
-   
-    <property>
-      <name>oozie.authentication.type</name>
-      <value>simple</value>
-      <description>
-      </description>
-    </property>
-   
-    <property>
-      <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
-      <value> </value>
-      <description>
-      </description>
-    </property>
-
-    <property>
-      <name>oozie.service.WorkflowAppService.system.libpath</name>
-      <value>/user/${user.name}/share/lib</value>
-      <description>
-      System library path to use for workflow applications.
-      This path is added to workflow application if their job properties sets
-      the property 'oozie.use.system.libpath' to true.
-      </description>
-    </property>
-
-    <property>
-      <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
-      <value>false</value>
-      <description>
-      If set to true, submissions of MapReduce and Pig jobs will automatically
-      include the system library path, so users do not need to specify where
-      the Pig JAR files are. Instead, the ones from the system library path
-      are used.
-      </description>
-    </property>
-    <property>
-      <name>oozie.authentication.kerberos.name.rules</name>
-      <value>
-        RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/
-        RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/
-        RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
-        RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
-        DEFAULT
-        </value>
-      <description>The mapping from kerberos principal names to local OS user names.</description>
-    </property>
-    <property>
-      <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
-      <value>*=/etc/hadoop/conf</value>
-      <description>
-          Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
-          the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
-          used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
-          the relevant Hadoop *-site.xml files. If the path is relative, it is looked up
-          within the Oozie configuration directory, though the path can be absolute (i.e.
-          pointing to Hadoop client conf/ directories in the local filesystem).
-      </description>
-    </property>
-    <property>
-        <name>oozie.service.ActionService.executor.ext.classes</name>
-        <value>
-            org.apache.oozie.action.email.EmailActionExecutor,
-            org.apache.oozie.action.hadoop.HiveActionExecutor,
-            org.apache.oozie.action.hadoop.ShellActionExecutor,
-            org.apache.oozie.action.hadoop.SqoopActionExecutor,
-            org.apache.oozie.action.hadoop.DistcpActionExecutor
-        </value>
-    </property>
-
-    <property>
-        <name>oozie.service.SchemaService.wf.ext.schemas</name>
-        <value>shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd</value>
-    </property>
-    <property>
-        <name>oozie.service.JPAService.create.db.schema</name>
-        <value>false</value>
-        <description>
-            Creates Oozie DB.
-
-            If set to true, it creates the DB schema if it does not exist; if the schema already exists, this is a no-op.
-            If set to false, it does not create the DB schema, and startup fails if the schema does not exist.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.driver</name>
-        <value>org.apache.derby.jdbc.EmbeddedDriver</value>
-        <description>
-            JDBC driver class.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.url</name>
-        <value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
-        <description>
-            JDBC URL.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.username</name>
-        <value>sa</value>
-        <description>
-            DB user name.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.password</name>
-        <value> </value>
-        <description>
-            DB user password.
-
-            IMPORTANT: if the password is empty, leave a one-space string; the service trims the value,
-                       and if it is empty, Configuration assumes it is NULL.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.pool.max.active.conn</name>
-        <value>10</value>
-        <description>
-             Max number of connections.
-        </description>
-    </property>
-</configuration>
\ No newline at end of file
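
The JDBC URL in the oozie-site.xml deleted above composes itself from ${oozie.data.dir} and ${oozie.db.schema.name}. Purely as an illustration of that placeholder style (this is not Oozie's actual resolution code, and the data dir below is an assumption), a small Java sketch of ${...} expansion:

    import java.util.Map;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class PlaceholderDemo {
        private static final Pattern VAR = Pattern.compile("\\$\\{([^}]+)\\}");

        static String expand(String template, Map<String, String> props) {
            Matcher m = VAR.matcher(template);
            StringBuffer sb = new StringBuffer();
            while (m.find()) {
                // Unknown names are left as-is instead of being dropped.
                m.appendReplacement(sb, Matcher.quoteReplacement(
                        props.getOrDefault(m.group(1), m.group())));
            }
            m.appendTail(sb);
            return sb.toString();
        }

        public static void main(String[] args) {
            Map<String, String> props = Map.of(
                    "oozie.data.dir", "/var/lib/oozie", // assumed data dir
                    "oozie.db.schema.name", "oozie");
            System.out.println(expand(
                    "jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true",
                    props));
            // -> jdbc:derby:/var/lib/oozie/oozie-db;create=true
        }
    }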
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/OOZIE/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/OOZIE/metainfo.xml
deleted file mode 100644
index 83ccb06..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/OOZIE/metainfo.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>System for workflow coordination and execution of Apache Hadoop jobs</comment>
-    <version>3.2.0</version>
-
-    <components>
-        <component>
-            <name>OOZIE_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>OOZIE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/PIG/configuration/pig.properties b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/PIG/configuration/pig.properties
deleted file mode 100644
index 01000b5..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/PIG/configuration/pig.properties
+++ /dev/null
@@ -1,52 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Pig default configuration file. All values can be overwritten by pig.properties and command line arguments.
-# see bin/pig -help
-
-# brief logging (no timestamps)
-brief=false
-
-#debug level, INFO is default
-debug=INFO
-
-#verbose print all log messages to screen (default to print only INFO and above to screen)
-verbose=false
-
-#exectype local|mapreduce, mapreduce is default
-exectype=mapreduce
-
-#Enable insertion of information about script into hadoop job conf 
-pig.script.info.enabled=true
-
-#Do not spill temp files smaller than this size (bytes)
-pig.spill.size.threshold=5000000
-#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
-#This should help reduce the number of files being spilled.
-pig.spill.gc.activation.size=40000000
-
-#the following two parameters are to help estimate the reducer number
-pig.exec.reducers.bytes.per.reducer=1000000000
-pig.exec.reducers.max=999
-
-#Temporary location to store the intermediate data.
-pig.temp.dir=/tmp/
-
-#Threshold for merging FRJoin fragment files
-pig.files.concatenation.threshold=100
-pig.optimistic.files.concatenation=false;
-
-pig.disable.counter=false
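
The pig.properties removed above is plain java.util.Properties format, and its two reducer-estimation knobs work together: Pig 0.10 estimates the reducer count roughly as the input size divided by bytes.per.reducer, capped at reducers.max. A hypothetical sketch (local file path and the exact estimation formula are assumptions based on that description):

    import java.io.FileInputStream;
    import java.util.Properties;

    public class PigPropsReader {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            try (FileInputStream in = new FileInputStream("pig.properties")) { // assumed copy
                props.load(in);
            }
            long bytesPerReducer = Long.parseLong(
                    props.getProperty("pig.exec.reducers.bytes.per.reducer", "1000000000"));
            int maxReducers = Integer.parseInt(
                    props.getProperty("pig.exec.reducers.max", "999"));
            long inputBytes = 5_000_000_000L; // example input size
            // Roughly: ceil(input / bytesPerReducer), capped at maxReducers.
            long estimated = Math.min(
                    (inputBytes + bytesPerReducer - 1) / bytesPerReducer, maxReducers);
            System.out.println("estimated reducers: " + estimated); // 5
        }
    }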
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/PIG/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/PIG/metainfo.xml
deleted file mode 100644
index 4982fd2..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/PIG/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Scripting platform for analyzing large datasets</comment>
-    <version>0.10.1</version>
-
-    <components>
-        <component>
-            <name>PIG</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/SQOOP/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/SQOOP/metainfo.xml
deleted file mode 100644
index ae0e68b..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/SQOOP/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Tool for transferring bulk data between Apache Hadoop and structured data stores such as relational databases</comment>
-    <version>1.4.2</version>
-
-    <components>
-        <component>
-            <name>SQOOP</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/WEBHCAT/configuration/webhcat-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/WEBHCAT/configuration/webhcat-site.xml
deleted file mode 100644
index 31d0113..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/WEBHCAT/configuration/webhcat-site.xml
+++ /dev/null
@@ -1,126 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- 
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<!-- The default settings for Templeton. -->
-<!-- Edit templeton-site.xml to change settings for your local -->
-<!-- install. -->
-
-<configuration>
-
-  <property>
-    <name>templeton.port</name>
-      <value>50111</value>
-    <description>The HTTP port for the main server.</description>
-  </property>
-
-  <property>
-    <name>templeton.hadoop.conf.dir</name>
-    <value>/etc/hadoop/conf</value>
-    <description>The path to the Hadoop configuration.</description>
-  </property>
-
-  <property>
-    <name>templeton.jar</name>
-    <value>/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar</value>
-    <description>The path to the Templeton jar file.</description>
-  </property>
-
-  <property>
-    <name>templeton.libjars</name>
-    <value>/usr/lib/zookeeper/zookeeper.jar</value>
-    <description>Jars to add to the classpath.</description>
-  </property>
-
-
-  <property>
-    <name>templeton.hadoop</name>
-    <value>/usr/bin/hadoop</value>
-    <description>The path to the Hadoop executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.archive</name>
-    <value>hdfs:///apps/webhcat/pig.tar.gz</value>
-    <description>The path to the Pig archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.path</name>
-    <value>pig.tar.gz/pig/bin/pig</value>
-    <description>The path to the Pig executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hcat</name>
-    <value>/usr/bin/hcat</value>
-    <description>The path to the hcatalog executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.archive</name>
-    <value>hdfs:///apps/webhcat/hive.tar.gz</value>
-    <description>The path to the Hive archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.path</name>
-    <value>hive.tar.gz/hive/bin/hive</value>
-    <description>The path to the Hive executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.properties</name>
-    <value></value>
-    <description>Properties to set when running hive.</description>
-  </property>
-
-
-  <property>
-    <name>templeton.zookeeper.hosts</name>
-    <value></value>
-    <description>ZooKeeper servers, as comma separated host:port pairs</description>
-  </property>
-
-  <property>
-    <name>templeton.storage.class</name>
-    <value>org.apache.hcatalog.templeton.tool.ZooKeeperStorage</value>
-    <description>The class to use as storage</description>
-  </property>
-
-  <property>
-   <name>templeton.override.enabled</name>
-   <value>false</value>
-   <description>
-     Enable the override path in templeton.override.jars
-   </description>
- </property>
-
- <property>
-    <name>templeton.streaming.jar</name>
-    <value>hdfs:///apps/webhcat/hadoop-streaming.jar</value>
-    <description>The hdfs path to the Hadoop streaming jar file.</description>
-  </property> 
-
-  <property>
-    <name>templeton.exec.timeout</name>
-    <value>60000</value>
-    <description>Timeout for the Templeton API.</description>
-  </property>
-
-</configuration>
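
The webhcat-site.xml deleted above puts the Templeton server on port 50111. As a hedged sketch of how a deployed server would be probed over that port (host name is an assumption; /templeton/v1/status is WebHCat's standard health endpoint):

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class WebHcatStatusCheck {
        public static void main(String[] args) throws Exception {
            URL url = new URL("http://webhcat-host.example.com:50111/templeton/v1/status");
            HttpURLConnection conn = (HttpURLConnection) url.openConnection();
            conn.setConnectTimeout(60_000); // mirrors templeton.exec.timeout above
            System.out.println("HTTP " + conn.getResponseCode());
            try (BufferedReader r = new BufferedReader(
                    new InputStreamReader(conn.getInputStream()))) {
                System.out.println(r.readLine()); // e.g. {"status":"ok","version":"v1"}
            }
        }
    }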
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/WEBHCAT/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/WEBHCAT/metainfo.xml
deleted file mode 100644
index e65992f..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/WEBHCAT/metainfo.xml
+++ /dev/null
@@ -1,31 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is a comment for the WEBHCAT service</comment>
-    <version>0.5.0</version>
-
-    <components>
-        <component>
-            <name>WEBHCAT_SERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/ZOOKEEPER/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/ZOOKEEPER/metainfo.xml
deleted file mode 100644
index 0e21f4f..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/ZOOKEEPER/metainfo.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is a comment for the ZOOKEEPER service</comment>
-    <version>3.4.5</version>
-
-    <components>
-        <component>
-            <name>ZOOKEEPER_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>ZOOKEEPER_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/repos/repoinfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/repos/repoinfo.xml
deleted file mode 100644
index f5c0fee..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/repos/repoinfo.xml
+++ /dev/null
@@ -1,97 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<reposinfo>
-  <os type="centos6">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/1.x/updates/1.2.1</baseurl>
-      <repoid>HDP-1.2.1</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-    <repo>
-      <baseurl></baseurl>
-      <repoid>HDP-epel</repoid>
-      <reponame>HDP-epel</reponame>
-      <mirrorslist><![CDATA[http://mirrors.fedoraproject.org/mirrorlist?repo=epel-6&arch=$basearch]]></mirrorslist>
-    </repo>
-  </os>
-  <os type="centos5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos5/1.x/updates/1.2.1</baseurl>
-      <repoid>HDP-1.2.1</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-    <repo>
-      <baseurl></baseurl>
-      <repoid>HDP-epel</repoid>
-      <reponame>HDP-epel</reponame>
-      <mirrorslist><![CDATA[http://mirrors.fedoraproject.org/mirrorlist?repo=epel-5&arch=$basearch]]></mirrorslist>
-    </repo>
-  </os>
-  <os type="redhat6">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/1.x/updates/1.2.1</baseurl>
-      <repoid>HDP-1.2.1</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-    <repo>
-      <baseurl></baseurl>
-      <repoid>HDP-epel</repoid>
-      <reponame>HDP-epel</reponame>
-      <mirrorslist><![CDATA[http://mirrors.fedoraproject.org/mirrorlist?repo=epel-6&arch=$basearch]]></mirrorslist>
-    </repo>
-  </os>
-  <os type="redhat5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos5/1.x/updates/1.2.1</baseurl>
-      <repoid>HDP-1.2.1</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-    <repo>
-      <baseurl></baseurl>
-      <repoid>HDP-epel</repoid>
-      <reponame>HDP-epel</reponame>
-      <mirrorslist><![CDATA[http://mirrors.fedoraproject.org/mirrorlist?repo=epel-5&arch=$basearch]]></mirrorslist>
-    </repo>
-  </os>
-  <os type="suse11">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/suse11/1.x/updates/1.2.1</baseurl>
-      <repoid>HDP-1.2.1</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/suse11</baseurl>
-      <repoid>HDP-UTILS-1.1.0.15</repoid>
-      <reponame>HDP-UTILS</reponame>
-      <mirrorslist></mirrorslist>
-    </repo>
-  </os>
-    <os type="sles11">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/suse11/1.x/updates/1.2.1</baseurl>
-      <repoid>HDP-1.2.1</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/suse11</baseurl>
-      <repoid>HDP-UTILS-1.1.0.15</repoid>
-      <reponame>HDP-UTILS</reponame>
-      <mirrorslist></mirrorslist>
-    </repo>
-  </os>
-</reposinfo>
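
The repoinfo.xml removed above repeats the same <repo> structure under one <os> element per platform. A hypothetical sketch (local file path assumed; this is not Ambari's actual stack loader) of flattening it into (os, repoid) pairs with the JDK's XPath API:

    import javax.xml.parsers.DocumentBuilderFactory;
    import javax.xml.xpath.XPath;
    import javax.xml.xpath.XPathConstants;
    import javax.xml.xpath.XPathFactory;
    import org.w3c.dom.Document;
    import org.w3c.dom.Element;
    import org.w3c.dom.NodeList;

    public class RepoInfoReader {
        public static void main(String[] args) throws Exception {
            Document doc = DocumentBuilderFactory.newInstance()
                    .newDocumentBuilder().parse("repoinfo.xml");
            XPath xpath = XPathFactory.newInstance().newXPath();
            NodeList repos = (NodeList) xpath.evaluate(
                    "/reposinfo/os/repo", doc, XPathConstants.NODESET);
            for (int i = 0; i < repos.getLength(); i++) {
                Element repo = (Element) repos.item(i);
                String osType = ((Element) repo.getParentNode()).getAttribute("type");
                String repoid = xpath.evaluate("repoid", repo);
                System.out.println(osType + " -> " + repoid); // e.g. centos6 -> HDP-1.2.1
            }
        }
    }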
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/GANGLIA/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/GANGLIA/metainfo.xml
deleted file mode 100644
index 0b21f0f..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/GANGLIA/metainfo.xml
+++ /dev/null
@@ -1,40 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Ganglia Metrics Collection system</comment>
-    <version>3.2.0</version>
-
-    <components>
-        <component>
-            <name>GANGLIA_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>GANGLIA_MONITOR</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>MONITOR_WEBSERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HBASE/configuration/hbase-policy.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HBASE/configuration/hbase-policy.xml
deleted file mode 100644
index e45f23c..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HBASE/configuration/hbase-policy.xml
+++ /dev/null
@@ -1,53 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HRegionInterface protocol implementations (i.e.
-    clients talking to HRegionServers).
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.admin.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterInterface protocol implementation (i.e.
-    clients talking to HMaster for admin operations).
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.masterregion.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterRegionInterface protocol implementations
-    (for HRegionServers communicating with HMaster).
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-</configuration>
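
All three ACLs in the hbase-policy.xml above share one grammar: a comma-separated user list, a blank, a comma-separated group list, with "*" allowing everyone. As an illustration of that format only (not HBase's actual authorizer), a small Java sketch of the matching rule:

    import java.util.Arrays;
    import java.util.List;

    public class ServiceAcl {
        static boolean isAllowed(String acl, String user, List<String> groups) {
            if ("*".equals(acl.trim())) {
                return true; // special value: all users allowed
            }
            String[] parts = acl.trim().split("\\s+", 2);
            List<String> users = Arrays.asList(parts[0].split(","));
            List<String> allowedGroups = parts.length > 1
                    ? Arrays.asList(parts[1].split(","))
                    : List.of();
            return users.contains(user)
                    || groups.stream().anyMatch(allowedGroups::contains);
        }

        public static void main(String[] args) {
            // "alice,bob users,wheel" from the descriptions above:
            System.out.println(isAllowed("alice,bob users,wheel",
                    "carol", List.of("users")));  // true, via the users group
            System.out.println(isAllowed("alice,bob users,wheel",
                    "carol", List.of("staff")));  // false
        }
    }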
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HBASE/configuration/hbase-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HBASE/configuration/hbase-site.xml
deleted file mode 100644
index 7710cb0..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HBASE/configuration/hbase-site.xml
+++ /dev/null
@@ -1,345 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>hbase.rootdir</name>
-    <value></value>
-    <description>The directory shared by region servers and into
-    which HBase persists.  The URL should be 'fully-qualified'
-    to include the filesystem scheme.  For example, to specify the
-    HDFS directory '/hbase' where the HDFS instance's namenode is
-    running at namenode.example.org on port 9000, set this value to:
-    hdfs://namenode.example.org:9000/hbase.  By default HBase writes
-    into /tmp.  Change this configuration, otherwise all data will be lost
-    on machine restart.
-    </description>
-  </property>
-  <property>
-    <name>hbase.cluster.distributed</name>
-    <value>true</value>
-    <description>The mode the cluster will be in. Possible values are
-      false for standalone mode and true for distributed mode.  If
-      false, startup will run all HBase and ZooKeeper daemons together
-      in the one JVM.
-    </description>
-  </property>
-  <property>
-    <name>hbase.tmp.dir</name>
-    <value></value>
-    <description>Temporary directory on the local filesystem.
-    Change this setting to point to a location more permanent
-    than '/tmp' (The '/tmp' directory is often cleared on
-    machine restart).
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.bindAddress</name>
-    <value></value>
-    <description>The bind address for the HBase Master web UI
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.global.memstore.upperLimit</name>
-    <value></value>
-    <description>Maximum size of all memstores in a region server before new
-      updates are blocked and flushes are forced. Defaults to 40% of heap
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.handler.count</name>
-    <value></value>
-    <description>Count of RPC Listener instances spun up on RegionServers.
-    Same property is used by the Master for count of master handlers.
-    Default is 10.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.majorcompaction</name>
-    <value></value>
-    <description>The time (in milliseconds) between 'major' compactions of all
-    HStoreFiles in a region.  Default: 1 day.
-    Set to 0 to disable automated major compactions.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.lease.thread.wakefrequency</name>
-    <value>3000</value>
-    <description>The interval between checks for expired region server leases.
-    This value has been reduced due to the other reduced values above so that
-    the master will notice a dead region server sooner. The default is 15 seconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.global.memstore.lowerLimit</name>
-    <value></value>
-    <description>When memstores are being forced to flush to make room in
-      memory, keep flushing until we hit this mark. Defaults to 35% of heap.
-      This value equal to hbase.regionserver.global.memstore.upperLimit causes
-      the minimum possible flushing to occur when updates are blocked due to
-      memstore limiting.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.block.multiplier</name>
-    <value></value>
-    <description>Block updates if the memstore has hbase.hregion.memstore.block.multiplier
-    times hbase.hregion.flush.size bytes.  Useful for preventing
-    runaway memstore growth during spikes in update traffic.  Without an
-    upper bound, the memstore fills such that when it flushes, the
-    resultant flush files take a long time to compact or split, or,
-    worse, we OOME.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.flush.size</name>
-    <value></value>
-    <description>
-    Memstore will be flushed to disk if size of the memstore
-    exceeds this number of bytes.  Value is checked by a thread that runs
-    every hbase.server.thread.wakefrequency.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.mslab.enabled</name>
-    <value></value>
-    <description>
-      Enables the MemStore-Local Allocation Buffer,
-      a feature which works to prevent heap fragmentation under
-      heavy write loads. This can reduce the frequency of stop-the-world
-      GC pauses on large heaps.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.max.filesize</name>
-    <value></value>
-    <description>
-    Maximum HStoreFile size. If any one of a column families' HStoreFiles has
-    grown to exceed this value, the hosting HRegion is split in two.
-    Default: 1G.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.scanner.caching</name>
-    <value></value>
-    <description>Number of rows that will be fetched when calling next
-    on a scanner if it is not served from (local, client) memory. Higher
-    caching values will enable faster scanners but will eat up more memory
-    and some calls of next may take longer and longer times when the cache is empty.
-    Do not set this value such that the time between invocations is greater
-    than the scanner timeout; i.e. hbase.regionserver.lease.period
-    </description>
-  </property>
-  <property>
-    <name>zookeeper.session.timeout</name>
-    <value></value>
-    <description>ZooKeeper session timeout.
-      HBase passes this to the zk quorum as suggested maximum time for a
-      session (This setting becomes zookeeper's 'maxSessionTimeout').  See
-      http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
-      "The client sends a requested timeout, the server responds with the
-      timeout that it can give the client. " In milliseconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.keyvalue.maxsize</name>
-    <value></value>
-    <description>Specifies the combined maximum allowed size of a KeyValue
-    instance. This sets an upper boundary for a single entry saved in a
-    storage file. Since such entries cannot be split, this helps avoid a
-    situation where a region cannot be split any further because the data is
-    too large. It seems wise to set this to a fraction of the maximum region
-    size. Setting it to zero or less disables the check.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.compactionThreshold</name>
-    <value></value>
-    <description>
-    If more than this number of HStoreFiles in any one HStore
-    (one HStoreFile is written per flush of memstore) then a compaction
-    is run to rewrite all HStoreFiles files as one.  Larger numbers
-    put off compaction but when it runs, it takes longer to complete.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.blockingStoreFiles</name>
-    <value></value>
-    <description>
-    If more than this number of StoreFiles in any one Store
-    (one StoreFile is written per flush of MemStore) then updates are
-    blocked for this HRegion until a compaction is completed, or
-    until hbase.hstore.blockingWaitTime has been exceeded.
-    </description>
-  </property>
-  <property>
-    <name>hfile.block.cache.size</name>
-    <value></value>
-    <description>
-        Percentage of maximum heap (-Xmx setting) to allocate to block cache
-        used by HFile/StoreFile. Default of 0.25 means allocate 25%.
-        Set to 0 to disable, but this is not recommended.
-    </description>
-  </property>
-
-  <!-- The following properties configure authentication information for
-       HBase processes when using Kerberos security.  There are no default
-       values, included here for documentation purposes -->
-  <property>
-    <name>hbase.master.keytab.file</name>
-    <value></value>
-    <description>Full path to the kerberos keytab file to use for logging in
-    the configured HMaster server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.kerberos.principal</name>
-    <value></value>
-    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
-    that should be used to run the HMaster process.  The principal name should
-    be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the hostname
-    portion, it will be replaced with the actual hostname of the running
-    instance.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.keytab.file</name>
-    <value></value>
-    <description>Full path to the kerberos keytab file to use for logging in
-    the configured HRegionServer server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.kerberos.principal</name>
-    <value></value>
-    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
-    that should be used to run the HRegionServer process.  The principal name
-    should be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the
-    hostname portion, it will be replaced with the actual hostname of the
-    running instance.  An entry for this principal must exist in the file
-    specified in hbase.regionserver.keytab.file
-    </description>
-  </property>
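
The _HOST convention described in both principal properties is resolved by Hadoop's SecurityUtil; a small sketch, with a hypothetical region server hostname:

    import org.apache.hadoop.security.SecurityUtil;

    public class HostSubstitutionSketch {
        public static void main(String[] args) throws Exception {
            // Expands the _HOST placeholder; with a null or empty hostname
            // argument, the local canonical hostname is used instead.
            String principal = SecurityUtil.getServerPrincipal(
                    "hbase/_HOST@EXAMPLE.COM", "rs1.example.com"); // hypothetical host
            System.out.println(principal); // hbase/rs1.example.com@EXAMPLE.COM
        }
    }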
-
-  <!-- Additional configuration specific to HBase security -->
-  <property>
-    <name>hbase.superuser</name>
-    <value>hbase</value>
-    <description>List of users or groups (comma-separated), who are allowed
-    full privileges, regardless of stored ACLs, across the cluster.
-    Only used when HBase security is enabled.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.region.classes</name>
-    <value></value>
-    <description>A comma-separated list of Coprocessors that are loaded by
-    default on all tables. For any override coprocessor method, these classes
-    will be called in order. After implementing your own Coprocessor, just put
-    it in HBase's classpath and add the fully qualified class name here.
-    A coprocessor can also be loaded on demand by setting HTableDescriptor.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.master.classes</name>
-    <value></value>
-    <description>A comma-separated list of
-      org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
-      loaded by default on the active HMaster process. For any implemented
-      coprocessor methods, the listed classes will be called in order. After
-      implementing your own MasterObserver, just put it in HBase's classpath
-      and add the fully qualified class name here.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.zookeeper.property.clientPort</name>
-    <value>2181</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-    The port at which the clients will connect.
-    </description>
-  </property>
-
-  <!--
-  The following three properties are used together to create the list of
-  host:peer_port:leader_port quorum servers for ZooKeeper.
-  -->
-  <property>
-    <name>hbase.zookeeper.quorum</name>
-    <value></value>
-    <description>Comma separated list of servers in the ZooKeeper Quorum.
-    For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
-    By default this is set to localhost for local and pseudo-distributed modes
-    of operation. For a fully-distributed setup, this should be set to a full
-    list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
-    this is the list of servers which we will start/stop ZooKeeper on.
-    </description>
-  </property>
-  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
-
-  <property>
-    <name>dfs.support.append</name>
-    <value></value>
-    <description>Does HDFS allow appends to files?
-    This is an HDFS configuration, set here so the HDFS client will enable append support.
-    You must ensure that this configuration is also true server-side when running HBase
-    (you will have to restart your cluster after setting it).
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.client.read.shortcircuit</name>
-    <value></value>
-    <description>Enable/Disable short circuit read for your client.
-    Hadoop servers should be configured to allow short circuit read
-    for the hbase user for this to take effect
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.client.read.shortcircuit.skip.checksum</name>
-    <value></value>
-    <description>Enable/disable skipping the checksum check</description>
-  </property>
-
-  <property>
-    <name>hbase.regionserver.optionalcacheflushinterval</name>
-    <value>10000</value>
-    <description>
-      Amount of time to wait since the last time a region was flushed before
-      invoking an optional cache flush. Default 60,000.
-    </description>
-  </property>
-  
-  <property>
-    <name>hbase.zookeeper.useMulti</name>
-    <value>true</value>
-    <description>Instructs HBase to make use of ZooKeeper's multi-update functionality.
-    This allows certain ZooKeeper operations to complete more quickly and prevents some issues
-    with rare replication failure scenarios (see the release note of HBASE-2611 for an example).
-    IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+
-    and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and will
-    not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
-    </description>
-  </property>
-</configuration>
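
For background on the multi-update primitive that hbase.zookeeper.useMulti turns on, here is a minimal sketch against the ZooKeeper 3.4 Java API; the connect string and znode paths are hypothetical, and production code would handle KeeperException:

    import java.util.Arrays;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.Op;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class MultiUpdateSketch {
        public static void main(String[] args) throws Exception {
            ZooKeeper zk = new ZooKeeper("zk1.example.com:2181", 30000, null);
            // Both creates commit atomically, or neither does; pre-3.4 servers
            // do not support the request and, per ZOOKEEPER-1495, do not fail
            // gracefully when it is invoked.
            zk.multi(Arrays.asList(
                    Op.create("/demo-a", new byte[0],
                            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT),
                    Op.create("/demo-b", new byte[0],
                            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT)));
            zk.close();
        }
    }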
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HBASE/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HBASE/metainfo.xml
deleted file mode 100644
index 553fa2b..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HBASE/metainfo.xml
+++ /dev/null
@@ -1,40 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>mapred</user>
-    <comment>Non-relational distributed database and centralized service for configuration management &amp; synchronization</comment>
-    <version>0.94.5</version>
-
-    <components>
-        <component>
-            <name>HBASE_MASTER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>HBASE_REGIONSERVER</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>HBASE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HCATALOG/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HCATALOG/metainfo.xml
deleted file mode 100644
index 1951a5d..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HCATALOG/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is a comment for the HCATALOG service</comment>
-    <version>0.5.0</version>
-
-    <components>
-        <component>
-            <name>HCAT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HDFS/configuration/core-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HDFS/configuration/core-site.xml
deleted file mode 100644
index a312e68..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HDFS/configuration/core-site.xml
+++ /dev/null
@@ -1,251 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
- <!--
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
- 
-        http://www.apache.org/licenses/LICENSE-2.0
- 
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
- -->
- 
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-<!-- i/o properties -->
-
-  <property>
-    <name>io.file.buffer.size</name>
-    <value>131072</value>
-    <description>The size of buffer for use in sequence files.
-  The size of this buffer should probably be a multiple of hardware
-  page size (4096 on Intel x86), and it determines how much data is
-  buffered during read and write operations.</description>
-  </property>
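
A trivial check of the "multiple of the hardware page size" guidance above, under the stated assumption of 4096-byte pages:

    public class BufferSizeCheck {
        public static void main(String[] args) {
            int pageSize = 4096;      // typical Intel x86 page size, per the description
            int bufferSize = 131072;  // io.file.buffer.size as set above
            // 131072 / 4096 == 32, so the configured buffer is a clean multiple.
            System.out.println(bufferSize % pageSize == 0);
        }
    }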
-
-  <property>
-    <name>io.serializations</name>
-    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
-  </property>
-
-  <property>
-    <name>io.compression.codecs</name>
-    <value></value>
-    <description>A list of the compression codec classes that can be used
-                 for compression/decompression.</description>
-  </property>
-
-  <property>
-    <name>io.compression.codec.lzo.class</name>
-    <value>com.hadoop.compression.lzo.LzoCodec</value>
-    <description>The implementation for lzo codec.</description>
-  </property>
-
-<!-- file system properties -->
-
-  <property>
-    <name>fs.default.name</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>The name of the default file system.  Either the
-  literal string "local" or a host:port for NDFS.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>fs.trash.interval</name>
-    <value>360</value>
-    <description>Number of minutes between trash checkpoints.
-  If zero, the trash feature is disabled.
-  </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.dir</name>
-    <value></value>
-    <description>Determines where on the local filesystem the DFS secondary
-        name node should store the temporary images to merge.
-        If this is a comma-delimited list of directories then the image is
-        replicated in all of the directories for redundancy.
-    </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.edits.dir</name>
-    <value>${fs.checkpoint.dir}</value>
-    <description>Determines where on the local filesystem the DFS secondary
-        name node should store the temporary edits to merge.
-        If this is a comma-delimited list of directories then the edits are
-        replicated in all of the directories for redundancy.
-        Default value is same as fs.checkpoint.dir
-    </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.period</name>
-    <value>21600</value>
-    <description>The number of seconds between two periodic checkpoints.
-  </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.size</name>
-    <value>536870912</value>
-    <description>The size of the current edit log (in bytes) that triggers
-       a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
-  </description>
-  </property>
-
-  <!-- ipc properties: copied from kryptonite configuration -->
-  <property>
-    <name>ipc.client.idlethreshold</name>
-    <value>8000</value>
-    <description>Defines the threshold number of connections after which
-               connections will be inspected for idleness.
-  </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connection.maxidletime</name>
-    <value>30000</value>
-    <description>The maximum time after which a client will bring down the
-               connection to the server.
-  </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connect.max.retries</name>
-    <value>50</value>
-    <description>Defines the maximum number of retries for IPC connections.</description>
-  </property>
-
-  <!-- Web Interface Configuration -->
-  <property>
-    <name>webinterface.private.actions</name>
-    <value>false</value>
-    <description> If set to true, the web interfaces of JT and NN may contain
-                actions, such as kill job, delete file, etc., that should
-                not be exposed to public. Enable this option if the interfaces
-                are only reachable by those who have the right authorization.
-  </description>
-  </property>
-
- <property>
-   <name>hadoop.security.authentication</name>
-   <value></value>
-   <description>
-   Set the authentication for the cluster. Valid values are: simple or
-   kerberos.
-   </description>
- </property>
-<property>
-  <name>hadoop.security.authorization</name>
-  <value></value>
-  <description>
-     Enable authorization for different protocols.
-  </description>
-</property>
-
-  <property>
-    <name>hadoop.security.auth_to_local</name>
-    <value></value>
-<description>The mapping from kerberos principal names to local OS user names.
-  The default rule is just "DEFAULT", which takes all principals in your default domain to their first component:
-  "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" both become "omalley", if your default domain is APACHE.ORG.
-The translation rules have 3 sections:
-      base     filter    substitution
-The base consists of a number that represents the number of components in the principal name excluding the realm and the pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
-
-[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
-[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
-[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
-
-The filter is a regex in parens that must match the generated string for the rule to apply.
-
-"(.*%admin)" will take any string that ends in "%admin"
-"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
-
-Finally, the substitution is a sed rule to translate a regex into a fixed string.
-
-"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
-"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
-"s/X/Y/g" replaces all of the "X" in the name with "Y"
-
-So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
-
-RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
-DEFAULT
-
-To also translate the names with a second component, you'd make the rules:
-
-RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
-RULE:[2:$1@$0](.*@ACME.COM)s/@.*//
-DEFAULT
-
-If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
-
-RULE:[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/
-DEFAULT
-    </description>
-  </property>
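
A small sketch of how such rules are evaluated, using the KerberosName class from Hadoop's authentication code (the package shown is where it lives in hadoop-auth; resolving the DEFAULT rule additionally needs a usable default realm from the Kerberos configuration):

    import org.apache.hadoop.security.authentication.util.KerberosName;

    public class AuthToLocalSketch {
        public static void main(String[] args) throws Exception {
            // The same rule text that would go into hadoop.security.auth_to_local.
            KerberosName.setRules(
                    "RULE:[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/\n" +
                    "DEFAULT");
            // omalley/admin@APACHE.ORG matches the first rule -> "admin"
            System.out.println(
                    new KerberosName("omalley/admin@APACHE.ORG").getShortName());
        }
    }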
-
-<!--
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("hcat_user").groups</name>
-  <value></value>
-  <description>
-     Proxy group for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("hcat_user").hosts</name>
-  <value></value>
-  <description>
-     Proxy host for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("oozie_user").groups</name>
-  <value></value>
-  <description>
-     Proxy group for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("oozie_user").hosts</name>
-  <value></value>
-  <description>
-     Proxy host for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("templeton_user").groups</name>
-  <value></value>
-  <description>
-    Proxy group for templeton.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("templeton_user").hosts</name>
-  <value></value>
-  <description>
-    Proxy host for templeton.
-  </description>
-</property>
--->
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HDFS/configuration/hadoop-policy.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HDFS/configuration/hadoop-policy.xml
deleted file mode 100644
index 900da99..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HDFS/configuration/hadoop-policy.xml
+++ /dev/null
@@ -1,134 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientProtocol, which is used by user code
-    via the DistributedFileSystem.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.client.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
-    for block recovery.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for DatanodeProtocol, which is used by datanodes to
-    communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
-    for updating generation timestamp.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.namenode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for NamenodeProtocol, the protocol used by the secondary
-    namenode to communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.tracker.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterTrackerProtocol, used by the tasktrackers to
-    communicate with the jobtracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.submission.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for JobSubmissionProtocol, used by job clients to
-    communicate with the jobtracker for job submission, querying job status etc.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.task.umbilical.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
-    tasks to communicate with the parent tasktracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
- <property>
-    <name>security.admin.operations.protocol.acl</name>
-    <value></value>
-    <description>ACL for AdminOperationsProtocol. Used for admin commands.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
-    <value></value>
-    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
-    user mappings. The ACL is a comma-separated list of user and
-    group names. The user and group list is separated by a blank. For
-    e.g. "alice,bob users,wheel".  A special value of "*" means all
-    users are allowed.</description>
-  </property>
-
-<property>
-    <name>security.refresh.policy.protocol.acl</name>
-    <value></value>
-    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
-    dfsadmin and mradmin commands to refresh the security policy in-effect.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-
-</configuration>
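
Every ACL above uses the same "users groups" wire format; a minimal sketch of how Hadoop-side code parses and checks it, assuming the AccessControlList class from the org.apache.hadoop.security.authorize package behaves as its javadoc describes:

    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.authorize.AccessControlList;

    public class AclFormatSketch {
        public static void main(String[] args) {
            // Comma-separated users, a single blank, then comma-separated groups.
            AccessControlList acl = new AccessControlList("alice,bob users,wheel");
            UserGroupInformation alice =
                    UserGroupInformation.createRemoteUser("alice");
            System.out.println(acl.isUserAllowed(alice));                        // true: listed user
            System.out.println(new AccessControlList("*").isUserAllowed(alice)); // true: wildcard
        }
    }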
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HDFS/configuration/hdfs-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HDFS/configuration/hdfs-site.xml
deleted file mode 100644
index db92d4b..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HDFS/configuration/hdfs-site.xml
+++ /dev/null
@@ -1,415 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-<!-- file system properties -->
-
-  <property>
-    <name>dfs.name.dir</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>Determines where on the local filesystem the DFS name node
-      should store the name table.  If this is a comma-delimited list
-      of directories then the name table is replicated in all of the
-      directories, for redundancy. </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.support.append</name>
-    <value></value>
-    <description>to enable dfs append</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.webhdfs.enabled</name>
-    <value></value>
-    <description>to enable webhdfs</description>
-    <final>true</final>
-  </property>
-
- <property>
-    <name>dfs.datanode.socket.write.timeout</name>
-    <value>0</value>
-    <description>DFS Client write socket timeout</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value></value>
-    <description>Number of failed disks a datanode would tolerate</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.block.local-path-access.user</name>
-    <value></value>
-    <description>the user who is allowed to perform short
-    circuit reads.
-    </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.data.dir</name>
-    <value></value>
-    <description>Determines where on the local filesystem a DFS data node
-  should store its blocks.  If this is a comma-delimited
-  list of directories, then data will be stored in all named
-  directories, typically on different devices.
-  Directories that do not exist are ignored.
-  </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.hosts.exclude</name>
-    <value></value>
-    <description>Names a file that contains a list of hosts that are
-    not permitted to connect to the namenode.  The full pathname of the
-    file must be specified.  If the value is empty, no hosts are
-    excluded.</description>
-  </property>
-
-  <property>
-    <name>dfs.hosts</name>
-    <value></value>
-    <description>Names a file that contains a list of hosts that are
-    permitted to connect to the namenode. The full pathname of the file
-    must be specified.  If the value is empty, all hosts are
-    permitted.</description>
-  </property>
-
-  <property>
-    <name>dfs.replication.max</name>
-    <value>50</value>
-    <description>Maximal block replication.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.replication</name>
-    <value></value>
-    <description>Default block replication.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.heartbeat.interval</name>
-    <value>3</value>
-    <description>Determines datanode heartbeat interval in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.safemode.threshold.pct</name>
-    <value>1.0f</value>
-    <description>
-        Specifies the percentage of blocks that should satisfy
-        the minimal replication requirement defined by dfs.replication.min.
-        Values less than or equal to 0 mean not to start in safe mode.
-        Values greater than 1 will make safe mode permanent.
-        </description>
-  </property>
-
-  <property>
-    <name>dfs.balance.bandwidthPerSec</name>
-    <value>6250000</value>
-    <description>
-        Specifies the maximum amount of bandwidth that each datanode
-        can utilize for balancing purposes, in terms of
-        the number of bytes per second.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.address</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>dfs.datanode.http.address</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>dfs.block.size</name>
-    <value>134217728</value>
-    <description>The default block size for new files.</description>
-  </property>
-
-  <property>
-    <name>dfs.http.address</name>
-    <value></value>
-<description>The address and port on which the NameNode web UI listens.</description>
-<final>true</final>
-</property>
-
-<property>
-<name>dfs.datanode.du.reserved</name>
-<!-- cluster variant -->
-<value></value>
-<description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
-</description>
-</property>
-
-<property>
-<name>dfs.datanode.ipc.address</name>
-<value>0.0.0.0:8010</value>
-<description>
-The datanode ipc server address and port.
-If the port is 0 then the server will start on a free port.
-</description>
-</property>
-
-<property>
-<name>dfs.blockreport.initialDelay</name>
-<value>120</value>
-<description>Delay for first block report in seconds.</description>
-</property>
-
-<property>
-<name>dfs.datanode.du.pct</name>
-<value>0.85f</value>
-<description>When calculating remaining space, only use this percentage of the real available space
-</description>
-</property>
-
-<property>
-<name>dfs.namenode.handler.count</name>
-<value>40</value>
-<description>The number of server threads for the namenode.</description>
-</property>
-
-<property>
-<name>dfs.datanode.max.xcievers</name>
-<value>4096</value>
-<description>PRIVATE CONFIG VARIABLE</description>
-</property>
-
-<!-- Permissions configuration -->
-
-<property>
-<name>dfs.umaskmode</name>
-<value>077</value>
-<description>
-The octal umask used when creating files and directories.
-</description>
-</property>
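
A few lines of plain Java showing what the octal umask of 077 above does to a requested creation mode:

    public class UmaskSketch {
        public static void main(String[] args) {
            int umask = 0077;     // dfs.umaskmode
            int requested = 0777; // mode asked for at create time
            // 0777 & ~0077 == 0700: owner keeps rwx, group and other get nothing.
            System.out.printf("effective mode: %o%n", requested & ~umask);
        }
    }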
-
-<property>
-<name>dfs.web.ugi</name>
-<!-- cluster variant -->
-<value>gopher,gopher</value>
-<description>The user account used by the web interface.
-Syntax: USERNAME,GROUP1,GROUP2, ...
-</description>
-</property>
-
-<property>
-<name>dfs.permissions</name>
-<value>true</value>
-<description>
-If "true", enable permission checking in HDFS.
-If "false", permission checking is turned off,
-but all other behavior is unchanged.
-Switching from one parameter value to the other does not change the mode,
-owner or group of files or directories.
-</description>
-</property>
-
-<property>
-<name>dfs.permissions.supergroup</name>
-<value>hdfs</value>
-<description>The name of the group of super-users.</description>
-</property>
-
-<property>
-<name>dfs.namenode.handler.count</name>
-<value>100</value>
-<description>Added to grow the queue size so that more client connections are allowed</description>
-</property>
-
-<property>
-<name>ipc.server.max.response.size</name>
-<value>5242880</value>
-</property>
-<property>
-<name>dfs.block.access.token.enable</name>
-<value>true</value>
-<description>
-If "true", access tokens are used as capabilities for accessing datanodes.
-If "false", no access tokens are checked on accessing datanodes.
-</description>
-</property>
-
-<property>
-<name>dfs.namenode.kerberos.principal</name>
-<value></value>
-<description>
-Kerberos principal name for the NameNode
-</description>
-</property>
-
-<property>
-<name>dfs.secondary.namenode.kerberos.principal</name>
-<value></value>
-    <description>
-        Kerberos principal name for the secondary NameNode.
-    </description>
-  </property>
-
-
-<!--
-  This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
--->
-  <property>
-    <name>dfs.namenode.kerberos.https.principal</name>
-    <value></value>
-     <description>The Kerberos principal for the host that the NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.https.principal</name>
-    <value></value>
-    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <!-- cluster variant -->
-    <name>dfs.secondary.http.address</name>
-    <value></value>
-    <description>Address of secondary namenode web server</description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.https.port</name>
-    <value>50490</value>
-    <description>The https port where secondary-namenode binds</description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.principal</name>
-    <value></value>
-    <description>
-      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
-      HTTP SPNEGO specification.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.keytab</name>
-    <value></value>
-    <description>
-      The Kerberos keytab file with the credentials for the
-      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.kerberos.principal</name>
-    <value></value>
- <description>
-        The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.keytab.file</name>
-    <value></value>
- <description>
-        Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.keytab.file</name>
-    <value></value>
-  <description>
-        Combined keytab file containing the secondary namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.keytab.file</name>
-    <value></value>
- <description>
-        The filename of the keytab file for the DataNode.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.https.port</name>
-    <value>50470</value>
- <description>The https port where namenode binds</description>
-
-  </property>
-
-  <property>
-    <name>dfs.https.address</name>
-    <value></value>
-  <description>The https address where namenode binds</description>
-
-  </property>
-
-  <property>
-    <name>dfs.datanode.data.dir.perm</name>
-    <value></value>
-<description>The permissions that should be there on dfs.data.dir
-directories. The datanode will not come up if the permissions are
-different on existing dfs.data.dir directories. If the directories
-don't exist, they will be created with this permission.</description>
-  </property>
-
-  <property>
-  <name>dfs.access.time.precision</name>
-  <value>0</value>
-  <description>The access time for an HDFS file is precise up to this value.
-               The default value is 1 hour. Setting a value of 0 disables
-               access times for HDFS.
-  </description>
-</property>
-
-<property>
- <name>dfs.cluster.administrators</name>
- <value> hdfs</value>
- <description>ACL for who all can view the default servlets in the HDFS</description>
-</property>
-
-<property>
-  <name>ipc.server.read.threadpool.size</name>
-  <value>5</value>
-  <description></description>
-</property>
-
-<property>
-  <name>dfs.datanode.failed.volumes.tolerated</name>
-  <value>0</value>
-  <description>Number of failed disks datanode would tolerate</description>
-</property>
-
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HDFS/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HDFS/metainfo.xml
deleted file mode 100644
index 1b185e1..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HDFS/metainfo.xml
+++ /dev/null
@@ -1,46 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Apache Hadoop Distributed File System</comment>
-    <version>1.1.2</version>
-
-    <components>
-        <component>
-            <name>NAMENODE</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>DATANODE</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>SECONDARY_NAMENODE</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>HDFS_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HIVE/configuration/hive-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HIVE/configuration/hive-site.xml
deleted file mode 100644
index 7d35558..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HIVE/configuration/hive-site.xml
+++ /dev/null
@@ -1,138 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<configuration>
-  <property>
-    <name>hive.metastore.local</name>
-    <value>false</value>
-    <description>controls whether to connect to a remote metastore server or
-    open a new metastore server in the Hive Client JVM</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionURL</name>
-    <value></value>
-    <description>JDBC connect string for a JDBC metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionDriverName</name>
-    <value>com.mysql.jdbc.Driver</value>
-    <description>Driver class name for a JDBC metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionUserName</name>
-    <value></value>
-    <description>username to use against metastore database</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionPassword</name>
-    <value></value>
-    <description>password to use against metastore database</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.warehouse.dir</name>
-    <value>/apps/hive/warehouse</value>
-    <description>location of default database for the warehouse</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.sasl.enabled</name>
-    <value></value>
-    <description>If true, the metastore thrift interface will be secured with SASL.
-     Clients must authenticate with Kerberos.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.kerberos.keytab.file</name>
-    <value></value>
-    <description>The path to the Kerberos Keytab file containing the metastore
-     thrift server's service principal.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.kerberos.principal</name>
-    <value></value>
-    <description>The service principal for the metastore thrift server. The special
-    string _HOST will be replaced automatically with the correct host name.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.cache.pinobjtypes</name>
-    <value>Table,Database,Type,FieldSchema,Order</value>
-    <description>List of comma separated metastore object types that should be pinned in the cache</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.uris</name>
-    <value></value>
-    <description>URI for client to contact metastore server</description>
-  </property>
-
-  <property>
-    <name>hive.semantic.analyzer.factory.impl</name>
-    <value>org.apache.hcatalog.cli.HCatSemanticAnalyzerFactory</value>
-    <description>controls which SemanticAnalyzerFactory implementation class is used by the CLI</description>
-  </property>
-
-  <property>
-    <name>hadoop.clientside.fs.operations</name>
-    <value>true</value>
-    <description>FS operations are owned by client</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.client.socket.timeout</name>
-    <value>60</value>
-    <description>MetaStore Client socket timeout in seconds</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.execute.setugi</name>
-    <value>true</value>
-    <description>In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that it is best effort: if the client sets it to true and the server sets it to false, the client setting will be ignored.</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.enabled</name>
-    <value>true</value>
-    <description>enable or disable the hive client authorization</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.manager</name>
-    <value>org.apache.hcatalog.security.HdfsAuthorizationProvider</value>
-    <description>the hive client authorization manager class name.
-    The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  </description>
-  </property>
-
-  <property>
-    <name>hive.server2.enable.doAs</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>fs.hdfs.impl.disable.cache</name>
-    <value>true</value>
-  </property>
-
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HIVE/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HIVE/metainfo.xml
deleted file mode 100644
index 6a52064..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HIVE/metainfo.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
-    <version>0.10.0</version>
-
-    <components>        
-        <component>
-            <name>HIVE_METASTORE</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>HIVE_SERVER</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>MYSQL_SERVER</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>HIVE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/MAPREDUCE/configuration/capacity-scheduler.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/MAPREDUCE/configuration/capacity-scheduler.xml
deleted file mode 100644
index 8034d19..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/MAPREDUCE/configuration/capacity-scheduler.xml
+++ /dev/null
@@ -1,195 +0,0 @@
-<?xml version="1.0"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- This is the configuration file for the resource manager in Hadoop. -->
-<!-- You can configure various scheduling parameters related to queues. -->
-<!-- The properties for a queue follow a naming convention, such as, -->
-<!-- mapred.capacity-scheduler.queue.<queue-name>.property-name. -->
-
-<configuration>
-
-  <property>
-    <name>mapred.capacity-scheduler.maximum-system-jobs</name>
-    <value>3000</value>
-    <description>Maximum number of jobs in the system which can be initialized
-     concurrently by the CapacityScheduler.
-    </description>    
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.capacity</name>
-    <value>100</value>
-    <description>Percentage of the number of slots in the cluster that are
-      to be available for jobs in this queue.
-    </description>    
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.maximum-capacity</name>
-    <value>-1</value>
-    <description>
-	maximum-capacity defines a limit beyond which a queue cannot use the capacity of the cluster.
-	This provides a means to limit how much excess capacity a queue can use. By default, there is no limit.
-	The maximum-capacity of a queue can only be greater than or equal to its minimum capacity.
-        The default value of -1 implies a queue can use the complete capacity of the cluster.
-
-        This property can be used to curtail long-running jobs from occupying more than a
-        certain percentage of the cluster, which, in the absence of pre-emption, could affect
-        the capacity guarantees of other queues.
-
-        One important thing to note is that maximum-capacity is a percentage, so the maximum
-        capacity in absolute terms changes with the cluster's capacity: if a large number of
-        nodes or racks is added to the cluster, the maximum capacity grows accordingly.
-    </description>    
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.supports-priority</name>
-    <value>false</value>
-    <description>If true, priorities of jobs will be taken into 
-      account in scheduling decisions.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.minimum-user-limit-percent</name>
-    <value>100</value>
-    <description> Each queue enforces a limit on the percentage of resources 
-    allocated to a user at any given time, if there is competition for them. 
-    This user limit can vary between a minimum and maximum value. The former
-    depends on the number of users who have submitted jobs, and the latter is
-    set to this property value. For example, suppose the value of this 
-    property is 25. If two users have submitted jobs to a queue, no single 
-    user can use more than 50% of the queue resources. If a third user submits
-    a job, no single user can use more than 33% of the queue resources. With 4 
-    or more users, no user can use more than 25% of the queue's resources. A 
-    value of 100 implies no user limits are imposed. 
-    </description>
-  </property>
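
The worked percentages in that description fall out of max(minimum-user-limit-percent, 100 / active users); a tiny sketch reproducing them:

    public class UserLimitSketch {
        public static void main(String[] args) {
            int minimumUserLimitPercent = 25; // the example value from the description
            for (int users = 1; users <= 5; users++) {
                int limit = Math.max(minimumUserLimitPercent, 100 / users);
                System.out.printf("%d user(s): each capped at %d%% of the queue%n",
                        users, limit);
            }
        }
    }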
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.user-limit-factor</name>
-    <value>1</value>
-    <description>The multiple of the queue capacity which can be configured to 
-    allow a single user to acquire more slots. 
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks</name>
-    <value>200000</value>
-    <description>The maximum number of tasks, across all jobs in the queue, 
-    which can be initialized concurrently. Once the queue's jobs exceed this 
-    limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks-per-user</name>
-    <value>100000</value>
-    <description>The maximum number of tasks per-user, across all of the 
-    user's jobs in the queue, which can be initialized concurrently. Once the 
-    user's jobs exceed this limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.init-accept-jobs-factor</name>
-    <value>10</value>
-    <description>The multiple of (maximum-system-jobs * queue-capacity) used to 
-    determine the number of jobs which are accepted by the scheduler.  
-    </description>
-  </property>
-
-  <!-- The default configuration settings for the capacity task scheduler -->
-  <!-- The default values would be applied to all the queues which don't have -->
-  <!-- the appropriate property for the particular queue -->
-  <property>
-    <name>mapred.capacity-scheduler.default-supports-priority</name>
-    <value>false</value>
-    <description>If true, priorities of jobs will be taken into 
-      account in scheduling decisions by default in a job queue.
-    </description>
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.default-minimum-user-limit-percent</name>
-    <value>100</value>
-    <description>The percentage of the resources limited to a particular user
-      for the job queue at any given point of time by default.
-    </description>
-  </property>
-
-
-  <property>
-    <name>mapred.capacity-scheduler.default-user-limit-factor</name>
-    <value>1</value>
-    <description>The default multiple of queue-capacity which is used to 
-    determine the number of slots a single user can consume concurrently.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.default-maximum-active-tasks-per-queue</name>
-    <value>200000</value>
-    <description>The default maximum number of tasks, across all jobs in the 
-    queue, which can be initialized concurrently. Once the queue's jobs exceed 
-    this limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.default-maximum-active-tasks-per-user</name>
-    <value>100000</value>
-    <description>The default maximum number of tasks per-user, across all of 
-    the user's jobs in the queue, which can be initialized concurrently. Once 
-    the user's jobs exceed this limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.default-init-accept-jobs-factor</name>
-    <value>10</value>
-    <description>The default multiple of (maximum-system-jobs * queue-capacity) 
-    used to determine the number of jobs which are accepted by the scheduler.  
-    </description>
-  </property>
-
-  <!-- Capacity scheduler Job Initialization configuration parameters -->
-  <property>
-    <name>mapred.capacity-scheduler.init-poll-interval</name>
-    <value>5000</value>
-    <description>The amount of time in milliseconds which is used to poll 
-    the job queues for jobs to initialize.
-    </description>
-  </property>
-  <property>
-    <name>mapred.capacity-scheduler.init-worker-threads</name>
-    <value>5</value>
-    <description>Number of worker threads used by the
-    initialization poller to initialize jobs in a set of queues.
-    If the number mentioned in the property is equal to the number of job queues,
-    then a single thread initializes the jobs in one queue. If it is smaller,
-    then each thread is assigned a set of queues. If the number
-    is greater, the number of threads used equals the number of
-    job queues.
-    </description>
-  </property>
-
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/MAPREDUCE/configuration/core-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/MAPREDUCE/configuration/core-site.xml
deleted file mode 100644
index 3a2af49..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/MAPREDUCE/configuration/core-site.xml
+++ /dev/null
@@ -1,20 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<configuration>
-</configuration>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/MAPREDUCE/configuration/mapred-queue-acls.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/MAPREDUCE/configuration/mapred-queue-acls.xml
deleted file mode 100644
index ce12380..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/MAPREDUCE/configuration/mapred-queue-acls.xml
+++ /dev/null
@@ -1,39 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- mapred-queue-acls.xml -->
-<configuration>
-
-
-<!-- queue default -->
-
-  <property>
-    <name>mapred.queue.default.acl-submit-job</name>
-    <value>*</value>
-  </property>
-
-  <property>
-    <name>mapred.queue.default.acl-administer-jobs</name>
-    <value>*</value>
-  </property>
-
-  <!-- END ACLs -->
-
-</configuration>
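
Both ACLs above use '*', which admits every user. A simplified Java sketch of that semantic, treating the value as either '*' or a comma-separated user list; real Hadoop queue ACLs also accept a group list, so this helper is an illustration only:

    // Sketch of the '*' semantics in mapred-queue-acls.xml: '*' admits every
    // user, otherwise the value is read as a comma-separated user list.
    // Hypothetical helper, not Hadoop's actual ACL implementation.
    import java.util.Arrays;

    public final class QueueAclCheck {

      static boolean isAllowed(String aclValue, String user) {
        String acl = aclValue.trim();
        if ("*".equals(acl)) {
          return true;                       // everyone may submit/administer
        }
        return Arrays.stream(acl.split(","))
            .map(String::trim)
            .anyMatch(user::equals);         // explicit user list
      }

      public static void main(String[] args) {
        System.out.println(isAllowed("*", "alice"));          // true
        System.out.println(isAllowed("bob,carol", "alice"));  // false
      }
    }
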
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/MAPREDUCE/configuration/mapred-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/MAPREDUCE/configuration/mapred-site.xml
deleted file mode 100644
index af4aa53..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/MAPREDUCE/configuration/mapred-site.xml
+++ /dev/null
@@ -1,531 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-<!-- i/o properties -->
-
-  <property>
-    <name>io.sort.mb</name>
-    <value></value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>io.sort.record.percent</name>
-    <value>.2</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>io.sort.spill.percent</name>
-    <value></value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>io.sort.factor</name>
-    <value>100</value>
-    <description>No description</description>
-  </property>
-
-<!-- map/reduce properties -->
-
-<property>
-  <name>mapred.tasktracker.tasks.sleeptime-before-sigkill</name>
-  <value>250</value>
-  <description>Normally, this is the amount of time before killing
-  processes; the recommended default is 5 seconds, i.e. a value of
-  5000 here.  In this case, we are using it solely to blast tasks before
-  killing them, and killing them very quickly (1/4 second) to guarantee
-  that we do not leave VMs around for later jobs.
-  </description>
-</property>
-
-  <property>
-    <name>mapred.job.tracker.handler.count</name>
-    <value>50</value>
-    <description>
-    The number of server threads for the JobTracker. This should be roughly
-    4% of the number of tasktracker nodes.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.system.dir</name>
-    <value></value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker.http.address</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <!-- cluster specific -->
-    <name>mapred.local.dir</name>
-    <value></value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-
-  <property>
-  <name>mapreduce.cluster.administrators</name>
-  <value> hadoop</value>
-  </property>
-
-  <property>
-    <name>mapred.reduce.parallel.copies</name>
-    <value>30</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.tasktracker.map.tasks.maximum</name>
-    <value></value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.tasktracker.reduce.tasks.maximum</name>
-    <value></value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>tasktracker.http.threads</name>
-    <value>50</value>
-  </property>
-
-  <property>
-    <name>mapred.map.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>If true, then multiple instances of some map tasks
-               may be executed in parallel.</description>
-  </property>
-
-  <property>
-    <name>mapred.reduce.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>If true, then multiple instances of some reduce tasks
-               may be executed in parallel.</description>
-  </property>
-
-  <property>
-    <name>mapred.reduce.slowstart.completed.maps</name>
-    <value>0.05</value>
-  </property>
-
-  <property>
-    <name>mapred.inmem.merge.threshold</name>
-    <value>1000</value>
-    <description>The threshold, in terms of the number of files,
-  for the in-memory merge process. When we accumulate the threshold number of files
-  we initiate the in-memory merge and spill to disk. A value of 0 or less
-  indicates that we do not want any threshold and instead depend only on
-  the ramfs's memory consumption to trigger the merge.
-  </description>
-  </property>
-
-  <property>
-    <name>mapred.job.shuffle.merge.percent</name>
-    <value>0.66</value>
-    <description>The usage threshold at which an in-memory merge will be
-  initiated, expressed as a percentage of the total memory allocated to
-  storing in-memory map outputs, as defined by
-  mapred.job.shuffle.input.buffer.percent.
-  </description>
-  </property>
-
-  <property>
-    <name>mapred.job.shuffle.input.buffer.percent</name>
-    <value>0.7</value>
-    <description>The percentage of memory to be allocated from the maximum heap
-  size to storing map outputs during the shuffle.
-  </description>
-  </property>
-
-  <property>
-    <name>mapred.map.output.compression.codec</name>
-    <value></value>
-    <description>If the map outputs are compressed, how should they be
-      compressed?
-    </description>
-  </property>
-
-<property>
-  <name>mapred.output.compression.type</name>
-  <value>BLOCK</value>
-  <description>If the job outputs are to be compressed as SequenceFiles, how should
-               they be compressed? Should be one of NONE, RECORD or BLOCK.
-  </description>
-</property>
-
-
-  <property>
-    <name>mapred.jobtracker.completeuserjobs.maximum</name>
-    <value>0</value>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.taskScheduler</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.restart.recover</name>
-    <value>false</value>
-    <description>"true" to enable (job) recovery upon restart,
-               "false" to start afresh
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.reduce.input.buffer.percent</name>
-    <value>0.0</value>
-    <description>The percentage of memory, relative to the maximum heap size, to
-  retain map outputs during the reduce. When the shuffle is concluded, any
-  remaining map outputs in memory must consume less than this threshold before
-  the reduce can begin.
-  </description>
-  </property>
-
- <property>
-  <name>mapreduce.reduce.input.limit</name>
-  <value>10737418240</value>
-  <description>The limit on the input size of the reduce. (This value
-  is 10 GB.)  If the estimated input size of the reduce is greater than
-  this value, the job is failed. A value of -1 means that there is no limit
-  set.</description>
-</property>
-
-
-  <!-- copied from kryptonite configuration -->
-  <property>
-    <name>mapred.compress.map.output</name>
-    <value></value>
-  </property>
-
-
-  <property>
-    <name>mapred.task.timeout</name>
-    <value>600000</value>
-    <description>The number of milliseconds before a task will be
-  terminated if it neither reads an input, writes an output, nor
-  updates its status string.
-  </description>
-  </property>
-
-  <property>
-    <name>jetty.connector</name>
-    <value>org.mortbay.jetty.nio.SelectChannelConnector</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.task.tracker.task-controller</name>
-    <value></value>
-   <description>
-     TaskController which is used to launch and manage task execution.
-  </description>
-  </property>
-
-  <property>
-    <name>mapred.child.root.logger</name>
-    <value>INFO,TLA</value>
-  </property>
-
-  <property>
-    <name>mapred.child.java.opts</name>
-    <value></value>
-
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.cluster.map.memory.mb</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.cluster.reduce.memory.mb</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.job.map.memory.mb</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.job.reduce.memory.mb</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.cluster.max.map.memory.mb</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.cluster.max.reduce.memory.mb</name>
-    <value></value>
-  </property>
-
-<property>
-  <name>mapred.hosts</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.hosts.exclude</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.max.tracker.blacklists</name>
-  <value>16</value>
-  <description>
-    If a node is reported as blacklisted by 16 successful jobs within the timeout window, it will be graylisted
-  </description>
-</property>
-
-<property>
-  <name>mapred.healthChecker.script.path</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.healthChecker.interval</name>
-  <value>135000</value>
-</property>
-
-<property>
-  <name>mapred.healthChecker.script.timeout</name>
-  <value>60000</value>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.active</name>
-  <value>false</value>
-  <description>Indicates whether persistence of job status information is
-  active.
-  </description>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.hours</name>
-  <value>1</value>
-  <description>The number of hours job status information is persisted in DFS.
-    The job status information will be available after it drops out of the memory
-    queue and between jobtracker restarts. With a zero value the job status
-    information is not persisted at all in DFS.
-  </description>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.dir</name>
-  <value></value>
-  <description>The directory where the job status information is persisted
-   in a file system to be available after it drops out of the memory queue and
-   between jobtracker restarts.
-  </description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.retirejob.check</name>
-  <value>10000</value>
-</property>
-
-<property>
-  <name>mapred.jobtracker.retirejob.interval</name>
-  <value>0</value>
-</property>
-
-<property>
-  <name>mapred.job.tracker.history.completed.location</name>
-  <value>/mapred/history/done</value>
-  <description>No description</description>
-</property>
-
-<property>
-  <name>mapred.task.maxvmem</name>
-  <value></value>
-  <final>true</final>
-   <description>No description</description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.maxtasks.per.job</name>
-  <value></value>
-  <final>true</final>
-  <description>The maximum number of tasks for a single job.
-  A value of -1 indicates that there is no maximum.  </description>
-</property>
-
-<property>
-  <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
-  <value>false</value>
-</property>
-
-<property>
-  <name>mapred.userlog.retain.hours</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.job.reuse.jvm.num.tasks</name>
-  <value>1</value>
-  <description>
-    How many tasks to run per jvm. If set to -1, there is no limit
-  </description>
-  <final>true</final>
-</property>
-
-<property>
-  <name>mapreduce.jobtracker.kerberos.principal</name>
-  <value></value>
-  <description>
-      JT user name key.
- </description>
-</property>
-
-<property>
-  <name>mapreduce.tasktracker.kerberos.principal</name>
-   <value></value>
-  <description>
-       TT user name key. "_HOST" is replaced by the host name of the task tracker.
-   </description>
-</property>
-
-
-  <property>
-    <name>hadoop.job.history.user.location</name>
-    <value>none</value>
-    <final>true</final>
-  </property>
-
-
- <property>
-   <name>mapreduce.jobtracker.keytab.file</name>
-   <value></value>
-   <description>
-       The keytab for the jobtracker principal.
-   </description>
-
-</property>
-
- <property>
-   <name>mapreduce.tasktracker.keytab.file</name>
-   <value></value>
-    <description>The filename of the keytab for the task tracker</description>
- </property>
-
- <property>
-   <name>mapreduce.jobtracker.staging.root.dir</name>
-   <value>/user</value>
- <description>The path prefix for where the staging directories should be placed. The next level is always the user's
-   name. It is a path in the default file system.</description>
- </property>
-
- <property>
-      <name>mapreduce.tasktracker.group</name>
-      <value>hadoop</value>
-      <description>The group used for accessing the task controller binary. The mapred user must be a member, and ordinary users should *not* be members.</description>
-
- </property>
-
-  <property>
-    <name>mapreduce.jobtracker.split.metainfo.maxsize</name>
-    <value>50000000</value>
-    <final>true</final>
-     <description>If the size of the split metainfo file is larger than this, the JobTracker will fail the job during
-    initialization.
-   </description>
-  </property>
-  <property>
-    <name>mapreduce.history.server.embedded</name>
-    <value>false</value>
-    <description>Should the job history server be embedded within the JobTracker
-process?</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapreduce.history.server.http.address</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>HTTP address of the history server</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.kerberos.principal</name>
-    <!-- cluster variant -->
-  <value></value>
-    <description>Job history user name key. (Must map to the same user as the JT
-user.)</description>
-  </property>
-
- <property>
-   <name>mapreduce.jobhistory.keytab.file</name>
-    <!-- cluster variant -->
-   <value></value>
-   <description>The keytab for the job history server principal.</description>
- </property>
-
-<property>
-  <name>mapred.jobtracker.blacklist.fault-timeout-window</name>
-  <value>180</value>
-  <description>
-    3-hour sliding window (value is in minutes)
-  </description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.blacklist.fault-bucket-width</name>
-  <value>15</value>
-  <description>
-    15-minute bucket size (value is in minutes)
-  </description>
-</property>
-
-<property>
-  <name>mapred.queue.names</name>
-  <value>default</value>
-  <description> Comma separated list of queues configured for this jobtracker.</description>
-</property>
-
-</configuration>
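
Most empty <value/> elements above are placeholders that Ambari fills in per cluster. A short Java sketch of reading the rendered file through Hadoop's Configuration class; the hadoop-core dependency and the /etc/hadoop/conf path are assumptions about the install:

    // Sketch: loading a rendered mapred-site.xml and reading a few of the
    // properties defined above. Assumes the Hadoop 1.x 'hadoop-core' jar on
    // the classpath; the file path is an example.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;

    public final class ReadMapredSite {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false); // skip default resources
        conf.addResource(new Path("/etc/hadoop/conf/mapred-site.xml"));

        // Empty <value/> elements come back as empty strings until Ambari
        // substitutes cluster-specific values, so the defaults matter here.
        int handlers = conf.getInt("mapred.job.tracker.handler.count", 10);
        boolean speculative =
            conf.getBoolean("mapred.map.tasks.speculative.execution", true);

        System.out.println("JT handler threads: " + handlers);
        System.out.println("map speculative execution: " + speculative);
      }
    }
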
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/MAPREDUCE/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/MAPREDUCE/metainfo.xml
deleted file mode 100644
index 79d219b..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/MAPREDUCE/metainfo.xml
+++ /dev/null
@@ -1,41 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>mapred</user>
-    <comment>Apache Hadoop Distributed Processing Framework</comment>
-    <version>1.1.2</version>
-
-    <components>
-        <component>
-            <name>JOBTRACKER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>TASKTRACKER</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>MAPREDUCE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/NAGIOS/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/NAGIOS/metainfo.xml
deleted file mode 100644
index bd7de07..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/NAGIOS/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Nagios Monitoring and Alerting system</comment>
-    <version>3.2.3</version>
-
-    <components>
-        <component>
-            <name>NAGIOS_SERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/OOZIE/configuration/oozie-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/OOZIE/configuration/oozie-site.xml
deleted file mode 100644
index 1665ba8..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/OOZIE/configuration/oozie-site.xml
+++ /dev/null
@@ -1,245 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-        
-       http://www.apache.org/licenses/LICENSE-2.0
-  
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->     
-
-<configuration>
-
-<!--
-    Refer to the oozie-default.xml file for the complete list of
-    Oozie configuration properties and their default values.
--->
-  <property>
-    <name>oozie.base.url</name>
-    <value>http://localhost:11000/oozie</value>
-    <description>Base Oozie URL.</description>
-   </property>
-
-  <property>
-    <name>oozie.system.id</name>
-    <value>oozie-${user.name}</value>
-    <description>
-    The Oozie system ID.
-    </description>
-   </property>
-
-   <property>
-     <name>oozie.systemmode</name>
-     <value>NORMAL</value>
-     <description>
-     System mode for Oozie at startup.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.AuthorizationService.security.enabled</name>
-     <value>true</value>
-     <description>
-     Specifies whether security (user name/admin role) is enabled or not.
-     If disabled, any user can manage the Oozie system and manage any job.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.PurgeService.older.than</name>
-     <value>30</value>
-     <description>
-     Jobs older than this value, in days, will be purged by the PurgeService.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.PurgeService.purge.interval</name>
-     <value>3600</value>
-     <description>
-     Interval at which the purge service will run, in seconds.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.queue.size</name>
-     <value>1000</value>
-     <description>Max callable queue size</description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.threads</name>
-     <value>10</value>
-     <description>Number of threads used for executing callables</description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.callable.concurrency</name>
-     <value>3</value>
-     <description>
-     Maximum concurrency for a given callable type.
-     Each command is a callable type (submit, start, run, signal, job, jobs, suspend, resume, etc).
-     Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
-     All commands that use action executors (action-start, action-end, action-kill and action-check) use
-     the action type as the callable type.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.coord.normal.default.timeout</name>
-     <value>120</value>
-     <description>Default timeout for a coordinator action input check (in minutes) for a normal job.
-      A value of -1 means an infinite timeout.</description>
-   </property>
-
-   <property>
-     <name>oozie.db.schema.name</name>
-     <value>oozie</value>
-     <description>
-      Oozie database name.
-     </description>
-   </property>
-
-    <property>
-      <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
-      <value> </value>
-      <description>
-      Whitelisted job tracker for the Oozie service.
-      </description>
-    </property>
-   
-    <property>
-      <name>oozie.authentication.type</name>
-      <value>simple</value>
-      <description>
-      </description>
-    </property>
-   
-    <property>
-      <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
-      <value> </value>
-      <description>
-      </description>
-    </property>
-
-    <property>
-      <name>oozie.service.WorkflowAppService.system.libpath</name>
-      <value>/user/${user.name}/share/lib</value>
-      <description>
-      System library path to use for workflow applications.
-      This path is added to a workflow application if its job properties set
-      the property 'oozie.use.system.libpath' to true.
-      </description>
-    </property>
-
-    <property>
-      <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
-      <value>false</value>
-      <description>
-      If set to true, submissions of MapReduce and Pig jobs will automatically
-      include the system library path, thus not requiring users to
-      specify where the Pig JAR files are. Instead, the ones from the system
-      library path are used.
-      </description>
-    </property>
-    <property>
-      <name>oozie.authentication.kerberos.name.rules</name>
-      <value>
-        RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/
-        RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/
-        RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
-        RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
-        DEFAULT
-        </value>
-      <description>The mapping from Kerberos principal names to local OS user names.</description>
-    </property>
-    <property>
-      <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
-      <value>*=/etc/hadoop/conf</value>
-      <description>
-          Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
-          the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
-          used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
-          the relevant Hadoop *-site.xml files. If the path is relative, it is looked up
-          within the Oozie configuration directory; the path can also be absolute (e.g. to
-          point to Hadoop client conf/ directories in the local filesystem).
-      </description>
-    </property>
-    <property>
-        <name>oozie.service.ActionService.executor.ext.classes</name>
-        <value>
-            org.apache.oozie.action.email.EmailActionExecutor,
-            org.apache.oozie.action.hadoop.HiveActionExecutor,
-            org.apache.oozie.action.hadoop.ShellActionExecutor,
-            org.apache.oozie.action.hadoop.SqoopActionExecutor,
-            org.apache.oozie.action.hadoop.DistcpActionExecutor
-        </value>
-    </property>
-
-    <property>
-        <name>oozie.service.SchemaService.wf.ext.schemas</name>
-        <value>shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd</value>
-    </property>
-    <property>
-        <name>oozie.service.JPAService.create.db.schema</name>
-        <value>false</value>
-        <description>
-            Creates Oozie DB.
-
-            If set to true, it creates the DB schema if it does not exist; if the DB schema already exists, this is a NOP.
-            If set to false, it does not create the DB schema; if the DB schema does not exist, startup fails.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.driver</name>
-        <value>org.apache.derby.jdbc.EmbeddedDriver</value>
-        <description>
-            JDBC driver class.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.url</name>
-        <value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
-        <description>
-            JDBC URL.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.username</name>
-        <value>sa</value>
-        <description>
-            DB user name.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.password</name>
-        <value> </value>
-        <description>
-            DB user password.
-
-            IMPORTANT: if the password is empty, leave a 1-space string; the service trims the value,
-                       and if it is empty, Configuration assumes it is NULL.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.pool.max.active.conn</name>
-        <value>10</value>
-        <description>
-             Max number of connections.
-        </description>
-    </property>
-</configuration>
\ No newline at end of file
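
The JDBC URL above composes ${oozie.data.dir} and ${oozie.db.schema.name} at runtime. A small Java sketch of that ${var} expansion; the resolver shown is a hypothetical stand-in for Oozie's own substitution, kept here only to make the mechanism concrete:

    // Sketch of the ${var} substitution used in
    // oozie.service.JPAService.jdbc.url above. Hypothetical stand-in for
    // Oozie's resolver, for illustration only.
    import java.util.Map;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public final class ExpandOozieUrl {
      private static final Pattern VAR = Pattern.compile("\\$\\{([^}]+)}");

      static String expand(String template, Map<String, String> props) {
        Matcher m = VAR.matcher(template);
        StringBuffer out = new StringBuffer();
        while (m.find()) {
          // Unknown variables are left untouched rather than failing.
          String v = props.getOrDefault(m.group(1), m.group(0));
          m.appendReplacement(out, Matcher.quoteReplacement(v));
        }
        m.appendTail(out);
        return out.toString();
      }

      public static void main(String[] args) {
        String url = expand(
            "jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true",
            Map.of("oozie.data.dir", "/var/lib/oozie",
                   "oozie.db.schema.name", "oozie"));
        System.out.println(url);
        // jdbc:derby:/var/lib/oozie/oozie-db;create=true
      }
    }
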
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/OOZIE/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/OOZIE/metainfo.xml
deleted file mode 100644
index 83ccb06..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/OOZIE/metainfo.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>System for workflow coordination and execution of Apache Hadoop jobs</comment>
-    <version>3.2.0</version>
-
-    <components>
-        <component>
-            <name>OOZIE_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>OOZIE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/PIG/configuration/pig.properties b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/PIG/configuration/pig.properties
deleted file mode 100644
index 01000b5..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/PIG/configuration/pig.properties
+++ /dev/null
@@ -1,52 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Pig default configuration file. All values can be overwritten by pig.properties and command line arguments.
-# see bin/pig -help
-
-# brief logging (no timestamps)
-brief=false
-
-#debug level, INFO is default
-debug=INFO
-
-#verbose print all log messages to screen (default to print only INFO and above to screen)
-verbose=false
-
-#exectype local|mapreduce, mapreduce is default
-exectype=mapreduce
-
-#Enable insertion of information about script into hadoop job conf 
-pig.script.info.enabled=true
-
-#Do not spill temp files smaller than this size (bytes)
-pig.spill.size.threshold=5000000
-#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
-#This should help reduce the number of files being spilled.
-pig.spill.gc.activation.size=40000000
-
-#the following two parameters are to help estimate the reducer number
-pig.exec.reducers.bytes.per.reducer=1000000000
-pig.exec.reducers.max=999
-
-#Temporary location to store the intermediate data.
-pig.temp.dir=/tmp/
-
-#Threshold for merging FRJoin fragment files
-pig.files.concatenation.threshold=100
-pig.optimistic.files.concatenation=false;
-
-pig.disable.counter=false
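
The two reducer-estimation properties above combine roughly as reducers = min(pig.exec.reducers.max, ceil(inputBytes / pig.exec.reducers.bytes.per.reducer)). A Java sketch of that arithmetic; Pig's actual estimator is more involved, so treat this as an illustration of the two knobs:

    // Sketch of how pig.exec.reducers.bytes.per.reducer and
    // pig.exec.reducers.max above combine to estimate a reducer count.
    // Illustrative only; a floor of 1 reducer is assumed.
    public final class EstimateReducers {

      static int estimate(long inputBytes, long bytesPerReducer, int maxReducers) {
        long wanted = (inputBytes + bytesPerReducer - 1) / bytesPerReducer; // ceil
        return (int) Math.max(1, Math.min(maxReducers, wanted));
      }

      public static void main(String[] args) {
        // 2.5 TB of input with the defaults above (1 GB per reducer, max 999)
        System.out.println(estimate(2_500_000_000_000L, 1_000_000_000L, 999));
        // prints 999: the max caps the raw estimate of 2500
      }
    }
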
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/PIG/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/PIG/metainfo.xml
deleted file mode 100644
index 4982fd2..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/PIG/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Scripting platform for analyzing large datasets</comment>
-    <version>0.10.1</version>
-
-    <components>
-        <component>
-            <name>PIG</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/SQOOP/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/SQOOP/metainfo.xml
deleted file mode 100644
index ae0e68b..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/SQOOP/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Tool for transferring bulk data between Apache Hadoop and structured data stores such as relational databases</comment>
-    <version>1.4.2</version>
-
-    <components>
-        <component>
-            <name>SQOOP</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/WEBHCAT/configuration/webhcat-site.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/WEBHCAT/configuration/webhcat-site.xml
deleted file mode 100644
index 31d0113..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/WEBHCAT/configuration/webhcat-site.xml
+++ /dev/null
@@ -1,126 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- 
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<!-- The default settings for Templeton. -->
-<!-- Edit templeton-site.xml to change settings for your local -->
-<!-- install. -->
-
-<configuration>
-
-  <property>
-    <name>templeton.port</name>
-      <value>50111</value>
-    <description>The HTTP port for the main server.</description>
-  </property>
-
-  <property>
-    <name>templeton.hadoop.conf.dir</name>
-    <value>/etc/hadoop/conf</value>
-    <description>The path to the Hadoop configuration.</description>
-  </property>
-
-  <property>
-    <name>templeton.jar</name>
-    <value>/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar</value>
-    <description>The path to the Templeton jar file.</description>
-  </property>
-
-  <property>
-    <name>templeton.libjars</name>
-    <value>/usr/lib/zookeeper/zookeeper.jar</value>
-    <description>Jars to add to the classpath.</description>
-  </property>
-
-
-  <property>
-    <name>templeton.hadoop</name>
-    <value>/usr/bin/hadoop</value>
-    <description>The path to the Hadoop executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.archive</name>
-    <value>hdfs:///apps/webhcat/pig.tar.gz</value>
-    <description>The path to the Pig archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.path</name>
-    <value>pig.tar.gz/pig/bin/pig</value>
-    <description>The path to the Pig executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hcat</name>
-    <value>/usr/bin/hcat</value>
-    <description>The path to the hcatalog executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.archive</name>
-    <value>hdfs:///apps/webhcat/hive.tar.gz</value>
-    <description>The path to the Hive archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.path</name>
-    <value>hive.tar.gz/hive/bin/hive</value>
-    <description>The path to the Hive executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.properties</name>
-    <value></value>
-    <description>Properties to set when running hive.</description>
-  </property>
-
-
-  <property>
-    <name>templeton.zookeeper.hosts</name>
-    <value></value>
-    <description>ZooKeeper servers, as comma separated host:port pairs</description>
-  </property>
-
-  <property>
-    <name>templeton.storage.class</name>
-    <value>org.apache.hcatalog.templeton.tool.ZooKeeperStorage</value>
-    <description>The class to use as storage</description>
-  </property>
-
-  <property>
-   <name>templeton.override.enabled</name>
-   <value>false</value>
-   <description>
-     Enable the override path in templeton.override.jars
-   </description>
- </property>
-
- <property>
-    <name>templeton.streaming.jar</name>
-    <value>hdfs:///apps/webhcat/hadoop-streaming.jar</value>
-    <description>The hdfs path to the Hadoop streaming jar file.</description>
-  </property> 
-
-  <property>
-    <name>templeton.exec.timeout</name>
-    <value>60000</value>
-    <description>Timeout for the Templeton API.</description>
-  </property>
-
-</configuration>
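
templeton.port above (50111) is the REST port for WebHCat. A Java sketch of probing the server's status resource over HTTP; the localhost host and the /templeton/v1/status path are assumptions about a particular install:

    // Sketch: probing a WebHCat (Templeton) server on templeton.port from
    // webhcat-site.xml above. Host and path are assumptions, shown for
    // illustration only.
    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public final class WebHcatPing {
      public static void main(String[] args) throws Exception {
        URL url = new URL("http://localhost:50111/templeton/v1/status");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setConnectTimeout(5_000);
        conn.setReadTimeout(5_000);

        System.out.println("HTTP " + conn.getResponseCode());
        try (BufferedReader in = new BufferedReader(
            new InputStreamReader(conn.getInputStream()))) {
          // A healthy server answers with a small JSON status document.
          in.lines().forEach(System.out::println);
        }
      }
    }
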
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/WEBHCAT/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/WEBHCAT/metainfo.xml
deleted file mode 100644
index e65992f..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/WEBHCAT/metainfo.xml
+++ /dev/null
@@ -1,31 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is a comment for the WEBHCAT service</comment>
-    <version>0.5.0</version>
-
-    <components>
-        <component>
-            <name>WEBHCAT_SERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/ZOOKEEPER/metainfo.xml b/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/ZOOKEEPER/metainfo.xml
deleted file mode 100644
index 0e21f4f..0000000
--- a/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/ZOOKEEPER/metainfo.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is a comment for the ZOOKEEPER service</comment>
-    <version>3.4.5</version>
-
-    <components>
-        <component>
-            <name>ZOOKEEPER_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>ZOOKEEPER_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>
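
All of the metainfo.xml files above share the same small schema: a user, a comment, a version, and a list of components, each with a name and a category. A Java sketch of reading that shape with the JDK's DOM parser; Ambari has its own stack parser, so this is illustrative only:

    // Sketch: reading the metainfo.xml schema used by the service
    // definitions above with the JDK DOM parser. The file path is an example.
    import javax.xml.parsers.DocumentBuilderFactory;
    import org.w3c.dom.Document;
    import org.w3c.dom.Element;
    import org.w3c.dom.NodeList;

    public final class ReadMetainfo {
      public static void main(String[] args) throws Exception {
        Document doc = DocumentBuilderFactory.newInstance()
            .newDocumentBuilder()
            .parse("metainfo.xml");

        System.out.println("version: "
            + doc.getElementsByTagName("version").item(0).getTextContent());

        // Each <component> carries a <name> and a MASTER/SLAVE/CLIENT category.
        NodeList components = doc.getElementsByTagName("component");
        for (int i = 0; i < components.getLength(); i++) {
          Element c = (Element) components.item(i);
          System.out.println(
              c.getElementsByTagName("name").item(0).getTextContent() + " -> "
              + c.getElementsByTagName("category").item(0).getTextContent());
        }
      }
    }
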
diff --git a/branch-1.2/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.2.1.sql b/branch-1.2/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.2.1.sql
deleted file mode 100644
index e1f6762..0000000
--- a/branch-1.2/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.2.1.sql
+++ /dev/null
@@ -1,21 +0,0 @@
---
--- Licensed to the Apache Software Foundation (ASF) under one
--- or more contributor license agreements.  See the NOTICE file
--- distributed with this work for additional information
--- regarding copyright ownership.  The ASF licenses this file
--- to you under the Apache License, Version 2.0 (the
--- "License"); you may not use this file except in compliance
--- with the License.  You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
---
-\connect ambari;
-
-ALTER TABLE ambari.hosts
-  ALTER COLUMN disks_info TYPE VARCHAR(10000);
diff --git a/branch-1.2/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.2.2.Check.sql b/branch-1.2/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.2.2.Check.sql
deleted file mode 100644
index 185810a..0000000
--- a/branch-1.2/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.2.2.Check.sql
+++ /dev/null
@@ -1,20 +0,0 @@
---
--- Licensed to the Apache Software Foundation (ASF) under one
--- or more contributor license agreements.  See the NOTICE file
--- distributed with this work for additional information
--- regarding copyright ownership.  The ASF licenses this file
--- to you under the Apache License, Version 2.0 (the
--- "License"); you may not use this file except in compliance
--- with the License.  You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
---
-\connect ambari;
-
-COPY (SELECT count(*) FROM ambari.serviceconfigmapping WHERE service_name = 'MAPREDUCE') TO STDOUT WITH CSV;
diff --git a/branch-1.2/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.2.2.Fix.sql b/branch-1.2/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.2.2.Fix.sql
deleted file mode 100644
index 61677b2..0000000
--- a/branch-1.2/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.2.2.Fix.sql
+++ /dev/null
@@ -1,26 +0,0 @@
---
--- Licensed to the Apache Software Foundation (ASF) under one
--- or more contributor license agreements.  See the NOTICE file
--- distributed with this work for additional information
--- regarding copyright ownership.  The ASF licenses this file
--- to you under the Apache License, Version 2.0 (the
--- "License"); you may not use this file except in compliance
--- with the License.  You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
---
-\connect ambari;
-
-INSERT INTO ambari.serviceconfigmapping (cluster_id, service_name, config_type, config_tag, timestamp)
-    SELECT cluster_id, 'MAPREDUCE', type_name, version_tag, create_timestamp from ambari.clusterconfig 
-        WHERE type_name = 'global' ORDER BY create_timestamp DESC LIMIT 1;
-
-INSERT INTO ambari.serviceconfigmapping (cluster_id, service_name, config_type, config_tag, timestamp)
-    SELECT cluster_id, 'MAPREDUCE', type_name, version_tag, create_timestamp from ambari.clusterconfig
-         WHERE type_name = 'mapred-site' ORDER BY create_timestamp DESC LIMIT 1;
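
The Check script above emits the MAPREDUCE row count as CSV, and the Fix script restores the missing serviceconfigmapping rows. A JDBC sketch of the implied check-then-fix flow; the count==0 trigger, the URL, and the credentials are assumptions, and only the first of the two Fix inserts is shown:

    // Sketch of the check-then-fix flow implied by the two scripts above:
    // run the Check query, and only apply the Fix inserts when no MAPREDUCE
    // rows exist. JDBC URL and credentials are placeholders.
    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public final class UpgradeCheckThenFix {
      public static void main(String[] args) throws Exception {
        try (Connection c = DriverManager.getConnection(
                 "jdbc:postgresql://localhost/ambari", "ambari", "secret");
             Statement st = c.createStatement()) {

          // The Check query from Ambari-DDL-Postgres-UPGRADE-1.2.2.Check.sql
          ResultSet rs = st.executeQuery(
              "SELECT count(*) FROM ambari.serviceconfigmapping"
              + " WHERE service_name = 'MAPREDUCE'");
          rs.next();
          if (rs.getInt(1) == 0) {
            // One of the two inserts from ...UPGRADE-1.2.2.Fix.sql
            st.executeUpdate(
                "INSERT INTO ambari.serviceconfigmapping"
                + " (cluster_id, service_name, config_type, config_tag, timestamp)"
                + " SELECT cluster_id, 'MAPREDUCE', type_name, version_tag,"
                + "        create_timestamp FROM ambari.clusterconfig"
                + " WHERE type_name = 'global'"
                + " ORDER BY create_timestamp DESC LIMIT 1");
          }
        }
      }
    }
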
diff --git a/branch-1.2/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.2.2.sql b/branch-1.2/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.2.2.sql
deleted file mode 100644
index e1f6762..0000000
--- a/branch-1.2/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.2.2.sql
+++ /dev/null
@@ -1,21 +0,0 @@
---
--- Licensed to the Apache Software Foundation (ASF) under one
--- or more contributor license agreements.  See the NOTICE file
--- distributed with this work for additional information
--- regarding copyright ownership.  The ASF licenses this file
--- to you under the Apache License, Version 2.0 (the
--- "License"); you may not use this file except in compliance
--- with the License.  You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
---
-\connect ambari;
-
-ALTER TABLE ambari.hosts
-  ALTER COLUMN disks_info TYPE VARCHAR(10000);
diff --git a/branch-1.2/ambari-server/src/main/resources/upgrade/dml/Ambari-DML-Postgres-UPGRADE_STACK.sql b/branch-1.2/ambari-server/src/main/resources/upgrade/dml/Ambari-DML-Postgres-UPGRADE_STACK.sql
deleted file mode 100644
index 88f022b..0000000
--- a/branch-1.2/ambari-server/src/main/resources/upgrade/dml/Ambari-DML-Postgres-UPGRADE_STACK.sql
+++ /dev/null
@@ -1,44 +0,0 @@
---
--- Licensed to the Apache Software Foundation (ASF) under one
--- or more contributor license agreements.  See the NOTICE file
--- distributed with this work for additional information
--- regarding copyright ownership.  The ASF licenses this file
--- to you under the Apache License, Version 2.0 (the
--- "License"); you may not use this file except in compliance
--- with the License.  You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
---
-\connect ambari;
-
-PREPARE clusters (text, text) AS
-  UPDATE ambari.clusters
-    SET desired_stack_version = '{"stackName":"' || $1 || '","stackVersion":"' || $2 || '"}';
-
-PREPARE hostcomponentdesiredstate (text, text) AS
-  UPDATE ambari.hostcomponentdesiredstate
-    SET desired_stack_version = '{"stackName":"' || $1 || '","stackVersion":"' || $2 || '"}';
-
-PREPARE hostcomponentstate (text, text) AS
-  UPDATE ambari.hostcomponentstate
-    SET current_stack_version = '{"stackName":"' || $1 || '","stackVersion":"' || $2 || '"}';
-
-PREPARE servicecomponentdesiredstate (text, text) AS
-  UPDATE ambari.servicecomponentdesiredstate
-    SET desired_stack_version = '{"stackName":"' || $1  || '","stackVersion":"' || $2 || '"}';
-
-PREPARE servicedesiredstate (text, text) AS
-  UPDATE ambari.servicedesiredstate
-    SET desired_stack_version = '{"stackName":"' || $1 || '","stackVersion":"' || $2 || '"}';
-
-EXECUTE clusters(:stack_name, :stack_version);
-EXECUTE hostcomponentdesiredstate(:stack_name, :stack_version);
-EXECUTE hostcomponentstate(:stack_name, :stack_version);
-EXECUTE servicecomponentdesiredstate(:stack_name, :stack_version);
-EXECUTE servicedesiredstate(:stack_name, :stack_version);
\ No newline at end of file
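
The script above relies on psql-side PREPARE plus :stack_name and :stack_version variables supplied on the psql command line. A JDBC sketch of the same five updates using bind parameters instead; the connection details are placeholders:

    // Sketch: the same five stack-version updates as the psql script above,
    // using JDBC bind parameters instead of PREPARE/EXECUTE with psql
    // variables. Connection details are placeholders.
    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;

    public final class UpgradeStackVersion {
      public static void main(String[] args) throws Exception {
        // args[0] = stack name, args[1] = stack version
        String json = "{\"stackName\":\"" + args[0]
            + "\",\"stackVersion\":\"" + args[1] + "\"}";

        // table -> column pairs, as in the script above
        String[][] targets = {
            {"ambari.clusters", "desired_stack_version"},
            {"ambari.hostcomponentdesiredstate", "desired_stack_version"},
            {"ambari.hostcomponentstate", "current_stack_version"},
            {"ambari.servicecomponentdesiredstate", "desired_stack_version"},
            {"ambari.servicedesiredstate", "desired_stack_version"},
        };

        try (Connection c = DriverManager.getConnection(
            "jdbc:postgresql://localhost/ambari", "ambari", "secret")) {
          for (String[] t : targets) {
            try (PreparedStatement ps = c.prepareStatement(
                "UPDATE " + t[0] + " SET " + t[1] + " = ?")) {
              ps.setString(1, json);
              ps.executeUpdate();
            }
          }
        }
      }
    }
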
diff --git a/branch-1.2/ambari-server/src/main/resources/webapp/WEB-INF/spring-security.xml b/branch-1.2/ambari-server/src/main/resources/webapp/WEB-INF/spring-security.xml
deleted file mode 100644
index 6a722d5..0000000
--- a/branch-1.2/ambari-server/src/main/resources/webapp/WEB-INF/spring-security.xml
+++ /dev/null
@@ -1,46 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<beans:beans xmlns="http://www.springframework.org/schema/security"
-             xmlns:beans="http://www.springframework.org/schema/beans"
-             xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-             xsi:schemaLocation="http://www.springframework.org/schema/beans
-                    http://www.springframework.org/schema/beans/spring-beans-3.0.xsd
-                    http://www.springframework.org/schema/security
-                    http://www.springframework.org/schema/security/spring-security-3.1.xsd">
-
-  <http use-expressions="true"
-        disable-url-rewriting="true" entry-point-ref="ambariEntryPoint">
-    <http-basic entry-point-ref="ambariEntryPoint"/>
-    <intercept-url pattern="/**" access="isAuthenticated()" method="GET"/>
-    <intercept-url pattern="/**" access="hasRole('ADMIN')"/>
-  </http>
-
-  <!--<ldap-server id="ldapServer" root="dc=ambari,dc=apache,dc=org"/>-->
-
-  <authentication-manager>
-
-    <authentication-provider user-service-ref="ambariLocalUserService">
-      <password-encoder ref="passwordEncoder"/>
-    </authentication-provider>
-
-    <authentication-provider ref="ambariLdapAuthenticationProvider"/>
-
-  </authentication-manager>
-
-  <beans:bean id="ambariEntryPoint" class="org.apache.ambari.server.security.AmbariEntryPoint">
-  </beans:bean>
-</beans:beans>
\ No newline at end of file
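
The XML above wires an entry-point bean (ambariEntryPoint) in front of HTTP Basic authentication and two authentication providers. A Java sketch of the shape such a Spring Security 3.1 entry point must have; this is an illustrative stand-in, not Ambari's actual AmbariEntryPoint:

    // Sketch of the shape a Spring Security 3.1 entry-point bean such as
    // ambariEntryPoint above must have: implement AuthenticationEntryPoint
    // and decide what an unauthenticated request receives. Illustrative
    // stand-in only, not Ambari's implementation.
    import java.io.IOException;
    import javax.servlet.ServletException;
    import javax.servlet.http.HttpServletRequest;
    import javax.servlet.http.HttpServletResponse;
    import org.springframework.security.core.AuthenticationException;
    import org.springframework.security.web.AuthenticationEntryPoint;

    public class ExampleEntryPoint implements AuthenticationEntryPoint {
      @Override
      public void commence(HttpServletRequest request,
                           HttpServletResponse response,
                           AuthenticationException authException)
          throws IOException, ServletException {
        // Plain 403 for unauthenticated calls; a real entry point might
        // instead send a WWW-Authenticate challenge for HTTP Basic clients.
        response.sendError(HttpServletResponse.SC_FORBIDDEN,
            authException.getMessage());
      }
    }
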
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionDBAccessorImpl.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionDBAccessorImpl.java
deleted file mode 100644
index 89d913a..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionDBAccessorImpl.java
+++ /dev/null
@@ -1,232 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.actionmanager;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-
-import junit.framework.Assert;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.Role;
-import org.apache.ambari.server.RoleCommand;
-import org.apache.ambari.server.agent.ActionQueue;
-import org.apache.ambari.server.agent.CommandReport;
-import org.apache.ambari.server.controller.HostsMap;
-import org.apache.ambari.server.orm.GuiceJpaInitializer;
-import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.apache.ambari.server.orm.dao.ExecutionCommandDAO;
-import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
-import org.apache.ambari.server.orm.entities.HostRoleCommandEntity;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.svccomphost.ServiceComponentHostStartEvent;
-import org.apache.ambari.server.utils.StageUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.inject.Guice;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.persist.PersistService;
-
-import static org.junit.Assert.*;
-
-public class TestActionDBAccessorImpl {
-  private static final Logger log = LoggerFactory.getLogger(TestActionDBAccessorImpl.class);
-
-  private long requestId = 23;
-  private long stageId = 31;
-  private String hostName = "host1";
-  private String clusterName = "cluster1";
-  private Injector injector;
-  ActionDBAccessor db;
-  ActionManager am;
-
-  @Inject
-  private Clusters clusters;
-  @Inject
-  private ExecutionCommandDAO executionCommandDAO;
-  @Inject
-  private HostRoleCommandDAO hostRoleCommandDAO;
-
-  @Before
-  public void setup() throws AmbariException {
-    injector = Guice.createInjector(new InMemoryDefaultTestModule());
-    injector.getInstance(GuiceJpaInitializer.class);
-    injector.injectMembers(this);
-    clusters.addHost(hostName);
-    clusters.getHost(hostName).persist();
-    clusters.addCluster(clusterName);
-    db = injector.getInstance(ActionDBAccessorImpl.class);
-    
-    am = new ActionManager(5000, 1200000, new ActionQueue(), clusters, db,
-        new HostsMap((String) null));
-  }
-
-  @After
-  public void tearDown() throws AmbariException {
-    injector.getInstance(PersistService.class).stop();
-  }
-
-  @Test
-  public void testActionResponse() {
-    String hostname = "host1";
-    populateActionDB(db, hostname, requestId, stageId);
-    Stage stage = db.getAllStages(requestId).get(0);
-    Assert.assertEquals(stageId, stage.getStageId());
-    stage.setHostRoleStatus(hostname, "HBASE_MASTER", HostRoleStatus.QUEUED);
-    db.hostRoleScheduled(stage, hostname, "HBASE_MASTER");
-    List<CommandReport> reports = new ArrayList<CommandReport>();
-    CommandReport cr = new CommandReport();
-    cr.setTaskId(1);
-    cr.setActionId(StageUtils.getActionId(requestId, stageId));
-    cr.setRole("HBASE_MASTER");
-    cr.setStatus("COMPLETED");
-    cr.setStdErr("");
-    cr.setStdOut("");
-    cr.setExitCode(215);
-    reports.add(cr);
-    am.processTaskResponse(hostname, reports);
-    assertEquals(215,
-        am.getAction(requestId, stageId).getExitCode(hostname, "HBASE_MASTER"));
-    assertEquals(HostRoleStatus.COMPLETED, am.getAction(requestId, stageId)
-        .getHostRoleStatus(hostname, "HBASE_MASTER"));
-    Stage s = db.getAllStages(requestId).get(0);
-    assertEquals(HostRoleStatus.COMPLETED,s.getHostRoleStatus(hostname, "HBASE_MASTER"));
-  }
-  
-  @Test
-  public void testGetStagesInProgress() {
-    String hostname = "host1";
-    populateActionDB(db, hostname, requestId, stageId);
-    populateActionDB(db, hostname, requestId, stageId+1);
-    List<Stage> stages = db.getStagesInProgress();
-    assertEquals(2, stages.size());
-  }
-  
-  @Test
-  public void testGetStagesInProgressWithFailures() {
-    String hostname = "host1";
-    populateActionDB(db, hostname, requestId, stageId);
-    populateActionDB(db, hostname, requestId+1, stageId);
-    db.abortOperation(requestId);
-    List<Stage> stages = db.getStagesInProgress();
-    assertEquals(1, stages.size());
-    assertEquals(requestId+1, stages.get(0).getRequestId());
-  }
-
-  @Test
-  public void testPersistActions() {
-    populateActionDB(db, hostName, requestId, stageId);
-    for (Stage stage : db.getAllStages(requestId)) {
-      log.info("taskId={}" + stage.getExecutionCommands(hostName).get(0).
-          getExecutionCommand().getTaskId());
-      assertTrue(stage.getExecutionCommands(hostName).get(0).
-          getExecutionCommand().getTaskId() > 0);
-      assertNotNull(executionCommandDAO.findByPK(stage.getExecutionCommands(hostName).
-          get(0).getExecutionCommand().getTaskId()));
-    }
-  }
-
-  @Test
-  public void testHostRoleScheduled() throws InterruptedException {
-    populateActionDB(db, hostName, requestId, stageId);
-    Stage stage = db.getAction(StageUtils.getActionId(requestId, stageId));
-    assertEquals(HostRoleStatus.PENDING, stage.getHostRoleStatus(hostName, Role.HBASE_MASTER.toString()));
-    List<HostRoleCommandEntity> entities=
-        hostRoleCommandDAO.findByHostRole(hostName, requestId, stageId, Role.HBASE_MASTER);
-
-    assertEquals(HostRoleStatus.PENDING, entities.get(0).getStatus());
-    stage.setHostRoleStatus(hostName, Role.HBASE_MASTER.toString(), HostRoleStatus.QUEUED);
-
-    entities = hostRoleCommandDAO.findByHostRole(hostName, requestId, stageId, Role.HBASE_MASTER);
-    assertEquals(HostRoleStatus.QUEUED, stage.getHostRoleStatus(hostName, Role.HBASE_MASTER.toString()));
-    assertEquals(HostRoleStatus.PENDING, entities.get(0).getStatus());
-    db.hostRoleScheduled(stage, hostName, Role.HBASE_MASTER.toString());
-
-    entities = hostRoleCommandDAO.findByHostRole(hostName, requestId, stageId, Role.HBASE_MASTER);
-    assertEquals(HostRoleStatus.QUEUED, entities.get(0).getStatus());
-
-    Thread thread = new Thread(){
-      @Override
-      public void run() {
-        Stage stage1 = db.getAction("23-31");
-        stage1.setHostRoleStatus(hostName, Role.HBASE_MASTER.toString(), HostRoleStatus.COMPLETED);
-        db.hostRoleScheduled(stage1, hostName, Role.HBASE_MASTER.toString());
-      }
-    };
-
-    thread.start();
-    thread.join();
-
-    entities = hostRoleCommandDAO.findByHostRole(hostName, requestId, stageId, Role.HBASE_MASTER);
-    assertEquals("Concurrent update failed", HostRoleStatus.COMPLETED, entities.get(0).getStatus());
-
-  }
-
-  @Test
-  public void testUpdateHostRole() throws Exception {
-    populateActionDB(db, hostName, requestId, stageId);
-    StringBuilder sb = new StringBuilder();
-    for (int i = 0; i < 50000; i++) {
-      sb.append("1234567890");
-    }
-    String largeString = sb.toString();
-
-    CommandReport commandReport = new CommandReport();
-    commandReport.setStatus(HostRoleStatus.COMPLETED.toString());
-    commandReport.setStdOut(largeString);
-    commandReport.setStdErr(largeString);
-    commandReport.setExitCode(123);
-    db.updateHostRoleState(hostName, requestId, stageId, Role.HBASE_MASTER.toString(), commandReport);
-
-    List<HostRoleCommandEntity> commandEntities = hostRoleCommandDAO.findByHostRole(hostName, requestId, stageId, Role.HBASE_MASTER);
-    assertEquals(1, commandEntities.size());
-    HostRoleCommandEntity commandEntity = commandEntities.get(0);
-    HostRoleCommand command = db.getTask(commandEntity.getTaskId());
-    assertNotNull(command);
-
-    assertEquals(largeString, command.getStdout());
-
-  }
-
-  private void populateActionDB(ActionDBAccessor db, String hostname,
-      long requestId, long stageId) {
-    Stage s = new Stage(requestId, "/a/b", "cluster1");
-    s.setStageId(stageId);
-    s.addHostRoleExecutionCommand(hostname, Role.HBASE_MASTER,
-        RoleCommand.START,
-        new ServiceComponentHostStartEvent(Role.HBASE_MASTER.toString(),
-            hostname, System.currentTimeMillis(),
-            new HashMap<String, String>()), "cluster1", "HBASE");
-    s.addHostRoleExecutionCommand(
-        hostname,
-        Role.HBASE_REGIONSERVER,
-        RoleCommand.START,
-        new ServiceComponentHostStartEvent(Role.HBASE_REGIONSERVER
-            .toString(), hostname, System.currentTimeMillis(),
-            new HashMap<String, String>()), "cluster1", "HBASE");
-    List<Stage> stages = new ArrayList<Stage>();
-    stages.add(s);
-    db.persistActions(stages);
-  }
-}
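
The assertions above lean on a "<requestId>-<stageId>" action id format
(e.g. db.getAction("23-31") for requestId 23 and stageId 31, and the
cr.setActionId(StageUtils.getActionId(...)) calls). A minimal, hypothetical
sketch of that composition; the real code lives in StageUtils, so this is
an inferred reading, not the actual implementation:

public final class ActionIdSketch {
  // Hypothetical stand-in for StageUtils.getActionId; the tests only rely
  // on the "<requestId>-<stageId>" shape of the returned string.
  static String getActionId(long requestId, long stageId) {
    return requestId + "-" + stageId;
  }

  public static void main(String[] args) {
    System.out.println(getActionId(23, 31)); // prints "23-31"
  }
}
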
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionManager.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionManager.java
deleted file mode 100644
index 753a186..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionManager.java
+++ /dev/null
@@ -1,179 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.actionmanager;
-
-import static org.junit.Assert.assertEquals;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-
-import junit.framework.Assert;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.Role;
-import org.apache.ambari.server.RoleCommand;
-import org.apache.ambari.server.agent.ActionQueue;
-import org.apache.ambari.server.agent.CommandReport;
-import org.apache.ambari.server.controller.HostsMap;
-import org.apache.ambari.server.orm.GuiceJpaInitializer;
-import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.svccomphost.ServiceComponentHostStartEvent;
-import org.apache.ambari.server.utils.StageUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import com.google.inject.Guice;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.persist.PersistService;
-
-public class TestActionManager {
-
-  private long requestId = 23;
-  private long stageId = 31;
-  private Injector injector;
-  private String hostname = "host1";
-  private String clusterName = "cluster1";
-
-  private Clusters clusters;
-
-  @Before
-  public void setup() throws AmbariException {
-    injector = Guice.createInjector(new InMemoryDefaultTestModule());
-    injector.getInstance(GuiceJpaInitializer.class);
-    clusters = injector.getInstance(Clusters.class);
-    clusters.addHost(hostname);
-    clusters.getHost(hostname).persist();
-    clusters.addCluster(clusterName);
-  }
-
-  @After
-  public void teardown() throws AmbariException {
-    injector.getInstance(PersistService.class).stop();
-  }
-
-  @Test
-  public void testActionResponse() {
-    ActionDBAccessor db = injector.getInstance(ActionDBAccessorImpl.class);
-    ActionManager am = new ActionManager(5000, 1200000, new ActionQueue(),
-        clusters, db, new HostsMap((String) null));
-    populateActionDB(db, hostname);
-    Stage stage = db.getAllStages(requestId).get(0);
-    Assert.assertEquals(stageId, stage.getStageId());
-    stage.setHostRoleStatus(hostname, "HBASE_MASTER", HostRoleStatus.QUEUED);
-    db.hostRoleScheduled(stage, hostname, "HBASE_MASTER");
-    List<CommandReport> reports = new ArrayList<CommandReport>();
-    CommandReport cr = new CommandReport();
-    cr.setTaskId(1);
-    cr.setActionId(StageUtils.getActionId(requestId, stageId));
-    cr.setRole("HBASE_MASTER");
-    cr.setStatus("COMPLETED");
-    cr.setStdErr("ERROR");
-    cr.setStdOut("OUTPUT");
-    cr.setExitCode(215);
-    reports.add(cr);
-    am.processTaskResponse(hostname, reports);
-    assertEquals(215,
-        am.getAction(requestId, stageId).getExitCode(hostname, "HBASE_MASTER"));
-    assertEquals(HostRoleStatus.COMPLETED, am.getAction(requestId, stageId)
-        .getHostRoleStatus(hostname, "HBASE_MASTER"));
-    assertEquals(
-        "ERROR",
-        am.getAction(requestId, stageId)
-            .getHostRoleCommand(hostname, "HBASE_MASTER").getStderr());
-    assertEquals(
-        "OUTPUT",
-        am.getAction(requestId, stageId)
-            .getHostRoleCommand(hostname, "HBASE_MASTER").getStdout());
-    
-  }
-  
-  @Test
-  public void testLargeLogs() {
-    ActionDBAccessor db = injector.getInstance(ActionDBAccessorImpl.class);
-    ActionManager am = new ActionManager(5000, 1200000, new ActionQueue(),
-        clusters, db, new HostsMap((String) null));
-    populateActionDB(db, hostname);
-    Stage stage = db.getAllStages(requestId).get(0);
-    Assert.assertEquals(stageId, stage.getStageId());
-    stage.setHostRoleStatus(hostname, "HBASE_MASTER", HostRoleStatus.QUEUED);
-    db.hostRoleScheduled(stage, hostname, "HBASE_MASTER");
-    List<CommandReport> reports = new ArrayList<CommandReport>();
-    CommandReport cr = new CommandReport();
-    cr.setTaskId(1);
-    cr.setActionId(StageUtils.getActionId(requestId, stageId));
-    cr.setRole("HBASE_MASTER");
-    cr.setStatus("COMPLETED");
-    String errLog = Arrays.toString(new byte[100000]);
-    String outLog = Arrays.toString(new byte[110000]);
-    cr.setStdErr(errLog);
-    cr.setStdOut(outLog);
-    cr.setExitCode(215);
-    reports.add(cr);
-    am.processTaskResponse(hostname, reports);
-    assertEquals(215,
-        am.getAction(requestId, stageId).getExitCode(hostname, "HBASE_MASTER"));
-    assertEquals(HostRoleStatus.COMPLETED, am.getAction(requestId, stageId)
-        .getHostRoleStatus(hostname, "HBASE_MASTER"));
-    assertEquals(
-        errLog.length(),
-        am.getAction(requestId, stageId)
-            .getHostRoleCommand(hostname, "HBASE_MASTER").getStderr().length());
-    assertEquals(
-        outLog.length(),
-        am.getAction(requestId, stageId)
-            .getHostRoleCommand(hostname, "HBASE_MASTER").getStdout().length());
-    
-  }
-
-  private void populateActionDB(ActionDBAccessor db, String hostname) {
-    Stage s = new Stage(requestId, "/a/b", "cluster1");
-    s.setStageId(stageId);
-    s.addHostRoleExecutionCommand(hostname, Role.HBASE_MASTER,
-        RoleCommand.START,
-        new ServiceComponentHostStartEvent(Role.HBASE_MASTER.toString(),
-            hostname, System.currentTimeMillis(),
-            new HashMap<String, String>()), "cluster1", "HBASE");
-    List<Stage> stages = new ArrayList<Stage>();
-    stages.add(s);
-    db.persistActions(stages);
-  }
-
-  @Test
-  public void testCascadeDeleteStages() throws Exception {
-    ActionDBAccessor db = injector.getInstance(ActionDBAccessorImpl.class);
-    ActionManager am = injector.getInstance(ActionManager.class);
-    populateActionDB(db, hostname);
-    assertEquals(1, clusters.getClusters().size());
-
-    Cluster cluster = clusters.getCluster(clusterName);
-
-    assertEquals(1, am.getRequests().size());
-
-    clusters.deleteCluster(clusterName);
-
-    assertEquals(0, clusters.getClusters().size());
-    assertEquals(0, am.getRequests().size());
-
-  }
-}
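
A side note on testLargeLogs above: it sizes its payloads with
Arrays.toString(new byte[n]), which renders as "[0, 0, ..., 0]". For n >= 1
that string is exactly 3 * n characters long (n single-digit zeros, n - 1
", " separators, and two brackets), which is what makes the length
assertions meaningful. A quick self-contained check:

import java.util.Arrays;

public final class LargeLogSizeSketch {
  public static void main(String[] args) {
    // n digits + 2 * (n - 1) separator chars + 2 brackets == 3 * n
    System.out.println(Arrays.toString(new byte[100000]).length()); // 300000
    System.out.println(Arrays.toString(new byte[110000]).length()); // 330000
  }
}
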
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionScheduler.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionScheduler.java
deleted file mode 100644
index fe5c2f6..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionScheduler.java
+++ /dev/null
@@ -1,188 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.actionmanager;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Matchers.anyString;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import junit.framework.Assert;
-
-import org.apache.ambari.server.Role;
-import org.apache.ambari.server.actionmanager.ActionScheduler.RoleStats;
-import org.apache.ambari.server.agent.ActionQueue;
-import org.apache.ambari.server.agent.AgentCommand;
-import org.apache.ambari.server.agent.ExecutionCommand;
-import org.apache.ambari.server.controller.HostsMap;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Service;
-import org.apache.ambari.server.state.ServiceComponent;
-import org.apache.ambari.server.state.ServiceComponentHost;
-import org.apache.ambari.server.utils.StageUtils;
-import org.apache.ambari.server.utils.TestStageUtils;
-import org.junit.Test;
-
-public class TestActionScheduler {
-
-  /**
-   * This test sends a new action to the action scheduler and verifies that the action
-   * shows up in the action queue.
-   */
-  @Test
-  public void testActionSchedule() throws Exception {
-    ActionQueue aq = new ActionQueue();
-    Clusters fsm = mock(Clusters.class);
-    Cluster oneClusterMock = mock(Cluster.class);
-    Service serviceObj = mock(Service.class);
-    ServiceComponent scomp = mock(ServiceComponent.class);
-    ServiceComponentHost sch = mock(ServiceComponentHost.class);
-    when(fsm.getCluster(anyString())).thenReturn(oneClusterMock);
-    when(oneClusterMock.getService(anyString())).thenReturn(serviceObj);
-    when(serviceObj.getServiceComponent(anyString())).thenReturn(scomp);
-    when(scomp.getServiceComponentHost(anyString())).thenReturn(sch);
-    when(serviceObj.getCluster()).thenReturn(oneClusterMock);
-
-    ActionDBAccessor db = mock(ActionDBAccessorImpl.class);
-    List<Stage> stages = new ArrayList<Stage>();
-    String hostname = "ahost.ambari.apache.org";
-    Stage s = StageUtils.getATestStage(1, 977, hostname);
-    stages.add(s);
-    when(db.getStagesInProgress()).thenReturn(stages);
-
-    //Keep a large number of attempts so that the task never expires.
-    //Use a small action timeout to exercise rescheduling.
-    ActionScheduler scheduler = new ActionScheduler(100, 100, db, aq, fsm,
-        10000, new HostsMap((String) null));
-    scheduler.setTaskTimeoutAdjustment(false);
-    // Start the thread
-    scheduler.start();
-
-    List<AgentCommand> ac = waitForQueueSize(hostname, aq, 1);
-    assertTrue(ac.get(0) instanceof ExecutionCommand);
-    assertEquals("1-977", ((ExecutionCommand) (ac.get(0))).getCommandId());
-
-    //The action status has not changed, so it should be queued again.
-    ac = waitForQueueSize(hostname, aq, 1);
-    assertTrue(ac.get(0) instanceof ExecutionCommand);
-    assertEquals("1-977", ((ExecutionCommand) (ac.get(0))).getCommandId());
-
-    //Now change the action status
-    s.setHostRoleStatus(hostname, "NAMENODE", HostRoleStatus.COMPLETED);
-    ac = aq.dequeueAll(hostname);
-
-    //Wait for some time; it shouldn't be scheduled this time.
-    ac = waitForQueueSize(hostname, aq, 0);
-    scheduler.stop();
-  }
-
-  private List<AgentCommand> waitForQueueSize(String hostname, ActionQueue aq,
-      int expectedQueueSize) throws InterruptedException {
-    while (true) {
-      List<AgentCommand> ac = aq.dequeueAll(hostname);
-      if (ac != null) {
-        if (ac.size() == expectedQueueSize) {
-          return ac;
-        } else if (ac.size() > expectedQueueSize) {
-          Assert.fail("Expected size : " + expectedQueueSize + " Actual size="
-              + ac.size());
-        }
-      }
-      Thread.sleep(100);
-    }
-  }
-
-  /**
- * Test whether the scheduler times out an action.
-   */
-  @Test
-  public void testActionTimeout() throws Exception {
-    ActionQueue aq = new ActionQueue();
-    Clusters fsm = mock(Clusters.class);
-    Cluster oneClusterMock = mock(Cluster.class);
-    Service serviceObj = mock(Service.class);
-    ServiceComponent scomp = mock(ServiceComponent.class);
-    ServiceComponentHost sch = mock(ServiceComponentHost.class);
-    when(fsm.getCluster(anyString())).thenReturn(oneClusterMock);
-    when(oneClusterMock.getService(anyString())).thenReturn(serviceObj);
-    when(serviceObj.getServiceComponent(anyString())).thenReturn(scomp);
-    when(scomp.getServiceComponentHost(anyString())).thenReturn(sch);
-    when(serviceObj.getCluster()).thenReturn(oneClusterMock);
-
-    ActionDBAccessor db = new ActionDBInMemoryImpl();
-    String hostname = "ahost.ambari.apache.org";
-    List<Stage> stages = new ArrayList<Stage>();
-    Stage s = StageUtils.getATestStage(1, 977, hostname);
-    stages.add(s);
-    db.persistActions(stages);
-
-    //Small action timeout to test rescheduling
-    ActionScheduler scheduler = new ActionScheduler(100, 50, db, aq, fsm, 3, 
-        new HostsMap((String) null));
-    scheduler.setTaskTimeoutAdjustment(false);
-    // Start the thread
-    scheduler.start();
-
-    while (!stages.get(0).getHostRoleStatus(hostname, "NAMENODE")
-        .equals(HostRoleStatus.TIMEDOUT)) {
-      Thread.sleep(100);
-    }
-    assertEquals(HostRoleStatus.TIMEDOUT,
-        stages.get(0).getHostRoleStatus(hostname, "NAMENODE"));
-  }
-  
-  @Test
-  public void testSuccessFactors() {
-    Stage s = StageUtils.getATestStage(1, 1);
-    assertEquals(new Float(0.5), new Float(s.getSuccessFactor(Role.DATANODE)));
-    assertEquals(new Float(0.5), new Float(s.getSuccessFactor(Role.TASKTRACKER)));
-    assertEquals(new Float(0.5), new Float(s.getSuccessFactor(Role.GANGLIA_MONITOR)));
-    assertEquals(new Float(0.5), new Float(s.getSuccessFactor(Role.HBASE_REGIONSERVER)));
-    assertEquals(new Float(1.0), new Float(s.getSuccessFactor(Role.NAMENODE)));
-    assertEquals(new Float(1.0), new Float(s.getSuccessFactor(Role.GANGLIA_SERVER)));
-  }
-  
-  @Test
-  public void testSuccessCriteria() {
-    RoleStats rs1 = new RoleStats(1, (float)0.5);
-    rs1.numSucceeded = 1;
-    assertTrue(rs1.isSuccessFactorMet());
-    rs1.numSucceeded = 0;
-    assertFalse(rs1.isSuccessFactorMet());
-    
-    RoleStats rs2 = new RoleStats(2, (float)0.5);
-    rs2.numSucceeded = 1;
-    assertTrue(rs2.isSuccessFactorMet());
-    
-    RoleStats rs3 = new RoleStats(3, (float)0.5);
-    rs3.numSucceeded = 2;
-    assertTrue(rs3.isSuccessFactorMet());
-    rs3.numSucceeded = 1;
-    assertFalse(rs3.isSuccessFactorMet());
-    
-    RoleStats rs4 = new RoleStats(3, (float)1.0);
-    rs4.numSucceeded = 2;
-    assertFalse(rs4.isSuccessFactorMet());
-  }
-}
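
testSuccessCriteria above pins down the success-factor rule by example
without stating it. A hedged sketch of the rule those assertions imply
(RoleStatsSketch is a stand-in; the real logic lives in
ActionScheduler.RoleStats): the factor is met once the succeeded count
reaches successFactor * totalHosts.

final class RoleStatsSketch {
  final int totalHosts;
  final float successFactor;
  int numSucceeded;

  RoleStatsSketch(int totalHosts, float successFactor) {
    this.totalHosts = totalHosts;
    this.successFactor = successFactor;
  }

  boolean isSuccessFactorMet() {
    // e.g. (3, 0.5): 2 succeeded -> 2 >= 1.5 -> met; 1 succeeded -> not met
    return numSucceeded >= successFactor * totalHosts;
  }
}
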
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestStage.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestStage.java
deleted file mode 100644
index ccc5126..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestStage.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.actionmanager;
-
-import static org.junit.Assert.*;
-
-import org.apache.ambari.server.Role;
-import org.apache.ambari.server.RoleCommand;
-import org.apache.ambari.server.controller.HostsMap;
-import org.apache.ambari.server.utils.StageUtils;
-import org.junit.Test;
-
-public class TestStage {
-
-  @Test
-  public void testTaskTimeout() {
-    Stage s = StageUtils.getATestStage(1, 1, "h1");
-    s.addHostRoleExecutionCommand("h1", Role.DATANODE, RoleCommand.INSTALL,
-        null, "c1", "HDFS");
-    s.addHostRoleExecutionCommand("h1", Role.HBASE_MASTER, RoleCommand.INSTALL,
-        null, "c1", "HBASE");
-    assertEquals(3*60000, s.getTaskTimeout());
-  }
-}
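
The 3 * 60000 expectation above reads as a per-task timeout multiplied by
the number of commands on the host: getATestStage contributes one command
and the test adds two more. A small sketch of that reading (the 60000 ms
per-task figure is inferred from the assertion, not confirmed elsewhere):

public final class StageTimeoutSketch {
  static long taskTimeout(int commandCount, long perTaskTimeoutMs) {
    return commandCount * perTaskTimeoutMs;
  }

  public static void main(String[] args) {
    System.out.println(taskTimeout(3, 60000L)); // 180000, matching the test
  }
}
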
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentHostInfoTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentHostInfoTest.java
deleted file mode 100644
index a8dac31..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentHostInfoTest.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-package org.apache.ambari.server.agent;
-
-import java.io.IOException;
-
-import junit.framework.Assert;
-
-import org.codehaus.jackson.JsonParseException;
-import org.codehaus.jackson.map.DeserializationConfig;
-import org.codehaus.jackson.map.JsonMappingException;
-import org.codehaus.jackson.map.ObjectMapper;
-import org.junit.Test;
-
-/**
- * This test makes sure the contract between the server and the agent for
- * host info is intact.
- */
-public class AgentHostInfoTest {
-  
-  @Test
-  public void testDeserializeHostInfo() throws JsonParseException, 
-    JsonMappingException, IOException {
-    String hostinfo = "{\"architecture\": \"x86_64\", " +
-        "\"augeasversion\": \"0.10.0\"," +
-        "\"domain\": \"test.com\", " +
-        "\"facterversion\": \"1.6.10\"," +
-        "\"fqdn\": \"dev.test.com\", " +
-        "\"hardwareisa\": \"x86_64\", " +
-        "\"hardwaremodel\": \"x86_64\"," +
-        "\"hostname\": \"dev\", " +
-        "\"id\": \"root\", " +
-        "\"interfaces\": \"eth0,lo\", " +
-        "\"ipaddress\": \"10.0.2.15\"," +
-        "\"ipaddress_eth0\": \"10.0.2.15\"," +
-        "\"ipaddress_lo\": \"127.0.0.1\"," +
-        "\"is_virtual\": true," +
-        "\"kernel\": \"Linux\", " +
-        "\"kernelmajversion\": \"2.6\"," +
-        "\"kernelrelease\": \"2.6.18-238.12.1.el5\"," +
-        "\"kernelversion\": \"2.6.18\", " +
-        "\"lsbdistcodename\": \"Final\"," +
-        "\"lsbdistdescription\": \"CentOS release 5.8 (Final)\"," +
-        "\"lsbdistid\": \"CentOS\", " +
-        "\"lsbdistrelease\": \"5.8\", " +
-        "\"lsbmajdistrelease\": \"5\"," +
-        "\"macaddress\": \"08:00:27:D2:59:B2\", " +
-        "\"macaddress_eth0\": \"08:00:27:D2:59:B2\"," +
-        "\"manufacturer\": \"innotek GmbH\"," +
-        "\"memoryfree\": 2453667," +
-        "\"memorysize\": 3051356, " +
-        "\"memorytotal\": 3051356," +
-        "\"netmask\": \"255.255.255.0\"}";
-    ObjectMapper mapper = new ObjectMapper();
-    mapper.configure(DeserializationConfig.Feature.FAIL_ON_UNKNOWN_PROPERTIES, false);
-    HostInfo info = mapper.readValue(hostinfo, HostInfo.class);
-    Assert.assertEquals(3051356L, info.getMemoryTotal());
-    Assert.assertEquals("Linux", info.getKernel());
-    Assert.assertEquals("dev.test.com", info.getFQDN());
-    Assert.assertEquals("root", info.getAgentUserId());
-    Assert.assertEquals(3051356L, info.getMemorySize());
-    Assert.assertEquals("x86_64", info.getArchitecture());
-  }
-}
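
The key move in the test above is disabling FAIL_ON_UNKNOWN_PROPERTIES, so
an agent may report more facts than the server-side HostInfo bean models
without breaking deserialization. A self-contained sketch of the same
pattern, using a hypothetical MiniInfo bean in place of HostInfo:

import org.codehaus.jackson.map.DeserializationConfig;
import org.codehaus.jackson.map.ObjectMapper;

public class LenientMappingSketch {
  public static class MiniInfo {
    private String hostname;
    public String getHostname() { return hostname; }
    public void setHostname(String hostname) { this.hostname = hostname; }
  }

  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new ObjectMapper();
    // Unknown JSON keys ("unknownFact") are ignored instead of failing.
    mapper.configure(DeserializationConfig.Feature.FAIL_ON_UNKNOWN_PROPERTIES, false);
    MiniInfo info = mapper.readValue(
        "{\"hostname\": \"dev\", \"unknownFact\": 42}", MiniInfo.class);
    System.out.println(info.getHostname()); // dev
  }
}
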
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java
deleted file mode 100644
index 6e691c1..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java
+++ /dev/null
@@ -1,179 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.agent;
-
-import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-import javax.ws.rs.core.MediaType;
-
-import com.google.inject.assistedinject.FactoryModuleBuilder;
-import com.google.inject.persist.jpa.JpaPersistModule;
-import junit.framework.Assert;
-
-import org.apache.ambari.server.actionmanager.ActionManager;
-import org.apache.ambari.server.actionmanager.HostRoleCommandFactory;
-import org.apache.ambari.server.actionmanager.StageFactory;
-import org.apache.ambari.server.agent.rest.AgentResource;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.apache.ambari.server.state.*;
-import org.apache.ambari.server.state.cluster.ClusterFactory;
-import org.apache.ambari.server.state.cluster.ClusterImpl;
-import org.apache.ambari.server.state.cluster.ClustersImpl;
-import org.apache.ambari.server.state.host.HostFactory;
-import org.apache.ambari.server.state.host.HostImpl;
-import org.apache.ambari.server.state.svccomphost.ServiceComponentHostImpl;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.codehaus.jettison.json.JSONException;
-import org.codehaus.jettison.json.JSONObject;
-import org.junit.Test;
-
-import com.google.inject.AbstractModule;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.sun.jersey.api.client.Client;
-import com.sun.jersey.api.client.UniformInterfaceException;
-import com.sun.jersey.api.client.WebResource;
-import com.sun.jersey.api.client.config.ClientConfig;
-import com.sun.jersey.api.client.config.DefaultClientConfig;
-import com.sun.jersey.api.json.JSONConfiguration;
-import com.sun.jersey.spi.container.servlet.ServletContainer;
-import com.sun.jersey.test.framework.JerseyTest;
-import com.sun.jersey.test.framework.WebAppDescriptor;
-
-public class AgentResourceTest extends JerseyTest {
-  static String PACKAGE_NAME = "org.apache.ambari.server.agent.rest";
-  private static Log LOG = LogFactory.getLog(AgentResourceTest.class);
-  HeartBeatHandler handler;
-  ActionManager actionManager;
-  Injector injector;
-  protected Client client;
-  AmbariMetaInfo ambariMetaInfo;
-
-  public AgentResourceTest() {
-    super(new WebAppDescriptor.Builder(PACKAGE_NAME).servletClass(ServletContainer.class)
-        .initParam("com.sun.jersey.api.json.POJOMappingFeature", "true")
-        .build());
-  }
-
-  public class MockModule extends AbstractModule {
-
-    RegistrationResponse response = new RegistrationResponse();
-    HeartBeatResponse hresponse = new HeartBeatResponse();
-
-    @Override
-    protected void configure() {
-      installDependencies();
-
-      handler = mock(HeartBeatHandler.class);
-      response.setResponseStatus(RegistrationStatus.OK);
-      hresponse.setResponseId(0L);
-      try {
-        when(handler.handleRegistration(any(Register.class))).thenReturn(
-            response);
-        when(handler.handleHeartBeat(any(HeartBeat.class))).thenReturn(
-            hresponse);
-      } catch (Exception ex) {
-        // The test will fail anyway
-      }
-      requestStaticInjection(AgentResource.class);
-      bind(Clusters.class).to(ClustersImpl.class);
-      actionManager = mock(ActionManager.class);
-      ambariMetaInfo = mock(AmbariMetaInfo.class);
-      bind(ActionManager.class).toInstance(actionManager);
-      bind(AgentCommand.class).to(ExecutionCommand.class);
-      bind(HeartBeatHandler.class).toInstance(handler);
-      bind(AmbariMetaInfo.class).toInstance(ambariMetaInfo);
-    }
-
-    private void installDependencies() {
-      install(new JpaPersistModule("ambari-javadb"));
-      install(new FactoryModuleBuilder().implement(
-          Cluster.class, ClusterImpl.class).build(ClusterFactory.class));
-      install(new FactoryModuleBuilder().implement(
-          Host.class, HostImpl.class).build(HostFactory.class));
-      install(new FactoryModuleBuilder().implement(
-          Service.class, ServiceImpl.class).build(ServiceFactory.class));
-      install(new FactoryModuleBuilder().implement(
-          ServiceComponent.class, ServiceComponentImpl.class).build(
-          ServiceComponentFactory.class));
-      install(new FactoryModuleBuilder().implement(
-          ServiceComponentHost.class, ServiceComponentHostImpl.class).build(
-          ServiceComponentHostFactory.class));
-      install(new FactoryModuleBuilder().implement(
-          Config.class, ConfigImpl.class).build(ConfigFactory.class));
-      install(new FactoryModuleBuilder().build(StageFactory.class));
-      install(new FactoryModuleBuilder().build(HostRoleCommandFactory.class));
-    }
-  }
-
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    handler = mock(HeartBeatHandler.class);
-    injector = Guice.createInjector(new MockModule());
-    injector.injectMembers(handler);
-  }
-
-  private JSONObject createDummyJSONRegister() throws JSONException {
-    JSONObject json = new JSONObject();
-    json.put("responseId" , -1);
-    json.put("timestamp" , System.currentTimeMillis());
-    json.put("hostname",   "dummyHost");
-    return json;
-  }
-
-  private JSONObject createDummyHeartBeat() throws JSONException {
-    JSONObject json = new JSONObject();
-    json.put("responseId", -1);
-    json.put("timestamp" , System.currentTimeMillis());
-    json.put("hostname", "dummyHost");
-    return json;
-  }
-
-  @Test
-  public void agentRegistration() throws UniformInterfaceException, JSONException {
-    RegistrationResponse response;
-    ClientConfig clientConfig = new DefaultClientConfig();
-    clientConfig.getFeatures().put(JSONConfiguration.FEATURE_POJO_MAPPING, Boolean.TRUE);
-    client = Client.create(clientConfig);
-    WebResource webResource = client.resource("http://localhost:9998/register/dummyhost");
-    response = webResource.type(MediaType.APPLICATION_JSON)
-      .post(RegistrationResponse.class, createDummyJSONRegister());
-    LOG.info("Returned from Server responce=" + response);
-    Assert.assertEquals(response.getResponseStatus(), RegistrationStatus.OK);
-  }
-
-  @Test
-  public void agentHeartBeat() throws UniformInterfaceException, JSONException {
-    HeartBeatResponse response;
-    ClientConfig clientConfig = new DefaultClientConfig();
-    clientConfig.getFeatures().put(JSONConfiguration.FEATURE_POJO_MAPPING, Boolean.TRUE);
-    client = Client.create(clientConfig);
-    WebResource webResource = client.resource("http://localhost:9998/heartbeat/dummyhost");
-    response = webResource.type(MediaType.APPLICATION_JSON)
-        .post(HeartBeatResponse.class, createDummyHeartBeat());
-    LOG.info("Returned from Server: "
-        + " response=" +   response);
-    Assert.assertEquals(response.getResponseId(), 0L);
-  }
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/agent/DummyHeartbeatConstants.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/agent/DummyHeartbeatConstants.java
deleted file mode 100644
index 3fc7ea0..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/agent/DummyHeartbeatConstants.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.agent;
-
-import org.apache.ambari.server.Role;
-
-public interface DummyHeartbeatConstants {
-
-  String DummyCluster = "cluster1";
-  String DummyHostname1 = "host1";
-  String DummyOs = "CentOS";
-  String DummyOsType = "centos5";
-  String DummyOSRelease = "5.8";
-
-  String DummyHostStatus = "I am ok";
-
-  String DummyStackId = "HDP-0.1";
-
-  String HDFS = "HDFS";
-  String HBASE = "HBASE";
-
-  String DATANODE = Role.DATANODE.name();
-  String NAMENODE = Role.NAMENODE.name();
-  String SECONDARY_NAMENODE = Role.SECONDARY_NAMENODE.name();
-  String HBASE_MASTER = Role.HBASE_MASTER.name();
-
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/agent/LocalAgentSimulator.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/agent/LocalAgentSimulator.java
deleted file mode 100644
index 8cf0858..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/agent/LocalAgentSimulator.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.agent;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-/**
- * The purpose of this class is to simulate the agent.
- */
-public class LocalAgentSimulator implements Runnable {
-
-  private static Log LOG = LogFactory.getLog(HeartBeatHandler.class);
-
-  private Thread agentThread = null;
-  private volatile boolean shouldRun = true;
-  private final HeartBeatHandler handler;
-  private long sleepTime = 500;
-  private long responseId = 1;
-
-  private String hostname = "localhost";
-
-  public LocalAgentSimulator(HeartBeatHandler hbh) {
-    this.handler = hbh;
-  }
-
-  public LocalAgentSimulator(HeartBeatHandler hbh, String hostname, long sleepTime) {
-    this(hbh);
-    this.sleepTime = sleepTime;
-    this.hostname  = hostname;
-  }
-
-  //Can be used to control the exact number of heartbeats.
-  //The default is -1, which means keep heartbeating continuously.
-  private volatile int numberOfHeartbeats = -1;
-  private int currentHeartbeatCount = 0;
-  private volatile boolean shouldSendRegistration = true;
-
-  private volatile Register nextRegistration = null;
-  private volatile HeartBeat nextHeartbeat = null;
-  private volatile RegistrationResponse lastRegistrationResponse = null;
-  private volatile HeartBeatResponse lastHeartBeatResponse = null;
-
-  public void start() {
-    agentThread = new Thread(this);
-    agentThread.start();
-  }
-
-  public void shutdown() {
-    shouldRun = false;
-    agentThread.interrupt();
-  }
-
-  @Override
-  public void run() {
-    while (shouldRun) {
-      try {
-        if (shouldSendRegistration) {
-          sendRegistration();
-        } else if (numberOfHeartbeats < 0
-            || currentHeartbeatCount < numberOfHeartbeats) {
-          //Honor the documented contract: -1 means heartbeat continuously,
-          //0 means none, and N > 0 means exactly N heartbeats.
-          sendHeartBeat();
-          currentHeartbeatCount++;
-        }
-        Thread.sleep(sleepTime);
-      } catch (InterruptedException e) {
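-        // Expected on shutdown(): the interrupt wakes the sleeping loop so
-        // it can observe shouldRun == false and exit.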
-      } catch (Exception ex) {
-        LOG.info("Exception received ", ex);
-        throw new RuntimeException(ex);
-      }
-    }
-  }
-
-  private void sendRegistration() {
-    Register reg;
-    if (nextRegistration != null) {
-      reg = nextRegistration;
-    } else {
-      reg = new Register();
-      reg.setTimestamp(System.currentTimeMillis());
-      reg.setHostname(this.hostname);
-    }
-    RegistrationResponse response;
-    try {
-      response = handler.handleRegistration(reg);
-    } catch (AmbariException e) {
-      LOG.info("Registration failed", e);
-      return;
-    } catch (InvalidStateTransitionException e) {
-      LOG.info("Registration failed", e);
-      return;
-    }
-    this.responseId = response.getResponseId();
-    this.lastRegistrationResponse  = response;
-    this.shouldSendRegistration = false;
-    this.nextRegistration = null;
-  }
-
-  private void sendHeartBeat() throws AmbariException {
-    HeartBeat hb;
-    if (nextHeartbeat != null) {
-      hb = nextHeartbeat;
-    } else {
-      hb = new HeartBeat();
-      hb.setResponseId(responseId);
-      hb.setHostname(hostname);
-      hb.setTimestamp(System.currentTimeMillis());
-    }
-    HeartBeatResponse response = handler.handleHeartBeat(hb);
-    this.responseId = response.getResponseId();
-    this.lastHeartBeatResponse = response;
-    this.nextHeartbeat = null;
-  }
-
-  /**
-   * After this value is set, the agent will send only that many heartbeats.
-   * A value of 0 means no heartbeats and -1 means keep sending continuously.
-   * @param numberOfHeartbeats the number of heartbeats to send
-   */
-  public void setNumberOfHeartbeats(int numberOfHeartbeats) {
-    this.numberOfHeartbeats = numberOfHeartbeats;
-    currentHeartbeatCount = 0;
-  }
-
-  public void setShouldSendRegistration(boolean shouldSendRegistration) {
-    this.shouldSendRegistration = shouldSendRegistration;
-  }
-
-  public RegistrationResponse getLastRegistrationResponse() {
-    return lastRegistrationResponse;
-  }
-
-  public HeartBeatResponse getLastHeartBeatResponse() {
-    return lastHeartBeatResponse;
-  }
-
-  public void setNextRegistration(Register nextRegistration) {
-    this.nextRegistration = nextRegistration;
-  }
-
-  public void setNextHeartbeat(HeartBeat nextHeartbeat) {
-    this.nextHeartbeat = nextHeartbeat;
-  }
-}
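
The simulator's bookkeeping above encodes a responseId handshake: every
server response carries the id the agent must echo on its next heartbeat.
A hedged, self-contained sketch of that protocol (FakeServer is
hypothetical; the real server side lives in HeartBeatHandler):

final class ResponseIdHandshakeSketch {
  static final class FakeServer {
    private long expected = 1;

    long heartbeat(long echoedId) {
      if (echoedId != expected) {
        throw new IllegalStateException("agent out of sync, re-register");
      }
      return ++expected; // the id the agent must echo next time
    }
  }

  public static void main(String[] args) {
    FakeServer server = new FakeServer();
    long responseId = 1; // as handed back by registration
    for (int i = 0; i < 3; i++) {
      responseId = server.heartbeat(responseId);
      System.out.println("next responseId = " + responseId);
    }
  }
}
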
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/agent/TestActionQueue.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/agent/TestActionQueue.java
deleted file mode 100644
index 36b3419..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/agent/TestActionQueue.java
+++ /dev/null
@@ -1,199 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.agent;
-
-import static org.junit.Assert.*;
-
-import java.util.List;
-
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class TestActionQueue {
-  
-  private static Logger LOG = LoggerFactory.getLogger(TestActionQueue.class);
-  
-  private static int threadCount = 100;
-  static class ActionQueueOperation implements Runnable {
-    
-    enum OpType {
-      ENQUEUE,
-      DEQUEUE,
-      DEQUEUEALL
-    }
-  
-    private volatile boolean shouldRun = true;
-    private long [] opCounts;
-    private ActionQueue actionQueue;
-    private OpType operation;
-    private String[] hosts;
-    
-    public ActionQueueOperation(ActionQueue aq, String [] hosts, OpType op) {
-      this.actionQueue = aq;
-      this.operation = op;
-      this.hosts = hosts;
-      opCounts = new long [hosts.length];
-      for (int i = 0; i < hosts.length; i++) {
-        opCounts[i] = 0;
-      }
-    }
-    
-    public long [] getOpCounts() {
-      return opCounts;
-    }
-    
-    public void stop() {
-      this.shouldRun = false;
-    }
-    
-    @Override
-    public void run() {
-      try {
-        switch (operation) {
-        case ENQUEUE:
-          enqueueOp();
-          break;
-        case DEQUEUE:
-          dequeueOp();
-          break;
-        case DEQUEUEALL:
-          dequeueAllOp();
-          break;
-        }
-      } catch (Exception ex) {
-        LOG.error("Failure", ex);
-        throw new RuntimeException("Failure", ex);
-      }
-    }
-    
-    private void enqueueOp() throws InterruptedException {
-      while (shouldRun) {
-        int index = 0;
-        for (String host: hosts) {
-          actionQueue.enqueue(host, new StatusCommand());
-          opCounts[index]++;
-          index++;
-        }
-        Thread.sleep(1);
-      }
-    }
-    
-    private void dequeueOp() throws InterruptedException {
-      while (shouldRun) {
-        int index = 0;
-        for (String host: hosts) {
-          AgentCommand cmd = actionQueue.dequeue(host);
-          if (cmd != null) {
-            opCounts[index]++;
-          }
-          index++;
-        }
-        Thread.sleep(1);
-      }
-    }
-    
-    private void dequeueAllOp() throws InterruptedException {
-      while (shouldRun) {
-        int index = 0;
-        for (String host : hosts) {
-          List<AgentCommand> cmds = actionQueue.dequeueAll(host);
-          if (cmds != null && !cmds.isEmpty()) {
-            opCounts[index] += cmds.size();
-          }
-          index++;
-        }
-        Thread.sleep(1);
-      }
-    }
-  }
-  
-  @Test
-  public void testConcurrentOperations() throws InterruptedException {
-    ActionQueue aq = new ActionQueue();
-    String[] hosts = new String[] { "h0", "h1", "h2", "h3", "h4", "h5", "h6",
-        "h7", "h8", "h9" };
-
-    ActionQueueOperation[] enqueOperators = new ActionQueueOperation[threadCount];
-    ActionQueueOperation[] dequeOperators = new ActionQueueOperation[threadCount];
-    ActionQueueOperation[] dequeAllOperators = new ActionQueueOperation[threadCount];
-
-    for (int i = 0; i < threadCount; i++) {
-      dequeOperators[i] = new ActionQueueOperation(aq, hosts,
-          ActionQueueOperation.OpType.DEQUEUE);
-      Thread t = new Thread(dequeOperators[i]);
-      t.start();
-    }
-
-    for (int i = 0; i < threadCount; i++) {
-      enqueOperators[i] = new ActionQueueOperation(aq, hosts,
-          ActionQueueOperation.OpType.ENQUEUE);
-      Thread t = new Thread(enqueOperators[i]);
-      t.start();
-    }
-
-    for (int i = 0; i < threadCount; i++) {
-      dequeAllOperators[i] = new ActionQueueOperation(aq, hosts,
-          ActionQueueOperation.OpType.DEQUEUEALL);
-      Thread t = new Thread(dequeAllOperators[i]);
-      t.start();
-    }
-
-    // Run for some time
-    Thread.sleep(100);
-
-    // Stop the enqueue
-    for (int i = 0; i < threadCount; i++) {
-      enqueOperators[i].stop();
-    }
-
-    // Give time to get everything dequeued
-    boolean allDequeued = false;
-    while (!allDequeued) {
-      Thread.sleep(10);
-      allDequeued = true;
-      for (String host: hosts) {
-        if (aq.size(host) > 0) {
-          allDequeued = false;
-          break;
-        }
-      }
-    }
-    
-    // Stop all threads
-    for (int i = 0; i < threadCount; i++) {
-      dequeOperators[i].stop();
-      dequeAllOperators[i].stop();
-    }
-    
-    for (int h = 0; h<hosts.length; h++) {
-      long opsEnqueued = 0;
-      long opsDequeued = 0;
-      for (int i = 0; i < threadCount; i++) {
-        opsEnqueued += enqueOperators[i].getOpCounts()[h];
-        opsDequeued += dequeOperators[i].getOpCounts()[h];
-        opsDequeued += dequeAllOperators[i].getOpCounts()[h];
-      }
-      assertTrue(opsEnqueued != 0); //Prevent degenerate case of all zeros.
-      assertEquals(0, aq.size(hosts[h])); //Everything should be dequeued
-      LOG.info("Host: " + hosts[h] + ", opsEnqueued: " + opsEnqueued
-          + ", opsDequeued: " + opsDequeued);
-      assertEquals(opsDequeued, opsEnqueued);
-    }
-  }
-}
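
The concurrency test above only needs ActionQueue to behave like a
thread-safe, per-host FIFO whose enqueue and dequeue counts balance. A
minimal sketch of such a shape, assuming nothing about the real class
beyond the enqueue/dequeue/dequeueAll/size surface the test calls:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;

final class ActionQueueSketch<C> {
  private final Map<String, Queue<C>> queues =
      new ConcurrentHashMap<String, Queue<C>>();

  private Queue<C> queueFor(String host) {
    Queue<C> q = queues.get(host);
    if (q == null) {
      queues.putIfAbsent(host, new ConcurrentLinkedQueue<C>());
      q = queues.get(host);
    }
    return q;
  }

  void enqueue(String host, C cmd) { queueFor(host).add(cmd); }

  C dequeue(String host) { return queueFor(host).poll(); }

  List<C> dequeueAll(String host) {
    List<C> out = new ArrayList<C>();
    Queue<C> q = queueFor(host);
    C c;
    while ((c = q.poll()) != null) {
      out.add(c);
    }
    return out;
  }

  int size(String host) { return queueFor(host).size(); }
}
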
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
deleted file mode 100644
index 088dc98..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
+++ /dev/null
@@ -1,659 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.agent;
-
-import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DATANODE;
-import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyCluster;
-import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyHostStatus;
-import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyHostname1;
-import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyOSRelease;
-import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyOs;
-import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyOsType;
-import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyStackId;
-import static org.apache.ambari.server.agent.DummyHeartbeatConstants.HBASE;
-import static org.apache.ambari.server.agent.DummyHeartbeatConstants.HBASE_MASTER;
-import static org.apache.ambari.server.agent.DummyHeartbeatConstants.HDFS;
-import static org.apache.ambari.server.agent.DummyHeartbeatConstants.NAMENODE;
-import static org.apache.ambari.server.agent.DummyHeartbeatConstants.SECONDARY_NAMENODE;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.mockito.Matchers.anyString;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-import javax.xml.bind.JAXBException;
-
-import junit.framework.Assert;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.Role;
-import org.apache.ambari.server.RoleCommand;
-import org.apache.ambari.server.actionmanager.ActionDBAccessor;
-import org.apache.ambari.server.actionmanager.ActionDBAccessorImpl;
-import org.apache.ambari.server.actionmanager.ActionDBInMemoryImpl;
-import org.apache.ambari.server.actionmanager.ActionManager;
-import org.apache.ambari.server.actionmanager.HostRoleStatus;
-import org.apache.ambari.server.actionmanager.Stage;
-import org.apache.ambari.server.agent.HostStatus.Status;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.controller.HostsMap;
-import org.apache.ambari.server.orm.GuiceJpaInitializer;
-import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Host;
-import org.apache.ambari.server.state.HostState;
-import org.apache.ambari.server.state.Service;
-import org.apache.ambari.server.state.ServiceComponentHost;
-import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.state.State;
-import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
-import org.apache.ambari.server.state.svccomphost.ServiceComponentHostStartEvent;
-import org.apache.ambari.server.utils.StageUtils;
-import org.codehaus.jackson.JsonGenerationException;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.inject.Guice;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.persist.PersistService;
-
-public class TestHeartbeatHandler {
-
-  private static final Logger log = LoggerFactory.getLogger(TestHeartbeatHandler.class);
-  private Injector injector;
-  private Clusters clusters;
-  long requestId = 23;
-  long stageId = 31;
-
-  @Inject
-  AmbariMetaInfo metaInfo;
-  @Inject
-  Configuration config;
-
-  @Before
-  public void setup() throws Exception {
-    injector = Guice.createInjector(new InMemoryDefaultTestModule());
-    injector.getInstance(GuiceJpaInitializer.class);
-    clusters = injector.getInstance(Clusters.class);
-    injector.injectMembers(this);
-    metaInfo.init();
-    log.debug("Using server os type=" + config.getServerOsType());
-  }
-
-  @After
-  public void teardown() throws AmbariException {
-    injector.getInstance(PersistService.class).stop();
-  }
-
-  @Test
-  public void testHeartbeat() throws Exception {
-    ActionManager am = new ActionManager(0, 0, null, null,
-        new ActionDBInMemoryImpl(), new HostsMap((String) null));
-    Clusters fsm = clusters;
-    fsm.addHost(DummyHostname1);
-    Host hostObject = clusters.getHost(DummyHostname1);
-    hostObject.setIPv4("ipv4");
-    hostObject.setIPv6("ipv6");
-    hostObject.setOsType(DummyOsType);
-
-    ActionQueue aq = new ActionQueue();
-
-    HeartBeatHandler handler = new HeartBeatHandler(fsm, aq, am, injector);
-    Register reg = new Register();
-    HostInfo hi = new HostInfo();
-    hi.setHostName(DummyHostname1);
-    hi.setOS(DummyOs);
-    hi.setOSRelease(DummyOSRelease);
-    reg.setHostname(DummyHostname1);
-    reg.setHardwareProfile(hi);
-    handler.handleRegistration(reg);
-
-    hostObject.setState(HostState.UNHEALTHY);
-
-    ExecutionCommand execCmd = new ExecutionCommand();
-    execCmd.setCommandId("2-34");
-    execCmd.setHostname(DummyHostname1);
-    aq.enqueue(DummyHostname1, execCmd);
-    HeartBeat hb = new HeartBeat();
-    hb.setResponseId(0);
-    hb.setNodeStatus(new HostStatus(Status.HEALTHY, DummyHostStatus));
-    hb.setHostname(DummyHostname1);
-
-    handler.handleHeartBeat(hb);
-    assertEquals(HostState.HEALTHY, hostObject.getState());
-    assertEquals(0, aq.dequeueAll(DummyHostname1).size());
-  }
-
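-  /**
-   * Verifies that live component status reported in a heartbeat updates the
-   * ServiceComponentHost states (INSTALLED/STARTING -> STARTED), while a
-   * component absent from the report keeps its current state.
-   */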
-  @Test
-  public void testStatusHeartbeat() throws Exception {
-    ActionManager am = new ActionManager(0, 0, null, null,
-            new ActionDBInMemoryImpl(), new HostsMap((String) null));
-
-    clusters.addHost(DummyHostname1);
-    clusters.getHost(DummyHostname1).setOsType(DummyOsType);
-    clusters.getHost(DummyHostname1).persist();
-    clusters.addCluster(DummyCluster);
-
-    Cluster cluster = clusters.getCluster(DummyCluster);
-    cluster.setDesiredStackVersion(new StackId(DummyStackId));
-
-    @SuppressWarnings("serial")
-    Set<String> hostNames = new HashSet<String>(){{
-      add(DummyHostname1);
-    }};
-    clusters.mapHostsToCluster(hostNames, DummyCluster);
-    Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
-    hdfs.addServiceComponent(DATANODE).persist();
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(NAMENODE).persist();
-    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(SECONDARY_NAMENODE).persist();
-    hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1).persist();
-
-    ActionQueue aq = new ActionQueue();
-    HeartBeatHandler handler = new HeartBeatHandler(clusters, aq, am, injector);
-
-    Register reg = new Register();
-    HostInfo hi = new HostInfo();
-    hi.setHostName(DummyHostname1);
-    hi.setOS(DummyOs);
-    hi.setOSRelease(DummyOSRelease);
-    reg.setHostname(DummyHostname1);
-    reg.setResponseId(0);
-    reg.setHardwareProfile(hi);
-    handler.handleRegistration(reg);
-
-    ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS).
-            getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
-    ServiceComponentHost serviceComponentHost2 = clusters.getCluster(DummyCluster).getService(HDFS).
-            getServiceComponent(NAMENODE).getServiceComponentHost(DummyHostname1);
-    ServiceComponentHost serviceComponentHost3 = clusters.getCluster(DummyCluster).getService(HDFS).
-        getServiceComponent(SECONDARY_NAMENODE).getServiceComponentHost(DummyHostname1);
-    serviceComponentHost1.setState(State.INSTALLED);
-    serviceComponentHost2.setState(State.INSTALLED);
-    serviceComponentHost3.setState(State.STARTING);
-
-    HeartBeat hb = new HeartBeat();
-    hb.setTimestamp(System.currentTimeMillis());
-    hb.setResponseId(0);
-    hb.setHostname(DummyHostname1);
-    hb.setNodeStatus(new HostStatus(Status.HEALTHY, DummyHostStatus));
-    hb.setReports(new ArrayList<CommandReport>());
-    ArrayList<ComponentStatus> componentStatuses = new ArrayList<ComponentStatus>();
-    ComponentStatus componentStatus1 = new ComponentStatus();
-    componentStatus1.setClusterName(DummyCluster);
-    componentStatus1.setServiceName(HDFS);
-    componentStatus1.setMessage(DummyHostStatus);
-    componentStatus1.setStatus(State.STARTED.name());
-    componentStatus1.setComponentName(DATANODE);
-    componentStatuses.add(componentStatus1);
-    ComponentStatus componentStatus2 = new ComponentStatus();
-    componentStatus2.setClusterName(DummyCluster);
-    componentStatus2.setServiceName(HDFS);
-    componentStatus2.setMessage(DummyHostStatus);
-    componentStatus2.setStatus(State.STARTED.name());
-    componentStatus2.setComponentName(SECONDARY_NAMENODE);
-    componentStatuses.add(componentStatus2);
-    hb.setComponentStatus(componentStatuses);
-
-    handler.handleHeartBeat(hb);
-    State componentState1 = serviceComponentHost1.getState();
-    State componentState2 = serviceComponentHost2.getState();
-    State componentState3 = serviceComponentHost3.getState();
-    assertEquals(State.STARTED, componentState1);
-    assertEquals(State.INSTALLED, componentState2);
-    assertEquals(State.STARTED, componentState3);
-  }
-
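-  /**
-   * Verifies that components stuck in STOP_FAILED are moved to the live
-   * status (STARTED / INSTALLED) reported by the agent in a heartbeat.
-   */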
-  @Test
-  public void testLiveStatusUpdateAfterStopFailed() throws Exception {
-    ActionManager am = new ActionManager(0, 0, null, null,
-            new ActionDBInMemoryImpl(), new HostsMap((String) null));
-    clusters.addHost(DummyHostname1);
-    clusters.getHost(DummyHostname1).setOsType(DummyOsType);
-    clusters.getHost(DummyHostname1).persist();
-    clusters.addCluster(DummyCluster);
-
-    Cluster cluster = clusters.getCluster(DummyCluster);
-    cluster.setDesiredStackVersion(new StackId(DummyStackId));
-
-    @SuppressWarnings("serial")
-    Set<String> hostNames = new HashSet<String>(){{
-      add(DummyHostname1);
-    }};
-    clusters.mapHostsToCluster(hostNames, DummyCluster);
-    Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
-    hdfs.addServiceComponent(DATANODE).persist();
-    hdfs.getServiceComponent(DATANODE).
-            addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(NAMENODE).persist();
-    hdfs.getServiceComponent(NAMENODE).
-            addServiceComponentHost(DummyHostname1).persist();
-
-    ActionQueue aq = new ActionQueue();
-    HeartBeatHandler handler = new HeartBeatHandler(clusters, aq, am, injector);
-
-    Register reg = new Register();
-    HostInfo hi = new HostInfo();
-    hi.setHostName(DummyHostname1);
-    hi.setOS(DummyOs);
-    hi.setOSRelease(DummyOSRelease);
-    reg.setHostname(DummyHostname1);
-    reg.setResponseId(0);
-    reg.setHardwareProfile(hi);
-    handler.handleRegistration(reg);
-
-    ServiceComponentHost serviceComponentHost1 = clusters.
-            getCluster(DummyCluster).getService(HDFS).
-            getServiceComponent(DATANODE).
-            getServiceComponentHost(DummyHostname1);
-    ServiceComponentHost serviceComponentHost2 = clusters.
-            getCluster(DummyCluster).getService(HDFS).
-            getServiceComponent(NAMENODE).
-            getServiceComponentHost(DummyHostname1);
-    serviceComponentHost1.setState(State.STOP_FAILED);
-    serviceComponentHost2.setState(State.STOP_FAILED);
-
-    HeartBeat hb = new HeartBeat();
-    hb.setTimestamp(System.currentTimeMillis());
-    hb.setResponseId(0);
-    hb.setHostname(DummyHostname1);
-    hb.setNodeStatus(new HostStatus(Status.HEALTHY, DummyHostStatus));
-    hb.setReports(new ArrayList<CommandReport>());
-    ArrayList<ComponentStatus> componentStatuses = new ArrayList<ComponentStatus>();
-
-    ComponentStatus componentStatus1 = new ComponentStatus();
-    componentStatus1.setClusterName(DummyCluster);
-    componentStatus1.setServiceName(HDFS);
-    componentStatus1.setMessage(DummyHostStatus);
-    componentStatus1.setStatus(State.STARTED.name());
-    componentStatus1.setComponentName(DATANODE);
-    componentStatuses.add(componentStatus1);
-
-    ComponentStatus componentStatus2 = new ComponentStatus();
-    componentStatus2.setClusterName(DummyCluster);
-    componentStatus2.setServiceName(HDFS);
-    componentStatus2.setMessage(DummyHostStatus);
-    componentStatus2.setStatus(State.INSTALLED.name());
-    componentStatus2.setComponentName(NAMENODE);
-    componentStatuses.add(componentStatus2);
-
-    hb.setComponentStatus(componentStatuses);
-
-    handler.handleHeartBeat(hb);
-    State componentState1 = serviceComponentHost1.getState();
-    State componentState2 = serviceComponentHost2.getState();
-    assertEquals(State.STARTED, componentState1);
-    assertEquals(State.INSTALLED, componentState2);
-  }
-
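-  /**
-   * Verifies that a COMPLETED command report is applied to the action
-   * database: both the in-flight action and the persisted stage end up with
-   * the reported exit code and a COMPLETED host-role status.
-   */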
-  @Test
-  public void testCommandReport() throws AmbariException {
-    injector.injectMembers(this);
-    clusters.addHost(DummyHostname1);
-    clusters.getHost(DummyHostname1).persist();
-    clusters.addCluster(DummyCluster);
-    ActionDBAccessor db = injector.getInstance(ActionDBAccessorImpl.class);
-    ActionManager am = new ActionManager(5000, 1200000, new ActionQueue(), clusters, db,
-        new HostsMap((String) null));
-    populateActionDB(db, DummyHostname1);
-    Stage stage = db.getAllStages(requestId).get(0);
-    assertEquals(stageId, stage.getStageId());
-    stage.setHostRoleStatus(DummyHostname1, HBASE_MASTER, HostRoleStatus.QUEUED);
-    db.hostRoleScheduled(stage, DummyHostname1, HBASE_MASTER);
-    List<CommandReport> reports = new ArrayList<CommandReport>();
-    CommandReport cr = new CommandReport();
-    cr.setActionId(StageUtils.getActionId(requestId, stageId));
-    cr.setTaskId(1);
-    cr.setRole(HBASE_MASTER);
-    cr.setStatus("COMPLETED");
-    cr.setStdErr("");
-    cr.setStdOut("");
-    cr.setExitCode(215);
-    reports.add(cr);
-    am.processTaskResponse(DummyHostname1, reports);
-    assertEquals(215,
-            am.getAction(requestId, stageId).getExitCode(DummyHostname1, HBASE_MASTER));
-    assertEquals(HostRoleStatus.COMPLETED, am.getAction(requestId, stageId)
-            .getHostRoleStatus(DummyHostname1, HBASE_MASTER));
-    Stage s = db.getAllStages(requestId).get(0);
-    assertEquals(HostRoleStatus.COMPLETED,
-            s.getHostRoleStatus(DummyHostname1, HBASE_MASTER));
-    assertEquals(215,
-            s.getExitCode(DummyHostname1, HBASE_MASTER));
-  }
-
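-  /**
-   * Persists a single stage containing one HBASE_MASTER START command for
-   * the given host, so command reports have something to update.
-   */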
-  private void populateActionDB(ActionDBAccessor db, String hostname) {
-    Stage s = new Stage(requestId, "/a/b", DummyCluster);
-    s.setStageId(stageId);
-    s.addHostRoleExecutionCommand(hostname, Role.HBASE_MASTER,
-        RoleCommand.START,
-        new ServiceComponentHostStartEvent(Role.HBASE_MASTER.toString(),
-            hostname, System.currentTimeMillis(),
-            new HashMap<String, String>()), DummyCluster, HBASE);
-    List<Stage> stages = new ArrayList<Stage>();
-    stages.add(s);
-    db.persistActions(stages);
-  }
-
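-  /**
-   * Verifies that a basic registration marks the host HEALTHY, records its
-   * OS type, and initializes the registration and heartbeat timestamps.
-   */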
-  @Test
-  public void testRegistration() throws AmbariException,
-      InvalidStateTransitionException {
-    ActionManager am = new ActionManager(0, 0, null, null,
-        new ActionDBInMemoryImpl(), new HostsMap((String) null));
-    Clusters fsm = clusters;
-    HeartBeatHandler handler = new HeartBeatHandler(fsm, new ActionQueue(), am,
-        injector);
-    clusters.addHost(DummyHostname1);
-    Host hostObject = clusters.getHost(DummyHostname1);
-    hostObject.setIPv4("ipv4");
-    hostObject.setIPv6("ipv6");
-
-    Register reg = new Register();
-    HostInfo hi = new HostInfo();
-    hi.setHostName(DummyHostname1);
-    hi.setOS(DummyOsType);
-    reg.setHostname(DummyHostname1);
-    reg.setHardwareProfile(hi);
-    handler.handleRegistration(reg);
-    assertEquals(HostState.HEALTHY, hostObject.getState());
-    assertEquals(DummyOsType, hostObject.getOsType());
-    assertTrue(hostObject.getLastRegistrationTime() != 0);
-    assertEquals(hostObject.getLastHeartbeatTime(),
-        hostObject.getLastRegistrationTime());
-  }
-  
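-  /**
-   * Same as {@link #testRegistration()}, but also verifies that a public
-   * hostname supplied at registration is stored on the Host object.
-   */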
-  @Test
-  public void testRegistrationPublicHostname() throws AmbariException, InvalidStateTransitionException {
-    ActionManager am = new ActionManager(0, 0, null, null,
-        new ActionDBInMemoryImpl(), new HostsMap((String) null));
-    Clusters fsm = clusters;
-    HeartBeatHandler handler = new HeartBeatHandler(fsm, new ActionQueue(), am,
-        injector);
-    clusters.addHost(DummyHostname1);
-    Host hostObject = clusters.getHost(DummyHostname1);
-    hostObject.setIPv4("ipv4");
-    hostObject.setIPv6("ipv6");
-
-    Register reg = new Register();
-    HostInfo hi = new HostInfo();
-    hi.setHostName(DummyHostname1);
-    hi.setOS(DummyOsType);
-    reg.setHostname(DummyHostname1);
-    reg.setHardwareProfile(hi);
-    reg.setPublicHostname(DummyHostname1 + "-public");
-    handler.handleRegistration(reg);
-    assertEquals(HostState.HEALTHY, hostObject.getState());
-    assertEquals(DummyOsType, hostObject.getOsType());
-    assertTrue(hostObject.getLastRegistrationTime() != 0);
-    assertEquals(hostObject.getLastHeartbeatTime(),
-        hostObject.getLastRegistrationTime());
-    
-    Host verifyHost = clusters.getHost(DummyHostname1);
-    assertEquals(reg.getPublicHostname(), verifyHost.getPublicHostName());
-  }
-  
-
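-  /**
-   * Verifies that registering with an OS type unknown to the server is
-   * rejected with an AmbariException.
-   */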
-  @Test
-  public void testInvalidOSRegistration() throws AmbariException,
-      InvalidStateTransitionException {
-    ActionManager am = new ActionManager(0, 0, null, null,
-        new ActionDBInMemoryImpl(), new HostsMap((String) null));
-    Clusters fsm = clusters;
-    HeartBeatHandler handler = new HeartBeatHandler(fsm, new ActionQueue(), am,
-        injector);
-    clusters.addHost(DummyHostname1);
-    Host hostObject = clusters.getHost(DummyHostname1);
-    hostObject.setIPv4("ipv4");
-    hostObject.setIPv6("ipv6");
-
-    Register reg = new Register();
-    HostInfo hi = new HostInfo();
-    hi.setHostName(DummyHostname1);
-    hi.setOS("MegaOperatingSystem");
-    reg.setHostname(DummyHostname1);
-    reg.setHardwareProfile(hi);
-    try {
-      handler.handleRegistration(reg);
-      fail("Expected failure for non-matching os type");
-    } catch (AmbariException e) {
-      // Expected
-    }
-  }
-
-
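-  /**
-   * Verifies that a brand-new node registers cleanly: OK status, response id
-   * reset to 0, and no status commands for a host with no components.
-   */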
-  @Test
-  public void testRegisterNewNode()
-      throws AmbariException, InvalidStateTransitionException {
-    ActionManager am = new ActionManager(0, 0, null, null,
-        new ActionDBInMemoryImpl(), new HostsMap((String) null));
-    Clusters fsm = clusters;
-    fsm.addHost(DummyHostname1);
-    Host hostObject = clusters.getHost(DummyHostname1);
-    hostObject.setIPv4("ipv4");
-    hostObject.setIPv6("ipv6");
-
-    HeartBeatHandler handler = new HeartBeatHandler(fsm, new ActionQueue(), am,
-        injector);
-    Register reg = new Register();
-    HostInfo hi = new HostInfo();
-    hi.setHostName(DummyHostname1);
-    hi.setOS("redhat5");
-    reg.setHostname(DummyHostname1);
-    reg.setHardwareProfile(hi);
-    RegistrationResponse response = handler.handleRegistration(reg);
-
-    assertEquals(HostState.HEALTHY, hostObject.getState());
-    assertEquals("redhat5", hostObject.getOsType());
-    assertEquals(RegistrationStatus.OK, response.getResponseStatus());
-    assertEquals(0, response.getResponseId());
-    assertTrue(response.getStatusCommands().isEmpty());
-  }
-
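-  /**
-   * Verifies the heartbeat response-id protocol: the id increases by one per
-   * heartbeat, repeating the previous id returns the cached response, and a
-   * far out-of-sequence id makes the server flag the agent for restart.
-   */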
-  @Test
-  public void testRequestId() throws IOException,
-      InvalidStateTransitionException, JsonGenerationException, JAXBException {
-    HeartBeatHandler heartBeatHandler = injector.getInstance(
-        HeartBeatHandler.class);
-
-    Register register = new Register();
-    register.setHostname("newHost");
-    register.setTimestamp(new Date().getTime());
-    register.setResponseId(123);
-    HostInfo hi = new HostInfo();
-    hi.setHostName(DummyHostname1);
-    hi.setOS("redhat5");
-    register.setHardwareProfile(hi);
-    RegistrationResponse registrationResponse = heartBeatHandler.handleRegistration(register);
-
-    assertEquals("ResponseId should start from zero", 0L, registrationResponse.getResponseId());
-
-    HeartBeat heartBeat = constructHeartBeat("newHost", registrationResponse.getResponseId(), Status.HEALTHY);
-    HeartBeatResponse hbResponse = heartBeatHandler.handleHeartBeat(heartBeat);
-
-    assertEquals("responseId was not incremented", 1L, hbResponse.getResponseId());
-    assertTrue("Not cached response returned", hbResponse == heartBeatHandler.handleHeartBeat(heartBeat));
-
-    heartBeat.setResponseId(1L);
-    hbResponse = heartBeatHandler.handleHeartBeat(heartBeat);
-    assertEquals("responseId was not incremented", 2L, hbResponse.getResponseId());
-    assertFalse("Agent is flagged for restart", hbResponse.isRestartAgent());
-
-    log.debug(StageUtils.jaxbToString(hbResponse));
-
-    heartBeat.setResponseId(20L);
-    hbResponse = heartBeatHandler.handleHeartBeat(heartBeat);
-//    assertEquals("responseId was not incremented", 2L, hbResponse.getResponseId());
-    assertTrue("Agent is not flagged for restart", hbResponse.isRestartAgent());
-
-    log.debug(StageUtils.jaxbToString(hbResponse));
-
-  }
-
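-  /**
-   * Builds a minimal heartbeat for the given host, response id and health
-   * status, with an empty cause and an empty report list.
-   */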
-  private HeartBeat constructHeartBeat(String hostName, long responseId, Status status) {
-    HeartBeat heartBeat = new HeartBeat();
-    heartBeat.setHostname(hostName);
-    heartBeat.setTimestamp(new Date().getTime());
-    heartBeat.setResponseId(responseId);
-    HostStatus hs = new HostStatus();
-    hs.setCause("");
-    hs.setStatus(status);
-    heartBeat.setNodeStatus(hs);
-    heartBeat.setReports(Collections.<CommandReport>emptyList());
-
-    return heartBeat;
-  }
-
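-  /**
-   * Verifies that status commands generated by the HeartbeatMonitor are
-   * attached to the registration response.
-   */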
-  @Test
-  public void testStateCommandsAtRegistration() throws AmbariException, InvalidStateTransitionException {
-    List<StatusCommand> dummyCmds = new ArrayList<StatusCommand>();
-    StatusCommand statusCmd1 = new StatusCommand();
-    statusCmd1.setClusterName(DummyCluster);
-    statusCmd1.setServiceName(HDFS);
-    dummyCmds.add(statusCmd1);
-    HeartbeatMonitor hm = mock(HeartbeatMonitor.class);
-    when(hm.generateStatusCommands(anyString())).thenReturn(dummyCmds);
-
-    ActionManager am = new ActionManager(0, 0, null, null,
-            new ActionDBInMemoryImpl(), new HostsMap((String) null));
-    Clusters fsm = clusters;
-    ActionQueue actionQueue = new ActionQueue();
-    HeartBeatHandler handler = new HeartBeatHandler(fsm, actionQueue, am,
-        injector);
-    handler.setHeartbeatMonitor(hm);
-    clusters.addHost(DummyHostname1);
-    Host hostObject = clusters.getHost(DummyHostname1);
-    hostObject.setIPv4("ipv4");
-    hostObject.setIPv6("ipv6");
-
-    Register reg = new Register();
-    HostInfo hi = new HostInfo();
-    hi.setHostName(DummyHostname1);
-    hi.setOS(DummyOsType);
-    reg.setHostname(DummyHostname1);
-    reg.setHardwareProfile(hi);
-    RegistrationResponse registrationResponse = handler.handleRegistration(reg);
-    assertEquals(1, registrationResponse.getStatusCommands().size());
-    assertEquals(statusCmd1, registrationResponse.getStatusCommands().get(0));
-  }
-
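-  /**
-   * Verifies that an IN_PROGRESS command report does not advance the
-   * component state: a component still installing stays in INSTALLING.
-   */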
-  @Test
-  public void testTaskInProgressHandling() throws AmbariException, InvalidStateTransitionException {
-    ActionManager am = new ActionManager(0, 0, null, null,
-            new ActionDBInMemoryImpl(), new HostsMap((String) null));
-    clusters.addHost(DummyHostname1);
-    clusters.getHost(DummyHostname1).setOsType(DummyOsType);
-    clusters.getHost(DummyHostname1).persist();
-    clusters.addCluster(DummyCluster);
-
-    Cluster cluster = clusters.getCluster(DummyCluster);
-    cluster.setDesiredStackVersion(new StackId(DummyStackId));
-
-    @SuppressWarnings("serial")
-    Set<String> hostNames = new HashSet<String>(){{
-      add(DummyHostname1);
-    }};
-    clusters.mapHostsToCluster(hostNames, DummyCluster);
-    Service hdfs = cluster.addService(HDFS);
-    hdfs.persist();
-    hdfs.addServiceComponent(DATANODE).persist();
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(NAMENODE).persist();
-    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
-    hdfs.addServiceComponent(SECONDARY_NAMENODE).persist();
-    hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1).persist();
-
-    ActionQueue aq = new ActionQueue();
-    HeartBeatHandler handler = new HeartBeatHandler(clusters, aq, am, injector);
-
-    Register reg = new Register();
-    HostInfo hi = new HostInfo();
-    hi.setHostName(DummyHostname1);
-    hi.setOS(DummyOs);
-    hi.setOSRelease(DummyOSRelease);
-    reg.setHostname(DummyHostname1);
-    reg.setResponseId(0);
-    reg.setHardwareProfile(hi);
-    handler.handleRegistration(reg);
-
-    ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS).
-            getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
-    serviceComponentHost1.setState(State.INSTALLING);
-
-
-    HeartBeat hb = new HeartBeat();
-    hb.setTimestamp(System.currentTimeMillis());
-    hb.setResponseId(0);
-    hb.setHostname(DummyHostname1);
-    hb.setNodeStatus(new HostStatus(Status.HEALTHY, DummyHostStatus));
-
-    List<CommandReport> reports = new ArrayList<CommandReport>();
-    CommandReport cr = new CommandReport();
-    cr.setActionId(StageUtils.getActionId(requestId, stageId));
-    cr.setTaskId(1);
-    cr.setClusterName(DummyCluster);
-    cr.setServiceName(HDFS);
-    cr.setRole(DATANODE);
-    cr.setStatus("IN_PROGRESS");
-    cr.setStdErr("none");
-    cr.setStdOut("dummy output");
-    cr.setExitCode(777);
-    reports.add(cr);
-    hb.setReports(reports);
-    hb.setComponentStatus(new ArrayList<ComponentStatus>());
-
-    handler.handleHeartBeat(hb);
-    State componentState1 = serviceComponentHost1.getState();
-    assertEquals("Host state should still be installing", State.INSTALLING, componentState1);
-  }
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
deleted file mode 100644
index f42be24..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
+++ /dev/null
@@ -1,244 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.agent;
-
-import static org.junit.Assert.*;
-import static org.mockito.Mockito.*;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.Role;
-import org.apache.ambari.server.actionmanager.ActionManager;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.orm.GuiceJpaInitializer;
-import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.HostState;
-import org.apache.ambari.server.state.Service;
-import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.google.inject.persist.PersistService;
-import org.mockito.ArgumentCaptor;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-public class TestHeartbeatMonitor {
-
-  private static Injector injector;
-
-  private String hostname1 = "host1";
-  private String hostname2 = "host2";
-  private String clusterName = "cluster1";
-  private String serviceName = "HDFS";
-  private int heartbeatMonitorWakeupIntervalMS = 30;
-  private AmbariMetaInfo ambariMetaInfo;
-
-  private static final Logger LOG =
-          LoggerFactory.getLogger(TestHeartbeatMonitor.class);
-
-  @Before
-  public void setup() throws Exception {
-    injector = Guice.createInjector(new InMemoryDefaultTestModule());
-    injector.getInstance(GuiceJpaInitializer.class);
-    //injector.getInstance(OrmTestHelper.class).createDefaultData();
-    ambariMetaInfo = injector.getInstance(AmbariMetaInfo.class);
-    ambariMetaInfo.init();
-  }
-
-  @After
-  public void teardown() {
-    injector.getInstance(PersistService.class).stop();
-  }
-
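-  /**
-   * Verifies that generateStatusCommands emits one StatusCommand per
-   * component mapped to a host, and none for a host with no components.
-   */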
-  @Test
-  public void testStateCommandsGeneration() throws AmbariException, InterruptedException,
-          InvalidStateTransitionException {
-    Clusters clusters = injector.getInstance(Clusters.class);
-    clusters.addHost(hostname1);
-    clusters.getHost(hostname1).setOsType("centos6");
-    clusters.getHost(hostname1).persist();
-    clusters.addHost(hostname2);
-    clusters.getHost(hostname2).setOsType("centos6");
-    clusters.getHost(hostname2).persist();
-    clusters.addCluster(clusterName);
-    Cluster cluster = clusters.getCluster(clusterName);
-    cluster.setDesiredStackVersion(new StackId("HDP-0.1"));
-    Set<String> hostNames = new HashSet<String>(){{
-      add(hostname1);
-      add(hostname2);
-    }};
-    clusters.mapHostsToCluster(hostNames, clusterName);
-    Service hdfs = cluster.addService(serviceName);
-    hdfs.persist();
-    hdfs.addServiceComponent(Role.DATANODE.name()).persist();
-    hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(hostname1).persist();
-    hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
-    hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(hostname1).persist();
-    hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name()).persist();
-    hdfs.getServiceComponent(Role.SECONDARY_NAMENODE.name()).addServiceComponentHost(hostname1).persist();
-
-    ActionQueue aq = new ActionQueue();
-    ActionManager am = mock(ActionManager.class);
-    HeartbeatMonitor hm = new HeartbeatMonitor(clusters, aq, am, heartbeatMonitorWakeupIntervalMS);
-    HeartBeatHandler handler = new HeartBeatHandler(clusters, aq, am, injector);
-    Register reg = new Register();
-    reg.setHostname(hostname1);
-    reg.setResponseId(12);
-    reg.setTimestamp(System.currentTimeMillis() - 300);
-    HostInfo hi = new HostInfo();
-    hi.setOS("Centos5");
-    reg.setHardwareProfile(hi);
-    handler.handleRegistration(reg);
-
-    HeartBeat hb = new HeartBeat();
-    hb.setHostname(hostname1);
-    hb.setNodeStatus(new HostStatus(HostStatus.Status.HEALTHY, "cool"));
-    hb.setTimestamp(System.currentTimeMillis());
-    hb.setResponseId(12);
-    handler.handleHeartBeat(hb);
-
-    List<StatusCommand> cmds = hm.generateStatusCommands(hostname1);
-    assertTrue("HeartbeatMonitor should generate StatusCommands for host1", cmds.size() == 3);
-    assertEquals("HDFS", cmds.get(0).getServiceName());
-    boolean containsDATANODEStatus = false;
-    boolean containsNAMENODEStatus = false;
-    boolean containsSECONDARY_NAMENODEStatus = false;
-    for (StatusCommand cmd : cmds) {
-      containsDATANODEStatus |= cmd.getComponentName().equals("DATANODE");
-      containsNAMENODEStatus |= cmd.getComponentName().equals("NAMENODE");
-      containsSECONDARY_NAMENODEStatus |= cmd.getComponentName().equals("SECONDARY_NAMENODE");
-    }
-    assertTrue(containsDATANODEStatus);
-    assertTrue(containsNAMENODEStatus);
-    assertTrue(containsSECONDARY_NAMENODEStatus);
-    cmds = hm.generateStatusCommands(hostname2);
-    assertTrue("HeartbeatMonitor should not generate StatusCommands for host2 because it has no services", cmds.isEmpty());
-  }
-
-
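-  /**
-   * Verifies that a running HeartbeatMonitor enqueues StatusCommands for a
-   * host whose last heartbeat is stale, in addition to the commands queued
-   * at registration time.
-   */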
-  @Test
-  public void testHeartbeatStateCommandsEnqueueing() throws AmbariException, InterruptedException,
-          InvalidStateTransitionException {
-    Clusters clusters = injector.getInstance(Clusters.class);
-    clusters.addHost(hostname1);
-    clusters.getHost(hostname1).setOsType("centos5");
-    clusters.getHost(hostname1).persist();
-    clusters.addCluster(clusterName);
-    Cluster cluster = clusters.getCluster(clusterName);
-    cluster.setDesiredStackVersion(new StackId("HDP-0.1"));
-
-    Set<String> hostNames = new HashSet<String>(){{
-      add(hostname1);
-     }};
-
-    clusters.mapHostsToCluster(hostNames, clusterName);
-
-    Service hdfs = cluster.addService(serviceName);
-    hdfs.persist();
-    hdfs.addServiceComponent(Role.DATANODE.name()).persist();
-    hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(hostname1).persist();
-    hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
-    hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(hostname1).persist();
-    hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name()).persist();
-    hdfs.getServiceComponent(Role.SECONDARY_NAMENODE.name()).addServiceComponentHost(hostname1).persist();
-
-    ActionQueue aqMock = mock(ActionQueue.class);
-    ArgumentCaptor<AgentCommand> commandCaptor =
-            ArgumentCaptor.forClass(AgentCommand.class);
-
-    ActionManager am = mock(ActionManager.class);
-    HeartbeatMonitor hm = new HeartbeatMonitor(clusters, aqMock, am, heartbeatMonitorWakeupIntervalMS);
-    HeartBeatHandler handler = new HeartBeatHandler(clusters, aqMock, am,
-        injector);
-    Register reg = new Register();
-    reg.setHostname(hostname1);
-    reg.setResponseId(12);
-    reg.setTimestamp(System.currentTimeMillis() - 15);
-    HostInfo hi = new HostInfo();
-    hi.setOS("Centos5");
-    reg.setHardwareProfile(hi);
-    handler.handleRegistration(reg);
-    HeartBeat hb = new HeartBeat();
-    hb.setHostname(hostname1);
-    hb.setNodeStatus(new HostStatus(HostStatus.Status.HEALTHY, "cool"));
-    hb.setTimestamp(System.currentTimeMillis());
-    hb.setResponseId(13);
-    handler.handleHeartBeat(hb);
-    LOG.info("YYY");
-    clusters.getHost(hostname1).setLastHeartbeatTime(System.currentTimeMillis() - 15);
-    hm.start();
-    Thread.sleep(3 * heartbeatMonitorWakeupIntervalMS);
-    hm.shutdown();
-    hm.join(2 * heartbeatMonitorWakeupIntervalMS);
-    if (hm.isAlive()) {
-      fail("HeartbeatMonitor should be already stopped");
-    }
-    verify(aqMock, atLeast(2)).enqueue(eq(hostname1), commandCaptor.capture());  // After registration and by HeartbeatMonitor
-
-    List<AgentCommand> cmds = commandCaptor.getAllValues();
-    assertTrue("HeartbeatMonitor should generate StatusCommands for host1", cmds.size() >= 2);
-    for (AgentCommand command : cmds) {
-      assertEquals("HDFS", ((StatusCommand)command).getServiceName());
-    }
-
-  }
-
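-  /**
-   * Verifies that when heartbeats stop arriving the monitor flushes the
-   * host's action queue and transitions the host to HEARTBEAT_LOST.
-   */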
-  @Test
-  public void testHeartbeatLoss() throws AmbariException, InterruptedException,
-          InvalidStateTransitionException {
-    Clusters fsm = injector.getInstance(Clusters.class);
-    String hostname = "host1";
-    fsm.addHost(hostname);
-    ActionQueue aq = new ActionQueue();
-    ActionManager am = mock(ActionManager.class);
-    HeartbeatMonitor hm = new HeartbeatMonitor(fsm, aq, am, 10);
-    HeartBeatHandler handler = new HeartBeatHandler(fsm, aq, am, injector);
-    Register reg = new Register();
-    reg.setHostname(hostname);
-    reg.setResponseId(12);
-    reg.setTimestamp(System.currentTimeMillis() - 300);
-    HostInfo hi = new HostInfo();
-    hi.setOS("Centos5");
-    reg.setHardwareProfile(hi);
-    handler.handleRegistration(reg);
-    HeartBeat hb = new HeartBeat();
-    hb.setHostname(hostname);
-    hb.setNodeStatus(new HostStatus(HostStatus.Status.HEALTHY, "cool"));
-    hb.setTimestamp(System.currentTimeMillis());
-    hb.setResponseId(12);
-    handler.handleHeartBeat(hb);
-    hm.start();
-    aq.enqueue(hostname, new ExecutionCommand());
-    //Heartbeat will expire and action queue will be flushed
-    while (aq.size(hostname) != 0) {
-      Thread.sleep(1);
-    }
-    assertEquals(HostState.HEARTBEAT_LOST, fsm.getHost(hostname).getState());
-  }
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/TestSuite.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/TestSuite.java
deleted file mode 100644
index 48307f0..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/TestSuite.java
+++ /dev/null
@@ -1,48 +0,0 @@
-package org.apache.ambari.server.api;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * All api unit tests.
- */
-
-import org.apache.ambari.server.api.handlers.*;
-import org.apache.ambari.server.api.predicate.QueryLexerTest;
-import org.apache.ambari.server.api.predicate.QueryParserTest;
-import org.apache.ambari.server.api.predicate.operators.*;
-import org.apache.ambari.server.api.query.QueryImplTest;
-import org.apache.ambari.server.api.resources.ResourceInstanceImplTest;
-import org.apache.ambari.server.api.services.*;
-import org.apache.ambari.server.api.services.parsers.JsonPropertyParserTest;
-import org.apache.ambari.server.api.services.serializers.JsonSerializerTest;
-import org.junit.runner.RunWith;
-import org.junit.runners.Suite;
-
-@RunWith(Suite.class)
-@Suite.SuiteClasses({ClusterServiceTest.class, HostServiceTest.class, ServiceServiceTest.class,
-    ComponentServiceTest.class, HostComponentServiceTest.class, ReadHandlerTest.class, QueryImplTest.class,
-    JsonPropertyParserTest.class, CreateHandlerTest.class, UpdateHandlerTest.class, DeleteHandlerTest.class,
-    PersistenceManagerImplTest.class, GetRequestTest.class, PutRequestTest.class, PostRequestTest.class,
-    DeleteRequestTest.class, JsonSerializerTest.class, QueryCreateHandlerTest.class, ResourceInstanceImplTest.class,
-    QueryLexerTest.class, QueryParserTest.class, IsEmptyOperatorTest.class, InOperatorTest.class,
-    AndOperatorTest.class, OrOperatorTest.class, EqualsOperatorTest.class, GreaterEqualsOperatorTest.class,
-    GreaterOperatorTest.class, LessEqualsOperatorTest.class, NotEqualsOperatorTest.class,
-    NotOperatorTest.class})
-public class TestSuite {
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/handlers/CreateHandlerTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/handlers/CreateHandlerTest.java
deleted file mode 100644
index 9a27501..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/handlers/CreateHandlerTest.java
+++ /dev/null
@@ -1,183 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.handlers;
-
-import org.apache.ambari.server.api.predicate.InvalidQueryException;
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.apache.ambari.server.api.services.ResultStatus;
-import org.apache.ambari.server.api.services.persistence.PersistenceManager;
-import org.apache.ambari.server.api.services.Request;
-import org.apache.ambari.server.api.services.Result;
-import org.apache.ambari.server.api.util.TreeNode;
-import org.apache.ambari.server.controller.spi.RequestStatus;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.junit.Test;
-
-import java.util.*;
-
-import static org.easymock.EasyMock.*;
-import static org.junit.Assert.*;
-
-/**
- * Unit tests for CreateHandler.
- */
-public class CreateHandlerTest {
-
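-  /**
-   * A synchronous create (RequestStatus.Status.Complete) should return a
-   * CREATED result whose tree contains only the associated resources.
-   */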
-  @Test
-  public void testHandleRequest__Synchronous() throws Exception {
-    Request request = createNiceMock(Request.class);
-    ResourceInstance resource = createNiceMock(ResourceInstance.class);
-    PersistenceManager pm = createStrictMock(PersistenceManager.class);
-    RequestStatus status = createNiceMock(RequestStatus.class);
-    Resource resource1 = createNiceMock(Resource.class);
-    Resource resource2 = createNiceMock(Resource.class);
-
-    Set<Map<String, Object>> setResourceProperties = new HashSet<Map<String, Object>>();
-
-    Set<Resource> setResources = new HashSet<Resource>();
-    setResources.add(resource1);
-    setResources.add(resource2);
-
-    // expectations
-    expect(request.getResource()).andReturn(resource).atLeastOnce();
-    expect(request.getQueryPredicate()).andReturn(null).atLeastOnce();
-    expect(request.getHttpBodyProperties()).andReturn(setResourceProperties).atLeastOnce();
-
-    expect(pm.create(resource, setResourceProperties)).andReturn(status);
-    expect(status.getStatus()).andReturn(RequestStatus.Status.Complete);
-    expect(status.getAssociatedResources()).andReturn(setResources);
-    expect(resource1.getType()).andReturn(Resource.Type.Cluster).anyTimes();
-    expect(resource2.getType()).andReturn(Resource.Type.Cluster).anyTimes();
-
-    replay(request, resource, pm, status, resource1, resource2);
-
-    Result result = new TestCreateHandler(pm).handleRequest(request);
-
-    assertNotNull(result);
-    TreeNode<Resource> tree = result.getResultTree();
-    assertEquals(1, tree.getChildren().size());
-    TreeNode<Resource> resourcesNode = tree.getChild("resources");
-    assertEquals(2, resourcesNode.getChildren().size());
-    boolean foundResource1 = false;
-    boolean foundResource2 = false;
-    for(TreeNode<Resource> child : resourcesNode.getChildren()) {
-      Resource r = child.getObject();
-      if (r == resource1 && ! foundResource1) {
-        foundResource1 = true;
-      } else if (r == resource2 && ! foundResource2) {
-        foundResource2 = true;
-      } else {
-        fail();
-      }
-    }
-
-    assertEquals(ResultStatus.STATUS.CREATED, result.getStatus().getStatus());
-    verify(request, resource, pm, status, resource1, resource2);
-  }
-
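-  /**
-   * An asynchronous create (RequestStatus.Status.Accepted) should
-   * additionally expose the request resource under a "request" node and
-   * return ACCEPTED.
-   */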
-  @Test
-  public void testHandleRequest__Asynchronous() throws Exception {
-    Request request = createNiceMock(Request.class);
-    ResourceInstance resource = createNiceMock(ResourceInstance.class);
-    PersistenceManager pm = createStrictMock(PersistenceManager.class);
-    RequestStatus status = createNiceMock(RequestStatus.class);
-    Resource resource1 = createNiceMock(Resource.class);
-    Resource resource2 = createNiceMock(Resource.class);
-    Resource requestResource = createNiceMock(Resource.class);
-
-    Set<Map<String, Object>> setResourceProperties = new HashSet<Map<String, Object>>();
-
-    Set<Resource> setResources = new HashSet<Resource>();
-    setResources.add(resource1);
-    setResources.add(resource2);
-
-    // expectations
-    expect(request.getResource()).andReturn(resource);
-    expect(request.getHttpBodyProperties()).andReturn(setResourceProperties);
-    expect(request.getQueryPredicate()).andReturn(null).atLeastOnce();
-
-    expect(pm.create(resource, setResourceProperties)).andReturn(status);
-    expect(status.getStatus()).andReturn(RequestStatus.Status.Accepted);
-    expect(status.getAssociatedResources()).andReturn(setResources);
-    expect(resource1.getType()).andReturn(Resource.Type.Cluster).anyTimes();
-    expect(resource2.getType()).andReturn(Resource.Type.Cluster).anyTimes();
-    expect(status.getRequestResource()).andReturn(requestResource).anyTimes();
-
-    replay(request, resource, pm, status, resource1, resource2, requestResource);
-
-    Result result = new TestCreateHandler(pm).handleRequest(request);
-
-    assertNotNull(result);
-    TreeNode<Resource> tree = result.getResultTree();
-    assertEquals(2, tree.getChildren().size());
-    TreeNode<Resource> resourcesNode = tree.getChild("resources");
-    assertEquals(2, resourcesNode.getChildren().size());
-    boolean foundResource1 = false;
-    boolean foundResource2 = false;
-    for(TreeNode<Resource> child : resourcesNode.getChildren()) {
-      Resource r = child.getObject();
-      if (r == resource1 && ! foundResource1) {
-        foundResource1 = true;
-      } else if (r == resource2 && ! foundResource2) {
-        foundResource2 = true;
-      } else {
-        fail();
-      }
-    }
-
-    TreeNode<Resource> statusNode = tree.getChild("request");
-    assertNotNull(statusNode);
-    assertEquals(0, statusNode.getChildren().size());
-    assertSame(requestResource, statusNode.getObject());
-
-    assertEquals(ResultStatus.STATUS.ACCEPTED, result.getStatus().getStatus());
-    verify(request, resource, pm, status, resource1, resource2, requestResource);
-  }
-
-  private class TestCreateHandler extends CreateHandler {
-    private PersistenceManager m_testPm;
-
-    private TestCreateHandler(PersistenceManager pm) {
-      m_testPm = pm;
-    }
-
-    @Override
-    protected PersistenceManager getPersistenceManager() {
-      return m_testPm;
-    }
-  }
-
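-  /**
-   * An unparsable query predicate should yield BAD_REQUEST with the
-   * underlying exception message included in the result status.
-   */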
-  @Test
-  public void testHandleRequest__InvalidQuery() throws Exception {
-    Request request = createNiceMock(Request.class);
-    ResourceInstance resource = createNiceMock(ResourceInstance.class);
-    Exception e = new InvalidQueryException("test exception");
-
-    expect(request.getResource()).andReturn(resource);
-    expect(request.getQueryPredicate()).andThrow(e);
-    replay(request, resource);
-
-    Result result = new CreateHandler().handleRequest(request);
-    assertEquals(ResultStatus.STATUS.BAD_REQUEST, result.getStatus().getStatus());
-    assertTrue(result.getStatus().getMessage().contains(e.getMessage()));
-
-    verify(request, resource);
-  }
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/handlers/DeleteHandlerTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/handlers/DeleteHandlerTest.java
deleted file mode 100644
index d0b2432..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/handlers/DeleteHandlerTest.java
+++ /dev/null
@@ -1,191 +0,0 @@
-package org.apache.ambari.server.api.handlers;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.ambari.server.api.predicate.InvalidQueryException;
-import org.apache.ambari.server.api.query.Query;
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.apache.ambari.server.api.services.ResultStatus;
-import org.apache.ambari.server.api.services.persistence.PersistenceManager;
-import org.apache.ambari.server.api.services.Request;
-import org.apache.ambari.server.api.services.Result;
-import org.apache.ambari.server.api.util.TreeNode;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.RequestStatus;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.junit.Test;
-
-import java.util.*;
-
-import static org.easymock.EasyMock.*;
-import static org.junit.Assert.*;
-
-/**
- * Unit tests for DeleteHandler.
- */
-public class DeleteHandlerTest {
-
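-  /**
-   * A synchronous delete should apply the user predicate to the resource's
-   * query and return OK with the associated resources.
-   */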
-  @Test
-  public void testHandleRequest__Synchronous() throws Exception {
-    Request request = createMock(Request.class);
-    ResourceInstance resource = createMock(ResourceInstance.class);
-    PersistenceManager pm = createStrictMock(PersistenceManager.class);
-    RequestStatus status = createMock(RequestStatus.class);
-    Resource resource1 = createMock(Resource.class);
-    Resource resource2 = createMock(Resource.class);
-    Predicate userPredicate = createNiceMock(Predicate.class);
-    Query query = createNiceMock(Query.class);
-
-    Set<Map<String, Object>> setResourceProperties = new HashSet<Map<String, Object>>();
-
-    Set<Resource> setResources = new HashSet<Resource>();
-    setResources.add(resource1);
-    setResources.add(resource2);
-
-    // expectations
-    expect(request.getResource()).andReturn(resource).atLeastOnce();
-    expect(request.getHttpBodyProperties()).andReturn(setResourceProperties).atLeastOnce();
-
-    expect(request.getQueryPredicate()).andReturn(userPredicate).atLeastOnce();
-    expect(resource.getQuery()).andReturn(query).atLeastOnce();
-    query.setUserPredicate(userPredicate);
-
-    expect(pm.delete(resource, setResourceProperties)).andReturn(status);
-    expect(status.getStatus()).andReturn(RequestStatus.Status.Complete);
-    expect(status.getAssociatedResources()).andReturn(setResources);
-    expect(resource1.getType()).andReturn(Resource.Type.Cluster).anyTimes();
-    expect(resource2.getType()).andReturn(Resource.Type.Cluster).anyTimes();
-
-    replay(request, resource, pm, status, resource1, resource2, userPredicate, query);
-
-    Result result = new TestDeleteHandler(pm).handleRequest(request);
-
-    assertNotNull(result);
-    TreeNode<Resource> tree = result.getResultTree();
-    assertEquals(1, tree.getChildren().size());
-    TreeNode<Resource> resourcesNode = tree.getChild("resources");
-    assertEquals(2, resourcesNode.getChildren().size());
-    boolean foundResource1 = false;
-    boolean foundResource2 = false;
-    for(TreeNode<Resource> child : resourcesNode.getChildren()) {
-      Resource r = child.getObject();
-      if (r == resource1 && ! foundResource1) {
-        foundResource1 = true;
-      } else if (r == resource2 && ! foundResource2) {
-        foundResource2 = true;
-      } else {
-        fail();
-      }
-    }
-
-    assertEquals(ResultStatus.STATUS.OK, result.getStatus().getStatus());
-    verify(request, resource, pm, status, resource1, resource2, userPredicate, query);
-  }
-
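-  /**
-   * An asynchronous delete (here with no user predicate) should return
-   * ACCEPTED and expose the request resource under a "request" node.
-   */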
-  @Test
-  public void testHandleRequest__Asynchronous() throws Exception {
-    Request request = createMock(Request.class);
-    ResourceInstance resource = createMock(ResourceInstance.class);
-    PersistenceManager pm = createStrictMock(PersistenceManager.class);
-    RequestStatus status = createMock(RequestStatus.class);
-    Resource resource1 = createMock(Resource.class);
-    Resource resource2 = createMock(Resource.class);
-    Resource requestResource = createMock(Resource.class);
-
-    Set<Map<String, Object>> setResourceProperties = new HashSet<Map<String, Object>>();
-
-    Set<Resource> setResources = new HashSet<Resource>();
-    setResources.add(resource1);
-    setResources.add(resource2);
-
-    // expectations
-    expect(request.getResource()).andReturn(resource);
-    expect(request.getHttpBodyProperties()).andReturn(setResourceProperties);
-    // test delete with no user predicate
-    expect(request.getQueryPredicate()).andReturn(null).atLeastOnce();
-
-    expect(pm.delete(resource, setResourceProperties)).andReturn(status);
-    expect(status.getStatus()).andReturn(RequestStatus.Status.Accepted);
-    expect(status.getAssociatedResources()).andReturn(setResources);
-    expect(resource1.getType()).andReturn(Resource.Type.Cluster).anyTimes();
-    expect(resource2.getType()).andReturn(Resource.Type.Cluster).anyTimes();
-    expect(status.getRequestResource()).andReturn(requestResource).anyTimes();
-
-    replay(request, resource, pm, status, resource1, resource2, requestResource);
-
-    Result result = new TestDeleteHandler(pm).handleRequest(request);
-
-    assertNotNull(result);
-    TreeNode<Resource> tree = result.getResultTree();
-    assertEquals(2, tree.getChildren().size());
-    TreeNode<Resource> resourcesNode = tree.getChild("resources");
-    assertEquals(2, resourcesNode.getChildren().size());
-    boolean foundResource1 = false;
-    boolean foundResource2 = false;
-    for(TreeNode<Resource> child : resourcesNode.getChildren()) {
-      Resource r = child.getObject();
-      if (r == resource1 && ! foundResource1) {
-        foundResource1 = true;
-      } else if (r == resource2 && ! foundResource2) {
-        foundResource2 = true;
-      } else {
-        fail();
-      }
-    }
-
-    TreeNode<Resource> statusNode = tree.getChild("request");
-    assertNotNull(statusNode);
-    assertEquals(0, statusNode.getChildren().size());
-    assertSame(requestResource, statusNode.getObject());
-    assertEquals(ResultStatus.STATUS.ACCEPTED, result.getStatus().getStatus());
-
-    verify(request, resource, pm, status, resource1, resource2, requestResource);
-  }
-
-  private class TestDeleteHandler extends DeleteHandler {
-    private PersistenceManager m_testPm;
-
-    private TestDeleteHandler(PersistenceManager pm) {
-      m_testPm = pm;
-    }
-
-    @Override
-    protected PersistenceManager getPersistenceManager() {
-      return m_testPm;
-    }
-  }
-
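-  /**
-   * An unparsable query predicate should yield BAD_REQUEST with the
-   * underlying exception message included in the result status.
-   */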
-  @Test
-  public void testHandleRequest__InvalidQuery() throws Exception {
-    Request request = createNiceMock(Request.class);
-    ResourceInstance resource = createNiceMock(ResourceInstance.class);
-    Exception e = new InvalidQueryException("test exception");
-
-    expect(request.getResource()).andReturn(resource);
-    expect(request.getQueryPredicate()).andThrow(e);
-    replay(request, resource);
-
-    Result result = new DeleteHandler().handleRequest(request);
-    assertEquals(ResultStatus.STATUS.BAD_REQUEST, result.getStatus().getStatus());
-    assertTrue(result.getStatus().getMessage().contains(e.getMessage()));
-
-    verify(request, resource);
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/handlers/QueryCreateHandlerTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/handlers/QueryCreateHandlerTest.java
deleted file mode 100644
index 52e188a..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/handlers/QueryCreateHandlerTest.java
+++ /dev/null
@@ -1,252 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-package org.apache.ambari.server.api.handlers;
-
-import org.apache.ambari.server.api.predicate.InvalidQueryException;
-import org.apache.ambari.server.api.query.Query;
-import org.apache.ambari.server.api.resources.ResourceDefinition;
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.apache.ambari.server.api.resources.ResourceInstanceFactory;
-import org.apache.ambari.server.api.services.ResultStatus;
-import org.apache.ambari.server.api.services.persistence.PersistenceManager;
-import org.apache.ambari.server.api.services.Request;
-import org.apache.ambari.server.api.services.Result;
-import org.apache.ambari.server.api.util.TreeNode;
-import org.apache.ambari.server.api.util.TreeNodeImpl;
-import org.apache.ambari.server.controller.spi.*;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.junit.Test;
-
-import java.util.*;
-
-import static org.easymock.EasyMock.*;
-import static org.junit.Assert.*;
-
-
-/**
- * Unit tests for QueryCreateHandler.
- */
-public class QueryCreateHandlerTest {
-
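-  /**
-   * End-to-end sub-resource create: the handler runs the read query, pairs
-   * each matched parent resource with the request body properties, and
-   * issues a single create for the resulting property sets.
-   */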
-  @Test
-  public void testHandleRequest() throws Exception {
-    Request request = createNiceMock(Request.class);
-    ResourceInstance resourceInstance = createNiceMock(ResourceInstance.class);
-    ResourceDefinition resourceDefinition = createNiceMock(ResourceDefinition.class);
-    ResourceInstanceFactory resourceInstanceFactory = createNiceMock(ResourceInstanceFactory.class);
-    Query query = createNiceMock(Query.class);
-    Predicate predicate = createNiceMock(Predicate.class);
-    Result result = createNiceMock(Result.class);
-    ResourceInstance subResource = createNiceMock(ResourceInstance.class);
-    ResourceDefinition subResourceDefinition = createNiceMock(ResourceDefinition.class);
-    ClusterController controller = createNiceMock(ClusterController.class);
-    Schema serviceSchema = createNiceMock(Schema.class);
-    Schema componentSchema = createNiceMock(Schema.class);
-    String resourceKeyProperty = "resourceKeyProperty";
-    String createKeyProperty = "createKeyProperty";
-    Resource resource1 = createNiceMock(Resource.class);
-    Resource resource2 = createNiceMock(Resource.class);
-    PersistenceManager pm = createNiceMock(PersistenceManager.class);
-    ResourceInstance createResource = createNiceMock(ResourceInstance.class);
-    RequestStatus status = createNiceMock(RequestStatus.class);
-    Resource statusResource1 = createNiceMock(Resource.class);
-    Resource statusResource2 = createNiceMock(Resource.class);
-    RequestHandler readHandler = createStrictMock(RequestHandler.class);
-    ResultStatus resultStatus = createNiceMock(ResultStatus.class);
-
-    String httpBody = "{" +
-        "\"components\" : [" +
-        "{\"ServiceComponentInfo\" : {" +
-        "        \"component_name\" : \"SECONDARY_NAMENODE\"" +
-        "      }" +
-        "}," +
-        "{\"ServiceComponentInfo\" : {" +
-        "        \"component_name\" : \"HDFS_CLIENT\"" +
-        "      }" +
-        "}" +
-        "] }";
-
-    Map<Resource.Type, String> mapIds = new HashMap<Resource.Type, String>();
-
-    Set<Map<String, Object>> setRequestProps = new HashSet<Map<String, Object>>();
-    setRequestProps.add(Collections.<String, Object>singletonMap(
-        PropertyHelper.getPropertyId("ServiceComponentInfo", "component_name"), "SECONDARY_NAMENODE"));
-    setRequestProps.add(Collections.<String, Object>singletonMap(
-        PropertyHelper.getPropertyId("ServiceComponentInfo", "component_name"), "HDFS_CLIENT"));
-
-    Set<Map<String, Object>> setCreateProps = new HashSet<Map<String, Object>>();
-    Map<String, Object> map1 = new HashMap<String, Object>();
-    map1.put(PropertyHelper.getPropertyId("ServiceComponentInfo", "component_name"), "SECONDARY_NAMENODE");
-    map1.put(createKeyProperty, "id1");
-    setCreateProps.add(map1);
-    Map<String, Object> map2 = new HashMap<String, Object>();
-    map2.put(PropertyHelper.getPropertyId("ServiceComponentInfo", "component_name"), "SECONDARY_NAMENODE");
-    map2.put(createKeyProperty, "id2");
-    setCreateProps.add(map2);
-    Map<String, Object> map3 = new HashMap<String, Object>();
-    map3.put(PropertyHelper.getPropertyId("ServiceComponentInfo", "component_name"), "HDFS_CLIENT");
-    map3.put(createKeyProperty, "id1");
-    setCreateProps.add(map3);
-    Map<String, Object> map4 = new HashMap<String, Object>();
-    map4.put(PropertyHelper.getPropertyId("ServiceComponentInfo", "component_name"), "HDFS_CLIENT");
-    map4.put(createKeyProperty, "id2");
-    setCreateProps.add(map4);
-
-    Map<String, ResourceInstance> mapSubResources = new HashMap<String, ResourceInstance>();
-    mapSubResources.put("components", subResource);
-
-    TreeNode<Resource> resultTree = new TreeNodeImpl<Resource>(null, null, "result");
-    resultTree.addChild(resource1, "resource1");
-    resultTree.addChild(resource2, "resource2");
-
-    Set<Resource> setStatusResources = new HashSet<Resource>();
-    setStatusResources.add(statusResource1);
-    setStatusResources.add(statusResource2);
-
-    //expectations
-    expect(readHandler.handleRequest(request)).andReturn(result);
-    expect(result.getStatus()).andReturn(resultStatus).anyTimes();
-    expect(resultStatus.isErrorState()).andReturn(false);
-    expect(result.getResultTree()).andReturn(resultTree);
-
-    expect(request.getResource()).andReturn(resourceInstance).anyTimes();
-    expect(request.getHttpBody()).andReturn(httpBody).anyTimes();
-    expect(request.getHttpBodyProperties()).andReturn(setRequestProps).anyTimes();
-
-    expect(resourceInstance.getResourceDefinition()).andReturn(resourceDefinition).anyTimes();
-    expect(resourceInstance.getIds()).andReturn(mapIds).anyTimes();
-    expect(resourceInstance.getSubResources()).andReturn(mapSubResources).anyTimes();
-
-    expect(resourceDefinition.getType()).andReturn(Resource.Type.Service).anyTimes();
-
-    expect(subResource.getResourceDefinition()).andReturn(subResourceDefinition).anyTimes();
-    expect(subResourceDefinition.getType()).andReturn(Resource.Type.Component).anyTimes();
-
-    expect(controller.getSchema(Resource.Type.Service)).andReturn(serviceSchema).anyTimes();
-    expect(controller.getSchema(Resource.Type.Component)).andReturn(componentSchema).anyTimes();
-
-    expect(serviceSchema.getKeyPropertyId(Resource.Type.Service)).andReturn(resourceKeyProperty).anyTimes();
-    expect(componentSchema.getKeyPropertyId(Resource.Type.Service)).andReturn(createKeyProperty).anyTimes();
-
-    expect(result.getResultTree()).andReturn(resultTree).anyTimes();
-    expect(resource1.getPropertyValue(resourceKeyProperty)).andReturn("id1").anyTimes();
-    expect(resource2.getPropertyValue(resourceKeyProperty)).andReturn("id2").anyTimes();
-
-    expect(resourceInstanceFactory.createResource(Resource.Type.Component, mapIds)).
-        andReturn(createResource).anyTimes();
-
-    expect(pm.create(createResource, setCreateProps)).andReturn(status);
-    expect(status.getStatus()).andReturn(RequestStatus.Status.Complete).anyTimes();
-    expect(status.getAssociatedResources()).andReturn(setStatusResources).anyTimes();
-
-    expect(statusResource1.getType()).andReturn(Resource.Type.Component).anyTimes();
-    expect(statusResource2.getType()).andReturn(Resource.Type.Component).anyTimes();
-
-    replay(request, resourceInstance, resourceDefinition, query, predicate, result, subResource,
-        subResourceDefinition, controller, serviceSchema, componentSchema, resource1, resource2,
-        pm, resourceInstanceFactory, createResource, status, statusResource1, statusResource2,
-        readHandler, resultStatus);
-
-    //test
-    Result testResult = new TestQueryCreateHandler(resourceInstanceFactory, controller, pm, readHandler).
-        handleRequest(request);
-
-    Collection<TreeNode<Resource>> children = testResult.getResultTree().getChild("resources").getChildren();
-    assertEquals(2, children.size());
-    boolean containsStatusResource1 = false;
-    boolean containsStatusResource2 = false;
-    for (TreeNode<Resource> child : children) {
-      Resource r = child.getObject();
-      if (r == statusResource1) {
-        containsStatusResource1 = true;
-      } else if(r == statusResource2) {
-        containsStatusResource2 = true;
-      }
-    }
-    assertTrue(containsStatusResource1);
-    assertTrue(containsStatusResource2);
-    assertEquals(ResultStatus.STATUS.CREATED, testResult.getStatus().getStatus());
-
-    verify(request, resourceInstance, resourceDefinition, query, predicate, result, subResource,
-        subResourceDefinition, controller, serviceSchema, componentSchema, resource1, resource2,
-        pm, resourceInstanceFactory, createResource, status, statusResource1, statusResource2,
-        readHandler, resultStatus);
-  }
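-
-  // Added commentary (not in the original test): the expectations above pin
-  // down how the handler is assumed to expand the request body.  Each property
-  // map from the body is copied once per resource matched by the read query,
-  // with the matched resource's key property injected -- 2 body maps x 2
-  // matched ids = the 4 create maps built above.  Illustrative sketch only;
-  // "queryResults" is a hypothetical name:
-  //
-  //   for (Resource matched : queryResults) {                  // resource1, resource2
-  //     for (Map<String, Object> bodyProps : setRequestProps) {
-  //       Map<String, Object> createProps = new HashMap<String, Object>(bodyProps);
-  //       createProps.put(createKeyProperty, matched.getPropertyValue(resourceKeyProperty));
-  //       setCreateProps.add(createProps);
-  //     }
-  //   }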
-
-  static class TestQueryCreateHandler extends QueryCreateHandler {
-    private ResourceInstanceFactory m_resourceFactory;
-    private ClusterController m_controller;
-    private PersistenceManager m_testPm;
-    private RequestHandler m_testReadHandler;
-
-    TestQueryCreateHandler(ResourceInstanceFactory resourceFactory, ClusterController controller,
-                           PersistenceManager pm, RequestHandler readHandler) {
-      m_resourceFactory = resourceFactory;
-      m_controller = controller;
-      m_testPm = pm;
-      m_testReadHandler = readHandler;
-    }
-
-    @Override
-    protected ResourceInstanceFactory getResourceFactory() {
-      return m_resourceFactory;
-    }
-
-    @Override
-    protected ClusterController getClusterController() {
-      return m_controller;
-    }
-
-    @Override
-    protected PersistenceManager getPersistenceManager() {
-      return m_testPm;
-    }
-
-    @Override
-    protected RequestHandler getReadHandler() {
-      return m_testReadHandler;
-    }
-  }
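-
-  // Design note (added commentary): the handler under test evidently exposes
-  // its collaborators through protected getters, so the test can swap in mocks
-  // by subclassing instead of wiring a dependency-injection container.  The
-  // same seam is reused by TestUpdateHandler in UpdateHandlerTest below.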
-
-  @Test
-  public void testHandleRequest__InvalidQueryException() throws Exception {
-    Request request = createStrictMock(Request.class);
-    ResourceInstance resource = createStrictMock(ResourceInstance.class);
-    Query query = createMock(Query.class);
-    InvalidQueryException exception = new InvalidQueryException("test");
-
-    expect(request.getResource()).andReturn(resource);
-    expect(resource.getQuery()).andReturn(query);
-
-    expect(request.getFields()).andReturn(Collections.<String, TemporalInfo>emptyMap());
-
-    expect(request.getQueryPredicate()).andThrow(exception);
-    replay(request, resource, query);
-
-    //test
-    QueryCreateHandler handler = new QueryCreateHandler();
-    Result result = handler.handleRequest(request);
-
-    assertEquals(ResultStatus.STATUS.BAD_REQUEST, result.getStatus().getStatus());
-    assertTrue(result.getStatus().getMessage().contains(exception.getMessage()));
-    verify(request, resource, query);
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/handlers/ReadHandlerTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/handlers/ReadHandlerTest.java
deleted file mode 100644
index 86f76bb..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/handlers/ReadHandlerTest.java
+++ /dev/null
@@ -1,303 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.handlers;
-
-import org.apache.ambari.server.api.predicate.InvalidQueryException;
-import org.apache.ambari.server.api.query.Query;
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.apache.ambari.server.api.services.Request;
-import org.apache.ambari.server.api.services.Result;
-import org.apache.ambari.server.api.services.ResultStatus;
-import org.apache.ambari.server.controller.spi.*;
-import org.easymock.Capture;
-import org.junit.Test;
-
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-
-import static org.easymock.EasyMock.*;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Unit tests for ReadHandler.
- */
-public class ReadHandlerTest {
-
-  @Test
-  public void testHandleRequest__InvalidField() {
-    Request request = createNiceMock(Request.class);
-    ResourceInstance resource = createNiceMock(ResourceInstance.class);
-    Query query = createStrictMock(Query.class);
-
-    Map<String, TemporalInfo> mapPartialResponseFields = new HashMap<String, TemporalInfo>();
-    mapPartialResponseFields.put("foo/bar", null);
-
-    expect(request.getResource()).andReturn(resource);
-    expect(request.getFields()).andReturn(mapPartialResponseFields);
-    expect(resource.getQuery()).andReturn(query);
-
-    query.addProperty("foo", "bar", null);
-    expectLastCall().andThrow(new IllegalArgumentException("testMsg"));
-
-    replay(request, resource, query);
-
-    ReadHandler handler = new ReadHandler();
-    Result result = handler.handleRequest(request);
-
-    assertEquals(ResultStatus.STATUS.BAD_REQUEST, result.getStatus().getStatus());
-    assertEquals("testMsg", result.getStatus().getMessage());
-
-    verify(request, resource, query);
-  }
-
-  @Test
-  public void testHandleRequest__OK() throws Exception {
-    Request request = createStrictMock(Request.class);
-    ResourceInstance resource = createStrictMock(ResourceInstance.class);
-    Query query = createMock(Query.class);
-    Predicate predicate = createMock(Predicate.class);
-    Result result = createStrictMock(Result.class);
-    Capture<ResultStatus> resultStatusCapture = new Capture<ResultStatus>();
-
-    Map<String, TemporalInfo> mapPartialResponseFields = new HashMap<String, TemporalInfo>();
-    mapPartialResponseFields.put("foo", null);
-    mapPartialResponseFields.put("bar/c", null);
-    mapPartialResponseFields.put("bar/d/e", null);
-    mapPartialResponseFields.put("category/", null);
-    //expectations
-    expect(request.getResource()).andReturn(resource);
-    expect(resource.getQuery()).andReturn(query);
-
-    expect(request.getFields()).andReturn(mapPartialResponseFields);
-    query.addProperty(null, "foo", null);
-    query.addProperty("bar", "c", null);
-    query.addProperty("bar/d", "e", null);
-    query.addProperty("category", "", null);
-
-    expect(request.getQueryPredicate()).andReturn(predicate);
-    query.setUserPredicate(predicate);
-    expect(query.execute()).andReturn(result);
-    result.setResultStatus(capture(resultStatusCapture));
-
-    replay(request, resource, query, predicate, result);
-
-    //test
-    ReadHandler handler = new ReadHandler();
-    assertSame(result, handler.handleRequest(request));
-    assertEquals(ResultStatus.STATUS.OK, resultStatusCapture.getValue().getStatus());
-    verify(request, resource, query, predicate, result);
-  }
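-
-  // Added commentary: the addProperty expectations above imply the assumed
-  // splitting rule for partial-response fields -- split on the last '/', with
-  // everything before it as the category and everything after it as the
-  // property name.  A minimal sketch of that rule (not the handler's code):
-  //
-  //   int idx = field.lastIndexOf('/');
-  //   String category = (idx == -1) ? null : field.substring(0, idx);  // "bar/d" for "bar/d/e"
-  //   String property = field.substring(idx + 1);                      // "e"
-  //   query.addProperty(category, property, temporalInfo);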
-
-  @Test
-  public void testHandleRequest__SystemException() throws Exception {
-    Request request = createStrictMock(Request.class);
-    ResourceInstance resource = createStrictMock(ResourceInstance.class);
-    Query query = createMock(Query.class);
-    Predicate predicate = createMock(Predicate.class);
-
-    expect(request.getResource()).andReturn(resource);
-    expect(resource.getQuery()).andReturn(query);
-
-    expect(request.getFields()).andReturn(Collections.<String, TemporalInfo>emptyMap());
-
-    expect(request.getQueryPredicate()).andReturn(predicate);
-    query.setUserPredicate(predicate);
-    SystemException systemException = new SystemException("testMsg", new RuntimeException());
-    expect(query.execute()).andThrow(systemException);
-
-    replay(request, resource, query, predicate);
-
-    //test
-    ReadHandler handler = new ReadHandler();
-    Result result = handler.handleRequest(request);
-    assertEquals(ResultStatus.STATUS.SERVER_ERROR, result.getStatus().getStatus());
-    assertEquals(systemException.toString(), result.getStatus().getMessage());
-    verify(request, resource, query, predicate);
-  }
-
-  @Test
-  public void testHandleRequest__NoSuchParentResourceException() throws Exception {
-    Request request = createStrictMock(Request.class);
-    ResourceInstance resource = createStrictMock(ResourceInstance.class);
-    Query query = createMock(Query.class);
-    Predicate predicate = createMock(Predicate.class);
-    NoSuchParentResourceException exception = new NoSuchParentResourceException("exceptionMsg", new RuntimeException());
-
-    expect(request.getResource()).andReturn(resource);
-    expect(resource.getQuery()).andReturn(query);
-
-    expect(request.getFields()).andReturn(Collections.<String, TemporalInfo>emptyMap());
-
-    expect(request.getQueryPredicate()).andReturn(predicate);
-    query.setUserPredicate(predicate);
-
-    expect(query.execute()).andThrow(exception);
-
-    replay(request, resource, query, predicate);
-
-    //test
-    ReadHandler handler = new ReadHandler();
-    Result result = handler.handleRequest(request);
-    assertEquals(ResultStatus.STATUS.NOT_FOUND, result.getStatus().getStatus());
-    assertEquals("exceptionMsg", result.getStatus().getMessage());
-    verify(request, resource, query, predicate);
-  }
-
-  @Test
-  public void testHandleRequest__UnsupportedPropertyException() throws Exception {
-    Request request = createStrictMock(Request.class);
-    ResourceInstance resource = createStrictMock(ResourceInstance.class);
-    Query query = createMock(Query.class);
-    Predicate predicate = createMock(Predicate.class);
-    UnsupportedPropertyException exception = new UnsupportedPropertyException(
-        Resource.Type.Cluster, Collections.singleton("foo"));
-
-    expect(request.getResource()).andReturn(resource);
-    expect(resource.getQuery()).andReturn(query);
-
-    expect(request.getFields()).andReturn(Collections.<String, TemporalInfo>emptyMap());
-
-    expect(request.getQueryPredicate()).andReturn(predicate);
-    query.setUserPredicate(predicate);
-
-    expect(query.execute()).andThrow(exception);
-
-    replay(request, resource, query, predicate);
-
-    //test
-    ReadHandler handler = new ReadHandler();
-    Result result = handler.handleRequest(request);
-    assertEquals(ResultStatus.STATUS.BAD_REQUEST, result.getStatus().getStatus());
-    assertEquals(exception.getMessage(), result.getStatus().getMessage());
-    verify(request, resource, query, predicate);
-  }
-
-  @Test
-  public void testHandleRequest__NoSuchResourceException_OK() throws Exception {
-    Request request = createStrictMock(Request.class);
-    ResourceInstance resource = createStrictMock(ResourceInstance.class);
-    Query query = createMock(Query.class);
-    Predicate predicate = createMock(Predicate.class);
-    NoSuchResourceException exception = new NoSuchResourceException("msg", new RuntimeException());
-
-    expect(request.getResource()).andReturn(resource);
-    expect(resource.getQuery()).andReturn(query);
-
-    expect(request.getFields()).andReturn(Collections.<String, TemporalInfo>emptyMap());
-
-    expect(request.getQueryPredicate()).andReturn(predicate).anyTimes();
-    query.setUserPredicate(predicate);
-
-    expect(query.execute()).andThrow(exception);
-
-    replay(request, resource, query, predicate);
-
-    //test
-    ReadHandler handler = new ReadHandler();
-    Result result = handler.handleRequest(request);
-    // ok because this is a query that returned no rows
-    assertEquals(ResultStatus.STATUS.OK, result.getStatus().getStatus());
-    verify(request, resource, query, predicate);
-  }
-
-  @Test
-  public void testHandleRequest__NoSuchResourceException_NOT_FOUND() throws Exception {
-    Request request = createStrictMock(Request.class);
-    ResourceInstance resource = createStrictMock(ResourceInstance.class);
-    Query query = createMock(Query.class);
-    NoSuchResourceException exception = new NoSuchResourceException("msg", new RuntimeException());
-
-    expect(request.getResource()).andReturn(resource);
-    expect(resource.getQuery()).andReturn(query);
-
-    expect(request.getFields()).andReturn(Collections.<String, TemporalInfo>emptyMap());
-
-    expect(request.getQueryPredicate()).andReturn(null).anyTimes();
-    query.setUserPredicate(null);
-
-    expect(query.execute()).andThrow(exception);
-
-    replay(request, resource, query);
-
-    //test
-    ReadHandler handler = new ReadHandler();
-    Result result = handler.handleRequest(request);
-    // not a query, so not found
-    assertEquals(ResultStatus.STATUS.NOT_FOUND, result.getStatus().getStatus());
-    assertEquals(exception.getMessage(), result.getStatus().getMessage());
-    verify(request, resource, query);
-  }
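-
-  // Added commentary: taken together, the two NoSuchResourceException tests
-  // above fix the assumed status rule -- an empty result is an error only when
-  // a specific resource was addressed.  Sketch of the implied handling
-  // (ResultStatus constructor shape assumed):
-  //
-  //   catch (NoSuchResourceException e) {
-  //     ResultStatus.STATUS s = (request.getQueryPredicate() != null)
-  //         ? ResultStatus.STATUS.OK          // a query that matched nothing
-  //         : ResultStatus.STATUS.NOT_FOUND;  // direct GET of a missing resource
-  //     result.setResultStatus(new ResultStatus(s, e.getMessage()));
-  //   }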
-
-  @Test
-  public void testHandleRequest__InvalidQueryException() throws Exception {
-    Request request = createStrictMock(Request.class);
-    ResourceInstance resource = createStrictMock(ResourceInstance.class);
-    Query query = createMock(Query.class);
-    InvalidQueryException exception = new InvalidQueryException("test");
-
-    expect(request.getResource()).andReturn(resource);
-    expect(resource.getQuery()).andReturn(query);
-
-    expect(request.getFields()).andReturn(Collections.<String, TemporalInfo>emptyMap());
-
-    expect(request.getQueryPredicate()).andThrow(exception);
-    replay(request, resource, query);
-
-    //test
-    ReadHandler handler = new ReadHandler();
-    Result result = handler.handleRequest(request);
-
-    assertEquals(ResultStatus.STATUS.BAD_REQUEST, result.getStatus().getStatus());
-    assertTrue(result.getStatus().getMessage().contains(exception.getMessage()));
-    verify(request, resource, query);
-  }
-
-  //todo: reverted to just logging the exception and re-throwing it
-//  @Test
-//  public void testHandleRequest__RuntimeException() throws Exception {
-//    Request request = createStrictMock(Request.class);
-//    ResourceInstance resource = createStrictMock(ResourceInstance.class);
-//    Query query = createMock(Query.class);
-//    RuntimeException exception = new RuntimeException("msg");
-//
-//    expect(request.getResource()).andReturn(resource);
-//    expect(resource.getQuery()).andReturn(query);
-//
-//    expect(request.getFields()).andReturn(Collections.<String, TemporalInfo>emptyMap());
-//
-//    expect(request.getQueryPredicate()).andReturn(null).anyTimes();
-//    query.setUserPredicate(null);
-//
-//    expect(query.execute()).andThrow(exception);
-//
-//    replay(request, resource, query);
-//
-//    //test
-//    ReadHandler handler = new ReadHandler();
-//    Result result = handler.handleRequest(request);
-//    // not a query, so not found
-//    assertEquals(ResultStatus.STATUS.SERVER_ERROR, result.getStatus().getStatus());
-//    assertEquals(exception.toString(), result.getStatus().getMessage());
-//    verify(request, resource, query);
-//  }
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/handlers/UpdateHandlerTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/handlers/UpdateHandlerTest.java
deleted file mode 100644
index 2788fa8..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/handlers/UpdateHandlerTest.java
+++ /dev/null
@@ -1,194 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.handlers;
-
-import org.apache.ambari.server.api.predicate.InvalidQueryException;
-import org.apache.ambari.server.api.query.Query;
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.apache.ambari.server.api.services.ResultStatus;
-import org.apache.ambari.server.api.services.persistence.PersistenceManager;
-import org.apache.ambari.server.api.services.Request;
-import org.apache.ambari.server.api.services.Result;
-import org.apache.ambari.server.api.util.TreeNode;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.RequestStatus;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.junit.Test;
-
-import java.util.*;
-
-import static org.easymock.EasyMock.*;
-import static org.junit.Assert.*;
-
-/**
- * Unit tests for UpdateHandler.
- */
-public class UpdateHandlerTest {
-
-  @Test
-  public void testHandleRequest__Synchronous() throws Exception {
-    Request request = createMock(Request.class);
-    ResourceInstance resource = createMock(ResourceInstance.class);
-    PersistenceManager pm = createStrictMock(PersistenceManager.class);
-    RequestStatus status = createMock(RequestStatus.class);
-    Resource resource1 = createMock(Resource.class);
-    Resource resource2 = createMock(Resource.class);
-    Predicate userPredicate = createNiceMock(Predicate.class);
-    Query query = createNiceMock(Query.class);
-
-    Set<Map<String, Object>> setResourceProperties = new HashSet<Map<String, Object>>();
-
-    Set<Resource> setResources = new HashSet<Resource>();
-    setResources.add(resource1);
-    setResources.add(resource2);
-
-    // expectations
-    expect(request.getResource()).andReturn(resource).anyTimes();
-    expect(request.getHttpBodyProperties()).andReturn(setResourceProperties).anyTimes();
-    expect(request.getQueryPredicate()).andReturn(userPredicate).atLeastOnce();
-
-    expect(resource.getQuery()).andReturn(query).atLeastOnce();
-    query.setUserPredicate(userPredicate);
-
-    expect(pm.update(resource, setResourceProperties)).andReturn(status);
-    expect(status.getStatus()).andReturn(RequestStatus.Status.Complete);
-    expect(status.getAssociatedResources()).andReturn(setResources);
-    expect(resource1.getType()).andReturn(Resource.Type.Cluster).anyTimes();
-    expect(resource2.getType()).andReturn(Resource.Type.Cluster).anyTimes();
-
-    replay(request, resource, pm, status, resource1, resource2, userPredicate, query);
-
-    Result result = new TestUpdateHandler(pm).handleRequest(request);
-
-    assertNotNull(result);
-    TreeNode<Resource> tree = result.getResultTree();
-    assertEquals(1, tree.getChildren().size());
-    TreeNode<Resource> resourcesNode = tree.getChild("resources");
-    assertEquals(2, resourcesNode.getChildren().size());
-    boolean foundResource1 = false;
-    boolean foundResource2 = false;
-    for(TreeNode<Resource> child : resourcesNode.getChildren()) {
-      Resource r = child.getObject();
-      if (r == resource1 && ! foundResource1) {
-        foundResource1 = true;
-      } else if (r == resource2 && ! foundResource2) {
-        foundResource2 = true;
-      } else {
-        fail();
-      }
-    }
-
-    assertEquals(ResultStatus.STATUS.OK, result.getStatus().getStatus());
-    verify(request, resource, pm, status, resource1, resource2, userPredicate, query);
-  }
-
-  @Test
-  public void testHandleRequest__Asynchronous() throws Exception {
-    Request request = createMock(Request.class);
-    ResourceInstance resource = createMock(ResourceInstance.class);
-    PersistenceManager pm = createStrictMock(PersistenceManager.class);
-    RequestStatus status = createMock(RequestStatus.class);
-    Resource resource1 = createMock(Resource.class);
-    Resource resource2 = createMock(Resource.class);
-    Resource requestResource = createMock(Resource.class);
-    Predicate userPredicate = createNiceMock(Predicate.class);
-    Query query = createNiceMock(Query.class);
-
-    Set<Map<String, Object>> setResourceProperties = new HashSet<Map<String, Object>>();
-
-    Set<Resource> setResources = new HashSet<Resource>();
-    setResources.add(resource1);
-    setResources.add(resource2);
-
-    // expectations
-    expect(request.getResource()).andReturn(resource);
-    expect(request.getHttpBodyProperties()).andReturn(setResourceProperties);
-    expect(request.getQueryPredicate()).andReturn(userPredicate).atLeastOnce();
-
-    expect(resource.getQuery()).andReturn(query).atLeastOnce();
-    query.setUserPredicate(userPredicate);
-
-    expect(pm.update(resource, setResourceProperties)).andReturn(status);
-    expect(status.getStatus()).andReturn(RequestStatus.Status.Accepted);
-    expect(status.getAssociatedResources()).andReturn(setResources);
-    expect(resource1.getType()).andReturn(Resource.Type.Cluster).anyTimes();
-    expect(resource2.getType()).andReturn(Resource.Type.Cluster).anyTimes();
-    expect(status.getRequestResource()).andReturn(requestResource).anyTimes();
-
-    replay(request, resource, pm, status, resource1, resource2, requestResource, userPredicate, query);
-
-    Result result = new TestUpdateHandler(pm).handleRequest(request);
-
-    assertNotNull(result);
-    TreeNode<Resource> tree = result.getResultTree();
-    assertEquals(2, tree.getChildren().size());
-    TreeNode<Resource> resourcesNode = tree.getChild("resources");
-    assertEquals(2, resourcesNode.getChildren().size());
-    boolean foundResource1 = false;
-    boolean foundResource2 = false;
-    for(TreeNode<Resource> child : resourcesNode.getChildren()) {
-      Resource r = child.getObject();
-      if (r == resource1 && ! foundResource1) {
-        foundResource1 = true;
-      } else if (r == resource2 && ! foundResource2) {
-        foundResource2 = true;
-      } else {
-        fail();
-      }
-    }
-
-    TreeNode<Resource> statusNode = tree.getChild("request");
-    assertNotNull(statusNode);
-    assertEquals(0, statusNode.getChildren().size());
-    assertSame(requestResource, statusNode.getObject());
-    assertEquals(ResultStatus.STATUS.ACCEPTED, result.getStatus().getStatus());
-
-    verify(request, resource, pm, status, resource1, resource2, requestResource, userPredicate, query);
-  }
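-
-  // Added commentary: the synchronous and asynchronous tests above pin the
-  // assumed mapping from persistence status to HTTP-level result --
-  //   RequestStatus.Status.Complete -> STATUS.OK, tree holds only "resources"
-  //   RequestStatus.Status.Accepted -> STATUS.ACCEPTED, plus a "request" node
-  //     carrying the resource that tracks the still-running request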
-
-  private class TestUpdateHandler extends UpdateHandler {
-    private PersistenceManager m_testPm;
-
-    private TestUpdateHandler(PersistenceManager pm) {
-      m_testPm = pm;
-    }
-
-    @Override
-    protected PersistenceManager getPersistenceManager() {
-      return m_testPm;
-    }
-  }
-
-  @Test
-  public void testHandleRequest__InvalidQuery() throws Exception {
-    Request request = createNiceMock(Request.class);
-    ResourceInstance resource = createNiceMock(ResourceInstance.class);
-    Exception e = new InvalidQueryException("test exception");
-
-    expect(request.getResource()).andReturn(resource);
-    expect(request.getQueryPredicate()).andThrow(e);
-    replay(request, resource);
-
-    Result result = new UpdateHandler().handleRequest(request);
-    assertEquals(ResultStatus.STATUS.BAD_REQUEST, result.getStatus().getStatus());
-    assertTrue(result.getStatus().getMessage().contains(e.getMessage()));
-
-    verify(request, resource);
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/QueryLexerTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/QueryLexerTest.java
deleted file mode 100644
index e4bb220..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/QueryLexerTest.java
+++ /dev/null
@@ -1,226 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-package org.apache.ambari.server.api.predicate;
-
-import org.junit.Test;
-
-import java.util.ArrayList;
-import java.util.List;
-import static org.junit.Assert.*;
-
-/**
- * QueryLexer unit tests
- */
-public class QueryLexerTest {
-
-  @Test
-  public void testTokens_simple() throws InvalidQueryException {
-    List<Token> listTokens = new ArrayList<Token>();
-    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, "="));
-    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "a"));
-    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "1"));
-    listTokens.add(new Token(Token.TYPE.LOGICAL_OPERATOR, "&"));
-    listTokens.add(new Token(Token.TYPE.BRACKET_OPEN, "("));
-    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, "<="));
-    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "b"));
-    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "2"));
-    listTokens.add(new Token(Token.TYPE.LOGICAL_OPERATOR, "|"));
-    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, ">"));
-    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "c"));
-    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "3"));
-    listTokens.add(new Token(Token.TYPE.BRACKET_CLOSE, ")"));
-
-    QueryLexer lexer = new QueryLexer();
-    Token[] tokens = lexer.tokens("a=1&(b<=2|c>3)");
-
-    assertArrayEquals(listTokens.toArray(new Token[listTokens.size()]), tokens);
-  }
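-
-  // Added commentary: note the token order the lexer is expected to produce --
-  // each relational operator precedes its operands ("=", "a", "1" for "a=1"),
-  // so the stream arrives in prefix form and the parser never has to re-order
-  // operands around an operator.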
-
-  @Test
-  public void testTokens_multipleBrackets() throws InvalidQueryException {
-    List<Token> listTokens = new ArrayList<Token>();
-    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, "<"));
-    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "a"));
-    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "1"));
-    listTokens.add(new Token(Token.TYPE.LOGICAL_OPERATOR, "&"));
-    listTokens.add(new Token(Token.TYPE.BRACKET_OPEN, "("));
-    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, "<="));
-    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "b"));
-    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "2"));
-    listTokens.add(new Token(Token.TYPE.LOGICAL_OPERATOR, "&"));
-    listTokens.add(new Token(Token.TYPE.BRACKET_OPEN, "("));
-    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, ">="));
-    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "c"));
-    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "3"));
-    listTokens.add(new Token(Token.TYPE.LOGICAL_OPERATOR, "|"));
-    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, "!="));
-    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "d"));
-    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "4"));
-    listTokens.add(new Token(Token.TYPE.BRACKET_CLOSE, ")"));
-    listTokens.add(new Token(Token.TYPE.BRACKET_CLOSE, ")"));
-
-    QueryLexer lexer = new QueryLexer();
-    Token[] tokens = lexer.tokens("a<1&(b<=2&(c>=3|d!=4))");
-
-    assertArrayEquals(listTokens.toArray(new Token[listTokens.size()]), tokens);
-  }
-
-  @Test
-  public void testUnaryNot() throws Exception {
-    QueryLexer lexer = new QueryLexer();
-    Token[] tokens = lexer.tokens("!foo<5");
-
-    List<Token> listTokens = new ArrayList<Token>();
-    listTokens.add(new Token(Token.TYPE.LOGICAL_UNARY_OPERATOR, "!"));
-    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, "<"));
-    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "foo"));
-    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "5"));
-    assertArrayEquals(listTokens.toArray(new Token[listTokens.size()]), tokens);
-  }
-
-  @Test
-  public void testInOperator() throws Exception {
-    QueryLexer lexer = new QueryLexer();
-    Token[] tokens = lexer.tokens("foo.in(one, two, 3)");
-
-    List<Token> listTokens = new ArrayList<Token>();
-    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR_FUNC, ".in("));
-    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "foo"));
-    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "one, two, 3"));
-    listTokens.add(new Token(Token.TYPE.BRACKET_CLOSE, ")"));
-
-    assertArrayEquals(listTokens.toArray(new Token[listTokens.size()]), tokens);
-  }
-
-  @Test
-  public void testIsEmptyOperator() throws Exception {
-    QueryLexer lexer = new QueryLexer();
-    Token[] tokens = lexer.tokens("category1.isEmpty()");
-
-    List<Token> listTokens = new ArrayList<Token>();
-    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR_FUNC, ".isEmpty("));
-    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "category1"));
-    listTokens.add(new Token(Token.TYPE.BRACKET_CLOSE, ")"));
-
-    assertArrayEquals(listTokens.toArray(new Token[listTokens.size()]), tokens);
-  }
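-
-  // Added commentary: for the function-style operators the token text itself
-  // carries the opening bracket (".in(", ".isEmpty("), and the argument list
-  // is closed by an ordinary BRACKET_CLOSE token; the property operand still
-  // follows the operator, matching the prefix ordering noted above.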
-
-  @Test
-  public void testTokens_ignoreFieldsSyntax___noPredicate() throws InvalidQueryException {
-
-    QueryLexer lexer = new QueryLexer();
-    Token[] tokens = lexer.tokens("fields=foo,bar");
-    assertEquals(0, tokens.length);
-  }
-
-  @Test
-  public void testTokens_ignoreFieldsSyntax___fieldsFirst() throws InvalidQueryException {
-
-    List<Token> listTokens = new ArrayList<Token>();
-    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, "="));
-    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "foo"));
-    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "1"));
-
-    QueryLexer lexer = new QueryLexer();
-    Token[] tokens = lexer.tokens("fields=foo,bar&foo=1");
-
-    assertArrayEquals(listTokens.toArray(new Token[listTokens.size()]), tokens);
-  }
-
-  @Test
-  public void testTokens_ignoreFieldsSyntax___fieldsLast() throws InvalidQueryException {
-
-    List<Token> listTokens = new ArrayList<Token>();
-    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, "="));
-    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "foo"));
-    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "1"));
-
-    QueryLexer lexer = new QueryLexer();
-    Token[] tokens = lexer.tokens("foo=1&fields=foo,bar");
-
-    assertArrayEquals(listTokens.toArray(new Token[listTokens.size()]), tokens);
-  }
-
-  @Test
-  public void testTokens_ignoreUnderscoreSyntax___noPredicate() throws InvalidQueryException {
-
-    QueryLexer lexer = new QueryLexer();
-    Token[] tokens = lexer.tokens("_=1");
-    assertEquals(0, tokens.length);
-  }
-
-  @Test
-  public void testTokens_ignoreUnderscoreSyntax___fieldsFirst() throws InvalidQueryException {
-
-    List<Token> listTokens = new ArrayList<Token>();
-    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, "="));
-    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "foo"));
-    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "1"));
-
-    QueryLexer lexer = new QueryLexer();
-    Token[] tokens = lexer.tokens("_=111111&foo=1");
-
-    assertArrayEquals(listTokens.toArray(new Token[listTokens.size()]), tokens);
-  }
-
-  @Test
-  public void testTokens_ignoreUnderscoreSyntax___fieldsLast() throws InvalidQueryException {
-
-    List<Token> listTokens = new ArrayList<Token>();
-    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, "="));
-    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "foo"));
-    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "1"));
-
-    QueryLexer lexer = new QueryLexer();
-    Token[] tokens = lexer.tokens("foo=1&_=11111");
-
-    assertArrayEquals(listTokens.toArray(new Token[listTokens.size()]), tokens);
-  }
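-
-  // Added commentary: the four "ignore" tests above show that non-predicate
-  // query parameters are stripped before tokenizing -- "fields" is the partial
-  // response directive, and "_" is presumably the cache-busting timestamp that
-  // browsers/jQuery append to GET requests.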
-
-  @Test
-  public void testTokens_invalidRelationalOp() {
-    try {
-      new QueryLexer().tokens("foo=1&bar|5");
-      fail("Expected InvalidQueryException due to invalid relational op");
-    } catch (InvalidQueryException e) {
-      //expected
-    }
-  }
-
-  @Test
-  public void testTokens_invalidLogicalOp() {
-    try {
-      new QueryLexer().tokens("foo=1<5=2");
-      fail("Expected InvalidQueryException due to invalid logical op");
-    } catch (InvalidQueryException e) {
-      //expected
-    }
-  }
-
-  @Test
-  public void testTokens_invalidLogicalOp2() {
-    try {
-      new QueryLexer().tokens("foo=1&&5=2");
-      fail("Expected InvalidQueryException due to invalid logical op");
-    } catch (InvalidQueryException e) {
-      //expected
-    }
-  }
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/QueryParserTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/QueryParserTest.java
deleted file mode 100644
index a37114e..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/QueryParserTest.java
+++ /dev/null
@@ -1,288 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-package org.apache.ambari.server.api.predicate;
-
-import org.apache.ambari.server.controller.predicate.*;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.junit.Test;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import static org.junit.Assert.*;
-
-/**
- * QueryParser unit tests.
- */
-public class QueryParserTest {
-
-  @Test
-  public void testParse_simple() throws Exception {
-    List<Token> listTokens = new ArrayList<Token>();
-    //a=b
-    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, "="));
-    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "a"));
-    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "b"));
-
-    QueryParser parser = new QueryParser();
-    Predicate p = parser.parse(listTokens.toArray(new Token[listTokens.size()]));
-
-    assertEquals(new EqualsPredicate<String>("a", "b"), p);
-  }
-
-  @Test
-  public void testParse() throws InvalidQueryException {
-    List<Token> listTokens = new ArrayList<Token>();
-    // foo=bar&(a<1&(b<=2|c>3)&d>=100)|e!=5&!(f=6|g=7)
-    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, "="));
-    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "foo"));
-    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "bar"));
-    listTokens.add(new Token(Token.TYPE.LOGICAL_OPERATOR, "&"));
-    listTokens.add(new Token(Token.TYPE.BRACKET_OPEN, "("));
-    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, "<"));
-    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "a"));
-    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "1"));
-    listTokens.add(new Token(Token.TYPE.LOGICAL_OPERATOR, "&"));
-    listTokens.add(new Token(Token.TYPE.BRACKET_OPEN, "("));
-    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, "<="));
-    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "b"));
-    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "2"));
-    listTokens.add(new Token(Token.TYPE.LOGICAL_OPERATOR, "|"));
-    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, ">"));
-    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "c"));
-    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "3"));
-    listTokens.add(new Token(Token.TYPE.BRACKET_CLOSE, ")"));
-    listTokens.add(new Token(Token.TYPE.LOGICAL_OPERATOR, "&"));
-    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, ">="));
-    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "d"));
-    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "100"));
-    listTokens.add(new Token(Token.TYPE.BRACKET_CLOSE, ")"));
-    listTokens.add(new Token(Token.TYPE.LOGICAL_OPERATOR, "|"));
-    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, "!="));
-    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "e"));
-    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "5"));
-    listTokens.add(new Token(Token.TYPE.LOGICAL_OPERATOR, "&"));
-    listTokens.add(new Token(Token.TYPE.LOGICAL_UNARY_OPERATOR, "!"));
-    listTokens.add(new Token(Token.TYPE.BRACKET_OPEN, "("));
-    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, "="));
-    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "f"));
-    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "6"));
-    listTokens.add(new Token(Token.TYPE.LOGICAL_OPERATOR, "|"));
-    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, "="));
-    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "g"));
-    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "7"));
-    listTokens.add(new Token(Token.TYPE.BRACKET_CLOSE, ")"));
-
-    QueryParser parser = new QueryParser();
-    Predicate p = parser.parse(listTokens.toArray(new Token[listTokens.size()]));
-
-    EqualsPredicate<String> fooPred = new EqualsPredicate<String>("foo", "bar");
-    LessPredicate<String> aPred = new LessPredicate<String>("a", "1");
-    LessEqualsPredicate<String> bPred = new LessEqualsPredicate<String>("b", "2");
-    GreaterEqualsPredicate<String> cPred = new GreaterEqualsPredicate<String>("c", "3");
-    GreaterEqualsPredicate<String> dPred = new GreaterEqualsPredicate<String>("d", "100");
-    NotPredicate ePred = new NotPredicate(new EqualsPredicate<String>("e", "5"));
-    EqualsPredicate fPred = new EqualsPredicate<String>("f", "6");
-    EqualsPredicate gPred = new EqualsPredicate<String>("g", "7");
-    OrPredicate bORcPred = new OrPredicate(bPred, cPred);
-    AndPredicate aANDbORcPred = new AndPredicate(aPred, bORcPred);
-    AndPredicate aANDbORcANDdPred = new AndPredicate(aANDbORcPred, dPred);
-    AndPredicate fooANDaANDbORcANDdPred = new AndPredicate(fooPred, aANDbORcANDdPred);
-    OrPredicate fORgPred = new OrPredicate(fPred, gPred);
-    NotPredicate NOTfORgPred = new NotPredicate(fORgPred);
-    AndPredicate eANDNOTfORgPred = new AndPredicate(ePred, NOTfORgPred);
-    OrPredicate rootPredicate = new OrPredicate(fooANDaANDbORcANDdPred, eANDNOTfORgPred);
-
-    assertEquals(rootPredicate, p);
-  }
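-
-  // Added commentary: with '&' binding tighter than '|' and brackets raising
-  // precedence, the input
-  //   foo=bar&(a<1&(b<=2|c>3)&d>=100)|e!=5&!(f=6|g=7)
-  // groups as
-  //   (foo=bar & ((a<1 & (b<=2 | c>3)) & d>=100)) | (e!=5 & !(f=6 | g=7))
-  // which is exactly the predicate tree assembled above.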
-
-  @Test
-  public void testParse_NotOp__simple() throws Exception {
-    List<Token> listTokens = new ArrayList<Token>();
-    //!a=b
-    listTokens.add(new Token(Token.TYPE.LOGICAL_UNARY_OPERATOR, "!"));
-    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, "="));
-    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "a"));
-    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "b"));
-
-    QueryParser parser = new QueryParser();
-    Predicate p = parser.parse(listTokens.toArray(new Token[listTokens.size()]));
-
-    assertEquals(new NotPredicate(new EqualsPredicate<String>("a", "b")), p);
-  }
-
-  @Test
-  public void testParse_NotOp() throws Exception {
-    List<Token> listTokens = new ArrayList<Token>();
-     //a=1&!b=2
-    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, "="));
-    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "a"));
-    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "1"));
-    listTokens.add(new Token(Token.TYPE.LOGICAL_OPERATOR, "&"));
-    listTokens.add(new Token(Token.TYPE.LOGICAL_UNARY_OPERATOR, "!"));
-    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, "="));
-    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "b"));
-    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "2"));
-
-    QueryParser parser = new QueryParser();
-    Predicate p = parser.parse(listTokens.toArray(new Token[listTokens.size()]));
-
-    EqualsPredicate aPred = new EqualsPredicate<String>("a", "1");
-    EqualsPredicate bPred = new EqualsPredicate<String>("b", "2");
-    NotPredicate notPred = new NotPredicate(bPred);
-    AndPredicate andPred = new AndPredicate(aPred, notPred);
-
-    assertEquals(andPred, p);
-  }
-
-  @Test
-  public void testParse_InOp__simple() throws Exception {
-    List<Token> listTokens = new ArrayList<Token>();
-    // foo.in(one,two,3)
-    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR_FUNC, ".in("));
-    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "foo"));
-    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "one,two,3"));
-    listTokens.add(new Token(Token.TYPE.BRACKET_CLOSE, ")"));
-
-    QueryParser parser = new QueryParser();
-    Predicate p = parser.parse(listTokens.toArray(new Token[listTokens.size()]));
-
-    EqualsPredicate ep1 = new EqualsPredicate("foo", "one");
-    EqualsPredicate ep2 = new EqualsPredicate("foo", "two");
-    EqualsPredicate ep3 = new EqualsPredicate("foo", "3");
-
-    OrPredicate orPredicate = new OrPredicate(ep1, ep2, ep3);
-
-    assertEquals(orPredicate, p);
-  }
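-
-  // Added commentary: ".in(" is evidently desugared into an OR over the
-  // comma-separated values rather than parsed as a distinct predicate type:
-  //
-  //   foo.in(one,two,3)  ==>  new OrPredicate(new EqualsPredicate("foo", "one"),
-  //                                           new EqualsPredicate("foo", "two"),
-  //                                           new EqualsPredicate("foo", "3"));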
-
-  @Test
-  public void testParse_InOp__exception() throws Exception {
-    List<Token> listTokens = new ArrayList<Token>();
-    // foo.in()
-    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR_FUNC, ".in("));
-    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "foo"));
-    listTokens.add(new Token(Token.TYPE.BRACKET_CLOSE, ")"));
-
-    QueryParser parser = new QueryParser();
-    try {
-      parser.parse(listTokens.toArray(new Token[listTokens.size()]));
-      fail("Expected InvalidQueryException due to missing right operand");
-    } catch (InvalidQueryException e) {
-      // expected
-    }
-  }
-
-  @Test
-  public void testParse_isEmptyOp__simple() throws Exception {
-    List<Token> listTokens = new ArrayList<Token>();
-    // category1.isEmpty()
-    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR_FUNC, ".isEmpty("));
-    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "category1"));
-    listTokens.add(new Token(Token.TYPE.BRACKET_CLOSE, ")"));
-
-    QueryParser parser = new QueryParser();
-    Predicate p = parser.parse(listTokens.toArray(new Token[listTokens.size()]));
-
-    assertEquals(new CategoryIsEmptyPredicate("category1"), p);
-  }
-
-  @Test
-  public void testParse_isEmptyOp__exception() throws Exception {
-    List<Token> listTokens = new ArrayList<Token>();
-    // category1.isEmpty(
-    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR_FUNC, ".isEmpty("));
-    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "category1"));
-    // missing closing bracket
-
-    QueryParser parser = new QueryParser();
-    try {
-      parser.parse(listTokens.toArray(new Token[listTokens.size()]));
-      fail("Expected InvalidQueryException due to missing closing bracket");
-    } catch (InvalidQueryException e) {
-      // expected
-    }
-  }
-
-  @Test
-  public void testParse_isEmptyOp__exception2() throws Exception {
-    List<Token> listTokens = new ArrayList<Token>();
-    // category1.isEmpty(one,two,3) - isEmpty takes no value operand
-    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR_FUNC, ".isEmpty("));
-    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "category1"));
-    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "one,two,3"));
-    listTokens.add(new Token(Token.TYPE.BRACKET_CLOSE, ")"));
-
-    QueryParser parser = new QueryParser();
-    try {
-      parser.parse(listTokens.toArray(new Token[listTokens.size()]));
-      fail("Expected InvalidQueryException due to existence of right operand");
-    } catch (InvalidQueryException e) {
-      // expected
-    }
-  }
-
-  @Test
-  public void testParse_noTokens() throws InvalidQueryException {
-    assertNull(new QueryParser().parse(new Token[0]));
-  }
-
-  @Test
-  public void testParse_mismatchedBrackets() {
-    List<Token> listTokens = new ArrayList<Token>();
-    // a=1&(b<=2|c>3
-    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, "="));
-    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "a"));
-    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "1"));
-    listTokens.add(new Token(Token.TYPE.LOGICAL_OPERATOR, "&"));
-    listTokens.add(new Token(Token.TYPE.BRACKET_OPEN, "("));
-    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, "<="));
-    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "b"));
-    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "2"));
-    listTokens.add(new Token(Token.TYPE.LOGICAL_OPERATOR, "|"));
-    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, ">"));
-    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "c"));
-    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "3"));
-
-    try {
-      new QueryParser().parse(listTokens.toArray(new Token[listTokens.size()]));
-      fail("Expected InvalidQueryException due to missing closing bracket");
-    } catch (InvalidQueryException e) {
-      // expected
-    }
-  }
-
-  @Test
-  public void testParse_outOfOrderTokens() {
-    List<Token> listTokens = new ArrayList<Token>();
-    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, "="));
-    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "a"));
-    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "1"));
-    // should be a logical operator
-    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, "="));
-
-    try {
-      new QueryParser().parse(listTokens.toArray(new Token[listTokens.size()]));
-      fail("Expected InvalidQueryException due to invalid last token");
-    } catch (InvalidQueryException e) {
-      // expected
-    }
-  }
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/operators/AndOperatorTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/operators/AndOperatorTest.java
deleted file mode 100644
index 3c4d587..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/operators/AndOperatorTest.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.predicate.operators;
-
-
-import org.apache.ambari.server.controller.predicate.AndPredicate;
-import org.apache.ambari.server.controller.predicate.EqualsPredicate;
-import org.junit.Test;
-
-import static org.junit.Assert.*;
-
-
-/**
- * AND operator test.
- */
-public class AndOperatorTest {
-
-  @Test
-  public void testGetName() {
-    assertEquals("AndOperator", new AndOperator(1).getName());
-  }
-
-  @Test
-  public void testToPredicate() {
-    EqualsPredicate p1 = new EqualsPredicate<String>("p1", "one");
-    EqualsPredicate p2 = new EqualsPredicate<String>("p2", "two");
-    AndPredicate andPredicate = new AndPredicate(p1, p2);
-
-    assertEquals(andPredicate, new AndOperator(1).toPredicate(p1, p2));
-  }
-
-  @Test
-  public void testGetType() {
-    assertSame(Operator.TYPE.AND, new AndOperator(1).getType());
-  }
-
-  @Test
-  public void testGetBasePrecedence() {
-    assertEquals(2, new AndOperator(1).getBasePrecedence());
-  }
-
-  @Test
-  public void testGetPrecedence() {
-    assertEquals(4, new AndOperator(2).getPrecedence());
-  }
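-
-  // Added commentary: the constructor argument is evidently the bracket-nesting
-  // context -- base precedence 2 yields an effective precedence of 4 at nesting
-  // depth 2 per the assertions above -- so a logical operator inside brackets
-  // always out-binds the same operator outside them.  The exact scaling formula
-  // is internal to the operator implementation and not asserted here.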
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/operators/EqualsOperatorTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/operators/EqualsOperatorTest.java
deleted file mode 100644
index 7982a66..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/operators/EqualsOperatorTest.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.predicate.operators;
-
-
-import org.apache.ambari.server.controller.predicate.EqualsPredicate;
-import org.junit.Test;
-
-import static org.junit.Assert.*;
-
-
-/**
- * EQUALS operator test.
- */
-public class EqualsOperatorTest {
-
-  @Test
-  public void testGetName() {
-    assertEquals("EqualsOperator", new EqualsOperator().getName());
-  }
-
-  @Test
-  public void testToPredicate() {
-    assertEquals(new EqualsPredicate<String>("prop", "val"),
-        new EqualsOperator().toPredicate("prop", "val"));
-  }
-
-  @Test
-  public void testGetType() {
-    assertSame(Operator.TYPE.EQUAL, new EqualsOperator().getType());
-  }
-
-  @Test
-  public void testGetBasePrecedence() {
-    assertEquals(-1, new EqualsOperator().getBasePrecedence());
-  }
-
-  @Test
-  public void testGetPrecedence() {
-    assertEquals(-1, new EqualsOperator().getPrecedence());
-  }
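-
-  // Added commentary: relational operators report -1 for both precedence
-  // values -- precedence ordering evidently matters only for the logical
-  // operators, which combine the predicates that relational operators produce.
-  // The Greater and GreaterEquals tests below follow the same pattern.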
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/operators/GreaterEqualsOperatorTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/operators/GreaterEqualsOperatorTest.java
deleted file mode 100644
index 6ac1014..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/operators/GreaterEqualsOperatorTest.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.predicate.operators;
-
-
-import org.apache.ambari.server.controller.predicate.GreaterEqualsPredicate;
-import org.junit.Test;
-
-import static org.junit.Assert.*;
-
-
-/**
- * GreaterEquals operator test.
- */
-public class GreaterEqualsOperatorTest {
-
-  @Test
-  public void testGetName() {
-    assertEquals("GreaterEqualsOperator", new GreaterEqualsOperator().getName());
-  }
-
-  @Test
-  public void testToPredicate() {
-    assertEquals(new GreaterEqualsPredicate<String>("1", "2"),
-        new GreaterEqualsOperator().toPredicate("1", "2"));
-  }
-
-  @Test
-  public void testGetType() {
-    assertSame(Operator.TYPE.GREATER_EQUAL, new GreaterEqualsOperator().getType());
-  }
-
-  @Test
-  public void testGetBasePrecedence() {
-    assertEquals(-1, new GreaterEqualsOperator().getBasePrecedence());
-  }
-
-  @Test
-  public void testGetPrecedence() {
-    assertEquals(-1, new GreaterEqualsOperator().getPrecedence());
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/operators/GreaterOperatorTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/operators/GreaterOperatorTest.java
deleted file mode 100644
index 59dce15..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/operators/GreaterOperatorTest.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.predicate.operators;
-
-
-import org.apache.ambari.server.controller.predicate.GreaterPredicate;
-import org.junit.Test;
-
-import static org.junit.Assert.*;
-
-
-/**
- * GREATER operator test.
- */
-public class GreaterOperatorTest {
-
-  @Test
-  public void testGetName() {
-    assertEquals("GreaterOperator", new GreaterOperator().getName());
-  }
-
-  @Test
-  public void testToPredicate() {
-    assertEquals(new GreaterPredicate<String>("1", "2"),
-        new GreaterOperator().toPredicate("1", "2"));
-  }
-
-  @Test
-  public void testGetType() {
-    assertSame(Operator.TYPE.GREATER, new GreaterOperator().getType());
-  }
-
-  @Test
-  public void testGetBasePrecedence() {
-    assertEquals(-1, new GreaterOperator().getBasePrecedence());
-  }
-
-  @Test
-  public void testGetPrecedence() {
-    assertEquals(-1, new GreaterOperator().getPrecedence());
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/operators/InOperatorTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/operators/InOperatorTest.java
deleted file mode 100644
index ae0e14f..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/operators/InOperatorTest.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.predicate.operators;
-
-
-import org.apache.ambari.server.controller.predicate.EqualsPredicate;
-import org.apache.ambari.server.controller.predicate.OrPredicate;
-import org.junit.Test;
-
-import static org.junit.Assert.*;
-
-/**
- * IN operator test.
- */
-public class InOperatorTest {
-
-  @Test
-  public void testGetName() {
-    assertEquals("InOperator", new InOperator().getName());
-  }
-
-  @Test
-  public void testToPredicate() throws Exception {
-    String prop = "prop";
-    String val = "one,2,three";
-    EqualsPredicate p1 = new EqualsPredicate<String>(prop, "one");
-    EqualsPredicate p2 = new EqualsPredicate<String>(prop, "2");
-    EqualsPredicate p3 = new EqualsPredicate<String>(prop, "three");
-    OrPredicate orPredicate = new OrPredicate(p1, p2, p3);
-
-    assertEquals(orPredicate, new InOperator().toPredicate(prop, val));
-  }
-
-  @Test
-  public void testGetType() {
-    assertSame(Operator.TYPE.IN, new InOperator().getType());
-  }
-
-  @Test
-  public void testGetBasePrecedence() {
-    assertEquals(-1, new InOperator().getBasePrecedence());
-  }
-
-  @Test
-  public void testGetPrecedence() {
-    assertEquals(-1, new InOperator().getPrecedence());
-  }
-}
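
testToPredicate() above documents the IN expansion: a comma-separated value list becomes an OR of equality predicates over the same property. A hedged sketch of that expansion, using the imports already present in the test; the helper name is illustrative, not the actual InOperator source:

    // Split the CSV value and OR one EqualsPredicate per element, so
    // "one,2,three" becomes (prop=one OR prop=2 OR prop=three),
    // exactly what the deleted test asserts.
    public static Predicate inPredicate(String prop, String csv) {
      String[] values = csv.split(",");
      EqualsPredicate[] terms = new EqualsPredicate[values.length];
      for (int i = 0; i < values.length; i++) {
        terms[i] = new EqualsPredicate<String>(prop, values[i]);
      }
      return new OrPredicate(terms);
    }
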
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/operators/IsEmptyOperatorTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/operators/IsEmptyOperatorTest.java
deleted file mode 100644
index d31169f..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/operators/IsEmptyOperatorTest.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.predicate.operators;
-
-
-import org.apache.ambari.server.api.predicate.InvalidQueryException;
-import org.apache.ambari.server.controller.predicate.CategoryIsEmptyPredicate;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.junit.Test;
-
-import static org.junit.Assert.*;
-
-/**
- * IsEmpty operator test.
- */
-public class IsEmptyOperatorTest {
-
-  @Test
-  public void testGetName() {
-    assertEquals("IsEmptyOperator", new IsEmptyOperator().getName());
-  }
-
-  @Test
-  public void testToPredicate() throws InvalidQueryException {
-    String prop = "prop";
-    Predicate p = new CategoryIsEmptyPredicate(prop);
-
-    assertEquals(p, new IsEmptyOperator().toPredicate(prop, null));
-  }
-
-  @Test
-  public void testGetType() {
-    assertSame(Operator.TYPE.IS_EMPTY, new IsEmptyOperator().getType());
-  }
-
-  @Test
-  public void testGetBasePrecedence() {
-    assertEquals(-1, new IsEmptyOperator().getBasePrecedence());
-  }
-
-  @Test
-  public void testGetPrecedence() {
-    assertEquals(-1, new IsEmptyOperator().getPrecedence());
-  }
-}
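
Note the asymmetry with the binary operators: IsEmptyOperator takes no right operand, and the test passes null for the value. Usage, as the test implies:

    // The value argument is ignored; only the category/property name matters.
    Predicate p = new IsEmptyOperator().toPredicate("prop", null);
    // p behaves as CategoryIsEmptyPredicate("prop")
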
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/operators/LessEqualsOperatorTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/operators/LessEqualsOperatorTest.java
deleted file mode 100644
index ef90a23..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/operators/LessEqualsOperatorTest.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.predicate.operators;
-
-
-import org.apache.ambari.server.controller.predicate.LessEqualsPredicate;
-import org.junit.Test;
-
-import static org.junit.Assert.*;
-
-
-/**
- * LessEquals operator test.
- */
-public class LessEqualsOperatorTest {
-
-  @Test
-  public void testGetName() {
-    assertEquals("LessEqualsOperator", new LessEqualsOperator().getName());
-  }
-
-  @Test
-  public void testToPredicate() {
-    assertEquals(new LessEqualsPredicate<String>("1", "2"),
-        new LessEqualsOperator().toPredicate("1", "2"));
-  }
-
-  @Test
-  public void testGetType() {
-    assertSame(Operator.TYPE.LESS_EQUAL, new LessEqualsOperator().getType());
-  }
-
-  @Test
-  public void testGetBasePrecedence() {
-    assertEquals(-1, new LessEqualsOperator().getBasePrecedence());
-  }
-
-  @Test
-  public void testGetPrecedence() {
-    assertEquals(-1, new LessEqualsOperator().getPrecedence());
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/operators/LessOperatorTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/operators/LessOperatorTest.java
deleted file mode 100644
index 753d988..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/operators/LessOperatorTest.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.predicate.operators;
-
-
-import org.apache.ambari.server.controller.predicate.LessPredicate;
-import org.junit.Test;
-
-import static org.junit.Assert.*;
-
-
-/**
- * LESS operator test.
- */
-public class LessOperatorTest {
-
-  @Test
-  public void testGetName() {
-    assertEquals("LessOperator", new LessOperator().getName());
-  }
-
-  @Test
-  public void testToPredicate() {
-    assertEquals(new LessPredicate<String>("1", "2"),
-        new LessOperator().toPredicate("1", "2"));
-  }
-
-  @Test
-  public void testGetType() {
-    assertSame(Operator.TYPE.LESS, new LessOperator().getType());
-  }
-
-  @Test
-  public void testGetBasePrecedence() {
-    assertEquals(-1, new LessOperator().getBasePrecedence());
-  }
-
-  @Test
-  public void testGetPrecedence() {
-    assertEquals(-1, new LessOperator().getPrecedence());
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/operators/NotEqualsOperatorTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/operators/NotEqualsOperatorTest.java
deleted file mode 100644
index 0ac1002..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/operators/NotEqualsOperatorTest.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.predicate.operators;
-
-
-import org.apache.ambari.server.controller.predicate.EqualsPredicate;
-import org.apache.ambari.server.controller.predicate.NotPredicate;
-import org.junit.Test;
-
-import static org.junit.Assert.*;
-
-
-/**
- * NOT EQUALS operator test.
- */
-public class NotEqualsOperatorTest {
-
-  @Test
-  public void testGetName() {
-    assertEquals("NotEqualsOperator", new NotEqualsOperator().getName());
-  }
-
-  @Test
-  public void testToPredicate() {
-    assertEquals(new NotPredicate(new EqualsPredicate<String>("prop", "val")),
-        new NotEqualsOperator().toPredicate("prop", "val"));
-  }
-
-  @Test
-  public void testGetType() {
-    assertSame(Operator.TYPE.NOT_EQUAL, new NotEqualsOperator().getType());
-  }
-
-  @Test
-  public void testGetBasePrecedence() {
-    assertEquals(-1, new NotEqualsOperator().getBasePrecedence());
-  }
-
-  @Test
-  public void testGetPrecedence() {
-    assertEquals(-1, new NotEqualsOperator().getPrecedence());
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/operators/NotOperatorTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/operators/NotOperatorTest.java
deleted file mode 100644
index 5dcfe6d..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/operators/NotOperatorTest.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.predicate.operators;
-
-
-import org.apache.ambari.server.controller.predicate.EqualsPredicate;
-import org.apache.ambari.server.controller.predicate.NotPredicate;
-import org.junit.Test;
-
-import static org.junit.Assert.*;
-
-
-/**
- * NOT operator test.
- */
-public class NotOperatorTest {
-
-  @Test
-  public void testGetName() {
-    assertEquals("NotOperator", new NotOperator(1).getName());
-  }
-
-  @Test
-  public void testToPredicate() {
-    EqualsPredicate p = new EqualsPredicate<String>("prop", "val");
-    NotPredicate notPredicate = new NotPredicate(p);
-
-    assertEquals(notPredicate, new NotOperator(1).toPredicate(null, p));
-  }
-
-  @Test
-  public void testGetType() {
-    assertSame(Operator.TYPE.NOT, new NotOperator(1).getType());
-  }
-
-  @Test
-  public void testGetBasePrecedence() {
-    assertEquals(3, new NotOperator(1).getBasePrecedence());
-  }
-
-  @Test
-  public void testGetPrecedence() {
-    assertEquals(5, new NotOperator(2).getPrecedence());
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/operators/OrOperatorTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/operators/OrOperatorTest.java
deleted file mode 100644
index c047062..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/operators/OrOperatorTest.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.predicate.operators;
-
-
-import org.apache.ambari.server.controller.predicate.EqualsPredicate;
-import org.apache.ambari.server.controller.predicate.OrPredicate;
-import org.junit.Test;
-
-import static org.junit.Assert.*;
-
-
-/**
- * OR operator test.
- */
-public class OrOperatorTest {
-
-  @Test
-  public void testGetName() {
-    assertEquals("OrOperator", new OrOperator(1).getName());
-  }
-
-  @Test
-  public void testToPredicate() {
-    EqualsPredicate p1 = new EqualsPredicate<String>("p1", "one");
-    EqualsPredicate p2 = new EqualsPredicate<String>("p2", "two");
-    OrPredicate orPredicate = new OrPredicate(p1, p2);
-
-    assertEquals(orPredicate, new OrOperator(1).toPredicate(p1, p2));
-  }
-
-  @Test
-  public void testGetType() {
-    assertSame(Operator.TYPE.OR, new OrOperator(1).getType());
-  }
-
-  @Test
-  public void testGetBasePrecedence() {
-    assertEquals(1, new OrOperator(1).getBasePrecedence());
-  }
-
-  @Test
-  public void testGetPrecedence() {
-    assertEquals(3, new OrOperator(2).getPrecedence());
-  }
-}
\ No newline at end of file
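
Taken together, the NOT and OR tests suggest how precedence works for the logical operators: each constructor takes a context level (presumably the parenthesis-nesting depth at parse time), and getPrecedence() adds it to the base precedence, so NotOperator(2) yields 3 + 2 = 5 and OrOperator(2) yields 1 + 2 = 3. A sketch of that reading; the base-class name and the AND value are assumptions, not taken from this diff:

    // Sketch: effective precedence = base precedence + constructor context level.
    public abstract class SketchLogicalOperator {
      private final int ctxPrecedence;  // nesting level supplied at parse time

      protected SketchLogicalOperator(int ctxPrecedence) {
        this.ctxPrecedence = ctxPrecedence;
      }

      // OR -> 1 and NOT -> 3 per the tests; AND presumably sits between them.
      public abstract int getBasePrecedence();

      public int getPrecedence() {
        // NotOperator(2): 3 + 2 == 5; OrOperator(2): 1 + 2 == 3.
        return getBasePrecedence() + ctxPrecedence;
      }
    }
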
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/query/QueryImplTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/query/QueryImplTest.java
deleted file mode 100644
index a0b41b4..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/query/QueryImplTest.java
+++ /dev/null
@@ -1,602 +0,0 @@
-package org.apache.ambari.server.api.query;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.ambari.server.api.resources.ResourceDefinition;
-import org.apache.ambari.server.api.util.TreeNode;
-import org.apache.ambari.server.api.util.TreeNodeImpl;
-import org.apache.ambari.server.controller.predicate.AndPredicate;
-import org.apache.ambari.server.controller.predicate.BasePredicate;
-import org.apache.ambari.server.controller.spi.*;
-import org.apache.ambari.server.controller.utilities.PredicateBuilder;
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.apache.ambari.server.api.services.Result;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.junit.After;
-import org.junit.Test;
-
-import java.util.*;
-
-import static org.easymock.EasyMock.*;
-
-import static org.easymock.EasyMock.eq;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertTrue;
-
-
-//todo: add assertions for temporal info
-public class QueryImplTest {
-
-  ClusterController m_controller = createNiceMock(ClusterController.class);
-
-  @Test
-  public void testExecute__Component_instance_noSpecifiedProps() throws Exception {
-    Result result = createNiceMock(Result.class);
-    ResourceInstance componentResourceInstance = createNiceMock(ResourceInstance.class);
-    ResourceDefinition componentResourceDefinition = createNiceMock(ResourceDefinition.class);
-    ResourceInstance hostResourceInstance = createNiceMock(ResourceInstance.class);
-    ResourceDefinition hostResourceDefinition = createNiceMock(ResourceDefinition.class);
-    Schema componentSchema = createNiceMock(Schema.class);
-    Resource componentResource = createNiceMock(Resource.class);
-    String componentPropertyId = "componentId";
-    Query hostComponentQuery = createStrictMock(Query.class);
-    Result hostComponentQueryResult = createNiceMock(Result.class);
-
-    TreeNode<Resource> tree = new TreeNodeImpl<Resource>(null, null, null);
-    TreeNode<Resource> hostComponentResultNode = new TreeNodeImpl<Resource>(null, null, null);
-    List<Resource> listResources = Collections.singletonList(componentResource);
-
-    Map<Resource.Type, String> mapResourceIds = new HashMap<Resource.Type, String>();
-    mapResourceIds.put(Resource.Type.Cluster, "clusterName");
-    mapResourceIds.put(Resource.Type.Service, "serviceName");
-    mapResourceIds.put(Resource.Type.Component, "componentName");
-
-    Map<String, ResourceInstance> mapChildren = new HashMap<String, ResourceInstance>();
-    mapChildren.put("host_components", hostResourceInstance);
-
-    PredicateBuilder pb = new PredicateBuilder();
-    Predicate predicate = pb.property("clusterId").equals("clusterName").and().
-        property("serviceId").equals("serviceName").and().
-        property("componentId").equals("componentName").toPredicate();
-
-    // expectations
-    expect(componentResourceInstance.getResourceDefinition()).andReturn(componentResourceDefinition).anyTimes();
-    expect(componentResourceInstance.getSubResources()).andReturn(mapChildren).anyTimes();
-    expect(componentResourceInstance.getIds()).andReturn(mapResourceIds).anyTimes();
-
-    expect(componentResourceDefinition.getType()).andReturn(Resource.Type.Component).anyTimes();
-
-    expect(componentResource.getType()).andReturn(Resource.Type.Component).anyTimes();
-    expect(componentResource.getPropertyValue(componentPropertyId)).andReturn("keyVal");
-
-    expect(m_controller.getSchema(Resource.Type.Component)).andReturn(componentSchema).anyTimes();
-
-    expect(componentSchema.getKeyPropertyId(Resource.Type.Cluster)).andReturn("clusterId");
-    expect(componentSchema.getKeyPropertyId(Resource.Type.Service)).andReturn("serviceId");
-    expect(componentSchema.getKeyPropertyId(Resource.Type.Component)).andReturn(componentPropertyId).atLeastOnce();
-
-    expect(m_controller.getResources(eq(Resource.Type.Component), eq(PropertyHelper.getReadRequest(Collections.<String>emptySet())),
-        eq(predicate))).andReturn(listResources);
-
-    expect(result.getResultTree()).andReturn(tree).anyTimes();
-
-    Map<Resource.Type, String> mapResourceIdsSet = new HashMap<Resource.Type, String>(mapResourceIds);
-    mapResourceIdsSet.put(Resource.Type.Component, "keyVal");
-    hostResourceInstance.setIds(mapResourceIdsSet);
-    expect(hostResourceInstance.getResourceDefinition()).andReturn(hostResourceDefinition).anyTimes();
-    expect(hostResourceInstance.getQuery()).andReturn(hostComponentQuery).anyTimes();
-
-    expect(hostResourceDefinition.getType()).andReturn(Resource.Type.Host);
-    expect(hostComponentQuery.execute()).andReturn(hostComponentQueryResult);
-    expect(hostComponentQueryResult.getResultTree()).andReturn(hostComponentResultNode);
-
-    replay(m_controller, result, componentResourceInstance, componentResourceDefinition, hostResourceInstance, componentSchema, componentResource,
-        hostComponentQuery, hostComponentQueryResult);
-
-    QueryImpl query = new TestQuery(componentResourceInstance, result);
-    query.execute();
-
-    verify(m_controller, result, componentResourceInstance, componentResourceDefinition, hostResourceInstance, componentSchema, componentResource,
-        hostComponentQuery, hostComponentQueryResult);
-
-    assertEquals(1, tree.getChildren().size());
-    TreeNode<Resource> componentNode = tree.getChild("Component:1");
-    assertEquals("Component:1", componentNode.getName());
-    assertEquals(componentResource, componentNode.getObject());
-    assertEquals(1, componentNode.getChildren().size());
-    assertSame(hostComponentResultNode, componentNode.getChild("host_components"));
-    assertEquals("false", hostComponentResultNode.getProperty("isCollection"));
-  }
-
-  @Test
-  public void testExecute__Component_collection_noSpecifiedProps() throws Exception {
-    Result result = createNiceMock(Result.class);
-    ResourceInstance componentResourceInstance = createNiceMock(ResourceInstance.class);
-    ResourceDefinition componentResourceDefinition = createNiceMock(ResourceDefinition.class);
-    Schema componentSchema = createNiceMock(Schema.class);
-    Resource componentResource = createNiceMock(Resource.class);
-    String componentPropertyId = "componentId";
-    String servicePropertyId = "serviceId";
-    String clusterPropertyId = "clusterId";
-
-    Set<String> setPropertyIds = new HashSet<String>();
-    setPropertyIds.add(clusterPropertyId);
-    setPropertyIds.add(servicePropertyId);
-    setPropertyIds.add(componentPropertyId);
-
-    TreeNode<Resource> tree = new TreeNodeImpl<Resource>(null, null, null);
-    List<Resource> listResources = Collections.singletonList(componentResource);
-
-    Map<Resource.Type, String> mapResourceIds = new HashMap<Resource.Type, String>();
-    mapResourceIds.put(Resource.Type.Cluster, "clusterName");
-    mapResourceIds.put(Resource.Type.Service, "serviceName");
-    mapResourceIds.put(Resource.Type.Component, null);
-
-    PredicateBuilder pb = new PredicateBuilder();
-    Predicate predicate = pb.property("clusterId").equals("clusterName").and().
-        property("serviceId").equals("serviceName").toPredicate();
-
-    // expectations
-    expect(componentResource.getType()).andReturn(Resource.Type.Component).anyTimes();
-
-    expect(componentResourceInstance.getIds()).andReturn(mapResourceIds).anyTimes();
-    expect(componentResourceInstance.getResourceDefinition()).andReturn(componentResourceDefinition).anyTimes();
-
-    expect(componentResourceDefinition.getType()).andReturn(Resource.Type.Component).anyTimes();
-
-    expect(m_controller.getSchema(Resource.Type.Component)).andReturn(componentSchema).anyTimes();
-
-    expect(componentSchema.getKeyPropertyId(Resource.Type.Component)).andReturn(componentPropertyId).anyTimes();
-    expect(componentSchema.getKeyPropertyId(Resource.Type.Cluster)).andReturn("clusterId").anyTimes();
-    expect(componentSchema.getKeyPropertyId(Resource.Type.Service)).andReturn("serviceId").anyTimes();
-
-    expect(result.getResultTree()).andReturn(tree).anyTimes();
-
-    expect(m_controller.getResources(eq(Resource.Type.Component), eq(PropertyHelper.getReadRequest(setPropertyIds)),
-        eq(predicate))).andReturn(listResources);
-
-    expect(componentResourceInstance.getSubResources()).andReturn(Collections.<String, ResourceInstance>emptyMap()).anyTimes();
-
-    replay(m_controller, result, componentResourceInstance, componentResourceDefinition, componentSchema, componentResource);
-
-    QueryImpl query = new TestQuery(componentResourceInstance, result);
-    query.execute();
-
-    verify(m_controller, result, componentResourceInstance, componentResourceDefinition, componentSchema, componentResource);
-
-    assertEquals("true", tree.getProperty("isCollection"));
-    assertEquals(1, tree.getChildren().size());
-    TreeNode<Resource> componentNode = tree.getChild("Component:1");
-    assertSame(componentResource, componentNode.getObject());
-    assertEquals(0, componentNode.getChildren().size());
-  }
-
-  @Test
-  public void testExecute__collection_nullInternalPredicate_nullUserPredicate() throws Exception {
-    Result result = createNiceMock(Result.class);
-    ResourceInstance clusterResourceInstance = createNiceMock(ResourceInstance.class);
-    ResourceDefinition clusterResourceDefinition = createNiceMock(ResourceDefinition.class);
-    Schema clusterSchema = createNiceMock(Schema.class);
-    Resource clusterResource = createNiceMock(Resource.class);
-    String clusterPropertyId = "clusterId";
-
-    TreeNode<Resource> tree = new TreeNodeImpl<Resource>(null, null, null);
-    List<Resource> listResources = Collections.singletonList(clusterResource);
-
-    Map<Resource.Type, String> mapResourceIds = new HashMap<Resource.Type, String>();
-
-    // expectations
-    expect(clusterResource.getType()).andReturn(Resource.Type.Cluster).anyTimes();
-
-    expect(clusterResourceInstance.getIds()).andReturn(mapResourceIds).anyTimes();
-    expect(clusterResourceInstance.getResourceDefinition()).andReturn(clusterResourceDefinition).anyTimes();
-
-    expect(clusterResourceDefinition.getType()).andReturn(Resource.Type.Component).anyTimes();
-
-    expect(m_controller.getSchema(Resource.Type.Component)).andReturn(clusterSchema).atLeastOnce();
-
-    expect(clusterSchema.getKeyPropertyId(Resource.Type.Component)).andReturn(clusterPropertyId).atLeastOnce();
-
-    expect(result.getResultTree()).andReturn(tree).atLeastOnce();
-
-    expect(m_controller.getResources(eq(Resource.Type.Component), eq(PropertyHelper.getReadRequest(Collections.singleton(clusterPropertyId))),
-        (Predicate) isNull())).andReturn(listResources);
-
-
-    expect(clusterResourceInstance.getSubResources()).andReturn(Collections.<String, ResourceInstance>emptyMap()).anyTimes();
-
-    replay(m_controller, result, clusterResourceInstance, clusterResourceDefinition, clusterSchema, clusterResource);
-
-    QueryImpl query = new TestQuery(clusterResourceInstance, result);
-    query.execute();
-
-    verify(m_controller, result, clusterResourceInstance, clusterResourceDefinition, clusterSchema, clusterResource);
-
-    assertEquals("true", tree.getProperty("isCollection"));
-    assertEquals(1, tree.getChildren().size());
-    TreeNode<Resource> clusterNode = tree.getChild("Cluster:1");
-    assertSame(clusterResource, clusterNode.getObject());
-    assertEquals(0, clusterNode.getChildren().size());
-
-  }
-
-  @Test
-  public void testExecute__collection_nullInternalPredicate_nonNullUserPredicate() throws Exception {
-    Result result = createNiceMock(Result.class);
-    ResourceInstance clusterResourceInstance = createNiceMock(ResourceInstance.class);
-    ResourceDefinition clusterResourceDefinition = createNiceMock(ResourceDefinition.class);
-    Schema clusterSchema = createNiceMock(Schema.class);
-    Resource clusterResource = createNiceMock(Resource.class);
-    String clusterPropertyId = "clusterId";
-    Predicate userPredicate = createNiceMock(Predicate.class);
-
-    TreeNode<Resource> tree = new TreeNodeImpl<Resource>(null, null, null);
-    List<Resource> listResources = Collections.singletonList(clusterResource);
-
-    Map<Resource.Type, String> mapResourceIds = new HashMap<Resource.Type, String>();
-
-    // expectations
-    expect(clusterResource.getType()).andReturn(Resource.Type.Cluster).anyTimes();
-
-    expect(clusterResourceInstance.getIds()).andReturn(mapResourceIds).anyTimes();
-    expect(clusterResourceInstance.getResourceDefinition()).andReturn(clusterResourceDefinition).anyTimes();
-
-    expect(clusterResourceDefinition.getType()).andReturn(Resource.Type.Component).atLeastOnce();
-
-    expect(m_controller.getSchema(Resource.Type.Component)).andReturn(clusterSchema).anyTimes();
-    expect(clusterSchema.getKeyPropertyId(Resource.Type.Component)).andReturn(clusterPropertyId).anyTimes();
-
-    expect(result.getResultTree()).andReturn(tree).anyTimes();
-
-    expect(m_controller.getResources(eq(Resource.Type.Component), eq(PropertyHelper.getReadRequest(Collections.singleton(clusterPropertyId))),
-        eq(userPredicate))).andReturn(listResources);
-
-    expect(clusterResourceInstance.getSubResources()).andReturn(Collections.<String, ResourceInstance>emptyMap()).anyTimes();
-
-    replay(m_controller, result, clusterResourceInstance, clusterResourceDefinition, clusterSchema, clusterResource, userPredicate);
-
-    QueryImpl query = new TestQuery(clusterResourceInstance, result);
-    query.setUserPredicate(userPredicate);
-    query.execute();
-
-    verify(m_controller, result, clusterResourceInstance, clusterResourceDefinition, clusterSchema, clusterResource, userPredicate);
-
-    assertEquals("true", tree.getProperty("isCollection"));
-    assertEquals(1, tree.getChildren().size());
-    TreeNode<Resource> clusterNode = tree.getChild("Cluster:1");
-    assertSame(clusterResource, clusterNode.getObject());
-    assertEquals(0, clusterNode.getChildren().size());
-  }
-
-  @Test
-  public void testExecute__collection_nonNullInternalPredicate_nonNullUserPredicate() throws Exception {
-    Result result = createNiceMock(Result.class);
-    ResourceInstance componentResourceInstance = createNiceMock(ResourceInstance.class);
-    ResourceDefinition componentResourceDefinition = createNiceMock(ResourceDefinition.class);
-    Schema componentSchema = createNiceMock(Schema.class);
-    Resource componentResource = createNiceMock(Resource.class);
-    String componentPropertyId = "componentId";
-    String servicePropertyId = "serviceId";
-    String clusterPropertyId = "clusterId";
-
-    Set<String> setPropertyIds = new HashSet<String>();
-    setPropertyIds.add(clusterPropertyId);
-    setPropertyIds.add(servicePropertyId);
-    setPropertyIds.add(componentPropertyId);
-
-    TreeNode<Resource> tree = new TreeNodeImpl<Resource>(null, null, null);
-    List<Resource> listResources = Collections.singletonList(componentResource);
-
-    Map<Resource.Type, String> mapResourceIds = new HashMap<Resource.Type, String>();
-    mapResourceIds.put(Resource.Type.Cluster, "clusterName");
-    mapResourceIds.put(Resource.Type.Service, "serviceName");
-    mapResourceIds.put(Resource.Type.Component, null);
-
-    PredicateBuilder pb = new PredicateBuilder();
-    Predicate internalPredicate = pb.property("clusterId").equals("clusterName").and().
-        property("serviceId").equals("serviceName").toPredicate();
-
-    pb = new PredicateBuilder();
-    Predicate userPredicate = pb.property("foo").equals("bar").toPredicate();
-    // combine internal predicate and user predicate
-    //todo: for now, need to cast to BasePredicate
-    Predicate predicate = new AndPredicate((BasePredicate) internalPredicate, (BasePredicate) userPredicate);
-
-    // expectations
-    expect(componentResource.getType()).andReturn(Resource.Type.Component).anyTimes();
-
-    expect(componentResourceInstance.getIds()).andReturn(mapResourceIds).anyTimes();
-    expect(componentResourceInstance.getResourceDefinition()).andReturn(componentResourceDefinition).anyTimes();
-
-    expect(componentResourceDefinition.getType()).andReturn(Resource.Type.Component).anyTimes();
-
-    expect(m_controller.getSchema(Resource.Type.Component)).andReturn(componentSchema).anyTimes();
-    expect(componentSchema.getKeyPropertyId(Resource.Type.Component)).andReturn(componentPropertyId).atLeastOnce();
-    expect(componentSchema.getKeyPropertyId(Resource.Type.Cluster)).andReturn("clusterId").anyTimes();
-    expect(componentSchema.getKeyPropertyId(Resource.Type.Service)).andReturn("serviceId").anyTimes();
-
-    expect(result.getResultTree()).andReturn(tree).anyTimes();
-
-    expect(m_controller.getResources(eq(Resource.Type.Component), eq(PropertyHelper.getReadRequest(setPropertyIds)),
-        eq(predicate))).andReturn(listResources);
-
-    expect(componentResourceInstance.getSubResources()).andReturn(Collections.<String, ResourceInstance>emptyMap()).anyTimes();
-
-    replay(m_controller, result, componentResourceInstance, componentResourceDefinition, componentSchema, componentResource);
-
-    QueryImpl query = new TestQuery(componentResourceInstance, result);
-    query.setUserPredicate(userPredicate);
-    query.execute();
-
-    verify(m_controller, result, componentResourceInstance, componentResourceDefinition, componentSchema, componentResource);
-
-    assertEquals("true", tree.getProperty("isCollection"));
-    assertEquals(1, tree.getChildren().size());
-    TreeNode<Resource> componentNode = tree.getChild("Component:1");
-    assertSame(componentResource, componentNode.getObject());
-    assertEquals(0, componentNode.getChildren().size());
-  }
-
-  @Test
-  public void testAddProperty__localProperty() throws Exception {
-    ResourceInstance resource = createNiceMock(ResourceInstance.class);
-    ResourceDefinition resourceDefinition = createNiceMock(ResourceDefinition.class);
-    Schema schema = createNiceMock(Schema.class);
-
-    //expectations
-    expect(resource.getResourceDefinition()).andReturn(resourceDefinition).anyTimes();
-    expect(resource.getSubResources()).andReturn(Collections.<String, ResourceInstance>emptyMap()).anyTimes();
-
-    expect(resourceDefinition.getType()).andReturn(Resource.Type.Service).anyTimes();
-
-    expect(m_controller.getSchema(Resource.Type.Service)).andReturn(schema).anyTimes();
-
-    replay(m_controller, resource, resourceDefinition, schema);
-
-    Query query = new TestQuery(resource, null);
-    query.addProperty("category", "property", null);
-
-    assertEquals(1, query.getProperties().size());
-    assertTrue(query.getProperties().contains("category/property"));
-
-    query.addProperty(null, "property2", null);
-
-    assertEquals(2, query.getProperties().size());
-    assertTrue(query.getProperties().contains("property2"));
-
-    verify(m_controller, resource, resourceDefinition, schema);
-  }
-
-  @Test
-  public void testAddProperty__allProperties() throws Exception {
-    ResourceInstance resource = createNiceMock(ResourceInstance.class);
-    ResourceDefinition resourceDefinition = createNiceMock(ResourceDefinition.class);
-    Schema schema = createNiceMock(Schema.class);
-
-    //expectations
-    expect(resource.getResourceDefinition()).andReturn(resourceDefinition).anyTimes();
-    expect(resource.getSubResources()).andReturn(Collections.<String, ResourceInstance>emptyMap()).anyTimes();
-
-    expect(resourceDefinition.getType()).andReturn(Resource.Type.Service).anyTimes();
-
-    expect(m_controller.getSchema(Resource.Type.Service)).andReturn(schema).anyTimes();
-
-    replay(m_controller, resource, resourceDefinition, schema);
-
-    Query query = new TestQuery(resource, null);
-    query.addProperty(null, "*", null);
-
-    assertEquals(0, query.getProperties().size());
-
-    verify(m_controller, resource, resourceDefinition, schema);
-  }
-
-  @Test
-  public void testAddProperty__allCategoryProperties() throws Exception {
-    ResourceInstance resource = createNiceMock(ResourceInstance.class);
-    ResourceDefinition resourceDefinition = createNiceMock(ResourceDefinition.class);
-    Schema schema = createNiceMock(Schema.class);
-
-    //expectations
-    expect(resource.getResourceDefinition()).andReturn(resourceDefinition).anyTimes();
-    expect(resource.getSubResources()).andReturn(Collections.<String, ResourceInstance>emptyMap()).anyTimes();
-
-    expect(resourceDefinition.getType()).andReturn(Resource.Type.Service).anyTimes();
-
-    expect(m_controller.getSchema(Resource.Type.Service)).andReturn(schema).anyTimes();
-
-    replay(m_controller, resource, resourceDefinition, schema);
-
-    Query query = new TestQuery(resource, null);
-    query.addProperty("category", "*", null);
-
-    assertEquals(1, query.getProperties().size());
-    assertTrue(query.getProperties().contains("category"));
-
-    verify(m_controller, resource, resourceDefinition, schema);
-  }
-
-  // this is the case where the service can't differentiate between a category and a property name;
-  // the category name is given as the property name
-  @Test
-  public void testAddProperty__localCategory_asPropertyName() throws Exception  {
-    ResourceInstance resource = createNiceMock(ResourceInstance.class);
-    ResourceDefinition resourceDefinition = createNiceMock(ResourceDefinition.class);
-    Schema schema = createNiceMock(Schema.class);
-
-    //expectations
-    expect(resource.getResourceDefinition()).andReturn(resourceDefinition).anyTimes();
-
-    expect(resourceDefinition.getType()).andReturn(Resource.Type.Service).anyTimes();
-
-    expect(m_controller.getSchema(Resource.Type.Service)).andReturn(schema).anyTimes();
-    expect(resource.getSubResources()).andReturn(Collections.<String, ResourceInstance>emptyMap()).anyTimes();
-
-    replay(m_controller, resource, resourceDefinition, schema);
-
-    Query query = new TestQuery(resource, null);
-    query.addProperty(null, "category", null);
-
-    Set<String> setProperties = query.getProperties();
-    assertEquals(1, setProperties.size());
-    assertTrue(setProperties.contains("category"));
-
-    verify(m_controller, resource, resourceDefinition, schema);
-  }
-
-  // This is the case where the service can determine that only a category was provided because it contained
-  // a trailing '/'
-  @Test
-  public void testAddProperty__localCategory_categoryNameOnly() throws Exception {
-    ResourceInstance resource = createNiceMock(ResourceInstance.class);
-    ResourceDefinition resourceDefinition = createNiceMock(ResourceDefinition.class);
-    Schema schema = createNiceMock(Schema.class);
-
-    //expectations
-    expect(resource.getResourceDefinition()).andReturn(resourceDefinition).anyTimes();
-
-    expect(resourceDefinition.getType()).andReturn(Resource.Type.Service).anyTimes();
-
-    expect(m_controller.getSchema(Resource.Type.Service)).andReturn(schema).anyTimes();
-    expect(resource.getSubResources()).andReturn(Collections.<String, ResourceInstance>emptyMap()).anyTimes();
-
-    replay(m_controller, resource, resourceDefinition, schema);
-
-    Query query = new TestQuery(resource, null);
-    query.addProperty("category/", "", null);
-
-    Set<String> setProperties = query.getProperties();
-    assertEquals(1, setProperties.size());
-    assertTrue(setProperties.contains("category"));
-
-    verify(m_controller, resource, resourceDefinition, schema);
-  }
-
-  @Test
-  public void testAddProperty__localSubCategory() throws Exception {
-    ResourceInstance resource = createNiceMock(ResourceInstance.class);
-    ResourceDefinition resourceDefinition = createNiceMock(ResourceDefinition.class);
-    Schema schema = createNiceMock(Schema.class);
-
-    //expectations
-    expect(resource.getResourceDefinition()).andReturn(resourceDefinition).anyTimes();
-
-    expect(resourceDefinition.getType()).andReturn(Resource.Type.Service).anyTimes();
-
-    expect(m_controller.getSchema(Resource.Type.Service)).andReturn(schema).anyTimes();
-
-    expect(resource.getSubResources()).andReturn(Collections.<String, ResourceInstance>emptyMap()).anyTimes();
-
-    replay(m_controller, resource, resourceDefinition, schema);
-
-    Query query = new TestQuery(resource, null);
-    query.addProperty("category", "nestedCategory", null);
-
-    Set<String> setProperties = query.getProperties();
-    assertEquals(1, setProperties.size());
-    assertTrue(setProperties.contains("category/nestedCategory"));
-
-    verify(m_controller, resource, resourceDefinition, schema);
-  }
-
-  @Test
-  public void testAddProperty__localCategorySubPropsOnly() throws Exception {
-    ResourceInstance resource = createNiceMock(ResourceInstance.class);
-    ResourceDefinition resourceDefinition = createNiceMock(ResourceDefinition.class);
-    Schema schema = createNiceMock(Schema.class);
-
-    //expectations
-    expect(resource.getResourceDefinition()).andReturn(resourceDefinition).anyTimes();
-
-    expect(resourceDefinition.getType()).andReturn(Resource.Type.Service).anyTimes();
-
-    expect(m_controller.getSchema(Resource.Type.Service)).andReturn(schema).anyTimes();
-    expect(resource.getSubResources()).andReturn(Collections.<String, ResourceInstance>emptyMap()).anyTimes();
-
-    replay(m_controller, resource, resourceDefinition, schema);
-
-    Query query = new TestQuery(resource, null);
-    query.addProperty(null, "category", null);
-
-    Set<String> setProperties = query.getProperties();
-    assertEquals(1, setProperties.size());
-    assertTrue(setProperties.contains("category"));
-
-    verify(m_controller, resource, resourceDefinition, schema);
-  }
-
-  @Test
-  public void testAddProperty__subProperty() throws Exception {
-    ResourceInstance resource = createNiceMock(ResourceInstance.class);
-    ResourceDefinition resourceDefinition = createNiceMock(ResourceDefinition.class);
-    ResourceInstance subResource = createNiceMock(ResourceInstance.class);
-    Schema schema = createNiceMock(Schema.class);
-
-    //expectations
-    expect(resource.getResourceDefinition()).andReturn(resourceDefinition).anyTimes();
-
-    expect(resourceDefinition.getType()).andReturn(Resource.Type.Service).anyTimes();
-
-    expect(m_controller.getSchema(Resource.Type.Service)).andReturn(schema).anyTimes();
-
-    expect(resource.getSubResources()).andReturn(Collections.singletonMap("components", subResource)).anyTimes();
-
-    //todo: ensure that sub-resource was added.
-
-    replay(m_controller, resource, resourceDefinition, subResource, schema);
-
-    Query query = new TestQuery(resource, null);
-    query.addProperty(null, "components", null);
-
-    verify(m_controller, resource, resourceDefinition, subResource, schema);
-  }
-
-  //todo: sub-resource with property and with sub-path
-
-//  @Test
-//  public void testAddProperty__invalidProperty() {
-//
-//  }
-
-  private class TestQuery extends QueryImpl {
-
-    private Result m_result;
-
-    public TestQuery(ResourceInstance resourceInstance, Result result) {
-      super(resourceInstance);
-      m_result = result;
-    }
-
-    @Override
-    ClusterController getClusterController() {
-      return m_controller;
-    }
-
-    @Override
-    Result createResult() {
-      return m_result;
-    }
-  }
-
-  @After
-  public void resetGlobalMocks() {
-    reset(m_controller);
-  }
-}
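
The recurring fixture in these query tests is the fluent PredicateBuilder chain. The pattern on its own, using only the calls the test already makes:

    // Build (clusterId = "c1" AND serviceId = "HDFS") fluently.
    Predicate predicate = new PredicateBuilder()
        .property("clusterId").equals("c1").and()
        .property("serviceId").equals("HDFS")
        .toPredicate();
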
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/resources/ResourceInstanceImplTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/resources/ResourceInstanceImplTest.java
deleted file mode 100644
index 4f729b2..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/resources/ResourceInstanceImplTest.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-package org.apache.ambari.server.api.resources;
-
-
-import org.apache.ambari.server.controller.spi.Resource;
-import org.junit.Test;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-
-import static org.easymock.EasyMock.*;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-
-/**
- * ResourceInstanceImpl unit tests.
- */
-public class ResourceInstanceImplTest {
-  @Test
-  public void testIsCollection__True() {
-    ResourceDefinition resourceDefinition = createNiceMock(ResourceDefinition.class);
-
-    Map<Resource.Type, String> mapIds = new HashMap<Resource.Type, String>();
-    mapIds.put(Resource.Type.Cluster, "cluster");
-    mapIds.put(Resource.Type.Service, null);
-
-    // expectations
-    expect(resourceDefinition.getType()).andReturn(Resource.Type.Service).anyTimes();
-    expect(resourceDefinition.getSubResourceDefinitions()).andReturn(Collections.<SubResourceDefinition>emptySet()).anyTimes();
-
-    replay(resourceDefinition);
-
-    //test
-    ResourceInstance instance = new ResourceInstanceImpl(mapIds, resourceDefinition, null);
-    assertTrue(instance.isCollectionResource());
-
-    verify(resourceDefinition);
-  }
-
-  @Test
-  public void testIsCollection__False() {
-    ResourceDefinition resourceDefinition = createNiceMock(ResourceDefinition.class);
-
-    Map<Resource.Type, String> mapIds = new HashMap<Resource.Type, String>();
-    mapIds.put(Resource.Type.Cluster, "cluster");
-    mapIds.put(Resource.Type.Service, "service");
-
-    // expectations
-    expect(resourceDefinition.getType()).andReturn(Resource.Type.Service).anyTimes();
-    expect(resourceDefinition.getSubResourceDefinitions()).andReturn(Collections.<SubResourceDefinition>emptySet()).anyTimes();
-
-    replay(resourceDefinition);
-
-    //test
-    ResourceInstance instance = new ResourceInstanceImpl(mapIds, resourceDefinition, null);
-    assertFalse(instance.isCollectionResource());
-
-    verify(resourceDefinition);
-  }
-}
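
The two cases above bracket a single rule: an instance is a collection exactly when the id map carries no id for the instance's own resource type. A sketch of that rule, assuming the id map and type are what ResourceInstanceImpl consults internally:

    // {Cluster="cluster", Service=null}      -> collection (no Service id)
    // {Cluster="cluster", Service="service"} -> single instance
    boolean isCollectionResource(Map<Resource.Type, String> mapIds,
                                 Resource.Type myType) {
      return mapIds.get(myType) == null;
    }
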
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/ActionServiceTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/ActionServiceTest.java
deleted file mode 100644
index 05d5cc0..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/ActionServiceTest.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.api.services;
-
-import static org.junit.Assert.assertEquals;
-
-import javax.ws.rs.core.Response;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.api.handlers.RequestHandler;
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.junit.Test;
-
-public class ActionServiceTest extends BaseServiceTest {
-
-  @Test
-  public void testGetActions() {
-    String clusterName = "c1";
-    String serviceName = "HDFS";
-
-    registerExpectations(Request.Type.GET, null, 200, false);
-    replayMocks();
-
-    //test
-    ActionService actionService = new TestActionService(getResource(), clusterName, getRequestFactory(),
-        getRequestHandler(), serviceName);
-    Response response = actionService.getActions(getHttpHeaders(), getUriInfo());
-    verifyResults(response, 200);
-  }
-
-  @Test
-  public void testCreateActions() throws AmbariException {
-    String clusterName = "c1";
-    String serviceName = "HDFS";
-    String body = "body";
-
-    registerExpectations(Request.Type.POST, body, 201, false);
-    replayMocks();
-
-    //test
-    ActionService actionService = new TestActionService(getResource(), clusterName, getRequestFactory(),
-        getRequestHandler(), serviceName);
-    Response response = actionService.createActions(body, getHttpHeaders(), getUriInfo());
-    verifyResults(response, 201);
-  }
-
-  private class TestActionService extends ActionService {
-    private ResourceInstance m_resourceDef;
-    private String m_clusterId;
-    private String m_serviceId;
-    private RequestFactory m_requestFactory;
-    private RequestHandler m_requestHandler;
-
-    public TestActionService(ResourceInstance resourceDef,
-                             String clusterName, RequestFactory requestFactory,
-                             RequestHandler handler,
-                             String serviceName) {
-
-      super(clusterName, serviceName);
-      m_resourceDef = resourceDef;
-      m_clusterId = clusterName;
-      m_serviceId = serviceName;
-      m_requestFactory = requestFactory;
-      m_requestHandler = handler;
-    }
-
-    @Override
-    ResourceInstance createActionResource(String clusterName, String serviceName, String actionName) {
-      assertEquals(m_clusterId, clusterName);
-      assertEquals(m_serviceId, serviceName);
-      return m_resourceDef;
-    }
-
-    @Override
-    RequestFactory getRequestFactory() {
-      return m_requestFactory;
-    }
-
-
-    @Override
-    RequestHandler getRequestHandler(Request.Type requestType) {
-      return m_requestHandler;
-    }
-  }
-}
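
TestActionService shows the injection seam this suite uses throughout: rather than mocking the service under test, a subclass overrides the package-private factory accessors so the BaseServiceTest mocks flow in. The same pattern in miniature; class names here are illustrative:

    // Subclass-as-seam: override the accessors the service calls internally.
    class TestableService extends SomeService {
      private final RequestHandler handler;

      TestableService(RequestHandler handler) {
        this.handler = handler;
      }

      @Override
      RequestHandler getRequestHandler(Request.Type requestType) {
        return handler;  // hand back the test's mock instead of a real handler
      }
    }
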
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
deleted file mode 100644
index 7006339..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
+++ /dev/null
@@ -1,243 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNotSame;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.File;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import junit.framework.Assert;
-
-import org.apache.ambari.server.state.ComponentInfo;
-import org.apache.ambari.server.state.RepositoryInfo;
-import org.apache.ambari.server.state.ServiceInfo;
-import org.apache.commons.io.FileUtils;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class AmbariMetaInfoTest {
-
-  private static String STACK_NAME_HDP = "HDP";
-  private static String STACK_VERSION_HDP = "0.1";
-  private static String SERVICE_NAME_HDFS = "HDFS";
-  private static String SERVICE_COMPONENT_NAME = "NAMENODE";
-
-  private AmbariMetaInfo metaInfo = null;
-  private final static Logger LOG =
-      LoggerFactory.getLogger(AmbariMetaInfoTest.class);
-
-  @Rule
-  public TemporaryFolder tmpFolder = new TemporaryFolder();
-
-  @Before
-  public void before() throws Exception {
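-    // Load metadata from the bundled test stack definitions; init() failures
-    // are logged here and surface later as individual test failures.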
-    File stackRoot = new File("src/test/resources/stacks");
-   LOG.info("Stacks file " + stackRoot.getAbsolutePath());
-    metaInfo = new AmbariMetaInfo(stackRoot);
-    try {
-      metaInfo.init();
-    } catch(Exception e) {
-      LOG.info("Error in initializing ", e);
-    }
-  }
-
-  @Test
-  public void getComponentCategory() {
-    ComponentInfo componentInfo = metaInfo.getComponentCategory(STACK_NAME_HDP,
-        STACK_VERSION_HDP, SERVICE_NAME_HDFS, SERVICE_COMPONENT_NAME);
-    assertNotNull(componentInfo);
-    componentInfo = metaInfo.getComponentCategory(STACK_NAME_HDP,
-        STACK_VERSION_HDP, SERVICE_NAME_HDFS, "DATANODE1");
-    Assert.assertNotNull(componentInfo);
-    assertFalse(componentInfo.isClient());
-  }
-
-  @Test
-  public void getComponentsByService() {
-    List<ComponentInfo> components = metaInfo.getComponentsByService(
-        STACK_NAME_HDP, STACK_VERSION_HDP, SERVICE_NAME_HDFS);
-    assertNotNull(components);
-  }
-
-  @Test
-  public void getRepository() {
-    Map<String, List<RepositoryInfo>> repository = metaInfo.getRepository(
-        STACK_NAME_HDP, STACK_VERSION_HDP);
-    assertNotNull(repository);
-    assertFalse(repository.get("centos5").isEmpty());
-    assertFalse(repository.get("centos6").isEmpty());
-  }
-
-  @Test
-  public void isSupportedStack() {
-    boolean supportedStack = metaInfo.isSupportedStack(STACK_NAME_HDP,
-        STACK_VERSION_HDP);
-    assertTrue(supportedStack);
-  }
-
-  @Test
-  public void isValidService() {
-    boolean valid = metaInfo.isValidService(STACK_NAME_HDP, STACK_VERSION_HDP,
-        SERVICE_NAME_HDFS);
-    assertTrue(valid);
-  }
-
-  /**
-   * Method: getSupportedConfigs(String stackName, String version, String
-   * serviceName)
-   */
-  @Test
-  public void getSupportedConfigs() throws Exception {
-
-    Map<String, Map<String, String>> configsAll = metaInfo.getSupportedConfigs(
-        STACK_NAME_HDP, STACK_VERSION_HDP, SERVICE_NAME_HDFS);
-    Set<String> filesKeys = configsAll.keySet();
-    for (String file : filesKeys) {
-      Map<String, String> configs = configsAll.get(file);
-      Set<String> propertyKeys = configs.keySet();
-      assertNotNull(propertyKeys);
-      assertNotSame(propertyKeys.size(), 0);
-    }
-  }
-
-  @Test
-  public void testServiceNameUsingComponentName() {
-    String serviceName = metaInfo.getComponentToService(STACK_NAME_HDP,
-        STACK_VERSION_HDP, "NAMENODE");
-    assertTrue("HDFS".equals(serviceName));
-  }
-
-  /**
-   * Method: Map<String, ServiceInfo> getServices(String stackName, String
-   * version, String serviceName)
-   */
-  @Test
-  public void getServices() {
-    Map<String, ServiceInfo> services = metaInfo.getServices(STACK_NAME_HDP,
-        STACK_VERSION_HDP);
-    LOG.info("Getting all the services ");
-    for (Map.Entry<String, ServiceInfo> entry : services.entrySet()) {
-      LOG.info("Service Name " + entry.getKey() + " values " + entry.getValue());
-    }
-    assertTrue(services.containsKey("HDFS"));
-    assertTrue(services.containsKey("MAPREDUCE"));
-    assertNotNull(services);
-    assertNotSame(services.keySet().size(), 0);
-  }
-
-  /**
-   * Method: getServiceInfo(String stackName, String version, String
-   * serviceName)
-   */
-  @Test
-  public void getServiceInfo() throws Exception {
-    ServiceInfo si = metaInfo.getServiceInfo(STACK_NAME_HDP, STACK_VERSION_HDP,
-        SERVICE_NAME_HDFS);
-    assertNotNull(si);
-  }
-
-  /**
-   * Method: getSupportedServices(String stackName, String version)
-   */
-  @Test
-  public void getSupportedServices() throws Exception {
-    List<ServiceInfo> services = metaInfo.getSupportedServices(STACK_NAME_HDP,
-        STACK_VERSION_HDP);
-    assertNotNull(services);
-    assertNotSame(services.size(), 0);
-
-  }
-
-  @Test
-  public void testGetRepos() throws Exception {
-    Map<String, List<RepositoryInfo>> repos = metaInfo.getRepository(
-        STACK_NAME_HDP, STACK_VERSION_HDP);
-    Set<String> centos5Cnt = new HashSet<String>();
-    Set<String> centos6Cnt = new HashSet<String>();
-    Set<String> redhat6cnt = new HashSet<String>();
-
-    for (List<RepositoryInfo> vals : repos.values()) {
-      for (RepositoryInfo repo : vals) {
-        LOG.debug("Dumping repo info : " + repo.toString());
-        if (repo.getOsType().equals("centos5")) {
-          centos5Cnt.add(repo.getRepoId());
-        } else if (repo.getOsType().equals("centos6")) {
-          centos6Cnt.add(repo.getRepoId());
-        } else if (repo.getOsType().equals("redhat6")) {
-          redhat6cnt.add(repo.getRepoId());
-        } else {
-          fail("Found invalid os" + repo.getOsType());
-        }
-
-        if (repo.getRepoId().equals("epel")) {
-          assertFalse(repo.getMirrorsList().isEmpty());
-          assertNull(repo.getBaseUrl());
-        } else {
-          assertNull(repo.getMirrorsList());
-          assertFalse(repo.getBaseUrl().isEmpty());
-        }
-      }
-    }
-
-    assertEquals(3, centos5Cnt.size());
-    assertEquals(3, redhat6cnt.size());
-    assertEquals(3, centos6Cnt.size());
-  }
-
-  @Test
-  public void testMetaInfoFileFilter() throws Exception {
-    String buildDir = tmpFolder.getRoot().getAbsolutePath();
-    File stackRoot = new File("src/test/resources/stacks");
-    File stackRootTmp = new File(buildDir + "/ambari-metaInfo");
-    stackRootTmp.mkdir();
-    FileUtils.copyDirectory(stackRoot, stackRootTmp);
-    AmbariMetaInfo ambariMetaInfo = new AmbariMetaInfo(stackRootTmp);
-    File f1, f2, f3;
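-    // Seed the copied stack root with ".svn" artifacts: the hidden ".svn"
-    // entry must be filtered out, while the visible "abcd.svn" directory
-    // should still be picked up as a stack.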
-    f1 = new File(stackRootTmp.getAbsolutePath() + "/001.svn");
-    f1.createNewFile();
-    f2 = new File(stackRootTmp.getAbsolutePath() + "/abcd.svn/001.svn");
-    f2.mkdirs();
-    f2.createNewFile();
-    f3 = new File(stackRootTmp.getAbsolutePath() + "/.svn");
-    if (!f3.exists()) {
-      f3.createNewFile();
-    }
-    ambariMetaInfo.init();
-    // Tests the stack is loaded as expected
-    getServices();
-    getComponentsByService();
-    getComponentCategory();
-    getSupportedConfigs();
-    // Check .svn is not part of the stack but abcd.svn is
-    Assert.assertNotNull(ambariMetaInfo.getStackInfo("abcd.svn", "001.svn"));
-    Assert.assertNull(ambariMetaInfo.getStackInfo(".svn", ""));
-    Assert.assertNull(ambariMetaInfo.getServices(".svn", ""));
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaServiceTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaServiceTest.java
deleted file mode 100644
index 6707362..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaServiceTest.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-import junit.framework.Assert;
-
-import org.apache.ambari.server.state.ServiceInfo;
-import org.apache.ambari.server.state.StackInfo;
-import org.apache.ambari.server.utils.StageUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.codehaus.jackson.map.DeserializationConfig;
-import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jackson.type.TypeReference;
-import org.codehaus.jettison.json.JSONException;
-import org.junit.Test;
-
-import com.google.inject.AbstractModule;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.sun.jersey.api.client.Client;
-import com.sun.jersey.api.client.UniformInterfaceException;
-import com.sun.jersey.api.client.WebResource;
-import com.sun.jersey.api.client.config.ClientConfig;
-import com.sun.jersey.api.client.config.DefaultClientConfig;
-import com.sun.jersey.api.json.JSONConfiguration;
-import com.sun.jersey.spi.container.servlet.ServletContainer;
-import com.sun.jersey.test.framework.JerseyTest;
-import com.sun.jersey.test.framework.WebAppDescriptor;
-
-public class AmbariMetaServiceTest extends JerseyTest {
-  static String PACKAGE_NAME = "org.apache.ambari.server.api.services";
-  private static Log LOG = LogFactory.getLog(AmbariMetaServiceTest.class);
-  Injector injector;
-  protected Client client;
-
-  public AmbariMetaServiceTest() {
-    super(new WebAppDescriptor.Builder(PACKAGE_NAME).servletClass(ServletContainer.class)
-        .initParam("com.sun.jersey.api.json.POJOMappingFeature", "true")
-        .build());
-  }
-
-  public class MockModule extends AbstractModule {
-    File stackRoot = new File("src/test/resources/stacks");
-    AmbariMetaInfo ambariMetaInfo;
-    
-    public MockModule() throws Exception {
-      this.ambariMetaInfo = new AmbariMetaInfo(stackRoot);
-    }
-
-    @Override
-    protected void configure() {
-      bind(AmbariMetaInfo.class).toInstance(ambariMetaInfo);
-      requestStaticInjection(AmbariMetaService.class);     
-    }
-  }
-
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    injector = Guice.createInjector(new MockModule());
-    AmbariMetaInfo metainfo = injector.getInstance(AmbariMetaInfo.class);
-    metainfo.init();
-  }
-
-  @Test
-  public void testStacks() throws UniformInterfaceException, JSONException,
-    IOException {
-    ClientConfig clientConfig = new DefaultClientConfig();
-    clientConfig.getFeatures().put(JSONConfiguration.FEATURE_POJO_MAPPING, Boolean.TRUE);
-    client = Client.create(clientConfig);
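-    // JerseyTest (see the constructor) serves the scanned package on its
-    // default port 9998, so these are real HTTP round-trips through the
-    // JAX-RS wiring.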
-    WebResource webResource = client.resource("http://localhost:9998/stacks");
-    
-    String output = webResource.get(String.class);
-    LOG.info("All Stack Info \n" + output);
-    ObjectMapper mapper = new ObjectMapper();
-    mapper.configure(DeserializationConfig.Feature.FAIL_ON_UNKNOWN_PROPERTIES, 
-        false);
-    List<StackInfo> stackInfos = mapper.readValue(output,
-        new TypeReference<List<StackInfo>>(){});
-    StackInfo stackInfo = stackInfos.get(0);
-    Assert.assertEquals("HDP", stackInfo.getName());
-    webResource = client.resource("http://localhost:9998/stacks/" +
-        "HDP/version/0.1/services/HDFS");
-    output = webResource.get(String.class);
-    ServiceInfo info = mapper.readValue(output, ServiceInfo.class);
-    Assert.assertEquals("HDFS", info.getName());
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/BaseRequestTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/BaseRequestTest.java
deleted file mode 100644
index e34525e..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/BaseRequestTest.java
+++ /dev/null
@@ -1,153 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-import org.apache.ambari.server.api.predicate.InvalidQueryException;
-import org.apache.ambari.server.api.predicate.PredicateCompiler;
-import org.apache.ambari.server.controller.internal.TemporalInfoImpl;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.TemporalInfo;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.junit.Test;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.MultivaluedMap;
-import javax.ws.rs.core.UriInfo;
-import java.io.UnsupportedEncodingException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.net.URLEncoder;
-import java.util.Map;
-
-import static org.easymock.EasyMock.*;
-import static org.junit.Assert.*;
-
-/**
- * Base tests for service requests.
- */
-public abstract class BaseRequestTest {
-
-  public void testGetQueryPredicate(String uriString) throws Exception {
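-    // Everything after the '?' must be handed to the PredicateCompiler
-    // verbatim, and the compiled predicate returned unchanged.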
-    PredicateCompiler compiler = createStrictMock(PredicateCompiler.class);
-    Predicate p = createMock(Predicate.class);
-    UriInfo uriInfo = createMock(UriInfo.class);
-    URI uri = new URI(URLEncoder.encode(uriString, "UTF-8"));
-
-    expect(uriInfo.getRequestUri()).andReturn(uri);
-    expect(compiler.compile(uriString.substring(uriString.indexOf("?") + 1))).andReturn(p);
-
-    replay(uriInfo, compiler, p);
-
-    Request request = getTestRequest(null, null, uriInfo, compiler);
-
-    assertEquals(p, request.getQueryPredicate());
-
-    verify(uriInfo, compiler, p);
-  }
-
-  @Test
-  public void testGetQueryPredicate_noQueryString() throws Exception {
-    String uriString = "http://localhost.com:8080/api/v1/clusters";
-    PredicateCompiler compiler = createStrictMock(PredicateCompiler.class);
-    UriInfo uriInfo = createMock(UriInfo.class);
-    URI uri = new URI(URLEncoder.encode(uriString, "UTF-8"));
-
-    expect(uriInfo.getRequestUri()).andReturn(uri);
-
-    replay(uriInfo, compiler);
-
-    Request request = getTestRequest(null, null, uriInfo, compiler);
-
-    assertEquals(null, request.getQueryPredicate());
-
-    verify(uriInfo, compiler);
-  }
-
-  @Test
-  public void testGetQueryPredicate_invalidQuery() throws Exception {
-    String uriString = "http://localhost.com:8080/api/v1/clusters?&foo|";
-    PredicateCompiler compiler = createStrictMock(PredicateCompiler.class);
-    UriInfo uriInfo = createMock(UriInfo.class);
-    URI uri = new URI(URLEncoder.encode(uriString, "UTF-8"));
-
-    expect(uriInfo.getRequestUri()).andReturn(uri);
-    expect(compiler.compile(uriString.substring(uriString.indexOf("?") + 1))).
-        andThrow(new InvalidQueryException("test"));
-    replay(uriInfo, compiler);
-
-    Request request = getTestRequest(null, null, uriInfo, compiler);
-
-    try {
-      request.getQueryPredicate();
-      fail("Expected InvalidQueryException due to invalid query");
-    } catch (InvalidQueryException e) {
-      //expected
-    }
-
-    verify(uriInfo, compiler);
-  }
-
-  public void testGetFields(String fields) {
-    UriInfo uriInfo = createMock(UriInfo.class);
-    MultivaluedMap<String, String> mapQueryParams = createMock(MultivaluedMap.class);
-    Request request = getTestRequest(null, null, uriInfo, null);
-
-    expect(uriInfo.getQueryParameters()).andReturn(mapQueryParams);
-    expect(mapQueryParams.getFirst("fields")).andReturn(fields);
-
-    replay(uriInfo, mapQueryParams);
-
-    Map<String, TemporalInfo> mapFields = request.getFields();
-
-    assertEquals(7, mapFields.size());
-
-    String prop = "prop";
-    assertTrue(mapFields.containsKey(prop));
-    assertNull(mapFields.get(prop));
-
-    String prop1 = PropertyHelper.getPropertyId("category", "prop1");
-    assertTrue(mapFields.containsKey(prop1));
-    assertNull(mapFields.get(prop1));
-
-    String prop2 = PropertyHelper.getPropertyId("category2/category3", "prop2");
-    assertTrue(mapFields.containsKey(prop2));
-    assertEquals(new TemporalInfoImpl(1, 2, 3), mapFields.get(prop2));
-
-    String prop3 = "prop3";
-    assertTrue(mapFields.containsKey(prop3));
-    assertEquals(new TemporalInfoImpl(4, 5, 6), mapFields.get(prop3));
-
-    String category4 = "category4";
-    assertTrue(mapFields.containsKey(category4));
-    assertEquals(new TemporalInfoImpl(7, 8, 9), mapFields.get(category4));
-
-    String subResource = PropertyHelper.getPropertyId("sub-resource", "*");
-    assertTrue(mapFields.containsKey(subResource));
-    assertEquals(new TemporalInfoImpl(10, 11, 12), mapFields.get(subResource));
-
-    String finalProp = "finalProp";
-    assertTrue(mapFields.containsKey(finalProp));
-    assertNull(mapFields.get(finalProp));
-
-    verify(uriInfo, mapQueryParams);
-  }
-
-   protected abstract Request getTestRequest(HttpHeaders headers, String body,
-                                             UriInfo uriInfo, PredicateCompiler compiler);
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/BaseServiceTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/BaseServiceTest.java
deleted file mode 100644
index 794f396..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/BaseServiceTest.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-import org.apache.ambari.server.api.handlers.RequestHandler;
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.apache.ambari.server.api.services.serializers.ResultSerializer;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
-
-import static org.easymock.EasyMock.*;
-import static org.junit.Assert.assertEquals;
-
-/**
- * Base class for service unit tests.
- */
-public abstract class BaseServiceTest {
-
-  private ResourceInstance resourceInstance = createNiceMock(ResourceInstance.class);
-  private RequestFactory requestFactory = createStrictMock(RequestFactory.class);
-  private ResultPostProcessor resultProcessor = createStrictMock(ResultPostProcessor.class);
-  private Request request = createNiceMock(Request.class);
-  private RequestHandler requestHandler = createStrictMock(RequestHandler.class);
-  private Result result = createNiceMock(Result.class);
-  private ResultStatus status = createNiceMock(ResultStatus.class);
-  private HttpHeaders httpHeaders = createNiceMock(HttpHeaders.class);
-  private UriInfo uriInfo = createNiceMock(UriInfo.class);
-  private ResultSerializer serializer = createStrictMock(ResultSerializer.class);
-  private Object serializedResult = new Object();
-
-  public ResourceInstance getResource() {
-    return resourceInstance;
-  }
-
-  public RequestFactory getRequestFactory() {
-    return requestFactory;
-  }
-
-  public ResultPostProcessor getResultProcessor() {
-    return resultProcessor;
-  }
-
-  public Request getRequest() {
-    return request;
-  }
-
-  public RequestHandler getRequestHandler() {
-    return requestHandler;
-  }
-
-  public Result getResult() {
-    return result;
-  }
-
-  public ResultStatus getStatus() {
-    return status;
-  }
-
-  public HttpHeaders getHttpHeaders() {
-    return httpHeaders;
-  }
-
-  public UriInfo getUriInfo() {
-    return uriInfo;
-  }
-
-  public ResultSerializer getSerializer() {
-    return serializer;
-  }
-
-  public Object getSerializedResult() {
-    return serializedResult;
-  }
-
-  protected void registerExpectations(Request.Type type, String body, int statusCode, boolean isErrorState) {
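-    // Mock choreography shared by all service tests: the factory builds the
-    // request, the handler returns a result with the given status, the post
-    // processor runs only on success, and the serializer always runs.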
-    expect(requestFactory.createRequest(eq(httpHeaders), body == null ? isNull(String.class) : eq(body), eq(uriInfo), eq(type),
-        eq(resourceInstance))).andReturn(request);
-
-    expect(request.getRequestType()).andReturn(type).anyTimes();
-    expect(request.getResultSerializer()).andReturn(serializer).anyTimes();
-    expect(requestHandler.handleRequest(request)).andReturn(result);
-    expect(result.getStatus()).andReturn(status).anyTimes();
-    expect(status.isErrorState()).andReturn(isErrorState).anyTimes();
-    expect(status.getStatusCode()).andReturn(statusCode);
-    if (!isErrorState) {
-      expect(request.getResultPostProcessor()).andReturn(resultProcessor);
-      resultProcessor.process(result);
-    }
-
-    expect(serializer.serialize(result)).andReturn(serializedResult);
-
-  }
-
-  protected void replayMocks() {
-    replay(resourceInstance, requestFactory, resultProcessor, request, status, requestHandler,
-        result, httpHeaders, uriInfo, serializer);
-  }
-
-
-  protected void verifyResults(Response response, int statusCode) {
-    assertEquals(getSerializedResult(), response.getEntity());
-    assertEquals(statusCode, response.getStatus());
-
-    verify(resourceInstance, requestFactory, resultProcessor, request, status, requestHandler,
-        result, httpHeaders, uriInfo, serializer);
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/ClusterServiceTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/ClusterServiceTest.java
deleted file mode 100644
index aa7e966..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/ClusterServiceTest.java
+++ /dev/null
@@ -1,206 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-import org.apache.ambari.server.api.handlers.RequestHandler;
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.junit.Test;
-
-import javax.ws.rs.core.Response;
-
-import static org.junit.Assert.assertEquals;
-
-
-/**
- * Unit tests for ClusterService.
- */
-public class ClusterServiceTest extends BaseServiceTest {
-
-  @Test
-  public void testGetCluster() {
-    String clusterName = "clusterName";
-
-    registerExpectations(Request.Type.GET, null, 200, false);
-    replayMocks();
-
-    //test
-    ClusterService clusterService = new TestClusterService(getResource(), clusterName, getRequestFactory(), getRequestHandler());
-    Response response = clusterService.getCluster(getHttpHeaders(), getUriInfo(), clusterName);
-    verifyResults(response, 200);
-  }
-
-  @Test
-  public void testGetCluster__ErrorState() {
-    String clusterName = "clusterName";
-
-    registerExpectations(Request.Type.GET, null, 500, true);
-    replayMocks();
-
-    //test
-    ClusterService clusterService = new TestClusterService(getResource(), clusterName, getRequestFactory(), getRequestHandler());
-    Response response = clusterService.getCluster(getHttpHeaders(), getUriInfo(), clusterName);
-    verifyResults(response, 500);
-  }
-
-  @Test
-  public void testGetClusters() {
-
-    registerExpectations(Request.Type.GET, null, 200, false);
-    replayMocks();
-
-    //test
-    ClusterService clusterService = new TestClusterService(getResource(), null, getRequestFactory(), getRequestHandler());
-    Response response = clusterService.getClusters(getHttpHeaders(), getUriInfo());
-
-    verifyResults(response, 200);
-  }
-
-  @Test
-  public void testGetClusters__ErrorState() {
-    registerExpectations(Request.Type.GET, null, 500, true);
-    replayMocks();
-
-    //test
-    ClusterService clusterService = new TestClusterService(getResource(), null, getRequestFactory(), getRequestHandler());
-    Response response = clusterService.getClusters(getHttpHeaders(), getUriInfo());
-
-    verifyResults(response, 500);
-  }
-
-  @Test
-  public void testCreateCluster() {
-    String clusterName = "clusterName";
-    String body = "body";
-
-    registerExpectations(Request.Type.POST, body, 201, false);
-    replayMocks();
-
-    //test
-    ClusterService clusterService = new TestClusterService(getResource(), clusterName, getRequestFactory(), getRequestHandler());
-    Response response = clusterService.createCluster(body, getHttpHeaders(), getUriInfo(), clusterName);
-
-    verifyResults(response, 201);
-  }
-
-  @Test
-  public void testCreateCluster__ErrorState() {
-    String clusterName = "clusterName";
-    String body = "body";
-
-    registerExpectations(Request.Type.POST, body, 500, true);
-    replayMocks();
-
-    //test
-    ClusterService clusterService = new TestClusterService(getResource(), clusterName, getRequestFactory(), getRequestHandler());
-    Response response = clusterService.createCluster(body, getHttpHeaders(), getUriInfo(), clusterName);
-
-    verifyResults(response, 500);
-  }
-
-  @Test
-  public void testUpdateCluster() {
-    String clusterName = "clusterName";
-    String body = "body";
-
-    registerExpectations(Request.Type.PUT, body, 200, false);
-    replayMocks();
-
-    //test
-    ClusterService clusterService = new TestClusterService(getResource(), clusterName, getRequestFactory(), getRequestHandler());
-    Response response = clusterService.updateCluster(body, getHttpHeaders(), getUriInfo(), clusterName);
-
-    verifyResults(response, 200);
-  }
-
-  @Test
-  public void testUpdateCluster__ErrorState() {
-    String clusterName = "clusterName";
-    String body = "body";
-
-    registerExpectations(Request.Type.PUT, body, 500, true);
-    replayMocks();
-
-    //test
-    ClusterService clusterService = new TestClusterService(getResource(), clusterName, getRequestFactory(), getRequestHandler());
-    Response response = clusterService.updateCluster(body, getHttpHeaders(), getUriInfo(), clusterName);
-
-    verifyResults(response, 500);
-  }
-
-  @Test
-  public void testDeleteCluster() {
-    String clusterName = "clusterName";
-
-    registerExpectations(Request.Type.DELETE, null, 200, false);
-    replayMocks();
-
-    //test
-    ClusterService clusterService = new TestClusterService(getResource(), clusterName, getRequestFactory(), getRequestHandler());
-    Response response = clusterService.deleteCluster(getHttpHeaders(), getUriInfo(), clusterName);
-
-    verifyResults(response, 200);
-  }
-
-  @Test
-  public void testDeleteCluster__ErrorState() {
-    String clusterName = "clusterName";
-
-    registerExpectations(Request.Type.DELETE, null, 500, true);
-    replayMocks();
-
-    //test
-    ClusterService clusterService = new TestClusterService(getResource(), clusterName, getRequestFactory(), getRequestHandler());
-    Response response = clusterService.deleteCluster(getHttpHeaders(), getUriInfo(), clusterName);
-
-    verifyResults(response, 500);
-  }
-
-  private class TestClusterService extends ClusterService {
-    private RequestFactory m_requestFactory;
-    private RequestHandler m_requestHandler;
-    private ResourceInstance m_resourceDef;
-    private String m_clusterId;
-
-    private TestClusterService(ResourceInstance resourceDef, String clusterId, RequestFactory requestFactory,
-                               RequestHandler handler) {
-      m_resourceDef = resourceDef;
-      m_requestFactory = requestFactory;
-      m_requestHandler = handler;
-      m_clusterId = clusterId;
-    }
-
-    @Override
-    ResourceInstance createClusterResource(String clusterName) {
-      assertEquals(m_clusterId, clusterName);
-      return m_resourceDef;
-    }
-
-    @Override
-    RequestFactory getRequestFactory() {
-      return m_requestFactory;
-    }
-
-    @Override
-    RequestHandler getRequestHandler(Request.Type requestType) {
-      return m_requestHandler;
-    }
-  }
-
-  //todo: test getHostHandler, getServiceHandler, getHostComponentHandler
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/ComponentServiceTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/ComponentServiceTest.java
deleted file mode 100644
index 76d8106..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/ComponentServiceTest.java
+++ /dev/null
@@ -1,277 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-package org.apache.ambari.server.api.services;
-
-
-import org.apache.ambari.server.api.handlers.RequestHandler;
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.junit.Test;
-
-import javax.ws.rs.core.Response;
-
-import static org.junit.Assert.assertEquals;
-
-/**
- * Unit tests for ComponentService.
- */
-public class ComponentServiceTest extends BaseServiceTest {
-
-  @Test
-  public void testGetComponent()  {
-    String clusterName = "clusterName";
-    String serviceName = "serviceName";
-    String componentName = "componentName";
-
-    registerExpectations(Request.Type.GET, null, 200, false);
-    replayMocks();
-
-    //test
-    ComponentService componentService = new TestComponentService(getResource(), clusterName, serviceName, componentName,
-        getRequestFactory(), getRequestHandler());
-
-    Response response = componentService.getComponent(getHttpHeaders(), getUriInfo(), componentName);
-    verifyResults(response, 200);
-  }
-
-  @Test
-  public void testGetComponent__ErrorState()  {
-    String clusterName = "clusterName";
-    String serviceName = "serviceName";
-    String componentName = "componentName";
-
-    registerExpectations(Request.Type.GET, null, 404, true);
-    replayMocks();
-
-    //test
-    ComponentService componentService = new TestComponentService(getResource(), clusterName, serviceName, componentName,
-        getRequestFactory(), getRequestHandler());
-
-    Response response = componentService.getComponent(getHttpHeaders(), getUriInfo(), componentName);
-    verifyResults(response, 404);
-  }
-
-  @Test
-  public void testGetComponents() {
-    String clusterName = "clusterName";
-    String serviceName = "serviceName";
-
-    registerExpectations(Request.Type.GET, null, 200, false);
-    replayMocks();
-
-    //test
-    ComponentService componentService = new TestComponentService(getResource(), clusterName, serviceName, null,
-        getRequestFactory(), getRequestHandler());
-
-    Response response = componentService.getComponents(getHttpHeaders(), getUriInfo());
-    verifyResults(response, 200);
-  }
-
-  @Test
-  public void testGetComponents__ErrorState() {
-    String clusterName = "clusterName";
-    String serviceName = "serviceName";
-
-    registerExpectations(Request.Type.GET, null, 500, true);
-    replayMocks();
-
-    //test
-    ComponentService componentService = new TestComponentService(getResource(), clusterName, serviceName, null,
-        getRequestFactory(), getRequestHandler());
-
-    Response response = componentService.getComponents(getHttpHeaders(), getUriInfo());
-    verifyResults(response, 500);
-  }
-
-  @Test
-  public void testCreateComponent() {
-    String clusterName = "clusterName";
-    String serviceName = "serviceName";
-    String componentName = "componentName";
-    String body = "{body}";
-
-    registerExpectations(Request.Type.POST, body, 201, false);
-    replayMocks();
-
-    //test
-    ComponentService componentService = new TestComponentService(getResource(), clusterName, serviceName, componentName,
-        getRequestFactory(), getRequestHandler());
-
-    Response response = componentService.createComponent(body, getHttpHeaders(), getUriInfo(), componentName);
-    verifyResults(response, 201);
-  }
-
-  @Test
-  public void testCreateComponent__ErrorState() {
-    String clusterName = "clusterName";
-    String serviceName = "serviceName";
-    String componentName = "componentName";
-    String body = "{body}";
-
-    registerExpectations(Request.Type.POST, body, 500, true);
-    replayMocks();
-
-    //test
-    ComponentService componentService = new TestComponentService(getResource(), clusterName, serviceName, componentName,
-        getRequestFactory(), getRequestHandler());
-
-    Response response = componentService.createComponent(body, getHttpHeaders(), getUriInfo(), componentName);
-    verifyResults(response, 500);
-  }
-
-  @Test
-  public void testUpdateComponent() {
-    String clusterName = "clusterName";
-    String serviceName = "serviceName";
-    String componentName = "componentName";
-    String body = "{body}";
-
-    registerExpectations(Request.Type.PUT, body, 200, false);
-    replayMocks();
-
-    //test
-    ComponentService componentService = new TestComponentService(getResource(), clusterName, serviceName, componentName,
-        getRequestFactory(), getRequestHandler());
-
-    Response response = componentService.updateComponent(body, getHttpHeaders(), getUriInfo(), componentName);
-    verifyResults(response, 200);
-  }
-
-  @Test
-  public void testUpdateComponent__ErrorState() {
-    String clusterName = "clusterName";
-    String serviceName = "serviceName";
-    String componentName = "componentName";
-    String body = "{body}";
-
-    registerExpectations(Request.Type.PUT, body, 500, true);
-    replayMocks();
-
-    //test
-    ComponentService componentService = new TestComponentService(getResource(), clusterName, serviceName, componentName,
-        getRequestFactory(), getRequestHandler());
-
-    Response response = componentService.updateComponent(body, getHttpHeaders(), getUriInfo(), componentName);
-    verifyResults(response, 500);
-  }
-
-  @Test
-  public void testUpdateComponents() {
-    String clusterName = "clusterName";
-    String serviceName = "serviceName";
-    String body = "{body}";
-
-    registerExpectations(Request.Type.PUT, body, 200, false);
-    replayMocks();
-
-    //test
-    ComponentService componentService = new TestComponentService(getResource(), clusterName, serviceName, null,
-        getRequestFactory(), getRequestHandler());
-
-    Response response = componentService.updateComponents(body, getHttpHeaders(), getUriInfo());
-    verifyResults(response, 200);
-  }
-
-  @Test
-  public void testUpdateComponents__ErrorState() {
-    String clusterName = "clusterName";
-    String serviceName = "serviceName";
-    String body = "{body}";
-
-    registerExpectations(Request.Type.PUT, body, 500, true);
-    replayMocks();
-
-    //test
-    ComponentService componentService = new TestComponentService(getResource(), clusterName, serviceName, null,
-        getRequestFactory(), getRequestHandler());
-
-    Response response = componentService.updateComponents(body, getHttpHeaders(), getUriInfo());
-    verifyResults(response, 500);
-  }
-
-  @Test
-  public void testDeleteComponent() {
-    String clusterName = "clusterName";
-    String serviceName = "serviceName";
-
-    registerExpectations(Request.Type.DELETE, null, 200, false);
-    replayMocks();
-
-    //test
-    ComponentService componentService = new TestComponentService(getResource(), clusterName, serviceName, null,
-        getRequestFactory(), getRequestHandler());
-
-    Response response = componentService.deleteComponent(getHttpHeaders(), getUriInfo(), null);
-    verifyResults(response, 200);
-  }
-
-  @Test
-  public void testDeleteComponent__ErrorState() {
-    String clusterName = "clusterName";
-    String serviceName = "serviceName";
-
-    registerExpectations(Request.Type.DELETE, null, 500, true);
-    replayMocks();
-
-    //test
-    ComponentService componentService = new TestComponentService(getResource(), clusterName, serviceName, null,
-        getRequestFactory(), getRequestHandler());
-
-    Response response = componentService.deleteComponent(getHttpHeaders(), getUriInfo(), null);
-    verifyResults(response, 500);
-  }
-
-  private class TestComponentService extends ComponentService {
-    private RequestFactory m_requestFactory;
-    private RequestHandler m_requestHandler;
-    private ResourceInstance m_resource;
-    private String m_clusterId;
-    private String m_serviceId;
-    private String m_componentId;
-
-    private TestComponentService(ResourceInstance resourceDef, String clusterId, String serviceId, String componentId,
-                                 RequestFactory requestFactory, RequestHandler handler) {
-      super(clusterId, serviceId);
-      m_requestFactory = requestFactory;
-      m_requestHandler = handler;
-      m_resource = resourceDef;
-      m_clusterId = clusterId;
-      m_serviceId = serviceId;
-      m_componentId = componentId;
-    }
-
-    @Override
-    ResourceInstance createComponentResource(String clusterName, String serviceName, String componentName) {
-      assertEquals(m_clusterId, clusterName);
-      assertEquals(m_serviceId, serviceName);
-      assertEquals(m_componentId, componentName);
-      return m_resource;
-    }
-
-    @Override
-    RequestFactory getRequestFactory() {
-      return m_requestFactory;
-    }
-
-    @Override
-    RequestHandler getRequestHandler(Request.Type requestType) {
-      return m_requestHandler;
-    }
-  }
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/ConfigurationServiceTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/ConfigurationServiceTest.java
deleted file mode 100644
index 23b4025..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/ConfigurationServiceTest.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.api.services;
-
-import static org.junit.Assert.assertEquals;
-
-import javax.ws.rs.core.Response;
-
-import org.apache.ambari.server.api.handlers.RequestHandler;
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.junit.Test;
-
-
-public class ConfigurationServiceTest extends BaseServiceTest {
-  
-  @Test
-  public void testCreateConfiguration() throws Exception {
-    String clusterName = "clusterName";
-
-    String body = "{ \"type\":\"hdfs-site\", \"tag\":\"my-new-tag\"," +
-        "\"properties\":{ \"key1\":\"value1\", \"key2\":\"value2\" } }";
-
-    registerExpectations(Request.Type.POST, body, 200, false);
-    replayMocks();
-
-    //test
-    ConfigurationService configService = new TestConfigurationService(getResource(), clusterName, getRequestFactory(), getRequestHandler());
-    Response response = configService.createConfigurations(body, getHttpHeaders(), getUriInfo());
-
-    verifyResults(response, 200);
-  }
-  
-  private class TestConfigurationService extends ConfigurationService {
-    private RequestFactory m_requestFactory;
-    private RequestHandler m_requestHandler;
-    private ResourceInstance m_resourceInstance;
-    private String m_clusterId;
-
-    private TestConfigurationService(ResourceInstance resourceInstance, String clusterId, RequestFactory requestFactory,
-                                     RequestHandler handler) {
-      super(clusterId);
-      m_resourceInstance = resourceInstance;
-      m_clusterId = clusterId;
-      m_requestFactory = requestFactory;
-      m_requestHandler = handler;
-    }
-
-    @Override
-    ResourceInstance createConfigurationResource(String clusterName) {
-      assertEquals(m_clusterId, clusterName);
-      return m_resourceInstance;
-    }
-
-    @Override
-    RequestFactory getRequestFactory() {
-      return m_requestFactory;
-    }
-
-    @Override
-    RequestHandler getRequestHandler(Request.Type requestType) {
-      return m_requestHandler;
-    }    
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/DeleteRequestTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/DeleteRequestTest.java
deleted file mode 100644
index 1bacc5a..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/DeleteRequestTest.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-import org.apache.ambari.server.api.predicate.PredicateCompiler;
-import org.junit.Test;
-
-import static org.junit.Assert.*;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.UriInfo;
-
-/**
- * DeleteRequest unit tests
- */
-public class DeleteRequestTest extends BaseRequestTest {
-  @Test
-  public void testRequestType() {
-    assertSame(Request.Type.DELETE, new DeleteRequest(null, null, null, null).getRequestType());
-  }
-
-  @Test
-  public void testGetQueryPredicate() throws Exception {
-    String uri = "http://foo.bar.com/api/v1/clusters?foo=bar&orProp1=5|orProp2!=6|orProp3<100&prop!=5&prop2>10&prop3>=20&prop4<500&prop5<=1&fields=field1,category/field2";
-    super.testGetQueryPredicate(uri);
-  }
-
-  @Test
-  public void testGetFields() {
-    String fields = "prop,category/prop1,category2/category3/prop2[1,2,3],prop3[4,5,6],category4[7,8,9],sub-resource/*[10,11,12],finalProp";
-    super.testGetFields(fields);
-  }
-
-  protected Request getTestRequest(HttpHeaders headers, String body, UriInfo uriInfo, final PredicateCompiler compiler) {
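-    // Anonymous subclass lets the BaseRequestTest cases inject a mock compiler.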
-    return new DeleteRequest(headers, body, uriInfo, null) {
-      @Override
-      protected PredicateCompiler getPredicateCompiler() {
-        return compiler;
-      }
-    };
-  }
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/GetRequestTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/GetRequestTest.java
deleted file mode 100644
index e25e53b..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/GetRequestTest.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-import org.apache.ambari.server.api.predicate.PredicateCompiler;
-import org.junit.Test;
-
-import static org.junit.Assert.*;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.UriInfo;
-
-/**
- * GetRequest unit tests.
- */
-public class GetRequestTest extends BaseRequestTest {
-
-  @Test
-  public void testRequestType() {
-    assertSame(Request.Type.GET, new GetRequest(null, null, null, null).getRequestType());
-  }
-
-  @Test
-  public void testGetQueryPredicate() throws Exception {
-    String uri = "http://foo.bar.com/api/v1/clusters?foo=bar&orProp1=5|orProp2!=6|orProp3<100&prop!=5&prop2>10&prop3>=20&prop4<500&prop5<=1&fields=field1,category/field2";
-    super.testGetQueryPredicate(uri);
-  }
-
-  @Test
-  public void testGetFields() {
-    String fields = "prop,category/prop1,category2/category3/prop2[1,2,3],prop3[4,5,6],category4[7,8,9],sub-resource/*[10,11,12],finalProp";
-    super.testGetFields(fields);
-  }
-
-  protected Request getTestRequest(HttpHeaders headers, String body, UriInfo uriInfo, final PredicateCompiler compiler) {
-    return new GetRequest(headers, body, uriInfo, null) {
-      @Override
-      protected PredicateCompiler getPredicateCompiler() {
-        return compiler;
-      }
-    };
-  }
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/HostComponentServiceTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/HostComponentServiceTest.java
deleted file mode 100644
index 49716f5..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/HostComponentServiceTest.java
+++ /dev/null
@@ -1,279 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-package org.apache.ambari.server.api.services;
-
-import org.apache.ambari.server.api.handlers.RequestHandler;
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.junit.Test;
-
-import javax.ws.rs.core.Response;
-
-import static org.junit.Assert.assertEquals;
-
-/**
- * Unit tests for HostComponentService.
- */
-public class HostComponentServiceTest extends BaseServiceTest {
-  @Test
-  public void testGetHostComponent() {
-    String clusterName = "clusterName";
-    String hostName = "hostName";
-    String hostComponentName = "hostComponentName";
-
-    registerExpectations(Request.Type.GET, null, 200, false);
-    replayMocks();
-
-    //test
-    HostComponentService hostComponentService = new TestHostComponentService(getResource(), clusterName,
-        hostName, hostComponentName, getRequestFactory(), getRequestHandler());
-
-    Response response = hostComponentService.getHostComponent(getHttpHeaders(), getUriInfo(), hostComponentName);
-    verifyResults(response, 200);
-  }
-
-  @Test
-  public void testGetHostComponent__ErrorState() {
-    String clusterName = "clusterName";
-    String hostName = "hostName";
-    String hostComponentName = "hostComponentName";
-
-    registerExpectations(Request.Type.GET, null, 404, true);
-    replayMocks();
-
-    //test
-    HostComponentService hostComponentService = new TestHostComponentService(getResource(), clusterName,
-        hostName, hostComponentName, getRequestFactory(), getRequestHandler());
-
-    Response response = hostComponentService.getHostComponent(getHttpHeaders(), getUriInfo(), hostComponentName);
-    verifyResults(response, 404);
-  }
-
-  @Test
-  public void testGetHostComponents() {
-    String clusterName = "clusterName";
-    String hostName = "hostName";
-
-    registerExpectations(Request.Type.GET, null, 200, false);
-    replayMocks();
-
-    //test
-    HostComponentService hostComponentService = new TestHostComponentService(getResource(), clusterName,
-        hostName, null, getRequestFactory(), getRequestHandler());
-
-    Response response = hostComponentService.getHostComponents(getHttpHeaders(), getUriInfo());
-    verifyResults(response, 200);
-  }
-
-  @Test
-  public void testGetHostComponents__ErrorState() {
-    String clusterName = "clusterName";
-    String hostName = "hostName";
-
-    registerExpectations(Request.Type.GET, null, 500, true);
-    replayMocks();
-
-    //test
-    HostComponentService hostComponentService = new TestHostComponentService(getResource(), clusterName,
-        hostName, null, getRequestFactory(), getRequestHandler());
-
-    Response response = hostComponentService.getHostComponents(getHttpHeaders(), getUriInfo());
-    verifyResults(response, 500);
-  }
-
-  @Test
-  public void testCreateHostComponent() {
-    String clusterName = "clusterName";
-    String hostName = "hostName";
-    String hostComponentName = "hostComponentName";
-    String body = "body";
-
-    registerExpectations(Request.Type.POST, body, 201, false);
-    replayMocks();
-
-    //test
-    HostComponentService hostComponentService = new TestHostComponentService(getResource(), clusterName,
-        hostName, hostComponentName, getRequestFactory(), getRequestHandler());
-
-    Response response = hostComponentService.createHostComponent(body, getHttpHeaders(), getUriInfo(), hostComponentName);
-    verifyResults(response, 201);
-  }
-
-  @Test
-  public void testCreateHostComponent__ErrorState() {
-    String clusterName = "clusterName";
-    String hostName = "hostName";
-    String hostComponentName = "hostComponentName";
-    String body = "body";
-
-    registerExpectations(Request.Type.POST, body, 500, true);
-    replayMocks();
-
-    //test
-    HostComponentService hostComponentService = new TestHostComponentService(getResource(), clusterName,
-        hostName, hostComponentName, getRequestFactory(), getRequestHandler());
-
-    Response response = hostComponentService.createHostComponent(body, getHttpHeaders(), getUriInfo(), hostComponentName);
-    verifyResults(response, 500);
-  }
-
-  @Test
-  public void testUpdateHostComponent() {
-    String clusterName = "clusterName";
-    String hostName = "hostName";
-    String hostComponentName = "hostComponentName";
-    String body = "body";
-
-    registerExpectations(Request.Type.PUT, body, 200, false);
-    replayMocks();
-
-    //test
-    HostComponentService hostComponentService = new TestHostComponentService(getResource(), clusterName,
-        hostName, hostComponentName, getRequestFactory(), getRequestHandler());
-
-    Response response = hostComponentService.updateHostComponent(body, getHttpHeaders(), getUriInfo(), hostComponentName);
-    verifyResults(response, 200);
-  }
-
-  @Test
-  public void testUpdateHostComponent__ErrorState() {
-    String clusterName = "clusterName";
-    String hostName = "hostName";
-    String hostComponentName = "hostComponentName";
-    String body = "body";
-
-    registerExpectations(Request.Type.PUT, body, 500, true);
-    replayMocks();
-
-    //test
-    HostComponentService hostComponentService = new TestHostComponentService(getResource(), clusterName,
-        hostName, hostComponentName, getRequestFactory(), getRequestHandler());
-
-    Response response = hostComponentService.updateHostComponent(body, getHttpHeaders(), getUriInfo(), hostComponentName);
-    verifyResults(response, 500);
-  }
-
-  @Test
-  public void testUpdateHostComponents() {
-    String clusterName = "clusterName";
-    String hostName = "hostName";
-    String body = "body";
-
-    registerExpectations(Request.Type.PUT, body, 200, false);
-    replayMocks();
-
-    //test
-    HostComponentService hostComponentService = new TestHostComponentService(getResource(), clusterName,
-        hostName, null, getRequestFactory(), getRequestHandler());
-
-    Response response = hostComponentService.updateHostComponents(body, getHttpHeaders(), getUriInfo());
-    verifyResults(response, 200);
-  }
-
-  @Test
-  public void testUpdateHostComponents__ErrorState() {
-    String clusterName = "clusterName";
-    String hostName = "hostName";
-    String body = "body";
-
-    registerExpectations(Request.Type.PUT, body, 500, true);
-    replayMocks();
-
-    //test
-    HostComponentService hostComponentService = new TestHostComponentService(getResource(), clusterName,
-        hostName, null, getRequestFactory(), getRequestHandler());
-
-    Response response = hostComponentService.updateHostComponents(body, getHttpHeaders(), getUriInfo());
-    verifyResults(response, 500);
-  }
-
-  @Test
-  public void testDeleteHostComponent() {
-    String clusterName = "clusterName";
-    String hostName = "hostName";
-    String hostComponentName = "hostComponentName";
-
-    registerExpectations(Request.Type.DELETE, null, 200, false);
-    replayMocks();
-
-    //test
-    HostComponentService hostComponentService = new TestHostComponentService(getResource(), clusterName,
-        hostName, hostComponentName, getRequestFactory(), getRequestHandler());
-
-    Response response = hostComponentService.deleteHostComponent(getHttpHeaders(), getUriInfo(), hostComponentName);
-    verifyResults(response, 200);
-  }
-
-  @Test
-  public void testDeleteHostComponent__ErrorState() {
-    String clusterName = "clusterName";
-    String hostName = "hostName";
-    String hostComponentName = "hostComponentName";
-
-    registerExpectations(Request.Type.DELETE, null, 500, true);
-    replayMocks();
-
-    //test
-    HostComponentService hostComponentService = new TestHostComponentService(getResource(), clusterName,
-        hostName, hostComponentName, getRequestFactory(), getRequestHandler());
-
-    Response response = hostComponentService.deleteHostComponent(getHttpHeaders(), getUriInfo(), hostComponentName);
-    verifyResults(response, 500);
-  }
-
-  private class TestHostComponentService extends HostComponentService {
-    private RequestFactory m_requestFactory;
-    private RequestHandler m_requestHandler;
-    private ResourceInstance m_resourceDef;
-    private String m_clusterId;
-    private String m_hostId;
-    private String m_hostComponentId;
-
-    private TestHostComponentService(ResourceInstance resourceDef, String clusterId, String hostId, String hostComponentId,
-                                     RequestFactory requestFactory, RequestHandler handler) {
-      super(clusterId, hostId);
-      m_resourceDef = resourceDef;
-      m_clusterId = clusterId;
-      m_hostId = hostId;
-      m_hostComponentId = hostComponentId;
-      m_requestFactory = requestFactory;
-      m_requestHandler = handler;
-    }
-
-
-    @Override
-    ResourceInstance createHostComponentResource(String clusterName, String hostName, String hostComponentName) {
-      assertEquals(m_clusterId, clusterName);
-      assertEquals(m_hostId, hostName);
-      assertEquals(m_hostComponentId, hostComponentName);
-      return m_resourceDef;
-    }
-
-    @Override
-    RequestFactory getRequestFactory() {
-      return m_requestFactory;
-    }
-
-    @Override
-    RequestHandler getRequestHandler(Request.Type requestType) {
-      return m_requestHandler;
-    }
-  }
-
-}
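
Note: the host-component service test above (and the sibling service tests that follow) all rely on the same seam: the service under test exposes package-private factory hooks (getRequestFactory, getRequestHandler, create*Resource), and a test subclass overrides them to hand back the mocks wired up by BaseServiceTest. A self-contained sketch of that pattern, with all names illustrative rather than Ambari API:

    // Production code reaches collaborators only through overridable hooks,
    // so a test subclass can substitute canned implementations. Illustrative only.
    public class HookInjectionSketch {

      interface Handler {
        int handle(String body);
      }

      static class MiniService {
        // Package-private hook; production wiring returns the real handler.
        Handler getHandler() {
          return new Handler() {
            public int handle(String body) { return 200; }
          };
        }

        int process(String body) {
          return getHandler().handle(body); // all work flows through the hook
        }
      }

      static class TestMiniService extends MiniService {
        private final Handler canned;
        TestMiniService(Handler canned) { this.canned = canned; }
        @Override Handler getHandler() { return canned; }
      }

      public static void main(String[] args) {
        MiniService svc = new TestMiniService(new Handler() {
          public int handle(String body) { return 500; } // simulate the error path
        });
        System.out.println(svc.process("body")); // prints 500
      }
    }

The __ErrorState variants simply flip the canned status to 500 and assert that it propagates to the Response unchanged, which appears to be what verifyResults checks.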
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/HostServiceTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/HostServiceTest.java
deleted file mode 100644
index edc51ed..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/HostServiceTest.java
+++ /dev/null
@@ -1,300 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-package org.apache.ambari.server.api.services;
-
-
-import org.apache.ambari.server.api.handlers.RequestHandler;
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.junit.Test;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
-
-import static org.junit.Assert.assertEquals;
-
-/**
- * Unit tests for HostService.
- */
-public class HostServiceTest extends BaseServiceTest {
-
-  @Test
-  public void testGetHost() {
-    String clusterName = "clusterName";
-    String hostName = "hostName";
-
-    registerExpectations(Request.Type.GET, null, 200, false);
-    replayMocks();
-
-    //test
-    HostService hostService = new TestHostService(getResource(), clusterName, hostName, getRequestFactory(), getRequestHandler());
-    Response response = hostService.getHost(getHttpHeaders(), getUriInfo(), hostName);
-
-    verifyResults(response, 200);
-  }
-
-  @Test
-  public void testGetHost__ErrorState() {
-    String clusterName = "clusterName";
-    String hostName = "hostName";
-
-    registerExpectations(Request.Type.GET, null, 500, true);
-    replayMocks();
-
-    //test
-    HostService hostService = new TestHostService(getResource(), clusterName, hostName, getRequestFactory(), getRequestHandler());
-    Response response = hostService.getHost(getHttpHeaders(), getUriInfo(), hostName);
-
-    verifyResults(response, 500);
-  }
-
-  @Test
-  public void testGetHosts() {
-    String clusterName = "clusterName";
-
-    registerExpectations(Request.Type.GET, null, 200, false);
-    replayMocks();
-
-    //test
-    HostService hostService = new TestHostService(getResource(), clusterName, null, getRequestFactory(), getRequestHandler());
-    Response response = hostService.getHosts(getHttpHeaders(), getUriInfo());
-
-    verifyResults(response, 200);
-  }
-
-  @Test
-  public void testGetHosts__ErrorState() {
-    String clusterName = "clusterName";
-
-    registerExpectations(Request.Type.GET, null, 500, true);
-    replayMocks();
-
-    //test
-    HostService hostService = new TestHostService(getResource(), clusterName, null, getRequestFactory(), getRequestHandler());
-    Response response = hostService.getHosts(getHttpHeaders(), getUriInfo());
-
-    verifyResults(response, 500);
-  }
-
-  @Test
-  public void testCreateHost() {
-    String clusterName = "clusterName";
-    String hostName = "hostName";
-    String body = "body";
-
-    registerExpectations(Request.Type.POST, body, 201, false);
-    replayMocks();
-
-    //test
-    HostService hostService = new TestHostService(getResource(), clusterName, hostName, getRequestFactory(), getRequestHandler());
-    Response response = hostService.createHost(body, getHttpHeaders(), getUriInfo(), hostName);
-
-    verifyResults(response, 201);
-  }
-
-  @Test
-  public void testCreateHost__ErrorState() {
-    String clusterName = "clusterName";
-    String hostName = "hostName";
-    String body = "body";
-
-    registerExpectations(Request.Type.POST, body, 500, true);
-    replayMocks();
-
-    //test
-    HostService hostService = new TestHostService(getResource(), clusterName, hostName, getRequestFactory(), getRequestHandler());
-    Response response = hostService.createHost(body, getHttpHeaders(), getUriInfo(), hostName);
-
-    verifyResults(response, 500);
-  }
-
-  @Test
-  public void testCreateHosts() {
-    String clusterName = "clusterName";
-    String body = "[ " +
-        "{\"Hosts\" : {" +
-        "            \"cluster_name\" : \"mycluster\"," +
-        "            \"host_name\" : \"host1\"" +
-        "          }" +
-        "}," +
-        "{\"Hosts\" : {" +
-        "            \"cluster_name\" : \"mycluster\"," +
-        "            \"host_name\" : \"host2\"" +
-        "          }" +
-        "}," +
-        "{\"Hosts\" : {" +
-        "            \"cluster_name\" : \"mycluster\"," +
-        "            \"host_name\" : \"host3\"" +
-        "          }" +
-        "}]";
-
-    registerExpectations(Request.Type.POST, body, 201, false);
-    replayMocks();
-
-    //test
-    HostService hostService = new TestHostService(getResource(), clusterName, null, getRequestFactory(), getRequestHandler());
-    Response response = hostService.createHosts(body, getHttpHeaders(), getUriInfo());
-
-    verifyResults(response, 201);
-  }
-
-  @Test
-  public void testCreateHosts__ErrorState() {
-    String clusterName = "clusterName";
-    String body = "body";
-
-    registerExpectations(Request.Type.POST, body, 500, true);
-    replayMocks();
-
-    //test
-    HostService hostService = new TestHostService(getResource(), clusterName, null, getRequestFactory(), getRequestHandler());
-    Response response = hostService.createHosts(body, getHttpHeaders(), getUriInfo());
-
-    verifyResults(response, 500);
-  }
-
-  @Test
-  public void testUpdateHost() {
-    String clusterName = "clusterName";
-    String hostName = "hostName";
-    String body = "body";
-
-    registerExpectations(Request.Type.PUT, body, 200, false);
-    replayMocks();
-
-    //test
-    HostService hostService = new TestHostService(getResource(), clusterName, hostName, getRequestFactory(), getRequestHandler());
-    Response response = hostService.updateHost(body, getHttpHeaders(), getUriInfo(), hostName);
-
-    verifyResults(response, 200);
-  }
-
-  @Test
-  public void testUpdateHost__ErrorState() {
-    String clusterName = "clusterName";
-    String hostName = "hostName";
-    String body = "body";
-
-    registerExpectations(Request.Type.PUT, body, 500, true);
-    replayMocks();
-
-    //test
-    HostService hostService = new TestHostService(getResource(), clusterName, hostName, getRequestFactory(), getRequestHandler());
-    Response response = hostService.updateHost(body, getHttpHeaders(), getUriInfo(), hostName);
-
-    verifyResults(response, 500);
-  }
-
-  @Test
-  public void testUpdateHosts() {
-    String clusterName = "clusterName";
-    String body = "body";
-
-    registerExpectations(Request.Type.PUT, body, 200, false);
-    replayMocks();
-
-    //test
-    HostService hostService = new TestHostService(getResource(), clusterName, null, getRequestFactory(), getRequestHandler());
-    Response response = hostService.updateHosts(body, getHttpHeaders(), getUriInfo());
-
-    verifyResults(response, 200);
-  }
-
-  @Test
-  public void testUpdateHosts__ErrorState() {
-    String clusterName = "clusterName";
-    String body = "body";
-
-    registerExpectations(Request.Type.PUT, body, 500, true);
-    replayMocks();
-
-    //test
-    HostService hostService = new TestHostService(getResource(), clusterName, null, getRequestFactory(), getRequestHandler());
-    Response response = hostService.updateHosts(body, getHttpHeaders(), getUriInfo());
-
-    verifyResults(response, 500);
-  }
-
-  @Test
-  public void testDeleteHost() {
-    String clusterName = "clusterName";
-    String hostName = "hostName";
-
-    registerExpectations(Request.Type.DELETE, null, 200, false);
-    replayMocks();
-
-    //test
-    HostService hostService = new TestHostService(getResource(), clusterName, hostName, getRequestFactory(), getRequestHandler());
-    Response response = hostService.deleteHost(getHttpHeaders(), getUriInfo(), hostName);
-
-    verifyResults(response, 200);
-  }
-
-  @Test
-  public void testDeleteHost__ErrorState() {
-    String clusterName = "clusterName";
-    String hostName = "hostName";
-
-    registerExpectations(Request.Type.DELETE, null, 500, true);
-    replayMocks();
-
-    //test
-    HostService hostService = new TestHostService(getResource(), clusterName, hostName, getRequestFactory(), getRequestHandler());
-    Response response = hostService.deleteHost(getHttpHeaders(), getUriInfo(), hostName);
-
-    verifyResults(response, 500);
-  }
-
-
-  private class TestHostService extends HostService {
-    private RequestFactory m_requestFactory;
-    private RequestHandler m_requestHandler;
-    private ResourceInstance m_resourceDef;
-    private String m_clusterId;
-    private String m_hostId;
-
-    private TestHostService(ResourceInstance resourceDef, String clusterId, String hostId, RequestFactory requestFactory,
-                            RequestHandler handler) {
-      super(clusterId);
-      m_resourceDef = resourceDef;
-      m_clusterId = clusterId;
-      m_hostId = hostId;
-      m_requestFactory = requestFactory;
-      m_requestHandler = handler;
-    }
-
-    @Override
-    ResourceInstance createHostResource(String clusterName, String hostName, UriInfo ui) {
-      assertEquals(m_clusterId, clusterName);
-      assertEquals(m_hostId, hostName);
-      return m_resourceDef;
-    }
-
-    @Override
-    RequestFactory getRequestFactory() {
-      return m_requestFactory;
-    }
-
-    @Override
-    RequestHandler getRequestHandler(Request.Type requestType) {
-      return m_requestHandler;
-    }
-  }
-}
-
-
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/PersistKeyValueImplTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/PersistKeyValueImplTest.java
deleted file mode 100644
index b36285d..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/PersistKeyValueImplTest.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.google.inject.persist.PersistService;
-import org.apache.ambari.server.orm.GuiceJpaInitializer;
-import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.util.Map;
-
-
-public class PersistKeyValueImplTest extends Assert {
-  private Injector injector;
-
-  @Before
-  public void setUp() throws Exception {
-    injector = Guice.createInjector(new InMemoryDefaultTestModule());
-    injector.getInstance(GuiceJpaInitializer.class);
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    injector.getInstance(PersistService.class).stop();
-  }
-
-  @Test
-  public void testStore() throws Exception {
-    PersistKeyValueImpl impl = injector.getInstance(PersistKeyValueImpl.class);
-    Map<String, String> map = impl.getAllKeyValues();
-    assertEquals(0, map.size());
-
-    impl.put("key1", "value1");
-    impl.put("key2", "value2");
-
-    map = impl.getAllKeyValues();
-    assertEquals(2, map.size());
-    assertEquals("value1", impl.getValue("key1"));
-    assertEquals("value2", impl.getValue("key2"));
-    assertEquals(map.get("key1"), impl.getValue("key1"));
-
-    impl.put("key1", "value1-2");
-    assertEquals("value1-2", impl.getValue("key1"));
-    assertEquals(2, map.size());
-
-    StringBuilder largeValueBuilder = new StringBuilder();
-    for (int i = 0; i < 320; i++) {
-      largeValueBuilder.append("0123456789");
-    }
-    String largeValue = largeValueBuilder.toString();
-
-    impl.put("key3", largeValue);
-
-    assertEquals(largeValue, impl.getValue("key3"));
-
-  }
-}
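
Note: the contract testStore pins down is small — put overwrites, getValue reads back, getAllKeyValues reflects the store — plus one sizing case: 320 repetitions of a 10-character chunk is 3,200 characters, comfortably past the 255-character default of many VARCHAR columns, which is presumably why it is exercised. The real PersistKeyValueImpl is JPA-backed (hence the Guice/GuiceJpaInitializer setup), but an in-memory stand-in honoring the same contract fits in a few lines:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    // Illustrative in-memory equivalent of the key-value contract under test.
    public class InMemoryKeyValueStore {
      private final Map<String, String> store = new ConcurrentHashMap<String, String>();

      public void put(String key, String value) {
        store.put(key, value); // re-putting a key overwrites, as the test asserts
      }

      public String getValue(String key) {
        return store.get(key);
      }

      public Map<String, String> getAllKeyValues() {
        return new HashMap<String, String>(store); // defensive snapshot
      }
    }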
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/PersistServiceTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/PersistServiceTest.java
deleted file mode 100644
index bb11843..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/PersistServiceTest.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-import java.io.IOException;
-import java.util.Map;
-
-import com.google.inject.persist.PersistService;
-import junit.framework.Assert;
-
-import org.apache.ambari.server.orm.GuiceJpaInitializer;
-import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.apache.ambari.server.utils.StageUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.codehaus.jettison.json.JSONException;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import com.google.inject.AbstractModule;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.sun.jersey.api.client.Client;
-import com.sun.jersey.api.client.UniformInterfaceException;
-import com.sun.jersey.api.client.WebResource;
-import com.sun.jersey.api.client.config.ClientConfig;
-import com.sun.jersey.api.client.config.DefaultClientConfig;
-import com.sun.jersey.api.json.JSONConfiguration;
-import com.sun.jersey.spi.container.servlet.ServletContainer;
-import com.sun.jersey.test.framework.JerseyTest;
-import com.sun.jersey.test.framework.WebAppDescriptor;
-
-public class PersistServiceTest extends JerseyTest {
-  static String PACKAGE_NAME = "org.apache.ambari.server.api.services";
-  private static Log LOG = LogFactory.getLog(PersistServiceTest.class);
-  Injector injector;
-  protected Client client;
-
-  public PersistServiceTest() {
-    super(new WebAppDescriptor.Builder(PACKAGE_NAME).servletClass(ServletContainer.class)
-        .initParam("com.sun.jersey.api.json.POJOMappingFeature", "true")
-        .build());
-  }
-
-  public class MockModule extends AbstractModule {
-
-
-    @Override
-    protected void configure() {
-      requestStaticInjection(PersistKeyValueService.class);
-    }
-  }
-
-  @Override
-  @Before
-  public void setUp() throws Exception {
-    super.setUp();
-    injector = Guice.createInjector(new InMemoryDefaultTestModule(), new MockModule());
-    injector.getInstance(GuiceJpaInitializer.class);
-  }
-
-  @Override
-  @After
-  public void tearDown() throws Exception {
-    super.tearDown();
-    injector.getInstance(PersistService.class).stop();
-  }
-
-  @Test
-  public void testPersist() throws UniformInterfaceException, JSONException,
-    IOException {
-    ClientConfig clientConfig = new DefaultClientConfig();
-    clientConfig.getFeatures().put(JSONConfiguration.FEATURE_POJO_MAPPING, Boolean.TRUE);
-    client = Client.create(clientConfig);
-    WebResource webResource = client.resource("http://localhost:9998/persist");
-
-    webResource.post("{\"xyx\" : \"t\"}");
-    LOG.info("Done posting to the server");
-    String output = webResource.get(String.class);
-    LOG.info("All key values " + output);
-    Map<String, String> jsonOutput = StageUtils.fromJson(output, Map.class);
-    String value = jsonOutput.get("xyx");
-    Assert.assertEquals("t", value);
-    webResource = client.resource("http://localhost:9998/persist/xyx");
-    output = webResource.get(String.class);
-    Assert.assertEquals("t", output);
-    LOG.info("Value for xyx " + output);
-  }
-}
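
Note: PersistServiceTest is an end-to-end check rather than a unit test — JerseyTest boots an embedded container (default base URI http://localhost:9998/, matching the hard-coded URLs) around the resources in the org.apache.ambari.server.api.services package, and the test drives real HTTP: POST a JSON map to /persist, GET all pairs back as JSON, GET a single key as plain text. A hypothetical JAX-RS resource consistent with those interactions (the real PersistKeyValueService may differ):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import javax.ws.rs.Consumes;
    import javax.ws.rs.GET;
    import javax.ws.rs.POST;
    import javax.ws.rs.Path;
    import javax.ws.rs.PathParam;
    import javax.ws.rs.Produces;
    import javax.ws.rs.core.MediaType;

    // Hypothetical resource matching the HTTP exchanges in testPersist; not Ambari API.
    @Path("/persist")
    public class KeyValueResourceSketch {
      private static final Map<String, String> STORE = new ConcurrentHashMap<String, String>();

      @POST
      @Consumes(MediaType.APPLICATION_JSON)
      public void persist(Map<String, String> pairs) {
        STORE.putAll(pairs); // {"xyx" : "t"} lands here
      }

      @GET
      @Produces(MediaType.APPLICATION_JSON)
      public Map<String, String> getAllKeyValues() {
        return new HashMap<String, String>(STORE);
      }

      @GET
      @Path("{key}")
      @Produces(MediaType.TEXT_PLAIN)
      public String getValue(@PathParam("key") String key) {
        return STORE.get(key); // GET /persist/xyx -> "t"
      }
    }

The POJOMappingFeature init-param in the WebAppDescriptor is what lets Jersey bind the JSON body to a Map on both sides of the wire.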
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/PersistenceManagerImplTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/PersistenceManagerImplTest.java
deleted file mode 100644
index f5a43ae..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/PersistenceManagerImplTest.java
+++ /dev/null
@@ -1,205 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.api.query.Query;
-import org.apache.ambari.server.controller.spi.*;
-import org.apache.ambari.server.api.resources.ResourceDefinition;
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.apache.ambari.server.api.services.persistence.PersistenceManagerImpl;
-import org.apache.ambari.server.controller.internal.RequestStatusImpl;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.junit.Test;
-
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-
-import static org.easymock.EasyMock.*;
-import static org.junit.Assert.*;
-
-/**
- * PersistenceManagerImpl unit tests.
- */
-public class PersistenceManagerImplTest {
-
-  @Test
-  public void testCreate() throws Exception {
-    ResourceInstance resource = createMock(ResourceInstance.class);
-    ResourceDefinition resourceDefinition = createMock(ResourceDefinition.class);
-    ClusterController controller = createMock(ClusterController.class);
-    Schema schema = createMock(Schema.class);
-    String clusterId = "clusterId";
-    String serviceId = "serviceId";
-    Request serverRequest = createStrictMock(Request.class);
-
-    Map<Resource.Type, String> mapResourceIds = new HashMap<Resource.Type, String>();
-    mapResourceIds.put(Resource.Type.Cluster, "clusterId");
-    mapResourceIds.put(Resource.Type.Service, "serviceId");
-
-    Set<Map<String, Object>> setProperties = new HashSet<Map<String, Object>>();
-    Map<String, Object> mapProperties = new HashMap<String, Object>();
-    mapProperties.put(clusterId, "clusterId");
-    mapProperties.put(serviceId, "serviceId");
-    mapProperties.put(PropertyHelper.getPropertyId("foo", "bar"), "value");
-    setProperties.add(mapProperties);
-
-    //expectations
-    expect(resource.getIds()).andReturn(mapResourceIds);
-    expect(resource.getResourceDefinition()).andReturn(resourceDefinition).atLeastOnce();
-    expect(resourceDefinition.getType()).andReturn(Resource.Type.Component);
-    expect(controller.getSchema(Resource.Type.Component)).andReturn(schema);
-    expect(schema.getKeyPropertyId(Resource.Type.Cluster)).andReturn(clusterId);
-    expect(schema.getKeyPropertyId(Resource.Type.Service)).andReturn(serviceId);
-
-    expect(controller.createResources(Resource.Type.Component, serverRequest)).andReturn(new RequestStatusImpl(null));
-
-    replay(resource, resourceDefinition, controller, schema, serverRequest);
-
-    new TestPersistenceManager(controller, setProperties, serverRequest).create(resource, setProperties);
-
-    verify(resource, resourceDefinition, controller, schema, serverRequest);
-  }
-
-  @Test
-  public void testCreate__MultipleResources() throws Exception {
-    ResourceInstance resource = createMock(ResourceInstance.class);
-    ResourceDefinition resourceDefinition = createMock(ResourceDefinition.class);
-    ClusterController controller = createMock(ClusterController.class);
-    Schema schema = createMock(Schema.class);
-    Request serverRequest = createStrictMock(Request.class);
-
-    String clusterId = "clusterId";
-    String serviceId = "serviceId";
-
-
-    Map<Resource.Type, String> mapResourceIds = new HashMap<Resource.Type, String>();
-    mapResourceIds.put(Resource.Type.Cluster, "clusterId");
-    mapResourceIds.put(Resource.Type.Service, "serviceId");
-
-    Set<Map<String, Object>> setProperties = new HashSet<Map<String, Object>>();
-
-    Map<String, Object> mapResourceProps1 = new HashMap<String, Object>();
-    mapResourceProps1.put(clusterId, "clusterId");
-    mapResourceProps1.put(serviceId, "serviceId");
-    mapResourceProps1.put(PropertyHelper.getPropertyId("foo", "bar"), "value");
-
-    Map<String, Object> mapResourceProps2 = new HashMap<String, Object>();
-    mapResourceProps2.put(clusterId, "clusterId");
-    mapResourceProps2.put(serviceId, "serviceId2");
-    mapResourceProps2.put(PropertyHelper.getPropertyId("foo", "bar2"), "value2");
-
-    setProperties.add(mapResourceProps1);
-    setProperties.add(mapResourceProps2);
-
-    //expectations
-    expect(resource.getIds()).andReturn(mapResourceIds);
-    expect(resource.getResourceDefinition()).andReturn(resourceDefinition);
-    expect(resourceDefinition.getType()).andReturn(Resource.Type.Component);
-    expect(controller.getSchema(Resource.Type.Component)).andReturn(schema);
-    expect(schema.getKeyPropertyId(Resource.Type.Cluster)).andReturn(clusterId).times(2);
-    expect(schema.getKeyPropertyId(Resource.Type.Service)).andReturn(serviceId).times(2);
-
-    expect(controller.createResources(Resource.Type.Component, serverRequest)).andReturn(new RequestStatusImpl(null));
-
-    replay(resource, resourceDefinition, controller, schema, serverRequest);
-
-    new TestPersistenceManager(controller, setProperties, serverRequest).create(resource, setProperties);
-
-    verify(resource, resourceDefinition, controller, schema, serverRequest);
-  }
-
-  @Test
-  public void testUpdate() throws Exception {
-    ResourceInstance resource = createMock(ResourceInstance.class);
-    ResourceDefinition resourceDefinition = createMock(ResourceDefinition.class);
-    ClusterController controller = createMock(ClusterController.class);
-    Schema schema = createMock(Schema.class);
-    Request serverRequest = createStrictMock(Request.class);
-    Query query = createMock(Query.class);
-    Predicate predicate = createMock(Predicate.class);
-
-    Set<Map<String, Object>> setProperties = new HashSet<Map<String, Object>>();
-    Map<String, Object> mapProperties = new HashMap<String, Object>();
-    mapProperties.put(PropertyHelper.getPropertyId("foo", "bar"), "value");
-    setProperties.add(mapProperties);
-
-    //expectations
-    expect(resource.getResourceDefinition()).andReturn(resourceDefinition);
-    expect(resourceDefinition.getType()).andReturn(Resource.Type.Component);
-    expect(resource.getQuery()).andReturn(query);
-    expect(query.getPredicate()).andReturn(predicate);
-
-    expect(controller.updateResources(Resource.Type.Component, serverRequest, predicate)).andReturn(new RequestStatusImpl(null));
-
-    replay(resource, resourceDefinition, controller, schema, serverRequest, query, predicate);
-
-    new TestPersistenceManager(controller, setProperties, serverRequest).update(resource, setProperties);
-
-    verify(resource, resourceDefinition, controller, schema, serverRequest, query, predicate);
-  }
-
-  @Test
-  public void testDelete() throws Exception {
-    ResourceInstance resource = createNiceMock(ResourceInstance.class);
-    ResourceDefinition resourceDefinition = createNiceMock(ResourceDefinition.class);
-    ClusterController controller = createMock(ClusterController.class);
-    Query query = createMock(Query.class);
-    Predicate predicate = createMock(Predicate.class);
-
-    //expectations
-    expect(resource.getResourceDefinition()).andReturn(resourceDefinition).anyTimes();
-    expect(resourceDefinition.getType()).andReturn(Resource.Type.Component).anyTimes();
-    expect(resource.getQuery()).andReturn(query).anyTimes();
-    expect(query.getPredicate()).andReturn(predicate).anyTimes();
-
-    expect(controller.deleteResources(Resource.Type.Component, predicate)).andReturn(new RequestStatusImpl(null));
-
-    replay(resource, resourceDefinition, controller, query, predicate);
-
-    new TestPersistenceManager(controller, null, null).delete(resource, null);
-
-    verify(resource, resourceDefinition, controller, query, predicate);
-  }
-
-
-  private class TestPersistenceManager extends PersistenceManagerImpl {
-
-    private Request m_request;
-    private Set<Map<String, Object>> m_setProperties;
-
-    private TestPersistenceManager(ClusterController controller,
-                                         Set<Map<String, Object>> setProperties,
-                                         Request controllerRequest) {
-      super(controller);
-      m_setProperties = setProperties;
-      m_request = controllerRequest;
-    }
-
-    @Override
-    protected Request createControllerRequest(Set<Map<String, Object>> setProperties) {
-      assertEquals(m_setProperties, setProperties);
-      return m_request;
-    }
-  }
-}
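
Note: every test in this file follows EasyMock's record-replay-verify cycle: expectations are recorded with expect(...).andReturn(...), replay(...) switches the mocks to playback, the code under test runs, and verify(...) fails if any recorded expectation went unused. createStrictMock additionally enforces call order, and createNiceMock (used in testDelete) tolerates unexpected calls by returning defaults. A compact, self-contained pass through the cycle:

    import static org.easymock.EasyMock.createMock;
    import static org.easymock.EasyMock.expect;
    import static org.easymock.EasyMock.replay;
    import static org.easymock.EasyMock.verify;

    public class EasyMockCycleSketch {
      interface Controller {
        String createResource(String type);
      }

      public static void main(String[] args) {
        Controller controller = createMock(Controller.class);
        expect(controller.createResource("Component")).andReturn("OK"); // record
        replay(controller);                                             // playback mode
        String status = controller.createResource("Component");         // exercise
        verify(controller); // throws if an expectation was never satisfied
        System.out.println(status); // -> OK
      }
    }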
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/PostRequestTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/PostRequestTest.java
deleted file mode 100644
index 8b99a15..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/PostRequestTest.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-import org.apache.ambari.server.api.predicate.PredicateCompiler;
-import org.junit.Test;
-
-import static org.junit.Assert.*;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.UriInfo;
-
-/**
- * PostRequest unit tests
- */
-public class PostRequestTest extends BaseRequestTest {
-  @Test
-  public void testRequestType() {
-    assertSame(Request.Type.POST, new PostRequest(null, null, null, null).getRequestType());
-  }
-
-  @Test
-  public void testGetQueryPredicate() throws Exception {
-    String uri = "http://foo.bar.com/api/v1/clusters?foo=bar&orProp1=5|orProp2!=6|orProp3<100&prop!=5&prop2>10&prop3>=20&prop4<500&prop5<=1&fields=field1,category/field2";
-    super.testGetQueryPredicate(uri);
-  }
-
-  @Test
-  public void testGetFields() {
-    String fields = "prop,category/prop1,category2/category3/prop2[1,2,3],prop3[4,5,6],category4[7,8,9],sub-resource/*[10,11,12],finalProp";
-    super.testGetFields(fields);
-  }
-
-  protected Request getTestRequest(HttpHeaders headers, String body, UriInfo uriInfo, final PredicateCompiler compiler) {
-    return new PostRequest(headers, body, uriInfo, null) {
-      @Override
-      protected PredicateCompiler getPredicateCompiler() {
-        return compiler;
-      }
-    };
-  }
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/PutRequestTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/PutRequestTest.java
deleted file mode 100644
index 79241b8..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/PutRequestTest.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-import org.apache.ambari.server.api.predicate.PredicateCompiler;
-import org.junit.Test;
-
-import static org.junit.Assert.*;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.UriInfo;
-
-/**
- * PutRequest unit tests
- */
-public class PutRequestTest extends BaseRequestTest {
-  @Test
-  public void testRequestType() {
-    assertSame(Request.Type.PUT, new PutRequest(null, null, null, null).getRequestType());
-  }
-
-  @Test
-  public void testGetQueryPredicate() throws Exception {
-    String uri = "http://foo.bar.com/api/v1/clusters?foo=bar&orProp1=5|orProp2!=6|orProp3<100&prop!=5&prop2>10&prop3>=20&prop4<500&prop5<=1&fields=field1,category/field2";
-    super.testGetQueryPredicate(uri);
-  }
-
-  @Test
-  public void testGetFields() {
-    String fields = "prop,category/prop1,category2/category3/prop2[1,2,3],prop3[4,5,6],category4[7,8,9],sub-resource/*[10,11,12],finalProp";
-    super.testGetFields(fields);
-  }
-
-  protected Request getTestRequest(HttpHeaders headers, String body, UriInfo uriInfo, final PredicateCompiler compiler) {
-    return new PutRequest(headers, body, uriInfo, null) {
-      @Override
-      protected PredicateCompiler getPredicateCompiler() {
-        return compiler;
-      }
-    };
-  }
-}
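
Note: the URI shared by PostRequestTest and PutRequestTest is a tour of the API's query grammar: the relational operators =, !=, <, >, <=, >= form terms, & joins terms as AND, | as OR, and fields= requests a partial response (category paths such as category/field2, plus bracketed arguments like prop3[4,5,6], which appear to carry temporal range parameters). BaseRequestTest presumably feeds the query string to PredicateCompiler; a hedged sketch of that entry point (treat the exact signature as an assumption):

    import org.apache.ambari.server.api.predicate.InvalidQueryException;
    import org.apache.ambari.server.api.predicate.PredicateCompiler;
    import org.apache.ambari.server.controller.spi.Predicate;

    public class PredicateCompileSketch {
      public static void main(String[] args) throws InvalidQueryException {
        PredicateCompiler compiler = new PredicateCompiler();
        // Three relational terms joined by AND; a "|" would introduce an OR branch.
        Predicate predicate = compiler.compile("prop!=5&prop2>10&prop3<=20");
        System.out.println(predicate); // a composite AND over three comparisons
      }
    }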
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/ServiceServiceTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/ServiceServiceTest.java
deleted file mode 100644
index e7b4c53..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/ServiceServiceTest.java
+++ /dev/null
@@ -1,250 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-import org.apache.ambari.server.api.handlers.RequestHandler;
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.junit.Test;
-
-import javax.ws.rs.core.Response;
-
-import static org.junit.Assert.assertEquals;
-
-/**
- * Unit tests for ServiceService.
- */
-public class ServiceServiceTest extends BaseServiceTest {
-
-  @Test
-  public void testGetService() {
-    String clusterName = "clusterName";
-    String serviceName = "serviceName";
-
-    registerExpectations(Request.Type.GET, null, 200, false);
-    replayMocks();
-
-    //test
-    ServiceService serviceService = new TestServiceService(getResource(), clusterName, serviceName, getRequestFactory(), getRequestHandler());
-    Response response = serviceService.getService(getHttpHeaders(), getUriInfo(), serviceName);
-
-    verifyResults(response, 200);
-  }
-
-  @Test
-  public void testGetService__ErrorState() {
-    String clusterName = "clusterName";
-    String serviceName = "serviceName";
-
-    registerExpectations(Request.Type.GET, null, 500, true);
-    replayMocks();
-
-    //test
-    ServiceService serviceService = new TestServiceService(getResource(), clusterName, serviceName, getRequestFactory(), getRequestHandler());
-    Response response = serviceService.getService(getHttpHeaders(), getUriInfo(), serviceName);
-
-    verifyResults(response, 500);
-  }
-
-  @Test
-  public void testGetServices() {
-    String clusterName = "clusterName";
-
-    registerExpectations(Request.Type.GET, null, 200, false);
-    replayMocks();
-
-    //test
-    ServiceService serviceService = new TestServiceService(getResource(), clusterName, null, getRequestFactory(), getRequestHandler());
-    Response response = serviceService.getServices(getHttpHeaders(), getUriInfo());
-
-    verifyResults(response, 200);
-  }
-
-  @Test
-  public void testGetServices__ErrorState() {
-    String clusterName = "clusterName";
-
-    registerExpectations(Request.Type.GET, null, 400, false);
-    replayMocks();
-
-    //test
-    ServiceService serviceService = new TestServiceService(getResource(), clusterName, null, getRequestFactory(), getRequestHandler());
-    Response response = serviceService.getServices(getHttpHeaders(), getUriInfo());
-
-    verifyResults(response, 400);
-  }
-
-  @Test
-  public void testCreateService() {
-    String clusterName = "clusterName";
-    String serviceName = "serviceName";
-    String body = "{body}";
-
-    registerExpectations(Request.Type.POST, body, 201, false);
-    replayMocks();
-
-    //test
-    ServiceService serviceService = new TestServiceService(getResource(), clusterName, serviceName, getRequestFactory(), getRequestHandler());
-    Response response = serviceService.createService(body, getHttpHeaders(), getUriInfo(), serviceName);
-
-    verifyResults(response, 201);
-  }
-
-  @Test
-  public void testCreateService__ErrorState() {
-    String clusterName = "clusterName";
-    String serviceName = "serviceName";
-    String body = "{body}";
-
-    registerExpectations(Request.Type.POST, body, 500, true);
-    replayMocks();
-
-    //test
-    ServiceService serviceService = new TestServiceService(getResource(), clusterName, serviceName, getRequestFactory(), getRequestHandler());
-    Response response = serviceService.createService(body, getHttpHeaders(), getUriInfo(), serviceName);
-
-    verifyResults(response, 500);
-  }
-
-  @Test
-  public void testUpdateServices() {
-    String clusterName = "clusterName";
-    String body = "{body}";
-
-    registerExpectations(Request.Type.PUT, body, 200, false);
-    replayMocks();
-
-    //test
-    ServiceService serviceService = new TestServiceService(getResource(), clusterName, null, getRequestFactory(), getRequestHandler());
-    Response response = serviceService.updateServices(body, getHttpHeaders(), getUriInfo());
-
-    verifyResults(response, 200);
-  }
-
-  @Test
-  public void testUpdateServices__ErrorState() {
-    String clusterName = "clusterName";
-    String body = "{body}";
-
-    registerExpectations(Request.Type.PUT, body, 500, true);
-    replayMocks();
-
-    //test
-    ServiceService serviceService = new TestServiceService(getResource(), clusterName, null, getRequestFactory(), getRequestHandler());
-    Response response = serviceService.updateServices(body, getHttpHeaders(), getUriInfo());
-
-    verifyResults(response, 500);
-  }
-
-  @Test
-  public void testUpdateService() {
-    String clusterName = "clusterName";
-    String serviceName = "serviceName";
-    String body = "{body}";
-
-    registerExpectations(Request.Type.PUT, body, 200, false);
-    replayMocks();
-
-    //test
-    ServiceService serviceService = new TestServiceService(getResource(), clusterName, serviceName, getRequestFactory(), getRequestHandler());
-    Response response = serviceService.updateService(body, getHttpHeaders(), getUriInfo(), serviceName);
-
-    verifyResults(response, 200);
-  }
-
-  @Test
-  public void testUpdateService__ErrorState() {
-    String clusterName = "clusterName";
-    String serviceName = "serviceName";
-    String body = "{body}";
-
-    registerExpectations(Request.Type.PUT, body, 500, true);
-    replayMocks();
-
-    //test
-    ServiceService serviceService = new TestServiceService(getResource(), clusterName, serviceName, getRequestFactory(), getRequestHandler());
-    Response response = serviceService.updateService(body, getHttpHeaders(), getUriInfo(), serviceName);
-
-    verifyResults(response, 500);
-  }
-
-  @Test
-  public void testDeleteService() {
-    String clusterName = "clusterName";
-    String serviceName = "serviceName";
-
-    registerExpectations(Request.Type.DELETE, null, 200, false);
-    replayMocks();
-
-    //test
-    ServiceService serviceService = new TestServiceService(getResource(), clusterName, serviceName, getRequestFactory(), getRequestHandler());
-    Response response = serviceService.deleteService(getHttpHeaders(), getUriInfo(), serviceName);
-
-    verifyResults(response, 200);
-  }
-
-  @Test
-  public void testDeleteService__ErrorState() {
-    String clusterName = "clusterName";
-    String serviceName = "serviceName";
-
-    registerExpectations(Request.Type.DELETE, null, 500, true);
-    replayMocks();
-
-    //test
-    ServiceService serviceService = new TestServiceService(getResource(), clusterName, serviceName, getRequestFactory(), getRequestHandler());
-    Response response = serviceService.deleteService(getHttpHeaders(), getUriInfo(), serviceName);
-
-    verifyResults(response, 500);
-  }
-
-  private class TestServiceService extends ServiceService {
-    private RequestFactory m_requestFactory;
-    private RequestHandler m_requestHandler;
-    private ResourceInstance m_resourceDef;
-    private String m_clusterId;
-    private String m_serviceId;
-
-    private TestServiceService(ResourceInstance resourceDef, String clusterId, String serviceId, RequestFactory requestFactory,
-                               RequestHandler handler) {
-      super(clusterId);
-      m_resourceDef = resourceDef;
-      m_clusterId = clusterId;
-      m_serviceId = serviceId;
-      m_requestFactory = requestFactory;
-      m_requestHandler = handler;
-    }
-
-    @Override
-    ResourceInstance createServiceResource(String clusterName, String serviceName) {
-      assertEquals(m_clusterId, clusterName);
-      assertEquals(m_serviceId, serviceName);
-      return m_resourceDef;
-    }
-
-    @Override
-    RequestFactory getRequestFactory() {
-      return m_requestFactory;
-    }
-
-    @Override
-    RequestHandler getRequestHandler(Request.Type requestType) {
-      return m_requestHandler;
-    }
-  }
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/UpdatePersistenceManagerTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/UpdatePersistenceManagerTest.java
deleted file mode 100644
index b60680c..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/UpdatePersistenceManagerTest.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services;
-
-
-import org.apache.ambari.server.api.resources.ResourceDefinition;
-import org.apache.ambari.server.api.resources.ResourceInstance;
-import org.apache.ambari.server.controller.internal.RequestStatusImpl;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.apache.ambari.server.api.query.Query;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.*;
-import org.junit.Test;
-
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-import static org.junit.Assert.*;
-
-import static org.easymock.EasyMock.*;
-
-/**
- * Unit tests for UpdatePersistenceManager.
- */
-public class UpdatePersistenceManagerTest {
-//  @Test
-//  public void testPersist() throws Exception {
-//    ResourceInstance resource = createMock(ResourceInstance.class);
-//    ResourceDefinition resourceDefinition = createMock(ResourceDefinition.class);
-//    ClusterController controller = createMock(ClusterController.class);
-//    Schema schema = createMock(Schema.class);
-//    Request serverRequest = createStrictMock(Request.class);
-//    Query query = createMock(Query.class);
-//    Predicate predicate = createMock(Predicate.class);
-//
-//    Set<Map<String, Object>> setProperties = new HashSet<Map<String, Object>>();
-//    Map<String, Object> mapProperties = new HashMap<String, Object>();
-//    mapProperties.put(PropertyHelper.getPropertyId("foo", "bar"), "value");
-//    setProperties.add(mapProperties);
-//
-//    //expectations
-//    expect(resource.getResourceDefinition()).andReturn(resourceDefinition);
-//    expect(resourceDefinition.getType()).andReturn(Resource.Type.Component);
-//    expect(resource.getQuery()).andReturn(query);
-//    expect(query.getPredicate()).andReturn(predicate);
-//
-//    expect(controller.updateResources(Resource.Type.Component, serverRequest, predicate)).andReturn(new RequestStatusImpl(null));
-//
-//    replay(resource, resourceDefinition, controller, schema, serverRequest, query, predicate);
-//
-//    new TestUpdatePersistenceManager(controller, setProperties, serverRequest).persist(resource, setProperties);
-//
-//    verify(resource, resourceDefinition, controller, schema, serverRequest, query, predicate);
-//  }
-//
-//  private class TestUpdatePersistenceManager extends UpdatePersistenceManager {
-//
-//    private ClusterController m_controller;
-//    private Request m_request;
-//    private Set<Map<String, Object>> m_setProperties;
-//
-//    private TestUpdatePersistenceManager(ClusterController controller,
-//                                         Set<Map<String, Object>> setProperties,
-//                                         Request controllerRequest) {
-//      m_controller = controller;
-//      m_setProperties = setProperties;
-//      m_request = controllerRequest;
-//    }
-//
-//    @Override
-//    protected ClusterController getClusterController() {
-//      return m_controller;
-//    }
-//
-//    @Override
-//    protected Request createControllerRequest(Set<Map<String, Object>> setProperties) {
-//      assertEquals(1, setProperties.size());
-//      assertEquals(m_setProperties, setProperties);
-//      return m_request;
-//    }
-//  }
-
-}
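
Note: UpdatePersistenceManagerTest ships as an empty shell — the entire testPersist flow is commented out. Commented-out tests drift silently out of sync with the API they target; JUnit 4's @Ignore parks a test while keeping it compiling and visible as "skipped" in reports. A minimal sketch:

    import org.junit.Ignore;
    import org.junit.Test;

    public class IgnoredTestSketch {
      @Ignore("parked pending PersistenceManager API changes")
      @Test
      public void testPersist() {
        // still compiled, still counted as skipped in the test report
      }
    }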
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/parsers/JsonPropertyParserTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/parsers/JsonPropertyParserTest.java
deleted file mode 100644
index 9565ff5..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/parsers/JsonPropertyParserTest.java
+++ /dev/null
@@ -1,140 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services.parsers;
-
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.junit.Test;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Unit tests for JsonPropertyParser.
- */
-public class JsonPropertyParserTest {
-
-  String serviceJson = "{\"Services\" : {" +
-      "    \"display_name\" : \"HDFS\"," +
-      "    \"description\" : \"Apache Hadoop Distributed File System\"," +
-      "    \"attributes\" : \"{ \\\"runnable\\\": true, \\\"mustInstall\\\": true, \\\"editable\\\": false, \\\"noDisplay\\\": false }\"," +
-      "    \"service_name\" : \"HDFS\"" +
-      "  }," +
-      "  \"ServiceInfo\" : {" +
-      "    \"cluster_name\" : \"tbmetrictest\"," +
-      "    \"state\" : \"STARTED\"" +
-      "  }," +
-      "\"OuterCategory\" : { \"propName\" : 100, \"nested1\" : { \"nested2\" : { \"innerPropName\" : \"innerPropValue\" } } } }";
-
-
-  String clustersJson = "[ {" +
-      "\"Clusters\" : {\n" +
-      "    \"cluster_name\" : \"unitTestCluster1\"" +
-      "} }," +
-      "{" +
-      "\"Clusters\" : {\n" +
-      "    \"cluster_name\" : \"unitTestCluster2\"," +
-      "    \"property1\" : \"prop1Value\"" +
-      "} }," +
-      "{" +
-      "\"Clusters\" : {\n" +
-      "    \"cluster_name\" : \"unitTestCluster3\"," +
-      "    \"Category\" : { \"property2\" : \"prop2Value\"}" +
-      "} } ]";
-
-
-  @Test
-  public void testParse() throws Exception {
-    RequestBodyParser parser = new JsonPropertyParser();
-    Set<Map<String, Object>> setProps = parser.parse(serviceJson);
-
-    assertEquals(1, setProps.size());
-
-    Map<String, Object> mapExpected = new HashMap<String, Object>();
-    mapExpected.put(PropertyHelper.getPropertyId("Services", "service_name"), "HDFS");
-    mapExpected.put(PropertyHelper.getPropertyId("Services", "display_name"), "HDFS");
-    mapExpected.put(PropertyHelper.getPropertyId("ServiceInfo", "cluster_name"), "tbmetrictest");
-    mapExpected.put(PropertyHelper.getPropertyId("Services", "attributes"), "{ \"runnable\": true, \"mustInstall\": true, \"editable\": false, \"noDisplay\": false }");
-    mapExpected.put(PropertyHelper.getPropertyId("Services", "description"), "Apache Hadoop Distributed File System");
-    mapExpected.put(PropertyHelper.getPropertyId("ServiceInfo", "state"), "STARTED");
-    mapExpected.put(PropertyHelper.getPropertyId("OuterCategory", "propName"), "100");
-    mapExpected.put(PropertyHelper.getPropertyId("OuterCategory.nested1.nested2", "innerPropName"), "innerPropValue");
-
-    assertEquals(mapExpected, setProps.iterator().next());
-  }
-
-  @Test
-  public void testParse_NullBody() {
-    RequestBodyParser parser = new JsonPropertyParser();
-    Set<Map<String, Object>> setProps = parser.parse(null);
-    assertNotNull(setProps);
-    assertEquals(0, setProps.size());
-  }
-
-  @Test
-  public void testParse_EmptyBody() {
-    RequestBodyParser parser = new JsonPropertyParser();
-    Set<Map<String, Object>> setProps = parser.parse("");
-    assertNotNull(setProps);
-    assertEquals(0, setProps.size());
-  }
-
-  @Test
-  public void testParse_Array() {
-    RequestBodyParser parser = new JsonPropertyParser();
-    Set<Map<String, Object>> setProps = parser.parse(clustersJson);
-    assertEquals(3, setProps.size());
-
-    boolean cluster1Matches = false;
-    boolean cluster2Matches = false;
-    boolean cluster3Matches = false;
-
-    Map<String, String> mapCluster1 = new HashMap<String, String>();
-    mapCluster1.put(PropertyHelper.getPropertyId("Clusters", "cluster_name"), "unitTestCluster1");
-
-    Map<String, String> mapCluster2 = new HashMap<String, String>();
-    mapCluster2.put(PropertyHelper.getPropertyId("Clusters", "cluster_name"), "unitTestCluster2");
-    mapCluster2.put(PropertyHelper.getPropertyId("Clusters", "property1"), "prop1Value");
-
-
-    Map<String, String> mapCluster3 = new HashMap<String, String>();
-    mapCluster3.put(PropertyHelper.getPropertyId("Clusters", "cluster_name"), "unitTestCluster3");
-    mapCluster3.put(PropertyHelper.getPropertyId("Clusters.Category", "property2"), "prop2Value");
-
-
-    for (Map<String, Object> mapProps : setProps) {
-      if (mapProps.equals(mapCluster1)) {
-        cluster1Matches = true;
-      } else if (mapProps.equals(mapCluster2)) {
-        cluster2Matches = true;
-      } else if (mapProps.equals(mapCluster3)) {
-        cluster3Matches = true;
-      }
-    }
-
-    assertTrue(cluster1Matches);
-    assertTrue(cluster2Matches);
-    assertTrue(cluster3Matches);
-  }
-}
-
-
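
Note: the assertions in testParse and testParse_Array pin down JsonPropertyParser's flattening rules: each top-level object is a property category; nested objects extend the category path; every scalar is coerced to a string ("propName" : 100 becomes "100"); and a JSON array yields one property map per element. A sketch of that behavior, using Jackson 2 for brevity (the property-ID shape here — dotted category path plus slash-separated name — mirrors the getPropertyId arguments above; the real PropertyHelper normalization is an assumption):

    import com.fasterxml.jackson.databind.JsonNode;
    import com.fasterxml.jackson.databind.ObjectMapper;
    import java.util.ArrayList;
    import java.util.Iterator;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    public class FlattenSketch {
      public static List<Map<String, String>> parse(String json) throws Exception {
        JsonNode root = new ObjectMapper().readTree(json);
        List<Map<String, String>> all = new ArrayList<Map<String, String>>();
        if (root.isArray()) {
          for (JsonNode element : root) {   // one property map per array element
            all.add(flattenOne(element));
          }
        } else {
          all.add(flattenOne(root));
        }
        return all;
      }

      private static Map<String, String> flattenOne(JsonNode node) {
        Map<String, String> props = new LinkedHashMap<String, String>();
        walk(node, "", props);
        return props;
      }

      private static void walk(JsonNode node, String category, Map<String, String> out) {
        Iterator<Map.Entry<String, JsonNode>> fields = node.fields();
        while (fields.hasNext()) {
          Map.Entry<String, JsonNode> field = fields.next();
          if (field.getValue().isObject()) {
            String sub = category.isEmpty() ? field.getKey() : category + "." + field.getKey();
            walk(field.getValue(), sub, out);       // nested object extends the category path
          } else {
            String id = category.isEmpty() ? field.getKey() : category + "/" + field.getKey();
            out.put(id, field.getValue().asText()); // scalars coerced to strings
          }
        }
      }
    }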
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/serializers/JsonSerializerTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/serializers/JsonSerializerTest.java
deleted file mode 100644
index 048067b..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/services/serializers/JsonSerializerTest.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.api.services.serializers;
-
-import org.apache.ambari.server.api.services.Result;
-import org.apache.ambari.server.api.services.ResultImpl;
-import org.apache.ambari.server.api.services.ResultStatus;
-import org.apache.ambari.server.api.util.TreeNode;
-import org.apache.ambari.server.api.util.TreeNodeImpl;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.junit.Test;
-
-import javax.ws.rs.core.UriInfo;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import static org.easymock.EasyMock.*;
-import static org.junit.Assert.assertEquals;
-
-/**
- * JSONSerializer unit tests
- */
-public class JsonSerializerTest {
-  @Test
-  public void testSerialize() throws Exception {
-    UriInfo uriInfo = createMock(UriInfo.class);
-    Resource resource = createMock(Resource.class);
-
-    Result result = new ResultImpl(true);
-    result.setResultStatus(new ResultStatus(ResultStatus.STATUS.OK));
-    TreeNode<Resource> tree = result.getResultTree();
-    TreeNode<Resource> child = tree.addChild(resource, "resource1");
-
-    // resource properties
-    HashMap<String, Object> mapRootProps = new HashMap<String, Object>();
-    mapRootProps.put("prop1", "value1");
-    mapRootProps.put("prop2", "value2");
-
-    HashMap<String, Object> mapCategoryProps = new HashMap<String, Object>();
-    mapCategoryProps.put("catProp1", "catValue1");
-    mapCategoryProps.put("catProp2", "catValue2");
-
-    TreeNode<Map<String, Object>> treeProps1 = new TreeNodeImpl<Map<String, Object>>(
-        null, mapRootProps, null);
-
-    treeProps1.addChild(mapCategoryProps, "category");
-
-
-    //expectations
-    expect(resource.getProperties()).andReturn(treeProps1).anyTimes();
-    expect(resource.getType()).andReturn(Resource.Type.Cluster).anyTimes();
-
-
-    replay(uriInfo, resource);
-
-    //execute test
-    Object o = new JsonSerializer().serialize(result);
-
-    String expected = "{\n" +
-        "  \"prop2\" : \"value2\",\n" +
-        "  \"prop1\" : \"value1\",\n" +
-        "  \"category\" : {\n" +
-        "    \"catProp1\" : \"catValue1\",\n" +
-        "    \"catProp2\" : \"catValue2\"\n" +
-        "  }\n" +
-        "}";
-
-    assertEquals(expected, o);
-
-    verify(uriInfo, resource);
-  }
-}
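The serializer test above leans on EasyMock's record/replay/verify lifecycle. Here is a minimal sketch of that pattern in isolation, using a made-up Greeter interface rather than Ambari types:

import static org.easymock.EasyMock.*;

public class EasyMockLifecycleSketch {
  interface Greeter {
    String greet(String name);
  }

  public static void main(String[] args) {
    Greeter greeter = createMock(Greeter.class);

    // record phase: declare the expected call and its return value
    expect(greeter.greet("world")).andReturn("hello world").anyTimes();
    replay(greeter); // switch the mock from record mode to replay mode

    System.out.println(greeter.greet("world")); // prints "hello world"

    verify(greeter); // fails if any recorded expectation was not met
  }
}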
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/bootstrap/BootStrapResourceTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/bootstrap/BootStrapResourceTest.java
deleted file mode 100644
index e433879..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/bootstrap/BootStrapResourceTest.java
+++ /dev/null
@@ -1,125 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.bootstrap;
-
-import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.eq;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-
-import java.util.ArrayList;
-
-import javax.ws.rs.core.MediaType;
-
-import junit.framework.Assert;
-
-import org.apache.ambari.server.api.rest.BootStrapResource;
-import org.apache.ambari.server.bootstrap.BSResponse.BSRunStat;
-import org.apache.ambari.server.bootstrap.BootStrapStatus.BSStat;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.codehaus.jettison.json.JSONException;
-import org.codehaus.jettison.json.JSONObject;
-import org.junit.Test;
-
-import com.google.inject.AbstractModule;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.sun.jersey.api.client.UniformInterfaceException;
-import com.sun.jersey.api.client.WebResource;
-import com.sun.jersey.spi.container.servlet.ServletContainer;
-import com.sun.jersey.test.framework.JerseyTest;
-import com.sun.jersey.test.framework.WebAppDescriptor;
-
-/**
- *  Testing bootstrap API.
- */
-public class BootStrapResourceTest extends JerseyTest {
-
-  static String PACKAGE_NAME = "org.apache.ambari.server.api.rest";
-  private static Log LOG = LogFactory.getLog(BootStrapResourceTest.class);
-  Injector injector;
-  BootStrapImpl bsImpl;
-
-  public BootStrapResourceTest() {
-    super(new WebAppDescriptor.Builder(PACKAGE_NAME).servletClass(ServletContainer.class)
-        .build());
-  }
-
-  public class MockModule extends AbstractModule {
-    @Override
-    protected void configure() {
-      bsImpl = mock(BootStrapImpl.class); // assign the outer field instead of shadowing it with a local
-      when(bsImpl.getStatus(0)).thenReturn(generateDummyBSStatus());
-      when(bsImpl.runBootStrap(any(SshHostInfo.class))).thenReturn(generateBSResponse());
-      bind(BootStrapImpl.class).toInstance(bsImpl);
-      requestStaticInjection(BootStrapResource.class);
-    }
-  }
-
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    injector = Guice.createInjector(new MockModule());
-  }
-
-  protected JSONObject createDummySshInfo() throws JSONException {
-    JSONObject json = new JSONObject();
-    json.put("sshkey", "awesome");
-    json.put("hosts", new ArrayList<String>());
-    return json;
-  }
-
-  protected BSResponse generateBSResponse() {
-    BSResponse response = new BSResponse();
-    response.setLog("Logging");
-    response.setRequestId(1);
-    response.setStatus(BSRunStat.OK);
-    return response;
-  }
-
-  protected BootStrapStatus generateDummyBSStatus() {
-    BootStrapStatus status = new BootStrapStatus();
-    status.setLog("Logging ");
-    status.setStatus(BSStat.ERROR);
-    status.setHostsStatus(new ArrayList<BSHostStatus>());
-    return status;
-  }
-
-  @Test
-  public void bootStrapGet() throws UniformInterfaceException, JSONException {
-    WebResource webResource = resource();
-    BootStrapStatus status = webResource.path("/bootstrap/0").type(
-        MediaType.APPLICATION_JSON)
-        .get(BootStrapStatus.class);
-    LOG.info("GET Response from the API " + status.getLog() + " " +
-        status.getStatus());
-    Assert.assertEquals(BSStat.ERROR, status.getStatus());
-  }
-
-  @Test
-  public void bootStrapPost() throws UniformInterfaceException, JSONException {
-    WebResource webResource = resource();
-    JSONObject object = webResource.path("/bootstrap").type(
-        MediaType.APPLICATION_JSON).post(JSONObject.class, createDummySshInfo());
-
-    Assert.assertEquals("OK", object.get("status"));
-  }
-}
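The MockModule above binds a mocked BootStrapImpl and then calls requestStaticInjection so that BootStrapResource, which Jersey instantiates outside of Guice, still receives the mock through its static fields. A minimal sketch of that static-injection trick, with made-up types (Holder and its message field are illustrative only):

import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Inject;

public class StaticInjectionSketch {
  // Stand-in for a class instantiated outside Guice (as Jersey does with
  // BootStrapResource); its static field is populated via static injection.
  public static class Holder {
    @Inject
    static String message;
  }

  public static void main(String[] args) {
    Guice.createInjector(new AbstractModule() {
      @Override
      protected void configure() {
        bind(String.class).toInstance("injected statically");
        requestStaticInjection(Holder.class);
      }
    });
    System.out.println(Holder.message); // prints "injected statically"
  }
}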
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/bootstrap/BootStrapTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/bootstrap/BootStrapTest.java
deleted file mode 100644
index 56ee375..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/bootstrap/BootStrapTest.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.bootstrap;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Properties;
-
-import junit.framework.Assert;
-import junit.framework.TestCase;
-
-import org.apache.ambari.server.bootstrap.BootStrapStatus.BSStat;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-
-/**
- * Test BootStrap Implementation.
- */
-public class BootStrapTest extends TestCase {
-  private static Log LOG = LogFactory.getLog(BootStrapTest.class);
-  public TemporaryFolder temp = new TemporaryFolder();
-
-  @Before
-  public void setUp() throws IOException {
-    temp.create();
-  }
-
-  @After
-  public void tearDown() throws IOException {
-    temp.delete();
-  }
-
-  @Test
-  public void testRun() throws Exception {
-    Properties properties = new Properties();
-    String bootdir =  temp.newFolder("bootdir").toString();
-    LOG.info("Bootdir is " + bootdir);
-    properties.setProperty(Configuration.BOOTSTRAP_DIR,
-       bootdir);
-    properties.setProperty(Configuration.BOOTSTRAP_SCRIPT, "echo");
-    properties.setProperty(Configuration.SRVR_KSTR_DIR_KEY, "target" + File.separator + "classes");
-    Configuration conf = new Configuration(properties);
-    BootStrapImpl impl = new BootStrapImpl(conf);
-    impl.init();
-    SshHostInfo info = new SshHostInfo();
-    info.setSshKey("xyz");
-    ArrayList<String> hosts = new ArrayList<String>();
-    hosts.add("host1");
-    hosts.add("host2");
-    info.setHosts(hosts);
-    BSResponse response = impl.runBootStrap(info);
-    LOG.info("Response id from bootstrap " + response.getRequestId());
-    /* do a query */
-    BootStrapStatus status = impl.getStatus(response.getRequestId());
-    LOG.info("Status " + status.getStatus());
-    int num = 0;
-    while ((status.getStatus() != BSStat.SUCCESS) && (num < 10000)) {
-        status = impl.getStatus(response.getRequestId());
-        Thread.sleep(100);
-        num++;
-    }
-    LOG.info("Status: log " + status.getLog() + " status=" + status.getStatus()
-        );
-    /* Note its an echo command so it should echo host1,host2 */
-    Assert.assertTrue(status.getLog().contains("host1,host2"));
-    Assert.assertEquals(BSStat.SUCCESS, status.getStatus());
-  }
-
-  @Test
-  public void testPolling() throws Exception {
-    File tmpFolder = temp.newFolder("bootstrap");
-    /* create log and done files */
-    FileUtils.writeStringToFile(new File(tmpFolder, "host1.done"), "0");
-    FileUtils.writeStringToFile(new File(tmpFolder, "host1.log"), "err_log_1");
-    FileUtils.writeStringToFile(new File(tmpFolder, "host2.done"), "1");
-    FileUtils.writeStringToFile(new File(tmpFolder, "host2.log"), "err_log_2");
-
-    List<String> listHosts = new ArrayList<String>();
-    listHosts.add("host1");
-    listHosts.add("host2");
-    BSHostStatusCollector collector = new BSHostStatusCollector(tmpFolder,
-        listHosts);
-    collector.run();
-    List<BSHostStatus> polledHostStatus = collector.getHostStatus();
-    Assert.assertEquals(2, polledHostStatus.size());
-    Assert.assertEquals("host1", polledHostStatus.get(0).getHostName());
-    Assert.assertEquals("err_log_1", polledHostStatus.get(0).getLog());
-    Assert.assertEquals("DONE", polledHostStatus.get(0).getStatus());
-    Assert.assertEquals("host2", polledHostStatus.get(1).getHostName());
-    Assert.assertEquals("err_log_2", polledHostStatus.get(1).getLog());
-    Assert.assertEquals("FAILED", polledHostStatus.get(1).getStatus());
-  }
-
-}
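testRun() above polls getStatus in a bounded loop rather than sleeping once and hoping the bootstrap has finished. A compact sketch of that polling shape, with a hypothetical time-based condition standing in for the bootstrap status check:

import java.util.concurrent.Callable;

public class BoundedPollSketch {
  // Re-evaluate the condition until it holds or the attempt budget runs out;
  // mirrors the (status != SUCCESS && num < 10000) loop in testRun().
  static boolean pollUntil(Callable<Boolean> condition, int maxAttempts,
                           long sleepMillis) throws Exception {
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      if (condition.call()) {
        return true;
      }
      Thread.sleep(sleepMillis);
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    final long readyAt = System.currentTimeMillis() + 250;
    boolean done = pollUntil(new Callable<Boolean>() {
      public Boolean call() {
        return System.currentTimeMillis() >= readyAt;
      }
    }, 100, 10);
    System.out.println("condition met: " + done);
  }
}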
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
deleted file mode 100644
index 59b6b96..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
+++ /dev/null
@@ -1,1463 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller;
-
-import com.google.gson.Gson;
-import com.google.inject.Injector;
-import org.apache.ambari.server.*;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.state.*;
-import org.easymock.Capture;
-import org.junit.Test;
-
-import java.lang.reflect.Field;
-import java.util.*;
-
-import static org.junit.Assert.*;
-import static org.easymock.EasyMock.*;
-
-/**
- * AmbariManagementControllerImpl unit tests
- */
-public class AmbariManagementControllerImplTest {
-
-  @Test
-  public void testGetClusters() throws Exception {
-    // member state mocks
-    Injector injector = createStrictMock(Injector.class);
-    Capture<AmbariManagementController> controllerCapture = new Capture<AmbariManagementController>();
-    Clusters clusters = createNiceMock(Clusters.class);
-
-    ClusterRequest request1 = new ClusterRequest(null, "cluster1", "1", Collections.<String>emptySet());
-    Cluster cluster = createNiceMock(Cluster.class);
-    ClusterResponse response = createNiceMock(ClusterResponse.class);
-
-    Set<ClusterRequest> setRequests = new HashSet<ClusterRequest>();
-    setRequests.add(request1);
-
-    // expectations
-    // constructor init
-    injector.injectMembers(capture(controllerCapture));
-    expect(injector.getInstance(Gson.class)).andReturn(null);
-
-    // getCluster
-    expect(clusters.getCluster("cluster1")).andReturn(cluster);
-    expect(cluster.convertToResponse()).andReturn(response);
-
-    // replay mocks
-    replay(injector, clusters, cluster, response);
-
-    // test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
-    Set<ClusterResponse> setResponses = controller.getClusters(setRequests);
-
-    // assert and verify
-    assertEquals(1, setResponses.size());
-    assertTrue(setResponses.contains(response));
-
-    verify(injector, clusters, cluster, response);
-  }
-
-  /**
-   * Ensure that ClusterNotFoundException is propagated when there is a single request.
-   */
-  @Test
-  public void testGetClusters___ClusterNotFoundException() throws Exception {
-    // member state mocks
-    Injector injector = createStrictMock(Injector.class);
-    Capture<AmbariManagementController> controllerCapture = new Capture<AmbariManagementController>();
-    Clusters clusters = createNiceMock(Clusters.class);
-
-    // requests
-    ClusterRequest request1 = new ClusterRequest(null, "cluster1", "1", Collections.<String>emptySet());
-
-    Set<ClusterRequest> setRequests = new HashSet<ClusterRequest>();
-    setRequests.add(request1);
-
-    // expectations
-    // constructor init
-    injector.injectMembers(capture(controllerCapture));
-    expect(injector.getInstance(Gson.class)).andReturn(null);
-
-    // getCluster
-    expect(clusters.getCluster("cluster1")).andThrow(new ClusterNotFoundException("cluster1"));
-
-    // replay mocks
-    replay(injector, clusters);
-
-    //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
-
-    // assert that exception is thrown in case where there is a single request
-    try {
-      controller.getClusters(setRequests);
-      fail("expected ClusterNotFoundException");
-    } catch (ClusterNotFoundException e) {
-      // expected
-    }
-
-    verify(injector, clusters);
-  }
-
-  /**
-   * Ensure that ClusterNotFoundException is handled where there are multiple requests as would be the
-   * case when an OR predicate is provided in the query.
-   */
-  @Test
-  public void testGetClusters___OR_Predicate_ClusterNotFoundException() throws Exception {
-    // member state mocks
-    Injector injector = createStrictMock(Injector.class);
-    Capture<AmbariManagementController> controllerCapture = new Capture<AmbariManagementController>();
-    Clusters clusters = createNiceMock(Clusters.class);
-
-    Cluster cluster = createNiceMock(Cluster.class);
-    Cluster cluster2 = createNiceMock(Cluster.class);
-    ClusterResponse response = createNiceMock(ClusterResponse.class);
-    ClusterResponse response2 = createNiceMock(ClusterResponse.class);
-
-    // requests
-    ClusterRequest request1 = new ClusterRequest(null, "cluster1", "1", Collections.<String>emptySet());
-    ClusterRequest request2 = new ClusterRequest(null, "cluster2", "1", Collections.<String>emptySet());
-    ClusterRequest request3 = new ClusterRequest(null, "cluster3", "1", Collections.<String>emptySet());
-    ClusterRequest request4 = new ClusterRequest(null, "cluster4", "1", Collections.<String>emptySet());
-
-    Set<ClusterRequest> setRequests = new HashSet<ClusterRequest>();
-    setRequests.add(request1);
-    setRequests.add(request2);
-    setRequests.add(request3);
-    setRequests.add(request4);
-
-    // expectations
-    // constructor init
-    injector.injectMembers(capture(controllerCapture));
-    expect(injector.getInstance(Gson.class)).andReturn(null);
-
-    // getCluster
-    expect(clusters.getCluster("cluster1")).andThrow(new ClusterNotFoundException("cluster1"));
-    expect(clusters.getCluster("cluster2")).andReturn(cluster);
-    expect(clusters.getCluster("cluster3")).andReturn(cluster2);
-    expect(clusters.getCluster("cluster4")).andThrow(new ClusterNotFoundException("cluster4"));
-
-    expect(cluster.convertToResponse()).andReturn(response);
-    expect(cluster2.convertToResponse()).andReturn(response2);
-    // replay mocks
-    replay(injector, clusters, cluster, cluster2, response, response2);
-
-    //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
-    Set<ClusterResponse> setResponses = controller.getClusters(setRequests);
-
-    // assert and verify
-    assertSame(controller, controllerCapture.getValue());
-    assertEquals(2, setResponses.size());
-    assertTrue(setResponses.contains(response));
-    assertTrue(setResponses.contains(response2));
-
-    verify(injector, clusters, cluster, cluster2, response, response2);
-  }
-
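Aside: every test in this class uses an EasyMock Capture to grab the controller instance passed to injector.injectMembers and later assert (via assertSame) that it is the controller under test. A minimal sketch of the Capture pattern, using a made-up Registry interface rather than the Ambari types:

import static org.easymock.EasyMock.*;

import org.easymock.Capture;

public class CaptureSketch {
  interface Registry {
    void register(Object listener);
  }

  public static void main(String[] args) {
    Registry registry = createMock(Registry.class);
    Capture<Object> captured = new Capture<Object>();

    // expect one register() call, capturing whatever argument is passed
    registry.register(capture(captured));
    replay(registry);

    Object listener = new Object();
    registry.register(listener);
    verify(registry);

    // the Capture now holds the exact instance that was registered
    System.out.println(captured.getValue() == listener); // prints "true"
  }
}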
-  @Test
-  public void testGetServices() throws Exception {
-    // member state mocks
-    Injector injector = createStrictMock(Injector.class);
-    Capture<AmbariManagementController> controllerCapture = new Capture<AmbariManagementController>();
-    Clusters clusters = createNiceMock(Clusters.class);
-
-    Cluster cluster = createNiceMock(Cluster.class);
-    Service service = createNiceMock(Service.class);
-    ServiceResponse response = createNiceMock(ServiceResponse.class);
-
-    // requests
-    ServiceRequest request1 = new ServiceRequest("cluster1", "service1", Collections.<String, String>emptyMap(), null);
-
-    Set<ServiceRequest> setRequests = new HashSet<ServiceRequest>();
-    setRequests.add(request1);
-
-    // expectations
-    // constructor init
-    injector.injectMembers(capture(controllerCapture));
-    expect(injector.getInstance(Gson.class)).andReturn(null);
-
-    // getServices
-    expect(clusters.getCluster("cluster1")).andReturn(cluster);
-    expect(cluster.getService("service1")).andReturn(service);
-
-    expect(service.convertToResponse()).andReturn(response);
-    // replay mocks
-    replay(injector, clusters, cluster, service, response);
-
-    //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
-    Set<ServiceResponse> setResponses = controller.getServices(setRequests);
-
-    // assert and verify
-    assertSame(controller, controllerCapture.getValue());
-    assertEquals(1, setResponses.size());
-    assertTrue(setResponses.contains(response));
-
-    verify(injector, clusters, cluster, service, response);
-  }
-
-  /**
-   * Ensure that ServiceNotFoundException is propagated when there is a single request.
-   */
-  @Test
-  public void testGetServices___ServiceNotFoundException() throws Exception {
-    // member state mocks
-    Injector injector = createStrictMock(Injector.class);
-    Capture<AmbariManagementController> controllerCapture = new Capture<AmbariManagementController>();
-    Clusters clusters = createNiceMock(Clusters.class);
-
-    Cluster cluster = createNiceMock(Cluster.class);
-
-    // requests
-    ServiceRequest request1 = new ServiceRequest("cluster1", "service1", Collections.<String, String>emptyMap(), null);
-    Set<ServiceRequest> setRequests = new HashSet<ServiceRequest>();
-    setRequests.add(request1);
-
-    // expectations
-    // constructor init
-    injector.injectMembers(capture(controllerCapture));
-    expect(injector.getInstance(Gson.class)).andReturn(null);
-
-    // getServices
-    expect(clusters.getCluster("cluster1")).andReturn(cluster);
-    expect(cluster.getService("service1")).andThrow(new ServiceNotFoundException("custer1", "service1"));
-
-    // replay mocks
-    replay(injector, clusters, cluster);
-
-    //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
-
-    // assert that exception is thrown in case where there is a single request
-    try {
-      controller.getServices(setRequests);
-      fail("expected ServiceNotFoundException");
-    } catch (ServiceNotFoundException e) {
-      // expected
-    }
-
-    assertSame(controller, controllerCapture.getValue());
-    verify(injector, clusters, cluster);
-  }
-
-  /**
-   * Ensure that ServiceNotFoundException is handled where there are multiple requests as would be the
-   * case when an OR predicate is provided in the query.
-   */
-  @Test
-  public void testGetServices___OR_Predicate_ServiceNotFoundException() throws Exception {
-    // member state mocks
-    Injector injector = createStrictMock(Injector.class);
-    Capture<AmbariManagementController> controllerCapture = new Capture<AmbariManagementController>();
-    Clusters clusters = createNiceMock(Clusters.class);
-
-    Cluster cluster = createNiceMock(Cluster.class);
-    Service service1 = createNiceMock(Service.class);
-    Service service2 = createNiceMock(Service.class);
-    ServiceResponse response = createNiceMock(ServiceResponse.class);
-    ServiceResponse response2 = createNiceMock(ServiceResponse.class);
-
-    // requests
-    ServiceRequest request1 = new ServiceRequest("cluster1", "service1", Collections.<String, String>emptyMap(), null);
-    ServiceRequest request2 = new ServiceRequest("cluster1", "service2", Collections.<String, String>emptyMap(), null);
-    ServiceRequest request3 = new ServiceRequest("cluster1", "service3", Collections.<String, String>emptyMap(), null);
-    ServiceRequest request4 = new ServiceRequest("cluster1", "service4", Collections.<String, String>emptyMap(), null);
-
-    Set<ServiceRequest> setRequests = new HashSet<ServiceRequest>();
-    setRequests.add(request1);
-    setRequests.add(request2);
-    setRequests.add(request3);
-    setRequests.add(request4);
-
-    // expectations
-    // constructor init
-    injector.injectMembers(capture(controllerCapture));
-    expect(injector.getInstance(Gson.class)).andReturn(null);
-
-    // getServices
-    expect(clusters.getCluster("cluster1")).andReturn(cluster).times(4);
-    expect(cluster.getService("service1")).andReturn(service1);
-    expect(cluster.getService("service2")).andThrow(new ServiceNotFoundException("cluster1", "service2"));
-    expect(cluster.getService("service3")).andThrow(new ServiceNotFoundException("cluster1", "service3"));
-    expect(cluster.getService("service4")).andReturn(service2);
-
-    expect(service1.convertToResponse()).andReturn(response);
-    expect(service2.convertToResponse()).andReturn(response2);
-    // replay mocks
-    replay(injector, clusters, cluster, service1, service2, response, response2);
-
-    //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
-    Set<ServiceResponse> setResponses = controller.getServices(setRequests);
-
-    // assert and verify
-    assertSame(controller, controllerCapture.getValue());
-    assertEquals(2, setResponses.size());
-    assertTrue(setResponses.contains(response));
-    assertTrue(setResponses.contains(response2));
-
-    verify(injector, clusters, cluster, service1, service2, response, response2);
-  }
-
-  @Test
-  public void testGetComponents() throws Exception {
-    // member state mocks
-    Injector injector = createStrictMock(Injector.class);
-    Capture<AmbariManagementController> controllerCapture = new Capture<AmbariManagementController>();
-    Clusters clusters = createNiceMock(Clusters.class);
-
-    Cluster cluster = createNiceMock(Cluster.class);
-    Service service = createNiceMock(Service.class);
-    ServiceComponent component = createNiceMock(ServiceComponent.class);
-    ServiceComponentResponse response = createNiceMock(ServiceComponentResponse.class);
-
-    // requests
-    ServiceComponentRequest request1 = new ServiceComponentRequest("cluster1", "service1", "component1",
-        Collections.<String, String>emptyMap(), null);
-
-    Set<ServiceComponentRequest> setRequests = new HashSet<ServiceComponentRequest>();
-    setRequests.add(request1);
-
-    // expectations
-    // constructor init
-    injector.injectMembers(capture(controllerCapture));
-    expect(injector.getInstance(Gson.class)).andReturn(null);
-
-    // getComponents
-    expect(clusters.getCluster("cluster1")).andReturn(cluster);
-    expect(cluster.getService("service1")).andReturn(service);
-    expect(service.getServiceComponent("component1")).andReturn(component);
-
-    expect(component.convertToResponse()).andReturn(response);
-    // replay mocks
-    replay(injector, clusters, cluster, service, component, response);
-
-    //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
-    Set<ServiceComponentResponse> setResponses = controller.getComponents(setRequests);
-
-    // assert and verify
-    assertSame(controller, controllerCapture.getValue());
-    assertEquals(1, setResponses.size());
-    assertTrue(setResponses.contains(response));
-
-    verify(injector, clusters, cluster, service, component, response);
-  }
-
-  /**
-   * Ensure that ServiceComponentNotFoundException is propagated when there is a single request.
-   */
-  @Test
-  public void testGetComponents___ServiceComponentNotFoundException() throws Exception {
-    // member state mocks
-    Injector injector = createStrictMock(Injector.class);
-    Capture<AmbariManagementController> controllerCapture = new Capture<AmbariManagementController>();
-    Clusters clusters = createNiceMock(Clusters.class);
-
-    Cluster cluster = createNiceMock(Cluster.class);
-    Service service = createNiceMock(Service.class);
-
-    // requests
-    ServiceComponentRequest request1 = new ServiceComponentRequest("cluster1", "service1", "component1",
-        Collections.<String, String>emptyMap(), null);
-
-    Set<ServiceComponentRequest> setRequests = new HashSet<ServiceComponentRequest>();
-    setRequests.add(request1);
-
-    // expectations
-    // constructor init
-    injector.injectMembers(capture(controllerCapture));
-    expect(injector.getInstance(Gson.class)).andReturn(null);
-
-    // getComponents
-    expect(clusters.getCluster("cluster1")).andReturn(cluster);
-    expect(cluster.getService("service1")).andReturn(service);
-    expect(service.getServiceComponent("component1")).andThrow(
-        new ServiceComponentNotFoundException("cluster1", "service1", "component1"));
-    // replay mocks
-    replay(injector, clusters, cluster, service);
-
-    //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
-
-    // assert that exception is thrown in case where there is a single request
-    try {
-      controller.getComponents(setRequests);
-      fail("expected ServiceComponentNotFoundException");
-    } catch (ServiceComponentNotFoundException e) {
-      // expected
-    }
-
-    assertSame(controller, controllerCapture.getValue());
-    verify(injector, clusters, cluster, service);
-  }
-
-  /**
-   * Ensure that ServiceComponentNotFoundException is handled where there are multiple requests as would be the
-   * case when an OR predicate is provided in the query.
-   */
-  @Test
-  public void testGetComponents___OR_Predicate_ServiceComponentNotFoundException() throws Exception {
-    // member state mocks
-    Injector injector = createStrictMock(Injector.class);
-    Capture<AmbariManagementController> controllerCapture = new Capture<AmbariManagementController>();
-    Clusters clusters = createNiceMock(Clusters.class);
-
-    Cluster cluster = createNiceMock(Cluster.class);
-    Service service = createNiceMock(Service.class);
-    ServiceComponent component1 = createNiceMock(ServiceComponent.class);
-    ServiceComponent component2 = createNiceMock(ServiceComponent.class);
-    ServiceComponentResponse response1 = createNiceMock(ServiceComponentResponse.class);
-    ServiceComponentResponse response2 = createNiceMock(ServiceComponentResponse.class);
-
-    // requests
-    ServiceComponentRequest request1 = new ServiceComponentRequest("cluster1", "service1", "component1",
-        Collections.<String, String>emptyMap(), null);
-    ServiceComponentRequest request2 = new ServiceComponentRequest("cluster1", "service1", "component2",
-        Collections.<String, String>emptyMap(), null);
-    ServiceComponentRequest request3 = new ServiceComponentRequest("cluster1", "service1", "component3",
-        Collections.<String, String>emptyMap(), null);
-    ServiceComponentRequest request4 = new ServiceComponentRequest("cluster1", "service1", "component4",
-        Collections.<String, String>emptyMap(), null);
-
-    Set<ServiceComponentRequest> setRequests = new HashSet<ServiceComponentRequest>();
-    setRequests.add(request1);
-    setRequests.add(request2);
-    setRequests.add(request3);
-    setRequests.add(request4);
-
-    // expectations
-    // constructor init
-    injector.injectMembers(capture(controllerCapture));
-    expect(injector.getInstance(Gson.class)).andReturn(null);
-
-    // getComponents
-    expect(clusters.getCluster("cluster1")).andReturn(cluster).times(4);
-    expect(cluster.getService("service1")).andReturn(service).times(4);
-
-    expect(service.getServiceComponent("component1")).andThrow(new ServiceComponentNotFoundException("cluster1", "service1", "component1"));
-    expect(service.getServiceComponent("component2")).andThrow(new ServiceComponentNotFoundException("cluster1", "service1", "component2"));
-    expect(service.getServiceComponent("component3")).andReturn(component1);
-    expect(service.getServiceComponent("component4")).andReturn(component2);
-
-    expect(component1.convertToResponse()).andReturn(response1);
-    expect(component2.convertToResponse()).andReturn(response2);
-    // replay mocks
-    replay(injector, clusters, cluster, service, component1,  component2, response1, response2);
-
-    //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
-    Set<ServiceComponentResponse> setResponses = controller.getComponents(setRequests);
-
-    // assert and verify
-    assertSame(controller, controllerCapture.getValue());
-    assertEquals(2, setResponses.size());
-    assertTrue(setResponses.contains(response1));
-    assertTrue(setResponses.contains(response2));
-
-    verify(injector, clusters, cluster, service, component1,  component2, response1, response2);
-  }
-
-  @Test
-  public void testGetHosts() throws Exception {
-    // member state mocks
-    Injector injector = createStrictMock(Injector.class);
-    Capture<AmbariManagementController> controllerCapture = new Capture<AmbariManagementController>();
-    Clusters clusters = createNiceMock(Clusters.class);
-
-    Cluster cluster = createNiceMock(Cluster.class);
-    Host host = createNiceMock(Host.class);
-    HostResponse response = createNiceMock(HostResponse.class);
-
-    Set<Cluster> setCluster = Collections.singleton(cluster);
-
-    // requests
-    HostRequest request1 = new HostRequest("host1", "cluster1", Collections.<String, String>emptyMap());
-
-    Set<HostRequest> setRequests = new HashSet<HostRequest>();
-    setRequests.add(request1);
-
-    // expectations
-    // constructor init
-    injector.injectMembers(capture(controllerCapture));
-    expect(injector.getInstance(Gson.class)).andReturn(null);
-
-    // getHosts
-    expect(clusters.getCluster("cluster1")).andReturn(cluster);
-    expect(clusters.getHost("host1")).andReturn(host);
-    expect(host.getHostName()).andReturn("host1").anyTimes();
-    expect(clusters.getClustersForHost("host1")).andReturn(setCluster);
-    expect(host.convertToResponse()).andReturn(response);
-    response.setClusterName("cluster1");
-
-    // replay mocks
-    replay(injector, clusters, cluster, host, response);
-
-    //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
-    Set<HostResponse> setResponses = controller.getHosts(setRequests);
-
-    // assert and verify
-    assertSame(controller, controllerCapture.getValue());
-    assertEquals(1, setResponses.size());
-    assertTrue(setResponses.contains(response));
-
-    verify(injector, clusters, cluster, host, response);
-  }
-
-  /**
-   * Ensure that HostNotFoundException is propagated when there is a single request.
-   */
-  @Test
-  public void testGetHosts___HostNotFoundException() throws Exception {
-    // member state mocks
-    Injector injector = createStrictMock(Injector.class);
-    Capture<AmbariManagementController> controllerCapture = new Capture<AmbariManagementController>();
-    Clusters clusters = createNiceMock(Clusters.class);
-
-    Cluster cluster = createNiceMock(Cluster.class);
-
-    // requests
-    HostRequest request1 = new HostRequest("host1", "cluster1", Collections.<String, String>emptyMap());
-    Set<HostRequest> setRequests = Collections.singleton(request1);
-
-    // expectations
-    // constructor init
-    injector.injectMembers(capture(controllerCapture));
-    expect(injector.getInstance(Gson.class)).andReturn(null);
-
-    // getHosts
-    expect(clusters.getCluster("cluster1")).andReturn(cluster);
-    expect(clusters.getHost("host1")).andThrow(new HostNotFoundException("host1"));
-
-    // replay mocks
-    replay(injector, clusters, cluster);
-
-    //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
-
-    // assert that exception is thrown in case where there is a single request
-    try {
-      controller.getHosts(setRequests);
-      fail("expected HostNotFoundException");
-    } catch (HostNotFoundException e) {
-      // expected
-    }
-    assertSame(controller, controllerCapture.getValue());
-    verify(injector, clusters, cluster);
-  }
-
-  /**
-   * Ensure that HostNotFoundException is propagated when the host exists but is not associated with the requested cluster.
-   */
-  @Test
-  public void testGetHosts___HostNotFoundException_HostNotAssociatedWithCluster() throws Exception {
-    // member state mocks
-    Injector injector = createStrictMock(Injector.class);
-    Capture<AmbariManagementController> controllerCapture = new Capture<AmbariManagementController>();
-    Clusters clusters = createNiceMock(Clusters.class);
-
-    Cluster cluster = createNiceMock(Cluster.class);
-    Host host = createNiceMock(Host.class);
-
-    // requests
-    HostRequest request1 = new HostRequest("host1", "cluster1", Collections.<String, String>emptyMap());
-    Set<HostRequest> setRequests = Collections.singleton(request1);
-
-    // expectations
-    // constructor init
-    injector.injectMembers(capture(controllerCapture));
-    expect(injector.getInstance(Gson.class)).andReturn(null);
-
-    // getHosts
-    expect(clusters.getCluster("cluster1")).andReturn(cluster);
-    expect(clusters.getHost("host1")).andReturn(host);
-    expect(host.getHostName()).andReturn("host1").anyTimes();
-    // the cluster is not in the returned set, so getHosts will throw HostNotFoundException
-    expect(clusters.getClustersForHost("host1")).andReturn(Collections.<Cluster>emptySet());
-
-    // replay mocks
-    replay(injector, clusters, cluster, host);
-
-    //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
-
-    // assert that exception is thrown in case where there is a single request
-    try {
-      controller.getHosts(setRequests);
-      fail("expected HostNotFoundException");
-    } catch (HostNotFoundException e) {
-      // expected
-    }
-    assertSame(controller, controllerCapture.getValue());
-    verify(injector, clusters, cluster, host);
-  }
-
-
-  /**
-   * Ensure that HostNotFoundException is handled where there are multiple requests as would be the
-   * case when an OR predicate is provided in the query.
-   */
-  @Test
-  public void testGetHosts___OR_Predicate_HostNotFoundException() throws Exception {
-    // member state mocks
-    Injector injector = createStrictMock(Injector.class);
-    Capture<AmbariManagementController> controllerCapture = new Capture<AmbariManagementController>();
-    Clusters clusters = createNiceMock(Clusters.class);
-
-    Cluster cluster = createNiceMock(Cluster.class);
-    Host host1 = createNiceMock(Host.class);
-    Host host2 = createNiceMock(Host.class);
-    HostResponse response = createNiceMock(HostResponse.class);
-    HostResponse response2 = createNiceMock(HostResponse.class);
-
-    // requests
-    HostRequest request1 = new HostRequest("host1", "cluster1", Collections.<String, String>emptyMap());
-    HostRequest request2 = new HostRequest("host2", "cluster1", Collections.<String, String>emptyMap());
-    HostRequest request3 = new HostRequest("host3", "cluster1", Collections.<String, String>emptyMap());
-    HostRequest request4 = new HostRequest("host4", "cluster1", Collections.<String, String>emptyMap());
-
-    Set<HostRequest> setRequests = new HashSet<HostRequest>();
-    setRequests.add(request1);
-    setRequests.add(request2);
-    setRequests.add(request3);
-    setRequests.add(request4);
-
-    // expectations
-    // constructor init
-    injector.injectMembers(capture(controllerCapture));
-    expect(injector.getInstance(Gson.class)).andReturn(null);
-
-    // getHosts
-    expect(clusters.getCluster("cluster1")).andReturn(cluster).times(4);
-
-    expect(clusters.getHost("host1")).andReturn(host1);
-    expect(host1.getHostName()).andReturn("host1").anyTimes();
-    expect(clusters.getClustersForHost("host1")).andReturn(Collections.singleton(cluster));
-    expect(host1.convertToResponse()).andReturn(response);
-    response.setClusterName("cluster1");
-
-    expect(clusters.getHost("host2")).andReturn(host2);
-    expect(host2.getHostName()).andReturn("host2").anyTimes();
-    expect(clusters.getClustersForHost("host2")).andReturn(Collections.singleton(cluster));
-    expect(host2.convertToResponse()).andReturn(response2);
-    response2.setClusterName("cluster1");
-
-    expect(clusters.getHost("host3")).andThrow(new HostNotFoundException("host3"));
-    expect(clusters.getHost("host4")).andThrow(new HostNotFoundException("host4"));
-
-    // replay mocks
-    replay(injector, clusters, cluster, host1, host2, response, response2);
-
-    //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
-    Set<HostResponse> setResponses = controller.getHosts(setRequests);
-
-    // assert and verify
-    assertSame(controller, controllerCapture.getValue());
-    assertEquals(2, setResponses.size());
-    assertTrue(setResponses.contains(response));
-    assertTrue(setResponses.contains(response2));
-
-    verify(injector, clusters, cluster, host1, host2, response, response2);
-  }
-
-  @Test
-  public void testGetHostComponents() throws Exception {
-    // member state mocks
-    Injector injector = createStrictMock(Injector.class);
-    Capture<AmbariManagementController> controllerCapture = new Capture<AmbariManagementController>();
-    Clusters clusters = createNiceMock(Clusters.class);
-    StackId stack = createNiceMock(StackId.class);
-    AmbariMetaInfo metaInfo = createStrictMock(AmbariMetaInfo.class);
-
-    Cluster cluster = createNiceMock(Cluster.class);
-    Host host = createNiceMock(Host.class);
-    Service service = createNiceMock(Service.class);
-    ServiceComponent component = createNiceMock(ServiceComponent.class);
-    ServiceComponentHost componentHost = createNiceMock(ServiceComponentHost.class);
-    ServiceComponentHostResponse response = createNiceMock(ServiceComponentHostResponse.class);
-
-    // requests
-    ServiceComponentHostRequest request1 = new ServiceComponentHostRequest(
-        "cluster1", null, "component1", "host1", Collections.<String, String>emptyMap(), null);
-
-
-    Set<ServiceComponentHostRequest> setRequests = new HashSet<ServiceComponentHostRequest>();
-    setRequests.add(request1);
-
-    // expectations
-    // constructor init
-    injector.injectMembers(capture(controllerCapture));
-    expect(injector.getInstance(Gson.class)).andReturn(null);
-
-    // getHostComponent
-    expect(clusters.getCluster("cluster1")).andReturn(cluster);
-    expect(clusters.getClustersForHost("host1")).andReturn(Collections.singleton(cluster));
-
-    expect(cluster.getDesiredStackVersion()).andReturn(stack);
-    expect(stack.getStackName()).andReturn("stackName");
-    expect(stack.getStackVersion()).andReturn("stackVersion");
-
-    expect(metaInfo.getComponentToService("stackName", "stackVersion", "component1")).andReturn("service1");
-    expect(cluster.getService("service1")).andReturn(service);
-    expect(service.getServiceComponent("component1")).andReturn(component);
-    expect(component.getName()).andReturn("component1").anyTimes();
-    expect(component.getServiceComponentHost("host1")).andReturn(componentHost);
-    expect(componentHost.convertToResponse()).andReturn(response);
-
-    // replay mocks
-    replay(injector, clusters, cluster, host, response, stack, metaInfo, service, component, componentHost);
-
-    //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
-    //need to set private field 'ambariMetaInfo' which is injected at runtime
-    Class<?> c = controller.getClass();
-    Field f = c.getDeclaredField("ambariMetaInfo");
-    f.setAccessible(true);
-    f.set(controller, metaInfo);
-
-    Set<ServiceComponentHostResponse> setResponses = controller.getHostComponents(setRequests);
-
-    // assert and verify
-    assertSame(controller, controllerCapture.getValue());
-    assertEquals(1, setResponses.size());
-    assertTrue(setResponses.contains(response));
-
-    verify(injector, clusters, cluster, host, response, stack, metaInfo, service, component, componentHost);
-  }
-
-  @Test
-  public void testGetHostComponents___ServiceComponentHostNotFoundException() throws Exception {
-    // member state mocks
-    Injector injector = createStrictMock(Injector.class);
-    Capture<AmbariManagementController> controllerCapture = new Capture<AmbariManagementController>();
-    Clusters clusters = createNiceMock(Clusters.class);
-    StackId stack = createNiceMock(StackId.class);
-    AmbariMetaInfo metaInfo = createStrictMock(AmbariMetaInfo.class);
-
-    Cluster cluster = createNiceMock(Cluster.class);
-    Host host = createNiceMock(Host.class);
-    Service service = createNiceMock(Service.class);
-    ServiceComponent component = createNiceMock(ServiceComponent.class);
-
-    // requests
-    ServiceComponentHostRequest request1 = new ServiceComponentHostRequest(
-        "cluster1", null, "component1", "host1", Collections.<String, String>emptyMap(), null);
-
-
-    Set<ServiceComponentHostRequest> setRequests = new HashSet<ServiceComponentHostRequest>();
-    setRequests.add(request1);
-
-    // expectations
-    // constructor init
-    injector.injectMembers(capture(controllerCapture));
-    expect(injector.getInstance(Gson.class)).andReturn(null);
-
-    // getHostComponent
-    expect(clusters.getCluster("cluster1")).andReturn(cluster);
-    expect(clusters.getClustersForHost("host1")).andReturn(Collections.singleton(cluster));
-
-    expect(cluster.getDesiredStackVersion()).andReturn(stack);
-    expect(stack.getStackName()).andReturn("stackName");
-    expect(stack.getStackVersion()).andReturn("stackVersion");
-
-    expect(metaInfo.getComponentToService("stackName", "stackVersion", "component1")).andReturn("service1");
-    expect(cluster.getService("service1")).andReturn(service);
-    expect(service.getServiceComponent("component1")).andReturn(component);
-    expect(component.getName()).andReturn("component1").anyTimes();
-    expect(component.getServiceComponentHost("host1")).andThrow(
-        new ServiceComponentHostNotFoundException("cluster1", "service1", "component1", "host1"));
-
-    // replay mocks
-    replay(injector, clusters, cluster, host, stack, metaInfo, service, component);
-
-    //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
-    //need to set private field 'ambariMetaInfo' which is injected at runtime
-    Class<?> c = controller.getClass();
-    Field f = c.getDeclaredField("ambariMetaInfo");
-    f.setAccessible(true);
-    f.set(controller, metaInfo);
-
-    try {
-      controller.getHostComponents(setRequests);
-      fail("expected ServiceComponentHostNotFoundException");
-    } catch (ServiceComponentHostNotFoundException e) {
-      //expected
-    }
-
-    // assert and verify
-    assertSame(controller, controllerCapture.getValue());
-    verify(injector, clusters, cluster, host, stack, metaInfo, service, component);
-  }
-
-  @Test
-  public void testGetHostComponents___OR_Predicate_ServiceComponentHostNotFoundException() throws Exception {
-    // member state mocks
-    Injector injector = createStrictMock(Injector.class);
-    Capture<AmbariManagementController> controllerCapture = new Capture<AmbariManagementController>();
-    Clusters clusters = createNiceMock(Clusters.class);
-    StackId stack = createNiceMock(StackId.class);
-    AmbariMetaInfo metaInfo = createNiceMock(AmbariMetaInfo.class);
-
-    Cluster cluster = createNiceMock(Cluster.class);
-    Host host = createNiceMock(Host.class);
-    Service service = createNiceMock(Service.class);
-    ServiceComponent component = createNiceMock(ServiceComponent.class);
-    ServiceComponent component2 = createNiceMock(ServiceComponent.class);
-    ServiceComponent component3 = createNiceMock(ServiceComponent.class);
-
-    ServiceComponentHost componentHost1 = createNiceMock(ServiceComponentHost.class);
-    ServiceComponentHost componentHost2 = createNiceMock(ServiceComponentHost.class);
-    ServiceComponentHostResponse response1 = createNiceMock(ServiceComponentHostResponse.class);
-    ServiceComponentHostResponse response2 = createNiceMock(ServiceComponentHostResponse.class);
-
-    // requests
-    ServiceComponentHostRequest request1 = new ServiceComponentHostRequest(
-        "cluster1", null, "component1", "host1", Collections.<String, String>emptyMap(), null);
-
-    ServiceComponentHostRequest request2 = new ServiceComponentHostRequest(
-        "cluster1", null, "component2", "host1", Collections.<String, String>emptyMap(), null);
-
-    ServiceComponentHostRequest request3 = new ServiceComponentHostRequest(
-        "cluster1", null, "component3", "host1", Collections.<String, String>emptyMap(), null);
-
-
-    Set<ServiceComponentHostRequest> setRequests = new HashSet<ServiceComponentHostRequest>();
-    setRequests.add(request1);
-    setRequests.add(request2);
-    setRequests.add(request3);
-
-    // expectations
-    // constructor init
-    injector.injectMembers(capture(controllerCapture));
-    expect(injector.getInstance(Gson.class)).andReturn(null);
-
-    // getHostComponent
-    expect(clusters.getCluster("cluster1")).andReturn(cluster).times(3);
-    expect(clusters.getClustersForHost("host1")).andReturn(Collections.singleton(cluster)).anyTimes();
-    expect(cluster.getService("service1")).andReturn(service).times(3);
-
-    expect(cluster.getDesiredStackVersion()).andReturn(stack).anyTimes();
-    expect(stack.getStackName()).andReturn("stackName").anyTimes();
-    expect(stack.getStackVersion()).andReturn("stackVersion").anyTimes();
-
-
-    expect(metaInfo.getComponentToService("stackName", "stackVersion", "component1")).andReturn("service1");
-    expect(service.getServiceComponent("component1")).andReturn(component);
-    expect(component.getName()).andReturn("component1");
-    expect(component.getServiceComponentHost("host1")).andReturn(componentHost1);
-    expect(componentHost1.convertToResponse()).andReturn(response1);
-
-    expect(metaInfo.getComponentToService("stackName", "stackVersion", "component2")).andReturn("service1");
-    expect(service.getServiceComponent("component2")).andReturn(component2);
-    expect(component2.getName()).andReturn("component2");
-    expect(component2.getServiceComponentHost("host1")).andThrow(
-        new ServiceComponentHostNotFoundException("cluster1", "service1", "component2", "host1"));
-
-
-    expect(metaInfo.getComponentToService("stackName", "stackVersion", "component3")).andReturn("service1");
-    expect(service.getServiceComponent("component3")).andReturn(component3);
-    expect(component3.getName()).andReturn("component3");
-    expect(component3.getServiceComponentHost("host1")).andReturn(componentHost2);
-    expect(componentHost2.convertToResponse()).andReturn(response2);
-
-    // replay mocks
-    replay(injector, clusters, cluster, host, stack, metaInfo, service, component, component2, component3,
-        componentHost1, componentHost2, response1, response2);
-
-    //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
-    //need to set private field 'ambariMetaInfo' which is injected at runtime
-    Class<?> c = controller.getClass();
-    Field f = c.getDeclaredField("ambariMetaInfo");
-    f.setAccessible(true);
-    f.set(controller, metaInfo);
-
-    Set<ServiceComponentHostResponse> setResponses = controller.getHostComponents(setRequests);
-
-    // assert and verify
-    assertSame(controller, controllerCapture.getValue());
-    assertEquals(2, setResponses.size());
-    assertTrue(setResponses.contains(response1));
-    assertTrue(setResponses.contains(response2));
-
-    verify(injector, clusters, cluster, host, stack, metaInfo, service, component, component2, component3,
-        componentHost1, componentHost2, response1, response2);
-  }
-
-  @Test
-  public void testGetHostComponents___OR_Predicate_ServiceNotFoundException() throws Exception {
-    // member state mocks
-    Injector injector = createStrictMock(Injector.class);
-    Capture<AmbariManagementController> controllerCapture = new Capture<AmbariManagementController>();
-    Clusters clusters = createNiceMock(Clusters.class);
-    StackId stack = createNiceMock(StackId.class);
-    AmbariMetaInfo metaInfo = createNiceMock(AmbariMetaInfo.class);
-
-    Cluster cluster = createNiceMock(Cluster.class);
-    Host host = createNiceMock(Host.class);
-    Service service = createNiceMock(Service.class);
-    ServiceComponent component = createNiceMock(ServiceComponent.class);
-    ServiceComponent component2 = createNiceMock(ServiceComponent.class);
-    ServiceComponent component3 = createNiceMock(ServiceComponent.class);
-
-    ServiceComponentHost componentHost1 = createNiceMock(ServiceComponentHost.class);
-    ServiceComponentHost componentHost2 = createNiceMock(ServiceComponentHost.class);
-    ServiceComponentHostResponse response1 = createNiceMock(ServiceComponentHostResponse.class);
-    ServiceComponentHostResponse response2 = createNiceMock(ServiceComponentHostResponse.class);
-
-    // requests
-    ServiceComponentHostRequest request1 = new ServiceComponentHostRequest(
-        "cluster1", null, "component1", "host1", Collections.<String, String>emptyMap(), null);
-
-    ServiceComponentHostRequest request2 = new ServiceComponentHostRequest(
-        "cluster1", null, "component2", "host1", Collections.<String, String>emptyMap(), null);
-
-    ServiceComponentHostRequest request3 = new ServiceComponentHostRequest(
-        "cluster1", null, "component3", "host1", Collections.<String, String>emptyMap(), null);
-
-
-    Set<ServiceComponentHostRequest> setRequests = new HashSet<ServiceComponentHostRequest>();
-    setRequests.add(request1);
-    setRequests.add(request2);
-    setRequests.add(request3);
-
-    // expectations
-    // constructor init
-    injector.injectMembers(capture(controllerCapture));
-    expect(injector.getInstance(Gson.class)).andReturn(null);
-
-    // getHostComponent
-    expect(clusters.getCluster("cluster1")).andReturn(cluster).times(3);
-    expect(clusters.getClustersForHost("host1")).andReturn(Collections.singleton(cluster)).anyTimes();
-
-    expect(cluster.getDesiredStackVersion()).andReturn(stack).anyTimes();
-    expect(stack.getStackName()).andReturn("stackName").anyTimes();
-    expect(stack.getStackVersion()).andReturn("stackVersion").anyTimes();
-
-
-    expect(metaInfo.getComponentToService("stackName", "stackVersion", "component1")).andReturn("service1");
-    expect(cluster.getService("service1")).andReturn(service);
-    expect(service.getServiceComponent("component1")).andReturn(component);
-    expect(component.getName()).andReturn("component1");
-    expect(component.getServiceComponentHost("host1")).andReturn(componentHost1);
-    expect(componentHost1.convertToResponse()).andReturn(response1);
-
-    expect(metaInfo.getComponentToService("stackName", "stackVersion", "component2")).andReturn("service2");
-    expect(cluster.getService("service2")).andThrow(new ServiceNotFoundException("cluster1", "service2"));
-
-    expect(metaInfo.getComponentToService("stackName", "stackVersion", "component3")).andReturn("service1");
-    expect(cluster.getService("service1")).andReturn(service);
-    expect(service.getServiceComponent("component3")).andReturn(component3);
-    expect(component3.getName()).andReturn("component3");
-    expect(component3.getServiceComponentHost("host1")).andReturn(componentHost2);
-    expect(componentHost2.convertToResponse()).andReturn(response2);
-
-    // replay mocks
-    replay(injector, clusters, cluster, host, stack, metaInfo, service, component, component2, component3,
-        componentHost1, componentHost2, response1, response2);
-
-    //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
-    // need to set the private field 'ambariMetaInfo', which is injected at runtime
-    Class<?> c = controller.getClass();
-    Field f = c.getDeclaredField("ambariMetaInfo");
-    f.setAccessible(true);
-    f.set(controller, metaInfo);
-
-    Set<ServiceComponentHostResponse> setResponses = controller.getHostComponents(setRequests);
-
-    // assert and verify
-    assertSame(controller, controllerCapture.getValue());
-    assertEquals(2, setResponses.size());
-    assertTrue(setResponses.contains(response1));
-    assertTrue(setResponses.contains(response2));
-
-    verify(injector, clusters, cluster, host, stack, metaInfo, service, component, component2, component3,
-        componentHost1, componentHost2, response1, response2);
-  }
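What the mock choreography above pins down: when several host-component requests are evaluated together under an OR predicate, a ServiceNotFoundException raised for one request is swallowed and the remaining requests still produce responses. A sketch of that tolerance loop, with resolveRequest as a hypothetical per-request lookup (the real controller code may be structured differently):

    Set<ServiceComponentHostResponse> responses =
        new HashSet<ServiceComponentHostResponse>();
    for (ServiceComponentHostRequest request : requests) {
      try {
        responses.addAll(resolveRequest(request));
      } catch (ServiceNotFoundException e) {
        // skip this request: an OR predicate returns whatever it can resolve
      }
    }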
-
-  @Test
-  public void testGetHostComponents___OR_Predicate_ServiceComponentNotFoundException() throws Exception {
-    // member state mocks
-    Injector injector = createStrictMock(Injector.class);
-    Capture<AmbariManagementController> controllerCapture = new Capture<AmbariManagementController>();
-    Clusters clusters = createNiceMock(Clusters.class);
-    StackId stack = createNiceMock(StackId.class);
-    AmbariMetaInfo metaInfo = createNiceMock(AmbariMetaInfo.class);
-
-    Cluster cluster = createNiceMock(Cluster.class);
-    Host host = createNiceMock(Host.class);
-    Service service = createNiceMock(Service.class);
-    Service service2 = createNiceMock(Service.class);
-    ServiceComponent component = createNiceMock(ServiceComponent.class);
-    ServiceComponent component2 = createNiceMock(ServiceComponent.class);
-    ServiceComponent component3 = createNiceMock(ServiceComponent.class);
-
-    ServiceComponentHost componentHost1 = createNiceMock(ServiceComponentHost.class);
-    ServiceComponentHost componentHost2 = createNiceMock(ServiceComponentHost.class);
-    ServiceComponentHostResponse response1 = createNiceMock(ServiceComponentHostResponse.class);
-    ServiceComponentHostResponse response2 = createNiceMock(ServiceComponentHostResponse.class);
-
-    // requests
-    ServiceComponentHostRequest request1 = new ServiceComponentHostRequest(
-        "cluster1", null, "component1", "host1", Collections.<String, String>emptyMap(), null);
-
-    ServiceComponentHostRequest request2 = new ServiceComponentHostRequest(
-        "cluster1", null, "component2", "host1", Collections.<String, String>emptyMap(), null);
-
-    ServiceComponentHostRequest request3 = new ServiceComponentHostRequest(
-        "cluster1", null, "component3", "host1", Collections.<String, String>emptyMap(), null);
-
-
-    Set<ServiceComponentHostRequest> setRequests = new HashSet<ServiceComponentHostRequest>();
-    setRequests.add(request1);
-    setRequests.add(request2);
-    setRequests.add(request3);
-
-    // expectations
-    // constructor init
-    injector.injectMembers(capture(controllerCapture));
-    expect(injector.getInstance(Gson.class)).andReturn(null);
-
-    // getHostComponent
-    expect(clusters.getCluster("cluster1")).andReturn(cluster).times(3);
-    expect(clusters.getClustersForHost("host1")).andReturn(Collections.singleton(cluster)).anyTimes();
-
-    expect(cluster.getDesiredStackVersion()).andReturn(stack).anyTimes();
-    expect(stack.getStackName()).andReturn("stackName").anyTimes();
-    expect(stack.getStackVersion()).andReturn("stackVersion").anyTimes();
-
-
-    expect(metaInfo.getComponentToService("stackName", "stackVersion", "component1")).andReturn("service1");
-    expect(cluster.getService("service1")).andReturn(service);
-    expect(service.getServiceComponent("component1")).andReturn(component);
-    expect(component.getName()).andReturn("component1");
-    expect(component.getServiceComponentHost("host1")).andReturn(componentHost1);
-    expect(componentHost1.convertToResponse()).andReturn(response1);
-
-    expect(metaInfo.getComponentToService("stackName", "stackVersion", "component2")).andReturn("service2");
-    expect(cluster.getService("service2")).andReturn(service2);
-    expect(service2.getServiceComponent("component2")).
-        andThrow(new ServiceComponentNotFoundException("cluster1", "service2", "component2"));
-
-    expect(metaInfo.getComponentToService("stackName", "stackVersion", "component3")).andReturn("service1");
-    expect(cluster.getService("service1")).andReturn(service);
-    expect(service.getServiceComponent("component3")).andReturn(component3);
-    expect(component3.getName()).andReturn("component3");
-    expect(component3.getServiceComponentHost("host1")).andReturn(componentHost2);
-    expect(componentHost2.convertToResponse()).andReturn(response2);
-
-    // replay mocks
-    replay(injector, clusters, cluster, host, stack, metaInfo, service, service2, component, component2, component3,
-        componentHost1, componentHost2, response1, response2);
-
-    //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
-    // need to set the private field 'ambariMetaInfo', which is injected at runtime
-    Class<?> c = controller.getClass();
-    Field f = c.getDeclaredField("ambariMetaInfo");
-    f.setAccessible(true);
-    f.set(controller, metaInfo);
-
-    Set<ServiceComponentHostResponse> setResponses = controller.getHostComponents(setRequests);
-
-    // assert and verify
-    assertSame(controller, controllerCapture.getValue());
-    assertEquals(2, setResponses.size());
-    assertTrue(setResponses.contains(response1));
-    assertTrue(setResponses.contains(response2));
-
-    verify(injector, clusters, cluster, host, stack, metaInfo, service, service2, component, component2, component3,
-        componentHost1, componentHost2, response1, response2);
-  }
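Note the mock mix used throughout these tests: the Injector is a strict mock, so the constructor's injectMembers and getInstance calls are verified in order, while the model objects are nice mocks, which tolerate calls that were never expected. In EasyMock terms:

    // Strict mock: any unexpected or out-of-order call fails the test.
    Injector injector = createStrictMock(Injector.class);
    // Nice mock: unexpected calls are allowed and return null/0/false.
    Cluster cluster = createNiceMock(Cluster.class);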
-
-  @Test
-  public void testGetHostComponents___OR_Predicate_HostNotFoundException_hostProvidedInQuery() throws Exception {
-    // member state mocks
-    Injector injector = createStrictMock(Injector.class);
-    Capture<AmbariManagementController> controllerCapture = new Capture<AmbariManagementController>();
-    Clusters clusters = createNiceMock(Clusters.class);
-    StackId stack = createNiceMock(StackId.class);
-    AmbariMetaInfo metaInfo = createNiceMock(AmbariMetaInfo.class);
-
-    Cluster cluster = createNiceMock(Cluster.class);
-    Host host = createNiceMock(Host.class);
-    Service service = createNiceMock(Service.class);
-    Service service2 = createNiceMock(Service.class);
-    ServiceComponent component = createNiceMock(ServiceComponent.class);
-    ServiceComponent component2 = createNiceMock(ServiceComponent.class);
-    ServiceComponent component3 = createNiceMock(ServiceComponent.class);
-
-    ServiceComponentHost componentHost1 = createNiceMock(ServiceComponentHost.class);
-    ServiceComponentHost componentHost2 = createNiceMock(ServiceComponentHost.class);
-    ServiceComponentHostResponse response1 = createNiceMock(ServiceComponentHostResponse.class);
-    ServiceComponentHostResponse response2 = createNiceMock(ServiceComponentHostResponse.class);
-
-    // requests
-    ServiceComponentHostRequest request1 = new ServiceComponentHostRequest(
-        "cluster1", null, "component1", null, Collections.<String, String>emptyMap(), null);
-
-    ServiceComponentHostRequest request2 = new ServiceComponentHostRequest(
-        "cluster1", null, "component2", "host2", Collections.<String, String>emptyMap(), null);
-
-    ServiceComponentHostRequest request3 = new ServiceComponentHostRequest(
-        "cluster1", null, "component3", null, Collections.<String, String>emptyMap(), null);
-
-
-    Set<ServiceComponentHostRequest> setRequests = new HashSet<ServiceComponentHostRequest>();
-    setRequests.add(request1);
-    setRequests.add(request2);
-    setRequests.add(request3);
-
-    // expectations
-    // constructor init
-    injector.injectMembers(capture(controllerCapture));
-    expect(injector.getInstance(Gson.class)).andReturn(null);
-
-    // getHostComponent
-    expect(clusters.getCluster("cluster1")).andReturn(cluster).times(3);
-    //expect(clusters.getClustersForHost("host1")).andReturn(Collections.singleton(cluster)).anyTimes();
-
-    expect(cluster.getDesiredStackVersion()).andReturn(stack).anyTimes();
-    expect(stack.getStackName()).andReturn("stackName").anyTimes();
-    expect(stack.getStackVersion()).andReturn("stackVersion").anyTimes();
-
-    expect(metaInfo.getComponentToService("stackName", "stackVersion", "component1")).andReturn("service1");
-    expect(cluster.getService("service1")).andReturn(service);
-    expect(service.getServiceComponent("component1")).andReturn(component);
-    expect(component.getName()).andReturn("component1");
-    expect(component.getServiceComponentHosts()).andReturn(Collections.singletonMap("foo", componentHost1));
-    expect(componentHost1.convertToResponse()).andReturn(response1);
-
-    expect(clusters.getClustersForHost("host2")).andThrow(new HostNotFoundException("host2"));
-
-    expect(metaInfo.getComponentToService("stackName", "stackVersion", "component3")).andReturn("service1");
-    expect(cluster.getService("service1")).andReturn(service);
-    expect(service.getServiceComponent("component3")).andReturn(component3);
-    expect(component3.getName()).andReturn("component3");
-    expect(component3.getServiceComponentHosts()).andReturn(Collections.singletonMap("foo", componentHost2));
-    expect(componentHost2.convertToResponse()).andReturn(response2);
-
-    // replay mocks
-    replay(injector, clusters, cluster, host, stack, metaInfo, service, service2, component, component2, component3,
-        componentHost1, componentHost2, response1, response2);
-
-    //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
-    // need to set the private field 'ambariMetaInfo', which is injected at runtime
-    Class<?> c = controller.getClass();
-    Field f = c.getDeclaredField("ambariMetaInfo");
-    f.setAccessible(true);
-    f.set(controller, metaInfo);
-
-    Set<ServiceComponentHostResponse> setResponses = controller.getHostComponents(setRequests);
-
-    // assert and verify
-    assertSame(controller, controllerCapture.getValue());
-    assertEquals(2, setResponses.size());
-    assertTrue(setResponses.contains(response1));
-    assertTrue(setResponses.contains(response2));
-
-    verify(injector, clusters, cluster, host, stack, metaInfo, service, service2, component, component2, component3,
-        componentHost1, componentHost2, response1, response2);
-  }
-
-  @Test
-  public void testGetHostComponents___OR_Predicate_HostNotFoundException_hostProvidedInURL() throws Exception {
-    // member state mocks
-    Injector injector = createStrictMock(Injector.class);
-    Capture<AmbariManagementController> controllerCapture = new Capture<AmbariManagementController>();
-    Clusters clusters = createNiceMock(Clusters.class);
-    StackId stack = createNiceMock(StackId.class);
-    AmbariMetaInfo metaInfo = createNiceMock(AmbariMetaInfo.class);
-
-    Cluster cluster = createNiceMock(Cluster.class);
-
-    // requests
-    ServiceComponentHostRequest request1 = new ServiceComponentHostRequest(
-        "cluster1", null, "component1", "host1", Collections.<String, String>emptyMap(), null);
-
-    ServiceComponentHostRequest request2 = new ServiceComponentHostRequest(
-        "cluster1", null, "component2", "host1", Collections.<String, String>emptyMap(), null);
-
-    ServiceComponentHostRequest request3 = new ServiceComponentHostRequest(
-        "cluster1", null, "component3", "host1", Collections.<String, String>emptyMap(), null);
-
-
-    Set<ServiceComponentHostRequest> setRequests = new HashSet<ServiceComponentHostRequest>();
-    setRequests.add(request1);
-    setRequests.add(request2);
-    setRequests.add(request3);
-
-    // expectations
-    // constructor init
-    injector.injectMembers(capture(controllerCapture));
-    expect(injector.getInstance(Gson.class)).andReturn(null);
-
-    // getHostComponent
-    expect(clusters.getCluster("cluster1")).andReturn(cluster);
-    expect(clusters.getClustersForHost("host1")).andThrow(new HostNotFoundException("host1"));
-
-    // replay mocks
-    replay(injector, clusters, cluster, stack, metaInfo);
-
-    //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
-    // need to set the private field 'ambariMetaInfo', which is injected at runtime
-    Class<?> c = controller.getClass();
-    Field f = c.getDeclaredField("ambariMetaInfo");
-    f.setAccessible(true);
-    f.set(controller, metaInfo);
-
-    try {
-      controller.getHostComponents(setRequests);
-      fail("expected exception");
-    } catch (AmbariException e) {
-      // expected
-    }
-
-    // assert and verify
-    assertSame(controller, controllerCapture.getValue());
-
-    verify(injector, clusters, cluster, stack, metaInfo);
-  }
-
-  @Test
-  public void testGetHostComponents___OR_Predicate_ClusterNotFoundException() throws Exception {
-    // member state mocks
-    Injector injector = createStrictMock(Injector.class);
-    Capture<AmbariManagementController> controllerCapture = new Capture<AmbariManagementController>();
-    Clusters clusters = createNiceMock(Clusters.class);
-    StackId stack = createNiceMock(StackId.class);
-    AmbariMetaInfo metaInfo = createNiceMock(AmbariMetaInfo.class);
-
-    // requests
-    ServiceComponentHostRequest request1 = new ServiceComponentHostRequest(
-        "cluster1", null, "component1", "host1", Collections.<String, String>emptyMap(), null);
-
-    ServiceComponentHostRequest request2 = new ServiceComponentHostRequest(
-        "cluster1", null, "component2", "host2", Collections.<String, String>emptyMap(), null);
-
-    ServiceComponentHostRequest request3 = new ServiceComponentHostRequest(
-        "cluster1", null, "component3", "host1", Collections.<String, String>emptyMap(), null);
-
-
-    Set<ServiceComponentHostRequest> setRequests = new HashSet<ServiceComponentHostRequest>();
-    setRequests.add(request1);
-    setRequests.add(request2);
-    setRequests.add(request3);
-
-    // expectations
-    // constructor init
-    injector.injectMembers(capture(controllerCapture));
-    expect(injector.getInstance(Gson.class)).andReturn(null);
-
-    // getHostComponent
-    expect(clusters.getCluster("cluster1")).andThrow(new ClusterNotFoundException("cluster1"));
-
-    // replay mocks
-    replay(injector, clusters, stack, metaInfo);
-
-    //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
-    // need to set the private field 'ambariMetaInfo', which is injected at runtime
-    Class<?> c = controller.getClass();
-    Field f = c.getDeclaredField("ambariMetaInfo");
-    f.setAccessible(true);
-    f.set(controller, metaInfo);
-
-    try {
-      controller.getHostComponents(setRequests);
-      fail("expected exception");
-    } catch (ParentObjectNotFoundException e) {
-      // expected
-    }
-
-    // assert and verify
-    assertSame(controller, controllerCapture.getValue());
-
-    verify(injector, clusters, stack, metaInfo);
-  }
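This test also fixes the exception translation: the ClusterNotFoundException thrown by Clusters must reach the caller as a ParentObjectNotFoundException, the cluster being the parent resource of the host components queried. A plausible sketch of the wrapping (message text is illustrative):

    try {
      cluster = clusters.getCluster(request.getClusterName());
    } catch (ClusterNotFoundException e) {
      // the missing cluster is the parent of the requested host components
      throw new ParentObjectNotFoundException(
          "Parent Cluster resource doesn't exist", e);
    }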
-
-  @Test
-  public void testGetHostComponents___NullHostName() throws Exception {
-    // member state mocks
-    Injector injector = createStrictMock(Injector.class);
-    Capture<AmbariManagementController> controllerCapture = new Capture<AmbariManagementController>();
-    Clusters clusters = createNiceMock(Clusters.class);
-    StackId stack = createNiceMock(StackId.class);
-    AmbariMetaInfo metaInfo = createStrictMock(AmbariMetaInfo.class);
-
-    Cluster cluster = createNiceMock(Cluster.class);
-    Service service = createNiceMock(Service.class);
-    ServiceComponent component = createNiceMock(ServiceComponent.class);
-    ServiceComponentHost componentHost1 = createNiceMock(ServiceComponentHost.class);
-    ServiceComponentHost componentHost2 = createNiceMock(ServiceComponentHost.class);
-    ServiceComponentHostResponse response1 = createNiceMock(ServiceComponentHostResponse.class);
-    ServiceComponentHostResponse response2 = createNiceMock(ServiceComponentHostResponse.class);
-
-    // requests
-    ServiceComponentHostRequest request1 = new ServiceComponentHostRequest(
-        "cluster1", null, "component1", null, Collections.<String, String>emptyMap(), null);
-
-
-    Set<ServiceComponentHostRequest> setRequests = new HashSet<ServiceComponentHostRequest>();
-    setRequests.add(request1);
-
-    Map<String, ServiceComponentHost> mapHostComponents = new HashMap<String, ServiceComponentHost>();
-    mapHostComponents.put("foo", componentHost1);
-    mapHostComponents.put("bar", componentHost2);
-
-
-    // expectations
-    // constructor init
-    injector.injectMembers(capture(controllerCapture));
-    expect(injector.getInstance(Gson.class)).andReturn(null);
-
-    // getHostComponent
-    expect(clusters.getCluster("cluster1")).andReturn(cluster);
-
-    expect(cluster.getDesiredStackVersion()).andReturn(stack);
-    expect(stack.getStackName()).andReturn("stackName");
-    expect(stack.getStackVersion()).andReturn("stackVersion");
-
-    expect(metaInfo.getComponentToService("stackName", "stackVersion", "component1")).andReturn("service1");
-    expect(cluster.getService("service1")).andReturn(service);
-    expect(service.getServiceComponent("component1")).andReturn(component);
-    expect(component.getName()).andReturn("component1").anyTimes();
-
-    expect(component.getServiceComponentHosts()).andReturn(mapHostComponents);
-    expect(componentHost1.convertToResponse()).andReturn(response1);
-    expect(componentHost2.convertToResponse()).andReturn(response2);
-
-    // replay mocks
-    replay(injector, clusters, cluster, response1, response2, stack, metaInfo, service, component, componentHost1, componentHost2);
-
-    //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
-    // need to set the private field 'ambariMetaInfo', which is injected at runtime
-    Class<?> c = controller.getClass();
-    Field f = c.getDeclaredField("ambariMetaInfo");
-    f.setAccessible(true);
-    f.set(controller, metaInfo);
-
-    Set<ServiceComponentHostResponse> setResponses = controller.getHostComponents(setRequests);
-
-    // assert and verify
-    assertSame(controller, controllerCapture.getValue());
-    assertEquals(2, setResponses.size());
-    assertTrue(setResponses.contains(response1));
-    assertTrue(setResponses.contains(response2));
-
-    verify(injector, clusters, cluster, response1, response2, stack, metaInfo, service, component, componentHost1, componentHost2);
-  }
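With a null host name in the request, the expectations switch from getServiceComponentHost(host) to getServiceComponentHosts(): the controller is expected to fan out over every host running the component. The loop structure below is inferred from the expectations, not quoted from the implementation:

    if (request.getHostname() == null) {
      // no host given: answer for every host running this component
      for (ServiceComponentHost sch :
          component.getServiceComponentHosts().values()) {
        responses.add(sch.convertToResponse());
      }
    } else {
      responses.add(component.getServiceComponentHost(
          request.getHostname()).convertToResponse());
    }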
-
-  @Test
-  public void testGetHostComponents___NullHostName_NullComponentName() throws Exception {
-    // member state mocks
-    Injector injector = createStrictMock(Injector.class);
-    Capture<AmbariManagementController> controllerCapture = new Capture<AmbariManagementController>();
-    Clusters clusters = createNiceMock(Clusters.class);
-    StackId stack = createNiceMock(StackId.class);
-    AmbariMetaInfo metaInfo = createStrictMock(AmbariMetaInfo.class);
-
-    Cluster cluster = createNiceMock(Cluster.class);
-    Service service1 = createNiceMock(Service.class);
-    Service service2 = createNiceMock(Service.class);
-    ServiceComponent component1 = createNiceMock(ServiceComponent.class);
-    ServiceComponent component2 = createNiceMock(ServiceComponent.class);
-    ServiceComponentHost componentHost1 = createNiceMock(ServiceComponentHost.class);
-    ServiceComponentHost componentHost2 = createNiceMock(ServiceComponentHost.class);
-    ServiceComponentHost componentHost3 = createNiceMock(ServiceComponentHost.class);
-    ServiceComponentHostResponse response1 = createNiceMock(ServiceComponentHostResponse.class);
-    ServiceComponentHostResponse response2 = createNiceMock(ServiceComponentHostResponse.class);
-    ServiceComponentHostResponse response3 = createNiceMock(ServiceComponentHostResponse.class);
-
-    // requests
-    ServiceComponentHostRequest request1 = new ServiceComponentHostRequest(
-        "cluster1", null, null, null, Collections.<String, String>emptyMap(), null);
-
-
-    Set<ServiceComponentHostRequest> setRequests = new HashSet<ServiceComponentHostRequest>();
-    setRequests.add(request1);
-
-    Map<String, Service> mapServices = new HashMap<String, Service>();
-    mapServices.put("foo", service1);
-    mapServices.put("bar", service2);
-
-    Map<String, ServiceComponentHost> mapHostComponents = new HashMap<String, ServiceComponentHost>();
-    mapHostComponents.put("foo", componentHost1);
-    mapHostComponents.put("bar", componentHost2);
-
-
-    // expectations
-    // constructor init
-    injector.injectMembers(capture(controllerCapture));
-    expect(injector.getInstance(Gson.class)).andReturn(null);
-
-    // getHostComponent
-    expect(clusters.getCluster("cluster1")).andReturn(cluster);
-
-    expect(cluster.getServices()).andReturn(mapServices);
-    expect(service1.getServiceComponents()).andReturn(Collections.singletonMap("foo", component1));
-    expect(service2.getServiceComponents()).andReturn(Collections.singletonMap("bar", component2));
-
-    expect(component1.getName()).andReturn("component1").anyTimes();
-    expect(component2.getName()).andReturn("component2").anyTimes();
-
-    expect(component1.getServiceComponentHosts()).andReturn(mapHostComponents);
-    expect(componentHost1.convertToResponse()).andReturn(response1);
-    expect(componentHost2.convertToResponse()).andReturn(response2);
-
-    expect(component2.getServiceComponentHosts()).andReturn(Collections.singletonMap("foobar", componentHost3));
-    expect(componentHost3.convertToResponse()).andReturn(response3);
-
-    // replay mocks
-    replay(injector, clusters, cluster, response1, response2, response3, stack, metaInfo, service1, service2,
-        component1, component2, componentHost1, componentHost2, componentHost3);
-
-    //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
-    // need to set the private field 'ambariMetaInfo', which is injected at runtime
-    Class<?> c = controller.getClass();
-    Field f = c.getDeclaredField("ambariMetaInfo");
-    f.setAccessible(true);
-    f.set(controller, metaInfo);
-
-    Set<ServiceComponentHostResponse> setResponses = controller.getHostComponents(setRequests);
-
-    // assert and verify
-    assertSame(controller, controllerCapture.getValue());
-    assertEquals(3, setResponses.size());
-    assertTrue(setResponses.contains(response1));
-    assertTrue(setResponses.contains(response2));
-    assertTrue(setResponses.contains(response3));
-
-    verify(injector, clusters, cluster, response1, response2, response3, stack, metaInfo, service1, service2,
-        component1, component2, componentHost1, componentHost2, componentHost3);
-  }
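With both component and host null, the fan-out nests one level deeper, which is why the expectations enumerate cluster.getServices() and each service's components. Continuing the inferred sketch:

    for (Service s : cluster.getServices().values()) {
      for (ServiceComponent sc : s.getServiceComponents().values()) {
        for (ServiceComponentHost sch :
            sc.getServiceComponentHosts().values()) {
          responses.add(sch.convertToResponse());
        }
      }
    }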
-
-  // TODO other resources
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
deleted file mode 100644
index b10cd9f..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ /dev/null
@@ -1,3963 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import junit.framework.Assert;
-
-import org.apache.ambari.server.*;
-import org.apache.ambari.server.actionmanager.*;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.orm.GuiceJpaInitializer;
-import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.apache.ambari.server.orm.dao.RoleDAO;
-import org.apache.ambari.server.orm.entities.ClusterServiceEntity;
-import org.apache.ambari.server.orm.entities.RoleEntity;
-import org.apache.ambari.server.security.authorization.Users;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigImpl;
-import org.apache.ambari.server.state.Host;
-import org.apache.ambari.server.state.Service;
-import org.apache.ambari.server.state.ServiceComponent;
-import org.apache.ambari.server.state.ServiceComponentFactory;
-import org.apache.ambari.server.state.ServiceComponentHost;
-import org.apache.ambari.server.state.ServiceComponentHostFactory;
-import org.apache.ambari.server.state.ServiceFactory;
-import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.state.State;
-import org.apache.ambari.server.state.svccomphost.ServiceComponentHostStartEvent;
-import org.apache.ambari.server.utils.StageUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import javax.persistence.EntityManager;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.google.inject.persist.PersistService;
-
-import javax.xml.bind.JAXBException;
-
-public class AmbariManagementControllerTest {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(AmbariManagementControllerTest.class);
-
-  private AmbariManagementController controller;
-  private Clusters clusters;
-  private ActionDBAccessor actionDB;
-  private Injector injector;
-  private ServiceFactory serviceFactory;
-  private ServiceComponentFactory serviceComponentFactory;
-  private ServiceComponentHostFactory serviceComponentHostFactory;
-  private AmbariMetaInfo ambariMetaInfo;
-  private Users users;
-  private EntityManager entityManager;
-
-  @Before
-  public void setup() throws Exception {
-    injector = Guice.createInjector(new InMemoryDefaultTestModule());
-    injector.getInstance(GuiceJpaInitializer.class);
-    entityManager = injector.getInstance(EntityManager.class);
-    clusters = injector.getInstance(Clusters.class);
-    actionDB = injector.getInstance(ActionDBAccessor.class);
-    controller = injector.getInstance(AmbariManagementController.class);
-    serviceFactory = injector.getInstance(ServiceFactory.class);
-    serviceComponentFactory = injector.getInstance(
-        ServiceComponentFactory.class);
-    serviceComponentHostFactory = injector.getInstance(
-        ServiceComponentHostFactory.class);
-    ambariMetaInfo = injector.getInstance(AmbariMetaInfo.class);
-    ambariMetaInfo.init();
-    users = injector.getInstance(Users.class);
-  }
-
-  @After
-  public void teardown() {
-    injector.getInstance(PersistService.class).stop();
-    actionDB = null;
-  }
-
-  private void createCluster(String clusterName) throws AmbariException {
-    ClusterRequest r = new ClusterRequest(null, clusterName, "HDP-0.1", null);
-    controller.createCluster(r);
-  }
-
-  private void createService(String clusterName,
-      String serviceName, State desiredState) throws AmbariException {
-    String dStateStr = null;
-    if (desiredState != null) {
-      dStateStr = desiredState.toString();
-    }
-    ServiceRequest r1 = new ServiceRequest(clusterName, serviceName, null,
-        dStateStr);
-    Set<ServiceRequest> requests = new HashSet<ServiceRequest>();
-    requests.add(r1);
-    controller.createServices(requests);
-  }
-
-  private void createServiceComponent(String clusterName,
-      String serviceName, String componentName, State desiredState)
-          throws AmbariException {
-    String dStateStr = null;
-    if (desiredState != null) {
-      dStateStr = desiredState.toString();
-    }
-    ServiceComponentRequest r = new ServiceComponentRequest(clusterName,
-        serviceName, componentName, null, dStateStr);
-    Set<ServiceComponentRequest> requests =
-        new HashSet<ServiceComponentRequest>();
-    requests.add(r);
-    controller.createComponents(requests);
-  }
-
-  private void createServiceComponentHost(String clusterName,
-      String serviceName, String componentName, String hostname,
-      State desiredState) throws AmbariException {
-    String dStateStr = null;
-    if (desiredState != null) {
-      dStateStr = desiredState.toString();
-    }
-    ServiceComponentHostRequest r = new ServiceComponentHostRequest(clusterName,
-        serviceName, componentName, hostname, null, dStateStr);
-    Set<ServiceComponentHostRequest> requests =
-        new HashSet<ServiceComponentHostRequest>();
-    requests.add(r);
-    controller.createHostComponents(requests);
-  }
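Chained together, the four helpers above build a complete cluster topology in a few lines; the tests below perform exactly this sequence inline (hosts must first be added and mapped to the cluster, as the host-mapping tests demonstrate):

    createCluster("c1");
    createService("c1", "HDFS", State.INIT);
    createServiceComponent("c1", "HDFS", "NAMENODE", State.INIT);
    // assumes "h1" was added via clusters.addHost and mapped to "c1"
    createServiceComponentHost("c1", "HDFS", "NAMENODE", "h1", State.INIT);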
-
-  @Test
-  public void testCreateClusterSimple() throws AmbariException {
-    String clusterName = "foo1";
-    createCluster(clusterName);
-    Set<ClusterResponse> r =
-        controller.getClusters(Collections.singleton(
-            new ClusterRequest(null, clusterName, null, null)));
-    Assert.assertEquals(1, r.size());
-    ClusterResponse c = r.iterator().next();
-    Assert.assertEquals(clusterName, c.getClusterName());
-
-    try {
-      createCluster(clusterName);
-      fail("Duplicate cluster creation should fail");
-    } catch (AmbariException e) {
-      // Expected
-    }
-  }
-
-  @Test
-  public void testCreateClusterWithInvalidStack() {
-    // TODO implement test after metadata integration
-  }
-
-  @Test
-  public void testCreateClusterWithHostMapping() throws AmbariException {
-    Set<String> hostNames = new HashSet<String>();
-    hostNames.add("h1");
-    hostNames.add("h2");
-    ClusterRequest r = new ClusterRequest(null, "c1", "HDP-0.1", hostNames);
-
-    try {
-      controller.createCluster(r);
-      fail("Expected create cluster to fail for invalid hosts");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    try {
-      clusters.getCluster("c1");
-      fail("Expected to fail for non created cluster");
-    } catch (ClusterNotFoundException e) {
-      // Expected
-    }
-
-    clusters.addHost("h1");
-    clusters.addHost("h2");
-    clusters.getHost("h1").setOsType("redhat6");
-    clusters.getHost("h2").setOsType("redhat6");
-    clusters.getHost("h1").persist();
-    clusters.getHost("h2").persist();
-
-    controller.createCluster(r);
-    Assert.assertNotNull(clusters.getCluster("c1"));
-  }
-
-  @Test
-  public void testCreateClusterWithDesiredClusterConfigs() {
-    // TODO implement after configs integration
-  }
-
-  @Test
-  public void testCreateClusterWithInvalidRequest() {
-    ClusterRequest r = new ClusterRequest(null, null, null, null);
-    r.toString();
-
-    try {
-      controller.createCluster(r);
-      fail("Expected create cluster for invalid request");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    r.setClusterId(new Long(1));
-    try {
-      controller.createCluster(r);
-      fail("Expected create cluster for invalid request");
-    } catch (Exception e) {
-      // Expected
-    }
-    r.setClusterId(null);
-
-    r.setClusterName("foo");
-    try {
-      controller.createCluster(r);
-     fail("Expected create cluster for invalid request - no stack version");
-    } catch (Exception e) {
-      // Expected
-    }
-  }
-
-  @Test
-  public void testCreateServicesSimple() throws AmbariException {
-    String clusterName = "foo1";
-    createCluster(clusterName);
-    String serviceName = "HDFS";
-    clusters.getCluster("foo1").setDesiredStackVersion(
-        new StackId("HDP-0.1"));
-    createService(clusterName, serviceName, State.INIT);
-
-    Service s =
-        clusters.getCluster(clusterName).getService(serviceName);
-    Assert.assertNotNull(s);
-    Assert.assertEquals(serviceName, s.getName());
-    Assert.assertEquals(clusterName, s.getCluster().getClusterName());
-
-    ServiceRequest req = new ServiceRequest(clusterName, serviceName,
-        null, null);
-
-    Set<ServiceResponse> r =
-        controller.getServices(Collections.singleton(req));
-    Assert.assertEquals(1, r.size());
-    ServiceResponse resp = r.iterator().next();
-    Assert.assertEquals(serviceName, resp.getServiceName());
-    Assert.assertEquals(clusterName, resp.getClusterName());
-    Assert.assertEquals(State.INIT.toString(),
-        resp.getDesiredState());
-    Assert.assertEquals("HDP-0.1", resp.getDesiredStackVersion());
-
-    // TODO test resp.getConfigVersions()
-  }
-
-  @Test
-  public void testCreateServicesWithInvalidRequest() throws AmbariException {
-    // invalid request
-    // dups in requests
-    // multi cluster updates
-
-    Set<ServiceRequest> set1 = new HashSet<ServiceRequest>();
-
-    try {
-      set1.clear();
-      ServiceRequest rInvalid = new ServiceRequest(null, null, null, null);
-      set1.add(rInvalid);
-      controller.createServices(set1);
-      fail("Expected failure for invalid requests");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    try {
-      set1.clear();
-      ServiceRequest rInvalid = new ServiceRequest("foo", null, null, null);
-      set1.add(rInvalid);
-      controller.createServices(set1);
-      fail("Expected failure for invalid requests");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    try {
-      set1.clear();
-      ServiceRequest rInvalid = new ServiceRequest("foo", "bar", null, null);
-      set1.add(rInvalid);
-      controller.createServices(set1);
-      fail("Expected failure for invalid cluster");
-    } catch (ParentObjectNotFoundException e) {
-      // Expected
-    }
-
-    clusters.addCluster("foo");
-    clusters.addCluster("bar");
-    clusters.getCluster("foo").setDesiredStackVersion(new StackId("HDP-0.1"));
-    clusters.getCluster("bar").setDesiredStackVersion(new StackId("HDP-0.1"));
-
-    try {
-      set1.clear();
-      ServiceRequest valid1 = new ServiceRequest("foo", "HDFS", null, null);
-      ServiceRequest valid2 = new ServiceRequest("foo", "HDFS", null, null);
-      set1.add(valid1);
-      set1.add(valid2);
-      controller.createServices(set1);
-      fail("Expected failure for invalid requests");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    try {
-      set1.clear();
-      ServiceRequest valid1 = new ServiceRequest("foo", "bar", null, null);
-      set1.add(valid1);
-      controller.createServices(set1);
-      fail("Expected failure for invalid service");
-    } catch (Exception e) {
-      // Expected
-    }
-
-
-    try {
-      set1.clear();
-      ServiceRequest valid1 = new ServiceRequest("foo", "HDFS", null, null);
-      ServiceRequest valid2 = new ServiceRequest("bar", "HDFS", null, null);
-      set1.add(valid1);
-      set1.add(valid2);
-      controller.createServices(set1);
-      fail("Expected failure for multiple clusters");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    Assert.assertNotNull(clusters.getCluster("foo"));
-    Assert.assertEquals(0, clusters.getCluster("foo").getServices().size());
-
-    set1.clear();
-    ServiceRequest valid = new ServiceRequest("foo", "HDFS", null, null);
-    set1.add(valid);
-    controller.createServices(set1);
-
-    try {
-      set1.clear();
-      ServiceRequest valid1 = new ServiceRequest("foo", "HDFS", null, null);
-      ServiceRequest valid2 = new ServiceRequest("foo", "HDFS", null, null);
-      set1.add(valid1);
-      set1.add(valid2);
-      controller.createServices(set1);
-      fail("Expected failure for existing service");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    Assert.assertEquals(1, clusters.getCluster("foo").getServices().size());
-
-  }
-
-  @Test
-  public void testCreateServiceWithInvalidInfo() throws AmbariException {
-    String clusterName = "foo1";
-    createCluster(clusterName);
-    String serviceName = "HDFS";
-    try {
-      createService(clusterName, serviceName, State.INSTALLING);
-      fail("Service creation should fail for invalid state");
-    } catch (Exception e) {
-      // Expected
-    }
-    try {
-      clusters.getCluster(clusterName).getService(serviceName);
-      fail("Service creation should have failed");
-    } catch (Exception e) {
-      // Expected
-    }
-    try {
-      createService(clusterName, serviceName, State.INSTALLED);
-      fail("Service creation should fail for invalid initial state");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    createService(clusterName, serviceName, null);
-
-    String serviceName2 = "MAPREDUCE";
-    createService(clusterName, serviceName2, State.INIT);
-
-    ServiceRequest r = new ServiceRequest(clusterName, null, null, null);
-    Set<ServiceResponse> response = controller.getServices(Collections.singleton(r));
-    Assert.assertEquals(2, response.size());
-
-    for (ServiceResponse svc : response) {
-      Assert.assertTrue(svc.getServiceName().equals(serviceName)
-          || svc.getServiceName().equals(serviceName2));
-      Assert.assertEquals("HDP-0.1", svc.getDesiredStackVersion());
-      Assert.assertEquals(State.INIT.toString(), svc.getDesiredState());
-    }
-  }
-
-  @Test
-  public void testCreateServicesMultiple() throws AmbariException {
-    Set<ServiceRequest> set1 = new HashSet<ServiceRequest>();
-    clusters.addCluster("foo");
-    clusters.getCluster("foo").setDesiredStackVersion(new StackId("HDP-0.1"));
-
-    ServiceRequest valid1 = new ServiceRequest("foo", "HDFS", null, null);
-    ServiceRequest valid2 = new ServiceRequest("foo", "MAPREDUCE", null, null);
-    set1.add(valid1);
-    set1.add(valid2);
-    controller.createServices(set1);
-
-    try {
-      valid1 = new ServiceRequest("foo", "bar", null, null);
-      valid2 = new ServiceRequest("foo", "MAPREDUCE", null, null);
-      set1.add(valid1);
-      set1.add(valid2);
-      controller.createServices(set1);
-      fail("Expected failure for invalid services");
-    } catch (IllegalArgumentException e) {
-      // Expected
-    }
-
-    Assert.assertNotNull(clusters.getCluster("foo"));
-    Assert.assertEquals(2, clusters.getCluster("foo").getServices().size());
-    Assert.assertNotNull(clusters.getCluster("foo").getService("HDFS"));
-    Assert.assertNotNull(clusters.getCluster("foo").getService("MAPREDUCE"));
-  }
-
-  @Test
-  public void testCreateServiceComponentSimple() throws AmbariException {
-    String clusterName = "foo1";
-    createCluster(clusterName);
-    String serviceName = "HDFS";
-    createService(clusterName, serviceName, null);
-
-    String componentName = "NAMENODE";
-    try {
-      createServiceComponent(clusterName, serviceName, componentName,
-          State.INSTALLING);
-      fail("ServiceComponent creation should fail for invalid state");
-    } catch (Exception e) {
-      // Expected
-    }
-    try {
-      clusters.getCluster(clusterName).getService(serviceName)
-          .getServiceComponent(componentName);
-      fail("ServiceComponent creation should have failed");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    createServiceComponent(clusterName, serviceName, componentName,
-        State.INIT);
-    Assert.assertNotNull(clusters.getCluster(clusterName)
-        .getService(serviceName).getServiceComponent(componentName));
-
-    ServiceComponentRequest r =
-        new ServiceComponentRequest(clusterName, serviceName, null, null, null);
-    Set<ServiceComponentResponse> response = controller.getComponents(Collections.singleton(r));
-    Assert.assertEquals(1, response.size());
-
-    ServiceComponentResponse sc = response.iterator().next();
-    Assert.assertEquals(State.INIT.toString(), sc.getDesiredState());
-    Assert.assertEquals(componentName, sc.getComponentName());
-    Assert.assertEquals(clusterName, sc.getClusterName());
-    Assert.assertEquals(serviceName, sc.getServiceName());
-  }
-
-  @Test
-  public void testCreateServiceComponentWithInvalidRequest()
-      throws AmbariException {
-    // multiple clusters
-    // dup objects
-    // existing components
-    // invalid request params
-    // invalid service
-    // invalid cluster
-
-    Set<ServiceComponentRequest> set1 = new HashSet<ServiceComponentRequest>();
-
-    try {
-      set1.clear();
-      ServiceComponentRequest rInvalid =
-          new ServiceComponentRequest(null, null, null, null, null);
-      set1.add(rInvalid);
-      controller.createComponents(set1);
-      fail("Expected failure for invalid requests");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    try {
-      set1.clear();
-      ServiceComponentRequest rInvalid =
-          new ServiceComponentRequest("c1", null, null, null, null);
-      set1.add(rInvalid);
-      controller.createComponents(set1);
-      fail("Expected failure for invalid requests");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    try {
-      set1.clear();
-      ServiceComponentRequest rInvalid =
-          new ServiceComponentRequest("c1", "s1", null, null, null);
-      set1.add(rInvalid);
-      controller.createComponents(set1);
-      fail("Expected failure for invalid requests");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    try {
-      set1.clear();
-      ServiceComponentRequest rInvalid =
-          new ServiceComponentRequest("c1", "s1", "sc1", null, null);
-      set1.add(rInvalid);
-      controller.createComponents(set1);
-      fail("Expected failure for invalid cluster");
-    } catch (ParentObjectNotFoundException e) {
-      // Expected
-    }
-
-    clusters.addCluster("c1");
-    clusters.addCluster("c2");
-
-
-    try {
-      set1.clear();
-      ServiceComponentRequest rInvalid =
-          new ServiceComponentRequest("c1", "HDFS", "NAMENODE", null, null);
-      set1.add(rInvalid);
-      controller.createComponents(set1);
-      fail("Expected failure for invalid service");
-    } catch (ParentObjectNotFoundException e) {
-      // Expected
-    }
-
-    Cluster c1 = clusters.getCluster("c1");
-    c1.setDesiredStackVersion(new StackId("HDP-0.1"));
-    Service s1 = serviceFactory.createNew(c1, "HDFS");
-    Service s2 = serviceFactory.createNew(c1, "MAPREDUCE");
-    c1.addService(s1);
-    c1.addService(s2);
-    s1.persist();
-    s2.persist();
-
-    set1.clear();
-    ServiceComponentRequest valid1 =
-        new ServiceComponentRequest("c1", "HDFS", "NAMENODE", null, null);
-    ServiceComponentRequest valid2 =
-        new ServiceComponentRequest("c1", "MAPREDUCE", "JOBTRACKER", null, null);
-    ServiceComponentRequest valid3 =
-        new ServiceComponentRequest("c1", "MAPREDUCE", "TASKTRACKER", null,
-            null);
-    set1.add(valid1);
-    set1.add(valid2);
-    set1.add(valid3);
-    controller.createComponents(set1);
-
-    try {
-      set1.clear();
-      ServiceComponentRequest rInvalid1 =
-          new ServiceComponentRequest("c1", "HDFS", "HDFS_CLIENT", null, null);
-      ServiceComponentRequest rInvalid2 =
-          new ServiceComponentRequest("c1", "HDFS", "HDFS_CLIENT", null, null);
-      set1.add(rInvalid1);
-      set1.add(rInvalid2);
-      controller.createComponents(set1);
-      fail("Expected failure for dups in requests");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    try {
-      set1.clear();
-      ServiceComponentRequest rInvalid1 =
-          new ServiceComponentRequest("c1", "HDFS", "HDFS_CLIENT", null, null);
-      ServiceComponentRequest rInvalid2 =
-          new ServiceComponentRequest("c2", "HDFS", "HDFS_CLIENT", null, null);
-      set1.add(rInvalid1);
-      set1.add(rInvalid2);
-      controller.createComponents(set1);
-      fail("Expected failure for multiple clusters");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    try {
-      set1.clear();
-      ServiceComponentRequest rInvalid =
-          new ServiceComponentRequest("c1", "HDFS", "NAMENODE", null, null);
-      set1.add(rInvalid);
-      controller.createComponents(set1);
-      fail("Expected failure for already existing component");
-    } catch (Exception e) {
-      // Expected
-    }
-
-
-    Assert.assertEquals(1, s1.getServiceComponents().size());
-    Assert.assertNotNull(s1.getServiceComponent("NAMENODE"));
-    Assert.assertEquals(2, s2.getServiceComponents().size());
-    Assert.assertNotNull(s2.getServiceComponent("JOBTRACKER"));
-    Assert.assertNotNull(s2.getServiceComponent("TASKTRACKER"));
-
-  }
-
-
-  @Test
-  public void testCreateServiceComponentWithConfigs() {
-    // FIXME after config impl
-  }
-
-  @Test
-  public void testCreateServiceComponentMultiple() throws AmbariException {
-    clusters.addCluster("c1");
-    clusters.addCluster("c2");
-
-    Cluster c1 = clusters.getCluster("c1");
-    c1.setDesiredStackVersion(new StackId("HDP-0.2"));
-    Service s1 = serviceFactory.createNew(c1, "HDFS");
-    Service s2 = serviceFactory.createNew(c1, "MAPREDUCE");
-    c1.addService(s1);
-    c1.addService(s2);
-    s1.persist();
-    s2.persist();
-
-    Set<ServiceComponentRequest> set1 = new HashSet<ServiceComponentRequest>();
-    ServiceComponentRequest valid1 =
-        new ServiceComponentRequest("c1", "HDFS", "NAMENODE", null, null);
-    ServiceComponentRequest valid2 =
-        new ServiceComponentRequest("c1", "MAPREDUCE", "JOBTRACKER", null, null);
-    ServiceComponentRequest valid3 =
-        new ServiceComponentRequest("c1", "MAPREDUCE", "TASKTRACKER", null,
-            null);
-    set1.add(valid1);
-    set1.add(valid2);
-    set1.add(valid3);
-    controller.createComponents(set1);
-
-    Assert.assertEquals(1, c1.getService("HDFS").getServiceComponents().size());
-    Assert.assertEquals(2, c1.getService("MAPREDUCE").getServiceComponents().size());
-    Assert.assertNotNull(c1.getService("HDFS")
-        .getServiceComponent("NAMENODE"));
-    Assert.assertNotNull(c1.getService("MAPREDUCE")
-        .getServiceComponent("JOBTRACKER"));
-    Assert.assertNotNull(c1.getService("MAPREDUCE")
-        .getServiceComponent("TASKTRACKER"));
-  }
-
-  @Test
-  public void testCreateServiceComponentHostSimple() throws AmbariException {
-    String clusterName = "foo1";
-    createCluster(clusterName);
-    clusters.getCluster(clusterName)
-        .setDesiredStackVersion(new StackId("HDP-0.1"));
-    String serviceName = "HDFS";
-    createService(clusterName, serviceName, null);
-    String componentName1 = "NAMENODE";
-    String componentName2 = "DATANODE";
-    String componentName3 = "HDFS_CLIENT";
-    createServiceComponent(clusterName, serviceName, componentName1,
-        State.INIT);
-    createServiceComponent(clusterName, serviceName, componentName2,
-        State.INIT);
-    createServiceComponent(clusterName, serviceName, componentName3,
-        State.INIT);
-
-    String host1 = "h1";
-    clusters.addHost(host1);
-    clusters.getHost("h1").setOsType("centos5");
-    clusters.getHost("h1").persist();
-    String host2 = "h2";
-    clusters.addHost(host2);
-    clusters.getHost("h2").setOsType("centos5");
-    clusters.getHost("h2").persist();
-
-    clusters.getHost("h1").setOsType("centos5");
-    clusters.getHost("h2").setOsType("centos6");
-
-    try {
-      createServiceComponentHost(clusterName, serviceName, componentName1,
-          host1, State.INIT);
-      fail("ServiceComponentHost creation should fail for invalid host"
-          + " as host not mapped to cluster");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    clusters.mapHostToCluster(host1, clusterName);
-    clusters.mapHostToCluster(host2, clusterName);
-
-    try {
-      createServiceComponentHost(clusterName, serviceName, componentName1,
-          host1, State.INSTALLING);
-      fail("ServiceComponentHost creation should fail for invalid state");
-    } catch (Exception e) {
-      // Expected
-      e.printStackTrace();
-    }
-
-    try {
-      clusters.getCluster(clusterName).getService(serviceName)
-          .getServiceComponent(componentName1).getServiceComponentHost(host1);
-      fail("ServiceComponentHost creation should have failed earlier");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    // null service should work
-    createServiceComponentHost(clusterName, null, componentName1,
-        host1, null);
-    createServiceComponentHost(clusterName, serviceName, componentName2,
-        host1, null);
-    createServiceComponentHost(clusterName, serviceName, componentName2,
-        host2, null);
-    createServiceComponentHost(clusterName, serviceName, componentName3,
-        host1, null);
-    createServiceComponentHost(clusterName, serviceName, componentName3,
-        host2, null);
-
-    try {
-      createServiceComponentHost(clusterName, serviceName, componentName1,
-          host1, null);
-      fail("ServiceComponentHost creation should fail as duplicate");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    Assert.assertNotNull(clusters.getCluster(clusterName)
-        .getService(serviceName)
-        .getServiceComponent(componentName1)
-        .getServiceComponentHost(host1));
-    Assert.assertNotNull(clusters.getCluster(clusterName)
-        .getService(serviceName)
-        .getServiceComponent(componentName2)
-        .getServiceComponentHost(host1));
-    Assert.assertNotNull(clusters.getCluster(clusterName)
-        .getService(serviceName)
-        .getServiceComponent(componentName2)
-        .getServiceComponentHost(host2));
-    Assert.assertNotNull(clusters.getCluster(clusterName)
-        .getService(serviceName)
-        .getServiceComponent(componentName3)
-        .getServiceComponentHost(host1));
-    Assert.assertNotNull(clusters.getCluster(clusterName)
-        .getService(serviceName)
-        .getServiceComponent(componentName3)
-        .getServiceComponentHost(host2));
-
-    ServiceComponentHostRequest r =
-        new ServiceComponentHostRequest(clusterName, serviceName,
-            componentName2, null, null, null);
-
-    Set<ServiceComponentHostResponse> response =
-        controller.getHostComponents(Collections.singleton(r));
-    Assert.assertEquals(2, response.size());
-
-  }
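The "null service should work" case above relies on the same component-to-service resolution the earlier mock-based tests exercised: when a request omits the service name, the controller is expected to derive it from stack metadata. Roughly, with the setter being hypothetical:

    if (request.getServiceName() == null) {
      // resolve the owning service from stack metadata, as mocked earlier
      // via AmbariMetaInfo.getComponentToService(stack, version, component)
      String serviceName = ambariMetaInfo.getComponentToService(
          stackId.getStackName(), stackId.getStackVersion(),
          request.getComponentName());
      request.setServiceName(serviceName);  // hypothetical setter
    }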
-
-  @Test
-  public void testCreateServiceComponentHostMultiple()
-      throws AmbariException {
-    String clusterName = "foo1";
-    createCluster(clusterName);
-    String serviceName = "HDFS";
-    createService(clusterName, serviceName, null);
-    String componentName1 = "NAMENODE";
-    String componentName2 = "DATANODE";
-    createServiceComponent(clusterName, serviceName, componentName1,
-        State.INIT);
-    createServiceComponent(clusterName, serviceName, componentName2,
-        State.INIT);
-    String host1 = "h1";
-    clusters.addHost(host1);
-    clusters.getHost("h1").setOsType("centos5");
-    clusters.getHost("h1").persist();
-    String host2 = "h2";
-    clusters.addHost(host2);
-    clusters.getHost("h2").setOsType("centos5");
-    clusters.getHost("h2").persist();
-    clusters.mapHostToCluster(host1, clusterName);
-    clusters.mapHostToCluster(host2, clusterName);
-
-    Set<ServiceComponentHostRequest> set1 =
-        new HashSet<ServiceComponentHostRequest>();
-    ServiceComponentHostRequest r1 =
-        new ServiceComponentHostRequest(clusterName, serviceName,
-            componentName1, host1, null, State.INIT.toString());
-    ServiceComponentHostRequest r2 =
-        new ServiceComponentHostRequest(clusterName, serviceName,
-            componentName2, host1, null, State.INIT.toString());
-    ServiceComponentHostRequest r3 =
-        new ServiceComponentHostRequest(clusterName, serviceName,
-            componentName1, host2, null, State.INIT.toString());
-    ServiceComponentHostRequest r4 =
-        new ServiceComponentHostRequest(clusterName, serviceName,
-            componentName2, host2, null, State.INIT.toString());
-
-    set1.add(r1);
-    set1.add(r2);
-    set1.add(r3);
-    set1.add(r4);
-    controller.createHostComponents(set1);
-
-    Assert.assertEquals(2,
-      clusters.getCluster(clusterName).getServiceComponentHosts(host1).size());
-    Assert.assertEquals(2,
-      clusters.getCluster(clusterName).getServiceComponentHosts(host2).size());
-
-    Assert.assertNotNull(clusters.getCluster(clusterName)
-        .getService(serviceName).getServiceComponent(componentName1)
-        .getServiceComponentHost(host1));
-    Assert.assertNotNull(clusters.getCluster(clusterName)
-        .getService(serviceName).getServiceComponent(componentName1)
-        .getServiceComponentHost(host2));
-    Assert.assertNotNull(clusters.getCluster(clusterName)
-        .getService(serviceName).getServiceComponent(componentName2)
-        .getServiceComponentHost(host1));
-    Assert.assertNotNull(clusters.getCluster(clusterName)
-        .getService(serviceName).getServiceComponent(componentName2)
-        .getServiceComponentHost(host2));
-  }
-
-  @Test
-  public void testCreateServiceComponentHostWithInvalidRequest()
-      throws AmbariException {
-    // multiple clusters
-    // dup objects
-    // existing components
-    // invalid request params
-    // invalid service
-    // invalid cluster
-    // invalid component
-    // invalid host
-
-    Set<ServiceComponentHostRequest> set1 =
-        new HashSet<ServiceComponentHostRequest>();
-
-    try {
-      set1.clear();
-      ServiceComponentHostRequest rInvalid =
-          new ServiceComponentHostRequest(null, null, null, null, null, null);
-      set1.add(rInvalid);
-      controller.createHostComponents(set1);
-      fail("Expected failure for invalid requests");
-    } catch (IllegalArgumentException e) {
-      // Expected
-    }
-
-    try {
-      set1.clear();
-      ServiceComponentHostRequest rInvalid =
-          new ServiceComponentHostRequest("foo", null, null, null, null, null);
-      set1.add(rInvalid);
-      controller.createHostComponents(set1);
-      fail("Expected failure for invalid requests");
-    } catch (IllegalArgumentException e) {
-      // Expected
-    }
-
-    try {
-      set1.clear();
-      ServiceComponentHostRequest rInvalid =
-          new ServiceComponentHostRequest("foo", "HDFS", null, null,
-              null, null);
-      set1.add(rInvalid);
-      controller.createHostComponents(set1);
-      fail("Expected failure for invalid requests");
-    } catch (IllegalArgumentException e) {
-      // Expected
-    }
-
-    try {
-      set1.clear();
-      ServiceComponentHostRequest rInvalid =
-          new ServiceComponentHostRequest("foo", "HDFS", "NAMENODE", null,
-              null, null);
-      set1.add(rInvalid);
-      controller.createHostComponents(set1);
-      fail("Expected failure for invalid requests");
-    } catch (IllegalArgumentException e) {
-      // Expected
-    }
-
-    try {
-      set1.clear();
-      ServiceComponentHostRequest rInvalid =
-          new ServiceComponentHostRequest("foo", "HDFS", "NAMENODE", "h1",
-              null, null);
-      set1.add(rInvalid);
-      controller.createHostComponents(set1);
-      fail("Expected failure for invalid cluster");
-    } catch (ParentObjectNotFoundException e) {
-      // Expected
-    }
-
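-    // create the clusters so the next request gets past cluster validation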
-    clusters.addCluster("foo");
-    clusters.addCluster("c1");
-    clusters.addCluster("c2");
-    Cluster foo = clusters.getCluster("foo");
-    Cluster c1 = clusters.getCluster("c1");
-    Cluster c2 = clusters.getCluster("c2");
-    foo.setDesiredStackVersion(new StackId("HDP-0.2"));
-    c1.setDesiredStackVersion(new StackId("HDP-0.2"));
-    c2.setDesiredStackVersion(new StackId("HDP-0.2"));
-
-    try {
-      set1.clear();
-      ServiceComponentHostRequest rInvalid =
-          new ServiceComponentHostRequest("foo", "HDFS", "NAMENODE", "h1",
-              null, null);
-      set1.add(rInvalid);
-      controller.createHostComponents(set1);
-      fail("Expected failure for invalid service");
-    } catch (IllegalArgumentException e) {
-      // Expected
-    }
-
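-    // add the HDFS service to each cluster; the same request should now fail on the missing component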
-    Service s1 = serviceFactory.createNew(foo, "HDFS");
-    foo.addService(s1);
-    s1.persist();
-    Service s2 = serviceFactory.createNew(c1, "HDFS");
-    c1.addService(s2);
-    s2.persist();
-    Service s3 = serviceFactory.createNew(c2, "HDFS");
-    c2.addService(s3);
-    s3.persist();
-
-    try {
-      set1.clear();
-      ServiceComponentHostRequest rInvalid =
-          new ServiceComponentHostRequest("foo", "HDFS", "NAMENODE", "h1",
-              null, null);
-      set1.add(rInvalid);
-      controller.createHostComponents(set1);
-      fail("Expected failure for invalid service");
-    } catch (AmbariException e) {
-      // Expected
-    }
-
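-    // add the NAMENODE component; the next failure is the unknown host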
-    ServiceComponent sc1 = serviceComponentFactory.createNew(s1, "NAMENODE");
-    s1.addServiceComponent(sc1);
-    sc1.persist();
-    ServiceComponent sc2 = serviceComponentFactory.createNew(s2, "NAMENODE");
-    s2.addServiceComponent(sc2);
-    sc2.persist();
-    ServiceComponent sc3 = serviceComponentFactory.createNew(s3, "NAMENODE");
-    s3.addServiceComponent(sc3);
-    sc3.persist();
-
-    try {
-      set1.clear();
-      ServiceComponentHostRequest rInvalid =
-          new ServiceComponentHostRequest("foo", "HDFS", "NAMENODE", "h1",
-              null, null);
-      set1.add(rInvalid);
-      controller.createHostComponents(set1);
-      fail("Expected failure for invalid host");
-    } catch (AmbariException e) {
-      // Expected
-    }
-
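-    // register the hosts; the request still fails until they are mapped to a cluster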
-    clusters.addHost("h1");
-    Host h1 = clusters.getHost("h1");
-    h1.setIPv4("ipv41");
-    h1.setIPv6("ipv61");
-    h1.setOsType("centos6");
-    h1.persist();
-    clusters.addHost("h2");
-    Host h2 = clusters.getHost("h2");
-    h2.setIPv4("ipv42");
-    h2.setIPv6("ipv62");
-    h2.setOsType("centos6");
-    h2.persist();
-    clusters.addHost("h3");
-    Host h3 = clusters.getHost("h3");
-    h3.setIPv4("ipv43");
-    h3.setIPv6("ipv63");
-    h3.setOsType("centos6");
-    h3.persist();
-
-    try {
-      set1.clear();
-      ServiceComponentHostRequest rInvalid =
-          new ServiceComponentHostRequest("foo", "HDFS", "NAMENODE", "h1",
-              null, null);
-      set1.add(rInvalid);
-      controller.createHostComponents(set1);
-      fail("Expected failure for invalid host cluster mapping");
-    } catch (AmbariException e) {
-      // Expected
-    }
-
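-    // map the hosts into every cluster so a well-formed request finally succeeds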
-    Set<String> hostnames = new HashSet<String>();
-    hostnames.add("h1");
-    hostnames.add("h2");
-    hostnames.add("h3");
-    clusters.mapHostsToCluster(hostnames, "foo");
-    clusters.mapHostsToCluster(hostnames, "c1");
-    clusters.mapHostsToCluster(hostnames, "c2");
-
-    set1.clear();
-    ServiceComponentHostRequest valid =
-        new ServiceComponentHostRequest("foo", "HDFS", "NAMENODE", "h1",
-            null, null);
-    set1.add(valid);
-    controller.createHostComponents(set1);
-
-    try {
-      set1.clear();
-      ServiceComponentHostRequest rInvalid1 =
-          new ServiceComponentHostRequest("foo", "HDFS", "NAMENODE", "h2",
-              null, null);
-      ServiceComponentHostRequest rInvalid2 =
-          new ServiceComponentHostRequest("foo", "HDFS", "NAMENODE", "h2",
-              null, null);
-      set1.add(rInvalid1);
-      set1.add(rInvalid2);
-      controller.createHostComponents(set1);
-      fail("Expected failure for dup requests");
-    } catch (DuplicateResourceException e) {
-      // Expected
-    }
-
-    try {
-      set1.clear();
-      ServiceComponentHostRequest rInvalid1 =
-          new ServiceComponentHostRequest("c1", "HDFS", "NAMENODE", "h2",
-              null, null);
-      ServiceComponentHostRequest rInvalid2 =
-          new ServiceComponentHostRequest("c2", "HDFS", "NAMENODE", "h3",
-              null, null);
-      set1.add(rInvalid1);
-      set1.add(rInvalid2);
-      controller.createHostComponents(set1);
-      fail("Expected failure for multiple clusters");
-    } catch (IllegalArgumentException e) {
-      // Expected
-    }
-
-    try {
-      set1.clear();
-      ServiceComponentHostRequest rInvalid1 =
-          new ServiceComponentHostRequest("foo", "HDFS", "NAMENODE", "h1",
-              null, null);
-      ServiceComponentHostRequest rInvalid2 =
-          new ServiceComponentHostRequest("foo", "HDFS", "NAMENODE", "h2",
-              null, null);
-      set1.add(rInvalid1);
-      set1.add(rInvalid2);
-      controller.createHostComponents(set1);
-      fail("Expected failure for already existing");
-    } catch (DuplicateResourceException e) {
-      // Expected
-    }
-
-    Assert.assertEquals(1, foo.getServiceComponentHosts("h1").size());
-    Assert.assertEquals(0, foo.getServiceComponentHosts("h2").size());
-    Assert.assertEquals(0, foo.getServiceComponentHosts("h3").size());
-
-    set1.clear();
-    ServiceComponentHostRequest valid1 =
-        new ServiceComponentHostRequest("c1", "HDFS", "NAMENODE", "h1",
-            null, null);
-    set1.add(valid1);
-    controller.createHostComponents(set1);
-
-    set1.clear();
-    ServiceComponentHostRequest valid2 =
-        new ServiceComponentHostRequest("c2", "HDFS", "NAMENODE", "h1",
-            null, null);
-    set1.add(valid2);
-    controller.createHostComponents(set1);
-
-    Assert.assertEquals(1, foo.getServiceComponentHosts("h1").size());
-    Assert.assertEquals(1, c1.getServiceComponentHosts("h1").size());
-    Assert.assertEquals(1, c2.getServiceComponentHosts("h1").size());
-
-  }
-
-  @Test
-  public void testCreateHostSimple() throws AmbariException {
-    Map<String, String> hostAttributes = null;
-
-    HostRequest r1 = new HostRequest("h1", null, hostAttributes);
-    r1.toString(); // smoke-test toString(); the return value is intentionally unused
-
-    Set<HostRequest> requests = new HashSet<HostRequest>();
-    requests.add(r1);
-    try {
-      controller.createHosts(requests);
-      fail("Create host should fail for non-bootstrapped host");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    clusters.addHost("h1");
-    clusters.addHost("h2");
-    clusters.getHost("h1").setOsType("centos5");
-    clusters.getHost("h2").setOsType("centos5");
-    clusters.getHost("h1").persist();
-    clusters.getHost("h2").persist();
-
-    requests.add(new HostRequest("h2", "foo", new HashMap<String, String>()));
-
-    try {
-      controller.createHosts(requests);
-      fail("Create host should fail for invalid clusters");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    clusters.addCluster("foo");
-    clusters.getCluster("foo").setDesiredStackVersion(new StackId("HDP-0.1"));
-
-    controller.createHosts(requests);
-
-    Assert.assertNotNull(clusters.getHost("h1"));
-    Assert.assertNotNull(clusters.getHost("h2"));
-
-    Assert.assertEquals(0, clusters.getClustersForHost("h1").size());
-    Assert.assertEquals(1, clusters.getClustersForHost("h2").size());
-
-  }
-
-  @Test
-  public void testCreateHostMultiple() throws AmbariException {
-    clusters.addHost("h1");
-    clusters.addHost("h2");
-    clusters.addHost("h3");
-    clusters.addCluster("c1");
-    clusters.getCluster("c1").setDesiredStackVersion(new StackId("HDP-0.1"));
-    clusters.getHost("h1").setOsType("centos5");
-    clusters.getHost("h2").setOsType("centos5");
-    clusters.getHost("h3").setOsType("centos5");
-    clusters.getHost("h1").persist();
-    clusters.getHost("h2").persist();
-    clusters.getHost("h3").persist();
-
-    Map<String, String> hostAttrs =
-        new HashMap<String, String>();
-    hostAttrs.put("attr1", "val1");
-    hostAttrs.put("attr2", "val2");
-
-    String clusterName = "c1";
-
-    HostRequest r1 = new HostRequest("h1", clusterName, null);
-    HostRequest r2 = new HostRequest("h2", clusterName, hostAttrs);
-    HostRequest r3 = new HostRequest("h3", null, hostAttrs);
-
-    Set<HostRequest> set1 = new HashSet<HostRequest>();
-    set1.add(r1);
-    set1.add(r2);
-    set1.add(r3);
-    controller.createHosts(set1);
-
-    Assert.assertEquals(1, clusters.getClustersForHost("h1").size());
-    Assert.assertEquals(1, clusters.getClustersForHost("h2").size());
-    Assert.assertEquals(0, clusters.getClustersForHost("h3").size());
-
-    Assert.assertEquals(2, clusters.getHost("h2").getHostAttributes().size());
-    Assert.assertEquals(2, clusters.getHost("h3").getHostAttributes().size());
-    Assert.assertEquals("val1",
-        clusters.getHost("h2").getHostAttributes().get("attr1"));
-    Assert.assertEquals("val2",
-        clusters.getHost("h2").getHostAttributes().get("attr2"));
-  }
-
-  @Test
-  public void testCreateHostWithInvalidRequests() throws AmbariException {
-    // unknown host
-    // invalid clusters
-    // duplicate host
-
-    Set<HostRequest> set1 = new HashSet<HostRequest>();
-
-    try {
-      set1.clear();
-      HostRequest rInvalid =
-          new HostRequest("h1", null, null);
-      set1.add(rInvalid);
-      controller.createHosts(set1);
-      fail("Expected failure for invalid host");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    clusters.addHost("h1");
-
-    String clusterName = "c1";
-
-    try {
-      set1.clear();
-      HostRequest rInvalid =
-          new HostRequest("h1", clusterName, null);
-      set1.add(rInvalid);
-      controller.createHosts(set1);
-      fail("Expected failure for invalid cluster");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    clusters.addCluster("c1");
-
-    try {
-      set1.clear();
-      HostRequest rInvalid1 =
-          new HostRequest("h1", clusterName, null);
-      HostRequest rInvalid2 =
-          new HostRequest("h1", clusterName, null);
-      set1.add(rInvalid1);
-      set1.add(rInvalid2);
-      controller.createHosts(set1);
-      fail("Expected failure for dup requests");
-    } catch (Exception e) {
-      // Expected
-    }
-
-  }
-
-  @Test
-  public void testInstallAndStartService() throws Exception {
-    testCreateServiceComponentHostSimple(); // reuse the simple create scenario as test setup
-
-    String clusterName = "foo1";
-    String serviceName = "HDFS";
-
-    Cluster cluster = clusters.getCluster(clusterName);
-    Service s1 = cluster.getService(serviceName);
-
-    Map<String, Config> configs = new HashMap<String, Config>();
-    Map<String, String> properties = new HashMap<String, String>();
-    properties.put("a", "a1");
-    properties.put("b", "b1");
-
-    Config c1 = new ConfigImpl(cluster, "hdfs-site", properties, injector);
-    properties.put("c", "c1");
-    properties.put("d", "d1");
-    Config c2 = new ConfigImpl(cluster, "core-site", properties, injector);
-    Config c3 = new ConfigImpl(cluster, "foo-site", properties, injector);
-
-    c1.setVersionTag("v1");
-    c2.setVersionTag("v1");
-    c3.setVersionTag("v1");
-
-    cluster.addDesiredConfig(c1);
-    cluster.addDesiredConfig(c2);
-    cluster.addDesiredConfig(c3);
-    c1.persist();
-    c2.persist();
-    c3.persist();
-
-    configs.put(c1.getType(), c1);
-    configs.put(c2.getType(), c2);
-    s1.updateDesiredConfigs(configs);
-    s1.persist();
-
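-    // drive the whole HDFS service from INIT to INSTALLED through the controller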
-    ServiceRequest r = new ServiceRequest(clusterName, serviceName, null,
-        State.INSTALLED.toString());
-    Set<ServiceRequest> requests = new HashSet<ServiceRequest>();
-    requests.add(r);
-
-    RequestStatusResponse trackAction =
-        controller.updateServices(requests);
-    Assert.assertEquals(State.INSTALLED,
-        clusters.getCluster(clusterName).getService(serviceName)
-        .getDesiredState());
-    for (ServiceComponent sc :
-      clusters.getCluster(clusterName).getService(serviceName)
-      .getServiceComponents().values()) {
-      Assert.assertEquals(State.INSTALLED, sc.getDesiredState());
-      for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
-        Assert.assertEquals(State.INSTALLED, sch.getDesiredState());
-        Assert.assertEquals(State.INIT, sch.getState());
-      }
-    }
-
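-    // five INSTALL tasks expected: NAMENODE, DATANODE and HDFS_CLIENT on h1,
-    // plus DATANODE and HDFS_CLIENT on h2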
-    List<ShortTaskStatus> taskStatuses = trackAction.getTasks();
-    Assert.assertEquals(5, taskStatuses.size());
-
-    boolean foundH1NN = false;
-    boolean foundH1DN = false;
-    boolean foundH2DN = false;
-    boolean foundH1CLT = false;
-    boolean foundH2CLT = false;
-
-    for (ShortTaskStatus taskStatus : taskStatuses) {
-      LOG.debug("Task dump :"
-          + taskStatus.toString());
-      Assert.assertEquals(RoleCommand.INSTALL.toString(),
-          taskStatus.getCommand());
-      Assert.assertEquals(HostRoleStatus.PENDING.toString(),
-          taskStatus.getStatus());
-      if (taskStatus.getHostName().equals("h1")) {
-        if (Role.NAMENODE.toString().equals(taskStatus.getRole())) {
-          foundH1NN = true;
-        } else if (Role.DATANODE.toString().equals(taskStatus.getRole())) {
-          foundH1DN = true;
-        } else if (Role.HDFS_CLIENT.toString().equals(taskStatus.getRole())) {
-          foundH1CLT = true;
-        } else {
-          fail("Found invalid role for host h1");
-        }
-      } else if (taskStatus.getHostName().equals("h2")) {
-        if (Role.DATANODE.toString().equals(taskStatus.getRole())) {
-          foundH2DN = true;
-        } else if (Role.HDFS_CLIENT.toString().equals(taskStatus.getRole())) {
-          foundH2CLT = true;
-        } else {
-          fail("Found invalid role for host h2");
-        }
-      } else {
-        fail("Found invalid host in task list");
-      }
-    }
-    Assert.assertTrue(foundH1DN && foundH1NN && foundH2DN
-        && foundH1CLT && foundH2CLT);
-
-    // TODO validate stages?
-    List<Stage> stages = actionDB.getAllStages(trackAction.getRequestId());
-    Assert.assertEquals(1, stages.size());
-
-    for (Stage stage : stages) {
-      LOG.info("Stage Details for Install Service"
-          + ", stageId="+ stage.getStageId()
-          + ", actionId=" + stage.getActionId());
-
-      for (String host : stage.getHosts()) {
-        for (ExecutionCommandWrapper ecw : stage.getExecutionCommands(host)) {
-          Assert.assertFalse(
-              ecw.getExecutionCommand().getHostLevelParams().get("repo_info").isEmpty());
-
-          LOG.info("Dumping host action details"
-              + ", stageId=" + stage.getStageId()
-              + ", actionId=" + stage.getActionId()
-              + ", commandDetails="
-              + StageUtils.jaxbToString(ecw.getExecutionCommand()));
-        }
-      }
-    }
-
-    RequestStatusRequest statusRequest =
-        new RequestStatusRequest(trackAction.getRequestId(), null);
-    Set<RequestStatusResponse> statusResponses =
-        controller.getRequestStatus(statusRequest);
-    Assert.assertEquals(1, statusResponses.size());
-    RequestStatusResponse statusResponse =
-        statusResponses.iterator().next();
-    Assert.assertNotNull(statusResponse);
-    Assert.assertEquals(trackAction.getRequestId(),
-        statusResponse.getRequestId());
-    Assert.assertEquals(5, statusResponse.getTasks().size());
-
-    Set<TaskStatusRequest> taskRequests = new HashSet<TaskStatusRequest>();
-    TaskStatusRequest t1, t2;
-    t1 = new TaskStatusRequest();
-    t2 = new TaskStatusRequest();
-    t1.setRequestId(trackAction.getRequestId());
-    taskRequests.add(t1);
-    Set<TaskStatusResponse> taskResponses =
-        controller.getTaskStatus(taskRequests);
-    Assert.assertEquals(5, taskResponses.size());
-
-    t1.setTaskId(1L);
-    t2.setRequestId(trackAction.getRequestId());
-    t2.setTaskId(2L);
-    taskRequests.clear();
-    taskRequests.add(t1);
-    taskRequests.add(t2);
-    taskResponses = controller.getTaskStatus(taskRequests);
-    Assert.assertEquals(2, taskResponses.size());
-
-    // manually move the live state to INSTALLED since no real action manager is running
-    for (ServiceComponent sc :
-      clusters.getCluster(clusterName).getService(serviceName)
-      .getServiceComponents().values()) {
-      for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
-        sch.setState(State.INSTALLED);
-      }
-    }
-
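-    // with all host components live-INSTALLED, request STARTED; client
-    // components (HDFS_CLIENT) are expected to remain at INSTALLED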
-    r = new ServiceRequest(clusterName, serviceName, null,
-        State.STARTED.toString());
-    requests.clear();
-    requests.add(r);
-    trackAction = controller.updateServices(requests);
-
-    Assert.assertEquals(State.STARTED,
-        clusters.getCluster(clusterName).getService(serviceName)
-        .getDesiredState());
-    for (ServiceComponent sc :
-      clusters.getCluster(clusterName).getService(serviceName)
-      .getServiceComponents().values()) {
-      if (sc.getName().equals("HDFS_CLIENT")) {
-        Assert.assertEquals(State.INSTALLED, sc.getDesiredState());
-      } else {
-        Assert.assertEquals(State.STARTED, sc.getDesiredState());
-      }
-      for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
-        if (sch.getServiceComponentName().equals("HDFS_CLIENT")) {
-          Assert.assertEquals(State.INSTALLED, sch.getDesiredState());
-        } else {
-          Assert.assertEquals(State.STARTED, sch.getDesiredState());
-        }
-      }
-    }
-
-    // TODO validate stages?
-    stages = actionDB.getAllStages(trackAction.getRequestId());
-    Assert.assertEquals(2, stages.size());
-
-    for (Stage stage : stages) {
-      LOG.info("Stage Details for Start Service"
-          + ", stageId="+ stage.getStageId()
-          + ", actionId=" + stage.getActionId());
-
-      for (String host : stage.getHosts()) {
-        LOG.info("Dumping host action details"
-            + ", stageId=" + stage.getStageId()
-            + ", actionId=" + stage.getActionId()
-            + ", commandDetails="
-            + StageUtils.jaxbToString(stage.getExecutionCommands(host).get(0)));
-      }
-    }
-
-    StringBuilder sb = new StringBuilder();
-    clusters.debugDump(sb);
-    LOG.info("Cluster Dump: " + sb.toString());
-
-    statusRequest = new RequestStatusRequest(null, null);
-    statusResponses = controller.getRequestStatus(statusRequest);
-    Assert.assertEquals(2, statusResponses.size());
-
-    int counter = 0;
-    for (ServiceComponent sc :
-      clusters.getCluster(clusterName).getService(serviceName)
-      .getServiceComponents().values()) {
-      for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
-        if (sc.isClientComponent()) {
-          sch.setState(State.INSTALLED);
-        } else {
-          ++counter;
-          switch (counter % 3) { // cycle the non-client components through the three live states
-            case 0:
-              sch.setState(State.START_FAILED);
-              break;
-            case 1:
-              sch.setState(State.STOP_FAILED);
-              break;
-            case 2:
-              sch.setState(State.STARTED);
-              break;
-          }
-        }
-      }
-    }
-
-    r = new ServiceRequest(clusterName, serviceName, null,
-        State.INSTALLED.toString());
-    requests.clear();
-    requests.add(r);
-    trackAction = controller.updateServices(requests);
-
-    Assert.assertEquals(State.INSTALLED,
-        clusters.getCluster(clusterName).getService(serviceName)
-        .getDesiredState());
-    for (ServiceComponent sc :
-      clusters.getCluster(clusterName).getService(serviceName)
-      .getServiceComponents().values()) {
-      Assert.assertEquals(State.INSTALLED, sc.getDesiredState());
-      for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
-        Assert.assertEquals(State.INSTALLED, sch.getDesiredState());
-      }
-    }
-
-    // TODO validate stages?
-    stages = actionDB.getAllStages(trackAction.getRequestId());
-
-    for (Stage stage : stages) {
-      LOG.info("Stage Details for Stop Service : " + stage.toString());
-    }
-    Assert.assertEquals(1, stages.size());
-
-  }
-
-  @Test
-  public void testGetClusters() throws AmbariException {
-    clusters.addCluster("c1");
-
-    Cluster c1 = clusters.getCluster("c1");
-
-    c1.setDesiredStackVersion(new StackId("HDP-0.1"));
-
-    ClusterRequest r = new ClusterRequest(null, null, null, null);
-    Set<ClusterResponse> resp = controller.getClusters(Collections.singleton(r));
-    Assert.assertEquals(1, resp.size());
-
-    ClusterResponse resp1 = resp.iterator().next();
-
-    Assert.assertEquals(c1.getClusterId(), resp1.getClusterId().longValue());
-    Assert.assertEquals(c1.getClusterName(), resp1.getClusterName());
-    Assert.assertEquals(c1.getDesiredStackVersion().getStackId(),
-        resp1.getDesiredStackVersion());
-  }
-
-  @Test
-  public void testGetClustersWithFilters() throws AmbariException {
-    clusters.addCluster("c1");
-    clusters.addCluster("c2");
-    clusters.addCluster("c3");
-    clusters.addCluster("c4");
-
-    Cluster c1 = clusters.getCluster("c1");
-    Cluster c2 = clusters.getCluster("c2");
-    Cluster c3 = clusters.getCluster("c3");
-    Cluster c4 = clusters.getCluster("c4");
-
-    c1.setDesiredStackVersion(new StackId("HDP-0.1"));
-    c2.setDesiredStackVersion(new StackId("HDP-0.1"));
-    c3.setDesiredStackVersion(new StackId("HDP-1.1.0"));
-
-    ClusterRequest r = new ClusterRequest(null, null, null, null);
-    Set<ClusterResponse> resp = controller.getClusters(Collections.singleton(r));
-    Assert.assertEquals(4, resp.size());
-
-    r = new ClusterRequest(null, "c1", null, null);
-    resp = controller.getClusters(Collections.singleton(r));
-    Assert.assertEquals(1, resp.size());
-    Assert.assertEquals(c1.getClusterId(),
-        resp.iterator().next().getClusterId().longValue());
-
-    r = new ClusterRequest(null, null, "HDP-0.1", null);
-    resp = controller.getClusters(Collections.singleton(r));
-    Assert.assertEquals(2, resp.size());
-
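-    // an empty stack-version filter matches only c4, the cluster with no desired stack set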
-    r = new ClusterRequest(null, null, "", null);
-    resp = controller.getClusters(Collections.singleton(r));
-    Assert.assertEquals(1, resp.size());
-    Assert.assertEquals(c4.getClusterId(),
-        resp.iterator().next().getClusterId().longValue());
-  }
-
-  @Test
-  public void testGetServices() throws AmbariException {
-    clusters.addCluster("c1");
-    Cluster c1 = clusters.getCluster("c1");
-    c1.setDesiredStackVersion(new StackId("HDP-0.1"));
-    Service s1 = serviceFactory.createNew(c1, "HDFS");
-
-    c1.addService(s1);
-    s1.setDesiredStackVersion(new StackId("HDP-0.1"));
-    s1.setDesiredState(State.INSTALLED);
-
-    s1.persist();
-
-    ServiceRequest r = new ServiceRequest("c1", null, null, null);
-    Set<ServiceResponse> resp = controller.getServices(Collections.singleton(r));
-
-    ServiceResponse resp1 = resp.iterator().next();
-
-    Assert.assertEquals(s1.getClusterId(), resp1.getClusterId().longValue());
-    Assert.assertEquals(s1.getCluster().getClusterName(),
-        resp1.getClusterName());
-    Assert.assertEquals(s1.getName(), resp1.getServiceName());
-    Assert.assertEquals("HDP-0.1", s1.getDesiredStackVersion().getStackId());
-    Assert.assertEquals(s1.getDesiredStackVersion().getStackId(),
-        resp1.getDesiredStackVersion());
-    Assert.assertEquals(State.INSTALLED.toString(), resp1.getDesiredState());
-
-  }
-
-  @Test
-  public void testGetServicesWithFilters() throws AmbariException {
-    clusters.addCluster("c1");
-    clusters.addCluster("c2");
-    Cluster c1 = clusters.getCluster("c1");
-    Cluster c2 = clusters.getCluster("c2");
-    c1.setDesiredStackVersion(new StackId("HDP-0.2"));
-    c2.setDesiredStackVersion(new StackId("HDP-0.2"));
-
-    Service s1 = serviceFactory.createNew(c1, "HDFS");
-    Service s2 = serviceFactory.createNew(c1, "MAPREDUCE");
-    Service s3 = serviceFactory.createNew(c1, "HBASE");
-    Service s4 = serviceFactory.createNew(c2, "HIVE");
-    Service s5 = serviceFactory.createNew(c2, "ZOOKEEPER");
-
-    c1.addService(s1);
-    c1.addService(s2);
-    c1.addService(s3);
-    c2.addService(s4);
-    c2.addService(s5);
-
-    s1.setDesiredState(State.INSTALLED);
-    s2.setDesiredState(State.INSTALLED);
-    s4.setDesiredState(State.INSTALLED);
-
-    s1.persist();
-    s2.persist();
-    s3.persist();
-    s4.persist();
-    s5.persist();
-
-    ServiceRequest r = new ServiceRequest(null, null, null, null);
-    Set<ServiceResponse> resp;
-
-    try {
-      controller.getServices(Collections.singleton(r));
-      fail("Expected failure for invalid request");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    r = new ServiceRequest(c1.getClusterName(), null, null, null);
-    resp = controller.getServices(Collections.singleton(r));
-    Assert.assertEquals(3, resp.size());
-
-    r = new ServiceRequest(c1.getClusterName(), s2.getName(), null, null);
-    resp = controller.getServices(Collections.singleton(r));
-    Assert.assertEquals(1, resp.size());
-    Assert.assertEquals(s2.getName(), resp.iterator().next().getServiceName());
-
-    try {
-      r = new ServiceRequest(c2.getClusterName(), s1.getName(), null, null);
-      resp = controller.getServices(Collections.singleton(r));
-      fail("Expected failure for invalid service");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    r = new ServiceRequest(c1.getClusterName(), null, null, "INSTALLED");
-    resp = controller.getServices(Collections.singleton(r));
-    Assert.assertEquals(2, resp.size());
-
-    r = new ServiceRequest(c2.getClusterName(), null, null, "INIT");
-    resp = controller.getServices(Collections.singleton(r));
-    Assert.assertEquals(1, resp.size());
-
-    ServiceRequest r1, r2, r3;
-    r1 = new ServiceRequest(c1.getClusterName(), null, null, "INSTALLED");
-    r2 = new ServiceRequest(c2.getClusterName(), null, null, "INIT");
-    r3 = new ServiceRequest(c2.getClusterName(), null, null, "INIT");
-
-    Set<ServiceRequest> reqs = new HashSet<ServiceRequest>();
-    reqs.addAll(Arrays.asList(r1, r2, r3));
-    resp = controller.getServices(reqs);
-    Assert.assertEquals(3, resp.size());
-
-  }
-
-
-  @Test
-  public void testGetServiceComponents() throws AmbariException {
-    clusters.addCluster("c1");
-    Cluster c1 = clusters.getCluster("c1");
-    c1.setDesiredStackVersion(new StackId("HDP-0.2"));
-    Service s1 = serviceFactory.createNew(c1, "HDFS");
-    c1.addService(s1);
-    s1.setDesiredState(State.INSTALLED);
-    s1.persist();
-    ServiceComponent sc1 = serviceComponentFactory.createNew(s1, "DATANODE");
-    s1.addServiceComponent(sc1);
-    sc1.persist();
-    sc1.setDesiredStackVersion(new StackId("HDP-0.1"));
-    sc1.setDesiredState(State.UNINSTALLED);
-
-    ServiceComponentRequest r = new ServiceComponentRequest("c1",
-       s1.getName(), sc1.getName(), null, null);
-
-    Set<ServiceComponentResponse> resps = controller.getComponents(Collections.singleton(r));
-    Assert.assertEquals(1, resps.size());
-
-    ServiceComponentResponse resp = resps.iterator().next();
-
-    Assert.assertEquals(c1.getClusterName(), resp.getClusterName());
-    Assert.assertEquals(sc1.getName(), resp.getComponentName());
-    Assert.assertEquals(s1.getName(), resp.getServiceName());
-    Assert.assertEquals("HDP-0.1", resp.getDesiredStackVersion());
-    Assert.assertEquals(sc1.getDesiredState().toString(),
-        resp.getDesiredState());
-    Assert.assertEquals(c1.getClusterId(), resp.getClusterId().longValue());
-
-  }
-
-
-  @Test
-  public void testGetServiceComponentsWithFilters() throws AmbariException {
-    clusters.addCluster("c1");
-    clusters.addCluster("c2");
-    Cluster c1 = clusters.getCluster("c1");
-    Cluster c2 = clusters.getCluster("c2");
-    c1.setDesiredStackVersion(new StackId("HDP-0.2"));
-    c2.setDesiredStackVersion(new StackId("HDP-0.2"));
-
-    Service s1 = serviceFactory.createNew(c1, "HDFS");
-    Service s2 = serviceFactory.createNew(c1, "MAPREDUCE");
-    Service s3 = serviceFactory.createNew(c1, "HBASE");
-    Service s4 = serviceFactory.createNew(c2, "HIVE");
-    Service s5 = serviceFactory.createNew(c2, "ZOOKEEPER");
-
-    c1.addService(s1);
-    c1.addService(s2);
-    c1.addService(s3);
-    c2.addService(s4);
-    c2.addService(s5);
-
-    s1.setDesiredState(State.INSTALLED);
-    s2.setDesiredState(State.INSTALLED);
-    s4.setDesiredState(State.INSTALLED);
-
-    s1.persist();
-    s2.persist();
-    s3.persist();
-    s4.persist();
-    s5.persist();
-
-    ServiceComponent sc1 = serviceComponentFactory.createNew(s1, "DATANODE");
-    ServiceComponent sc2 = serviceComponentFactory.createNew(s1, "NAMENODE");
-    ServiceComponent sc3 = serviceComponentFactory.createNew(s3,
-        "HBASE_REGIONSERVER");
-    ServiceComponent sc4 = serviceComponentFactory.createNew(s4, "HIVE_SERVER");
-    ServiceComponent sc5 = serviceComponentFactory.createNew(s4, "HIVE_CLIENT");
-    ServiceComponent sc6 = serviceComponentFactory.createNew(s4,
-        "MYSQL_SERVER");
-    ServiceComponent sc7 = serviceComponentFactory.createNew(s5,
-        "ZOOKEEPER_SERVER");
-    ServiceComponent sc8 = serviceComponentFactory.createNew(s5,
-        "ZOOKEEPER_CLIENT");
-
-    s1.addServiceComponent(sc1);
-    s1.addServiceComponent(sc2);
-    s3.addServiceComponent(sc3);
-    s4.addServiceComponent(sc4);
-    s4.addServiceComponent(sc5);
-    s4.addServiceComponent(sc6);
-    s5.addServiceComponent(sc7);
-    s5.addServiceComponent(sc8);
-
-    sc1.setDesiredState(State.UNINSTALLED);
-    sc3.setDesiredState(State.UNINSTALLED);
-    sc5.setDesiredState(State.UNINSTALLED);
-    sc6.setDesiredState(State.UNINSTALLED);
-    sc7.setDesiredState(State.UNINSTALLED);
-    sc8.setDesiredState(State.UNINSTALLED);
-
-    sc1.persist();
-    sc2.persist();
-    sc3.persist();
-    sc4.persist();
-    sc5.persist();
-    sc6.persist();
-    sc7.persist();
-    sc8.persist();
-
-    ServiceComponentRequest r = new ServiceComponentRequest(null, null,
-        null, null, null);
-
-    try {
-      controller.getComponents(Collections.singleton(r));
-      fail("Expected failure for invalid cluster");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    // all comps per cluster
-    r = new ServiceComponentRequest(c1.getClusterName(),
-        null, null, null, null);
-    Set<ServiceComponentResponse> resps = controller.getComponents(Collections.singleton(r));
-    Assert.assertEquals(3, resps.size());
-
-    // all comps per cluster filter on state
-    r = new ServiceComponentRequest(c2.getClusterName(),
-        null, null, null, State.UNINSTALLED.toString());
-    resps = controller.getComponents(Collections.singleton(r));
-    Assert.assertEquals(4, resps.size());
-
-    // all comps for given service
-    r = new ServiceComponentRequest(c2.getClusterName(),
-        s5.getName(), null, null, null);
-    resps = controller.getComponents(Collections.singleton(r));
-    Assert.assertEquals(2, resps.size());
-
-    // all comps for given service filter by state
-    r = new ServiceComponentRequest(c2.getClusterName(),
-        s4.getName(), null, null, State.INIT.toString());
-    resps = controller.getComponents(Collections.singleton(r));
-    Assert.assertEquals(1, resps.size());
-    Assert.assertEquals(sc4.getName(),
-        resps.iterator().next().getComponentName());
-
-    // get single given comp
-    r = new ServiceComponentRequest(c2.getClusterName(),
-        null, sc5.getName(), null, State.INIT.toString());
-    resps = controller.getComponents(Collections.singleton(r));
-    Assert.assertEquals(1, resps.size());
-    Assert.assertEquals(sc5.getName(),
-        resps.iterator().next().getComponentName());
-
-    // get single given comp and given svc
-    r = new ServiceComponentRequest(c2.getClusterName(),
-        s4.getName(), sc5.getName(), null, State.INIT.toString());
-    resps = controller.getComponents(Collections.singleton(r));
-    Assert.assertEquals(1, resps.size());
-    Assert.assertEquals(sc5.getName(),
-        resps.iterator().next().getComponentName());
-
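-    // merged requests return a set: 4 UNINSTALLED components in c2 plus 3 in c1,
-    // with the single INIT match in c1 de-duplicated (7 total)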
-    ServiceComponentRequest r1, r2, r3;
-    Set<ServiceComponentRequest> reqs = new HashSet<ServiceComponentRequest>();
-    r1 = new ServiceComponentRequest(c2.getClusterName(),
-        null, null, null, State.UNINSTALLED.toString());
-    r2 = new ServiceComponentRequest(c1.getClusterName(),
-        null, null, null, null);
-    r3 = new ServiceComponentRequest(c1.getClusterName(),
-        null, null, null, State.INIT.toString());
-    reqs.addAll(Arrays.asList(r1, r2, r3));
-    resps = controller.getComponents(reqs);
-    Assert.assertEquals(7, resps.size());
-  }
-
-  @Test
-  public void testGetServiceComponentHosts() throws AmbariException {
-    clusters.addCluster("c1");
-    Cluster c1 = clusters.getCluster("c1");
-    c1.setDesiredStackVersion(new StackId("HDP-0.1"));
-    clusters.addHost("h1");
-    clusters.getHost("h1").setOsType("centos5");
-    clusters.getHost("h1").persist();
-    clusters.mapHostToCluster("h1", "c1");
-    Service s1 = serviceFactory.createNew(c1, "HDFS");
-    c1.addService(s1);
-    s1.persist();
-    ServiceComponent sc1 = serviceComponentFactory.createNew(s1, "DATANODE");
-    s1.addServiceComponent(sc1);
-    sc1.setDesiredState(State.UNINSTALLED);
-    sc1.persist();
-    ServiceComponentHost sch1 = serviceComponentHostFactory.createNew(sc1, "h1",
-        false);
-    sc1.addServiceComponentHost(sch1);
-    sch1.setDesiredState(State.INSTALLED);
-    sch1.setState(State.INSTALLING);
-    sch1.setDesiredStackVersion(new StackId("HDP-1.1.0"));
-    sch1.setStackVersion(new StackId("HDP-0.1"));
-
-    sch1.persist();
-
-    ServiceComponentHostRequest r =
-        new ServiceComponentHostRequest(c1.getClusterName(),
-            null, null, null, null, null);
-    Set<ServiceComponentHostResponse> resps = controller.getHostComponents(Collections.singleton(r));
-    Assert.assertEquals(1, resps.size());
-
-    ServiceComponentHostResponse resp =
-        resps.iterator().next();
-
-    Assert.assertEquals(c1.getClusterName(), resp.getClusterName());
-    Assert.assertEquals(sc1.getName(), resp.getComponentName());
-    Assert.assertEquals(s1.getName(), resp.getServiceName());
-    Assert.assertEquals(sch1.getHostName(), resp.getHostname());
-    Assert.assertEquals(sch1.getDesiredState().toString(),
-        resp.getDesiredState());
-    Assert.assertEquals(sch1.getState().toString(),
-        resp.getLiveState());
-    Assert.assertEquals(sch1.getStackVersion().getStackId(),
-        resp.getStackVersion());
-
-  }
-
-  @Test
-  public void testGetServiceComponentHostsWithFilters() throws AmbariException {
-    clusters.addCluster("c1");
-    Cluster c1 = clusters.getCluster("c1");
-    c1.setDesiredStackVersion(new StackId("HDP-0.2"));
-
-    clusters.addHost("h1");
-    clusters.addHost("h2");
-    clusters.addHost("h3");
-    clusters.getHost("h1").setOsType("centos5");
-    clusters.getHost("h2").setOsType("centos5");
-    clusters.getHost("h3").setOsType("centos5");
-    clusters.getHost("h1").persist();
-    clusters.getHost("h2").persist();
-    clusters.getHost("h3").persist();
-    clusters.mapHostToCluster("h1", "c1");
-    clusters.mapHostToCluster("h2", "c1");
-    clusters.mapHostToCluster("h3", "c1");
-
-    Service s1 = serviceFactory.createNew(c1, "HDFS");
-    Service s2 = serviceFactory.createNew(c1, "MAPREDUCE");
-    Service s3 = serviceFactory.createNew(c1, "HBASE");
-
-    c1.addService(s1);
-    c1.addService(s2);
-    c1.addService(s3);
-
-    s1.setDesiredState(State.INSTALLED);
-    s2.setDesiredState(State.INSTALLED);
-
-    s1.persist();
-    s2.persist();
-    s3.persist();
-
-    ServiceComponent sc1 = serviceComponentFactory.createNew(s1, "DATANODE");
-    ServiceComponent sc2 = serviceComponentFactory.createNew(s1, "NAMENODE");
-    ServiceComponent sc3 = serviceComponentFactory.createNew(s3,
-        "HBASE_REGIONSERVER");
-
-    s1.addServiceComponent(sc1);
-    s1.addServiceComponent(sc2);
-    s3.addServiceComponent(sc3);
-
-    sc1.setDesiredState(State.UNINSTALLED);
-    sc3.setDesiredState(State.UNINSTALLED);
-
-    sc1.persist();
-    sc2.persist();
-    sc3.persist();
-
-    ServiceComponentHost sch1 = serviceComponentHostFactory.createNew(sc1, "h1",
-        false);
-    ServiceComponentHost sch2 = serviceComponentHostFactory.createNew(sc1, "h2",
-        false);
-    ServiceComponentHost sch3 = serviceComponentHostFactory.createNew(sc1, "h3",
-        false);
-    ServiceComponentHost sch4 = serviceComponentHostFactory.createNew(sc2, "h1",
-        false);
-    ServiceComponentHost sch5 = serviceComponentHostFactory.createNew(sc2, "h2",
-        false);
-    ServiceComponentHost sch6 = serviceComponentHostFactory.createNew(sc3, "h3",
-        false);
-
-    sc1.addServiceComponentHost(sch1);
-    sc1.addServiceComponentHost(sch2);
-    sc1.addServiceComponentHost(sch3);
-    sc2.addServiceComponentHost(sch4);
-    sc2.addServiceComponentHost(sch5);
-    sc3.addServiceComponentHost(sch6);
-
-    sch1.setDesiredState(State.INSTALLED);
-    sch2.setDesiredState(State.INIT);
-    sch4.setDesiredState(State.INSTALLED);
-    sch5.setDesiredState(State.UNINSTALLED);
-
-    sch1.persist();
-    sch2.persist();
-    sch3.persist();
-    sch4.persist();
-    sch5.persist();
-    sch6.persist();
-
-    ServiceComponentHostRequest r =
-        new ServiceComponentHostRequest(null, null, null, null, null, null);
-
-    try {
-      controller.getHostComponents(Collections.singleton(r));
-      fail("Expected failure for invalid cluster");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    // all across cluster
-    r = new ServiceComponentHostRequest(c1.getClusterName(), null,
-        null, null, null, null);
-    Set<ServiceComponentHostResponse> resps = controller.getHostComponents(Collections.singleton(r));
-    Assert.assertEquals(6, resps.size());
-
-    // all for service
-    r = new ServiceComponentHostRequest(c1.getClusterName(), s1.getName(),
-        null, null, null, null);
-    resps = controller.getHostComponents(Collections.singleton(r));
-    Assert.assertEquals(5, resps.size());
-
-    // all for component
-    r = new ServiceComponentHostRequest(c1.getClusterName(), null,
-        sc3.getName(), null, null, null);
-    resps = controller.getHostComponents(Collections.singleton(r));
-    Assert.assertEquals(1, resps.size());
-
-    // all for host
-    r = new ServiceComponentHostRequest(c1.getClusterName(), null,
-        null, "h2", null, null);
-    resps = controller.getHostComponents(Collections.singleton(r));
-    Assert.assertEquals(2, resps.size());
-
-    // all across cluster with state filter
-    r = new ServiceComponentHostRequest(c1.getClusterName(), null,
-        null, null, null, State.UNINSTALLED.toString());
-    resps = controller.getHostComponents(Collections.singleton(r));
-    Assert.assertEquals(1, resps.size());
-
-    // all for service with state filter
-    r = new ServiceComponentHostRequest(c1.getClusterName(), s1.getName(),
-        null, null, null, State.INIT.toString());
-    resps = controller.getHostComponents(Collections.singleton(r));
-    Assert.assertEquals(2, resps.size());
-
-    // all for component with state filter
-    r = new ServiceComponentHostRequest(c1.getClusterName(), null,
-        sc3.getName(), null, null, State.INSTALLED.toString());
-    resps = controller.getHostComponents(Collections.singleton(r));
-    Assert.assertEquals(0, resps.size());
-
-    // all for host with state filter
-    r = new ServiceComponentHostRequest(c1.getClusterName(), null,
-        null, "h2", null, State.INIT.toString());
-    resps = controller.getHostComponents(Collections.singleton(r));
-    Assert.assertEquals(1, resps.size());
-
-    // for service and host
-    r = new ServiceComponentHostRequest(c1.getClusterName(), s3.getName(),
-        null, "h1", null, null);
-    resps = controller.getHostComponents(Collections.singleton(r));
-    Assert.assertEquals(0, resps.size());
-
-    // single sch - given service and host and component
-    r = new ServiceComponentHostRequest(c1.getClusterName(), s3.getName(),
-        sc3.getName(), "h3", null, State.INSTALLED.toString());
-    resps = controller.getHostComponents(Collections.singleton(r));
-    Assert.assertEquals(0, resps.size());
-
-    // single sch - given service and host and component
-    r = new ServiceComponentHostRequest(c1.getClusterName(), s3.getName(),
-        sc3.getName(), "h3", null, null);
-    resps = controller.getHostComponents(Collections.singleton(r));
-    Assert.assertEquals(1, resps.size());
-
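-    // merged requests: two matches on h3 plus two on h2; the HBASE_REGIONSERVER-on-h2
-    // request matches nothing, so 4 distinct responses come back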
-    ServiceComponentHostRequest r1, r2, r3;
-    r1 = new ServiceComponentHostRequest(c1.getClusterName(), null,
-        null, "h3", null, null);
-    r2 = new ServiceComponentHostRequest(c1.getClusterName(), s3.getName(),
-        sc3.getName(), "h2", null, null);
-    r3 = new ServiceComponentHostRequest(c1.getClusterName(), null,
-        null, "h2", null, null);
-    Set<ServiceComponentHostRequest> reqs =
-        new HashSet<ServiceComponentHostRequest>();
-    reqs.addAll(Arrays.asList(r1, r2, r3));
-    resps = controller.getHostComponents(reqs);
-    Assert.assertEquals(4, resps.size());
-  }
-
-  @Test
-  public void testGetHosts() throws AmbariException {
-    clusters.addCluster("c1");
-    clusters.addCluster("c2");
-    clusters.getCluster("c1").setDesiredStackVersion(new StackId("HDP-0.2"));
-    clusters.getCluster("c2").setDesiredStackVersion(new StackId("HDP-0.2"));
-    clusters.addHost("h1");
-    clusters.addHost("h2");
-    clusters.addHost("h3");
-    clusters.addHost("h4");
-    clusters.getHost("h1").setOsType("centos5");
-    clusters.getHost("h2").setOsType("centos5");
-    clusters.getHost("h3").setOsType("centos5");
-    clusters.getHost("h4").setOsType("centos5");
-    clusters.getHost("h1").persist();
-    clusters.getHost("h2").persist();
-    clusters.getHost("h3").persist();
-    clusters.getHost("h4").persist();
-    clusters.mapHostToCluster("h1", "c1");
-    clusters.mapHostToCluster("h2", "c1");
-    clusters.mapHostToCluster("h3", "c2");
-
-    Map<String, String> attrs = new HashMap<String, String>();
-    attrs.put("a1", "b1");
-    clusters.getHost("h3").setHostAttributes(attrs);
-    attrs.put("a2", "b2");
-    clusters.getHost("h4").setHostAttributes(attrs);
-
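-    // h4 is registered with attributes but never mapped to a cluster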
-    HostRequest r = new HostRequest(null, null, null);
-
-    Set<HostResponse> resps = controller.getHosts(Collections.singleton(r));
-
-    Assert.assertEquals(4, resps.size());
-
-    Set<String> foundHosts = new HashSet<String>();
-
-    for (HostResponse resp : resps) {
-      foundHosts.add(resp.getHostname());
-      if (resp.getHostname().equals("h1")) {
-        Assert.assertEquals("c1", resp.getClusterName());
-        Assert.assertEquals(0, resp.getHostAttributes().size());
-      } else if (resp.getHostname().equals("h2")) {
-        Assert.assertEquals("c1", resp.getClusterName());
-        Assert.assertEquals(0, resp.getHostAttributes().size());
-      } else if (resp.getHostname().equals("h3")) {
-        Assert.assertEquals("c2", resp.getClusterName());
-        Assert.assertEquals(1, resp.getHostAttributes().size());
-      } else if (resp.getHostname().equals("h4")) {
-        //todo: why wouldn't this be null?
-        Assert.assertEquals("", resp.getClusterName());
-        Assert.assertEquals(2, resp.getHostAttributes().size());
-      } else {
-        fail("Found invalid host");
-      }
-    }
-
-    Assert.assertEquals(4, foundHosts.size());
-
-    r = new HostRequest("h1", null, null);
-    resps = controller.getHosts(Collections.singleton(r));
-    Assert.assertEquals(1, resps.size());
-    HostResponse resp = resps.iterator().next();
-    Assert.assertEquals("h1", resp.getHostname());
-    Assert.assertEquals("c1", resp.getClusterName());
-    Assert.assertEquals(0, resp.getHostAttributes().size());
-
-  }
-
-  @Test
-  public void testServiceUpdateBasic() throws AmbariException {
-    String clusterName = "foo1";
-    createCluster(clusterName);
-    String serviceName = "HDFS";
-    clusters.getCluster("foo1").setDesiredStackVersion(
-        new StackId("HDP-0.2"));
-    createService(clusterName, serviceName, State.INIT);
-
-    Service s =
-        clusters.getCluster(clusterName).getService(serviceName);
-    Assert.assertNotNull(s);
-    Assert.assertEquals(serviceName, s.getName());
-    Assert.assertEquals(State.INIT, s.getDesiredState());
-    Assert.assertEquals(clusterName, s.getCluster().getClusterName());
-
-    Set<ServiceRequest> reqs = new HashSet<ServiceRequest>();
-    ServiceRequest r;
-
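-    // INSTALLING is a transient live state and should be rejected as a desired-state target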
-    try {
-      r = new ServiceRequest(clusterName, serviceName,
-          null, State.INSTALLING.toString());
-      reqs.clear();
-      reqs.add(r);
-      controller.updateServices(reqs);
-      fail("Expected fail for invalid state transition");
-    } catch (Exception e) {
-      // Expected
-    }
-
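-    // a valid no-op request (no host components to act on) returns a null response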
-    r = new ServiceRequest(clusterName, serviceName,
-        null, State.INSTALLED.toString());
-    reqs.clear();
-    reqs.add(r);
-    RequestStatusResponse trackAction = controller.updateServices(reqs);
-    Assert.assertNull(trackAction);
-  }
-
-  @Test
-  public void testServiceUpdateInvalidRequest() throws AmbariException {
-    // multiple clusters
-    // dup services
-    // multiple diff end states
-
-    String clusterName1 = "foo1";
-    createCluster(clusterName1);
-    String clusterName2 = "foo2";
-    createCluster(clusterName2);
-    String serviceName1 = "HDFS";
-    createService(clusterName1, serviceName1, null);
-    String serviceName2 = "HBASE";
-    String serviceName3 = "HBASE"; // same service name, targeted at the second cluster
-    try {
-      createService(clusterName2, serviceName3, null);
-      fail("Expected fail for invalid service for stack 0.1");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    clusters.getCluster(clusterName1).setDesiredStackVersion(
-        new StackId("HDP-0.2"));
-    clusters.getCluster(clusterName2).setDesiredStackVersion(
-        new StackId("HDP-0.2"));
-    createService(clusterName1, serviceName2, null);
-    createService(clusterName2, serviceName3, null);
-
-    Set<ServiceRequest> reqs = new HashSet<ServiceRequest>();
-    ServiceRequest req1, req2;
-    try {
-      reqs.clear();
-      req1 = new ServiceRequest(clusterName1, serviceName1, null,
-          State.INSTALLED.toString());
-      req2 = new ServiceRequest(clusterName2, serviceName2, null,
-          State.INSTALLED.toString());
-      reqs.add(req1);
-      reqs.add(req2);
-      controller.updateServices(reqs);
-      fail("Expected failure for multi cluster update");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    try {
-      reqs.clear();
-      req1 = new ServiceRequest(clusterName1, serviceName1, null,
-          State.INSTALLED.toString());
-      req2 = new ServiceRequest(clusterName1, serviceName1, null,
-          State.INSTALLED.toString());
-      reqs.add(req1);
-      reqs.add(req2);
-      controller.updateServices(reqs);
-      fail("Expected failure for dups services");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    clusters.getCluster(clusterName1).getService(serviceName2)
-        .setDesiredState(State.INSTALLED);
-
-    try {
-      reqs.clear();
-      req1 = new ServiceRequest(clusterName1, serviceName1, null,
-          State.INSTALLED.toString());
-      req2 = new ServiceRequest(clusterName1, serviceName2, null,
-          State.STARTED.toString());
-      reqs.add(req1);
-      reqs.add(req2);
-      controller.updateServices(reqs);
-      fail("Expected failure for different states");
-    } catch (Exception e) {
-      // Expected
-    }
-
-  }
-
-  @Test
-  public void testServiceUpdateInvalidUpdates() {
-    // FIXME test all invalid transitions
-  }
-
-  @Test
-  public void testServiceUpdateRecursive() throws AmbariException {
-    String clusterName = "foo1";
-    createCluster(clusterName);
-    clusters.getCluster(clusterName)
-        .setDesiredStackVersion(new StackId("HDP-0.2"));
-    String serviceName1 = "HDFS";
-    createService(clusterName, serviceName1, null);
-    String serviceName2 = "HBASE";
-    createService(clusterName, serviceName2, null);
-    String componentName1 = "NAMENODE";
-    String componentName2 = "DATANODE";
-    String componentName3 = "HBASE_MASTER";
-    String componentName4 = "HDFS_CLIENT";
-    createServiceComponent(clusterName, serviceName1, componentName1,
-        State.INIT);
-    createServiceComponent(clusterName, serviceName1, componentName2,
-        State.INIT);
-    createServiceComponent(clusterName, serviceName2, componentName3,
-        State.INIT);
-    createServiceComponent(clusterName, serviceName1, componentName4,
-        State.INIT);
-    String host1 = "h1";
-    clusters.addHost(host1);
-    clusters.getHost("h1").setOsType("centos5");
-    clusters.getHost("h1").persist();
-    String host2 = "h2";
-    clusters.addHost(host2);
-    clusters.getHost("h2").setOsType("centos5");
-    clusters.getHost("h2").persist();
-    clusters.mapHostToCluster(host1, clusterName);
-    clusters.mapHostToCluster(host2, clusterName);
-
-    Set<ServiceComponentHostRequest> set1 =
-        new HashSet<ServiceComponentHostRequest>();
-    ServiceComponentHostRequest r1 =
-        new ServiceComponentHostRequest(clusterName, serviceName1,
-            componentName1, host1, null, State.INIT.toString());
-    ServiceComponentHostRequest r2 =
-        new ServiceComponentHostRequest(clusterName, serviceName1,
-            componentName2, host1, null, State.INIT.toString());
-    ServiceComponentHostRequest r3 =
-        new ServiceComponentHostRequest(clusterName, serviceName1,
-            componentName1, host2, null, State.INIT.toString());
-    ServiceComponentHostRequest r4 =
-        new ServiceComponentHostRequest(clusterName, serviceName1,
-            componentName2, host2, null, State.INIT.toString());
-    ServiceComponentHostRequest r5 =
-        new ServiceComponentHostRequest(clusterName, serviceName2,
-            componentName3, host1, null, State.INIT.toString());
-    ServiceComponentHostRequest r6 =
-        new ServiceComponentHostRequest(clusterName, serviceName1,
-            componentName4, host2, null, State.INIT.toString());
-
-    set1.add(r1);
-    set1.add(r2);
-    set1.add(r3);
-    set1.add(r4);
-    set1.add(r5);
-    set1.add(r6);
-    controller.createHostComponents(set1);
-
-    Cluster c1 = clusters.getCluster(clusterName);
-    Service s1 = c1.getService(serviceName1);
-    Service s2 = c1.getService(serviceName2);
-    ServiceComponent sc1 = s1.getServiceComponent(componentName1);
-    ServiceComponent sc2 = s1.getServiceComponent(componentName2);
-    ServiceComponent sc3 = s2.getServiceComponent(componentName3);
-    ServiceComponent sc4 = s1.getServiceComponent(componentName4);
-    ServiceComponentHost sch1 = sc1.getServiceComponentHost(host1);
-    ServiceComponentHost sch2 = sc2.getServiceComponentHost(host1);
-    ServiceComponentHost sch3 = sc1.getServiceComponentHost(host2);
-    ServiceComponentHost sch4 = sc2.getServiceComponentHost(host2);
-    ServiceComponentHost sch5 = sc3.getServiceComponentHost(host1);
-    ServiceComponentHost sch6 = sc4.getServiceComponentHost(host2);
-
-    s1.setDesiredState(State.INSTALLED);
-    s2.setDesiredState(State.INSTALLED);
-    sc1.setDesiredState(State.STARTED);
-    sc2.setDesiredState(State.INIT);
-    sc3.setDesiredState(State.STARTED);
-    sc4.setDesiredState(State.INSTALLED);
-    sch1.setDesiredState(State.INSTALLED);
-    sch2.setDesiredState(State.INSTALLED);
-    sch3.setDesiredState(State.INSTALLED);
-    sch4.setDesiredState(State.INSTALLED);
-    sch5.setDesiredState(State.INSTALLED);
-    sch6.setDesiredState(State.INSTALLED);
-    sch1.setState(State.INSTALLED);
-    sch2.setState(State.INSTALLED);
-    sch3.setState(State.INSTALLED);
-    sch4.setState(State.INSTALLED);
-    sch5.setState(State.INSTALLED);
-    sch6.setState(State.INSTALLED);
-
-    Set<ServiceRequest> reqs = new HashSet<ServiceRequest>();
-    ServiceRequest req1, req2;
-    try {
-      reqs.clear();
-      req1 = new ServiceRequest(clusterName, serviceName1, null,
-          State.STARTED.toString());
-      reqs.add(req1);
-      controller.updateServices(reqs);
-      fail("Expected failure for invalid state update");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    s1.setDesiredState(State.INSTALLED);
-    s2.setDesiredState(State.INSTALLED);
-    sc1.setDesiredState(State.STARTED);
-    sc2.setDesiredState(State.INSTALLED);
-    sc3.setDesiredState(State.STARTED);
-    sch1.setDesiredState(State.INSTALLED);
-    sch2.setDesiredState(State.INSTALLED);
-    sch3.setDesiredState(State.INSTALLED);
-    sch4.setDesiredState(State.INSTALLED);
-    sch5.setDesiredState(State.INSTALLED);
-    sch1.setState(State.INIT);
-    sch2.setState(State.INSTALLED);
-    sch3.setState(State.INIT);
-    sch4.setState(State.INSTALLED);
-    sch5.setState(State.INSTALLED);
-
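-    // sch1 and sch3 are live-INIT, so a direct service-level start is still rejected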
-    try {
-      reqs.clear();
-      req1 = new ServiceRequest(clusterName, serviceName1, null,
-          State.STARTED.toString());
-      reqs.add(req1);
-      controller.updateServices(reqs);
-      fail("Expected failure for invalid state update");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    s1.setDesiredState(State.INSTALLED);
-    s2.setDesiredState(State.INSTALLED);
-    sc1.setDesiredState(State.STARTED);
-    sc2.setDesiredState(State.INSTALLED);
-    sc3.setDesiredState(State.STARTED);
-    sch1.setDesiredState(State.STARTED);
-    sch2.setDesiredState(State.STARTED);
-    sch3.setDesiredState(State.STARTED);
-    sch4.setDesiredState(State.STARTED);
-    sch5.setDesiredState(State.STARTED);
-    sch1.setState(State.INSTALLED);
-    sch2.setState(State.START_FAILED);
-    sch3.setState(State.INSTALLED);
-    sch4.setState(State.STARTED);
-    sch5.setState(State.INSTALLED);
-
-    reqs.clear();
-    req1 = new ServiceRequest(clusterName, serviceName1, null,
-        State.STARTED.toString());
-    req2 = new ServiceRequest(clusterName, serviceName2, null,
-        State.STARTED.toString());
-    reqs.add(req1);
-    reqs.add(req2);
-    RequestStatusResponse trackAction = controller.updateServices(reqs);
-
-    Assert.assertEquals(State.STARTED, s1.getDesiredState());
-    Assert.assertEquals(State.STARTED, s2.getDesiredState());
-    Assert.assertEquals(State.STARTED, sc1.getDesiredState());
-    Assert.assertEquals(State.STARTED, sc2.getDesiredState());
-    Assert.assertEquals(State.STARTED, sc3.getDesiredState());
-    Assert.assertEquals(State.INSTALLED, sc4.getDesiredState());
-    Assert.assertEquals(State.STARTED, sch1.getDesiredState());
-    Assert.assertEquals(State.STARTED, sch2.getDesiredState());
-    Assert.assertEquals(State.STARTED, sch3.getDesiredState());
-    Assert.assertEquals(State.STARTED, sch4.getDesiredState());
-    Assert.assertEquals(State.STARTED, sch5.getDesiredState());
-    Assert.assertEquals(State.INSTALLED, sch6.getDesiredState());
-    Assert.assertEquals(State.INSTALLED, sch1.getState());
-    Assert.assertEquals(State.START_FAILED, sch2.getState());
-    Assert.assertEquals(State.INSTALLED, sch3.getState());
-    Assert.assertEquals(State.STARTED, sch4.getState());
-    Assert.assertEquals(State.INSTALLED, sch5.getState());
-    Assert.assertEquals(State.INSTALLED, sch6.getState());
-
-    long requestId = trackAction.getRequestId();
-    List<Stage> stages = actionDB.getAllStages(requestId);
-
-    for (Stage stage : stages) {
-      LOG.debug("Stage dump: " + stage.toString());
-    }
-
-    Assert.assertTrue(!stages.isEmpty());
-    Assert.assertEquals(3, stages.size());
-
-    // expected
-    // sch1 to start
-    // sch2 to start
-    // sch3 to start
-    // sch5 to start
-    Stage stage1 = null, stage2 = null, stage3 = null;
-    for (Stage s : stages) {
-      if (s.getStageId() == 1) { stage1 = s; }
-      if (s.getStageId() == 2) { stage2 = s; }
-      if (s.getStageId() == 3) { stage3 = s; }
-    }
-
-    Assert.assertEquals(2, stage1.getExecutionCommands(host1).size());
-    Assert.assertEquals(1, stage1.getExecutionCommands(host2).size());
-    Assert.assertEquals(1, stage2.getExecutionCommands(host1).size());
-
-    Assert.assertNotNull(stage1.getExecutionCommandWrapper(host1, "NAMENODE"));
-    Assert.assertNotNull(stage1.getExecutionCommandWrapper(host1, "DATANODE"));
-    Assert.assertNotNull(stage1.getExecutionCommandWrapper(host2, "NAMENODE"));
-    Assert.assertNotNull(stage2.getExecutionCommandWrapper(host1, "HBASE_MASTER"));
-    Assert.assertNull(stage1.getExecutionCommandWrapper(host2, "DATANODE"));
-    Assert.assertNotNull(stage3.getExecutionCommandWrapper(host1, "HBASE_SERVICE_CHECK"));
-    Assert.assertNotNull(stage2.getExecutionCommandWrapper(host2, "HDFS_SERVICE_CHECK"));
-
-    // manually set live state
-    sch1.setState(State.STARTED);
-    sch2.setState(State.STARTED);
-    sch3.setState(State.STARTED);
-    sch4.setState(State.STARTED);
-    sch5.setState(State.STARTED);
-
-    // test no-op
-    reqs.clear();
-    req1 = new ServiceRequest(clusterName, serviceName1, null,
-        State.STARTED.toString());
-    req2 = new ServiceRequest(clusterName, serviceName2, null,
-        State.STARTED.toString());
-    reqs.add(req1);
-    reqs.add(req2);
-    trackAction = controller.updateServices(reqs);
-    Assert.assertNull(trackAction);
-
-  }
-
-  @Test
-  public void testServiceComponentUpdateRecursive() throws AmbariException {
-    String clusterName = "foo1";
-    createCluster(clusterName);
-    String serviceName1 = "HDFS";
-    createService(clusterName, serviceName1, null);
-    String componentName1 = "NAMENODE";
-    String componentName2 = "DATANODE";
-    String componentName3 = "HDFS_CLIENT";
-    createServiceComponent(clusterName, serviceName1, componentName1,
-        State.INIT);
-    createServiceComponent(clusterName, serviceName1, componentName2,
-        State.INIT);
-    createServiceComponent(clusterName, serviceName1, componentName3,
-        State.INIT);
-    String host1 = "h1";
-    clusters.addHost(host1);
-    clusters.getHost("h1").setOsType("centos5");
-    clusters.getHost("h1").persist();
-    String host2 = "h2";
-    clusters.addHost(host2);
-    clusters.getHost("h2").setOsType("centos5");
-    clusters.getHost("h2").persist();
-    clusters.mapHostToCluster(host1, clusterName);
-    clusters.mapHostToCluster(host2, clusterName);
-
-    Set<ServiceComponentHostRequest> set1 =
-        new HashSet<ServiceComponentHostRequest>();
-    ServiceComponentHostRequest r1 =
-        new ServiceComponentHostRequest(clusterName, serviceName1,
-            componentName1, host1, null, State.INIT.toString());
-    ServiceComponentHostRequest r2 =
-        new ServiceComponentHostRequest(clusterName, serviceName1,
-            componentName2, host1, null, State.INIT.toString());
-    ServiceComponentHostRequest r3 =
-        new ServiceComponentHostRequest(clusterName, serviceName1,
-            componentName1, host2, null, State.INIT.toString());
-    ServiceComponentHostRequest r4 =
-        new ServiceComponentHostRequest(clusterName, serviceName1,
-            componentName2, host2, null, State.INIT.toString());
-    ServiceComponentHostRequest r5 =
-        new ServiceComponentHostRequest(clusterName, serviceName1,
-            componentName3, host1, null, State.INIT.toString());
-
-    set1.add(r1);
-    set1.add(r2);
-    set1.add(r3);
-    set1.add(r4);
-    set1.add(r5);
-    controller.createHostComponents(set1);
-
-    Cluster c1 = clusters.getCluster(clusterName);
-    Service s1 = c1.getService(serviceName1);
-    ServiceComponent sc1 = s1.getServiceComponent(componentName1);
-    ServiceComponent sc2 = s1.getServiceComponent(componentName2);
-    ServiceComponent sc3 = s1.getServiceComponent(componentName3);
-    ServiceComponentHost sch1 = sc1.getServiceComponentHost(host1);
-    ServiceComponentHost sch2 = sc2.getServiceComponentHost(host1);
-    ServiceComponentHost sch3 = sc1.getServiceComponentHost(host2);
-    ServiceComponentHost sch4 = sc2.getServiceComponentHost(host2);
-    ServiceComponentHost sch5 = sc3.getServiceComponentHost(host1);
-
-    s1.setDesiredState(State.INSTALLED);
-    sc1.setDesiredState(State.INIT);
-    sc2.setDesiredState(State.INIT);
-    sc3.setDesiredState(State.STARTED);
-    sch1.setDesiredState(State.INSTALLED);
-    sch2.setDesiredState(State.INSTALLED);
-    sch3.setDesiredState(State.STARTED);
-    sch4.setDesiredState(State.INSTALLED);
-    sch5.setDesiredState(State.INSTALLED);
-    sch1.setState(State.INSTALLED);
-    sch2.setState(State.INSTALLED);
-    sch3.setState(State.STARTED);
-    sch4.setState(State.INSTALLED);
-    sch5.setState(State.INSTALLED);
-
-    Set<ServiceComponentRequest> reqs =
-        new HashSet<ServiceComponentRequest>();
-    ServiceComponentRequest req1, req2, req3;
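-    // The underlying host components are already INSTALLED/STARTED, so a
-    // request back to INIT is not a supported transition and must be rejected.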
-    try {
-      reqs.clear();
-      req1 = new ServiceComponentRequest(clusterName, serviceName1,
-          sc1.getName(), null, State.INIT.toString());
-      reqs.add(req1);
-      controller.updateComponents(reqs);
-      fail("Expected failure for invalid state update");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    s1.setDesiredState(State.INSTALLED);
-    sc1.setDesiredState(State.STARTED);
-    sc2.setDesiredState(State.INSTALLED);
-    sc3.setDesiredState(State.STARTED);
-    sch1.setDesiredState(State.INIT);
-    sch2.setDesiredState(State.INIT);
-    sch3.setDesiredState(State.INIT);
-    sch4.setDesiredState(State.INIT);
-    sch5.setDesiredState(State.INIT);
-    sch1.setState(State.INIT);
-    sch2.setState(State.INSTALLED);
-    sch3.setState(State.INIT);
-    sch4.setState(State.INSTALLED);
-    sch5.setState(State.INSTALLED);
-
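-    // sch1 and sch3 now report INIT as their live state, so STARTED is again
-    // an invalid target for the component update.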
-    try {
-      reqs.clear();
-      req1 = new ServiceComponentRequest(clusterName, serviceName1,
-          sc1.getName(), null, State.STARTED.toString());
-      reqs.add(req1);
-      controller.updateComponents(reqs);
-      fail("Expected failure for invalid state update");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    s1.setDesiredState(State.INSTALLED);
-    sc1.setDesiredState(State.STARTED);
-    sc2.setDesiredState(State.INIT);
-    sc3.setDesiredState(State.STARTED);
-    sch1.setDesiredState(State.INIT);
-    sch2.setDesiredState(State.INIT);
-    sch3.setDesiredState(State.INIT);
-    sch4.setDesiredState(State.INIT);
-    sch5.setDesiredState(State.INIT);
-    sch1.setState(State.STARTED);
-    sch2.setState(State.INIT);
-    sch3.setState(State.INSTALLED);
-    sch4.setState(State.STOP_FAILED);
-    sch5.setState(State.INIT);
-
-    reqs.clear();
-    req1 = new ServiceComponentRequest(clusterName, serviceName1,
-        sc1.getName(), null, State.INSTALLED.toString());
-    req2 = new ServiceComponentRequest(clusterName, serviceName1,
-        sc2.getName(), null, State.INSTALLED.toString());
-    req3 = new ServiceComponentRequest(clusterName, serviceName1,
-        sc3.getName(), null, State.INSTALLED.toString());
-    reqs.add(req1);
-    reqs.add(req2);
-    reqs.add(req3);
-    RequestStatusResponse trackAction = controller.updateComponents(reqs);
-
-    Assert.assertEquals(State.INSTALLED, s1.getDesiredState());
-    Assert.assertEquals(State.INSTALLED, sc1.getDesiredState());
-    Assert.assertEquals(State.INSTALLED, sc2.getDesiredState());
-    Assert.assertEquals(State.INSTALLED, sc3.getDesiredState());
-    Assert.assertEquals(State.INSTALLED, sch1.getDesiredState());
-    Assert.assertEquals(State.INSTALLED, sch2.getDesiredState());
-    Assert.assertEquals(State.INSTALLED, sch3.getDesiredState());
-    Assert.assertEquals(State.INSTALLED, sch4.getDesiredState());
-    Assert.assertEquals(State.INSTALLED, sch5.getDesiredState());
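-    // Only desired states change here; the live states set above stay as-is
-    // until the action manager actually executes the generated commands.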
-    Assert.assertEquals(State.STARTED, sch1.getState());
-    Assert.assertEquals(State.INIT, sch2.getState());
-    Assert.assertEquals(State.INSTALLED, sch3.getState());
-    Assert.assertEquals(State.STOP_FAILED, sch4.getState());
-    Assert.assertEquals(State.INIT, sch5.getState());
-
-    long requestId = trackAction.getRequestId();
-    List<Stage> stages = actionDB.getAllStages(requestId);
-    Assert.assertTrue(!stages.isEmpty());
-
-    // FIXME check stage count
-
-    for (Stage stage : stages) {
-      LOG.debug("Stage dump: " + stage.toString());
-    }
-
-    // FIXME verify stages content - execution commands, etc
-
-    // manually set live state

-    sch1.setState(State.INSTALLED);
-    sch2.setState(State.INSTALLED);
-    sch3.setState(State.INSTALLED);
-    sch4.setState(State.INSTALLED);
-    sch5.setState(State.INSTALLED);
-
-    // test no-op
-    reqs.clear();
-    req1 = new ServiceComponentRequest(clusterName, serviceName1,
-        sc1.getName(), null, State.INSTALLED.toString());
-    req2 = new ServiceComponentRequest(clusterName, serviceName1,
-        sc2.getName(), null, State.INSTALLED.toString());
-    reqs.add(req1);
-    reqs.add(req2);
-    trackAction = controller.updateComponents(reqs);
-    Assert.assertNull(trackAction);
-  }
-
-  @Test
-  public void testServiceComponentHostUpdateRecursive() throws AmbariException {
-    String clusterName = "foo1";
-    createCluster(clusterName);
-    String serviceName1 = "HDFS";
-    createService(clusterName, serviceName1, null);
-    String componentName1 = "NAMENODE";
-    String componentName2 = "DATANODE";
-    String componentName3 = "HDFS_CLIENT";
-    createServiceComponent(clusterName, serviceName1, componentName1,
-        State.INIT);
-    createServiceComponent(clusterName, serviceName1, componentName2,
-        State.INIT);
-    createServiceComponent(clusterName, serviceName1, componentName3,
-        State.INIT);
-    String host1 = "h1";
-    clusters.addHost(host1);
-    clusters.getHost("h1").setOsType("centos5");
-    clusters.getHost("h1").persist();
-    String host2 = "h2";
-    clusters.addHost(host2);
-    clusters.getHost("h2").setOsType("centos5");
-    clusters.getHost("h2").persist();
-    clusters.mapHostToCluster(host1, clusterName);
-    clusters.mapHostToCluster(host2, clusterName);
-
-    Set<ServiceComponentHostRequest> set1 =
-        new HashSet<ServiceComponentHostRequest>();
-    ServiceComponentHostRequest r1 =
-        new ServiceComponentHostRequest(clusterName, serviceName1,
-            componentName1, host1, null, State.INIT.toString());
-    ServiceComponentHostRequest r2 =
-        new ServiceComponentHostRequest(clusterName, serviceName1,
-            componentName2, host1, null, State.INIT.toString());
-    ServiceComponentHostRequest r3 =
-        new ServiceComponentHostRequest(clusterName, serviceName1,
-            componentName1, host2, null, State.INIT.toString());
-    ServiceComponentHostRequest r4 =
-        new ServiceComponentHostRequest(clusterName, serviceName1,
-            componentName2, host2, null, State.INIT.toString());
-    ServiceComponentHostRequest r5 =
-        new ServiceComponentHostRequest(clusterName, serviceName1,
-            componentName3, host1, null, State.INIT.toString());
-
-    set1.add(r1);
-    set1.add(r2);
-    set1.add(r3);
-    set1.add(r4);
-    set1.add(r5);
-    controller.createHostComponents(set1);
-
-    Cluster c1 = clusters.getCluster(clusterName);
-    Service s1 = c1.getService(serviceName1);
-    ServiceComponent sc1 = s1.getServiceComponent(componentName1);
-    ServiceComponent sc2 = s1.getServiceComponent(componentName2);
-    ServiceComponent sc3 = s1.getServiceComponent(componentName3);
-    ServiceComponentHost sch1 = sc1.getServiceComponentHost(host1);
-    ServiceComponentHost sch2 = sc2.getServiceComponentHost(host1);
-    ServiceComponentHost sch3 = sc1.getServiceComponentHost(host2);
-    ServiceComponentHost sch4 = sc2.getServiceComponentHost(host2);
-    ServiceComponentHost sch5 = sc3.getServiceComponentHost(host1);
-
-    s1.setDesiredState(State.INIT);
-    sc1.setDesiredState(State.INIT);
-    sc2.setDesiredState(State.INIT);
-    sc3.setDesiredState(State.INIT);
-    sch1.setDesiredState(State.INIT);
-    sch2.setDesiredState(State.INIT);
-    sch3.setDesiredState(State.INIT);
-    sch4.setDesiredState(State.INSTALLED);
-    sch5.setDesiredState(State.INSTALLED);
-    sch1.setState(State.INIT);
-    sch2.setState(State.INSTALL_FAILED);
-    sch3.setState(State.INIT);
-    sch4.setState(State.INSTALLED);
-    sch5.setState(State.INSTALLED);
-
-    ServiceComponentHostRequest req1, req2, req3, req4, req5;
-    Set<ServiceComponentHostRequest> reqs =
-        new HashSet<ServiceComponentHostRequest>();
-
-    try {
-      reqs.clear();
-      req1 = new ServiceComponentHostRequest(clusterName, serviceName1,
-          componentName1, host1,
-          null, State.STARTED.toString());
-      reqs.add(req1);
-      controller.updateHostComponents(reqs);
-      fail("Expected failure for invalid transition");
-    } catch (Exception e) {
-      // Expected
-    }
-
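-    // Mixed batch: req5 asks for STARTED on the HDFS_CLIENT component, and
-    // client components presumably cannot be started (see
-    // testStartClientComponent below), so the whole batch must be rejected.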
-    try {
-      reqs.clear();
-      req1 = new ServiceComponentHostRequest(clusterName, serviceName1,
-          componentName1, host1,
-          null, State.INSTALLED.toString());
-      req2 = new ServiceComponentHostRequest(clusterName, serviceName1,
-          componentName1, host2,
-          null, State.INSTALLED.toString());
-      req3 = new ServiceComponentHostRequest(clusterName, serviceName1,
-          componentName2, host1,
-          null, State.INSTALLED.toString());
-      req4 = new ServiceComponentHostRequest(clusterName, serviceName1,
-          componentName2, host2,
-          null, State.INSTALLED.toString());
-      req5 = new ServiceComponentHostRequest(clusterName, serviceName1,
-          componentName3, host1,
-          null, State.STARTED.toString());
-      reqs.add(req1);
-      reqs.add(req2);
-      reqs.add(req3);
-      reqs.add(req4);
-      reqs.add(req5);
-      controller.updateHostComponents(reqs);
-      fail("Expected failure for invalid states");
-    } catch (Exception e) {
-      // Expected
-    }
-
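-    // Service names are omitted on req1 and req3; the controller is expected
-    // to resolve the owning service from the component name.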
-    reqs.clear();
-    req1 = new ServiceComponentHostRequest(clusterName, null,
-        componentName1, host1,
-        null, State.INSTALLED.toString());
-    req2 = new ServiceComponentHostRequest(clusterName, serviceName1,
-        componentName1, host2,
-        null, State.INSTALLED.toString());
-    req3 = new ServiceComponentHostRequest(clusterName, null,
-        componentName2, host1,
-        null, State.INSTALLED.toString());
-    req4 = new ServiceComponentHostRequest(clusterName, serviceName1,
-        componentName2, host2,
-        null, State.INSTALLED.toString());
-    req5 = new ServiceComponentHostRequest(clusterName, serviceName1,
-        componentName3, host1,
-        null, State.INSTALLED.toString());
-    reqs.add(req1);
-    reqs.add(req2);
-    reqs.add(req3);
-    reqs.add(req4);
-    reqs.add(req5);
-    RequestStatusResponse trackAction = controller.updateHostComponents(reqs);
-    Assert.assertNotNull(trackAction);
-
-    long requestId = trackAction.getRequestId();
-
-    Assert.assertFalse(actionDB.getAllStages(requestId).isEmpty());
-    List<Stage> stages = actionDB.getAllStages(requestId);
-    // FIXME check stage count
-
-    for (Stage stage : stages) {
-      LOG.debug("Stage dump: " + stage.toString());
-    }
-
-    // FIXME verify stages content - execution commands, etc
-
-    // manually set live state
-    sch1.setState(State.INSTALLED);
-    sch2.setState(State.INSTALLED);
-    sch3.setState(State.INSTALLED);
-    sch4.setState(State.INSTALLED);
-    sch5.setState(State.INSTALLED);
-
-    // test no-op
-    reqs.clear();
-    req1 = new ServiceComponentHostRequest(clusterName, serviceName1,
-        componentName1, host1,
-        null, State.INSTALLED.toString());
-    req2 = new ServiceComponentHostRequest(clusterName, serviceName1,
-        componentName1, host2,
-        null, State.INSTALLED.toString());
-    reqs.add(req1);
-    reqs.add(req2);
-    trackAction = controller.updateHostComponents(reqs);
-    Assert.assertNull(trackAction);
-  }
-
-  @Test
-  public void testStartClientComponent() {
-    // FIXME write test after meta data integration
-    // start should fail
-  }
-
-  @Test
-  public void testStartClientHostComponent() {
-    // FIXME write test after meta data integration
-    // start should fail
-  }
-
-  @SuppressWarnings("serial")
-  @Test
-  public void testGetRequestAndTaskStatus() throws AmbariException {
-    long requestId = 3;
-    long stageId = 4;
-    String clusterName = "c1";
-    final String hostName1 = "h1";
-    final String hostName2 = "h2";
-    clusters.addCluster(clusterName);
-    clusters.getCluster(clusterName).setDesiredStackVersion(
-        new StackId("HDP-0.1"));
-    clusters.addHost(hostName1);
-    clusters.getHost("h1").setOsType("centos5");
-    clusters.getHost(hostName1).persist();
-    clusters.addHost(hostName2);
-    clusters.getHost("h2").setOsType("centos5");
-    clusters.getHost(hostName2).persist();
-    clusters.mapHostsToCluster(new HashSet<String>(){
-      {add(hostName1); add(hostName2);}}, clusterName);
-
-
-    List<Stage> stages = new ArrayList<Stage>();
-    stages.add(new Stage(requestId, "/a1", clusterName));
-    stages.get(0).setStageId(stageId++);
-    stages.get(0).addHostRoleExecutionCommand(hostName1, Role.HBASE_MASTER,
-        RoleCommand.START,
-        new ServiceComponentHostStartEvent(Role.HBASE_MASTER.toString(),
-            hostName1, System.currentTimeMillis(),
-            new HashMap<String, String>()),
-            clusterName, "HBASE");
-    stages.add(new Stage(requestId, "/a2", clusterName));
-    stages.get(1).setStageId(stageId);
-    stages.get(1).addHostRoleExecutionCommand(hostName1, Role.HBASE_CLIENT,
-        RoleCommand.START,
-        new ServiceComponentHostStartEvent(Role.HBASE_CLIENT.toString(),
-            hostName1, System.currentTimeMillis(),
-            new HashMap<String, String>()), clusterName, "HBASE");
-
-    actionDB.persistActions(stages);
-
-    Set<RequestStatusResponse> requestStatusResponses =
-        controller.getRequestStatus(new RequestStatusRequest(requestId, null));
-
-    RequestStatusResponse requestStatusResponse =
-        requestStatusResponses.iterator().next();
-    assertEquals(requestId, requestStatusResponse.getRequestId());
-    assertEquals(2, requestStatusResponse.getTasks().size());
-
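-    // The tasks are assumed to come back ordered, so the HBASE_MASTER start
-    // from the first stage is expected at index 0.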
-    ShortTaskStatus task1 = requestStatusResponse.getTasks().get(0);
-    ShortTaskStatus task2 = requestStatusResponse.getTasks().get(1);
-
-    assertEquals(RoleCommand.START.toString(), task1.getCommand());
-    assertEquals(Role.HBASE_MASTER.toString(), task1.getRole());
-
-    Set<TaskStatusRequest> taskStatusRequests = new HashSet<TaskStatusRequest>();
-    taskStatusRequests.add(new TaskStatusRequest(requestId, task1.getTaskId()));
-    taskStatusRequests.add(new TaskStatusRequest(requestId, task2.getTaskId()));
-    Set<TaskStatusResponse> taskStatusResponses =
-        controller.getTaskStatus(taskStatusRequests);
-
-    assertEquals(2, taskStatusResponses.size());
-  }
-
-
-  @SuppressWarnings("serial")
-  @Test
-  public void testGetActions() throws Exception {
-    Set<ActionResponse> responses = controller.getActions(
-        new HashSet<ActionRequest>() {{
-          add(new ActionRequest(null, "HDFS", null, null));
-    }});
-
-    assertFalse(responses.isEmpty());
-    assertEquals(1, responses.size());
-    ActionResponse response = responses.iterator().next();
-    assertEquals(Role.HDFS_SERVICE_CHECK.name(), response.getActionName());
-  }
-
-  @SuppressWarnings("serial")
-  @Test
-  public void testCreateActions() throws Exception {
-    clusters.addCluster("c1");
-    clusters.getCluster("c1").setDesiredStackVersion(new StackId("HDP-0.1"));
-    clusters.addHost("h1");
-    clusters.getHost("h1").setOsType("centos5");
-    clusters.getHost("h1").persist();
-    clusters.addHost("h2");
-    clusters.getHost("h2").setOsType("centos5");
-    clusters.getHost("h2").persist();
-    Set<String> hostNames = new HashSet<String>(){{
-      add("h1");
-      add("h2");
-    }};
-
-    clusters.mapHostsToCluster(hostNames, "c1");
-
-    Cluster cluster = clusters.getCluster("c1");
-    cluster.setDesiredStackVersion(new StackId("HDP-0.1"));
-    Service hdfs = cluster.addService("HDFS");
-    Service mapReduce = cluster.addService("MAPREDUCE");
-    hdfs.persist();
-    mapReduce.persist();
-
-    hdfs.addServiceComponent(Role.HDFS_CLIENT.name()).persist();
-    mapReduce.addServiceComponent(Role.MAPREDUCE_CLIENT.name()).persist();
-
-    hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost("h1").persist();
-    mapReduce.getServiceComponent(Role.MAPREDUCE_CLIENT.name()).addServiceComponentHost("h2").persist();
-
-    Set<ActionRequest> actionRequests = new HashSet<ActionRequest>();
-    Map<String, String> params = new HashMap<String, String>(){{
-      put("test", "test");
-    }};
-    ActionRequest actionRequest = new ActionRequest("c1", "HDFS", Role.HDFS_SERVICE_CHECK.name(), params);
-    actionRequests.add(actionRequest);
-
-    RequestStatusResponse response = controller.createActions(actionRequests);
-    assertEquals(1, response.getTasks().size());
-    ShortTaskStatus task = response.getTasks().get(0);
-
-    List<HostRoleCommand> storedTasks = actionDB.getRequestTasks(response.getRequestId());
-    assertEquals(1, storedTasks.size());
-    HostRoleCommand hostRoleCommand = storedTasks.get(0);
-
-    assertEquals(task.getTaskId(), hostRoleCommand.getTaskId());
-    assertEquals(actionRequest.getServiceName(), hostRoleCommand.getExecutionCommandWrapper().getExecutionCommand().getServiceName());
-    assertEquals(actionRequest.getClusterName(), hostRoleCommand.getExecutionCommandWrapper().getExecutionCommand().getClusterName());
-    assertEquals(actionRequest.getActionName(), hostRoleCommand.getExecutionCommandWrapper().getExecutionCommand().getRole().name());
-    assertEquals(Role.HDFS_CLIENT.name(), hostRoleCommand.getEvent().getEvent().getServiceComponentName());
-    assertEquals(actionRequest.getParameters(), hostRoleCommand.getExecutionCommandWrapper().getExecutionCommand().getRoleParams());
-
-    actionRequests.add(new ActionRequest("c1", "MAPREDUCE", Role.MAPREDUCE_SERVICE_CHECK.name(), null));
-
-    response = controller.createActions(actionRequests);
-
-    assertEquals(2, response.getTasks().size());
-
-    List<HostRoleCommand> tasks = actionDB.getRequestTasks(response.getRequestId());
-
-    assertEquals(2, tasks.size());
-  }
-
-  private void createUser(String userName) throws Exception {
-    UserRequest request = new UserRequest(userName);
-    request.setPassword("password");
-
-    controller.createUsers(new HashSet<UserRequest>(Collections.singleton(request)));
-  }
-
-  @Test
-  public void testCreateAndGetUsers() throws Exception {
-    createUser("user1");
-
-    Set<UserResponse> r =
-        controller.getUsers(Collections.singleton(new UserRequest("user1")));
-
-    Assert.assertEquals(1, r.size());
-    UserResponse resp = r.iterator().next();
-    Assert.assertEquals("user1", resp.getUsername());
-  }
-
-  @Test
-  public void testGetUsers() throws Exception {
-    createUser("user1");
-    createUser("user2");
-    createUser("user3");
-
-    UserRequest request = new UserRequest(null);
-
-    Set<UserResponse> responses = controller.getUsers(Collections.singleton(request));
-
-    Assert.assertEquals(3, responses.size());
-  }
-
-  @SuppressWarnings("serial")
-  @Test
-  public void testUpdateUsers() throws Exception {
-    createUser("user1");
-    users.createDefaultRoles();
-
-    UserRequest request = new UserRequest("user1");
-    request.setRoles(new HashSet<String>(){{
-      add("user");
-      add("admin");
-    }});
-
-    controller.updateUsers(Collections.singleton(request));
-  }
-
-  @SuppressWarnings("serial")
-  @Test
-  public void testDeleteUsers() throws Exception {
-    createUser("user1");
-
-    users.createDefaultRoles();
-
-    UserRequest request = new UserRequest("user1");
-    request.setRoles(new HashSet<String>(){{
-      add("user");
-      add("admin");
-    }});
-    controller.updateUsers(Collections.singleton(request));
-
-    request = new UserRequest("user1");
-    controller.deleteUsers(Collections.singleton(request));
-
-    Set<UserResponse> responses = controller.getUsers(
-        Collections.singleton(new UserRequest(null)));
-
-    Assert.assertEquals(0, responses.size());
-
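-    // Deleting the user must not cascade to the role definitions themselves.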
-    RoleDAO roleDao = injector.getInstance(RoleDAO.class);
-    RoleEntity re1 = roleDao.findByName("user");
-    RoleEntity re2 = roleDao.findByName("admin");
-    Assert.assertNotNull(re1);
-    Assert.assertNotNull(re2);
-  }
-
-  @Test
-  public void testRcaOnJobtrackerHost() throws AmbariException {
-    String clusterName = "foo1";
-    createCluster(clusterName);
-    clusters.getCluster(clusterName)
-        .setDesiredStackVersion(new StackId("HDP-0.1"));
-    String serviceName = "MAPREDUCE";
-    createService(clusterName, serviceName, null);
-    String componentName1 = "JOBTRACKER";
-    String componentName2 = "TASKTRACKER";
-    String componentName3 = "MAPREDUCE_CLIENT";
-    createServiceComponent(clusterName, serviceName, componentName1,
-        State.INIT);
-    createServiceComponent(clusterName, serviceName, componentName2,
-        State.INIT);
-    createServiceComponent(clusterName, serviceName, componentName3,
-        State.INIT);
-
-    String host1 = "h1";
-    clusters.addHost(host1);
-    clusters.getHost("h1").setOsType("centos5");
-    clusters.getHost("h1").persist();
-    String host2 = "h2";
-    clusters.addHost(host2);
-    clusters.getHost("h2").setOsType("centos6");
-    clusters.getHost("h2").persist();
-
-    clusters.mapHostToCluster(host1, clusterName);
-    clusters.mapHostToCluster(host2, clusterName);
-
-
-    createServiceComponentHost(clusterName, serviceName, componentName1,
-        host1, null);
-    createServiceComponentHost(clusterName, serviceName, componentName2,
-        host1, null);
-    createServiceComponentHost(clusterName, serviceName, componentName2,
-        host2, null);
-    createServiceComponentHost(clusterName, serviceName, componentName3,
-        host1, null);
-    createServiceComponentHost(clusterName, serviceName, componentName3,
-        host2, null);
-
-    Map<String, String> configs = new HashMap<String, String>();
-    configs.put("a", "b");
-    configs.put("rca_enabled", "true");
-
-    ConfigurationRequest cr1 = new ConfigurationRequest(clusterName, "global",
-        "v1", configs);
-    controller.createConfiguration(cr1);
-
-    Set<ServiceRequest> sReqs = new HashSet<ServiceRequest>();
-    Map<String, String> configVersions = new HashMap<String, String>();
-    configVersions.put("global", "v1");
-    sReqs.clear();
-    sReqs.add(new ServiceRequest(clusterName, serviceName, configVersions,
-        "INSTALLED"));
-    RequestStatusResponse trackAction = controller.updateServices(sReqs);
-    List<Stage> stages = actionDB.getAllStages(trackAction.getRequestId());
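-    // rca_enabled should be pushed as "true" only to the host running the
-    // JOBTRACKER (h1); every other host should receive rca_enabled=false.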
-    for (ExecutionCommandWrapper cmd : stages.get(0)
-        .getExecutionCommands(host1)) {
-      assertEquals(
-          "true",
-          cmd.getExecutionCommand().getConfigurations().get("global")
-              .get("rca_enabled"));
-    }
-    for (ExecutionCommandWrapper cmd : stages.get(0)
-        .getExecutionCommands(host2)) {
-      assertEquals(
-          "false",
-          cmd.getExecutionCommand().getConfigurations().get("global")
-              .get("rca_enabled"));
-    }
-  }
-
-  @Test
-  public void testUpdateConfigForRunningService() throws Exception {
-      String clusterName = "foo1";
-      createCluster(clusterName);
-      clusters.getCluster(clusterName)
-              .setDesiredStackVersion(new StackId("HDP-0.1"));
-      String serviceName = "HDFS";
-      createService(clusterName, serviceName, null);
-      String componentName1 = "NAMENODE";
-      String componentName2 = "DATANODE";
-      String componentName3 = "HDFS_CLIENT";
-      createServiceComponent(clusterName, serviceName, componentName1,
-              State.INIT);
-      createServiceComponent(clusterName, serviceName, componentName2,
-              State.INIT);
-      createServiceComponent(clusterName, serviceName, componentName3,
-              State.INIT);
-
-      String host1 = "h1";
-      clusters.addHost(host1);
-      clusters.getHost("h1").setOsType("centos5");
-      clusters.getHost("h1").persist();
-      String host2 = "h2";
-      clusters.addHost(host2);
-      clusters.getHost("h2").setOsType("centos6");
-      clusters.getHost("h2").persist();
-
-      clusters.mapHostToCluster(host1, clusterName);
-      clusters.mapHostToCluster(host2, clusterName);
-
-
-      // null service should work
-      createServiceComponentHost(clusterName, null, componentName1,
-              host1, null);
-      createServiceComponentHost(clusterName, serviceName, componentName2,
-              host1, null);
-      createServiceComponentHost(clusterName, serviceName, componentName2,
-              host2, null);
-      createServiceComponentHost(clusterName, serviceName, componentName3,
-              host1, null);
-      createServiceComponentHost(clusterName, serviceName, componentName3,
-              host2, null);
-
-      Assert.assertNotNull(clusters.getCluster(clusterName)
-              .getService(serviceName)
-              .getServiceComponent(componentName1)
-              .getServiceComponentHost(host1));
-      Assert.assertNotNull(clusters.getCluster(clusterName)
-              .getService(serviceName)
-              .getServiceComponent(componentName2)
-              .getServiceComponentHost(host1));
-      Assert.assertNotNull(clusters.getCluster(clusterName)
-              .getService(serviceName)
-              .getServiceComponent(componentName2)
-              .getServiceComponentHost(host2));
-      Assert.assertNotNull(clusters.getCluster(clusterName)
-              .getService(serviceName)
-              .getServiceComponent(componentName3)
-              .getServiceComponentHost(host1));
-      Assert.assertNotNull(clusters.getCluster(clusterName)
-              .getService(serviceName)
-              .getServiceComponent(componentName3)
-              .getServiceComponentHost(host2));
-
-      // Install
-      ServiceRequest r = new ServiceRequest(clusterName, serviceName, null,
-              State.INSTALLED.toString());
-      Set<ServiceRequest> requests = new HashSet<ServiceRequest>();
-      requests.add(r);
-
-      RequestStatusResponse trackAction =
-              controller.updateServices(requests);
-      Assert.assertEquals(State.INSTALLED,
-              clusters.getCluster(clusterName).getService(serviceName)
-                      .getDesiredState());
-
-      // manually change live state to installed as no running action manager
-      for (ServiceComponent sc :
-              clusters.getCluster(clusterName).getService(serviceName)
-                      .getServiceComponents().values()) {
-          for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
-              sch.setState(State.INSTALLED);
-          }
-      }
-
-      // Start
-      r = new ServiceRequest(clusterName, serviceName, null,
-              State.STARTED.toString());
-      requests.clear();
-      requests.add(r);
-      trackAction = controller.updateServices(requests);
-
-      // manually change live state to started as no running action manager
-      for (ServiceComponent sc :
-              clusters.getCluster(clusterName).getService(serviceName)
-                      .getServiceComponents().values()) {
-          for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
-              sch.setState(State.STARTED);
-          }
-      }
-
-      Assert.assertEquals(State.STARTED,
-              clusters.getCluster(clusterName).getService(serviceName)
-                      .getDesiredState());
-      for (ServiceComponent sc :
-              clusters.getCluster(clusterName).getService(serviceName)
-                      .getServiceComponents().values()) {
-          if (sc.getName().equals("HDFS_CLIENT")) {
-              Assert.assertEquals(State.INSTALLED, sc.getDesiredState());
-          } else {
-              Assert.assertEquals(State.STARTED, sc.getDesiredState());
-          }
-          for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
-              if (sch.getServiceComponentName().equals("HDFS_CLIENT")) {
-                  Assert.assertEquals(State.INSTALLED, sch.getDesiredState());
-              } else {
-                  Assert.assertEquals(State.STARTED, sch.getDesiredState());
-              }
-          }
-      }
-
-      Map<String, String> configs = new HashMap<String, String>();
-      configs.put("a", "b");
-
-      ConfigurationRequest cr1, cr2, cr3, cr4, cr5, cr6, cr7, cr8;
-      cr1 = new ConfigurationRequest(clusterName, "typeA","v1", configs);
-      cr2 = new ConfigurationRequest(clusterName, "typeB","v1", configs);
-      cr3 = new ConfigurationRequest(clusterName, "typeC","v1", configs);
-      cr4 = new ConfigurationRequest(clusterName, "typeD","v1", configs);
-      cr5 = new ConfigurationRequest(clusterName, "typeA","v2", configs);
-      cr6 = new ConfigurationRequest(clusterName, "typeB","v2", configs);
-      cr7 = new ConfigurationRequest(clusterName, "typeC","v2", configs);
-      cr8 = new ConfigurationRequest(clusterName, "typeE","v1", configs);
-      controller.createConfiguration(cr1);
-      controller.createConfiguration(cr2);
-      controller.createConfiguration(cr3);
-      controller.createConfiguration(cr4);
-      controller.createConfiguration(cr5);
-      controller.createConfiguration(cr6);
-      controller.createConfiguration(cr7);
-      controller.createConfiguration(cr8);
-
-      Cluster cluster = clusters.getCluster(clusterName);
-      Service s = cluster.getService(serviceName);
-      ServiceComponent sc1 = s.getServiceComponent(componentName1);
-      ServiceComponent sc2 = s.getServiceComponent(componentName2);
-      ServiceComponentHost sch1 = sc1.getServiceComponentHost(host1);
-
-      Set<ServiceComponentHostRequest> schReqs =
-              new HashSet<ServiceComponentHostRequest>();
-      Set<ServiceComponentRequest> scReqs =
-              new HashSet<ServiceComponentRequest>();
-      Set<ServiceRequest> sReqs = new HashSet<ServiceRequest>();
-      Map<String, String> configVersions = new HashMap<String, String>();
-
-      // update configs at SCH and SC level
-      configVersions.clear();
-      configVersions.put("typeA", "v1");
-      configVersions.put("typeB", "v1");
-      configVersions.put("typeC", "v1");
-      schReqs.clear();
-      schReqs.add(new ServiceComponentHostRequest(clusterName, serviceName,
-              componentName1, host1, configVersions, null));
-      Assert.assertNull(controller.updateHostComponents(schReqs));
-
-      Assert.assertEquals(0, s.getDesiredConfigs().size());
-      Assert.assertEquals(0, sc1.getDesiredConfigs().size());
-      Assert.assertEquals(3, sch1.getDesiredConfigs().size());
-
-      configVersions.clear();
-      configVersions.put("typeC", "v1");
-      configVersions.put("typeD", "v1");
-      scReqs.clear();
-      scReqs.add(new ServiceComponentRequest(clusterName, serviceName,
-              componentName2, configVersions, null));
-      Assert.assertNull(controller.updateComponents(scReqs));
-
-      Assert.assertEquals(0, s.getDesiredConfigs().size());
-      Assert.assertEquals(0, sc1.getDesiredConfigs().size());
-      Assert.assertEquals(2, sc2.getDesiredConfigs().size());
-      Assert.assertEquals(3, sch1.getDesiredConfigs().size());
-
-      // update configs at service level
-      configVersions.clear();
-      configVersions.put("typeA", "v2");
-      configVersions.put("typeC", "v2");
-      configVersions.put("typeE", "v1");
-      sReqs.clear();
-      sReqs.add(new ServiceRequest(clusterName, serviceName, configVersions,
-              null));
-      Assert.assertNull(controller.updateServices(sReqs));
-
-      Assert.assertEquals(3, s.getDesiredConfigs().size());
-      Assert.assertEquals(3, sc1.getDesiredConfigs().size());
-      Assert.assertEquals(4, sc2.getDesiredConfigs().size());
-      Assert.assertEquals(4, sch1.getDesiredConfigs().size());
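-      // The service-level update cascades downward: sc1, sc2 and sch1 inherit
-      // typeA/typeC/typeE, while the earlier typeD (sc2) and typeB (sch1)
-      // settings survive, as the version tags below confirm.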
-
-      Assert.assertEquals("v2",
-              s.getDesiredConfigs().get("typeA").getVersionTag());
-      Assert.assertEquals("v2",
-              s.getDesiredConfigs().get("typeC").getVersionTag());
-      Assert.assertEquals("v1",
-              s.getDesiredConfigs().get("typeE").getVersionTag());
-
-      Assert.assertEquals("v2",
-              sc1.getDesiredConfigs().get("typeA").getVersionTag());
-      Assert.assertEquals("v2",
-              sc1.getDesiredConfigs().get("typeC").getVersionTag());
-      Assert.assertEquals("v1",
-              sc1.getDesiredConfigs().get("typeE").getVersionTag());
-
-      Assert.assertEquals("v2",
-              sc2.getDesiredConfigs().get("typeA").getVersionTag());
-      Assert.assertEquals("v2",
-              sc2.getDesiredConfigs().get("typeC").getVersionTag());
-      Assert.assertEquals("v1",
-              sc2.getDesiredConfigs().get("typeE").getVersionTag());
-      Assert.assertEquals("v1",
-              sc2.getDesiredConfigs().get("typeD").getVersionTag());
-
-      Assert.assertEquals("v2",
-              sch1.getDesiredConfigs().get("typeA").getVersionTag());
-      Assert.assertEquals("v2",
-              sch1.getDesiredConfigs().get("typeC").getVersionTag());
-      Assert.assertEquals("v1",
-              sch1.getDesiredConfigs().get("typeE").getVersionTag());
-      Assert.assertEquals("v1",
-              sch1.getDesiredConfigs().get("typeB").getVersionTag());
-
-      // update configs at SCH level
-      configVersions.clear();
-      configVersions.put("typeA", "v1");
-      configVersions.put("typeB", "v1");
-      configVersions.put("typeC", "v1");
-      schReqs.clear();
-      schReqs.add(new ServiceComponentHostRequest(clusterName, serviceName,
-              componentName1, host1, configVersions, null));
-      Assert.assertNull(controller.updateHostComponents(schReqs));
-
-      Assert.assertEquals(3, s.getDesiredConfigs().size());
-      Assert.assertEquals(3, sc1.getDesiredConfigs().size());
-      Assert.assertEquals(4, sc2.getDesiredConfigs().size());
-      Assert.assertEquals(4, sch1.getDesiredConfigs().size());
-
-      Assert.assertEquals("v1",
-              sch1.getDesiredConfigs().get("typeA").getVersionTag());
-      Assert.assertEquals("v1",
-              sch1.getDesiredConfigs().get("typeC").getVersionTag());
-      Assert.assertEquals("v1",
-              sch1.getDesiredConfigs().get("typeE").getVersionTag());
-      Assert.assertEquals("v1",
-              sch1.getDesiredConfigs().get("typeB").getVersionTag());
-
-      // update configs at SC level
-      configVersions.clear();
-      configVersions.put("typeC", "v2");
-      configVersions.put("typeD", "v1");
-      scReqs.clear();
-      scReqs.add(new ServiceComponentRequest(clusterName, serviceName,
-              componentName1, configVersions, null));
-      Assert.assertNull(controller.updateComponents(scReqs));
-
-      Assert.assertEquals(3, s.getDesiredConfigs().size());
-      Assert.assertEquals(4, sc1.getDesiredConfigs().size());
-      Assert.assertEquals(4, sc2.getDesiredConfigs().size());
-      Assert.assertEquals(5, sch1.getDesiredConfigs().size());
-
-      Assert.assertEquals("v2",
-              sc1.getDesiredConfigs().get("typeA").getVersionTag());
-      Assert.assertEquals("v2",
-              sc1.getDesiredConfigs().get("typeC").getVersionTag());
-      Assert.assertEquals("v1",
-              sc2.getDesiredConfigs().get("typeD").getVersionTag());
-      Assert.assertEquals("v1",
-              sc1.getDesiredConfigs().get("typeE").getVersionTag());
-
-      Assert.assertEquals("v1",
-              sch1.getDesiredConfigs().get("typeA").getVersionTag());
-      Assert.assertEquals("v2",
-              sch1.getDesiredConfigs().get("typeC").getVersionTag());
-      Assert.assertEquals("v1",
-              sch1.getDesiredConfigs().get("typeD").getVersionTag());
-      Assert.assertEquals("v1",
-              sch1.getDesiredConfigs().get("typeE").getVersionTag());
-      Assert.assertEquals("v1",
-              sch1.getDesiredConfigs().get("typeB").getVersionTag());
-  }
-
-  @Test
-  public void testConfigUpdates() throws AmbariException {
-    String clusterName = "foo1";
-    createCluster(clusterName);
-    clusters.getCluster(clusterName)
-        .setDesiredStackVersion(new StackId("HDP-0.1"));
-    String serviceName = "HDFS";
-    createService(clusterName, serviceName, null);
-    String componentName1 = "NAMENODE";
-    String componentName2 = "DATANODE";
-    String componentName3 = "HDFS_CLIENT";
-    createServiceComponent(clusterName, serviceName, componentName1,
-        State.INIT);
-    createServiceComponent(clusterName, serviceName, componentName2,
-        State.INIT);
-    createServiceComponent(clusterName, serviceName, componentName3,
-        State.INIT);
-
-    String host1 = "h1";
-    clusters.addHost(host1);
-    clusters.getHost("h1").setOsType("centos5");
-    clusters.getHost("h1").persist();
-    String host2 = "h2";
-    clusters.addHost(host2);
-    clusters.getHost("h2").setOsType("centos6");
-    clusters.getHost("h2").persist();
-
-    clusters.mapHostToCluster(host1, clusterName);
-    clusters.mapHostToCluster(host2, clusterName);
-
-
-    // null service should work
-    createServiceComponentHost(clusterName, null, componentName1,
-        host1, null);
-    createServiceComponentHost(clusterName, serviceName, componentName2,
-        host1, null);
-    createServiceComponentHost(clusterName, serviceName, componentName2,
-        host2, null);
-    createServiceComponentHost(clusterName, serviceName, componentName3,
-        host1, null);
-    createServiceComponentHost(clusterName, serviceName, componentName3,
-        host2, null);
-
-    Assert.assertNotNull(clusters.getCluster(clusterName)
-        .getService(serviceName)
-        .getServiceComponent(componentName1)
-        .getServiceComponentHost(host1));
-    Assert.assertNotNull(clusters.getCluster(clusterName)
-        .getService(serviceName)
-        .getServiceComponent(componentName2)
-        .getServiceComponentHost(host1));
-    Assert.assertNotNull(clusters.getCluster(clusterName)
-        .getService(serviceName)
-        .getServiceComponent(componentName2)
-        .getServiceComponentHost(host2));
-    Assert.assertNotNull(clusters.getCluster(clusterName)
-        .getService(serviceName)
-        .getServiceComponent(componentName3)
-        .getServiceComponentHost(host1));
-    Assert.assertNotNull(clusters.getCluster(clusterName)
-        .getService(serviceName)
-        .getServiceComponent(componentName3)
-        .getServiceComponentHost(host2));
-
-    Map<String, String> configs = new HashMap<String, String>();
-    configs.put("a", "b");
-
-    ConfigurationRequest cr1, cr2, cr3, cr4, cr5, cr6, cr7, cr8;
-    cr1 = new ConfigurationRequest(clusterName, "typeA","v1", configs);
-    cr2 = new ConfigurationRequest(clusterName, "typeB","v1", configs);
-    cr3 = new ConfigurationRequest(clusterName, "typeC","v1", configs);
-    cr4 = new ConfigurationRequest(clusterName, "typeD","v1", configs);
-    cr5 = new ConfigurationRequest(clusterName, "typeA","v2", configs);
-    cr6 = new ConfigurationRequest(clusterName, "typeB","v2", configs);
-    cr7 = new ConfigurationRequest(clusterName, "typeC","v2", configs);
-    cr8 = new ConfigurationRequest(clusterName, "typeE","v1", configs);
-    controller.createConfiguration(cr1);
-    controller.createConfiguration(cr2);
-    controller.createConfiguration(cr3);
-    controller.createConfiguration(cr4);
-    controller.createConfiguration(cr5);
-    controller.createConfiguration(cr6);
-    controller.createConfiguration(cr7);
-    controller.createConfiguration(cr8);
-
-    Cluster cluster = clusters.getCluster(clusterName);
-    Service s = cluster.getService(serviceName);
-    ServiceComponent sc1 = s.getServiceComponent(componentName1);
-    ServiceComponent sc2 = s.getServiceComponent(componentName2);
-    ServiceComponentHost sch1 = sc1.getServiceComponentHost(host1);
-
-    Set<ServiceComponentHostRequest> schReqs =
-        new HashSet<ServiceComponentHostRequest>();
-    Set<ServiceComponentRequest> scReqs =
-        new HashSet<ServiceComponentRequest>();
-    Set<ServiceRequest> sReqs = new HashSet<ServiceRequest>();
-    Map<String, String> configVersions = new HashMap<String, String>();
-
-    // update configs at SCH and SC level
-    configVersions.clear();
-    configVersions.put("typeA", "v1");
-    configVersions.put("typeB", "v1");
-    configVersions.put("typeC", "v1");
-    schReqs.clear();
-    schReqs.add(new ServiceComponentHostRequest(clusterName, serviceName,
-        componentName1, host1, configVersions, null));
-    Assert.assertNull(controller.updateHostComponents(schReqs));
-
-    Assert.assertEquals(0, s.getDesiredConfigs().size());
-    Assert.assertEquals(0, sc1.getDesiredConfigs().size());
-    Assert.assertEquals(3, sch1.getDesiredConfigs().size());
-
-    configVersions.clear();
-    configVersions.put("typeC", "v1");
-    configVersions.put("typeD", "v1");
-    scReqs.clear();
-    scReqs.add(new ServiceComponentRequest(clusterName, serviceName,
-        componentName2, configVersions, null));
-    Assert.assertNull(controller.updateComponents(scReqs));
-
-    Assert.assertEquals(0, s.getDesiredConfigs().size());
-    Assert.assertEquals(0, sc1.getDesiredConfigs().size());
-    Assert.assertEquals(2, sc2.getDesiredConfigs().size());
-    Assert.assertEquals(3, sch1.getDesiredConfigs().size());
-
-    // update configs at service level
-    configVersions.clear();
-    configVersions.put("typeA", "v2");
-    configVersions.put("typeC", "v2");
-    configVersions.put("typeE", "v1");
-    sReqs.clear();
-    sReqs.add(new ServiceRequest(clusterName, serviceName, configVersions,
-        null));
-    Assert.assertNull(controller.updateServices(sReqs));
-
-    Assert.assertEquals(3, s.getDesiredConfigs().size());
-    Assert.assertEquals(3, sc1.getDesiredConfigs().size());
-    Assert.assertEquals(4, sc2.getDesiredConfigs().size());
-    Assert.assertEquals(4, sch1.getDesiredConfigs().size());
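-    // Service-level configs propagate downward: sc1, sc2 and sch1 pick up
-    // typeA, typeC and typeE, while the previously set typeD (on sc2) and
-    // typeB (on sch1) overrides are kept.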
-
-    Assert.assertEquals("v2",
-        s.getDesiredConfigs().get("typeA").getVersionTag());
-    Assert.assertEquals("v2",
-        s.getDesiredConfigs().get("typeC").getVersionTag());
-    Assert.assertEquals("v1",
-        s.getDesiredConfigs().get("typeE").getVersionTag());
-
-    Assert.assertEquals("v2",
-        sc1.getDesiredConfigs().get("typeA").getVersionTag());
-    Assert.assertEquals("v2",
-        sc1.getDesiredConfigs().get("typeC").getVersionTag());
-    Assert.assertEquals("v1",
-        sc1.getDesiredConfigs().get("typeE").getVersionTag());
-
-    Assert.assertEquals("v2",
-        sc2.getDesiredConfigs().get("typeA").getVersionTag());
-    Assert.assertEquals("v2",
-        sc2.getDesiredConfigs().get("typeC").getVersionTag());
-    Assert.assertEquals("v1",
-        sc2.getDesiredConfigs().get("typeE").getVersionTag());
-    Assert.assertEquals("v1",
-        sc2.getDesiredConfigs().get("typeD").getVersionTag());
-
-    Assert.assertEquals("v2",
-        sch1.getDesiredConfigs().get("typeA").getVersionTag());
-    Assert.assertEquals("v2",
-        sch1.getDesiredConfigs().get("typeC").getVersionTag());
-    Assert.assertEquals("v1",
-        sch1.getDesiredConfigs().get("typeE").getVersionTag());
-    Assert.assertEquals("v1",
-        sch1.getDesiredConfigs().get("typeB").getVersionTag());
-
-    // update configs at SCH level
-    configVersions.clear();
-    configVersions.put("typeA", "v1");
-    configVersions.put("typeB", "v1");
-    configVersions.put("typeC", "v1");
-    schReqs.clear();
-    schReqs.add(new ServiceComponentHostRequest(clusterName, serviceName,
-        componentName1, host1, configVersions, null));
-    Assert.assertNull(controller.updateHostComponents(schReqs));
-
-    Assert.assertEquals(3, s.getDesiredConfigs().size());
-    Assert.assertEquals(3, sc1.getDesiredConfigs().size());
-    Assert.assertEquals(4, sc2.getDesiredConfigs().size());
-    Assert.assertEquals(4, sch1.getDesiredConfigs().size());
-
-    Assert.assertEquals("v1",
-        sch1.getDesiredConfigs().get("typeA").getVersionTag());
-    Assert.assertEquals("v1",
-        sch1.getDesiredConfigs().get("typeC").getVersionTag());
-    Assert.assertEquals("v1",
-        sch1.getDesiredConfigs().get("typeE").getVersionTag());
-    Assert.assertEquals("v1",
-        sch1.getDesiredConfigs().get("typeB").getVersionTag());
-
-    // update configs at SC level
-    configVersions.clear();
-    configVersions.put("typeC", "v2");
-    configVersions.put("typeD", "v1");
-    scReqs.clear();
-    scReqs.add(new ServiceComponentRequest(clusterName, serviceName,
-        componentName1, configVersions, null));
-    Assert.assertNull(controller.updateComponents(scReqs));
-
-    Assert.assertEquals(3, s.getDesiredConfigs().size());
-    Assert.assertEquals(4, sc1.getDesiredConfigs().size());
-    Assert.assertEquals(4, sc2.getDesiredConfigs().size());
-    Assert.assertEquals(5, sch1.getDesiredConfigs().size());
-
-    Assert.assertEquals("v2",
-        sc1.getDesiredConfigs().get("typeA").getVersionTag());
-    Assert.assertEquals("v2",
-        sc1.getDesiredConfigs().get("typeC").getVersionTag());
-    Assert.assertEquals("v1",
-        sc2.getDesiredConfigs().get("typeD").getVersionTag());
-    Assert.assertEquals("v1",
-        sc1.getDesiredConfigs().get("typeE").getVersionTag());
-
-    Assert.assertEquals("v1",
-        sch1.getDesiredConfigs().get("typeA").getVersionTag());
-    Assert.assertEquals("v2",
-        sch1.getDesiredConfigs().get("typeC").getVersionTag());
-    Assert.assertEquals("v1",
-        sch1.getDesiredConfigs().get("typeD").getVersionTag());
-    Assert.assertEquals("v1",
-        sch1.getDesiredConfigs().get("typeE").getVersionTag());
-    Assert.assertEquals("v1",
-        sch1.getDesiredConfigs().get("typeB").getVersionTag());
-
-  }
-
-  @Test
-  public void testReConfigureService() throws Exception {
-    String clusterName = "foo1";
-    createCluster(clusterName);
-    clusters.getCluster(clusterName)
-      .setDesiredStackVersion(new StackId("HDP-0.1"));
-    String serviceName = "HDFS";
-    createService(clusterName, serviceName, null);
-    String componentName1 = "NAMENODE";
-    String componentName2 = "DATANODE";
-    String componentName3 = "HDFS_CLIENT";
-    createServiceComponent(clusterName, serviceName, componentName1,
-      State.INIT);
-    createServiceComponent(clusterName, serviceName, componentName2,
-      State.INIT);
-    createServiceComponent(clusterName, serviceName, componentName3,
-      State.INIT);
-
-    String host1 = "h1";
-    clusters.addHost(host1);
-    clusters.getHost("h1").setOsType("centos5");
-    clusters.getHost("h1").persist();
-    String host2 = "h2";
-    clusters.addHost(host2);
-    clusters.getHost("h2").setOsType("centos6");
-    clusters.getHost("h2").persist();
-
-    clusters.mapHostToCluster(host1, clusterName);
-    clusters.mapHostToCluster(host2, clusterName);
-
-
-    // null service should work
-    createServiceComponentHost(clusterName, null, componentName1,
-      host1, null);
-    createServiceComponentHost(clusterName, serviceName, componentName2,
-      host1, null);
-    createServiceComponentHost(clusterName, serviceName, componentName2,
-      host2, null);
-    createServiceComponentHost(clusterName, serviceName, componentName3,
-      host1, null);
-    createServiceComponentHost(clusterName, serviceName, componentName3,
-      host2, null);
-
-    // Install
-    ServiceRequest r = new ServiceRequest(clusterName, serviceName, null,
-      State.INSTALLED.toString());
-    Set<ServiceRequest> requests = new HashSet<ServiceRequest>();
-    requests.add(r);
-
-    controller.updateServices(requests);
-    Assert.assertEquals(State.INSTALLED,
-      clusters.getCluster(clusterName).getService(serviceName)
-        .getDesiredState());
-
-    // manually change live state to installed as no running action manager
-    for (ServiceComponent sc :
-      clusters.getCluster(clusterName).getService(serviceName)
-        .getServiceComponents().values()) {
-      for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
-        sch.setState(State.INSTALLED);
-      }
-    }
-
-    // Create and attach config
-    Map<String, String> configs = new HashMap<String, String>();
-    configs.put("a", "b");
-
-    ConfigurationRequest cr1,cr2,cr3;
-    cr1 = new ConfigurationRequest(clusterName, "core-site","version1",
-      configs);
-    cr2 = new ConfigurationRequest(clusterName, "hdfs-site","version1",
-      configs);
-    cr3 = new ConfigurationRequest(clusterName, "core-site","version122",
-      configs);
-    controller.createConfiguration(cr1);
-    controller.createConfiguration(cr2);
-    controller.createConfiguration(cr3);
-
-    Cluster cluster = clusters.getCluster(clusterName);
-    Service s = cluster.getService(serviceName);
-    ServiceComponent sc1 = s.getServiceComponent(componentName1);
-    ServiceComponent sc2 = s.getServiceComponent(componentName2);
-    ServiceComponentHost sch1 = sc1.getServiceComponentHost(host1);
-
-    Set<ServiceComponentHostRequest> schReqs =
-      new HashSet<ServiceComponentHostRequest>();
-    Set<ServiceComponentRequest> scReqs =
-      new HashSet<ServiceComponentRequest>();
-    Set<ServiceRequest> sReqs = new HashSet<ServiceRequest>();
-    Map<String, String> configVersions = new HashMap<String, String>();
-
-    // SCH level
-    configVersions.clear();
-    configVersions.put("core-site", "version1");
-    configVersions.put("hdfs-site", "version1");
-    schReqs.clear();
-    schReqs.add(new ServiceComponentHostRequest(clusterName, serviceName,
-      componentName1, host1, configVersions, null));
-    Assert.assertNull(controller.updateHostComponents(schReqs));
-    Assert.assertEquals(2, sch1.getDesiredConfigs().size());
-
-    // Reconfigure SCH level
-    configVersions.clear();
-    configVersions.put("core-site", "version122");
-    schReqs.clear();
-    schReqs.add(new ServiceComponentHostRequest(clusterName, serviceName,
-      componentName1, host1, configVersions, null));
-    Assert.assertNull(controller.updateHostComponents(schReqs));
-
-    // Clear the entity manager so the following reads go to the database
-    // instead of the persistence context cache
-    entityManager.clear();
-
-    Assert.assertEquals(2, sch1.getDesiredConfigs().size());
-    Assert.assertEquals("version122", sch1.getDesiredConfigs().get
-      ("core-site").getVersionTag());
-
-    // SC level
-    configVersions.clear();
-    configVersions.put("core-site", "version1");
-    configVersions.put("hdfs-site", "version1");
-    scReqs.add(new ServiceComponentRequest(clusterName, serviceName,
-      componentName2, configVersions, null));
-    Assert.assertNull(controller.updateComponents(scReqs));
-
-    scReqs.add(new ServiceComponentRequest(clusterName, serviceName,
-      componentName1, configVersions, null));
-    Assert.assertNull(controller.updateComponents(scReqs));
-    Assert.assertEquals(2, sc1.getDesiredConfigs().size());
-    Assert.assertEquals(2, sc2.getDesiredConfigs().size());
-
-    // Reconfigure SC level
-    configVersions.clear();
-    configVersions.put("core-site", "version122");
-
-    scReqs.clear();
-    scReqs.add(new ServiceComponentRequest(clusterName, serviceName,
-      componentName2, configVersions, null));
-    Assert.assertNull(controller.updateComponents(scReqs));
-
-    Assert.assertEquals(2, sc2.getDesiredConfigs().size());
-    Assert.assertEquals("version122", sc2.getDesiredConfigs().get
-      ("core-site").getVersionTag());
-    scReqs.clear();
-    scReqs.add(new ServiceComponentRequest(clusterName, serviceName,
-      componentName1, configVersions, null));
-    Assert.assertNull(controller.updateComponents(scReqs));
-
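-    // Detach cached entities so the assertions below read the persisted state.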
-    entityManager.clear();
-
-    Assert.assertEquals(2, sc1.getDesiredConfigs().size());
-    Assert.assertEquals("version122", sc1.getDesiredConfigs().get
-      ("core-site").getVersionTag());
-
-    // S level
-    configVersions.clear();
-    configVersions.put("core-site", "version1");
-    configVersions.put("hdfs-site", "version1");
-    sReqs.clear();
-    sReqs.add(new ServiceRequest(clusterName, serviceName, configVersions,
-      null));
-    Assert.assertNull(controller.updateServices(sReqs));
-    Assert.assertEquals(2, s.getDesiredConfigs().size());
-
-    // Reconfigure S level
-    configVersions.clear();
-    configVersions.put("core-site", "version122");
-
-    sReqs.clear();
-    sReqs.add(new ServiceRequest(clusterName, serviceName, configVersions,
-      null));
-    Assert.assertNull(controller.updateServices(sReqs));
-
-    entityManager.clear();
-
-    Assert.assertEquals(2, s.getDesiredConfigs().size());
-    Assert.assertEquals("version122", s.getDesiredConfigs().get
-      ("core-site").getVersionTag());
-  }
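Note: the method above walks the same (config type -> version tag) map through all three desired-config scopes, host component, component, then service. A condensed sketch of the host-component case, reusing the method's own fixtures and locals (controller, clusterName, serviceName, componentName1, host1, sch1); the assertions treat a null response from updateHostComponents as an update that required no new tasks:

    Map<String, String> configVersions = new HashMap<String, String>();
    configVersions.put("core-site", "version122");
    Set<ServiceComponentHostRequest> schReqs =
        new HashSet<ServiceComponentHostRequest>();
    schReqs.add(new ServiceComponentHostRequest(clusterName, serviceName,
        componentName1, host1, configVersions, null));
    // No tasks needed for a pure desired-config change.
    Assert.assertNull(controller.updateHostComponents(schReqs));
    // The new tag is visible at the host-component scope.
    Assert.assertEquals("version122",
        sch1.getDesiredConfigs().get("core-site").getVersionTag());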
-
-  @Test
-  public void testClientServiceSmokeTests() throws AmbariException {
-    String clusterName = "foo1";
-    createCluster(clusterName);
-    clusters.getCluster(clusterName)
-        .setDesiredStackVersion(new StackId("HDP-0.1"));
-    String serviceName = "PIG";
-    createService(clusterName, serviceName, null);
-    String componentName1 = "PIG";
-    createServiceComponent(clusterName, serviceName, componentName1,
-        State.INIT);
-
-    String host1 = "h1";
-    clusters.addHost(host1);
-    clusters.getHost("h1").persist();
-    String host2 = "h2";
-    clusters.addHost(host2);
-    clusters.getHost("h2").persist();
-
-    clusters.getHost("h1").setOsType("centos5");
-    clusters.getHost("h2").setOsType("centos6");
-    clusters.mapHostToCluster(host1, clusterName);
-    clusters.mapHostToCluster(host2, clusterName);
-
-
-    // null service should work
-    createServiceComponentHost(clusterName, null, componentName1,
-        host1, null);
-    createServiceComponentHost(clusterName, null, componentName1,
-        host2, null);
-
-    ServiceRequest r = new ServiceRequest(clusterName, serviceName, null,
-        State.INSTALLED.toString());
-    Set<ServiceRequest> requests = new HashSet<ServiceRequest>();
-    requests.add(r);
-
-    RequestStatusResponse trackAction =
-        controller.updateServices(requests);
-    Assert.assertEquals(State.INSTALLED,
-        clusters.getCluster(clusterName).getService(serviceName)
-        .getDesiredState());
-    for (ServiceComponent sc :
-      clusters.getCluster(clusterName).getService(serviceName)
-      .getServiceComponents().values()) {
-      Assert.assertEquals(State.INSTALLED, sc.getDesiredState());
-      for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
-        Assert.assertEquals(State.INSTALLED, sch.getDesiredState());
-        Assert.assertEquals(State.INIT, sch.getState());
-      }
-    }
-
-    List<ShortTaskStatus> taskStatuses = trackAction.getTasks();
-    Assert.assertEquals(2, taskStatuses.size());
-
-    List<Stage> stages = actionDB.getAllStages(trackAction.getRequestId());
-    Assert.assertEquals(1, stages.size());
-
-    for (ServiceComponent sc :
-      clusters.getCluster(clusterName).getService(serviceName)
-          .getServiceComponents().values()) {
-      for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
-        sch.setState(State.INSTALLED);
-      }
-    }
-
-    r = new ServiceRequest(clusterName, serviceName, null,
-        State.STARTED.toString());
-    requests.clear();
-    requests.add(r);
-
-    trackAction = controller.updateServices(requests);
-    Assert.assertNotNull(trackAction);
-    Assert.assertEquals(State.INSTALLED,
-        clusters.getCluster(clusterName).getService(serviceName)
-        .getDesiredState());
-    for (ServiceComponent sc :
-      clusters.getCluster(clusterName).getService(serviceName)
-          .getServiceComponents().values()) {
-      Assert.assertEquals(State.INSTALLED, sc.getDesiredState());
-      for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
-        Assert.assertEquals(State.INSTALLED, sch.getDesiredState());
-        Assert.assertEquals(State.INSTALLED, sch.getState());
-      }
-    }
-
-    stages = actionDB.getAllStages(trackAction.getRequestId());
-    for (Stage s : stages) {
-      LOG.info("Stage dump : " + s.toString());
-    }
-    Assert.assertEquals(1, stages.size());
-
-    taskStatuses = trackAction.getTasks();
-    Assert.assertEquals(1, taskStatuses.size());
-    Assert.assertEquals(Role.PIG_SERVICE_CHECK.toString(),
-        taskStatuses.get(0).getRole());
-
-
-  }
-
-}
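Note: the smoke test above relies on PIG being a pure client service. Moving it to STARTED leaves the desired state at INSTALLED and instead schedules a single PIG_SERVICE_CHECK task. A minimal sketch of that flow, assuming the deleted class's controller and clusters fixtures:

    ServiceRequest r = new ServiceRequest("foo1", "PIG", null,
        State.STARTED.toString());
    Set<ServiceRequest> requests = new HashSet<ServiceRequest>();
    requests.add(r);
    RequestStatusResponse track = controller.updateServices(requests);
    // Client services have nothing to start; the request degenerates
    // into a service check carried out by exactly one task.
    Assert.assertEquals(State.INSTALLED,
        clusters.getCluster("foo1").getService("PIG").getDesiredState());
    Assert.assertEquals(Role.PIG_SERVICE_CHECK.toString(),
        track.getTasks().get(0).getRole());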
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/ClusterRequestTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/ClusterRequestTest.java
deleted file mode 100644
index c1ce0f3..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/ClusterRequestTest.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller;
-
-import java.util.HashSet;
-import java.util.Set;
-
-import org.apache.ambari.server.state.StackId;
-import org.junit.Assert;
-import org.junit.Test;
-
-public class ClusterRequestTest {
-
-  @Test
-  public void testBasicGetAndSet() {
-    Long clusterId = new Long(10);
-    String clusterName = "foo";
-    StackId stackVersion = new StackId("HDP-1.0.1");
-    Set<String> hostNames = new HashSet<String>();
-    hostNames.add("h1");
-
-    ClusterRequest r1 =
-        new ClusterRequest(clusterId, clusterName,
-            stackVersion.getStackId(), hostNames);
-
-    Assert.assertEquals(clusterId, r1.getClusterId());
-    Assert.assertEquals(clusterName, r1.getClusterName());
-    Assert.assertEquals(stackVersion.getStackId(),
-        r1.getStackVersion());
-    Assert.assertArrayEquals(hostNames.toArray(), r1.getHostNames().toArray());
-
-    r1.setClusterId(++clusterId);
-    r1.setHostNames(hostNames);
-    r1.setClusterName("foo1");
-    r1.setStackVersion("HDP-1.0.2");
-
-    hostNames.add("h2");
-
-    Assert.assertEquals(clusterId, r1.getClusterId());
-    Assert.assertEquals("foo1", r1.getClusterName());
-    Assert.assertEquals("HDP-1.0.2", r1.getStackVersion());
-    Assert.assertArrayEquals(hostNames.toArray(), r1.getHostNames().toArray());
-
-  }
-
-  @Test
-  public void testToString() {
-    ClusterRequest r1 = new ClusterRequest(null, null, null, null);
-    r1.toString();
-  }
-
-}
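Note: the final assertArrayEquals in testBasicGetAndSet only passes if setHostNames keeps a reference to the caller's set rather than copying it; "h2" is added after the setter runs, yet shows up in getHostNames(). A sketch isolating that behavior (an observation from the test, not a documented contract):

    Set<String> hostNames = new HashSet<String>();
    hostNames.add("h1");
    ClusterRequest r = new ClusterRequest(Long.valueOf(10), "foo",
        "HDP-1.0.1", hostNames);
    r.setHostNames(hostNames);
    hostNames.add("h2");     // visible through the request as well
    Assert.assertEquals(2, r.getHostNames().size());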
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/ClusterResponseTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/ClusterResponseTest.java
deleted file mode 100644
index 7d37d83..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/ClusterResponseTest.java
+++ /dev/null
@@ -1,51 +0,0 @@
-package org.apache.ambari.server.controller;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.util.HashSet;
-import java.util.Set;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-public class ClusterResponseTest {
-
-  @Test
-  public void testBasicGetAndSet() {
-    Long clusterId = new Long(10);
-    String clusterName = "foo";
-    Set<String> hostNames = new HashSet<String>();
-    hostNames.add("h1");
-
-    ClusterResponse r1 =
-        new ClusterResponse(clusterId, clusterName, hostNames, "bar");
-
-    Assert.assertEquals(clusterId, r1.getClusterId());
-    Assert.assertEquals(clusterName, r1.getClusterName());
-    Assert.assertArrayEquals(hostNames.toArray(), r1.getHostNames().toArray());
-    Assert.assertEquals("bar", r1.getDesiredStackVersion());
-
-  }
-
-  @Test
-  public void testToString() {
-    ClusterResponse r = new ClusterResponse(null, null, null, null);
-    r.toString();
-  }
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/ganglia/GangliaHelperTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/ganglia/GangliaHelperTest.java
deleted file mode 100644
index 0237207..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/ganglia/GangliaHelperTest.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.ganglia;
-
-import org.junit.Ignore;
-import org.junit.Test;
-
-import java.text.SimpleDateFormat;
-import java.util.*;
-
-/**
- * Tests for GangliaHelper.
- */
-public class GangliaHelperTest {
-
-
-  @Ignore
-  @Test
-  public void testGetGangliaMetrics() throws Exception {
-    //MM/dd/yy HH:mm:ss
-
-    String target = "ec2-107-22-86-120.compute-1.amazonaws.com";
-    String cluster = "HDPNameNode";
-    String host = "domU-12-31-39-15-25-C7.compute-1.internal";
-    Date startTime = new SimpleDateFormat("MM/dd/yy HH:mm:ss").parse("09/12/12 10:00:00");
-    Date endTime = new SimpleDateFormat("MM/dd/yy HH:mm:ss").parse("09/12/12 16:15:00");
-    long step = 60;
-//        String api  = "rpcdetailed.rpcdetailed.sendHeartbeat_num_ops";
-    String metric = "cpu_nice";
-
-//    List<GangliaMetric> metrics = GangliaHelper.getGangliaMetrics(target, cluster, host, metric, startTime, endTime, step);
-
-    // TODO: add assertions once the GangliaHelper call is re-enabled
-  }
-}
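Note: a hypothetical fragment, not from the patch. The rrd.py URLs asserted in the provider tests below pass start/end as plain numbers (s=10&e=20), so a Date parsed as in the ignored test above would typically be reduced to epoch seconds before the query is built; the conversion shown here is an assumption about GangliaHelper's behavior:

    // Inside a test method declaring "throws Exception".
    Date startTime = new SimpleDateFormat("MM/dd/yy HH:mm:ss")
        .parse("09/12/12 10:00:00");
    long startSeconds = startTime.getTime() / 1000L;   // millis -> seconds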
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/ganglia/GangliaPropertyProviderTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/ganglia/GangliaPropertyProviderTest.java
deleted file mode 100644
index d651f88..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/ganglia/GangliaPropertyProviderTest.java
+++ /dev/null
@@ -1,201 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.ganglia;
-
-import org.apache.ambari.server.controller.internal.ResourceImpl;
-import org.apache.ambari.server.controller.internal.TemporalInfoImpl;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.spi.TemporalInfo;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Test the Ganglia property provider.
- */
-public class GangliaPropertyProviderTest {
-
-  private static final String PROPERTY_ID = PropertyHelper.getPropertyId("metrics/jvm", "gcCount");
-  private static final String CLUSTER_NAME_PROPERTY_ID = PropertyHelper.getPropertyId("HostRoles", "cluster_name");
-  private static final String HOST_NAME_PROPERTY_ID = PropertyHelper.getPropertyId("HostRoles", "host_name");
-  private static final String COMPONENT_NAME_PROPERTY_ID = PropertyHelper.getPropertyId("HostRoles", "component_name");
-
-  @Test
-  public void testGetResources() throws Exception {
-    TestStreamProvider streamProvider  = new TestStreamProvider();
-    TestGangliaHostProvider hostProvider = new TestGangliaHostProvider();
-
-    GangliaPropertyProvider propertyProvider = new GangliaHostComponentPropertyProvider(
-        PropertyHelper.getGangliaPropertyIds(Resource.Type.HostComponent),
-        streamProvider,
-        hostProvider,
-        CLUSTER_NAME_PROPERTY_ID,
-        HOST_NAME_PROPERTY_ID,
-        COMPONENT_NAME_PROPERTY_ID);
-
-    // namenode
-    Resource resource = new ResourceImpl(Resource.Type.HostComponent);
-
-    resource.setProperty(HOST_NAME_PROPERTY_ID, "domU-12-31-39-0E-34-E1.compute-1.internal");
-    resource.setProperty(COMPONENT_NAME_PROPERTY_ID, "DATANODE");
-
-    // only ask for one property
-    Map<String, TemporalInfo> temporalInfoMap = new HashMap<String, TemporalInfo>();
-    temporalInfoMap.put(PROPERTY_ID, new TemporalInfoImpl(10L, 20L, 1L));
-    Request  request = PropertyHelper.getReadRequest(Collections.singleton(PROPERTY_ID), temporalInfoMap);
-
-    Assert.assertEquals(1, propertyProvider.populateResources(Collections.singleton(resource), request, null).size());
-
-    Assert.assertEquals("http://domU-12-31-39-0E-34-E1.compute-1.internal/cgi-bin/rrd.py?c=HDPSlaves&h=domU-12-31-39-0E-34-E1.compute-1.internal&m=jvm.metrics.gcCount&s=10&e=20&r=1",
-        streamProvider.getLastSpec());
-
-    Assert.assertEquals(3, PropertyHelper.getProperties(resource).size());
-    Assert.assertNotNull(resource.getPropertyValue(PROPERTY_ID));
-
-
-    // tasktracker
-    resource = new ResourceImpl(Resource.Type.HostComponent);
-    resource.setProperty(HOST_NAME_PROPERTY_ID, "domU-12-31-39-0E-34-E1.compute-1.internal");
-    resource.setProperty(COMPONENT_NAME_PROPERTY_ID, "TASKTRACKER");
-
-    // only ask for one property
-    temporalInfoMap = new HashMap<String, TemporalInfo>();
-
-    //http://ec2-174-129-152-147.compute-1.amazonaws.com/cgi-bin/rrd.py?c=HDPSlaves&m=jvm.metrics.gcCount,mapred.shuffleOutput.shuffle_exceptions_caught,mapred.shuffleOutput.shuffle_failed_outputs,mapred.shuffleOutput.shuffle_output_bytes,mapred.shuffleOutput.shuffle_success_outputs&s=10&e=20&r=1&h=ip-10-85-111-149.ec2.internal
-
-    Set<String> properties = new HashSet<String>();
-    String shuffle_exceptions_caught = PropertyHelper.getPropertyId("metrics/mapred/shuffleOutput", "shuffle_exceptions_caught");
-    String shuffle_failed_outputs    = PropertyHelper.getPropertyId("metrics/mapred/shuffleOutput", "shuffle_failed_outputs");
-    String shuffle_output_bytes      = PropertyHelper.getPropertyId("metrics/mapred/shuffleOutput", "shuffle_output_bytes");
-    String shuffle_success_outputs   = PropertyHelper.getPropertyId("metrics/mapred/shuffleOutput", "shuffle_success_outputs");
-
-    properties.add(shuffle_exceptions_caught);
-    properties.add(shuffle_failed_outputs);
-    properties.add(shuffle_output_bytes);
-    properties.add(shuffle_success_outputs);
-    request = PropertyHelper.getReadRequest(properties, temporalInfoMap);
-
-    temporalInfoMap.put(shuffle_exceptions_caught, new TemporalInfoImpl(10L, 20L, 1L));
-    temporalInfoMap.put(shuffle_failed_outputs, new TemporalInfoImpl(10L, 20L, 1L));
-    temporalInfoMap.put(shuffle_output_bytes, new TemporalInfoImpl(10L, 20L, 1L));
-    temporalInfoMap.put(shuffle_success_outputs, new TemporalInfoImpl(10L, 20L, 1L));
-
-    Assert.assertEquals(1, propertyProvider.populateResources(Collections.singleton(resource), request, null).size());
-
-    Assert.assertEquals("http://domU-12-31-39-0E-34-E1.compute-1.internal/cgi-bin/rrd.py?c=HDPSlaves&h=domU-12-31-39-0E-34-E1.compute-1.internal&m=mapred.shuffleOutput.shuffle_output_bytes,mapred.shuffleOutput.shuffle_success_outputs,mapred.shuffleOutput.shuffle_failed_outputs,mapred.shuffleOutput.shuffle_exceptions_caught&s=10&e=20&r=1",
-        streamProvider.getLastSpec());
-
-    Assert.assertEquals(6, PropertyHelper.getProperties(resource).size());
-    Assert.assertNotNull(resource.getPropertyValue(shuffle_exceptions_caught));
-    Assert.assertNotNull(resource.getPropertyValue(shuffle_failed_outputs));
-    Assert.assertNotNull(resource.getPropertyValue(shuffle_output_bytes));
-    Assert.assertNotNull(resource.getPropertyValue(shuffle_success_outputs));
-  }
-
-
-  @Test
-  public void testGetManyResources() throws Exception {
-    TestStreamProvider streamProvider  = new TestStreamProvider();
-    TestGangliaHostProvider hostProvider = new TestGangliaHostProvider();
-
-    GangliaPropertyProvider propertyProvider = new GangliaHostPropertyProvider(
-        PropertyHelper.getGangliaPropertyIds(Resource.Type.Host),
-        streamProvider,
-        hostProvider,
-        CLUSTER_NAME_PROPERTY_ID,
-        HOST_NAME_PROPERTY_ID
-    );
-
-    Set<Resource> resources = new HashSet<Resource>();
-
-    // host
-    Resource resource = new ResourceImpl(Resource.Type.Host);
-    resource.setProperty(HOST_NAME_PROPERTY_ID, "domU-12-31-39-0E-34-E1.compute-1.internal");
-    resources.add(resource);
-
-    resource = new ResourceImpl(Resource.Type.Host);
-    resource.setProperty(HOST_NAME_PROPERTY_ID, "domU-12-31-39-0E-34-E2.compute-1.internal");
-    resources.add(resource);
-
-    resource = new ResourceImpl(Resource.Type.Host);
-    resource.setProperty(HOST_NAME_PROPERTY_ID, "domU-12-31-39-0E-34-E3.compute-1.internal");
-    resources.add(resource);
-
-    // only ask for one property
-    Map<String, TemporalInfo> temporalInfoMap = new HashMap<String, TemporalInfo>();
-    temporalInfoMap.put(PROPERTY_ID, new TemporalInfoImpl(10L, 20L, 1L));
-    Request  request = PropertyHelper.getReadRequest(Collections.singleton(PROPERTY_ID), temporalInfoMap);
-
-    Assert.assertEquals(3, propertyProvider.populateResources(resources, request, null).size());
-    Assert.assertEquals("http://domU-12-31-39-0E-34-E1.compute-1.internal/cgi-bin/rrd.py?c=HDPJobTracker,HDPHBaseMaster,HDPSlaves,HDPNameNode&h=domU-12-31-39-0E-34-E3.compute-1.internal,domU-12-31-39-0E-34-E1.compute-1.internal,domU-12-31-39-0E-34-E2.compute-1.internal&m=jvm.metrics.gcCount&s=10&e=20&r=1",
-        streamProvider.getLastSpec());
-
-    for (Resource res : resources) {
-      Assert.assertEquals(2, PropertyHelper.getProperties(res).size());
-      Assert.assertNotNull(res.getPropertyValue(PROPERTY_ID));
-    }
-  }
-
-  @Test
-  public void testPopulateResources__LargeNumberOfHostResources() throws Exception {
-    TestStreamProvider streamProvider  = new TestStreamProvider();
-    TestGangliaHostProvider hostProvider = new TestGangliaHostProvider();
-
-    GangliaPropertyProvider propertyProvider = new GangliaHostPropertyProvider(
-        PropertyHelper.getGangliaPropertyIds(Resource.Type.Host),
-        streamProvider,
-        hostProvider,
-        CLUSTER_NAME_PROPERTY_ID,
-        HOST_NAME_PROPERTY_ID
-    );
-
-    Set<Resource> resources = new HashSet<Resource>();
-
-    for (int i = 0; i < 150; ++i) {
-      Resource resource = new ResourceImpl(Resource.Type.Host);
-      resource.setProperty(HOST_NAME_PROPERTY_ID, "host" + i);
-      resources.add(resource);
-    }
-
-    // only ask for one property
-    Map<String, TemporalInfo> temporalInfoMap = new HashMap<String, TemporalInfo>();
-    temporalInfoMap.put(PROPERTY_ID, new TemporalInfoImpl(10L, 20L, 1L));
-    Request  request = PropertyHelper.getReadRequest(Collections.singleton(PROPERTY_ID), temporalInfoMap);
-
-    Assert.assertEquals(150, propertyProvider.populateResources(resources, request, null).size());
-
-    Assert.assertEquals("http://domU-12-31-39-0E-34-E1.compute-1.internal/cgi-bin/rrd.py?c=HDPJobTracker,HDPHBaseMaster,HDPSlaves,HDPNameNode&m=jvm.metrics.gcCount&s=10&e=20&r=1",
-        streamProvider.getLastSpec());
-
-  }
-
-  private static class TestGangliaHostProvider implements GangliaHostProvider {
-
-    @Override
-    public String getGangliaCollectorHostName(String clusterName) {
-      return "domU-12-31-39-0E-34-E1.compute-1.internal";
-    }
-  }
-}
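Note: the spec strings asserted in these tests all share one shape. A hypothetical sketch of how such a query could be assembled (parameter names are taken from the asserted URLs; the helper itself is not part of the patch):

    /**
     * Builds an rrd.py query: c=cluster set, h=hosts, m=metric ids,
     * s/e=start and end, r=step. Hypothetical illustration only.
     */
    static String gangliaSpec(String collector, String clusters, String hosts,
                              String metrics, long start, long end, long step) {
      return "http://" + collector + "/cgi-bin/rrd.py?c=" + clusters
          + "&h=" + hosts + "&m=" + metrics
          + "&s=" + start + "&e=" + end + "&r=" + step;
    }

The 150-host assertion above also shows that the h= parameter is dropped entirely once the host list grows large enough.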
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/ganglia/TestStreamProvider.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/ganglia/TestStreamProvider.java
deleted file mode 100644
index ca9f1a3..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/ganglia/TestStreamProvider.java
+++ /dev/null
@@ -1,39 +0,0 @@
-package org.apache.ambari.server.controller.ganglia;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.ambari.server.controller.utilities.StreamProvider;
-
-import java.io.IOException;
-import java.io.InputStream;
-
-public class TestStreamProvider implements StreamProvider {
-
-  private String lastSpec;
-
-  @Override
-  public InputStream readFrom(String spec) throws IOException {
-    lastSpec = spec;
-    return ClassLoader.getSystemResourceAsStream("temporal_ganglia_data.txt");
-  }
-
-  public String getLastSpec() {
-    return lastSpec;
-  }
-}
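Note: typical use of this stub in the Ganglia provider tests above; it records every spec it is asked to open and serves canned RRD data from the test classpath (temporal_ganglia_data.txt). The URL below is hypothetical:

    TestStreamProvider streamProvider = new TestStreamProvider();
    InputStream in = streamProvider.readFrom(
        "http://ganglia.example/cgi-bin/rrd.py?m=jvm.metrics.gcCount");
    // The provider under test can now be checked against the exact
    // URL it requested.
    Assert.assertEquals(
        "http://ganglia.example/cgi-bin/rrd.py?m=jvm.metrics.gcCount",
        streamProvider.getLastSpec());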
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerClusterProviderTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerClusterProviderTest.java
deleted file mode 100644
index 2bc35d1..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerClusterProviderTest.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.gsinstaller;
-
-import junit.framework.Assert;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.PredicateBuilder;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.junit.Test;
-
-import java.util.HashMap;
-import java.util.Set;
-
-/**
- * Tests for GSInstallerClusterProvider
- */
-public class GSInstallerClusterProviderTest {
-
-  @Test
-  public void testGetResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-
-    GSInstallerResourceProvider provider = new GSInstallerClusterProvider(clusterDefinition);
-    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), null);
-    Assert.assertEquals(1, resources.size());
-    Assert.assertEquals("ambari", resources.iterator().next().getPropertyValue(GSInstallerClusterProvider.CLUSTER_NAME_PROPERTY_ID));
-  }
-
-  @Test
-  public void testGetResourcesWithPredicate() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-
-    GSInstallerResourceProvider provider = new GSInstallerClusterProvider(clusterDefinition);
-
-    Predicate predicate = new PredicateBuilder().property(GSInstallerClusterProvider.CLUSTER_NAME_PROPERTY_ID).equals("ambari").toPredicate();
-    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertEquals(1, resources.size());
-    Resource next = resources.iterator().next();
-    Assert.assertEquals("ambari",    next.getPropertyValue(GSInstallerClusterProvider.CLUSTER_NAME_PROPERTY_ID));
-    Assert.assertEquals("HDP-1.2.0", next.getPropertyValue(GSInstallerClusterProvider.CLUSTER_VERSION_PROPERTY_ID));
-
-    predicate = new PredicateBuilder().property(GSInstallerClusterProvider.CLUSTER_NAME_PROPERTY_ID).equals("non-existent Cluster").toPredicate();
-    resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertTrue(resources.isEmpty());
-  }
-
-  @Test
-  public void testCreateResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerClusterProvider(clusterDefinition);
-
-    try {
-      provider.createResources(PropertyHelper.getReadRequest());
-      Assert.fail("Expected UnsupportedOperationException.");
-    } catch (UnsupportedOperationException e) {
-      //expected
-    }
-  }
-
-  @Test
-  public void testUpdateResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerClusterProvider(clusterDefinition);
-
-    try {
-      provider.updateResources(PropertyHelper.getUpdateRequest(new HashMap<String, Object>()), null);
-      Assert.fail("Expected UnsupportedOperationException.");
-    } catch (UnsupportedOperationException e) {
-      //expected
-    }
-  }
-
-  @Test
-  public void testDeleteResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerClusterProvider(clusterDefinition);
-
-    try {
-      provider.deleteResources(null);
-      Assert.fail("Expected UnsupportedOperationException.");
-    } catch (UnsupportedOperationException e) {
-      //expected
-    }
-  }
-}
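Note: each read-only GSInstaller provider test repeats the same try/fail/catch block for create, update, and delete. A hypothetical JUnit helper condensing that pattern (not part of the patch; the anonymous-class call style keeps it compatible with the Java 6-era code here):

    /** Hypothetical: runs an operation and asserts it is unsupported. */
    interface Op { void run() throws Exception; }

    static void assertUnsupported(Op op) throws Exception {
      try {
        op.run();
        Assert.fail("Expected UnsupportedOperationException.");
      } catch (UnsupportedOperationException expected) {
        // expected: GSInstaller providers are read-only
      }
    }

    // Usage:
    assertUnsupported(new Op() {
      @Override public void run() throws Exception {
        provider.deleteResources(null);
      }
    });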
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerComponentProviderTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerComponentProviderTest.java
deleted file mode 100644
index 38e3d64..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerComponentProviderTest.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.gsinstaller;
-
-import junit.framework.Assert;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.PredicateBuilder;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.junit.Test;
-
-import java.util.HashMap;
-import java.util.Set;
-
-/**
- * Tests for GSInstallerComponentProvider.
- */
-public class GSInstallerComponentProviderTest {
-
-  @Test
-  public void testGetResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerComponentProvider(clusterDefinition);
-    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), null);
-    Assert.assertEquals(25, resources.size());
-  }
-
-  @Test
-  public void testGetResourcesWithPredicate() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerComponentProvider(clusterDefinition);
-    Predicate predicate = new PredicateBuilder().property(GSInstallerComponentProvider.COMPONENT_COMPONENT_NAME_PROPERTY_ID).equals("TASKTRACKER").toPredicate();
-    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertEquals(1, resources.size());
-
-    predicate = new PredicateBuilder().property(GSInstallerComponentProvider.COMPONENT_COMPONENT_NAME_PROPERTY_ID).equals("TASKTRACKER").or().
-        property(GSInstallerComponentProvider.COMPONENT_COMPONENT_NAME_PROPERTY_ID).equals("GANGLIA_MONITOR").toPredicate();
-    resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertEquals(2, resources.size());
-
-    predicate = new PredicateBuilder().property(GSInstallerComponentProvider.COMPONENT_COMPONENT_NAME_PROPERTY_ID).equals("BadComponent").toPredicate();
-    resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertTrue(resources.isEmpty());
-  }
-
-  @Test
-  public void testCreateResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerComponentProvider(clusterDefinition);
-
-    try {
-      provider.createResources(PropertyHelper.getReadRequest());
-      Assert.fail("Expected UnsupportedOperationException.");
-    } catch (UnsupportedOperationException e) {
-      //expected
-    }
-  }
-
-  @Test
-  public void testUpdateResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerComponentProvider(clusterDefinition);
-
-    try {
-      provider.updateResources(PropertyHelper.getUpdateRequest(new HashMap<String, Object>()), null);
-      Assert.fail("Expected UnsupportedOperationException.");
-    } catch (UnsupportedOperationException e) {
-      //expected
-    }
-  }
-
-  @Test
-  public void testDeleteResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerComponentProvider(clusterDefinition);
-
-    try {
-      provider.deleteResources(null);
-      Assert.fail("Expected UnsupportedOperationException.");
-    } catch (UnsupportedOperationException e) {
-      //expected
-    }
-  }
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerHostComponentProviderTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerHostComponentProviderTest.java
deleted file mode 100644
index d109612..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerHostComponentProviderTest.java
+++ /dev/null
@@ -1,147 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.gsinstaller;
-
-import junit.framework.Assert;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.PredicateBuilder;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.junit.Test;
-
-import java.util.HashMap;
-import java.util.Set;
-
-/**
- * Tests for GSInstallerHostComponentProvider.
- */
-public class GSInstallerHostComponentProviderTest {
-
-  @Test
-  public void testGetResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerHostComponentProvider(clusterDefinition);
-    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), null);
-    Assert.assertEquals(33, resources.size());
-  }
-
-  @Test
-  public void testGetResourcesWithPredicate() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerHostComponentProvider(clusterDefinition);
-    Predicate predicate = new PredicateBuilder().property(GSInstallerHostComponentProvider.HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID).equals("MAPREDUCE").toPredicate();
-    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertEquals(5, resources.size());
-
-    predicate = new PredicateBuilder().property(GSInstallerHostComponentProvider.HOST_COMPONENT_HOST_NAME_PROPERTY_ID).equals("UnknownHost").toPredicate();
-    resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertTrue(resources.isEmpty());
-  }
-
-  @Test
-  public void testGetResourcesCheckState() throws Exception {
-    TestGSInstallerStateProvider stateProvider = new TestGSInstallerStateProvider();
-    ClusterDefinition clusterDefinition = new ClusterDefinition(stateProvider, 500);
-    GSInstallerResourceProvider provider = new GSInstallerHostComponentProvider(clusterDefinition);
-    Predicate predicate = new PredicateBuilder().property(GSInstallerHostComponentProvider.HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID).equals("HBASE_REGIONSERVER").toPredicate();
-    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertEquals(3, resources.size());
-
-    for (Resource resource : resources) {
-      Assert.assertEquals("STARTED", resource.getPropertyValue(GSInstallerHostComponentProvider.HOST_COMPONENT_STATE_PROPERTY_ID));
-    }
-
-    stateProvider.setHealthy(false);
-
-    // need to wait for old state value to expire
-    Thread.sleep(501);
-
-    resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertEquals(3, resources.size());
-
-    for (Resource resource : resources) {
-      Assert.assertEquals("INIT", resource.getPropertyValue(GSInstallerHostComponentProvider.HOST_COMPONENT_STATE_PROPERTY_ID));
-    }
-  }
-
-  @Test
-  public void testGetResourcesCheckStateFromCategory() throws Exception {
-    TestGSInstallerStateProvider stateProvider = new TestGSInstallerStateProvider();
-    ClusterDefinition clusterDefinition = new ClusterDefinition(stateProvider, 500);
-    GSInstallerResourceProvider provider = new GSInstallerHostComponentProvider(clusterDefinition);
-    Predicate predicate = new PredicateBuilder().property(GSInstallerHostComponentProvider.HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID).equals("HBASE_REGIONSERVER").toPredicate();
-    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest("HostRoles"), predicate);
-    Assert.assertEquals(3, resources.size());
-
-    for (Resource resource : resources) {
-      Assert.assertEquals("STARTED", resource.getPropertyValue(GSInstallerHostComponentProvider.HOST_COMPONENT_STATE_PROPERTY_ID));
-    }
-
-    stateProvider.setHealthy(false);
-
-    // need to wait for old state value to expire
-    Thread.sleep(501);
-
-    resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertEquals(3, resources.size());
-
-    for (Resource resource : resources) {
-      Assert.assertEquals("INIT", resource.getPropertyValue(GSInstallerHostComponentProvider.HOST_COMPONENT_STATE_PROPERTY_ID));
-    }
-  }
-
-  @Test
-  public void testCreateResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerHostComponentProvider(clusterDefinition);
-
-    try {
-      provider.createResources(PropertyHelper.getReadRequest());
-      Assert.fail("Expected UnsupportedOperationException.");
-    } catch (UnsupportedOperationException e) {
-      //expected
-    }
-  }
-
-  @Test
-  public void testUpdateResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerHostComponentProvider(clusterDefinition);
-
-    try {
-      provider.updateResources(PropertyHelper.getUpdateRequest(new HashMap<String, Object>()), null);
-      Assert.fail("Expected UnsupportedOperationException.");
-    } catch (UnsupportedOperationException e) {
-      //expected
-    }
-  }
-
-  @Test
-  public void testDeleteResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerHostComponentProvider(clusterDefinition);
-
-    try {
-      provider.deleteResources(null);
-      Assert.fail("Expected UnsupportedOperationException.");
-    } catch (UnsupportedOperationException e) {
-      //expected
-    }
-  }
-}
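Note: the 500 passed to ClusterDefinition in the state-check tests reads as a state-cache timeout in milliseconds; flipping the stub's health flag only changes the reported state after the tests sleep past it. The pattern, condensed (the TTL interpretation is inferred from the tests, not from documentation):

    TestGSInstallerStateProvider stateProvider = new TestGSInstallerStateProvider();
    ClusterDefinition clusterDefinition = new ClusterDefinition(stateProvider, 500);
    // First read caches "STARTED"/"HEALTHY" values...
    stateProvider.setHealthy(false);
    Thread.sleep(501);   // ...which only expire once the 500 ms TTL passes.
    // Subsequent reads now report "INIT".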
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerHostProviderTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerHostProviderTest.java
deleted file mode 100644
index 8bc151f..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerHostProviderTest.java
+++ /dev/null
@@ -1,151 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.gsinstaller;
-
-import junit.framework.Assert;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.PredicateBuilder;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.junit.Test;
-
-import java.util.HashMap;
-import java.util.Set;
-
-/**
- * Tests for GSInstallerHostProvider.
- */
-public class GSInstallerHostProviderTest {
-
-  @Test
-  public void testGetResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerHostProvider(clusterDefinition);
-    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), null);
-    Assert.assertEquals(5, resources.size());
-  }
-
-  @Test
-  public void testGetResourcesWithPredicate() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerHostProvider(clusterDefinition);
-    Predicate predicate = new PredicateBuilder().property(GSInstallerHostProvider.HOST_NAME_PROPERTY_ID).equals("ip-10-190-97-104.ec2.internal").toPredicate();
-    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertEquals(1, resources.size());
-
-    predicate = new PredicateBuilder().property(GSInstallerHostProvider.HOST_NAME_PROPERTY_ID).equals("ip-10-190-97-104.ec2.internal").or().
-        property(GSInstallerHostProvider.HOST_NAME_PROPERTY_ID).equals("ip-10-8-113-183.ec2.internal").toPredicate();
-    resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertEquals(2, resources.size());
-
-    predicate = new PredicateBuilder().property(GSInstallerHostProvider.HOST_NAME_PROPERTY_ID).equals("unknownHost").toPredicate();
-    resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertTrue(resources.isEmpty());
-  }
-
-  @Test
-  public void testGetResourcesCheckState() throws Exception {
-    TestGSInstallerStateProvider stateProvider = new TestGSInstallerStateProvider();
-    ClusterDefinition clusterDefinition = new ClusterDefinition(stateProvider, 500);
-    GSInstallerResourceProvider provider = new GSInstallerHostProvider(clusterDefinition);
-    Predicate predicate = new PredicateBuilder().property(GSInstallerHostProvider.HOST_NAME_PROPERTY_ID).equals("ip-10-190-97-104.ec2.internal").toPredicate();
-    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertEquals(1, resources.size());
-
-    Resource resource = resources.iterator().next();
-
-    Assert.assertEquals("HEALTHY", resource.getPropertyValue(GSInstallerHostProvider.HOST_STATE_PROPERTY_ID));
-
-    stateProvider.setHealthy(false);
-
-    // need to wait for old state value to expire
-    Thread.sleep(501);
-
-    resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertEquals(1, resources.size());
-
-    resource = resources.iterator().next();
-    Assert.assertEquals("INIT", resource.getPropertyValue(GSInstallerHostProvider.HOST_STATE_PROPERTY_ID));
-  }
-
-  @Test
-  public void testGetResourcesCheckStateFromCategory() throws Exception {
-    TestGSInstallerStateProvider stateProvider = new TestGSInstallerStateProvider();
-    ClusterDefinition clusterDefinition = new ClusterDefinition(stateProvider, 500);
-    GSInstallerResourceProvider provider = new GSInstallerHostProvider(clusterDefinition);
-    Predicate predicate = new PredicateBuilder().property(GSInstallerHostProvider.HOST_NAME_PROPERTY_ID).equals("ip-10-190-97-104.ec2.internal").toPredicate();
-    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest("Hosts"), predicate);
-    Assert.assertEquals(1, resources.size());
-
-    Resource resource = resources.iterator().next();
-
-    Assert.assertEquals("HEALTHY", resource.getPropertyValue(GSInstallerHostProvider.HOST_STATE_PROPERTY_ID));
-
-    stateProvider.setHealthy(false);
-
-    // need to wait for old state value to expire
-    Thread.sleep(501);
-
-    resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertEquals(1, resources.size());
-
-    resource = resources.iterator().next();
-    Assert.assertEquals("INIT", resource.getPropertyValue(GSInstallerHostProvider.HOST_STATE_PROPERTY_ID));
-  }
-
-  @Test
-  public void testCreateResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerHostProvider(clusterDefinition);
-
-    try {
-      provider.createResources(PropertyHelper.getReadRequest());
-      Assert.fail("Expected UnsupportedOperationException.");
-    } catch (UnsupportedOperationException e) {
-      //expected
-    }
-  }
-
-  @Test
-  public void testUpdateResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerHostProvider(clusterDefinition);
-
-    try {
-      provider.updateResources(PropertyHelper.getUpdateRequest(new HashMap<String, Object>()), null);
-      Assert.fail("Expected UnsupportedOperationException.");
-    } catch (UnsupportedOperationException e) {
-      //expected
-    }
-  }
-
-  @Test
-  public void testDeleteResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerHostProvider(clusterDefinition);
-
-    try {
-      provider.deleteResources(null);
-      Assert.fail("Expected UnsupportedOperationException.");
-    } catch (UnsupportedOperationException e) {
-      //expected
-    }
-  }
-}
-
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerServiceProviderTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerServiceProviderTest.java
deleted file mode 100644
index 1c1fef8..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerServiceProviderTest.java
+++ /dev/null
@@ -1,150 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.gsinstaller;
-
-import junit.framework.Assert;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.PredicateBuilder;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.junit.Test;
-
-import java.util.HashMap;
-import java.util.Set;
-
-/**
- * Tests for GSInstallerServiceProvider.
- */
-public class GSInstallerServiceProviderTest {
-
-  @Test
-  public void testGetResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerServiceProvider(clusterDefinition);
-    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), null);
-    Assert.assertEquals(12, resources.size());
-  }
-
-  @Test
-  public void testGetResourcesWithPredicate() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerServiceProvider(clusterDefinition);
-    Predicate predicate = new PredicateBuilder().property(GSInstallerServiceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID).equals("MAPREDUCE").toPredicate();
-    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertEquals(1, resources.size());
-
-    predicate = new PredicateBuilder().property(GSInstallerServiceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID).equals("GANGLIA").or().
-        property(GSInstallerServiceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID).equals("NAGIOS").toPredicate();
-    resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertEquals(2, resources.size());
-
-    predicate = new PredicateBuilder().property(GSInstallerServiceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID).equals("NO SERVICE").toPredicate();
-    resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertTrue(resources.isEmpty());
-  }
-
-  @Test
-  public void testGetResourcesCheckState() throws Exception {
-    TestGSInstallerStateProvider stateProvider = new TestGSInstallerStateProvider();
-    ClusterDefinition clusterDefinition = new ClusterDefinition(stateProvider, 500);
-    GSInstallerResourceProvider provider = new GSInstallerServiceProvider(clusterDefinition);
-    Predicate predicate = new PredicateBuilder().property(GSInstallerServiceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID).equals("MAPREDUCE").toPredicate();
-    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertEquals(1, resources.size());
-
-    Resource resource = resources.iterator().next();
-
-    Assert.assertEquals("STARTED", resource.getPropertyValue(GSInstallerServiceProvider.SERVICE_SERVICE_STATE_PROPERTY_ID));
-
-    stateProvider.setHealthy(false);
-
-    // need to wait for old state value to expire
-    Thread.sleep(501);
-
-    resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertEquals(1, resources.size());
-
-    resource = resources.iterator().next();
-    Assert.assertEquals("INIT", resource.getPropertyValue(GSInstallerServiceProvider.SERVICE_SERVICE_STATE_PROPERTY_ID));
-  }
-
-  @Test
-  public void testGetResourcesCheckStateFromCategory() throws Exception {
-    TestGSInstallerStateProvider stateProvider = new TestGSInstallerStateProvider();
-    ClusterDefinition clusterDefinition = new ClusterDefinition(stateProvider, 500);
-    GSInstallerResourceProvider provider = new GSInstallerServiceProvider(clusterDefinition);
-    Predicate predicate = new PredicateBuilder().property(GSInstallerServiceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID).equals("MAPREDUCE").toPredicate();
-    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest("ServiceInfo"), predicate);
-    Assert.assertEquals(1, resources.size());
-
-    Resource resource = resources.iterator().next();
-
-    Assert.assertEquals("STARTED", resource.getPropertyValue(GSInstallerServiceProvider.SERVICE_SERVICE_STATE_PROPERTY_ID));
-
-    stateProvider.setHealthy(false);
-
-    // need to wait for old state value to expire
-    Thread.sleep(501);
-
-    resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
-    Assert.assertEquals(1, resources.size());
-
-    resource = resources.iterator().next();
-    Assert.assertEquals("INIT", resource.getPropertyValue(GSInstallerServiceProvider.SERVICE_SERVICE_STATE_PROPERTY_ID));
-  }
-
-  @Test
-  public void testCreateResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerServiceProvider(clusterDefinition);
-
-    try {
-      provider.createResources(PropertyHelper.getReadRequest());
-      Assert.fail("Expected UnsupportedOperationException.");
-    } catch (UnsupportedOperationException e) {
-      //expected
-    }
-  }
-
-  @Test
-  public void testUpdateResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerServiceProvider(clusterDefinition);
-
-    try {
-      provider.updateResources(PropertyHelper.getUpdateRequest(new HashMap<String, Object>()), null);
-      Assert.fail("Expected UnsupportedOperationException.");
-    } catch (UnsupportedOperationException e) {
-      //expected
-    }
-  }
-
-  @Test
-  public void testDeleteResources() throws Exception {
-    ClusterDefinition clusterDefinition = new ClusterDefinition(new TestGSInstallerStateProvider());
-    GSInstallerResourceProvider provider = new GSInstallerServiceProvider(clusterDefinition);
-
-    try {
-      provider.deleteResources(null);
-      Assert.fail("Expected UnsupportedOperationException.");
-    } catch (UnsupportedOperationException e) {
-      //expected
-    }
-  }
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/TestGSInstallerStateProvider.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/TestGSInstallerStateProvider.java
deleted file mode 100644
index a8e7a6d..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/TestGSInstallerStateProvider.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.gsinstaller;
-
-/**
- * Test gsInstaller state provider.
- */
-public class TestGSInstallerStateProvider implements GSInstallerStateProvider {
-
-  private boolean healthy = true;
-
-  public void setHealthy(boolean healthy) {
-    this.healthy = healthy;
-  }
-
-  @Override
-  public boolean isHealthy(String hostName, String componentName) {
-    return healthy;
-  }
-}
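
The Thread.sleep(501) in the health-state test earlier implies that the provider caches a computed state briefly before consulting this GSInstallerStateProvider again, so a flipped healthy flag only becomes visible once the cached value expires. A hedged sketch of that expiring-read idea (the 500 ms TTL and the class name are assumptions, not the Ambari implementation):

    // Caches a health reading for a fixed TTL before re-reading the source.
    final class ExpiringHealthCache {
      private static final long TTL_MILLIS = 500;  // assumed expiry window

      private final GSInstallerStateProvider source;
      private long computedAt = Long.MIN_VALUE;
      private boolean cached;

      ExpiringHealthCache(GSInstallerStateProvider source) {
        this.source = source;
      }

      synchronized boolean isHealthy(String hostName, String componentName) {
        long now = System.currentTimeMillis();
        if (now - computedAt > TTL_MILLIS) {       // stale: refresh from source
          cached = source.isHealthy(hostName, componentName);
          computedAt = now;
        }
        return cached;
      }
    }

Sleeping 501 ms guarantees the cached value is stale on the next read, which is why the earlier test sees the service state move from STARTED to INIT after setHealthy(false).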
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/AbstractPropertyProviderTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/AbstractPropertyProviderTest.java
deleted file mode 100644
index 5d4d42b..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/AbstractPropertyProviderTest.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.internal;
-
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.spi.SystemException;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.util.Map;
-import java.util.Set;
-
-/**
- * AbstractPropertyProvider (metric provider) tests.
- */
-public class AbstractPropertyProviderTest {
-
-
-  @Test
-  public void testGetComponentMetrics() {
-    Map<String, Map<String, PropertyInfo>> componentMetrics = PropertyHelper.getGangliaPropertyIds(Resource.Type.HostComponent);
-    AbstractPropertyProvider provider = new TestPropertyProvider(componentMetrics);
-    Assert.assertEquals(componentMetrics, provider.getComponentMetrics());
-  }
-
-  @Test
-  public void testGetPropertyInfoMap() {
-    AbstractPropertyProvider provider = new TestPropertyProvider(PropertyHelper.getGangliaPropertyIds(Resource.Type.HostComponent));
-
-    // specific property
-    Map<String, PropertyInfo> propertyInfoMap = provider.getPropertyInfoMap("NAMENODE", "metrics/cpu/cpu_aidle");
-    Assert.assertEquals(1, propertyInfoMap.size());
-    Assert.assertTrue(propertyInfoMap.containsKey("metrics/cpu/cpu_aidle"));
-
-    // category
-    propertyInfoMap = provider.getPropertyInfoMap("NAMENODE", "metrics/disk");
-    Assert.assertEquals(3, propertyInfoMap.size());
-    Assert.assertTrue(propertyInfoMap.containsKey("metrics/disk/disk_free"));
-    Assert.assertTrue(propertyInfoMap.containsKey("metrics/disk/disk_total"));
-    Assert.assertTrue(propertyInfoMap.containsKey("metrics/disk/part_max_used"));
-  }
-
-  static class TestPropertyProvider extends AbstractPropertyProvider {
-
-    public TestPropertyProvider(Map<String, Map<String, PropertyInfo>> componentMetrics) {
-      super(componentMetrics);
-    }
-
-    @Override
-    public Set<Resource> populateResources(Set<Resource> resources, Request request, Predicate predicate) throws SystemException {
-      return null;
-    }
-  }
-}
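
getPropertyInfoMap above resolves its argument in two modes: an exact metric id yields a single entry, while a category prefix yields every metric beneath it. A hedged, self-contained sketch of that lookup (hypothetical helper, not the AbstractPropertyProvider code):

    import java.util.HashMap;
    import java.util.Map;

    final class MetricLookup {
      // Returns the single exact match, or all entries under the category prefix.
      static <V> Map<String, V> lookup(Map<String, V> metrics, String idOrCategory) {
        Map<String, V> result = new HashMap<String, V>();
        V exact = metrics.get(idOrCategory);
        if (exact != null) {                        // specific id, e.g. metrics/cpu/cpu_aidle
          result.put(idOrCategory, exact);
          return result;
        }
        String prefix = idOrCategory + "/";
        for (Map.Entry<String, V> entry : metrics.entrySet()) {
          if (entry.getKey().startsWith(prefix)) {  // category, e.g. metrics/disk
            result.put(entry.getKey(), entry.getValue());
          }
        }
        return result;
      }
    }

Under this rule the "metrics/disk" category resolves to disk_free, disk_total, and part_max_used, matching the three assertions in testGetPropertyInfoMap.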
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/AbstractResourceProviderTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/AbstractResourceProviderTest.java
deleted file mode 100644
index 042ed7c..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/AbstractResourceProviderTest.java
+++ /dev/null
@@ -1,625 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.internal;
-
-import org.apache.ambari.server.controller.ActionRequest;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.ClusterRequest;
-import org.apache.ambari.server.controller.ConfigurationRequest;
-import org.apache.ambari.server.controller.HostRequest;
-import org.apache.ambari.server.controller.RequestStatusRequest;
-import org.apache.ambari.server.controller.ServiceComponentHostRequest;
-import org.apache.ambari.server.controller.ServiceComponentRequest;
-import org.apache.ambari.server.controller.ServiceRequest;
-import org.apache.ambari.server.controller.TaskStatusRequest;
-import org.apache.ambari.server.controller.UserRequest;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.easymock.EasyMock;
-import org.easymock.IArgumentMatcher;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-import static org.easymock.EasyMock.createMock;
-
-/**
- * Resource provider tests.
- */
-public class AbstractResourceProviderTest {
-
-  @Test
-  public void testCheckPropertyIds() {
-    Set<String> propertyIds = new HashSet<String>();
-    propertyIds.add("foo");
-    propertyIds.add("cat1/foo");
-    propertyIds.add("cat2/bar");
-    propertyIds.add("cat2/baz");
-    propertyIds.add("cat3/sub1/bam");
-    propertyIds.add("cat4/sub2/sub3/bat");
-    propertyIds.add("cat5/subcat5/map");
-
-    Map<Resource.Type, String> keyPropertyIds = new HashMap<Resource.Type, String>();
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-
-    AbstractResourceProvider provider =
-        (AbstractResourceProvider) AbstractResourceProvider.getResourceProvider(
-            Resource.Type.Service,
-            propertyIds,
-            keyPropertyIds,
-            managementController);
-
-    Set<String> unsupported = provider.checkPropertyIds(Collections.singleton("foo"));
-    Assert.assertTrue(unsupported.isEmpty());
-
-    // Note that "key" is not in the set of known property ids; we allow it because its parent is a known property.
-    // This supports Map-type properties, where the map entries are treated as individual properties.
-    Assert.assertTrue(provider.checkPropertyIds(Collections.singleton("cat5/subcat5/map/key")).isEmpty());
-
-    unsupported = provider.checkPropertyIds(Collections.singleton("bar"));
-    Assert.assertEquals(1, unsupported.size());
-    Assert.assertTrue(unsupported.contains("bar"));
-
-    unsupported = provider.checkPropertyIds(Collections.singleton("cat1/foo"));
-    Assert.assertTrue(unsupported.isEmpty());
-
-    unsupported = provider.checkPropertyIds(Collections.singleton("cat1"));
-    Assert.assertTrue(unsupported.isEmpty());
-  }
-
-  @Test
-  public void testGetPropertyIds() {
-    Set<String> propertyIds = new HashSet<String>();
-    propertyIds.add("p1");
-    propertyIds.add("foo");
-    propertyIds.add("cat1/foo");
-    propertyIds.add("cat2/bar");
-    propertyIds.add("cat2/baz");
-    propertyIds.add("cat3/sub1/bam");
-    propertyIds.add("cat4/sub2/sub3/bat");
-
-    Map<Resource.Type, String> keyPropertyIds = new HashMap<Resource.Type, String>();
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-
-    AbstractResourceProvider provider =
-        (AbstractResourceProvider) AbstractResourceProvider.getResourceProvider(
-            Resource.Type.Service,
-            propertyIds,
-            keyPropertyIds,
-            managementController);
-
-    Set<String> supportedPropertyIds = provider.getPropertyIds();
-    Assert.assertTrue(supportedPropertyIds.containsAll(propertyIds));
-  }
-
-
-  // ----- helper methods ----------------------------------------------------
-
-  /**
-   * Equals check that accounts for nulls.
-   *
-   * @param left   the left object
-   * @param right  the right object
-   *
-   * @return true if the left and right object are equal or both null
-   */
-  private static boolean eq(Object left, Object right) {
-    return  left == null ? right == null : right != null && left.equals(right);
-  }
-
-
-  // ----- inner classes -----------------------------------------------------
-
-  /**
-   * Utility class for getting various AmbariManagementController request-related matchers.
-   */
-  public static class Matcher
-  {
-    public static ClusterRequest getClusterRequest(
-        Long clusterId, String clusterName, String stackVersion, Set<String> hostNames)
-    {
-      EasyMock.reportMatcher(new ClusterRequestMatcher(clusterId, clusterName, stackVersion, hostNames));
-      return null;
-    }
-
-    public static ConfigurationRequest getConfigurationRequest(
-        String clusterName, String type, String tag, Map<String, String> configs)
-    {
-      EasyMock.reportMatcher(new ConfigurationRequestMatcher(clusterName, type, tag, configs));
-      return null;
-    }
-
-    public static RequestStatusRequest getRequestRequest(Long requestId)
-    {
-      EasyMock.reportMatcher(new RequestRequestMatcher(requestId));
-      return null;
-    }
-
-    public static Set<ActionRequest> getActionRequestSet(String clusterName, String serviceName, String actionName)
-    {
-      EasyMock.reportMatcher(new ActionRequestSetMatcher(clusterName, serviceName, actionName));
-      return null;
-    }
-
-    public static Set<ServiceComponentRequest> getComponentRequestSet(String clusterName, String serviceName,
-                                                                      String componentName,
-                                                                      Map<String, String> configVersions,
-                                                                      String desiredState)
-    {
-      EasyMock.reportMatcher(new ComponentRequestSetMatcher(clusterName, serviceName, componentName,
-          configVersions, desiredState));
-      return null;
-    }
-
-    public static Set<ConfigurationRequest> getConfigurationRequestSet(String clusterName, String type,
-                                                                       String tag, Map<String, String> configs)
-    {
-      EasyMock.reportMatcher(new ConfigurationRequestSetMatcher(clusterName, type, tag, configs));
-      return null;
-    }
-
-    public static Set<HostRequest> getHostRequestSet(String hostname, String clusterName,
-                                                     Map<String, String> hostAttributes)
-    {
-      EasyMock.reportMatcher(new HostRequestSetMatcher(hostname, clusterName, hostAttributes));
-      return null;
-    }
-
-    public static Set<ServiceComponentHostRequest> getHostComponentRequestSet(
-        String clusterName, String serviceName, String componentName, String hostName,
-        Map<String, String> configVersions, String desiredState)
-    {
-      EasyMock.reportMatcher(new HostComponentRequestSetMatcher(
-          clusterName, serviceName, componentName, hostName, configVersions, desiredState));
-      return null;
-    }
-
-    public static Set<ServiceRequest> getServiceRequestSet(String clusterName, String serviceName,
-                                                           Map<String, String> configVersions, String desiredState)
-    {
-      EasyMock.reportMatcher(new ServiceRequestSetMatcher(clusterName, serviceName, configVersions, desiredState));
-      return null;
-    }
-
-    public static Set<TaskStatusRequest> getTaskRequestSet(Long requestId, Long taskId)
-    {
-      EasyMock.reportMatcher(new TaskRequestSetMatcher(requestId, taskId));
-      return null;
-    }
-
-    public static Set<UserRequest> getUserRequestSet(String name)
-    {
-      EasyMock.reportMatcher(new UserRequestSetMatcher(name));
-      return null;
-    }
-  }
-
-  /**
-   * Matcher for a ClusterRequest.
-   */
-  public static class ClusterRequestMatcher extends ClusterRequest implements IArgumentMatcher {
-
-    public ClusterRequestMatcher(Long clusterId, String clusterName, String stackVersion, Set<String> hostNames) {
-      super(clusterId, clusterName, stackVersion, hostNames);
-    }
-
-    @Override
-    public boolean matches(Object o) {
-      return o instanceof ClusterRequest &&
-          eq(((ClusterRequest) o).getClusterId(), getClusterId()) &&
-          eq(((ClusterRequest) o).getClusterName(), getClusterName()) &&
-          eq(((ClusterRequest) o).getStackVersion(), getStackVersion()) &&
-          eq(((ClusterRequest) o).getHostNames(), getHostNames());
-    }
-
-    @Override
-    public void appendTo(StringBuffer stringBuffer) {
-      stringBuffer.append("ClusterRequestMatcher(" + super.toString() + ")");
-    }
-  }
-
-  /**
-   * Matcher for a ConfigurationRequest.
-   */
-  public static class ConfigurationRequestMatcher extends ConfigurationRequest implements IArgumentMatcher {
-
-    public ConfigurationRequestMatcher(String clusterName, String type, String tag, Map<String, String> configs) {
-      super(clusterName, type, tag, configs);
-    }
-
-    @Override
-    public boolean matches(Object o) {
-      return o instanceof ConfigurationRequest &&
-          eq(((ConfigurationRequest) o).getClusterName(), getClusterName()) &&
-          eq(((ConfigurationRequest) o).getType(), getType()) &&
-          eq(((ConfigurationRequest) o).getVersionTag(), getVersionTag()) &&
-          eq(((ConfigurationRequest) o).getConfigs(), getConfigs());
-
-    }
-
-    @Override
-    public void appendTo(StringBuffer stringBuffer) {
-      stringBuffer.append("ConfigurationRequestMatcher(" + super.toString() + ")");
-    }
-  }
-
-  /**
-   * Matcher for a RequestStatusRequest.
-   */
-  public static class RequestRequestMatcher extends RequestStatusRequest implements IArgumentMatcher {
-
-    public RequestRequestMatcher(Long requestId) {
-      super(requestId, "");
-    }
-
-    @Override
-    public boolean matches(Object o) {
-
-      return o instanceof RequestStatusRequest &&
-          eq(((RequestStatusRequest) o).getRequestId(), getRequestId());
-    }
-
-    @Override
-    public void appendTo(StringBuffer stringBuffer) {
-      stringBuffer.append("RequestRequestMatcher(" + super.toString() + ")");
-    }
-  }
-
-  /**
-   * Matcher for an ActionRequest set containing a single request.
-   */
-  public static class ActionRequestSetMatcher extends HashSet<ActionRequest> implements IArgumentMatcher {
-
-    private final ActionRequest actionRequest;
-
-    public ActionRequestSetMatcher(String clusterName, String serviceName, String actionName) {
-      this.actionRequest = new ActionRequest(clusterName, serviceName, actionName, null);
-      add(this.actionRequest);
-    }
-
-    @Override
-    public boolean matches(Object o) {
-      if (!(o instanceof Set)) {
-        return false;
-      }
-
-      Set set = (Set) o;
-
-      if (set.size() != 1) {
-        return false;
-      }
-
-      Object request = set.iterator().next();
-
-      return request instanceof ActionRequest &&
-          eq(((ActionRequest) request).getClusterName(), actionRequest.getClusterName()) &&
-          eq(((ActionRequest) request).getServiceName(), actionRequest.getServiceName()) &&
-          eq(((ActionRequest) request).getActionName(), actionRequest.getActionName());
-    }
-
-    @Override
-    public void appendTo(StringBuffer stringBuffer) {
-      stringBuffer.append("ActionRequestSetMatcher(" + actionRequest + ")");
-    }
-  }
-
-  /**
-   * Matcher for a ServiceComponentRequest set containing a single request.
-   */
-  public static class ComponentRequestSetMatcher extends HashSet<ServiceComponentRequest> implements IArgumentMatcher {
-
-    private final ServiceComponentRequest serviceComponentRequest;
-
-    public ComponentRequestSetMatcher(String clusterName, String serviceName, String componentName,
-                                   Map<String, String> configVersions, String desiredState) {
-      this.serviceComponentRequest =
-          new ServiceComponentRequest(clusterName, serviceName, componentName, configVersions, desiredState);
-      add(this.serviceComponentRequest);
-    }
-
-    @Override
-    public boolean matches(Object o) {
-
-      if (!(o instanceof Set)) {
-        return false;
-      }
-
-      Set set = (Set) o;
-
-      if (set.size() != 1) {
-        return false;
-      }
-
-      Object request = set.iterator().next();
-
-      return request instanceof ServiceComponentRequest &&
-          eq(((ServiceComponentRequest) request).getClusterName(), serviceComponentRequest.getClusterName()) &&
-          eq(((ServiceComponentRequest) request).getServiceName(), serviceComponentRequest.getServiceName()) &&
-          eq(((ServiceComponentRequest) request).getComponentName(), serviceComponentRequest.getComponentName()) &&
-          eq(((ServiceComponentRequest) request).getConfigVersions(), serviceComponentRequest.getConfigVersions()) &&
-          eq(((ServiceComponentRequest) request).getDesiredState(), serviceComponentRequest.getDesiredState());
-    }
-
-    @Override
-    public void appendTo(StringBuffer stringBuffer) {
-      stringBuffer.append("ComponentRequestSetMatcher(" + serviceComponentRequest + ")");
-    }
-  }
-
-  /**
-   * Matcher for a ConfigurationRequest set containing a single request.
-   */
-  public static class ConfigurationRequestSetMatcher extends HashSet<ConfigurationRequest> implements IArgumentMatcher {
-
-    private final ConfigurationRequest configurationRequest;
-
-    public ConfigurationRequestSetMatcher(String clusterName, String type, String tag, Map<String, String> configs) {
-      this.configurationRequest = new ConfigurationRequest(clusterName, type, tag, configs);
-      add(this.configurationRequest);
-    }
-
-    @Override
-    public boolean matches(Object o) {
-
-      if (!(o instanceof Set)) {
-        return false;
-      }
-
-      Set set = (Set) o;
-
-      if (set.size() != 1) {
-        return false;
-      }
-
-      Object request = set.iterator().next();
-
-      return request instanceof ConfigurationRequest &&
-          eq(((ConfigurationRequest) request).getClusterName(), configurationRequest.getClusterName()) &&
-          eq(((ConfigurationRequest) request).getType(), configurationRequest.getType()) &&
-          eq(((ConfigurationRequest) request).getVersionTag(), configurationRequest.getVersionTag()) &&
-          eq(((ConfigurationRequest) request).getConfigs(), configurationRequest.getConfigs());
-    }
-
-    @Override
-    public void appendTo(StringBuffer stringBuffer) {
-      stringBuffer.append("ConfigurationRequestSetMatcher(" + configurationRequest + ")");
-    }
-  }
-
-  /**
-   * Matcher for a HostRequest set containing a single request.
-   */
-  public static class HostRequestSetMatcher extends HashSet<HostRequest> implements IArgumentMatcher {
-
-    private final HostRequest hostRequest;
-
-    public HostRequestSetMatcher(String hostname, String clusterName, Map<String, String> hostAttributes) {
-      this.hostRequest = new HostRequest(hostname, clusterName, hostAttributes);
-      add(this.hostRequest);
-    }
-
-    @Override
-    public boolean matches(Object o) {
-      if (!(o instanceof Set)) {
-        return false;
-      }
-
-      Set set = (Set) o;
-
-      if (set.size() != 1) {
-        return false;
-      }
-
-      Object request = set.iterator().next();
-
-      return request instanceof HostRequest &&
-          eq(((HostRequest) request).getClusterName(), hostRequest.getClusterName()) &&
-          eq(((HostRequest) request).getHostname(), hostRequest.getHostname()) &&
-          eq(((HostRequest) request).getHostAttributes(), hostRequest.getHostAttributes());
-    }
-
-    @Override
-    public void appendTo(StringBuffer stringBuffer) {
-      stringBuffer.append("HostRequestSetMatcher(" + hostRequest + ")");
-    }
-  }
-
-  /**
-   * Matcher for a ServiceComponentHostRequest set containing a single request.
-   */
-  public static class HostComponentRequestSetMatcher extends HashSet<ServiceComponentHostRequest>
-      implements IArgumentMatcher {
-
-    private final ServiceComponentHostRequest hostComponentRequest;
-
-    public HostComponentRequestSetMatcher(String clusterName, String serviceName, String componentName, String hostName,
-                                      Map<String, String> configVersions, String desiredState) {
-      this.hostComponentRequest =
-          new ServiceComponentHostRequest(clusterName, serviceName, componentName,
-              hostName, configVersions, desiredState);
-      add(this.hostComponentRequest);
-    }
-
-    @Override
-    public boolean matches(Object o) {
-
-      if (!(o instanceof Set)) {
-        return false;
-      }
-
-      Set set = (Set) o;
-
-      if (set.size() != 1) {
-        return false;
-      }
-
-      Object request = set.iterator().next();
-
-      return request instanceof ServiceComponentHostRequest &&
-          eq(((ServiceComponentHostRequest) request).getClusterName(), hostComponentRequest.getClusterName()) &&
-          eq(((ServiceComponentHostRequest) request).getServiceName(), hostComponentRequest.getServiceName()) &&
-          eq(((ServiceComponentHostRequest) request).getComponentName(), hostComponentRequest.getComponentName()) &&
-          eq(((ServiceComponentHostRequest) request).getHostname(), hostComponentRequest.getHostname()) &&
-          eq(((ServiceComponentHostRequest) request).getConfigVersions(), hostComponentRequest.getConfigVersions()) &&
-          eq(((ServiceComponentHostRequest) request).getDesiredState(), hostComponentRequest.getDesiredState());
-    }
-
-    @Override
-    public void appendTo(StringBuffer stringBuffer) {
-      stringBuffer.append("HostComponentRequestSetMatcher(" + hostComponentRequest + ")");
-    }
-  }
-
-  /**
-   * Matcher for a ServiceRequest set containing a single request.
-   */
-  public static class ServiceRequestSetMatcher extends HashSet<ServiceRequest> implements IArgumentMatcher {
-
-    private final ServiceRequest serviceRequest;
-
-    public ServiceRequestSetMatcher(
-        String clusterName, String serviceName, Map<String, String> configVersions, String desiredState) {
-      this.serviceRequest = new ServiceRequest(clusterName, serviceName, configVersions, desiredState);
-      add(this.serviceRequest);
-    }
-
-    @Override
-    public boolean matches(Object o) {
-      if (!(o instanceof Set)) {
-        return false;
-      }
-
-      Set set = (Set) o;
-
-      if (set.size() != 1) {
-        return false;
-      }
-
-      Object request = set.iterator().next();
-
-      return request instanceof ServiceRequest &&
-          eq(((ServiceRequest) request).getClusterName(), serviceRequest.getClusterName()) &&
-          eq(((ServiceRequest) request).getServiceName(), serviceRequest.getServiceName()) &&
-          eq(((ServiceRequest) request).getConfigVersions(), serviceRequest.getConfigVersions()) &&
-          eq(((ServiceRequest) request).getDesiredState(), serviceRequest.getDesiredState());
-    }
-
-    @Override
-    public void appendTo(StringBuffer stringBuffer) {
-      stringBuffer.append("ServiceRequestSetMatcher(" + serviceRequest + ")");
-    }
-  }
-
-  /**
-   * Matcher for a TaskStatusRequest set containing a single request.
-   */
-  public static class TaskRequestSetMatcher extends HashSet<TaskStatusRequest> implements IArgumentMatcher {
-
-    private final TaskStatusRequest taskStatusRequest;
-
-    public TaskRequestSetMatcher(Long requestId, Long taskId) {
-      this.taskStatusRequest = new TaskStatusRequest(requestId, taskId);
-      add(this.taskStatusRequest);
-    }
-
-    @Override
-    public boolean matches(Object o) {
-
-      if (!(o instanceof Set)) {
-        return false;
-      }
-
-      Set set = (Set) o;
-
-      if (set.size() != 1) {
-        return false;
-      }
-
-      Object request = set.iterator().next();
-
-      return request instanceof TaskStatusRequest &&
-          eq(((TaskStatusRequest) request).getRequestId(), taskStatusRequest.getRequestId());
-    }
-
-    @Override
-    public void appendTo(StringBuffer stringBuffer) {
-      stringBuffer.append("TaskRequestSetMatcher(" + taskStatusRequest + ")");
-    }
-  }
-
-  /**
-   * Matcher for a UserRequest set containing a single request.
-   */
-  public static class UserRequestSetMatcher extends HashSet<UserRequest> implements IArgumentMatcher {
-
-    private final UserRequest userRequest;
-
-    public UserRequestSetMatcher(String name) {
-      this.userRequest = new UserRequest(name);
-      add(this.userRequest);
-    }
-
-    @Override
-    public boolean matches(Object o) {
-
-      if (!(o instanceof Set)) {
-        return false;
-      }
-
-      Set set = (Set) o;
-
-      if (set.size() != 1) {
-        return false;
-      }
-
-      Object request = set.iterator().next();
-
-      return request instanceof UserRequest &&
-          eq(((UserRequest) request).getUsername(), userRequest.getUsername());
-    }
-
-    @Override
-    public void appendTo(StringBuffer stringBuffer) {
-      stringBuffer.append("UserRequestSetMatcher(" + userRequest + ")");
-    }
-  }
-
-  /**
-   * A test observer that records the last event.
-   */
-  public static class TestObserver implements ResourceProviderObserver {
-
-    ResourceProviderEvent lastEvent = null;
-
-    @Override
-    public void update(ResourceProviderEvent event) {
-      lastEvent = event;
-    }
-
-    public ResourceProviderEvent getLastEvent() {
-      return lastEvent;
-    }
-  }
-}
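
The Matcher helpers above follow the standard EasyMock custom-matcher idiom: each static method reports an IArgumentMatcher via EasyMock.reportMatcher(...) as a side effect and returns null, so it can be written inline in an expect(...) call and the expectation matches by field-wise equality instead of object identity. A hedged usage sketch (the argument values are illustrative):

    AmbariManagementController amc = createMock(AmbariManagementController.class);

    // reportMatcher registers the matcher; the null return value is ignored.
    expect(amc.createActions(
        AbstractResourceProviderTest.Matcher.getActionRequestSet(
            "Cluster100", "Service100", "Action100")))
        .andReturn(null);

    replay(amc);
    // ... exercise code that ends up passing an equivalent ActionRequest set ...
    verify(amc);

The null-tolerant eq() helper keeps each matches() implementation from throwing when either side of a field comparison is absent.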
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ActionResourceProviderTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ActionResourceProviderTest.java
deleted file mode 100644
index 3607638..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ActionResourceProviderTest.java
+++ /dev/null
@@ -1,237 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.internal;
-
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.RequestStatusResponse;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.spi.ResourceProvider;
-import org.apache.ambari.server.controller.utilities.PredicateBuilder;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.LinkedHashSet;
-import java.util.Map;
-import java.util.Set;
-
-import static org.easymock.EasyMock.createMock;
-import static org.easymock.EasyMock.createNiceMock;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.verify;
-
-/**
- * ActionResourceProvider tests.
- */
-public class ActionResourceProviderTest {
-  @Test
-  public void testCreateResources() throws Exception {
-    Resource.Type type = Resource.Type.Action;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-    RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
-
-    expect(managementController.createActions(AbstractResourceProviderTest.Matcher.getActionRequestSet(
-        "Cluster100", "Service100", "Action100"))).andReturn(response);
-
-    // replay
-    replay(managementController, response);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    // Add the property map to a set for the request; add more maps to create multiple resources.
-    Set<Map<String, Object>> propertySet = new LinkedHashSet<Map<String, Object>>();
-
-    // create a map of properties for the request
-    Map<String, Object> properties = new LinkedHashMap<String, Object>();
-
-    // add properties to the request map
-    properties.put(ActionResourceProvider.ACTION_CLUSTER_NAME_PROPERTY_ID, "Cluster100");
-    properties.put(ActionResourceProvider.ACTION_SERVICE_NAME_PROPERTY_ID, "Service100");
-    properties.put(ActionResourceProvider.ACTION_ACTION_NAME_PROPERTY_ID, "Action100");
-
-    propertySet.add(properties);
-
-    // create the request
-    Request request = PropertyHelper.getCreateRequest(propertySet);
-
-    provider.createResources(request);
-
-    // verify
-    verify(managementController, response);
-  }
-
-  @Test
-  public void testGetResources() throws Exception {
-    Resource.Type type = Resource.Type.Action;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-
-    // replay
-    replay(managementController);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    Set<String> propertyIds = new HashSet<String>();
-
-    propertyIds.add(ActionResourceProvider.ACTION_CLUSTER_NAME_PROPERTY_ID);
-    propertyIds.add(ActionResourceProvider.ACTION_SERVICE_NAME_PROPERTY_ID);
-    propertyIds.add(ActionResourceProvider.ACTION_ACTION_NAME_PROPERTY_ID);
-
-    // create the request
-    Request request = PropertyHelper.getReadRequest(propertyIds);
-
-    // get all ... no predicate
-    try {
-      provider.getResources(request, null);
-      Assert.fail("Expected an UnsupportedOperationException");
-    } catch (UnsupportedOperationException e) {
-      // expected
-    }
-
-    // verify
-    verify(managementController);
-  }
-
-
-  @Test
-  public void testUpdateResources() throws Exception {
-    Resource.Type type = Resource.Type.Action;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-    RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
-
-    // replay
-    replay(managementController, response);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    // create the property map for the update request
-    Map<String, Object> properties = new LinkedHashMap<String, Object>();
-
-    // create the request
-    Request request = PropertyHelper.getUpdateRequest(properties);
-
-    Predicate predicate =
-        new PredicateBuilder().property(ActionResourceProvider.ACTION_CLUSTER_NAME_PROPERTY_ID).equals("Cluster100").
-        and().property(ActionResourceProvider.ACTION_SERVICE_NAME_PROPERTY_ID).equals("Service102").
-        and().property(ActionResourceProvider.ACTION_ACTION_NAME_PROPERTY_ID).equals("Action100").toPredicate();
-    try {
-      provider.updateResources(request, predicate);
-      Assert.fail("Expected an UnsupportedOperationException");
-    } catch (UnsupportedOperationException e) {
-      // expected
-    }
-
-    // verify
-    verify(managementController, response);
-  }
-
-  @Test
-  public void testDeleteResources() throws Exception {
-    Resource.Type type = Resource.Type.Action;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-    RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
-
-    // replay
-    replay(managementController, response);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    Predicate  predicate =
-        new PredicateBuilder().property(ActionResourceProvider.ACTION_ACTION_NAME_PROPERTY_ID).equals("Action100").
-            toPredicate();
-    try {
-      provider.deleteResources(predicate);
-      Assert.fail("Expected an UnsupportedOperationException");
-    } catch (UnsupportedOperationException e) {
-      // expected
-    }
-
-    // verify
-    verify(managementController, response);
-  }
-
-  @Test
-  public void testCheckPropertyIds() throws Exception {
-    Set<String> propertyIds = new HashSet<String>();
-    propertyIds.add("foo");
-    propertyIds.add("cat1/foo");
-    propertyIds.add("cat2/bar");
-    propertyIds.add("cat2/baz");
-    propertyIds.add("cat3/sub1/bam");
-    propertyIds.add("cat4/sub2/sub3/bat");
-    propertyIds.add("cat5/subcat5/map");
-
-    Map<Resource.Type, String> keyPropertyIds = new HashMap<Resource.Type, String>();
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-
-    AbstractResourceProvider provider =
-        (AbstractResourceProvider) AbstractResourceProvider.getResourceProvider(
-            Resource.Type.Action,
-            propertyIds,
-            keyPropertyIds,
-            managementController);
-
-    Set<String> unsupported = provider.checkPropertyIds(Collections.singleton("foo"));
-    Assert.assertTrue(unsupported.isEmpty());
-
-    // Note that "key" is not in the set of known property ids; we allow it because its parent is a known property.
-    // This supports Map-type properties, where the map entries are treated as individual properties.
-    Assert.assertTrue(provider.checkPropertyIds(Collections.singleton("cat5/subcat5/map/key")).isEmpty());
-
-    unsupported = provider.checkPropertyIds(Collections.singleton("bar"));
-    Assert.assertEquals(1, unsupported.size());
-    Assert.assertTrue(unsupported.contains("bar"));
-
-    unsupported = provider.checkPropertyIds(Collections.singleton("cat1/foo"));
-    Assert.assertTrue(unsupported.isEmpty());
-
-    unsupported = provider.checkPropertyIds(Collections.singleton("cat1"));
-    Assert.assertTrue(unsupported.isEmpty());
-
-    unsupported = provider.checkPropertyIds(Collections.singleton("parameters/unknown_property"));
-    Assert.assertTrue(unsupported.isEmpty());
-  }
-}
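
testCheckPropertyIds above encodes two lookup rules: an id is supported when it, or any ancestor category, is a declared property (the Map-entry case such as cat5/subcat5/map/key), and a bare category such as cat1 is supported because declared ids live beneath it. A hedged sketch of those rules (hypothetical helper; it deliberately ignores provider-specific allowances like the parameters/* ids):

    import java.util.Set;

    final class PropertySupport {
      static boolean isSupported(Set<String> declared, String propertyId) {
        // Rule 1: exact match, or some ancestor category is declared
        // (covers Map-typed entries such as "cat5/subcat5/map/key").
        String id = propertyId;
        while (true) {
          if (declared.contains(id)) {
            return true;
          }
          int slash = id.lastIndexOf('/');
          if (slash < 0) {
            break;
          }
          id = id.substring(0, slash);
        }
        // Rule 2: a bare category like "cat1" is supported because
        // declared ids such as "cat1/foo" live beneath it.
        String prefix = propertyId + "/";
        for (String d : declared) {
          if (d.startsWith(prefix)) {
            return true;
          }
        }
        return false;
      }
    }

With the declared set built in the test, this returns true for "foo", "cat1", "cat1/foo", and "cat5/subcat5/map/key", and false for "bar", mirroring the assertions.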
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BaseProviderTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BaseProviderTest.java
deleted file mode 100644
index e5bc59e..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BaseProviderTest.java
+++ /dev/null
@@ -1,261 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.internal;
-
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Base provider tests.
- */
-public class BaseProviderTest {
-  @Test
-  public void testGetProperties() {
-    Set<String> propertyIds = new HashSet<String>();
-    propertyIds.add("foo");
-    propertyIds.add("bar");
-    propertyIds.add("cat1/prop1");
-    propertyIds.add("cat2/prop2");
-    propertyIds.add("cat3/subcat3/prop3");
-
-    BaseProvider provider = new TestProvider(propertyIds);
-
-    Set<String> supportedPropertyIds = provider.getPropertyIds();
-    Assert.assertTrue(supportedPropertyIds.containsAll(propertyIds));
-  }
-
-  @Test
-  public void testCheckPropertyIds() {
-    Set<String> propertyIds = new HashSet<String>();
-    propertyIds.add("foo");
-    propertyIds.add("bar");
-    propertyIds.add("cat1/prop1");
-    propertyIds.add("cat2/prop2");
-    propertyIds.add("cat3/subcat3/prop3");
-    propertyIds.add("cat4/subcat4/map");
-
-    BaseProvider provider = new TestProvider(propertyIds);
-
-    Assert.assertTrue(provider.checkPropertyIds(propertyIds).isEmpty());
-
-    Assert.assertTrue(provider.checkPropertyIds(Collections.singleton("cat1")).isEmpty());
-    Assert.assertTrue(provider.checkPropertyIds(Collections.singleton("cat2")).isEmpty());
-    Assert.assertTrue(provider.checkPropertyIds(Collections.singleton("cat3")).isEmpty());
-    Assert.assertTrue(provider.checkPropertyIds(Collections.singleton("cat3/subcat3")).isEmpty());
-    Assert.assertTrue(provider.checkPropertyIds(Collections.singleton("cat4/subcat4/map")).isEmpty());
-
-    // Note that "key" is not in the set of known property ids; we allow it because its parent is a known property.
-    // This supports Map-type properties, where the map entries are treated as individual properties.
-    Assert.assertTrue(provider.checkPropertyIds(Collections.singleton("cat4/subcat4/map/key")).isEmpty());
-
-    propertyIds.add("badprop");
-    propertyIds.add("badcat");
-
-    Set<String> unsupportedPropertyIds = provider.checkPropertyIds(propertyIds);
-    Assert.assertFalse(unsupportedPropertyIds.isEmpty());
-    Assert.assertEquals(2, unsupportedPropertyIds.size());
-    Assert.assertTrue(unsupportedPropertyIds.contains("badprop"));
-    Assert.assertTrue(unsupportedPropertyIds.contains("badcat"));
-  }
-
-  @Test
-  public void testGetRequestPropertyIds() {
-    Set<String> providerPropertyIds = new HashSet<String>();
-    providerPropertyIds.add("foo");
-    providerPropertyIds.add("bar");
-    providerPropertyIds.add("cat1/sub1");
-
-    BaseProvider provider = new TestProvider(providerPropertyIds);
-
-    Request request = PropertyHelper.getReadRequest("foo");
-
-    Set<String> requestedPropertyIds = provider.getRequestPropertyIds(request, null);
-
-    Assert.assertEquals(1, requestedPropertyIds.size());
-    Assert.assertTrue(requestedPropertyIds.contains("foo"));
-
-    request = PropertyHelper.getReadRequest("foo", "bar");
-
-    requestedPropertyIds = provider.getRequestPropertyIds(request, null);
-
-    Assert.assertEquals(2, requestedPropertyIds.size());
-    Assert.assertTrue(requestedPropertyIds.contains("foo"));
-    Assert.assertTrue(requestedPropertyIds.contains("bar"));
-
-    request = PropertyHelper.getReadRequest("foo", "baz", "bar", "cat", "cat1/prop1");
-
-    requestedPropertyIds = provider.getRequestPropertyIds(request, null);
-
-    Assert.assertEquals(2, requestedPropertyIds.size());
-    Assert.assertTrue(requestedPropertyIds.contains("foo"));
-    Assert.assertTrue(requestedPropertyIds.contains("bar"));
-
-    // Ask for a property that isn't declared as supported even though its category is; the property
-    // should still end up in the returned set, covering the case where the category is a Map property.
-    request = PropertyHelper.getReadRequest("foo", "cat1/sub1/prop1");
-
-    requestedPropertyIds = provider.getRequestPropertyIds(request, null);
-
-    Assert.assertEquals(2, requestedPropertyIds.size());
-    Assert.assertTrue(requestedPropertyIds.contains("foo"));
-    Assert.assertTrue(requestedPropertyIds.contains("cat1/sub1/prop1"));
-  }
-
-  @Test
-  public void testSetResourceProperty() {
-    Set<String> propertyIds = new HashSet<String>();
-    propertyIds.add("p1");
-    propertyIds.add("foo");
-    propertyIds.add("cat1/foo");
-    propertyIds.add("cat2/bar");
-    propertyIds.add("cat2/baz");
-    propertyIds.add("cat3/sub1/bam");
-    propertyIds.add("cat4/sub2/sub3/bat");
-    propertyIds.add("cat5/sub5");
-
-    Resource resource = new ResourceImpl(Resource.Type.Service);
-
-    Assert.assertNull(resource.getPropertyValue("foo"));
-
-    BaseProvider.setResourceProperty(resource, "foo", "value1", propertyIds);
-    Assert.assertEquals("value1", resource.getPropertyValue("foo"));
-
-    BaseProvider.setResourceProperty(resource, "cat2/bar", "value2", propertyIds);
-    Assert.assertEquals("value2", resource.getPropertyValue("cat2/bar"));
-
-    Assert.assertNull(resource.getPropertyValue("unsupported"));
-    BaseProvider.setResourceProperty(resource, "unsupported", "valueX", propertyIds);
-    Assert.assertNull(resource.getPropertyValue("unsupported"));
-
-    // we should allow anything under the category cat5/sub5
-    BaseProvider.setResourceProperty(resource, "cat5/sub5/prop5", "value5", propertyIds);
-    Assert.assertEquals("value5", resource.getPropertyValue("cat5/sub5/prop5"));
-    BaseProvider.setResourceProperty(resource, "cat5/sub5/sub5a/prop5a", "value5", propertyIds);
-    Assert.assertEquals("value5", resource.getPropertyValue("cat5/sub5/sub5a/prop5a"));
-    // we shouldn't allow anything under the category cat5/sub7
-    BaseProvider.setResourceProperty(resource, "cat5/sub7/unsupported", "valueX", propertyIds);
-    Assert.assertNull(resource.getPropertyValue("cat5/sub7/unsupported"));
-  }
-
-  @Test
-  public void testSetResourcePropertyWithMaps() {
-    Set<String> propertyIds = new HashSet<String>();
-    propertyIds.add("cat1/emptyMapProperty");
-    propertyIds.add("cat1/mapProperty");
-    propertyIds.add("cat2/mapMapProperty");
-    propertyIds.add("cat3/mapProperty3/key2");
-    propertyIds.add("cat4/mapMapProperty4/subMap1/key3");
-    propertyIds.add("cat4/mapMapProperty4/subMap2");
-
-    Resource resource = new ResourceImpl(Resource.Type.Service);
-
-    // Adding an empty Map as a property should add the actual Map as a property
-    Map<String, String> emptyMapProperty = new HashMap<String, String>();
-    BaseProvider.setResourceProperty(resource, "cat1/emptyMapProperty", emptyMapProperty, propertyIds);
-    Assert.assertTrue(resource.getPropertiesMap().containsKey("cat1/emptyMapProperty"));
-
-    Map<String, String> mapProperty = new HashMap<String, String>();
-    mapProperty.put("key1", "value1");
-    mapProperty.put("key2", "value2");
-    mapProperty.put("key3", "value3");
-
-    // Adding a property of type Map should add all of its keys as sub properties
-    // if the map property was requested
-    BaseProvider.setResourceProperty(resource, "cat1/mapProperty", mapProperty, propertyIds);
-    Assert.assertNull(resource.getPropertyValue("cat1/mapProperty"));
-    Assert.assertEquals("value1", resource.getPropertyValue("cat1/mapProperty/key1"));
-    Assert.assertEquals("value2", resource.getPropertyValue("cat1/mapProperty/key2"));
-    Assert.assertEquals("value3", resource.getPropertyValue("cat1/mapProperty/key3"));
-
-    Map<String, Map<String, String>> mapMapProperty = new HashMap<String, Map<String, String>>();
-    Map<String, String> mapSubProperty1 = new HashMap<String, String>();
-    mapSubProperty1.put("key1", "value11");
-    mapSubProperty1.put("key2", "value12");
-    mapSubProperty1.put("key3", "value13");
-    mapMapProperty.put("subMap1", mapSubProperty1);
-    Map<String, String> mapSubProperty2 = new HashMap<String, String>();
-    mapSubProperty2.put("key1", "value21");
-    mapSubProperty2.put("key2", "value22");
-    mapSubProperty2.put("key3", "value23");
-    mapMapProperty.put("subMap2", mapSubProperty2);
-    Map<String, String> mapSubProperty3 = new HashMap<String, String>();
-    mapMapProperty.put("subMap3", mapSubProperty3);
-
-    // Map of maps ... adding a property of type Map should add all of its keys as sub properties
-    // if the map property was requested
-    BaseProvider.setResourceProperty(resource, "cat2/mapMapProperty", mapMapProperty, propertyIds);
-    Assert.assertNull(resource.getPropertyValue("cat2/mapMapProperty"));
-    Assert.assertNull(resource.getPropertyValue("cat2/mapMapProperty/subMap1"));
-    Assert.assertNull(resource.getPropertyValue("cat2/mapMapProperty/subMap2"));
-    Assert.assertTrue(resource.getPropertiesMap().containsKey("cat2/mapMapProperty/subMap3"));
-    Assert.assertEquals("value11", resource.getPropertyValue("cat2/mapMapProperty/subMap1/key1"));
-    Assert.assertEquals("value12", resource.getPropertyValue("cat2/mapMapProperty/subMap1/key2"));
-    Assert.assertEquals("value13", resource.getPropertyValue("cat2/mapMapProperty/subMap1/key3"));
-    Assert.assertEquals("value21", resource.getPropertyValue("cat2/mapMapProperty/subMap2/key1"));
-    Assert.assertEquals("value22", resource.getPropertyValue("cat2/mapMapProperty/subMap2/key2"));
-    Assert.assertEquals("value23", resource.getPropertyValue("cat2/mapMapProperty/subMap2/key3"));
-
-    Map<String, String> mapProperty3 = new HashMap<String, String>();
-    mapProperty3.put("key1", "value1");
-    mapProperty3.put("key2", "value2");
-    mapProperty3.put("key3", "value3");
-
-    // Adding a property of type Map shouldn't add the map if it wasn't requested and
-    // should only add requested keys as sub properties ...
-    // only "cat3/mapProperty3/key2" was requested
-    BaseProvider.setResourceProperty(resource, "cat3/mapProperty3", mapProperty3, propertyIds);
-    Assert.assertNull(resource.getPropertyValue("cat3/mapProperty3"));
-    Assert.assertNull(resource.getPropertyValue("cat3/mapProperty3/key1"));
-    Assert.assertEquals("value2", resource.getPropertyValue("cat3/mapProperty3/key2"));
-    Assert.assertNull(resource.getPropertyValue("cat3/mapProperty3/key3"));
-
-    Map<String, Map<String, String>> mapMapProperty4 = new HashMap<String, Map<String, String>>();
-    mapMapProperty4.put("subMap1", mapSubProperty1);
-    mapMapProperty4.put("subMap2", mapSubProperty2);
-    // Map of maps ... adding a property of type Map shouldn't add the map if it wasn't requested and
-    // should only add requested keys as sub properties ...
-    // only "cat4/mapMapProperty4/subMap1/key3" and "cat4/mapMapProperty4/subMap2" are requested
-    BaseProvider.setResourceProperty(resource, "cat4/mapMapProperty4", mapMapProperty4, propertyIds);
-    Assert.assertNull(resource.getPropertyValue("cat4/mapMapProperty4"));
-    Assert.assertNull(resource.getPropertyValue("cat4/mapMapProperty4/subMap1"));
-    Assert.assertNull(resource.getPropertyValue("cat4/mapMapProperty4/subMap2"));
-    Assert.assertNull(resource.getPropertyValue("cat4/mapMapProperty4/subMap1/key1"));
-    Assert.assertNull(resource.getPropertyValue("cat4/mapMapProperty4/subMap1/key2"));
-    Assert.assertEquals("value13", resource.getPropertyValue("cat4/mapMapProperty4/subMap1/key3"));
-    Assert.assertEquals("value21", resource.getPropertyValue("cat4/mapMapProperty4/subMap2/key1"));
-    Assert.assertEquals("value22", resource.getPropertyValue("cat4/mapMapProperty4/subMap2/key2"));
-    Assert.assertEquals("value23", resource.getPropertyValue("cat4/mapMapProperty4/subMap2/key3"));
-  }
-
-  static class TestProvider extends BaseProvider {
-
-    public TestProvider(Set<String> propertyIds) {
-      super(propertyIds);
-    }
-  }
-}
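
testSetResourcePropertyWithMaps above pins down three behaviors: an empty Map is stored as the map itself, a populated Map is flattened into one property per key (recursively for maps of maps), and only keys that pass the support check survive when the parent map was not itself requested. A hedged sketch of that flattening (hypothetical helper, reusing the PropertySupport rule sketched earlier):

    import java.util.Map;
    import java.util.Set;

    final class PropertyFlattener {
      // Flattens Map values into "parent/key" properties; scalars and empty
      // maps are kept only when the support check accepts their id.
      static void setProperty(Map<String, Object> out, String id, Object value,
                              Set<String> requested) {
        if (value instanceof Map) {
          Map<?, ?> map = (Map<?, ?>) value;
          if (map.isEmpty()) {
            if (PropertySupport.isSupported(requested, id)) {
              out.put(id, map);                     // empty map stored as the map itself
            }
            return;
          }
          for (Map.Entry<?, ?> entry : map.entrySet()) {
            setProperty(out, id + "/" + entry.getKey(), entry.getValue(), requested);
          }
        } else if (PropertySupport.isSupported(requested, id)) {
          out.put(id, value);                       // scalar: keep only supported ids
        }
      }
    }

Run against the cat4 fixture, this keeps subMap1/key3 and every subMap2 key while dropping subMap1/key1 and key2, which is exactly the null/non-null pattern the assertions verify.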
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterControllerImplTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterControllerImplTest.java
deleted file mode 100644
index 100e7f6..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterControllerImplTest.java
+++ /dev/null
@@ -1,540 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.internal;
-
-import junit.framework.Assert;
-import org.apache.ambari.server.controller.spi.*;
-import org.apache.ambari.server.controller.utilities.PredicateHelper;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.apache.ambari.server.controller.utilities.PredicateBuilder;
-import org.junit.Test;
-
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Cluster controller tests
- */
-public class ClusterControllerImplTest {
-
-  private static final Set<String> propertyProviderProperties = new HashSet<String>();
-
-  private static final String UNSUPPORTED_PROPERTY = PropertyHelper.getPropertyId("c1", "unsupported");
-
-  static {
-    propertyProviderProperties.add(PropertyHelper.getPropertyId("c3", "p5"));
-    propertyProviderProperties.add(PropertyHelper.getPropertyId("c3", "p6"));
-    propertyProviderProperties.add(PropertyHelper.getPropertyId("c4", "p7"));
-    propertyProviderProperties.add(PropertyHelper.getPropertyId("c4", "p8"));
-  }
-
-  private static final PropertyProvider propertyProvider = new PropertyProvider() {
-    @Override
-    public Set<Resource> populateResources(Set<Resource> resources, Request request, Predicate predicate) {
-
-      int cnt = 0;
-      for (Resource resource : resources){
-        resource.setProperty(PropertyHelper.getPropertyId("c3", "p5"), cnt + 100);
-        resource.setProperty(PropertyHelper.getPropertyId("c3", "p6"), cnt % 2);
-        resource.setProperty(PropertyHelper.getPropertyId("c4", "p7"), "monkey");
-        resource.setProperty(PropertyHelper.getPropertyId("c4", "p8"), "runner");
-        ++cnt;
-      }
-      return resources;
-    }
-
-    @Override
-    public Set<String> checkPropertyIds(Set<String> propertyIds) {
-      if (!propertyProviderProperties.containsAll(propertyIds)) {
-        Set<String> unsupportedPropertyIds = new HashSet<String>(propertyIds);
-        unsupportedPropertyIds.removeAll(propertyProviderProperties);
-        return unsupportedPropertyIds;
-      }
-      return Collections.emptySet();
-    }
-  };
-
-  private static final List<PropertyProvider> propertyProviders = new LinkedList<PropertyProvider>();
-
-  static {
-    propertyProviders.add(propertyProvider);
-  }
-
-  private static final Map<Resource.Type, String> keyPropertyIds = new HashMap<Resource.Type, String>();
-
-  static {
-    keyPropertyIds.put(Resource.Type.Cluster, PropertyHelper.getPropertyId("c1", "p1"));
-    keyPropertyIds.put(Resource.Type.Host, PropertyHelper.getPropertyId("c1", "p2"));
-  }
-
-  private static final Set<String> resourceProviderProperties = new HashSet<String>();
-
-  static {
-    resourceProviderProperties.add(PropertyHelper.getPropertyId("c1", "p1"));
-    resourceProviderProperties.add(PropertyHelper.getPropertyId("c1", "p2"));
-    resourceProviderProperties.add(PropertyHelper.getPropertyId("c1", "p3"));
-    resourceProviderProperties.add(PropertyHelper.getPropertyId("c2", "p4"));
-  }
-
-  @Test
-  public void testGetResources() throws Exception{
-    ClusterController controller = new ClusterControllerImpl(new TestProviderModule());
-
-    Set<String> propertyIds = new HashSet<String>();
-
-    propertyIds.add(PropertyHelper.getPropertyId("c1", "p1"));
-    propertyIds.add(PropertyHelper.getPropertyId("c1", "p3"));
-
-    Request request = PropertyHelper.getReadRequest(propertyIds);
-
-    Iterable<Resource> iterable = controller.getResources(Resource.Type.Host, request, null);
-
-    int cnt = 0;
-    for (Resource resource : iterable) {
-      Assert.assertEquals(Resource.Type.Host, resource.getType());
-      ++cnt;
-    }
-    Assert.assertEquals(4, cnt);
-  }
-
-  @Test
-  public void testGetResourcesEmptyRequest() throws Exception{
-    ClusterController controller = new ClusterControllerImpl(new TestProviderModule());
-
-    Set<String> propertyIds = new HashSet<String>();
-
-    Request request = PropertyHelper.getReadRequest(propertyIds);
-
-    Iterable<Resource> iterable = controller.getResources(Resource.Type.Host, request, null);
-
-    int cnt = 0;
-    for (Resource resource : iterable) {
-      Assert.assertEquals(Resource.Type.Host, resource.getType());
-      ++cnt;
-    }
-    Assert.assertEquals(4, cnt);
-  }
-
-  @Test
-  public void testGetResourcesWithPredicate() throws Exception{
-    ClusterController controller = new ClusterControllerImpl(new TestProviderModule());
-
-    Set<String> propertyIds = new HashSet<String>();
-
-    propertyIds.add(PropertyHelper.getPropertyId("c1", "p1"));
-    propertyIds.add(PropertyHelper.getPropertyId("c1", "p2"));
-    propertyIds.add(PropertyHelper.getPropertyId("c1", "p3"));
-    propertyIds.add(PropertyHelper.getPropertyId("c2", "p4"));
-
-    Request request = PropertyHelper.getReadRequest(propertyIds);
-
-    Predicate predicate = new PredicateBuilder().property("c1/p2").equals(1).toPredicate();
-
-    Iterable<Resource> iterable = controller.getResources(Resource.Type.Host, request, predicate);
-
-    int cnt = 0;
-    for (Resource resource : iterable) {
-      Assert.assertEquals(Resource.Type.Host, resource.getType());
-      ++cnt;
-    }
-    Assert.assertEquals(2, cnt);
-  }
-
-  @Test
-  public void testGetResourcesWithUnsupportedPropertyPredicate() throws Exception{
-    ClusterController controller = new ClusterControllerImpl(new TestProviderModule());
-
-    Set<String> propertyIds = new HashSet<String>();
-
-    propertyIds.add(PropertyHelper.getPropertyId("c1", "p1"));
-    propertyIds.add(PropertyHelper.getPropertyId("c1", "p2"));
-    propertyIds.add(PropertyHelper.getPropertyId("c1", "p3"));
-    propertyIds.add(PropertyHelper.getPropertyId("c2", "p4"));
-
-    Request request = PropertyHelper.getReadRequest(propertyIds);
-
-    Predicate predicate = new PredicateBuilder().property(UNSUPPORTED_PROPERTY).equals(1).toPredicate();
-
-    try {
-      controller.getResources(Resource.Type.Host, request, predicate);
-      Assert.fail("Expected an UnsupportedPropertyException for the unsupported properties.");
-    } catch (UnsupportedPropertyException e) {
-      // Expected
-    }
-  }
-
-  @Test
-  public void testGetResourcesWithUnsupportedPropertyRequest() throws Exception {
-    ClusterController controller = new ClusterControllerImpl(new TestProviderModule());
-
-    Set<String> propertyIds = new HashSet<String>();
-
-    propertyIds.add(PropertyHelper.getPropertyId("c1", "p1"));
-    propertyIds.add(PropertyHelper.getPropertyId("c1", "p2"));
-    propertyIds.add(PropertyHelper.getPropertyId("c1", "p3"));
-    propertyIds.add(UNSUPPORTED_PROPERTY);
-
-    Request request = PropertyHelper.getReadRequest(propertyIds);
-
-    Predicate predicate = new PredicateBuilder().property("c1/p2").equals(1).toPredicate();
-
-    try {
-      controller.getResources(Resource.Type.Host, request, predicate);
-      Assert.fail("Expected an UnsupportedPropertyException for the unsupported properties.");
-    } catch (UnsupportedPropertyException e) {
-      // Expected
-    }
-  }
-
-  @Test
-  public void testCreateResources() throws Exception {
-    TestProviderModule providerModule = new TestProviderModule();
-    TestResourceProvider resourceProvider = (TestResourceProvider) providerModule.getResourceProvider(Resource.Type.Host);
-    ClusterController controller = new ClusterControllerImpl(providerModule);
-
-    Set<Map<String, Object>> properties = new HashSet<Map<String, Object>>();
-    Map<String, Object> propertyMap = new HashMap<String, Object>();
-
-    propertyMap.put(PropertyHelper.getPropertyId("c1", "p1"), 99);
-    propertyMap.put(PropertyHelper.getPropertyId("c1", "p2"), 2);
-
-    properties.add(propertyMap);
-
-    Request request = PropertyHelper.getCreateRequest(properties);
-
-    controller.createResources(Resource.Type.Host, request);
-
-    Assert.assertEquals(TestResourceProvider.Action.Create, resourceProvider.getLastAction());
-    Assert.assertSame(request, resourceProvider.getLastRequest());
-    Assert.assertNull(resourceProvider.getLastPredicate());
-  }
-
-  @Test
-  public void testCreateResourcesWithUnsupportedProperty() throws Exception {
-    TestProviderModule providerModule = new TestProviderModule();
-    ClusterController controller = new ClusterControllerImpl(providerModule);
-
-    Set<Map<String, Object>> properties = new HashSet<Map<String, Object>>();
-    Map<String, Object> propertyMap = new HashMap<String, Object>();
-
-    propertyMap.put(PropertyHelper.getPropertyId("c1", "p1"), 99);
-    propertyMap.put(UNSUPPORTED_PROPERTY, 2);
-
-    properties.add(propertyMap);
-
-    Request request = PropertyHelper.getCreateRequest(properties);
-
-    try {
-      controller.createResources(Resource.Type.Host, request);
-      Assert.fail("Expected an UnsupportedPropertyException for the unsupported properties.");
-    } catch (UnsupportedPropertyException e) {
-      // Expected
-    }
-  }
-
-  @Test
-  public void testUpdateResources() throws Exception {
-    TestProviderModule providerModule = new TestProviderModule();
-    TestResourceProvider resourceProvider = (TestResourceProvider) providerModule.getResourceProvider(Resource.Type.Host);
-    ClusterController controller = new ClusterControllerImpl(providerModule);
-
-    Map<String, Object> propertyMap = new HashMap<String, Object>();
-
-    propertyMap.put(PropertyHelper.getPropertyId("c1", "p1"), 99);
-    propertyMap.put(PropertyHelper.getPropertyId("c1", "p2"), 2);
-
-    Request request = PropertyHelper.getUpdateRequest(propertyMap);
-
-    Predicate predicate = new PredicateBuilder().property("c1/p2").equals(1).toPredicate();
-
-    controller.updateResources(Resource.Type.Host, request, predicate);
-
-    Assert.assertEquals(TestResourceProvider.Action.Update, resourceProvider.getLastAction());
-    Assert.assertSame(request, resourceProvider.getLastRequest());
-    Assert.assertSame(predicate, resourceProvider.getLastPredicate());
-  }
-
-  @Test
-  public void testUpdateResourcesWithUnsupportedPropertyRequest() throws Exception {
-    TestProviderModule providerModule = new TestProviderModule();
-    ClusterController controller = new ClusterControllerImpl(providerModule);
-
-    Map<String, Object> propertyMap = new HashMap<String, Object>();
-
-    propertyMap.put(PropertyHelper.getPropertyId("c1", "p1"), 99);
-    propertyMap.put(UNSUPPORTED_PROPERTY, 2);
-
-    Request request = PropertyHelper.getUpdateRequest(propertyMap);
-
-    Predicate predicate = new PredicateBuilder().property("c1/p2").equals(1).toPredicate();
-
-    try {
-      controller.updateResources(Resource.Type.Host, request, predicate);
-      Assert.fail("Expected an UnsupportedPropertyException for the unsupported properties.");
-    } catch (UnsupportedPropertyException e) {
-      // Expected
-    }
-  }
-
-  @Test
-  public void testUpdateResourcesWithUnsupportedPropertyPredicate() throws Exception {
-    TestProviderModule providerModule = new TestProviderModule();
-    ClusterController controller = new ClusterControllerImpl(providerModule);
-
-    Map<String, Object> propertyMap = new HashMap<String, Object>();
-
-    propertyMap.put(PropertyHelper.getPropertyId("c1", "p1"), 99);
-    propertyMap.put(PropertyHelper.getPropertyId("c1", "p2"), 2);
-
-    Request request = PropertyHelper.getUpdateRequest(propertyMap);
-
-    Predicate predicate = new PredicateBuilder().property(UNSUPPORTED_PROPERTY).equals(1).toPredicate();
-
-    try {
-      controller.updateResources(Resource.Type.Host, request, predicate);
-      Assert.fail("Expected an UnsupportedPropertyException for the unsupported properties.");
-    } catch (UnsupportedPropertyException e) {
-      // Expected
-    }
-  }
-
-  @Test
-  public void testUpdateResourcesResolvePredicate() throws Exception {
-    TestProviderModule providerModule = new TestProviderModule();
-    TestResourceProvider resourceProvider = (TestResourceProvider) providerModule.getResourceProvider(Resource.Type.Host);
-    ClusterController controller = new ClusterControllerImpl(providerModule);
-
-    Map<String, Object> propertyMap = new HashMap<String, Object>();
-
-    propertyMap.put(PropertyHelper.getPropertyId("c1", "p1"), 99);
-    propertyMap.put(PropertyHelper.getPropertyId("c1", "p2"), 2);
-
-    Request request = PropertyHelper.getUpdateRequest(propertyMap);
-
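-    // "c3/p6" is not a property supported by the provider, so the controller is expected to
-    // resolve the predicate into one built from the provider's key properties (verified below).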
-    Predicate predicate = new PredicateBuilder().property("c3/p6").equals(1).toPredicate();
-
-    controller.updateResources(Resource.Type.Host, request, predicate);
-
-    Assert.assertEquals(TestResourceProvider.Action.Update, resourceProvider.getLastAction());
-    Assert.assertSame(request, resourceProvider.getLastRequest());
-    Predicate lastPredicate = resourceProvider.getLastPredicate();
-    Assert.assertFalse(predicate.equals(lastPredicate));
-    Set<String> predicatePropertyIds = PredicateHelper.getPropertyIds(lastPredicate);
-    Collection<String> keyPropertyIds = resourceProvider.getKeyPropertyIds().values();
-    Assert.assertEquals(predicatePropertyIds.size(), keyPropertyIds.size());
-    Assert.assertTrue(keyPropertyIds.containsAll(predicatePropertyIds));
-  }
-
-  @Test
-  public void testDeleteResources() throws Exception {
-    TestProviderModule providerModule = new TestProviderModule();
-    TestResourceProvider resourceProvider = (TestResourceProvider) providerModule.getResourceProvider(Resource.Type.Host);
-    ClusterController controller = new ClusterControllerImpl(providerModule);
-
-    Predicate predicate = new PredicateBuilder().property("c1/p2").equals(1).toPredicate();
-
-    controller.deleteResources(Resource.Type.Host, predicate);
-
-    Assert.assertEquals(TestResourceProvider.Action.Delete, resourceProvider.getLastAction());
-    Assert.assertNull(resourceProvider.getLastRequest());
-    Assert.assertSame(predicate, resourceProvider.getLastPredicate());
-  }
-
-  @Test
-  public void testDeleteResourcesWithUnsupportedProperty() throws Exception {
-    TestProviderModule providerModule = new TestProviderModule();
-    ClusterController controller = new ClusterControllerImpl(providerModule);
-
-    Predicate predicate = new PredicateBuilder().property(UNSUPPORTED_PROPERTY).equals(1).toPredicate();
-
-    try {
-      controller.deleteResources(Resource.Type.Host, predicate);
-      Assert.fail("Expected an UnsupportedPropertyException for the unsupported properties.");
-    } catch (UnsupportedPropertyException e) {
-      // Expected
-    }
-  }
-
-  @Test
-  public void testDeleteResourcesResolvePredicate() throws Exception {
-    TestProviderModule providerModule = new TestProviderModule();
-    TestResourceProvider resourceProvider = (TestResourceProvider) providerModule.getResourceProvider(Resource.Type.Host);
-    ClusterController controller = new ClusterControllerImpl(providerModule);
-
-    Predicate predicate = new PredicateBuilder().property("c3/p6").equals(1).toPredicate();
-
-    controller.deleteResources(Resource.Type.Host, predicate);
-
-    Assert.assertEquals(TestResourceProvider.Action.Delete, resourceProvider.getLastAction());
-    Assert.assertNull(resourceProvider.getLastRequest());
-    Predicate lastPredicate = resourceProvider.getLastPredicate();
-    Assert.assertFalse(predicate.equals(lastPredicate));
-    Set<String> predicatePropertyIds = PredicateHelper.getPropertyIds(lastPredicate);
-    Collection<String> keyPropertyIds = resourceProvider.getKeyPropertyIds().values();
-    Assert.assertEquals(predicatePropertyIds.size(), keyPropertyIds.size());
-    Assert.assertTrue(keyPropertyIds.containsAll(predicatePropertyIds));
-  }
-
-//  @Test
-//  public void testGetSchema() {
-//    ProviderModule module = new TestProviderModule();
-//
-//    ClusterController controller = new ClusterControllerImpl(module);
-//    Schema schema = controller.getSchema(Resource.Type.Host);
-//
-//    ResourceProvider resourceProvider = module.getResourceProvider(Resource.Type.Host);
-//
-//    Map<Resource.Type, String> keyPropertyIds = resourceProvider.getKeyPropertyIds();
-//    for (Map.Entry<Resource.Type, String> entry : keyPropertyIds.entrySet()) {
-//      Assert.assertEquals(entry.getValue(), schema.getKeyPropertyId(entry.getKey()));
-//    }
-//
-//    Map<String, Set<String>> categories = schema.getCategoryProperties();
-//    for (String propertyId : resourceProvider.getPropertyIdsForSchema()) {
-//      String category = PropertyHelper.getPropertyCategory(propertyId);
-//      Set<String> properties = categories.get(category);
-//      Assert.assertNotNull(properties);
-//      Assert.assertTrue(properties.contains(PropertyHelper.getPropertyName(propertyId)));
-//    }
-//
-//    List<PropertyProvider> propertyProviders = module.getPropertyProviders(Resource.Type.Host);
-//
-//    for (PropertyProvider propertyProvider : propertyProviders) {
-//      for (String propertyId : propertyProvider.getPropertyIds()) {
-//        String category = PropertyHelper.getPropertyCategory(propertyId);
-//        Set<String> properties = categories.get(category);
-//        Assert.assertNotNull(properties);
-//        Assert.assertTrue(properties.contains(PropertyHelper.getPropertyName(propertyId)));
-//      }
-//    }
-//  }
-
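-  // Provider module that supplies a TestResourceProvider for each of the core resource types.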
-  private static class TestProviderModule implements ProviderModule {
-    private Map<Resource.Type, ResourceProvider> providers = new HashMap<Resource.Type, ResourceProvider>();
-
-    private TestProviderModule() {
-      providers.put(Resource.Type.Cluster, new TestResourceProvider());
-      providers.put(Resource.Type.Service, new TestResourceProvider());
-      providers.put(Resource.Type.Component, new TestResourceProvider());
-      providers.put(Resource.Type.Host, new TestResourceProvider());
-      providers.put(Resource.Type.HostComponent, new TestResourceProvider());
-    }
-
-    @Override
-    public ResourceProvider getResourceProvider(Resource.Type type) {
-      return providers.get(type);
-    }
-
-    @Override
-    public List<PropertyProvider> getPropertyProviders(Resource.Type type) {
-      return propertyProviders;
-    }
-  }
-
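-  // Stub provider that returns canned host resources and records the last action, request and predicate.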
-  private static class TestResourceProvider implements ResourceProvider {
-    private Action lastAction = null;
-    private Request lastRequest = null;
-    private Predicate lastPredicate = null;
-
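-    // Returns four host resources with fixed properties: c1/p1 = 0..3 and c1/p2 alternating 0/1.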
-    @Override
-    public Set<Resource> getResources(Request request, Predicate predicate) throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-
-      Set<Resource> resources = new HashSet<Resource>();
-
-      for (int cnt = 0; cnt < 4; ++cnt) {
-        ResourceImpl resource = new ResourceImpl(Resource.Type.Host);
-
-        resource.setProperty(PropertyHelper.getPropertyId("c1", "p1"), cnt);
-        resource.setProperty(PropertyHelper.getPropertyId("c1", "p2"), cnt % 2);
-        resource.setProperty(PropertyHelper.getPropertyId("c1", "p3"), "foo");
-        resource.setProperty(PropertyHelper.getPropertyId("c2", "p4"), "bar");
-        resources.add(resource);
-      }
-
-      return resources;
-    }
-
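-    // The mutating operations below do no real work; they only record their arguments for verification.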
-    @Override
-    public RequestStatus createResources(Request request)  {
-      lastAction = Action.Create;
-      lastRequest = request;
-      lastPredicate = null;
-      return new RequestStatusImpl(null);
-    }
-
-    @Override
-    public RequestStatus updateResources(Request request, Predicate predicate) throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-      lastAction = Action.Update;
-      lastRequest = request;
-      lastPredicate = predicate;
-      return new RequestStatusImpl(null);
-    }
-
-    @Override
-    public RequestStatus deleteResources(Predicate predicate) throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-      lastAction = Action.Delete;
-      lastRequest = null;
-      lastPredicate = predicate;
-      return new RequestStatusImpl(null);
-    }
-
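-    // Returns the subset of the requested property ids that this provider does not support.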
-    @Override
-    public Set<String> checkPropertyIds(Set<String> propertyIds) {
-      if (!resourceProviderProperties.containsAll(propertyIds)) {
-        Set<String> unsupportedPropertyIds = new HashSet<String>(propertyIds);
-        unsupportedPropertyIds.removeAll(resourceProviderProperties);
-        return unsupportedPropertyIds;
-      }
-      return Collections.emptySet();
-    }
-
-    @Override
-    public Map<Resource.Type, String> getKeyPropertyIds() {
-      return keyPropertyIds;
-    }
-
-    public Action getLastAction() {
-      return lastAction;
-    }
-
-    public Request getLastRequest() {
-      return lastRequest;
-    }
-
-    public Predicate getLastPredicate() {
-      return lastPredicate;
-    }
-
-    public enum Action {
-      Create,
-      Update,
-      Delete
-    }
-
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterResourceProviderTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterResourceProviderTest.java
deleted file mode 100644
index 8bf5f5c..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterResourceProviderTest.java
+++ /dev/null
@@ -1,301 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.internal;
-
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.ClusterRequest;
-import org.apache.ambari.server.controller.ClusterResponse;
-import org.apache.ambari.server.controller.RequestStatusResponse;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.spi.ResourceProvider;
-import org.apache.ambari.server.controller.utilities.PredicateBuilder;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.easymock.EasyMock;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.LinkedHashSet;
-import java.util.Map;
-import java.util.Set;
-
-import static org.easymock.EasyMock.createMock;
-import static org.easymock.EasyMock.createNiceMock;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.verify;
-
-/**
- * ClusterResourceProvider tests.
- */
-public class ClusterResourceProviderTest {
-  @Test
-  public void testCreateResources() throws Exception {
-    Resource.Type type = Resource.Type.Cluster;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-    RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
-
-    managementController.createCluster(
-        AbstractResourceProviderTest.Matcher.getClusterRequest(null, "Cluster100", "HDP-0.1", null));
-    managementController.createCluster(
-        AbstractResourceProviderTest.Matcher.getClusterRequest(99L, null, "HDP-0.1", null));
-
-    // replay
-    replay(managementController, response);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    AbstractResourceProviderTest.TestObserver observer = new AbstractResourceProviderTest.TestObserver();
-
-    ((ObservableResourceProvider)provider).addObserver(observer);
-
-    // add the property map to a set for the request; add more maps for multiple creates
-    Set<Map<String, Object>> propertySet = new LinkedHashSet<Map<String, Object>>();
-
-    // Cluster 1: create a map of properties for the request
-    Map<String, Object> properties = new LinkedHashMap<String, Object>();
-
-    // add the cluster name to the properties map
-    properties.put(ClusterResourceProvider.CLUSTER_NAME_PROPERTY_ID, "Cluster100");
-
-    // add the version to the properties map
-    properties.put(ClusterResourceProvider.CLUSTER_VERSION_PROPERTY_ID, "HDP-0.1");
-
-    propertySet.add(properties);
-
-    // Cluster 2: create a map of properties for the request
-    properties = new LinkedHashMap<String, Object>();
-
-    // add the cluster id to the properties map
-    properties.put(ClusterResourceProvider.CLUSTER_ID_PROPERTY_ID, 99L);
-
-    // add the version to the properties map
-    properties.put(ClusterResourceProvider.CLUSTER_VERSION_PROPERTY_ID, "HDP-0.1");
-
-    propertySet.add(properties);
-
-    // create the request
-    Request request = PropertyHelper.getCreateRequest(propertySet);
-
-    provider.createResources(request);
-
-    ResourceProviderEvent lastEvent = observer.getLastEvent();
-    Assert.assertNotNull(lastEvent);
-    Assert.assertEquals(Resource.Type.Cluster, lastEvent.getResourceType());
-    Assert.assertEquals(ResourceProviderEvent.Type.Create, lastEvent.getType());
-    Assert.assertEquals(request, lastEvent.getRequest());
-    Assert.assertNull(lastEvent.getPredicate());
-
-    // verify
-    verify(managementController, response);
-  }
-
-  @Test
-  public void testGetResources() throws Exception {
-    Resource.Type type = Resource.Type.Cluster;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-
-    Set<ClusterResponse> allResponse = new HashSet<ClusterResponse>();
-    allResponse.add(new ClusterResponse(100L, "Cluster100", null, null));
-    allResponse.add(new ClusterResponse(101L, "Cluster101", null, null));
-    allResponse.add(new ClusterResponse(102L, "Cluster102", null, null));
-    allResponse.add(new ClusterResponse(103L, "Cluster103", null, null));
-    allResponse.add(new ClusterResponse(104L, "Cluster104", null, null));
-
-    Set<ClusterResponse> nameResponse = new HashSet<ClusterResponse>();
-    nameResponse.add(new ClusterResponse(102L, "Cluster102", null, null));
-
-    Set<ClusterResponse> idResponse = new HashSet<ClusterResponse>();
-    idResponse.add(new ClusterResponse(103L, "Cluster103", null, null));
-
-    // set expectations
-    expect(managementController.getClusters(EasyMock.<Set<ClusterRequest>>anyObject())).andReturn(allResponse).once();
-    expect(managementController.getClusters(EasyMock.<Set<ClusterRequest>>anyObject())).andReturn(nameResponse).once();
-    expect(managementController.getClusters(EasyMock.<Set<ClusterRequest>>anyObject())).andReturn(idResponse).once();
-
-    // replay
-    replay(managementController);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    Set<String> propertyIds = new HashSet<String>();
-
-    propertyIds.add(ClusterResourceProvider.CLUSTER_ID_PROPERTY_ID);
-    propertyIds.add(ClusterResourceProvider.CLUSTER_NAME_PROPERTY_ID);
-
-    // create the request
-    Request request = PropertyHelper.getReadRequest(propertyIds);
-
-    // get all ... no predicate
-    Set<Resource> resources = provider.getResources(request, null);
-
-    Assert.assertEquals(5, resources.size());
-    for (Resource resource : resources) {
-      Long id = (Long) resource.getPropertyValue(ClusterResourceProvider.CLUSTER_ID_PROPERTY_ID);
-      String name = (String) resource.getPropertyValue(ClusterResourceProvider.CLUSTER_NAME_PROPERTY_ID);
-      Assert.assertEquals(name, "Cluster" + id);
-    }
-
-    // get cluster named Cluster102
-    Predicate predicate =
-        new PredicateBuilder().property(ClusterResourceProvider.CLUSTER_NAME_PROPERTY_ID).equals("Cluster102").
-            toPredicate();
-    resources = provider.getResources(request, predicate);
-
-    Assert.assertEquals(1, resources.size());
-    Assert.assertEquals(102L, resources.iterator().next().
-        getPropertyValue(ClusterResourceProvider.CLUSTER_ID_PROPERTY_ID));
-    Assert.assertEquals("Cluster102", resources.iterator().next().
-        getPropertyValue(ClusterResourceProvider.CLUSTER_NAME_PROPERTY_ID));
-
-    // get cluster with id == 103
-    predicate =
-        new PredicateBuilder().property(ClusterResourceProvider.CLUSTER_ID_PROPERTY_ID).equals(103L).toPredicate();
-    resources = provider.getResources(request, predicate);
-
-    Assert.assertEquals(1, resources.size());
-    Assert.assertEquals(103L, resources.iterator().next().
-        getPropertyValue(ClusterResourceProvider.CLUSTER_ID_PROPERTY_ID));
-    Assert.assertEquals("Cluster103", resources.iterator().next().
-        getPropertyValue(ClusterResourceProvider.CLUSTER_NAME_PROPERTY_ID));
-
-    // verify
-    verify(managementController);
-  }
-
-  @Test
-  public void testUpdateResources() throws Exception {
-    Resource.Type type = Resource.Type.Cluster;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-    RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
-
-    Set<ClusterResponse> nameResponse = new HashSet<ClusterResponse>();
-    nameResponse.add(new ClusterResponse(102L, "Cluster102", null, null));
-
-    // set expectations
-    expect(managementController.getClusters(EasyMock.<Set<ClusterRequest>>anyObject())).andReturn(nameResponse).once();
-    expect(managementController.updateCluster(
-        AbstractResourceProviderTest.Matcher.getClusterRequest(102L, "Cluster102", "HDP-0.1", null))).
-        andReturn(response).once();
-    expect(managementController.updateCluster(
-        AbstractResourceProviderTest.Matcher.getClusterRequest(103L, null, "HDP-0.1", null))).
-        andReturn(response).once();
-
-    // replay
-    replay(managementController, response);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    AbstractResourceProviderTest.TestObserver observer = new AbstractResourceProviderTest.TestObserver();
-
-    ((ObservableResourceProvider)provider).addObserver(observer);
-
-    Map<String, Object> properties = new LinkedHashMap<String, Object>();
-
-    properties.put(ClusterResourceProvider.CLUSTER_VERSION_PROPERTY_ID, "HDP-0.1");
-
-    // create the request
-    Request request = PropertyHelper.getUpdateRequest(properties);
-
-    // update the cluster named Cluster102
-    Predicate predicate = new PredicateBuilder().property(
-        ClusterResourceProvider.CLUSTER_NAME_PROPERTY_ID).equals("Cluster102").toPredicate();
-    provider.updateResources(request, predicate);
-
-    // update the cluster where id == 103
-    predicate = new PredicateBuilder().property(
-        ClusterResourceProvider.CLUSTER_ID_PROPERTY_ID).equals(103L).toPredicate();
-    provider.updateResources(request, predicate);
-
-    ResourceProviderEvent lastEvent = observer.getLastEvent();
-    Assert.assertNotNull(lastEvent);
-    Assert.assertEquals(Resource.Type.Cluster, lastEvent.getResourceType());
-    Assert.assertEquals(ResourceProviderEvent.Type.Update, lastEvent.getType());
-    Assert.assertEquals(request, lastEvent.getRequest());
-    Assert.assertEquals(predicate, lastEvent.getPredicate());
-
-    // verify
-    verify(managementController, response);
-  }
-
-  @Test
-  public void testDeleteResources() throws Exception {
-    Resource.Type type = Resource.Type.Cluster;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-    RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
-
-    // set expectations
-    managementController.deleteCluster(
-        AbstractResourceProviderTest.Matcher.getClusterRequest(null, "Cluster102", null, null));
-    managementController.deleteCluster(
-        AbstractResourceProviderTest.Matcher.getClusterRequest(103L, null, null, null));
-
-    // replay
-    replay(managementController, response);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    AbstractResourceProviderTest.TestObserver observer = new AbstractResourceProviderTest.TestObserver();
-
-    ((ObservableResourceProvider)provider).addObserver(observer);
-
-    // delete the cluster named Cluster102
-    Predicate predicate = new PredicateBuilder().property(
-        ClusterResourceProvider.CLUSTER_NAME_PROPERTY_ID).equals("Cluster102").toPredicate();
-    provider.deleteResources(predicate);
-
-    // delete the cluster where id == 103
-    predicate = new PredicateBuilder().property(
-        ClusterResourceProvider.CLUSTER_ID_PROPERTY_ID).equals(103L).toPredicate();
-    provider.deleteResources(predicate);
-
-    ResourceProviderEvent lastEvent = observer.getLastEvent();
-    Assert.assertNotNull(lastEvent);
-    Assert.assertEquals(Resource.Type.Cluster, lastEvent.getResourceType());
-    Assert.assertEquals(ResourceProviderEvent.Type.Delete, lastEvent.getType());
-    Assert.assertEquals(predicate, lastEvent.getPredicate());
-    Assert.assertNull(lastEvent.getRequest());
-
-    // verify
-    verify(managementController, response);
-  }
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ComponentResourceProviderTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ComponentResourceProviderTest.java
deleted file mode 100644
index 3073311..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ComponentResourceProviderTest.java
+++ /dev/null
@@ -1,226 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.internal;
-
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.RequestStatusResponse;
-import org.apache.ambari.server.controller.ServiceComponentRequest;
-import org.apache.ambari.server.controller.ServiceComponentResponse;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.spi.ResourceProvider;
-import org.apache.ambari.server.controller.utilities.PredicateBuilder;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.easymock.EasyMock;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.LinkedHashSet;
-import java.util.Map;
-import java.util.Set;
-
-import static org.easymock.EasyMock.createMock;
-import static org.easymock.EasyMock.createNiceMock;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.verify;
-
-/**
- * Tests for the component resource provider.
- */
-public class ComponentResourceProviderTest {
-  @Test
-  public void testCreateResources() throws Exception {
-    Resource.Type type = Resource.Type.Component;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-    RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
-
-    managementController.createComponents(
-        AbstractResourceProviderTest.Matcher.getComponentRequestSet(
-            "Cluster100", "Service100", "Component100", null, null));
-
-    // replay
-    replay(managementController, response);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    // add the property map to a set for the request; add more maps for multiple creates
-    Set<Map<String, Object>> propertySet = new LinkedHashSet<Map<String, Object>>();
-
-    // Service 1: create a map of properties for the request
-    Map<String, Object> properties = new LinkedHashMap<String, Object>();
-
-    // add properties to the request map
-    properties.put(ComponentResourceProvider.COMPONENT_CLUSTER_NAME_PROPERTY_ID, "Cluster100");
-    properties.put(ComponentResourceProvider.COMPONENT_SERVICE_NAME_PROPERTY_ID, "Service100");
-    properties.put(ComponentResourceProvider.COMPONENT_COMPONENT_NAME_PROPERTY_ID, "Component100");
-
-    propertySet.add(properties);
-
-    // create the request
-    Request request = PropertyHelper.getCreateRequest(propertySet);
-
-    provider.createResources(request);
-
-    // verify
-    verify(managementController, response);
-  }
-
-  @Test
-  public void testGetResources() throws Exception {
-    Resource.Type type = Resource.Type.Component;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-
-    Set<ServiceComponentResponse> allResponse = new HashSet<ServiceComponentResponse>();
-    allResponse.add(new ServiceComponentResponse(100L, "Cluster100", "Service100", "Component100", null, null, ""));
-    allResponse.add(new ServiceComponentResponse(100L, "Cluster100", "Service100", "Component101", null, null, ""));
-    allResponse.add(new ServiceComponentResponse(100L, "Cluster100", "Service100", "Component102", null, null, ""));
-
-    // set expectations
-    expect(managementController.getComponents(
-        AbstractResourceProviderTest.Matcher.getComponentRequestSet(
-            "Cluster100", null, null, null, null))).andReturn(allResponse).once();
-
-    // replay
-    replay(managementController);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    Set<String> propertyIds = new HashSet<String>();
-
-    propertyIds.add(ComponentResourceProvider.COMPONENT_CLUSTER_NAME_PROPERTY_ID);
-    propertyIds.add(ComponentResourceProvider.COMPONENT_COMPONENT_NAME_PROPERTY_ID);
-
-    Predicate predicate = new PredicateBuilder().property(ComponentResourceProvider.COMPONENT_CLUSTER_NAME_PROPERTY_ID).
-        equals("Cluster100").toPredicate();
-    Request request = PropertyHelper.getReadRequest(propertyIds);
-    Set<Resource> resources = provider.getResources(request, predicate);
-
-    Assert.assertEquals(3, resources.size());
-    Set<String> names = new HashSet<String>();
-    for (Resource resource : resources) {
-      String clusterName = (String) resource.getPropertyValue(
-          ComponentResourceProvider.COMPONENT_CLUSTER_NAME_PROPERTY_ID);
-      Assert.assertEquals("Cluster100", clusterName);
-      names.add((String) resource.getPropertyValue(ComponentResourceProvider.COMPONENT_COMPONENT_NAME_PROPERTY_ID));
-    }
-    // Make sure that all of the response objects got moved into resources
-    for (ServiceComponentResponse response : allResponse) {
-      Assert.assertTrue(names.contains(response.getComponentName()));
-    }
-
-    // verify
-    verify(managementController);
-  }
-
-  @Test
-  public void testUpdateResources() throws Exception {
-    Resource.Type type = Resource.Type.Component;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-    RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
-
-    Set<ServiceComponentResponse> nameResponse = new HashSet<ServiceComponentResponse>();
-    nameResponse.add(new ServiceComponentResponse(102L, "Cluster102", "Service", "Component", null, "1", "STARTED"));
-
-    // set expectations
-    expect(managementController.getComponents(EasyMock.<Set<ServiceComponentRequest>>anyObject())).
-        andReturn(nameResponse).once();
-    expect(managementController.updateComponents(
-        AbstractResourceProviderTest.Matcher.getComponentRequestSet(
-            "Cluster102", "Service", "Component", null, "STARTED"))).andReturn(response).once();
-
-    // replay
-    replay(managementController, response);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    Map<String, Object> properties = new LinkedHashMap<String, Object>();
-
-    properties.put(ComponentResourceProvider.COMPONENT_STATE_PROPERTY_ID, "STARTED");
-
-    // create the request
-    Request request = PropertyHelper.getUpdateRequest(properties);
-
-    // update the cluster named Cluster102
-    Predicate predicate = new PredicateBuilder().property(ClusterResourceProvider.CLUSTER_NAME_PROPERTY_ID).
-        equals("Cluster102").toPredicate();
-    provider.updateResources(request, predicate);
-
-    // verify
-    verify(managementController, response);
-  }
-
-  @Test
-  public void testDeleteResources() throws Exception {
-    Resource.Type type = Resource.Type.Component;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-    RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
-
-    // set expectations
-    expect(managementController.deleteComponents(AbstractResourceProviderTest.Matcher.
-        getComponentRequestSet(null, null, "Component100", null, null))).andReturn(response);
-
-    // replay
-    replay(managementController, response);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    AbstractResourceProviderTest.TestObserver observer = new AbstractResourceProviderTest.TestObserver();
-
-    ((ObservableResourceProvider)provider).addObserver(observer);
-
-    Predicate predicate = new PredicateBuilder().property(
-        ComponentResourceProvider.COMPONENT_COMPONENT_NAME_PROPERTY_ID).equals("Component100").toPredicate();
-    provider.deleteResources(predicate);
-
-    ResourceProviderEvent lastEvent = observer.getLastEvent();
-    Assert.assertNotNull(lastEvent);
-    Assert.assertEquals(Resource.Type.Component, lastEvent.getResourceType());
-    Assert.assertEquals(ResourceProviderEvent.Type.Delete, lastEvent.getType());
-    Assert.assertEquals(predicate, lastEvent.getPredicate());
-    Assert.assertNull(lastEvent.getRequest());
-
-    // verify
-    verify(managementController, response);
-  }
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ConfigurationResourceProviderTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ConfigurationResourceProviderTest.java
deleted file mode 100644
index 1eebea8..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ConfigurationResourceProviderTest.java
+++ /dev/null
@@ -1,204 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.internal;
-
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.ConfigurationResponse;
-import org.apache.ambari.server.controller.RequestStatusResponse;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.spi.ResourceProvider;
-import org.apache.ambari.server.controller.utilities.PredicateBuilder;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.LinkedHashSet;
-import java.util.Map;
-import java.util.Set;
-
-import static org.easymock.EasyMock.createMock;
-import static org.easymock.EasyMock.createNiceMock;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.verify;
-
-/**
- * Tests for the configuration resource provider.
- */
-public class ConfigurationResourceProviderTest {
-  @Test
-  public void testCreateResources() throws Exception {
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-    RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
-
-    managementController.createConfiguration(AbstractResourceProviderTest.Matcher.getConfigurationRequest(
-        "Cluster100", "type", "tag", new HashMap<String, String>()));
-
-    // replay
-    replay(managementController, response);
-
-    ConfigurationResourceProvider provider = new ConfigurationResourceProvider(
-        PropertyHelper.getPropertyIds(Resource.Type.Configuration),
-        PropertyHelper.getKeyPropertyIds(Resource.Type.Configuration),
-        managementController);
-
-    Set<Map<String, Object>> propertySet = new LinkedHashSet<Map<String, Object>>();
-
-    Map<String, Object> properties = new LinkedHashMap<String, Object>();
-
-    properties.put(ConfigurationResourceProvider.CONFIGURATION_CLUSTER_NAME_PROPERTY_ID, "Cluster100");
-    properties.put(ConfigurationResourceProvider.CONFIGURATION_CONFIG_TAG_PROPERTY_ID, "tag");
-    properties.put(ConfigurationResourceProvider.CONFIGURATION_CONFIG_TYPE_PROPERTY_ID, "type");
-
-    propertySet.add(properties);
-
-    // create the request
-    Request request = PropertyHelper.getCreateRequest(propertySet);
-
-    provider.createResources(request);
-
-    // verify
-    verify(managementController, response);
-  }
-
-  @Test
-  public void testGetResources() throws Exception {
-    Resource.Type type = Resource.Type.Configuration;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-
-    Set<ConfigurationResponse> allResponse = new HashSet<ConfigurationResponse>();
-    allResponse.add(new ConfigurationResponse("Cluster100", "type", "tag1", null));
-    allResponse.add(new ConfigurationResponse("Cluster100", "type", "tag2", null));
-    allResponse.add(new ConfigurationResponse("Cluster100", "type", "tag3", null));
-
-    // set expectations
-    expect(managementController.getConfigurations(
-        AbstractResourceProviderTest.Matcher.getConfigurationRequestSet(
-            "Cluster100", null, null, Collections.<String, String>emptyMap()))).andReturn(allResponse).once();
-
-    // replay
-    replay(managementController);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    Set<String> propertyIds = new HashSet<String>();
-
-    propertyIds.add(ConfigurationResourceProvider.CONFIGURATION_CLUSTER_NAME_PROPERTY_ID);
-    propertyIds.add(ConfigurationResourceProvider.CONFIGURATION_CONFIG_TAG_PROPERTY_ID);
-
-    Predicate predicate = new PredicateBuilder().property(
-        ConfigurationResourceProvider.CONFIGURATION_CLUSTER_NAME_PROPERTY_ID).equals("Cluster100").toPredicate();
-    Request request = PropertyHelper.getReadRequest(propertyIds);
-    Set<Resource> resources = provider.getResources(request, predicate);
-
-    Assert.assertEquals(3, resources.size());
-    Set<String> tags = new HashSet<String>();
-    for (Resource resource : resources) {
-      String clusterName = (String) resource.getPropertyValue(
-          ConfigurationResourceProvider.CONFIGURATION_CLUSTER_NAME_PROPERTY_ID);
-      Assert.assertEquals("Cluster100", clusterName);
-      tags.add((String) resource.getPropertyValue(
-          ConfigurationResourceProvider.CONFIGURATION_CONFIG_TAG_PROPERTY_ID));
-    }
-    // Make sure that all of the response objects got moved into resources
-    for (ConfigurationResponse response : allResponse) {
-      Assert.assertTrue(tags.contains(response.getVersionTag()));
-    }
-
-    // verify
-    verify(managementController);
-
-  }
-
-  @Test
-  public void testUpdateResources() throws Exception {
-    Resource.Type type = Resource.Type.Configuration;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-    RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
-
-    // replay
-    replay(managementController, response);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    // add the property map to a set for the request.
-    Map<String, Object> properties = new LinkedHashMap<String, Object>();
-
-    // create the request
-    Request request = PropertyHelper.getUpdateRequest(properties);
-
-    Predicate predicate = new PredicateBuilder().property(
-        ConfigurationResourceProvider.CONFIGURATION_CONFIG_TAG_PROPERTY_ID).equals("Configuration100").toPredicate();
-
-    try {
-      provider.updateResources(request, predicate);
-      Assert.fail("Expected an UnsupportedOperationException");
-    } catch (UnsupportedOperationException e) {
-      // expected
-    }
-
-    // verify
-    verify(managementController, response);
-  }
-
-  @Test
-  public void testDeleteResources() throws Exception {
-    Resource.Type type = Resource.Type.Configuration;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-
-    // replay
-    replay(managementController);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    Predicate predicate = new PredicateBuilder().property(
-        ConfigurationResourceProvider.CONFIGURATION_CONFIG_TAG_PROPERTY_ID).equals("Configuration100").toPredicate();
-    try {
-      provider.deleteResources(predicate);
-      Assert.fail("Expected an UnsupportedOperationException");
-    } catch (UnsupportedOperationException e) {
-      // expected
-    }
-
-    // verify
-    verify(managementController);
-  }
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostComponentResourceProviderTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostComponentResourceProviderTest.java
deleted file mode 100644
index 12d5569..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostComponentResourceProviderTest.java
+++ /dev/null
@@ -1,282 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.internal;
-
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.RequestStatusResponse;
-import org.apache.ambari.server.controller.ServiceComponentHostRequest;
-import org.apache.ambari.server.controller.ServiceComponentHostResponse;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.spi.ResourceProvider;
-import org.apache.ambari.server.controller.utilities.PredicateBuilder;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.easymock.EasyMock;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.LinkedHashSet;
-import java.util.Map;
-import java.util.Set;
-
-import static org.easymock.EasyMock.createMock;
-import static org.easymock.EasyMock.createNiceMock;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.verify;
-
-/**
- * HostComponentResourceProvider tests.
- */
-public class HostComponentResourceProviderTest {
-  @Test
-  public void testCreateResources() throws Exception {
-    Resource.Type type = Resource.Type.HostComponent;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-    RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
-
-    managementController.createHostComponents(
-        AbstractResourceProviderTest.Matcher.getHostComponentRequestSet(
-            "Cluster100", "Service100", "Component100", "Host100", null, null));
-
-    // replay
-    replay(managementController, response);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    // add the property map to a set for the request; add more maps for multiple creates
-    Set<Map<String, Object>> propertySet = new LinkedHashSet<Map<String, Object>>();
-
-    // Service 1: create a map of properties for the request
-    Map<String, Object> properties = new LinkedHashMap<String, Object>();
-
-    // add properties to the request map
-    properties.put(HostComponentResourceProvider.HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID, "Cluster100");
-    properties.put(HostComponentResourceProvider.HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID, "Service100");
-    properties.put(HostComponentResourceProvider.HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID, "Component100");
-    properties.put(HostComponentResourceProvider.HOST_COMPONENT_HOST_NAME_PROPERTY_ID, "Host100");
-
-    propertySet.add(properties);
-
-    // create the request
-    Request request = PropertyHelper.getCreateRequest(propertySet);
-
-    provider.createResources(request);
-
-    // verify
-    verify(managementController, response);
-  }
-
-  @Test
-  public void testGetResources() throws Exception {
-    Resource.Type type = Resource.Type.HostComponent;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-
-    Set<ServiceComponentHostResponse> allResponse = new HashSet<ServiceComponentHostResponse>();
-    allResponse.add(new ServiceComponentHostResponse(
-        "Cluster100", "Service100", "Component100", "Host100", null, null, "", "", "" ));
-    allResponse.add(new ServiceComponentHostResponse(
-        "Cluster100", "Service100", "Component101", "Host100", null, null, "", "", "" ));
-    allResponse.add(new ServiceComponentHostResponse(
-        "Cluster100", "Service100", "Component102", "Host100", null, null, "", "", "" ));
-
-    // set expectations
-    expect(managementController.getHostComponents(
-        AbstractResourceProviderTest.Matcher.getHostComponentRequestSet(
-            "Cluster100", null, null, null, null, null))).andReturn(allResponse).once();
-
-    // replay
-    replay(managementController);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    Set<String> propertyIds = new HashSet<String>();
-
-    propertyIds.add(HostComponentResourceProvider.HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID);
-    propertyIds.add(HostComponentResourceProvider.HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID);
-
-    Predicate predicate = new PredicateBuilder().property(
-        HostComponentResourceProvider.HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID).equals("Cluster100").toPredicate();
-    Request request = PropertyHelper.getReadRequest(propertyIds);
-    Set<Resource> resources = provider.getResources(request, predicate);
-
-    Assert.assertEquals(3, resources.size());
-    Set<String> names = new HashSet<String>();
-    for (Resource resource : resources) {
-      String clusterName = (String) resource.getPropertyValue(
-          HostComponentResourceProvider.HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID);
-      Assert.assertEquals("Cluster100", clusterName);
-      names.add((String) resource.getPropertyValue(
-          HostComponentResourceProvider.HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID));
-    }
-    // Make sure that all of the response objects got moved into resources
-    for (ServiceComponentHostResponse response : allResponse) {
-      Assert.assertTrue(names.contains(response.getComponentName()));
-    }
-
-    // verify
-    verify(managementController);
-  }
-
-  @Test
-  public void testUpdateResources() throws Exception {
-    Resource.Type type = Resource.Type.HostComponent;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-    RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
-
-    Set<ServiceComponentHostResponse> nameResponse = new HashSet<ServiceComponentHostResponse>();
-    nameResponse.add(new ServiceComponentHostResponse(
-        "Cluster102", "Service100", "Component100", "Host100", null, null, "STARTED", "", ""));
-
-    // set expectations
-    expect(managementController.getHostComponents(
-        EasyMock.<Set<ServiceComponentHostRequest>>anyObject())).andReturn(nameResponse).once();
-    expect(managementController.updateHostComponents(
-        AbstractResourceProviderTest.Matcher.getHostComponentRequestSet(
-            "Cluster102", null, "Component100", "Host100", null, "STARTED"))).andReturn(response).once();
-
-    // replay
-    replay(managementController, response);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    Map<String, Object> properties = new LinkedHashMap<String, Object>();
-
-    properties.put(HostComponentResourceProvider.HOST_COMPONENT_STATE_PROPERTY_ID, "STARTED");
-
-    // create the request
-    Request request = PropertyHelper.getUpdateRequest(properties);
-
-    // update the cluster named Cluster102
-    Predicate predicate = new PredicateBuilder().property(
-        HostComponentResourceProvider.HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID).equals("Cluster102").toPredicate();
-    provider.updateResources(request, predicate);
-
-    // verify
-    verify(managementController, response);
-  }
-
-  @Test
-  public void testDeleteResources() throws Exception {
-    Resource.Type type = Resource.Type.HostComponent;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-    RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
-
-    // set expectations
-    expect(managementController.deleteHostComponents(
-        AbstractResourceProviderTest.Matcher.getHostComponentRequestSet(
-            null, null, "Component100", "Host100", null, null))).andReturn(response);
-
-    // replay
-    replay(managementController, response);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    AbstractResourceProviderTest.TestObserver observer = new AbstractResourceProviderTest.TestObserver();
-
-    ((ObservableResourceProvider)provider).addObserver(observer);
-
-    Predicate predicate = new PredicateBuilder().
-        property(HostComponentResourceProvider.HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID).equals("Component100").and().
-        property(HostComponentResourceProvider.HOST_COMPONENT_HOST_NAME_PROPERTY_ID).equals("Host100").toPredicate();
-    provider.deleteResources(predicate);
-
-    ResourceProviderEvent lastEvent = observer.getLastEvent();
-    Assert.assertNotNull(lastEvent);
-    Assert.assertEquals(Resource.Type.HostComponent, lastEvent.getResourceType());
-    Assert.assertEquals(ResourceProviderEvent.Type.Delete, lastEvent.getType());
-    Assert.assertEquals(predicate, lastEvent.getPredicate());
-    Assert.assertNull(lastEvent.getRequest());
-
-    // verify
-    verify(managementController, response);
-  }
-
-  @Test
-  public void testCheckPropertyIds() throws Exception {
-    Set<String> propertyIds = new HashSet<String>();
-    propertyIds.add("foo");
-    propertyIds.add("cat1/foo");
-    propertyIds.add("cat2/bar");
-    propertyIds.add("cat2/baz");
-    propertyIds.add("cat3/sub1/bam");
-    propertyIds.add("cat4/sub2/sub3/bat");
-    propertyIds.add("cat5/subcat5/map");
-
-    Map<Resource.Type, String> keyPropertyIds = new HashMap<Resource.Type, String>();
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-
-    AbstractResourceProvider provider =
-        (AbstractResourceProvider) AbstractResourceProvider.getResourceProvider(
-            Resource.Type.HostComponent,
-            propertyIds,
-            keyPropertyIds,
-            managementController);
-
-    Set<String> unsupported = provider.checkPropertyIds(Collections.singleton("foo"));
-    Assert.assertTrue(unsupported.isEmpty());
-
-    // Note that "cat5/subcat5/map/key" itself is not in the set of known property ids.  We allow it
-    // because its parent, "cat5/subcat5/map", is a known property.  This supports Map-type properties
-    // whose entries are treated as individual properties.
-    Assert.assertTrue(provider.checkPropertyIds(Collections.singleton("cat5/subcat5/map/key")).isEmpty());
-
-    unsupported = provider.checkPropertyIds(Collections.singleton("bar"));
-    Assert.assertEquals(1, unsupported.size());
-    Assert.assertTrue(unsupported.contains("bar"));
-
-    unsupported = provider.checkPropertyIds(Collections.singleton("cat1/foo"));
-    Assert.assertTrue(unsupported.isEmpty());
-
-    unsupported = provider.checkPropertyIds(Collections.singleton("cat1"));
-    Assert.assertTrue(unsupported.isEmpty());
-
-    unsupported = provider.checkPropertyIds(Collections.singleton("config"));
-    Assert.assertTrue(unsupported.isEmpty());
-
-    unsupported = provider.checkPropertyIds(Collections.singleton("config/unknown_property"));
-    Assert.assertTrue(unsupported.isEmpty());
-  }
-}
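
The last test above leans on a subtlety of AbstractResourceProvider.checkPropertyIds: an id such as
"cat5/subcat5/map/key" is accepted when its parent category is known, and a bare category such as
"cat1" is accepted when a known id lives under it. A minimal standalone sketch of those two checks,
assuming the '/'-delimited id scheme used by PropertyHelper (the helper class below is illustrative,
not the provider's actual code, and it ignores the special-cased "config" category):

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    public class PropertyIdCheckSketch {
      static boolean isSupported(Set<String> knownIds, String propertyId) {
        // A category is supported if any known id lives underneath it, e.g. "cat1" for "cat1/foo".
        for (String known : knownIds) {
          if (known.equals(propertyId) || known.startsWith(propertyId + "/")) {
            return true;
          }
        }
        // An entry is supported if some ancestor category is known, e.g. "cat5/subcat5/map/key".
        for (int slash = propertyId.lastIndexOf('/'); slash != -1; slash = propertyId.lastIndexOf('/')) {
          propertyId = propertyId.substring(0, slash);
          if (knownIds.contains(propertyId)) {
            return true;
          }
        }
        return false;
      }

      public static void main(String[] args) {
        Set<String> known = new HashSet<String>(Arrays.asList("cat1/foo", "cat5/subcat5/map"));
        System.out.println(isSupported(known, "cat5/subcat5/map/key")); // true: parent is known
        System.out.println(isSupported(known, "cat1"));                 // true: a known id lives under it
        System.out.println(isSupported(known, "bar"));                  // false: unknown everywhere
      }
    }
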
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostResourceProviderTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostResourceProviderTest.java
deleted file mode 100644
index 52da6c3..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostResourceProviderTest.java
+++ /dev/null
@@ -1,234 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.internal;
-
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.HostRequest;
-import org.apache.ambari.server.controller.HostResponse;
-import org.apache.ambari.server.controller.RequestStatusResponse;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.spi.ResourceProvider;
-import org.apache.ambari.server.controller.utilities.PredicateBuilder;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.apache.ambari.server.state.HostHealthStatus;
-import org.easymock.EasyMock;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.LinkedHashSet;
-import java.util.Map;
-import java.util.Set;
-
-import static org.easymock.EasyMock.createMock;
-import static org.easymock.EasyMock.createNiceMock;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.verify;
-
-/**
- * HostResourceProvider tests.
- */
-public class HostResourceProviderTest {
-  @Test
-  public void testCreateResources() throws Exception {
-    Resource.Type type = Resource.Type.Host;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-    RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
-
-    managementController.createHosts(
-        AbstractResourceProviderTest.Matcher.getHostRequestSet("Host100", "Cluster100", null));
-
-    // replay
-    replay(managementController, response);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    // add the property map to a set for the request.  add more maps for multiple creates
-    Set<Map<String, Object>> propertySet = new LinkedHashSet<Map<String, Object>>();
-
-    Map<String, Object> properties = new LinkedHashMap<String, Object>();
-
-    // add properties to the request map
-    properties.put(HostResourceProvider.HOST_CLUSTER_NAME_PROPERTY_ID, "Cluster100");
-    properties.put(HostResourceProvider.HOST_NAME_PROPERTY_ID, "Host100");
-
-    propertySet.add(properties);
-
-    // create the request
-    Request request = PropertyHelper.getCreateRequest(propertySet);
-
-    provider.createResources(request);
-
-    // verify
-    verify(managementController, response);
-  }
-
-  @Test
-  public void testGetResources() throws Exception {
-    Resource.Type type = Resource.Type.Host;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-
-    Set<HostResponse> allResponse = new HashSet<HostResponse>();
-    allResponse.add(new HostResponse("Host100", "Cluster100",
-        "", "", 2, "", "", "", 100000L, 200000L, null, 10L,
-        0L, "rack info", null, null,
-        new HostHealthStatus(HostHealthStatus.HealthStatus.HEALTHY, "HEALTHY"), "HEALTHY"));
-    allResponse.add(new HostResponse("Host101", "Cluster100",
-        "", "", 2, "", "", "", 100000L, 200000L, null, 10L,
-        0L, "rack info", null, null,
-        new HostHealthStatus(HostHealthStatus.HealthStatus.HEALTHY, "HEALTHY"), "HEALTHY"));
-    allResponse.add(new HostResponse("Host102", "Cluster100",
-        "", "", 2, "", "", "", 100000L, 200000L, null, 10L,
-        0L, "rack info", null, null,
-        new HostHealthStatus(HostHealthStatus.HealthStatus.HEALTHY, "HEALTHY"), "HEALTHY"));
-
-    // set expectations
-    expect(managementController.getHosts(
-        AbstractResourceProviderTest.Matcher.getHostRequestSet(null, "Cluster100", null))).
-        andReturn(allResponse).once();
-
-    // replay
-    replay(managementController);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    Set<String> propertyIds = new HashSet<String>();
-
-    propertyIds.add(HostResourceProvider.HOST_CLUSTER_NAME_PROPERTY_ID);
-    propertyIds.add(HostResourceProvider.HOST_NAME_PROPERTY_ID);
-
-    Predicate predicate =
-        new PredicateBuilder().property(HostResourceProvider.HOST_CLUSTER_NAME_PROPERTY_ID).equals("Cluster100").
-            toPredicate();
-    Request request = PropertyHelper.getReadRequest(propertyIds);
-    Set<Resource> resources = provider.getResources(request, predicate);
-
-    Assert.assertEquals(3, resources.size());
-    Set<String> names = new HashSet<String>();
-    for (Resource resource : resources) {
-      String clusterName = (String) resource.getPropertyValue(HostResourceProvider.HOST_CLUSTER_NAME_PROPERTY_ID);
-      Assert.assertEquals("Cluster100", clusterName);
-      names.add((String) resource.getPropertyValue(HostResourceProvider.HOST_NAME_PROPERTY_ID));
-    }
-    // Make sure that all of the response objects got moved into resources
-    for (HostResponse response : allResponse) {
-      Assert.assertTrue(names.contains(response.getHostname()));
-    }
-
-    // verify
-    verify(managementController);
-  }
-
-  @Test
-  public void testUpdateResources() throws Exception {
-    Resource.Type type = Resource.Type.Host;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-    RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
-
-    Set<HostResponse> hostResponseSet = new HashSet<HostResponse>();
-    hostResponseSet.add(new HostResponse("Host100", "Cluster100",
-        "", "", 2, "", "", "", 100000L, 200000L, null, 10L,
-        0L, "rack info", null, null,
-        new HostHealthStatus(HostHealthStatus.HealthStatus.HEALTHY, "HEALTHY"), "HEALTHY"));
-
-    // set expectations
-    expect(managementController.getHosts(
-        AbstractResourceProviderTest.Matcher.getHostRequestSet("Host100", "Cluster100", null))).
-        andReturn(hostResponseSet);
-    managementController.updateHosts(EasyMock.<Set<HostRequest>>anyObject());
-
-    // replay
-    replay(managementController, response);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    // add the property map to a set for the request.
-    Map<String, Object> properties = new LinkedHashMap<String, Object>();
-
-    properties.put(HostResourceProvider.HOST_RACK_INFO_PROPERTY_ID, "rack info");
-
-    // create the request
-    Request request = PropertyHelper.getUpdateRequest(properties);
-
-    Predicate  predicate = new PredicateBuilder().property(HostResourceProvider.HOST_CLUSTER_NAME_PROPERTY_ID).
-        equals("Cluster100").
-        and().property(HostResourceProvider.HOST_NAME_PROPERTY_ID).equals("Host100").toPredicate();
-    provider.updateResources(request, predicate);
-
-    // verify
-    verify(managementController, response);
-  }
-
-  @Test
-  public void testDeleteResources() throws Exception {
-    Resource.Type type = Resource.Type.Host;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-
-    // set expectations
-    managementController.deleteHosts(AbstractResourceProviderTest.Matcher.getHostRequestSet("Host100", null, null));
-
-    // replay
-    replay(managementController);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    AbstractResourceProviderTest.TestObserver observer = new AbstractResourceProviderTest.TestObserver();
-
-    ((ObservableResourceProvider)provider).addObserver(observer);
-
-    Predicate predicate = new PredicateBuilder().property(HostResourceProvider.HOST_NAME_PROPERTY_ID).equals("Host100").
-        toPredicate();
-    provider.deleteResources(predicate);
-
-    ResourceProviderEvent lastEvent = observer.getLastEvent();
-    Assert.assertNotNull(lastEvent);
-    Assert.assertEquals(Resource.Type.Host, lastEvent.getResourceType());
-    Assert.assertEquals(ResourceProviderEvent.Type.Delete, lastEvent.getType());
-    Assert.assertEquals(predicate, lastEvent.getPredicate());
-    Assert.assertNull(lastEvent.getRequest());
-
-    // verify
-    verify(managementController);
-  }
-}
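
Every test in these provider suites follows the same EasyMock lifecycle: record expectations on a
mock, switch it to replay mode, exercise the provider, then verify that each expectation was met.
The bare shape of that pattern, sketched against an invented collaborator (Greeter exists only for
illustration and is not an Ambari type):

    import static org.easymock.EasyMock.createMock;
    import static org.easymock.EasyMock.expect;
    import static org.easymock.EasyMock.replay;
    import static org.easymock.EasyMock.verify;

    public class MockLifecycleSketch {
      public interface Greeter {                       // hypothetical collaborator
        String greet(String name);
      }

      public static void main(String[] args) {
        Greeter greeter = createMock(Greeter.class);

        // record: greet("Host100") must be called exactly once and will return "hello"
        expect(greeter.greet("Host100")).andReturn("hello").once();

        replay(greeter);                               // stop recording, start checking

        String result = greeter.greet("Host100");      // exercise the code under test

        verify(greeter);                               // fails if an expectation went unmet
        System.out.println(result);                    // hello
      }
    }
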
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HttpPropertyProviderTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HttpPropertyProviderTest.java
deleted file mode 100644
index 04c6b7b..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HttpPropertyProviderTest.java
+++ /dev/null
@@ -1,120 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.internal;
-
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Set;
-
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.apache.ambari.server.controller.utilities.StreamProvider;
-import org.junit.Assert;
-import org.junit.Test;
-
-public class HttpPropertyProviderTest {
-  private static final String PROPERTY_ID_CLUSTER_NAME = PropertyHelper.getPropertyId("HostRoles", "cluster_name");
-  private static final String PROPERTY_ID_HOST_NAME = PropertyHelper.getPropertyId("HostRoles", "host_name");
-  private static final String PROPERTY_ID_COMPONENT_NAME = PropertyHelper.getPropertyId("HostRoles", "component_name");
-  
-  private static final String PROPERTY_ID_NAGIOS_ALERTS = PropertyHelper.getPropertyId("HostRoles", "nagios_alerts");
-
-  @Test
-  public void testReadNagiosServer() throws Exception {
-
-    Resource resource = doPopulate("NAGIOS_SERVER", Collections.<String>emptySet());
-    
-    Assert.assertNotNull("Expected non-null for 'nagios_alerts'",
-      resource.getPropertyValue(PROPERTY_ID_NAGIOS_ALERTS));
-  }
-  
-  @Test
-  public void testReadNotRequested() throws Exception {
-
-    Set<String> propertyIds = new HashSet<String>();
-    propertyIds.add(PropertyHelper.getPropertyId("HostRoles", "state"));
-    propertyIds.add(PROPERTY_ID_COMPONENT_NAME);
-
-    Resource resource = doPopulate("NAGIOS_SERVER", propertyIds);
-
-    Assert.assertNull("Expected null for 'nagios_alerts'",
-      resource.getPropertyValue(PROPERTY_ID_NAGIOS_ALERTS));
-  }
-
-  @Test
-  public void testReadWithRequested() throws Exception {
-
-    Set<String> propertyIds = new HashSet<String>();
-    propertyIds.add(PropertyHelper.getPropertyId("HostRoles", "nagios_alerts"));
-    propertyIds.add(PROPERTY_ID_COMPONENT_NAME);
-
-    Resource resource = doPopulate("NAGIOS_SERVER", propertyIds);
-
-    Assert.assertNotNull("Expected non-null for 'nagios_alerts'",
-      resource.getPropertyValue(PROPERTY_ID_NAGIOS_ALERTS));
-  }
-
-  @Test
-  public void testReadGangliaServer() throws Exception {
-    
-    Resource resource = doPopulate("GANGLIA_SERVER", Collections.<String>emptySet());
-
-    // !!! GANGLIA_SERVER currently has no HTTP lookup, so no value should be populated.
-    Assert.assertNull("Expected null, was: " +
-      resource.getPropertyValue(PROPERTY_ID_NAGIOS_ALERTS),
-      resource.getPropertyValue(PROPERTY_ID_NAGIOS_ALERTS));
-  }
-  
-  private Resource doPopulate(String componentName, Set<String> requestProperties) throws Exception {
-    
-    HttpProxyPropertyProvider propProvider = new HttpProxyPropertyProvider(
-       new TestStreamProvider(),
-       PROPERTY_ID_CLUSTER_NAME,
-       PROPERTY_ID_HOST_NAME,
-       PROPERTY_ID_COMPONENT_NAME);
-    
-    Resource resource = new ResourceImpl(Resource.Type.HostComponent);
-
-    resource.setProperty(PROPERTY_ID_HOST_NAME, "ec2-54-234-33-50.compute-1.amazonaws.com");
-    resource.setProperty(PROPERTY_ID_COMPONENT_NAME, componentName);
-    
-    Request request = PropertyHelper.getReadRequest(requestProperties);
-    
-    propProvider.populateResources(Collections.singleton(resource), request, null);
-
-    return resource;
-  }
-
-  private static class TestStreamProvider implements StreamProvider {
-
-    @Override
-    public InputStream readFrom(String spec) throws IOException {
-      return new ByteArrayInputStream("PROPERTY_TEST".getBytes());
-    }
-  }
-}
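
TestStreamProvider above serves one canned payload regardless of the URL it is handed. When a test
needs different bodies per endpoint, the same one-method interface can be stubbed with a map; a
sketch under that assumption (MappedStreamProvider is illustrative, not an Ambari class):

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.util.HashMap;
    import java.util.Map;

    import org.apache.ambari.server.controller.utilities.StreamProvider;

    class MappedStreamProvider implements StreamProvider {
      private final Map<String, String> responses = new HashMap<String, String>();

      MappedStreamProvider put(String spec, String body) {
        responses.put(spec, body);          // register a canned body for one URL
        return this;
      }

      @Override
      public InputStream readFrom(String spec) throws IOException {
        String body = responses.get(spec);
        if (body == null) {
          throw new IOException("no canned response for " + spec);
        }
        return new ByteArrayInputStream(body.getBytes());
      }
    }
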
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/JMXHostProviderTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/JMXHostProviderTest.java
deleted file mode 100644
index 6778c92..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/JMXHostProviderTest.java
+++ /dev/null
@@ -1,210 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.internal;
-
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.controller.*;
-import org.apache.ambari.server.controller.spi.*;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.apache.ambari.server.orm.GuiceJpaInitializer;
-import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.state.State;
-import org.apache.ambari.server.state.cluster.ClusterImpl;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-public class JMXHostProviderTest {
-  private Injector injector;
-  private Clusters clusters;
-  static AmbariManagementController controller;
-  private AmbariMetaInfo ambariMetaInfo;
-  private static final String NAMENODE_PORT = "dfs.http.address";
-  private static final String DATANODE_PORT = "dfs.datanode.http.address";
-
-  @Before
-  public void setup() throws Exception {
-    injector = Guice.createInjector(new InMemoryDefaultTestModule());
-    injector.getInstance(GuiceJpaInitializer.class);
-    clusters = injector.getInstance(Clusters.class);
-    controller = injector.getInstance(AmbariManagementController.class);
-    ambariMetaInfo = injector.getInstance(AmbariMetaInfo.class);
-    ambariMetaInfo.init();
-  }
-
-  private void createService(String clusterName,
-                             String serviceName, State desiredState) throws AmbariException {
-    String dStateStr = null;
-    if (desiredState != null) {
-      dStateStr = desiredState.toString();
-    }
-    ServiceRequest r1 = new ServiceRequest(clusterName, serviceName, null,
-      dStateStr);
-    Set<ServiceRequest> requests = new HashSet<ServiceRequest>();
-    requests.add(r1);
-    controller.createServices(requests);
-  }
-
-  private void createServiceComponent(String clusterName,
-                                      String serviceName, String componentName, State desiredState)
-    throws AmbariException {
-    String dStateStr = null;
-    if (desiredState != null) {
-      dStateStr = desiredState.toString();
-    }
-    ServiceComponentRequest r = new ServiceComponentRequest(clusterName,
-      serviceName, componentName, null, dStateStr);
-    Set<ServiceComponentRequest> requests =
-      new HashSet<ServiceComponentRequest>();
-    requests.add(r);
-    controller.createComponents(requests);
-  }
-
-  private void createServiceComponentHost(String clusterName,
-                                          String serviceName, String componentName, String hostname,
-                                          State desiredState) throws AmbariException {
-    String dStateStr = null;
-    if (desiredState != null) {
-      dStateStr = desiredState.toString();
-    }
-    ServiceComponentHostRequest r = new ServiceComponentHostRequest(clusterName,
-      serviceName, componentName, hostname, null, dStateStr);
-    Set<ServiceComponentHostRequest> requests =
-      new HashSet<ServiceComponentHostRequest>();
-    requests.add(r);
-    controller.createHostComponents(requests);
-  }
-
-  private void createHDFSServiceConfigs() throws AmbariException {
-    String clusterName = "c1";
-    ClusterRequest r = new ClusterRequest(null, clusterName, "HDP-0.1", null);
-    controller.createCluster(r);
-    clusters.getCluster(clusterName).setDesiredStackVersion(new StackId("HDP-0.1"));
-    String serviceName = "HDFS";
-    createService(clusterName, serviceName, null);
-    String componentName1 = "NAMENODE";
-    String componentName2 = "DATANODE";
-    String componentName3 = "HDFS_CLIENT";
-
-    createServiceComponent(clusterName, serviceName, componentName1,
-      State.INIT);
-    createServiceComponent(clusterName, serviceName, componentName2,
-      State.INIT);
-    createServiceComponent(clusterName, serviceName, componentName3,
-      State.INIT);
-
-    String host1 = "h1";
-    clusters.addHost(host1);
-    clusters.getHost("h1").setOsType("centos5");
-    clusters.getHost("h1").persist();
-    String host2 = "h2";
-    clusters.addHost(host2);
-    clusters.getHost("h2").setOsType("centos6");
-    clusters.getHost("h2").persist();
-    clusters.mapHostToCluster(host1, clusterName);
-    clusters.mapHostToCluster(host2, clusterName);
-
-    createServiceComponentHost(clusterName, null, componentName1,
-      host1, null);
-    createServiceComponentHost(clusterName, serviceName, componentName2,
-      host1, null);
-    createServiceComponentHost(clusterName, serviceName, componentName2,
-      host2, null);
-    createServiceComponentHost(clusterName, serviceName, componentName3,
-      host1, null);
-    createServiceComponentHost(clusterName, serviceName, componentName3,
-      host2, null);
-
-    // Create configs
-    Map<String, String> configs = new HashMap<String, String>();
-    configs.put(NAMENODE_PORT, "localhost:70070");
-    configs.put(DATANODE_PORT, "localhost:70075");
-    ConfigurationRequest cr = new ConfigurationRequest(clusterName,
-      "hdfs-site", "version1", configs);
-    controller.createConfiguration(cr);
-
-    Map<String, String> configVersions = new HashMap<String, String>();
-    Set<ServiceRequest> sReqs = new HashSet<ServiceRequest>();
-    configVersions.put("hdfs-site", "version1");
-    sReqs.add(new ServiceRequest(clusterName, serviceName, configVersions,
-      null));
-    controller.updateServices(sReqs);
-  }
-
-  @Test
-  public void testJMXPortMapInit() throws NoSuchParentResourceException, ResourceAlreadyExistsException, UnsupportedPropertyException, SystemException, AmbariException, NoSuchResourceException {
-    createHDFSServiceConfigs();
-
-    JMXHostProviderModule providerModule = new JMXHostProviderModule();
-    providerModule.registerResourceProvider(Resource.Type.Service);
-    providerModule.registerResourceProvider(Resource.Type.Configuration);
-    // Non default port addresses
-    Assert.assertEquals("70070", providerModule.getPort("c1", "NAMENODE"));
-    Assert.assertEquals("70075", providerModule.getPort("c1", "DATANODE"));
-    // Default port addresses
-    Assert.assertEquals(null, providerModule.getPort("c1", "JOBTRACKER"));
-    Assert.assertEquals(null, providerModule.getPort("c1", "TASKTRACKER"));
-    Assert.assertEquals(null, providerModule.getPort("c1", "HBASE_MASTER"));
-  }
-
-  private static class JMXHostProviderModule extends AbstractProviderModule {
-
-    ResourceProvider clusterResourceProvider = new ClusterResourceProvider(
-      PropertyHelper.getPropertyIds(Resource.Type.Cluster),
-      PropertyHelper.getKeyPropertyIds(Resource.Type.Cluster), controller);
-
-    ResourceProvider serviceResourceProvider = new ServiceResourceProvider(
-      PropertyHelper.getPropertyIds(Resource.Type.Service),
-      PropertyHelper.getKeyPropertyIds(Resource.Type.Service), controller);
-
-    ResourceProvider hostCompResourceProvider = new HostComponentResourceProvider(
-      PropertyHelper.getPropertyIds(Resource.Type.HostComponent),
-      PropertyHelper.getKeyPropertyIds(Resource.Type.HostComponent), controller);
-
-    ResourceProvider configResourceProvider = new ConfigurationResourceProvider(
-      PropertyHelper.getPropertyIds(Resource.Type.Configuration),
-      PropertyHelper.getKeyPropertyIds(Resource.Type.Configuration), controller);
-
-    @Override
-    protected ResourceProvider createResourceProvider(Resource.Type type) {
-      if (type == Resource.Type.Cluster)
-        return clusterResourceProvider;
-      else if (type == Resource.Type.Service)
-        return serviceResourceProvider;
-      else if (type == Resource.Type.HostComponent)
-        return hostCompResourceProvider;
-      else if (type == Resource.Type.Configuration)
-        return configResourceProvider;
-      return null;
-    }
-  }
-}
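
The assertions above imply that getPort() reduces a configured address such as "localhost:70070" to
the bare port string, and yields null when no address is configured. A sketch of that host:port
split, assuming the provider does nothing more elaborate than trimming the host prefix:

    public class PortFromAddressSketch {
      // Returns the port portion of "host:port", or null when none is configured.
      static String portOf(String address) {
        if (address == null) {
          return null;
        }
        int colon = address.lastIndexOf(':');
        return colon == -1 ? null : address.substring(colon + 1);
      }

      public static void main(String[] args) {
        System.out.println(portOf("localhost:70070")); // 70070, as asserted for NAMENODE
        System.out.println(portOf(null));              // null, as asserted for JOBTRACKER
      }
    }
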
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/PropertyPredicateVisitorTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/PropertyPredicateVisitorTest.java
deleted file mode 100644
index 736507d..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/PropertyPredicateVisitorTest.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.internal;
-
-import junit.framework.Assert;
-import org.apache.ambari.server.controller.predicate.AndPredicate;
-import org.apache.ambari.server.controller.predicate.BasePredicate;
-import org.apache.ambari.server.controller.predicate.CategoryIsEmptyPredicate;
-import org.apache.ambari.server.controller.predicate.OrPredicate;
-import org.apache.ambari.server.controller.utilities.PredicateBuilder;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.junit.Test;
-
-import java.util.Map;
-
-/**
- * Tests for the property predicate visitor.
- */
-public class PropertyPredicateVisitorTest {
-
-  private static final String PROPERTY_A = PropertyHelper.getPropertyId("category", "A");
-  private static final String PROPERTY_B = PropertyHelper.getPropertyId("category", "B");
-
-  private static final BasePredicate PREDICATE_1 = new PredicateBuilder().property(PROPERTY_A).equals("Monkey").toPredicate();
-  private static final BasePredicate PREDICATE_2 = new PredicateBuilder().property(PROPERTY_B).equals("Runner").toPredicate();
-  private static final BasePredicate PREDICATE_3 = new AndPredicate(PREDICATE_1, PREDICATE_2);
-  private static final BasePredicate PREDICATE_4 = new OrPredicate(PREDICATE_1, PREDICATE_2);
-  private static final BasePredicate PREDICATE_5 = new CategoryIsEmptyPredicate("cat1");
-
-  @Test
-  public void testVisit() {
-    PropertyPredicateVisitor visitor = new PropertyPredicateVisitor();
-    PREDICATE_1.accept(visitor);
-    Map<String, Object> properties = visitor.getProperties();
-    Assert.assertEquals(1, properties.size());
-    Assert.assertEquals("Monkey", properties.get(PROPERTY_A));
-
-    visitor = new PropertyPredicateVisitor();
-    PREDICATE_3.accept(visitor);
-    properties = visitor.getProperties();
-    Assert.assertEquals(2, properties.size());
-    Assert.assertEquals("Monkey", properties.get(PROPERTY_A));
-    Assert.assertEquals("Runner", properties.get(PROPERTY_B));
-
-    visitor = new PropertyPredicateVisitor();
-    PREDICATE_4.accept(visitor);
-    properties = visitor.getProperties();
-    Assert.assertEquals(2, properties.size());
-    Assert.assertEquals("Monkey", properties.get(PROPERTY_A));
-    Assert.assertEquals("Runner", properties.get(PROPERTY_B));
-
-    visitor = new PropertyPredicateVisitor();
-    PREDICATE_5.accept(visitor);
-    properties = visitor.getProperties();
-    Assert.assertTrue(properties.isEmpty());
-  }
-}
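
The visitor above flattens comparison predicates into a property-to-value map, descends through
AND/OR nodes, and contributes nothing for category predicates. The double-dispatch shape, reduced
to hypothetical stand-in types for illustration (not the BasePredicate/visitor API itself):

    import java.util.HashMap;
    import java.util.Map;

    public class VisitorSketch {
      interface Node { void accept(Collector c); }     // stand-in predicate hierarchy

      static class Equals implements Node {
        final String property; final Object value;
        Equals(String p, Object v) { property = p; value = v; }
        public void accept(Collector c) { c.visit(this); }
      }

      static class And implements Node {
        final Node left, right;
        And(Node l, Node r) { left = l; right = r; }
        public void accept(Collector c) { left.accept(c); right.accept(c); }
      }

      static class Collector {
        final Map<String, Object> properties = new HashMap<String, Object>();
        void visit(Equals e) { properties.put(e.property, e.value); }
      }

      public static void main(String[] args) {
        Collector c = new Collector();
        new And(new Equals("category/A", "Monkey"), new Equals("category/B", "Runner")).accept(c);
        System.out.println(c.properties);              // {category/A=Monkey, category/B=Runner}
      }
    }
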
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestImplTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestImplTest.java
deleted file mode 100644
index 3149745..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestImplTest.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.internal;
-
-import junit.framework.Assert;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.apache.ambari.server.controller.spi.Request;
-import org.junit.Test;
-
-import java.util.HashSet;
-import java.util.Set;
-
-/**
- *
- */
-public class RequestImplTest {
-
-  private static final Set<String> propertyIds = new HashSet<String>();
-
-  static {
-    propertyIds.add(PropertyHelper.getPropertyId("c1", "p1"));
-    propertyIds.add(PropertyHelper.getPropertyId("c1", "p2"));
-    propertyIds.add(PropertyHelper.getPropertyId("c2", "p3"));
-    propertyIds.add(PropertyHelper.getPropertyId("c3", "p4"));
-  }
-
-  @Test
-  public void testGetPropertyIds() {
-    Request request = PropertyHelper.getReadRequest(propertyIds);
-
-    Assert.assertEquals(propertyIds, request.getPropertyIds());
-  }
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java
deleted file mode 100644
index db857bb..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java
+++ /dev/null
@@ -1,191 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.internal;
-
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.RequestStatusResponse;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.spi.ResourceProvider;
-import org.apache.ambari.server.controller.utilities.PredicateBuilder;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.LinkedHashSet;
-import java.util.Map;
-import java.util.Set;
-
-import static org.easymock.EasyMock.createMock;
-import static org.easymock.EasyMock.createNiceMock;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.verify;
-
-/**
- * RequestResourceProvider tests.
- */
-public class RequestResourceProviderTest {
-  @Test
-  public void testCreateResources() throws Exception {
-    Resource.Type type = Resource.Type.Request;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-    RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
-
-    // replay
-    replay(managementController, response);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    // add the property map to a set for the request.  add more maps for multiple creates
-    Set<Map<String, Object>> propertySet = new LinkedHashSet<Map<String, Object>>();
-
-    Map<String, Object> properties = new LinkedHashMap<String, Object>();
-
-    // add properties to the request map
-    properties.put(RequestResourceProvider.REQUEST_ID_PROPERTY_ID, "Request100");
-
-    propertySet.add(properties);
-
-    // create the request
-    Request request = PropertyHelper.getCreateRequest(propertySet);
-
-    try {
-      provider.createResources(request);
-      Assert.fail("Expected an UnsupportedOperationException");
-    } catch (UnsupportedOperationException e) {
-      // expected
-    }
-
-    // verify
-    verify(managementController, response);
-  }
-
-  @Test
-  public void testGetResources() throws Exception {
-    Resource.Type type = Resource.Type.Request;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-
-    Set<RequestStatusResponse> allResponse = new HashSet<RequestStatusResponse>();
-    allResponse.add(new RequestStatusResponse(100L));
-
-    // set expectations
-    expect(managementController.getRequestStatus(AbstractResourceProviderTest.Matcher.getRequestRequest(100L))).
-        andReturn(allResponse).once();
-
-    // replay
-    replay(managementController);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    Set<String> propertyIds = new HashSet<String>();
-
-    propertyIds.add(RequestResourceProvider.REQUEST_ID_PROPERTY_ID);
-
-    Predicate predicate = new PredicateBuilder().property(RequestResourceProvider.REQUEST_ID_PROPERTY_ID).equals("100").
-        toPredicate();
-    Request request = PropertyHelper.getReadRequest(propertyIds);
-    Set<Resource> resources = provider.getResources(request, predicate);
-
-    Assert.assertEquals(1, resources.size());
-    for (Resource resource : resources) {
-      long requestId = (Long) resource.getPropertyValue(RequestResourceProvider.REQUEST_ID_PROPERTY_ID);
-      Assert.assertEquals(100L, requestId);
-    }
-
-    // verify
-    verify(managementController);
-  }
-
-  @Test
-  public void testUpdateResources() throws Exception {
-    Resource.Type type = Resource.Type.Request;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-    RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
-
-    // replay
-    replay(managementController, response);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    // add the property map to a set for the request.
-    Map<String, Object> properties = new LinkedHashMap<String, Object>();
-
-    // create the request
-    Request request = PropertyHelper.getUpdateRequest(properties);
-
-    Predicate predicate = new PredicateBuilder().property(RequestResourceProvider.REQUEST_ID_PROPERTY_ID).
-        equals("Request100").toPredicate();
-
-    try {
-      provider.updateResources(request, predicate);
-      Assert.fail("Expected an UnsupportedOperationException");
-    } catch (UnsupportedOperationException e) {
-      // expected
-    }
-
-    // verify
-    verify(managementController, response);
-  }
-
-  @Test
-  public void testDeleteResources() throws Exception {
-    Resource.Type type = Resource.Type.Request;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-
-    // replay
-    replay(managementController);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    Predicate predicate = new PredicateBuilder().property(RequestResourceProvider.REQUEST_ID_PROPERTY_ID).
-        equals("Request100").toPredicate();
-    try {
-      provider.deleteResources(predicate);
-      Assert.fail("Expected an UnsupportedOperationException");
-    } catch (UnsupportedOperationException e) {
-      // expected
-    }
-
-    // verify
-    verify(managementController);
-  }
-}
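
The try/fail/catch blocks above spell out the read-only contract by hand; with the JUnit 4 runner
these tests already use, the same expectation can be pinned more compactly (a sketch, with the
thrown statement standing in for the provider call):

    import org.junit.Test;

    public class ExpectedExceptionSketch {
      @Test(expected = UnsupportedOperationException.class)
      public void testMutationIsUnsupported() {
        // stands in for provider.createResources(...), updateResources(...), or deleteResources(...)
        throw new UnsupportedOperationException("Request resources are read-only");
      }
    }
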
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ResourceImplTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ResourceImplTest.java
deleted file mode 100644
index 94a8cad..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ResourceImplTest.java
+++ /dev/null
@@ -1,124 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.internal;
-
-import junit.framework.Assert;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.junit.Test;
-
-/**
- *
- */
-public class ResourceImplTest {
-
-  @Test
-  public void testGetType() {
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-    Assert.assertEquals(Resource.Type.Cluster, resource.getType());
-
-    resource = new ResourceImpl(Resource.Type.Service);
-    Assert.assertEquals(Resource.Type.Service, resource.getType());
-
-    resource = new ResourceImpl(Resource.Type.Host);
-    Assert.assertEquals(Resource.Type.Host, resource.getType());
-
-    resource = new ResourceImpl(Resource.Type.Component);
-    Assert.assertEquals(Resource.Type.Component, resource.getType());
-
-    resource = new ResourceImpl(Resource.Type.HostComponent);
-    Assert.assertEquals(Resource.Type.HostComponent, resource.getType());
-  }
-
-  @Test
-  public void testSetGetProperty() {
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-
-    String propertyId = PropertyHelper.getPropertyId("c1", "p1");
-    resource.setProperty(propertyId, "foo");
-    Assert.assertEquals("foo", resource.getPropertyValue(propertyId));
-
-    resource.setProperty(propertyId, 1);
-    Assert.assertEquals(1, resource.getPropertyValue(propertyId));
-
-    resource.setProperty(propertyId, (float) 1.99);
-    Assert.assertEquals((float) 1.99, resource.getPropertyValue(propertyId));
-
-    resource.setProperty(propertyId, 1.99);
-    Assert.assertEquals(1.99, resource.getPropertyValue(propertyId));
-
-    resource.setProperty(propertyId, 65L);
-    Assert.assertEquals(65L, resource.getPropertyValue(propertyId));
-  }
-
-  @Test
-  public void testAddCategory() {
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-
-    resource.addCategory("c1");
-    Assert.assertTrue(resource.getPropertiesMap().containsKey("c1"));
-
-    resource.addCategory("c2/sub2");
-    Assert.assertTrue(resource.getPropertiesMap().containsKey("c1"));
-    Assert.assertTrue(resource.getPropertiesMap().containsKey("c2"));
-    Assert.assertTrue(resource.getPropertiesMap().containsKey("c2/sub2"));
-
-    resource.addCategory("c3/sub3/sub3a");
-    Assert.assertTrue(resource.getPropertiesMap().containsKey("c1"));
-    Assert.assertTrue(resource.getPropertiesMap().containsKey("c2"));
-    Assert.assertTrue(resource.getPropertiesMap().containsKey("c2/sub2"));
-    Assert.assertTrue(resource.getPropertiesMap().containsKey("c3"));
-    Assert.assertTrue(resource.getPropertiesMap().containsKey("c3/sub3"));
-    Assert.assertTrue(resource.getPropertiesMap().containsKey("c3/sub3/sub3a"));
-  }
-
-  @Test
-  public void testCopyConstructor() {
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-
-    String p1 = PropertyHelper.getPropertyId(null, "p1");
-    String p2 = PropertyHelper.getPropertyId("c1", "p2");
-    String p3 = PropertyHelper.getPropertyId("c1/c2", "p3");
-    String p4 = PropertyHelper.getPropertyId("c1/c2/c3", "p4");
-    String p5 = PropertyHelper.getPropertyId("c1", "p5");
-
-    resource.setProperty(p1, "foo");
-    Assert.assertEquals("foo", resource.getPropertyValue(p1));
-
-    resource.setProperty(p2, 1);
-    Assert.assertEquals(1, resource.getPropertyValue(p2));
-
-    resource.setProperty(p3, (float) 1.99);
-    Assert.assertEquals((float) 1.99, resource.getPropertyValue(p3));
-
-    resource.setProperty(p4, 1.99);
-    Assert.assertEquals(1.99, resource.getPropertyValue(p4));
-
-    resource.setProperty(p5, 65L);
-    Assert.assertEquals(65L, resource.getPropertyValue(p5));
-
-    Resource copy = new ResourceImpl(resource);
-
-    Assert.assertEquals("foo", copy.getPropertyValue(p1));
-    Assert.assertEquals(1, copy.getPropertyValue(p2));
-    Assert.assertEquals((float) 1.99, copy.getPropertyValue(p3));
-    Assert.assertEquals(1.99, copy.getPropertyValue(p4));
-    Assert.assertEquals(65L, copy.getPropertyValue(p5));
-  }
-}
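
testAddCategory shows that registering "c3/sub3/sub3a" implicitly registers "c3" and "c3/sub3" as
well. A minimal sketch of that ancestor walk over '/'-delimited category names (illustrative, not
ResourceImpl's actual code):

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class CategorySketch {
      // Registers the category and every ancestor category above it.
      static void addCategory(Map<String, Map<String, Object>> categories, String category) {
        if (category != null && !categories.containsKey(category)) {
          categories.put(category, new LinkedHashMap<String, Object>());
          int slash = category.lastIndexOf('/');
          if (slash != -1) {
            addCategory(categories, category.substring(0, slash)); // recurse into the parent
          }
        }
      }

      public static void main(String[] args) {
        Map<String, Map<String, Object>> m = new LinkedHashMap<String, Map<String, Object>>();
        addCategory(m, "c3/sub3/sub3a");
        System.out.println(m.keySet()); // [c3/sub3/sub3a, c3/sub3, c3]
      }
    }
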
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/SchemaImplTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/SchemaImplTest.java
deleted file mode 100644
index 1c3d14a..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/SchemaImplTest.java
+++ /dev/null
@@ -1,166 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.internal;
-
-import junit.framework.Assert;
-import org.apache.ambari.server.controller.spi.*;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.junit.Test;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-/**
- *
- */
-public class SchemaImplTest {
-
-  private static final Set<String> resourceProviderProperties = new HashSet<String>();
-
-  static {
-    resourceProviderProperties.add(PropertyHelper.getPropertyId("c1", "p1"));
-    resourceProviderProperties.add(PropertyHelper.getPropertyId("c1", "p2"));
-    resourceProviderProperties.add(PropertyHelper.getPropertyId("c1", "p3"));
-    resourceProviderProperties.add(PropertyHelper.getPropertyId("c2", "p4"));
-  }
-
-  private static final ResourceProvider resourceProvider = new ResourceProvider() {
-    @Override
-    public Set<Resource> getResources(Request request, Predicate predicate) throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-      return null;
-    }
-
-    @Override
-    public RequestStatus createResources(Request request)
-        throws SystemException, UnsupportedPropertyException, ResourceAlreadyExistsException, NoSuchParentResourceException {
-      return new RequestStatusImpl(null);
-    }
-
-    @Override
-    public RequestStatus updateResources(Request request, Predicate predicate) throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-      return new RequestStatusImpl(null);
-    }
-
-    @Override
-    public RequestStatus deleteResources(Predicate predicate) throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-      return new RequestStatusImpl(null);
-    }
-
-    @Override
-    public Map<Resource.Type, String> getKeyPropertyIds() {
-      return keyPropertyIds;
-    }
-
-    @Override
-    public Set<String> checkPropertyIds(Set<String> propertyIds) {
-      if (!resourceProviderProperties.containsAll(propertyIds)) {
-        Set<String> unsupportedPropertyIds = new HashSet<String>(propertyIds);
-        unsupportedPropertyIds.removeAll(resourceProviderProperties);
-        return unsupportedPropertyIds;
-      }
-      return Collections.emptySet();
-    }
-  };
-
-  private static final Set<String> propertyProviderProperties = new HashSet<String>();
-
-  static {
-    propertyProviderProperties.add(PropertyHelper.getPropertyId("c3", "p5"));
-    propertyProviderProperties.add(PropertyHelper.getPropertyId("c3", "p6"));
-    propertyProviderProperties.add(PropertyHelper.getPropertyId("c4", "p7"));
-    propertyProviderProperties.add(PropertyHelper.getPropertyId("c4", "p8"));
-  }
-
-  private static final PropertyProvider propertyProvider = new PropertyProvider() {
-    @Override
-    public Set<Resource> populateResources(Set<Resource> resources, Request request, Predicate predicate) {
-      return null;
-    }
-
-    @Override
-    public Set<String> checkPropertyIds(Set<String> propertyIds) {
-      if (!propertyProviderProperties.containsAll(propertyIds)) {
-        Set<String> unsupportedPropertyIds = new HashSet<String>(propertyIds);
-        unsupportedPropertyIds.removeAll(propertyProviderProperties);
-        return unsupportedPropertyIds;
-      }
-      return Collections.emptySet();
-    }
-  };
-
-  private static final List<PropertyProvider> propertyProviders = new LinkedList<PropertyProvider>();
-
-  static {
-    propertyProviders.add(propertyProvider);
-  }
-
-  private static final Map<Resource.Type, String> keyPropertyIds = new HashMap<Resource.Type, String>();
-
-  static {
-    keyPropertyIds.put(Resource.Type.Cluster, PropertyHelper.getPropertyId("c1", "p1"));
-    keyPropertyIds.put(Resource.Type.Host, PropertyHelper.getPropertyId("c1", "p2"));
-    keyPropertyIds.put(Resource.Type.Component, PropertyHelper.getPropertyId("c1", "p3"));
-  }
-
-  @Test
-  public void testGetKeyPropertyId() {
-    Schema schema = new SchemaImpl(resourceProvider);
-
-    Assert.assertEquals(PropertyHelper.getPropertyId("c1", "p1"), schema.getKeyPropertyId(Resource.Type.Cluster));
-    Assert.assertEquals(PropertyHelper.getPropertyId("c1", "p2"), schema.getKeyPropertyId(Resource.Type.Host));
-    Assert.assertEquals(PropertyHelper.getPropertyId("c1", "p3"), schema.getKeyPropertyId(Resource.Type.Component));
-  }
-
-//  @Test
-//  public void testGetCategories() {
-//    Schema schema = new SchemaImpl(resourceProvider);
-//
-//    Map<String, Set<String>> categories = schema.getCategoryProperties();
-//    Assert.assertEquals(4, categories.size());
-//    Assert.assertTrue(categories.containsKey("c1"));
-//    Assert.assertTrue(categories.containsKey("c2"));
-//    Assert.assertTrue(categories.containsKey("c3"));
-//    Assert.assertTrue(categories.containsKey("c4"));
-//
-//    Set<String> properties = categories.get("c1");
-//    Assert.assertEquals(3, properties.size());
-//    Assert.assertTrue(properties.contains("p1"));
-//    Assert.assertTrue(properties.contains("p2"));
-//    Assert.assertTrue(properties.contains("p3"));
-//
-//    properties = categories.get("c2");
-//    Assert.assertEquals(1, properties.size());
-//    Assert.assertTrue(properties.contains("p4"));
-//
-//    properties = categories.get("c3");
-//    Assert.assertEquals(2, properties.size());
-//    Assert.assertTrue(properties.contains("p5"));
-//    Assert.assertTrue(properties.contains("p6"));
-//
-//    properties = categories.get("c4");
-//    Assert.assertEquals(2, properties.size());
-//    Assert.assertTrue(properties.contains("p7"));
-//    Assert.assertTrue(properties.contains("p8"));
-//  }
-}
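
The commented-out testGetCategories sketches an expectation that property ids group by category:
"c1/p1" and "c1/p2" fall under "c1", and so on. That grouping is a straightforward split on the
last '/'; a standalone sketch (illustrative, not SchemaImpl's actual code):

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    public class CategoryGroupingSketch {
      // Groups '/'-delimited property ids into category -> simple property names.
      static Map<String, Set<String>> group(Set<String> propertyIds) {
        Map<String, Set<String>> categories = new HashMap<String, Set<String>>();
        for (String id : propertyIds) {
          int slash = id.lastIndexOf('/');
          String category = slash == -1 ? "" : id.substring(0, slash);
          Set<String> names = categories.get(category);
          if (names == null) {
            names = new HashSet<String>();
            categories.put(category, names);
          }
          names.add(id.substring(slash + 1));
        }
        return categories;
      }

      public static void main(String[] args) {
        Set<String> ids = new HashSet<String>(Arrays.asList("c1/p1", "c1/p2", "c2/p4"));
        System.out.println(group(ids)); // {c1=[p1, p2], c2=[p4]} (order may vary)
      }
    }
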
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ServiceResourceProviderTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ServiceResourceProviderTest.java
deleted file mode 100644
index 3068d6b..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ServiceResourceProviderTest.java
+++ /dev/null
@@ -1,300 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.internal;
-
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.RequestStatusResponse;
-import org.apache.ambari.server.controller.ServiceRequest;
-import org.apache.ambari.server.controller.ServiceResponse;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.spi.ResourceProvider;
-import org.apache.ambari.server.controller.utilities.PredicateBuilder;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.easymock.EasyMock;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.LinkedHashSet;
-import java.util.Map;
-import java.util.Set;
-
-import static org.easymock.EasyMock.createMock;
-import static org.easymock.EasyMock.createNiceMock;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.verify;
-
-/**
- * ServiceResourceProvider tests.
- */
-public class ServiceResourceProviderTest {
-
-  @Test
-  public void testCreateResources() throws Exception {
-    Resource.Type type = Resource.Type.Service;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-    RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
-
-    managementController.createServices(AbstractResourceProviderTest.Matcher.getServiceRequestSet("Cluster100", "Service100", null, "DEPLOYED"));
-
-    // replay
-    replay(managementController, response);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    // add the property map to a set for the request.  add more maps for multiple creates
-    Set<Map<String, Object>> propertySet = new LinkedHashSet<Map<String, Object>>();
-
-    // Service 1: create a map of properties for the request
-    Map<String, Object> properties = new LinkedHashMap<String, Object>();
-
-    // add properties to the request map
-    properties.put(ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, "Cluster100");
-    properties.put(ServiceResourceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID, "Service100");
-    properties.put(ServiceResourceProvider.SERVICE_SERVICE_STATE_PROPERTY_ID, "DEPLOYED");
-
-    propertySet.add(properties);
-
-    // create the request
-    Request request = PropertyHelper.getCreateRequest(propertySet);
-
-    provider.createResources(request);
-
-    // verify
-    verify(managementController, response);
-  }
-
-  @Test
-  public void testGetResources() throws Exception{
-    Resource.Type type = Resource.Type.Service;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-
-    Set<ServiceResponse> allResponse = new HashSet<ServiceResponse>();
-    allResponse.add(new ServiceResponse(100L, "Cluster100", "Service100", null, "HDP-0.1", "DEPLOYED"));
-    allResponse.add(new ServiceResponse(100L, "Cluster100", "Service101", null, "HDP-0.1", "DEPLOYED"));
-    allResponse.add(new ServiceResponse(100L, "Cluster100", "Service102", null, "HDP-0.1", "DEPLOYED"));
-    allResponse.add(new ServiceResponse(100L, "Cluster100", "Service103", null, "HDP-0.1", "DEPLOYED"));
-    allResponse.add(new ServiceResponse(100L, "Cluster100", "Service104", null, "HDP-0.1", "DEPLOYED"));
-
-    Set<ServiceResponse> nameResponse = new HashSet<ServiceResponse>();
-    nameResponse.add(new ServiceResponse(100L, "Cluster100", "Service102", null, "HDP-0.1", "DEPLOYED"));
-
-    Set<ServiceResponse> stateResponse = new HashSet<ServiceResponse>();
-    stateResponse.add(new ServiceResponse(100L, "Cluster100", "Service100", null, "HDP-0.1", "DEPLOYED"));
-    stateResponse.add(new ServiceResponse(100L, "Cluster100", "Service102", null, "HDP-0.1", "DEPLOYED"));
-    stateResponse.add(new ServiceResponse(100L, "Cluster100", "Service104", null, "HDP-0.1", "DEPLOYED"));
-
-    // set expectations
-    expect(managementController.getServices(EasyMock.<Set<ServiceRequest>>anyObject())).andReturn(allResponse).once();
-    expect(managementController.getServices(EasyMock.<Set<ServiceRequest>>anyObject())).andReturn(nameResponse).once();
-    expect(managementController.getServices(EasyMock.<Set<ServiceRequest>>anyObject())).andReturn(stateResponse).once();
-
-    // replay
-    replay(managementController);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    Set<String> propertyIds = new HashSet<String>();
-
-    propertyIds.add(ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID);
-    propertyIds.add(ServiceResourceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID);
-
-    // create the request
-    Request request = PropertyHelper.getReadRequest(propertyIds);
-    // get all ... no predicate
-    Set<Resource> resources = provider.getResources(request, null);
-
-    Assert.assertEquals(5, resources.size());
-    Set<String> names = new HashSet<String>();
-    for (Resource resource : resources) {
-      String clusterName = (String) resource.getPropertyValue(ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID);
-      Assert.assertEquals("Cluster100", clusterName);
-      names.add((String) resource.getPropertyValue(ServiceResourceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID));
-    }
-    // Make sure that all of the response objects got moved into resources
-    for (ServiceResponse serviceResponse : allResponse ) {
-      Assert.assertTrue(names.contains(serviceResponse.getServiceName()));
-    }
-
-    // get service named Service102
-    Predicate predicate = new PredicateBuilder().property(ServiceResourceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID).equals("Service102").toPredicate();
-    request = PropertyHelper.getReadRequest("ServiceInfo");
-    resources = provider.getResources(request, predicate);
-
-    Assert.assertEquals(1, resources.size());
-    Assert.assertEquals("Service102", resources.iterator().next().getPropertyValue(ServiceResourceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID));
-
-    // get services where state == "DEPLOYED"
-    predicate = new PredicateBuilder().property(ServiceResourceProvider.SERVICE_SERVICE_STATE_PROPERTY_ID).equals("DEPLOYED").toPredicate();
-    request = PropertyHelper.getReadRequest(propertyIds);
-    resources = provider.getResources(request, predicate);
-
-    Assert.assertEquals(3, resources.size());
-    names = new HashSet<String>();
-    for (Resource resource : resources) {
-      String clusterName = (String) resource.getPropertyValue(ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID);
-      Assert.assertEquals("Cluster100", clusterName);
-      names.add((String) resource.getPropertyValue(ServiceResourceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID));
-    }
-    // Make sure that all of the response objects got moved into resources
-    for (ServiceResponse serviceResponse : stateResponse ) {
-      Assert.assertTrue(names.contains(serviceResponse.getServiceName()));
-    }
-
-    // verify
-    verify(managementController);
-  }
-
-  @Test
-  public void testUpdateResources() throws Exception{
-    Resource.Type type = Resource.Type.Service;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-    RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
-
-    // set expectations
-    expect(managementController.updateServices(EasyMock.<Set<ServiceRequest>>anyObject())).andReturn(response).once();
-
-    // replay
-    replay(managementController, response);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    // create a map of properties for the update request
-    Map<String, Object> properties = new LinkedHashMap<String, Object>();
-
-    properties.put(ServiceResourceProvider.SERVICE_SERVICE_STATE_PROPERTY_ID, "DEPLOYED");
-
-    // create the request
-    Request request = PropertyHelper.getUpdateRequest(properties);
-
-    // update the service named Service102
-    Predicate predicate = new PredicateBuilder().property(ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID).equals("Cluster100").
-        and().property(ServiceResourceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID).equals("Service102").toPredicate();
-    provider.updateResources(request, predicate);
-
-    // verify
-    verify(managementController, response);
-  }
-
-  @Test
-  public void testDeleteResources() throws Exception{
-    Resource.Type type = Resource.Type.Service;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-    RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
-
-    // set expectations
-    expect(managementController.deleteServices(AbstractResourceProviderTest.Matcher.getServiceRequestSet(null, "Service100", null, null))).andReturn(response);
-
-    // replay
-    replay(managementController, response);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    AbstractResourceProviderTest.TestObserver observer = new AbstractResourceProviderTest.TestObserver();
-
-    ((ObservableResourceProvider)provider).addObserver(observer);
-
-    // delete the service named Service100
-    Predicate predicate = new PredicateBuilder().property(ServiceResourceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID).equals("Service100").toPredicate();
-    provider.deleteResources(predicate);
-
-
-    ResourceProviderEvent lastEvent = observer.getLastEvent();
-    Assert.assertNotNull(lastEvent);
-    Assert.assertEquals(Resource.Type.Service, lastEvent.getResourceType());
-    Assert.assertEquals(ResourceProviderEvent.Type.Delete, lastEvent.getType());
-    Assert.assertEquals(predicate, lastEvent.getPredicate());
-    Assert.assertNull(lastEvent.getRequest());
-
-    // verify
-    verify(managementController, response);
-  }
-
-  @Test
-  public void testCheckPropertyIds() throws Exception {
-    Set<String> propertyIds = new HashSet<String>();
-    propertyIds.add("foo");
-    propertyIds.add("cat1/foo");
-    propertyIds.add("cat2/bar");
-    propertyIds.add("cat2/baz");
-    propertyIds.add("cat3/sub1/bam");
-    propertyIds.add("cat4/sub2/sub3/bat");
-    propertyIds.add("cat5/subcat5/map");
-
-    Map<Resource.Type, String> keyPropertyIds = new HashMap<Resource.Type, String>();
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-
-    AbstractResourceProvider provider =
-        (AbstractResourceProvider) AbstractResourceProvider.getResourceProvider(
-            Resource.Type.Service,
-            propertyIds,
-            keyPropertyIds,
-            managementController);
-
-    Set<String> unsupported = provider.checkPropertyIds(Collections.singleton("foo"));
-    Assert.assertTrue(unsupported.isEmpty());
-
-    // note that "key" itself is not in the set of known property ids; it is allowed because its parent is a known property.
-    // this supports Map-type properties whose entries should be treated as individual properties
-    Assert.assertTrue(provider.checkPropertyIds(Collections.singleton("cat5/subcat5/map/key")).isEmpty());
-
-    unsupported = provider.checkPropertyIds(Collections.singleton("bar"));
-    Assert.assertEquals(1, unsupported.size());
-    Assert.assertTrue(unsupported.contains("bar"));
-
-    unsupported = provider.checkPropertyIds(Collections.singleton("cat1/foo"));
-    Assert.assertTrue(unsupported.isEmpty());
-
-    unsupported = provider.checkPropertyIds(Collections.singleton("cat1"));
-    Assert.assertTrue(unsupported.isEmpty());
-
-    unsupported = provider.checkPropertyIds(Collections.singleton("config"));
-    Assert.assertTrue(unsupported.isEmpty());
-
-    unsupported = provider.checkPropertyIds(Collections.singleton("config/unknown_property"));
-    Assert.assertTrue(unsupported.isEmpty());
-  }
-}
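
Every provider test above follows the same EasyMock lifecycle: record expectations, replay(), exercise the provider, verify(). A self-contained sketch of that lifecycle, with a hypothetical ClusterLookup interface standing in for a collaborator such as AmbariManagementController:

    import static org.easymock.EasyMock.createMock;
    import static org.easymock.EasyMock.expect;
    import static org.easymock.EasyMock.replay;
    import static org.easymock.EasyMock.verify;

    public class MockLifecycleSketch {
      // hypothetical collaborator, standing in for AmbariManagementController
      interface ClusterLookup {
        String nameOf(long clusterId);
      }

      public static void main(String[] args) {
        ClusterLookup lookup = createMock(ClusterLookup.class);

        // record: declare the calls the test expects, with canned return values
        expect(lookup.nameOf(100L)).andReturn("Cluster100").once();

        // replay: switch the mock from recording expectations to enforcing them
        replay(lookup);

        // exercise: the code under test makes the expected call
        System.out.println(lookup.nameOf(100L));   // prints Cluster100

        // verify: fails if any recorded expectation was never satisfied
        verify(lookup);
      }
    }

Nice mocks such as the RequestStatusResponse above differ only in that unexpected calls return default values instead of failing.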
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/SimplifyingPredicateVisitorTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/SimplifyingPredicateVisitorTest.java
deleted file mode 100644
index d435d2f..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/SimplifyingPredicateVisitorTest.java
+++ /dev/null
@@ -1,146 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.internal;
-
-import junit.framework.Assert;
-import org.apache.ambari.server.controller.predicate.AndPredicate;
-import org.apache.ambari.server.controller.predicate.BasePredicate;
-import org.apache.ambari.server.controller.predicate.CategoryIsEmptyPredicate;
-import org.apache.ambari.server.controller.predicate.OrPredicate;
-import org.apache.ambari.server.controller.utilities.PredicateBuilder;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.junit.Test;
-
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-/**
- * Tests for SimplifyingPredicateVisitor
- */
-public class SimplifyingPredicateVisitorTest {
-
-  private static final String PROPERTY_A = PropertyHelper.getPropertyId("category", "A");
-  private static final String PROPERTY_B = PropertyHelper.getPropertyId("category", "B");
-  private static final String PROPERTY_C = PropertyHelper.getPropertyId("category", "C");
-  private static final String PROPERTY_D = PropertyHelper.getPropertyId("category", "D");
-
-  private static final BasePredicate PREDICATE_1 = new PredicateBuilder().property(PROPERTY_A).equals("Monkey").toPredicate();
-  private static final BasePredicate PREDICATE_2 = new PredicateBuilder().property(PROPERTY_B).equals("Runner").toPredicate();
-  private static final BasePredicate PREDICATE_3 = new AndPredicate(PREDICATE_1, PREDICATE_2);
-  private static final BasePredicate PREDICATE_4 = new OrPredicate(PREDICATE_1, PREDICATE_2);
-  private static final BasePredicate PREDICATE_5 = new PredicateBuilder().property(PROPERTY_C).equals("Racer").toPredicate();
-  private static final BasePredicate PREDICATE_6 = new OrPredicate(PREDICATE_5, PREDICATE_4);
-  private static final BasePredicate PREDICATE_7 = new PredicateBuilder().property(PROPERTY_C).equals("Power").toPredicate();
-  private static final BasePredicate PREDICATE_8 = new OrPredicate(PREDICATE_6, PREDICATE_7);
-  private static final BasePredicate PREDICATE_9 = new AndPredicate(PREDICATE_1, PREDICATE_8);
-  private static final BasePredicate PREDICATE_10 = new OrPredicate(PREDICATE_3, PREDICATE_5);
-  private static final BasePredicate PREDICATE_11 = new AndPredicate(PREDICATE_4, PREDICATE_10);
-  private static final BasePredicate PREDICATE_12 = new PredicateBuilder().property(PROPERTY_D).equals("Installer").toPredicate();
-  private static final BasePredicate PREDICATE_13 = new AndPredicate(PREDICATE_1, PREDICATE_12);
-  private static final BasePredicate PREDICATE_14 = new PredicateBuilder().property(PROPERTY_D).greaterThan(12).toPredicate();
-  private static final BasePredicate PREDICATE_15 = new AndPredicate(PREDICATE_1, PREDICATE_14);
-  private static final BasePredicate PREDICATE_16 = new CategoryIsEmptyPredicate("cat1");
-
-  @Test
-  public void testVisit() {
-    Set<String> supportedProperties = new HashSet<String>();
-    supportedProperties.add(PROPERTY_A);
-    supportedProperties.add(PROPERTY_B);
-    supportedProperties.add(PROPERTY_C);
-
-    SimplifyingPredicateVisitor visitor = new SimplifyingPredicateVisitor(supportedProperties);
-
-    PREDICATE_1.accept(visitor);
-
-    List<BasePredicate> simplifiedPredicates = visitor.getSimplifiedPredicates();
-
-    Assert.assertEquals(1, simplifiedPredicates.size());
-    Assert.assertEquals(PREDICATE_1, simplifiedPredicates.get(0));
-
-    PREDICATE_3.accept(visitor);
-
-    simplifiedPredicates = visitor.getSimplifiedPredicates();
-
-    Assert.assertEquals(1, simplifiedPredicates.size());
-    Assert.assertEquals(PREDICATE_3, simplifiedPredicates.get(0));
-
-
-    PREDICATE_4.accept(visitor);
-
-    simplifiedPredicates = visitor.getSimplifiedPredicates();
-
-    Assert.assertEquals(2, simplifiedPredicates.size());
-    Assert.assertEquals(PREDICATE_1, simplifiedPredicates.get(0));
-    Assert.assertEquals(PREDICATE_2, simplifiedPredicates.get(1));
-
-    PREDICATE_6.accept(visitor);
-
-    simplifiedPredicates = visitor.getSimplifiedPredicates();
-
-    Assert.assertEquals(3, simplifiedPredicates.size());
-    Assert.assertEquals(PREDICATE_5, simplifiedPredicates.get(0));
-    Assert.assertEquals(PREDICATE_1, simplifiedPredicates.get(1));
-    Assert.assertEquals(PREDICATE_2, simplifiedPredicates.get(2));
-
-    PREDICATE_8.accept(visitor);
-
-    simplifiedPredicates = visitor.getSimplifiedPredicates();
-
-    Assert.assertEquals(4, simplifiedPredicates.size());
-    Assert.assertEquals(PREDICATE_5, simplifiedPredicates.get(0));
-    Assert.assertEquals(PREDICATE_1, simplifiedPredicates.get(1));
-    Assert.assertEquals(PREDICATE_2, simplifiedPredicates.get(2));
-    Assert.assertEquals(PREDICATE_7, simplifiedPredicates.get(3));
-
-    PREDICATE_9.accept(visitor);
-
-    simplifiedPredicates = visitor.getSimplifiedPredicates();
-
-    Assert.assertEquals(4, simplifiedPredicates.size());
-//    Assert.assertEquals(???, simplifiedPredicates.get(0));
-
-    PREDICATE_11.accept(visitor);
-
-    simplifiedPredicates = visitor.getSimplifiedPredicates();
-
-    Assert.assertEquals(4, simplifiedPredicates.size());
-//    Assert.assertEquals(???, simplifiedPredicates.get(0));
-
-    PREDICATE_13.accept(visitor);
-
-    simplifiedPredicates = visitor.getSimplifiedPredicates();
-
-    Assert.assertEquals(1, simplifiedPredicates.size());
-    Assert.assertEquals(PREDICATE_1, simplifiedPredicates.get(0));
-
-    PREDICATE_15.accept(visitor);
-
-    simplifiedPredicates = visitor.getSimplifiedPredicates();
-
-    Assert.assertEquals(1, simplifiedPredicates.size());
-    Assert.assertEquals(PREDICATE_1, simplifiedPredicates.get(0));
-
-    PREDICATE_16.accept(visitor);
-
-    simplifiedPredicates = visitor.getSimplifiedPredicates();
-
-    Assert.assertEquals(1, simplifiedPredicates.size());
-    Assert.assertEquals(PREDICATE_16, simplifiedPredicates.get(0));
-  }
-}
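
The visitor exercised above rewrites an arbitrary AND/OR tree into a flat list of OR-free alternatives, dropping conjuncts over properties the back end cannot evaluate. A compact sketch of that idea, using an invented Node type rather than Ambari's BasePredicate hierarchy; the semantics are inferred from the assertions in this test:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Set;

    public class SimplifySketch {
      // toy predicate tree: a leaf names a property; And/Or combine subtrees
      static abstract class Node {}
      static class Leaf extends Node {
        final String property;
        Leaf(String property) { this.property = property; }
      }
      static class Or extends Node {
        final Node left, right;
        Or(Node left, Node right) { this.left = left; this.right = right; }
      }
      static class And extends Node {
        final Node left, right;
        And(Node left, Node right) { this.left = left; this.right = right; }
      }

      // flatten ORs into alternatives; distribute AND across alternatives;
      // drop any conjunct that mentions an unsupported property
      static List<Node> simplify(Node n, Set<String> supported) {
        List<Node> out = new ArrayList<Node>();
        if (n instanceof Leaf) {
          out.add(n);
        } else if (n instanceof Or) {
          out.addAll(simplify(((Or) n).left, supported));
          out.addAll(simplify(((Or) n).right, supported));
        } else if (n instanceof And) {
          for (Node l : simplify(((And) n).left, supported)) {
            for (Node r : simplify(((And) n).right, supported)) {
              boolean lOk = isSupported(l, supported);
              boolean rOk = isSupported(r, supported);
              if (lOk && rOk) {
                out.add(new And(l, r));
              } else if (lOk) {
                out.add(l);          // keep only the evaluable side
              } else if (rOk) {
                out.add(r);
              }
            }
          }
        }
        return out;
      }

      static boolean isSupported(Node n, Set<String> supported) {
        if (n instanceof Leaf) {
          return supported.contains(((Leaf) n).property);
        }
        if (n instanceof And) {
          return isSupported(((And) n).left, supported)
              && isSupported(((And) n).right, supported);
        }
        return false;
      }
    }

This reproduces the sizes asserted above: an OR of two leaves yields two alternatives, and PREDICATE_11, an AND of two two-way ORs, distributes into 2 x 2 = 4 predicates.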
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/TaskResourceProviderTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/TaskResourceProviderTest.java
deleted file mode 100644
index c0b8c80..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/TaskResourceProviderTest.java
+++ /dev/null
@@ -1,195 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.internal;
-
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.RequestStatusResponse;
-import org.apache.ambari.server.controller.TaskStatusResponse;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.spi.ResourceProvider;
-import org.apache.ambari.server.controller.utilities.PredicateBuilder;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.LinkedHashSet;
-import java.util.Map;
-import java.util.Set;
-
-import static org.easymock.EasyMock.createMock;
-import static org.easymock.EasyMock.createNiceMock;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.verify;
-
-/**
- * TaskResourceProvider tests.
- */
-public class TaskResourceProviderTest {
-  @Test
-  public void testCreateResources() throws Exception {
-    Resource.Type type = Resource.Type.Task;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-    RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
-
-    // replay
-    replay(managementController, response);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    // add the property map to a set for the request; add more maps to create multiple resources
-    Set<Map<String, Object>> propertySet = new LinkedHashSet<Map<String, Object>>();
-
-    Map<String, Object> properties = new LinkedHashMap<String, Object>();
-
-    // add properties to the request map
-    properties.put(TaskResourceProvider.TASK_REQUEST_ID_PROPERTY_ID, 100);
-    properties.put(TaskResourceProvider.TASK_ID_PROPERTY_ID, 100);
-
-    propertySet.add(properties);
-
-    // create the request
-    Request request = PropertyHelper.getCreateRequest(propertySet);
-
-    try {
-      provider.createResources(request);
-      Assert.fail("Expected an UnsupportedOperationException");
-    } catch (UnsupportedOperationException e) {
-      // expected
-    }
-
-    // verify
-    verify(managementController, response);
-  }
-
-  @Test
-  public void testGetResources() throws Exception {
-    Resource.Type type = Resource.Type.Task;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-
-    Set<TaskStatusResponse> allResponse = new HashSet<TaskStatusResponse>();
-    allResponse.add(new TaskStatusResponse(100L, 100, 100L, "HostName100", "", "", "", 0, "", "", 0L, (short) 0));
-
-    // set expectations
-    expect(managementController.getTaskStatus(AbstractResourceProviderTest.Matcher.getTaskRequestSet(100L, 100L))).
-        andReturn(allResponse).once();
-
-    // replay
-    replay(managementController);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    Set<String> propertyIds = new HashSet<String>();
-
-    propertyIds.add(TaskResourceProvider.TASK_ID_PROPERTY_ID);
-    propertyIds.add(TaskResourceProvider.TASK_REQUEST_ID_PROPERTY_ID);
-
-    Predicate predicate = new PredicateBuilder().property(TaskResourceProvider.TASK_ID_PROPERTY_ID).equals("100").
-                          and().property(TaskResourceProvider.TASK_REQUEST_ID_PROPERTY_ID).equals("100").toPredicate();
-    Request request = PropertyHelper.getReadRequest(propertyIds);
-    Set<Resource> resources = provider.getResources(request, predicate);
-
-    Assert.assertEquals(1, resources.size());
-    for (Resource resource : resources) {
-      long taskId = (Long) resource.getPropertyValue(TaskResourceProvider.TASK_ID_PROPERTY_ID);
-      Assert.assertEquals(100L, taskId);
-    }
-
-    // verify
-    verify(managementController);
-  }
-
-  @Test
-  public void testUpdateResources() throws Exception {
-    Resource.Type type = Resource.Type.Task;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-    RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
-
-    // replay
-    replay(managementController, response);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    // create a map of properties for the update request
-    Map<String, Object> properties = new LinkedHashMap<String, Object>();
-
-    // create the request
-    Request request = PropertyHelper.getUpdateRequest(properties);
-
-    Predicate predicate = new PredicateBuilder().property(TaskResourceProvider.TASK_ID_PROPERTY_ID).equals("Task100").
-        toPredicate();
-
-    try {
-      provider.updateResources(request, predicate);
-      Assert.fail("Expected an UnsupportedOperationException");
-    } catch (UnsupportedOperationException e) {
-      // expected
-    }
-
-    // verify
-    verify(managementController, response);
-  }
-
-  @Test
-  public void testDeleteResources() throws Exception {
-    Resource.Type type = Resource.Type.Task;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-
-    // replay
-    replay(managementController);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    Predicate predicate = new PredicateBuilder().property(TaskResourceProvider.TASK_ID_PROPERTY_ID).equals("Task100").
-        toPredicate();
-    try {
-      provider.deleteResources(predicate);
-      Assert.fail("Expected an UnsupportedOperationException");
-    } catch (UnsupportedOperationException e) {
-      // expected
-    }
-
-    // verify
-    verify(managementController);
-  }
-}
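
The create, update, and delete tests assert the provider's read-only behavior with the try / Assert.fail / catch idiom. JUnit 4, which these tests already use, can declare the same expectation on the annotation; a minimal sketch:

    import org.junit.Test;

    public class ExpectedExceptionSketch {
      // equivalent to try { ...; Assert.fail(...); } catch (UnsupportedOperationException e) {}
      @Test(expected = UnsupportedOperationException.class)
      public void taskCreationIsUnsupported() {
        throw new UnsupportedOperationException("tasks are read-only");
      }
    }

The try/catch form remains useful when the test needs to continue after the exception, as these tests do in order to reach verify().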
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/TestProviderModule.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/TestProviderModule.java
deleted file mode 100644
index abc0b76..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/TestProviderModule.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.internal;
-
-import org.apache.ambari.server.controller.jdbc.TestJDBCResourceProvider;
-import org.apache.ambari.server.controller.spi.PropertyProvider;
-import org.apache.ambari.server.controller.spi.ProviderModule;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.spi.ResourceProvider;
-import org.apache.ambari.server.controller.utilities.DBHelper;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Module to plug in the JDBC resource provider.
- */
-public class TestProviderModule implements ProviderModule {
-  private static final Map<Resource.Type, ResourceProvider> resourceProviders = new HashMap<Resource.Type, ResourceProvider>();
-  private static final Map<Resource.Type, List<PropertyProvider>> propertyProviders = new HashMap<Resource.Type, List<PropertyProvider>>();
-
-  static {
-
-    for (Resource.Type type : Resource.Type.values()) {
-      resourceProviders.put(type, new TestJDBCResourceProvider(
-          DBHelper.CONNECTION_FACTORY,
-          type,
-          PropertyHelper.getPropertyIds(type),
-          PropertyHelper.getKeyPropertyIds(type)));
-    }
-
-    propertyProviders.put(Resource.Type.Cluster, new LinkedList<PropertyProvider>());
-    propertyProviders.put(Resource.Type.Service, new LinkedList<PropertyProvider>());
-    propertyProviders.put(Resource.Type.Component, new LinkedList<PropertyProvider>());
-    propertyProviders.put(Resource.Type.Host, new LinkedList<PropertyProvider>());
-
-    List<PropertyProvider> providers = new LinkedList<PropertyProvider>();
-
-    propertyProviders.put(Resource.Type.Component, providers); // replaces the empty list registered above
-
-    providers = new LinkedList<PropertyProvider>();
-
-    propertyProviders.put(Resource.Type.HostComponent, providers);
-  }
-
-  @Override
-  public ResourceProvider getResourceProvider(Resource.Type type) {
-    return resourceProviders.get(type);
-  }
-
-  @Override
-  public List<PropertyProvider> getPropertyProviders(Resource.Type type) {
-    return propertyProviders.get(type);
-  }
-}
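
TestProviderModule is a type-keyed registry: a static map from Resource.Type to a provider, filled once in a static initializer. A generic sketch of the same pattern with invented Kind and Provider types; the EnumMap is an assumption chosen here because enum keys allow array-backed lookup:

    import java.util.EnumMap;
    import java.util.Map;

    public class RegistrySketch {
      enum Kind { CLUSTER, SERVICE, HOST }            // hypothetical resource kinds

      interface Provider {
        String describe();
      }

      private static final Map<Kind, Provider> REGISTRY =
          new EnumMap<Kind, Provider>(Kind.class);

      static {
        // register one provider per kind, exactly once, at class-load time
        for (final Kind k : Kind.values()) {
          REGISTRY.put(k, new Provider() {
            public String describe() { return "provider for " + k; }
          });
        }
      }

      static Provider providerFor(Kind kind) {
        return REGISTRY.get(kind);
      }
    }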
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UserResourceProviderTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UserResourceProviderTest.java
deleted file mode 100644
index 0e51616..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UserResourceProviderTest.java
+++ /dev/null
@@ -1,191 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.internal;
-
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.RequestStatusResponse;
-import org.apache.ambari.server.controller.UserRequest;
-import org.apache.ambari.server.controller.UserResponse;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.spi.ResourceProvider;
-import org.apache.ambari.server.controller.utilities.PredicateBuilder;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.easymock.EasyMock;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.LinkedHashSet;
-import java.util.Map;
-import java.util.Set;
-
-import static org.easymock.EasyMock.createMock;
-import static org.easymock.EasyMock.createNiceMock;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.verify;
-
-/**
- * UserResourceProvider tests.
- */
-public class UserResourceProviderTest {
-  @Test
-  public void testCreateResources() throws Exception {
-    Resource.Type type = Resource.Type.User;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-    RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
-
-    managementController.createUsers(AbstractResourceProviderTest.Matcher.getUserRequestSet("User100"));
-
-    // replay
-    replay(managementController, response);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    // add the property map to a set for the request; add more maps to create multiple resources
-    Set<Map<String, Object>> propertySet = new LinkedHashSet<Map<String, Object>>();
-
-    Map<String, Object> properties = new LinkedHashMap<String, Object>();
-
-    // add properties to the request map
-    properties.put(UserResourceProvider.USER_USERNAME_PROPERTY_ID, "User100");
-
-    propertySet.add(properties);
-
-    // create the request
-    Request request = PropertyHelper.getCreateRequest(propertySet);
-
-    provider.createResources(request);
-
-    // verify
-    verify(managementController, response);
-  }
-
-  @Test
-  public void testGetResources() throws Exception {
-    Resource.Type type = Resource.Type.User;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-
-    Set<UserResponse> allResponse = new HashSet<UserResponse>();
-    allResponse.add(new UserResponse("User100", false));
-
-    // set expectations
-    expect(managementController.getUsers(AbstractResourceProviderTest.Matcher.getUserRequestSet("User100"))).
-        andReturn(allResponse).once();
-
-    // replay
-    replay(managementController);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    Set<String> propertyIds = new HashSet<String>();
-
-    propertyIds.add(UserResourceProvider.USER_USERNAME_PROPERTY_ID);
-    propertyIds.add(UserResourceProvider.USER_PASSWORD_PROPERTY_ID);
-
-    Predicate predicate = new PredicateBuilder().property(UserResourceProvider.USER_USERNAME_PROPERTY_ID).
-        equals("User100").toPredicate();
-    Request request = PropertyHelper.getReadRequest(propertyIds);
-    Set<Resource> resources = provider.getResources(request, predicate);
-
-    Assert.assertEquals(1, resources.size());
-    for (Resource resource : resources) {
-      String userName = (String) resource.getPropertyValue(UserResourceProvider.USER_USERNAME_PROPERTY_ID);
-      Assert.assertEquals("User100", userName);
-    }
-
-    // verify
-    verify(managementController);
-  }
-
-  @Test
-  public void testUpdateResources() throws Exception {
-    Resource.Type type = Resource.Type.User;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-    RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
-
-    // set expectations
-    managementController.updateUsers(EasyMock.<Set<UserRequest>>anyObject());
-
-    // replay
-    replay(managementController, response);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    // create a map of properties for the update request
-    Map<String, Object> properties = new LinkedHashMap<String, Object>();
-
-    properties.put(UserResourceProvider.USER_PASSWORD_PROPERTY_ID, "password");
-
-    // create the request
-    Request request = PropertyHelper.getUpdateRequest(properties);
-
-    Predicate predicate = new PredicateBuilder().property(UserResourceProvider.USER_USERNAME_PROPERTY_ID).
-        equals("User100").toPredicate();
-    provider.updateResources(request, predicate);
-
-    // verify
-    verify(managementController, response);
-  }
-
-  @Test
-  public void testDeleteResources() throws Exception {
-    Resource.Type type = Resource.Type.User;
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-    RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
-
-    // set expectations
-    managementController.deleteUsers(AbstractResourceProviderTest.Matcher.getUserRequestSet("User100"));
-
-    // replay
-    replay(managementController, response);
-
-    ResourceProvider provider = AbstractResourceProvider.getResourceProvider(
-        type,
-        PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type),
-        managementController);
-
-    Predicate predicate = new PredicateBuilder().property(UserResourceProvider.USER_USERNAME_PROPERTY_ID).
-        equals("User100").toPredicate();
-    provider.deleteResources(predicate);
-
-    // verify
-    verify(managementController, response);
-  }
-}
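
Unlike getUsers, the void calls createUsers, updateUsers, and deleteUsers are recorded without an expect(...) wrapper: invoking the void method on the mock before replay() is the expectation. A minimal sketch with a hypothetical Notifier interface:

    import static org.easymock.EasyMock.createMock;
    import static org.easymock.EasyMock.replay;
    import static org.easymock.EasyMock.verify;

    public class VoidExpectationSketch {
      interface Notifier {               // hypothetical collaborator
        void send(String message);
      }

      public static void main(String[] args) {
        Notifier notifier = createMock(Notifier.class);

        // recording a void expectation: just call the method; there is
        // no return value, so there is nothing for expect(...) to wrap
        notifier.send("hello");

        replay(notifier);
        notifier.send("hello");          // the call the code under test makes
        verify(notifier);                // fails if send("hello") never happened
      }
    }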
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/jdbc/TestJDBCResourceProvider.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/jdbc/TestJDBCResourceProvider.java
deleted file mode 100644
index 1e69778..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/jdbc/TestJDBCResourceProvider.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.jdbc;
-
-import org.apache.ambari.server.controller.spi.Resource;
-
-import java.util.Map;
-import java.util.Set;
-
-/**
- * JDBC resource provider stub used for testing.
- */
-public class TestJDBCResourceProvider extends JDBCResourceProvider {
-  public TestJDBCResourceProvider(ConnectionFactory connectionFactory, Resource.Type type, Set<String> propertyIds, Map<Resource.Type, String> keyPropertyIds) {
-    super(connectionFactory, type, propertyIds, keyPropertyIds);
-  }
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/jmx/JMXPropertyProviderTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/jmx/JMXPropertyProviderTest.java
deleted file mode 100644
index a6cf41e..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/jmx/JMXPropertyProviderTest.java
+++ /dev/null
@@ -1,277 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.jmx;
-
-import org.apache.ambari.server.controller.internal.ResourceImpl;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.spi.SystemException;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Set;
-
-
-/**
- * JMX property provider tests.
- */
-public class JMXPropertyProviderTest {
-  protected static final String HOST_COMPONENT_HOST_NAME_PROPERTY_ID = PropertyHelper.getPropertyId("HostRoles", "host_name");
-  protected static final String HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID = PropertyHelper.getPropertyId("HostRoles", "component_name");
-
-  @Test
-  public void testGetResources() throws Exception {
-    TestStreamProvider  streamProvider = new TestStreamProvider();
-    TestJMXHostProvider hostProvider = new TestJMXHostProvider(false);
-
-    JMXPropertyProvider propertyProvider = new JMXPropertyProvider(
-        PropertyHelper.getJMXPropertyIds(Resource.Type.HostComponent),
-        streamProvider,
-        hostProvider, PropertyHelper.getPropertyId("HostRoles", "cluster_name"), PropertyHelper.getPropertyId("HostRoles", "host_name"), PropertyHelper.getPropertyId("HostRoles", "component_name"));
-
-    // namenode
-    Resource resource = new ResourceImpl(Resource.Type.HostComponent);
-
-    resource.setProperty(HOST_COMPONENT_HOST_NAME_PROPERTY_ID, "domu-12-31-39-0e-34-e1.compute-1.internal");
-    resource.setProperty(HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID, "NAMENODE");
-
-    // request with an empty set should get all supported properties
-    Request request = PropertyHelper.getReadRequest(Collections.<String>emptySet());
-
-    Assert.assertEquals(1, propertyProvider.populateResources(Collections.singleton(resource), request, null).size());
-
-    Assert.assertEquals(propertyProvider.getSpec("domu-12-31-39-0e-34-e1.compute-1.internal", "50070"), streamProvider.getLastSpec());
-
-    // see test/resources/hdfs_namenode_jmx.json for values
-    Assert.assertEquals(13670605,  resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/rpc", "ReceivedBytes")));
-    Assert.assertEquals(28,      resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/dfs/namenode", "CreateFileOps")));
-    Assert.assertEquals(1006632960, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/jvm", "HeapMemoryMax")));
-    Assert.assertEquals(473433016, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/jvm", "HeapMemoryUsed")));
-    Assert.assertEquals(136314880, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/jvm", "NonHeapMemoryMax")));
-    Assert.assertEquals(23634400, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/jvm", "NonHeapMemoryUsed")));
-
-
-    // datanode
-    resource = new ResourceImpl(Resource.Type.HostComponent);
-
-    resource.setProperty(HOST_COMPONENT_HOST_NAME_PROPERTY_ID, "domu-12-31-39-14-ee-b3.compute-1.internal");
-    resource.setProperty(HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID, "DATANODE");
-
-    // request with an empty set should get all supported properties
-    request = PropertyHelper.getReadRequest(Collections.<String>emptySet());
-
-    propertyProvider.populateResources(Collections.singleton(resource), request, null);
-
-    Assert.assertEquals(propertyProvider.getSpec("domu-12-31-39-14-ee-b3.compute-1.internal", "50075"), streamProvider.getLastSpec());
-
-    // see test/resources/hdfs_datanode_jmx.json for values
-    Assert.assertEquals(856,  resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/rpc", "ReceivedBytes")));
-    Assert.assertEquals(954466304, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/jvm", "HeapMemoryMax")));
-    Assert.assertEquals(9772616, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/jvm", "HeapMemoryUsed")));
-    Assert.assertEquals(136314880, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/jvm", "NonHeapMemoryMax")));
-    Assert.assertEquals(21933376, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/jvm", "NonHeapMemoryUsed")));
-
-
-    // jobtracker
-    resource = new ResourceImpl(Resource.Type.HostComponent);
-
-    resource.setProperty(HOST_COMPONENT_HOST_NAME_PROPERTY_ID, "domu-12-31-39-14-ee-b3.compute-1.internal");
-    resource.setProperty(HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID, "JOBTRACKER");
-
-    // only ask for specific properties
-    Set<String> properties = new HashSet<String>();
-    properties.add(PropertyHelper.getPropertyId("metrics/jvm", "threadsWaiting"));
-    properties.add(PropertyHelper.getPropertyId("metrics/jvm", "HeapMemoryMax"));
-    properties.add(PropertyHelper.getPropertyId("metrics/jvm", "HeapMemoryUsed"));
-    properties.add(PropertyHelper.getPropertyId("metrics/jvm", "NonHeapMemoryMax"));
-    properties.add(PropertyHelper.getPropertyId("metrics/jvm", "NonHeapMemoryUsed"));
-    properties.add(PropertyHelper.getPropertyId("metrics/mapred/jobtracker", "jobs_submitted"));
-    properties.add(PropertyHelper.getPropertyId("metrics/mapred/jobtracker", "jobs_completed"));
-    properties.add(PropertyHelper.getPropertyId("metrics/mapred/jobtracker", "jobs_failed"));
-
-    request = PropertyHelper.getReadRequest(properties);
-
-    propertyProvider.populateResources(Collections.singleton(resource), request, null);
-
-    Assert.assertEquals(propertyProvider.getSpec("domu-12-31-39-14-ee-b3.compute-1.internal", "50030"), streamProvider.getLastSpec());
-
-    // see test/resources/mapreduce_jobtracker_jmx.json for values
-    Assert.assertEquals(10, PropertyHelper.getProperties(resource).size());
-    Assert.assertEquals(59, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/jvm", "threadsWaiting")));
-    Assert.assertEquals(1052770304, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/jvm", "HeapMemoryMax")));
-    Assert.assertEquals(43580400, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/jvm", "HeapMemoryUsed")));
-    Assert.assertEquals(136314880, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/jvm", "NonHeapMemoryMax")));
-    Assert.assertEquals(29602888, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/jvm", "NonHeapMemoryUsed")));
-    Assert.assertEquals(2, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/mapred/jobtracker", "jobs_submitted")));
-    Assert.assertEquals(1, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/mapred/jobtracker", "jobs_completed")));
-    Assert.assertEquals(1, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/mapred/jobtracker", "jobs_failed")));
-
-    Assert.assertNull(resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/jvm", "gcCount")));
-
-    // tasktracker
-    resource = new ResourceImpl(Resource.Type.HostComponent);
-
-    resource.setProperty(HOST_COMPONENT_HOST_NAME_PROPERTY_ID, "domu-12-31-39-14-ee-b3.compute-1.internal");
-    resource.setProperty(HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID, "TASKTRACKER");
-
-    // only ask for specific properties
-    properties = new HashSet<String>();
-    properties.add(PropertyHelper.getPropertyId("metrics/jvm", "HeapMemoryMax"));
-    properties.add(PropertyHelper.getPropertyId("metrics/jvm", "HeapMemoryUsed"));
-    properties.add(PropertyHelper.getPropertyId("metrics/jvm", "NonHeapMemoryMax"));
-    properties.add(PropertyHelper.getPropertyId("metrics/jvm", "NonHeapMemoryUsed"));
-    properties.add(PropertyHelper.getPropertyId("metrics/mapred/shuffleOutput", "shuffle_exceptions_caught"));
-    properties.add(PropertyHelper.getPropertyId("metrics/mapred/shuffleOutput", "shuffle_failed_outputs"));
-    properties.add(PropertyHelper.getPropertyId("metrics/mapred/shuffleOutput", "shuffle_output_bytes"));
-    properties.add(PropertyHelper.getPropertyId("metrics/mapred/shuffleOutput", "shuffle_success_outputs"));
-    properties.add(PropertyHelper.getPropertyId("metrics/mapred/tasktracker", "maps_running"));
-    properties.add(PropertyHelper.getPropertyId("metrics/mapred/tasktracker", "reduces_running"));
-    properties.add(PropertyHelper.getPropertyId("metrics/mapred/tasktracker", "mapTaskSlots"));
-    properties.add(PropertyHelper.getPropertyId("metrics/mapred/tasktracker", "reduceTaskSlots"));
-    properties.add(PropertyHelper.getPropertyId("metrics/mapred/tasktracker", "failedDirs"));
-    properties.add(PropertyHelper.getPropertyId("metrics/mapred/tasktracker", "tasks_completed"));
-    properties.add(PropertyHelper.getPropertyId("metrics/mapred/tasktracker", "tasks_failed_timeout"));
-    properties.add(PropertyHelper.getPropertyId("metrics/mapred/tasktracker", "tasks_failed_ping"));
-
-    request = PropertyHelper.getReadRequest(properties);
-
-    propertyProvider.populateResources(Collections.singleton(resource), request, null);
-
-    Assert.assertEquals(propertyProvider.getSpec("domu-12-31-39-14-ee-b3.compute-1.internal", "50060"), streamProvider.getLastSpec());
-
-    Assert.assertEquals(18, PropertyHelper.getProperties(resource).size());
-    Assert.assertEquals(954466304, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/jvm", "HeapMemoryMax")));
-    Assert.assertEquals(18330984, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/jvm", "HeapMemoryUsed")));
-    Assert.assertEquals(136314880, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/jvm", "NonHeapMemoryMax")));
-    Assert.assertEquals(24235104, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/jvm", "NonHeapMemoryUsed")));
-    Assert.assertEquals(0, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/mapred/shuffleOutput", "shuffle_exceptions_caught")));
-    Assert.assertEquals(0, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/mapred/shuffleOutput", "shuffle_failed_outputs")));
-    Assert.assertEquals(1841, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/mapred/shuffleOutput", "shuffle_output_bytes")));
-    Assert.assertEquals(1, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/mapred/shuffleOutput", "shuffle_success_outputs")));
-    Assert.assertEquals(1, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/mapred/tasktracker", "maps_running")));
-    Assert.assertEquals(1, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/mapred/tasktracker", "reduces_running")));
-    Assert.assertEquals(4, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/mapred/tasktracker", "mapTaskSlots")));
-    Assert.assertEquals(2, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/mapred/tasktracker", "reduceTaskSlots")));
-    Assert.assertEquals(1, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/mapred/tasktracker", "failedDirs")));
-    Assert.assertEquals(4, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/mapred/tasktracker", "tasks_completed")));
-    Assert.assertEquals(1, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/mapred/tasktracker", "tasks_failed_timeout")));
-    Assert.assertEquals(1, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/mapred/tasktracker", "tasks_failed_ping")));
-
-
-    Assert.assertNull(resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/jvm", "gcCount")));
-
-    // hbase master
-    resource = new ResourceImpl(Resource.Type.HostComponent);
-
-    resource.setProperty(HOST_COMPONENT_HOST_NAME_PROPERTY_ID, "domu-12-31-39-14-ee-b3.compute-1.internal");
-    resource.setProperty(HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID, "HBASE_MASTER");
-
-    // only ask for specific properties
-    properties = new HashSet<String>();
-    properties.add(PropertyHelper.getPropertyId("metrics/jvm", "HeapMemoryMax"));
-    properties.add(PropertyHelper.getPropertyId("metrics/jvm", "HeapMemoryUsed"));
-    properties.add(PropertyHelper.getPropertyId("metrics/jvm", "NonHeapMemoryMax"));
-    properties.add(PropertyHelper.getPropertyId("metrics/jvm", "NonHeapMemoryUsed"));
-    properties.add(PropertyHelper.getPropertyId("metrics/load", "AverageLoad"));
-    request = PropertyHelper.getReadRequest(properties);
-
-    propertyProvider.populateResources(Collections.singleton(resource), request, null);
-
-    Assert.assertEquals(propertyProvider.getSpec("domu-12-31-39-14-ee-b3.compute-1.internal", "60010"), streamProvider.getLastSpec());
-
-    Assert.assertEquals(7, PropertyHelper.getProperties(resource).size());
-    Assert.assertEquals(1069416448, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/jvm", "HeapMemoryMax")));
-    Assert.assertEquals(4806976, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/jvm", "HeapMemoryUsed")));
-    Assert.assertEquals(136314880, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/jvm", "NonHeapMemoryMax")));
-    Assert.assertEquals(28971240, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/jvm", "NonHeapMemoryUsed")));
-    Assert.assertEquals(3.0, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/load", "AverageLoad")));
-
-    Assert.assertNull(resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/jvm", "gcCount")));
-  }
-
-  @Test
-  public void testGetResourcesWithUnknownPort() throws Exception {
-    TestStreamProvider  streamProvider = new TestStreamProvider();
-    TestJMXHostProvider hostProvider = new TestJMXHostProvider(true);
-
-    JMXPropertyProvider propertyProvider = new JMXPropertyProvider(
-        PropertyHelper.getJMXPropertyIds(Resource.Type.HostComponent),
-        streamProvider,
-        hostProvider, PropertyHelper.getPropertyId("HostRoles", "cluster_name"), PropertyHelper.getPropertyId("HostRoles", "host_name"), PropertyHelper.getPropertyId("HostRoles", "component_name"));
-
-    // namenode
-    Resource resource = new ResourceImpl(Resource.Type.HostComponent);
-
-    resource.setProperty(HOST_COMPONENT_HOST_NAME_PROPERTY_ID, "domu-12-31-39-0e-34-e1.compute-1.internal");
-    resource.setProperty(HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID, "NAMENODE");
-
-    // request with an empty set should get all supported properties
-    Request request = PropertyHelper.getReadRequest(Collections.<String>emptySet());
-
-    Assert.assertEquals(1, propertyProvider.populateResources(Collections.singleton(resource), request, null).size());
-
-    Assert.assertEquals(propertyProvider.getSpec("domu-12-31-39-0e-34-e1.compute-1.internal", "50070"), streamProvider.getLastSpec());
-
-    // see test/resources/hdfs_namenode_jmx.json for values
-    Assert.assertEquals(13670605,  resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/rpc", "ReceivedBytes")));
-    Assert.assertEquals(28,      resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/dfs/namenode", "CreateFileOps")));
-    Assert.assertEquals(1006632960, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/jvm", "HeapMemoryMax")));
-    Assert.assertEquals(473433016, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/jvm", "HeapMemoryUsed")));
-    Assert.assertEquals(136314880, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/jvm", "NonHeapMemoryMax")));
-    Assert.assertEquals(23634400, resource.getPropertyValue(PropertyHelper.getPropertyId("metrics/jvm", "NonHeapMemoryUsed")));
-  }
-
-  private static class TestJMXHostProvider implements JMXHostProvider {
-    private final boolean unknownPort;
-
-    private TestJMXHostProvider(boolean unknownPort) {
-      this.unknownPort = unknownPort;
-    }
-
-    @Override
-    public String getHostName(String clusterName, String componentName) {
-      return null;
-    }
-
-    @Override
-    public String getPort(String clusterName, String componentName) throws
-      SystemException {
-
-      if (unknownPort) {
-        return null;
-      }
-      if (componentName.equals("NAMENODE"))
-        return "50070";
-      else if (componentName.equals("DATANODE"))
-        return "50075";
-      else if (componentName.equals("JOBTRACKER"))
-        return "50030";
-      else if (componentName.equals("TASKTRACKER"))
-        return "50060";
-      else if (componentName.equals("HBASE_MASTER"))
-        return "60010";
-      else
-        return null;
-    }
-
-  }
-}
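
The property ids used throughout these tests ("metrics/jvm" joined with "HeapMemoryUsed", and so on) are hierarchical, slash-delimited strings; that convention is also what lets checkPropertyIds accept an unknown id whose parent category is known. A small sketch of the convention, inferred from the ids visible in these tests rather than from PropertyHelper itself:

    public class PropertyIdSketch {
      // ("metrics/jvm", "HeapMemoryUsed") -> "metrics/jvm/HeapMemoryUsed"
      static String propertyId(String category, String name) {
        return (category == null || category.length() == 0)
            ? name : category + "/" + name;
      }

      // the parent of cat5/subcat5/map/key is cat5/subcat5/map, which is
      // why map-entry ids pass checkPropertyIds when only the parent is known
      static String parentOf(String propertyId) {
        int slash = propertyId.lastIndexOf('/');
        return slash < 0 ? null : propertyId.substring(0, slash);
      }

      public static void main(String[] args) {
        System.out.println(propertyId("metrics/jvm", "HeapMemoryUsed"));
        System.out.println(parentOf("cat5/subcat5/map/key"));
      }
    }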
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/jmx/TestStreamProvider.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/jmx/TestStreamProvider.java
deleted file mode 100644
index 634d8b4..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/jmx/TestStreamProvider.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.jmx;
-
-import org.apache.ambari.server.controller.utilities.StreamProvider;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.HashMap;
-import java.util.Map;
-
-public class TestStreamProvider implements StreamProvider {
-
-  protected static Map<String, String> FILE_MAPPING = new HashMap<String, String>();
-
-  static {
-    FILE_MAPPING.put("50070", "hdfs_namenode_jmx.json");
-    FILE_MAPPING.put("50075", "hdfs_datanode_jmx.json");
-    FILE_MAPPING.put("50030", "mapreduce_jobtracker_jmx.json");
-    FILE_MAPPING.put("50060", "mapreduce_tasktracker_jmx.json");
-    FILE_MAPPING.put("60010", "hbase_hbasemaster_jmx.json");
-  }
-
-  private String lastSpec;
-
-  @Override
-  public InputStream readFrom(String spec) throws IOException {
-    lastSpec = spec;
-    String filename = FILE_MAPPING.get(getPort(spec));
-    if (filename == null) {
-      throw new IOException("Can't find JMX source for " + spec);
-    }
-    return ClassLoader.getSystemResourceAsStream(filename);
-  }
-
-  public String getLastSpec() {
-    return lastSpec;
-  }
-
-  private String getPort(String spec) {
-    int n = spec.indexOf(":", 5);
-    return spec.substring(n + 1, n + 6);
-  }
-
-
-}
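
TestStreamProvider selects its canned JMX payload from the port embedded in the requested spec. A minimal usage sketch, assuming a spec of the form "http://host:50070/jmx" (the exact URL is built by the JMX property provider, not by this class):

    TestStreamProvider streamProvider = new TestStreamProvider();
    // The five-digit port after the host selects the fixture file.
    InputStream in = streamProvider.readFrom("http://example.host:50070/jmx");
    // in now streams hdfs_namenode_jmx.json from the test classpath,
    // and getLastSpec() returns the spec that was requested.

Note that getPort() assumes a five-digit port immediately after the host's colon; a spec outside that shape maps to no fixture and triggers the IOException above.
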
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/AndPredicateTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/AndPredicateTest.java
deleted file mode 100644
index d232aa4..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/AndPredicateTest.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.predicate;
-
-import junit.framework.Assert;
-import org.apache.ambari.server.controller.internal.ResourceImpl;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.junit.Test;
-
-import java.util.Set;
-
-/**
- * Tests for AndPredicate.
- */
-public class AndPredicateTest {
-
-  @Test
-  public void testApply() {
-    Resource resource = new ResourceImpl(Resource.Type.HostComponent);
-    String propertyId1 = PropertyHelper.getPropertyId("category1", "property1");
-    String propertyId2 = PropertyHelper.getPropertyId("category1", "property2");
-    String propertyId3 = PropertyHelper.getPropertyId("category1", "property3");
-
-    EqualsPredicate predicate1 = new EqualsPredicate<String>(propertyId1, "v1");
-    EqualsPredicate predicate2 = new EqualsPredicate<String>(propertyId2, "v2");
-    EqualsPredicate predicate3 = new EqualsPredicate<String>(propertyId3, "v3");
-
-    AndPredicate andPredicate = new AndPredicate(predicate1, predicate2, predicate3);
-
-    resource.setProperty(propertyId1, "v1");
-    resource.setProperty(propertyId2, "monkey");
-    resource.setProperty(propertyId3, "v3");
-    Assert.assertFalse(andPredicate.evaluate(resource));
-
-    resource.setProperty(propertyId2, "v2");
-    Assert.assertTrue(andPredicate.evaluate(resource));
-  }
-
-  @Test
-  public void testGetProperties() {
-    String propertyId1 = PropertyHelper.getPropertyId("category1", "property1");
-    String propertyId2 = PropertyHelper.getPropertyId("category1", "property2");
-    String propertyId3 = PropertyHelper.getPropertyId("category1", "property3");
-
-    EqualsPredicate predicate1 = new EqualsPredicate<String>(propertyId1, "v1");
-    EqualsPredicate predicate2 = new EqualsPredicate<String>(propertyId2, "v2");
-    EqualsPredicate predicate3 = new EqualsPredicate<String>(propertyId3, "v3");
-
-    AndPredicate andPredicate = new AndPredicate(predicate1, predicate2, predicate3);
-
-    Set<String> ids = andPredicate.getPropertyIds();
-
-    Assert.assertEquals(3, ids.size());
-    Assert.assertTrue(ids.contains(propertyId1));
-    Assert.assertTrue(ids.contains(propertyId2));
-    Assert.assertTrue(ids.contains(propertyId3));
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/CategoryIsEmptyPredicateTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/CategoryIsEmptyPredicateTest.java
deleted file mode 100644
index 56999c9..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/CategoryIsEmptyPredicateTest.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.predicate;
-
-import junit.framework.Assert;
-import org.apache.ambari.server.controller.internal.ResourceImpl;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.junit.Test;
-
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * Tests for CategoryIsEmptyPredicate.
- */
-public class CategoryIsEmptyPredicateTest {
-
-  @Test
-  public void testApply() {
-    Resource resource = new ResourceImpl(Resource.Type.HostComponent);
-    String categoryId = PropertyHelper.getPropertyId("category1", null);
-    Predicate predicate = new CategoryIsEmptyPredicate(categoryId);
-
-    Assert.assertTrue(predicate.evaluate(resource));
-
-    resource.addCategory(categoryId);
-    Assert.assertTrue(predicate.evaluate(resource));
-
-    String propertyId = PropertyHelper.getPropertyId("category1", "bar");
-    resource.setProperty(propertyId, "value1");
-    Assert.assertFalse(predicate.evaluate(resource));
-  }
-
-  @Test
-  public void testApplyWithMap() {
-    Resource resource = new ResourceImpl(Resource.Type.HostComponent);
-    String propertyId = PropertyHelper.getPropertyId("category1", "mapProperty");
-    Predicate predicate = new CategoryIsEmptyPredicate(propertyId);
-
-    Assert.assertTrue(predicate.evaluate(resource));
-
-    Map<String, String> mapProperty = new HashMap<String, String>();
-
-    resource.setProperty(propertyId, mapProperty);
-    Assert.assertTrue(predicate.evaluate(resource));
-
-    mapProperty.put("foo", "bar");
-
-    Assert.assertFalse(predicate.evaluate(resource));
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/CategoryPredicateTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/CategoryPredicateTest.java
deleted file mode 100644
index 7774e2d..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/CategoryPredicateTest.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.predicate;
-
-import junit.framework.Assert;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.junit.Test;
-
-/**
- * Tests for the base category predicate.
- */
-public class CategoryPredicateTest {
-  @Test
-  public void testAccept() {
-    String propertyId = PropertyHelper.getPropertyId("category1", "foo");
-    TestCategoryPredicate predicate = new TestCategoryPredicate(propertyId);
-
-    TestPredicateVisitor visitor = new TestPredicateVisitor();
-    predicate.accept(visitor);
-
-    Assert.assertSame(predicate, visitor.visitedCategoryPredicate);
-  }
-
-  public static class TestCategoryPredicate extends CategoryPredicate {
-
-    public TestCategoryPredicate(String propertyId) {
-      super(propertyId);
-    }
-
-    @Override
-    public boolean evaluate(Resource resource) {
-      return false;
-    }
-  }
-
-  public static class TestPredicateVisitor implements PredicateVisitor {
-
-    CategoryPredicate visitedCategoryPredicate = null;
-
-    @Override
-    public void acceptComparisonPredicate(ComparisonPredicate predicate) {
-    }
-
-    @Override
-    public void acceptArrayPredicate(ArrayPredicate predicate) {
-    }
-
-    @Override
-    public void acceptUnaryPredicate(UnaryPredicate predicate) {
-    }
-
-    @Override
-    public void acceptAlwaysPredicate(AlwaysPredicate predicate) {
-    }
-
-    @Override
-    public void acceptCategoryPredicate(CategoryPredicate predicate) {
-      visitedCategoryPredicate = predicate;
-    }
-  }
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/EqualsPredicateTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/EqualsPredicateTest.java
deleted file mode 100644
index bd3f1f7..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/EqualsPredicateTest.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.predicate;
-
-import junit.framework.Assert;
-import org.apache.ambari.server.controller.internal.ResourceImpl;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.junit.Test;
-
-import java.util.Set;
-
-/**
- * Tests for EqualsPredicate.
- */
-public class EqualsPredicateTest {
-
-  @Test
-  public void testApply() {
-    Resource resource = new ResourceImpl(Resource.Type.HostComponent);
-    String propertyId = PropertyHelper.getPropertyId("category1", "foo");
-    Predicate predicate = new EqualsPredicate<String>(propertyId, "bar");
-
-    resource.setProperty(propertyId, "monkey");
-    Assert.assertFalse(predicate.evaluate(resource));
-
-    resource.setProperty(propertyId, "bar");
-    Assert.assertTrue(predicate.evaluate(resource));
-
-
-    propertyId = PropertyHelper.getPropertyId("category1", "fun");
-    predicate = new EqualsPredicate<String>(propertyId, "bar");
-
-    Assert.assertFalse(predicate.evaluate(resource));
-  }
-
-  @Test
-  public void testGetProperties() {
-    String propertyId = PropertyHelper.getPropertyId("category1", "foo");
-    EqualsPredicate predicate = new EqualsPredicate<String>(propertyId, "bar");
-
-    Set<String> ids = predicate.getPropertyIds();
-
-    Assert.assertEquals(1, ids.size());
-    Assert.assertTrue(ids.contains(propertyId));
-  }
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/GreaterEqualsPredicateTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/GreaterEqualsPredicateTest.java
deleted file mode 100644
index 59a396e..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/GreaterEqualsPredicateTest.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.predicate;
-
-import junit.framework.Assert;
-import org.apache.ambari.server.controller.internal.ResourceImpl;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.junit.Test;
-
-import java.util.Set;
-
-/**
- * Tests for GreaterEqualsPredicate.
- */
-public class GreaterEqualsPredicateTest {
-
-  @Test
-  public void testApply() {
-    Resource resource = new ResourceImpl(Resource.Type.HostComponent);
-    String propertyId = PropertyHelper.getPropertyId("category1", "foo");
-    Predicate predicate = new GreaterEqualsPredicate<Integer>(propertyId, 10);
-
-    resource.setProperty(propertyId, 1);
-    Assert.assertFalse(predicate.evaluate(resource));
-
-    resource.setProperty(propertyId, 100);
-    Assert.assertTrue(predicate.evaluate(resource));
-
-    resource.setProperty(propertyId, 10);
-    Assert.assertTrue(predicate.evaluate(resource));
-  }
-
-  @Test
-  public void testGetProperties() {
-    String propertyId = PropertyHelper.getPropertyId("category1", "foo");
-    GreaterEqualsPredicate predicate = new GreaterEqualsPredicate<Integer>(propertyId, 10);
-
-    Set<String> ids = predicate.getPropertyIds();
-
-    Assert.assertEquals(1, ids.size());
-    Assert.assertTrue(ids.contains(propertyId));
-  }
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/GreaterPredicateTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/GreaterPredicateTest.java
deleted file mode 100644
index a24bd48..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/GreaterPredicateTest.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.predicate;
-
-import junit.framework.Assert;
-import org.apache.ambari.server.controller.internal.ResourceImpl;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.junit.Test;
-
-import java.util.Set;
-
-/**
- * Tests for GreaterPredicate.
- */
-public class GreaterPredicateTest {
-
-  @Test
-  public void testApply() {
-    Resource resource = new ResourceImpl(Resource.Type.HostComponent);
-    String propertyId = PropertyHelper.getPropertyId("category1", "foo");
-    Predicate predicate = new GreaterPredicate<Integer>(propertyId, 10);
-
-    resource.setProperty(propertyId, 1);
-    Assert.assertFalse(predicate.evaluate(resource));
-
-    resource.setProperty(propertyId, 100);
-    Assert.assertTrue(predicate.evaluate(resource));
-
-    resource.setProperty(propertyId, 10);
-    Assert.assertFalse(predicate.evaluate(resource));
-  }
-
-  @Test
-  public void testGetProperties() {
-    String propertyId = PropertyHelper.getPropertyId("category1", "foo");
-    GreaterPredicate predicate = new GreaterPredicate<Integer>(propertyId, 10);
-
-    Set<String> ids = predicate.getPropertyIds();
-
-    Assert.assertEquals(1, ids.size());
-    Assert.assertTrue(ids.contains(propertyId));
-  }
-}
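
Taken together, the two tests above pin down the boundary semantics of the numeric comparison predicates: GreaterPredicate is strict while GreaterEqualsPredicate is inclusive (LessPredicate and LessEqualsPredicate below mirror this). A minimal sketch reusing the resource and property-id setup from the tests:

    Predicate gt  = new GreaterPredicate<Integer>(propertyId, 10);
    Predicate gte = new GreaterEqualsPredicate<Integer>(propertyId, 10);

    resource.setProperty(propertyId, 10);
    // Boundary value: the strict comparison fails, the inclusive one passes.
    Assert.assertFalse(gt.evaluate(resource));
    Assert.assertTrue(gte.evaluate(resource));
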
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/LessEqualsPredicateTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/LessEqualsPredicateTest.java
deleted file mode 100644
index 1791c4c..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/LessEqualsPredicateTest.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.predicate;
-
-import junit.framework.Assert;
-import org.apache.ambari.server.controller.internal.ResourceImpl;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.junit.Test;
-
-import java.util.Set;
-
-/**
- * Tests for LessEqualsPredicate.
- */
-public class LessEqualsPredicateTest {
-
-  @Test
-  public void testApply() {
-    Resource resource = new ResourceImpl(Resource.Type.HostComponent);
-    String propertyId = PropertyHelper.getPropertyId("category1", "foo");
-    Predicate predicate = new LessEqualsPredicate<Integer>(propertyId, 10);
-
-    resource.setProperty(propertyId, 1);
-    Assert.assertTrue(predicate.evaluate(resource));
-
-    resource.setProperty(propertyId, 100);
-    Assert.assertFalse(predicate.evaluate(resource));
-
-    resource.setProperty(propertyId, 10);
-    Assert.assertTrue(predicate.evaluate(resource));
-  }
-
-  @Test
-  public void testGetProperties() {
-    String propertyId = PropertyHelper.getPropertyId("category1", "foo");
-    LessEqualsPredicate predicate = new LessEqualsPredicate<Integer>(propertyId, 10);
-
-    Set<String> ids = predicate.getPropertyIds();
-
-    Assert.assertEquals(1, ids.size());
-    Assert.assertTrue(ids.contains(propertyId));
-  }
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/LessPredicateTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/LessPredicateTest.java
deleted file mode 100644
index 00991cb..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/LessPredicateTest.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.predicate;
-
-import junit.framework.Assert;
-import org.apache.ambari.server.controller.internal.ResourceImpl;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.junit.Test;
-
-import java.util.Set;
-
-/**
- * Tests for LessPredicate.
- */
-public class LessPredicateTest {
-
-  @Test
-  public void testApply() {
-    Resource resource = new ResourceImpl(Resource.Type.HostComponent);
-    String propertyId = PropertyHelper.getPropertyId("category1", "foo");
-    Predicate predicate = new LessPredicate<Integer>(propertyId, 10);
-
-    resource.setProperty(propertyId, 1);
-    Assert.assertTrue(predicate.evaluate(resource));
-
-    resource.setProperty(propertyId, 100);
-    Assert.assertFalse(predicate.evaluate(resource));
-
-    resource.setProperty(propertyId, 10);
-    Assert.assertFalse(predicate.evaluate(resource));
-  }
-
-  @Test
-  public void testGetProperties() {
-    String propertyId = PropertyHelper.getPropertyId("category1", "foo");
-    LessPredicate predicate = new LessPredicate<Integer>(propertyId, 1);
-
-    Set<String> ids = predicate.getPropertyIds();
-
-    Assert.assertEquals(1, ids.size());
-    Assert.assertTrue(ids.contains(propertyId));
-  }
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/NotPredicateTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/NotPredicateTest.java
deleted file mode 100644
index 62bb606..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/NotPredicateTest.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.predicate;
-
-import junit.framework.Assert;
-import org.apache.ambari.server.controller.internal.ResourceImpl;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.junit.Test;
-
-import java.util.Set;
-
-/**
- * Tests for NotPredicate.
- */
-public class NotPredicateTest {
-
-  @Test
-  public void testApply() {
-    Resource resource = new ResourceImpl(Resource.Type.HostComponent);
-    String propertyId = PropertyHelper.getPropertyId("category1", "foo");
-    EqualsPredicate predicate = new EqualsPredicate<String>(propertyId, "bar");
-    NotPredicate notPredicate = new NotPredicate(predicate);
-
-    resource.setProperty(propertyId, "monkey");
-    Assert.assertTrue(notPredicate.evaluate(resource));
-
-    resource.setProperty(propertyId, "bar");
-    Assert.assertFalse(notPredicate.evaluate(resource));
-  }
-
-  @Test
-  public void testGetProperties() {
-    String propertyId = PropertyHelper.getPropertyId("category1", "foo");
-    EqualsPredicate predicate = new EqualsPredicate<String>(propertyId, "bar");
-    NotPredicate notPredicate = new NotPredicate(predicate);
-    Set<String> ids = notPredicate.getPropertyIds();
-
-    Assert.assertEquals(1, ids.size());
-    Assert.assertTrue(ids.contains(propertyId));
-  }
-
-
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/OrPredicateTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/OrPredicateTest.java
deleted file mode 100644
index 138ed21..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/OrPredicateTest.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.predicate;
-
-import junit.framework.Assert;
-import org.apache.ambari.server.controller.internal.ResourceImpl;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.junit.Test;
-
-import java.util.Set;
-
-/**
- * Tests for OrPredicate.
- */
-public class OrPredicateTest {
-
-  @Test
-  public void testApply() {
-    Resource resource = new ResourceImpl(Resource.Type.HostComponent);
-    String propertyId1 = PropertyHelper.getPropertyId("category1", "property1");
-    String propertyId2 = PropertyHelper.getPropertyId("category1", "property2");
-    String propertyId3 = PropertyHelper.getPropertyId("category1", "property3");
-
-    EqualsPredicate predicate1 = new EqualsPredicate<String>(propertyId1, "v1");
-    EqualsPredicate predicate2 = new EqualsPredicate<String>(propertyId2, "v2");
-    EqualsPredicate predicate3 = new EqualsPredicate<String>(propertyId3, "v3");
-
-    OrPredicate orPredicate = new OrPredicate(predicate1, predicate2, predicate3);
-
-    resource.setProperty(propertyId1, "big");
-    resource.setProperty(propertyId2, "monkey");
-    resource.setProperty(propertyId3, "runner");
-    Assert.assertFalse(orPredicate.evaluate(resource));
-
-    resource.setProperty(propertyId2, "v2");
-    Assert.assertTrue(orPredicate.evaluate(resource));
-  }
-
-  @Test
-  public void testGetProperties() {
-    String propertyId1 = PropertyHelper.getPropertyId("category1", "property1");
-    String propertyId2 = PropertyHelper.getPropertyId("category1", "property2");
-    String propertyId3 = PropertyHelper.getPropertyId("category1", "property3");
-
-    EqualsPredicate predicate1 = new EqualsPredicate<String>(propertyId1, "v1");
-    EqualsPredicate predicate2 = new EqualsPredicate<String>(propertyId2, "v2");
-    EqualsPredicate predicate3 = new EqualsPredicate<String>(propertyId3, "v3");
-
-    OrPredicate orPredicate = new OrPredicate(predicate1, predicate2, predicate3);
-
-    Set<String> ids = orPredicate.getPropertyIds();
-
-    Assert.assertEquals(3, ids.size());
-    Assert.assertTrue(ids.contains(propertyId1));
-    Assert.assertTrue(ids.contains(propertyId2));
-    Assert.assertTrue(ids.contains(propertyId3));
-  }
-
-}
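
Because AndPredicate, OrPredicate, and NotPredicate are themselves predicates, they nest freely. A minimal sketch, assuming the same category1 properties used in the tests above, for the expression (property1 == "v1" && property2 == "v2") || !(property3 == "v3"):

    Predicate composed = new OrPredicate(
        new AndPredicate(
            new EqualsPredicate<String>(propertyId1, "v1"),
            new EqualsPredicate<String>(propertyId2, "v2")),
        new NotPredicate(new EqualsPredicate<String>(propertyId3, "v3")));

    // getPropertyIds() aggregates the ids referenced by every nested predicate.
    Set<String> ids = composed.getPropertyIds();
    Assert.assertEquals(3, ids.size());
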
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/PredicateVisitorTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/PredicateVisitorTest.java
deleted file mode 100644
index 503306c..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/predicate/PredicateVisitorTest.java
+++ /dev/null
@@ -1,105 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.predicate;
-
-import junit.framework.Assert;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.junit.Test;
-
-/**
- * Tests for predicate visitors.
- */
-public class PredicateVisitorTest {
-
-  @Test
-  public void testVisitor() {
-
-    String propertyId = PropertyHelper.getPropertyId("category1", "foo");
-    EqualsPredicate equalsPredicate = new EqualsPredicate<String>(propertyId, "bar");
-
-    TestPredicateVisitor visitor = new TestPredicateVisitor();
-    equalsPredicate.accept(visitor);
-
-    Assert.assertSame(equalsPredicate, visitor.visitedComparisonPredicate);
-    Assert.assertNull(visitor.visitedArrayPredicate);
-    Assert.assertNull(visitor.visitedUnaryPredicate);
-
-    AndPredicate andPredicate = new AndPredicate(equalsPredicate);
-
-    visitor = new TestPredicateVisitor();
-    andPredicate.accept(visitor);
-
-    Assert.assertNull(visitor.visitedComparisonPredicate);
-    Assert.assertSame(andPredicate, visitor.visitedArrayPredicate);
-    Assert.assertNull(visitor.visitedUnaryPredicate);
-
-    NotPredicate notPredicate = new NotPredicate(andPredicate);
-
-    visitor = new TestPredicateVisitor();
-    notPredicate.accept(visitor);
-
-    Assert.assertNull(visitor.visitedComparisonPredicate);
-    Assert.assertNull(visitor.visitedArrayPredicate);
-    Assert.assertSame(notPredicate, visitor.visitedUnaryPredicate);
-
-
-    CategoryPredicate categoryPredicate = new CategoryIsEmptyPredicate("cat1");
-
-    visitor = new TestPredicateVisitor();
-    categoryPredicate.accept(visitor);
-
-    Assert.assertNull(visitor.visitedComparisonPredicate);
-    Assert.assertNull(visitor.visitedArrayPredicate);
-    Assert.assertNull(visitor.visitedUnaryPredicate);
-    Assert.assertSame(categoryPredicate, visitor.visitedCategoryPredicate);
-  }
-
-  public static class TestPredicateVisitor implements PredicateVisitor {
-
-    ComparisonPredicate visitedComparisonPredicate = null;
-    ArrayPredicate visitedArrayPredicate = null;
-    UnaryPredicate visitedUnaryPredicate = null;
-    AlwaysPredicate visitedAlwaysPredicate = null;
-    CategoryPredicate visitedCategoryPredicate = null;
-
-    @Override
-    public void acceptComparisonPredicate(ComparisonPredicate predicate) {
-      visitedComparisonPredicate = predicate;
-    }
-
-    @Override
-    public void acceptArrayPredicate(ArrayPredicate predicate) {
-      visitedArrayPredicate = predicate;
-    }
-
-    @Override
-    public void acceptUnaryPredicate(UnaryPredicate predicate) {
-      visitedUnaryPredicate = predicate;
-    }
-
-    @Override
-    public void acceptAlwaysPredicate(AlwaysPredicate predicate) {
-      visitedAlwaysPredicate = predicate;
-    }
-
-    @Override
-    public void acceptCategoryPredicate(CategoryPredicate predicate) {
-      visitedCategoryPredicate = predicate;
-    }
-  }
-}
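
The accept()/acceptXxxPredicate() pairing exercised above is plain double dispatch: each predicate routes itself to exactly one visitor callback, so a visitor can distinguish predicate kinds without instanceof checks. A minimal, self-contained sketch of a concrete visitor (a hypothetical example, not part of the original tests):

    public class KindRecordingVisitor implements PredicateVisitor {
      private String kind;

      @Override
      public void acceptComparisonPredicate(ComparisonPredicate predicate) { kind = "comparison"; }

      @Override
      public void acceptArrayPredicate(ArrayPredicate predicate) { kind = "array"; }

      @Override
      public void acceptUnaryPredicate(UnaryPredicate predicate) { kind = "unary"; }

      @Override
      public void acceptAlwaysPredicate(AlwaysPredicate predicate) { kind = "always"; }

      @Override
      public void acceptCategoryPredicate(CategoryPredicate predicate) { kind = "category"; }

      public String getKind() { return kind; }
    }

As the test above shows for NotPredicate wrapping AndPredicate, predicate.accept(visitor) reports only the outermost predicate's kind; descending into nested predicates is left to the visitor.
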
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/PredicateBuilderTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/PredicateBuilderTest.java
deleted file mode 100644
index 5c43adc..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/PredicateBuilderTest.java
+++ /dev/null
@@ -1,804 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.utilities;
-
-import junit.framework.Assert;
-import org.apache.ambari.server.controller.internal.ResourceImpl;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.junit.Test;
-
-/**
- * Tests for PredicateBuilder.
- */
-public class PredicateBuilderTest {
-
-  @Test
-  public void testSimple() {
-    String p1 = PropertyHelper.getPropertyId("cat1", "prop1");
-
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-    resource.setProperty(p1, "foo");
-
-    PredicateBuilder pb = new PredicateBuilder();
-    Predicate predicate1 = pb.property(p1).equals("foo").toPredicate();
-
-    Assert.assertTrue(predicate1.evaluate(resource));
-
-    PredicateBuilder pb2 = new PredicateBuilder();
-    Predicate predicate2 = pb2.property(p1).equals("bar").toPredicate();
-
-    Assert.assertFalse(predicate2.evaluate(resource));
-  }
-
-  @Test
-  public void testSimpleNot() {
-    String p1 = PropertyHelper.getPropertyId("cat1", "prop1");
-
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-    resource.setProperty(p1, "foo");
-
-    /*  ! p1 == "foo" */
-    PredicateBuilder pb = new PredicateBuilder();
-    Predicate predicate1 = pb.not().property(p1).equals("foo").toPredicate();
-
-    Assert.assertFalse(predicate1.evaluate(resource));
-
-    /*  ! p1 == "bar" */
-    PredicateBuilder pb2 = new PredicateBuilder();
-    Predicate predicate2 = pb2.not().property(p1).equals("bar").toPredicate();
-
-    Assert.assertTrue(predicate2.evaluate(resource));
-  }
-
-  @Test
-  public void testDone() {
-    String p1 = PropertyHelper.getPropertyId("cat1", "prop1");
-
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-    resource.setProperty(p1, "foo");
-
-    PredicateBuilder pb = new PredicateBuilder();
-    Predicate predicate = pb.property(p1).equals("foo").toPredicate();
-
-    // can't reuse a builder after toPredicate is called.
-    try {
-      pb.property(p1).equals("foo").toPredicate();
-      Assert.fail("Expected IllegalStateException.");
-    } catch (IllegalStateException e) {
-      // expected
-    }
-
-    Assert.assertSame(predicate, pb.toPredicate());
-  }
-
-  @Test
-  public void testSimpleAnd() {
-    String p1 = PropertyHelper.getPropertyId("cat1", "prop1");
-    String p2 = PropertyHelper.getPropertyId("cat1", "prop2");
-
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-    resource.setProperty(p1, "foo");
-    resource.setProperty(p2, "bar");
-
-    PredicateBuilder pb = new PredicateBuilder();
-    Predicate predicate1 = pb.property(p1).equals("foo").and().property(p2).equals("bar").toPredicate();
-
-    Assert.assertTrue(predicate1.evaluate(resource));
-
-    PredicateBuilder pb2 = new PredicateBuilder();
-    Predicate predicate2 = pb2.property(p1).equals("foo").and().property(p2).equals("car").toPredicate();
-
-    Assert.assertFalse(predicate2.evaluate(resource));
-  }
-
-  @Test
-  public void testSimpleAndNot() {
-    String p1 = PropertyHelper.getPropertyId("cat1", "prop1");
-    String p2 = PropertyHelper.getPropertyId("cat1", "prop2");
-
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-    resource.setProperty(p1, "foo");
-    resource.setProperty(p2, "bar");
-
-    /* p1 == foo and !p2 == bar */
-    PredicateBuilder pb = new PredicateBuilder();
-    Predicate predicate1 = pb.property(p1).equals("foo").and().not().property(p2).equals("bar").toPredicate();
-
-    Assert.assertFalse(predicate1.evaluate(resource));
-
-    /* p1 == foo and !p2 == car */
-    PredicateBuilder pb2 = new PredicateBuilder();
-    Predicate predicate2 = pb2.property(p1).equals("foo").and().not().property(p2).equals("car").toPredicate();
-
-    Assert.assertTrue(predicate2.evaluate(resource));
-  }
-
-  @Test
-  public void testLongAnd() {
-    String p1 = PropertyHelper.getPropertyId("cat1", "prop1");
-    String p2 = PropertyHelper.getPropertyId("cat1", "prop2");
-    String p3 = PropertyHelper.getPropertyId("cat1", "prop3");
-    String p4 = PropertyHelper.getPropertyId("cat1", "prop4");
-
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-    resource.setProperty(p1, "foo");
-    resource.setProperty(p2, "bar");
-    resource.setProperty(p3, "cat");
-    resource.setProperty(p4, "dog");
-
-    PredicateBuilder pb = new PredicateBuilder();
-    Predicate predicate1 = pb.property(p1).equals("foo").and().property(p2).equals("bar").and().property(p3).equals("cat").and().property(p4).equals("dog").toPredicate();
-
-    Assert.assertTrue(predicate1.evaluate(resource));
-
-    PredicateBuilder pb2 = new PredicateBuilder();
-    Predicate predicate2 = pb2.property(p1).equals("foo").and().property(p2).equals("bar").and().property(p3).equals("cat").and().property(p4).equals("dot").toPredicate();
-
-    Assert.assertFalse(predicate2.evaluate(resource));
-  }
-
-  @Test
-  public void testSimpleOr() {
-    String p1 = PropertyHelper.getPropertyId("cat1", "prop1");
-    String p2 = PropertyHelper.getPropertyId("cat1", "prop2");
-
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-    resource.setProperty(p1, "foo");
-    resource.setProperty(p2, "bar");
-
-    PredicateBuilder pb = new PredicateBuilder();
-    Predicate predicate1 = pb.property(p1).equals("foo").or().property(p2).equals("bar").toPredicate();
-
-    Assert.assertTrue(predicate1.evaluate(resource));
-
-    PredicateBuilder pb2 = new PredicateBuilder();
-    Predicate predicate2 = pb2.property(p1).equals("foo").or().property(p2).equals("car").toPredicate();
-
-    Assert.assertTrue(predicate2.evaluate(resource));
-
-    PredicateBuilder pb3 = new PredicateBuilder();
-    Predicate predicate3 = pb3.property(p1).equals("fun").or().property(p2).equals("car").toPredicate();
-
-    Assert.assertFalse(predicate3.evaluate(resource));
-  }
-
-  @Test
-  public void testLongOr() {
-    String p1 = PropertyHelper.getPropertyId("cat1", "prop1");
-    String p2 = PropertyHelper.getPropertyId("cat1", "prop2");
-    String p3 = PropertyHelper.getPropertyId("cat1", "prop3");
-    String p4 = PropertyHelper.getPropertyId("cat1", "prop4");
-
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-    resource.setProperty(p1, "foo");
-    resource.setProperty(p2, "bar");
-    resource.setProperty(p3, "cat");
-    resource.setProperty(p4, "dog");
-
-    PredicateBuilder pb = new PredicateBuilder();
-    Predicate predicate1 = pb.property(p1).equals("foo").or().property(p2).equals("bar").or().property(p3).equals("cat").or().property(p4).equals("dog").toPredicate();
-
-    Assert.assertTrue(predicate1.evaluate(resource));
-
-    PredicateBuilder pb2 = new PredicateBuilder();
-    Predicate predicate2 = pb2.property(p1).equals("foo").or().property(p2).equals("car").or().property(p3).equals("cat").or().property(p4).equals("dog").toPredicate();
-
-    Assert.assertTrue(predicate2.evaluate(resource));
-
-    PredicateBuilder pb3 = new PredicateBuilder();
-    Predicate predicate3 = pb3.property(p1).equals("fun").or().property(p2).equals("car").or().property(p3).equals("bat").or().property(p4).equals("dot").toPredicate();
-
-    Assert.assertFalse(predicate3.evaluate(resource));
-  }
-
-  @Test
-  public void testAndOr() {
-    String p1 = PropertyHelper.getPropertyId("cat1", "prop1");
-    String p2 = PropertyHelper.getPropertyId("cat1", "prop2");
-    String p3 = PropertyHelper.getPropertyId("cat1", "prop3");
-
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-    resource.setProperty(p1, "foo");
-    resource.setProperty(p2, "bar");
-    resource.setProperty(p3, "cat");
-
-    PredicateBuilder pb1 = new PredicateBuilder();
-    Predicate predicate1 = pb1.property(p1).equals("foo").and().property(p2).equals("bar").or().property(p3).equals("cat").toPredicate();
-
-    Assert.assertTrue(predicate1.evaluate(resource));
-
-
-    PredicateBuilder pb2 = new PredicateBuilder();
-    Predicate predicate2 = pb2.property(p1).equals("foo").and().property(p2).equals("car").or().property(p3).equals("cat").toPredicate();
-
-    Assert.assertTrue(predicate2.evaluate(resource));
-
-
-    PredicateBuilder pb3 = new PredicateBuilder();
-    Predicate predicate3 = pb3.property(p1).equals("foo").and().property(p2).equals("bar").or().property(p3).equals("can").toPredicate();
-
-    Assert.assertTrue(predicate3.evaluate(resource));
-
-
-    PredicateBuilder pb4 = new PredicateBuilder();
-    Predicate predicate4 = pb4.property(p1).equals("foo").and().property(p2).equals("bat").or().property(p3).equals("can").toPredicate();
-
-    Assert.assertFalse(predicate4.evaluate(resource));
-  }
-
-
-  @Test
-  public void testBlocks() {
-    String p1 = PropertyHelper.getPropertyId("cat1", "prop1");
-    String p2 = PropertyHelper.getPropertyId("cat1", "prop2");
-    String p3 = PropertyHelper.getPropertyId("cat1", "prop3");
-
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-    resource.setProperty(p1, "foo");
-    resource.setProperty(p2, "bar");
-    resource.setProperty(p3, "cat");
-
-
-    /*   (p1==foo && p2==bar) || p3 == cat   */
-    PredicateBuilder pb1 = new PredicateBuilder();
-    Predicate predicate1 = pb1.begin().property(p1).equals("foo").and().property(p2).equals("bar").end().or().property(p3).equals("cat").toPredicate();
-
-    Assert.assertTrue(predicate1.evaluate(resource));
-
-    /*   (p1==foo && p2==bat) || p3 == cat   */
-    PredicateBuilder pb2 = new PredicateBuilder();
-    Predicate predicate2 = pb2.begin().property(p1).equals("foo").and().property(p2).equals("bat").end().or().property(p3).equals("cat").toPredicate();
-
-    Assert.assertTrue(predicate2.evaluate(resource));
-
-    /*   (p1==foo && p2==bar) || p3 == can   */
-    PredicateBuilder pb3 = new PredicateBuilder();
-    Predicate predicate3 = pb3.begin().property(p1).equals("foo").and().property(p2).equals("bar").end().or().property(p3).equals("can").toPredicate();
-
-    Assert.assertTrue(predicate3.evaluate(resource));
-
-    /*   (p1==foo && p2==bat) || p3 == can   */
-    PredicateBuilder pb4 = new PredicateBuilder();
-    Predicate predicate4 = pb4.begin().property(p1).equals("foo").and().property(p2).equals("bat").end().or().property(p3).equals("can").toPredicate();
-
-    Assert.assertFalse(predicate4.evaluate(resource));
-
-
-    /*   p1==foo && (p2==bar || p3 == cat)   */
-    PredicateBuilder pb5 = new PredicateBuilder();
-    Predicate predicate5 = pb5.property(p1).equals("foo").and().begin().property(p2).equals("bar").or().property(p3).equals("cat").end().toPredicate();
-
-    Assert.assertTrue(predicate5.evaluate(resource));
-
-    /*   p1==foo && (p2==bat || p3 == cat)   */
-    PredicateBuilder pb6 = new PredicateBuilder();
-    Predicate predicate6 = pb6.property(p1).equals("foo").and().begin().property(p2).equals("bat").or().property(p3).equals("cat").end().toPredicate();
-
-    Assert.assertTrue(predicate6.evaluate(resource));
-
-    /*   p1==foo && (p2==bat || p3 == can)   */
-    PredicateBuilder pb7 = new PredicateBuilder();
-    Predicate predicate7 = pb7.property(p1).equals("foo").and().begin().property(p2).equals("bat").or().property(p3).equals("can").end().toPredicate();
-
-    Assert.assertFalse(predicate7.evaluate(resource));
-
-    /*   p1==fat && (p2==bar || p3 == cat)   */
-    PredicateBuilder pb8 = new PredicateBuilder();
-    Predicate predicate8 = pb8.property(p1).equals("fat").and().begin().property(p2).equals("bar").or().property(p3).equals("cat").end().toPredicate();
-
-    Assert.assertFalse(predicate8.evaluate(resource));
-
-    /*   p1==foo && !(p2==bar || p3 == cat)   */
-    PredicateBuilder pb9 = new PredicateBuilder();
-    Predicate predicate9 = pb9.property(p1).equals("foo").and().not().begin().property(p2).equals("bar").or().property(p3).equals("cat").end().toPredicate();
-
-    Assert.assertFalse(predicate9.evaluate(resource));
-
-
-    /*   p1==foo && !(p2==bat || p3 == car)   */
-    PredicateBuilder pb10 = new PredicateBuilder();
-    Predicate predicate10 = pb10.property(p1).equals("foo").and().not().begin().property(p2).equals("bat").or().property(p3).equals("car").end().toPredicate();
-
-    Assert.assertTrue(predicate10.evaluate(resource));
-  }
-
-  @Test
-  public void testNestedBlocks() {
-    String p1 = PropertyHelper.getPropertyId("cat1", "prop1");
-    String p2 = PropertyHelper.getPropertyId("cat1", "prop2");
-    String p3 = PropertyHelper.getPropertyId("cat1", "prop3");
-    String p4 = PropertyHelper.getPropertyId("cat1", "prop4");
-
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-    resource.setProperty(p1, "foo");
-    resource.setProperty(p2, "bar");
-    resource.setProperty(p3, "cat");
-    resource.setProperty(p4, "dog");
-
-    /*   (p1==foo && (p2==bar || p3==cat)) || p4 == dog   */
-    PredicateBuilder pb1 = new PredicateBuilder();
-    Predicate predicate1 = pb1.
-        begin().
-        property(p1).equals("foo").and().
-        begin().
-        property(p2).equals("bar").or().property(p3).equals("cat").
-        end().
-        end().
-        or().property(p4).equals("dog").toPredicate();
-
-    Assert.assertTrue(predicate1.evaluate(resource));
-
-
-    /*   (p1==fat && (p2==bar || p3==cat)) || p4 == dot   */
-    PredicateBuilder pb2 = new PredicateBuilder();
-    Predicate predicate2 = pb2.
-        begin().
-        property(p1).equals("fat").and().
-        begin().
-        property(p2).equals("bar").or().property(p3).equals("cat").
-        end().
-        end().
-        or().property(p4).equals("dot").toPredicate();
-
-    Assert.assertFalse(predicate2.evaluate(resource));
-  }
-
-
-  @Test
-  public void testUnbalancedBlocks() {
-    String p1 = PropertyHelper.getPropertyId("cat1", "prop1");
-    String p2 = PropertyHelper.getPropertyId("cat1", "prop2");
-    String p3 = PropertyHelper.getPropertyId("cat1", "prop3");
-    String p4 = PropertyHelper.getPropertyId("cat1", "prop4");
-
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-    resource.setProperty(p1, "foo");
-    resource.setProperty(p2, "bar");
-    resource.setProperty(p3, "cat");
-    resource.setProperty(p4, "dog");
-
-    /*   (p1==foo && (p2==bar || p3==cat) || p4 == dog   */
-    PredicateBuilder pb1 = new PredicateBuilder();
-    try {
-      pb1.
-          begin().
-          property(p1).equals("foo").and().
-          begin().
-          property(p2).equals("bar").or().property(p3).equals("cat").
-          end().
-          or().property(p4).equals("dog").toPredicate();
-      Assert.fail("Expected IllegalStateException.");
-    } catch (IllegalStateException e) {
-      // expected
-    }
-
-    /*   (p1==foo && p2==bar || p3==cat)) || p4 == dog   */
-    PredicateBuilder pb2 = new PredicateBuilder();
-    try {
-      pb2.
-          begin().
-          property(p1).equals("foo").and().
-          property(p2).equals("bar").or().property(p3).equals("cat").
-          end().
-          end().
-          or().property(p4).equals("dog").toPredicate();
-      Assert.fail("Expected IllegalStateException.");
-    } catch (IllegalStateException e) {
-      // expected
-    }
-  }
-
-  @Test
-  public void testAltProperty() {
-    String p1 = "cat1/prop1";
-    String p2 = "cat1/prop2";
-    String p3 = "prop3";
-
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-    resource.setProperty(p1, "foo");
-    resource.setProperty(p2, "bar");
-    resource.setProperty(p3, "cat");
-
-
-    /*   (p1==foo && p2==bar) || p3 == cat   */
-    PredicateBuilder pb1 = new PredicateBuilder();
-    Predicate predicate1 = pb1.begin().property("cat1/prop1").equals("foo").and().property("cat1/prop2").equals("bar").end().or().property("prop3").equals("cat").toPredicate();
-
-    Assert.assertTrue(predicate1.evaluate(resource));
-  }
-
-
-  @Test
-  public void testEqualsString() {
-    String p1 = PropertyHelper.getPropertyId("cat1", "prop1");
-
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-    resource.setProperty(p1, "foo");
-
-    PredicateBuilder pb = new PredicateBuilder();
-    Predicate predicate1 = pb.property(p1).equals("foo").toPredicate();
-
-    Assert.assertTrue(predicate1.evaluate(resource));
-
-    PredicateBuilder pb2 = new PredicateBuilder();
-    Predicate predicate2 = pb2.property(p1).equals("bar").toPredicate();
-
-    Assert.assertFalse(predicate2.evaluate(resource));
-  }
-
-  @Test
-  public void testEqualsInteger() {
-    String p1 = PropertyHelper.getPropertyId("cat1", "prop1");
-
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-    resource.setProperty(p1, 1);
-
-    PredicateBuilder pb = new PredicateBuilder();
-    Predicate predicate1 = pb.property(p1).equals(1).toPredicate();
-
-    Assert.assertTrue(predicate1.evaluate(resource));
-
-    PredicateBuilder pb2 = new PredicateBuilder();
-    Predicate predicate2 = pb2.property(p1).equals(99).toPredicate();
-
-    Assert.assertFalse(predicate2.evaluate(resource));
-  }
-
-  @Test
-  public void testEqualsFloat() {
-    String p1 = PropertyHelper.getPropertyId("cat1", "prop1");
-
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-    resource.setProperty(p1, (float) 1);
-
-    PredicateBuilder pb = new PredicateBuilder();
-    Predicate predicate1 = pb.property(p1).equals(Float.valueOf(1)).toPredicate();
-
-    Assert.assertTrue(predicate1.evaluate(resource));
-
-    PredicateBuilder pb2 = new PredicateBuilder();
-    Predicate predicate2 = pb2.property(p1).equals(Float.valueOf(99)).toPredicate();
-
-    Assert.assertFalse(predicate2.evaluate(resource));
-  }
-
-  @Test
-  public void testEqualsDouble() {
-    String p1 = PropertyHelper.getPropertyId("cat1", "prop1");
-
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-    resource.setProperty(p1, 1.999);
-
-    PredicateBuilder pb = new PredicateBuilder();
-    Predicate predicate1 = pb.property(p1).equals(Double.valueOf(1.999)).toPredicate();
-
-    Assert.assertTrue(predicate1.evaluate(resource));
-
-    PredicateBuilder pb2 = new PredicateBuilder();
-    Predicate predicate2 = pb2.property(p1).equals(Double.valueOf(99.998)).toPredicate();
-
-    Assert.assertFalse(predicate2.evaluate(resource));
-  }
-
-  @Test
-  public void testEqualsLong() {
-    String p1 = PropertyHelper.getPropertyId("cat1", "prop1");
-
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-    resource.setProperty(p1, 1L);
-
-    PredicateBuilder pb = new PredicateBuilder();
-    Predicate predicate1 = pb.property(p1).equals(1L).toPredicate();
-
-    Assert.assertTrue(predicate1.evaluate(resource));
-
-    PredicateBuilder pb2 = new PredicateBuilder();
-    Predicate predicate2 = pb2.property(p1).equals(99L).toPredicate();
-
-    Assert.assertFalse(predicate2.evaluate(resource));
-  }
-
-  @Test
-  public void testGreaterInteger() {
-    String p1 = PropertyHelper.getPropertyId("cat1", "prop1");
-
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-    resource.setProperty(p1, 2);
-
-    PredicateBuilder pb = new PredicateBuilder();
-    Predicate predicate1 = pb.property(p1).greaterThan(1).toPredicate();
-
-    Assert.assertTrue(predicate1.evaluate(resource));
-
-    PredicateBuilder pb2 = new PredicateBuilder();
-    Predicate predicate2 = pb2.property(p1).greaterThan(99).toPredicate();
-
-    Assert.assertFalse(predicate2.evaluate(resource));
-  }
-
-  @Test
-  public void testGreaterFloat() {
-    String p1 = PropertyHelper.getPropertyId("cat1", "prop1");
-
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-    resource.setProperty(p1, (float) 2);
-
-    PredicateBuilder pb = new PredicateBuilder();
-    Predicate predicate1 = pb.property(p1).greaterThan((float) 1).toPredicate();
-
-    Assert.assertTrue(predicate1.evaluate(resource));
-
-    PredicateBuilder pb2 = new PredicateBuilder();
-    Predicate predicate2 = pb2.property(p1).greaterThan((float) 99).toPredicate();
-
-    Assert.assertFalse(predicate2.evaluate(resource));
-  }
-
-  @Test
-  public void testGreaterDouble() {
-    String p1 = PropertyHelper.getPropertyId("cat1", "prop1");
-
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-    resource.setProperty(p1, 2.999);
-
-    PredicateBuilder pb = new PredicateBuilder();
-    Predicate predicate1 = pb.property(p1).greaterThan(1.999).toPredicate();
-
-    Assert.assertTrue(predicate1.evaluate(resource));
-
-    PredicateBuilder pb2 = new PredicateBuilder();
-    Predicate predicate2 = pb2.property(p1).greaterThan(99.998).toPredicate();
-
-    Assert.assertFalse(predicate2.evaluate(resource));
-  }
-
-  @Test
-  public void testGreaterLong() {
-    String p1 = PropertyHelper.getPropertyId("cat1", "prop1");
-
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-    resource.setProperty(p1, 2L);
-
-    PredicateBuilder pb = new PredicateBuilder();
-    Predicate predicate1 = pb.property(p1).greaterThan(1L).toPredicate();
-
-    Assert.assertTrue(predicate1.evaluate(resource));
-
-    PredicateBuilder pb2 = new PredicateBuilder();
-    Predicate predicate2 = pb2.property(p1).greaterThan(99L).toPredicate();
-
-    Assert.assertFalse(predicate2.evaluate(resource));
-  }
-
-  @Test
-  public void testGreaterThanEqualToInteger() {
-    String p1 = PropertyHelper.getPropertyId("cat1", "prop1");
-
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-    resource.setProperty(p1, 2);
-
-    PredicateBuilder pb = new PredicateBuilder();
-    Predicate predicate1 = pb.property(p1).greaterThanEqualTo(1).toPredicate();
-
-    Assert.assertTrue(predicate1.evaluate(resource));
-
-    PredicateBuilder pb2 = new PredicateBuilder();
-    Predicate predicate2 = pb2.property(p1).greaterThanEqualTo(99).toPredicate();
-
-    Assert.assertFalse(predicate2.evaluate(resource));
-  }
-
-  @Test
-  public void testGreaterThanEqualToFloat() {
-    String p1 = PropertyHelper.getPropertyId("cat1", "prop1");
-
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-    resource.setProperty(p1, (float) 2);
-
-    PredicateBuilder pb = new PredicateBuilder();
-    Predicate predicate1 = pb.property(p1).greaterThanEqualTo((float) 1).toPredicate();
-
-    Assert.assertTrue(predicate1.evaluate(resource));
-
-    PredicateBuilder pb2 = new PredicateBuilder();
-    Predicate predicate2 = pb2.property(p1).greaterThanEqualTo((float) 99).toPredicate();
-
-    Assert.assertFalse(predicate2.evaluate(resource));
-  }
-
-  @Test
-  public void testGreaterThanEqualToDouble() {
-    String p1 = PropertyHelper.getPropertyId("cat1", "prop1");
-
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-    resource.setProperty(p1, 2.999);
-
-    PredicateBuilder pb = new PredicateBuilder();
-    Predicate predicate1 = pb.property(p1).greaterThanEqualTo(1.999).toPredicate();
-
-    Assert.assertTrue(predicate1.evaluate(resource));
-
-    PredicateBuilder pb2 = new PredicateBuilder();
-    Predicate predicate2 = pb2.property(p1).greaterThanEqualTo(99.998).toPredicate();
-
-    Assert.assertFalse(predicate2.evaluate(resource));
-  }
-
-  @Test
-  public void testGreaterThanEqualToLong() {
-    String p1 = PropertyHelper.getPropertyId("cat1", "prop1");
-
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-    resource.setProperty(p1, 2L);
-
-    PredicateBuilder pb = new PredicateBuilder();
-    Predicate predicate1 = pb.property(p1).greaterThanEqualTo(1L).toPredicate();
-
-    Assert.assertTrue(predicate1.evaluate(resource));
-
-    PredicateBuilder pb2 = new PredicateBuilder();
-    Predicate predicate2 = pb2.property(p1).greaterThanEqualTo(99L).toPredicate();
-
-    Assert.assertFalse(predicate2.evaluate(resource));
-  }
-
-  @Test
-  public void testLessInteger() {
-    String p1 = PropertyHelper.getPropertyId("cat1", "prop1");
-
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-    resource.setProperty(p1, 2);
-
-    PredicateBuilder pb = new PredicateBuilder();
-    Predicate predicate1 = pb.property(p1).lessThan(99).toPredicate();
-
-    Assert.assertTrue(predicate1.evaluate(resource));
-
-    PredicateBuilder pb2 = new PredicateBuilder();
-    Predicate predicate2 = pb2.property(p1).lessThan(1).toPredicate();
-
-    Assert.assertFalse(predicate2.evaluate(resource));
-  }
-
-  @Test
-  public void testLessFloat() {
-    String p1 = PropertyHelper.getPropertyId("cat1", "prop1");
-
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-    resource.setProperty(p1, (float) 2);
-
-    PredicateBuilder pb = new PredicateBuilder();
-    Predicate predicate1 = pb.property(p1).lessThan((float) 99).toPredicate();
-
-    Assert.assertTrue(predicate1.evaluate(resource));
-
-    PredicateBuilder pb2 = new PredicateBuilder();
-    Predicate predicate2 = pb2.property(p1).lessThan((float) 1).toPredicate();
-
-    Assert.assertFalse(predicate2.evaluate(resource));
-  }
-
-  @Test
-  public void testLessDouble() {
-    String p1 = PropertyHelper.getPropertyId("cat1", "prop1");
-
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-    resource.setProperty(p1, 2.999);
-
-    PredicateBuilder pb = new PredicateBuilder();
-    Predicate predicate1 = pb.property(p1).lessThan(99.999).toPredicate();
-
-    Assert.assertTrue(predicate1.evaluate(resource));
-
-    PredicateBuilder pb2 = new PredicateBuilder();
-    Predicate predicate2 = pb2.property(p1).lessThan(1.998).toPredicate();
-
-    Assert.assertFalse(predicate2.evaluate(resource));
-  }
-
-  @Test
-  public void testLessLong() {
-    String p1 = PropertyHelper.getPropertyId("cat1", "prop1");
-
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-    resource.setProperty(p1, 2L);
-
-    PredicateBuilder pb = new PredicateBuilder();
-    Predicate predicate1 = pb.property(p1).lessThan(99L).toPredicate();
-
-    Assert.assertTrue(predicate1.evaluate(resource));
-
-    PredicateBuilder pb2 = new PredicateBuilder();
-    Predicate predicate2 = pb2.property(p1).lessThan(1L).toPredicate();
-
-    Assert.assertFalse(predicate2.evaluate(resource));
-  }
-
-  @Test
-  public void testLessThanEqualToInteger() {
-    String p1 = PropertyHelper.getPropertyId("cat1", "prop1");
-
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-    resource.setProperty(p1, 2);
-
-    PredicateBuilder pb = new PredicateBuilder();
-    Predicate predicate1 = pb.property(p1).lessThanEqualTo(99).toPredicate();
-
-    Assert.assertTrue(predicate1.evaluate(resource));
-
-    PredicateBuilder pb2 = new PredicateBuilder();
-    Predicate predicate2 = pb2.property(p1).lessThanEqualTo(1).toPredicate();
-
-    Assert.assertFalse(predicate2.evaluate(resource));
-  }
-
-  @Test
-  public void testLessThanEqualToFloat() {
-    String p1 = PropertyHelper.getPropertyId("cat1", "prop1");
-
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-    resource.setProperty(p1, (float) 2);
-
-    PredicateBuilder pb = new PredicateBuilder();
-    Predicate predicate1 = pb.property(p1).lessThanEqualTo((float) 99).toPredicate();
-
-    Assert.assertTrue(predicate1.evaluate(resource));
-
-    PredicateBuilder pb2 = new PredicateBuilder();
-    Predicate predicate2 = pb2.property(p1).lessThanEqualTo((float) 1).toPredicate();
-
-    Assert.assertFalse(predicate2.evaluate(resource));
-  }
-
-  @Test
-  public void testLessThanEqualToDouble() {
-    String p1 = PropertyHelper.getPropertyId("cat1", "prop1");
-
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-    resource.setProperty(p1, 2.999);
-
-    PredicateBuilder pb = new PredicateBuilder();
-    Predicate predicate1 = pb.property(p1).lessThanEqualTo(99.999).toPredicate();
-
-    Assert.assertTrue(predicate1.evaluate(resource));
-
-    PredicateBuilder pb2 = new PredicateBuilder();
-    Predicate predicate2 = pb2.property(p1).lessThanEqualTo(1.998).toPredicate();
-
-    Assert.assertFalse(predicate2.evaluate(resource));
-  }
-
-  @Test
-  public void testLessThanEqualToLong() {
-    String p1 = PropertyHelper.getPropertyId("cat1", "prop1");
-
-    Resource resource = new ResourceImpl(Resource.Type.Cluster);
-    resource.setProperty(p1, 2L);
-
-    PredicateBuilder pb = new PredicateBuilder();
-    Predicate predicate1 = pb.property(p1).lessThanEqualTo(99L).toPredicate();
-
-    Assert.assertTrue(predicate1.evaluate(resource));
-
-    PredicateBuilder pb2 = new PredicateBuilder();
-    Predicate predicate2 = pb2.property(p1).lessThanEqualTo(1L).toPredicate();
-
-    Assert.assertFalse(predicate2.evaluate(resource));
-  }
-}
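
Note: the deleted comparison tests above all follow one pattern: set a typed
property on a resource, build a comparison predicate for that property, and
evaluate the predicate against the resource. A minimal sketch of that pattern,
using only builder calls exercised by these tests (long values here; the other
numeric types behave the same way):

    String prop = PropertyHelper.getPropertyId("cat1", "prop1");
    Resource resource = new ResourceImpl(Resource.Type.Cluster);
    resource.setProperty(prop, 2L);

    // greaterThanEqualTo(2L) holds for the value 2L, lessThan(2L) does not
    Predicate ge = new PredicateBuilder().property(prop).greaterThanEqualTo(2L).toPredicate();
    Predicate lt = new PredicateBuilder().property(prop).lessThan(2L).toPredicate();
    Assert.assertTrue(ge.evaluate(resource));
    Assert.assertFalse(lt.evaluate(resource));
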
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/PropertyHelperTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/PropertyHelperTest.java
deleted file mode 100644
index 8217f88..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/PropertyHelperTest.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.utilities;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-
-/**
- * Property helper tests.
- */
-public class PropertyHelperTest {
-
-  @Test
-  public void testGetPropertyId() {
-    Assert.assertEquals("foo", PropertyHelper.getPropertyId("", "foo"));
-    Assert.assertEquals("foo", PropertyHelper.getPropertyId(null, "foo"));
-    Assert.assertEquals("foo", PropertyHelper.getPropertyId(null, "foo/"));
-
-    Assert.assertEquals("cat", PropertyHelper.getPropertyId("cat", ""));
-    Assert.assertEquals("cat", PropertyHelper.getPropertyId("cat", null));
-    Assert.assertEquals("cat", PropertyHelper.getPropertyId("cat/", null));
-
-    Assert.assertEquals("cat/foo", PropertyHelper.getPropertyId("cat", "foo"));
-    Assert.assertEquals("cat/sub/foo", PropertyHelper.getPropertyId("cat/sub", "foo"));
-    Assert.assertEquals("cat/sub/foo", PropertyHelper.getPropertyId("cat/sub", "foo/"));
-  }
-}
-
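
Note: the assertions above fully pin down the getPropertyId contract: null or
empty parts are dropped, a single trailing '/' is stripped from either part,
and the remaining parts are joined with '/'. A hypothetical helper (not the
actual PropertyHelper implementation) that satisfies every assertion in the
deleted test:

    static String propertyId(String category, String name) {
      // treat null as empty and strip one trailing '/'
      String cat = (category == null) ? "" : category;
      String nm = (name == null) ? "" : name;
      if (cat.endsWith("/")) cat = cat.substring(0, cat.length() - 1);
      if (nm.endsWith("/")) nm = nm.substring(0, nm.length() - 1);
      if (cat.isEmpty()) return nm;
      if (nm.isEmpty()) return cat;
      return cat + "/" + nm;
    }
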
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/webserver/StartServer.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/webserver/StartServer.java
deleted file mode 100644
index 0ed0566..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/webserver/StartServer.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.controller.utilities.webserver;
-
-import com.sun.jersey.api.core.PackagesResourceConfig;
-import com.sun.jersey.api.core.ResourceConfig;
-import com.sun.net.httpserver.HttpServer;
-import com.sun.jersey.api.container.httpserver.HttpServerFactory;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * Standalone web server for manually exercising the Ambari REST API.
- */
-public class StartServer {
-
-  public static void main(String[] args) throws IOException {
-    Map<String, String> mapArgs = parseArgs(args);
-    System.out.println("Starting Ambari API server using the following properties: " + mapArgs);
-    System.setProperty("ambariapi.dbfile", mapArgs.get("db"));
-
-    ResourceConfig config = new PackagesResourceConfig("org.apache.ambari.server.api.services");
-    System.out.println("Starting server: http://localhost:" + mapArgs.get("port") + '/');
-    HttpServer server = HttpServerFactory.create("http://localhost:" + mapArgs.get("port") + '/', config);
-    server.start();
-
-    System.out.println("SERVER RUNNING: http://localhost:" + mapArgs.get("port") + '/');
-    System.out.println("Hit return to stop...");
-    System.in.read();
-    System.out.println("Stopping server");
-    server.stop(0);
-    System.out.println("Server stopped");
-  }
-
-  private static Map<String, String> parseArgs(String[] args) {
-    Map<String, String> mapProps = new HashMap<String, String>();
-    mapProps.put("port", "9998");
-    mapProps.put("db", "/var/db/hmc/data/data.db");
-
-    for (int i = 0; i < args.length; i += 2) {
-      String arg = args[i];
-      if (i + 1 >= args.length) {
-        // a flag without a value would otherwise overrun the args array
-        printUsage();
-        throw new RuntimeException("Missing value for argument '" + arg + "'. See usage message.");
-      }
-      if (arg.equals("-p")) {
-        mapProps.put("port", args[i + 1]);
-      } else if (arg.equals("-d")) {
-        mapProps.put("db", args[i + 1]);
-      } else {
-        printUsage();
-        throw new RuntimeException("Unexpected argument. See usage message.");
-      }
-    }
-    return mapProps;
-  }
-
-  public static void printUsage() {
-    System.err.println("Usage: java StartServer [-p portNum] [-d abs path to ambari db file]");
-    System.err.println("Default Values: portNum=9998, ambariDb=/var/db/hmc/data/data.db");
-  }
-}
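
Note: StartServer is a manual testing utility rather than a unit test: it
boots a Jersey HttpServer around the org.apache.ambari.server.api.services
resources and blocks until return is pressed. A hypothetical invocation
following the usage message above (port and db path are example values):

    java org.apache.ambari.server.controller.utilities.webserver.StartServer -p 8080 -d /tmp/test-data.db
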
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/orm/InMemoryDefaultTestModule.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/orm/InMemoryDefaultTestModule.java
deleted file mode 100644
index 7cfd736..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/orm/InMemoryDefaultTestModule.java
+++ /dev/null
@@ -1,42 +0,0 @@
-package org.apache.ambari.server.orm;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import com.google.inject.AbstractModule;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.controller.ControllerModule;
-
-import java.util.Properties;
-
-public class InMemoryDefaultTestModule extends AbstractModule {
-  @Override
-  protected void configure() {
-    Properties properties = new Properties();
-    properties.setProperty(Configuration.PERSISTENCE_IN_MEMORY_KEY, "true");
-    properties.setProperty(Configuration.METADETA_DIR_PATH,
-        "src/test/resources/stacks");
-    properties.setProperty(Configuration.OS_VERSION_KEY,
-        "centos5");
-    try {
-      install(new ControllerModule(properties));
-    } catch (Exception e) {
-      throw new RuntimeException(e);
-    }
-  }
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
deleted file mode 100644
index 99ab8ee..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
+++ /dev/null
@@ -1,196 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm;
-
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.Provider;
-import com.google.inject.Singleton;
-import com.google.inject.persist.Transactional;
-import org.apache.ambari.server.Role;
-import org.apache.ambari.server.RoleCommand;
-import org.apache.ambari.server.actionmanager.HostRoleStatus;
-import org.apache.ambari.server.orm.dao.*;
-import org.apache.ambari.server.orm.entities.*;
-import org.apache.ambari.server.state.HostState;
-import org.springframework.security.crypto.password.PasswordEncoder;
-
-import javax.persistence.EntityManager;
-import java.util.*;
-
-@Singleton
-public class OrmTestHelper {
-
-  @Inject
-  public Provider<EntityManager> entityManagerProvider;
-  @Inject
-  public Injector injector;
-  @Inject
-  public UserDAO userDAO;
-  @Inject
-  public RoleDAO roleDAO;
-
-  public EntityManager getEntityManager() {
-    return entityManagerProvider.get();
-  }
-
-  /**
-   * Creates some test data.
-   */
-  @Transactional
-  public void createDefaultData() {
-
-    ClusterEntity clusterEntity = new ClusterEntity();
-    clusterEntity.setClusterName("test_cluster1");
-    clusterEntity.setClusterInfo("test_cluster_info1");
-
-    HostEntity host1 = new HostEntity();
-    HostEntity host2 = new HostEntity();
-    HostEntity host3 = new HostEntity();
-
-    host1.setHostName("test_host1");
-    host2.setHostName("test_host2");
-    host3.setHostName("test_host3");
-    host1.setIpv4("192.168.0.1");
-    host2.setIpv4("192.168.0.2");
-    host3.setIpv4("192.168.0.3");
-
-    List<HostEntity> hostEntities = new ArrayList<HostEntity>();
-    hostEntities.add(host1);
-    hostEntities.add(host2);
-
-    clusterEntity.setHostEntities(hostEntities);
-
-    // both sides of the relation should be set when modifying at runtime
-    host1.setClusterEntities(Arrays.asList(clusterEntity));
-    host2.setClusterEntities(Arrays.asList(clusterEntity));
-
-    HostStateEntity hostStateEntity1 = new HostStateEntity();
-    hostStateEntity1.setCurrentState(HostState.HEARTBEAT_LOST);
-    hostStateEntity1.setHostEntity(host1);
-    HostStateEntity hostStateEntity2 = new HostStateEntity();
-    hostStateEntity2.setCurrentState(HostState.HEALTHY);
-    hostStateEntity2.setHostEntity(host2);
-    host1.setHostStateEntity(hostStateEntity1);
-    host2.setHostStateEntity(hostStateEntity2);
-
-    ClusterServiceEntity clusterServiceEntity = new ClusterServiceEntity();
-    clusterServiceEntity.setServiceName("HDFS");
-    clusterServiceEntity.setClusterEntity(clusterEntity);
-    List<ClusterServiceEntity> clusterServiceEntities = new ArrayList<ClusterServiceEntity>();
-    clusterServiceEntities.add(clusterServiceEntity);
-    clusterEntity.setClusterServiceEntities(clusterServiceEntities);
-
-    getEntityManager().persist(host1);
-    getEntityManager().persist(host2);
-    getEntityManager().persist(clusterEntity);
-    getEntityManager().persist(hostStateEntity1);
-    getEntityManager().persist(hostStateEntity2);
-    getEntityManager().persist(clusterServiceEntity);
-
-  }
-
-  @Transactional
-  public void createTestUsers() {
-    PasswordEncoder encoder = injector.getInstance(PasswordEncoder.class);
-
-    RoleEntity adminRole = new RoleEntity();
-    adminRole.setRoleName("admin");
-
-    UserEntity admin = new UserEntity();
-    admin.setUserName("administrator");
-    admin.setUserPassword(encoder.encode("admin"));
-
-    Set<RoleEntity> roles = new HashSet<RoleEntity>();
-    Set<UserEntity> users = new HashSet<UserEntity>();
-
-    roles.add(adminRole);
-    users.add(admin);
-
-    admin.setRoleEntities(roles);
-    adminRole.setUserEntities(users);
-
-    userDAO.create(admin);
-    roleDAO.create(adminRole);
-
-    UserEntity userWithoutRoles = new UserEntity();
-    userWithoutRoles.setUserName("userWithoutRoles");
-    userWithoutRoles.setUserPassword(encoder.encode("test"));
-    userDAO.create(userWithoutRoles);
-
-  }
-
-  @Transactional
-  public void performTransactionMarkedForRollback() {
-    ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);
-    clusterDAO.removeByName("test_cluster1");
-    getEntityManager().getTransaction().setRollbackOnly();
-  }
-
-  @Transactional
-  public void createStageCommands() {
-    ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);
-    StageDAO stageDAO = injector.getInstance(StageDAO.class);
-    HostRoleCommandDAO hostRoleCommandDAO = injector.getInstance(HostRoleCommandDAO.class);
-    HostDAO hostDAO = injector.getInstance(HostDAO.class);
-    StageEntity stageEntity = new StageEntity();
-    stageEntity.setCluster(clusterDAO.findByName("test_cluster1"));
-    stageEntity.setRequestId(0L);
-    stageEntity.setStageId(0L);
-
-    HostRoleCommandEntity commandEntity = new HostRoleCommandEntity();
-    HostRoleCommandEntity commandEntity2 = new HostRoleCommandEntity();
-    HostRoleCommandEntity commandEntity3 = new HostRoleCommandEntity();
-    HostEntity host1 = hostDAO.findByName("test_host1");
-    HostEntity host2 = hostDAO.findByName("test_host2");
-    commandEntity.setHost(host1);
-    host1.getHostRoleCommandEntities().add(commandEntity);
-    commandEntity.setHostName("test_host1");
-    commandEntity.setRoleCommand(RoleCommand.INSTALL);
-    commandEntity.setStatus(HostRoleStatus.QUEUED);
-    commandEntity.setRole(Role.DATANODE);
-    commandEntity2.setHost(host2);
-    host2.getHostRoleCommandEntities().add(commandEntity2);
-    commandEntity2.setRoleCommand(RoleCommand.EXECUTE);
-    commandEntity2.setRole(Role.NAMENODE);
-    commandEntity2.setStatus(HostRoleStatus.COMPLETED);
-    commandEntity3.setHost(host1);
-    host1.getHostRoleCommandEntities().add(commandEntity3);
-    commandEntity3.setRoleCommand(RoleCommand.START);
-    commandEntity3.setRole(Role.SECONDARY_NAMENODE);
-    commandEntity3.setStatus(HostRoleStatus.IN_PROGRESS);
-    commandEntity.setStage(stageEntity);
-    commandEntity2.setStage(stageEntity);
-    commandEntity3.setStage(stageEntity);
-
-    stageEntity.setHostRoleCommands(new ArrayList<HostRoleCommandEntity>());
-    stageEntity.getHostRoleCommands().add(commandEntity);
-    stageEntity.getHostRoleCommands().add(commandEntity2);
-    stageEntity.getHostRoleCommands().add(commandEntity3);
-
-    stageDAO.create(stageEntity);
-    hostRoleCommandDAO.create(commandEntity3);
-    hostRoleCommandDAO.create(commandEntity);
-    hostRoleCommandDAO.create(commandEntity2);
-    hostDAO.merge(host1);
-    hostDAO.merge(host2);
-
-  }
-
-}
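
Note: the @Transactional methods on OrmTestHelper only run inside a
transaction when the instance is obtained from Guice, because guice-persist
applies its transaction interceptor to injector-created objects. A minimal
sketch of the intended call pattern, mirroring the setup in TestOrmImpl below:

    Injector injector = Guice.createInjector(new InMemoryDefaultTestModule());
    injector.getInstance(GuiceJpaInitializer.class);           // start JPA
    OrmTestHelper helper = injector.getInstance(OrmTestHelper.class);
    helper.createDefaultData();    // runs through the transaction interceptor
    helper.createStageCommands();
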
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/orm/TestOrmImpl.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/orm/TestOrmImpl.java
deleted file mode 100644
index b7e2f19..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/orm/TestOrmImpl.java
+++ /dev/null
@@ -1,290 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm;
-
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.google.inject.persist.PersistService;
-import org.apache.ambari.server.Role;
-import org.apache.ambari.server.actionmanager.HostRoleStatus;
-import org.apache.ambari.server.orm.dao.*;
-import org.apache.ambari.server.orm.entities.*;
-import org.junit.*;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.persistence.EntityManager;
-import javax.persistence.RollbackException;
-import java.util.Arrays;
-import java.util.Date;
-import java.util.List;
-
-public class TestOrmImpl extends Assert {
-  private static final Logger log = LoggerFactory.getLogger(TestOrmImpl.class);
-
-  private static Injector injector;
-
-  @Before
-  public void setup() {
-    injector = Guice.createInjector(new InMemoryDefaultTestModule());
-    injector.getInstance(GuiceJpaInitializer.class);
-    injector.getInstance(OrmTestHelper.class).createDefaultData();
-  }
-
-  @After
-  public void teardown() {
-    injector.getInstance(PersistService.class).stop();
-  }
-
-  /**
-   * The persistence provider is responsible for returning an empty collection if the relation doesn't exist.
-   */
-  @Test
-  public void testEmptyPersistentCollection() {
-    String testClusterName = "test_cluster2";
-
-    ClusterEntity clusterEntity = new ClusterEntity();
-    clusterEntity.setClusterName(testClusterName);
-    ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);
-    clusterDAO.create(clusterEntity);
-    clusterEntity = clusterDAO.findByName(clusterEntity.getClusterName());
-
-    assertTrue("empty relation wasn't instantiated", clusterEntity.getHostEntities() != null);
-  }
-
-  /**
-   * A transaction marked for rollback must not be allowed to commit.
-   * @throws Throwable on unexpected failure
-   */
-  @Test(expected = RollbackException.class)
-  public void testRollbackException() throws Throwable {
-    injector.getInstance(OrmTestHelper.class).performTransactionMarkedForRollback();
-  }
-
-  /**
-   * Rollback test
-   */
-  @Test
-  public void testSafeRollback() {
-    String testClusterName = "don't save";
-
-    EntityManager entityManager = injector.getInstance(OrmTestHelper.class).getEntityManager();
-    entityManager.getTransaction().begin();
-    ClusterEntity clusterEntity = new ClusterEntity();
-    clusterEntity.setClusterName(testClusterName);
-    entityManager.persist(clusterEntity);
-    entityManager.getTransaction().rollback();
-
-    assertNull("transaction was not rolled back", injector.getInstance(ClusterDAO.class).findByName(testClusterName));
-  }
-
-  /**
-   * Test auto incremented field and custom query example
-   */
-  @Test
-  public void testAutoIncrementedField() {
-    ClusterServiceDAO clusterServiceDAO = injector.getInstance(ClusterServiceDAO.class);
-    Date currentTime = new Date();
-    String serviceName = "MapReduce1";
-    String clusterName = "test_cluster1";
-
-    createService(currentTime, serviceName, clusterName);
-
-    ClusterServiceEntity clusterServiceEntity = clusterServiceDAO.findByClusterAndServiceNames(clusterName, serviceName);
-
-    clusterServiceDAO.remove(clusterServiceEntity);
-
-    assertNull(clusterServiceDAO.findByClusterAndServiceNames(clusterName, serviceName));
-
-  }
-
-  private void createService(Date currentTime, String serviceName, String clusterName) {
-    ClusterServiceDAO clusterServiceDAO = injector.getInstance(ClusterServiceDAO.class);
-    ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);
-    ClusterEntity cluster = clusterDAO.findByName(clusterName);
-
-    ClusterServiceEntity clusterServiceEntity = new ClusterServiceEntity();
-    clusterServiceEntity.setClusterEntity(cluster);
-    clusterServiceEntity.setServiceName(serviceName);
-
-    cluster.getClusterServiceEntities().add(clusterServiceEntity);
-
-    clusterServiceDAO.create(clusterServiceEntity);
-    clusterDAO.merge(cluster);
-
-    clusterServiceEntity = clusterServiceDAO.findByClusterAndServiceNames(clusterName, serviceName);
-    assertNotNull(clusterServiceEntity);
-
-    clusterServiceDAO.merge(clusterServiceEntity);
-  }
-
-  /**
-   * Open question: are cascade operations allowed? For now this verifies that the child entity can be removed directly.
-   */
-  @Test
-  public void testCascadeRemoveFail() {
-    ClusterServiceDAO clusterServiceDAO = injector.getInstance(ClusterServiceDAO.class);
-    Date currentTime = new Date();
-    String serviceName = "MapReduce2";
-    String clusterName = "test_cluster1";
-
-    createService(currentTime, serviceName, clusterName);
-
-    ClusterServiceEntity clusterServiceEntity = clusterServiceDAO.findByClusterAndServiceNames(clusterName, serviceName);
-    clusterServiceDAO.remove(clusterServiceEntity);
-
-    Assert.assertNull(
-        clusterServiceDAO.findByClusterAndServiceNames(clusterName,
-            serviceName));
-  }
-
-  @Test
-  public void testSortedCommands() {
-    injector.getInstance(OrmTestHelper.class).createStageCommands();
-    HostRoleCommandDAO hostRoleCommandDAO = injector.getInstance(HostRoleCommandDAO.class);
-    HostDAO hostDAO = injector.getInstance(HostDAO.class);
-    StageDAO stageDAO = injector.getInstance(StageDAO.class);
-
-    List<HostRoleCommandEntity> list =
-        hostRoleCommandDAO.findSortedCommandsByStageAndHost(
-            stageDAO.findByActionId("0-0"), hostDAO.findByName("test_host1"));
-    log.info("command '{}' - taskId '{}' ", list.get(0).getRoleCommand(),
-        list.get(0).getTaskId());
-    log.info("command '{}' - taskId '{}'", list.get(1).getRoleCommand(),
-        list.get(1).getTaskId());
-    assertTrue(list.get(0).getTaskId() < list.get(1).getTaskId());
-  }
-
-  @Test
-  public void testFindHostsByStage() {
-    injector.getInstance(OrmTestHelper.class).createStageCommands();
-    HostDAO hostDAO = injector.getInstance(HostDAO.class);
-    StageDAO stageDAO = injector.getInstance(StageDAO.class);
-    StageEntity stageEntity = stageDAO.findByActionId("0-0");
-    log.info("StageEntity {} {}" + stageEntity.getRequestId() + " "
-        + stageEntity.getStageId());
-    List<HostEntity> hosts = hostDAO.findByStage(stageEntity);
-    assertEquals(2, hosts.size());
-  }
-
-  @Test
-  public void testAbortHostRoleCommands() {
-    injector.getInstance(OrmTestHelper.class).createStageCommands();
-    HostRoleCommandDAO hostRoleCommandDAO = injector.getInstance(HostRoleCommandDAO.class);
-    int result = hostRoleCommandDAO.updateStatusByRequestId(0L, HostRoleStatus.ABORTED, Arrays.asList(HostRoleStatus.QUEUED, HostRoleStatus.IN_PROGRESS, HostRoleStatus.PENDING));
-    assertEquals(2, result);
-  }
-
-  @Test
-  public void testFindStageByHostRole() {
-    injector.getInstance(OrmTestHelper.class).createStageCommands();
-    HostRoleCommandDAO hostRoleCommandDAO = injector.getInstance(HostRoleCommandDAO.class);
-    List<HostRoleCommandEntity> list = hostRoleCommandDAO.findByHostRole("test_host1", 0L, 0L, Role.DATANODE);
-    assertEquals(1, list.size());
-  }
-
-  @Test
-  public void testLastRequestId() {
-    injector.getInstance(OrmTestHelper.class).createStageCommands();
-    ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);
-    StageDAO stageDAO = injector.getInstance(StageDAO.class);
-    StageEntity stageEntity = new StageEntity();
-    stageEntity.setCluster(clusterDAO.findByName("test_cluster1"));
-    stageEntity.setRequestId(0L);
-    stageEntity.setStageId(1L);
-    stageDAO.create(stageEntity);
-    StageEntity stageEntity2 = new StageEntity();
-    stageEntity2.setCluster(clusterDAO.findByName("test_cluster1"));
-    stageEntity2.setRequestId(0L);
-    stageEntity2.setStageId(2L);
-    stageDAO.create(stageEntity2);
-    assertEquals(0L, stageDAO.getLastRequestId());
-  }
-
-  @Test
-  public void testConcurrentModification() throws InterruptedException {
-    final ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);
-    ClusterEntity clusterEntity = new ClusterEntity();
-    clusterEntity.setClusterName("cluster1");
-    clusterDAO.create(clusterEntity);
-
-    clusterEntity = clusterDAO.findById(clusterEntity.getClusterId());
-    assertEquals("cluster1", clusterEntity.getClusterName());
-
-    Thread thread = new Thread() {
-      @Override
-      public void run() {
-        ClusterEntity clusterEntity1 = clusterDAO.findByName("cluster1");
-        clusterEntity1.setClusterName("anotherName");
-        clusterDAO.merge(clusterEntity1);
-      }
-    };
-
-    thread.start();
-    thread.join();
-
-    clusterEntity = clusterDAO.findById(clusterEntity.getClusterId());
-    assertEquals("anotherName", clusterEntity.getClusterName());
-
-    thread = new Thread() {
-      @Override
-      public void run() {
-        clusterDAO.removeByName("anotherName");
-      }
-    };
-
-    thread.start();
-    thread.join();
-
-    assertNull(clusterDAO.findById(clusterEntity.getClusterId()));
-
-    List<ClusterEntity> result = clusterDAO.findAll();
-
-    thread = new Thread() {
-      @Override
-      public void run() {
-        ClusterEntity temp = new ClusterEntity();
-        temp.setClusterName("temp_cluster");
-        clusterDAO.create(temp);
-      }
-    };
-
-    thread.start();
-    thread.join();
-
-    assertEquals(result.size() + 1, (result = clusterDAO.findAll()).size());
-
-    thread = new Thread() {
-      @Override
-      public void run() {
-        ClusterEntity temp = new ClusterEntity();
-        temp.setClusterName("temp_cluster2");
-        clusterDAO.create(temp);
-      }
-    };
-
-    thread.start();
-    thread.join();
-
-    assertEquals(result.size() + 1, (clusterDAO.findAll()).size());
-
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/resources/TestResources.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/resources/TestResources.java
deleted file mode 100644
index 32d77dc..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/resources/TestResources.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.resources;
-
-import java.io.File;
-import java.io.IOException;
-import java.lang.reflect.Constructor;
-import java.util.Properties;
-
-import junit.framework.TestCase;
-
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.commons.io.FileUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import com.google.inject.AbstractModule;
-import com.google.inject.Guice;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-
-public class TestResources extends TestCase {
-
-  private static ResourceManager resMan;
-  private static final String RESOURCE_FILE_NAME = "resources.ext";
-  private static final String RESOURCE_FILE_CONTENT = "CONTENT";
-  Injector injector;
-  private TemporaryFolder tempFolder = new TemporaryFolder();
-  private File resourceFile;
-
-  protected Properties buildTestProperties() {
-    Properties properties = new Properties();
-    try {
-      tempFolder.create();
-
-      properties.setProperty(Configuration.SRVR_KSTR_DIR_KEY, tempFolder.getRoot().getAbsolutePath());
-      properties.setProperty(Configuration.RESOURCES_DIR_KEY, tempFolder.getRoot().getAbsolutePath());
-
-      resourceFile = tempFolder.newFile(RESOURCE_FILE_NAME);
-      FileUtils.writeStringToFile(resourceFile, RESOURCE_FILE_CONTENT);
-    } catch (IOException e) {
-      e.printStackTrace();
-    }
-    return properties;
-  }
-
-  protected Constructor<Configuration> getConfigurationConstructor() {
-    try {
-      return Configuration.class.getConstructor(Properties.class);
-    } catch (NoSuchMethodException e) {
-      throw new RuntimeException("Expected constructor not found in Configuration.java", e);
-    }
-  }
-
-  private class ResourceModule extends AbstractModule {
-    @Override
-    protected void configure() {
-      bind(Properties.class).toInstance(buildTestProperties());
-      bind(Configuration.class).toConstructor(getConfigurationConstructor());
-      requestStaticInjection(TestResources.class);
-    }
-  }
-
-  @Inject
-  static void init(ResourceManager instance) {
-    resMan = instance;
-  }
-
-  @Before
-  public void setUp() throws IOException {
-    injector = Guice.createInjector(new ResourceModule());
-    resMan = injector.getInstance(ResourceManager.class);
-  }
-
-  @After
-  public void tearDown() throws IOException {
-    resourceFile.delete();
-    tempFolder.delete();
-  }
-
-  @Test
-  public void testGetResource() throws Exception {
-    File resFile = resMan.getResource(resourceFile.getName());
-    assertTrue(resFile.exists());
-    String resContent = FileUtils.readFileToString(resFile);
-    assertEquals(resContent, RESOURCE_FILE_CONTENT);
-  }
-
-}
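
Note: the test above pins down one observable behavior: getResource resolves
a file name against the directory configured under
Configuration.RESOURCES_DIR_KEY. A hypothetical sketch consistent with that
behavior (assuming an injected Configuration field named configs, as accessed
via certMan.configs in the security tests below; the real ResourceManager may
do more, e.g. validation):

    public File getResource(String resourceName) {
      // resolve against the configured resources directory
      String resDir = configs.getConfigsMap().get(Configuration.RESOURCES_DIR_KEY);
      return new File(resDir, resourceName);
    }
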
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/security/CertGenerationTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/security/CertGenerationTest.java
deleted file mode 100644
index bca67ea..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/security/CertGenerationTest.java
+++ /dev/null
@@ -1,126 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.security;
-
-import java.io.File;
-import java.io.IOException;
-import java.lang.reflect.Constructor;
-import java.util.Properties;
-
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import com.google.inject.AbstractModule;
-import com.google.inject.Guice;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-
-import junit.framework.TestCase;
-
-public class CertGenerationTest extends TestCase {
-
-  private static Log LOG = LogFactory.getLog(CertGenerationTest.class);
-  public TemporaryFolder temp = new TemporaryFolder();
-
-  Injector injector;
-
-  private static CertificateManager certMan;
-
-  @Inject
-  static void init(CertificateManager instance) {
-    certMan = instance;
-  }
-
-
-  private class SecurityModule extends AbstractModule {
-    @Override
-    protected void configure() {
-      bind(Properties.class).toInstance(buildTestProperties());
-      bind(Configuration.class).toConstructor(getConfigurationConstructor());
-      requestStaticInjection(CertGenerationTest.class);
-    }
-  }
-
-  protected Properties buildTestProperties() {
-    try {
-      temp.create();
-    } catch (IOException e) {
-      e.printStackTrace();
-    }
-    Properties properties = new Properties();
-    properties.setProperty(Configuration.SRVR_KSTR_DIR_KEY, temp.getRoot().getAbsolutePath());
-
-    System.out.println(properties.get(Configuration.SRVR_CRT_PASS_KEY));
-
-    return properties;
-  }
-
-  protected Constructor<Configuration> getConfigurationConstructor() {
-    try {
-      return Configuration.class.getConstructor(Properties.class);
-    } catch (NoSuchMethodException e) {
-      throw new RuntimeException("Expected constructor not found in Configuration.java", e);
-    }
-  }
-
-  @Before
-  public void setUp() throws IOException {
-    injector = Guice.createInjector(new SecurityModule());
-    certMan = injector.getInstance(CertificateManager.class);
-
-    certMan.initRootCert();
-  }
-	
-
-  @After
-  public void tearDown() throws IOException {
-    temp.delete();
-  }
-
-  public void testServerCertGen() throws Exception {
-
-    File serverCrt = new File(temp.getRoot().getAbsoluteFile() +
-        File.separator + Configuration.SRVR_CRT_NAME_DEFAULT);
-  }
-
-  @Test
-  public void testServerKeyGen() throws Exception {
-    File serverKey = new File(temp.getRoot().getAbsoluteFile() +
-        File.separator + Configuration.SRVR_KEY_NAME_DEFAULT);
-    assertTrue(serverKey.exists());
-  }
-
-  @Test
-  public void testServerKeystoreGen() throws Exception {
-    File serverKeyStore = new File(temp.getRoot().getAbsoluteFile() +
-        File.separator + Configuration.KSTR_NAME_DEFAULT);
-    assertTrue(serverKeyStore.exists());
-  }
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/security/SslExecutionTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/security/SslExecutionTest.java
deleted file mode 100644
index 0e2b704..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/security/SslExecutionTest.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.security;
-
-import com.google.inject.AbstractModule;
-import com.google.inject.Guice;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.junit.*;
-import org.junit.rules.TemporaryFolder;
-import static org.junit.Assert.assertTrue;
-
-import java.io.*;
-import java.lang.reflect.Constructor;
-import java.util.Properties;
-
-public class SslExecutionTest {
-
-  private static Log LOG = LogFactory.getLog(SslExecutionTest.class);
-  public TemporaryFolder temp = new TemporaryFolder();
-
-  Injector injector;
-
-  private static CertificateManager certMan;
-
-  @Inject
-  static void init(CertificateManager instance) {
-    certMan = instance;
-  }
-
-
-  private class SecurityModule extends AbstractModule {
-    @Override
-    protected void configure() {
-      bind(Properties.class).toInstance(buildTestProperties());
-      bind(Configuration.class).toConstructor(getConfigurationConstructor());
-      requestStaticInjection(SslExecutionTest.class);
-    }
-  }
-
-  protected Properties buildTestProperties() {
-    try {
-      temp.create();
-    } catch (IOException e) {
-      e.printStackTrace();
-    }
-    Properties properties = new Properties();
-    properties.setProperty(Configuration.SRVR_KSTR_DIR_KEY, temp.getRoot().getAbsolutePath());
-
-    return properties;
-  }
-
-  protected Constructor<Configuration> getConfigurationConstructor() {
-    try {
-      return Configuration.class.getConstructor(Properties.class);
-    } catch (NoSuchMethodException e) {
-      throw new RuntimeException("Expected constructor not found in Configuration.java", e);
-    }
-  }
-
-  @Before
-  public void setUp() throws IOException {
-
-    injector = Guice.createInjector(new SecurityModule());
-    certMan = injector.getInstance(CertificateManager.class);
-
-    certMan.initRootCert();
-
-  }
-
-  @After
-  public void tearDown() throws IOException {
-    temp.delete();
-  }
-
-  @Test
-  public void testSslLogging() throws Exception {
-    LOG.info("Testing sign");
-
-    certMan.configs.getConfigsMap().put(Configuration.PASSPHRASE_KEY, "123123");
-
-    LOG.info("key dir = " + certMan.configs.getConfigsMap().get(Configuration.SRVR_KSTR_DIR_KEY));
-
-    SignCertResponse signAgentCrt = certMan.signAgentCrt("somehost", "gdfgdfg", "123123");
-    LOG.info("-------------RESPONCE-------------");
-    LOG.info("-------------MESSAGE--------------");
-    LOG.info(signAgentCrt.getMessage());
-    LOG.info("---------------------------------");
-    LOG.info("-------------RESULT--------------");
-    LOG.info(signAgentCrt.getResult());
-    LOG.info("---------------------------------");
-    assertTrue(SignCertResponse.ERROR_STATUS.equals(signAgentCrt.getResult()));
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/security/TestPassFileGeneration.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/security/TestPassFileGeneration.java
deleted file mode 100644
index cc3f5ad..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/security/TestPassFileGeneration.java
+++ /dev/null
@@ -1,127 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.security;
-
-import java.io.File;
-import java.io.IOException;
-import java.lang.reflect.Constructor;
-import java.util.Properties;
-import java.util.Random;
-
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang.RandomStringUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import com.google.inject.AbstractModule;
-import com.google.inject.Guice;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-
-import junit.framework.TestCase;
-
-public class TestPassFileGeneration extends TestCase {
-
-  private static final int PASS_FILE_NAME_LEN = 20;
-  private static final float MAX_PASS_LEN = 100;
-
-  public TemporaryFolder temp = new TemporaryFolder();
-
-  Injector injector;
-
-  private static CertificateManager certMan;
-  private String passFileName;
-  private int passLen;
-
-  @Inject
-  static void init(CertificateManager instance) {
-    certMan = instance;
-  }
-
-  private class SecurityModule extends AbstractModule {
-    @Override
-    protected void configure() {
-      bind(Properties.class).toInstance(buildTestProperties());
-      bind(Configuration.class).toConstructor(getConfigurationConstructor());
-      requestStaticInjection(TestPassFileGeneration.class);
-    }
-  }
-
-  protected Properties buildTestProperties() {
-    try {
-      temp.create();
-    } catch (IOException e) {
-      e.printStackTrace();
-    }
-    Properties properties = new Properties();
-    properties.setProperty(Configuration.SRVR_KSTR_DIR_KEY, temp.getRoot()
-        .getAbsolutePath());
-
-    passLen = (int) Math.abs((new Random().nextFloat() * MAX_PASS_LEN));
-
-    properties.setProperty(Configuration.SRVR_CRT_PASS_LEN_KEY,
-        String.valueOf(passLen));
-
-    passFileName = RandomStringUtils.randomAlphabetic(PASS_FILE_NAME_LEN);
-    properties.setProperty(Configuration.SRVR_CRT_PASS_FILE_KEY, passFileName);
-
-    return properties;
-  }
-
-  protected Constructor<Configuration> getConfigurationConstructor() {
-    try {
-      return Configuration.class.getConstructor(Properties.class);
-    } catch (NoSuchMethodException e) {
-      throw new RuntimeException(
-          "Expected constructor not found in Configuration.java", e);
-    }
-  }
-
-  @Before
-  public void setUp() throws IOException {
-
-    injector = Guice.createInjector(new SecurityModule());
-    certMan = injector.getInstance(CertificateManager.class);
-
-    certMan.initRootCert();
-  }
-
-  @After
-  public void tearDown() throws IOException {
-    temp.delete();
-  }
-
-  @Test
-  public void testPassFileGen() throws Exception {
-
-    File passFile = new File(temp.getRoot().getAbsolutePath() + File.separator
-        + passFileName);
-
-    assertTrue(passFile.exists());
-
-    String pass = FileUtils.readFileToString(passFile);
-
-    assertEquals(passLen, pass.length());
-
-  }
-
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AmbariLdapAuthenticationProviderTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AmbariLdapAuthenticationProviderTest.java
deleted file mode 100644
index dbe256e..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AmbariLdapAuthenticationProviderTest.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.security.authorization;
-
-import junit.framework.Assert;
-
-import com.google.inject.Guice;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.persist.jpa.JpaPersistModule;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.orm.GuiceJpaInitializer;
-import org.apache.ambari.server.orm.dao.UserDAO;
-import org.apache.ambari.server.security.ClientSecurityType;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.springframework.security.authentication.BadCredentialsException;
-import org.springframework.security.authentication.UsernamePasswordAuthenticationToken;
-import org.springframework.security.core.Authentication;
-import org.springframework.security.core.userdetails.UsernameNotFoundException;
-import org.springframework.security.ldap.server.ApacheDSContainer;
-
-import static org.junit.Assert.*;
-
-public class AmbariLdapAuthenticationProviderTest {
-
-  private static ApacheDSContainer apacheDSContainer;
-  private static Injector injector;
-
-  @Inject
-  private AmbariLdapAuthenticationProvider authenticationProvider;
-  @Inject
-  private UserDAO userDAO;
-  @Inject
-  Configuration configuration;
-
-  @BeforeClass
-  public static void beforeClass() throws Exception{
-    injector = Guice.createInjector(new AuthorizationTestModule(), new JpaPersistModule("ambari-javadb"));
-    injector.getInstance(GuiceJpaInitializer.class);
-
-    apacheDSContainer = new ApacheDSContainer("dc=ambari,dc=apache,dc=org", "classpath:/users.ldif");
-    apacheDSContainer.setPort(33389);
-    apacheDSContainer.afterPropertiesSet();
-  }
-
-  @Before
-  public void setUp() {
-    injector.injectMembers(this);
-    configuration.setClientSecurityType(ClientSecurityType.LDAP);
-  }
-
-  @Test(expected = BadCredentialsException.class)
-  public void testBadCredential() throws Exception {
-    Authentication authentication = new UsernamePasswordAuthenticationToken("notFound", "wrong");
-    authenticationProvider.authenticate(authentication);
-  }
-
-  @Test
-  public void testAuthenticate() throws Exception {
-    assertNull("User alread exists in DB", userDAO.findLdapUserByName("allowedUser"));
-    Authentication authentication = new UsernamePasswordAuthenticationToken("allowedUser", "password");
-    Authentication result = authenticationProvider.authenticate(authentication);
-    assertTrue(result.isAuthenticated());
-    assertNotNull("User was not created", userDAO.findLdapUserByName("allowedUser"));
-    result = authenticationProvider.authenticate(authentication);
-    assertTrue(result.isAuthenticated());
-  }
-
-  @Test
-  public void testDisabled() throws Exception {
-    configuration.setClientSecurityType(ClientSecurityType.LOCAL);
-    Authentication authentication = new UsernamePasswordAuthenticationToken("allowedUser", "password");
-    Authentication auth = authenticationProvider.authenticate(authentication);
-    Assert.assertNull(auth);
-  }
-
-  @AfterClass
-  public static void afterClass() {
-    apacheDSContainer.stop();
-  }
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AmbariLocalUserDetailsServiceTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AmbariLocalUserDetailsServiceTest.java
deleted file mode 100644
index 12d07c5..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AmbariLocalUserDetailsServiceTest.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.security.authorization;
-
-import com.google.inject.Guice;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.persist.jpa.JpaPersistModule;
-import org.apache.ambari.server.orm.GuiceJpaInitializer;
-import org.apache.ambari.server.orm.OrmTestHelper;
-import org.apache.ambari.server.orm.dao.UserDAO;
-import org.apache.ambari.server.orm.entities.UserEntity;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.springframework.security.core.userdetails.UserDetails;
-import org.springframework.security.core.userdetails.UsernameNotFoundException;
-import org.springframework.security.crypto.password.PasswordEncoder;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-public class AmbariLocalUserDetailsServiceTest {
-
-  private static Injector injector;
-
-  @Inject
-  AmbariLocalUserDetailsService userDetailsService;
-  @Inject
-  PasswordEncoder passwordEncoder;
-  @Inject
-  UserDAO userDAO;
-
-  @BeforeClass
-  public static void prepareData() {
-    injector = Guice.createInjector(new AuthorizationTestModule(), new JpaPersistModule("ambari-javadb"));
-    injector.getInstance(GuiceJpaInitializer.class);
-    injector.getInstance(OrmTestHelper.class).createTestUsers();
-  }
-
-  @Before
-  public void setUp() throws Exception {
-    injector.injectMembers(this);
-  }
-
-  @Test
-  public void testLoadUserByUsername() throws Exception {
-    UserDetails userDetails = userDetailsService.loadUserByUsername("administrator");
-    assertEquals("Wrong username", "administrator", userDetails.getUsername());
-    assertTrue("Password not matches", passwordEncoder.matches("admin", userDetails.getPassword()));
-    assertFalse("Wrong password accepted", passwordEncoder.matches("wrong", userDetails.getPassword()));
-  }
-
-  @Test(expected = UsernameNotFoundException.class)
-  public void testUsernameNotFound() throws Exception {
-    userDetailsService.loadUserByUsername("notExists_123123123");
-  }
-
-  @Test(expected = UsernameNotFoundException.class)
-  public void testEmptyRoles() throws Exception {
-    UserEntity user = userDAO.findLocalUserByName("userWithoutRoles");
-    userDetailsService.loadUserByUsername(user.getUserName());
-  }
-}
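
For orientation, a sketch of the service shape these three cases pin down: unknown users and users without roles both surface as UsernameNotFoundException, and the stored (already encoded) password is handed to Spring Security unchanged. getRoleEntities() is an assumed accessor; the deleted production class is not shown in this hunk:

    package org.apache.ambari.server.security.authorization;

    import java.util.Collection;

    import org.apache.ambari.server.orm.dao.UserDAO;
    import org.apache.ambari.server.orm.entities.UserEntity;
    import org.springframework.security.core.GrantedAuthority;
    import org.springframework.security.core.userdetails.UserDetails;
    import org.springframework.security.core.userdetails.UserDetailsService;
    import org.springframework.security.core.userdetails.UsernameNotFoundException;

    public class LocalUserDetailsServiceSketch implements UserDetailsService {
      private final UserDAO userDAO;
      private final AuthorizationHelper authorizationHelper = new AuthorizationHelper();

      public LocalUserDetailsServiceSketch(UserDAO userDAO) {
        this.userDAO = userDAO;
      }

      @Override
      public UserDetails loadUserByUsername(String username) {
        UserEntity user = userDAO.findLocalUserByName(username);
        if (user == null) {
          throw new UsernameNotFoundException("Username " + username + " not found");
        }
        Collection<GrantedAuthority> authorities =
            authorizationHelper.convertRolesToAuthorities(user.getRoleEntities()); // accessor assumed
        if (authorities.isEmpty()) {
          throw new UsernameNotFoundException("Username " + username + " has no roles");
        }
        return new org.springframework.security.core.userdetails.User(
            user.getUserName(), user.getUserPassword(), authorities);
      }
    }
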
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AuthorizationHelperTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AuthorizationHelperTest.java
deleted file mode 100644
index ab4c8af..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AuthorizationHelperTest.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.security.authorization;
-
-import org.apache.ambari.server.orm.entities.RoleEntity;
-import org.junit.Test;
-import org.springframework.security.core.GrantedAuthority;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Iterator;
-
-import static org.junit.Assert.assertEquals;
-
-public class AuthorizationHelperTest {
-
-  @Test
-  public void testConvertRolesToAuthorities() throws Exception {
-    Collection<RoleEntity> roles = new ArrayList<RoleEntity>();
-    RoleEntity role = new RoleEntity();
-    role.setRoleName("admin");
-    roles.add(role);
-    role = new RoleEntity();
-    role.setRoleName("user");
-    roles.add(role);
-
-    Collection<GrantedAuthority> authorities = new AuthorizationHelper().convertRolesToAuthorities(roles);
-
-    assertEquals("Wrong number of authorities", 2, authorities.size());
-    Iterator<GrantedAuthority> iterator = authorities.iterator();
-    assertEquals("Wrong authority name", "ADMIN", iterator.next().getAuthority());
-
-  }
-}
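
The single assertion above fixes the only observable contract: role names come back upper-cased as authority names. One implementation consistent with that, using Spring's SimpleGrantedAuthority; this is a sketch, not the deleted production class:

    package org.apache.ambari.server.security.authorization;

    import java.util.ArrayList;
    import java.util.Collection;

    import org.apache.ambari.server.orm.entities.RoleEntity;
    import org.springframework.security.core.GrantedAuthority;
    import org.springframework.security.core.authority.SimpleGrantedAuthority;

    public class AuthorizationHelperSketch {
      // "admin" -> "ADMIN", matching the assertion in the test above.
      public Collection<GrantedAuthority> convertRolesToAuthorities(Collection<RoleEntity> roles) {
        Collection<GrantedAuthority> authorities = new ArrayList<GrantedAuthority>(roles.size());
        for (RoleEntity role : roles) {
          authorities.add(new SimpleGrantedAuthority(role.getRoleName().toUpperCase()));
        }
        return authorities;
      }
    }
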
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AuthorizationTestModule.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AuthorizationTestModule.java
deleted file mode 100644
index ed36094..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AuthorizationTestModule.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.security.authorization;
-
-import com.google.inject.AbstractModule;
-import org.apache.ambari.server.configuration.Configuration;
-import org.springframework.security.crypto.password.PasswordEncoder;
-import org.springframework.security.crypto.password.StandardPasswordEncoder;
-
-import java.lang.reflect.Constructor;
-import java.util.Properties;
-
-public class AuthorizationTestModule extends AbstractModule {
-  @Override
-  protected void configure() {
-
-    bind(PasswordEncoder.class).to(StandardPasswordEncoder.class);
-    bind(Properties.class).toInstance(buildTestProperties());
-    bind(Configuration.class).toConstructor(getConfigurationConstructor());
-  }
-
-  protected Properties buildTestProperties() {
-    Properties properties = new Properties();
-    properties.setProperty(Configuration.CLIENT_SECURITY_KEY, "ldap");
-    return properties;
-  }
-
-  protected Constructor<Configuration> getConfigurationConstructor() {
-    try {
-      return Configuration.class.getConstructor(Properties.class);
-    } catch (NoSuchMethodException e) {
-      throw new RuntimeException("Expected constructor not found in Configuration.java", e);
-    }
-  }
-}
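
A usage sketch for this module, mirroring how the neighbouring tests build their injectors. The toConstructor binding makes Guice invoke Configuration(Properties) with the test Properties built above, rather than whatever the no-argument constructor would load:

    import com.google.inject.Guice;
    import com.google.inject.Injector;

    import org.apache.ambari.server.configuration.Configuration;

    public class AuthorizationTestModuleUsage {
      public static void main(String[] args) {
        Injector injector = Guice.createInjector(new AuthorizationTestModule());
        Configuration configuration = injector.getInstance(Configuration.class);
        // CLIENT_SECURITY_KEY was bound to "ldap" by buildTestProperties()
        System.out.println(configuration.getLdapServerProperties().getLdapUrls());
      }
    }
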
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/LdapServerPropertiesTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/LdapServerPropertiesTest.java
deleted file mode 100644
index 9043439..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/LdapServerPropertiesTest.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.security.authorization;
-
-import com.google.inject.Guice;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import org.apache.ambari.server.configuration.Configuration;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.util.List;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-public class LdapServerPropertiesTest {
-
-  private final Injector injector;
-
-  private static final String INCORRECT_URL_LIST = "Incorrect LDAP URL list created";
-  private static final String INCORRECT_USER_SEARCH_FILTER = "Incorrect search filter";
-
-  protected LdapServerProperties ldapServerProperties;
-
-  @Inject
-  Configuration configuration;
-
-  public LdapServerPropertiesTest() {
-    injector = Guice.createInjector(new AuthorizationTestModule());
-    injector.injectMembers(this);
-  }
-
-  @Before
-  public void setUp() throws Exception {
-    ldapServerProperties = new LdapServerProperties();
-    ldapServerProperties.setAnonymousBind(true);
-    ldapServerProperties.setBaseDN("dc=ambari,dc=apache,dc=org");
-    ldapServerProperties.setManagerDn("uid=manager," + ldapServerProperties.getBaseDN());
-    ldapServerProperties.setManagerPassword("password");
-    ldapServerProperties.setUseSsl(false);
-    ldapServerProperties.setPrimaryUrl("1.2.3.4:389");
-    ldapServerProperties.setUsernameAttribute("uid");
-  }
-
-  @Test
-  public void testGetLdapUrls() throws Exception {
-    List<String> urls = ldapServerProperties.getLdapUrls();
-    assertEquals(INCORRECT_URL_LIST, 1, urls.size());
-    assertEquals(INCORRECT_URL_LIST, "ldap://1.2.3.4:389", urls.get(0));
-    ldapServerProperties.setSecondaryUrl("4.3.2.1:1234");
-    urls = ldapServerProperties.getLdapUrls();
-    assertEquals(INCORRECT_URL_LIST, 2, urls.size());
-    assertEquals(INCORRECT_URL_LIST, "ldap://4.3.2.1:1234", urls.get(1));
-    ldapServerProperties.setUseSsl(true);
-    urls = ldapServerProperties.getLdapUrls();
-    assertEquals(INCORRECT_URL_LIST, "ldaps://1.2.3.4:389", urls.get(0));
-    assertEquals(INCORRECT_URL_LIST, "ldaps://4.3.2.1:1234", urls.get(1));
-  }
-
-  @Test
-  public void testGetUserSearchFilter() throws Exception {
-    assertEquals(INCORRECT_USER_SEARCH_FILTER, "(uid={0})", ldapServerProperties.getUserSearchFilter());
-    ldapServerProperties.setUsernameAttribute("anotherName");
-    assertEquals(INCORRECT_USER_SEARCH_FILTER, "(anotherName={0})", ldapServerProperties.getUserSearchFilter());
-  }
-
-  @Test
-  public void testEquals() throws Exception {
-    LdapServerProperties properties1 = configuration.getLdapServerProperties();
-    LdapServerProperties properties2 = configuration.getLdapServerProperties();
-    assertTrue("Properties object is same", properties1 != properties2);
-    assertTrue("Objects are not equal", properties1.equals(properties2));
-    assertTrue("Hash codes are not equal", properties1.hashCode() == properties2.hashCode());
-    properties2.setSecondaryUrl("5.6.7.8:389");
-    assertFalse("Objects are equal", properties1.equals(properties2));
-  }
-}
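
The assertions above fully determine the URL and search-filter formats. A self-contained sketch consistent with them, assuming primaryUrl and secondaryUrl are stored as bare host:port strings as in setUp():

    import java.util.ArrayList;
    import java.util.List;

    public class LdapServerPropertiesSketch {
      private boolean useSsl;
      private String primaryUrl;    // e.g. "1.2.3.4:389"
      private String secondaryUrl;  // optional failover server
      private String usernameAttribute = "uid";

      public List<String> getLdapUrls() {
        String scheme = useSsl ? "ldaps://" : "ldap://";
        List<String> urls = new ArrayList<String>();
        urls.add(scheme + primaryUrl);
        if (secondaryUrl != null && !secondaryUrl.isEmpty()) {
          urls.add(scheme + secondaryUrl);
        }
        return urls;
      }

      public String getUserSearchFilter() {
        return "(" + usernameAttribute + "={0})"; // "(uid={0})" by default
      }
    }
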
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/TestUsers.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/TestUsers.java
deleted file mode 100644
index f9bc143..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/TestUsers.java
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.security.authorization;
-
-import com.google.inject.Guice;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.persist.PersistService;
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.actionmanager.ActionDBAccessorImpl;
-import org.apache.ambari.server.actionmanager.ActionManager;
-import org.apache.ambari.server.agent.ActionQueue;
-import org.apache.ambari.server.orm.GuiceJpaInitializer;
-import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.apache.ambari.server.orm.dao.RoleDAO;
-import org.apache.ambari.server.orm.dao.UserDAO;
-import org.apache.ambari.server.orm.entities.UserEntity;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.springframework.security.crypto.password.PasswordEncoder;
-
-import java.util.List;
-
-import static org.junit.Assert.*;
-
-public class TestUsers {
-  private Injector injector;
-
-  @Inject
-  protected Users users;
-  @Inject
-  protected UserDAO userDAO;
-  @Inject
-  protected RoleDAO roleDAO;
-  @Inject
-  protected PasswordEncoder passwordEncoder;
-
-  @Before
-  public void setup() throws AmbariException {
-    injector = Guice.createInjector(new InMemoryDefaultTestModule());
-    injector.getInstance(GuiceJpaInitializer.class);
-    injector.injectMembers(this);
-    users.createDefaultRoles();
-  }
-
-  @After
-  public void tearDown() throws AmbariException {
-    injector.getInstance(PersistService.class).stop();
-  }
-
-  @Test
-  public void testGetAllUsers() throws Exception {
-    users.createUser("user", "user");
-    users.createUser("admin", "admin");
-
-    List<User> userList = users.getAllUsers();
-
-    assertEquals(2, userList.size());
-
-    for (User user : userList) {
-      assertEquals(false, user.isLdapUser());
-    }
-
-    assertEquals(2, userDAO.findAll().size());
-
-    UserEntity userEntity = userDAO.findLocalUserByName("user");
-    assertNotNull("user", userEntity.getUserPassword());
-
-    users.modifyPassword("user", "user", "resu");
-
-    assertNotSame(userEntity.getUserPassword(), userDAO.findLocalUserByName("user").getUserPassword());
-  }
-
-  @Test(expected = AmbariException.class)
-  public void testModifyPassword() throws Exception {
-    users.createUser("user", "user");
-
-    UserEntity userEntity = userDAO.findLocalUserByName("user");
-
-    assertNotSame("user", userEntity.getUserPassword());
-    assertTrue(passwordEncoder.matches("user", userEntity.getUserPassword()));
-
-    users.modifyPassword("user", "user", "resu");
-
-    assertNotSame(userEntity.getUserPassword(), userDAO.findLocalUserByName("user").getUserPassword());
-
-    users.modifyPassword("user", "error", "new");
-
-    fail("Exception was not thrown");
-  }
-
-  @Test
-  public void testPromoteUser() throws Exception {
-    users.createUser("admin", "admin");
-    User user = users.getLocalUser("admin");
-    assertTrue(user.getRoles().contains(users.getUserRole()));
-    assertFalse(user.getRoles().contains(users.getAdminRole()));
-
-    users.promoteToAdmin(user);
-
-    user = users.getLocalUser("admin");
-    assertTrue(user.getRoles().contains(users.getAdminRole()));
-
-    users.demoteAdmin(user);
-
-    user = users.getLocalUser("admin");
-    assertFalse(user.getRoles().contains(users.getAdminRole()));
-
-  }
-}
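
testModifyPassword documents the contract that a caller must prove the current password before it is replaced. A sketch of that check under the fields the test injects; setUserPassword and the final DAO update call are assumptions, not code from the deleted Users class:

    import org.apache.ambari.server.AmbariException;
    import org.apache.ambari.server.orm.dao.UserDAO;
    import org.apache.ambari.server.orm.entities.UserEntity;
    import org.springframework.security.crypto.password.PasswordEncoder;

    public class UsersPasswordSketch {
      private final UserDAO userDAO;
      private final PasswordEncoder passwordEncoder;

      public UsersPasswordSketch(UserDAO userDAO, PasswordEncoder passwordEncoder) {
        this.userDAO = userDAO;
        this.passwordEncoder = passwordEncoder;
      }

      // Wrong current password -> AmbariException, as the test expects.
      public void modifyPassword(String userName, String oldPassword, String newPassword)
          throws AmbariException {
        UserEntity entity = userDAO.findLocalUserByName(userName);
        if (entity == null || !passwordEncoder.matches(oldPassword, entity.getUserPassword())) {
          throw new AmbariException("Wrong current password provided for user " + userName);
        }
        entity.setUserPassword(passwordEncoder.encode(newPassword)); // setter assumed
        userDAO.merge(entity); // persistence call is illustrative
      }
    }
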
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/stageplanner/TestStagePlanner.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/stageplanner/TestStagePlanner.java
deleted file mode 100644
index 69cdc17..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/stageplanner/TestStagePlanner.java
+++ /dev/null
@@ -1,129 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.stageplanner;
-
-import static org.junit.Assert.*;
-
-import java.util.HashMap;
-import java.util.List;
-
-import org.apache.ambari.server.Role;
-import org.apache.ambari.server.RoleCommand;
-import org.apache.ambari.server.actionmanager.Stage;
-import org.apache.ambari.server.controller.HostsMap;
-import org.apache.ambari.server.metadata.RoleCommandOrder;
-import org.apache.ambari.server.state.svccomphost.ServiceComponentHostStartEvent;
-import org.apache.ambari.server.utils.StageUtils;
-import org.junit.Test;
-
-public class TestStagePlanner {
-
-  @Test
-  public void testSingleStagePlan() {
-    RoleCommandOrder.initialize();
-    RoleCommandOrder rco = new RoleCommandOrder();
-    RoleGraph rg = new RoleGraph(rco);
-    String hostname = "dummy";
-    Stage stage = StageUtils.getATestStage(1, 1, hostname);
-    rg.build(stage);
-    List<Stage> outStages = rg.getStages();
-    for (Stage s: outStages) {
-      System.out.println(s.toString());
-    }
-    assertEquals(1, outStages.size());
-    assertEquals(stage.getExecutionCommands(hostname), outStages.get(0)
-        .getExecutionCommands(hostname));
-  }
-
-  @Test
-  public void testMultiStagePlan() {
-    RoleCommandOrder.initialize();
-    RoleCommandOrder rco = new RoleCommandOrder();
-    RoleGraph rg = new RoleGraph(rco);
-    long now = System.currentTimeMillis();
-    Stage stage = StageUtils.getATestStage(1, 1, "host1");
-    stage.addHostRoleExecutionCommand("host2", Role.HBASE_MASTER,
-        RoleCommand.START, new ServiceComponentHostStartEvent("HBASE_MASTER",
-            "host2", now, new HashMap<String, String>()), "cluster1", "HBASE");
-    stage.addHostRoleExecutionCommand("host3", Role.ZOOKEEPER_SERVER,
-        RoleCommand.START, new ServiceComponentHostStartEvent("ZOOKEEPER_SERVER",
-            "host3", now, new HashMap<String, String>()), "cluster1", "ZOOKEEPER");
-    System.out.println(stage.toString());
-
-    rg.build(stage);
-    System.out.println(rg.stringifyGraph());
-    List<Stage> outStages = rg.getStages();
-    for (Stage s: outStages) {
-      System.out.println(s.toString());
-    }
-    assertEquals(3, outStages.size());
-  }
-
-  @Test
-  public void testManyStages() {
-    RoleCommandOrder.initialize();
-    RoleCommandOrder rco = new RoleCommandOrder();
-    RoleGraph rg = new RoleGraph(rco);
-    long now = System.currentTimeMillis();
-    Stage stage = StageUtils.getATestStage(1, 1, "host1");
-    stage.addHostRoleExecutionCommand("host11", Role.SECONDARY_NAMENODE,
-        RoleCommand.START, new ServiceComponentHostStartEvent("SECONDARY_NAMENODE",
-            "host11", now, new HashMap<String, String>()), "cluster1", "HDFS");
-    stage.addHostRoleExecutionCommand("host2", Role.HBASE_MASTER,
-        RoleCommand.START, new ServiceComponentHostStartEvent("HBASE_MASTER",
-            "host2", now, new HashMap<String, String>()), "cluster1", "HBASE");
-    stage.addHostRoleExecutionCommand("host3", Role.ZOOKEEPER_SERVER,
-        RoleCommand.START, new ServiceComponentHostStartEvent("ZOOKEEPER_SERVER",
-            "host3", now, new HashMap<String, String>()), "cluster1", "ZOOKEEPER");
-    stage.addHostRoleExecutionCommand("host4", Role.DATANODE,
-        RoleCommand.START, new ServiceComponentHostStartEvent("DATANODE",
-            "host4", now, new HashMap<String, String>()), "cluster1", "HDFS");
-    stage.addHostRoleExecutionCommand("host4", Role.HBASE_REGIONSERVER,
-        RoleCommand.START, new ServiceComponentHostStartEvent("HBASE_REGIONSERVER",
-            "host4", now, new HashMap<String, String>()), "cluster1", "HBASE");
-    stage.addHostRoleExecutionCommand("host4", Role.TASKTRACKER,
-        RoleCommand.START, new ServiceComponentHostStartEvent("TASKTRACKER",
-            "host4", now, new HashMap<String, String>()), "cluster1", "MAPREDUCE");
-    stage.addHostRoleExecutionCommand("host5", Role.JOBTRACKER,
-        RoleCommand.START, new ServiceComponentHostStartEvent("JOBTRACKER",
-            "host5", now, new HashMap<String, String>()), "cluster1", "MAPREDUCE");
-    stage.addHostRoleExecutionCommand("host6", Role.OOZIE_SERVER,
-        RoleCommand.START, new ServiceComponentHostStartEvent("OOZIE_SERVER",
-            "host6", now, new HashMap<String, String>()), "cluster1", "OOZIE");
-    stage.addHostRoleExecutionCommand("host7", Role.WEBHCAT_SERVER,
-        RoleCommand.START, new ServiceComponentHostStartEvent("WEBHCAT_SERVER",
-            "host7", now, new HashMap<String, String>()), "cluster1", "WEBHCAT");
-    stage.addHostRoleExecutionCommand("host8", Role.NAGIOS_SERVER,
-        RoleCommand.START, new ServiceComponentHostStartEvent("NAGIOS_SERVER",
-            "host8", now, new HashMap<String, String>()), "cluster1", "NAGIOS");
-    stage.addHostRoleExecutionCommand("host4", Role.GANGLIA_MONITOR,
-        RoleCommand.START, new ServiceComponentHostStartEvent("GANGLIA_MONITOR",
-            "host4", now, new HashMap<String, String>()), "cluster1", "GANGLIA");
-    stage.addHostRoleExecutionCommand("host9", Role.GANGLIA_SERVER,
-        RoleCommand.START, new ServiceComponentHostStartEvent("GANGLIA_SERVER",
-            "host9", now, new HashMap<String, String>()), "cluster1", "GANGLIA");
-    System.out.println(stage.toString());
-    rg.build(stage);
-    System.out.println(rg.stringifyGraph());
-    List<Stage> outStages = rg.getStages();
-    for (Stage s: outStages) {
-      System.out.println(s.toString());
-    }
-    assertEquals(4, outStages.size());
-  }
-}
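
The stage counts asserted above follow from RoleCommandOrder's start-order dependency graph: roles with no unmet dependencies fill the first stage, and each later stage holds roles that must wait for an earlier one (HBASE_MASTER after ZOOKEEPER_SERVER, region servers and TaskTrackers after their masters, and so on). The call pattern all three tests share, with stage standing for a fully populated Stage:

    RoleCommandOrder.initialize();
    RoleGraph roleGraph = new RoleGraph(new RoleCommandOrder());
    roleGraph.build(stage);                       // one flat stage in
    List<Stage> ordered = roleGraph.getStages();  // dependency-ordered stages out
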
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java
deleted file mode 100644
index 32567f5..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java
+++ /dev/null
@@ -1,329 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state;
-
-import static org.junit.Assert.fail;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import junit.framework.Assert;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.controller.ServiceComponentResponse;
-import org.apache.ambari.server.orm.GuiceJpaInitializer;
-import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.apache.ambari.server.orm.dao.HostComponentDesiredStateDAO;
-import org.apache.ambari.server.orm.dao.HostComponentStateDAO;
-import org.apache.ambari.server.orm.dao.ServiceComponentDesiredStateDAO;
-import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
-import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntityPK;
-import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
-import org.apache.ambari.server.orm.entities.HostComponentStateEntityPK;
-import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity;
-import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntityPK;
-import org.apache.ambari.server.state.svccomphost.ServiceComponentHostImpl;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.google.inject.persist.PersistService;
-
-public class ServiceComponentTest {
-
-  private Clusters clusters;
-  private Cluster cluster;
-  private Service service;
-  private String clusterName;
-  private String serviceName;
-  private Injector injector;
-  private ServiceFactory serviceFactory;
-  private ServiceComponentFactory serviceComponentFactory;
-  private ServiceComponentHostFactory serviceComponentHostFactory;
-  private AmbariMetaInfo metaInfo;
-
-  @Before
-  public void setup() throws Exception {
-    injector = Guice.createInjector(new InMemoryDefaultTestModule());
-    injector.getInstance(GuiceJpaInitializer.class);
-    clusters = injector.getInstance(Clusters.class);
-    serviceFactory = injector.getInstance(ServiceFactory.class);
-    serviceComponentFactory = injector.getInstance(
-        ServiceComponentFactory.class);
-    serviceComponentHostFactory = injector.getInstance(
-        ServiceComponentHostFactory.class);
-    metaInfo = injector.getInstance(AmbariMetaInfo.class);
-    metaInfo.init();
-
-    clusterName = "foo";
-    serviceName = "HDFS";
-    clusters.addCluster(clusterName);
-    cluster = clusters.getCluster(clusterName);
-    cluster.setDesiredStackVersion(new StackId("HDP-0.1"));
-    Assert.assertNotNull(cluster);
-    Service s = serviceFactory.createNew(cluster, serviceName);
-    cluster.addService(s);
-    s.persist();
-    service = cluster.getService(serviceName);
-    Assert.assertNotNull(service);
-  }
-
-  @After
-  public void teardown() throws AmbariException {
-    injector.getInstance(PersistService.class).stop();
-  }
-
-  @Test
-  public void testCreateServiceComponent() throws AmbariException {
-    String componentName = "DATANODE2";
-    ServiceComponent component = serviceComponentFactory.createNew(service,
-        componentName);
-    service.addServiceComponent(component);
-    component.persist();
-
-    ServiceComponent sc = service.getServiceComponent(componentName);
-    Assert.assertNotNull(sc);
-
-    Assert.assertEquals(componentName, sc.getName());
-    Assert.assertEquals(serviceName, sc.getServiceName());
-    Assert.assertEquals(cluster.getClusterId(),
-        sc.getClusterId());
-    Assert.assertEquals(cluster.getClusterName(),
-        sc.getClusterName());
-    Assert.assertEquals(State.INIT, sc.getDesiredState());
-    Assert.assertFalse(
-        sc.getDesiredStackVersion().getStackId().isEmpty());
-  }
-
-
-  @Test
-  public void testGetAndSetServiceComponentInfo() throws AmbariException {
-    String componentName = "NAMENODE";
-    ServiceComponent component = serviceComponentFactory.createNew(service,
-        componentName);
-    service.addServiceComponent(component);
-    component.persist();
-
-    ServiceComponent sc = service.getServiceComponent(componentName);
-    Assert.assertNotNull(sc);
-
-
-    sc.setDesiredState(State.INSTALLED);
-    Assert.assertEquals(State.INSTALLED, sc.getDesiredState());
-
-    sc.setDesiredStackVersion(new StackId("HDP-1.0.0"));
-    Assert.assertEquals("HDP-1.0.0", sc.getDesiredStackVersion().getStackId());
-
-    ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO =
-        injector.getInstance(ServiceComponentDesiredStateDAO.class);
-
-    ServiceComponentDesiredStateEntityPK primaryKey =
-        new ServiceComponentDesiredStateEntityPK();
-    primaryKey.setClusterId(cluster.getClusterId());
-    primaryKey.setComponentName(componentName);
-    primaryKey.setServiceName(serviceName);
-
-    ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity =
-        serviceComponentDesiredStateDAO.findByPK(primaryKey);
-
-    ServiceComponent sc1 = serviceComponentFactory.createExisting(service,
-        serviceComponentDesiredStateEntity);
-    Assert.assertNotNull(sc1);
-    Assert.assertEquals(State.INSTALLED, sc1.getDesiredState());
-    Assert.assertEquals("HDP-1.0.0",
-        sc1.getDesiredStackVersion().getStackId());
-
-  }
-
-  @Test
-  public void testGetAndSetConfigs() {
-    // FIXME add unit tests for configs once impl done
-    /*
-      public Map<String, Config> getDesiredConfigs();
-      public void updateDesiredConfigs(Map<String, Config> configs);
-     */
-  }
-
-  private void addHostToCluster(String hostname,
-      String clusterName) throws AmbariException {
-    clusters.addHost(hostname);
-    Host h = clusters.getHost(hostname);
-    h.setIPv4(hostname + "ipv4");
-    h.setIPv6(hostname + "ipv6");
-    h.setOsType("centos6");
-    h.persist();
-    clusters.mapHostToCluster(hostname, clusterName);
-  }
-
-  @Test
-  public void testAddAndGetServiceComponentHosts() throws AmbariException {
-    String componentName = "NAMENODE";
-    ServiceComponent component = serviceComponentFactory.createNew(service,
-        componentName);
-    service.addServiceComponent(component);
-    component.persist();
-
-    ServiceComponent sc = service.getServiceComponent(componentName);
-    Assert.assertNotNull(sc);
-
-    Assert.assertTrue(sc.getServiceComponentHosts().isEmpty());
-
-    try {
-      serviceComponentHostFactory.createNew(sc, "h1", false);
-      fail("Expected error for invalid host");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    addHostToCluster("h1", service.getCluster().getClusterName());
-    addHostToCluster("h2", service.getCluster().getClusterName());
-    addHostToCluster("h3", service.getCluster().getClusterName());
-
-    ServiceComponentHost sch1 =
-        serviceComponentHostFactory.createNew(sc, "h1", false);
-    ServiceComponentHost sch2 =
-        serviceComponentHostFactory.createNew(sc, "h2", false);
-    ServiceComponentHost failSch =
-        serviceComponentHostFactory.createNew(sc, "h2", false);
-
-    Map<String, ServiceComponentHost> compHosts =
-        new HashMap<String, ServiceComponentHost>();
-    compHosts.put("h1", sch1);
-    compHosts.put("h2", sch2);
-    compHosts.put("h3", failSch);
-
-    try {
-      sc.addServiceComponentHosts(compHosts);
-      fail("Expected error for dups");
-    } catch (Exception e) {
-      // Expected
-    }
-    Assert.assertTrue(sc.getServiceComponentHosts().isEmpty());
-
-    compHosts.remove("h3");
-    sc.addServiceComponentHosts(compHosts);
-
-    Assert.assertEquals(2, sc.getServiceComponentHosts().size());
-
-    sch1.persist();
-    sch2.persist();
-
-    ServiceComponentHost schCheck = sc.getServiceComponentHost("h2");
-    Assert.assertNotNull(schCheck);
-    Assert.assertEquals("h2", schCheck.getHostName());
-
-    ServiceComponentHost sch3 =
-        serviceComponentHostFactory.createNew(sc, "h3", false);
-    sc.addServiceComponentHost(sch3);
-    sch3.persist();
-    Assert.assertNotNull(sc.getServiceComponentHost("h3"));
-
-    sch1.setDesiredStackVersion(new StackId("HDP-1.1.0"));
-    sch1.setState(State.STARTING);
-    sch1.setStackVersion(new StackId("HDP-1.0.0"));
-    sch1.setDesiredState(State.STARTED);
-
-    HostComponentDesiredStateDAO desiredStateDAO = injector.getInstance(
-        HostComponentDesiredStateDAO.class);
-    HostComponentStateDAO liveStateDAO = injector.getInstance(
-        HostComponentStateDAO.class);
-
-    HostComponentDesiredStateEntityPK dPK =
-        new HostComponentDesiredStateEntityPK();
-    HostComponentStateEntityPK lPK =
-        new HostComponentStateEntityPK();
-
-    dPK.setClusterId(cluster.getClusterId());
-    dPK.setComponentName(componentName);
-    dPK.setHostName("h1");
-    dPK.setServiceName(serviceName);
-    lPK.setClusterId(cluster.getClusterId());
-    lPK.setComponentName(componentName);
-    lPK.setHostName("h1");
-    lPK.setServiceName(serviceName);
-
-    HostComponentDesiredStateEntity desiredStateEntity =
-        desiredStateDAO.findByPK(dPK);
-    HostComponentStateEntity stateEntity =
-        liveStateDAO.findByPK(lPK);
-
-    ServiceComponentHost sch = serviceComponentHostFactory.createExisting(sc,
-        stateEntity, desiredStateEntity);
-    Assert.assertNotNull(sch);
-    Assert.assertEquals(State.STARTING, sch.getState());
-    Assert.assertEquals(State.STARTED, sch.getDesiredState());
-    Assert.assertEquals("HDP-1.0.0",
-        sch.getStackVersion().getStackId());
-    Assert.assertEquals("HDP-1.1.0",
-        sch.getDesiredStackVersion().getStackId());
-  }
-
-  @Test
-  public void testConvertToResponse() throws AmbariException {
-    String componentName = "NAMENODE";
-    ServiceComponent component = serviceComponentFactory.createNew(service,
-        componentName);
-    service.addServiceComponent(component);
-    component.persist();
-
-    ServiceComponent sc = service.getServiceComponent(componentName);
-    Assert.assertNotNull(sc);
-    sc.setDesiredState(State.INSTALLED);
-    sc.setDesiredStackVersion(new StackId("HDP-1.0.0"));
-
-    ServiceComponentResponse r = sc.convertToResponse();
-    Assert.assertEquals(sc.getClusterName(), r.getClusterName());
-    Assert.assertEquals(sc.getClusterId(), r.getClusterId().longValue());
-    Assert.assertEquals(sc.getName(), r.getComponentName());
-    Assert.assertEquals(sc.getServiceName(), r.getServiceName());
-    Assert.assertEquals(sc.getDesiredStackVersion().getStackId(),
-        r.getDesiredStackVersion());
-    Assert.assertEquals(sc.getDesiredState().toString(),
-        r.getDesiredState());
-
-    // TODO check configs
-    // r.getConfigVersions()
-
-    // TODO test debug dump
-    StringBuilder sb = new StringBuilder();
-    sc.debugDump(sb);
-    Assert.assertFalse(sb.toString().isEmpty());
-  }
-
-  @Test
-  public void testCanBeRemoved() throws Exception{
-    String componentName = "NAMENODE";
-    ServiceComponent component = serviceComponentFactory.createNew(service,
-        componentName);
-
-    for (State state : State.values()) {
-      component.setDesiredState(state);
-
-      if (state.isRemovableState()) {
-        org.junit.Assert.assertTrue(component.canBeRemoved());
-      }
-      else {
-        org.junit.Assert.assertFalse(component.canBeRemoved());
-      }
-    }
-  }
-}
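
Two of the tests above use the same round trip to prove that live-object state actually reaches the database: persist the component, look up its entity by composite key, and re-hydrate through the factory. Condensed from testGetAndSetServiceComponentInfo, reusing the test's own fields:

    ServiceComponentDesiredStateEntityPK pk = new ServiceComponentDesiredStateEntityPK();
    pk.setClusterId(cluster.getClusterId());
    pk.setServiceName(serviceName);
    pk.setComponentName("NAMENODE");

    ServiceComponentDesiredStateEntity entity = serviceComponentDesiredStateDAO.findByPK(pk);
    ServiceComponent rehydrated = serviceComponentFactory.createExisting(service, entity);
    // rehydrated now reports the persisted desired state and stack version
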
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceTest.java
deleted file mode 100644
index f9658fa..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceTest.java
+++ /dev/null
@@ -1,282 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.google.inject.persist.PersistService;
-
-import junit.framework.Assert;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.controller.ServiceResponse;
-import org.apache.ambari.server.orm.GuiceJpaInitializer;
-import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import static org.junit.Assert.*;
-
-public class ServiceTest {
-
-  private Clusters clusters;
-  private Cluster cluster;
-  private String clusterName;
-  private Injector injector;
-  private ServiceFactory serviceFactory;
-  private ServiceComponentFactory serviceComponentFactory;
-  private AmbariMetaInfo metaInfo;
-
-  @Before
-  public void setup() throws Exception {
-    injector = Guice.createInjector(new InMemoryDefaultTestModule());
-    injector.getInstance(GuiceJpaInitializer.class);
-    clusters = injector.getInstance(Clusters.class);
-    serviceFactory = injector.getInstance(ServiceFactory.class);
-    serviceComponentFactory = injector.getInstance(
-        ServiceComponentFactory.class);
-    metaInfo = injector.getInstance(AmbariMetaInfo.class);
-    metaInfo.init();
-    clusterName = "foo";
-    clusters.addCluster(clusterName);
-    cluster = clusters.getCluster(clusterName);
-    cluster.setDesiredStackVersion(new StackId("HDP-0.1"));
-    Assert.assertNotNull(cluster);
-  }
-
-  @After
-  public void teardown() throws AmbariException {
-    injector.getInstance(PersistService.class).stop();
-  }
-
-  @Test
-  public void testCreateService() throws AmbariException {
-    String serviceName = "HDFS";
-    Service s = serviceFactory.createNew(cluster, serviceName);
-    cluster.addService(s);
-    s.persist();
-    Service service = cluster.getService(serviceName);
-
-    Assert.assertNotNull(service);
-    Assert.assertEquals(serviceName, service.getName());
-    Assert.assertEquals(cluster.getClusterId(),
-        service.getCluster().getClusterId());
-    Assert.assertEquals(cluster.getClusterName(),
-        service.getCluster().getClusterName());
-    Assert.assertEquals(State.INIT, service.getDesiredState());
-    Assert.assertFalse(
-        service.getDesiredStackVersion().getStackId().isEmpty());
-  }
-
-  @Test
-  public void testGetAndSetServiceInfo() throws AmbariException {
-    String serviceName = "HDFS";
-    Service s = serviceFactory.createNew(cluster, serviceName);
-    cluster.addService(s);
-    s.persist();
-
-    Service service = cluster.getService(serviceName);
-    Assert.assertNotNull(service);
-
-    service.setDesiredStackVersion(new StackId("HDP-1.1.0"));
-    Assert.assertEquals("HDP-1.1.0",
-        service.getDesiredStackVersion().getStackId());
-
-    service.setDesiredState(State.INSTALLING);
-    Assert.assertEquals(State.INSTALLING, service.getDesiredState());
-
-    // FIXME use DAO to verify the persisted object matches the in-memory state
-
-  }
-
-
-  @Test
-  public void testAddAndGetServiceComponents() throws AmbariException {
-    String serviceName = "HDFS";
-    Service s = serviceFactory.createNew(cluster, serviceName);
-    cluster.addService(s);
-    s.persist();
-
-    Service service = cluster.getService(serviceName);
-
-    Assert.assertNotNull(service);
-
-    Assert.assertTrue(s.getServiceComponents().isEmpty());
-
-    ServiceComponent sc1 =
-        serviceComponentFactory.createNew(s, "NAMENODE");
-    ServiceComponent sc2 =
-        serviceComponentFactory.createNew(s, "DATANODE1");
-    ServiceComponent sc3 =
-        serviceComponentFactory.createNew(s, "DATANODE2");
-
-    Map<String, ServiceComponent> comps = new
-        HashMap<String, ServiceComponent>();
-    comps.put(sc1.getName(), sc1);
-    comps.put(sc2.getName(), sc2);
-
-    s.addServiceComponents(comps);
-
-    Assert.assertEquals(2, s.getServiceComponents().size());
-    Assert.assertNotNull(s.getServiceComponent(sc1.getName()));
-    Assert.assertNotNull(s.getServiceComponent(sc2.getName()));
-
-    try {
-      s.getServiceComponent(sc3.getName());
-      fail("Expected error when looking for invalid component");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    s.addServiceComponent(sc3);
-
-    sc1.persist();
-    sc2.persist();
-    sc3.persist();
-
-    ServiceComponent sc4 = s.addServiceComponent("HDFS_CLIENT");
-    Assert.assertNotNull(s.getServiceComponent(sc4.getName()));
-    Assert.assertEquals(State.INIT,
-        s.getServiceComponent("HDFS_CLIENT").getDesiredState());
-    Assert.assertTrue(sc4.isClientComponent());
-    sc4.persist();
-
-    Assert.assertEquals(4, s.getServiceComponents().size());
-
-    Assert.assertNotNull(s.getServiceComponent(sc3.getName()));
-    Assert.assertEquals(sc3.getName(),
-        s.getServiceComponent(sc3.getName()).getName());
-    Assert.assertEquals(s.getName(),
-        s.getServiceComponent(sc3.getName()).getServiceName());
-    Assert.assertEquals(cluster.getClusterName(),
-        s.getServiceComponent(sc3.getName()).getClusterName());
-
-    sc4.setDesiredState(State.INSTALLING);
-    Assert.assertEquals(State.INSTALLING,
-        s.getServiceComponent("HDFS_CLIENT").getDesiredState());
-
-  }
-
-  @Test
-  public void testGetAndSetConfigs() {
-    // FIXME add unit tests for configs once impl done
-    /*
-      public Map<String, Config> getDesiredConfigs();
-      public void updateDesiredConfigs(Map<String, Config> configs);
-     */
-  }
-
-
-  @Test
-  public void testConvertToResponse() throws AmbariException {
-    String serviceName = "HDFS";
-    Service s = serviceFactory.createNew(cluster, serviceName);
-    cluster.addService(s);
-    Service service = cluster.getService(serviceName);
-    Assert.assertNotNull(service);
-
-    ServiceResponse r = s.convertToResponse();
-    Assert.assertEquals(s.getName(), r.getServiceName());
-    Assert.assertEquals(s.getCluster().getClusterName(),
-        r.getClusterName());
-    Assert.assertEquals(s.getDesiredStackVersion().getStackId(),
-        r.getDesiredStackVersion());
-    Assert.assertEquals(s.getDesiredState().toString(),
-        r.getDesiredState());
-
-    service.setDesiredStackVersion(new StackId("HDP-1.1.0"));
-    service.setDesiredState(State.INSTALLING);
-    r = s.convertToResponse();
-    Assert.assertEquals(s.getName(), r.getServiceName());
-    Assert.assertEquals(s.getCluster().getClusterName(),
-        r.getClusterName());
-    Assert.assertEquals(s.getDesiredStackVersion().getStackId(),
-        r.getDesiredStackVersion());
-    Assert.assertEquals(s.getDesiredState().toString(),
-        r.getDesiredState());
-    // FIXME add checks for configs
-
-    StringBuilder sb = new StringBuilder();
-    s.debugDump(sb);
-    // TODO better checks?
-    Assert.assertFalse(sb.toString().isEmpty());
-
-  }
-
-  @Test
-  public void testDeleteServiceComponent() throws Exception {
-    Service hdfs = cluster.addService("HDFS");
-    Service mapReduce = cluster.addService("MAPREDUCE");
-
-    hdfs.persist();
-
-    ServiceComponent nameNode = hdfs.addServiceComponent("NAMENODE");
-    nameNode.persist();
-    ServiceComponent jobTracker = mapReduce.addServiceComponent("JOBTRACKER");
-
-    assertEquals(2, cluster.getServices().size());
-    assertEquals(1, hdfs.getServiceComponents().size());
-    assertEquals(1, mapReduce.getServiceComponents().size());
-    assertTrue(hdfs.isPersisted());
-    assertFalse(mapReduce.isPersisted());
-
-    hdfs.deleteServiceComponent("NAMENODE");
-
-    assertEquals(0, hdfs.getServiceComponents().size());
-    assertEquals(1, mapReduce.getServiceComponents().size());
-
-    mapReduce.deleteServiceComponent("JOBTRACKER");
-
-    assertEquals(0, hdfs.getServiceComponents().size());
-    assertEquals(0, mapReduce.getServiceComponents().size());
-
-  }
-
-  @Test
-  public void testCanBeRemoved() throws Exception{
-    Service service = cluster.addService("HDFS");
-
-    for (State state : State.values()) {
-      service.setDesiredState(state);
-
-      if (state.isRemovableState()) {
-        org.junit.Assert.assertTrue(service.canBeRemoved());
-      }
-      else {
-        org.junit.Assert.assertFalse(service.canBeRemoved());
-      }
-    }
-
-    ServiceComponent component = service.addServiceComponent("NAMENODE");
-    // can't remove a STARTED component
-    component.setDesiredState(State.STARTED);
-
-    for (State state : State.values()) {
-      service.setDesiredState(state);
-      // should always be false if a subcomponent cannot be removed
-      org.junit.Assert.assertFalse(service.canBeRemoved());
-    }
-  }
-
-}
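
testCanBeRemoved pins down a composite rule: a service is removable only when its own desired state is removable and every one of its components can also be removed. A sketch of that rule using only methods the tests already call; the deleted service implementation is not part of this hunk:

    public boolean canBeRemoved() {
      if (!getDesiredState().isRemovableState()) {
        return false;
      }
      for (ServiceComponent component : getServiceComponents().values()) {
        if (!component.canBeRemoved()) {
          return false; // e.g. a STARTED NAMENODE blocks removal of HDFS
        }
      }
      return true;
    }
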
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/state/action/JobTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/state/action/JobTest.java
deleted file mode 100644
index 88da9b7..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/state/action/JobTest.java
+++ /dev/null
@@ -1,153 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.action;
-
-import org.apache.ambari.server.state.action.Action;
-import org.apache.ambari.server.state.action.ActionCompletedEvent;
-import org.apache.ambari.server.state.action.ActionEvent;
-import org.apache.ambari.server.state.action.ActionFailedEvent;
-import org.apache.ambari.server.state.action.ActionId;
-import org.apache.ambari.server.state.action.ActionImpl;
-import org.apache.ambari.server.state.action.ActionProgressUpdateEvent;
-import org.apache.ambari.server.state.action.ActionState;
-import org.apache.ambari.server.state.action.ActionType;
-import org.apache.ambari.server.state.action.ActionInitEvent;
-import org.junit.Assert;
-import org.junit.Test;
-
-public class JobTest {
-
-  private Action createNewJob(long id, String jobName, long startTime) {
-    ActionId jId = new ActionId(id, new ActionType(jobName));
-    Action job = new ActionImpl(jId, startTime);
-    return job;
-  }
-
-  private Action getRunningJob(long id, String jobName, long startTime)
-      throws Exception {
-    Action job = createNewJob(id, jobName, startTime);
-    verifyProgressUpdate(job, ++startTime);
-    return job;
-  }
-
-  private Action getCompletedJob(long id, String jobName, long startTime,
-      boolean failedJob) throws Exception {
-    Action job = getRunningJob(id, jobName, startTime);
-    completeJob(job, failedJob, ++startTime);
-    return job;
-  }
-
-  private void verifyNewJob(Action job, long startTime) {
-    Assert.assertEquals(ActionState.INIT, job.getState());
-    Assert.assertEquals(startTime, job.getStartTime());
-  }
-
-
-  @Test
-  public void testNewJob() {
-    long currentTime = System.currentTimeMillis();
-    Action job = createNewJob(1, "JobNameFoo", currentTime);
-    verifyNewJob(job, currentTime);
-  }
-
-  private void verifyProgressUpdate(Action job, long updateTime)
-      throws Exception {
-    ActionProgressUpdateEvent e = new ActionProgressUpdateEvent(job.getId(),
-        updateTime);
-    job.handleEvent(e);
-    Assert.assertEquals(ActionState.IN_PROGRESS, job.getState());
-    Assert.assertEquals(updateTime, job.getLastUpdateTime());
-  }
-
-
-  @Test
-  public void testJobProgressUpdates() throws Exception {
-    long currentTime = 1;
-    Action job = createNewJob(1, "JobNameFoo", currentTime);
-    verifyNewJob(job, currentTime);
-
-    verifyProgressUpdate(job, ++currentTime);
-    verifyProgressUpdate(job, ++currentTime);
-    verifyProgressUpdate(job, ++currentTime);
-
-  }
-
-  private void completeJob(Action job, boolean failJob, long endTime)
-      throws Exception {
-    ActionEvent e = null;
-    ActionState endState = null;
-    if (failJob) {
-      e = new ActionFailedEvent(job.getId(), endTime);
-      endState = ActionState.FAILED;
-    } else {
-      e = new ActionCompletedEvent(job.getId(), endTime);
-      endState = ActionState.COMPLETED;
-    }
-    job.handleEvent(e);
-    Assert.assertEquals(endState, job.getState());
-    Assert.assertEquals(endTime, job.getLastUpdateTime());
-    Assert.assertEquals(endTime, job.getCompletionTime());
-  }
-
-
-  @Test
-  public void testJobSuccessfulCompletion() throws Exception {
-    long currentTime = 1;
-    Action job = getRunningJob(1, "JobNameFoo", currentTime);
-    completeJob(job, false, ++currentTime);
-  }
-
-  @Test
-  public void testJobFailedCompletion() throws Exception {
-    long currentTime = 1;
-    Action job = getRunningJob(1, "JobNameFoo", currentTime);
-    completeJob(job, true, ++currentTime);
-  }
-
-  @Test
-  public void completeNewJob() throws Exception {
-    long currentTime = 1;
-    Action job = createNewJob(1, "JobNameFoo", currentTime);
-    verifyNewJob(job, currentTime);
-    completeJob(job, false, ++currentTime);
-  }
-
-  @Test
-  public void failNewJob() throws Exception {
-    long currentTime = 1;
-    Action job = createNewJob(1, "JobNameFoo", currentTime);
-    verifyNewJob(job, currentTime);
-    completeJob(job, true, ++currentTime);
-  }
-
-  @Test
-  public void reInitCompletedJob() throws Exception {
-    Action job = getCompletedJob(1, "JobNameFoo", 1, false);
-    ActionId jId = new ActionId(2, new ActionType("JobNameFoo"));
-    ActionInitEvent e = new ActionInitEvent(jId, 100);
-    job.handleEvent(e);
-    Assert.assertEquals(ActionState.INIT, job.getState());
-    Assert.assertEquals(100, job.getStartTime());
-    Assert.assertEquals(-1, job.getLastUpdateTime());
-    Assert.assertEquals(-1, job.getCompletionTime());
-    Assert.assertEquals(2, job.getId().actionId);
-  }
-
-
-}
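
Taken together, the cases above trace the action state machine: INIT moves to IN_PROGRESS on the first progress update, then to COMPLETED or FAILED, and an ActionInitEvent resets a finished action (new start time, last-update and completion times back to -1). A compact sketch of those transitions; the deleted ActionImpl drives the same moves through handleEvent(ActionEvent):

    import org.apache.ambari.server.state.action.ActionState;

    public class ActionFsmSketch {
      private ActionState state = ActionState.INIT;
      private long startTime;
      private long lastUpdateTime = -1;
      private long completionTime = -1;

      public void progress(long time) {                  // ActionProgressUpdateEvent
        state = ActionState.IN_PROGRESS;
        lastUpdateTime = time;
      }

      public void complete(long time, boolean failed) {  // ActionCompletedEvent / ActionFailedEvent
        state = failed ? ActionState.FAILED : ActionState.COMPLETED;
        lastUpdateTime = time;
        completionTime = time;
      }

      public void reinit(long time) {                    // ActionInitEvent
        state = ActionState.INIT;
        startTime = time;
        lastUpdateTime = -1;
        completionTime = -1;
      }
    }
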
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
deleted file mode 100644
index 75f6ffc..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
+++ /dev/null
@@ -1,391 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.cluster;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import static org.mockito.Mockito.*;
-
-import javax.persistence.EntityManager;
-
-import junit.framework.Assert;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.agent.AgentEnv;
-import org.apache.ambari.server.agent.AgentEnv.Directory;
-import org.apache.ambari.server.agent.DiskInfo;
-import org.apache.ambari.server.agent.HostInfo;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.controller.ClusterResponse;
-import org.apache.ambari.server.orm.GuiceJpaInitializer;
-import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.apache.ambari.server.orm.entities.ClusterEntity;
-import org.apache.ambari.server.orm.entities.ClusterServiceEntity;
-import org.apache.ambari.server.orm.entities.HostEntity;
-import org.apache.ambari.server.orm.entities.HostStateEntity;
-import org.apache.ambari.server.orm.entities.ServiceDesiredStateEntity;
-import org.apache.ambari.server.state.AgentVersion;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Host;
-import org.apache.ambari.server.state.HostState;
-import org.apache.ambari.server.state.Service;
-import org.apache.ambari.server.state.ServiceComponent;
-import org.apache.ambari.server.state.ServiceComponentFactory;
-import org.apache.ambari.server.state.ServiceComponentHost;
-import org.apache.ambari.server.state.ServiceComponentHostFactory;
-import org.apache.ambari.server.state.ServiceFactory;
-import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
-import org.apache.ambari.server.state.host.HostHealthyHeartbeatEvent;
-import org.apache.ambari.server.state.host.HostRegistrationRequestEvent;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.gson.Gson;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.google.inject.persist.PersistService;
-
-public class ClusterTest {
-  private static final Logger LOG = LoggerFactory.getLogger(ClusterTest.class);
-  
-  private Clusters clusters;
-  private Cluster c1;
-  private Injector injector;
-  private ServiceFactory serviceFactory;
-  private ServiceComponentFactory serviceComponentFactory;
-  private ServiceComponentHostFactory serviceComponentHostFactory;
-  private AmbariMetaInfo metaInfo;
-
-  @Before
-  public void setup() throws Exception {
-    injector = Guice.createInjector(new InMemoryDefaultTestModule());
-    injector.getInstance(GuiceJpaInitializer.class);
-    clusters = injector.getInstance(Clusters.class);
-    serviceFactory = injector.getInstance(ServiceFactory.class);
-    serviceComponentFactory = injector.getInstance(
-        ServiceComponentFactory.class);
-    serviceComponentHostFactory = injector.getInstance(
-        ServiceComponentHostFactory.class);
-    metaInfo = injector.getInstance(AmbariMetaInfo.class);
-    metaInfo.init();
-    clusters.addCluster("c1");
-    c1 = clusters.getCluster("c1");
-    Assert.assertEquals("c1", c1.getClusterName());
-    Assert.assertEquals(1, c1.getClusterId());
-    clusters.addHost("h1");
-    Host host = clusters.getHost("h1");
-    host.setIPv4("ipv4");
-    host.setIPv6("ipv6");
-    host.setOsType("centos5");
-    host.persist();
-    c1.setDesiredStackVersion(new StackId("HDP-0.1"));
-    clusters.mapHostToCluster("h1", "c1");
-  }
-
-  @After
-  public void teardown() {
-    injector.getInstance(PersistService.class).stop();
-  }
-
-  @Test
-  public void testAddHost() throws AmbariException {
-    clusters.addHost("h2");
-
-    try {
-      clusters.addHost("h2");
-      fail("Duplicate add should fail");
-    }
-    catch (AmbariException e) {
-      // Expected
-    }
-
-  }
-
-
-  @Test
-  public void testGetHostState() throws AmbariException {
-    Assert.assertEquals(HostState.INIT, clusters.getHost("h1").getState());
-  }
-
-  @Test
-  public void testSetHostState() throws AmbariException {
-    clusters.getHost("h1").setState(HostState.HEARTBEAT_LOST);
-    Assert.assertEquals(HostState.HEARTBEAT_LOST,
-        clusters.getHost("h1").getState());
-  }
-
-  @Test
-  public void testHostEvent() throws AmbariException,
-      InvalidStateTransitionException {
-    HostInfo hostInfo = new HostInfo();
-    hostInfo.setHostName("h1");
-    hostInfo.setInterfaces("fip_4");
-    hostInfo.setArchitecture("os_arch");
-    hostInfo.setOS("os_type");
-    hostInfo.setMemoryTotal(10);
-    hostInfo.setMemorySize(100);
-    hostInfo.setProcessorCount(10);
-    List<DiskInfo> mounts = new ArrayList<DiskInfo>();
-    mounts.add(new DiskInfo("/dev/sda", "/mnt/disk1",
-        "5000000", "4000000", "10%", "size", "fstype"));
-    hostInfo.setMounts(mounts);
-
-    AgentEnv agentEnv = new AgentEnv();
-    
-    Directory dir1 = new Directory();
-    dir1.setName("/etc/hadoop");
-    dir1.setType("not_exist");
-    Directory dir2 = new Directory();
-    dir2.setName("/var/log/hadoop");
-    dir2.setType("not_exist");
-    agentEnv.setPaths(new Directory[] { dir1, dir2 });
-    
-    
-    AgentVersion agentVersion = new AgentVersion("0.0.x");
-    long currentTime = 1001;
-
-    clusters.getHost("h1").handleEvent(new HostRegistrationRequestEvent(
-        "h1", agentVersion, currentTime, hostInfo, agentEnv));
-
-    Assert.assertEquals(HostState.WAITING_FOR_HOST_STATUS_UPDATES,
-        clusters.getHost("h1").getState());
-
-    clusters.getHost("h1").setState(HostState.HEARTBEAT_LOST);
-
-    try {
-      clusters.getHost("h1").handleEvent(
-          new HostHealthyHeartbeatEvent("h1", currentTime, null));
-      fail("Exception should be thrown on invalid event");
-    }
-    catch (InvalidStateTransitionException e) {
-      // Expected
-    }
-
-  }
-
-  @Test
-  public void testBasicClusterSetup() throws AmbariException {
-    String clusterName = "c2";
-
-    try {
-      clusters.getCluster(clusterName);
-      fail("Exception expected for invalid cluster");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    clusters.addCluster(clusterName);
-    Cluster c2 = clusters.getCluster(clusterName);
-
-    Assert.assertNotNull(c2);
-
-    Assert.assertEquals(clusterName, c2.getClusterName());
-
-    c2.setClusterName("foo2");
-    Assert.assertEquals("foo2", c2.getClusterName());
-
-    Assert.assertNotNull(c2.getDesiredStackVersion());
-    Assert.assertEquals("", c2.getDesiredStackVersion().getStackId());
-
-    StackId stackVersion = new StackId("HDP-1.0");
-    c2.setDesiredStackVersion(stackVersion);
-    Assert.assertEquals("HDP-1.0", c2.getDesiredStackVersion().getStackId());
-  }
-
-  @Test
-  public void testAddAndGetServices() throws AmbariException {
-    // TODO write unit tests for
-    // public void addService(Service service) throws AmbariException;
-    // public Service getService(String serviceName) throws AmbariException;
-    // public Map<String, Service> getServices();
-
-    Service s1 = serviceFactory.createNew(c1, "HDFS");
-    Service s2 = serviceFactory.createNew(c1, "MAPREDUCE");
-
-    c1.addService(s1);
-    c1.addService(s2);
-
-    s1.persist();
-    s2.persist();
-
-    Service s3 = serviceFactory.createNew(c1, "MAPREDUCE");
-
-    try {
-      c1.addService(s3);
-      fail("Expected error on adding dup service");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    Service s = c1.getService("HDFS");
-    Assert.assertNotNull(s);
-    Assert.assertEquals("HDFS", s.getName());
-    Assert.assertEquals(c1.getClusterId(), s.getClusterId());
-
-    try {
-      c1.getService("HBASE");
-      fail("Expected error for unknown service");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    Map<String, Service> services = c1.getServices();
-    Assert.assertEquals(2, services.size());
-    Assert.assertTrue(services.containsKey("HDFS"));
-    Assert.assertTrue(services.containsKey("MAPREDUCE"));
-  }
-
-  @Test
-  public void testGetServiceComponentHosts() throws AmbariException {
-    // TODO write unit tests
-    // public List<ServiceComponentHost> getServiceComponentHosts(String hostname);
-
-    Service s = serviceFactory.createNew(c1, "HDFS");
-    c1.addService(s);
-    s.persist();
-    ServiceComponent sc = serviceComponentFactory.createNew(s, "NAMENODE");
-    s.addServiceComponent(sc);
-    sc.persist();
-    ServiceComponentHost sch =
-        serviceComponentHostFactory.createNew(sc, "h1", false);
-    sc.addServiceComponentHost(sch);
-    sch.persist();
-
-    List<ServiceComponentHost> scHosts = c1.getServiceComponentHosts("h1");
-    Assert.assertEquals(1, scHosts.size());
-  }
-
-
-  @Test
-  public void testGetAndSetConfigs() {
-    // FIXME write unit tests
-    // public Map<String, Config> getConfigsByType(String configType);
-    // public Config getConfig(String configType, String versionTag);
-    // public void addConfig(Config config);
-  }
-  
-  public ClusterEntity createDummyData() {
-    ClusterEntity clusterEntity = new ClusterEntity();
-    clusterEntity.setClusterName("test_cluster1");
-    clusterEntity.setClusterInfo("test_cluster_info1");
-
-    HostEntity host1 = new HostEntity();
-    HostEntity host2 = new HostEntity();
-    HostEntity host3 = new HostEntity();
-
-    host1.setHostName("test_host1");
-    host2.setHostName("test_host2");
-    host3.setHostName("test_host3");
-    host1.setIpv4("192.168.0.1");
-    host2.setIpv4("192.168.0.2");
-    host3.setIpv4("192.168.0.3");
-
-    List<HostEntity> hostEntities = new ArrayList<HostEntity>();
-    hostEntities.add(host1);
-    hostEntities.add(host2);
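-    // host3 is set up above but never added to the cluster's host list.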
-
-    clusterEntity.setHostEntities(hostEntities);
-    clusterEntity.setClusterConfigEntities(Collections.EMPTY_LIST);
-    // Both sides of the relation must be set when modifying it at runtime.
-    host1.setClusterEntities(Arrays.asList(clusterEntity));
-    host2.setClusterEntities(Arrays.asList(clusterEntity));
-
-    HostStateEntity hostStateEntity1 = new HostStateEntity();
-    hostStateEntity1.setCurrentState(HostState.HEARTBEAT_LOST);
-    hostStateEntity1.setHostEntity(host1);
-    HostStateEntity hostStateEntity2 = new HostStateEntity();
-    hostStateEntity2.setCurrentState(HostState.HEALTHY);
-    hostStateEntity2.setHostEntity(host2);
-    host1.setHostStateEntity(hostStateEntity1);
-    host2.setHostStateEntity(hostStateEntity2);
-
-    ClusterServiceEntity clusterServiceEntity = new ClusterServiceEntity();
-    clusterServiceEntity.setServiceName("HDFS");
-    clusterServiceEntity.setClusterEntity(clusterEntity);
-    clusterServiceEntity.setServiceComponentDesiredStateEntities(
-        Collections.EMPTY_LIST);
-    clusterServiceEntity.setServiceConfigMappings(Collections.EMPTY_LIST);
-    ServiceDesiredStateEntity stateEntity = mock(ServiceDesiredStateEntity.class);
-    Gson gson = new Gson();
-    when(stateEntity.getDesiredStackVersion()).thenReturn(gson.toJson(new StackId("HDP-0.1"),
-        StackId.class));
-    clusterServiceEntity.setServiceDesiredStateEntity(stateEntity);
-    List<ClusterServiceEntity> clusterServiceEntities = new ArrayList<ClusterServiceEntity>();
-    clusterServiceEntities.add(clusterServiceEntity);
-    clusterEntity.setClusterServiceEntities(clusterServiceEntities);
-    return clusterEntity;
-  }
-  
-  @Test
-  public void testClusterRecovery() throws AmbariException {
-    ClusterEntity entity = createDummyData();
-    ClusterImpl cluster = new ClusterImpl(entity, injector);
-    Service service = cluster.getService("HDFS");
-    /* make sure the services are recovered */
-    Assert.assertEquals("HDFS",service.getName());
-    Map<String, Service> services = cluster.getServices();
-    Assert.assertNotNull(services.get("HDFS"));
-  }
-
-  @Test
-  public void testConvertToResponse() throws AmbariException {
-    ClusterResponse r = c1.convertToResponse();
-    Assert.assertEquals(c1.getClusterId(), r.getClusterId().longValue());
-    Assert.assertEquals(c1.getClusterName(), r.getClusterName());
-    Assert.assertEquals(1, r.getHostNames().size());
-
-    // TODO write unit tests for debug dump
-    StringBuilder sb = new StringBuilder();
-    c1.debugDump(sb);
-  }
-
-  @Test
-  public void testDeleteService() throws Exception {
-    c1.addService("MAPREDUCE").persist();
-
-    Service hdfs = c1.addService("HDFS");
-    hdfs.persist();
-    ServiceComponent nameNode = hdfs.addServiceComponent("NAMENODE");
-    nameNode.persist();
-
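-    // Verify both the in-memory service map and the persisted ClusterServiceEntity rows.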
-    assertEquals(2, c1.getServices().size());
-    assertEquals(2, injector.getProvider(EntityManager.class).get().
-        createQuery("SELECT service FROM ClusterServiceEntity service").getResultList().size());
-
-    c1.deleteService("HDFS");
-
-    assertEquals(1, c1.getServices().size());
-    assertEquals(1, injector.getProvider(EntityManager.class).get().
-        createQuery("SELECT service FROM ClusterServiceEntity service").getResultList().size());
-  }
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
deleted file mode 100644
index bf73c21..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
+++ /dev/null
@@ -1,354 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.cluster;
-
-import java.util.*;
-
-import com.google.inject.Guice;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.persist.PersistService;
-import junit.framework.Assert;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.ClusterNotFoundException;
-import org.apache.ambari.server.DuplicateResourceException;
-import org.apache.ambari.server.HostNotFoundException;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.orm.GuiceJpaInitializer;
-import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.apache.ambari.server.orm.dao.*;
-import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntityPK;
-import org.apache.ambari.server.orm.entities.HostComponentStateEntityPK;
-import org.apache.ambari.server.state.*;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import javax.persistence.EntityManager;
-
-import static org.junit.Assert.*;
-
-public class ClustersTest {
-
-  private Clusters clusters;
-  private Injector injector;
-  @Inject
-  private AmbariMetaInfo metaInfo;
-
-  @Before
-  public void setup() throws Exception {
-    injector = Guice.createInjector(new InMemoryDefaultTestModule());
-    injector.getInstance(GuiceJpaInitializer.class);
-    clusters = injector.getInstance(Clusters.class);
-    injector.injectMembers(this);
-    metaInfo.init();
-  }
-
-  @After
-  public void teardown() {
-    injector.getInstance(PersistService.class).stop();
-  }
-
-  @Test
-  public void testGetInvalidCluster() throws AmbariException {
-    try {
-      clusters.getCluster("foo");
-      fail("Exception should be thrown on invalid get");
-    }
-    catch (ClusterNotFoundException e) {
-      // Expected
-    }
-
-  }
-
-  @Test
-  public void testAddAndGetCluster() throws AmbariException {
-
-    String c1 = "foo";
-    String c2 = "foo";
-    clusters.addCluster(c1);
-
-    try {
-      clusters.addCluster(c1);
-      fail("Exception should be thrown on invalid add");
-    }
-    catch (AmbariException e) {
-      // Expected
-    }
-
-    try {
-      clusters.addCluster(c2);
-      fail("Exception should be thrown on invalid add");
-    }
-    catch (AmbariException e) {
-      // Expected
-    }
-
-    c2 = "foo2";
-    clusters.addCluster(c2);
-
-    Assert.assertNotNull(clusters.getCluster(c1));
-    Assert.assertNotNull(clusters.getCluster(c2));
-
-    Assert.assertEquals(c1, clusters.getCluster(c1).getClusterName());
-    Assert.assertEquals(c2, clusters.getCluster(c2).getClusterName());
-
-    Map<String, Cluster> verifyClusters = clusters.getClusters();
-    Assert.assertTrue(verifyClusters.containsKey(c1));
-    Assert.assertTrue(verifyClusters.containsKey(c2));
-    Assert.assertNotNull(verifyClusters.get(c1));
-    Assert.assertNotNull(verifyClusters.get(c2));
-
-    Cluster c = clusters.getCluster(c1);
-    c.setClusterName("foobar");
-    long cId = c.getClusterId();
-
-    Cluster changed = clusters.getCluster("foobar");
-    Assert.assertNotNull(changed);
-    Assert.assertEquals(cId, changed.getClusterId());
-
-    Assert.assertEquals("foobar",
-        clusters.getClusterById(cId).getClusterName());
-
-  }
-
-  @Test
-  public void testAddAndGetHost() throws AmbariException {
-    String h1 = "h1";
-    String h2 = "h2";
-    String h3 = "h3";
-
-    clusters.addHost(h1);
-
-    try {
-      clusters.addHost(h1);
-      fail("Expected exception on duplicate host entry");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    clusters.addHost(h2);
-    clusters.addHost(h3);
-
-    List<Host> hosts = clusters.getHosts();
-    Assert.assertEquals(3, hosts.size());
-
-    Assert.assertNotNull(clusters.getHost(h1));
-    Assert.assertNotNull(clusters.getHost(h2));
-    Assert.assertNotNull(clusters.getHost(h3));
-
-    Host h = clusters.getHost(h2);
-    Assert.assertNotNull(h);
-
-    try {
-      clusters.getHost("foo");
-      fail("Expected error for unknown host");
-    } catch (HostNotFoundException e) {
-      // Expected
-    }
-
-  }
-
-  @Test
-  public void testClusterHostMapping() throws AmbariException {
-    String c1 = "c1";
-    String c2 = "c2";
-    String h1 = "h1";
-    String h2 = "h2";
-    String h3 = "h3";
-
-    try {
-      clusters.mapHostToCluster(h1, c1);
-      fail("Expected exception for invalid cluster/host");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    clusters.addCluster(c1);
-    clusters.addCluster(c2);
-    clusters.getCluster(c1).setDesiredStackVersion(new StackId("HDP-0.1"));
-    clusters.getCluster(c2).setDesiredStackVersion(new StackId("HDP-0.1"));
-    Assert.assertNotNull(clusters.getCluster(c1));
-    Assert.assertNotNull(clusters.getCluster(c2));
-    try {
-      clusters.mapHostToCluster(h1, c1);
-      fail("Expected exception for invalid host");
-    } catch (Exception e) {
-      // Expected
-    }
-
-    clusters.addHost(h1);
-    clusters.addHost(h2);
-    clusters.addHost(h3);
-    Assert.assertNotNull(clusters.getHost(h1));
-    clusters.getHost(h1).setOsType("redhat6");
-    clusters.getHost(h2).setOsType("centos5");
-    clusters.getHost(h3).setOsType("centos6");
-    clusters.getHost(h1).persist();
-    clusters.getHost(h2).persist();
-    clusters.getHost(h3).persist();
-
-    Set<Cluster> c = clusters.getClustersForHost(h3);
-    Assert.assertEquals(0, c.size());
-
-    clusters.mapHostToCluster(h1, c1);
-    clusters.mapHostToCluster(h2, c1);
-    
-    try {
-      clusters.mapHostToCluster(h1, c1);
-      fail("Expected exception for duplicate");
-    } catch (DuplicateResourceException e) {
-      // expected
-    }
-    
-    /* make sure two hosts mapped to the same cluster resolve to the same cluster object */
-    
-    Cluster c3 = (Cluster) clusters.getClustersForHost(h1).toArray()[0];
-    Cluster c4 = (Cluster) clusters.getClustersForHost(h2).toArray()[0];
-    
-    Assert.assertEquals(c3, c4);
-    Set<String> hostnames = new HashSet<String>();
-    hostnames.add(h1);
-    hostnames.add(h2);
-
-    clusters.mapHostsToCluster(hostnames, c2);
-
-    c = clusters.getClustersForHost(h1);
-    Assert.assertEquals(2, c.size());
-    
-    c = clusters.getClustersForHost(h2);
-    Assert.assertEquals(2, c.size());
-
-    // TODO write test for getHostsForCluster
-    Map<String, Host> hostsForC1 = clusters.getHostsForCluster(c1);
-    Assert.assertEquals(2, hostsForC1.size());
-    Assert.assertTrue(hostsForC1.containsKey(h1));
-    Assert.assertTrue(hostsForC1.containsKey(h2));
-    Assert.assertNotNull(hostsForC1.get(h1));
-    Assert.assertNotNull(hostsForC1.get(h2));
-  }
-
-  @Test
-  public void testDebugDump() throws AmbariException {
-    String c1 = "c1";
-    String c2 = "c2";
-    String h1 = "h1";
-    String h2 = "h2";
-    String h3 = "h3";
-    clusters.addCluster(c1);
-    clusters.addCluster(c2);
-    clusters.getCluster(c1).setDesiredStackVersion(new StackId("HDP-0.1"));
-    clusters.getCluster(c2).setDesiredStackVersion(new StackId("HDP-0.1"));
-    clusters.addHost(h1);
-    clusters.addHost(h2);
-    clusters.addHost(h3);
-    clusters.getHost(h1).setOsType("redhat6");
-    clusters.getHost(h2).setOsType("centos5");
-    clusters.getHost(h3).setOsType("centos6");
-    clusters.getHost(h1).persist();
-    clusters.getHost(h2).persist();
-    clusters.getHost(h3).persist();
-    clusters.mapHostToCluster(h1, c1);
-    clusters.mapHostToCluster(h2, c1);
-
-    StringBuilder sb = new StringBuilder();
-    clusters.debugDump(sb);
-    // TODO verify dump output?
-  }
-
-  @Test
-  public void testDeleteCluster() throws Exception {
-    String c1 = "c1";
-    final String h1 = "h1";
-    final String h2 = "h2";
-
-    clusters.addCluster(c1);
-
-    Cluster cluster = clusters.getCluster(c1);
-    cluster.setDesiredStackVersion(new StackId("HDP-0.1"));
-
-    Config config = injector.getInstance(ConfigFactory.class).createNew(cluster, "t1", new HashMap<String, String>() {{
-      put("prop1", "val1");
-    }});
-    config.setVersionTag("1");
-    config.persist();
-
-    clusters.addHost(h1);
-    clusters.addHost(h2);
-
-    Host host1 = clusters.getHost(h1);
-    host1.setOsType("centos5");
-    Host host2 = clusters.getHost(h2);
-    host2.setOsType("centos5");
-    host1.persist();
-    host2.persist();
-
-    clusters.mapHostsToCluster(new HashSet<String>() {
-      {
-        addAll(Arrays.asList(h1, h2));
-      }
-    }, c1);
-
-    Service hdfs = cluster.addService("HDFS");
-    hdfs.persist();
-
-    assertNotNull(injector.getInstance(ClusterServiceDAO.class).findByClusterAndServiceNames(c1, "HDFS"));
-
-    ServiceComponent nameNode = hdfs.addServiceComponent("NAMENODE");
-    nameNode.persist();
-    ServiceComponent dataNode = hdfs.addServiceComponent("DATANODE");
-    dataNode.persist();
-
-    ServiceComponentHost nameNodeHost = nameNode.addServiceComponentHost(h1);
-    nameNodeHost.persist();
-
-    ServiceComponentHost dataNodeHost = dataNode.addServiceComponentHost(h2);
-    dataNodeHost.persist();
-
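-    // Build the composite keys up front so the same host component rows can be checked before and after the delete.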
-    HostComponentStateEntityPK hkspk = new HostComponentStateEntityPK();
-    HostComponentDesiredStateEntityPK hkdspk = new HostComponentDesiredStateEntityPK();
-
-    hkspk.setClusterId(nameNodeHost.getClusterId());
-    hkspk.setHostName(nameNodeHost.getHostName());
-    hkspk.setServiceName(nameNodeHost.getServiceName());
-    hkspk.setComponentName(nameNodeHost.getServiceComponentName());
-
-    hkdspk.setClusterId(nameNodeHost.getClusterId());
-    hkdspk.setHostName(nameNodeHost.getHostName());
-    hkdspk.setServiceName(nameNodeHost.getServiceName());
-    hkdspk.setComponentName(nameNodeHost.getServiceComponentName());
-
-    assertNotNull(injector.getInstance(HostComponentStateDAO.class).findByPK(hkspk));
-    assertNotNull(injector.getInstance(HostComponentDesiredStateDAO.class).findByPK(hkdspk));
-    assertEquals(1, injector.getProvider(EntityManager.class).get().createQuery("SELECT config FROM ClusterConfigEntity config").getResultList().size());
-
-    clusters.deleteCluster(c1);
-
-    assertEquals(2, injector.getInstance(HostDAO.class).findAll().size());
-    assertNull(injector.getInstance(HostComponentStateDAO.class).findByPK(hkspk));
-    assertNull(injector.getInstance(HostComponentDesiredStateDAO.class).findByPK(hkdspk));
-    //configs are removed implicitly by cascade operation
-    assertEquals(0, injector.getProvider(EntityManager.class).get().createQuery("SELECT config FROM ClusterConfigEntity config").getResultList().size());
-
-  }
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/state/host/HostTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/state/host/HostTest.java
deleted file mode 100644
index 947a380..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/state/host/HostTest.java
+++ /dev/null
@@ -1,341 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.state.host;
-
-import static org.junit.Assert.fail;
-import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.mock;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.actionmanager.ActionManager;
-import org.apache.ambari.server.agent.ActionQueue;
-import org.apache.ambari.server.agent.AgentEnv;
-import org.apache.ambari.server.agent.DiskInfo;
-import org.apache.ambari.server.agent.HeartBeatHandler;
-import org.apache.ambari.server.agent.HostInfo;
-import org.apache.ambari.server.orm.GuiceJpaInitializer;
-import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.apache.ambari.server.orm.dao.HostDAO;
-import org.apache.ambari.server.orm.entities.HostEntity;
-import org.apache.ambari.server.state.AgentVersion;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Host;
-import org.apache.ambari.server.state.HostHealthStatus;
-import org.apache.ambari.server.state.HostHealthStatus.HealthStatus;
-import org.apache.ambari.server.state.HostState;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.google.inject.persist.PersistService;
-
-public class HostTest {
-
-  private Injector injector;
-  private Clusters clusters;
-  private HostDAO hostDAO;
-  private static Log LOG = LogFactory.getLog(HostTest.class);
-
-  @Before
-   public void setup() throws AmbariException{
-    injector = Guice.createInjector(new InMemoryDefaultTestModule());
-    injector.getInstance(GuiceJpaInitializer.class);
-    clusters = injector.getInstance(Clusters.class);
-    hostDAO = injector.getInstance(HostDAO.class);
-  }
-
-  @After
-  public void teardown() throws AmbariException {
-    injector.getInstance(PersistService.class).stop();
-  }
-
-  @Test
-  public void testHostInfoImport() throws AmbariException{
-    HostInfo info = new HostInfo();
-    info.setMemorySize(100);
-    info.setPhysicalProcessorCount(10);
-    List<DiskInfo> mounts = new ArrayList<DiskInfo>();
-    mounts.add(new DiskInfo("/dev/sda", "/mnt/disk1",
-        "5000000", "4000000", "10%", "size", "fstype"));
-    info.setMounts(mounts);
-
-    info.setHostName("foo");
-    info.setInterfaces("fip_4");
-    info.setArchitecture("os_arch");
-    info.setOS("os_type");
-    info.setMemoryTotal(10);
-
-    clusters.addHost("foo");
-    Host host = clusters.getHost("foo");
-
-    host.importHostInfo(info);
-
-    Assert.assertEquals(info.getHostName(), host.getHostName());
-    Assert.assertEquals(info.getFreeMemory(), host.getAvailableMemBytes());
-    Assert.assertEquals(info.getMemoryTotal(), host.getTotalMemBytes());
-    Assert.assertEquals(info.getPhysicalProcessorCount(), host.getCpuCount());
-    Assert.assertEquals(info.getMounts().size(), host.getDisksInfo().size());
-    Assert.assertEquals(info.getArchitecture(), host.getOsArch());
-    Assert.assertEquals(info.getOS(), host.getOsType());
-  }
-
-  private void registerHost(Host host) throws Exception {
-    registerHost(host, true);
-  }
-  
-  @Test
-  public void testHostOs() throws Exception {
-    Clusters clusters = mock(Clusters.class);
-    ActionQueue queue = mock(ActionQueue.class);
-    ActionManager manager = mock(ActionManager.class);
-    Injector injector = mock(Injector.class);
-    doNothing().when(injector).injectMembers(any());
-    HeartBeatHandler handler = new HeartBeatHandler(clusters, queue, manager, injector);
-    String os = handler.getOsType("RedHat", "6.1");
-    Assert.assertEquals("redhat6", os);
-    os = handler.getOsType("RedHat", "6");
-    Assert.assertEquals("redhat6", os);
-    os = handler.getOsType("RedHat6","");
-    Assert.assertEquals("redhat6", os);
-    
-  }
-
-  private void registerHost(Host host, boolean firstReg) throws Exception {
-    HostInfo info = new HostInfo();
-    info.setMemorySize(100);
-    info.setProcessorCount(10);
-    List<DiskInfo> mounts = new ArrayList<DiskInfo>();
-    mounts.add(new DiskInfo("/dev/sda", "/mnt/disk1",
-        "5000000", "4000000", "10%", "size", "fstype"));
-    info.setMounts(mounts);
-
-    info.setHostName("foo");
-    info.setInterfaces("fip_4");
-    info.setArchitecture("os_arch");
-    info.setOS("os_type");
-    info.setMemoryTotal(10);
-
-    AgentVersion agentVersion = null;
-    long currentTime = System.currentTimeMillis();
-    
-    AgentEnv agentEnv = new AgentEnv();
-
-    HostRegistrationRequestEvent e =
-        new HostRegistrationRequestEvent("foo", agentVersion, currentTime,
-            info, agentEnv);
-    if (!firstReg) {
-      Assert.assertTrue(host.isPersisted());
-    }
-    host.handleEvent(e);
-    Assert.assertEquals(currentTime, host.getLastRegistrationTime());
-    
-    Assert.assertNotNull(host.getLastAgentEnv());
-
-    HostEntity entity = hostDAO.findByName(host.getHostName());
-    Assert.assertEquals(currentTime,
-        entity.getLastRegistrationTime().longValue());
-    Assert.assertEquals("os_arch", entity.getOsArch());
-    Assert.assertEquals("os_type", entity.getOsType());
-    Assert.assertEquals(10, entity.getTotalMem().longValue());
-  }
-
-  private void ensureHostUpdatesReceived(Host host) throws Exception {
-    HostStatusUpdatesReceivedEvent e =
-        new HostStatusUpdatesReceivedEvent(host.getHostName(), 1);
-    host.handleEvent(e);
-  }
-
-  private void verifyHostState(Host host, HostState state) {
-    Assert.assertEquals(state, host.getState());
-  }
-
-  private void sendHealthyHeartbeat(Host host, long counter)
-      throws Exception {
-    HostHealthyHeartbeatEvent e = new HostHealthyHeartbeatEvent(
-        host.getHostName(), counter, null);
-    host.handleEvent(e);
-  }
-
-  private void sendUnhealthyHeartbeat(Host host, long counter)
-      throws Exception {
-    HostHealthStatus healthStatus = new HostHealthStatus(HealthStatus.UNHEALTHY,
-        "Unhealthy server");
-    HostUnhealthyHeartbeatEvent e = new HostUnhealthyHeartbeatEvent(
-        host.getHostName(), counter, healthStatus);
-    host.handleEvent(e);
-  }
-
-  private void timeoutHost(Host host) throws Exception {
-    HostHeartbeatLostEvent e = new HostHeartbeatLostEvent(
-        host.getHostName());
-    host.handleEvent(e);
-  }
-
-  @Test
-  public void testHostFSMInit() throws AmbariException{
-    clusters.addHost("foo");
-    Host host = clusters.getHost("foo");
-    verifyHostState(host, HostState.INIT);
-  }
-
-  @Test
-  public void testHostRegistrationFlow() throws Exception {
-    clusters.addHost("foo");
-    Host host = clusters.getHost("foo");
-    registerHost(host);
-    verifyHostState(host, HostState.WAITING_FOR_HOST_STATUS_UPDATES);
-
-    boolean exceptionThrown = false;
-    try {
-      registerHost(host);
-    } catch (Exception e) {
-      // Expected
-      exceptionThrown = true;
-    }
-    if (!exceptionThrown) {
-      fail("Expected invalid transition exception to be thrown");
-    }
-
-    ensureHostUpdatesReceived(host);
-    verifyHostState(host, HostState.HEALTHY);
-
-    exceptionThrown = false;
-    try {
-      ensureHostUpdatesReceived(host);
-    } catch (Exception e) {
-      // Expected
-      exceptionThrown = true;
-    }
-    if (!exceptionThrown) {
-      fail("Expected invalid transition exception to be thrown");
-    }
-  }
-
-  @Test
-  public void testHostHeartbeatFlow() throws Exception {
-    clusters.addHost("foo");
-    Host host = clusters.getHost("foo");
-    registerHost(host);
-    ensureHostUpdatesReceived(host);
-
-    // TODO need to verify audit logs generated
-    // TODO need to verify health status updated properly
-
-    long counter = 0;
-    sendHealthyHeartbeat(host, ++counter);
-    verifyHostState(host, HostState.HEALTHY);
-    Assert.assertEquals(counter, host.getLastHeartbeatTime());
-
-    sendHealthyHeartbeat(host, ++counter);
-    verifyHostState(host, HostState.HEALTHY);
-    Assert.assertEquals(counter, host.getLastHeartbeatTime());
-    Assert.assertEquals(HealthStatus.HEALTHY,
-        host.getHealthStatus().getHealthStatus());
-
-    sendUnhealthyHeartbeat(host, ++counter);
-    verifyHostState(host, HostState.UNHEALTHY);
-    Assert.assertEquals(counter, host.getLastHeartbeatTime());
-    Assert.assertEquals(HealthStatus.UNHEALTHY,
-        host.getHealthStatus().getHealthStatus());
-
-    sendUnhealthyHeartbeat(host, ++counter);
-    verifyHostState(host, HostState.UNHEALTHY);
-    Assert.assertEquals(counter, host.getLastHeartbeatTime());
-    Assert.assertEquals(HealthStatus.UNHEALTHY,
-        host.getHealthStatus().getHealthStatus());
-
-    sendHealthyHeartbeat(host, ++counter);
-    verifyHostState(host, HostState.HEALTHY);
-    Assert.assertEquals(counter, host.getLastHeartbeatTime());
-    Assert.assertEquals(HealthStatus.HEALTHY,
-        host.getHealthStatus().getHealthStatus());
-
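-    // Losing the heartbeat moves the host to HEARTBEAT_LOST and resets its health status to UNKNOWN.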
-    timeoutHost(host);
-    verifyHostState(host, HostState.HEARTBEAT_LOST);
-    Assert.assertEquals(counter, host.getLastHeartbeatTime());
-    Assert.assertEquals(HealthStatus.UNKNOWN,
-        host.getHealthStatus().getHealthStatus());
-
-    timeoutHost(host);
-    verifyHostState(host, HostState.HEARTBEAT_LOST);
-    Assert.assertEquals(counter, host.getLastHeartbeatTime());
-    Assert.assertEquals(HealthStatus.UNKNOWN,
-        host.getHealthStatus().getHealthStatus());
-
-    try {
-      sendUnhealthyHeartbeat(host, ++counter);
-      fail("Invalid event should have triggered an exception");
-    } catch (Exception e) {
-      // Expected
-    }
-    verifyHostState(host, HostState.HEARTBEAT_LOST);
-
-    try {
-      sendHealthyHeartbeat(host, ++counter);
-      fail("Invalid event should have triggered an exception");
-    } catch (Exception e) {
-      // Expected
-    }
-    verifyHostState(host, HostState.HEARTBEAT_LOST);
-  }
-
-  @Test
-  public void testHostRegistrationsInAnyState() throws Exception {
-    clusters.addHost("foo");
-    Host host = clusters.getHost("foo");
-    host.setIPv4("ipv4");
-    host.setIPv6("ipv6");
-
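-    // Re-registration should be accepted from any host state, not just INIT.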
-    long counter = 0;
-
-    registerHost(host);
-
-    ensureHostUpdatesReceived(host);
-    registerHost(host, false);
-
-    ensureHostUpdatesReceived(host);
-    sendHealthyHeartbeat(host, ++counter);
-    verifyHostState(host, HostState.HEALTHY);
-    registerHost(host, false);
-    ensureHostUpdatesReceived(host);
-
-    sendUnhealthyHeartbeat(host, ++counter);
-    verifyHostState(host, HostState.UNHEALTHY);
-    registerHost(host, false);
-    ensureHostUpdatesReceived(host);
-
-    timeoutHost(host);
-    verifyHostState(host, HostState.HEARTBEAT_LOST);
-    registerHost(host, false);
-    ensureHostUpdatesReceived(host);
-
-    host.setState(HostState.INIT);
-    registerHost(host, false);
-
-  }
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
deleted file mode 100644
index 81e8691..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
+++ /dev/null
@@ -1,676 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-package org.apache.ambari.server.state.svccomphost;
-
-import static org.junit.Assert.fail;
-
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-import com.google.inject.Provider;
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.ServiceComponentNotFoundException;
-import org.apache.ambari.server.ServiceNotFoundException;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.controller.ServiceComponentHostResponse;
-import org.apache.ambari.server.orm.GuiceJpaInitializer;
-import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.apache.ambari.server.orm.dao.HostComponentDesiredStateDAO;
-import org.apache.ambari.server.orm.dao.HostComponentStateDAO;
-import org.apache.ambari.server.orm.entities.HostComponentConfigMappingEntity;
-import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
-import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntityPK;
-import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
-import org.apache.ambari.server.orm.entities.HostComponentStateEntityPK;
-import org.apache.ambari.server.state.*;
-import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.inject.Guice;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.persist.PersistService;
-
-import javax.persistence.EntityManager;
-
-public class ServiceComponentHostTest {
-  private static Logger LOG = LoggerFactory.getLogger(ServiceComponentHostTest.class);
-  @Inject
-  private Injector injector;
-  @Inject
-  private Clusters clusters;
-  @Inject
-  private ServiceFactory serviceFactory;
-  @Inject
-  private ServiceComponentFactory serviceComponentFactory;
-  @Inject
-  private ServiceComponentHostFactory serviceComponentHostFactory;
-  @Inject
-  private AmbariMetaInfo metaInfo;
-  @Inject
-  private HostComponentStateDAO hostComponentStateDAO;
-  @Inject
-  private HostComponentDesiredStateDAO hostComponentDesiredStateDAO;
-  @Inject
-  Provider<EntityManager> entityManagerProvider;
-  @Inject
-  private ConfigFactory configFactory;
-
-  @Before
-  public void setup() throws Exception {
-    injector = Guice.createInjector(new InMemoryDefaultTestModule());
-    injector.getInstance(GuiceJpaInitializer.class);
-    injector.injectMembers(this);
-    clusters.addCluster("C1");
-    clusters.addHost("h1");
-    clusters.getHost("h1").setOsType("centos5");
-    clusters.getHost("h1").persist();
-    clusters.getCluster("C1").setDesiredStackVersion(
-        new StackId("HDP-0.1"));
-    metaInfo.init();
-    clusters.mapHostToCluster("h1","C1");
-  }
-
-  @After
-  public void teardown() {
-    injector.getInstance(PersistService.class).stop();
-  }
-
-  private ServiceComponentHost createNewServiceComponentHost(
-      String svc,
-      String svcComponent,
-      String hostName, boolean isClient) throws AmbariException{
-    Cluster c = clusters.getCluster("C1");
-    Service s = null;
-
-    try {
-      s = c.getService(svc);
-    } catch (ServiceNotFoundException e) {
-      LOG.debug("Calling service create"
-          + ", serviceName=" + svc);
-      s = serviceFactory.createNew(c, svc);
-      c.addService(s);
-      s.persist();
-    }
-
-    ServiceComponent sc = null;
-    try {
-      sc = s.getServiceComponent(svcComponent);
-    } catch (ServiceComponentNotFoundException e) {
-      sc = serviceComponentFactory.createNew(s, svcComponent);
-      s.addServiceComponent(sc);
-      sc.persist();
-    }
-
-    ServiceComponentHost impl = serviceComponentHostFactory.createNew(
-        sc, hostName, isClient);
-    impl.persist();
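-    // A freshly created host component starts in INIT with only the desired stack version populated.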
-    Assert.assertEquals(State.INIT,
-        impl.getState());
-    Assert.assertEquals(State.INIT,
-        impl.getDesiredState());
-    Assert.assertEquals("C1", impl.getClusterName());
-    Assert.assertEquals(c.getClusterId(), impl.getClusterId());
-    Assert.assertEquals(s.getName(), impl.getServiceName());
-    Assert.assertEquals(sc.getName(), impl.getServiceComponentName());
-    Assert.assertEquals(hostName, impl.getHostName());
-    Assert.assertFalse(
-        impl.getDesiredStackVersion().getStackId().isEmpty());
-    Assert.assertTrue(impl.getStackVersion().getStackId().isEmpty());
-
-    return impl;
-  }
-
-  @Test
-  public void testNewServiceComponentHost() throws AmbariException{
-    createNewServiceComponentHost("HDFS", "NAMENODE", "h1", false);
-    createNewServiceComponentHost("HDFS", "HDFS_CLIENT", "h1", true);
-  }
-
-  private ServiceComponentHostEvent createEvent(ServiceComponentHostImpl impl,
-      long timestamp, ServiceComponentHostEventType eventType)
-      throws AmbariException {
-    Map<String, String> configs = new HashMap<String, String>();
-
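-    // Each event timestamp gets its own "time" config version so start events can carry distinct config tags.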
-    Cluster c = clusters.getCluster("C1");
-    if (c.getDesiredConfig("time", "" + timestamp) == null) {
-      Config config = configFactory.createNew(c, "time",
-          new HashMap<String, String>());
-      config.setVersionTag("" + timestamp);
-      c.addDesiredConfig(config);
-      config.persist();
-    }
-
-    configs.put("time", "" + timestamp);
-    switch (eventType) {
-      case HOST_SVCCOMP_INSTALL:
-        return new ServiceComponentHostInstallEvent(
-            impl.getServiceComponentName(), impl.getHostName(), timestamp,
-            impl.getDesiredStackVersion().getStackId());
-      case HOST_SVCCOMP_START:
-        return new ServiceComponentHostStartEvent(
-            impl.getServiceComponentName(), impl.getHostName(), timestamp,
-            configs);
-      case HOST_SVCCOMP_STOP:
-        return new ServiceComponentHostStopEvent(
-            impl.getServiceComponentName(), impl.getHostName(), timestamp);
-      case HOST_SVCCOMP_UNINSTALL:
-        return new ServiceComponentHostUninstallEvent(
-            impl.getServiceComponentName(), impl.getHostName(), timestamp);
-      case HOST_SVCCOMP_OP_FAILED:
-        return new ServiceComponentHostOpFailedEvent(
-            impl.getServiceComponentName(), impl.getHostName(), timestamp);
-      case HOST_SVCCOMP_OP_SUCCEEDED:
-        return new ServiceComponentHostOpSucceededEvent(
-            impl.getServiceComponentName(), impl.getHostName(), timestamp);
-      case HOST_SVCCOMP_OP_IN_PROGRESS:
-        return new ServiceComponentHostOpInProgressEvent(
-            impl.getServiceComponentName(), impl.getHostName(), timestamp);
-      case HOST_SVCCOMP_OP_RESTART:
-        return new ServiceComponentHostOpRestartedEvent(
-            impl.getServiceComponentName(), impl.getHostName(), timestamp);
-      case HOST_SVCCOMP_WIPEOUT:
-        return new ServiceComponentHostWipeoutEvent(
-            impl.getServiceComponentName(), impl.getHostName(), timestamp);
-    }
-    return null;
-  }
-
-  private void runStateChanges(ServiceComponentHostImpl impl,
-      ServiceComponentHostEventType startEventType,
-      State startState,
-      State inProgressState,
-      State failedState,
-      State completedState)
-    throws Exception {
-    long timestamp = 0;
-
-    boolean checkConfigs = false;
-    if (startEventType == ServiceComponentHostEventType.HOST_SVCCOMP_START) {
-      checkConfigs = true;
-    }
-    boolean checkStack = false;
-    if (startEventType == ServiceComponentHostEventType.HOST_SVCCOMP_INSTALL) {
-      checkStack = true;
-      impl.setStackVersion(null);
-    }
-
-    Assert.assertEquals(startState,
-        impl.getState());
-    ServiceComponentHostEvent startEvent = createEvent(impl, ++timestamp,
-        startEventType);
-
-    long startTime = timestamp;
-    impl.handleEvent(startEvent);
-    Assert.assertEquals(startTime, impl.getLastOpStartTime());
-    Assert.assertEquals(-1, impl.getLastOpLastUpdateTime());
-    Assert.assertEquals(-1, impl.getLastOpEndTime());
-    Assert.assertEquals(inProgressState,
-        impl.getState());
-    if (checkConfigs) {
-      Assert.assertTrue(impl.getConfigVersions().size() > 0);
-      Assert.assertEquals("" + startTime, impl.getConfigVersions().get("time"));
-    }
-    if (checkStack) {
-      Assert.assertNotNull(impl.getStackVersion());
-      Assert.assertEquals(impl.getDesiredStackVersion().getStackId(),
-          impl.getStackVersion().getStackId());
-    }
-
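-    // Re-send the same event: INSTALLING and STARTING absorb the repeat, any other state must reject it.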
-    ServiceComponentHostEvent installEvent2 = createEvent(impl, ++timestamp,
-        startEventType);
-
-    boolean exceptionThrown = false;
-    LOG.info("Transitioning from " + impl.getState() + " " + installEvent2.getType());
-    try {
-      impl.handleEvent(installEvent2);
-    } catch (Exception e) {
-      exceptionThrown = true;
-    }
-    if (impl.getState() == State.INSTALLING || impl.getState() == State.STARTING) {
-      startTime = timestamp;
-      // An install on top of an in-progress install (or a start on a start) is allowed.
-      Assert.assertFalse("Exception thrown on valid repeated event", exceptionThrown);
-    }
-    else {
-      Assert.assertTrue("Exception not thrown on invalid event", exceptionThrown);
-    }
-    Assert.assertEquals(startTime, impl.getLastOpStartTime());
-    Assert.assertEquals(-1, impl.getLastOpLastUpdateTime());
-    Assert.assertEquals(-1, impl.getLastOpEndTime());
-    Assert.assertEquals(inProgressState,
-        impl.getState());
-
-    ServiceComponentHostOpInProgressEvent inProgressEvent1 = new
-        ServiceComponentHostOpInProgressEvent(impl.getServiceComponentName(),
-            impl.getHostName(), ++timestamp);
-    impl.handleEvent(inProgressEvent1);
-    Assert.assertEquals(startTime, impl.getLastOpStartTime());
-    Assert.assertEquals(timestamp, impl.getLastOpLastUpdateTime());
-    Assert.assertEquals(-1, impl.getLastOpEndTime());
-    Assert.assertEquals(inProgressState,
-        impl.getState());
-
-    ServiceComponentHostOpInProgressEvent inProgressEvent2 = new
-        ServiceComponentHostOpInProgressEvent(impl.getServiceComponentName(),
-            impl.getHostName(), ++timestamp);
-    impl.handleEvent(inProgressEvent2);
-    Assert.assertEquals(startTime, impl.getLastOpStartTime());
-    Assert.assertEquals(timestamp, impl.getLastOpLastUpdateTime());
-    Assert.assertEquals(-1, impl.getLastOpEndTime());
-    Assert.assertEquals(inProgressState,
-        impl.getState());
-
-    ServiceComponentHostOpFailedEvent failEvent = new
-        ServiceComponentHostOpFailedEvent(impl.getServiceComponentName(),
-            impl.getHostName(), ++timestamp);
-    long endTime = timestamp;
-    impl.handleEvent(failEvent);
-    Assert.assertEquals(startTime, impl.getLastOpStartTime());
-    Assert.assertEquals(timestamp, impl.getLastOpLastUpdateTime());
-    Assert.assertEquals(endTime, impl.getLastOpEndTime());
-    Assert.assertEquals(failedState,
-        impl.getState());
-
-    ServiceComponentHostOpRestartedEvent restartEvent = new
-        ServiceComponentHostOpRestartedEvent(impl.getServiceComponentName(),
-            impl.getHostName(), ++timestamp);
-    startTime = timestamp;
-    impl.handleEvent(restartEvent);
-    Assert.assertEquals(startTime, impl.getLastOpStartTime());
-    Assert.assertEquals(-1, impl.getLastOpLastUpdateTime());
-    Assert.assertEquals(-1, impl.getLastOpEndTime());
-    Assert.assertEquals(inProgressState,
-        impl.getState());
-
-    ServiceComponentHostOpInProgressEvent inProgressEvent3 = new
-        ServiceComponentHostOpInProgressEvent(impl.getServiceComponentName(),
-            impl.getHostName(), ++timestamp);
-    impl.handleEvent(inProgressEvent3);
-    Assert.assertEquals(startTime, impl.getLastOpStartTime());
-    Assert.assertEquals(timestamp, impl.getLastOpLastUpdateTime());
-    Assert.assertEquals(-1, impl.getLastOpEndTime());
-    Assert.assertEquals(inProgressState,
-        impl.getState());
-
-    ServiceComponentHostOpFailedEvent failEvent2 = new
-        ServiceComponentHostOpFailedEvent(impl.getServiceComponentName(),
-            impl.getHostName(), ++timestamp);
-    endTime = timestamp;
-    impl.handleEvent(failEvent2);
-    Assert.assertEquals(startTime, impl.getLastOpStartTime());
-    Assert.assertEquals(timestamp, impl.getLastOpLastUpdateTime());
-    Assert.assertEquals(endTime, impl.getLastOpEndTime());
-    Assert.assertEquals(failedState,
-        impl.getState());
-
-    ServiceComponentHostEvent startEvent2 = createEvent(impl, ++timestamp,
-        startEventType);
-    startTime = timestamp;
-    impl.handleEvent(startEvent2);
-    Assert.assertEquals(startTime, impl.getLastOpStartTime());
-    Assert.assertEquals(-1, impl.getLastOpLastUpdateTime());
-    Assert.assertEquals(-1, impl.getLastOpEndTime());
-    Assert.assertEquals(inProgressState,
-        impl.getState());
-
-    ServiceComponentHostOpInProgressEvent inProgressEvent4 = new
-        ServiceComponentHostOpInProgressEvent(impl.getServiceComponentName(),
-            impl.getHostName(), ++timestamp);
-    impl.handleEvent(inProgressEvent4);
-    Assert.assertEquals(startTime, impl.getLastOpStartTime());
-    Assert.assertEquals(timestamp, impl.getLastOpLastUpdateTime());
-    Assert.assertEquals(-1, impl.getLastOpEndTime());
-    Assert.assertEquals(inProgressState,
-        impl.getState());
-
-    ServiceComponentHostOpSucceededEvent succeededEvent = new
-        ServiceComponentHostOpSucceededEvent(impl.getServiceComponentName(),
-            impl.getHostName(), ++timestamp);
-    endTime = timestamp;
-    impl.handleEvent(succeededEvent);
-    Assert.assertEquals(startTime, impl.getLastOpStartTime());
-    Assert.assertEquals(timestamp, impl.getLastOpLastUpdateTime());
-    Assert.assertEquals(endTime, impl.getLastOpEndTime());
-    Assert.assertEquals(completedState,
-        impl.getState());
-
-  }
-
-  @Test
-  public void testClientStateFlow() throws Exception {
-    ServiceComponentHostImpl impl = (ServiceComponentHostImpl)
-        createNewServiceComponentHost("HDFS", "HDFS_CLIENT", "h1", true);
-
-    runStateChanges(impl, ServiceComponentHostEventType.HOST_SVCCOMP_INSTALL,
-        State.INIT,
-        State.INSTALLING,
-        State.INSTALL_FAILED,
-        State.INSTALLED);
-
-    boolean exceptionThrown = false;
-    try {
-      runStateChanges(impl, ServiceComponentHostEventType.HOST_SVCCOMP_START,
-        State.INSTALLED,
-        State.STARTING,
-        State.START_FAILED,
-        State.STARTED);
-    }
-    catch (Exception e) {
-      exceptionThrown = true;
-    }
-    Assert.assertTrue("Exception not thrown on invalid event", exceptionThrown);
-
-    runStateChanges(impl, ServiceComponentHostEventType.HOST_SVCCOMP_UNINSTALL,
-        State.INSTALLED,
-        State.UNINSTALLING,
-        State.UNINSTALL_FAILED,
-        State.UNINSTALLED);
-
-    runStateChanges(impl, ServiceComponentHostEventType.HOST_SVCCOMP_WIPEOUT,
-        State.UNINSTALLED,
-        State.WIPING_OUT,
-        State.WIPEOUT_FAILED,
-        State.INIT);
-
-  }
-
-  @Test
-  public void testDaemonStateFlow() throws Exception {
-    ServiceComponentHostImpl impl = (ServiceComponentHostImpl)
-        createNewServiceComponentHost("HDFS", "DATANODE", "h1", false);
-
-    runStateChanges(impl, ServiceComponentHostEventType.HOST_SVCCOMP_INSTALL,
-        State.INIT,
-        State.INSTALLING,
-        State.INSTALL_FAILED,
-        State.INSTALLED);
-
-    runStateChanges(impl, ServiceComponentHostEventType.HOST_SVCCOMP_START,
-      State.INSTALLED,
-      State.STARTING,
-      State.START_FAILED,
-      State.STARTED);
-
-    runStateChanges(impl, ServiceComponentHostEventType.HOST_SVCCOMP_STOP,
-      State.STARTED,
-      State.STOPPING,
-      State.STOP_FAILED,
-      State.INSTALLED);
-
-    runStateChanges(impl, ServiceComponentHostEventType.HOST_SVCCOMP_UNINSTALL,
-        State.INSTALLED,
-        State.UNINSTALLING,
-        State.UNINSTALL_FAILED,
-        State.UNINSTALLED);
-
-    runStateChanges(impl, ServiceComponentHostEventType.HOST_SVCCOMP_WIPEOUT,
-        State.UNINSTALLED,
-        State.WIPING_OUT,
-        State.WIPEOUT_FAILED,
-        State.INIT);
-  }
-
-  @Test
-  public void testJobHandling() {
-    // TODO fix once jobs are handled
-  }
-
-  @Test
-  public void testGetAndSetConfigs() {
-    // FIXME config handling
-    /*
-    public Map<String, Config> getDesiredConfigs();
-    public void updateDesiredConfigs(Map<String, Config> configs);
-    public Map<String, Config> getConfigs();
-    public void updateConfigs(Map<String, Config> configs);
-    */
-  }
-
-  @Test
-  public void testGetAndSetBasicInfo() throws AmbariException {
-    ServiceComponentHost sch =
-        createNewServiceComponentHost("HDFS", "NAMENODE", "h1", false);
-    sch.setDesiredState(State.INSTALLED);
-    sch.setState(State.INSTALLING);
-    sch.setStackVersion(new StackId("HDP-1.0.0"));
-    sch.setDesiredStackVersion(new StackId("HDP-1.1.0"));
-
-    Assert.assertEquals(State.INSTALLING, sch.getState());
-    Assert.assertEquals(State.INSTALLED, sch.getDesiredState());
-    Assert.assertEquals("HDP-1.0.0",
-        sch.getStackVersion().getStackId());
-    Assert.assertEquals("HDP-1.1.0",
-        sch.getDesiredStackVersion().getStackId());
-  }
-
-  @Test
-  public void testConvertToResponse() throws AmbariException {
-    ServiceComponentHost sch =
-        createNewServiceComponentHost("HDFS", "DATANODE", "h1", false);
-    sch.setDesiredState(State.INSTALLED);
-    sch.setState(State.INSTALLING);
-    sch.setStackVersion(new StackId("HDP-1.0.0"));
-    ServiceComponentHostResponse r =
-        sch.convertToResponse();
-    Assert.assertEquals("HDFS", r.getServiceName());
-    Assert.assertEquals("DATANODE", r.getComponentName());
-    Assert.assertEquals("h1", r.getHostname());
-    Assert.assertEquals("C1", r.getClusterName());
-    Assert.assertEquals(State.INSTALLED.toString(), r.getDesiredState());
-    Assert.assertEquals(State.INSTALLING.toString(), r.getLiveState());
-    Assert.assertEquals("HDP-1.0.0", r.getStackVersion());
-
-    // TODO check configs
-
-    StringBuilder sb = new StringBuilder();
-    sch.debugDump(sb);
-    Assert.assertFalse(sb.toString().isEmpty());
-  }
-
-  @Test
-  public void testStopInVariousStates() throws AmbariException,
-      InvalidStateTransitionException {
-    ServiceComponentHost sch =
-        createNewServiceComponentHost("HDFS", "DATANODE", "h1", false);
-    ServiceComponentHostImpl impl =  (ServiceComponentHostImpl) sch;
-
-    sch.setDesiredState(State.STARTED);
-    sch.setState(State.START_FAILED);
-
-    long timestamp = 0;
-
-    ServiceComponentHostEvent stopEvent = createEvent(impl, ++timestamp,
-        ServiceComponentHostEventType.HOST_SVCCOMP_STOP);
-
-    long startTime = timestamp;
-    impl.handleEvent(stopEvent);
-    Assert.assertEquals(startTime, impl.getLastOpStartTime());
-    Assert.assertEquals(-1, impl.getLastOpLastUpdateTime());
-    Assert.assertEquals(-1, impl.getLastOpEndTime());
-    Assert.assertEquals(State.STOPPING,
-        impl.getState());
-
-    sch.setState(State.INSTALL_FAILED);
-
-    boolean exceptionThrown = false;
-    try {
-      impl.handleEvent(stopEvent);
-    } catch (Exception e) {
-      exceptionThrown = true;
-    }
-    Assert.assertTrue("Exception not thrown on invalid event", exceptionThrown);
-
-    Assert.assertEquals(startTime, impl.getLastOpStartTime());
-    Assert.assertEquals(-1, impl.getLastOpLastUpdateTime());
-    Assert.assertEquals(-1, impl.getLastOpEndTime());
-
-    sch.setState(State.INSTALLED);
-    ServiceComponentHostEvent stopEvent2 = createEvent(impl, ++timestamp,
-        ServiceComponentHostEventType.HOST_SVCCOMP_STOP);
-
-    startTime = timestamp;
-    impl.handleEvent(stopEvent2);
-    Assert.assertEquals(startTime, impl.getLastOpStartTime());
-    Assert.assertEquals(-1, impl.getLastOpLastUpdateTime());
-    Assert.assertEquals(-1, impl.getLastOpEndTime());
-    Assert.assertEquals(State.STOPPING,
-        impl.getState());
-  }
-
-  @Test
-  public void testLiveStateUpdatesForReconfigure() throws Exception {
-    ServiceComponentHost sch =
-        createNewServiceComponentHost("HDFS", "DATANODE", "h1", false);
-    ServiceComponentHostImpl impl =  (ServiceComponentHostImpl) sch;
-
-    sch.setDesiredState(State.INSTALLED);
-    sch.setState(State.INSTALLED);
-
-    Map<String, Config> desired = new HashMap<String, Config>();
-    ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
-
-    Cluster cluster = clusters.getCluster("C1");
-    Config c1 = configFactory.createNew(cluster, "type1", new HashMap<String, String>());
-//        new ConfigImpl(cluster, "type1", new HashMap<String, String>(), injector);
-    Config c2 = configFactory.createNew(cluster, "type2", new HashMap<String, String>());
-//        new ConfigImpl(cluster, "type2", new HashMap<String, String>(),        injector);
-    Config c3 = configFactory.createNew(cluster, "type3", new HashMap<String, String>());
-//        new ConfigImpl(cluster, "type3", new HashMap<String, String>(),   injector);
-    Config c4v5 = configFactory.createNew(cluster, "type4", new HashMap<String, String>());
-    Config c2v3 = configFactory.createNew(cluster, "type2", new HashMap<String, String>());
-
-
-    c1.setVersionTag("v1");
-    c2.setVersionTag("v1");
-    c3.setVersionTag("v1");
-    c4v5.setVersionTag("v5");
-    c2v3.setVersionTag("v3");
-
-    c1.persist();
-    c2.persist();
-    c3.persist();
-    c4v5.persist();
-    c2v3.persist();
-
-    desired.put("type1", c1);
-    desired.put("type2", c2);
-    desired.put("type3", c3);
-    impl.updateDesiredConfigs(desired);
-    impl.persist();
-
-    HostComponentDesiredStateEntityPK desiredPK =
-        new HostComponentDesiredStateEntityPK();
-    desiredPK.setClusterId(clusters.getCluster("C1").getClusterId());
-    desiredPK.setServiceName("HDFS");
-    desiredPK.setComponentName("DATANODE");
-    desiredPK.setHostName("h1");
-
-    HostComponentDesiredStateEntity desiredEntity =
-        hostComponentDesiredStateDAO.findByPK(desiredPK);
-    Assert.assertEquals(3,
-        desiredEntity.getHostComponentDesiredConfigMappingEntities().size());
-
-    Map<String, String> oldConfigs = new HashMap<String, String>();
-    oldConfigs.put("type1", "v1");
-    oldConfigs.put("type2", "v1");
-    oldConfigs.put("type3", "v1");
-
-    HostComponentStateEntityPK primaryKey =
-        new HostComponentStateEntityPK();
-    primaryKey.setClusterId(clusters.getCluster("C1").getClusterId());
-    primaryKey.setServiceName("HDFS");
-    primaryKey.setComponentName("DATANODE");
-    primaryKey.setHostName("h1");
-    HostComponentStateEntity entity =
-        hostComponentStateDAO.findByPK(primaryKey);
-    Collection<HostComponentConfigMappingEntity> entities =
-        entity.getHostComponentConfigMappingEntities();
-    Assert.assertEquals(0, entities.size());
-
-    impl.setConfigs(oldConfigs);
-    impl.persist();
-
-    Assert.assertEquals(3, impl.getConfigVersions().size());
-    entity = hostComponentStateDAO.findByPK(primaryKey);
-    entities = entity.getHostComponentConfigMappingEntities();
-    Assert.assertEquals(3, entities.size());
-
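-    // A start event carrying a new config set replaces the component's live config mappings.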
-    Map<String, String> newConfigs = new HashMap<String, String>();
-    newConfigs.put("type1", "v1");
-    newConfigs.put("type2", "v3");
-    newConfigs.put("type4", "v5");
-
-    ServiceComponentHostStartEvent startEvent =
-        new ServiceComponentHostStartEvent("DATANODE", "h1", 1, newConfigs);
-
-    impl.handleEvent(startEvent);
-
-    Assert.assertEquals(newConfigs.size(),
-        impl.getConfigVersions().size());
-
-    entity = hostComponentStateDAO.findByPK(primaryKey);
-    entities = entity.getHostComponentConfigMappingEntities();
-    Assert.assertEquals(3, entities.size());
-
-    for (HostComponentConfigMappingEntity e : entities) {
-      LOG.debug("Found live config "
-          + e.getConfigType() + ":" + e.getVersionTag());
-      Assert.assertTrue(e.getComponentName().equals("DATANODE")
-          && e.getClusterId() == primaryKey.getClusterId()
-          && e.getHostName().equals("h1")
-          && e.getServiceName().equals("HDFS"));
-      if (e.getConfigType().equals("type1")) {
-        Assert.assertEquals("v1", e.getVersionTag());
-      } else if (e.getConfigType().equals("type2")) {
-        Assert.assertEquals("v3", e.getVersionTag());
-      } else if (e.getConfigType().equals("type4")) {
-        Assert.assertEquals("v5", e.getVersionTag());
-      } else {
-        fail("Found invalid type");
-      }
-    }
-  }
-
-  @Test
-  public void testCanBeRemoved() throws Exception {
-    ServiceComponentHostImpl impl = (ServiceComponentHostImpl)
-        createNewServiceComponentHost("HDFS", "HDFS_CLIENT", "h1", true);
-
-    for (State state : State.values()) {
-      impl.setState(state);
-
-      if (state.isRemovableState()) {
-        Assert.assertTrue(impl.canBeRemoved());
-      }
-      else {
-        Assert.assertFalse(impl.canBeRemoved());
-      }
-    }
-  }
-}
diff --git a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/utils/TestStageUtils.java b/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/utils/TestStageUtils.java
deleted file mode 100644
index cfe704f..0000000
--- a/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/utils/TestStageUtils.java
+++ /dev/null
@@ -1,178 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.utils;
-
-import static org.junit.Assert.assertEquals;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-
-import javax.xml.bind.JAXBException;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.actionmanager.ExecutionCommandWrapper;
-import org.apache.ambari.server.actionmanager.Stage;
-import org.apache.ambari.server.agent.ExecutionCommand;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.controller.HostsMap;
-import org.apache.ambari.server.orm.GuiceJpaInitializer;
-import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.apache.ambari.server.state.*;
-import org.apache.ambari.server.state.cluster.ClustersImpl;
-import org.apache.ambari.server.state.svccomphost.ServiceComponentHostImpl;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.codehaus.jackson.JsonGenerationException;
-import org.codehaus.jackson.map.JsonMappingException;
-import org.junit.Before;
-import org.junit.Test;
-
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-
-public class TestStageUtils {
-  private static final Log LOG = LogFactory.getLog(TestStageUtils.class);
-
-  private AmbariMetaInfo ambariMetaInfo;
-
-  private Injector injector;
-
-  static ServiceComponentHostFactory serviceComponentHostFactory;
-
-  @Before
-  public void setup() throws Exception {
-    injector = Guice.createInjector(new InMemoryDefaultTestModule());
-    injector.getInstance(GuiceJpaInitializer.class);
-    serviceComponentHostFactory = injector.getInstance(ServiceComponentHostFactory.class);
-    ambariMetaInfo = injector.getInstance(AmbariMetaInfo.class);
-    ambariMetaInfo.init();
-
-  }
-
-
-  public static void addHdfsService(Cluster cl, String [] hostList,
-      Injector injector) throws AmbariException {
-    cl.setDesiredStackVersion(new StackId("HDP-0.1"));
-    cl.addService("HDFS");
-    cl.getService("HDFS").addServiceComponent("NAMENODE");
-    cl.getService("HDFS").addServiceComponent("DATANODE");
-    cl.getService("HDFS").addServiceComponent("SECONDARY_NAMENODE");
-    cl.getService("HDFS")
-        .getServiceComponent("NAMENODE")
-        .addServiceComponentHost(
-            serviceComponentHostFactory.createNew(cl.getService("HDFS")
-                .getServiceComponent("NAMENODE"), hostList[0], false));
-    cl.getService("HDFS")
-        .getServiceComponent("SECONDARY_NAMENODE")
-        .addServiceComponentHost(
-            serviceComponentHostFactory.createNew(cl.getService("HDFS")
-                .getServiceComponent("SECONDARY_NAMENODE"), hostList[1], false)
-        );
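-    // Hosts 1..n-1 each get a DATANODE, so the SECONDARY_NAMENODE host doubles as a slave.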
-    for (int i = 1; i < hostList.length; i++) {
-      cl.getService("HDFS")
-          .getServiceComponent("DATANODE")
-          .addServiceComponentHost(serviceComponentHostFactory.createNew(cl.getService("HDFS")
-              .getServiceComponent("DATANODE"), hostList[i], false));
-    }
-  }
-
-  public static void addHbaseService(Cluster cl, String [] hostList,
-      Injector injector) throws AmbariException {
-    cl.setDesiredStackVersion(new StackId("HDP-0.2"));
-    cl.addService("HBASE");
-    cl.getService("HBASE").addServiceComponent("HBASE_MASTER");
-    cl.getService("HBASE").addServiceComponent("HBASE_REGIONSERVER");
-    cl.getService("HBASE")
-        .getServiceComponent("HBASE_MASTER")
-        .addServiceComponentHost(
-            serviceComponentHostFactory.createNew(cl.getService("HBASE")
-                .getServiceComponent("HBASE_MASTER"), hostList[0], false));
-    for (int i = 1; i < hostList.length; i++) {
-      cl.getService("HBASE")
-          .getServiceComponent("HBASE_REGIONSERVER")
-          .addServiceComponentHost(
-              serviceComponentHostFactory.createNew(cl.getService("HBASE")
-                  .getServiceComponent("HBASE_REGIONSERVER"), hostList[i],
-                  false));
-    }
-  }
-
-  @Test
-  public void testGetATestStage() {
-    Stage s = StageUtils.getATestStage(1, 2, "host2");
-    String hostname = s.getHosts().get(0);
-    List<ExecutionCommandWrapper> wrappers = s.getExecutionCommands(hostname);
-    for (ExecutionCommandWrapper wrapper : wrappers) {
-      assertEquals("cluster1", wrapper.getExecutionCommand().getClusterName());
-      assertEquals(StageUtils.getActionId(1, 2), wrapper.getExecutionCommand().getCommandId());
-      assertEquals(hostname, wrapper.getExecutionCommand().getHostname());
-    }
-  }
-
-  @Test
-  public void testJaxbToString() throws Exception {
-    Stage s = StageUtils.getATestStage(1, 2, "host1");
-    String hostname = s.getHosts().get(0);
-    List<ExecutionCommandWrapper> wrappers = s.getExecutionCommands(hostname);
-    for (ExecutionCommandWrapper wrapper : wrappers) {
-      LOG.info("Command is " + StageUtils.jaxbToString(wrapper.getExecutionCommand()));
-    }
-    assertEquals(StageUtils.getActionId(1, 2), s.getActionId());
-  }
-
-  @Test
-  public void testJsonToExecutionCommand() throws JsonGenerationException,
-      JsonMappingException, JAXBException, IOException {
-    Stage s = StageUtils.getATestStage(1, 2, "host1");
-    ExecutionCommand cmd = s.getExecutionCommands("host1").get(0).getExecutionCommand();
-    String json = StageUtils.jaxbToString(cmd);
-    ExecutionCommand cmdDes = StageUtils.stringToExecutionCommand(json);
-    assertEquals(cmd.toString(), cmdDes.toString());
-    assertEquals(cmd, cmdDes);
-  }
-
-  @Test
-  public void testGetClusterHostInfo() throws AmbariException {
-    Clusters fsm = injector.getInstance(Clusters.class);
-    fsm.addCluster("c1");
-    fsm.addHost("h1");
-    fsm.addHost("h2");
-    fsm.addHost("h3");
-    fsm.getCluster("c1").setDesiredStackVersion(new StackId("HDP-0.1"));
-    fsm.getHost("h1").setOsType("centos5");
-    fsm.getHost("h2").setOsType("centos5");
-    fsm.getHost("h3").setOsType("centos5");
-    fsm.getHost("h1").persist();
-    fsm.getHost("h2").persist();
-    fsm.getHost("h3").persist();
-    fsm.mapHostToCluster("h1", "c1");
-    fsm.mapHostToCluster("h2", "c1");
-    fsm.mapHostToCluster("h3", "c1");
-    String [] hostList = {"h1", "h2", "h3" };
-    addHdfsService(fsm.getCluster("c1"), hostList, injector);
-    addHbaseService(fsm.getCluster("c1"), hostList, injector);
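-    // With hosts h1-h3: h1 runs the NAMENODE and HBASE_MASTER, while h2 and h3 are the slaves.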
-    Map<String, List<String>> info = StageUtils.getClusterHostInfo(fsm
-        .getCluster("c1"), new HostsMap(injector.getInstance(Configuration.class)));
-    assertEquals(2, info.get("slave_hosts").size());
-    assertEquals(1, info.get("hbase_master_host").size());
-    assertEquals("h1", info.get("hbase_master_host").get(0));
-  }
-}
diff --git a/branch-1.2/ambari-server/src/test/python/TestAmbaryServer.py b/branch-1.2/ambari-server/src/test/python/TestAmbaryServer.py
deleted file mode 100644
index a4af59e..0000000
--- a/branch-1.2/ambari-server/src/test/python/TestAmbaryServer.py
+++ /dev/null
@@ -1,1118 +0,0 @@
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import StringIO
-from unittest import TestCase
-import sys
-from mock.mock import patch
-from mock.mock import MagicMock
-from mock.mock import create_autospec
-import os, errno, tempfile
-import stat
-# We have to use this import HACK because the filename contains a dash
-ambari_server = __import__('ambari-server')
-
-
-class TestAmbariServer(TestCase):
-
-  @patch.object(ambari_server, 'configure_postgres_username_password')
-  @patch.object(ambari_server, 'run_os_command')
-  @patch('optparse.Values')
-  def test_configure_pg_hba_ambaridb_users(self, OptParseValuesMock,
-                                run_os_command_method,
-                                configure_postgres_username_password_method):
-    # Prepare mocks
-    run_os_command_method.return_value = (0, "", "")
-    opvm = OptParseValuesMock.return_value
-    opvm.postgres_username = "ffdf"
-    tf1 = tempfile.NamedTemporaryFile()
-    ambari_server.PG_HBA_CONF_FILE = tf1.name
-    # Run test
-    ambari_server.configure_pg_hba_ambaridb_users()
-    # Check results
-    self.assertTrue(run_os_command_method.called)
-    self.assertTrue(configure_postgres_username_password_method.called)
-    string_expected = self.get_file_string(
-      self.get_samples_dir("configure_pg_hba_ambaridb_users1"))
-    string_actual = self.get_file_string(ambari_server.PG_HBA_CONF_FILE)
-    self.assertEquals(string_expected, string_actual)
-
-
-
-  def test_configure_pg_hba_postgres_user(self):
-
-    tf1 = tempfile.NamedTemporaryFile()
-    ambari_server.PG_HBA_CONF_FILE = tf1.name
-
-    with open(ambari_server.PG_HBA_CONF_FILE, 'w') as fout:
-      fout.write("\n")
-      fout.write("local  all  all md5\n")
-      fout.write("host  all   all 0.0.0.0/0  md5\n")
-      fout.write("host  all   all ::/0 md5\n")
-
-    ambari_server.configure_pg_hba_postgres_user()
-
-    expected  = self.get_file_string(self.get_samples_dir(
-      "configure_pg_hba_ambaridb_users2"))
-    result = self.get_file_string(ambari_server.PG_HBA_CONF_FILE)
-    self.assertEqual(expected, result, "pg_hba_conf not processed")
-
-    mode = oct(os.stat(ambari_server.PG_HBA_CONF_FILE)[stat.ST_MODE])
-    str_mode = str(mode)[-4:]
-    self.assertEqual("0644", str_mode, "Wrong file permissions")
-
-
-
-  @patch('__builtin__.raw_input')
-  def test_get_choice_string_input(self, raw_input_method):
-    out = StringIO.StringIO()
-    sys.stdout = out
-
-    prompt = "blablabla"
-    default = "default blablabla"
-    firstChoice = set(['yes','ye', 'y'])
-    secondChoice = set(['no','n'])
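-    # The helper returns True for firstChoice answers, False for secondChoice, the default on empty input, and re-prompts on invalid input.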
-    # test first input
-    raw_input_method.return_value = "Y"
-
-    result = ambari_server.get_choice_string_input(prompt, default,
-        firstChoice, secondChoice)
-    self.assertEquals(result, True)
-    raw_input_method.reset_mock()
-    # test second input
-
-    raw_input_method.return_value = "N"
-
-    result = ambari_server.get_choice_string_input(prompt, default,
-        firstChoice, secondChoice)
-    self.assertEquals(result, False)
-
-    raw_input_method.reset_mock()
-
-    # test enter pressed
-
-    raw_input_method.return_value = ""
-
-    result = ambari_server.get_choice_string_input(prompt, default,
-        firstChoice, secondChoice)
-    self.assertEquals(result, default)
-
-    raw_input_method.reset_mock()
-
-    # test wrong input
-    list_of_return_values= ['yes', 'dsad', 'fdsfds']
-    def side_effect(*args):
-      return list_of_return_values.pop()
-    raw_input_method.side_effect = side_effect
-
-    result = ambari_server.get_choice_string_input(prompt, default,
-        firstChoice, secondChoice)
-    self.assertEquals(result, True)
-    self.assertEquals(raw_input_method.call_count, 3)
-
-    sys.stdout = sys.__stdout__
-
-
-
-  @patch('re.search')
-  @patch('__builtin__.raw_input')
-  @patch('getpass.getpass')
-  def test_get_validated_string_input(self, get_pass_method,
-      raw_input_method, re_search_method):
-    out = StringIO.StringIO()
-    sys.stdout = out
-
-    prompt = "blabla"
-    default = "default_pass"
-    pattern = "pattern_pp"
-    description = "blabla2"
-    # check password input
-    is_pass = True
-    get_pass_method.return_value = "dfdsfdsfds"
-
-    result = ambari_server.get_validated_string_input(prompt, default,
-        pattern, description, is_pass)
-
-    self.assertEquals(get_pass_method.return_value, result)
-    self.assertEqual(1, get_pass_method.call_count)
-    self.assertFalse(raw_input_method.called)
-
-    # check raw input
-    get_pass_method.reset_mock()
-    raw_input_method.reset_mock()
-    is_pass = False
-    raw_input_method.return_value = "dkf90ewuf0"
-
-    result = ambari_server.get_validated_string_input(prompt, default,
-        pattern, description, is_pass)
-
-    self.assertEquals(raw_input_method.return_value, result)
-    self.assertFalse(get_pass_method.called)
-    self.assertEqual(1, raw_input_method.call_count)
-
-    sys.stdout = sys.__stdout__
-
-
-
-  def test_get_pass_file_path(self):
-    result = ambari_server.get_pass_file_path("/etc/ambari/conf_file")
-    self.assertEquals("/etc/ambari/password.dat", result)
-
-
-  @patch('__builtin__.file')
-  @patch('__builtin__.open')
-  @patch.object(ambari_server, 'Properties')
-  @patch.object(ambari_server, 'search_file')
-  def test_configure_postgres_username_password_test_configured(self,
-                  search_file_message, properties_mock, open_method, file_obj):
-    """
-      Tests the case where the database username and password are already configured
-    """
-
-    out = StringIO.StringIO()
-    sys.stdout = out
-
-    search_file_message.return_value = "blablabla-properties"
-    pm  = properties_mock.return_value
-    def tf(self, key):
-      return {
-               ambari_server.JDBC_USER_NAME_PROPERTY : "fake_username",
-               ambari_server.JDBC_PASSWORD_FILE_PROPERTY : "fake_passwd_file"
-             }[key]
-    pm.__getitem__ = tf
-    options = MagicMock()
-    open_method.return_value = file_obj
-    file_obj.read.return_value = "fake_password"
-
-    ambari_server.configure_postgres_username_password(options)
-
-    self.assertTrue(pm.load.called)
-    self.assertTrue(file_obj.read.called)
-    self.assertEquals(options.postgres_username,
-      pm[ambari_server.JDBC_USER_NAME_PROPERTY])
-    self.assertEquals(options.postgres_password, file_obj.read.return_value)
-
-    sys.stdout = sys.__stdout__
-
-
-
-  @patch.object(ambari_server, 'get_pass_file_path', autospec=True)
-  @patch('os.chmod', autospec=True)
-  @patch.object(ambari_server, 'write_property', autospec=True)
-  @patch.object(ambari_server, 'configure_postgres_password')
-  @patch.object(ambari_server, 'get_validated_string_input')
-  @patch.object(ambari_server, 'get_YN_input')
-  @patch('__builtin__.file')
-  @patch('__builtin__.open')
-  @patch.object(ambari_server, 'Properties')
-  @patch.object(ambari_server, 'search_file')
-  def test_configure_postgres_username_password_test_full_setup(self,
-          search_file_message, properties_mock, open_method, file_obj,
-          get_YN_input_method, get_validated_string_input_method,
-          configure_postgres_password_method, write_property_method,
-          os_chmod_method, get_pass_file_path_method):
-    """
-      Tests the case where the database username and password are not yet
-      configured and the user declines the advanced DB configuration
-    """
-
-    out = StringIO.StringIO()
-    sys.stdout = out
-
-    search_file_message.return_value = "blablabla-properties"
-    pm  = properties_mock.return_value
-    def tf(self, key):
-      return {
-               ambari_server.JDBC_USER_NAME_PROPERTY : "fake_user",
-               ambari_server.JDBC_PASSWORD_FILE_PROPERTY : False
-             }[key]
-    pm.__getitem__ = tf
-    options = MagicMock()
-    open_method.return_value = file_obj
-    file_obj.read.return_value = "fake_password"
-    file_obj.write.return_value = None
-    get_YN_input_method.return_value = False
-    get_validated_string_input_method.return_value = "blablabla-input"
-    get_pass_file_path_method.return_value = "blablabla-path"
-
-    ambari_server.configure_postgres_username_password(options)
-
-    self.assertTrue(get_YN_input_method.called)
-    self.assertEquals(write_property_method.call_args_list, [
-      ((ambari_server.JDBC_USER_NAME_PROPERTY,
-        'ambari-server'),),
-      ((ambari_server.JDBC_PASSWORD_FILE_PROPERTY,
-        get_pass_file_path_method.return_value),)
-    ])
-    get_pass_file_path_method.\
-        assert_called_once_with(search_file_message.return_value)
-    os_chmod_method.assert_called_once_with("blablabla-path", 384)
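-    # 384 decimal == 0600 octal: the password file is readable and writable by its owner only.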
-    self.assertTrue(pm.load.called)
-    self.assertFalse(get_validated_string_input_method.called)
-    self.assertFalse(configure_postgres_password_method.called)
-
-    sys.stdout = sys.__stdout__
-
-
-  @patch.object(ambari_server, 'get_pass_file_path', autospec=True)
-  @patch('os.chmod', autospec=True)
-  @patch.object(ambari_server, 'write_property', autospec=True)
-  @patch.object(ambari_server, 'configure_postgres_password')
-  @patch.object(ambari_server, 'get_validated_string_input')
-  @patch.object(ambari_server, 'get_YN_input')
-  @patch('__builtin__.file')
-  @patch('__builtin__.open')
-  @patch.object(ambari_server, 'Properties')
-  @patch.object(ambari_server, 'search_file')
-  def test_configure_postgres_username_password_test_full_setup_advanced(self,
-          search_file_message, properties_mock, open_method, file_obj,
-          get_YN_input_method, get_validated_string_input_method,
-          configure_postgres_password_method, write_property_method,
-          os_chmod_method, get_pass_file_path_method):
-    """
-      Tests the case where the database username and password are not yet
-      configured and the user opts into the advanced DB configuration
-    """
-
-    out = StringIO.StringIO()
-    sys.stdout = out
-
-    search_file_message.return_value = "blablabla-properties"
-    pm  = properties_mock.return_value
-    def tf(self, key):
-      return {
-               ambari_server.JDBC_USER_NAME_PROPERTY : "fake_user",
-               ambari_server.JDBC_PASSWORD_FILE_PROPERTY : False
-             }[key]
-    pm.__getitem__ = tf
-    options = MagicMock()
-    open_method.return_value = file_obj
-    file_obj.read.return_value = "fake_password"
-    file_obj.write.return_value = None
-    get_YN_input_method.return_value = True
-    get_validated_string_input_method.return_value = "blablabla-input"
-    get_pass_file_path_method.return_value = "blablabla-path"
-
-    ambari_server.configure_postgres_username_password(options)
-
-    self.assertTrue(get_YN_input_method.called)
-    self.assertEquals(write_property_method.call_args_list, [
-      ((ambari_server.JDBC_USER_NAME_PROPERTY,
-        get_validated_string_input_method.return_value),),
-      ((ambari_server.JDBC_PASSWORD_FILE_PROPERTY,
-        get_pass_file_path_method.return_value),)
-    ])
-    get_pass_file_path_method.\
-        assert_called_once_with(search_file_message.return_value)
-    os_chmod_method.assert_called_once_with("blablabla-path", 384)
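-    # 384 decimal == 0600 octal: the password file is readable and writable by its owner only.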
-    self.assertTrue(pm.load.called)
-    self.assertTrue(get_validated_string_input_method.called)
-    self.assertTrue(configure_postgres_password_method.called)
-
-    sys.stdout = sys.__stdout__
-
-
-
-  @patch.object(ambari_server, 'setup')
-  @patch.object(ambari_server, 'start')
-  @patch.object(ambari_server, 'stop')
-  @patch.object(ambari_server, 'reset')
-  @patch('optparse.OptionParser')
-  def test_main_test_setup(self, OptionParserMock, reset_method, stop_method,
-                           start_method, setup_method):
-    opm = OptionParserMock.return_value
-    options = MagicMock()
-    args = ["setup"]
-    opm.parse_args.return_value = (options, args)
-
-    ambari_server.main()
-
-    self.assertTrue(setup_method.called)
-    self.assertFalse(start_method.called)
-    self.assertFalse(stop_method.called)
-    self.assertFalse(reset_method.called)
-
-    self.assertFalse(ambari_server.VERBOSE)
-    self.assertFalse(ambari_server.SILENT)
-
-
-
-  @patch.object(ambari_server, 'setup')
-  @patch.object(ambari_server, 'start')
-  @patch.object(ambari_server, 'stop')
-  @patch.object(ambari_server, 'reset')
-  @patch('optparse.OptionParser')
-  def test_main_test_start(self, OptionParserMock, reset_method, stop_method,
-                           start_method, setup_method):
-    opm = OptionParserMock.return_value
-    options = MagicMock()
-    args = ["setup"]
-    opm.parse_args.return_value = (options, args)
-
-    ambari_server.main()
-
-    self.assertFalse(setup_method.called)
-    self.assertTrue(start_method.called)
-    self.assertFalse(stop_method.called)
-    self.assertFalse(reset_method.called)
-
-    self.assertFalse(ambari_server.VERBOSE)
-    self.assertFalse(ambari_server.SILENT)
-
-
-
-  @patch.object(ambari_server, 'setup')
-  @patch.object(ambari_server, 'start')
-  @patch.object(ambari_server, 'stop')
-  @patch.object(ambari_server, 'reset')
-  @patch('optparse.OptionParser')
-  def test_main_test_stop(self, OptionParserMock, reset_method, stop_method,
-                          start_method, setup_method):
-    opm = OptionParserMock.return_value
-    options = MagicMock()
-    args = ["stop"]
-    opm.parse_args.return_value = (options, args)
-
-    ambari_server.main()
-
-    self.assertFalse(setup_method.called)
-    self.assertFalse(start_method.called)
-    self.assertTrue(stop_method.called)
-    self.assertFalse(reset_method.called)
-
-    self.assertFalse(ambari_server.VERBOSE)
-    self.assertFalse(ambari_server.SILENT)
-
-
-
-  @patch.object(ambari_server, 'setup')
-  @patch.object(ambari_server, 'start')
-  @patch.object(ambari_server, 'stop')
-  @patch.object(ambari_server, 'reset')
-  @patch('optparse.OptionParser')
-  def test_main_test_reset(self, OptionParserMock, reset_method, stop_method,
-                           start_method, setup_method):
-    opm = OptionParserMock.return_value
-
-    options = MagicMock()
-    args = ["reset"]
-    opm.parse_args.return_value = (options, args)
-
-    ambari_server.main()
-
-    self.assertFalse(setup_method.called)
-    self.assertFalse(start_method.called)
-    self.assertFalse(stop_method.called)
-    self.assertTrue(reset_method.called)
-
-    self.assertFalse(ambari_server.VERBOSE)
-    self.assertFalse(ambari_server.SILENT)
-
-
-
-  def test_configure_postgresql_conf(self):
-
-    tf1 = tempfile.NamedTemporaryFile()
-    ambari_server.POSTGRESQL_CONF_FILE = tf1.name
-
-    with open(ambari_server.POSTGRESQL_CONF_FILE, 'w') as f:
-      f.write("#listen_addresses = '127.0.0.1'        #\n")
-      f.write("#listen_addresses = '127.0.0.1'")
-
-    ambari_server.configure_postgresql_conf()
-
-    expected  = self.get_file_string(self.get_samples_dir(
-      "configure_postgresql_conf1"))
-    result = self.get_file_string(ambari_server.POSTGRESQL_CONF_FILE)
-    self.assertEqual(expected, result, "postgresql.conf not updated")
-
-    mode = oct(os.stat(ambari_server.POSTGRESQL_CONF_FILE)[stat.ST_MODE])
-    str_mode = str(mode)[-4:]
-    self.assertEqual("0644", str_mode, "Wrong file permissions")
-
-
-
-  @patch.object(ambari_server, "restart_postgres")
-  @patch.object(ambari_server, "get_postgre_status")
-  @patch.object(ambari_server, "configure_postgresql_conf")
-  @patch.object(ambari_server, "configure_pg_hba_ambaridb_users")
-  @patch.object(ambari_server, "configure_pg_hba_postgres_user")
-  def test_configure_postgres(self, configure_pg_hba_postgres_user_mock,
-                              configure_pg_hba_ambaridb_users_mock,
-                              configure_postgresql_conf_mock,
-                              get_postgre_status_mock,
-                              restart_postgres_mock):
-
-    tf1 = tempfile.NamedTemporaryFile()
-    tf2 = tempfile.NamedTemporaryFile()
-    ambari_server.PG_HBA_CONF_FILE = tf1.name
-    ambari_server.PG_HBA_CONF_FILE_BACKUP = tf2.name
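-    # tf1/tf2 both exist, so configure_postgres() sees a pg_hba backup and skips reconfiguration.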
-
-    out = StringIO.StringIO()
-    sys.stdout = out
-    rcode = ambari_server.configure_postgres()
-    sys.stdout = sys.__stdout__
-    self.assertEqual(0, rcode)
-    self.assertEqual("Backup for pg_hba found, reconfiguration not required\n",
-      out.getvalue())
-
-    ambari_server.PG_HBA_CONF_FILE_BACKUP = tempfile.mktemp()
-    get_postgre_status_mock.return_value = ambari_server.PG_STATUS_RUNNING
-    restart_postgres_mock.return_value = 0
-
-    rcode = ambari_server.configure_postgres()
-
-    self.assertTrue(os.path.isfile(ambari_server.PG_HBA_CONF_FILE_BACKUP),
-      "postgresql.conf backup not created")
-    self.assertTrue(configure_pg_hba_postgres_user_mock.called)
-    self.assertTrue(configure_pg_hba_ambaridb_users_mock.called)
-    mode = oct(os.stat(ambari_server.PG_HBA_CONF_FILE)[stat.ST_MODE])
-    str_mode = str(mode)[-4:]
-    self.assertEqual("0644", str_mode, "Wrong file permissions")
-    self.assertTrue(configure_postgresql_conf_mock.called)
-    self.assertEqual(0, rcode)
-
-    os.unlink(ambari_server.PG_HBA_CONF_FILE_BACKUP)
-    get_postgre_status_mock.return_value = "stopped"
-    rcode = ambari_server.configure_postgres()
-    self.assertEqual(0, rcode)
-    os.unlink(ambari_server.PG_HBA_CONF_FILE_BACKUP)
-    sys.stdout = sys.__stdout__
-
-
-
-  @patch("time.sleep")
-  @patch("subprocess.Popen")
-  @patch.object(ambari_server, "run_os_command")
-  @patch.object(ambari_server, "get_postgre_status")
-  @patch.object(ambari_server, "print_info_msg")
-  def test_restart_postgres(self, printInfoMsg_mock, get_postgre_status_mock,
-                            run_os_command_mock, popenMock, sleepMock):
-
-    out = StringIO.StringIO()
-    sys.stdout = out
-
-    p = MagicMock()
-    p.poll.return_value = 0
-    popenMock.return_value = p
-    rcode = ambari_server.restart_postgres()
-    self.assertEqual(0, rcode)
-
-    p.poll.return_value = None
-    get_postgre_status_mock.return_value = "stopped"
-    run_os_command_mock.return_value = (1, None, None)
-    rcode = ambari_server.restart_postgres()
-    self.assertEqual(1, rcode)
-    sys.stdout = sys.__stdout__
-
-
-
-  @patch("shlex.split")
-  @patch("subprocess.Popen")
-  @patch.object(ambari_server, "print_info_msg")
-  def test_run_os_command(self, printInfoMsg_mock, popenMock, splitMock):
-
-    p = MagicMock()
-    p.communicate.return_value = (None, None)
-    p.returncode = 3
-    popenMock.return_value = p
-
-    # with list arg
-    cmd = ["exec", "arg"]
-    ambari_server.run_os_command(cmd)
-    self.assertFalse(splitMock.called)
-
-    # with str arg
-    resp = ambari_server.run_os_command("runme")
-    self.assertEqual(3, resp[0])
-    self.assertTrue(splitMock.called)
-
-
-
-  @patch.object(ambari_server, "get_conf_dir")
-  @patch.object(ambari_server, "search_file")
-  def test_write_property(self, search_file_mock, get_conf_dir_mock):
-
-    expected_content = "key1=val1\n"
-
-    tf1 = tempfile.NamedTemporaryFile()
-    search_file_mock.return_value = tf1.name
-    ambari_server.write_property("key1", "val1")
-    result = tf1.read()
-    self.assertEqual(expected_content, result)
-
-
-
-  @patch.object(ambari_server, "configure_postgres_username_password")
-  @patch.object(ambari_server, "run_os_command")
-  def test_setup_db(self, run_os_command_mock,
-                    configure_postgres_username_password_mock):
-
-    run_os_command_mock.return_value = (0, None, None)
-    result = ambari_server.setup_db(MagicMock())
-    self.assertTrue(configure_postgres_username_password_mock.called)
-    self.assertEqual(0, result)
-
-
-
-  @patch.object(ambari_server, "get_YN_input")
-  @patch.object(ambari_server, "run_os_command")
-  def test_check_selinux(self, run_os_command_mock, getYNInput_mock):
-
-    out = StringIO.StringIO()
-    sys.stdout = out
-
-    run_os_command_mock.return_value = (0, ambari_server.SE_STATUS_DISABLED,
-                                        None)
-    rcode = ambari_server.check_selinux()
-    self.assertEqual(0, rcode)
-
-    getYNInput_mock.return_value = True
-    run_os_command_mock.return_value = (0,"enabled "
-                                          + ambari_server.SE_MODE_ENFORCING,
-                                        None)
-    rcode = ambari_server.check_selinux()
-    self.assertEqual(0, rcode)
-    self.assertTrue(run_os_command_mock.called)
-    self.assertTrue(getYNInput_mock.called)
-    sys.stdout = sys.__stdout__
-
-
-
-  @patch.object(ambari_server, "print_info_msg")
-  def test_get_ambari_jars(self, printInfoMsg_mock):
-
-    env = "/ambari/jars"
-    os.environ[ambari_server.AMBARI_SERVER_LIB] = env
-    result = ambari_server.get_ambari_jars()
-    self.assertEqual(env, result)
-
-    del os.environ[ambari_server.AMBARI_SERVER_LIB]
-    result = ambari_server.get_ambari_jars()
-    self.assertEqual("/usr/lib/ambari-server", result)
-    self.assertTrue(printInfoMsg_mock.called)
-
-
-  @patch.object(ambari_server, "print_info_msg")
-  def test_get_conf_dir(self, printInfoMsg_mock):
-
-    env = "/ambari/conf"
-    os.environ[ambari_server.AMBARI_CONF_VAR] = env
-    result = ambari_server.get_conf_dir()
-    self.assertEqual(env, result)
-
-    del os.environ[ambari_server.AMBARI_CONF_VAR]
-    result = ambari_server.get_conf_dir()
-    self.assertEqual("/etc/ambari-server/conf", result)
-    self.assertTrue(printInfoMsg_mock.called)
-
-
-
-  def test_search_file(self):
-
-    path = os.path.dirname(__file__)
-    result = ambari_server.search_file(__file__, path)
-    expected = os.path.abspath(__file__)
-    self.assertEqual(expected, result)
-
-    result = ambari_server.search_file("non_existent_file", path)
-    self.assertEqual(None, result)
-
-
-  @patch.object(ambari_server, "run_os_command")
-  def test_check_iptables(self, run_os_command_mock):
-
-    out = StringIO.StringIO()
-    sys.stdout = out
-
-    run_os_command_mock.return_value = (1, "test", "")
-    rcode, info = ambari_server.check_iptables()
-    self.assertEqual(1, rcode)
-    self.assertEqual("test", info)
-
-    run_os_command_mock.return_value = (2, "",
-                                        ambari_server.IP_TBLS_SRVC_NT_FND)
-    rcode, info = ambari_server.check_iptables()
-    self.assertEqual(0, rcode)
-
-    sys.stdout = sys.__stdout__
-
-
-  def test_dlprogress(self):
-
-    out = StringIO.StringIO()
-    sys.stdout = out
-    ambari_server.dlprogress("filename", 10, 2, 100)
-    sys.stdout = sys.__stdout__
-    self.assertNotEqual("", out.getvalue())
-
-
-  @patch("urllib2.urlopen")
-  @patch("__builtin__.open")
-  @patch.object(ambari_server, "dlprogress")
-  def test_track_jdk(self, dlprogress_mock, openMock, urlopenMock):
-
-    u = MagicMock()
-    u.info.return_value = {"Content-Length":"24576"}
-    chunks = [None, "second", "first"]
-    def side_effect(*args, **kwargs):
-      return chunks.pop()
-    u.read.side_effect = side_effect
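-    # pop() takes from the tail, so read() yields "first", then "second", then None to end the download.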
-    urlopenMock.return_value = u
-
-    f = MagicMock()
-    openMock.return_value = f
-
-    ambari_server.track_jdk("base", "url", "local")
-
-    self.assertEqual(0, len(chunks))
-    self.assertTrue(f.write.called)
-    self.assertTrue(f.flush.called)
-    self.assertTrue(f.close.called)
-    self.assertEqual(2, len(dlprogress_mock.call_args_list))
-
-
-
-  @patch("os.stat")
-  @patch("os.path.isfile")
-  @patch("os.path.exists")
-  @patch("__builtin__.open")
-  @patch.object(ambari_server, "track_jdk")
-  @patch.object(ambari_server, "get_YN_input")
-  @patch.object(ambari_server, "run_os_command")
-  @patch.object(ambari_server, "Properties")
-  @patch.object(ambari_server, "write_property")
-  @patch.object(ambari_server, "print_info_msg")
-  @patch.object(ambari_server, "get_JAVA_HOME")
-  @patch.object(ambari_server, "get_conf_dir")
-  @patch.object(ambari_server, "search_file")
-  def test_download_jdk(self, search_file_mock, get_conf_dir_mock,
-                        get_JAVA_HOME_mock, print_info_msg_mock,
-                        write_property_mock, Properties_mock,
-                        run_os_command_mock, get_YN_input_mock, track_jdk_mock,
-                        openMock, path_existsMock,
-                        path_isfileMock, statMock):
-
-    out = StringIO.StringIO()
-    sys.stdout = out
-
-    args = MagicMock()
-    args.java_home = "somewhere"
-    search_file_mock.return_value = None
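-    # No ambari.properties found: download_jdk is expected to fail with -1.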
-
-    rcode = ambari_server.download_jdk(args)
-
-    self.assertEqual(-1, rcode)
-    self.assertTrue(search_file_mock.called)
-    self.assertTrue(get_conf_dir_mock.called)
-
-    search_file_mock.return_value = "something"
-    get_JAVA_HOME_mock.return_value = True
-    path_existsMock.return_value = True
-    rcode = ambari_server.download_jdk(args)
-    self.assertEqual(0, rcode)
-
-    get_JAVA_HOME_mock.return_value = False
-    rcode = ambari_server.download_jdk(args)
-    self.assertEqual(0, rcode)
-    self.assertTrue(write_property_mock.called)
-
-    p = MagicMock()
-    Properties_mock.return_value = p
-    openMock.side_effect = Exception("test exception")
-    path_existsMock.return_value = False
-    rcode = ambari_server.download_jdk(args)
-    self.assertEqual(-1, rcode)
-
-    openMock.side_effect = None
-    p.__getitem__.side_effect = KeyError("test exception")
-    rcode = ambari_server.download_jdk(args)
-    self.assertEqual(-1, rcode)
-
-    p.__getitem__.return_value = "somewhere"
-    p.__getitem__.side_effect = None
-    path_existsMock.return_value = False
-    run_os_command_mock.return_value = (0, "Wrong out", None)
-    rcode = ambari_server.download_jdk(args)
-    self.assertEqual(-1, rcode)
-
-    ambari_server.JDK_INSTALL_DIR = os.getcwd()
-    get_YN_input_mock.return_value = True
-    run_os_command_mock.return_value = (0, "Creating jdk-1.2/jre"
-                                           "Content-Length: 32000\r\n"
-                                           , None)
-    statResult = MagicMock()
-    statResult.st_size = 32000
-    statMock.return_value = statResult
-    rcode = ambari_server.download_jdk(args)
-    self.assertEqual(0, rcode)
-
-    sys.stdout = sys.__stdout__
-
-
-
-  @patch.object(ambari_server, "run_os_command")
-  def test_get_postgre_status(self, run_os_command_mock):
-
-    run_os_command_mock.return_value = (1, "running", None)
-    result = ambari_server.get_postgre_status()
-    self.assertEqual("running", result)
-
-    run_os_command_mock.return_value = (1, "wrong", None)
-    result = ambari_server.get_postgre_status()
-    self.assertEqual(None, result)
-
-
-
-  @patch.object(ambari_server, "run_os_command")
-  @patch.object(ambari_server, "get_postgre_status")
-  def test_check_postgre_up(self, get_postgre_status_mock, run_os_command_mock):
-
-    out = StringIO.StringIO()
-    sys.stdout = out
-
-    get_postgre_status_mock.return_value = ambari_server.PG_STATUS_RUNNING
-    rcode = ambari_server.check_postgre_up()
-    self.assertEqual(0, rcode)
-
-    run_os_command_mock.return_value = (4, None, None)
-    get_postgre_status_mock.return_value = None
-    rcode = ambari_server.check_postgre_up()
-    self.assertEqual(4, rcode)
-
-    sys.stdout = sys.__stdout__
-
-
-
-  @patch("platform.linux_distribution")
-  @patch("platform.system")
-  @patch("__builtin__.open")
-  @patch.object(ambari_server, "Properties")
-  @patch.object(ambari_server, "print_info_msg")
-  @patch.object(ambari_server, "print_error_msg")
-  @patch.object(ambari_server, "search_file")
-  @patch.object(ambari_server, "get_conf_dir")
-  def test_configure_os_settings(self, get_conf_dir_mock, search_file_mock,
-                                 print_error_msg_mock, print_info_msg_mock,
-                                 Properties_mock, openMock, systemMock,
-                                 distMock):
-
-    search_file_mock.return_value = None
-    rcode = ambari_server.configure_os_settings()
-    self.assertEqual(-1, rcode)
-
-    search_file_mock.return_value = "something"
-    p = MagicMock()
-    Properties_mock.return_value = p
-    openMock.side_effect = Exception("exception")
-    rcode = ambari_server.configure_os_settings()
-    self.assertEqual(-1, rcode)
-
-    p.__getitem__.return_value = "something"
-    openMock.side_effect = None
-    rcode = ambari_server.configure_os_settings()
-    self.assertEqual(0, rcode)
-
-    p.__getitem__.return_value = ""
-    systemMock.return_value = "NonLinux"
-    rcode = ambari_server.configure_os_settings()
-    self.assertEqual(-1, rcode)
-
-    systemMock.return_value = "Linux"
-    distMock.return_value = ("CentOS", "6.3", None)
-    f = MagicMock()
-    openMock.return_value = f
-    rcode = ambari_server.configure_os_settings()
-    self.assertEqual(0, rcode)
-
-
-
-  @patch("__builtin__.open")
-  @patch.object(ambari_server, "Properties")
-  @patch.object(ambari_server, "search_file")
-  @patch.object(ambari_server, "get_conf_dir")
-  def test_get_JAVA_HOME(self, get_conf_dir_mock, search_file_mock,
-                         Properties_mock, openMock):
-
-    out = StringIO.StringIO()
-    sys.stdout = out
-
-    openMock.side_effect = Exception("exception")
-    result = ambari_server.get_JAVA_HOME()
-    self.assertEqual(None, result)
-
-    expected = os.path.dirname(__file__)
-    p = MagicMock()
-    p.__getitem__.return_value = expected
-    openMock.side_effect = None
-    Properties_mock.return_value = p
-    result = ambari_server.get_JAVA_HOME()
-    self.assertEqual(expected, result)
-
-    sys.stdout = sys.__stdout__
-
-
-
-  @patch("glob.glob")
-  @patch.object(ambari_server, "get_JAVA_HOME")
-  def test_find_jdk(self, get_JAVA_HOME_mock, globMock):
-
-    out = StringIO.StringIO()
-    sys.stdout = out
-
-    get_JAVA_HOME_mock.return_value = "somewhere"
-    result = ambari_server.find_jdk()
-    self.assertEqual("somewhere", result)
-
-    get_JAVA_HOME_mock.return_value = None
-    globMock.return_value = []
-    result = ambari_server.find_jdk()
-    self.assertEqual(None, result)
-
-    globMock.return_value = ["one", "two"]
-    result = ambari_server.find_jdk()
-    self.assertNotEqual(None, result)
-
-    sys.stdout = sys.__stdout__
-
-
-  @patch.object(ambari_server, "configure_os_settings")
-  @patch.object(ambari_server, "download_jdk")
-  @patch.object(ambari_server, "configure_postgres")
-  @patch.object(ambari_server, "setup_db")
-  @patch.object(ambari_server, "check_postgre_up")
-  @patch.object(ambari_server, "check_iptables")
-  @patch.object(ambari_server, "check_selinux")
-  def test_setup(self, check_selinux_mock, check_iptables_mock,
-                 check_postgre_up_mock, setup_db_mock, configure_postgres_mock,
-                 download_jdk_mock, configure_os_settings_mock):
-
-    out = StringIO.StringIO()
-    sys.stdout = out
-
-    args = MagicMock()
-
-    check_selinux_mock.return_value = 0
-    check_iptables_mock.return_value = (0, "other")
-    check_postgre_up_mock.return_value = 0
-    setup_db_mock.return_value = 0
-    configure_postgres_mock.return_value = 0
-    download_jdk_mock.return_value = 0
-    configure_os_settings_mock.return_value = 0
-    result = ambari_server.setup(args)
-    self.assertEqual(None, result)
-
-    sys.stdout = sys.__stdout__
-
-
-
-  @patch("__builtin__.raw_input")
-  @patch.object(ambari_server, "setup_db")
-  @patch.object(ambari_server, "print_info_msg")
-  @patch.object(ambari_server, "run_os_command")
-  @patch.object(ambari_server, "configure_postgres_username_password")
-  def test_reset(self, configure_postgres_username_password_mock,
-                 run_os_command_mock, print_info_msg_mock,
-                 setup_db_mock, raw_inputMock):
-
-    out = StringIO.StringIO()
-    sys.stdout = out
-
-    args = MagicMock()
-    raw_inputMock.return_value = "No"
-    rcode = ambari_server.reset(args)
-    self.assertEqual(-1, rcode)
-
-    raw_inputMock.return_value = "yes"
-    run_os_command_mock.return_value = (1, None, None)
-    rcode = ambari_server.reset(args)
-    self.assertEqual(1, rcode)
-
-    run_os_command_mock.return_value = (0, None, None)
-    rcode = ambari_server.reset(args)
-    self.assertEqual(None, rcode)
-    self.assertTrue(setup_db_mock.called)
-
-    sys.stdout = sys.__stdout__
-
-
-
-  @patch("os.kill")
-  @patch("os.path.exists")
-  @patch("__builtin__.open")
-  @patch("subprocess.Popen")
-  @patch.object(ambari_server, "print_info_msg")
-  @patch.object(ambari_server, "get_conf_dir")
-  @patch.object(ambari_server, "find_jdk")
-  @patch.object(ambari_server, "print_error_msg")
-  @patch.object(ambari_server, "check_postgre_up")
-  @patch.object(ambari_server, "check_iptables")
-  def test_start(self, check_iptables_mock, check_postgre_up_mock,
-                 print_error_msg_mock, find_jdk_mock, get_conf_dir_mock,
-                 print_info_msg_mock, popenMock, openMock, pexistsMock,
-                 killMock):
-
-    out = StringIO.StringIO()
-    sys.stdout = out
-
-    args = MagicMock()
-    f = MagicMock()
-    f.readline.return_value = 42
-    openMock.return_value = f
-    pexistsMock.return_value = True
-    rcode = ambari_server.start(args)
-    self.assertTrue(killMock.called)
-    self.assertEqual(None, rcode)
-
-    pexistsMock.return_value = False
-    find_jdk_mock.return_value = None
-    rcode = ambari_server.start(args)
-    self.assertEqual(-1, rcode)
-
-    find_jdk_mock.return_value = "somewhere"
-    check_postgre_up_mock.return_value = 0
-    check_iptables_mock.return_value = (0, None)
-    p = MagicMock()
-    popenMock.return_value = p
-    rcode = ambari_server.start(args)
-    self.assertEqual(None, rcode)
-    self.assertTrue(f.write.called)
-
-    sys.stdout = sys.__stdout__
-
-
-
-  @patch("__builtin__.open")
-  @patch("os.path.exists")
-  @patch("os.remove")
-  @patch("os.killpg")
-  @patch("os.getpgid")
-  @patch.object(ambari_server, "print_info_msg")
-  def test_stop(self, print_info_msg_mock, gpidMock, removeMock,
-                killMock, pexistsMock, openMock):
-
-    pexistsMock.return_value = True
-    f = MagicMock()
-    f.readline.return_value = "42"
-    openMock.return_value = f
-
-    out = StringIO.StringIO()
-    sys.stdout = out
-
-    ambari_server.stop(None)
-
-    self.assertTrue(f.readline.called)
-    self.assertTrue(killMock.called)
-    self.assertTrue(f.close.called)
-    self.assertTrue(removeMock.called)
-
-    sys.stdout = sys.__stdout__
-
-
-
-  def test_print_info_msg(self):
-
-    out = StringIO.StringIO()
-    sys.stdout = out
-
-    ambari_server.VERBOSE = True
-    ambari_server.print_info_msg("msg")
-    self.assertNotEqual("", out.getvalue())
-
-    sys.stdout = sys.__stdout__
-
-
-
-  def test_print_error_msg(self):
-
-    out = StringIO.StringIO()
-    sys.stdout = out
-
-    ambari_server.VERBOSE = True
-    ambari_server.print_error_msg("msg")
-    self.assertNotEqual("", out.getvalue())
-
-    sys.stdout = sys.__stdout__
-
-
-
-  def test_print_warning_msg(self):
-
-    out = StringIO.StringIO()
-    sys.stdout = out
-
-    ambari_server.VERBOSE = True
-    ambari_server.print_warning_msg("msg")
-    self.assertNotEqual("", out.getvalue())
-
-    sys.stdout = sys.__stdout__
-
-
-
-  @patch.object(ambari_server, "get_choice_string_input")
-  def test_get_YN_input(self, get_choice_string_input_mock):
-
-    ambari_server.get_YN_input("prompt", "default")
-    self.assertTrue(get_choice_string_input_mock.called)
-    self.assertEqual(4, len(get_choice_string_input_mock.call_args_list[0][0]))
-
-
-
-  def get_sample(self, sample):
-    """
-    Returns sample file content as string with normalized line endings
-    """
-    path = self.get_samples_dir(sample)
-    return self.get_file_string(path)
-
-
-
-  def get_file_string(self, file):
-    """
-    Returns file content as string with normalized line endings
-    """
-    string = open(file, 'r').read()
-    return self.normalize(string)
-
-
-
-  def normalize(self, string):
-    """
-    Normalizes line endings in the string to the platform-default line separator
-    """
-    return string.replace("\n", os.linesep)
-
-
-
-  def get_samples_dir(self, sample):
-    """
-    Returns the full path to a sample file given its name
-    """
-    testdir = os.path.dirname(__file__)
-    return os.path.join(os.path.dirname(testdir), "resources",
-                        "TestAmbaryServer.samples", sample)
diff --git a/branch-1.2/ambari-server/src/test/python/unitTests.py b/branch-1.2/ambari-server/src/test/python/unitTests.py
deleted file mode 100644
index 7810c83..0000000
--- a/branch-1.2/ambari-server/src/test/python/unitTests.py
+++ /dev/null
@@ -1,45 +0,0 @@
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import unittest
-import glob
-import os
-import sys
-
-TEST_MASK = 'Test*.py'
-
-def main():
-
-  pwd = os.path.dirname(__file__)
-  if pwd:
-    global TEST_MASK
-    TEST_MASK = pwd + os.sep + TEST_MASK
-
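-  # Discover every Test*.py beside this script and load each one as a test module.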
-  tests = glob.glob(TEST_MASK)
-  modules = [os.path.basename(s)[:-3] for s in tests]
-  suites = [unittest.defaultTestLoader.loadTestsFromName(name) for name in
-    modules]
-  testSuite = unittest.TestSuite(suites)
-
-  result = unittest.TextTestRunner(verbosity=2).run(testSuite)
-  return 0 if result.wasSuccessful() else 1
-
-
-if __name__ == "__main__":
-  sys.exit(main())
-
diff --git a/branch-1.2/ambari-server/src/test/resources/TestAmbaryServer.samples/configure_pg_hba_ambaridb_users1 b/branch-1.2/ambari-server/src/test/resources/TestAmbaryServer.samples/configure_pg_hba_ambaridb_users1
deleted file mode 100644
index be422af..0000000
--- a/branch-1.2/ambari-server/src/test/resources/TestAmbaryServer.samples/configure_pg_hba_ambaridb_users1
+++ /dev/null
@@ -1,4 +0,0 @@
-
-local  all  ffdf,mapred md5
-host  all   ffdf,mapred 0.0.0.0/0  md5
-host  all   ffdf,mapred ::/0 md5
diff --git a/branch-1.2/ambari-server/src/test/resources/TestAmbaryServer.samples/configure_pg_hba_ambaridb_users2 b/branch-1.2/ambari-server/src/test/resources/TestAmbaryServer.samples/configure_pg_hba_ambaridb_users2
deleted file mode 100644
index 8c0dcf4..0000000
--- a/branch-1.2/ambari-server/src/test/resources/TestAmbaryServer.samples/configure_pg_hba_ambaridb_users2
+++ /dev/null
@@ -1,4 +0,0 @@
-
-local  all   postgres md5
-host  all   postgres 0.0.0.0/0  md5
-host  all   postgres ::/0 md5
diff --git a/branch-1.2/ambari-server/src/test/resources/TestAmbaryServer.samples/configure_postgresql_conf1 b/branch-1.2/ambari-server/src/test/resources/TestAmbaryServer.samples/configure_postgresql_conf1
deleted file mode 100644
index 6d78da6..0000000
--- a/branch-1.2/ambari-server/src/test/resources/TestAmbaryServer.samples/configure_postgresql_conf1
+++ /dev/null
@@ -1,2 +0,0 @@
-listen_addresses = '*'        #
-listen_addresses = '*'        #
\ No newline at end of file
diff --git a/branch-1.2/ambari-server/src/test/resources/ambari.properties b/branch-1.2/ambari-server/src/test/resources/ambari.properties
deleted file mode 100644
index f1b96c7..0000000
--- a/branch-1.2/ambari-server/src/test/resources/ambari.properties
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright 2011 The Apache Software Foundation
-# 
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
\ No newline at end of file
diff --git a/branch-1.2/ambari-server/src/test/resources/api_testscripts/curl-addcluster.sh b/branch-1.2/ambari-server/src/test/resources/api_testscripts/curl-addcluster.sh
deleted file mode 100644
index 6d77a12..0000000
--- a/branch-1.2/ambari-server/src/test/resources/api_testscripts/curl-addcluster.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
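-# Create cluster c1 on stack HDP-1.2.0, then register localhost.localdomain with it.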
-curl -i -X POST -d '{"Clusters": {"version" : "HDP-1.2.0"}}' http://localhost:8080/api/clusters/c1
-curl -i -X POST http://localhost:8080/api/clusters/c1/hosts/localhost.localdomain
diff --git a/branch-1.2/ambari-server/src/test/resources/api_testscripts/curl-addganglia.sh b/branch-1.2/ambari-server/src/test/resources/api_testscripts/curl-addganglia.sh
deleted file mode 100644
index b4f6fc1..0000000
--- a/branch-1.2/ambari-server/src/test/resources/api_testscripts/curl-addganglia.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
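-# Create the GANGLIA service, place its server and monitor on localhost.localdomain, then move it to INSTALLED.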
-curl -i -X POST http://localhost:8080/api/clusters/c1/services/GANGLIA
-curl -i -X POST http://localhost:8080/api/clusters/c1/services/GANGLIA/components/GANGLIA_SERVER
-curl -i -X POST http://localhost:8080/api/clusters/c1/services/GANGLIA/components/GANGLIA_MONITOR
-curl -i -X POST http://localhost:8080/api/clusters/c1/hosts/localhost.localdomain/host_components/GANGLIA_SERVER
-curl -i -X POST http://localhost:8080/api/clusters/c1/hosts/localhost.localdomain/host_components/GANGLIA_MONITOR
-curl -i -X PUT  -d '{"ServiceInfo": {"state" : "INSTALLED"}}' http://localhost:8080/api/clusters/c1/services/GANGLIA/
diff --git a/branch-1.2/ambari-server/src/test/resources/api_testscripts/curl-addhbase.sh b/branch-1.2/ambari-server/src/test/resources/api_testscripts/curl-addhbase.sh
deleted file mode 100644
index 5c81f3e..0000000
--- a/branch-1.2/ambari-server/src/test/resources/api_testscripts/curl-addhbase.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
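-# Create HBASE with master, regionserver and client on localhost.localdomain, post hbase-site/hbase-env configs, bind them to the service, then move it to INSTALLED.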
-curl -i -X POST http://localhost:8080/api/clusters/c1/services/HBASE
-curl -i -X POST http://localhost:8080/api/clusters/c1/services/HBASE/components/HBASE_MASTER
-curl -i -X POST http://localhost:8080/api/clusters/c1/services/HBASE/components/HBASE_REGIONSERVER
-curl -i -X POST http://localhost:8080/api/clusters/c1/services/HBASE/components/HBASE_CLIENT
-curl -i -X POST http://localhost:8080/api/clusters/c1/hosts/localhost.localdomain/host_components/HBASE_MASTER
-curl -i -X POST http://localhost:8080/api/clusters/c1/hosts/localhost.localdomain/host_components/HBASE_REGIONSERVER
-curl -i -X POST http://localhost:8080/api/clusters/c1/hosts/localhost.localdomain/host_components/HBASE_CLIENT
-curl -i -X POST -d '{"type": "hbase-site", "tag": "version1", "properties" : { "hbase.rootdir" : "hdfs://localhost:8020/apps/hbase/", "hbase.cluster.distributed" : "true", "hbase.zookeeper.quorum": "localhost", "zookeeper.session.timeout": "60000" }}' http://localhost:8080/api/clusters/c1/configurations
-curl -i -X POST -d '{"type": "hbase-env", "tag": "version1", "properties" : { "hbase_hdfs_root_dir" : "/apps/hbase/"}}' http://localhost:8080/api/clusters/c1/configurations
-curl -i -X PUT -d '{"config": {"hbase-site": "version1", "hbase-env": "version1"}}'  http://localhost:8080/api/clusters/c1/services/HBASE
-curl -i -X PUT  -d '{"ServiceInfo": {"state" : "INSTALLED"}}' http://localhost:8080/api/clusters/c1/services/HBASE/
diff --git a/branch-1.2/ambari-server/src/test/resources/api_testscripts/curl-addhdfs.sh b/branch-1.2/ambari-server/src/test/resources/api_testscripts/curl-addhdfs.sh
deleted file mode 100644
index 6e40b5a..0000000
--- a/branch-1.2/ambari-server/src/test/resources/api_testscripts/curl-addhdfs.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-curl -i -X POST http://localhost:8080/api/clusters/c1/services/HDFS
-curl -i -X POST -d '{"type": "core-site", "tag": "version1", "properties" : { "fs.default.name" : "localhost:8020"}}' http://localhost:8080/api/clusters/c1/configurations
-curl -i -X POST -d '{"type": "hdfs-site", "tag": "version1", "properties" : { "dfs.datanode.data.dir.perm" : "750"}}' http://localhost:8080/api/clusters/c1/configurations
-curl -i -X POST -d '{"type": "global", "tag": "version1", "properties" : { "hbase_hdfs_root_dir" : "/apps/hbase/"}}' http://localhost:8080/api/clusters/c1/configurations
-curl -i -X PUT -d '{"config": {"core-site": "version1", "hdfs-site": "version1", "global" : "version1" }}'  http://localhost:8080/api/clusters/c1/services/HDFS
-curl -i -X POST http://localhost:8080/api/clusters/c1/services/HDFS/components/NAMENODE
-curl -i -X POST http://localhost:8080/api/clusters/c1/services/HDFS/components/SECONDARY_NAMENODE
-curl -i -X POST http://localhost:8080/api/clusters/c1/services/HDFS/components/DATANODE
-curl -i -X POST http://localhost:8080/api/clusters/c1/services/HDFS/components/HDFS_CLIENT
-curl -i -X POST http://localhost:8080/api/clusters/c1/hosts/localhost.localdomain/host_components/NAMENODE
-curl -i -X POST http://localhost:8080/api/clusters/c1/hosts/localhost.localdomain/host_components/SECONDARY_NAMENODE
-curl -i -X POST http://localhost:8080/api/clusters/c1/hosts/localhost.localdomain/host_components/DATANODE
-curl -i -X POST http://localhost:8080/api/clusters/c1/hosts/localhost.localdomain/host_components/HDFS_CLIENT
-curl -i -X PUT  -d '{"ServiceInfo": {"state" : "INSTALLED"}}' http://localhost:8080/api/clusters/c1/services/HDFS
diff --git a/branch-1.2/ambari-server/src/test/resources/api_testscripts/curl-addmapreduce.sh b/branch-1.2/ambari-server/src/test/resources/api_testscripts/curl-addmapreduce.sh
deleted file mode 100644
index eefed1a..0000000
--- a/branch-1.2/ambari-server/src/test/resources/api_testscripts/curl-addmapreduce.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-curl -i -X POST http://localhost:8080/api/clusters/c1/services/MAPREDUCE
-curl -i -X POST -d '{"type": "core-site", "tag": "version2", "properties" : { "fs.default.name" : "localhost:8020"}}' http://localhost:8080/api/clusters/c1/configurations
-curl -i -X POST -d '{"type": "mapred-site", "tag": "version1", "properties" : { "mapred.job.tracker" : "localhost:50300", "mapreduce.history.server.embedded": "false", "mapreduce.history.server.http.address": "localhost:51111"}}' http://localhost:8080/api/clusters/c1/configurations
-curl -i -X PUT -d '{"config": {"core-site": "version2", "mapred-site": "version1"}}'  http://localhost:8080/api/clusters/c1/services/MAPREDUCE
-curl -i -X POST http://localhost:8080/api/clusters/c1/services/MAPREDUCE/components/JOBTRACKER
-curl -i -X POST http://localhost:8080/api/clusters/c1/services/MAPREDUCE/components/TASKTRACKER
-curl -i -X POST http://localhost:8080/api/clusters/c1/hosts/localhost.localdomain/host_components/JOBTRACKER
-curl -i -X POST http://localhost:8080/api/clusters/c1/hosts/localhost.localdomain/host_components/TASKTRACKER
-curl -i -X PUT  -d '{"ServiceInfo": {"state" : "INSTALLED"}}'   http://localhost:8080/api/clusters/c1/services/MAPREDUCE/
diff --git a/branch-1.2/ambari-server/src/test/resources/api_testscripts/curl-addnagios.sh b/branch-1.2/ambari-server/src/test/resources/api_testscripts/curl-addnagios.sh
deleted file mode 100644
index 59e56eb..0000000
--- a/branch-1.2/ambari-server/src/test/resources/api_testscripts/curl-addnagios.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-curl -i -X POST http://localhost:8080/api/clusters/c1/services/NAGIOS
-curl -i -X POST http://localhost:8080/api/clusters/c1/services/NAGIOS/components/NAGIOS_SERVER
-curl -i -X POST http://localhost:8080/api/clusters/c1/hosts/localhost.localdomain/host_components/NAGIOS_SERVER
-curl -i -X POST -d '{"type": "nagios-global", "tag": "version1", "properties" : { "nagios_web_login" : "nagiosadmin", "nagios_web_password" : "password", "nagios_contact": "a\u0040b.c" }}' http://localhost:8080/api/clusters/c1/configurations
-curl -i -X PUT -d '{"config": {"nagios-global": "version1" }}'  http://localhost:8080/api/clusters/c1/services/NAGIOS
-curl -i -X PUT  -d '{"ServiceInfo": {"state" : "INSTALLED"}}' http://localhost:8080/api/clusters/c1/services/NAGIOS/
diff --git a/branch-1.2/ambari-server/src/test/resources/api_testscripts/curl-adduser.sh b/branch-1.2/ambari-server/src/test/resources/api_testscripts/curl-adduser.sh
deleted file mode 100644
index 62bd55a..0000000
--- a/branch-1.2/ambari-server/src/test/resources/api_testscripts/curl-adduser.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-curl -i -o - -X GET http://localhost:8080/api/users
-curl -i -o - -X GET http://localhost:8080/api/users/admin
-#POST - to create a user test3 with password ambari belonging to both roles user and admin, the roles can just be admin
-curl -i -X POST -d '{"Users": {"password" : "ambari", "roles" : "user,admin"}}' http://localhost:8080/api/users/test3
-#PUT -
-#similar to post for update, for password change you will have to do something like:
-curl -i -X PUT -d '{"Users": {"password" : "ambari2", "old_password" : "ambari"}}' http://localhost:8080/api/users/test3
-curl -i -o - -X GET http://localhost:8080/api/users/
-curl -i -o - -X GET http://localhost:8080/api/users/admin
diff --git a/branch-1.2/ambari-server/src/test/resources/api_testscripts/curl-addzk.sh b/branch-1.2/ambari-server/src/test/resources/api_testscripts/curl-addzk.sh
deleted file mode 100644
index 5f698ef..0000000
--- a/branch-1.2/ambari-server/src/test/resources/api_testscripts/curl-addzk.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-curl -i -X POST -d '{"type": "zoo", "tag": "version1", "properties" : { "tickTime" : "20"}}' http://localhost:8080/api/clusters/c1/configurations
-curl -i -X POST http://localhost:8080/api/clusters/c1/services/ZOOKEEPER
-curl -i -X POST http://localhost:8080/api/clusters/c1/services/ZOOKEEPER/components/ZOOKEEPER_SERVER
-curl -i -X PUT -d '{"config": {"zoo": "version1"}}'  http://localhost:8080/api/clusters/c1/services/ZOOKEEPER/components/ZOOKEEPER_SERVER
-curl -i -X POST http://localhost:8080/api/clusters/c1/hosts/localhost.localdomain/host_components/ZOOKEEPER_SERVER
-curl -i -X PUT  -d '{"ServiceInfo": {"state" : "INSTALLED"}}' http://localhost:8080/api/clusters/c1/services/ZOOKEEPER/
diff --git a/branch-1.2/ambari-server/src/test/resources/data.db b/branch-1.2/ambari-server/src/test/resources/data.db
deleted file mode 100644
index 7536b32..0000000
--- a/branch-1.2/ambari-server/src/test/resources/data.db
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-server/src/test/resources/gsInstaller-hosts.txt b/branch-1.2/ambari-server/src/test/resources/gsInstaller-hosts.txt
deleted file mode 100644
index 7d4b74c..0000000
--- a/branch-1.2/ambari-server/src/test/resources/gsInstaller-hosts.txt
+++ /dev/null
@@ -1,35 +0,0 @@
-CLUSTER=ambari
-VERSION=HDP-1.2.0
-HDFS HDFS_CLIENT ip-10-190-97-104.ec2.internal
-HDFS NAMENODE ip-10-8-113-183.ec2.internal
-HDFS SECONDARY_NAMENODE ip-10-8-113-183.ec2.internal
-HDFS DATANODE ip-10-140-16-157.ec2.internal
-MAPREDUCE TASKTRACKER ip-10-140-16-157.ec2.internal
-HDFS DATANODE ip-10-191-122-198.ec2.internal
-MAPREDUCE TASKTRACKER ip-10-191-122-198.ec2.internal
-HDFS DATANODE ip-10-68-150-107.ec2.internal
-MAPREDUCE TASKTRACKER ip-10-68-150-107.ec2.internal
-MAPREDUCE MAPREDUCE_CLIENT ip-10-190-97-104.ec2.internal
-MAPREDUCE JOBTRACKER ip-10-8-113-183.ec2.internal
-PIG PIG ip-10-190-97-104.ec2.internal
-HBASE HBASE_CLIENT ip-10-190-97-104.ec2.internal
-HBASE HBASE_MASTER ip-10-8-113-183.ec2.internal
-HBASE HBASE_REGIONSERVER ip-10-140-16-157.ec2.internal
-HBASE HBASE_REGIONSERVER ip-10-191-122-198.ec2.internal
-HBASE HBASE_REGIONSERVER ip-10-68-150-107.ec2.internal
-ZOOKEEPER ZOOKEEPER_CLIENT ip-10-190-97-104.ec2.internal
-ZOOKEEPER ZOOKEEPER_SERVER ip-10-140-16-157.ec2.internal
-ZOOKEEPER ZOOKEEPER_SERVER ip-10-191-122-198.ec2.internal
-ZOOKEEPER ZOOKEEPER_SERVER ip-10-68-150-107.ec2.internal
-HIVE HIVE_CLIENT ip-10-190-97-104.ec2.internal
-HIVE HIVE_SERVER ip-10-8-113-183.ec2.internal
-HIVE HIVE_METASTORE ip-10-8-113-183.ec2.internal
-HIVE MYSQL_SERVER ip-10-190-97-104.ec2.internal
-HCATALOG HCAT ip-10-190-97-104.ec2.internal
-WEBHCAT WEBHCAT_SERVER ip-10-190-97-104.ec2.internal
-SQOOP SQOOP ip-10-190-97-104.ec2.internal
-OOZIE OOZIE_CLIENT ip-10-190-97-104.ec2.internal
-OOZIE OOZIE_SERVER ip-10-8-113-183.ec2.internal
-GANGLIA GANGLIA ip-10-190-97-104.ec2.internal
-GANGLIA GANGLIA_MONITOR ip-10-190-97-104.ec2.internal
-NAGIOS NAGIOS_SERVER ip-10-190-97-104.ec2.internal
diff --git a/branch-1.2/ambari-server/src/test/resources/hbase_hbasemaster_jmx.json b/branch-1.2/ambari-server/src/test/resources/hbase_hbasemaster_jmx.json
deleted file mode 100644
index 5010796..0000000
--- a/branch-1.2/ambari-server/src/test/resources/hbase_hbasemaster_jmx.json
+++ /dev/null
@@ -1,1183 +0,0 @@
-{
-  "beans" : [ {
-    "name" : "java.lang:type=MemoryPool,name=CMS Old Gen",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "CMS Old Gen",
-    "Type" : "HEAP",
-    "Valid" : true,
-    "Usage" : {
-      "committed" : 65404928,
-      "init" : 65404928,
-      "max" : 1030160384,
-      "used" : 4226480
-    },
-    "CollectionUsage" : {
-      "committed" : 65404928,
-      "init" : 65404928,
-      "max" : 1030160384,
-      "used" : 1680304
-    },
-    "CollectionUsageThreshold" : 0,
-    "CollectionUsageThresholdCount" : 0,
-    "MemoryManagerNames" : [ "ConcurrentMarkSweep" ],
-    "PeakUsage" : {
-      "committed" : 65404928,
-      "init" : 65404928,
-      "max" : 1030160384,
-      "used" : 4226480
-    },
-    "UsageThreshold" : 0,
-    "UsageThresholdCount" : 0,
-    "CollectionUsageThresholdExceeded" : true,
-    "CollectionUsageThresholdSupported" : true,
-    "UsageThresholdExceeded" : true,
-    "UsageThresholdSupported" : true
-  }, {
-    "name" : "java.lang:type=Memory",
-    "modelerType" : "sun.management.MemoryImpl",
-    "Verbose" : false,
-    "HeapMemoryUsage" : {
-      "committed" : 85000192,
-      "init" : 0,
-      "max" : 1069416448,
-      "used" : 4806976
-    },
-    "NonHeapMemoryUsage" : {
-      "committed" : 44236800,
-      "init" : 24313856,
-      "max" : 136314880,
-      "used" : 28971240
-    },
-    "ObjectPendingFinalizationCount" : 0
-  }, {
-    "name" : "hadoop:service=HBase,name=Info",
-    "modelerType" : "org.apache.hadoop.hbase.metrics.HBaseInfo$HBaseInfoMBean",
-    "revision" : "Unknown",
-    "hdfsUser" : "jenkins",
-    "hdfsDate" : "Thu Jan 10 03:38:39 PST 2013",
-    "hdfsUrl" : "",
-    "date" : "Thu Jan 10 03:45:56 PST 2013",
-    "hdfsRevision" : "",
-    "user" : "jenkins",
-    "hdfsVersion" : "1.1.2.21",
-    "url" : "file:///mnt/disk1/jenkins/workspace/BIGTOP-Bimota-Centos6/build/hbase/rpm/BUILD/hbase-0.94.2.21",
-    "version" : "0.94.2.21"
-  }, {
-    "name" : "java.lang:type=MemoryPool,name=Code Cache",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "Code Cache",
-    "Type" : "NON_HEAP",
-    "Valid" : true,
-    "Usage" : {
-      "committed" : 3080192,
-      "init" : 2555904,
-      "max" : 50331648,
-      "used" : 3022208
-    },
-    "CollectionUsage" : null,
-    "MemoryManagerNames" : [ "CodeCacheManager" ],
-    "PeakUsage" : {
-      "committed" : 3080192,
-      "init" : 2555904,
-      "max" : 50331648,
-      "used" : 3023488
-    },
-    "UsageThreshold" : 0,
-    "UsageThresholdCount" : 0,
-    "CollectionUsageThresholdSupported" : false,
-    "UsageThresholdExceeded" : true,
-    "UsageThresholdSupported" : true
-  }, {
-    "name" : "java.lang:type=Runtime",
-    "modelerType" : "sun.management.RuntimeImpl",
-    "Name" : "17489@ip-10-85-118-178.ec2.internal",
-    "ClassPath" : "/etc/hbase/conf:/usr/jdk/jdk1.6.0_31/lib/tools.jar:/usr/lib/hbase/bin/..:/usr/lib/hbase/bin/../hbase-0.94.2.21-tests.jar:/usr/lib/hbase/bin/../hbase-0.94.2.21.jar:/usr/lib/hbase/bin/../lib/activation-1.1.jar:/usr/lib/hbase/bin/../lib/asm-3.1.jar:/usr/lib/hbase/bin/../lib/avro-1.5.3.jar:/usr/lib/hbase/bin/../lib/avro-ipc-1.5.3.jar:/usr/lib/hbase/bin/../lib/commons-beanutils-1.7.0.jar:/usr/lib/hbase/bin/../lib/commons-beanutils-core-1.8.0.jar:/usr/lib/hbase/bin/../lib/commons-cli-1.2.jar:/usr/lib/hbase/bin/../lib/commons-codec-1.4.jar:/usr/lib/hbase/bin/../lib/commons-collections-3.2.1.jar:/usr/lib/hbase/bin/../lib/commons-configuration-1.6.jar:/usr/lib/hbase/bin/../lib/commons-digester-1.8.jar:/usr/lib/hbase/bin/../lib/commons-el-1.0.jar:/usr/lib/hbase/bin/../lib/commons-httpclient-3.1.jar:/usr/lib/hbase/bin/../lib/commons-io-2.1.jar:/usr/lib/hbase/bin/../lib/commons-lang-2.5.jar:/usr/lib/hbase/bin/../lib/commons-logging-1.1.1.jar:/usr/lib/hbase/bin/../lib/commons-math-2.1.jar:/usr/lib/hbase/bin/../lib/commons-net-1.4.1.jar:/usr/lib/hbase/bin/../lib/core-3.1.1.jar:/usr/lib/hbase/bin/../lib/guava-11.0.2.jar:/usr/lib/hbase/bin/../lib/hadoop-core.jar:/usr/lib/hbase/bin/../lib/high-scale-lib-1.1.1.jar:/usr/lib/hbase/bin/../lib/httpclient-4.1.2.jar:/usr/lib/hbase/bin/../lib/httpcore-4.1.3.jar:/usr/lib/hbase/bin/../lib/jackson-core-asl-1.8.8.jar:/usr/lib/hbase/bin/../lib/jackson-jaxrs-1.8.8.jar:/usr/lib/hbase/bin/../lib/jackson-mapper-asl-1.8.8.jar:/usr/lib/hbase/bin/../lib/jackson-xc-1.8.8.jar:/usr/lib/hbase/bin/../lib/jamon-runtime-2.3.1.jar:/usr/lib/hbase/bin/../lib/jasper-compiler-5.5.23.jar:/usr/lib/hbase/bin/../lib/jasper-runtime-5.5.23.jar:/usr/lib/hbase/bin/../lib/jaxb-api-2.1.jar:/usr/lib/hbase/bin/../lib/jaxb-impl-2.2.3-1.jar:/usr/lib/hbase/bin/../lib/jersey-core-1.8.jar:/usr/lib/hbase/bin/../lib/jersey-json-1.8.jar:/usr/lib/hbase/bin/../lib/jersey-server-1.8.jar:/usr/lib/hbase/bin/../lib/jettison-1.1.jar:/usr/lib/hbase/bin/../lib/jetty-6.1.26.jar:/usr/lib/hbase/bin/../lib/jetty-util-6.1.26.jar:/usr/lib/hbase/bin/../lib/jruby-complete-1.6.5.jar:/usr/lib/hbase/bin/../lib/jsp-2.1-6.1.14.jar:/usr/lib/hbase/bin/../lib/jsp-api-2.1-6.1.14.jar:/usr/lib/hbase/bin/../lib/jsr305-1.3.9.jar:/usr/lib/hbase/bin/../lib/junit-4.10-HBASE-1.jar:/usr/lib/hbase/bin/../lib/libthrift-0.8.0.jar:/usr/lib/hbase/bin/../lib/log4j-1.2.16.jar:/usr/lib/hbase/bin/../lib/metrics-core-2.1.2.jar:/usr/lib/hbase/bin/../lib/netty-3.2.4.Final.jar:/usr/lib/hbase/bin/../lib/protobuf-java-2.4.0a.jar:/usr/lib/hbase/bin/../lib/servlet-api-2.5-6.1.14.jar:/usr/lib/hbase/bin/../lib/slf4j-api-1.4.3.jar:/usr/lib/hbase/bin/../lib/slf4j-log4j12-1.4.3.jar:/usr/lib/hbase/bin/../lib/snappy-java-1.0.3.2.jar:/usr/lib/hbase/bin/../lib/stax-api-1.0.1.jar:/usr/lib/hbase/bin/../lib/velocity-1.7.jar:/usr/lib/hbase/bin/../lib/xmlenc-0.52.jar:/usr/lib/hbase/bin/../lib/zookeeper.jar::::/usr/lib/hadoop/libexec/../conf:/usr/jdk/jdk1.6.0_31/lib/tools.jar:/usr/lib/hadoop/libexec/..:/usr/lib/hadoop/libexec/../hadoop-core-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/ambari-log4j-1.0.jar:/usr/lib/hadoop/libexec/../lib/asm-3.2.jar:/usr/lib/hadoop/libexec/../lib/aspectjrt-1.6.11.jar:/usr/lib/hadoop/libexec/../lib/aspectjtools-1.6.11.jar:/usr/lib/hadoop/libexec/../lib/commons-beanutils-1.7.0.jar:/usr/lib/hadoop/libexec/../lib/commons-beanutils-core-1.8.0.jar:/usr/lib/hadoop/libexec/../lib/commons-cli-1.2.jar:/usr/lib/hadoop/libexec/../lib/commons-codec-1.4.jar:/usr/lib/hadoop/libexec/../lib/commons-collections-3.2.1.jar:/usr/lib/hadoop/libe
xec/../lib/commons-configuration-1.6.jar:/usr/lib/hadoop/libexec/../lib/commons-daemon-1.0.1.jar:/usr/lib/hadoop/libexec/../lib/commons-digester-1.8.jar:/usr/lib/hadoop/libexec/../lib/commons-el-1.0.jar:/usr/lib/hadoop/libexec/../lib/commons-httpclient-3.0.1.jar:/usr/lib/hadoop/libexec/../lib/commons-io-2.1.jar:/usr/lib/hadoop/libexec/../lib/commons-lang-2.4.jar:/usr/lib/hadoop/libexec/../lib/commons-logging-1.1.1.jar:/usr/lib/hadoop/libexec/../lib/commons-logging-api-1.0.4.jar:/usr/lib/hadoop/libexec/../lib/commons-math-2.1.jar:/usr/lib/hadoop/libexec/../lib/commons-net-3.1.jar:/usr/lib/hadoop/libexec/../lib/core-3.1.1.jar:/usr/lib/hadoop/libexec/../lib/hadoop-capacity-scheduler-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/hadoop-fairscheduler-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/hadoop-lzo-0.5.0.jar:/usr/lib/hadoop/libexec/../lib/hadoop-thriftfs-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/hadoop-tools.jar:/usr/lib/hadoop/libexec/../lib/hsqldb-1.8.0.10.jar:/usr/lib/hadoop/libexec/../lib/jackson-core-asl-1.8.8.jar:/usr/lib/hadoop/libexec/../lib/jackson-mapper-asl-1.8.8.jar:/usr/lib/hadoop/libexec/../lib/jasper-compiler-5.5.12.jar:/usr/lib/hadoop/libexec/../lib/jasper-runtime-5.5.12.jar:/usr/lib/hadoop/libexec/../lib/jdeb-0.8.jar:/usr/lib/hadoop/libexec/../lib/jersey-core-1.8.jar:/usr/lib/hadoop/libexec/../lib/jersey-json-1.8.jar:/usr/lib/hadoop/libexec/../lib/jersey-server-1.8.jar:/usr/lib/hadoop/libexec/../lib/jets3t-0.6.1.jar:/usr/lib/hadoop/libexec/../lib/jetty-6.1.26.jar:/usr/lib/hadoop/libexec/../lib/jetty-util-6.1.26.jar:/usr/lib/hadoop/libexec/../lib/jsch-0.1.42.jar:/usr/lib/hadoop/libexec/../lib/junit-4.5.jar:/usr/lib/hadoop/libexec/../lib/kfs-0.2.2.jar:/usr/lib/hadoop/libexec/../lib/log4j-1.2.15.jar:/usr/lib/hadoop/libexec/../lib/mockito-all-1.8.5.jar:/usr/lib/hadoop/libexec/../lib/oro-2.0.8.jar:/usr/lib/hadoop/libexec/../lib/postgresql-9.1-901-1.jdbc4.jar:/usr/lib/hadoop/libexec/../lib/servlet-api-2.5-20081211.jar:/usr/lib/hadoop/libexec/../lib/slf4j-api-1.4.3.jar:/usr/lib/hadoop/libexec/../lib/slf4j-log4j12-1.4.3.jar:/usr/lib/hadoop/libexec/../lib/xmlenc-0.52.jar:/usr/lib/hadoop/libexec/../lib/jsp-2.1/jsp-2.1.jar:/usr/lib/hadoop/libexec/../lib/jsp-2.1/jsp-api-2.1.jar",
-    "StartTime" : 1358364475754,
-    "VmName" : "Java HotSpot(TM) 64-Bit Server VM",
-    "VmVendor" : "Sun Microsystems Inc.",
-    "VmVersion" : "20.6-b01",
-    "InputArguments" : [ "-XX:OnOutOfMemoryError=kill", "-9", "%p", "-Xmx1000m", "-ea", "-XX:+UseConcMarkSweepGC", "-XX:+CMSIncrementalMode", "-Xmx1024m", "-Dhbase.log.dir=/var/log/hbase", "-Dhbase.log.file=hbase-hbase-master-ip-10-85-118-178.log", "-Dhbase.home.dir=/usr/lib/hbase/bin/..", "-Dhbase.id.str=hbase", "-Dhbase.root.logger=INFO,DRFA", "-Djava.library.path=/usr/lib/hadoop/libexec/../lib/native/Linux-amd64-64:/usr/lib/hbase/bin/../lib/native/Linux-amd64-64", "-Dhbase.security.logger=INFO,DRFAS" ],
-    "BootClassPath" : "/usr/jdk/jdk1.6.0_31/jre/lib/resources.jar:/usr/jdk/jdk1.6.0_31/jre/lib/rt.jar:/usr/jdk/jdk1.6.0_31/jre/lib/sunrsasign.jar:/usr/jdk/jdk1.6.0_31/jre/lib/jsse.jar:/usr/jdk/jdk1.6.0_31/jre/lib/jce.jar:/usr/jdk/jdk1.6.0_31/jre/lib/charsets.jar:/usr/jdk/jdk1.6.0_31/jre/lib/modules/jdk.boot.jar:/usr/jdk/jdk1.6.0_31/jre/classes",
-    "LibraryPath" : "/usr/lib/hadoop/libexec/../lib/native/Linux-amd64-64:/usr/lib/hbase/bin/../lib/native/Linux-amd64-64",
-    "BootClassPathSupported" : true,
-    "ManagementSpecVersion" : "1.2",
-    "SpecName" : "Java Virtual Machine Specification",
-    "SpecVendor" : "Sun Microsystems Inc.",
-    "SpecVersion" : "1.0",
-    "SystemProperties" : [ {
-      "key" : "java.ext.dirs",
-      "value" : "/usr/jdk/jdk1.6.0_31/jre/lib/ext:/usr/java/packages/lib/ext"
-    }, {
-      "key" : "java.vm.specification.vendor",
-      "value" : "Sun Microsystems Inc."
-    }, {
-      "key" : "hbase.log.dir",
-      "value" : "/var/log/hbase"
-    }, {
-      "key" : "user.timezone",
-      "value" : "America/New_York"
-    }, {
-      "key" : "java.vm.vendor",
-      "value" : "Sun Microsystems Inc."
-    }, {
-      "key" : "user.name",
-      "value" : "hbase"
-    }, {
-      "key" : "java.vm.specification.name",
-      "value" : "Java Virtual Machine Specification"
-    }, {
-      "key" : "user.dir",
-      "value" : "/var/run/hbase"
-    }, {
-      "key" : "user.country",
-      "value" : "US"
-    }, {
-      "key" : "user.language",
-      "value" : "en"
-    }, {
-      "key" : "java.specification.version",
-      "value" : "1.6"
-    }, {
-      "key" : "sun.cpu.endian",
-      "value" : "little"
-    }, {
-      "key" : "java.home",
-      "value" : "/usr/jdk/jdk1.6.0_31/jre"
-    }, {
-      "key" : "hbase.log.file",
-      "value" : "hbase-hbase-master-ip-10-85-118-178.log"
-    }, {
-      "key" : "sun.jnu.encoding",
-      "value" : "ANSI_X3.4-1968"
-    }, {
-      "key" : "file.separator",
-      "value" : "/"
-    }, {
-      "key" : "java.vendor.url",
-      "value" : "http://java.sun.com/"
-    }, {
-      "key" : "java.awt.graphicsenv",
-      "value" : "sun.awt.X11GraphicsEnvironment"
-    }, {
-      "key" : "os.arch",
-      "value" : "amd64"
-    }, {
-      "key" : "java.io.tmpdir",
-      "value" : "/tmp"
-    }, {
-      "key" : "java.runtime.name",
-      "value" : "Java(TM) SE Runtime Environment"
-    }, {
-      "key" : "java.awt.printerjob",
-      "value" : "sun.print.PSPrinterJob"
-    }, {
-      "key" : "file.encoding",
-      "value" : "ANSI_X3.4-1968"
-    }, {
-      "key" : "java.version",
-      "value" : "1.6.0_31"
-    }, {
-      "key" : "java.vendor.url.bug",
-      "value" : "http://java.sun.com/cgi-bin/bugreport.cgi"
-    }, {
-      "key" : "java.vm.specification.version",
-      "value" : "1.0"
-    }, {
-      "key" : "file.encoding.pkg",
-      "value" : "sun.io"
-    }, {
-      "key" : "sun.java.command",
-      "value" : "org.apache.hadoop.hbase.master.HMaster start"
-    }, {
-      "key" : "sun.java.launcher",
-      "value" : "SUN_STANDARD"
-    }, {
-      "key" : "path.separator",
-      "value" : ":"
-    }, {
-      "key" : "java.runtime.version",
-      "value" : "1.6.0_31-b04"
-    }, {
-      "key" : "java.class.path",
-      "value" : "/etc/hbase/conf:/usr/jdk/jdk1.6.0_31/lib/tools.jar:/usr/lib/hbase/bin/..:/usr/lib/hbase/bin/../hbase-0.94.2.21-tests.jar:/usr/lib/hbase/bin/../hbase-0.94.2.21.jar:/usr/lib/hbase/bin/../lib/activation-1.1.jar:/usr/lib/hbase/bin/../lib/asm-3.1.jar:/usr/lib/hbase/bin/../lib/avro-1.5.3.jar:/usr/lib/hbase/bin/../lib/avro-ipc-1.5.3.jar:/usr/lib/hbase/bin/../lib/commons-beanutils-1.7.0.jar:/usr/lib/hbase/bin/../lib/commons-beanutils-core-1.8.0.jar:/usr/lib/hbase/bin/../lib/commons-cli-1.2.jar:/usr/lib/hbase/bin/../lib/commons-codec-1.4.jar:/usr/lib/hbase/bin/../lib/commons-collections-3.2.1.jar:/usr/lib/hbase/bin/../lib/commons-configuration-1.6.jar:/usr/lib/hbase/bin/../lib/commons-digester-1.8.jar:/usr/lib/hbase/bin/../lib/commons-el-1.0.jar:/usr/lib/hbase/bin/../lib/commons-httpclient-3.1.jar:/usr/lib/hbase/bin/../lib/commons-io-2.1.jar:/usr/lib/hbase/bin/../lib/commons-lang-2.5.jar:/usr/lib/hbase/bin/../lib/commons-logging-1.1.1.jar:/usr/lib/hbase/bin/../lib/commons-math-2.1.jar:/usr/lib/hbase/bin/../lib/commons-net-1.4.1.jar:/usr/lib/hbase/bin/../lib/core-3.1.1.jar:/usr/lib/hbase/bin/../lib/guava-11.0.2.jar:/usr/lib/hbase/bin/../lib/hadoop-core.jar:/usr/lib/hbase/bin/../lib/high-scale-lib-1.1.1.jar:/usr/lib/hbase/bin/../lib/httpclient-4.1.2.jar:/usr/lib/hbase/bin/../lib/httpcore-4.1.3.jar:/usr/lib/hbase/bin/../lib/jackson-core-asl-1.8.8.jar:/usr/lib/hbase/bin/../lib/jackson-jaxrs-1.8.8.jar:/usr/lib/hbase/bin/../lib/jackson-mapper-asl-1.8.8.jar:/usr/lib/hbase/bin/../lib/jackson-xc-1.8.8.jar:/usr/lib/hbase/bin/../lib/jamon-runtime-2.3.1.jar:/usr/lib/hbase/bin/../lib/jasper-compiler-5.5.23.jar:/usr/lib/hbase/bin/../lib/jasper-runtime-5.5.23.jar:/usr/lib/hbase/bin/../lib/jaxb-api-2.1.jar:/usr/lib/hbase/bin/../lib/jaxb-impl-2.2.3-1.jar:/usr/lib/hbase/bin/../lib/jersey-core-1.8.jar:/usr/lib/hbase/bin/../lib/jersey-json-1.8.jar:/usr/lib/hbase/bin/../lib/jersey-server-1.8.jar:/usr/lib/hbase/bin/../lib/jettison-1.1.jar:/usr/lib/hbase/bin/../lib/jetty-6.1.26.jar:/usr/lib/hbase/bin/../lib/jetty-util-6.1.26.jar:/usr/lib/hbase/bin/../lib/jruby-complete-1.6.5.jar:/usr/lib/hbase/bin/../lib/jsp-2.1-6.1.14.jar:/usr/lib/hbase/bin/../lib/jsp-api-2.1-6.1.14.jar:/usr/lib/hbase/bin/../lib/jsr305-1.3.9.jar:/usr/lib/hbase/bin/../lib/junit-4.10-HBASE-1.jar:/usr/lib/hbase/bin/../lib/libthrift-0.8.0.jar:/usr/lib/hbase/bin/../lib/log4j-1.2.16.jar:/usr/lib/hbase/bin/../lib/metrics-core-2.1.2.jar:/usr/lib/hbase/bin/../lib/netty-3.2.4.Final.jar:/usr/lib/hbase/bin/../lib/protobuf-java-2.4.0a.jar:/usr/lib/hbase/bin/../lib/servlet-api-2.5-6.1.14.jar:/usr/lib/hbase/bin/../lib/slf4j-api-1.4.3.jar:/usr/lib/hbase/bin/../lib/slf4j-log4j12-1.4.3.jar:/usr/lib/hbase/bin/../lib/snappy-java-1.0.3.2.jar:/usr/lib/hbase/bin/../lib/stax-api-1.0.1.jar:/usr/lib/hbase/bin/../lib/velocity-1.7.jar:/usr/lib/hbase/bin/../lib/xmlenc-0.52.jar:/usr/lib/hbase/bin/../lib/zookeeper.jar::::/usr/lib/hadoop/libexec/../conf:/usr/jdk/jdk1.6.0_31/lib/tools.jar:/usr/lib/hadoop/libexec/..:/usr/lib/hadoop/libexec/../hadoop-core-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/ambari-log4j-1.0.jar:/usr/lib/hadoop/libexec/../lib/asm-3.2.jar:/usr/lib/hadoop/libexec/../lib/aspectjrt-1.6.11.jar:/usr/lib/hadoop/libexec/../lib/aspectjtools-1.6.11.jar:/usr/lib/hadoop/libexec/../lib/commons-beanutils-1.7.0.jar:/usr/lib/hadoop/libexec/../lib/commons-beanutils-core-1.8.0.jar:/usr/lib/hadoop/libexec/../lib/commons-cli-1.2.jar:/usr/lib/hadoop/libexec/../lib/commons-codec-1.4.jar:/usr/lib/hadoop/libexec/../lib/commons-collections-3.2.1.jar:/usr/lib/hadoop/libexe
c/../lib/commons-configuration-1.6.jar:/usr/lib/hadoop/libexec/../lib/commons-daemon-1.0.1.jar:/usr/lib/hadoop/libexec/../lib/commons-digester-1.8.jar:/usr/lib/hadoop/libexec/../lib/commons-el-1.0.jar:/usr/lib/hadoop/libexec/../lib/commons-httpclient-3.0.1.jar:/usr/lib/hadoop/libexec/../lib/commons-io-2.1.jar:/usr/lib/hadoop/libexec/../lib/commons-lang-2.4.jar:/usr/lib/hadoop/libexec/../lib/commons-logging-1.1.1.jar:/usr/lib/hadoop/libexec/../lib/commons-logging-api-1.0.4.jar:/usr/lib/hadoop/libexec/../lib/commons-math-2.1.jar:/usr/lib/hadoop/libexec/../lib/commons-net-3.1.jar:/usr/lib/hadoop/libexec/../lib/core-3.1.1.jar:/usr/lib/hadoop/libexec/../lib/hadoop-capacity-scheduler-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/hadoop-fairscheduler-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/hadoop-lzo-0.5.0.jar:/usr/lib/hadoop/libexec/../lib/hadoop-thriftfs-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/hadoop-tools.jar:/usr/lib/hadoop/libexec/../lib/hsqldb-1.8.0.10.jar:/usr/lib/hadoop/libexec/../lib/jackson-core-asl-1.8.8.jar:/usr/lib/hadoop/libexec/../lib/jackson-mapper-asl-1.8.8.jar:/usr/lib/hadoop/libexec/../lib/jasper-compiler-5.5.12.jar:/usr/lib/hadoop/libexec/../lib/jasper-runtime-5.5.12.jar:/usr/lib/hadoop/libexec/../lib/jdeb-0.8.jar:/usr/lib/hadoop/libexec/../lib/jersey-core-1.8.jar:/usr/lib/hadoop/libexec/../lib/jersey-json-1.8.jar:/usr/lib/hadoop/libexec/../lib/jersey-server-1.8.jar:/usr/lib/hadoop/libexec/../lib/jets3t-0.6.1.jar:/usr/lib/hadoop/libexec/../lib/jetty-6.1.26.jar:/usr/lib/hadoop/libexec/../lib/jetty-util-6.1.26.jar:/usr/lib/hadoop/libexec/../lib/jsch-0.1.42.jar:/usr/lib/hadoop/libexec/../lib/junit-4.5.jar:/usr/lib/hadoop/libexec/../lib/kfs-0.2.2.jar:/usr/lib/hadoop/libexec/../lib/log4j-1.2.15.jar:/usr/lib/hadoop/libexec/../lib/mockito-all-1.8.5.jar:/usr/lib/hadoop/libexec/../lib/oro-2.0.8.jar:/usr/lib/hadoop/libexec/../lib/postgresql-9.1-901-1.jdbc4.jar:/usr/lib/hadoop/libexec/../lib/servlet-api-2.5-20081211.jar:/usr/lib/hadoop/libexec/../lib/slf4j-api-1.4.3.jar:/usr/lib/hadoop/libexec/../lib/slf4j-log4j12-1.4.3.jar:/usr/lib/hadoop/libexec/../lib/xmlenc-0.52.jar:/usr/lib/hadoop/libexec/../lib/jsp-2.1/jsp-2.1.jar:/usr/lib/hadoop/libexec/../lib/jsp-2.1/jsp-api-2.1.jar"
-    }, {
-      "key" : "os.name",
-      "value" : "Linux"
-    }, {
-      "key" : "hbase.home.dir",
-      "value" : "/usr/lib/hbase/bin/.."
-    }, {
-      "key" : "line.separator",
-      "value" : "\n"
-    }, {
-      "key" : "os.version",
-      "value" : "2.6.32-220.17.1.el6.centos.plus.x86_64"
-    }, {
-      "key" : "sun.arch.data.model",
-      "value" : "64"
-    }, {
-      "key" : "java.class.version",
-      "value" : "50.0"
-    }, {
-      "key" : "sun.io.unicode.encoding",
-      "value" : "UnicodeLittle"
-    }, {
-      "key" : "java.vendor",
-      "value" : "Sun Microsystems Inc."
-    }, {
-      "key" : "sun.boot.class.path",
-      "value" : "/usr/jdk/jdk1.6.0_31/jre/lib/resources.jar:/usr/jdk/jdk1.6.0_31/jre/lib/rt.jar:/usr/jdk/jdk1.6.0_31/jre/lib/sunrsasign.jar:/usr/jdk/jdk1.6.0_31/jre/lib/jsse.jar:/usr/jdk/jdk1.6.0_31/jre/lib/jce.jar:/usr/jdk/jdk1.6.0_31/jre/lib/charsets.jar:/usr/jdk/jdk1.6.0_31/jre/lib/modules/jdk.boot.jar:/usr/jdk/jdk1.6.0_31/jre/classes"
-    }, {
-      "key" : "hbase.root.logger",
-      "value" : "INFO,DRFA"
-    }, {
-      "key" : "java.vm.info",
-      "value" : "mixed mode"
-    }, {
-      "key" : "java.specification.name",
-      "value" : "Java Platform API Specification"
-    }, {
-      "key" : "java.vm.name",
-      "value" : "Java HotSpot(TM) 64-Bit Server VM"
-    }, {
-      "key" : "java.vm.version",
-      "value" : "20.6-b01"
-    }, {
-      "key" : "sun.boot.library.path",
-      "value" : "/usr/jdk/jdk1.6.0_31/jre/lib/amd64"
-    }, {
-      "key" : "java.endorsed.dirs",
-      "value" : "/usr/jdk/jdk1.6.0_31/jre/lib/endorsed"
-    }, {
-      "key" : "sun.os.patch.level",
-      "value" : "unknown"
-    }, {
-      "key" : "sun.cpu.isalist",
-      "value" : ""
-    }, {
-      "key" : "hbase.security.logger",
-      "value" : "INFO,DRFAS"
-    }, {
-      "key" : "hbase.id.str",
-      "value" : "hbase"
-    }, {
-      "key" : "user.home",
-      "value" : "/var/run/hbase"
-    }, {
-      "key" : "java.library.path",
-      "value" : "/usr/lib/hadoop/libexec/../lib/native/Linux-amd64-64:/usr/lib/hbase/bin/../lib/native/Linux-amd64-64"
-    }, {
-      "key" : "java.specification.vendor",
-      "value" : "Sun Microsystems Inc."
-    }, {
-      "key" : "sun.management.compiler",
-      "value" : "HotSpot 64-Bit Tiered Compilers"
-    } ],
-    "Uptime" : 94410730
-  }, {
-    "name" : "java.lang:type=ClassLoading",
-    "modelerType" : "sun.management.ClassLoadingImpl",
-    "LoadedClassCount" : 3318,
-    "UnloadedClassCount" : 0,
-    "TotalLoadedClassCount" : 3318,
-    "Verbose" : false
-  }, {
-    "name" : "hadoop:service=Master,name=MasterStatistics",
-    "modelerType" : "org.apache.hadoop.hbase.master.metrics.MasterStatistics",
-    "splitTimeNumOps" : 1,
-    "splitTimeAvgTime" : 0,
-    "splitTimeMinTime" : 0,
-    "splitTimeMaxTime" : 0,
-    "splitSizeNumOps" : 1,
-    "splitSizeAvgTime" : 0,
-    "splitSizeMinTime" : 0,
-    "splitSizeMaxTime" : 0,
-    "cluster_requests" : 0.0
-  }, {
-    "name" : "hadoop:service=Master,name=Master",
-    "modelerType" : "org.apache.hadoop.hbase.master.MXBeanImpl",
-    "ClusterId" : "6ea765df-de99-404a-8ba2-2ae5524e3334",
-    "MasterStartTime" : 1358364479179,
-    "MasterActiveTime" : 1358364479213,
-    "Coprocessors" : [ ],
-    "ServerName" : "ip-10-85-118-178.ec2.internal,60000,1358364478188",
-    "AverageLoad" : 3.0,
-    "RegionsInTransition" : [ ],
-    "RegionServers" : [ {
-      "key" : "ip-10-85-111-149.ec2.internal,60020,1358364532498",
-      "value" : {
-        "coprocessors" : [ ],
-        "load" : 3,
-        "maxHeapMB" : 1004,
-        "memStoreSizeInMB" : 0,
-        "numberOfRegions" : 3,
-        "numberOfRequests" : 0,
-        "regionsLoad" : [ {
-          "key" : [ 117, 115, 101, 114, 116, 97, 98, 108, 101, 44, 44, 49, 51, 53, 56, 51, 54, 52, 53, 56, 50, 51, 56, 53, 46, 50, 50, 52, 52, 99, 48, 99, 101, 57, 55, 102, 102, 56, 51, 50, 98, 101, 53, 52, 100, 101, 99, 56, 98, 97, 49, 49, 54, 54, 53, 48, 99, 46 ],
-          "value" : {
-            "currentCompactedKVs" : 0,
-            "memStoreSizeMB" : 0,
-            "name" : [ 117, 115, 101, 114, 116, 97, 98, 108, 101, 44, 44, 49, 51, 53, 56, 51, 54, 52, 53, 56, 50, 51, 56, 53, 46, 50, 50, 52, 52, 99, 48, 99, 101, 57, 55, 102, 102, 56, 51, 50, 98, 101, 53, 52, 100, 101, 99, 56, 98, 97, 49, 49, 54, 54, 53, 48, 99, 46 ],
-            "nameAsString" : "usertable,,1358364582385.2244c0ce97ff832be54dec8ba116650c.",
-            "readRequestsCount" : 2,
-            "requestsCount" : 3,
-            "rootIndexSizeKB" : 0,
-            "storefileIndexSizeMB" : 0,
-            "storefileSizeMB" : 0,
-            "storefiles" : 0,
-            "stores" : 1,
-            "totalCompactingKVs" : 0,
-            "totalStaticBloomSizeKB" : 0,
-            "totalStaticIndexSizeKB" : 0,
-            "version" : 2,
-            "writeRequestsCount" : 1
-          }
-        }, {
-          "key" : [ 46, 77, 69, 84, 65, 46, 44, 44, 49 ],
-          "value" : {
-            "currentCompactedKVs" : 0,
-            "memStoreSizeMB" : 0,
-            "name" : [ 46, 77, 69, 84, 65, 46, 44, 44, 49 ],
-            "nameAsString" : ".META.,,1",
-            "readRequestsCount" : 653,
-            "requestsCount" : 655,
-            "rootIndexSizeKB" : 0,
-            "storefileIndexSizeMB" : 0,
-            "storefileSizeMB" : 0,
-            "storefiles" : 0,
-            "stores" : 1,
-            "totalCompactingKVs" : 0,
-            "totalStaticBloomSizeKB" : 0,
-            "totalStaticIndexSizeKB" : 0,
-            "version" : 2,
-            "writeRequestsCount" : 2
-          }
-        }, {
-          "key" : [ 45, 82, 79, 79, 84, 45, 44, 44, 48 ],
-          "value" : {
-            "currentCompactedKVs" : 0,
-            "memStoreSizeMB" : 0,
-            "name" : [ 45, 82, 79, 79, 84, 45, 44, 44, 48 ],
-            "nameAsString" : "-ROOT-,,0",
-            "readRequestsCount" : 59,
-            "requestsCount" : 60,
-            "rootIndexSizeKB" : 0,
-            "storefileIndexSizeMB" : 0,
-            "storefileSizeMB" : 0,
-            "storefiles" : 1,
-            "stores" : 1,
-            "totalCompactingKVs" : 0,
-            "totalStaticBloomSizeKB" : 0,
-            "totalStaticIndexSizeKB" : 0,
-            "version" : 2,
-            "writeRequestsCount" : 1
-          }
-        } ],
-        "storefileIndexSizeInMB" : 0,
-        "storefileSizeInMB" : 0,
-        "storefiles" : 1,
-        "totalNumberOfRequests" : 0,
-        "usedHeapMB" : 103,
-        "version" : 2
-      }
-    } ],
-    "ZookeeperQuorum" : "ip-10-116-103-5.ec2.internal:2181,ip-10-85-118-178.ec2.internal:2181,ip-10-85-111-149.ec2.internal:2181",
-    "DeadRegionServers" : [ ],
-    "IsActiveMaster" : true
-  }, {
-    "name" : "java.lang:type=GarbageCollector,name=ConcurrentMarkSweep",
-    "modelerType" : "sun.management.GarbageCollectorImpl",
-    "LastGcInfo" : {
-      "GcThreadCount" : 3,
-      "duration" : 65878,
-      "endTime" : 72276,
-      "id" : 2,
-      "memoryUsageAfterGc" : [ {
-        "key" : "CMS Perm Gen",
-        "value" : {
-          "committed" : 29929472,
-          "init" : 21757952,
-          "max" : 85983232,
-          "used" : 24692864
-        }
-      }, {
-        "key" : "Code Cache",
-        "value" : {
-          "committed" : 2555904,
-          "init" : 2555904,
-          "max" : 50331648,
-          "used" : 1380480
-        }
-      }, {
-        "key" : "CMS Old Gen",
-        "value" : {
-          "committed" : 65404928,
-          "init" : 65404928,
-          "max" : 1030160384,
-          "used" : 1680304
-        }
-      }, {
-        "key" : "Par Eden Space",
-        "value" : {
-          "committed" : 17432576,
-          "init" : 17432576,
-          "max" : 34930688,
-          "used" : 8752024
-        }
-      }, {
-        "key" : "Par Survivor Space",
-        "value" : {
-          "committed" : 2162688,
-          "init" : 2162688,
-          "max" : 4325376,
-          "used" : 2122944
-        }
-      } ],
-      "memoryUsageBeforeGc" : [ {
-        "key" : "CMS Perm Gen",
-        "value" : {
-          "committed" : 29929472,
-          "init" : 21757952,
-          "max" : 85983232,
-          "used" : 22330776
-        }
-      }, {
-        "key" : "Code Cache",
-        "value" : {
-          "committed" : 2555904,
-          "init" : 2555904,
-          "max" : 50331648,
-          "used" : 1103040
-        }
-      }, {
-        "key" : "CMS Old Gen",
-        "value" : {
-          "committed" : 65404928,
-          "init" : 65404928,
-          "max" : 1030160384,
-          "used" : 779512
-        }
-      }, {
-        "key" : "Par Eden Space",
-        "value" : {
-          "committed" : 17432576,
-          "init" : 17432576,
-          "max" : 34930688,
-          "used" : 8419616
-        }
-      }, {
-        "key" : "Par Survivor Space",
-        "value" : {
-          "committed" : 2162688,
-          "init" : 2162688,
-          "max" : 4325376,
-          "used" : 1933744
-        }
-      } ],
-      "startTime" : 6398
-    },
-    "CollectionCount" : 2,
-    "CollectionTime" : 52,
-    "Name" : "ConcurrentMarkSweep",
-    "Valid" : true,
-    "MemoryPoolNames" : [ "Par Eden Space", "Par Survivor Space", "CMS Old Gen", "CMS Perm Gen" ]
-  }, {
-    "name" : "java.lang:type=Threading",
-    "modelerType" : "sun.management.ThreadImpl",
-    "ThreadAllocatedMemoryEnabled" : true,
-    "ThreadAllocatedMemorySupported" : true,
-    "ThreadContentionMonitoringEnabled" : false,
-    "AllThreadIds" : [ 3816, 3772, 122, 109, 105, 104, 103, 102, 88, 97, 96, 95, 94, 91, 90, 89, 63, 10, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 11, 23, 25, 24, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 5, 3, 2, 1 ],
-    "DaemonThreadCount" : 64,
-    "PeakThreadCount" : 77,
-    "CurrentThreadCpuTimeSupported" : true,
-    "ObjectMonitorUsageSupported" : true,
-    "SynchronizerUsageSupported" : true,
-    "ThreadContentionMonitoringSupported" : true,
-    "ThreadCpuTimeEnabled" : true,
-    "CurrentThreadCpuTime" : 370000000,
-    "CurrentThreadUserTime" : 350000000,
-    "ThreadCount" : 72,
-    "TotalStartedThreadCount" : 3809,
-    "ThreadCpuTimeSupported" : true
-  }, {
-    "name" : "java.util.logging:type=Logging",
-    "modelerType" : "java.util.logging.Logging",
-    "LoggerNames" : [ "sun.awt.AppContext", "javax.management", "global", "javax.management.mbeanserver", "" ]
-  }, {
-    "name" : "java.lang:type=Compilation",
-    "modelerType" : "sun.management.CompilationImpl",
-    "Name" : "HotSpot 64-Bit Tiered Compilers",
-    "CompilationTimeMonitoringSupported" : true,
-    "TotalCompilationTime" : 17603
-  }, {
-    "name" : "java.lang:type=GarbageCollector,name=ParNew",
-    "modelerType" : "sun.management.GarbageCollectorImpl",
-    "LastGcInfo" : {
-      "GcThreadCount" : 3,
-      "duration" : 2,
-      "endTime" : 94402220,
-      "id" : 646,
-      "memoryUsageAfterGc" : [ {
-        "key" : "CMS Perm Gen",
-        "value" : {
-          "committed" : 41156608,
-          "init" : 21757952,
-          "max" : 85983232,
-          "used" : 25944224
-        }
-      }, {
-        "key" : "Code Cache",
-        "value" : {
-          "committed" : 3080192,
-          "init" : 2555904,
-          "max" : 50331648,
-          "used" : 3022208
-        }
-      }, {
-        "key" : "CMS Old Gen",
-        "value" : {
-          "committed" : 65404928,
-          "init" : 65404928,
-          "max" : 1030160384,
-          "used" : 4226480
-        }
-      }, {
-        "key" : "Par Eden Space",
-        "value" : {
-          "committed" : 17432576,
-          "init" : 17432576,
-          "max" : 34930688,
-          "used" : 0
-        }
-      }, {
-        "key" : "Par Survivor Space",
-        "value" : {
-          "committed" : 2162688,
-          "init" : 2162688,
-          "max" : 4325376,
-          "used" : 100136
-        }
-      } ],
-      "memoryUsageBeforeGc" : [ {
-        "key" : "CMS Perm Gen",
-        "value" : {
-          "committed" : 41156608,
-          "init" : 21757952,
-          "max" : 85983232,
-          "used" : 25944224
-        }
-      }, {
-        "key" : "Code Cache",
-        "value" : {
-          "committed" : 3080192,
-          "init" : 2555904,
-          "max" : 50331648,
-          "used" : 3022208
-        }
-      }, {
-        "key" : "CMS Old Gen",
-        "value" : {
-          "committed" : 65404928,
-          "init" : 65404928,
-          "max" : 1030160384,
-          "used" : 4226480
-        }
-      }, {
-        "key" : "Par Eden Space",
-        "value" : {
-          "committed" : 17432576,
-          "init" : 17432576,
-          "max" : 34930688,
-          "used" : 17432576
-        }
-      }, {
-        "key" : "Par Survivor Space",
-        "value" : {
-          "committed" : 2162688,
-          "init" : 2162688,
-          "max" : 4325376,
-          "used" : 242504
-        }
-      } ],
-      "startTime" : 94402218
-    },
-    "CollectionCount" : 646,
-    "CollectionTime" : 1685,
-    "Name" : "ParNew",
-    "Valid" : true,
-    "MemoryPoolNames" : [ "Par Eden Space", "Par Survivor Space" ]
-  }, {
-    "name" : "java.lang:type=MemoryPool,name=Par Eden Space",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "Par Eden Space",
-    "Type" : "HEAP",
-    "Valid" : true,
-    "Usage" : {
-      "committed" : 17432576,
-      "init" : 17432576,
-      "max" : 34930688,
-      "used" : 575008
-    },
-    "CollectionUsage" : {
-      "committed" : 17432576,
-      "init" : 17432576,
-      "max" : 34930688,
-      "used" : 0
-    },
-    "CollectionUsageThreshold" : 0,
-    "CollectionUsageThresholdCount" : 0,
-    "MemoryManagerNames" : [ "ConcurrentMarkSweep", "ParNew" ],
-    "PeakUsage" : {
-      "committed" : 17432576,
-      "init" : 17432576,
-      "max" : 34930688,
-      "used" : 17432576
-    },
-    "CollectionUsageThresholdExceeded" : true,
-    "CollectionUsageThresholdSupported" : true,
-    "UsageThresholdSupported" : false
-  }, {
-    "name" : "hadoop:service=HBase,name=RPCStatistics-60000",
-    "modelerType" : "org.apache.hadoop.hbase.ipc.HBaseRPCStatistics",
-    "enableTableNumOps" : 0,
-    "enableTableAvgTime" : 0,
-    "enableTableMinTime" : -1,
-    "enableTableMaxTime" : 0,
-    "assignNumOps" : 0,
-    "assignAvgTime" : 0,
-    "assignMinTime" : -1,
-    "assignMaxTime" : 0,
-    "enableTable.aboveOneSec.NumOps" : 0,
-    "enableTable.aboveOneSec.AvgTime" : 0,
-    "enableTable.aboveOneSec.MinTime" : -1,
-    "enableTable.aboveOneSec.MaxTime" : 0,
-    "RpcSlowResponseNumOps" : 0,
-    "RpcSlowResponseAvgTime" : 0,
-    "RpcSlowResponseMinTime" : -1,
-    "RpcSlowResponseMaxTime" : 0,
-    "getStoreFileListNumOps" : 0,
-    "getStoreFileListAvgTime" : 0,
-    "getStoreFileListMinTime" : -1,
-    "getStoreFileListMaxTime" : 0,
-    "getProtocolVersion.aboveOneSec.NumOps" : 0,
-    "getProtocolVersion.aboveOneSec.AvgTime" : 0,
-    "getProtocolVersion.aboveOneSec.MinTime" : -1,
-    "getProtocolVersion.aboveOneSec.MaxTime" : 0,
-    "getAlterStatus.aboveOneSec.NumOps" : 0,
-    "getAlterStatus.aboveOneSec.AvgTime" : 0,
-    "getAlterStatus.aboveOneSec.MinTime" : -1,
-    "getAlterStatus.aboveOneSec.MaxTime" : 0,
-    "moveNumOps" : 0,
-    "moveAvgTime" : 0,
-    "moveMinTime" : -1,
-    "moveMaxTime" : 0,
-    "openRegionNumOps" : 0,
-    "openRegionAvgTime" : 0,
-    "openRegionMinTime" : -1,
-    "openRegionMaxTime" : 0,
-    "incrementNumOps" : 0,
-    "incrementAvgTime" : 0,
-    "incrementMinTime" : -1,
-    "incrementMaxTime" : 0,
-    "regionServerStartup.aboveOneSec.NumOps" : 0,
-    "regionServerStartup.aboveOneSec.AvgTime" : 0,
-    "regionServerStartup.aboveOneSec.MinTime" : -1,
-    "regionServerStartup.aboveOneSec.MaxTime" : 0,
-    "deleteTableNumOps" : 0,
-    "deleteTableAvgTime" : 0,
-    "deleteTableMinTime" : -1,
-    "deleteTableMaxTime" : 0,
-    "balance.aboveOneSec.NumOps" : 0,
-    "balance.aboveOneSec.AvgTime" : 0,
-    "balance.aboveOneSec.MinTime" : -1,
-    "balance.aboveOneSec.MaxTime" : 0,
-    "execCoprocessorNumOps" : 0,
-    "execCoprocessorAvgTime" : 0,
-    "execCoprocessorMinTime" : -1,
-    "execCoprocessorMaxTime" : 0,
-    "getHTableDescriptorsNumOps" : 0,
-    "getHTableDescriptorsAvgTime" : 0,
-    "getHTableDescriptorsMinTime" : 7,
-    "getHTableDescriptorsMaxTime" : 1763,
-    "addColumnNumOps" : 0,
-    "addColumnAvgTime" : 0,
-    "addColumnMinTime" : -1,
-    "addColumnMaxTime" : 0,
-    "offline.aboveOneSec.NumOps" : 0,
-    "offline.aboveOneSec.AvgTime" : 0,
-    "offline.aboveOneSec.MinTime" : -1,
-    "offline.aboveOneSec.MaxTime" : 0,
-    "multiNumOps" : 0,
-    "multiAvgTime" : 0,
-    "multiMinTime" : -1,
-    "multiMaxTime" : 0,
-    "closeRegionNumOps" : 0,
-    "closeRegionAvgTime" : 0,
-    "closeRegionMinTime" : -1,
-    "closeRegionMaxTime" : 0,
-    "disableTableNumOps" : 0,
-    "disableTableAvgTime" : 0,
-    "disableTableMinTime" : -1,
-    "disableTableMaxTime" : 0,
-    "bulkLoadHFilesNumOps" : 0,
-    "bulkLoadHFilesAvgTime" : 0,
-    "bulkLoadHFilesMinTime" : -1,
-    "bulkLoadHFilesMaxTime" : 0,
-    "putNumOps" : 0,
-    "putAvgTime" : 0,
-    "putMinTime" : -1,
-    "putMaxTime" : 0,
-    "createTableNumOps" : 0,
-    "createTableAvgTime" : 0,
-    "createTableMinTime" : 38,
-    "createTableMaxTime" : 38,
-    "nextNumOps" : 0,
-    "nextAvgTime" : 0,
-    "nextMinTime" : -1,
-    "nextMaxTime" : 0,
-    "unlockRowNumOps" : 0,
-    "unlockRowAvgTime" : 0,
-    "unlockRowMinTime" : -1,
-    "unlockRowMaxTime" : 0,
-    "reportRSFatalErrorNumOps" : 0,
-    "reportRSFatalErrorAvgTime" : 0,
-    "reportRSFatalErrorMinTime" : -1,
-    "reportRSFatalErrorMaxTime" : 0,
-    "rpcAuthenticationFailures" : 0,
-    "getRegionInfoNumOps" : 0,
-    "getRegionInfoAvgTime" : 0,
-    "getRegionInfoMinTime" : -1,
-    "getRegionInfoMaxTime" : 0,
-    "openScannerNumOps" : 0,
-    "openScannerAvgTime" : 0,
-    "openScannerMinTime" : -1,
-    "openScannerMaxTime" : 0,
-    "offlineNumOps" : 0,
-    "offlineAvgTime" : 0,
-    "offlineMinTime" : -1,
-    "offlineMaxTime" : 0,
-    "getAlterStatusNumOps" : 0,
-    "getAlterStatusAvgTime" : 0,
-    "getAlterStatusMinTime" : -1,
-    "getAlterStatusMaxTime" : 0,
-    "deleteNumOps" : 0,
-    "deleteAvgTime" : 0,
-    "deleteMinTime" : -1,
-    "deleteMaxTime" : 0,
-    "RpcProcessingTimeNumOps" : 4,
-    "RpcProcessingTimeAvgTime" : 0,
-    "RpcProcessingTimeMinTime" : 0,
-    "RpcProcessingTimeMaxTime" : 1763,
-    "move.aboveOneSec.NumOps" : 0,
-    "move.aboveOneSec.AvgTime" : 0,
-    "move.aboveOneSec.MinTime" : -1,
-    "move.aboveOneSec.MaxTime" : 0,
-    "getLastFlushTimeNumOps" : 0,
-    "getLastFlushTimeAvgTime" : 0,
-    "getLastFlushTimeMinTime" : -1,
-    "getLastFlushTimeMaxTime" : 0,
-    "shutdownNumOps" : 0,
-    "shutdownAvgTime" : 0,
-    "shutdownMinTime" : -1,
-    "shutdownMaxTime" : 0,
-    "openRegionsNumOps" : 0,
-    "openRegionsAvgTime" : 0,
-    "openRegionsMinTime" : -1,
-    "openRegionsMaxTime" : 0,
-    "getClosestRowBeforeNumOps" : 0,
-    "getClosestRowBeforeAvgTime" : 0,
-    "getClosestRowBeforeMinTime" : -1,
-    "getClosestRowBeforeMaxTime" : 0,
-    "getHServerInfoNumOps" : 0,
-    "getHServerInfoAvgTime" : 0,
-    "getHServerInfoMinTime" : -1,
-    "getHServerInfoMaxTime" : 0,
-    "getProtocolSignatureNumOps" : 0,
-    "getProtocolSignatureAvgTime" : 0,
-    "getProtocolSignatureMinTime" : -1,
-    "getProtocolSignatureMaxTime" : 0,
-    "replicationCallQueueLen" : 0,
-    "SentBytes" : 64,
-    "existsNumOps" : 0,
-    "existsAvgTime" : 0,
-    "existsMinTime" : -1,
-    "existsMaxTime" : 0,
-    "shutdown.aboveOneSec.NumOps" : 0,
-    "shutdown.aboveOneSec.AvgTime" : 0,
-    "shutdown.aboveOneSec.MinTime" : -1,
-    "shutdown.aboveOneSec.MaxTime" : 0,
-    "regionServerStartupNumOps" : 0,
-    "regionServerStartupAvgTime" : 0,
-    "regionServerStartupMinTime" : 7,
-    "regionServerStartupMaxTime" : 7,
-    "compactRegionNumOps" : 0,
-    "compactRegionAvgTime" : 0,
-    "compactRegionMinTime" : -1,
-    "compactRegionMaxTime" : 0,
-    "unassign.aboveOneSec.NumOps" : 0,
-    "unassign.aboveOneSec.AvgTime" : 0,
-    "unassign.aboveOneSec.MinTime" : -1,
-    "unassign.aboveOneSec.MaxTime" : 0,
-    "balanceSwitchNumOps" : 0,
-    "balanceSwitchAvgTime" : 0,
-    "balanceSwitchMinTime" : -1,
-    "balanceSwitchMaxTime" : 0,
-    "rollHLogWriterNumOps" : 0,
-    "rollHLogWriterAvgTime" : 0,
-    "rollHLogWriterMinTime" : -1,
-    "rollHLogWriterMaxTime" : 0,
-    "splitRegionNumOps" : 0,
-    "splitRegionAvgTime" : 0,
-    "splitRegionMinTime" : -1,
-    "splitRegionMaxTime" : 0,
-    "ReceivedBytes" : 1008,
-    "isMasterRunning.aboveOneSec.NumOps" : 0,
-    "isMasterRunning.aboveOneSec.AvgTime" : 0,
-    "isMasterRunning.aboveOneSec.MinTime" : -1,
-    "isMasterRunning.aboveOneSec.MaxTime" : 0,
-    "addColumn.aboveOneSec.NumOps" : 0,
-    "addColumn.aboveOneSec.AvgTime" : 0,
-    "addColumn.aboveOneSec.MinTime" : -1,
-    "addColumn.aboveOneSec.MaxTime" : 0,
-    "createTable.aboveOneSec.NumOps" : 0,
-    "createTable.aboveOneSec.AvgTime" : 0,
-    "createTable.aboveOneSec.MinTime" : -1,
-    "createTable.aboveOneSec.MaxTime" : 0,
-    "getOnlineRegionsNumOps" : 0,
-    "getOnlineRegionsAvgTime" : 0,
-    "getOnlineRegionsMinTime" : -1,
-    "getOnlineRegionsMaxTime" : 0,
-    "closeNumOps" : 0,
-    "closeAvgTime" : 0,
-    "closeMinTime" : -1,
-    "closeMaxTime" : 0,
-    "balanceNumOps" : 0,
-    "balanceAvgTime" : 0,
-    "balanceMinTime" : -1,
-    "balanceMaxTime" : 0,
-    "getProtocolSignature.aboveOneSec.NumOps" : 0,
-    "getProtocolSignature.aboveOneSec.AvgTime" : 0,
-    "getProtocolSignature.aboveOneSec.MinTime" : -1,
-    "getProtocolSignature.aboveOneSec.MaxTime" : 0,
-    "getClusterStatusNumOps" : 0,
-    "getClusterStatusAvgTime" : 0,
-    "getClusterStatusMinTime" : -1,
-    "getClusterStatusMaxTime" : 0,
-    "balanceSwitch.aboveOneSec.NumOps" : 0,
-    "balanceSwitch.aboveOneSec.AvgTime" : 0,
-    "balanceSwitch.aboveOneSec.MinTime" : -1,
-    "balanceSwitch.aboveOneSec.MaxTime" : 0,
-    "modifyTable.aboveOneSec.NumOps" : 0,
-    "modifyTable.aboveOneSec.AvgTime" : 0,
-    "modifyTable.aboveOneSec.MinTime" : -1,
-    "modifyTable.aboveOneSec.MaxTime" : 0,
-    "appendNumOps" : 0,
-    "appendAvgTime" : 0,
-    "appendMinTime" : -1,
-    "appendMaxTime" : 0,
-    "getBlockCacheColumnFamilySummariesNumOps" : 0,
-    "getBlockCacheColumnFamilySummariesAvgTime" : 0,
-    "getBlockCacheColumnFamilySummariesMinTime" : -1,
-    "getBlockCacheColumnFamilySummariesMaxTime" : 0,
-    "synchronousBalanceSwitchNumOps" : 0,
-    "synchronousBalanceSwitchAvgTime" : 0,
-    "synchronousBalanceSwitchMinTime" : -1,
-    "synchronousBalanceSwitchMaxTime" : 0,
-    "getNumOps" : 0,
-    "getAvgTime" : 0,
-    "getMinTime" : -1,
-    "getMaxTime" : 0,
-    "stopMasterNumOps" : 0,
-    "stopMasterAvgTime" : 0,
-    "stopMasterMinTime" : -1,
-    "stopMasterMaxTime" : 0,
-    "priorityCallQueueLen" : 0,
-    "checkAndPutNumOps" : 0,
-    "checkAndPutAvgTime" : 0,
-    "checkAndPutMinTime" : -1,
-    "checkAndPutMaxTime" : 0,
-    "deleteColumnNumOps" : 0,
-    "deleteColumnAvgTime" : 0,
-    "deleteColumnMinTime" : -1,
-    "deleteColumnMaxTime" : 0,
-    "disableTable.aboveOneSec.NumOps" : 0,
-    "disableTable.aboveOneSec.AvgTime" : 0,
-    "disableTable.aboveOneSec.MinTime" : -1,
-    "disableTable.aboveOneSec.MaxTime" : 0,
-    "stopMaster.aboveOneSec.NumOps" : 0,
-    "stopMaster.aboveOneSec.AvgTime" : 0,
-    "stopMaster.aboveOneSec.MinTime" : -1,
-    "stopMaster.aboveOneSec.MaxTime" : 0,
-    "callQueueLen" : 0,
-    "replicateLogEntriesNumOps" : 0,
-    "replicateLogEntriesAvgTime" : 0,
-    "replicateLogEntriesMinTime" : -1,
-    "replicateLogEntriesMaxTime" : 0,
-    "rpcAuthorizationSuccesses" : 0,
-    "stopNumOps" : 0,
-    "stopAvgTime" : 0,
-    "stopMinTime" : -1,
-    "stopMaxTime" : 0,
-    "incrementColumnValueNumOps" : 0,
-    "incrementColumnValueAvgTime" : 0,
-    "incrementColumnValueMinTime" : -1,
-    "incrementColumnValueMaxTime" : 0,
-    "flushRegionNumOps" : 0,
-    "flushRegionAvgTime" : 0,
-    "flushRegionMinTime" : -1,
-    "flushRegionMaxTime" : 0,
-    "unassignNumOps" : 0,
-    "unassignAvgTime" : 0,
-    "unassignMinTime" : -1,
-    "unassignMaxTime" : 0,
-    "getClusterStatus.aboveOneSec.NumOps" : 0,
-    "getClusterStatus.aboveOneSec.AvgTime" : 0,
-    "getClusterStatus.aboveOneSec.MinTime" : -1,
-    "getClusterStatus.aboveOneSec.MaxTime" : 0,
-    "reportRSFatalError.aboveOneSec.NumOps" : 0,
-    "reportRSFatalError.aboveOneSec.AvgTime" : 0,
-    "reportRSFatalError.aboveOneSec.MinTime" : -1,
-    "reportRSFatalError.aboveOneSec.MaxTime" : 0,
-    "NumOpenConnections" : 1,
-    "rpcAuthenticationSuccesses" : 0,
-    "mutateRowNumOps" : 0,
-    "mutateRowAvgTime" : 0,
-    "mutateRowMinTime" : -1,
-    "mutateRowMaxTime" : 0,
-    "modifyTableNumOps" : 0,
-    "modifyTableAvgTime" : 0,
-    "modifyTableMinTime" : -1,
-    "modifyTableMaxTime" : 0,
-    "synchronousBalanceSwitch.aboveOneSec.NumOps" : 0,
-    "synchronousBalanceSwitch.aboveOneSec.AvgTime" : 0,
-    "synchronousBalanceSwitch.aboveOneSec.MinTime" : -1,
-    "synchronousBalanceSwitch.aboveOneSec.MaxTime" : 0,
-    "rpcAuthorizationFailures" : 0,
-    "getProtocolVersionNumOps" : 0,
-    "getProtocolVersionAvgTime" : 0,
-    "getProtocolVersionMinTime" : 0,
-    "getProtocolVersionMaxTime" : 1,
-    "RpcQueueTimeNumOps" : 4,
-    "RpcQueueTimeAvgTime" : 0,
-    "RpcQueueTimeMinTime" : 0,
-    "RpcQueueTimeMaxTime" : 112,
-    "checkAndDeleteNumOps" : 0,
-    "checkAndDeleteAvgTime" : 0,
-    "checkAndDeleteMinTime" : -1,
-    "checkAndDeleteMaxTime" : 0,
-    "deleteTable.aboveOneSec.NumOps" : 0,
-    "deleteTable.aboveOneSec.AvgTime" : 0,
-    "deleteTable.aboveOneSec.MinTime" : -1,
-    "deleteTable.aboveOneSec.MaxTime" : 0,
-    "isMasterRunningNumOps" : 0,
-    "isMasterRunningAvgTime" : 0,
-    "isMasterRunningMinTime" : 0,
-    "isMasterRunningMaxTime" : 1,
-    "modifyColumnNumOps" : 0,
-    "modifyColumnAvgTime" : 0,
-    "modifyColumnMinTime" : -1,
-    "modifyColumnMaxTime" : 0,
-    "lockRowNumOps" : 0,
-    "lockRowAvgTime" : 0,
-    "lockRowMinTime" : -1,
-    "lockRowMaxTime" : 0,
-    "modifyColumn.aboveOneSec.NumOps" : 0,
-    "modifyColumn.aboveOneSec.AvgTime" : 0,
-    "modifyColumn.aboveOneSec.MinTime" : -1,
-    "modifyColumn.aboveOneSec.MaxTime" : 0,
-    "regionServerReport.aboveOneSec.NumOps" : 0,
-    "regionServerReport.aboveOneSec.AvgTime" : 0,
-    "regionServerReport.aboveOneSec.MinTime" : -1,
-    "regionServerReport.aboveOneSec.MaxTime" : 0,
-    "getCompactionStateNumOps" : 0,
-    "getCompactionStateAvgTime" : 0,
-    "getCompactionStateMinTime" : -1,
-    "getCompactionStateMaxTime" : 0,
-    "assign.aboveOneSec.NumOps" : 0,
-    "assign.aboveOneSec.AvgTime" : 0,
-    "assign.aboveOneSec.MinTime" : -1,
-    "assign.aboveOneSec.MaxTime" : 0,
-    "regionServerReportNumOps" : 4,
-    "regionServerReportAvgTime" : 0,
-    "regionServerReportMinTime" : 0,
-    "regionServerReportMaxTime" : 48,
-    "deleteColumn.aboveOneSec.NumOps" : 0,
-    "deleteColumn.aboveOneSec.AvgTime" : 0,
-    "deleteColumn.aboveOneSec.MinTime" : -1,
-    "deleteColumn.aboveOneSec.MaxTime" : 0,
-    "getHTableDescriptors.aboveOneSec.NumOps" : 0,
-    "getHTableDescriptors.aboveOneSec.AvgTime" : 0,
-    "getHTableDescriptors.aboveOneSec.MinTime" : 1356,
-    "getHTableDescriptors.aboveOneSec.MaxTime" : 1763
-  }, {
-    "name" : "com.sun.management:type=HotSpotDiagnostic",
-    "modelerType" : "sun.management.HotSpotDiagnostic",
-    "DiagnosticOptions" : [ {
-      "name" : "HeapDumpBeforeFullGC",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "HeapDumpAfterFullGC",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "HeapDumpOnOutOfMemoryError",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "HeapDumpPath",
-      "origin" : "DEFAULT",
-      "value" : "",
-      "writeable" : true
-    }, {
-      "name" : "CMSAbortablePrecleanWaitMillis",
-      "origin" : "DEFAULT",
-      "value" : "100",
-      "writeable" : true
-    }, {
-      "name" : "CMSWaitDuration",
-      "origin" : "DEFAULT",
-      "value" : "2000",
-      "writeable" : true
-    }, {
-      "name" : "PrintGC",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintGCDetails",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintGCDateStamps",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintGCTimeStamps",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintClassHistogramBeforeFullGC",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintClassHistogramAfterFullGC",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintClassHistogram",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintConcurrentLocks",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    } ]
-  }, {
-    "name" : "java.lang:type=MemoryPool,name=CMS Perm Gen",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "CMS Perm Gen",
-    "Type" : "NON_HEAP",
-    "Valid" : true,
-    "Usage" : {
-      "committed" : 41156608,
-      "init" : 21757952,
-      "max" : 85983232,
-      "used" : 25949032
-    },
-    "CollectionUsage" : {
-      "committed" : 29929472,
-      "init" : 21757952,
-      "max" : 85983232,
-      "used" : 24692864
-    },
-    "CollectionUsageThreshold" : 0,
-    "CollectionUsageThresholdCount" : 0,
-    "MemoryManagerNames" : [ "ConcurrentMarkSweep" ],
-    "PeakUsage" : {
-      "committed" : 41156608,
-      "init" : 21757952,
-      "max" : 85983232,
-      "used" : 25949032
-    },
-    "UsageThreshold" : 0,
-    "UsageThresholdCount" : 0,
-    "CollectionUsageThresholdExceeded" : true,
-    "CollectionUsageThresholdSupported" : true,
-    "UsageThresholdExceeded" : true,
-    "UsageThresholdSupported" : true
-  }, {
-    "name" : "java.lang:type=OperatingSystem",
-    "modelerType" : "com.sun.management.UnixOperatingSystem",
-    "MaxFileDescriptorCount" : 32768,
-    "OpenFileDescriptorCount" : 181,
-    "CommittedVirtualMemorySize" : 1626152960,
-    "FreePhysicalMemorySize" : 4532117504,
-    "FreeSwapSpaceSize" : 0,
-    "ProcessCpuTime" : 382410000000,
-    "TotalPhysicalMemorySize" : 7694454784,
-    "TotalSwapSpaceSize" : 0,
-    "Name" : "Linux",
-    "Version" : "2.6.32-220.17.1.el6.centos.plus.x86_64",
-    "AvailableProcessors" : 2,
-    "Arch" : "amd64",
-    "SystemLoadAverage" : 0.54
-  }, {
-    "name" : "java.lang:type=MemoryPool,name=Par Survivor Space",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "Par Survivor Space",
-    "Type" : "HEAP",
-    "Valid" : true,
-    "Usage" : {
-      "committed" : 2162688,
-      "init" : 2162688,
-      "max" : 4325376,
-      "used" : 100136
-    },
-    "CollectionUsage" : {
-      "committed" : 2162688,
-      "init" : 2162688,
-      "max" : 4325376,
-      "used" : 100136
-    },
-    "CollectionUsageThreshold" : 0,
-    "CollectionUsageThresholdCount" : 0,
-    "MemoryManagerNames" : [ "ConcurrentMarkSweep", "ParNew" ],
-    "PeakUsage" : {
-      "committed" : 2162688,
-      "init" : 2162688,
-      "max" : 4325376,
-      "used" : 2122944
-    },
-    "CollectionUsageThresholdExceeded" : true,
-    "CollectionUsageThresholdSupported" : true,
-    "UsageThresholdSupported" : false
-  }, {
-    "name" : "JMImplementation:type=MBeanServerDelegate",
-    "modelerType" : "javax.management.MBeanServerDelegate",
-    "MBeanServerId" : "ip-10-85-118-178.ec2.internal_1358364478079",
-    "SpecificationName" : "Java Management Extensions",
-    "SpecificationVersion" : "1.4",
-    "SpecificationVendor" : "Sun Microsystems",
-    "ImplementationName" : "JMX",
-    "ImplementationVersion" : "1.6.0_31-b04",
-    "ImplementationVendor" : "Sun Microsystems"
-  }, {
-    "name" : "java.lang:type=MemoryManager,name=CodeCacheManager",
-    "modelerType" : "sun.management.MemoryManagerImpl",
-    "Name" : "CodeCacheManager",
-    "Valid" : true,
-    "MemoryPoolNames" : [ "Code Cache" ]
-  } ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-server/src/test/resources/hdfs_datanode_jmx.json b/branch-1.2/ambari-server/src/test/resources/hdfs_datanode_jmx.json
deleted file mode 100644
index 9037bcb..0000000
--- a/branch-1.2/ambari-server/src/test/resources/hdfs_datanode_jmx.json
+++ /dev/null
@@ -1,752 +0,0 @@
-{
-  "beans" : [ {
-    "name" : "java.lang:type=MemoryPool,name=PS Eden Space",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "PS Eden Space",
-    "Type" : "HEAP",
-    "Valid" : true,
-    "CollectionUsage" : {
-      "committed" : 9437184,
-      "init" : 30081024,
-      "max" : 357564416,
-      "used" : 0
-    },
-    "CollectionUsageThreshold" : 0,
-    "CollectionUsageThresholdCount" : 0,
-    "MemoryManagerNames" : [ "PS MarkSweep", "PS Scavenge" ],
-    "PeakUsage" : {
-      "committed" : 30081024,
-      "init" : 30081024,
-      "max" : 357629952,
-      "used" : 30081024
-    },
-    "Usage" : {
-      "committed" : 9437184,
-      "init" : 30081024,
-      "max" : 357564416,
-      "used" : 6995424
-    },
-    "CollectionUsageThresholdExceeded" : true,
-    "CollectionUsageThresholdSupported" : true,
-    "UsageThresholdSupported" : false
-  }, {
-    "name" : "java.lang:type=Memory",
-    "modelerType" : "sun.management.MemoryImpl",
-    "Verbose" : false,
-    "HeapMemoryUsage" : {
-      "committed" : 89784320,
-      "init" : 120225856,
-      "max" : 954466304,
-      "used" : 9772616
-    },
-    "NonHeapMemoryUsage" : {
-      "committed" : 24313856,
-      "init" : 24313856,
-      "max" : 136314880,
-      "used" : 21933376
-    },
-    "ObjectPendingFinalizationCount" : 0
-  }, {
-    "name" : "java.lang:type=MemoryPool,name=PS Survivor Space",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "PS Survivor Space",
-    "Type" : "HEAP",
-    "Valid" : true,
-    "CollectionUsage" : {
-      "committed" : 131072,
-      "init" : 4980736,
-      "max" : 131072,
-      "used" : 98304
-    },
-    "CollectionUsageThreshold" : 0,
-    "CollectionUsageThresholdCount" : 0,
-    "MemoryManagerNames" : [ "PS MarkSweep", "PS Scavenge" ],
-    "PeakUsage" : {
-      "committed" : 4980736,
-      "init" : 4980736,
-      "max" : 4980736,
-      "used" : 2564664
-    },
-    "Usage" : {
-      "committed" : 131072,
-      "init" : 4980736,
-      "max" : 131072,
-      "used" : 98304
-    },
-    "CollectionUsageThresholdExceeded" : true,
-    "CollectionUsageThresholdSupported" : true,
-    "UsageThresholdSupported" : false
-  }, {
-    "name" : "java.lang:type=GarbageCollector,name=PS MarkSweep",
-    "modelerType" : "sun.management.GarbageCollectorImpl",
-    "LastGcInfo" : null,
-    "CollectionCount" : 0,
-    "CollectionTime" : 0,
-    "Name" : "PS MarkSweep",
-    "Valid" : true,
-    "MemoryPoolNames" : [ "PS Eden Space", "PS Survivor Space", "PS Old Gen", "PS Perm Gen" ]
-  }, {
-    "name" : "java.lang:type=Compilation",
-    "modelerType" : "sun.management.CompilationImpl",
-    "Name" : "HotSpot 64-Bit Tiered Compilers",
-    "CompilationTimeMonitoringSupported" : true,
-    "TotalCompilationTime" : 8281
-  }, {
-    "name" : "Hadoop:service=DataNode,name=DataNode",
-    "modelerType" : "DataNode",
-    "tag.context" : "dfs",
-    "tag.sessionId" : null,
-    "tag.hostName" : "ip-10-85-111-149.ec2.internal",
-    "bytes_written" : 256373,
-    "bytes_read" : 224135,
-    "blocks_written" : 27,
-    "blocks_read" : 13,
-    "blocks_replicated" : 0,
-    "blocks_removed" : 6,
-    "blocks_verified" : 15,
-    "block_verification_failures" : 0,
-    "blocks_get_local_pathinfo" : 2,
-    "reads_from_local_client" : 5,
-    "reads_from_remote_client" : 8,
-    "writes_from_local_client" : 5,
-    "writes_from_remote_client" : 22,
-    "readBlockOp_num_ops" : 13,
-    "readBlockOp_avg_time" : 3.0,
-    "writeBlockOp_num_ops" : 27,
-    "writeBlockOp_avg_time" : 11.833333333333332,
-    "blockChecksumOp_num_ops" : 0,
-    "blockChecksumOp_avg_time" : 0.0,
-    "copyBlockOp_num_ops" : 0,
-    "copyBlockOp_avg_time" : 0.0,
-    "replaceBlockOp_num_ops" : 0,
-    "replaceBlockOp_avg_time" : 0.0,
-    "heartBeats_num_ops" : 31994,
-    "heartBeats_avg_time" : 1.0,
-    "blockReports_num_ops" : 3,
-    "blockReports_avg_time" : 2.0
-  }, {
-    "name" : "Hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId-547291082",
-    "modelerType" : "org.apache.hadoop.hdfs.server.datanode.FSDataset",
-    "Remaining" : 842207985664,
-    "Capacity" : 887717691390,
-    "DfsUsed" : 299008,
-    "StorageInfo" : "FSDataset{dirpath='/grid/0/hadoop/hdfs/data/current,/grid/1/hadoop/hdfs/data/current'}"
-  }, {
-    "name" : "Hadoop:service=DataNode,name=RpcActivityForPort8010",
-    "modelerType" : "RpcActivityForPort8010",
-    "tag.context" : "rpc",
-    "tag.port" : "8010",
-    "tag.hostName" : "ip-10-85-111-149.ec2.internal",
-    "rpcAuthenticationSuccesses" : 0,
-    "rpcAuthenticationFailures" : 0,
-    "rpcAuthorizationSuccesses" : 2,
-    "rpcAuthorizationFailures" : 0,
-    "ReceivedBytes" : 856,
-    "SentBytes" : 544,
-    "RpcQueueTime_num_ops" : 3,
-    "RpcQueueTime_avg_time" : 0.0,
-    "RpcProcessingTime_num_ops" : 3,
-    "RpcProcessingTime_avg_time" : 1.0,
-    "NumOpenConnections" : 0,
-    "callQueueLen" : 0
-  }, {
-    "name" : "Hadoop:service=DataNode,name=ugi",
-    "modelerType" : "ugi",
-    "tag.context" : "ugi",
-    "tag.hostName" : "ip-10-85-111-149.ec2.internal",
-    "loginSuccess_num_ops" : 0,
-    "loginSuccess_avg_time" : 0.0,
-    "loginFailure_num_ops" : 0,
-    "loginFailure_avg_time" : 0.0
-  }, {
-    "name" : "java.lang:type=OperatingSystem",
-    "modelerType" : "com.sun.management.UnixOperatingSystem",
-    "MaxFileDescriptorCount" : 32768,
-    "OpenFileDescriptorCount" : 103,
-    "CommittedVirtualMemorySize" : 1540296704,
-    "FreePhysicalMemorySize" : 5948542976,
-    "FreeSwapSpaceSize" : 0,
-    "ProcessCpuTime" : 141410000000,
-    "TotalPhysicalMemorySize" : 7694454784,
-    "TotalSwapSpaceSize" : 0,
-    "Name" : "Linux",
-    "Version" : "2.6.32-220.17.1.el6.centos.plus.x86_64",
-    "AvailableProcessors" : 2,
-    "Arch" : "amd64",
-    "SystemLoadAverage" : 0.0
-  }, {
-    "name" : "java.lang:type=MemoryManager,name=CodeCacheManager",
-    "modelerType" : "sun.management.MemoryManagerImpl",
-    "Name" : "CodeCacheManager",
-    "Valid" : true,
-    "MemoryPoolNames" : [ "Code Cache" ]
-  }, {
-    "name" : "Hadoop:service=DataNode,name=MetricsSystem,sub=Stats",
-    "modelerType" : "MetricsSystem,sub=Stats",
-    "tag.context" : "metricssystem",
-    "num_sources" : 5,
-    "num_sinks" : 1,
-    "sink.ganglia.latency_num_ops" : 9606,
-    "sink.ganglia.latency_avg_time" : 1.0,
-    "sink.ganglia.dropped" : 0,
-    "sink.ganglia.qsize" : 0,
-    "snapshot_num_ops" : 57621,
-    "snapshot_avg_time" : 0.0,
-    "snapshot_stdev_time" : 0.0,
-    "snapshot_imin_time" : 0.0,
-    "snapshot_imax_time" : 1.401298464324817E-45,
-    "snapshot_min_time" : 0.0,
-    "snapshot_max_time" : 41.0,
-    "publish_num_ops" : 9606,
-    "publish_avg_time" : 0.0,
-    "publish_stdev_time" : 0.0,
-    "publish_imin_time" : 0.0,
-    "publish_imax_time" : 1.401298464324817E-45,
-    "publish_min_time" : 0.0,
-    "publish_max_time" : 39.0,
-    "dropped_pub_all" : 0
-  }, {
-    "name" : "java.lang:type=MemoryPool,name=Code Cache",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "Code Cache",
-    "Type" : "NON_HEAP",
-    "Valid" : true,
-    "CollectionUsage" : null,
-    "MemoryManagerNames" : [ "CodeCacheManager" ],
-    "PeakUsage" : {
-      "committed" : 2555904,
-      "init" : 2555904,
-      "max" : 50331648,
-      "used" : 2178624
-    },
-    "Usage" : {
-      "committed" : 2555904,
-      "init" : 2555904,
-      "max" : 50331648,
-      "used" : 2157760
-    },
-    "UsageThreshold" : 0,
-    "UsageThresholdCount" : 0,
-    "CollectionUsageThresholdSupported" : false,
-    "UsageThresholdExceeded" : true,
-    "UsageThresholdSupported" : true
-  }, {
-    "name" : "java.lang:type=Runtime",
-    "modelerType" : "sun.management.RuntimeImpl",
-    "Name" : "13341@ip-10-85-111-149.ec2.internal",
-    "ClassPath" : "/etc/hadoop/conf:/usr/jdk/jdk1.6.0_31/lib/tools.jar:/usr/lib/hadoop/libexec/..:/usr/lib/hadoop/libexec/../hadoop-core-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/ambari-log4j-1.0.jar:/usr/lib/hadoop/libexec/../lib/asm-3.2.jar:/usr/lib/hadoop/libexec/../lib/aspectjrt-1.6.11.jar:/usr/lib/hadoop/libexec/../lib/aspectjtools-1.6.11.jar:/usr/lib/hadoop/libexec/../lib/commons-beanutils-1.7.0.jar:/usr/lib/hadoop/libexec/../lib/commons-beanutils-core-1.8.0.jar:/usr/lib/hadoop/libexec/../lib/commons-cli-1.2.jar:/usr/lib/hadoop/libexec/../lib/commons-codec-1.4.jar:/usr/lib/hadoop/libexec/../lib/commons-collections-3.2.1.jar:/usr/lib/hadoop/libexec/../lib/commons-configuration-1.6.jar:/usr/lib/hadoop/libexec/../lib/commons-daemon-1.0.1.jar:/usr/lib/hadoop/libexec/../lib/commons-digester-1.8.jar:/usr/lib/hadoop/libexec/../lib/commons-el-1.0.jar:/usr/lib/hadoop/libexec/../lib/commons-httpclient-3.0.1.jar:/usr/lib/hadoop/libexec/../lib/commons-io-2.1.jar:/usr/lib/hadoop/libexec/../lib/commons-lang-2.4.jar:/usr/lib/hadoop/libexec/../lib/commons-logging-1.1.1.jar:/usr/lib/hadoop/libexec/../lib/commons-logging-api-1.0.4.jar:/usr/lib/hadoop/libexec/../lib/commons-math-2.1.jar:/usr/lib/hadoop/libexec/../lib/commons-net-3.1.jar:/usr/lib/hadoop/libexec/../lib/core-3.1.1.jar:/usr/lib/hadoop/libexec/../lib/hadoop-capacity-scheduler-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/hadoop-fairscheduler-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/hadoop-lzo-0.5.0.jar:/usr/lib/hadoop/libexec/../lib/hadoop-thriftfs-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/hadoop-tools.jar:/usr/lib/hadoop/libexec/../lib/hsqldb-1.8.0.10.jar:/usr/lib/hadoop/libexec/../lib/jackson-core-asl-1.8.8.jar:/usr/lib/hadoop/libexec/../lib/jackson-mapper-asl-1.8.8.jar:/usr/lib/hadoop/libexec/../lib/jasper-compiler-5.5.12.jar:/usr/lib/hadoop/libexec/../lib/jasper-runtime-5.5.12.jar:/usr/lib/hadoop/libexec/../lib/jdeb-0.8.jar:/usr/lib/hadoop/libexec/../lib/jersey-core-1.8.jar:/usr/lib/hadoop/libexec/../lib/jersey-json-1.8.jar:/usr/lib/hadoop/libexec/../lib/jersey-server-1.8.jar:/usr/lib/hadoop/libexec/../lib/jets3t-0.6.1.jar:/usr/lib/hadoop/libexec/../lib/jetty-6.1.26.jar:/usr/lib/hadoop/libexec/../lib/jetty-util-6.1.26.jar:/usr/lib/hadoop/libexec/../lib/jsch-0.1.42.jar:/usr/lib/hadoop/libexec/../lib/junit-4.5.jar:/usr/lib/hadoop/libexec/../lib/kfs-0.2.2.jar:/usr/lib/hadoop/libexec/../lib/log4j-1.2.15.jar:/usr/lib/hadoop/libexec/../lib/mockito-all-1.8.5.jar:/usr/lib/hadoop/libexec/../lib/oro-2.0.8.jar:/usr/lib/hadoop/libexec/../lib/postgresql-9.1-901-1.jdbc4.jar:/usr/lib/hadoop/libexec/../lib/servlet-api-2.5-20081211.jar:/usr/lib/hadoop/libexec/../lib/slf4j-api-1.4.3.jar:/usr/lib/hadoop/libexec/../lib/slf4j-log4j12-1.4.3.jar:/usr/lib/hadoop/libexec/../lib/xmlenc-0.52.jar:/usr/lib/hadoop/libexec/../lib/jsp-2.1/jsp-2.1.jar:/usr/lib/hadoop/libexec/../lib/jsp-2.1/jsp-api-2.1.jar",
-    "StartTime" : 1358364350192,
-    "BootClassPath" : "/usr/jdk/jdk1.6.0_31/jre/lib/resources.jar:/usr/jdk/jdk1.6.0_31/jre/lib/rt.jar:/usr/jdk/jdk1.6.0_31/jre/lib/sunrsasign.jar:/usr/jdk/jdk1.6.0_31/jre/lib/jsse.jar:/usr/jdk/jdk1.6.0_31/jre/lib/jce.jar:/usr/jdk/jdk1.6.0_31/jre/lib/charsets.jar:/usr/jdk/jdk1.6.0_31/jre/lib/modules/jdk.boot.jar:/usr/jdk/jdk1.6.0_31/jre/classes",
-    "LibraryPath" : "/usr/lib/hadoop/libexec/../lib/native/Linux-amd64-64",
-    "VmName" : "Java HotSpot(TM) 64-Bit Server VM",
-    "VmVendor" : "Sun Microsystems Inc.",
-    "VmVersion" : "20.6-b01",
-    "BootClassPathSupported" : true,
-    "InputArguments" : [ "-Dproc_datanode", "-Xmx1024m", "-Djava.net.preferIPv4Stack=true", "-Djava.net.preferIPv4Stack=true", "-Djava.net.preferIPv4Stack=true", "-Xmx1024m", "-Dhadoop.security.logger=ERROR,DRFAS", "-Dhadoop.log.dir=/var/log/hadoop/hdfs", "-Dhadoop.log.file=hadoop-hdfs-datanode-ip-10-85-111-149.log", "-Dhadoop.home.dir=/usr/lib/hadoop/libexec/..", "-Dhadoop.id.str=hdfs", "-Dhadoop.root.logger=INFO,DRFA", "-Dhadoop.security.logger=INFO,NullAppender", "-Djava.library.path=/usr/lib/hadoop/libexec/../lib/native/Linux-amd64-64", "-Dhadoop.policy.file=hadoop-policy.xml" ],
-    "ManagementSpecVersion" : "1.2",
-    "SpecName" : "Java Virtual Machine Specification",
-    "SpecVendor" : "Sun Microsystems Inc.",
-    "SpecVersion" : "1.0",
-    "SystemProperties" : [ {
-      "key" : "proc_datanode",
-      "value" : ""
-    }, {
-      "key" : "java.ext.dirs",
-      "value" : "/usr/jdk/jdk1.6.0_31/jre/lib/ext:/usr/java/packages/lib/ext"
-    }, {
-      "key" : "hadoop.home.dir",
-      "value" : "/usr/lib/hadoop/libexec/.."
-    }, {
-      "key" : "java.vm.specification.vendor",
-      "value" : "Sun Microsystems Inc."
-    }, {
-      "key" : "user.timezone",
-      "value" : "America/New_York"
-    }, {
-      "key" : "java.vm.vendor",
-      "value" : "Sun Microsystems Inc."
-    }, {
-      "key" : "hadoop.id.str",
-      "value" : "hdfs"
-    }, {
-      "key" : "user.name",
-      "value" : "hdfs"
-    }, {
-      "key" : "java.vm.specification.name",
-      "value" : "Java Virtual Machine Specification"
-    }, {
-      "key" : "user.dir",
-      "value" : "/usr/lib/hadoop"
-    }, {
-      "key" : "user.country",
-      "value" : "US"
-    }, {
-      "key" : "user.language",
-      "value" : "en"
-    }, {
-      "key" : "java.specification.version",
-      "value" : "1.6"
-    }, {
-      "key" : "hadoop.log.file",
-      "value" : "hadoop-hdfs-datanode-ip-10-85-111-149.log"
-    }, {
-      "key" : "hadoop.policy.file",
-      "value" : "hadoop-policy.xml"
-    }, {
-      "key" : "sun.cpu.endian",
-      "value" : "little"
-    }, {
-      "key" : "java.home",
-      "value" : "/usr/jdk/jdk1.6.0_31/jre"
-    }, {
-      "key" : "java.net.preferIPv4Stack",
-      "value" : "true"
-    }, {
-      "key" : "sun.jnu.encoding",
-      "value" : "ANSI_X3.4-1968"
-    }, {
-      "key" : "file.separator",
-      "value" : "/"
-    }, {
-      "key" : "java.vendor.url",
-      "value" : "http://java.sun.com/"
-    }, {
-      "key" : "java.awt.graphicsenv",
-      "value" : "sun.awt.X11GraphicsEnvironment"
-    }, {
-      "key" : "hadoop.log.dir",
-      "value" : "/var/log/hadoop/hdfs"
-    }, {
-      "key" : "os.arch",
-      "value" : "amd64"
-    }, {
-      "key" : "java.io.tmpdir",
-      "value" : "/tmp"
-    }, {
-      "key" : "java.runtime.name",
-      "value" : "Java(TM) SE Runtime Environment"
-    }, {
-      "key" : "java.awt.printerjob",
-      "value" : "sun.print.PSPrinterJob"
-    }, {
-      "key" : "file.encoding",
-      "value" : "ANSI_X3.4-1968"
-    }, {
-      "key" : "java.version",
-      "value" : "1.6.0_31"
-    }, {
-      "key" : "java.vendor.url.bug",
-      "value" : "http://java.sun.com/cgi-bin/bugreport.cgi"
-    }, {
-      "key" : "java.vm.specification.version",
-      "value" : "1.0"
-    }, {
-      "key" : "file.encoding.pkg",
-      "value" : "sun.io"
-    }, {
-      "key" : "sun.java.command",
-      "value" : "org.apache.hadoop.hdfs.server.datanode.DataNode"
-    }, {
-      "key" : "sun.java.launcher",
-      "value" : "SUN_STANDARD"
-    }, {
-      "key" : "path.separator",
-      "value" : ":"
-    }, {
-      "key" : "java.runtime.version",
-      "value" : "1.6.0_31-b04"
-    }, {
-      "key" : "java.class.path",
-      "value" : "/etc/hadoop/conf:/usr/jdk/jdk1.6.0_31/lib/tools.jar:/usr/lib/hadoop/libexec/..:/usr/lib/hadoop/libexec/../hadoop-core-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/ambari-log4j-1.0.jar:/usr/lib/hadoop/libexec/../lib/asm-3.2.jar:/usr/lib/hadoop/libexec/../lib/aspectjrt-1.6.11.jar:/usr/lib/hadoop/libexec/../lib/aspectjtools-1.6.11.jar:/usr/lib/hadoop/libexec/../lib/commons-beanutils-1.7.0.jar:/usr/lib/hadoop/libexec/../lib/commons-beanutils-core-1.8.0.jar:/usr/lib/hadoop/libexec/../lib/commons-cli-1.2.jar:/usr/lib/hadoop/libexec/../lib/commons-codec-1.4.jar:/usr/lib/hadoop/libexec/../lib/commons-collections-3.2.1.jar:/usr/lib/hadoop/libexec/../lib/commons-configuration-1.6.jar:/usr/lib/hadoop/libexec/../lib/commons-daemon-1.0.1.jar:/usr/lib/hadoop/libexec/../lib/commons-digester-1.8.jar:/usr/lib/hadoop/libexec/../lib/commons-el-1.0.jar:/usr/lib/hadoop/libexec/../lib/commons-httpclient-3.0.1.jar:/usr/lib/hadoop/libexec/../lib/commons-io-2.1.jar:/usr/lib/hadoop/libexec/../lib/commons-lang-2.4.jar:/usr/lib/hadoop/libexec/../lib/commons-logging-1.1.1.jar:/usr/lib/hadoop/libexec/../lib/commons-logging-api-1.0.4.jar:/usr/lib/hadoop/libexec/../lib/commons-math-2.1.jar:/usr/lib/hadoop/libexec/../lib/commons-net-3.1.jar:/usr/lib/hadoop/libexec/../lib/core-3.1.1.jar:/usr/lib/hadoop/libexec/../lib/hadoop-capacity-scheduler-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/hadoop-fairscheduler-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/hadoop-lzo-0.5.0.jar:/usr/lib/hadoop/libexec/../lib/hadoop-thriftfs-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/hadoop-tools.jar:/usr/lib/hadoop/libexec/../lib/hsqldb-1.8.0.10.jar:/usr/lib/hadoop/libexec/../lib/jackson-core-asl-1.8.8.jar:/usr/lib/hadoop/libexec/../lib/jackson-mapper-asl-1.8.8.jar:/usr/lib/hadoop/libexec/../lib/jasper-compiler-5.5.12.jar:/usr/lib/hadoop/libexec/../lib/jasper-runtime-5.5.12.jar:/usr/lib/hadoop/libexec/../lib/jdeb-0.8.jar:/usr/lib/hadoop/libexec/../lib/jersey-core-1.8.jar:/usr/lib/hadoop/libexec/../lib/jersey-json-1.8.jar:/usr/lib/hadoop/libexec/../lib/jersey-server-1.8.jar:/usr/lib/hadoop/libexec/../lib/jets3t-0.6.1.jar:/usr/lib/hadoop/libexec/../lib/jetty-6.1.26.jar:/usr/lib/hadoop/libexec/../lib/jetty-util-6.1.26.jar:/usr/lib/hadoop/libexec/../lib/jsch-0.1.42.jar:/usr/lib/hadoop/libexec/../lib/junit-4.5.jar:/usr/lib/hadoop/libexec/../lib/kfs-0.2.2.jar:/usr/lib/hadoop/libexec/../lib/log4j-1.2.15.jar:/usr/lib/hadoop/libexec/../lib/mockito-all-1.8.5.jar:/usr/lib/hadoop/libexec/../lib/oro-2.0.8.jar:/usr/lib/hadoop/libexec/../lib/postgresql-9.1-901-1.jdbc4.jar:/usr/lib/hadoop/libexec/../lib/servlet-api-2.5-20081211.jar:/usr/lib/hadoop/libexec/../lib/slf4j-api-1.4.3.jar:/usr/lib/hadoop/libexec/../lib/slf4j-log4j12-1.4.3.jar:/usr/lib/hadoop/libexec/../lib/xmlenc-0.52.jar:/usr/lib/hadoop/libexec/../lib/jsp-2.1/jsp-2.1.jar:/usr/lib/hadoop/libexec/../lib/jsp-2.1/jsp-api-2.1.jar"
-    }, {
-      "key" : "os.name",
-      "value" : "Linux"
-    }, {
-      "key" : "hadoop.security.logger",
-      "value" : "INFO,NullAppender"
-    }, {
-      "key" : "line.separator",
-      "value" : "\n"
-    }, {
-      "key" : "os.version",
-      "value" : "2.6.32-220.17.1.el6.centos.plus.x86_64"
-    }, {
-      "key" : "sun.arch.data.model",
-      "value" : "64"
-    }, {
-      "key" : "java.class.version",
-      "value" : "50.0"
-    }, {
-      "key" : "sun.io.unicode.encoding",
-      "value" : "UnicodeLittle"
-    }, {
-      "key" : "java.vendor",
-      "value" : "Sun Microsystems Inc."
-    }, {
-      "key" : "sun.boot.class.path",
-      "value" : "/usr/jdk/jdk1.6.0_31/jre/lib/resources.jar:/usr/jdk/jdk1.6.0_31/jre/lib/rt.jar:/usr/jdk/jdk1.6.0_31/jre/lib/sunrsasign.jar:/usr/jdk/jdk1.6.0_31/jre/lib/jsse.jar:/usr/jdk/jdk1.6.0_31/jre/lib/jce.jar:/usr/jdk/jdk1.6.0_31/jre/lib/charsets.jar:/usr/jdk/jdk1.6.0_31/jre/lib/modules/jdk.boot.jar:/usr/jdk/jdk1.6.0_31/jre/classes"
-    }, {
-      "key" : "java.vm.info",
-      "value" : "mixed mode"
-    }, {
-      "key" : "java.specification.name",
-      "value" : "Java Platform API Specification"
-    }, {
-      "key" : "java.vm.name",
-      "value" : "Java HotSpot(TM) 64-Bit Server VM"
-    }, {
-      "key" : "java.vm.version",
-      "value" : "20.6-b01"
-    }, {
-      "key" : "sun.boot.library.path",
-      "value" : "/usr/jdk/jdk1.6.0_31/jre/lib/amd64"
-    }, {
-      "key" : "hadoop.root.logger",
-      "value" : "INFO,DRFA"
-    }, {
-      "key" : "java.endorsed.dirs",
-      "value" : "/usr/jdk/jdk1.6.0_31/jre/lib/endorsed"
-    }, {
-      "key" : "sun.os.patch.level",
-      "value" : "unknown"
-    }, {
-      "key" : "sun.cpu.isalist",
-      "value" : ""
-    }, {
-      "key" : "user.home",
-      "value" : "/usr/lib/hadoop"
-    }, {
-      "key" : "java.library.path",
-      "value" : "/usr/lib/hadoop/libexec/../lib/native/Linux-amd64-64"
-    }, {
-      "key" : "java.specification.vendor",
-      "value" : "Sun Microsystems Inc."
-    }, {
-      "key" : "sun.management.compiler",
-      "value" : "HotSpot 64-Bit Tiered Compilers"
-    } ],
-    "Uptime" : 96072219
-  }, {
-    "name" : "java.lang:type=ClassLoading",
-    "modelerType" : "sun.management.ClassLoadingImpl",
-    "LoadedClassCount" : 2563,
-    "UnloadedClassCount" : 0,
-    "TotalLoadedClassCount" : 2563,
-    "Verbose" : false
-  }, {
-    "name" : "Hadoop:service=DataNode,name=DataNodeInfo",
-    "modelerType" : "org.apache.hadoop.hdfs.server.datanode.DataNode",
-    "HostName" : "ip-10-85-111-149.ec2.internal",
-    "Version" : "1.1.2.21",
-    "RpcPort" : "8010",
-    "HttpPort" : null,
-    "NamenodeAddress" : "ip-10-85-118-178.ec2.internal",
-    "VolumeInfo" : "{\"/grid/1/hadoop/hdfs/data/current\":{\"freeSpace\":421104041984,\"usedSpace\":122880,\"reservedSpace\":1},\"/grid/0/hadoop/hdfs/data/current\":{\"freeSpace\":421103943680,\"usedSpace\":176128,\"reservedSpace\":1}}"
-  }, {
-    "name" : "Hadoop:service=DataNode,name=RpcDetailedActivityForPort8010",
-    "modelerType" : "RpcDetailedActivityForPort8010",
-    "tag.context" : "rpcdetailed",
-    "tag.port" : "8010",
-    "tag.hostName" : "ip-10-85-111-149.ec2.internal",
-    "getProtocolVersion_num_ops" : 1,
-    "getProtocolVersion_avg_time" : 0.0,
-    "getBlockLocalPathInfo_num_ops" : 2,
-    "getBlockLocalPathInfo_avg_time" : 1.0
-  }, {
-    "name" : "java.lang:type=Threading",
-    "modelerType" : "sun.management.ThreadImpl",
-    "ThreadAllocatedMemoryEnabled" : true,
-    "ThreadAllocatedMemorySupported" : true,
-    "DaemonThreadCount" : 18,
-    "PeakThreadCount" : 32,
-    "CurrentThreadCpuTimeSupported" : true,
-    "ObjectMonitorUsageSupported" : true,
-    "SynchronizerUsageSupported" : true,
-    "ThreadContentionMonitoringEnabled" : false,
-    "ThreadContentionMonitoringSupported" : true,
-    "ThreadCpuTimeEnabled" : true,
-    "AllThreadIds" : [ 2059, 46, 45, 44, 43, 35, 41, 30, 42, 40, 39, 38, 37, 36, 34, 33, 29, 28, 25, 22, 11, 10, 4, 3, 2, 1 ],
-    "CurrentThreadCpuTime" : 50000000,
-    "CurrentThreadUserTime" : 50000000,
-    "ThreadCount" : 26,
-    "TotalStartedThreadCount" : 2353,
-    "ThreadCpuTimeSupported" : true
-  }, {
-    "name" : "java.util.logging:type=Logging",
-    "modelerType" : "java.util.logging.Logging",
-    "LoggerNames" : [ "sun.awt.AppContext", "javax.management", "global", "javax.management.mbeanserver", "" ]
-  }, {
-    "name" : "Hadoop:service=DataNode,name=jvm",
-    "modelerType" : "jvm",
-    "tag.context" : "jvm",
-    "tag.processName" : "DataNode",
-    "tag.sessionId" : null,
-    "tag.hostName" : "ip-10-85-111-149.ec2.internal",
-    "memNonHeapUsedM" : 20.917297,
-    "memNonHeapCommittedM" : 23.1875,
-    "memHeapUsedM" : 9.15242,
-    "memHeapCommittedM" : 85.625,
-    "gcCount" : 84,
-    "gcTimeMillis" : 168,
-    "threadsNew" : 0,
-    "threadsRunnable" : 11,
-    "threadsBlocked" : 0,
-    "threadsWaiting" : 7,
-    "threadsTimedWaiting" : 8,
-    "threadsTerminated" : 0,
-    "logFatal" : 0,
-    "logError" : 0,
-    "logWarn" : 0,
-    "logInfo" : 3
-  }, {
-    "name" : "com.sun.management:type=HotSpotDiagnostic",
-    "modelerType" : "sun.management.HotSpotDiagnostic",
-    "DiagnosticOptions" : [ {
-      "name" : "HeapDumpBeforeFullGC",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "HeapDumpAfterFullGC",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "HeapDumpOnOutOfMemoryError",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "HeapDumpPath",
-      "origin" : "DEFAULT",
-      "value" : "",
-      "writeable" : true
-    }, {
-      "name" : "CMSAbortablePrecleanWaitMillis",
-      "origin" : "DEFAULT",
-      "value" : "100",
-      "writeable" : true
-    }, {
-      "name" : "CMSWaitDuration",
-      "origin" : "DEFAULT",
-      "value" : "2000",
-      "writeable" : true
-    }, {
-      "name" : "PrintGC",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintGCDetails",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintGCDateStamps",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintGCTimeStamps",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintClassHistogramBeforeFullGC",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintClassHistogramAfterFullGC",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintClassHistogram",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintConcurrentLocks",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    } ]
-  }, {
-    "name" : "java.lang:type=MemoryPool,name=PS Perm Gen",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "PS Perm Gen",
-    "Type" : "NON_HEAP",
-    "Valid" : true,
-    "CollectionUsage" : {
-      "committed" : 0,
-      "init" : 21757952,
-      "max" : 85983232,
-      "used" : 0
-    },
-    "CollectionUsageThreshold" : 0,
-    "CollectionUsageThresholdCount" : 0,
-    "MemoryManagerNames" : [ "PS MarkSweep" ],
-    "PeakUsage" : {
-      "committed" : 21757952,
-      "init" : 21757952,
-      "max" : 85983232,
-      "used" : 19775616
-    },
-    "Usage" : {
-      "committed" : 21757952,
-      "init" : 21757952,
-      "max" : 85983232,
-      "used" : 19775616
-    },
-    "UsageThreshold" : 0,
-    "UsageThresholdCount" : 0,
-    "CollectionUsageThresholdExceeded" : true,
-    "CollectionUsageThresholdSupported" : true,
-    "UsageThresholdExceeded" : true,
-    "UsageThresholdSupported" : true
-  }, {
-    "name" : "Hadoop:service=DataNode,name=MetricsSystem,sub=Control",
-    "modelerType" : "org.apache.hadoop.metrics2.impl.MetricsSystemImpl"
-  }, {
-    "name" : "java.lang:type=GarbageCollector,name=PS Scavenge",
-    "modelerType" : "sun.management.GarbageCollectorImpl",
-    "LastGcInfo" : {
-      "GcThreadCount" : 2,
-      "duration" : 0,
-      "endTime" : 95505269,
-      "id" : 84,
-      "memoryUsageAfterGc" : [ {
-        "key" : "PS Survivor Space",
-        "value" : {
-          "committed" : 131072,
-          "init" : 4980736,
-          "max" : 131072,
-          "used" : 98304
-        }
-      }, {
-        "key" : "PS Old Gen",
-        "value" : {
-          "committed" : 80216064,
-          "init" : 80216064,
-          "max" : 715849728,
-          "used" : 2667576
-        }
-      }, {
-        "key" : "PS Perm Gen",
-        "value" : {
-          "committed" : 21757952,
-          "init" : 21757952,
-          "max" : 85983232,
-          "used" : 19775288
-        }
-      }, {
-        "key" : "Code Cache",
-        "value" : {
-          "committed" : 2555904,
-          "init" : 2555904,
-          "max" : 50331648,
-          "used" : 2157760
-        }
-      }, {
-        "key" : "PS Eden Space",
-        "value" : {
-          "committed" : 9437184,
-          "init" : 30081024,
-          "max" : 357564416,
-          "used" : 0
-        }
-      } ],
-      "memoryUsageBeforeGc" : [ {
-        "key" : "PS Survivor Space",
-        "value" : {
-          "committed" : 131072,
-          "init" : 4980736,
-          "max" : 131072,
-          "used" : 98304
-        }
-      }, {
-        "key" : "PS Old Gen",
-        "value" : {
-          "committed" : 80216064,
-          "init" : 80216064,
-          "max" : 715849728,
-          "used" : 2659384
-        }
-      }, {
-        "key" : "PS Perm Gen",
-        "value" : {
-          "committed" : 21757952,
-          "init" : 21757952,
-          "max" : 85983232,
-          "used" : 19775288
-        }
-      }, {
-        "key" : "Code Cache",
-        "value" : {
-          "committed" : 2555904,
-          "init" : 2555904,
-          "max" : 50331648,
-          "used" : 2157760
-        }
-      }, {
-        "key" : "PS Eden Space",
-        "value" : {
-          "committed" : 9502720,
-          "init" : 30081024,
-          "max" : 357564416,
-          "used" : 9502720
-        }
-      } ],
-      "startTime" : 95505269
-    },
-    "CollectionCount" : 84,
-    "CollectionTime" : 168,
-    "Name" : "PS Scavenge",
-    "Valid" : true,
-    "MemoryPoolNames" : [ "PS Eden Space", "PS Survivor Space" ]
-  }, {
-    "name" : "java.lang:type=MemoryPool,name=PS Old Gen",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "PS Old Gen",
-    "Type" : "HEAP",
-    "Valid" : true,
-    "CollectionUsage" : {
-      "committed" : 0,
-      "init" : 80216064,
-      "max" : 715849728,
-      "used" : 0
-    },
-    "CollectionUsageThreshold" : 0,
-    "CollectionUsageThresholdCount" : 0,
-    "MemoryManagerNames" : [ "PS MarkSweep" ],
-    "PeakUsage" : {
-      "committed" : 80216064,
-      "init" : 80216064,
-      "max" : 715849728,
-      "used" : 2667576
-    },
-    "Usage" : {
-      "committed" : 80216064,
-      "init" : 80216064,
-      "max" : 715849728,
-      "used" : 2667576
-    },
-    "UsageThreshold" : 0,
-    "UsageThresholdCount" : 0,
-    "CollectionUsageThresholdExceeded" : true,
-    "CollectionUsageThresholdSupported" : true,
-    "UsageThresholdExceeded" : true,
-    "UsageThresholdSupported" : true
-  }, {
-    "name" : "JMImplementation:type=MBeanServerDelegate",
-    "modelerType" : "javax.management.MBeanServerDelegate",
-    "MBeanServerId" : "ip-10-85-111-149.ec2.internal_1358364350917",
-    "SpecificationName" : "Java Management Extensions",
-    "SpecificationVersion" : "1.4",
-    "SpecificationVendor" : "Sun Microsystems",
-    "ImplementationName" : "JMX",
-    "ImplementationVersion" : "1.6.0_31-b04",
-    "ImplementationVendor" : "Sun Microsystems"
-  } ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-server/src/test/resources/hdfs_namenode_jmx.json b/branch-1.2/ambari-server/src/test/resources/hdfs_namenode_jmx.json
deleted file mode 100644
index cb0dc7b..0000000
--- a/branch-1.2/ambari-server/src/test/resources/hdfs_namenode_jmx.json
+++ /dev/null
@@ -1,830 +0,0 @@
-{
-  "beans" : [ {
-    "name" : "java.lang:type=Memory",
-    "modelerType" : "sun.management.MemoryImpl",
-    "Verbose" : true,
-    "HeapMemoryUsage" : {
-      "committed" : 1006632960,
-      "init" : 1073741824,
-      "max" : 1006632960,
-      "used" : 473433016
-    },
-    "NonHeapMemoryUsage" : {
-      "committed" : 24379392,
-      "init" : 24313856,
-      "max" : 136314880,
-      "used" : 23634400
-    },
-    "ObjectPendingFinalizationCount" : 0
-  }, {
-    "name" : "Hadoop:service=NameNode,name=FSNamesystemMetrics",
-    "modelerType" : "FSNamesystemMetrics",
-    "tag.context" : "dfs",
-    "tag.hostName" : "ip-10-85-118-178.ec2.internal",
-    "FilesTotal" : 61,
-    "BlocksTotal" : 21,
-    "CapacityTotalGB" : 827,
-    "CapacityUsedGB" : 0,
-    "CapacityRemainingGB" : 784,
-    "TotalLoad" : 1,
-    "CorruptBlocks" : 0,
-    "ExcessBlocks" : 0,
-    "PendingDeletionBlocks" : 0,
-    "PendingReplicationBlocks" : 0,
-    "UnderReplicatedBlocks" : 21,
-    "ScheduledReplicationBlocks" : 0,
-    "MissingBlocks" : 0,
-    "BlockCapacity" : 2097152
-  }, {
-    "name" : "Hadoop:service=NameNode,name=RpcActivityForPort8020",
-    "modelerType" : "RpcActivityForPort8020",
-    "tag.context" : "rpc",
-    "tag.port" : "8020",
-    "tag.hostName" : "ip-10-85-118-178.ec2.internal",
-    "rpcAuthenticationSuccesses" : 0,
-    "rpcAuthenticationFailures" : 0,
-    "rpcAuthorizationSuccesses" : 6419,
-    "rpcAuthorizationFailures" : 0,
-    "ReceivedBytes" : 13670605,
-    "SentBytes" : 10510194,
-    "RpcQueueTime_num_ops" : 62756,
-    "RpcQueueTime_avg_time" : 0.3333333333333333,
-    "RpcProcessingTime_num_ops" : 62756,
-    "RpcProcessingTime_avg_time" : 0.0,
-    "NumOpenConnections" : 0,
-    "callQueueLen" : 0
-  }, {
-    "name" : "java.lang:type=GarbageCollector,name=ConcurrentMarkSweep",
-    "modelerType" : "sun.management.GarbageCollectorImpl",
-    "LastGcInfo" : null,
-    "CollectionCount" : 0,
-    "CollectionTime" : 0,
-    "Name" : "ConcurrentMarkSweep",
-    "Valid" : true,
-    "MemoryPoolNames" : [ "Par Eden Space", "Par Survivor Space", "CMS Old Gen", "CMS Perm Gen" ]
-  }, {
-    "name" : "java.lang:type=Compilation",
-    "modelerType" : "sun.management.CompilationImpl",
-    "Name" : "HotSpot 64-Bit Tiered Compilers",
-    "CompilationTimeMonitoringSupported" : true,
-    "TotalCompilationTime" : 13348
-  }, {
-    "name" : "java.lang:type=MemoryPool,name=Par Eden Space",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "Par Eden Space",
-    "Type" : "HEAP",
-    "Valid" : true,
-    "CollectionUsage" : {
-      "committed" : 536870912,
-      "init" : 536870912,
-      "max" : 536870912,
-      "used" : 0
-    },
-    "CollectionUsageThreshold" : 0,
-    "CollectionUsageThresholdCount" : 0,
-    "MemoryManagerNames" : [ "ConcurrentMarkSweep", "ParNew" ],
-    "PeakUsage" : {
-      "committed" : 536870912,
-      "init" : 536870912,
-      "max" : 536870912,
-      "used" : 536870912
-    },
-    "Usage" : {
-      "committed" : 536870912,
-      "init" : 536870912,
-      "max" : 536870912,
-      "used" : 457697336
-    },
-    "CollectionUsageThresholdExceeded" : true,
-    "CollectionUsageThresholdSupported" : true,
-    "UsageThresholdSupported" : false
-  }, {
-    "name" : "Hadoop:service=NameNode,name=jvm",
-    "modelerType" : "jvm",
-    "tag.context" : "jvm",
-    "tag.processName" : "NameNode",
-    "tag.sessionId" : null,
-    "tag.hostName" : "ip-10-85-118-178.ec2.internal",
-    "memNonHeapUsedM" : 22.53952,
-    "memNonHeapCommittedM" : 23.25,
-    "memHeapUsedM" : 451.50092,
-    "memHeapCommittedM" : 960.0,
-    "gcCount" : 8,
-    "gcTimeMillis" : 1153,
-    "threadsNew" : 0,
-    "threadsRunnable" : 10,
-    "threadsBlocked" : 0,
-    "threadsWaiting" : 44,
-    "threadsTimedWaiting" : 8,
-    "threadsTerminated" : 0,
-    "logFatal" : 0,
-    "logError" : 0,
-    "logWarn" : 0,
-    "logInfo" : 3
-  }, {
-    "name" : "java.lang:type=MemoryPool,name=CMS Perm Gen",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "CMS Perm Gen",
-    "Type" : "NON_HEAP",
-    "Valid" : true,
-    "CollectionUsage" : {
-      "committed" : 0,
-      "init" : 21757952,
-      "max" : 85983232,
-      "used" : 0
-    },
-    "CollectionUsageThreshold" : 0,
-    "CollectionUsageThresholdCount" : 0,
-    "MemoryManagerNames" : [ "ConcurrentMarkSweep" ],
-    "PeakUsage" : {
-      "committed" : 21757952,
-      "init" : 21757952,
-      "max" : 85983232,
-      "used" : 21093088
-    },
-    "Usage" : {
-      "committed" : 21757952,
-      "init" : 21757952,
-      "max" : 85983232,
-      "used" : 21093088
-    },
-    "UsageThreshold" : 0,
-    "UsageThresholdCount" : 0,
-    "CollectionUsageThresholdExceeded" : true,
-    "CollectionUsageThresholdSupported" : true,
-    "UsageThresholdExceeded" : true,
-    "UsageThresholdSupported" : true
-  }, {
-    "name" : "java.lang:type=OperatingSystem",
-    "modelerType" : "com.sun.management.UnixOperatingSystem",
-    "MaxFileDescriptorCount" : 32768,
-    "OpenFileDescriptorCount" : 108,
-    "CommittedVirtualMemorySize" : 1623138304,
-    "FreePhysicalMemorySize" : 4531109888,
-    "FreeSwapSpaceSize" : 0,
-    "ProcessCpuTime" : 320980000000,
-    "TotalPhysicalMemorySize" : 7694454784,
-    "TotalSwapSpaceSize" : 0,
-    "Name" : "Linux",
-    "Version" : "2.6.32-220.17.1.el6.centos.plus.x86_64",
-    "AvailableProcessors" : 2,
-    "Arch" : "amd64",
-    "SystemLoadAverage" : 0.11
-  }, {
-    "name" : "java.lang:type=MemoryPool,name=Par Survivor Space",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "Par Survivor Space",
-    "Type" : "HEAP",
-    "Valid" : true,
-    "CollectionUsage" : {
-      "committed" : 67108864,
-      "init" : 67108864,
-      "max" : 67108864,
-      "used" : 4785576
-    },
-    "CollectionUsageThreshold" : 0,
-    "CollectionUsageThresholdCount" : 0,
-    "MemoryManagerNames" : [ "ConcurrentMarkSweep", "ParNew" ],
-    "PeakUsage" : {
-      "committed" : 67108864,
-      "init" : 67108864,
-      "max" : 67108864,
-      "used" : 15156976
-    },
-    "Usage" : {
-      "committed" : 67108864,
-      "init" : 67108864,
-      "max" : 67108864,
-      "used" : 4785576
-    },
-    "CollectionUsageThresholdExceeded" : true,
-    "CollectionUsageThresholdSupported" : true,
-    "UsageThresholdSupported" : false
-  }, {
-    "name" : "Hadoop:service=NameNode,name=NameNode",
-    "modelerType" : "NameNode",
-    "tag.context" : "dfs",
-    "tag.sessionId" : null,
-    "tag.hostName" : "ip-10-85-118-178.ec2.internal",
-    "FilesCreated" : 77,
-    "FilesAppended" : 0,
-    "GetBlockLocations" : 18,
-    "FilesRenamed" : 10,
-    "GetListingOps" : 15200,
-    "CreateFileOps" : 28,
-    "FilesDeleted" : 17,
-    "DeleteFileOps" : 11,
-    "FileInfoOps" : 10777,
-    "AddBlockOps" : 27,
-    "Transactions_num_ops" : 204,
-    "Transactions_avg_time" : 0.12820512820512828,
-    "Syncs_num_ops" : 128,
-    "Syncs_avg_time" : 2.5416666666666674,
-    "JournalTransactionsBatchedInSync" : 1,
-    "blockReport_num_ops" : 26,
-    "blockReport_avg_time" : 1.0,
-    "SafemodeTime" : 1141,
-    "fsImageLoadTime" : 1130,
-    "FilesInGetListingOps" : 25803
-  }, {
-    "name" : "java.lang:type=MemoryManager,name=CodeCacheManager",
-    "modelerType" : "sun.management.MemoryManagerImpl",
-    "Name" : "CodeCacheManager",
-    "Valid" : true,
-    "MemoryPoolNames" : [ "Code Cache" ]
-  }, {
-    "name" : "java.lang:type=MemoryPool,name=CMS Old Gen",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "CMS Old Gen",
-    "Type" : "HEAP",
-    "Valid" : true,
-    "CollectionUsage" : {
-      "committed" : 0,
-      "init" : 402653184,
-      "max" : 402653184,
-      "used" : 0
-    },
-    "CollectionUsageThreshold" : 0,
-    "CollectionUsageThresholdCount" : 0,
-    "MemoryManagerNames" : [ "ConcurrentMarkSweep" ],
-    "PeakUsage" : {
-      "committed" : 402653184,
-      "init" : 402653184,
-      "max" : 402653184,
-      "used" : 10950104
-    },
-    "Usage" : {
-      "committed" : 402653184,
-      "init" : 402653184,
-      "max" : 402653184,
-      "used" : 10950104
-    },
-    "UsageThreshold" : 0,
-    "UsageThresholdCount" : 0,
-    "CollectionUsageThresholdExceeded" : true,
-    "CollectionUsageThresholdSupported" : true,
-    "UsageThresholdExceeded" : true,
-    "UsageThresholdSupported" : true
-  }, {
-    "name" : "Hadoop:service=NameNode,name=MetricsSystem,sub=Control",
-    "modelerType" : "org.apache.hadoop.metrics2.impl.MetricsSystemImpl"
-  }, {
-    "name" : "Hadoop:service=NameNode,name=RpcDetailedActivityForPort8020",
-    "modelerType" : "RpcDetailedActivityForPort8020",
-    "tag.context" : "rpcdetailed",
-    "tag.port" : "8020",
-    "tag.hostName" : "ip-10-85-118-178.ec2.internal",
-    "getProtocolVersion_num_ops" : 43,
-    "getProtocolVersion_avg_time" : 0.0,
-    "versionRequest_num_ops" : 1,
-    "versionRequest_avg_time" : 2.0,
-    "getFileInfo_num_ops" : 10777,
-    "getFileInfo_avg_time" : 0.0,
-    "register_num_ops" : 1,
-    "register_avg_time" : 4.0,
-    "blocksBeingWrittenReport_num_ops" : 1,
-    "blocksBeingWrittenReport_avg_time" : 2.0,
-    "sendHeartbeat_num_ops" : 31794,
-    "sendHeartbeat_avg_time" : 0.0,
-    "mkdirs_num_ops" : 29,
-    "mkdirs_avg_time" : 2.25,
-    "setOwner_num_ops" : 4,
-    "setOwner_avg_time" : 35.0,
-    "blockReport_num_ops" : 26,
-    "blockReport_avg_time" : 1.0,
-    "setPermission_num_ops" : 13,
-    "setPermission_avg_time" : 2.6666666666666665,
-    "setSafeMode_num_ops" : 3,
-    "setSafeMode_avg_time" : 0.0,
-    "getListing_num_ops" : 15200,
-    "getListing_avg_time" : 1.0,
-    "delete_num_ops" : 12,
-    "delete_avg_time" : 3.5,
-    "create_num_ops" : 28,
-    "create_avg_time" : 5.166666666666666,
-    "addBlock_num_ops" : 27,
-    "addBlock_avg_time" : 0.6666666666666667,
-    "blockReceived_num_ops" : 27,
-    "blockReceived_avg_time" : 0.16666666666666669,
-    "complete_num_ops" : 27,
-    "complete_avg_time" : 3.8333333333333335,
-    "getBlockLocations_num_ops" : 18,
-    "getBlockLocations_avg_time" : 1.0,
-    "rename_num_ops" : 10,
-    "rename_avg_time" : 4.333333333333333,
-    "fsync_num_ops" : 5,
-    "fsync_avg_time" : 0.3333333333333333,
-    "setReplication_num_ops" : 2,
-    "setReplication_avg_time" : 6.0,
-    "renewLease_num_ops" : 3034,
-    "renewLease_avg_time" : 0.0,
-    "getEditLogSize_num_ops" : 303,
-    "getEditLogSize_avg_time" : 0.0,
-    "rollEditLog_num_ops" : 5,
-    "rollEditLog_avg_time" : 15.0,
-    "rollFsImage_num_ops" : 5,
-    "rollFsImage_avg_time" : 41.0
-  }, {
-    "name" : "Hadoop:service=NameNode,name=FSNamesystemState",
-    "modelerType" : "org.apache.hadoop.hdfs.server.namenode.FSNamesystem",
-    "CapacityTotal" : 887717691390,
-    "CapacityUsed" : 299008,
-    "CapacityRemaining" : 842207985664,
-    "TotalLoad" : 1,
-    "BlocksTotal" : 21,
-    "FilesTotal" : 61,
-    "PendingReplicationBlocks" : 0,
-    "UnderReplicatedBlocks" : 21,
-    "ScheduledReplicationBlocks" : 0,
-    "FSState" : "Operational"
-  }, {
-    "name" : "java.lang:type=MemoryPool,name=Code Cache",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "Code Cache",
-    "Type" : "NON_HEAP",
-    "Valid" : true,
-    "CollectionUsage" : null,
-    "MemoryManagerNames" : [ "CodeCacheManager" ],
-    "PeakUsage" : {
-      "committed" : 2621440,
-      "init" : 2555904,
-      "max" : 50331648,
-      "used" : 2546688
-    },
-    "Usage" : {
-      "committed" : 2621440,
-      "init" : 2555904,
-      "max" : 50331648,
-      "used" : 2541312
-    },
-    "UsageThreshold" : 0,
-    "UsageThresholdCount" : 0,
-    "CollectionUsageThresholdSupported" : false,
-    "UsageThresholdExceeded" : true,
-    "UsageThresholdSupported" : true
-  }, {
-    "name" : "java.lang:type=Runtime",
-    "modelerType" : "sun.management.RuntimeImpl",
-    "Name" : "15720@ip-10-85-118-178.ec2.internal",
-    "ClassPath" : "/etc/hadoop/conf:/usr/jdk/jdk1.6.0_31/lib/tools.jar:/usr/lib/hadoop/libexec/..:/usr/lib/hadoop/libexec/../hadoop-core-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/ambari-log4j-1.0.jar:/usr/lib/hadoop/libexec/../lib/asm-3.2.jar:/usr/lib/hadoop/libexec/../lib/aspectjrt-1.6.11.jar:/usr/lib/hadoop/libexec/../lib/aspectjtools-1.6.11.jar:/usr/lib/hadoop/libexec/../lib/commons-beanutils-1.7.0.jar:/usr/lib/hadoop/libexec/../lib/commons-beanutils-core-1.8.0.jar:/usr/lib/hadoop/libexec/../lib/commons-cli-1.2.jar:/usr/lib/hadoop/libexec/../lib/commons-codec-1.4.jar:/usr/lib/hadoop/libexec/../lib/commons-collections-3.2.1.jar:/usr/lib/hadoop/libexec/../lib/commons-configuration-1.6.jar:/usr/lib/hadoop/libexec/../lib/commons-daemon-1.0.1.jar:/usr/lib/hadoop/libexec/../lib/commons-digester-1.8.jar:/usr/lib/hadoop/libexec/../lib/commons-el-1.0.jar:/usr/lib/hadoop/libexec/../lib/commons-httpclient-3.0.1.jar:/usr/lib/hadoop/libexec/../lib/commons-io-2.1.jar:/usr/lib/hadoop/libexec/../lib/commons-lang-2.4.jar:/usr/lib/hadoop/libexec/../lib/commons-logging-1.1.1.jar:/usr/lib/hadoop/libexec/../lib/commons-logging-api-1.0.4.jar:/usr/lib/hadoop/libexec/../lib/commons-math-2.1.jar:/usr/lib/hadoop/libexec/../lib/commons-net-3.1.jar:/usr/lib/hadoop/libexec/../lib/core-3.1.1.jar:/usr/lib/hadoop/libexec/../lib/hadoop-capacity-scheduler-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/hadoop-fairscheduler-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/hadoop-lzo-0.5.0.jar:/usr/lib/hadoop/libexec/../lib/hadoop-thriftfs-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/hadoop-tools.jar:/usr/lib/hadoop/libexec/../lib/hsqldb-1.8.0.10.jar:/usr/lib/hadoop/libexec/../lib/jackson-core-asl-1.8.8.jar:/usr/lib/hadoop/libexec/../lib/jackson-mapper-asl-1.8.8.jar:/usr/lib/hadoop/libexec/../lib/jasper-compiler-5.5.12.jar:/usr/lib/hadoop/libexec/../lib/jasper-runtime-5.5.12.jar:/usr/lib/hadoop/libexec/../lib/jdeb-0.8.jar:/usr/lib/hadoop/libexec/../lib/jersey-core-1.8.jar:/usr/lib/hadoop/libexec/../lib/jersey-json-1.8.jar:/usr/lib/hadoop/libexec/../lib/jersey-server-1.8.jar:/usr/lib/hadoop/libexec/../lib/jets3t-0.6.1.jar:/usr/lib/hadoop/libexec/../lib/jetty-6.1.26.jar:/usr/lib/hadoop/libexec/../lib/jetty-util-6.1.26.jar:/usr/lib/hadoop/libexec/../lib/jsch-0.1.42.jar:/usr/lib/hadoop/libexec/../lib/junit-4.5.jar:/usr/lib/hadoop/libexec/../lib/kfs-0.2.2.jar:/usr/lib/hadoop/libexec/../lib/log4j-1.2.15.jar:/usr/lib/hadoop/libexec/../lib/mockito-all-1.8.5.jar:/usr/lib/hadoop/libexec/../lib/oro-2.0.8.jar:/usr/lib/hadoop/libexec/../lib/postgresql-9.1-901-1.jdbc4.jar:/usr/lib/hadoop/libexec/../lib/servlet-api-2.5-20081211.jar:/usr/lib/hadoop/libexec/../lib/slf4j-api-1.4.3.jar:/usr/lib/hadoop/libexec/../lib/slf4j-log4j12-1.4.3.jar:/usr/lib/hadoop/libexec/../lib/xmlenc-0.52.jar:/usr/lib/hadoop/libexec/../lib/jsp-2.1/jsp-2.1.jar:/usr/lib/hadoop/libexec/../lib/jsp-2.1/jsp-api-2.1.jar",
-    "StartTime" : 1358364400185,
-    "BootClassPath" : "/usr/jdk/jdk1.6.0_31/jre/lib/resources.jar:/usr/jdk/jdk1.6.0_31/jre/lib/rt.jar:/usr/jdk/jdk1.6.0_31/jre/lib/sunrsasign.jar:/usr/jdk/jdk1.6.0_31/jre/lib/jsse.jar:/usr/jdk/jdk1.6.0_31/jre/lib/jce.jar:/usr/jdk/jdk1.6.0_31/jre/lib/charsets.jar:/usr/jdk/jdk1.6.0_31/jre/lib/modules/jdk.boot.jar:/usr/jdk/jdk1.6.0_31/jre/classes",
-    "LibraryPath" : "/usr/lib/hadoop/libexec/../lib/native/Linux-amd64-64",
-    "VmName" : "Java HotSpot(TM) 64-Bit Server VM",
-    "VmVendor" : "Sun Microsystems Inc.",
-    "VmVersion" : "20.6-b01",
-    "BootClassPathSupported" : true,
-    "InputArguments" : [ "-Dproc_namenode", "-Xmx1024m", "-Djava.net.preferIPv4Stack=true", "-Djava.net.preferIPv4Stack=true", "-Djava.net.preferIPv4Stack=true", "-XX:ParallelGCThreads=8", "-XX:+UseConcMarkSweepGC", "-XX:ErrorFile=/var/log/hadoop/hdfs/hs_err_pid%p.log", "-XX:NewSize=200m", "-XX:MaxNewSize=640m", "-Xloggc:/var/log/hadoop/hdfs/gc.log-201301161426", "-verbose:gc", "-XX:+PrintGCDetails", "-XX:+PrintGCTimeStamps", "-XX:+PrintGCDateStamps", "-Xms1024m", "-Xmx1024m", "-Dhadoop.security.logger=INFO,DRFAS", "-Dhdfs.audit.logger=INFO,DRFAAUDIT", "-XX:ParallelGCThreads=8", "-XX:+UseConcMarkSweepGC", "-XX:ErrorFile=/var/log/hadoop/hdfs/hs_err_pid%p.log", "-XX:NewSize=200m", "-XX:MaxNewSize=640m", "-Xloggc:/var/log/hadoop/hdfs/gc.log-201301161426", "-verbose:gc", "-XX:+PrintGCDetails", "-XX:+PrintGCTimeStamps", "-XX:+PrintGCDateStamps", "-Xms1024m", "-Xmx1024m", "-Dhadoop.security.logger=INFO,DRFAS", "-Dhdfs.audit.logger=INFO,DRFAAUDIT", "-XX:ParallelGCThreads=8", "-XX:+UseConcMarkSweepGC", "-XX:ErrorFile=/var/log/hadoop/hdfs/hs_err_pid%p.log", "-XX:NewSize=200m", "-XX:MaxNewSize=640m", "-Xloggc:/var/log/hadoop/hdfs/gc.log-201301161426", "-verbose:gc", "-XX:+PrintGCDetails", "-XX:+PrintGCTimeStamps", "-XX:+PrintGCDateStamps", "-Xms1024m", "-Xmx1024m", "-Dhadoop.security.logger=INFO,DRFAS", "-Dhdfs.audit.logger=INFO,DRFAAUDIT", "-Dhadoop.log.dir=/var/log/hadoop/hdfs", "-Dhadoop.log.file=hadoop-hdfs-namenode-ip-10-85-118-178.log", "-Dhadoop.home.dir=/usr/lib/hadoop/libexec/..", "-Dhadoop.id.str=hdfs", "-Dhadoop.root.logger=INFO,DRFA", "-Dhadoop.security.logger=INFO,DRFAS", "-Djava.library.path=/usr/lib/hadoop/libexec/../lib/native/Linux-amd64-64", "-Dhadoop.policy.file=hadoop-policy.xml" ],
-    "ManagementSpecVersion" : "1.2",
-    "SpecName" : "Java Virtual Machine Specification",
-    "SpecVendor" : "Sun Microsystems Inc.",
-    "SpecVersion" : "1.0",
-    "SystemProperties" : [ {
-      "key" : "java.ext.dirs",
-      "value" : "/usr/jdk/jdk1.6.0_31/jre/lib/ext:/usr/java/packages/lib/ext"
-    }, {
-      "key" : "hadoop.home.dir",
-      "value" : "/usr/lib/hadoop/libexec/.."
-    }, {
-      "key" : "java.vm.specification.vendor",
-      "value" : "Sun Microsystems Inc."
-    }, {
-      "key" : "user.timezone",
-      "value" : "America/New_York"
-    }, {
-      "key" : "java.vm.vendor",
-      "value" : "Sun Microsystems Inc."
-    }, {
-      "key" : "hadoop.id.str",
-      "value" : "hdfs"
-    }, {
-      "key" : "user.name",
-      "value" : "hdfs"
-    }, {
-      "key" : "java.vm.specification.name",
-      "value" : "Java Virtual Machine Specification"
-    }, {
-      "key" : "user.dir",
-      "value" : "/usr/lib/hadoop"
-    }, {
-      "key" : "user.country",
-      "value" : "US"
-    }, {
-      "key" : "user.language",
-      "value" : "en"
-    }, {
-      "key" : "java.specification.version",
-      "value" : "1.6"
-    }, {
-      "key" : "hadoop.log.file",
-      "value" : "hadoop-hdfs-namenode-ip-10-85-118-178.log"
-    }, {
-      "key" : "hadoop.policy.file",
-      "value" : "hadoop-policy.xml"
-    }, {
-      "key" : "sun.cpu.endian",
-      "value" : "little"
-    }, {
-      "key" : "java.home",
-      "value" : "/usr/jdk/jdk1.6.0_31/jre"
-    }, {
-      "key" : "java.net.preferIPv4Stack",
-      "value" : "true"
-    }, {
-      "key" : "sun.jnu.encoding",
-      "value" : "ANSI_X3.4-1968"
-    }, {
-      "key" : "file.separator",
-      "value" : "/"
-    }, {
-      "key" : "java.vendor.url",
-      "value" : "http://java.sun.com/"
-    }, {
-      "key" : "java.awt.graphicsenv",
-      "value" : "sun.awt.X11GraphicsEnvironment"
-    }, {
-      "key" : "hadoop.log.dir",
-      "value" : "/var/log/hadoop/hdfs"
-    }, {
-      "key" : "os.arch",
-      "value" : "amd64"
-    }, {
-      "key" : "java.io.tmpdir",
-      "value" : "/tmp"
-    }, {
-      "key" : "java.runtime.name",
-      "value" : "Java(TM) SE Runtime Environment"
-    }, {
-      "key" : "java.awt.printerjob",
-      "value" : "sun.print.PSPrinterJob"
-    }, {
-      "key" : "file.encoding",
-      "value" : "ANSI_X3.4-1968"
-    }, {
-      "key" : "java.version",
-      "value" : "1.6.0_31"
-    }, {
-      "key" : "java.vendor.url.bug",
-      "value" : "http://java.sun.com/cgi-bin/bugreport.cgi"
-    }, {
-      "key" : "java.vm.specification.version",
-      "value" : "1.0"
-    }, {
-      "key" : "file.encoding.pkg",
-      "value" : "sun.io"
-    }, {
-      "key" : "sun.java.command",
-      "value" : "org.apache.hadoop.hdfs.server.namenode.NameNode"
-    }, {
-      "key" : "sun.java.launcher",
-      "value" : "SUN_STANDARD"
-    }, {
-      "key" : "path.separator",
-      "value" : ":"
-    }, {
-      "key" : "java.runtime.version",
-      "value" : "1.6.0_31-b04"
-    }, {
-      "key" : "java.class.path",
-      "value" : "/etc/hadoop/conf:/usr/jdk/jdk1.6.0_31/lib/tools.jar:/usr/lib/hadoop/libexec/..:/usr/lib/hadoop/libexec/../hadoop-core-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/ambari-log4j-1.0.jar:/usr/lib/hadoop/libexec/../lib/asm-3.2.jar:/usr/lib/hadoop/libexec/../lib/aspectjrt-1.6.11.jar:/usr/lib/hadoop/libexec/../lib/aspectjtools-1.6.11.jar:/usr/lib/hadoop/libexec/../lib/commons-beanutils-1.7.0.jar:/usr/lib/hadoop/libexec/../lib/commons-beanutils-core-1.8.0.jar:/usr/lib/hadoop/libexec/../lib/commons-cli-1.2.jar:/usr/lib/hadoop/libexec/../lib/commons-codec-1.4.jar:/usr/lib/hadoop/libexec/../lib/commons-collections-3.2.1.jar:/usr/lib/hadoop/libexec/../lib/commons-configuration-1.6.jar:/usr/lib/hadoop/libexec/../lib/commons-daemon-1.0.1.jar:/usr/lib/hadoop/libexec/../lib/commons-digester-1.8.jar:/usr/lib/hadoop/libexec/../lib/commons-el-1.0.jar:/usr/lib/hadoop/libexec/../lib/commons-httpclient-3.0.1.jar:/usr/lib/hadoop/libexec/../lib/commons-io-2.1.jar:/usr/lib/hadoop/libexec/../lib/commons-lang-2.4.jar:/usr/lib/hadoop/libexec/../lib/commons-logging-1.1.1.jar:/usr/lib/hadoop/libexec/../lib/commons-logging-api-1.0.4.jar:/usr/lib/hadoop/libexec/../lib/commons-math-2.1.jar:/usr/lib/hadoop/libexec/../lib/commons-net-3.1.jar:/usr/lib/hadoop/libexec/../lib/core-3.1.1.jar:/usr/lib/hadoop/libexec/../lib/hadoop-capacity-scheduler-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/hadoop-fairscheduler-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/hadoop-lzo-0.5.0.jar:/usr/lib/hadoop/libexec/../lib/hadoop-thriftfs-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/hadoop-tools.jar:/usr/lib/hadoop/libexec/../lib/hsqldb-1.8.0.10.jar:/usr/lib/hadoop/libexec/../lib/jackson-core-asl-1.8.8.jar:/usr/lib/hadoop/libexec/../lib/jackson-mapper-asl-1.8.8.jar:/usr/lib/hadoop/libexec/../lib/jasper-compiler-5.5.12.jar:/usr/lib/hadoop/libexec/../lib/jasper-runtime-5.5.12.jar:/usr/lib/hadoop/libexec/../lib/jdeb-0.8.jar:/usr/lib/hadoop/libexec/../lib/jersey-core-1.8.jar:/usr/lib/hadoop/libexec/../lib/jersey-json-1.8.jar:/usr/lib/hadoop/libexec/../lib/jersey-server-1.8.jar:/usr/lib/hadoop/libexec/../lib/jets3t-0.6.1.jar:/usr/lib/hadoop/libexec/../lib/jetty-6.1.26.jar:/usr/lib/hadoop/libexec/../lib/jetty-util-6.1.26.jar:/usr/lib/hadoop/libexec/../lib/jsch-0.1.42.jar:/usr/lib/hadoop/libexec/../lib/junit-4.5.jar:/usr/lib/hadoop/libexec/../lib/kfs-0.2.2.jar:/usr/lib/hadoop/libexec/../lib/log4j-1.2.15.jar:/usr/lib/hadoop/libexec/../lib/mockito-all-1.8.5.jar:/usr/lib/hadoop/libexec/../lib/oro-2.0.8.jar:/usr/lib/hadoop/libexec/../lib/postgresql-9.1-901-1.jdbc4.jar:/usr/lib/hadoop/libexec/../lib/servlet-api-2.5-20081211.jar:/usr/lib/hadoop/libexec/../lib/slf4j-api-1.4.3.jar:/usr/lib/hadoop/libexec/../lib/slf4j-log4j12-1.4.3.jar:/usr/lib/hadoop/libexec/../lib/xmlenc-0.52.jar:/usr/lib/hadoop/libexec/../lib/jsp-2.1/jsp-2.1.jar:/usr/lib/hadoop/libexec/../lib/jsp-2.1/jsp-api-2.1.jar"
-    }, {
-      "key" : "os.name",
-      "value" : "Linux"
-    }, {
-      "key" : "hadoop.security.logger",
-      "value" : "INFO,DRFAS"
-    }, {
-      "key" : "line.separator",
-      "value" : "\n"
-    }, {
-      "key" : "proc_namenode",
-      "value" : ""
-    }, {
-      "key" : "os.version",
-      "value" : "2.6.32-220.17.1.el6.centos.plus.x86_64"
-    }, {
-      "key" : "sun.arch.data.model",
-      "value" : "64"
-    }, {
-      "key" : "java.class.version",
-      "value" : "50.0"
-    }, {
-      "key" : "sun.io.unicode.encoding",
-      "value" : "UnicodeLittle"
-    }, {
-      "key" : "java.vendor",
-      "value" : "Sun Microsystems Inc."
-    }, {
-      "key" : "sun.boot.class.path",
-      "value" : "/usr/jdk/jdk1.6.0_31/jre/lib/resources.jar:/usr/jdk/jdk1.6.0_31/jre/lib/rt.jar:/usr/jdk/jdk1.6.0_31/jre/lib/sunrsasign.jar:/usr/jdk/jdk1.6.0_31/jre/lib/jsse.jar:/usr/jdk/jdk1.6.0_31/jre/lib/jce.jar:/usr/jdk/jdk1.6.0_31/jre/lib/charsets.jar:/usr/jdk/jdk1.6.0_31/jre/lib/modules/jdk.boot.jar:/usr/jdk/jdk1.6.0_31/jre/classes"
-    }, {
-      "key" : "java.vm.info",
-      "value" : "mixed mode"
-    }, {
-      "key" : "java.specification.name",
-      "value" : "Java Platform API Specification"
-    }, {
-      "key" : "java.vm.name",
-      "value" : "Java HotSpot(TM) 64-Bit Server VM"
-    }, {
-      "key" : "java.vm.version",
-      "value" : "20.6-b01"
-    }, {
-      "key" : "sun.boot.library.path",
-      "value" : "/usr/jdk/jdk1.6.0_31/jre/lib/amd64"
-    }, {
-      "key" : "hadoop.root.logger",
-      "value" : "INFO,DRFA"
-    }, {
-      "key" : "java.endorsed.dirs",
-      "value" : "/usr/jdk/jdk1.6.0_31/jre/lib/endorsed"
-    }, {
-      "key" : "sun.os.patch.level",
-      "value" : "unknown"
-    }, {
-      "key" : "sun.cpu.isalist",
-      "value" : ""
-    }, {
-      "key" : "hdfs.audit.logger",
-      "value" : "INFO,DRFAAUDIT"
-    }, {
-      "key" : "user.home",
-      "value" : "/usr/lib/hadoop"
-    }, {
-      "key" : "java.library.path",
-      "value" : "/usr/lib/hadoop/libexec/../lib/native/Linux-amd64-64"
-    }, {
-      "key" : "java.specification.vendor",
-      "value" : "Sun Microsystems Inc."
-    }, {
-      "key" : "sun.management.compiler",
-      "value" : "HotSpot 64-Bit Tiered Compilers"
-    } ],
-    "Uptime" : 95428374
-  }, {
-    "name" : "java.lang:type=ClassLoading",
-    "modelerType" : "sun.management.ClassLoadingImpl",
-    "LoadedClassCount" : 2707,
-    "UnloadedClassCount" : 0,
-    "TotalLoadedClassCount" : 2707,
-    "Verbose" : false
-  }, {
-    "name" : "java.lang:type=Threading",
-    "modelerType" : "sun.management.ThreadImpl",
-    "ThreadAllocatedMemoryEnabled" : true,
-    "ThreadAllocatedMemorySupported" : true,
-    "DaemonThreadCount" : 54,
-    "PeakThreadCount" : 66,
-    "CurrentThreadCpuTimeSupported" : true,
-    "ObjectMonitorUsageSupported" : true,
-    "SynchronizerUsageSupported" : true,
-    "ThreadContentionMonitoringEnabled" : false,
-    "ThreadContentionMonitoringSupported" : true,
-    "ThreadCpuTimeEnabled" : true,
-    "AllThreadIds" : [ 1370, 73, 69, 68, 67, 66, 65, 64, 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 19, 25, 29, 28, 24, 23, 22, 21, 20, 18, 17, 16, 15, 14, 12, 11, 5, 3, 2, 1 ],
-    "CurrentThreadCpuTime" : 310000000,
-    "CurrentThreadUserTime" : 270000000,
-    "ThreadCount" : 62,
-    "TotalStartedThreadCount" : 1398,
-    "ThreadCpuTimeSupported" : true
-  }, {
-    "name" : "java.util.logging:type=Logging",
-    "modelerType" : "java.util.logging.Logging",
-    "LoggerNames" : [ "sun.awt.AppContext", "javax.management", "sun.net.www.protocol.http.HttpURLConnection", "global", "javax.management.mbeanserver", "" ]
-  }, {
-    "name" : "java.lang:type=GarbageCollector,name=ParNew",
-    "modelerType" : "sun.management.GarbageCollectorImpl",
-    "LastGcInfo" : {
-      "GcThreadCount" : 11,
-      "duration" : 17,
-      "endTime" : 79533682,
-      "id" : 8,
-      "memoryUsageAfterGc" : [ {
-        "key" : "CMS Perm Gen",
-        "value" : {
-          "committed" : 21757952,
-          "init" : 21757952,
-          "max" : 85983232,
-          "used" : 21091432
-        }
-      }, {
-        "key" : "Code Cache",
-        "value" : {
-          "committed" : 2555904,
-          "init" : 2555904,
-          "max" : 50331648,
-          "used" : 2468928
-        }
-      }, {
-        "key" : "CMS Old Gen",
-        "value" : {
-          "committed" : 402653184,
-          "init" : 402653184,
-          "max" : 402653184,
-          "used" : 10950104
-        }
-      }, {
-        "key" : "Par Eden Space",
-        "value" : {
-          "committed" : 536870912,
-          "init" : 536870912,
-          "max" : 536870912,
-          "used" : 0
-        }
-      }, {
-        "key" : "Par Survivor Space",
-        "value" : {
-          "committed" : 67108864,
-          "init" : 67108864,
-          "max" : 67108864,
-          "used" : 4785576
-        }
-      } ],
-      "memoryUsageBeforeGc" : [ {
-        "key" : "CMS Perm Gen",
-        "value" : {
-          "committed" : 21757952,
-          "init" : 21757952,
-          "max" : 85983232,
-          "used" : 21091432
-        }
-      }, {
-        "key" : "Code Cache",
-        "value" : {
-          "committed" : 2555904,
-          "init" : 2555904,
-          "max" : 50331648,
-          "used" : 2468928
-        }
-      }, {
-        "key" : "CMS Old Gen",
-        "value" : {
-          "committed" : 402653184,
-          "init" : 402653184,
-          "max" : 402653184,
-          "used" : 10949176
-        }
-      }, {
-        "key" : "Par Eden Space",
-        "value" : {
-          "committed" : 536870912,
-          "init" : 536870912,
-          "max" : 536870912,
-          "used" : 536870912
-        }
-      }, {
-        "key" : "Par Survivor Space",
-        "value" : {
-          "committed" : 67108864,
-          "init" : 67108864,
-          "max" : 67108864,
-          "used" : 4834584
-        }
-      } ],
-      "startTime" : 79533665
-    },
-    "CollectionCount" : 8,
-    "CollectionTime" : 1153,
-    "Name" : "ParNew",
-    "Valid" : true,
-    "MemoryPoolNames" : [ "Par Eden Space", "Par Survivor Space" ]
-  }, {
-    "name" : "Hadoop:service=NameNode,name=MetricsSystem,sub=Stats",
-    "modelerType" : "MetricsSystem,sub=Stats",
-    "tag.context" : "metricssystem",
-    "num_sources" : 6,
-    "num_sinks" : 1,
-    "sink.ganglia.latency_num_ops" : 9541,
-    "sink.ganglia.latency_avg_time" : 2.0,
-    "sink.ganglia.dropped" : 0,
-    "sink.ganglia.qsize" : 0,
-    "snapshot_num_ops" : 66793,
-    "snapshot_avg_time" : 0.14285714285714288,
-    "snapshot_stdev_time" : 0.37796447300922725,
-    "snapshot_imin_time" : 0.0,
-    "snapshot_imax_time" : 1.0,
-    "snapshot_min_time" : 0.0,
-    "snapshot_max_time" : 84.0,
-    "publish_num_ops" : 9541,
-    "publish_avg_time" : 0.0,
-    "publish_stdev_time" : 0.0,
-    "publish_imin_time" : 0.0,
-    "publish_imax_time" : 1.401298464324817E-45,
-    "publish_min_time" : 0.0,
-    "publish_max_time" : 413.0,
-    "dropped_pub_all" : 0
-  }, {
-    "name" : "Hadoop:service=NameNode,name=NameNodeInfo",
-    "modelerType" : "org.apache.hadoop.hdfs.server.namenode.FSNamesystem",
-    "Threads" : 62,
-    "HostName" : "ip-10-85-118-178.ec2.internal",
-    "Total" : 887717691390,
-    "Version" : "1.1.2.21, r",
-    "UpgradeFinalized" : true,
-    "Used" : 299008,
-    "Free" : 842207985664,
-    "Safemode" : "",
-    "NonDfsUsedSpace" : 45509406718,
-    "PercentUsed" : 3.368278E-5,
-    "PercentRemaining" : 94.873405,
-    "TotalBlocks" : 21,
-    "TotalFiles" : 61,
-    "LiveNodes" : "{\"ip-10-85-111-149.ec2.internal\":{\"usedSpace\":299008,\"lastContact\":0}}",
-    "DeadNodes" : "{}",
-    "DecomNodes" : "{}",
-    "NameDirStatuses" : "{\"failed\":{},\"active\":{\"/grid/1/hadoop/hdfs/namenode\":\"IMAGE_AND_EDITS\",\"/grid/0/hadoop/hdfs/namenode\":\"IMAGE_AND_EDITS\"}}"
-  }, {
-    "name" : "com.sun.management:type=HotSpotDiagnostic",
-    "modelerType" : "sun.management.HotSpotDiagnostic",
-    "DiagnosticOptions" : [ {
-      "name" : "HeapDumpBeforeFullGC",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "HeapDumpAfterFullGC",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "HeapDumpOnOutOfMemoryError",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "HeapDumpPath",
-      "origin" : "DEFAULT",
-      "value" : "",
-      "writeable" : true
-    }, {
-      "name" : "CMSAbortablePrecleanWaitMillis",
-      "origin" : "DEFAULT",
-      "value" : "100",
-      "writeable" : true
-    }, {
-      "name" : "CMSWaitDuration",
-      "origin" : "DEFAULT",
-      "value" : "2000",
-      "writeable" : true
-    }, {
-      "name" : "PrintGC",
-      "origin" : "VM_CREATION",
-      "value" : "true",
-      "writeable" : true
-    }, {
-      "name" : "PrintGCDetails",
-      "origin" : "VM_CREATION",
-      "value" : "true",
-      "writeable" : true
-    }, {
-      "name" : "PrintGCDateStamps",
-      "origin" : "VM_CREATION",
-      "value" : "true",
-      "writeable" : true
-    }, {
-      "name" : "PrintGCTimeStamps",
-      "origin" : "VM_CREATION",
-      "value" : "true",
-      "writeable" : true
-    }, {
-      "name" : "PrintClassHistogramBeforeFullGC",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintClassHistogramAfterFullGC",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintClassHistogram",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintConcurrentLocks",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    } ]
-  }, {
-    "name" : "Hadoop:service=NameNode,name=ugi",
-    "modelerType" : "ugi",
-    "tag.context" : "ugi",
-    "tag.hostName" : "ip-10-85-118-178.ec2.internal",
-    "loginSuccess_num_ops" : 0,
-    "loginSuccess_avg_time" : 0.0,
-    "loginFailure_num_ops" : 0,
-    "loginFailure_avg_time" : 0.0
-  }, {
-    "name" : "JMImplementation:type=MBeanServerDelegate",
-    "modelerType" : "javax.management.MBeanServerDelegate",
-    "MBeanServerId" : "ip-10-85-118-178.ec2.internal_1358364400963",
-    "SpecificationName" : "Java Management Extensions",
-    "SpecificationVersion" : "1.4",
-    "SpecificationVendor" : "Sun Microsystems",
-    "ImplementationName" : "JMX",
-    "ImplementationVersion" : "1.6.0_31-b04",
-    "ImplementationVendor" : "Sun Microsystems"
-  } ]
-}
\ No newline at end of file
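(The JSON files removed here — this NameNode capture and the JobTracker and TaskTracker captures that follow — are verbatim snapshots of Hadoop's /jmx servlet output, kept as fixtures for ambari-server's tests. As a hedged aside, not part of the patch itself: such a fixture can be parsed with Jackson 1.x, which the capture shows on the daemon's own classpath as jackson-mapper-asl-1.8.8.jar. The bean and property names below are taken from the NameNodeInfo bean above; the class name and resource path are illustrative only.

import java.io.InputStream;
import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.map.ObjectMapper;

public class JmxFixtureReader {
  public static void main(String[] args) throws Exception {
    // Resource name is illustrative; the fixture would sit on the test classpath.
    InputStream in =
        JmxFixtureReader.class.getResourceAsStream("/hdfs_namenode_jmx.json");
    JsonNode root = new ObjectMapper().readTree(in);
    // The /jmx servlet wraps every exported MBean in one top-level "beans" array.
    for (JsonNode bean : root.get("beans")) {
      if ("Hadoop:service=NameNode,name=NameNodeInfo"
          .equals(bean.get("name").getTextValue())) {
        System.out.println("HostName    = " + bean.get("HostName").getTextValue());
        System.out.println("TotalBlocks = " + bean.get("TotalBlocks").getLongValue());
        System.out.println("LiveNodes   = " + bean.get("LiveNodes").getTextValue());
      }
    }
    in.close();
  }
}
)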
diff --git a/branch-1.2/ambari-server/src/test/resources/log4j.properties b/branch-1.2/ambari-server/src/test/resources/log4j.properties
deleted file mode 100644
index f6767d3..0000000
--- a/branch-1.2/ambari-server/src/test/resources/log4j.properties
+++ /dev/null
@@ -1,21 +0,0 @@
-#   Licensed under the Apache License, Version 2.0 (the "License");
-#   you may not use this file except in compliance with the License.
-#   You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-#   Unless required by applicable law or agreed to in writing, software
-#   distributed under the License is distributed on an "AS IS" BASIS,
-#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#   See the License for the specific language governing permissions and
-#   limitations under the License.
-
-# log4j configuration used during build and unit tests
-
-log4j.rootLogger=DEBUG,stdout
-log4j.threshold=ALL
-log4j.appender.stdout=org.apache.log4j.ConsoleAppender
-log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
-log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2} (%F:%M(%L)) - %m%n
-
-log4j.logger.org.apache.ambari=DEBUG
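(A brief, non-authoritative illustration, not part of the patch: with log4j 1.2 the configuration above is picked up automatically from the test classpath during the build, but it can also be loaded explicitly, which is useful when reproducing a test's DEBUG logging outside Maven. The class name and path here are placeholders.

import org.apache.log4j.Logger;
import org.apache.log4j.PropertyConfigurator;

public class Log4jConfigDemo {
  public static void main(String[] args) {
    // Path is a placeholder; in the build this file is found on the test
    // classpath without any explicit configure() call.
    PropertyConfigurator.configure("src/test/resources/log4j.properties");
    Logger log = Logger.getLogger("org.apache.ambari.Demo");
    log.debug("rendered by the ISO8601 ConversionPattern configured above");
  }
}
)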
diff --git a/branch-1.2/ambari-server/src/test/resources/mapreduce_jobtracker_jmx.json b/branch-1.2/ambari-server/src/test/resources/mapreduce_jobtracker_jmx.json
deleted file mode 100644
index 6c265b5..0000000
--- a/branch-1.2/ambari-server/src/test/resources/mapreduce_jobtracker_jmx.json
+++ /dev/null
@@ -1,897 +0,0 @@
-{
-  "beans" : [ {
-    "name" : "java.lang:type=Memory",
-    "modelerType" : "sun.management.MemoryImpl",
-    "Verbose" : true,
-    "HeapMemoryUsage" : {
-      "committed" : 194183168,
-      "init" : 0,
-      "max" : 1052770304,
-      "used" : 43580400
-    },
-    "NonHeapMemoryUsage" : {
-      "committed" : 47218688,
-      "init" : 24313856,
-      "max" : 136314880,
-      "used" : 29602888
-    },
-    "ObjectPendingFinalizationCount" : 0
-  }, {
-    "name" : "Hadoop:service=JobTracker,name=RpcActivityForPort50300",
-    "modelerType" : "RpcActivityForPort50300",
-    "tag.context" : "rpc",
-    "tag.port" : "50300",
-    "tag.hostName" : "ip-10-116-103-5.ec2.internal",
-    "rpcAuthenticationSuccesses" : 0,
-    "rpcAuthenticationFailures" : 0,
-    "rpcAuthorizationSuccesses" : 2,
-    "rpcAuthorizationFailures" : 0,
-    "ReceivedBytes" : 97304512,
-    "SentBytes" : 32241668,
-    "RpcQueueTime_num_ops" : 301267,
-    "RpcQueueTime_avg_time" : 0.05882352941176472,
-    "RpcProcessingTime_num_ops" : 301267,
-    "RpcProcessingTime_avg_time" : 0.058823529411764705,
-    "NumOpenConnections" : 0,
-    "callQueueLen" : 0
-  }, {
-    "name" : "Hadoop:service=JobTracker,name=JobTrackerInfo",
-    "modelerType" : "org.apache.hadoop.mapred.JobTracker",
-    "Hostname" : "ip-10-116-103-5",
-    "Version" : "1.1.2.21, r",
-    "ConfigVersion" : "default",
-    "ThreadCount" : 75,
-    "SummaryJson" : "{\"nodes\":1,\"alive\":1,\"blacklisted\":0,\"graylisted\":0,\"slots\":{\"map_slots\":4,\"map_slots_used\":0,\"reduce_slots\":2,\"reduce_slots_used\":0},\"jobs\":1}",
-    "AliveNodesInfoJson" : "[{\"hostname\":\"ip-10-85-111-149.ec2.internal\",\"last_seen\":1358455632994,\"health\":\"OK\",\"slots\":{\"map_slots\":4,\"map_slots_used\":0,\"reduce_slots\":2,\"reduce_slots_used\":0},\"failures\":0,\"dir_failures\":0}]",
-    "BlacklistedNodesInfoJson" : "[]",
-    "GraylistedNodesInfoJson" : "[]",
-    "QueueInfoJson" : "{\"default\":{\"state\":\"running\",\"info\":\"Queue configuration\\nCapacity Percentage: 100.0%\\nUser Limit: 100%\\nPriority Supported: NO\\n-------------\\nMap tasks\\nCapacity: 4 slots\\nUsed capacity: 0 (0.0% of Capacity)\\nRunning tasks: 0\\n-------------\\nReduce tasks\\nCapacity: 2 slots\\nUsed capacity: 0 (0.0% of Capacity)\\nRunning tasks: 0\\n-------------\\nJob info\\nNumber of Waiting Jobs: 0\\nNumber of Initializing Jobs: 0\\nNumber of users who have submitted jobs: 0\\n\"}}"
-  }, {
-    "name" : "Hadoop:service=JobTracker,name=MetricsSystem,sub=Control",
-    "modelerType" : "org.apache.hadoop.metrics2.impl.MetricsSystemImpl"
-  }, {
-    "name" : "java.lang:type=GarbageCollector,name=ConcurrentMarkSweep",
-    "modelerType" : "sun.management.GarbageCollectorImpl",
-    "LastGcInfo" : {
-      "GcThreadCount" : 11,
-      "duration" : 421,
-      "endTime" : 5276850,
-      "id" : 261,
-      "memoryUsageAfterGc" : [ {
-        "key" : "CMS Perm Gen",
-        "value" : {
-          "committed" : 44007424,
-          "init" : 21757952,
-          "max" : 85983232,
-          "used" : 26295304
-        }
-      }, {
-        "key" : "Code Cache",
-        "value" : {
-          "committed" : 2752512,
-          "init" : 2555904,
-          "max" : 50331648,
-          "used" : 2664960
-        }
-      }, {
-        "key" : "CMS Old Gen",
-        "value" : {
-          "committed" : 5439488,
-          "init" : 5439488,
-          "max" : 864026624,
-          "used" : 3141144
-        }
-      }, {
-        "key" : "Par Eden Space",
-        "value" : {
-          "committed" : 167772160,
-          "init" : 167772160,
-          "max" : 167772160,
-          "used" : 199320
-        }
-      }, {
-        "key" : "Par Survivor Space",
-        "value" : {
-          "committed" : 20971520,
-          "init" : 20971520,
-          "max" : 20971520,
-          "used" : 299064
-        }
-      } ],
-      "memoryUsageBeforeGc" : [ {
-        "key" : "CMS Perm Gen",
-        "value" : {
-          "committed" : 44007424,
-          "init" : 21757952,
-          "max" : 85983232,
-          "used" : 26295304
-        }
-      }, {
-        "key" : "Code Cache",
-        "value" : {
-          "committed" : 2752512,
-          "init" : 2555904,
-          "max" : 50331648,
-          "used" : 2664960
-        }
-      }, {
-        "key" : "CMS Old Gen",
-        "value" : {
-          "committed" : 5439488,
-          "init" : 5439488,
-          "max" : 864026624,
-          "used" : 3140952
-        }
-      }, {
-        "key" : "Par Eden Space",
-        "value" : {
-          "committed" : 167772160,
-          "init" : 167772160,
-          "max" : 167772160,
-          "used" : 167494600
-        }
-      }, {
-        "key" : "Par Survivor Space",
-        "value" : {
-          "committed" : 20971520,
-          "init" : 20971520,
-          "max" : 20971520,
-          "used" : 237168
-        }
-      } ],
-      "startTime" : 5276429
-    },
-    "CollectionCount" : 261,
-    "CollectionTime" : 128067,
-    "Name" : "ConcurrentMarkSweep",
-    "Valid" : true,
-    "MemoryPoolNames" : [ "Par Eden Space", "Par Survivor Space", "CMS Old Gen", "CMS Perm Gen" ]
-  }, {
-    "name" : "java.lang:type=Compilation",
-    "modelerType" : "sun.management.CompilationImpl",
-    "Name" : "HotSpot 64-Bit Tiered Compilers",
-    "CompilationTimeMonitoringSupported" : true,
-    "TotalCompilationTime" : 16556
-  }, {
-    "name" : "java.lang:type=MemoryPool,name=Par Eden Space",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "Par Eden Space",
-    "Type" : "HEAP",
-    "Valid" : true,
-    "CollectionUsage" : {
-      "committed" : 167772160,
-      "init" : 167772160,
-      "max" : 167772160,
-      "used" : 0
-    },
-    "CollectionUsageThreshold" : 0,
-    "CollectionUsageThresholdCount" : 0,
-    "MemoryManagerNames" : [ "ConcurrentMarkSweep", "ParNew" ],
-    "PeakUsage" : {
-      "committed" : 167772160,
-      "init" : 167772160,
-      "max" : 167772160,
-      "used" : 167772160
-    },
-    "Usage" : {
-      "committed" : 167772160,
-      "init" : 167772160,
-      "max" : 167772160,
-      "used" : 40150880
-    },
-    "CollectionUsageThresholdExceeded" : true,
-    "CollectionUsageThresholdSupported" : true,
-    "UsageThresholdSupported" : false
-  }, {
-    "name" : "Hadoop:service=JobTracker,name=RpcDetailedActivityForPort50300",
-    "modelerType" : "RpcDetailedActivityForPort50300",
-    "tag.context" : "rpcdetailed",
-    "tag.port" : "50300",
-    "tag.hostName" : "ip-10-116-103-5.ec2.internal",
-    "getProtocolVersion_num_ops" : 2,
-    "getProtocolVersion_avg_time" : 0.0,
-    "getBuildVersion_num_ops" : 1,
-    "getBuildVersion_avg_time" : 0.0,
-    "getVIVersion_num_ops" : 1,
-    "getVIVersion_avg_time" : 0.0,
-    "getSystemDir_num_ops" : 1,
-    "getSystemDir_avg_time" : 0.0,
-    "heartbeat_num_ops" : 301157,
-    "heartbeat_avg_time" : 0.06060606060606061,
-    "getStagingAreaDir_num_ops" : 1,
-    "getStagingAreaDir_avg_time" : 2.0,
-    "getNewJobId_num_ops" : 1,
-    "getNewJobId_avg_time" : 0.0,
-    "getQueueAdmins_num_ops" : 1,
-    "getQueueAdmins_avg_time" : 0.0,
-    "submitJob_num_ops" : 1,
-    "submitJob_avg_time" : 160.0,
-    "getJobProfile_num_ops" : 1,
-    "getJobProfile_avg_time" : 0.0,
-    "getJobStatus_num_ops" : 26,
-    "getJobStatus_avg_time" : 0.0,
-    "getTaskCompletionEvents_num_ops" : 56,
-    "getTaskCompletionEvents_avg_time" : 0.09090909090909093,
-    "getJobCounters_num_ops" : 1,
-    "getJobCounters_avg_time" : 0.0
-  }, {
-    "name" : "java.lang:type=MemoryPool,name=CMS Perm Gen",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "CMS Perm Gen",
-    "Type" : "NON_HEAP",
-    "Valid" : true,
-    "CollectionUsage" : {
-      "committed" : 44007424,
-      "init" : 21757952,
-      "max" : 85983232,
-      "used" : 26295304
-    },
-    "CollectionUsageThreshold" : 0,
-    "CollectionUsageThresholdCount" : 0,
-    "MemoryManagerNames" : [ "ConcurrentMarkSweep" ],
-    "PeakUsage" : {
-      "committed" : 44007424,
-      "init" : 21757952,
-      "max" : 85983232,
-      "used" : 26468808
-    },
-    "Usage" : {
-      "committed" : 44007424,
-      "init" : 21757952,
-      "max" : 85983232,
-      "used" : 26468808
-    },
-    "UsageThreshold" : 0,
-    "UsageThresholdCount" : 0,
-    "CollectionUsageThresholdExceeded" : true,
-    "CollectionUsageThresholdSupported" : true,
-    "UsageThresholdExceeded" : true,
-    "UsageThresholdSupported" : true
-  }, {
-    "name" : "java.lang:type=OperatingSystem",
-    "modelerType" : "com.sun.management.UnixOperatingSystem",
-    "MaxFileDescriptorCount" : 32768,
-    "OpenFileDescriptorCount" : 92,
-    "CommittedVirtualMemorySize" : 1644990464,
-    "FreePhysicalMemorySize" : 5975683072,
-    "FreeSwapSpaceSize" : 0,
-    "ProcessCpuTime" : 552680000000,
-    "TotalPhysicalMemorySize" : 7694454784,
-    "TotalSwapSpaceSize" : 0,
-    "Name" : "Linux",
-    "Version" : "2.6.32-220.17.1.el6.centos.plus.x86_64",
-    "AvailableProcessors" : 2,
-    "Arch" : "amd64",
-    "SystemLoadAverage" : 0.0
-  }, {
-    "name" : "Hadoop:service=JobTracker,name=QueueMetrics,q=default",
-    "modelerType" : "QueueMetrics,q=default",
-    "tag.context" : "mapred",
-    "tag.sessionId" : "",
-    "tag.Queue" : "default",
-    "tag.hostName" : "ip-10-116-103-5.ec2.internal",
-    "maps_launched" : 1,
-    "maps_completed" : 1,
-    "maps_failed" : 0,
-    "reduces_launched" : 1,
-    "reduces_completed" : 1,
-    "reduces_failed" : 0,
-    "jobs_submitted" : 1,
-    "jobs_completed" : 1,
-    "waiting_maps" : 0,
-    "waiting_reduces" : 0,
-    "reserved_map_slots" : 0,
-    "reserved_reduce_slots" : 0,
-    "jobs_failed" : 0,
-    "jobs_killed" : 0,
-    "jobs_preparing" : 0,
-    "jobs_running" : 0,
-    "maps_killed" : 0,
-    "reduces_killed" : 0,
-    "running_0" : 0,
-    "running_60" : 0,
-    "running_300" : 0,
-    "running_1440" : 0
-  }, {
-    "name" : "java.lang:type=MemoryPool,name=Par Survivor Space",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "Par Survivor Space",
-    "Type" : "HEAP",
-    "Valid" : true,
-    "CollectionUsage" : {
-      "committed" : 20971520,
-      "init" : 20971520,
-      "max" : 20971520,
-      "used" : 263424
-    },
-    "CollectionUsageThreshold" : 0,
-    "CollectionUsageThresholdCount" : 0,
-    "MemoryManagerNames" : [ "ConcurrentMarkSweep", "ParNew" ],
-    "PeakUsage" : {
-      "committed" : 20971520,
-      "init" : 20971520,
-      "max" : 20971520,
-      "used" : 4907280
-    },
-    "Usage" : {
-      "committed" : 20971520,
-      "init" : 20971520,
-      "max" : 20971520,
-      "used" : 263424
-    },
-    "CollectionUsageThresholdExceeded" : true,
-    "CollectionUsageThresholdSupported" : true,
-    "UsageThresholdSupported" : false
-  }, {
-    "name" : "java.lang:type=MemoryManager,name=CodeCacheManager",
-    "modelerType" : "sun.management.MemoryManagerImpl",
-    "Name" : "CodeCacheManager",
-    "Valid" : true,
-    "MemoryPoolNames" : [ "Code Cache" ]
-  }, {
-    "name" : "java.lang:type=MemoryPool,name=CMS Old Gen",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "CMS Old Gen",
-    "Type" : "HEAP",
-    "Valid" : true,
-    "CollectionUsage" : {
-      "committed" : 5439488,
-      "init" : 5439488,
-      "max" : 864026624,
-      "used" : 3141144
-    },
-    "CollectionUsageThreshold" : 0,
-    "CollectionUsageThresholdCount" : 0,
-    "MemoryManagerNames" : [ "ConcurrentMarkSweep" ],
-    "PeakUsage" : {
-      "committed" : 5439488,
-      "init" : 5439488,
-      "max" : 864026624,
-      "used" : 3267800
-    },
-    "Usage" : {
-      "committed" : 5439488,
-      "init" : 5439488,
-      "max" : 864026624,
-      "used" : 3222992
-    },
-    "UsageThreshold" : 0,
-    "UsageThresholdCount" : 0,
-    "CollectionUsageThresholdExceeded" : true,
-    "CollectionUsageThresholdSupported" : true,
-    "UsageThresholdExceeded" : true,
-    "UsageThresholdSupported" : true
-  }, {
-    "name" : "Hadoop:service=JobTracker,name=JobTrackerMetrics",
-    "modelerType" : "JobTrackerMetrics",
-    "tag.context" : "mapred",
-    "tag.sessionId" : "",
-    "tag.hostName" : "ip-10-116-103-5.ec2.internal",
-    "map_slots" : 4,
-    "reduce_slots" : 2,
-    "blacklisted_maps" : 0,
-    "blacklisted_reduces" : 0,
-    "maps_launched" : 1,
-    "maps_completed" : 1,
-    "maps_failed" : 0,
-    "reduces_launched" : 1,
-    "reduces_completed" : 1,
-    "reduces_failed" : 0,
-    "jobs_submitted" : 2,
-    "jobs_completed" : 1,
-    "waiting_maps" : 0,
-    "waiting_reduces" : 0,
-    "reserved_map_slots" : 0,
-    "reserved_reduce_slots" : 0,
-    "occupied_map_slots" : 0,
-    "occupied_reduce_slots" : 0,
-    "jobs_failed" : 1,
-    "jobs_killed" : 0,
-    "jobs_preparing" : 0,
-    "jobs_running" : 0,
-    "running_maps" : 0,
-    "running_reduces" : 0,
-    "maps_killed" : 0,
-    "reduces_killed" : 0,
-    "trackers" : 1,
-    "trackers_blacklisted" : 0,
-    "trackers_graylisted" : 0,
-    "trackers_decommissioned" : 0,
-    "heartbeats" : 301157
-  }, {
-    "name" : "java.lang:type=MemoryPool,name=Code Cache",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "Code Cache",
-    "Type" : "NON_HEAP",
-    "Valid" : true,
-    "CollectionUsage" : null,
-    "MemoryManagerNames" : [ "CodeCacheManager" ],
-    "PeakUsage" : {
-      "committed" : 3211264,
-      "init" : 2555904,
-      "max" : 50331648,
-      "used" : 3144512
-    },
-    "Usage" : {
-      "committed" : 3211264,
-      "init" : 2555904,
-      "max" : 50331648,
-      "used" : 3134080
-    },
-    "UsageThreshold" : 0,
-    "UsageThresholdCount" : 0,
-    "CollectionUsageThresholdSupported" : false,
-    "UsageThresholdExceeded" : true,
-    "UsageThresholdSupported" : true
-  }, {
-    "name" : "java.lang:type=Runtime",
-    "modelerType" : "sun.management.RuntimeImpl",
-    "Name" : "12857@ip-10-116-103-5.ec2.internal",
-    "ClassPath" : "/etc/hadoop/conf:/usr/jdk/jdk1.6.0_31/lib/tools.jar:/usr/lib/hadoop/libexec/..:/usr/lib/hadoop/libexec/../hadoop-core-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/ambari-log4j-1.0.jar:/usr/lib/hadoop/libexec/../lib/asm-3.2.jar:/usr/lib/hadoop/libexec/../lib/aspectjrt-1.6.11.jar:/usr/lib/hadoop/libexec/../lib/aspectjtools-1.6.11.jar:/usr/lib/hadoop/libexec/../lib/commons-beanutils-1.7.0.jar:/usr/lib/hadoop/libexec/../lib/commons-beanutils-core-1.8.0.jar:/usr/lib/hadoop/libexec/../lib/commons-cli-1.2.jar:/usr/lib/hadoop/libexec/../lib/commons-codec-1.4.jar:/usr/lib/hadoop/libexec/../lib/commons-collections-3.2.1.jar:/usr/lib/hadoop/libexec/../lib/commons-configuration-1.6.jar:/usr/lib/hadoop/libexec/../lib/commons-daemon-1.0.1.jar:/usr/lib/hadoop/libexec/../lib/commons-digester-1.8.jar:/usr/lib/hadoop/libexec/../lib/commons-el-1.0.jar:/usr/lib/hadoop/libexec/../lib/commons-httpclient-3.0.1.jar:/usr/lib/hadoop/libexec/../lib/commons-io-2.1.jar:/usr/lib/hadoop/libexec/../lib/commons-lang-2.4.jar:/usr/lib/hadoop/libexec/../lib/commons-logging-1.1.1.jar:/usr/lib/hadoop/libexec/../lib/commons-logging-api-1.0.4.jar:/usr/lib/hadoop/libexec/../lib/commons-math-2.1.jar:/usr/lib/hadoop/libexec/../lib/commons-net-3.1.jar:/usr/lib/hadoop/libexec/../lib/core-3.1.1.jar:/usr/lib/hadoop/libexec/../lib/hadoop-capacity-scheduler-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/hadoop-fairscheduler-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/hadoop-lzo-0.5.0.jar:/usr/lib/hadoop/libexec/../lib/hadoop-thriftfs-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/hadoop-tools.jar:/usr/lib/hadoop/libexec/../lib/hsqldb-1.8.0.10.jar:/usr/lib/hadoop/libexec/../lib/jackson-core-asl-1.8.8.jar:/usr/lib/hadoop/libexec/../lib/jackson-mapper-asl-1.8.8.jar:/usr/lib/hadoop/libexec/../lib/jasper-compiler-5.5.12.jar:/usr/lib/hadoop/libexec/../lib/jasper-runtime-5.5.12.jar:/usr/lib/hadoop/libexec/../lib/jdeb-0.8.jar:/usr/lib/hadoop/libexec/../lib/jersey-core-1.8.jar:/usr/lib/hadoop/libexec/../lib/jersey-json-1.8.jar:/usr/lib/hadoop/libexec/../lib/jersey-server-1.8.jar:/usr/lib/hadoop/libexec/../lib/jets3t-0.6.1.jar:/usr/lib/hadoop/libexec/../lib/jetty-6.1.26.jar:/usr/lib/hadoop/libexec/../lib/jetty-util-6.1.26.jar:/usr/lib/hadoop/libexec/../lib/jsch-0.1.42.jar:/usr/lib/hadoop/libexec/../lib/junit-4.5.jar:/usr/lib/hadoop/libexec/../lib/kfs-0.2.2.jar:/usr/lib/hadoop/libexec/../lib/log4j-1.2.15.jar:/usr/lib/hadoop/libexec/../lib/mockito-all-1.8.5.jar:/usr/lib/hadoop/libexec/../lib/oro-2.0.8.jar:/usr/lib/hadoop/libexec/../lib/postgresql-9.1-901-1.jdbc4.jar:/usr/lib/hadoop/libexec/../lib/servlet-api-2.5-20081211.jar:/usr/lib/hadoop/libexec/../lib/slf4j-api-1.4.3.jar:/usr/lib/hadoop/libexec/../lib/slf4j-log4j12-1.4.3.jar:/usr/lib/hadoop/libexec/../lib/xmlenc-0.52.jar:/usr/lib/hadoop/libexec/../lib/jsp-2.1/jsp-2.1.jar:/usr/lib/hadoop/libexec/../lib/jsp-2.1/jsp-api-2.1.jar",
-    "StartTime" : 1358364466756,
-    "BootClassPath" : "/usr/jdk/jdk1.6.0_31/jre/lib/resources.jar:/usr/jdk/jdk1.6.0_31/jre/lib/rt.jar:/usr/jdk/jdk1.6.0_31/jre/lib/sunrsasign.jar:/usr/jdk/jdk1.6.0_31/jre/lib/jsse.jar:/usr/jdk/jdk1.6.0_31/jre/lib/jce.jar:/usr/jdk/jdk1.6.0_31/jre/lib/charsets.jar:/usr/jdk/jdk1.6.0_31/jre/lib/modules/jdk.boot.jar:/usr/jdk/jdk1.6.0_31/jre/classes",
-    "LibraryPath" : "/usr/lib/hadoop/libexec/../lib/native/Linux-amd64-64",
-    "VmName" : "Java HotSpot(TM) 64-Bit Server VM",
-    "VmVendor" : "Sun Microsystems Inc.",
-    "VmVersion" : "20.6-b01",
-    "BootClassPathSupported" : true,
-    "InputArguments" : [ "-Dproc_jobtracker", "-Xmx1024m", "-Djava.net.preferIPv4Stack=true", "-Djava.net.preferIPv4Stack=true", "-Djava.net.preferIPv4Stack=true", "-XX:ParallelGCThreads=8", "-XX:+UseConcMarkSweepGC", "-XX:ErrorFile=/var/log/hadoop/mapred/hs_err_pid%p.log", "-XX:NewSize=200m", "-XX:MaxNewSize=200m", "-Xloggc:/var/log/hadoop/mapred/gc.log-201301161427", "-verbose:gc", "-XX:+PrintGCDetails", "-XX:+PrintGCTimeStamps", "-XX:+PrintGCDateStamps", "-Xmx1024m", "-Dhadoop.security.logger=INFO,DRFAS", "-Dmapred.audit.logger=INFO,MRAUDIT", "-Dhadoop.mapreduce.jobsummary.logger=INFO,JSA", "-Dhadoop.log.dir=/var/log/hadoop/mapred", "-Dhadoop.log.file=hadoop-mapred-jobtracker-ip-10-116-103-5.log", "-Dhadoop.home.dir=/usr/lib/hadoop/libexec/..", "-Dhadoop.id.str=mapred", "-Dhadoop.root.logger=INFO,DRFA", "-Dhadoop.security.logger=INFO,DRFAS", "-Djava.library.path=/usr/lib/hadoop/libexec/../lib/native/Linux-amd64-64", "-Dhadoop.policy.file=hadoop-policy.xml" ],
-    "ManagementSpecVersion" : "1.2",
-    "SpecName" : "Java Virtual Machine Specification",
-    "SpecVendor" : "Sun Microsystems Inc.",
-    "SpecVersion" : "1.0",
-    "SystemProperties" : [ {
-      "key" : "java.ext.dirs",
-      "value" : "/usr/jdk/jdk1.6.0_31/jre/lib/ext:/usr/java/packages/lib/ext"
-    }, {
-      "key" : "hadoop.home.dir",
-      "value" : "/usr/lib/hadoop/libexec/.."
-    }, {
-      "key" : "java.vm.specification.vendor",
-      "value" : "Sun Microsystems Inc."
-    }, {
-      "key" : "user.timezone",
-      "value" : "America/New_York"
-    }, {
-      "key" : "java.vm.vendor",
-      "value" : "Sun Microsystems Inc."
-    }, {
-      "key" : "hadoop.id.str",
-      "value" : "mapred"
-    }, {
-      "key" : "user.name",
-      "value" : "mapred"
-    }, {
-      "key" : "java.vm.specification.name",
-      "value" : "Java Virtual Machine Specification"
-    }, {
-      "key" : "user.dir",
-      "value" : "/usr/lib/hadoop"
-    }, {
-      "key" : "user.country",
-      "value" : "US"
-    }, {
-      "key" : "user.language",
-      "value" : "en"
-    }, {
-      "key" : "java.specification.version",
-      "value" : "1.6"
-    }, {
-      "key" : "hadoop.log.file",
-      "value" : "hadoop-mapred-jobtracker-ip-10-116-103-5.log"
-    }, {
-      "key" : "hadoop.policy.file",
-      "value" : "hadoop-policy.xml"
-    }, {
-      "key" : "sun.cpu.endian",
-      "value" : "little"
-    }, {
-      "key" : "java.home",
-      "value" : "/usr/jdk/jdk1.6.0_31/jre"
-    }, {
-      "key" : "java.net.preferIPv4Stack",
-      "value" : "true"
-    }, {
-      "key" : "sun.jnu.encoding",
-      "value" : "ANSI_X3.4-1968"
-    }, {
-      "key" : "file.separator",
-      "value" : "/"
-    }, {
-      "key" : "java.vendor.url",
-      "value" : "http://java.sun.com/"
-    }, {
-      "key" : "hadoop.mapreduce.jobsummary.logger",
-      "value" : "INFO,JSA"
-    }, {
-      "key" : "java.awt.graphicsenv",
-      "value" : "sun.awt.X11GraphicsEnvironment"
-    }, {
-      "key" : "hadoop.log.dir",
-      "value" : "/var/log/hadoop/mapred"
-    }, {
-      "key" : "os.arch",
-      "value" : "amd64"
-    }, {
-      "key" : "proc_jobtracker",
-      "value" : ""
-    }, {
-      "key" : "java.io.tmpdir",
-      "value" : "/tmp"
-    }, {
-      "key" : "java.runtime.name",
-      "value" : "Java(TM) SE Runtime Environment"
-    }, {
-      "key" : "java.awt.printerjob",
-      "value" : "sun.print.PSPrinterJob"
-    }, {
-      "key" : "mapred.audit.logger",
-      "value" : "INFO,MRAUDIT"
-    }, {
-      "key" : "file.encoding",
-      "value" : "ANSI_X3.4-1968"
-    }, {
-      "key" : "java.version",
-      "value" : "1.6.0_31"
-    }, {
-      "key" : "java.vendor.url.bug",
-      "value" : "http://java.sun.com/cgi-bin/bugreport.cgi"
-    }, {
-      "key" : "java.vm.specification.version",
-      "value" : "1.0"
-    }, {
-      "key" : "file.encoding.pkg",
-      "value" : "sun.io"
-    }, {
-      "key" : "sun.java.command",
-      "value" : "org.apache.hadoop.mapred.JobTracker"
-    }, {
-      "key" : "sun.java.launcher",
-      "value" : "SUN_STANDARD"
-    }, {
-      "key" : "path.separator",
-      "value" : ":"
-    }, {
-      "key" : "java.runtime.version",
-      "value" : "1.6.0_31-b04"
-    }, {
-      "key" : "java.class.path",
-      "value" : "/etc/hadoop/conf:/usr/jdk/jdk1.6.0_31/lib/tools.jar:/usr/lib/hadoop/libexec/..:/usr/lib/hadoop/libexec/../hadoop-core-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/ambari-log4j-1.0.jar:/usr/lib/hadoop/libexec/../lib/asm-3.2.jar:/usr/lib/hadoop/libexec/../lib/aspectjrt-1.6.11.jar:/usr/lib/hadoop/libexec/../lib/aspectjtools-1.6.11.jar:/usr/lib/hadoop/libexec/../lib/commons-beanutils-1.7.0.jar:/usr/lib/hadoop/libexec/../lib/commons-beanutils-core-1.8.0.jar:/usr/lib/hadoop/libexec/../lib/commons-cli-1.2.jar:/usr/lib/hadoop/libexec/../lib/commons-codec-1.4.jar:/usr/lib/hadoop/libexec/../lib/commons-collections-3.2.1.jar:/usr/lib/hadoop/libexec/../lib/commons-configuration-1.6.jar:/usr/lib/hadoop/libexec/../lib/commons-daemon-1.0.1.jar:/usr/lib/hadoop/libexec/../lib/commons-digester-1.8.jar:/usr/lib/hadoop/libexec/../lib/commons-el-1.0.jar:/usr/lib/hadoop/libexec/../lib/commons-httpclient-3.0.1.jar:/usr/lib/hadoop/libexec/../lib/commons-io-2.1.jar:/usr/lib/hadoop/libexec/../lib/commons-lang-2.4.jar:/usr/lib/hadoop/libexec/../lib/commons-logging-1.1.1.jar:/usr/lib/hadoop/libexec/../lib/commons-logging-api-1.0.4.jar:/usr/lib/hadoop/libexec/../lib/commons-math-2.1.jar:/usr/lib/hadoop/libexec/../lib/commons-net-3.1.jar:/usr/lib/hadoop/libexec/../lib/core-3.1.1.jar:/usr/lib/hadoop/libexec/../lib/hadoop-capacity-scheduler-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/hadoop-fairscheduler-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/hadoop-lzo-0.5.0.jar:/usr/lib/hadoop/libexec/../lib/hadoop-thriftfs-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/hadoop-tools.jar:/usr/lib/hadoop/libexec/../lib/hsqldb-1.8.0.10.jar:/usr/lib/hadoop/libexec/../lib/jackson-core-asl-1.8.8.jar:/usr/lib/hadoop/libexec/../lib/jackson-mapper-asl-1.8.8.jar:/usr/lib/hadoop/libexec/../lib/jasper-compiler-5.5.12.jar:/usr/lib/hadoop/libexec/../lib/jasper-runtime-5.5.12.jar:/usr/lib/hadoop/libexec/../lib/jdeb-0.8.jar:/usr/lib/hadoop/libexec/../lib/jersey-core-1.8.jar:/usr/lib/hadoop/libexec/../lib/jersey-json-1.8.jar:/usr/lib/hadoop/libexec/../lib/jersey-server-1.8.jar:/usr/lib/hadoop/libexec/../lib/jets3t-0.6.1.jar:/usr/lib/hadoop/libexec/../lib/jetty-6.1.26.jar:/usr/lib/hadoop/libexec/../lib/jetty-util-6.1.26.jar:/usr/lib/hadoop/libexec/../lib/jsch-0.1.42.jar:/usr/lib/hadoop/libexec/../lib/junit-4.5.jar:/usr/lib/hadoop/libexec/../lib/kfs-0.2.2.jar:/usr/lib/hadoop/libexec/../lib/log4j-1.2.15.jar:/usr/lib/hadoop/libexec/../lib/mockito-all-1.8.5.jar:/usr/lib/hadoop/libexec/../lib/oro-2.0.8.jar:/usr/lib/hadoop/libexec/../lib/postgresql-9.1-901-1.jdbc4.jar:/usr/lib/hadoop/libexec/../lib/servlet-api-2.5-20081211.jar:/usr/lib/hadoop/libexec/../lib/slf4j-api-1.4.3.jar:/usr/lib/hadoop/libexec/../lib/slf4j-log4j12-1.4.3.jar:/usr/lib/hadoop/libexec/../lib/xmlenc-0.52.jar:/usr/lib/hadoop/libexec/../lib/jsp-2.1/jsp-2.1.jar:/usr/lib/hadoop/libexec/../lib/jsp-2.1/jsp-api-2.1.jar"
-    }, {
-      "key" : "os.name",
-      "value" : "Linux"
-    }, {
-      "key" : "hadoop.security.logger",
-      "value" : "INFO,DRFAS"
-    }, {
-      "key" : "line.separator",
-      "value" : "\n"
-    }, {
-      "key" : "os.version",
-      "value" : "2.6.32-220.17.1.el6.centos.plus.x86_64"
-    }, {
-      "key" : "sun.arch.data.model",
-      "value" : "64"
-    }, {
-      "key" : "java.class.version",
-      "value" : "50.0"
-    }, {
-      "key" : "sun.io.unicode.encoding",
-      "value" : "UnicodeLittle"
-    }, {
-      "key" : "java.vendor",
-      "value" : "Sun Microsystems Inc."
-    }, {
-      "key" : "sun.boot.class.path",
-      "value" : "/usr/jdk/jdk1.6.0_31/jre/lib/resources.jar:/usr/jdk/jdk1.6.0_31/jre/lib/rt.jar:/usr/jdk/jdk1.6.0_31/jre/lib/sunrsasign.jar:/usr/jdk/jdk1.6.0_31/jre/lib/jsse.jar:/usr/jdk/jdk1.6.0_31/jre/lib/jce.jar:/usr/jdk/jdk1.6.0_31/jre/lib/charsets.jar:/usr/jdk/jdk1.6.0_31/jre/lib/modules/jdk.boot.jar:/usr/jdk/jdk1.6.0_31/jre/classes"
-    }, {
-      "key" : "java.vm.info",
-      "value" : "mixed mode"
-    }, {
-      "key" : "java.specification.name",
-      "value" : "Java Platform API Specification"
-    }, {
-      "key" : "java.vm.name",
-      "value" : "Java HotSpot(TM) 64-Bit Server VM"
-    }, {
-      "key" : "java.vm.version",
-      "value" : "20.6-b01"
-    }, {
-      "key" : "sun.boot.library.path",
-      "value" : "/usr/jdk/jdk1.6.0_31/jre/lib/amd64"
-    }, {
-      "key" : "hadoop.root.logger",
-      "value" : "INFO,DRFA"
-    }, {
-      "key" : "java.endorsed.dirs",
-      "value" : "/usr/jdk/jdk1.6.0_31/jre/lib/endorsed"
-    }, {
-      "key" : "sun.os.patch.level",
-      "value" : "unknown"
-    }, {
-      "key" : "sun.cpu.isalist",
-      "value" : ""
-    }, {
-      "key" : "user.home",
-      "value" : "/usr/lib/hadoop"
-    }, {
-      "key" : "java.library.path",
-      "value" : "/usr/lib/hadoop/libexec/../lib/native/Linux-amd64-64"
-    }, {
-      "key" : "java.specification.vendor",
-      "value" : "Sun Microsystems Inc."
-    }, {
-      "key" : "sun.management.compiler",
-      "value" : "HotSpot 64-Bit Tiered Compilers"
-    } ],
-    "Uptime" : 91166419
-  }, {
-    "name" : "java.lang:type=ClassLoading",
-    "modelerType" : "sun.management.ClassLoadingImpl",
-    "LoadedClassCount" : 3365,
-    "UnloadedClassCount" : 0,
-    "TotalLoadedClassCount" : 3365,
-    "Verbose" : false
-  }, {
-    "name" : "java.lang:type=Threading",
-    "modelerType" : "sun.management.ThreadImpl",
-    "ThreadAllocatedMemoryEnabled" : true,
-    "ThreadAllocatedMemorySupported" : true,
-    "ThreadCount" : 75,
-    "DaemonThreadCount" : 66,
-    "PeakThreadCount" : 80,
-    "CurrentThreadCpuTimeSupported" : true,
-    "ObjectMonitorUsageSupported" : true,
-    "SynchronizerUsageSupported" : true,
-    "ThreadContentionMonitoringEnabled" : false,
-    "ThreadContentionMonitoringSupported" : true,
-    "ThreadCpuTimeEnabled" : true,
-    "AllThreadIds" : [ 206, 96, 95, 88, 87, 15, 85, 84, 82, 83, 76, 75, 74, 73, 72, 71, 70, 69, 68, 67, 66, 65, 64, 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 18, 20, 26, 24, 23, 19, 17, 14, 13, 12, 10, 5, 3, 2, 1 ],
-    "CurrentThreadCpuTime" : 0,
-    "CurrentThreadUserTime" : 0,
-    "TotalStartedThreadCount" : 197,
-    "ThreadCpuTimeSupported" : true
-  }, {
-    "name" : "Hadoop:service=JobTracker,name=jvm",
-    "modelerType" : "jvm",
-    "tag.context" : "jvm",
-    "tag.processName" : "JobTracker",
-    "tag.sessionId" : "",
-    "tag.hostName" : "ip-10-116-103-5.ec2.internal",
-    "memNonHeapUsedM" : 28.231514,
-    "memNonHeapCommittedM" : 45.03125,
-    "memHeapUsedM" : 41.50329,
-    "memHeapCommittedM" : 185.1875,
-    "gcCount" : 293,
-    "gcTimeMillis" : 128469,
-    "threadsNew" : 0,
-    "threadsRunnable" : 6,
-    "threadsBlocked" : 0,
-    "threadsWaiting" : 59,
-    "threadsTimedWaiting" : 10,
-    "threadsTerminated" : 0,
-    "logFatal" : 0,
-    "logError" : 0,
-    "logWarn" : 0,
-    "logInfo" : 3
-  }, {
-    "name" : "java.util.logging:type=Logging",
-    "modelerType" : "java.util.logging.Logging",
-    "LoggerNames" : [ "sun.awt.AppContext", "javax.management", "global", "javax.management.mbeanserver", "" ]
-  }, {
-    "name" : "java.lang:type=GarbageCollector,name=ParNew",
-    "modelerType" : "sun.management.GarbageCollectorImpl",
-    "LastGcInfo" : {
-      "GcThreadCount" : 11,
-      "duration" : 4,
-      "endTime" : 90371302,
-      "id" : 32,
-      "memoryUsageAfterGc" : [ {
-        "key" : "CMS Perm Gen",
-        "value" : {
-          "committed" : 44007424,
-          "init" : 21757952,
-          "max" : 85983232,
-          "used" : 26468808
-        }
-      }, {
-        "key" : "Code Cache",
-        "value" : {
-          "committed" : 3211264,
-          "init" : 2555904,
-          "max" : 50331648,
-          "used" : 3132544
-        }
-      }, {
-        "key" : "CMS Old Gen",
-        "value" : {
-          "committed" : 5439488,
-          "init" : 5439488,
-          "max" : 864026624,
-          "used" : 3222992
-        }
-      }, {
-        "key" : "Par Eden Space",
-        "value" : {
-          "committed" : 167772160,
-          "init" : 167772160,
-          "max" : 167772160,
-          "used" : 0
-        }
-      }, {
-        "key" : "Par Survivor Space",
-        "value" : {
-          "committed" : 20971520,
-          "init" : 20971520,
-          "max" : 20971520,
-          "used" : 263424
-        }
-      } ],
-      "memoryUsageBeforeGc" : [ {
-        "key" : "CMS Perm Gen",
-        "value" : {
-          "committed" : 44007424,
-          "init" : 21757952,
-          "max" : 85983232,
-          "used" : 26468808
-        }
-      }, {
-        "key" : "Code Cache",
-        "value" : {
-          "committed" : 3211264,
-          "init" : 2555904,
-          "max" : 50331648,
-          "used" : 3132544
-        }
-      }, {
-        "key" : "CMS Old Gen",
-        "value" : {
-          "committed" : 5439488,
-          "init" : 5439488,
-          "max" : 864026624,
-          "used" : 3214808
-        }
-      }, {
-        "key" : "Par Eden Space",
-        "value" : {
-          "committed" : 167772160,
-          "init" : 167772160,
-          "max" : 167772160,
-          "used" : 167772160
-        }
-      }, {
-        "key" : "Par Survivor Space",
-        "value" : {
-          "committed" : 20971520,
-          "init" : 20971520,
-          "max" : 20971520,
-          "used" : 268496
-        }
-      } ],
-      "startTime" : 90371298
-    },
-    "CollectionCount" : 32,
-    "CollectionTime" : 402,
-    "Name" : "ParNew",
-    "Valid" : true,
-    "MemoryPoolNames" : [ "Par Eden Space", "Par Survivor Space" ]
-  }, {
-    "name" : "com.sun.management:type=HotSpotDiagnostic",
-    "modelerType" : "sun.management.HotSpotDiagnostic",
-    "DiagnosticOptions" : [ {
-      "name" : "HeapDumpBeforeFullGC",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "HeapDumpAfterFullGC",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "HeapDumpOnOutOfMemoryError",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "HeapDumpPath",
-      "origin" : "DEFAULT",
-      "value" : "",
-      "writeable" : true
-    }, {
-      "name" : "CMSAbortablePrecleanWaitMillis",
-      "origin" : "DEFAULT",
-      "value" : "100",
-      "writeable" : true
-    }, {
-      "name" : "CMSWaitDuration",
-      "origin" : "DEFAULT",
-      "value" : "2000",
-      "writeable" : true
-    }, {
-      "name" : "PrintGC",
-      "origin" : "VM_CREATION",
-      "value" : "true",
-      "writeable" : true
-    }, {
-      "name" : "PrintGCDetails",
-      "origin" : "VM_CREATION",
-      "value" : "true",
-      "writeable" : true
-    }, {
-      "name" : "PrintGCDateStamps",
-      "origin" : "VM_CREATION",
-      "value" : "true",
-      "writeable" : true
-    }, {
-      "name" : "PrintGCTimeStamps",
-      "origin" : "VM_CREATION",
-      "value" : "true",
-      "writeable" : true
-    }, {
-      "name" : "PrintClassHistogramBeforeFullGC",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintClassHistogramAfterFullGC",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintClassHistogram",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintConcurrentLocks",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    } ]
-  }, {
-    "name" : "Hadoop:service=JobTracker,name=ugi",
-    "modelerType" : "ugi",
-    "tag.context" : "ugi",
-    "tag.hostName" : "ip-10-116-103-5.ec2.internal",
-    "loginSuccess_num_ops" : 0,
-    "loginSuccess_avg_time" : 0.0,
-    "loginFailure_num_ops" : 0,
-    "loginFailure_avg_time" : 0.0
-  }, {
-    "name" : "Hadoop:service=JobTracker,name=MetricsSystem,sub=Stats",
-    "modelerType" : "MetricsSystem,sub=Stats",
-    "tag.context" : "metricssystem",
-    "num_sources" : 6,
-    "num_sinks" : 1,
-    "sink.ganglia.latency_num_ops" : 9115,
-    "sink.ganglia.latency_avg_time" : 1.0,
-    "sink.ganglia.dropped" : 0,
-    "sink.ganglia.qsize" : 0,
-    "snapshot_num_ops" : 63811,
-    "snapshot_avg_time" : 0.14285714285714285,
-    "snapshot_stdev_time" : 0.37796447300922725,
-    "snapshot_imin_time" : 0.0,
-    "snapshot_imax_time" : 1.0,
-    "snapshot_min_time" : 0.0,
-    "snapshot_max_time" : 4.0,
-    "publish_num_ops" : 9115,
-    "publish_avg_time" : 0.0,
-    "publish_stdev_time" : 0.0,
-    "publish_imin_time" : 0.0,
-    "publish_imax_time" : 1.401298464324817E-45,
-    "publish_min_time" : 0.0,
-    "publish_max_time" : 335.0,
-    "dropped_pub_all" : 0
-  }, {
-    "name" : "JMImplementation:type=MBeanServerDelegate",
-    "modelerType" : "javax.management.MBeanServerDelegate",
-    "MBeanServerId" : "ip-10-116-103-5.ec2.internal_1358364467998",
-    "SpecificationName" : "Java Management Extensions",
-    "SpecificationVersion" : "1.4",
-    "SpecificationVendor" : "Sun Microsystems",
-    "ImplementationName" : "JMX",
-    "ImplementationVersion" : "1.6.0_31-b04",
-    "ImplementationVendor" : "Sun Microsystems"
-  } ]
-}
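(The same envelope can be fetched live: the captures on either side of this point come from Hadoop's JMXJsonServlet, which also accepts a qry= filter so a single bean can be requested instead of the full dump. A minimal sketch under those assumptions — host, port, and class name are placeholders, with 50030 being the stock JobTracker HTTP port:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URL;

public class JmxProbe {
  public static void main(String[] args) throws Exception {
    // Filter down to the one bean the tests above inspect.
    URL url = new URL("http://jobtracker.example.com:50030/jmx"
        + "?qry=Hadoop:service=JobTracker,name=JobTrackerInfo");
    BufferedReader r = new BufferedReader(
        new InputStreamReader(url.openStream(), "UTF-8"));
    for (String line; (line = r.readLine()) != null; ) {
      System.out.println(line); // same "beans" envelope as the fixtures here
    }
    r.close();
  }
}
)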
diff --git a/branch-1.2/ambari-server/src/test/resources/mapreduce_tasktracker_jmx.json b/branch-1.2/ambari-server/src/test/resources/mapreduce_tasktracker_jmx.json
deleted file mode 100644
index 9bd6e63..0000000
--- a/branch-1.2/ambari-server/src/test/resources/mapreduce_tasktracker_jmx.json
+++ /dev/null
@@ -1,842 +0,0 @@
-{
-  "beans" : [ {
-    "name" : "java.lang:type=MemoryPool,name=PS Eden Space",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "PS Eden Space",
-    "Type" : "HEAP",
-    "Valid" : true,
-    "CollectionUsage" : {
-      "committed" : 3735552,
-      "init" : 30081024,
-      "max" : 357629952,
-      "used" : 0
-    },
-    "CollectionUsageThreshold" : 0,
-    "CollectionUsageThresholdCount" : 0,
-    "MemoryManagerNames" : [ "PS MarkSweep", "PS Scavenge" ],
-    "PeakUsage" : {
-      "committed" : 30081024,
-      "init" : 30081024,
-      "max" : 357695488,
-      "used" : 30081024
-    },
-    "Usage" : {
-      "committed" : 3735552,
-      "init" : 30081024,
-      "max" : 357629952,
-      "used" : 3459696
-    },
-    "CollectionUsageThresholdExceeded" : true,
-    "CollectionUsageThresholdSupported" : true,
-    "UsageThresholdSupported" : false
-  }, {
-    "name" : "java.lang:type=Memory",
-    "modelerType" : "sun.management.MemoryImpl",
-    "Verbose" : false,
-    "HeapMemoryUsage" : {
-      "committed" : 40173568,
-      "init" : 120225856,
-      "max" : 954466304,
-      "used" : 18330984
-    },
-    "NonHeapMemoryUsage" : {
-      "committed" : 46202880,
-      "init" : 24313856,
-      "max" : 136314880,
-      "used" : 24235104
-    },
-    "ObjectPendingFinalizationCount" : 0
-  }, {
-    "name" : "Hadoop:service=TaskTracker,name=MetricsSystem,sub=Control",
-    "modelerType" : "org.apache.hadoop.metrics2.impl.MetricsSystemImpl"
-  }, {
-    "name" : "java.lang:type=MemoryPool,name=PS Survivor Space",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "PS Survivor Space",
-    "Type" : "HEAP",
-    "Valid" : true,
-    "CollectionUsage" : {
-      "committed" : 131072,
-      "init" : 4980736,
-      "max" : 131072,
-      "used" : 98304
-    },
-    "CollectionUsageThreshold" : 0,
-    "CollectionUsageThresholdCount" : 0,
-    "MemoryManagerNames" : [ "PS MarkSweep", "PS Scavenge" ],
-    "PeakUsage" : {
-      "committed" : 4980736,
-      "init" : 4980736,
-      "max" : 4980736,
-      "used" : 2867816
-    },
-    "Usage" : {
-      "committed" : 131072,
-      "init" : 4980736,
-      "max" : 131072,
-      "used" : 98304
-    },
-    "CollectionUsageThresholdExceeded" : true,
-    "CollectionUsageThresholdSupported" : true,
-    "UsageThresholdSupported" : false
-  }, {
-    "name" : "java.lang:type=GarbageCollector,name=PS MarkSweep",
-    "modelerType" : "sun.management.GarbageCollectorImpl",
-    "LastGcInfo" : {
-      "GcThreadCount" : 2,
-      "duration" : 166,
-      "endTime" : 86208313,
-      "id" : 1,
-      "memoryUsageAfterGc" : [ {
-        "key" : "PS Survivor Space",
-        "value" : {
-          "committed" : 131072,
-          "init" : 4980736,
-          "max" : 131072,
-          "used" : 0
-        }
-      }, {
-        "key" : "PS Old Gen",
-        "value" : {
-          "committed" : 36306944,
-          "init" : 80216064,
-          "max" : 715849728,
-          "used" : 7479576
-        }
-      }, {
-        "key" : "PS Perm Gen",
-        "value" : {
-          "committed" : 43450368,
-          "init" : 21757952,
-          "max" : 85983232,
-          "used" : 21567872
-        }
-      }, {
-        "key" : "Code Cache",
-        "value" : {
-          "committed" : 2752512,
-          "init" : 2555904,
-          "max" : 50331648,
-          "used" : 2635904
-        }
-      }, {
-        "key" : "PS Eden Space",
-        "value" : {
-          "committed" : 3735552,
-          "init" : 30081024,
-          "max" : 357629952,
-          "used" : 0
-        }
-      } ],
-      "memoryUsageBeforeGc" : [ {
-        "key" : "PS Survivor Space",
-        "value" : {
-          "committed" : 131072,
-          "init" : 4980736,
-          "max" : 131072,
-          "used" : 98304
-        }
-      }, {
-        "key" : "PS Old Gen",
-        "value" : {
-          "committed" : 80216064,
-          "init" : 80216064,
-          "max" : 715849728,
-          "used" : 80204800
-        }
-      }, {
-        "key" : "PS Perm Gen",
-        "value" : {
-          "committed" : 21757952,
-          "init" : 21757952,
-          "max" : 85983232,
-          "used" : 21567872
-        }
-      }, {
-        "key" : "Code Cache",
-        "value" : {
-          "committed" : 2752512,
-          "init" : 2555904,
-          "max" : 50331648,
-          "used" : 2635904
-        }
-      }, {
-        "key" : "PS Eden Space",
-        "value" : {
-          "committed" : 3735552,
-          "init" : 30081024,
-          "max" : 357629952,
-          "used" : 0
-        }
-      } ],
-      "startTime" : 86208147
-    },
-    "CollectionCount" : 1,
-    "CollectionTime" : 165,
-    "Name" : "PS MarkSweep",
-    "Valid" : true,
-    "MemoryPoolNames" : [ "PS Eden Space", "PS Survivor Space", "PS Old Gen", "PS Perm Gen" ]
-  }, {
-    "name" : "java.lang:type=Compilation",
-    "modelerType" : "sun.management.CompilationImpl",
-    "Name" : "HotSpot 64-Bit Tiered Compilers",
-    "CompilationTimeMonitoringSupported" : true,
-    "TotalCompilationTime" : 12183
-  }, {
-    "name" : "Hadoop:service=TaskTracker,name=TaskTrackerInfo",
-    "modelerType" : "org.apache.hadoop.mapred.TaskTracker",
-    "Hostname" : "ip-10-85-111-149.ec2.internal",
-    "Version" : "1.1.2.21, r",
-    "ConfigVersion" : "default",
-    "JobTrackerUrl" : "ip-10-116-103-5.ec2.internal:50300",
-    "RpcPort" : 39494,
-    "HttpPort" : 50060,
-    "Healthy" : true,
-    "TasksInfoJson" : "{\"running\":0,\"failed\":0,\"commit_pending\":0}"
-  }, {
-    "name" : "Hadoop:service=TaskTracker,name=RpcDetailedActivityForPort39494",
-    "modelerType" : "RpcDetailedActivityForPort39494",
-    "tag.context" : "rpcdetailed",
-    "tag.port" : "39494",
-    "tag.hostName" : "ip-10-85-111-149.ec2.internal",
-    "getProtocolVersion_num_ops" : 4,
-    "getProtocolVersion_avg_time" : 0.0,
-    "getTask_num_ops" : 4,
-    "getTask_avg_time" : 1.0,
-    "statusUpdate_num_ops" : 8,
-    "statusUpdate_avg_time" : 1.0,
-    "done_num_ops" : 4,
-    "done_avg_time" : 0.0,
-    "getMapCompletionEvents_num_ops" : 6,
-    "getMapCompletionEvents_avg_time" : 1.0,
-    "ping_num_ops" : 1,
-    "ping_avg_time" : 0.0,
-    "commitPending_num_ops" : 1,
-    "commitPending_avg_time" : 1.0,
-    "canCommit_num_ops" : 2,
-    "canCommit_avg_time" : 0.0
-  }, {
-    "name" : "java.lang:type=OperatingSystem",
-    "modelerType" : "com.sun.management.UnixOperatingSystem",
-    "MaxFileDescriptorCount" : 32768,
-    "OpenFileDescriptorCount" : 87,
-    "CommittedVirtualMemorySize" : 1542619136,
-    "FreePhysicalMemorySize" : 5949931520,
-    "FreeSwapSpaceSize" : 0,
-    "ProcessCpuTime" : 443210000000,
-    "TotalPhysicalMemorySize" : 7694454784,
-    "TotalSwapSpaceSize" : 0,
-    "Name" : "Linux",
-    "Version" : "2.6.32-220.17.1.el6.centos.plus.x86_64",
-    "AvailableProcessors" : 2,
-    "Arch" : "amd64",
-    "SystemLoadAverage" : 0.0
-  }, {
-    "name" : "Hadoop:service=TaskTracker,name=ShuffleServerMetrics",
-    "modelerType" : "ShuffleServerMetrics",
-    "tag.context" : "mapred",
-    "tag.sessionId" : "",
-    "tag.hostName" : "ip-10-85-111-149.ec2.internal",
-    "shuffle_handler_busy_percent" : 0.0,
-    "shuffle_output_bytes" : 1841,
-    "shuffle_failed_outputs" : 0,
-    "shuffle_success_outputs" : 1,
-    "shuffle_exceptions_caught" : 0
-  }, {
-    "name" : "java.lang:type=MemoryManager,name=CodeCacheManager",
-    "modelerType" : "sun.management.MemoryManagerImpl",
-    "Name" : "CodeCacheManager",
-    "Valid" : true,
-    "MemoryPoolNames" : [ "Code Cache" ]
-  }, {
-    "name" : "java.lang:type=MemoryPool,name=Code Cache",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "Code Cache",
-    "Type" : "NON_HEAP",
-    "Valid" : true,
-    "CollectionUsage" : null,
-    "MemoryManagerNames" : [ "CodeCacheManager" ],
-    "PeakUsage" : {
-      "committed" : 2752512,
-      "init" : 2555904,
-      "max" : 50331648,
-      "used" : 2673472
-    },
-    "Usage" : {
-      "committed" : 2752512,
-      "init" : 2555904,
-      "max" : 50331648,
-      "used" : 2666880
-    },
-    "UsageThreshold" : 0,
-    "UsageThresholdCount" : 0,
-    "CollectionUsageThresholdSupported" : false,
-    "UsageThresholdExceeded" : true,
-    "UsageThresholdSupported" : true
-  }, {
-    "name" : "java.lang:type=Runtime",
-    "modelerType" : "sun.management.RuntimeImpl",
-    "Name" : "14318@ip-10-85-111-149.ec2.internal",
-    "ClassPath" : "/etc/hadoop/conf:/usr/jdk/jdk1.6.0_31/lib/tools.jar:/usr/lib/hadoop/libexec/..:/usr/lib/hadoop/libexec/../hadoop-core-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/ambari-log4j-1.0.jar:/usr/lib/hadoop/libexec/../lib/asm-3.2.jar:/usr/lib/hadoop/libexec/../lib/aspectjrt-1.6.11.jar:/usr/lib/hadoop/libexec/../lib/aspectjtools-1.6.11.jar:/usr/lib/hadoop/libexec/../lib/commons-beanutils-1.7.0.jar:/usr/lib/hadoop/libexec/../lib/commons-beanutils-core-1.8.0.jar:/usr/lib/hadoop/libexec/../lib/commons-cli-1.2.jar:/usr/lib/hadoop/libexec/../lib/commons-codec-1.4.jar:/usr/lib/hadoop/libexec/../lib/commons-collections-3.2.1.jar:/usr/lib/hadoop/libexec/../lib/commons-configuration-1.6.jar:/usr/lib/hadoop/libexec/../lib/commons-daemon-1.0.1.jar:/usr/lib/hadoop/libexec/../lib/commons-digester-1.8.jar:/usr/lib/hadoop/libexec/../lib/commons-el-1.0.jar:/usr/lib/hadoop/libexec/../lib/commons-httpclient-3.0.1.jar:/usr/lib/hadoop/libexec/../lib/commons-io-2.1.jar:/usr/lib/hadoop/libexec/../lib/commons-lang-2.4.jar:/usr/lib/hadoop/libexec/../lib/commons-logging-1.1.1.jar:/usr/lib/hadoop/libexec/../lib/commons-logging-api-1.0.4.jar:/usr/lib/hadoop/libexec/../lib/commons-math-2.1.jar:/usr/lib/hadoop/libexec/../lib/commons-net-3.1.jar:/usr/lib/hadoop/libexec/../lib/core-3.1.1.jar:/usr/lib/hadoop/libexec/../lib/hadoop-capacity-scheduler-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/hadoop-fairscheduler-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/hadoop-lzo-0.5.0.jar:/usr/lib/hadoop/libexec/../lib/hadoop-thriftfs-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/hadoop-tools.jar:/usr/lib/hadoop/libexec/../lib/hsqldb-1.8.0.10.jar:/usr/lib/hadoop/libexec/../lib/jackson-core-asl-1.8.8.jar:/usr/lib/hadoop/libexec/../lib/jackson-mapper-asl-1.8.8.jar:/usr/lib/hadoop/libexec/../lib/jasper-compiler-5.5.12.jar:/usr/lib/hadoop/libexec/../lib/jasper-runtime-5.5.12.jar:/usr/lib/hadoop/libexec/../lib/jdeb-0.8.jar:/usr/lib/hadoop/libexec/../lib/jersey-core-1.8.jar:/usr/lib/hadoop/libexec/../lib/jersey-json-1.8.jar:/usr/lib/hadoop/libexec/../lib/jersey-server-1.8.jar:/usr/lib/hadoop/libexec/../lib/jets3t-0.6.1.jar:/usr/lib/hadoop/libexec/../lib/jetty-6.1.26.jar:/usr/lib/hadoop/libexec/../lib/jetty-util-6.1.26.jar:/usr/lib/hadoop/libexec/../lib/jsch-0.1.42.jar:/usr/lib/hadoop/libexec/../lib/junit-4.5.jar:/usr/lib/hadoop/libexec/../lib/kfs-0.2.2.jar:/usr/lib/hadoop/libexec/../lib/log4j-1.2.15.jar:/usr/lib/hadoop/libexec/../lib/mockito-all-1.8.5.jar:/usr/lib/hadoop/libexec/../lib/oro-2.0.8.jar:/usr/lib/hadoop/libexec/../lib/postgresql-9.1-901-1.jdbc4.jar:/usr/lib/hadoop/libexec/../lib/servlet-api-2.5-20081211.jar:/usr/lib/hadoop/libexec/../lib/slf4j-api-1.4.3.jar:/usr/lib/hadoop/libexec/../lib/slf4j-log4j12-1.4.3.jar:/usr/lib/hadoop/libexec/../lib/xmlenc-0.52.jar:/usr/lib/hadoop/libexec/../lib/jsp-2.1/jsp-2.1.jar:/usr/lib/hadoop/libexec/../lib/jsp-2.1/jsp-api-2.1.jar",
-    "StartTime" : 1358364468454,
-    "BootClassPath" : "/usr/jdk/jdk1.6.0_31/jre/lib/resources.jar:/usr/jdk/jdk1.6.0_31/jre/lib/rt.jar:/usr/jdk/jdk1.6.0_31/jre/lib/sunrsasign.jar:/usr/jdk/jdk1.6.0_31/jre/lib/jsse.jar:/usr/jdk/jdk1.6.0_31/jre/lib/jce.jar:/usr/jdk/jdk1.6.0_31/jre/lib/charsets.jar:/usr/jdk/jdk1.6.0_31/jre/lib/modules/jdk.boot.jar:/usr/jdk/jdk1.6.0_31/jre/classes",
-    "LibraryPath" : "/usr/lib/hadoop/libexec/../lib/native/Linux-amd64-64",
-    "VmName" : "Java HotSpot(TM) 64-Bit Server VM",
-    "VmVendor" : "Sun Microsystems Inc.",
-    "VmVersion" : "20.6-b01",
-    "BootClassPathSupported" : true,
-    "InputArguments" : [ "-Dproc_tasktracker", "-Xmx1024m", "-Djava.net.preferIPv4Stack=true", "-Djava.net.preferIPv4Stack=true", "-Djava.net.preferIPv4Stack=true", "-Xmx1024m", "-Dhadoop.security.logger=ERROR,console", "-Dmapred.audit.logger=ERROR,console", "-Dhadoop.log.dir=/var/log/hadoop/mapred", "-Dhadoop.log.file=hadoop-mapred-tasktracker-ip-10-85-111-149.log", "-Dhadoop.home.dir=/usr/lib/hadoop/libexec/..", "-Dhadoop.id.str=mapred", "-Dhadoop.root.logger=INFO,DRFA", "-Dhadoop.security.logger=INFO,NullAppender", "-Djava.library.path=/usr/lib/hadoop/libexec/../lib/native/Linux-amd64-64", "-Dhadoop.policy.file=hadoop-policy.xml" ],
-    "ManagementSpecVersion" : "1.2",
-    "SpecName" : "Java Virtual Machine Specification",
-    "SpecVendor" : "Sun Microsystems Inc.",
-    "SpecVersion" : "1.0",
-    "SystemProperties" : [ {
-      "key" : "java.ext.dirs",
-      "value" : "/usr/jdk/jdk1.6.0_31/jre/lib/ext:/usr/java/packages/lib/ext"
-    }, {
-      "key" : "hadoop.home.dir",
-      "value" : "/usr/lib/hadoop/libexec/.."
-    }, {
-      "key" : "java.vm.specification.vendor",
-      "value" : "Sun Microsystems Inc."
-    }, {
-      "key" : "user.timezone",
-      "value" : "America/New_York"
-    }, {
-      "key" : "java.vm.vendor",
-      "value" : "Sun Microsystems Inc."
-    }, {
-      "key" : "hadoop.id.str",
-      "value" : "mapred"
-    }, {
-      "key" : "user.name",
-      "value" : "mapred"
-    }, {
-      "key" : "java.vm.specification.name",
-      "value" : "Java Virtual Machine Specification"
-    }, {
-      "key" : "user.dir",
-      "value" : "/usr/lib/hadoop"
-    }, {
-      "key" : "user.country",
-      "value" : "US"
-    }, {
-      "key" : "user.language",
-      "value" : "en"
-    }, {
-      "key" : "java.specification.version",
-      "value" : "1.6"
-    }, {
-      "key" : "hadoop.log.file",
-      "value" : "hadoop-mapred-tasktracker-ip-10-85-111-149.log"
-    }, {
-      "key" : "hadoop.policy.file",
-      "value" : "hadoop-policy.xml"
-    }, {
-      "key" : "sun.cpu.endian",
-      "value" : "little"
-    }, {
-      "key" : "java.home",
-      "value" : "/usr/jdk/jdk1.6.0_31/jre"
-    }, {
-      "key" : "java.net.preferIPv4Stack",
-      "value" : "true"
-    }, {
-      "key" : "sun.jnu.encoding",
-      "value" : "ANSI_X3.4-1968"
-    }, {
-      "key" : "file.separator",
-      "value" : "/"
-    }, {
-      "key" : "java.vendor.url",
-      "value" : "http://java.sun.com/"
-    }, {
-      "key" : "java.awt.graphicsenv",
-      "value" : "sun.awt.X11GraphicsEnvironment"
-    }, {
-      "key" : "hadoop.log.dir",
-      "value" : "/var/log/hadoop/mapred"
-    }, {
-      "key" : "os.arch",
-      "value" : "amd64"
-    }, {
-      "key" : "proc_tasktracker",
-      "value" : ""
-    }, {
-      "key" : "java.io.tmpdir",
-      "value" : "/tmp"
-    }, {
-      "key" : "java.runtime.name",
-      "value" : "Java(TM) SE Runtime Environment"
-    }, {
-      "key" : "java.awt.printerjob",
-      "value" : "sun.print.PSPrinterJob"
-    }, {
-      "key" : "mapred.audit.logger",
-      "value" : "ERROR,console"
-    }, {
-      "key" : "file.encoding",
-      "value" : "ANSI_X3.4-1968"
-    }, {
-      "key" : "java.version",
-      "value" : "1.6.0_31"
-    }, {
-      "key" : "java.vendor.url.bug",
-      "value" : "http://java.sun.com/cgi-bin/bugreport.cgi"
-    }, {
-      "key" : "java.vm.specification.version",
-      "value" : "1.0"
-    }, {
-      "key" : "file.encoding.pkg",
-      "value" : "sun.io"
-    }, {
-      "key" : "sun.java.command",
-      "value" : "org.apache.hadoop.mapred.TaskTracker"
-    }, {
-      "key" : "sun.java.launcher",
-      "value" : "SUN_STANDARD"
-    }, {
-      "key" : "path.separator",
-      "value" : ":"
-    }, {
-      "key" : "java.runtime.version",
-      "value" : "1.6.0_31-b04"
-    }, {
-      "key" : "java.class.path",
-      "value" : "/etc/hadoop/conf:/usr/jdk/jdk1.6.0_31/lib/tools.jar:/usr/lib/hadoop/libexec/..:/usr/lib/hadoop/libexec/../hadoop-core-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/ambari-log4j-1.0.jar:/usr/lib/hadoop/libexec/../lib/asm-3.2.jar:/usr/lib/hadoop/libexec/../lib/aspectjrt-1.6.11.jar:/usr/lib/hadoop/libexec/../lib/aspectjtools-1.6.11.jar:/usr/lib/hadoop/libexec/../lib/commons-beanutils-1.7.0.jar:/usr/lib/hadoop/libexec/../lib/commons-beanutils-core-1.8.0.jar:/usr/lib/hadoop/libexec/../lib/commons-cli-1.2.jar:/usr/lib/hadoop/libexec/../lib/commons-codec-1.4.jar:/usr/lib/hadoop/libexec/../lib/commons-collections-3.2.1.jar:/usr/lib/hadoop/libexec/../lib/commons-configuration-1.6.jar:/usr/lib/hadoop/libexec/../lib/commons-daemon-1.0.1.jar:/usr/lib/hadoop/libexec/../lib/commons-digester-1.8.jar:/usr/lib/hadoop/libexec/../lib/commons-el-1.0.jar:/usr/lib/hadoop/libexec/../lib/commons-httpclient-3.0.1.jar:/usr/lib/hadoop/libexec/../lib/commons-io-2.1.jar:/usr/lib/hadoop/libexec/../lib/commons-lang-2.4.jar:/usr/lib/hadoop/libexec/../lib/commons-logging-1.1.1.jar:/usr/lib/hadoop/libexec/../lib/commons-logging-api-1.0.4.jar:/usr/lib/hadoop/libexec/../lib/commons-math-2.1.jar:/usr/lib/hadoop/libexec/../lib/commons-net-3.1.jar:/usr/lib/hadoop/libexec/../lib/core-3.1.1.jar:/usr/lib/hadoop/libexec/../lib/hadoop-capacity-scheduler-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/hadoop-fairscheduler-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/hadoop-lzo-0.5.0.jar:/usr/lib/hadoop/libexec/../lib/hadoop-thriftfs-1.1.2.21.jar:/usr/lib/hadoop/libexec/../lib/hadoop-tools.jar:/usr/lib/hadoop/libexec/../lib/hsqldb-1.8.0.10.jar:/usr/lib/hadoop/libexec/../lib/jackson-core-asl-1.8.8.jar:/usr/lib/hadoop/libexec/../lib/jackson-mapper-asl-1.8.8.jar:/usr/lib/hadoop/libexec/../lib/jasper-compiler-5.5.12.jar:/usr/lib/hadoop/libexec/../lib/jasper-runtime-5.5.12.jar:/usr/lib/hadoop/libexec/../lib/jdeb-0.8.jar:/usr/lib/hadoop/libexec/../lib/jersey-core-1.8.jar:/usr/lib/hadoop/libexec/../lib/jersey-json-1.8.jar:/usr/lib/hadoop/libexec/../lib/jersey-server-1.8.jar:/usr/lib/hadoop/libexec/../lib/jets3t-0.6.1.jar:/usr/lib/hadoop/libexec/../lib/jetty-6.1.26.jar:/usr/lib/hadoop/libexec/../lib/jetty-util-6.1.26.jar:/usr/lib/hadoop/libexec/../lib/jsch-0.1.42.jar:/usr/lib/hadoop/libexec/../lib/junit-4.5.jar:/usr/lib/hadoop/libexec/../lib/kfs-0.2.2.jar:/usr/lib/hadoop/libexec/../lib/log4j-1.2.15.jar:/usr/lib/hadoop/libexec/../lib/mockito-all-1.8.5.jar:/usr/lib/hadoop/libexec/../lib/oro-2.0.8.jar:/usr/lib/hadoop/libexec/../lib/postgresql-9.1-901-1.jdbc4.jar:/usr/lib/hadoop/libexec/../lib/servlet-api-2.5-20081211.jar:/usr/lib/hadoop/libexec/../lib/slf4j-api-1.4.3.jar:/usr/lib/hadoop/libexec/../lib/slf4j-log4j12-1.4.3.jar:/usr/lib/hadoop/libexec/../lib/xmlenc-0.52.jar:/usr/lib/hadoop/libexec/../lib/jsp-2.1/jsp-2.1.jar:/usr/lib/hadoop/libexec/../lib/jsp-2.1/jsp-api-2.1.jar"
-    }, {
-      "key" : "os.name",
-      "value" : "Linux"
-    }, {
-      "key" : "hadoop.security.logger",
-      "value" : "INFO,NullAppender"
-    }, {
-      "key" : "line.separator",
-      "value" : "\n"
-    }, {
-      "key" : "os.version",
-      "value" : "2.6.32-220.17.1.el6.centos.plus.x86_64"
-    }, {
-      "key" : "sun.arch.data.model",
-      "value" : "64"
-    }, {
-      "key" : "java.class.version",
-      "value" : "50.0"
-    }, {
-      "key" : "sun.io.unicode.encoding",
-      "value" : "UnicodeLittle"
-    }, {
-      "key" : "java.vendor",
-      "value" : "Sun Microsystems Inc."
-    }, {
-      "key" : "sun.boot.class.path",
-      "value" : "/usr/jdk/jdk1.6.0_31/jre/lib/resources.jar:/usr/jdk/jdk1.6.0_31/jre/lib/rt.jar:/usr/jdk/jdk1.6.0_31/jre/lib/sunrsasign.jar:/usr/jdk/jdk1.6.0_31/jre/lib/jsse.jar:/usr/jdk/jdk1.6.0_31/jre/lib/jce.jar:/usr/jdk/jdk1.6.0_31/jre/lib/charsets.jar:/usr/jdk/jdk1.6.0_31/jre/lib/modules/jdk.boot.jar:/usr/jdk/jdk1.6.0_31/jre/classes"
-    }, {
-      "key" : "java.vm.info",
-      "value" : "mixed mode"
-    }, {
-      "key" : "java.specification.name",
-      "value" : "Java Platform API Specification"
-    }, {
-      "key" : "java.vm.name",
-      "value" : "Java HotSpot(TM) 64-Bit Server VM"
-    }, {
-      "key" : "java.vm.version",
-      "value" : "20.6-b01"
-    }, {
-      "key" : "sun.boot.library.path",
-      "value" : "/usr/jdk/jdk1.6.0_31/jre/lib/amd64"
-    }, {
-      "key" : "hadoop.root.logger",
-      "value" : "INFO,DRFA"
-    }, {
-      "key" : "java.endorsed.dirs",
-      "value" : "/usr/jdk/jdk1.6.0_31/jre/lib/endorsed"
-    }, {
-      "key" : "sun.os.patch.level",
-      "value" : "unknown"
-    }, {
-      "key" : "sun.cpu.isalist",
-      "value" : ""
-    }, {
-      "key" : "user.home",
-      "value" : "/usr/lib/hadoop"
-    }, {
-      "key" : "java.library.path",
-      "value" : "/usr/lib/hadoop/libexec/../lib/native/Linux-amd64-64"
-    }, {
-      "key" : "java.specification.vendor",
-      "value" : "Sun Microsystems Inc."
-    }, {
-      "key" : "sun.management.compiler",
-      "value" : "HotSpot 64-Bit Tiered Compilers"
-    } ],
-    "Uptime" : 93078193
-  }, {
-    "name" : "java.lang:type=ClassLoading",
-    "modelerType" : "sun.management.ClassLoadingImpl",
-    "LoadedClassCount" : 2760,
-    "UnloadedClassCount" : 1,
-    "TotalLoadedClassCount" : 2761,
-    "Verbose" : false
-  }, {
-    "name" : "java.lang:type=Threading",
-    "modelerType" : "sun.management.ThreadImpl",
-    "ThreadAllocatedMemoryEnabled" : true,
-    "ThreadAllocatedMemorySupported" : true,
-    "ThreadContentionMonitoringEnabled" : false,
-    "AllThreadIds" : [ 839, 82, 12, 15, 16, 42, 41, 40, 39, 38, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 18, 20, 19, 17, 11, 10, 4, 3, 2, 1 ],
-    "DaemonThreadCount" : 26,
-    "PeakThreadCount" : 39,
-    "CurrentThreadCpuTimeSupported" : true,
-    "ObjectMonitorUsageSupported" : true,
-    "SynchronizerUsageSupported" : true,
-    "ThreadContentionMonitoringSupported" : true,
-    "ThreadCpuTimeEnabled" : true,
-    "CurrentThreadCpuTime" : 0,
-    "CurrentThreadUserTime" : 0,
-    "ThreadCount" : 31,
-    "TotalStartedThreadCount" : 832,
-    "ThreadCpuTimeSupported" : true
-  }, {
-    "name" : "java.util.logging:type=Logging",
-    "modelerType" : "java.util.logging.Logging",
-    "LoggerNames" : [ "sun.awt.AppContext", "javax.management", "global", "javax.management.mbeanserver", "" ]
-  }, {
-    "name" : "Hadoop:service=TaskTracker,name=TaskTrackerMetrics",
-    "modelerType" : "TaskTrackerMetrics",
-    "tag.context" : "mapred",
-    "tag.sessionId" : "",
-    "tag.hostName" : "ip-10-85-111-149.ec2.internal",
-    "maps_running" : 1,
-    "reduces_running" : 1,
-    "mapTaskSlots" : 4,
-    "reduceTaskSlots" : 2,
-    "failedDirs" : 1,
-    "tasks_completed" : 4,
-    "tasks_failed_timeout" : 1,
-    "tasks_failed_ping" : 1
-  }, {
-    "name" : "Hadoop:service=TaskTracker,name=MetricsSystem,sub=Stats",
-    "modelerType" : "MetricsSystem,sub=Stats",
-    "tag.context" : "metricssystem",
-    "num_sources" : 6,
-    "num_sinks" : 1,
-    "sink.ganglia.latency_num_ops" : 9306,
-    "sink.ganglia.latency_avg_time" : 2.0,
-    "sink.ganglia.dropped" : 0,
-    "sink.ganglia.qsize" : 0,
-    "snapshot_num_ops" : 65148,
-    "snapshot_avg_time" : 0.0,
-    "snapshot_stdev_time" : 0.0,
-    "snapshot_imin_time" : 0.0,
-    "snapshot_imax_time" : 1.401298464324817E-45,
-    "snapshot_min_time" : 0.0,
-    "snapshot_max_time" : 3.0,
-    "publish_num_ops" : 9306,
-    "publish_avg_time" : 0.0,
-    "publish_stdev_time" : 0.0,
-    "publish_imin_time" : 0.0,
-    "publish_imax_time" : 1.401298464324817E-45,
-    "publish_min_time" : 0.0,
-    "publish_max_time" : 11.0,
-    "dropped_pub_all" : 0
-  }, {
-    "name" : "Hadoop:service=TaskTracker,name=RpcActivityForPort39494",
-    "modelerType" : "RpcActivityForPort39494",
-    "tag.context" : "rpc",
-    "tag.port" : "39494",
-    "tag.hostName" : "ip-10-85-111-149.ec2.internal",
-    "rpcAuthenticationSuccesses" : 0,
-    "rpcAuthenticationFailures" : 0,
-    "rpcAuthorizationSuccesses" : 4,
-    "rpcAuthorizationFailures" : 0,
-    "ReceivedBytes" : 14123,
-    "SentBytes" : 3612,
-    "RpcQueueTime_num_ops" : 30,
-    "RpcQueueTime_avg_time" : 0.14285714285714285,
-    "RpcProcessingTime_num_ops" : 30,
-    "RpcProcessingTime_avg_time" : 0.6428571428571428,
-    "NumOpenConnections" : 0,
-    "callQueueLen" : 0
-  }, {
-    "name" : "Hadoop:service=TaskTracker,name=jvm",
-    "modelerType" : "jvm",
-    "tag.context" : "jvm",
-    "tag.processName" : "TaskTracker",
-    "tag.sessionId" : "",
-    "tag.hostName" : "ip-10-85-111-149.ec2.internal",
-    "memNonHeapUsedM" : 23.112396,
-    "memNonHeapCommittedM" : 44.0625,
-    "memHeapUsedM" : 17.613655,
-    "memHeapCommittedM" : 38.3125,
-    "gcCount" : 10439,
-    "gcTimeMillis" : 13882,
-    "threadsNew" : 0,
-    "threadsRunnable" : 6,
-    "threadsBlocked" : 0,
-    "threadsWaiting" : 17,
-    "threadsTimedWaiting" : 8,
-    "threadsTerminated" : 0,
-    "logFatal" : 0,
-    "logError" : 0,
-    "logWarn" : 0,
-    "logInfo" : 3
-  }, {
-    "name" : "com.sun.management:type=HotSpotDiagnostic",
-    "modelerType" : "sun.management.HotSpotDiagnostic",
-    "DiagnosticOptions" : [ {
-      "name" : "HeapDumpBeforeFullGC",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "HeapDumpAfterFullGC",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "HeapDumpOnOutOfMemoryError",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "HeapDumpPath",
-      "origin" : "DEFAULT",
-      "value" : "",
-      "writeable" : true
-    }, {
-      "name" : "CMSAbortablePrecleanWaitMillis",
-      "origin" : "DEFAULT",
-      "value" : "100",
-      "writeable" : true
-    }, {
-      "name" : "CMSWaitDuration",
-      "origin" : "DEFAULT",
-      "value" : "2000",
-      "writeable" : true
-    }, {
-      "name" : "PrintGC",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintGCDetails",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintGCDateStamps",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintGCTimeStamps",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintClassHistogramBeforeFullGC",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintClassHistogramAfterFullGC",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintClassHistogram",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintConcurrentLocks",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    } ]
-  }, {
-    "name" : "java.lang:type=MemoryPool,name=PS Perm Gen",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "PS Perm Gen",
-    "Type" : "NON_HEAP",
-    "Valid" : true,
-    "CollectionUsage" : {
-      "committed" : 43450368,
-      "init" : 21757952,
-      "max" : 85983232,
-      "used" : 21567872
-    },
-    "CollectionUsageThreshold" : 0,
-    "CollectionUsageThresholdCount" : 0,
-    "MemoryManagerNames" : [ "PS MarkSweep" ],
-    "PeakUsage" : {
-      "committed" : 43450368,
-      "init" : 21757952,
-      "max" : 85983232,
-      "used" : 21568224
-    },
-    "Usage" : {
-      "committed" : 43450368,
-      "init" : 21757952,
-      "max" : 85983232,
-      "used" : 21568224
-    },
-    "UsageThreshold" : 0,
-    "UsageThresholdCount" : 0,
-    "CollectionUsageThresholdExceeded" : true,
-    "CollectionUsageThresholdSupported" : true,
-    "UsageThresholdExceeded" : true,
-    "UsageThresholdSupported" : true
-  }, {
-    "name" : "java.lang:type=GarbageCollector,name=PS Scavenge",
-    "modelerType" : "sun.management.GarbageCollectorImpl",
-    "LastGcInfo" : {
-      "GcThreadCount" : 2,
-      "duration" : 1,
-      "endTime" : 93078197,
-      "id" : 10440,
-      "memoryUsageAfterGc" : [ {
-        "key" : "PS Survivor Space",
-        "value" : {
-          "committed" : 131072,
-          "init" : 4980736,
-          "max" : 131072,
-          "used" : 122896
-        }
-      }, {
-        "key" : "PS Old Gen",
-        "value" : {
-          "committed" : 36306944,
-          "init" : 80216064,
-          "max" : 715849728,
-          "used" : 14823816
-        }
-      }, {
-        "key" : "PS Perm Gen",
-        "value" : {
-          "committed" : 43450368,
-          "init" : 21757952,
-          "max" : 85983232,
-          "used" : 21568224
-        }
-      }, {
-        "key" : "Code Cache",
-        "value" : {
-          "committed" : 2752512,
-          "init" : 2555904,
-          "max" : 50331648,
-          "used" : 2666880
-        }
-      }, {
-        "key" : "PS Eden Space",
-        "value" : {
-          "committed" : 3670016,
-          "init" : 30081024,
-          "max" : 357564416,
-          "used" : 0
-        }
-      } ],
-      "memoryUsageBeforeGc" : [ {
-        "key" : "PS Survivor Space",
-        "value" : {
-          "committed" : 131072,
-          "init" : 4980736,
-          "max" : 131072,
-          "used" : 98304
-        }
-      }, {
-        "key" : "PS Old Gen",
-        "value" : {
-          "committed" : 36306944,
-          "init" : 80216064,
-          "max" : 715849728,
-          "used" : 14758280
-        }
-      }, {
-        "key" : "PS Perm Gen",
-        "value" : {
-          "committed" : 43450368,
-          "init" : 21757952,
-          "max" : 85983232,
-          "used" : 21568224
-        }
-      }, {
-        "key" : "Code Cache",
-        "value" : {
-          "committed" : 2752512,
-          "init" : 2555904,
-          "max" : 50331648,
-          "used" : 2666880
-        }
-      }, {
-        "key" : "PS Eden Space",
-        "value" : {
-          "committed" : 3735552,
-          "init" : 30081024,
-          "max" : 357629952,
-          "used" : 3735552
-        }
-      } ],
-      "startTime" : 93078196
-    },
-    "CollectionCount" : 10440,
-    "CollectionTime" : 13718,
-    "Name" : "PS Scavenge",
-    "Valid" : true,
-    "MemoryPoolNames" : [ "PS Eden Space", "PS Survivor Space" ]
-  }, {
-    "name" : "java.lang:type=MemoryPool,name=PS Old Gen",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "PS Old Gen",
-    "Type" : "HEAP",
-    "Valid" : true,
-    "CollectionUsage" : {
-      "committed" : 36306944,
-      "init" : 80216064,
-      "max" : 715849728,
-      "used" : 7479576
-    },
-    "CollectionUsageThreshold" : 0,
-    "CollectionUsageThresholdCount" : 0,
-    "MemoryManagerNames" : [ "PS MarkSweep" ],
-    "PeakUsage" : {
-      "committed" : 80216064,
-      "init" : 80216064,
-      "max" : 715849728,
-      "used" : 80204800
-    },
-    "Usage" : {
-      "committed" : 36306944,
-      "init" : 80216064,
-      "max" : 715849728,
-      "used" : 14823816
-    },
-    "UsageThreshold" : 0,
-    "UsageThresholdCount" : 0,
-    "CollectionUsageThresholdExceeded" : true,
-    "CollectionUsageThresholdSupported" : true,
-    "UsageThresholdExceeded" : true,
-    "UsageThresholdSupported" : true
-  }, {
-    "name" : "Hadoop:service=TaskTracker,name=ugi",
-    "modelerType" : "ugi",
-    "tag.context" : "ugi",
-    "tag.hostName" : "ip-10-85-111-149.ec2.internal",
-    "loginSuccess_num_ops" : 0,
-    "loginSuccess_avg_time" : 0.0,
-    "loginFailure_num_ops" : 0,
-    "loginFailure_avg_time" : 0.0
-  }, {
-    "name" : "JMImplementation:type=MBeanServerDelegate",
-    "modelerType" : "javax.management.MBeanServerDelegate",
-    "MBeanServerId" : "ip-10-85-111-149.ec2.internal_1358364469165",
-    "SpecificationName" : "Java Management Extensions",
-    "SpecificationVersion" : "1.4",
-    "SpecificationVendor" : "Sun Microsystems",
-    "ImplementationName" : "JMX",
-    "ImplementationVersion" : "1.6.0_31-b04",
-    "ImplementationVendor" : "Sun Microsystems"
-  } ]
-}
\ No newline at end of file
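
The deleted fixture above is a captured TaskTracker /jmx response: the same java.lang MBeans (Memory, MemoryPool, GarbageCollector, Runtime, Threading) that any JVM exposes through the platform MBean server. As a point of reference only — this is an illustrative sketch, not Ambari code — the following standalone Java program reads the equivalent HeapMemoryUsage and per-pool figures in-process, which is where fields such as "committed", "init", "max", and "used" in the dump originate:

    import java.lang.management.ManagementFactory;
    import java.lang.management.MemoryMXBean;
    import java.lang.management.MemoryPoolMXBean;
    import java.lang.management.MemoryUsage;

    public class JvmMetricsProbe {
        public static void main(String[] args) {
            // Mirrors the java.lang:type=Memory bean in the dump above.
            MemoryMXBean mem = ManagementFactory.getMemoryMXBean();
            MemoryUsage heap = mem.getHeapMemoryUsage();
            System.out.printf("HeapMemoryUsage: committed=%d init=%d max=%d used=%d%n",
                    heap.getCommitted(), heap.getInit(), heap.getMax(), heap.getUsed());
            // Mirrors the java.lang:type=MemoryPool,name=... beans (PS Eden Space, etc.).
            for (MemoryPoolMXBean pool : ManagementFactory.getMemoryPoolMXBeans()) {
                MemoryUsage u = pool.getUsage();
                System.out.printf("%s (%s): used=%d max=%d%n",
                        pool.getName(), pool.getType(), u.getUsed(), u.getMax());
            }
        }
    }
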
diff --git a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.1/repos/repoinfo.xml b/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.1/repos/repoinfo.xml
deleted file mode 100644
index 9ea91b8..0000000
--- a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.1/repos/repoinfo.xml
+++ /dev/null
@@ -1,57 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<reposinfo>
-  <os type="centos6, redhat6">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos6</baseurl>
-      <repoid>HDP-1.1.1.16</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos6</baseurl>
-      <repoid>HDP-UTILS-1.1.0.15</repoid>
-      <reponame>HDP-UTILS</reponame>
-      <mirrorslist></mirrorslist>
-    </repo>
-    <repo>
-      <baseurl></baseurl>
-      <repoid>epel</repoid>
-      <reponame>epel</reponame>
-      <mirrorslist><![CDATA[https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch]]></mirrorslist>
-    </repo>
-  </os>
-  <os type="centos5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos5</baseurl>
-      <repoid>HDP-1.1.1.16</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos5</baseurl>
-      <repoid>HDP-UTILS-1.1.0.15</repoid>
-      <reponame>HDP-UTILS</reponame>
-      <mirrorslist></mirrorslist>
-    </repo>
-    <repo>
-      <baseurl></baseurl>
-      <repoid>epel</repoid>
-      <reponame>epel</reponame>
-      <mirrorslist><![CDATA[https://mirrors.fedoraproject.org/metalink?repo=epel-5&arch=$basearch]]></mirrorslist>
-    </repo>
-  </os>
-</reposinfo>
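
For orientation, the repoinfo.xml fixture removed above is a plain nested XML document (os → repo → baseurl/repoid/reponame). A minimal sketch of walking that structure with the JDK's DOM API — the file name and flat printing are illustrative, not Ambari's actual stack-definition parser:

    import java.io.File;
    import javax.xml.parsers.DocumentBuilderFactory;
    import org.w3c.dom.Document;
    import org.w3c.dom.Element;
    import org.w3c.dom.NodeList;

    public class RepoInfoReader {
        public static void main(String[] args) throws Exception {
            Document doc = DocumentBuilderFactory.newInstance()
                    .newDocumentBuilder().parse(new File("repoinfo.xml"));
            NodeList osNodes = doc.getElementsByTagName("os");
            for (int i = 0; i < osNodes.getLength(); i++) {
                Element os = (Element) osNodes.item(i);
                NodeList repos = os.getElementsByTagName("repo");
                for (int j = 0; j < repos.getLength(); j++) {
                    Element repo = (Element) repos.item(j);
                    System.out.printf("os=%s repoid=%s baseurl=%s%n",
                            os.getAttribute("type"),
                            text(repo, "repoid"),
                            text(repo, "baseurl"));
                }
            }
        }

        // Returns the text content of the first child element with the given tag.
        private static String text(Element parent, String tag) {
            NodeList nodes = parent.getElementsByTagName(tag);
            return nodes.getLength() == 0 ? "" : nodes.item(0).getTextContent();
        }
    }
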
diff --git a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.1/services/HDFS/configuration/hbase-site.xml b/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.1/services/HDFS/configuration/hbase-site.xml
deleted file mode 100644
index 5024e85..0000000
--- a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.1/services/HDFS/configuration/hbase-site.xml
+++ /dev/null
@@ -1,137 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>hbase.regionserver.msginterval</name>
-    <value>1000</value>
-    <description>Interval between messages from the RegionServer to HMaster
-    in milliseconds.  Default is 15. Set this value low if you want unit
-    tests to be responsive.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.pause</name>
-    <value>5000</value>
-    <description>General client pause value.  Used mostly as value to wait
-    before running a retry of a failed get, region lookup, etc.</description>
-  </property>
-  <property>
-    <name>hbase.master.meta.thread.rescanfrequency</name>
-    <value>10000</value>
-    <description>How long the HMaster sleeps (in milliseconds) between scans of
-    the root and meta tables.
-    </description>
-  </property>
-  <property>
-    <name>hbase.server.thread.wakefrequency</name>
-    <value>1000</value>
-    <description>Time to sleep in between searches for work (in milliseconds).
-    Used as sleep interval by service threads such as META scanner and log roller.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.handler.count</name>
-    <value>5</value>
-    <description>Count of RPC Server instances spun up on RegionServers.
-    The same property is used by the HMaster for the count of master handlers.
-    Default is 10.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.lease.period</name>
-    <value>6000</value>
-    <description>Length of time the master will wait before timing out a region
-    server lease. Since region servers report in every second (see above), this
-    value has been reduced so that the master will notice a dead region server
-    sooner. The default is 30 seconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.port</name>
-    <value>-1</value>
-    <description>The port for the hbase master web UI
-    Set to -1 if you do not want the info server to run.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.info.port</name>
-    <value>-1</value>
-    <description>The port for the hbase regionserver web UI
-    Set to -1 if you do not want the info server to run.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.info.port.auto</name>
-    <value>true</value>
-    <description>Info server auto port bind. Enables automatic port
-    search if hbase.regionserver.info.port is already in use.
-    Enabled for testing to run multiple tests on one machine.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.lease.thread.wakefrequency</name>
-    <value>3000</value>
-    <description>The interval between checks for expired region server leases.
-    This value has been reduced due to the other reduced values above so that
-    the master will notice a dead region server sooner. The default is 15 seconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.optionalcacheflushinterval</name>
-    <value>10000</value>
-    <description>
-    Amount of time to wait since the last time a region was flushed before
-    invoking an optional cache flush. Default 60,000.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.safemode</name>
-    <value>false</value>
-    <description>
-    Turn on/off safe mode in region server. Always on for production, always off
-    for tests.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.max.filesize</name>
-    <value>67108864</value>
-    <description>
-    Maximum desired file size for an HRegion.  If filesize exceeds
-    value + (value / 2), the HRegion is split in two.  Default: 256M.
-
-    Keep the maximum filesize small so we split more often in tests.
-    </description>
-  </property>
-  <property>
-    <name>hadoop.log.dir</name>
-    <value>${user.dir}/../logs</value>
-  </property>
-  <property>
-    <name>hbase.zookeeper.property.clientPort</name>
-    <value>21818</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-    The port at which the clients will connect.
-    </description>
-  </property>
-</configuration>
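
The hbase-site.xml fixture above follows the standard Hadoop configuration file format, so it can be loaded with org.apache.hadoop.conf.Configuration. A minimal sketch — the fallback defaults passed to the getters here are illustrative, not taken from this file:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;

    public class SiteConfigDemo {
        public static void main(String[] args) {
            // Load only the site file, without pulling in hbase/hadoop defaults.
            Configuration conf = new Configuration(false);
            conf.addResource(new Path("hbase-site.xml"));
            int msgInterval = conf.getInt("hbase.regionserver.msginterval", 15000);
            boolean safeMode = conf.getBoolean("hbase.regionserver.safemode", true);
            System.out.println("msginterval=" + msgInterval + " safemode=" + safeMode);
        }
    }
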
diff --git a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.1/services/HDFS/configuration/hdfs-site.xml b/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.1/services/HDFS/configuration/hdfs-site.xml
deleted file mode 100644
index 240068b..0000000
--- a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.1/services/HDFS/configuration/hdfs-site.xml
+++ /dev/null
@@ -1,403 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-<!-- file system properties -->
-
-  <property>
-    <name>dfs.name.dir</name>
-    <!-- cluster variant -->
-    <value>/mnt/hmc/hadoop/hdfs/namenode</value>
-    <description>Determines where on the local filesystem the DFS name node
-      should store the name table.  If this is a comma-delimited list
-      of directories then the name table is replicated in all of the
-      directories, for redundancy. </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.support.append</name>
-    <value>true</value>
-    <description>to enable dfs append</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.webhdfs.enabled</name>
-    <value>false</value>
-    <description>to enable webhdfs</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value>0</value>
-    <description>Number of failed disks a DataNode will tolerate.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.block.local-path-access.user</name>
-    <value>hbase</value>
-    <description>The user who is allowed to perform
-    short-circuit reads.
-    </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.data.dir</name>
-    <value>/mnt/hmc/hadoop/hdfs/data</value>
-    <description>Determines where on the local filesystem a DFS data node
-  should store its blocks.  If this is a comma-delimited
-  list of directories, then data will be stored in all named
-  directories, typically on different devices.
-  Directories that do not exist are ignored.
-  </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.hosts.exclude</name>
-    <value>/etc/hadoop/conf/dfs.exclude</value>
-    <description>Names a file that contains a list of hosts that are
-    not permitted to connect to the namenode.  The full pathname of the
-    file must be specified.  If the value is empty, no hosts are
-    excluded.</description>
-  </property>
-
-  <property>
-    <name>dfs.hosts</name>
-    <value>/etc/hadoop/conf/dfs.include</value>
-    <description>Names a file that contains a list of hosts that are
-    permitted to connect to the namenode. The full pathname of the file
-    must be specified.  If the value is empty, all hosts are
-    permitted.</description>
-  </property>
-
-  <property>
-    <name>dfs.replication.max</name>
-    <value>50</value>
-    <description>Maximal block replication.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.replication</name>
-    <value>3</value>
-    <description>Default block replication.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.heartbeat.interval</name>
-    <value>3</value>
-    <description>Determines datanode heartbeat interval in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.safemode.threshold.pct</name>
-    <value>1.0f</value>
-    <description>
-        Specifies the percentage of blocks that should satisfy
-        the minimal replication requirement defined by dfs.replication.min.
-        Values less than or equal to 0 mean not to start in safe mode.
-        Values greater than 1 will make safe mode permanent.
-        </description>
-  </property>
-
-  <property>
-    <name>dfs.balance.bandwidthPerSec</name>
-    <value>6250000</value>
-    <description>
-        Specifies the maximum amount of bandwidth that each datanode
-        can utilize for balancing purposes, in terms of
-        the number of bytes per second.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.address</name>
-    <value>0.0.0.0:50010</value>
-  </property>
-
-  <property>
-    <name>dfs.datanode.http.address</name>
-    <value>0.0.0.0:50075</value>
-  </property>
-
-  <property>
-    <name>dfs.block.size</name>
-    <value>134217728</value>
-    <description>The default block size for new files.</description>
-  </property>
-
-  <property>
-    <name>dfs.http.address</name>
-    <value>hdp1.cybervisiontech.com.ua:50070</value>
-    <description>The address and port on which the NameNode web UI
-    listens.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.du.reserved</name>
-    <!-- cluster variant -->
-    <value>1073741824</value>
-    <description>Reserved space in bytes per volume. Always leave this much space free for non-DFS use.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.ipc.address</name>
-    <value>0.0.0.0:8010</value>
-    <description>
-    The datanode ipc server address and port.
-    If the port is 0 then the server will start on a free port.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.blockreport.initialDelay</name>
-    <value>120</value>
-    <description>Delay for the first block report in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.du.pct</name>
-    <value>0.85f</value>
-    <description>When calculating remaining space, only use this percentage of the real available space.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>40</value>
-    <description>The number of server threads for the namenode.</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.max.xcievers</name>
-    <value>1024</value>
-    <description>PRIVATE CONFIG VARIABLE</description>
-  </property>
-
-  <!-- Permissions configuration -->
-
-  <property>
-    <name>dfs.umaskmode</name>
-    <value>077</value>
-    <description>
-    The octal umask used when creating files and directories.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.web.ugi</name>
-    <!-- cluster variant -->
-    <value>gopher,gopher</value>
-    <description>The user account used by the web interface.
-    Syntax: USERNAME,GROUP1,GROUP2, ...
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.permissions</name>
-    <value>true</value>
-    <description>
-    If "true", enable permission checking in HDFS.
-    If "false", permission checking is turned off,
-    but all other behavior is unchanged.
-    Switching from one parameter value to the other does not change the mode,
-    owner, or group of files or directories.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.permissions.supergroup</name>
-    <value>hdfs</value>
-    <description>The name of the group of super-users.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>100</value>
-    <description>Added to grow the queue size so that more client connections are allowed. (This second definition overrides the value of 40 set above.)</description>
-  </property>
-
-  <property>
-    <name>ipc.server.max.response.size</name>
-    <value>5242880</value>
-  </property>
-
-  <property>
-    <name>dfs.block.access.token.enable</name>
-    <value>true</value>
-    <description>
-    If "true", access tokens are used as capabilities for accessing datanodes.
-    If "false", no access tokens are checked on accessing datanodes.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.kerberos.principal</name>
-    <value>nn/_HOST@</value>
-    <description>
-    Kerberos principal name for the NameNode.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.principal</name>
-    <value>nn/_HOST@</value>
-    <description>
-    Kerberos principal name for the secondary NameNode.
-    </description>
-  </property>
-
-
-<!--
-  This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
--->
-  <property>
-    <name>dfs.namenode.kerberos.https.principal</name>
-    <value>host/_HOST@</value>
-    <description>The Kerberos principal for the host that the NameNode runs on.</description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.https.principal</name>
-    <value>host/_HOST@</value>
-    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
-  </property>
-
-  <property>
-    <!-- cluster variant -->
-    <name>dfs.secondary.http.address</name>
-    <value>hdp2.cybervisiontech.com.ua:50090</value>
-    <description>Address of secondary namenode web server</description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.https.port</name>
-    <value>50490</value>
-    <description>The https port where secondary-namenode binds</description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.principal</name>
-    <value>HTTP/_HOST@</value>
-    <description>
-      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
-      HTTP SPNEGO specification.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.keytab</name>
-    <value>/nn.service.keytab</value>
-    <description>
-      The Kerberos keytab file with the credentials for the
-      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.kerberos.principal</name>
-    <value>dn/_HOST@</value>
-    <description>
-        The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.keytab.file</name>
-    <value>/nn.service.keytab</value>
-    <description>
-        Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.keytab.file</name>
-    <value>/nn.service.keytab</value>
-    <description>
-        Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.keytab.file</name>
-    <value>/dn.service.keytab</value>
-    <description>
-        The filename of the keytab file for the DataNode.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.https.port</name>
-    <value>50470</value>
-    <description>The https port where the namenode binds.</description>
-  </property>
-
-  <property>
-    <name>dfs.https.address</name>
-    <value>hdp1.cybervisiontech.com.ua:50470</value>
-    <description>The https address where the namenode binds.</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.data.dir.perm</name>
-    <value>750</value>
-    <description>The permissions that should be set on dfs.data.dir
-    directories. The datanode will not come up if the permissions are
-    different on existing dfs.data.dir directories. If the directories
-    don't exist, they will be created with this permission.</description>
-  </property>
-
-  <property>
-    <name>dfs.access.time.precision</name>
-    <value>0</value>
-    <description>The access time for an HDFS file is precise up to this value.
-    The default value is 1 hour. Setting a value of 0 disables
-    access times for HDFS.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.cluster.administrators</name>
-    <value> hdfs</value>
-    <description>ACL specifying who can view the default servlets in HDFS.</description>
-  </property>
-
-  <property>
-    <name>ipc.server.read.threadpool.size</name>
-    <value>5</value>
-    <description></description>
-  </property>
-
-</configuration>
diff --git a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.1/services/HDFS/metainfo.xml b/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.1/services/HDFS/metainfo.xml
deleted file mode 100644
index 3c42b9b..0000000
--- a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.1/services/HDFS/metainfo.xml
+++ /dev/null
@@ -1,54 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is a comment for the HDFS service</comment>
-    <version>1.0</version>
-
-
-    <components>
-        <component>
-            <name>NAMENODE</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>SECONDARY_NAMENODE</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>DATANODE1</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>DATANODE2</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>DATANODE</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>HDFS_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>
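
The metainfo.xml fixture removed above pairs each component with a MASTER/SLAVE/CLIENT category. A short DOM-based sketch of extracting those pairs — again purely illustrative; Ambari's real stack loader lives elsewhere in ambari-server:

    import java.io.File;
    import javax.xml.parsers.DocumentBuilderFactory;
    import org.w3c.dom.Document;
    import org.w3c.dom.Element;
    import org.w3c.dom.NodeList;

    public class MetainfoComponents {
        public static void main(String[] args) throws Exception {
            Document doc = DocumentBuilderFactory.newInstance()
                    .newDocumentBuilder().parse(new File("metainfo.xml"));
            NodeList components = doc.getElementsByTagName("component");
            for (int i = 0; i < components.getLength(); i++) {
                Element c = (Element) components.item(i);
                // Each <component> carries exactly one <name> and one <category>.
                String name = c.getElementsByTagName("name").item(0).getTextContent();
                String category = c.getElementsByTagName("category").item(0).getTextContent();
                System.out.println(category + ": " + name);
            }
        }
    }
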
diff --git a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/hbase-site.xml b/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/hbase-site.xml
deleted file mode 100644
index 5024e85..0000000
--- a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/hbase-site.xml
+++ /dev/null
@@ -1,137 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>hbase.regionserver.msginterval</name>
-    <value>1000</value>
-    <description>Interval between messages from the RegionServer to HMaster
-    in milliseconds.  Default is 15. Set this value low if you want unit
-    tests to be responsive.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.pause</name>
-    <value>5000</value>
-    <description>General client pause value.  Used mostly as value to wait
-    before running a retry of a failed get, region lookup, etc.</description>
-  </property>
-  <property>
-    <name>hbase.master.meta.thread.rescanfrequency</name>
-    <value>10000</value>
-    <description>How long the HMaster sleeps (in milliseconds) between scans of
-    the root and meta tables.
-    </description>
-  </property>
-  <property>
-    <name>hbase.server.thread.wakefrequency</name>
-    <value>1000</value>
-    <description>Time to sleep in between searches for work (in milliseconds).
-    Used as sleep interval by service threads such as META scanner and log roller.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.handler.count</name>
-    <value>5</value>
-    <description>Count of RPC Server instances spun up on RegionServers.
-    The same property is used by the HMaster for the count of master handlers.
-    Default is 10.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.lease.period</name>
-    <value>6000</value>
-    <description>Length of time the master will wait before timing out a region
-    server lease. Since region servers report in every second (see above), this
-    value has been reduced so that the master will notice a dead region server
-    sooner. The default is 30 seconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.port</name>
-    <value>-1</value>
-    <description>The port for the hbase master web UI
-    Set to -1 if you do not want the info server to run.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.info.port</name>
-    <value>-1</value>
-    <description>The port for the hbase regionserver web UI
-    Set to -1 if you do not want the info server to run.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.info.port.auto</name>
-    <value>true</value>
-    <description>Info server auto port bind. Enables automatic port
-    search if hbase.regionserver.info.port is already in use.
-    Enabled for testing to run multiple tests on one machine.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.lease.thread.wakefrequency</name>
-    <value>3000</value>
-    <description>The interval between checks for expired region server leases.
-    This value has been reduced due to the other reduced values above so that
-    the master will notice a dead region server sooner. The default is 15 seconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.optionalcacheflushinterval</name>
-    <value>10000</value>
-    <description>
-    Amount of time to wait since the last time a region was flushed before
-    invoking an optional cache flush. Default 60,000.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.safemode</name>
-    <value>false</value>
-    <description>
-    Turn on/off safe mode in region server. Always on for production, always off
-    for tests.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.max.filesize</name>
-    <value>67108864</value>
-    <description>
-    Maximum desired file size for an HRegion.  If filesize exceeds
-    value + (value / 2), the HRegion is split in two.  Default: 256M.
-
-    Keep the maximum filesize small so we split more often in tests.
-    </description>
-  </property>
-  <property>
-    <name>hadoop.log.dir</name>
-    <value>${user.dir}/../logs</value>
-  </property>
-  <property>
-    <name>hbase.zookeeper.property.clientPort</name>
-    <value>21818</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-    The port at which the clients will connect.
-    </description>
-  </property>
-</configuration>
diff --git a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/hdfs-site.xml b/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/hdfs-site.xml
deleted file mode 100644
index 240068b..0000000
--- a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/hdfs-site.xml
+++ /dev/null
@@ -1,403 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-<!-- file system properties -->
-
-  <property>
-    <name>dfs.name.dir</name>
-    <!-- cluster variant -->
-    <value>/mnt/hmc/hadoop/hdfs/namenode</value>
-    <description>Determines where on the local filesystem the DFS name node
-      should store the name table.  If this is a comma-delimited list
-      of directories then the name table is replicated in all of the
-      directories, for redundancy. </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.support.append</name>
-    <value>true</value>
-    <description>Whether to enable HDFS append.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.webhdfs.enabled</name>
-    <value>false</value>
-    <description>Whether to enable WebHDFS.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value>0</value>
-    <description>Number of failed disks a DataNode would tolerate.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.block.local-path-access.user</name>
-    <value>hbase</value>
-    <description>the user who is allowed to perform short
-    circuit reads.
-    </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.data.dir</name>
-    <value>/mnt/hmc/hadoop/hdfs/data</value>
-    <description>Determines where on the local filesystem a DFS data node
-  should store its blocks.  If this is a comma-delimited
-  list of directories, then data will be stored in all named
-  directories, typically on different devices.
-  Directories that do not exist are ignored.
-  </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.hosts.exclude</name>
-    <value>/etc/hadoop/conf/dfs.exclude</value>
-    <description>Names a file that contains a list of hosts that are
-    not permitted to connect to the namenode.  The full pathname of the
-    file must be specified.  If the value is empty, no hosts are
-    excluded.</description>
-  </property>
-
-  <property>
-    <name>dfs.hosts</name>
-    <value>/etc/hadoop/conf/dfs.include</value>
-    <description>Names a file that contains a list of hosts that are
-    permitted to connect to the namenode. The full pathname of the file
-    must be specified.  If the value is empty, all hosts are
-    permitted.</description>
-  </property>
-
-  <property>
-    <name>dfs.replication.max</name>
-    <value>50</value>
-    <description>Maximal block replication.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.replication</name>
-    <value>3</value>
-    <description>Default block replication.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.heartbeat.interval</name>
-    <value>3</value>
-    <description>Determines datanode heartbeat interval in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.safemode.threshold.pct</name>
-    <value>1.0f</value>
-    <description>
-        Specifies the percentage of blocks that should satisfy
-        the minimal replication requirement defined by dfs.replication.min.
-        Values less than or equal to 0 mean not to start in safe mode.
-        Values greater than 1 will make safe mode permanent.
-        </description>
-  </property>
-
-  <property>
-    <name>dfs.balance.bandwidthPerSec</name>
-    <value>6250000</value>
-    <description>
-        Specifies the maximum amount of bandwidth that each datanode
-        can utilize for balancing purposes, in terms of
-        the number of bytes per second.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.address</name>
-    <value>0.0.0.0:50010</value>
-  </property>
-
-  <property>
-    <name>dfs.datanode.http.address</name>
-    <value>0.0.0.0:50075</value>
-  </property>
-
-  <property>
-    <name>dfs.block.size</name>
-    <value>134217728</value>
-    <description>The default block size for new files.</description>
-  </property>
-
-  <property>
-    <name>dfs.http.address</name>
-    <value>hdp1.cybervisiontech.com.ua:50070</value>
-<description>The address and the base port where the DFS namenode web UI will listen on.</description>
-<final>true</final>
-</property>
-
-<property>
-<name>dfs.datanode.du.reserved</name>
-<!-- cluster variant -->
-<value>1073741824</value>
-<description>Reserved space in bytes per volume. Always leave this much space free for non-DFS use.
-</description>
-</property>
-
-<property>
-<name>dfs.datanode.ipc.address</name>
-<value>0.0.0.0:8010</value>
-<description>
-The datanode ipc server address and port.
-If the port is 0 then the server will start on a free port.
-</description>
-</property>
-
-<property>
-<name>dfs.blockreport.initialDelay</name>
-<value>120</value>
-<description>Delay for first block report in seconds.</description>
-</property>
-
-<property>
-<name>dfs.datanode.du.pct</name>
-<value>0.85f</value>
-<description>When calculating remaining space, only use this percentage of the real available space
-</description>
-</property>
-
-<property>
-<name>dfs.namenode.handler.count</name>
-<value>40</value>
-<description>The number of server threads for the namenode.</description>
-</property>
-
-<property>
-<name>dfs.datanode.max.xcievers</name>
-<value>1024</value>
-<description>PRIVATE CONFIG VARIABLE</description>
-</property>
-
-<!-- Permissions configuration -->
-
-<property>
-<name>dfs.umaskmode</name>
-<value>077</value>
-<description>
-The octal umask used when creating files and directories.
-</description>
-</property>
-
-<property>
-<name>dfs.web.ugi</name>
-<!-- cluster variant -->
-<value>gopher,gopher</value>
-<description>The user account used by the web interface.
-Syntax: USERNAME,GROUP1,GROUP2, ...
-</description>
-</property>
-
-<property>
-<name>dfs.permissions</name>
-<value>true</value>
-<description>
-If "true", enable permission checking in HDFS.
-If "false", permission checking is turned off,
-but all other behavior is unchanged.
-Switching from one parameter value to the other does not change the mode,
-owner or group of files or directories.
-</description>
-</property>
-
-<property>
-<name>dfs.permissions.supergroup</name>
-<value>hdfs</value>
-<description>The name of the group of super-users.</description>
-</property>
-
-<property>
-<name>dfs.namenode.handler.count</name>
-<value>100</value>
-<description>Increased to grow the queue size so that more client connections are allowed.</description>
-</property>
-
-<property>
-<name>ipc.server.max.response.size</name>
-<value>5242880</value>
-</property>
-<property>
-<name>dfs.block.access.token.enable</name>
-<value>true</value>
-<description>
-If "true", access tokens are used as capabilities for accessing datanodes.
-If "false", no access tokens are checked on accessing datanodes.
-</description>
-</property>
-
-<property>
-<name>dfs.namenode.kerberos.principal</name>
-<value>nn/_HOST@</value>
-<description>
-Kerberos principal name for the NameNode
-</description>
-</property>
-
-<property>
-<name>dfs.secondary.namenode.kerberos.principal</name>
-<value>nn/_HOST@</value>
-    <description>
-        Kerberos principal name for the secondary NameNode.
-    </description>
-  </property>
-
-
-<!--
-  This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
--->
-  <property>
-    <name>dfs.namenode.kerberos.https.principal</name>
-    <value>host/_HOST@</value>
-     <description>The Kerberos principal for the host that the NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.https.principal</name>
-    <value>host/_HOST@</value>
-    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <!-- cluster variant -->
-    <name>dfs.secondary.http.address</name>
-    <value>hdp2.cybervisiontech.com.ua:50090</value>
-    <description>Address of secondary namenode web server</description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.https.port</name>
-    <value>50490</value>
-    <description>The https port where secondary-namenode binds</description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.principal</name>
-    <value>HTTP/_HOST@</value>
-    <description>
-      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
-      HTTP SPNEGO specification.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.keytab</name>
-    <value>/nn.service.keytab</value>
-    <description>
-      The Kerberos keytab file with the credentials for the
-      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.kerberos.principal</name>
-    <value>dn/_HOST@</value>
- <description>
-        The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.keytab.file</name>
-    <value>/nn.service.keytab</value>
- <description>
-        Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.keytab.file</name>
-    <value>/nn.service.keytab</value>
-  <description>
-        Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.keytab.file</name>
-    <value>/dn.service.keytab</value>
- <description>
-        The filename of the keytab file for the DataNode.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.https.port</name>
-    <value>50470</value>
- <description>The https port where namenode binds</description>
-
-  </property>
-
-  <property>
-    <name>dfs.https.address</name>
-    <value>hdp1.cybervisiontech.com.ua:50470</value>
-  <description>The https address where namenode binds</description>
-
-  </property>
-
-  <property>
-    <name>dfs.datanode.data.dir.perm</name>
-    <value>750</value>
-<description>The permissions that should be set on dfs.data.dir
-directories. The datanode will not come up if the permissions are
-different on existing dfs.data.dir directories. If the directories
-don't exist, they will be created with this permission.</description>
-  </property>
-
-  <property>
-  <name>dfs.access.time.precision</name>
-  <value>0</value>
-  <description>The access time for an HDFS file is precise up to this value.
-               The default value is 1 hour. Setting a value of 0 disables
-               access times for HDFS.
-  </description>
-</property>
-
-<property>
- <name>dfs.cluster.administrators</name>
- <value> hdfs</value>
- <description>ACL specifying which users and groups may view the default servlets in HDFS.</description>
-</property>
-
-<property>
-  <name>ipc.server.read.threadpool.size</name>
-  <value>5</value>
-  <description>Number of reader threads for the IPC server.</description>
-</property>
-
-</configuration>
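The stack test resources above are plain Hadoop property files, so nothing HDFS-specific is needed to inspect them. A rough JAXP sketch that lists name/value pairs (illustrative only, not Ambari's actual stack loader; the file path is an assumption):

import java.io.File;
import javax.xml.parsers.DocumentBuilderFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;

public class PropertyDump {
  public static void main(String[] args) throws Exception {
    Document doc = DocumentBuilderFactory.newInstance()
        .newDocumentBuilder().parse(new File("hdfs-site.xml"));
    NodeList props = doc.getElementsByTagName("property");
    for (int i = 0; i < props.getLength(); i++) {
      Element p = (Element) props.item(i);
      String name  = p.getElementsByTagName("name").item(0).getTextContent().trim();
      String value = p.getElementsByTagName("value").item(0).getTextContent().trim();
      System.out.println(name + " = " + value);  // optional <description>/<final> children are ignored
    }
  }
}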
diff --git a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/mapred-site.xml b/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/mapred-site.xml
deleted file mode 100644
index 1f3e1a1..0000000
--- a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/mapred-site.xml
+++ /dev/null
@@ -1,407 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-<!-- file system properties -->
-
-  <property>
-    <name>dfs.name.dir</name>
-    <!-- cluster variant -->
-    <value>/mnt/hmc/hadoop/hdfs/namenode</value>
-    <description>Determines where on the local filesystem the DFS name node
-      should store the name table.  If this is a comma-delimited list
-      of directories then the name table is replicated in all of the
-      directories, for redundancy. </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.support.append</name>
-    <value>true</value>
-    <description>Whether to enable HDFS append.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.webhdfs.enabled</name>
-    <value>false</value>
-    <description>Whether to enable WebHDFS.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value>0</value>
-    <description>Number of failed disks a DataNode would tolerate.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.block.local-path-access.user</name>
-    <value>hbase</value>
-    <description>the user who is allowed to perform short
-    circuit reads.
-    </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.data.dir</name>
-    <value>/mnt/hmc/hadoop/hdfs/data</value>
-    <description>Determines where on the local filesystem a DFS data node
-  should store its blocks.  If this is a comma-delimited
-  list of directories, then data will be stored in all named
-  directories, typically on different devices.
-  Directories that do not exist are ignored.
-  </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.hosts.exclude</name>
-    <value>/etc/hadoop/conf/dfs.exclude</value>
-    <description>Names a file that contains a list of hosts that are
-    not permitted to connect to the namenode.  The full pathname of the
-    file must be specified.  If the value is empty, no hosts are
-    excluded.</description>
-  </property>
-
-  <property>
-    <name>dfs.hosts</name>
-    <value>/etc/hadoop/conf/dfs.include</value>
-    <description>Names a file that contains a list of hosts that are
-    permitted to connect to the namenode. The full pathname of the file
-    must be specified.  If the value is empty, all hosts are
-    permitted.</description>
-  </property>
-
-  <property>
-    <name>dfs.replication.max</name>
-    <value>50</value>
-    <description>Maximal block replication.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.replication</name>
-    <value>3</value>
-    <description>Default block replication.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.heartbeat.interval</name>
-    <value>3</value>
-    <description>Determines datanode heartbeat interval in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.safemode.threshold.pct</name>
-    <value>1.0f</value>
-    <description>
-        Specifies the percentage of blocks that should satisfy
-        the minimal replication requirement defined by dfs.replication.min.
-        Values less than or equal to 0 mean not to start in safe mode.
-        Values greater than 1 will make safe mode permanent.
-        </description>
-  </property>
-
-  <property>
-    <name>dfs.balance.bandwidthPerSec</name>
-    <value>6250000</value>
-    <description>
-        Specifies the maximum amount of bandwidth that each datanode
-        can utilize for balancing purposes, in terms of
-        the number of bytes per second.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.address</name>
-    <value>0.0.0.0:50010</value>
-    <description>Address where the datanode binds</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.http.address</name>
-    <value>0.0.0.0:50075</value>
-    <description>HTTP address for the datanode</description>
-  </property>
-
-  <property>
-    <name>dfs.block.size</name>
-    <value>134217728</value>
-    <description>The default block size for new files.</description>
-  </property>
-
-  <property>
-    <name>dfs.http.address</name>
-    <value>hdp1.cybervisiontech.com.ua:50070</value>
-<description>The address and the base port where the DFS namenode web UI will listen on.</description>
-<final>true</final>
-</property>
-
-<property>
-<name>dfs.datanode.du.reserved</name>
-<!-- cluster variant -->
-<value>1073741824</value>
-<description>Reserved space in bytes per volume. Always leave this much space free for non-DFS use.
-</description>
-</property>
-
-<property>
-<name>dfs.datanode.ipc.address</name>
-<value>0.0.0.0:8010</value>
-<description>
-The datanode ipc server address and port.
-If the port is 0 then the server will start on a free port.
-</description>
-</property>
-
-<property>
-<name>dfs.blockreport.initialDelay</name>
-<value>120</value>
-<description>Delay for first block report in seconds.</description>
-</property>
-
-<property>
-<name>dfs.datanode.du.pct</name>
-<value>0.85f</value>
-<description>When calculating remaining space, only use this percentage of the real available space
-</description>
-</property>
-
-<property>
-<name>dfs.namenode.handler.count</name>
-<value>40</value>
-<description>The number of server threads for the namenode.</description>
-</property>
-
-<property>
-<name>dfs.datanode.max.xcievers</name>
-<value>1024</value>
-<description>PRIVATE CONFIG VARIABLE</description>
-</property>
-
-<!-- Permissions configuration -->
-
-<property>
-<name>dfs.umaskmode</name>
-<value>077</value>
-<description>
-The octal umask used when creating files and directories.
-</description>
-</property>
-
-<property>
-<name>dfs.web.ugi</name>
-<!-- cluster variant -->
-<value>gopher,gopher</value>
-<description>The user account used by the web interface.
-Syntax: USERNAME,GROUP1,GROUP2, ...
-</description>
-</property>
-
-<property>
-<name>dfs.permissions</name>
-<value>true</value>
-<description>
-If "true", enable permission checking in HDFS.
-If "false", permission checking is turned off,
-but all other behavior is unchanged.
-Switching from one parameter value to the other does not change the mode,
-owner or group of files or directories.
-</description>
-</property>
-
-<property>
-<name>dfs.permissions.supergroup</name>
-<value>hdfs</value>
-<description>The name of the group of super-users.</description>
-</property>
-
-<property>
-<name>dfs.namenode.handler.count</name>
-<value>100</value>
-<description>Increased to grow the queue size so that more client connections are allowed.</description>
-</property>
-
-<property>
-<name>ipc.server.max.response.size</name>
-<value>5242880</value>
-<description>Maximum size, in bytes, of a response from the IPC server.</description>
-</property>
-
-<property>
-<name>dfs.block.access.token.enable</name>
-<value>true</value>
-<description>
-If "true", access tokens are used as capabilities for accessing datanodes.
-If "false", no access tokens are checked on accessing datanodes.
-</description>
-</property>
-
-<property>
-<name>dfs.namenode.kerberos.principal</name>
-<value>nn/_HOST@</value>
-<description>
-Kerberos principal name for the NameNode
-</description>
-</property>
-
-<property>
-<name>dfs.secondary.namenode.kerberos.principal</name>
-<value>nn/_HOST@</value>
-    <description>
-        Kerberos principal name for the secondary NameNode.
-    </description>
-  </property>
-
-
-<!--
-  This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
--->
-  <property>
-    <name>dfs.namenode.kerberos.https.principal</name>
-    <value>host/_HOST@</value>
-     <description>The Kerberos principal for the host that the NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.https.principal</name>
-    <value>host/_HOST@</value>
-    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <!-- cluster variant -->
-    <name>dfs.secondary.http.address</name>
-    <value>hdp2.cybervisiontech.com.ua:50090</value>
-    <description>Address of secondary namenode web server</description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.https.port</name>
-    <value>50490</value>
-    <description>The https port where secondary-namenode binds</description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.principal</name>
-    <value>HTTP/_HOST@</value>
-    <description>
-      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
-      HTTP SPNEGO specification.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.keytab</name>
-    <value>/nn.service.keytab</value>
-    <description>
-      The Kerberos keytab file with the credentials for the
-      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.kerberos.principal</name>
-    <value>dn/_HOST@</value>
- <description>
-        The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.keytab.file</name>
-    <value>/nn.service.keytab</value>
- <description>
-        Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.keytab.file</name>
-    <value>/nn.service.keytab</value>
-  <description>
-        Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.keytab.file</name>
-    <value>/dn.service.keytab</value>
- <description>
-        The filename of the keytab file for the DataNode.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.https.port</name>
-    <value>50470</value>
- <description>The https port where namenode binds</description>
-
-  </property>
-
-  <property>
-    <name>dfs.https.address</name>
-    <value>hdp1.cybervisiontech.com.ua:50470</value>
-  <description>The https address where namenode binds</description>
-
-  </property>
-
-  <property>
-    <name>dfs.datanode.data.dir.perm</name>
-    <value>750</value>
-<description>The permissions that should be set on dfs.data.dir
-directories. The datanode will not come up if the permissions are
-different on existing dfs.data.dir directories. If the directories
-don't exist, they will be created with this permission.</description>
-  </property>
-
-  <property>
-  <name>dfs.access.time.precision</name>
-  <value>0</value>
-  <description>The access time for an HDFS file is precise up to this value.
-               The default value is 1 hour. Setting a value of 0 disables
-               access times for HDFS.
-  </description>
-</property>
-
-<property>
- <name>dfs.cluster.administrators</name>
- <value> hdfs</value>
- <description>ACL specifying which users and groups may view the default servlets in HDFS.</description>
-</property>
-
-<property>
-  <name>ipc.server.read.threadpool.size</name>
-  <value>5</value>
-  <description>Number of reader threads for the IPC server.</description>
-</property>
-
-</configuration>
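Despite its mapred-site.xml name, the fixture deleted above repeats the HDFS property set; as test data the duplication is harmless. One value worth a worked example is dfs.umaskmode=077: the umask bits are masked off the base create modes, so new files come up owner-only. A small arithmetic sketch:

public class UmaskDemo {
  public static void main(String[] args) {
    int umask = 0077;                   // dfs.umaskmode above (octal)
    int fileMode = 0666 & ~umask;       // base mode for new files
    int dirMode  = 0777 & ~umask;       // base mode for new directories
    System.out.printf("file=%o dir=%o%n", fileMode, dirMode);  // prints: file=600 dir=700
  }
}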
diff --git a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/metainfo.xml b/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/metainfo.xml
deleted file mode 100644
index 2114480..0000000
--- a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/metainfo.xml
+++ /dev/null
@@ -1,34 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<metainfo>
-  <user>root</user>
-  <comment>This is comment for Mapred service</comment>
-  <version>1.0</version>
-  <components>
-    <component>
-      <name>JOBTRACKER</name>
-      <category>MASTER</category>
-    </component>
-    <component>
-      <name>TASKTRACKER</name>
-      <category>SLAVE</category>
-    </component>
-    <component>
-      <name>MAPREDUCE_CLIENT</name>
-      <category>CLIENT</category>
-    </component>
-  </components>
-
-</metainfo>
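The metainfo.xml fixtures share a small, regular shape: user, comment, version, and a list of components, each categorized MASTER, SLAVE, or CLIENT. A hypothetical JAXB binding that would accept the file above (a sketch assuming Java 8's bundled javax.xml.bind, not the class Ambari actually uses):

import java.io.File;
import java.util.List;
import javax.xml.bind.JAXBContext;
import javax.xml.bind.annotation.*;

@XmlRootElement(name = "metainfo")
@XmlAccessorType(XmlAccessType.FIELD)
public class MetaInfo {
  public String user;
  public String comment;
  public String version;

  @XmlElementWrapper(name = "components")
  @XmlElement(name = "component")
  public List<Component> components;

  @XmlAccessorType(XmlAccessType.FIELD)
  public static class Component {
    public String name;       // e.g. JOBTRACKER
    public String category;   // MASTER, SLAVE or CLIENT
  }

  public static void main(String[] args) throws Exception {
    MetaInfo mi = (MetaInfo) JAXBContext.newInstance(MetaInfo.class)
        .createUnmarshaller().unmarshal(new File("metainfo.xml"));
    System.out.println(mi.version + ": " + mi.components.size() + " components");
  }
}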
diff --git a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.1/services/PIG/configuration/pig.properties b/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.1/services/PIG/configuration/pig.properties
deleted file mode 100644
index 01000b5..0000000
--- a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.1/services/PIG/configuration/pig.properties
+++ /dev/null
@@ -1,52 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Pig default configuration file. All values can be overwritten by pig.properties and command line arguments.
-# see bin/pig -help
-
-# brief logging (no timestamps)
-brief=false
-
-#debug level, INFO is default
-debug=INFO
-
-#verbose print all log messages to screen (default to print only INFO and above to screen)
-verbose=false
-
-#exectype local|mapreduce, mapreduce is default
-exectype=mapreduce
-
-#Enable insertion of information about script into hadoop job conf 
-pig.script.info.enabled=true
-
-#Do not spill temp files smaller than this size (bytes)
-pig.spill.size.threshold=5000000
-#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
-#This should help reduce the number of files being spilled.
-pig.spill.gc.activation.size=40000000
-
-#the following two parameters are to help estimate the reducer number
-pig.exec.reducers.bytes.per.reducer=1000000000
-pig.exec.reducers.max=999
-
-#Temporary location to store the intermediate data.
-pig.temp.dir=/tmp/
-
-#Threshold for merging FRJoin fragment files
-pig.files.concatenation.threshold=100
-pig.optimistic.files.concatenation=false;
-
-pig.disable.counter=false
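The two reducer-estimation keys above work together: Pig's documented heuristic requests roughly input-size / pig.exec.reducers.bytes.per.reducer reducers, capped at pig.exec.reducers.max. A sketch of the arithmetic (illustrative, not Pig's source):

public class ReducerEstimate {
  static int estimate(long inputBytes, long bytesPerReducer, int maxReducers) {
    int wanted = (int) Math.ceil((double) inputBytes / bytesPerReducer);
    return Math.max(1, Math.min(wanted, maxReducers));  // at least one reducer, never past the cap
  }
  public static void main(String[] args) {
    // With the values above: 10 GB of input at 1e9 bytes per reducer -> 10 reducers.
    System.out.println(estimate(10_000_000_000L, 1_000_000_000L, 999));
  }
}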
diff --git a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.1/services/PIG/metainfo.xml b/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.1/services/PIG/metainfo.xml
deleted file mode 100644
index c89afa7..0000000
--- a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.1/services/PIG/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is comment for PIG service</comment>
-    <version>1.0</version>
-
-    <components>
-        <component>
-            <name>PIG</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.2/repos/repoinfo.xml b/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.2/repos/repoinfo.xml
deleted file mode 100644
index e5bd698..0000000
--- a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.2/repos/repoinfo.xml
+++ /dev/null
@@ -1,57 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<reposinfo>
-  <os type="centos6">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos6</baseurl>
-      <repoid>HDP-1.1.1.16</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos6</baseurl>
-      <repoid>HDP-UTILS-1.1.0.15</repoid>
-      <reponame>HDP-UTILS</reponame>
-      <mirrorslist></mirrorslist>
-    </repo>
-    <repo>
-      <baseurl></baseurl>
-      <repoid>epel</repoid>
-      <reponame>epel</reponame>
-      <mirrorslist><![CDATA[https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch]]></mirrorslist>
-    </repo>
-  </os>
-  <os type="centos5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos5</baseurl>
-      <repoid>HDP-1.1.1.16</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos5</baseurl>
-      <repoid>HDP-UTILS-1.1.0.15</repoid>
-      <reponame>HDP-UTILS</reponame>
-      <mirrorslist></mirrorslist>
-    </repo>
-    <repo>
-      <baseurl></baseurl>
-      <repoid>epel</repoid>
-      <reponame>epel</reponame>
-      <mirrorslist><![CDATA[https://mirrors.fedoraproject.org/metalink?repo=epel-5&arch=$basearch]]></mirrorslist>
-    </repo>
-  </os>
-</reposinfo>
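Each <repo> entry above carries exactly the fields a yum .repo stanza needs, keyed by repoid and backed by either a baseurl or a mirrorlist (the epel entries leave baseurl empty and rely on the mirrorlist instead). A hypothetical formatter, not Ambari's actual repo handling:

public class RepoStanza {
  static String format(String repoId, String repoName, String baseUrl, String mirrorsList) {
    StringBuilder sb = new StringBuilder();
    sb.append('[').append(repoId).append("]\n");
    sb.append("name=").append(repoName).append('\n');
    if (baseUrl != null && !baseUrl.isEmpty())
      sb.append("baseurl=").append(baseUrl).append('\n');
    if (mirrorsList != null && !mirrorsList.isEmpty())
      sb.append("mirrorlist=").append(mirrorsList).append('\n');
    return sb.append("enabled=1\n").toString();
  }
  public static void main(String[] args) {
    System.out.print(format("HDP-1.1.1.16", "HDP",
        "http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos6", null));
  }
}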
diff --git a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.2/services/HBASE/configuration/hbase-site.xml b/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.2/services/HBASE/configuration/hbase-site.xml
deleted file mode 100644
index 5024e85..0000000
--- a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.2/services/HBASE/configuration/hbase-site.xml
+++ /dev/null
@@ -1,137 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>hbase.regionserver.msginterval</name>
-    <value>1000</value>
-    <description>Interval between messages from the RegionServer to HMaster
-    in milliseconds.  Default is 15 seconds. Set this value low if you want unit
-    tests to be responsive.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.pause</name>
-    <value>5000</value>
-    <description>General client pause value.  Used mostly as value to wait
-    before running a retry of a failed get, region lookup, etc.</description>
-  </property>
-  <property>
-    <name>hbase.master.meta.thread.rescanfrequency</name>
-    <value>10000</value>
-    <description>How long the HMaster sleeps (in milliseconds) between scans of
-    the root and meta tables.
-    </description>
-  </property>
-  <property>
-    <name>hbase.server.thread.wakefrequency</name>
-    <value>1000</value>
-    <description>Time to sleep in between searches for work (in milliseconds).
-    Used as sleep interval by service threads such as META scanner and log roller.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.handler.count</name>
-    <value>5</value>
-    <description>Count of RPC Server instances spun up on RegionServers.
-    Same property is used by the HMaster for count of master handlers.
-    Default is 10.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.lease.period</name>
-    <value>6000</value>
-    <description>Length of time the master will wait before timing out a region
-    server lease. Since region servers report in every second (see above), this
-    value has been reduced so that the master will notice a dead region server
-    sooner. The default is 30 seconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.port</name>
-    <value>-1</value>
-    <description>The port for the HBase master web UI.
-    Set to -1 if you do not want the info server to run.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.info.port</name>
-    <value>-1</value>
-    <description>The port for the HBase regionserver web UI.
-    Set to -1 if you do not want the info server to run.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.info.port.auto</name>
-    <value>true</value>
-    <description>Info server auto port bind. Enables automatic port
-    search if hbase.regionserver.info.port is already in use.
-    Enabled for testing to run multiple tests on one machine.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.lease.thread.wakefrequency</name>
-    <value>3000</value>
-    <description>The interval between checks for expired region server leases.
-    This value has been reduced due to the other reduced values above so that
-    the master will notice a dead region server sooner. The default is 15 seconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.optionalcacheflushinterval</name>
-    <value>10000</value>
-    <description>
-    Amount of time to wait since the last time a region was flushed before
-    invoking an optional cache flush. Default 60,000.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.safemode</name>
-    <value>false</value>
-    <description>
-    Turn on/off safe mode in region server. Always on for production, always off
-    for tests.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.max.filesize</name>
-    <value>67108864</value>
-    <description>
-    Maximum desired file size for an HRegion.  If filesize exceeds
-    value + (value / 2), the HRegion is split in two.  Default: 256M.
-
-    Keep the maximum filesize small so we split more often in tests.
-    </description>
-  </property>
-  <property>
-    <name>hadoop.log.dir</name>
-    <value>${user.dir}/../logs</value>
-  </property>
-  <property>
-    <name>hbase.zookeeper.property.clientPort</name>
-    <value>21818</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-    The port at which the clients will connect.
-    </description>
-  </property>
-</configuration>
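The hbase.hregion.max.filesize description above states the actual split trigger: a region splits once its file size exceeds value + (value / 2). With the test value that is 64 MB + 32 MB = 96 MB; a one-liner to confirm:

public class SplitThreshold {
  public static void main(String[] args) {
    long max = 67_108_864L;          // hbase.hregion.max.filesize above (64 MB)
    long splitAt = max + max / 2;    // trigger from the description
    System.out.println(splitAt + " bytes = " + (splitAt >> 20) + " MB");  // 100663296 bytes = 96 MB
  }
}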
diff --git a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.2/services/HBASE/metainfo.xml b/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.2/services/HBASE/metainfo.xml
deleted file mode 100644
index d584d7f..0000000
--- a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.2/services/HBASE/metainfo.xml
+++ /dev/null
@@ -1,42 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>mapred</user>
-    <comment>This is comment for HBASE service</comment>
-    <version>1.0</version>
-
-
-    <components>
-        <component>
-            <name>HBASE_MASTER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>HBASE_REGIONSERVER</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>HBASE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.2/services/HDFS/configuration/hbase-site.xml b/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.2/services/HDFS/configuration/hbase-site.xml
deleted file mode 100644
index 5024e85..0000000
--- a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.2/services/HDFS/configuration/hbase-site.xml
+++ /dev/null
@@ -1,137 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>hbase.regionserver.msginterval</name>
-    <value>1000</value>
-    <description>Interval between messages from the RegionServer to HMaster
-    in milliseconds.  Default is 15 seconds. Set this value low if you want unit
-    tests to be responsive.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.pause</name>
-    <value>5000</value>
-    <description>General client pause value.  Used mostly as value to wait
-    before running a retry of a failed get, region lookup, etc.</description>
-  </property>
-  <property>
-    <name>hbase.master.meta.thread.rescanfrequency</name>
-    <value>10000</value>
-    <description>How long the HMaster sleeps (in milliseconds) between scans of
-    the root and meta tables.
-    </description>
-  </property>
-  <property>
-    <name>hbase.server.thread.wakefrequency</name>
-    <value>1000</value>
-    <description>Time to sleep in between searches for work (in milliseconds).
-    Used as sleep interval by service threads such as META scanner and log roller.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.handler.count</name>
-    <value>5</value>
-    <description>Count of RPC Server instances spun up on RegionServers.
-    Same property is used by the HMaster for count of master handlers.
-    Default is 10.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.lease.period</name>
-    <value>6000</value>
-    <description>Length of time the master will wait before timing out a region
-    server lease. Since region servers report in every second (see above), this
-    value has been reduced so that the master will notice a dead region server
-    sooner. The default is 30 seconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.port</name>
-    <value>-1</value>
-    <description>The port for the HBase master web UI.
-    Set to -1 if you do not want the info server to run.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.info.port</name>
-    <value>-1</value>
-    <description>The port for the HBase regionserver web UI.
-    Set to -1 if you do not want the info server to run.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.info.port.auto</name>
-    <value>true</value>
-    <description>Info server auto port bind. Enables automatic port
-    search if hbase.regionserver.info.port is already in use.
-    Enabled for testing to run multiple tests on one machine.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.lease.thread.wakefrequency</name>
-    <value>3000</value>
-    <description>The interval between checks for expired region server leases.
-    This value has been reduced due to the other reduced values above so that
-    the master will notice a dead region server sooner. The default is 15 seconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.optionalcacheflushinterval</name>
-    <value>10000</value>
-    <description>
-    Amount of time to wait since the last time a region was flushed before
-    invoking an optional cache flush. Default 60,000.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.safemode</name>
-    <value>false</value>
-    <description>
-    Turn on/off safe mode in region server. Always on for production, always off
-    for tests.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.max.filesize</name>
-    <value>67108864</value>
-    <description>
-    Maximum desired file size for an HRegion.  If filesize exceeds
-    value + (value / 2), the HRegion is split in two.  Default: 256M.
-
-    Keep the maximum filesize small so we split more often in tests.
-    </description>
-  </property>
-  <property>
-    <name>hadoop.log.dir</name>
-    <value>${user.dir}/../logs</value>
-  </property>
-  <property>
-    <name>hbase.zookeeper.property.clientPort</name>
-    <value>21818</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-    The port at which the clients will connect.
-    </description>
-  </property>
-</configuration>
diff --git a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.2/services/HDFS/configuration/hdfs-site.xml b/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.2/services/HDFS/configuration/hdfs-site.xml
deleted file mode 100644
index 240068b..0000000
--- a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.2/services/HDFS/configuration/hdfs-site.xml
+++ /dev/null
@@ -1,403 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-<!-- file system properties -->
-
-  <property>
-    <name>dfs.name.dir</name>
-    <!-- cluster variant -->
-    <value>/mnt/hmc/hadoop/hdfs/namenode</value>
-    <description>Determines where on the local filesystem the DFS name node
-      should store the name table.  If this is a comma-delimited list
-      of directories then the name table is replicated in all of the
-      directories, for redundancy. </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.support.append</name>
-    <value>true</value>
-    <description>Whether to enable HDFS append.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.webhdfs.enabled</name>
-    <value>false</value>
-    <description>Whether to enable WebHDFS.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value>0</value>
-    <description>Number of failed disks a DataNode would tolerate.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.block.local-path-access.user</name>
-    <value>hbase</value>
-    <description>the user who is allowed to perform short
-    circuit reads.
-    </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.data.dir</name>
-    <value>/mnt/hmc/hadoop/hdfs/data</value>
-    <description>Determines where on the local filesystem a DFS data node
-  should store its blocks.  If this is a comma-delimited
-  list of directories, then data will be stored in all named
-  directories, typically on different devices.
-  Directories that do not exist are ignored.
-  </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.hosts.exclude</name>
-    <value>/etc/hadoop/conf/dfs.exclude</value>
-    <description>Names a file that contains a list of hosts that are
-    not permitted to connect to the namenode.  The full pathname of the
-    file must be specified.  If the value is empty, no hosts are
-    excluded.</description>
-  </property>
-
-  <property>
-    <name>dfs.hosts</name>
-    <value>/etc/hadoop/conf/dfs.include</value>
-    <description>Names a file that contains a list of hosts that are
-    permitted to connect to the namenode. The full pathname of the file
-    must be specified.  If the value is empty, all hosts are
-    permitted.</description>
-  </property>
-
-  <property>
-    <name>dfs.replication.max</name>
-    <value>50</value>
-    <description>Maximal block replication.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.replication</name>
-    <value>3</value>
-    <description>Default block replication.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.heartbeat.interval</name>
-    <value>3</value>
-    <description>Determines datanode heartbeat interval in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.safemode.threshold.pct</name>
-    <value>1.0f</value>
-    <description>
-        Specifies the percentage of blocks that should satisfy
-        the minimal replication requirement defined by dfs.replication.min.
-        Values less than or equal to 0 mean not to start in safe mode.
-        Values greater than 1 will make safe mode permanent.
-        </description>
-  </property>
-
-  <property>
-    <name>dfs.balance.bandwidthPerSec</name>
-    <value>6250000</value>
-    <description>
-        Specifies the maximum amount of bandwidth that each datanode
-        can utilize for balancing purposes, in terms of
-        the number of bytes per second.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.address</name>
-    <value>0.0.0.0:50010</value>
-  </property>
-
-  <property>
-    <name>dfs.datanode.http.address</name>
-    <value>0.0.0.0:50075</value>
-  </property>
-
-  <property>
-    <name>dfs.block.size</name>
-    <value>134217728</value>
-    <description>The default block size for new files.</description>
-  </property>
-
-  <property>
-    <name>dfs.http.address</name>
-    <value>hdp1.cybervisiontech.com.ua:50070</value>
-<description>The address and the base port where the DFS namenode web UI will listen on.</description>
-<final>true</final>
-</property>
-
-<property>
-<name>dfs.datanode.du.reserved</name>
-<!-- cluster variant -->
-<value>1073741824</value>
-<description>Reserved space in bytes per volume. Always leave this much space free for non-DFS use.
-</description>
-</property>
-
-<property>
-<name>dfs.datanode.ipc.address</name>
-<value>0.0.0.0:8010</value>
-<description>
-The datanode ipc server address and port.
-If the port is 0 then the server will start on a free port.
-</description>
-</property>
-
-<property>
-<name>dfs.blockreport.initialDelay</name>
-<value>120</value>
-<description>Delay for first block report in seconds.</description>
-</property>
-
-<property>
-<name>dfs.datanode.du.pct</name>
-<value>0.85f</value>
-<description>When calculating remaining space, only use this percentage of the real available space
-</description>
-</property>
-
-<property>
-<name>dfs.namenode.handler.count</name>
-<value>40</value>
-<description>The number of server threads for the namenode.</description>
-</property>
-
-<property>
-<name>dfs.datanode.max.xcievers</name>
-<value>1024</value>
-<description>PRIVATE CONFIG VARIABLE</description>
-</property>
-
-<!-- Permissions configuration -->
-
-<property>
-<name>dfs.umaskmode</name>
-<value>077</value>
-<description>
-The octal umask used when creating files and directories.
-</description>
-</property>
-
-<property>
-<name>dfs.web.ugi</name>
-<!-- cluster variant -->
-<value>gopher,gopher</value>
-<description>The user account used by the web interface.
-Syntax: USERNAME,GROUP1,GROUP2, ...
-</description>
-</property>
-
-<property>
-<name>dfs.permissions</name>
-<value>true</value>
-<description>
-If "true", enable permission checking in HDFS.
-If "false", permission checking is turned off,
-but all other behavior is unchanged.
-Switching from one parameter value to the other does not change the mode,
-owner or group of files or directories.
-</description>
-</property>
-
-<property>
-<name>dfs.permissions.supergroup</name>
-<value>hdfs</value>
-<description>The name of the group of super-users.</description>
-</property>
-
-<property>
-<name>dfs.namenode.handler.count</name>
-<value>100</value>
-<description>Increased to grow the queue size so that more client connections are allowed.</description>
-</property>
-
-<property>
-<name>ipc.server.max.response.size</name>
-<value>5242880</value>
-</property>
-<property>
-<name>dfs.block.access.token.enable</name>
-<value>true</value>
-<description>
-If "true", access tokens are used as capabilities for accessing datanodes.
-If "false", no access tokens are checked on accessing datanodes.
-</description>
-</property>
-
-<property>
-<name>dfs.namenode.kerberos.principal</name>
-<value>nn/_HOST@</value>
-<description>
-Kerberos principal name for the NameNode
-</description>
-</property>
-
-<property>
-<name>dfs.secondary.namenode.kerberos.principal</name>
-<value>nn/_HOST@</value>
-    <description>
-        Kerberos principal name for the secondary NameNode.
-    </description>
-  </property>
-
-
-<!--
-  This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
--->
-  <property>
-    <name>dfs.namenode.kerberos.https.principal</name>
-    <value>host/_HOST@</value>
-     <description>The Kerberos principal for the host that the NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.https.principal</name>
-    <value>host/_HOST@</value>
-    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <!-- cluster variant -->
-    <name>dfs.secondary.http.address</name>
-    <value>hdp2.cybervisiontech.com.ua:50090</value>
-    <description>Address of secondary namenode web server</description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.https.port</name>
-    <value>50490</value>
-    <description>The https port where secondary-namenode binds</description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.principal</name>
-    <value>HTTP/_HOST@</value>
-    <description>
-      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
-      HTTP SPNEGO specification.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.keytab</name>
-    <value>/nn.service.keytab</value>
-    <description>
-      The Kerberos keytab file with the credentials for the
-      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.kerberos.principal</name>
-    <value>dn/_HOST@</value>
- <description>
-        The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.keytab.file</name>
-    <value>/nn.service.keytab</value>
- <description>
-        Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.keytab.file</name>
-    <value>/nn.service.keytab</value>
-  <description>
-        Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.keytab.file</name>
-    <value>/dn.service.keytab</value>
- <description>
-        The filename of the keytab file for the DataNode.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.https.port</name>
-    <value>50470</value>
- <description>The https port where namenode binds</description>
-
-  </property>
-
-  <property>
-    <name>dfs.https.address</name>
-    <value>hdp1.cybervisiontech.com.ua:50470</value>
-  <description>The https address where namenode binds</description>
-
-  </property>
-
-  <property>
-    <name>dfs.datanode.data.dir.perm</name>
-    <value>750</value>
-<description>The permissions that should be set on dfs.data.dir
-directories. The datanode will not come up if the permissions are
-different on existing dfs.data.dir directories. If the directories
-don't exist, they will be created with this permission.</description>
-  </property>
-
-  <property>
-  <name>dfs.access.time.precision</name>
-  <value>0</value>
-  <description>The access time for an HDFS file is precise up to this value.
-               The default value is 1 hour. Setting a value of 0 disables
-               access times for HDFS.
-  </description>
-</property>
-
-<property>
- <name>dfs.cluster.administrators</name>
- <value> hdfs</value>
- <description>ACL controlling which users and groups can view the default servlets in HDFS</description>
-</property>
-
-<property>
-  <name>ipc.server.read.threadpool.size</name>
-  <value>5</value>
-  <description>Size of the IPC server read thread pool.</description>
-</property>
-
-</configuration>
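
The hdfs-site.xml removed above leans on Hadoop's "_HOST" placeholder: when a daemon starts, the token in each principal is replaced with the fully-qualified domain name of the local host, and an empty realm after "@" falls back to the default realm from krb5.conf. A minimal sketch of that substitution (Python, standard library only; illustrative, not Hadoop's actual SecurityUtil code):

    import socket

    def resolve_principal(principal_template, hostname=None):
        # Replace the literal "_HOST" token with the FQDN of the machine
        # the daemon starts on, as the property descriptions above assume.
        fqdn = hostname or socket.getfqdn()
        return principal_template.replace("_HOST", fqdn)

    # resolve_principal("nn/_HOST@") on hdp1.cybervisiontech.com.ua yields
    # "nn/hdp1.cybervisiontech.com.ua@"; the empty realm after "@" means
    # the default realm from krb5.conf applies.
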
diff --git a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.2/services/HDFS/metainfo.xml b/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.2/services/HDFS/metainfo.xml
deleted file mode 100644
index 622f0fd..0000000
--- a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.2/services/HDFS/metainfo.xml
+++ /dev/null
@@ -1,39 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is comment for HDFS service</comment>
-    <version>1.0</version>
-
-
-    <components>
-        <component>
-            <name>NAMENODE</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>DATANODE</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>HDFS_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>
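
The test-stack metainfo.xml files deleted in this commit all share the flat shape above: a user, a comment, a version, and a list of components, each with a name and a MASTER/SLAVE/CLIENT category. A sketch of reading that layout with Python's xml.etree (illustrative only, not Ambari's actual stack parser):

    import xml.etree.ElementTree as ET

    def read_metainfo(path):
        # Returns (version, [(component, category), ...]) for the flat
        # test-stack layout shown above.
        root = ET.parse(path).getroot()
        version = root.findtext("version")
        components = [(c.findtext("name"), c.findtext("category"))
                      for c in root.findall("./components/component")]
        return version, components

    # For the HDFS file above: ("1.0", [("NAMENODE", "MASTER"),
    #                                   ("DATANODE", "SLAVE"),
    #                                   ("HDFS_CLIENT", "CLIENT")])
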
diff --git a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.2/services/HIVE/metainfo.xml b/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.2/services/HIVE/metainfo.xml
deleted file mode 100644
index 22a86e3..0000000
--- a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.2/services/HIVE/metainfo.xml
+++ /dev/null
@@ -1,42 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is comment for HIVE service</comment>
-    <version>1.0</version>
-
-
-    <components>
-        <component>
-            <name>HIVE_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>MYSQL_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>HIVE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.2/services/MAPREDUCE/configuration/hbase-site.xml b/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.2/services/MAPREDUCE/configuration/hbase-site.xml
deleted file mode 100644
index 5024e85..0000000
--- a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.2/services/MAPREDUCE/configuration/hbase-site.xml
+++ /dev/null
@@ -1,137 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>hbase.regionserver.msginterval</name>
-    <value>1000</value>
-    <description>Interval between messages from the RegionServer to HMaster
-    in milliseconds.  Default is 15. Set this value low if you want unit
-    tests to be responsive.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.pause</name>
-    <value>5000</value>
-    <description>General client pause value.  Used mostly as value to wait
-    before running a retry of a failed get, region lookup, etc.</description>
-  </property>
-  <property>
-    <name>hbase.master.meta.thread.rescanfrequency</name>
-    <value>10000</value>
-    <description>How long the HMaster sleeps (in milliseconds) between scans of
-    the root and meta tables.
-    </description>
-  </property>
-  <property>
-    <name>hbase.server.thread.wakefrequency</name>
-    <value>1000</value>
-    <description>Time to sleep in between searches for work (in milliseconds).
-    Used as sleep interval by service threads such as META scanner and log roller.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.handler.count</name>
-    <value>5</value>
-    <description>Count of RPC Server instances spun up on RegionServers.
-    The same property is used by the HMaster for the count of master handlers.
-    Default is 10.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.lease.period</name>
-    <value>6000</value>
-    <description>Length of time the master will wait before timing out a region
-    server lease. Since region servers report in every second (see above), this
-    value has been reduced so that the master will notice a dead region server
-    sooner. The default is 30 seconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.port</name>
-    <value>-1</value>
-    <description>The port for the hbase master web UI.
-    Set to -1 if you do not want the info server to run.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.info.port</name>
-    <value>-1</value>
-    <description>The port for the hbase regionserver web UI.
-    Set to -1 if you do not want the info server to run.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.info.port.auto</name>
-    <value>true</value>
-    <description>Info server auto port bind. Enables automatic port
-    search if hbase.regionserver.info.port is already in use.
-    Enabled for testing to run multiple tests on one machine.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.lease.thread.wakefrequency</name>
-    <value>3000</value>
-    <description>The interval between checks for expired region server leases.
-    This value has been reduced due to the other reduced values above so that
-    the master will notice a dead region server sooner. The default is 15 seconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.optionalcacheflushinterval</name>
-    <value>10000</value>
-    <description>
-    Amount of time to wait since the last time a region was flushed before
-    invoking an optional cache flush. Default 60,000.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.safemode</name>
-    <value>false</value>
-    <description>
-    Turn on/off safe mode in region server. Always on for production, always off
-    for tests.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.max.filesize</name>
-    <value>67108864</value>
-    <description>
-    Maximum desired file size for an HRegion.  If filesize exceeds
-    value + (value / 2), the HRegion is split in two.  Default: 256M.
-
-    Keep the maximum filesize small so we split more often in tests.
-    </description>
-  </property>
-  <property>
-    <name>hadoop.log.dir</name>
-    <value>${user.dir}/../logs</value>
-  </property>
-  <property>
-    <name>hbase.zookeeper.property.clientPort</name>
-    <value>21818</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-    The port at which the clients will connect.
-    </description>
-  </property>
-</configuration>
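
The hbase.hregion.max.filesize description above states the split rule: a region splits once its size exceeds value + (value / 2). Worked out for the 64 MB test value (a sketch of the stated rule, not HBase's split code):

    MAX_FILESIZE = 67108864  # 64 MB, the test value above

    def should_split(region_size_bytes, max_filesize=MAX_FILESIZE):
        # Split threshold per the description: value + (value / 2).
        return region_size_bytes > max_filesize + max_filesize // 2

    # The threshold here is 96 MB, so a 100 MB region would be split in two.
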
diff --git a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.2/services/MAPREDUCE/configuration/hdfs-site.xml b/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.2/services/MAPREDUCE/configuration/hdfs-site.xml
deleted file mode 100644
index 240068b..0000000
--- a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.2/services/MAPREDUCE/configuration/hdfs-site.xml
+++ /dev/null
@@ -1,403 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-<!-- file system properties -->
-
-  <property>
-    <name>dfs.name.dir</name>
-    <!-- cluster variant -->
-    <value>/mnt/hmc/hadoop/hdfs/namenode</value>
-    <description>Determines where on the local filesystem the DFS name node
-      should store the name table.  If this is a comma-delimited list
-      of directories then the name table is replicated in all of the
-      directories, for redundancy. </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.support.append</name>
-    <value>true</value>
-    <description>to enable dfs append</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.webhdfs.enabled</name>
-    <value>false</value>
-    <description>to enable webhdfs</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value>0</value>
-    <description>Number of failed disks a DataNode will tolerate.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.block.local-path-access.user</name>
-    <value>hbase</value>
-    <description>the user who is allowed to perform short
-    circuit reads.
-    </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.data.dir</name>
-    <value>/mnt/hmc/hadoop/hdfs/data</value>
-    <description>Determines where on the local filesystem a DFS data node
-  should store its blocks.  If this is a comma-delimited
-  list of directories, then data will be stored in all named
-  directories, typically on different devices.
-  Directories that do not exist are ignored.
-  </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.hosts.exclude</name>
-    <value>/etc/hadoop/conf/dfs.exclude</value>
-    <description>Names a file that contains a list of hosts that are
-    not permitted to connect to the namenode.  The full pathname of the
-    file must be specified.  If the value is empty, no hosts are
-    excluded.</description>
-  </property>
-
-  <property>
-    <name>dfs.hosts</name>
-    <value>/etc/hadoop/conf/dfs.include</value>
-    <description>Names a file that contains a list of hosts that are
-    permitted to connect to the namenode. The full pathname of the file
-    must be specified.  If the value is empty, all hosts are
-    permitted.</description>
-  </property>
-
-  <property>
-    <name>dfs.replication.max</name>
-    <value>50</value>
-    <description>Maximal block replication.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.replication</name>
-    <value>3</value>
-    <description>Default block replication.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.heartbeat.interval</name>
-    <value>3</value>
-    <description>Determines datanode heartbeat interval in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.safemode.threshold.pct</name>
-    <value>1.0f</value>
-    <description>
-        Specifies the percentage of blocks that should satisfy
-        the minimal replication requirement defined by dfs.replication.min.
-        Values less than or equal to 0 mean not to start in safe mode.
-        Values greater than 1 will make safe mode permanent.
-        </description>
-  </property>
-
-  <property>
-    <name>dfs.balance.bandwidthPerSec</name>
-    <value>6250000</value>
-    <description>
-        Specifies the maximum amount of bandwidth that each datanode
-        can utilize for balancing purposes, in terms of
-        bytes per second.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.address</name>
-    <value>0.0.0.0:50010</value>
-  </property>
-
-  <property>
-    <name>dfs.datanode.http.address</name>
-    <value>0.0.0.0:50075</value>
-  </property>
-
-  <property>
-    <name>dfs.block.size</name>
-    <value>134217728</value>
-    <description>The default block size for new files.</description>
-  </property>
-
-  <property>
-    <name>dfs.http.address</name>
-    <value>hdp1.cybervisiontech.com.ua:50070</value>
-<description>The address and the base port where the dfs namenode
-web UI will listen on.</description>
-<final>true</final>
-</property>
-
-<property>
-<name>dfs.datanode.du.reserved</name>
-<!-- cluster variant -->
-<value>1073741824</value>
-<description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
-</description>
-</property>
-
-<property>
-<name>dfs.datanode.ipc.address</name>
-<value>0.0.0.0:8010</value>
-<description>
-The datanode ipc server address and port.
-If the port is 0 then the server will start on a free port.
-</description>
-</property>
-
-<property>
-<name>dfs.blockreport.initialDelay</name>
-<value>120</value>
-<description>Delay for first block report in seconds.</description>
-</property>
-
-<property>
-<name>dfs.datanode.du.pct</name>
-<value>0.85f</value>
-<description>When calculating remaining space, only use this percentage of the real available space
-</description>
-</property>
-
-<property>
-<name>dfs.namenode.handler.count</name>
-<value>40</value>
-<description>The number of server threads for the namenode.</description>
-</property>
-
-<property>
-<name>dfs.datanode.max.xcievers</name>
-<value>1024</value>
-<description>PRIVATE CONFIG VARIABLE</description>
-</property>
-
-<!-- Permissions configuration -->
-
-<property>
-<name>dfs.umaskmode</name>
-<value>077</value>
-<description>
-The octal umask used when creating files and directories.
-</description>
-</property>
-
-<property>
-<name>dfs.web.ugi</name>
-<!-- cluster variant -->
-<value>gopher,gopher</value>
-<description>The user account used by the web interface.
-Syntax: USERNAME,GROUP1,GROUP2, ...
-</description>
-</property>
-
-<property>
-<name>dfs.permissions</name>
-<value>true</value>
-<description>
-If "true", enable permission checking in HDFS.
-If "false", permission checking is turned off,
-but all other behavior is unchanged.
-Switching from one parameter value to the other does not change the mode,
-owner or group of files or directories.
-</description>
-</property>
-
-<property>
-<name>dfs.permissions.supergroup</name>
-<value>hdfs</value>
-<description>The name of the group of super-users.</description>
-</property>
-
-<property>
-<name>dfs.namenode.handler.count</name>
-<value>100</value>
-<description>Added to grow the queue size so that more client connections are allowed.</description>
-</property>
-
-<property>
-<name>ipc.server.max.response.size</name>
-<value>5242880</value>
-</property>
-<property>
-<name>dfs.block.access.token.enable</name>
-<value>true</value>
-<description>
-If "true", access tokens are used as capabilities for accessing datanodes.
-If "false", no access tokens are checked on accessing datanodes.
-</description>
-</property>
-
-<property>
-<name>dfs.namenode.kerberos.principal</name>
-<value>nn/_HOST@</value>
-<description>
-Kerberos principal name for the NameNode
-</description>
-</property>
-
-<property>
-<name>dfs.secondary.namenode.kerberos.principal</name>
-<value>nn/_HOST@</value>
-    <description>
-        Kerberos principal name for the secondary NameNode.
-    </description>
-  </property>
-
-
-<!--
-  This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
--->
-  <property>
-    <name>dfs.namenode.kerberos.https.principal</name>
-    <value>host/_HOST@</value>
-     <description>The Kerberos principal for the host that the NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.https.principal</name>
-    <value>host/_HOST@</value>
-    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <!-- cluster variant -->
-    <name>dfs.secondary.http.address</name>
-    <value>hdp2.cybervisiontech.com.ua:50090</value>
-    <description>Address of secondary namenode web server</description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.https.port</name>
-    <value>50490</value>
-    <description>The https port where secondary-namenode binds</description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.principal</name>
-    <value>HTTP/_HOST@</value>
-    <description>
-      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
-      HTTP SPNEGO specification.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.keytab</name>
-    <value>/nn.service.keytab</value>
-    <description>
-      The Kerberos keytab file with the credentials for the
-      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.kerberos.principal</name>
-    <value>dn/_HOST@</value>
- <description>
-        The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.keytab.file</name>
-    <value>/nn.service.keytab</value>
- <description>
-        Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.keytab.file</name>
-    <value>/nn.service.keytab</value>
-  <description>
-        Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.keytab.file</name>
-    <value>/dn.service.keytab</value>
- <description>
-        The filename of the keytab file for the DataNode.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.https.port</name>
-    <value>50470</value>
- <description>The https port where namenode binds</description>
-
-  </property>
-
-  <property>
-    <name>dfs.https.address</name>
-    <value>hdp1.cybervisiontech.com.ua:50470</value>
-  <description>The https address where namenode binds</description>
-
-  </property>
-
-  <property>
-    <name>dfs.datanode.data.dir.perm</name>
-    <value>750</value>
-<description>The permissions that should be set on dfs.data.dir
-directories. The datanode will not come up if the permissions on
-existing dfs.data.dir directories are different. If the directories
-don't exist, they will be created with this permission.</description>
-  </property>
-
-  <property>
-  <name>dfs.access.time.precision</name>
-  <value>0</value>
-  <description>The access time for an HDFS file is precise up to this value.
-               The default value is 1 hour. Setting a value of 0 disables
-               access times for HDFS.
-  </description>
-</property>
-
-<property>
- <name>dfs.cluster.administrators</name>
- <value> hdfs</value>
- <description>ACL controlling which users and groups can view the default servlets in HDFS</description>
-</property>
-
-<property>
-  <name>ipc.server.read.threadpool.size</name>
-  <value>5</value>
-  <description>Size of the IPC server read thread pool.</description>
-</property>
-
-</configuration>
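
dfs.datanode.data.dir.perm above describes two behaviours: missing data directories are created with the configured mode, and the DataNode refuses to start when an existing directory's permissions differ. A sketch of that check (illustrative, not the DataNode's implementation):

    import os
    import stat

    def ensure_data_dir(path, perm_octal="750"):
        mode = int(perm_octal, 8)
        if not os.path.exists(path):
            # Missing directories are created with the configured mode;
            # chmod again because makedirs' mode is masked by the umask.
            os.makedirs(path, mode=mode)
            os.chmod(path, mode)
            return
        actual = stat.S_IMODE(os.stat(path).st_mode)
        if actual != mode:
            raise RuntimeError("%s has mode %o, expected %o"
                               % (path, actual, mode))
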
diff --git a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.2/services/MAPREDUCE/configuration/mapred-site.xml b/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.2/services/MAPREDUCE/configuration/mapred-site.xml
deleted file mode 100644
index 1f3e1a1..0000000
--- a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.2/services/MAPREDUCE/configuration/mapred-site.xml
+++ /dev/null
@@ -1,407 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-<!-- file system properties -->
-
-  <property>
-    <name>dfs.name.dir</name>
-    <!-- cluster variant -->
-    <value>/mnt/hmc/hadoop/hdfs/namenode</value>
-    <description>Determines where on the local filesystem the DFS name node
-      should store the name table.  If this is a comma-delimited list
-      of directories then the name table is replicated in all of the
-      directories, for redundancy. </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.support.append</name>
-    <value>true</value>
-    <description>to enable dfs append</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.webhdfs.enabled</name>
-    <value>false</value>
-    <description>to enable webhdfs</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value>0</value>
-    <description>Number of failed disks a DataNode will tolerate.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.block.local-path-access.user</name>
-    <value>hbase</value>
-    <description>the user who is allowed to perform short
-    circuit reads.
-    </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.data.dir</name>
-    <value>/mnt/hmc/hadoop/hdfs/data</value>
-    <description>Determines where on the local filesystem a DFS data node
-  should store its blocks.  If this is a comma-delimited
-  list of directories, then data will be stored in all named
-  directories, typically on different devices.
-  Directories that do not exist are ignored.
-  </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.hosts.exclude</name>
-    <value>/etc/hadoop/conf/dfs.exclude</value>
-    <description>Names a file that contains a list of hosts that are
-    not permitted to connect to the namenode.  The full pathname of the
-    file must be specified.  If the value is empty, no hosts are
-    excluded.</description>
-  </property>
-
-  <property>
-    <name>dfs.hosts</name>
-    <value>/etc/hadoop/conf/dfs.include</value>
-    <description>Names a file that contains a list of hosts that are
-    permitted to connect to the namenode. The full pathname of the file
-    must be specified.  If the value is empty, all hosts are
-    permitted.</description>
-  </property>
-
-  <property>
-    <name>dfs.replication.max</name>
-    <value>50</value>
-    <description>Maximal block replication.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.replication</name>
-    <value>3</value>
-    <description>Default block replication.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.heartbeat.interval</name>
-    <value>3</value>
-    <description>Determines datanode heartbeat interval in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.safemode.threshold.pct</name>
-    <value>1.0f</value>
-    <description>
-        Specifies the percentage of blocks that should satisfy
-        the minimal replication requirement defined by dfs.replication.min.
-        Values less than or equal to 0 mean not to start in safe mode.
-        Values greater than 1 will make safe mode permanent.
-        </description>
-  </property>
-
-  <property>
-    <name>dfs.balance.bandwidthPerSec</name>
-    <value>6250000</value>
-    <description>
-        Specifies the maximum amount of bandwidth that each datanode
-        can utilize for balancing purposes, in terms of
-        bytes per second.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.address</name>
-    <value>0.0.0.0:50010</value>
-    <description>Address where the datanode binds</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.http.address</name>
-    <value>0.0.0.0:50075</value>
-    <description>HTTP address for the datanode</description>
-  </property>
-
-  <property>
-    <name>dfs.block.size</name>
-    <value>134217728</value>
-    <description>The default block size for new files.</description>
-  </property>
-
-  <property>
-    <name>dfs.http.address</name>
-    <value>hdp1.cybervisiontech.com.ua:50070</value>
-<description>The address and the base port where the dfs namenode
-web UI will listen on.</description>
-<final>true</final>
-</property>
-
-<property>
-<name>dfs.datanode.du.reserved</name>
-<!-- cluster variant -->
-<value>1073741824</value>
-<description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
-</description>
-</property>
-
-<property>
-<name>dfs.datanode.ipc.address</name>
-<value>0.0.0.0:8010</value>
-<description>
-The datanode ipc server address and port.
-If the port is 0 then the server will start on a free port.
-</description>
-</property>
-
-<property>
-<name>dfs.blockreport.initialDelay</name>
-<value>120</value>
-<description>Delay for first block report in seconds.</description>
-</property>
-
-<property>
-<name>dfs.datanode.du.pct</name>
-<value>0.85f</value>
-<description>When calculating remaining space, only use this percentage of the real available space
-</description>
-</property>
-
-<property>
-<name>dfs.namenode.handler.count</name>
-<value>40</value>
-<description>The number of server threads for the namenode.</description>
-</property>
-
-<property>
-<name>dfs.datanode.max.xcievers</name>
-<value>1024</value>
-<description>PRIVATE CONFIG VARIABLE</description>
-</property>
-
-<!-- Permissions configuration -->
-
-<property>
-<name>dfs.umaskmode</name>
-<value>077</value>
-<description>
-The octal umask used when creating files and directories.
-</description>
-</property>
-
-<property>
-<name>dfs.web.ugi</name>
-<!-- cluster variant -->
-<value>gopher,gopher</value>
-<description>The user account used by the web interface.
-Syntax: USERNAME,GROUP1,GROUP2, ...
-</description>
-</property>
-
-<property>
-<name>dfs.permissions</name>
-<value>true</value>
-<description>
-If "true", enable permission checking in HDFS.
-If "false", permission checking is turned off,
-but all other behavior is unchanged.
-Switching from one parameter value to the other does not change the mode,
-owner or group of files or directories.
-</description>
-</property>
-
-<property>
-<name>dfs.permissions.supergroup</name>
-<value>hdfs</value>
-<description>The name of the group of super-users.</description>
-</property>
-
-<property>
-<name>dfs.namenode.handler.count</name>
-<value>100</value>
-<description>Added to grow the queue size so that more client connections are allowed.</description>
-</property>
-
-<property>
-<name>ipc.server.max.response.size</name>
-<value>5242880</value>
-<description>The maximum response size for IPC.</description>
-</property>
-
-<property>
-<name>dfs.block.access.token.enable</name>
-<value>true</value>
-<description>
-If "true", access tokens are used as capabilities for accessing datanodes.
-If "false", no access tokens are checked on accessing datanodes.
-</description>
-</property>
-
-<property>
-<name>dfs.namenode.kerberos.principal</name>
-<value>nn/_HOST@</value>
-<description>
-Kerberos principal name for the NameNode
-</description>
-</property>
-
-<property>
-<name>dfs.secondary.namenode.kerberos.principal</name>
-<value>nn/_HOST@</value>
-    <description>
-        Kerberos principal name for the secondary NameNode.
-    </description>
-  </property>
-
-
-<!--
-  This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
--->
-  <property>
-    <name>dfs.namenode.kerberos.https.principal</name>
-    <value>host/_HOST@</value>
-     <description>The Kerberos principal for the host that the NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.https.principal</name>
-    <value>host/_HOST@</value>
-    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <!-- cluster variant -->
-    <name>dfs.secondary.http.address</name>
-    <value>hdp2.cybervisiontech.com.ua:50090</value>
-    <description>Address of secondary namenode web server</description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.https.port</name>
-    <value>50490</value>
-    <description>The https port where secondary-namenode binds</description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.principal</name>
-    <value>HTTP/_HOST@</value>
-    <description>
-      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
-      HTTP SPNEGO specification.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.keytab</name>
-    <value>/nn.service.keytab</value>
-    <description>
-      The Kerberos keytab file with the credentials for the
-      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.kerberos.principal</name>
-    <value>dn/_HOST@</value>
- <description>
-        The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.keytab.file</name>
-    <value>/nn.service.keytab</value>
- <description>
-        Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.keytab.file</name>
-    <value>/nn.service.keytab</value>
-  <description>
-        Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.keytab.file</name>
-    <value>/dn.service.keytab</value>
- <description>
-        The filename of the keytab file for the DataNode.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.https.port</name>
-    <value>50470</value>
- <description>The https port where namenode binds</description>
-
-  </property>
-
-  <property>
-    <name>dfs.https.address</name>
-    <value>hdp1.cybervisiontech.com.ua:50470</value>
-  <description>The https address where namenode binds</description>
-
-  </property>
-
-  <property>
-    <name>dfs.datanode.data.dir.perm</name>
-    <value>750</value>
-<description>The permissions that should be set on dfs.data.dir
-directories. The datanode will not come up if the permissions on
-existing dfs.data.dir directories are different. If the directories
-don't exist, they will be created with this permission.</description>
-  </property>
-
-  <property>
-  <name>dfs.access.time.precision</name>
-  <value>0</value>
-  <description>The access time for an HDFS file is precise up to this value.
-               The default value is 1 hour. Setting a value of 0 disables
-               access times for HDFS.
-  </description>
-</property>
-
-<property>
- <name>dfs.cluster.administrators</name>
- <value> hdfs</value>
- <description>ACL controlling which users and groups can view the default servlets in HDFS</description>
-</property>
-
-<property>
-  <name>ipc.server.read.threadpool.size</name>
-  <value>5</value>
-  <description>Size of the IPC server read thread pool.</description>
-</property>
-
-</configuration>
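
dfs.safemode.threshold.pct above encodes three cases: values less than or equal to 0 never hold the NameNode in safe mode, values greater than 1 make safe mode permanent, and anything in between is compared against the fraction of blocks meeting dfs.replication.min. A sketch of the rule as stated (not the NameNode's code):

    def can_leave_safemode(safe_blocks, total_blocks, threshold_pct=1.0):
        if threshold_pct <= 0:
            return True       # never start in safe mode
        if threshold_pct > 1:
            return False      # safe mode is permanent
        if total_blocks == 0:
            return True
        return safe_blocks / total_blocks >= threshold_pct

    # With the test value 1.0f, every block must meet the minimal
    # replication requirement before safe mode can be left.
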
diff --git a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.2/services/MAPREDUCE/metainfo.xml b/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.2/services/MAPREDUCE/metainfo.xml
deleted file mode 100644
index 2114480..0000000
--- a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.2/services/MAPREDUCE/metainfo.xml
+++ /dev/null
@@ -1,34 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<metainfo>
-  <user>root</user>
-  <comment>This is comment for Mapred service</comment>
-  <version>1.0</version>
-  <components>
-    <component>
-      <name>JOBTRACKER</name>
-      <category>MASTER</category>
-    </component>
-    <component>
-      <name>TASKTRACKER</name>
-      <category>SLAVE</category>
-    </component>
-    <component>
-      <name>MAPREDUCE_CLIENT</name>
-      <category>CLIENT</category>
-    </component>
-  </components>
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.2/services/ZOOKEEPER/metainfo.xml b/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.2/services/ZOOKEEPER/metainfo.xml
deleted file mode 100644
index 7115199..0000000
--- a/branch-1.2/ambari-server/src/test/resources/stacks/HDP/0.2/services/ZOOKEEPER/metainfo.xml
+++ /dev/null
@@ -1,37 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is comment for ZOOKEEPER service</comment>
-    <version>1.0</version>
-
-
-    <components>
-        <component>
-            <name>ZOOKEEPER_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>ZOOKEEPER_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>
diff --git a/branch-1.2/ambari-server/src/test/resources/temporal_ganglia_data.txt b/branch-1.2/ambari-server/src/test/resources/temporal_ganglia_data.txt
deleted file mode 100644
index ed0f896..0000000
--- a/branch-1.2/ambari-server/src/test/resources/temporal_ganglia_data.txt
+++ /dev/null
@@ -1,719 +0,0 @@
-1358487599.0
-sum
-HDPSlaves
-domU-12-31-39-0E-34-E1.compute-1.internal
-mapred.shuffleOutput.shuffle_exceptions_caught
-1358434800
-360
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-[AMBARI_DP_END]
-sum
-HDPSlaves
-domU-12-31-39-0E-34-E1.compute-1.internal
-jvm.metrics.gcCount
-1358434800
-360
-83513252.9778
-119191023.641
-94591541.6419
-122656559.37
-142370213.761
-107374182.528
-131235111.95
-130489436.186
-143115866.264
-132030498.116
-131235096.191
-145722098.374
-128678583.679
-131235111.822
-69196695.2917
-133621204.781
-95443717.7111
-115895949.467
-134643809.447
-135211933.4
-142370212.222
-128053678.05
-141574824.397
-97034446.3556
-92035020.3
-39200091.525
-129644383.156
-143960940.847
-132030476.178
-127826414.616
-59879570.0701
-142370213.906
-147142389.835
-143165576.536
-134963382.052
-143165576.534
-141141658.42
-141461233.555
-119304637.975
-119304655.727
-146347025.188
-143392823.479
-131007864.879
-139984129.803
-131235108.855
-107374174.855
-95443717.671
-131235111.822
-143165576.538
-83513263.8167
-111350993.135
-118509282.802
-142370212.222
-120100011.423
-93852989.0687
-23065591.0807
-118509256.794
-142370212.222
-120100011.461
-143165576.518
-143165576.532
-71582797.8174
-146148183.173
-116322039.185
-131235103.56
-131235123.489
-95443706.0444
-71582788.25
-139984127.162
-134416571.797
-143165577.856
-107374182.454
-143165564.577
-119304647.097
-128053662.656
-84990350.0306
-117827550.129
-132030467.969
-143960940.848
-93852997.3374
-142938330.213
-109192149.075
-120895375.756
-138691664.073
-135709034.825
-146148182.177
-94847194.4554
-128849018.873
-107374182.416
-71582801.1622
-134416556.176
-128053654.561
-83513252.9926
-63380593.77
-115576389.233
-[AMBARI_DP_END]
-sum
-HDPSlaves
-domU-12-31-39-0E-34-E2.compute-1.internal
-jvm.metrics.gcCount
-1358434800
-360
-83513252.9778
-119191023.641
-94591541.6419
-122656559.37
-142370213.761
-107374182.528
-131235111.95
-130489436.186
-143115866.264
-132030498.116
-131235096.191
-145722098.374
-128678583.679
-131235111.822
-69196695.2917
-133621204.781
-95443717.7111
-115895949.467
-134643809.447
-135211933.4
-142370212.222
-128053678.05
-141574824.397
-97034446.3556
-92035020.3
-39200091.525
-129644383.156
-143960940.847
-132030476.178
-127826414.616
-59879570.0701
-142370213.906
-147142389.835
-143165576.536
-134963382.052
-143165576.534
-141141658.42
-141461233.555
-119304637.975
-119304655.727
-146347025.188
-143392823.479
-131007864.879
-139984129.803
-131235108.855
-107374174.855
-95443717.671
-131235111.822
-143165576.538
-83513263.8167
-111350993.135
-118509282.802
-142370212.222
-120100011.423
-93852989.0687
-23065591.0807
-118509256.794
-142370212.222
-120100011.461
-143165576.518
-143165576.532
-71582797.8174
-146148183.173
-116322039.185
-131235103.56
-131235123.489
-95443706.0444
-71582788.25
-139984127.162
-134416571.797
-143165577.856
-107374182.454
-143165564.577
-119304647.097
-128053662.656
-84990350.0306
-117827550.129
-132030467.969
-143960940.848
-93852997.3374
-142938330.213
-109192149.075
-120895375.756
-138691664.073
-135709034.825
-146148182.177
-94847194.4554
-128849018.873
-107374182.416
-71582801.1622
-134416556.176
-128053654.561
-83513252.9926
-63380593.77
-115576389.233
-[AMBARI_DP_END]
-sum
-HDPNameNode
-domU-12-31-39-0E-34-E3.compute-1.internal
-jvm.metrics.gcCount
-1358434800
-360
-83513252.9778
-119191023.641
-94591541.6419
-122656559.37
-142370213.761
-107374182.528
-131235111.95
-130489436.186
-143115866.264
-132030498.116
-131235096.191
-145722098.374
-128678583.679
-131235111.822
-69196695.2917
-133621204.781
-95443717.7111
-115895949.467
-134643809.447
-135211933.4
-142370212.222
-128053678.05
-141574824.397
-97034446.3556
-92035020.3
-39200091.525
-129644383.156
-143960940.847
-132030476.178
-127826414.616
-59879570.0701
-142370213.906
-147142389.835
-143165576.536
-134963382.052
-143165576.534
-141141658.42
-141461233.555
-119304637.975
-119304655.727
-146347025.188
-143392823.479
-131007864.879
-139984129.803
-131235108.855
-107374174.855
-95443717.671
-131235111.822
-143165576.538
-83513263.8167
-111350993.135
-118509282.802
-142370212.222
-120100011.423
-93852989.0687
-23065591.0807
-118509256.794
-142370212.222
-120100011.461
-143165576.518
-143165576.532
-71582797.8174
-146148183.173
-116322039.185
-131235103.56
-131235123.489
-95443706.0444
-71582788.25
-139984127.162
-134416571.797
-143165577.856
-107374182.454
-143165564.577
-119304647.097
-128053662.656
-84990350.0306
-117827550.129
-132030467.969
-143960940.848
-93852997.3374
-142938330.213
-109192149.075
-120895375.756
-138691664.073
-135709034.825
-146148182.177
-94847194.4554
-128849018.873
-107374182.416
-71582801.1622
-134416556.176
-128053654.561
-83513252.9926
-63380593.77
-115576389.233
-[AMBARI_DP_END]
-sum
-HDPSlaves
-domU-12-31-39-0E-34-E1.compute-1.internal
-mapred.shuffleOutput.shuffle_output_bytes
-1358434800
-360
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-[AMBARI_DP_END]
-sum
-HDPSlaves
-domU-12-31-39-0E-34-E1.compute-1.internal
-mapred.shuffleOutput.shuffle_failed_outputs
-1358434800
-360
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-[AMBARI_DP_END]
-sum
-HDPSlaves
-domU-12-31-39-0E-34-E1.compute-1.internal
-mapred.shuffleOutput.shuffle_success_outputs
-1358434800
-360
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-0.0
-[AMBARI_DP_END]
-[AMBARI_END]
-1358487599.0
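
The deleted fixture above follows a simple layout: a leading wall-clock timestamp, then repeated records of six header lines (aggregate function, cluster, host, metric, series start time, step in seconds) followed by datapoints up to an [AMBARI_DP_END] marker, with [AMBARI_END] closing the list. A sketch of a reader for that layout (inferred from the fixture itself, not Ambari's Ganglia property provider):

    def parse_temporal_data(lines):
        it = iter(line.strip() for line in lines)
        next(it)  # leading wall-clock timestamp
        series = []
        for first in it:
            if first == "[AMBARI_END]":
                break
            # Six header lines per record, the first already consumed.
            aggregate, cluster, host, metric, start, step = (
                [first] + [next(it) for _ in range(5)])
            points = []
            for value in it:
                if value == "[AMBARI_DP_END]":
                    break
                points.append(float(value))
            series.append({"aggregate": aggregate, "cluster": cluster,
                           "host": host, "metric": metric,
                           "start": int(start), "step": int(step),
                           "datapoints": points})
        return series
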
diff --git a/branch-1.2/ambari-server/src/test/resources/test_api.sh b/branch-1.2/ambari-server/src/test/resources/test_api.sh
deleted file mode 100644
index dda2847..0000000
--- a/branch-1.2/ambari-server/src/test/resources/test_api.sh
+++ /dev/null
@@ -1,67 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-AGENT_HOST="localhost.localdomain"
-curl -i -X POST -d '{"Clusters": {"version" : "HDP-1.2.0"}}' http://localhost:8080/api/v1/clusters/c1
-curl -i -X POST http://localhost:8080/api/v1/clusters/c1/services/HDFS
-curl -i -X POST http://localhost:8080/api/v1/clusters/c1/services/MAPREDUCE
-curl -i -X POST http://localhost:8080/api/v1/clusters/c1/services/ZOOKEEPER
-curl -i -X POST http://localhost:8080/api/v1/clusters/c1/services/HBASE
-curl -i -X POST http://localhost:8080/api/v1/clusters/c1/services/GANGLIA
-curl -i -X POST http://localhost:8080/api/v1/clusters/c1/services/NAGIOS
-curl -i -X POST -d '{"type": "core-site", "tag": "version1", "properties" : { "fs.default.name" : "localhost:8020"}}' http://localhost:8080/api/v1/clusters/c1/configurations
-curl -i -X POST -d '{"type": "hdfs-site", "tag": "version1", "properties" : { "dfs.datanode.data.dir.perm" : "750"}}' http://localhost:8080/api/v1/clusters/c1/configurations
-curl -i -X POST -d '{"type": "global", "tag": "version1", "properties" : { "hbase_hdfs_root_dir" : "/apps/hbase/"}}' http://localhost:8080/api/v1/clusters/c1/configurations
-curl -i -X POST -d '{"type": "mapred-site", "tag": "version1", "properties" : { "mapred.job.tracker" : "localhost:50300", "mapreduce.history.server.embedded": "false", "mapreduce.history.server.http.address": "localhost:51111"}}' http://localhost:8080/api/v1/clusters/c1/configurations
-curl -i -X POST -d '{"type": "hbase-site", "tag": "version1", "properties" : { "hbase.rootdir" : "hdfs://localhost:8020/apps/hbase/", "hbase.cluster.distributed" : "true", "hbase.zookeeper.quorum": "localhost", "zookeeper.session.timeout": "60000" }}' http://localhost:8080/api/v1/clusters/c1/configurations
-curl -i -X POST -d '{"type": "hbase-env", "tag": "version1", "properties" : { "hbase_hdfs_root_dir" : "/apps/hbase/"}}' http://localhost:8080/api/v1/clusters/c1/configurations
-curl -i -X POST -d '{"type": "nagios-global", "tag": "version2", "properties" : { "nagios_web_login" : "nagiosadmin", "nagios_web_password" : "password", "nagios_contact": "a\u0040b.c" }}' http://localhost:8080/api/v1/clusters/c1/configurations
-curl -i -X POST -d '{"type": "nagios-global", "tag": "version1", "properties" : { "nagios_web_login" : "nagiosadmin", "nagios_web_password" : "password"  }}' http://localhost:8080/api/v1/clusters/c1/configurations
-curl -i -X PUT -d '{"config": {"core-site": "version1", "hdfs-site": "version1", "global" : "version1" }}'  http://localhost:8080/api/v1/clusters/c1/services/HDFS
-curl -i -X PUT -d '{"config": {"core-site": "version1", "mapred-site": "version1"}}'  http://localhost:8080/api/v1/clusters/c1/services/MAPREDUCE
-curl -i -X PUT -d '{"config": {"hbase-site": "version1", "hbase-env": "version1"}}'  http://localhost:8080/api/v1/clusters/c1/services/HBASE
-curl -i -X PUT -d '{"config": {"nagios-global": "version2" }}'  http://localhost:8080/api/v1/clusters/c1/services/NAGIOS
-curl -i -X POST http://localhost:8080/api/v1/clusters/c1/services/HDFS/components/NAMENODE
-curl -i -X POST http://localhost:8080/api/v1/clusters/c1/services/HDFS/components/SECONDARY_NAMENODE
-curl -i -X POST http://localhost:8080/api/v1/clusters/c1/services/HDFS/components/DATANODE
-curl -i -X POST http://localhost:8080/api/v1/clusters/c1/services/HDFS/components/HDFS_CLIENT
-curl -i -X POST http://localhost:8080/api/v1/clusters/c1/services/MAPREDUCE/components/JOBTRACKER
-curl -i -X POST http://localhost:8080/api/v1/clusters/c1/services/MAPREDUCE/components/TASKTRACKER
-curl -i -X POST http://localhost:8080/api/v1/clusters/c1/services/ZOOKEEPER/components/ZOOKEEPER_SERVER
-curl -i -X POST http://localhost:8080/api/v1/clusters/c1/services/HBASE/components/HBASE_MASTER
-curl -i -X POST http://localhost:8080/api/v1/clusters/c1/services/HBASE/components/HBASE_REGIONSERVER
-curl -i -X POST http://localhost:8080/api/v1/clusters/c1/services/HBASE/components/HBASE_CLIENT
-curl -i -X POST http://localhost:8080/api/v1/clusters/c1/services/GANGLIA/components/GANGLIA_SERVER
-curl -i -X POST http://localhost:8080/api/v1/clusters/c1/services/GANGLIA/components/GANGLIA_MONITOR
-curl -i -X POST http://localhost:8080/api/v1/clusters/c1/services/NAGIOS/components/NAGIOS_SERVER
-curl -i -X POST http://localhost:8080/api/v1/clusters/c1/hosts/$AGENT_HOST
-curl -i -X POST http://localhost:8080/api/v1/clusters/c1/hosts/$AGENT_HOST/host_components/NAMENODE
-curl -i -X POST http://localhost:8080/api/v1/clusters/c1/hosts/$AGENT_HOST/host_components/SECONDARY_NAMENODE
-curl -i -X POST http://localhost:8080/api/v1/clusters/c1/hosts/$AGENT_HOST/host_components/JOBTRACKER
-curl -i -X POST http://localhost:8080/api/v1/clusters/c1/hosts/$AGENT_HOST/host_components/DATANODE
-curl -i -X POST http://localhost:8080/api/v1/clusters/c1/hosts/$AGENT_HOST/host_components/TASKTRACKER
-curl -i -X POST http://localhost:8080/api/v1/clusters/c1/hosts/$AGENT_HOST/host_components/ZOOKEEPER_SERVER
-curl -i -X POST http://localhost:8080/api/v1/clusters/c1/hosts/$AGENT_HOST/host_components/GANGLIA_SERVER
-curl -i -X POST http://localhost:8080/api/v1/clusters/c1/hosts/$AGENT_HOST/host_components/GANGLIA_MONITOR
-curl -i -X POST http://localhost:8080/api/v1/clusters/c1/hosts/$AGENT_HOST/host_components/HDFS_CLIENT
-curl -i -X POST http://localhost:8080/api/v1/clusters/c1/hosts/$AGENT_HOST/host_components/HBASE_MASTER
-curl -i -X POST http://localhost:8080/api/v1/clusters/c1/hosts/$AGENT_HOST/host_components/HBASE_REGIONSERVER
-curl -i -X POST http://localhost:8080/api/v1/clusters/c1/hosts/$AGENT_HOST/host_components/HBASE_CLIENT
-curl -i -X POST http://localhost:8080/api/v1/clusters/c1/hosts/$AGENT_HOST/host_components/NAGIOS_SERVER
-curl -i -X PUT  -d '{"ServiceInfo": {"state" : "INSTALLED"}}'   http://localhost:8080/api/v1/clusters/c1/services?state=INIT
-#curl -i -X PUT  -d '{"ServiceInfo": {"state" : "STARTED"}}'   http://localhost:8080/api/v1/clusters/c1/services?state=INSTALLED
-# http://localhost:8080/api/v1/clusters/c1/requests/2
-#curl -i -X PUT    http://localhost:8080/api/v1/clusters/c1/services?state="INSTALLED"
-#curl -i -X POST  -d '{"ServiceInfo": {"state" : "STARTED"}}' http://localhost:8080/api/v1/clusters/c1/services/HDFS
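
test_api.sh drives the Ambari REST API with plain curl: POST to create the cluster, services, components, configurations, and host components, then PUT a state change to trigger the install. The same pattern with Python's standard library (post_json is a hypothetical helper; no authentication, matching the curl calls above):

    import json
    import urllib.request

    BASE = "http://localhost:8080/api/v1"

    def post_json(path, payload=None):
        # Equivalent of the script's curl -i -X POST calls; body optional.
        data = json.dumps(payload).encode() if payload is not None else None
        req = urllib.request.Request(BASE + path, data=data, method="POST")
        return urllib.request.urlopen(req)

    # The first two calls of the script:
    post_json("/clusters/c1", {"Clusters": {"version": "HDP-1.2.0"}})
    post_json("/clusters/c1/services/HDFS")
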
diff --git a/branch-1.2/ambari-server/src/test/resources/test_multnode_api.sh b/branch-1.2/ambari-server/src/test/resources/test_multnode_api.sh
deleted file mode 100644
index 8083654..0000000
--- a/branch-1.2/ambari-server/src/test/resources/test_multnode_api.sh
+++ /dev/null
@@ -1,84 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-SERVER_HOST="ec2-107-20-75-170.compute-1.amazonaws.com"
-declare -a AGENT_HOSTS=('test1' 'test2');
-
-# All server components will be placed on the first host in AGENT_HOSTS
-
-echo curl -i -X POST -d '{"Clusters": {"version" : "HDP-1.2.0"}}' http://$SERVER_HOST:8080/api/v1/clusters/c1
-echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/services/HDFS
-echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/services/MAPREDUCE
-echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/services/ZOOKEEPER
-echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/services/HBASE
-echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/services/GANGLIA
-echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/services/NAGIOS
-echo curl -i -X POST -d '{"type": "core-site", "tag": "version1", "properties" : { "fs.default.name" : "hdfs://'${AGENT_HOSTS[0]}':8020"}}' http://$SERVER_HOST:8080/api/v1/clusters/c1/configurations
-echo curl -i -X POST -d '{"type": "hdfs-site", "tag": "version1", "properties" : { "dfs.datanode.data.dir.perm" : "750"}}' http://$SERVER_HOST:8080/api/v1/clusters/c1/configurations
-echo curl -i -X POST -d '{"type": "global", "tag": "version1", "properties" : { "hbase_hdfs_root_dir" : "/apps/hbase/"}}' http://$SERVER_HOST:8080/api/v1/clusters/c1/configurations
-echo curl -i -X POST -d '{"type": "mapred-site", "tag": "version1", "properties" : { "mapred.job.tracker" : "'${AGENT_HOSTS[0]}':50300", "mapreduce.history.server.embedded": "false", "mapreduce.history.server.http.address": "'${AGENT_HOSTS[0]}':51111"}}' http://$SERVER_HOST:8080/api/v1/clusters/c1/configurations
-echo curl -i -X POST -d '{"type": "hbase-site", "tag": "version1", "properties" : { "hbase.rootdir" : "hdfs://'${AGENT_HOSTS[0]}':8020/apps/hbase/", "hbase.cluster.distributed" : "true", "hbase.zookeeper.quorum": "'${AGENT_HOSTS[0]}'", "zookeeper.session.timeout": "60000" }}' http://$SERVER_HOST:8080/api/v1/clusters/c1/configurations
-echo curl -i -X POST -d '{"type": "hbase-env", "tag": "version1", "properties" : { "hbase_hdfs_root_dir" : "/apps/hbase/"}}' http://$SERVER_HOST:8080/api/v1/clusters/c1/configurations
-echo curl -i -X POST -d '{"type": "nagios-global", "tag": "version2", "properties" : { "nagios_web_login" : "nagiosadmin", "nagios_web_password" : "password", "nagios_contact": "a\u0040b.c" }}' http://$SERVER_HOST:8080/api/v1/clusters/c1/configurations
-echo curl -i -X POST -d '{"type": "nagios-global", "tag": "version1", "properties" : { "nagios_web_login" : "nagiosadmin", "nagios_web_password" : "password"  }}' http://$SERVER_HOST:8080/api/v1/clusters/c1/configurations
-echo curl -i -X PUT -d '{"config": {"core-site": "version1", "hdfs-site": "version1", "global" : "version1" }}'  http://$SERVER_HOST:8080/api/v1/clusters/c1/services/HDFS
-echo curl -i -X PUT -d '{"config": {"core-site": "version1", "mapred-site": "version1"}}'  http://$SERVER_HOST:8080/api/v1/clusters/c1/services/MAPREDUCE
-echo curl -i -X PUT -d '{"config": {"hbase-site": "version1", "hbase-env": "version1"}}'  http://$SERVER_HOST:8080/api/v1/clusters/c1/services/HBASE
-echo curl -i -X PUT -d '{"config": {"nagios-global": "version2" }}'  http://$SERVER_HOST:8080/api/v1/clusters/c1/services/NAGIOS
-echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/services/HDFS/components/NAMENODE
-echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/services/HDFS/components/SECONDARY_NAMENODE
-echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/services/HDFS/components/DATANODE
-echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/services/HDFS/components/HDFS_CLIENT
-echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/services/MAPREDUCE/components/JOBTRACKER
-echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/services/MAPREDUCE/components/TASKTRACKER
-echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/services/ZOOKEEPER/components/ZOOKEEPER_SERVER
-echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/services/HBASE/components/HBASE_MASTER
-echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/services/HBASE/components/HBASE_REGIONSERVER
-echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/services/HBASE/components/HBASE_CLIENT
-echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/services/GANGLIA/components/GANGLIA_SERVER
-echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/services/GANGLIA/components/GANGLIA_MONITOR
-echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/services/NAGIOS/components/NAGIOS_SERVER
-echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/hosts/${AGENT_HOSTS[0]}
-echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/hosts/${AGENT_HOSTS[0]}/host_components/NAMENODE
-echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/hosts/${AGENT_HOSTS[0]}/host_components/SECONDARY_NAMENODE
-echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/hosts/${AGENT_HOSTS[0]}/host_components/JOBTRACKER
-echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/hosts/${AGENT_HOSTS[0]}/host_components/DATANODE
-echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/hosts/${AGENT_HOSTS[0]}/host_components/TASKTRACKER
-echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/hosts/${AGENT_HOSTS[0]}/host_components/ZOOKEEPER_SERVER
-echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/hosts/${AGENT_HOSTS[0]}/host_components/GANGLIA_SERVER
-echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/hosts/${AGENT_HOSTS[0]}/host_components/GANGLIA_MONITOR
-echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/hosts/${AGENT_HOSTS[0]}/host_components/HDFS_CLIENT
-echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/hosts/${AGENT_HOSTS[0]}/host_components/HBASE_MASTER
-echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/hosts/${AGENT_HOSTS[0]}/host_components/HBASE_REGIONSERVER
-echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/hosts/${AGENT_HOSTS[0]}/host_components/HBASE_CLIENT
-echo curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/hosts/${AGENT_HOSTS[0]}/host_components/NAGIOS_SERVER
-echo 
-
-len=${#AGENT_HOSTS[@]}
-
-for (( i=1; i<$len; i++ ))
-do
-  curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/hosts/${AGENT_HOSTS[$i]}
-  curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/hosts/${AGENT_HOSTS[$i]}/host_components/DATANODE
-  curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/hosts/${AGENT_HOSTS[$i]}/host_components/TASKTRACKER
-  curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/hosts/${AGENT_HOSTS[$i]}/host_components/GANGLIA_MONITOR
-  curl -i -X POST http://$SERVER_HOST:8080/api/v1/clusters/c1/hosts/${AGENT_HOSTS[$i]}/host_components/HBASE_REGIONSERVER
-done
-
-curl -i -X PUT  -d '{"ServiceInfo": {"state" : "INSTALLED"}}'   http://$SERVER_HOST:8080/api/v1/clusters/c1/services?state=INIT
-#curl -i -X PUT  -d '{"ServiceInfo": {"state" : "STARTED"}}'   http://$SERVER_HOST:8080/api/v1/clusters/c1/services?state=INSTALLED
-# http://localhost:8080/api/v1/clusters/c1/requests/2
-#curl -i -X PUT    http://localhost:8080/api/v1/clusters/c1/services?state="INSTALLED"
-#curl -i -X POST  -d '{"ServiceInfo": {"state" : "STARTED"}}' http://localhost:8080/api/v1/clusters/c1/services/HDFS
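Editor's note: the deleted helper script above drives the Ambari REST API end to end — create the cluster, register services and components, POST tagged configurations, map host components, then move everything from INIT to INSTALLED. The commented-out /requests/2 URL hints that the final PUT returns an asynchronous request resource that can be polled. A minimal sketch of the same flow in Python, assuming an Ambari 1.2-era server on SERVER_HOST:8080 with the default admin/admin credentials (both placeholders, like the hypothetical host names):

# Sketch only: mirrors the curl-based provisioning flow above.
import json
import time

import requests

BASE = "http://SERVER_HOST:8080/api/v1/clusters/c1"  # placeholder host
AUTH = ("admin", "admin")  # assumed default credentials

def post(path, body=None):
    # POST a resource, optionally with a JSON body, and fail loudly.
    data = json.dumps(body) if body is not None else None
    resp = requests.post(BASE + path, data=data, auth=AUTH)
    resp.raise_for_status()
    return resp

post("", {"Clusters": {"version": "HDP-1.2.0"}})   # create the cluster
post("/services/HDFS")                             # register a service

# Ask Ambari to install every service still in INIT; the response is
# assumed to carry an href to the async request resource, as the
# commented-out /requests/2 URL above suggests.
resp = requests.put(BASE + "/services?state=INIT",
                    data=json.dumps({"ServiceInfo": {"state": "INSTALLED"}}),
                    auth=AUTH)
resp.raise_for_status()
href = resp.json()["href"]
while True:
    status = requests.get(href, auth=AUTH).json()["Requests"]["request_status"]
    if status in ("COMPLETED", "FAILED", "ABORTED"):
        break
    time.sleep(5)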
diff --git a/branch-1.2/ambari-server/src/test/resources/users.ldif b/branch-1.2/ambari-server/src/test/resources/users.ldif
deleted file mode 100644
index 903c1d2..0000000
--- a/branch-1.2/ambari-server/src/test/resources/users.ldif
+++ /dev/null
@@ -1,35 +0,0 @@
-dn: ou=groups,dc=ambari,dc=apache,dc=org
-objectclass:top
-objectclass:organizationalUnit
-ou: groups
-
-dn: ou=people,dc=ambari,dc=apache,dc=org
-objectclass:top
-objectclass:organizationalUnit
-ou: people
-
-dn: uid=allowedUser,ou=people,dc=ambari,dc=apache,dc=org
-objectclass:top
-objectclass:person
-objectclass:organizationalPerson
-objectclass:inetOrgPerson
-cn: CraigWalls
-sn: Walls
-uid: allowedUser
-userPassword:password
-
-dn: uid=deniedUser,ou=people,dc=ambari,dc=apache,dc=org
-objectclass:top
-objectclass:person
-objectclass:organizationalPerson
-objectclass:inetOrgPerson
-cn: JohnSmith
-sn: Smith
-uid: deniedUser
-userPassword:password
-
-dn: cn=admin,ou=groups,dc=ambari,dc=apache,dc=org
-objectclass:top
-objectclass:groupOfNames
-cn: admin
-member: uid=allowedUser,ou=people,dc=ambari,dc=apache,dc=org
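Editor's note: the users.ldif fixture defines two people (allowedUser, deniedUser) and an admin group whose only member is allowedUser — enough to exercise both the authentication and the group-based authorization paths in the LDAP tests. LDIF of this simple form is just blank-line-separated blocks of "attribute: value" pairs, so a tiny dependency-free parser is enough to inspect such a fixture; a sketch:

# Sketch: minimal LDIF reader for fixtures like users.ldif. Handles
# only the plain "attr: value" form used above (no base64 values,
# no line continuations), which is all this fixture needs.
def parse_ldif(text):
    entries = []
    for block in text.strip().split("\n\n"):
        if not block.strip():
            continue
        entry = {}
        for line in block.splitlines():
            if not line or line.startswith("#"):
                continue
            attr, _, value = line.partition(":")
            entry.setdefault(attr.strip(), []).append(value.strip())
        entries.append(entry)
    return entries

with open("users.ldif") as f:
    for e in parse_ldif(f.read()):
        print(e["dn"][0], e.get("uid", e.get("cn")))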
diff --git a/branch-1.2/ambari-server/src/test/resources/web/WEB-INF/web.xml b/branch-1.2/ambari-server/src/test/resources/web/WEB-INF/web.xml
deleted file mode 100644
index 724e23a..0000000
--- a/branch-1.2/ambari-server/src/test/resources/web/WEB-INF/web.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<web-app xmlns="http://java.sun.com/xml/ns/javaee"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://java.sun.com/xml/ns/javaee
-		  http://java.sun.com/xml/ns/javaee/web-app_2_5.xsd"
-         version="2.5">
-
-    <servlet>
-        <servlet-name>MyRestApplication</servlet-name>
-        <servlet-class>com.sun.jersey.spi.container.servlet.ServletContainer</servlet-class>
-        <load-on-startup>1</load-on-startup>
-    </servlet>
-    <servlet-mapping>
-        <servlet-name>MyRestApplication</servlet-name>
-        <url-pattern>/resources/*</url-pattern>
-    </servlet-mapping>
-</web-app>
diff --git a/branch-1.2/ambari-server/src/test/resources/web/index.jsp b/branch-1.2/ambari-server/src/test/resources/web/index.jsp
deleted file mode 100644
index 7e467c6..0000000
--- a/branch-1.2/ambari-server/src/test/resources/web/index.jsp
+++ /dev/null
@@ -1,29 +0,0 @@
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<%--
-  Created by IntelliJ IDEA.
-  User: john
-  Date: 6/20/12
-  Time: 9:04 PM
-  To change this template use File | Settings | File Templates.
---%>
-<%@ page contentType="text/html;charset=UTF-8" language="java" %>
-<html>
-<head>
-    <title></title>
-</head>
-<body>
-
-</body>
-</html>
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/app.js b/branch-1.2/ambari-web/app/app.js
deleted file mode 100644
index 697fde4..0000000
--- a/branch-1.2/ambari-web/app/app.js
+++ /dev/null
@@ -1,85 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Application bootstrapper
-
-module.exports = Em.Application.create({
-  name: 'Ambari Web',
-  rootElement: '#wrapper',
-
-  store: DS.Store.create({
-    revision: 4,
-    adapter: DS.FixtureAdapter.create({
-      simulateRemoteResponse: false
-    })
-  }),
-  isAdmin : function(){
-    var user = this.db && this.db.getUser();
-    return user ? user.admin : false;
-  }.property(),
-  /**
-   * Returns the URL prefix containing the numeric HDP stack version.
-   */
-  stackVersionURL:function(){
-    return '/stacks/HDP/version/' + App.defaultStackVersion.replace(/HDP-/g, '');
-  }.property()
-});
-
-/**
- * Ambari overrides the default date transformer because some
- * services send non-standard data. For example, Nagios sends a
- * date as the string "12345678": a value in seconds, whereas
- * JavaScript's Date expects milliseconds.
- */
-DS.attr.transforms.date = {
-  from: function (serialized) {
-    var type = typeof serialized;
-    if (type === "string") {
-      serialized = parseInt(serialized, 10);
-      type = typeof serialized;
-    }
-    if (type === "number") {
-      // The number could be seconds or milliseconds.
-      // If seconds, then multiplying with 1000 should still
-      // keep it below the current time.
-      if (serialized * 1000 < new Date().getTime()) {
-        serialized = serialized * 1000;
-      }
-      return new Date(serialized);
-    } else if (serialized === null || serialized === undefined) {
-      // if the value is not present in the data,
-      // return undefined, not null.
-      return serialized;
-    } else {
-      return null;
-    }
-  },
-  to: function (deserialized) {
-    if (deserialized instanceof Date) {
-      return deserialized.getTime();
-    } else if (deserialized === undefined) {
-      return undefined;
-    } else {
-      return null;
-    }
-  }
-};
-
-
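Editor's note: the one subtle piece of logic in the deleted app.js is the date transform's seconds-versus-milliseconds heuristic. A timestamp that is really in seconds, when multiplied by 1000, still lands at or before "now"; a millisecond timestamp multiplied by 1000 shoots far into the future. The transform uses that to decide whether to scale. The same heuristic transliterated to Python as a worked illustration (not Ambari code):

# Sketch of the seconds-vs-milliseconds heuristic from the deleted
# date transform above, for illustration only.
import time

def normalize_to_millis(value):
    """Accept an epoch timestamp as str/int, in seconds or millis."""
    if isinstance(value, str):
        value = int(value)
    now_ms = time.time() * 1000
    # If interpreting the value as seconds still keeps it in the past,
    # it was seconds; otherwise it was already milliseconds.
    if value * 1000 < now_ms:
        value *= 1000
    return value

print(normalize_to_millis("1359058506"))   # seconds  -> 1359058506000
print(normalize_to_millis(1359058506000))  # already millis, unchanged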
diff --git a/branch-1.2/ambari-web/app/assets/data/alerts/alerts.json b/branch-1.2/ambari-web/app/assets/data/alerts/alerts.json
deleted file mode 100644
index 8bd8c38..0000000
--- a/branch-1.2/ambari-web/app/assets/data/alerts/alerts.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
-  "href" : "http://ec2-54-234-53-225.compute-1.amazonaws.com:8080/api/v1/clusters/yusaku/host_components?HostRoles/component_name=NAGIOS_SERVER&fields=HostRoles/nagios_alerts",
-  "items" : [
-    {
-      "href" : "http://ec2-54-234-53-225.compute-1.amazonaws.com:8080/api/v1/clusters/yusaku/hosts/ip-10-114-35-111.ec2.internal/host_components/NAGIOS_SERVER",
-      "HostRoles" : {
-        "cluster_name" : "yusaku",
-        "nagios_alerts" : "{\"alerts\":[{\"service_description\":\"Ganglia [gmetad] Process down\",\"host_name\":\"ip-10-114-35-111.ec2.internal\",\"current_attempt\":\"1\",\"current_state\":\"0\",\"plugin_output\":\"TCP OK - 0.004 second response time on port 8651\",\"last_hard_state_change\":\"1359058506\",\"last_hard_state\":\"0\",\"last_time_ok\":\"1359072006\",\"last_time_warning\":\"0\",\"last_time_unknown\":\"0\",\"last_time_critical\":\"0\",\"is_flapping\":\"0\",\"last_check\":\"1359072006\",\"service_type\":\"GANGLIA\"},{\"service_description\":\"Ganglia collector [gmond] Process down alert for hbasemaster\",\"host_name\":\"ip-10-114-35-111.ec2.internal\",\"current_attempt\":\"1\",\"current_state\":\"0\",\"plugin_output\":\"TCP OK - 0.009 second response time on port 8663\",\"last_hard_state_change\":\"1359058517\",\"last_hard_state\":\"0\",\"last_time_ok\":\"1359072002\",\"last_time_warning\":\"0\",\"last_time_unknown\":\"0\",\"last_time_critical\":\"0\",\"is_flapping\":\"0\",\"last_check\":\"1359072002\",\"service_type\":\"GANGLIA\"},{\"service_description\":\"Ganglia collector [gmond] Process down alert for jobtracker\",\"host_name\":\"ip-10-114-35-111.ec2.internal\",\"current_attempt\":\"1\",\"current_state\":\"0\",\"plugin_output\":\"TCP OK - 0.009 second response time on port 8662\",\"last_hard_state_change\":\"1359058528\",\"last_hard_state\":\"0\",\"last_time_ok\":\"1359072013\",\"last_time_warning\":\"0\",\"last_time_unknown\":\"0\",\"last_time_critical\":\"0\",\"is_flapping\":\"0\",\"last_check\":\"1359072013\",\"service_type\":\"GANGLIA\"},{\"service_description\":\"Ganglia collector [gmond] Process down alert for namenode\",\"host_name\":\"ip-10-114-35-111.ec2.internal\",\"current_attempt\":\"1\",\"current_state\":\"0\",\"plugin_output\":\"TCP OK - 0.004 second response time on port 8661\",\"last_hard_state_change\":\"1359058540\",\"last_hard_state\":\"0\",\"last_time_ok\":\"1359072010\",\"last_time_warning\":\"0\",\"last_time_unknown\":\"0\",\"last_time_critical\":\"0\",\"is_flapping\":\"0\",\"last_check\":\"1359072010\",\"service_type\":\"GANGLIA\"},{\"service_description\":\"Ganglia collector [gmond] Process down alert for slaves\",\"host_name\":\"ip-10-114-35-111.ec2.internal\",\"current_attempt\":\"1\",\"current_state\":\"0\",\"plugin_output\":\"TCP OK - 0.009 second response time on port 8660\",\"last_hard_state_change\":\"1359058551\",\"last_hard_state\":\"0\",\"last_time_ok\":\"1359072006\",\"last_time_warning\":\"0\",\"last_time_unknown\":\"0\",\"last_time_critical\":\"0\",\"is_flapping\":\"0\",\"last_check\":\"1359072006\",\"service_type\":\"GANGLIA\"},{\"service_description\":\"Percent region servers down\",\"host_name\":\"ip-10-114-35-111.ec2.internal\",\"current_attempt\":\"1\",\"current_state\":\"0\",\"plugin_output\":\"OK: total:&lt;1&gt;, affected:&lt;0&gt;\",\"last_hard_state_change\":\"1359058802\",\"last_hard_state\":\"0\",\"last_time_ok\":\"1359072002\",\"last_time_warning\":\"0\",\"last_time_unknown\":\"0\",\"last_time_critical\":\"1359058772\",\"is_flapping\":\"0\",\"last_check\":\"1359072002\",\"service_type\":\"HBASE\"},{\"service_description\":\"HBase Web UI down\",\"host_name\":\"ip-10-114-35-111.ec2.internal\",\"current_attempt\":\"1\",\"current_state\":\"0\",\"plugin_output\":\"OK: Successfully accessed hbase Web 
UI\",\"last_hard_state_change\":\"1359058574\",\"last_hard_state\":\"0\",\"last_time_ok\":\"1359072014\",\"last_time_warning\":\"1359058634\",\"last_time_unknown\":\"0\",\"last_time_critical\":\"0\",\"is_flapping\":\"0\",\"last_check\":\"1359072014\",\"service_type\":\"HBASE\"},{\"service_description\":\"HBaseMaster CPU utilization\",\"host_name\":\"ip-10-114-35-111.ec2.internal\",\"current_attempt\":\"1\",\"current_state\":\"0\",\"plugin_output\":\"2 CPU, average load 14.0% &lt; 200% : OK\",\"last_hard_state_change\":\"1359058585\",\"last_hard_state\":\"0\",\"last_time_ok\":\"1359071785\",\"last_time_warning\":\"0\",\"last_time_unknown\":\"0\",\"last_time_critical\":\"0\",\"is_flapping\":\"0\",\"last_check\":\"1359071785\",\"service_type\":\"HBASE\"},{\"service_description\":\"HBaseMaster Process down\",\"host_name\":\"ip-10-114-35-111.ec2.internal\",\"current_attempt\":\"1\",\"current_state\":\"0\",\"plugin_output\":\"TCP OK - 0.000 second response time on port 60000\",\"last_hard_state_change\":\"1359058596\",\"last_hard_state\":\"0\",\"last_time_ok\":\"1359071991\",\"last_time_warning\":\"0\",\"last_time_unknown\":\"0\",\"last_time_critical\":\"1359058626\",\"is_flapping\":\"0\",\"last_check\":\"1359071991\",\"service_type\":\"HBASE\"},{\"service_description\":\"Corrupt\\/Missing blocks\",\"host_name\":\"ip-10-114-35-111.ec2.internal\",\"current_attempt\":\"1\",\"current_state\":\"0\",\"plugin_output\":\"OK: corrupt_blocks:&lt;0&gt;, missing_blocks:&lt;0&gt;, total_blocks:&lt;249&gt;\",\"last_hard_state_change\":\"1359058509\",\"last_hard_state\":\"0\",\"last_time_ok\":\"1359071949\",\"last_time_warning\":\"0\",\"last_time_unknown\":\"0\",\"last_time_critical\":\"0\",\"is_flapping\":\"0\",\"last_check\":\"1359071949\",\"service_type\":\"HDFS\"},{\"service_description\":\"HDFS Capacity utilization\",\"host_name\":\"ip-10-114-35-111.ec2.internal\",\"current_attempt\":\"1\",\"current_state\":\"0\",\"plugin_output\":\"OK: DFSUsedGB:&lt;0.1&gt;, DFSTotalGB:&lt;784.4&gt;\",\"last_hard_state_change\":\"1359058520\",\"last_hard_state\":\"0\",\"last_time_ok\":\"1359071720\",\"last_time_warning\":\"0\",\"last_time_unknown\":\"0\",\"last_time_critical\":\"0\",\"is_flapping\":\"0\",\"last_check\":\"1359071720\",\"service_type\":\"HDFS\"},{\"service_description\":\"Namenode RPC Latency\",\"host_name\":\"ip-10-114-35-111.ec2.internal\",\"current_attempt\":\"1\",\"current_state\":\"0\",\"plugin_output\":\"OK: RpcQueueTime_avg_time:&lt;0&gt; Secs, RpcProcessingTime_avg_time:&lt;0&gt; Secs\",\"last_hard_state_change\":\"1359058531\",\"last_hard_state\":\"0\",\"last_time_ok\":\"1359071731\",\"last_time_warning\":\"0\",\"last_time_unknown\":\"0\",\"last_time_critical\":\"0\",\"is_flapping\":\"0\",\"last_check\":\"1359071731\",\"service_type\":\"HDFS\"},{\"service_description\":\"Percent DataNodes down\",\"host_name\":\"ip-10-114-35-111.ec2.internal\",\"current_attempt\":\"1\",\"current_state\":\"0\",\"plugin_output\":\"OK: total:&lt;1&gt;, affected:&lt;0&gt;\",\"last_hard_state_change\":\"1359058543\",\"last_hard_state\":\"0\",\"last_time_ok\":\"1359072013\",\"last_time_warning\":\"0\",\"last_time_unknown\":\"0\",\"last_time_critical\":\"0\",\"is_flapping\":\"0\",\"last_check\":\"1359072013\",\"service_type\":\"HDFS\"},{\"service_description\":\"Percent DataNodes storage full\",\"host_name\":\"ip-10-114-35-111.ec2.internal\",\"current_attempt\":\"1\",\"current_state\":\"0\",\"plugin_output\":\"OK: total:&lt;1&gt;, 
affected:&lt;0&gt;\",\"last_hard_state_change\":\"1359058554\",\"last_hard_state\":\"0\",\"last_time_ok\":\"1359071994\",\"last_time_warning\":\"0\",\"last_time_unknown\":\"0\",\"last_time_critical\":\"0\",\"is_flapping\":\"0\",\"last_check\":\"1359071994\",\"service_type\":\"HDFS\"},{\"service_description\":\"Percent TaskTrackers down\",\"host_name\":\"ip-10-114-35-111.ec2.internal\",\"current_attempt\":\"1\",\"current_state\":\"0\",\"plugin_output\":\"OK: total:&lt;1&gt;, affected:&lt;0&gt;\",\"last_hard_state_change\":\"1359058745\",\"last_hard_state\":\"0\",\"last_time_ok\":\"1359072005\",\"last_time_warning\":\"0\",\"last_time_unknown\":\"0\",\"last_time_critical\":\"1359058715\",\"is_flapping\":\"0\",\"last_check\":\"1359072005\",\"service_type\":\"MAPREDUCE\"},{\"service_description\":\"Nagios status log staleness\",\"host_name\":\"ip-10-114-35-111.ec2.internal\",\"current_attempt\":\"1\",\"current_state\":\"0\",\"plugin_output\":\"NAGIOS OK: 2 processes, status log updated 9 seconds ago\",\"last_hard_state_change\":\"1359058576\",\"last_hard_state\":\"0\",\"last_time_ok\":\"1359071776\",\"last_time_warning\":\"0\",\"last_time_unknown\":\"0\",\"last_time_critical\":\"0\",\"is_flapping\":\"0\",\"last_check\":\"1359071776\",\"service_type\":\"UNKNOWN\"},{\"service_description\":\"Namenode Edit logs directory status\",\"host_name\":\"ip-10-114-35-111.ec2.internal\",\"current_attempt\":\"1\",\"current_state\":\"0\",\"plugin_output\":\"OK: All Namenode directories are active\",\"last_hard_state_change\":\"1359058588\",\"last_hard_state\":\"0\",\"last_time_ok\":\"1359071998\",\"last_time_warning\":\"0\",\"last_time_unknown\":\"0\",\"last_time_critical\":\"0\",\"is_flapping\":\"0\",\"last_check\":\"1359071998\",\"service_type\":\"HDFS\"},{\"service_description\":\"Namenode Host CPU utilization\",\"host_name\":\"ip-10-114-35-111.ec2.internal\",\"current_attempt\":\"1\",\"current_state\":\"0\",\"plugin_output\":\"2 CPU, average load 14.5% &lt; 200% : OK\",\"last_hard_state_change\":\"1359058599\",\"last_hard_state\":\"0\",\"last_time_ok\":\"1359071799\",\"last_time_warning\":\"0\",\"last_time_unknown\":\"0\",\"last_time_critical\":\"0\",\"is_flapping\":\"0\",\"last_check\":\"1359071799\",\"service_type\":\"HDFS\"},{\"service_description\":\"Namenode Process down\",\"host_name\":\"ip-10-114-35-111.ec2.internal\",\"current_attempt\":\"1\",\"current_state\":\"0\",\"plugin_output\":\"TCP OK - 0.001 second response time on port 8020\",\"last_hard_state_change\":\"1359058511\",\"last_hard_state\":\"0\",\"last_time_ok\":\"1359071996\",\"last_time_warning\":\"0\",\"last_time_unknown\":\"0\",\"last_time_critical\":\"1359058511\",\"is_flapping\":\"0\",\"last_check\":\"1359071996\",\"service_type\":\"HDFS\"},{\"service_description\":\"Namenode Web UI down\",\"host_name\":\"ip-10-114-35-111.ec2.internal\",\"current_attempt\":\"1\",\"current_state\":\"0\",\"plugin_output\":\"OK: Successfully accessed namenode Web UI\",\"last_hard_state_change\":\"1359058523\",\"last_hard_state\":\"0\",\"last_time_ok\":\"1359071963\",\"last_time_warning\":\"0\",\"last_time_unknown\":\"0\",\"last_time_critical\":\"0\",\"is_flapping\":\"0\",\"last_check\":\"1359071963\",\"service_type\":\"HDFS\"},{\"service_description\":\"ZKSERVERS Process down\",\"host_name\":\"ip-10-114-35-111.ec2.internal\",\"current_attempt\":\"1\",\"current_state\":\"0\",\"plugin_output\":\"TCP OK - 0.003 second response time on port 
2181\",\"last_hard_state_change\":\"1359058654\",\"last_hard_state\":\"0\",\"last_time_ok\":\"1359071974\",\"last_time_warning\":\"0\",\"last_time_unknown\":\"0\",\"last_time_critical\":\"1359058594\",\"is_flapping\":\"0\",\"last_check\":\"1359071974\",\"service_type\":\"UNKNOWN\"},{\"service_description\":\"Percent zookeeper servers down\",\"host_name\":\"ip-10-114-35-111.ec2.internal\",\"current_attempt\":\"1\",\"current_state\":\"0\",\"plugin_output\":\"OK: total:&lt;3&gt;, affected:&lt;0&gt;\",\"last_hard_state_change\":\"1359058545\",\"last_hard_state\":\"0\",\"last_time_ok\":\"1359072015\",\"last_time_warning\":\"0\",\"last_time_unknown\":\"0\",\"last_time_critical\":\"0\",\"is_flapping\":\"0\",\"last_check\":\"1359072015\",\"service_type\":\"ZOOKEEPER\"},{\"service_description\":\"HIVE-METASTORE status check\",\"host_name\":\"ip-10-12-194-214.ec2.internal\",\"current_attempt\":\"1\",\"current_state\":\"0\",\"plugin_output\":\"OK: Hive metaserver status OK\",\"last_hard_state_change\":\"1359058677\",\"last_hard_state\":\"0\",\"last_time_ok\":\"1359071997\",\"last_time_warning\":\"0\",\"last_time_unknown\":\"0\",\"last_time_critical\":\"1359058647\",\"is_flapping\":\"0\",\"last_check\":\"1359071997\",\"service_type\":\"HIVE\"},{\"service_description\":\"JobHistory Web UI down\",\"host_name\":\"ip-10-12-194-214.ec2.internal\",\"current_attempt\":\"1\",\"current_state\":\"0\",\"plugin_output\":\"OK: Successfully accessed jobhistory Web UI\",\"last_hard_state_change\":\"1359058568\",\"last_hard_state\":\"0\",\"last_time_ok\":\"1359072008\",\"last_time_warning\":\"1359058628\",\"last_time_unknown\":\"0\",\"last_time_critical\":\"0\",\"is_flapping\":\"0\",\"last_check\":\"1359072008\",\"service_type\":\"MAPREDUCE\"},{\"service_description\":\"JobTracker Web UI down\",\"host_name\":\"ip-10-12-194-214.ec2.internal\",\"current_attempt\":\"1\",\"current_state\":\"0\",\"plugin_output\":\"OK: Successfully accessed jobtracker Web UI\",\"last_hard_state_change\":\"1359058579\",\"last_hard_state\":\"0\",\"last_time_ok\":\"1359071959\",\"last_time_warning\":\"1359058639\",\"last_time_unknown\":\"0\",\"last_time_critical\":\"0\",\"is_flapping\":\"0\",\"last_check\":\"1359071959\",\"service_type\":\"MAPREDUCE\"},{\"service_description\":\"Jobtracker CPU utilization\",\"host_name\":\"ip-10-12-194-214.ec2.internal\",\"current_attempt\":\"1\",\"current_state\":\"0\",\"plugin_output\":\"2 CPU, average load 2.5% &lt; 200% : OK\",\"last_hard_state_change\":\"1359058591\",\"last_hard_state\":\"0\",\"last_time_ok\":\"1359071791\",\"last_time_warning\":\"0\",\"last_time_unknown\":\"0\",\"last_time_critical\":\"0\",\"is_flapping\":\"0\",\"last_check\":\"1359071791\",\"service_type\":\"MAPREDUCE\"},{\"service_description\":\"Jobtracker Process down\",\"host_name\":\"ip-10-12-194-214.ec2.internal\",\"current_attempt\":\"1\",\"current_state\":\"0\",\"plugin_output\":\"TCP OK - 0.064 second response time on port 50030\",\"last_hard_state_change\":\"1359058677\",\"last_hard_state\":\"0\",\"last_time_ok\":\"1359071997\",\"last_time_warning\":\"0\",\"last_time_unknown\":\"0\",\"last_time_critical\":\"1359058647\",\"is_flapping\":\"0\",\"last_check\":\"1359071997\",\"service_type\":\"MAPREDUCE\"},{\"service_description\":\"JobTracker RPC Latency\",\"host_name\":\"ip-10-12-194-214.ec2.internal\",\"current_attempt\":\"1\",\"current_state\":\"0\",\"plugin_output\":\"OK: RpcQueueTime_avg_time:&lt;0.2&gt; Secs, RpcProcessingTime_avg_time:&lt;0.24&gt; 
Secs\",\"last_hard_state_change\":\"1359058514\",\"last_hard_state\":\"0\",\"last_time_ok\":\"1359072014\",\"last_time_warning\":\"0\",\"last_time_unknown\":\"0\",\"last_time_critical\":\"0\",\"is_flapping\":\"0\",\"last_check\":\"1359072014\",\"service_type\":\"MAPREDUCE\"},{\"service_description\":\"Oozie status check\",\"host_name\":\"ip-10-12-194-214.ec2.internal\",\"current_attempt\":\"1\",\"current_state\":\"0\",\"plugin_output\":\"OK: Oozie server status [System mode: NORMAL]\",\"last_hard_state_change\":\"1359058826\",\"last_hard_state\":\"0\",\"last_time_ok\":\"1359071966\",\"last_time_warning\":\"0\",\"last_time_unknown\":\"0\",\"last_time_critical\":\"1359058766\",\"is_flapping\":\"0\",\"last_check\":\"1359071966\",\"service_type\":\"OOZIE\"},{\"service_description\":\"WEBHCAT status check\",\"host_name\":\"ip-10-12-194-214.ec2.internal\",\"current_attempt\":\"1\",\"current_state\":\"0\",\"plugin_output\":\"\",\"last_hard_state_change\":\"1359058897\",\"last_hard_state\":\"0\",\"last_time_ok\":\"1359071977\",\"last_time_warning\":\"0\",\"last_time_unknown\":\"0\",\"last_time_critical\":\"1359058837\",\"is_flapping\":\"\",\"last_check\":\"\",\"service_type\":\"WEBHCAT\"},{\"service_description\":\"ZKSERVERS Process down\",\"host_name\":\"ip-10-12-194-214.ec2.internal\",\"current_attempt\":\"1\",\"current_state\":\"0\",\"plugin_output\":\"TCP OK - 0.001 second response time on port 2181\",\"last_hard_state_change\":\"1359058548\",\"last_hard_state\":\"0\",\"last_time_ok\":\"1359071988\",\"last_time_warning\":\"0\",\"last_time_unknown\":\"0\",\"last_time_critical\":\"0\",\"is_flapping\":\"0\",\"last_check\":\"1359071988\",\"service_type\":\"UNKNOWN\"},{\"service_description\":\"ZKSERVERS Process down\",\"host_name\":\"ip-10-204-141-167.ec2.internal\",\"current_attempt\":\"1\",\"current_state\":\"0\",\"plugin_output\":\"TCP OK - 0.031 second response time on port 2181\",\"last_hard_state_change\":\"1359058559\",\"last_hard_state\":\"0\",\"last_time_ok\":\"1359071999\",\"last_time_warning\":\"0\",\"last_time_unknown\":\"0\",\"last_time_critical\":\"0\",\"is_flapping\":\"0\",\"last_check\":\"1359071999\",\"service_type\":\"UNKNOWN\"},{\"service_description\":\"Process down\",\"host_name\":\"ip-10-80-69-221.ec2.internal\",\"current_attempt\":\"1\",\"current_state\":\"0\",\"plugin_output\":\"TCP OK - 0.127 second response time on port 50010\",\"last_hard_state_change\":\"1359058571\",\"last_hard_state\":\"0\",\"last_time_ok\":\"1359072011\",\"last_time_warning\":\"0\",\"last_time_unknown\":\"0\",\"last_time_critical\":\"0\",\"is_flapping\":\"0\",\"last_check\":\"1359072011\",\"service_type\":\"UNKNOWN\"},{\"service_description\":\"Storage full\",\"host_name\":\"ip-10-80-69-221.ec2.internal\",\"current_attempt\":\"1\",\"current_state\":\"0\",\"plugin_output\":\"OK: Capacity:[], Remaining Capacity:[], percent_full:[0]\",\"last_hard_state_change\":\"1359058582\",\"last_hard_state\":\"0\",\"last_time_ok\":\"1359071782\",\"last_time_warning\":\"0\",\"last_time_unknown\":\"0\",\"last_time_critical\":\"0\",\"is_flapping\":\"0\",\"last_check\":\"1359071782\",\"service_type\":\"UNKNOWN\"},{\"service_description\":\"Process down\",\"host_name\":\"ip-10-80-69-221.ec2.internal\",\"current_attempt\":\"1\",\"current_state\":\"0\",\"plugin_output\":\"TCP OK - 0.001 second response time on port 
60020\",\"last_hard_state_change\":\"1359058773\",\"last_hard_state\":\"0\",\"last_time_ok\":\"1359071973\",\"last_time_warning\":\"0\",\"last_time_unknown\":\"0\",\"last_time_critical\":\"1359058713\",\"is_flapping\":\"0\",\"last_check\":\"1359071973\",\"service_type\":\"UNKNOWN\"},{\"service_description\":\"Process down\",\"host_name\":\"ip-10-80-69-221.ec2.internal\",\"current_attempt\":\"1\",\"current_state\":\"0\",\"plugin_output\":\"TCP OK - 0.001 second response time on port 50060\",\"last_hard_state_change\":\"1359058725\",\"last_hard_state\":\"0\",\"last_time_ok\":\"1359071985\",\"last_time_warning\":\"0\",\"last_time_unknown\":\"0\",\"last_time_critical\":\"1359058665\",\"is_flapping\":\"0\",\"last_check\":\"1359071985\",\"service_type\":\"UNKNOWN\"}],\"hostcounts\":{\"up_hosts\":4,\"down_hosts\":0},\"servicestates\":{\"PUPPET\":0,\"HBASE\":\"0\",\"HDFS\":\"0\",\"ZOOKEEPER\":0,\"HIVE-METASTORE\":\"0\",\"MAPREDUCE\":\"0\",\"OOZIE\":\"0\"}}",
-        "component_name" : "NAGIOS_SERVER",
-        "host_name" : "ip-10-114-35-111.ec2.internal"
-      },
-      "host" : {
-        "href" : "http://ec2-54-234-53-225.compute-1.amazonaws.com:8080/api/v1/clusters/yusaku/hosts/ip-10-114-35-111.ec2.internal"
-      }
-    }
-  ]
-}
\ No newline at end of file
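Editor's note: note the shape of this fixture — nagios_alerts is not a JSON object but a string containing escaped JSON, so a consumer has to decode twice: once for the API envelope and once for the embedded alert payload. A sketch of that double decode, assuming the fixture file name used above:

# Sketch: the nagios_alerts field above is JSON serialized inside a
# JSON string, so it must be parsed a second time.
import json

with open("alerts.json") as f:
    envelope = json.load(f)

for item in envelope["items"]:
    payload = json.loads(item["HostRoles"]["nagios_alerts"])  # second decode
    print(item["HostRoles"]["host_name"],
          payload["hostcounts"], len(payload["alerts"]), "alerts")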
diff --git a/branch-1.2/ambari-web/app/assets/data/alerts/alerts.jsonp b/branch-1.2/ambari-web/app/assets/data/alerts/alerts.jsonp
deleted file mode 100644
index 6da23a8..0000000
--- a/branch-1.2/ambari-web/app/assets/data/alerts/alerts.jsonp
+++ /dev/null
@@ -1,430 +0,0 @@
-jQuery172040994187095202506_1352498338217({
-   "alerts":[
-      {
-         "service_description":"Process down",
-         "host_name":"hostname",
-         "current_attempt":"1",
-         "current_state":"1",
-         "plugin_output":"TCP OK - 0.002 second response time on port 50010",
-         "last_hard_state_change":"1350378326",
-         "last_hard_state":"0",
-         "last_time_ok":"1352498253",
-         "last_time_warning":"0",
-         "last_time_unknown":"0",
-         "last_time_critical":"0",
-         "is_flapping":"0",
-         "last_check":"1352498253",
-         "service_type":"UNKNOWN"
-      },
-      {
-         "service_description":"Storage full",
-         "host_name":"hostname",
-         "current_attempt":"1",
-         "current_state":"0",
-         "plugin_output":"OK: Capacity:[], Remaining Capacity:[], percent_full:[0]",
-         "last_hard_state_change":"1350378331",
-         "last_hard_state":"0",
-         "last_time_ok":"1352498195",
-         "last_time_warning":"0",
-         "last_time_unknown":"0",
-         "last_time_critical":"0",
-         "is_flapping":"0",
-         "last_check":"1352498195",
-         "service_type":"UNKNOWN"
-      },
-      {
-         "service_description":"Ganglia [gmetad] Process down",
-         "host_name":"hostname",
-         "current_attempt":"1",
-         "current_state":"0",
-         "plugin_output":"TCP OK - 0.003 second response time on port 8651",
-         "last_hard_state_change":"1350378335",
-         "last_hard_state":"0",
-         "last_time_ok":"1352498284",
-         "last_time_warning":"0",
-         "last_time_unknown":"0",
-         "last_time_critical":"0",
-         "is_flapping":"0",
-         "last_check":"1352498284",
-         "service_type":"UNKNOWN"
-      },
-      {
-         "service_description":"Ganglia collector [gmond] Process down alert for jobtracker",
-         "host_name":"hostname",
-         "current_attempt":"1",
-         "current_state":"0",
-         "plugin_output":"TCP OK - 0.006 second response time on port 8662",
-         "last_hard_state_change":"1350378340",
-         "last_hard_state":"0",
-         "last_time_ok":"1352498284",
-         "last_time_warning":"0",
-         "last_time_unknown":"0",
-         "last_time_critical":"0",
-         "is_flapping":"0",
-         "last_check":"1352498284",
-         "service_type":"UNKNOWN"
-      },
-      {
-         "service_description":"Ganglia collector [gmond] Process down alert for namenode",
-         "host_name":"hostname",
-         "current_attempt":"1",
-         "current_state":"0",
-         "plugin_output":"TCP OK - 0.005 second response time on port 8661",
-         "last_hard_state_change":"1350378344",
-         "last_hard_state":"0",
-         "last_time_ok":"1352498284",
-         "last_time_warning":"0",
-         "last_time_unknown":"0",
-         "last_time_critical":"0",
-         "is_flapping":"0",
-         "last_check":"1352498284",
-         "service_type":"UNKNOWN"
-      },
-      {
-         "service_description":"Ganglia collector [gmond] Process down alert for slaves",
-         "host_name":"hostname",
-         "current_attempt":"1",
-         "current_state":"0",
-         "plugin_output":"TCP OK - 0.001 second response time on port 8660",
-         "last_hard_state_change":"1350378349",
-         "last_hard_state":"0",
-         "last_time_ok":"1352498284",
-         "last_time_warning":"0",
-         "last_time_unknown":"0",
-         "last_time_critical":"0",
-         "is_flapping":"0",
-         "last_check":"1352498284",
-         "service_type":"UNKNOWN"
-      },
-      {
-         "service_description":"Corrupt\/Missing blocks",
-         "host_name":"hostname",
-         "current_attempt":"1",
-         "current_state":"0",
-         "plugin_output":"OK: corrupt_blocks:&lt;0&gt;, missing_blocks:&lt;0&gt;, total_blocks:&lt;135&gt;",
-         "last_hard_state_change":"1350933589",
-         "last_hard_state":"0",
-         "last_time_ok":"1352498193",
-         "last_time_warning":"0",
-         "last_time_unknown":"0",
-         "last_time_critical":"1350933469",
-         "is_flapping":"0",
-         "last_check":"1352498193",
-         "service_type":"HDFS"
-      },
-      {
-         "service_description":"HDFS Capacity utilization",
-         "host_name":"hostname",
-         "current_attempt":"1",
-         "current_state":"0",
-         "plugin_output":"OK: DFSUsedGB:&lt;0&gt;, DFSTotalGB:&lt;784.3&gt;",
-         "last_hard_state_change":"1350378358",
-         "last_hard_state":"0",
-         "last_time_ok":"1352498195",
-         "last_time_warning":"0",
-         "last_time_unknown":"0",
-         "last_time_critical":"0",
-         "is_flapping":"0",
-         "last_check":"1352498195",
-         "service_type":"HDFS"
-      },
-      {
-         "service_description":"Namenode RPC Latency",
-         "host_name":"hostname",
-         "current_attempt":"1",
-         "current_state":"0",
-         "plugin_output":"OK: RpcQueueTime_avg_time:&lt;0&gt; Secs, RpcProcessingTime_avg_time:&lt;0&gt; Secs",
-         "last_hard_state_change":"1350378362",
-         "last_hard_state":"0",
-         "last_time_ok":"1352498195",
-         "last_time_warning":"0",
-         "last_time_unknown":"0",
-         "last_time_critical":"0",
-         "is_flapping":"0",
-         "last_check":"1352498195",
-         "service_type":"HDFS"
-      },
-      {
-         "service_description":"Percent DataNodes down",
-         "host_name":"hostname",
-         "current_attempt":"1",
-         "current_state":"0",
-         "plugin_output":"OK: total:&lt;1&gt;, affected:&lt;0&gt;",
-         "last_hard_state_change":"1350378367",
-         "last_hard_state":"0",
-         "last_time_ok":"1352498284",
-         "last_time_warning":"0",
-         "last_time_unknown":"0",
-         "last_time_critical":"0",
-         "is_flapping":"0",
-         "last_check":"1352498284",
-         "service_type":"HDFS"
-      },
-      {
-         "service_description":"Percent DataNodes storage full",
-         "host_name":"hostname",
-         "current_attempt":"1",
-         "current_state":"1",
-         "plugin_output":"OK: total:&lt;1&gt;, affected:&lt;0&gt;",
-         "last_hard_state_change":"1350378371",
-         "last_hard_state":"0",
-         "last_time_ok":"1352498193",
-         "last_time_warning":"0",
-         "last_time_unknown":"0",
-         "last_time_critical":"0",
-         "is_flapping":"0",
-         "last_check":"1352498193",
-         "service_type":"HDFS"
-      },
-      {
-         "service_description":"JobHistory Web UI down",
-         "host_name":"hostname",
-         "current_attempt":"1",
-         "current_state":"0",
-         "plugin_output":"OK: Successfully accessed jobhistory Web UI",
-         "last_hard_state_change":"1350378376",
-         "last_hard_state":"0",
-         "last_time_ok":"1352498253",
-         "last_time_warning":"1351216729",
-         "last_time_unknown":"0",
-         "last_time_critical":"0",
-         "is_flapping":"0",
-         "last_check":"1352498253",
-         "service_type":"MAPREDUCE"
-      },
-      {
-         "service_description":"JobTracker Web UI down",
-         "host_name":"hostname",
-         "current_attempt":"1",
-         "current_state":"1",
-         "plugin_output":"OK: Successfully accessed jobtracker Web UI",
-         "last_hard_state_change":"1350378380",
-         "last_hard_state":"0",
-         "last_time_ok":"1352498253",
-         "last_time_warning":"1351216729",
-         "last_time_unknown":"0",
-         "last_time_critical":"0",
-         "is_flapping":"0",
-         "last_check":"1352498253",
-         "service_type":"MAPREDUCE"
-      },
-      {
-         "service_description":"Jobtracker CPU utilization",
-         "host_name":"hostname",
-         "current_attempt":"1",
-         "current_state":"0",
-         "plugin_output":"2 CPU, average load 3.5% &lt; 200% : OK",
-         "last_hard_state_change":"1350378385",
-         "last_hard_state":"0",
-         "last_time_ok":"1352498195",
-         "last_time_warning":"0",
-         "last_time_unknown":"0",
-         "last_time_critical":"0",
-         "is_flapping":"0",
-         "last_check":"1352498195",
-         "service_type":"MAPREDUCE"
-      },
-      {
-         "service_description":"Jobtracker Process down",
-         "host_name":"hostname",
-         "current_attempt":"1",
-         "current_state":"0",
-         "plugin_output":"TCP OK - 0.001 second response time on port 50030",
-         "last_hard_state_change":"1351216775",
-         "last_hard_state":"0",
-         "last_time_ok":"1352498284",
-         "last_time_warning":"0",
-         "last_time_unknown":"0",
-         "last_time_critical":"1351216745",
-         "is_flapping":"0",
-         "last_check":"1352498284",
-         "service_type":"MAPREDUCE"
-      },
-      {
-         "service_description":"JobTracker RPC Latency",
-         "host_name":"hostname",
-         "current_attempt":"1",
-         "current_state":"0",
-         "plugin_output":"OK: RpcQueueTime_avg_time:&lt;0.03&gt; Secs, RpcProcessingTime_avg_time:&lt;0&gt; Secs",
-         "last_hard_state_change":"1350378394",
-         "last_hard_state":"0",
-         "last_time_ok":"1352498195",
-         "last_time_warning":"0",
-         "last_time_unknown":"0",
-         "last_time_critical":"0",
-         "is_flapping":"0",
-         "last_check":"1352498195",
-         "service_type":"MAPREDUCE"
-      },
-      {
-         "service_description":"Percent TaskTrackers down",
-         "host_name":"hostname",
-         "current_attempt":"1",
-         "current_state":"0",
-         "plugin_output":"OK: total:&lt;1&gt;, affected:&lt;0&gt;",
-         "last_hard_state_change":"1351216880",
-         "last_hard_state":"0",
-         "last_time_ok":"1352498284",
-         "last_time_warning":"0",
-         "last_time_unknown":"0",
-         "last_time_critical":"1351216850",
-         "is_flapping":"0",
-         "last_check":"1352498284",
-         "service_type":"MAPREDUCE"
-      },
-      {
-         "service_description":"Nagios status log staleness",
-         "host_name":"hostname",
-         "current_attempt":"1",
-         "current_state":"0",
-         "plugin_output":"NAGIOS OK: 2 processes, status log updated 0 seconds ago",
-         "last_hard_state_change":"1350937033",
-         "last_hard_state":"0",
-         "last_time_ok":"1352498233",
-         "last_time_warning":"1350936733",
-         "last_time_unknown":"0",
-         "last_time_critical":"0",
-         "is_flapping":"0",
-         "last_check":"1352498233",
-         "service_type":"UNKNOWN"
-      },
-      {
-         "service_description":"Namenode Edit logs directory status",
-         "host_name":"hostname",
-         "current_attempt":"1",
-         "current_state":"0",
-         "plugin_output":"OK: All Namenode directories are active",
-         "last_hard_state_change":"1350378408",
-         "last_hard_state":"0",
-         "last_time_ok":"1352498284",
-         "last_time_warning":"0",
-         "last_time_unknown":"0",
-         "last_time_critical":"0",
-         "is_flapping":"0",
-         "last_check":"1352498284",
-         "service_type":"HDFS"
-      },
-      {
-         "service_description":"Namenode Host CPU utilization",
-         "host_name":"hostname",
-         "current_attempt":"1",
-         "current_state":"0",
-         "plugin_output":"2 CPU, average load 3.5% &lt; 200% : OK",
-         "last_hard_state_change":"1350378412",
-         "last_hard_state":"0",
-         "last_time_ok":"1352498212",
-         "last_time_warning":"0",
-         "last_time_unknown":"0",
-         "last_time_critical":"0",
-         "is_flapping":"0",
-         "last_check":"1352498212",
-         "service_type":"HDFS"
-      },
-      {
-         "service_description":"Namenode Process down",
-         "host_name":"hostname",
-         "current_attempt":"1",
-         "current_state":"0",
-         "plugin_output":"TCP OK - 0.001 second response time on port 8020",
-         "last_hard_state_change":"1350378417",
-         "last_hard_state":"0",
-         "last_time_ok":"1352498284",
-         "last_time_warning":"0",
-         "last_time_unknown":"0",
-         "last_time_critical":"0",
-         "is_flapping":"0",
-         "last_check":"1352498284",
-         "service_type":"HDFS"
-      },
-      {
-         "service_description":"Namenode Web UI down",
-         "host_name":"hostname",
-         "current_attempt":"1",
-         "current_state":"0",
-         "plugin_output":"OK: Successfully accessed namenode Web UI",
-         "last_hard_state_change":"1350378421",
-         "last_hard_state":"0",
-         "last_time_ok":"1352498253",
-         "last_time_warning":"0",
-         "last_time_unknown":"0",
-         "last_time_critical":"0",
-         "is_flapping":"0",
-         "last_check":"1352498253",
-         "service_type":"HDFS"
-      },
-      {
-         "service_description":"Secondary Namenode Process down",
-         "host_name":"hostname",
-         "current_attempt":"1",
-         "current_state":"0",
-         "plugin_output":"TCP OK - 0.001 second response time on port 50090",
-         "last_hard_state_change":"1350378426",
-         "last_hard_state":"0",
-         "last_time_ok":"1352498284",
-         "last_time_warning":"0",
-         "last_time_unknown":"0",
-         "last_time_critical":"0",
-         "is_flapping":"0",
-         "last_check":"1352498284",
-         "service_type":"HDFS"
-      },
-      {
-         "service_description":"Oozie status check",
-         "host_name":"hostname",
-         "current_attempt":"1",
-         "current_state":"0",
-         "plugin_output":"OK: Oozie server status [System mode: NORMAL]",
-         "last_hard_state_change":"1351217029",
-         "last_hard_state":"0",
-         "last_time_ok":"1352498253",
-         "last_time_warning":"0",
-         "last_time_unknown":"0",
-         "last_time_critical":"1351924129",
-         "is_flapping":"0",
-         "last_check":"1352498253",
-         "service_type":"OOZIE"
-      },
-      {
-         "service_description":"Puppet agent down",
-         "host_name":"hostname",
-         "current_attempt":"1",
-         "current_state":"0",
-         "plugin_output":"TCP OK - 0.001 second response time on port 8139",
-         "last_hard_state_change":"1350378435",
-         "last_hard_state":"0",
-         "last_time_ok":"1352498253",
-         "last_time_warning":"0",
-         "last_time_unknown":"0",
-         "last_time_critical":"0",
-         "is_flapping":"0",
-         "last_check":"1352498253",
-         "service_type":"PUPPET"
-      },
-      {
-         "service_description":"Process down",
-         "host_name":"hostname",
-         "current_attempt":"1",
-         "current_state":"1",
-         "plugin_output":"TCP OK - 0.005 second response time on port 50060",
-         "last_hard_state_change":"1351216849",
-         "last_hard_state":"0",
-         "last_time_ok":"1352498253",
-         "last_time_warning":"0",
-         "last_time_unknown":"0",
-         "last_time_critical":"1351216789",
-         "is_flapping":"0",
-         "last_check":"1352498253",
-         "service_type":"UNKNOWN"
-      }
-   ],
-   "hostcounts":{
-      "up_hosts":1,
-      "down_hosts":0
-   },
-   "servicestates":{
-      "PUPPET":0,
-      "MAPREDUCE":"0",
-      "HDFS":"0",
-      "OOZIE":"0"
-   }
-});
\ No newline at end of file
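Editor's note: the .jsonp variant wraps the same alert payload in a jQuery callback (jQuery172...({...})), which is how the web app consumed it cross-origin during development. To reuse such a fixture outside a browser, the callback wrapper has to be stripped before parsing; a sketch:

# Sketch: unwrap a JSONP fixture like alerts.jsonp back into plain
# JSON by cutting everything outside the outermost parentheses.
import json

with open("alerts.jsonp") as f:
    text = f.read().strip()

start = text.index("(") + 1
end = text.rindex(")")
payload = json.loads(text[start:end])

print(payload["hostcounts"])         # {'up_hosts': 1, 'down_hosts': 0}
print(len(payload["alerts"]), "alerts in fixture")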
diff --git a/branch-1.2/ambari-web/app/assets/data/apps/jobs.json b/branch-1.2/ambari-web/app/assets/data/apps/jobs.json
deleted file mode 100644
index e69de29..0000000
--- a/branch-1.2/ambari-web/app/assets/data/apps/jobs.json
+++ /dev/null
diff --git a/branch-1.2/ambari-web/app/assets/data/apps/jobs/mr_201301280808_0001.json b/branch-1.2/ambari-web/app/assets/data/apps/jobs/mr_201301280808_0001.json
deleted file mode 100644
index 00425f8..0000000
--- a/branch-1.2/ambari-web/app/assets/data/apps/jobs/mr_201301280808_0001.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
-  "jobs": [
-    {
-      "jobId": "job_201301280808_0001",
-      "jobName": "word count",
-      "status": "SUCCESS",
-      "userName": "ambari_qa",
-      "submitTime": 1359378637135,
-      "elapsedTime": 30215,
-      "maps": 1,
-      "reduces": 1,
-      "inputBytes": 1942,
-      "outputBytes": 1908,
-      "confPath": "hdfs:\/\/ip-10-83-1-168\\.ec2\\.internal:8020\/user\/ambari_qa\/\\.staging\/job_201301280808_0001\/job\\.xml",
-      "workflowId": "mr_201301280808_0001",
-      "workflowEntityName": "X"
-    }
-  ]
-}
\ No newline at end of file
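Editor's note: each entry in these job fixtures records submitTime and elapsedTime in milliseconds plus map/reduce counts and I/O byte totals; the finish time is implied rather than stored. A quick sketch deriving it from the fixture above:

# Sketch: derive the implied finish time from a job fixture entry.
import json
from datetime import datetime, timezone

with open("mr_201301280808_0001.json") as f:
    job = json.load(f)["jobs"][0]

finish_ms = job["submitTime"] + job["elapsedTime"]   # both in millis
finished = datetime.fromtimestamp(finish_ms / 1000, tz=timezone.utc)
print(job["jobId"], job["status"], "finished", finished.isoformat())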
diff --git a/branch-1.2/ambari-web/app/assets/data/apps/jobs/mr_201301280808_0003.json b/branch-1.2/ambari-web/app/assets/data/apps/jobs/mr_201301280808_0003.json
deleted file mode 100644
index 02b78e8..0000000
--- a/branch-1.2/ambari-web/app/assets/data/apps/jobs/mr_201301280808_0003.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
-  "jobs": [
-    {
-      "jobId": "job_201301280808_0003",
-      "jobName": "oozie:launcher:T\\=map-reduce:W\\=map-reduce-wf:A\\=mr-node:ID\\=0000000-130128081151371-oozie-oozi-W",
-      "status": "SUCCESS",
-      "userName": "ambari_qa",
-      "submitTime": 1359378907927,
-      "elapsedTime": 19186,
-      "maps": 1,
-      "reduces": 0,
-      "inputBytes": 37485,
-      "outputBytes": 37458,
-      "confPath": "hdfs:\/\/ip-10-83-1-168\\.ec2\\.internal:8020\/user\/ambari_qa\/\\.staging\/job_201301280808_0003\/job\\.xml",
-      "workflowId": "mr_201301280808_0003",
-      "workflowEntityName": "X"
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/apps/jobs/mr_201301280808_0004.json b/branch-1.2/ambari-web/app/assets/data/apps/jobs/mr_201301280808_0004.json
deleted file mode 100644
index 82005e2..0000000
--- a/branch-1.2/ambari-web/app/assets/data/apps/jobs/mr_201301280808_0004.json
+++ /dev/null
@@ -1,49 +0,0 @@
-{
-  "jobs": [
-    {
-      "jobId": "job_201301280808_0002",
-      "jobName": "oozie:action:T\\=map-reduce:W\\=map-reduce-wf:A\\=mr-node:ID\\=0000000-130128081151371-oozie-oozi-W",
-      "status": "SUCCESS",
-      "userName": "ambari_qa",
-      "submitTime": 1359378922503,
-      "elapsedTime": 27080,
-      "maps": 1,
-      "reduces": 1,
-      "inputBytes": 1550,
-      "outputBytes": 1547,
-      "confPath": "hdfs:\/\/ip-10-83-1-168\\.ec2\\.internal:8020\/user\/ambari_qa\/\\.staging\/job_201301280808_0004\/job\\.xml",
-      "workflowId": "mr_201301280808_0002",
-      "workflowEntityName": "X"
-    },
-    {
-      "jobId": "job_201301280808_0004",
-      "jobName": "oozie:action:T\\=map-reduce:W\\=map-reduce-wf:A\\=mr-node:ID\\=0000000-130128081151371-oozie-oozi-W",
-      "status": "SUCCESS",
-      "userName": "ambari_qa",
-      "submitTime": 1359378922503,
-      "elapsedTime": 27080,
-      "maps": 1,
-      "reduces": 1,
-      "inputBytes": 1550,
-      "outputBytes": 1547,
-      "confPath": "hdfs:\/\/ip-10-83-1-168\\.ec2\\.internal:8020\/user\/ambari_qa\/\\.staging\/job_201301280808_0004\/job\\.xml",
-      "workflowId": "mr_201301280808_0004",
-      "workflowEntityName": "X"
-    },
-    {
-      "jobId": "job_201301280808_0003",
-      "jobName": "oozie:action:T\\=map-reduce:W\\=map-reduce-wf:A\\=mr-node:ID\\=0000000-130128081151371-oozie-oozi-W",
-      "status": "SUCCESS",
-      "userName": "ambari_qa",
-      "submitTime": 1359378922503,
-      "elapsedTime": 27080,
-      "maps": 1,
-      "reduces": 1,
-      "inputBytes": 1550,
-      "outputBytes": 1547,
-      "confPath": "hdfs:\/\/ip-10-83-1-168\\.ec2\\.internal:8020\/user\/ambari_qa\/\\.staging\/job_201301280808_0004\/job\\.xml",
-      "workflowId": "mr_201301280808_0003",
-      "workflowEntityName": "X"
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/apps/jobs/pig_f9957a11-a902-4f01-ac53-9679ce3a4b13.json b/branch-1.2/ambari-web/app/assets/data/apps/jobs/pig_f9957a11-a902-4f01-ac53-9679ce3a4b13.json
deleted file mode 100644
index f4ff684..0000000
--- a/branch-1.2/ambari-web/app/assets/data/apps/jobs/pig_f9957a11-a902-4f01-ac53-9679ce3a4b13.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
-  "jobs": [
-    {
-      "jobId": "job_201301280808_0002",
-      "jobName": "PigLatin:pigSmoke\\.sh",
-      "status": "SUCCESS",
-      "userName": "ambari_qa",
-      "submitTime": 1359378741973,
-      "elapsedTime": 18125,
-      "maps": 1,
-      "reduces": 0,
-      "inputBytes": 2186,
-      "outputBytes": 253,
-      "confPath": "hdfs:\/\/ip-10-83-1-168\\.ec2\\.internal:8020\/user\/ambari_qa\/\\.staging\/job_201301280808_0002\/job\\.xml",
-      "workflowId": "pig_f9957a11-a902-4f01-ac53-9679ce3a4b13",
-      "workflowEntityName": "scope-5"
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/apps/jobs/taskview.json b/branch-1.2/ambari-web/app/assets/data/apps/jobs/taskview.json
deleted file mode 100644
index fbee75b..0000000
--- a/branch-1.2/ambari-web/app/assets/data/apps/jobs/taskview.json
+++ /dev/null
@@ -1,92 +0,0 @@
-{
-  "mapNodeLocal": [{
-    "x": 1358508346,
-    "y": 0,
-    "r": 0,
-    "io": 0
-  }, {
-    "x": 1358508356,
-    "y": 2393,
-    "r": 24,
-    "io": 62557,
-    "label": "attempt_201301180624_0001_m_000000_0",
-    "status": "SUCCESS"
-  }, {
-    "x": 1358508358,
-    "y": 0,
-    "r": 0,
-    "io": 0
-  }, {
-    "x": 1358508371,
-    "y": 0,
-    "r": 0,
-    "io": 0
-  }],
-  "mapRackLocal": [{
-    "x": 1358508346,
-    "y": 0,
-    "r": 0,
-    "io": 0
-  }, {
-    "x": 1358508356,
-    "y": 0,
-    "r": 0,
-    "io": 0
-  }, {
-    "x": 1358508358,
-    "y": 0,
-    "r": 0,
-    "io": 0
-  }, {
-    "x": 1358508371,
-    "y": 0,
-    "r": 0,
-    "io": 0
-  }],
-  "mapOffSwitch": [{
-    "x": 1358508346,
-    "y": 0,
-    "r": 0,
-    "io": 0
-  }, {
-    "x": 1358508356,
-    "y": 0,
-    "r": 0,
-    "io": 0
-  }, {
-    "x": 1358508358,
-    "y": 0,
-    "r": 0,
-    "io": 0
-  }, {
-    "x": 1358508371,
-    "y": 0,
-    "r": 0,
-    "io": 0
-  }],
-  "reduceOffSwitch": [{
-    "x": 1358508346,
-    "y": 0,
-    "r": 0,
-    "io": 0
-  }, {
-    "x": 1358508356,
-    "y": 0,
-    "r": 0,
-    "io": 0
-  }, {
-    "x": 1358508358,
-    "y": 10407,
-    "r": 24,
-    "io": 63848,
-    "label": "attempt_201301180624_0001_r_000000_0",
-    "status": "SUCCESS"
-  }, {
-    "x": 1358508371,
-    "y": 0,
-    "r": 0,
-    "io": 0
-  }],
-  "submitTime": 1358508346,
-  "finishTime": 1358508371
-}
\ No newline at end of file
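Editor's note: taskview.json feeds a task scatter chart, with one series per map locality bucket (node-local, rack-local, off-switch) plus reduces; within a series, only points carrying a label/status appear to be real task attempts, while the zeroed points look like axis padding. Under that assumption, a sketch pulling out just the labeled attempts:

# Sketch: list the labeled (non-empty) task attempts in a taskview
# fixture such as the one above; field semantics are assumed.
import json

with open("taskview.json") as f:
    view = json.load(f)

for series, points in view.items():
    if series in ("submitTime", "finishTime"):   # scalar bounds, skip
        continue
    for p in points:
        if p.get("label"):
            print(series, p["label"], p["status"], "io bytes:", p["io"])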
diff --git a/branch-1.2/ambari-web/app/assets/data/apps/jobs/timeline.json b/branch-1.2/ambari-web/app/assets/data/apps/jobs/timeline.json
deleted file mode 100644
index 9d5d3e9..0000000
--- a/branch-1.2/ambari-web/app/assets/data/apps/jobs/timeline.json
+++ /dev/null
@@ -1,3272 +0,0 @@
-{
-    "map": [
-        {
-            "x": 1353963926,
-            "y": 0
-        },
-        {
-            "x": 1353963927,
-            "y": 0
-        },
-        {
-            "x": 1353963928,
-            "y": 0
-        },
-        {
-            "x": 1353963929,
-            "y": 0
-        },
-        {
-            "x": 1353963930,
-            "y": 0
-        },
-        {
-            "x": 1353963931,
-            "y": 0
-        },
-        {
-            "x": 1353963932,
-            "y": 0
-        },
-        {
-            "x": 1353963933,
-            "y": 0
-        },
-        {
-            "x": 1353963934,
-            "y": 0
-        },
-        {
-            "x": 1353963935,
-            "y": 0
-        },
-        {
-            "x": 1353963936,
-            "y": 0
-        },
-        {
-            "x": 1353963937,
-            "y": 0
-        },
-        {
-            "x": 1353963938,
-            "y": 0
-        },
-        {
-            "x": 1353963939,
-            "y": 0
-        },
-        {
-            "x": 1353963940,
-            "y": 0
-        },
-        {
-            "x": 1353963941,
-            "y": 0
-        },
-        {
-            "x": 1353963942,
-            "y": 0
-        },
-        {
-            "x": 1353963943,
-            "y": 0
-        },
-        {
-            "x": 1353963944,
-            "y": 0
-        },
-        {
-            "x": 1353963945,
-            "y": 0
-        },
-        {
-            "x": 1353963946,
-            "y": 0
-        },
-        {
-            "x": 1353963947,
-            "y": 0
-        },
-        {
-            "x": 1353963948,
-            "y": 0
-        },
-        {
-            "x": 1353963949,
-            "y": 0
-        },
-        {
-            "x": 1353963950,
-            "y": 0
-        },
-        {
-            "x": 1353963951,
-            "y": 0
-        },
-        {
-            "x": 1353963952,
-            "y": 0
-        },
-        {
-            "x": 1353963953,
-            "y": 0
-        },
-        {
-            "x": 1353963954,
-            "y": 0
-        },
-        {
-            "x": 1353963955,
-            "y": 0
-        },
-        {
-            "x": 1353963956,
-            "y": 0
-        },
-        {
-            "x": 1353963957,
-            "y": 0
-        },
-        {
-            "x": 1353963958,
-            "y": 0
-        },
-        {
-            "x": 1353963959,
-            "y": 0
-        },
-        {
-            "x": 1353963960,
-            "y": 0
-        },
-        {
-            "x": 1353963961,
-            "y": 0
-        },
-        {
-            "x": 1353963962,
-            "y": 0
-        },
-        {
-            "x": 1353963963,
-            "y": 0
-        },
-        {
-            "x": 1353963964,
-            "y": 0
-        },
-        {
-            "x": 1353963965,
-            "y": 0
-        },
-        {
-            "x": 1353963966,
-            "y": 0
-        },
-        {
-            "x": 1353963967,
-            "y": 0
-        },
-        {
-            "x": 1353963968,
-            "y": 0
-        },
-        {
-            "x": 1353963969,
-            "y": 0
-        },
-        {
-            "x": 1353963970,
-            "y": 0
-        },
-        {
-            "x": 1353963971,
-            "y": 0
-        },
-        {
-            "x": 1353963972,
-            "y": 0
-        },
-        {
-            "x": 1353963973,
-            "y": 0
-        },
-        {
-            "x": 1353963974,
-            "y": 0
-        },
-        {
-            "x": 1353963975,
-            "y": 0
-        },
-        {
-            "x": 1353963976,
-            "y": 0
-        },
-        {
-            "x": 1353963977,
-            "y": 0
-        },
-        {
-            "x": 1353963978,
-            "y": 0
-        },
-        {
-            "x": 1353963979,
-            "y": 0
-        },
-        {
-            "x": 1353963980,
-            "y": 0
-        },
-        {
-            "x": 1353963981,
-            "y": 0
-        },
-        {
-            "x": 1353963982,
-            "y": 0
-        },
-        {
-            "x": 1353963983,
-            "y": 0
-        },
-        {
-            "x": 1353963984,
-            "y": 0
-        },
-        {
-            "x": 1353963985,
-            "y": 0
-        },
-        {
-            "x": 1353963986,
-            "y": 0
-        },
-        {
-            "x": 1353963987,
-            "y": 0
-        },
-        {
-            "x": 1353963988,
-            "y": 0
-        },
-        {
-            "x": 1353963989,
-            "y": 0
-        },
-        {
-            "x": 1353963990,
-            "y": 0
-        },
-        {
-            "x": 1353963991,
-            "y": 0
-        },
-        {
-            "x": 1353963992,
-            "y": 0
-        },
-        {
-            "x": 1353963993,
-            "y": 0
-        },
-        {
-            "x": 1353963994,
-            "y": 0
-        },
-        {
-            "x": 1353963995,
-            "y": 0
-        },
-        {
-            "x": 1353963996,
-            "y": 0
-        },
-        {
-            "x": 1353963997,
-            "y": 0
-        },
-        {
-            "x": 1353963998,
-            "y": 0
-        },
-        {
-            "x": 1353963999,
-            "y": 0
-        },
-        {
-            "x": 1353964000,
-            "y": 0
-        },
-        {
-            "x": 1353964001,
-            "y": 0
-        },
-        {
-            "x": 1353964002,
-            "y": 0
-        },
-        {
-            "x": 1353964003,
-            "y": 0
-        },
-        {
-            "x": 1353964004,
-            "y": 0
-        },
-        {
-            "x": 1353964005,
-            "y": 0
-        },
-        {
-            "x": 1353964006,
-            "y": 0
-        },
-        {
-            "x": 1353964007,
-            "y": 0
-        },
-        {
-            "x": 1353964008,
-            "y": 0
-        },
-        {
-            "x": 1353964009,
-            "y": 0
-        },
-        {
-            "x": 1353964010,
-            "y": 0
-        },
-        {
-            "x": 1353964011,
-            "y": 0
-        },
-        {
-            "x": 1353964012,
-            "y": 0
-        },
-        {
-            "x": 1353964013,
-            "y": 0
-        },
-        {
-            "x": 1353964014,
-            "y": 0
-        },
-        {
-            "x": 1353964015,
-            "y": 0
-        },
-        {
-            "x": 1353964016,
-            "y": 0
-        },
-        {
-            "x": 1353964017,
-            "y": 0
-        },
-        {
-            "x": 1353964018,
-            "y": 0
-        },
-        {
-            "x": 1353964019,
-            "y": 0
-        },
-        {
-            "x": 1353964020,
-            "y": 0
-        },
-        {
-            "x": 1353964021,
-            "y": 0
-        },
-        {
-            "x": 1353964022,
-            "y": 0
-        },
-        {
-            "x": 1353964023,
-            "y": 0
-        },
-        {
-            "x": 1353964024,
-            "y": 0
-        },
-        {
-            "x": 1353964025,
-            "y": 0
-        },
-        {
-            "x": 1353964026,
-            "y": 0
-        },
-        {
-            "x": 1353964027,
-            "y": 0
-        },
-        {
-            "x": 1353964028,
-            "y": 0
-        },
-        {
-            "x": 1353964029,
-            "y": 0
-        },
-        {
-            "x": 1353964030,
-            "y": 0
-        },
-        {
-            "x": 1353964031,
-            "y": 0
-        },
-        {
-            "x": 1353964032,
-            "y": 0
-        },
-        {
-            "x": 1353964033,
-            "y": 0
-        },
-        {
-            "x": 1353964034,
-            "y": 0
-        },
-        {
-            "x": 1353964035,
-            "y": 0
-        },
-        {
-            "x": 1353964036,
-            "y": 0
-        },
-        {
-            "x": 1353964037,
-            "y": 0
-        },
-        {
-            "x": 1353964038,
-            "y": 0
-        },
-        {
-            "x": 1353964039,
-            "y": 0
-        },
-        {
-            "x": 1353964040,
-            "y": 0
-        },
-        {
-            "x": 1353964041,
-            "y": 0
-        },
-        {
-            "x": 1353964042,
-            "y": 0
-        },
-        {
-            "x": 1353964043,
-            "y": 0
-        },
-        {
-            "x": 1353964044,
-            "y": 0
-        },
-        {
-            "x": 1353964045,
-            "y": 0
-        },
-        {
-            "x": 1353964046,
-            "y": 0
-        },
-        {
-            "x": 1353964047,
-            "y": 0
-        },
-        {
-            "x": 1353964048,
-            "y": 0
-        },
-        {
-            "x": 1353964049,
-            "y": 0
-        },
-        {
-            "x": 1353964050,
-            "y": 0
-        },
-        {
-            "x": 1353964051,
-            "y": 0
-        },
-        {
-            "x": 1353964052,
-            "y": 0
-        },
-        {
-            "x": 1353964053,
-            "y": 0
-        },
-        {
-            "x": 1353964054,
-            "y": 0
-        },
-        {
-            "x": 1353964055,
-            "y": 0
-        },
-        {
-            "x": 1353964056,
-            "y": 0
-        },
-        {
-            "x": 1353964057,
-            "y": 0
-        },
-        {
-            "x": 1353964058,
-            "y": 0
-        },
-        {
-            "x": 1353964059,
-            "y": 1
-        },
-        {
-            "x": 1353964060,
-            "y": 1
-        },
-        {
-            "x": 1353964061,
-            "y": 1
-        },
-        {
-            "x": 1353964062,
-            "y": 1
-        },
-        {
-            "x": 1353964063,
-            "y": 1
-        },
-        {
-            "x": 1353964064,
-            "y": 1
-        },
-        {
-            "x": 1353964065,
-            "y": 1
-        },
-        {
-            "x": 1353964066,
-            "y": 1
-        },
-        {
-            "x": 1353964067,
-            "y": 1
-        },
-        {
-            "x": 1353964068,
-            "y": 1
-        },
-        {
-            "x": 1353964069,
-            "y": 1
-        },
-        {
-            "x": 1353964070,
-            "y": 1
-        },
-        {
-            "x": 1353964071,
-            "y": 0
-        },
-        {
-            "x": 1353964072,
-            "y": 0
-        },
-        {
-            "x": 1353964073,
-            "y": 0
-        },
-        {
-            "x": 1353964074,
-            "y": 0
-        },
-        {
-            "x": 1353964075,
-            "y": 0
-        },
-        {
-            "x": 1353964076,
-            "y": 0
-        },
-        {
-            "x": 1353964077,
-            "y": 0
-        },
-        {
-            "x": 1353964078,
-            "y": 0
-        },
-        {
-            "x": 1353964079,
-            "y": 0
-        },
-        {
-            "x": 1353964080,
-            "y": 0
-        },
-        {
-            "x": 1353964081,
-            "y": 0
-        },
-        {
-            "x": 1353964082,
-            "y": 0
-        },
-        {
-            "x": 1353964083,
-            "y": 0
-        },
-        {
-            "x": 1353964084,
-            "y": 0
-        },
-        {
-            "x": 1353964085,
-            "y": 0
-        },
-        {
-            "x": 1353964086,
-            "y": 0
-        },
-        {
-            "x": 1353964087,
-            "y": 0
-        },
-        {
-            "x": 1353964088,
-            "y": 0
-        },
-        {
-            "x": 1353964089,
-            "y": 0
-        },
-        {
-            "x": 1353964090,
-            "y": 0
-        },
-        {
-            "x": 1353964091,
-            "y": 0
-        },
-        {
-            "x": 1353964092,
-            "y": 0
-        },
-        {
-            "x": 1353964093,
-            "y": 0
-        },
-        {
-            "x": 1353964094,
-            "y": 0
-        },
-        {
-            "x": 1353964095,
-            "y": 0
-        },
-        {
-            "x": 1353964096,
-            "y": 0
-        },
-        {
-            "x": 1353964097,
-            "y": 0
-        },
-        {
-            "x": 1353964098,
-            "y": 0
-        },
-        {
-            "x": 1353964099,
-            "y": 0
-        },
-        {
-            "x": 1353964100,
-            "y": 0
-        },
-        {
-            "x": 1353964101,
-            "y": 0
-        },
-        {
-            "x": 1353964102,
-            "y": 0
-        },
-        {
-            "x": 1353964103,
-            "y": 0
-        },
-        {
-            "x": 1353964104,
-            "y": 0
-        },
-        {
-            "x": 1353964105,
-            "y": 0
-        },
-        {
-            "x": 1353964106,
-            "y": 0
-        },
-        {
-            "x": 1353964107,
-            "y": 0
-        },
-        {
-            "x": 1353964108,
-            "y": 0
-        },
-        {
-            "x": 1353964109,
-            "y": 0
-        },
-        {
-            "x": 1353964110,
-            "y": 0
-        },
-        {
-            "x": 1353964111,
-            "y": 0
-        },
-        {
-            "x": 1353964112,
-            "y": 0
-        },
-        {
-            "x": 1353964113,
-            "y": 0
-        },
-        {
-            "x": 1353964114,
-            "y": 0
-        },
-        {
-            "x": 1353964115,
-            "y": 0
-        },
-        {
-            "x": 1353964116,
-            "y": 0
-        },
-        {
-            "x": 1353964117,
-            "y": 0
-        },
-        {
-            "x": 1353964118,
-            "y": 0
-        },
-        {
-            "x": 1353964119,
-            "y": 0
-        },
-        {
-            "x": 1353964120,
-            "y": 0
-        },
-        {
-            "x": 1353964121,
-            "y": 0
-        },
-        {
-            "x": 1353964122,
-            "y": 0
-        },
-        {
-            "x": 1353964123,
-            "y": 0
-        },
-        {
-            "x": 1353964124,
-            "y": 0
-        },
-        {
-            "x": 1353964125,
-            "y": 0
-        },
-        {
-            "x": 1353964126,
-            "y": 0
-        },
-        {
-            "x": 1353964127,
-            "y": 0
-        },
-        {
-            "x": 1353964128,
-            "y": 0
-        },
-        {
-            "x": 1353964129,
-            "y": 0
-        },
-        {
-            "x": 1353964130,
-            "y": 0
-        },
-        {
-            "x": 1353964131,
-            "y": 0
-        },
-        {
-            "x": 1353964132,
-            "y": 0
-        },
-        {
-            "x": 1353964133,
-            "y": 0
-        },
-        {
-            "x": 1353964134,
-            "y": 0
-        },
-        {
-            "x": 1353964135,
-            "y": 0
-        },
-        {
-            "x": 1353964136,
-            "y": 0
-        },
-        {
-            "x": 1353964137,
-            "y": 0
-        },
-        {
-            "x": 1353964138,
-            "y": 0
-        },
-        {
-            "x": 1353964139,
-            "y": 0
-        },
-        {
-            "x": 1353964140,
-            "y": 0
-        },
-        {
-            "x": 1353964141,
-            "y": 0
-        },
-        {
-            "x": 1353964142,
-            "y": 0
-        },
-        {
-            "x": 1353964143,
-            "y": 0
-        },
-        {
-            "x": 1353964144,
-            "y": 0
-        },
-        {
-            "x": 1353964145,
-            "y": 0
-        },
-        {
-            "x": 1353964146,
-            "y": 0
-        },
-        {
-            "x": 1353964147,
-            "y": 0
-        },
-        {
-            "x": 1353964148,
-            "y": 0
-        },
-        {
-            "x": 1353964149,
-            "y": 0
-        },
-        {
-            "x": 1353964150,
-            "y": 0
-        },
-        {
-            "x": 1353964151,
-            "y": 0
-        },
-        {
-            "x": 1353964152,
-            "y": 0
-        },
-        {
-            "x": 1353964153,
-            "y": 0
-        },
-        {
-            "x": 1353964154,
-            "y": 0
-        },
-        {
-            "x": 1353964155,
-            "y": 0
-        },
-        {
-            "x": 1353964156,
-            "y": 0
-        },
-        {
-            "x": 1353964157,
-            "y": 0
-        },
-        {
-            "x": 1353964158,
-            "y": 0
-        },
-        {
-            "x": 1353964159,
-            "y": 0
-        },
-        {
-            "x": 1353964160,
-            "y": 0
-        },
-        {
-            "x": 1353964161,
-            "y": 0
-        },
-        {
-            "x": 1353964162,
-            "y": 0
-        },
-        {
-            "x": 1353964163,
-            "y": 0
-        },
-        {
-            "x": 1353964164,
-            "y": 0
-        },
-        {
-            "x": 1353964165,
-            "y": 0
-        },
-        {
-            "x": 1353964166,
-            "y": 0
-        },
-        {
-            "x": 1353964167,
-            "y": 0
-        },
-        {
-            "x": 1353964168,
-            "y": 0
-        },
-        {
-            "x": 1353964169,
-            "y": 0
-        },
-        {
-            "x": 1353964170,
-            "y": 0
-        },
-        {
-            "x": 1353964171,
-            "y": 0
-        },
-        {
-            "x": 1353964172,
-            "y": 0
-        },
-        {
-            "x": 1353964173,
-            "y": 0
-        },
-        {
-            "x": 1353964174,
-            "y": 0
-        },
-        {
-            "x": 1353964175,
-            "y": 0
-        },
-        {
-            "x": 1353964176,
-            "y": 0
-        },
-        {
-            "x": 1353964177,
-            "y": 0
-        },
-        {
-            "x": 1353964178,
-            "y": 0
-        },
-        {
-            "x": 1353964179,
-            "y": 0
-        },
-        {
-            "x": 1353964180,
-            "y": 0
-        },
-        {
-            "x": 1353964181,
-            "y": 0
-        },
-        {
-            "x": 1353964182,
-            "y": 0
-        },
-        {
-            "x": 1353964183,
-            "y": 0
-        },
-        {
-            "x": 1353964184,
-            "y": 0
-        },
-        {
-            "x": 1353964185,
-            "y": 0
-        },
-        {
-            "x": 1353964186,
-            "y": 0
-        },
-        {
-            "x": 1353964187,
-            "y": 0
-        },
-        {
-            "x": 1353964188,
-            "y": 0
-        },
-        {
-            "x": 1353964189,
-            "y": 0
-        },
-        {
-            "x": 1353964190,
-            "y": 0
-        },
-        {
-            "x": 1353964191,
-            "y": 0
-        },
-        {
-            "x": 1353964192,
-            "y": 0
-        },
-        {
-            "x": 1353964193,
-            "y": 0
-        },
-        {
-            "x": 1353964194,
-            "y": 0
-        },
-        {
-            "x": 1353964195,
-            "y": 0
-        },
-        {
-            "x": 1353964196,
-            "y": 0
-        },
-        {
-            "x": 1353964197,
-            "y": 0
-        }
-    ],
-    "shuffle": [
-        {
-            "x": 1353963926,
-            "y": 0
-        },
-        {
-            "x": 1353963927,
-            "y": 0
-        },
-        {
-            "x": 1353963928,
-            "y": 0
-        },
-        {
-            "x": 1353963929,
-            "y": 0
-        },
-        {
-            "x": 1353963930,
-            "y": 0
-        },
-        {
-            "x": 1353963931,
-            "y": 0
-        },
-        {
-            "x": 1353963932,
-            "y": 0
-        },
-        {
-            "x": 1353963933,
-            "y": 0
-        },
-        {
-            "x": 1353963934,
-            "y": 0
-        },
-        {
-            "x": 1353963935,
-            "y": 0
-        },
-        {
-            "x": 1353963936,
-            "y": 0
-        },
-        {
-            "x": 1353963937,
-            "y": 0
-        },
-        {
-            "x": 1353963938,
-            "y": 0
-        },
-        {
-            "x": 1353963939,
-            "y": 0
-        },
-        {
-            "x": 1353963940,
-            "y": 0
-        },
-        {
-            "x": 1353963941,
-            "y": 0
-        },
-        {
-            "x": 1353963942,
-            "y": 0
-        },
-        {
-            "x": 1353963943,
-            "y": 0
-        },
-        {
-            "x": 1353963944,
-            "y": 0
-        },
-        {
-            "x": 1353963945,
-            "y": 0
-        },
-        {
-            "x": 1353963946,
-            "y": 0
-        },
-        {
-            "x": 1353963947,
-            "y": 0
-        },
-        {
-            "x": 1353963948,
-            "y": 0
-        },
-        {
-            "x": 1353963949,
-            "y": 0
-        },
-        {
-            "x": 1353963950,
-            "y": 0
-        },
-        {
-            "x": 1353963951,
-            "y": 0
-        },
-        {
-            "x": 1353963952,
-            "y": 0
-        },
-        {
-            "x": 1353963953,
-            "y": 0
-        },
-        {
-            "x": 1353963954,
-            "y": 0
-        },
-        {
-            "x": 1353963955,
-            "y": 0
-        },
-        {
-            "x": 1353963956,
-            "y": 0
-        },
-        {
-            "x": 1353963957,
-            "y": 0
-        },
-        {
-            "x": 1353963958,
-            "y": 0
-        },
-        {
-            "x": 1353963959,
-            "y": 0
-        },
-        {
-            "x": 1353963960,
-            "y": 0
-        },
-        {
-            "x": 1353963961,
-            "y": 0
-        },
-        {
-            "x": 1353963962,
-            "y": 0
-        },
-        {
-            "x": 1353963963,
-            "y": 0
-        },
-        {
-            "x": 1353963964,
-            "y": 0
-        },
-        {
-            "x": 1353963965,
-            "y": 0
-        },
-        {
-            "x": 1353963966,
-            "y": 0
-        },
-        {
-            "x": 1353963967,
-            "y": 0
-        },
-        {
-            "x": 1353963968,
-            "y": 0
-        },
-        {
-            "x": 1353963969,
-            "y": 0
-        },
-        {
-            "x": 1353963970,
-            "y": 0
-        },
-        {
-            "x": 1353963971,
-            "y": 0
-        },
-        {
-            "x": 1353963972,
-            "y": 0
-        },
-        {
-            "x": 1353963973,
-            "y": 0
-        },
-        {
-            "x": 1353963974,
-            "y": 0
-        },
-        {
-            "x": 1353963975,
-            "y": 0
-        },
-        {
-            "x": 1353963976,
-            "y": 0
-        },
-        {
-            "x": 1353963977,
-            "y": 0
-        },
-        {
-            "x": 1353963978,
-            "y": 0
-        },
-        {
-            "x": 1353963979,
-            "y": 0
-        },
-        {
-            "x": 1353963980,
-            "y": 0
-        },
-        {
-            "x": 1353963981,
-            "y": 0
-        },
-        {
-            "x": 1353963982,
-            "y": 0
-        },
-        {
-            "x": 1353963983,
-            "y": 0
-        },
-        {
-            "x": 1353963984,
-            "y": 0
-        },
-        {
-            "x": 1353963985,
-            "y": 0
-        },
-        {
-            "x": 1353963986,
-            "y": 0
-        },
-        {
-            "x": 1353963987,
-            "y": 0
-        },
-        {
-            "x": 1353963988,
-            "y": 0
-        },
-        {
-            "x": 1353963989,
-            "y": 0
-        },
-        {
-            "x": 1353963990,
-            "y": 0
-        },
-        {
-            "x": 1353963991,
-            "y": 0
-        },
-        {
-            "x": 1353963992,
-            "y": 0
-        },
-        {
-            "x": 1353963993,
-            "y": 0
-        },
-        {
-            "x": 1353963994,
-            "y": 0
-        },
-        {
-            "x": 1353963995,
-            "y": 0
-        },
-        {
-            "x": 1353963996,
-            "y": 0
-        },
-        {
-            "x": 1353963997,
-            "y": 0
-        },
-        {
-            "x": 1353963998,
-            "y": 0
-        },
-        {
-            "x": 1353963999,
-            "y": 0
-        },
-        {
-            "x": 1353964000,
-            "y": 0
-        },
-        {
-            "x": 1353964001,
-            "y": 0
-        },
-        {
-            "x": 1353964002,
-            "y": 0
-        },
-        {
-            "x": 1353964003,
-            "y": 0
-        },
-        {
-            "x": 1353964004,
-            "y": 0
-        },
-        {
-            "x": 1353964005,
-            "y": 0
-        },
-        {
-            "x": 1353964006,
-            "y": 0
-        },
-        {
-            "x": 1353964007,
-            "y": 0
-        },
-        {
-            "x": 1353964008,
-            "y": 0
-        },
-        {
-            "x": 1353964009,
-            "y": 0
-        },
-        {
-            "x": 1353964010,
-            "y": 0
-        },
-        {
-            "x": 1353964011,
-            "y": 0
-        },
-        {
-            "x": 1353964012,
-            "y": 0
-        },
-        {
-            "x": 1353964013,
-            "y": 0
-        },
-        {
-            "x": 1353964014,
-            "y": 0
-        },
-        {
-            "x": 1353964015,
-            "y": 0
-        },
-        {
-            "x": 1353964016,
-            "y": 0
-        },
-        {
-            "x": 1353964017,
-            "y": 0
-        },
-        {
-            "x": 1353964018,
-            "y": 0
-        },
-        {
-            "x": 1353964019,
-            "y": 0
-        },
-        {
-            "x": 1353964020,
-            "y": 0
-        },
-        {
-            "x": 1353964021,
-            "y": 0
-        },
-        {
-            "x": 1353964022,
-            "y": 0
-        },
-        {
-            "x": 1353964023,
-            "y": 0
-        },
-        {
-            "x": 1353964024,
-            "y": 0
-        },
-        {
-            "x": 1353964025,
-            "y": 0
-        },
-        {
-            "x": 1353964026,
-            "y": 0
-        },
-        {
-            "x": 1353964027,
-            "y": 0
-        },
-        {
-            "x": 1353964028,
-            "y": 0
-        },
-        {
-            "x": 1353964029,
-            "y": 0
-        },
-        {
-            "x": 1353964030,
-            "y": 0
-        },
-        {
-            "x": 1353964031,
-            "y": 0
-        },
-        {
-            "x": 1353964032,
-            "y": 0
-        },
-        {
-            "x": 1353964033,
-            "y": 0
-        },
-        {
-            "x": 1353964034,
-            "y": 0
-        },
-        {
-            "x": 1353964035,
-            "y": 0
-        },
-        {
-            "x": 1353964036,
-            "y": 0
-        },
-        {
-            "x": 1353964037,
-            "y": 0
-        },
-        {
-            "x": 1353964038,
-            "y": 0
-        },
-        {
-            "x": 1353964039,
-            "y": 0
-        },
-        {
-            "x": 1353964040,
-            "y": 0
-        },
-        {
-            "x": 1353964041,
-            "y": 0
-        },
-        {
-            "x": 1353964042,
-            "y": 0
-        },
-        {
-            "x": 1353964043,
-            "y": 0
-        },
-        {
-            "x": 1353964044,
-            "y": 0
-        },
-        {
-            "x": 1353964045,
-            "y": 0
-        },
-        {
-            "x": 1353964046,
-            "y": 0
-        },
-        {
-            "x": 1353964047,
-            "y": 0
-        },
-        {
-            "x": 1353964048,
-            "y": 0
-        },
-        {
-            "x": 1353964049,
-            "y": 0
-        },
-        {
-            "x": 1353964050,
-            "y": 0
-        },
-        {
-            "x": 1353964051,
-            "y": 0
-        },
-        {
-            "x": 1353964052,
-            "y": 0
-        },
-        {
-            "x": 1353964053,
-            "y": 0
-        },
-        {
-            "x": 1353964054,
-            "y": 0
-        },
-        {
-            "x": 1353964055,
-            "y": 0
-        },
-        {
-            "x": 1353964056,
-            "y": 0
-        },
-        {
-            "x": 1353964057,
-            "y": 0
-        },
-        {
-            "x": 1353964058,
-            "y": 0
-        },
-        {
-            "x": 1353964059,
-            "y": 0
-        },
-        {
-            "x": 1353964060,
-            "y": 0
-        },
-        {
-            "x": 1353964061,
-            "y": 0
-        },
-        {
-            "x": 1353964062,
-            "y": 0
-        },
-        {
-            "x": 1353964063,
-            "y": 0
-        },
-        {
-            "x": 1353964064,
-            "y": 0
-        },
-        {
-            "x": 1353964065,
-            "y": 0
-        },
-        {
-            "x": 1353964066,
-            "y": 0
-        },
-        {
-            "x": 1353964067,
-            "y": 0
-        },
-        {
-            "x": 1353964068,
-            "y": 0
-        },
-        {
-            "x": 1353964069,
-            "y": 0
-        },
-        {
-            "x": 1353964070,
-            "y": 0
-        },
-        {
-            "x": 1353964071,
-            "y": 1
-        },
-        {
-            "x": 1353964072,
-            "y": 1
-        },
-        {
-            "x": 1353964073,
-            "y": 1
-        },
-        {
-            "x": 1353964074,
-            "y": 2
-        },
-        {
-            "x": 1353964075,
-            "y": 2
-        },
-        {
-            "x": 1353964076,
-            "y": 2
-        },
-        {
-            "x": 1353964077,
-            "y": 2
-        },
-        {
-            "x": 1353964078,
-            "y": 2
-        },
-        {
-            "x": 1353964079,
-            "y": 2
-        },
-        {
-            "x": 1353964080,
-            "y": 2
-        },
-        {
-            "x": 1353964081,
-            "y": 2
-        },
-        {
-            "x": 1353964082,
-            "y": 1
-        },
-        {
-            "x": 1353964083,
-            "y": 2
-        },
-        {
-            "x": 1353964084,
-            "y": 2
-        },
-        {
-            "x": 1353964085,
-            "y": 2
-        },
-        {
-            "x": 1353964086,
-            "y": 2
-        },
-        {
-            "x": 1353964087,
-            "y": 2
-        },
-        {
-            "x": 1353964088,
-            "y": 2
-        },
-        {
-            "x": 1353964089,
-            "y": 2
-        },
-        {
-            "x": 1353964090,
-            "y": 1
-        },
-        {
-            "x": 1353964091,
-            "y": 1
-        },
-        {
-            "x": 1353964092,
-            "y": 2
-        },
-        {
-            "x": 1353964093,
-            "y": 1
-        },
-        {
-            "x": 1353964094,
-            "y": 1
-        },
-        {
-            "x": 1353964095,
-            "y": 2
-        },
-        {
-            "x": 1353964096,
-            "y": 2
-        },
-        {
-            "x": 1353964097,
-            "y": 2
-        },
-        {
-            "x": 1353964098,
-            "y": 2
-        },
-        {
-            "x": 1353964099,
-            "y": 2
-        },
-        {
-            "x": 1353964100,
-            "y": 2
-        },
-        {
-            "x": 1353964101,
-            "y": 2
-        },
-        {
-            "x": 1353964102,
-            "y": 2
-        },
-        {
-            "x": 1353964103,
-            "y": 1
-        },
-        {
-            "x": 1353964104,
-            "y": 2
-        },
-        {
-            "x": 1353964105,
-            "y": 2
-        },
-        {
-            "x": 1353964106,
-            "y": 1
-        },
-        {
-            "x": 1353964107,
-            "y": 2
-        },
-        {
-            "x": 1353964108,
-            "y": 2
-        },
-        {
-            "x": 1353964109,
-            "y": 2
-        },
-        {
-            "x": 1353964110,
-            "y": 2
-        },
-        {
-            "x": 1353964111,
-            "y": 2
-        },
-        {
-            "x": 1353964112,
-            "y": 2
-        },
-        {
-            "x": 1353964113,
-            "y": 2
-        },
-        {
-            "x": 1353964114,
-            "y": 1
-        },
-        {
-            "x": 1353964115,
-            "y": 1
-        },
-        {
-            "x": 1353964116,
-            "y": 2
-        },
-        {
-            "x": 1353964117,
-            "y": 1
-        },
-        {
-            "x": 1353964118,
-            "y": 1
-        },
-        {
-            "x": 1353964119,
-            "y": 2
-        },
-        {
-            "x": 1353964120,
-            "y": 2
-        },
-        {
-            "x": 1353964121,
-            "y": 2
-        },
-        {
-            "x": 1353964122,
-            "y": 2
-        },
-        {
-            "x": 1353964123,
-            "y": 2
-        },
-        {
-            "x": 1353964124,
-            "y": 2
-        },
-        {
-            "x": 1353964125,
-            "y": 2
-        },
-        {
-            "x": 1353964126,
-            "y": 2
-        },
-        {
-            "x": 1353964127,
-            "y": 1
-        },
-        {
-            "x": 1353964128,
-            "y": 2
-        },
-        {
-            "x": 1353964129,
-            "y": 2
-        },
-        {
-            "x": 1353964130,
-            "y": 1
-        },
-        {
-            "x": 1353964131,
-            "y": 2
-        },
-        {
-            "x": 1353964132,
-            "y": 2
-        },
-        {
-            "x": 1353964133,
-            "y": 2
-        },
-        {
-            "x": 1353964134,
-            "y": 2
-        },
-        {
-            "x": 1353964135,
-            "y": 2
-        },
-        {
-            "x": 1353964136,
-            "y": 2
-        },
-        {
-            "x": 1353964137,
-            "y": 2
-        },
-        {
-            "x": 1353964138,
-            "y": 1
-        },
-        {
-            "x": 1353964139,
-            "y": 1
-        },
-        {
-            "x": 1353964140,
-            "y": 2
-        },
-        {
-            "x": 1353964141,
-            "y": 1
-        },
-        {
-            "x": 1353964142,
-            "y": 1
-        },
-        {
-            "x": 1353964143,
-            "y": 2
-        },
-        {
-            "x": 1353964144,
-            "y": 2
-        },
-        {
-            "x": 1353964145,
-            "y": 2
-        },
-        {
-            "x": 1353964146,
-            "y": 2
-        },
-        {
-            "x": 1353964147,
-            "y": 2
-        },
-        {
-            "x": 1353964148,
-            "y": 2
-        },
-        {
-            "x": 1353964149,
-            "y": 2
-        },
-        {
-            "x": 1353964150,
-            "y": 2
-        },
-        {
-            "x": 1353964151,
-            "y": 1
-        },
-        {
-            "x": 1353964152,
-            "y": 1
-        },
-        {
-            "x": 1353964153,
-            "y": 1
-        },
-        {
-            "x": 1353964154,
-            "y": 1
-        },
-        {
-            "x": 1353964155,
-            "y": 2
-        },
-        {
-            "x": 1353964156,
-            "y": 2
-        },
-        {
-            "x": 1353964157,
-            "y": 2
-        },
-        {
-            "x": 1353964158,
-            "y": 2
-        },
-        {
-            "x": 1353964159,
-            "y": 2
-        },
-        {
-            "x": 1353964160,
-            "y": 2
-        },
-        {
-            "x": 1353964161,
-            "y": 2
-        },
-        {
-            "x": 1353964162,
-            "y": 1
-        },
-        {
-            "x": 1353964163,
-            "y": 0
-        },
-        {
-            "x": 1353964164,
-            "y": 1
-        },
-        {
-            "x": 1353964165,
-            "y": 1
-        },
-        {
-            "x": 1353964166,
-            "y": 1
-        },
-        {
-            "x": 1353964167,
-            "y": 2
-        },
-        {
-            "x": 1353964168,
-            "y": 2
-        },
-        {
-            "x": 1353964169,
-            "y": 2
-        },
-        {
-            "x": 1353964170,
-            "y": 2
-        },
-        {
-            "x": 1353964171,
-            "y": 2
-        },
-        {
-            "x": 1353964172,
-            "y": 2
-        },
-        {
-            "x": 1353964173,
-            "y": 2
-        },
-        {
-            "x": 1353964174,
-            "y": 1
-        },
-        {
-            "x": 1353964175,
-            "y": 1
-        },
-        {
-            "x": 1353964176,
-            "y": 1
-        },
-        {
-            "x": 1353964177,
-            "y": 1
-        },
-        {
-            "x": 1353964178,
-            "y": 1
-        },
-        {
-            "x": 1353964179,
-            "y": 2
-        },
-        {
-            "x": 1353964180,
-            "y": 2
-        },
-        {
-            "x": 1353964181,
-            "y": 2
-        },
-        {
-            "x": 1353964182,
-            "y": 2
-        },
-        {
-            "x": 1353964183,
-            "y": 2
-        },
-        {
-            "x": 1353964184,
-            "y": 2
-        },
-        {
-            "x": 1353964185,
-            "y": 2
-        },
-        {
-            "x": 1353964186,
-            "y": 1
-        },
-        {
-            "x": 1353964187,
-            "y": 0
-        },
-        {
-            "x": 1353964188,
-            "y": 0
-        },
-        {
-            "x": 1353964189,
-            "y": 0
-        },
-        {
-            "x": 1353964190,
-            "y": 0
-        },
-        {
-            "x": 1353964191,
-            "y": 0
-        },
-        {
-            "x": 1353964192,
-            "y": 0
-        },
-        {
-            "x": 1353964193,
-            "y": 0
-        },
-        {
-            "x": 1353964194,
-            "y": 0
-        },
-        {
-            "x": 1353964195,
-            "y": 0
-        },
-        {
-            "x": 1353964196,
-            "y": 0
-        },
-        {
-            "x": 1353964197,
-            "y": 0
-        }
-    ],
-    "reduce": [
-        {
-            "x": 1353963926,
-            "y": 0
-        },
-        {
-            "x": 1353963927,
-            "y": 0
-        },
-        {
-            "x": 1353963928,
-            "y": 0
-        },
-        {
-            "x": 1353963929,
-            "y": 0
-        },
-        {
-            "x": 1353963930,
-            "y": 0
-        },
-        {
-            "x": 1353963931,
-            "y": 0
-        },
-        {
-            "x": 1353963932,
-            "y": 0
-        },
-        {
-            "x": 1353963933,
-            "y": 0
-        },
-        {
-            "x": 1353963934,
-            "y": 0
-        },
-        {
-            "x": 1353963935,
-            "y": 0
-        },
-        {
-            "x": 1353963936,
-            "y": 0
-        },
-        {
-            "x": 1353963937,
-            "y": 0
-        },
-        {
-            "x": 1353963938,
-            "y": 0
-        },
-        {
-            "x": 1353963939,
-            "y": 0
-        },
-        {
-            "x": 1353963940,
-            "y": 0
-        },
-        {
-            "x": 1353963941,
-            "y": 0
-        },
-        {
-            "x": 1353963942,
-            "y": 0
-        },
-        {
-            "x": 1353963943,
-            "y": 0
-        },
-        {
-            "x": 1353963944,
-            "y": 0
-        },
-        {
-            "x": 1353963945,
-            "y": 0
-        },
-        {
-            "x": 1353963946,
-            "y": 0
-        },
-        {
-            "x": 1353963947,
-            "y": 0
-        },
-        {
-            "x": 1353963948,
-            "y": 0
-        },
-        {
-            "x": 1353963949,
-            "y": 0
-        },
-        {
-            "x": 1353963950,
-            "y": 0
-        },
-        {
-            "x": 1353963951,
-            "y": 0
-        },
-        {
-            "x": 1353963952,
-            "y": 0
-        },
-        {
-            "x": 1353963953,
-            "y": 0
-        },
-        {
-            "x": 1353963954,
-            "y": 0
-        },
-        {
-            "x": 1353963955,
-            "y": 0
-        },
-        {
-            "x": 1353963956,
-            "y": 0
-        },
-        {
-            "x": 1353963957,
-            "y": 0
-        },
-        {
-            "x": 1353963958,
-            "y": 0
-        },
-        {
-            "x": 1353963959,
-            "y": 0
-        },
-        {
-            "x": 1353963960,
-            "y": 0
-        },
-        {
-            "x": 1353963961,
-            "y": 0
-        },
-        {
-            "x": 1353963962,
-            "y": 0
-        },
-        {
-            "x": 1353963963,
-            "y": 0
-        },
-        {
-            "x": 1353963964,
-            "y": 0
-        },
-        {
-            "x": 1353963965,
-            "y": 0
-        },
-        {
-            "x": 1353963966,
-            "y": 0
-        },
-        {
-            "x": 1353963967,
-            "y": 0
-        },
-        {
-            "x": 1353963968,
-            "y": 0
-        },
-        {
-            "x": 1353963969,
-            "y": 0
-        },
-        {
-            "x": 1353963970,
-            "y": 0
-        },
-        {
-            "x": 1353963971,
-            "y": 0
-        },
-        {
-            "x": 1353963972,
-            "y": 0
-        },
-        {
-            "x": 1353963973,
-            "y": 0
-        },
-        {
-            "x": 1353963974,
-            "y": 0
-        },
-        {
-            "x": 1353963975,
-            "y": 0
-        },
-        {
-            "x": 1353963976,
-            "y": 0
-        },
-        {
-            "x": 1353963977,
-            "y": 0
-        },
-        {
-            "x": 1353963978,
-            "y": 0
-        },
-        {
-            "x": 1353963979,
-            "y": 0
-        },
-        {
-            "x": 1353963980,
-            "y": 0
-        },
-        {
-            "x": 1353963981,
-            "y": 0
-        },
-        {
-            "x": 1353963982,
-            "y": 0
-        },
-        {
-            "x": 1353963983,
-            "y": 0
-        },
-        {
-            "x": 1353963984,
-            "y": 0
-        },
-        {
-            "x": 1353963985,
-            "y": 0
-        },
-        {
-            "x": 1353963986,
-            "y": 0
-        },
-        {
-            "x": 1353963987,
-            "y": 0
-        },
-        {
-            "x": 1353963988,
-            "y": 0
-        },
-        {
-            "x": 1353963989,
-            "y": 0
-        },
-        {
-            "x": 1353963990,
-            "y": 0
-        },
-        {
-            "x": 1353963991,
-            "y": 0
-        },
-        {
-            "x": 1353963992,
-            "y": 0
-        },
-        {
-            "x": 1353963993,
-            "y": 0
-        },
-        {
-            "x": 1353963994,
-            "y": 0
-        },
-        {
-            "x": 1353963995,
-            "y": 0
-        },
-        {
-            "x": 1353963996,
-            "y": 0
-        },
-        {
-            "x": 1353963997,
-            "y": 0
-        },
-        {
-            "x": 1353963998,
-            "y": 0
-        },
-        {
-            "x": 1353963999,
-            "y": 0
-        },
-        {
-            "x": 1353964000,
-            "y": 0
-        },
-        {
-            "x": 1353964001,
-            "y": 0
-        },
-        {
-            "x": 1353964002,
-            "y": 0
-        },
-        {
-            "x": 1353964003,
-            "y": 0
-        },
-        {
-            "x": 1353964004,
-            "y": 0
-        },
-        {
-            "x": 1353964005,
-            "y": 0
-        },
-        {
-            "x": 1353964006,
-            "y": 0
-        },
-        {
-            "x": 1353964007,
-            "y": 0
-        },
-        {
-            "x": 1353964008,
-            "y": 0
-        },
-        {
-            "x": 1353964009,
-            "y": 0
-        },
-        {
-            "x": 1353964010,
-            "y": 0
-        },
-        {
-            "x": 1353964011,
-            "y": 0
-        },
-        {
-            "x": 1353964012,
-            "y": 0
-        },
-        {
-            "x": 1353964013,
-            "y": 0
-        },
-        {
-            "x": 1353964014,
-            "y": 0
-        },
-        {
-            "x": 1353964015,
-            "y": 0
-        },
-        {
-            "x": 1353964016,
-            "y": 0
-        },
-        {
-            "x": 1353964017,
-            "y": 0
-        },
-        {
-            "x": 1353964018,
-            "y": 0
-        },
-        {
-            "x": 1353964019,
-            "y": 0
-        },
-        {
-            "x": 1353964020,
-            "y": 0
-        },
-        {
-            "x": 1353964021,
-            "y": 0
-        },
-        {
-            "x": 1353964022,
-            "y": 0
-        },
-        {
-            "x": 1353964023,
-            "y": 0
-        },
-        {
-            "x": 1353964024,
-            "y": 0
-        },
-        {
-            "x": 1353964025,
-            "y": 0
-        },
-        {
-            "x": 1353964026,
-            "y": 0
-        },
-        {
-            "x": 1353964027,
-            "y": 0
-        },
-        {
-            "x": 1353964028,
-            "y": 0
-        },
-        {
-            "x": 1353964029,
-            "y": 0
-        },
-        {
-            "x": 1353964030,
-            "y": 0
-        },
-        {
-            "x": 1353964031,
-            "y": 0
-        },
-        {
-            "x": 1353964032,
-            "y": 0
-        },
-        {
-            "x": 1353964033,
-            "y": 0
-        },
-        {
-            "x": 1353964034,
-            "y": 0
-        },
-        {
-            "x": 1353964035,
-            "y": 0
-        },
-        {
-            "x": 1353964036,
-            "y": 0
-        },
-        {
-            "x": 1353964037,
-            "y": 0
-        },
-        {
-            "x": 1353964038,
-            "y": 0
-        },
-        {
-            "x": 1353964039,
-            "y": 0
-        },
-        {
-            "x": 1353964040,
-            "y": 0
-        },
-        {
-            "x": 1353964041,
-            "y": 0
-        },
-        {
-            "x": 1353964042,
-            "y": 0
-        },
-        {
-            "x": 1353964043,
-            "y": 0
-        },
-        {
-            "x": 1353964044,
-            "y": 0
-        },
-        {
-            "x": 1353964045,
-            "y": 0
-        },
-        {
-            "x": 1353964046,
-            "y": 0
-        },
-        {
-            "x": 1353964047,
-            "y": 0
-        },
-        {
-            "x": 1353964048,
-            "y": 0
-        },
-        {
-            "x": 1353964049,
-            "y": 0
-        },
-        {
-            "x": 1353964050,
-            "y": 0
-        },
-        {
-            "x": 1353964051,
-            "y": 0
-        },
-        {
-            "x": 1353964052,
-            "y": 0
-        },
-        {
-            "x": 1353964053,
-            "y": 0
-        },
-        {
-            "x": 1353964054,
-            "y": 0
-        },
-        {
-            "x": 1353964055,
-            "y": 0
-        },
-        {
-            "x": 1353964056,
-            "y": 0
-        },
-        {
-            "x": 1353964057,
-            "y": 0
-        },
-        {
-            "x": 1353964058,
-            "y": 0
-        },
-        {
-            "x": 1353964059,
-            "y": 0
-        },
-        {
-            "x": 1353964060,
-            "y": 0
-        },
-        {
-            "x": 1353964061,
-            "y": 0
-        },
-        {
-            "x": 1353964062,
-            "y": 0
-        },
-        {
-            "x": 1353964063,
-            "y": 0
-        },
-        {
-            "x": 1353964064,
-            "y": 0
-        },
-        {
-            "x": 1353964065,
-            "y": 0
-        },
-        {
-            "x": 1353964066,
-            "y": 0
-        },
-        {
-            "x": 1353964067,
-            "y": 0
-        },
-        {
-            "x": 1353964068,
-            "y": 0
-        },
-        {
-            "x": 1353964069,
-            "y": 0
-        },
-        {
-            "x": 1353964070,
-            "y": 0
-        },
-        {
-            "x": 1353964071,
-            "y": 0
-        },
-        {
-            "x": 1353964072,
-            "y": 0
-        },
-        {
-            "x": 1353964073,
-            "y": 0
-        },
-        {
-            "x": 1353964074,
-            "y": 0
-        },
-        {
-            "x": 1353964075,
-            "y": 0
-        },
-        {
-            "x": 1353964076,
-            "y": 0
-        },
-        {
-            "x": 1353964077,
-            "y": 0
-        },
-        {
-            "x": 1353964078,
-            "y": 0
-        },
-        {
-            "x": 1353964079,
-            "y": 0
-        },
-        {
-            "x": 1353964080,
-            "y": 1
-        },
-        {
-            "x": 1353964081,
-            "y": 1
-        },
-        {
-            "x": 1353964082,
-            "y": 1
-        },
-        {
-            "x": 1353964083,
-            "y": 1
-        },
-        {
-            "x": 1353964084,
-            "y": 1
-        },
-        {
-            "x": 1353964085,
-            "y": 0
-        },
-        {
-            "x": 1353964086,
-            "y": 0
-        },
-        {
-            "x": 1353964087,
-            "y": 0
-        },
-        {
-            "x": 1353964088,
-            "y": 0
-        },
-        {
-            "x": 1353964089,
-            "y": 0
-        },
-        {
-            "x": 1353964090,
-            "y": 1
-        },
-        {
-            "x": 1353964091,
-            "y": 1
-        },
-        {
-            "x": 1353964092,
-            "y": 1
-        },
-        {
-            "x": 1353964093,
-            "y": 2
-        },
-        {
-            "x": 1353964094,
-            "y": 2
-        },
-        {
-            "x": 1353964095,
-            "y": 2
-        },
-        {
-            "x": 1353964096,
-            "y": 1
-        },
-        {
-            "x": 1353964097,
-            "y": 1
-        },
-        {
-            "x": 1353964098,
-            "y": 1
-        },
-        {
-            "x": 1353964099,
-            "y": 0
-        },
-        {
-            "x": 1353964100,
-            "y": 0
-        },
-        {
-            "x": 1353964101,
-            "y": 0
-        },
-        {
-            "x": 1353964102,
-            "y": 0
-        },
-        {
-            "x": 1353964103,
-            "y": 1
-        },
-        {
-            "x": 1353964104,
-            "y": 1
-        },
-        {
-            "x": 1353964105,
-            "y": 1
-        },
-        {
-            "x": 1353964106,
-            "y": 1
-        },
-        {
-            "x": 1353964107,
-            "y": 1
-        },
-        {
-            "x": 1353964108,
-            "y": 1
-        },
-        {
-            "x": 1353964109,
-            "y": 0
-        },
-        {
-            "x": 1353964110,
-            "y": 0
-        },
-        {
-            "x": 1353964111,
-            "y": 0
-        },
-        {
-            "x": 1353964112,
-            "y": 0
-        },
-        {
-            "x": 1353964113,
-            "y": 0
-        },
-        {
-            "x": 1353964114,
-            "y": 1
-        },
-        {
-            "x": 1353964115,
-            "y": 1
-        },
-        {
-            "x": 1353964116,
-            "y": 1
-        },
-        {
-            "x": 1353964117,
-            "y": 2
-        },
-        {
-            "x": 1353964118,
-            "y": 2
-        },
-        {
-            "x": 1353964119,
-            "y": 2
-        },
-        {
-            "x": 1353964120,
-            "y": 1
-        },
-        {
-            "x": 1353964121,
-            "y": 1
-        },
-        {
-            "x": 1353964122,
-            "y": 1
-        },
-        {
-            "x": 1353964123,
-            "y": 0
-        },
-        {
-            "x": 1353964124,
-            "y": 0
-        },
-        {
-            "x": 1353964125,
-            "y": 0
-        },
-        {
-            "x": 1353964126,
-            "y": 0
-        },
-        {
-            "x": 1353964127,
-            "y": 1
-        },
-        {
-            "x": 1353964128,
-            "y": 1
-        },
-        {
-            "x": 1353964129,
-            "y": 1
-        },
-        {
-            "x": 1353964130,
-            "y": 1
-        },
-        {
-            "x": 1353964131,
-            "y": 1
-        },
-        {
-            "x": 1353964132,
-            "y": 1
-        },
-        {
-            "x": 1353964133,
-            "y": 0
-        },
-        {
-            "x": 1353964134,
-            "y": 0
-        },
-        {
-            "x": 1353964135,
-            "y": 0
-        },
-        {
-            "x": 1353964136,
-            "y": 0
-        },
-        {
-            "x": 1353964137,
-            "y": 0
-        },
-        {
-            "x": 1353964138,
-            "y": 1
-        },
-        {
-            "x": 1353964139,
-            "y": 1
-        },
-        {
-            "x": 1353964140,
-            "y": 1
-        },
-        {
-            "x": 1353964141,
-            "y": 2
-        },
-        {
-            "x": 1353964142,
-            "y": 2
-        },
-        {
-            "x": 1353964143,
-            "y": 1
-        },
-        {
-            "x": 1353964144,
-            "y": 0
-        },
-        {
-            "x": 1353964145,
-            "y": 0
-        },
-        {
-            "x": 1353964146,
-            "y": 0
-        },
-        {
-            "x": 1353964147,
-            "y": 0
-        },
-        {
-            "x": 1353964148,
-            "y": 0
-        },
-        {
-            "x": 1353964149,
-            "y": 0
-        },
-        {
-            "x": 1353964150,
-            "y": 0
-        },
-        {
-            "x": 1353964151,
-            "y": 1
-        },
-        {
-            "x": 1353964152,
-            "y": 2
-        },
-        {
-            "x": 1353964153,
-            "y": 2
-        },
-        {
-            "x": 1353964154,
-            "y": 1
-        },
-        {
-            "x": 1353964155,
-            "y": 0
-        },
-        {
-            "x": 1353964156,
-            "y": 0
-        },
-        {
-            "x": 1353964157,
-            "y": 0
-        },
-        {
-            "x": 1353964158,
-            "y": 0
-        },
-        {
-            "x": 1353964159,
-            "y": 0
-        },
-        {
-            "x": 1353964160,
-            "y": 0
-        },
-        {
-            "x": 1353964161,
-            "y": 0
-        },
-        {
-            "x": 1353964162,
-            "y": 1
-        },
-        {
-            "x": 1353964163,
-            "y": 2
-        },
-        {
-            "x": 1353964164,
-            "y": 2
-        },
-        {
-            "x": 1353964165,
-            "y": 2
-        },
-        {
-            "x": 1353964166,
-            "y": 1
-        },
-        {
-            "x": 1353964167,
-            "y": 1
-        },
-        {
-            "x": 1353964168,
-            "y": 0
-        },
-        {
-            "x": 1353964169,
-            "y": 0
-        },
-        {
-            "x": 1353964170,
-            "y": 0
-        },
-        {
-            "x": 1353964171,
-            "y": 0
-        },
-        {
-            "x": 1353964172,
-            "y": 0
-        },
-        {
-            "x": 1353964173,
-            "y": 0
-        },
-        {
-            "x": 1353964174,
-            "y": 1
-        },
-        {
-            "x": 1353964175,
-            "y": 1
-        },
-        {
-            "x": 1353964176,
-            "y": 2
-        },
-        {
-            "x": 1353964177,
-            "y": 2
-        },
-        {
-            "x": 1353964178,
-            "y": 2
-        },
-        {
-            "x": 1353964179,
-            "y": 1
-        },
-        {
-            "x": 1353964180,
-            "y": 0
-        },
-        {
-            "x": 1353964181,
-            "y": 0
-        },
-        {
-            "x": 1353964182,
-            "y": 0
-        },
-        {
-            "x": 1353964183,
-            "y": 0
-        },
-        {
-            "x": 1353964184,
-            "y": 0
-        },
-        {
-            "x": 1353964185,
-            "y": 0
-        },
-        {
-            "x": 1353964186,
-            "y": 1
-        },
-        {
-            "x": 1353964187,
-            "y": 2
-        },
-        {
-            "x": 1353964188,
-            "y": 2
-        },
-        {
-            "x": 1353964189,
-            "y": 2
-        },
-        {
-            "x": 1353964190,
-            "y": 1
-        },
-        {
-            "x": 1353964191,
-            "y": 1
-        },
-        {
-            "x": 1353964192,
-            "y": 0
-        },
-        {
-            "x": 1353964193,
-            "y": 0
-        },
-        {
-            "x": 1353964194,
-            "y": 0
-        },
-        {
-            "x": 1353964195,
-            "y": 0
-        },
-        {
-            "x": 1353964196,
-            "y": 0
-        },
-        {
-            "x": 1353964197,
-            "y": 0
-        }
-    ]
-}
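
The file deleted above is a mock chart fixture for the per-job task charts in ambari-web: each series ("shuffle", "reduce", and any sibling phases) is an array of {x, y} samples, where x is a Unix timestamp in seconds and y is the number of tasks active at that second. Below is a minimal TypeScript sketch of that shape plus one possible consumer; the interface and function names are illustrative assumptions, not Ambari code, and only the x/y keys come from the fixture itself.

    // Sketch (assumption, not Ambari source): shape of one chart sample.
    interface TaskSample {
      x: number; // Unix timestamp in seconds
      y: number; // concurrent task count at that instant
    }

    // The fixture groups samples by phase; other phases with the same
    // shape may exist alongside the two visible in this hunk.
    interface TaskSeries {
      shuffle: TaskSample[];
      reduce: TaskSample[];
      [phase: string]: TaskSample[];
    }

    // Example consumer: find the sample with peak concurrency.
    function peak(series: TaskSample[]): TaskSample | undefined {
      return series.reduce<TaskSample | undefined>(
        (best, s) => (best === undefined || s.y > best.y ? s : best),
        undefined
      );
    }
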
diff --git a/branch-1.2/ambari-web/app/assets/data/apps/runs.json b/branch-1.2/ambari-web/app/assets/data/apps/runs.json
deleted file mode 100644
index a3694dd..0000000
--- a/branch-1.2/ambari-web/app/assets/data/apps/runs.json
+++ /dev/null
@@ -1,128 +0,0 @@
-{
-  "sEcho": 0,
-  "iTotalRecords": 4,
-  "iTotalDisplayRecords": 4,
-  "startIndex": 0,
-  "endIndex": 3,
-  "aaData": [
-    {
-      "workflowId": "mr_201301280808_0001",
-      "workflowName": "word count",
-      "userName": "ambari_qa",
-      "startTime": 1359378637135,
-      "elapsedTime": 30215,
-      "inputBytes": 1942,
-      "outputBytes": 1908,
-      "numJobsTotal": 1,
-      "numJobsCompleted": 1,
-      "workflowContext": {
-        "workflowDag": {
-          "entries": [
-            {
-              "source": "X",
-              "targets": [
-
-              ]
-            }
-          ]
-        }
-      }
-    },
-    {
-      "workflowId": "mr_201301280808_0003",
-      "workflowName": "oozie:launcher:T\\=map-reduce:W\\=map-reduce-wf:A\\=mr-node:ID\\=0000000-130128081151371-oozie-oozi-W",
-      "userName": "ambari_qa",
-      "startTime": 1359378907927,
-      "elapsedTime": 19186,
-      "inputBytes": 37485,
-      "outputBytes": 37458,
-      "numJobsTotal": 1,
-      "numJobsCompleted": 1,
-      "workflowContext": {
-        "workflowDag": {
-          "entries": [
-            {
-              "source": "X",
-              "targets": [
-
-              ]
-            }
-          ]
-        }
-      }
-    },
-    {
-      "workflowId": "mr_201301280808_0004",
-      "workflowName": "oozie:action:T\\=map-reduce:W\\=map-reduce-wf:A\\=mr-node:ID\\=0000000-130128081151371-oozie-oozi-W",
-      "userName": "ambari_qa",
-      "startTime": 1359378922503,
-      "elapsedTime": 27080,
-      "inputBytes": 1550,
-      "outputBytes": 1547,
-      "numJobsTotal": 3,
-      "numJobsCompleted": 1,
-      "workflowContext": {
-        "workflowDag": {
-          "entries": [
-            {
-              "source": "X",
-              "targets": [
-
-              ]
-            }
-          ]
-        }
-      }
-    },
-    {
-      "workflowId": "pig_f9957a11-a902-4f01-ac53-9679ce3a4b13",
-      "workflowName": "\/tmp\/pigSmoke.sh",
-      "userName": "ambari_qa",
-      "startTime": 1359378741973,
-      "elapsedTime": 18125,
-      "inputBytes": 2186,
-      "outputBytes": 253,
-      "numJobsTotal": 1,
-      "numJobsCompleted": 1,
-      "workflowContext": {
-        "workflowDag": {
-          "entries": [
-            {
-              "source": "scope-5",
-              "targets": [
-
-              ]
-            }
-          ]
-        }
-      }
-    }
-  ],
-  "summary": {
-    "numRows": 4,
-    "jobs": {
-      "avg": 1,
-      "min": 1,
-      "max": 1
-    },
-    "input": {
-      "avg": 10790.75,
-      "min": 1550,
-      "max": 37485
-    },
-    "output": {
-      "avg": 10291.5,
-      "min": 253,
-      "max": 37458
-    },
-    "duration": {
-      "avg": 23651.5,
-      "min": 18125,
-      "max": 30215
-    },
-    "times": {
-      "oldest": 1359378922503,
-      "youngest": 1359378637135
-    }
-  }
-}
\ No newline at end of file
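
The runs.json fixture above follows the jQuery DataTables server-side naming (sEcho, iTotalRecords, iTotalDisplayRecords, aaData) and carries a precomputed "summary" block that is derivable from the aaData rows. A minimal sketch recomputing it, assuming the parsed fixture (function names are illustrative):

def stats(rows, key):
    vals = [r[key] for r in rows]
    return {"avg": sum(vals) / len(vals), "min": min(vals), "max": max(vals)}

def recompute_summary(aa_data):
    return {
        "numRows": len(aa_data),
        # The fixture's "jobs" block matches numJobsCompleted (all 1),
        # not numJobsTotal (which peaks at 3 for the oozie action run).
        "jobs": stats(aa_data, "numJobsCompleted"),
        "input": stats(aa_data, "inputBytes"),
        "output": stats(aa_data, "outputBytes"),
        "duration": stats(aa_data, "elapsedTime"),
        # In this fixture "oldest" holds the latest startTime and
        # "youngest" the earliest.
        "times": {"oldest": max(r["startTime"] for r in aa_data),
                  "youngest": min(r["startTime"] for r in aa_data)},
    }

Applied to the four rows above, this reproduces the deleted summary (e.g. input avg 10790.75, output avg 10291.5, duration avg 23651.5).
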
diff --git a/branch-1.2/ambari-web/app/assets/data/background_operations/list_on_start.json b/branch-1.2/ambari-web/app/assets/data/background_operations/list_on_start.json
deleted file mode 100644
index 3e1d6e5..0000000
--- a/branch-1.2/ambari-web/app/assets/data/background_operations/list_on_start.json
+++ /dev/null
@@ -1,338 +0,0 @@
-{
-  "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/requests/?fields=tasks/*",
-  "items" : [
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/requests/3",
-      "Requests" : {
-        "id" : 3,
-        "cluster_name" : "mycluster"
-      },
-      "tasks" : [
-        {
-          "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/requests/3/tasks/16",
-          "Tasks" : {
-            "exit_code" : 0,
-            "stdout" : "Output",
-            "status" : "QUEUED",
-            "stderr" : "none",
-            "host_name" : "dev.hortonworks.com",
-            "id" : 16,
-            "cluster_name" : "mycluster",
-            "attempt_cnt" : 1,
-            "request_id" : 3,
-            "command" : "STOP",
-            "role" : "NAMENODE",
-            "start_time" : 1352125378300,
-            "stage_id" : 1
-          }
-        },
-        {
-          "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/requests/3/tasks/15",
-          "Tasks" : {
-            "exit_code" : 0,
-            "stdout" : "Output",
-            "status" : "COMPLETED",
-            "stderr" : "none",
-            "host_name" : "dev.hortonworks.com",
-            "id" : 15,
-            "cluster_name" : "mycluster",
-            "attempt_cnt" : 1,
-            "request_id" : 3,
-            "command" : "STOP",
-            "role" : "DATANODE",
-            "start_time" : 1352125378280,
-            "stage_id" : 1
-          }
-        },
-        {
-          "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/requests/3/tasks/17",
-          "Tasks" : {
-            "exit_code" : 0,
-            "stdout" : "Output",
-            "status" : "COMPLETED",
-            "stderr" : "none",
-            "host_name" : "dev.hortonworks.com",
-            "id" : 17,
-            "cluster_name" : "mycluster",
-            "attempt_cnt" : 1,
-            "request_id" : 3,
-            "command" : "STOP",
-            "role" : "SECONDARY_NAMENODE",
-            "start_time" : 1352125378315,
-            "stage_id" : 1
-          }
-        }
-      ]
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/requests/2",
-      "Requests" : {
-        "id" : 2,
-        "cluster_name" : "mycluster"
-      },
-      "tasks" : [
-        {
-          "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/requests/2/tasks/11",
-          "Tasks" : {
-            "exit_code" : 0,
-            "stdout" : "Output",
-            "status" : "COMPLETED",
-            "stderr" : "none",
-            "host_name" : "dev.hortonworks.com",
-            "id" : 11,
-            "cluster_name" : "mycluster",
-            "attempt_cnt" : 1,
-            "request_id" : 2,
-            "command" : "START",
-            "role" : "JOBTRACKER",
-            "start_time" : 1352119106491,
-            "stage_id" : 2
-          }
-        },
-        {
-          "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/requests/2/tasks/14",
-          "Tasks" : {
-            "exit_code" : 0,
-            "stdout" : "Output",
-            "status" : "COMPLETED",
-            "stderr" : "none",
-            "host_name" : "dev.hortonworks.com",
-            "id" : 14,
-            "cluster_name" : "mycluster",
-            "attempt_cnt" : 1,
-            "request_id" : 2,
-            "command" : "EXECUTE",
-            "role" : "MAPREDUCE_SERVICE_CHECK",
-            "start_time" : 1352119157294,
-            "stage_id" : 3
-          }
-        },
-        {
-          "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/requests/2/tasks/13",
-          "Tasks" : {
-            "exit_code" : 0,
-            "stdout" : "Output",
-            "status" : "COMPLETED",
-            "stderr" : "none",
-            "host_name" : "dev.hortonworks.com",
-            "id" : 13,
-            "cluster_name" : "mycluster",
-            "attempt_cnt" : 1,
-            "request_id" : 2,
-            "command" : "START",
-            "role" : "TASKTRACKER",
-            "start_time" : 1352119106518,
-            "stage_id" : 2
-          }
-        },
-        {
-          "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/requests/2/tasks/12",
-          "Tasks" : {
-            "exit_code" : 0,
-            "stdout" : "Output",
-            "status" : "COMPLETED",
-            "stderr" : "none",
-            "host_name" : "dev.hortonworks.com",
-            "id" : 12,
-            "cluster_name" : "mycluster",
-            "attempt_cnt" : 1,
-            "request_id" : 2,
-            "command" : "START",
-            "role" : "SECONDARY_NAMENODE",
-            "start_time" : 1352119106506,
-            "stage_id" : 2
-          }
-        },
-        {
-          "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/requests/2/tasks/9",
-          "Tasks" : {
-            "exit_code" : 0,
-            "stdout" : "Output",
-            "status" : "COMPLETED",
-            "stderr" : "none",
-            "host_name" : "dev.hortonworks.com",
-            "id" : 9,
-            "cluster_name" : "mycluster",
-            "attempt_cnt" : 1,
-            "request_id" : 2,
-            "command" : "START",
-            "role" : "NAMENODE",
-            "start_time" : 1352119024782,
-            "stage_id" : 1
-          }
-        },
-        {
-          "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/requests/2/tasks/8",
-          "Tasks" : {
-            "exit_code" : 0,
-            "stdout" : "Output",
-            "status" : "COMPLETED",
-            "stderr" : "none",
-            "host_name" : "dev.hortonworks.com",
-            "id" : 8,
-            "cluster_name" : "mycluster",
-            "attempt_cnt" : 1,
-            "request_id" : 2,
-            "command" : "START",
-            "role" : "DATANODE",
-            "start_time" : 1352119024765,
-            "stage_id" : 1
-          }
-        },
-        {
-          "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/requests/2/tasks/10",
-          "Tasks" : {
-            "exit_code" : 0,
-            "stdout" : "Output",
-            "status" : "COMPLETED",
-            "stderr" : "none",
-            "host_name" : "dev.hortonworks.com",
-            "id" : 10,
-            "cluster_name" : "mycluster",
-            "attempt_cnt" : 1,
-            "request_id" : 2,
-            "command" : "EXECUTE",
-            "role" : "HDFS_SERVICE_CHECK",
-            "start_time" : 1352119106480,
-            "stage_id" : 2
-          }
-        }
-      ]
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/requests/1",
-      "Requests" : {
-        "id" : 1,
-        "cluster_name" : "mycluster"
-      },
-      "tasks" : [
-        {
-          "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/requests/1/tasks/1",
-          "Tasks" : {
-            "exit_code" : 0,
-            "stdout" : "Output",
-            "status" : "COMPLETED",
-            "stderr" : "none",
-            "host_name" : "dev.hortonworks.com",
-            "id" : 1,
-            "cluster_name" : "mycluster",
-            "attempt_cnt" : 2,
-            "request_id" : 1,
-            "command" : "INSTALL",
-            "role" : "DATANODE",
-            "start_time" : 1352118607290,
-            "stage_id" : 1
-          }
-        },
-        {
-          "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/requests/1/tasks/4",
-          "Tasks" : {
-            "exit_code" : 0,
-            "stdout" : "Output",
-            "status" : "COMPLETED",
-            "stderr" : "none",
-            "host_name" : "dev.hortonworks.com",
-            "id" : 4,
-            "cluster_name" : "mycluster",
-            "attempt_cnt" : 2,
-            "request_id" : 1,
-            "command" : "INSTALL",
-            "role" : "MAPREDUCE_CLIENT",
-            "start_time" : 1352118607672,
-            "stage_id" : 1
-          }
-        },
-        {
-          "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/requests/1/tasks/5",
-          "Tasks" : {
-            "exit_code" : 0,
-            "stdout" : "Output",
-            "status" : "COMPLETED",
-            "stderr" : "none",
-            "host_name" : "dev.hortonworks.com",
-            "id" : 5,
-            "cluster_name" : "mycluster",
-            "attempt_cnt" : 2,
-            "request_id" : 1,
-            "command" : "INSTALL",
-            "role" : "NAMENODE",
-            "start_time" : 1352118607808,
-            "stage_id" : 1
-          }
-        },
-        {
-          "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/requests/1/tasks/3",
-          "Tasks" : {
-            "exit_code" : 0,
-            "stdout" : "Output",
-            "status" : "COMPLETED",
-            "stderr" : "none",
-            "host_name" : "dev.hortonworks.com",
-            "id" : 3,
-            "cluster_name" : "mycluster",
-            "attempt_cnt" : 2,
-            "request_id" : 1,
-            "command" : "INSTALL",
-            "role" : "JOBTRACKER",
-            "start_time" : 1352118607566,
-            "stage_id" : 1
-          }
-        },
-        {
-          "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/requests/1/tasks/7",
-          "Tasks" : {
-            "exit_code" : 0,
-            "stdout" : "Output",
-            "status" : "COMPLETED",
-            "stderr" : "none",
-            "host_name" : "dev.hortonworks.com",
-            "id" : 7,
-            "cluster_name" : "mycluster",
-            "attempt_cnt" : 2,
-            "request_id" : 1,
-            "command" : "INSTALL",
-            "role" : "TASKTRACKER",
-            "start_time" : 1352118608124,
-            "stage_id" : 1
-          }
-        },
-        {
-          "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/requests/1/tasks/2",
-          "Tasks" : {
-            "exit_code" : 0,
-            "stdout" : "Output",
-            "status" : "COMPLETED",
-            "stderr" : "none",
-            "host_name" : "dev.hortonworks.com",
-            "id" : 2,
-            "cluster_name" : "mycluster",
-            "attempt_cnt" : 2,
-            "request_id" : 1,
-            "command" : "INSTALL",
-            "role" : "HDFS_CLIENT",
-            "start_time" : 1352118607469,
-            "stage_id" : 1
-          }
-        },
-        {
-          "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/requests/1/tasks/6",
-          "Tasks" : {
-            "exit_code" : 0,
-            "stdout" : "Output",
-            "status" : "COMPLETED",
-            "stderr" : "none",
-            "host_name" : "dev.hortonworks.com",
-            "id" : 6,
-            "cluster_name" : "mycluster",
-            "attempt_cnt" : 2,
-            "request_id" : 1,
-            "command" : "INSTALL",
-            "role" : "SECONDARY_NAMENODE",
-            "start_time" : 1352118607958,
-            "stage_id" : 1
-          }
-        }
-      ]
-    }
-  ]
-}
\ No newline at end of file
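
The list_on_start.json fixture above mocks the requests API (GET .../requests/?fields=tasks/*) that backs the background-operations UI; each request item wraps a list of tasks carrying role, command, status, and timing fields. A minimal sketch tallying task status per request, again assuming the parsed fixture (helper name illustrative):

from collections import Counter

def status_by_request(payload):
    """Map request id -> Counter of task statuses for a requests payload."""
    return {item["Requests"]["id"]: Counter(t["Tasks"]["status"] for t in item["tasks"])
            for item in payload["items"]}

# For the fixture above: request 3 -> Counter({'COMPLETED': 2, 'QUEUED': 1});
# requests 2 and 1 -> all tasks COMPLETED.
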
diff --git a/branch-1.2/ambari-web/app/assets/data/cluster_metrics/cpu_1hr.json b/branch-1.2/ambari-web/app/assets/data/cluster_metrics/cpu_1hr.json
deleted file mode 100644
index 89b0dfa..0000000
--- a/branch-1.2/ambari-web/app/assets/data/cluster_metrics/cpu_1hr.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
-  "href" : "http://ambari/api/clusters/vmc?fields=metrics/cpu[1352702257,1352705857,15]",
-  "metrics" : {
-    "cpu" : {
-      "User" : "[[15.5,1352706465],[15.966666667,1352706480],[16.433333333,1352706495],[15.5,1352706510],[15.5,1352706525],[15.5,1352706540],[15.5,1352706555],[15.5,1352706570],[15.82,1352706585],[16.7,1352706600],[16.166666667,1352706615],[14.7,1352706630],[14.7,1352706645],[14.966666667,1352706660],[15.64,1352706675],[15.48,1352706690],[15.7,1352706705],[15.7,1352706720],[15.7,1352706735],[15.7,1352706750],[15.7,1352706765],[15.62,1352706780],[15.24,1352706795],[14.78,1352706810],[14.7,1352706825],[29.933333333,1352706840],[51.386666667,1352706855],[29.8,1352706870],[36.2,1352706885],[41.586666667,1352706900],[48.16,1352706915],[15.2,1352706930],[14.98,1352706945],[14.1,1352706960],[14.233333333,1352706975],[15.1,1352706990],[15.1,1352707005],[15.1,1352707020],[15.1,1352707035],[15.38,1352707050],[17.013333333,1352707065],[15.8,1352707080],[15.8,1352707095],[15.8,1352707110],[15.8,1352707125],[15.8,1352707140],[15.8,1352707155],[15.92,1352707170],[17.746666667,1352707185],[26.5,1352707200],[33.38,1352707215],[62.1,1352707230],[66.9,1352707245],[57.88,1352707260],[20.42,1352707275],[15.22,1352707290],[16.5,1352707305],[16.04,1352707320],[14.2,1352707335],[14.2,1352707350],[14.2,1352707365],[14.2,1352707380],[14.2,1352707395],[14.4,1352707410],[15.2,1352707425],[15.34,1352707440],[15.9,1352707455],[15.9,1352707470],[15.9,1352707485],[15.833333333,1352707500],[15.1,1352707515],[14.94,1352707530],[16.5,1352707545],[15.933333333,1352707560],[14.8,1352707575],[15.133333333,1352707590],[15.8,1352707605],[15.9,1352707620],[16.1,1352707635],[16.1,1352707650],[16.1,1352707665],[15.766666667,1352707680],[15.1,1352707695],[15.1,1352707710],[15.1,1352707725],[15.966666667,1352707740],[31.94,1352707755],[56.406666667,1352707770],[16.0,1352707785],[22.04,1352707800],[45.68,1352707815],[39.773333333,1352707830],[14.9,1352707845],[14.9,1352707860],[14.9,1352707875],[14.9,1352707890],[14.9,1352707905],[14.96,1352707920],[15.0,1352707935],[14.2,1352707950],[14.2,1352707965],[14.3,1352707980],[14.82,1352707995],[15.8,1352708010],[16.96,1352708025],[15.0,1352708040],[15.0,1352708055],[15.0,1352708070],[15.0,1352708085],[15.0,1352708100],[15.426666667,1352708115],[15.426666667,1352708130],[15.7,1352708145],[15.2,1352708160],[15.2,1352708175],[16.2,1352708190],[16.2,1352708205],[16.2,1352708220],[16.2,1352708235],[16.2,1352708250],[16.6,1352708265],[14.9,1352708280],[14.9,1352708295],[14.9,1352708310],[14.9,1352708325],[14.9,1352708340],[14.9,1352708355],[15.273333333,1352708370],[15.3,1352708385],[15.113333333,1352708400],[16.226666667,1352708415],[16.4,1352708430],[15.1,1352708445],[14.9,1352708460],[14.9,1352708475],[38.42,1352708490],[20.22,1352708505],[14.2,1352708520],[14.2,1352708535],[14.2,1352708550],[16.033333333,1352708565],[15.16,1352708580],[14.6,1352708595],[14.6,1352708610],[14.6,1352708625],[14.6,1352708640],[14.0,1352708655],[13.6,1352708670],[14.44,1352708685],[17.58,1352708700],[16.0,1352708715],[13.8,1352708730],[13.8,1352708745],[13.8,1352708760],[14.866666667,1352708775],[15.8,1352708790],[15.533333333,1352708805],[15.3,1352708820],[14.98,1352708835],[14.7,1352708850],[15.213333333,1352708865],[15.8,1352708880],[15.8,1352708895],[15.8,1352708910],[15.8,1352708925],[15.8,1352708940],[15.853333333,1352708955],[15.9,1352708970],[15.9,1352708985],[15.9,1352709000],[15.74,1352709015],[16.206666667,1352709030],[16.9,1352709045],[16.9,1352709060],[15.92,1352709075],[15.36,1352709090],[16.2,1352709105],[16.2,1352709120],[16.2,1352709135],[16.2,1352709150],[16.2,1352709165],[16.386666667,135270918
0],[16.013333333,1352709195],[15.5,1352709210],[15.5,1352709225],[14.86,1352709240],[14.3,1352709255],[14.3,1352709270],[14.833333333,1352709285],[15.3,1352709300],[15.3,1352709315],[15.3,1352709330],[14.713333333,1352709345],[13.88,1352709360],[13.84,1352709375],[14.0,1352709390],[14.0,1352709405],[14.8,1352709420],[14.1,1352709435],[13.7,1352709450],[14.833333333,1352709465],[14.666666667,1352709480],[14.666666667,1352709495],[14.8,1352709510],[14.8,1352709525],[16.32,1352709540],[15.66,1352709555],[15.4,1352709570],[15.4,1352709585],[15.4,1352709600],[15.4,1352709615],[15.4,1352709630],[16.146666667,1352709645],[16.2,1352709660],[16.2,1352709675],[14.986666667,1352709690],[15.84,1352709705],[55.953333333,1352709720],[62.1,1352709735],[31.593333333,1352709750],[18.34,1352709765],[15.16,1352709780],[60.42,1352709795],[71.8,1352709810],[32.64,1352709825],[16.64,1352709840],[14.973333333,1352709855],[14.6,1352709870],[14.6,1352709885],[36.38,1352709900],[23.473333333,1352709915],[0.0,1352709930],[0.0,1352709945],[0.0,1352709960],[0.0,1352709975],[0.0,1352709990],[0.0,1352710005],[0.0,1352710020],[0.0,1352710035],[0.0,1352710050],[0.0,1352710065]]",
-      "Idle" : "[[80.3,1352706465],[79.433333333,1352706480],[78.833333333,1352706495],[80.3,1352706510],[80.3,1352706525],[80.3,1352706540],[80.3,1352706555],[80.3,1352706570],[79.766666667,1352706585],[78.3,1352706600],[78.433333333,1352706615],[78.8,1352706630],[78.8,1352706645],[78.56,1352706660],[78.28,1352706675],[79.56,1352706690],[78.9,1352706705],[78.9,1352706720],[78.9,1352706735],[78.9,1352706750],[78.9,1352706765],[79.246666667,1352706780],[79.88,1352706795],[79.24,1352706810],[80.2,1352706825],[58.266666667,1352706840],[27.44,1352706855],[56.266666667,1352706870],[42.2,1352706885],[36.68,1352706900],[33.02,1352706915],[79.1,1352706930],[79.42,1352706945],[80.7,1352706960],[80.646666667,1352706975],[80.3,1352706990],[80.3,1352707005],[80.3,1352707020],[80.3,1352707035],[80.033333333,1352707050],[78.473333333,1352707065],[79.6,1352707080],[79.6,1352707095],[79.6,1352707110],[79.6,1352707125],[79.6,1352707140],[79.6,1352707155],[79.36,1352707170],[76.386666667,1352707185],[63.3,1352707200],[53.46,1352707215],[12.94,1352707230],[8.3,1352707245],[20.74,1352707260],[71.9,1352707275],[77.74,1352707290],[78.7,1352707305],[79.32,1352707320],[81.8,1352707335],[81.8,1352707350],[81.8,1352707365],[81.8,1352707380],[81.8,1352707395],[81.48,1352707410],[80.2,1352707425],[80.12,1352707440],[79.8,1352707455],[79.8,1352707470],[79.8,1352707485],[79.2,1352707500],[78.566666667,1352707515],[78.82,1352707530],[77.5,1352707545],[78.3,1352707560],[79.9,1352707575],[79.733333333,1352707590],[79.4,1352707605],[78.9,1352707620],[77.9,1352707635],[77.9,1352707650],[77.9,1352707665],[78.833333333,1352707680],[80.7,1352707695],[80.7,1352707710],[80.7,1352707725],[79.366666667,1352707740],[56.54,1352707755],[21.953333333,1352707770],[79.3,1352707785],[70.68,1352707800],[36.58,1352707815],[43.673333333,1352707830],[79.9,1352707845],[79.9,1352707860],[79.9,1352707875],[79.9,1352707890],[79.9,1352707905],[80.04,1352707920],[80.74,1352707935],[81.3,1352707950],[81.3,1352707965],[80.88,1352707980],[79.226666667,1352707995],[79.2,1352708010],[78.066666667,1352708025],[80.4,1352708040],[80.4,1352708055],[80.4,1352708070],[80.4,1352708085],[80.4,1352708100],[79.173333333,1352708115],[79.173333333,1352708130],[78.9,1352708145],[76.3,1352708160],[76.3,1352708175],[78.7,1352708190],[78.7,1352708205],[78.7,1352708220],[78.7,1352708235],[78.7,1352708250],[78.4,1352708265],[80.8,1352708280],[80.8,1352708295],[80.8,1352708310],[80.8,1352708325],[80.8,1352708340],[80.8,1352708355],[79.4,1352708370],[79.3,1352708385],[79.766666667,1352708400],[77.806666667,1352708415],[77.5,1352708430],[78.54,1352708445],[78.7,1352708460],[78.7,1352708475],[47.18,1352708490],[72.9,1352708505],[81.3,1352708520],[81.3,1352708535],[81.3,1352708550],[79.173333333,1352708565],[80.233333333,1352708580],[80.9,1352708595],[80.9,1352708610],[80.9,1352708625],[80.9,1352708640],[79.04,1352708655],[77.8,1352708670],[79.3,1352708685],[71.24,1352708700],[73.96,1352708715],[79.8,1352708730],[79.8,1352708745],[79.8,1352708760],[79.693333333,1352708775],[79.6,1352708790],[79.013333333,1352708805],[78.5,1352708820],[79.726666667,1352708835],[80.8,1352708850],[80.1,1352708865],[79.3,1352708880],[79.3,1352708895],[79.3,1352708910],[79.3,1352708925],[79.3,1352708940],[79.46,1352708955],[79.6,1352708970],[79.6,1352708985],[79.6,1352709000],[78.213333333,1352709015],[77.513333333,1352709030],[78.1,1352709045],[78.1,1352709060],[79.22,1352709075],[79.98,1352709090],[79.2,1352709105],[79.2,1352709120],[79.2,1352709135],[79.2,1352709150],[79.2,1352709165],[79.34,1
352709180],[79.66,1352709195],[79.8,1352709210],[79.8,1352709225],[80.493333333,1352709240],[81.1,1352709255],[81.1,1352709270],[79.66,1352709285],[78.4,1352709300],[78.4,1352709315],[78.4,1352709330],[79.466666667,1352709345],[79.866666667,1352709360],[80.78,1352709375],[81.7,1352709390],[81.7,1352709405],[80.9,1352709420],[81.6,1352709435],[82.0,1352709450],[80.6,1352709465],[81.1,1352709480],[80.16,1352709495],[79.6,1352709510],[79.6,1352709525],[79.04,1352709540],[79.86,1352709555],[80.1,1352709570],[80.1,1352709585],[80.1,1352709600],[80.1,1352709615],[80.1,1352709630],[77.673333333,1352709645],[77.5,1352709660],[77.5,1352709675],[79.926666667,1352709690],[78.913333333,1352709705],[22.54,1352709720],[13.9,1352709735],[57.146666667,1352709750],[75.88,1352709765],[80.18,1352709780],[16.1,1352709795],[0.0,1352709810],[55.22,1352709825],[78.306666667,1352709840],[80.28,1352709855],[80.6,1352709870],[80.6,1352709885],[50.386666667,1352709900],[68.366666667,1352709915],[0.0,1352709930],[0.0,1352709945],[0.0,1352709960],[0.0,1352709975],[0.0,1352709990],[0.0,1352710005],[0.0,1352710020],[0.0,1352710035],[0.0,1352710050],[0.0,1352710065]]",
-      "Wait" : "[[1.0,1352706465],[1.1666666667,1352706480],[1.2333333333,1352706495],[0.7,1352706510],[0.7,1352706525],[0.7,1352706540],[0.7,1352706555],[0.7,1352706570],[0.80666666667,1352706585],[1.1,1352706600],[1.2866666667,1352706615],[1.8,1352706630],[1.8,1352706645],[1.9066666667,1352706660],[1.88,1352706675],[0.94666666667,1352706690],[1.9,1352706705],[1.9,1352706720],[1.9,1352706735],[1.9,1352706750],[1.9,1352706765],[1.6333333333,1352706780],[1.3266666667,1352706795],[2.26,1352706810],[1.3,1352706825],[1.7,1352706840],[2.0733333333,1352706855],[3.6333333333,1352706870],[9.1,1352706885],[6.6733333333,1352706900],[0.32,1352706915],[1.6,1352706930],[1.58,1352706945],[1.5,1352706960],[1.3933333333,1352706975],[0.7,1352706990],[0.7,1352707005],[0.7,1352707020],[0.7,1352707035],[0.72666666667,1352707050],[0.92666666667,1352707065],[1.1,1352707080],[1.1,1352707095],[1.1,1352707110],[1.1,1352707125],[1.1,1352707140],[1.1,1352707155],[1.16,1352707170],[1.3866666667,1352707185],[1.3,1352707200],[1.16,1352707215],[0.5,1352707230],[0.1,1352707245],[0.24,1352707260],[1.36,1352707275],[3.08,1352707290],[1.0,1352707305],[0.9,1352707320],[0.5,1352707335],[0.5,1352707350],[0.5,1352707365],[0.5,1352707380],[0.5,1352707395],[0.64,1352707410],[1.2,1352707425],[1.1,1352707440],[0.7,1352707455],[0.7,1352707470],[0.7,1352707485],[1.2,1352707500],[2.3333333333,1352707515],[2.28,1352707530],[1.8,1352707545],[1.7666666667,1352707560],[1.7,1352707575],[1.4666666667,1352707590],[1.0,1352707605],[1.3666666667,1352707620],[2.1,1352707635],[2.1,1352707650],[2.1,1352707665],[1.6333333333,1352707680],[0.7,1352707695],[0.7,1352707710],[0.7,1352707725],[0.83333333333,1352707740],[0.80666666667,1352707755],[0.18666666667,1352707770],[0.7,1352707785],[0.68,1352707800],[0.76,1352707815],[1.36,1352707830],[1.1,1352707845],[1.1,1352707860],[1.1,1352707875],[1.1,1352707890],[1.1,1352707905],[1.04,1352707920],[0.82,1352707935],[0.9,1352707950],[0.9,1352707965],[1.18,1352707980],[2.1666666667,1352707995],[1.2733333333,1352708010],[1.0933333333,1352708025],[1.0,1352708040],[1.0,1352708055],[1.0,1352708070],[1.0,1352708085],[1.0,1352708100],[1.8533333333,1352708115],[1.8533333333,1352708130],[1.2,1352708145],[4.4,1352708160],[4.4,1352708175],[1.0,1352708190],[1.0,1352708205],[1.0,1352708220],[1.0,1352708235],[1.0,1352708250],[1.1,1352708265],[0.7,1352708280],[0.7,1352708295],[0.7,1352708310],[0.7,1352708325],[0.7,1352708340],[0.7,1352708355],[1.7266666667,1352708370],[1.8,1352708385],[0.96,1352708400],[1.0733333333,1352708415],[1.1,1352708430],[1.7933333333,1352708445],[1.9,1352708460],[1.9,1352708475],[0.78,1352708490],[0.82,1352708505],[0.9,1352708520],[0.9,1352708535],[0.9,1352708550],[0.9,1352708565],[1.0466666667,1352708580],[1.1,1352708595],[1.1,1352708610],[1.1,1352708625],[1.1,1352708640],[3.26,1352708655],[4.7,1352708670],[2.48,1352708685],[6.58,1352708700],[5.38,1352708715],[2.1,1352708730],[2.1,1352708745],[2.1,1352708760],[1.5666666667,1352708775],[1.1,1352708790],[1.7933333333,1352708805],[2.4,1352708820],[1.6,1352708835],[0.9,1352708850],[0.99333333333,1352708865],[1.1,1352708880],[1.1,1352708895],[1.1,1352708910],[1.1,1352708925],[1.1,1352708940],[0.99333333333,1352708955],[0.9,1352708970],[0.9,1352708985],[0.9,1352709000],[1.86,1352709015],[1.7666666667,1352709030],[0.7,1352709045],[0.7,1352709060],[1.0266666667,1352709075],[1.2,1352709090],[0.9,1352709105],[0.9,1352709120],[0.9,1352709135],[0.9,1352709150],[0.9,1352709165],[0.62,1352709180],[0.78,1352709195],[1.2,1352709210],[1.2,1352709225],[1.2,135270924
0],[1.2,1352709255],[1.2,1352709270],[1.7333333333,1352709285],[2.2,1352709300],[2.2,1352709315],[2.2,1352709330],[1.7733333333,1352709345],[2.4666666667,1352709360],[1.9,1352709375],[0.9,1352709390],[0.9,1352709405],[0.9,1352709420],[0.97333333333,1352709435],[1.0,1352709450],[1.0,1352709465],[0.8,1352709480],[1.6533333333,1352709495],[2.0,1352709510],[2.0,1352709525],[0.88,1352709540],[0.76,1352709555],[0.8,1352709570],[0.8,1352709585],[0.8,1352709600],[0.8,1352709615],[0.8,1352709630],[1.8266666667,1352709645],[1.9,1352709660],[1.9,1352709675],[1.2066666667,1352709690],[1.1866666667,1352709705],[0.33333333333,1352709720],[0.2,1352709735],[0.80666666667,1352709750],[1.22,1352709765],[0.82,1352709780],[0.3,1352709795],[0.2,1352709810],[1.2266666667,1352709825],[1.0866666667,1352709840],[0.75333333333,1352709855],[0.7,1352709870],[0.7,1352709885],[0.48,1352709900],[0.69333333333,1352709915],[0.0,1352709930],[0.0,1352709945],[0.0,1352709960],[0.0,1352709975],[0.0,1352709990],[0.0,1352710005],[0.0,1352710020],[0.0,1352710035],[0.0,1352710050],[0.0,1352710065]]",
-      "System" : "[[3.2,1352706465],[3.4333333333,1352706480],[3.5,1352706495],[3.5,1352706510],[3.5,1352706525],[3.5,1352706540],[3.5,1352706555],[3.5,1352706570],[3.6066666667,1352706585],[3.9,1352706600],[4.1133333333,1352706615],[4.7,1352706630],[4.7,1352706645],[4.54,1352706660],[4.1,1352706675],[3.94,1352706690],[3.5,1352706705],[3.5,1352706720],[3.5,1352706735],[3.5,1352706750],[3.5,1352706765],[3.4733333333,1352706780],[3.5066666667,1352706795],[3.78,1352706810],[3.7,1352706825],[10.033333333,1352706840],[19.1,1352706855],[10.3,1352706870],[12.5,1352706885],[15.06,1352706900],[18.5,1352706915],[4.1,1352706930],[4.02,1352706945],[3.7,1352706960],[3.7266666667,1352706975],[3.9,1352706990],[3.9,1352707005],[3.9,1352707020],[3.9,1352707035],[3.86,1352707050],[3.5866666667,1352707065],[3.5,1352707080],[3.5,1352707095],[3.5,1352707110],[3.5,1352707125],[3.5,1352707140],[3.5,1352707155],[3.56,1352707170],[4.4666666667,1352707185],[8.8,1352707200],[11.92,1352707215],[24.46,1352707230],[24.7,1352707245],[21.16,1352707260],[6.4,1352707275],[3.96,1352707290],[3.8,1352707305],[3.76,1352707320],[3.6,1352707335],[3.6,1352707350],[3.6,1352707365],[3.6,1352707380],[3.6,1352707395],[3.56,1352707410],[3.4,1352707425],[3.44,1352707440],[3.6,1352707455],[3.6,1352707470],[3.6,1352707485],[3.7666666667,1352707500],[4.0,1352707515],[3.92,1352707530],[4.1,1352707545],[3.9333333333,1352707560],[3.6,1352707575],[3.6666666667,1352707590],[3.8,1352707605],[3.8333333333,1352707620],[3.9,1352707635],[3.9,1352707650],[3.9,1352707665],[3.7666666667,1352707680],[3.5,1352707695],[3.5,1352707710],[3.5,1352707725],[3.8333333333,1352707740],[10.74,1352707755],[21.526666667,1352707770],[4.0,1352707785],[6.6,1352707800],[16.98,1352707815],[15.193333333,1352707830],[4.1,1352707845],[4.1,1352707860],[4.1,1352707875],[4.1,1352707890],[4.1,1352707905],[3.96,1352707920],[3.44,1352707935],[3.6,1352707950],[3.6,1352707965],[3.62,1352707980],[3.7,1352707995],[3.7266666667,1352708010],[3.8733333333,1352708025],[3.5,1352708040],[3.5,1352708055],[3.5,1352708070],[3.5,1352708085],[3.5,1352708100],[3.5,1352708115],[3.5,1352708130],[4.2,1352708145],[4.0,1352708160],[4.0,1352708175],[4.1,1352708190],[4.1,1352708205],[4.1,1352708220],[4.1,1352708235],[4.1,1352708250],[3.9,1352708265],[3.6,1352708280],[3.6,1352708295],[3.6,1352708310],[3.6,1352708325],[3.6,1352708340],[3.6,1352708355],[3.5066666667,1352708370],[3.5,1352708385],[4.2466666667,1352708400],[4.9066666667,1352708415],[5.0,1352708430],[4.5666666667,1352708445],[4.5,1352708460],[4.5,1352708475],[13.7,1352708490],[6.08,1352708505],[3.6,1352708520],[3.6,1352708535],[3.6,1352708550],[3.8933333333,1352708565],[3.56,1352708580],[3.4,1352708595],[3.4,1352708610],[3.4,1352708625],[3.4,1352708640],[3.7,1352708655],[3.9,1352708670],[3.78,1352708685],[4.6,1352708700],[4.6,1352708715],[4.2,1352708730],[4.2,1352708745],[4.2,1352708760],[3.8266666667,1352708775],[3.5,1352708790],[3.7133333333,1352708805],[3.9,1352708820],[3.74,1352708835],[3.6,1352708850],[3.6466666667,1352708865],[3.7,1352708880],[3.7,1352708895],[3.7,1352708910],[3.7,1352708925],[3.7,1352708940],[3.6466666667,1352708955],[3.6,1352708970],[3.6,1352708985],[3.6,1352709000],[4.1866666667,1352709015],[4.5133333333,1352709030],[4.3,1352709045],[4.3,1352709060],[3.8333333333,1352709075],[3.5,1352709090],[3.8,1352709105],[3.8,1352709120],[3.8,1352709135],[3.8,1352709150],[3.8,1352709165],[3.7066666667,1352709180],[3.6,1352709195],[3.6,1352709210],[3.6,1352709225],[3.4933333333,1352709240],[3.4,1352709255],[3.4,1352709270],[3.826
6666667,1352709285],[4.2,1352709300],[4.2,1352709315],[4.2,1352709330],[4.0933333333,1352709345],[3.7866666667,1352709360],[3.54,1352709375],[3.5,1352709390],[3.5,1352709405],[3.5,1352709420],[3.28,1352709435],[3.2,1352709450],[3.5333333333,1352709465],[3.3666666667,1352709480],[3.4933333333,1352709495],[3.6,1352709510],[3.6,1352709525],[3.68,1352709540],[3.62,1352709555],[3.6,1352709570],[3.6,1352709585],[3.6,1352709600],[3.6,1352709615],[3.6,1352709630],[4.44,1352709645],[4.5,1352709660],[4.5,1352709675],[3.8066666667,1352709690],[3.96,1352709705],[21.16,1352709720],[23.8,1352709735],[10.453333333,1352709750],[4.56,1352709765],[3.92,1352709780],[23.2,1352709795],[28.0,1352709810],[10.913333333,1352709825],[3.9666666667,1352709840],[3.9933333333,1352709855],[4.1,1352709870],[4.1,1352709885],[12.68,1352709900],[7.3666666667,1352709915],[0.0,1352709930],[0.0,1352709945],[0.0,1352709960],[0.0,1352709975],[0.0,1352709990],[0.0,1352710005],[0.0,1352710020],[0.0,1352710035],[0.0,1352710050],[0.0,1352710065]]",
-      "Nice" : "[[0.0,1352706465],[0.0,1352706480],[0.0,1352706495],[0.0,1352706510],[0.0,1352706525],[0.0,1352706540],[0.0,1352706555],[0.0,1352706570],[0.0,1352706585],[0.0,1352706600],[0.0,1352706615],[0.0,1352706630],[0.0,1352706645],[0.0,1352706660],[0.0,1352706675],[0.0,1352706690],[0.0,1352706705],[0.0,1352706720],[0.0,1352706735],[0.0,1352706750],[0.0,1352706765],[0.0,1352706780],[0.0,1352706795],[0.0,1352706810],[0.0,1352706825],[0.0,1352706840],[0.0,1352706855],[0.0,1352706870],[0.0,1352706885],[0.0,1352706900],[0.0,1352706915],[0.0,1352706930],[0.0,1352706945],[0.0,1352706960],[0.0,1352706975],[0.0,1352706990],[0.0,1352707005],[0.0,1352707020],[0.0,1352707035],[0.0,1352707050],[0.0,1352707065],[0.0,1352707080],[0.0,1352707095],[0.0,1352707110],[0.0,1352707125],[0.0,1352707140],[0.0,1352707155],[0.0,1352707170],[0.0,1352707185],[0.0,1352707200],[0.0,1352707215],[0.0,1352707230],[0.0,1352707245],[0.0,1352707260],[0.0,1352707275],[0.0,1352707290],[0.0,1352707305],[0.0,1352707320],[0.0,1352707335],[0.0,1352707350],[0.0,1352707365],[0.0,1352707380],[0.0,1352707395],[0.0,1352707410],[0.0,1352707425],[0.0,1352707440],[0.0,1352707455],[0.0,1352707470],[0.0,1352707485],[0.0,1352707500],[0.0,1352707515],[0.0,1352707530],[0.0,1352707545],[0.0,1352707560],[0.0,1352707575],[0.0,1352707590],[0.0,1352707605],[0.0,1352707620],[0.0,1352707635],[0.0,1352707650],[0.0,1352707665],[0.0,1352707680],[0.0,1352707695],[0.0,1352707710],[0.0,1352707725],[0.0,1352707740],[0.0,1352707755],[0.0,1352707770],[0.0,1352707785],[0.0,1352707800],[0.0,1352707815],[0.0,1352707830],[0.0,1352707845],[0.0,1352707860],[0.0,1352707875],[0.0,1352707890],[0.0,1352707905],[0.0,1352707920],[0.0,1352707935],[0.0,1352707950],[0.0,1352707965],[0.0,1352707980],[0.0,1352707995],[0.0,1352708010],[0.0,1352708025],[0.0,1352708040],[0.0,1352708055],[0.0,1352708070],[0.0,1352708085],[0.0,1352708100],[0.0,1352708115],[0.0,1352708130],[0.0,1352708145],[0.0,1352708160],[0.0,1352708175],[0.0,1352708190],[0.0,1352708205],[0.0,1352708220],[0.0,1352708235],[0.0,1352708250],[0.0,1352708265],[0.0,1352708280],[0.0,1352708295],[0.0,1352708310],[0.0,1352708325],[0.0,1352708340],[0.0,1352708355],[0.0,1352708370],[0.0,1352708385],[0.0,1352708400],[0.0,1352708415],[0.0,1352708430],[0.0,1352708445],[0.0,1352708460],[0.0,1352708475],[0.0,1352708490],[0.0,1352708505],[0.0,1352708520],[0.0,1352708535],[0.0,1352708550],[0.0,1352708565],[0.0,1352708580],[0.0,1352708595],[0.0,1352708610],[0.0,1352708625],[0.0,1352708640],[0.0,1352708655],[0.0,1352708670],[0.0,1352708685],[0.0,1352708700],[0.0,1352708715],[0.0,1352708730],[0.0,1352708745],[0.0,1352708760],[0.0,1352708775],[0.0,1352708790],[0.0,1352708805],[0.0,1352708820],[0.0,1352708835],[0.0,1352708850],[0.0,1352708865],[0.0,1352708880],[0.0,1352708895],[0.0,1352708910],[0.0,1352708925],[0.0,1352708940],[0.0,1352708955],[0.0,1352708970],[0.0,1352708985],[0.0,1352709000],[0.0,1352709015],[0.0,1352709030],[0.0,1352709045],[0.0,1352709060],[0.0,1352709075],[0.0,1352709090],[0.0,1352709105],[0.0,1352709120],[0.0,1352709135],[0.0,1352709150],[0.0,1352709165],[0.0,1352709180],[0.0,1352709195],[0.0,1352709210],[0.0,1352709225],[0.0,1352709240],[0.0,1352709255],[0.0,1352709270],[0.0,1352709285],[0.0,1352709300],[0.0,1352709315],[0.0,1352709330],[0.0,1352709345],[0.0,1352709360],[0.0,1352709375],[0.0,1352709390],[0.0,1352709405],[0.0,1352709420],[0.0,1352709435],[0.0,1352709450],[0.0,1352709465],[0.0,1352709480],[0.0,1352709495],[0.0,1352709510],[0.0,1352709525],[0.0,1352709540],[0.0,1352709555],[0.0,1352709570],[
0.0,1352709585],[0.0,1352709600],[0.0,1352709615],[0.0,1352709630],[0.0,1352709645],[0.0,1352709660],[0.0,1352709675],[0.0,1352709690],[0.0,1352709705],[0.0,1352709720],[0.0,1352709735],[0.0,1352709750],[0.0,1352709765],[0.0,1352709780],[0.0,1352709795],[0.0,1352709810],[0.0,1352709825],[0.0,1352709840],[0.0,1352709855],[0.0,1352709870],[0.0,1352709885],[0.0,1352709900],[0.0,1352709915],[0.0,1352709930],[0.0,1352709945],[0.0,1352709960],[0.0,1352709975],[0.0,1352709990],[0.0,1352710005],[0.0,1352710020],[0.0,1352710035],[0.0,1352710050],[0.0,1352710065]]"
-    }
-  },
-  "Clusters" : {
-    "cluster_name" : "vmc",
-    "version" : "HDP-1.2.0"
-  }
-}
\ No newline at end of file
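
Note the encoding in cpu_1hr.json above: each CPU series ("User", "Idle", "Wait", "System", "Nice") is a JSON-encoded string of [value, epoch-seconds] pairs, value first, which is the reverse ordering of the {"x", "y"} point fixtures. A minimal decode-and-sanity-check sketch, assuming the parsed fixture (function names illustrative):

import json

def decode_cpu(cpu_block):
    """cpu_block: the "metrics"/"cpu" dict; each value is a string-encoded
    list of [percent, epoch_seconds] pairs."""
    return {name: json.loads(encoded) for name, encoded in cpu_block.items()}

def off_totals(series, tol=0.5):
    """Timestamps where User+Nice+System+Wait+Idle strays from ~100%,
    skipping the zero-filled tail where no samples were collected."""
    totals = {}
    for points in series.values():
        for value, ts in points:
            totals[ts] = totals.get(ts, 0.0) + value
    return {ts: t for ts, t in sorted(totals.items()) if t and abs(t - 100.0) > tol}

At the first sample (1352706465) the components sum to exactly 100.0 (15.5 + 0.0 + 3.2 + 1.0 + 80.3).
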
diff --git a/branch-1.2/ambari-web/app/assets/data/cluster_metrics/cpu_2hr.json b/branch-1.2/ambari-web/app/assets/data/cluster_metrics/cpu_2hr.json
deleted file mode 100644
index dd96f30..0000000
--- a/branch-1.2/ambari-web/app/assets/data/cluster_metrics/cpu_2hr.json
+++ /dev/null
@@ -1 +0,0 @@
-[{"ds_name":"ccpu_user","cluster_name":"","graph_type":"stack","host_name":"","metric_name":"User\\g","datapoints":[[0.99666666667,1350519840],[0.88490740741,1350520200],[0.9725,1350520560],[0.80074074074,1350520920],[0.99574074074,1350521280],[0.87675925926,1350521640],[0.92185185185,1350522000],[0.9512962963,1350522360],[1.2297222222,1350522720],[1.4477777778,1350523080],[0.90824074074,1350523440],[0.91277777778,1350523800],[1.0499074074,1350524160],[0.84990740741,1350524520],[0.89268518519,1350524880],[0.76268518519,1350525240],[0.88666666667,1350525600],[0.76731481481,1350525960],[0.83675925926,1350526320],[0.80990740741,1350526680],[0,1350527040]]},{"ds_name":"ccpu_nice","cluster_name":"","graph_type":"stack","host_name":"","metric_name":"Nice\\g","datapoints":[[0,1350519840],[0,1350520200],[0,1350520560],[0,1350520920],[0,1350521280],[0,1350521640],[0,1350522000],[0,1350522360],[0,1350522720],[0,1350523080],[0,1350523440],[0,1350523800],[0,1350524160],[0,1350524520],[0,1350524880],[0,1350525240],[0,1350525600],[0,1350525960],[0,1350526320],[0,1350526680],[0,1350527040]]},{"ds_name":"ccpu_system","cluster_name":"","graph_type":"stack","host_name":"","metric_name":"System\\g","datapoints":[[1.4416666667,1350519840],[1.4385185185,1350520200],[1.4625,1350520560],[1.3718518519,1350520920],[1.5043518519,1350521280],[1.4396296296,1350521640],[1.4028703704,1350522000],[1.4771296296,1350522360],[1.6426851852,1350522720],[1.6589814815,1350523080],[1.4750925926,1350523440],[1.434537037,1350523800],[1.4760185185,1350524160],[1.4387962963,1350524520],[1.435462963,1350524880],[1.3639814815,1350525240],[1.4262037037,1350525600],[1.492962963,1350525960],[1.4201851852,1350526320],[1.3730555556,1350526680],[0,1350527040]]},{"ds_name":"ccpu_wio","cluster_name":"","graph_type":"stack","host_name":"","metric_name":"Wait\\g","datapoints":[[0.16444444444,1350519840],[0.13509259259,1350520200],[0.093703703704,1350520560],[0.16490740741,1350520920],[0.12046296296,1350521280],[0.20018518519,1350521640],[0.18,1350522000],[0.16194444444,1350522360],[0.14935185185,1350522720],[0.12398148148,1350523080],[0.11925925926,1350523440],[0.125,1350523800],[0.13277777778,1350524160],[0.11175925926,1350524520],[0.15037037037,1350524880],[0.12638888889,1350525240],[0.093518518519,1350525600],[0.1787037037,1350525960],[0.10444444444,1350526320],[0.12314814815,1350526680],[0,1350527040]]},{"ds_name":"ccpu_idle","cluster_name":"","graph_type":"stack","host_name":"","metric_name":"Idle\\g","datapoints":[[97.42212963,1350519840],[97.553333333,1350520200],[97.473333333,1350520560],[97.675092593,1350520920],[97.386574074,1350521280],[97.519074074,1350521640],[97.485833333,1350522000],[97.420277778,1350522360],[96.978888889,1350522720],[96.780925926,1350523080],[97.502407407,1350523440],[97.535092593,1350523800],[97.385092593,1350524160],[97.607685185,1350524520],[97.549074074,1350524880],[97.763518519,1350525240],[97.617592593,1350525600],[97.5725,1350525960],[97.654907407,1350526320],[97.712777778,1350526680],[0,1350527040]]}]
\ No newline at end of file
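
cpu_2hr.json above uses a second, pre-rendered shape: a top-level array of series objects with ds_name, graph_type ("stack" or "line"), and native [value, epoch] datapoints, the Ganglia "\g" suffix still escaped inside metric_name. A minimal sketch normalizing that shape into timestamp-first point order, under the same parsed-fixture assumption (name illustrative):

def normalize_series(series_list):
    """series_list: a parsed cpu_2hr/load_2hr-style fixture.
    Returns {metric_name: [(epoch_seconds, value), ...]}."""
    out = {}
    for s in series_list:
        # Drop the Ganglia "\g" suffix and any stray whitespace.
        name = s["metric_name"].replace("\\g", "").strip()
        out[name] = [(ts, value) for value, ts in s["datapoints"]]
    return out

The .strip() also covers entries like "CPUs " in the load fixtures below, whose metric_name carries a trailing space.
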
diff --git a/branch-1.2/ambari-web/app/assets/data/cluster_metrics/load_1hr.json b/branch-1.2/ambari-web/app/assets/data/cluster_metrics/load_1hr.json
deleted file mode 100644
index 32709c5..0000000
--- a/branch-1.2/ambari-web/app/assets/data/cluster_metrics/load_1hr.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
-  "href" : "http://ambari/api/clusters/vmc?fields=metrics/load[1352702257,1352705857,15]",
-  "metrics" : {
-    "load" : {
-      "CPUs" : "[[1.0,1352706450],[1.0,1352706465],[1.0,1352706480],[1.0,1352706495],[1.0,1352706510],[1.0,1352706525],[1.0,1352706540],[1.0,1352706555],[1.0,1352706570],[1.0,1352706585],[1.0,1352706600],[1.0,1352706615],[1.0,1352706630],[1.0,1352706645],[1.0,1352706660],[1.0,1352706675],[1.0,1352706690],[1.0,1352706705],[1.0,1352706720],[1.0,1352706735],[1.0,1352706750],[1.0,1352706765],[1.0,1352706780],[1.0,1352706795],[1.0,1352706810],[1.0,1352706825],[1.0,1352706840],[1.0,1352706855],[1.0,1352706870],[1.0,1352706885],[1.0,1352706900],[1.0,1352706915],[1.0,1352706930],[1.0,1352706945],[1.0,1352706960],[1.0,1352706975],[1.0,1352706990],[1.0,1352707005],[1.0,1352707020],[1.0,1352707035],[1.0,1352707050],[1.0,1352707065],[1.0,1352707080],[1.0,1352707095],[1.0,1352707110],[1.0,1352707125],[1.0,1352707140],[1.0,1352707155],[1.0,1352707170],[1.0,1352707185],[1.0,1352707200],[1.0,1352707215],[1.0,1352707230],[1.0,1352707245],[1.0,1352707260],[1.0,1352707275],[1.0,1352707290],[1.0,1352707305],[1.0,1352707320],[1.0,1352707335],[1.0,1352707350],[1.0,1352707365],[1.0,1352707380],[1.0,1352707395],[1.0,1352707410],[1.0,1352707425],[1.0,1352707440],[1.0,1352707455],[1.0,1352707470],[1.0,1352707485],[1.0,1352707500],[1.0,1352707515],[1.0,1352707530],[1.0,1352707545],[1.0,1352707560],[1.0,1352707575],[1.0,1352707590],[1.0,1352707605],[1.0,1352707620],[1.0,1352707635],[1.0,1352707650],[1.0,1352707665],[1.0,1352707680],[1.0,1352707695],[1.0,1352707710],[1.0,1352707725],[1.0,1352707740],[1.0,1352707755],[1.0,1352707770],[1.0,1352707785],[1.0,1352707800],[1.0,1352707815],[1.0,1352707830],[1.0,1352707845],[1.0,1352707860],[1.0,1352707875],[1.0,1352707890],[1.0,1352707905],[1.0,1352707920],[1.0,1352707935],[1.0,1352707950],[1.0,1352707965],[1.0,1352707980],[1.0,1352707995],[1.0,1352708010],[1.0,1352708025],[1.0,1352708040],[1.0,1352708055],[1.0,1352708070],[1.0,1352708085],[1.0,1352708100],[1.0,1352708115],[1.0,1352708130],[1.0,1352708145],[1.0,1352708160],[1.0,1352708175],[1.0,1352708190],[1.0,1352708205],[1.0,1352708220],[1.0,1352708235],[1.0,1352708250],[1.0,1352708265],[1.0,1352708280],[1.0,1352708295],[1.0,1352708310],[1.0,1352708325],[1.0,1352708340],[1.0,1352708355],[1.0,1352708370],[1.0,1352708385],[1.0,1352708400],[1.0,1352708415],[1.0,1352708430],[1.0,1352708445],[1.0,1352708460],[1.0,1352708475],[1.0,1352708490],[1.0,1352708505],[1.0,1352708520],[1.0,1352708535],[1.0,1352708550],[1.0,1352708565],[1.0,1352708580],[1.0,1352708595],[1.0,1352708610],[1.0,1352708625],[1.0,1352708640],[1.0,1352708655],[1.0,1352708670],[1.0,1352708685],[1.0,1352708700],[1.0,1352708715],[1.0,1352708730],[1.0,1352708745],[1.0,1352708760],[1.0,1352708775],[1.0,1352708790],[1.0,1352708805],[1.0,1352708820],[1.0,1352708835],[1.0,1352708850],[1.0,1352708865],[1.0,1352708880],[1.0,1352708895],[1.0,1352708910],[1.0,1352708925],[1.0,1352708940],[1.0,1352708955],[1.0,1352708970],[1.0,1352708985],[1.0,1352709000],[1.0,1352709015],[1.0,1352709030],[1.0,1352709045],[1.0,1352709060],[1.0,1352709075],[1.0,1352709090],[1.0,1352709105],[1.0,1352709120],[1.0,1352709135],[1.0,1352709150],[1.0,1352709165],[1.0,1352709180],[1.0,1352709195],[1.0,1352709210],[1.0,1352709225],[1.0,1352709240],[1.0,1352709255],[1.0,1352709270],[1.0,1352709285],[1.0,1352709300],[1.0,1352709315],[1.0,1352709330],[1.0,1352709345],[1.0,1352709360],[1.0,1352709375],[1.0,1352709390],[1.0,1352709405],[1.0,1352709420],[1.0,1352709435],[1.0,1352709450],[1.0,1352709465],[1.0,1352709480],[1.0,1352709495],[1.0,1352709510],[1.0,1352709525],[1.0,1352709540],[1.0,1352709555],[
1.0,1352709570],[1.0,1352709585],[1.0,1352709600],[1.0,1352709615],[1.0,1352709630],[1.0,1352709645],[1.0,1352709660],[1.0,1352709675],[1.0,1352709690],[1.0,1352709705],[1.0,1352709720],[1.0,1352709735],[1.0,1352709750],[1.0,1352709765],[1.0,1352709780],[1.0,1352709795],[1.0,1352709810],[1.0,1352709825],[1.0,1352709840],[1.0,1352709855],[1.0,1352709870],[1.0,1352709885],[1.0,1352709900],[1.0,1352709915],[0.0,1352709930],[0.0,1352709945],[0.0,1352709960],[0.0,1352709975],[0.0,1352709990],[0.0,1352710005],[0.0,1352710020],[0.0,1352710035],[0.0,1352710050]]",
-      "Procs" : "[[0.0,1352706450],[0.0,1352706465],[0.0,1352706480],[0.33333333333,1352706495],[1.0,1352706510],[1.0,1352706525],[1.0,1352706540],[1.0,1352706555],[1.0,1352706570],[1.0,1352706585],[1.0,1352706600],[1.0,1352706615],[1.0,1352706630],[1.0,1352706645],[0.73333333333,1352706660],[0.0,1352706675],[0.0,1352706690],[0.0,1352706705],[0.0,1352706720],[0.0,1352706735],[0.0,1352706750],[0.0,1352706765],[0.0,1352706780],[0.0,1352706795],[0.0,1352706810],[0.0,1352706825],[0.0,1352706840],[0.0,1352706855],[0.0,1352706870],[0.0,1352706885],[0.26666666667,1352706900],[1.0,1352706915],[1.0,1352706930],[1.0,1352706945],[1.0,1352706960],[1.6666666667,1352706975],[6.0,1352706990],[6.0,1352707005],[6.0,1352707020],[6.0,1352707035],[5.2,1352707050],[0.0,1352707065],[0.0,1352707080],[0.0,1352707095],[0.0,1352707110],[0.0,1352707125],[0.0,1352707140],[0.0,1352707155],[0.0,1352707170],[0.0,1352707185],[0.0,1352707200],[0.2,1352707215],[1.0,1352707230],[1.0,1352707245],[1.0,1352707260],[1.0,1352707275],[1.4,1352707290],[3.0,1352707305],[3.0,1352707320],[3.0,1352707335],[3.0,1352707350],[3.0,1352707365],[2.8,1352707380],[2.0,1352707395],[2.0,1352707410],[2.0,1352707425],[2.0,1352707440],[2.2,1352707455],[3.0,1352707470],[3.0,1352707485],[3.0,1352707500],[3.0,1352707515],[2.2,1352707530],[1.0,1352707545],[1.0,1352707560],[1.0,1352707575],[1.0,1352707590],[1.0,1352707605],[1.3333333333,1352707620],[2.0,1352707635],[2.0,1352707650],[2.0,1352707665],[2.0,1352707680],[1.6666666667,1352707695],[1.0,1352707710],[1.0,1352707725],[1.0,1352707740],[1.0,1352707755],[0.73333333333,1352707770],[0.0,1352707785],[0.0,1352707800],[0.0,1352707815],[0.0,1352707830],[0.0,1352707845],[0.0,1352707860],[0.0,1352707875],[0.0,1352707890],[0.0,1352707905],[0.0,1352707920],[0.2,1352707935],[1.0,1352707950],[1.0,1352707965],[1.0,1352707980],[1.0,1352707995],[0.86666666667,1352708010],[0.0,1352708025],[0.0,1352708040],[0.0,1352708055],[0.0,1352708070],[0.0,1352708085],[0.0,1352708100],[0.0,1352708115],[0.0,1352708130],[0.0,1352708145],[0.0,1352708160],[0.0,1352708175],[0.0,1352708190],[0.0,1352708205],[0.0,1352708220],[0.0,1352708235],[0.0,1352708250],[2.0,1352708265],[2.0,1352708280],[2.0,1352708295],[2.0,1352708310],[2.0,1352708325],[0.13333333333,1352708340],[0.0,1352708355],[0.0,1352708370],[0.0,1352708385],[0.0,1352708400],[2.6,1352708415],[3.0,1352708430],[3.0,1352708445],[3.0,1352708460],[3.0,1352708475],[3.0,1352708490],[0.6,1352708505],[0.0,1352708520],[0.0,1352708535],[0.0,1352708550],[0.0,1352708565],[0.0,1352708580],[0.0,1352708595],[0.0,1352708610],[0.0,1352708625],[0.0,1352708640],[0.0,1352708655],[0.0,1352708670],[0.0,1352708685],[0.0,1352708700],[0.0,1352708715],[0.0,1352708730],[0.0,1352708745],[0.0,1352708760],[0.0,1352708775],[0.0,1352708790],[0.0,1352708805],[0.0,1352708820],[0.0,1352708835],[0.0,1352708850],[0.0,1352708865],[0.0,1352708880],[0.0,1352708895],[0.0,1352708910],[0.0,1352708925],[0.0,1352708940],[0.0,1352708955],[0.0,1352708970],[0.0,1352708985],[0.0,1352709000],[0.0,1352709015],[0.0,1352709030],[0.0,1352709045],[0.46666666667,1352709060],[1.0,1352709075],[1.0,1352709090],[1.0,1352709105],[1.0,1352709120],[1.0,1352709135],[1.0,1352709150],[1.0,1352709165],[1.0,1352709180],[1.0,1352709195],[1.0,1352709210],[1.0,1352709225],[1.0,1352709240],[1.0,1352709255],[1.0,1352709270],[1.0,1352709285],[1.0,1352709300],[1.0,1352709315],[1.0,1352709330],[1.0,1352709345],[1.0,1352709360],[2.8,1352709375],[4.0,1352709390],[4.0,1352709405],[4.0,1352709420],[4.0,1352709435],[4.0,1352709450],[2.0,1352709465],[1.0,
1352709480],[1.0,1352709495],[1.0,1352709510],[1.0,1352709525],[0.2,1352709540],[0.0,1352709555],[0.0,1352709570],[0.0,1352709585],[0.0,1352709600],[0.0,1352709615],[3.4666666667,1352709630],[4.0,1352709645],[4.0,1352709660],[4.0,1352709675],[4.0,1352709690],[0.53333333333,1352709705],[0.0,1352709720],[0.0,1352709735],[0.0,1352709750],[0.0,1352709765],[0.0,1352709780],[0.0,1352709795],[0.0,1352709810],[0.0,1352709825],[0.0,1352709840],[0.0,1352709855],[0.0,1352709870],[0.0,1352709885],[0.0,1352709900],[0.0,1352709915],[0.0,1352709930],[0.0,1352709945],[0.0,1352709960],[0.0,1352709975],[0.0,1352709990],[0.0,1352710005],[0.0,1352710020],[0.0,1352710035],[0.0,1352710050]]",
-      "Nodes" : "[[1.0,1352706450],[1.0,1352706465],[1.0,1352706480],[1.0,1352706495],[1.0,1352706510],[1.0,1352706525],[1.0,1352706540],[1.0,1352706555],[1.0,1352706570],[1.0,1352706585],[1.0,1352706600],[1.0,1352706615],[1.0,1352706630],[1.0,1352706645],[1.0,1352706660],[1.0,1352706675],[1.0,1352706690],[1.0,1352706705],[1.0,1352706720],[1.0,1352706735],[1.0,1352706750],[1.0,1352706765],[1.0,1352706780],[1.0,1352706795],[1.0,1352706810],[1.0,1352706825],[1.0,1352706840],[1.0,1352706855],[1.0,1352706870],[1.0,1352706885],[1.0,1352706900],[1.0,1352706915],[1.0,1352706930],[1.0,1352706945],[1.0,1352706960],[1.0,1352706975],[1.0,1352706990],[1.0,1352707005],[1.0,1352707020],[1.0,1352707035],[1.0,1352707050],[1.0,1352707065],[1.0,1352707080],[1.0,1352707095],[1.0,1352707110],[1.0,1352707125],[1.0,1352707140],[1.0,1352707155],[1.0,1352707170],[1.0,1352707185],[1.0,1352707200],[1.0,1352707215],[1.0,1352707230],[1.0,1352707245],[1.0,1352707260],[1.0,1352707275],[1.0,1352707290],[1.0,1352707305],[1.0,1352707320],[1.0,1352707335],[1.0,1352707350],[1.0,1352707365],[1.0,1352707380],[1.0,1352707395],[1.0,1352707410],[1.0,1352707425],[1.0,1352707440],[1.0,1352707455],[1.0,1352707470],[1.0,1352707485],[1.0,1352707500],[1.0,1352707515],[1.0,1352707530],[1.0,1352707545],[1.0,1352707560],[1.0,1352707575],[1.0,1352707590],[1.0,1352707605],[1.0,1352707620],[1.0,1352707635],[1.0,1352707650],[1.0,1352707665],[1.0,1352707680],[1.0,1352707695],[1.0,1352707710],[1.0,1352707725],[1.0,1352707740],[1.0,1352707755],[1.0,1352707770],[1.0,1352707785],[1.0,1352707800],[1.0,1352707815],[1.0,1352707830],[1.0,1352707845],[1.0,1352707860],[1.0,1352707875],[1.0,1352707890],[1.0,1352707905],[1.0,1352707920],[1.0,1352707935],[1.0,1352707950],[1.0,1352707965],[1.0,1352707980],[1.0,1352707995],[1.0,1352708010],[1.0,1352708025],[1.0,1352708040],[1.0,1352708055],[1.0,1352708070],[1.0,1352708085],[1.0,1352708100],[1.0,1352708115],[1.0,1352708130],[1.0,1352708145],[1.0,1352708160],[1.0,1352708175],[1.0,1352708190],[1.0,1352708205],[1.0,1352708220],[1.0,1352708235],[1.0,1352708250],[1.0,1352708265],[1.0,1352708280],[1.0,1352708295],[1.0,1352708310],[1.0,1352708325],[1.0,1352708340],[1.0,1352708355],[1.0,1352708370],[1.0,1352708385],[1.0,1352708400],[1.0,1352708415],[1.0,1352708430],[1.0,1352708445],[1.0,1352708460],[1.0,1352708475],[1.0,1352708490],[1.0,1352708505],[1.0,1352708520],[1.0,1352708535],[1.0,1352708550],[1.0,1352708565],[1.0,1352708580],[1.0,1352708595],[1.0,1352708610],[1.0,1352708625],[1.0,1352708640],[1.0,1352708655],[1.0,1352708670],[1.0,1352708685],[1.0,1352708700],[1.0,1352708715],[1.0,1352708730],[1.0,1352708745],[1.0,1352708760],[1.0,1352708775],[1.0,1352708790],[1.0,1352708805],[1.0,1352708820],[1.0,1352708835],[1.0,1352708850],[1.0,1352708865],[1.0,1352708880],[1.0,1352708895],[1.0,1352708910],[1.0,1352708925],[1.0,1352708940],[1.0,1352708955],[1.0,1352708970],[1.0,1352708985],[1.0,1352709000],[1.0,1352709015],[1.0,1352709030],[1.0,1352709045],[1.0,1352709060],[1.0,1352709075],[1.0,1352709090],[1.0,1352709105],[1.0,1352709120],[1.0,1352709135],[1.0,1352709150],[1.0,1352709165],[1.0,1352709180],[1.0,1352709195],[1.0,1352709210],[1.0,1352709225],[1.0,1352709240],[1.0,1352709255],[1.0,1352709270],[1.0,1352709285],[1.0,1352709300],[1.0,1352709315],[1.0,1352709330],[1.0,1352709345],[1.0,1352709360],[1.0,1352709375],[1.0,1352709390],[1.0,1352709405],[1.0,1352709420],[1.0,1352709435],[1.0,1352709450],[1.0,1352709465],[1.0,1352709480],[1.0,1352709495],[1.0,1352709510],[1.0,1352709525],[1.0,1352709540],[1.0,1352709555],
[1.0,1352709570],[1.0,1352709585],[1.0,1352709600],[1.0,1352709615],[1.0,1352709630],[1.0,1352709645],[1.0,1352709660],[1.0,1352709675],[1.0,1352709690],[1.0,1352709705],[1.0,1352709720],[1.0,1352709735],[1.0,1352709750],[1.0,1352709765],[1.0,1352709780],[1.0,1352709795],[1.0,1352709810],[1.0,1352709825],[1.0,1352709840],[1.0,1352709855],[1.0,1352709870],[1.0,1352709885],[1.0,1352709900],[1.0,1352709915],[0.0,1352709930],[0.0,1352709945],[0.0,1352709960],[0.0,1352709975],[0.0,1352709990],[0.0,1352710005],[0.0,1352710020],[0.0,1352710035],[0.0,1352710050]]",
-      "1-min" : "[[0.31,1352706450],[0.31,1352706465],[0.31,1352706480],[0.31,1352706495],[0.25,1352706510],[0.13,1352706525],[0.13,1352706540],[0.13,1352706555],[0.13,1352706570],[0.13,1352706585],[0.178,1352706600],[0.31,1352706615],[0.31,1352706630],[0.31,1352706645],[0.31,1352706660],[0.31,1352706675],[0.24333333333,1352706690],[0.06,1352706705],[0.06,1352706720],[0.06,1352706735],[0.06,1352706750],[0.06,1352706765],[0.068,1352706780],[0.09,1352706795],[0.09,1352706810],[0.09,1352706825],[0.5,1352706840],[1.32,1352706855],[1.32,1352706870],[1.32,1352706885],[1.32,1352706900],[1.32,1352706915],[1.1946666667,1352706930],[0.85,1352706945],[0.85,1352706960],[0.85,1352706975],[0.85,1352706990],[0.85,1352707005],[0.762,1352707020],[0.41,1352707035],[0.41,1352707050],[0.41,1352707065],[0.41,1352707080],[0.41,1352707095],[0.376,1352707110],[0.24,1352707125],[0.24,1352707140],[0.24,1352707155],[0.24,1352707170],[0.24133333333,1352707185],[0.25,1352707200],[0.25,1352707215],[0.25,1352707230],[0.25,1352707245],[0.25,1352707260],[0.25,1352707275],[0.412,1352707290],[1.06,1352707305],[1.06,1352707320],[1.06,1352707335],[1.06,1352707350],[1.06,1352707365],[0.93,1352707380],[0.41,1352707395],[0.41,1352707410],[0.41,1352707425],[0.41,1352707440],[0.41,1352707455],[0.394,1352707470],[0.35,1352707485],[0.35,1352707500],[0.35,1352707515],[0.35,1352707530],[0.35,1352707545],[0.32,1352707560],[0.26,1352707575],[0.26,1352707590],[0.26,1352707605],[0.26,1352707620],[0.26,1352707635],[0.252,1352707650],[0.24,1352707665],[0.24,1352707680],[0.24,1352707695],[0.24,1352707710],[0.24,1352707725],[0.21333333333,1352707740],[0.16,1352707755],[0.16,1352707770],[0.16,1352707785],[0.16,1352707800],[0.16,1352707815],[0.25333333333,1352707830],[0.86,1352707845],[0.86,1352707860],[0.86,1352707875],[0.86,1352707890],[0.86,1352707905],[0.764,1352707920],[0.38,1352707935],[0.38,1352707950],[0.38,1352707965],[0.38,1352707980],[0.36133333333,1352707995],[0.24,1352708010],[0.24,1352708025],[0.24,1352708040],[0.24,1352708055],[0.24,1352708070],[0.24866666667,1352708085],[0.37,1352708100],[0.37,1352708115],[0.37,1352708130],[0.37,1352708145],[0.37,1352708160],[0.37,1352708175],[0.09,1352708190],[0.09,1352708205],[0.09,1352708220],[0.09,1352708235],[0.09,1352708250],[0.09,1352708265],[0.22,1352708280],[0.22,1352708295],[0.22,1352708310],[0.22,1352708325],[0.22,1352708340],[0.22,1352708355],[0.136,1352708370],[0.13,1352708385],[0.13,1352708400],[0.13,1352708415],[0.13,1352708430],[0.13,1352708445],[0.078,1352708460],[0.07,1352708475],[0.07,1352708490],[0.07,1352708505],[0.07,1352708520],[0.07,1352708535],[0.40733333333,1352708550],[0.53,1352708565],[0.53,1352708580],[0.53,1352708595],[0.53,1352708610],[0.53,1352708625],[0.27,1352708640],[0.14,1352708655],[0.14,1352708670],[0.14,1352708685],[0.14,1352708700],[0.14,1352708715],[0.35866666667,1352708730],[0.55,1352708745],[0.55,1352708760],[0.55,1352708775],[0.55,1352708790],[0.55,1352708805],[0.422,1352708820],[0.31,1352708835],[0.31,1352708850],[0.31,1352708865],[0.31,1352708880],[0.31,1352708895],[0.28866666667,1352708910],[0.27,1352708925],[0.27,1352708940],[0.27,1352708955],[0.27,1352708970],[0.27,1352708985],[0.17933333333,1352709000],[0.1,1352709015],[0.1,1352709030],[0.1,1352709045],[0.1,1352709060],[0.1,1352709075],[0.152,1352709090],[0.23,1352709105],[0.23,1352709120],[0.23,1352709135],[0.23,1352709150],[0.23,1352709165],[0.17866666667,1352709180],[0.12,1352709195],[0.12,1352709210],[0.12,1352709225],[0.12,1352709240],[0.12,1352709255],[0.19466666667,1352709270],[0.26,135270
9285],[0.26,1352709300],[0.26,1352709315],[0.26,1352709330],[0.26,1352709345],[0.26533333333,1352709360],[0.27,1352709375],[0.27,1352709390],[0.27,1352709405],[0.27,1352709420],[0.27,1352709435],[0.248,1352709450],[0.24,1352709465],[0.24,1352709480],[0.24,1352709495],[0.24,1352709510],[0.24,1352709525],[0.16,1352709540],[0.14,1352709555],[0.14,1352709570],[0.14,1352709585],[0.14,1352709600],[0.14,1352709615],[0.14866666667,1352709630],[0.15,1352709645],[0.15,1352709660],[0.15,1352709675],[0.15,1352709690],[0.15,1352709705],[0.202,1352709720],[0.21,1352709735],[0.21,1352709750],[0.21,1352709765],[0.21,1352709780],[0.21,1352709795],[1.074,1352709810],[1.29,1352709825],[1.29,1352709840],[1.29,1352709855],[1.29,1352709870],[1.29,1352709885],[1.1433333333,1352709900],[1.09,1352709915],[0.0,1352709930],[0.0,1352709945],[0.0,1352709960],[0.0,1352709975],[0.0,1352709990],[0.0,1352710005],[0.0,1352710020],[0.0,1352710035],[0.0,1352710050]]"
-    }
-  },
-  "Clusters" : {
-    "cluster_name" : "vmc",
-    "version" : "HDP-1.2.0"
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/cluster_metrics/load_2hr.json b/branch-1.2/ambari-web/app/assets/data/cluster_metrics/load_2hr.json
deleted file mode 100644
index 8ea97c7..0000000
--- a/branch-1.2/ambari-web/app/assets/data/cluster_metrics/load_2hr.json
+++ /dev/null
@@ -1 +0,0 @@
-[{"ds_name":"a0","cluster_name":"","graph_type":"stack","host_name":"","metric_name":"1-min","datapoints":[[0.66047222222,1350505440],[0.020416666667,1350505800],[0.39083333333,1350506160],[0.39933333333,1350506520],[0.0064444444444,1350506880],[0,1350507240],[0,1350507600],[0,1350507960],[0,1350508320],[0,1350508680],[0,1350509040],[0.13225,1350509400],[0.29808333333,1350509760],[0.11486111111,1350510120],[0.26966666667,1350510480],[0.067611111111,1350510840],[0.063555555556,1350511200],[0,1350511560],[0,1350511920],[0.075388888889,1350512280],[0,1350512640]]},{"ds_name":"a1","cluster_name":"","graph_type":"line","host_name":"","metric_name":"Nodes","datapoints":[[3,1350505440],[3,1350505800],[3,1350506160],[3,1350506520],[3,1350506880],[3,1350507240],[3,1350507600],[3,1350507960],[3,1350508320],[3,1350508680],[3,1350509040],[3,1350509400],[3,1350509760],[3,1350510120],[3,1350510480],[3,1350510840],[3,1350511200],[3,1350511560],[3,1350511920],[3,1350512280],[0,1350512640]]},{"ds_name":"a2","cluster_name":"","graph_type":"line","host_name":"","metric_name":"CPUs ","datapoints":[[6,1350505440],[6,1350505800],[6,1350506160],[6,1350506520],[6,1350506880],[6,1350507240],[6,1350507600],[6,1350507960],[6,1350508320],[6,1350508680],[6,1350509040],[6,1350509400],[6,1350509760],[6,1350510120],[6,1350510480],[6,1350510840],[6,1350511200],[6,1350511560],[6,1350511920],[6,1350512280],[0,1350512640]]},{"ds_name":"a3","cluster_name":"","graph_type":"line","host_name":"","metric_name":"Procs","datapoints":[[4.5527777778,1350505440],[2.6138888889,1350505800],[1.3333333333,1350506160],[2.8555555556,1350506520],[1.2166666667,1350506880],[1.2861111111,1350507240],[2.5638888889,1350507600],[0.86666666667,1350507960],[2.2472222222,1350508320],[0.77777777778,1350508680],[0.53055555556,1350509040],[1.6694444444,1350509400],[1.9944444444,1350509760],[1.1694444444,1350510120],[1.9916666667,1350510480],[2.6055555556,1350510840],[3.1027777778,1350511200],[1.8555555556,1350511560],[3.1138888889,1350511920],[1.5416666667,1350512280],[0,1350512640]]}]
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/cluster_metrics/load_4hr.json b/branch-1.2/ambari-web/app/assets/data/cluster_metrics/load_4hr.json
deleted file mode 100644
index 6b0e1c5..0000000
--- a/branch-1.2/ambari-web/app/assets/data/cluster_metrics/load_4hr.json
+++ /dev/null
@@ -1 +0,0 @@
-[{"ds_name":"a0","cluster_name":"","graph_type":"stack","host_name":"","metric_name":"1-min","datapoints":[[0.30627777778,1350498240],[0.090611111111,1350498600],[0,1350498960],[0.23097222222,1350499320],[0.40261111111,1350499680],[0.53072222222,1350500040],[0.68769444444,1350500400],[0.23547222222,1350500760],[0.39988888889,1350501120],[0.090111111111,1350501480],[0.048888888889,1350501840],[0.1275,1350502200],[0.25075,1350502560],[0.14805555556,1350502920],[0.053638888889,1350503280],[0.066777777778,1350503640],[0.17205555556,1350504000],[0.47402777778,1350504360],[0.096472222222,1350504720],[0.030138888889,1350505080],[0.66047222222,1350505440],[0.020416666667,1350505800],[0.39083333333,1350506160],[0.39933333333,1350506520],[0.0064444444444,1350506880],[0,1350507240],[0,1350507600],[0,1350507960],[0,1350508320],[0,1350508680],[0,1350509040],[0.13225,1350509400],[0.29808333333,1350509760],[0.11486111111,1350510120],[0.26966666667,1350510480],[0.067611111111,1350510840],[0.063555555556,1350511200],[0,1350511560],[0,1350511920],[0.075388888889,1350512280],[0,1350512640]]},{"ds_name":"a1","cluster_name":"","graph_type":"line","host_name":"","metric_name":"Nodes","datapoints":[[3,1350498240],[3,1350498600],[3,1350498960],[3,1350499320],[3,1350499680],[3,1350500040],[3,1350500400],[3,1350500760],[3,1350501120],[3,1350501480],[3,1350501840],[3,1350502200],[3,1350502560],[3,1350502920],[3,1350503280],[3,1350503640],[3,1350504000],[3,1350504360],[3,1350504720],[3,1350505080],[3,1350505440],[3,1350505800],[3,1350506160],[3,1350506520],[3,1350506880],[3,1350507240],[3,1350507600],[3,1350507960],[3,1350508320],[3,1350508680],[3,1350509040],[3,1350509400],[3,1350509760],[3,1350510120],[3,1350510480],[3,1350510840],[3,1350511200],[3,1350511560],[3,1350511920],[3,1350512280],[0,1350512640]]},{"ds_name":"a2","cluster_name":"","graph_type":"line","host_name":"","metric_name":"CPUs ","datapoints":[[6,1350498240],[6,1350498600],[6,1350498960],[6,1350499320],[6,1350499680],[6,1350500040],[6,1350500400],[6,1350500760],[6,1350501120],[6,1350501480],[6,1350501840],[6,1350502200],[6,1350502560],[6,1350502920],[6,1350503280],[6,1350503640],[6,1350504000],[6,1350504360],[6,1350504720],[6,1350505080],[6,1350505440],[6,1350505800],[6,1350506160],[6,1350506520],[6,1350506880],[6,1350507240],[6,1350507600],[6,1350507960],[6,1350508320],[6,1350508680],[6,1350509040],[6,1350509400],[6,1350509760],[6,1350510120],[6,1350510480],[6,1350510840],[6,1350511200],[6,1350511560],[6,1350511920],[6,1350512280],[0,1350512640]]},{"ds_name":"a3","cluster_name":"","graph_type":"line","host_name":"","metric_name":"Procs","datapoints":[[1.5722222222,1350498240],[1.2555555556,1350498600],[1.8277777778,1350498960],[1.2166666667,1350499320],[3.8833333333,1350499680],[1.3222222222,1350500040],[1.1555555556,1350500400],[1.7611111111,1350500760],[2.5888888889,1350501120],[1.3305555556,1350501480],[0.53611111111,1350501840],[2.25,1350502200],[1.675,1350502560],[1.4083333333,1350502920],[2.1277777778,1350503280],[3,1350503640],[1.4166666667,1350504000],[2.3416666667,1350504360],[1.3694444444,1350504720],[0.65277777778,1350505080],[4.5527777778,1350505440],[2.6138888889,1350505800],[1.3333333333,1350506160],[2.8555555556,1350506520],[1.2166666667,1350506880],[1.2861111111,1350507240],[2.5638888889,1350507600],[0.86666666667,1350507960],[2.2472222222,1350508320],[0.77777777778,1350508680],[0.53055555556,1350509040],[1.6694444444,1350509400],[1.9944444444,1350509760],[1.1694444444,1350510120],[1.9916666667,1350510480],[2.6055555556,1350510840],[3.1027777778,1350511200],[1.8555555556,1350511560],[3.1138888889,1350511920],[1.5416666667,1350512280],[0,1350512640]]}]
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/cluster_metrics/load_day.json b/branch-1.2/ambari-web/app/assets/data/cluster_metrics/load_day.json
deleted file mode 100644
index b1aab90..0000000
--- a/branch-1.2/ambari-web/app/assets/data/cluster_metrics/load_day.json
+++ /dev/null
@@ -1 +0,0 @@
-[{"ds_name":"a0","cluster_name":"","graph_type":"stack","host_name":"","metric_name":"1-min","datapoints":[[0.41566666667,1350426240],[0.071388888889,1350426600],[0,1350426960],[0,1350427320],[0.034694444444,1350427680],[0.049194444444,1350428040],[0.096555555556,1350428400],[0.0077777777778,1350428760],[0.093555555556,1350429120],[0.098166666667,1350429480],[0.067166666667,1350429840],[0.10477777778,1350430200],[0.11913888889,1350430560],[0.15888888889,1350430920],[0.014833333333,1350431280],[0,1350431640],[0,1350432000],[0.13236111111,1350432360],[0.0032222222222,1350432720],[0.055222222222,1350433080],[0.13966666667,1350433440],[0.11030555556,1350433800],[0.033555555556,1350434160],[0.095444444444,1350434520],[0.10961111111,1350434880],[0.10583333333,1350435240],[0.010833333333,1350435600],[0.042777777778,1350435960],[0.016555555556,1350436320],[0.085722222222,1350436680],[0.0033333333333,1350437040],[0.052444444444,1350437400],[0.096388888889,1350437760],[0.017944444444,1350438120],[0.23366666667,1350438480],[0,1350438840],[0.12633333333,1350439200],[0.27244444444,1350439560],[0.21280555556,1350439920],[0.22961111111,1350440280],[0.30141666667,1350440640],[0.0030833333333,1350441000],[0,1350441360],[0,1350441720],[0.071055555556,1350442080],[0.060861111111,1350442440],[0.039583333333,1350442800],[0.20541666667,1350443160],[0.1725,1350443520],[0.076777777778,1350443880],[0.10913888889,1350444240],[0.17088888889,1350444600],[0.079694444444,1350444960],[0.15969444444,1350445320],[0.0031388888889,1350445680],[0.17558333333,1350446040],[0.29675,1350446400],[0.0032777777778,1350446760],[0.029361111111,1350447120],[0.18375,1350447480],[0.039444444444,1350447840],[0.15644444444,1350448200],[0.074944444444,1350448560],[0.04325,1350448920],[0.072111111111,1350449280],[0.35930555556,1350449640],[0.49425,1350450000],[0.41797222222,1350450360],[0.16530555556,1350450720],[0.13291666667,1350451080],[0.031944444444,1350451440],[0.03,1350451800],[0,1350452160],[0.015166666667,1350452520],[0.067055555556,1350452880],[0.10861111111,1350453240],[0.014388888889,1350453600],[0.11847222222,1350453960],[0.20633333333,1350454320],[0.17852777778,1350454680],[0.12266666667,1350455040],[0.21163888889,1350455400],[0.084083333333,1350455760],[0.19605555556,1350456120],[0.30877777778,1350456480],[0.33252777778,1350456840],[0.07475,1350457200],[0.0635,1350457560],[0.0057222222222,1350457920],[0.16755555556,1350458280],[0.25280555556,1350458640],[0.042333333333,1350459000],[0.031361111111,1350459360],[0.22352777778,1350459720],[0.30294444444,1350460080],[0.0085833333333,1350460440],[0.0225,1350460800],[0.22358333333,1350461160],[0.035277777778,1350461520],[0.22088888889,1350461880],[0.42880555556,1350462240],[0.020222222222,1350462600],[0.218,1350462960],[0.031555555556,1350463320],[0.027194444444,1350463680],[0.0985,1350464040],[0.35652777778,1350464400],[0.47572222222,1350464760],[0.25161111111,1350465120],[0.39591666667,1350465480],[0.49633333333,1350465840],[0.09225,1350466200],[0,1350466560],[0.050416666667,1350466920],[0.10372222222,1350467280],[0.029166666667,1350467640],[0.024333333333,1350468000],[0.07475,1350468360],[0.29177777778,1350468720],[0.013083333333,1350469080],[0.14536111111,1350469440],[0.26847222222,1350469800],[0.18216666667,1350470160],[0.10222222222,1350470520],[0.19358333333,1350470880],[0.053805555556,1350471240],[0.14816666667,1350471600],[0.22397222222,1350471960],[0.01025,1350472320],[0,1350472680],[0.085611111111,1350473040],[0.015333333333,1350473400],[0.0082222222222,1350473760],[0.10069444444,1350474120],[0.29602777778,1350474480],[0.16466666667,1350474840],[0.45930555556,1350475200],[0.12125,1350475560],[0.23119444444,1350475920],[0.19061111111,1350476280],[0.012277777778,1350476640],[0.0065,1350477000],[0,1350477360],[0,1350477720],[0.033666666667,1350478080],[0.0028611111111,1350478440],[0,1350478800],[0.039888888889,1350479160],[0.16308333333,1350479520],[0.11302777778,1350479880],[0.10641666667,1350480240],[0.0058888888889,1350480600],[0.039722222222,1350480960],[0.048111111111,1350481320],[0.070694444444,1350481680],[0.060916666667,1350482040],[0.0069444444444,1350482400],[0.25288888889,1350482760],[0.28447222222,1350483120],[0.46719444444,1350483480],[0.079722222222,1350483840],[0.042583333333,1350484200],[0.055527777778,1350484560],[0.053472222222,1350484920],[0.071222222222,1350485280],[0.14538888889,1350485640],[0,1350486000],[0.13136111111,1350486360],[0.05775,1350486720],[0,1350487080],[0,1350487440],[0.13261111111,1350487800],[0.066416666667,1350488160],[0.16997222222,1350488520],[0.19747222222,1350488880],[0.012888888889,1350489240],[0.060166666667,1350489600],[0.24775,1350489960],[0.21008333333,1350490320],[0.19586111111,1350490680],[0.063972222222,1350491040],[0.0055277777778,1350491400],[0.15425,1350491760],[0.040666666667,1350492120],[0.054638888889,1350492480],[0.16366666667,1350492840],[0.37302777778,1350493200],[0.11797222222,1350493560],[0.015111111111,1350493920],[0.057111111111,1350494280],[0.29183333333,1350494640],[0.087305555556,1350495000],[0.10877777778,1350495360],[0,1350495720],[0.35838888889,1350496080],[0.26455555556,1350496440],[0.0031388888889,1350496800],[0.418,1350497160],[0.069472222222,1350497520],[0.046416666667,1350497880],[0.30627777778,1350498240],[0.090611111111,1350498600],[0,1350498960],[0.23097222222,1350499320],[0.40261111111,1350499680],[0.53072222222,1350500040],[0.68769444444,1350500400],[0.23547222222,1350500760],[0.39988888889,1350501120],[0.090111111111,1350501480],[0.048888888889,1350501840],[0.1275,1350502200],[0.25075,1350502560],[0.14805555556,1350502920],[0.053638888889,1350503280],[0.066777777778,1350503640],[0.17205555556,1350504000],[0.47402777778,1350504360],[0.096472222222,1350504720],[0.030138888889,1350505080],[0.66047222222,1350505440],[0.020416666667,1350505800],[0.39083333333,1350506160],[0.39933333333,1350506520],[0.0064444444444,1350506880],[0,1350507240],[0,1350507600],[0,1350507960],[0,1350508320],[0,1350508680],[0,1350509040],[0.13225,1350509400],[0.29808333333,1350509760],[0.11486111111,1350510120],[0.26966666667,1350510480],[0.067611111111,1350510840],[0.063555555556,1350511200],[0,1350511560],[0,1350511920],[0.075388888889,1350512280],[0,1350512640]]},{"ds_name":"a1","cluster_name":"","graph_type":"line","host_name":"","metric_name":"Nodes","datapoints":[[3,1350426240],[3,1350426600],[3,1350426960],[3,1350427320],[3,1350427680],[3,1350428040],[3,1350428400],[3,1350428760],[3,1350429120],[3,1350429480],[3,1350429840],[3,1350430200],[3,1350430560],[3,1350430920],[3,1350431280],[3,1350431640],[3,1350432000],[3,1350432360],[3,1350432720],[3,1350433080],[3,1350433440],[3,1350433800],[3,1350434160],[3,1350434520],[3,1350434880],[3,1350435240],[3,1350435600],[3,1350435960],[3,1350436320],[3,1350436680],[3,1350437040],[3,1350437400],[3,1350437760],[3,1350438120],[3,1350438480],[3,1350438840],[3,1350439200],[3,1350439560],[3,1350439920],[3,1350440280],[3,1350440640],[3,1350441000],[3,1350441360],[3,1350441720],[3,1350442080],[3,1350442440],[3,1350442800],[3,1350443160],[3,1350443520],[3,1350443880],[3,1350444240],[3,1350444600],[3,1350444960],[3,1350445320],[3,1350445680],[3,1350446040],[3,1350446400],[3,1350446760],[3,1350447120],[3,1350447480],[3,1350447840],[3,1350448200],[3,1350448560],[3,1350448920],[3,1350449280],[3,1350449640],[3,1350450000],[3,1350450360],[3,1350450720],[3,1350451080],[3,1350451440],[3,1350451800],[3,1350452160],[3,1350452520],[3,1350452880],[3,1350453240],[3,1350453600],[3,1350453960],[3,1350454320],[3,1350454680],[3,1350455040],[3,1350455400],[3,1350455760],[3,1350456120],[3,1350456480],[3,1350456840],[3,1350457200],[3,1350457560],[3,1350457920],[3,1350458280],[3,1350458640],[3,1350459000],[3,1350459360],[3,1350459720],[3,1350460080],[3,1350460440],[3,1350460800],[3,1350461160],[3,1350461520],[3,1350461880],[3,1350462240],[3,1350462600],[3,1350462960],[3,1350463320],[3,1350463680],[3,1350464040],[3,1350464400],[3,1350464760],[3,1350465120],[3,1350465480],[3,1350465840],[3,1350466200],[3,1350466560],[3,1350466920],[3,1350467280],[3,1350467640],[3,1350468000],[3,1350468360],[3,1350468720],[3,1350469080],[3,1350469440],[3,1350469800],[3,1350470160],[3,1350470520],[3,1350470880],[3,1350471240],[3,1350471600],[3,1350471960],[3,1350472320],[3,1350472680],[3,1350473040],[3,1350473400],[3,1350473760],[3,1350474120],[3,1350474480],[3,1350474840],[3,1350475200],[3,1350475560],[3,1350475920],[3,1350476280],[3,1350476640],[3,1350477000],[3,1350477360],[3,1350477720],[3,1350478080],[3,1350478440],[3,1350478800],[3,1350479160],[3,1350479520],[3,1350479880],[3,1350480240],[3,1350480600],[3,1350480960],[3,1350481320],[3,1350481680],[3,1350482040],[3,1350482400],[3,1350482760],[3,1350483120],[3,1350483480],[3,1350483840],[3,1350484200],[3,1350484560],[3,1350484920],[3,1350485280],[3,1350485640],[3,1350486000],[3,1350486360],[3,1350486720],[3,1350487080],[3,1350487440],[3,1350487800],[3,1350488160],[3,1350488520],[3,1350488880],[3,1350489240],[3,1350489600],[3,1350489960],[3,1350490320],[3,1350490680],[3,1350491040],[3,1350491400],[3,1350491760],[3,1350492120],[3,1350492480],[3,1350492840],[3,1350493200],[3,1350493560],[3,1350493920],[3,1350494280],[3,1350494640],[3,1350495000],[3,1350495360],[3,1350495720],[3,1350496080],[3,1350496440],[3,1350496800],[3,1350497160],[3,1350497520],[3,1350497880],[3,1350498240],[3,1350498600],[3,1350498960],[3,1350499320],[3,1350499680],[3,1350500040],[3,1350500400],[3,1350500760],[3,1350501120],[3,1350501480],[3,1350501840],[3,1350502200],[3,1350502560],[3,1350502920],[3,1350503280],[3,1350503640],[3,1350504000],[3,1350504360],[3,1350504720],[3,1350505080],[3,1350505440],[3,1350505800],[3,1350506160],[3,1350506520],[3,1350506880],[3,1350507240],[3,1350507600],[3,1350507960],[3,1350508320],[3,1350508680],[3,1350509040],[3,1350509400],[3,1350509760],[3,1350510120],[3,1350510480],[3,1350510840],[3,1350511200],[3,1350511560],[3,1350511920],[3,1350512280],[0,1350512640]]},{"ds_name":"a2","cluster_name":"","graph_type":"line","host_name":"","metric_name":"CPUs ","datapoints":[[6,1350426240],[6,1350426600],[6,1350426960],[6,1350427320],[6,1350427680],[6,1350428040],[6,1350428400],[6,1350428760],[6,1350429120],[6,1350429480],[6,1350429840],[6,1350430200],[6,1350430560],[6,1350430920],[6,1350431280],[6,1350431640],[6,1350432000],[6,1350432360],[6,1350432720],[6,1350433080],[6,1350433440],[6,1350433800],[6,1350434160],[6,1350434520],[6,1350434880],[6,1350435240],[6,1350435600],[6,1350435960],[6,1350436320],[6,1350436680],[6,1350437040],[6,1350437400],[6,1350437760],[6,1350438120],[6,1350438480],[6,1350438840],[6,1350439200],[6,1350439560],[6,1350439920],[6,1350440280],[6,1350440640],[6,1350441000],[6,1350441360],[6,1350441720],[6,1350442080],[6,1350442440],[6,1350442800],[6,1350443160],[6,1350443520],[6,1350443880],[6,1350444240],[6,1350444600],[6,1350444960],[6,1350445320],[6,1350445680],[6,1350446040],[6,1350446400],[6,1350446760],[6,1350447120],[6,1350447480],[6,1350447840],[6,1350448200],[6,1350448560],[6,1350448920],[6,1350449280],[6,1350449640],[6,1350450000],[6,1350450360],[6,1350450720],[6,1350451080],[6,1350451440],[6,1350451800],[6,1350452160],[6,1350452520],[6,1350452880],[6,1350453240],[6,1350453600],[6,1350453960],[6,1350454320],[6,1350454680],[6,1350455040],[6,1350455400],[6,1350455760],[6,1350456120],[6,1350456480],[6,1350456840],[6,1350457200],[6,1350457560],[6,1350457920],[6,1350458280],[6,1350458640],[6,1350459000],[6,1350459360],[6,1350459720],[6,1350460080],[6,1350460440],[6,1350460800],[6,1350461160],[6,1350461520],[6,1350461880],[6,1350462240],[6,1350462600],[6,1350462960],[6,1350463320],[6,1350463680],[6,1350464040],[6,1350464400],[6,1350464760],[6,1350465120],[6,1350465480],[6,1350465840],[6,1350466200],[6,1350466560],[6,1350466920],[6,1350467280],[6,1350467640],[6,1350468000],[6,1350468360],[6,1350468720],[6,1350469080],[6,1350469440],[6,1350469800],[6,1350470160],[6,1350470520],[6,1350470880],[6,1350471240],[6,1350471600],[6,1350471960],[6,1350472320],[6,1350472680],[6,1350473040],[6,1350473400],[6,1350473760],[6,1350474120],[6,1350474480],[6,1350474840],[6,1350475200],[6,1350475560],[6,1350475920],[6,1350476280],[6,1350476640],[6,1350477000],[6,1350477360],[6,1350477720],[6,1350478080],[6,1350478440],[6,1350478800],[6,1350479160],[6,1350479520],[6,1350479880],[6,1350480240],[6,1350480600],[6,1350480960],[6,1350481320],[6,1350481680],[6,1350482040],[6,1350482400],[6,1350482760],[6,1350483120],[6,1350483480],[6,1350483840],[6,1350484200],[6,1350484560],[6,1350484920],[6,1350485280],[6,1350485640],[6,1350486000],[6,1350486360],[6,1350486720],[6,1350487080],[6,1350487440],[6,1350487800],[6,1350488160],[6,1350488520],[6,1350488880],[6,1350489240],[6,1350489600],[6,1350489960],[6,1350490320],[6,1350490680],[6,1350491040],[6,1350491400],[6,1350491760],[6,1350492120],[6,1350492480],[6,1350492840],[6,1350493200],[6,1350493560],[6,1350493920],[6,1350494280],[6,1350494640],[6,1350495000],[6,1350495360],[6,1350495720],[6,1350496080],[6,1350496440],[6,1350496800],[6,1350497160],[6,1350497520],[6,1350497880],[6,1350498240],[6,1350498600],[6,1350498960],[6,1350499320],[6,1350499680],[6,1350500040],[6,1350500400],[6,1350500760],[6,1350501120],[6,1350501480],[6,1350501840],[6,1350502200],[6,1350502560],[6,1350502920],[6,1350503280],[6,1350503640],[6,1350504000],[6,1350504360],[6,1350504720],[6,1350505080],[6,1350505440],[6,1350505800],[6,1350506160],[6,1350506520],[6,1350506880],[6,1350507240],[6,1350507600],[6,1350507960],[6,1350508320],[6,1350508680],[6,1350509040],[6,1350509400],[6,1350509760],[6,1350510120],[6,1350510480],[6,1350510840],[6,1350511200],[6,1350511560],[6,1350511920],[6,1350512280],[0,1350512640]]},{"ds_name":"a3","cluster_name":"","graph_type":"line","host_name":"","metric_name":"Procs","datapoints":[[1.5333333333,1350426240],[3.1361111111,1350426600],[2.5277777778,1350426960],[1.3916666667,1350427320],[1.7138888889,1350427680],[1.2777777778,1350428040],[1.7638888889,1350428400],[1.4222222222,1350428760],[1.475,1350429120],[2.5138888889,1350429480],[0.64722222222,1350429840],[2.4305555556,1350430200],[2.4055555556,1350430560],[1.6333333333,1350430920],[1.7166666667,1350431280],[1.65,1350431640],[1.2388888889,1350432000],[4.4,1350432360],[2.0305555556,1350432720],[2.3583333333,1350433080],[1.15,1350433440],[1.3138888889,1350433800],[1.8583333333,1350434160],[2.4861111111,1350434520],[0.82777777778,1350434880],[1.8166666667,1350435240],[3.1083333333,1350435600],[1.4583333333,1350435960],[1.9694444444,1350436320],[1.4333333333,1350436680],[0,1350437040],[0.82777777778,1350437400],[1.75,1350437760],[1.25,1350438120],[2.7861111111,1350438480],[0.41666666667,1350438840],[0.88888888889,1350439200],[0.45,1350439560],[1.8972222222,1350439920],[1.3222222222,1350440280],[2.7555555556,1350440640],[1.8083333333,1350441000],[2.575,1350441360],[0.83611111111,1350441720],[1.6361111111,1350442080],[0.88333333333,1350442440],[1.1666666667,1350442800],[0.38888888889,1350443160],[3.3444444444,1350443520],[2.4444444444,1350443880],[2.175,1350444240],[1.5277777778,1350444600],[2.45,1350444960],[1.2972222222,1350445320],[0.70277777778,1350445680],[0.21111111111,1350446040],[0.95555555556,1350446400],[1.2527777778,1350446760],[1.7111111111,1350447120],[1.425,1350447480],[0.25833333333,1350447840],[0.25277777778,1350448200],[1.1694444444,1350448560],[2.3166666667,1350448920],[1.4527777778,1350449280],[2.1944444444,1350449640],[0.99166666667,1350450000],[1.0194444444,1350450360],[1.1916666667,1350450720],[1.0416666667,1350451080],[1.1666666667,1350451440],[1.0722222222,1350451800],[0.43611111111,1350452160],[0.83055555556,1350452520],[1.1944444444,1350452880],[0.71111111111,1350453240],[1.0222222222,1350453600],[1.5361111111,1350453960],[3.7694444444,1350454320],[1.0194444444,1350454680],[0.99444444444,1350455040],[1.9833333333,1350455400],[0.63611111111,1350455760],[0.55555555556,1350456120],[1.2111111111,1350456480],[1.1194444444,1350456840],[1.7722222222,1350457200],[0.95555555556,1350457560],[2.0111111111,1350457920],[0.95833333333,1350458280],[1.0916666667,1350458640],[2.3027777778,1350459000],[0.58055555556,1350459360],[2.5666666667,1350459720],[1.0416666667,1350460080],[0,1350460440],[0.63333333333,1350460800],[1.9444444444,1350461160],[3.3444444444,1350461520],[2.4555555556,1350461880],[4.5805555556,1350462240],[4.0138888889,1350462600],[1.3222222222,1350462960],[1.8916666667,1350463320],[1.6333333333,1350463680],[1.4083333333,1350464040],[3.075,1350464400],[0.53888888889,1350464760],[1.1027777778,1350465120],[1.2416666667,1350465480],[3.5694444444,1350465840],[0.86944444444,1350466200],[2.9138888889,1350466560],[1.4305555556,1350466920],[1.8555555556,1350467280],[0.66111111111,1350467640],[3.3944444444,1350468000],[1.4138888889,1350468360],[0.094444444444,1350468720],[0.46666666667,1350469080],[1.8666666667,1350469440],[2.0138888889,1350469800],[0.83055555556,1350470160],[1.5888888889,1350470520],[1.5027777778,1350470880],[3.2333333333,1350471240],[3.4777777778,1350471600],[3.6555555556,1350471960],[2.3222222222,1350472320],[1.0222222222,1350472680],[1.3888888889,1350473040],[1.9083333333,1350473400],[2.2777777778,1350473760],[1.6055555556,1350474120],[1.3472222222,1350474480],[1.0277777778,1350474840],[2.8638888889,1350475200],[1.2527777778,1350475560],[2.1361111111,1350475920],[1.5305555556,1350476280],[1.7888888889,1350476640],[3.5833333333,1350477000],[2.4638888889,1350477360],[0.53055555556,1350477720],[1.0416666667,1350478080],[0.41666666667,1350478440],[1.9722222222,1350478800],[1.4583333333,1350479160],[1.1,1350479520],[0.66388888889,1350479880],[2.8666666667,1350480240],[1.6916666667,1350480600],[2.0805555556,1350480960],[2.7361111111,1350481320],[1.7638888889,1350481680],[1.8805555556,1350482040],[2.8138888889,1350482400],[1.6444444444,1350482760],[1.1916666667,1350483120],[1.3416666667,1350483480],[1.5361111111,1350483840],[3.4527777778,1350484200],[2.1583333333,1350484560],[0.19444444444,1350484920],[1.9694444444,1350485280],[0.96111111111,1350485640],[1.7361111111,1350486000],[1.5111111111,1350486360],[1.1388888889,1350486720],[0.63055555556,1350487080],[0.83611111111,1350487440],[1.6472222222,1350487800],[2.1583333333,1350488160],[0.98888888889,1350488520],[2.8333333333,1350488880],[4.8305555556,1350489240],[2.4888888889,1350489600],[1.4611111111,1350489960],[4.8361111111,1350490320],[0.93611111111,1350490680],[2.325,1350491040],[3.525,1350491400],[1.9611111111,1350491760],[3.375,1350492120],[1.8166666667,1350492480],[4.3083333333,1350492840],[2.925,1350493200],[2.9361111111,1350493560],[1.0861111111,1350493920],[2.3222222222,1350494280],[3.1222222222,1350494640],[3.5861111111,1350495000],[2.8361111111,1350495360],[4.7416666667,1350495720],[4.0138888889,1350496080],[2.5194444444,1350496440],[2.3416666667,1350496800],[2.0111111111,1350497160],[2.9083333333,1350497520],[1.6333333333,1350497880],[1.5722222222,1350498240],[1.2555555556,1350498600],[1.8277777778,1350498960],[1.2166666667,1350499320],[3.8833333333,1350499680],[1.3222222222,1350500040],[1.1555555556,1350500400],[1.7611111111,1350500760],[2.5888888889,1350501120],[1.3305555556,1350501480],[0.53611111111,1350501840],[2.25,1350502200],[1.675,1350502560],[1.4083333333,1350502920],[2.1277777778,1350503280],[3,1350503640],[1.4166666667,1350504000],[2.3416666667,1350504360],[1.3694444444,1350504720],[0.65277777778,1350505080],[4.5527777778,1350505440],[2.6138888889,1350505800],[1.3333333333,1350506160],[2.8555555556,1350506520],[1.2166666667,1350506880],[1.2861111111,1350507240],[2.5638888889,1350507600],[0.86666666667,1350507960],[2.2472222222,1350508320],[0.77777777778,1350508680],[0.53055555556,1350509040],[1.6694444444,1350509400],[1.9944444444,1350509760],[1.1694444444,1350510120],[1.9916666667,1350510480],[2.6055555556,1350510840],[3.1027777778,1350511200],[1.8555555556,1350511560],[3.1138888889,1350511920],[1.5416666667,1350512280],[0,1350512640]]}]
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/cluster_metrics/memory_1hr.json b/branch-1.2/ambari-web/app/assets/data/cluster_metrics/memory_1hr.json
deleted file mode 100644
index 40d9170..0000000
--- a/branch-1.2/ambari-web/app/assets/data/cluster_metrics/memory_1hr.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
-  "href" : "http://ambari/api/clusters/vmc?fields=metrics/memory[1352702257,1352705857,15]",
-  "metrics" : {
-    "memory" : {
-      "Cache" : "[[2.19856896E8,1352706465],[2.19856896E8,1352706480],[2.1466453333E8,1352706495],[2.04279808E8,1352706510],[2.04279808E8,1352706525],[2.0454959787E8,1352706540],[2.0529152E8,1352706555],[2.059829248E8,1352706570],[2.07884288E8,1352706585],[2.07884288E8,1352706600],[2.0824692053E8,1352706615],[2.0924416E8,1352706630],[2.0924416E8,1352706645],[2.0981213867E8,1352706660],[2.1137408E8,1352706675],[2.11685376E8,1352706690],[2.1254144E8,1352706705],[2.1254144E8,1352706720],[2.129313792E8,1352706735],[2.14003712E8,1352706750],[2.14003712E8,1352706765],[2.1431282347E8,1352706780],[2.1516288E8,1352706795],[2.15523328E8,1352706810],[2.1696512E8,1352706825],[2.1696512E8,1352706840],[2.1444962987E8,1352706855],[2.07532032E8,1352706870],[2.07532032E8,1352706885],[1.998348288E8,1352706900],[1.7866752E8,1352706915],[1.7916340907E8,1352706930],[1.80527104E8,1352706945],[1.80527104E8,1352706960],[1.806893056E8,1352706975],[1.81743616E8,1352706990],[1.81743616E8,1352707005],[1.819705344E8,1352707020],[1.82878208E8,1352707035],[1.830600704E8,1352707050],[1.84242176E8,1352707065],[1.84242176E8,1352707080],[1.844666368E8,1352707095],[1.8536448E8,1352707110],[1.8536448E8,1352707125],[1.8567031467E8,1352707140],[1.8651136E8,1352707155],[1.868283904E8,1352707170],[1.88096512E8,1352707185],[1.88096512E8,1352707200],[1.888616448E8,1352707215],[1.91922176E8,1352707230],[1.91922176E8,1352707245],[1.90906368E8,1352707260],[1.86843136E8,1352707275],[1.873313792E8,1352707290],[1.89284352E8,1352707305],[1.89284352E8,1352707320],[1.894621184E8,1352707335],[1.90173184E8,1352707350],[1.90173184E8,1352707365],[1.904254976E8,1352707380],[1.91434752E8,1352707395],[1.895170048E8,1352707410],[1.81846016E8,1352707425],[1.81846016E8,1352707440],[1.821253632E8,1352707455],[1.83242752E8,1352707470],[1.83242752E8,1352707485],[1.8375748267E8,1352707500],[1.84786944E8,1352707515],[1.853145088E8,1352707530],[1.86105856E8,1352707545],[1.86105856E8,1352707560],[1.8650589867E8,1352707575],[1.87305984E8,1352707590],[1.87305984E8,1352707605],[1.84549376E8,1352707620],[1.7903616E8,1352707635],[1.796603904E8,1352707650],[1.80596736E8,1352707665],[1.80596736E8,1352707680],[1.8098449067E8,1352707695],[1.8176E8,1352707710],[1.8176E8,1352707725],[1.82194176E8,1352707740],[1.83062528E8,1352707755],[1.8359883093E8,1352707770],[1.85073664E8,1352707785],[1.85073664E8,1352707800],[1.833091072E8,1352707815],[1.7625088E8,1352707830],[1.7642018133E8,1352707845],[1.7752064E8,1352707860],[1.7752064E8,1352707875],[1.7768502613E8,1352707890],[1.78753536E8,1352707905],[1.78753536E8,1352707920],[1.789779968E8,1352707935],[1.7987584E8,1352707950],[1.7987584E8,1352707965],[1.788010496E8,1352707980],[1.74501888E8,1352707995],[1.7473344853E8,1352708010],[1.76238592E8,1352708025],[1.76238592E8,1352708040],[1.76238592E8,1352708055],[1.77414144E8,1352708070],[1.7749906773E8,1352708085],[1.78688E8,1352708100],[1.78688E8,1352708115],[1.78688E8,1352708130],[1.80191232E8,1352708145],[1.80191232E8,1352708160],[1.80191232E8,1352708175],[1.77688576E8,1352708190],[1.77688576E8,1352708205],[1.78950144E8,1352708220],[1.78950144E8,1352708235],[1.78950144E8,1352708250],[1.80211712E8,1352708265],[1.80211712E8,1352708280],[1.80211712E8,1352708295],[1.81460992E8,1352708310],[1.81460992E8,1352708325],[1.7311552853E8,1352708340],[1.72519424E8,1352708355],[1.72519424E8,1352708370],[1.7511519573E8,1352708385],[1.75300608E8,1352708400],[1.76685056E8,1352708415],[1.76898048E8,1352708430],[1.76898048E8,1352708445],[1.7891437227E8,1352708460],[1.79224576E8,1352708475],[1.79224576E8,1352708490],[1.802764288E8,1352708505],[1.80539392E8,1352708520],[1.7745455787E8,1352708535],[1.763328E8,1352708550],[1.763328E8,1352708565],[1.7743817387E8,1352708580],[1.77840128E8,1352708595],[1.77840128E8,1352708610],[1.78618368E8,1352708625],[1.79007488E8,1352708640],[1.8176E8,1352708655],[1.83595008E8,1352708670],[1.83595008E8,1352708685],[1.780629504E8,1352708700],[1.74374912E8,1352708715],[1.7513076053E8,1352708730],[1.75792128E8,1352708745],[1.75792128E8,1352708760],[1.7646277973E8,1352708775],[1.770496E8,1352708790],[1.770496E8,1352708805],[1.7812220587E8,1352708820],[1.79060736E8,1352708835],[1.79060736E8,1352708850],[1.7965902507E8,1352708865],[1.80342784E8,1352708880],[1.80342784E8,1352708895],[1.80342784E8,1352708910],[1.80342784E8,1352708925],[1.8159670613E8,1352708940],[1.82693888E8,1352708955],[1.8338420053E8,1352708970],[1.83988224E8,1352708985],[1.83988224E8,1352709000],[1.8468072107E8,1352709015],[1.85286656E8,1352709030],[1.85286656E8,1352709045],[1.85974784E8,1352709060],[1.86761216E8,1352709075],[1.835532288E8,1352709090],[1.78741248E8,1352709105],[1.78741248E8,1352709120],[1.790902272E8,1352709135],[1.79613696E8,1352709150],[1.79613696E8,1352709165],[1.8018904747E8,1352709180],[1.80846592E8,1352709195],[1.80846592E8,1352709210],[1.8165705387E8,1352709225],[1.82366208E8,1352709240],[1.8303249067E8,1352709255],[1.83615488E8,1352709270],[1.83615488E8,1352709285],[1.7966585173E8,1352709300],[1.7620992E8,1352709315],[1.7620992E8,1352709330],[1.767931904E8,1352709345],[1.77303552E8,1352709360],[1.780260864E8,1352709375],[1.78507776E8,1352709390],[1.78507776E8,1352709405],[1.79433472E8,1352709420],[1.7989632E8,1352709435],[1.7989632E8,1352709450],[1.8057079467E8,1352709465],[1.80908032E8,1352709480],[1.821786112E8,1352709495],[1.8264064E8,1352709510],[1.8264064E8,1352709525],[1.8378752E8,1352709540],[1.8407424E8,1352709555],[1.8407424E8,1352709570],[1.850310656E8,1352709585],[1.85270272E8,1352709600],[1.85270272E8,1352709615],[1.7653050027E8,1352709630],[1.7518592E8,1352709645],[1.7745291947E8,1352709660],[1.77614848E8,1352709675],[1.77614848E8,1352709690],[1.7874015573E8,1352709705],[1.7891328E8,1352709720],[1.7891328E8,1352709735],[1.805000704E8,1352709750],[1.80744192E8,1352709765],[1.817174016E8,1352709780],[1.81960704E8,1352709795],[1.81960704E8,1352709810],[1.7739803307E8,1352709825],[1.7573888E8,1352709840],[1.7671509333E8,1352709855],[1.7707008E8,1352709870],[1.7707008E8,1352709885],[1.7850886827E8,1352709900],[1.79032064E8,1352709915],[0.0,1352709930],[0.0,1352709945],[0.0,1352709960],[0.0,1352709975],[0.0,1352709990],[0.0,1352710005],[0.0,1352710020],[0.0,1352710035],[0.0,1352710050],[0.0,1352710065]]",
-      "Swap" : "[[9.1787264E7,1352706465],[9.1787264E7,1352706480],[9.2783957333E7,1352706495],[9.4777344E7,1352706510],[9.4777344E7,1352706525],[9.4777344E7,1352706540],[9.4777344E7,1352706555],[9.4777344E7,1352706570],[9.4777344E7,1352706585],[9.4777344E7,1352706600],[9.47806208E7,1352706615],[9.4789632E7,1352706630],[9.4789632E7,1352706645],[9.4789632E7,1352706660],[9.4789632E7,1352706675],[9.4789632E7,1352706690],[9.4789632E7,1352706705],[9.4789632E7,1352706720],[9.4789632E7,1352706735],[9.4789632E7,1352706750],[9.4789632E7,1352706765],[9.4789632E7,1352706780],[9.4789632E7,1352706795],[9.4789632E7,1352706810],[9.4789632E7,1352706825],[9.4789632E7,1352706840],[9.49403648E7,1352706855],[9.535488E7,1352706870],[9.535488E7,1352706885],[9.6414378667E7,1352706900],[9.9328E7,1352706915],[9.93378304E7,1352706930],[9.9364864E7,1352706945],[9.9364864E7,1352706960],[9.9364864E7,1352706975],[9.9364864E7,1352706990],[9.9364864E7,1352707005],[9.9364864E7,1352707020],[9.9364864E7,1352707035],[9.9364864E7,1352707050],[9.9364864E7,1352707065],[9.9364864E7,1352707080],[9.9377152E7,1352707095],[9.9426304E7,1352707110],[9.9426304E7,1352707125],[9.9426304E7,1352707140],[9.9426304E7,1352707155],[9.9426304E7,1352707170],[9.9426304E7,1352707185],[9.9426304E7,1352707200],[9.94238464E7,1352707215],[9.9414016E7,1352707230],[9.9414016E7,1352707245],[9.95704832E7,1352707260],[1.00196352E8,1352707275],[1.001955328E8,1352707290],[1.00192256E8,1352707305],[1.00192256E8,1352707320],[1.002070016E8,1352707335],[1.00265984E8,1352707350],[1.00265984E8,1352707365],[1.00265984E8,1352707380],[1.00265984E8,1352707395],[1.003610112E8,1352707410],[1.0074112E8,1352707425],[1.0074112E8,1352707440],[1.0074112E8,1352707455],[1.0074112E8,1352707470],[1.0074112E8,1352707485],[1.0074112E8,1352707500],[1.0074112E8,1352707515],[1.007525888E8,1352707530],[1.00769792E8,1352707545],[1.00769792E8,1352707560],[1.00769792E8,1352707575],[1.00769792E8,1352707590],[1.00769792E8,1352707605],[1.0085444267E8,1352707620],[1.01023744E8,1352707635],[1.01023744E8,1352707650],[1.01023744E8,1352707665],[1.01023744E8,1352707680],[1.01023744E8,1352707695],[1.01023744E8,1352707710],[1.01023744E8,1352707725],[1.01023744E8,1352707740],[1.01023744E8,1352707755],[1.01023744E8,1352707770],[1.01023744E8,1352707785],[1.01023744E8,1352707800],[1.011286016E8,1352707815],[1.01548032E8,1352707830],[1.01548032E8,1352707845],[1.01548032E8,1352707860],[1.01548032E8,1352707875],[1.01548032E8,1352707890],[1.01548032E8,1352707905],[1.01548032E8,1352707920],[1.01548032E8,1352707935],[1.01548032E8,1352707950],[1.01548032E8,1352707965],[1.018363904E8,1352707980],[1.02989824E8,1352707995],[1.02989824E8,1352708010],[1.02989824E8,1352708025],[1.02989824E8,1352708040],[1.02989824E8,1352708055],[1.02989824E8,1352708070],[1.02989824E8,1352708085],[1.02989824E8,1352708100],[1.02989824E8,1352708115],[1.02989824E8,1352708130],[1.02989824E8,1352708145],[1.02989824E8,1352708160],[1.02989824E8,1352708175],[1.03907328E8,1352708190],[1.03907328E8,1352708205],[1.03903232E8,1352708220],[1.03903232E8,1352708235],[1.03903232E8,1352708250],[1.03903232E8,1352708265],[1.03903232E8,1352708280],[1.03903232E8,1352708295],[1.03903232E8,1352708310],[1.03903232E8,1352708325],[1.0469075627E8,1352708340],[1.04747008E8,1352708355],[1.04747008E8,1352708370],[1.04747008E8,1352708385],[1.04747008E8,1352708400],[1.04747008E8,1352708415],[1.04747008E8,1352708430],[1.04747008E8,1352708445],[1.04747008E8,1352708460],[1.04747008E8,1352708475],[1.04747008E8,1352708490],[1.04747008E8,1352708505],[1.04747008E8,1352708520],[1.0537478827E8,1352708535],[1.05603072E8,1352708550],[1.05603072E8,1352708565],[1.05603072E8,1352708580],[1.05603072E8,1352708595],[1.05603072E8,1352708610],[1.05603072E8,1352708625],[1.05603072E8,1352708640],[1.05603072E8,1352708655],[1.05603072E8,1352708670],[1.05603072E8,1352708685],[1.07175936E8,1352708700],[1.08224512E8,1352708715],[1.0792741547E8,1352708730],[1.07667456E8,1352708745],[1.07667456E8,1352708760],[1.07667456E8,1352708775],[1.07667456E8,1352708790],[1.07667456E8,1352708805],[1.07667456E8,1352708820],[1.07667456E8,1352708835],[1.07667456E8,1352708850],[1.07667456E8,1352708865],[1.07667456E8,1352708880],[1.07667456E8,1352708895],[1.07667456E8,1352708910],[1.07667456E8,1352708925],[1.07667456E8,1352708940],[1.07667456E8,1352708955],[1.07667456E8,1352708970],[1.07667456E8,1352708985],[1.07667456E8,1352709000],[1.077067776E8,1352709015],[1.07741184E8,1352709030],[1.07741184E8,1352709045],[1.07741184E8,1352709060],[1.07741184E8,1352709075],[1.086849024E8,1352709090],[1.1010048E8,1352709105],[1.1010048E8,1352709120],[1.1010048E8,1352709135],[1.1010048E8,1352709150],[1.1010048E8,1352709165],[1.1047130453E8,1352709180],[1.10895104E8,1352709195],[1.10895104E8,1352709210],[1.10895104E8,1352709225],[1.10895104E8,1352709240],[1.10895104E8,1352709255],[1.10895104E8,1352709270],[1.10895104E8,1352709285],[1.1094971733E8,1352709300],[1.10997504E8,1352709315],[1.10997504E8,1352709330],[1.10997504E8,1352709345],[1.10997504E8,1352709360],[1.110147072E8,1352709375],[1.11026176E8,1352709390],[1.11026176E8,1352709405],[1.1103163733E8,1352709420],[1.11034368E8,1352709435],[1.11034368E8,1352709450],[1.11017984E8,1352709465],[1.11009792E8,1352709480],[1.11009792E8,1352709495],[1.11009792E8,1352709510],[1.11009792E8,1352709525],[1.11009792E8,1352709540],[1.11009792E8,1352709555],[1.11009792E8,1352709570],[1.11009792E8,1352709585],[1.11009792E8,1352709600],[1.11009792E8,1352709615],[1.1142867627E8,1352709630],[1.1149312E8,1352709645],[1.1149312E8,1352709660],[1.1149312E8,1352709675],[1.1149312E8,1352709690],[1.1149312E8,1352709705],[1.1149312E8,1352709720],[1.1149312E8,1352709735],[1.1149666987E8,1352709750],[1.11497216E8,1352709765],[1.116151808E8,1352709780],[1.11644672E8,1352709795],[1.11644672E8,1352709810],[1.130594304E8,1352709825],[1.13573888E8,1352709840],[1.135919104E8,1352709855],[1.13598464E8,1352709870],[1.13598464E8,1352709885],[1.136615424E8,1352709900],[1.1368448E8,1352709915],[0.0,1352709930],[0.0,1352709945],[0.0,1352709960],[0.0,1352709975],[0.0,1352709990],[0.0,1352710005],[0.0,1352710020],[0.0,1352710035],[0.0,1352710050],[0.0,1352710065]]",
-      "Buffer" : "[[9355264.0,1352706465],[9355264.0,1352706480],[7940778.6667,1352706495],[5111808.0,1352706510],[5111808.0,1352706525],[5121638.4,1352706540],[5148672.0,1352706555],[5161779.2,1352706570],[5197824.0,1352706585],[5197824.0,1352706600],[5213115.7333,1352706615],[5255168.0,1352706630],[5255168.0,1352706645],[5438668.8,1352706660],[5943296.0,1352706675],[5955310.9333,1352706690],[5988352.0,1352706705],[5988352.0,1352706720],[6003643.7333,1352706735],[6045696.0,1352706750],[6045696.0,1352706765],[6060987.7333,1352706780],[6103040.0,1352706795],[6353715.2,1352706810],[7356416.0,1352706825],[7356416.0,1352706840],[7321463.4667,1352706855],[7225344.0,1352706870],[7225344.0,1352706885],[6552507.7333,1352706900],[4702208.0,1352706915],[4719684.2667,1352706930],[4767744.0,1352706945],[4767744.0,1352706960],[4775389.8667,1352706975],[4825088.0,1352706990],[4825088.0,1352707005],[4835737.6,1352707020],[4878336.0,1352707035],[4885435.7333,1352707050],[4931584.0,1352707065],[4931584.0,1352707080],[4942233.6,1352707095],[4984832.0,1352707110],[4984832.0,1352707125],[5001216.0,1352707140],[5046272.0,1352707155],[5060198.4,1352707170],[5115904.0,1352707185],[5115904.0,1352707200],[5178163.2,1352707215],[5427200.0,1352707230],[5427200.0,1352707245],[5410816.0,1352707260],[5345280.0,1352707275],[5567283.2,1352707290],[6455296.0,1352707305],[6455296.0,1352707320],[6463488.0,1352707335],[6496256.0,1352707350],[6496256.0,1352707365],[6507724.8,1352707380],[6553600.0,1352707395],[6362726.4,1352707410],[5599232.0,1352707425],[5599232.0,1352707440],[5609881.6,1352707455],[5652480.0,1352707470],[5652480.0,1352707485],[5756245.3333,1352707500],[5963776.0,1352707515],[5989990.4,1352707530],[6029312.0,1352707545],[6029312.0,1352707560],[6044330.6667,1352707575],[6074368.0,1352707590],[6074368.0,1352707605],[6008832.0,1352707620],[5877760.0,1352707635],[5897420.8,1352707650],[5926912.0,1352707665],[5926912.0,1352707680],[5946026.6667,1352707695],[5984256.0,1352707710],[5984256.0,1352707725],[5985621.3333,1352707740],[5988352.0,1352707755],[6005828.2667,1352707770],[6053888.0,1352707785],[6053888.0,1352707800],[6065356.8,1352707815],[6111232.0,1352707830],[6119970.1333,1352707845],[6176768.0,1352707860],[6176768.0,1352707875],[6183321.6,1352707890],[6225920.0,1352707905],[6225920.0,1352707920],[6237388.8,1352707935],[6283264.0,1352707950],[6283264.0,1352707965],[6288998.4,1352707980],[6311936.0,1352707995],[6320674.1333,1352708010],[6377472.0,1352708025],[6377472.0,1352708040],[6377472.0,1352708055],[6443008.0,1352708070],[6446284.8,1352708085],[6492160.0,1352708100],[6492160.0,1352708115],[6492160.0,1352708130],[6549504.0,1352708145],[6549504.0,1352708160],[6549504.0,1352708175],[7241728.0,1352708190],[7241728.0,1352708205],[7299072.0,1352708220],[7299072.0,1352708235],[7299072.0,1352708250],[7356416.0,1352708265],[7356416.0,1352708280],[7356416.0,1352708295],[7413760.0,1352708310],[7413760.0,1352708325],[7467281.0667,1352708340],[7471104.0,1352708355],[7471104.0,1352708370],[7532270.9333,1352708385],[7536640.0,1352708400],[7586338.1333,1352708415],[7593984.0,1352708430],[7593984.0,1352708445],[7657881.6,1352708460],[7667712.0,1352708475],[7667712.0,1352708490],[7713587.2,1352708505],[7725056.0,1352708520],[7707033.6,1352708535],[7700480.0,1352708550],[7700480.0,1352708565],[7742532.2667,1352708580],[7757824.0,1352708595],[7757824.0,1352708610],[7796053.3333,1352708625],[7815168.0,1352708640],[7923302.4,1352708655],[7995392.0,1352708670],[7995392.0,1352708685],[5989990.4,1352708700],[4653056.0,1352708715],[4709853.8667,1352708730],[4759552.0,1352708745],[4759552.0,1352708760],[4794504.5333,1352708775],[4825088.0,1352708790],[4825088.0,1352708805],[4849117.8667,1352708820],[4870144.0,1352708835],[4870144.0,1352708850],[4896904.5333,1352708865],[4927488.0,1352708880],[4927488.0,1352708895],[4927488.0,1352708910],[4927488.0,1352708925],[4984285.8667,1352708940],[5033984.0,1352708955],[5073305.6,1352708970],[5107712.0,1352708985],[5107712.0,1352709000],[5138295.4667,1352709015],[5165056.0,1352709030],[5165056.0,1352709045],[5191816.5333,1352709060],[5222400.0,1352709075],[4839014.4,1352709090],[4263936.0,1352709105],[4263936.0,1352709120],[4280320.0,1352709135],[4304896.0,1352709150],[4304896.0,1352709165],[4322099.2,1352709180],[4341760.0,1352709195],[4341760.0,1352709210],[4376712.5333,1352709225],[4407296.0,1352709240],[4442248.5333,1352709255],[4472832.0,1352709270],[4472832.0,1352709285],[4225979.7333,1352709300],[4009984.0,1352709315],[4009984.0,1352709330],[4044936.5333,1352709345],[4075520.0,1352709360],[4102553.6,1352709375],[4120576.0,1352709390],[4120576.0,1352709405],[4153344.0,1352709420],[4169728.0,1352709435],[4169728.0,1352709450],[4197034.6667,1352709465],[4210688.0,1352709480],[4342852.2667,1352709495],[4390912.0,1352709510],[4390912.0,1352709525],[4449894.4,1352709540],[4464640.0,1352709555],[4464640.0,1352709570],[4510515.2,1352709585],[4521984.0,1352709600],[4521984.0,1352709615],[4266393.6,1352709630],[4227072.0,1352709645],[4670532.2667,1352709660],[4702208.0,1352709675],[4702208.0,1352709690],[4759005.8667,1352709705],[4767744.0,1352709720],[4767744.0,1352709735],[4803242.6667,1352709750],[4808704.0,1352709765],[4798873.6,1352709780],[4796416.0,1352709795],[4796416.0,1352709810],[4727330.1333,1352709825],[4702208.0,1352709840],[4744260.2667,1352709855],[4759552.0,1352709870],[4759552.0,1352709885],[4807611.7333,1352709900],[4825088.0,1352709915],[0.0,1352709930],[0.0,1352709945],[0.0,1352709960],[0.0,1352709975],[0.0,1352709990],[0.0,1352710005],[0.0,1352710020],[0.0,1352710035],[0.0,1352710050],[0.0,1352710065]]",
-      "Share" : "[[0.0,1352706465],[0.0,1352706480],[0.0,1352706495],[0.0,1352706510],[0.0,1352706525],[0.0,1352706540],[0.0,1352706555],[0.0,1352706570],[0.0,1352706585],[0.0,1352706600],[0.0,1352706615],[0.0,1352706630],[0.0,1352706645],[0.0,1352706660],[0.0,1352706675],[0.0,1352706690],[0.0,1352706705],[0.0,1352706720],[0.0,1352706735],[0.0,1352706750],[0.0,1352706765],[0.0,1352706780],[0.0,1352706795],[0.0,1352706810],[0.0,1352706825],[0.0,1352706840],[0.0,1352706855],[0.0,1352706870],[0.0,1352706885],[0.0,1352706900],[0.0,1352706915],[0.0,1352706930],[0.0,1352706945],[0.0,1352706960],[0.0,1352706975],[0.0,1352706990],[0.0,1352707005],[0.0,1352707020],[0.0,1352707035],[0.0,1352707050],[0.0,1352707065],[0.0,1352707080],[0.0,1352707095],[0.0,1352707110],[0.0,1352707125],[0.0,1352707140],[0.0,1352707155],[0.0,1352707170],[0.0,1352707185],[0.0,1352707200],[0.0,1352707215],[0.0,1352707230],[0.0,1352707245],[0.0,1352707260],[0.0,1352707275],[0.0,1352707290],[0.0,1352707305],[0.0,1352707320],[0.0,1352707335],[0.0,1352707350],[0.0,1352707365],[0.0,1352707380],[0.0,1352707395],[0.0,1352707410],[0.0,1352707425],[0.0,1352707440],[0.0,1352707455],[0.0,1352707470],[0.0,1352707485],[0.0,1352707500],[0.0,1352707515],[0.0,1352707530],[0.0,1352707545],[0.0,1352707560],[0.0,1352707575],[0.0,1352707590],[0.0,1352707605],[0.0,1352707620],[0.0,1352707635],[0.0,1352707650],[0.0,1352707665],[0.0,1352707680],[0.0,1352707695],[0.0,1352707710],[0.0,1352707725],[0.0,1352707740],[0.0,1352707755],[0.0,1352707770],[0.0,1352707785],[0.0,1352707800],[0.0,1352707815],[0.0,1352707830],[0.0,1352707845],[0.0,1352707860],[0.0,1352707875],[0.0,1352707890],[0.0,1352707905],[0.0,1352707920],[0.0,1352707935],[0.0,1352707950],[0.0,1352707965],[0.0,1352707980],[0.0,1352707995],[0.0,1352708010],[0.0,1352708025],[0.0,1352708040],[0.0,1352708055],[0.0,1352708070],[0.0,1352708085],[0.0,1352708100],[0.0,1352708115],[0.0,1352708130],[0.0,1352708145],[0.0,1352708160],[0.0,1352708175],[0.0,1352708190],[0.0,1352708205],[0.0,1352708220],[0.0,1352708235],[0.0,1352708250],[0.0,1352708265],[0.0,1352708280],[0.0,1352708295],[0.0,1352708310],[0.0,1352708325],[0.0,1352708340],[0.0,1352708355],[0.0,1352708370],[0.0,1352708385],[0.0,1352708400],[0.0,1352708415],[0.0,1352708430],[0.0,1352708445],[0.0,1352708460],[0.0,1352708475],[0.0,1352708490],[0.0,1352708505],[0.0,1352708520],[0.0,1352708535],[0.0,1352708550],[0.0,1352708565],[0.0,1352708580],[0.0,1352708595],[0.0,1352708610],[0.0,1352708625],[0.0,1352708640],[0.0,1352708655],[0.0,1352708670],[0.0,1352708685],[0.0,1352708700],[0.0,1352708715],[0.0,1352708730],[0.0,1352708745],[0.0,1352708760],[0.0,1352708775],[0.0,1352708790],[0.0,1352708805],[0.0,1352708820],[0.0,1352708835],[0.0,1352708850],[0.0,1352708865],[0.0,1352708880],[0.0,1352708895],[0.0,1352708910],[0.0,1352708925],[0.0,1352708940],[0.0,1352708955],[0.0,1352708970],[0.0,1352708985],[0.0,1352709000],[0.0,1352709015],[0.0,1352709030],[0.0,1352709045],[0.0,1352709060],[0.0,1352709075],[0.0,1352709090],[0.0,1352709105],[0.0,1352709120],[0.0,1352709135],[0.0,1352709150],[0.0,1352709165],[0.0,1352709180],[0.0,1352709195],[0.0,1352709210],[0.0,1352709225],[0.0,1352709240],[0.0,1352709255],[0.0,1352709270],[0.0,1352709285],[0.0,1352709300],[0.0,1352709315],[0.0,1352709330],[0.0,1352709345],[0.0,1352709360],[0.0,1352709375],[0.0,1352709390],[0.0,1352709405],[0.0,1352709420],[0.0,1352709435],[0.0,1352709450],[0.0,1352709465],[0.0,1352709480],[0.0,1352709495],[0.0,1352709510],[0.0,1352709525],[0.0,1352709540],[0.0,1352709555],[0.0,1352709570],[0.0,1352709585],[0.0,1352709600],[0.0,1352709615],[0.0,1352709630],[0.0,1352709645],[0.0,1352709660],[0.0,1352709675],[0.0,1352709690],[0.0,1352709705],[0.0,1352709720],[0.0,1352709735],[0.0,1352709750],[0.0,1352709765],[0.0,1352709780],[0.0,1352709795],[0.0,1352709810],[0.0,1352709825],[0.0,1352709840],[0.0,1352709855],[0.0,1352709870],[0.0,1352709885],[0.0,1352709900],[0.0,1352709915],[0.0,1352709930],[0.0,1352709945],[0.0,1352709960],[0.0,1352709975],[0.0,1352709990],[0.0,1352710005],[0.0,1352710020],[0.0,1352710035],[0.0,1352710050],[0.0,1352710065]]",
-      "Total" : "[[2.104250368E9,1352706465],[2.104250368E9,1352706480],[2.104250368E9,1352706495],[2.104250368E9,1352706510],[2.104250368E9,1352706525],[2.104250368E9,1352706540],[2.104250368E9,1352706555],[2.104250368E9,1352706570],[2.104250368E9,1352706585],[2.104250368E9,1352706600],[2.104250368E9,1352706615],[2.104250368E9,1352706630],[2.104250368E9,1352706645],[2.104250368E9,1352706660],[2.104250368E9,1352706675],[2.104250368E9,1352706690],[2.104250368E9,1352706705],[2.104250368E9,1352706720],[2.104250368E9,1352706735],[2.104250368E9,1352706750],[2.104250368E9,1352706765],[2.104250368E9,1352706780],[2.104250368E9,1352706795],[2.104250368E9,1352706810],[2.104250368E9,1352706825],[2.104250368E9,1352706840],[2.104250368E9,1352706855],[2.104250368E9,1352706870],[2.104250368E9,1352706885],[2.104250368E9,1352706900],[2.104250368E9,1352706915],[2.104250368E9,1352706930],[2.104250368E9,1352706945],[2.104250368E9,1352706960],[2.104250368E9,1352706975],[2.104250368E9,1352706990],[2.104250368E9,1352707005],[2.104250368E9,1352707020],[2.104250368E9,1352707035],[2.104250368E9,1352707050],[2.104250368E9,1352707065],[2.104250368E9,1352707080],[2.104250368E9,1352707095],[2.104250368E9,1352707110],[2.104250368E9,1352707125],[2.104250368E9,1352707140],[2.104250368E9,1352707155],[2.104250368E9,1352707170],[2.104250368E9,1352707185],[2.104250368E9,1352707200],[2.104250368E9,1352707215],[2.104250368E9,1352707230],[2.104250368E9,1352707245],[2.104250368E9,1352707260],[2.104250368E9,1352707275],[2.104250368E9,1352707290],[2.104250368E9,1352707305],[2.104250368E9,1352707320],[2.104250368E9,1352707335],[2.104250368E9,1352707350],[2.104250368E9,1352707365],[2.104250368E9,1352707380],[2.104250368E9,1352707395],[2.104250368E9,1352707410],[2.104250368E9,1352707425],[2.104250368E9,1352707440],[2.104250368E9,1352707455],[2.104250368E9,1352707470],[2.104250368E9,1352707485],[2.104250368E9,1352707500],[2.104250368E9,1352707515],[2.104250368E9,1352707530],[2.104250368E9,1352707545],[2.104250368E9,1352707560],[2.104250368E9,1352707575],[2.104250368E9,1352707590],[2.104250368E9,1352707605],[2.104250368E9,1352707620],[2.104250368E9,1352707635],[2.104250368E9,1352707650],[2.104250368E9,1352707665],[2.104250368E9,1352707680],[2.104250368E9,1352707695],[2.104250368E9,1352707710],[2.104250368E9,1352707725],[2.104250368E9,1352707740],[2.104250368E9,1352707755],[2.104250368E9,1352707770],[2.104250368E9,1352707785],[2.104250368E9,1352707800],[2.104250368E9,1352707815],[2.104250368E9,1352707830],[2.104250368E9,1352707845],[2.104250368E9,1352707860],[2.104250368E9,1352707875],[2.104250368E9,1352707890],[2.104250368E9,1352707905],[2.104250368E9,1352707920],[2.104250368E9,1352707935],[2.104250368E9,1352707950],[2.104250368E9,1352707965],[2.104250368E9,1352707980],[2.104250368E9,1352707995],[2.104250368E9,1352708010],[2.104250368E9,1352708025],[2.104250368E9,1352708040],[2.104250368E9,1352708055],[2.104250368E9,1352708070],[2.104250368E9,1352708085],[2.104250368E9,1352708100],[2.104250368E9,1352708115],[2.104250368E9,1352708130],[2.104250368E9,1352708145],[2.104250368E9,1352708160],[2.104250368E9,1352708175],[2.104250368E9,1352708190],[2.104250368E9,1352708205],[2.104250368E9,1352708220],[2.104250368E9,1352708235],[2.104250368E9,1352708250],[2.104250368E9,1352708265],[2.104250368E9,1352708280],[2.104250368E9,1352708295],[2.104250368E9,1352708310],[2.104250368E9,1352708325],[2.104250368E9,1352708340],[2.104250368E9,1352708355],[2.104250368E9,1352708370],[2.104250368E9,1352708385],[2.104250368E9,1352708400],[2.104250368E9,1352708415],[2.104250368E9,1352708430],[2.104250368E9,1352708445],[2.104250368E9,1352708460],[2.104250368E9,1352708475],[2.104250368E9,1352708490],[2.104250368E9,1352708505],[2.104250368E9,1352708520],[2.104250368E9,1352708535],[2.104250368E9,1352708550],[2.104250368E9,1352708565],[2.104250368E9,1352708580],[2.104250368E9,1352708595],[2.104250368E9,1352708610],[2.104250368E9,1352708625],[2.104250368E9,1352708640],[2.104250368E9,1352708655],[2.104250368E9,1352708670],[2.104250368E9,1352708685],[2.104250368E9,1352708700],[2.104250368E9,1352708715],[2.104250368E9,1352708730],[2.104250368E9,1352708745],[2.104250368E9,1352708760],[2.104250368E9,1352708775],[2.104250368E9,1352708790],[2.104250368E9,1352708805],[2.104250368E9,1352708820],[2.104250368E9,1352708835],[2.104250368E9,1352708850],[2.104250368E9,1352708865],[2.104250368E9,1352708880],[2.104250368E9,1352708895],[2.104250368E9,1352708910],[2.104250368E9,1352708925],[2.104250368E9,1352708940],[2.104250368E9,1352708955],[2.104250368E9,1352708970],[2.104250368E9,1352708985],[2.104250368E9,1352709000],[2.104250368E9,1352709015],[2.104250368E9,1352709030],[2.104250368E9,1352709045],[2.104250368E9,1352709060],[2.104250368E9,1352709075],[2.104250368E9,1352709090],[2.104250368E9,1352709105],[2.104250368E9,1352709120],[2.104250368E9,1352709135],[2.104250368E9,1352709150],[2.104250368E9,1352709165],[2.104250368E9,1352709180],[2.104250368E9,1352709195],[2.104250368E9,1352709210],[2.104250368E9,1352709225],[2.104250368E9,1352709240],[2.104250368E9,1352709255],[2.104250368E9,1352709270],[2.104250368E9,1352709285],[2.104250368E9,1352709300],[2.104250368E9,1352709315],[2.104250368E9,1352709330],[2.104250368E9,1352709345],[2.104250368E9,1352709360],[2.104250368E9,1352709375],[2.104250368E9,1352709390],[2.104250368E9,1352709405],[2.104250368E9,1352709420],[2.104250368E9,1352709435],[2.104250368E9,1352709450],[2.104250368E9,1352709465],[2.104250368E9,1352709480],[2.104250368E9,1352709495],[2.104250368E9,1352709510],[2.104250368E9,1352709525],[2.104250368E9,1352709540],[2.104250368E9,1352709555],[2.104250368E9,1352709570],[2.104250368E9,1352709585],[2.104250368E9,1352709600],[2.104250368E9,1352709615],[2.104250368E9,1352709630],[2.104250368E9,1352709645],[2.104250368E9,1352709660],[2.104250368E9,1352709675],[2.104250368E9,1352709690],[2.104250368E9,1352709705],[2.104250368E9,1352709720],[2.104250368E9,1352709735],[2.104250368E9,1352709750],[2.104250368E9,1352709765],[2.104250368E9,1352709780],[2.104250368E9,1352709795],[2.104250368E9,1352709810],[2.104250368E9,1352709825],[2.104250368E9,1352709840],[2.104250368E9,1352709855],[2.104250368E9,1352709870],[2.104250368E9,1352709885],[2.104250368E9,1352709900],[2.104250368E9,1352709915],[0.0,1352709930],[0.0,1352709945],[0.0,1352709960],[0.0,1352709975],[0.0,1352709990],[0.0,1352710005],[0.0,1352710020],[0.0,1352710035],[0.0,1352710050],[0.0,1352710065]]",
-      "Use" : "[[1.803198464E9,1352706465],[1.803198464E9,1352706480],[1.8057284267E9,1352706495],[1.810788352E9,1352706510],[1.810788352E9,1352706525],[1.8023407616E9,1352706540],[1.779109888E9,1352706555],[1.7808837291E9,1352706570],[1.785761792E9,1352706585],[1.785761792E9,1352706600],[1.7884913664E9,1352706615],[1.795997696E9,1352706630],[1.795997696E9,1352706645],[1.7990680576E9,1352706660],[1.807511552E9,1352706675],[1.8066191701E9,1352706690],[1.80416512E9,1352706705],[1.80416512E9,1352706720],[1.7990686037E9,1352706735],[1.785053184E9,1352706750],[1.785053184E9,1352706765],[1.7873709739E9,1352706780],[1.793744896E9,1352706795],[1.795801088E9,1352706810],[1.804025856E9,1352706825],[1.804025856E9,1352706840],[1.8079492779E9,1352706855],[1.818738688E9,1352706870],[1.818738688E9,1352706885],[1.8087073109E9,1352706900],[1.781121024E9,1352706915],[1.7832935424E9,1352706930],[1.789267968E9,1352706945],[1.789267968E9,1352706960],[1.79267584E9,1352706975],[1.814827008E9,1352706990],[1.814827008E9,1352707005],[1.8165161984E9,1352707020],[1.82327296E9,1352707035],[1.8232598528E9,1352707050],[1.823174656E9,1352707065],[1.823174656E9,1352707080],[1.8186862592E9,1352707095],[1.800732672E9,1352707110],[1.800732672E9,1352707125],[1.8027380736E9,1352707140],[1.808252928E9,1352707155],[1.810436096E9,1352707170],[1.819168768E9,1352707185],[1.819168768E9,1352707200],[1.82173696E9,1352707215],[1.832009728E9,1352707230],[1.832009728E9,1352707245],[1.8313486336E9,1352707260],[1.828704256E9,1352707275],[1.8246705152E9,1352707290],[1.808535552E9,1352707305],[1.808535552E9,1352707320],[1.8100944896E9,1352707335],[1.81633024E9,1352707350],[1.81633024E9,1352707365],[1.818451968E9,1352707380],[1.82693888E9,1352707395],[1.8291326976E9,1352707410],[1.837907968E9,1352707425],[1.837907968E9,1352707440],[1.8321154048E9,1352707455],[1.808945152E9,1352707470],[1.808945152E9,1352707485],[1.81051392E9,1352707500],[1.813651456E9,1352707515],[1.8177933312E9,1352707530],[1.824006144E9,1352707545],[1.824006144E9,1352707560],[1.8276502187E9,1352707575],[1.834938368E9,1352707590],[1.834938368E9,1352707605],[1.8338816E9,1352707620],[1.831768064E9,1352707635],[1.8239152128E9,1352707650],[1.812135936E9,1352707665],[1.812135936E9,1352707680],[1.8149799253E9,1352707695],[1.820667904E9,1352707710],[1.820667904E9,1352707725],[1.825062912E9,1352707740],[1.833852928E9,1352707755],[1.8365420885E9,1352707770],[1.84393728E9,1352707785],[1.84393728E9,1352707800],[1.8428338176E9,1352707815],[1.838419968E9,1352707830],[1.8358515029E9,1352707845],[1.81915648E9,1352707860],[1.81915648E9,1352707875],[1.8204748459E9,1352707890],[1.829044224E9,1352707905],[1.829044224E9,1352707920],[1.8311626752E9,1352707935],[1.83963648E9,1352707950],[1.83963648E9,1352707965],[1.8391474176E9,1352707980],[1.837191168E9,1352707995],[1.8343299755E9,1352708010],[1.815732224E9,1352708025],[1.815732224E9,1352708040],[1.815732224E9,1352708055],[1.82427648E9,1352708070],[1.8248223403E9,1352708085],[1.832464384E9,1352708100],[1.832464384E9,1352708115],[1.832464384E9,1352708130],[1.843101696E9,1352708145],[1.843101696E9,1352708160],[1.843101696E9,1352708175],[1.838481408E9,1352708190],[1.838481408E9,1352708205],[1.816686592E9,1352708220],[1.816686592E9,1352708235],[1.816686592E9,1352708250],[1.825988608E9,1352708265],[1.825988608E9,1352708280],[1.825988608E9,1352708295],[1.835585536E9,1352708310],[1.835585536E9,1352708325],[1.8455481003E9,1352708340],[1.846259712E9,1352708355],[1.846259712E9,1352708370],[1.8167734272E9,1352708385],[1.814667264E9,1352708400],[1.8224840704E9,1352708415],[1.823686656E9,1352708430],[1.823686656E9,1352708445],[1.8305520981E9,1352708460],[1.83160832E9,1352708475],[1.83160832E9,1352708490],[1.8399444992E9,1352708505],[1.842028544E9,1352708520],[1.8403374421E9,1352708535],[1.839722496E9,1352708550],[1.839722496E9,1352708565],[1.8225291264E9,1352708580],[1.816276992E9,1352708595],[1.816276992E9,1352708610],[1.8222107307E9,1352708625],[1.8251776E9,1352708640],[1.8315649024E9,1352708655],[1.835823104E9,1352708670],[1.835823104E9,1352708685],[1.8371698688E9,1352708700],[1.838067712E9,1352708715],[1.8355292843E9,1352708730],[1.83330816E9,1352708745],[1.83330816E9,1352708760],[1.8250397013E9,1352708775],[1.8178048E9,1352708790],[1.8178048E9,1352708805],[1.8209439744E9,1352708820],[1.823690752E9,1352708835],[1.823690752E9,1352708850],[1.8289912491E9,1352708865],[1.83504896E9,1352708880],[1.83504896E9,1352708895],[1.83504896E9,1352708910],[1.83504896E9,1352708925],[1.8233617067E9,1352708940],[1.81313536E9,1352708955],[1.8168031915E9,1352708970],[1.820012544E9,1352708985],[1.820012544E9,1352709000],[1.8254345557E9,1352709015],[1.830178816E9,1352709030],[1.830178816E9,1352709045],[1.8354449067E9,1352709060],[1.841463296E9,1352709075],[1.8398560256E9,1352709090],[1.83744512E9,1352709105],[1.83744512E9,1352709120],[1.8292318208E9,1352709135],[1.816911872E9,1352709150],[1.816911872E9,1352709165],[1.8207997952E9,1352709180],[1.825243136E9,1352709195],[1.825243136E9,1352709210],[1.8311129771E9,1352709225],[1.836249088E9,1352709240],[1.8422390784E9,1352709255],[1.84748032E9,1352709270],[1.84748032E9,1352709285],[1.8302683819E9,1352709300],[1.815207936E9,1352709315],[1.815207936E9,1352709330],[1.818517504E9,1352709345],[1.821413376E9,1352709360],[1.8295701504E9,1352709375],[1.835008E9,1352709390],[1.835008E9,1352709405],[1.838874624E9,1352709420],[1.840807936E9,1352709435],[1.840807936E9,1352709450],[1.839194112E9,1352709465],[1.8383872E9,1352709480],[1.824014336E9,1352709495],[1.81878784E9,1352709510],[1.81878784E9,1352709525],[1.826209792E9,1352709540],[1.82806528E9,1352709555],[1.82806528E9,1352709570],[1.8362802176E9,1352709585],[1.838333952E9,1352709600],[1.838333952E9,1352709615],[1.8494982827E9,1352709630],[1.851215872E9,1352709645],[1.8450494805E9,1352709660],[1.844609024E9,1352709675],[1.844609024E9,1352709690],[1.8273886208E9,1352709705],[1.824739328E9,1352709720],[1.824739328E9,1352709735],[1.8345050112E9,1352709750],[1.836007424E9,1352709765],[1.8441895936E9,1352709780],[1.846235136E9,1352709795],[1.846235136E9,1352709810],[1.8451928405E9,1352709825],[1.844813824E9,1352709840],[1.8288399701E9,1352709855],[1.823031296E9,1352709870],[1.823031296E9,1352709885],[1.8289366357E9,1352709900],[1.831084032E9,1352709915],[0.0,1352709930],[0.0,1352709945],[0.0,1352709960],[0.0,1352709975],[0.0,1352709990],[0.0,1352710005],[0.0,1352710020],[0.0,1352710035],[0.0,1352710050],[0.0,1352710065]]"
-    }
-  },
-  "Clusters" : {
-    "cluster_name" : "vmc",
-    "version" : "HDP-1.2.0"
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/cluster_metrics/memory_2hr.json b/branch-1.2/ambari-web/app/assets/data/cluster_metrics/memory_2hr.json
deleted file mode 100644
index 896fb67..0000000
--- a/branch-1.2/ambari-web/app/assets/data/cluster_metrics/memory_2hr.json
+++ /dev/null
@@ -1 +0,0 @@
-[{"ds_name":"bmem_used","cluster_name":"","graph_type":"stack","host_name":"","metric_name":"Use\\g","datapoints":[[6611666386.5,1350519840],[6607590718.6,1350520200],[6598802921.2,1350520560],[6599184167.8,1350520920],[6600846927.6,1350521280],[6601654340.3,1350521640],[6603619180.1,1350522000],[6608557545.2,1350522360],[6607194533,1350522720],[6605735025.8,1350523080],[6603998105.6,1350523440],[6604949424.4,1350523800],[6605775496.5,1350524160],[6605691437.5,1350524520],[6606612935.1,1350524880],[6608204185.6,1350525240],[6608155261.2,1350525600],[6653075603.9,1350525960],[6652708761.6,1350526320],[0,1350526680],[0,1350527040]]},{"ds_name":"bmem_shared","cluster_name":"","graph_type":"stack","host_name":"","metric_name":"Share\\g","datapoints":[[0,1350519840],[0,1350520200],[0,1350520560],[0,1350520920],[0,1350521280],[0,1350521640],[0,1350522000],[0,1350522360],[0,1350522720],[0,1350523080],[0,1350523440],[0,1350523800],[0,1350524160],[0,1350524520],[0,1350524880],[0,1350525240],[0,1350525600],[0,1350525960],[0,1350526320],[0,1350526680],[0,1350527040]]},{"ds_name":"bmem_cached","cluster_name":"","graph_type":"stack","host_name":"","metric_name":"Cache\\g","datapoints":[[6583777245.9,1350519840],[6584079109.7,1350520200],[6584355089.1,1350520560],[6584645643.4,1350520920],[6584953878.8,1350521280],[6585259736.2,1350521640],[6585550848,1350522000],[6585851437.5,1350522360],[6586156123,1350522720],[6586563185.8,1350523080],[6586897373.9,1350523440],[6587174491,1350523800],[6587487687.1,1350524160],[6587763848.5,1350524520],[6588033513.2,1350524880],[6588284950.8,1350525240],[6588571204.3,1350525600],[6588924950.8,1350525960],[6589168640,1350526320],[0,1350526680],[0,1350527040]]},{"ds_name":"bmem_buffers","cluster_name":"","graph_type":"stack","host_name":"","metric_name":"Buffer\\g","datapoints":[[558022690.13,1350519840],[558269769.96,1350520200],[558499737.6,1350520560],[558749320.53,1350520920],[558976523.38,1350521280],[559188673.42,1350521640],[559454139.73,1350522000],[559749074.49,1350522360],[560021913.6,1350522720],[560313662.58,1350523080],[560651332.27,1350523440],[560954049.42,1350523800],[561263559.11,1350524160],[561470259.2,1350524520],[561661644.8,1350524880],[561861188.27,1350525240],[562096469.33,1350525600],[562875756.09,1350525960],[563147229.87,1350526320],[0,1350526680],[0,1350527040]]},{"ds_name":"bmem_swapped","cluster_name":"","graph_type":"stack","host_name":"","metric_name":"Swap\\g","datapoints":[[0,1350519840],[0,1350520200],[0,1350520560],[0,1350520920],[0,1350521280],[0,1350521640],[0,1350522000],[0,1350522360],[0,1350522720],[0,1350523080],[0,1350523440],[0,1350523800],[0,1350524160],[0,1350524520],[0,1350524880],[0,1350525240],[0,1350525600],[0,1350525960],[0,1350526320],[0,1350526680],[0,1350527040]]},{"ds_name":"bmem_total","cluster_name":"","graph_type":"line","host_name":"","metric_name":"Total\\g","datapoints":[[23488978944,1350519840],[23488978944,1350520200],[23488978944,1350520560],[23488978944,1350520920],[23488978944,1350521280],[23488978944,1350521640],[23488978944,1350522000],[23488978944,1350522360],[23488978944,1350522720],[23488978944,1350523080],[23488978944,1350523440],[23488978944,1350523800],[23488978944,1350524160],[23488978944,1350524520],[23488978944,1350524880],[23488978944,1350525240],[23488978944,1350525600],[23488978944,1350525960],[23488978944,1350526320],[0,1350526680],[0,1350527040]]}]
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/cluster_metrics/network_1hr.json b/branch-1.2/ambari-web/app/assets/data/cluster_metrics/network_1hr.json
deleted file mode 100644
index 931d460..0000000
--- a/branch-1.2/ambari-web/app/assets/data/cluster_metrics/network_1hr.json
+++ /dev/null
@@ -1,13 +0,0 @@
-{
-  "href" : "http://ambari/api/clusters/vmc?fields=metrics/network[1352702257,1352705857,15]",
-  "metrics" : {
-    "network" : {
-      "Out" : "[[12583.08,1352706495],[12583.08,1352706510],[12583.08,1352706525],[12583.08,1352706540],[12583.08,1352706555],[12583.08,1352706570],[12583.08,1352706585],[12583.08,1352706600],[12583.08,1352706615],[12583.08,1352706630],[12583.08,1352706645],[12583.08,1352706660],[12583.08,1352706675],[12106.336,1352706690],[10795.29,1352706705],[10795.29,1352706720],[10795.29,1352706735],[10795.29,1352706750],[10795.29,1352706765],[10795.29,1352706780],[10795.29,1352706795],[10795.29,1352706810],[10795.29,1352706825],[10795.29,1352706840],[31635.684667,1352706855],[88946.77,1352706870],[88946.77,1352706885],[68785.01,1352706900],[13340.17,1352706915],[13340.17,1352706930],[13340.17,1352706945],[13340.17,1352706960],[13340.17,1352706975],[13340.17,1352706990],[13340.17,1352707005],[13340.17,1352707020],[13340.17,1352707035],[13340.17,1352707050],[13340.17,1352707065],[13340.17,1352707080],[13340.17,1352707095],[13340.17,1352707110],[13340.17,1352707125],[13340.17,1352707140],[13340.17,1352707155],[13340.17,1352707170],[13118.971333,1352707185],[11681.18,1352707200],[11681.18,1352707215],[11681.18,1352707230],[11681.18,1352707245],[11681.18,1352707260],[11681.18,1352707275],[11681.18,1352707290],[11681.18,1352707305],[11681.18,1352707320],[11681.18,1352707335],[11681.18,1352707350],[11681.18,1352707365],[11681.18,1352707380],[11681.18,1352707395],[11681.18,1352707410],[11681.18,1352707425],[11681.18,1352707440],[11681.18,1352707455],[11681.18,1352707470],[11681.18,1352707485],[11983.44,1352707500],[12587.96,1352707515],[12587.96,1352707530],[12587.96,1352707545],[12587.96,1352707560],[12587.96,1352707575],[12587.96,1352707590],[12587.96,1352707605],[12587.96,1352707620],[12587.96,1352707635],[12587.96,1352707650],[12587.96,1352707665],[12587.96,1352707680],[12587.96,1352707695],[12587.96,1352707710],[12587.96,1352707725],[36661.513333,1352707740],[84808.62,1352707755],[66401.5,1352707770],[15781.92,1352707785],[15781.92,1352707800],[30233.69,1352707815],[88040.77,1352707830],[77739.934,1352707845],[10784.5,1352707860],[10784.5,1352707875],[10784.5,1352707890],[10784.5,1352707905],[10784.5,1352707920],[10784.5,1352707935],[10784.5,1352707950],[10784.5,1352707965],[10784.5,1352707980],[10784.5,1352707995],[10784.5,1352708010],[10784.5,1352708025],[10784.5,1352708040],[10784.5,1352708055],[10784.5,1352708070],[10784.5,1352708085],[10784.5,1352708100],[10784.5,1352708115],[10784.5,1352708130],[10784.5,1352708145],[12577.01,1352708160],[12577.01,1352708175],[12577.01,1352708190],[12577.01,1352708205],[12577.01,1352708220],[12577.01,1352708235],[12577.01,1352708250],[12577.01,1352708265],[12577.01,1352708280],[12577.01,1352708295],[12577.01,1352708310],[12577.01,1352708325],[12577.01,1352708340],[12577.01,1352708355],[12577.01,1352708370],[12577.01,1352708385],[12577.01,1352708400],[12577.01,1352708415],[12577.01,1352708430],[12577.01,1352708445],[76909.477333,1352708460],[86806.78,1352708475],[86806.78,1352708490],[28706.124,1352708505],[14180.96,1352708520],[14180.96,1352708535],[14180.96,1352708550],[14180.96,1352708565],[14180.96,1352708580],[14180.96,1352708595],[14180.96,1352708610],[14180.96,1352708625],[14180.96,1352708640],[14180.96,1352708655],[14180.96,1352708670],[14180.96,1352708685],[14180.96,1352708700],[14180.96,1352708715],[14180.96,1352708730],[14180.96,1352708745],[14180.96,1352708760],[14180.96,1352708775],[14180.96,1352708790],[12369.776,1352708805],[10784.99,1352708820],[10784.99,1352708835],[10784.99,1352708850],[10784.99,1352708865],[10784.99,1352708880],[10784.99,135270889
5],[10784.99,1352708910],[10784.99,1352708925],[10784.99,1352708940],[10784.99,1352708955],[10784.99,1352708970],[10784.99,1352708985],[10784.99,1352709000],[10784.99,1352709015],[10784.99,1352709030],[10784.99,1352709045],[10784.99,1352709060],[10784.99,1352709075],[11502.454,1352709090],[12578.65,1352709105],[12578.65,1352709120],[12578.65,1352709135],[12578.65,1352709150],[12578.65,1352709165],[12578.65,1352709180],[12578.65,1352709195],[12578.65,1352709210],[12578.65,1352709225],[12578.65,1352709240],[12578.65,1352709255],[12578.65,1352709270],[12578.65,1352709285],[12578.65,1352709300],[12578.65,1352709315],[12578.65,1352709330],[12578.65,1352709345],[12578.65,1352709360],[12578.65,1352709375],[12578.65,1352709390],[11382.443333,1352709405],[10784.34,1352709420],[10784.34,1352709435],[10784.34,1352709450],[10784.34,1352709465],[10784.34,1352709480],[10784.34,1352709495],[10784.34,1352709510],[10784.34,1352709525],[10784.34,1352709540],[10784.34,1352709555],[10784.34,1352709570],[10784.34,1352709585],[10784.34,1352709600],[10784.34,1352709615],[10784.34,1352709630],[10784.34,1352709645],[10784.34,1352709660],[10784.34,1352709675],[10784.34,1352709690],[12345.674667,1352709705],[12585.88,1352709720],[12585.88,1352709735],[78700.907333,1352709750],[88872.45,1352709765],[88872.45,1352709780],[88872.45,1352709795],[88872.45,1352709810],[34560.793333,1352709825],[14811.1,1352709840],[67446.54,1352709855],[86586.7,1352709870],[86586.7,1352709885],[33933.975333,1352709900],[14787.53,1352709915],[0.0,1352709930],[0.0,1352709945],[0.0,1352709960],[0.0,1352709975],[0.0,1352709990],[0.0,1352710005],[0.0,1352710020],[0.0,1352710035],[0.0,1352710050],[0.0,1352710065],[0.0,1352710080],[0.0,1352710095]]",
-      "In" : "[[363.51,1352706495],[363.51,1352706510],[363.51,1352706525],[363.51,1352706540],[363.51,1352706555],[363.51,1352706570],[363.51,1352706585],[363.51,1352706600],[363.51,1352706615],[363.51,1352706630],[363.51,1352706645],[363.51,1352706660],[363.51,1352706675],[355.134,1352706690],[332.1,1352706705],[332.1,1352706720],[332.1,1352706735],[332.1,1352706750],[332.1,1352706765],[332.1,1352706780],[332.1,1352706795],[332.1,1352706810],[332.1,1352706825],[332.1,1352706840],[718.19333333,1352706855],[1779.95,1352706870],[1779.95,1352706885],[1618.51,1352706900],[1174.55,1352706915],[1174.55,1352706930],[1174.55,1352706945],[1174.55,1352706960],[1174.55,1352706975],[1174.55,1352706990],[1174.55,1352707005],[1174.55,1352707020],[1174.55,1352707035],[1174.55,1352707050],[1174.55,1352707065],[1174.55,1352707080],[1174.55,1352707095],[1174.55,1352707110],[1174.55,1352707125],[1174.55,1352707140],[1174.55,1352707155],[1174.55,1352707170],[1062.434,1352707185],[333.68,1352707200],[333.68,1352707215],[333.68,1352707230],[333.68,1352707245],[333.68,1352707260],[333.68,1352707275],[333.68,1352707290],[333.68,1352707305],[333.68,1352707320],[333.68,1352707335],[333.68,1352707350],[333.68,1352707365],[333.68,1352707380],[333.68,1352707395],[333.68,1352707410],[333.68,1352707425],[333.68,1352707440],[333.68,1352707455],[333.68,1352707470],[333.68,1352707485],[343.97666667,1352707500],[364.57,1352707515],[364.57,1352707530],[364.57,1352707545],[364.57,1352707560],[364.57,1352707575],[364.57,1352707590],[364.57,1352707605],[364.57,1352707620],[364.57,1352707635],[364.57,1352707650],[364.57,1352707665],[364.57,1352707680],[364.57,1352707695],[364.57,1352707710],[364.57,1352707725],[755.93666667,1352707740],[1538.67,1352707755],[1283.942,1352707770],[583.44,1352707785],[583.44,1352707800],[783.546,1352707815],[1583.97,1352707830],[1414.7873333,1352707845],[315.1,1352707860],[315.1,1352707875],[315.1,1352707890],[315.1,1352707905],[315.1,1352707920],[315.1,1352707935],[315.1,1352707950],[315.1,1352707965],[315.1,1352707980],[315.1,1352707995],[315.1,1352708010],[315.1,1352708025],[315.1,1352708040],[315.1,1352708055],[315.1,1352708070],[315.1,1352708085],[315.1,1352708100],[315.1,1352708115],[315.1,1352708130],[315.1,1352708145],[359.9,1352708160],[359.9,1352708175],[359.9,1352708190],[359.9,1352708205],[359.9,1352708220],[359.9,1352708235],[359.9,1352708250],[359.9,1352708265],[359.9,1352708280],[359.9,1352708295],[359.9,1352708310],[359.9,1352708325],[359.9,1352708340],[359.9,1352708355],[359.9,1352708370],[359.9,1352708385],[359.9,1352708400],[359.9,1352708415],[359.9,1352708430],[359.9,1352708445],[1375.798,1352708460],[1532.09,1352708475],[1532.09,1352708490],[778.714,1352708505],[590.37,1352708520],[590.37,1352708535],[590.37,1352708550],[590.37,1352708565],[590.37,1352708580],[590.37,1352708595],[590.37,1352708610],[590.37,1352708625],[590.37,1352708640],[590.37,1352708655],[590.37,1352708670],[590.37,1352708685],[590.37,1352708700],[590.37,1352708715],[590.37,1352708730],[590.37,1352708745],[590.37,1352708760],[590.37,1352708775],[590.37,1352708790],[442.36466667,1352708805],[312.86,1352708820],[312.86,1352708835],[312.86,1352708850],[312.86,1352708865],[312.86,1352708880],[312.86,1352708895],[312.86,1352708910],[312.86,1352708925],[312.86,1352708940],[312.86,1352708955],[312.86,1352708970],[312.86,1352708985],[312.86,1352709000],[312.86,1352709015],[312.86,1352709030],[312.86,1352709045],[312.86,1352709060],[312.86,1352709075],[329.824,1352709090],[355.27,1352709105],[355.27,1352709120],[355
.27,1352709135],[355.27,1352709150],[355.27,1352709165],[355.27,1352709180],[355.27,1352709195],[355.27,1352709210],[355.27,1352709225],[355.27,1352709240],[355.27,1352709255],[355.27,1352709270],[355.27,1352709285],[355.27,1352709300],[355.27,1352709315],[355.27,1352709330],[355.27,1352709345],[355.27,1352709360],[355.27,1352709375],[355.27,1352709390],[322.90333333,1352709405],[306.72,1352709420],[306.72,1352709435],[306.72,1352709450],[306.72,1352709465],[306.72,1352709480],[306.72,1352709495],[306.72,1352709510],[306.72,1352709525],[306.72,1352709540],[306.72,1352709555],[306.72,1352709570],[306.72,1352709585],[306.72,1352709600],[306.72,1352709615],[306.72,1352709630],[306.72,1352709645],[306.72,1352709660],[306.72,1352709675],[306.72,1352709690],[361.04266667,1352709705],[369.4,1352709720],[369.4,1352709735],[1606.3933333,1352709750],[1796.7,1352709765],[1796.7,1352709780],[1796.7,1352709795],[1796.7,1352709810],[978.52,1352709825],[681.0,1352709840],[923.56466667,1352709855],[1011.77,1352709870],[1011.77,1352709885],[793.442,1352709900],[714.05,1352709915],[0.0,1352709930],[0.0,1352709945],[0.0,1352709960],[0.0,1352709975],[0.0,1352709990],[0.0,1352710005],[0.0,1352710020],[0.0,1352710035],[0.0,1352710050],[0.0,1352710065],[0.0,1352710080],[0.0,1352710095]]"
-    }
-  },
-  "Clusters" : {
-    "cluster_name" : "vmc",
-    "version" : "HDP-1.2.0"
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/cluster_metrics/network_2hr.json b/branch-1.2/ambari-web/app/assets/data/cluster_metrics/network_2hr.json
deleted file mode 100644
index 756bbb2..0000000
--- a/branch-1.2/ambari-web/app/assets/data/cluster_metrics/network_2hr.json
+++ /dev/null
@@ -1 +0,0 @@
-[{"ds_name":"a0","cluster_name":"","graph_type":"line","host_name":"","metric_name":"In ","datapoints":[[352.942,1350519840],[236.59,1350520200],[240.60791667,1350520560],[351.89730556,1350520920],[294.35841667,1350521280],[374.38777778,1350521640],[249.21122222,1350522000],[259.88830556,1350522360],[382.79436111,1350522720],[617.02863889,1350523080],[240.46,1350523440],[247.0425,1350523800],[503.92527778,1350524160],[233.43291667,1350524520],[240.355,1350524880],[229.16444444,1350525240],[227.12472222,1350525600],[239.61583333,1350525960],[345.57472222,1350526320],[451.08116667,1350526680],[0,1350527040]]},{"ds_name":"a1","cluster_name":"","graph_type":"line","host_name":"","metric_name":"Out","datapoints":[[2044.9526667,1350519840],[186.601,1350520200],[322.66416667,1350520560],[1746.4213333,1350520920],[801.19655556,1350521280],[2100.7752778,1350521640],[196.62205556,1350522000],[544.16102778,1350522360],[2467.9166389,1350522720],[6349.9070833,1350523080],[189.792,1350523440],[195.6495,1350523800],[3345.7549444,1350524160],[302.33,1350524520],[190.26427778,1350524880],[179.69261111,1350525240],[178.87130556,1350525600],[189.15422222,1350525960],[1709.4773889,1350526320],[2826.0326667,1350526680],[0,1350527040]]}]
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/clusters/cluster.json b/branch-1.2/ambari-web/app/assets/data/clusters/cluster.json
deleted file mode 100644
index a0ad3a7..0000000
--- a/branch-1.2/ambari-web/app/assets/data/clusters/cluster.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-  "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster?fields=Clusters",
-  "Clusters" : {
-    "cluster_name" : "mycluster",
-    "cluster_id" : 1,
-    "version" : "HDP-0.1",
-    "stack_name" : "HDP",
-    "max_hosts_per_rack" : 10
-  }
-}
diff --git a/branch-1.2/ambari-web/app/assets/data/clusters/info.json b/branch-1.2/ambari-web/app/assets/data/clusters/info.json
deleted file mode 100644
index 701989a..0000000
--- a/branch-1.2/ambari-web/app/assets/data/clusters/info.json
+++ /dev/null
@@ -1,11 +0,0 @@
-{
-  "href" : "http://ambari:9998/clusters",
-  "items" : [
-    {
-      "href" : "http://ambari:9998/clusters/mycluster",
-      "Clusters" : {
-        "cluster_name" : "mycluster"
-      }
-    }
-  ]
-}
diff --git a/branch-1.2/ambari-web/app/assets/data/dashboard/mapreduce/mapreduce_start.json b/branch-1.2/ambari-web/app/assets/data/dashboard/mapreduce/mapreduce_start.json
deleted file mode 100644
index ac9fd19..0000000
--- a/branch-1.2/ambari-web/app/assets/data/dashboard/mapreduce/mapreduce_start.json
+++ /dev/null
@@ -1,370 +0,0 @@
-{
-  "href" : "http://localhost:8080/api/clusters/mycluster/services?ServiceInfo/service_name=MAPREDUCE&fields=components/host_components/*",
-  "items" : [
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/services/MAPREDUCE",
-      "ServiceInfo" : {
-        "cluster_name" : "mycluster",
-        "service_name" : "MAPREDUCE"
-      },
-      "components" : [
-        {
-          "href" : "http://localhost:8080/api/clusters/mycluster/services/MAPREDUCE/components/MAPREDUCE_CLIENT",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "MAPREDUCE_CLIENT",
-            "service_name" : "MAPREDUCE"
-          },
-          "host_components" : [
-            {
-              "href" : "http://localhost:8080/api/clusters/mycluster/hosts/dev.hortonworks.com/host_components/MAPREDUCE_CLIENT",
-              "HostRoles" : {
-                "configs" : "{}",
-                "cluster_name" : "mycluster",
-                "desired_configs" : "{}",
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "MAPREDUCE_CLIENT",
-                "host_name" : "dev.hortonworks.com"
-              },
-              "component" : [
-                {
-                  "href" : "http://localhost:8080/api/clusters/mycluster/services/MAPREDUCE/components/MAPREDUCE_CLIENT",
-                  "ServiceComponentInfo" : {
-                    "cluster_name" : "mycluster",
-                    "component_name" : "MAPREDUCE_CLIENT",
-                    "service_name" : "MAPREDUCE"
-                  }
-                }
-              ]
-            }
-          ]
-        },
-        {
-          "href" : "http://localhost:8080/api/clusters/mycluster/services/MAPREDUCE/components/JOBTRACKER",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "JOBTRACKER",
-            "service_name" : "MAPREDUCE"
-          },
-          "host_components" : [
-            {
-              "href" : "http://localhost:8080/api/clusters/mycluster/hosts/dev.hortonworks.com/host_components/JOBTRACKER",
-              "HostRoles" : {
-                "configs" : "{\"mapred-site\":\"version1\",\"global\":\"version1\",\"core-site\":\"version1\"}",
-                "cluster_name" : "mycluster",
-                "desired_configs" : "{}",
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "JOBTRACKER",
-                "host_name" : "dev.hortonworks.com"
-              },
-              "metrics" : {
-                "boottime" : 1.353113263E9,
-                "process" : {
-                  "proc_total" : 542.4,
-                  "proc_run" : 1.04722222222
-                },
-                "rpc" : {
-                  "rpcAuthorizationSuccesses" : 0.0,
-                  "SentBytes" : 355.647619048,
-                  "rpcAuthorizationFailures" : 0.0,
-                  "ReceivedBytes" : 1040.35238095,
-                  "NumOpenConnections" : 0.0,
-                  "callQueueLen" : 0.0,
-                  "rpcAuthenticationSuccesses" : 0.0,
-                  "RpcQueueTime_num_ops" : 3.32380952381,
-                  "RpcProcessingTime_num_ops" : 3.32380952381,
-                  "RpcProcessingTime_avg_time" : 0.142915649166,
-                  "rpcAuthenticationFailures" : 0.0,
-                  "RpcQueueTime_avg_time" : 0.0767488298738
-                },
-                "ugi" : {
-                  "loginSuccess_num_ops" : 0.0,
-                  "loginFailure_num_ops" : 0.0,
-                  "loginSuccess_avg_time" : 0.0,
-                  "loginFailure_avg_time" : 0.0
-                },
-                "mapred" : {
-                  "Queue" : {
-                    "maps_killed" : 0.0,
-                    "reduces_killed" : 0.0,
-                    "jobs_failed" : 0.0,
-                    "reduces_completed" : 0.0,
-                    "running_0" : 0.0,
-                    "reduces_failed" : 0.0,
-                    "reserved_map_slots" : 0.0,
-                    "jobs_completed" : 0.0,
-                    "waiting_maps" : 0.0,
-                    "running_1440" : 0.0,
-                    "maps_completed" : 0.0,
-                    "jobs_preparing" : 0.0,
-                    "jobs_submitted" : 0.0,
-                    "reserved_reduce_slots" : 0.0,
-                    "jobs_running" : 0.0,
-                    "running_300" : 0.0,
-                    "maps_launched" : 0.0,
-                    "reduces_launched" : 0.0,
-                    "running_60" : 0.0,
-                    "waiting_reduces" : 0.0,
-                    "maps_failed" : 0.0,
-                    "jobs_killed" : 0.0
-                  },
-                  "jobtracker" : {
-                    "blacklisted_maps" : 0.0,
-                    "running_maps" : 0.0,
-                    "jobs_failed" : 0.0,
-                    "waiting_maps" : 0.0,
-                    "maps_completed" : 0.0,
-                    "trackers" : 1.0,
-                    "jobs_submitted" : 0.0,
-                    "map_slots" : 4.0,
-                    "reserved_reduce_slots" : 0.0,
-                    "trackers_graylisted" : 0.0,
-                    "heartbeats" : 3.32380952381,
-                    "jobs_running" : 0.0,
-                    "blacklisted_reduces" : 0.0,
-                    "maps_launched" : 0.0,
-                    "occupied_map_slots" : 0.0,
-                    "reduces_launched" : 0.0,
-                    "jobs_killed" : 0.0,
-                    "maps_failed" : 0.0,
-                    "maps_killed" : 0.0,
-                    "reduce_slots" : 2.0,
-                    "reduces_killed" : 0.0,
-                    "reduces_completed" : 0.0,
-                    "jobs_completed" : 0.0,
-                    "reserved_map_slots" : 0.0,
-                    "trackers_decommissioned" : 0.0,
-                    "reduces_failed" : 0.0,
-                    "trackers_blacklisted" : 0.0,
-                    "jobs_preparing" : 0.0,
-                    "running_reduces" : 0.0,
-                    "occupied_reduce_slots" : 0.0,
-                    "waiting_reduces" : 0.0
-                  }
-                },
-                "disk" : {
-                  "disk_total" : 101.515,
-                  "disk_free" : 93.4107777778,
-                  "part_max_used" : 12.8
-                },
-                "cpu" : {
-                  "cpu_speed" : 1986.0,
-                  "cpu_wio" : 0.510555555556,
-                  "cpu_num" : 1.0,
-                  "cpu_idle" : 45.9291666667,
-                  "cpu_nice" : 0.0,
-                  "cpu_aidle" : 0.0,
-                  "cpu_system" : 6.88277777778,
-                  "cpu_user" : 46.6838888889
-                },
-                "rpcdetailed" : {
-                  "getJobProfile_num_ops" : 0.0,
-                  "getStagingAreaDir_num_ops" : 0.0,
-                  "getProtocolVersion_avg_time" : 0.0,
-                  "getBuildVersion_avg_time" : 0.0,
-                  "getBuildVersion_num_ops" : 0.0,
-                  "getNewJobId_avg_time" : 0.0,
-                  "getQueueAdmins_num_ops" : 0.0,
-                  "getSystemDir_num_ops" : 0.0,
-                  "getTaskCompletionEvents_num_ops" : 0.0,
-                  "getJobProfile_avg_time" : 0.0,
-                  "submitJob_num_ops" : 0.0,
-                  "getStagingAreaDir_avg_time" : 2.0,
-                  "getNewJobId_num_ops" : 0.0,
-                  "getJobCounters_num_ops" : 0.0,
-                  "getTaskCompletionEvents_avg_time" : 0.111111111111,
-                  "getProtocolVersion_num_ops" : 0.0,
-                  "submitJob_avg_time" : 312.0,
-                  "getSystemDir_avg_time" : 0.0,
-                  "getJobStatus_num_ops" : 0.0,
-                  "getJobCounters_avg_time" : 0.0,
-                  "getQueueAdmins_avg_time" : 0.0,
-                  "heartbeat_num_ops" : 3.32380952381,
-                  "heartbeat_avg_time" : 0.142915649166,
-                  "getJobStatus_avg_time" : 0.0
-                },
-                "load" : {
-                  "load_fifteen" : 0.8845,
-                  "load_one" : 0.516833333333,
-                  "load_five" : 0.740888888889
-                },
-                "jvm" : {
-                  "memHeapCommittedM" : 185.1875,
-                  "logFatal" : 0.0,
-                  "threadsBlocked" : 0.0,
-                  "threadsWaiting" : 19.0,
-                  "gcCount" : 0.00277777777778,
-                  "logWarn" : 0.0,
-                  "logError" : 0.0,
-                  "memNonHeapCommittedM" : 23.75,
-                  "gcTimeMillis" : 0.641666666667,
-                  "memNonHeapUsedM" : 23.1303326472,
-                  "logInfo" : 0.0,
-                  "threadsNew" : 0.0,
-                  "memHeapUsedM" : 101.890453742,
-                  "threadsTerminated" : 0.0,
-                  "threadsTimedWaiting" : 10.0,
-                  "threadsRunnable" : 6.0
-                },
-                "network" : {
-                  "pkts_out" : 0.455111111111,
-                  "bytes_in" : 50.645,
-                  "bytes_out" : 119.762888889,
-                  "pkts_in" : 0.411222222222
-                },
-                "memory" : {
-                  "mem_total" : 2054932.0,
-                  "swap_free" : 4074713.26667,
-                  "mem_buffers" : 13563.6333333,
-                  "mem_shared" : 0.0,
-                  "mem_cached" : 175195.088889,
-                  "mem_free" : 117578.155556,
-                  "swap_total" : 4128760.0
-                }
-              },
-              "component" : [
-                {
-                  "href" : "http://localhost:8080/api/clusters/mycluster/services/MAPREDUCE/components/JOBTRACKER",
-                  "ServiceComponentInfo" : {
-                    "cluster_name" : "mycluster",
-                    "component_name" : "JOBTRACKER",
-                    "service_name" : "MAPREDUCE"
-                  }
-                }
-              ]
-            }
-          ]
-        },
-        {
-          "href" : "http://localhost:8080/api/clusters/mycluster/services/MAPREDUCE/components/TASKTRACKER",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "TASKTRACKER",
-            "service_name" : "MAPREDUCE"
-          },
-          "host_components" : [
-            {
-              "href" : "http://localhost:8080/api/clusters/mycluster/hosts/dev.hortonworks.com/host_components/TASKTRACKER",
-              "HostRoles" : {
-                "configs" : "{\"mapred-site\":\"version1\",\"global\":\"version1\",\"core-site\":\"version1\"}",
-                "cluster_name" : "mycluster",
-                "desired_configs" : "{}",
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "TASKTRACKER",
-                "host_name" : "dev.hortonworks.com"
-              },
-              "metrics" : {
-                "boottime" : 1.353113263E9,
-                "process" : {
-                  "proc_total" : 542.625,
-                  "proc_run" : 1.81944444444
-                },
-                "rpc" : {
-                  "rpcAuthorizationSuccesses" : 7.15827882667E7,
-                  "SentBytes" : 7.15827882667E7,
-                  "rpcAuthorizationFailures" : 0.0,
-                  "ReceivedBytes" : 7.15827882667E7,
-                  "NumOpenConnections" : 0.0,
-                  "callQueueLen" : 0.0,
-                  "rpcAuthenticationSuccesses" : 0.0,
-                  "RpcQueueTime_num_ops" : 7.15827882667E7,
-                  "RpcProcessingTime_num_ops" : 7.15827882667E7,
-                  "RpcProcessingTime_avg_time" : 0.151111111111,
-                  "rpcAuthenticationFailures" : 0.0,
-                  "RpcQueueTime_avg_time" : 0.151111111111
-                },
-                "ugi" : {
-                  "loginSuccess_num_ops" : 0.0,
-                  "loginFailure_num_ops" : 0.0,
-                  "loginSuccess_avg_time" : 0.0,
-                  "loginFailure_avg_time" : 0.0
-                },
-                "mapred" : {
-                  "shuffleOutput" : {
-                    "shuffle_handler_busy_percent" : 0.0
-                  },
-                  "tasktracker" : {
-                    "reduces_running" : 0.0,
-                    "maps_running" : 0.0,
-                    "reduceTaskSlots" : 2.0,
-                    "mapTaskSlots" : 4.0
-                  }
-                },
-                "disk" : {
-                  "disk_total" : 101.515,
-                  "disk_free" : 93.4107777778,
-                  "part_max_used" : 12.8
-                },
-                "cpu" : {
-                  "cpu_speed" : 1986.0,
-                  "cpu_wio" : 0.519444444444,
-                  "cpu_num" : 1.0,
-                  "cpu_idle" : 46.9705555556,
-                  "cpu_nice" : 0.0,
-                  "cpu_aidle" : 0.0,
-                  "cpu_system" : 6.75972222222,
-                  "cpu_user" : 45.7563888889
-                },
-                "rpcdetailed" : {
-                  "getProtocolVersion_avg_time" : 0.0,
-                  "getProtocolVersion_num_ops" : 4
-                },
-                "load" : {
-                  "load_fifteen" : 0.882305555556,
-                  "load_one" : 0.523944444444,
-                  "load_five" : 0.739694444444
-                },
-                "jvm" : {
-                  "memHeapCommittedM" : 30.375,
-                  "logFatal" : 0.0,
-                  "threadsBlocked" : 0.0,
-                  "threadsWaiting" : 14.5333333333,
-                  "gcCount" : 7.15827883321E7,
-                  "logWarn" : 0.0,
-                  "logError" : 0.0,
-                  "memNonHeapCommittedM" : 23.1875,
-                  "gcTimeMillis" : 7.15827883254E7,
-                  "memNonHeapUsedM" : 21.7573377917,
-                  "logInfo" : 0.0,
-                  "threadsNew" : 0.0,
-                  "memHeapUsedM" : 7.07001514861,
-                  "threadsTerminated" : 0.0,
-                  "threadsTimedWaiting" : 8.71666666667,
-                  "threadsRunnable" : 6.26666666667
-                },
-                "network" : {
-                  "pkts_out" : 0.453888888889,
-                  "bytes_in" : 50.5375,
-                  "bytes_out" : 119.456111111,
-                  "pkts_in" : 0.410277777778
-                },
-                "memory" : {
-                  "mem_total" : 2054932.0,
-                  "swap_free" : 4074707.96667,
-                  "mem_buffers" : 13563.6555556,
-                  "mem_shared" : 0.0,
-                  "mem_cached" : 175260.0,
-                  "mem_free" : 115051.811111,
-                  "swap_total" : 4128760.0
-                }
-              },
-              "component" : [
-                {
-                  "href" : "http://localhost:8080/api/clusters/mycluster/services/MAPREDUCE/components/TASKTRACKER",
-                  "ServiceComponentInfo" : {
-                    "cluster_name" : "mycluster",
-                    "component_name" : "TASKTRACKER",
-                    "service_name" : "MAPREDUCE"
-                  }
-                }
-              ]
-            }
-          ]
-        }
-      ]
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/dashboard/mapreduce/mapreduce_stop.json b/branch-1.2/ambari-web/app/assets/data/dashboard/mapreduce/mapreduce_stop.json
deleted file mode 100644
index 6d55a3c..0000000
--- a/branch-1.2/ambari-web/app/assets/data/dashboard/mapreduce/mapreduce_stop.json
+++ /dev/null
@@ -1,370 +0,0 @@
-{
-  "href" : "http://localhost:8080/api/clusters/mycluster/services?ServiceInfo/service_name=MAPREDUCE&fields=components/host_components/*",
-  "items" : [
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/services/MAPREDUCE",
-      "ServiceInfo" : {
-        "cluster_name" : "mycluster",
-        "service_name" : "MAPREDUCE"
-      },
-      "components" : [
-        {
-          "href" : "http://localhost:8080/api/clusters/mycluster/services/MAPREDUCE/components/MAPREDUCE_CLIENT",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "MAPREDUCE_CLIENT",
-            "service_name" : "MAPREDUCE"
-          },
-          "host_components" : [
-            {
-              "href" : "http://localhost:8080/api/clusters/mycluster/hosts/dev.hortonworks.com/host_components/MAPREDUCE_CLIENT",
-              "HostRoles" : {
-                "configs" : "{}",
-                "cluster_name" : "mycluster",
-                "desired_configs" : "{}",
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "MAPREDUCE_CLIENT",
-                "host_name" : "dev.hortonworks.com"
-              },
-              "component" : [
-                {
-                  "href" : "http://localhost:8080/api/clusters/mycluster/services/MAPREDUCE/components/MAPREDUCE_CLIENT",
-                  "ServiceComponentInfo" : {
-                    "cluster_name" : "mycluster",
-                    "component_name" : "MAPREDUCE_CLIENT",
-                    "service_name" : "MAPREDUCE"
-                  }
-                }
-              ]
-            }
-          ]
-        },
-        {
-          "href" : "http://localhost:8080/api/clusters/mycluster/services/MAPREDUCE/components/JOBTRACKER",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "JOBTRACKER",
-            "service_name" : "MAPREDUCE"
-          },
-          "host_components" : [
-            {
-              "href" : "http://localhost:8080/api/clusters/mycluster/hosts/dev.hortonworks.com/host_components/JOBTRACKER",
-              "HostRoles" : {
-                "configs" : "{\"mapred-site\":\"version1\",\"global\":\"version1\",\"core-site\":\"version1\"}",
-                "cluster_name" : "mycluster",
-                "desired_configs" : "{}",
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "JOBTRACKER",
-                "host_name" : "dev.hortonworks.com"
-              },
-              "metrics" : {
-                "boottime" : 1.353113263E9,
-                "process" : {
-                  "proc_total" : 542.4,
-                  "proc_run" : 1.04722222222
-                },
-                "rpc" : {
-                  "rpcAuthorizationSuccesses" : 0.0,
-                  "SentBytes" : 355.647619048,
-                  "rpcAuthorizationFailures" : 0.0,
-                  "ReceivedBytes" : 1040.35238095,
-                  "NumOpenConnections" : 0.0,
-                  "callQueueLen" : 0.0,
-                  "rpcAuthenticationSuccesses" : 0.0,
-                  "RpcQueueTime_num_ops" : 3.32380952381,
-                  "RpcProcessingTime_num_ops" : 3.32380952381,
-                  "RpcProcessingTime_avg_time" : 0.142915649166,
-                  "rpcAuthenticationFailures" : 0.0,
-                  "RpcQueueTime_avg_time" : 0.0767488298738
-                },
-                "ugi" : {
-                  "loginSuccess_num_ops" : 0.0,
-                  "loginFailure_num_ops" : 0.0,
-                  "loginSuccess_avg_time" : 0.0,
-                  "loginFailure_avg_time" : 0.0
-                },
-                "mapred" : {
-                  "Queue" : {
-                    "maps_killed" : 0.0,
-                    "reduces_killed" : 0.0,
-                    "jobs_failed" : 0.0,
-                    "reduces_completed" : 0.0,
-                    "running_0" : 0.0,
-                    "reduces_failed" : 0.0,
-                    "reserved_map_slots" : 0.0,
-                    "jobs_completed" : 0.0,
-                    "waiting_maps" : 0.0,
-                    "running_1440" : 0.0,
-                    "maps_completed" : 0.0,
-                    "jobs_preparing" : 0.0,
-                    "jobs_submitted" : 0.0,
-                    "reserved_reduce_slots" : 0.0,
-                    "jobs_running" : 0.0,
-                    "running_300" : 0.0,
-                    "maps_launched" : 0.0,
-                    "reduces_launched" : 0.0,
-                    "running_60" : 0.0,
-                    "waiting_reduces" : 0.0,
-                    "maps_failed" : 0.0,
-                    "jobs_killed" : 0.0
-                  },
-                  "jobtracker" : {
-                    "blacklisted_maps" : 0.0,
-                    "running_maps" : 0.0,
-                    "jobs_failed" : 0.0,
-                    "waiting_maps" : 0.0,
-                    "maps_completed" : 0.0,
-                    "trackers" : 1.0,
-                    "jobs_submitted" : 0.0,
-                    "map_slots" : 4.0,
-                    "reserved_reduce_slots" : 0.0,
-                    "trackers_graylisted" : 0.0,
-                    "heartbeats" : 3.32380952381,
-                    "jobs_running" : 0.0,
-                    "blacklisted_reduces" : 0.0,
-                    "maps_launched" : 0.0,
-                    "occupied_map_slots" : 0.0,
-                    "reduces_launched" : 0.0,
-                    "jobs_killed" : 0.0,
-                    "maps_failed" : 0.0,
-                    "maps_killed" : 0.0,
-                    "reduce_slots" : 2.0,
-                    "reduces_killed" : 0.0,
-                    "reduces_completed" : 0.0,
-                    "jobs_completed" : 0.0,
-                    "reserved_map_slots" : 0.0,
-                    "trackers_decommissioned" : 0.0,
-                    "reduces_failed" : 0.0,
-                    "trackers_blacklisted" : 0.0,
-                    "jobs_preparing" : 0.0,
-                    "running_reduces" : 0.0,
-                    "occupied_reduce_slots" : 0.0,
-                    "waiting_reduces" : 0.0
-                  }
-                },
-                "disk" : {
-                  "disk_total" : 101.515,
-                  "disk_free" : 93.4107777778,
-                  "part_max_used" : 12.8
-                },
-                "cpu" : {
-                  "cpu_speed" : 1986.0,
-                  "cpu_wio" : 0.510555555556,
-                  "cpu_num" : 1.0,
-                  "cpu_idle" : 45.9291666667,
-                  "cpu_nice" : 0.0,
-                  "cpu_aidle" : 0.0,
-                  "cpu_system" : 6.88277777778,
-                  "cpu_user" : 46.6838888889
-                },
-                "rpcdetailed" : {
-                  "getJobProfile_num_ops" : 0.0,
-                  "getStagingAreaDir_num_ops" : 0.0,
-                  "getProtocolVersion_avg_time" : 0.0,
-                  "getBuildVersion_avg_time" : 0.0,
-                  "getBuildVersion_num_ops" : 0.0,
-                  "getNewJobId_avg_time" : 0.0,
-                  "getQueueAdmins_num_ops" : 0.0,
-                  "getSystemDir_num_ops" : 0.0,
-                  "getTaskCompletionEvents_num_ops" : 0.0,
-                  "getJobProfile_avg_time" : 0.0,
-                  "submitJob_num_ops" : 0.0,
-                  "getStagingAreaDir_avg_time" : 2.0,
-                  "getNewJobId_num_ops" : 0.0,
-                  "getJobCounters_num_ops" : 0.0,
-                  "getTaskCompletionEvents_avg_time" : 0.111111111111,
-                  "getProtocolVersion_num_ops" : 0.0,
-                  "submitJob_avg_time" : 312.0,
-                  "getSystemDir_avg_time" : 0.0,
-                  "getJobStatus_num_ops" : 0.0,
-                  "getJobCounters_avg_time" : 0.0,
-                  "getQueueAdmins_avg_time" : 0.0,
-                  "heartbeat_num_ops" : 3.32380952381,
-                  "heartbeat_avg_time" : 0.142915649166,
-                  "getJobStatus_avg_time" : 0.0
-                },
-                "load" : {
-                  "load_fifteen" : 0.8845,
-                  "load_one" : 0.516833333333,
-                  "load_five" : 0.740888888889
-                },
-                "jvm" : {
-                  "memHeapCommittedM" : 185.1875,
-                  "logFatal" : 0.0,
-                  "threadsBlocked" : 0.0,
-                  "threadsWaiting" : 19.0,
-                  "gcCount" : 0.00277777777778,
-                  "logWarn" : 0.0,
-                  "logError" : 0.0,
-                  "memNonHeapCommittedM" : 23.75,
-                  "gcTimeMillis" : 0.641666666667,
-                  "memNonHeapUsedM" : 23.1303326472,
-                  "logInfo" : 0.0,
-                  "threadsNew" : 0.0,
-                  "memHeapUsedM" : 101.890453742,
-                  "threadsTerminated" : 0.0,
-                  "threadsTimedWaiting" : 10.0,
-                  "threadsRunnable" : 6.0
-                },
-                "network" : {
-                  "pkts_out" : 0.455111111111,
-                  "bytes_in" : 50.645,
-                  "bytes_out" : 119.762888889,
-                  "pkts_in" : 0.411222222222
-                },
-                "memory" : {
-                  "mem_total" : 2054932.0,
-                  "swap_free" : 4074713.26667,
-                  "mem_buffers" : 13563.6333333,
-                  "mem_shared" : 0.0,
-                  "mem_cached" : 175195.088889,
-                  "mem_free" : 117578.155556,
-                  "swap_total" : 4128760.0
-                }
-              },
-              "component" : [
-                {
-                  "href" : "http://localhost:8080/api/clusters/mycluster/services/MAPREDUCE/components/JOBTRACKER",
-                  "ServiceComponentInfo" : {
-                    "cluster_name" : "mycluster",
-                    "component_name" : "JOBTRACKER",
-                    "service_name" : "MAPREDUCE"
-                  }
-                }
-              ]
-            }
-          ]
-        },
-        {
-          "href" : "http://localhost:8080/api/clusters/mycluster/services/MAPREDUCE/components/TASKTRACKER",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "TASKTRACKER",
-            "service_name" : "MAPREDUCE"
-          },
-          "host_components" : [
-            {
-              "href" : "http://localhost:8080/api/clusters/mycluster/hosts/dev.hortonworks.com/host_components/TASKTRACKER",
-              "HostRoles" : {
-                "configs" : "{\"mapred-site\":\"version1\",\"global\":\"version1\",\"core-site\":\"version1\"}",
-                "cluster_name" : "mycluster",
-                "desired_configs" : "{}",
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "TASKTRACKER",
-                "host_name" : "dev.hortonworks.com"
-              },
-              "metrics" : {
-                "boottime" : 1.353113263E9,
-                "process" : {
-                  "proc_total" : 542.625,
-                  "proc_run" : 1.81944444444
-                },
-                "rpc" : {
-                  "rpcAuthorizationSuccesses" : 7.15827882667E7,
-                  "SentBytes" : 7.15827882667E7,
-                  "rpcAuthorizationFailures" : 0.0,
-                  "ReceivedBytes" : 7.15827882667E7,
-                  "NumOpenConnections" : 0.0,
-                  "callQueueLen" : 0.0,
-                  "rpcAuthenticationSuccesses" : 0.0,
-                  "RpcQueueTime_num_ops" : 7.15827882667E7,
-                  "RpcProcessingTime_num_ops" : 7.15827882667E7,
-                  "RpcProcessingTime_avg_time" : 0.151111111111,
-                  "rpcAuthenticationFailures" : 0.0,
-                  "RpcQueueTime_avg_time" : 0.151111111111
-                },
-                "ugi" : {
-                  "loginSuccess_num_ops" : 0.0,
-                  "loginFailure_num_ops" : 0.0,
-                  "loginSuccess_avg_time" : 0.0,
-                  "loginFailure_avg_time" : 0.0
-                },
-                "mapred" : {
-                  "shuffleOutput" : {
-                    "shuffle_handler_busy_percent" : 0.0
-                  },
-                  "tasktracker" : {
-                    "reduces_running" : 0.0,
-                    "maps_running" : 0.0,
-                    "reduceTaskSlots" : 2.0,
-                    "mapTaskSlots" : 4.0
-                  }
-                },
-                "disk" : {
-                  "disk_total" : 101.515,
-                  "disk_free" : 93.4107777778,
-                  "part_max_used" : 12.8
-                },
-                "cpu" : {
-                  "cpu_speed" : 1986.0,
-                  "cpu_wio" : 0.519444444444,
-                  "cpu_num" : 1.0,
-                  "cpu_idle" : 46.9705555556,
-                  "cpu_nice" : 0.0,
-                  "cpu_aidle" : 0.0,
-                  "cpu_system" : 6.75972222222,
-                  "cpu_user" : 45.7563888889
-                },
-                "rpcdetailed" : {
-                  "getProtocolVersion_avg_time" : 0.0,
-                  "getProtocolVersion_num_ops" : 4
-                },
-                "load" : {
-                  "load_fifteen" : 0.882305555556,
-                  "load_one" : 0.523944444444,
-                  "load_five" : 0.739694444444
-                },
-                "jvm" : {
-                  "memHeapCommittedM" : 30.375,
-                  "logFatal" : 0.0,
-                  "threadsBlocked" : 0.0,
-                  "threadsWaiting" : 14.5333333333,
-                  "gcCount" : 7.15827883321E7,
-                  "logWarn" : 0.0,
-                  "logError" : 0.0,
-                  "memNonHeapCommittedM" : 23.1875,
-                  "gcTimeMillis" : 7.15827883254E7,
-                  "memNonHeapUsedM" : 21.7573377917,
-                  "logInfo" : 0.0,
-                  "threadsNew" : 0.0,
-                  "memHeapUsedM" : 7.07001514861,
-                  "threadsTerminated" : 0.0,
-                  "threadsTimedWaiting" : 8.71666666667,
-                  "threadsRunnable" : 6.26666666667
-                },
-                "network" : {
-                  "pkts_out" : 0.453888888889,
-                  "bytes_in" : 50.5375,
-                  "bytes_out" : 119.456111111,
-                  "pkts_in" : 0.410277777778
-                },
-                "memory" : {
-                  "mem_total" : 2054932.0,
-                  "swap_free" : 4074707.96667,
-                  "mem_buffers" : 13563.6555556,
-                  "mem_shared" : 0.0,
-                  "mem_cached" : 175260.0,
-                  "mem_free" : 115051.811111,
-                  "swap_total" : 4128760.0
-                }
-              },
-              "component" : [
-                {
-                  "href" : "http://localhost:8080/api/clusters/mycluster/services/MAPREDUCE/components/TASKTRACKER",
-                  "ServiceComponentInfo" : {
-                    "cluster_name" : "mycluster",
-                    "component_name" : "TASKTRACKER",
-                    "service_name" : "MAPREDUCE"
-                  }
-                }
-              ]
-            }
-          ]
-        }
-      ]
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/dashboard/serviceComponents.json b/branch-1.2/ambari-web/app/assets/data/dashboard/serviceComponents.json
deleted file mode 100644
index 49828f1..0000000
--- a/branch-1.2/ambari-web/app/assets/data/dashboard/serviceComponents.json
+++ /dev/null
@@ -1,203 +0,0 @@
-{
-  "href" : "http://ambari/api/clusters/vmc/services?ServiceInfo/service_name!=MISCELLANEOUS&ServiceInfo/service_name!=DASHBOARD&fields=components/ServiceComponentInfo",
-  "items" : [
-    {
-      "href" : "http://ambari/api/clusters/vmc/services/HDFS",
-      "ServiceInfo" : {
-        "cluster_name" : "vmc",
-        "service_name" : "HDFS"
-      },
-      "components" : [
-        {
-          "href" : "http://ambari/api/clusters/vmc/services/HDFS/components/DATANODE",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "vmc",
-            "desired_configs" : "{}",
-            "state" : "STARTED",
-            "component_name" : "DATANODE",
-            "service_name" : "HDFS"
-          }
-        },
-        {
-          "href" : "http://ambari/api/clusters/vmc/services/HDFS/components/SECONDARY_NAMENODE",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "vmc",
-            "desired_configs" : "{}",
-            "state" : "STARTED",
-            "component_name" : "SECONDARY_NAMENODE",
-            "service_name" : "HDFS"
-          }
-        },
-        {
-          "href" : "http://ambari/api/clusters/vmc/services/HDFS/components/NAMENODE",
-          "ServiceComponentInfo" : {
-            "LiveNodes" : "{\"hostname\":{\"usedSpace\":200704,\"lastContact\":2}}",
-            "CapacityUsed" : 200704,
-            "component_name" : "NAMENODE",
-            "state" : "STARTED",
-            "HeapMemoryUsed" : 60622032,
-            "service_name" : "HDFS",
-            "UpgradeFinalized" : true,
-            "HeapMemoryMax" : 1006632960,
-            "cluster_name" : "vmc",
-            "DecomNodes" : "{}",
-            "Safemode" : "",
-            "desired_configs" : "{}",
-            "CapacityTotal" : 52844687359,
-            "StartTime" : 1352767879543,
-            "Version" : "1.1.0.1, r",
-            "DeadNodes" : "{}"
-          }
-        },
-        {
-          "href" : "http://ambari/api/clusters/vmc/services/HDFS/components/HDFS_CLIENT",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "vmc",
-            "desired_configs" : "{}",
-            "state" : "INSTALLED",
-            "component_name" : "HDFS_CLIENT",
-            "service_name" : "HDFS"
-          }
-        }
-      ]
-    },
-    {
-      "href" : "http://ambari/api/clusters/vmc/services/NAGIOS",
-      "ServiceInfo" : {
-        "cluster_name" : "vmc",
-        "service_name" : "NAGIOS"
-      },
-      "components" : [
-        {
-          "href" : "http://ambari/api/clusters/vmc/services/NAGIOS/components/NAGIOS_SERVER",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "vmc",
-            "desired_configs" : "{}",
-            "state" : "STARTED",
-            "component_name" : "NAGIOS_SERVER",
-            "service_name" : "NAGIOS"
-          }
-        }
-      ]
-    },
-    {
-      "href" : "http://ambari/api/clusters/vmc/services/GANGLIA",
-      "ServiceInfo" : {
-        "cluster_name" : "vmc",
-        "service_name" : "GANGLIA"
-      },
-      "components" : [
-        {
-          "href" : "http://ambari/api/clusters/vmc/services/GANGLIA/components/GANGLIA_SERVER",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "vmc",
-            "desired_configs" : "{}",
-            "state" : "STARTED",
-            "component_name" : "GANGLIA_SERVER",
-            "service_name" : "GANGLIA"
-          }
-        },
-        {
-          "href" : "http://ambari/api/clusters/vmc/services/GANGLIA/components/GANGLIA_MONITOR",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "vmc",
-            "desired_configs" : "{}",
-            "state" : "STARTED",
-            "component_name" : "GANGLIA_MONITOR",
-            "service_name" : "GANGLIA"
-          }
-        }
-      ]
-    },
-    {
-      "href" : "http://ambari/api/clusters/vmc/services/MAPREDUCE",
-      "ServiceInfo" : {
-        "cluster_name" : "vmc",
-        "service_name" : "MAPREDUCE"
-      },
-      "components" : [
-        {
-          "href" : "http://ambari/api/clusters/vmc/services/MAPREDUCE/components/TASKTRACKER",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "vmc",
-            "desired_configs" : "{}",
-            "state" : "STARTED",
-            "component_name" : "TASKTRACKER",
-            "service_name" : "MAPREDUCE"
-          }
-        },
-        {
-          "href" : "http://ambari/api/clusters/vmc/services/MAPREDUCE/components/MAPREDUCE_CLIENT",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "vmc",
-            "desired_configs" : "{}",
-            "state" : "INSTALLED",
-            "component_name" : "MAPREDUCE_CLIENT",
-            "service_name" : "MAPREDUCE"
-          }
-        },
-        {
-          "href" : "http://ambari/api/clusters/vmc/services/MAPREDUCE/components/JOBTRACKER",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "vmc",
-            "desired_configs" : "{}",
-            "state" : "STARTED",
-            "component_name" : "JOBTRACKER",
-            "HeapMemoryUsed" : 144639872,
-            "BlackListedNodes" : "[]",
-            "StartTime" : 1352768002344,
-            "service_name" : "MAPREDUCE",
-            "AliveNodes" : "[{\"hostname\":\"hostname\",\"last_seen\":1352854673780,\"health\":\"OK\",\"slots\":{\"map_slots\":4,\"map_slots_used\":0,\"reduce_slots\":2,\"reduce_slots_used\":0},\"failures\":0,\"dir_failures\":0}]",
-            "HeapMemoryMax" : 1052770304,
-            "Version" : "1.1.0.1, r",
-            "GrayListedNodes" : "[]"
-          }
-        }
-      ]
-    },
-    {
-      "href" : "http://ambari/clusters/vmc/services/HBASE",
-      "ServiceInfo" : {
-        "cluster_name" : "vmc",
-        "service_name" : "HBASE"
-      },
-      "components" : [
-        {
-          "href" : "http://ambari/clusters/vmc/services/HBASE/components/HBASE_MASTER",
-          "ServiceComponentInfo" : {
-            "MasterStartTime" : 1350859237269,
-            "cluster_name" : "vmc",
-            "RegionsInTransition" : "[]",
-            "MasterActiveTime" : 1350859237344,
-            "component_name" : "HBASE_MASTER",
-            "state" : "STARTED",
-            "HeapMemoryUsed" : 9864776,
-            "Revision" : "Unknown",
-            "service_name" : "HBASE",
-            "HeapMemoryMax" : 1807613952,
-            "Version" : "0.92.1.14",
-            "AverageLoad" : 1.0
-          }
-        },
-        {
-          "href" : "http://ambari/clusters/vmc/services/HBASE/components/HBASE_CLIENT",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "vmc",
-            "component_name" : "HBASE_CLIENT",
-            "state" : "INSTALLED",
-            "service_name" : "HBASE"
-          }
-        },
-        {
-          "href" : "http://ambari/clusters/vmc/services/HBASE/components/HBASE_REGIONSERVER",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "vmc",
-            "component_name" : "HBASE_REGIONSERVER",
-            "state" : "STARTED",
-            "service_name" : "HBASE"
-          }
-        }
-      ]
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/dashboard/services.json b/branch-1.2/ambari-web/app/assets/data/dashboard/services.json
deleted file mode 100644
index 177b200..0000000
--- a/branch-1.2/ambari-web/app/assets/data/dashboard/services.json
+++ /dev/null
@@ -1,1527 +0,0 @@
-{
-  "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/services?fields=components/ServiceComponentInfo,components/host_components,components/host_components/HostRoles&_=1358264805285",
-  "items" : [
-    {
-      "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/services/HBASE",
-      "ServiceInfo" : {
-        "cluster_name" : "cl1",
-        "service_name" : "HBASE"
-      },
-      "components" : [
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/services/HBASE/components/HBASE_REGIONSERVER",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "cl1",
-            "desired_configs" : { },
-            "state" : "STARTED",
-            "component_name" : "HBASE_REGIONSERVER",
-            "service_name" : "HBASE"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/HBASE_REGIONSERVER",
-              "HostRoles" : {
-                "configs" : {
-                  "hbase-site" : "version1",
-                  "global" : "version1"
-                },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "HBASE_REGIONSERVER",
-                "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-16-48-4B.compute-1.internal/host_components/HBASE_REGIONSERVER",
-              "HostRoles" : {
-                "configs" : {
-                  "hbase-site" : "version1",
-                  "global" : "version1"
-                },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "HBASE_REGIONSERVER",
-                "host_name" : "domU-12-31-39-16-48-4B.compute-1.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-191-202-42.ec2.internal/host_components/HBASE_REGIONSERVER",
-              "HostRoles" : {
-                "configs" : {
-                  "hbase-site" : "version1",
-                  "global" : "version1"
-                },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "HBASE_REGIONSERVER",
-                "host_name" : "ip-10-191-202-42.ec2.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-38-164.ec2.internal/host_components/HBASE_REGIONSERVER",
-              "HostRoles" : {
-                "configs" : {
-                  "hbase-site" : "version1",
-                  "global" : "version1"
-                },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "HBASE_REGIONSERVER",
-                "host_name" : "ip-10-110-38-164.ec2.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-79-42.ec2.internal/host_components/HBASE_REGIONSERVER",
-              "HostRoles" : {
-                "configs" : {
-                  "hbase-site" : "version1",
-                  "global" : "version1"
-                },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "HBASE_REGIONSERVER",
-                "host_name" : "ip-10-110-79-42.ec2.internal"
-              }
-            }
-          ]
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/services/HBASE/components/HBASE_MASTER",
-          "ServiceComponentInfo" : {
-            "MasterStartTime" : 1358245370553,
-            "MasterActiveTime" : 1358245370618,
-            "RegionsInTransition" : [ ],
-            "component_name" : "HBASE_MASTER",
-            "state" : "STARTED",
-            "HeapMemoryUsed" : 14455400,
-            "Revision" : "Unknown",
-            "service_name" : "HBASE",
-            "HeapMemoryMax" : 1069416448,
-            "AverageLoad" : 1.0,
-            "cluster_name" : "cl1",
-            "desired_configs" : { },
-            "Version" : "0.94.2.21"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-191-202-42.ec2.internal/host_components/HBASE_MASTER",
-              "HostRoles" : {
-                "configs" : {
-                  "hbase-site" : "version1",
-                  "global" : "version1"
-                },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "HBASE_MASTER",
-                "host_name" : "ip-10-191-202-42.ec2.internal"
-              }
-            }
-          ]
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/services/HBASE/components/HBASE_CLIENT",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "cl1",
-            "desired_configs" : { },
-            "state" : "INSTALLED",
-            "component_name" : "HBASE_CLIENT",
-            "service_name" : "HBASE"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/HBASE_CLIENT",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "HBASE_CLIENT",
-                "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-191-202-42.ec2.internal/host_components/HBASE_CLIENT",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "HBASE_CLIENT",
-                "host_name" : "ip-10-191-202-42.ec2.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-38-164.ec2.internal/host_components/HBASE_CLIENT",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "HBASE_CLIENT",
-                "host_name" : "ip-10-110-38-164.ec2.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-79-42.ec2.internal/host_components/HBASE_CLIENT",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "HBASE_CLIENT",
-                "host_name" : "ip-10-110-79-42.ec2.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-16-48-4B.compute-1.internal/host_components/HBASE_CLIENT",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "HBASE_CLIENT",
-                "host_name" : "domU-12-31-39-16-48-4B.compute-1.internal"
-              }
-            }
-          ]
-        }
-      ]
-    },
-    {
-      "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/services/NAGIOS",
-      "ServiceInfo" : {
-        "cluster_name" : "cl1",
-        "service_name" : "NAGIOS"
-      },
-      "components" : [
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/services/NAGIOS/components/NAGIOS_SERVER",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "cl1",
-            "desired_configs" : { },
-            "state" : "STARTED",
-            "component_name" : "NAGIOS_SERVER",
-            "service_name" : "NAGIOS"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-191-202-42.ec2.internal/host_components/NAGIOS_SERVER",
-              "HostRoles" : {
-                "configs" : {
-                  "global" : "version1"
-                },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "NAGIOS_SERVER",
-                "host_name" : "ip-10-191-202-42.ec2.internal"
-              }
-            }
-          ]
-        }
-      ]
-    },
-    {
-      "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/services/SQOOP",
-      "ServiceInfo" : {
-        "cluster_name" : "cl1",
-        "service_name" : "SQOOP"
-      },
-      "components" : [
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/services/SQOOP/components/SQOOP",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "cl1",
-            "desired_configs" : { },
-            "state" : "INSTALLED",
-            "component_name" : "SQOOP",
-            "service_name" : "SQOOP"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-191-202-42.ec2.internal/host_components/SQOOP",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "SQOOP",
-                "host_name" : "ip-10-191-202-42.ec2.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-16-48-4B.compute-1.internal/host_components/SQOOP",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "SQOOP",
-                "host_name" : "domU-12-31-39-16-48-4B.compute-1.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-38-164.ec2.internal/host_components/SQOOP",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "SQOOP",
-                "host_name" : "ip-10-110-38-164.ec2.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/SQOOP",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "SQOOP",
-                "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-79-42.ec2.internal/host_components/SQOOP",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "SQOOP",
-                "host_name" : "ip-10-110-79-42.ec2.internal"
-              }
-            }
-          ]
-        }
-      ]
-    },
-    {
-      "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/services/HDFS",
-      "ServiceInfo" : {
-        "cluster_name" : "cl1",
-        "service_name" : "HDFS"
-      },
-      "components" : [
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/services/HDFS/components/HDFS_CLIENT",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "cl1",
-            "desired_configs" : { },
-            "state" : "INSTALLED",
-            "component_name" : "HDFS_CLIENT",
-            "service_name" : "HDFS"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-16-48-4B.compute-1.internal/host_components/HDFS_CLIENT",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "HDFS_CLIENT",
-                "host_name" : "domU-12-31-39-16-48-4B.compute-1.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-191-202-42.ec2.internal/host_components/HDFS_CLIENT",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "HDFS_CLIENT",
-                "host_name" : "ip-10-191-202-42.ec2.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-38-164.ec2.internal/host_components/HDFS_CLIENT",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "HDFS_CLIENT",
-                "host_name" : "ip-10-110-38-164.ec2.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-79-42.ec2.internal/host_components/HDFS_CLIENT",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "HDFS_CLIENT",
-                "host_name" : "ip-10-110-79-42.ec2.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/HDFS_CLIENT",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "HDFS_CLIENT",
-                "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-              }
-            }
-          ]
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/services/HDFS/components/DATANODE",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "cl1",
-            "desired_configs" : { },
-            "state" : "STARTED",
-            "component_name" : "DATANODE",
-            "service_name" : "HDFS"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-38-164.ec2.internal/host_components/DATANODE",
-              "HostRoles" : {
-                "configs" : {
-                  "hdfs-site" : "version1",
-                  "global" : "version1",
-                  "core-site" : "version1"
-                },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "DATANODE",
-                "host_name" : "ip-10-110-38-164.ec2.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/DATANODE",
-              "HostRoles" : {
-                "configs" : {
-                  "hdfs-site" : "version1",
-                  "global" : "version1",
-                  "core-site" : "version1"
-                },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "DATANODE",
-                "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-191-202-42.ec2.internal/host_components/DATANODE",
-              "HostRoles" : {
-                "configs" : {
-                  "hdfs-site" : "version1",
-                  "global" : "version1",
-                  "core-site" : "version1"
-                },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "DATANODE",
-                "host_name" : "ip-10-191-202-42.ec2.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-79-42.ec2.internal/host_components/DATANODE",
-              "HostRoles" : {
-                "configs" : {
-                  "hdfs-site" : "version1",
-                  "global" : "version1",
-                  "core-site" : "version1"
-                },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "DATANODE",
-                "host_name" : "ip-10-110-79-42.ec2.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-16-48-4B.compute-1.internal/host_components/DATANODE",
-              "HostRoles" : {
-                "configs" : {
-                  "hdfs-site" : "version1",
-                  "global" : "version1",
-                  "core-site" : "version1"
-                },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "DATANODE",
-                "host_name" : "domU-12-31-39-16-48-4B.compute-1.internal"
-              }
-            }
-          ]
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/services/HDFS/components/NAMENODE",
-          "ServiceComponentInfo" : {
-            "PercentRemaining" : 94.86242,
-            "CapacityUsed" : 482971648,
-            "state" : "STARTED",
-            "HeapMemoryUsed" : 324295392,
-            "service_name" : "HDFS",
-            "UpgradeFinalized" : true,
-            "HeapMemoryMax" : 1006632960,
-            "DecomNodes" : "{}",
-            "Safemode" : "",
-            "CapacityRemaining" : 4210552782848,
-            "StartTime" : 1358245243704,
-            "Version" : "1.1.2.21, r",
-            "BlocksTotal" : 248,
-            "LiveNodes" : "{\"ip-10-110-38-164.ec2.internal\":{\"usedSpace\":87072768,\"lastContact\":1},\"ip-10-110-79-42.ec2.internal\":{\"usedSpace\":133492736,\"lastContact\":1},\"ip-10-191-202-42.ec2.internal\":{\"usedSpace\":65101824,\"lastContact\":1},\"domU-12-31-39-0E-E6-01.compute-1.internal\":{\"usedSpace\":160288768,\"lastContact\":0},\"domU-12-31-39-16-48-4B.compute-1.internal\":{\"usedSpace\":37015552,\"lastContact\":1}}",
-            "component_name" : "NAMENODE",
-            "PercentUsed" : 0.010881199,
-            "TotalFiles" : 375,
-            "NonDfsUsedSpace" : 227552702454,
-            "MissingBlocks" : 0,
-            "cluster_name" : "cl1",
-            "desired_configs" : { },
-            "UnderReplicatedBlocks" : 0,
-            "CapacityTotal" : 4438588456950,
-            "CorruptBlocks" : 0,
-            "DeadNodes" : "{}"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-191-202-42.ec2.internal/host_components/NAMENODE",
-              "HostRoles" : {
-                "configs" : {
-                  "hdfs-site" : "version1",
-                  "global" : "version1",
-                  "core-site" : "version1"
-                },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "NAMENODE",
-                "host_name" : "ip-10-191-202-42.ec2.internal"
-              }
-            }
-          ]
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/services/HDFS/components/SECONDARY_NAMENODE",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "cl1",
-            "desired_configs" : { },
-            "state" : "STARTED",
-            "component_name" : "SECONDARY_NAMENODE",
-            "service_name" : "HDFS"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/SECONDARY_NAMENODE",
-              "HostRoles" : {
-                "configs" : {
-                  "hdfs-site" : "version1",
-                  "global" : "version1",
-                  "core-site" : "version1"
-                },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "SECONDARY_NAMENODE",
-                "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-              }
-            }
-          ]
-        }
-      ]
-    },
-    {
-      "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/services/MAPREDUCE",
-      "ServiceInfo" : {
-        "cluster_name" : "cl1",
-        "service_name" : "MAPREDUCE"
-      },
-      "components" : [
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/services/MAPREDUCE/components/MAPREDUCE_CLIENT",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "cl1",
-            "desired_configs" : { },
-            "state" : "INSTALLED",
-            "component_name" : "MAPREDUCE_CLIENT",
-            "service_name" : "MAPREDUCE"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-16-48-4B.compute-1.internal/host_components/MAPREDUCE_CLIENT",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "MAPREDUCE_CLIENT",
-                "host_name" : "domU-12-31-39-16-48-4B.compute-1.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-38-164.ec2.internal/host_components/MAPREDUCE_CLIENT",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "MAPREDUCE_CLIENT",
-                "host_name" : "ip-10-110-38-164.ec2.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-191-202-42.ec2.internal/host_components/MAPREDUCE_CLIENT",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "MAPREDUCE_CLIENT",
-                "host_name" : "ip-10-191-202-42.ec2.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/MAPREDUCE_CLIENT",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "MAPREDUCE_CLIENT",
-                "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-79-42.ec2.internal/host_components/MAPREDUCE_CLIENT",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "MAPREDUCE_CLIENT",
-                "host_name" : "ip-10-110-79-42.ec2.internal"
-              }
-            }
-          ]
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/services/MAPREDUCE/components/TASKTRACKER",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "cl1",
-            "desired_configs" : { },
-            "state" : "STARTED",
-            "component_name" : "TASKTRACKER",
-            "service_name" : "MAPREDUCE"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-79-42.ec2.internal/host_components/TASKTRACKER",
-              "HostRoles" : {
-                "configs" : {
-                  "mapred-site" : "version1",
-                  "global" : "version1",
-                  "core-site" : "version1"
-                },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "TASKTRACKER",
-                "host_name" : "ip-10-110-79-42.ec2.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-191-202-42.ec2.internal/host_components/TASKTRACKER",
-              "HostRoles" : {
-                "configs" : {
-                  "mapred-site" : "version1",
-                  "global" : "version1",
-                  "core-site" : "version1"
-                },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "TASKTRACKER",
-                "host_name" : "ip-10-191-202-42.ec2.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-16-48-4B.compute-1.internal/host_components/TASKTRACKER",
-              "HostRoles" : {
-                "configs" : {
-                  "mapred-site" : "version1",
-                  "global" : "version1",
-                  "core-site" : "version1"
-                },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "TASKTRACKER",
-                "host_name" : "domU-12-31-39-16-48-4B.compute-1.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/TASKTRACKER",
-              "HostRoles" : {
-                "configs" : {
-                  "mapred-site" : "version1",
-                  "global" : "version1",
-                  "core-site" : "version1"
-                },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "TASKTRACKER",
-                "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-38-164.ec2.internal/host_components/TASKTRACKER",
-              "HostRoles" : {
-                "configs" : {
-                  "mapred-site" : "version1",
-                  "global" : "version1",
-                  "core-site" : "version1"
-                },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "TASKTRACKER",
-                "host_name" : "ip-10-110-38-164.ec2.internal"
-              }
-            }
-          ]
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/services/MAPREDUCE/components/JOBTRACKER",
-          "ServiceComponentInfo" : {
-            "component_name" : "JOBTRACKER",
-            "state" : "STARTED",
-            "HeapMemoryUsed" : 129384032,
-            "service_name" : "MAPREDUCE",
-            "HeapMemoryMax" : 1052770304,
-            "cluster_name" : "cl1",
-            "desired_configs" : { },
-            "jobs_running" : 0,
-            "BlackListedNodes" : "[]",
-            "StartTime" : 1358245381436,
-            "AliveNodes" : "[{\"hostname\":\"domU-12-31-39-16-48-4B.compute-1.internal\",\"last_seen\":1358264806195,\"health\":\"OK\",\"slots\":{\"map_slots\":4,\"map_slots_used\":0,\"reduce_slots\":2,\"reduce_slots_used\":0},\"failures\":0,\"dir_failures\":0},{\"hostname\":\"ip-10-110-38-164.ec2.internal\",\"last_seen\":1358264806343,\"health\":\"OK\",\"slots\":{\"map_slots\":4,\"map_slots_used\":0,\"reduce_slots\":2,\"reduce_slots_used\":0},\"failures\":0,\"dir_failures\":0},{\"hostname\":\"domU-12-31-39-0E-E6-01.compute-1.internal\",\"last_seen\":1358264806332,\"health\":\"OK\",\"slots\":{\"map_slots\":4,\"map_slots_used\":0,\"reduce_slots\":2,\"reduce_slots_used\":0},\"failures\":0,\"dir_failures\":0},{\"hostname\":\"ip-10-191-202-42.ec2.internal\",\"last_seen\":1358264806381,\"health\":\"OK\",\"slots\":{\"map_slots\":4,\"map_slots_used\":0,\"reduce_slots\":2,\"reduce_slots_used\":0},\"failures\":0,\"dir_failures\":0},{\"hostname\":\"ip-10-110-79-42.ec2.internal\",\"last_seen\":1358264806352,\"health\":\"OK\",\"slots\":{\"map_slots\":4,\"map_slots_used\":0,\"reduce_slots\":2,\"reduce_slots_used\":0},\"failures\":0,\"dir_failures\":0}]",
-            "Version" : "1.1.2.21, r",
-            "GrayListedNodes" : "[]",
-            "Queue" : {
-              "reserved_reduce_slots" : 0,
-              "running_300" : 0,
-              "jobs_completed" : 4,
-              "reserved_map_slots" : 0,
-              "running_0" : 0,
-              "waiting_maps" : 0,
-              "running_1440" : 0,
-              "running_60" : 0,
-              "jobs_submitted" : 4,
-              "waiting_reduces" : 0
-            },
-            "jobtracker" : {
-              "reserved_reduce_slots" : 0,
-              "running_maps" : 0,
-              "jobs_running" : 0,
-              "running_reduces" : 0,
-              "occupied_map_slots" : 0,
-              "reserved_map_slots" : 0,
-              "jobs_completed" : 4,
-              "waiting_maps" : 0,
-              "jobs_submitted" : 4,
-              "occupied_reduce_slots" : 0,
-              "waiting_reduces" : 0
-            }
-          },
-          "host_components" : [
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/JOBTRACKER",
-              "HostRoles" : {
-                "configs" : {
-                  "mapred-site" : "version1",
-                  "global" : "version1",
-                  "core-site" : "version1"
-                },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "JOBTRACKER",
-                "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-              }
-            }
-          ]
-        }
-      ]
-    },
-    {
-      "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/services/HIVE",
-      "ServiceInfo" : {
-        "cluster_name" : "cl1",
-        "service_name" : "HIVE"
-      },
-      "components" : [
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/services/HIVE/components/HIVE_METASTORE",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "cl1",
-            "desired_configs" : { },
-            "state" : "STARTED",
-            "component_name" : "HIVE_METASTORE",
-            "service_name" : "HIVE"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/HIVE_METASTORE",
-              "HostRoles" : {
-                "configs" : {
-                  "global" : "version1",
-                  "hive-site" : "version1"
-                },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "HIVE_METASTORE",
-                "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-              }
-            }
-          ]
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/services/HIVE/components/HIVE_CLIENT",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "cl1",
-            "desired_configs" : { },
-            "state" : "INSTALLED",
-            "component_name" : "HIVE_CLIENT",
-            "service_name" : "HIVE"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-38-164.ec2.internal/host_components/HIVE_CLIENT",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "HIVE_CLIENT",
-                "host_name" : "ip-10-110-38-164.ec2.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/HIVE_CLIENT",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "HIVE_CLIENT",
-                "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-79-42.ec2.internal/host_components/HIVE_CLIENT",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "HIVE_CLIENT",
-                "host_name" : "ip-10-110-79-42.ec2.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-16-48-4B.compute-1.internal/host_components/HIVE_CLIENT",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "HIVE_CLIENT",
-                "host_name" : "domU-12-31-39-16-48-4B.compute-1.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-191-202-42.ec2.internal/host_components/HIVE_CLIENT",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "HIVE_CLIENT",
-                "host_name" : "ip-10-191-202-42.ec2.internal"
-              }
-            }
-          ]
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/services/HIVE/components/MYSQL_SERVER",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "cl1",
-            "desired_configs" : { },
-            "state" : "STARTED",
-            "component_name" : "MYSQL_SERVER",
-            "service_name" : "HIVE"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/MYSQL_SERVER",
-              "HostRoles" : {
-                "configs" : {
-                  "global" : "version1",
-                  "hive-site" : "version1"
-                },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "MYSQL_SERVER",
-                "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-              }
-            }
-          ]
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/services/HIVE/components/HIVE_SERVER",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "cl1",
-            "desired_configs" : { },
-            "state" : "STARTED",
-            "component_name" : "HIVE_SERVER",
-            "service_name" : "HIVE"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/HIVE_SERVER",
-              "HostRoles" : {
-                "configs" : {
-                  "global" : "version1",
-                  "hive-site" : "version1"
-                },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "HIVE_SERVER",
-                "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-              }
-            }
-          ]
-        }
-      ]
-    },
-    {
-      "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/services/WEBHCAT",
-      "ServiceInfo" : {
-        "cluster_name" : "cl1",
-        "service_name" : "WEBHCAT"
-      },
-      "components" : [
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/services/WEBHCAT/components/WEBHCAT_SERVER",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "cl1",
-            "desired_configs" : { },
-            "state" : "STARTED",
-            "component_name" : "WEBHCAT_SERVER",
-            "service_name" : "WEBHCAT"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/WEBHCAT_SERVER",
-              "HostRoles" : {
-                "configs" : {
-                  "global" : "version1",
-                  "webhcat-site" : "version1"
-                },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "WEBHCAT_SERVER",
-                "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-              }
-            }
-          ]
-        }
-      ]
-    },
-    {
-      "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/services/PIG",
-      "ServiceInfo" : {
-        "cluster_name" : "cl1",
-        "service_name" : "PIG"
-      },
-      "components" : [
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/services/PIG/components/PIG",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "cl1",
-            "desired_configs" : { },
-            "state" : "INSTALLED",
-            "component_name" : "PIG",
-            "service_name" : "PIG"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-38-164.ec2.internal/host_components/PIG",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "PIG",
-                "host_name" : "ip-10-110-38-164.ec2.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-191-202-42.ec2.internal/host_components/PIG",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "PIG",
-                "host_name" : "ip-10-191-202-42.ec2.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-79-42.ec2.internal/host_components/PIG",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "PIG",
-                "host_name" : "ip-10-110-79-42.ec2.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-16-48-4B.compute-1.internal/host_components/PIG",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "PIG",
-                "host_name" : "domU-12-31-39-16-48-4B.compute-1.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/PIG",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "PIG",
-                "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-              }
-            }
-          ]
-        }
-      ]
-    },
-    {
-      "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/services/GANGLIA",
-      "ServiceInfo" : {
-        "cluster_name" : "cl1",
-        "service_name" : "GANGLIA"
-      },
-      "components" : [
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/services/GANGLIA/components/GANGLIA_SERVER",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "cl1",
-            "desired_configs" : { },
-            "state" : "STARTED",
-            "component_name" : "GANGLIA_SERVER",
-            "service_name" : "GANGLIA"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-191-202-42.ec2.internal/host_components/GANGLIA_SERVER",
-              "HostRoles" : {
-                "configs" : {
-                  "global" : "version1"
-                },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "GANGLIA_SERVER",
-                "host_name" : "ip-10-191-202-42.ec2.internal"
-              }
-            }
-          ]
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/services/GANGLIA/components/GANGLIA_MONITOR",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "cl1",
-            "desired_configs" : { },
-            "state" : "STARTED",
-            "component_name" : "GANGLIA_MONITOR",
-            "service_name" : "GANGLIA"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-79-42.ec2.internal/host_components/GANGLIA_MONITOR",
-              "HostRoles" : {
-                "configs" : {
-                  "global" : "version1"
-                },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "GANGLIA_MONITOR",
-                "host_name" : "ip-10-110-79-42.ec2.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/GANGLIA_MONITOR",
-              "HostRoles" : {
-                "configs" : {
-                  "global" : "version1"
-                },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "GANGLIA_MONITOR",
-                "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-38-164.ec2.internal/host_components/GANGLIA_MONITOR",
-              "HostRoles" : {
-                "configs" : {
-                  "global" : "version1"
-                },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "GANGLIA_MONITOR",
-                "host_name" : "ip-10-110-38-164.ec2.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-191-202-42.ec2.internal/host_components/GANGLIA_MONITOR",
-              "HostRoles" : {
-                "configs" : {
-                  "global" : "version1"
-                },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "GANGLIA_MONITOR",
-                "host_name" : "ip-10-191-202-42.ec2.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-16-48-4B.compute-1.internal/host_components/GANGLIA_MONITOR",
-              "HostRoles" : {
-                "configs" : {
-                  "global" : "version1"
-                },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "GANGLIA_MONITOR",
-                "host_name" : "domU-12-31-39-16-48-4B.compute-1.internal"
-              }
-            }
-          ]
-        }
-      ]
-    },
-    {
-      "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/services/OOZIE",
-      "ServiceInfo" : {
-        "cluster_name" : "cl1",
-        "service_name" : "OOZIE"
-      },
-      "components" : [
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/services/OOZIE/components/OOZIE_SERVER",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "cl1",
-            "desired_configs" : { },
-            "state" : "STARTED",
-            "component_name" : "OOZIE_SERVER",
-            "service_name" : "OOZIE"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/OOZIE_SERVER",
-              "HostRoles" : {
-                "configs" : {
-                  "global" : "version1",
-                  "oozie-site" : "version1"
-                },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "OOZIE_SERVER",
-                "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-              }
-            }
-          ]
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/services/OOZIE/components/OOZIE_CLIENT",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "cl1",
-            "desired_configs" : { },
-            "state" : "INSTALLED",
-            "component_name" : "OOZIE_CLIENT",
-            "service_name" : "OOZIE"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-191-202-42.ec2.internal/host_components/OOZIE_CLIENT",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "OOZIE_CLIENT",
-                "host_name" : "ip-10-191-202-42.ec2.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-16-48-4B.compute-1.internal/host_components/OOZIE_CLIENT",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "OOZIE_CLIENT",
-                "host_name" : "domU-12-31-39-16-48-4B.compute-1.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-38-164.ec2.internal/host_components/OOZIE_CLIENT",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "OOZIE_CLIENT",
-                "host_name" : "ip-10-110-38-164.ec2.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/OOZIE_CLIENT",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "OOZIE_CLIENT",
-                "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-79-42.ec2.internal/host_components/OOZIE_CLIENT",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "OOZIE_CLIENT",
-                "host_name" : "ip-10-110-79-42.ec2.internal"
-              }
-            }
-          ]
-        }
-      ]
-    },
-    {
-      "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/services/HCATALOG",
-      "ServiceInfo" : {
-        "cluster_name" : "cl1",
-        "service_name" : "HCATALOG"
-      },
-      "components" : [
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/services/HCATALOG/components/HCAT",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "cl1",
-            "desired_configs" : { },
-            "state" : "INSTALLED",
-            "component_name" : "HCAT",
-            "service_name" : "HCATALOG"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-16-48-4B.compute-1.internal/host_components/HCAT",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "HCAT",
-                "host_name" : "domU-12-31-39-16-48-4B.compute-1.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/HCAT",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "HCAT",
-                "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-79-42.ec2.internal/host_components/HCAT",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "HCAT",
-                "host_name" : "ip-10-110-79-42.ec2.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-38-164.ec2.internal/host_components/HCAT",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "HCAT",
-                "host_name" : "ip-10-110-38-164.ec2.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-191-202-42.ec2.internal/host_components/HCAT",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "HCAT",
-                "host_name" : "ip-10-191-202-42.ec2.internal"
-              }
-            }
-          ]
-        }
-      ]
-    },
-    {
-      "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/services/ZOOKEEPER",
-      "ServiceInfo" : {
-        "cluster_name" : "cl1",
-        "service_name" : "ZOOKEEPER"
-      },
-      "components" : [
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/services/ZOOKEEPER/components/ZOOKEEPER_SERVER",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "cl1",
-            "desired_configs" : { },
-            "state" : "STARTED",
-            "component_name" : "ZOOKEEPER_SERVER",
-            "service_name" : "ZOOKEEPER"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-191-202-42.ec2.internal/host_components/ZOOKEEPER_SERVER",
-              "HostRoles" : {
-                "configs" : {
-                  "global" : "version1"
-                },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "ZOOKEEPER_SERVER",
-                "host_name" : "ip-10-191-202-42.ec2.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-16-48-4B.compute-1.internal/host_components/ZOOKEEPER_SERVER",
-              "HostRoles" : {
-                "configs" : {
-                  "global" : "version1"
-                },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "ZOOKEEPER_SERVER",
-                "host_name" : "domU-12-31-39-16-48-4B.compute-1.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/ZOOKEEPER_SERVER",
-              "HostRoles" : {
-                "configs" : {
-                  "global" : "version1"
-                },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "ZOOKEEPER_SERVER",
-                "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-              }
-            }
-          ]
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/services/ZOOKEEPER/components/ZOOKEEPER_CLIENT",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "cl1",
-            "desired_configs" : { },
-            "state" : "INSTALLED",
-            "component_name" : "ZOOKEEPER_CLIENT",
-            "service_name" : "ZOOKEEPER"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-16-48-4B.compute-1.internal/host_components/ZOOKEEPER_CLIENT",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "ZOOKEEPER_CLIENT",
-                "host_name" : "domU-12-31-39-16-48-4B.compute-1.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-191-202-42.ec2.internal/host_components/ZOOKEEPER_CLIENT",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "ZOOKEEPER_CLIENT",
-                "host_name" : "ip-10-191-202-42.ec2.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/ZOOKEEPER_CLIENT",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "ZOOKEEPER_CLIENT",
-                "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-38-164.ec2.internal/host_components/ZOOKEEPER_CLIENT",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "ZOOKEEPER_CLIENT",
-                "host_name" : "ip-10-110-38-164.ec2.internal"
-              }
-            },
-            {
-              "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-79-42.ec2.internal/host_components/ZOOKEEPER_CLIENT",
-              "HostRoles" : {
-                "configs" : { },
-                "cluster_name" : "cl1",
-                "desired_configs" : { },
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "ZOOKEEPER_CLIENT",
-                "host_name" : "ip-10-110-79-42.ec2.internal"
-              }
-            }
-          ]
-        }
-      ]
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/dashboard/services_multi_hosts.json b/branch-1.2/ambari-web/app/assets/data/dashboard/services_multi_hosts.json
deleted file mode 100644
index f2005f7..0000000
--- a/branch-1.2/ambari-web/app/assets/data/dashboard/services_multi_hosts.json
+++ /dev/null
@@ -1,2018 +0,0 @@
-{
-  "href" : "http://ambari:8080/api/clusters/mycluster/services?fields=components/host_components/*",
-  "items" : [
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/services/MAPREDUCE",
-      "ServiceInfo" : {
-        "cluster_name" : "mycluster",
-        "service_name" : "MAPREDUCE"
-      },
-      "components" : [
-        {
-          "href" : "http://ambari:8080/api/clusters/mycluster/services/MAPREDUCE/components/MAPREDUCE_CLIENT",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "MAPREDUCE_CLIENT",
-            "service_name" : "MAPREDUCE"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ambari:8080/api/clusters/mycluster/hosts/ip-10-196-102-80.ec2.internal/host_components/MAPREDUCE_CLIENT",
-              "HostRoles" : {
-                "configs" : "{}",
-                "cluster_name" : "mycluster",
-                "desired_configs" : "{}",
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "MAPREDUCE_CLIENT",
-                "host_name" : "ip-10-196-102-80.ec2.internal"
-              },
-              "component" : [
-                {
-                  "href" : "http://ambari:8080/api/clusters/mycluster/services/MAPREDUCE/components/MAPREDUCE_CLIENT",
-                  "ServiceComponentInfo" : {
-                    "cluster_name" : "mycluster",
-                    "component_name" : "MAPREDUCE_CLIENT",
-                    "service_name" : "MAPREDUCE"
-                  }
-                }
-              ]
-            }
-          ]
-        },
-        {
-          "href" : "http://ambari:8080/api/clusters/mycluster/services/MAPREDUCE/components/JOBTRACKER",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "JOBTRACKER",
-            "service_name" : "MAPREDUCE"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ambari:8080/api/clusters/mycluster/hosts/ip-10-204-102-239.ec2.internal/host_components/JOBTRACKER",
-              "HostRoles" : {
-                "configs" : "{\"mapred-site\":\"version1\",\"global\":\"version1\",\"core-site\":\"version1\"}",
-                "cluster_name" : "mycluster",
-                "desired_configs" : "{}",
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "JOBTRACKER",
-                "host_name" : "ip-10-204-102-239.ec2.internal"
-              },
-              "metrics" : {
-                "boottime" : 1.352944348E9,
-                "process" : {
-                  "proc_total" : 233.130555556,
-                  "proc_run" : 1.67777777778
-                },
-                "rpc" : {
-                  "rpcAuthorizationSuccesses" : 0.0,
-                  "SentBytes" : 697.9075,
-                  "rpcAuthorizationFailures" : 0.0,
-                  "ReceivedBytes" : 2185.0375,
-                  "NumOpenConnections" : 0.0,
-                  "callQueueLen" : 0.0,
-                  "rpcAuthenticationSuccesses" : 0.0,
-                  "RpcQueueTime_num_ops" : 6.5225,
-                  "RpcProcessingTime_num_ops" : 6.5225,
-                  "RpcProcessingTime_avg_time" : 0.0531117806688,
-                  "rpcAuthenticationFailures" : 0.0,
-                  "RpcQueueTime_avg_time" : 0.05922430332
-                },
-                "ugi" : {
-                  "loginFailure_num_ops" : 0.0,
-                  "loginSuccess_num_ops" : 0.0,
-                  "loginSuccess_avg_time" : 0.0,
-                  "loginFailure_avg_time" : 0.0
-                },
-                "mapred" : {
-                  "Queue" : {
-                    "maps_killed" : 0.0,
-                    "reduces_killed" : 0.0,
-                    "jobs_failed" : 0.0,
-                    "reduces_completed" : 0.0,
-                    "running_0" : 0.0,
-                    "jobs_completed" : 0.0,
-                    "reserved_map_slots" : 0.0,
-                    "reduces_failed" : 0.0,
-                    "waiting_maps" : 0.0,
-                    "running_1440" : 0.0,
-                    "maps_completed" : 0.0,
-                    "jobs_preparing" : 0.0,
-                    "jobs_submitted" : 0.0,
-                    "reserved_reduce_slots" : 0.0,
-                    "jobs_running" : 0.0,
-                    "running_300" : 0.0,
-                    "maps_launched" : 0.0,
-                    "reduces_launched" : 0.0,
-                    "running_60" : 0.0,
-                    "waiting_reduces" : 0.0,
-                    "jobs_killed" : 0.0,
-                    "maps_failed" : 0.0
-                  },
-                  "jobtracker" : {
-                    "blacklisted_maps" : 0.0,
-                    "running_maps" : 0.0,
-                    "jobs_failed" : 0.0,
-                    "waiting_maps" : 0.0,
-                    "maps_completed" : 0.0,
-                    "trackers" : 2.0,
-                    "jobs_submitted" : 0.0,
-                    "map_slots" : 8.0,
-                    "reserved_reduce_slots" : 0.0,
-                    "trackers_graylisted" : 0.0,
-                    "heartbeats" : 6.5225,
-                    "jobs_running" : 0.0,
-                    "blacklisted_reduces" : 0.0,
-                    "maps_launched" : 0.0,
-                    "occupied_map_slots" : 0.0,
-                    "reduces_launched" : 0.0,
-                    "maps_failed" : 0.0,
-                    "jobs_killed" : 0.0,
-                    "maps_killed" : 0.0,
-                    "reduce_slots" : 4.0,
-                    "reduces_killed" : 0.0,
-                    "reduces_completed" : 0.0,
-                    "reduces_failed" : 0.0,
-                    "reserved_map_slots" : 0.0,
-                    "jobs_completed" : 0.0,
-                    "trackers_decommissioned" : 0.0,
-                    "trackers_blacklisted" : 0.0,
-                    "jobs_preparing" : 0.0,
-                    "running_reduces" : 0.0,
-                    "occupied_reduce_slots" : 0.0,
-                    "waiting_reduces" : 0.0
-                  }
-                },
-                "disk" : {
-                  "disk_free" : 845.332,
-                  "part_max_used" : 48.3575
-                },
-                "cpu" : {
-                  "cpu_speed" : 2660.0,
-                  "cpu_num" : 2.0,
-                  "cpu_wio" : 0.0958333333333,
-                  "cpu_idle" : 99.1786111111,
-                  "cpu_nice" : 0.0,
-                  "cpu_aidle" : 0.0,
-                  "cpu_system" : 0.425277777778,
-                  "cpu_user" : 0.321388888889
-                },
-                "rpcdetailed" : {
-                  "getStagingAreaDir_num_ops" : 0.0,
-                  "getJobProfile_num_ops" : 0.0,
-                  "getQueueAdmins_num_ops" : 0.0,
-                  "getNewJobId_num_ops" : 0.0,
-                  "submitJob_avg_time" : 136.0,
-                  "getJobCounters_num_ops" : 0.0,
-                  "getTaskCompletionEvents_avg_time" : 0.0,
-                  "getJobStatus_num_ops" : 0.0,
-                  "getJobCounters_avg_time" : 0.0,
-                  "getQueueAdmins_avg_time" : 0.0,
-                  "heartbeat_num_ops" : 6.5225,
-                  "getProtocolVersion_avg_time" : 0.0,
-                  "getBuildVersion_avg_time" : 0.0,
-                  "getReduceTaskReports_avg_time" : 0.0,
-                  "getBuildVersion_num_ops" : 0.0,
-                  "getNewJobId_avg_time" : 0.0,
-                  "getSystemDir_num_ops" : 0.0,
-                  "getReduceTaskReports_num_ops" : 0.0,
-                  "getTaskCompletionEvents_num_ops" : 0.0,
-                  "getJobProfile_avg_time" : 0.0,
-                  "submitJob_num_ops" : 0.0,
-                  "getStagingAreaDir_avg_time" : 1.0,
-                  "getProtocolVersion_num_ops" : 0.0,
-                  "getSystemDir_avg_time" : 0.0,
-                  "getMapTaskReports_num_ops" : 0.0,
-                  "getMapTaskReports_avg_time" : 1.0,
-                  "heartbeat_avg_time" : 0.0531117806688,
-                  "getJobStatus_avg_time" : 0.0
-                },
-                "load" : {
-                  "load_fifteen" : 0.0,
-                  "load_one" : 0.0,
-                  "load_five" : 0.0
-                },
-                "jvm" : {
-                  "memHeapCommittedM" : 185.1875,
-                  "logFatal" : 0.0,
-                  "threadsBlocked" : 0.0,
-                  "threadsWaiting" : 19.0,
-                  "gcCount" : 0.0,
-                  "logWarn" : 0.0,
-                  "logError" : 0.0,
-                  "memNonHeapCommittedM" : 23.1875,
-                  "gcTimeMillis" : 0.0,
-                  "memNonHeapUsedM" : 22.0414801806,
-                  "logInfo" : 0.0,
-                  "threadsNew" : 0.0,
-                  "memHeapUsedM" : 46.4608707,
-                  "threadsTerminated" : 0.0,
-                  "threadsTimedWaiting" : 9.0,
-                  "threadsRunnable" : 6.0
-                },
-                "memory" : {
-                  "mem_total" : 7646152.0,
-                  "swap_free" : 0.0,
-                  "mem_buffers" : 59373.3777778,
-                  "mem_shared" : 0.0,
-                  "mem_free" : 5225939.86667,
-                  "mem_cached" : 1506893.46667,
-                  "swap_total" : 0.0
-                },
-                "network" : {
-                  "pkts_out" : 24.3603333333,
-                  "bytes_in" : 4213.70038889,
-                  "bytes_out" : 4898.53533333,
-                  "pkts_in" : 20.0795555556
-                }
-              },
-              "component" : [
-                {
-                  "href" : "http://ambari:8080/api/clusters/mycluster/services/MAPREDUCE/components/JOBTRACKER",
-                  "ServiceComponentInfo" : {
-                    "cluster_name" : "mycluster",
-                    "component_name" : "JOBTRACKER",
-                    "service_name" : "MAPREDUCE"
-                  }
-                }
-              ]
-            }
-          ]
-        },
-        {
-          "href" : "http://ambari:8080/api/clusters/mycluster/services/MAPREDUCE/components/TASKTRACKER",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "TASKTRACKER",
-            "service_name" : "MAPREDUCE"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ambari:8080/api/clusters/mycluster/hosts/ip-10-196-102-80.ec2.internal/host_components/TASKTRACKER",
-              "HostRoles" : {
-                "configs" : "{\"mapred-site\":\"version1\",\"global\":\"version1\",\"core-site\":\"version1\"}",
-                "cluster_name" : "mycluster",
-                "desired_configs" : "{}",
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "TASKTRACKER",
-                "host_name" : "ip-10-196-102-80.ec2.internal"
-              },
-              "metrics" : {
-                "boottime" : 1.352944347E9,
-                "process" : {
-                  "proc_total" : 263.683333333,
-                  "proc_run" : 0.813888888889
-                },
-                "rpc" : {
-                  "rpcAuthorizationSuccesses" : 0.0166666666667,
-                  "SentBytes" : 14.6277777778,
-                  "rpcAuthorizationFailures" : 0.0,
-                  "ReceivedBytes" : 54.325,
-                  "NumOpenConnections" : 0.0,
-                  "callQueueLen" : 0.0,
-                  "rpcAuthenticationSuccesses" : 0.0,
-                  "RpcQueueTime_num_ops" : 0.122222222222,
-                  "RpcProcessingTime_num_ops" : 0.122222222222,
-                  "RpcProcessingTime_avg_time" : 0.544444444444,
-                  "rpcAuthenticationFailures" : 0.0,
-                  "RpcQueueTime_avg_time" : 2.51111111111
-                },
-                "ugi" : {
-                  "loginFailure_num_ops" : 0.0,
-                  "loginSuccess_num_ops" : 0.0,
-                  "loginSuccess_avg_time" : 0.0,
-                  "loginFailure_avg_time" : 0.0
-                },
-                "mapred" : {
-                  "shuffleOutput" : {
-                    "shuffle_handler_busy_percent" : 0.0
-                  },
-                  "tasktracker" : {
-                    "reduces_running" : 0.0,
-                    "maps_running" : 0.0,
-                    "reduceTaskSlots" : 2.0,
-                    "mapTaskSlots" : 4.0
-                  }
-                },
-                "disk" : {
-                  "disk_free" : 845.075,
-                  "part_max_used" : 52.6
-                },
-                "cpu" : {
-                  "cpu_speed" : 2659.0,
-                  "cpu_num" : 2.0,
-                  "cpu_wio" : 0.123888888889,
-                  "cpu_idle" : 99.0469444444,
-                  "cpu_nice" : 0.0,
-                  "cpu_aidle" : 0.0,
-                  "cpu_system" : 0.401944444444,
-                  "cpu_user" : 0.4
-                },
-                "load" : {
-                  "load_fifteen" : 0.0466666666667,
-                  "load_one" : 6.11111111111E-4,
-                  "load_five" : 6.11111111111E-4
-                },
-                "jvm" : {
-                  "memHeapCommittedM" : 760.003819444,
-                  "logFatal" : 0.0,
-                  "threadsBlocked" : 0.0,
-                  "threadsWaiting" : 38.2944444444,
-                  "gcCount" : 2.3860929425E7,
-                  "logWarn" : 0.0,
-                  "logError" : 0.0,
-                  "memNonHeapCommittedM" : 32.7367406333,
-                  "gcTimeMillis" : 7.63549738133E7,
-                  "memNonHeapUsedM" : 21.0635269694,
-                  "logInfo" : 7.158278827E7,
-                  "threadsNew" : 0.0,
-                  "memHeapUsedM" : 83.2495438167,
-                  "threadsTerminated" : 0.0,
-                  "maxMemoryM" : 1004.0,
-                  "threadsTimedWaiting" : 11.1388888889,
-                  "threadsRunnable" : 14.2777777778
-                },
-                "memory" : {
-                  "mem_total" : 7646152.0,
-                  "swap_free" : 0.0,
-                  "mem_buffers" : 62720.2888889,
-                  "mem_shared" : 0.0,
-                  "mem_free" : 4978855.83333,
-                  "mem_cached" : 1756252.53333,
-                  "swap_total" : 0.0
-                },
-                "network" : {
-                  "pkts_out" : 105.755833333,
-                  "bytes_in" : 1895.95808333,
-                  "bytes_out" : 21296.58625,
-                  "pkts_in" : 11.2986666667
-                }
-              },
-              "component" : [
-                {
-                  "href" : "http://ambari:8080/api/clusters/mycluster/services/MAPREDUCE/components/TASKTRACKER",
-                  "ServiceComponentInfo" : {
-                    "cluster_name" : "mycluster",
-                    "component_name" : "TASKTRACKER",
-                    "service_name" : "MAPREDUCE"
-                  }
-                }
-              ]
-            },
-            {
-              "href" : "http://ambari:8080/api/clusters/mycluster/hosts/ip-10-118-149-18.ec2.internal/host_components/TASKTRACKER",
-              "HostRoles" : {
-                "configs" : "{\"mapred-site\":\"version1\",\"global\":\"version1\",\"core-site\":\"version1\"}",
-                "cluster_name" : "mycluster",
-                "desired_configs" : "{}",
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "TASKTRACKER",
-                "host_name" : "ip-10-118-149-18.ec2.internal"
-              },
-              "metrics" : {
-                "boottime" : 1.352944346E9,
-                "process" : {
-                  "proc_total" : 264.063888889,
-                  "proc_run" : 1.16944444444
-                },
-                "rpc" : {
-                  "rpcAuthorizationSuccesses" : 1.19304647083E7,
-                  "SentBytes" : 1.19304639222E7,
-                  "rpcAuthorizationFailures" : 0.0,
-                  "ReceivedBytes" : 1.19304635583E7,
-                  "NumOpenConnections" : 0.0,
-                  "callQueueLen" : 0.0,
-                  "rpcAuthenticationSuccesses" : 0.0,
-                  "RpcQueueTime_num_ops" : 1.19304647056E7,
-                  "RpcProcessingTime_num_ops" : 1.19304647056E7,
-                  "RpcProcessingTime_avg_time" : 0.344444444444,
-                  "rpcAuthenticationFailures" : 0.0,
-                  "RpcQueueTime_avg_time" : 3.78888888889
-                },
-                "ugi" : {
-                  "loginFailure_num_ops" : 0.0,
-                  "loginSuccess_num_ops" : 0.0,
-                  "loginSuccess_avg_time" : 0.0,
-                  "loginFailure_avg_time" : 0.0
-                },
-                "mapred" : {
-                  "shuffleOutput" : {
-                    "shuffle_handler_busy_percent" : 0.0
-                  },
-                  "tasktracker" : {
-                    "reduces_running" : 0.0,
-                    "maps_running" : 0.0,
-                    "reduceTaskSlots" : 2.0,
-                    "mapTaskSlots" : 4.0
-                  }
-                },
-                "disk" : {
-                  "disk_free" : 845.599,
-                  "part_max_used" : 43.9
-                },
-                "cpu" : {
-                  "cpu_speed" : 2266.0,
-                  "cpu_num" : 2.0,
-                  "cpu_wio" : 0.0,
-                  "cpu_idle" : 98.6938888889,
-                  "cpu_nice" : 0.0,
-                  "cpu_aidle" : 0.0,
-                  "cpu_system" : 0.531111111111,
-                  "cpu_user" : 0.775
-                },
-                "load" : {
-                  "load_fifteen" : 0.00313888888889,
-                  "load_one" : 0.0282777777778,
-                  "load_five" : 0.0163055555556
-                },
-                "jvm" : {
-                  "memHeapCommittedM" : 174.446875,
-                  "logFatal" : 0.0,
-                  "threadsBlocked" : 0.0,
-                  "threadsWaiting" : 14.4361111111,
-                  "gcCount" : 1.19304647115E8,
-                  "logWarn" : 0.0,
-                  "logError" : 0.0,
-                  "memNonHeapCommittedM" : 24.3248698333,
-                  "gcTimeMillis" : 1.43165576518E8,
-                  "memNonHeapUsedM" : 19.4993658028,
-                  "logInfo" : 2.38609294222E7,
-                  "threadsNew" : 0.0,
-                  "memHeapUsedM" : 18.087863945,
-                  "threadsTerminated" : 0.0,
-                  "maxMemoryM" : 1004.0,
-                  "threadsTimedWaiting" : 7.46388888889,
-                  "threadsRunnable" : 7.43611111111
-                },
-                "memory" : {
-                  "mem_total" : 7646152.0,
-                  "swap_free" : 0.0,
-                  "mem_buffers" : 52429.0222222,
-                  "mem_shared" : 0.0,
-                  "mem_free" : 5610725.88889,
-                  "mem_cached" : 1247511.4,
-                  "swap_total" : 0.0
-                },
-                "network" : {
-                  "pkts_out" : 116.179833333,
-                  "bytes_in" : 1897.15633333,
-                  "bytes_out" : 23871.1300833,
-                  "pkts_in" : 11.2918333333
-                }
-              },
-              "component" : [
-                {
-                  "href" : "http://ambari:8080/api/clusters/mycluster/services/MAPREDUCE/components/TASKTRACKER",
-                  "ServiceComponentInfo" : {
-                    "cluster_name" : "mycluster",
-                    "component_name" : "TASKTRACKER",
-                    "service_name" : "MAPREDUCE"
-                  }
-                }
-              ]
-            }
-          ]
-        }
-      ]
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/services/HDFS",
-      "ServiceInfo" : {
-        "cluster_name" : "mycluster",
-        "service_name" : "HDFS"
-      },
-      "components" : [
-        {
-          "href" : "http://ambari:8080/api/clusters/mycluster/services/HDFS/components/HDFS_CLIENT",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "HDFS_CLIENT",
-            "service_name" : "HDFS"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ambari:8080/api/clusters/mycluster/hosts/ip-10-196-102-80.ec2.internal/host_components/HDFS_CLIENT",
-              "HostRoles" : {
-                "configs" : "{}",
-                "cluster_name" : "mycluster",
-                "desired_configs" : "{}",
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "HDFS_CLIENT",
-                "host_name" : "ip-10-196-102-80.ec2.internal"
-              },
-              "component" : [
-                {
-                  "href" : "http://ambari:8080/api/clusters/mycluster/services/HDFS/components/HDFS_CLIENT",
-                  "ServiceComponentInfo" : {
-                    "cluster_name" : "mycluster",
-                    "component_name" : "HDFS_CLIENT",
-                    "service_name" : "HDFS"
-                  }
-                }
-              ]
-            }
-          ]
-        },
-        {
-          "href" : "http://ambari:8080/api/clusters/mycluster/services/HDFS/components/DATANODE",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "DATANODE",
-            "service_name" : "HDFS"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ambari:8080/api/clusters/mycluster/hosts/ip-10-118-149-18.ec2.internal/host_components/DATANODE",
-              "HostRoles" : {
-                "configs" : "{\"global\":\"version1\",\"hdfs-site\":\"version1\",\"core-site\":\"version1\"}",
-                "cluster_name" : "mycluster",
-                "desired_configs" : "{}",
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "DATANODE",
-                "host_name" : "ip-10-118-149-18.ec2.internal"
-              },
-              "metrics" : {
-                "boottime" : 1.352944346E9,
-                "process" : {
-                  "proc_total" : 264.063888889,
-                  "proc_run" : 1.16944444444
-                },
-                "rpc" : {
-                  "rpcAuthorizationSuccesses" : 1.19304647083E7,
-                  "SentBytes" : 1.19304639222E7,
-                  "rpcAuthorizationFailures" : 0.0,
-                  "ReceivedBytes" : 1.19304635583E7,
-                  "NumOpenConnections" : 0.0,
-                  "callQueueLen" : 0.0,
-                  "rpcAuthenticationSuccesses" : 0.0,
-                  "RpcQueueTime_num_ops" : 1.19304647056E7,
-                  "RpcProcessingTime_num_ops" : 1.19304647056E7,
-                  "RpcProcessingTime_avg_time" : 0.344444444444,
-                  "rpcAuthenticationFailures" : 0.0,
-                  "RpcQueueTime_avg_time" : 3.78888888889
-                },
-                "ugi" : {
-                  "loginFailure_num_ops" : 0.0,
-                  "loginSuccess_num_ops" : 0.0,
-                  "loginSuccess_avg_time" : 0.0,
-                  "loginFailure_avg_time" : 0.0
-                },
-                "dfs" : {
-                  "datanode" : {
-                    "readBlockOp_num_ops" : 0.0,
-                    "writeBlockOp_avg_time" : 4.42857142857,
-                    "block_verification_failures" : 0.0,
-                    "blockChecksumOp_num_ops" : 0.0,
-                    "blocks_read" : 0.0,
-                    "copyBlockOp_avg_time" : 0.0,
-                    "blocks_written" : 0.0,
-                    "heartBeats_num_ops" : 0.3325,
-                    "writes_from_remote_client" : 0.0,
-                    "replaceBlockOp_num_ops" : 0.0,
-                    "blocks_replicated" : 0.0,
-                    "blockReports_avg_time" : 1.0,
-                    "writeBlockOp_num_ops" : 0.0,
-                    "heartBeats_avg_time" : 2.44421296296,
-                    "bytes_read" : 0.0,
-                    "copyBlockOp_num_ops" : 0.0,
-                    "blockReports_num_ops" : 0.0,
-                    "bytes_written" : 0.0,
-                    "replaceBlockOp_avg_time" : 0.0,
-                    "reads_from_remote_client" : 0.0,
-                    "readBlockOp_avg_time" : 3.0,
-                    "reads_from_local_client" : 0.0,
-                    "blocks_verified" : 0.00555555555556,
-                    "writes_from_local_client" : 0.0,
-                    "blocks_get_local_pathinfo" : 0.0,
-                    "blockChecksumOp_avg_time" : 0.0,
-                    "blocks_removed" : 0.0
-                  }
-                },
-                "disk" : {
-                  "disk_free" : 845.599,
-                  "part_max_used" : 43.9
-                },
-                "cpu" : {
-                  "cpu_speed" : 2266.0,
-                  "cpu_num" : 2.0,
-                  "cpu_wio" : 0.0,
-                  "cpu_idle" : 98.6938888889,
-                  "cpu_nice" : 0.0,
-                  "cpu_aidle" : 0.0,
-                  "cpu_system" : 0.531111111111,
-                  "cpu_user" : 0.775
-                },
-                "load" : {
-                  "load_fifteen" : 0.00313888888889,
-                  "load_one" : 0.0282777777778,
-                  "load_five" : 0.0163055555556
-                },
-                "jvm" : {
-                  "memHeapCommittedM" : 174.446875,
-                  "logFatal" : 0.0,
-                  "threadsBlocked" : 0.0,
-                  "threadsWaiting" : 14.4361111111,
-                  "gcCount" : 1.19304647115E8,
-                  "logWarn" : 0.0,
-                  "logError" : 0.0,
-                  "memNonHeapCommittedM" : 24.3248698333,
-                  "gcTimeMillis" : 1.43165576518E8,
-                  "memNonHeapUsedM" : 19.4993658028,
-                  "logInfo" : 2.38609294222E7,
-                  "threadsNew" : 0.0,
-                  "memHeapUsedM" : 18.087863945,
-                  "threadsTerminated" : 0.0,
-                  "maxMemoryM" : 1004.0,
-                  "threadsTimedWaiting" : 7.46388888889,
-                  "threadsRunnable" : 7.43611111111
-                },
-                "memory" : {
-                  "mem_total" : 7646152.0,
-                  "swap_free" : 0.0,
-                  "mem_buffers" : 52429.0222222,
-                  "mem_shared" : 0.0,
-                  "mem_free" : 5610725.88889,
-                  "mem_cached" : 1247511.4,
-                  "swap_total" : 0.0
-                },
-                "network" : {
-                  "pkts_out" : 116.179833333,
-                  "bytes_in" : 1897.15633333,
-                  "bytes_out" : 23871.1300833,
-                  "pkts_in" : 11.2918333333
-                }
-              },
-              "component" : [
-                {
-                  "href" : "http://ambari:8080/api/clusters/mycluster/services/HDFS/components/DATANODE",
-                  "ServiceComponentInfo" : {
-                    "cluster_name" : "mycluster",
-                    "component_name" : "DATANODE",
-                    "service_name" : "HDFS"
-                  }
-                }
-              ]
-            },
-            {
-              "href" : "http://ambari:8080/api/clusters/mycluster/hosts/ip-10-196-102-80.ec2.internal/host_components/DATANODE",
-              "HostRoles" : {
-                "configs" : "{\"global\":\"version1\",\"hdfs-site\":\"version1\",\"core-site\":\"version1\"}",
-                "cluster_name" : "mycluster",
-                "desired_configs" : "{}",
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "DATANODE",
-                "host_name" : "ip-10-196-102-80.ec2.internal"
-              },
-              "metrics" : {
-                "boottime" : 1.352944347E9,
-                "process" : {
-                  "proc_total" : 263.683333333,
-                  "proc_run" : 0.813888888889
-                },
-                "rpc" : {
-                  "rpcAuthorizationSuccesses" : 0.0166666666667,
-                  "SentBytes" : 14.6277777778,
-                  "rpcAuthorizationFailures" : 0.0,
-                  "ReceivedBytes" : 54.325,
-                  "NumOpenConnections" : 0.0,
-                  "callQueueLen" : 0.0,
-                  "rpcAuthenticationSuccesses" : 0.0,
-                  "RpcQueueTime_num_ops" : 0.122222222222,
-                  "RpcProcessingTime_num_ops" : 0.122222222222,
-                  "RpcProcessingTime_avg_time" : 0.544444444444,
-                  "rpcAuthenticationFailures" : 0.0,
-                  "RpcQueueTime_avg_time" : 2.51111111111
-                },
-                "ugi" : {
-                  "loginFailure_num_ops" : 0.0,
-                  "loginSuccess_num_ops" : 0.0,
-                  "loginSuccess_avg_time" : 0.0,
-                  "loginFailure_avg_time" : 0.0
-                },
-                "dfs" : {
-                  "datanode" : {
-                    "readBlockOp_num_ops" : 0.0,
-                    "writeBlockOp_avg_time" : 10.9298245614,
-                    "block_verification_failures" : 0.0,
-                    "blockChecksumOp_num_ops" : 0.0,
-                    "blocks_read" : 0.0,
-                    "copyBlockOp_avg_time" : 0.0,
-                    "blocks_written" : 0.0,
-                    "heartBeats_num_ops" : 0.335833333333,
-                    "writes_from_remote_client" : 0.0,
-                    "replaceBlockOp_num_ops" : 0.0,
-                    "blocks_replicated" : 0.0,
-                    "blockReports_avg_time" : 9.0,
-                    "writeBlockOp_num_ops" : 0.0,
-                    "heartBeats_avg_time" : 2.92106481481,
-                    "bytes_read" : 0.0,
-                    "copyBlockOp_num_ops" : 0.0,
-                    "blockReports_num_ops" : 0.0,
-                    "bytes_written" : 0.0,
-                    "replaceBlockOp_avg_time" : 0.0,
-                    "reads_from_remote_client" : 0.0,
-                    "readBlockOp_avg_time" : 6.5,
-                    "reads_from_local_client" : 0.0,
-                    "blocks_verified" : 0.0125,
-                    "writes_from_local_client" : 0.0,
-                    "blocks_get_local_pathinfo" : 0.0,
-                    "blockChecksumOp_avg_time" : 0.0,
-                    "blocks_removed" : 0.0
-                  }
-                },
-                "disk" : {
-                  "disk_free" : 845.075,
-                  "part_max_used" : 52.6
-                },
-                "cpu" : {
-                  "cpu_speed" : 2659.0,
-                  "cpu_num" : 2.0,
-                  "cpu_wio" : 0.123888888889,
-                  "cpu_idle" : 99.0469444444,
-                  "cpu_nice" : 0.0,
-                  "cpu_aidle" : 0.0,
-                  "cpu_system" : 0.401944444444,
-                  "cpu_user" : 0.4
-                },
-                "load" : {
-                  "load_fifteen" : 0.0466666666667,
-                  "load_one" : 6.11111111111E-4,
-                  "load_five" : 6.11111111111E-4
-                },
-                "jvm" : {
-                  "memHeapCommittedM" : 760.003819444,
-                  "logFatal" : 0.0,
-                  "threadsBlocked" : 0.0,
-                  "threadsWaiting" : 38.2944444444,
-                  "gcCount" : 2.3860929425E7,
-                  "logWarn" : 0.0,
-                  "logError" : 0.0,
-                  "memNonHeapCommittedM" : 32.7367406333,
-                  "gcTimeMillis" : 7.63549738133E7,
-                  "memNonHeapUsedM" : 21.0635269694,
-                  "logInfo" : 7.158278827E7,
-                  "threadsNew" : 0.0,
-                  "memHeapUsedM" : 83.2495438167,
-                  "threadsTerminated" : 0.0,
-                  "maxMemoryM" : 1004.0,
-                  "threadsTimedWaiting" : 11.1388888889,
-                  "threadsRunnable" : 14.2777777778
-                },
-                "memory" : {
-                  "mem_total" : 7646152.0,
-                  "swap_free" : 0.0,
-                  "mem_buffers" : 62720.2888889,
-                  "mem_shared" : 0.0,
-                  "mem_free" : 4978855.83333,
-                  "mem_cached" : 1756252.53333,
-                  "swap_total" : 0.0
-                },
-                "network" : {
-                  "pkts_out" : 105.755833333,
-                  "bytes_in" : 1895.95808333,
-                  "bytes_out" : 21296.58625,
-                  "pkts_in" : 11.2986666667
-                }
-              },
-              "component" : [
-                {
-                  "href" : "http://ambari:8080/api/clusters/mycluster/services/HDFS/components/DATANODE",
-                  "ServiceComponentInfo" : {
-                    "cluster_name" : "mycluster",
-                    "component_name" : "DATANODE",
-                    "service_name" : "HDFS"
-                  }
-                }
-              ]
-            }
-          ]
-        },
-        {
-          "href" : "http://ambari:8080/api/clusters/mycluster/services/HDFS/components/NAMENODE",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "NAMENODE",
-            "service_name" : "HDFS"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ambari:8080/api/clusters/mycluster/hosts/ip-10-83-47-139.ec2.internal/host_components/NAMENODE",
-              "HostRoles" : {
-                "configs" : "{\"global\":\"version1\",\"hdfs-site\":\"version1\",\"core-site\":\"version1\"}",
-                "cluster_name" : "mycluster",
-                "desired_configs" : "{}",
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "NAMENODE",
-                "host_name" : "ip-10-83-47-139.ec2.internal"
-              },
-              "metrics" : {
-                "boottime" : 1.352944348E9,
-                "process" : {
-                  "proc_total" : 293.494444444,
-                  "proc_run" : 2.86944444444
-                },
-                "rpc" : {
-                  "rpcAuthorizationSuccesses" : 0.0861111111111,
-                  "SentBytes" : 106.391481481,
-                  "rpcAuthorizationFailures" : 0.0,
-                  "ReceivedBytes" : 229.217407407,
-                  "NumOpenConnections" : 0.0,
-                  "callQueueLen" : 0.0,
-                  "rpcAuthenticationSuccesses" : 0.0,
-                  "RpcQueueTime_num_ops" : 0.748703703704,
-                  "RpcProcessingTime_num_ops" : 0.748703703704,
-                  "RpcProcessingTime_avg_time" : 0.110499338624,
-                  "rpcAuthenticationFailures" : 0.0,
-                  "RpcQueueTime_avg_time" : 0.323186728395
-                },
-                "ugi" : {
-                  "loginFailure_num_ops" : 0.0,
-                  "loginSuccess_num_ops" : 0.0,
-                  "loginSuccess_avg_time" : 0.0,
-                  "loginFailure_avg_time" : 0.0
-                },
-                "dfs" : {
-                  "namenode" : {
-                    "AddBlockOps" : 0.0,
-                    "fsImageLoadTime" : 1300.0,
-                    "FilesRenamed" : 0.0,
-                    "JournalTransactionsBatchedInSync" : 0.0,
-                    "FileInfoOps" : 0.0,
-                    "CreateFileOps" : 0.0,
-                    "GetListingOps" : 0.0166666666667,
-                    "Transactions_num_ops" : 0.0,
-                    "GetBlockLocations" : 0.0,
-                    "Syncs_avg_time" : 2.87394957983,
-                    "blockReport_num_ops" : 0.0,
-                    "Syncs_num_ops" : 0.0,
-                    "SafemodeTime" : 1317.0,
-                    "Transactions_avg_time" : 0.0337423312883,
-                    "FilesInGetListingOps" : 0.0,
-                    "FilesDeleted" : 0.0,
-                    "FilesAppended" : 0.0,
-                    "DeleteFileOps" : 0.0,
-                    "FilesCreated" : 0.0,
-                    "blockReport_avg_time" : 4.0
-                  },
-                  "FSNamesystem" : {
-                    "BlocksTotal" : 403.0,
-                    "ScheduledReplicationBlocks" : 0.0,
-                    "CapacityTotalGB" : 11.0,
-                    "CapacityUsedGB" : 0.0,
-                    "ExcessBlocks" : 0.0,
-                    "MissingBlocks" : 0.0,
-                    "PendingReplicationBlocks" : 0.0,
-                    "FilesTotal" : 324.0,
-                    "CapacityRemainingGB" : 6.0,
-                    "UnderReplicatedBlocks" : 215.0,
-                    "TotalLoad" : 2.0,
-                    "PendingDeletionBlocks" : 0.0,
-                    "CorruptBlocks" : 0.0,
-                    "BlockCapacity" : 2097152.0
-                  }
-                },
-                "disk" : {
-                  "disk_total" : 893.765,
-                  "disk_free" : 845.308827778,
-                  "part_max_used" : 48.7186111111
-                },
-                "cpu" : {
-                  "cpu_speed" : 2266.0,
-                  "cpu_num" : 2.0,
-                  "cpu_wio" : 0.493888888889,
-                  "cpu_idle" : 84.9672222222,
-                  "cpu_nice" : 0.0,
-                  "cpu_aidle" : 0.0,
-                  "cpu_system" : 3.33388888889,
-                  "cpu_user" : 11.2113888889
-                },
-                "rpcdetailed" : {
-                  "addBlock_avg_time" : 0.465909090909,
-                  "rollFsImage_num_ops" : 0.0,
-                  "register_num_ops" : 0.0,
-                  "versionRequest_num_ops" : 0.0,
-                  "create_avg_time" : 5.04545454545,
-                  "fsync_num_ops" : 0.0,
-                  "getBlockLocations_num_ops" : 0.0,
-                  "addBlock_num_ops" : 0.0,
-                  "getListing_avg_time" : 1.69444444444,
-                  "getBlockLocations_avg_time" : 0.0,
-                  "renewLease_num_ops" : 0.0666666666667,
-                  "getFileInfo_num_ops" : 0.0,
-                  "register_avg_time" : 2.5,
-                  "setPermission_num_ops" : 0.0,
-                  "versionRequest_avg_time" : 0.5,
-                  "fsync_avg_time" : 0.0,
-                  "complete_num_ops" : 0.0,
-                  "setOwner_num_ops" : 0.0,
-                  "getProtocolVersion_num_ops" : 0.0,
-                  "setOwner_avg_time" : 15.5,
-                  "setReplication_num_ops" : 0.0,
-                  "blockReport_avg_time" : 4.0,
-                  "setPermission_avg_time" : 3.66666666667,
-                  "getListing_num_ops" : 0.0166666666667,
-                  "renewLease_avg_time" : 0.0430555555556,
-                  "sendHeartbeat_num_ops" : 0.662592592593,
-                  "blocksBeingWrittenReport_avg_time" : 0.5,
-                  "rename_num_ops" : 0.0,
-                  "mkdirs_avg_time" : 3.95,
-                  "delete_num_ops" : 0.0,
-                  "blockReport_num_ops" : 0.0,
-                  "create_num_ops" : 0.0,
-                  "getEditLogSize_num_ops" : 0.00277777777778,
-                  "rollEditLog_num_ops" : 0.0,
-                  "rollFsImage_avg_time" : 48.0,
-                  "mkdirs_num_ops" : 0.0,
-                  "delete_avg_time" : 4.0,
-                  "getFileInfo_avg_time" : 0.0,
-                  "rename_avg_time" : 3.0,
-                  "getProtocolVersion_avg_time" : 0.0,
-                  "rollEditLog_avg_time" : 83.0,
-                  "blockReceived_avg_time" : 0.205714285714,
-                  "getEditLogSize_avg_time" : 0.0,
-                  "sendHeartbeat_avg_time" : 0.0732804232804,
-                  "complete_avg_time" : 1.44943820225,
-                  "blockReceived_num_ops" : 0.0,
-                  "setSafeMode_avg_time" : 0.0,
-                  "blocksBeingWrittenReport_num_ops" : 0.0,
-                  "setSafeMode_num_ops" : 0.0,
-                  "setReplication_avg_time" : 1.5
-                },
-                "load" : {
-                  "load_fifteen" : 0.356833333333,
-                  "load_one" : 0.511388888889,
-                  "load_five" : 0.374166666667
-                },
-                "jvm" : {
-                  "memHeapCommittedM" : 960.0,
-                  "logFatal" : 0.0,
-                  "threadsBlocked" : 0.0,
-                  "threadsWaiting" : 14.0,
-                  "gcCount" : 0.0,
-                  "logWarn" : 0.0,
-                  "logError" : 0.0,
-                  "memNonHeapCommittedM" : 23.1875,
-                  "gcTimeMillis" : 0.0,
-                  "memNonHeapUsedM" : 20.7895987972,
-                  "logInfo" : 0.0,
-                  "threadsNew" : 0.0,
-                  "memHeapUsedM" : 384.695487139,
-                  "threadsTerminated" : 0.0,
-                  "threadsTimedWaiting" : 7.0,
-                  "threadsRunnable" : 6.0
-                },
-                "memory" : {
-                  "mem_total" : 7646152.0,
-                  "swap_free" : 0.0,
-                  "mem_buffers" : 63871.6111111,
-                  "mem_shared" : 0.0,
-                  "mem_free" : 4787186.06667,
-                  "mem_cached" : 1545617.4,
-                  "swap_total" : 0.0
-                },
-                "network" : {
-                  "pkts_out" : 29.3245833333,
-                  "bytes_in" : 45395.5750833,
-                  "bytes_out" : 15665.7432778,
-                  "pkts_in" : 234.953444444
-                }
-              },
-              "component" : [
-                {
-                  "href" : "http://ambari:8080/api/clusters/mycluster/services/HDFS/components/NAMENODE",
-                  "ServiceComponentInfo" : {
-                    "cluster_name" : "mycluster",
-                    "component_name" : "NAMENODE",
-                    "service_name" : "HDFS"
-                  }
-                }
-              ]
-            }
-          ]
-        },
-        {
-          "href" : "http://ambari:8080/api/clusters/mycluster/services/HDFS/components/SECONDARY_NAMENODE",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "SECONDARY_NAMENODE",
-            "service_name" : "HDFS"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ambari:8080/api/clusters/mycluster/hosts/ip-10-204-102-239.ec2.internal/host_components/SECONDARY_NAMENODE",
-              "HostRoles" : {
-                "configs" : "{\"global\":\"version1\",\"hdfs-site\":\"version1\",\"core-site\":\"version1\"}",
-                "cluster_name" : "mycluster",
-                "desired_configs" : "{}",
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "SECONDARY_NAMENODE",
-                "host_name" : "ip-10-204-102-239.ec2.internal"
-              },
-              "component" : [
-                {
-                  "href" : "http://ambari:8080/api/clusters/mycluster/services/HDFS/components/SECONDARY_NAMENODE",
-                  "ServiceComponentInfo" : {
-                    "cluster_name" : "mycluster",
-                    "component_name" : "SECONDARY_NAMENODE",
-                    "service_name" : "HDFS"
-                  }
-                }
-              ]
-            }
-          ]
-        }
-      ]
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/services/SQOOP",
-      "ServiceInfo" : {
-        "cluster_name" : "mycluster",
-        "service_name" : "SQOOP"
-      },
-      "components" : [
-        {
-          "href" : "http://ambari:8080/api/clusters/mycluster/services/SQOOP/components/SQOOP",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "SQOOP",
-            "service_name" : "SQOOP"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ambari:8080/api/clusters/mycluster/hosts/ip-10-196-102-80.ec2.internal/host_components/SQOOP",
-              "HostRoles" : {
-                "configs" : "{}",
-                "cluster_name" : "mycluster",
-                "desired_configs" : "{}",
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "SQOOP",
-                "host_name" : "ip-10-196-102-80.ec2.internal"
-              },
-              "component" : [
-                {
-                  "href" : "http://ambari:8080/api/clusters/mycluster/services/SQOOP/components/SQOOP",
-                  "ServiceComponentInfo" : {
-                    "cluster_name" : "mycluster",
-                    "component_name" : "SQOOP",
-                    "service_name" : "SQOOP"
-                  }
-                }
-              ]
-            }
-          ]
-        }
-      ]
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/services/OOZIE",
-      "ServiceInfo" : {
-        "cluster_name" : "mycluster",
-        "service_name" : "OOZIE"
-      },
-      "components" : [
-        {
-          "href" : "http://ambari:8080/api/clusters/mycluster/services/OOZIE/components/OOZIE_CLIENT",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "OOZIE_CLIENT",
-            "service_name" : "OOZIE"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ambari:8080/api/clusters/mycluster/hosts/ip-10-196-102-80.ec2.internal/host_components/OOZIE_CLIENT",
-              "HostRoles" : {
-                "configs" : "{}",
-                "cluster_name" : "mycluster",
-                "desired_configs" : "{}",
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "OOZIE_CLIENT",
-                "host_name" : "ip-10-196-102-80.ec2.internal"
-              },
-              "component" : [
-                {
-                  "href" : "http://ambari:8080/api/clusters/mycluster/services/OOZIE/components/OOZIE_CLIENT",
-                  "ServiceComponentInfo" : {
-                    "cluster_name" : "mycluster",
-                    "component_name" : "OOZIE_CLIENT",
-                    "service_name" : "OOZIE"
-                  }
-                }
-              ]
-            }
-          ]
-        },
-        {
-          "href" : "http://ambari:8080/api/clusters/mycluster/services/OOZIE/components/OOZIE_SERVER",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "OOZIE_SERVER",
-            "service_name" : "OOZIE"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ambari:8080/api/clusters/mycluster/hosts/ip-10-204-102-239.ec2.internal/host_components/OOZIE_SERVER",
-              "HostRoles" : {
-                "configs" : "{\"global\":\"version1\",\"oozie-site\":\"version1\",\"core-site\":\"version1\"}",
-                "cluster_name" : "mycluster",
-                "desired_configs" : "{}",
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "OOZIE_SERVER",
-                "host_name" : "ip-10-204-102-239.ec2.internal"
-              },
-              "component" : [
-                {
-                  "href" : "http://ambari:8080/api/clusters/mycluster/services/OOZIE/components/OOZIE_SERVER",
-                  "ServiceComponentInfo" : {
-                    "cluster_name" : "mycluster",
-                    "component_name" : "OOZIE_SERVER",
-                    "service_name" : "OOZIE"
-                  }
-                }
-              ]
-            }
-          ]
-        }
-      ]
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/services/NAGIOS",
-      "ServiceInfo" : {
-        "cluster_name" : "mycluster",
-        "service_name" : "NAGIOS"
-      },
-      "components" : [
-        {
-          "href" : "http://ambari:8080/api/clusters/mycluster/services/NAGIOS/components/NAGIOS_SERVER",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "NAGIOS_SERVER",
-            "service_name" : "NAGIOS"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ambari:8080/api/clusters/mycluster/hosts/ip-10-83-47-139.ec2.internal/host_components/NAGIOS_SERVER",
-              "HostRoles" : {
-                "configs" : "{}",
-                "cluster_name" : "mycluster",
-                "desired_configs" : "{}",
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "NAGIOS_SERVER",
-                "host_name" : "ip-10-83-47-139.ec2.internal"
-              },
-              "component" : [
-                {
-                  "href" : "http://ambari:8080/api/clusters/mycluster/services/NAGIOS/components/NAGIOS_SERVER",
-                  "ServiceComponentInfo" : {
-                    "cluster_name" : "mycluster",
-                    "component_name" : "NAGIOS_SERVER",
-                    "service_name" : "NAGIOS"
-                  }
-                }
-              ]
-            }
-          ]
-        }
-      ]
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/services/HBASE",
-      "ServiceInfo" : {
-        "cluster_name" : "mycluster",
-        "service_name" : "HBASE"
-      },
-      "components" : [
-        {
-          "href" : "http://ambari:8080/api/clusters/mycluster/services/HBASE/components/HBASE_CLIENT",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "HBASE_CLIENT",
-            "service_name" : "HBASE"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ambari:8080/api/clusters/mycluster/hosts/ip-10-196-102-80.ec2.internal/host_components/HBASE_CLIENT",
-              "HostRoles" : {
-                "configs" : "{}",
-                "cluster_name" : "mycluster",
-                "desired_configs" : "{}",
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "HBASE_CLIENT",
-                "host_name" : "ip-10-196-102-80.ec2.internal"
-              },
-              "metrics" : {
-                "boottime" : 1.352944347E9,
-                "process" : {
-                  "proc_total" : 263.683333333,
-                  "proc_run" : 0.813888888889
-                },
-                "rpc" : {
-                  "rpcAuthorizationSuccesses" : 0.0166666666667,
-                  "rpcAuthorizationFailures" : 0.0,
-                  "SentBytes" : 14.6277777778,
-                  "ReceivedBytes" : 54.325,
-                  "NumOpenConnections" : 0.0,
-                  "callQueueLen" : 0.0,
-                  "RpcQueueTime_num_ops" : 0.122222222222,
-                  "rpcAuthenticationSuccesses" : 0.0,
-                  "RpcProcessingTime_num_ops" : 0.122222222222,
-                  "RpcProcessingTime_avg_time" : 0.544444444444,
-                  "rpcAuthenticationFailures" : 0.0,
-                  "RpcQueueTime_avg_time" : 2.51111111111
-                },
-                "ugi" : {
-                  "loginFailure_num_ops" : 0.0,
-                  "loginSuccess_num_ops" : 0.0,
-                  "loginSuccess_avg_time" : 0.0,
-                  "loginFailure_avg_time" : 0.0
-                },
-                "disk" : {
-                  "disk_free" : 845.075,
-                  "part_max_used" : 52.6
-                },
-                "cpu" : {
-                  "cpu_speed" : 2659.0,
-                  "cpu_num" : 2.0,
-                  "cpu_wio" : 0.123888888889,
-                  "cpu_idle" : 99.0469444444,
-                  "cpu_nice" : 0.0,
-                  "cpu_aidle" : 0.0,
-                  "cpu_system" : 0.401944444444,
-                  "cpu_user" : 0.4
-                },
-                "jvm" : {
-                  "memHeapCommittedM" : 760.003819444,
-                  "logFatal" : 0.0,
-                  "threadsBlocked" : 0.0,
-                  "threadsWaiting" : 38.2944444444,
-                  "gcCount" : 2.3860929425E7,
-                  "logError" : 0.0,
-                  "logWarn" : 0.0,
-                  "memNonHeapCommittedM" : 32.7367406333,
-                  "gcTimeMillis" : 7.63549738133E7,
-                  "memNonHeapUsedM" : 21.0635269694,
-                  "logInfo" : 7.158278827E7,
-                  "memHeapUsedM" : 83.2495438167,
-                  "threadsNew" : 0.0,
-                  "threadsTerminated" : 0.0,
-                  "threadsTimedWaiting" : 11.1388888889,
-                  "maxMemoryM" : 1004.0,
-                  "threadsRunnable" : 14.2777777778
-                },
-                "load" : {
-                  "load_fifteen" : 0.0466666666667,
-                  "load_one" : 6.11111111111E-4,
-                  "load_five" : 6.11111111111E-4
-                },
-                "memory" : {
-                  "mem_total" : 7646152.0,
-                  "swap_free" : 0.0,
-                  "mem_buffers" : 62720.2888889,
-                  "mem_shared" : 0.0,
-                  "mem_free" : 4978855.83333,
-                  "mem_cached" : 1756252.53333,
-                  "swap_total" : 0.0
-                },
-                "network" : {
-                  "pkts_out" : 105.755833333,
-                  "bytes_in" : 1895.95808333,
-                  "bytes_out" : 21296.58625,
-                  "pkts_in" : 11.2986666667
-                }
-              },
-              "component" : [
-                {
-                  "href" : "http://ambari:8080/api/clusters/mycluster/services/HBASE/components/HBASE_CLIENT",
-                  "ServiceComponentInfo" : {
-                    "cluster_name" : "mycluster",
-                    "component_name" : "HBASE_CLIENT",
-                    "service_name" : "HBASE"
-                  }
-                }
-              ]
-            }
-          ]
-        },
-        {
-          "href" : "http://ambari:8080/api/clusters/mycluster/services/HBASE/components/HBASE_MASTER",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "HBASE_MASTER",
-            "service_name" : "HBASE"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ambari:8080/api/clusters/mycluster/hosts/ip-10-83-47-139.ec2.internal/host_components/HBASE_MASTER",
-              "HostRoles" : {
-                "configs" : "{\"hbase-site\":\"version1\",\"global\":\"version1\",\"core-site\":\"version1\"}",
-                "cluster_name" : "mycluster",
-                "desired_configs" : "{}",
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "HBASE_MASTER",
-                "host_name" : "ip-10-83-47-139.ec2.internal"
-              },
-              "metrics" : {
-                "boottime" : 1.352944348E9,
-                "process" : {
-                  "proc_total" : 293.219444444,
-                  "proc_run" : 0.0972222222222
-                },
-                "hbase" : {
-                  "master" : {
-                    "splitSize_avg_time" : 0.0,
-                    "splitTime_avg_time" : 0.0,
-                    "splitTime_num_ops" : 0.0,
-                    "cluster_requests" : 0.0,
-                    "splitSize_num_ops" : 0.0
-                  }
-                },
-                "disk" : {
-                  "disk_total" : 893.765,
-                  "disk_free" : 845.308872222,
-                  "part_max_used" : 48.7180555556
-                },
-                "cpu" : {
-                  "cpu_speed" : 2266.0,
-                  "cpu_num" : 2.0,
-                  "cpu_wio" : 0.495833333333,
-                  "cpu_idle" : 84.5666666667,
-                  "cpu_nice" : 0.0,
-                  "cpu_aidle" : 0.0,
-                  "cpu_system" : 3.34,
-                  "cpu_user" : 11.5819444444
-                },
-                "jvm" : {
-                  "memHeapCommittedM" : 81.0625,
-                  "logFatal" : 0.0,
-                  "threadsBlocked" : 0.0,
-                  "threadsWaiting" : 42.0,
-                  "gcCount" : 18.9194444444,
-                  "logError" : 0.0,
-                  "logWarn" : 0.0,
-                  "memNonHeapCommittedM" : 36.679688,
-                  "gcTimeMillis" : 113.919444444,
-                  "memNonHeapUsedM" : 22.1864361111,
-                  "logInfo" : 0.0,
-                  "memHeapUsedM" : 10.7416655886,
-                  "threadsNew" : 0.0,
-                  "threadsTerminated" : 0.0,
-                  "threadsTimedWaiting" : 8.24444444444,
-                  "maxMemoryM" : 1019.875,
-                  "threadsRunnable" : 16.0
-                },
-                "load" : {
-                  "load_fifteen" : 0.356222222222,
-                  "load_one" : 0.504861111111,
-                  "load_five" : 0.372333333333
-                },
-                "memory" : {
-                  "mem_total" : 7646152.0,
-                  "swap_free" : 0.0,
-                  "mem_buffers" : 63860.1,
-                  "mem_shared" : 0.0,
-                  "mem_free" : 4787479.36667,
-                  "mem_cached" : 1545456.5,
-                  "swap_total" : 0.0
-                },
-                "network" : {
-                  "pkts_out" : 29.2349444444,
-                  "bytes_in" : 45360.1838889,
-                  "bytes_out" : 15245.1213056,
-                  "pkts_in" : 234.745277778
-                }
-              },
-              "component" : [
-                {
-                  "href" : "http://ambari:8080/api/clusters/mycluster/services/HBASE/components/HBASE_MASTER",
-                  "ServiceComponentInfo" : {
-                    "cluster_name" : "mycluster",
-                    "component_name" : "HBASE_MASTER",
-                    "service_name" : "HBASE"
-                  }
-                }
-              ]
-            }
-          ]
-        },
-        {
-          "href" : "http://ambari:8080/api/clusters/mycluster/services/HBASE/components/HBASE_REGIONSERVER",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "HBASE_REGIONSERVER",
-            "service_name" : "HBASE"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ambari:8080/api/clusters/mycluster/hosts/ip-10-196-102-80.ec2.internal/host_components/HBASE_REGIONSERVER",
-              "HostRoles" : {
-                "configs" : "{\"hbase-site\":\"version1\",\"global\":\"version1\",\"core-site\":\"version1\"}",
-                "cluster_name" : "mycluster",
-                "desired_configs" : "{}",
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "HBASE_REGIONSERVER",
-                "host_name" : "ip-10-196-102-80.ec2.internal"
-              },
-              "metrics" : {
-                "boottime" : 1.352944347E9,
-                "process" : {
-                  "proc_total" : 263.683333333,
-                  "proc_run" : 0.813888888889
-                },
-                "rpc" : {
-                  "rpcAuthorizationSuccesses" : 0.0166666666667,
-                  "rpcAuthorizationFailures" : 0.0,
-                  "SentBytes" : 14.6277777778,
-                  "ReceivedBytes" : 54.325,
-                  "NumOpenConnections" : 0.0,
-                  "callQueueLen" : 0.0,
-                  "RpcQueueTime_num_ops" : 0.122222222222,
-                  "rpcAuthenticationSuccesses" : 0.0,
-                  "RpcProcessingTime_num_ops" : 0.122222222222,
-                  "RpcProcessingTime_avg_time" : 0.544444444444,
-                  "rpcAuthenticationFailures" : 0.0,
-                  "RpcQueueTime_avg_time" : 2.51111111111
-                },
-                "ugi" : {
-                  "loginFailure_num_ops" : 0.0,
-                  "loginSuccess_num_ops" : 0.0,
-                  "loginSuccess_avg_time" : 0.0,
-                  "loginFailure_avg_time" : 0.0
-                },
-                "hbase" : {
-                  "regionserver" : {
-                    "compactionSize_avg_time" : 0.0,
-                    "fsReadLatencyHistogram_99th_percentile" : 0.0,
-                    "fsWriteLatencyHistogram_median" : 0.0,
-                    "requests" : 0.0,
-                    "fsReadLatencyHistogram_num_ops" : 0.0,
-                    "hdfsBlocksLocalityIndex" : 0.0,
-                    "fsWriteLatencyHistogram_std_dev" : 0.0,
-                    "writeRequestsCount" : 1.0,
-                    "flushSize_avg_time" : 0.0,
-                    "fsWriteLatencyHistogram_max" : 0.0,
-                    "fsSyncLatency_num_ops" : 1.0,
-                    "fsWriteLatencyHistogram_95th_percentile" : 0.0,
-                    "fsSyncLatency_avg_time" : 0.0,
-                    "compactionSize_num_ops" : 0.0,
-                    "compactionTime_num_ops" : 0.0,
-                    "fsReadLatencyHistogram_mean" : 0.0,
-                    "blockCacheEvictedCount" : 0.0,
-                    "storefileIndexSizeMB" : 0.0,
-                    "fsWriteLatencyHistogram_75th_percentile" : 0.0,
-                    "fsWriteLatency_avg_time" : 0.0,
-                    "fsReadLatencyHistogram_max" : 0.0,
-                    "fsWriteLatencyHistogram_min" : 0.0,
-                    "compactionQueueSize" : 0.0,
-                    "totalStaticBloomSizeKB" : 0.0,
-                    "fsWriteLatencyHistogram_mean" : 0.0,
-                    "blockCacheFree" : 2.61032304E8,
-                    "totalStaticIndexSizeKB" : 0.0,
-                    "fsReadLatency_avg_time" : 0.0,
-                    "fsWriteLatency_num_ops" : 1.0,
-                    "memstoreSizeMB" : 0.0,
-                    "regions" : 1.0,
-                    "blockCacheCount" : 0.0,
-                    "blockCacheHitRatio" : 0.0,
-                    "flushQueueSize" : 0.0,
-                    "blockCacheHitCachingRatio" : 0.0,
-                    "fsReadLatencyHistogram_95th_percentile" : 0.0,
-                    "blockCacheHitCount" : 0.0,
-                    "flushTime_avg_time" : 0.0,
-                    "fsReadLatencyHistogram_min" : 0.0,
-                    "flushTime_num_ops" : 0.0,
-                    "compactionTime_avg_time" : 0.0,
-                    "fsReadLatency_num_ops" : 0.0,
-                    "blockCacheSize" : 2160272.0,
-                    "rootIndexSizeKB" : 0.0,
-                    "fsReadLatencyHistogram_std_dev" : 0.0,
-                    "readRequestsCount" : 2.0,
-                    "fsReadLatencyHistogram_75th_percentile" : 0.0,
-                    "blockCacheMissCount" : 0.0,
-                    "storefiles" : 0.0,
-                    "fsWriteLatencyHistogram_num_ops" : 0.0,
-                    "flushSize_num_ops" : 0.0,
-                    "fsWriteLatencyHistogram_99th_percentile" : 0.0,
-                    "stores" : 1.0,
-                    "fsReadLatencyHistogram_median" : 0.0
-                  }
-                },
-                "disk" : {
-                  "disk_free" : 845.075,
-                  "part_max_used" : 52.6
-                },
-                "cpu" : {
-                  "cpu_speed" : 2659.0,
-                  "cpu_num" : 2.0,
-                  "cpu_wio" : 0.123888888889,
-                  "cpu_idle" : 99.0469444444,
-                  "cpu_nice" : 0.0,
-                  "cpu_aidle" : 0.0,
-                  "cpu_system" : 0.401944444444,
-                  "cpu_user" : 0.4
-                },
-                "jvm" : {
-                  "memHeapCommittedM" : 760.003819444,
-                  "logFatal" : 0.0,
-                  "threadsBlocked" : 0.0,
-                  "threadsWaiting" : 38.2944444444,
-                  "gcCount" : 2.3860929425E7,
-                  "logError" : 0.0,
-                  "logWarn" : 0.0,
-                  "memNonHeapCommittedM" : 32.7367406333,
-                  "gcTimeMillis" : 7.63549738133E7,
-                  "memNonHeapUsedM" : 21.0635269694,
-                  "logInfo" : 7.158278827E7,
-                  "memHeapUsedM" : 83.2495438167,
-                  "threadsNew" : 0.0,
-                  "threadsTerminated" : 0.0,
-                  "threadsTimedWaiting" : 11.1388888889,
-                  "maxMemoryM" : 1004.0,
-                  "threadsRunnable" : 14.2777777778
-                },
-                "load" : {
-                  "load_fifteen" : 0.0466666666667,
-                  "load_one" : 6.11111111111E-4,
-                  "load_five" : 6.11111111111E-4
-                },
-                "memory" : {
-                  "mem_total" : 7646152.0,
-                  "swap_free" : 0.0,
-                  "mem_buffers" : 62720.2888889,
-                  "mem_shared" : 0.0,
-                  "mem_free" : 4978855.83333,
-                  "mem_cached" : 1756252.53333,
-                  "swap_total" : 0.0
-                },
-                "network" : {
-                  "pkts_out" : 105.755833333,
-                  "bytes_in" : 1895.95808333,
-                  "bytes_out" : 21296.58625,
-                  "pkts_in" : 11.2986666667
-                }
-              },
-              "component" : [
-                {
-                  "href" : "http://ambari:8080/api/clusters/mycluster/services/HBASE/components/HBASE_REGIONSERVER",
-                  "ServiceComponentInfo" : {
-                    "cluster_name" : "mycluster",
-                    "component_name" : "HBASE_REGIONSERVER",
-                    "service_name" : "HBASE"
-                  }
-                }
-              ]
-            },
-            {
-              "href" : "http://ambari:8080/api/clusters/mycluster/hosts/ip-10-118-149-18.ec2.internal/host_components/HBASE_REGIONSERVER",
-              "HostRoles" : {
-                "configs" : "{\"hbase-site\":\"version1\",\"global\":\"version1\",\"core-site\":\"version1\"}",
-                "cluster_name" : "mycluster",
-                "desired_configs" : "{}",
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "HBASE_REGIONSERVER",
-                "host_name" : "ip-10-118-149-18.ec2.internal"
-              },
-              "metrics" : {
-                "boottime" : 1.352944346E9,
-                "process" : {
-                  "proc_total" : 264.063888889,
-                  "proc_run" : 1.16944444444
-                },
-                "rpc" : {
-                  "rpcAuthorizationSuccesses" : 1.19304647083E7,
-                  "rpcAuthorizationFailures" : 0.0,
-                  "SentBytes" : 1.19304639222E7,
-                  "ReceivedBytes" : 1.19304635583E7,
-                  "NumOpenConnections" : 0.0,
-                  "callQueueLen" : 0.0,
-                  "RpcQueueTime_num_ops" : 1.19304647056E7,
-                  "rpcAuthenticationSuccesses" : 0.0,
-                  "RpcProcessingTime_num_ops" : 1.19304647056E7,
-                  "RpcProcessingTime_avg_time" : 0.344444444444,
-                  "rpcAuthenticationFailures" : 0.0,
-                  "RpcQueueTime_avg_time" : 3.78888888889
-                },
-                "ugi" : {
-                  "loginFailure_num_ops" : 0.0,
-                  "loginSuccess_num_ops" : 0.0,
-                  "loginSuccess_avg_time" : 0.0,
-                  "loginFailure_avg_time" : 0.0
-                },
-                "hbase" : {
-                  "regionserver" : {
-                    "compactionSize_avg_time" : 0.0,
-                    "fsReadLatencyHistogram_99th_percentile" : 0.0,
-                    "fsWriteLatencyHistogram_median" : 0.0,
-                    "requests" : 0.0,
-                    "fsReadLatencyHistogram_num_ops" : 0.0,
-                    "hdfsBlocksLocalityIndex" : 100.0,
-                    "fsWriteLatencyHistogram_std_dev" : 0.0,
-                    "writeRequestsCount" : 3.0,
-                    "flushSize_avg_time" : 0.0,
-                    "fsWriteLatencyHistogram_max" : 0.0,
-                    "fsSyncLatency_num_ops" : 4.0,
-                    "fsWriteLatencyHistogram_95th_percentile" : 0.0,
-                    "fsSyncLatency_avg_time" : 0.0,
-                    "compactionSize_num_ops" : 0.0,
-                    "compactionTime_num_ops" : 0.0,
-                    "fsReadLatencyHistogram_mean" : 0.0,
-                    "blockCacheEvictedCount" : 0.0,
-                    "storefileIndexSizeMB" : 0.0,
-                    "fsWriteLatencyHistogram_75th_percentile" : 0.0,
-                    "fsWriteLatency_avg_time" : 0.0,
-                    "fsReadLatencyHistogram_max" : 0.0,
-                    "fsWriteLatencyHistogram_min" : 0.0,
-                    "compactionQueueSize" : 0.0,
-                    "totalStaticBloomSizeKB" : 0.0,
-                    "fsWriteLatencyHistogram_mean" : 0.0,
-                    "blockCacheFree" : 2.61031624E8,
-                    "totalStaticIndexSizeKB" : 0.0,
-                    "fsReadLatency_avg_time" : 0.0,
-                    "fsWriteLatency_num_ops" : 3.0,
-                    "memstoreSizeMB" : 0.0,
-                    "regions" : 2.0,
-                    "blockCacheCount" : 1.0,
-                    "blockCacheHitRatio" : 96.0,
-                    "flushQueueSize" : 0.0,
-                    "blockCacheHitCachingRatio" : 96.0,
-                    "fsReadLatencyHistogram_95th_percentile" : 0.0,
-                    "blockCacheHitCount" : 26.0,
-                    "flushTime_avg_time" : 0.0,
-                    "fsReadLatencyHistogram_min" : 0.0,
-                    "flushTime_num_ops" : 0.0,
-                    "compactionTime_avg_time" : 0.0,
-                    "fsReadLatency_num_ops" : 0.0,
-                    "blockCacheSize" : 2160952.0,
-                    "rootIndexSizeKB" : 0.0,
-                    "fsReadLatencyHistogram_std_dev" : 0.0,
-                    "readRequestsCount" : 88.4222222222,
-                    "fsReadLatencyHistogram_75th_percentile" : 0.0,
-                    "blockCacheMissCount" : 1.0,
-                    "storefiles" : 1.0,
-                    "fsWriteLatencyHistogram_num_ops" : 0.0,
-                    "flushSize_num_ops" : 0.0,
-                    "fsWriteLatencyHistogram_99th_percentile" : 0.0,
-                    "stores" : 2.0,
-                    "fsReadLatencyHistogram_median" : 0.0
-                  }
-                },
-                "disk" : {
-                  "disk_free" : 845.599,
-                  "part_max_used" : 43.9
-                },
-                "cpu" : {
-                  "cpu_speed" : 2266.0,
-                  "cpu_num" : 2.0,
-                  "cpu_wio" : 0.0,
-                  "cpu_idle" : 98.6938888889,
-                  "cpu_nice" : 0.0,
-                  "cpu_aidle" : 0.0,
-                  "cpu_system" : 0.531111111111,
-                  "cpu_user" : 0.775
-                },
-                "jvm" : {
-                  "memHeapCommittedM" : 174.446875,
-                  "logFatal" : 0.0,
-                  "threadsBlocked" : 0.0,
-                  "threadsWaiting" : 14.4361111111,
-                  "gcCount" : 1.19304647115E8,
-                  "logError" : 0.0,
-                  "logWarn" : 0.0,
-                  "memNonHeapCommittedM" : 24.3248698333,
-                  "gcTimeMillis" : 1.43165576518E8,
-                  "memNonHeapUsedM" : 19.4993658028,
-                  "logInfo" : 2.38609294222E7,
-                  "memHeapUsedM" : 18.087863945,
-                  "threadsNew" : 0.0,
-                  "threadsTerminated" : 0.0,
-                  "threadsTimedWaiting" : 7.46388888889,
-                  "maxMemoryM" : 1004.0,
-                  "threadsRunnable" : 7.43611111111
-                },
-                "load" : {
-                  "load_fifteen" : 0.00313888888889,
-                  "load_one" : 0.0282777777778,
-                  "load_five" : 0.0163055555556
-                },
-                "memory" : {
-                  "mem_total" : 7646152.0,
-                  "swap_free" : 0.0,
-                  "mem_buffers" : 52429.0222222,
-                  "mem_shared" : 0.0,
-                  "mem_free" : 5610725.88889,
-                  "mem_cached" : 1247511.4,
-                  "swap_total" : 0.0
-                },
-                "network" : {
-                  "pkts_out" : 116.179833333,
-                  "bytes_in" : 1897.15633333,
-                  "bytes_out" : 23871.1300833,
-                  "pkts_in" : 11.2918333333
-                }
-              },
-              "component" : [
-                {
-                  "href" : "http://ambari:8080/api/clusters/mycluster/services/HBASE/components/HBASE_REGIONSERVER",
-                  "ServiceComponentInfo" : {
-                    "cluster_name" : "mycluster",
-                    "component_name" : "HBASE_REGIONSERVER",
-                    "service_name" : "HBASE"
-                  }
-                }
-              ]
-            }
-          ]
-        }
-      ]
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/services/PIG",
-      "ServiceInfo" : {
-        "cluster_name" : "mycluster",
-        "service_name" : "PIG"
-      },
-      "components" : [
-        {
-          "href" : "http://ambari:8080/api/clusters/mycluster/services/PIG/components/PIG",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "PIG",
-            "service_name" : "PIG"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ambari:8080/api/clusters/mycluster/hosts/ip-10-196-102-80.ec2.internal/host_components/PIG",
-              "HostRoles" : {
-                "configs" : "{}",
-                "cluster_name" : "mycluster",
-                "desired_configs" : "{}",
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "PIG",
-                "host_name" : "ip-10-196-102-80.ec2.internal"
-              },
-              "component" : [
-                {
-                  "href" : "http://ambari:8080/api/clusters/mycluster/services/PIG/components/PIG",
-                  "ServiceComponentInfo" : {
-                    "cluster_name" : "mycluster",
-                    "component_name" : "PIG",
-                    "service_name" : "PIG"
-                  }
-                }
-              ]
-            }
-          ]
-        }
-      ]
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/services/GANGLIA",
-      "ServiceInfo" : {
-        "cluster_name" : "mycluster",
-        "service_name" : "GANGLIA"
-      },
-      "components" : [
-        {
-          "href" : "http://ambari:8080/api/clusters/mycluster/services/GANGLIA/components/GANGLIA_MONITOR",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "GANGLIA_MONITOR",
-            "service_name" : "GANGLIA"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ambari:8080/api/clusters/mycluster/hosts/ip-10-204-102-239.ec2.internal/host_components/GANGLIA_MONITOR",
-              "HostRoles" : {
-                "configs" : "{}",
-                "cluster_name" : "mycluster",
-                "desired_configs" : "{}",
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "GANGLIA_MONITOR",
-                "host_name" : "ip-10-204-102-239.ec2.internal"
-              },
-              "component" : [
-                {
-                  "href" : "http://ambari:8080/api/clusters/mycluster/services/GANGLIA/components/GANGLIA_MONITOR",
-                  "ServiceComponentInfo" : {
-                    "cluster_name" : "mycluster",
-                    "component_name" : "GANGLIA_MONITOR",
-                    "service_name" : "GANGLIA"
-                  }
-                }
-              ]
-            },
-            {
-              "href" : "http://ambari:8080/api/clusters/mycluster/hosts/ip-10-83-47-139.ec2.internal/host_components/GANGLIA_MONITOR",
-              "HostRoles" : {
-                "configs" : "{}",
-                "cluster_name" : "mycluster",
-                "desired_configs" : "{}",
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "GANGLIA_MONITOR",
-                "host_name" : "ip-10-83-47-139.ec2.internal"
-              },
-              "component" : [
-                {
-                  "href" : "http://ambari:8080/api/clusters/mycluster/services/GANGLIA/components/GANGLIA_MONITOR",
-                  "ServiceComponentInfo" : {
-                    "cluster_name" : "mycluster",
-                    "component_name" : "GANGLIA_MONITOR",
-                    "service_name" : "GANGLIA"
-                  }
-                }
-              ]
-            },
-            {
-              "href" : "http://ambari:8080/api/clusters/mycluster/hosts/ip-10-196-102-80.ec2.internal/host_components/GANGLIA_MONITOR",
-              "HostRoles" : {
-                "configs" : "{}",
-                "cluster_name" : "mycluster",
-                "desired_configs" : "{}",
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "GANGLIA_MONITOR",
-                "host_name" : "ip-10-196-102-80.ec2.internal"
-              },
-              "component" : [
-                {
-                  "href" : "http://ambari:8080/api/clusters/mycluster/services/GANGLIA/components/GANGLIA_MONITOR",
-                  "ServiceComponentInfo" : {
-                    "cluster_name" : "mycluster",
-                    "component_name" : "GANGLIA_MONITOR",
-                    "service_name" : "GANGLIA"
-                  }
-                }
-              ]
-            },
-            {
-              "href" : "http://ambari:8080/api/clusters/mycluster/hosts/ip-10-204-62-16.ec2.internal/host_components/GANGLIA_MONITOR",
-              "HostRoles" : {
-                "configs" : "{}",
-                "cluster_name" : "mycluster",
-                "desired_configs" : "{}",
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "GANGLIA_MONITOR",
-                "host_name" : "ip-10-204-62-16.ec2.internal"
-              },
-              "component" : [
-                {
-                  "href" : "http://ambari:8080/api/clusters/mycluster/services/GANGLIA/components/GANGLIA_MONITOR",
-                  "ServiceComponentInfo" : {
-                    "cluster_name" : "mycluster",
-                    "component_name" : "GANGLIA_MONITOR",
-                    "service_name" : "GANGLIA"
-                  }
-                }
-              ]
-            },
-            {
-              "href" : "http://ambari:8080/api/clusters/mycluster/hosts/ip-10-118-149-18.ec2.internal/host_components/GANGLIA_MONITOR",
-              "HostRoles" : {
-                "configs" : "{}",
-                "cluster_name" : "mycluster",
-                "desired_configs" : "{}",
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "GANGLIA_MONITOR",
-                "host_name" : "ip-10-118-149-18.ec2.internal"
-              },
-              "component" : [
-                {
-                  "href" : "http://ambari:8080/api/clusters/mycluster/services/GANGLIA/components/GANGLIA_MONITOR",
-                  "ServiceComponentInfo" : {
-                    "cluster_name" : "mycluster",
-                    "component_name" : "GANGLIA_MONITOR",
-                    "service_name" : "GANGLIA"
-                  }
-                }
-              ]
-            }
-          ]
-        },
-        {
-          "href" : "http://ambari:8080/api/clusters/mycluster/services/GANGLIA/components/GANGLIA_SERVER",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "GANGLIA_SERVER",
-            "service_name" : "GANGLIA"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ambari:8080/api/clusters/mycluster/hosts/ip-10-83-47-139.ec2.internal/host_components/GANGLIA_SERVER",
-              "HostRoles" : {
-                "configs" : "{}",
-                "cluster_name" : "mycluster",
-                "desired_configs" : "{}",
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "GANGLIA_SERVER",
-                "host_name" : "ip-10-83-47-139.ec2.internal"
-              },
-              "component" : [
-                {
-                  "href" : "http://ambari:8080/api/clusters/mycluster/services/GANGLIA/components/GANGLIA_SERVER",
-                  "ServiceComponentInfo" : {
-                    "cluster_name" : "mycluster",
-                    "component_name" : "GANGLIA_SERVER",
-                    "service_name" : "GANGLIA"
-                  }
-                }
-              ]
-            }
-          ]
-        }
-      ]
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/services/ZOOKEEPER",
-      "ServiceInfo" : {
-        "cluster_name" : "mycluster",
-        "service_name" : "ZOOKEEPER"
-      },
-      "components" : [
-        {
-          "href" : "http://ambari:8080/api/clusters/mycluster/services/ZOOKEEPER/components/ZOOKEEPER_SERVER",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "ZOOKEEPER_SERVER",
-            "service_name" : "ZOOKEEPER"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ambari:8080/api/clusters/mycluster/hosts/ip-10-204-62-16.ec2.internal/host_components/ZOOKEEPER_SERVER",
-              "HostRoles" : {
-                "configs" : "{}",
-                "cluster_name" : "mycluster",
-                "desired_configs" : "{}",
-                "desired_state" : "STARTED",
-                "state" : "STARTED",
-                "component_name" : "ZOOKEEPER_SERVER",
-                "host_name" : "ip-10-204-62-16.ec2.internal"
-              },
-              "component" : [
-                {
-                  "href" : "http://ambari:8080/api/clusters/mycluster/services/ZOOKEEPER/components/ZOOKEEPER_SERVER",
-                  "ServiceComponentInfo" : {
-                    "cluster_name" : "mycluster",
-                    "component_name" : "ZOOKEEPER_SERVER",
-                    "service_name" : "ZOOKEEPER"
-                  }
-                }
-              ]
-            }
-          ]
-        },
-        {
-          "href" : "http://ambari:8080/api/clusters/mycluster/services/ZOOKEEPER/components/ZOOKEEPER_CLIENT",
-          "ServiceComponentInfo" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "ZOOKEEPER_CLIENT",
-            "service_name" : "ZOOKEEPER"
-          },
-          "host_components" : [
-            {
-              "href" : "http://ambari:8080/api/clusters/mycluster/hosts/ip-10-196-102-80.ec2.internal/host_components/ZOOKEEPER_CLIENT",
-              "HostRoles" : {
-                "configs" : "{}",
-                "cluster_name" : "mycluster",
-                "desired_configs" : "{}",
-                "desired_state" : "INSTALLED",
-                "state" : "INSTALLED",
-                "component_name" : "ZOOKEEPER_CLIENT",
-                "host_name" : "ip-10-196-102-80.ec2.internal"
-              },
-              "component" : [
-                {
-                  "href" : "http://ambari:8080/api/clusters/mycluster/services/ZOOKEEPER/components/ZOOKEEPER_CLIENT",
-                  "ServiceComponentInfo" : {
-                    "cluster_name" : "mycluster",
-                    "component_name" : "ZOOKEEPER_CLIENT",
-                    "service_name" : "ZOOKEEPER"
-                  }
-                }
-              ]
-            }
-          ]
-        }
-      ]
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/hosts/background_operations/bg_operations.json b/branch-1.2/ambari-web/app/assets/data/hosts/background_operations/bg_operations.json
deleted file mode 100644
index 912578c..0000000
--- a/branch-1.2/ambari-web/app/assets/data/hosts/background_operations/bg_operations.json
+++ /dev/null
@@ -1,21 +0,0 @@
-  {
-    "requestId"  : 23 ,
-    "tasks" : [
-    {
-      "taskId": 1,
-      "stageId": 2,
-      "hostname": "x.y.z.com",
-      "role": "NAMENODE",
-      "command": "START",
-      "status": "PENDING"
-    },
-    {
-      "taskId": 2,
-      "stageId": 3,
-      "hostname": "a.b.c.com",
-      "role": "DATANODE",
-      "command": "START",
-      "status": "PENDING"
-    }
-    ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/hosts/background_operations/logs/task1.json b/branch-1.2/ambari-web/app/assets/data/hosts/background_operations/logs/task1.json
deleted file mode 100644
index 4a6a7b9..0000000
--- a/branch-1.2/ambari-web/app/assets/data/hosts/background_operations/logs/task1.json
+++ /dev/null
@@ -1,13 +0,0 @@
-{
-  "taskId": 1,
-  "stageId": 2,
-  "hostname": "x.y.z.com",
-  "role": "NAMENODE",
-  "command": "START",
-  "status": "PENDING",
-  "exitcode": "exitcode",
-  "stderror": "stderror",
-  "stdout": "stdout",
-  "startTime": "",
-  "attemptCount": 8
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/hosts/background_operations/logs/task2.json b/branch-1.2/ambari-web/app/assets/data/hosts/background_operations/logs/task2.json
deleted file mode 100644
index d2defbb..0000000
--- a/branch-1.2/ambari-web/app/assets/data/hosts/background_operations/logs/task2.json
+++ /dev/null
@@ -1,13 +0,0 @@
-{
-  "taskId": 2,
-  "stageId": 3,
-  "hostname": "a.b.c.com",
-  "role": "DATANODE",
-  "command": "START",
-  "status": "PENDING",
-  "exitcode": "exitcode",
-  "stderror": "stderror",
-  "stdout": "stdout",
-  "startTime": "",
-  "attemptCount": 8
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/hosts/background_operations/logs/task3.json b/branch-1.2/ambari-web/app/assets/data/hosts/background_operations/logs/task3.json
deleted file mode 100644
index 05fa425..0000000
--- a/branch-1.2/ambari-web/app/assets/data/hosts/background_operations/logs/task3.json
+++ /dev/null
@@ -1,13 +0,0 @@
-{
-  "taskId": 3,
-  "stageId":4,
-  "hostname":"host3.com",
-  "role":"DATANODE",
-  "command":"STOP",
-  "status":"PENDING",
-  "exitcode": "exitcode",
-  "stderror": "stderror",
-  "stdout": "stdout",
-  "startTime": "",
-  "attemptCount": 8
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/hosts/hosts.json b/branch-1.2/ambari-web/app/assets/data/hosts/hosts.json
deleted file mode 100644
index d39e7f3..0000000
--- a/branch-1.2/ambari-web/app/assets/data/hosts/hosts.json
+++ /dev/null
@@ -1,1062 +0,0 @@
-{
-  "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts?fields=Hosts,host_components,metrics/cpu,metrics/disk,metrics/load,metrics/memory&_=1358264805279",
-  "items" : [
-    {
-      "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-38-164.ec2.internal",
-      "metrics" : {
-        "disk" : {
-          "disk_total" : 893.765,
-          "disk_free" : 844.923
-        },
-        "cpu" : {
-          "cpu_speed" : 2266.0,
-          "cpu_num" : 2.0,
-          "cpu_wio" : 0.0,
-          "cpu_idle" : 99.6513888889,
-          "cpu_nice" : 0.0,
-          "cpu_aidle" : 0.0,
-          "cpu_system" : 0.156111111111,
-          "cpu_user" : 0.1925
-        },
-        "load" : {
-          "load_fifteen" : 0.0,
-          "load_one" : 0.0,
-          "load_five" : 0.0
-        },
-        "memory" : {
-          "mem_total" : 7646152.0,
-          "swap_free" : 0.0,
-          "mem_buffers" : 81746.4888889,
-          "mem_shared" : 0.0,
-          "mem_free" : 4895641.53333,
-          "mem_cached" : 1902804.22222,
-          "swap_total" : 0.0
-        }
-      },
-      "Hosts" : {
-        "host_status" : "HEALTHY",
-        "public_host_name" : "ec2-23-20-94-194.compute-1.amazonaws.com",
-        "cpu_count" : 1,
-        "rack_info" : "/default-rack",
-        "host_health_report" : "",
-        "os_arch" : "x86_64",
-        "host_name" : "ip-10-110-38-164.ec2.internal",
-        "disk_info" : [
-          {
-            "available" : "3960848",
-            "used" : "1884868",
-            "percent" : "33%",
-            "size" : "5905712",
-            "type" : "ext4",
-            "mountpoint" : "/"
-          },
-          {
-            "available" : "3823076",
-            "used" : "0",
-            "percent" : "0%",
-            "size" : "3823076",
-            "type" : "tmpfs",
-            "mountpoint" : "/dev/shm"
-          },
-          {
-            "available" : "411234588",
-            "used" : "203012",
-            "percent" : "1%",
-            "size" : "433455904",
-            "type" : "ext3",
-            "mountpoint" : "/grid/0"
-          },
-          {
-            "available" : "411234588",
-            "used" : "203012",
-            "percent" : "1%",
-            "size" : "433455904",
-            "type" : "ext3",
-            "mountpoint" : "/grid/1"
-          }
-        ],
-        "ip" : "10.110.38.164",
-        "os_type" : "redhat6",
-        "last_heartbeat_time" : 1358264796432,
-        "host_state" : "HEALTHY",
-        "cluster_name" : "cl1",
-        "last_registration_time" : 1358244199614,
-        "total_mem" : 7644119
-      },
-      "host_components" : [
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-38-164.ec2.internal/host_components/HDFS_CLIENT",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "HDFS_CLIENT",
-            "host_name" : "ip-10-110-38-164.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-38-164.ec2.internal/host_components/HBASE_REGIONSERVER",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "HBASE_REGIONSERVER",
-            "host_name" : "ip-10-110-38-164.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-38-164.ec2.internal/host_components/DATANODE",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "DATANODE",
-            "host_name" : "ip-10-110-38-164.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-38-164.ec2.internal/host_components/SQOOP",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "SQOOP",
-            "host_name" : "ip-10-110-38-164.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-38-164.ec2.internal/host_components/OOZIE_CLIENT",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "OOZIE_CLIENT",
-            "host_name" : "ip-10-110-38-164.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-38-164.ec2.internal/host_components/ZOOKEEPER_CLIENT",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "ZOOKEEPER_CLIENT",
-            "host_name" : "ip-10-110-38-164.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-38-164.ec2.internal/host_components/MAPREDUCE_CLIENT",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "MAPREDUCE_CLIENT",
-            "host_name" : "ip-10-110-38-164.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-38-164.ec2.internal/host_components/HBASE_CLIENT",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "HBASE_CLIENT",
-            "host_name" : "ip-10-110-38-164.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-38-164.ec2.internal/host_components/HCAT",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "HCAT",
-            "host_name" : "ip-10-110-38-164.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-38-164.ec2.internal/host_components/GANGLIA_MONITOR",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "GANGLIA_MONITOR",
-            "host_name" : "ip-10-110-38-164.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-38-164.ec2.internal/host_components/HIVE_CLIENT",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "HIVE_CLIENT",
-            "host_name" : "ip-10-110-38-164.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-38-164.ec2.internal/host_components/PIG",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "PIG",
-            "host_name" : "ip-10-110-38-164.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-38-164.ec2.internal/host_components/TASKTRACKER",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "TASKTRACKER",
-            "host_name" : "ip-10-110-38-164.ec2.internal"
-          }
-        }
-      ]
-    },
-    {
-      "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-191-202-42.ec2.internal",
-      "metrics" : {
-        "disk" : {
-          "disk_total" : 893.765,
-          "disk_free" : 844.317
-        },
-        "cpu" : {
-          "cpu_speed" : 2266.0,
-          "cpu_num" : 2.0,
-          "cpu_wio" : 0.505555555556,
-          "cpu_idle" : 83.2013888889,
-          "cpu_nice" : 0.0,
-          "cpu_aidle" : 0.0,
-          "cpu_system" : 5.78361111111,
-          "cpu_user" : 10.5008333333
-        },
-        "load" : {
-          "load_fifteen" : 0.417416666667,
-          "load_one" : 0.362472222222,
-          "load_five" : 0.371472222222
-        },
-        "memory" : {
-          "mem_total" : 7646152.0,
-          "swap_free" : 0.0,
-          "mem_buffers" : 103387.711111,
-          "mem_shared" : 0.0,
-          "mem_free" : 2903253.56667,
-          "mem_cached" : 2507736.87778,
-          "swap_total" : 0.0
-        }
-      },
-      "Hosts" : {
-        "host_status" : "HEALTHY",
-        "public_host_name" : "ec2-184-73-46-113.compute-1.amazonaws.com",
-        "cpu_count" : 1,
-        "rack_info" : "/default-rack",
-        "host_health_report" : "",
-        "os_arch" : "x86_64",
-        "host_name" : "ip-10-191-202-42.ec2.internal",
-        "disk_info" : [
-          {
-            "available" : "3512988",
-            "used" : "2332728",
-            "percent" : "40%",
-            "size" : "5905712",
-            "type" : "ext4",
-            "mountpoint" : "/"
-          },
-          {
-            "available" : "3823076",
-            "used" : "0",
-            "percent" : "0%",
-            "size" : "3823076",
-            "type" : "tmpfs",
-            "mountpoint" : "/dev/shm"
-          },
-          {
-            "available" : "411234588",
-            "used" : "203012",
-            "percent" : "1%",
-            "size" : "433455904",
-            "type" : "ext3",
-            "mountpoint" : "/grid/0"
-          },
-          {
-            "available" : "411234588",
-            "used" : "203012",
-            "percent" : "1%",
-            "size" : "433455904",
-            "type" : "ext3",
-            "mountpoint" : "/grid/1"
-          }
-        ],
-        "ip" : "10.191.202.42",
-        "os_type" : "redhat6",
-        "last_heartbeat_time" : 1358264801546,
-        "host_state" : "HEALTHY",
-        "cluster_name" : "cl1",
-        "last_registration_time" : 1358244171255,
-        "total_mem" : 7644119
-      },
-      "host_components" : [
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-191-202-42.ec2.internal/host_components/HBASE_CLIENT",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "HBASE_CLIENT",
-            "host_name" : "ip-10-191-202-42.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-191-202-42.ec2.internal/host_components/SQOOP",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "SQOOP",
-            "host_name" : "ip-10-191-202-42.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-191-202-42.ec2.internal/host_components/OOZIE_CLIENT",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "OOZIE_CLIENT",
-            "host_name" : "ip-10-191-202-42.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-191-202-42.ec2.internal/host_components/ZOOKEEPER_CLIENT",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "ZOOKEEPER_CLIENT",
-            "host_name" : "ip-10-191-202-42.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-191-202-42.ec2.internal/host_components/HBASE_REGIONSERVER",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "HBASE_REGIONSERVER",
-            "host_name" : "ip-10-191-202-42.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-191-202-42.ec2.internal/host_components/HIVE_CLIENT",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "HIVE_CLIENT",
-            "host_name" : "ip-10-191-202-42.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-191-202-42.ec2.internal/host_components/HBASE_MASTER",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "HBASE_MASTER",
-            "host_name" : "ip-10-191-202-42.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-191-202-42.ec2.internal/host_components/HDFS_CLIENT",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "HDFS_CLIENT",
-            "host_name" : "ip-10-191-202-42.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-191-202-42.ec2.internal/host_components/TASKTRACKER",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "TASKTRACKER",
-            "host_name" : "ip-10-191-202-42.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-191-202-42.ec2.internal/host_components/ZOOKEEPER_SERVER",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "ZOOKEEPER_SERVER",
-            "host_name" : "ip-10-191-202-42.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-191-202-42.ec2.internal/host_components/GANGLIA_SERVER",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "GANGLIA_SERVER",
-            "host_name" : "ip-10-191-202-42.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-191-202-42.ec2.internal/host_components/DATANODE",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "DATANODE",
-            "host_name" : "ip-10-191-202-42.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-191-202-42.ec2.internal/host_components/GANGLIA_MONITOR",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "GANGLIA_MONITOR",
-            "host_name" : "ip-10-191-202-42.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-191-202-42.ec2.internal/host_components/NAMENODE",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "NAMENODE",
-            "host_name" : "ip-10-191-202-42.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-191-202-42.ec2.internal/host_components/HCAT",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "HCAT",
-            "host_name" : "ip-10-191-202-42.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-191-202-42.ec2.internal/host_components/MAPREDUCE_CLIENT",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "MAPREDUCE_CLIENT",
-            "host_name" : "ip-10-191-202-42.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-191-202-42.ec2.internal/host_components/PIG",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "PIG",
-            "host_name" : "ip-10-191-202-42.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-191-202-42.ec2.internal/host_components/NAGIOS_SERVER",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "NAGIOS_SERVER",
-            "host_name" : "ip-10-191-202-42.ec2.internal"
-          }
-        }
-      ]
-    },
-    {
-      "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-16-48-4B.compute-1.internal",
-      "metrics" : {
-        "disk" : {
-          "disk_total" : 893.765,
-          "disk_free" : 844.973
-        },
-        "cpu" : {
-          "cpu_speed" : 2266.0,
-          "cpu_num" : 2.0,
-          "cpu_wio" : 0.104444444444,
-          "cpu_idle" : 99.1966666667,
-          "cpu_nice" : 0.0,
-          "cpu_aidle" : 0.0,
-          "cpu_system" : 0.374166666667,
-          "cpu_user" : 0.355
-        },
-        "load" : {
-          "load_fifteen" : 0.0,
-          "load_one" : 0.0,
-          "load_five" : 0.0
-        },
-        "memory" : {
-          "mem_total" : 7646152.0,
-          "swap_free" : 0.0,
-          "mem_buffers" : 81301.6,
-          "mem_shared" : 0.0,
-          "mem_free" : 4850323.57778,
-          "mem_cached" : 1853997.93333,
-          "swap_total" : 0.0
-        }
-      },
-      "Hosts" : {
-        "host_status" : "HEALTHY",
-        "public_host_name" : "ec2-50-17-21-254.compute-1.amazonaws.com",
-        "cpu_count" : 1,
-        "rack_info" : "/default-rack",
-        "host_health_report" : "",
-        "os_arch" : "x86_64",
-        "host_name" : "domU-12-31-39-16-48-4B.compute-1.internal",
-        "disk_info" : [
-          {
-            "available" : "3964900",
-            "used" : "1880816",
-            "percent" : "33%",
-            "size" : "5905712",
-            "type" : "ext4",
-            "mountpoint" : "/"
-          },
-          {
-            "available" : "3823076",
-            "used" : "0",
-            "percent" : "0%",
-            "size" : "3823076",
-            "type" : "tmpfs",
-            "mountpoint" : "/dev/shm"
-          },
-          {
-            "available" : "411234588",
-            "used" : "203012",
-            "percent" : "1%",
-            "size" : "433455904",
-            "type" : "ext3",
-            "mountpoint" : "/grid/0"
-          },
-          {
-            "available" : "411234588",
-            "used" : "203012",
-            "percent" : "1%",
-            "size" : "433455904",
-            "type" : "ext3",
-            "mountpoint" : "/grid/1"
-          }
-        ],
-        "ip" : "10.96.75.185",
-        "os_type" : "redhat6",
-        "last_heartbeat_time" : 1358264800613,
-        "host_state" : "HEALTHY",
-        "cluster_name" : "cl1",
-        "last_registration_time" : 1358244226244,
-        "total_mem" : 7644119
-      },
-      "host_components" : [
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-16-48-4B.compute-1.internal/host_components/HIVE_CLIENT",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "HIVE_CLIENT",
-            "host_name" : "domU-12-31-39-16-48-4B.compute-1.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-16-48-4B.compute-1.internal/host_components/SQOOP",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "SQOOP",
-            "host_name" : "domU-12-31-39-16-48-4B.compute-1.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-16-48-4B.compute-1.internal/host_components/DATANODE",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "DATANODE",
-            "host_name" : "domU-12-31-39-16-48-4B.compute-1.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-16-48-4B.compute-1.internal/host_components/ZOOKEEPER_SERVER",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "ZOOKEEPER_SERVER",
-            "host_name" : "domU-12-31-39-16-48-4B.compute-1.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-16-48-4B.compute-1.internal/host_components/HCAT",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "HCAT",
-            "host_name" : "domU-12-31-39-16-48-4B.compute-1.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-16-48-4B.compute-1.internal/host_components/HBASE_CLIENT",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "HBASE_CLIENT",
-            "host_name" : "domU-12-31-39-16-48-4B.compute-1.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-16-48-4B.compute-1.internal/host_components/TASKTRACKER",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "TASKTRACKER",
-            "host_name" : "domU-12-31-39-16-48-4B.compute-1.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-16-48-4B.compute-1.internal/host_components/HBASE_REGIONSERVER",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "HBASE_REGIONSERVER",
-            "host_name" : "domU-12-31-39-16-48-4B.compute-1.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-16-48-4B.compute-1.internal/host_components/GANGLIA_MONITOR",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "GANGLIA_MONITOR",
-            "host_name" : "domU-12-31-39-16-48-4B.compute-1.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-16-48-4B.compute-1.internal/host_components/OOZIE_CLIENT",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "OOZIE_CLIENT",
-            "host_name" : "domU-12-31-39-16-48-4B.compute-1.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-16-48-4B.compute-1.internal/host_components/MAPREDUCE_CLIENT",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "MAPREDUCE_CLIENT",
-            "host_name" : "domU-12-31-39-16-48-4B.compute-1.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-16-48-4B.compute-1.internal/host_components/PIG",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "PIG",
-            "host_name" : "domU-12-31-39-16-48-4B.compute-1.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-16-48-4B.compute-1.internal/host_components/HDFS_CLIENT",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "HDFS_CLIENT",
-            "host_name" : "domU-12-31-39-16-48-4B.compute-1.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-16-48-4B.compute-1.internal/host_components/ZOOKEEPER_CLIENT",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "ZOOKEEPER_CLIENT",
-            "host_name" : "domU-12-31-39-16-48-4B.compute-1.internal"
-          }
-        }
-      ]
-    },
-    {
-      "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-79-42.ec2.internal",
-      "metrics" : {
-        "disk" : {
-          "disk_total" : 893.765,
-          "disk_free" : 844.876
-        },
-        "cpu" : {
-          "cpu_speed" : 2266.0,
-          "cpu_num" : 2.0,
-          "cpu_wio" : 0.0741666666667,
-          "cpu_idle" : 99.2025,
-          "cpu_nice" : 0.0,
-          "cpu_aidle" : 0.0,
-          "cpu_system" : 0.367222222222,
-          "cpu_user" : 0.386388888889
-        },
-        "load" : {
-          "load_fifteen" : 0.0,
-          "load_one" : 0.0172222222222,
-          "load_five" : 0.0119444444444
-        },
-        "memory" : {
-          "mem_total" : 7646152.0,
-          "swap_free" : 0.0,
-          "mem_buffers" : 81151.1777778,
-          "mem_shared" : 0.0,
-          "mem_free" : 4811126.51111,
-          "mem_cached" : 1948264.15556,
-          "swap_total" : 0.0
-        }
-      },
-      "Hosts" : {
-        "host_status" : "HEALTHY",
-        "public_host_name" : "ec2-184-73-56-206.compute-1.amazonaws.com",
-        "cpu_count" : 1,
-        "rack_info" : "/default-rack",
-        "host_health_report" : "",
-        "os_arch" : "x86_64",
-        "host_name" : "ip-10-110-79-42.ec2.internal",
-        "disk_info" : [
-          {
-            "available" : "3960848",
-            "used" : "1884868",
-            "percent" : "33%",
-            "size" : "5905712",
-            "type" : "ext4",
-            "mountpoint" : "/"
-          },
-          {
-            "available" : "3823076",
-            "used" : "0",
-            "percent" : "0%",
-            "size" : "3823076",
-            "type" : "tmpfs",
-            "mountpoint" : "/dev/shm"
-          },
-          {
-            "available" : "411234588",
-            "used" : "203012",
-            "percent" : "1%",
-            "size" : "433455904",
-            "type" : "ext3",
-            "mountpoint" : "/grid/0"
-          },
-          {
-            "available" : "411234588",
-            "used" : "203012",
-            "percent" : "1%",
-            "size" : "433455904",
-            "type" : "ext3",
-            "mountpoint" : "/grid/1"
-          }
-        ],
-        "ip" : "10.110.79.42",
-        "os_type" : "redhat6",
-        "last_heartbeat_time" : 1358264801346,
-        "host_state" : "HEALTHY",
-        "cluster_name" : "cl1",
-        "last_registration_time" : 1358244211909,
-        "total_mem" : 7644119
-      },
-      "host_components" : [
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-79-42.ec2.internal/host_components/HBASE_CLIENT",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "HBASE_CLIENT",
-            "host_name" : "ip-10-110-79-42.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-79-42.ec2.internal/host_components/DATANODE",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "DATANODE",
-            "host_name" : "ip-10-110-79-42.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-79-42.ec2.internal/host_components/HBASE_REGIONSERVER",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "HBASE_REGIONSERVER",
-            "host_name" : "ip-10-110-79-42.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-79-42.ec2.internal/host_components/HDFS_CLIENT",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "HDFS_CLIENT",
-            "host_name" : "ip-10-110-79-42.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-79-42.ec2.internal/host_components/PIG",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "PIG",
-            "host_name" : "ip-10-110-79-42.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-79-42.ec2.internal/host_components/HIVE_CLIENT",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "HIVE_CLIENT",
-            "host_name" : "ip-10-110-79-42.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-79-42.ec2.internal/host_components/ZOOKEEPER_CLIENT",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "ZOOKEEPER_CLIENT",
-            "host_name" : "ip-10-110-79-42.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-79-42.ec2.internal/host_components/SQOOP",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "SQOOP",
-            "host_name" : "ip-10-110-79-42.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-79-42.ec2.internal/host_components/MAPREDUCE_CLIENT",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "MAPREDUCE_CLIENT",
-            "host_name" : "ip-10-110-79-42.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-79-42.ec2.internal/host_components/OOZIE_CLIENT",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "OOZIE_CLIENT",
-            "host_name" : "ip-10-110-79-42.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-79-42.ec2.internal/host_components/TASKTRACKER",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "TASKTRACKER",
-            "host_name" : "ip-10-110-79-42.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-79-42.ec2.internal/host_components/HCAT",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "HCAT",
-            "host_name" : "ip-10-110-79-42.ec2.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/ip-10-110-79-42.ec2.internal/host_components/GANGLIA_MONITOR",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "GANGLIA_MONITOR",
-            "host_name" : "ip-10-110-79-42.ec2.internal"
-          }
-        }
-      ]
-    },
-    {
-      "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal",
-      "metrics" : {
-        "disk" : {
-          "disk_total" : 893.765,
-          "disk_free" : 844.359
-        },
-        "cpu" : {
-          "cpu_speed" : 2666.0,
-          "cpu_num" : 2.0,
-          "cpu_wio" : 0.1,
-          "cpu_idle" : 98.85,
-          "cpu_nice" : 0.0,
-          "cpu_aidle" : 0.0,
-          "cpu_system" : 0.525,
-          "cpu_user" : 0.575
-        },
-        "load" : {
-          "load_fifteen" : 0.0,
-          "load_one" : 0.00538888888889,
-          "load_five" : 7.22222222222E-4
-        },
-        "memory" : {
-          "mem_total" : 7646152.0,
-          "swap_free" : 0.0,
-          "mem_buffers" : 96371.0444444,
-          "mem_shared" : 0.0,
-          "mem_free" : 3233313.1,
-          "mem_cached" : 2452721.45556,
-          "swap_total" : 0.0
-        }
-      },
-      "Hosts" : {
-        "host_status" : "HEALTHY",
-        "public_host_name" : "ec2-54-234-28-43.compute-1.amazonaws.com",
-        "cpu_count" : 1,
-        "rack_info" : "/default-rack",
-        "host_health_report" : "",
-        "os_arch" : "x86_64",
-        "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal",
-        "disk_info" : [
-          {
-            "available" : "3960476",
-            "used" : "1885240",
-            "percent" : "33%",
-            "size" : "5905712",
-            "type" : "ext4",
-            "mountpoint" : "/"
-          },
-          {
-            "available" : "3823076",
-            "used" : "0",
-            "percent" : "0%",
-            "size" : "3823076",
-            "type" : "tmpfs",
-            "mountpoint" : "/dev/shm"
-          },
-          {
-            "available" : "411234588",
-            "used" : "203012",
-            "percent" : "1%",
-            "size" : "433455904",
-            "type" : "ext3",
-            "mountpoint" : "/grid/0"
-          },
-          {
-            "available" : "411234588",
-            "used" : "203012",
-            "percent" : "1%",
-            "size" : "433455904",
-            "type" : "ext3",
-            "mountpoint" : "/grid/1"
-          }
-        ],
-        "ip" : "10.192.229.235",
-        "os_type" : "redhat6",
-        "last_heartbeat_time" : 1358264805167,
-        "host_state" : "HEALTHY",
-        "cluster_name" : "cl1",
-        "last_registration_time" : 1358244189981,
-        "total_mem" : 7644119
-      },
-      "host_components" : [
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/SECONDARY_NAMENODE",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "SECONDARY_NAMENODE",
-            "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/HBASE_CLIENT",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "HBASE_CLIENT",
-            "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/TASKTRACKER",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "TASKTRACKER",
-            "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/MAPREDUCE_CLIENT",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "MAPREDUCE_CLIENT",
-            "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/DATANODE",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "DATANODE",
-            "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/PIG",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "PIG",
-            "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/HDFS_CLIENT",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "HDFS_CLIENT",
-            "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/SQOOP",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "SQOOP",
-            "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/ZOOKEEPER_SERVER",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "ZOOKEEPER_SERVER",
-            "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/MYSQL_SERVER",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "MYSQL_SERVER",
-            "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/HIVE_METASTORE",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "HIVE_METASTORE",
-            "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/OOZIE_SERVER",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "OOZIE_SERVER",
-            "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/ZOOKEEPER_CLIENT",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "ZOOKEEPER_CLIENT",
-            "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/GANGLIA_MONITOR",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "GANGLIA_MONITOR",
-            "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/HIVE_SERVER",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "HIVE_SERVER",
-            "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/HIVE_CLIENT",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "HIVE_CLIENT",
-            "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/JOBTRACKER",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "JOBTRACKER",
-            "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/OOZIE_CLIENT",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "OOZIE_CLIENT",
-            "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/WEBHCAT_SERVER",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "WEBHCAT_SERVER",
-            "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/HCAT",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "HCAT",
-            "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-          }
-        },
-        {
-          "href" : "http://ec2-184-73-46-113.compute-1.amazonaws.com:8080/api/v1/clusters/cl1/hosts/domU-12-31-39-0E-E6-01.compute-1.internal/host_components/HBASE_REGIONSERVER",
-          "HostRoles" : {
-            "cluster_name" : "cl1",
-            "component_name" : "HBASE_REGIONSERVER",
-            "host_name" : "domU-12-31-39-0E-E6-01.compute-1.internal"
-          }
-        }
-      ]
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/hosts/hosts70.json b/branch-1.2/ambari-web/app/assets/data/hosts/hosts70.json
deleted file mode 100644
index 8342b24..0000000
--- a/branch-1.2/ambari-web/app/assets/data/hosts/hosts70.json
+++ /dev/null
@@ -1,1436 +0,0 @@
-{
-  "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts?fields=*",
-  "items" : [
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev1.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev1.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : [
-        {
-          "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev.hortonworks.com/host_components/SECONDARY_NAMENODE",
-          "HostRoles" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "SECONDARY_NAMENODE",
-            "host_name" : "dev1.hortonworks.com"
-          }
-        },
-        {
-          "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev.hortonworks.com/host_components/JOBTRACKER",
-          "HostRoles" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "JOBTRACKER",
-            "host_name" : "dev1.hortonworks.com"
-          }
-        },
-        {
-          "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev.hortonworks.com/host_components/DATANODE",
-          "HostRoles" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "DATANODE",
-            "host_name" : "dev1.hortonworks.com"
-          }
-        },
-        {
-          "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev.hortonworks.com/host_components/HDFS_CLIENT",
-          "HostRoles" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "HDFS_CLIENT",
-            "host_name" : "dev1.hortonworks.com"
-          }
-        },
-        {
-          "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev.hortonworks.com/host_components/TASKTRACKER",
-          "HostRoles" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "TASKTRACKER",
-            "host_name" : "dev1.hortonworks.com"
-          }
-        },
-        {
-          "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev.hortonworks.com/host_components/NAMENODE",
-          "HostRoles" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "NAMENODE",
-            "host_name" : "dev1.hortonworks.com"
-          }
-        },
-        {
-          "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev.hortonworks.com/host_components/MAPREDUCE_CLIENT",
-          "HostRoles" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "MAPREDUCE_CLIENT",
-            "host_name" : "dev1.hortonworks.com"
-          }
-        }
-      ]
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev2.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev2.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : [
-        {
-          "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev.hortonworks.com/host_components/SECONDARY_NAMENODE",
-          "HostRoles" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "SECONDARY_NAMENODE",
-            "host_name" : "dev2.hortonworks.com"
-          }
-        },
-        {
-          "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev.hortonworks.com/host_components/JOBTRACKER",
-          "HostRoles" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "JOBTRACKER",
-            "host_name" : "dev2.hortonworks.com"
-          }
-        },
-        {
-          "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev.hortonworks.com/host_components/DATANODE",
-          "HostRoles" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "DATANODE",
-            "host_name" : "dev2.hortonworks.com"
-          }
-        },
-        {
-          "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev.hortonworks.com/host_components/HDFS_CLIENT",
-          "HostRoles" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "HDFS_CLIENT",
-            "host_name" : "dev2.hortonworks.com"
-          }
-        },
-        {
-          "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev.hortonworks.com/host_components/TASKTRACKER",
-          "HostRoles" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "TASKTRACKER",
-            "host_name" : "dev2.hortonworks.com"
-          }
-        },
-        {
-          "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev.hortonworks.com/host_components/NAMENODE",
-          "HostRoles" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "NAMENODE",
-            "host_name" : "dev2.hortonworks.com"
-          }
-        },
-        {
-          "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev.hortonworks.com/host_components/MAPREDUCE_CLIENT",
-          "HostRoles" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "MAPREDUCE_CLIENT",
-            "host_name" : "dev2.hortonworks.com"
-          }
-        }
-      ]
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev3.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev3.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : [
-        {
-          "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev.hortonworks.com/host_components/SECONDARY_NAMENODE",
-          "HostRoles" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "SECONDARY_NAMENODE",
-            "host_name" : "dev3.hortonworks.com"
-          }
-        },
-        {
-          "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev.hortonworks.com/host_components/JOBTRACKER",
-          "HostRoles" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "JOBTRACKER",
-            "host_name" : "dev3.hortonworks.com"
-          }
-        },
-        {
-          "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev.hortonworks.com/host_components/DATANODE",
-          "HostRoles" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "DATANODE",
-            "host_name" : "dev3.hortonworks.com"
-          }
-        },
-        {
-          "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev.hortonworks.com/host_components/HDFS_CLIENT",
-          "HostRoles" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "HDFS_CLIENT",
-            "host_name" : "dev3.hortonworks.com"
-          }
-        },
-        {
-          "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev.hortonworks.com/host_components/TASKTRACKER",
-          "HostRoles" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "TASKTRACKER",
-            "host_name" : "dev3.hortonworks.com"
-          }
-        },
-        {
-          "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev.hortonworks.com/host_components/NAMENODE",
-          "HostRoles" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "NAMENODE",
-            "host_name" : "dev3.hortonworks.com"
-          }
-        },
-        {
-          "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev.hortonworks.com/host_components/MAPREDUCE_CLIENT",
-          "HostRoles" : {
-            "cluster_name" : "mycluster",
-            "component_name" : "MAPREDUCE_CLIENT",
-            "host_name" : "dev3.hortonworks.com"
-          }
-        }
-      ]
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev4.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev4.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev5.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev5.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev6.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev6.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev7.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev7.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev8.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev8.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev9.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev9.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev10.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev10.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev11.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev12.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev13.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev14.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev15.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev16.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev17.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev18.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev19.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev20.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev21.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev22.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev23.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev24.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev25.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev26.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev27.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev28.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev29.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev30.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev31.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev32.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev33.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev34.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev35.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev36.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev37.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev38.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev39.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev40.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev41.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev42.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev43.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev44.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev45.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev46.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev47.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev48.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev49.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev50.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev51.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev52.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev53.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev54.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev55.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev56.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev57.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev58.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev59.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev60.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev61.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev62.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev63.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev64.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev65.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev66.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev67.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev68.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev69.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev70.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev71.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev72.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev73.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/clusters/mycluster/hosts/dev11.hortonworks.com",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "cluster_name" : "mycluster",
-        "last_registration_time" : 1352386560520,
-        "rack_info" : "/default-rack",
-        "total_mem" : 3082813,
-        "os_arch" : "x86_64",
-        "host_name" : "dev74.hortonworks.com",
-        "disk_info" : "[{\"available\":\"47295056\",\"mountpoint\":\"/\",\"used\":\"3786948\",\"percent\":\"8%\",\"size\":\"51606140\",\"type\":\"ext4\"},{\"available\":\"1542800\",\"mountpoint\":\"/dev/shm\",\"used\":\"248\",\"percent\":\"1%\",\"size\":\"1543048\",\"type\":\"tmpfs\"},{\"available\":\"432210\",\"mountpoint\":\"/boot\",\"used\":\"38034\",\"percent\":\"9%\",\"size\":\"495844\",\"type\":\"ext4\"},{\"available\":\"44459872\",\"mountpoint\":\"/home\",\"used\":\"184220\",\"percent\":\"1%\",\"size\":\"47033288\",\"type\":\"ext4\"},{\"available\":\"902105496\",\"mountpoint\":\"/media/sf_ambari\",\"used\":\"74551908\",\"percent\":\"8%\",\"size\":\"976657404\",\"type\":\"vboxsf\"}]",
-        "last_heartbeat_time" : 1352461939047,
-        "os_type" : "centos6",
-        "ip" : "10.0.2.15"
-      },
-      "host_components" : []
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/hosts/metrics/cpu.json b/branch-1.2/ambari-web/app/assets/data/hosts/metrics/cpu.json
deleted file mode 100644
index 229d8b9..0000000
--- a/branch-1.2/ambari-web/app/assets/data/hosts/metrics/cpu.json
+++ /dev/null
@@ -1,23 +0,0 @@
-{
-  "href" : "http://ambari/clusters/mycluster/hosts/hostname?fields=metrics/cpu/cpu_user[1351621554,1351625154,60],metrics/cpu/cpu_wio[1351621554,1351625154,60],metrics/cpu/cpu_nice[1351621554,1351625154,60],metrics/cpu/cpu_aidle[1351621554,1351625154,60],metrics/cpu/cpu_system[1351621554,1351625154,60],metrics/cpu/cpu_idle[1351621554,1351625154,60]",
-  "metrics" : {
-    "cpu" : {
-      "cpu_wio" : "[[0.21944444444,1351621800],[0.25416666667,1351622160],[0.17638888889,1351622520],[0.19472222222,1351622880],[0.22388888889,1351623240],[0.22638888889,1351623600],[0.093611111111,1351623960],[0.18555555556,1351624320],[0.15083333333,1351624680],[0.16583333333,1351625040],[0.0,1351625400]]",
-      "cpu_idle" : "[[57.459722222,1351621800],[55.233611111,1351622160],[50.290277778,1351622520],[49.941666667,1351622880],[49.392222222,1351623240],[47.776111111,1351623600],[46.941388889,1351623960],[44.761388889,1351624320],[42.887222222,1351624680],[42.725,1351625040],[40.239722222,1351625400]]",
-      "cpu_nice" : "[[0.0,1351621800],[0.0,1351622160],[0.0,1351622520],[0.0,1351622880],[0.0,1351623240],[0.0,1351623600],[0.0,1351623960],[0.0,1351624320],[0.0,1351624680],[0.0,1351625040],[0.0,1351625400]]",
-      "cpu_aidle" : "[[0.0,1351621800],[0.0,1351622160],[0.0,1351622520],[0.0,1351622880],[0.0,1351623240],[0.0,1351623600],[0.0,1351623960],[0.0,1351624320],[0.0,1351624680],[0.0,1351625040],[0.0,1351625400]]",
-      "cpu_system" : "[[4.8736111111,1351621800],[4.9991666667,1351622160],[5.89,1351622520],[5.5513888889,1351622880],[5.5730555556,1351623240],[5.5986111111,1351623600],[5.6897222222,1351623960],[5.8952777778,1351624320],[5.945,1351624680],[5.9422222222,1351625040],[6.4375,1351625400]]",
-      "cpu_user" : "[[37.468611111,1351621800],[39.5175,1351622160],[43.655277778,1351622520],[44.28,1351622880],[44.819444444,1351623240],[46.390833333,1351623600],[47.281111111,1351623960],[49.155555556,1351624320],[51.017222222,1351624680],[51.175277778,1351625040],[53.183333333,1351625400]]"
-    }
-  },
-  "Hosts" : {
-    "cluster_name" : "mycluster",
-    "cpu_count" : "2",
-    "total_mem" : "7466",
-    "os_arch" : "x86_64",
-    "attributes" : "{\"publicFQDN\":\"ambari\",\"privateFQDN\":\"hostname\"}",
-    "host_name" : "hostname",
-    "os_type" : "redhatenterpriselinuxserver6",
-    "ip" : "10.38.5.128"
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/hosts/metrics/disk.json b/branch-1.2/ambari-web/app/assets/data/hosts/metrics/disk.json
deleted file mode 100644
index ba7e47c..0000000
--- a/branch-1.2/ambari-web/app/assets/data/hosts/metrics/disk.json
+++ /dev/null
@@ -1,20 +0,0 @@
-{
-  "href" : "http://ambari/clusters/mycluster/hosts/hostname?fields=metrics/disk/disk_total[1351621554,1351625154,60],metrics/disk/part_max_used[1351621554,1351625154,60],metrics/disk/disk_free[1351621554,1351625154,60]",
-  "metrics" : {
-    "disk" : {
-      "disk_total" : "[[893.765,1351621800],[893.765,1351622160],[893.765,1351622520],[893.765,1351622880],[893.765,1351623240],[893.765,1351623600],[893.765,1351623960],[893.765,1351624320],[893.765,1351624680],[893.765,1351625040],[893.765,1351625400]]",
-      "disk_free" : "[[842.89862778,1351621800],[842.898,1351622160],[842.89763333,1351622520],[842.89668333,1351622880],[842.89650278,1351623240],[842.89563333,1351623600],[842.895,1351623960],[842.89460278,1351624320],[842.89400556,1351624680],[842.89364444,1351625040],[842.89263333,1351625400]]",
-      "part_max_used" : "[[86.8,1351621800],[86.8,1351622160],[86.8,1351622520],[86.8,1351622880],[86.8,1351623240],[86.850277778,1351623600],[86.85,1351623960],[86.889444444,1351624320],[86.9,1351624680],[86.9,1351625040],[86.9,1351625400]]"
-    }
-  },
-  "Hosts" : {
-    "cluster_name" : "mycluster",
-    "cpu_count" : "2",
-    "total_mem" : "7466",
-    "os_arch" : "x86_64",
-    "attributes" : "{\"publicFQDN\":\"ambari\",\"privateFQDN\":\"hostname\"}",
-    "host_name" : "hostname",
-    "os_type" : "redhatenterpriselinuxserver6",
-    "ip" : "10.38.5.128"
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/hosts/metrics/load.json b/branch-1.2/ambari-web/app/assets/data/hosts/metrics/load.json
deleted file mode 100644
index df6393f..0000000
--- a/branch-1.2/ambari-web/app/assets/data/hosts/metrics/load.json
+++ /dev/null
@@ -1,20 +0,0 @@
-{
-  "href" : "http://ambari/clusters/mycluster/hosts/hostname?fields=metrics/load/load_fifteen[1351621554,1351625154,60],metrics/load/load_one[1351621554,1351625154,60],metrics/load/load_five[1351621554,1351625154,60]",
-  "metrics" : {
-    "load" : {
-      "load_fifteen" : "[[3.5136666667,1351621800],[3.7444166667,1351622160],[3.7896666667,1351622520],[3.7719166667,1351622880],[3.8913333333,1351623240],[3.9593611111,1351623600],[3.8402222222,1351623960],[3.8458888889,1351624320],[3.9601388889,1351624680],[4.1802222222,1351625040],[4.02225,1351625400]]",
-      "load_one" : "[[4.7221944444,1351621800],[4.0629166667,1351622160],[3.822,1351622520],[3.9615,1351622880],[4.5276111111,1351623240],[3.6452222222,1351623600],[4.3615,1351623960],[4.0596666667,1351624320],[4.7445277778,1351624680],[4.4509444444,1351625040],[3.7785555556,1351625400]]",
-      "load_five" : "[[4.05075,1351621800],[4.1552222222,1351622160],[3.9908888889,1351622520],[3.8535833333,1351622880],[4.1159722222,1351623240],[4.0588888889,1351623600],[3.82425,1351623960],[3.9144444444,1351624320],[4.2187222222,1351624680],[4.5194166667,1351625040],[3.9673611111,1351625400]]"
-    }
-  },
-  "Hosts" : {
-    "cluster_name" : "mycluster",
-    "cpu_count" : "2",
-    "total_mem" : "7466",
-    "os_arch" : "x86_64",
-    "attributes" : "{\"publicFQDN\":\"ambari\",\"privateFQDN\":\"hostname\"}",
-    "host_name" : "hostname",
-    "os_type" : "redhatenterpriselinuxserver6",
-    "ip" : "10.38.5.128"
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/hosts/metrics/memory.json b/branch-1.2/ambari-web/app/assets/data/hosts/metrics/memory.json
deleted file mode 100644
index 89029d3..0000000
--- a/branch-1.2/ambari-web/app/assets/data/hosts/metrics/memory.json
+++ /dev/null
@@ -1,22 +0,0 @@
-{
-  "href" : "http://ambari/clusters/mycluster/hosts/hostname?fields=metrics/memory/swap_free[1351621554,1351625154,60],metrics/memory/mem_total[1351621554,1351625154,60],metrics/memory/mem_free[1351621554,1351625154,60],metrics/memory/mem_cached[1351621554,1351625154,60],metrics/memory/mem_buffers[1351621554,1351625154,60]",
-  "metrics" : {
-    "memory" : {
-      "mem_total" : "[[7646152.0,1351621800],[7646152.0,1351622160],[7646152.0,1351622520],[7646152.0,1351622880],[7646152.0,1351623240],[7646152.0,1351623600],[7646152.0,1351623960],[7646152.0,1351624320],[7646152.0,1351624680],[7646152.0,1351625040],[7646152.0,1351625400]]",
-      "swap_free" : "[[0.0,1351621800],[0.0,1351622160],[0.0,1351622520],[0.0,1351622880],[0.0,1351623240],[0.0,1351623600],[0.0,1351623960],[0.0,1351624320],[0.0,1351624680],[0.0,1351625040],[0.0,1351625400]]",
-      "mem_buffers" : "[[228805.27778,1351621800],[228811.94444,1351622160],[228818.91111,1351622520],[228822.58889,1351622880],[228826.24444,1351623240],[228835.45556,1351623600],[228840.68889,1351623960],[228846.57778,1351624320],[228849.25556,1351624680],[228852.0,1351625040],[228859.23333,1351625400]]",
-      "mem_free" : "[[557190.06667,1351621800],[555238.24444,1351622160],[550594.85556,1351622520],[542883.0,1351622880],[548554.16667,1351623240],[544108.65556,1351623600],[540337.38889,1351623960],[539002.24444,1351624320],[540715.05556,1351624680],[537260.6,1351625040],[541950.46667,1351625400]]",
-      "mem_cached" : "[[3517800.5889,1351621800],[3518418.1,1351622160],[3519044.5,1351622520],[3519685.8,1351622880],[3520154.1222,1351623240],[3520744.7444,1351623600],[3521340.8333,1351623960],[3521865.1556,1351624320],[3522396.0556,1351624680],[3522936.7,1351625040],[3523384.7222,1351625400]]"
-    }
-  },
-  "Hosts" : {
-    "cluster_name" : "mycluster",
-    "cpu_count" : "2",
-    "total_mem" : "7466",
-    "os_arch" : "x86_64",
-    "attributes" : "{\"publicFQDN\":\"ambari\",\"privateFQDN\":\"hostname\"}",
-    "host_name" : "hostname",
-    "os_type" : "redhatenterpriselinuxserver6",
-    "ip" : "10.38.5.128"
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/hosts/metrics/network.json b/branch-1.2/ambari-web/app/assets/data/hosts/metrics/network.json
deleted file mode 100644
index f9dca8a..0000000
--- a/branch-1.2/ambari-web/app/assets/data/hosts/metrics/network.json
+++ /dev/null
@@ -1,21 +0,0 @@
-{
-  "href" : "http://ambari/clusters/mycluster/hosts/hostname?fields=metrics/network/bytes_in[1351621554,1351625154,60],metrics/network/bytes_out[1351621554,1351625154,60],metrics/network/pkts_in[1351621554,1351625154,60],metrics/network/pkts_out[1351621554,1351625154,60]",
-  "metrics" : {
-    "network" : {
-      "bytes_in" : "[[368.36444444,1351621800],[332.40305556,1351622160],[354.20283333,1351622520],[349.55169444,1351622880],[343.27902778,1351623240],[366.97422222,1351623600],[333.27402778,1351623960],[360.01791667,1351624320],[354.37855556,1351624680],[347.01991667,1351625040],[366.68930556,1351625400]]",
-      "bytes_out" : "[[301.76044444,1351621800],[262.66875,1351622160],[281.7565,1351622520],[275.83197222,1351622880],[271.15458333,1351623240],[289.79577778,1351623600],[262.87319444,1351623960],[284.01583333,1351624320],[279.33888889,1351624680],[273.75552778,1351625040],[289.47902778,1351625400]]",
-      "pkts_in" : "[[3.3466666667,1351621800],[3.0370833333,1351622160],[3.2433333333,1351622520],[3.18625,1351622880],[3.1458333333,1351623240],[3.3453333333,1351623600],[3.04125,1351623960],[3.2750833333,1351624320],[3.2223333333,1351624680],[3.1640277778,1351625040],[3.33875,1351625400]]",
-      "pkts_out" : "[[3.3466666667,1351621800],[3.0416666667,1351622160],[3.2638333333,1351622520],[3.18625,1351622880],[3.1458333333,1351623240],[3.3453333333,1351623600],[3.04125,1351623960],[3.2750833333,1351624320],[3.2223333333,1351624680],[3.1640277778,1351625040],[3.33875,1351625400]]"
-    }
-  },
-  "Hosts" : {
-    "cluster_name" : "mycluster",
-    "cpu_count" : "2",
-    "total_mem" : "7466",
-    "os_arch" : "x86_64",
-    "attributes" : "{\"publicFQDN\":\"ambari\",\"privateFQDN\":\"hostname\"}",
-    "host_name" : "hostname",
-    "os_type" : "redhatenterpriselinuxserver6",
-    "ip" : "10.38.5.128"
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/hosts/metrics/processes.json b/branch-1.2/ambari-web/app/assets/data/hosts/metrics/processes.json
deleted file mode 100644
index 9249147..0000000
--- a/branch-1.2/ambari-web/app/assets/data/hosts/metrics/processes.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
-  "href" : "http://ambari/clusters/mycluster/hosts/hostname?fields=metrics/processes/proc_total[1351621554,1351625154,60],metrics/processes/proc_run[1351621554,1351625154,60]",
-  "metrics" : {
-    "processes" : {
-      "proc_total" : "[[985.57777778,1351621800],[985.52777778,1351622160],[984.69722222,1351622520],[988.78055556,1351622880],[991.69166667,1351623240],[988.18055556,1351623600],[991.85,1351623960],[984.95277778,1351624320],[995.00277778,1351624680],[988.76666667,1351625040],[991.76388889,1351625400]]",
-      "proc_run" : "[[4.3805555556,1351621800],[0.19444444444,1351622160],[0.63055555556,1351622520],[4.0527777778,1351622880],[2.9166666667,1351623240],[9.7111111111,1351623600],[7.8888888889,1351623960],[2.6333333333,1351624320],[12.302777778,1351624680],[8.4944444444,1351625040],[7.5694444444,1351625400]]"
-    }
-  },
-  "Hosts" : {
-    "cluster_name" : "mycluster",
-    "cpu_count" : "2",
-    "total_mem" : "7466",
-    "os_arch" : "x86_64",
-    "attributes" : "{\"publicFQDN\":\"ambari\",\"privateFQDN\":\"hostname\"}",
-    "host_name" : "hostname",
-    "os_type" : "redhatenterpriselinuxserver6",
-    "ip" : "10.38.5.128"
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/racks/racks.json b/branch-1.2/ambari-web/app/assets/data/racks/racks.json
deleted file mode 100644
index 0eda3ff..0000000
--- a/branch-1.2/ambari-web/app/assets/data/racks/racks.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
-  "href": "http://localhost:8080/api/racks?fields=*",
-  "items": [
-    {
-      "href": "http://localhost:8080/api/racks/1",
-      "Racks": {
-        "id": 1,
-        "name": "Default Rack"
-      }
-    }
-  ]
-}
diff --git a/branch-1.2/ambari-web/app/assets/data/services/metrics/hbase/cluster_requests.json b/branch-1.2/ambari-web/app/assets/data/services/metrics/hbase/cluster_requests.json
deleted file mode 100644
index 52d4085..0000000
--- a/branch-1.2/ambari-web/app/assets/data/services/metrics/hbase/cluster_requests.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
-  "href" : "http://ambari/clusters/mycluster/services/HBASE/components/HBASE_MASTER?fields=metrics/hbase/master/cluster_requests[1351356190,1351359791,60]",
-  "metrics" : {
-    "hbase" : {
-      "master" : {
-        "cluster_requests" : "[[0.0,1351356195],[0.0,1351356210],[0.0,1351356225],[0.0,1351356240],[0.0,1351356255],[0.0,1351356270],[0.0,1351356285],[0.0,1351356300],[0.0,1351356315],[0.0,1351356330],[0.0,1351356345],[0.0,1351356360],[0.0,1351356375],[0.0,1351356390],[0.0,1351356405],[0.0,1351356420],[0.0,1351356435],[0.14,1351356450],[0.18,1351356465],[0.0,1351356480],[0.0,1351356495],[0.0,1351356510],[0.0,1351356525],[0.0,1351356540],[0.0,1351356555],[0.0,1351356570],[0.0,1351356585],[0.0,1351356600],[0.0,1351356615],[0.0,1351356630],[0.0,1351356645],[0.0,1351356660],[0.0,1351356675],[0.0,1351356690],[0.0,1351356705],[0.0,1351356720],[0.0,1351356735],[0.16,1351356750],[0.14,1351356765],[0.0,1351356780],[0.0,1351356795],[0.0,1351356810],[0.0,1351356825],[0.0,1351356840],[0.0,1351356855],[0.0,1351356870],[0.0,1351356885],[0.0,1351356900],[0.0,1351356915],[0.0,1351356930],[0.0,1351356945],[0.0,1351356960],[0.0,1351356975],[0.0,1351356990],[0.0,1351357005],[0.0,1351357020],[0.0,1351357035],[0.2,1351357050],[0.1,1351357065],[0.0,1351357080],[0.0,1351357095],[0.0,1351357110],[0.0,1351357125],[0.0,1351357140],[0.0,1351357155],[0.0,1351357170],[0.0,1351357185],[0.0,1351357200],[0.0,1351357215],[0.0,1351357230],[0.0,1351357245],[0.0,1351357260],[0.0,1351357275],[0.0,1351357290],[0.0,1351357305],[0.0,1351357320],[0.0,1351357335],[0.22,1351357350],[0.08,1351357365],[0.0,1351357380],[0.0,1351357395],[0.0,1351357410],[0.0,1351357425],[0.0,1351357440],[0.0,1351357455],[0.0,1351357470],[0.0,1351357485],[0.0,1351357500],[0.0,1351357515],[0.0,1351357530],[0.0,1351357545],[0.0,1351357560],[0.0,1351357575],[0.0,1351357590],[0.0,1351357605],[0.0,1351357620],[0.0,1351357635],[0.0,1351357650],[0.0,1351357665],[0.0,1351357680],[0.0,1351357695],[0.0,1351357710],[0.0,1351357725],[0.0,1351357740],[0.0,1351357755],[0.0,1351357770],[0.0,1351357785],[0.0,1351357800],[0.0,1351357815],[0.0,1351357830],[0.0,1351357845],[0.0,1351357860],[0.0,1351357875],[0.0,1351357890],[0.0,1351357905],[0.0,1351357920],[0.0,1351357935],[0.0,1351357950],[0.0,1351357965],[0.0,1351357980],[0.0,1351357995],[0.0,1351358010],[0.0,1351358025],[0.0,1351358040],[0.0,1351358055],[0.0,1351358070],[0.0,1351358085],[0.0,1351358100],[0.0,1351358115],[0.0,1351358130],[0.0,1351358145],[0.0,1351358160],[0.0,1351358175],[0.0,1351358190],[0.0,1351358205],[0.0,1351358220],[0.0,1351358235],[0.0,1351358250],[0.0,1351358265],[0.0,1351358280],[0.0,1351358295],[0.0,1351358310],[0.0,1351358325],[0.0,1351358340],[0.0,1351358355],[0.0,1351358370],[0.0,1351358385],[0.0,1351358400],[0.0,1351358415],[0.0,1351358430],[0.0,1351358445],[0.0,1351358460],[0.0,1351358475],[0.0,1351358490],[0.0,1351358505],[0.0,1351358520],[0.0,1351358535],[0.06,1351358550],[0.24,1351358565],[0.0,1351358580],[0.0,1351358595],[0.0,1351358610],[0.0,1351358625],[0.0,1351358640],[0.0,1351358655],[0.0,1351358670],[0.0,1351358685],[0.0,1351358700],[0.0,1351358715],[0.0,1351358730],[0.0,1351358745],[0.0,1351358760],[0.0,1351358775],[0.0,1351358790],[0.0,1351358805],[0.0,1351358820],[0.0,1351358835],[0.14,1351358850],[0.18,1351358865],[0.0,1351358880],[0.0,1351358895],[0.0,1351358910],[0.0,1351358925],[0.0,1351358940],[0.0,1351358955],[0.0,1351358970],[0.0,1351358985],[0.0,1351359000],[0.0,1351359015],[0.0,1351359030],[0.0,1351359045],[0.0,1351359060],[0.0,1351359075],[0.0,1351359090],[0.0,1351359105],[0.0,1351359120],[0.0,1351359135],[0.12,1351359150],[0.18,1351359165],[0.0,1351359180],[0.0,1351359195],[0.0,1351359210],[0.0,1351359225],[0.0,1351359240],[0.0,1351359255],[0.0,1351359270],[0.0,1351
359285],[0.0,1351359300],[0.0,1351359315],[0.0,1351359330],[0.0,1351359345],[0.0,1351359360],[0.0,1351359375],[0.0,1351359390],[0.0,1351359405],[0.0,1351359420],[0.0,1351359435],[0.14,1351359450],[0.16,1351359465],[0.0,1351359480],[0.0,1351359495],[0.0,1351359510],[0.0,1351359525],[0.0,1351359540],[0.0,1351359555],[0.0,1351359570],[0.0,1351359585],[0.0,1351359600],[0.0,1351359615],[0.0,1351359630],[0.0,1351359645],[0.0,1351359660],[0.0,1351359675],[0.0,1351359690],[0.0,1351359705],[0.0,1351359720],[0.0,1351359735],[0.0,1351359750],[0.0,1351359765],[0.0,1351359780],[0.0,1351359795]]"
-      }
-    }
-  },
-  "ServiceComponentInfo" : {
-    "cluster_name" : "mycluster",
-    "component_name" : "HBASE_MASTER",
-    "service_name" : "HBASE"
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/services/metrics/hbase/hlog_split_size.json b/branch-1.2/ambari-web/app/assets/data/services/metrics/hbase/hlog_split_size.json
deleted file mode 100644
index f9a5a13..0000000
--- a/branch-1.2/ambari-web/app/assets/data/services/metrics/hbase/hlog_split_size.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
-  "href" : "http://ambari/clusters/mycluster/services/HBASE/components/HBASE_MASTER?fields=metrics/hbase/master/splitSize_avg_time[1351356190,1351359791,60]",
-  "metrics" : {
-    "hbase" : {
-      "master" : {
-        "splitSize_avg_time" : "[[0.0,1351356480],[0.0,1351356840],[0.0,1351357200],[0.0,1351357560],[0.0,1351357920],[0.0,1351358280],[0.0,1351358640],[0.0,1351359000],[0.0,1351359360],[0.0,1351359720],[0.0,1351360080]]"
-      }
-    }
-  },
-  "ServiceComponentInfo" : {
-    "cluster_name" : "mycluster",
-    "component_name" : "HBASE_MASTER",
-    "service_name" : "HBASE"
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/services/metrics/hbase/hlog_split_time.json b/branch-1.2/ambari-web/app/assets/data/services/metrics/hbase/hlog_split_time.json
deleted file mode 100644
index e460f9c..0000000
--- a/branch-1.2/ambari-web/app/assets/data/services/metrics/hbase/hlog_split_time.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
-  "href" : "http://ambari/clusters/mycluster/services/HBASE/components/HBASE_MASTER?fields=metrics/hbase/master/splitTime_avg_time[1351356190,1351359791,60]",
-  "metrics" : {
-    "hbase" : {
-      "master" : {
-        "splitTime_avg_time" : "[[0.0,1351356195],[0.0,1351356210],[0.0,1351356225],[0.0,1351356240],[0.0,1351356255],[0.0,1351356270],[0.0,1351356285],[0.0,1351356300],[0.0,1351356315],[0.0,1351356330],[0.0,1351356345],[0.0,1351356360],[0.0,1351356375],[0.0,1351356390],[0.0,1351356405],[0.0,1351356420],[0.0,1351356435],[0.0,1351356450],[0.0,1351356465],[0.0,1351356480],[0.0,1351356495],[0.0,1351356510],[0.0,1351356525],[0.0,1351356540],[0.0,1351356555],[0.0,1351356570],[0.0,1351356585],[0.0,1351356600],[0.0,1351356615],[0.0,1351356630],[0.0,1351356645],[0.0,1351356660],[0.0,1351356675],[0.0,1351356690],[0.0,1351356705],[0.0,1351356720],[0.0,1351356735],[0.0,1351356750],[0.0,1351356765],[0.0,1351356780],[0.0,1351356795],[0.0,1351356810],[0.0,1351356825],[0.0,1351356840],[0.0,1351356855],[0.0,1351356870],[0.0,1351356885],[0.0,1351356900],[0.0,1351356915],[0.0,1351356930],[0.0,1351356945],[0.0,1351356960],[0.0,1351356975],[0.0,1351356990],[0.0,1351357005],[0.0,1351357020],[0.0,1351357035],[0.0,1351357050],[0.0,1351357065],[0.0,1351357080],[0.0,1351357095],[0.0,1351357110],[0.0,1351357125],[0.0,1351357140],[0.0,1351357155],[0.0,1351357170],[0.0,1351357185],[0.0,1351357200],[0.0,1351357215],[0.0,1351357230],[0.0,1351357245],[0.0,1351357260],[0.0,1351357275],[0.0,1351357290],[0.0,1351357305],[0.0,1351357320],[0.0,1351357335],[0.0,1351357350],[0.0,1351357365],[0.0,1351357380],[0.0,1351357395],[0.0,1351357410],[0.0,1351357425],[0.0,1351357440],[0.0,1351357455],[0.0,1351357470],[0.0,1351357485],[0.0,1351357500],[0.0,1351357515],[0.0,1351357530],[0.0,1351357545],[0.0,1351357560],[0.0,1351357575],[0.0,1351357590],[0.0,1351357605],[0.0,1351357620],[0.0,1351357635],[0.0,1351357650],[0.0,1351357665],[0.0,1351357680],[0.0,1351357695],[0.0,1351357710],[0.0,1351357725],[0.0,1351357740],[0.0,1351357755],[0.0,1351357770],[0.0,1351357785],[0.0,1351357800],[0.0,1351357815],[0.0,1351357830],[0.0,1351357845],[0.0,1351357860],[0.0,1351357875],[0.0,1351357890],[0.0,1351357905],[0.0,1351357920],[0.0,1351357935],[0.0,1351357950],[0.0,1351357965],[0.0,1351357980],[0.0,1351357995],[0.0,1351358010],[0.0,1351358025],[0.0,1351358040],[0.0,1351358055],[0.0,1351358070],[0.0,1351358085],[0.0,1351358100],[0.0,1351358115],[0.0,1351358130],[0.0,1351358145],[0.0,1351358160],[0.0,1351358175],[0.0,1351358190],[0.0,1351358205],[0.0,1351358220],[0.0,1351358235],[0.0,1351358250],[0.0,1351358265],[0.0,1351358280],[0.0,1351358295],[0.0,1351358310],[0.0,1351358325],[0.0,1351358340],[0.0,1351358355],[0.0,1351358370],[0.0,1351358385],[0.0,1351358400],[0.0,1351358415],[0.0,1351358430],[0.0,1351358445],[0.0,1351358460],[0.0,1351358475],[0.0,1351358490],[0.0,1351358505],[0.0,1351358520],[0.0,1351358535],[0.0,1351358550],[0.0,1351358565],[0.0,1351358580],[0.0,1351358595],[0.0,1351358610],[0.0,1351358625],[0.0,1351358640],[0.0,1351358655],[0.0,1351358670],[0.0,1351358685],[0.0,1351358700],[0.0,1351358715],[0.0,1351358730],[0.0,1351358745],[0.0,1351358760],[0.0,1351358775],[0.0,1351358790],[0.0,1351358805],[0.0,1351358820],[0.0,1351358835],[0.0,1351358850],[0.0,1351358865],[0.0,1351358880],[0.0,1351358895],[0.0,1351358910],[0.0,1351358925],[0.0,1351358940],[0.0,1351358955],[0.0,1351358970],[0.0,1351358985],[0.0,1351359000],[0.0,1351359015],[0.0,1351359030],[0.0,1351359045],[0.0,1351359060],[0.0,1351359075],[0.0,1351359090],[0.0,1351359105],[0.0,1351359120],[0.0,1351359135],[0.0,1351359150],[0.0,1351359165],[0.0,1351359180],[0.0,1351359195],[0.0,1351359210],[0.0,1351359225],[0.0,1351359240],[0.0,1351359255],[0.0,1351359270],[0.0,1351359285],[0
.0,1351359300],[0.0,1351359315],[0.0,1351359330],[0.0,1351359345],[0.0,1351359360],[0.0,1351359375],[0.0,1351359390],[0.0,1351359405],[0.0,1351359420],[0.0,1351359435],[0.0,1351359450],[0.0,1351359465],[0.0,1351359480],[0.0,1351359495],[0.0,1351359510],[0.0,1351359525],[0.0,1351359540],[0.0,1351359555],[0.0,1351359570],[0.0,1351359585],[0.0,1351359600],[0.0,1351359615],[0.0,1351359630],[0.0,1351359645],[0.0,1351359660],[0.0,1351359675],[0.0,1351359690],[0.0,1351359705],[0.0,1351359720],[0.0,1351359735],[0.0,1351359750],[0.0,1351359765],[0.0,1351359780],[0.0,1351359795]]"
-      }
-    }
-  },
-  "ServiceComponentInfo" : {
-    "cluster_name" : "mycluster",
-    "component_name" : "HBASE_MASTER",
-    "service_name" : "HBASE"
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/services/metrics/hbase/regionserver_queuesize.json b/branch-1.2/ambari-web/app/assets/data/services/metrics/hbase/regionserver_queuesize.json
deleted file mode 100644
index 44cb0be..0000000
--- a/branch-1.2/ambari-web/app/assets/data/services/metrics/hbase/regionserver_queuesize.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
-  "href" : "http://ambari/clusters/mycluster/services/HBASE/components/HBASE_REGIONSERVER?fields=metrics/hbase/regionserver/flushQueueSize[1351356190,1351359791,120],metrics/hbase/regionserver/compactionQueueSize[1351356190,1351359791,120]",
-  "metrics" : {
-    "hbase" : {
-      "regionserver" : {
-        "compactionQueueSize" : "[[0.0,1351356480],[0.0,1351356840],[0.0,1351357200],[0.0,1351357560],[0.0,1351357920],[0.0,1351358280],[0.0,1351358640],[0.0,1351359000],[0.0,1351359360],[0.0,1351359720],[0.0,1351360080]]",
-        "flushQueueSize" : "[[0.0,1351356480],[0.0,1351356840],[0.0,1351357200],[0.0,1351357560],[0.0,1351357920],[0.0,1351358280],[0.0,1351358640],[0.0,1351359000],[0.0,1351359360],[0.0,1351359720],[0.0,1351360080]]"
-      }
-    }
-  },
-  "ServiceComponentInfo" : {
-    "cluster_name" : "mycluster",
-    "component_name" : "HBASE_REGIONSERVER",
-    "service_name" : "HBASE"
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/services/metrics/hbase/regionserver_regions.json b/branch-1.2/ambari-web/app/assets/data/services/metrics/hbase/regionserver_regions.json
deleted file mode 100644
index d0cbe52..0000000
--- a/branch-1.2/ambari-web/app/assets/data/services/metrics/hbase/regionserver_regions.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
-  "href" : "http://ambari/clusters/mycluster/services/HBASE/components/HBASE_REGIONSERVER?fields=metrics/hbase/regionserver/regions[1351356190,1351359791,60]",
-  "metrics" : {
-    "hbase" : {
-      "regionserver" : {
-        "regions" : "[[3.0,1351356480],[3.0,1351356840],[3.0,1351357200],[3.0,1351357560],[3.0,1351357920],[3.0,1351358280],[3.0,1351358640],[3.0,1351359000],[3.0,1351359360],[3.0,1351359720],[0.0,1351360080]]"
-      }
-    }
-  },
-  "ServiceComponentInfo" : {
-    "cluster_name" : "mycluster",
-    "component_name" : "HBASE_REGIONSERVER",
-    "service_name" : "HBASE"
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/services/metrics/hbase/regionserver_rw_requests.json b/branch-1.2/ambari-web/app/assets/data/services/metrics/hbase/regionserver_rw_requests.json
deleted file mode 100644
index f21e09b..0000000
--- a/branch-1.2/ambari-web/app/assets/data/services/metrics/hbase/regionserver_rw_requests.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
-  "href" : "http://ambari/clusters/mycluster/services/HBASE/components/HBASE_REGIONSERVER?fields=metrics/hbase/regionserver/readRequestsCount[1351356190,1351359791,60],metrics/hbase/regionserver/writeRequestsCount[1351356190,1351359791,60]",
-  "metrics" : {
-    "hbase" : {
-      "regionserver" : {
-        "writeRequestsCount" : "[[4.0,1351356480],[4.0,1351356840],[4.0,1351357200],[4.0,1351357560],[4.0,1351357920],[4.0,1351358280],[4.0,1351358640],[4.0,1351359000],[4.0,1351359360],[4.0,1351359720],[0.0,1351360080]]",
-        "readRequestsCount" : "[[2004.1333333,1351356480],[2006.5055556,1351356840],[2008.85,1351357200],[2011.1888889,1351357560],[2013.5222222,1351357920],[2016.0333333,1351358280],[2018.5277778,1351358640],[2020.8722222,1351359000],[2023.2111111,1351359360],[2025.5333333,1351359720],[0.0,1351360080]]"
-      }
-    }
-  },
-  "ServiceComponentInfo" : {
-    "cluster_name" : "mycluster",
-    "component_name" : "HBASE_REGIONSERVER",
-    "service_name" : "HBASE"
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/services/metrics/hdfs/block_status.json b/branch-1.2/ambari-web/app/assets/data/services/metrics/hdfs/block_status.json
deleted file mode 100644
index 8104768..0000000
--- a/branch-1.2/ambari-web/app/assets/data/services/metrics/hdfs/block_status.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
-  "href" : "http://ambari/api/clusters/vmc/hosts/hostname/host_components/NAMENODE?fields=metrics/dfs/FSNamesystem/PendingReplicationBlocks[1352828598,1352832198,15],metrics/dfs/FSNamesystem/UnderReplicatedBlocks[1352828598,1352832198,15]",
-  "HostRoles" : {
-    "cluster_name" : "vmc",
-    "component_name" : "NAMENODE",
-    "host_name" : "hostname"
-  },
-  "host" : {
-    "href" : "http://ambari/api/clusters/vmc/hosts/hostname"
-  },
-  "metrics" : {
-    "dfs" : {
-      "FSNamesystem" : {
-        "PendingReplicationBlocks" : "[[0.0,1352768040],[0.0,1352768400],[0.0,1352768760],[0.0,1352769120],[0.0,1352769480],[0.0,1352769840],[0.0,1352770200],[0.0,1352770560],[0.0,1352770920],[0.0,1352771280],[0.0,1352771640],[0.0,1352772000],[0.0,1352772360],[0.0,1352772720],[0.0,1352773080],[0.0,1352773440],[0.0,1352773800],[0.0,1352774160],[0.0,1352774520],[0.0,1352774880],[0.0,1352775240],[0.0,1352775600],[0.0,1352775960],[0.0,1352776320],[0.0,1352776680],[0.0,1352777040],[0.0,1352777400],[0.0,1352777760],[0.0,1352778120],[0.0,1352778480],[0.0,1352778840],[0.0,1352779200],[0.0,1352779560],[0.0,1352779920],[0.0,1352780280],[0.0,1352780640],[0.0,1352781000],[0.0,1352781360],[0.0,1352781720],[0.0,1352782080],[0.0,1352782440],[0.0,1352785680],[0.0,1352786040],[0.0,1352786400],[0.0,1352790360],[0.0,1352790720],[0.0,1352791080],[0.0,1352791440],[0.0,1352791800],[0.0,1352792160],[0.0,1352792520],[0.0,1352792880],[0.0,1352793240],[0.0,1352793600],[0.0,1352793960],[0.0,1352794320],[0.0,1352794680],[0.0,1352795040],[0.0,1352795400],[0.0,1352795760],[0.0,1352796120],[0.0,1352796480],[0.0,1352796840],[0.0,1352797200],[0.0,1352797560],[0.0,1352797920],[0.0,1352798280],[0.0,1352798640],[0.0,1352799000],[0.0,1352822400],[0.0,1352822760],[0.0,1352823120],[0.0,1352823480],[0.0,1352823840],[0.0,1352824200],[0.0,1352824560],[0.0,1352824920],[0.0,1352825280],[0.0,1352825640],[0.0,1352826000],[0.0,1352826360],[0.0,1352826720],[0.0,1352827080],[0.0,1352827440],[0.0,1352830320],[0.0,1352830680],[0.0,1352831040],[0.0,1352831400]]",
-        "UnderReplicatedBlocks" : "[[5.96388888889,1352768040],[6.0,1352768400],[6.0,1352768760],[6.0,1352769120],[6.0,1352769480],[6.0,1352769840],[6.0,1352770200],[6.0,1352770560],[6.0,1352770920],[6.0,1352771280],[6.0,1352771640],[7.69722222222,1352772000],[7.0,1352772360],[7.0,1352772720],[7.0,1352773080],[7.0,1352773440],[7.0,1352773800],[7.0,1352774160],[8.59722222222,1352774520],[8.0,1352774880],[8.0,1352775240],[8.0,1352775600],[8.0,1352775960],[8.0,1352776320],[8.0,1352776680],[8.0,1352777040],[9.35,1352777400],[9.0,1352777760],[9.0,1352778120],[10.2833333333,1352778480],[10.0,1352778840],[10.0,1352779200],[10.0,1352779560],[10.25,1352779920],[12.2083333333,1352780280],[11.0,1352780640],[11.0,1352781000],[12.3361111111,1352781360],[12.1583333333,1352781720],[12.0,1352782080],[12.0,1352782440],[12.0,1352785680],[12.2055555556,1352786040],[13.8555555556,1352786400],[13.0,1352790360],[13.0,1352790720],[13.0,1352791080],[13.0,1352791440],[13.0,1352791800],[13.0,1352792160],[13.0,1352792520],[13.0,1352792880],[13.0,1352793240],[13.0,1352793600],[13.0,1352793960],[13.0,1352794320],[13.0,1352794680],[13.0,1352795040],[13.0,1352795400],[13.0,1352795760],[13.0,1352796120],[13.0,1352796480],[13.0,1352796840],[13.0,1352797200],[13.0,1352797560],[13.0,1352797920],[13.0,1352798280],[13.0,1352798640],[13.0,1352799000],[13.0,1352822400],[13.0,1352822760],[13.0,1352823120],[13.0,1352823480],[13.0,1352823840],[13.0,1352824200],[13.0,1352824560],[13.0,1352824920],[13.0,1352825280],[13.0,1352825640],[13.0,1352826000],[13.0,1352826360],[13.0,1352826720],[13.0,1352827080],[13.0,1352827440],[13.0,1352830320],[13.0,1352830680],[13.0,1352831040],[13.0,1352831400]]"
-      }
-    }
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/services/metrics/hdfs/file_operations.json b/branch-1.2/ambari-web/app/assets/data/services/metrics/hdfs/file_operations.json
deleted file mode 100644
index 9a7608c..0000000
--- a/branch-1.2/ambari-web/app/assets/data/services/metrics/hdfs/file_operations.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
-  "href" : "http://ambari/clusters/SrimanthAmbari/services/HDFS/components/NAMENODE?fields=metrics/dfs/namenode/FileInfoOps[1351117114,1351120714,60],metrics/dfs/namenode/CreateFileOps[1351117114,1351120714,60],metrics/dfs/namenode/DeleteFileOps[1351117114,1351120714,60]",
-  "metrics" : {
-    "dfs" : {
-      "namenode" : {
-        "FileInfoOps" : "[[0.0,1351117440],[0.0,1351117800],[0.0,1351118160],[0.0,1351118520],[0.0,1351118880],[0.0,1351119240],[0.0,1351119600],[0.0,1351119960],[0.0,1351120320],[0.0,1351120680],[0.0,1351121040]]",
-        "DeleteFileOps" : "[[0.0,1351117440],[0.0,1351117800],[0.0,1351118160],[0.0,1351118520],[0.0,1351118880],[0.0,1351119240],[0.0,1351119600],[0.0,1351119960],[0.0,1351120320],[0.0,1351120680],[0.0,1351121040]]",
-        "CreateFileOps" : "[[0.0,1351117440],[0.0,1351117800],[0.0,1351118160],[0.0,1351118520],[0.0,1351118880],[0.0,1351119240],[0.0,1351119600],[0.0,1351119960],[0.0,1351120320],[0.0,1351120680],[0.0,1351121040]]"
-      }
-    }
-  },
-  "ServiceComponentInfo" : {
-    "cluster_name" : "SrimanthAmbari",
-    "component_name" : "NAMENODE",
-    "service_name" : "HDFS"
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/services/metrics/hdfs/gc.json b/branch-1.2/ambari-web/app/assets/data/services/metrics/hdfs/gc.json
deleted file mode 100644
index 0b09a9d..0000000
--- a/branch-1.2/ambari-web/app/assets/data/services/metrics/hdfs/gc.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
-  "href" : "http://ambari/clusters/SrimanthAmbari/hosts/hostname/host_components/NAMENODE?fields=metrics/jvm/gcTimeMillis[1351117114,1351120714,60]",
-  "HostRoles" : {
-    "cluster_name" : "SrimanthAmbari",
-    "component_name" : "NAMENODE",
-    "host_name" : "hostname"
-  },
-  "host" : {
-    "href" : "http://ambari/clusters/SrimanthAmbari/hosts/hostname"
-  },
-  "metrics" : {
-    "jvm" : {
-      "gcTimeMillis" : "[[0.0,1351117440],[0.0,1351117800],[0.0,1351118160],[0.0,1351118520],[0.0,1351118880],[0.0,1351119240],[0.0,1351119600],[0.0,1351119960],[0.0,1351120320],[0.0,1351120680],[0.027777777778,1351121040]]"
-    }
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/services/metrics/hdfs/io.json b/branch-1.2/ambari-web/app/assets/data/services/metrics/hdfs/io.json
deleted file mode 100644
index 39b60ef..0000000
--- a/branch-1.2/ambari-web/app/assets/data/services/metrics/hdfs/io.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
-  "href" : "http://ambari/clusters/SrimanthAmbari/services/HDFS/components/DATANODE?fields=metrics/dfs/datanode/bytes_written[1351117114,1351120714,60],metrics/dfs/datanode/bytes_read[1351117114,1351120714,60]",
-  "metrics" : {
-    "dfs" : {
-      "datanode" : {
-        "bytes_written" : "[[0.0,1351117440],[0.0,1351117800],[0.0,1351118160],[0.0,1351118520],[0.0,1351118880],[0.0,1351119240],[0.0,1351119600],[0.0,1351119960],[0.0,1351120320],[0.0,1351120680],[0.0,1351121040]]",
-        "bytes_read" : "[[0.0,1351117440],[0.0,1351117800],[0.0,1351118160],[0.0,1351118520],[0.0,1351118880],[0.0,1351119240],[0.0,1351119600],[0.0,1351119960],[0.0,1351120320],[0.0,1351120680],[0.0,1351121040]]"
-      }
-    }
-  },
-  "ServiceComponentInfo" : {
-    "cluster_name" : "SrimanthAmbari",
-    "component_name" : "DATANODE",
-    "service_name" : "HDFS"
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/services/metrics/hdfs/jvm_heap.json b/branch-1.2/ambari-web/app/assets/data/services/metrics/hdfs/jvm_heap.json
deleted file mode 100644
index 4e0e1a6..0000000
--- a/branch-1.2/ambari-web/app/assets/data/services/metrics/hdfs/jvm_heap.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
-  "href" : "http://ambari/clusters/SrimanthAmbari/hosts/hostname/host_components/NAMENODE?fields=metrics/jvm/memNonHeapUsedM[1351117114,1351120714,60],metrics/jvm/memNonHeapCommittedM[1351117114,1351120714,60],metrics/jvm/memHeapUsedM[1351117114,1351120714,60],metrics/jvm/memHeapCommittedM[1351117114,1351120714,60]",
-  "HostRoles" : {
-    "cluster_name" : "SrimanthAmbari",
-    "component_name" : "NAMENODE",
-    "host_name" : "hostname"
-  },
-  "host" : {
-    "href" : "http://ambari/clusters/SrimanthAmbari/hosts/hostname"
-  },
-  "metrics" : {
-    "jvm" : {
-      "memHeapCommittedM" : "[[680.0,1351117440],[680.0,1351117800],[680.0,1351118160],[680.0,1351118520],[680.0,1351118880],[680.0,1351119240],[680.0,1351119600],[680.0,1351119960],[680.0,1351120320],[680.0,1351120680],[680.0,1351121040]]",
-      "memNonHeapUsedM" : "[[22.650757,1351117440],[22.650757,1351117800],[22.650757,1351118160],[22.650757,1351118520],[22.650757,1351118880],[22.650757,1351119240],[22.650757,1351119600],[22.650757,1351119960],[22.651528758,1351120320],[22.65326,1351120680],[22.65326,1351121040]]",
-      "memHeapUsedM" : "[[395.12300758,1351117440],[408.34930122,1351117800],[420.87871456,1351118160],[433.10785828,1351118520],[445.34583947,1351118880],[458.20661247,1351119240],[471.33682175,1351119600],[483.45747058,1351119960],[495.75202453,1351120320],[508.74344031,1351120680],[452.11703887,1351121040]]",
-      "memNonHeapCommittedM" : "[[23.375,1351117440],[23.375,1351117800],[23.375,1351118160],[23.375,1351118520],[23.375,1351118880],[23.375,1351119240],[23.375,1351119600],[23.375,1351119960],[23.375,1351120320],[23.375,1351120680],[23.375,1351121040]]"
-    }
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/services/metrics/hdfs/jvm_threads.json b/branch-1.2/ambari-web/app/assets/data/services/metrics/hdfs/jvm_threads.json
deleted file mode 100644
index 7d85222..0000000
--- a/branch-1.2/ambari-web/app/assets/data/services/metrics/hdfs/jvm_threads.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
-  "href" : "http://ambari/clusters/SrimanthAmbari/hosts/hostname/host_components/NAMENODE?fields=metrics/jvm/threadsRunnable[1351117114,1351120714,60],metrics/jvm/threadsBlocked[1351117114,1351120714,60],metrics/jvm/threadsWaiting[1351117114,1351120714,60],metrics/jvm/threadsTimedWaiting[1351117114,1351120714,60]",
-  "HostRoles" : {
-    "cluster_name" : "SrimanthAmbari",
-    "component_name" : "NAMENODE",
-    "host_name" : "hostname"
-  },
-  "host" : {
-    "href" : "http://ambari/clusters/SrimanthAmbari/hosts/hostname"
-  },
-  "metrics" : {
-    "jvm" : {
-      "threadsBlocked" : "[[0.0,1351117440],[0.0,1351117800],[0.0,1351118160],[0.0,1351118520],[0.0,1351118880],[0.0,1351119240],[0.0,1351119600],[0.0,1351119960],[0.0,1351120320],[0.0,1351120680],[0.0,1351121040]]",
-      "threadsWaiting" : "[[104.0,1351117440],[104.0,1351117800],[104.0,1351118160],[104.0,1351118520],[104.0,1351118880],[104.0,1351119240],[104.0,1351119600],[104.0,1351119960],[104.0,1351120320],[104.0,1351120680],[104.0,1351121040]]",
-      "threadsTimedWaiting" : "[[8.0,1351117440],[8.0,1351117800],[8.0,1351118160],[8.0,1351118520],[8.0,1351118880],[8.0,1351119240],[8.0,1351119600],[8.0,1351119960],[8.1638888889,1351120320],[8.0,1351120680],[8.0,1351121040]]",
-      "threadsRunnable" : "[[10.0,1351117440],[10.0,1351117800],[10.0,1351118160],[10.0,1351118520],[10.0,1351118880],[10.0,1351119240],[10.0,1351119600],[10.0,1351119960],[10.0,1351120320],[10.0,1351120680],[10.0,1351121040]]"
-    }
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/services/metrics/hdfs/rpc.json b/branch-1.2/ambari-web/app/assets/data/services/metrics/hdfs/rpc.json
deleted file mode 100644
index f0e1827..0000000
--- a/branch-1.2/ambari-web/app/assets/data/services/metrics/hdfs/rpc.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
-  "href" : "http://ambari/clusters/SrimanthAmbari/hosts/hostname/host_components/NAMENODE?fields=metrics/rpc/RpcQueueTime_avg_time[1351117114,1351120714,60]",
-  "HostRoles" : {
-    "cluster_name" : "SrimanthAmbari",
-    "component_name" : "NAMENODE",
-    "host_name" : "hostname"
-  },
-  "host" : {
-    "href" : "http://ambari/clusters/SrimanthAmbari/hosts/hostname"
-  },
-  "metrics" : {
-    "rpc" : {
-      "RpcQueueTime_avg_time" : "[[0.055987654321,1351117440],[0.013271604938,1351117800],[0.0046296296296,1351118160],[0.018518518519,1351118520],[0.01712962963,1351118880],[0.0,1351119240],[0.0,1351119600],[0.0,1351119960],[0.0,1351120320],[0.0095679012346,1351120680],[0.018672839506,1351121040]]"
-    }
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/services/metrics/hdfs/space_utilization.json b/branch-1.2/ambari-web/app/assets/data/services/metrics/hdfs/space_utilization.json
deleted file mode 100644
index fb8fb9a..0000000
--- a/branch-1.2/ambari-web/app/assets/data/services/metrics/hdfs/space_utilization.json
+++ /dev/null
@@ -1,20 +0,0 @@
-{
-  "href" : "http://ambari/api/clusters/vmc/hosts/hostname/host_components/NAMENODE?fields=metrics/dfs/FSNamesystem/CapacityRemainingGB[1352828598,1352832198,15],metrics/dfs/FSNamesystem/CapacityUsedGB[1352828598,1352832198,15],metrics/dfs/FSNamesystem/CapacityTotalGB[1352828598,1352832198,15]",
-  "HostRoles" : {
-    "cluster_name" : "vmc",
-    "component_name" : "NAMENODE",
-    "host_name" : "hostname"
-  },
-  "host" : {
-    "href" : "http://ambari/api/clusters/vmc/hosts/hostname"
-  },
-  "metrics" : {
-    "dfs" : {
-      "FSNamesystem" : {
-        "CapacityTotalGB" : "[[49.0,1352768040],[49.0,1352768400],[49.0,1352768760],[49.0,1352769120],[49.0,1352769480],[49.0,1352769840],[49.0,1352770200],[49.0,1352770560],[49.0,1352770920],[49.0,1352771280],[49.0,1352771640],[49.0,1352772000],[49.0,1352772360],[49.0,1352772720],[49.0,1352773080],[49.0,1352773440],[49.0,1352773800],[49.0,1352774160],[49.0,1352774520],[49.0,1352774880],[49.0,1352775240],[49.0,1352775600],[49.0,1352775960],[49.0,1352776320],[49.0,1352776680],[49.0,1352777040],[49.0,1352777400],[49.0,1352777760],[49.0,1352778120],[49.0,1352778480],[49.0,1352778840],[49.0,1352779200],[49.0,1352779560],[49.0,1352779920],[49.0,1352780280],[49.0,1352780640],[49.0,1352781000],[49.0,1352781360],[49.0,1352781720],[49.0,1352782080],[49.0,1352782440],[49.0,1352785680],[49.0,1352786040],[49.0,1352786400],[49.0,1352790360],[49.0,1352790720],[49.0,1352791080],[49.0,1352791440],[49.0,1352791800],[49.0,1352792160],[49.0,1352792520],[49.0,1352792880],[49.0,1352793240],[49.0,1352793600],[49.0,1352793960],[49.0,1352794320],[49.0,1352794680],[49.0,1352795040],[49.0,1352795400],[49.0,1352795760],[49.0,1352796120],[49.0,1352796480],[49.0,1352796840],[49.0,1352797200],[49.0,1352797560],[49.0,1352797920],[49.0,1352798280],[49.0,1352798640],[49.0,1352799000],[49.0,1352822400],[49.0,1352822760],[49.0,1352823120],[49.0,1352823480],[49.0,1352823840],[49.0,1352824200],[49.0,1352824560],[49.0,1352824920],[49.0,1352825280],[49.0,1352825640],[49.0,1352826000],[49.0,1352826360],[49.0,1352826720],[49.0,1352827080],[49.0,1352827440],[49.0,1352830320],[49.0,1352830680],[49.0,1352831040],[49.0,1352831400]]",
-        "CapacityUsedGB" : "[[0.0,1352768040],[0.0,1352768400],[0.0,1352768760],[0.0,1352769120],[0.0,1352769480],[0.0,1352769840],[0.0,1352770200],[0.0,1352770560],[0.0,1352770920],[0.0,1352771280],[0.0,1352771640],[0.0,1352772000],[0.0,1352772360],[0.0,1352772720],[0.0,1352773080],[0.0,1352773440],[0.0,1352773800],[0.0,1352774160],[0.0,1352774520],[0.0,1352774880],[0.0,1352775240],[0.0,1352775600],[0.0,1352775960],[0.0,1352776320],[0.0,1352776680],[0.0,1352777040],[0.0,1352777400],[0.0,1352777760],[0.0,1352778120],[0.0,1352778480],[0.0,1352778840],[0.0,1352779200],[0.0,1352779560],[0.0,1352779920],[0.0,1352780280],[0.0,1352780640],[0.0,1352781000],[0.0,1352781360],[0.0,1352781720],[0.0,1352782080],[0.0,1352782440],[0.0,1352785680],[0.0,1352786040],[0.0,1352786400],[0.0,1352790360],[0.0,1352790720],[0.0,1352791080],[0.0,1352791440],[0.0,1352791800],[0.0,1352792160],[0.0,1352792520],[0.0,1352792880],[0.0,1352793240],[0.0,1352793600],[0.0,1352793960],[0.0,1352794320],[0.0,1352794680],[0.0,1352795040],[0.0,1352795400],[0.0,1352795760],[0.0,1352796120],[0.0,1352796480],[0.0,1352796840],[0.0,1352797200],[0.0,1352797560],[0.0,1352797920],[0.0,1352798280],[0.0,1352798640],[0.0,1352799000],[0.0,1352822400],[0.0,1352822760],[0.0,1352823120],[0.0,1352823480],[0.0,1352823840],[0.0,1352824200],[0.0,1352824560],[0.0,1352824920],[0.0,1352825280],[0.0,1352825640],[0.0,1352826000],[0.0,1352826360],[0.0,1352826720],[0.0,1352827080],[0.0,1352827440],[0.0,1352830320],[0.0,1352830680],[0.0,1352831040],[0.0,1352831400]]",
-        "CapacityRemainingGB" : "[[45.0,1352768040],[45.0,1352768400],[45.0,1352768760],[45.0,1352769120],[45.0,1352769480],[45.0,1352769840],[45.0,1352770200],[45.0,1352770560],[45.0,1352770920],[45.0,1352771280],[45.0,1352771640],[45.0,1352772000],[45.0,1352772360],[45.0,1352772720],[45.0,1352773080],[45.0,1352773440],[45.0,1352773800],[45.0,1352774160],[45.0,1352774520],[45.0,1352774880],[45.0,1352775240],[45.0,1352775600],[45.0,1352775960],[45.0,1352776320],[45.0,1352776680],[45.0,1352777040],[45.0,1352777400],[44.9166666667,1352777760],[44.1277777778,1352778120],[44.0,1352778480],[44.0,1352778840],[44.0,1352779200],[44.2527777778,1352779560],[44.0,1352779920],[44.0,1352780280],[44.0,1352780640],[44.25,1352781000],[44.0,1352781360],[44.0,1352781720],[44.0,1352782080],[44.0,1352782440],[44.0861111111,1352785680],[44.0,1352786040],[44.0,1352786400],[44.0,1352790360],[44.0,1352790720],[44.0,1352791080],[44.0,1352791440],[44.0,1352791800],[44.0,1352792160],[44.0,1352792520],[44.0,1352792880],[44.0,1352793240],[44.0,1352793600],[44.0,1352793960],[44.0,1352794320],[44.0,1352794680],[44.0,1352795040],[44.0,1352795400],[44.0,1352795760],[44.0,1352796120],[44.0,1352796480],[44.0,1352796840],[44.0,1352797200],[44.0,1352797560],[44.0,1352797920],[44.0,1352798280],[44.0,1352798640],[44.0,1352799000],[44.0,1352822400],[44.0,1352822760],[44.0,1352823120],[44.0,1352823480],[44.0,1352823840],[44.0,1352824200],[44.0,1352824560],[44.0,1352824920],[44.0,1352825280],[44.0,1352825640],[44.0,1352826000],[44.0,1352826360],[44.0,1352826720],[44.0,1352827080],[44.0,1352827440],[44.0,1352830320],[44.0,1352830680],[44.0,1352831040],[44.0,1352831400]]"
-      }
-    }
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/services/metrics/mapreduce/gc.json b/branch-1.2/ambari-web/app/assets/data/services/metrics/mapreduce/gc.json
deleted file mode 100644
index fcae92b..0000000
--- a/branch-1.2/ambari-web/app/assets/data/services/metrics/mapreduce/gc.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
-  "href" : "http://ambari/clusters/SrimanthAmbari/hosts/hostname/host_components/JOBTRACKER?fields=metrics/jvm/gcTimeMillis[1351213550,1351217148,60]",
-  "HostRoles" : {
-    "cluster_name" : "SrimanthAmbari",
-    "component_name" : "JOBTRACKER",
-    "host_name" : "hostname"
-  },
-  "host" : {
-    "href" : "http://ambari/clusters/SrimanthAmbari/hosts/hostname"
-  },
-  "metrics" : {
-    "jvm" : {
-      "gcTimeMillis" : "[[0.0,1351213560],[0.0,1351213920],[0.0,1351214280],[0.0,1351214640],[0.0,1351215000],[0.0,1351215360],[0.011111111111,1351215720],[0.0,1351216080],[0.0,1351216440],[1.1930462189E7,1351216800],[0.069444444444,1351217160]]"
-    }
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/services/metrics/mapreduce/jobs_status.json b/branch-1.2/ambari-web/app/assets/data/services/metrics/mapreduce/jobs_status.json
deleted file mode 100644
index 8b07690..0000000
--- a/branch-1.2/ambari-web/app/assets/data/services/metrics/mapreduce/jobs_status.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
-  "href" : "http://ambari/clusters/SrimanthAmbari/services/MAPREDUCE/components/JOBTRACKER?fields=metrics/mapred/jobtracker/jobs_completed[1351213550,1351217148,60],metrics/mapred/jobtracker/jobs_preparing[1351213550,1351217148,60],metrics/mapred/jobtracker/jobs_failed[1351213550,1351217148,60],metrics/mapred/jobtracker/jobs_submitted[1351213550,1351217148,60],metrics/mapred/jobtracker/jobs_failed[1351213550,1351217148,60],metrics/mapred/jobtracker/jobs_running[1351213550,1351217148,60]",
-  "metrics" : {
-    "mapred" : {
-      "jobtracker" : {
-        "jobs_running" : "[[0.0,1351213560],[0.0,1351213920],[0.0,1351214280],[0.0,1351214640],[0.0,1351215000],[0.0,1351215360],[0.0,1351215720],[0.0,1351216080],[0.0,1351216440],[0.0,1351216800],[0.077777777778,1351217160]]",
-        "jobs_failed" : "[[0.0,1351213560],[0.0,1351213920],[0.0,1351214280],[0.0,1351214640],[0.0,1351215000],[0.0,1351215360],[0.0,1351215720],[0.0,1351216080],[0.0,1351216440],[0.0,1351216800],[0.0,1351217160]]",
-        "jobs_completed" : "[[0.0,1351213560],[0.0,1351213920],[0.0,1351214280],[0.0,1351214640],[0.0,1351215000],[0.0,1351215360],[0.0,1351215720],[0.0,1351216080],[0.0,1351216440],[1.19304647E7,1351216800],[0.0083333333333,1351217160]]",
-        "jobs_preparing" : "[[0.0,1351213560],[0.0,1351213920],[0.0,1351214280],[0.0,1351214640],[0.0,1351215000],[0.0,1351215360],[0.0,1351215720],[0.0,1351216080],[0.0,1351216440],[0.0,1351216800],[0.086111111111,1351217160]]",
-        "jobs_submitted" : "[[0.0,1351213560],[0.0,1351213920],[0.0,1351214280],[0.0,1351214640],[0.0,1351215000],[0.0,1351215360],[0.0,1351215720],[0.0,1351216080],[0.0,1351216440],[1.19304647E7,1351216800],[0.0083333333333,1351217160]]"
-      }
-    }
-  },
-  "ServiceComponentInfo" : {
-    "cluster_name" : "SrimanthAmbari",
-    "component_name" : "JOBTRACKER",
-    "service_name" : "MAPREDUCE"
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/services/metrics/mapreduce/jvm_heap.json b/branch-1.2/ambari-web/app/assets/data/services/metrics/mapreduce/jvm_heap.json
deleted file mode 100644
index 6893d38..0000000
--- a/branch-1.2/ambari-web/app/assets/data/services/metrics/mapreduce/jvm_heap.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
-  "href" : "http://ambari/clusters/SrimanthAmbari/hosts/hostname/host_components/JOBTRACKER?fields=metrics/jvm/memNonHeapUsedM[1351213550,1351217148,60],metrics/jvm/memNonHeapCommittedM[1351213550,1351217148,60],metrics/jvm/memHeapUsedM[1351213550,1351217148,60],metrics/jvm/memHeapCommittedM[1351213550,1351217148,60]",
-  "HostRoles" : {
-    "cluster_name" : "SrimanthAmbari",
-    "component_name" : "JOBTRACKER",
-    "host_name" : "hostname"
-  },
-  "host" : {
-    "href" : "http://ambari/clusters/SrimanthAmbari/hosts/hostname"
-  },
-  "metrics" : {
-    "jvm" : {
-      "memHeapCommittedM" : "[[185.1875,1351213560],[185.1875,1351213920],[185.1875,1351214280],[185.1875,1351214640],[185.1875,1351215000],[185.1875,1351215360],[185.1875,1351215720],[185.1875,1351216080],[185.1875,1351216440],[185.1875,1351216800],[185.1875,1351217160]]",
-      "memNonHeapUsedM" : "[[24.856247,1351213560],[24.856247,1351213920],[24.856247,1351214280],[24.856247,1351214640],[24.856247,1351215000],[24.856247,1351215360],[24.856247,1351215720],[24.856247,1351216080],[24.856247,1351216440],[23.964379333,1351216800],[21.711374106,1351217160]]",
-      "memHeapUsedM" : "[[58.635102042,1351213560],[76.2040755,1351213920],[93.246927389,1351214280],[110.45160614,1351214640],[127.54546414,1351215000],[144.66363658,1351215360],[96.690545511,1351215720],[20.528702175,1351216080],[38.275298164,1351216440],[59.513269842,1351216800],[127.88187832,1351217160]]",
-      "memNonHeapCommittedM" : "[[39.08203,1351213560],[39.08203,1351213920],[39.08203,1351214280],[39.08203,1351214640],[39.08203,1351215000],[39.08203,1351215360],[39.08203,1351215720],[39.08203,1351216080],[39.08203,1351216440],[36.653699028,1351216800],[23.404166667,1351217160]]"
-    }
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/services/metrics/mapreduce/jvm_threads.json b/branch-1.2/ambari-web/app/assets/data/services/metrics/mapreduce/jvm_threads.json
deleted file mode 100644
index 615e3dc..0000000
--- a/branch-1.2/ambari-web/app/assets/data/services/metrics/mapreduce/jvm_threads.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
-  "href" : "http://ambari/clusters/SrimanthAmbari/hosts/hostname/host_components/JOBTRACKER?fields=metrics/jvm/threadsRunnable[1351213550,1351217148,60],metrics/jvm/threadsBlocked[1351213550,1351217148,60],metrics/jvm/threadsWaiting[1351213550,1351217148,60],metrics/jvm/threadsTimedWaiting[1351213550,1351217148,60]",
-  "HostRoles" : {
-    "cluster_name" : "SrimanthAmbari",
-    "component_name" : "JOBTRACKER",
-    "host_name" : "hostname"
-  },
-  "host" : {
-    "href" : "http://ambari/clusters/SrimanthAmbari/hosts/hostname"
-  },
-  "metrics" : {
-    "jvm" : {
-      "threadsBlocked" : "[[0.0,1351213560],[0.0,1351213920],[0.0,1351214280],[0.0,1351214640],[0.0,1351215000],[0.0,1351215360],[0.0,1351215720],[0.0,1351216080],[0.0,1351216440],[0.0,1351216800],[0.0,1351217160]]",
-      "threadsWaiting" : "[[59.0,1351213560],[59.0,1351213920],[59.0,1351214280],[59.0,1351214640],[59.0,1351215000],[59.0,1351215360],[59.0,1351215720],[59.0,1351216080],[59.0,1351216440],[58.388888889,1351216800],[58.627777778,1351217160]]",
-      "threadsTimedWaiting" : "[[10.0,1351213560],[10.0,1351213920],[10.0,1351214280],[10.0,1351214640],[10.0,1351215000],[10.0,1351215360],[10.0,1351215720],[10.0,1351216080],[10.0,1351216440],[10.161111111,1351216800],[10.830555556,1351217160]]",
-      "threadsRunnable" : "[[6.0,1351213560],[6.0,1351213920],[6.0,1351214280],[6.0,1351214640],[6.0,1351215000],[6.0,1351215360],[6.0,1351215720],[6.0,1351216080],[6.0,1351216440],[6.0,1351216800],[6.0,1351217160]]"
-    }
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/services/metrics/mapreduce/map_slots.json b/branch-1.2/ambari-web/app/assets/data/services/metrics/mapreduce/map_slots.json
deleted file mode 100644
index ea7a458..0000000
--- a/branch-1.2/ambari-web/app/assets/data/services/metrics/mapreduce/map_slots.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
-  "href" : "http://ambari/clusters/SrimanthAmbari/services/MAPREDUCE/components/JOBTRACKER?fields=metrics/mapred/jobtracker/occupied_map_slots[1351213550,1351217148,60],metrics/mapred/jobtracker/reserved_map_slots[1351213550,1351217148,60]",
-  "metrics" : {
-    "mapred" : {
-      "jobtracker" : {
-        "reserved_map_slots" : "[[0.0,1351213560],[0.0,1351213920],[0.0,1351214280],[0.0,1351214640],[0.0,1351215000],[0.0,1351215360],[0.0,1351215720],[0.0,1351216080],[0.0,1351216440],[0.0,1351216800],[0.0,1351217160]]",
-        "occupied_map_slots" : "[[0.0,1351213560],[0.0,1351213920],[0.0,1351214280],[0.0,1351214640],[0.0,1351215000],[0.0,1351215360],[0.0,1351215720],[0.0,1351216080],[0.0,1351216440],[0.0,1351216800],[0.038888888889,1351217160]]"
-      }
-    }
-  },
-  "ServiceComponentInfo" : {
-    "cluster_name" : "SrimanthAmbari",
-    "component_name" : "JOBTRACKER",
-    "service_name" : "MAPREDUCE"
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/services/metrics/mapreduce/reduce_slots.json b/branch-1.2/ambari-web/app/assets/data/services/metrics/mapreduce/reduce_slots.json
deleted file mode 100644
index 3d501a1..0000000
--- a/branch-1.2/ambari-web/app/assets/data/services/metrics/mapreduce/reduce_slots.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
-  "href" : "http://ambari/clusters/SrimanthAmbari/services/MAPREDUCE/components/JOBTRACKER?fields=metrics/mapred/jobtracker/occupied_reduce_slots[1351213550,1351217148,60],metrics/mapred/jobtracker/reserved_reduce_slots[1351213550,1351217148,60]",
-  "metrics" : {
-    "mapred" : {
-      "jobtracker" : {
-        "reserved_reduce_slots" : "[[0.0,1351213560],[0.0,1351213920],[0.0,1351214280],[0.0,1351214640],[0.0,1351215000],[0.0,1351215360],[0.0,1351215720],[0.0,1351216080],[0.0,1351216440],[0.0,1351216800],[0.0,1351217160]]",
-        "occupied_reduce_slots" : "[[0.0,1351213560],[0.0,1351213920],[0.0,1351214280],[0.0,1351214640],[0.0,1351215000],[0.0,1351215360],[0.0,1351215720],[0.0,1351216080],[0.0,1351216440],[0.0,1351216800],[0.038888888889,1351217160]]"
-      }
-    }
-  },
-  "ServiceComponentInfo" : {
-    "cluster_name" : "SrimanthAmbari",
-    "component_name" : "JOBTRACKER",
-    "service_name" : "MAPREDUCE"
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/services/metrics/mapreduce/rpc.json b/branch-1.2/ambari-web/app/assets/data/services/metrics/mapreduce/rpc.json
deleted file mode 100644
index 9f284f2..0000000
--- a/branch-1.2/ambari-web/app/assets/data/services/metrics/mapreduce/rpc.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
-  "href" : "http://ambari/clusters/SrimanthAmbari/hosts/hostname/host_components/JOBTRACKER?fields=metrics/rpc/RpcQueueTime_avg_time[1351213550,1351217148,60]",
-  "HostRoles" : {
-    "cluster_name" : "SrimanthAmbari",
-    "component_name" : "JOBTRACKER",
-    "host_name" : "hostname"
-  },
-  "host" : {
-    "href" : "http://ambari/clusters/SrimanthAmbari/hosts/hostname"
-  },
-  "metrics" : {
-    "rpc" : {
-      "RpcQueueTime_avg_time" : "[[0.069553376906,1351213560],[0.08265993266,1351213920],[0.096717171717,1351214280],[0.07859724698,1351214640],[0.093248663102,1351215000],[0.084630619925,1351215360],[0.09087443058,1351215720],[0.075108932462,1351216080],[0.086007130125,1351216440],[0.42323904309,1351216800],[0.14756856477,1351217160]]"
-    }
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/services/metrics/mapreduce/tasks_running_waiting.json b/branch-1.2/ambari-web/app/assets/data/services/metrics/mapreduce/tasks_running_waiting.json
deleted file mode 100644
index f79810f..0000000
--- a/branch-1.2/ambari-web/app/assets/data/services/metrics/mapreduce/tasks_running_waiting.json
+++ /dev/null
@@ -1,18 +0,0 @@
-{
-  "href" : "http://ambari/clusters/SrimanthAmbari/services/MAPREDUCE/components/JOBTRACKER?fields=metrics/mapred/jobtracker/running_maps[1351213550,1351217148,60],metrics/mapred/jobtracker/running_reduces[1351213550,1351217148,60],metrics/mapred/jobtracker/waiting_maps[1351213550,1351217148,60],metrics/mapred/jobtracker/waiting_reduces[1351213550,1351217148,60]",
-  "metrics" : {
-    "mapred" : {
-      "jobtracker" : {
-        "running_maps" : "[[0.0,1351213560],[0.0,1351213920],[0.0,1351214280],[0.0,1351214640],[0.0,1351215000],[0.0,1351215360],[0.0,1351215720],[0.0,1351216080],[0.0,1351216440],[0.0,1351216800],[0.038888888889,1351217160]]",
-        "running_reduces" : "[[0.0,1351213560],[0.0,1351213920],[0.0,1351214280],[0.0,1351214640],[0.0,1351215000],[0.0,1351215360],[0.0,1351215720],[0.0,1351216080],[0.0,1351216440],[0.0,1351216800],[0.038888888889,1351217160]]",
-        "waiting_maps" : "[[0.0,1351213560],[0.0,1351213920],[0.0,1351214280],[0.0,1351214640],[0.0,1351215000],[0.0,1351215360],[0.0,1351215720],[0.0,1351216080],[0.0,1351216440],[0.0,1351216800],[0.0,1351217160]]",
-        "waiting_reduces" : "[[0.0,1351213560],[0.0,1351213920],[0.0,1351214280],[0.0,1351214640],[0.0,1351215000],[0.0,1351215360],[0.0,1351215720],[0.0,1351216080],[0.0,1351216440],[0.0,1351216800],[0.0,1351217160]]"
-      }
-    }
-  },
-  "ServiceComponentInfo" : {
-    "cluster_name" : "SrimanthAmbari",
-    "component_name" : "JOBTRACKER",
-    "service_name" : "MAPREDUCE"
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/services/summary/hbase.json b/branch-1.2/ambari-web/app/assets/data/services/summary/hbase.json
deleted file mode 100644
index 44875dd..0000000
--- a/branch-1.2/ambari-web/app/assets/data/services/summary/hbase.json
+++ /dev/null
@@ -1,20 +0,0 @@
-{
-  "hbase": {
-    "service_type": "HBASE",
-    "installed": true,
-    "hbasemaster_addr": "hbase:60010",
-    "total_regionservers": "1",
-    "memory_heap_used": 15691080,
-    "memory_heap_max": 498991104,
-    "version": "0.92.1.15, rUnknown",
-    "start_time": 1348935496,
-    "cluster_id": "83ad5508-f036-43e2-acc5-25408d34efe8",
-    "active_time": 1348935496,
-    "coprocessors": [],
-    "average_load": 3,
-    "regions_in_transition_count": 0,
-    "live_regionservers": 1,
-    "zookeeper_quorum": ["hbase:2181"],
-    "dead_regionservers": 0
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/services/summary/hdfs.json b/branch-1.2/ambari-web/app/assets/data/services/summary/hdfs.json
deleted file mode 100644
index 4008b11..0000000
--- a/branch-1.2/ambari-web/app/assets/data/services/summary/hdfs.json
+++ /dev/null
@@ -1,31 +0,0 @@
-{
-  "hdfs": {
-    "service_type": "HDFS",
-    "installed": true,
-    "namenode_addr": "namenode:50070",
-    "secondary_namenode_addr": "snamenode:50090",
-    "total_nodes": "1",
-    "memory_heap_used": 151642616,
-    "memory_heap_max": 453050368,
-    "dfs_dirfiles_count": 252,
-    "dfs_blocks_total": 146,
-    "dfs_blocks_underreplicated": 145,
-    "dfs_blocks_missing": 0,
-    "dfs_blocks_corrupt": 0,
-    "dfs_state": "Operational",
-    "start_time": 1348935028,
-    "live_nodes": 1,
-    "dead_nodes": 0,
-    "decommissioning_nodes": 0,
-    "version": "1.0.3.15, r",
-    "safemode": false,
-    "pending_upgrades": false,
-    "dfs_configured_capacity": 885570207744,
-    "dfs_percent_used": 0.01,
-    "dfs_percent_remaining": 95.09,
-    "dfs_total_bytes": 885570207744,
-    "dfs_used_bytes": 104898560,
-    "nondfs_used_bytes": 43365113856,
-    "dfs_free_bytes": 842100195328
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/services/summary/mapreduce.json b/branch-1.2/ambari-web/app/assets/data/services/summary/mapreduce.json
deleted file mode 100644
index 4e904a3..0000000
--- a/branch-1.2/ambari-web/app/assets/data/services/summary/mapreduce.json
+++ /dev/null
@@ -1,35 +0,0 @@
-{
-  "mapreduce": {
-    "service_type": "MAPREDUCE",
-    "installed": true,
-    "jobtracker_addr": "jobtracker:50030",
-    "trackers_total": "1",
-    "jobhistory_addr": "jobtracker:51111",
-    "memory_heap_used": 94499048,
-    "memory_heap_max": 482344960,
-    "trackers_live": 1,
-    "trackers_graylisted": 0,
-    "trackers_blacklisted": 0,
-    "version": "1.0.3.15, r",
-    "queue_info": {
-      "type": ""
-    },
-    "waiting_jobs": 0,
-    "trackers_excluded": 0,
-    "map_task_capacity": 2,
-    "reduce_task_capacity": 2,
-    "job_total_submissions": 4,
-    "job_total_completions": 4,
-    "running_jobs": 0,
-    "running_map_tasks": 0,
-    "running_reduce_tasks": 0,
-    "occupied_map_slots": 0,
-    "occupied_reduce_slots": 0,
-    "reserved_map_slots": 0,
-    "reserved_reduce_slots": 0,
-    "waiting_maps": 0,
-    "waiting_reduces": 0,
-    "start_time": 1348935243,
-    "average_node_capacity": 4
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/users/user_admin.json b/branch-1.2/ambari-web/app/assets/data/users/user_admin.json
deleted file mode 100644
index 0165ce7..0000000
--- a/branch-1.2/ambari-web/app/assets/data/users/user_admin.json
+++ /dev/null
@@ -1,11 +0,0 @@
-{
-  "href" : "http://dev.hortonworks.com:8080/api/v1/users/admin",
-  "Users" : {
-    "user_name" : "admin",
-    "ldap_user" : false,
-    "roles" : [
-      "admin",
-      "user"
-    ]
-  }
-}
diff --git a/branch-1.2/ambari-web/app/assets/data/users/user_user.json b/branch-1.2/ambari-web/app/assets/data/users/user_user.json
deleted file mode 100644
index 99515f2..0000000
--- a/branch-1.2/ambari-web/app/assets/data/users/user_user.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-  "href" : "http://dev.hortonworks.com:8080/api/v1/users/user",
-  "Users" : {
-    "user_name" : "user",
-    "ldap_user" : false,
-    "roles" : [
-      "user"
-    ]
-  }
-}
diff --git a/branch-1.2/ambari-web/app/assets/data/users/users.json b/branch-1.2/ambari-web/app/assets/data/users/users.json
deleted file mode 100644
index f81f6a7..0000000
--- a/branch-1.2/ambari-web/app/assets/data/users/users.json
+++ /dev/null
@@ -1,47 +0,0 @@
-{
-  "href" : "http://dev.hortonworks.com:8080/api/v1/users/?fields=*",
-  "items" : [
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/users/user",
-      "Users" : {
-        "user_name" : "user",
-        "ldap_user" : false,
-        "roles" : [
-          "user"
-        ]
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/users/admin",
-      "Users" : {
-        "user_name" : "admin",
-        "ldap_user" : false,
-        "roles" : [
-          "admin",
-          "user"
-        ]
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/users/jone",
-      "Users" : {
-        "user_name" : "jone",
-        "ldap_user" : true,
-        "roles" : [
-          "user"
-        ]
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/users/alex",
-      "Users" : {
-        "user_name" : "alex",
-        "ldap_user" : false,
-        "roles" : [
-          "admin",
-          "user"
-        ]
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/bootstrap/bootstrap.json b/branch-1.2/ambari-web/app/assets/data/wizard/bootstrap/bootstrap.json
deleted file mode 100644
index 2703955..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/bootstrap/bootstrap.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
-  "status": "OK",
-  "log": "Running Bootstrap now",
-  "requestId": "1"
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/bootstrap/poll_1.json b/branch-1.2/ambari-web/app/assets/data/wizard/bootstrap/poll_1.json
deleted file mode 100644
index 8ca91dc..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/bootstrap/poll_1.json
+++ /dev/null
@@ -1,11 +0,0 @@
-{
-  "status": "RUNNING",
-  "hostsStatus": [
-    {
-      "status": "RUNNING",
-      "hostName": "localhost.localdomain",
-      "log": "STDOUT\n\nSTDERR\nWarning: Permanently added 'dev001,10.0.2.15' (RSA) to the list of known hosts.\n"
-    }
-  ],
-  "log": ""
-}
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/bootstrap/poll_2.json b/branch-1.2/ambari-web/app/assets/data/wizard/bootstrap/poll_2.json
deleted file mode 100644
index 0926b40..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/bootstrap/poll_2.json
+++ /dev/null
@@ -1,11 +0,0 @@
-{
-  "status": "SUCCESS",
-  "hostsStatus": [
-    {
-      "status": "DONE",
-      "hostName": "localhost.localdomain",
-      "log": "STDOUT\n\nSTDERR\nWarning: Permanently added 'dev002,10.0.2.16' (RSA) to the list of known hosts.\r\n/etc/yum.repos.d/ambari.repo: No such file or directory\nSTDOUT\n\nSTDERR\nPermission denied, please try again.\r\nConnection closed by UNKNOWN\r\nlost connection\nSTDOUT\n\nSTDERR\npython: can't open file '/tmp/setupAgent.py': [Errno 2] No such file or directory\n"
-    }
-  ],
-  "log": "\n\nINFO:root:BootStrapping hosts ['dev001','dev002'] using /root/dev/ambari/ambari-server/src/main/python with sshKey File /tmp/bootstrap/1/sshKey using tmp dir /tmp/bootstrap/1 ambari: localhost\nINFO:root:Running scp command scp -o ConnectTimeout=3 -o StrictHostKeyChecking=no -i /tmp/bootstrap/1/sshKey /etc/yum.repos.d/ambari.repo root@dev001:/etc/yum.repos.d\nINFO:root:scp /etc/yum.repos.d/ambari.repo done for host dev001, exitcode=1\nINFO:root:Parallel scp returns for repo file\nINFO:root:Running scp command scp -o ConnectTimeout=3 -o StrictHostKeyChecking=no -i /tmp/bootstrap/1/sshKey /root/dev/ambari/ambari-server/src/main/python/setupAgent.py root@dev001:/tmp\nINFO:root:scp /root/dev/ambari/ambari-server/src/main/python/setupAgent.py done for host dev001, exitcode=1\nINFO:root:Parallel scp returns for agent script\nINFO:root:Running setup agent...\nINFO:root:Running ssh command ssh -o ConnectTimeOut=3 -o StrictHostKeyChecking=no -i /tmp/bootstrap/1/sshKey root@dev001 python /tmp/setupAgent.py tmp localhost\nINFO:root:Setup agent done for host dev001, exitcode=2\nINFO:root:Parallel ssh returns for setup agent\n"
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/bootstrap/single_host_information.json b/branch-1.2/ambari-web/app/assets/data/wizard/bootstrap/single_host_information.json
deleted file mode 100644
index be193bb..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/bootstrap/single_host_information.json
+++ /dev/null
@@ -1,124 +0,0 @@
-{
-  "href": "http://localhost:8080/api/v1/hosts?fields=Hosts/total_mem,Hosts/cpu_count",
-  "items": [
-    {
-      "href": "http://localhost:8080/api/v1/hosts/localhost.localdomain",
-      "Hosts": {
-        "cpu_count": 1,
-        "total_mem": 4057989,
-        "host_name": "localhost.localdomain",
-        "disk_info": [
-          {
-            "available": "47574176",
-            "used": "3507828",
-            "percent": "7%",
-            "size": "51606140",
-            "type": "ext4",
-            "mountpoint": "/"
-          },
-          {
-            "available": "47574176",
-            "used": "3507828",
-            "percent": "7%",
-            "size": "51606140",
-            "type": "ext4",
-            "mountpoint": "/grid/0"
-          },
-          {
-            "available": "1027204",
-            "used": "260",
-            "percent": "1%",
-            "size": "1027464",
-            "type": "tmpfs",
-            "mountpoint": "/dev/shm"
-          },
-          {
-            "available": "432210",
-            "used": "38034",
-            "percent": "9%",
-            "size": "495844",
-            "type": "ext4",
-            "mountpoint": "/boot"
-          },
-          {
-            "available": "44459872",
-            "used": "184220",
-            "percent": "1%",
-            "size": "47033288",
-            "type": "ext4",
-            "mountpoint": "/home"
-          },
-          {
-            "available": "450200708",
-            "used": "281534268",
-            "percent": "39%",
-            "size": "731734976",
-            "type": "vboxsf",
-            "mountpoint": "/media/sf_ambari"
-          },
-          {
-            "available": "450200708",
-            "used": "281534268",
-            "percent": "39%",
-            "size": "731734976",
-            "type": "vboxsf",
-            "mountpoint": "/host"
-          }
-        ]
-      }
-    },
-    {
-      "href" : "http://ec2-23-22-214-206.compute-1.amazonaws.com:8080/api/v1/hosts/ip-10-4-119-156.ec2.internal",
-      "Hosts" : {
-        "host_status" : "HEALTHY",
-        "public_host_name" : "ec2-23-22-21-211.compute-1.amazonaws.com",
-        "cpu_count" : 1,
-        "rack_info" : "/default-rack",
-        "host_health_report" : "",
-        "os_arch" : "x86_64",
-        "host_name" : "ip-10-4-119-156.ec2.internal",
-        "disk_info" : [
-          {
-            "available" : "3963732",
-            "used" : "1881984",
-            "percent" : "33%",
-            "size" : "5905712",
-            "type" : "ext4",
-            "mountpoint" : "/"
-          },
-          {
-            "available" : "3823076",
-            "used" : "0",
-            "percent" : "0%",
-            "size" : "3823076",
-            "type" : "tmpfs",
-            "mountpoint" : "/dev/shm"
-          },
-          {
-            "available" : "411234588",
-            "used" : "203012",
-            "percent" : "1%",
-            "size" : "433455904",
-            "type" : "ext3",
-            "mountpoint" : "/grid/0"
-          },
-          {
-            "available" : "411234588",
-            "used" : "203012",
-            "percent" : "1%",
-            "size" : "433455904",
-            "type" : "ext3",
-            "mountpoint" : "/grid/1"
-          }
-        ],
-        "ip" : "10.4.119.156",
-        "os_type" : "redhat6",
-        "last_heartbeat_time" : 1355185180819,
-        "host_state" : "HEALTHY",
-        "cluster_name" : "joker2",
-        "last_registration_time" : 1355182278593,
-        "total_mem" : 7644119
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/bootstrap/single_host_registration.json b/branch-1.2/ambari-web/app/assets/data/wizard/bootstrap/single_host_registration.json
deleted file mode 100644
index ba10a49..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/bootstrap/single_host_registration.json
+++ /dev/null
@@ -1,11 +0,0 @@
-{
-  "href" : "http://localhost:8080/api/v1/hosts",
-  "items" : [
-    {
-      "href" : "http://localhost:8080/api/v1/hosts/localhost.localdomain",
-      "Hosts" : {
-        "host_name" : "localhost.localdomain"
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/bootstrap/two_hosts_information.json b/branch-1.2/ambari-web/app/assets/data/wizard/bootstrap/two_hosts_information.json
deleted file mode 100644
index fdb4620..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/bootstrap/two_hosts_information.json
+++ /dev/null
@@ -1,512 +0,0 @@
-{
-  "href" : "http://ec2-107-20-107-224.compute-1.amazonaws.com:8080/api/v1/hosts?fields=*",
-  "items" : [
-    {
-      "href" : "http://ec2-107-20-107-224.compute-1.amazonaws.com:8080/api/v1/hosts/ip-10-190-153-220.ec2.internal",
-      "Hosts" : {
-        "host_status" : "HEALTHY",
-        "public_host_name" : "ec2-50-19-188-43.compute-1.amazonaws.com",
-        "cpu_count" : 2,
-        "rack_info" : "/default-rack",
-        "host_health_report" : "",
-        "os_arch" : "x86_64",
-        "host_name" : "ip-10-190-153-220.ec2.internal",
-        "disk_info" : [
-          {
-            "available" : "3822572",
-            "used" : "2022188",
-            "percent" : "35%",
-            "size" : "5904748",
-            "type" : "ext3",
-            "mountpoint" : "/"
-          },
-          {
-            "available" : "3932160",
-            "used" : "0",
-            "percent" : "0%",
-            "size" : "3932160",
-            "type" : "tmpfs",
-            "mountpoint" : "/dev/shm"
-          },
-          {
-            "available" : "411234588",
-            "used" : "203012",
-            "percent" : "1%",
-            "size" : "433455904",
-            "type" : "ext3",
-            "mountpoint" : "/grid/0"
-          },
-          {
-            "available" : "411234588",
-            "used" : "203012",
-            "percent" : "1%",
-            "size" : "433455904",
-            "type" : "ext3",
-            "mountpoint" : "/grid/1"
-          }
-        ],
-        "ip" : "10.190.153.220",
-        "os_type" : "redhat5",
-        "last_heartbeat_time" : 1358871154566,
-        "host_state" : "HEALTHY",
-        "last_agent_env" : {
-          "paths" : [
-            {
-              "name" : "/etc/hadoop",
-              "type" : "not_exist"
-            },
-            {
-              "name" : "/etc/hadoop/conf",
-              "type" : "not_exist"
-            },
-            {
-              "name" : "/etc/hbase",
-              "type" : "not_exist"
-            },
-            {
-              "name" : "/etc/hcatalog",
-              "type" : "not_exist"
-            },
-            {
-              "name" : "/etc/hive",
-              "type" : "not_exist"
-            },
-            {
-              "name" : "/etc/oozie",
-              "type" : "not_exist"
-            },
-            {
-              "name" : "/etc/sqoop",
-              "type" : "not_exist"
-            },
-            {
-              "name" : "/etc/ganglia",
-              "type" : "not_exist"
-            },
-            {
-              "name" : "/etc/nagios",
-              "type" : "not_exist"
-            },
-            {
-              "name" : "/var/run/hadoop",
-              "type" : "not_exist"
-            },
-            {
-              "name" : "/var/run/zookeeper",
-              "type" : "not_exist"
-            },
-            {
-              "name" : "/var/run/hbase",
-              "type" : "not_exist"
-            },
-            {
-              "name" : "/var/run/templeton",
-              "type" : "not_exist"
-            },
-            {
-              "name" : "/var/run/oozie",
-              "type" : "not_exist"
-            },
-            {
-              "name" : "/var/log/hadoop",
-              "type" : "not_exist"
-            },
-            {
-              "name" : "/var/log/zookeeper",
-              "type" : "not_exist"
-            },
-            {
-              "name" : "/var/log/hbase",
-              "type" : "not_exist"
-            },
-            {
-              "name" : "/var/run/templeton",
-              "type" : "not_exist"
-            },
-            {
-              "name" : "/var/log/hive",
-              "type" : "not_exist"
-            },
-            {
-              "name" : "/var/log/nagios",
-              "type" : "not_exist"
-            }
-          ],
-          "javaProcs" : [ ],
-          "rpms" : [
-            {
-              "name" : "yum",
-              "installed" : true,
-              "version" : "yum-3.2.22-39.el5"
-            },
-            {
-              "name" : "rpm",
-              "installed" : true,
-              "version" : "rpm-4.4.2.3-27.el5"
-            },
-            {
-              "name" : "openssl",
-              "installed" : true,
-              "version" : "openssl-0.9.8e-22.el5\nopenssl-0.9.8e-22.el5"
-            },
-            {
-              "name" : "curl",
-              "installed" : true,
-              "version" : "curl-7.15.5-15.el5\ncurl-7.15.5-15.el5"
-            },
-            {
-              "name" : "wget",
-              "installed" : false
-            },
-            {
-              "name" : "net-snmp",
-              "installed" : true,
-              "version" : "net-snmp-5.3.2.2-17.el5"
-            },
-            {
-              "name" : "ntpd",
-              "installed" : false
-            },
-            {
-              "name" : "ruby",
-              "installed" : true,
-              "version" : "ruby-1.8.5-24.el5"
-            },
-            {
-              "name" : "puppet",
-              "installed" : false
-            },
-            {
-              "name" : "nagios",
-              "installed" : false
-            },
-            {
-              "name" : "ganglia",
-              "installed" : false
-            },
-            {
-              "name" : "passenger",
-              "installed" : false
-            },
-            {
-              "name" : "hadoop",
-              "installed" : false
-            },
-            {
-              "name" : "hbase",
-              "installed" : false
-            },
-            {
-              "name" : "oozie",
-              "installed" : false
-            },
-            {
-              "name" : "sqoop",
-              "installed" : false
-            },
-            {
-              "name" : "pig",
-              "installed" : false
-            },
-            {
-              "name" : "zookeeper",
-              "installed" : false
-            },
-            {
-              "name" : "hive",
-              "installed" : false
-            },
-            {
-              "name" : "libconfuse",
-              "installed" : false
-            },
-            {
-              "name" : "postgresql",
-              "installed" : true,
-              "version" : "postgresql-8.1.23-1.el5_7.3"
-            },
-            {
-              "name" : "httpd",
-              "installed" : true,
-              "version" : "httpd-2.2.3-63.el5"
-            },
-            {
-              "name" : "apache2",
-              "installed" : false
-            },
-            {
-              "name" : "http-server",
-              "installed" : false
-            }
-          ],
-          "varRunHadoopPidCount" : 0,
-          "varLogHadoopLogCount" : 0,
-          "etcAlternativesConf" : [ ],
-          "repoInfo" : "Loaded plugins: amazon-id, fastestmirror, rhui-lb, security\nrepo id                             repo name                             status\nAMBARI.dev-1.x                      Ambari 1.x                                6\nHDP-UTILS-1.1.0.15                  Hortonworks Data Platform Utils Versi    51\nepel                                Extra Packages for Enterprise Linux 5  7243\nrhui-us-east-client-config-server-5 Red Hat Update Infrastructure 2.0 Cli     1\nrhui-us-east-rhel-server            Red Hat Enterprise Linux Server 5 (RP 14819\nrepolist: 22120\n"
-        },
-        "last_registration_time" : 1358871040696,
-        "total_mem" : 7864320
-      }
-    },
-    {
-      "href" : "http://ec2-107-20-107-224.compute-1.amazonaws.com:8080/api/v1/hosts/domU-12-31-39-14-04-91.compute-1.internal",
-      "Hosts" : {
-        "host_status" : "HEALTHY",
-        "public_host_name" : "ec2-107-20-107-224.compute-1.amazonaws.com",
-        "cpu_count" : 2,
-        "rack_info" : "/default-rack",
-        "host_health_report" : "",
-        "os_arch" : "x86_64",
-        "host_name" : "domU-12-31-39-14-04-91.compute-1.internal",
-        "disk_info" : [
-          {
-            "available" : "3400492",
-            "used" : "2444268",
-            "percent" : "42%",
-            "size" : "5904748",
-            "type" : "ext3",
-            "mountpoint" : "/"
-          },
-          {
-            "available" : "3932160",
-            "used" : "0",
-            "percent" : "0%",
-            "size" : "3932160",
-            "type" : "tmpfs",
-            "mountpoint" : "/dev/shm"
-          },
-          {
-            "available" : "411234588",
-            "used" : "203012",
-            "percent" : "1%",
-            "size" : "433455904",
-            "type" : "ext3",
-            "mountpoint" : "/grid/0"
-          },
-          {
-            "available" : "411234588",
-            "used" : "203012",
-            "percent" : "1%",
-            "size" : "433455904",
-            "type" : "ext3",
-            "mountpoint" : "/grid/1"
-          }
-        ],
-        "ip" : "10.206.7.95",
-        "os_type" : "redhat5",
-        "last_heartbeat_time" : 1358871154070,
-        "host_state" : "HEALTHY",
-        "last_agent_env" : {
-          "paths" : [
-            {
-              "name" : "/etc/hadoop",
-              "type" : "directory"
-            },
-            {
-              "name" : "/etc/hadoop/conf",
-              "type" : "file"
-            },
-            {
-              "name" : "/etc/hbase",
-              "type" : "not_exist"
-            },
-            {
-              "name" : "/etc/hcatalog",
-              "type" : "not_exist"
-            },
-            {
-              "name" : "/etc/hive",
-              "type" : "not_exist"
-            },
-            {
-              "name" : "/etc/oozie",
-              "type" : "not_exist"
-            },
-            {
-              "name" : "/etc/sqoop",
-              "type" : "not_exist"
-            },
-            {
-              "name" : "/etc/ganglia",
-              "type" : "not_exist"
-            },
-            {
-              "name" : "/etc/nagios",
-              "type" : "not_exist"
-            },
-            {
-              "name" : "/var/run/hadoop",
-              "type" : "not_exist"
-            },
-            {
-              "name" : "/var/run/zookeeper",
-              "type" : "not_exist"
-            },
-            {
-              "name" : "/var/run/hbase",
-              "type" : "not_exist"
-            },
-            {
-              "name" : "/var/run/templeton",
-              "type" : "not_exist"
-            },
-            {
-              "name" : "/var/run/oozie",
-              "type" : "not_exist"
-            },
-            {
-              "name" : "/var/log/hadoop",
-              "type" : "not_exist"
-            },
-            {
-              "name" : "/var/log/zookeeper",
-              "type" : "not_exist"
-            },
-            {
-              "name" : "/var/log/hbase",
-              "type" : "not_exist"
-            },
-            {
-              "name" : "/var/run/templeton",
-              "type" : "not_exist"
-            },
-            {
-              "name" : "/var/log/hive",
-              "type" : "not_exist"
-            },
-            {
-              "name" : "/var/log/nagios",
-              "type" : "not_exist"
-            }
-          ],
-          "javaProcs" : [
-            {
-              "user" : "root",
-              "pid" : 2283,
-              "command" : "/bin/sh -c /usr/jdk64/jdk1.6.0_31/bin/java -server -XX:NewRatio=2 -XX:+UseConcMarkSweepGC -Xms512m -Xmx2048m -cp /etc/ambari-server/conf:/usr/lib/ambari-server/*:/usr/kerberos/sbin:/usr/kerberos/bin:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin:/usr/lib/ambari-server/* org.apache.ambari.server.controller.AmbariServer >/var/log/ambari-server/ambari-server.out 2>&1",
-              "hadoop" : true
-            },
-            {
-              "user" : "root",
-              "pid" : 2284,
-              "command" : "/usr/jdk64/jdk1.6.0_31/bin/java -server -XX:NewRatio=2 -XX:+UseConcMarkSweepGC -Xms512m -Xmx2048m -cp /etc/ambari-server/conf:/usr/lib/ambari-server/*:/usr/kerberos/sbin:/usr/kerberos/bin:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin:/usr/lib/ambari-server/* org.apache.ambari.server.controller.AmbariServer",
-              "hadoop" : false
-            }
-          ],
-          "rpms" : [
-            {
-              "name" : "yum",
-              "installed" : true,
-              "version" : "yum-3.2.22-39.el5"
-            },
-            {
-              "name" : "rpm",
-              "installed" : true,
-              "version" : "rpm-4.4.2.3-27.el5"
-            },
-            {
-              "name" : "openssl",
-              "installed" : true,
-              "version" : "openssl-0.9.8e-22.el5\nopenssl-0.9.8e-22.el5"
-            },
-            {
-              "name" : "curl",
-              "installed" : true,
-              "version" : "curl-7.15.5-15.el5\ncurl-7.15.5-15.el5"
-            },
-            {
-              "name" : "wget",
-              "installed" : false
-            },
-            {
-              "name" : "net-snmp",
-              "installed" : true,
-              "version" : "net-snmp-5.3.2.2-17.el5"
-            },
-            {
-              "name" : "ntpd",
-              "installed" : false
-            },
-            {
-              "name" : "ruby",
-              "installed" : true,
-              "version" : "ruby-1.8.5-24.el5"
-            },
-            {
-              "name" : "puppet",
-              "installed" : false
-            },
-            {
-              "name" : "nagios",
-              "installed" : false
-            },
-            {
-              "name" : "ganglia",
-              "installed" : false
-            },
-            {
-              "name" : "passenger",
-              "installed" : false
-            },
-            {
-              "name" : "hadoop",
-              "installed" : false
-            },
-            {
-              "name" : "hbase",
-              "installed" : false
-            },
-            {
-              "name" : "oozie",
-              "installed" : false
-            },
-            {
-              "name" : "sqoop",
-              "installed" : false
-            },
-            {
-              "name" : "pig",
-              "installed" : false
-            },
-            {
-              "name" : "zookeeper",
-              "installed" : false
-            },
-            {
-              "name" : "hive",
-              "installed" : false
-            },
-            {
-              "name" : "libconfuse",
-              "installed" : false
-            },
-            {
-              "name" : "postgresql",
-              "installed" : true,
-              "version" : "postgresql-8.1.23-6.el5_8"
-            },
-            {
-              "name" : "httpd",
-              "installed" : true,
-              "version" : "httpd-2.2.3-63.el5"
-            },
-            {
-              "name" : "apache2",
-              "installed" : false
-            },
-            {
-              "name" : "http-server",
-              "installed" : false
-            }
-          ],
-          "varRunHadoopPidCount" : 0,
-          "varLogHadoopLogCount" : 0,
-          "etcAlternativesConf" : [ ],
-          "repoInfo" : "Loaded plugins: amazon-id, fastestmirror, rhui-lb, security\nrepo id                             repo name                             status\nAMBARI.dev-1.x                      Ambari 1.x                                6\nHDP-UTILS-1.1.0.15                  Hortonworks Data Platform Utils Versi    51\nepel                                Extra Packages for Enterprise Linux 5  7243\nrhui-us-east-client-config-server-5 Red Hat Update Infrastructure 2.0 Cli     1\nrhui-us-east-rhel-server            Red Hat Enterprise Linux Server 5 (RP 14819\nrepolist: 22120\n"
-        },
-        "last_registration_time" : 1358870784262,
-        "total_mem" : 7864320
-      }
-    }
-  ]
-}
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/2_hosts/poll_1.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/2_hosts/poll_1.json
deleted file mode 100644
index 049ebf7..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/2_hosts/poll_1.json
+++ /dev/null
@@ -1,56 +0,0 @@
-{
-  "href" : "http://localhost:8080/api/clusters/mycluster/requests/1",
-  "Requests" : {
-    "id" : 1
-  },
-  "tasks" : [
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/1/tasks/1",
-      "Tasks" : {
-        "id" : "1",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "PENDING",
-        "command" : "INSTALL",
-        "start_time" : "-1",
-        "role" : "DATANODE",
-        "stderr" : "",
-        "host_name" : "localhost.localdomain",
-        "stage_id" : "1"
-      }
-    },
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/1/tasks/2",
-      "Tasks" : {
-        "id" : "2",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "PENDING",
-        "command" : "INSTALL",
-        "start_time" : "-1",
-        "role" : "NAMENODE",
-        "stderr" : "",
-        "host_name" : "localhost.localdomain",
-        "stage_id" : "1"
-      }
-    },
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/1/tasks/3",
-      "Tasks" : {
-        "id" : "3",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "PENDING",
-        "command" : "INSTALL",
-        "start_time" : "-1",
-        "role" : "SECONDARY_NAMENODE",
-        "stderr" : "",
-        "host_name" : "host2",
-        "stage_id" : "1"
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/2_hosts/poll_2.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/2_hosts/poll_2.json
deleted file mode 100644
index 68e5ee9..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/2_hosts/poll_2.json
+++ /dev/null
@@ -1,56 +0,0 @@
-{
-  "href" : "http://localhost:8080/api/clusters/mycluster/requests/1",
-  "Requests" : {
-    "id" : 1
-  },
-  "tasks" : [
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/1/tasks/1",
-      "Tasks" : {
-        "id" : "1",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "QUEUED",
-        "command" : "INSTALL",
-        "start_time" : "-1",
-        "role" : "DATANODE",
-        "stderr" : "",
-        "host_name" : "localhost.localdomain",
-        "stage_id" : "1"
-      }
-    },
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/1/tasks/2",
-      "Tasks" : {
-        "id" : "2",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "QUEUED",
-        "command" : "INSTALL",
-        "start_time" : "-1",
-        "role" : "NAMENODE",
-        "stderr" : "",
-        "host_name" : "localhost.localdomain",
-        "stage_id" : "1"
-      }
-    },
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/1/tasks/3",
-      "Tasks" : {
-        "id" : "3",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "QUEUED",
-        "command" : "INSTALL",
-        "start_time" : "-1",
-        "role" : "SECONDARY_NAMENODE",
-        "stderr" : "",
-        "host_name" : "host2",
-        "stage_id" : "1"
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/2_hosts/poll_3.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/2_hosts/poll_3.json
deleted file mode 100644
index 92afee9..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/2_hosts/poll_3.json
+++ /dev/null
@@ -1,56 +0,0 @@
-{
-  "href" : "http://localhost:8080/api/clusters/mycluster/requests/1",
-  "Requests" : {
-    "id" : 1
-  },
-  "tasks" : [
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/1/tasks/1",
-      "Tasks" : {
-        "id" : "1",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "QUEUED",
-        "command" : "INSTALL",
-        "start_time" : "-1",
-        "role" : "DATANODE",
-        "stderr" : "",
-        "host_name" : "localhost.localdomain",
-        "stage_id" : "1"
-      }
-    },
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/1/tasks/2",
-      "Tasks" : {
-        "id" : "2",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "IN_PROGRESS",
-        "command" : "INSTALL",
-        "start_time" : "-1",
-        "role" : "NAMENODE",
-        "stderr" : "",
-        "host_name" : "localhost.localdomain",
-        "stage_id" : "1"
-      }
-    },
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/1/tasks/3",
-      "Tasks" : {
-        "id" : "3",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "IN_PROGRESS",
-        "command" : "INSTALL",
-        "start_time" : "-1",
-        "role" : "SECONDARY_NAMENODE",
-        "stderr" : "",
-        "host_name" : "host2",
-        "stage_id" : "1"
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/2_hosts/poll_4.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/2_hosts/poll_4.json
deleted file mode 100644
index a702a73..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/2_hosts/poll_4.json
+++ /dev/null
@@ -1,56 +0,0 @@
-{
-  "href" : "http://localhost:8080/api/clusters/mycluster/requests/1",
-  "Requests" : {
-    "id" : 1
-  },
-  "tasks" : [
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/1/tasks/1",
-      "Tasks" : {
-        "id" : "1",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "IN_PROGRESS",
-        "command" : "INSTALL",
-        "start_time" : "-1",
-        "role" : "DATANODE",
-        "stderr" : "",
-        "host_name" : "localhost.localdomain",
-        "stage_id" : "1"
-      }
-    },
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/1/tasks/2",
-      "Tasks" : {
-        "id" : "2",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "COMPLETED",
-        "command" : "INSTALL",
-        "start_time" : "-1",
-        "role" : "NAMENODE",
-        "stderr" : "",
-        "host_name" : "localhost.localdomain",
-        "stage_id" : "1"
-      }
-    },
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/1/tasks/3",
-      "Tasks" : {
-        "id" : "3",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "IN_PROGRESS",
-        "command" : "INSTALL",
-        "start_time" : "-1",
-        "role" : "SECONDARY_NAMENODE",
-        "stderr" : "",
-        "host_name" : "host2",
-        "stage_id" : "1"
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/2_hosts/poll_5.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/2_hosts/poll_5.json
deleted file mode 100644
index c5213ef..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/2_hosts/poll_5.json
+++ /dev/null
@@ -1,56 +0,0 @@
-{
-  "href" : "http://localhost:8080/api/clusters/mycluster/requests/1",
-  "Requests" : {
-    "id" : 1
-  },
-  "tasks" : [
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/1/tasks/1",
-      "Tasks" : {
-        "id" : "1",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "COMPLETED",
-        "command" : "INSTALL",
-        "start_time" : "-1",
-        "role" : "DATANODE",
-        "stderr" : "",
-        "host_name" : "localhost.localdomain",
-        "stage_id" : "1"
-      }
-    },
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/1/tasks/2",
-      "Tasks" : {
-        "id" : "2",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "COMPLETED",
-        "command" : "INSTALL",
-        "start_time" : "-1",
-        "role" : "NAMENODE",
-        "stderr" : "",
-        "host_name" : "localhost.localdomain",
-        "stage_id" : "1"
-      }
-    },
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/1/tasks/3",
-      "Tasks" : {
-        "id" : "3",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "COMPLETED",
-        "command" : "INSTALL",
-        "start_time" : "-1",
-        "role" : "SECONDARY_NAMENODE",
-        "stderr" : "",
-        "host_name" : "host2",
-        "stage_id" : "1"
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/2_hosts/poll_5_failed.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/2_hosts/poll_5_failed.json
deleted file mode 100644
index 7e204e0..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/2_hosts/poll_5_failed.json
+++ /dev/null
@@ -1,57 +0,0 @@
-{
-  "href" : "http://localhost:8080/api/clusters/mycluster/requests/1",
-  "Requests" : {
-    "id" : 1
-  },
-  "tasks" : [
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/1/tasks/1",
-      "Tasks" : {
-        "id" : "1",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "COMPLETED",
-        "command" : "INSTALL",
-        "start_time" : "-1",
-        "role" : "DATANODE",
-        "stderr" : "",
-        "host_name" : "localhost.localdomain",
-        "stage_id" : "1"
-      }
-    },
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/1/tasks/2",
-      "Tasks" : {
-        "id" : "2",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "FAILED",
-        "command" : "INSTALL",
-        "start_time" : "-1",
-        "role" : "NAMENODE",
-        "stderr" : "",
-        "host_name" : "localhost.localdomain",
-        "stage_id" : "1",
-        "sf" : "100"
-      }
-    },
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/1/tasks/3",
-      "Tasks" : {
-        "id" : "3",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "COMPLETED",
-        "command" : "INSTALL",
-        "start_time" : "-1",
-        "role" : "SECONDARY_NAMENODE",
-        "stderr" : "",
-        "host_name" : "host2",
-        "stage_id" : "1"
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/2_hosts/poll_6.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/2_hosts/poll_6.json
deleted file mode 100644
index dd22e82..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/2_hosts/poll_6.json
+++ /dev/null
@@ -1,40 +0,0 @@
-{
-  "href" : "http://localhost:8080/api/clusters/mycluster/requests/2",
-  "Requests" : {
-    "id" : 2
-  },
-  "tasks" : [
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/2/tasks/1",
-      "Tasks" : {
-        "id" : "1",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "QUEUED",
-        "command" : "START",
-        "start_time" : "-1",
-        "role" : "DATANODE",
-        "stderr" : "",
-        "host_name" : "localhost.localdomain",
-        "stage_id" : "1"
-      }
-    },
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/2/tasks/2",
-      "Tasks" : {
-        "id" : "2",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "QUEUED",
-        "command" : "START",
-        "start_time" : "-1",
-        "role" : "NAMENODE",
-        "stderr" : "",
-        "host_name" : "localhost.localdomain",
-        "stage_id" : "1"
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/2_hosts/poll_7.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/2_hosts/poll_7.json
deleted file mode 100644
index 6f1dae8..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/2_hosts/poll_7.json
+++ /dev/null
@@ -1,40 +0,0 @@
-{
-  "href" : "http://localhost:8080/api/clusters/mycluster/requests/2",
-  "Requests" : {
-    "id" : 2
-  },
-  "tasks" : [
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/2/tasks/1",
-      "Tasks" : {
-        "id" : "1",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "QUEUED",
-        "command" : "START",
-        "start_time" : "-1",
-        "role" : "DATANODE",
-        "stderr" : "",
-        "host_name" : "localhost.localdomain",
-        "stage_id" : "1"
-      }
-    },
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/2/tasks/2",
-      "Tasks" : {
-        "id" : "2",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "IN_PROGRESS",
-        "command" : "START",
-        "start_time" : "-1",
-        "role" : "NAMENODE",
-        "stderr" : "",
-        "host_name" : "localhost.localdomain",
-        "stage_id" : "1"
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/2_hosts/poll_8.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/2_hosts/poll_8.json
deleted file mode 100644
index b6d3e21..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/2_hosts/poll_8.json
+++ /dev/null
@@ -1,40 +0,0 @@
-{
-  "href" : "http://localhost:8080/api/clusters/mycluster/requests/2",
-  "Requests" : {
-    "id" : 2
-  },
-  "tasks" : [
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/2/tasks/1",
-      "Tasks" : {
-        "id" : "1",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "IN_PROGRESS",
-        "command" : "START",
-        "start_time" : "-1",
-        "role" : "DATANODE",
-        "stderr" : "",
-        "host_name" : "localhost.localdomain",
-        "stage_id" : "1"
-      }
-    },
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/2/tasks/2",
-      "Tasks" : {
-        "id" : "2",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "COMPLETED",
-        "command" : "START",
-        "start_time" : "-1",
-        "role" : "NAMENODE",
-        "stderr" : "",
-        "host_name" : "localhost.localdomain",
-        "stage_id" : "1"
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/2_hosts/poll_9.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/2_hosts/poll_9.json
deleted file mode 100644
index 47284c3..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/2_hosts/poll_9.json
+++ /dev/null
@@ -1,40 +0,0 @@
-{
-  "href" : "http://localhost:8080/api/clusters/mycluster/requests/2",
-  "Requests" : {
-    "id" : 2
-  },
-  "tasks" : [
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/2/tasks/1",
-      "Tasks" : {
-        "id" : "1",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "COMPLETED",
-        "command" : "START",
-        "start_time" : "-1",
-        "role" : "DATANODE",
-        "stderr" : "",
-        "host_name" : "localhost.localdomain",
-        "stage_id" : "1"
-      }
-    },
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/2/tasks/2",
-      "Tasks" : {
-        "id" : "2",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "COMPLETED",
-        "command" : "START",
-        "start_time" : "-1",
-        "role" : "NAMENODE",
-        "stderr" : "",
-        "host_name" : "localhost.localdomain",
-        "stage_id" : "1"
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/5_hosts/poll_1.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/5_hosts/poll_1.json
deleted file mode 100644
index fdfb408..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/5_hosts/poll_1.json
+++ /dev/null
@@ -1,405 +0,0 @@
-{
-  "href" : "http://ambari:8080/api/clusters/mycluster/requests/1?fields=tasks/*",
-  "Requests" : {
-    "id" : 1,
-    "cluster_name" : "mycluster"
-  },
-  "tasks" : [
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/5",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host1",
-        "id" : 5,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/20",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 20,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "PIG",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/15",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 15,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/3",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 3,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "OOZIE_SERVER",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/7",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host1",
-        "id" : 7,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/14",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host4",
-        "id" : 14,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/13",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host4",
-        "id" : 13,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/1",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 1,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/18",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 18,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "MAPREDUCE_CLIENT",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/8",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 8,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/6",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host1",
-        "id" : 6,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/2",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 2,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "JOBTRACKER",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/4",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 4,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "SECONDARY_NAMENODE",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/19",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 19,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "OOZIE_CLIENT",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/10",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 10,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "NAGIOS_SERVER",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/12",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host4",
-        "id" : 12,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/21",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 21,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "SQOOP",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/9",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 9,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_SERVER",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/22",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 22,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/16",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 16,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/11",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 11,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "NAMENODE",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/17",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 17,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "HDFS_CLIENT",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/5_hosts/poll_10.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/5_hosts/poll_10.json
deleted file mode 100644
index b6aae8e..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/5_hosts/poll_10.json
+++ /dev/null
@@ -1,405 +0,0 @@
-{
-  "href" : "http://ambari:8080/api/clusters/mycluster/requests/2?fields=tasks/*",
-  "Requests" : {
-    "id" : 2,
-    "cluster_name" : "mycluster"
-  },
-  "tasks" : [
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/33",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.72 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.72 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 33,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864090181,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/30",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}a973d9bcff056aeff7f22221886c84b7' to '{md5}df2d55356b238461af57fe22ad993e4d'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}54edf0ba61f6501cc49c0d7788b266b1' to '{md5}b25bda7a405235227d20732f0972c5f6'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.00 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}a973d9bcff056aeff7f22221886c84b7' to '{md5}df2d55356b238461af57fe22ad993e4d'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}54edf0ba61f6501cc49c0d7788b266b1' to '{md5}b25bda7a405235227d20732f0972c5f6'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.00 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 30,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "DATANODE",
-        "start_time" : 1352864090068,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/38",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::check_safemode]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/returns: Safe mode is OFF\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::check_safemode]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::create_file]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -rm /tmp/id280a7781_date381312; hadoop fs -put /etc/passwd /tmp/id280a7781_date381312]/Exec[hadoop --config /etc/hadoop/conf fs -rm /tmp/id280a7781_date381312; hadoop fs -put /etc/passwd /tmp/id280a7781_date381312]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e /tmp/id280a7781_date381312]/Exec[hadoop --config /etc/hadoop/conf fs -test -e /tmp/id280a7781_date381312]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 10.35 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::check_safemode]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/returns: Safe mode is OFF\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::check_safemode]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::create_file]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -rm /tmp/id280a7781_date381312; hadoop fs -put /etc/passwd /tmp/id280a7781_date381312]/Exec[hadoop --config /etc/hadoop/conf fs -rm /tmp/id280a7781_date381312; hadoop fs -put /etc/passwd /tmp/id280a7781_date381312]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e /tmp/id280a7781_date381312]/Exec[hadoop --config /etc/hadoop/conf fs -test -e /tmp/id280a7781_date381312]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 10.35 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 38,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "HDFS_SERVICE_CHECK",
-        "start_time" : 1352864269616,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/26",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 7.68 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 7.68 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 26,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864089836,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/24",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}a39a2689e76538c6d9090b00ceb04eb0' to '{md5}9786ed97b221e37075bdb64400bc804a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}c14eb8ab2bb5ab75789c875534ab64f4' to '{md5}9684de67c2a8fa0f7292418d6c0c1651'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.78 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}a39a2689e76538c6d9090b00ceb04eb0' to '{md5}9786ed97b221e37075bdb64400bc804a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}c14eb8ab2bb5ab75789c875534ab64f4' to '{md5}9684de67c2a8fa0f7292418d6c0c1651'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.78 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 24,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "DATANODE",
-        "start_time" : 1352864089661,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/35",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}4673b67e078cc9d84ffc4873e5198edf' to '{md5}654e54e7c3f58aa3d37d07110ad63bb5'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}d3b2d5e47669c948fccb907fa32c2b55' to '{md5}0e079fd5bc7cc43a35b60012c9ee00d9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.45 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}4673b67e078cc9d84ffc4873e5198edf' to '{md5}654e54e7c3f58aa3d37d07110ad63bb5'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}d3b2d5e47669c948fccb907fa32c2b55' to '{md5}0e079fd5bc7cc43a35b60012c9ee00d9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.45 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 35,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "SECONDARY_NAMENODE",
-        "start_time" : 1352864269474,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/40",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $lzo_enabled at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:37 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ensure at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:76 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}827a6e7bd4233d4dc82b20761aed1e30' to '{md5}4e59b973cec0811615008a580244bcdb'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp::Exec[/bin/sh -c 'cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz && mkdir -p /var/tmp/oozie && chown oozie:hadoop /var/tmp/oozie && cd /var/tmp/oozie' && su - oozie -c '/usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 /usr/lib/hadoop/ -extjs /usr/share/HDP-oozie/ext.zip -jars /usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run ; hadoop dfs -put /usr/lib/oozie/share share ; hadoop dfs -chmod -R 755 /user/oozie/share && /usr/lib/oozie/bin/oozie-start.sh' ]/Exec[/bin/sh -c 'cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz && mkdir -p /var/tmp/oozie && chown oozie:hadoop /var/tmp/oozie && cd /var/tmp/oozie' && su - oozie -c '/usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 /usr/lib/hadoop/ -extjs /usr/share/HDP-oozie/ext.zip -jars /usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run ; hadoop dfs -put /usr/lib/oozie/share share ; hadoop dfs -chmod -R 755 /user/oozie/share && /usr/lib/oozie/bin/oozie-start.sh' ]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 36.67 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $lzo_enabled at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:37 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ensure at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:76 is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}827a6e7bd4233d4dc82b20761aed1e30' to '{md5}4e59b973cec0811615008a580244bcdb'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp::Exec[/bin/sh -c 'cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz && mkdir -p /var/tmp/oozie && chown oozie:hadoop /var/tmp/oozie && cd /var/tmp/oozie' && su - oozie -c '/usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 /usr/lib/hadoop/ -extjs /usr/share/HDP-oozie/ext.zip -jars /usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run ; hadoop dfs -put /usr/lib/oozie/share share ; hadoop dfs -chmod -R 755 /user/oozie/share && /usr/lib/oozie/bin/oozie-start.sh' ]/Exec[/bin/sh -c 'cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz && mkdir -p /var/tmp/oozie && chown oozie:hadoop /var/tmp/oozie && cd /var/tmp/oozie' && su - oozie -c '/usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 /usr/lib/hadoop/ -extjs /usr/share/HDP-oozie/ext.zip -jars /usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run ; hadoop dfs -put /usr/lib/oozie/share share ; hadoop dfs -chmod -R 755 /user/oozie/share && /usr/lib/oozie/bin/oozie-start.sh' ]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 36.67 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 40,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "OOZIE_SERVER",
-        "start_time" : 1352864331712,
-        "stage_id" : 3
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/31",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.15 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 31,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864090105,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/27",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[gmetad]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Gmetad/Hdp::Exec[hdp-gmetad service]/Exec[hdp-gmetad service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 8.14 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[gmetad]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Gmetad/Hdp::Exec[hdp-gmetad service]/Exec[hdp-gmetad service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 8.14 
seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 27,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_SERVER",
-        "start_time" : 1352864089883,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/43",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: Sqoop 1.4.2.1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: git commit id ea3b95785b3daf62c68f1eb0e645636acc00d0c2\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: Compiled by jenkins on Sat Nov 10 19:14:01 PST 2012\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: Sqoop 1.4.2.1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: git commit id ea3b95785b3daf62c68f1eb0e645636acc00d0c2\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: Compiled by jenkins on Sat Nov 10 19:14:01 PST 2012\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.15 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 43,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "SQOOP_SERVICE_CHECK",
-        "start_time" : 1352864331830,
-        "stage_id" : 3
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/42",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Hdp-hadoop::Exec-hadoop[pig::service_check::create_file]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfs -rmr pigsmoke.out passwd; hadoop dfs -put /etc/passwd passwd ]/Exec[hadoop --config /etc/hadoop/conf dfs -rmr pigsmoke.out passwd; hadoop dfs -put /etc/passwd passwd ]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/File[/tmp/pigSmoke.sh]/ensure: defined content as '{md5}feac231e484c08e3bc5f83d0ee189a8c'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:39:58,367 [main] INFO  org.apache.pig.Main - Apache Pig version 0.10.0.1 (rexported) compiled Nov 10 2012, 19:10:20\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:39:58,368 [main] INFO  org.apache.pig.Main - Logging error messages to: /home/ambari_qa/pig_1352864398364.log\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:39:58,789 [main] INFO  org.apache.pig.backend.hadoop.executionengine.HExecutionEngine - Connecting to hadoop file system at: hdfs://host5:8020\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:39:59,058 [main] INFO  org.apache.pig.backend.hadoop.executionengine.HExecutionEngine - Connecting to map-reduce job tracker at: host3:50300\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:39:59,907 [main] INFO  org.apache.pig.tools.pigstats.ScriptState - Pig features used in the script: UNKNOWN\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:00,158 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MRCompiler - File concatenation threshold: 100 optimistic? 
false\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:00,183 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MultiQueryOptimizer - MR plan size before optimization: 1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:00,183 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MultiQueryOptimizer - MR plan size after optimization: 1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:00,288 [main] INFO  org.apache.pig.tools.pigstats.ScriptState - Pig script settings are added to the job\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:00,312 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.JobControlCompiler - mapred.job.reduce.markreset.buffer.percent is not set, set to default 0.3\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:00,315 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.JobControlCompiler - creating jar file Job4537005419718909074.jar\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,356 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.JobControlCompiler - jar file Job4537005419718909074.jar created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,377 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.JobControlCompiler - Setting up single store job\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,432 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - 1 map-reduce job(s) waiting for submission.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,932 [Thread-6] INFO  org.apache.hadoop.mapreduce.lib.input.FileInputFormat - Total input paths to process : 1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,933 [Thread-6] INFO  org.apache.pig.backend.hadoop.executionengine.util.MapRedUtil - Total input paths to process : 1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,934 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - 0% complete\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,971 [Thread-6] WARN  org.apache.hadoop.io.compress.snappy.LoadSnappy - Snappy native library is available\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,973 [Thread-6] INFO  org.apache.hadoop.util.NativeCodeLoader - Loaded the native-hadoop library\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,973 [Thread-6] INFO  org.apache.hadoop.io.compress.snappy.LoadSnappy - Snappy native library loaded\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,977 
[Thread-6] INFO  org.apache.pig.backend.hadoop.executionengine.util.MapRedUtil - Total input paths (combined) to process : 1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:06,811 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - HadoopJobId: job_201211132238_0002\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:06,812 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - More information at: http://host3:50030/jobdetails.jsp?jobid=job_201211132238_0002\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:17,380 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - 50% complete\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:21,432 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - 100% complete\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:21,434 [main] INFO  org.apache.pig.tools.pigstats.SimplePigStats - Script Statistics: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: HadoopVersion\tPigVersion\tUserId\tStartedAt\tFinishedAt\tFeatures\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 1.1.0.1\t0.10.0.1\tambari_qa\t2012-11-13 22:40:00\t2012-11-13 22:40:21\tUNKNOWN\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Success!\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Job Stats (time in seconds):\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: JobId\tMaps\tReduces\tMaxMapTime\tMinMapTIme\tAvgMapTime\tMaxReduceTime\tMinReduceTime\tAvgReduceTime\tAlias\tFeature\tOutputs\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: job_201211132238_0002\t1\t0\t3\t3\t3\t0\t0\t0\tA,B\tMAP_ONLY\thdfs://host5:8020/user/ambari_qa/pigsmoke.out,\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Input(s):\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Successfully read 36 records (2137 bytes) from: \"hdfs://host5:8020/user/ambari_qa/passwd\"\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Output(s):\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Successfully stored 36 records (236 bytes) in: \"hdfs://host5:8020/user/ambari_qa/pigsmoke.out\"\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Counters:\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Total records written : 36\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Total bytes written : 236\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Spillable Memory Manager spill count : 0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Total bags proactively spilled: 0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Total records proactively spilled: 0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Job DAG:\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: job_201211132238_0002\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:21,446 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - Success!\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Hdp-hadoop::Exec-hadoop[pig::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e pigsmoke.out]/Exec[hadoop --config /etc/hadoop/conf fs -test -e pigsmoke.out]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 32.06 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Total records proactively spilled: 0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Job DAG:\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: job_201211132238_0002\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:21,446 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - Success!\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Hdp-hadoop::Exec-hadoop[pig::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e pigsmoke.out]/Exec[hadoop --config /etc/hadoop/conf fs -test -e pigsmoke.out]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 32.06 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 42,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "PIG_SERVICE_CHECK",
-        "start_time" : 1352864331815,
-        "stage_id" : 3
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/36",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}9786ed97b221e37075bdb64400bc804a' to '{md5}8e06d7ec24fe5acd81917162d58857db'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}30e43dbdb225dad740d632ecc8f6ae11' to '{md5}558aadf67e4d29865a6d935076d3868b'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.20 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}9786ed97b221e37075bdb64400bc804a' to '{md5}8e06d7ec24fe5acd81917162d58857db'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}30e43dbdb225dad740d632ecc8f6ae11' to '{md5}558aadf67e4d29865a6d935076d3868b'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.20 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 36,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352864269562,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/34",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}63d8feab1255e45d9549ccea14f687c4' to '{md5}4673b67e078cc9d84ffc4873e5198edf'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start jobtracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start jobtracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[historyserver]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start historyserver']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start historyserver']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}cccc03b9f3384eac76957c7fe2f12849' to '{md5}07e946dbf4ae6632034ee6715a085b92'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 9.76 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}63d8feab1255e45d9549ccea14f687c4' to '{md5}4673b67e078cc9d84ffc4873e5198edf'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start jobtracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start jobtracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[historyserver]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start historyserver']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start historyserver']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}cccc03b9f3384eac76957c7fe2f12849' to '{md5}07e946dbf4ae6632034ee6715a085b92'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 9.76 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 34,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "JOBTRACKER",
-        "start_time" : 1352864269447,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/28",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hostgroups.cfg]/content: content changed '{md5}ffff62426c4f7a42c1cb1ca44b324dad' to '{md5}21ad9f95dd93ee39fc87db07b7ea05be'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hosts.cfg]/content: content changed '{md5}fdcc51e399dd2381778a163933ef2beb' to '{md5}afbfd32db940db5fff4701c964169c27'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Services/Service[nagios]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Services/Service[nagios]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 8.78 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hostgroups.cfg]/content: content changed '{md5}ffff62426c4f7a42c1cb1ca44b324dad' to '{md5}21ad9f95dd93ee39fc87db07b7ea05be'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hosts.cfg]/content: content changed '{md5}fdcc51e399dd2381778a163933ef2beb' to '{md5}afbfd32db940db5fff4701c964169c27'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Services/Service[nagios]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Services/Service[nagios]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 8.78 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 28,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "NAGIOS_SERVER",
-        "start_time" : 1352864089985,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/37",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}df2d55356b238461af57fe22ad993e4d' to '{md5}62a467fcccda8169de563170e39e3419'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}1a3769d695902dba39b5645fef3766e0' to '{md5}23097908e8b54f7dbc4d31b5d26d21e7'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.66 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}df2d55356b238461af57fe22ad993e4d' to '{md5}62a467fcccda8169de563170e39e3419'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}1a3769d695902dba39b5645fef3766e0' to '{md5}23097908e8b54f7dbc4d31b5d26d21e7'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.66 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 37,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352864269589,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/41",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::create_file]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfs -rmr mapredsmokeoutput mapredsmokeinput ; hadoop dfs -put /etc/passwd mapredsmokeinput ]/Exec[hadoop --config /etc/hadoop/conf dfs -rmr mapredsmokeoutput mapredsmokeinput ; hadoop dfs -put /etc/passwd mapredsmokeinput ]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:13 INFO input.FileInputFormat: Total input paths to process : 1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:13 WARN snappy.LoadSnappy: Snappy native library is available\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:13 INFO util.NativeCodeLoader: Loaded the native-hadoop library\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:13 INFO snappy.LoadSnappy: Snappy native library loaded\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:14 INFO mapred.JobClient: Running job: job_201211132238_0001\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:15 INFO mapred.JobClient:  map 0% reduce 0%\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:30 INFO mapred.JobClient:  map 100% reduce 0%\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:38 INFO mapred.JobClient:  map 100% reduce 33%\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:40 INFO mapred.JobClient:  map 100% reduce 100%\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient: Job complete: job_201211132238_0001\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient: Counters: 29\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   Job Counters \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Launched reduce tasks=1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar 
/usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     SLOTS_MILLIS_MAPS=6106\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Total time spent by all reduces waiting after reserving slots (ms)=0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Total time spent by all maps waiting after reserving slots (ms)=0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Launched map tasks=1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Data-local map tasks=1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     SLOTS_MILLIS_REDUCES=9332\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   File Output Format Counters \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Bytes 
Written=1845\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   FileSystemCounters\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     FILE_BYTES_READ=2095\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     HDFS_BYTES_READ=1893\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     FILE_BYTES_WRITTEN=117522\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     HDFS_BYTES_WRITTEN=1845\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   File Input Format Counters \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Bytes Read=1755\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput 
mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   Map-Reduce Framework\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map output materialized bytes=2095\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map input records=36\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce shuffle bytes=2095\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Spilled Records=122\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map output bytes=2003\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     CPU time spent (ms)=1920\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Total committed heap usage 
(bytes)=433913856\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Combine input records=62\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     SPLIT_RAW_BYTES=138\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce input records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce input groups=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Combine output records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Physical memory (bytes) snapshot=381779968\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce output records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  
wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Virtual memory (bytes) snapshot=2704003072\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map output records=62\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf fs -test -e mapredsmokeoutput]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 37.52 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce input records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce input groups=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Combine output records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Physical memory (bytes) snapshot=381779968\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce output records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Virtual memory (bytes) snapshot=2704003072\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map output records=62\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf fs -test -e mapredsmokeoutput]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 37.52 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 41,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "MAPREDUCE_SERVICE_CHECK",
-        "start_time" : 1352864331797,
-        "stage_id" : 3
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/44",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/File[/tmp/oozieSmoke.sh]/ensure: defined content as '{md5}a421efea655810cf298d18d7b5c1ebdd'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Deleted hdfs://host5:8020/user/ambari_qa/examples\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Deleted hdfs://host5:8020/user/ambari_qa/input-data\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Job ID : 0000002-121113223948436-oozie-oozi-W\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: ------------------------------------------------------------------------------------------------------------------------------------\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Workflow Name : map-reduce-wf\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: App Path      : hdfs://host5:8020/user/ambari_qa/examples/apps/map-reduce\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Status        : FAILED\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Run           : 0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: User          : ambari_qa\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Group         : -\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Created       : 2012-11-14 03:41\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Started       : 2012-11-14 03:41\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Last Modified : 2012-11-14 03:41\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Ended         : -\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: CoordAction ID: -\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Actions\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: ------------------------------------------------------------------------------------------------------------------------------------\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: ID                                                                            Status    Ext ID                 Ext Status Err Code  \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: ------------------------------------------------------------------------------------------------------------------------------------\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: 0000002-121113223948436-oozie-oozi-W@mr-node                                  FAILED    -                      -          EL_ERROR  \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: ------------------------------------------------------------------------------------------------------------------------------------\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Job ID : 0000002-121113223948436-oozie-oozi-W\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: ------------------------------------------------------------------------------------------------------------------------------------\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Workflow Name : map-reduce-wf\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: App Path      : hdfs://host5:8020/user/ambari_qa/examples/apps/map-reduce\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Status        : FAILED\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Run           : 0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: User          : ambari_qa\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Group         : -\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Created       : 2012-11-14 03:41\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Started       : 2012-11-14 03:41\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Last Modified : 2012-11-14 03:41\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Ended         : -\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: CoordAction ID: -\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Actions\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: ------------------------------------------------------------------------------------------------------------------------------------\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: ID                                                                            Status    Ext ID                 Ext Status Err Code  \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: ------------------------------------------------------------------------------------------------------------------------------------\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: 0000002-121113223948436-oozie-oozi-W@mr-node                                  FAILED    -                      -          EL_ERROR  \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: ------------------------------------------------------------------------------------------------------------------------------------\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: workflow_status=FAILED\u001B[0m\n\u001B[1;35merr: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: change from notrun to 0 failed: sh /tmp/oozieSmoke.sh /etc/oozie/conf /etc/hadoop/conf ambari_qa false /etc/security/keytabs/ambari_qa.headless.keytab EXAMPLE.COM   returned 1 instead of one of [0] at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/oozie/service_check.pp:62\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 50.53 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Ended         : -\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: CoordAction ID: -\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Actions\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: ------------------------------------------------------------------------------------------------------------------------------------\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: ID                                                                            Status    Ext ID                 Ext Status Err Code  \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: ------------------------------------------------------------------------------------------------------------------------------------\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: 0000002-121113223948436-oozie-oozi-W@mr-node                                  FAILED    -                      -          EL_ERROR  \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: ------------------------------------------------------------------------------------------------------------------------------------\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: workflow_status=FAILED\u001B[0m\n\u001B[1;35merr: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: change from notrun to 0 failed: sh /tmp/oozieSmoke.sh /etc/oozie/conf /etc/hadoop/conf ambari_qa false /etc/security/keytabs/ambari_qa.headless.keytab EXAMPLE.COM   returned 1 instead of one of [0] at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/oozie/service_check.pp:62\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 50.53 seconds\u001B[0m",
-        "status" : "FAILED",
-        "stderr" : "None",
-        "host_name" : "host2",
-        "id" : 44,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "OOZIE_SERVICE_CHECK",
-        "start_time" : 1352864442993,
-        "stage_id" : 4
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/29",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}d22fc5749dde07b5b5acff255c490e9d' to '{md5}0617b67bc5192f5e44cf98b2fe25eb6f'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}0d021082a9258c648b5259d3af27ff62' to '{md5}39e33160b7f2933a12fc338a81ae9fcd'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/File[/tmp/checkForFormat.sh]/ensure: defined content as '{md5}5dd6bddf910d8ca9f6fefa44e7bbec7e'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: NameNode Dirname = /grid/0/hadoop/hdfs/namenode\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: NameNode Dirname = /grid/1/hadoop/hdfs/namenode\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:57 INFO namenode.NameNode: STARTUP_MSG: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: /************************************************************\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG: Starting NameNode\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG:   host = host5/10.118.58.228\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG:   args = [-format]\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG:   version = 1.1.0.1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG:   build =  -r ; compiled by 'jenkins' on Sat Nov 10 18:55:09 PST 2012\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: ************************************************************/\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: Re-format filesystem in /grid/0/hadoop/hdfs/namenode ? (Y or N) Re-format filesystem in /grid/1/hadoop/hdfs/namenode ? (Y or N) 12/11/13 22:36:58 INFO util.GSet: VM type       = 64-bit\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO util.GSet: 2% max memory = 19.2 MB\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO util.GSet: capacity      = 2^21 = 2097152 entries\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO util.GSet: recommended=2097152, actual=2097152\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: fsOwner=hdfs\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: supergroup=supergroup\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: isPermissionEnabled=true\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: dfs.block.invalidate.limit=100\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 WARN namenode.FSNamesystem: The dfs.support.append option is in your configuration, however append is not supported. This configuration option is no longer required to enable sync\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: isAccessTokenEnabled=false accessKeyUpdateInterval=0 min(s), accessTokenLifetime=0 min(s)\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.NameNode: Caching file names occuring more than 10 times \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO common.Storage: Image file of size 110 saved in 0 seconds.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.FSEditLog: closing edit log: position=4, editlog=/grid/0/hadoop/hdfs/namenode/current/edits\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.FSEditLog: close success: truncate to 4, editlog=/grid/0/hadoop/hdfs/namenode/current/edits\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO common.Storage: Storage directory /grid/0/hadoop/hdfs/namenode has been successfully formatted.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO common.Storage: Image file of size 110 saved in 0 seconds.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.FSEditLog: closing edit log: position=4, editlog=/grid/1/hadoop/hdfs/namenode/current/edits\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.FSEditLog: close success: truncate to 4, editlog=/grid/1/hadoop/hdfs/namenode/current/edits\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO common.Storage: Storage directory /grid/1/hadoop/hdfs/namenode has been successfully formatted.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.NameNode: SHUTDOWN_MSG: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: /************************************************************\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: SHUTDOWN_MSG: Shutting down NameNode at host5/10.118.58.228\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: ************************************************************/\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: yes: standard output: Broken pipe\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: yes: write error\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Hdp::Exec[set namenode mark]/Exec[set namenode mark]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Service[namenode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -mkdir /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred]/Hdp-hadoop::Exec-hadoop[fs -mkdir /mapred]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred]/Hdp-hadoop::Exec-hadoop[fs -chown mapred /mapred]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred]/Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -mkdir /mapred/system]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -chmod 775 /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 775 /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 775 /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -mkdir /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -chown mapred /mapred/system]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred/system]/Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred/system]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -chown hdfs /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown hdfs /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -chown hdfs /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -chmod 770 /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 770 /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 770 /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -chown ambari_qa /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown ambari_qa /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -chown ambari_qa /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -chmod 777 /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 777 /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 777 /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -chown oozie /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown oozie /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -chown oozie /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 51.23 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -mkdir /mapred/system]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -chmod 775 /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 775 /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 775 /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -mkdir /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -chown mapred /mapred/system]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred/system]/Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred/system]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -chown hdfs /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown hdfs /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -chown hdfs /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -chmod 770 /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 770 /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 770 /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -chown ambari_qa /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown ambari_qa /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -chown ambari_qa /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -chmod 777 /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 777 /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 777 /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -chown oozie /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown oozie /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -chown oozie /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 51.23 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 29,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "NAMENODE",
-        "start_time" : 1352864090025,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/39",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}db0f2efdd03e4845c0528e1978b25644' to '{md5}84df095b5569e720b4aeaf4a96e0ee6d'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}f72b50738651b3cb6bcef039b59ffdcb' to '{md5}e750ca8f3497b9a4656f782dcf335dab'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.29 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}db0f2efdd03e4845c0528e1978b25644' to '{md5}84df095b5569e720b4aeaf4a96e0ee6d'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}f72b50738651b3cb6bcef039b59ffdcb' to '{md5}e750ca8f3497b9a4656f782dcf335dab'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.29 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 39,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352864269636,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/32",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}c892638e8c76c66f072640eb32b0637a' to '{md5}db0f2efdd03e4845c0528e1978b25644'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}fcfc81d25ae7ad5f5aaaacdc3d47f0f5' to '{md5}036cea2c613ff235499a7ed743be467f'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.38 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}c892638e8c76c66f072640eb32b0637a' to '{md5}db0f2efdd03e4845c0528e1978b25644'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}fcfc81d25ae7ad5f5aaaacdc3d47f0f5' to '{md5}036cea2c613ff235499a7ed743be467f'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.38 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 32,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "DATANODE",
-        "start_time" : 1352864090145,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/25",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.84 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.84 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 25,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864089770,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/23",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.77 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.77 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 23,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864089600,
-        "stage_id" : 1
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/5_hosts/poll_2.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/5_hosts/poll_2.json
deleted file mode 100644
index 822e351..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/5_hosts/poll_2.json
+++ /dev/null
@@ -1,405 +0,0 @@
-{
-  "href" : "http://ambari:8080/api/clusters/mycluster/requests/1?fields=tasks/*",
-  "Requests" : {
-    "id" : 1,
-    "cluster_name" : "mycluster"
-  },
-  "tasks" : [
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/4",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 4,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "SECONDARY_NAMENODE",
-        "start_time" : 1352863664537,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/15",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 15,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863666842,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/7",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host1",
-        "id" : 7,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863665177,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/14",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host4",
-        "id" : 14,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863666672,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/10",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 10,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "NAGIOS_SERVER",
-        "start_time" : 1352863665856,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/21",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 21,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "SQOOP",
-        "start_time" : 1352863667466,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/8",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 8,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863665481,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/16",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 16,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863666913,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/2",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 2,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "JOBTRACKER",
-        "start_time" : 1352863664213,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/20",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 20,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "PIG",
-        "start_time" : 1352863667299,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/11",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 11,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "NAMENODE",
-        "start_time" : 1352863665939,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/3",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 3,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "OOZIE_SERVER",
-        "start_time" : 1352863664455,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/12",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host4",
-        "id" : 12,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863666165,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/9",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 9,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_SERVER",
-        "start_time" : 1352863665690,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/6",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host1",
-        "id" : 6,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863664901,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/17",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 17,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "HDFS_CLIENT",
-        "start_time" : 1352863666987,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/18",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 18,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "MAPREDUCE_CLIENT",
-        "start_time" : 1352863667058,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/19",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 19,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "OOZIE_CLIENT",
-        "start_time" : 1352863667216,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/22",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 22,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863667578,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/5",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host1",
-        "id" : 5,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863664723,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/1",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 1,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863663984,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/13",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host4",
-        "id" : 13,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863666401,
-        "stage_id" : 1
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/5_hosts/poll_3.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/5_hosts/poll_3.json
deleted file mode 100644
index e179f86..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/5_hosts/poll_3.json
+++ /dev/null
@@ -1,405 +0,0 @@
-{
-  "href" : "http://ambari:8080/api/clusters/mycluster/requests/1?fields=tasks/*",
-  "Requests" : {
-    "id" : 1,
-    "cluster_name" : "mycluster"
-  },
-  "tasks" : [
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/11",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 11,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "NAMENODE",
-        "start_time" : 1352863665939,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/3",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $lzo_enabled at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:37 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ensure at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:76 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java oozie-server]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie/Hdp::Package[oozie-server]/Hdp::Package::Yum[oozie-server]/Package[oozie]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java oozie-client]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/content: content changed '{md5}809b11d7098abd5cbcb08b9ceda104ed' to '{md5}851335f018fb288e30cf38afc96fff6a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/content: content changed '{md5}db07d1efafb9314e2e7d2c8a4d3ba6e7' to '{md5}42fd0d6fe7301cb54ea7129d6b930f59'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-oozie::Download-ext-zip/Hdp::Package[extjs]/Hdp::Package::Yum[extjs]/Package[extjs-2.2-1]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java extjs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}11c7e0ea46916a75e66920fac8c93621' to '{md5}fc664190038e2562fe63acd61ea9480b'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/oozie/]/group: group changed 'oozie' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/oozie]/group: group changed 'oozie' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp-oozie::Service::Directory[/var/tmp/oozie]/Hdp::Directory_recursive_create[/var/tmp/oozie]/Hdp::Exec[mkdir -p /var/tmp/oozie]/Exec[mkdir -p /var/tmp/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp-oozie::Service::Directory[/var/data/oozie]/Hdp::Directory_recursive_create[/var/data/oozie]/Hdp::Exec[mkdir -p /var/data/oozie]/Exec[mkdir -p /var/data/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/oozie]/group: group changed 'oozie' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp-oozie::Service::Directory[/var/tmp/oozie]/Hdp::Directory_recursive_create[/var/tmp/oozie]/Hdp::Directory[/var/tmp/oozie]/File[/var/tmp/oozie]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-oozie::Service/Hdp-oozie::Service::Directory[/var/tmp/oozie]/Hdp::Directory_recursive_create[/var/tmp/oozie]/Hdp::Directory[/var/tmp/oozie]/File[/var/tmp/oozie]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/data/oozie]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/var/data/oozie]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/data/oozie]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 50.33 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/var/log/oozie]/group: group changed 'oozie' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp-oozie::Service::Directory[/var/tmp/oozie]/Hdp::Directory_recursive_create[/var/tmp/oozie]/Hdp::Exec[mkdir -p /var/tmp/oozie]/Exec[mkdir -p /var/tmp/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp-oozie::Service::Directory[/var/data/oozie]/Hdp::Directory_recursive_create[/var/data/oozie]/Hdp::Exec[mkdir -p /var/data/oozie]/Exec[mkdir -p /var/data/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/oozie]/group: group changed 'oozie' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp-oozie::Service::Directory[/var/tmp/oozie]/Hdp::Directory_recursive_create[/var/tmp/oozie]/Hdp::Directory[/var/tmp/oozie]/File[/var/tmp/oozie]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp-oozie::Service::Directory[/var/tmp/oozie]/Hdp::Directory_recursive_create[/var/tmp/oozie]/Hdp::Directory[/var/tmp/oozie]/File[/var/tmp/oozie]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/data/oozie]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/var/data/oozie]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/data/oozie]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 50.33 seconds\u001B[0m",
-        "status" : "QUEUED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 3,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "OOZIE_SERVER",
-        "start_time" : 1352863664455,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/4",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Snamenode::Create_name_dirs[/grid/0/hadoop/hdfs/namesecondary]/Hdp::Directory_recursive_create[/grid/0/hadoop/hdfs/namesecondary]/Hdp::Exec[mkdir -p /grid/0/hadoop/hdfs/namesecondary]/Exec[mkdir -p /grid/0/hadoop/hdfs/namesecondary]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}de205131fc2bc8c15f7bd1329fc8ea0d' to '{md5}f60477f06af8d1d549460294d0363702'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}9a88c58cc1982799c0e4bdd2d1f1e6e0'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 4.84 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed 
'{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}9a88c58cc1982799c0e4bdd2d1f1e6e0'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 4.84 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 4,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "SECONDARY_NAMENODE",
-        "start_time" : 1352863664537,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/1",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.18 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 111.24 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 111.24 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host3",
-        "id" : 1,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863663984,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/15",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.14 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.10 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}43bb2e790338badc9f17297bc958b536'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}7c4fcfb891f7ca4a9156ba37d35b7dc4'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/mode: mode changed '0755' to '0750'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 125.19 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 125.19 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host2",
-        "id" : 15,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863666842,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/19",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[2]/Hdp-oozie/Hdp::Package[oozie-client]/Hdp::Package::Yum[oozie-client]/Package[oozie-client]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java oozie-client]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie/Hdp::User[oozie]/User[oozie]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/content: content changed '{md5}809b11d7098abd5cbcb08b9ceda104ed' to '{md5}851335f018fb288e30cf38afc96fff6a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/content: content changed '{md5}db07d1efafb9314e2e7d2c8a4d3ba6e7' to '{md5}42fd0d6fe7301cb54ea7129d6b930f59'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}11c7e0ea46916a75e66920fac8c93621' to '{md5}5ad6256cb12b73724714fc692c18db82'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 21.37 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/content: content changed '{md5}809b11d7098abd5cbcb08b9ceda104ed' to '{md5}851335f018fb288e30cf38afc96fff6a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/content: content changed '{md5}db07d1efafb9314e2e7d2c8a4d3ba6e7' to '{md5}42fd0d6fe7301cb54ea7129d6b930f59'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}11c7e0ea46916a75e66920fac8c93621' to '{md5}5ad6256cb12b73724714fc692c18db82'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 21.37 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 19,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "OOZIE_CLIENT",
-        "start_time" : 1352863667216,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/6",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as 
'{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 34.27 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 34.27 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 6,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863664901,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/18",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}0c93d35cf2bf9188e20542db3417f453' to '{md5}089df31bb24ce7450baba1a7541e4546'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}13834e68f9058d26d30e3e627bea2d08'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.32 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}0c93d35cf2bf9188e20542db3417f453' to '{md5}089df31bb24ce7450baba1a7541e4546'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}13834e68f9058d26d30e3e627bea2d08'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.32 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 18,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "MAPREDUCE_CLIENT",
-        "start_time" : 1352863667058,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/10",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-fping]/Hdp::Package[nagios-fping]/Hdp::Package::Yum[nagios-fping]/Package[fping]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-server]/Hdp::Package[nagios-server]/Hdp::Package::Yum[nagios-server]/Package[nagios-3.2.3]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-plugins]/Hdp::Package[nagios-plugins]/Hdp::Package::Yum[nagios-plugins]/Package[nagios-plugins-1.4.9]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-addons]/Hdp::Package[nagios-addons]/Hdp::Package::Yum[nagios-addons]/Package[hdp_mon_nagios_addons]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios]/owner: owner changed 'nagios' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects]/owner: owner changed 'nagios' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hdfs_blocks.php]/content: content changed '{md5}4dc9db0a18622b3e40e266a9b54e2a1a' to '{md5}e3a5fa882154aaabe18fce6086346b98'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_rpcq_latency.php]/content: content changed '{md5}4fa432abbf0a15a2c73cd8eb7e657d6e' to '{md5}1601eec7138fcd957159e6524900a1ce'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_cpu.pl]/ensure: defined content as '{md5}ab87290dee3f032770580d7d7713d086'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hostgroups.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_datanode_storage.php]/ensure: defined content as '{md5}eeae70d7c6686ff4ce9993244dbbdf34'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hosts.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-commands.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hive_metastore_status.sh]/content: content changed '{md5}56469a95f854711ff67e961eb91a1b9a' to '{md5}987ce3f5cd654d9e8d13e9bc9d4b4e16'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_aggregate.php]/content: content changed '{md5}eeddc4cf93d7ca7dbf1e6ea1effcc278' to '{md5}ebbacb754b35bcdab9b246a64589f7c6'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-servicegroups.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/contacts.cfg]/content: content changed '{md5}5ee6a28df66ed3cc6f76b46073ced9ac' to '{md5}05c47f03c0800b968023152479f24ccb'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/contacts.cfg]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_templeton_status.sh]/ensure: defined content as 
'{md5}017d82f1435f2a1059d13968b14174bc'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_puppet_agent_status.php]/ensure: defined content as '{md5}a623c36478c7ab4bf8df595822683762'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/content: content changed '{md5}3a4b1c8fd3b42affa5ae58a385a48000' to '{md5}870993486c9e4336086645a14d327ead'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-services.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_oozie_status.sh]/content: content changed '{md5}e672a044b2aa7163ceda92829d43e92c' to '{md5}3124bf8679ce198ba25509e430db325b'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_webui.sh]/content: content changed '{md5}f8e41fdb30d0cd838aefd7ea9e5af4a2' to '{md5}258d5ced025d4b14853cc0b12bdde1f0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hdfs_capacity.php]/content: content changed '{md5}a3391962ab1c1956610d0cb009df9b8c' to '{md5}398026717db006c423441a223731914f'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 52.10 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_templeton_status.sh]/ensure: defined content as '{md5}017d82f1435f2a1059d13968b14174bc'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_puppet_agent_status.php]/ensure: defined content as '{md5}a623c36478c7ab4bf8df595822683762'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/content: content changed '{md5}3a4b1c8fd3b42affa5ae58a385a48000' to '{md5}870993486c9e4336086645a14d327ead'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-services.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_oozie_status.sh]/content: content changed '{md5}e672a044b2aa7163ceda92829d43e92c' to '{md5}3124bf8679ce198ba25509e430db325b'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_webui.sh]/content: content changed '{md5}f8e41fdb30d0cd838aefd7ea9e5af4a2' to '{md5}258d5ced025d4b14853cc0b12bdde1f0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hdfs_capacity.php]/content: content changed '{md5}a3391962ab1c1956610d0cb009df9b8c' to '{md5}398026717db006c423441a223731914f'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 52.10 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 10,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "NAGIOS_SERVER",
-        "start_time" : 1352863665856,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/7",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/0/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/0/hadoop/mapred]/Exec[mkdir -p /grid/0/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/1/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/1/hadoop/mapred]/Exec[mkdir -p /grid/1/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}428a01fb131e6c37a876dd03a2940d79' to '{md5}a39a2689e76538c6d9090b00ceb04eb0'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p 
/var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}30e43dbdb225dad740d632ecc8f6ae11'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.96 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}30e43dbdb225dad740d632ecc8f6ae11'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.96 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 7,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863665177,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/8",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.24 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 133.95 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: 
/File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 133.95 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host5",
-        "id" : 8,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863665481,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/22",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 22,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863667578,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/5",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}428a01fb131e6c37a876dd03a2940d79'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}c14eb8ab2bb5ab75789c875534ab64f4'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/mode: mode changed '0755' to '0750'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 148.09 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 148.09 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host1",
-        "id" : 5,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863664723,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/9",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[rrdtool-python]/Hdp::Package::Yum[rrdtool-python]/Package[python-rrdtool.x86_64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[ganglia-gweb]/Hdp::Package::Yum[ganglia-gweb]/Package[gweb]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[ganglia-server]/Hdp::Package::Yum[ganglia-server]/Package[ganglia-gmetad-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[ganglia-hdp-gweb-addons]/Hdp::Package::Yum[ganglia-hdp-gweb-addons]/Package[hdp_mon_ganglia_addons]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[gmetad]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/owner: owner changed 'apache' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/group: group changed 'apache' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/mode: mode changed '0755' to '0777'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver start]/Exec[monitor webserver start]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/www/cgi-bin/rrd.py]/ensure: defined content as '{md5}95b666a938f3080c370aeb6e3136cc6b'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 63.16 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[gmetad]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/owner: owner changed 'apache' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/group: group changed 'apache' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/mode: mode changed '0755' to '0777'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver start]/Exec[monitor webserver start]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/www/cgi-bin/rrd.py]/ensure: defined content as '{md5}95b666a938f3080c370aeb6e3136cc6b'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 63.16 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 9,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_SERVER",
-        "start_time" : 1352863665690,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/17",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}43bb2e790338badc9f17297bc958b536' to '{md5}0c93d35cf2bf9188e20542db3417f453'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}7c4fcfb891f7ca4a9156ba37d35b7dc4' to '{md5}fcfc81d25ae7ad5f5aaaacdc3d47f0f5'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.23 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}43bb2e790338badc9f17297bc958b536' to '{md5}0c93d35cf2bf9188e20542db3417f453'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}7c4fcfb891f7ca4a9156ba37d35b7dc4' to '{md5}fcfc81d25ae7ad5f5aaaacdc3d47f0f5'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.23 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 17,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "HDFS_CLIENT",
-        "start_time" : 1352863666987,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/21",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 21,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "SQOOP",
-        "start_time" : 1352863667466,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/13",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as 
'{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 31.06 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 31.06 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 13,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863666401,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/2",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ambari-log4j]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp::Package[ambari-log4j]/Hdp::Package::Yum[ambari-log4j]/Package[ambari-log4j]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Jobtracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/0/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/0/hadoop/mapred]/Exec[mkdir -p /grid/0/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Jobtracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/1/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/1/hadoop/mapred]/Exec[mkdir -p /grid/1/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}de205131fc2bc8c15f7bd1329fc8ea0d'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib/hadoop/lib/hadoop-tools.jar]/ensure: 
created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}cccc03b9f3384eac76957c7fe2f12849'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 78.36 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}cccc03b9f3384eac76957c7fe2f12849'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 78.36 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 2,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "JOBTRACKER",
-        "start_time" : 1352863664213,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/16",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as 
'{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 29.27 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 29.27 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 16,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863666913,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/12",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.10 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.10 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}00ac2c1fbbe400ce3e93c316c03c8c8c'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}54edf0ba61f6501cc49c0d7788b266b1'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/mode: mode changed '0755' to '0750'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 130.97 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 130.97 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host4",
-        "id" : 12,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863666165,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/14",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/0/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/0/hadoop/mapred]/Exec[mkdir -p /grid/0/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/1/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/1/hadoop/mapred]/Exec[mkdir -p /grid/1/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}00ac2c1fbbe400ce3e93c316c03c8c8c' to '{md5}a973d9bcff056aeff7f22221886c84b7'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}1a3769d695902dba39b5645fef3766e0'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.51 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}1a3769d695902dba39b5645fef3766e0'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.51 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 14,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863666672,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/20",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig/Hdp::Package[pig]/Hdp::Package::Yum[pig]/Package[pig.noarch]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java pig]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig-env.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/content: content changed '{md5}ba02e0d405cdbafd081e13ab2ef06403' to '{md5}c53b9db5f04e0c66451e52b4b39c8b17'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/content: content changed '{md5}fd58c23539a391f6a74db6e22b67f7d3' to '{md5}82b233fb0a252aae098c7267aafeb01f'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 22.14 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java pig]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig-env.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/content: content changed '{md5}ba02e0d405cdbafd081e13ab2ef06403' to '{md5}c53b9db5f04e0c66451e52b4b39c8b17'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/content: content changed '{md5}fd58c23539a391f6a74db6e22b67f7d3' to '{md5}82b233fb0a252aae098c7267aafeb01f'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 22.14 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 20,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "PIG",
-        "start_time" : 1352863667299,
-        "stage_id" : 1
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/5_hosts/poll_4.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/5_hosts/poll_4.json
deleted file mode 100644
index a573984..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/5_hosts/poll_4.json
+++ /dev/null
@@ -1,405 +0,0 @@
-{
-  "href" : "http://ambari:8080/api/clusters/mycluster/requests/1?fields=tasks/*",
-  "Requests" : {
-    "id" : 1,
-    "cluster_name" : "mycluster"
-  },
-  "tasks" : [
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/13",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 31.06 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 31.06 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 13,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863666401,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/7",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/0/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/0/hadoop/mapred]/Exec[mkdir -p /grid/0/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/1/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/1/hadoop/mapred]/Exec[mkdir -p /grid/1/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}428a01fb131e6c37a876dd03a2940d79' to '{md5}a39a2689e76538c6d9090b00ceb04eb0'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}30e43dbdb225dad740d632ecc8f6ae11'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.96 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}30e43dbdb225dad740d632ecc8f6ae11'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.96 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 7,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863665177,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/14",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/0/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/0/hadoop/mapred]/Exec[mkdir -p /grid/0/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/1/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/1/hadoop/mapred]/Exec[mkdir -p /grid/1/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}00ac2c1fbbe400ce3e93c316c03c8c8c' to '{md5}a973d9bcff056aeff7f22221886c84b7'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}1a3769d695902dba39b5645fef3766e0'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.51 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}1a3769d695902dba39b5645fef3766e0'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.51 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 14,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863666672,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/4",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Snamenode::Create_name_dirs[/grid/0/hadoop/hdfs/namesecondary]/Hdp::Directory_recursive_create[/grid/0/hadoop/hdfs/namesecondary]/Hdp::Exec[mkdir -p /grid/0/hadoop/hdfs/namesecondary]/Exec[mkdir -p /grid/0/hadoop/hdfs/namesecondary]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}de205131fc2bc8c15f7bd1329fc8ea0d' to '{md5}f60477f06af8d1d549460294d0363702'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}9a88c58cc1982799c0e4bdd2d1f1e6e0'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 4.84 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}9a88c58cc1982799c0e4bdd2d1f1e6e0'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 4.84 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 4,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "SECONDARY_NAMENODE",
-        "start_time" : 1352863664537,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/6",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 34.27 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 34.27 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 6,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863664901,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/19",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[2]/Hdp-oozie/Hdp::Package[oozie-client]/Hdp::Package::Yum[oozie-client]/Package[oozie-client]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java oozie-client]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie/Hdp::User[oozie]/User[oozie]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/content: content changed '{md5}809b11d7098abd5cbcb08b9ceda104ed' to '{md5}851335f018fb288e30cf38afc96fff6a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/content: content changed '{md5}db07d1efafb9314e2e7d2c8a4d3ba6e7' to '{md5}42fd0d6fe7301cb54ea7129d6b930f59'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}11c7e0ea46916a75e66920fac8c93621' to '{md5}5ad6256cb12b73724714fc692c18db82'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 21.37 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/content: content changed '{md5}809b11d7098abd5cbcb08b9ceda104ed' to '{md5}851335f018fb288e30cf38afc96fff6a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/content: content changed '{md5}db07d1efafb9314e2e7d2c8a4d3ba6e7' to '{md5}42fd0d6fe7301cb54ea7129d6b930f59'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}11c7e0ea46916a75e66920fac8c93621' to '{md5}5ad6256cb12b73724714fc692c18db82'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 21.37 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 19,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "OOZIE_CLIENT",
-        "start_time" : 1352863667216,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/3",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $lzo_enabled at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:37 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ensure at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:76 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}fc664190038e2562fe63acd61ea9480b' to '{md5}827a6e7bd4233d4dc82b20761aed1e30'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.12 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $lzo_enabled at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:37 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ensure at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:76 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}fc664190038e2562fe63acd61ea9480b' to '{md5}827a6e7bd4233d4dc82b20761aed1e30'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.12 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 3,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "OOZIE_SERVER",
-        "start_time" : 1352863664455,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/1",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.18 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 111.24 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 111.24 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host3",
-        "id" : 1,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863663984,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/22",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/0/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/0/hadoop/mapred]/Exec[mkdir -p /grid/0/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/1/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/1/hadoop/mapred]/Exec[mkdir -p /grid/1/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}089df31bb24ce7450baba1a7541e4546' to '{md5}8da2518fdfc4a3723e64babe25c8d6d8'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p 
/var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}13834e68f9058d26d30e3e627bea2d08' to '{md5}816e416f74804ba21e3b80b611d59a11'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.75 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}13834e68f9058d26d30e3e627bea2d08' to '{md5}816e416f74804ba21e3b80b611d59a11'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.75 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 22,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863667578,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/8",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.24 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 133.95 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: 
/File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 133.95 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host5",
-        "id" : 8,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863665481,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/12",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.10 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.10 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}00ac2c1fbbe400ce3e93c316c03c8c8c'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}54edf0ba61f6501cc49c0d7788b266b1'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/mode: mode changed '0755' to '0750'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 130.97 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 130.97 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host4",
-        "id" : 12,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863666165,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/10",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-fping]/Hdp::Package[nagios-fping]/Hdp::Package::Yum[nagios-fping]/Package[fping]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-server]/Hdp::Package[nagios-server]/Hdp::Package::Yum[nagios-server]/Package[nagios-3.2.3]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-plugins]/Hdp::Package[nagios-plugins]/Hdp::Package::Yum[nagios-plugins]/Package[nagios-plugins-1.4.9]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-addons]/Hdp::Package[nagios-addons]/Hdp::Package::Yum[nagios-addons]/Package[hdp_mon_nagios_addons]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios]/owner: owner changed 'nagios' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects]/owner: owner changed 'nagios' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hdfs_blocks.php]/content: content changed '{md5}4dc9db0a18622b3e40e266a9b54e2a1a' to '{md5}e3a5fa882154aaabe18fce6086346b98'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_rpcq_latency.php]/content: content changed '{md5}4fa432abbf0a15a2c73cd8eb7e657d6e' to '{md5}1601eec7138fcd957159e6524900a1ce'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_cpu.pl]/ensure: defined content as '{md5}ab87290dee3f032770580d7d7713d086'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hostgroups.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_datanode_storage.php]/ensure: defined content as '{md5}eeae70d7c6686ff4ce9993244dbbdf34'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hosts.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-commands.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hive_metastore_status.sh]/content: content changed '{md5}56469a95f854711ff67e961eb91a1b9a' to '{md5}987ce3f5cd654d9e8d13e9bc9d4b4e16'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_aggregate.php]/content: content changed '{md5}eeddc4cf93d7ca7dbf1e6ea1effcc278' to '{md5}ebbacb754b35bcdab9b246a64589f7c6'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-servicegroups.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/contacts.cfg]/content: content changed '{md5}5ee6a28df66ed3cc6f76b46073ced9ac' to '{md5}05c47f03c0800b968023152479f24ccb'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/contacts.cfg]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_templeton_status.sh]/ensure: defined content as 
'{md5}017d82f1435f2a1059d13968b14174bc'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_puppet_agent_status.php]/ensure: defined content as '{md5}a623c36478c7ab4bf8df595822683762'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/content: content changed '{md5}3a4b1c8fd3b42affa5ae58a385a48000' to '{md5}870993486c9e4336086645a14d327ead'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-services.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_oozie_status.sh]/content: content changed '{md5}e672a044b2aa7163ceda92829d43e92c' to '{md5}3124bf8679ce198ba25509e430db325b'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_webui.sh]/content: content changed '{md5}f8e41fdb30d0cd838aefd7ea9e5af4a2' to '{md5}258d5ced025d4b14853cc0b12bdde1f0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hdfs_capacity.php]/content: content changed '{md5}a3391962ab1c1956610d0cb009df9b8c' to '{md5}398026717db006c423441a223731914f'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 52.10 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_templeton_status.sh]/ensure: defined content as '{md5}017d82f1435f2a1059d13968b14174bc'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_puppet_agent_status.php]/ensure: defined content as '{md5}a623c36478c7ab4bf8df595822683762'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/content: content changed '{md5}3a4b1c8fd3b42affa5ae58a385a48000' to '{md5}870993486c9e4336086645a14d327ead'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-services.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_oozie_status.sh]/content: content changed '{md5}e672a044b2aa7163ceda92829d43e92c' to '{md5}3124bf8679ce198ba25509e430db325b'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_webui.sh]/content: content changed '{md5}f8e41fdb30d0cd838aefd7ea9e5af4a2' to '{md5}258d5ced025d4b14853cc0b12bdde1f0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hdfs_capacity.php]/content: content changed '{md5}a3391962ab1c1956610d0cb009df9b8c' to '{md5}398026717db006c423441a223731914f'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 52.10 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 10,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "NAGIOS_SERVER",
-        "start_time" : 1352863665856,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/15",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.14 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.10 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}43bb2e790338badc9f17297bc958b536'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}7c4fcfb891f7ca4a9156ba37d35b7dc4'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/mode: mode changed '0755' to '0750'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 125.19 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 125.19 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host2",
-        "id" : 15,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863666842,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/21",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop/Hdp::Package[sqoop]/Hdp::Package::Yum[sqoop]/Package[sqoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java sqoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Mysql-connector/Hdp::Exec[yum install -y mysql-connector-java]/Exec[yum install -y mysql-connector-java]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib/sqoop/lib//mysql-connector-java.jar]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib/sqoop/conf/sqoop-env.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 50.25 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop/Hdp::Package[sqoop]/Hdp::Package::Yum[sqoop]/Package[sqoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java sqoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Mysql-connector/Hdp::Exec[yum install -y mysql-connector-java]/Exec[yum install -y mysql-connector-java]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib/sqoop/lib//mysql-connector-java.jar]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib/sqoop/conf/sqoop-env.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 50.25 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 21,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "SQOOP",
-        "start_time" : 1352863667466,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/5",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}428a01fb131e6c37a876dd03a2940d79'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}c14eb8ab2bb5ab75789c875534ab64f4'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/mode: mode changed '0755' to '0750'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 148.09 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 148.09 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host1",
-        "id" : 5,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863664723,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/17",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}43bb2e790338badc9f17297bc958b536' to '{md5}0c93d35cf2bf9188e20542db3417f453'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}7c4fcfb891f7ca4a9156ba37d35b7dc4' to '{md5}fcfc81d25ae7ad5f5aaaacdc3d47f0f5'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.23 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}43bb2e790338badc9f17297bc958b536' to '{md5}0c93d35cf2bf9188e20542db3417f453'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}7c4fcfb891f7ca4a9156ba37d35b7dc4' to '{md5}fcfc81d25ae7ad5f5aaaacdc3d47f0f5'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.23 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 17,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "HDFS_CLIENT",
-        "start_time" : 1352863666987,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/9",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[rrdtool-python]/Hdp::Package::Yum[rrdtool-python]/Package[python-rrdtool.x86_64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[ganglia-gweb]/Hdp::Package::Yum[ganglia-gweb]/Package[gweb]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[ganglia-server]/Hdp::Package::Yum[ganglia-server]/Package[ganglia-gmetad-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[ganglia-hdp-gweb-addons]/Hdp::Package::Yum[ganglia-hdp-gweb-addons]/Package[hdp_mon_ganglia_addons]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[gmetad]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/owner: owner changed 'apache' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/group: group changed 'apache' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/mode: mode changed '0755' to '0777'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver start]/Exec[monitor webserver start]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/www/cgi-bin/rrd.py]/ensure: defined content as '{md5}95b666a938f3080c370aeb6e3136cc6b'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 63.16 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[gmetad]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/owner: owner changed 'apache' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/group: group changed 'apache' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/mode: mode changed '0755' to '0777'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver start]/Exec[monitor webserver start]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/www/cgi-bin/rrd.py]/ensure: defined content as '{md5}95b666a938f3080c370aeb6e3136cc6b'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 63.16 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 9,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_SERVER",
-        "start_time" : 1352863665690,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/20",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig/Hdp::Package[pig]/Hdp::Package::Yum[pig]/Package[pig.noarch]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java pig]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig-env.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/content: content changed '{md5}ba02e0d405cdbafd081e13ab2ef06403' to '{md5}c53b9db5f04e0c66451e52b4b39c8b17'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/content: content changed '{md5}fd58c23539a391f6a74db6e22b67f7d3' to '{md5}82b233fb0a252aae098c7267aafeb01f'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 22.14 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java pig]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig-env.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/content: content changed '{md5}ba02e0d405cdbafd081e13ab2ef06403' to '{md5}c53b9db5f04e0c66451e52b4b39c8b17'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/content: content changed '{md5}fd58c23539a391f6a74db6e22b67f7d3' to '{md5}82b233fb0a252aae098c7267aafeb01f'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 22.14 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 20,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "PIG",
-        "start_time" : 1352863667299,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/11",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}600cddff911584631420067cd2d2a5f6'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}05bed82ac309c3636b85b7ffae797cd1'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_name_dirs[/grid/0/hadoop/hdfs/namenode,/grid/1/hadoop/hdfs/namenode,]/Hdp::Directory_recursive_create[/grid/0/hadoop/hdfs/namenode]/Hdp::Exec[mkdir -p /grid/0/hadoop/hdfs/namenode]/Exec[mkdir -p /grid/0/hadoop/hdfs/namenode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_name_dirs[/grid/0/hadoop/hdfs/namenode,/grid/1/hadoop/hdfs/namenode,]/Hdp::Directory_recursive_create[/grid/1/hadoop/hdfs/namenode]/Hdp::Exec[mkdir -p /grid/1/hadoop/hdfs/namenode]/Exec[mkdir -p /grid/1/hadoop/hdfs/namenode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/hdfs/namenode]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/hdfs/namenode]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/hdfs/namenode]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/hdfs/namenode]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namenode]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namenode]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namenode]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namenode]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Service[namenode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Service[namenode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 75.23 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namenode]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Service[namenode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Service[namenode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 75.23 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 11,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "NAMENODE",
-        "start_time" : 1352863665939,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/16",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as 
'{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 29.27 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 29.27 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 16,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863666913,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/18",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}0c93d35cf2bf9188e20542db3417f453' to '{md5}089df31bb24ce7450baba1a7541e4546'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}13834e68f9058d26d30e3e627bea2d08'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.32 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}0c93d35cf2bf9188e20542db3417f453' to '{md5}089df31bb24ce7450baba1a7541e4546'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}13834e68f9058d26d30e3e627bea2d08'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.32 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 18,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "MAPREDUCE_CLIENT",
-        "start_time" : 1352863667058,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/2",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ambari-log4j]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp::Package[ambari-log4j]/Hdp::Package::Yum[ambari-log4j]/Package[ambari-log4j]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Jobtracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/0/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/0/hadoop/mapred]/Exec[mkdir -p /grid/0/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Jobtracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/1/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/1/hadoop/mapred]/Exec[mkdir -p /grid/1/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}de205131fc2bc8c15f7bd1329fc8ea0d'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib/hadoop/lib/hadoop-tools.jar]/ensure: 
created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}cccc03b9f3384eac76957c7fe2f12849'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 78.36 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}cccc03b9f3384eac76957c7fe2f12849'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 78.36 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 2,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "JOBTRACKER",
-        "start_time" : 1352863664213,
-        "stage_id" : 1
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/5_hosts/poll_5.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/5_hosts/poll_5.json
deleted file mode 100644
index eeef50e..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/5_hosts/poll_5.json
+++ /dev/null
@@ -1,405 +0,0 @@
-{
-  "href" : "http://ambari:8080/api/clusters/mycluster/requests/1?fields=tasks/*",
-  "Requests" : {
-    "id" : 1,
-    "cluster_name" : "mycluster"
-  },
-  "tasks" : [
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/18",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}0c93d35cf2bf9188e20542db3417f453' to '{md5}089df31bb24ce7450baba1a7541e4546'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}13834e68f9058d26d30e3e627bea2d08'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.32 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}0c93d35cf2bf9188e20542db3417f453' to '{md5}089df31bb24ce7450baba1a7541e4546'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}13834e68f9058d26d30e3e627bea2d08'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.32 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 18,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "MAPREDUCE_CLIENT",
-        "start_time" : 1352863667058,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/21",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop/Hdp::Package[sqoop]/Hdp::Package::Yum[sqoop]/Package[sqoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java sqoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Mysql-connector/Hdp::Exec[yum install -y mysql-connector-java]/Exec[yum install -y mysql-connector-java]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib/sqoop/lib//mysql-connector-java.jar]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib/sqoop/conf/sqoop-env.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 50.25 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop/Hdp::Package[sqoop]/Hdp::Package::Yum[sqoop]/Package[sqoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java sqoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Mysql-connector/Hdp::Exec[yum install -y mysql-connector-java]/Exec[yum install -y mysql-connector-java]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib/sqoop/lib//mysql-connector-java.jar]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib/sqoop/conf/sqoop-env.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 50.25 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 21,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "SQOOP",
-        "start_time" : 1352863667466,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/6",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as 
'{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 34.27 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 34.27 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 6,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863664901,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/20",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig/Hdp::Package[pig]/Hdp::Package::Yum[pig]/Package[pig.noarch]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java pig]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig-env.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/content: content changed '{md5}ba02e0d405cdbafd081e13ab2ef06403' to '{md5}c53b9db5f04e0c66451e52b4b39c8b17'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/content: content changed '{md5}fd58c23539a391f6a74db6e22b67f7d3' to '{md5}82b233fb0a252aae098c7267aafeb01f'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 22.14 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java pig]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig-env.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/content: content changed '{md5}ba02e0d405cdbafd081e13ab2ef06403' to '{md5}c53b9db5f04e0c66451e52b4b39c8b17'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/content: content changed '{md5}fd58c23539a391f6a74db6e22b67f7d3' to '{md5}82b233fb0a252aae098c7267aafeb01f'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 22.14 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 20,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "PIG",
-        "start_time" : 1352863667299,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/8",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.24 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 133.95 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: 
/File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 133.95 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host5",
-        "id" : 8,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863665481,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/14",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/0/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/0/hadoop/mapred]/Exec[mkdir -p /grid/0/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/1/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/1/hadoop/mapred]/Exec[mkdir -p /grid/1/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}00ac2c1fbbe400ce3e93c316c03c8c8c' to '{md5}a973d9bcff056aeff7f22221886c84b7'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p 
/var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}1a3769d695902dba39b5645fef3766e0'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.51 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}1a3769d695902dba39b5645fef3766e0'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.51 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 14,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863666672,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/17",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}43bb2e790338badc9f17297bc958b536' to '{md5}0c93d35cf2bf9188e20542db3417f453'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}7c4fcfb891f7ca4a9156ba37d35b7dc4' to '{md5}fcfc81d25ae7ad5f5aaaacdc3d47f0f5'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.23 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}43bb2e790338badc9f17297bc958b536' to '{md5}0c93d35cf2bf9188e20542db3417f453'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}7c4fcfb891f7ca4a9156ba37d35b7dc4' to '{md5}fcfc81d25ae7ad5f5aaaacdc3d47f0f5'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.23 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 17,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "HDFS_CLIENT",
-        "start_time" : 1352863666987,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/12",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.10 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.10 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}00ac2c1fbbe400ce3e93c316c03c8c8c'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}54edf0ba61f6501cc49c0d7788b266b1'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/mode: mode changed '0755' to '0750'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 130.97 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 130.97 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host4",
-        "id" : 12,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863666165,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/16",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as 
'{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 29.27 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 29.27 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 16,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863666913,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/9",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[rrdtool-python]/Hdp::Package::Yum[rrdtool-python]/Package[python-rrdtool.x86_64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[ganglia-gweb]/Hdp::Package::Yum[ganglia-gweb]/Package[gweb]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[ganglia-server]/Hdp::Package::Yum[ganglia-server]/Package[ganglia-gmetad-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[ganglia-hdp-gweb-addons]/Hdp::Package::Yum[ganglia-hdp-gweb-addons]/Package[hdp_mon_ganglia_addons]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[gmetad]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/owner: owner changed 'apache' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/group: group changed 'apache' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/mode: mode changed '0755' to '0777'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver start]/Exec[monitor webserver start]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/www/cgi-bin/rrd.py]/ensure: defined content as '{md5}95b666a938f3080c370aeb6e3136cc6b'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 63.16 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[gmetad]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/owner: owner changed 'apache' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/group: group changed 'apache' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/mode: mode changed '0755' to '0777'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver start]/Exec[monitor webserver start]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/www/cgi-bin/rrd.py]/ensure: defined content as '{md5}95b666a938f3080c370aeb6e3136cc6b'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 63.16 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 9,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_SERVER",
-        "start_time" : 1352863665690,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/7",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/0/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/0/hadoop/mapred]/Exec[mkdir -p /grid/0/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/1/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/1/hadoop/mapred]/Exec[mkdir -p /grid/1/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}428a01fb131e6c37a876dd03a2940d79' to '{md5}a39a2689e76538c6d9090b00ceb04eb0'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p 
/var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}30e43dbdb225dad740d632ecc8f6ae11'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.96 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}30e43dbdb225dad740d632ecc8f6ae11'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.96 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 7,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863665177,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/15",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.14 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.10 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}43bb2e790338badc9f17297bc958b536'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}7c4fcfb891f7ca4a9156ba37d35b7dc4'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/mode: mode changed '0755' to '0750'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 125.19 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 125.19 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host2",
-        "id" : 15,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863666842,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/1",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.18 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 111.24 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 111.24 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host3",
-        "id" : 1,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863663984,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/3",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $lzo_enabled at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:37 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ensure at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:76 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}fc664190038e2562fe63acd61ea9480b' to '{md5}827a6e7bd4233d4dc82b20761aed1e30'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.12 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $lzo_enabled at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:37 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ensure at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:76 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}fc664190038e2562fe63acd61ea9480b' to '{md5}827a6e7bd4233d4dc82b20761aed1e30'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.12 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 3,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "OOZIE_SERVER",
-        "start_time" : 1352863664455,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/19",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[2]/Hdp-oozie/Hdp::Package[oozie-client]/Hdp::Package::Yum[oozie-client]/Package[oozie-client]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java oozie-client]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie/Hdp::User[oozie]/User[oozie]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/content: content changed '{md5}809b11d7098abd5cbcb08b9ceda104ed' to '{md5}851335f018fb288e30cf38afc96fff6a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/content: content changed '{md5}db07d1efafb9314e2e7d2c8a4d3ba6e7' to '{md5}42fd0d6fe7301cb54ea7129d6b930f59'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}11c7e0ea46916a75e66920fac8c93621' to '{md5}5ad6256cb12b73724714fc692c18db82'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 21.37 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/content: content changed '{md5}809b11d7098abd5cbcb08b9ceda104ed' to '{md5}851335f018fb288e30cf38afc96fff6a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/content: content changed '{md5}db07d1efafb9314e2e7d2c8a4d3ba6e7' to '{md5}42fd0d6fe7301cb54ea7129d6b930f59'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}11c7e0ea46916a75e66920fac8c93621' to '{md5}5ad6256cb12b73724714fc692c18db82'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 21.37 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 19,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "OOZIE_CLIENT",
-        "start_time" : 1352863667216,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/22",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/0/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/0/hadoop/mapred]/Exec[mkdir -p /grid/0/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/1/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/1/hadoop/mapred]/Exec[mkdir -p /grid/1/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}089df31bb24ce7450baba1a7541e4546' to '{md5}8da2518fdfc4a3723e64babe25c8d6d8'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p 
/var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}13834e68f9058d26d30e3e627bea2d08' to '{md5}816e416f74804ba21e3b80b611d59a11'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.75 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}13834e68f9058d26d30e3e627bea2d08' to '{md5}816e416f74804ba21e3b80b611d59a11'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.75 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 22,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863667578,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/13",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as 
'{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 31.06 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 31.06 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 13,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863666401,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/10",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-fping]/Hdp::Package[nagios-fping]/Hdp::Package::Yum[nagios-fping]/Package[fping]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-server]/Hdp::Package[nagios-server]/Hdp::Package::Yum[nagios-server]/Package[nagios-3.2.3]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-plugins]/Hdp::Package[nagios-plugins]/Hdp::Package::Yum[nagios-plugins]/Package[nagios-plugins-1.4.9]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-addons]/Hdp::Package[nagios-addons]/Hdp::Package::Yum[nagios-addons]/Package[hdp_mon_nagios_addons]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios]/owner: owner changed 'nagios' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects]/owner: owner changed 'nagios' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hdfs_blocks.php]/content: content changed '{md5}4dc9db0a18622b3e40e266a9b54e2a1a' to '{md5}e3a5fa882154aaabe18fce6086346b98'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_rpcq_latency.php]/content: content changed '{md5}4fa432abbf0a15a2c73cd8eb7e657d6e' to '{md5}1601eec7138fcd957159e6524900a1ce'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_cpu.pl]/ensure: defined content as '{md5}ab87290dee3f032770580d7d7713d086'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hostgroups.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_datanode_storage.php]/ensure: defined content as '{md5}eeae70d7c6686ff4ce9993244dbbdf34'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hosts.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-commands.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hive_metastore_status.sh]/content: content changed '{md5}56469a95f854711ff67e961eb91a1b9a' to '{md5}987ce3f5cd654d9e8d13e9bc9d4b4e16'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_aggregate.php]/content: content changed '{md5}eeddc4cf93d7ca7dbf1e6ea1effcc278' to '{md5}ebbacb754b35bcdab9b246a64589f7c6'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-servicegroups.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/contacts.cfg]/content: content changed '{md5}5ee6a28df66ed3cc6f76b46073ced9ac' to '{md5}05c47f03c0800b968023152479f24ccb'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/contacts.cfg]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_templeton_status.sh]/ensure: defined content as 
'{md5}017d82f1435f2a1059d13968b14174bc'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_puppet_agent_status.php]/ensure: defined content as '{md5}a623c36478c7ab4bf8df595822683762'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/content: content changed '{md5}3a4b1c8fd3b42affa5ae58a385a48000' to '{md5}870993486c9e4336086645a14d327ead'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-services.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_oozie_status.sh]/content: content changed '{md5}e672a044b2aa7163ceda92829d43e92c' to '{md5}3124bf8679ce198ba25509e430db325b'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_webui.sh]/content: content changed '{md5}f8e41fdb30d0cd838aefd7ea9e5af4a2' to '{md5}258d5ced025d4b14853cc0b12bdde1f0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hdfs_capacity.php]/content: content changed '{md5}a3391962ab1c1956610d0cb009df9b8c' to '{md5}398026717db006c423441a223731914f'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 52.10 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_templeton_status.sh]/ensure: defined content as '{md5}017d82f1435f2a1059d13968b14174bc'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_puppet_agent_status.php]/ensure: defined content as '{md5}a623c36478c7ab4bf8df595822683762'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/content: content changed '{md5}3a4b1c8fd3b42affa5ae58a385a48000' to '{md5}870993486c9e4336086645a14d327ead'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-services.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_oozie_status.sh]/content: content changed '{md5}e672a044b2aa7163ceda92829d43e92c' to '{md5}3124bf8679ce198ba25509e430db325b'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_webui.sh]/content: content changed '{md5}f8e41fdb30d0cd838aefd7ea9e5af4a2' to '{md5}258d5ced025d4b14853cc0b12bdde1f0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hdfs_capacity.php]/content: content changed '{md5}a3391962ab1c1956610d0cb009df9b8c' to '{md5}398026717db006c423441a223731914f'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 52.10 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 10,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "NAGIOS_SERVER",
-        "start_time" : 1352863665856,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/4",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Snamenode::Create_name_dirs[/grid/0/hadoop/hdfs/namesecondary]/Hdp::Directory_recursive_create[/grid/0/hadoop/hdfs/namesecondary]/Hdp::Exec[mkdir -p /grid/0/hadoop/hdfs/namesecondary]/Exec[mkdir -p /grid/0/hadoop/hdfs/namesecondary]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}de205131fc2bc8c15f7bd1329fc8ea0d' to '{md5}f60477f06af8d1d549460294d0363702'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}9a88c58cc1982799c0e4bdd2d1f1e6e0'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 4.84 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed 
'{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}9a88c58cc1982799c0e4bdd2d1f1e6e0'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 4.84 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 4,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "SECONDARY_NAMENODE",
-        "start_time" : 1352863664537,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/5",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}428a01fb131e6c37a876dd03a2940d79'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}c14eb8ab2bb5ab75789c875534ab64f4'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/mode: mode changed '0755' to '0750'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 148.09 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 148.09 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host1",
-        "id" : 5,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863664723,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/2",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ambari-log4j]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp::Package[ambari-log4j]/Hdp::Package::Yum[ambari-log4j]/Package[ambari-log4j]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Jobtracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/0/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/0/hadoop/mapred]/Exec[mkdir -p /grid/0/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Jobtracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/1/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/1/hadoop/mapred]/Exec[mkdir -p /grid/1/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}de205131fc2bc8c15f7bd1329fc8ea0d'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib/hadoop/lib/hadoop-tools.jar]/ensure: 
created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}cccc03b9f3384eac76957c7fe2f12849'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 78.36 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}cccc03b9f3384eac76957c7fe2f12849'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 78.36 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 2,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "JOBTRACKER",
-        "start_time" : 1352863664213,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/11",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 11,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "NAMENODE",
-        "start_time" : 1352863665939,
-        "stage_id" : 1
-      }
-    }
-  ]
-}
\ No newline at end of file
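
Note: the poll_N.json fixtures deleted here emulate the response of GET /api/clusters/<cluster>/requests/<id>?fields=tasks/* as consumed by the deploy wizard. A minimal sketch of a client polling that endpoint shape follows; the field names and the mock host http://ambari:8080 are taken from the fixtures above, while the helper names and the unauthenticated request are illustrative only (a real server would require Basic auth).

    # Minimal sketch of a poller for the request-tasks endpoint that these
    # fixtures emulate. Field names match the JSON above; host, helper names,
    # and the lack of authentication are assumptions for illustration.
    import json
    import time
    import urllib.request

    TERMINAL = {"COMPLETED", "FAILED", "ABORTED", "TIMEDOUT"}

    def fetch_tasks(base_url, cluster, request_id):
        """Fetch the inner 'Tasks' dicts for one request, as in poll_5.json."""
        url = f"{base_url}/api/clusters/{cluster}/requests/{request_id}?fields=tasks/*"
        with urllib.request.urlopen(url) as resp:
            body = json.load(resp)
        return [t["Tasks"] for t in body.get("tasks", [])]

    def poll_until_done(base_url, cluster, request_id, interval=5):
        """Poll until every task reaches a terminal status."""
        while True:
            tasks = fetch_tasks(base_url, cluster, request_id)
            done = [t for t in tasks if t["status"] in TERMINAL]
            print(f"{len(done)}/{len(tasks)} tasks finished")
            if len(done) == len(tasks):
                return tasks
            time.sleep(interval)

    # Example against the mock host used throughout the fixtures:
    # poll_until_done("http://ambari:8080", "mycluster", 1)
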
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/5_hosts/poll_6.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/5_hosts/poll_6.json
deleted file mode 100644
index a3c0a0c..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/5_hosts/poll_6.json
+++ /dev/null
@@ -1,405 +0,0 @@
-{
-  "href" : "http://ambari:8080/api/clusters/mycluster/requests/2?fields=tasks/*",
-  "Requests" : {
-    "id" : 2,
-    "cluster_name" : "mycluster"
-  },
-  "tasks" : [
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/37",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host4",
-        "id" : 37,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "TASKTRACKER",
-        "start_time" : -1,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/25",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host1",
-        "id" : 25,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/42",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 42,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "PIG_SERVICE_CHECK",
-        "start_time" : -1,
-        "stage_id" : 3
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/32",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 32,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "DATANODE",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/31",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host4",
-        "id" : 31,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/33",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 33,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/26",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 26,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/44",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 44,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "OOZIE_SERVICE_CHECK",
-        "start_time" : -1,
-        "stage_id" : 4
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/36",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host1",
-        "id" : 36,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "TASKTRACKER",
-        "start_time" : -1,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/34",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 34,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "JOBTRACKER",
-        "start_time" : -1,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/35",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 35,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "SECONDARY_NAMENODE",
-        "start_time" : -1,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/38",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 38,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "HDFS_SERVICE_CHECK",
-        "start_time" : -1,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/29",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 29,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "NAMENODE",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/28",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 28,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "NAGIOS_SERVER",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/24",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host1",
-        "id" : 24,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "DATANODE",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/40",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 40,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "OOZIE_SERVER",
-        "start_time" : -1,
-        "stage_id" : 3
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/39",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 39,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "TASKTRACKER",
-        "start_time" : -1,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/23",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 23,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/27",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 27,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_SERVER",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/30",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host4",
-        "id" : 30,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "DATANODE",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/43",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 43,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "SQOOP_SERVICE_CHECK",
-        "start_time" : -1,
-        "stage_id" : 3
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/41",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 41,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "MAPREDUCE_SERVICE_CHECK",
-        "start_time" : -1,
-        "stage_id" : 3
-      }
-    }
-  ]
-}
\ No newline at end of file
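
Across poll_5 through poll_7 the same task ids advance from PENDING through QUEUED to COMPLETED, with exit_code 999 serving as a placeholder until a real exit code is reported. A sketch of reducing one snapshot to a single progress figure, roughly what the wizard renders, follows; the per-status weights are assumptions for illustration, not Ambari's actual formula.

    # Sketch: reduce one polling snapshot (the "tasks" array above) to a
    # progress percentage. Weights are assumed values, not Ambari's own.
    STATUS_WEIGHT = {
        "PENDING": 0.0,      # not yet dispatched; exit_code still the 999 placeholder
        "QUEUED": 0.1,       # accepted by the agent, not running yet
        "IN_PROGRESS": 0.5,  # command running on the host
        "COMPLETED": 1.0,    # terminal success
        "FAILED": 1.0,       # terminal; counts as finished for progress purposes
    }

    def request_progress(tasks):
        """tasks: list of the inner 'Tasks' dicts from the fixture JSON."""
        if not tasks:
            return 0.0
        total = sum(STATUS_WEIGHT.get(t["status"], 0.0) for t in tasks)
        return 100.0 * total / len(tasks)

    # With poll_6.json, where all 22 tasks are PENDING, this yields 0.0;
    # the COMPLETED tasks in poll_7.json pull the figure upward.
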
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/5_hosts/poll_7.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/5_hosts/poll_7.json
deleted file mode 100644
index d0eb880..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/5_hosts/poll_7.json
+++ /dev/null
@@ -1,405 +0,0 @@
-{
-  "href" : "http://ambari:8080/api/clusters/mycluster/requests/2?fields=tasks/*",
-  "Requests" : {
-    "id" : 2,
-    "cluster_name" : "mycluster"
-  },
-  "tasks" : [
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/32",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}c892638e8c76c66f072640eb32b0637a' to '{md5}db0f2efdd03e4845c0528e1978b25644'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}fcfc81d25ae7ad5f5aaaacdc3d47f0f5' to '{md5}036cea2c613ff235499a7ed743be467f'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.38 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}c892638e8c76c66f072640eb32b0637a' to '{md5}db0f2efdd03e4845c0528e1978b25644'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}fcfc81d25ae7ad5f5aaaacdc3d47f0f5' to '{md5}036cea2c613ff235499a7ed743be467f'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.38 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 32,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "DATANODE",
-        "start_time" : 1352864090145,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/24",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}a39a2689e76538c6d9090b00ceb04eb0' to '{md5}9786ed97b221e37075bdb64400bc804a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}c14eb8ab2bb5ab75789c875534ab64f4' to '{md5}9684de67c2a8fa0f7292418d6c0c1651'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.78 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}a39a2689e76538c6d9090b00ceb04eb0' to '{md5}9786ed97b221e37075bdb64400bc804a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}c14eb8ab2bb5ab75789c875534ab64f4' to '{md5}9684de67c2a8fa0f7292418d6c0c1651'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.78 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 24,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "DATANODE",
-        "start_time" : 1352864089661,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/29",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}d22fc5749dde07b5b5acff255c490e9d' to '{md5}0617b67bc5192f5e44cf98b2fe25eb6f'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}0d021082a9258c648b5259d3af27ff62' to '{md5}39e33160b7f2933a12fc338a81ae9fcd'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/File[/tmp/checkForFormat.sh]/ensure: defined content as '{md5}5dd6bddf910d8ca9f6fefa44e7bbec7e'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: NameNode Dirname = /grid/0/hadoop/hdfs/namenode\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: NameNode Dirname = /grid/1/hadoop/hdfs/namenode\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:57 INFO namenode.NameNode: STARTUP_MSG: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: /************************************************************\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG: Starting NameNode\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG:   host = host5/10.118.58.228\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG:   args = [-format]\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG:   version = 1.1.0.1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG:   build =  -r ; compiled by 'jenkins' on Sat Nov 10 18:55:09 PST 2012\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: ************************************************************/\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: Re-format filesystem in /grid/0/hadoop/hdfs/namenode ? (Y or N) Re-format filesystem in /grid/1/hadoop/hdfs/namenode ? 
(Y or N) 12/11/13 22:36:58 INFO util.GSet: VM type       = 64-bit\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO util.GSet: 2% max memory = 19.2 MB\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO util.GSet: capacity      = 2^21 = 2097152 entries\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO util.GSet: recommended=2097152, actual=2097152\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: fsOwner=hdfs\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: supergroup=supergroup\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: isPermissionEnabled=true\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: dfs.block.invalidate.limit=100\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 WARN namenode.FSNamesystem: The dfs.support.append option is in your configuration, however append is not supported. This configuration option is no longer required to enable sync\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: isAccessTokenEnabled=false accessKeyUpdateInterval=0 min(s), accessTokenLifetime=0 min(s)\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.NameNode: Caching file names occuring more than 10 times \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO common.Storage: Image file of size 110 saved in 0 seconds.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.FSEditLog: closing edit log: position=4, editlog=/grid/0/hadoop/hdfs/namenode/current/edits\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.FSEditLog: close success: truncate to 4, editlog=/grid/0/hadoop/hdfs/namenode/current/edits\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO common.Storage: Storage directory /grid/0/hadoop/hdfs/namenode has been successfully formatted.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO common.Storage: Image file of size 110 saved in 0 seconds.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.FSEditLog: closing edit log: position=4, editlog=/grid/1/hadoop/hdfs/namenode/current/edits\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.FSEditLog: close success: truncate to 4, 
editlog=/grid/1/hadoop/hdfs/namenode/current/edits\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO common.Storage: Storage directory /grid/1/hadoop/hdfs/namenode has been successfully formatted.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.NameNode: SHUTDOWN_MSG: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: /************************************************************\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: SHUTDOWN_MSG: Shutting down NameNode at host5/10.118.58.228\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: ************************************************************/\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: yes: standard output: Broken pipe\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: yes: write error\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Hdp::Exec[set namenode mark]/Exec[set namenode mark]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Service[namenode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -mkdir /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred]/Hdp-hadoop::Exec-hadoop[fs -mkdir /mapred]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred]/Hdp-hadoop::Exec-hadoop[fs -chown mapred /mapred]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred]/Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -mkdir /mapred/system]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -chmod 775 /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 775 /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 775 /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -mkdir /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -chown mapred /mapred/system]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred/system]/Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred/system]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -chown hdfs /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown hdfs /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -chown hdfs /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -chmod 770 /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 770 /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 770 /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -chown ambari_qa /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown ambari_qa /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -chown ambari_qa /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -chmod 777 /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 777 /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 777 /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -chown oozie /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown oozie /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -chown oozie /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 51.23 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -mkdir /mapred/system]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -chmod 775 /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 775 /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 775 /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -mkdir /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -chown mapred /mapred/system]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred/system]/Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred/system]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -chown hdfs /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown hdfs /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -chown hdfs /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -chmod 770 /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 770 /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 770 /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -chown ambari_qa /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown ambari_qa /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -chown ambari_qa /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -chmod 777 /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 777 /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 777 /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -chown oozie /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown oozie /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -chown oozie /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 51.23 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 29,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "NAMENODE",
-        "start_time" : 1352864090025,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/28",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hostgroups.cfg]/content: content changed '{md5}ffff62426c4f7a42c1cb1ca44b324dad' to '{md5}21ad9f95dd93ee39fc87db07b7ea05be'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hosts.cfg]/content: content changed '{md5}fdcc51e399dd2381778a163933ef2beb' to '{md5}afbfd32db940db5fff4701c964169c27'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Services/Service[nagios]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Services/Service[nagios]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 8.78 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hostgroups.cfg]/content: content changed '{md5}ffff62426c4f7a42c1cb1ca44b324dad' to '{md5}21ad9f95dd93ee39fc87db07b7ea05be'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hosts.cfg]/content: content changed '{md5}fdcc51e399dd2381778a163933ef2beb' to '{md5}afbfd32db940db5fff4701c964169c27'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Services/Service[nagios]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Services/Service[nagios]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 8.78 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 28,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "NAGIOS_SERVER",
-        "start_time" : 1352864089985,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/23",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.77 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.77 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 23,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864089600,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/34",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}63d8feab1255e45d9549ccea14f687c4' to '{md5}4673b67e078cc9d84ffc4873e5198edf'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start jobtracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start jobtracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[historyserver]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start historyserver']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start historyserver']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}cccc03b9f3384eac76957c7fe2f12849' to '{md5}07e946dbf4ae6632034ee6715a085b92'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 9.76 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}63d8feab1255e45d9549ccea14f687c4' to '{md5}4673b67e078cc9d84ffc4873e5198edf'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start jobtracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start jobtracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[historyserver]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start historyserver']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start historyserver']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}cccc03b9f3384eac76957c7fe2f12849' to '{md5}07e946dbf4ae6632034ee6715a085b92'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 9.76 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 34,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "JOBTRACKER",
-        "start_time" : 1352864269447,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/41",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::create_file]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfs -rmr mapredsmokeoutput mapredsmokeinput ; hadoop dfs -put /etc/passwd mapredsmokeinput ]/Exec[hadoop --config /etc/hadoop/conf dfs -rmr mapredsmokeoutput mapredsmokeinput ; hadoop dfs -put /etc/passwd mapredsmokeinput ]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:13 INFO input.FileInputFormat: Total input paths to process : 1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:13 WARN snappy.LoadSnappy: Snappy native library is available\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:13 INFO util.NativeCodeLoader: Loaded the native-hadoop library\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:13 INFO snappy.LoadSnappy: Snappy native library loaded\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:14 INFO mapred.JobClient: Running job: job_201211132238_0001\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:15 INFO mapred.JobClient:  map 0% reduce 0%\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:30 INFO mapred.JobClient:  map 100% reduce 0%\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:38 INFO mapred.JobClient:  map 100% reduce 33%\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:40 INFO mapred.JobClient:  map 100% reduce 100%\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient: Job complete: job_201211132238_0001\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient: Counters: 29\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   Job Counters \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Launched reduce tasks=1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar 
/usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     SLOTS_MILLIS_MAPS=6106\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Total time spent by all reduces waiting after reserving slots (ms)=0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Total time spent by all maps waiting after reserving slots (ms)=0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Launched map tasks=1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Data-local map tasks=1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     SLOTS_MILLIS_REDUCES=9332\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   File Output Format Counters \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Bytes 
Written=1845\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   FileSystemCounters\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     FILE_BYTES_READ=2095\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     HDFS_BYTES_READ=1893\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     FILE_BYTES_WRITTEN=117522\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     HDFS_BYTES_WRITTEN=1845\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   File Input Format Counters \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Bytes Read=1755\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput 
mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   Map-Reduce Framework\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map output materialized bytes=2095\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map input records=36\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce shuffle bytes=2095\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Spilled Records=122\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map output bytes=2003\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     CPU time spent (ms)=1920\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Total committed heap usage 
(bytes)=433913856\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Combine input records=62\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     SPLIT_RAW_BYTES=138\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce input records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce input groups=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Combine output records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Physical memory (bytes) snapshot=381779968\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce output records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  
wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Virtual memory (bytes) snapshot=2704003072\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map output records=62\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf fs -test -e mapredsmokeoutput]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 37.52 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce input records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce input groups=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Combine output records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Physical memory (bytes) snapshot=381779968\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce output records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Virtual memory (bytes) snapshot=2704003072\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map output records=62\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf fs -test -e mapredsmokeoutput]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 37.52 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 41,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "MAPREDUCE_SERVICE_CHECK",
-        "start_time" : 1352864331797,
-        "stage_id" : 3
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/27",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[gmetad]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Gmetad/Hdp::Exec[hdp-gmetad service]/Exec[hdp-gmetad service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 8.14 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[gmetad]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Gmetad/Hdp::Exec[hdp-gmetad service]/Exec[hdp-gmetad service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 8.14 
seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 27,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_SERVER",
-        "start_time" : 1352864089883,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/35",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}4673b67e078cc9d84ffc4873e5198edf' to '{md5}654e54e7c3f58aa3d37d07110ad63bb5'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}d3b2d5e47669c948fccb907fa32c2b55' to '{md5}0e079fd5bc7cc43a35b60012c9ee00d9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.45 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}4673b67e078cc9d84ffc4873e5198edf' to '{md5}654e54e7c3f58aa3d37d07110ad63bb5'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}d3b2d5e47669c948fccb907fa32c2b55' to '{md5}0e079fd5bc7cc43a35b60012c9ee00d9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.45 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 35,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "SECONDARY_NAMENODE",
-        "start_time" : 1352864269474,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/37",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}df2d55356b238461af57fe22ad993e4d' to '{md5}62a467fcccda8169de563170e39e3419'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}1a3769d695902dba39b5645fef3766e0' to '{md5}23097908e8b54f7dbc4d31b5d26d21e7'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.66 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}df2d55356b238461af57fe22ad993e4d' to '{md5}62a467fcccda8169de563170e39e3419'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}1a3769d695902dba39b5645fef3766e0' to '{md5}23097908e8b54f7dbc4d31b5d26d21e7'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.66 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 37,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352864269589,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/43",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 43,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "SQOOP_SERVICE_CHECK",
-        "start_time" : 1352864331830,
-        "stage_id" : 3
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/40",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $lzo_enabled at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:37 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ensure at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:76 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}827a6e7bd4233d4dc82b20761aed1e30' to '{md5}4e59b973cec0811615008a580244bcdb'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp::Exec[/bin/sh -c 'cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz && mkdir -p /var/tmp/oozie && chown oozie:hadoop /var/tmp/oozie && cd /var/tmp/oozie' && su - oozie -c '/usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 /usr/lib/hadoop/ -extjs /usr/share/HDP-oozie/ext.zip -jars /usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run ; hadoop dfs -put /usr/lib/oozie/share share ; hadoop dfs -chmod -R 755 /user/oozie/share && /usr/lib/oozie/bin/oozie-start.sh' ]/Exec[/bin/sh -c 'cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz && mkdir -p /var/tmp/oozie && chown oozie:hadoop /var/tmp/oozie && cd /var/tmp/oozie' && su - oozie -c '/usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 /usr/lib/hadoop/ -extjs /usr/share/HDP-oozie/ext.zip -jars /usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run ; hadoop dfs -put /usr/lib/oozie/share share ; hadoop dfs -chmod -R 755 /user/oozie/share && /usr/lib/oozie/bin/oozie-start.sh' ]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 36.67 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $lzo_enabled at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:37 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ensure at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:76 is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}827a6e7bd4233d4dc82b20761aed1e30' to '{md5}4e59b973cec0811615008a580244bcdb'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp::Exec[/bin/sh -c 'cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz && mkdir -p /var/tmp/oozie && chown oozie:hadoop /var/tmp/oozie && cd /var/tmp/oozie' && su - oozie -c '/usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 /usr/lib/hadoop/ -extjs /usr/share/HDP-oozie/ext.zip -jars /usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run ; hadoop dfs -put /usr/lib/oozie/share share ; hadoop dfs -chmod -R 755 /user/oozie/share && /usr/lib/oozie/bin/oozie-start.sh' ]/Exec[/bin/sh -c 'cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz && mkdir -p /var/tmp/oozie && chown oozie:hadoop /var/tmp/oozie && cd /var/tmp/oozie' && su - oozie -c '/usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 /usr/lib/hadoop/ -extjs /usr/share/HDP-oozie/ext.zip -jars /usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run ; hadoop dfs -put /usr/lib/oozie/share share ; hadoop dfs -chmod -R 755 /user/oozie/share && /usr/lib/oozie/bin/oozie-start.sh' ]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 36.67 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 40,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "OOZIE_SERVER",
-        "start_time" : 1352864331712,
-        "stage_id" : 3
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/33",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.72 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.72 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 33,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864090181,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/25",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.84 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.84 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 25,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864089770,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/31",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.15 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 31,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864090105,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/44",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 44,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "OOZIE_SERVICE_CHECK",
-        "start_time" : -1,
-        "stage_id" : 4
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/26",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 7.68 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 7.68 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 26,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864089836,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/30",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}a973d9bcff056aeff7f22221886c84b7' to '{md5}df2d55356b238461af57fe22ad993e4d'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}54edf0ba61f6501cc49c0d7788b266b1' to '{md5}b25bda7a405235227d20732f0972c5f6'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.00 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}a973d9bcff056aeff7f22221886c84b7' to '{md5}df2d55356b238461af57fe22ad993e4d'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}54edf0ba61f6501cc49c0d7788b266b1' to '{md5}b25bda7a405235227d20732f0972c5f6'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.00 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 30,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "DATANODE",
-        "start_time" : 1352864090068,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/38",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::check_safemode]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/returns: Safe mode is OFF\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::check_safemode]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::create_file]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -rm /tmp/id280a7781_date381312; hadoop fs -put /etc/passwd /tmp/id280a7781_date381312]/Exec[hadoop --config /etc/hadoop/conf fs -rm /tmp/id280a7781_date381312; hadoop fs -put /etc/passwd /tmp/id280a7781_date381312]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e /tmp/id280a7781_date381312]/Exec[hadoop --config /etc/hadoop/conf fs -test -e /tmp/id280a7781_date381312]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 10.35 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::check_safemode]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/returns: Safe mode is OFF\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::check_safemode]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::create_file]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -rm /tmp/id280a7781_date381312; hadoop fs -put /etc/passwd /tmp/id280a7781_date381312]/Exec[hadoop --config /etc/hadoop/conf fs -rm /tmp/id280a7781_date381312; hadoop fs -put /etc/passwd /tmp/id280a7781_date381312]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e /tmp/id280a7781_date381312]/Exec[hadoop --config /etc/hadoop/conf fs -test -e /tmp/id280a7781_date381312]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 10.35 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 38,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "HDFS_SERVICE_CHECK",
-        "start_time" : 1352864269616,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/42",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 42,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "PIG_SERVICE_CHECK",
-        "start_time" : 1352864331815,
-        "stage_id" : 3
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/39",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}db0f2efdd03e4845c0528e1978b25644' to '{md5}84df095b5569e720b4aeaf4a96e0ee6d'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}f72b50738651b3cb6bcef039b59ffdcb' to '{md5}e750ca8f3497b9a4656f782dcf335dab'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.29 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}db0f2efdd03e4845c0528e1978b25644' to '{md5}84df095b5569e720b4aeaf4a96e0ee6d'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}f72b50738651b3cb6bcef039b59ffdcb' to '{md5}e750ca8f3497b9a4656f782dcf335dab'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.29 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 39,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352864269636,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/36",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}9786ed97b221e37075bdb64400bc804a' to '{md5}8e06d7ec24fe5acd81917162d58857db'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}30e43dbdb225dad740d632ecc8f6ae11' to '{md5}558aadf67e4d29865a6d935076d3868b'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.20 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}9786ed97b221e37075bdb64400bc804a' to '{md5}8e06d7ec24fe5acd81917162d58857db'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}30e43dbdb225dad740d632ecc8f6ae11' to '{md5}558aadf67e4d29865a6d935076d3868b'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.20 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 36,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352864269562,
-        "stage_id" : 2
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/5_hosts/poll_8.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/5_hosts/poll_8.json
deleted file mode 100644
index 92b6401..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/5_hosts/poll_8.json
+++ /dev/null
@@ -1,405 +0,0 @@
-{
-  "href" : "http://ambari:8080/api/clusters/mycluster/requests/2?fields=tasks/*",
-  "Requests" : {
-    "id" : 2,
-    "cluster_name" : "mycluster"
-  },
-  "tasks" : [
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/29",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}d22fc5749dde07b5b5acff255c490e9d' to '{md5}0617b67bc5192f5e44cf98b2fe25eb6f'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}0d021082a9258c648b5259d3af27ff62' to '{md5}39e33160b7f2933a12fc338a81ae9fcd'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/File[/tmp/checkForFormat.sh]/ensure: defined content as '{md5}5dd6bddf910d8ca9f6fefa44e7bbec7e'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: NameNode Dirname = /grid/0/hadoop/hdfs/namenode\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: NameNode Dirname = /grid/1/hadoop/hdfs/namenode\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:57 INFO namenode.NameNode: STARTUP_MSG: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: /************************************************************\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG: Starting NameNode\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG:   host = host5/10.118.58.228\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG:   args = [-format]\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG:   version = 1.1.0.1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG:   build =  -r ; compiled by 'jenkins' on Sat Nov 10 18:55:09 PST 2012\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: ************************************************************/\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: Re-format filesystem in /grid/0/hadoop/hdfs/namenode ? (Y or N) Re-format filesystem in /grid/1/hadoop/hdfs/namenode ? 
(Y or N) 12/11/13 22:36:58 INFO util.GSet: VM type       = 64-bit\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO util.GSet: 2% max memory = 19.2 MB\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO util.GSet: capacity      = 2^21 = 2097152 entries\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO util.GSet: recommended=2097152, actual=2097152\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: fsOwner=hdfs\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: supergroup=supergroup\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: isPermissionEnabled=true\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: dfs.block.invalidate.limit=100\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 WARN namenode.FSNamesystem: The dfs.support.append option is in your configuration, however append is not supported. This configuration option is no longer required to enable sync\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: isAccessTokenEnabled=false accessKeyUpdateInterval=0 min(s), accessTokenLifetime=0 min(s)\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.NameNode: Caching file names occuring more than 10 times \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO common.Storage: Image file of size 110 saved in 0 seconds.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.FSEditLog: closing edit log: position=4, editlog=/grid/0/hadoop/hdfs/namenode/current/edits\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.FSEditLog: close success: truncate to 4, editlog=/grid/0/hadoop/hdfs/namenode/current/edits\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO common.Storage: Storage directory /grid/0/hadoop/hdfs/namenode has been successfully formatted.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO common.Storage: Image file of size 110 saved in 0 seconds.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.FSEditLog: closing edit log: position=4, editlog=/grid/1/hadoop/hdfs/namenode/current/edits\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.FSEditLog: close success: truncate to 4, 
editlog=/grid/1/hadoop/hdfs/namenode/current/edits\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO common.Storage: Storage directory /grid/1/hadoop/hdfs/namenode has been successfully formatted.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.NameNode: SHUTDOWN_MSG: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: /************************************************************\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: SHUTDOWN_MSG: Shutting down NameNode at host5/10.118.58.228\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: ************************************************************/\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: yes: standard output: Broken pipe\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: yes: write error\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Hdp::Exec[set namenode mark]/Exec[set namenode mark]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Service[namenode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -mkdir /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred]/Hdp-hadoop::Exec-hadoop[fs -mkdir /mapred]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred]/Hdp-hadoop::Exec-hadoop[fs -chown mapred /mapred]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred]/Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -mkdir /mapred/system]/Hdp::Exec[hadoop --config 
/etc/hadoop/conf fs -mkdir /mapred/system]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -chmod 775 /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 775 /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 775 /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -mkdir /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -chown mapred /mapred/system]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred/system]/Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred/system]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -chown hdfs /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown hdfs /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -chown hdfs /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -chmod 770 /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 770 /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 770 /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -chown ambari_qa /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown ambari_qa /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -chown ambari_qa /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -chmod 777 /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 777 /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 777 /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -chown oozie /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown oozie /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -chown oozie /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 51.23 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -mkdir 
/mapred/system]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -chmod 775 /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 775 /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 775 /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -mkdir /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -chown mapred /mapred/system]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred/system]/Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred/system]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -chown hdfs /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown hdfs /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -chown hdfs /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -chmod 770 /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 770 /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 770 /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -chown ambari_qa /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown ambari_qa /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -chown ambari_qa /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -chmod 777 /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 777 /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 777 /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -chown oozie /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown oozie /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -chown oozie /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 51.23 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 29,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "NAMENODE",
-        "start_time" : 1352864090025,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/27",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[gmetad]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Gmetad/Hdp::Exec[hdp-gmetad service]/Exec[hdp-gmetad service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 8.14 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[gmetad]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Gmetad/Hdp::Exec[hdp-gmetad service]/Exec[hdp-gmetad service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 8.14 
seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 27,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_SERVER",
-        "start_time" : 1352864089883,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/41",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::create_file]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfs -rmr mapredsmokeoutput mapredsmokeinput ; hadoop dfs -put /etc/passwd mapredsmokeinput ]/Exec[hadoop --config /etc/hadoop/conf dfs -rmr mapredsmokeoutput mapredsmokeinput ; hadoop dfs -put /etc/passwd mapredsmokeinput ]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:13 INFO input.FileInputFormat: Total input paths to process : 1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:13 WARN snappy.LoadSnappy: Snappy native library is available\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:13 INFO util.NativeCodeLoader: Loaded the native-hadoop library\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:13 INFO snappy.LoadSnappy: Snappy native library loaded\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:14 INFO mapred.JobClient: Running job: job_201211132238_0001\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:15 INFO mapred.JobClient:  map 0% reduce 0%\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:30 INFO mapred.JobClient:  map 100% reduce 0%\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:38 INFO mapred.JobClient:  map 100% reduce 33%\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:40 INFO mapred.JobClient:  map 100% reduce 100%\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient: Job complete: job_201211132238_0001\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient: Counters: 29\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   Job Counters \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Launched reduce tasks=1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar 
/usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     SLOTS_MILLIS_MAPS=6106\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Total time spent by all reduces waiting after reserving slots (ms)=0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Total time spent by all maps waiting after reserving slots (ms)=0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Launched map tasks=1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Data-local map tasks=1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     SLOTS_MILLIS_REDUCES=9332\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   File Output Format Counters \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Bytes 
Written=1845\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   FileSystemCounters\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     FILE_BYTES_READ=2095\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     HDFS_BYTES_READ=1893\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     FILE_BYTES_WRITTEN=117522\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     HDFS_BYTES_WRITTEN=1845\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   File Input Format Counters \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Bytes Read=1755\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput 
mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   Map-Reduce Framework\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map output materialized bytes=2095\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map input records=36\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce shuffle bytes=2095\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Spilled Records=122\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map output bytes=2003\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     CPU time spent (ms)=1920\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Total committed heap usage 
(bytes)=433913856\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Combine input records=62\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     SPLIT_RAW_BYTES=138\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce input records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce input groups=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Combine output records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Physical memory (bytes) snapshot=381779968\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce output records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  
wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Virtual memory (bytes) snapshot=2704003072\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map output records=62\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf fs -test -e mapredsmokeoutput]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 37.52 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce input records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce input groups=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Combine output records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Physical memory (bytes) snapshot=381779968\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce output records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Virtual memory (bytes) snapshot=2704003072\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map output records=62\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf fs -test -e mapredsmokeoutput]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 37.52 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 41,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "MAPREDUCE_SERVICE_CHECK",
-        "start_time" : 1352864331797,
-        "stage_id" : 3
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/24",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}a39a2689e76538c6d9090b00ceb04eb0' to '{md5}9786ed97b221e37075bdb64400bc804a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}c14eb8ab2bb5ab75789c875534ab64f4' to '{md5}9684de67c2a8fa0f7292418d6c0c1651'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.78 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}a39a2689e76538c6d9090b00ceb04eb0' to '{md5}9786ed97b221e37075bdb64400bc804a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}c14eb8ab2bb5ab75789c875534ab64f4' to '{md5}9684de67c2a8fa0f7292418d6c0c1651'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.78 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 24,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "DATANODE",
-        "start_time" : 1352864089661,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/44",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 44,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "OOZIE_SERVICE_CHECK",
-        "start_time" : 1352864442993,
-        "stage_id" : 4
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/23",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.77 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.77 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 23,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864089600,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/40",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $lzo_enabled at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:37 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ensure at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:76 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}827a6e7bd4233d4dc82b20761aed1e30' to '{md5}4e59b973cec0811615008a580244bcdb'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp::Exec[/bin/sh -c 'cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz && mkdir -p /var/tmp/oozie && chown oozie:hadoop /var/tmp/oozie && cd /var/tmp/oozie' && su - oozie -c '/usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 /usr/lib/hadoop/ -extjs /usr/share/HDP-oozie/ext.zip -jars /usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run ; hadoop dfs -put /usr/lib/oozie/share share ; hadoop dfs -chmod -R 755 /user/oozie/share && /usr/lib/oozie/bin/oozie-start.sh' ]/Exec[/bin/sh -c 'cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz && mkdir -p /var/tmp/oozie && chown oozie:hadoop /var/tmp/oozie && cd /var/tmp/oozie' && su - oozie -c '/usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 /usr/lib/hadoop/ -extjs /usr/share/HDP-oozie/ext.zip -jars /usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run ; hadoop dfs -put /usr/lib/oozie/share share ; hadoop dfs -chmod -R 755 /user/oozie/share && /usr/lib/oozie/bin/oozie-start.sh' ]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 36.67 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $lzo_enabled at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:37 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ensure at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:76 is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}827a6e7bd4233d4dc82b20761aed1e30' to '{md5}4e59b973cec0811615008a580244bcdb'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp::Exec[/bin/sh -c 'cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz && mkdir -p /var/tmp/oozie && chown oozie:hadoop /var/tmp/oozie && cd /var/tmp/oozie' && su - oozie -c '/usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 /usr/lib/hadoop/ -extjs /usr/share/HDP-oozie/ext.zip -jars /usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run ; hadoop dfs -put /usr/lib/oozie/share share ; hadoop dfs -chmod -R 755 /user/oozie/share && /usr/lib/oozie/bin/oozie-start.sh' ]/Exec[/bin/sh -c 'cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz && mkdir -p /var/tmp/oozie && chown oozie:hadoop /var/tmp/oozie && cd /var/tmp/oozie' && su - oozie -c '/usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 /usr/lib/hadoop/ -extjs /usr/share/HDP-oozie/ext.zip -jars /usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run ; hadoop dfs -put /usr/lib/oozie/share share ; hadoop dfs -chmod -R 755 /user/oozie/share && /usr/lib/oozie/bin/oozie-start.sh' ]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 36.67 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 40,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "OOZIE_SERVER",
-        "start_time" : 1352864331712,
-        "stage_id" : 3
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/34",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}63d8feab1255e45d9549ccea14f687c4' to '{md5}4673b67e078cc9d84ffc4873e5198edf'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start jobtracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start jobtracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[historyserver]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start historyserver']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start historyserver']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}cccc03b9f3384eac76957c7fe2f12849' to '{md5}07e946dbf4ae6632034ee6715a085b92'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 9.76 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}63d8feab1255e45d9549ccea14f687c4' to '{md5}4673b67e078cc9d84ffc4873e5198edf'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start jobtracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start jobtracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[historyserver]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start historyserver']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start historyserver']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}cccc03b9f3384eac76957c7fe2f12849' to '{md5}07e946dbf4ae6632034ee6715a085b92'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 9.76 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 34,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "JOBTRACKER",
-        "start_time" : 1352864269447,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/32",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}c892638e8c76c66f072640eb32b0637a' to '{md5}db0f2efdd03e4845c0528e1978b25644'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}fcfc81d25ae7ad5f5aaaacdc3d47f0f5' to '{md5}036cea2c613ff235499a7ed743be467f'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.38 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}c892638e8c76c66f072640eb32b0637a' to '{md5}db0f2efdd03e4845c0528e1978b25644'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}fcfc81d25ae7ad5f5aaaacdc3d47f0f5' to '{md5}036cea2c613ff235499a7ed743be467f'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.38 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 32,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "DATANODE",
-        "start_time" : 1352864090145,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/42",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Hdp-hadoop::Exec-hadoop[pig::service_check::create_file]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfs -rmr pigsmoke.out passwd; hadoop dfs -put /etc/passwd passwd ]/Exec[hadoop --config /etc/hadoop/conf dfs -rmr pigsmoke.out passwd; hadoop dfs -put /etc/passwd passwd ]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/File[/tmp/pigSmoke.sh]/ensure: defined content as '{md5}feac231e484c08e3bc5f83d0ee189a8c'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:39:58,367 [main] INFO  org.apache.pig.Main - Apache Pig version 0.10.0.1 (rexported) compiled Nov 10 2012, 19:10:20\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:39:58,368 [main] INFO  org.apache.pig.Main - Logging error messages to: /home/ambari_qa/pig_1352864398364.log\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:39:58,789 [main] INFO  org.apache.pig.backend.hadoop.executionengine.HExecutionEngine - Connecting to hadoop file system at: hdfs://host5:8020\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:39:59,058 [main] INFO  org.apache.pig.backend.hadoop.executionengine.HExecutionEngine - Connecting to map-reduce job tracker at: host3:50300\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:39:59,907 [main] INFO  org.apache.pig.tools.pigstats.ScriptState - Pig features used in the script: UNKNOWN\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:00,158 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MRCompiler - File concatenation threshold: 100 optimistic? 
false\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:00,183 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MultiQueryOptimizer - MR plan size before optimization: 1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:00,183 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MultiQueryOptimizer - MR plan size after optimization: 1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:00,288 [main] INFO  org.apache.pig.tools.pigstats.ScriptState - Pig script settings are added to the job\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:00,312 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.JobControlCompiler - mapred.job.reduce.markreset.buffer.percent is not set, set to default 0.3\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:00,315 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.JobControlCompiler - creating jar file Job4537005419718909074.jar\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,356 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.JobControlCompiler - jar file Job4537005419718909074.jar created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,377 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.JobControlCompiler - Setting up single store job\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,432 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - 1 map-reduce job(s) waiting for submission.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,932 [Thread-6] INFO  org.apache.hadoop.mapreduce.lib.input.FileInputFormat - Total input paths to process : 1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,933 [Thread-6] INFO  org.apache.pig.backend.hadoop.executionengine.util.MapRedUtil - Total input paths to process : 1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,934 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - 0% complete\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,971 [Thread-6] WARN  org.apache.hadoop.io.compress.snappy.LoadSnappy - Snappy native library is available\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,973 [Thread-6] INFO  org.apache.hadoop.util.NativeCodeLoader - Loaded the native-hadoop library\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,973 [Thread-6] INFO  org.apache.hadoop.io.compress.snappy.LoadSnappy - Snappy native library loaded\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,977 
[Thread-6] INFO  org.apache.pig.backend.hadoop.executionengine.util.MapRedUtil - Total input paths (combined) to process : 1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:06,811 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - HadoopJobId: job_201211132238_0002\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:06,812 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - More information at: http://host3:50030/jobdetails.jsp?jobid=job_201211132238_0002\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:17,380 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - 50% complete\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:21,432 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - 100% complete\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:21,434 [main] INFO  org.apache.pig.tools.pigstats.SimplePigStats - Script Statistics: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: HadoopVersion\tPigVersion\tUserId\tStartedAt\tFinishedAt\tFeatures\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 1.1.0.1\t0.10.0.1\tambari_qa\t2012-11-13 22:40:00\t2012-11-13 22:40:21\tUNKNOWN\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Success!\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Job Stats (time in seconds):\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: JobId\tMaps\tReduces\tMaxMapTime\tMinMapTIme\tAvgMapTime\tMaxReduceTime\tMinReduceTime\tAvgReduceTime\tAlias\tFeature\tOutputs\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: job_201211132238_0002\t1\t0\t3\t3\t3\t0\t0\t0\tA,B\tMAP_ONLY\thdfs://host5:8020/user/ambari_qa/pigsmoke.out,\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Input(s):\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Successfully read 36 records (2137 bytes) from: \"hdfs://host5:8020/user/ambari_qa/passwd\"\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Output(s):\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Successfully stored 36 records (236 bytes) in: \"hdfs://host5:8020/user/ambari_qa/pigsmoke.out\"\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Counters:\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Total records written : 36\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Total bytes written : 236\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Spillable Memory Manager spill count : 0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Total bags proactively spilled: 0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Total records proactively spilled: 0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Job DAG:\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: job_201211132238_0002\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:21,446 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - Success!\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Hdp-hadoop::Exec-hadoop[pig::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e pigsmoke.out]/Exec[hadoop --config /etc/hadoop/conf fs -test -e pigsmoke.out]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 32.06 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Total records proactively spilled: 0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Job DAG:\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: job_201211132238_0002\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:21,446 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - Success!\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Hdp-hadoop::Exec-hadoop[pig::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e pigsmoke.out]/Exec[hadoop --config /etc/hadoop/conf fs -test -e pigsmoke.out]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 32.06 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 42,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "PIG_SERVICE_CHECK",
-        "start_time" : 1352864331815,
-        "stage_id" : 3
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/35",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}4673b67e078cc9d84ffc4873e5198edf' to '{md5}654e54e7c3f58aa3d37d07110ad63bb5'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}d3b2d5e47669c948fccb907fa32c2b55' to '{md5}0e079fd5bc7cc43a35b60012c9ee00d9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.45 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}4673b67e078cc9d84ffc4873e5198edf' to '{md5}654e54e7c3f58aa3d37d07110ad63bb5'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}d3b2d5e47669c948fccb907fa32c2b55' to '{md5}0e079fd5bc7cc43a35b60012c9ee00d9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.45 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 35,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "SECONDARY_NAMENODE",
-        "start_time" : 1352864269474,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/43",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: Sqoop 1.4.2.1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: git commit id ea3b95785b3daf62c68f1eb0e645636acc00d0c2\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: Compiled by jenkins on Sat Nov 10 19:14:01 PST 2012\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: Sqoop 1.4.2.1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: git commit id ea3b95785b3daf62c68f1eb0e645636acc00d0c2\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: Compiled by jenkins on Sat Nov 10 19:14:01 PST 2012\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.15 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 43,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "SQOOP_SERVICE_CHECK",
-        "start_time" : 1352864331830,
-        "stage_id" : 3
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/31",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.15 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 31,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864090105,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/33",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.72 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.72 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 33,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864090181,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/25",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.84 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.84 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 25,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864089770,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/38",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::check_safemode]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/returns: Safe mode is OFF\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::check_safemode]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::create_file]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -rm /tmp/id280a7781_date381312; hadoop fs -put /etc/passwd /tmp/id280a7781_date381312]/Exec[hadoop --config /etc/hadoop/conf fs -rm /tmp/id280a7781_date381312; hadoop fs -put /etc/passwd /tmp/id280a7781_date381312]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e /tmp/id280a7781_date381312]/Exec[hadoop --config /etc/hadoop/conf fs -test -e /tmp/id280a7781_date381312]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 10.35 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::check_safemode]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/returns: Safe mode is OFF\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::check_safemode]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::create_file]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -rm /tmp/id280a7781_date381312; hadoop fs -put /etc/passwd /tmp/id280a7781_date381312]/Exec[hadoop --config /etc/hadoop/conf fs -rm /tmp/id280a7781_date381312; hadoop fs -put /etc/passwd /tmp/id280a7781_date381312]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e /tmp/id280a7781_date381312]/Exec[hadoop --config /etc/hadoop/conf fs -test -e /tmp/id280a7781_date381312]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 10.35 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 38,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "HDFS_SERVICE_CHECK",
-        "start_time" : 1352864269616,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/28",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hostgroups.cfg]/content: content changed '{md5}ffff62426c4f7a42c1cb1ca44b324dad' to '{md5}21ad9f95dd93ee39fc87db07b7ea05be'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hosts.cfg]/content: content changed '{md5}fdcc51e399dd2381778a163933ef2beb' to '{md5}afbfd32db940db5fff4701c964169c27'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Services/Service[nagios]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Services/Service[nagios]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 8.78 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hostgroups.cfg]/content: content changed '{md5}ffff62426c4f7a42c1cb1ca44b324dad' to '{md5}21ad9f95dd93ee39fc87db07b7ea05be'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hosts.cfg]/content: content changed '{md5}fdcc51e399dd2381778a163933ef2beb' to '{md5}afbfd32db940db5fff4701c964169c27'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Services/Service[nagios]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Services/Service[nagios]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 8.78 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 28,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "NAGIOS_SERVER",
-        "start_time" : 1352864089985,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/37",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}df2d55356b238461af57fe22ad993e4d' to '{md5}62a467fcccda8169de563170e39e3419'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}1a3769d695902dba39b5645fef3766e0' to '{md5}23097908e8b54f7dbc4d31b5d26d21e7'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.66 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}df2d55356b238461af57fe22ad993e4d' to '{md5}62a467fcccda8169de563170e39e3419'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}1a3769d695902dba39b5645fef3766e0' to '{md5}23097908e8b54f7dbc4d31b5d26d21e7'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.66 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 37,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352864269589,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/30",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}a973d9bcff056aeff7f22221886c84b7' to '{md5}df2d55356b238461af57fe22ad993e4d'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}54edf0ba61f6501cc49c0d7788b266b1' to '{md5}b25bda7a405235227d20732f0972c5f6'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.00 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}a973d9bcff056aeff7f22221886c84b7' to '{md5}df2d55356b238461af57fe22ad993e4d'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}54edf0ba61f6501cc49c0d7788b266b1' to '{md5}b25bda7a405235227d20732f0972c5f6'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.00 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 30,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "DATANODE",
-        "start_time" : 1352864090068,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/26",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 7.68 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 7.68 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 26,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864089836,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/36",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}9786ed97b221e37075bdb64400bc804a' to '{md5}8e06d7ec24fe5acd81917162d58857db'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}30e43dbdb225dad740d632ecc8f6ae11' to '{md5}558aadf67e4d29865a6d935076d3868b'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.20 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}9786ed97b221e37075bdb64400bc804a' to '{md5}8e06d7ec24fe5acd81917162d58857db'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}30e43dbdb225dad740d632ecc8f6ae11' to '{md5}558aadf67e4d29865a6d935076d3868b'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.20 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 36,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352864269562,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/39",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}db0f2efdd03e4845c0528e1978b25644' to '{md5}84df095b5569e720b4aeaf4a96e0ee6d'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}f72b50738651b3cb6bcef039b59ffdcb' to '{md5}e750ca8f3497b9a4656f782dcf335dab'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.29 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}db0f2efdd03e4845c0528e1978b25644' to '{md5}84df095b5569e720b4aeaf4a96e0ee6d'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}f72b50738651b3cb6bcef039b59ffdcb' to '{md5}e750ca8f3497b9a4656f782dcf335dab'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.29 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 39,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352864269636,
-        "stage_id" : 2
-      }
-    }
-  ]
-}
\ No newline at end of file
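The poll_9.json fixture deleted below stubs the deploy wizard's progress endpoint: GET /api/clusters/mycluster/requests/2?fields=tasks/* returns a "tasks" array whose "Tasks" objects carry status, role, stage_id, and exit_code (the fixture reports exit_code 999 while a task is still QUEUED). As a rough sketch of how a client might consume that shape, and not Ambari's own code, the snippet polls the endpoint and rolls task states up per stage; the host, cluster name, request id, admin:admin credentials, and the choice of terminal states are assumptions layered onto what the fixture shows.

# Sketch only: poll an Ambari request for task progress, assuming the
# response shape seen in the poll_*.json fixtures removed in this diff.
# Host, cluster, request id, and credentials below are placeholders.
import base64
import json
import time
import urllib.request
from collections import Counter

URL = "http://ambari:8080/api/clusters/mycluster/requests/2?fields=tasks/*"
AUTH = base64.b64encode(b"admin:admin").decode("ascii")  # hypothetical login

def fetch_tasks(url=URL):
    # The fixture wraps each task's fields in a "Tasks" object inside "tasks".
    req = urllib.request.Request(url, headers={"Authorization": "Basic " + AUTH})
    with urllib.request.urlopen(req) as resp:
        return [entry["Tasks"] for entry in json.load(resp)["tasks"]]

def poll_until_done(interval=5.0):
    while True:
        tasks = fetch_tasks()
        # Group task statuses by stage_id, roughly what the wizard UI
        # renders on each polling cycle (e.g. stage 2: COMPLETED x5).
        by_stage = {}
        for t in tasks:
            by_stage.setdefault(t["stage_id"], Counter())[t["status"]] += 1
        for stage in sorted(by_stage):
            print("stage %d: %s" % (stage, dict(by_stage[stage])))
        # Treating these as the terminal states is an assumption; the
        # fixtures themselves only exhibit COMPLETED and QUEUED.
        if all(t["status"] in ("COMPLETED", "FAILED", "ABORTED") for t in tasks):
            return tasks
        time.sleep(interval)

Stdlib-only on purpose: urllib and json are enough to exercise the same response shape these fixtures freeze for the ambari-web deploy-wizard tests.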
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/5_hosts/poll_9.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/5_hosts/poll_9.json
deleted file mode 100644
index 2f94e11..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/5_hosts/poll_9.json
+++ /dev/null
@@ -1,405 +0,0 @@
-{
-  "href" : "http://ambari:8080/api/clusters/mycluster/requests/2?fields=tasks/*",
-  "Requests" : {
-    "id" : 2,
-    "cluster_name" : "mycluster"
-  },
-  "tasks" : [
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/34",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}63d8feab1255e45d9549ccea14f687c4' to '{md5}4673b67e078cc9d84ffc4873e5198edf'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start jobtracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start jobtracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[historyserver]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start historyserver']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start historyserver']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}cccc03b9f3384eac76957c7fe2f12849' to '{md5}07e946dbf4ae6632034ee6715a085b92'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 9.76 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}63d8feab1255e45d9549ccea14f687c4' to '{md5}4673b67e078cc9d84ffc4873e5198edf'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start jobtracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start jobtracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[historyserver]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start historyserver']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start historyserver']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}cccc03b9f3384eac76957c7fe2f12849' to '{md5}07e946dbf4ae6632034ee6715a085b92'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 9.76 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 34,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "JOBTRACKER",
-        "start_time" : 1352864269447,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/44",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 44,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "OOZIE_SERVICE_CHECK",
-        "start_time" : 1352864442993,
-        "stage_id" : 4
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/36",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}9786ed97b221e37075bdb64400bc804a' to '{md5}8e06d7ec24fe5acd81917162d58857db'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}30e43dbdb225dad740d632ecc8f6ae11' to '{md5}558aadf67e4d29865a6d935076d3868b'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.20 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}9786ed97b221e37075bdb64400bc804a' to '{md5}8e06d7ec24fe5acd81917162d58857db'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}30e43dbdb225dad740d632ecc8f6ae11' to '{md5}558aadf67e4d29865a6d935076d3868b'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.20 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 36,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352864269562,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/29",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}d22fc5749dde07b5b5acff255c490e9d' to '{md5}0617b67bc5192f5e44cf98b2fe25eb6f'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}0d021082a9258c648b5259d3af27ff62' to '{md5}39e33160b7f2933a12fc338a81ae9fcd'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/File[/tmp/checkForFormat.sh]/ensure: defined content as '{md5}5dd6bddf910d8ca9f6fefa44e7bbec7e'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: NameNode Dirname = /grid/0/hadoop/hdfs/namenode\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: NameNode Dirname = /grid/1/hadoop/hdfs/namenode\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:57 INFO namenode.NameNode: STARTUP_MSG: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: /************************************************************\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG: Starting NameNode\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG:   host = host5/10.118.58.228\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG:   args = [-format]\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG:   version = 1.1.0.1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG:   build =  -r ; compiled by 'jenkins' on Sat Nov 10 18:55:09 PST 2012\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: ************************************************************/\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: Re-format filesystem in /grid/0/hadoop/hdfs/namenode ? (Y or N) Re-format filesystem in /grid/1/hadoop/hdfs/namenode ? 
(Y or N) 12/11/13 22:36:58 INFO util.GSet: VM type       = 64-bit\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO util.GSet: 2% max memory = 19.2 MB\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO util.GSet: capacity      = 2^21 = 2097152 entries\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO util.GSet: recommended=2097152, actual=2097152\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: fsOwner=hdfs\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: supergroup=supergroup\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: isPermissionEnabled=true\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: dfs.block.invalidate.limit=100\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 WARN namenode.FSNamesystem: The dfs.support.append option is in your configuration, however append is not supported. This configuration option is no longer required to enable sync\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: isAccessTokenEnabled=false accessKeyUpdateInterval=0 min(s), accessTokenLifetime=0 min(s)\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.NameNode: Caching file names occuring more than 10 times \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO common.Storage: Image file of size 110 saved in 0 seconds.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.FSEditLog: closing edit log: position=4, editlog=/grid/0/hadoop/hdfs/namenode/current/edits\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.FSEditLog: close success: truncate to 4, editlog=/grid/0/hadoop/hdfs/namenode/current/edits\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO common.Storage: Storage directory /grid/0/hadoop/hdfs/namenode has been successfully formatted.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO common.Storage: Image file of size 110 saved in 0 seconds.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.FSEditLog: closing edit log: position=4, editlog=/grid/1/hadoop/hdfs/namenode/current/edits\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.FSEditLog: close success: truncate to 4, 
editlog=/grid/1/hadoop/hdfs/namenode/current/edits\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO common.Storage: Storage directory /grid/1/hadoop/hdfs/namenode has been successfully formatted.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.NameNode: SHUTDOWN_MSG: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: /************************************************************\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: SHUTDOWN_MSG: Shutting down NameNode at host5/10.118.58.228\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: ************************************************************/\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: yes: standard output: Broken pipe\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: yes: write error\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Hdp::Exec[set namenode mark]/Exec[set namenode mark]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Service[namenode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -mkdir /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred]/Hdp-hadoop::Exec-hadoop[fs -mkdir /mapred]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred]/Hdp-hadoop::Exec-hadoop[fs -chown mapred /mapred]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred]/Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -mkdir /mapred/system]/Hdp::Exec[hadoop --config 
/etc/hadoop/conf fs -mkdir /mapred/system]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -chmod 775 /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 775 /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 775 /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -mkdir /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -chown mapred /mapred/system]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred/system]/Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred/system]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -chown hdfs /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown hdfs /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -chown hdfs /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -chmod 770 /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 770 /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 770 /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -chown ambari_qa /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown ambari_qa /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -chown ambari_qa /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -chmod 777 /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 777 /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 777 /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -chown oozie /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown oozie /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -chown oozie /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 51.23 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -mkdir 
/mapred/system]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -chmod 775 /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 775 /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 775 /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -mkdir /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -chown mapred /mapred/system]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred/system]/Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred/system]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -chown hdfs /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown hdfs /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -chown hdfs /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -chmod 770 /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 770 /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 770 /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -chown ambari_qa /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown ambari_qa /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -chown ambari_qa /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -chmod 777 /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 777 /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 777 /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -chown oozie /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown oozie /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -chown oozie /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 51.23 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 29,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "NAMENODE",
-        "start_time" : 1352864090025,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/33",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.72 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.72 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 33,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864090181,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/39",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}db0f2efdd03e4845c0528e1978b25644' to '{md5}84df095b5569e720b4aeaf4a96e0ee6d'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}f72b50738651b3cb6bcef039b59ffdcb' to '{md5}e750ca8f3497b9a4656f782dcf335dab'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.29 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}db0f2efdd03e4845c0528e1978b25644' to '{md5}84df095b5569e720b4aeaf4a96e0ee6d'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}f72b50738651b3cb6bcef039b59ffdcb' to '{md5}e750ca8f3497b9a4656f782dcf335dab'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.29 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 39,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352864269636,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/28",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hostgroups.cfg]/content: content changed '{md5}ffff62426c4f7a42c1cb1ca44b324dad' to '{md5}21ad9f95dd93ee39fc87db07b7ea05be'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hosts.cfg]/content: content changed '{md5}fdcc51e399dd2381778a163933ef2beb' to '{md5}afbfd32db940db5fff4701c964169c27'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Services/Service[nagios]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Services/Service[nagios]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 8.78 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hostgroups.cfg]/content: content changed '{md5}ffff62426c4f7a42c1cb1ca44b324dad' to '{md5}21ad9f95dd93ee39fc87db07b7ea05be'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hosts.cfg]/content: content changed '{md5}fdcc51e399dd2381778a163933ef2beb' to '{md5}afbfd32db940db5fff4701c964169c27'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Services/Service[nagios]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Services/Service[nagios]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 8.78 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 28,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "NAGIOS_SERVER",
-        "start_time" : 1352864089985,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/32",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}c892638e8c76c66f072640eb32b0637a' to '{md5}db0f2efdd03e4845c0528e1978b25644'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}fcfc81d25ae7ad5f5aaaacdc3d47f0f5' to '{md5}036cea2c613ff235499a7ed743be467f'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.38 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}c892638e8c76c66f072640eb32b0637a' to '{md5}db0f2efdd03e4845c0528e1978b25644'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}fcfc81d25ae7ad5f5aaaacdc3d47f0f5' to '{md5}036cea2c613ff235499a7ed743be467f'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.38 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 32,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "DATANODE",
-        "start_time" : 1352864090145,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/25",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.84 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.84 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 25,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864089770,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/30",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}a973d9bcff056aeff7f22221886c84b7' to '{md5}df2d55356b238461af57fe22ad993e4d'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}54edf0ba61f6501cc49c0d7788b266b1' to '{md5}b25bda7a405235227d20732f0972c5f6'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.00 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}a973d9bcff056aeff7f22221886c84b7' to '{md5}df2d55356b238461af57fe22ad993e4d'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}54edf0ba61f6501cc49c0d7788b266b1' to '{md5}b25bda7a405235227d20732f0972c5f6'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.00 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 30,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "DATANODE",
-        "start_time" : 1352864090068,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/35",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}4673b67e078cc9d84ffc4873e5198edf' to '{md5}654e54e7c3f58aa3d37d07110ad63bb5'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}d3b2d5e47669c948fccb907fa32c2b55' to '{md5}0e079fd5bc7cc43a35b60012c9ee00d9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.45 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}4673b67e078cc9d84ffc4873e5198edf' to '{md5}654e54e7c3f58aa3d37d07110ad63bb5'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}d3b2d5e47669c948fccb907fa32c2b55' to '{md5}0e079fd5bc7cc43a35b60012c9ee00d9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.45 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 35,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "SECONDARY_NAMENODE",
-        "start_time" : 1352864269474,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/41",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::create_file]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfs -rmr mapredsmokeoutput mapredsmokeinput ; hadoop dfs -put /etc/passwd mapredsmokeinput ]/Exec[hadoop --config /etc/hadoop/conf dfs -rmr mapredsmokeoutput mapredsmokeinput ; hadoop dfs -put /etc/passwd mapredsmokeinput ]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:13 INFO input.FileInputFormat: Total input paths to process : 1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:13 WARN snappy.LoadSnappy: Snappy native library is available\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:13 INFO util.NativeCodeLoader: Loaded the native-hadoop library\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:13 INFO snappy.LoadSnappy: Snappy native library loaded\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:14 INFO mapred.JobClient: Running job: job_201211132238_0001\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:15 INFO mapred.JobClient:  map 0% reduce 0%\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:30 INFO mapred.JobClient:  map 100% reduce 0%\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:38 INFO mapred.JobClient:  map 100% reduce 33%\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:40 INFO mapred.JobClient:  map 100% reduce 100%\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient: Job complete: job_201211132238_0001\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient: Counters: 29\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   Job Counters \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Launched reduce tasks=1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar 
/usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     SLOTS_MILLIS_MAPS=6106\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Total time spent by all reduces waiting after reserving slots (ms)=0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Total time spent by all maps waiting after reserving slots (ms)=0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Launched map tasks=1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Data-local map tasks=1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     SLOTS_MILLIS_REDUCES=9332\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   File Output Format Counters \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Bytes 
Written=1845\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   FileSystemCounters\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     FILE_BYTES_READ=2095\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     HDFS_BYTES_READ=1893\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     FILE_BYTES_WRITTEN=117522\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     HDFS_BYTES_WRITTEN=1845\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   File Input Format Counters \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Bytes Read=1755\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput 
mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   Map-Reduce Framework\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map output materialized bytes=2095\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map input records=36\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce shuffle bytes=2095\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Spilled Records=122\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map output bytes=2003\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     CPU time spent (ms)=1920\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Total committed heap usage 
(bytes)=433913856\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Combine input records=62\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     SPLIT_RAW_BYTES=138\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce input records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce input groups=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Combine output records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Physical memory (bytes) snapshot=381779968\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce output records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  
wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Virtual memory (bytes) snapshot=2704003072\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map output records=62\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf fs -test -e mapredsmokeoutput]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 37.52 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce input records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce input groups=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Combine output records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Physical memory (bytes) snapshot=381779968\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce output records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Virtual memory (bytes) snapshot=2704003072\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map output records=62\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf fs -test -e mapredsmokeoutput]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 37.52 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 41,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "MAPREDUCE_SERVICE_CHECK",
-        "start_time" : 1352864331797,
-        "stage_id" : 3
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/31",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.15 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 31,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864090105,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/40",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $lzo_enabled at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:37 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ensure at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:76 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}827a6e7bd4233d4dc82b20761aed1e30' to '{md5}4e59b973cec0811615008a580244bcdb'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp::Exec[/bin/sh -c 'cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz && mkdir -p /var/tmp/oozie && chown oozie:hadoop /var/tmp/oozie && cd /var/tmp/oozie' && su - oozie -c '/usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 /usr/lib/hadoop/ -extjs /usr/share/HDP-oozie/ext.zip -jars /usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run ; hadoop dfs -put /usr/lib/oozie/share share ; hadoop dfs -chmod -R 755 /user/oozie/share && /usr/lib/oozie/bin/oozie-start.sh' ]/Exec[/bin/sh -c 'cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz && mkdir -p /var/tmp/oozie && chown oozie:hadoop /var/tmp/oozie && cd /var/tmp/oozie' && su - oozie -c '/usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 /usr/lib/hadoop/ -extjs /usr/share/HDP-oozie/ext.zip -jars /usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run ; hadoop dfs -put /usr/lib/oozie/share share ; hadoop dfs -chmod -R 755 /user/oozie/share && /usr/lib/oozie/bin/oozie-start.sh' ]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 36.67 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $lzo_enabled at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:37 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ensure at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:76 is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}827a6e7bd4233d4dc82b20761aed1e30' to '{md5}4e59b973cec0811615008a580244bcdb'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp::Exec[/bin/sh -c 'cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz && mkdir -p /var/tmp/oozie && chown oozie:hadoop /var/tmp/oozie && cd /var/tmp/oozie' && su - oozie -c '/usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 /usr/lib/hadoop/ -extjs /usr/share/HDP-oozie/ext.zip -jars /usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run ; hadoop dfs -put /usr/lib/oozie/share share ; hadoop dfs -chmod -R 755 /user/oozie/share && /usr/lib/oozie/bin/oozie-start.sh' ]/Exec[/bin/sh -c 'cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz && mkdir -p /var/tmp/oozie && chown oozie:hadoop /var/tmp/oozie && cd /var/tmp/oozie' && su - oozie -c '/usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 /usr/lib/hadoop/ -extjs /usr/share/HDP-oozie/ext.zip -jars /usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run ; hadoop dfs -put /usr/lib/oozie/share share ; hadoop dfs -chmod -R 755 /user/oozie/share && /usr/lib/oozie/bin/oozie-start.sh' ]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 36.67 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 40,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "OOZIE_SERVER",
-        "start_time" : 1352864331712,
-        "stage_id" : 3
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/24",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}a39a2689e76538c6d9090b00ceb04eb0' to '{md5}9786ed97b221e37075bdb64400bc804a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}c14eb8ab2bb5ab75789c875534ab64f4' to '{md5}9684de67c2a8fa0f7292418d6c0c1651'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.78 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}a39a2689e76538c6d9090b00ceb04eb0' to '{md5}9786ed97b221e37075bdb64400bc804a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}c14eb8ab2bb5ab75789c875534ab64f4' to '{md5}9684de67c2a8fa0f7292418d6c0c1651'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.78 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 24,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "DATANODE",
-        "start_time" : 1352864089661,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/23",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.77 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.77 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 23,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864089600,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/26",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 7.68 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 7.68 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 26,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864089836,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/43",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: Sqoop 1.4.2.1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: git commit id ea3b95785b3daf62c68f1eb0e645636acc00d0c2\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: Compiled by jenkins on Sat Nov 10 19:14:01 PST 2012\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: Sqoop 1.4.2.1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: git commit id ea3b95785b3daf62c68f1eb0e645636acc00d0c2\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: Compiled by jenkins on Sat Nov 10 19:14:01 PST 2012\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.15 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 43,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "SQOOP_SERVICE_CHECK",
-        "start_time" : 1352864331830,
-        "stage_id" : 3
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/42",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Hdp-hadoop::Exec-hadoop[pig::service_check::create_file]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfs -rmr pigsmoke.out passwd; hadoop dfs -put /etc/passwd passwd ]/Exec[hadoop --config /etc/hadoop/conf dfs -rmr pigsmoke.out passwd; hadoop dfs -put /etc/passwd passwd ]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/File[/tmp/pigSmoke.sh]/ensure: defined content as '{md5}feac231e484c08e3bc5f83d0ee189a8c'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:39:58,367 [main] INFO  org.apache.pig.Main - Apache Pig version 0.10.0.1 (rexported) compiled Nov 10 2012, 19:10:20\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:39:58,368 [main] INFO  org.apache.pig.Main - Logging error messages to: /home/ambari_qa/pig_1352864398364.log\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:39:58,789 [main] INFO  org.apache.pig.backend.hadoop.executionengine.HExecutionEngine - Connecting to hadoop file system at: hdfs://host5:8020\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:39:59,058 [main] INFO  org.apache.pig.backend.hadoop.executionengine.HExecutionEngine - Connecting to map-reduce job tracker at: host3:50300\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:39:59,907 [main] INFO  org.apache.pig.tools.pigstats.ScriptState - Pig features used in the script: UNKNOWN\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:00,158 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MRCompiler - File concatenation threshold: 100 optimistic? 
false\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:00,183 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MultiQueryOptimizer - MR plan size before optimization: 1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:00,183 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MultiQueryOptimizer - MR plan size after optimization: 1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:00,288 [main] INFO  org.apache.pig.tools.pigstats.ScriptState - Pig script settings are added to the job\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:00,312 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.JobControlCompiler - mapred.job.reduce.markreset.buffer.percent is not set, set to default 0.3\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:00,315 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.JobControlCompiler - creating jar file Job4537005419718909074.jar\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,356 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.JobControlCompiler - jar file Job4537005419718909074.jar created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,377 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.JobControlCompiler - Setting up single store job\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,432 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - 1 map-reduce job(s) waiting for submission.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,932 [Thread-6] INFO  org.apache.hadoop.mapreduce.lib.input.FileInputFormat - Total input paths to process : 1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,933 [Thread-6] INFO  org.apache.pig.backend.hadoop.executionengine.util.MapRedUtil - Total input paths to process : 1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,934 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - 0% complete\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,971 [Thread-6] WARN  org.apache.hadoop.io.compress.snappy.LoadSnappy - Snappy native library is available\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,973 [Thread-6] INFO  org.apache.hadoop.util.NativeCodeLoader - Loaded the native-hadoop library\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,973 [Thread-6] INFO  org.apache.hadoop.io.compress.snappy.LoadSnappy - Snappy native library loaded\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,977 
[Thread-6] INFO  org.apache.pig.backend.hadoop.executionengine.util.MapRedUtil - Total input paths (combined) to process : 1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:06,811 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - HadoopJobId: job_201211132238_0002\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:06,812 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - More information at: http://host3:50030/jobdetails.jsp?jobid=job_201211132238_0002\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:17,380 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - 50% complete\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:21,432 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - 100% complete\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:21,434 [main] INFO  org.apache.pig.tools.pigstats.SimplePigStats - Script Statistics: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: HadoopVersion\tPigVersion\tUserId\tStartedAt\tFinishedAt\tFeatures\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 1.1.0.1\t0.10.0.1\tambari_qa\t2012-11-13 22:40:00\t2012-11-13 22:40:21\tUNKNOWN\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Success!\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Job Stats (time in seconds):\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: JobId\tMaps\tReduces\tMaxMapTime\tMinMapTIme\tAvgMapTime\tMaxReduceTime\tMinReduceTime\tAvgReduceTime\tAlias\tFeature\tOutputs\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: job_201211132238_0002\t1\t0\t3\t3\t3\t0\t0\t0\tA,B\tMAP_ONLY\thdfs://host5:8020/user/ambari_qa/pigsmoke.out,\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Input(s):\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Successfully read 36 records (2137 bytes) from: \"hdfs://host5:8020/user/ambari_qa/passwd\"\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Output(s):\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Successfully stored 36 records (236 bytes) in: \"hdfs://host5:8020/user/ambari_qa/pigsmoke.out\"\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Counters:\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Total records written : 36\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Total bytes written : 236\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Spillable Memory Manager spill count : 0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Total bags proactively spilled: 0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Total records proactively spilled: 0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Job DAG:\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: job_201211132238_0002\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:21,446 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - Success!\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Hdp-hadoop::Exec-hadoop[pig::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e pigsmoke.out]/Exec[hadoop --config /etc/hadoop/conf fs -test -e pigsmoke.out]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 32.06 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Total records proactively spilled: 0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Job DAG:\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: job_201211132238_0002\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:21,446 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - Success!\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Hdp-hadoop::Exec-hadoop[pig::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e pigsmoke.out]/Exec[hadoop --config /etc/hadoop/conf fs -test -e pigsmoke.out]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 32.06 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 42,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "PIG_SERVICE_CHECK",
-        "start_time" : 1352864331815,
-        "stage_id" : 3
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/27",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[gmetad]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Gmetad/Hdp::Exec[hdp-gmetad service]/Exec[hdp-gmetad service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 8.14 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[gmetad]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Gmetad/Hdp::Exec[hdp-gmetad service]/Exec[hdp-gmetad service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 8.14 
seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 27,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_SERVER",
-        "start_time" : 1352864089883,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/37",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}df2d55356b238461af57fe22ad993e4d' to '{md5}62a467fcccda8169de563170e39e3419'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}1a3769d695902dba39b5645fef3766e0' to '{md5}23097908e8b54f7dbc4d31b5d26d21e7'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.66 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}df2d55356b238461af57fe22ad993e4d' to '{md5}62a467fcccda8169de563170e39e3419'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}1a3769d695902dba39b5645fef3766e0' to '{md5}23097908e8b54f7dbc4d31b5d26d21e7'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.66 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 37,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352864269589,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/38",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::check_safemode]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/returns: Safe mode is OFF\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::check_safemode]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::create_file]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -rm /tmp/id280a7781_date381312; hadoop fs -put /etc/passwd /tmp/id280a7781_date381312]/Exec[hadoop --config /etc/hadoop/conf fs -rm /tmp/id280a7781_date381312; hadoop fs -put /etc/passwd /tmp/id280a7781_date381312]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e /tmp/id280a7781_date381312]/Exec[hadoop --config /etc/hadoop/conf fs -test -e /tmp/id280a7781_date381312]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 10.35 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::check_safemode]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/returns: Safe mode is OFF\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::check_safemode]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::create_file]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -rm /tmp/id280a7781_date381312; hadoop fs -put /etc/passwd /tmp/id280a7781_date381312]/Exec[hadoop --config /etc/hadoop/conf fs -rm /tmp/id280a7781_date381312; hadoop fs -put /etc/passwd /tmp/id280a7781_date381312]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e /tmp/id280a7781_date381312]/Exec[hadoop --config /etc/hadoop/conf fs -test -e /tmp/id280a7781_date381312]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 10.35 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 38,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "HDFS_SERVICE_CHECK",
-        "start_time" : 1352864269616,
-        "stage_id" : 2
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/master_failure/poll_1.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/master_failure/poll_1.json
deleted file mode 100644
index fdfb408..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/master_failure/poll_1.json
+++ /dev/null
@@ -1,405 +0,0 @@
-{
-  "href" : "http://ambari:8080/api/clusters/mycluster/requests/1?fields=tasks/*",
-  "Requests" : {
-    "id" : 1,
-    "cluster_name" : "mycluster"
-  },
-  "tasks" : [
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/5",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host1",
-        "id" : 5,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/20",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 20,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "PIG",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/15",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 15,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/3",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 3,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "OOZIE_SERVER",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/7",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host1",
-        "id" : 7,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/14",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host4",
-        "id" : 14,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/13",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host4",
-        "id" : 13,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/1",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 1,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/18",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 18,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "MAPREDUCE_CLIENT",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/8",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 8,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/6",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host1",
-        "id" : 6,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/2",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 2,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "JOBTRACKER",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/4",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 4,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "SECONDARY_NAMENODE",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/19",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 19,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "OOZIE_CLIENT",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/10",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 10,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "NAGIOS_SERVER",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/12",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host4",
-        "id" : 12,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/21",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 21,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "SQOOP",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/9",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 9,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_SERVER",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/22",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 22,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/16",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 16,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/11",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 11,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "NAMENODE",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/17",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 17,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "HDFS_CLIENT",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/master_failure/poll_2.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/master_failure/poll_2.json
deleted file mode 100644
index 822e351..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/master_failure/poll_2.json
+++ /dev/null
@@ -1,405 +0,0 @@
-{
-  "href" : "http://ambari:8080/api/clusters/mycluster/requests/1?fields=tasks/*",
-  "Requests" : {
-    "id" : 1,
-    "cluster_name" : "mycluster"
-  },
-  "tasks" : [
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/4",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 4,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "SECONDARY_NAMENODE",
-        "start_time" : 1352863664537,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/15",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 15,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863666842,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/7",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host1",
-        "id" : 7,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863665177,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/14",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host4",
-        "id" : 14,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863666672,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/10",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 10,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "NAGIOS_SERVER",
-        "start_time" : 1352863665856,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/21",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 21,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "SQOOP",
-        "start_time" : 1352863667466,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/8",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 8,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863665481,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/16",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 16,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863666913,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/2",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 2,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "JOBTRACKER",
-        "start_time" : 1352863664213,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/20",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 20,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "PIG",
-        "start_time" : 1352863667299,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/11",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 11,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "NAMENODE",
-        "start_time" : 1352863665939,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/3",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 3,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "OOZIE_SERVER",
-        "start_time" : 1352863664455,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/12",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host4",
-        "id" : 12,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863666165,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/9",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 9,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_SERVER",
-        "start_time" : 1352863665690,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/6",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host1",
-        "id" : 6,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863664901,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/17",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 17,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "HDFS_CLIENT",
-        "start_time" : 1352863666987,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/18",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 18,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "MAPREDUCE_CLIENT",
-        "start_time" : 1352863667058,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/19",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 19,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "OOZIE_CLIENT",
-        "start_time" : 1352863667216,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/22",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 22,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863667578,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/5",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host1",
-        "id" : 5,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863664723,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/1",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 1,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863663984,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/13",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host4",
-        "id" : 13,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863666401,
-        "stage_id" : 1
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/master_failure/poll_3.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/master_failure/poll_3.json
deleted file mode 100644
index f420185..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/master_failure/poll_3.json
+++ /dev/null
@@ -1,405 +0,0 @@
-{
-  "href" : "http://ambari:8080/api/clusters/mycluster/requests/1?fields=tasks/*",
-  "Requests" : {
-    "id" : 1,
-    "cluster_name" : "mycluster"
-  },
-  "tasks" : [
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/11",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 11,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "NAMENODE",
-        "start_time" : 1352863665939,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/3",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $lzo_enabled at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:37 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ensure at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:76 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java oozie-server]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie/Hdp::Package[oozie-server]/Hdp::Package::Yum[oozie-server]/Package[oozie]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java oozie-client]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/content: content changed '{md5}809b11d7098abd5cbcb08b9ceda104ed' to '{md5}851335f018fb288e30cf38afc96fff6a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/content: content changed '{md5}db07d1efafb9314e2e7d2c8a4d3ba6e7' to '{md5}42fd0d6fe7301cb54ea7129d6b930f59'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-oozie::Download-ext-zip/Hdp::Package[extjs]/Hdp::Package::Yum[extjs]/Package[extjs-2.2-1]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java extjs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}11c7e0ea46916a75e66920fac8c93621' to '{md5}fc664190038e2562fe63acd61ea9480b'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/oozie/]/group: group changed 'oozie' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/oozie]/group: group changed 'oozie' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp-oozie::Service::Directory[/var/tmp/oozie]/Hdp::Directory_recursive_create[/var/tmp/oozie]/Hdp::Exec[mkdir -p /var/tmp/oozie]/Exec[mkdir -p /var/tmp/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp-oozie::Service::Directory[/var/data/oozie]/Hdp::Directory_recursive_create[/var/data/oozie]/Hdp::Exec[mkdir -p /var/data/oozie]/Exec[mkdir -p /var/data/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/oozie]/group: group changed 'oozie' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp-oozie::Service::Directory[/var/tmp/oozie]/Hdp::Directory_recursive_create[/var/tmp/oozie]/Hdp::Directory[/var/tmp/oozie]/File[/var/tmp/oozie]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-oozie::Service/Hdp-oozie::Service::Directory[/var/tmp/oozie]/Hdp::Directory_recursive_create[/var/tmp/oozie]/Hdp::Directory[/var/tmp/oozie]/File[/var/tmp/oozie]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/data/oozie]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/var/data/oozie]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/data/oozie]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 50.33 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/var/log/oozie]/group: group changed 'oozie' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp-oozie::Service::Directory[/var/tmp/oozie]/Hdp::Directory_recursive_create[/var/tmp/oozie]/Hdp::Exec[mkdir -p /var/tmp/oozie]/Exec[mkdir -p /var/tmp/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp-oozie::Service::Directory[/var/data/oozie]/Hdp::Directory_recursive_create[/var/data/oozie]/Hdp::Exec[mkdir -p /var/data/oozie]/Exec[mkdir -p /var/data/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/oozie]/group: group changed 'oozie' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp-oozie::Service::Directory[/var/tmp/oozie]/Hdp::Directory_recursive_create[/var/tmp/oozie]/Hdp::Directory[/var/tmp/oozie]/File[/var/tmp/oozie]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp-oozie::Service::Directory[/var/tmp/oozie]/Hdp::Directory_recursive_create[/var/tmp/oozie]/Hdp::Directory[/var/tmp/oozie]/File[/var/tmp/oozie]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/data/oozie]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/var/data/oozie]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/data/oozie]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 50.33 seconds\u001B[0m",
-        "status" : "FAILED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 3,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "OOZIE_SERVER",
-        "start_time" : 1352863664455,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/4",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Snamenode::Create_name_dirs[/grid/0/hadoop/hdfs/namesecondary]/Hdp::Directory_recursive_create[/grid/0/hadoop/hdfs/namesecondary]/Hdp::Exec[mkdir -p /grid/0/hadoop/hdfs/namesecondary]/Exec[mkdir -p /grid/0/hadoop/hdfs/namesecondary]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}de205131fc2bc8c15f7bd1329fc8ea0d' to '{md5}f60477f06af8d1d549460294d0363702'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}9a88c58cc1982799c0e4bdd2d1f1e6e0'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 4.84 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed 
'{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}9a88c58cc1982799c0e4bdd2d1f1e6e0'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 4.84 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 4,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "SECONDARY_NAMENODE",
-        "start_time" : 1352863664537,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/1",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.18 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 111.24 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 111.24 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host3",
-        "id" : 1,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863663984,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/15",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.14 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.10 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}43bb2e790338badc9f17297bc958b536'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}7c4fcfb891f7ca4a9156ba37d35b7dc4'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/mode: mode changed '0755' to '0750'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 125.19 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 125.19 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host2",
-        "id" : 15,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863666842,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/19",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[2]/Hdp-oozie/Hdp::Package[oozie-client]/Hdp::Package::Yum[oozie-client]/Package[oozie-client]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java oozie-client]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie/Hdp::User[oozie]/User[oozie]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/content: content changed '{md5}809b11d7098abd5cbcb08b9ceda104ed' to '{md5}851335f018fb288e30cf38afc96fff6a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/content: content changed '{md5}db07d1efafb9314e2e7d2c8a4d3ba6e7' to '{md5}42fd0d6fe7301cb54ea7129d6b930f59'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}11c7e0ea46916a75e66920fac8c93621' to '{md5}5ad6256cb12b73724714fc692c18db82'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 21.37 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/content: content changed '{md5}809b11d7098abd5cbcb08b9ceda104ed' to '{md5}851335f018fb288e30cf38afc96fff6a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/content: content changed '{md5}db07d1efafb9314e2e7d2c8a4d3ba6e7' to '{md5}42fd0d6fe7301cb54ea7129d6b930f59'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}11c7e0ea46916a75e66920fac8c93621' to '{md5}5ad6256cb12b73724714fc692c18db82'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 21.37 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 19,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "OOZIE_CLIENT",
-        "start_time" : 1352863667216,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/6",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 34.27 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 34.27 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 6,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863664901,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/18",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}0c93d35cf2bf9188e20542db3417f453' to '{md5}089df31bb24ce7450baba1a7541e4546'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}13834e68f9058d26d30e3e627bea2d08'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.32 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}0c93d35cf2bf9188e20542db3417f453' to '{md5}089df31bb24ce7450baba1a7541e4546'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}13834e68f9058d26d30e3e627bea2d08'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.32 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 18,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "MAPREDUCE_CLIENT",
-        "start_time" : 1352863667058,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/10",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-fping]/Hdp::Package[nagios-fping]/Hdp::Package::Yum[nagios-fping]/Package[fping]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-server]/Hdp::Package[nagios-server]/Hdp::Package::Yum[nagios-server]/Package[nagios-3.2.3]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-plugins]/Hdp::Package[nagios-plugins]/Hdp::Package::Yum[nagios-plugins]/Package[nagios-plugins-1.4.9]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-addons]/Hdp::Package[nagios-addons]/Hdp::Package::Yum[nagios-addons]/Package[hdp_mon_nagios_addons]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios]/owner: owner changed 'nagios' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects]/owner: owner changed 'nagios' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hdfs_blocks.php]/content: content changed '{md5}4dc9db0a18622b3e40e266a9b54e2a1a' to '{md5}e3a5fa882154aaabe18fce6086346b98'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_rpcq_latency.php]/content: content changed '{md5}4fa432abbf0a15a2c73cd8eb7e657d6e' to '{md5}1601eec7138fcd957159e6524900a1ce'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_cpu.pl]/ensure: defined content as '{md5}ab87290dee3f032770580d7d7713d086'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hostgroups.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_datanode_storage.php]/ensure: defined content as '{md5}eeae70d7c6686ff4ce9993244dbbdf34'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hosts.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-commands.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hive_metastore_status.sh]/content: content changed '{md5}56469a95f854711ff67e961eb91a1b9a' to '{md5}987ce3f5cd654d9e8d13e9bc9d4b4e16'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_aggregate.php]/content: content changed '{md5}eeddc4cf93d7ca7dbf1e6ea1effcc278' to '{md5}ebbacb754b35bcdab9b246a64589f7c6'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-servicegroups.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/contacts.cfg]/content: content changed '{md5}5ee6a28df66ed3cc6f76b46073ced9ac' to '{md5}05c47f03c0800b968023152479f24ccb'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/contacts.cfg]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_templeton_status.sh]/ensure: defined content as '{md5}017d82f1435f2a1059d13968b14174bc'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_puppet_agent_status.php]/ensure: defined content as '{md5}a623c36478c7ab4bf8df595822683762'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/content: content changed '{md5}3a4b1c8fd3b42affa5ae58a385a48000' to '{md5}870993486c9e4336086645a14d327ead'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-services.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_oozie_status.sh]/content: content changed '{md5}e672a044b2aa7163ceda92829d43e92c' to '{md5}3124bf8679ce198ba25509e430db325b'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_webui.sh]/content: content changed '{md5}f8e41fdb30d0cd838aefd7ea9e5af4a2' to '{md5}258d5ced025d4b14853cc0b12bdde1f0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hdfs_capacity.php]/content: content changed '{md5}a3391962ab1c1956610d0cb009df9b8c' to '{md5}398026717db006c423441a223731914f'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 52.10 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_templeton_status.sh]/ensure: defined content as '{md5}017d82f1435f2a1059d13968b14174bc'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_puppet_agent_status.php]/ensure: defined content as '{md5}a623c36478c7ab4bf8df595822683762'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/content: content changed '{md5}3a4b1c8fd3b42affa5ae58a385a48000' to '{md5}870993486c9e4336086645a14d327ead'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-services.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_oozie_status.sh]/content: content changed '{md5}e672a044b2aa7163ceda92829d43e92c' to '{md5}3124bf8679ce198ba25509e430db325b'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_webui.sh]/content: content changed '{md5}f8e41fdb30d0cd838aefd7ea9e5af4a2' to '{md5}258d5ced025d4b14853cc0b12bdde1f0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hdfs_capacity.php]/content: content changed '{md5}a3391962ab1c1956610d0cb009df9b8c' to '{md5}398026717db006c423441a223731914f'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 52.10 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 10,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "NAGIOS_SERVER",
-        "start_time" : 1352863665856,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/7",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/0/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/0/hadoop/mapred]/Exec[mkdir -p /grid/0/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/1/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/1/hadoop/mapred]/Exec[mkdir -p /grid/1/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}428a01fb131e6c37a876dd03a2940d79' to '{md5}a39a2689e76538c6d9090b00ceb04eb0'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}30e43dbdb225dad740d632ecc8f6ae11'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.96 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}30e43dbdb225dad740d632ecc8f6ae11'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.96 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 7,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863665177,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/8",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.24 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 133.95 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 133.95 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host5",
-        "id" : 8,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863665481,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/22",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 22,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863667578,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/5",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}428a01fb131e6c37a876dd03a2940d79'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}c14eb8ab2bb5ab75789c875534ab64f4'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/mode: mode changed '0755' to '0750'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 148.09 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 148.09 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host1",
-        "id" : 5,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863664723,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/9",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[rrdtool-python]/Hdp::Package::Yum[rrdtool-python]/Package[python-rrdtool.x86_64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[ganglia-gweb]/Hdp::Package::Yum[ganglia-gweb]/Package[gweb]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[ganglia-server]/Hdp::Package::Yum[ganglia-server]/Package[ganglia-gmetad-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[ganglia-hdp-gweb-addons]/Hdp::Package::Yum[ganglia-hdp-gweb-addons]/Package[hdp_mon_ganglia_addons]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[gmetad]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/owner: owner changed 'apache' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/group: group changed 'apache' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/mode: mode changed '0755' to '0777'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver start]/Exec[monitor webserver start]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/www/cgi-bin/rrd.py]/ensure: defined content as '{md5}95b666a938f3080c370aeb6e3136cc6b'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 63.16 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[gmetad]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/owner: owner changed 'apache' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/group: group changed 'apache' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/mode: mode changed '0755' to '0777'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver start]/Exec[monitor webserver start]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/www/cgi-bin/rrd.py]/ensure: defined content as '{md5}95b666a938f3080c370aeb6e3136cc6b'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 63.16 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 9,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_SERVER",
-        "start_time" : 1352863665690,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/17",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}43bb2e790338badc9f17297bc958b536' to '{md5}0c93d35cf2bf9188e20542db3417f453'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}7c4fcfb891f7ca4a9156ba37d35b7dc4' to '{md5}fcfc81d25ae7ad5f5aaaacdc3d47f0f5'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.23 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}43bb2e790338badc9f17297bc958b536' to '{md5}0c93d35cf2bf9188e20542db3417f453'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}7c4fcfb891f7ca4a9156ba37d35b7dc4' to '{md5}fcfc81d25ae7ad5f5aaaacdc3d47f0f5'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.23 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 17,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "HDFS_CLIENT",
-        "start_time" : 1352863666987,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/21",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 21,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "SQOOP",
-        "start_time" : 1352863667466,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/13",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 31.06 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 31.06 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 13,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863666401,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/2",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ambari-log4j]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp::Package[ambari-log4j]/Hdp::Package::Yum[ambari-log4j]/Package[ambari-log4j]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Jobtracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/0/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/0/hadoop/mapred]/Exec[mkdir -p /grid/0/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Jobtracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/1/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/1/hadoop/mapred]/Exec[mkdir -p /grid/1/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}de205131fc2bc8c15f7bd1329fc8ea0d'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib/hadoop/lib/hadoop-tools.jar]/ensure: 
created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}cccc03b9f3384eac76957c7fe2f12849'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 78.36 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}cccc03b9f3384eac76957c7fe2f12849'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 78.36 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 2,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "JOBTRACKER",
-        "start_time" : 1352863664213,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/16",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as 
'{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 29.27 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 29.27 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 16,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863666913,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/12",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.10 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.10 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}00ac2c1fbbe400ce3e93c316c03c8c8c'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}54edf0ba61f6501cc49c0d7788b266b1'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/mode: mode changed '0755' to '0750'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 130.97 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 130.97 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host4",
-        "id" : 12,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863666165,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/14",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/0/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/0/hadoop/mapred]/Exec[mkdir -p /grid/0/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/1/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/1/hadoop/mapred]/Exec[mkdir -p /grid/1/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}00ac2c1fbbe400ce3e93c316c03c8c8c' to '{md5}a973d9bcff056aeff7f22221886c84b7'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p 
/var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}1a3769d695902dba39b5645fef3766e0'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.51 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}1a3769d695902dba39b5645fef3766e0'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.51 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 14,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863666672,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/20",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig/Hdp::Package[pig]/Hdp::Package::Yum[pig]/Package[pig.noarch]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java pig]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig-env.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/content: content changed '{md5}ba02e0d405cdbafd081e13ab2ef06403' to '{md5}c53b9db5f04e0c66451e52b4b39c8b17'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/content: content changed '{md5}fd58c23539a391f6a74db6e22b67f7d3' to '{md5}82b233fb0a252aae098c7267aafeb01f'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 22.14 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java pig]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig-env.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/content: content changed '{md5}ba02e0d405cdbafd081e13ab2ef06403' to '{md5}c53b9db5f04e0c66451e52b4b39c8b17'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/content: content changed '{md5}fd58c23539a391f6a74db6e22b67f7d3' to '{md5}82b233fb0a252aae098c7267aafeb01f'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 22.14 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 20,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "PIG",
-        "start_time" : 1352863667299,
-        "stage_id" : 1
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/master_failure/poll_4.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/master_failure/poll_4.json
deleted file mode 100644
index 8a6cffd..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/master_failure/poll_4.json
+++ /dev/null
@@ -1,405 +0,0 @@
-{
-  "href" : "http://ambari:8080/api/clusters/mycluster/requests/1?fields=tasks/*",
-  "Requests" : {
-    "id" : 1,
-    "cluster_name" : "mycluster"
-  },
-  "tasks" : [
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/13",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as 
'{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 31.06 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 31.06 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 13,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863666401,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/7",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/0/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/0/hadoop/mapred]/Exec[mkdir -p /grid/0/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/1/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/1/hadoop/mapred]/Exec[mkdir -p /grid/1/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}428a01fb131e6c37a876dd03a2940d79' to '{md5}a39a2689e76538c6d9090b00ceb04eb0'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p 
/var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}30e43dbdb225dad740d632ecc8f6ae11'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.96 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}30e43dbdb225dad740d632ecc8f6ae11'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.96 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 7,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863665177,
-        "stage_id" : 1
-      }
-    },
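
[Editor's note, not part of the deleted file: the records above are plain Tasks resources from the Ambari REST API. As a minimal sketch, the stdlib-only Python below fetches one such record and prints the fields a caller usually inspects. The URL reuses the href shown in the samples; the admin/admin Basic-auth credentials are an assumption, not taken from this document.]

import base64
import json
import urllib.request

# Assumed endpoint and credentials -- adjust for a real deployment.
URL = "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/7"
AUTH = base64.b64encode(b"admin:admin").decode("ascii")  # assumed credentials

req = urllib.request.Request(URL, headers={"Authorization": "Basic " + AUTH})
with urllib.request.urlopen(req) as resp:
    task = json.load(resp)["Tasks"]

# Field names exactly as they appear in the sample payloads above.
print(task["role"], task["command"], task["status"],
      "exit_code=%s" % task["exit_code"],
      "attempt=%s" % task["attempt_cnt"],
      "host=%s" % task["host_name"])
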
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/14",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/0/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/0/hadoop/mapred]/Exec[mkdir -p /grid/0/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/1/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/1/hadoop/mapred]/Exec[mkdir -p /grid/1/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}00ac2c1fbbe400ce3e93c316c03c8c8c' to '{md5}a973d9bcff056aeff7f22221886c84b7'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p 
/var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}1a3769d695902dba39b5645fef3766e0'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.51 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}1a3769d695902dba39b5645fef3766e0'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.51 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 14,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863666672,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/4",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Snamenode::Create_name_dirs[/grid/0/hadoop/hdfs/namesecondary]/Hdp::Directory_recursive_create[/grid/0/hadoop/hdfs/namesecondary]/Hdp::Exec[mkdir -p /grid/0/hadoop/hdfs/namesecondary]/Exec[mkdir -p /grid/0/hadoop/hdfs/namesecondary]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}de205131fc2bc8c15f7bd1329fc8ea0d' to '{md5}f60477f06af8d1d549460294d0363702'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}9a88c58cc1982799c0e4bdd2d1f1e6e0'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 4.84 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed 
'{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}9a88c58cc1982799c0e4bdd2d1f1e6e0'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 4.84 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 4,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "SECONDARY_NAMENODE",
-        "start_time" : 1352863664537,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/6",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as 
'{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 34.27 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 34.27 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 6,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863664901,
-        "stage_id" : 1
-      }
-    },
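
[Editor's note: the "stdout" strings above embed raw ANSI color escapes -- \u001B[0;33m for warnings, \u001B[0;36m for notices, \u001B[0m to reset -- so they render as colored Puppet output in a terminal. A small, hypothetical helper for stripping them when displaying the log as plain text:]

import re

# Matches CSI color sequences such as \x1b[0;36m and the reset \x1b[0m,
# the only kind of escape that appears in these stdout payloads.
ANSI_RE = re.compile(r"\x1b\[[0-9;]*m")

def strip_ansi(stdout: str) -> str:
    return ANSI_RE.sub("", stdout)

sample = "\x1b[0;36mnotice: Finished catalog run in 5.96 seconds\x1b[0m"
print(strip_ansi(sample))  # -> notice: Finished catalog run in 5.96 seconds
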
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/19",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[2]/Hdp-oozie/Hdp::Package[oozie-client]/Hdp::Package::Yum[oozie-client]/Package[oozie-client]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java oozie-client]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie/Hdp::User[oozie]/User[oozie]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/content: content changed '{md5}809b11d7098abd5cbcb08b9ceda104ed' to '{md5}851335f018fb288e30cf38afc96fff6a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/content: content changed '{md5}db07d1efafb9314e2e7d2c8a4d3ba6e7' to '{md5}42fd0d6fe7301cb54ea7129d6b930f59'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}11c7e0ea46916a75e66920fac8c93621' to '{md5}5ad6256cb12b73724714fc692c18db82'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 21.37 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/content: content changed '{md5}809b11d7098abd5cbcb08b9ceda104ed' to '{md5}851335f018fb288e30cf38afc96fff6a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/content: content changed '{md5}db07d1efafb9314e2e7d2c8a4d3ba6e7' to '{md5}42fd0d6fe7301cb54ea7129d6b930f59'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}11c7e0ea46916a75e66920fac8c93621' to '{md5}5ad6256cb12b73724714fc692c18db82'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 21.37 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 19,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "OOZIE_CLIENT",
-        "start_time" : 1352863667216,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/3",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $lzo_enabled at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:37 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ensure at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:76 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}fc664190038e2562fe63acd61ea9480b' to '{md5}827a6e7bd4233d4dc82b20761aed1e30'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.12 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $lzo_enabled at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:37 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ensure at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:76 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}fc664190038e2562fe63acd61ea9480b' to '{md5}827a6e7bd4233d4dc82b20761aed1e30'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.12 seconds\u001B[0m",
-        "status" : "FAILED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 3,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "OOZIE_SERVER",
-        "start_time" : 1352863664455,
-        "stage_id" : 1
-      }
-    },
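
[Editor's note: the task just above is the one failure in this request -- OOZIE_SERVER INSTALL on host3 ends with "status" : "FAILED" after attempt_cnt 2, even though its exit_code is 0, so status rather than exit_code is the reliable signal. A sketch for surfacing such failures across the whole request; the fields query parameter follows the Ambari partial-response convention and is an assumption here, as are the credentials:]

import base64
import json
import urllib.request

URL = ("http://ambari:8080/api/clusters/mycluster/requests/1/tasks"
       "?fields=Tasks/status,Tasks/role,Tasks/host_name,Tasks/attempt_cnt")
AUTH = base64.b64encode(b"admin:admin").decode("ascii")  # assumed credentials

req = urllib.request.Request(URL, headers={"Authorization": "Basic " + AUTH})
with urllib.request.urlopen(req) as resp:
    # Collection responses wrap each task in an "items" array.
    items = json.load(resp)["items"]

for item in items:
    t = item["Tasks"]
    if t["status"] == "FAILED":
        print("FAILED: %(role)s on %(host_name)s "
              "after %(attempt_cnt)s attempt(s)" % t)
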
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/1",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.18 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 111.24 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 111.24 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host3",
-        "id" : 1,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863663984,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/22",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/0/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/0/hadoop/mapred]/Exec[mkdir -p /grid/0/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/1/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/1/hadoop/mapred]/Exec[mkdir -p /grid/1/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}089df31bb24ce7450baba1a7541e4546' to '{md5}8da2518fdfc4a3723e64babe25c8d6d8'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p 
/var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}13834e68f9058d26d30e3e627bea2d08' to '{md5}816e416f74804ba21e3b80b611d59a11'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.75 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}13834e68f9058d26d30e3e627bea2d08' to '{md5}816e416f74804ba21e3b80b611d59a11'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.75 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 22,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863667578,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/8",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.24 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 133.95 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: 
/File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 133.95 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host5",
-        "id" : 8,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863665481,
-        "stage_id" : 1
-      }
-    },
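
[Editor's note: the "start_time" values in these records (e.g. 1352863665481) are Unix epoch milliseconds. Converting one to a readable UTC timestamp, as a quick illustration:]

from datetime import datetime, timezone

start_time_ms = 1352863665481  # taken from the GANGLIA_MONITOR task above
started = datetime.fromtimestamp(start_time_ms / 1000, tz=timezone.utc)
print(started.isoformat())  # 2012-11-14T03:27:45.481000+00:00
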
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/12",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.10 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.10 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}00ac2c1fbbe400ce3e93c316c03c8c8c'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}54edf0ba61f6501cc49c0d7788b266b1'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/mode: mode changed '0755' to '0750'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 130.97 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 130.97 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host4",
-        "id" : 12,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863666165,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/10",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-fping]/Hdp::Package[nagios-fping]/Hdp::Package::Yum[nagios-fping]/Package[fping]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-server]/Hdp::Package[nagios-server]/Hdp::Package::Yum[nagios-server]/Package[nagios-3.2.3]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-plugins]/Hdp::Package[nagios-plugins]/Hdp::Package::Yum[nagios-plugins]/Package[nagios-plugins-1.4.9]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-addons]/Hdp::Package[nagios-addons]/Hdp::Package::Yum[nagios-addons]/Package[hdp_mon_nagios_addons]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios]/owner: owner changed 'nagios' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects]/owner: owner changed 'nagios' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hdfs_blocks.php]/content: content changed '{md5}4dc9db0a18622b3e40e266a9b54e2a1a' to '{md5}e3a5fa882154aaabe18fce6086346b98'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_rpcq_latency.php]/content: content changed '{md5}4fa432abbf0a15a2c73cd8eb7e657d6e' to '{md5}1601eec7138fcd957159e6524900a1ce'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_cpu.pl]/ensure: defined content as '{md5}ab87290dee3f032770580d7d7713d086'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hostgroups.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_datanode_storage.php]/ensure: defined content as '{md5}eeae70d7c6686ff4ce9993244dbbdf34'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hosts.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-commands.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hive_metastore_status.sh]/content: content changed '{md5}56469a95f854711ff67e961eb91a1b9a' to '{md5}987ce3f5cd654d9e8d13e9bc9d4b4e16'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_aggregate.php]/content: content changed '{md5}eeddc4cf93d7ca7dbf1e6ea1effcc278' to '{md5}ebbacb754b35bcdab9b246a64589f7c6'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-servicegroups.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/contacts.cfg]/content: content changed '{md5}5ee6a28df66ed3cc6f76b46073ced9ac' to '{md5}05c47f03c0800b968023152479f24ccb'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/contacts.cfg]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_templeton_status.sh]/ensure: defined content as 
'{md5}017d82f1435f2a1059d13968b14174bc'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_puppet_agent_status.php]/ensure: defined content as '{md5}a623c36478c7ab4bf8df595822683762'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/content: content changed '{md5}3a4b1c8fd3b42affa5ae58a385a48000' to '{md5}870993486c9e4336086645a14d327ead'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-services.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_oozie_status.sh]/content: content changed '{md5}e672a044b2aa7163ceda92829d43e92c' to '{md5}3124bf8679ce198ba25509e430db325b'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_webui.sh]/content: content changed '{md5}f8e41fdb30d0cd838aefd7ea9e5af4a2' to '{md5}258d5ced025d4b14853cc0b12bdde1f0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hdfs_capacity.php]/content: content changed '{md5}a3391962ab1c1956610d0cb009df9b8c' to '{md5}398026717db006c423441a223731914f'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 52.10 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_templeton_status.sh]/ensure: defined content as '{md5}017d82f1435f2a1059d13968b14174bc'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_puppet_agent_status.php]/ensure: defined content as '{md5}a623c36478c7ab4bf8df595822683762'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/content: content changed '{md5}3a4b1c8fd3b42affa5ae58a385a48000' to '{md5}870993486c9e4336086645a14d327ead'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-services.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_oozie_status.sh]/content: content changed '{md5}e672a044b2aa7163ceda92829d43e92c' to '{md5}3124bf8679ce198ba25509e430db325b'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_webui.sh]/content: content changed '{md5}f8e41fdb30d0cd838aefd7ea9e5af4a2' to '{md5}258d5ced025d4b14853cc0b12bdde1f0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hdfs_capacity.php]/content: content changed '{md5}a3391962ab1c1956610d0cb009df9b8c' to '{md5}398026717db006c423441a223731914f'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 52.10 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 10,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "NAGIOS_SERVER",
-        "start_time" : 1352863665856,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/15",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.14 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.10 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}43bb2e790338badc9f17297bc958b536'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}7c4fcfb891f7ca4a9156ba37d35b7dc4'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/mode: mode changed '0755' to '0750'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 125.19 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 125.19 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host2",
-        "id" : 15,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863666842,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/21",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop/Hdp::Package[sqoop]/Hdp::Package::Yum[sqoop]/Package[sqoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java sqoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Mysql-connector/Hdp::Exec[yum install -y mysql-connector-java]/Exec[yum install -y mysql-connector-java]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib/sqoop/lib//mysql-connector-java.jar]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib/sqoop/conf/sqoop-env.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 50.25 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop/Hdp::Package[sqoop]/Hdp::Package::Yum[sqoop]/Package[sqoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java sqoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Mysql-connector/Hdp::Exec[yum install -y mysql-connector-java]/Exec[yum install -y mysql-connector-java]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib/sqoop/lib//mysql-connector-java.jar]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib/sqoop/conf/sqoop-env.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 50.25 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 21,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "SQOOP",
-        "start_time" : 1352863667466,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/5",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}428a01fb131e6c37a876dd03a2940d79'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}c14eb8ab2bb5ab75789c875534ab64f4'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/mode: mode changed '0755' to '0750'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 148.09 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 148.09 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host1",
-        "id" : 5,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863664723,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/17",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}43bb2e790338badc9f17297bc958b536' to '{md5}0c93d35cf2bf9188e20542db3417f453'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}7c4fcfb891f7ca4a9156ba37d35b7dc4' to '{md5}fcfc81d25ae7ad5f5aaaacdc3d47f0f5'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.23 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}43bb2e790338badc9f17297bc958b536' to '{md5}0c93d35cf2bf9188e20542db3417f453'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}7c4fcfb891f7ca4a9156ba37d35b7dc4' to '{md5}fcfc81d25ae7ad5f5aaaacdc3d47f0f5'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.23 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 17,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "HDFS_CLIENT",
-        "start_time" : 1352863666987,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/9",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[rrdtool-python]/Hdp::Package::Yum[rrdtool-python]/Package[python-rrdtool.x86_64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[ganglia-gweb]/Hdp::Package::Yum[ganglia-gweb]/Package[gweb]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[ganglia-server]/Hdp::Package::Yum[ganglia-server]/Package[ganglia-gmetad-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[ganglia-hdp-gweb-addons]/Hdp::Package::Yum[ganglia-hdp-gweb-addons]/Package[hdp_mon_ganglia_addons]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[gmetad]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/owner: owner changed 'apache' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/group: group changed 'apache' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/mode: mode changed '0755' to '0777'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver start]/Exec[monitor webserver start]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/www/cgi-bin/rrd.py]/ensure: defined content as '{md5}95b666a938f3080c370aeb6e3136cc6b'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 63.16 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[gmetad]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/owner: owner changed 'apache' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/group: group changed 'apache' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/mode: mode changed '0755' to '0777'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver start]/Exec[monitor webserver start]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/www/cgi-bin/rrd.py]/ensure: defined content as '{md5}95b666a938f3080c370aeb6e3136cc6b'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 63.16 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 9,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_SERVER",
-        "start_time" : 1352863665690,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/20",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig/Hdp::Package[pig]/Hdp::Package::Yum[pig]/Package[pig.noarch]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java pig]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig-env.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/content: content changed '{md5}ba02e0d405cdbafd081e13ab2ef06403' to '{md5}c53b9db5f04e0c66451e52b4b39c8b17'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/content: content changed '{md5}fd58c23539a391f6a74db6e22b67f7d3' to '{md5}82b233fb0a252aae098c7267aafeb01f'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 22.14 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java pig]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig-env.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/content: content changed '{md5}ba02e0d405cdbafd081e13ab2ef06403' to '{md5}c53b9db5f04e0c66451e52b4b39c8b17'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/content: content changed '{md5}fd58c23539a391f6a74db6e22b67f7d3' to '{md5}82b233fb0a252aae098c7267aafeb01f'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 22.14 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 20,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "PIG",
-        "start_time" : 1352863667299,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/11",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}600cddff911584631420067cd2d2a5f6'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}05bed82ac309c3636b85b7ffae797cd1'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_name_dirs[/grid/0/hadoop/hdfs/namenode,/grid/1/hadoop/hdfs/namenode,]/Hdp::Directory_recursive_create[/grid/0/hadoop/hdfs/namenode]/Hdp::Exec[mkdir -p /grid/0/hadoop/hdfs/namenode]/Exec[mkdir -p /grid/0/hadoop/hdfs/namenode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_name_dirs[/grid/0/hadoop/hdfs/namenode,/grid/1/hadoop/hdfs/namenode,]/Hdp::Directory_recursive_create[/grid/1/hadoop/hdfs/namenode]/Hdp::Exec[mkdir -p /grid/1/hadoop/hdfs/namenode]/Exec[mkdir -p /grid/1/hadoop/hdfs/namenode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/hdfs/namenode]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/hdfs/namenode]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/hdfs/namenode]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/hdfs/namenode]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namenode]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namenode]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namenode]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namenode]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Service[namenode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Service[namenode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 75.23 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namenode]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Service[namenode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Service[namenode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 75.23 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 11,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "NAMENODE",
-        "start_time" : 1352863665939,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/16",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as 
'{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 29.27 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 29.27 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 16,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863666913,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/18",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}0c93d35cf2bf9188e20542db3417f453' to '{md5}089df31bb24ce7450baba1a7541e4546'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}13834e68f9058d26d30e3e627bea2d08'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.32 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}0c93d35cf2bf9188e20542db3417f453' to '{md5}089df31bb24ce7450baba1a7541e4546'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}13834e68f9058d26d30e3e627bea2d08'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.32 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 18,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "MAPREDUCE_CLIENT",
-        "start_time" : 1352863667058,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/2",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ambari-log4j]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp::Package[ambari-log4j]/Hdp::Package::Yum[ambari-log4j]/Package[ambari-log4j]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Jobtracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/0/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/0/hadoop/mapred]/Exec[mkdir -p /grid/0/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Jobtracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/1/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/1/hadoop/mapred]/Exec[mkdir -p /grid/1/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}de205131fc2bc8c15f7bd1329fc8ea0d'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib/hadoop/lib/hadoop-tools.jar]/ensure: 
created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}cccc03b9f3384eac76957c7fe2f12849'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 78.36 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}cccc03b9f3384eac76957c7fe2f12849'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 78.36 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 2,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "JOBTRACKER",
-        "start_time" : 1352863664213,
-        "stage_id" : 1
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_failure/poll_1.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_failure/poll_1.json
deleted file mode 100644
index fdfb408..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_failure/poll_1.json
+++ /dev/null
@@ -1,405 +0,0 @@
-{
-  "href" : "http://ambari:8080/api/clusters/mycluster/requests/1?fields=tasks/*",
-  "Requests" : {
-    "id" : 1,
-    "cluster_name" : "mycluster"
-  },
-  "tasks" : [
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/5",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host1",
-        "id" : 5,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/20",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 20,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "PIG",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/15",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 15,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/3",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 3,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "OOZIE_SERVER",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/7",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host1",
-        "id" : 7,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/14",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host4",
-        "id" : 14,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/13",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host4",
-        "id" : 13,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/1",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 1,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/18",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 18,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "MAPREDUCE_CLIENT",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/8",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 8,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/6",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host1",
-        "id" : 6,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/2",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 2,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "JOBTRACKER",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/4",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 4,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "SECONDARY_NAMENODE",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/19",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 19,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "OOZIE_CLIENT",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/10",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 10,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "NAGIOS_SERVER",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/12",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host4",
-        "id" : 12,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/21",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 21,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "SQOOP",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/9",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 9,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_SERVER",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/22",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 22,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/16",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 16,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/11",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 11,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "NAMENODE",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/17",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 17,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "HDFS_CLIENT",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_failure/poll_2.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_failure/poll_2.json
deleted file mode 100644
index 822e351..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_failure/poll_2.json
+++ /dev/null
@@ -1,405 +0,0 @@
-{
-  "href" : "http://ambari:8080/api/clusters/mycluster/requests/1?fields=tasks/*",
-  "Requests" : {
-    "id" : 1,
-    "cluster_name" : "mycluster"
-  },
-  "tasks" : [
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/4",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 4,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "SECONDARY_NAMENODE",
-        "start_time" : 1352863664537,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/15",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 15,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863666842,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/7",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host1",
-        "id" : 7,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863665177,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/14",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host4",
-        "id" : 14,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863666672,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/10",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 10,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "NAGIOS_SERVER",
-        "start_time" : 1352863665856,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/21",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 21,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "SQOOP",
-        "start_time" : 1352863667466,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/8",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 8,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863665481,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/16",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 16,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863666913,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/2",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 2,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "JOBTRACKER",
-        "start_time" : 1352863664213,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/20",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 20,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "PIG",
-        "start_time" : 1352863667299,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/11",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 11,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "NAMENODE",
-        "start_time" : 1352863665939,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/3",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 3,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "OOZIE_SERVER",
-        "start_time" : 1352863664455,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/12",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host4",
-        "id" : 12,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863666165,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/9",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 9,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_SERVER",
-        "start_time" : 1352863665690,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/6",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host1",
-        "id" : 6,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863664901,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/17",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 17,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "HDFS_CLIENT",
-        "start_time" : 1352863666987,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/18",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 18,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "MAPREDUCE_CLIENT",
-        "start_time" : 1352863667058,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/19",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 19,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "OOZIE_CLIENT",
-        "start_time" : 1352863667216,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/22",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 22,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863667578,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/5",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host1",
-        "id" : 5,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863664723,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/1",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 1,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863663984,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/13",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host4",
-        "id" : 13,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863666401,
-        "stage_id" : 1
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_failure/poll_3.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_failure/poll_3.json
deleted file mode 100644
index eda90c9..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_failure/poll_3.json
+++ /dev/null
@@ -1,405 +0,0 @@
-{
-  "href" : "http://ambari:8080/api/clusters/mycluster/requests/1?fields=tasks/*",
-  "Requests" : {
-    "id" : 1,
-    "cluster_name" : "mycluster"
-  },
-  "tasks" : [
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/11",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 11,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "NAMENODE",
-        "start_time" : 1352863665939,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/3",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $lzo_enabled at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:37 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ensure at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:76 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java oozie-server]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie/Hdp::Package[oozie-server]/Hdp::Package::Yum[oozie-server]/Package[oozie]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java oozie-client]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/content: content changed '{md5}809b11d7098abd5cbcb08b9ceda104ed' to '{md5}851335f018fb288e30cf38afc96fff6a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/content: content changed '{md5}db07d1efafb9314e2e7d2c8a4d3ba6e7' to '{md5}42fd0d6fe7301cb54ea7129d6b930f59'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-oozie::Download-ext-zip/Hdp::Package[extjs]/Hdp::Package::Yum[extjs]/Package[extjs-2.2-1]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java extjs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}11c7e0ea46916a75e66920fac8c93621' to '{md5}fc664190038e2562fe63acd61ea9480b'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/oozie/]/group: group changed 'oozie' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/oozie]/group: group changed 'oozie' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp-oozie::Service::Directory[/var/tmp/oozie]/Hdp::Directory_recursive_create[/var/tmp/oozie]/Hdp::Exec[mkdir -p /var/tmp/oozie]/Exec[mkdir -p /var/tmp/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp-oozie::Service::Directory[/var/data/oozie]/Hdp::Directory_recursive_create[/var/data/oozie]/Hdp::Exec[mkdir -p /var/data/oozie]/Exec[mkdir -p /var/data/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/oozie]/group: group changed 'oozie' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp-oozie::Service::Directory[/var/tmp/oozie]/Hdp::Directory_recursive_create[/var/tmp/oozie]/Hdp::Directory[/var/tmp/oozie]/File[/var/tmp/oozie]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-oozie::Service/Hdp-oozie::Service::Directory[/var/tmp/oozie]/Hdp::Directory_recursive_create[/var/tmp/oozie]/Hdp::Directory[/var/tmp/oozie]/File[/var/tmp/oozie]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/data/oozie]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/var/data/oozie]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/data/oozie]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 50.33 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/var/log/oozie]/group: group changed 'oozie' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp-oozie::Service::Directory[/var/tmp/oozie]/Hdp::Directory_recursive_create[/var/tmp/oozie]/Hdp::Exec[mkdir -p /var/tmp/oozie]/Exec[mkdir -p /var/tmp/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp-oozie::Service::Directory[/var/data/oozie]/Hdp::Directory_recursive_create[/var/data/oozie]/Hdp::Exec[mkdir -p /var/data/oozie]/Exec[mkdir -p /var/data/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/oozie]/group: group changed 'oozie' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp-oozie::Service::Directory[/var/tmp/oozie]/Hdp::Directory_recursive_create[/var/tmp/oozie]/Hdp::Directory[/var/tmp/oozie]/File[/var/tmp/oozie]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp-oozie::Service::Directory[/var/tmp/oozie]/Hdp::Directory_recursive_create[/var/tmp/oozie]/Hdp::Directory[/var/tmp/oozie]/File[/var/tmp/oozie]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/data/oozie]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/var/data/oozie]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/data/oozie]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 50.33 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 3,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "OOZIE_SERVER",
-        "start_time" : 1352863664455,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/4",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Snamenode::Create_name_dirs[/grid/0/hadoop/hdfs/namesecondary]/Hdp::Directory_recursive_create[/grid/0/hadoop/hdfs/namesecondary]/Hdp::Exec[mkdir -p /grid/0/hadoop/hdfs/namesecondary]/Exec[mkdir -p /grid/0/hadoop/hdfs/namesecondary]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}de205131fc2bc8c15f7bd1329fc8ea0d' to '{md5}f60477f06af8d1d549460294d0363702'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}9a88c58cc1982799c0e4bdd2d1f1e6e0'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 4.84 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed 
'{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}9a88c58cc1982799c0e4bdd2d1f1e6e0'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 4.84 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 4,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "SECONDARY_NAMENODE",
-        "start_time" : 1352863664537,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/1",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.18 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 111.24 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 111.24 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host3",
-        "id" : 1,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863663984,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/15",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.14 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.10 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}43bb2e790338badc9f17297bc958b536'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}7c4fcfb891f7ca4a9156ba37d35b7dc4'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/mode: mode changed '0755' to '0750'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 125.19 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 125.19 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host2",
-        "id" : 15,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863666842,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/19",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[2]/Hdp-oozie/Hdp::Package[oozie-client]/Hdp::Package::Yum[oozie-client]/Package[oozie-client]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java oozie-client]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie/Hdp::User[oozie]/User[oozie]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/content: content changed '{md5}809b11d7098abd5cbcb08b9ceda104ed' to '{md5}851335f018fb288e30cf38afc96fff6a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/content: content changed '{md5}db07d1efafb9314e2e7d2c8a4d3ba6e7' to '{md5}42fd0d6fe7301cb54ea7129d6b930f59'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}11c7e0ea46916a75e66920fac8c93621' to '{md5}5ad6256cb12b73724714fc692c18db82'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 21.37 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/content: content changed '{md5}809b11d7098abd5cbcb08b9ceda104ed' to '{md5}851335f018fb288e30cf38afc96fff6a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/content: content changed '{md5}db07d1efafb9314e2e7d2c8a4d3ba6e7' to '{md5}42fd0d6fe7301cb54ea7129d6b930f59'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}11c7e0ea46916a75e66920fac8c93621' to '{md5}5ad6256cb12b73724714fc692c18db82'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 21.37 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 19,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "OOZIE_CLIENT",
-        "start_time" : 1352863667216,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/6",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 34.27 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 34.27 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 6,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863664901,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/18",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}0c93d35cf2bf9188e20542db3417f453' to '{md5}089df31bb24ce7450baba1a7541e4546'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}13834e68f9058d26d30e3e627bea2d08'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.32 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}0c93d35cf2bf9188e20542db3417f453' to '{md5}089df31bb24ce7450baba1a7541e4546'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}13834e68f9058d26d30e3e627bea2d08'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.32 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 18,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "MAPREDUCE_CLIENT",
-        "start_time" : 1352863667058,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/10",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-fping]/Hdp::Package[nagios-fping]/Hdp::Package::Yum[nagios-fping]/Package[fping]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-server]/Hdp::Package[nagios-server]/Hdp::Package::Yum[nagios-server]/Package[nagios-3.2.3]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-plugins]/Hdp::Package[nagios-plugins]/Hdp::Package::Yum[nagios-plugins]/Package[nagios-plugins-1.4.9]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-addons]/Hdp::Package[nagios-addons]/Hdp::Package::Yum[nagios-addons]/Package[hdp_mon_nagios_addons]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios]/owner: owner changed 'nagios' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects]/owner: owner changed 'nagios' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hdfs_blocks.php]/content: content changed '{md5}4dc9db0a18622b3e40e266a9b54e2a1a' to '{md5}e3a5fa882154aaabe18fce6086346b98'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_rpcq_latency.php]/content: content changed '{md5}4fa432abbf0a15a2c73cd8eb7e657d6e' to '{md5}1601eec7138fcd957159e6524900a1ce'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_cpu.pl]/ensure: defined content as '{md5}ab87290dee3f032770580d7d7713d086'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hostgroups.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_datanode_storage.php]/ensure: defined content as '{md5}eeae70d7c6686ff4ce9993244dbbdf34'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hosts.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-commands.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hive_metastore_status.sh]/content: content changed '{md5}56469a95f854711ff67e961eb91a1b9a' to '{md5}987ce3f5cd654d9e8d13e9bc9d4b4e16'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_aggregate.php]/content: content changed '{md5}eeddc4cf93d7ca7dbf1e6ea1effcc278' to '{md5}ebbacb754b35bcdab9b246a64589f7c6'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-servicegroups.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/contacts.cfg]/content: content changed '{md5}5ee6a28df66ed3cc6f76b46073ced9ac' to '{md5}05c47f03c0800b968023152479f24ccb'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/contacts.cfg]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_templeton_status.sh]/ensure: defined content as '{md5}017d82f1435f2a1059d13968b14174bc'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_puppet_agent_status.php]/ensure: defined content as '{md5}a623c36478c7ab4bf8df595822683762'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/content: content changed '{md5}3a4b1c8fd3b42affa5ae58a385a48000' to '{md5}870993486c9e4336086645a14d327ead'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-services.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_oozie_status.sh]/content: content changed '{md5}e672a044b2aa7163ceda92829d43e92c' to '{md5}3124bf8679ce198ba25509e430db325b'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_webui.sh]/content: content changed '{md5}f8e41fdb30d0cd838aefd7ea9e5af4a2' to '{md5}258d5ced025d4b14853cc0b12bdde1f0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hdfs_capacity.php]/content: content changed '{md5}a3391962ab1c1956610d0cb009df9b8c' to '{md5}398026717db006c423441a223731914f'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 52.10 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_templeton_status.sh]/ensure: defined content as '{md5}017d82f1435f2a1059d13968b14174bc'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_puppet_agent_status.php]/ensure: defined content as '{md5}a623c36478c7ab4bf8df595822683762'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/content: content changed '{md5}3a4b1c8fd3b42affa5ae58a385a48000' to '{md5}870993486c9e4336086645a14d327ead'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-services.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_oozie_status.sh]/content: content changed '{md5}e672a044b2aa7163ceda92829d43e92c' to '{md5}3124bf8679ce198ba25509e430db325b'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_webui.sh]/content: content changed '{md5}f8e41fdb30d0cd838aefd7ea9e5af4a2' to '{md5}258d5ced025d4b14853cc0b12bdde1f0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hdfs_capacity.php]/content: content changed '{md5}a3391962ab1c1956610d0cb009df9b8c' to '{md5}398026717db006c423441a223731914f'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 52.10 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 10,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "NAGIOS_SERVER",
-        "start_time" : 1352863665856,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/7",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/0/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/0/hadoop/mapred]/Exec[mkdir -p /grid/0/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/1/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/1/hadoop/mapred]/Exec[mkdir -p /grid/1/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}428a01fb131e6c37a876dd03a2940d79' to '{md5}a39a2689e76538c6d9090b00ceb04eb0'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}30e43dbdb225dad740d632ecc8f6ae11'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.96 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}30e43dbdb225dad740d632ecc8f6ae11'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.96 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 7,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863665177,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/8",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.24 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 133.95 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 133.95 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host5",
-        "id" : 8,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863665481,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/22",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 22,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863667578,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/5",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}428a01fb131e6c37a876dd03a2940d79'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}c14eb8ab2bb5ab75789c875534ab64f4'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/mode: mode changed '0755' to '0750'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 148.09 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 148.09 seconds\u001B[0m",
-        "status" : "FAILED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host1",
-        "id" : 5,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863664723,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/9",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[rrdtool-python]/Hdp::Package::Yum[rrdtool-python]/Package[python-rrdtool.x86_64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[ganglia-gweb]/Hdp::Package::Yum[ganglia-gweb]/Package[gweb]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[ganglia-server]/Hdp::Package::Yum[ganglia-server]/Package[ganglia-gmetad-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[ganglia-hdp-gweb-addons]/Hdp::Package::Yum[ganglia-hdp-gweb-addons]/Package[hdp_mon_ganglia_addons]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[gmetad]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/owner: owner changed 'apache' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/group: group changed 'apache' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/mode: mode changed '0755' to '0777'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver start]/Exec[monitor webserver start]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/www/cgi-bin/rrd.py]/ensure: defined content as '{md5}95b666a938f3080c370aeb6e3136cc6b'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 63.16 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[gmetad]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/owner: owner changed 'apache' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/group: group changed 'apache' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/mode: mode changed '0755' to '0777'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver start]/Exec[monitor webserver start]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/www/cgi-bin/rrd.py]/ensure: defined content as '{md5}95b666a938f3080c370aeb6e3136cc6b'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 63.16 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 9,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_SERVER",
-        "start_time" : 1352863665690,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/17",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}43bb2e790338badc9f17297bc958b536' to '{md5}0c93d35cf2bf9188e20542db3417f453'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}7c4fcfb891f7ca4a9156ba37d35b7dc4' to '{md5}fcfc81d25ae7ad5f5aaaacdc3d47f0f5'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.23 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}43bb2e790338badc9f17297bc958b536' to '{md5}0c93d35cf2bf9188e20542db3417f453'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}7c4fcfb891f7ca4a9156ba37d35b7dc4' to '{md5}fcfc81d25ae7ad5f5aaaacdc3d47f0f5'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.23 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 17,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "HDFS_CLIENT",
-        "start_time" : 1352863666987,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/21",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 21,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "SQOOP",
-        "start_time" : 1352863667466,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/13",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as 
'{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 31.06 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 31.06 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 13,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863666401,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/2",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ambari-log4j]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp::Package[ambari-log4j]/Hdp::Package::Yum[ambari-log4j]/Package[ambari-log4j]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Jobtracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/0/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/0/hadoop/mapred]/Exec[mkdir -p /grid/0/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Jobtracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/1/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/1/hadoop/mapred]/Exec[mkdir -p /grid/1/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}de205131fc2bc8c15f7bd1329fc8ea0d'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib/hadoop/lib/hadoop-tools.jar]/ensure: 
created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}cccc03b9f3384eac76957c7fe2f12849'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 78.36 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}cccc03b9f3384eac76957c7fe2f12849'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 78.36 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 2,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "JOBTRACKER",
-        "start_time" : 1352863664213,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/16",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as 
'{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 29.27 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 29.27 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 16,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863666913,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/12",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.10 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.10 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}00ac2c1fbbe400ce3e93c316c03c8c8c'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}54edf0ba61f6501cc49c0d7788b266b1'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/mode: mode changed '0755' to '0750'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 130.97 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 130.97 seconds\u001B[0m",
-        "status" : "FAILED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host4",
-        "id" : 12,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863666165,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/14",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/0/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/0/hadoop/mapred]/Exec[mkdir -p /grid/0/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/1/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/1/hadoop/mapred]/Exec[mkdir -p /grid/1/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}00ac2c1fbbe400ce3e93c316c03c8c8c' to '{md5}a973d9bcff056aeff7f22221886c84b7'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p 
/var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}1a3769d695902dba39b5645fef3766e0'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.51 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}1a3769d695902dba39b5645fef3766e0'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.51 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 14,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863666672,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/20",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig/Hdp::Package[pig]/Hdp::Package::Yum[pig]/Package[pig.noarch]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java pig]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig-env.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/content: content changed '{md5}ba02e0d405cdbafd081e13ab2ef06403' to '{md5}c53b9db5f04e0c66451e52b4b39c8b17'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/content: content changed '{md5}fd58c23539a391f6a74db6e22b67f7d3' to '{md5}82b233fb0a252aae098c7267aafeb01f'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 22.14 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java pig]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig-env.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/content: content changed '{md5}ba02e0d405cdbafd081e13ab2ef06403' to '{md5}c53b9db5f04e0c66451e52b4b39c8b17'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/content: content changed '{md5}fd58c23539a391f6a74db6e22b67f7d3' to '{md5}82b233fb0a252aae098c7267aafeb01f'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 22.14 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 20,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "PIG",
-        "start_time" : 1352863667299,
-        "stage_id" : 1
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_failure/poll_4.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_failure/poll_4.json
deleted file mode 100644
index 3db0434..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_failure/poll_4.json
+++ /dev/null
@@ -1,405 +0,0 @@
-{
-  "href" : "http://ambari:8080/api/clusters/mycluster/requests/1?fields=tasks/*",
-  "Requests" : {
-    "id" : 1,
-    "cluster_name" : "mycluster"
-  },
-  "tasks" : [
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/13",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as 
'{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 31.06 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 31.06 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 13,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863666401,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/7",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/0/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/0/hadoop/mapred]/Exec[mkdir -p /grid/0/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/1/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/1/hadoop/mapred]/Exec[mkdir -p /grid/1/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}428a01fb131e6c37a876dd03a2940d79' to '{md5}a39a2689e76538c6d9090b00ceb04eb0'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p 
/var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}30e43dbdb225dad740d632ecc8f6ae11'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.96 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}30e43dbdb225dad740d632ecc8f6ae11'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.96 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 7,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863665177,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/14",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/0/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/0/hadoop/mapred]/Exec[mkdir -p /grid/0/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/1/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/1/hadoop/mapred]/Exec[mkdir -p /grid/1/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}00ac2c1fbbe400ce3e93c316c03c8c8c' to '{md5}a973d9bcff056aeff7f22221886c84b7'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p 
/var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}1a3769d695902dba39b5645fef3766e0'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.51 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}1a3769d695902dba39b5645fef3766e0'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.51 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 14,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863666672,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/4",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Snamenode::Create_name_dirs[/grid/0/hadoop/hdfs/namesecondary]/Hdp::Directory_recursive_create[/grid/0/hadoop/hdfs/namesecondary]/Hdp::Exec[mkdir -p /grid/0/hadoop/hdfs/namesecondary]/Exec[mkdir -p /grid/0/hadoop/hdfs/namesecondary]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}de205131fc2bc8c15f7bd1329fc8ea0d' to '{md5}f60477f06af8d1d549460294d0363702'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}9a88c58cc1982799c0e4bdd2d1f1e6e0'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 4.84 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed 
'{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}9a88c58cc1982799c0e4bdd2d1f1e6e0'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 4.84 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 4,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "SECONDARY_NAMENODE",
-        "start_time" : 1352863664537,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/6",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as 
'{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 34.27 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 34.27 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 6,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863664901,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/19",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[2]/Hdp-oozie/Hdp::Package[oozie-client]/Hdp::Package::Yum[oozie-client]/Package[oozie-client]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java oozie-client]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie/Hdp::User[oozie]/User[oozie]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/content: content changed '{md5}809b11d7098abd5cbcb08b9ceda104ed' to '{md5}851335f018fb288e30cf38afc96fff6a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/content: content changed '{md5}db07d1efafb9314e2e7d2c8a4d3ba6e7' to '{md5}42fd0d6fe7301cb54ea7129d6b930f59'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}11c7e0ea46916a75e66920fac8c93621' to '{md5}5ad6256cb12b73724714fc692c18db82'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 21.37 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/content: content changed '{md5}809b11d7098abd5cbcb08b9ceda104ed' to '{md5}851335f018fb288e30cf38afc96fff6a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/content: content changed '{md5}db07d1efafb9314e2e7d2c8a4d3ba6e7' to '{md5}42fd0d6fe7301cb54ea7129d6b930f59'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}11c7e0ea46916a75e66920fac8c93621' to '{md5}5ad6256cb12b73724714fc692c18db82'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 21.37 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 19,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "OOZIE_CLIENT",
-        "start_time" : 1352863667216,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/3",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $lzo_enabled at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:37 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ensure at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:76 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}fc664190038e2562fe63acd61ea9480b' to '{md5}827a6e7bd4233d4dc82b20761aed1e30'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.12 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $lzo_enabled at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:37 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ensure at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:76 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}fc664190038e2562fe63acd61ea9480b' to '{md5}827a6e7bd4233d4dc82b20761aed1e30'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.12 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 3,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "OOZIE_SERVER",
-        "start_time" : 1352863664455,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/1",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.18 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 111.24 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 111.24 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host3",
-        "id" : 1,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863663984,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/22",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/0/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/0/hadoop/mapred]/Exec[mkdir -p /grid/0/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/1/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/1/hadoop/mapred]/Exec[mkdir -p /grid/1/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}089df31bb24ce7450baba1a7541e4546' to '{md5}8da2518fdfc4a3723e64babe25c8d6d8'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p 
/var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}13834e68f9058d26d30e3e627bea2d08' to '{md5}816e416f74804ba21e3b80b611d59a11'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.75 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}13834e68f9058d26d30e3e627bea2d08' to '{md5}816e416f74804ba21e3b80b611d59a11'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.75 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 22,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863667578,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/8",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.24 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 133.95 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: 
/File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 133.95 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host5",
-        "id" : 8,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863665481,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/12",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.10 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.10 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}00ac2c1fbbe400ce3e93c316c03c8c8c'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}54edf0ba61f6501cc49c0d7788b266b1'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/mode: mode changed '0755' to '0750'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 130.97 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 130.97 seconds\u001B[0m",
-        "status" : "FAILED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host4",
-        "id" : 12,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863666165,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/10",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-fping]/Hdp::Package[nagios-fping]/Hdp::Package::Yum[nagios-fping]/Package[fping]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-server]/Hdp::Package[nagios-server]/Hdp::Package::Yum[nagios-server]/Package[nagios-3.2.3]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-plugins]/Hdp::Package[nagios-plugins]/Hdp::Package::Yum[nagios-plugins]/Package[nagios-plugins-1.4.9]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-addons]/Hdp::Package[nagios-addons]/Hdp::Package::Yum[nagios-addons]/Package[hdp_mon_nagios_addons]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios]/owner: owner changed 'nagios' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects]/owner: owner changed 'nagios' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hdfs_blocks.php]/content: content changed '{md5}4dc9db0a18622b3e40e266a9b54e2a1a' to '{md5}e3a5fa882154aaabe18fce6086346b98'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_rpcq_latency.php]/content: content changed '{md5}4fa432abbf0a15a2c73cd8eb7e657d6e' to '{md5}1601eec7138fcd957159e6524900a1ce'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_cpu.pl]/ensure: defined content as '{md5}ab87290dee3f032770580d7d7713d086'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hostgroups.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_datanode_storage.php]/ensure: defined content as '{md5}eeae70d7c6686ff4ce9993244dbbdf34'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hosts.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-commands.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hive_metastore_status.sh]/content: content changed '{md5}56469a95f854711ff67e961eb91a1b9a' to '{md5}987ce3f5cd654d9e8d13e9bc9d4b4e16'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_aggregate.php]/content: content changed '{md5}eeddc4cf93d7ca7dbf1e6ea1effcc278' to '{md5}ebbacb754b35bcdab9b246a64589f7c6'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-servicegroups.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/contacts.cfg]/content: content changed '{md5}5ee6a28df66ed3cc6f76b46073ced9ac' to '{md5}05c47f03c0800b968023152479f24ccb'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/contacts.cfg]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_templeton_status.sh]/ensure: defined content as 
'{md5}017d82f1435f2a1059d13968b14174bc'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_puppet_agent_status.php]/ensure: defined content as '{md5}a623c36478c7ab4bf8df595822683762'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/content: content changed '{md5}3a4b1c8fd3b42affa5ae58a385a48000' to '{md5}870993486c9e4336086645a14d327ead'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-services.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_oozie_status.sh]/content: content changed '{md5}e672a044b2aa7163ceda92829d43e92c' to '{md5}3124bf8679ce198ba25509e430db325b'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_webui.sh]/content: content changed '{md5}f8e41fdb30d0cd838aefd7ea9e5af4a2' to '{md5}258d5ced025d4b14853cc0b12bdde1f0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hdfs_capacity.php]/content: content changed '{md5}a3391962ab1c1956610d0cb009df9b8c' to '{md5}398026717db006c423441a223731914f'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 52.10 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_templeton_status.sh]/ensure: defined content as '{md5}017d82f1435f2a1059d13968b14174bc'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_puppet_agent_status.php]/ensure: defined content as '{md5}a623c36478c7ab4bf8df595822683762'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/content: content changed '{md5}3a4b1c8fd3b42affa5ae58a385a48000' to '{md5}870993486c9e4336086645a14d327ead'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-services.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_oozie_status.sh]/content: content changed '{md5}e672a044b2aa7163ceda92829d43e92c' to '{md5}3124bf8679ce198ba25509e430db325b'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_webui.sh]/content: content changed '{md5}f8e41fdb30d0cd838aefd7ea9e5af4a2' to '{md5}258d5ced025d4b14853cc0b12bdde1f0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hdfs_capacity.php]/content: content changed '{md5}a3391962ab1c1956610d0cb009df9b8c' to '{md5}398026717db006c423441a223731914f'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 52.10 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 10,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "NAGIOS_SERVER",
-        "start_time" : 1352863665856,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/15",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.14 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.10 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}43bb2e790338badc9f17297bc958b536'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}7c4fcfb891f7ca4a9156ba37d35b7dc4'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/mode: mode changed '0755' to '0750'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 125.19 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 125.19 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host2",
-        "id" : 15,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863666842,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/21",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop/Hdp::Package[sqoop]/Hdp::Package::Yum[sqoop]/Package[sqoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java sqoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Mysql-connector/Hdp::Exec[yum install -y mysql-connector-java]/Exec[yum install -y mysql-connector-java]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib/sqoop/lib//mysql-connector-java.jar]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib/sqoop/conf/sqoop-env.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 50.25 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop/Hdp::Package[sqoop]/Hdp::Package::Yum[sqoop]/Package[sqoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java sqoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Mysql-connector/Hdp::Exec[yum install -y mysql-connector-java]/Exec[yum install -y mysql-connector-java]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib/sqoop/lib//mysql-connector-java.jar]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib/sqoop/conf/sqoop-env.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 50.25 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 21,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "SQOOP",
-        "start_time" : 1352863667466,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/5",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}428a01fb131e6c37a876dd03a2940d79'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}c14eb8ab2bb5ab75789c875534ab64f4'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/mode: mode changed '0755' to '0750'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 148.09 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 148.09 seconds\u001B[0m",
-        "status" : "FAILED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host1",
-        "id" : 5,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863664723,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/17",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}43bb2e790338badc9f17297bc958b536' to '{md5}0c93d35cf2bf9188e20542db3417f453'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}7c4fcfb891f7ca4a9156ba37d35b7dc4' to '{md5}fcfc81d25ae7ad5f5aaaacdc3d47f0f5'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.23 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}43bb2e790338badc9f17297bc958b536' to '{md5}0c93d35cf2bf9188e20542db3417f453'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}7c4fcfb891f7ca4a9156ba37d35b7dc4' to '{md5}fcfc81d25ae7ad5f5aaaacdc3d47f0f5'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.23 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 17,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "HDFS_CLIENT",
-        "start_time" : 1352863666987,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/9",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[rrdtool-python]/Hdp::Package::Yum[rrdtool-python]/Package[python-rrdtool.x86_64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[ganglia-gweb]/Hdp::Package::Yum[ganglia-gweb]/Package[gweb]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[ganglia-server]/Hdp::Package::Yum[ganglia-server]/Package[ganglia-gmetad-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[ganglia-hdp-gweb-addons]/Hdp::Package::Yum[ganglia-hdp-gweb-addons]/Package[hdp_mon_ganglia_addons]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[gmetad]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/owner: owner changed 'apache' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/group: group changed 'apache' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/mode: mode changed '0755' to '0777'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver start]/Exec[monitor webserver start]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/www/cgi-bin/rrd.py]/ensure: defined content as '{md5}95b666a938f3080c370aeb6e3136cc6b'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 63.16 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[gmetad]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/owner: owner changed 'apache' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/group: group changed 'apache' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/mode: mode changed '0755' to '0777'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver start]/Exec[monitor webserver start]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/www/cgi-bin/rrd.py]/ensure: defined content as '{md5}95b666a938f3080c370aeb6e3136cc6b'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 63.16 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 9,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_SERVER",
-        "start_time" : 1352863665690,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/20",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig/Hdp::Package[pig]/Hdp::Package::Yum[pig]/Package[pig.noarch]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java pig]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig-env.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/content: content changed '{md5}ba02e0d405cdbafd081e13ab2ef06403' to '{md5}c53b9db5f04e0c66451e52b4b39c8b17'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/content: content changed '{md5}fd58c23539a391f6a74db6e22b67f7d3' to '{md5}82b233fb0a252aae098c7267aafeb01f'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 22.14 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java pig]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig-env.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/content: content changed '{md5}ba02e0d405cdbafd081e13ab2ef06403' to '{md5}c53b9db5f04e0c66451e52b4b39c8b17'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/content: content changed '{md5}fd58c23539a391f6a74db6e22b67f7d3' to '{md5}82b233fb0a252aae098c7267aafeb01f'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 22.14 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 20,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "PIG",
-        "start_time" : 1352863667299,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/11",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}600cddff911584631420067cd2d2a5f6'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}05bed82ac309c3636b85b7ffae797cd1'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_name_dirs[/grid/0/hadoop/hdfs/namenode,/grid/1/hadoop/hdfs/namenode,]/Hdp::Directory_recursive_create[/grid/0/hadoop/hdfs/namenode]/Hdp::Exec[mkdir -p /grid/0/hadoop/hdfs/namenode]/Exec[mkdir -p /grid/0/hadoop/hdfs/namenode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_name_dirs[/grid/0/hadoop/hdfs/namenode,/grid/1/hadoop/hdfs/namenode,]/Hdp::Directory_recursive_create[/grid/1/hadoop/hdfs/namenode]/Hdp::Exec[mkdir -p /grid/1/hadoop/hdfs/namenode]/Exec[mkdir -p /grid/1/hadoop/hdfs/namenode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/hdfs/namenode]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/hdfs/namenode]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/hdfs/namenode]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/hdfs/namenode]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namenode]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namenode]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namenode]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namenode]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Service[namenode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Service[namenode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 75.23 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namenode]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Service[namenode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Service[namenode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 75.23 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 11,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "NAMENODE",
-        "start_time" : 1352863665939,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/16",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as 
'{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 29.27 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 29.27 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 16,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863666913,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/18",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}0c93d35cf2bf9188e20542db3417f453' to '{md5}089df31bb24ce7450baba1a7541e4546'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}13834e68f9058d26d30e3e627bea2d08'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.32 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}0c93d35cf2bf9188e20542db3417f453' to '{md5}089df31bb24ce7450baba1a7541e4546'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}13834e68f9058d26d30e3e627bea2d08'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.32 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 18,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "MAPREDUCE_CLIENT",
-        "start_time" : 1352863667058,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/2",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ambari-log4j]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp::Package[ambari-log4j]/Hdp::Package::Yum[ambari-log4j]/Package[ambari-log4j]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Jobtracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/0/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/0/hadoop/mapred]/Exec[mkdir -p /grid/0/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Jobtracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/1/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/1/hadoop/mapred]/Exec[mkdir -p /grid/1/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}de205131fc2bc8c15f7bd1329fc8ea0d'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib/hadoop/lib/hadoop-tools.jar]/ensure: 
created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}cccc03b9f3384eac76957c7fe2f12849'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 78.36 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}cccc03b9f3384eac76957c7fe2f12849'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 78.36 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 2,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "JOBTRACKER",
-        "start_time" : 1352863664213,
-        "stage_id" : 1
-      }
-    }
-  ]
-}
\ No newline at end of file
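
The poll fixtures being deleted here all share one shape: a "Requests" envelope plus a "tasks" array whose "Tasks" objects carry status, exit_code, command, role, host_name, and a "stdout" string holding raw Puppet agent output, ANSI color escapes (\u001B[0;36m ... \u001B[0m) included. A minimal sketch of reading one such file follows; the local file name and the report format are illustrative assumptions, not part of ambari-web:

import json
import re

# The \u001B[0;36m ... \u001B[0m sequences inside "stdout" are ANSI color codes.
ANSI = re.compile(r'\x1b\[[0-9;]*m')

def summarize(path):
    with open(path) as f:
        doc = json.load(f)
    for entry in doc['tasks']:
        t = entry['Tasks']
        clean = ANSI.sub('', t['stdout'])
        # Last Puppet line, e.g. "notice: Finished catalog run in 148.09 seconds"
        tail = clean.strip().splitlines()[-1] if clean.strip() else ''
        print(f"{t['id']:>3}  {t['role']:<18} {t['host_name']:<6} "
              f"{t['status']:<10} exit={t['exit_code']}  {tail}")

summarize('poll.json')  # hypothetical local copy of one fixture
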
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_warning/poll_1.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_warning/poll_1.json
deleted file mode 100644
index fdfb408..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_warning/poll_1.json
+++ /dev/null
@@ -1,405 +0,0 @@
-{
-  "href" : "http://ambari:8080/api/clusters/mycluster/requests/1?fields=tasks/*",
-  "Requests" : {
-    "id" : 1,
-    "cluster_name" : "mycluster"
-  },
-  "tasks" : [
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/5",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host1",
-        "id" : 5,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/20",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 20,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "PIG",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/15",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 15,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/3",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 3,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "OOZIE_SERVER",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/7",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host1",
-        "id" : 7,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/14",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host4",
-        "id" : 14,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/13",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host4",
-        "id" : 13,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/1",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 1,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/18",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 18,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "MAPREDUCE_CLIENT",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/8",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 8,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/6",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host1",
-        "id" : 6,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/2",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 2,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "JOBTRACKER",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/4",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 4,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "SECONDARY_NAMENODE",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/19",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 19,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "OOZIE_CLIENT",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/10",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 10,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "NAGIOS_SERVER",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/12",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host4",
-        "id" : 12,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/21",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 21,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "SQOOP",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/9",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 9,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_SERVER",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/22",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 22,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/16",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 16,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/11",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 11,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "NAMENODE",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/17",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 17,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "HDFS_CLIENT",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    }
-  ]
-}
\ No newline at end of file
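
poll_1.json above captures the very start of the install request: every task is PENDING, with exit_code 999 and start_time -1 as not-yet-run sentinels, while later fixtures in the same directory (poll_10.json below, for example) show the same task ids moving to COMPLETED or FAILED. A hedged sketch of the kind of aggregation a deploy-progress screen could do over a single poll; the terminal-status set is an assumption based on Ambari's HostRoleStatus values, and the percentage is a plain count, not the actual ambari-web weighting:

import json
from collections import Counter

# Assumed terminal set; QUEUED and IN_PROGRESS (not shown in these fixtures)
# would count as still running.
TERMINAL = {'COMPLETED', 'FAILED', 'TIMEDOUT', 'ABORTED'}

def progress(path):
    with open(path) as f:
        tasks = [e['Tasks'] for e in json.load(f)['tasks']]
    counts = Counter(t['status'] for t in tasks)
    done = sum(n for s, n in counts.items() if s in TERMINAL)
    pct = 100.0 * done / len(tasks) if tasks else 0.0
    print(dict(counts), f"-> {pct:.0f}% of {len(tasks)} tasks finished")

progress('poll_1.json')  # this fixture: {'PENDING': 22} -> 0% of 22 tasks finished
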
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_warning/poll_10.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_warning/poll_10.json
deleted file mode 100644
index 1a54b73..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_warning/poll_10.json
+++ /dev/null
@@ -1,405 +0,0 @@
-{
-  "href" : "http://ambari:8080/api/clusters/mycluster/requests/2?fields=tasks/*",
-  "Requests" : {
-    "id" : 2,
-    "cluster_name" : "mycluster"
-  },
-  "tasks" : [
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/33",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.72 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.72 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 33,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864090181,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/30",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}a973d9bcff056aeff7f22221886c84b7' to '{md5}df2d55356b238461af57fe22ad993e4d'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}54edf0ba61f6501cc49c0d7788b266b1' to '{md5}b25bda7a405235227d20732f0972c5f6'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.00 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}a973d9bcff056aeff7f22221886c84b7' to '{md5}df2d55356b238461af57fe22ad993e4d'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}54edf0ba61f6501cc49c0d7788b266b1' to '{md5}b25bda7a405235227d20732f0972c5f6'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.00 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 30,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "DATANODE",
-        "start_time" : 1352864090068,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/38",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::check_safemode]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/returns: Safe mode is OFF\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::check_safemode]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::create_file]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -rm /tmp/id280a7781_date381312; hadoop fs -put /etc/passwd /tmp/id280a7781_date381312]/Exec[hadoop --config /etc/hadoop/conf fs -rm /tmp/id280a7781_date381312; hadoop fs -put /etc/passwd /tmp/id280a7781_date381312]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e /tmp/id280a7781_date381312]/Exec[hadoop --config /etc/hadoop/conf fs -test -e /tmp/id280a7781_date381312]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 10.35 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::check_safemode]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/returns: Safe mode is OFF\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::check_safemode]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::create_file]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -rm /tmp/id280a7781_date381312; hadoop fs -put /etc/passwd /tmp/id280a7781_date381312]/Exec[hadoop --config /etc/hadoop/conf fs -rm /tmp/id280a7781_date381312; hadoop fs -put /etc/passwd /tmp/id280a7781_date381312]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e /tmp/id280a7781_date381312]/Exec[hadoop --config /etc/hadoop/conf fs -test -e /tmp/id280a7781_date381312]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 10.35 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 38,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "HDFS_SERVICE_CHECK",
-        "start_time" : 1352864269616,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/26",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 7.68 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 7.68 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 26,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864089836,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/24",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}a39a2689e76538c6d9090b00ceb04eb0' to '{md5}9786ed97b221e37075bdb64400bc804a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}c14eb8ab2bb5ab75789c875534ab64f4' to '{md5}9684de67c2a8fa0f7292418d6c0c1651'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.78 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}a39a2689e76538c6d9090b00ceb04eb0' to '{md5}9786ed97b221e37075bdb64400bc804a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}c14eb8ab2bb5ab75789c875534ab64f4' to '{md5}9684de67c2a8fa0f7292418d6c0c1651'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.78 seconds\u001B[0m",
-        "status" : "FAILED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 24,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "DATANODE",
-        "start_time" : 1352864089661,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/35",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}4673b67e078cc9d84ffc4873e5198edf' to '{md5}654e54e7c3f58aa3d37d07110ad63bb5'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}d3b2d5e47669c948fccb907fa32c2b55' to '{md5}0e079fd5bc7cc43a35b60012c9ee00d9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.45 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}4673b67e078cc9d84ffc4873e5198edf' to '{md5}654e54e7c3f58aa3d37d07110ad63bb5'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}d3b2d5e47669c948fccb907fa32c2b55' to '{md5}0e079fd5bc7cc43a35b60012c9ee00d9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.45 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 35,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "SECONDARY_NAMENODE",
-        "start_time" : 1352864269474,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/40",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $lzo_enabled at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:37 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ensure at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:76 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}827a6e7bd4233d4dc82b20761aed1e30' to '{md5}4e59b973cec0811615008a580244bcdb'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp::Exec[/bin/sh -c 'cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz && mkdir -p /var/tmp/oozie && chown oozie:hadoop /var/tmp/oozie && cd /var/tmp/oozie' && su - oozie -c '/usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 /usr/lib/hadoop/ -extjs /usr/share/HDP-oozie/ext.zip -jars /usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run ; hadoop dfs -put /usr/lib/oozie/share share ; hadoop dfs -chmod -R 755 /user/oozie/share && /usr/lib/oozie/bin/oozie-start.sh' ]/Exec[/bin/sh -c 'cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz && mkdir -p /var/tmp/oozie && chown oozie:hadoop /var/tmp/oozie && cd /var/tmp/oozie' && su - oozie -c '/usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 /usr/lib/hadoop/ -extjs /usr/share/HDP-oozie/ext.zip -jars /usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run ; hadoop dfs -put /usr/lib/oozie/share share ; hadoop dfs -chmod -R 755 /user/oozie/share && /usr/lib/oozie/bin/oozie-start.sh' ]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 36.67 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $lzo_enabled at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:37 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ensure at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:76 is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}827a6e7bd4233d4dc82b20761aed1e30' to '{md5}4e59b973cec0811615008a580244bcdb'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp::Exec[/bin/sh -c 'cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz && mkdir -p /var/tmp/oozie && chown oozie:hadoop /var/tmp/oozie && cd /var/tmp/oozie' && su - oozie -c '/usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 /usr/lib/hadoop/ -extjs /usr/share/HDP-oozie/ext.zip -jars /usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run ; hadoop dfs -put /usr/lib/oozie/share share ; hadoop dfs -chmod -R 755 /user/oozie/share && /usr/lib/oozie/bin/oozie-start.sh' ]/Exec[/bin/sh -c 'cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz && mkdir -p /var/tmp/oozie && chown oozie:hadoop /var/tmp/oozie && cd /var/tmp/oozie' && su - oozie -c '/usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 /usr/lib/hadoop/ -extjs /usr/share/HDP-oozie/ext.zip -jars /usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run ; hadoop dfs -put /usr/lib/oozie/share share ; hadoop dfs -chmod -R 755 /user/oozie/share && /usr/lib/oozie/bin/oozie-start.sh' ]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 36.67 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 40,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "OOZIE_SERVER",
-        "start_time" : 1352864331712,
-        "stage_id" : 3
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/31",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.15 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 31,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864090105,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/27",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[gmetad]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Gmetad/Hdp::Exec[hdp-gmetad service]/Exec[hdp-gmetad service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 8.14 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[gmetad]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Gmetad/Hdp::Exec[hdp-gmetad service]/Exec[hdp-gmetad service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 8.14 
seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 27,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_SERVER",
-        "start_time" : 1352864089883,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/43",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: Sqoop 1.4.2.1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: git commit id ea3b95785b3daf62c68f1eb0e645636acc00d0c2\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: Compiled by jenkins on Sat Nov 10 19:14:01 PST 2012\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: Sqoop 1.4.2.1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: git commit id ea3b95785b3daf62c68f1eb0e645636acc00d0c2\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: Compiled by jenkins on Sat Nov 10 19:14:01 PST 2012\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.15 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 43,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "SQOOP_SERVICE_CHECK",
-        "start_time" : 1352864331830,
-        "stage_id" : 3
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/42",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Hdp-hadoop::Exec-hadoop[pig::service_check::create_file]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfs -rmr pigsmoke.out passwd; hadoop dfs -put /etc/passwd passwd ]/Exec[hadoop --config /etc/hadoop/conf dfs -rmr pigsmoke.out passwd; hadoop dfs -put /etc/passwd passwd ]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/File[/tmp/pigSmoke.sh]/ensure: defined content as '{md5}feac231e484c08e3bc5f83d0ee189a8c'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:39:58,367 [main] INFO  org.apache.pig.Main - Apache Pig version 0.10.0.1 (rexported) compiled Nov 10 2012, 19:10:20\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:39:58,368 [main] INFO  org.apache.pig.Main - Logging error messages to: /home/ambari_qa/pig_1352864398364.log\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:39:58,789 [main] INFO  org.apache.pig.backend.hadoop.executionengine.HExecutionEngine - Connecting to hadoop file system at: hdfs://host5:8020\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:39:59,058 [main] INFO  org.apache.pig.backend.hadoop.executionengine.HExecutionEngine - Connecting to map-reduce job tracker at: host3:50300\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:39:59,907 [main] INFO  org.apache.pig.tools.pigstats.ScriptState - Pig features used in the script: UNKNOWN\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:00,158 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MRCompiler - File concatenation threshold: 100 optimistic? 
false\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:00,183 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MultiQueryOptimizer - MR plan size before optimization: 1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:00,183 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MultiQueryOptimizer - MR plan size after optimization: 1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:00,288 [main] INFO  org.apache.pig.tools.pigstats.ScriptState - Pig script settings are added to the job\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:00,312 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.JobControlCompiler - mapred.job.reduce.markreset.buffer.percent is not set, set to default 0.3\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:00,315 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.JobControlCompiler - creating jar file Job4537005419718909074.jar\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,356 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.JobControlCompiler - jar file Job4537005419718909074.jar created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,377 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.JobControlCompiler - Setting up single store job\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,432 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - 1 map-reduce job(s) waiting for submission.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,932 [Thread-6] INFO  org.apache.hadoop.mapreduce.lib.input.FileInputFormat - Total input paths to process : 1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,933 [Thread-6] INFO  org.apache.pig.backend.hadoop.executionengine.util.MapRedUtil - Total input paths to process : 1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,934 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - 0% complete\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,971 [Thread-6] WARN  org.apache.hadoop.io.compress.snappy.LoadSnappy - Snappy native library is available\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,973 [Thread-6] INFO  org.apache.hadoop.util.NativeCodeLoader - Loaded the native-hadoop library\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,973 [Thread-6] INFO  org.apache.hadoop.io.compress.snappy.LoadSnappy - Snappy native library loaded\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,977 
[Thread-6] INFO  org.apache.pig.backend.hadoop.executionengine.util.MapRedUtil - Total input paths (combined) to process : 1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:06,811 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - HadoopJobId: job_201211132238_0002\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:06,812 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - More information at: http://host3:50030/jobdetails.jsp?jobid=job_201211132238_0002\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:17,380 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - 50% complete\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:21,432 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - 100% complete\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:21,434 [main] INFO  org.apache.pig.tools.pigstats.SimplePigStats - Script Statistics: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: HadoopVersion\tPigVersion\tUserId\tStartedAt\tFinishedAt\tFeatures\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 1.1.0.1\t0.10.0.1\tambari_qa\t2012-11-13 22:40:00\t2012-11-13 22:40:21\tUNKNOWN\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Success!\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Job Stats (time in seconds):\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: JobId\tMaps\tReduces\tMaxMapTime\tMinMapTIme\tAvgMapTime\tMaxReduceTime\tMinReduceTime\tAvgReduceTime\tAlias\tFeature\tOutputs\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: job_201211132238_0002\t1\t0\t3\t3\t3\t0\t0\t0\tA,B\tMAP_ONLY\thdfs://host5:8020/user/ambari_qa/pigsmoke.out,\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Input(s):\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Successfully read 36 records (2137 bytes) from: \"hdfs://host5:8020/user/ambari_qa/passwd\"\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Output(s):\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Successfully stored 36 records (236 bytes) in: \"hdfs://host5:8020/user/ambari_qa/pigsmoke.out\"\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Counters:\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Total records written : 36\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Total bytes written : 236\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Spillable Memory Manager spill count : 0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Total bags proactively spilled: 0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Total records proactively spilled: 0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Job DAG:\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: job_201211132238_0002\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:21,446 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - Success!\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Hdp-hadoop::Exec-hadoop[pig::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e pigsmoke.out]/Exec[hadoop --config /etc/hadoop/conf fs -test -e pigsmoke.out]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 32.06 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Total records proactively spilled: 0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Job DAG:\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: job_201211132238_0002\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:21,446 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - Success!\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Hdp-hadoop::Exec-hadoop[pig::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e pigsmoke.out]/Exec[hadoop --config /etc/hadoop/conf fs -test -e pigsmoke.out]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 32.06 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 42,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "PIG_SERVICE_CHECK",
-        "start_time" : 1352864331815,
-        "stage_id" : 3
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/36",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}9786ed97b221e37075bdb64400bc804a' to '{md5}8e06d7ec24fe5acd81917162d58857db'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}30e43dbdb225dad740d632ecc8f6ae11' to '{md5}558aadf67e4d29865a6d935076d3868b'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.20 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}9786ed97b221e37075bdb64400bc804a' to '{md5}8e06d7ec24fe5acd81917162d58857db'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}30e43dbdb225dad740d632ecc8f6ae11' to '{md5}558aadf67e4d29865a6d935076d3868b'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.20 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 36,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352864269562,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/34",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}63d8feab1255e45d9549ccea14f687c4' to '{md5}4673b67e078cc9d84ffc4873e5198edf'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start jobtracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start jobtracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[historyserver]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start historyserver']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start historyserver']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}cccc03b9f3384eac76957c7fe2f12849' to '{md5}07e946dbf4ae6632034ee6715a085b92'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 9.76 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}63d8feab1255e45d9549ccea14f687c4' to '{md5}4673b67e078cc9d84ffc4873e5198edf'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start jobtracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start jobtracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[historyserver]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start historyserver']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start historyserver']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}cccc03b9f3384eac76957c7fe2f12849' to '{md5}07e946dbf4ae6632034ee6715a085b92'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 9.76 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 34,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "JOBTRACKER",
-        "start_time" : 1352864269447,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/28",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hostgroups.cfg]/content: content changed '{md5}ffff62426c4f7a42c1cb1ca44b324dad' to '{md5}21ad9f95dd93ee39fc87db07b7ea05be'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hosts.cfg]/content: content changed '{md5}fdcc51e399dd2381778a163933ef2beb' to '{md5}afbfd32db940db5fff4701c964169c27'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Services/Service[nagios]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Services/Service[nagios]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 8.78 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hostgroups.cfg]/content: content changed '{md5}ffff62426c4f7a42c1cb1ca44b324dad' to '{md5}21ad9f95dd93ee39fc87db07b7ea05be'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hosts.cfg]/content: content changed '{md5}fdcc51e399dd2381778a163933ef2beb' to '{md5}afbfd32db940db5fff4701c964169c27'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Services/Service[nagios]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Services/Service[nagios]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 8.78 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 28,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "NAGIOS_SERVER",
-        "start_time" : 1352864089985,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/37",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}df2d55356b238461af57fe22ad993e4d' to '{md5}62a467fcccda8169de563170e39e3419'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}1a3769d695902dba39b5645fef3766e0' to '{md5}23097908e8b54f7dbc4d31b5d26d21e7'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.66 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}df2d55356b238461af57fe22ad993e4d' to '{md5}62a467fcccda8169de563170e39e3419'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}1a3769d695902dba39b5645fef3766e0' to '{md5}23097908e8b54f7dbc4d31b5d26d21e7'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.66 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 37,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352864269589,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/41",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::create_file]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfs -rmr mapredsmokeoutput mapredsmokeinput ; hadoop dfs -put /etc/passwd mapredsmokeinput ]/Exec[hadoop --config /etc/hadoop/conf dfs -rmr mapredsmokeoutput mapredsmokeinput ; hadoop dfs -put /etc/passwd mapredsmokeinput ]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:13 INFO input.FileInputFormat: Total input paths to process : 1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:13 WARN snappy.LoadSnappy: Snappy native library is available\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:13 INFO util.NativeCodeLoader: Loaded the native-hadoop library\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:13 INFO snappy.LoadSnappy: Snappy native library loaded\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:14 INFO mapred.JobClient: Running job: job_201211132238_0001\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:15 INFO mapred.JobClient:  map 0% reduce 0%\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:30 INFO mapred.JobClient:  map 100% reduce 0%\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:38 INFO mapred.JobClient:  map 100% reduce 33%\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:40 INFO mapred.JobClient:  map 100% reduce 100%\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient: Job complete: job_201211132238_0001\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient: Counters: 29\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   Job Counters \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Launched reduce tasks=1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar 
/usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     SLOTS_MILLIS_MAPS=6106\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Total time spent by all reduces waiting after reserving slots (ms)=0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Total time spent by all maps waiting after reserving slots (ms)=0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Launched map tasks=1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Data-local map tasks=1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     SLOTS_MILLIS_REDUCES=9332\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   File Output Format Counters \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Bytes 
Written=1845\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   FileSystemCounters\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     FILE_BYTES_READ=2095\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     HDFS_BYTES_READ=1893\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     FILE_BYTES_WRITTEN=117522\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     HDFS_BYTES_WRITTEN=1845\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   File Input Format Counters \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Bytes Read=1755\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput 
mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   Map-Reduce Framework\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map output materialized bytes=2095\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map input records=36\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce shuffle bytes=2095\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Spilled Records=122\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map output bytes=2003\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     CPU time spent (ms)=1920\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Total committed heap usage 
(bytes)=433913856\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Combine input records=62\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     SPLIT_RAW_BYTES=138\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce input records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce input groups=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Combine output records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Physical memory (bytes) snapshot=381779968\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce output records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  
wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Virtual memory (bytes) snapshot=2704003072\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map output records=62\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf fs -test -e mapredsmokeoutput]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 37.52 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce input records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce input groups=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Combine output records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Physical memory (bytes) snapshot=381779968\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce output records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Virtual memory (bytes) snapshot=2704003072\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map output records=62\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf fs -test -e mapredsmokeoutput]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 37.52 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 41,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "MAPREDUCE_SERVICE_CHECK",
-        "start_time" : 1352864331797,
-        "stage_id" : 3
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/44",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/File[/tmp/oozieSmoke.sh]/ensure: defined content as '{md5}a421efea655810cf298d18d7b5c1ebdd'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Deleted hdfs://host5:8020/user/ambari_qa/examples\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Deleted hdfs://host5:8020/user/ambari_qa/input-data\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Job ID : 0000002-121113223948436-oozie-oozi-W\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: ------------------------------------------------------------------------------------------------------------------------------------\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Workflow Name : map-reduce-wf\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: App Path      : hdfs://host5:8020/user/ambari_qa/examples/apps/map-reduce\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Status        : FAILED\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Run           : 0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: User          : ambari_qa\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Group         : -\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Created       : 2012-11-14 03:41\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Started       : 2012-11-14 03:41\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Last Modified : 2012-11-14 03:41\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Ended         : -\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: CoordAction ID: -\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: 
Actions\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: ------------------------------------------------------------------------------------------------------------------------------------\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: ID                                                                            Status    Ext ID                 Ext Status Err Code  \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: ------------------------------------------------------------------------------------------------------------------------------------\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: 0000002-121113223948436-oozie-oozi-W@mr-node                                  FAILED    -                      -          EL_ERROR  \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: ------------------------------------------------------------------------------------------------------------------------------------\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Job ID : 0000002-121113223948436-oozie-oozi-W\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: ------------------------------------------------------------------------------------------------------------------------------------\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Workflow Name : map-reduce-wf\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: App Path      : hdfs://host5:8020/user/ambari_qa/examples/apps/map-reduce\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Status        : FAILED\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Run           : 0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: User          : ambari_qa\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Group         : -\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Created       : 2012-11-14 03:41\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Started       : 2012-11-14 03:41\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Last Modified : 2012-11-14 03:41\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Ended         : -\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: CoordAction ID: -\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Actions\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: ------------------------------------------------------------------------------------------------------------------------------------\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: ID                                                                            Status    Ext ID                 Ext Status Err Code  \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: ------------------------------------------------------------------------------------------------------------------------------------\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: 0000002-121113223948436-oozie-oozi-W@mr-node                                  FAILED    -                      -          EL_ERROR  \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: ------------------------------------------------------------------------------------------------------------------------------------\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: workflow_status=FAILED\u001B[0m\n\u001B[1;35merr: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: change from notrun to 0 failed: sh /tmp/oozieSmoke.sh /etc/oozie/conf /etc/hadoop/conf ambari_qa false /etc/security/keytabs/ambari_qa.headless.keytab EXAMPLE.COM   returned 1 instead of one of [0] at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/oozie/service_check.pp:62\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 50.53 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Ended         : -\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: CoordAction ID: -\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: Actions\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: ------------------------------------------------------------------------------------------------------------------------------------\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: ID                                                                            Status    Ext ID                 Ext Status Err Code  \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: ------------------------------------------------------------------------------------------------------------------------------------\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: 0000002-121113223948436-oozie-oozi-W@mr-node                                  FAILED    -                      -          EL_ERROR  \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: ------------------------------------------------------------------------------------------------------------------------------------\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: workflow_status=FAILED\u001B[0m\n\u001B[1;35merr: /Stage[2]/Hdp-oozie::Oozie::Service_check/Hdp-oozie::Smoke_shell_file[oozieSmoke.sh]/Exec[/tmp/oozieSmoke.sh]/returns: change from notrun to 0 failed: sh /tmp/oozieSmoke.sh /etc/oozie/conf /etc/hadoop/conf ambari_qa false /etc/security/keytabs/ambari_qa.headless.keytab EXAMPLE.COM   returned 1 instead of one of [0] at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/oozie/service_check.pp:62\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 50.53 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "None",
-        "host_name" : "host2",
-        "id" : 44,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "OOZIE_SERVICE_CHECK",
-        "start_time" : 1352864442993,
-        "stage_id" : 4
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/29",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}d22fc5749dde07b5b5acff255c490e9d' to '{md5}0617b67bc5192f5e44cf98b2fe25eb6f'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}0d021082a9258c648b5259d3af27ff62' to '{md5}39e33160b7f2933a12fc338a81ae9fcd'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/File[/tmp/checkForFormat.sh]/ensure: defined content as '{md5}5dd6bddf910d8ca9f6fefa44e7bbec7e'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: NameNode Dirname = /grid/0/hadoop/hdfs/namenode\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: NameNode Dirname = /grid/1/hadoop/hdfs/namenode\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:57 INFO namenode.NameNode: STARTUP_MSG: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: /************************************************************\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG: Starting NameNode\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG:   host = host5/10.118.58.228\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG:   args = [-format]\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG:   version = 1.1.0.1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG:   build =  -r ; compiled by 'jenkins' on Sat Nov 10 18:55:09 PST 2012\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: ************************************************************/\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: Re-format filesystem in /grid/0/hadoop/hdfs/namenode ? (Y or N) Re-format filesystem in /grid/1/hadoop/hdfs/namenode ? 
(Y or N) 12/11/13 22:36:58 INFO util.GSet: VM type       = 64-bit\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO util.GSet: 2% max memory = 19.2 MB\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO util.GSet: capacity      = 2^21 = 2097152 entries\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO util.GSet: recommended=2097152, actual=2097152\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: fsOwner=hdfs\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: supergroup=supergroup\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: isPermissionEnabled=true\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: dfs.block.invalidate.limit=100\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 WARN namenode.FSNamesystem: The dfs.support.append option is in your configuration, however append is not supported. This configuration option is no longer required to enable sync\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: isAccessTokenEnabled=false accessKeyUpdateInterval=0 min(s), accessTokenLifetime=0 min(s)\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.NameNode: Caching file names occuring more than 10 times \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO common.Storage: Image file of size 110 saved in 0 seconds.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.FSEditLog: closing edit log: position=4, editlog=/grid/0/hadoop/hdfs/namenode/current/edits\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.FSEditLog: close success: truncate to 4, editlog=/grid/0/hadoop/hdfs/namenode/current/edits\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO common.Storage: Storage directory /grid/0/hadoop/hdfs/namenode has been successfully formatted.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO common.Storage: Image file of size 110 saved in 0 seconds.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.FSEditLog: closing edit log: position=4, editlog=/grid/1/hadoop/hdfs/namenode/current/edits\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.FSEditLog: close success: truncate to 4, 
editlog=/grid/1/hadoop/hdfs/namenode/current/edits\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO common.Storage: Storage directory /grid/1/hadoop/hdfs/namenode has been successfully formatted.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.NameNode: SHUTDOWN_MSG: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: /************************************************************\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: SHUTDOWN_MSG: Shutting down NameNode at host5/10.118.58.228\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: ************************************************************/\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: yes: standard output: Broken pipe\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: yes: write error\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Hdp::Exec[set namenode mark]/Exec[set namenode mark]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Service[namenode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -mkdir /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred]/Hdp-hadoop::Exec-hadoop[fs -mkdir /mapred]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred]/Hdp-hadoop::Exec-hadoop[fs -chown mapred /mapred]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred]/Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -mkdir /mapred/system]/Hdp::Exec[hadoop --config 
/etc/hadoop/conf fs -mkdir /mapred/system]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -chmod 775 /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 775 /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 775 /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -mkdir /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -chown mapred /mapred/system]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred/system]/Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred/system]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -chown hdfs /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown hdfs /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -chown hdfs /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -chmod 770 /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 770 /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 770 /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -chown ambari_qa /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown ambari_qa /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -chown ambari_qa /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -chmod 777 /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 777 /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 777 /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -chown oozie /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown oozie /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -chown oozie /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 51.23 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -mkdir 
/mapred/system]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -chmod 775 /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 775 /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 775 /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -mkdir /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -chown mapred /mapred/system]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred/system]/Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred/system]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -chown hdfs /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown hdfs /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -chown hdfs /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -chmod 770 /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 770 /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 770 /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -chown ambari_qa /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown ambari_qa /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -chown ambari_qa /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -chmod 777 /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 777 /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 777 /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -chown oozie /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown oozie /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -chown oozie /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 51.23 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 29,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "NAMENODE",
-        "start_time" : 1352864090025,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/39",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}db0f2efdd03e4845c0528e1978b25644' to '{md5}84df095b5569e720b4aeaf4a96e0ee6d'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}f72b50738651b3cb6bcef039b59ffdcb' to '{md5}e750ca8f3497b9a4656f782dcf335dab'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.29 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}db0f2efdd03e4845c0528e1978b25644' to '{md5}84df095b5569e720b4aeaf4a96e0ee6d'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}f72b50738651b3cb6bcef039b59ffdcb' to '{md5}e750ca8f3497b9a4656f782dcf335dab'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.29 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 39,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352864269636,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/32",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}c892638e8c76c66f072640eb32b0637a' to '{md5}db0f2efdd03e4845c0528e1978b25644'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}fcfc81d25ae7ad5f5aaaacdc3d47f0f5' to '{md5}036cea2c613ff235499a7ed743be467f'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.38 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}c892638e8c76c66f072640eb32b0637a' to '{md5}db0f2efdd03e4845c0528e1978b25644'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}fcfc81d25ae7ad5f5aaaacdc3d47f0f5' to '{md5}036cea2c613ff235499a7ed743be467f'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.38 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 32,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "DATANODE",
-        "start_time" : 1352864090145,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/25",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.84 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.84 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 25,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864089770,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/23",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.77 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.77 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 23,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864089600,
-        "stage_id" : 1
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_warning/poll_2.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_warning/poll_2.json
deleted file mode 100644
index 822e351..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_warning/poll_2.json
+++ /dev/null
@@ -1,405 +0,0 @@
-{
-  "href" : "http://ambari:8080/api/clusters/mycluster/requests/1?fields=tasks/*",
-  "Requests" : {
-    "id" : 1,
-    "cluster_name" : "mycluster"
-  },
-  "tasks" : [
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/4",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 4,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "SECONDARY_NAMENODE",
-        "start_time" : 1352863664537,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/15",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 15,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863666842,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/7",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host1",
-        "id" : 7,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863665177,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/14",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host4",
-        "id" : 14,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863666672,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/10",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 10,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "NAGIOS_SERVER",
-        "start_time" : 1352863665856,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/21",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 21,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "SQOOP",
-        "start_time" : 1352863667466,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/8",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 8,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863665481,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/16",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 16,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863666913,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/2",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 2,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "JOBTRACKER",
-        "start_time" : 1352863664213,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/20",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 20,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "PIG",
-        "start_time" : 1352863667299,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/11",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 11,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "NAMENODE",
-        "start_time" : 1352863665939,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/3",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 3,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "OOZIE_SERVER",
-        "start_time" : 1352863664455,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/12",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host4",
-        "id" : 12,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863666165,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/9",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 9,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_SERVER",
-        "start_time" : 1352863665690,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/6",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host1",
-        "id" : 6,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863664901,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/17",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 17,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "HDFS_CLIENT",
-        "start_time" : 1352863666987,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/18",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 18,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "MAPREDUCE_CLIENT",
-        "start_time" : 1352863667058,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/19",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 19,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "OOZIE_CLIENT",
-        "start_time" : 1352863667216,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/22",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 22,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863667578,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/5",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host1",
-        "id" : 5,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863664723,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/1",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 1,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863663984,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/13",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host4",
-        "id" : 13,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863666401,
-        "stage_id" : 1
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_warning/poll_3.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_warning/poll_3.json
deleted file mode 100644
index 2f34bfc..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_warning/poll_3.json
+++ /dev/null
@@ -1,405 +0,0 @@
-{
-  "href" : "http://ambari:8080/api/clusters/mycluster/requests/1?fields=tasks/*",
-  "Requests" : {
-    "id" : 1,
-    "cluster_name" : "mycluster"
-  },
-  "tasks" : [
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/11",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 11,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "NAMENODE",
-        "start_time" : 1352863665939,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/3",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $lzo_enabled at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:37 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ensure at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:76 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java oozie-server]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie/Hdp::Package[oozie-server]/Hdp::Package::Yum[oozie-server]/Package[oozie]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java oozie-client]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/content: content changed '{md5}809b11d7098abd5cbcb08b9ceda104ed' to '{md5}851335f018fb288e30cf38afc96fff6a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/content: content changed '{md5}db07d1efafb9314e2e7d2c8a4d3ba6e7' to '{md5}42fd0d6fe7301cb54ea7129d6b930f59'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-oozie::Download-ext-zip/Hdp::Package[extjs]/Hdp::Package::Yum[extjs]/Package[extjs-2.2-1]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java extjs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}11c7e0ea46916a75e66920fac8c93621' to '{md5}fc664190038e2562fe63acd61ea9480b'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/oozie/]/group: group changed 'oozie' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/oozie]/group: group changed 'oozie' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp-oozie::Service::Directory[/var/tmp/oozie]/Hdp::Directory_recursive_create[/var/tmp/oozie]/Hdp::Exec[mkdir -p /var/tmp/oozie]/Exec[mkdir -p /var/tmp/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp-oozie::Service::Directory[/var/data/oozie]/Hdp::Directory_recursive_create[/var/data/oozie]/Hdp::Exec[mkdir -p /var/data/oozie]/Exec[mkdir -p /var/data/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/oozie]/group: group changed 'oozie' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp-oozie::Service::Directory[/var/tmp/oozie]/Hdp::Directory_recursive_create[/var/tmp/oozie]/Hdp::Directory[/var/tmp/oozie]/File[/var/tmp/oozie]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp-oozie::Service::Directory[/var/tmp/oozie]/Hdp::Directory_recursive_create[/var/tmp/oozie]/Hdp::Directory[/var/tmp/oozie]/File[/var/tmp/oozie]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/data/oozie]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/var/data/oozie]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/data/oozie]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 50.33 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/var/log/oozie]/group: group changed 'oozie' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp-oozie::Service::Directory[/var/tmp/oozie]/Hdp::Directory_recursive_create[/var/tmp/oozie]/Hdp::Exec[mkdir -p /var/tmp/oozie]/Exec[mkdir -p /var/tmp/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp-oozie::Service::Directory[/var/data/oozie]/Hdp::Directory_recursive_create[/var/data/oozie]/Hdp::Exec[mkdir -p /var/data/oozie]/Exec[mkdir -p /var/data/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/oozie]/group: group changed 'oozie' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp-oozie::Service::Directory[/var/tmp/oozie]/Hdp::Directory_recursive_create[/var/tmp/oozie]/Hdp::Directory[/var/tmp/oozie]/File[/var/tmp/oozie]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp-oozie::Service::Directory[/var/tmp/oozie]/Hdp::Directory_recursive_create[/var/tmp/oozie]/Hdp::Directory[/var/tmp/oozie]/File[/var/tmp/oozie]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/data/oozie]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/var/data/oozie]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/data/oozie]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 50.33 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 3,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "OOZIE_SERVER",
-        "start_time" : 1352863664455,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/4",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Snamenode::Create_name_dirs[/grid/0/hadoop/hdfs/namesecondary]/Hdp::Directory_recursive_create[/grid/0/hadoop/hdfs/namesecondary]/Hdp::Exec[mkdir -p /grid/0/hadoop/hdfs/namesecondary]/Exec[mkdir -p /grid/0/hadoop/hdfs/namesecondary]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}de205131fc2bc8c15f7bd1329fc8ea0d' to '{md5}f60477f06af8d1d549460294d0363702'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}9a88c58cc1982799c0e4bdd2d1f1e6e0'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 4.84 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}9a88c58cc1982799c0e4bdd2d1f1e6e0'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 4.84 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 4,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "SECONDARY_NAMENODE",
-        "start_time" : 1352863664537,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/1",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.18 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 111.24 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 111.24 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host3",
-        "id" : 1,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863663984,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/15",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.14 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.10 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}43bb2e790338badc9f17297bc958b536'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}7c4fcfb891f7ca4a9156ba37d35b7dc4'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/mode: mode changed '0755' to '0750'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 125.19 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 125.19 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host2",
-        "id" : 15,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863666842,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/19",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[2]/Hdp-oozie/Hdp::Package[oozie-client]/Hdp::Package::Yum[oozie-client]/Package[oozie-client]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java oozie-client]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie/Hdp::User[oozie]/User[oozie]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/content: content changed '{md5}809b11d7098abd5cbcb08b9ceda104ed' to '{md5}851335f018fb288e30cf38afc96fff6a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/content: content changed '{md5}db07d1efafb9314e2e7d2c8a4d3ba6e7' to '{md5}42fd0d6fe7301cb54ea7129d6b930f59'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}11c7e0ea46916a75e66920fac8c93621' to '{md5}5ad6256cb12b73724714fc692c18db82'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 21.37 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/content: content changed '{md5}809b11d7098abd5cbcb08b9ceda104ed' to '{md5}851335f018fb288e30cf38afc96fff6a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/content: content changed '{md5}db07d1efafb9314e2e7d2c8a4d3ba6e7' to '{md5}42fd0d6fe7301cb54ea7129d6b930f59'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}11c7e0ea46916a75e66920fac8c93621' to '{md5}5ad6256cb12b73724714fc692c18db82'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 21.37 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 19,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "OOZIE_CLIENT",
-        "start_time" : 1352863667216,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/6",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as 
'{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 34.27 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 34.27 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 6,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863664901,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/18",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}0c93d35cf2bf9188e20542db3417f453' to '{md5}089df31bb24ce7450baba1a7541e4546'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}13834e68f9058d26d30e3e627bea2d08'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.32 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}0c93d35cf2bf9188e20542db3417f453' to '{md5}089df31bb24ce7450baba1a7541e4546'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}13834e68f9058d26d30e3e627bea2d08'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.32 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 18,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "MAPREDUCE_CLIENT",
-        "start_time" : 1352863667058,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/10",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-fping]/Hdp::Package[nagios-fping]/Hdp::Package::Yum[nagios-fping]/Package[fping]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-server]/Hdp::Package[nagios-server]/Hdp::Package::Yum[nagios-server]/Package[nagios-3.2.3]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-plugins]/Hdp::Package[nagios-plugins]/Hdp::Package::Yum[nagios-plugins]/Package[nagios-plugins-1.4.9]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-addons]/Hdp::Package[nagios-addons]/Hdp::Package::Yum[nagios-addons]/Package[hdp_mon_nagios_addons]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios]/owner: owner changed 'nagios' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects]/owner: owner changed 'nagios' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hdfs_blocks.php]/content: content changed '{md5}4dc9db0a18622b3e40e266a9b54e2a1a' to '{md5}e3a5fa882154aaabe18fce6086346b98'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_rpcq_latency.php]/content: content changed '{md5}4fa432abbf0a15a2c73cd8eb7e657d6e' to '{md5}1601eec7138fcd957159e6524900a1ce'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_cpu.pl]/ensure: defined content as '{md5}ab87290dee3f032770580d7d7713d086'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hostgroups.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_datanode_storage.php]/ensure: defined content as '{md5}eeae70d7c6686ff4ce9993244dbbdf34'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hosts.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-commands.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hive_metastore_status.sh]/content: content changed '{md5}56469a95f854711ff67e961eb91a1b9a' to '{md5}987ce3f5cd654d9e8d13e9bc9d4b4e16'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_aggregate.php]/content: content changed '{md5}eeddc4cf93d7ca7dbf1e6ea1effcc278' to '{md5}ebbacb754b35bcdab9b246a64589f7c6'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-servicegroups.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/contacts.cfg]/content: content changed '{md5}5ee6a28df66ed3cc6f76b46073ced9ac' to '{md5}05c47f03c0800b968023152479f24ccb'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/contacts.cfg]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_templeton_status.sh]/ensure: defined content as 
'{md5}017d82f1435f2a1059d13968b14174bc'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_puppet_agent_status.php]/ensure: defined content as '{md5}a623c36478c7ab4bf8df595822683762'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/content: content changed '{md5}3a4b1c8fd3b42affa5ae58a385a48000' to '{md5}870993486c9e4336086645a14d327ead'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-services.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_oozie_status.sh]/content: content changed '{md5}e672a044b2aa7163ceda92829d43e92c' to '{md5}3124bf8679ce198ba25509e430db325b'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_webui.sh]/content: content changed '{md5}f8e41fdb30d0cd838aefd7ea9e5af4a2' to '{md5}258d5ced025d4b14853cc0b12bdde1f0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hdfs_capacity.php]/content: content changed '{md5}a3391962ab1c1956610d0cb009df9b8c' to '{md5}398026717db006c423441a223731914f'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 52.10 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_templeton_status.sh]/ensure: defined content as '{md5}017d82f1435f2a1059d13968b14174bc'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_puppet_agent_status.php]/ensure: defined content as '{md5}a623c36478c7ab4bf8df595822683762'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/content: content changed '{md5}3a4b1c8fd3b42affa5ae58a385a48000' to '{md5}870993486c9e4336086645a14d327ead'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-services.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_oozie_status.sh]/content: content changed '{md5}e672a044b2aa7163ceda92829d43e92c' to '{md5}3124bf8679ce198ba25509e430db325b'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_webui.sh]/content: content changed '{md5}f8e41fdb30d0cd838aefd7ea9e5af4a2' to '{md5}258d5ced025d4b14853cc0b12bdde1f0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hdfs_capacity.php]/content: content changed '{md5}a3391962ab1c1956610d0cb009df9b8c' to '{md5}398026717db006c423441a223731914f'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 52.10 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 10,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "NAGIOS_SERVER",
-        "start_time" : 1352863665856,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/7",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/0/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/0/hadoop/mapred]/Exec[mkdir -p /grid/0/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/1/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/1/hadoop/mapred]/Exec[mkdir -p /grid/1/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}428a01fb131e6c37a876dd03a2940d79' to '{md5}a39a2689e76538c6d9090b00ceb04eb0'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p 
/var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}30e43dbdb225dad740d632ecc8f6ae11'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.96 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}30e43dbdb225dad740d632ecc8f6ae11'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.96 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 7,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863665177,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/8",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.24 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 133.95 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: 
/File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 133.95 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host5",
-        "id" : 8,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863665481,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/22",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 22,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863667578,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/5",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}428a01fb131e6c37a876dd03a2940d79'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}c14eb8ab2bb5ab75789c875534ab64f4'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/mode: mode changed '0755' to '0750'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 148.09 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 148.09 seconds\u001B[0m",
-        "status" : "FAILED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host1",
-        "id" : 5,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863664723,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/9",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[rrdtool-python]/Hdp::Package::Yum[rrdtool-python]/Package[python-rrdtool.x86_64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[ganglia-gweb]/Hdp::Package::Yum[ganglia-gweb]/Package[gweb]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[ganglia-server]/Hdp::Package::Yum[ganglia-server]/Package[ganglia-gmetad-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[ganglia-hdp-gweb-addons]/Hdp::Package::Yum[ganglia-hdp-gweb-addons]/Package[hdp_mon_ganglia_addons]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[gmetad]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/owner: owner changed 'apache' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/group: group changed 'apache' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/mode: mode changed '0755' to '0777'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver start]/Exec[monitor webserver start]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/www/cgi-bin/rrd.py]/ensure: defined content as '{md5}95b666a938f3080c370aeb6e3136cc6b'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 63.16 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[gmetad]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/owner: owner changed 'apache' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/group: group changed 'apache' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/mode: mode changed '0755' to '0777'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver start]/Exec[monitor webserver start]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/www/cgi-bin/rrd.py]/ensure: defined content as '{md5}95b666a938f3080c370aeb6e3136cc6b'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 63.16 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 9,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_SERVER",
-        "start_time" : 1352863665690,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/17",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}43bb2e790338badc9f17297bc958b536' to '{md5}0c93d35cf2bf9188e20542db3417f453'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}7c4fcfb891f7ca4a9156ba37d35b7dc4' to '{md5}fcfc81d25ae7ad5f5aaaacdc3d47f0f5'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.23 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}43bb2e790338badc9f17297bc958b536' to '{md5}0c93d35cf2bf9188e20542db3417f453'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}7c4fcfb891f7ca4a9156ba37d35b7dc4' to '{md5}fcfc81d25ae7ad5f5aaaacdc3d47f0f5'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.23 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 17,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "HDFS_CLIENT",
-        "start_time" : 1352863666987,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/21",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 21,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "SQOOP",
-        "start_time" : 1352863667466,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/13",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as 
'{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 31.06 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 31.06 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 13,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863666401,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/2",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ambari-log4j]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp::Package[ambari-log4j]/Hdp::Package::Yum[ambari-log4j]/Package[ambari-log4j]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Jobtracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/0/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/0/hadoop/mapred]/Exec[mkdir -p /grid/0/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Jobtracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/1/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/1/hadoop/mapred]/Exec[mkdir -p /grid/1/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}de205131fc2bc8c15f7bd1329fc8ea0d'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib/hadoop/lib/hadoop-tools.jar]/ensure: 
created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}cccc03b9f3384eac76957c7fe2f12849'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 78.36 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}cccc03b9f3384eac76957c7fe2f12849'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 78.36 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 2,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "JOBTRACKER",
-        "start_time" : 1352863664213,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/16",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as 
'{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 29.27 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 29.27 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 16,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863666913,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/12",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.10 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.10 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}00ac2c1fbbe400ce3e93c316c03c8c8c'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}54edf0ba61f6501cc49c0d7788b266b1'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/mode: mode changed '0755' to '0750'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 130.97 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 130.97 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host4",
-        "id" : 12,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863666165,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/14",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/0/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/0/hadoop/mapred]/Exec[mkdir -p /grid/0/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/1/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/1/hadoop/mapred]/Exec[mkdir -p /grid/1/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}00ac2c1fbbe400ce3e93c316c03c8c8c' to '{md5}a973d9bcff056aeff7f22221886c84b7'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p 
/var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}1a3769d695902dba39b5645fef3766e0'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.51 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}1a3769d695902dba39b5645fef3766e0'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.51 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 14,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863666672,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/20",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig/Hdp::Package[pig]/Hdp::Package::Yum[pig]/Package[pig.noarch]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java pig]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig-env.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/content: content changed '{md5}ba02e0d405cdbafd081e13ab2ef06403' to '{md5}c53b9db5f04e0c66451e52b4b39c8b17'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/content: content changed '{md5}fd58c23539a391f6a74db6e22b67f7d3' to '{md5}82b233fb0a252aae098c7267aafeb01f'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 22.14 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java pig]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig-env.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/content: content changed '{md5}ba02e0d405cdbafd081e13ab2ef06403' to '{md5}c53b9db5f04e0c66451e52b4b39c8b17'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/content: content changed '{md5}fd58c23539a391f6a74db6e22b67f7d3' to '{md5}82b233fb0a252aae098c7267aafeb01f'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 22.14 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 20,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "PIG",
-        "start_time" : 1352863667299,
-        "stage_id" : 1
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_warning/poll_4.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_warning/poll_4.json
deleted file mode 100644
index 2c965da..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_warning/poll_4.json
+++ /dev/null
@@ -1,405 +0,0 @@
-{
-  "href" : "http://ambari:8080/api/clusters/mycluster/requests/1?fields=tasks/*",
-  "Requests" : {
-    "id" : 1,
-    "cluster_name" : "mycluster"
-  },
-  "tasks" : [
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/13",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as 
'{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 31.06 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 31.06 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 13,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863666401,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/7",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/0/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/0/hadoop/mapred]/Exec[mkdir -p /grid/0/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/1/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/1/hadoop/mapred]/Exec[mkdir -p /grid/1/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}428a01fb131e6c37a876dd03a2940d79' to '{md5}a39a2689e76538c6d9090b00ceb04eb0'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p 
/var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}30e43dbdb225dad740d632ecc8f6ae11'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.96 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}30e43dbdb225dad740d632ecc8f6ae11'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.96 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 7,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863665177,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/14",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/0/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/0/hadoop/mapred]/Exec[mkdir -p /grid/0/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/1/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/1/hadoop/mapred]/Exec[mkdir -p /grid/1/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}00ac2c1fbbe400ce3e93c316c03c8c8c' to '{md5}a973d9bcff056aeff7f22221886c84b7'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p 
/var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}1a3769d695902dba39b5645fef3766e0'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.51 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}1a3769d695902dba39b5645fef3766e0'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.51 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 14,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863666672,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/4",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Snamenode::Create_name_dirs[/grid/0/hadoop/hdfs/namesecondary]/Hdp::Directory_recursive_create[/grid/0/hadoop/hdfs/namesecondary]/Hdp::Exec[mkdir -p /grid/0/hadoop/hdfs/namesecondary]/Exec[mkdir -p /grid/0/hadoop/hdfs/namesecondary]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}de205131fc2bc8c15f7bd1329fc8ea0d' to '{md5}f60477f06af8d1d549460294d0363702'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}9a88c58cc1982799c0e4bdd2d1f1e6e0'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 4.84 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed 
'{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}9a88c58cc1982799c0e4bdd2d1f1e6e0'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 4.84 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 4,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "SECONDARY_NAMENODE",
-        "start_time" : 1352863664537,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/6",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as 
'{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 34.27 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 34.27 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 6,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863664901,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/19",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[2]/Hdp-oozie/Hdp::Package[oozie-client]/Hdp::Package::Yum[oozie-client]/Package[oozie-client]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java oozie-client]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie/Hdp::User[oozie]/User[oozie]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/content: content changed '{md5}809b11d7098abd5cbcb08b9ceda104ed' to '{md5}851335f018fb288e30cf38afc96fff6a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/content: content changed '{md5}db07d1efafb9314e2e7d2c8a4d3ba6e7' to '{md5}42fd0d6fe7301cb54ea7129d6b930f59'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}11c7e0ea46916a75e66920fac8c93621' to '{md5}5ad6256cb12b73724714fc692c18db82'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 21.37 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/content: content changed '{md5}809b11d7098abd5cbcb08b9ceda104ed' to '{md5}851335f018fb288e30cf38afc96fff6a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/content: content changed '{md5}db07d1efafb9314e2e7d2c8a4d3ba6e7' to '{md5}42fd0d6fe7301cb54ea7129d6b930f59'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}11c7e0ea46916a75e66920fac8c93621' to '{md5}5ad6256cb12b73724714fc692c18db82'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 21.37 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 19,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "OOZIE_CLIENT",
-        "start_time" : 1352863667216,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/3",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $lzo_enabled at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:37 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ensure at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:76 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}fc664190038e2562fe63acd61ea9480b' to '{md5}827a6e7bd4233d4dc82b20761aed1e30'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.12 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $lzo_enabled at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:37 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ensure at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:76 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}fc664190038e2562fe63acd61ea9480b' to '{md5}827a6e7bd4233d4dc82b20761aed1e30'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.12 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 3,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "OOZIE_SERVER",
-        "start_time" : 1352863664455,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/1",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.18 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 111.24 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 111.24 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host3",
-        "id" : 1,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863663984,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/22",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/0/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/0/hadoop/mapred]/Exec[mkdir -p /grid/0/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/1/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/1/hadoop/mapred]/Exec[mkdir -p /grid/1/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}089df31bb24ce7450baba1a7541e4546' to '{md5}8da2518fdfc4a3723e64babe25c8d6d8'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p 
/var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}13834e68f9058d26d30e3e627bea2d08' to '{md5}816e416f74804ba21e3b80b611d59a11'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.75 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}13834e68f9058d26d30e3e627bea2d08' to '{md5}816e416f74804ba21e3b80b611d59a11'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.75 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 22,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863667578,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/8",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.24 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 133.95 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: 
/File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 133.95 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host5",
-        "id" : 8,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863665481,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/12",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.10 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.10 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}00ac2c1fbbe400ce3e93c316c03c8c8c'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}54edf0ba61f6501cc49c0d7788b266b1'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/mode: mode changed '0755' to '0750'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 130.97 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 130.97 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host4",
-        "id" : 12,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863666165,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/10",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-fping]/Hdp::Package[nagios-fping]/Hdp::Package::Yum[nagios-fping]/Package[fping]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-server]/Hdp::Package[nagios-server]/Hdp::Package::Yum[nagios-server]/Package[nagios-3.2.3]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-plugins]/Hdp::Package[nagios-plugins]/Hdp::Package::Yum[nagios-plugins]/Package[nagios-plugins-1.4.9]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-addons]/Hdp::Package[nagios-addons]/Hdp::Package::Yum[nagios-addons]/Package[hdp_mon_nagios_addons]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios]/owner: owner changed 'nagios' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects]/owner: owner changed 'nagios' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hdfs_blocks.php]/content: content changed '{md5}4dc9db0a18622b3e40e266a9b54e2a1a' to '{md5}e3a5fa882154aaabe18fce6086346b98'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_rpcq_latency.php]/content: content changed '{md5}4fa432abbf0a15a2c73cd8eb7e657d6e' to '{md5}1601eec7138fcd957159e6524900a1ce'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_cpu.pl]/ensure: defined content as '{md5}ab87290dee3f032770580d7d7713d086'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hostgroups.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_datanode_storage.php]/ensure: defined content as '{md5}eeae70d7c6686ff4ce9993244dbbdf34'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hosts.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-commands.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hive_metastore_status.sh]/content: content changed '{md5}56469a95f854711ff67e961eb91a1b9a' to '{md5}987ce3f5cd654d9e8d13e9bc9d4b4e16'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_aggregate.php]/content: content changed '{md5}eeddc4cf93d7ca7dbf1e6ea1effcc278' to '{md5}ebbacb754b35bcdab9b246a64589f7c6'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-servicegroups.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/contacts.cfg]/content: content changed '{md5}5ee6a28df66ed3cc6f76b46073ced9ac' to '{md5}05c47f03c0800b968023152479f24ccb'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/contacts.cfg]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_templeton_status.sh]/ensure: defined content as 
'{md5}017d82f1435f2a1059d13968b14174bc'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_puppet_agent_status.php]/ensure: defined content as '{md5}a623c36478c7ab4bf8df595822683762'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/content: content changed '{md5}3a4b1c8fd3b42affa5ae58a385a48000' to '{md5}870993486c9e4336086645a14d327ead'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-services.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_oozie_status.sh]/content: content changed '{md5}e672a044b2aa7163ceda92829d43e92c' to '{md5}3124bf8679ce198ba25509e430db325b'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_webui.sh]/content: content changed '{md5}f8e41fdb30d0cd838aefd7ea9e5af4a2' to '{md5}258d5ced025d4b14853cc0b12bdde1f0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hdfs_capacity.php]/content: content changed '{md5}a3391962ab1c1956610d0cb009df9b8c' to '{md5}398026717db006c423441a223731914f'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 52.10 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_templeton_status.sh]/ensure: defined content as '{md5}017d82f1435f2a1059d13968b14174bc'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_puppet_agent_status.php]/ensure: defined content as '{md5}a623c36478c7ab4bf8df595822683762'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/content: content changed '{md5}3a4b1c8fd3b42affa5ae58a385a48000' to '{md5}870993486c9e4336086645a14d327ead'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-services.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_oozie_status.sh]/content: content changed '{md5}e672a044b2aa7163ceda92829d43e92c' to '{md5}3124bf8679ce198ba25509e430db325b'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_webui.sh]/content: content changed '{md5}f8e41fdb30d0cd838aefd7ea9e5af4a2' to '{md5}258d5ced025d4b14853cc0b12bdde1f0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hdfs_capacity.php]/content: content changed '{md5}a3391962ab1c1956610d0cb009df9b8c' to '{md5}398026717db006c423441a223731914f'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 52.10 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 10,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "NAGIOS_SERVER",
-        "start_time" : 1352863665856,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/15",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.14 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.10 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}43bb2e790338badc9f17297bc958b536'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}7c4fcfb891f7ca4a9156ba37d35b7dc4'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/mode: mode changed '0755' to '0750'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 125.19 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 125.19 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host2",
-        "id" : 15,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863666842,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/21",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop/Hdp::Package[sqoop]/Hdp::Package::Yum[sqoop]/Package[sqoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java sqoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Mysql-connector/Hdp::Exec[yum install -y mysql-connector-java]/Exec[yum install -y mysql-connector-java]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib/sqoop/lib//mysql-connector-java.jar]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib/sqoop/conf/sqoop-env.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 50.25 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop/Hdp::Package[sqoop]/Hdp::Package::Yum[sqoop]/Package[sqoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java sqoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Mysql-connector/Hdp::Exec[yum install -y mysql-connector-java]/Exec[yum install -y mysql-connector-java]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib/sqoop/lib//mysql-connector-java.jar]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib/sqoop/conf/sqoop-env.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 50.25 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 21,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "SQOOP",
-        "start_time" : 1352863667466,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/5",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}428a01fb131e6c37a876dd03a2940d79'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}c14eb8ab2bb5ab75789c875534ab64f4'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/mode: mode changed '0755' to '0750'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 148.09 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 148.09 seconds\u001B[0m",
-        "status" : "FAILED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host1",
-        "id" : 5,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863664723,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/17",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}43bb2e790338badc9f17297bc958b536' to '{md5}0c93d35cf2bf9188e20542db3417f453'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}7c4fcfb891f7ca4a9156ba37d35b7dc4' to '{md5}fcfc81d25ae7ad5f5aaaacdc3d47f0f5'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.23 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}43bb2e790338badc9f17297bc958b536' to '{md5}0c93d35cf2bf9188e20542db3417f453'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}7c4fcfb891f7ca4a9156ba37d35b7dc4' to '{md5}fcfc81d25ae7ad5f5aaaacdc3d47f0f5'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.23 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 17,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "HDFS_CLIENT",
-        "start_time" : 1352863666987,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/9",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[rrdtool-python]/Hdp::Package::Yum[rrdtool-python]/Package[python-rrdtool.x86_64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[ganglia-gweb]/Hdp::Package::Yum[ganglia-gweb]/Package[gweb]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[ganglia-server]/Hdp::Package::Yum[ganglia-server]/Package[ganglia-gmetad-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[ganglia-hdp-gweb-addons]/Hdp::Package::Yum[ganglia-hdp-gweb-addons]/Package[hdp_mon_ganglia_addons]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[gmetad]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/owner: owner changed 'apache' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/group: group changed 'apache' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/mode: mode changed '0755' to '0777'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver start]/Exec[monitor webserver start]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/www/cgi-bin/rrd.py]/ensure: defined content as '{md5}95b666a938f3080c370aeb6e3136cc6b'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 63.16 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[gmetad]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/owner: owner changed 'apache' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/group: group changed 'apache' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/mode: mode changed '0755' to '0777'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver start]/Exec[monitor webserver start]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/www/cgi-bin/rrd.py]/ensure: defined content as '{md5}95b666a938f3080c370aeb6e3136cc6b'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 63.16 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 9,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_SERVER",
-        "start_time" : 1352863665690,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/20",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig/Hdp::Package[pig]/Hdp::Package::Yum[pig]/Package[pig.noarch]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java pig]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig-env.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/content: content changed '{md5}ba02e0d405cdbafd081e13ab2ef06403' to '{md5}c53b9db5f04e0c66451e52b4b39c8b17'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/content: content changed '{md5}fd58c23539a391f6a74db6e22b67f7d3' to '{md5}82b233fb0a252aae098c7267aafeb01f'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 22.14 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java pig]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig-env.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/content: content changed '{md5}ba02e0d405cdbafd081e13ab2ef06403' to '{md5}c53b9db5f04e0c66451e52b4b39c8b17'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/content: content changed '{md5}fd58c23539a391f6a74db6e22b67f7d3' to '{md5}82b233fb0a252aae098c7267aafeb01f'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 22.14 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 20,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "PIG",
-        "start_time" : 1352863667299,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/11",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}600cddff911584631420067cd2d2a5f6'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}05bed82ac309c3636b85b7ffae797cd1'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_name_dirs[/grid/0/hadoop/hdfs/namenode,/grid/1/hadoop/hdfs/namenode,]/Hdp::Directory_recursive_create[/grid/0/hadoop/hdfs/namenode]/Hdp::Exec[mkdir -p /grid/0/hadoop/hdfs/namenode]/Exec[mkdir -p /grid/0/hadoop/hdfs/namenode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_name_dirs[/grid/0/hadoop/hdfs/namenode,/grid/1/hadoop/hdfs/namenode,]/Hdp::Directory_recursive_create[/grid/1/hadoop/hdfs/namenode]/Hdp::Exec[mkdir -p /grid/1/hadoop/hdfs/namenode]/Exec[mkdir -p /grid/1/hadoop/hdfs/namenode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/hdfs/namenode]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/hdfs/namenode]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/hdfs/namenode]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/hdfs/namenode]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namenode]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namenode]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namenode]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namenode]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Service[namenode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Service[namenode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 75.23 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namenode]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Service[namenode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Service[namenode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 75.23 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 11,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "NAMENODE",
-        "start_time" : 1352863665939,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/16",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as 
'{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 29.27 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 29.27 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 16,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863666913,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/18",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}0c93d35cf2bf9188e20542db3417f453' to '{md5}089df31bb24ce7450baba1a7541e4546'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}13834e68f9058d26d30e3e627bea2d08'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.32 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}0c93d35cf2bf9188e20542db3417f453' to '{md5}089df31bb24ce7450baba1a7541e4546'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}13834e68f9058d26d30e3e627bea2d08'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.32 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 18,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "MAPREDUCE_CLIENT",
-        "start_time" : 1352863667058,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/2",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ambari-log4j]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp::Package[ambari-log4j]/Hdp::Package::Yum[ambari-log4j]/Package[ambari-log4j]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Jobtracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/0/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/0/hadoop/mapred]/Exec[mkdir -p /grid/0/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Jobtracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/1/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/1/hadoop/mapred]/Exec[mkdir -p /grid/1/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}de205131fc2bc8c15f7bd1329fc8ea0d'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib/hadoop/lib/hadoop-tools.jar]/ensure: 
created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}cccc03b9f3384eac76957c7fe2f12849'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 78.36 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}cccc03b9f3384eac76957c7fe2f12849'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 78.36 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 2,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "JOBTRACKER",
-        "start_time" : 1352863664213,
-        "stage_id" : 1
-      }
-    }
-  ]
-}
\ No newline at end of file
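Note on the deleted fixtures: the poll_*.json files above and below mock the Ambari REST endpoint /api/clusters/<cluster>/requests/<id>?fields=tasks/*, whose "tasks" array carries one "Tasks" object per host/role with "status", "exit_code", "stdout", and "stderr"; the deploy wizard polls this endpoint to track INSTALL progress. A minimal polling sketch against such an endpoint follows. It is illustrative only: the host "ambari:8080", cluster "mycluster", and request id 1 are taken from the fixture hrefs, the credentials are placeholders, and the set of terminal task states is an assumption (only COMPLETED appears in these fixtures).

import time
import requests  # third-party HTTP client (pip install requests); assumed available

AMBARI = "http://ambari:8080"  # host/port as in the fixture hrefs
URL = AMBARI + "/api/clusters/mycluster/requests/1?fields=tasks/*"

def poll_request(interval=5.0):
    """Poll the request until every task reaches a terminal state."""
    # Terminal states are an assumption; the fixtures only show COMPLETED.
    terminal = {"COMPLETED", "FAILED", "ABORTED", "TIMEDOUT"}
    while True:
        resp = requests.get(URL, auth=("admin", "admin"))  # placeholder credentials
        resp.raise_for_status()
        # Each element of "tasks" wraps its fields in a "Tasks" object,
        # exactly as in the deleted poll_*.json payloads.
        tasks = [t["Tasks"] for t in resp.json().get("tasks", [])]
        done = [t for t in tasks if t["status"] in terminal]
        print("%d/%d tasks finished" % (len(done), len(tasks)))
        if tasks and len(done) == len(tasks):
            return tasks
        time.sleep(interval)

if __name__ == "__main__":
    for t in poll_request():
        print(t["id"], t["role"], t["command"], t["status"])

Each poll_N.json fixture appears to be one snapshot of such a response at a successive point in the install, which is why consecutive fixtures repeat tasks with updated status fields.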
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_warning/poll_5.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_warning/poll_5.json
deleted file mode 100644
index cbc327b..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_warning/poll_5.json
+++ /dev/null
@@ -1,405 +0,0 @@
-{
-  "href" : "http://ambari:8080/api/clusters/mycluster/requests/1?fields=tasks/*",
-  "Requests" : {
-    "id" : 1,
-    "cluster_name" : "mycluster"
-  },
-  "tasks" : [
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/18",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}0c93d35cf2bf9188e20542db3417f453' to '{md5}089df31bb24ce7450baba1a7541e4546'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}13834e68f9058d26d30e3e627bea2d08'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.32 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}0c93d35cf2bf9188e20542db3417f453' to '{md5}089df31bb24ce7450baba1a7541e4546'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}13834e68f9058d26d30e3e627bea2d08'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.32 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 18,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "MAPREDUCE_CLIENT",
-        "start_time" : 1352863667058,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/21",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop/Hdp::Package[sqoop]/Hdp::Package::Yum[sqoop]/Package[sqoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java sqoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Mysql-connector/Hdp::Exec[yum install -y mysql-connector-java]/Exec[yum install -y mysql-connector-java]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib/sqoop/lib//mysql-connector-java.jar]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib/sqoop/conf/sqoop-env.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 50.25 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop/Hdp::Package[sqoop]/Hdp::Package::Yum[sqoop]/Package[sqoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java sqoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Mysql-connector/Hdp::Exec[yum install -y mysql-connector-java]/Exec[yum install -y mysql-connector-java]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib/sqoop/lib//mysql-connector-java.jar]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib/sqoop/conf/sqoop-env.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 50.25 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 21,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "SQOOP",
-        "start_time" : 1352863667466,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/6",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as 
'{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 34.27 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 34.27 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 6,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863664901,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/20",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig/Hdp::Package[pig]/Hdp::Package::Yum[pig]/Package[pig.noarch]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java pig]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig-env.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/content: content changed '{md5}ba02e0d405cdbafd081e13ab2ef06403' to '{md5}c53b9db5f04e0c66451e52b4b39c8b17'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/content: content changed '{md5}fd58c23539a391f6a74db6e22b67f7d3' to '{md5}82b233fb0a252aae098c7267aafeb01f'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 22.14 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java pig]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig-env.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/content: content changed '{md5}ba02e0d405cdbafd081e13ab2ef06403' to '{md5}c53b9db5f04e0c66451e52b4b39c8b17'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/pig.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/content: content changed '{md5}fd58c23539a391f6a74db6e22b67f7d3' to '{md5}82b233fb0a252aae098c7267aafeb01f'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/pig/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 22.14 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 20,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "PIG",
-        "start_time" : 1352863667299,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/8",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.24 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 133.95 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: 
/File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 133.95 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host5",
-        "id" : 8,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863665481,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/14",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/0/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/0/hadoop/mapred]/Exec[mkdir -p /grid/0/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/1/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/1/hadoop/mapred]/Exec[mkdir -p /grid/1/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}00ac2c1fbbe400ce3e93c316c03c8c8c' to '{md5}a973d9bcff056aeff7f22221886c84b7'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p 
/var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}1a3769d695902dba39b5645fef3766e0'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.51 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}1a3769d695902dba39b5645fef3766e0'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.51 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 14,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863666672,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/17",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}43bb2e790338badc9f17297bc958b536' to '{md5}0c93d35cf2bf9188e20542db3417f453'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}7c4fcfb891f7ca4a9156ba37d35b7dc4' to '{md5}fcfc81d25ae7ad5f5aaaacdc3d47f0f5'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.23 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}43bb2e790338badc9f17297bc958b536' to '{md5}0c93d35cf2bf9188e20542db3417f453'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}7c4fcfb891f7ca4a9156ba37d35b7dc4' to '{md5}fcfc81d25ae7ad5f5aaaacdc3d47f0f5'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.23 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 17,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "HDFS_CLIENT",
-        "start_time" : 1352863666987,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/12",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.10 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.10 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}00ac2c1fbbe400ce3e93c316c03c8c8c'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}54edf0ba61f6501cc49c0d7788b266b1'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/mode: mode changed '0755' to '0750'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 130.97 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 130.97 seconds\u001B[0m",
-        "status" : "FAILED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host4",
-        "id" : 12,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863666165,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/16",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as 
'{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 29.27 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 29.27 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 16,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863666913,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/9",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[rrdtool-python]/Hdp::Package::Yum[rrdtool-python]/Package[python-rrdtool.x86_64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[ganglia-gweb]/Hdp::Package::Yum[ganglia-gweb]/Package[gweb]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[ganglia-server]/Hdp::Package::Yum[ganglia-server]/Package[ganglia-gmetad-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Packages/Hdp::Package[ganglia-hdp-gweb-addons]/Hdp::Package::Yum[ganglia-hdp-gweb-addons]/Package[hdp_mon_ganglia_addons]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[gmetad]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/owner: owner changed 'apache' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/group: group changed 'apache' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/mode: mode changed '0755' to '0777'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver start]/Exec[monitor webserver start]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/www/cgi-bin/rrd.py]/ensure: defined content as '{md5}95b666a938f3080c370aeb6e3136cc6b'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 63.16 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[gmetad]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/owner: owner changed 'apache' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/group: group changed 'apache' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/lib/ganglia/dwoo]/mode: mode changed '0755' to '0777'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-monitor-webserver/Hdp::Exec[monitor webserver start]/Exec[monitor webserver start]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/www/cgi-bin/rrd.py]/ensure: defined content as '{md5}95b666a938f3080c370aeb6e3136cc6b'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 63.16 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 9,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_SERVER",
-        "start_time" : 1352863665690,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/7",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/0/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/0/hadoop/mapred]/Exec[mkdir -p /grid/0/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/1/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/1/hadoop/mapred]/Exec[mkdir -p /grid/1/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}428a01fb131e6c37a876dd03a2940d79' to '{md5}a39a2689e76538c6d9090b00ceb04eb0'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p 
/var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}30e43dbdb225dad740d632ecc8f6ae11'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.96 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}30e43dbdb225dad740d632ecc8f6ae11'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.96 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 7,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863665177,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/15",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.14 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.10 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}43bb2e790338badc9f17297bc958b536'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}7c4fcfb891f7ca4a9156ba37d35b7dc4'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/mode: mode changed '0755' to '0750'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 125.19 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 125.19 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host2",
-        "id" : 15,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863666842,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/1",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.18 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 111.24 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 111.24 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host3",
-        "id" : 1,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863663984,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/3",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $lzo_enabled at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:37 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ensure at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:76 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}fc664190038e2562fe63acd61ea9480b' to '{md5}827a6e7bd4233d4dc82b20761aed1e30'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.12 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $lzo_enabled at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:37 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ensure at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:76 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}fc664190038e2562fe63acd61ea9480b' to '{md5}827a6e7bd4233d4dc82b20761aed1e30'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.12 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 3,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "OOZIE_SERVER",
-        "start_time" : 1352863664455,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/19",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[2]/Hdp-oozie/Hdp::Package[oozie-client]/Hdp::Package::Yum[oozie-client]/Package[oozie-client]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java oozie-client]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie/Hdp::User[oozie]/User[oozie]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/content: content changed '{md5}809b11d7098abd5cbcb08b9ceda104ed' to '{md5}851335f018fb288e30cf38afc96fff6a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/content: content changed '{md5}db07d1efafb9314e2e7d2c8a4d3ba6e7' to '{md5}42fd0d6fe7301cb54ea7129d6b930f59'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}11c7e0ea46916a75e66920fac8c93621' to '{md5}5ad6256cb12b73724714fc692c18db82'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 21.37 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/content: content changed '{md5}809b11d7098abd5cbcb08b9ceda104ed' to '{md5}851335f018fb288e30cf38afc96fff6a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/content: content changed '{md5}db07d1efafb9314e2e7d2c8a4d3ba6e7' to '{md5}42fd0d6fe7301cb54ea7129d6b930f59'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/owner: owner changed 'root' to 'oozie'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}11c7e0ea46916a75e66920fac8c93621' to '{md5}5ad6256cb12b73724714fc692c18db82'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 21.37 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 19,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "OOZIE_CLIENT",
-        "start_time" : 1352863667216,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/22",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/0/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/0/hadoop/mapred]/Exec[mkdir -p /grid/0/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Tasktracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/1/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/1/hadoop/mapred]/Exec[mkdir -p /grid/1/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}089df31bb24ce7450baba1a7541e4546' to '{md5}8da2518fdfc4a3723e64babe25c8d6d8'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p 
/var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}13834e68f9058d26d30e3e627bea2d08' to '{md5}816e416f74804ba21e3b80b611d59a11'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.75 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}13834e68f9058d26d30e3e627bea2d08' to '{md5}816e416f74804ba21e3b80b611d59a11'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.75 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 22,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352863667578,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/13",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia/User[nobody]/shell: shell changed '/sbin/nologin' to '/bin/bash'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ganglia-monitor]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor/Hdp::Package[ganglia-monitor]/Hdp::Package::Yum[ganglia-monitor]/Package[ganglia-gmond-3.2.0]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmond]/ensure: defined content as '{md5}cc6ebe2015992c0439f874f11484a1ce'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Config/Hdp::Directory_recursive_create[/usr/libexec/hdp/ganglia]/Hdp::Exec[mkdir -p /usr/libexec/hdp/ganglia]/Exec[mkdir -p /usr/libexec/hdp/ganglia]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaLib.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmetadLib.sh]/ensure: defined content as '{md5}edc286fac347b51c3d819bc2c265d42f'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopRrdcached.sh]/ensure: defined content as '{md5}e322ee433df8ad84e40757e2c36c9aa7'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmond.sh]/ensure: defined content as '{md5}150808e93ef5d47ebb3faa10218f02e0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startGmetad.sh]/ensure: defined content as '{md5}684a6370e19c36caee239678ebc46275'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/startRrdcached.sh]/ensure: defined content as '{md5}f6fdf786e6599e4d52fbc48f76fd6523'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkRrdcached.sh]/ensure: defined content as '{md5}087dcb3ba741f6a3ab77046cdc1a3f88'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/rrdcachedLib.sh]/ensure: defined content as '{md5}7711ff27b4514c61ec7997e061b9a582'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/checkGmond.sh]/ensure: defined content as '{md5}10aea6276dd23593e7b247a0065f195a'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaClusters.conf]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/setupGanglia.sh]/ensure: defined content as '{md5}76c86302629a48f53b5e1b6733263900'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gangliaEnv.sh]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as 
'{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 31.06 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/gmondLib.sh]/ensure: defined content as '{md5}b3a69d0856240087d0c6959dcde96fc0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmetad.sh]/ensure: defined content as '{md5}55826d55bf08fa14750c87d51a0f68b3'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/stopGmond.sh]/ensure: defined content as '{md5}47ac3a52746724dc2589818f98e5d261'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/libexec/hdp/ganglia/teardownGanglia.sh]/ensure: defined content as '{md5}c005a9af684576cfeba9abb1b7fe57fa'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/init.d/hdp-gmetad]/ensure: defined content as '{md5}cd585a5eb635c79998438ee3fc5aacc9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 31.06 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 13,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352863666401,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/10",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-fping]/Hdp::Package[nagios-fping]/Hdp::Package::Yum[nagios-fping]/Package[fping]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-server]/Hdp::Package[nagios-server]/Hdp::Package::Yum[nagios-server]/Package[nagios-3.2.3]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-plugins]/Hdp::Package[nagios-plugins]/Hdp::Package::Yum[nagios-plugins]/Package[nagios-plugins-1.4.9]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Packages/Hdp-nagios::Server::Package[nagios-addons]/Hdp::Package[nagios-addons]/Hdp::Package::Yum[nagios-addons]/Package[hdp_mon_nagios_addons]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios]/owner: owner changed 'nagios' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects]/owner: owner changed 'nagios' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hdfs_blocks.php]/content: content changed '{md5}4dc9db0a18622b3e40e266a9b54e2a1a' to '{md5}e3a5fa882154aaabe18fce6086346b98'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_rpcq_latency.php]/content: content changed '{md5}4fa432abbf0a15a2c73cd8eb7e657d6e' to '{md5}1601eec7138fcd957159e6524900a1ce'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_cpu.pl]/ensure: defined content as '{md5}ab87290dee3f032770580d7d7713d086'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hostgroups.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_datanode_storage.php]/ensure: defined content as '{md5}eeae70d7c6686ff4ce9993244dbbdf34'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hosts.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-commands.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hive_metastore_status.sh]/content: content changed '{md5}56469a95f854711ff67e961eb91a1b9a' to '{md5}987ce3f5cd654d9e8d13e9bc9d4b4e16'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_aggregate.php]/content: content changed '{md5}eeddc4cf93d7ca7dbf1e6ea1effcc278' to '{md5}ebbacb754b35bcdab9b246a64589f7c6'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-servicegroups.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/contacts.cfg]/content: content changed '{md5}5ee6a28df66ed3cc6f76b46073ced9ac' to '{md5}05c47f03c0800b968023152479f24ccb'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/contacts.cfg]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_templeton_status.sh]/ensure: defined content as 
'{md5}017d82f1435f2a1059d13968b14174bc'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_puppet_agent_status.php]/ensure: defined content as '{md5}a623c36478c7ab4bf8df595822683762'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/content: content changed '{md5}3a4b1c8fd3b42affa5ae58a385a48000' to '{md5}870993486c9e4336086645a14d327ead'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-services.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_oozie_status.sh]/content: content changed '{md5}e672a044b2aa7163ceda92829d43e92c' to '{md5}3124bf8679ce198ba25509e430db325b'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_webui.sh]/content: content changed '{md5}f8e41fdb30d0cd838aefd7ea9e5af4a2' to '{md5}258d5ced025d4b14853cc0b12bdde1f0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hdfs_capacity.php]/content: content changed '{md5}a3391962ab1c1956610d0cb009df9b8c' to '{md5}398026717db006c423441a223731914f'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 52.10 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_templeton_status.sh]/ensure: defined content as '{md5}017d82f1435f2a1059d13968b14174bc'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_puppet_agent_status.php]/ensure: defined content as '{md5}a623c36478c7ab4bf8df595822683762'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/content: content changed '{md5}3a4b1c8fd3b42affa5ae58a385a48000' to '{md5}870993486c9e4336086645a14d327ead'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/nagios.cfg]/group: group changed 'nagios' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-services.cfg]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_oozie_status.sh]/content: content changed '{md5}e672a044b2aa7163ceda92829d43e92c' to '{md5}3124bf8679ce198ba25509e430db325b'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_webui.sh]/content: content changed '{md5}f8e41fdb30d0cd838aefd7ea9e5af4a2' to '{md5}258d5ced025d4b14853cc0b12bdde1f0'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib64/nagios/plugins/check_hdfs_capacity.php]/content: content changed '{md5}a3391962ab1c1956610d0cb009df9b8c' to '{md5}398026717db006c423441a223731914f'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 52.10 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 10,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "NAGIOS_SERVER",
-        "start_time" : 1352863665856,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/4",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Snamenode::Create_name_dirs[/grid/0/hadoop/hdfs/namesecondary]/Hdp::Directory_recursive_create[/grid/0/hadoop/hdfs/namesecondary]/Hdp::Exec[mkdir -p /grid/0/hadoop/hdfs/namesecondary]/Exec[mkdir -p /grid/0/hadoop/hdfs/namesecondary]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/hdfs/namesecondary]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}de205131fc2bc8c15f7bd1329fc8ea0d' to '{md5}f60477f06af8d1d549460294d0363702'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}9a88c58cc1982799c0e4bdd2d1f1e6e0'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 4.84 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed 
'{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}9a88c58cc1982799c0e4bdd2d1f1e6e0'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 4.84 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 4,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "SECONDARY_NAMENODE",
-        "start_time" : 1352863664537,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/5",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}2290dede8198ef8f6d1f7333aaaf26b6'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-epel]/ensure: defined content as '{md5}bcb85db5fb936fca2ced5e8e064ed563'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.15 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /tmp/HDP-artifacts/ ; curl -f --retry 10 http://host5:8080/resources//jdk-6u31-linux-x64.bin -o /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: 
/Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}428a01fb131e6c37a876dd03a2940d79'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}c14eb8ab2bb5ab75789c875534ab64f4'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/mode: mode changed '0755' to '0750'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 148.09 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 148.09 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none\nnone\nnone",
-        "host_name" : "host1",
-        "id" : 5,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "DATANODE",
-        "start_time" : 1352863664723,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/2",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java ambari-log4j]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp::Package[ambari-log4j]/Hdp::Package::Yum[ambari-log4j]/Package[ambari-log4j]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Jobtracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/0/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/0/hadoop/mapred]/Exec[mkdir -p /grid/0/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Jobtracker::Create_local_dirs[/grid/0/hadoop/mapred,/grid/1/hadoop/mapred,]/Hdp::Directory_recursive_create[/grid/1/hadoop/mapred]/Hdp::Exec[mkdir -p /grid/1/hadoop/mapred]/Exec[mkdir -p /grid/1/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: 
/File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/0/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/grid/1/hadoop/mapred]/seltype: seltype changed 'file_t' to 'default_t'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}de205131fc2bc8c15f7bd1329fc8ea0d'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}f8cf0175a38ab1857e7ee4445c7486af'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f213cdc161f38bb0fdd45392d98628dd'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}fb197182ab06e8c37e1ef15554aebcfc'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}7239b08f9b42448302c2a7c7510b97ff'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}1b303202962d0ed6f5ada8222f7ebb42'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/usr/lib/hadoop/lib/hadoop-tools.jar]/ensure: 
created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}cccc03b9f3384eac76957c7fe2f12849'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 78.36 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/run/hadoop/mapred]/Hdp::Exec[mkdir -p /var/run/hadoop/mapred]/Exec[mkdir -p /var/run/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Directory_recursive_create[/var/log/hadoop/mapred]/Hdp::Exec[mkdir -p /var/log/hadoop/mapred]/Exec[mkdir -p /var/log/hadoop/mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/owner: owner changed 'root' to 'mapred'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/mapred]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}cccc03b9f3384eac76957c7fe2f12849'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 78.36 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 2,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "JOBTRACKER",
-        "start_time" : 1352863664213,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/1/tasks/11",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 11,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 2,
-        "request_id" : 1,
-        "command" : "INSTALL",
-        "role" : "NAMENODE",
-        "start_time" : 1352863665939,
-        "stage_id" : 1
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_warning/poll_6.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_warning/poll_6.json
deleted file mode 100644
index a3c0a0c..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_warning/poll_6.json
+++ /dev/null
@@ -1,405 +0,0 @@
-{
-  "href" : "http://ambari:8080/api/clusters/mycluster/requests/2?fields=tasks/*",
-  "Requests" : {
-    "id" : 2,
-    "cluster_name" : "mycluster"
-  },
-  "tasks" : [
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/37",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host4",
-        "id" : 37,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "TASKTRACKER",
-        "start_time" : -1,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/25",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host1",
-        "id" : 25,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/42",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 42,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "PIG_SERVICE_CHECK",
-        "start_time" : -1,
-        "stage_id" : 3
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/32",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 32,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "DATANODE",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/31",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host4",
-        "id" : 31,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/33",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 33,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/26",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 26,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/44",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 44,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "OOZIE_SERVICE_CHECK",
-        "start_time" : -1,
-        "stage_id" : 4
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/36",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host1",
-        "id" : 36,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "TASKTRACKER",
-        "start_time" : -1,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/34",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 34,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "JOBTRACKER",
-        "start_time" : -1,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/35",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 35,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "SECONDARY_NAMENODE",
-        "start_time" : -1,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/38",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 38,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "HDFS_SERVICE_CHECK",
-        "start_time" : -1,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/29",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 29,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "NAMENODE",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/28",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 28,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "NAGIOS_SERVER",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/24",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host1",
-        "id" : 24,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "DATANODE",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/40",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 40,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "OOZIE_SERVER",
-        "start_time" : -1,
-        "stage_id" : 3
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/39",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 39,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "TASKTRACKER",
-        "start_time" : -1,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/23",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host3",
-        "id" : 23,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/27",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host5",
-        "id" : 27,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_SERVER",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/30",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host4",
-        "id" : 30,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "DATANODE",
-        "start_time" : -1,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/43",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 43,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "SQOOP_SERVICE_CHECK",
-        "start_time" : -1,
-        "stage_id" : 3
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/41",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 41,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "MAPREDUCE_SERVICE_CHECK",
-        "start_time" : -1,
-        "stage_id" : 3
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_warning/poll_7.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_warning/poll_7.json
deleted file mode 100644
index fbccdbd..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_warning/poll_7.json
+++ /dev/null
@@ -1,405 +0,0 @@
-{
-  "href" : "http://ambari:8080/api/clusters/mycluster/requests/2?fields=tasks/*",
-  "Requests" : {
-    "id" : 2,
-    "cluster_name" : "mycluster"
-  },
-  "tasks" : [
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/32",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}c892638e8c76c66f072640eb32b0637a' to '{md5}db0f2efdd03e4845c0528e1978b25644'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}fcfc81d25ae7ad5f5aaaacdc3d47f0f5' to '{md5}036cea2c613ff235499a7ed743be467f'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.38 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}c892638e8c76c66f072640eb32b0637a' to '{md5}db0f2efdd03e4845c0528e1978b25644'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}fcfc81d25ae7ad5f5aaaacdc3d47f0f5' to '{md5}036cea2c613ff235499a7ed743be467f'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.38 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 32,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "DATANODE",
-        "start_time" : 1352864090145,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/24",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}a39a2689e76538c6d9090b00ceb04eb0' to '{md5}9786ed97b221e37075bdb64400bc804a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}c14eb8ab2bb5ab75789c875534ab64f4' to '{md5}9684de67c2a8fa0f7292418d6c0c1651'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.78 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}a39a2689e76538c6d9090b00ceb04eb0' to '{md5}9786ed97b221e37075bdb64400bc804a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}c14eb8ab2bb5ab75789c875534ab64f4' to '{md5}9684de67c2a8fa0f7292418d6c0c1651'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.78 seconds\u001B[0m",
-        "status" : "FAILED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 24,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "DATANODE",
-        "start_time" : 1352864089661,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/29",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}d22fc5749dde07b5b5acff255c490e9d' to '{md5}0617b67bc5192f5e44cf98b2fe25eb6f'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}0d021082a9258c648b5259d3af27ff62' to '{md5}39e33160b7f2933a12fc338a81ae9fcd'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/File[/tmp/checkForFormat.sh]/ensure: defined content as '{md5}5dd6bddf910d8ca9f6fefa44e7bbec7e'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: NameNode Dirname = /grid/0/hadoop/hdfs/namenode\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: NameNode Dirname = /grid/1/hadoop/hdfs/namenode\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:57 INFO namenode.NameNode: STARTUP_MSG: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: /************************************************************\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG: Starting NameNode\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG:   host = host5/10.118.58.228\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG:   args = [-format]\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG:   version = 1.1.0.1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG:   build =  -r ; compiled by 'jenkins' on Sat Nov 10 18:55:09 PST 2012\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: ************************************************************/\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: Re-format filesystem in /grid/0/hadoop/hdfs/namenode ? (Y or N) Re-format filesystem in /grid/1/hadoop/hdfs/namenode ? 
(Y or N) 12/11/13 22:36:58 INFO util.GSet: VM type       = 64-bit\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO util.GSet: 2% max memory = 19.2 MB\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO util.GSet: capacity      = 2^21 = 2097152 entries\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO util.GSet: recommended=2097152, actual=2097152\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: fsOwner=hdfs\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: supergroup=supergroup\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: isPermissionEnabled=true\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: dfs.block.invalidate.limit=100\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 WARN namenode.FSNamesystem: The dfs.support.append option is in your configuration, however append is not supported. This configuration option is no longer required to enable sync\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: isAccessTokenEnabled=false accessKeyUpdateInterval=0 min(s), accessTokenLifetime=0 min(s)\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.NameNode: Caching file names occuring more than 10 times \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO common.Storage: Image file of size 110 saved in 0 seconds.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.FSEditLog: closing edit log: position=4, editlog=/grid/0/hadoop/hdfs/namenode/current/edits\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.FSEditLog: close success: truncate to 4, editlog=/grid/0/hadoop/hdfs/namenode/current/edits\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO common.Storage: Storage directory /grid/0/hadoop/hdfs/namenode has been successfully formatted.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO common.Storage: Image file of size 110 saved in 0 seconds.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.FSEditLog: closing edit log: position=4, editlog=/grid/1/hadoop/hdfs/namenode/current/edits\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.FSEditLog: close success: truncate to 4, 
editlog=/grid/1/hadoop/hdfs/namenode/current/edits\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO common.Storage: Storage directory /grid/1/hadoop/hdfs/namenode has been successfully formatted.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.NameNode: SHUTDOWN_MSG: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: /************************************************************\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: SHUTDOWN_MSG: Shutting down NameNode at host5/10.118.58.228\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: ************************************************************/\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: yes: standard output: Broken pipe\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: yes: write error\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Hdp::Exec[set namenode mark]/Exec[set namenode mark]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Service[namenode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -mkdir /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred]/Hdp-hadoop::Exec-hadoop[fs -mkdir /mapred]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred]/Hdp-hadoop::Exec-hadoop[fs -chown mapred /mapred]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred]/Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -mkdir /mapred/system]/Hdp::Exec[hadoop --config 
/etc/hadoop/conf fs -mkdir /mapred/system]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -chmod 775 /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 775 /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 775 /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -mkdir /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -chown mapred /mapred/system]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred/system]/Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred/system]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -chown hdfs /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown hdfs /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -chown hdfs /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -chmod 770 /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 770 /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 770 /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -chown ambari_qa /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown ambari_qa /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -chown ambari_qa /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -chmod 777 /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 777 /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 777 /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -chown oozie /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown oozie /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -chown oozie /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 51.23 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -mkdir 
/mapred/system]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -chmod 775 /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 775 /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 775 /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -mkdir /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -chown mapred /mapred/system]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred/system]/Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred/system]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -chown hdfs /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown hdfs /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -chown hdfs /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -chmod 770 /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 770 /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 770 /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -chown ambari_qa /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown ambari_qa /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -chown ambari_qa /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -chmod 777 /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 777 /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 777 /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -chown oozie /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown oozie /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -chown oozie /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 51.23 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 29,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "NAMENODE",
-        "start_time" : 1352864090025,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/28",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hostgroups.cfg]/content: content changed '{md5}ffff62426c4f7a42c1cb1ca44b324dad' to '{md5}21ad9f95dd93ee39fc87db07b7ea05be'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hosts.cfg]/content: content changed '{md5}fdcc51e399dd2381778a163933ef2beb' to '{md5}afbfd32db940db5fff4701c964169c27'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Services/Service[nagios]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Services/Service[nagios]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 8.78 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hostgroups.cfg]/content: content changed '{md5}ffff62426c4f7a42c1cb1ca44b324dad' to '{md5}21ad9f95dd93ee39fc87db07b7ea05be'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hosts.cfg]/content: content changed '{md5}fdcc51e399dd2381778a163933ef2beb' to '{md5}afbfd32db940db5fff4701c964169c27'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Services/Service[nagios]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Services/Service[nagios]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 8.78 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 28,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "NAGIOS_SERVER",
-        "start_time" : 1352864089985,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/23",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.77 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.77 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 23,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864089600,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/34",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}63d8feab1255e45d9549ccea14f687c4' to '{md5}4673b67e078cc9d84ffc4873e5198edf'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start jobtracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start jobtracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[historyserver]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start historyserver']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start historyserver']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}cccc03b9f3384eac76957c7fe2f12849' to '{md5}07e946dbf4ae6632034ee6715a085b92'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 9.76 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}63d8feab1255e45d9549ccea14f687c4' to '{md5}4673b67e078cc9d84ffc4873e5198edf'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start jobtracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start jobtracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[historyserver]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start historyserver']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start historyserver']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}cccc03b9f3384eac76957c7fe2f12849' to '{md5}07e946dbf4ae6632034ee6715a085b92'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 9.76 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 34,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "JOBTRACKER",
-        "start_time" : 1352864269447,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/41",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::create_file]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfs -rmr mapredsmokeoutput mapredsmokeinput ; hadoop dfs -put /etc/passwd mapredsmokeinput ]/Exec[hadoop --config /etc/hadoop/conf dfs -rmr mapredsmokeoutput mapredsmokeinput ; hadoop dfs -put /etc/passwd mapredsmokeinput ]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:13 INFO input.FileInputFormat: Total input paths to process : 1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:13 WARN snappy.LoadSnappy: Snappy native library is available\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:13 INFO util.NativeCodeLoader: Loaded the native-hadoop library\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:13 INFO snappy.LoadSnappy: Snappy native library loaded\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:14 INFO mapred.JobClient: Running job: job_201211132238_0001\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:15 INFO mapred.JobClient:  map 0% reduce 0%\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:30 INFO mapred.JobClient:  map 100% reduce 0%\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:38 INFO mapred.JobClient:  map 100% reduce 33%\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:40 INFO mapred.JobClient:  map 100% reduce 100%\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient: Job complete: job_201211132238_0001\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient: Counters: 29\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   Job Counters \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Launched reduce tasks=1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar 
/usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     SLOTS_MILLIS_MAPS=6106\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Total time spent by all reduces waiting after reserving slots (ms)=0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Total time spent by all maps waiting after reserving slots (ms)=0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Launched map tasks=1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Data-local map tasks=1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     SLOTS_MILLIS_REDUCES=9332\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   File Output Format Counters \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Bytes 
Written=1845\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   FileSystemCounters\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     FILE_BYTES_READ=2095\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     HDFS_BYTES_READ=1893\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     FILE_BYTES_WRITTEN=117522\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     HDFS_BYTES_WRITTEN=1845\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   File Input Format Counters \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Bytes Read=1755\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput 
mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   Map-Reduce Framework\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map output materialized bytes=2095\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map input records=36\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce shuffle bytes=2095\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Spilled Records=122\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map output bytes=2003\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     CPU time spent (ms)=1920\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Total committed heap usage 
(bytes)=433913856\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Combine input records=62\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     SPLIT_RAW_BYTES=138\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce input records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce input groups=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Combine output records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Physical memory (bytes) snapshot=381779968\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce output records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  
wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Virtual memory (bytes) snapshot=2704003072\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map output records=62\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf fs -test -e mapredsmokeoutput]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 37.52 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce input records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce input groups=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Combine output records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Physical memory (bytes) snapshot=381779968\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce output records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Virtual memory (bytes) snapshot=2704003072\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map output records=62\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf fs -test -e mapredsmokeoutput]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 37.52 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 41,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "MAPREDUCE_SERVICE_CHECK",
-        "start_time" : 1352864331797,
-        "stage_id" : 3
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/27",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[gmetad]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Gmetad/Hdp::Exec[hdp-gmetad service]/Exec[hdp-gmetad service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 8.14 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[gmetad]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Gmetad/Hdp::Exec[hdp-gmetad service]/Exec[hdp-gmetad service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 8.14 
seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 27,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_SERVER",
-        "start_time" : 1352864089883,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/35",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}4673b67e078cc9d84ffc4873e5198edf' to '{md5}654e54e7c3f58aa3d37d07110ad63bb5'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}d3b2d5e47669c948fccb907fa32c2b55' to '{md5}0e079fd5bc7cc43a35b60012c9ee00d9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.45 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}4673b67e078cc9d84ffc4873e5198edf' to '{md5}654e54e7c3f58aa3d37d07110ad63bb5'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}d3b2d5e47669c948fccb907fa32c2b55' to '{md5}0e079fd5bc7cc43a35b60012c9ee00d9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.45 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 35,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "SECONDARY_NAMENODE",
-        "start_time" : 1352864269474,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/37",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}df2d55356b238461af57fe22ad993e4d' to '{md5}62a467fcccda8169de563170e39e3419'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}1a3769d695902dba39b5645fef3766e0' to '{md5}23097908e8b54f7dbc4d31b5d26d21e7'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.66 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}df2d55356b238461af57fe22ad993e4d' to '{md5}62a467fcccda8169de563170e39e3419'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}1a3769d695902dba39b5645fef3766e0' to '{md5}23097908e8b54f7dbc4d31b5d26d21e7'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.66 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 37,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352864269589,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/43",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 43,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "SQOOP_SERVICE_CHECK",
-        "start_time" : 1352864331830,
-        "stage_id" : 3
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/40",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $lzo_enabled at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:37 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ensure at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:76 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}827a6e7bd4233d4dc82b20761aed1e30' to '{md5}4e59b973cec0811615008a580244bcdb'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp::Exec[/bin/sh -c 'cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz && mkdir -p /var/tmp/oozie && chown oozie:hadoop /var/tmp/oozie && cd /var/tmp/oozie' && su - oozie -c '/usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 /usr/lib/hadoop/ -extjs /usr/share/HDP-oozie/ext.zip -jars /usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run ; hadoop dfs -put /usr/lib/oozie/share share ; hadoop dfs -chmod -R 755 /user/oozie/share && /usr/lib/oozie/bin/oozie-start.sh' ]/Exec[/bin/sh -c 'cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz && mkdir -p /var/tmp/oozie && chown oozie:hadoop /var/tmp/oozie && cd /var/tmp/oozie' && su - oozie -c '/usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 /usr/lib/hadoop/ -extjs /usr/share/HDP-oozie/ext.zip -jars /usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run ; hadoop dfs -put /usr/lib/oozie/share share ; hadoop dfs -chmod -R 755 /user/oozie/share && /usr/lib/oozie/bin/oozie-start.sh' ]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 36.67 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $lzo_enabled at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:37 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ensure at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:76 is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}827a6e7bd4233d4dc82b20761aed1e30' to '{md5}4e59b973cec0811615008a580244bcdb'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp::Exec[/bin/sh -c 'cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz && mkdir -p /var/tmp/oozie && chown oozie:hadoop /var/tmp/oozie && cd /var/tmp/oozie' && su - oozie -c '/usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 /usr/lib/hadoop/ -extjs /usr/share/HDP-oozie/ext.zip -jars /usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run ; hadoop dfs -put /usr/lib/oozie/share share ; hadoop dfs -chmod -R 755 /user/oozie/share && /usr/lib/oozie/bin/oozie-start.sh' ]/Exec[/bin/sh -c 'cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz && mkdir -p /var/tmp/oozie && chown oozie:hadoop /var/tmp/oozie && cd /var/tmp/oozie' && su - oozie -c '/usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 /usr/lib/hadoop/ -extjs /usr/share/HDP-oozie/ext.zip -jars /usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run ; hadoop dfs -put /usr/lib/oozie/share share ; hadoop dfs -chmod -R 755 /user/oozie/share && /usr/lib/oozie/bin/oozie-start.sh' ]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 36.67 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 40,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "OOZIE_SERVER",
-        "start_time" : 1352864331712,
-        "stage_id" : 3
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/33",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.72 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.72 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 33,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864090181,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/25",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.84 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.84 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 25,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864089770,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/31",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.15 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 31,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864090105,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/44",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "PENDING",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 44,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 0,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "OOZIE_SERVICE_CHECK",
-        "start_time" : -1,
-        "stage_id" : 4
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/26",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 7.68 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 7.68 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 26,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864089836,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/30",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}a973d9bcff056aeff7f22221886c84b7' to '{md5}df2d55356b238461af57fe22ad993e4d'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}54edf0ba61f6501cc49c0d7788b266b1' to '{md5}b25bda7a405235227d20732f0972c5f6'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.00 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}a973d9bcff056aeff7f22221886c84b7' to '{md5}df2d55356b238461af57fe22ad993e4d'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}54edf0ba61f6501cc49c0d7788b266b1' to '{md5}b25bda7a405235227d20732f0972c5f6'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.00 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 30,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "DATANODE",
-        "start_time" : 1352864090068,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/38",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::check_safemode]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/returns: Safe mode is OFF\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::check_safemode]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::create_file]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -rm /tmp/id280a7781_date381312; hadoop fs -put /etc/passwd /tmp/id280a7781_date381312]/Exec[hadoop --config /etc/hadoop/conf fs -rm /tmp/id280a7781_date381312; hadoop fs -put /etc/passwd /tmp/id280a7781_date381312]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e /tmp/id280a7781_date381312]/Exec[hadoop --config /etc/hadoop/conf fs -test -e /tmp/id280a7781_date381312]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 10.35 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::check_safemode]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/returns: Safe mode is OFF\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::check_safemode]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::create_file]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -rm /tmp/id280a7781_date381312; hadoop fs -put /etc/passwd /tmp/id280a7781_date381312]/Exec[hadoop --config /etc/hadoop/conf fs -rm /tmp/id280a7781_date381312; hadoop fs -put /etc/passwd /tmp/id280a7781_date381312]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e /tmp/id280a7781_date381312]/Exec[hadoop --config /etc/hadoop/conf fs -test -e /tmp/id280a7781_date381312]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 10.35 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 38,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "HDFS_SERVICE_CHECK",
-        "start_time" : 1352864269616,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/42",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 42,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "PIG_SERVICE_CHECK",
-        "start_time" : 1352864331815,
-        "stage_id" : 3
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/39",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}db0f2efdd03e4845c0528e1978b25644' to '{md5}84df095b5569e720b4aeaf4a96e0ee6d'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}f72b50738651b3cb6bcef039b59ffdcb' to '{md5}e750ca8f3497b9a4656f782dcf335dab'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.29 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}db0f2efdd03e4845c0528e1978b25644' to '{md5}84df095b5569e720b4aeaf4a96e0ee6d'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}f72b50738651b3cb6bcef039b59ffdcb' to '{md5}e750ca8f3497b9a4656f782dcf335dab'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.29 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 39,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352864269636,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/36",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}9786ed97b221e37075bdb64400bc804a' to '{md5}8e06d7ec24fe5acd81917162d58857db'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}30e43dbdb225dad740d632ecc8f6ae11' to '{md5}558aadf67e4d29865a6d935076d3868b'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.20 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}9786ed97b221e37075bdb64400bc804a' to '{md5}8e06d7ec24fe5acd81917162d58857db'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}30e43dbdb225dad740d632ecc8f6ae11' to '{md5}558aadf67e4d29865a6d935076d3868b'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.20 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 36,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352864269562,
-        "stage_id" : 2
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_warning/poll_8.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_warning/poll_8.json
deleted file mode 100644
index cf8fbd5..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_warning/poll_8.json
+++ /dev/null
@@ -1,405 +0,0 @@
-{
-  "href" : "http://ambari:8080/api/clusters/mycluster/requests/2?fields=tasks/*",
-  "Requests" : {
-    "id" : 2,
-    "cluster_name" : "mycluster"
-  },
-  "tasks" : [
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/29",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}d22fc5749dde07b5b5acff255c490e9d' to '{md5}0617b67bc5192f5e44cf98b2fe25eb6f'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}0d021082a9258c648b5259d3af27ff62' to '{md5}39e33160b7f2933a12fc338a81ae9fcd'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/File[/tmp/checkForFormat.sh]/ensure: defined content as '{md5}5dd6bddf910d8ca9f6fefa44e7bbec7e'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: NameNode Dirname = /grid/0/hadoop/hdfs/namenode\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: NameNode Dirname = /grid/1/hadoop/hdfs/namenode\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:57 INFO namenode.NameNode: STARTUP_MSG: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: /************************************************************\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG: Starting NameNode\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG:   host = host5/10.118.58.228\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG:   args = [-format]\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG:   version = 1.1.0.1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG:   build =  -r ; compiled by 'jenkins' on Sat Nov 10 18:55:09 PST 2012\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: ************************************************************/\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: Re-format filesystem in /grid/0/hadoop/hdfs/namenode ? (Y or N) Re-format filesystem in /grid/1/hadoop/hdfs/namenode ? 
(Y or N) 12/11/13 22:36:58 INFO util.GSet: VM type       = 64-bit\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO util.GSet: 2% max memory = 19.2 MB\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO util.GSet: capacity      = 2^21 = 2097152 entries\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO util.GSet: recommended=2097152, actual=2097152\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: fsOwner=hdfs\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: supergroup=supergroup\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: isPermissionEnabled=true\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: dfs.block.invalidate.limit=100\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 WARN namenode.FSNamesystem: The dfs.support.append option is in your configuration, however append is not supported. This configuration option is no longer required to enable sync\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: isAccessTokenEnabled=false accessKeyUpdateInterval=0 min(s), accessTokenLifetime=0 min(s)\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.NameNode: Caching file names occuring more than 10 times \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO common.Storage: Image file of size 110 saved in 0 seconds.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.FSEditLog: closing edit log: position=4, editlog=/grid/0/hadoop/hdfs/namenode/current/edits\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.FSEditLog: close success: truncate to 4, editlog=/grid/0/hadoop/hdfs/namenode/current/edits\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO common.Storage: Storage directory /grid/0/hadoop/hdfs/namenode has been successfully formatted.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO common.Storage: Image file of size 110 saved in 0 seconds.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.FSEditLog: closing edit log: position=4, editlog=/grid/1/hadoop/hdfs/namenode/current/edits\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.FSEditLog: close success: truncate to 4, 
editlog=/grid/1/hadoop/hdfs/namenode/current/edits\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO common.Storage: Storage directory /grid/1/hadoop/hdfs/namenode has been successfully formatted.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.NameNode: SHUTDOWN_MSG: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: /************************************************************\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: SHUTDOWN_MSG: Shutting down NameNode at host5/10.118.58.228\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: ************************************************************/\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: yes: standard output: Broken pipe\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: yes: write error\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Hdp::Exec[set namenode mark]/Exec[set namenode mark]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Service[namenode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -mkdir /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred]/Hdp-hadoop::Exec-hadoop[fs -mkdir /mapred]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred]/Hdp-hadoop::Exec-hadoop[fs -chown mapred /mapred]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred]/Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -mkdir /mapred/system]/Hdp::Exec[hadoop --config 
/etc/hadoop/conf fs -mkdir /mapred/system]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -chmod 775 /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 775 /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 775 /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -mkdir /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -chown mapred /mapred/system]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred/system]/Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred/system]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -chown hdfs /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown hdfs /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -chown hdfs /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -chmod 770 /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 770 /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 770 /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -chown ambari_qa /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown ambari_qa /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -chown ambari_qa /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -chmod 777 /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 777 /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 777 /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -chown oozie /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown oozie /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -chown oozie /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 51.23 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -mkdir 
/mapred/system]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -chmod 775 /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 775 /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 775 /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -mkdir /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -chown mapred /mapred/system]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred/system]/Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred/system]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -chown hdfs /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown hdfs /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -chown hdfs /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -chmod 770 /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 770 /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 770 /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -chown ambari_qa /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown ambari_qa /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -chown ambari_qa /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -chmod 777 /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 777 /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 777 /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -chown oozie /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown oozie /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -chown oozie /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 51.23 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 29,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "NAMENODE",
-        "start_time" : 1352864090025,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/27",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[gmetad]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Gmetad/Hdp::Exec[hdp-gmetad service]/Exec[hdp-gmetad service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 8.14 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[gmetad]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Gmetad/Hdp::Exec[hdp-gmetad service]/Exec[hdp-gmetad service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 8.14 
seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 27,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_SERVER",
-        "start_time" : 1352864089883,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/41",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::create_file]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfs -rmr mapredsmokeoutput mapredsmokeinput ; hadoop dfs -put /etc/passwd mapredsmokeinput ]/Exec[hadoop --config /etc/hadoop/conf dfs -rmr mapredsmokeoutput mapredsmokeinput ; hadoop dfs -put /etc/passwd mapredsmokeinput ]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:13 INFO input.FileInputFormat: Total input paths to process : 1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:13 WARN snappy.LoadSnappy: Snappy native library is available\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:13 INFO util.NativeCodeLoader: Loaded the native-hadoop library\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:13 INFO snappy.LoadSnappy: Snappy native library loaded\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:14 INFO mapred.JobClient: Running job: job_201211132238_0001\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:15 INFO mapred.JobClient:  map 0% reduce 0%\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:30 INFO mapred.JobClient:  map 100% reduce 0%\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:38 INFO mapred.JobClient:  map 100% reduce 33%\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:40 INFO mapred.JobClient:  map 100% reduce 100%\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient: Job complete: job_201211132238_0001\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient: Counters: 29\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   Job Counters \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Launched reduce tasks=1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar 
/usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     SLOTS_MILLIS_MAPS=6106\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Total time spent by all reduces waiting after reserving slots (ms)=0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Total time spent by all maps waiting after reserving slots (ms)=0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Launched map tasks=1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Data-local map tasks=1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     SLOTS_MILLIS_REDUCES=9332\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   File Output Format Counters \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Bytes 
Written=1845\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   FileSystemCounters\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     FILE_BYTES_READ=2095\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     HDFS_BYTES_READ=1893\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     FILE_BYTES_WRITTEN=117522\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     HDFS_BYTES_WRITTEN=1845\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   File Input Format Counters \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Bytes Read=1755\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput 
mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   Map-Reduce Framework\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map output materialized bytes=2095\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map input records=36\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce shuffle bytes=2095\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Spilled Records=122\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map output bytes=2003\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     CPU time spent (ms)=1920\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Total committed heap usage 
(bytes)=433913856\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Combine input records=62\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     SPLIT_RAW_BYTES=138\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce input records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce input groups=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Combine output records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Physical memory (bytes) snapshot=381779968\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce output records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  
wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Virtual memory (bytes) snapshot=2704003072\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map output records=62\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf fs -test -e mapredsmokeoutput]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 37.52 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce input records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce input groups=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Combine output records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Physical memory (bytes) snapshot=381779968\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce output records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Virtual memory (bytes) snapshot=2704003072\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map output records=62\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf fs -test -e mapredsmokeoutput]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 37.52 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 41,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "MAPREDUCE_SERVICE_CHECK",
-        "start_time" : 1352864331797,
-        "stage_id" : 3
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/24",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}a39a2689e76538c6d9090b00ceb04eb0' to '{md5}9786ed97b221e37075bdb64400bc804a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}c14eb8ab2bb5ab75789c875534ab64f4' to '{md5}9684de67c2a8fa0f7292418d6c0c1651'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.78 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}a39a2689e76538c6d9090b00ceb04eb0' to '{md5}9786ed97b221e37075bdb64400bc804a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}c14eb8ab2bb5ab75789c875534ab64f4' to '{md5}9684de67c2a8fa0f7292418d6c0c1651'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.78 seconds\u001B[0m",
-        "status" : "FAILED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 24,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "DATANODE",
-        "start_time" : 1352864089661,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/44",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 44,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "OOZIE_SERVICE_CHECK",
-        "start_time" : 1352864442993,
-        "stage_id" : 4
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/23",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.77 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.77 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 23,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864089600,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/40",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $lzo_enabled at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:37 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ensure at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:76 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}827a6e7bd4233d4dc82b20761aed1e30' to '{md5}4e59b973cec0811615008a580244bcdb'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp::Exec[/bin/sh -c 'cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz && mkdir -p /var/tmp/oozie && chown oozie:hadoop /var/tmp/oozie && cd /var/tmp/oozie' && su - oozie -c '/usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 /usr/lib/hadoop/ -extjs /usr/share/HDP-oozie/ext.zip -jars /usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run ; hadoop dfs -put /usr/lib/oozie/share share ; hadoop dfs -chmod -R 755 /user/oozie/share && /usr/lib/oozie/bin/oozie-start.sh' ]/Exec[/bin/sh -c 'cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz && mkdir -p /var/tmp/oozie && chown oozie:hadoop /var/tmp/oozie && cd /var/tmp/oozie' && su - oozie -c '/usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 /usr/lib/hadoop/ -extjs /usr/share/HDP-oozie/ext.zip -jars /usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run ; hadoop dfs -put /usr/lib/oozie/share share ; hadoop dfs -chmod -R 755 /user/oozie/share && /usr/lib/oozie/bin/oozie-start.sh' ]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 36.67 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $lzo_enabled at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:37 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ensure at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:76 is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}827a6e7bd4233d4dc82b20761aed1e30' to '{md5}4e59b973cec0811615008a580244bcdb'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp::Exec[/bin/sh -c 'cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz && mkdir -p /var/tmp/oozie && chown oozie:hadoop /var/tmp/oozie && cd /var/tmp/oozie' && su - oozie -c '/usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 /usr/lib/hadoop/ -extjs /usr/share/HDP-oozie/ext.zip -jars /usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run ; hadoop dfs -put /usr/lib/oozie/share share ; hadoop dfs -chmod -R 755 /user/oozie/share && /usr/lib/oozie/bin/oozie-start.sh' ]/Exec[/bin/sh -c 'cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz && mkdir -p /var/tmp/oozie && chown oozie:hadoop /var/tmp/oozie && cd /var/tmp/oozie' && su - oozie -c '/usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 /usr/lib/hadoop/ -extjs /usr/share/HDP-oozie/ext.zip -jars /usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run ; hadoop dfs -put /usr/lib/oozie/share share ; hadoop dfs -chmod -R 755 /user/oozie/share && /usr/lib/oozie/bin/oozie-start.sh' ]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 36.67 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 40,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "OOZIE_SERVER",
-        "start_time" : 1352864331712,
-        "stage_id" : 3
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/34",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}63d8feab1255e45d9549ccea14f687c4' to '{md5}4673b67e078cc9d84ffc4873e5198edf'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start jobtracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start jobtracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[historyserver]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start historyserver']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start historyserver']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}cccc03b9f3384eac76957c7fe2f12849' to '{md5}07e946dbf4ae6632034ee6715a085b92'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 9.76 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}63d8feab1255e45d9549ccea14f687c4' to '{md5}4673b67e078cc9d84ffc4873e5198edf'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start jobtracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start jobtracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[historyserver]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start historyserver']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start historyserver']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}cccc03b9f3384eac76957c7fe2f12849' to '{md5}07e946dbf4ae6632034ee6715a085b92'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 9.76 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 34,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "JOBTRACKER",
-        "start_time" : 1352864269447,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/32",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}c892638e8c76c66f072640eb32b0637a' to '{md5}db0f2efdd03e4845c0528e1978b25644'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}fcfc81d25ae7ad5f5aaaacdc3d47f0f5' to '{md5}036cea2c613ff235499a7ed743be467f'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.38 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}c892638e8c76c66f072640eb32b0637a' to '{md5}db0f2efdd03e4845c0528e1978b25644'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}fcfc81d25ae7ad5f5aaaacdc3d47f0f5' to '{md5}036cea2c613ff235499a7ed743be467f'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.38 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 32,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "DATANODE",
-        "start_time" : 1352864090145,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/42",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Hdp-hadoop::Exec-hadoop[pig::service_check::create_file]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfs -rmr pigsmoke.out passwd; hadoop dfs -put /etc/passwd passwd ]/Exec[hadoop --config /etc/hadoop/conf dfs -rmr pigsmoke.out passwd; hadoop dfs -put /etc/passwd passwd ]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/File[/tmp/pigSmoke.sh]/ensure: defined content as '{md5}feac231e484c08e3bc5f83d0ee189a8c'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:39:58,367 [main] INFO  org.apache.pig.Main - Apache Pig version 0.10.0.1 (rexported) compiled Nov 10 2012, 19:10:20\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:39:58,368 [main] INFO  org.apache.pig.Main - Logging error messages to: /home/ambari_qa/pig_1352864398364.log\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:39:58,789 [main] INFO  org.apache.pig.backend.hadoop.executionengine.HExecutionEngine - Connecting to hadoop file system at: hdfs://host5:8020\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:39:59,058 [main] INFO  org.apache.pig.backend.hadoop.executionengine.HExecutionEngine - Connecting to map-reduce job tracker at: host3:50300\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:39:59,907 [main] INFO  org.apache.pig.tools.pigstats.ScriptState - Pig features used in the script: UNKNOWN\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:00,158 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MRCompiler - File concatenation threshold: 100 optimistic? 
false\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:00,183 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MultiQueryOptimizer - MR plan size before optimization: 1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:00,183 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MultiQueryOptimizer - MR plan size after optimization: 1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:00,288 [main] INFO  org.apache.pig.tools.pigstats.ScriptState - Pig script settings are added to the job\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:00,312 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.JobControlCompiler - mapred.job.reduce.markreset.buffer.percent is not set, set to default 0.3\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:00,315 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.JobControlCompiler - creating jar file Job4537005419718909074.jar\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,356 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.JobControlCompiler - jar file Job4537005419718909074.jar created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,377 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.JobControlCompiler - Setting up single store job\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,432 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - 1 map-reduce job(s) waiting for submission.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,932 [Thread-6] INFO  org.apache.hadoop.mapreduce.lib.input.FileInputFormat - Total input paths to process : 1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,933 [Thread-6] INFO  org.apache.pig.backend.hadoop.executionengine.util.MapRedUtil - Total input paths to process : 1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,934 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - 0% complete\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,971 [Thread-6] WARN  org.apache.hadoop.io.compress.snappy.LoadSnappy - Snappy native library is available\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,973 [Thread-6] INFO  org.apache.hadoop.util.NativeCodeLoader - Loaded the native-hadoop library\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,973 [Thread-6] INFO  org.apache.hadoop.io.compress.snappy.LoadSnappy - Snappy native library loaded\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,977 
[Thread-6] INFO  org.apache.pig.backend.hadoop.executionengine.util.MapRedUtil - Total input paths (combined) to process : 1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:06,811 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - HadoopJobId: job_201211132238_0002\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:06,812 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - More information at: http://host3:50030/jobdetails.jsp?jobid=job_201211132238_0002\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:17,380 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - 50% complete\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:21,432 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - 100% complete\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:21,434 [main] INFO  org.apache.pig.tools.pigstats.SimplePigStats - Script Statistics: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: HadoopVersion\tPigVersion\tUserId\tStartedAt\tFinishedAt\tFeatures\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 1.1.0.1\t0.10.0.1\tambari_qa\t2012-11-13 22:40:00\t2012-11-13 22:40:21\tUNKNOWN\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Success!\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Job Stats (time in seconds):\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: JobId\tMaps\tReduces\tMaxMapTime\tMinMapTIme\tAvgMapTime\tMaxReduceTime\tMinReduceTime\tAvgReduceTime\tAlias\tFeature\tOutputs\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: job_201211132238_0002\t1\t0\t3\t3\t3\t0\t0\t0\tA,B\tMAP_ONLY\thdfs://host5:8020/user/ambari_qa/pigsmoke.out,\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Input(s):\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Successfully read 36 records (2137 bytes) from: \"hdfs://host5:8020/user/ambari_qa/passwd\"\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Output(s):\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Successfully stored 36 records (236 bytes) in: \"hdfs://host5:8020/user/ambari_qa/pigsmoke.out\"\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Counters:\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Total records written : 36\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Total bytes written : 236\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Spillable Memory Manager spill count : 0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Total bags proactively spilled: 0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Total records proactively spilled: 0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Job DAG:\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: job_201211132238_0002\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:21,446 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - Success!\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Hdp-hadoop::Exec-hadoop[pig::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e pigsmoke.out]/Exec[hadoop --config /etc/hadoop/conf fs -test -e pigsmoke.out]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 32.06 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Total records proactively spilled: 0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Job DAG:\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: job_201211132238_0002\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:21,446 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - Success!\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Hdp-hadoop::Exec-hadoop[pig::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e pigsmoke.out]/Exec[hadoop --config /etc/hadoop/conf fs -test -e pigsmoke.out]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 32.06 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 42,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "PIG_SERVICE_CHECK",
-        "start_time" : 1352864331815,
-        "stage_id" : 3
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/35",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}4673b67e078cc9d84ffc4873e5198edf' to '{md5}654e54e7c3f58aa3d37d07110ad63bb5'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}d3b2d5e47669c948fccb907fa32c2b55' to '{md5}0e079fd5bc7cc43a35b60012c9ee00d9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.45 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}4673b67e078cc9d84ffc4873e5198edf' to '{md5}654e54e7c3f58aa3d37d07110ad63bb5'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}d3b2d5e47669c948fccb907fa32c2b55' to '{md5}0e079fd5bc7cc43a35b60012c9ee00d9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.45 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 35,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "SECONDARY_NAMENODE",
-        "start_time" : 1352864269474,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/43",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: Sqoop 1.4.2.1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: git commit id ea3b95785b3daf62c68f1eb0e645636acc00d0c2\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: Compiled by jenkins on Sat Nov 10 19:14:01 PST 2012\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: Sqoop 1.4.2.1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: git commit id ea3b95785b3daf62c68f1eb0e645636acc00d0c2\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: Compiled by jenkins on Sat Nov 10 19:14:01 PST 2012\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.15 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 43,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "SQOOP_SERVICE_CHECK",
-        "start_time" : 1352864331830,
-        "stage_id" : 3
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/31",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.15 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 31,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864090105,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/33",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.72 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.72 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 33,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864090181,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/25",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.84 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.84 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 25,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864089770,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/38",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::check_safemode]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/returns: Safe mode is OFF\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::check_safemode]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::create_file]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -rm /tmp/id280a7781_date381312; hadoop fs -put /etc/passwd /tmp/id280a7781_date381312]/Exec[hadoop --config /etc/hadoop/conf fs -rm /tmp/id280a7781_date381312; hadoop fs -put /etc/passwd /tmp/id280a7781_date381312]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e /tmp/id280a7781_date381312]/Exec[hadoop --config /etc/hadoop/conf fs -test -e /tmp/id280a7781_date381312]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 10.35 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::check_safemode]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/returns: Safe mode is OFF\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::check_safemode]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::create_file]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -rm /tmp/id280a7781_date381312; hadoop fs -put /etc/passwd /tmp/id280a7781_date381312]/Exec[hadoop --config /etc/hadoop/conf fs -rm /tmp/id280a7781_date381312; hadoop fs -put /etc/passwd /tmp/id280a7781_date381312]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e /tmp/id280a7781_date381312]/Exec[hadoop --config /etc/hadoop/conf fs -test -e /tmp/id280a7781_date381312]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 10.35 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 38,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "HDFS_SERVICE_CHECK",
-        "start_time" : 1352864269616,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/28",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hostgroups.cfg]/content: content changed '{md5}ffff62426c4f7a42c1cb1ca44b324dad' to '{md5}21ad9f95dd93ee39fc87db07b7ea05be'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hosts.cfg]/content: content changed '{md5}fdcc51e399dd2381778a163933ef2beb' to '{md5}afbfd32db940db5fff4701c964169c27'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Services/Service[nagios]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Services/Service[nagios]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 8.78 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hostgroups.cfg]/content: content changed '{md5}ffff62426c4f7a42c1cb1ca44b324dad' to '{md5}21ad9f95dd93ee39fc87db07b7ea05be'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hosts.cfg]/content: content changed '{md5}fdcc51e399dd2381778a163933ef2beb' to '{md5}afbfd32db940db5fff4701c964169c27'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Services/Service[nagios]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Services/Service[nagios]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 8.78 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 28,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "NAGIOS_SERVER",
-        "start_time" : 1352864089985,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/37",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}df2d55356b238461af57fe22ad993e4d' to '{md5}62a467fcccda8169de563170e39e3419'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}1a3769d695902dba39b5645fef3766e0' to '{md5}23097908e8b54f7dbc4d31b5d26d21e7'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.66 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}df2d55356b238461af57fe22ad993e4d' to '{md5}62a467fcccda8169de563170e39e3419'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}1a3769d695902dba39b5645fef3766e0' to '{md5}23097908e8b54f7dbc4d31b5d26d21e7'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.66 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 37,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352864269589,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/30",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}a973d9bcff056aeff7f22221886c84b7' to '{md5}df2d55356b238461af57fe22ad993e4d'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}54edf0ba61f6501cc49c0d7788b266b1' to '{md5}b25bda7a405235227d20732f0972c5f6'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.00 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}a973d9bcff056aeff7f22221886c84b7' to '{md5}df2d55356b238461af57fe22ad993e4d'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}54edf0ba61f6501cc49c0d7788b266b1' to '{md5}b25bda7a405235227d20732f0972c5f6'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.00 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 30,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "DATANODE",
-        "start_time" : 1352864090068,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/26",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 7.68 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 7.68 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 26,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864089836,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/36",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}9786ed97b221e37075bdb64400bc804a' to '{md5}8e06d7ec24fe5acd81917162d58857db'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}30e43dbdb225dad740d632ecc8f6ae11' to '{md5}558aadf67e4d29865a6d935076d3868b'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.20 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}9786ed97b221e37075bdb64400bc804a' to '{md5}8e06d7ec24fe5acd81917162d58857db'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}30e43dbdb225dad740d632ecc8f6ae11' to '{md5}558aadf67e4d29865a6d935076d3868b'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.20 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 36,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352864269562,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/39",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}db0f2efdd03e4845c0528e1978b25644' to '{md5}84df095b5569e720b4aeaf4a96e0ee6d'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}f72b50738651b3cb6bcef039b59ffdcb' to '{md5}e750ca8f3497b9a4656f782dcf335dab'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.29 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}db0f2efdd03e4845c0528e1978b25644' to '{md5}84df095b5569e720b4aeaf4a96e0ee6d'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}f72b50738651b3cb6bcef039b59ffdcb' to '{md5}e750ca8f3497b9a4656f782dcf335dab'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.29 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 39,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352864269636,
-        "stage_id" : 2
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_warning/poll_9.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_warning/poll_9.json
deleted file mode 100644
index c748b91..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/slave_warning/poll_9.json
+++ /dev/null
@@ -1,405 +0,0 @@
-{
-  "href" : "http://ambari:8080/api/clusters/mycluster/requests/2?fields=tasks/*",
-  "Requests" : {
-    "id" : 2,
-    "cluster_name" : "mycluster"
-  },
-  "tasks" : [
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/34",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}63d8feab1255e45d9549ccea14f687c4' to '{md5}4673b67e078cc9d84ffc4873e5198edf'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start jobtracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start jobtracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[historyserver]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start historyserver']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start historyserver']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}cccc03b9f3384eac76957c7fe2f12849' to '{md5}07e946dbf4ae6632034ee6715a085b92'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 9.76 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}63d8feab1255e45d9549ccea14f687c4' to '{md5}4673b67e078cc9d84ffc4873e5198edf'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[jobtracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start jobtracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start jobtracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Jobtracker/Hdp-hadoop::Service[historyserver]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start historyserver']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start historyserver']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}cccc03b9f3384eac76957c7fe2f12849' to '{md5}07e946dbf4ae6632034ee6715a085b92'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 9.76 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 34,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "JOBTRACKER",
-        "start_time" : 1352864269447,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/44",
-      "Tasks" : {
-        "exit_code" : 999,
-        "stdout" : "",
-        "status" : "QUEUED",
-        "stderr" : "",
-        "host_name" : "host2",
-        "id" : 44,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "OOZIE_SERVICE_CHECK",
-        "start_time" : 1352864442993,
-        "stage_id" : 4
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/36",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}9786ed97b221e37075bdb64400bc804a' to '{md5}8e06d7ec24fe5acd81917162d58857db'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}30e43dbdb225dad740d632ecc8f6ae11' to '{md5}558aadf67e4d29865a6d935076d3868b'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.20 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}9786ed97b221e37075bdb64400bc804a' to '{md5}8e06d7ec24fe5acd81917162d58857db'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}30e43dbdb225dad740d632ecc8f6ae11' to '{md5}558aadf67e4d29865a6d935076d3868b'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.20 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 36,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352864269562,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/29",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}d22fc5749dde07b5b5acff255c490e9d' to '{md5}0617b67bc5192f5e44cf98b2fe25eb6f'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}0d021082a9258c648b5259d3af27ff62' to '{md5}39e33160b7f2933a12fc338a81ae9fcd'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/File[/tmp/checkForFormat.sh]/ensure: defined content as '{md5}5dd6bddf910d8ca9f6fefa44e7bbec7e'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: NameNode Dirname = /grid/0/hadoop/hdfs/namenode\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: NameNode Dirname = /grid/1/hadoop/hdfs/namenode\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:57 INFO namenode.NameNode: STARTUP_MSG: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: /************************************************************\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG: Starting NameNode\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG:   host = host5/10.118.58.228\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG:   args = [-format]\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG:   version = 1.1.0.1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: STARTUP_MSG:   build =  -r ; compiled by 'jenkins' on Sat Nov 10 18:55:09 PST 2012\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: ************************************************************/\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: Re-format filesystem in /grid/0/hadoop/hdfs/namenode ? (Y or N) Re-format filesystem in /grid/1/hadoop/hdfs/namenode ? 
(Y or N) 12/11/13 22:36:58 INFO util.GSet: VM type       = 64-bit\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO util.GSet: 2% max memory = 19.2 MB\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO util.GSet: capacity      = 2^21 = 2097152 entries\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO util.GSet: recommended=2097152, actual=2097152\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: fsOwner=hdfs\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: supergroup=supergroup\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: isPermissionEnabled=true\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: dfs.block.invalidate.limit=100\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 WARN namenode.FSNamesystem: The dfs.support.append option is in your configuration, however append is not supported. This configuration option is no longer required to enable sync\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.FSNamesystem: isAccessTokenEnabled=false accessKeyUpdateInterval=0 min(s), accessTokenLifetime=0 min(s)\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO namenode.NameNode: Caching file names occuring more than 10 times \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:58 INFO common.Storage: Image file of size 110 saved in 0 seconds.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.FSEditLog: closing edit log: position=4, editlog=/grid/0/hadoop/hdfs/namenode/current/edits\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.FSEditLog: close success: truncate to 4, editlog=/grid/0/hadoop/hdfs/namenode/current/edits\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO common.Storage: Storage directory /grid/0/hadoop/hdfs/namenode has been successfully formatted.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO common.Storage: Image file of size 110 saved in 0 seconds.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.FSEditLog: closing edit log: position=4, editlog=/grid/1/hadoop/hdfs/namenode/current/edits\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.FSEditLog: close success: truncate to 4, 
editlog=/grid/1/hadoop/hdfs/namenode/current/edits\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO common.Storage: Storage directory /grid/1/hadoop/hdfs/namenode has been successfully formatted.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: 12/11/13 22:36:59 INFO namenode.NameNode: SHUTDOWN_MSG: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: /************************************************************\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: SHUTDOWN_MSG: Shutting down NameNode at host5/10.118.58.228\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: ************************************************************/\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: yes: standard output: Broken pipe\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: yes: write error\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Exec[/tmp/checkForFormat.sh]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode::Format/Hdp::Exec[set namenode mark]/Exec[set namenode mark]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Service[namenode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -mkdir /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred]/Hdp-hadoop::Exec-hadoop[fs -mkdir /mapred]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred]/Hdp-hadoop::Exec-hadoop[fs -chown mapred /mapred]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred]/Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -mkdir /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -mkdir /mapred/system]/Hdp::Exec[hadoop --config 
/etc/hadoop/conf fs -mkdir /mapred/system]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -chmod 775 /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 775 /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 775 /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -mkdir /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -chown mapred /mapred/system]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred/system]/Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred/system]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -chown hdfs /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown hdfs /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -chown hdfs /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -chmod 770 /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 770 /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 770 /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -chown ambari_qa /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown ambari_qa /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -chown ambari_qa /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -chmod 777 /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 777 /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 777 /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -chown oozie /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown oozie /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -chown oozie /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 51.23 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -mkdir 
/mapred/system]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /mapred/system]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -chmod 775 /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 775 /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 775 /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -mkdir /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -mkdir /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/mapred/system]/Hdp-hadoop::Exec-hadoop[fs -chown mapred /mapred/system]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred/system]/Exec[hadoop --config /etc/hadoop/conf fs -chown mapred /mapred/system]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -chown hdfs /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown hdfs /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -chown hdfs /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -chmod 770 /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 770 /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 770 /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/ambari_qa]/Hdp-hadoop::Exec-hadoop[fs -chown ambari_qa /user/ambari_qa]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown ambari_qa /user/ambari_qa]/Exec[hadoop --config /etc/hadoop/conf fs -chown ambari_qa /user/ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/tmp]/Hdp-hadoop::Exec-hadoop[fs -chmod 777 /tmp]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chmod 777 /tmp]/Exec[hadoop --config /etc/hadoop/conf fs -chmod 777 /tmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Namenode/Hdp-hadoop::Namenode::Create_app_directories[create_app_directories]/Hdp-hadoop::Hdfs::Directory[/user/oozie]/Hdp-hadoop::Exec-hadoop[fs -chown oozie /user/oozie]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -chown oozie /user/oozie]/Exec[hadoop --config /etc/hadoop/conf fs -chown oozie /user/oozie]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 51.23 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 29,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "NAMENODE",
-        "start_time" : 1352864090025,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/33",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.72 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.72 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 33,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864090181,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/39",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}db0f2efdd03e4845c0528e1978b25644' to '{md5}84df095b5569e720b4aeaf4a96e0ee6d'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}f72b50738651b3cb6bcef039b59ffdcb' to '{md5}e750ca8f3497b9a4656f782dcf335dab'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.29 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}db0f2efdd03e4845c0528e1978b25644' to '{md5}84df095b5569e720b4aeaf4a96e0ee6d'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}f72b50738651b3cb6bcef039b59ffdcb' to '{md5}e750ca8f3497b9a4656f782dcf335dab'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.29 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 39,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352864269636,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/28",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hostgroups.cfg]/content: content changed '{md5}ffff62426c4f7a42c1cb1ca44b324dad' to '{md5}21ad9f95dd93ee39fc87db07b7ea05be'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hosts.cfg]/content: content changed '{md5}fdcc51e399dd2381778a163933ef2beb' to '{md5}afbfd32db940db5fff4701c964169c27'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Services/Service[nagios]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Services/Service[nagios]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 8.78 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hostgroups.cfg]/content: content changed '{md5}ffff62426c4f7a42c1cb1ca44b324dad' to '{md5}21ad9f95dd93ee39fc87db07b7ea05be'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/nagios/objects/hadoop-hosts.cfg]/content: content changed '{md5}fdcc51e399dd2381778a163933ef2beb' to '{md5}afbfd32db940db5fff4701c964169c27'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Web_permisssions/Hdp::Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/Exec[htpasswd -c -b  /etc/nagios/htpasswd.users nagiosadmin admin]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Services/Service[nagios]/ensure: ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-nagios::Server::Services/Service[nagios]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 8.78 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 28,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "NAGIOS_SERVER",
-        "start_time" : 1352864089985,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/32",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}c892638e8c76c66f072640eb32b0637a' to '{md5}db0f2efdd03e4845c0528e1978b25644'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}fcfc81d25ae7ad5f5aaaacdc3d47f0f5' to '{md5}036cea2c613ff235499a7ed743be467f'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.38 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}c892638e8c76c66f072640eb32b0637a' to '{md5}db0f2efdd03e4845c0528e1978b25644'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}fcfc81d25ae7ad5f5aaaacdc3d47f0f5' to '{md5}036cea2c613ff235499a7ed743be467f'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.38 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 32,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "DATANODE",
-        "start_time" : 1352864090145,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/25",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.84 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.84 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 25,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864089770,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/30",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}a973d9bcff056aeff7f22221886c84b7' to '{md5}df2d55356b238461af57fe22ad993e4d'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}54edf0ba61f6501cc49c0d7788b266b1' to '{md5}b25bda7a405235227d20732f0972c5f6'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.00 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}a973d9bcff056aeff7f22221886c84b7' to '{md5}df2d55356b238461af57fe22ad993e4d'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}54edf0ba61f6501cc49c0d7788b266b1' to '{md5}b25bda7a405235227d20732f0972c5f6'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.00 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 30,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "DATANODE",
-        "start_time" : 1352864090068,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/35",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}4673b67e078cc9d84ffc4873e5198edf' to '{md5}654e54e7c3f58aa3d37d07110ad63bb5'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}d3b2d5e47669c948fccb907fa32c2b55' to '{md5}0e079fd5bc7cc43a35b60012c9ee00d9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.45 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}4673b67e078cc9d84ffc4873e5198edf' to '{md5}654e54e7c3f58aa3d37d07110ad63bb5'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}d3b2d5e47669c948fccb907fa32c2b55' to '{md5}0e079fd5bc7cc43a35b60012c9ee00d9'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Snamenode/Hdp-hadoop::Service[secondarynamenode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.45 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 35,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "SECONDARY_NAMENODE",
-        "start_time" : 1352864269474,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/41",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::create_file]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfs -rmr mapredsmokeoutput mapredsmokeinput ; hadoop dfs -put /etc/passwd mapredsmokeinput ]/Exec[hadoop --config /etc/hadoop/conf dfs -rmr mapredsmokeoutput mapredsmokeinput ; hadoop dfs -put /etc/passwd mapredsmokeinput ]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:13 INFO input.FileInputFormat: Total input paths to process : 1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:13 WARN snappy.LoadSnappy: Snappy native library is available\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:13 INFO util.NativeCodeLoader: Loaded the native-hadoop library\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:13 INFO snappy.LoadSnappy: Snappy native library loaded\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:14 INFO mapred.JobClient: Running job: job_201211132238_0001\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:15 INFO mapred.JobClient:  map 0% reduce 0%\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:30 INFO mapred.JobClient:  map 100% reduce 0%\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:38 INFO mapred.JobClient:  map 100% reduce 33%\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:40 INFO mapred.JobClient:  map 100% reduce 100%\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient: Job complete: job_201211132238_0001\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient: Counters: 29\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   Job Counters \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Launched reduce tasks=1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar 
/usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     SLOTS_MILLIS_MAPS=6106\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Total time spent by all reduces waiting after reserving slots (ms)=0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Total time spent by all maps waiting after reserving slots (ms)=0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Launched map tasks=1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Data-local map tasks=1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     SLOTS_MILLIS_REDUCES=9332\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   File Output Format Counters \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Bytes 
Written=1845\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   FileSystemCounters\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     FILE_BYTES_READ=2095\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     HDFS_BYTES_READ=1893\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     FILE_BYTES_WRITTEN=117522\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     HDFS_BYTES_WRITTEN=1845\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   File Input Format Counters \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Bytes Read=1755\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput 
mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:   Map-Reduce Framework\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map output materialized bytes=2095\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map input records=36\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce shuffle bytes=2095\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Spilled Records=122\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map output bytes=2003\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     CPU time spent (ms)=1920\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Total committed heap usage 
(bytes)=433913856\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Combine input records=62\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     SPLIT_RAW_BYTES=138\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce input records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce input groups=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Combine output records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Physical memory (bytes) snapshot=381779968\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce output records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  
wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Virtual memory (bytes) snapshot=2704003072\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map output records=62\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf fs -test -e mapredsmokeoutput]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 37.52 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce input records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce input groups=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Combine output records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Physical memory (bytes) snapshot=381779968\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Reduce output records=61\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Virtual memory (bytes) snapshot=2704003072\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: 12/11/13 22:39:41 INFO mapred.JobClient:     Map output records=62\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::run_wordcount]/Hdp::Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf jar /usr/lib/hadoop//hadoop-examples.jar  wordcount mapredsmokeinput mapredsmokeoutput]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Mapred::Service_check/Hdp-hadoop::Exec-hadoop[mapred::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e mapredsmokeoutput]/Exec[hadoop --config /etc/hadoop/conf fs -test -e mapredsmokeoutput]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 37.52 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 41,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "MAPREDUCE_SERVICE_CHECK",
-        "start_time" : 1352864331797,
-        "stage_id" : 3
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/31",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.15 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 31,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864090105,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/40",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $lzo_enabled at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:37 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ensure at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:76 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}827a6e7bd4233d4dc82b20761aed1e30' to '{md5}4e59b973cec0811615008a580244bcdb'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp::Exec[/bin/sh -c 'cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz && mkdir -p /var/tmp/oozie && chown oozie:hadoop /var/tmp/oozie && cd /var/tmp/oozie' && su - oozie -c '/usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 /usr/lib/hadoop/ -extjs /usr/share/HDP-oozie/ext.zip -jars /usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run ; hadoop dfs -put /usr/lib/oozie/share share ; hadoop dfs -chmod -R 755 /user/oozie/share && /usr/lib/oozie/bin/oozie-start.sh' ]/Exec[/bin/sh -c 'cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz && mkdir -p /var/tmp/oozie && chown oozie:hadoop /var/tmp/oozie && cd /var/tmp/oozie' && su - oozie -c '/usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 /usr/lib/hadoop/ -extjs /usr/share/HDP-oozie/ext.zip -jars /usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run ; hadoop dfs -put /usr/lib/oozie/share share ; hadoop dfs -chmod -R 755 /user/oozie/share && /usr/lib/oozie/bin/oozie-start.sh' ]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 36.67 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $lzo_enabled at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:37 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ensure at /var/lib/ambari-agent/puppet/modules/hdp-oozie/manifests/service.pp:76 is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/oozie/conf/oozie-site.xml]/content: content changed '{md5}827a6e7bd4233d4dc82b20761aed1e30' to '{md5}4e59b973cec0811615008a580244bcdb'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-oozie::Service/Hdp::Exec[/bin/sh -c 'cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz && mkdir -p /var/tmp/oozie && chown oozie:hadoop /var/tmp/oozie && cd /var/tmp/oozie' && su - oozie -c '/usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 /usr/lib/hadoop/ -extjs /usr/share/HDP-oozie/ext.zip -jars /usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run ; hadoop dfs -put /usr/lib/oozie/share share ; hadoop dfs -chmod -R 755 /user/oozie/share && /usr/lib/oozie/bin/oozie-start.sh' ]/Exec[/bin/sh -c 'cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz && mkdir -p /var/tmp/oozie && chown oozie:hadoop /var/tmp/oozie && cd /var/tmp/oozie' && su - oozie -c '/usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 /usr/lib/hadoop/ -extjs /usr/share/HDP-oozie/ext.zip -jars /usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run ; hadoop dfs -put /usr/lib/oozie/share share ; hadoop dfs -chmod -R 755 /user/oozie/share && /usr/lib/oozie/bin/oozie-start.sh' ]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 36.67 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 40,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "OOZIE_SERVER",
-        "start_time" : 1352864331712,
-        "stage_id" : 3
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/24",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}a39a2689e76538c6d9090b00ceb04eb0' to '{md5}9786ed97b221e37075bdb64400bc804a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}c14eb8ab2bb5ab75789c875534ab64f4' to '{md5}9684de67c2a8fa0f7292418d6c0c1651'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.78 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}a39a2689e76538c6d9090b00ceb04eb0' to '{md5}9786ed97b221e37075bdb64400bc804a'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}c14eb8ab2bb5ab75789c875534ab64f4' to '{md5}9684de67c2a8fa0f7292418d6c0c1651'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/Exec[su - hdfs -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.78 seconds\u001B[0m",
-        "status" : "FAILED",
-        "stderr" : "none",
-        "host_name" : "host1",
-        "id" : 24,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "DATANODE",
-        "start_time" : 1352864089661,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/23",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.77 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 6.77 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host3",
-        "id" : 23,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864089600,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/26",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 7.68 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Config-gen/Hdp-ganglia::Config::Generate_monitor[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Monitor::Gmond/Hdp::Exec[hdp-gmond service]/Exec[hdp-gmond service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 7.68 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 26,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_MONITOR",
-        "start_time" : 1352864089836,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/43",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: Sqoop 1.4.2.1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: git commit id ea3b95785b3daf62c68f1eb0e645636acc00d0c2\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: Compiled by jenkins on Sat Nov 10 19:14:01 PST 2012\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.15 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: Sqoop 1.4.2.1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: git commit id ea3b95785b3daf62c68f1eb0e645636acc00d0c2\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: Compiled by jenkins on Sat Nov 10 19:14:01 PST 2012\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-sqoop::Sqoop::Service_check/Exec[sqoop_smoke]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 3.15 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 43,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "SQOOP_SERVICE_CHECK",
-        "start_time" : 1352864331830,
-        "stage_id" : 3
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/42",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Hdp-hadoop::Exec-hadoop[pig::service_check::create_file]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfs -rmr pigsmoke.out passwd; hadoop dfs -put /etc/passwd passwd ]/Exec[hadoop --config /etc/hadoop/conf dfs -rmr pigsmoke.out passwd; hadoop dfs -put /etc/passwd passwd ]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/File[/tmp/pigSmoke.sh]/ensure: defined content as '{md5}feac231e484c08e3bc5f83d0ee189a8c'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:39:58,367 [main] INFO  org.apache.pig.Main - Apache Pig version 0.10.0.1 (rexported) compiled Nov 10 2012, 19:10:20\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:39:58,368 [main] INFO  org.apache.pig.Main - Logging error messages to: /home/ambari_qa/pig_1352864398364.log\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:39:58,789 [main] INFO  org.apache.pig.backend.hadoop.executionengine.HExecutionEngine - Connecting to hadoop file system at: hdfs://host5:8020\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:39:59,058 [main] INFO  org.apache.pig.backend.hadoop.executionengine.HExecutionEngine - Connecting to map-reduce job tracker at: host3:50300\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:39:59,907 [main] INFO  org.apache.pig.tools.pigstats.ScriptState - Pig features used in the script: UNKNOWN\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:00,158 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MRCompiler - File concatenation threshold: 100 optimistic? 
false\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:00,183 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MultiQueryOptimizer - MR plan size before optimization: 1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:00,183 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MultiQueryOptimizer - MR plan size after optimization: 1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:00,288 [main] INFO  org.apache.pig.tools.pigstats.ScriptState - Pig script settings are added to the job\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:00,312 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.JobControlCompiler - mapred.job.reduce.markreset.buffer.percent is not set, set to default 0.3\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:00,315 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.JobControlCompiler - creating jar file Job4537005419718909074.jar\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,356 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.JobControlCompiler - jar file Job4537005419718909074.jar created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,377 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.JobControlCompiler - Setting up single store job\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,432 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - 1 map-reduce job(s) waiting for submission.\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,932 [Thread-6] INFO  org.apache.hadoop.mapreduce.lib.input.FileInputFormat - Total input paths to process : 1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,933 [Thread-6] INFO  org.apache.pig.backend.hadoop.executionengine.util.MapRedUtil - Total input paths to process : 1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,934 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - 0% complete\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,971 [Thread-6] WARN  org.apache.hadoop.io.compress.snappy.LoadSnappy - Snappy native library is available\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,973 [Thread-6] INFO  org.apache.hadoop.util.NativeCodeLoader - Loaded the native-hadoop library\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,973 [Thread-6] INFO  org.apache.hadoop.io.compress.snappy.LoadSnappy - Snappy native library loaded\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:05,977 
[Thread-6] INFO  org.apache.pig.backend.hadoop.executionengine.util.MapRedUtil - Total input paths (combined) to process : 1\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:06,811 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - HadoopJobId: job_201211132238_0002\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:06,812 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - More information at: http://host3:50030/jobdetails.jsp?jobid=job_201211132238_0002\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:17,380 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - 50% complete\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:21,432 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - 100% complete\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:21,434 [main] INFO  org.apache.pig.tools.pigstats.SimplePigStats - Script Statistics: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: HadoopVersion\tPigVersion\tUserId\tStartedAt\tFinishedAt\tFeatures\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 1.1.0.1\t0.10.0.1\tambari_qa\t2012-11-13 22:40:00\t2012-11-13 22:40:21\tUNKNOWN\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Success!\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Job Stats (time in seconds):\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: JobId\tMaps\tReduces\tMaxMapTime\tMinMapTIme\tAvgMapTime\tMaxReduceTime\tMinReduceTime\tAvgReduceTime\tAlias\tFeature\tOutputs\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: job_201211132238_0002\t1\t0\t3\t3\t3\t0\t0\t0\tA,B\tMAP_ONLY\thdfs://host5:8020/user/ambari_qa/pigsmoke.out,\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Input(s):\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Successfully read 36 records (2137 bytes) from: \"hdfs://host5:8020/user/ambari_qa/passwd\"\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Output(s):\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Successfully stored 36 records (236 bytes) in: \"hdfs://host5:8020/user/ambari_qa/pigsmoke.out\"\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Counters:\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Total records written : 36\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Total bytes written : 236\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Spillable Memory Manager spill count : 0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Total bags proactively spilled: 0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Total records proactively spilled: 0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Job DAG:\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: job_201211132238_0002\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:21,446 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - Success!\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Hdp-hadoop::Exec-hadoop[pig::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e pigsmoke.out]/Exec[hadoop --config /etc/hadoop/conf fs -test -e pigsmoke.out]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 32.06 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Total records proactively spilled: 0\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: Job DAG:\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: job_201211132238_0002\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: \u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: 2012-11-13 22:40:21,446 [main] INFO  org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher - Success!\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Exec[/tmp/pigSmoke.sh]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-pig::Pig::Service_check/Hdp-hadoop::Exec-hadoop[pig::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e pigsmoke.out]/Exec[hadoop --config /etc/hadoop/conf fs -test -e pigsmoke.out]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 32.06 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 42,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "PIG_SERVICE_CHECK",
-        "start_time" : 1352864331815,
-        "stage_id" : 3
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/27",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[gmetad]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Gmetad/Hdp::Exec[hdp-gmetad service]/Exec[hdp-gmetad service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 8.14 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPJobTracker]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPJobTracker -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPNameNode]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPSlaves]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[gmetad]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -t]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server/Hdp-ganglia::Config::Generate_server[HDPHBaseMaster]/Hdp::Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/Exec[/usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPHBaseMaster -m]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-ganglia::Server::Gmetad/Hdp::Exec[hdp-gmetad service]/Exec[hdp-gmetad service]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 8.14 
seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host5",
-        "id" : 27,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "GANGLIA_SERVER",
-        "start_time" : 1352864089883,
-        "stage_id" : 1
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/37",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}df2d55356b238461af57fe22ad993e4d' to '{md5}62a467fcccda8169de563170e39e3419'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}1a3769d695902dba39b5645fef3766e0' to '{md5}23097908e8b54f7dbc4d31b5d26d21e7'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.66 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:134 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $ambari_db_server_host is deprecated.  Support will be removed in Puppet 2.8.  
Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}df2d55356b238461af57fe22ad993e4d' to '{md5}62a467fcccda8169de563170e39e3419'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Tasktracker/Hdp-hadoop::Service[tasktracker]/Hdp::Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/Exec[su - mapred -c  '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker']/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/mapred-site.xml]/content: content changed '{md5}1a3769d695902dba39b5645fef3766e0' to '{md5}23097908e8b54f7dbc4d31b5d26d21e7'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 5.66 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host4",
-        "id" : 37,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "START",
-        "role" : "TASKTRACKER",
-        "start_time" : 1352864269589,
-        "stage_id" : 2
-      }
-    },
-    {
-      "href" : "http://ambari:8080/api/clusters/mycluster/requests/2/tasks/38",
-      "Tasks" : {
-        "exit_code" : 0,
-        "stdout" : "\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::check_safemode]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/returns: Safe mode is OFF\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::check_safemode]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::create_file]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -rm /tmp/id280a7781_date381312; hadoop fs -put /etc/passwd /tmp/id280a7781_date381312]/Exec[hadoop --config /etc/hadoop/conf fs -rm /tmp/id280a7781_date381312; hadoop fs -put /etc/passwd /tmp/id280a7781_date381312]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e /tmp/id280a7781_date381312]/Exec[hadoop --config /etc/hadoop/conf fs -test -e /tmp/id280a7781_date381312]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 10.35 seconds\u001B[0m\n\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::check_safemode]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/returns: Safe mode is OFF\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::check_safemode]/Hdp::Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/Exec[hadoop --config /etc/hadoop/conf dfsadmin -safemode get | grep OFF]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::create_file]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -rm /tmp/id280a7781_date381312; hadoop fs -put /etc/passwd /tmp/id280a7781_date381312]/Exec[hadoop --config /etc/hadoop/conf fs -rm /tmp/id280a7781_date381312; hadoop fs -put /etc/passwd /tmp/id280a7781_date381312]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Hdfs::Service_check/Hdp-hadoop::Exec-hadoop[hdfs::service_check::test]/Hdp::Exec[hadoop --config /etc/hadoop/conf fs -test -e /tmp/id280a7781_date381312]/Exec[hadoop --config /etc/hadoop/conf fs -test -e /tmp/id280a7781_date381312]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 10.35 seconds\u001B[0m",
-        "status" : "COMPLETED",
-        "stderr" : "none",
-        "host_name" : "host2",
-        "id" : 38,
-        "cluster_name" : "mycluster",
-        "attempt_cnt" : 1,
-        "request_id" : 2,
-        "command" : "EXECUTE",
-        "role" : "HDFS_SERVICE_CHECK",
-        "start_time" : 1352864269616,
-        "stage_id" : 2
-      }
-    }
-  ]
-}
\ No newline at end of file
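
The "stdout" fields in the fixture above embed raw Puppet console output, ANSI color escapes included: "\u001B[0;36m" opens the cyan "notice:" prefix and "\u001B[0m" resets it. Any consumer that renders these logs as plain text has to strip those sequences first. A minimal Python sketch (the helper name plain_stdout is illustrative, not an Ambari API):

    import json
    import re

    # Matches color escapes such as "\x1b[0;36m" and "\x1b[0m", which the
    # JSON above serializes as "\u001B[0;36m" and "\u001B[0m".
    ANSI_ESCAPE = re.compile(r'\x1b\[[0-9;]*m')

    def plain_stdout(task_document):
        """Return a task's stdout with terminal color codes removed."""
        task = json.loads(task_document)["Tasks"]
        return ANSI_ESCAPE.sub('', task["stdout"])
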
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/task_log.json b/branch-1.2/ambari-web/app/assets/data/wizard/deploy/task_log.json
deleted file mode 100644
index bcfda6b..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/deploy/task_log.json
+++ /dev/null
@@ -1,18 +0,0 @@
-{
-  "href": "http://localhost:8080/api/clusters/mycluster/requests/1/tasks/1",
-  "Tasks": {
-    "exit_code": 0,
-    "stdout": "\u001B[0;36mnotice: /File[HDP]/ensure: defined content as '{md5}1b9bf482b0d9000dc118f861807e3768'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.05 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[HDP-UTILS]/ensure: defined content as '{md5}33b4d1cfff9651814935531c07f95619'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.04 seconds\u001B[0m\n\n\u001B[0;33mwarning: Unrecognised escape sequence '\\u' in file /tmp/ambari-agent/epel-1.pp at line 14\u001B[0m\n\u001B[0;33mwarning: Unrecognised escape sequence '\\u' in file /tmp/ambari-agent/epel-1.pp at line 14\u001B[0m\n\u001B[0;33mwarning: Unrecognised escape sequence '\\u' in file /tmp/ambari-agent/epel-1.pp at line 14\u001B[0m\n\u001B[0;36mnotice: /File[epel]/content: content changed '{md5}4cd77946d1b5176987036e8fb382ce2d' to '{md5}01ce1b3ac52d274854b80bb793b779fa'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 0.16 seconds\u001B[0m\n\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/init.pp:130 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:74 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;33mwarning: Dynamic lookup of $service_state at /var/lib/ambari-agent/puppet/modules/hdp-hadoop/manifests/service.pp:83 is deprecated.  Support will be removed in Puppet 2.8.  Use a fully-qualified variable name (e.g., $classname::variable) or parameterized classes.\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Iptables/Service[iptables]/ensure: ensure changed 'running' to 'stopped'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::User[ambari_qa]/User[ambari_qa]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::Package[glibc-rhel6]/Hdp::Package::Yum[glibc-rhel6]/Package[glibc.i686]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Create_smoke_user/Hdp::Exec[usermod -g  users  ambari_qa]/Exec[usermod -g  users  ambari_qa]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Set_selinux/Hdp::Exec[/bin/echo 0 > /selinux/enforce]/Exec[/bin/echo 0 > /selinux/enforce]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp-utils]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Package[net-snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Hdp::Package[snmp]/Hdp::Package::Yum[snmp]/Hdp::Java::Package[snmp]/Exec[mkdir -p /usr/jdk64 ; chmod +x /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin; cd /usr/jdk64 ; echo A | /tmp/HDP-artifacts//jdk-6u31-linux-x64.bin -noregister > /dev/null 2>&1 snmp]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java snmp]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/snmp//snmpd.conf]/content: content changed '{md5}8307434bc8ed4e2a7df4928fb4232778' to '{md5}f786955c0c36f7f5a4f375e3fe93c959'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]/ensure: 
ensure changed 'stopped' to 'running'\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snmp/Service[snmpd]: Triggered 'refresh' from 1 events\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Package[snappy]/Hdp::Package::Yum[snappy]/Package[snappy-devel]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[64]/Hdp::Exec[hdp::snappy::package::ln 64]/Exec[hdp::snappy::package::ln 64]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp::Snappy::Package/Hdp::Snappy::Package::Ln[32]/Hdp::Exec[hdp::snappy::package::ln 32]/Exec[hdp::snappy::package::ln 32]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Group[hadoop]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[1]/Hdp/Hdp::User[hadoop_deploy]/User[hadoop_deploy]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/Exec[mkdir -p /tmp/hadoop-hdfs/dfs/data]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp::Directory_recursive_create[/var/log/hadoop]/Hdp::Exec[mkdir -p /var/log/hadoop]/Exec[mkdir -p /var/log/hadoop]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-sbin]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-libhdfs]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-pipes]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/usr/jdk64/jdk1.6.0_31/bin/java hadoop 64]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /Stage[main]/Hdp-hadoop/Hdp-hadoop::Package[hadoop]/Hdp::Package[hadoop 64]/Hdp::Package::Yum[hadoop 64]/Package[hadoop-lzo-native]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/owner: owner changed 'root' to 'hadoop_deploy'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/core-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}683863be6291488bc08abbd97c7ff623'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/content: content changed '{md5}58885021e0700a18e824207296abb335' to '{md5}8a0bed492eb9ccb05bab8b86d36e1344'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-env.sh]/group: group changed 'root' to 
'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/content: content changed '{md5}98463b57c5de7290573af36f0738cd95' to '{md5}f467666fc6a0c1ad80c6fd2048b2aa75'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/log4j.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/content: content changed '{md5}0e24dd628517df6d84383a5098d2f602' to '{md5}ad35ea77bb42bd3f92aa809be896d259'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/taskcontroller.cfg]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hdfs-site.xml]/content: content changed '{md5}5248c973f1cd3c22fefd056024434bcb' to '{md5}692150e423bb94f2f0f388983e75b1dc'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Datanode::Create_data_dirs[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory_recursive_create[/tmp/hadoop-hdfs/dfs/data]/Hdp::Directory[/tmp/hadoop-hdfs/dfs/data]/File[/tmp/hadoop-hdfs/dfs/data]/mode: mode changed '0755' to '0750'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/health_check]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/content: content changed '{md5}cdd8f9ac4e75353f997a7cfb44e0ee9a' to '{md5}ea74c62fc5454962c15de62bcf9bfa5f'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/hadoop-metrics2.properties]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/commons-logging.properties]/ensure: created\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/content: content changed '{md5}b8419160170a41ae01abab13a3b887df' to '{md5}d20f9cdc4f9769b0c61ace4e267fa4bb'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: 
/Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 232.49 seconds\u001B[0m\n\n\u001B[0;36mnotice: /File[/etc/hadoop/conf/slaves]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/log/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/log/hadoop/hdfs]/Exec[mkdir -p /var/log/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/log/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: /Stage[2]/Hdp-hadoop::Datanode/Hdp-hadoop::Service[datanode]/Hdp::Directory_recursive_create[/var/run/hadoop/hdfs]/Hdp::Exec[mkdir -p /var/run/hadoop/hdfs]/Exec[mkdir -p /var/run/hadoop/hdfs]/returns: executed successfully\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/owner: owner changed 'root' to 'hdfs'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/group: group changed 'root' to 'hadoop'\u001B[0m\n\u001B[0;36mnotice: /File[/var/run/hadoop/hdfs]/seluser: seluser changed 'unconfined_u' to 'system_u'\u001B[0m\n\u001B[0;36mnotice: Finished catalog run in 232.49 seconds\u001B[0m",
-    "status": "COMPLETED",
-    "stderr": "none\nnone\nnone\nnone",
-    "host_name": "myhost",
-    "id": 1,
-    "cluster_name": "mycluster",
-    "attempt_cnt": 1,
-    "request_id": 1,
-    "command": "INSTALL",
-    "role": "DATANODE",
-    "start_time": 1352331312891,
-    "stage_id": 1
-  }
-}
\ No newline at end of file
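
task_log.json above is the single-task shape of the same resource: a client polling for deploy progress would fetch the task by the "href" shown and watch "status" and "exit_code". A hedged sketch, assuming anonymous access and treating COMPLETED, FAILED, and ABORTED as the terminal states (that set is an assumption for illustration):

    import json
    from urllib.request import urlopen

    def task_finished(base_url, cluster, request_id, task_id):
        # URL shape mirrors the "href" values in the fixtures above;
        # authentication is omitted for brevity.
        url = f"{base_url}/api/clusters/{cluster}/requests/{request_id}/tasks/{task_id}"
        with urlopen(url) as resp:
            task = json.load(resp)["Tasks"]
        return task["status"] in ("COMPLETED", "FAILED", "ABORTED")
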
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/stack/hdp/version/1.2.0.json b/branch-1.2/ambari-web/app/assets/data/wizard/stack/hdp/version/1.2.0.json
deleted file mode 100644
index c91a038..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/stack/hdp/version/1.2.0.json
+++ /dev/null
@@ -1,347 +0,0 @@
-{
-  "name" : "HDP",
-  "version" : "1.2.0",
-  "repositories" : [ {
-    "baseUrl" : "http://s3.amazonaws.com/dev.hortonworks.com/HDP-1.2.0/repos/centos6",
-    "osType" : "centos6",
-    "repoId" : "HDP-1.2.0",
-    "repoName" : "HDP",
-    "mirrorsList" : null
-  }, {
-    "baseUrl" : null,
-    "osType" : "centos6",
-    "repoId" : "HDP-epel",
-    "repoName" : "HDP-epel",
-    "mirrorsList" : "http://mirrors.fedoraproject.org/mirrorlist?repo=epel-6&arch=$basearch"
-  }, {
-    "baseUrl" : "http://s3.amazonaws.com/dev.hortonworks.com/HDP-1.2.0/repos/centos5",
-    "osType" : "centos5",
-    "repoId" : "HDP-1.2.0",
-    "repoName" : "HDP",
-    "mirrorsList" : null
-  }, {
-    "baseUrl" : null,
-    "osType" : "centos5",
-    "repoId" : "HDP-epel",
-    "repoName" : "HDP-epel",
-    "mirrorsList" : "http://mirrors.fedoraproject.org/mirrorlist?repo=epel-5&arch=$basearch"
-  }, {
-    "baseUrl" : "http://s3.amazonaws.com/dev.hortonworks.com/HDP-1.2.0/repos/centos6",
-    "osType" : "redhat6",
-    "repoId" : "HDP-1.2.0",
-    "repoName" : "HDP",
-    "mirrorsList" : null
-  }, {
-    "baseUrl" : null,
-    "osType" : "redhat6",
-    "repoId" : "HDP-epel",
-    "repoName" : "HDP-epel",
-    "mirrorsList" : "http://mirrors.fedoraproject.org/mirrorlist?repo=epel-6&arch=$basearch"
-  }, {
-    "baseUrl" : "http://s3.amazonaws.com/dev.hortonworks.com/HDP-1.2.0/repos/centos5",
-    "osType" : "redhat5",
-    "repoId" : "HDP-1.2.0",
-    "repoName" : "HDP",
-    "mirrorsList" : null
-  }, {
-    "baseUrl" : null,
-    "osType" : "redhat5",
-    "repoId" : "HDP-epel",
-    "repoName" : "HDP-epel",
-    "mirrorsList" : "http://mirrors.fedoraproject.org/mirrorlist?repo=epel-5&arch=$basearch"
-  }, {
-    "baseUrl" : "http://s3.amazonaws.com/dev.hortonworks.com/HDP-1.2.0/repos/suse11",
-    "osType" : "suse11",
-    "repoId" : "HDP-1.2.0",
-    "repoName" : "HDP",
-    "mirrorsList" : null
-  }, {
-    "baseUrl" : "http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.15/repos/suse11",
-    "osType" : "suse11",
-    "repoId" : "HDP-UTILS-1.1.0.15",
-    "repoName" : "HDP-UTILS",
-    "mirrorsList" : null
-  } ],
-  "services" : [ {
-    "name" : "WEBHCAT",
-    "version" : "0.1.4.1-1",
-    "user" : "root",
-    "comment" : "This is comment for WEBHCAT service",
-    "components" : [ {
-      "name" : "WEBHCAT_SERVER",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    } ],
-    "clientOnlyService" : false,
-    "clientComponent" : {
-      "name" : "WEBHCAT_SERVER",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    }
-  }, {
-    "name" : "SQOOP",
-    "version" : "1.4.2.1-1",
-    "user" : "root",
-    "comment" : "Tool for transferring bulk data between Apache Hadoop and structured data stores such as relational databases",
-    "components" : [ {
-      "name" : "SQOOP",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    } ],
-    "clientOnlyService" : true,
-    "clientComponent" : {
-      "name" : "SQOOP",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    }
-  }, {
-    "name" : "NAGIOS",
-    "version" : "1.0",
-    "user" : "root",
-    "comment" : "Nagios Monitoring and Alerting system",
-    "components" : [ {
-      "name" : "NAGIOS_SERVER",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    } ],
-    "clientOnlyService" : false,
-    "clientComponent" : {
-      "name" : "NAGIOS_SERVER",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    }
-  }, {
-    "name" : "HDFS",
-    "version" : "1.1.0.1-1",
-    "user" : "root",
-    "comment" : "Apache Hadoop Distributed File System",
-    "components" : [ {
-      "name" : "NAMENODE",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    }, {
-      "name" : "DATANODE",
-      "category" : "SLAVE",
-      "client" : false,
-      "master" : false
-    }, {
-      "name" : "SECONDARY_NAMENODE",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    }, {
-      "name" : "HDFS_CLIENT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    } ],
-    "clientOnlyService" : false,
-    "clientComponent" : {
-      "name" : "HDFS_CLIENT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    }
-  }, {
-    "name" : "PIG",
-    "version" : "0.10.0.1-1",
-    "user" : "root",
-    "comment" : "Scripting platform for analyzing large datasets",
-    "components" : [ {
-      "name" : "PIG",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    } ],
-    "clientOnlyService" : true,
-    "clientComponent" : {
-      "name" : "PIG",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    }
-  }, {
-    "name" : "HBASE",
-    "version" : "0.94.2.1-1",
-    "user" : "mapred",
-    "comment" : "Non-relational distributed database and centralized service for configuration management & synchronization",
-    "components" : [ {
-      "name" : "HBASE_MASTER",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    }, {
-      "name" : "HBASE_REGIONSERVER",
-      "category" : "SLAVE",
-      "client" : false,
-      "master" : false
-    }, {
-      "name" : "HBASE_CLIENT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    } ],
-    "clientOnlyService" : false,
-    "clientComponent" : {
-      "name" : "HBASE_CLIENT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    }
-  }, {
-    "name" : "ZOOKEEPER",
-    "version" : "3.4.5.1-1",
-    "user" : "root",
-    "comment" : "This is comment for ZOOKEEPER service",
-    "components" : [ {
-      "name" : "ZOOKEEPER_SERVER",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    }, {
-      "name" : "ZOOKEEPER_CLIENT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    } ],
-    "clientOnlyService" : false,
-    "clientComponent" : {
-      "name" : "ZOOKEEPER_CLIENT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    }
-  }, {
-    "name" : "GANGLIA",
-    "version" : "1.0",
-    "user" : "root",
-    "comment" : "Ganglia Metrics Collection system",
-    "components" : [ {
-      "name" : "GANGLIA_SERVER",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    }, {
-      "name" : "GANGLIA_MONITOR",
-      "category" : "SLAVE",
-      "client" : false,
-      "master" : false
-    }, {
-      "name" : "MONITOR_WEBSERVER",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    } ],
-    "clientOnlyService" : false,
-    "clientComponent" : {
-      "name" : "GANGLIA_SERVER",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    }
-  }, {
-    "name" : "HCATALOG",
-    "version" : "0.4.0.1-1",
-    "user" : "root",
-    "comment" : "This is comment for HCATALOG service",
-    "components" : [ {
-      "name" : "HCAT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    } ],
-    "clientOnlyService" : true,
-    "clientComponent" : {
-      "name" : "HCAT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    }
-  }, {
-    "name" : "HIVE",
-    "version" : "0.9.0.1-1",
-    "user" : "root",
-    "comment" : "Data warehouse system for ad-hoc queries & analysis of large datasets and table & storage management service",
-    "components" : [ {
-      "name" : "HIVE_SERVER",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    }, {
-      "name" : "MYSQL_SERVER",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    }, {
-      "name" : "HIVE_CLIENT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    } ],
-    "clientOnlyService" : false,
-    "clientComponent" : {
-      "name" : "HIVE_CLIENT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    }
-  }, {
-    "name" : "OOZIE",
-    "version" : "3.2.0.1-1",
-    "user" : "root",
-    "comment" : "System for workflow coordination and execution of Apache Hadoop jobs",
-    "components" : [ {
-      "name" : "OOZIE_SERVER",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    }, {
-      "name" : "OOZIE_CLIENT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    } ],
-    "clientOnlyService" : false,
-    "clientComponent" : {
-      "name" : "OOZIE_CLIENT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    }
-  }, {
-    "name" : "MAPREDUCE",
-    "version" : "1.1.0.1-1",
-    "user" : "mapred",
-    "comment" : "Apache Hadoop Distributed Processing Framework",
-    "components" : [ {
-      "name" : "JOBTRACKER",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    }, {
-      "name" : "TASKTRACKER",
-      "category" : "SLAVE",
-      "client" : false,
-      "master" : false
-    }, {
-      "name" : "MAPREDUCE_CLIENT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    } ],
-    "clientOnlyService" : false,
-    "clientComponent" : {
-      "name" : "MAPREDUCE_CLIENT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    }
-  } ]
-}
\ No newline at end of file
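
In the stack-version document above, every repository entry pairs an osType with either a concrete baseUrl or a mirrorsList; exactly one of the two is non-null in each entry shown. Choosing a repository for a host therefore reduces to filtering on osType and falling back between the two fields. A small sketch under those assumptions:

    def repos_for_os(stack, os_type):
        """Filter the 'repositories' array by OS family, e.g. 'centos6'."""
        return [r for r in stack["repositories"] if r["osType"] == os_type]

    def repo_source(repo):
        """Each entry carries either a concrete baseUrl or a mirrorsList."""
        return repo["baseUrl"] if repo["baseUrl"] is not None else repo["mirrorsList"]
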
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/stack/hdp/version0.1.json b/branch-1.2/ambari-web/app/assets/data/wizard/stack/hdp/version0.1.json
deleted file mode 100644
index ca4439d..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/stack/hdp/version0.1.json
+++ /dev/null
@@ -1,306 +0,0 @@
-{
-  "name" : "HDP",
-  "version" : "0.1",
-  "repositories" : [ {
-    "baseUrl" : "http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos6",
-    "osType" : "centos6",
-    "repoId" : "HDP-1.1.1.16",
-    "repoName" : "HDP",
-    "mirrorsList" : null
-  }, {
-    "baseUrl" : "http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos6",
-    "osType" : "centos6",
-    "repoId" : "HDP-UTILS-1.1.0.15",
-    "repoName" : "HDP-UTILS",
-    "mirrorsList" : null
-  }, {
-    "baseUrl" : null,
-    "osType" : "centos6",
-    "repoId" : "epel",
-    "repoName" : "epel",
-    "mirrorsList" : "https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch"
-  }, {
-    "baseUrl" : "http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos5",
-    "osType" : "centos5",
-    "repoId" : "HDP-1.1.1.16",
-    "repoName" : "HDP",
-    "mirrorsList" : null
-  }, {
-    "baseUrl" : "http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos5",
-    "osType" : "centos5",
-    "repoId" : "HDP-UTILS-1.1.0.15",
-    "repoName" : "HDP-UTILS",
-    "mirrorsList" : null
-  }, {
-    "baseUrl" : null,
-    "osType" : "centos5",
-    "repoId" : "epel",
-    "repoName" : "epel",
-    "mirrorsList" : "https://mirrors.fedoraproject.org/metalink?repo=epel-5&arch=$basearch"
-  } ],
-  "services" : [ {
-    "name" : "WEBHCAT",
-    "version" : "1.0",
-    "user" : "root",
-    "comment" : "This is comment for WEBHCAT service",
-    "components" : [ {
-      "name" : "WEBHCAT_SERVER",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    } ],
-    "clientComponent" : {
-      "name" : "WEBHCAT_SERVER",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    }
-  }, {
-    "name" : "SQOOP",
-    "version" : "1.0",
-    "user" : "root",
-    "comment" : "This is comment for SQOOP service",
-    "components" : [ {
-      "name" : "SQOOP",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    } ],
-    "clientComponent" : {
-      "name" : "SQOOP",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    }
-  }, {
-    "name" : "NAGIOS",
-    "version" : "1.0",
-    "user" : "root",
-    "comment" : "This is comment for NAGIOS service",
-    "components" : [ {
-      "name" : "NAGIOS_SERVER",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    } ],
-    "clientComponent" : {
-      "name" : "NAGIOS_SERVER",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    }
-  }, {
-    "name" : "HDFS",
-    "version" : "1.0",
-    "user" : "root",
-    "comment" : "This is comment for HDFS service",
-    "components" : [ {
-      "name" : "NAMENODE",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    }, {
-      "name" : "DATANODE",
-      "category" : "SLAVE",
-      "client" : false,
-      "master" : false
-    }, {
-      "name" : "SECONDARY_NAMENODE",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    }, {
-      "name" : "HDFS_CLIENT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    } ],
-    "clientComponent" : {
-      "name" : "HDFS_CLIENT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    }
-  }, {
-    "name" : "PIG",
-    "version" : "1.0",
-    "user" : "root",
-    "comment" : "This is comment for PIG service",
-    "components" : [ {
-      "name" : "PIG",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    } ],
-    "clientComponent" : {
-      "name" : "PIG",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    }
-  }, {
-    "name" : "HBASE",
-    "version" : "1.0",
-    "user" : "mapred",
-    "comment" : "This is comment for HBASE service",
-    "components" : [ {
-      "name" : "HBASE_MASTER",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    }, {
-      "name" : "HBASE_REGIONSERVER",
-      "category" : "SLAVE",
-      "client" : false,
-      "master" : false
-    }, {
-      "name" : "HBASE_CLIENT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    } ],
-    "clientComponent" : {
-      "name" : "HBASE_CLIENT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    }
-  }, {
-    "name" : "ZOOKEEPER",
-    "version" : "1.0",
-    "user" : "root",
-    "comment" : "This is comment for ZOOKEEPER service",
-    "components" : [ {
-      "name" : "ZOOKEEPER_SERVER",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    }, {
-      "name" : "ZOOKEEPER_CLIENT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    } ],
-    "clientComponent" : {
-      "name" : "ZOOKEEPER_CLIENT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    }
-  }, {
-    "name" : "GANGLIA",
-    "version" : "1.0",
-    "user" : "root",
-    "comment" : "This is comment for GANGLIA service",
-    "components" : [ {
-      "name" : "GANGLIA_SERVER",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    }, {
-      "name" : "GANGLIA_MONITOR",
-      "category" : "SLAVE",
-      "client" : false,
-      "master" : false
-    }, {
-      "name" : "MONITOR_WEBSERVER",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    } ],
-    "clientComponent" : {
-      "name" : "GANGLIA_SERVER",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    }
-  }, {
-    "name" : "HCATALOG",
-    "version" : "1.0",
-    "user" : "root",
-    "comment" : "This is comment for HCATALOG service",
-    "components" : [ {
-      "name" : "HCAT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    } ],
-    "clientComponent" : {
-      "name" : "HCAT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    }
-  }, {
-    "name" : "HIVE",
-    "version" : "1.0",
-    "user" : "root",
-    "comment" : "This is comment for HIVE service",
-    "components" : [ {
-      "name" : "HIVE_SERVER",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    }, {
-      "name" : "HIVE_CLIENT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    } ],
-    "clientComponent" : {
-      "name" : "HIVE_CLIENT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    }
-  }, {
-    "name" : "OOZIE",
-    "version" : "1.0",
-    "user" : "root",
-    "comment" : "This is comment for OOZIE service",
-    "components" : [ {
-      "name" : "OOZIE_SERVER",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    }, {
-      "name" : "OOZIE_CLIENT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    } ],
-    "clientComponent" : {
-      "name" : "OOZIE_CLIENT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    }
-  }, {
-    "name" : "MAPREDUCE",
-    "version" : "1.0",
-    "user" : "mapred",
-    "comment" : "This is comment for MAPREDUCE service",
-    "components" : [ {
-      "name" : "JOBTRACKER",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    }, {
-      "name" : "TASKTRACKER",
-      "category" : "SLAVE",
-      "client" : false,
-      "master" : false
-    }, {
-      "name" : "MAPREDUCE_CLIENT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    } ],
-    "clientComponent" : {
-      "name" : "MAPREDUCE_CLIENT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    }
-  } ]
-}
\ No newline at end of file
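
Both stack documents tag every component with a category of MASTER, SLAVE, or CLIENT (the client/master booleans are redundant with it), and each service nominates a clientComponent. A consumer of these fixtures, such as the install wizard they back, can bucket components by category; a minimal sketch:

    def components_by_category(stack):
        """Index (service, component) pairs by MASTER/SLAVE/CLIENT category."""
        buckets = {"MASTER": [], "SLAVE": [], "CLIENT": []}
        for service in stack["services"]:
            for comp in service["components"]:
                buckets[comp["category"]].append((service["name"], comp["name"]))
        return buckets

With the version 0.1 document above, buckets["SLAVE"] would hold DATANODE, HBASE_REGIONSERVER, GANGLIA_MONITOR, and TASKTRACKER.
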
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/stack/hdp/version01/HBASE.json b/branch-1.2/ambari-web/app/assets/data/wizard/stack/hdp/version01/HBASE.json
deleted file mode 100644
index 008b71f..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/stack/hdp/version01/HBASE.json
+++ /dev/null
@@ -1,55 +0,0 @@
-{
-  "name" : "HBASE",
-  "version" : "0.94.2.1-1",
-  "user" : "mapred",
-  "comment" : "This is comment for HBASE service",
-  "properties" : [ {
-    "name" : "hbase.cluster.distributed",
-    "value" : "true",
-    "description" : "The mode the cluster will be in. Possible values are\n      false for standalone mode and true for distributed mode.  If\n      false, startup will run all HBase and ZooKeeper daemons together\n      in the one JVM.\n    ",
-    "filename" : "hbase-site.xml"
-  }, {
-    "name" : "hbase.master.lease.thread.wakefrequency",
-    "value" : "3000",
-    "description" : "The interval between checks for expired region server leases.\n    This value has been reduced due to the other reduced values above so that\n    the master will notice a dead region server sooner. The default is 15 seconds.\n    ",
-    "filename" : "hbase-site.xml"
-  }, {
-    "name" : "hbase.superuser",
-    "value" : "hbase",
-    "description" : "List of users or groups (comma-separated), who are allowed\n    full privileges, regardless of stored ACLs, across the cluster.\n    Only used when HBase security is enabled.\n    ",
-    "filename" : "hbase-site.xml"
-  }, {
-    "name" : "hbase.zookeeper.property.clientPort",
-    "value" : "2181",
-    "description" : "Property from ZooKeeper's config zoo.cfg.\n    The port at which the clients will connect.\n    ",
-    "filename" : "hbase-site.xml"
-  }, {
-    "name" : "hbase.regionserver.optionalcacheflushinterval",
-    "value" : "10000",
-    "description" : "\n      Amount of time to wait since the last time a region was flushed before\n      invoking an optional cache flush. Default 60,000.\n    ",
-    "filename" : "hbase-site.xml"
-  } ],
-  "components" : [ {
-    "name" : "HBASE_MASTER",
-    "category" : "MASTER",
-    "client" : false,
-    "master" : true
-  }, {
-    "name" : "HBASE_REGIONSERVER",
-    "category" : "SLAVE",
-    "client" : false,
-    "master" : false
-  }, {
-    "name" : "HBASE_CLIENT",
-    "category" : "CLIENT",
-    "client" : true,
-    "master" : false
-  } ],
-  "clientOnlyService" : false,
-  "clientComponent" : {
-    "name" : "HBASE_CLIENT",
-    "category" : "CLIENT",
-    "client" : true,
-    "master" : false
-  }
-}
\ No newline at end of file
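
Per-service documents such as HBASE.json above add a "properties" array in which every default carries the *-site.xml file it belongs to. Grouping by that filename reconstructs the per-file configuration; a short sketch:

    from collections import defaultdict

    def properties_by_file(service):
        """Group a service's default properties by target config file."""
        grouped = defaultdict(dict)
        for prop in service["properties"]:
            grouped[prop["filename"]][prop["name"]] = prop["value"]
        return dict(grouped)

    # With the HBASE document above:
    # properties_by_file(hbase)["hbase-site.xml"]["hbase.superuser"] == "hbase"
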
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/stack/hdp/version01/HCATALOG.json b/branch-1.2/ambari-web/app/assets/data/wizard/stack/hdp/version01/HCATALOG.json
deleted file mode 100644
index c6bb7b6..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/stack/hdp/version01/HCATALOG.json
+++ /dev/null
@@ -1,20 +0,0 @@
-{
-  "name" : "HCATALOG",
-  "version" : "0.4.0.1-1",
-  "user" : "root",
-  "comment" : "This is comment for HCATALOG service",
-  "properties" : [ ],
-  "components" : [ {
-    "name" : "HCAT",
-    "category" : "CLIENT",
-    "client" : true,
-    "master" : false
-  } ],
-  "clientOnlyService" : true,
-  "clientComponent" : {
-    "name" : "HCAT",
-    "category" : "CLIENT",
-    "client" : true,
-    "master" : false
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/stack/hdp/version01/HDFS.json b/branch-1.2/ambari-web/app/assets/data/wizard/stack/hdp/version01/HDFS.json
deleted file mode 100644
index ef101e0..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/stack/hdp/version01/HDFS.json
+++ /dev/null
@@ -1,200 +0,0 @@
-{
-  "name" : "HDFS",
-  "version" : "1.1.0.1-1",
-  "user" : "root",
-  "comment" : "This is comment for HDFS service",
-  "properties" : [ {
-    "name" : "dfs.replication.max",
-    "value" : "50",
-    "description" : "Maximal block replication.\n  ",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.heartbeat.interval",
-    "value" : "3",
-    "description" : "Determines datanode heartbeat interval in seconds.",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.safemode.threshold.pct",
-    "value" : "1.0f",
-    "description" : "\n        Specifies the percentage of blocks that should satisfy\n        the minimal replication requirement defined by dfs.replication.min.\n        Values less than or equal to 0 mean not to start in safe mode.\n        Values greater than 1 will make safe mode permanent.\n        ",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.balance.bandwidthPerSec",
-    "value" : "6250000",
-    "description" : "\n        Specifies the maximum amount of bandwidth that each datanode\n        can utilize for the balancing purpose in terms of\n        the number of bytes per second.\n  ",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.block.size",
-    "value" : "134217728",
-    "description" : "The default block size for new files.",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.datanode.ipc.address",
-    "value" : "0.0.0.0:8010",
-    "description" : "\nThe datanode ipc server address and port.\nIf the port is 0 then the server will start on a free port.\n",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.blockreport.initialDelay",
-    "value" : "120",
-    "description" : "Delay for first block report in seconds.",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.datanode.du.pct",
-    "value" : "0.85f",
-    "description" : "When calculating remaining space, only use this percentage of the real available space\n",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.namenode.handler.count",
-    "value" : "40",
-    "description" : "The number of server threads for the namenode.",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.datanode.max.xcievers",
-    "value" : "1024",
-    "description" : "PRIVATE CONFIG VARIABLE",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.umaskmode",
-    "value" : "077",
-    "description" : "\nThe octal umask used when creating files and directories.\n",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.web.ugi",
-    "value" : "gopher,gopher",
-    "description" : "The user account used by the web interface.\nSyntax: USERNAME,GROUP1,GROUP2, ...\n",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.permissions",
-    "value" : "true",
-    "description" : "\nIf \"true\", enable permission checking in HDFS.\nIf \"false\", permission checking is turned off,\nbut all other behavior is unchanged.\nSwitching from one parameter value to the other does not change the mode,\nowner or group of files or directories.\n",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.permissions.supergroup",
-    "value" : "hdfs",
-    "description" : "The name of the group of super-users.",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.namenode.handler.count",
-    "value" : "100",
-    "description" : "Added to grow Queue size so that more client connections are allowed",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "ipc.server.max.response.size",
-    "value" : "5242880",
-    "description" : null,
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.block.access.token.enable",
-    "value" : "true",
-    "description" : "\nIf \"true\", access tokens are used as capabilities for accessing datanodes.\nIf \"false\", no access tokens are checked on accessing datanodes.\n",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.secondary.https.port",
-    "value" : "50490",
-    "description" : "The https port where secondary-namenode binds",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.https.port",
-    "value" : "50470",
-    "description" : "The https port where namenode binds",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.access.time.precision",
-    "value" : "0",
-    "description" : "The access time for HDFS file is precise upto this value.\n               The default value is 1 hour. Setting a value of 0 disables\n               access times for HDFS.\n  ",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.cluster.administrators",
-    "value" : " hdfs",
-    "description" : "ACL for who all can view the default servlets in the HDFS",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "ipc.server.read.threadpool.size",
-    "value" : "5",
-    "description" : null,
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "io.file.buffer.size",
-    "value" : "131072",
-    "description" : "The size of buffer for use in sequence files.\n  The size of this buffer should probably be a multiple of hardware\n  page size (4096 on Intel x86), and it determines how much data is\n  buffered during read and write operations.",
-    "filename" : "core-site.xml"
-  }, {
-    "name" : "io.serializations",
-    "value" : "org.apache.hadoop.io.serializer.WritableSerialization",
-    "description" : null,
-    "filename" : "core-site.xml"
-  }, {
-    "name" : "io.compression.codec.lzo.class",
-    "value" : "com.hadoop.compression.lzo.LzoCodec",
-    "description" : "The implementation for lzo codec.",
-    "filename" : "core-site.xml"
-  }, {
-    "name" : "fs.trash.interval",
-    "value" : "360",
-    "description" : "Number of minutes between trash checkpoints.\n  If zero, the trash feature is disabled.\n  ",
-    "filename" : "core-site.xml"
-  }, {
-    "name" : "fs.checkpoint.edits.dir",
-    "value" : "${fs.checkpoint.dir}",
-    "description" : "Determines where on the local filesystem the DFS secondary\n        name node should store the temporary edits to merge.\n        If this is a comma-delimited list of directoires then teh edits is\n        replicated in all of the directoires for redundancy.\n        Default value is same as fs.checkpoint.dir\n    ",
-    "filename" : "core-site.xml"
-  }, {
-    "name" : "fs.checkpoint.period",
-    "value" : "21600",
-    "description" : "The number of seconds between two periodic checkpoints.\n  ",
-    "filename" : "core-site.xml"
-  }, {
-    "name" : "fs.checkpoint.size",
-    "value" : "536870912",
-    "description" : "The size of the current edit log (in bytes) that triggers\n       a periodic checkpoint even if the fs.checkpoint.period hasn't expired.\n  ",
-    "filename" : "core-site.xml"
-  }, {
-    "name" : "ipc.client.idlethreshold",
-    "value" : "8000",
-    "description" : "Defines the threshold number of connections after which\n               connections will be inspected for idleness.\n  ",
-    "filename" : "core-site.xml"
-  }, {
-    "name" : "ipc.client.connection.maxidletime",
-    "value" : "30000",
-    "description" : "The maximum time after which a client will bring down the\n               connection to the server.\n  ",
-    "filename" : "core-site.xml"
-  }, {
-    "name" : "ipc.client.connect.max.retries",
-    "value" : "50",
-    "description" : "Defines the maximum number of retries for IPC connections.",
-    "filename" : "core-site.xml"
-  }, {
-    "name" : "webinterface.private.actions",
-    "value" : "false",
-    "description" : " If set to true, the web interfaces of JT and NN may contain\n                actions, such as kill job, delete file, etc., that should\n                not be exposed to public. Enable this option if the interfaces\n                are only reachable by those who have the right authorization.\n  ",
-    "filename" : "core-site.xml"
-  } ],
-  "components" : [ {
-    "name" : "NAMENODE",
-    "category" : "MASTER",
-    "client" : false,
-    "master" : true
-  }, {
-    "name" : "DATANODE",
-    "category" : "SLAVE",
-    "client" : false,
-    "master" : false
-  }, {
-    "name" : "SECONDARY_NAMENODE",
-    "category" : "MASTER",
-    "client" : false,
-    "master" : true
-  }, {
-    "name" : "HDFS_CLIENT",
-    "category" : "CLIENT",
-    "client" : true,
-    "master" : false
-  } ],
-  "clientOnlyService" : false,
-  "clientComponent" : {
-    "name" : "HDFS_CLIENT",
-    "category" : "CLIENT",
-    "client" : true,
-    "master" : false
-  }
-}
\ No newline at end of file
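
The HDFS fixture above interleaves hdfs-site.xml and core-site.xml entries in a single flat properties array; the filename field is what lets a consumer regroup them per config file (note also that dfs.namenode.handler.count appears twice, so later entries need a defined precedence). A regrouping sketch, assuming the hypothetical StackService/StackProperty types from the earlier sketch:

// Regroup the flat properties list into per-file key/value maps (sketch).
function groupByFile(svc: StackService): Map<string, Map<string, string>> {
  const files = new Map<string, Map<string, string>>();
  for (const p of svc.properties) {
    let file = files.get(p.filename);
    if (file === undefined) {
      file = new Map<string, string>();
      files.set(p.filename, file);
    }
    file.set(p.name, p.value); // Map.set overwrites, so the last duplicate wins
  }
  return files;
}
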
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/stack/hdp/version01/HIVE.json b/branch-1.2/ambari-web/app/assets/data/wizard/stack/hdp/version01/HIVE.json
deleted file mode 100644
index 855a9f8..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/stack/hdp/version01/HIVE.json
+++ /dev/null
@@ -1,80 +0,0 @@
-{
-  "name" : "HIVE",
-  "version" : "0.9.0.1-1",
-  "user" : "root",
-  "comment" : "This is comment for HIVE service",
-  "properties" : [ {
-    "name" : "hive.metastore.local",
-    "value" : "false",
-    "description" : "controls whether to connect to remove metastore server or\n    open a new metastore server in Hive Client JVM",
-    "filename" : "hive-site.xml"
-  }, {
-    "name" : "javax.jdo.option.ConnectionDriverName",
-    "value" : "com.mysql.jdbc.Driver",
-    "description" : "Driver class name for a JDBC metastore",
-    "filename" : "hive-site.xml"
-  }, {
-    "name" : "hive.metastore.warehouse.dir",
-    "value" : "/apps/hive/warehouse",
-    "description" : "location of default database for the warehouse",
-    "filename" : "hive-site.xml"
-  }, {
-    "name" : "hive.metastore.cache.pinobjtypes",
-    "value" : "Table,Database,Type,FieldSchema,Order",
-    "description" : "List of comma separated metastore object types that should be pinned in the cache",
-    "filename" : "hive-site.xml"
-  }, {
-    "name" : "hive.semantic.analyzer.factory.impl",
-    "value" : "org.apache.hivealog.cli.HCatSemanticAnalyzerFactory",
-    "description" : "controls which SemanticAnalyzerFactory implemenation class is used by CLI",
-    "filename" : "hive-site.xml"
-  }, {
-    "name" : "hadoop.clientside.fs.operations",
-    "value" : "true",
-    "description" : "FS operations are owned by client",
-    "filename" : "hive-site.xml"
-  }, {
-    "name" : "hive.metastore.client.socket.timeout",
-    "value" : "60",
-    "description" : "MetaStore Client socket timeout in seconds",
-    "filename" : "hive-site.xml"
-  }, {
-    "name" : "hive.metastore.execute.setugi",
-    "value" : "true",
-    "description" : "In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and     server sides. Further note that its best effort. If client sets its to true and server sets it to false, client setting will be ignored.",
-    "filename" : "hive-site.xml"
-  }, {
-    "name" : "hive.security.authorization.enabled",
-    "value" : "true",
-    "description" : "enable or disable the hive client authorization",
-    "filename" : "hive-site.xml"
-  }, {
-    "name" : "hive.security.authorization.manager",
-    "value" : "org.apache.hcatalog.security.HdfsAuthorizationProvider",
-    "description" : "the hive client authorization manager class name.\n    The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  ",
-    "filename" : "hive-site.xml"
-  } ],
-  "components" : [ {
-    "name" : "HIVE_SERVER",
-    "category" : "MASTER",
-    "client" : false,
-    "master" : true
-  }, {
-    "name" : "MYSQL_SERVER",
-    "category" : "MASTER",
-    "client" : false,
-    "master" : true
-  }, {
-    "name" : "HIVE_CLIENT",
-    "category" : "CLIENT",
-    "client" : true,
-    "master" : false
-  } ],
-  "clientOnlyService" : false,
-  "clientComponent" : {
-    "name" : "HIVE_CLIENT",
-    "category" : "CLIENT",
-    "client" : true,
-    "master" : false
-  }
-}
\ No newline at end of file
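
As with HDFS, the HIVE fixture is not client-only but still repeats one CLIENT component verbatim under clientComponent. A consumer could derive both fields from the components list instead of trusting the duplication; a sketch under the same assumed types:

// Derive the client designation from the components list (sketch).
function clientComponentOf(svc: StackService): StackComponent | undefined {
  return svc.components.find(c => c.category === "CLIENT" && c.client && !c.master);
}

function isClientOnly(svc: StackService): boolean {
  // A service is client-only exactly when every component is a CLIENT.
  return svc.components.every(c => c.category === "CLIENT");
}
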
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/stack/hdp/version01/MAPREDUCE.json b/branch-1.2/ambari-web/app/assets/data/wizard/stack/hdp/version01/MAPREDUCE.json
deleted file mode 100644
index 93daacb..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/stack/hdp/version01/MAPREDUCE.json
+++ /dev/null
@@ -1,225 +0,0 @@
-{
-  "name" : "MAPREDUCE",
-  "version" : "1.1.0.1-1",
-  "user" : "mapred",
-  "comment" : "This is comment for MAPREDUCE service",
-  "properties" : [ {
-    "name" : "io.sort.record.percent",
-    "value" : ".2",
-    "description" : "No description",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "io.sort.factor",
-    "value" : "100",
-    "description" : "No description",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.tasktracker.tasks.sleeptime-before-sigkill",
-    "value" : "250",
-    "description" : "Normally, this is the amount of time before killing\n  processes, and the recommended-default is 5.000 seconds - a value of\n  5000 here.  In this case, we are using it solely to blast tasks before\n  killing them, and killing them very quickly (1/4 second) to guarantee\n  that we do not leave VMs around for later jobs.\n  ",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.job.tracker.handler.count",
-    "value" : "50",
-    "description" : "\n    The number of server threads for the JobTracker. This should be roughly\n    4% of the number of tasktracker nodes.\n    ",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapreduce.cluster.administrators",
-    "value" : " hadoop",
-    "description" : null,
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.reduce.parallel.copies",
-    "value" : "30",
-    "description" : "No description",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "tasktracker.http.threads",
-    "value" : "50",
-    "description" : null,
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.map.tasks.speculative.execution",
-    "value" : "false",
-    "description" : "If true, then multiple instances of some map tasks\n               may be executed in parallel.",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.reduce.tasks.speculative.execution",
-    "value" : "false",
-    "description" : "If true, then multiple instances of some reduce tasks\n               may be executed in parallel.",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.reduce.slowstart.completed.maps",
-    "value" : "0.05",
-    "description" : null,
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.inmem.merge.threshold",
-    "value" : "1000",
-    "description" : "The threshold, in terms of the number of files\n  for the in-memory merge process. When we accumulate threshold number of files\n  we initiate the in-memory merge and spill to disk. A value of 0 or less than\n  0 indicates we want to DON'T have any threshold and instead depend only on\n  the ramfs's memory consumption to trigger the merge.\n  ",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.job.shuffle.merge.percent",
-    "value" : "0.66",
-    "description" : "The usage threshold at which an in-memory merge will be\n  initiated, expressed as a percentage of the total memory allocated to\n  storing in-memory map outputs, as defined by\n  mapred.job.shuffle.input.buffer.percent.\n  ",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.job.shuffle.input.buffer.percent",
-    "value" : "0.7",
-    "description" : "The percentage of memory to be allocated from the maximum heap\n  size to storing map outputs during the shuffle.\n  ",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.output.compression.type",
-    "value" : "BLOCK",
-    "description" : "If the job outputs are to compressed as SequenceFiles, how should\n               they be compressed? Should be one of NONE, RECORD or BLOCK.\n  ",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.jobtracker.completeuserjobs.maximum",
-    "value" : "0",
-    "description" : null,
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.jobtracker.restart.recover",
-    "value" : "false",
-    "description" : "\"true\" to enable (job) recovery upon restart,\n               \"false\" to start afresh\n    ",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.job.reduce.input.buffer.percent",
-    "value" : "0.0",
-    "description" : "The percentage of memory- relative to the maximum heap size- to\n  retain map outputs during the reduce. When the shuffle is concluded, any\n  remaining map outputs in memory must consume less than this threshold before\n  the reduce can begin.\n  ",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapreduce.reduce.input.limit",
-    "value" : "10737418240",
-    "description" : "The limit on the input size of the reduce. (This value\n  is 10 Gb.)  If the estimated input size of the reduce is greater than\n  this value, job is failed. A value of -1 means that there is no limit\n  set. ",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.task.timeout",
-    "value" : "600000",
-    "description" : "The number of milliseconds before a task will be\n  terminated if it neither reads an input, writes an output, nor\n  updates its status string.\n  ",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "jetty.connector",
-    "value" : "org.mortbay.jetty.nio.SelectChannelConnector",
-    "description" : "No description",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.child.root.logger",
-    "value" : "INFO,TLA",
-    "description" : null,
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.max.tracker.blacklists",
-    "value" : "16",
-    "description" : "\n    if node is reported blacklisted by 16 successful jobs within timeout-window, it will be graylisted\n  ",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.healthChecker.interval",
-    "value" : "135000",
-    "description" : null,
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.healthChecker.script.timeout",
-    "value" : "60000",
-    "description" : null,
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.job.tracker.persist.jobstatus.active",
-    "value" : "false",
-    "description" : "Indicates if persistency of job status information is\n  active or not.\n  ",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.job.tracker.persist.jobstatus.hours",
-    "value" : "1",
-    "description" : "The number of hours job status information is persisted in DFS.\n    The job status information will be available after it drops of the memory\n    queue and between jobtracker restarts. With a zero value the job status\n    information is not persisted at all in DFS.\n  ",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.jobtracker.retirejob.check",
-    "value" : "10000",
-    "description" : null,
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.jobtracker.retirejob.interval",
-    "value" : "0",
-    "description" : null,
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.job.tracker.history.completed.location",
-    "value" : "/mapred/history/done",
-    "description" : "No description",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapreduce.fileoutputcommitter.marksuccessfuljobs",
-    "value" : "false",
-    "description" : null,
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.job.reuse.jvm.num.tasks",
-    "value" : "1",
-    "description" : "\n    How many tasks to run per jvm. If set to -1, there is no limit\n  ",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "hadoop.job.history.user.location",
-    "value" : "none",
-    "description" : null,
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapreduce.jobtracker.staging.root.dir",
-    "value" : "/user",
-    "description" : "The Path prefix for where the staging directories should be placed. The next level is always the user's\n   name. It is a path in the default file system.",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapreduce.tasktracker.group",
-    "value" : "hadoop",
-    "description" : "The group that the task controller uses for accessing the task controller. The mapred user must be a member and users should *not* be members.",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapreduce.jobtracker.split.metainfo.maxsize",
-    "value" : "50000000",
-    "description" : "If the size of the split metainfo file is larger than this, the JobTracker will fail the job during\n    initialize.\n   ",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapreduce.history.server.embedded",
-    "value" : "false",
-    "description" : "Should job history server be embedded within Job tracker\nprocess",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.jobtracker.blacklist.fault-timeout-window",
-    "value" : "180",
-    "description" : "\n    3-hour sliding window (value is in minutes)\n  ",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.jobtracker.blacklist.fault-bucket-width",
-    "value" : "15",
-    "description" : "\n    15-minute bucket size (value is in minutes)\n  ",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.queue.names",
-    "value" : "default",
-    "description" : " Comma separated list of queues configured for this jobtracker.",
-    "filename" : "mapred-site.xml"
-  } ],
-  "components" : [ {
-    "name" : "JOBTRACKER",
-    "category" : "MASTER",
-    "client" : false,
-    "master" : true
-  }, {
-    "name" : "TASKTRACKER",
-    "category" : "SLAVE",
-    "client" : false,
-    "master" : false
-  }, {
-    "name" : "MAPREDUCE_CLIENT",
-    "category" : "CLIENT",
-    "client" : true,
-    "master" : false
-  } ],
-  "clientOnlyService" : false,
-  "clientComponent" : {
-    "name" : "MAPREDUCE_CLIENT",
-    "category" : "CLIENT",
-    "client" : true,
-    "master" : false
-  }
-}
\ No newline at end of file
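
Across these fixtures the category field is the load-bearing one for the install wizard: MASTER components such as JOBTRACKER get individual host assignments, SLAVE components such as TASKTRACKER fan out across hosts, and CLIENT components ride along. A small partitioning sketch, same assumed types:

// Partition a service's components by category (sketch).
type Category = "MASTER" | "SLAVE" | "CLIENT";

function byCategory(svc: StackService): Record<Category, StackComponent[]> {
  const out: Record<Category, StackComponent[]> = { MASTER: [], SLAVE: [], CLIENT: [] };
  for (const c of svc.components) {
    out[c.category].push(c);
  }
  return out;
}
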
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/stack/hdp/version01/OOZIE.json b/branch-1.2/ambari-web/app/assets/data/wizard/stack/hdp/version01/OOZIE.json
deleted file mode 100644
index 566dacc..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/stack/hdp/version01/OOZIE.json
+++ /dev/null
@@ -1,640 +0,0 @@
-{
-  "name" : "OOZIE",
-  "version" : "3.2.0.1-1",
-  "user" : "root",
-  "comment" : "This is comment for OOZIE service",
-  "properties" : [ {
-    "name" : "oozie.base.url",
-    "value" : "http://localhost:8080/oozie",
-    "description" : "\n      Base Oozie URL.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.system.id",
-    "value" : "oozie-${user.name}",
-    "description" : "\n      The Oozie system ID.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.systemmode",
-    "value" : "NORMAL",
-    "description" : "\n      System mode for Oozie at startup.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.delete.runtime.dir.on.shutdown",
-    "value" : "true",
-    "description" : "\n      If the runtime directory should be kept after Oozie shutdowns down.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.services",
-    "value" : "\n      org.apache.oozie.service.SchedulerService,\n      org.apache.oozie.service.InstrumentationService,\n      org.apache.oozie.service.CallableQueueService,\n      org.apache.oozie.service.UUIDService,\n      org.apache.oozie.service.ELService,\n      org.apache.oozie.service.AuthorizationService,\n      org.apache.oozie.service.HadoopAccessorService,\n      org.apache.oozie.service.MemoryLocksService,\n      org.apache.oozie.service.DagXLogInfoService,\n      org.apache.oozie.service.SchemaService,\n      org.apache.oozie.service.LiteWorkflowAppService,\n      org.apache.oozie.service.JPAService,\n      org.apache.oozie.service.StoreService,\n      org.apache.oozie.service.CoordinatorStoreService,\n      org.apache.oozie.service.SLAStoreService,\n      org.apache.oozie.service.DBLiteWorkflowStoreService,\n      org.apache.oozie.service.CallbackService,\n      org.apache.oozie.service.ActionService,\n      org.apache.oozie.service.ActionCheckerService,\n      org.apache.oozie.service.RecoveryService,\n      org.apache.oozie.service.PurgeService,\n      org.apache.oozie.service.CoordinatorEngineService,\n      org.apache.oozie.service.BundleEngineService,\n      org.apache.oozie.service.DagEngineService,\n      org.apache.oozie.service.CoordMaterializeTriggerService,\n      org.apache.oozie.service.StatusTransitService,\n      org.apache.oozie.service.PauseTransitService,\n      org.apache.oozie.service.GroupsService,\n      org.apache.oozie.service.ProxyUserService\n    ",
-    "description" : "\n      All services to be created and managed by Oozie Services singleton.\n      Class names must be separated by commas.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.ConfigurationService.ignore.system.properties",
-    "value" : "\n      oozie.service.AuthorizationService.security.enabled\n    ",
-    "description" : "\n      Specifies \"oozie.*\" properties to cannot be overriden via Java system properties.\n      Property names must be separted by commas.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.SchedulerService.threads",
-    "value" : "5",
-    "description" : "\n      The number of threads to be used by the SchedulerService to run deamon tasks.\n      If maxed out, scheduled daemon tasks will be queued up and delayed until threads become available.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.AuthorizationService.authorization.enabled",
-    "value" : "false",
-    "description" : "\n      Specifies whether security (user name/admin role) is enabled or not.\n      If disabled any user can manage Oozie system and manage any job.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.AuthorizationService.default.group.as.acl",
-    "value" : "false",
-    "description" : "\n      Enables old behavior where the User's default group is the job's ACL.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.InstrumentationService.logging.interval",
-    "value" : "60",
-    "description" : "\n      Interval, in seconds, at which instrumentation should be logged by the InstrumentationService.\n      If set to 0 it will not log instrumentation data.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.PurgeService.older.than",
-    "value" : "30",
-    "description" : "\n      Completed workflow jobs older than this value, in days, will be purged by the PurgeService.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.PurgeService.coord.older.than",
-    "value" : "7",
-    "description" : "\n      Completed coordinator jobs older than this value, in days, will be purged by the PurgeService.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.PurgeService.bundle.older.than",
-    "value" : "7",
-    "description" : "\n      Completed bundle jobs older than this value, in days, will be purged by the PurgeService.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.PurgeService.purge.limit",
-    "value" : "100",
-    "description" : "\n      Completed Actions purge - limit each purge to this value\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.PurgeService.purge.interval",
-    "value" : "3600",
-    "description" : "\n      Interval at which the purge service will run, in seconds.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.RecoveryService.wf.actions.older.than",
-    "value" : "120",
-    "description" : "\n      Age of the actions which are eligible to be queued for recovery, in seconds.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.RecoveryService.callable.batch.size",
-    "value" : "10",
-    "description" : "\n      This value determines the number of callable which will be batched together\n      to be executed by a single thread.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.RecoveryService.interval",
-    "value" : "60",
-    "description" : "\n      Interval at which the RecoverService will run, in seconds.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.RecoveryService.coord.older.than",
-    "value" : "600",
-    "description" : "\n      Age of the Coordinator jobs or actions which are eligible to be queued for recovery, in seconds.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.RecoveryService.bundle.older.than",
-    "value" : "600",
-    "description" : "\n      Age of the Bundle jobs which are eligible to be queued for recovery, in seconds.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.CallableQueueService.queue.size",
-    "value" : "10000",
-    "description" : "Max callable queue size",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.CallableQueueService.threads",
-    "value" : "10",
-    "description" : "Number of threads used for executing callables",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.CallableQueueService.callable.concurrency",
-    "value" : "3",
-    "description" : "\n      Maximum concurrency for a given callable type.\n      Each command is a callable type (submit, start, run, signal, job, jobs, suspend,resume, etc).\n      Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).\n      All commands that use action executors (action-start, action-end, action-kill and action-check) use\n      the action type as the callable type.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.CallableQueueService.callable.next.eligible",
-    "value" : "true",
-    "description" : "\n      If true, when a callable in the queue has already reached max concurrency,\n      Oozie continuously find next one which has not yet reach max concurrency.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.CallableQueueService.InterruptMapMaxSize",
-    "value" : "500",
-    "description" : "\n      Maximum Size of the Interrupt Map, the interrupt element will not be inserted in the map if exceeded the size.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.CallableQueueService.InterruptTypes",
-    "value" : "\n      kill,resume,suspend,bundle_kill,bundle_resume,bundle_suspend,coord_kill,coord_change,coord_resume,coord_suspend\n    ",
-    "description" : "\n      Getting the types of XCommands that are considered to be of Interrupt type\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.CoordMaterializeTriggerService.lookup.interval\n    ",
-    "value" : "300",
-    "description" : "Coordinator Job Lookup trigger command is scheduled at\n      this \"interval\" (in seconds).\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.CoordMaterializeTriggerService.materialization.window\n    ",
-    "value" : "3600",
-    "description" : "Coordinator Job Lookup command materialized each job for\n      this next \"window\" duration\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.CoordMaterializeTriggerService.callable.batch.size",
-    "value" : "10",
-    "description" : "\n      This value determines the number of callable which will be batched together\n      to be executed by a single thread.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.CoordMaterializeTriggerService.materialization.system.limit",
-    "value" : "50",
-    "description" : "\n      This value determines the number of coordinator jobs to be materialized at a given time.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.coord.normal.default.timeout\n    ",
-    "value" : "10080",
-    "description" : "Default timeout for a coordinator action input check (in minutes) for normal job.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.coord.default.max.timeout\n    ",
-    "value" : "86400",
-    "description" : "Default maximum timeout for a coordinator action input check (in minutes). 86400= 60days\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.coord.input.check.requeue.interval\n    ",
-    "value" : "60000",
-    "description" : "Command re-queue interval for coordinator data input check (in millisecond).\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.coord.default.concurrency\n    ",
-    "value" : "1",
-    "description" : "Default concurrency for a coordinator job to determine how many maximum action should\n      be executed at the same time. -1 means infinite concurrency.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.coord.default.throttle\n    ",
-    "value" : "12",
-    "description" : "Default throttle for a coordinator job to determine how many maximum action should\n      be in WAITING state at the same time.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.coord.materialization.throttling.factor\n    ",
-    "value" : "0.05",
-    "description" : "Determine how many maximum actions should be in WAITING state for a single job at any time. The value\n      is calculated by\n      this factor X the total queue size.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.ELService.groups",
-    "value" : "\n      workflow,wf-sla-submit,coord-job-submit-freq,coord-job-submit-nofuncs,coord-job-submit-data,coord-job-submit-instances,coord-sla-submit,coord-action-create,coord-action-create-inst,coord-sla-create,coord-action-start\n    ",
-    "description" : "List of groups for different ELServices",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.ELService.constants.workflow",
-    "value" : "\n      KB=org.apache.oozie.util.ELConstantsFunctions#KB,\n      MB=org.apache.oozie.util.ELConstantsFunctions#MB,\n      GB=org.apache.oozie.util.ELConstantsFunctions#GB,\n      TB=org.apache.oozie.util.ELConstantsFunctions#TB,\n      PB=org.apache.oozie.util.ELConstantsFunctions#PB,\n      RECORDS=org.apache.oozie.action.hadoop.HadoopELFunctions#RECORDS,\n      MAP_IN=org.apache.oozie.action.hadoop.HadoopELFunctions#MAP_IN,\n      MAP_OUT=org.apache.oozie.action.hadoop.HadoopELFunctions#MAP_OUT,\n      REDUCE_IN=org.apache.oozie.action.hadoop.HadoopELFunctions#REDUCE_IN,\n      REDUCE_OUT=org.apache.oozie.action.hadoop.HadoopELFunctions#REDUCE_OUT,\n      GROUPS=org.apache.oozie.action.hadoop.HadoopELFunctions#GROUPS\n    ",
-    "description" : "\n      EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.ELService.functions.workflow",
-    "value" : "\n      firstNotNull=org.apache.oozie.util.ELConstantsFunctions#firstNotNull,\n      concat=org.apache.oozie.util.ELConstantsFunctions#concat,\n      trim=org.apache.oozie.util.ELConstantsFunctions#trim,\n      timestamp=org.apache.oozie.util.ELConstantsFunctions#timestamp,\n      urlEncode=org.apache.oozie.util.ELConstantsFunctions#urlEncode,\n      toJsonStr=org.apache.oozie.util.ELConstantsFunctions#toJsonStr,\n      toPropertiesStr=org.apache.oozie.util.ELConstantsFunctions#toPropertiesStr,\n      toConfigurationStr=org.apache.oozie.util.ELConstantsFunctions#toConfigurationStr,\n      wf:id=org.apache.oozie.DagELFunctions#wf_id,\n      wf:name=org.apache.oozie.DagELFunctions#wf_name,\n      wf:appPath=org.apache.oozie.DagELFunctions#wf_appPath,\n      wf:conf=org.apache.oozie.DagELFunctions#wf_conf,\n      wf:user=org.apache.oozie.DagELFunctions#wf_user,\n      wf:group=org.apache.oozie.DagELFunctions#wf_group,\n      wf:callback=org.apache.oozie.DagELFunctions#wf_callback,\n      wf:transition=org.apache.oozie.DagELFunctions#wf_transition,\n      wf:lastErrorNode=org.apache.oozie.DagELFunctions#wf_lastErrorNode,\n      wf:errorCode=org.apache.oozie.DagELFunctions#wf_errorCode,\n      wf:errorMessage=org.apache.oozie.DagELFunctions#wf_errorMessage,\n      wf:run=org.apache.oozie.DagELFunctions#wf_run,\n      wf:actionData=org.apache.oozie.DagELFunctions#wf_actionData,\n      wf:actionExternalId=org.apache.oozie.DagELFunctions#wf_actionExternalId,\n      wf:actionTrackerUri=org.apache.oozie.DagELFunctions#wf_actionTrackerUri,\n      wf:actionExternalStatus=org.apache.oozie.DagELFunctions#wf_actionExternalStatus,\n      hadoop:counters=org.apache.oozie.action.hadoop.HadoopELFunctions#hadoop_counters,\n      fs:exists=org.apache.oozie.action.hadoop.FsELFunctions#fs_exists,\n      fs:isDir=org.apache.oozie.action.hadoop.FsELFunctions#fs_isDir,\n      fs:dirSize=org.apache.oozie.action.hadoop.FsELFunctions#fs_dirSize,\n      fs:fileSize=org.apache.oozie.action.hadoop.FsELFunctions#fs_fileSize,\n      fs:blockSize=org.apache.oozie.action.hadoop.FsELFunctions#fs_blockSize\n    ",
-    "description" : "\n      EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.WorkflowAppService.WorkflowDefinitionMaxLength",
-    "value" : "100000",
-    "description" : "\n      The maximum length of the workflow definition in bytes\n      An error will be reported if the length exceeds the given maximum\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.ELService.ext.functions.workflow",
-    "value" : "\n    ",
-    "description" : "\n      EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.\n      This property is a convenience property to add extensions to the built in executors without having to\n      include all the built in ones.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.ELService.constants.wf-sla-submit",
-    "value" : "\n      MINUTES=org.apache.oozie.util.ELConstantsFunctions#SUBMIT_MINUTES,\n      HOURS=org.apache.oozie.util.ELConstantsFunctions#SUBMIT_HOURS,\n      DAYS=org.apache.oozie.util.ELConstantsFunctions#SUBMIT_DAYS\n    ",
-    "description" : "\n      EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.ELService.ext.functions.wf-sla-submit",
-    "value" : "\n    ",
-    "description" : "\n      EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.\n      This property is a convenience property to add extensions to the built in executors without having to\n      include all the built in ones.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.ELService.functions.coord-job-submit-freq",
-    "value" : "\n      coord:days=org.apache.oozie.coord.CoordELFunctions#ph1_coord_days,\n      coord:months=org.apache.oozie.coord.CoordELFunctions#ph1_coord_months,\n      coord:hours=org.apache.oozie.coord.CoordELFunctions#ph1_coord_hours,\n      coord:minutes=org.apache.oozie.coord.CoordELFunctions#ph1_coord_minutes,\n      coord:endOfDays=org.apache.oozie.coord.CoordELFunctions#ph1_coord_endOfDays,\n      coord:endOfMonths=org.apache.oozie.coord.CoordELFunctions#ph1_coord_endOfMonths,\n      coord:conf=org.apache.oozie.coord.CoordELFunctions#coord_conf,\n      coord:user=org.apache.oozie.coord.CoordELFunctions#coord_user\n    ",
-    "description" : "\n      EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.ELService.ext.functions.coord-job-submit-freq",
-    "value" : "\n    ",
-    "description" : "\n      EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.\n      This property is a convenience property to add extensions to the built in executors without having to\n      include all the built in ones.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.ELService.constants.coord-job-submit-nofuncs",
-    "value" : "\n      MINUTE=org.apache.oozie.coord.CoordELConstants#SUBMIT_MINUTE,\n      HOUR=org.apache.oozie.coord.CoordELConstants#SUBMIT_HOUR,\n      DAY=org.apache.oozie.coord.CoordELConstants#SUBMIT_DAY,\n      MONTH=org.apache.oozie.coord.CoordELConstants#SUBMIT_MONTH,\n      YEAR=org.apache.oozie.coord.CoordELConstants#SUBMIT_YEAR\n    ",
-    "description" : "\n      EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.ELService.functions.coord-job-submit-nofuncs",
-    "value" : "\n      coord:conf=org.apache.oozie.coord.CoordELFunctions#coord_conf,\n      coord:user=org.apache.oozie.coord.CoordELFunctions#coord_user\n    ",
-    "description" : "\n      EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.ELService.functions.coord-job-submit-instances",
-    "value" : "\n      coord:hoursInDay=org.apache.oozie.coord.CoordELFunctions#ph1_coord_hoursInDay_echo,\n      coord:daysInMonth=org.apache.oozie.coord.CoordELFunctions#ph1_coord_daysInMonth_echo,\n      coord:tzOffset=org.apache.oozie.coord.CoordELFunctions#ph1_coord_tzOffset_echo,\n      coord:current=org.apache.oozie.coord.CoordELFunctions#ph1_coord_current_echo,\n      coord:latest=org.apache.oozie.coord.CoordELFunctions#ph1_coord_latest_echo,\n      coord:future=org.apache.oozie.coord.CoordELFunctions#ph1_coord_future_echo,\n      coord:formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo,\n      coord:conf=org.apache.oozie.coord.CoordELFunctions#coord_conf,\n      coord:user=org.apache.oozie.coord.CoordELFunctions#coord_user\n    ",
-    "description" : "\n      EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.ELService.ext.functions.coord-job-submit-instances",
-    "value" : "\n    ",
-    "description" : "\n      EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.\n      This property is a convenience property to add extensions to the built in executors without having to\n      include all the built in ones.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.ELService.functions.coord-job-submit-data",
-    "value" : "\n      coord:dataIn=org.apache.oozie.coord.CoordELFunctions#ph1_coord_dataIn_echo,\n      coord:dataOut=org.apache.oozie.coord.CoordELFunctions#ph1_coord_dataOut_echo,\n      coord:nominalTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_wrap,\n      coord:actualTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_actualTime_echo_wrap,\n      coord:dateOffset=org.apache.oozie.coord.CoordELFunctions#ph1_coord_dateOffset_echo,\n      coord:formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo,\n      coord:actionId=org.apache.oozie.coord.CoordELFunctions#ph1_coord_actionId_echo,\n      coord:name=org.apache.oozie.coord.CoordELFunctions#ph1_coord_name_echo,\n      coord:conf=org.apache.oozie.coord.CoordELFunctions#coord_conf,\n      coord:user=org.apache.oozie.coord.CoordELFunctions#coord_user\n    ",
-    "description" : "\n      EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.ELService.ext.functions.coord-job-submit-data",
-    "value" : "\n    ",
-    "description" : "\n      EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.\n      This property is a convenience property to add extensions to the built in executors without having to\n      include all the built in ones.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.ELService.constants.coord-sla-submit",
-    "value" : "\n      MINUTES=org.apache.oozie.coord.CoordELConstants#SUBMIT_MINUTES,\n      HOURS=org.apache.oozie.coord.CoordELConstants#SUBMIT_HOURS,\n      DAYS=org.apache.oozie.coord.CoordELConstants#SUBMIT_DAYS\n    ",
-    "description" : "\n      EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.ELService.functions.coord-sla-submit",
-    "value" : "\n      coord:nominalTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_fixed,\n      coord:conf=org.apache.oozie.coord.CoordELFunctions#coord_conf,\n      coord:user=org.apache.oozie.coord.CoordELFunctions#coord_user\n    ",
-    "description" : "\n      EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.ELService.ext.functions.coord-sla-submit",
-    "value" : "\n    ",
-    "description" : "\n      EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.\n      This property is a convenience property to add extensions to the built in executors without having to\n      include all the built in ones.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.ELService.constants.coord-action-create",
-    "value" : "\n    ",
-    "description" : "\n      EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.ELService.functions.coord-action-create",
-    "value" : "\n      coord:hoursInDay=org.apache.oozie.coord.CoordELFunctions#ph2_coord_hoursInDay,\n      coord:daysInMonth=org.apache.oozie.coord.CoordELFunctions#ph2_coord_daysInMonth,\n      coord:tzOffset=org.apache.oozie.coord.CoordELFunctions#ph2_coord_tzOffset,\n      coord:current=org.apache.oozie.coord.CoordELFunctions#ph2_coord_current,\n      coord:latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,\n      coord:future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo,\n      coord:actionId=org.apache.oozie.coord.CoordELFunctions#ph2_coord_actionId,\n      coord:name=org.apache.oozie.coord.CoordELFunctions#ph2_coord_name,\n      coord:formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime,\n      coord:conf=org.apache.oozie.coord.CoordELFunctions#coord_conf,\n      coord:user=org.apache.oozie.coord.CoordELFunctions#coord_user\n    ",
-    "description" : "\n      EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.ELService.ext.functions.coord-action-create",
-    "value" : "\n    ",
-    "description" : "\n      EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.\n      This property is a convenience property to add extensions to the built in executors without having to\n      include all the built in ones.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.ELService.constants.coord-action-create-inst",
-    "value" : "\n    ",
-    "description" : "\n      EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.ELService.functions.coord-action-create-inst",
-    "value" : "\n      coord:hoursInDay=org.apache.oozie.coord.CoordELFunctions#ph2_coord_hoursInDay,\n      coord:daysInMonth=org.apache.oozie.coord.CoordELFunctions#ph2_coord_daysInMonth,\n      coord:tzOffset=org.apache.oozie.coord.CoordELFunctions#ph2_coord_tzOffset,\n      coord:current=org.apache.oozie.coord.CoordELFunctions#ph2_coord_current_echo,\n      coord:latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,\n      coord:future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo,\n      coord:formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime,\n      coord:conf=org.apache.oozie.coord.CoordELFunctions#coord_conf,\n      coord:user=org.apache.oozie.coord.CoordELFunctions#coord_user\n    ",
-    "description" : "\n      EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.ELService.ext.functions.coord-action-create-inst",
-    "value" : "\n    ",
-    "description" : "\n      EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.\n      This property is a convenience property to add extensions to the built in executors without having to\n      include all the built in ones.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.ELService.ext.constants.coord-sla-create",
-    "value" : "\n      MINUTES=org.apache.oozie.coord.CoordELConstants#SUBMIT_MINUTES,\n      HOURS=org.apache.oozie.coord.CoordELConstants#SUBMIT_HOURS,\n      DAYS=org.apache.oozie.coord.CoordELConstants#SUBMIT_DAYS\n    ",
-    "description" : "\n      EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT.\n      This property is a convenience property to add extensions to the built in executors without having to\n      include all the built in ones.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.ELService.functions.coord-sla-create",
-    "value" : "\n      coord:nominalTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_nominalTime,\n      coord:conf=org.apache.oozie.coord.CoordELFunctions#coord_conf,\n      coord:user=org.apache.oozie.coord.CoordELFunctions#coord_user\n    ",
-    "description" : "\n      EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.ELService.ext.functions.coord-sla-create",
-    "value" : "\n    ",
-    "description" : "\n      EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.\n      This property is a convenience property to add extensions to the built in executors without having to\n      include all the built in ones.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.ELService.constants.coord-action-start",
-    "value" : "\n    ",
-    "description" : "\n      EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.ELService.functions.coord-action-start",
-    "value" : "\n      coord:hoursInDay=org.apache.oozie.coord.CoordELFunctions#ph3_coord_hoursInDay,\n      coord:daysInMonth=org.apache.oozie.coord.CoordELFunctions#ph3_coord_daysInMonth,\n      coord:tzOffset=org.apache.oozie.coord.CoordELFunctions#ph3_coord_tzOffset,\n      coord:latest=org.apache.oozie.coord.CoordELFunctions#ph3_coord_latest,\n      coord:future=org.apache.oozie.coord.CoordELFunctions#ph3_coord_future,\n      coord:dataIn=org.apache.oozie.coord.CoordELFunctions#ph3_coord_dataIn,\n      coord:dataOut=org.apache.oozie.coord.CoordELFunctions#ph3_coord_dataOut,\n      coord:nominalTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_nominalTime,\n      coord:actualTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_actualTime,\n      coord:dateOffset=org.apache.oozie.coord.CoordELFunctions#ph3_coord_dateOffset,\n      coord:formatTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_formatTime,\n      coord:actionId=org.apache.oozie.coord.CoordELFunctions#ph3_coord_actionId,\n      coord:name=org.apache.oozie.coord.CoordELFunctions#ph3_coord_name,\n      coord:conf=org.apache.oozie.coord.CoordELFunctions#coord_conf,\n      coord:user=org.apache.oozie.coord.CoordELFunctions#coord_user\n    ",
-    "description" : "\n      EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.ELService.ext.functions.coord-action-start",
-    "value" : "\n    ",
-    "description" : "\n      EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.\n      This property is a convenience property to add extensions to the built in executors without having to\n      include all the built in ones.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.UUIDService.generator",
-    "value" : "counter",
-    "description" : "\n      random : generated UUIDs will be random strings.\n      counter: generated UUIDs generated will be a counter postfixed with the system startup time.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.DBLiteWorkflowStoreService.status.metrics.collection.interval",
-    "value" : "5",
-    "description" : "Workflow Status metrics collection interval in minutes.",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.DBLiteWorkflowStoreService.status.metrics.window",
-    "value" : "3600",
-    "description" : "\n      Workflow Status metrics collection window in seconds. Workflow status will be instrumented for the window.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.db.schema.name",
-    "value" : "oozie",
-    "description" : "\n      Oozie DataBase Name\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.JPAService.create.db.schema",
-    "value" : "true",
-    "description" : "\n      Creates Oozie DB.\n\n      If set to true, it creates the DB schema if it does not exist. If the DB schema exists is a NOP.\n      If set to false, it does not create the DB schema. If the DB schema does not exist it fails start up.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.JPAService.validate.db.connection",
-    "value" : "false",
-    "description" : "\n      Validates DB connections from the DB connection pool.\n      If the 'oozie.service.JPAService.create.db.schema' property is set to true, this property is ignored.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.JPAService.validate.db.connection.eviction.interval",
-    "value" : "300000",
-    "description" : "\n      Validates DB connections from the DB connection pool.\n      When validate db connection 'TestWhileIdle' is true, the number of milliseconds to sleep\n      between runs of the idle object evictor thread.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.JPAService.validate.db.connection.eviction.num",
-    "value" : "10",
-    "description" : "\n      Validates DB connections from the DB connection pool.\n      When validate db connection 'TestWhileIdle' is true, the number of objects to examine during\n      each run of the idle object evictor thread.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.JPAService.connection.data.source",
-    "value" : "org.apache.commons.dbcp.BasicDataSource",
-    "description" : "\n      DataSource to be used for connection pooling.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.JPAService.jdbc.driver",
-    "value" : "org.apache.derby.jdbc.EmbeddedDriver",
-    "description" : "\n      JDBC driver class.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.JPAService.jdbc.url",
-    "value" : "jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true",
-    "description" : "\n      JDBC URL.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.JPAService.jdbc.username",
-    "value" : "sa",
-    "description" : "\n      DB user name.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.JPAService.pool.max.active.conn",
-    "value" : "10",
-    "description" : "\n      Max number of connections.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.SchemaService.wf.ext.schemas",
-    "value" : "oozie-sla-0.1.xsd",
-    "description" : "\n      Schemas for additional actions types.\n\n      IMPORTANT: if there are no schemas leave a 1 space string, the service trims the value,\n      if empty Configuration assumes it is NULL.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.SchemaService.coord.ext.schemas",
-    "value" : "oozie-sla-0.1.xsd",
-    "description" : "\n      Schemas for additional actions types.\n\n      IMPORTANT: if there are no schemas leave a 1 space string, the service trims the value,\n      if empty Configuration assumes it is NULL.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.CallbackService.base.url",
-    "value" : "${oozie.base.url}/callback",
-    "description" : "\n      Base callback URL used by ActionExecutors.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.servlet.CallbackServlet.max.data.len",
-    "value" : "2048",
-    "description" : "\n      Max size in characters for the action completion data output.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.external.stats.max.size",
-    "value" : "-1",
-    "description" : "\n      Max size in bytes for action stats. -1 means infinite value.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.JobCommand.job.console.url",
-    "value" : "${oozie.base.url}?job=",
-    "description" : "\n      Base console URL for a workflow job.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.ActionService.executor.classes",
-    "value" : "\n      org.apache.oozie.action.decision.DecisionActionExecutor,\n      org.apache.oozie.action.hadoop.JavaActionExecutor,\n      org.apache.oozie.action.hadoop.FsActionExecutor,\n      org.apache.oozie.action.hadoop.MapReduceActionExecutor,\n      org.apache.oozie.action.hadoop.PigActionExecutor,\n      org.apache.oozie.action.ssh.SshActionExecutor,\n      org.apache.oozie.action.oozie.SubWorkflowActionExecutor\n    ",
-    "description" : "\n      List of ActionExecutors classes (separated by commas).\n      Only action types with associated executors can be used in workflows.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.ActionCheckerService.action.check.interval",
-    "value" : "60",
-    "description" : "\n      The frequency at which the ActionCheckService will run.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.ActionCheckerService.action.check.delay",
-    "value" : "600",
-    "description" : "\n      The time, in seconds, between an ActionCheck for the same action.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.ActionCheckerService.callable.batch.size",
-    "value" : "10",
-    "description" : "\n      This value determines the number of actions which will be batched together\n      to be executed by a single thread.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.StatusTransitService.statusTransit.interval",
-    "value" : "60",
-    "description" : "\n      The frequency in seconds at which the StatusTransitService will run.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.StatusTransitService.backward.support.for.coord.status",
-    "value" : "false",
-    "description" : "\n      true, if coordinator job submits using 'uri:oozie:coordinator:0.1' namespace and wants to keep Oozie 2.x status\n      transit.\n      if set true,\n      1. SUCCEEDED state in coordinator job means materialization done.\n      2. No DONEWITHERROR state in coordinator job\n      3. No PAUSED or PREPPAUSED state in coordinator job\n      4. PREPSUSPENDED becomes SUSPENDED in coordinator job\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.PauseTransitService.PauseTransit.interval",
-    "value" : "60",
-    "description" : "\n      The frequency in seconds at which the PauseTransitService will run.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.action.retries.max",
-    "value" : "3",
-    "description" : "\n      The number of retries for executing an action in case of failure\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.action.hadoop.delete.hdfs.tmp.dir",
-    "value" : "false",
-    "description" : "\n      If set to true, it will delete temporary directory at the end of execution of map reduce action.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.action.pig.delete.hdfs.tmp.dir",
-    "value" : "false",
-    "description" : "\n      If set to true, it will delete temporary directory at the end of execution of pig action.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.action.ssh.delete.remote.tmp.dir",
-    "value" : "false",
-    "description" : "\n      If set to true, it will delete temporary directory at the end of execution of ssh action.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.action.ssh.http.command",
-    "value" : "curl",
-    "description" : "\n      Command to use for callback to oozie, normally is 'curl' or 'wget'.\n      The command must available in PATH environment variable of the USER@HOST box shell.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.action.ssh.http.command.post.options",
-    "value" : "--data-binary @#stdout --request POST --header \"content-type:text/plain\"",
-    "description" : "\n      The callback command POST options.\n      Used when the ouptut of the ssh action is captured.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.action.ssh.allow.user.at.host",
-    "value" : "true",
-    "description" : "\n      Specifies whether the user specified by the ssh action is allowed or is to be replaced\n      by the Job user\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.HadoopAccessorService.kerberos.enabled",
-    "value" : "false",
-    "description" : "\n      Indicates if Oozie is configured to use Kerberos.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "local.realm",
-    "value" : "LOCALHOST",
-    "description" : "\n      Kerberos Realm used by Oozie and Hadoop. Using 'local.realm' to be aligned with Hadoop configuration\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.HadoopAccessorService.keytab.file",
-    "value" : "${user.home}/oozie.keytab",
-    "description" : "\n      Location of the Oozie user keytab file.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.HadoopAccessorService.kerberos.principal",
-    "value" : "${user.name}/localhost@${local.realm}",
-    "description" : "\n      Kerberos principal for Oozie service.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.HadoopAccessorService.hadoop.configurations",
-    "value" : "*=/etc/hadoop/conf",
-    "description" : "\n      Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of\n      the Hadoop service (JobTracker, YARN, HDFS). The wildcard '*' configuration is\n      used when there is no exact match for an authority. The HADOOP_CONF_DIR contains\n      the relevant Hadoop *-site.xml files. If the path is relative is looked within\n      the Oozie configuration directory; though the path can be absolute (i.e. to point\n      to Hadoop client conf/ directories in the local filesystem.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.HadoopAccessorService.action.configurations",
-    "value" : "*=action-conf",
-    "description" : "\n      Comma separated AUTHORITY=ACTION_CONF_DIR, where AUTHORITY is the HOST:PORT of\n      the Hadoop MapReduce service (JobTracker, YARN). The wildcard '*' configuration is\n      used when there is no exact match for an authority. The ACTION_CONF_DIR may contain\n      ACTION.xml files where ACTION is the action type ('java', 'map-reduce', 'pig',\n      'hive', 'sqoop', etc.). If the ACTION.xml file exists, its properties will be used\n      as defaults properties for the action. If the path is relative is looked within\n      the Oozie configuration directory; though the path can be absolute (i.e. to point\n      to Hadoop client conf/ directories in the local filesystem.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.actions.main.classnames",
-    "value" : "distcp=org.apache.hadoop.tools.DistCp",
-    "description" : "\n      A list of class name mapping for Action classes\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.WorkflowAppService.system.libpath",
-    "value" : "/user/${user.name}/share/lib",
-    "description" : "\n      System library path to use for workflow applications.\n      This path is added to workflow application if their job properties sets\n      the property 'oozie.use.system.libpath' to true.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "use.system.libpath.for.mapreduce.and.pig.jobs",
-    "value" : "false",
-    "description" : "\n      If set to true, submissions of MapReduce and Pig jobs will include\n      automatically the system library path, thus not requiring users to\n      specify where the Pig JAR files are. Instead, the ones from the system\n      library path are used.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.command.default.lock.timeout",
-    "value" : "5000",
-    "description" : "\n      Default timeout (in milliseconds) for commands for acquiring an exclusive lock on an entity.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.LiteWorkflowStoreService.user.retry.max",
-    "value" : "3",
-    "description" : "\n      Automatic retry max count for workflow action is 3 in default.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.LiteWorkflowStoreService.user.retry.inteval",
-    "value" : "10",
-    "description" : "\n      Automatic retry interval for workflow action is in minutes and the default value is 10 minutes.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.LiteWorkflowStoreService.user.retry.error.code",
-    "value" : "JA008,JA009,JA017,JA018,JA019,FS009,FS008",
-    "description" : "\n      Automatic retry interval for workflow action is handled for these specified error code:\n      FS009, FS008 is file exists error when using chmod in fs action.\n      JA018 is output directory exists error in workflow map-reduce action.\n      JA019 is error while executing distcp action.\n      JA017 is job not exists error in action executor.\n      JA008 is FileNotFoundException in action executor.\n      JA009 is IOException in action executor.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.LiteWorkflowStoreService.node.def.version",
-    "value" : "_oozie_inst_v_1",
-    "description" : "\n      NodeDef default version, _oozie_inst_v_0 or _oozie_inst_v_1\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.authentication.type",
-    "value" : "simple",
-    "description" : "\n      Defines authentication used for Oozie HTTP endpoint.\n      Supported values are: simple | kerberos | #AUTHENTICATION_HANDLER_CLASSNAME#\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.authentication.token.validity",
-    "value" : "36000",
-    "description" : "\n      Indicates how long (in seconds) an authentication token is valid before it has\n      to be renewed.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.authentication.signature.secret",
-    "value" : "oozie",
-    "description" : "\n      The signature secret for signing the authentication tokens.\n      If not set a random secret is generated at startup time.\n      In order to authentiation to work correctly across multiple hosts\n      the secret must be the same across al the hosts.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.authentication.simple.anonymous.allowed",
-    "value" : "true",
-    "description" : "\n      Indicates if anonymous requests are allowed when using 'simple' authentication.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.authentication.kerberos.principal",
-    "value" : "HTTP/localhost@${local.realm}",
-    "description" : "\n      Indicates the Kerberos principal to be used for HTTP endpoint.\n      The principal MUST start with 'HTTP/' as per Kerberos HTTP SPNEGO specification.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.authentication.kerberos.keytab",
-    "value" : "${oozie.service.HadoopAccessorService.keytab.file}",
-    "description" : "\n      Location of the keytab file with the credentials for the principal.\n      Referring to the same keytab file Oozie uses for its Kerberos credentials for Hadoop.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.authentication.kerberos.name.rules",
-    "value" : "DEFAULT",
-    "description" : "\n      The kerberos names rules is to resolve kerberos principal names, refer to Hadoop's\n      KerberosName for more details.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.coord.actions.default.length",
-    "value" : "1000",
-    "description" : "\n      Default number of coordinator actions to be retrieved by the info command\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.validate.ForkJoin",
-    "value" : "true",
-    "description" : "\n      If true, fork and join should be validated at wf submission time.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.coord.action.get.all.attributes",
-    "value" : "false",
-    "description" : "\n      Setting to true is not recommended as coord job/action info will bring all columns of the action in memory.\n      Set it true only if backward compatibility for action/job info is required.\n    ",
-    "filename" : "oozie-site.xml"
-  } ],
-  "components" : [ {
-    "name" : "OOZIE_SERVER",
-    "category" : "MASTER",
-    "client" : false,
-    "master" : true
-  }, {
-    "name" : "OOZIE_CLIENT",
-    "category" : "CLIENT",
-    "client" : true,
-    "master" : false
-  } ],
-  "clientOnlyService" : false,
-  "clientComponent" : {
-    "name" : "OOZIE_CLIENT",
-    "category" : "CLIENT",
-    "client" : true,
-    "master" : false
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/stack/hdp/version01/WEBHCAT.json b/branch-1.2/ambari-web/app/assets/data/wizard/stack/hdp/version01/WEBHCAT.json
deleted file mode 100644
index a292728..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/stack/hdp/version01/WEBHCAT.json
+++ /dev/null
@@ -1,40 +0,0 @@
-{
-  "name" : "WEBHCAT",
-  "version" : "0.1.4.1-1",
-  "user" : "root",
-  "comment" : "This is comment for WEBHCAT service",
-  "properties" : [ {
-    "name" : "templeton.port",
-    "value" : "50111",
-    "description" : "The HTTP port for the main server.",
-    "filename" : "webhcat-site.xml"
-  }, {
-    "name" : "templeton.storage.class",
-    "value" : "org.apache.hcatalog.templeton.tool.ZooKeeperStorage",
-    "description" : "The class to use as storage",
-    "filename" : "webhcat-site.xml"
-  }, {
-    "name" : "templeton.override.enabled",
-    "value" : "false",
-    "description" : "\n     Enable the override path in templeton.override.jars\n   ",
-    "filename" : "webhcat-site.xml"
-  }, {
-    "name" : "templeton.streaming.jar",
-    "value" : "hdfs:///apps/templeton/hadoop-streaming.jar",
-    "description" : "The hdfs path to the Hadoop streaming jar file.",
-    "filename" : "webhcat-site.xml"
-  } ],
-  "components" : [ {
-    "name" : "WEBHCAT_SERVER",
-    "category" : "MASTER",
-    "client" : false,
-    "master" : true
-  } ],
-  "clientOnlyService" : false,
-  "clientComponent" : {
-    "name" : "WEBHCAT_SERVER",
-    "category" : "MASTER",
-    "client" : false,
-    "master" : true
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/stack/hdp/version01/ZOOKEEPER.json b/branch-1.2/ambari-web/app/assets/data/wizard/stack/hdp/version01/ZOOKEEPER.json
deleted file mode 100644
index 6815994..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/stack/hdp/version01/ZOOKEEPER.json
+++ /dev/null
@@ -1,25 +0,0 @@
-{
-  "name" : "ZOOKEEPER",
-  "version" : "3.4.5.1-1",
-  "user" : "root",
-  "comment" : "This is comment for ZOOKEEPER service",
-  "properties" : [ ],
-  "components" : [ {
-    "name" : "ZOOKEEPER_SERVER",
-    "category" : "MASTER",
-    "client" : false,
-    "master" : true
-  }, {
-    "name" : "ZOOKEEPER_CLIENT",
-    "category" : "CLIENT",
-    "client" : true,
-    "master" : false
-  } ],
-  "clientOnlyService" : false,
-  "clientComponent" : {
-    "name" : "ZOOKEEPER_CLIENT",
-    "category" : "CLIENT",
-    "client" : true,
-    "master" : false
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/data/wizard/stack/stacks.json b/branch-1.2/ambari-web/app/assets/data/wizard/stack/stacks.json
deleted file mode 100644
index 8210b16..0000000
--- a/branch-1.2/ambari-web/app/assets/data/wizard/stack/stacks.json
+++ /dev/null
@@ -1,306 +0,0 @@
-[ {
-  "name" : "HDP",
-  "version" : "0.1",
-  "repositories" : [ {
-    "baseUrl" : "http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos6",
-    "osType" : "centos6",
-    "repoId" : "HDP-1.1.1.16",
-    "repoName" : "HDP",
-    "mirrorsList" : null
-  }, {
-    "baseUrl" : "http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos6",
-    "osType" : "centos6",
-    "repoId" : "HDP-UTILS-1.1.0.15",
-    "repoName" : "HDP-UTILS",
-    "mirrorsList" : null
-  }, {
-    "baseUrl" : null,
-    "osType" : "centos6",
-    "repoId" : "epel",
-    "repoName" : "epel",
-    "mirrorsList" : "https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch"
-  }, {
-    "baseUrl" : "http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos5",
-    "osType" : "centos5",
-    "repoId" : "HDP-1.1.1.16",
-    "repoName" : "HDP",
-    "mirrorsList" : null
-  }, {
-    "baseUrl" : "http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos5",
-    "osType" : "centos5",
-    "repoId" : "HDP-UTILS-1.1.0.15",
-    "repoName" : "HDP-UTILS",
-    "mirrorsList" : null
-  }, {
-    "baseUrl" : null,
-    "osType" : "centos5",
-    "repoId" : "epel",
-    "repoName" : "epel",
-    "mirrorsList" : "https://mirrors.fedoraproject.org/metalink?repo=epel-5&arch=$basearch"
-  } ],
-  "services" : [ {
-    "name" : "WEBHCAT",
-    "version" : "1.0",
-    "user" : "root",
-    "comment" : "This is comment for WEBHCAT service",
-    "components" : [ {
-      "name" : "WEBHCAT_SERVER",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    } ],
-    "clientComponent" : {
-      "name" : "WEBHCAT_SERVER",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    }
-  }, {
-    "name" : "SQOOP",
-    "version" : "1.0",
-    "user" : "root",
-    "comment" : "This is comment for SQOOP service",
-    "components" : [ {
-      "name" : "SQOOP",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    } ],
-    "clientComponent" : {
-      "name" : "SQOOP",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    }
-  }, {
-    "name" : "NAGIOS",
-    "version" : "1.0",
-    "user" : "root",
-    "comment" : "This is comment for NAGIOS service",
-    "components" : [ {
-      "name" : "NAGIOS_SERVER",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    } ],
-    "clientComponent" : {
-      "name" : "NAGIOS_SERVER",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    }
-  }, {
-    "name" : "HDFS",
-    "version" : "1.0",
-    "user" : "root",
-    "comment" : "This is comment for HDFS service",
-    "components" : [ {
-      "name" : "NAMENODE",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    }, {
-      "name" : "DATANODE",
-      "category" : "SLAVE",
-      "client" : false,
-      "master" : false
-    }, {
-      "name" : "SECONDARY_NAMENODE",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    }, {
-      "name" : "HDFS_CLIENT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    } ],
-    "clientComponent" : {
-      "name" : "HDFS_CLIENT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    }
-  }, {
-    "name" : "PIG",
-    "version" : "1.0",
-    "user" : "root",
-    "comment" : "This is comment for PIG service",
-    "components" : [ {
-      "name" : "PIG",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    } ],
-    "clientComponent" : {
-      "name" : "PIG",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    }
-  }, {
-    "name" : "HBASE",
-    "version" : "1.0",
-    "user" : "mapred",
-    "comment" : "This is comment for HBASE service",
-    "components" : [ {
-      "name" : "HBASE_MASTER",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    }, {
-      "name" : "HBASE_REGIONSERVER",
-      "category" : "SLAVE",
-      "client" : false,
-      "master" : false
-    }, {
-      "name" : "HBASE_CLIENT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    } ],
-    "clientComponent" : {
-      "name" : "HBASE_CLIENT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    }
-  }, {
-    "name" : "ZOOKEEPER",
-    "version" : "1.0",
-    "user" : "root",
-    "comment" : "This is comment for ZOOKEEPER service",
-    "components" : [ {
-      "name" : "ZOOKEEPER_SERVER",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    }, {
-      "name" : "ZOOKEEPER_CLIENT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    } ],
-    "clientComponent" : {
-      "name" : "ZOOKEEPER_CLIENT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    }
-  }, {
-    "name" : "GANGLIA",
-    "version" : "1.0",
-    "user" : "root",
-    "comment" : "This is comment for GANGLIA service",
-    "components" : [ {
-      "name" : "GANGLIA_SERVER",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    }, {
-      "name" : "GANGLIA_MONITOR",
-      "category" : "SLAVE",
-      "client" : false,
-      "master" : false
-    }, {
-      "name" : "MONITOR_WEBSERVER",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    } ],
-    "clientComponent" : {
-      "name" : "GANGLIA_SERVER",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    }
-  }, {
-    "name" : "HCATALOG",
-    "version" : "1.0",
-    "user" : "root",
-    "comment" : "This is comment for HCATALOG service",
-    "components" : [ {
-      "name" : "HCAT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    } ],
-    "clientComponent" : {
-      "name" : "HCAT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    }
-  }, {
-    "name" : "HIVE",
-    "version" : "1.0",
-    "user" : "root",
-    "comment" : "This is comment for HIVE service",
-    "components" : [ {
-      "name" : "HIVE_SERVER",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    }, {
-      "name" : "HIVE_CLIENT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    } ],
-    "clientComponent" : {
-      "name" : "HIVE_CLIENT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    }
-  }, {
-    "name" : "OOZIE",
-    "version" : "1.0",
-    "user" : "root",
-    "comment" : "This is comment for OOZIE service",
-    "components" : [ {
-      "name" : "OOZIE_SERVER",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    }, {
-      "name" : "OOZIE_CLIENT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    } ],
-    "clientComponent" : {
-      "name" : "OOZIE_CLIENT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    }
-  }, {
-    "name" : "MAPREDUCE",
-    "version" : "1.0",
-    "user" : "mapred",
-    "comment" : "This is comment for MAPREDUCE service",
-    "components" : [ {
-      "name" : "JOBTRACKER",
-      "category" : "MASTER",
-      "client" : false,
-      "master" : true
-    }, {
-      "name" : "TASKTRACKER",
-      "category" : "SLAVE",
-      "client" : false,
-      "master" : false
-    }, {
-      "name" : "MAPREDUCE_CLIENT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    } ],
-    "clientComponent" : {
-      "name" : "MAPREDUCE_CLIENT",
-      "category" : "CLIENT",
-      "client" : true,
-      "master" : false
-    }
-  } ]
-} ]
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/font/fontawesome-webfont.eot b/branch-1.2/ambari-web/app/assets/font/fontawesome-webfont.eot
deleted file mode 100644
index 89070c1..0000000
--- a/branch-1.2/ambari-web/app/assets/font/fontawesome-webfont.eot
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/font/fontawesome-webfont.svg b/branch-1.2/ambari-web/app/assets/font/fontawesome-webfont.svg
deleted file mode 100644
index 1245f92..0000000
--- a/branch-1.2/ambari-web/app/assets/font/fontawesome-webfont.svg
+++ /dev/null
@@ -1,255 +0,0 @@
-<?xml version="1.0" standalone="no"?>
-<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd" >
-<svg xmlns="http://www.w3.org/2000/svg">
-<metadata></metadata>
-<defs>
-<font id="FontAwesomeRegular" horiz-adv-x="1843" >
-<font-face units-per-em="2048" ascent="1536" descent="-512" />
-<missing-glyph horiz-adv-x="512" />
-<glyph horiz-adv-x="0" />
-<glyph horiz-adv-x="0" />
-<glyph unicode="&#xd;" horiz-adv-x="512" />
-<glyph unicode=" "  horiz-adv-x="512" />
-<glyph unicode="&#x09;" horiz-adv-x="512" />
-<glyph unicode="&#xa0;" horiz-adv-x="512" />
-<glyph unicode="o" horiz-adv-x="1591" />
-<glyph unicode="&#xa8;" horiz-adv-x="2048" />
-<glyph unicode="&#xa9;" horiz-adv-x="2048" />
-<glyph unicode="&#xae;" horiz-adv-x="2048" />
-<glyph unicode="&#xb4;" horiz-adv-x="2048" />
-<glyph unicode="&#xc6;" horiz-adv-x="2048" />
-<glyph unicode="&#xcd;" horiz-adv-x="2048" />
-<glyph unicode="&#x2000;" horiz-adv-x="784" />
-<glyph unicode="&#x2001;" horiz-adv-x="1569" />
-<glyph unicode="&#x2002;" horiz-adv-x="784" />
-<glyph unicode="&#x2003;" horiz-adv-x="1569" />
-<glyph unicode="&#x2004;" horiz-adv-x="523" />
-<glyph unicode="&#x2005;" horiz-adv-x="392" />
-<glyph unicode="&#x2006;" horiz-adv-x="261" />
-<glyph unicode="&#x2007;" horiz-adv-x="261" />
-<glyph unicode="&#x2008;" horiz-adv-x="196" />
-<glyph unicode="&#x2009;" horiz-adv-x="313" />
-<glyph unicode="&#x200a;" horiz-adv-x="87" />
-<glyph unicode="&#x202f;" horiz-adv-x="313" />
-<glyph unicode="&#x203a;" horiz-adv-x="2048" />
-<glyph unicode="&#x205f;" horiz-adv-x="392" />
-<glyph unicode="&#x2122;" horiz-adv-x="2048" />
-<glyph unicode="&#x221e;" horiz-adv-x="2048" />
-<glyph unicode="&#xe000;" horiz-adv-x="1024" d="M0 0z" />
-<glyph unicode="&#xf000;" horiz-adv-x="1536" d="M6 1489q20 47 70 47h1382q51 0 72 -47q20 -47 -17 -84l-610 -610v-641h248q33 0 55.5 -22.5t22.5 -53.5q0 -33 -22.5 -55.5t-55.5 -22.5h-768q-31 0 -53.5 22.5t-22.5 55.5q0 31 22.5 53.5t53.5 22.5h250v641l-610 610q-37 37 -17 84z" />
-<glyph unicode="&#xf001;" horiz-adv-x="1488" d="M0 213q0 57 27.5 103t72.5 77t98.5 47.5t106.5 16.5q25 0 50.5 -4t50.5 -11v779q0 27 16 48t43 29q23 6 99.5 29t178 52.5t215 62.5t211 60.5t164 46t74.5 18.5q35 0 58.5 -23.5t23.5 -58.5v-1028q0 -59 -27.5 -104.5t-73 -76t-99.5 -47t-105 -16.5t-105.5 16.5t-98.5 47 t-71.5 75.5t-27.5 105q0 57 27.5 103t71.5 77t98.5 47t105.5 16q27 0 52.5 -4t49.5 -10v537l-678 -195v-815q0 -59 -27.5 -104.5t-71.5 -76t-98.5 -47t-105.5 -16.5q-53 0 -106.5 16.5t-98.5 47t-72.5 76t-27.5 104.5z" />
-<glyph unicode="&#xf002;" horiz-adv-x="1597" d="M0 901q0 137 52 258t143.5 212t212 143.5t258.5 52.5q137 0 257.5 -52.5t212 -143.5t143.5 -212t52 -258q0 -98 -28.5 -191.5t-81.5 -174.5l358 -359q18 -18 18 -47q0 -16 -18 -43t-45 -53.5t-53.5 -45t-42.5 -18.5q-29 0 -47 19l-359 358q-82 -53 -175 -81.5t-191 -28.5 q-137 0 -258 52t-212.5 143t-143.5 212t-52 258zM266 901q0 -84 32 -156.5t86 -126t127 -85t155 -31.5t154.5 31.5t126.5 85t86 126t32 156.5q0 82 -32 154.5t-86 127t-126.5 86t-154.5 31.5t-155 -31.5t-127 -86t-86 -127t-32 -154.5zM414 901q0 51 19.5 97t54 81t80 54.5 t98.5 19.5q20 0 34.5 -14.5t14.5 -36.5q0 -20 -14.5 -34.5t-34.5 -14.5q-63 0 -107.5 -44t-44.5 -108q0 -20 -14.5 -34.5t-34.5 -14.5q-23 0 -37 14.5t-14 34.5z" />
-<glyph unicode="&#xf003;" d="M0 115v1306q0 47 34 81t81 34h1614q47 0 80.5 -34t33.5 -81v-1306q0 -47 -33.5 -81t-80.5 -34h-1614q-47 0 -81 34t-34 81zM154 154h1536v852q-31 -31 -58 -50q-106 -80 -212.5 -159.5t-211.5 -163.5q-61 -49 -131.5 -94t-156.5 -45q-82 0 -153 45t-132 94 q-104 84 -211 164t-213 159q-27 18 -57 50v-852zM154 1317q0 -31 14 -65.5t35.5 -66.5t47 -59.5t50.5 -46.5q100 -76 199.5 -150.5t199.5 -152.5q20 -16 48 -37.5t58 -42t59.5 -35t54.5 -14.5h2h2q25 0 54.5 14.5t59 35t57 42t48.5 37.5q100 78 199.5 153t199.5 150 q25 18 50.5 46t47 60t36 66.5t14.5 65.5v65h-1536v-65z" />
-<glyph unicode="&#xf004;" horiz-adv-x="1802" d="M0 1073q0 113 34 205t97.5 155.5t153.5 98.5t202 35q59 0 117 -18.5t110 -48.5t99 -68.5t88 -77.5q39 39 87 77.5t100.5 68.5t109 48.5t115.5 18.5q113 0 204 -35t154.5 -98.5t97 -155.5t33.5 -205q0 -66 -18.5 -130t-51 -124.5t-74.5 -115t-87 -99.5l-615 -612 q-23 -23 -55 -23q-31 0 -57 23l-615 614q-45 45 -87 99.5t-73.5 114t-50 124t-18.5 129.5z" />
-<glyph unicode="&#xf005;" horiz-adv-x="1675" d="M1 959.5q9 27.5 54 33.5l506 74l227 459q20 41 49 41t50 -41l227 -459l506 -74q45 -6 54 -33.5t-23 -60.5l-367 -356l86 -504q8 -45 -15.5 -62.5t-64.5 5.5l-452 237l-453 -237q-41 -23 -64.5 -5.5t-15.5 62.5l86 504l-364 356q-35 33 -26 60.5z" />
-<glyph unicode="&#xf006;" horiz-adv-x="1675" d="M0 948q0 23 18.5 32t36.5 13l506 74l227 459q6 14 20 27.5t30 13.5q18 0 30.5 -13.5t18.5 -27.5l227 -459l506 -74q18 -4 36.5 -13t18.5 -32q0 -14 -7 -26.5t-17 -22.5l-367 -356l86 -504q0 -4 1 -9t1 -12q0 -20 -9 -34.5t-32 -14.5t-41 13l-452 237l-453 -237 q-18 -12 -39 -13q-23 0 -33 14.5t-10 34.5q0 6 1 11.5t1 9.5l86 504l-364 356q-10 10 -18.5 22.5t-8.5 26.5zM289 866l274 -268l-65 -377l340 178l340 -178l-66 377l274 268l-378 56l-170 344l-170 -344z" />
-<glyph unicode="&#xf007;" horiz-adv-x="1566" d="M0 57v387q0 37 18.5 82t48 86t65.5 74t71 43q18 6 66 13.5t102.5 14.5t104.5 13t77 10q-92 59 -144.5 153.5t-52.5 205.5q0 88 34 165.5t91.5 136t135 92.5t165.5 34t166 -34t136.5 -92.5t92 -136t33.5 -165.5q0 -109 -52 -204.5t-144 -154.5q27 -4 77 -10t104 -13 t101 -14.5t68 -13.5q35 -10 70.5 -42t65.5 -74t48.5 -87t18.5 -82v-387q-10 -4 -22.5 -14t-27 -19.5t-27.5 -16.5t-22 -7h-1370q-35 0 -53 21.5t-45 35.5z" />
-<glyph unicode="&#xf008;" d="M0 115v1306q0 47 34 81t81 34h1614q47 0 80.5 -34t33.5 -81v-1306q0 -47 -33.5 -81t-80.5 -34h-1614q-47 0 -81 34t-34 81zM115 154q0 -16 11 -27.5t28 -11.5h153q16 0 27.5 11t11.5 28v153q0 16 -11 27.5t-28 11.5h-153q-16 0 -27.5 -11t-11.5 -28v-153zM115 512 q0 -16 11 -27.5t28 -11.5h153q16 0 27.5 11.5t11.5 27.5v154q0 16 -11 27.5t-28 11.5h-153q-16 0 -27.5 -11.5t-11.5 -27.5v-154zM115 870q0 -16 11 -27.5t28 -11.5h153q16 0 27.5 11.5t11.5 27.5v154q0 16 -11 27.5t-28 11.5h-153q-16 0 -27.5 -11.5t-11.5 -27.5v-154z M115 1229q0 -16 11 -27.5t28 -11.5h153q16 0 27.5 11t11.5 28v153q0 16 -11 27.5t-28 11.5h-153q-16 0 -27.5 -11t-11.5 -28v-153zM461 154q0 -16 11 -27.5t28 -11.5h843q16 0 27.5 11t11.5 28v512q0 16 -11 27.5t-28 11.5h-843q-16 0 -27.5 -11.5t-11.5 -27.5v-512z M461 870q0 -16 11 -27.5t28 -11.5h843q16 0 27.5 11.5t11.5 27.5v512q0 16 -11 27.5t-28 11.5h-843q-16 0 -27.5 -11t-11.5 -28v-512zM1497 154q0 -16 11.5 -27.5t27.5 -11.5h154q16 0 27.5 11t11.5 28v153q0 16 -11.5 27.5t-27.5 11.5h-154q-16 0 -27.5 -11t-11.5 -28v-153 zM1497 512q0 -16 11.5 -27.5t27.5 -11.5h154q16 0 27.5 11.5t11.5 27.5v154q0 16 -11.5 27.5t-27.5 11.5h-154q-16 0 -27.5 -11.5t-11.5 -27.5v-154zM1497 870q0 -16 11.5 -27.5t27.5 -11.5h154q16 0 27.5 11.5t11.5 27.5v154q0 16 -11.5 27.5t-27.5 11.5h-154 q-16 0 -27.5 -11.5t-11.5 -27.5v-154zM1497 1229q0 -16 11.5 -27.5t27.5 -11.5h154q16 0 27.5 11t11.5 28v153q0 16 -11.5 27.5t-27.5 11.5h-154q-16 0 -27.5 -11t-11.5 -28v-153z" />
-<glyph unicode="&#xf009;" d="M0 78v536q0 33 22.5 55.5t55.5 22.5h690q33 0 55.5 -22.5t22.5 -55.5v-536q0 -33 -22.5 -55.5t-55.5 -22.5h-690q-33 0 -55.5 22.5t-22.5 55.5zM0 922v538q0 31 22.5 53.5t55.5 22.5h690q33 0 55.5 -22.5t22.5 -53.5v-538q0 -33 -22.5 -54.5t-55.5 -21.5h-690 q-33 0 -55.5 21.5t-22.5 54.5zM999 78v536q0 33 21.5 55.5t54.5 22.5h692q31 0 53.5 -22.5t22.5 -55.5v-536q0 -33 -22.5 -55.5t-53.5 -22.5h-692q-33 0 -54.5 22.5t-21.5 55.5zM999 922v538q0 31 21.5 53.5t54.5 22.5h692q31 0 53.5 -22.5t22.5 -53.5v-538 q0 -33 -22.5 -54.5t-53.5 -21.5h-692q-33 0 -54.5 21.5t-21.5 54.5z" />
-<glyph unicode="&#xf00a;" d="M0 78v270q0 33 22.5 54.5t55.5 21.5h358q31 0 53.5 -21.5t22.5 -54.5v-270q0 -33 -22.5 -55.5t-53.5 -22.5h-358q-33 0 -55.5 22.5t-22.5 55.5zM0 655v226q0 33 22.5 54t55.5 21h358q31 0 53.5 -21.5t22.5 -53.5v-226q0 -33 -22.5 -55t-53.5 -22h-358q-33 0 -55.5 22.5 t-22.5 54.5zM0 1188v272q0 31 22.5 53.5t55.5 22.5h358q31 0 53.5 -22.5t22.5 -53.5v-272q0 -33 -22.5 -55.5t-53.5 -22.5h-358q-33 0 -55.5 22.5t-22.5 55.5zM666 78v270q0 33 22.5 54.5t54.5 21.5h359q31 0 53.5 -21.5t22.5 -54.5v-270q0 -33 -22.5 -55.5t-53.5 -22.5 h-359q-33 0 -55 22.5t-22 55.5zM666 655v226q0 33 22.5 54t54.5 21h359q31 0 53.5 -21.5t22.5 -53.5v-226q0 -33 -22.5 -55t-53.5 -22h-359q-33 0 -55 22.5t-22 54.5zM666 1188v272q0 31 22.5 53.5t54.5 22.5h359q31 0 53.5 -22.5t22.5 -53.5v-272q0 -33 -22.5 -55.5 t-53.5 -22.5h-359q-33 0 -55 22.5t-22 55.5zM1331 78v270q0 33 22.5 54.5t55.5 21.5h358q31 0 53.5 -21.5t22.5 -54.5v-270q0 -33 -22.5 -55.5t-53.5 -22.5h-358q-33 0 -55.5 22.5t-22.5 55.5zM1331 655v226q0 33 22.5 54t55.5 21h358q31 0 53.5 -21.5t22.5 -53.5v-226 q0 -33 -22.5 -55t-53.5 -22h-358q-33 0 -55.5 22.5t-22.5 54.5zM1331 1188v272q0 31 22.5 53.5t55.5 22.5h358q31 0 53.5 -22.5t22.5 -53.5v-272q0 -33 -22.5 -55.5t-53.5 -22.5h-358q-33 0 -55.5 22.5t-22.5 55.5z" />
-<glyph unicode="&#xf00b;" d="M0 78v270q0 33 22.5 54.5t55.5 21.5h297q31 0 53.5 -21.5t22.5 -54.5v-270q0 -33 -22.5 -55.5t-53.5 -22.5h-297q-33 0 -55.5 22.5t-22.5 55.5zM0 655v226q0 33 22.5 54t55.5 21h297q31 0 53.5 -21.5t22.5 -53.5v-226q0 -33 -22.5 -55t-53.5 -22h-297q-33 0 -55.5 22.5 t-22.5 54.5zM0 1188v272q0 31 22.5 53.5t55.5 22.5h297q31 0 53.5 -22.5t22.5 -53.5v-272q0 -33 -22.5 -55.5t-53.5 -22.5h-297q-33 0 -55.5 22.5t-22.5 55.5zM604 78v270q0 33 22.5 54.5t55.5 21.5h1085q31 0 53.5 -21.5t22.5 -54.5v-270q0 -33 -22.5 -55.5t-53.5 -22.5 h-1085q-33 0 -55.5 22.5t-22.5 55.5zM604 655v226q0 33 22.5 54t55.5 21h1085q31 0 53.5 -21.5t22.5 -53.5v-226q0 -33 -22.5 -55t-53.5 -22h-1085q-33 0 -55.5 22.5t-22.5 54.5zM604 1188v272q0 31 22.5 53.5t55.5 22.5h1085q31 0 53.5 -22.5t22.5 -53.5v-272 q0 -33 -22.5 -55.5t-53.5 -22.5h-1085q-33 0 -55.5 22.5t-22.5 55.5z" />
-<glyph unicode="&#xf00c;" d="M0 732.5q0 33.5 23 55.5l174 175q23 23 56.5 22.5t55.5 -22.5l365 -365q23 -23 56.5 -23t55.5 23l746 745q23 23 56.5 23t56.5 -23l174 -174q23 -23 22.5 -56.5t-22.5 -55.5l-910 -910q-23 -23 -62.5 -39t-72.5 -16h-88q-35 0 -75 16.5t-62 38.5l-526 529 q-23 23 -23 56.5z" />
-<glyph unicode="&#xf00d;" horiz-adv-x="1536" d="M0 192.5q0 38.5 29 67.5l508 510l-508 500q-29 29 -29 67.5t29 67.5l100 100q29 29 68 29t67 -29l504 -504l504 504q29 29 67.5 29t67.5 -29l100 -100q29 -29 29 -68t-29 -67l-508 -510l508 -500q29 -29 29 -66.5t-29 -66.5l-100 -102q-29 -29 -68 -29t-67 29l-504 505 l-506 -505q-29 -29 -66.5 -29t-66.5 29l-100 100q-29 29 -29 67.5z" />
-<glyph unicode="&#xf00e;" horiz-adv-x="1597" d="M0 901q0 137 52 258t143.5 212t212 143.5t258.5 52.5q137 0 259 -52.5t212 -143.5t142 -212t52 -258q0 -102 -28.5 -195.5t-81.5 -170.5l358 -359q18 -18 18 -46t-18 -48l-94 -94q-20 -18 -48 -18.5t-46 18.5l-359 358q-78 -53 -171 -81.5t-195 -28.5q-137 0 -258 52 t-212.5 142t-143.5 211t-52 260zM266 901q0 -84 32 -156.5t86 -126t127 -85t155 -31.5t155.5 31.5t126.5 85t85 126t32 156.5q0 82 -32 154.5t-85 127t-126.5 86t-155.5 31.5t-155 -31.5t-127 -86t-86 -127t-32 -154.5zM399 868v66q0 33 33 33h168v168q0 33 33 32h65 q14 0 24.5 -9t10.5 -23v-168h166q33 0 33 -33v-66q0 -14 -9.5 -24t-23.5 -10h-166v-166q0 -14 -10 -23.5t-25 -9.5h-65q-33 0 -33 33v166h-168q-14 0 -23.5 10t-9.5 24z" />
-<glyph unicode="&#xf010;" horiz-adv-x="1597" d="M0 901q0 137 52 258t143.5 212t212 143.5t258.5 52.5q137 0 259 -52.5t212 -143.5t142 -212t52 -258q0 -102 -28.5 -195.5t-81.5 -170.5l358 -359q18 -18 18 -46t-18 -48l-94 -94q-20 -18 -48 -18.5t-46 18.5l-359 358q-78 -53 -171 -81.5t-195 -28.5q-137 0 -258 52 t-212.5 142t-143.5 211t-52 260zM266 901q0 -84 32 -156.5t86 -126t127 -85t155 -31.5t155.5 31.5t126.5 85t85 126t32 156.5q0 82 -32 154.5t-85 127t-126.5 86t-155.5 31.5t-155 -31.5t-127 -86t-86 -127t-32 -154.5zM399 868v66q0 33 33 33h467q33 0 33 -33v-66 q0 -14 -9.5 -24t-23.5 -10h-467q-14 0 -23.5 10t-9.5 24z" />
-<glyph unicode="&#xf011;" horiz-adv-x="1488" d="M0 713q0 186 86 349t240 267q12 10 28 6q18 -4 25 -16l90 -131q10 -12 6 -27.5t-16 -25.5q-106 -72 -164.5 -182.5t-58.5 -239.5q0 -104 39.5 -197.5t108.5 -162t162 -108.5t197 -40t197.5 40t163.5 108.5t109.5 161.5t39.5 198q0 129 -59 239.5t-164 182.5 q-14 10 -16 24q-4 16 6 29l88 131q10 12 25.5 15t29.5 -5q154 -104 240 -267t86 -349q0 -154 -58.5 -289t-160 -236.5t-237.5 -160t-290 -58.5t-289 58.5t-236 160t-159.5 236.5t-58.5 289zM627 793v704q0 16 11 27.5t28 11.5h157q16 0 27.5 -11.5t11.5 -27.5v-704 q0 -16 -11 -27.5t-28 -11.5h-157q-16 0 -27.5 11t-11.5 28z" />
-<glyph unicode="&#xf012;" d="M0 39v260q0 16 11.5 27.5t27.5 11.5h192q16 0 26.5 -11.5t10.5 -27.5v-260q0 -16 -10 -27.5t-27 -11.5h-192q-39 0 -39 39zM393 39v434q0 16 11.5 27.5t27.5 11.5h193q16 0 26.5 -11.5t10.5 -27.5v-434q0 -16 -10.5 -27.5t-26.5 -11.5h-193q-16 0 -27.5 11.5t-11.5 27.5z M786 39v676q0 16 11.5 27.5t27.5 11.5h193q16 0 27.5 -11.5t11.5 -27.5v-676q0 -16 -11.5 -27.5t-27.5 -11.5h-193q-16 0 -27.5 11.5t-11.5 27.5zM1182 39v995q0 16 10 27.5t27 11.5h192q16 0 27.5 -11t11.5 -28v-995q0 -16 -11.5 -27.5t-27.5 -11.5h-192q-16 0 -26.5 11.5 t-10.5 27.5zM1575 39v1458q0 39 39 39h190q39 0 39 -39v-1458q0 -39 -39 -39h-190q-39 0 -39 39z" />
-<glyph unicode="&#xf013;" horiz-adv-x="1593" d="M0 651v236q0 12 30.5 21.5t68.5 15.5t74 9t48 5q18 61 49 117q-55 82 -120 157l-7 15q0 8 28 38.5t62.5 65.5t66.5 62.5t40 27.5q2 0 26.5 -18.5t54.5 -41t56.5 -43t32.5 -24.5q29 16 58.5 26.5t60.5 20.5q0 12 3 49t9 75t15.5 69t21.5 31h237q14 0 19 -15 q12 -49 17 -103t14 -106q31 -8 59.5 -19t56.5 -28q8 6 34 26.5t55.5 43t53 40t29.5 17.5t37 -27.5t65 -62.5t61.5 -65.5t27.5 -38.5q0 -4 -17.5 -28.5t-39 -53.5t-42 -55.5t-24.5 -32.5q33 -55 51 -123q49 -10 103.5 -13t101.5 -20q16 -4 16 -18v-236q0 -12 -29.5 -21.5 t-68.5 -15.5t-76 -9t-49 -5q-14 -57 -47 -117q55 -82 121 -157l6 -15q0 -8 -27.5 -38.5t-62.5 -65.5t-66.5 -62.5t-40.5 -27.5q-2 0 -26.5 18.5t-54 41t-56 43t-33.5 24.5q-29 -16 -58.5 -27.5t-59.5 -19.5q-2 -12 -5.5 -49.5t-9.5 -76t-14 -69.5t-21 -31h-237q-14 0 -19 17 q-14 49 -19 103t-11 103q-61 18 -117 50q-41 -31 -81 -60.5t-79 -62.5l-12 -4q-6 0 -37 27.5t-64.5 62.5t-61 65.5t-27.5 38.5q0 2 16 26.5t37.5 53.5t42 55.5t26.5 34.5q-33 55 -51 123q-51 10 -104.5 13t-100.5 20q-16 4 -16 18zM557 768q0 -49 18.5 -93t51 -77t77 -52.5 t93.5 -19.5t93 19.5t75.5 52.5t51 77t19.5 93t-19.5 92t-51 76t-75.5 51.5t-93 18.5t-93.5 -18.5t-77 -51.5t-51 -76t-18.5 -92z" />
-<glyph unicode="&#xf014;" horiz-adv-x="1304" d="M0 1175.5v34.5v36t2 36q25 14 71 23.5t98 15.5t102.5 9t78.5 5q-8 82 11.5 128t59.5 68.5t98.5 29t131.5 6.5q55 0 109.5 -3t97.5 -20.5t68.5 -54.5t25.5 -105v-24t-2 -25q29 -2 79 -5t102.5 -9t99.5 -15.5t72 -23.5v-72v-69q-37 -20 -123 -32.5t-185.5 -19t-193.5 -7.5 t-150 -1q-55 0 -150 1t-194.5 7.5t-184.5 18.5t-122 33q-2 16 -2 34.5zM133 154v837q123 -16 244 -21t244 -5h32q129 2 258 6t258 20v-837q0 -63 -44 -108.5t-107 -45.5h-731q-63 0 -108.5 45t-45.5 109zM303 199q0 -16 10.5 -26.5t26.5 -10.5h39q16 0 27.5 10t11.5 27v614 q0 16 -11.5 27.5t-27.5 11.5h-39q-16 0 -26.5 -11.5t-10.5 -27.5v-614zM504 1384q0 -10 1 -22t3 -23q144 2 291 0q0 12 2 23.5t0 21.5v15q-35 10 -74 11t-74 1q-37 0 -75.5 -1t-73.5 -11v-15zM596 199q0 -16 10 -26.5t27 -10.5h39q16 0 27.5 10t11.5 27v614q0 16 -11.5 27.5 t-27.5 11.5h-39q-16 0 -26.5 -11.5t-10.5 -27.5v-614zM887 199q0 -16 11 -26.5t28 -10.5h39q16 0 27.5 10t11.5 27v614q0 16 -11.5 27.5t-27.5 11.5h-39q-16 0 -27.5 -11.5t-11.5 -27.5v-614z" />
-<glyph unicode="&#xf015;" horiz-adv-x="1880" d="M0 809.5q2 15.5 14 26.5l867 710q27 20 59 21q33 0 59 -21l240 -196v102q0 16 11.5 27.5t27.5 11.5h223q16 0 26.5 -11.5t10.5 -27.5v-348l328 -268q12 -10 14 -25.5t-8 -28.5l-45 -53q-10 -14 -29 -14h-65q-16 0 -25 8l-743 608q-25 20 -50 0l-743 -608q-8 -8 -25 -8 h-65q-18 0 -29 14l-45 53q-10 12 -8 27.5zM266 76v622l674 553l674 -553v-622q0 -33 -21.5 -54.5t-54.5 -21.5h-422v498h-352v-498h-422q-33 0 -54.5 21.5t-21.5 54.5z" />
-<glyph unicode="&#xf016;" horiz-adv-x="1228" d="M0 78v1382q0 31 22.5 53.5t55.5 22.5h614q33 0 71 -16.5t60 -38.5l351 -351q23 -23 39 -60.5t16 -70.5v-921q0 -33 -22.5 -55.5t-53.5 -22.5h-1075q-33 0 -55.5 22.5t-22.5 55.5zM154 154h921v692h-459q-31 0 -53 22.5t-22 55.5v458h-387v-1228zM268 326v116h693v-116 h-693zM268 596v115h693v-115h-693zM694 999h381q0 4 -4 13.5t-6 11.5l-350 348q-2 4 -9.5 6t-11.5 4v-383z" />
-<glyph unicode="&#xf017;" horiz-adv-x="1597" d="M0 768q0 166 62.5 311.5t171 254t254 171t311.5 62.5t311 -62.5t254 -171t171 -254t62 -311.5t-62 -311.5t-171 -254t-254 -171t-311 -62.5t-311.5 62.5t-254 171t-171 254t-62.5 311.5zM240 768q0 -117 44 -218t119.5 -177t177 -120t218.5 -44t218 44t177 120t120 177 t44 218t-44 218t-120 177t-177.5 120t-217.5 44q-117 0 -218.5 -44t-177 -120t-119.5 -177t-44 -218zM635 608v465q0 33 23.5 56.5t56.5 23.5h80q35 0 57 -23.5t22 -56.5v-305h201q33 0 56.5 -23.5t23.5 -56.5v-80q0 -33 -23.5 -56.5t-56.5 -23.5h-360q-33 0 -56.5 23.5 t-23.5 56.5z" />
-<glyph unicode="&#xf018;" d="M2 35l594 1466q6 14 21.5 24.5t31.5 10.5h195l-8 -170h172l-9 170h195q16 0 31.5 -10t21.5 -25l594 -1466q6 -14 -1 -24.5t-23 -10.5h-740l-26 512h-258l-27 -512h-739q-16 0 -23.5 10t-1.5 25zM807 797h229l-20 413h-189z" />
-<glyph unicode="&#xf019;" d="M0 39v614q0 16 11.5 27.5t27.5 11.5h229q16 0 27.5 -11t11.5 -28v-346h1229v346q0 16 11.5 27.5t27.5 11.5h229q16 0 27.5 -11t11.5 -28v-614q0 -39 -39 -39h-1765q-39 0 -39 39zM345.5 944.5q6.5 16.5 39.5 16.5h307v499q0 31 21.5 53.5t54.5 22.5h307q33 0 55.5 -22.5 t22.5 -53.5v-499h307q31 0 37 -16.5t-16 -39.5l-504 -506q-23 -23 -55.5 -22.5t-55.5 22.5l-504 506q-23 23 -16.5 39.5z" />
-<glyph unicode="&#xf01a;" horiz-adv-x="1597" d="M0 768q0 166 62.5 311.5t171 254t254 171t311.5 62.5t311 -62.5t254 -171t171 -254t62 -311.5t-62 -311.5t-171 -254t-254 -171t-311 -62.5t-311.5 62.5t-254 171t-171 254t-62.5 311.5zM240 768q0 -117 44 -218t119.5 -177t177 -120t218.5 -44t218 44t177 120t120 177 t44 218t-44 218t-120 177t-177.5 120t-217.5 44q-117 0 -218.5 -44t-177 -120t-119.5 -177t-44 -218zM414 743q10 25 37 25h208v358q0 16 11.5 28.5t27.5 12.5h201q16 0 27.5 -12t11.5 -29v-358h209q27 0 37 -25t-10 -43l-347 -346q-14 -10 -28 -10t-29 10l-346 346 q-20 18 -10 43z" />
-<glyph unicode="&#xf01b;" horiz-adv-x="1597" d="M0 768q0 166 62.5 311.5t171 254t254 171t311.5 62.5t311 -62.5t254 -171t171 -254t62 -311.5t-62 -311.5t-171 -254t-254 -171t-311 -62.5t-311.5 62.5t-254 171t-171 254t-62.5 311.5zM240 768q0 -117 44 -218t119.5 -177t177 -120t218.5 -44t218 44t177 120t120 177 t44 218t-44 218t-120 177t-177 120t-218 44t-218.5 -44t-177 -120t-119.5 -177t-44 -218zM414 793q-10 25 10 43l346 346q14 10 29 10q14 0 28 -10l347 -346q20 -18 10 -43t-37 -25h-209v-360q0 -16 -11.5 -27.5t-27.5 -11.5h-201q-16 0 -27.5 11t-11.5 28v360h-208 q-27 0 -37 25z" />
-<glyph unicode="&#xf01c;" d="M0 78v577q0 33 9 76t22 72l284 663q12 29 44 49.5t63 20.5h999q31 0 63 -20.5t44 -49.5l284 -663q12 -29 21.5 -72t9.5 -76v-577q0 -33 -22.5 -55.5t-53.5 -22.5h-1689q-33 0 -55.5 22.5t-22.5 55.5zM238 694h387l114 -231h383l117 231h367q-2 4 -2 9.5t-2 9.5l-256 594 h-848l-256 -596q-2 -2 -2 -7.5t-2 -9.5z" />
-<glyph unicode="&#xf01d;" horiz-adv-x="1597" d="M0 768q0 166 62.5 311.5t171 254t254 171t311.5 62.5t311 -62.5t254 -171t171 -254t62 -311.5t-62 -311.5t-171 -254t-254 -171t-311 -62.5t-311.5 62.5t-254 171t-171 254t-62.5 311.5zM240 768q0 -117 44 -218t119.5 -177t177 -120t218.5 -44t218 44t177 120t120 177 t44 218t-44 218t-120 177t-177.5 120t-217.5 44q-117 0 -218.5 -44t-177 -120t-119.5 -177t-44 -218zM582 453v628q0 18 16 29q20 8 31 0l545 -315q16 -6 16 -27q0 -20 -16 -27l-545 -315q-8 -4 -15 -4q-8 0 -16 4q-16 10 -16 27z" />
-<glyph unicode="&#xf01e;" horiz-adv-x="1591" d="M0 768q0 166 62.5 311.5t171 254t254 171t311.5 62.5q141 0 271 -48.5t239 -140.5l161 162q35 35 58.5 24.5t23.5 -57.5v-463q0 -33 -22 -55q-10 -10 -23.5 -16t-29.5 -6h-463q-47 0 -58.5 23.5t23.5 58.5l160 159q-72 57 -159 88t-181 31q-117 0 -218.5 -44t-177 -120 t-119.5 -177t-44 -218t44 -218t119.5 -177t177 -120t218.5 -44q104 0 198.5 37t169 101.5t123.5 153.5t64 191q0 16 14 27q14 10 31 8l157 -20q16 -4 26.5 -16.5t8.5 -28.5q-20 -147 -89.5 -274.5t-176 -220.5t-242 -145.5t-284.5 -52.5q-166 0 -311.5 62.5t-254 171 t-171 254t-62.5 311.5z" />
-<glyph unicode="&#xf021;" horiz-adv-x="1916" d="M6 311l150 437q10 33 39 45t59 4l438 -150q45 -16 46 -40.5t-42 -47.5l-202 -100q53 -78 123.5 -134.5t160.5 -86.5q111 -37 221.5 -28t207 56t170 129t110.5 193q6 16 20.5 22t30.5 2l152 -51q16 -6 23 -20.5t1 -30.5q-53 -158 -159.5 -274.5t-243.5 -184t-296 -81 t-315 39.5q-135 47 -241.5 134t-179.5 208l-205 -100q-43 -23 -63.5 -5.5t-4.5 64.5zM203 1024q53 156 159.5 273.5t243.5 185t295 81t316 -39.5q133 -47 240.5 -134t180.5 -208l205 100q43 23 63.5 5.5t4.5 -62.5l-150 -439q-10 -33 -39 -45t-59 -4l-438 150 q-45 16 -46 40.5t40 47.5l202 100q-51 78 -122.5 134.5t-159.5 86.5q-111 37 -221.5 28t-207 -56t-170 -129t-110.5 -193q-6 -16 -20.5 -22t-30.5 -2l-152 51q-16 6 -23 20.5t-1 30.5z" />
-<glyph unicode="&#xf022;" d="M0 115v1306q0 47 34 81t81 34h1614q47 0 80.5 -34t33.5 -81v-1306q0 -47 -33.5 -81t-80.5 -34h-1614q-47 0 -81 34t-34 81zM154 154h1536v1075h-1536v-1075zM307 346v76q0 39 39 39h154q16 0 27.5 -11.5t11.5 -27.5v-76q0 -16 -11.5 -27.5t-27.5 -11.5h-154q-39 0 -39 39 zM307 653v76q0 39 39 39h154q16 0 27.5 -11.5t11.5 -27.5v-76q0 -16 -11.5 -27.5t-27.5 -11.5h-154q-39 0 -39 39zM307 961v75q0 39 39 39h154q16 0 27.5 -11t11.5 -28v-75q0 -16 -11.5 -27.5t-27.5 -11.5h-154q-39 0 -39 39zM692 346v76q0 16 10.5 27.5t26.5 11.5h768 q39 0 39 -39v-76q0 -39 -39 -39h-768q-16 0 -26.5 11.5t-10.5 27.5zM692 653v76q0 16 10.5 27.5t26.5 11.5h768q39 0 39 -39v-76q0 -39 -39 -39h-768q-16 0 -26.5 11.5t-10.5 27.5zM692 961v75q0 16 10.5 27.5t26.5 11.5h768q39 0 39 -39v-75q0 -39 -39 -39h-768 q-16 0 -26.5 11t-10.5 28z" />
-<glyph unicode="&#xf023;" horiz-adv-x="1253" d="M0 117v626q0 39 21.5 69t56.5 42v164q0 113 43 213t117.5 175t175 118t213.5 43t213 -43t175 -118t118 -175.5t43 -212.5v-164q35 -12 56 -42t21 -69v-626q0 -47 -34.5 -82t-81.5 -35h-1020q-47 0 -82 35t-35 82zM313 862h627v156q0 66 -24.5 123t-67.5 99t-100.5 66.5 t-120.5 24.5q-66 0 -122 -24.5t-99.5 -66.5t-68 -99.5t-24.5 -122.5v-156zM494 164h266l-66 285q29 18 47.5 48.5t18.5 65.5q0 55 -39 95t-94 40t-94 -40t-39 -95q0 -35 18 -65.5t47 -46.5z" />
-<glyph unicode="&#xf024;" d="M0 1382q0 63 45 108.5t109 45.5q63 0 108 -45.5t45 -108.5q0 -41 -20.5 -74.5t-55.5 -56.5v-1212q0 -16 -11 -27.5t-27 -11.5h-78q-16 0 -26.5 11.5t-10.5 27.5v1212q-35 23 -56.5 56.5t-21.5 74.5zM307 416v745q0 33 19.5 66t48.5 49q104 55 188 86t144 45q70 16 124 18 q68 0 124.5 -11t107.5 -29.5t99 -43t100 -51.5q63 -29 145 -33q70 -4 164 15.5t207 87.5q29 16 47 6t18 -43v-748q0 -31 -18.5 -64.5t-46.5 -49.5q-113 -68 -207 -87.5t-164 -15.5q-82 4 -145 33q-51 27 -99.5 51.5t-99.5 43t-107.5 29.5t-124.5 11q-55 -2 -124 -18 q-59 -14 -143.5 -45t-188.5 -88q-29 -16 -48.5 -4t-19.5 45z" />
-<glyph unicode="&#xf025;" horiz-adv-x="1916" d="M0 729q0 172 85 324.5t221 266.5t307 180.5t345 66.5t345.5 -66.5t307.5 -180.5t221 -266.5t85 -326.5q0 -182 -78 -350l-27 -60l-174 -26q-27 -104 -110.5 -173t-196.5 -69v-41q0 -16 -12 -27.5t-29 -11.5h-80q-16 0 -27.5 11.5t-11.5 27.5v719q0 16 11.5 28.5 t27.5 12.5h80q16 0 28.5 -12.5t12.5 -28.5v-39q88 0 161 -44t114 -116l39 7q33 90 32 192q0 125 -65.5 233.5t-170 190.5t-232.5 129t-251 47t-250.5 -47t-232 -128t-170 -190.5t-65.5 -232.5q0 -104 32 -194l39 -7q41 72 114 116t161 44v39q0 16 12 28.5t29 12.5h80 q16 0 27 -12.5t11 -28.5v-719q0 -16 -11 -27.5t-27 -11.5h-80q-16 0 -28.5 11.5t-12.5 27.5v41q-55 0 -106.5 18.5t-91.5 50.5t-68.5 76t-40.5 97l-175 26l-26 60q-78 168 -78 352z" />
-<glyph unicode="&#xf026;" horiz-adv-x="905" d="M0 578v380q0 16 11.5 27.5t27.5 11.5h420l325 326q49 51 84 37t35 -86v-1012q0 -72 -34.5 -86t-84.5 37l-325 326h-420q-16 0 -27.5 11t-11.5 28z" />
-<glyph unicode="&#xf027;" horiz-adv-x="1277" d="M0 578v380q0 16 11.5 27.5t27.5 11.5h420l325 326q49 51 84 37t35 -86v-1012q0 -72 -34.5 -86t-84.5 37l-325 326h-420q-16 0 -27.5 11t-11.5 28zM1027 406q-9 32 7 61q84 145 84 301t-84 301q-16 29 -7 61t38 48t60.5 8t48.5 -37q104 -182 104 -381q0 -201 -104 -381 q-23 -41 -70 -41q-20 0 -39 12q-29 16 -38 48z" />
-<glyph unicode="&#xf028;" horiz-adv-x="1916" d="M0 578v380q0 16 11.5 27.5t27.5 11.5h420l325 326q49 51 84 37t35 -86v-1012q0 -72 -34.5 -86t-84.5 37l-325 326h-420q-16 0 -27.5 11t-11.5 28zM1027 406q-9 32 7 61q84 145 84 301t-84 301q-16 29 -7 61t38 48t60.5 8t48.5 -37q104 -182 104 -381q0 -201 -104 -381 q-23 -41 -70 -41q-20 0 -39 12q-29 16 -38 48zM1285 219q-7 33 11 62q141 225 142 487q0 262 -142 487q-18 29 -11 62t36 49q29 18 61 11t50 -36q82 -131 123.5 -275t41.5 -298q0 -309 -167 -573q-10 -18 -29 -27.5t-37 -9.5q-25 0 -43 12q-29 16 -36 49zM1540 33 q-6 33 12 59q100 154 152.5 325t52.5 351t-52 351t-153 323q-18 29 -12 61.5t35 50.5q29 16 61 10.5t50 -32.5q115 -174 173 -366.5t58 -397.5t-58.5 -397.5t-172.5 -364.5q-10 -18 -29 -27.5t-37 -9.5q-25 0 -45 13q-29 18 -35 51z" />
-<glyph unicode="&#xf029;" horiz-adv-x="1536" d="M0 0v698h698v-698h-698zM0 838v698h698v-698h-698zM139 139h420v420h-420v-420zM139 977h420v420h-420v-420zM279 279v141h141v-141h-141zM279 1116v139h141v-139h-141zM838 0v698h417v-139h142v139h139v-419h-420v139h-139v-418h-139zM838 838v698h698v-698h-698z M977 977h420v420h-420v-420zM1116 0v139h139v-139h-139zM1116 1116v139h139v-139h-139zM1397 0v139h139v-139h-139z" />
-<glyph unicode="&#xf02a;" d="M0 0v1536h154v-1536h-154zM227 0v1536h37v-1536h-37zM356 0v1536h117v-1536h-117zM545 0v1536h78v-1536h-78zM715 0v1536h76v-1536h-76zM903 0v1536h37v-1536h-37zM1014 0v1536h153v-1536h-153zM1221 0v1536h77v-1536h-77zM1409 0v1536h39v-1536h-39zM1579 0v1536h37 v-1536h-37zM1690 0v1536h153v-1536h-153z" />
-<glyph unicode="&#xf02b;" horiz-adv-x="1488" d="M0 961v454q0 49 35 85t86 36h454q51 0 113 -24.5t94 -61.5l672 -748q33 -39 34 -88t-34 -84l-526 -526q-35 -35 -86 -36t-86 36l-670 750q-35 37 -60.5 96t-25.5 111zM197 1223q0 -49 33.5 -83t82.5 -34t83 34t34 83t-34 82.5t-83 33.5t-82.5 -33.5t-33.5 -82.5z" />
-<glyph unicode="&#xf02c;" horiz-adv-x="1875" d="M0 961v454q0 49 35 85t86 36h454q25 0 53.5 -6t57.5 -18.5t54.5 -28t41.5 -33.5l670 -748q33 -37 34 -86t-34 -84l-526 -524q-35 -35 -86.5 -37t-83.5 37l-670 746q-35 39 -60.5 98t-25.5 109zM195 1223q0 -47 34.5 -82t83.5 -35q47 0 82 35t35 82q0 49 -35 83.5 t-82 34.5q-49 0 -83.5 -34.5t-34.5 -83.5zM791 1534h174q51 0 112.5 -24.5t93.5 -61.5l670 -748q35 -37 35 -87t-35 -85l-524 -524q-35 -35 -86 -36t-86 36l-12 14l514 514q35 35 34.5 84.5t-34.5 85.5l-670 748q-31 35 -84 56.5t-102 27.5z" />
-<glyph unicode="&#xf02d;" horiz-adv-x="1710" d="M10 311q2 16 4 31.5t4 34.5q0 10 -4 20.5t-2 20.5q2 16 15.5 31.5t25.5 35.5q23 37 45.5 90.5t32.5 92.5q4 16 -1 30.5t-1 26.5q4 16 16.5 27.5t20.5 23.5q10 18 21.5 42t21.5 49.5t16 50t8 40.5t-2 33t0 29q6 16 20.5 26.5t24.5 24.5q10 12 21.5 34.5t23 49.5t19.5 52.5 t10 45.5q2 12 -4 24.5t-2 27.5q4 14 18.5 29.5t26.5 31.5q16 25 28.5 58.5t30 61t46 43t77.5 1.5l-2 -4q31 10 54 10h780q78 0 119 -57q41 -53 18 -129l-283 -906q-18 -63 -77.5 -107t-126.5 -44h-893q-10 0 -20.5 -2t-18.5 -12q-12 -20 0 -56q16 -43 60 -75.5t87 -32.5h946 q29 0 57.5 21.5t37.5 47.5l309 987q4 16 5 29.5t-1 28.5q41 -14 61 -43q41 -53 19 -129l-283 -905q-18 -66 -77.5 -109t-127.5 -43h-946q-41 0 -79.5 14.5t-73.5 39t-61.5 58t-41.5 72.5q-25 68 -2 127zM500 961q-10 -39 26 -39h615q16 0 30.5 11t18.5 28l24 75 q4 16 -3 27.5t-23 11.5h-615q-16 0 -31.5 -11t-19.5 -28zM569 1190q-4 -16 3.5 -26.5t23.5 -10.5h614q16 0 30.5 10.5t21.5 26.5l22 78q4 16 -3 27.5t-24 11.5h-614q-16 0 -30.5 -11.5t-20.5 -27.5z" />
-<glyph unicode="&#xf02e;" horiz-adv-x="1253" d="M0 84v1337q0 47 34 81t81 34h1024q47 0 80.5 -34t33.5 -81v-1337q0 -47 -33.5 -81t-80.5 -34t-80 33l-432 432l-432 -432q-33 -33 -80 -33t-81 34t-34 81z" />
-<glyph unicode="&#xf02f;" d="M0 39v346q0 47 18.5 89t50 73t73.5 49t89 18h1383q47 0 89 -18t72.5 -49t49 -73t18.5 -89v-346q0 -39 -39 -39h-1765q-39 0 -39 39zM268 193q0 -16 11.5 -26.5t27.5 -10.5h1229q16 0 27.5 10t11.5 27v38q0 16 -11.5 27.5t-27.5 11.5h-1229q-16 0 -27.5 -11t-11.5 -28v-38 zM307 729v731q0 31 22.5 53.5t55.5 22.5h651v-383q0 -49 34 -83t81 -34h385v-307h-1229zM1151 1153v383l385 -383h-385z" />
-<glyph unicode="&#xf030;" d="M0 115v1075q0 47 34 82t81 35h366l58 125q18 43 66 73.5t95 30.5h443q47 0 95 -30.5t67 -73.5l57 -125h367q47 0 80.5 -35t33.5 -82v-1075q0 -47 -33.5 -81t-80.5 -34h-1614q-47 0 -81 34t-34 81zM442 653q0 -100 38 -187t102.5 -152.5t153 -103.5t186.5 -38t186 38 t152.5 103.5t102.5 152.5t38 187q0 98 -38 186.5t-102.5 153t-152.5 102.5t-186 38t-186.5 -38t-153 -102.5t-102.5 -152.5t-38 -187zM596 653q0 68 25.5 127.5t69.5 103.5t103.5 69.5t127.5 25.5t127 -25.5t103 -69.5t69.5 -103.5t25.5 -127.5t-25.5 -127t-69.5 -104 t-103.5 -70.5t-126.5 -25.5q-68 0 -127.5 25.5t-103.5 70.5t-69.5 104.5t-25.5 126.5z" />
-<glyph unicode="&#xf031;" horiz-adv-x="1644" d="M0 0l2 80q10 4 29.5 8t48.5 8q92 18 108 33q16 10 50 68l233 614l277 725h73h53l11 -20l202 -482q33 -78 64 -151.5t59 -145.5q29 -72 52.5 -130t42.5 -103q12 -29 28.5 -70t36.5 -94q23 -66 64 -150q25 -49 34 -57q20 -18 68 -24q25 -2 49.5 -9.5t52.5 -17.5 q6 -37 7 -55v-10.5t-3 -16.5q-43 0 -90 2t-98 6q-53 4 -99 6t-87 2h-80t-53 -2l-199 -10l-57 -2q0 20 1 39.5t3 38.5l129 26q57 14 67 25q12 8 13 27q0 14 -7 30l-47 115l-90 227l-446 2q-12 -29 -37 -96t-66 -178q-23 -63 -22 -84q0 -27 16 -43q14 -10 40 -17.5t63 -13.5 q14 -4 84 -12v-59q0 -16 -2 -27q-35 0 -121 5t-224 16l-49 -9q-43 -8 -83 -11t-81 -3h-20zM549 655q135 -2 216 -4t105 0l29 2q-18 51 -40.5 111.5t-51.5 130.5t-51.5 122t-38.5 87z" />
-<glyph unicode="&#xf032;" horiz-adv-x="1419" d="M0 0l2 94q27 6 68 12q39 6 69.5 13.5t55.5 17.5q8 14 13 26.5t7 24.5q6 33 8 81t2 112l-2 498q-2 39 -3 139t-5 266q-4 88 -12 109q-4 8 -13 10q-20 14 -69 16q-23 0 -115 13l-4 84l262 6l383 12h45q8 2 15.5 2h13.5t21.5 -1t39.5 -1h76q92 0 193 -27q18 -4 42.5 -13 t53.5 -26q63 -31 104 -75q45 -47 66 -105q10 -29 15 -58.5t5 -62.5q0 -72 -32 -129q-31 -57 -95 -104q-16 -12 -54 -30.5t-97 -47.5q178 -41 268 -145q92 -104 92 -236q0 -72 -28 -162q-23 -66 -72 -116q-66 -72 -141 -109q-78 -35 -205 -59q-70 -12 -199 -11l-199 5 q-63 2 -138 -2.5t-163 -10.5q-25 -2 -93 -4t-181 -6zM537 1419q0 -12 1 -31.5t3 -44.5q2 -51 4 -119.5t0 -158.5v-98v-78q25 -4 52.5 -6t57.5 -2q176 0 267 65q90 66 90 225q0 113 -86 187q-84 76 -258 76q-53 0 -131 -15zM545 457l4 -271q0 -16 10 -43q74 -33 141 -32 q131 0 220 41q82 39 122 112q18 37 28.5 82t10.5 100q0 113 -43 181q-59 94 -141 125q-80 33 -250 32q-37 0 -61.5 -3t-40.5 -7v-143v-174z" />
-<glyph unicode="&#xf033;" horiz-adv-x="1054" d="M0 0l18 84q12 4 32 9t46 11q41 10 71 19.5t50 19.5q29 39 41 103l29 137l57 268l12 64q23 119 41.5 178t18.5 63l30 156l17 64l22 135l9 49v39q-45 23 -148 28q-14 0 -23.5 1.5t-17.5 1.5l21 104l325 -14q31 -2 49.5 -2h26.5q35 0 89 2t132 6q41 4 68.5 6t38.5 2 q-2 -10 -3 -19.5t-3 -19.5q-4 -10 -7.5 -22.5t-7.5 -28.5q-49 -16 -110 -31q-66 -16 -105 -31q-12 -33 -24 -88q-6 -25 -9.5 -45t-5.5 -37q-23 -100 -40 -175.5t-27 -129.5l-64 -311l-39 -158l-43 -235l-14 -45v-10.5t2 -16.5q35 -8 64.5 -13t58.5 -9q4 0 21.5 -2.5 l45.5 -6.5q-2 -18 -3 -32.5t-3 -26.5q-2 -6 -4 -16.5t-6 -22.5q-8 0 -14 -1t-10 -1q-18 -2 -28.5 -2h-14.5h-11.5t-17.5 4q-8 0 -45 4t-105 12l-202 2q-61 0 -181 -12q-39 -4 -63.5 -6t-36.5 -2z" />
-<glyph unicode="&#xf034;" d="M0 1151q14 37 34.5 110.5t45.5 184.5q8 33 13 54.5t9 31.5h58q4 -6 6 -10t4 -9q29 -57 41 -71q16 -4 129 -4q35 0 66.5 1t60.5 1l20 2l113 2l213 -2h289l55 10q10 8 27 53q2 6 4 12.5t6 16.5l43 2h10.5t16.5 -2q2 -39 1 -97.5t1 -138.5v-100v-57q0 -14 -1 -27.5t-3 -23.5 q-20 -8 -37 -11.5t-31 -7.5q-27 51 -53 129q-29 82 -37 92q-12 14 -27 21q-10 4 -60 4h-138h-31t-35 -4q-6 -43 -6 -72l2 -151v-334l2 -359v-147q0 -72 10 -117q8 -4 21.5 -8t34.5 -8q4 0 21 -4t50 -13q27 -10 49 -18q4 -20 4 -33.5v-17.5v-11.5t-2 -17.5h-34q-47 0 -88 2 t-76 6t-95.5 6t-148.5 2q-16 0 -57 -4t-109 -10q-29 -2 -45 -3t-24 -1q0 10 -1.5 16.5t-1.5 10.5l-2 24v10q18 31 80 50q94 27 135 49q4 10 6.5 25.5t4.5 31.5q4 68 6 176.5t0 255.5l-4 428q-2 90 -2 142.5t-4 72.5q0 8 -7 15q-4 6 -12 6q-16 4 -63 4h-127q-90 0 -119 -21 q-41 -29 -121 -153q-23 -35 -35 -35q-23 12 -36 23.5t-19 19.5zM1383 1305.5q-5 13.5 14 33.5l184 185q14 12 33 12q14 0 31 -12l184 -185q18 -20 13 -33.5t-34 -13.5h-118v-1048h118q29 0 34 -13.5t-13 -31.5l-184 -187q-16 -12 -33 -12q-16 0 -31 12l-184 187 q-18 18 -13 31.5t31 13.5h121v1048h-121q-27 0 -32 13.5z" />
-<glyph unicode="&#xf035;" horiz-adv-x="1536" d="M0 233q0 18 12 31l187 185q18 20 31.5 14t13.5 -33v-121h1048v121q0 27 13.5 33t33.5 -14l185 -185q12 -12 12 -31q0 -18 -12 -30l-185 -187q-20 -18 -33.5 -13t-13.5 34v119h-1048v-119q0 -29 -13.5 -34t-31.5 13l-187 187q-12 12 -12 30zM0 1233q14 29 33.5 87 t44.5 146q6 27 11 43.5t9 26.5h56q8 -12 10 -14q27 -47 37 -58q2 0 35.5 -1t77.5 -1h90.5h74.5h123l19 2h108h203h416l53 6q12 10 24 46l4.5 9l6.5 13h39h28v-188v-80v-45q0 -12 -1 -21.5t-3 -19.5q-33 -10 -63 -15q-25 37 -52 103q-27 59 -34 74q-12 10 -27 14q-6 2 -42 3 t-85 1h-103.5h-97.5h-28.5t-34.5 -2q-2 -18 -3 -32.5t-1 -24.5l4 -445l-2 -119q0 -61 12 -92q12 -6 53 -12q4 0 20.5 -4t45.5 -10q14 -4 26.5 -7.5t22.5 -7.5q2 -16 3 -25.5t1 -13.5t-1 -10t-1 -14h-33q-94 0 -157 6q-66 6 -236 6q-14 0 -53 -3t-105 -7q-27 -2 -43 -3 t-24 -1q0 16 -2 20v21v8q20 27 73 39q90 20 132 41q4 8 6 19t4 26q0 18 1 70.5t1 120t-1 142t-2 139t-2 107.5t-1 47q0 8 -6 13q-2 2 -13 6q-14 2 -59 2h-123q-20 0 -62 -1t-85 -2t-78 -4t-41 -7q-41 -25 -117 -123q-20 -29 -33 -29q-23 10 -35 19.5t-18 15.5z" />
-<glyph unicode="&#xf036;" d="M0 78v115q0 31 22.5 53t55.5 22h1689q31 0 53.5 -22.5t22.5 -52.5v-115q0 -33 -22.5 -55.5t-53.5 -22.5h-1689q-33 0 -55.5 22.5t-22.5 55.5zM0 500v114q0 33 22.5 55.5t55.5 22.5h1075q31 0 53.5 -22.5t22.5 -55.5v-114q0 -33 -22.5 -55.5t-53.5 -22.5h-1075 q-33 0 -55.5 22.5t-22.5 55.5zM0 922v114q0 33 22.5 55.5t55.5 22.5h1536q31 0 53.5 -22.5t22.5 -55.5v-114q0 -33 -22.5 -54.5t-53.5 -21.5h-1536q-33 0 -55.5 21.5t-22.5 54.5zM0 1343v117q0 31 22.5 53.5t55.5 22.5h921q31 0 53.5 -22.5t22.5 -53.5v-117q0 -31 -22.5 -53 t-53.5 -22h-921q-33 0 -55.5 22.5t-22.5 52.5z" />
-<glyph unicode="&#xf037;" d="M0 78v115q0 31 22.5 53t55.5 22h1689q31 0 53.5 -22.5t22.5 -52.5v-115q0 -33 -22.5 -55.5t-53.5 -22.5h-1689q-33 0 -55.5 22.5t-22.5 55.5zM78 922v114q0 33 21.5 55.5t54.5 22.5h1536q33 0 55 -22.5t22 -55.5v-114q0 -33 -22.5 -54.5t-54.5 -21.5h-1536 q-33 0 -54.5 21.5t-21.5 54.5zM307 500v114q0 33 22.5 55.5t55.5 22.5h1075q31 0 53.5 -22.5t22.5 -55.5v-114q0 -33 -22.5 -55.5t-53.5 -22.5h-1075q-33 0 -55.5 22.5t-22.5 55.5zM385 1343v117q0 31 21.5 53.5t54.5 22.5h921q33 0 55.5 -22.5t22.5 -53.5v-117 q0 -31 -22.5 -53t-55.5 -22h-921q-33 0 -54.5 22.5t-21.5 52.5z" />
-<glyph unicode="&#xf038;" d="M0 78v115q0 31 22.5 53t55.5 22h1689q31 0 53.5 -22.5t22.5 -52.5v-115q0 -33 -22.5 -55.5t-53.5 -22.5h-1689q-33 0 -55.5 22.5t-22.5 55.5zM154 922v114q0 33 22.5 55.5t54.5 22.5h1536q31 0 53.5 -22.5t22.5 -55.5v-114q0 -33 -22.5 -54.5t-53.5 -21.5h-1536 q-33 0 -55 21.5t-22 54.5zM614 500v114q0 33 22.5 55.5t55.5 22.5h1075q31 0 53.5 -22.5t22.5 -55.5v-114q0 -33 -22.5 -55.5t-53.5 -22.5h-1075q-33 0 -55.5 22.5t-22.5 55.5zM768 1343v117q0 31 22.5 53.5t55.5 22.5h921q31 0 53.5 -22.5t22.5 -53.5v-117q0 -31 -22.5 -53 t-53.5 -22h-921q-33 0 -55.5 22.5t-22.5 52.5z" />
-<glyph unicode="&#xf039;" d="M0 78v115q0 31 22.5 53t55.5 22h1689q31 0 53.5 -22.5t22.5 -52.5v-115q0 -33 -22.5 -55.5t-53.5 -22.5h-1689q-33 0 -55.5 22.5t-22.5 55.5zM0 500v114q0 33 22.5 55.5t55.5 22.5h1689q31 0 53.5 -22.5t22.5 -55.5v-114q0 -33 -22.5 -55.5t-53.5 -22.5h-1689 q-33 0 -55.5 22.5t-22.5 55.5zM0 922v114q0 33 22.5 55.5t55.5 22.5h1689q31 0 53.5 -22.5t22.5 -55.5v-114q0 -33 -22.5 -54.5t-53.5 -21.5h-1689q-33 0 -55.5 21.5t-22.5 54.5zM0 1343v117q0 31 22.5 53.5t55.5 22.5h1689q31 0 53.5 -22.5t22.5 -53.5v-117 q0 -31 -22.5 -53t-53.5 -22h-1689q-33 0 -55.5 22.5t-22.5 52.5z" />
-<glyph unicode="&#xf03a;" d="M0 78v115q0 31 22.5 53t55.5 22h153q31 0 53.5 -22.5t22.5 -52.5v-115q0 -33 -22.5 -55.5t-53.5 -22.5h-153q-33 0 -55.5 22.5t-22.5 55.5zM0 500v114q0 33 22.5 55.5t55.5 22.5h153q31 0 53.5 -22.5t22.5 -55.5v-114q0 -33 -22.5 -55.5t-53.5 -22.5h-153 q-33 0 -55.5 22.5t-22.5 55.5zM0 922v114q0 33 22.5 55.5t55.5 22.5h153q31 0 53.5 -22.5t22.5 -55.5v-114q0 -33 -22.5 -54.5t-53.5 -21.5h-153q-33 0 -55.5 21.5t-22.5 54.5zM0 1343v117q0 31 22.5 53.5t55.5 22.5h153q31 0 53.5 -22.5t22.5 -53.5v-117q0 -31 -22.5 -53 t-53.5 -22h-153q-33 0 -55.5 22.5t-22.5 52.5zM461 78v115q0 31 22.5 53t55.5 22h1228q31 0 53.5 -22.5t22.5 -52.5v-115q0 -33 -22.5 -55.5t-53.5 -22.5h-1228q-33 0 -55.5 22.5t-22.5 55.5zM461 500v114q0 33 22.5 55.5t55.5 22.5h1228q31 0 53.5 -22.5t22.5 -55.5v-114 q0 -33 -22.5 -55.5t-53.5 -22.5h-1228q-33 0 -55.5 22.5t-22.5 55.5zM461 922v114q0 33 22.5 55.5t55.5 22.5h1228q31 0 53.5 -22.5t22.5 -55.5v-114q0 -33 -22.5 -54.5t-53.5 -21.5h-1228q-33 0 -55.5 21.5t-22.5 54.5zM461 1343v117q0 31 22.5 53.5t55.5 22.5h1228 q31 0 53.5 -22.5t22.5 -53.5v-117q0 -31 -22.5 -53t-53.5 -22h-1228q-33 0 -55.5 22.5t-22.5 52.5z" />
-<glyph unicode="&#xf03b;" d="M0 756v75q0 39 39 39h213v154q0 31 16.5 37t38.5 -17l215 -215q16 -16 17 -36q0 -18 -17 -35l-215 -215q-23 -23 -39 -17t-16 39v152h-213q-39 0 -39 39zM614 39v1458q0 39 39 39h76q39 0 39 -39v-1458q0 -39 -39 -39h-76q-39 0 -39 39zM922 78v115q0 31 22.5 53t54.5 22 h768q31 0 53.5 -22.5t22.5 -52.5v-115q0 -33 -22.5 -55.5t-53.5 -22.5h-768q-33 0 -55 22.5t-22 55.5zM922 500v114q0 33 22.5 55.5t54.5 22.5h615q31 0 53.5 -22.5t22.5 -55.5v-114q0 -33 -22.5 -55.5t-53.5 -22.5h-615q-33 0 -55 22.5t-22 55.5zM922 922v114 q0 33 22.5 55.5t54.5 22.5h691q33 0 55 -22.5t22 -55.5v-114q0 -33 -22.5 -54.5t-54.5 -21.5h-691q-33 0 -55 21.5t-22 54.5zM922 1343v117q0 31 22.5 53.5t54.5 22.5h537q33 0 55.5 -22.5t22.5 -53.5v-117q0 -31 -22.5 -53t-55.5 -22h-537q-33 0 -55 22.5t-22 52.5z" />
-<glyph unicode="&#xf03c;" d="M0 78v115q0 31 22.5 53t53.5 22h768q33 0 55.5 -22.5t22.5 -52.5v-115q0 -33 -22.5 -55.5t-55.5 -22.5h-768q-31 0 -53.5 22.5t-22.5 55.5zM0 500v114q0 33 22.5 55.5t53.5 22.5h614q33 0 55.5 -22.5t22.5 -55.5v-114q0 -33 -22.5 -55.5t-55.5 -22.5h-614 q-31 0 -53.5 22.5t-22.5 55.5zM0 922v114q0 33 22.5 55.5t53.5 22.5h692q31 0 53.5 -22.5t22.5 -55.5v-114q0 -33 -22.5 -54.5t-53.5 -21.5h-692q-31 0 -53.5 21.5t-22.5 54.5zM0 1343v117q0 31 22.5 53.5t53.5 22.5h538q31 0 53.5 -22.5t22.5 -53.5v-117q0 -31 -22.5 -53 t-53.5 -22h-538q-31 0 -53.5 22.5t-22.5 52.5zM1075 39v1458q0 39 39 39h76q16 0 27.5 -11.5t11.5 -27.5v-1458q0 -16 -11.5 -27.5t-27.5 -11.5h-76q-39 0 -39 39zM1305 743q0 16 14 35l217 215q23 23 38 17t15 -39v-152h215q16 0 27.5 -11t11.5 -28v-75q0 -16 -11 -27.5 t-28 -11.5h-215v-154q0 -31 -15 -38t-38 15l-217 218q-14 18 -14 36z" />
-<glyph unicode="&#xf03d;" d="M0 324v768q0 47 18.5 89t50 72.5t73.5 49t89 18.5h768q47 0 89 -18.5t73 -49t49.5 -72.5t18.5 -89v-240l483 471q23 23 55 23q13 -1 29 -7q47 -20 47 -69v-1127q0 -49 -47 -69q-16 -6 -29 -6q-33 0 -55 22l-483 471v-237q0 -47 -18.5 -89t-49.5 -74t-72.5 -50.5 t-89.5 -18.5h-768q-47 0 -89 18.5t-73.5 50.5t-50 73.5t-18.5 89.5z" />
-<glyph unicode="&#xf03e;" d="M0 115v1306q0 47 34 81t81 34h1614q47 0 80.5 -34t33.5 -81v-1306q0 -47 -33.5 -81t-80.5 -34h-1614q-47 0 -81 34t-34 81zM154 154h1536v1228h-1536v-1228zM307 307v105l277 360l188 -156l354 537l410 -424v-422h-1229zM307 1073q0 66 45 111t111 45q63 0 108 -45 t45 -111q0 -63 -45 -108t-108 -45q-66 0 -111 45t-45 108z" />
-<glyph unicode="&#xf040;" horiz-adv-x="1536" d="M0 0l137 418l867 866l280 -280l-866 -867zM287 407.5q0 -12.5 10 -22.5q8 -8 22 -8q12 0 21 8l690 690q20 20 0 43q-10 10 -22.5 10t-20.5 -10l-690 -688q-10 -10 -10 -22.5zM1102 1382l119 119q35 35 84 35t84 -35l57 -55l55 -57q35 -35 35 -84.5t-35 -83.5l-119 -119z " />
-<glyph unicode="&#xf041;" horiz-adv-x="1128" d="M0 1001q0 117 44 220.5t121 180.5t180.5 121t219.5 44q117 0 219.5 -44t179 -121t120.5 -180.5t44 -220.5q0 -84 -24.5 -159.5t-65.5 -143.5l-379 -661q-41 -68 -95 -68t-93 68l-381 663q-41 68 -65.5 143t-24.5 158zM285 1001q0 -57 21.5 -108t60.5 -89t89 -59.5 t109 -21.5q57 0 108.5 21.5t89.5 59.5t59.5 89t21.5 108t-21.5 108.5t-59.5 90.5t-89 60.5t-109 21.5q-59 0 -109 -21.5t-89 -60.5t-60.5 -90t-21.5 -109z" />
-<glyph unicode="&#xf042;" horiz-adv-x="1597" d="M0 768q0 166 62.5 311.5t171 254t254 171t311.5 62.5t311 -62.5t254 -171t171 -254t62 -311.5t-62 -311.5t-171 -254t-254 -171t-311 -62.5t-311.5 62.5t-254 171t-171 254t-62.5 311.5zM240 768q0 -117 44 -218t119.5 -177t177 -120t218.5 -44v1118q-117 0 -218.5 -44 t-177 -120t-119.5 -177t-44 -218z" />
-<glyph unicode="&#xf043;" horiz-adv-x="1130" d="M0 535q0 86 23.5 161.5t66.5 141.5q20 33 69.5 99.5t108 154.5t113.5 193.5t90 217.5q10 35 37 51.5t57 12.5q31 4 57.5 -12.5t36.5 -51.5q33 -113 89.5 -218t115 -193t107.5 -154.5t69 -99.5q43 -66 66.5 -141.5t23.5 -161.5q0 -117 -44 -220.5t-120.5 -180.5t-180 -121 t-220.5 -44t-220 44t-180 121t-121 180.5t-44 220.5zM248 410q0 -59 41 -100.5t100 -41.5t100 41t41 101q0 43 -22 75q-6 8 -18.5 24.5t-26.5 39t-28.5 48.5t-22.5 54q-4 20 -23 17q-18 4 -24 -17q-8 -29 -21.5 -54.5t-28 -48t-27 -38.5t-18.5 -25q-23 -33 -22 -75z" />
-<glyph unicode="&#xf044;" d="M0 307v922q0 63 24.5 119.5t65.5 97.5t97.5 65.5t119.5 24.5h1075q4 0 10.5 -1t10.5 -1l-191 -191h-905q-47 0 -80.5 -33.5t-33.5 -80.5v-922q0 -47 33.5 -80.5t80.5 -33.5h1075q47 0 81 33.5t34 80.5v445l193 192v-637q0 -63 -25 -118.5t-67 -97.5t-97 -66.5t-119 -24.5 h-1075q-63 0 -119.5 24.5t-97.5 66.5t-65.5 97.5t-24.5 118.5zM631 324l108 329l652 652l221 -222l-651 -651zM866 629q6 -8 17 -8q10 0 16 8l512 510q18 18 0 35q-18 16 -35 0l-510 -510q-18 -18 0 -35zM1501 1415l92 94q29 29 68 29t65 -29l46 -45l45 -45 q27 -29 27.5 -66.5t-27.5 -66.5l-95 -92z" />
-<glyph unicode="&#xf045;" d="M0 307v922q0 63 24.5 119.5t65.5 97.5t97.5 65.5t119.5 24.5h836q-6 -31 -6 -63v-52q-164 -20 -310 -78h-520q-47 0 -80.5 -33.5t-33.5 -80.5v-922q0 -47 33.5 -80.5t80.5 -33.5h1075q47 0 81 33.5t34 80.5v111q16 10 31.5 21.5t32.5 27.5l129 127v-287q0 -63 -25 -118.5 t-67 -97.5t-97 -66.5t-119 -24.5h-1075q-63 0 -119.5 24.5t-97.5 66.5t-65.5 97.5t-24.5 118.5zM385 388.5v37.5q0 166 56.5 312.5t173 256t293.5 173t419 65.5v231q0 57 28.5 69.5t69.5 -28.5l392 -391q27 -25 26 -65q0 -39 -26 -64l-392 -391q-41 -41 -69.5 -28.5 t-28.5 69.5v260q-207 0 -364.5 -43t-266 -116.5t-170 -174t-77.5 -215.5q-4 -27 -31 -27q-25 0 -29 27q-4 23 -4 42.5z" />
-<glyph unicode="&#xf046;" d="M0 307v922q0 63 24.5 119.5t65.5 97.5t97.5 65.5t119.5 24.5h1075q27 0 52 -6l-187 -187h-940q-47 0 -80.5 -33.5t-33.5 -80.5v-922q0 -47 33.5 -80.5t80.5 -33.5h1075q47 0 81 33.5t34 80.5v326l193 192v-518q0 -63 -25 -118.5t-67 -97.5t-97 -66.5t-119 -24.5h-1075 q-63 0 -119.5 24.5t-97.5 66.5t-65.5 97.5t-24.5 118.5zM385 966.5q0 32.5 23 55.5l98 98q23 23 55.5 23t54.5 -23l340 -340l654 656q23 23 56.5 22.5t55.5 -22.5l99 -99q23 -23 22.5 -55.5t-22.5 -54.5l-711 -711l-98 -98q-23 -23 -55.5 -23t-55.5 23l-100 98l-393 395 q-23 23 -23 55.5z" />
-<glyph unicode="&#xf047;" horiz-adv-x="1597" d="M0 768q0 25 18 43l256 256q27 27 45.5 19.5t18.5 -46.5v-170h360v359h-172q-37 0 -45 18.5t19 44.5l256 256q18 18 43 19q25 0 43 -19l256 -256q27 -27 19.5 -45t-46.5 -18h-172v-359h361v170q0 39 18 46.5t45 -19.5l256 -256q18 -18 18 -43t-18 -43l-256 -256 q-27 -27 -45 -19.5t-18 46.5v176h-361v-365h172q39 0 47 -18.5t-20 -44.5l-256 -256q-18 -18 -43 -19q-25 0 -43 19l-256 256q-27 27 -20 45t46 18h172v365h-360v-176q0 -39 -18.5 -46.5t-45.5 19.5l-256 256q-18 18 -18 43z" />
-<glyph unicode="&#xf048;" horiz-adv-x="1075" d="M0 76q0 -31 22.5 -53.5t53.5 -22.5h153q33 0 55.5 22.5t22.5 53.5v1382q0 33 -22.5 55.5t-55.5 22.5h-153q-31 0 -53.5 -22.5t-22.5 -55.5v-1382zM307.5 768q-0.5 27 16.5 43l653 707q14 18 41 18q6 0 22 -4q35 -18 35 -59v-1412q0 -41 -35 -57q-39 -14 -63 14l-653 705 q-16 18 -16.5 45z" />
-<glyph unicode="&#xf049;" d="M0 76q0 -31 22.5 -53.5t53.5 -22.5h153q33 0 55.5 22.5t22.5 53.5v1382q0 33 -22.5 55.5t-55.5 22.5h-153q-31 0 -53.5 -22.5t-22.5 -55.5v-1382zM307.5 768q-0.5 27 16.5 43l653 707q14 18 41 18q6 0 22 -4q35 -18 35 -59v-1412q0 -41 -35 -57q-39 -14 -63 14l-653 705 q-16 18 -16.5 45zM1075.5 768q-0.5 27 16.5 43l653 707q14 18 41 18q6 0 22 -4q35 -18 35 -59v-1412q0 -41 -35 -57q-39 -14 -63 14l-653 705q-16 18 -16.5 45z" />
-<glyph unicode="&#xf04a;" horiz-adv-x="1536" d="M0 767q0 26 16 44l654 707q14 18 41 18q10 0 22 -6q35 -14 35 -57v-1412q0 -41 -35 -57q-37 -16 -63 14l-654 705q-16 18 -16 44zM768 767q0 26 16 44l654 707q14 18 41 18q10 0 22 -6q35 -14 35 -57v-1412q0 -41 -35 -57q-37 -16 -63 14l-654 705q-16 18 -16 44z" />
-<glyph unicode="&#xf04b;" horiz-adv-x="1349" d="M0 70v1396q0 39 35 60q37 23 69 0l1211 -697q35 -25 35 -61q0 -37 -35 -61l-1211 -697q-16 -10 -34 -10t-35 10q-35 20 -35 60z" />
-<glyph unicode="&#xf04c;" horiz-adv-x="1536" d="M0 70v1396q0 29 20.5 49.5t49.5 20.5h489q29 0 49.5 -20.5t20.5 -49.5v-1396q0 -29 -20.5 -49.5t-49.5 -20.5h-489q-29 0 -49.5 20.5t-20.5 49.5zM907 70v1396q0 29 20.5 49.5t49.5 20.5h489q29 0 49.5 -20.5t20.5 -49.5v-1396q0 -29 -20.5 -49.5t-49.5 -20.5h-489 q-29 0 -49.5 20.5t-20.5 49.5z" />
-<glyph unicode="&#xf04d;" horiz-adv-x="1536" d="M0 70v1396q0 29 20.5 49.5t49.5 20.5h1396q29 0 49.5 -20.5t20.5 -49.5v-1396q0 -29 -20.5 -49.5t-49.5 -20.5h-1396q-29 0 -49.5 20.5t-20.5 49.5z" />
-<glyph unicode="&#xf04e;" horiz-adv-x="1536" d="M0 61v1414q0 39 35 57q39 14 63 -14l654 -705q16 -18 16 -45t-16 -45l-654 -705q-16 -18 -41 -18q-6 0 -22 4q-35 16 -35 57zM768 61v1414q0 39 35 57q39 14 63 -14l654 -705q16 -18 16 -45t-16 -45l-654 -705q-16 -18 -41 -18q-6 0 -22 4q-35 16 -35 57z" />
-<glyph unicode="&#xf050;" d="M0 61v1414q0 39 35 57q39 14 63 -14l654 -705q16 -18 16 -45t-16 -45l-654 -705q-16 -18 -41 -18q-6 0 -22 4q-35 16 -35 57zM768 61v1414q0 39 35 57q39 14 63 -14l654 -705q16 -18 16 -45t-16 -45l-654 -705q-16 -18 -41 -18q-6 0 -22 4q-35 16 -35 57zM1536 76 q0 -31 22.5 -53.5t53.5 -22.5h153q33 0 55.5 22.5t22.5 53.5v1382q0 33 -22.5 55.5t-55.5 22.5h-153q-31 0 -53.5 -22.5t-22.5 -55.5v-1382z" />
-<glyph unicode="&#xf051;" horiz-adv-x="1075" d="M0 61v1414q0 39 35 57q39 14 63 -14l654 -705q16 -18 16 -45t-16 -45l-654 -705q-16 -18 -41 -18q-6 0 -22 4q-35 16 -35 57zM768 76q0 -31 22.5 -53.5t53.5 -22.5h153q33 0 55.5 22.5t22.5 53.5v1382q0 33 -22.5 55.5t-55.5 22.5h-153q-31 0 -53.5 -22.5t-22.5 -55.5 v-1382z" />
-<glyph unicode="&#xf052;" horiz-adv-x="1536" d="M0 70v198q0 29 20.5 49.5t49.5 20.5h1396q29 0 49.5 -20.5t20.5 -49.5v-198q0 -29 -20.5 -49.5t-49.5 -20.5h-1396q-29 0 -49.5 20.5t-20.5 49.5zM6 594q-18 43 14 76l699 698q20 20 49 20.5t49 -20.5l699 -698q33 -33 14 -76q-16 -43 -64 -43h-1396q-47 0 -64 43z" />
-<glyph unicode="&#xf053;" horiz-adv-x="964" d="M0 765q0 38 29 66l671 674q29 29 68 29t68 -29l100 -100q29 -29 29 -68t-29 -67l-508 -510l508 -500q29 -29 29 -66.5t-29 -66.5l-100 -102q-29 -29 -68 -29t-68 29l-671 673q-29 29 -29 67z" />
-<glyph unicode="&#xf054;" horiz-adv-x="964" d="M0 194.5q0 38.5 29 67.5l508 508l-508 502q-29 29 -29 66.5t29 66.5l100 102q29 29 68 29t67 -29l672 -673q29 -29 29 -68t-29 -68l-672 -671q-29 -29 -67.5 -29t-67.5 29l-100 100q-29 29 -29 67.5z" />
-<glyph unicode="&#xf055;" horiz-adv-x="1597" d="M0 768q0 166 62.5 311.5t171 254t254 171t311.5 62.5t311 -62.5t254 -171t171 -254t62 -311.5t-62 -311.5t-171 -254t-254 -171t-311 -62.5t-311.5 62.5t-254 171t-171 254t-62.5 311.5zM338 688q0 -33 33 -33h313v-344q0 -33 33 -32h164q33 0 32 32v344h314 q12 0 22.5 9.5t10.5 23.5v158q0 14 -10.5 23.5t-22.5 9.5h-314v346q0 33 -32 32h-164q-33 0 -33 -32v-346h-313q-33 0 -33 -33v-158z" />
-<glyph unicode="&#xf056;" horiz-adv-x="1597" d="M0 768q0 166 62.5 311.5t171 254t254 171t311.5 62.5t311 -62.5t254 -171t171 -254t62 -311.5t-62 -311.5t-171 -254t-254 -171t-311 -62.5t-311.5 62.5t-254 171t-171 254t-62.5 311.5zM338 688q0 -33 33 -33h856q12 0 22.5 9.5t10.5 23.5v158q0 14 -10.5 23.5 t-22.5 9.5h-856q-33 0 -33 -33v-158z" />
-<glyph unicode="&#xf057;" horiz-adv-x="1597" d="M0 770q0 154 58.5 301.5t174.5 263.5q117 117 264.5 175.5t301 58.5t301 -58.5t264.5 -175.5t175 -264t58 -301t-58 -301t-175 -264t-264.5 -175.5t-301 -58.5t-301 58.5t-264.5 175.5t-175 264t-58 301zM385 505q0 -13 10 -24l115 -116q10 -10 23.5 -10.5t23.5 10.5 l244 243l219 -221q10 -10 23.5 -10t23.5 10l113 113q23 23 0 47l-222 219l246 246q23 23 0 47l-117 115q-25 25 -47 0l-243 -244l-222 221q-10 10 -23 10t-24 -10l-110 -113q-25 -23 0 -47l219 -219l-242 -244q-10 -10 -10 -23z" />
-<glyph unicode="&#xf058;" horiz-adv-x="1597" d="M0 768q0 166 62.5 311.5t171 254t254 171t311.5 62.5t311 -62.5t254 -171t171 -254t62 -311.5t-62 -311.5t-171 -254t-254 -171t-311 -62.5t-311.5 62.5t-254 171t-171 254t-62.5 311.5zM240 718q0 -20 14 -34l319 -319q14 -14 38 -24.5t44 -10.5h56q20 0 43.5 10 t38.5 25l550 550q14 14 14.5 34t-14.5 34l-104 107q-16 14 -35.5 14t-34.5 -14l-452 -453q-14 -14 -33.5 -14t-34.5 14l-221 221q-14 14 -33.5 14t-36.5 -14l-104 -106q-14 -14 -14 -34z" />
-<glyph unicode="&#xf059;" horiz-adv-x="1597" d="M0 768q0 166 62.5 311.5t171 254t254 171t311.5 62.5t311 -62.5t254 -171t171 -254t62 -311.5t-62 -311.5t-171 -254t-254 -171t-311 -62.5t-311.5 62.5t-254 171t-171 254t-62.5 311.5zM532 1100l95 -115q4 -8 20 -12q12 0 23 6l10 8t27.5 17.5t39 16.5t46.5 7 q41 0 69.5 -22.5t28.5 -57.5q0 -37 -24.5 -64.5t-61.5 -60.5q-23 -18 -46.5 -40.5t-43 -51.5t-31.5 -63.5t-12 -79.5v-64q0 -12 9 -21t21 -9h164q12 0 20.5 9t8.5 21v51q0 39 25.5 66.5t62.5 60.5q25 20 49.5 46t46 57t34.5 69.5t13 90.5q0 68 -27.5 121t-73.5 87.5 t-103.5 53t-114.5 18.5q-63 0 -113.5 -16.5t-85.5 -35.5t-53 -35.5t-20 -18.5q-16 -16 -3 -39zM672 252q0 -12 9 -21.5t21 -9.5h164q12 0 20.5 9.5t8.5 21.5v156q0 12 -8 21t-21 9h-164q-12 0 -21 -9t-9 -21v-156z" />
-<glyph unicode="&#xf05a;" horiz-adv-x="1597" d="M0 768q0 166 62.5 311.5t171 254t254 171t311.5 62.5t311 -62.5t254 -171t171 -254t62 -311.5t-62 -311.5t-171 -254t-254 -171t-311 -62.5t-311.5 62.5t-254 171t-171 254t-62.5 311.5zM573 858q0 -29 29 -29h86v-409h-78q-12 0 -21 -8.5t-9 -20.5v-139q0 -12 9 -21.5 t21 -9.5h402q12 0 20 9.5t8 21.5v139q0 29 -28 29h-78v577q0 12 -8 21.5t-21 9.5h-303q-12 0 -20.5 -9t-8.5 -22v-139zM686 1151q0 -12 9.5 -21.5t21.5 -9.5h188q12 0 20.5 9.5t8.5 21.5v166q0 29 -29 29h-188q-12 0 -21.5 -8.5t-9.5 -20.5v-166z" />
-<glyph unicode="&#xf05b;" horiz-adv-x="1536" d="M0 692v154q0 16 11.5 26.5t27.5 10.5h164q18 84 59 158.5t99.5 133t133 99.5t158.5 59v164q0 39 39 39h154q16 0 26.5 -11.5t10.5 -27.5v-164q84 -18 158.5 -59t133 -99.5t99.5 -133t59 -158.5h164q16 0 27.5 -10.5t11.5 -26.5v-154q0 -39 -39 -39h-164 q-18 -84 -59 -158.5t-99.5 -133t-133 -99.5t-158.5 -59v-164q0 -16 -10.5 -27.5t-26.5 -11.5h-154q-39 0 -39 39v164q-84 18 -158.5 59t-133 99.5t-99.5 133t-59 158.5h-164q-39 0 -39 39zM365 653q29 -106 105.5 -183t182.5 -105v174q0 16 11.5 26t27.5 10h154 q16 0 26.5 -10t10.5 -26v-174q106 29 183 105.5t105 182.5h-172q-39 0 -38 39v154q0 16 11 26.5t27 10.5h172q-29 106 -105.5 183t-182.5 105v-172q0 -16 -10.5 -27t-26.5 -11h-154q-39 0 -39 38v172q-106 -29 -183 -105.5t-105 -182.5h174q16 0 26 -10.5t10 -26.5v-154 q0 -16 -10 -27.5t-26 -11.5h-174z" />
-<glyph unicode="&#xf05c;" horiz-adv-x="1597" d="M0 768q0 166 62.5 311.5t171 254t254 171t311.5 62.5t311 -62.5t254 -171t171 -254t62 -311.5t-62 -311.5t-171 -254t-254 -171t-311 -62.5t-311.5 62.5t-254 171t-171 254t-62.5 311.5zM240 768q0 -117 44 -218t119.5 -177t177 -120t218.5 -44t218 44t177 120t120 177 t44 218t-44 218t-120 177t-177.5 120t-217.5 44q-117 0 -218.5 -44t-177 -120t-119.5 -177t-44 -218zM451 573.5q0 16.5 12 28.5l166 166l-166 166q-12 12 -12 28.5t12 28.5l112 113q29 29 58 0l166 -166l166 166q29 29 57 0l113 -113q29 -29 0 -57l-166 -166l166 -166 q29 -29 0 -57l-113 -113q-12 -12 -28.5 -12t-28.5 12l-166 166l-166 -166q-12 -12 -28.5 -12t-29.5 12l-112 113q-12 12 -12 28.5z" />
-<glyph unicode="&#xf05d;" horiz-adv-x="1597" d="M0 768q0 166 62.5 311.5t171 254t254 171t311.5 62.5t311 -62.5t254 -171t171 -254t62 -311.5t-62 -311.5t-171 -254t-254 -171t-311 -62.5t-311.5 62.5t-254 171t-171 254t-62.5 311.5zM240 768q0 -117 44 -218t119.5 -177t177 -120t218.5 -44t218 44t177 120t120 177 t44 218t-44 218t-120 177t-177.5 120t-217.5 44q-117 0 -218.5 -44t-177 -120t-119.5 -177t-44 -218zM332 717.5q0 17.5 10 27.5l115 115q10 10 27.5 10t27.5 -10l178 -180q29 -25 58 0l337 340q10 10 27.5 10t28.5 -10l114 -115q10 -10 10.5 -27.5t-10.5 -27.5l-409 -410 q-12 -12 -32.5 -20t-37.5 -8h-114q-16 0 -37 8t-33 20l-250 250q-10 10 -10 27.5z" />
-<glyph unicode="&#xf05e;" horiz-adv-x="1597" d="M0 768q0 166 62.5 311.5t171 254t254 171t311.5 62.5t311 -62.5t254 -171t171 -254t62 -311.5t-62 -311.5t-171 -254t-254 -171t-311 -62.5t-311.5 62.5t-254 171t-171 254t-62.5 311.5zM240 768q0 -84 23.5 -160t66.5 -141l770 770q-66 43 -141.5 66.5t-159.5 23.5 q-117 0 -218.5 -44t-177 -120t-119.5 -177t-44 -218zM498 297q66 -41 141.5 -64.5t159.5 -23.5q117 0 218 44t177 120t120 177t44 218q0 84 -23.5 160t-64.5 141z" />
-<glyph unicode="&#xf060;" horiz-adv-x="1536" d="M0 768q0 39 29 68l671 673q29 29 68 29t68 -29l100 -100q29 -29 29 -67.5t-29 -67.5l-338 -338h842q41 0 68.5 -27.5t27.5 -68.5v-144q0 -39 -27.5 -66.5t-66.5 -27.5h-844l338 -338q29 -29 29 -67.5t-29 -67.5l-100 -100q-29 -29 -68 -29t-68 29l-671 671q-29 29 -29 68 z" />
-<glyph unicode="&#xf061;" horiz-adv-x="1536" d="M0 698v144q0 39 27.5 66.5t66.5 27.5h844l-338 338q-29 29 -29 67.5t29 67.5l100 100q29 29 68 29t68 -29l671 -673q29 -29 29 -67t-29 -67l-671 -673q-29 -29 -68 -29t-68 29l-100 100q-29 29 -29 68t29 67l338 338h-844q-39 0 -66.5 27.5t-27.5 68.5z" />
-<glyph unicode="&#xf062;" horiz-adv-x="1536" d="M-1 768q-1 39 28 68l673 671q29 29 68 29t68 -29l671 -671q29 -29 29 -68t-29 -68l-100 -100q-29 -29 -66.5 -29t-66.5 29l-340 338v-844q0 -39 -27.5 -66.5t-66.5 -27.5h-144q-41 0 -67.5 27.5t-26.5 66.5v844l-338 -338q-29 -29 -67.5 -29t-67.5 29l-100 100 q-29 29 -30 68z" />
-<glyph unicode="&#xf063;" horiz-adv-x="1536" d="M0 766q0 39 29 68l100 100q29 29 68 29t67 -29l338 -338v844q0 39 27.5 66.5t66.5 27.5h144q41 0 67.5 -27.5t26.5 -66.5v-844l340 338q29 29 66.5 29t66.5 -29l102 -100q29 -29 29 -68t-29 -68l-673 -671q-29 -29 -68 -29t-68 29l-671 671q-29 29 -29 68z" />
-<glyph unicode="&#xf064;" d="M0 135q0 209 71.5 393.5t218 322.5t371 219t531.5 83v293q0 72 36 87t89 -36l491 -493q35 -33 35 -82q0 -47 -35 -82l-491 -494q-51 -51 -88 -35.5t-37 86.5v330q-262 -2 -461 -56.5t-336 -147.5t-215 -219t-98 -271q-4 -33 -37 -33h-2q-33 0 -37 33q-6 51 -6 102z" />
-<glyph unicode="&#xf065;" horiz-adv-x="1536" d="M0 86v522q0 53 26.5 64.5t65.5 -25.5l166 -166l274 275q12 12 31 12t33 -12l160 -160q12 -14 12 -32.5t-12 -31.5l-275 -274l166 -166q39 -39 27 -65.5t-66 -26.5h-520q-37 0 -61 25q-27 27 -27 61zM768 972.5q0 18.5 12 31.5l275 274l-166 166q-39 39 -27 65.5t66 26.5 h520q37 0 61 -25q27 -27 27 -61v-522q0 -53 -26.5 -64.5t-65.5 25.5l-166 166l-274 -273q-12 -14 -31 -14t-33 14l-160 158q-12 14 -12 32.5z" />
-<glyph unicode="&#xf066;" horiz-adv-x="1536" d="M0 202.5q0 18.5 12 33.5l275 272l-166 166q-39 39 -27 65.5t66 26.5h520q38 0 61 -25q27 -27 27 -61v-522q0 -53 -26.5 -64.5t-65.5 27.5l-166 166l-274 -275q-12 -14 -31 -14t-33 14l-160 160q-12 12 -12 30.5zM768 854v522q0 53 26.5 64.5t65.5 -27.5l166 -166l274 275 q12 14 31 14t33 -14l160 -160q12 -12 12 -30.5t-12 -33.5l-275 -272l166 -166q39 -39 27 -65.5t-66 -26.5h-520q-41 0 -61 25q-27 27 -27 61z" />
-<glyph unicode="&#xf067;" horiz-adv-x="1536" d="M0 696v144q0 39 27.5 66.5t66.5 27.5h508v506q0 41 26.5 68.5t67.5 27.5h144q39 0 66.5 -27.5t27.5 -66.5v-508h508q39 0 66.5 -27.5t27.5 -66.5v-144q0 -39 -27.5 -66.5t-66.5 -27.5h-508v-506q0 -41 -27.5 -68.5t-66.5 -27.5h-144q-39 0 -66.5 27.5t-27.5 66.5v508 h-506q-41 0 -68.5 26.5t-27.5 67.5z" />
-<glyph unicode="&#xf068;" horiz-adv-x="1536" d="M0 696v144q0 39 27.5 66.5t66.5 27.5h1348q39 0 66.5 -27.5t27.5 -66.5v-144q0 -39 -27.5 -66.5t-66.5 -27.5h-1346q-41 0 -68.5 26.5t-27.5 67.5z" />
-<glyph unicode="&#xf069;" horiz-adv-x="1427" d="M2 1018q-10 37 10 72l72 124q20 35 58 44.5t73 -9.5l332 -192v383q0 41 27.5 68.5t68.5 27.5h141q41 0 69 -27.5t28 -66.5v-385l331 192q35 18 73 9t58 -44l70 -124q20 -35 11 -72t-44 -57l-333 -193l333 -193q35 -20 44.5 -57t-9.5 -72l-72 -124q-20 -35 -58 -44.5 t-73 9.5l-331 192v-383q0 -41 -28 -68.5t-69 -27.5h-141q-41 0 -68.5 27.5t-27.5 66.5v385l-332 -192q-35 -20 -73 -10t-58 45l-72 124q-18 35 -9 72t44 57l334 193l-334 193q-35 20 -45 57z" />
-<glyph unicode="&#xf06a;" horiz-adv-x="1597" d="M0 768q0 166 62.5 311.5t171 254t254 171t311.5 62.5t311 -62.5t254 -171t171 -254t62 -311.5t-62 -311.5t-171 -254t-254 -171t-311 -62.5t-311.5 62.5t-254 171t-171 254t-62.5 311.5zM672 1274l14 -739q4 -29 31 -29h162q12 0 21 8t9 21l17 739q0 10 -8 22q-8 8 -23 9 h-193q-14 0 -22 -9q-8 -12 -8 -22zM680 236q0 -12 8 -21.5t21 -9.5h180q12 0 21.5 9t9.5 22v174q0 12 -9.5 21t-21.5 9h-180q-12 0 -20.5 -9t-8.5 -21v-174z" />
-<glyph unicode="&#xf06b;" horiz-adv-x="1880" d="M0 588v391q0 16 11.5 27.5t27.5 11.5h539q-57 0 -107.5 21.5t-87.5 58.5t-58.5 87t-21.5 107t21.5 107.5t58.5 87.5t87 58.5t108 21.5q61 0 115.5 -24.5t88.5 -69.5l158 -203l158 203q35 45 89 69.5t116 24.5q57 0 107 -21.5t87 -58.5t58.5 -87t21.5 -108 q0 -57 -21.5 -107t-58.5 -87t-87 -58.5t-107 -21.5h538q16 0 27.5 -11.5t11.5 -27.5v-391q0 -16 -11 -27.5t-28 -11.5h-117v-432q0 -47 -34.5 -82t-83.5 -35h-1332q-49 0 -82.5 35t-33.5 82v432h-119q-16 0 -27.5 11t-11.5 28zM461 1292q0 -49 33.5 -82.5t83.5 -33.5h237 l-151 196q-10 10 -32 24.5t-54 14.5q-49 0 -83 -35t-34 -84zM743 221q0 -33 24 -56.5t56 -23.5h234q33 0 56.5 23.5t23.5 56.5v797h-394v-797zM1065 1176h238q49 0 82.5 33.5t33.5 82.5t-33.5 84t-82.5 35q-33 0 -54.5 -14.5t-31.5 -24.5z" />
-<glyph unicode="&#xf06c;" horiz-adv-x="1916" d="M6 135q20 53 56 89t71 65q29 23 50.5 43t27.5 43q2 6 0 12t-10 25q-6 12 -11.5 27.5t-9.5 35.5q-25 160 13 295t120 242.5t195 185.5t235 121q82 29 179.5 34t203.5 7q61 0 127 2t127.5 11t113.5 28.5t85 54.5q20 20 38.5 41t38 36t43 24.5t58.5 9.5q23 0 42.5 -11.5 t29.5 -31.5q66 -133 83 -281.5t-18 -322.5q-90 -487 -579 -740q-231 -123 -467 -123q-154 0 -307 54q-23 8 -45.5 20t-44.5 25q-29 16 -57.5 30.5t-49.5 14.5q-10 -2 -23.5 -17.5t-27.5 -35t-26.5 -41t-20.5 -33.5q-14 -23 -26.5 -41.5t-22.5 -32.5q-25 -31 -64 -31h-4 q-29 2 -49.5 12.5t-33.5 24.5t-20.5 28.5t-9.5 22.5q-27 37 -10 78zM401 507.5q2 -32.5 29 -54.5q20 -18 51 -19q37 0 62 27q90 102 181 175t191.5 117t212 62t242.5 14q33 -4 57.5 19.5t26.5 56.5q2 35 -20.5 58.5t-57.5 25.5q-147 6 -277 -16.5t-247 -73.5t-223.5 -133 t-206.5 -199q-23 -27 -21 -59.5z" />
-<glyph unicode="&#xf06d;" horiz-adv-x="1445" d="M0 442q0 123 63.5 254t174.5 238q10 16 33.5 13t31.5 -21q4 -16 0 -29q-8 -35 -14 -84t-4 -101.5t13 -101.5t38 -84q31 -37 78 -49q-49 154 -40 293t49 259t101.5 217t118.5 167t98 106.5t43 39.5q25 18 47 0q10 -8 13.5 -21.5t-0.5 -24.5q0 -2 -15.5 -42.5t-21.5 -102 t7 -134.5t71 -136q37 -43 67.5 -85t52 -93.5t34 -120t12.5 -162.5q0 -33 -31 -39q-12 -2 -24.5 4t-16.5 19q-14 31 -42 49t-63 18q-47 0 -79.5 -33.5t-32.5 -81.5q0 -121 156 -120q98 0 153 63q37 43 53.5 101.5t20.5 114.5t2 102.5t-4 62.5q-6 12 0 29q8 18 31.5 21 t35.5 -13q111 -106 173.5 -237.5t62.5 -254.5q0 -109 -51.5 -195.5t-145.5 -148t-228 -95.5t-298 -34t-298 34t-228.5 95.5t-145.5 148.5t-51 195z" />
-<glyph unicode="&#xf06e;" d="M0 722q0 44 25 83q78 125 178 224t216 168t242.5 104.5t260.5 35.5q135 0 262 -35.5t241.5 -103.5t215 -167t180.5 -226q23 -39 22.5 -83t-22.5 -81q-80 -127 -180.5 -226.5t-215 -167t-241.5 -103t-262 -35.5q-133 0 -260 35.5t-243 104t-216.5 168t-177.5 224.5 q-25 37 -25 81zM154 723q66 -104 149.5 -190.5t182 -145.5t208 -92t228.5 -33t228.5 33t207.5 92t182 145t150 191q-80 129 -187.5 226.5t-236.5 156.5q51 -59 79.5 -133t28.5 -160q0 -96 -35.5 -179t-99 -146.5t-147.5 -99.5t-179 -36q-96 0 -179 36t-146.5 99.5t-99 146.5 t-35.5 179q0 78 25.5 148.5t68.5 128.5q-117 -59 -217.5 -152.5t-175.5 -214.5zM614 813q0 -23 16.5 -40t41.5 -17t41 17.5t16 39.5q0 78 53.5 131t130.5 53q25 0 41.5 17.5t16.5 40.5q0 25 -16.5 41t-41.5 16q-61 0 -116.5 -23.5t-95 -63.5t-63.5 -95.5t-24 -116.5z" />
-<glyph unicode="&#xf070;" d="M0 767q0 44 25 83q78 125 178 224.5t216 168t242.5 104t260.5 35.5q53 0 105 -7t103 -17l89 158q8 14 24 18q12 6 29 -2l133 -76q14 -8 19.5 -23.5t-3.5 -29.5l-774 -1383q-6 -14 -22 -18q-4 -2 -11 -2q-6 0 -18 4l-135 76q-14 8 -18.5 23.5t3.5 29.5l66 115 q-145 68 -269 178.5t-218 259.5q-25 37 -25 81zM154 768q82 -131 191.5 -229.5t240.5 -157.5l57 104q-86 63 -138 159.5t-52 213.5q0 78 25.5 148.5t68.5 130.5q-117 -61 -217.5 -153.5t-175.5 -215.5zM614 858q0 -23 16.5 -40t41.5 -17t41 17.5t16 39.5q0 78 53.5 131 t130.5 53q25 0 41.5 17.5t16.5 40.5q0 25 -16.5 41t-41.5 16q-61 0 -116.5 -23.5t-95 -63.5t-63.5 -95.5t-24 -116.5zM928 154l88 159q209 25 382 145t292 310q-109 170 -263 283l76 137q92 -66 172 -150t146 -188q23 -39 22.5 -83t-22.5 -81q-160 -254 -392.5 -392 t-500.5 -140zM1081 430l285 510q4 -20 6 -39.5t2 -42.5q0 -74 -21.5 -140.5t-60.5 -121.5t-93 -98t-118 -68z" />
-<glyph unicode="&#xf071;" horiz-adv-x="1775" d="M0 92q0 20 6 39t17 35l768 1331q16 27 39.5 48.5t56.5 21.5t56.5 -21.5t39.5 -48.5l770 -1331q10 -16 16.5 -34.5t6.5 -39.5q0 -49 -38 -70.5t-81 -21.5h-1538q-43 0 -81 21.5t-38 70.5zM770 1114l12 -633q0 -14 10.5 -24.5t24.5 -10.5h139q14 0 24.5 10.5t10.5 24.5 l15 633q0 14 -10.5 24.5t-24.5 10.5h-166q-16 0 -25.5 -10.5t-9.5 -24.5zM776 168q0 -16 10.5 -25.5t24.5 -9.5h154q35 0 34 35v147q0 16 -9 26.5t-25 10.5h-154q-14 0 -24.5 -10t-10.5 -27v-147z" />
-<glyph unicode="&#xf072;" horiz-adv-x="1597" d="M0 541v4q0 31 23 57l30 29q20 25 56 24q8 0 10 -2l280 -32q39 63 93.5 134.5t125.5 151.5l-583 457q-31 25 -31 59v4q0 31 23 58l57 57q27 23 57 23h13.5t13.5 -4l803 -293l151 149q70 70 161 110t167 40q72 0 109 -37q20 -18 28 -46t8 -63q0 -76 -38.5 -167t-110.5 -160 l-150 -152l293 -803q6 -12 6 -26q0 -33 -24 -58l-58 -57q-23 -25 -57 -25h-4q-35 4 -59 31l-455 586q-82 -72 -153.5 -126t-135.5 -93l35 -281v-10q0 -33 -22 -55l-31 -31q-23 -23 -58 -23h-4q-39 4 -59 31l-205 272l-274 207q-29 25 -31 60z" />
-<glyph unicode="&#xf073;" horiz-adv-x="1566" d="M0 117v1147q0 47 35 81.5t82 34.5h39v13q0 31 10 62.5t32.5 56t59.5 40t90 15.5t90 -15.5t59.5 -40t33 -56.5t10.5 -62v-13h49v13q0 31 10 62.5t32.5 56t59.5 40t90 15.5t90 -15.5t60.5 -40t34 -56.5t10.5 -62v-13h49v13q0 31 10.5 62.5t33 56t59 40t90.5 15.5 q53 0 90 -15.5t59.5 -40t32.5 -56.5t10 -62v-13h39q47 0 82 -34.5t35 -81.5v-1147q0 -47 -35 -82t-82 -35h-1333q-47 0 -82 35t-35 82zM158 158h282v250h-282v-250zM158 446h282v252h-282v-252zM158 737h282v252h-282v-252zM272 1165q0 -57 76 -57t76 57v228q0 57 -76 57 t-76 -57v-228zM479 158h285v250h-285v-250zM479 446h285v252h-285v-252zM479 737h285v252h-285v-252zM709 1165q0 -31 16 -44t57 -13t58.5 13.5t17.5 43.5v228q0 29 -17.5 43t-58.5 14t-57 -14.5t-16 -42.5v-228zM803 158h284v250h-284v-250zM803 446h284v252h-284v-252z M803 737h284v252h-284v-252zM1126 158h283v250h-283v-250zM1126 446h283v252h-283v-252zM1126 737h283v252h-283v-252zM1143 1165q0 -57 76 -57t75 57v228q0 57 -75 57q-76 0 -76 -57v-228z" />
-<glyph unicode="&#xf074;" d="M0 252v154q0 16 11.5 27t27.5 11h219q51 0 99.5 31t93.5 83t90 119.5t90 139.5q55 88 114.5 175t127 156t147.5 110.5t180 41.5h203v185q0 41 24.5 49t59.5 -21l334 -278q23 -18 22 -45q0 -29 -22 -47l-334 -277q-35 -29 -59.5 -20.5t-24.5 49.5v174h-203 q-53 0 -100 -31.5t-93 -84t-91.5 -120t-90.5 -139.5q-55 -88 -113.5 -174t-126 -154.5t-148.5 -110.5t-179 -42h-219q-16 0 -27.5 11.5t-11.5 27.5zM0 1108v154q0 39 39 38h219q68 0 127 -20t111.5 -56t97.5 -84t88 -104q-61 -90 -117 -178q-4 -8 -9 -14t-9 -15 q-70 102 -139.5 172t-149.5 70h-219q-16 0 -27.5 10.5t-11.5 26.5zM778 481q29 41 56.5 85t56.5 87q4 10 10 17.5t10 17.5q70 -102 139.5 -170.5t149.5 -68.5h203v190q0 41 24.5 49t59.5 -20l334 -277q23 -18 22 -47q0 -27 -22 -45l-334 -279q-35 -29 -59.5 -20.5 t-24.5 49.5v168h-203q-68 0 -127 20.5t-110.5 56.5t-97.5 84t-87 103z" />
-<glyph unicode="&#xf075;" horiz-adv-x="1916" d="M0 866q0 145 76 272.5t206 222.5t303.5 150.5t372.5 55.5t373 -55.5t304 -150.5t206 -222t76 -273q0 -145 -76 -272t-206 -222.5t-304 -150.5t-373 -55q-92 0 -180 12q-176 -139 -411 -192q-25 -4 -50.5 -8.5t-54.5 -8.5q-16 -2 -27.5 6.5t-15.5 24.5t5 26.5t20 20.5 q23 23 43 44.5t36.5 51t30 69.5t23.5 97q-174 98 -275.5 241.5t-101.5 315.5z" />
-<glyph unicode="&#xf076;" horiz-adv-x="1566" d="M0 621v288q0 16 11.5 27.5t27.5 11.5h391q16 0 28.5 -11t12.5 -28v-288q0 -29 22.5 -60t63.5 -58.5t98.5 -45t128.5 -17.5q70 0 128.5 17.5t99.5 45t63.5 58.5t22.5 60v288q0 39 39 39h391q16 0 27.5 -11t11.5 -28v-288q0 -135 -61.5 -254t-168 -207t-249 -139.5 t-304.5 -51.5q-164 0 -306 51.5t-248.5 139.5t-168 206.5t-61.5 254.5zM0 1106v391q0 16 11.5 27.5t27.5 11.5h391q16 0 28.5 -11.5t12.5 -27.5v-391q0 -16 -12.5 -27.5t-28.5 -11.5h-391q-16 0 -27.5 11.5t-11.5 27.5zM1098 1106v391q0 16 11 27.5t28 11.5h391 q16 0 27.5 -11.5t11.5 -27.5v-391q0 -16 -11.5 -27.5t-27.5 -11.5h-391q-39 0 -39 39z" />
-<glyph unicode="&#xf077;" horiz-adv-x="1536" d="M-1 355.5q-1 37.5 28 66.5l673 674q29 29 68 28.5t68 -28.5l671 -674q29 -29 29 -67t-29 -66l-100 -103q-29 -29 -68 -28.5t-67 28.5l-508 510l-500 -510q-29 -29 -67.5 -28.5t-67.5 28.5l-100 103q-29 29 -30 66.5z" />
-<glyph unicode="&#xf078;" horiz-adv-x="1536" d="M0 918.5q0 37.5 29 66.5l100 102q29 29 68 29t67 -29l508 -509l500 509q29 29 67.5 29t67.5 -29l100 -102q29 -29 30 -66.5t-28 -66.5l-673 -674q-29 -29 -68 -28.5t-68 28.5l-671 674q-29 29 -29 66.5z" />
-<glyph unicode="&#xf079;" horiz-adv-x="1916" d="M2.5 1038.5q-12.5 30.5 28.5 75.5l385 424q25 29 63 29q39 0 64 -29l385 -424q41 -45 28.5 -75.5t-67.5 -30.5h-250v-680h174q6 -10 12 -19.5t17 -19.5l252 -281h-695q-33 0 -56.5 23.5t-23.5 56.5v920h-249q-55 0 -67.5 30.5zM823 1526h695q33 0 56 -22.5t23 -57.5v-918 h250q55 0 67.5 -30.5t-28.5 -75.5l-385 -424q-25 -29 -63 -29q-39 0 -64 29l-385 424q-41 45 -28.5 75.5t67.5 30.5h250v678h-174q-6 10 -12.5 20.5t-14.5 20.5z" />
-<glyph unicode="&#xf07a;" d="M0 1421v76q0 39 39 39h231q16 0 38 -4t36 -8q6 -4 14.5 -14.5t15.5 -23.5t12 -26.5t7 -21.5l27 -125h1347q35 0 58 -27t16 -59l-108 -578q-6 -25 -26.5 -42t-49.5 -17h-1084l35 -168q4 -16 17.5 -26.5t29.5 -10.5h856q16 0 27.5 -11.5t11.5 -27.5v-78q0 -16 -11 -26.5 t-28 -10.5h-163h-652h-104q-16 0 -36.5 3.5t-35.5 9.5q-6 2 -14 13t-15.5 24.5t-12.5 27t-7 21.5l-215 1016q-4 16 -17.5 26t-29.5 10h-170q-39 0 -39 39zM582 115q0 47 33.5 81.5t80.5 34.5q49 0 83 -34.5t34 -81.5t-34 -81t-83 -34q-47 0 -80.5 34t-33.5 81zM1233 115 q0 47 33.5 81.5t81.5 34.5q47 0 80.5 -34.5t33.5 -81.5t-33.5 -81t-80.5 -34t-81 34t-34 81z" />
-<glyph unicode="&#xf07b;" d="M0 115v1306q0 47 34 81t81 34h692q47 0 81 -34t34 -81t33.5 -80.5t80.5 -33.5h693q47 0 80.5 -35t33.5 -82v-1075q0 -47 -33.5 -81t-80.5 -34h-1614q-47 0 -81 34t-34 81z" />
-<glyph unicode="&#xf07c;" d="M0 379v1042q0 47 34 81t81 34h692q47 0 81 -34t34 -81t33.5 -80.5t80.5 -33.5h443q47 0 80.5 -35t33.5 -82v-221h-1228q-37 0 -72 -12.5t-65.5 -34t-53 -51t-35.5 -64.5zM43 0l246 760q6 23 30.5 39t47.5 16h1476l-260 -758q-6 -23 -30.5 -40t-47.5 -17h-1462z" />
-<glyph unicode="&#xf07d;" horiz-adv-x="798" d="M2 368.5q10 22.5 57 22.5h203v754h-203q-47 0 -57 22.5t25 57.5l319 319q23 23 53 23q31 0 54 -23l321 -319q33 -35 22.5 -57.5t-57.5 -22.5h-202v-754h202q47 0 57.5 -22.5t-24.5 -57.5l-319 -319q-23 -23 -54 -23t-53 23l-321 319q-33 35 -23 57.5z" />
-<glyph unicode="&#xf07e;" horiz-adv-x="1597" d="M0 729q0 31 23 53l319 322q35 33 57.5 23.5t22.5 -56.5v-205h754v203q0 47 22.5 57.5t56.5 -22.5l320 -322q23 -23 22 -53q0 -31 -22 -53l-320 -322q-35 -33 -57 -22.5t-22 57.5v203h-754v-203q0 -47 -22.5 -57t-57.5 24l-319 320q-23 23 -23 53z" />
-<glyph unicode="&#xf080;" d="M0 115v1306q0 47 35 81t82 34h1612q47 0 80.5 -34t33.5 -81v-1306q0 -47 -33.5 -81t-80.5 -34h-1612q-47 0 -82 34t-35 81zM154 154h1536v1228h-1536v-1228zM307 264v363h203v-363h-203zM649 264v776h201v-776h-201zM993 264v592h203v-592h-203zM1333 264v922h203v-922 h-203z" />
-<glyph unicode="&#xf081;" horiz-adv-x="1536" d="M0 193v1150q0 39 15.5 75t41 61.5t60.5 41t73 15.5h1153q80 0 136.5 -56.5t56.5 -136.5v-1150q0 -41 -15.5 -76t-41 -60.5t-61.5 -41t-75 -15.5h-1153q-39 0 -73.5 15.5t-60 41t-41 60.5t-15.5 76zM201 559q80 -111 198.5 -169t259.5 -58q96 0 184.5 26.5t160 73.5 t124 112.5t76.5 143.5q82 6 129 57q14 14 4 33q-8 18 -30 15h-4q23 23 30 45q8 20 -8 32q-14 14 -33 2q-8 -4 -29.5 -10t-45.5 -6q-4 0 -7.5 1t-7.5 1q0 2 -1 4t-1 4q-16 61 -57 110.5t-94 72.5q4 4 6 8t6 8q6 16 0 33q-2 6 -12.5 16t-34.5 8q-2 4 -6 8q-12 12 -25 9 q-25 -4 -49 -13l-2 2q-14 8 -31 -2q-59 -37 -98 -100t-68 -135q-35 31 -57 41q-61 35 -129 63.5t-154 61.5q-14 4 -24 -4q-10 -6 -15 -21q-2 -27 8.5 -58.5t38.5 -62.5q-25 -6 -20 -32q12 -68 68 -101l-13 -12q-14 -14 -4 -33q4 -12 26.5 -37.5t65.5 -38.5q-6 -12 -6 -22 t2 -14q6 -33 39 -50q-37 -25 -79 -34t-85 -5.5t-83 20.5t-70 46q-8 8 -19.5 8t-19.5 -8q-23 -18 -4 -39z" />
-<glyph unicode="&#xf082;" horiz-adv-x="1536" d="M2 193v1150q0 39 15.5 75t41 61.5t60.5 41t74 15.5h1153q80 0 136 -56.5t56 -136.5v-1150q0 -41 -15.5 -76t-41 -60.5t-61.5 -41t-74 -15.5h-486v643h172q12 0 21.5 8t9.5 21l12 168q0 14 -8 24q-10 10 -23 10h-184v74q0 41 10.5 54.5t53.5 13.5q25 0 55.5 -4t58.5 -11 q6 0 13.5 1.5t11.5 5.5q10 6 14 22l23 162q4 29 -25 35q-90 25 -188 24q-301 0 -301 -293v-84h-103q-33 0 -32 -32v-168q0 -12 9 -21.5t23 -9.5h103v-643h-393q-39 0 -74 15.5t-60.5 41t-41 60.5t-15.5 76z" />
-<glyph unicode="&#xf083;" d="M0 115v1306q0 47 34 81t81 34h1614q47 0 80.5 -34t33.5 -81v-1306q0 -47 -33.5 -81t-80.5 -34h-1614q-47 0 -81 34t-34 81zM154 160h1536v114h-1536v-114zM154 1145h1536v231h-957l-14 -92h-565v-139zM266 1341h316v93h-316v-93zM557 709q0 -76 28.5 -142.5t78 -116 t116 -78t142.5 -28.5t142 28.5t115.5 78t78 116t28.5 142.5t-28.5 142.5t-78 115.5t-116 77.5t-141.5 28.5q-76 0 -142.5 -28.5t-116 -77.5t-78 -115.5t-28.5 -142.5zM672 709q0 51 19.5 97t53 80t79.5 53t98 19q51 0 97 -19t80 -53t53 -80t19 -97t-19 -97.5t-53 -80 t-80 -53t-97 -19.5t-97.5 19.5t-80 53t-53 79.5t-19.5 98zM743 709q0 -16 12.5 -28.5t28.5 -12.5q18 0 30.5 12.5t12.5 28.5q0 41 28 67.5t67 26.5v2q18 0 30.5 12t12.5 29q0 18 -12.5 30.5t-30.5 12.5q-74 0 -126.5 -53.5t-52.5 -126.5z" />
-<glyph unicode="&#xf084;" horiz-adv-x="1916" d="M0 803.5q-6 111.5 30 230.5t113 228q78 109 179.5 180t209 102t213 17.5t191.5 -74.5q76 -53 121 -134t59 -176.5t-4 -199t-68 -203.5l414 -297l101 139l-105 74q-14 10 -17 27.5t7 32.5l51 69q10 14 26.5 17.5t32.5 -7.5l344 -247q16 -10 18.5 -27.5t-7.5 -32.5l-49 -69 q-10 -14 -27.5 -17.5t-32.5 6.5l-102 76l-101 -139l248 -178q43 -31 52.5 -83t-21.5 -95q-33 -43 -84 -51.5t-94 22.5l-803 573q-78 -80 -170 -130t-186.5 -68.5t-185.5 -2t-167 71.5q-86 61 -133 157.5t-53 208zM230.5 766q7.5 -41 30 -78t58.5 -63q37 -27 79 -36t83 -2 t78 29.5t64 59.5q39 53 41 114.5t-27 114.5q59 -10 116.5 11.5t96.5 74.5q27 37 36 79t2 84t-29.5 79t-59.5 63q-37 27 -79 36t-84 2t-79 -29.5t-63 -59.5q-37 -53 -39 -114.5t26 -114.5q-59 10 -116.5 -12.5t-96.5 -75.5q-27 -37 -36 -79t-1.5 -83z" />
-<glyph unicode="&#xf085;" horiz-adv-x="1916" d="M0 801v180q0 8 22.5 15.5t52 11.5t56.5 7t37 3q12 49 37 88q-23 31 -45.5 61.5t-46.5 59.5l-4 10q0 6 20.5 29.5t47 50t50 47t29.5 20.5q2 0 20.5 -13t41 -30.5t43 -34t24.5 -20.5q43 25 90 37q0 8 3 37t7 57.5t10.5 52t16.5 23.5h178q8 0 15.5 -23.5t11.5 -52t7 -57.5 t5 -37q45 -12 86 -35q31 23 62.5 45.5t60.5 46.5l8 4q4 0 27.5 -21.5t49.5 -47t47.5 -49t21.5 -29.5q0 -4 -13.5 -22.5t-30 -40t-32 -41t-19.5 -25.5q23 -39 39 -92q10 -2 37 -5t55.5 -8.5t51 -11.5t22.5 -14v-178q0 -10 -22.5 -16.5t-51 -11.5t-56.5 -7t-36 -4 q-14 -43 -37 -88q23 -31 44.5 -60.5t48.5 -58.5l2 -10q0 -6 -20.5 -30t-47.5 -50.5t-50.5 -47t-29.5 -20.5q-2 0 -20.5 13.5t-41 31t-42 32.5t-25.5 20q-45 -23 -88 -37q0 -10 -3 -37t-8 -56.5t-12.5 -52t-15.5 -22.5h-178q-8 0 -15.5 22.5t-11.5 52t-7 56t-3 37.5 q-45 12 -88 37q-31 -23 -61.5 -46.5t-59.5 -48.5l-10 -2q-4 0 -27.5 20.5t-49 47t-46 50.5t-20.5 30q0 2 13 20.5t29.5 40t31 41t18.5 25.5q-23 43 -39 94q-10 2 -37 5t-55.5 7t-51 10.5t-22.5 14.5zM420 889q0 -37 14.5 -70t39 -57.5t58 -38.5t70.5 -14t70 14t57.5 38.5 t38.5 57.5t14 70t-14 70.5t-38.5 58t-57.5 39t-70 14.5q-76 0 -129 -53t-53 -129zM1114 373q0 6 13.5 13t31 14.5t33.5 12.5t22 7q4 23 9.5 39t15.5 37q-4 4 -13 17t-18.5 27.5t-16.5 28t-7 17.5t18.5 23.5t43 41t47 40t28.5 24.5l8 4q4 0 16.5 -8.5t25.5 -19.5t25.5 -21.5 t16.5 -14.5q35 12 76 19q2 6 9 21.5t16.5 30.5t17.5 27.5t14 12.5q4 0 34 -7t63.5 -18.5t60.5 -23.5t27 -23q0 -23 -5.5 -47t-9.5 -47q16 -12 28.5 -26.5t22.5 -30.5q25 2 49.5 3t47.5 1q8 0 18 -26.5t17.5 -60.5t12.5 -64.5t5 -39.5q0 -6 -13.5 -13t-29.5 -13t-32.5 -12.5 t-22.5 -8.5q-9 -38 -23 -71q2 -6 11.5 -18.5t18.5 -27t16 -27.5t7 -18q0 -4 -18.5 -23.5t-43 -41t-47 -40.5t-28.5 -26l-8 -4q-4 0 -16.5 8.5t-25.5 19.5t-25.5 21.5t-16.5 14.5q-37 -12 -78 -19q-2 -6 -9 -21.5t-15.5 -30.5t-16.5 -27.5t-14 -12.5t-35 7.5t-62.5 18.5 t-60 23.5t-26.5 22.5q0 23 5 47.5t9 46.5q-16 12 -28.5 26.5t-22.5 30.5q-23 -2 -45.5 -3t-44.5 -1h-14.5t-8.5 11q-2 8 -8 35.5t-13.5 58t-12.5 55.5t-5 29zM1192 1264q0 6 12.5 12t29.5 10t33.5 6t22.5 4q10 33 31 64q-2 4 -8 17.5t-12.5 27.5t-11.5 25.5t-5 15.5 q0 6 21.5 22.5t49 35t51.5 32.5t28 14t13 -9t20.5 -21.5t20.5 -23.5t13 -15q14 4 28.5 6t29.5 0h14q2 4 11 16.5t18.5 25.5t17.5 23.5t12 10.5t30 -11.5t55.5 -26t53 -28.5t23.5 -20q0 -4 -4 -15.5t-8 -26t-8.5 -27.5t-6.5 -17q20 -25 39 -60q51 -4 74 -7t29 -18.5t5 -52 t3 -106.5q0 -6 -12.5 -12.5t-28.5 -10.5t-32.5 -6t-22.5 -4q-13 -36 -31 -63q2 -4 8 -16.5t13.5 -27t12.5 -26.5t5 -14q0 -6 -22.5 -23.5t-50 -36t-51.5 -33t-26 -14.5q-4 0 -14 9.5t-21.5 21.5t-20.5 23.5t-13 15.5q-14 -4 -28.5 -6t-29.5 0h-14q-4 -4 -12 -16.5t-17.5 -26 t-18.5 -23.5t-13 -10t-30 11t-54.5 25.5t-52 29t-23.5 20.5q0 2 3 14.5t8 26.5t9 27.5t6 17.5q-23 23 -38 59q-53 2 -75 5t-29 18.5t-5 52.5t-4 109zM1397 367q0 -49 34.5 -85t84.5 -36q49 0 84.5 34.5t35.5 86.5q0 49 -34.5 83.5t-85.5 34.5q-49 0 -84 -34.5t-35 -83.5z M1446 1206q0 -47 31.5 -78.5t76.5 -31.5q47 0 79 31.5t32 76.5q0 47 -31.5 79t-77.5 32q-47 0 -78.5 -32t-31.5 -77z" />
-<glyph unicode="&#xf086;" horiz-adv-x="1916" d="M0 997q0 119 61.5 222.5t167 180.5t246.5 122t303 45t303.5 -45t247 -122t166.5 -180.5t61 -222.5q0 -117 -61 -221t-166.5 -181t-247 -122t-303.5 -45q-37 0 -73.5 3t-71.5 7q-147 -113 -336 -155q-20 -4 -40.5 -7.5t-43.5 -7.5q-12 -2 -21.5 6.5t-13.5 18.5v2 q-4 12 3 19.5t18 17.5q18 18 34.5 36.5t29.5 42t24.5 55.5t19.5 79q-141 78 -224 195.5t-83 256.5zM649 258q6 4 13.5 8t13.5 8q51 -6 102 -6q197 0 369 56.5t298 155t198.5 231.5t72.5 286q0 41 -6 84q96 -78 151.5 -175t55.5 -208q0 -139 -83 -256.5t-224 -195.5 q8 -47 19 -79t25.5 -55.5t30 -42t33.5 -36.5q10 -10 17.5 -18.5t3.5 -18.5v-2q-2 -12 -12.5 -19.5t-22.5 -5.5q-23 4 -43.5 7.5t-40.5 7.5q-96 20 -180 60t-156 95q-35 -4 -71.5 -7t-73.5 -3q-141 0 -265 35t-225 94z" />
-<glyph unicode="&#xf087;" horiz-adv-x="1597" d="M0 193v643q0 49 35 83.5t84 34.5h348q27 14 43 30.5t35 39.5q16 20 32.5 38.5t34.5 37.5q33 35 76 68.5t66 74.5q16 29 23 64.5t12.5 71.5t12.5 70t23.5 60.5t47 42t81.5 15.5q61 0 111.5 -25.5t85.5 -68.5t54.5 -98.5t19.5 -112.5q0 -59 -17.5 -112.5t-42.5 -107.5 q35 2 70 4.5t70 2.5q55 0 107 -10.5t93 -35t66.5 -67.5t25.5 -109q0 -29 -5 -57t-15 -57q18 -43 18 -90q0 -78 -41 -142q10 -59 -7 -118.5t-58 -104.5q-4 -84 -45 -139.5t-102.5 -89t-135 -47t-143.5 -13.5q-72 0 -144.5 10.5t-142.5 28.5q-70 20 -139.5 43t-142.5 23h-375 q-49 0 -84 34.5t-35 84.5zM236 276q0 -35 22 -57t57 -22q33 0 56.5 22.5t23.5 56.5q0 33 -23.5 56.5t-56.5 23.5q-35 0 -57 -23.5t-22 -56.5zM492 193q70 0 136 -16.5t133.5 -35t141.5 -35t160 -16.5q45 0 98 6t99.5 26.5t78 56.5t31.5 95q0 10 -1 18.5t-3 18.5 q35 16 53.5 53t18.5 74q0 39 -21 68q60 49 60 123q0 23 -12.5 43t-26.5 35q16 29 28.5 57.5t12.5 62.5q0 35 -17.5 55.5t-43 31t-56.5 12.5t-57 2q-45 0 -90.5 -3t-90.5 -3q-31 0 -61.5 3t-58.5 15q0 41 16 78t35.5 74.5t35 78.5t15.5 91q0 33 -10.5 65.5t-29.5 59t-47 44 t-65 17.5h-11t-11 -2q-8 -4 -9 -8t-3 -13q-12 -59 -22.5 -123.5t-39.5 -117.5q-29 -51 -74 -88t-86 -78q-29 -31 -49 -56.5t-41.5 -48t-48.5 -42t-65 -35.5h-2v-643z" />
-<glyph unicode="&#xf088;" horiz-adv-x="1597" d="M0 309q0 29 5 57.5t15 57.5q-18 43 -18 90q0 78 41 141q-10 59 7.5 119t58.5 105q4 84 45 139t102 89t135 47t144 13q72 0 144.5 -10t141.5 -29q70 -20 139.5 -42.5t143.5 -22.5h375q49 0 83.5 -35t34.5 -84v-643q0 -49 -34.5 -84t-83.5 -35h-349q-27 -14 -43 -30.5 t-34 -38.5q-16 -20 -32.5 -39t-35.5 -37q-33 -35 -76 -69t-65 -74q-25 -43 -31 -99.5t-18.5 -106.5t-44 -84t-107.5 -34q-61 0 -111.5 25.5t-85 68.5t-54 98.5t-19.5 112.5q0 59 17.5 112.5t41.5 106.5q-35 -2 -69.5 -4t-69.5 -2q-55 0 -107.5 10.5t-93.5 35t-66.5 67.5 t-25.5 108zM119 309q0 -35 17.5 -55.5t43 -30.5t56 -12t57.5 -2q47 0 91 3t89 3q31 0 61.5 -3t59.5 -15q0 -41 -16.5 -78t-36 -75t-34.5 -79t-15 -90q0 -33 10 -65.5t29.5 -59t47 -43t64.5 -16.5q4 0 11.5 -1t11.5 1q8 4 9 8l3 12q12 59 22.5 124t38.5 118q29 51 74 88 t86 78q29 31 49.5 56.5t42 48t47 42t66.5 35.5h2v643q-72 0 -137.5 16.5t-133 36t-141 36t-159.5 16.5q-45 0 -98.5 -7.5t-99.5 -27t-78 -56t-32 -96.5q0 -10 1 -18t3 -19q-35 -16 -53 -53t-18 -74q0 -39 20 -67q-59 -49 -59 -123q0 -23 12 -43.5t27 -34.5 q-16 -29 -28.5 -57.5t-12.5 -63.5zM1202 860q0 -33 23.5 -56.5t56.5 -23.5q35 0 57.5 23.5t22.5 56.5q0 35 -22.5 57.5t-57.5 22.5q-33 0 -56.5 -22.5t-23.5 -57.5z" />
-<glyph unicode="&#xf089;" horiz-adv-x="837" d="M1 959.5q9 27.5 54 33.5l506 74l227 459q23 41 50 41v-1348l-453 -237q-41 -23 -64.5 -6.5t-15.5 63.5l86 504l-364 356q-35 33 -26 60.5z" />
-<glyph unicode="&#xf08a;" horiz-adv-x="1802" d="M0 1073q0 137 43 231.5t112.5 153t156.5 84t177 25.5q63 0 125 -21.5t115 -53.5t97 -70t75 -68q31 31 76 68.5t98 69.5t113.5 53.5t126.5 21.5q88 0 175 -25.5t156.5 -84t112.5 -153t43 -231.5q0 -94 -34.5 -177t-76.5 -146.5t-79 -102.5t-39 -43l-615 -612 q-26 -23 -57 -23q-33 0 -55 23l-617 614q-4 2 -39.5 41t-77.5 102.5t-77 146.5t-35 177zM160 1073q0 -68 27.5 -131t61.5 -112.5t63 -79.5l28 -29l561 -559l561 559l29 29q29 30 62.5 79.5t61 113t27.5 130.5q0 104 -29.5 169t-77.5 101.5t-106.5 50t-113.5 13.5 q-53 0 -107.5 -25.5t-102.5 -61.5t-86 -74t-56 -60q-25 -31 -62 -31t-61 31q-18 23 -56.5 60.5t-86.5 73.5t-102.5 61.5t-105.5 25.5q-57 0 -115.5 -13.5t-106.5 -50t-77.5 -101t-29.5 -169.5z" />
-<glyph unicode="&#xf08b;" horiz-adv-x="1689" d="M0 307v922q0 63 24.5 118.5t66.5 97.5t97.5 66.5t118.5 24.5h461v-193h-461q-47 0 -80.5 -33.5t-33.5 -80.5v-922q0 -47 33.5 -80.5t80.5 -33.5h461v-193h-461q-63 0 -118.5 24.5t-97.5 66.5t-66.5 97.5t-24.5 118.5zM508 584v368q0 33 22.5 54.5t55.5 21.5h428v293 q0 41 39 57q39 14 65 -14l553 -553q18 -18 18.5 -44t-18.5 -42l-553 -553q-18 -18 -43 -18q-10 0 -22 4q-39 16 -39 57v291h-428q-33 0 -55.5 22.5t-22.5 55.5z" />
-<glyph unicode="&#xf08c;" horiz-adv-x="1536" d="M0 193v1150q0 39 15.5 75t41 61.5t60.5 41t76 15.5h1150q80 0 136.5 -56.5t56.5 -136.5v-1150q0 -41 -15.5 -76t-41 -60.5t-61.5 -41t-75 -15.5h-1150q-80 0 -136.5 56.5t-56.5 136.5zM207 1192q0 -57 40 -97t95 -40q57 0 97 40t40 97q0 55 -40 95t-97 40q-55 0 -95 -40 t-40 -95zM213 240q0 -10 9 -19.5t20 -9.5h200q12 0 20.5 9t8.5 20v706q0 29 -29 29h-200q-10 0 -19.5 -8.5t-9.5 -20.5v-706zM580 240q0 -10 9 -19.5t19 -9.5h201q12 0 20.5 9t8.5 20v383q0 68 26.5 113.5t102.5 45.5q59 0 79.5 -28.5t20.5 -81.5v-432q0 -10 8 -19.5 t21 -9.5h204q10 0 19.5 9t9.5 20v477q0 141 -81 208.5t-216 67.5q-55 0 -107.5 -15t-93.5 -56q0 16 -3 34.5t-25 18.5h-195q-10 0 -19 -8.5t-9 -20.5v-706z" />
-<glyph unicode="&#xf08d;" horiz-adv-x="1916" d="M2 950q8 82 45 172t100 176t139 147.5t152 92.5t145.5 30t121.5 -40q53 -41 73.5 -107.5t12.5 -148.5l301 -225q111 63 215 73t180 -47q55 -41 80.5 -108.5t23.5 -150.5t-30.5 -177t-83.5 -188l428 -410q25 -25 4 -53q-12 -16 -33 -17q-10 0 -18 6l-517 293 q-74 -80 -155.5 -135t-161.5 -80.5t-151.5 -19.5t-127.5 47q-76 55 -95 158.5t9 228.5l-303 223q-76 -33 -145.5 -32t-122.5 40q-51 39 -72.5 104.5t-13.5 147.5zM171 858.5q3 -16.5 15 -27.5q21 -14 50 -14q31 0 65.5 17.5t71.5 46t72.5 66.5t66.5 79q10 14 7 30.5 t-15 26.5q-14 10 -30.5 8.5t-27.5 -16.5q-78 -104 -138 -143t-72 -35q-14 10 -31 7t-27 -15q-10 -14 -7 -30.5zM465 754l364 -271q8 -8 23 -8q20 0 33 17q10 14 8 29.5t-16 25.5l-347 258q-16 -14 -32.5 -27.5t-32.5 -23.5zM829.5 239.5q1.5 -16.5 16.5 -26.5q29 -23 69 -23 q41 0 88.5 21.5t95.5 57.5t94 84.5t87 101.5q10 12 8 28.5t-16 26.5q-12 10 -28.5 8t-26.5 -16q-51 -68 -102.5 -116t-95.5 -75.5t-78 -36t-48 4.5q-14 10 -29.5 7t-25.5 -18q-10 -12 -8.5 -28.5z" />
-<glyph unicode="&#xf08e;" d="M0 307v922q0 63 24.5 119.5t65.5 97.5t97.5 65.5t119.5 24.5h582q-2 -14 -4 -27.5t-2 -29.5v-88q0 -23 6 -48h-582q-47 0 -80.5 -33.5t-33.5 -80.5v-922q0 -47 33.5 -80.5t80.5 -33.5h1075q47 0 81 33.5t34 80.5v340q41 -31 90 -49t103 -20v-271q0 -63 -25 -118.5 t-67 -97.5t-97 -66.5t-119 -24.5h-1075q-63 0 -119.5 24.5t-97.5 66.5t-65.5 97.5t-24.5 118.5zM692.5 522q-0.5 25 16.5 41l770 772h-269q-25 0 -41 16.5t-16 41.5v86q-2 23 15.5 40t41.5 17h576q23 0 40 -17.5t17 -39.5v-86v-490q0 -25 -17.5 -42t-39.5 -15h-86 q-25 0 -41.5 16.5t-16.5 40.5v268l-772 -770q-16 -16 -40.5 -16t-41.5 16l-79 80q-16 16 -16.5 41z" />
-<glyph unicode="&#xf090;" horiz-adv-x="1689" d="M0 584v368q0 33 22.5 54.5t55.5 21.5h428v293q0 41 37 57q39 14 67 -14l553 -553q16 -18 16.5 -44t-16.5 -42l-553 -553q-18 -18 -43 -18q-8 0 -24 4q-37 16 -37 57v291h-428q-33 0 -55.5 22.5t-22.5 55.5zM922 0v193h460q47 0 81 33.5t34 80.5v922q0 47 -33.5 80.5 t-81.5 33.5h-460v193h460q63 0 118.5 -24.5t97.5 -65.5t67 -97.5t25 -119.5v-922q0 -63 -25 -118.5t-67 -97.5t-97 -66.5t-119 -24.5h-460z" />
-<glyph unicode="&#xf091;" horiz-adv-x="1689" d="M0 1042v187q0 33 22.5 54.5t55.5 21.5h323q-2 12 -2 25v24v4q0 53 4.5 88t15.5 54.5t31.5 27.5t55.5 8h678q33 0 54.5 -8t32.5 -27.5t15 -54.5t4 -88v-25.5t-2 -27.5h324q33 0 55.5 -21.5t22.5 -54.5v-187q0 -63 -45.5 -130.5t-122 -128t-180 -106.5t-222.5 -65 q-51 -10 -91 -41.5t-40 -70.5q0 -35 17.5 -51.5t39 -31t40 -31.5t22.5 -52q4 -23 -2 -47q-4 -14 23.5 -23.5t67.5 -18.5t82 -22.5t64 -34.5q12 -10 19.5 -39.5t9.5 -64.5q2 -33 -6 -58.5t-29 -25.5h-985q-20 0 -28.5 25.5t-6.5 58.5q2 35 9.5 64.5t19.5 39.5q23 20 64 34 t80.5 23t68.5 18t25 24q-4 14 -4 25.5v21.5q2 35 21.5 52t42 31.5t39.5 31t17 51.5q0 39 -39.5 70.5t-93.5 41.5q-117 20 -220 66.5t-180 106t-122 127t-45 130.5zM154 1042q0 -20 23.5 -54t67.5 -70.5t106.5 -71.5t140.5 -60q-25 80 -44.5 175.5t-33.5 189.5h-260v-109z M1198 786q78 25 140.5 60t106.5 71.5t67.5 70.5t23.5 54v109h-262q-12 -94 -31.5 -189.5t-44.5 -175.5z" />
-<glyph unicode="&#xf092;" horiz-adv-x="1536" d="M0 193v1150q0 39 15.5 75t41 61.5t60.5 41t76 15.5h1150q80 0 136.5 -56.5t56.5 -136.5v-1150q0 -41 -15.5 -76t-41 -60.5t-61.5 -41t-75 -15.5h-337v25q0 39 2 97t-3.5 115.5t-24.5 104.5t-65 66q188 20 290 110t102 287q0 66 -22.5 128t-67.5 114q6 23 8 45t2 45 q0 41 -9.5 91t-31.5 85h-12q-43 2 -79 -8.5t-68 -27.5t-62.5 -37.5t-65.5 -39.5q-33 4 -65.5 6t-65.5 2t-65.5 -2t-65.5 -6q-35 18 -65.5 39t-62.5 38t-67.5 27.5t-79.5 8.5h-12q-23 -35 -32 -85t-9 -91q0 -23 2 -45.5t8 -44.5q-45 -51 -66.5 -114t-21.5 -128 q0 -193 95.5 -283t281.5 -112q-39 -16 -59.5 -50t-32.5 -75q-27 -8 -49.5 -15.5t-50.5 -7.5q-55 0 -88 28.5t-59.5 62.5t-56.5 62.5t-79 28.5q-4 0 -20.5 -2t-16.5 -12q0 -23 19.5 -32t31.5 -19q39 -31 56.5 -71t39 -76.5t62.5 -62.5t127 -26q35 0 80 11q0 -10 -1 -18.5 t-1 -16.5q0 -29 2 -59.5t-2 -59.5h-317q-80 0 -136.5 56.5t-56.5 136.5z" />
-<glyph unicode="&#xf093;" d="M0 39v614q0 16 11.5 27.5t27.5 11.5h229q16 0 27.5 -11t11.5 -28v-346h1229v346q0 16 11.5 27.5t27.5 11.5h229q16 0 27.5 -11t11.5 -28v-614q0 -39 -39 -39h-1765q-39 0 -39 39zM346 969.5q-6 15.5 16 38.5l504 505q23 23 55.5 23t55.5 -23l506 -505q23 -23 15.5 -38.5 t-38.5 -15.5h-307v-499q0 -33 -22.5 -55.5t-55.5 -22.5h-307q-33 0 -54.5 22.5t-21.5 55.5v499h-307q-33 0 -39 15.5z" />
-<glyph unicode="&#xf094;" horiz-adv-x="1609" d="M2 514q-2 35 1 68.5t7 74.5q4 35 7 72t10 66q14 66 30.5 128t44.5 117q20 41 45 81t54 79q10 14 21 24.5t24 20.5q23 23 45 45.5t49 42.5t57.5 36.5t65.5 33.5q33 16 67.5 29.5t71.5 27.5q70 29 156 51l2 2q45 12 85 17.5t79 5.5q59 0 112.5 -9t106.5 -20q41 -8 85 -15 t93 -7h2q29 0 62.5 5t66.5 5q25 0 45.5 -6t32.5 -25q23 -31 26 -71.5t-2 -75.5q-4 -35 -8 -71t2 -72q4 -23 11.5 -39.5t15.5 -36.5q8 -18 11 -40t7 -42q18 -104 15.5 -195.5t-23 -170.5t-56.5 -148.5t-81 -132.5q-37 -49 -79 -97.5t-92 -91.5t-110.5 -77.5t-134.5 -59.5 q-76 -27 -158.5 -34t-158.5 -11h-31q-104 0 -194.5 16.5t-192.5 16.5h-4q-35 0 -77 -10.5t-83 -12.5h-2q-37 0 -65.5 17.5t-42.5 42.5q-20 35 -17.5 72.5t12.5 68.5t10.5 66.5t-5 75.5t-12.5 81t-9 82z" />
-<glyph unicode="&#xf095;" horiz-adv-x="1597" d="M0 1169q0 55 14.5 114.5t43 114t68.5 98.5t93 69q12 -2 26.5 0t26.5 2q10 0 30 -1l40 -2t38.5 -4.5t27.5 -7.5q8 -6 15 -18t12 -26.5t9.5 -30t8.5 -25.5q6 -18 23.5 -65.5t35.5 -97.5t32.5 -93t14.5 -53q0 -37 -24.5 -68t-54 -57.5t-54.5 -50t-25 -45.5q0 -20 16.5 -49 t26.5 -46q84 -145 188.5 -248.5t252.5 -187.5q18 -10 45.5 -27.5t50.5 -17.5t52.5 32t60 70.5t62 70.5t58.5 32q10 0 51.5 -22.5t89.5 -50t93 -55.5t61 -36q16 -10 46 -25.5t40 -35.5q2 -6 2 -23q0 -16 -3 -36.5t-8 -43t-12 -43t-13 -35.5q-18 -39 -59.5 -71.5t-92.5 -55 t-104.5 -36t-92.5 -13.5q-80 0 -153.5 25.5t-145.5 54.5q-123 45 -232.5 118t-203.5 164t-175 196.5t-144 215.5q-25 41 -52.5 95.5t-51 112t-39 113.5t-15.5 105z" />
-<glyph unicode="&#xf096;" d="M0 307v922q0 63 24.5 118.5t66.5 97.5t97.5 66.5t118.5 24.5h1075q63 0 118.5 -24.5t97.5 -66.5t67 -97.5t25 -118.5v-922q0 -63 -25 -118.5t-67 -97.5t-97 -66.5t-119 -24.5h-1075q-63 0 -118.5 24.5t-97.5 66.5t-66.5 97.5t-24.5 118.5zM193 307q0 -47 33.5 -80.5 t80.5 -33.5h1075q47 0 81 33.5t34 80.5v922q0 47 -34 80.5t-81 33.5h-1075q-47 0 -80.5 -33.5t-33.5 -80.5v-922z" />
-<glyph unicode="&#xf097;" horiz-adv-x="1253" d="M0 84v1337q0 47 34 81t81 34h1024q47 0 80.5 -34t33.5 -81v-1337q0 -47 -33.5 -81t-80.5 -34t-80 33l-432 432l-432 -432q-33 -33 -80 -33t-81 34t-34 81zM158 186l469 471l469 -471v1194h-938v-1194z" />
-<glyph unicode="&#xf098;" horiz-adv-x="1536" d="M0 193v1150q0 39 15.5 75t41 61.5t60.5 41t76 15.5h1150q80 0 136.5 -56.5t56.5 -136.5v-1150q0 -41 -15.5 -76t-41 -60.5t-61.5 -41t-75 -15.5h-1150q-80 0 -136.5 56.5t-56.5 136.5zM193 1040q2 -35 15 -76.5t30 -78.5q16 -37 33.5 -72t33.5 -63q74 -129 176.5 -241 t225.5 -190q31 -18 67.5 -36.5t75.5 -32.5q41 -16 83 -31.5t81 -21.5q63 -10 112.5 2t90.5 34q27 12 57.5 33.5t44.5 52.5q4 8 9 27t9 38t6.5 37.5t-2.5 29.5q-4 12 -24.5 22t-36.5 21q-43 25 -71.5 41t-67.5 38q-16 10 -36 23.5t-38 13.5q-23 0 -46.5 -27.5t-37.5 -43.5 q-12 -14 -36.5 -43t-47.5 -31q-14 -2 -32.5 10t-37.5 23q-106 59 -182 136t-135 177q-10 16 -21.5 37.5t-7.5 38.5q2 18 24.5 33.5t35.5 29.5q14 16 31.5 37t19.5 43q4 16 -5 38t-16 42q-16 43 -28.5 76t-26.5 76q-6 16 -12 40.5t-21 30.5q-8 4 -26.5 7t-39 4.5t-37.5 0 t-24 -1.5h-8q-33 -16 -62.5 -45.5t-51 -69.5t-34 -88t-9.5 -100z" />
-<glyph unicode="&#xf099;" d="M0 399q0 20 13.5 33.5t33.5 13.5q14 0 31 -12q123 -109 287 -108q59 0 117.5 15t107.5 48q-29 16 -47.5 42t-18.5 58q0 23 8 39q-18 6 -44.5 20.5t-51 34t-41 43t-16.5 46.5q0 16 10 28.5t25 22.5q-41 25 -76 74t-35 98q0 33 33 43q-35 35 -55.5 81t-20.5 95 q0 23 9.5 41.5t37.5 18.5q18 0 84 -27t142.5 -60.5t146.5 -67.5t98 -48q25 -14 47.5 -31.5t44.5 -36.5q20 51 47 106.5t60 106.5t73 96t89 74q8 6 24 6q18 0 27 -8q16 6 41.5 13.5t42.5 7.5q29 0 43 -27q31 0 56.5 -15.5t25.5 -50.5q0 -31 -27 -53q96 -43 159.5 -125 t90.5 -182q6 -2 23 -2q47 0 88 16.5t57 16.5q18 0 31.5 -14.5t13.5 -32.5q0 -23 -19.5 -48.5t-33.5 -39.5q23 4 42 -7.5t19 -38.5q0 -29 -26.5 -50t-61.5 -37.5t-72.5 -25.5t-58.5 -11q-43 -139 -133 -246t-210 -178.5t-258 -109.5t-275 -38q-219 0 -415 93.5t-325 273.5 q-8 16 -8 26z" />
-<glyph unicode="&#xf09a;" horiz-adv-x="790" d="M0 862v203q0 16 11.5 28.5t27.5 12.5h125v102q0 358 366 359q123 0 232 -31q33 -10 29 -43l-27 -199q-4 -16 -16 -26q-16 -10 -31 -6q-35 8 -72.5 12t-66.5 4q-53 0 -65.5 -16.5t-12.5 -67.5v-88h223q14 0 29 -14q10 -10 10 -29l-17 -205q0 -16 -11 -25.5t-27 -9.5h-207 v-784q0 -16 -11.5 -27.5t-27.5 -11.5h-258q-16 0 -27.5 11.5t-11.5 27.5v784h-125q-16 0 -27.5 11.5t-11.5 27.5z" />
-<glyph unicode="&#xf09b;" horiz-adv-x="1847" d="M0 743q0 115 37 222.5t113 193.5l-3 4l3 2q-16 39 -21.5 81t-5.5 85q0 25 2 56.5t8 65.5t15.5 63.5t23.5 50.5h8q61 0 110.5 -12.5t94.5 -35t87 -52t91 -60.5q86 25 178.5 33t182.5 8t182 -8t180 -33q47 31 90 60.5t87 52t93.5 35t110.5 12.5h10q12 -20 21.5 -50 t15.5 -64t9 -65.5t3 -56.5q0 -43 -6 -85t-20 -81v-2l-2 -4q76 -86 112.5 -193.5t36.5 -222.5q0 -233 -66.5 -383.5t-188.5 -236.5t-292 -120t-376 -34q-207 0 -378 34t-292 120t-187.5 236.5t-66.5 383.5zM250 483q0 -145 64.5 -224t165 -116t219 -43t227.5 -6q74 0 152.5 2 t153.5 12.5t142.5 34t117.5 67.5t80 111.5t30 165.5q0 84 -27 147.5t-75 106.5t-114.5 64.5t-146.5 21.5q-78 0 -156.5 -7t-156.5 -7t-157 7t-157 7q-162 0 -262 -87t-100 -257zM494 524q0 66 26.5 113t65.5 47t66.5 -47t27.5 -113t-27.5 -112.5t-66.5 -46.5t-65.5 46.5 t-26.5 112.5zM795 258q-6 16 12 25q16 6 24 -13q27 -76 93 -75q31 0 56.5 20.5t35.5 54.5q8 20 26 13q16 -6 13 -25q-16 -47 -51 -75.5t-80 -28.5q-43 0 -78 28.5t-51 75.5zM864 369q0 -12 17.5 -21.5t42.5 -9.5t42 9.5t17 21.5t-17.5 21t-41.5 9q-25 0 -42.5 -9t-17.5 -21z M1167 524q0 -66 28 -112.5t67 -46.5t65.5 46.5t26.5 112.5t-27 113t-65 47q-39 0 -67 -47t-28 -113z" />
-<glyph unicode="&#xf09c;" horiz-adv-x="1880" d="M0 117v626q0 49 35 84t82 35h665v156q0 113 43 213t118 175t175 118t213 43t213.5 -43t175 -118t117.5 -175.5t43 -212.5v-152q0 -33 -22.5 -55.5t-55.5 -22.5h-80q-33 0 -55 23t-22 55v152q0 66 -25 123t-67 99t-99 66.5t-123 24.5t-122 -24.5t-99 -66.5t-67.5 -99.5 t-24.5 -122.5v-156h119q47 0 81.5 -34.5t34.5 -84.5v-626q0 -47 -34.5 -82t-81.5 -35h-1020q-47 0 -82 35t-35 82zM494 164h266l-66 285q29 18 47.5 48.5t18.5 65.5q0 55 -39 95t-94 40t-94 -40t-39 -95q0 -35 18 -65.5t47 -46.5z" />
-<glyph unicode="&#xf09d;" d="M0 154v1228q0 63 45 108.5t109 45.5h1536q63 0 108 -45t45 -109v-1228q0 -63 -45 -108.5t-108 -45.5h-1536q-63 0 -108.5 45t-45.5 109zM154 154h1536v575h-1536v-575zM154 1114h1536v268h-1536v-268zM260 250v115h307v-115h-307zM676 250v115h446v-115h-446z" />
-<glyph unicode="&#xf09e;" horiz-adv-x="1566" d="M0 236q0 49 18.5 91t50 74.5t75 51t92.5 18.5t91 -18.5t74.5 -51t51 -74.5t18.5 -91t-18.5 -92.5t-51 -75t-74.5 -50t-91 -18.5t-92.5 18.5t-75 50t-50 74.5t-18.5 93zM0 819v158q0 35 25 57q23 23 53 23q2 0 3 -1t3 -1q193 -14 362.5 -95t301 -212.5t212 -301.5 t95.5 -362q4 -35 -21 -59q-23 -25 -57 -25h-158q-29 0 -51.5 20.5t-26.5 51.5q-12 133 -68.5 249.5t-145.5 205.5t-205.5 145.5t-249.5 68.5q-31 4 -51.5 27t-20.5 51zM0 1331v158q0 33 25 55q23 23 53 23h4q301 -16 565 -137t463 -320t319.5 -463t137.5 -565q4 -31 -23 -57 q-23 -25 -55 -25h-158q-31 0 -53.5 21.5t-24.5 52.5q-14 238 -110 446.5t-255 367.5t-368 255t-446 110q-31 2 -52.5 24.5t-21.5 53.5z" />
-<glyph unicode="&#xf0a0;" d="M0 193v382q0 23 6 45.5t12 45.5l238 727q20 63 75.5 103t121.5 40h938q66 0 121 -40t75 -103l238 -727q6 -23 12 -45.5t6 -45.5v-382q0 -41 -15.5 -76t-41 -60.5t-61 -41t-74.5 -15.5h-1458q-80 0 -136.5 56.5t-56.5 136.5zM154 193q0 -16 11 -27.5t28 -11.5h1458 q16 0 27.5 11t11.5 28v382q0 16 -11.5 27.5t-27.5 11.5h-1458q-16 0 -27.5 -11t-11.5 -28v-382zM213 768h1417l-188 578q-4 16 -19.5 26t-31.5 10h-938q-16 0 -31.5 -10t-20.5 -26zM1057 385q0 39 27.5 66.5t68.5 27.5q39 0 66.5 -27.5t27.5 -66.5q0 -41 -27.5 -68.5 t-66.5 -27.5q-41 0 -68.5 27.5t-27.5 68.5zM1364 385q0 39 27.5 66.5t68.5 27.5q39 0 66.5 -27.5t27.5 -66.5q0 -41 -27.5 -68.5t-66.5 -27.5q-41 0 -68.5 27.5t-27.5 68.5z" />
-<glyph unicode="&#xf0a1;" d="M0 754v229q0 63 45 108.5t109 45.5h499q115 0 245 33.5t255 90t235.5 128t186.5 147.5q63 0 108.5 -45t45.5 -109v-368q49 -12 81.5 -52.5t32.5 -93.5t-32.5 -93t-81.5 -52v-369q0 -63 -45.5 -108t-108.5 -45q-68 68 -164 132t-206.5 117.5t-227.5 91.5t-227 50 q-43 -12 -70 -40t-37 -62.5t-2 -70.5t37 -65q-25 -41 -23 -75.5t20.5 -66.5t49.5 -61.5t66 -58.5q-20 -43 -69.5 -65.5t-106 -25.5t-110.5 10.5t-85 43.5q-18 61 -40 125t-36 130.5t-14 138t24 151.5h-200q-63 0 -108.5 45t-45.5 109zM807 741q102 -16 206.5 -49t205 -78 t191.5 -99t165 -112v930q-76 -59 -166 -113.5t-190.5 -98.5t-205 -77.5t-206.5 -48.5v-254z" />
-<glyph unicode="&#xf0a2;" horiz-adv-x="1759" d="M0 317q104 78 172 172.5t109 201t60 224t28 240.5q6 84 56 159t126 131t163 89t167 33q74 0 163 -33t165.5 -88t129 -129t52.5 -158q6 -121 27.5 -239.5t63.5 -227t109.5 -204t167.5 -171.5q-4 -70 -56 -114.5t-120 -44.5h-469q-18 -82 -82.5 -135.5t-150.5 -53.5 t-151.5 53.5t-84.5 135.5h-469q-68 0 -120 45t-56 114zM238 317h1284q-78 86 -129.5 183.5t-84 202t-50 214t-25.5 222.5q-4 55 -41 104t-89 86t-111.5 57.5t-110.5 20.5q-47 0 -107.5 -21.5t-114 -56.5t-91.5 -83t-40 -99q-6 -111 -23.5 -222.5t-51 -218t-87 -205 t-128.5 -184.5zM713 209q0 -70 49 -119t119 -49q18 0 18 20t-18 21q-53 0 -91 37t-38 90q0 20 -19 20q-20 0 -20 -20z" />
-<glyph unicode="&#xf0a3;" horiz-adv-x="1597" d="M0 573q0 33 23 56l143 139l-143 139q-23 23 -23 56q0 25 16.5 45t40.5 26l195 49q-6 18 -15.5 48t-18.5 61t-16 59.5t-7 46.5q0 31 21.5 52.5t51.5 21.5q18 0 47 -7t60 -16.5t60.5 -18.5t47.5 -15l49 194q6 25 27 41.5t45 16.5q35 0 53 -23l142 -143l139 143q23 23 53 23 q27 0 47.5 -15.5t26.5 -42.5l49 -194q18 6 48 15t60.5 18.5t59.5 16.5t47 7q29 0 51.5 -21.5t22.5 -52.5q0 -18 -7 -46.5t-16.5 -59.5t-18.5 -60.5t-15 -48.5l194 -49q25 -6 41 -26.5t16 -44.5q0 -33 -22 -56l-145 -139l145 -139q23 -23 22 -56q0 -25 -16 -45t-41 -26 l-194 -49q6 -18 15 -48t18.5 -61t16.5 -59.5t7 -46.5q0 -29 -21.5 -51.5t-52.5 -22.5q-18 0 -47 7t-59.5 16.5t-60 18.5t-48.5 15l-49 -194q-6 -25 -26.5 -41.5t-45.5 -16.5q-33 0 -55 23l-139 143l-142 -143q-18 -23 -53 -23q-25 0 -45 16.5t-27 41.5l-49 194 q-18 -6 -47.5 -15t-60.5 -18.5t-59.5 -16.5t-47.5 -7q-31 0 -52 22.5t-21 51.5q0 18 7 46.5t16 59.5t18.5 60.5t15.5 48.5l-195 49q-25 6 -41 26.5t-16 44.5z" />
-<glyph unicode="&#xf0a4;" horiz-adv-x="1916" d="M0 193v643q0 49 35 83.5t84 34.5h348q27 14 42 30.5t34 39.5q16 20 33.5 38.5t33.5 37.5q35 35 77 68.5t65 74.5q25 43 29.5 99.5t17 106.5t44 84t109.5 34q61 0 111.5 -25.5t85.5 -68.5t54.5 -98.5t19.5 -112.5q0 -53 -15.5 -104.5t-38.5 -98.5h134l376 -2 q49 0 93.5 -17.5t76 -49.5t50 -75t18.5 -94q0 -49 -18.5 -92t-50 -74.5t-75.5 -49t-94 -17.5h-200q-10 -59 -39 -107q10 -59 -7.5 -117.5t-56.5 -103.5q0 -76 -28.5 -131t-75.5 -90t-109.5 -52.5t-132.5 -17.5q-78 0 -148.5 16.5t-136 36t-128 36t-123.5 16.5h-375 q-49 0 -84 34.5t-35 84.5zM233 276q0 -35 22.5 -57t57.5 -22q33 0 56.5 22.5t23.5 56.5q0 33 -23.5 56.5t-56.5 23.5q-35 0 -57.5 -23.5t-22.5 -56.5zM489 193q70 0 136.5 -16.5t133 -35t133 -35t138.5 -16.5q43 0 84 8t72 27.5t50 52.5t19 82q0 14 -1 26.5t-3 24.5 q35 16 53.5 53t18.5 74q0 39 -20 68q59 49 59 123q0 23 -12.5 43.5t-26.5 34.5q90 0 178 1t178 1q51 0 84 30.5t33 81.5q0 53 -32.5 85t-84.5 32q-172 0 -340 1t-338 1q0 41 16.5 78t35 74.5t34 78.5t15.5 91q0 33 -10.5 65.5t-29 59t-46 44t-64.5 17.5h-11t-11 -2 q-8 -4 -9 -8t-3 -13q-12 -59 -22.5 -123.5t-39.5 -117.5q-29 -51 -73 -88t-85 -78q-29 -31 -49 -56.5t-41.5 -48t-48.5 -42t-65 -35.5h-5v-643z" />
-<glyph unicode="&#xf0a5;" horiz-adv-x="1916" d="M0 821q0 51 18.5 94t50 75t75.5 49.5t94 17.5l376 2h134q-23 47 -38.5 98t-15.5 105q0 57 19.5 112.5t54.5 98.5t85 68.5t112 25.5q78 0 109.5 -34t43.5 -84t17.5 -106.5t29.5 -99.5q23 -41 65 -74.5t77 -68.5q16 -18 33.5 -37t33.5 -39q18 -23 33.5 -39t42.5 -31h348 q49 0 84 -34.5t35 -83.5v-643q0 -49 -35 -84t-84 -35h-375q-61 0 -123.5 -16.5t-128 -36t-136 -36t-148.5 -16.5q-70 0 -132.5 17.5t-109.5 52.5t-75.5 90t-28.5 131q-39 45 -56.5 103.5t-7.5 117.5q-29 47 -39 107h-200q-49 0 -93.5 17.5t-76 49t-50 74.5t-18.5 92z M121 821q0 -51 32.5 -81.5t84.5 -30.5q90 0 178 -1t178 -1q-14 -14 -26.5 -35t-12.5 -43q0 -74 59 -123q-20 -29 -20 -68q0 -37 18.5 -73.5t53.5 -53.5q-2 -12 -3 -24.5t-1 -26.5q0 -49 19 -82t50 -52.5t72 -27.5t84 -8q70 0 137.5 16.5t134 35t133 35t135.5 16.5v643h-4 q-41 16 -66.5 35.5t-47 42t-42 48t-48.5 56.5q-41 41 -85 78t-73 88q-29 53 -39 117.5t-23 123.5q-2 8 -3 12.5t-9 8.5q-4 2 -11 2h-11q-37 0 -65 -17.5t-46 -44t-28.5 -59.5t-10.5 -65q0 -49 15.5 -90.5t34 -79t34.5 -74.5t16 -78q-170 0 -337.5 -1t-339.5 -1 q-51 0 -84 -32t-33 -85zM1524 276q0 -35 23.5 -57t56.5 -22q35 0 57 22.5t22 56.5q0 33 -22.5 56.5t-56.5 23.5q-33 0 -56.5 -23.5t-23.5 -56.5z" />
-<glyph unicode="&#xf0a6;" horiz-adv-x="1597" d="M0 602q0 61 25.5 111.5t68.5 85.5t98.5 54t112.5 19q53 0 104.5 -15t98.5 -38v133l2 377q0 49 17.5 93t49 76t74.5 50.5t94 18.5q49 0 92.5 -18.5t75 -50.5t49 -76t17.5 -93v-201q59 -10 106 -38q59 10 118 -7.5t104 -56.5q76 0 131 -28.5t90 -76t52 -109.5t17 -132 q0 -78 -16 -148.5t-35.5 -136t-36 -128t-16.5 -124.5v-374q0 -49 -35 -84t-84 -35h-643q-49 0 -84 34.5t-35 84.5v348q-14 27 -30.5 42t-38.5 34q-20 16 -39 33.5t-37 33.5q-35 35 -68.5 77t-74.5 64q-43 25 -99.5 30t-106.5 17.5t-84 44t-34 109.5zM120 591q-1 -7 1 -11 q4 -8 8 -9.5t12 -3.5q59 -12 124 -22t118 -39q51 -29 88 -73t78 -85q31 -29 56.5 -49t48 -41.5t42 -48.5t35.5 -66v-4h643q0 70 16.5 136.5t35 133t35 133t16.5 138.5q0 43 -8.5 84t-28 71.5t-52 50t-81.5 19.5q-14 0 -26.5 -1t-25.5 -3q-16 35 -53 53.5t-74 18.5 q-39 0 -67 -21q-49 59 -123 60q-23 0 -43.5 -12.5t-34.5 -26.5q0 90 -1 178t-1 178q0 51 -30.5 84t-82.5 33q-53 0 -84.5 -33t-31.5 -84q0 -172 -1 -340t-1 -338q-41 0 -78 16.5t-75 35t-79 34t-90 15.5q-33 0 -65.5 -10.5t-59 -29t-43 -46t-16.5 -64.5q0 -4 -1 -11z M1210 -37q0 -35 23.5 -57.5t56.5 -22.5q35 0 57.5 22.5t22.5 57.5q0 33 -22.5 56.5t-57.5 23.5q-33 0 -56.5 -23.5t-23.5 -56.5z" />
-<glyph unicode="&#xf0a7;" horiz-adv-x="1597" d="M0 584q0 78 34 109.5t84 44t106.5 17.5t99.5 29q41 23 74.5 65t68.5 77q18 16 36.5 33.5t39.5 33.5q23 18 39 33.5t30 42.5v348q0 49 35 84t84 35h643q49 0 84 -35t35 -84v-375q0 -61 16.5 -123.5t36 -128t35.5 -136t16 -148.5q0 -70 -17 -132.5t-52 -109.5t-90.5 -75.5 t-130.5 -28.5q-45 -39 -103.5 -56.5t-118.5 -7.5q-47 -29 -106 -39v-200q0 -49 -17.5 -93.5t-49 -76t-74.5 -50t-93 -18.5q-51 0 -94 18.5t-74.5 50t-49 75.5t-17.5 94l-2 376v134q-47 -23 -98.5 -38.5t-104.5 -15.5q-57 0 -112.5 19.5t-98.5 54.5t-68.5 85t-25.5 112z M120 595q1 -7 1 -11q0 -37 16.5 -64.5t43 -46t59 -29t65.5 -10.5q49 0 90 15.5t79 34t75 35t78 16.5q0 -170 1 -338t1 -340q0 -51 31.5 -84t84.5 -33q51 0 82 32.5t31 84.5q0 90 1 178t1 178q14 -14 34.5 -26.5t43.5 -12.5q74 0 123 59q29 -20 67 -20q37 0 74 18.5t53 53.5 q12 -2 24.5 -3t27.5 -1q49 0 81.5 19t52 50t28 72t8.5 84q0 70 -16.5 137.5t-35 134t-35 133t-16.5 136.5h-643v-5q-16 -41 -35.5 -66.5t-42 -47t-48 -42t-56.5 -48.5q-41 -41 -78 -85t-88 -73q-53 -29 -117.5 -39t-124.5 -23l-12 -3t-8 -9q-2 -4 -1 -11zM1210 1223 q0 -33 23.5 -56.5t56.5 -23.5q35 0 57.5 23.5t22.5 56.5q0 35 -22.5 57.5t-57.5 22.5q-33 0 -56.5 -23t-23.5 -57z" />
-<glyph unicode="&#xf0a8;" horiz-adv-x="1597" d="M0 768q0 166 62.5 311.5t171 254t254 171t311.5 62.5t311 -62.5t254 -171t171 -254t62 -311.5t-62 -311.5t-171 -254t-254 -171t-311 -62.5t-311.5 62.5t-254 171t-171 254t-62.5 311.5zM240 768q0 -29 20 -49l490 -490q20 -20 48.5 -20t49.5 20l74 74q20 20 20 48 t-20 50l-246 246h612q29 0 49.5 19.5t20.5 48.5v104q0 29 -20.5 49.5t-49.5 20.5h-612l246 246q20 20 20 48.5t-20 49.5l-74 74q-20 20 -48 20t-50 -20l-490 -490q-20 -20 -20 -49z" />
-<glyph unicode="&#xf0a9;" horiz-adv-x="1597" d="M0 768q0 166 62.5 311.5t171 254t254 171t311.5 62.5t311 -62.5t254 -171t171 -254t62 -311.5t-62 -311.5t-171 -254t-254 -171t-311 -62.5t-311.5 62.5t-254 171t-171 254t-62.5 311.5zM240 717q0 -29 20.5 -49.5t48.5 -20.5h613l-246 -246q-20 -20 -20.5 -48.5 t20.5 -49.5l74 -74q20 -20 47.5 -20t50.5 20l489 490q20 20 20.5 49t-20.5 49l-489 490q-20 20 -49 20t-49 -20l-74 -74q-20 -20 -20.5 -48t20.5 -50l246 -246h-613q-29 0 -49 -19.5t-20 -48.5v-104z" />
-<glyph unicode="&#xf0aa;" horiz-adv-x="1597" d="M0 768q0 166 62.5 311.5t171 254t254 171t311.5 62.5t311 -62.5t254 -171t171 -254t62 -311.5t-62 -311.5t-171 -254t-254 -171t-311 -62.5t-311.5 62.5t-254 171t-171 254t-62.5 311.5zM240 768q0 -29 20 -49l74 -74q20 -20 48 -20t50 20l246 246v-612q0 -29 19.5 -49.5 t47.5 -20.5h105q29 0 49.5 20.5t20.5 49.5v612l245 -246q20 -20 49 -20t50 20l73 74q20 20 20.5 48t-20.5 50l-489 490q-20 20 -49 20t-49 -20l-490 -490q-20 -20 -20 -49z" />
-<glyph unicode="&#xf0ab;" horiz-adv-x="1597" d="M0 768q0 166 62.5 311.5t171 254t254 171t311.5 62.5t311 -62.5t254 -171t171 -254t62 -311.5t-62 -311.5t-171 -254t-254 -171t-311 -62.5t-311.5 62.5t-254 171t-171 254t-62.5 311.5zM240 769q0 -28 20 -50l490 -490q20 -20 48.5 -20t49.5 20l489 490q20 20 20.5 49 t-20.5 49l-73 74q-20 20 -48 20t-51 -20l-245 -246v612q0 29 -19.5 49.5t-48.5 20.5h-104q-29 0 -49.5 -20.5t-20.5 -49.5v-612l-246 246q-20 20 -49 20t-49 -20l-74 -74q-20 -20 -20 -48z" />
-<glyph unicode="&#xf0ac;" horiz-adv-x="1597" d="M0 768q0 162 63.5 307.5t173 255t254 173t308.5 63.5q162 0 307 -63.5t254.5 -173t173 -255t63.5 -307.5q0 -164 -63.5 -308.5t-173 -254t-254.5 -173t-307 -63.5q-164 0 -308.5 63.5t-254 173t-173 254t-63.5 308.5zM186 1141h4q4 0 12.5 3t8.5 -9q0 -4 -3 -10.5 t11 -6.5q4 0 5 6t3 0l4 -16v-2q0 -4 -5 -6t-3 -6q4 -4 9.5 -4h9.5l4 2l2 2q0 -6 6 -8.5t10 -2.5h2q0 -2 -4 -4t0 -6l23 -4v-2l14 -29q0 -4 -3 -10t-7 -6t-3 3t1 7t-2 7t-15 3q-2 0 -5 -1t-1 -5l12 -26l3 -2l2 -2q-10 0 -13.5 -24t-3.5 -34l4 -18l2 -6v-4l-4 -19l29 -43h6 q2 -4 -2 -8t-2 -8l8 -8q0 -10 4 -14.5t13 -12.5q-2 -12 20.5 -22.5t28.5 -14.5q8 -23 20 -45.5t29 -38.5l2 -14q0 -2 -5 -4t-1 -6l16 -7q4 4 10.5 -8t10.5 -16l-2 -6l12 -17l6 -2l4 8q-4 10 -14 26.5t-20.5 32t-17.5 28t-7 14.5t-2 16t-4 17q6 -4 15 -7.5t15 -7.5 q4 -25 19.5 -40t27.5 -34q-4 -4 0 -5t7 -1q4 -4 4 -14q14 -16 36.5 -44t22.5 -44v-2l-4 -14q6 -16 20.5 -25.5t28.5 -15.5h4q20 -10 41 -21.5t43 -19.5l23 14q8 -2 17 -10t20.5 -18.5t27 -19.5t35.5 -11q14 10 14 -5v-4l25 -30l4 -15q10 -6 20.5 -15t16.5 -20h4 q10 0 16.5 -9t16.5 -9q6 0 6 6q0 14 2 18.5t5 5.5t6 1t3 4l-4 6q-4 0 -6 -5t-6 -1l-14 -8l-17 4l-24 41l8 57q2 4 -7.5 9t-5.5 10q-14 8 -34 8q-4 0 -20.5 -3t-16.5 3t3 16t7 21.5t6 19.5t2 10l13 27l-3 4l-16 4q-4 0 -13 -6t-17.5 -14.5t-14.5 -16.5t-6 -14l-45 -10 q-14 0 -23 12q-4 16 -17 35.5t-13 34.5q0 23 8 43t-6 43q2 0 5 2t1 6l12 14l2 2l2 -2q14 10 37 7t29 14l16 -13q4 -2 9 4.5t5 10.5l-8 4l39 10l2 -6l19 2l22 -14q4 0 8 5t9 1l20 -21q-4 -8 -1 -12t3 -8q0 -6 10.5 -27.5t18.5 -21.5q10 0 9 13t-1 17q0 20 -8 39t-15 39v6 q0 10 12.5 17.5t12.5 9.5q10 8 22.5 16t18.5 19l8 16v10h6t4 6q0 2 -3 3.5t-7 5.5q-4 2 -8 6l6 4q4 6 6 15t-2 16l15 8q-2 -6 4 -8t10 0l10 16q-4 10 -5 7t5 7q10 4 19.5 9.5t19.5 5.5q2 -2 4 -2q6 0 6 2q0 10 -4 12l12 25q14 0 21 12l18 2q6 2 6 8v2l35 10l4 11l-10 14 q2 0 2 4t-4 6t-7 4t-7 4l-4 -2l4 2h6h13t9 9q0 12 -14 12q-18 0 -41.5 -8.5t-32.5 -28.5l-14 -8l18 18l3 6q0 4 -9.5 5.5t-5.5 1.5q18 0 27.5 4t15.5 9t12.5 10t18.5 9q23 -4 44 -2t44 2q6 4 12 8.5t8 10.5l23 4q4 -4 12 2t8 10q0 10 -11 13t-11 14q0 2 1 6t-3 4 q-6 0 -19.5 -6t-19.5 -10q-4 -2 -5 1t-1 7l2 -2l8 4l23 8l4 4q0 6 -7.5 8t-11.5 2t-12 -3t-8 3l2 4q-12 10 -21.5 22.5t-9.5 16.5t1 8.5t-5 4.5q-4 0 -7 -1t-3 5q0 4 -5 17t-14 13l-8 -8q0 -6 -6 -9t-6 -7h-4l-21 -12q-2 4 -5 2t-7 -2h-2l-2 -2q8 0 7 9t-7 9l-16 -4 q-4 0 -4.5 1t2 4t4.5 7t0 8t-6.5 1t-4.5 1h6l5 5q2 2 -1.5 7t-5.5 7l-22 4l-14 12q-2 -2 -7.5 3t-9.5 7l-16 -6l-39 9q-4 0 -8 -2.5t-4 -6.5t4 -6t4 -6t3 -21.5t-5 -13.5l-10 -14q2 -4 7 -7t10 -7.5t9 -10.5t2 -18l-45 -31v-4q0 -8 4 -15t8 -18q10 -4 9.5 -7t-7 -6t-12.5 -6 t-6 -5t-4 -2h-8h-4q0 2 2 3t2 5l-19 12v-2l-8 16q4 10 1 14.5t-3 10.5q0 16 -14.5 16t-30.5 -4q4 2 -1 6.5t-7 4.5q-16 0 -38 14t-36 14q-6 0 -14 -2t-15 -4q4 2 5 10l-13 23l-2 2q-6 0 -14 -5t-8 9q0 2 2 4t0 4q-2 12 5 20.5t11 18.5q4 4 4 8t4 4q8 0 15.5 4.5t17.5 6.5 l2 6q0 4 -21.5 7t-21.5 7l2 2q20 -6 30.5 -7t17.5 2t16.5 8t29.5 14q0 4 -21.5 8t-27.5 8h12q4 0 10.5 -2t10.5 -4q0 -4 6 -5t10 -1l15 10v6l-4 6l22 4q-2 2 1 4t5 2q6 0 12.5 -6t8.5 -6l20 8q-2 2 5.5 3t4.5 6l-14 16q-2 0 -3 3t1 3q10 0 6 10q-8 4 -17 9.5t-20 5.5 q-4 0 -9 -2t-5 -7q0 -4 5 -4t7 -4q4 -4 -2 -4t-6 -2q-8 0 -15 -12t-18 -17q-4 0 -3 2.5t-1 4.5q-2 4 -7 5t-5 5t5 13t-13 9q-8 0 -11.5 -6t-7.5 -12l-22 25l-17 2q0 8 3.5 14t-9.5 16q-6 4 -11 7.5t-11 3.5q-2 0 -9.5 -4.5t-13.5 -8.5t-6 -8t10 -4h-2q-6 0 -6 -8 q0 -2 9.5 -5t13.5 -3q4 2 6 -1.5t6 -3.5l8 3v-5q-2 -2 -2 -4l2 -8l-20 -10q-2 -2 -4 -2t-5 -2q0 -6 5.5 -12.5t-11.5 -6.5l-6 4q0 8 -16.5 13.5t-40 7.5t-47 3t-35.5 1l-33 -10l8 -19q-4 0 -5 -3t3 -5q-4 4 -15 14.5t-15 10.5l-17 4q-35 -2 -78 -33t-84 -72t-75.5 -84 t-53.5 -71zM471 674h2h-2zM487 
-1413q14 2 25.5 8t24.5 6l6 -4q6 -2 11 -2t9 -4q8 2 17 -2l6 4v8l-2 5l6 -2q6 0 12 10l-2 4q-4 2 -10 4t-10 2t-16.5 -5t-28 -11.5t-29.5 -12.5t-19 -8zM918 86q0 -4 3 -9t3 -9q0 -5 -2 -7q115 20 217 74.5t182 140.5h-2q-8 -4 -14 0l-5 -4 l-12 4h-4l-4 -8l2 8q-6 8 -14 15l-4 2q-4 0 -4 -8q2 16 -8.5 27.5t-26.5 11.5q0 -2 -2 -2h-4l-5 4h7l4 12l-11 8l-2 -2q-14 2 -20 16l-4 2l-2 -2l-4 -2q-12 -4 -21 -8q-12 4 -18 10l-27 -2q0 6 -4 12.5t-12 6.5q-10 0 -20.5 -2t-14.5 -13q0 -4 2 -8t4 -6v-8l-2 -6l-4 -2h-2 l-6 16l6 10q-2 4 -2 10.5t-2 10.5l-2 4h-6l-15 -10h-8l-4 -4q-2 -2 -2 -4t-2 -3l-2 3h-8q-8 -8 -9 -19l3 -4l-9 -6l-2 -4l-6 -4q0 -2 -1 -2t-1 -2v-2v-9l-2 -2v4l-2 2q-2 12 -23 21h-6v-4q2 -6 8.5 -10.5t10.5 -8.5q-2 2 -5 1t-3 -3v-4l14 -20v-39l4 -10q-4 -16 -16 -27v2 l-4 -2l-3 -2l-2 -10l2 -2v-2l-4 4l-2 -13l-12 -4q-6 -4 -5 -11t-5 -11l2 -4l-6 -6q0 -4 -1 -6.5t-1 -6.5l2 -14l6 -4l4 4l2 6l2 -12q0 -4 -4 -8q-6 -4 -12 -9.5t-6 -15.5z" />
-<glyph unicode="&#xf0ad;" horiz-adv-x="1593" d="M8 242q0 33 12.5 62.5t34.5 51.5l609 609q-16 66 -17 122q0 98 38 186.5t102.5 153t151.5 102.5t185 38q94 0 181.5 -36t154.5 -101l-442 -162l-43 -236l184 -153l443 159q-10 -92 -50.5 -171t-104 -137t-145 -91t-174.5 -33q-61 0 -127 17l-606 -607q-47 -47 -112 -47 q-68 0 -115 47q-23 23 -50.5 47.5t-52 52t-41 58.5t-16.5 68zM203 242q0 -33 22.5 -56.5t57.5 -23.5q33 0 56 23.5t23 56.5q0 35 -23.5 57.5t-55.5 22.5q-35 0 -57.5 -23t-22.5 -57z" />
-<glyph unicode="&#xf0ae;" d="M0 76v217q0 31 22.5 53.5t53.5 22.5h1689q33 0 55.5 -22.5t22.5 -53.5v-217q0 -31 -22.5 -53.5t-55.5 -22.5h-1689q-31 0 -53.5 22.5t-22.5 53.5zM0 662v215q0 31 22.5 53t53.5 22h1689q33 0 55.5 -22.5t22.5 -52.5v-215q0 -33 -22.5 -55.5t-55.5 -22.5h-1689 q-31 0 -53.5 22.5t-22.5 55.5zM0 1243v217q0 31 22.5 53.5t53.5 22.5h1689q33 0 55.5 -22.5t22.5 -53.5v-217q0 -31 -22.5 -53.5t-55.5 -22.5h-1689q-31 0 -53.5 22.5t-22.5 53.5zM752 692h999v154h-999v-154zM1059 109h692v153h-692v-153zM1366 1274h385v153h-385v-153z " />
-<glyph unicode="&#xf0b0;" horiz-adv-x="1566" d="M0 1458q0 33 22.5 55.5t55.5 22.5h1411q33 0 55.5 -22.5t22.5 -55.5t-23 -55l-565 -565v-791q0 -33 -22.5 -55.5t-55.5 -22.5t-55 23l-236 235q-23 23 -22 56v555l-565 565q-23 23 -23 55z" />
-<glyph unicode="&#xf0b1;" d="M0 115v512h713q-2 -6 -2 -19v-153q0 -55 38.5 -95.5t96.5 -40.5h153q55 0 94.5 40t39.5 96v153q0 12 -3 19h713v-512q0 -47 -33.5 -81t-80.5 -34h-1614q-47 0 -81 34t-34 81zM0 743v371q0 47 34 81t81 34h424v231q0 31 21.5 53.5t53.5 22.5h615q33 0 55.5 -22.5 t22.5 -53.5v-231h422q47 0 80.5 -34t33.5 -81v-371h-1843zM692 1229h461v153h-461v-153zM825 455v153q0 18 21 19h153q18 0 19 -19v-153q0 -18 -19 -19h-153q-20 0 -21 19z" />
-<glyph unicode="&#xf0b2;" horiz-adv-x="1536" d="M0 80v477q0 51 24.5 61.5t59.5 -24.5l162 -162l340 338l-338 338l-164 -164q-35 -35 -59.5 -25.5t-24.5 60.5v477q0 31 23 57q27 23 57 23h477q51 0 61.5 -24.5t-24.5 -59.5l-160 -158l338 -338l332 334l-162 162q-35 35 -24.5 59.5t61.5 24.5h477q33 0 55 -23 q25 -25 25 -57v-477q0 -51 -24.5 -61.5t-59.5 24.5l-162 162l-334 -334l338 -336l158 160q35 35 59.5 24.5t24.5 -61.5v-477q0 -35 -25 -55q-23 -25 -55 -25h-477q-51 0 -61.5 24.5t24.5 59.5l166 166l-336 336l-340 -340l162 -162q35 -35 24.5 -59.5t-61.5 -24.5h-477 q-31 0 -55 25q-25 20 -25 55z" />
-<glyph unicode="&#xf0c0;" horiz-adv-x="1880" d="M0 852v152q0 18 1 47.5t10 56.5t29.5 46.5t57.5 19.5q-45 29 -71.5 75.5t-26.5 104.5q0 43 16.5 82t46 68.5t68.5 46t82 16.5q45 0 84 -16.5t67.5 -46t46 -68.5t17.5 -82q0 -57 -27.5 -104t-72.5 -76q37 0 57.5 -19.5t29.5 -46.5t11 -56.5t2 -47.5v-152 q-14 -8 -23.5 -18.5t-27.5 -10.5h-328q-16 0 -26.5 10.5t-22.5 18.5zM158 57v387q0 78 45 138.5t98 109.5q10 10 25.5 21.5t33.5 15.5q18 6 41 7t45 5q61 10 130 19.5t135 19.5q-90 57 -144.5 151.5t-54.5 207.5q0 88 34 166.5t92 136t136 91.5t166 34t166 -34t136 -91.5 t92 -136t34 -166.5q0 -113 -54 -207t-145 -152q66 -10 134.5 -19t130.5 -20q23 -4 45 -5t41 -7q18 -4 33.5 -15.5t27.5 -21.5q66 -59 103.5 -116.5t37.5 -131.5v-387q-12 -6 -20 -13t-18.5 -14t-23.5 -14.5t-36 -15.5h-1368q-35 0 -54.5 22.5t-43.5 34.5zM1452 852v152 q0 18 2 47.5t11.5 56.5t30 46.5t56.5 19.5q-45 29 -72.5 75.5t-27.5 104.5q0 43 16.5 82t46 68.5t68.5 46t84 16.5q43 0 82 -16.5t68.5 -46t46 -68.5t16.5 -82q0 -57 -26.5 -104t-71.5 -76q37 0 56.5 -19.5t28.5 -46.5t11 -56.5t2 -47.5v-152q-12 -8 -22.5 -18.5 t-26.5 -10.5h-328q-18 0 -27.5 10.5t-23.5 18.5z" />
-<glyph unicode="&#xf0c1;" horiz-adv-x="1597" d="M0 1137q0 88 34 166.5t92 137t136 92.5t168 34q86 0 166 -33t139 -92q8 -8 21.5 -20.5t26 -25t21.5 -25.5t9 -25q0 -18 -12 -31q-6 -8 -25 -12q-47 -10 -88 -22.5t-86 -31.5q-4 -4 -16 -4t-25.5 10.5t-31 21.5t-42 21.5t-57.5 10.5q-35 0 -66.5 -13.5t-54 -37t-36 -54 t-13.5 -67.5q0 -41 17.5 -75t43 -63.5t56.5 -56.5t57 -53l180 -178q23 -25 54.5 -37t66.5 -12q43 0 73 16t46 16q12 0 39 -21.5t55.5 -49t50 -55t21.5 -42.5q0 -29 -36 -51t-83 -38.5t-94 -26t-72 -9.5q-86 0 -164.5 33t-140.5 92l-303 305q-61 59 -94 139.5t-33 166.5z M578 1010q0 29 35.5 51t82.5 38.5t94 26t72 9.5q86 0 166 -33t139 -92l303 -305q61 -59 94 -139.5t33 -166.5q0 -90 -33.5 -167.5t-92 -136t-137.5 -92.5t-167 -34q-86 0 -165.5 34t-139.5 93q-8 8 -21.5 19.5t-25.5 25t-21.5 26.5t-9.5 26q0 18 13 28q6 8 24 12 q47 10 88 22.5t86 33.5q12 4 17 4q12 0 25.5 -10.5t30.5 -21.5t41 -21.5t58 -10.5q72 0 121 49.5t49 120.5q0 41 -17 76t-44 63.5t-56.5 55.5t-56.5 53l-178 180q-53 49 -123 50q-43 0 -72.5 -17.5t-46.5 -17.5q-12 0 -38.5 21.5t-55 49t-50 56t-21.5 41.5z" />
-<glyph unicode="&#xf0c2;" horiz-adv-x="1880" d="M0 391q0 70 23.5 132.5t64.5 113.5t96.5 86t120.5 51q-33 41 -51 90t-18 105q0 66 24.5 123t66.5 99t99 66.5t123 24.5q113 0 196 -72q18 76 61.5 141.5t103 113.5t133 75t157.5 27q96 0 182 -37t149.5 -100.5t100.5 -149.5t37 -182q0 -57 -13 -109.5t-36 -99.5 q117 -59 188.5 -170t71.5 -248q0 -98 -37 -183t-101.5 -149.5t-149.5 -101.5t-183 -37h-1018q-80 0 -151.5 30.5t-125 84t-84 125t-30.5 151.5z" />
-<glyph unicode="&#xf0c3;" horiz-adv-x="1536" d="M16 72q-39 72 7 141l516 803v366h-78q-33 0 -54.5 22.5t-21.5 55.5q0 31 21.5 53.5t54.5 22.5h614q33 0 55.5 -22.5t22.5 -53.5q0 -33 -22.5 -55.5t-55.5 -22.5h-76v-368l514 -801q45 -70 7 -141q-18 -35 -51 -53.5t-70 -18.5h-1262q-37 0 -69.5 18.5t-51.5 53.5z M377 479h782l-313 490v413h-154v-411z" />
-<glyph unicode="&#xf0c4;" horiz-adv-x="1916" d="M0 180q0 86 44 168t113.5 146.5t154.5 103.5t169 39h13q18 25 57 60.5t63 58.5v12v12q-12 12 -28.5 27.5t-35 32t-33.5 32t-23 27.5q-84 -6 -172.5 33t-160 105.5t-116.5 150.5t-45 168q0 98 62.5 154.5t160.5 56.5q82 0 168 -39t155.5 -103.5t114 -146.5t44.5 -168 q0 -10 -1.5 -21.5t-3.5 -21.5l86 -76l34 17q34 18 90.5 46.5t129 65.5t148.5 76t150.5 77t135 69.5t102.5 52t52 26.5q16 8 37 9q14 0 39 -4.5t52.5 -9.5t53 -11t41.5 -8q29 -4 47.5 -26.5t18.5 -51.5q0 -43 -35 -66l-700 -485l700 -485q35 -23 35 -66q0 -29 -18.5 -51.5 t-47.5 -26.5q-16 -4 -41.5 -9t-53 -10t-52.5 -9.5t-39 -4.5q-10 0 -18.5 2.5t-18.5 6.5l-842 430l-86 -76q2 -10 3.5 -21.5t1.5 -21.5q0 -86 -44.5 -168t-114 -146.5t-155.5 -103.5t-168 -39q-98 0 -160.5 56.5t-62.5 154.5zM197 1358q0 -23 11 -49.5t27.5 -51t35 -45 t34.5 -34.5q35 -29 82 -54.5t94 -25.5q10 0 27 4v6q0 23 -11.5 48.5t-27.5 50t-34.5 46t-35.5 35.5q-35 29 -82 54.5t-94 25.5q-14 0 -26 -4v-6zM199 180q0 -6 2 -10q6 0 11 -1t11 -1q41 0 90 24.5t92.5 61.5t72 83t28.5 89q0 4 -1 5t-1 5q-8 2 -23 2q-41 0 -90 -24.5 t-92 -61.5t-71.5 -83t-28.5 -89zM586 616q53 -25 82 -69l108 96l45 -27l1016 703l-172 31l-975 -498l6 -127zM586 920l35 -35q12 29 36.5 41t53.5 26l-43 37q-29 -45 -82 -69zM741 768q0 35 23.5 57.5t58.5 22.5q33 0 55.5 -22.5t22.5 -57.5q0 -33 -22.5 -56.5t-57.5 -23.5 q-33 0 -56.5 23.5t-23.5 56.5zM903 575l762 -389l172 31l-727 502z" />
-<glyph unicode="&#xf0c5;" horiz-adv-x="1597" d="M0 385v707q0 33 16.5 70.5t38.5 60.5l258 258q23 23 61 39t70 16h463q31 0 53.5 -22.5t22.5 -53.5v-250q16 8 35.5 13.5t40.5 5.5h463q31 0 53 -22.5t22 -53.5v-1075q0 -33 -22.5 -55.5t-52.5 -22.5h-830q-33 0 -55.5 22.5t-22.5 55.5v229h-536q-33 0 -55.5 22.5 t-22.5 55.5zM154 461h460v293q0 45 10.5 87t45.5 74l159 162v305h-278v-321q0 -31 -22.5 -53.5t-53.5 -22.5h-321v-524zM186 1139h211v211zM768 154h676v921h-279v-321q0 -31 -22.5 -53.5t-52.5 -22.5h-322v-524zM801 831h211v211z" />
-<glyph unicode="&#xf0c6;" horiz-adv-x="1470" d="M0 1128q0 90 35 170t94 139.5t139 94.5t170 35q88 0 169 -34t143 -95l655 -656q12 -12 12 -28q0 -10 -11 -26.5t-26.5 -32t-32 -27t-26.5 -11.5q-16 0 -29 13l-655 655q-41 41 -92 61.5t-107 20.5q-57 0 -108 -21.5t-89 -59.5t-59.5 -89t-21.5 -109q0 -55 20.5 -106 t61.5 -92l745 -746q55 -55 133 -55q39 0 74 15.5t60.5 41t41 60.5t15.5 73q0 78 -56 134l-563 563q-29 29 -69 28q-41 0 -71 -29.5t-30 -70.5t29 -70l475 -475q12 -12 12 -28q0 -10 -11 -26.5t-26.5 -32t-32 -27t-26.5 -11.5q-16 0 -29 13l-475 473q-35 37 -55.5 85 t-20.5 99q0 53 20.5 100.5t55.5 83t83.5 56t101.5 20.5q51 0 99 -20.5t85 -55.5l561 -563q102 -102 102 -246q0 -74 -27.5 -137t-74.5 -110t-110.5 -75t-137.5 -28q-143 0 -246 103l-745 745q-61 61 -95 142t-34 169z" />
-<glyph unicode="&#xf0c7;" horiz-adv-x="1536" d="M0 154v1228q0 63 45 108.5t109 45.5h1075q63 0 108 -45l154 -154q45 -45 45 -108v-1075q0 -63 -45 -108.5t-109 -45.5h-1228q-63 0 -108.5 45t-45.5 109zM154 846h1075v536h-1075v-536zM846 922v385h190v-385h-190z" />
-<glyph unicode="&#xf0c8;" horiz-adv-x="1536" d="M0 193v1150q0 39 15.5 75t41 61.5t60.5 41t76 15.5h1150q80 0 136.5 -56.5t56.5 -136.5v-1150q0 -41 -15.5 -76t-41 -60.5t-61.5 -41t-75 -15.5h-1150q-80 0 -136.5 56.5t-56.5 136.5z" />
-<glyph unicode="&#xf0c9;" d="M0 78v215q0 31 22.5 53.5t55.5 22.5h1689q31 0 53.5 -22.5t22.5 -53.5v-215q0 -33 -22.5 -55.5t-53.5 -22.5h-1689q-33 0 -55.5 22.5t-22.5 55.5zM0 659v218q0 31 22.5 53t55.5 22h1689q31 0 53.5 -22.5t22.5 -52.5v-218q0 -31 -22.5 -53t-53.5 -22h-1689 q-33 0 -55.5 22.5t-22.5 52.5zM0 1243v217q0 31 22.5 53.5t55.5 22.5h1689q31 0 53.5 -22.5t22.5 -53.5v-217q0 -31 -22.5 -53.5t-53.5 -22.5h-1689q-33 0 -55.5 22.5t-22.5 53.5z" />
-<glyph unicode="&#xf0ca;" d="M0 211q0 39 15.5 75t41 61.5t60.5 40.5t76 15q39 0 73.5 -15t61 -40.5t42 -61.5t15.5 -75q0 -41 -15.5 -76t-42 -60.5t-61.5 -41t-73 -15.5q-80 0 -136.5 56.5t-56.5 136.5zM0 768q0 39 15.5 75t41 61.5t60.5 41t76 15.5q39 0 73.5 -15.5t61 -41t42 -61.5t15.5 -75 q0 -41 -15.5 -76t-42 -60.5t-61.5 -41t-73 -15.5q-41 0 -76 15.5t-60.5 41t-41 60.5t-15.5 76zM0 1325q0 39 15.5 75t41 61.5t60.5 41t76 15.5q39 0 73.5 -15.5t61 -41t42 -61.5t15.5 -75q0 -41 -15.5 -76t-42 -60.5t-61.5 -40.5t-73 -15q-41 0 -76 15t-60.5 40.5t-41 60.5 t-15.5 76zM522 154v114q0 33 22.5 55.5t55.5 22.5h1167q31 0 53.5 -22.5t22.5 -55.5v-114q0 -33 -22.5 -54.5t-53.5 -21.5h-1167q-33 0 -55.5 21.5t-22.5 54.5zM522 711v114q0 33 22.5 55.5t55.5 22.5h1167q31 0 53.5 -22.5t22.5 -55.5v-114q0 -33 -22.5 -55.5t-53.5 -22.5 h-1167q-33 0 -55.5 22.5t-22.5 55.5zM522 1268v114q0 33 22.5 55.5t55.5 22.5h1167q31 0 53.5 -22.5t22.5 -55.5v-114q0 -33 -22.5 -55.5t-53.5 -22.5h-1167q-33 0 -55.5 22.5t-22.5 55.5z" />
-<glyph unicode="&#xf0cb;" d="M0 594q0 55 30.5 90t68.5 61.5t69 50t31 56.5q0 29 -17.5 44t-46.5 15q-25 0 -44 -15t-34 -36l-53 37q23 39 58.5 60.5t78.5 21.5q55 0 96.5 -32t41.5 -91q0 -49 -30 -80t-67 -55.5t-67.5 -47t-30.5 -55.5h143v54h66v-115h-289q-2 10 -3 18.5t-1 18.5zM4 55l37 54 q6 -4 12 -10.5t15 -10.5q12 -8 29.5 -14t37.5 -6q35 0 55.5 18t20.5 47q0 31 -23.5 48.5t-58.5 17.5h-23l-16 37l90 108l6 6t7 6l4 6h-9q-4 -2 -14 -2h-92v-49h-66v111h263v-53l-97 -115q41 -6 74 -35t33 -82t-38 -95t-110 -42q-35 0 -60.5 9t-41.5 20q-25 14 -35 26z M14 1434l105 102h67v-360h93v-62h-259v62h93v258v8h-2q-10 -16 -22.5 -28.5t-29.5 -27.5zM522 154v114q0 33 22.5 55.5t55.5 22.5h1167q31 0 53.5 -22.5t22.5 -55.5v-114q0 -33 -22.5 -54.5t-53.5 -21.5h-1167q-33 0 -55.5 21.5t-22.5 54.5zM522 711v114q0 33 22.5 55.5 t55.5 22.5h1167q31 0 53.5 -22.5t22.5 -55.5v-114q0 -33 -22.5 -55.5t-53.5 -22.5h-1167q-33 0 -55.5 22.5t-22.5 55.5zM522 1268v114q0 33 22.5 55.5t55.5 22.5h1167q31 0 53.5 -22.5t22.5 -55.5v-114q0 -33 -22.5 -55.5t-53.5 -22.5h-1167q-33 0 -55.5 22.5t-22.5 55.5z " />
-<glyph unicode="&#xf0cc;" horiz-adv-x="1536" d="M0 692v152h1536v-152h-1536zM268 1114q0 117 44 202t118 141t170 83t203 27q88 0 176 -20.5t172 -49.5q20 -72 27.5 -156t7.5 -157q0 -10 -1 -22.5t-3 -24.5l-13 -2q-23 2 -50 2t-50 6q-18 59 -44 114.5t-62.5 98.5t-88 68.5t-122.5 25.5q-49 0 -94.5 -13.5t-80 -40 t-56 -66.5t-21.5 -93q0 -59 28.5 -102t75.5 -75t105.5 -54.5t118 -42t113.5 -39t93 -41.5h-696q-33 53 -51.5 110.5t-18.5 120.5zM285 310v63v45l110 2q31 -72 64 -134.5t76 -108.5t104 -71.5t152 -25.5q55 0 110 16.5t100.5 48t74 78.5t28.5 109q0 76 -49 128t-119 88 t-146.5 59.5t-132.5 45.5h617q8 -16 15 -38.5t10.5 -48t5.5 -50t2 -43.5q0 -131 -47.5 -226t-127 -156.5t-186 -91.5t-225.5 -30q-53 0 -93 4t-78 12.5t-78 20.5t-93 29q-12 4 -38 12t-36 16q-8 6 -12 50.5t-6 96.5t-2 100z" />
-<glyph unicode="&#xf0cd;" horiz-adv-x="1536" d="M0 0v154h1536v-154h-1536zM0 1532q10 2 19.5 2h19.5q72 0 140.5 -5t137.5 -5q100 0 200.5 2t199.5 6q-4 -16 -1 -36.5t3 -37.5v-8q-66 -10 -108 -7t-65.5 -5t-32.5 -39t-9 -109q0 -137 4 -272t10 -273q8 -147 88 -237t238 -90q125 0 207 29.5t131 88t69.5 146.5t20.5 203 q0 20 -2 65t-5 102.5t-8.5 119t-11.5 113.5t-13 89t-15 45q-33 33 -78 33q-6 0 -23.5 -1t-36 -1t-34 1t-21.5 3l2 82q82 4 163 -3t165 -7q39 0 77.5 5t79.5 5q4 0 9.5 -1t9.5 -1q2 -12 4 -24.5t2 -24.5t-4 -29q-23 -6 -52.5 -8t-58 -7t-48 -16.5t-19.5 -38.5q0 -14 1 -27 t3 -28q2 -6 5 -38.5t5 -79t4 -101.5t4 -103t3 -85t1 -50q0 -37 -2 -80.5t-8 -89t-17.5 -87.5t-29.5 -74q-41 -68 -104.5 -114t-137 -72.5t-153.5 -38t-154 -11.5q-72 0 -144.5 9.5t-140.5 35.5q-96 35 -151 88.5t-84 121t-36 148.5t-7 173v321v42t-1 62.5t-5 60.5t-10 38 q-12 16 -37 23.5t-53.5 10.5t-56.5 3t-44 4z" />
-<glyph unicode="&#xf0ce;" d="M0 115v1306q0 47 34 81t81 34h1614q47 0 80.5 -34t33.5 -81v-1306q0 -47 -33.5 -81t-80.5 -34h-1614q-47 0 -81 34t-34 81zM154 154h460v335h-460v-335zM154 565h460v332h-460v-332zM154 975h460v332h-460v-332zM692 154h461v335h-461v-335zM692 565h461v332h-461v-332z M692 975h461v332h-461v-332zM1229 154h461v335h-461v-335zM1229 565h461v332h-461v-332zM1229 975h461v332h-461v-332z" />
-<glyph unicode="&#xf0d0;" d="M0 197q0 39 29 61l1579 1247q20 17 47 17q35 0 59 -29q10 -12 29.5 -33.5t38 -44t32 -47.5t13.5 -43q0 -39 -29 -61l-1579 -1248q-20 -16 -47 -16q-37 0 -59 29q-10 12 -30 33.5t-38 44t-31.5 47t-13.5 43.5zM256 922l117 36l37 117l34 -117l119 -36l-119 -37l-34 -117 l-37 117zM373 1229l235 71l72 236l72 -236l235 -71l-235 -72l-72 -235l-72 235zM942 1382l117 37l37 117l37 -117l116 -37l-116 -36l-37 -117l-37 117zM1286 1153l94 -121l369 291l-94 121zM1536 885l117 35l37 118l36 -118l117 -35l-117 -37l-36 -117l-37 117z" />
-<glyph unicode="&#xf0d1;" d="M0 358v404q0 25 6 51.5t17.5 53t27 51t31.5 40.5l223 222q16 16 41 31.5t51.5 26.5t53 17.5t51.5 6.5h80v198q0 31 22.5 53.5t52.5 22.5h1108q33 0 55.5 -22.5t22.5 -53.5v-1102q0 -31 -22.5 -53t-55.5 -22h-75v-7q0 -63 -25 -119.5t-65.5 -97.5t-97 -65.5t-120.5 -24.5 q-63 0 -119.5 24.5t-97.5 65.5t-65.5 97.5t-24.5 119.5v7h-307v-7q0 -63 -24.5 -119.5t-65.5 -97.5t-97.5 -65.5t-119.5 -24.5t-119.5 24.5t-97.5 65.5t-65.5 97.5t-24.5 119.5v7h-78q-31 0 -53.5 22.5t-22.5 52.5zM193 700h387v369h-78q-10 0 -31.5 -8t-30.5 -17l-223 -221 q-8 -8 -16 -29.5t-8 -31.5v-62zM346 276q0 -47 34 -80.5t81 -33.5t80.5 33.5t33.5 80.5t-33.5 81t-80.5 34t-81 -33.5t-34 -81.5zM1268 276q0 -47 33.5 -80.5t80.5 -33.5t81 33.5t34 80.5t-33.5 81t-81.5 34q-47 0 -80.5 -33.5t-33.5 -81.5z" />
-<glyph unicode="&#xf0d2;" horiz-adv-x="1597" d="M0 768q0 166 62.5 311.5t171 254t254 171t311.5 62.5t311 -62.5t254 -171t171 -254t62 -311.5t-62 -311.5t-171 -254t-254 -171t-311 -62.5q-59 0 -115.5 8.5t-112.5 24.5q23 35 46.5 80t35.5 92l11 41q6 23 16 66t29 112q23 -41 76 -70.5t118 -29.5q96 0 176 41 t136.5 114.5t88 174t31.5 217.5q0 88 -35.5 171t-102.5 147.5t-160 102t-208 37.5q-141 0 -248.5 -46t-179 -118.5t-107.5 -160.5t-36 -174q0 -104 40 -187.5t124 -117.5q31 -10 41 20q2 10 7 31.5t9 32.5q4 16 1 23t-13 22q-53 59 -53 155q0 76 27.5 145.5t78.5 122t124 84 t163 31.5q80 0 142.5 -23.5t105.5 -64.5t64.5 -96t21.5 -121q0 -86 -18.5 -164t-52.5 -136t-80 -92t-103 -34q-31 0 -57.5 12.5t-45 34t-25.5 49t1 60.5q18 76 44.5 151.5t26.5 130.5q0 49 -26.5 84t-81.5 35q-66 0 -110 -58.5t-44 -146.5q-2 -23 2 -45q2 -18 7.5 -40.5 t15.5 -41.5q-33 -141 -53.5 -225t-30.5 -131q-12 -55 -18 -80q-10 -45 -13.5 -93t-1.5 -89q-104 45 -192 119.5t-151.5 169t-98.5 207t-35 235.5z" />
-<glyph unicode="&#xf0d3;" horiz-adv-x="1536" d="M0 193v1150q0 39 15.5 75t41 61.5t60.5 41t76 15.5h1150q80 0 136.5 -56.5t56.5 -136.5v-1150q0 -41 -15.5 -76t-41 -60.5t-61.5 -41t-75 -15.5h-829q27 39 59.5 95.5t49.5 117.5l10 41q6 25 17.5 68t29.5 112q20 -41 75.5 -70.5t121.5 -29.5q96 0 176 42t137 115.5 t89 175t32 220.5q0 90 -37 173t-103.5 147.5t-161 102.5t-208.5 38q-143 0 -251 -46.5t-180.5 -120t-108.5 -162.5t-36 -175q0 -104 40 -188.5t124 -118.5q14 -6 25.5 -1t15.5 21q4 10 8 31.5t8 34.5q8 23 -12 43q-23 29 -37 67.5t-14 91.5q0 76 27.5 145.5t79 123t124 85 t164.5 31.5q82 0 144.5 -23.5t105.5 -64.5t64.5 -97t21.5 -124q0 -86 -18.5 -164t-52 -136t-81 -93t-104.5 -35q-31 0 -57.5 12.5t-45 34t-25.5 49t1 60.5q18 78 45 154.5t27 131.5q0 49 -26.5 85t-82.5 36q-66 0 -110.5 -59t-44.5 -150q-2 -23 2 -45q4 -43 22 -84 q-35 -141 -54.5 -226t-29.5 -132q-12 -55 -18 -80q-16 -70 -14 -142.5t6 -117.5h-260q-80 0 -136.5 56.5t-56.5 136.5z" />
-<glyph unicode="&#xf0d4;" horiz-adv-x="1536" d="M0 342v438q55 -68 120.5 -96.5t153.5 -28.5h36t34 4q-8 -23 -14 -42t-6 -42q0 -41 18 -77.5t43 -67.5q-104 -4 -198.5 -22.5t-186.5 -65.5zM0 1214v129q0 39 15.5 75t41 61.5t60.5 41t76 15.5h1150q80 0 136.5 -56.5t56.5 -136.5v-1150q0 -41 -15.5 -76t-41 -60.5 t-61.5 -41t-75 -15.5h-583q23 39 36 81t13 89q0 70 -21.5 121t-54.5 90t-70.5 69.5t-70.5 57.5t-54.5 53.5t-21.5 59.5q0 49 34 81.5t76 72.5t75.5 99.5t33.5 163.5q0 78 -39 157t-106 122h131l133 76h-428q-129 0 -237.5 -36t-188.5 -143zM2 170q14 57 55 96t96.5 61.5 t115 33t110.5 10.5h31.5t31.5 -2q37 -27 78 -54.5t75 -61.5t55.5 -75t21.5 -92q0 -47 -19 -86h-460q-72 0 -126.5 49t-64.5 121zM102 1094q0 43 11.5 85t35 73.5t59.5 51t85 19.5q68 0 118 -41t81.5 -100.5t47 -128t15.5 -123.5q0 -45 -9 -84t-30.5 -68.5t-56.5 -47 t-84 -17.5q-66 0 -116 39t-85 96t-53.5 123.5t-18.5 122.5zM854 1051h223v-224h109v224h223v108h-223v225h-109v-225h-223v-108z" />
-<glyph unicode="&#xf0d5;" horiz-adv-x="1536" d="M0 262q0 100 56.5 166t139.5 105.5t179 56t174 18.5q-23 29 -41.5 64t-18.5 76q0 23 6.5 41t14.5 40q-16 -4 -33.5 -4h-34.5q-68 0 -130 23.5t-108 66.5t-74 101.5t-28 130.5q0 68 25 128t67 109.5t98 84t120 49.5q88 18 176 18h411l-127 -74h-127q72 -43 107 -120.5 t35 -157.5q0 -94 -33 -149.5t-73 -94.5t-72.5 -70.5t-32.5 -78.5q0 -45 44 -82t97 -82t97 -107.5t44 -161.5q0 -102 -50 -175.5t-126.5 -121t-170 -70t-181.5 -22.5q-66 0 -141.5 15.5t-140 50.5t-106.5 90t-42 137zM174 307q0 -68 35 -116t87 -78.5t114.5 -44t117.5 -13.5 q51 0 104.5 11.5t96.5 38t69.5 68.5t26.5 103q0 49 -21.5 89.5t-53 72t-70.5 59t-76 52.5q-14 2 -29.5 3t-29.5 1q-57 0 -123 -12.5t-121 -41t-91 -75.5t-36 -117zM276 1247q0 -53 17.5 -117.5t51.5 -120t82.5 -92t111.5 -36.5q96 0 134 61.5t38 147.5q0 53 -14.5 118.5 t-45 124t-78 97t-112.5 38.5q-47 0 -82 -18t-57.5 -49t-34 -71t-11.5 -83zM999 1206h215v-215h107v215h215v105h-215v217h-107v-217h-215v-105z" />
-<glyph unicode="&#xf0d6;" d="M0 78v1382q0 31 22.5 53.5t55.5 22.5h1689q31 0 53.5 -22.5t22.5 -53.5v-1382q0 -33 -22.5 -55.5t-53.5 -22.5h-1689q-33 0 -55.5 22.5t-22.5 55.5zM154 461q63 0 119.5 -24.5t97.5 -65.5t65.5 -97.5t24.5 -119.5h921q0 63 25 119.5t65.5 97.5t97 65.5t120.5 24.5v614 q-63 0 -120 24.5t-97.5 65.5t-65.5 97.5t-25 119.5h-921q0 -63 -24.5 -119.5t-65.5 -97.5t-97.5 -65.5t-119.5 -24.5v-614zM539 768q0 104 29.5 195.5t81.5 159t122 106.5t150 39t149.5 -39t121.5 -106.5t83 -159t31 -195.5t-31 -195.5t-83 -159t-122 -106.5t-149 -39 q-80 0 -150 39t-122 106.5t-81.5 159t-29.5 195.5zM692 969l94 -99l39 35q8 6 11.5 13.5t7.5 11.5q4 2 8 10h2v-16q0 -8 -1 -16.5t-1 -18.5v-283h-139v-129h438v129h-141v529h-140z" />
-<glyph unicode="&#xf0d7;" horiz-adv-x="1228" d="M6 967q20 47 72 47h1075q50 0 70 -47t-17 -84l-536 -537q-27 -23 -56 -22q-29 0 -55 22l-536 537q-37 37 -17 84z" />
-<glyph unicode="&#xf0d8;" horiz-adv-x="1228" d="M6 371q-20 47 17 84l536 536q23 23 55 23q31 0 54 -23l538 -536q37 -37 17 -84t-70 -47h-1077q-50 0 -70 47z" />
-<glyph unicode="&#xf0d9;" horiz-adv-x="692" d="M0 768q0 33 23 55l538 537q16 16 38 21t44 -5q23 -8 36 -27.5t13 -41.5v-1076q0 -25 -13 -43t-36 -28t-44 -5t-38 21l-538 539q-23 23 -23 53z" />
-<glyph unicode="&#xf0da;" horiz-adv-x="692" d="M0 231v1076q0 49 47 69t84 -16l539 -537q23 -23 22 -55q0 -31 -22 -53l-539 -539q-37 -37 -84 -16q-47 20 -47 71z" />
-<glyph unicode="&#xf0db;" d="M0 115v1306q0 47 34 81t81 34h1614q47 0 80.5 -34t33.5 -81v-1306q0 -47 -33.5 -81t-80.5 -34h-1614q-47 0 -81 34t-34 81zM154 154h692v1153h-692v-1153zM999 154h691v1153h-691v-1153z" />
-<glyph unicode="&#xf0dc;" horiz-adv-x="1228" d="M0 584q0 31 22.5 54.5t53.5 23.5h1075q33 0 55.5 -23t22.5 -55q0 -33 -23 -56l-536 -536q-23 -23 -56 -23t-55 23l-536 536q-23 23 -23 56zM0 952q0 33 23 56l536 536q23 23 55 23q33 0 56 -23l536 -536q23 -23 23 -56q0 -31 -22.5 -54.5t-55.5 -23.5h-1075 q-31 0 -53.5 23t-22.5 55z" />
-<glyph unicode="&#xf0dd;" horiz-adv-x="1228" d="M0 584q0 31 22.5 54.5t53.5 23.5h1077q31 0 53.5 -23t22.5 -55q0 -33 -23 -56l-536 -536q-23 -23 -56 -23q-35 0 -53 23l-538 536q-23 23 -23 56z" />
-<glyph unicode="&#xf0de;" horiz-adv-x="1228" d="M0 952q0 33 23 56l536 536q23 23 55 23q33 0 56 -23l536 -536q23 -23 23 -56q0 -31 -22.5 -54.5t-53.5 -23.5h-1077q-31 0 -53.5 23t-22.5 55z" />
-<glyph unicode="&#xf0e0;" d="M0 115v905q10 -10 20.5 -17.5t20.5 -15.5q129 -96 256 -191.5t252 -195.5q39 -31 82 -62.5t89 -57t96.5 -41t103.5 -15.5q55 0 105 15.5t96 40t89 56t84 64.5q125 100 252 195.5t256 191.5q10 8 20.5 15.5t20.5 17.5v-905q0 -47 -33.5 -81t-80.5 -34h-1614q-47 0 -81 34 t-34 81zM2 1434q0 41 36 71.5t77 30.5h1614q41 0 76.5 -30.5t35.5 -71.5q0 -31 -18.5 -69t-45 -75t-57 -67.5t-53.5 -48.5q-123 -92 -241.5 -182.5t-239.5 -182.5q-25 -18 -57.5 -45t-67.5 -51.5t-71 -42t-66 -17.5h-2h-2q-31 0 -67 17.5t-70.5 42t-67.5 51.5t-58 45 q-121 92 -239.5 182t-241.5 183q-23 18 -53.5 48.5t-57 67.5t-45 75t-18.5 69z" />
-<glyph unicode="&#xf0e1;" horiz-adv-x="1576" d="M0 1376q0 39 15.5 74t41 60.5t60 41t73.5 15.5t74 -15.5t61.5 -41t41 -60.5t14.5 -74t-14.5 -73.5t-41 -60t-61.5 -41t-74 -15.5t-73.5 15.5t-60 41t-41 60t-15.5 73.5zM10 41v991q0 16 12.5 28.5t28.5 12.5h281q16 0 28.5 -12t12.5 -29v-991q0 -16 -12.5 -28.5 t-28.5 -12.5h-281q-16 0 -28.5 12.5t-12.5 28.5zM524 41v991q0 16 12.5 28.5t28.5 12.5h273q33 0 37 -25.5t4 -48.5q57 53 129.5 76t150.5 23q190 0 304 -95.5t114 -293.5v-668q0 -16 -12.5 -28.5t-28.5 -12.5h-289q-16 0 -27.5 12.5t-11.5 28.5v604q0 76 -27.5 116 t-111.5 40q-53 0 -89 -17.5t-56.5 -47t-28.5 -70.5t-8 -88v-537q0 -16 -12.5 -28.5t-28.5 -12.5h-281q-16 0 -28.5 12.5t-12.5 28.5z" />
-<glyph unicode="&#xf0e2;" horiz-adv-x="1591" d="M0 662q-4 39 35 45l158 20q12 2 30 -8q12 -8 15 -27q14 -102 63 -191t123 -153.5t169 -101.5t200 -37q115 0 217 44t178 120t120 177t44 218t-44 218t-120 177t-178.5 120t-216.5 44q-94 0 -181.5 -30.5t-158.5 -88.5l159 -159q35 -35 24 -58.5t-58 -23.5h-463 q-16 0 -29.5 6t-24.5 16q-25 25 -24 55v463q0 47 24.5 57.5t59.5 -24.5l162 -162q109 92 238.5 140.5t271.5 48.5q166 0 311 -62.5t253.5 -171t171 -254t62.5 -311.5t-62.5 -311.5t-171 -254t-254 -171t-310.5 -62.5q-150 0 -285 52.5t-241.5 145.5t-176.5 220t-90 275z" />
-<glyph unicode="&#xf0e3;" horiz-adv-x="1916" d="M0 651q0 49 37 91t88 42q35 0 68 -22q-20 14 -21 37q0 8 8 24l334 502q14 20 37 21q16 0 24 -9q-25 18 -40 43t-15 56q0 49 38 90t87 41q33 0 62 -19l514 -342q23 -16 36 -40.5t13 -53.5q0 -25 -10.5 -48.5t-26.5 -41.5t-40 -29.5t-48 -11.5q-33 0 -68 23q20 -14 21 -37 q0 -14 -8 -25l-105 -158l227 -151q14 4 28 6t28 2q53 0 94 -29l475 -315q37 -27 58.5 -65.5t21.5 -84.5q0 -74 -52.5 -126t-125.5 -52q-55 0 -99 31l-475 315q-70 47 -75 131l-228 152l-106 -158q-14 -20 -37 -20q-14 0 -25 8q55 -39 56 -100q0 -25 -10.5 -47.5t-28 -41 t-40 -29.5t-46.5 -11q-35 0 -62 18l-514 342q-49 33 -49 92z" />
-<glyph unicode="&#xf0e4;" horiz-adv-x="1880" d="M0 627q0 195 73.5 365.5t201.5 298.5t299 202t366 74t366 -74t299 -202t201.5 -299t73.5 -365q0 -78 -14.5 -159t-42 -159t-68.5 -149.5t-92 -130.5q-23 -29 -59 -29h-1328q-37 0 -59 29q-53 59 -93 130.5t-67.5 149.5t-42 159t-14.5 159zM158 627q0 -49 33.5 -83 t82.5 -34t83 34t34 83t-34.5 82.5t-82.5 33.5q-49 0 -82.5 -33.5t-33.5 -82.5zM352 1098q0 -49 35 -84t82 -35q49 0 84 35t35 84t-35 82.5t-84 33.5q-47 0 -82 -33.5t-35 -82.5zM743 313q0 -82 57.5 -139t139.5 -57t139.5 57t57.5 139q0 49 -26 91t-67 71q4 14 15.5 55 t27 95.5t33 114t30.5 110.5t22.5 88t9.5 43q0 23 -17.5 40t-40.5 17q-20 0 -35.5 -12t-21.5 -31l-135 -487q-39 -2 -74 -17.5t-60.5 -42t-40 -61.5t-14.5 -74zM823 1292q0 -49 34 -82.5t83 -33.5t83 33.5t34 82.5t-34 83t-83 34t-83 -34t-34 -83zM1292 1098q0 -49 35 -83 t84 -34t83 34t34 83t-34 82.5t-83 33.5t-84 -33.5t-35 -82.5zM1489 627q0 -49 34 -83t83 -34t82.5 34t33.5 83t-33.5 82.5t-82.5 33.5t-83 -33.5t-34 -82.5z" />
-<glyph unicode="&#xf0e5;" horiz-adv-x="1916" d="M0 866q0 123 49 225.5t130 183.5t184.5 139.5t209.5 92.5q94 31 190.5 45.5t194.5 14.5t194.5 -14.5t190.5 -45.5q106 -35 210 -93t185 -139t130 -183.5t49 -225.5t-49 -225t-130 -183t-184.5 -140.5t-210.5 -92.5q-94 -31 -190 -45t-195 -14q-92 0 -180 12 q-92 -72 -195.5 -119t-215.5 -73q-27 -4 -55.5 -10.5t-53.5 -6.5q-16 0 -27.5 11.5t-11.5 27.5q0 12 8.5 21.5t16.5 17.5q31 33 53 60.5t37.5 57.5t25 64.5t17.5 79.5q-78 43 -145.5 100.5t-120 128t-82 153.5t-29.5 175zM160 866q0 -72 24.5 -133t65.5 -113.5t94 -95.5 t111 -75q27 -14 51 -27.5t49 -28.5q-10 -47 -17.5 -94t-19.5 -94q59 29 111.5 64.5t105.5 76.5q55 -8 110.5 -14t112.5 -6q156 0 308 43q84 25 171 68.5t158.5 107t116.5 143.5t45 178t-45 178t-116.5 143.5t-158.5 107.5t-171 69q-152 43 -308 43q-158 0 -307 -43 q-84 -25 -171 -69t-158.5 -107.5t-116.5 -143t-45 -178.5z" />
-<glyph unicode="&#xf0e6;" horiz-adv-x="1916" d="M0 997q0 98 40 182.5t106.5 151t150.5 113.5t170 76q76 25 154.5 36t156.5 11q80 0 159 -11.5t155 -35.5q86 -29 170 -76t149.5 -113.5t105 -150.5t39.5 -183q0 -98 -39.5 -182t-105 -149.5t-149.5 -113.5t-170 -77q-154 -47 -314 -47q-37 0 -72.5 3t-72.5 7 q-150 -115 -336 -155q-20 -4 -43 -9.5t-45 -5.5q-12 0 -21.5 9.5t-9.5 23.5q0 10 7.5 17.5l13.5 13.5q51 51 72.5 96t35.5 117q-63 35 -118.5 82t-97.5 104t-66.5 124t-24.5 142zM160 997q0 -72 30.5 -127t77.5 -99t104.5 -78t112.5 -64l-20 -111q33 18 63.5 42t59.5 46 q47 -6 95 -12t95 -6q135 0 264 41q61 18 124 52t114 81t84 105.5t33 129.5q0 72 -33 131.5t-84 105.5t-113.5 80t-124.5 52q-63 23 -129.5 32t-134.5 9q-66 0 -132 -9t-132 -32q-59 -18 -122.5 -52t-115 -80t-84 -105.5t-32.5 -131.5zM649 258l27 16q51 -6 102 -6 q197 0 381 62q111 37 211 99.5t177 147.5t123 190t46 230q0 41 -6 84q90 -72 148.5 -169t58.5 -214q0 -76 -24.5 -142.5t-66.5 -123.5t-97.5 -104t-118.5 -82q14 -72 35.5 -117t72.5 -96q6 -6 13.5 -14.5t7.5 -18.5q0 -16 -10.5 -23.5t-24.5 -7.5q-20 0 -43 5.5t-41 9.5 q-186 41 -336 155q-37 -4 -72.5 -7t-72.5 -3q-129 0 -254 32t-236 97z" />
-<glyph unicode="&#xf0e7;" horiz-adv-x="933" d="M0 557q0 8 2 10l209 932q4 16 16.5 26.5t28.5 10.5h344q18 0 31.5 -13.5t13.5 -33.5q0 -6 -1 -10t-3 -11l-180 -524q12 4 47 13.5t81 21.5t95 26.5t93 25.5t75 18.5t37 7.5q18 0 31.5 -13.5t13.5 -33.5q0 -12 -2 -19l-565 -1306q-10 -29 -43 -29q-18 0 -33 13.5t-15 33.5 q0 6 3 10l204 914q-12 -4 -48 -14.5t-82 -22.5t-97 -26.5t-95 -27t-76 -19.5t-38 -7q-20 0 -33.5 14.5t-13.5 32.5z" />
-<glyph unicode="&#xf0e8;" d="M0 78v383q0 33 22.5 55.5t55.5 22.5h133v153q0 55 40 94t95 39h518v174h-135q-31 0 -53.5 21.5t-22.5 54.5v385q0 31 22.5 53.5t53.5 22.5h385q31 0 53.5 -22.5t22.5 -53.5v-385q0 -33 -22.5 -54.5t-53.5 -21.5h-135v-174h518q55 0 95 -38.5t40 -94.5v-153h135 q31 0 53.5 -22.5t22.5 -55.5v-383q0 -33 -22.5 -55.5t-53.5 -22.5h-385q-33 0 -54 22.5t-21 55.5v383q0 33 21.5 55.5t53.5 22.5h136v153q0 18 -21 19h-518v-172h135q31 0 53.5 -22.5t22.5 -55.5v-383q0 -33 -22.5 -55.5t-53.5 -22.5h-385q-31 0 -53.5 22.5t-22.5 55.5v383 q0 33 22.5 55.5t53.5 22.5h135v172h-518q-20 0 -20 -19v-153h135q33 0 55.5 -22.5t22.5 -55.5v-383q0 -33 -22.5 -55.5t-55.5 -22.5h-383q-33 0 -55.5 22.5t-22.5 55.5z" />
-<glyph unicode="&#xf0e9;" horiz-adv-x="1884" d="M0 770q31 137 109.5 253t193.5 204t257 141t302 66v53q0 35 23.5 57.5t56.5 22.5q35 0 57.5 -22.5t22.5 -57.5v-53q160 -12 303 -65.5t258 -141.5t192.5 -204t108.5 -253q6 -29 -20 -43q-10 -6 -19 -6q-20 0 -28 12q-98 102 -218 103q-86 0 -163.5 -54.5t-130.5 -152.5 q-10 -23 -35 -23t-35 23q-82 150 -213 194v-526q0 -68 -23.5 -127t-63.5 -104.5t-93.5 -71t-114.5 -25.5t-115.5 25.5t-94.5 71t-63.5 104.5t-23.5 127q0 35 23.5 57.5t56.5 22.5q35 0 57.5 -22.5t22.5 -57.5q0 -70 40 -119t97 -49q55 0 95 49t40 119v526 q-131 -45 -213 -194q-10 -23 -35 -23t-34 23q-53 98 -130 152.5t-163 54.5q-121 0 -217 -103q-12 -12 -31 -12q-10 0 -19 6q-25 14 -20 43z" />
-<glyph unicode="&#xf0ea;" horiz-adv-x="1597" d="M0 385v1075q0 31 22.5 53.5t55.5 22.5h921q31 0 53.5 -22.5t22.5 -53.5v-231h33h61q25 0 55.5 -13.5t49.5 -29.5l280 -281q18 -18 30.5 -49t12.5 -55v-62v-678q0 -27 -17 -44t-44 -17h-860q-27 0 -44.5 17.5t-17.5 43.5v246h-536q-33 0 -55.5 22.5t-22.5 55.5zM276 1321 q0 -12 9.5 -21.5t21.5 -9.5h461q12 0 21.5 9.5t9.5 21.5v61q0 12 -9.5 21.5t-21.5 9.5h-461q-12 0 -21.5 -9t-9.5 -22v-61zM737 123h738v555h-367q-27 0 -44 17.5t-17 43.5v367h-310v-983zM1169 801h306q0 2 -3.5 8t-5.5 10l-280 279q0 4 -6 4q-2 0 -2 1t-2 1t-3.5 1t-3.5 1 v-305z" />
-<glyph unicode="&#xf105;" horiz-adv-x="2048" />
-<glyph unicode="&#xf200;" horiz-adv-x="1536" d="M0 57v379q0 39 20.5 87t54.5 90t74.5 73t81.5 35q-35 -59 -34 -129q0 -59 16 -124t59 -106q-18 -37 -18 -75q0 -72 50 -122t122 -50q37 0 68.5 13t55 36.5t37 55.5t13.5 67t-13.5 66.5t-37 55t-55 38t-68.5 14.5q-18 0 -36.5 -5t-35.5 -14q-12 12 -20 31t-13.5 40.5 t-7.5 41.5t-2 37q0 47 28 86t62 67l142 21q-92 57 -142.5 150.5t-50.5 201.5q0 86 33 162t90 133t133 90t162 33t162 -33t133 -90t90 -133t33 -162q0 -109 -50.5 -202t-142.5 -150l158 -23q18 -16 34.5 -36.5t16.5 -47.5q0 -45 -18 -77q-59 33 -121 32q-45 0 -87 -17 t-77 -46q-6 2 -11 3t-11 1q-47 0 -82 -34t-35 -81t34.5 -80.5t82.5 -33.5q47 0 80.5 33.5t33.5 80.5q0 8 -2 15.5t-4 13.5q16 12 35.5 20.5t42.5 8.5q55 0 95 -39t40 -94q0 -23 -8 -41.5t-19 -34.5q-25 10 -45 10q-47 0 -81.5 -34t-34.5 -81t34.5 -80.5t81.5 -33.5t81 33.5 t34 80.5l-2 4q74 74 74 177q0 74 -41 137q45 70 45 157q0 31 -12 62q41 -4 83 -34t74.5 -72t53 -90t20.5 -89v-379q-10 -6 -22.5 -15t-25.5 -18.5t-26.5 -16.5t-21.5 -7h-1344q-35 0 -52 20.5t-44 36.5zM369 287q0 25 17 41t40 16q25 0 41 -16t16 -41q0 -23 -16 -40.5 t-41 -17.5q-23 0 -40 17.5t-17 40.5z" />
-<glyph unicode="&#xf201;" horiz-adv-x="1597" />
-<glyph unicode="&#xf202;" horiz-adv-x="1597" />
-<glyph unicode="&#xf203;" horiz-adv-x="1597" />
-<glyph unicode="&#xf204;" horiz-adv-x="1597" />
-</font>
-</defs></svg> 
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/assets/font/fontawesome-webfont.ttf b/branch-1.2/ambari-web/app/assets/font/fontawesome-webfont.ttf
deleted file mode 100644
index c17e9f8..0000000
--- a/branch-1.2/ambari-web/app/assets/font/fontawesome-webfont.ttf
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/font/fontawesome-webfont.woff b/branch-1.2/ambari-web/app/assets/font/fontawesome-webfont.woff
deleted file mode 100644
index 09f2469..0000000
--- a/branch-1.2/ambari-web/app/assets/font/fontawesome-webfont.woff
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/arrow-right.png b/branch-1.2/ambari-web/app/assets/img/arrow-right.png
deleted file mode 100644
index 2cac47b..0000000
--- a/branch-1.2/ambari-web/app/assets/img/arrow-right.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/glyphicons-halflings-white.png b/branch-1.2/ambari-web/app/assets/img/glyphicons-halflings-white.png
deleted file mode 100644
index 3bf6484..0000000
--- a/branch-1.2/ambari-web/app/assets/img/glyphicons-halflings-white.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/glyphicons-halflings.png b/branch-1.2/ambari-web/app/assets/img/glyphicons-halflings.png
deleted file mode 100644
index 79bc568..0000000
--- a/branch-1.2/ambari-web/app/assets/img/glyphicons-halflings.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/health-status-dead-orange.png b/branch-1.2/ambari-web/app/assets/img/health-status-dead-orange.png
deleted file mode 100644
index 68e9bcc..0000000
--- a/branch-1.2/ambari-web/app/assets/img/health-status-dead-orange.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/health-status-dead-yellow.png b/branch-1.2/ambari-web/app/assets/img/health-status-dead-yellow.png
deleted file mode 100644
index 7fec6d4..0000000
--- a/branch-1.2/ambari-web/app/assets/img/health-status-dead-yellow.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/health-status-dead.png b/branch-1.2/ambari-web/app/assets/img/health-status-dead.png
deleted file mode 100644
index 882e42e..0000000
--- a/branch-1.2/ambari-web/app/assets/img/health-status-dead.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/health-status-live.png b/branch-1.2/ambari-web/app/assets/img/health-status-live.png
deleted file mode 100644
index 4d6bb86..0000000
--- a/branch-1.2/ambari-web/app/assets/img/health-status-live.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/heatmap_node_critical_block.png b/branch-1.2/ambari-web/app/assets/img/heatmap_node_critical_block.png
deleted file mode 100644
index 40b30d0..0000000
--- a/branch-1.2/ambari-web/app/assets/img/heatmap_node_critical_block.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/heatmap_node_dead_block.png b/branch-1.2/ambari-web/app/assets/img/heatmap_node_dead_block.png
deleted file mode 100644
index c6a356d..0000000
--- a/branch-1.2/ambari-web/app/assets/img/heatmap_node_dead_block.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/heatmap_node_live_block.png b/branch-1.2/ambari-web/app/assets/img/heatmap_node_live_block.png
deleted file mode 100644
index e649ca5..0000000
--- a/branch-1.2/ambari-web/app/assets/img/heatmap_node_live_block.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-bg_flat_0_aaaaaa_40x100.png b/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-bg_flat_0_aaaaaa_40x100.png
deleted file mode 100644
index 5b5dab2..0000000
--- a/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-bg_flat_0_aaaaaa_40x100.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-bg_glass_55_fbf9ee_1x400.png b/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-bg_glass_55_fbf9ee_1x400.png
deleted file mode 100644
index ad3d634..0000000
--- a/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-bg_glass_55_fbf9ee_1x400.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-bg_glass_65_ffffff_1x400.png b/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-bg_glass_65_ffffff_1x400.png
deleted file mode 100644
index 42ccba2..0000000
--- a/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-bg_glass_65_ffffff_1x400.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-bg_glass_75_dadada_1x400.png b/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-bg_glass_75_dadada_1x400.png
deleted file mode 100644
index 5a46b47..0000000
--- a/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-bg_glass_75_dadada_1x400.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-bg_glass_75_e6e6e6_1x400.png b/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-bg_glass_75_e6e6e6_1x400.png
deleted file mode 100644
index 86c2baa..0000000
--- a/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-bg_glass_75_e6e6e6_1x400.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-bg_glass_75_ffffff_1x400.png b/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-bg_glass_75_ffffff_1x400.png
deleted file mode 100644
index e65ca12..0000000
--- a/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-bg_glass_75_ffffff_1x400.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-bg_highlight-soft_75_cccccc_1x100.png b/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-bg_highlight-soft_75_cccccc_1x100.png
deleted file mode 100644
index 7c9fa6c..0000000
--- a/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-bg_highlight-soft_75_cccccc_1x100.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-bg_inset-soft_95_fef1ec_1x100.png b/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-bg_inset-soft_95_fef1ec_1x100.png
deleted file mode 100644
index 0e05810..0000000
--- a/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-bg_inset-soft_95_fef1ec_1x100.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-icons_222222_256x240.png b/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-icons_222222_256x240.png
deleted file mode 100644
index b273ff1..0000000
--- a/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-icons_222222_256x240.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-icons_2e83ff_256x240.png b/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-icons_2e83ff_256x240.png
deleted file mode 100644
index 09d1cdc..0000000
--- a/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-icons_2e83ff_256x240.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-icons_454545_256x240.png b/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-icons_454545_256x240.png
deleted file mode 100644
index 59bd45b..0000000
--- a/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-icons_454545_256x240.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-icons_888888_256x240.png b/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-icons_888888_256x240.png
deleted file mode 100644
index 6d02426..0000000
--- a/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-icons_888888_256x240.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-icons_cd0a0a_256x240.png b/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-icons_cd0a0a_256x240.png
deleted file mode 100644
index 2ab019b..0000000
--- a/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-icons_cd0a0a_256x240.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-icons_f6cf3b_256x240.png b/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-icons_f6cf3b_256x240.png
deleted file mode 100644
index c986935..0000000
--- a/branch-1.2/ambari-web/app/assets/img/jquery-ui-bootstrap/ui-icons_f6cf3b_256x240.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/logo-small-gold.png b/branch-1.2/ambari-web/app/assets/img/logo-small-gold.png
deleted file mode 100755
index 7f5098f..0000000
--- a/branch-1.2/ambari-web/app/assets/img/logo-small-gold.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/logo-small-yellow.png b/branch-1.2/ambari-web/app/assets/img/logo-small-yellow.png
deleted file mode 100755
index 527b30c..0000000
--- a/branch-1.2/ambari-web/app/assets/img/logo-small-yellow.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/logo-small.png b/branch-1.2/ambari-web/app/assets/img/logo-small.png
deleted file mode 100644
index b7ac8f3..0000000
--- a/branch-1.2/ambari-web/app/assets/img/logo-small.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/rack-state-minus.png b/branch-1.2/ambari-web/app/assets/img/rack-state-minus.png
deleted file mode 100644
index c11eb11..0000000
--- a/branch-1.2/ambari-web/app/assets/img/rack-state-minus.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/rack-state-plus.png b/branch-1.2/ambari-web/app/assets/img/rack-state-plus.png
deleted file mode 100644
index b1e8044..0000000
--- a/branch-1.2/ambari-web/app/assets/img/rack-state-plus.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/rack-state-toggler.png b/branch-1.2/ambari-web/app/assets/img/rack-state-toggler.png
deleted file mode 100644
index 8ac4e9c..0000000
--- a/branch-1.2/ambari-web/app/assets/img/rack-state-toggler.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/rack-status-critical.png b/branch-1.2/ambari-web/app/assets/img/rack-status-critical.png
deleted file mode 100644
index fc531ab..0000000
--- a/branch-1.2/ambari-web/app/assets/img/rack-status-critical.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/rack-status-dead.png b/branch-1.2/ambari-web/app/assets/img/rack-status-dead.png
deleted file mode 100644
index 60ddccc..0000000
--- a/branch-1.2/ambari-web/app/assets/img/rack-status-dead.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/rack-status-live.png b/branch-1.2/ambari-web/app/assets/img/rack-status-live.png
deleted file mode 100644
index b5c24f8..0000000
--- a/branch-1.2/ambari-web/app/assets/img/rack-status-live.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/rack_node_hover.png b/branch-1.2/ambari-web/app/assets/img/rack_node_hover.png
deleted file mode 100644
index 45936b8..0000000
--- a/branch-1.2/ambari-web/app/assets/img/rack_node_hover.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/spinner.gif b/branch-1.2/ambari-web/app/assets/img/spinner.gif
deleted file mode 100644
index 39fcb67..0000000
--- a/branch-1.2/ambari-web/app/assets/img/spinner.gif
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/status-corrupt.jpg b/branch-1.2/ambari-web/app/assets/img/status-corrupt.jpg
deleted file mode 100644
index 0877a7f..0000000
--- a/branch-1.2/ambari-web/app/assets/img/status-corrupt.jpg
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/status-ok.jpg b/branch-1.2/ambari-web/app/assets/img/status-ok.jpg
deleted file mode 100644
index 636000d..0000000
--- a/branch-1.2/ambari-web/app/assets/img/status-ok.jpg
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/task-detail-open-dialog.png b/branch-1.2/ambari-web/app/assets/img/task-detail-open-dialog.png
deleted file mode 100644
index f21d85d..0000000
--- a/branch-1.2/ambari-web/app/assets/img/task-detail-open-dialog.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/task-log-cancelled.png b/branch-1.2/ambari-web/app/assets/img/task-log-cancelled.png
deleted file mode 100644
index a153a81..0000000
--- a/branch-1.2/ambari-web/app/assets/img/task-log-cancelled.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/task-log-copy.png b/branch-1.2/ambari-web/app/assets/img/task-log-copy.png
deleted file mode 100644
index 8308f88..0000000
--- a/branch-1.2/ambari-web/app/assets/img/task-log-copy.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/task-log-fail.png b/branch-1.2/ambari-web/app/assets/img/task-log-fail.png
deleted file mode 100644
index 1f66113..0000000
--- a/branch-1.2/ambari-web/app/assets/img/task-log-fail.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/task-log-pending.png b/branch-1.2/ambari-web/app/assets/img/task-log-pending.png
deleted file mode 100644
index 41d53d7..0000000
--- a/branch-1.2/ambari-web/app/assets/img/task-log-pending.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/task-log-progress.png b/branch-1.2/ambari-web/app/assets/img/task-log-progress.png
deleted file mode 100644
index c949e1e..0000000
--- a/branch-1.2/ambari-web/app/assets/img/task-log-progress.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/task-log-show-detail.png b/branch-1.2/ambari-web/app/assets/img/task-log-show-detail.png
deleted file mode 100644
index 83ba231..0000000
--- a/branch-1.2/ambari-web/app/assets/img/task-log-show-detail.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/task-log-success.png b/branch-1.2/ambari-web/app/assets/img/task-log-success.png
deleted file mode 100644
index 8707623..0000000
--- a/branch-1.2/ambari-web/app/assets/img/task-log-success.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/img/task-log-timedout.png b/branch-1.2/ambari-web/app/assets/img/task-log-timedout.png
deleted file mode 100644
index e3dd81a..0000000
--- a/branch-1.2/ambari-web/app/assets/img/task-log-timedout.png
+++ /dev/null
Binary files differ
diff --git a/branch-1.2/ambari-web/app/assets/index.html b/branch-1.2/ambari-web/app/assets/index.html
deleted file mode 100644
index 1895e58..0000000
--- a/branch-1.2/ambari-web/app/assets/index.html
+++ /dev/null
@@ -1,50 +0,0 @@
-<!--
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
--->
-
-
-<!DOCTYPE html>
-<html lang="en">
-<head>
-  <meta charset="utf-8">
-  <meta http-equiv="X-UA-Compatible" content="IE=edge">
-  <meta name="viewport" content="width=device-width, initial-scale=1.0">
-  <link rel="stylesheet" href="stylesheets/app.css">
-  <script src="javascripts/vendor.js"></script>
-  <script src="javascripts/app.js"></script>
-  <script>
-      $(document).ready(function() {
-          require('initialize');
-          $('#loading').remove();
-      });
-  </script>
-  <title>Ambari</title>
-  <link rel="shortcut icon" href="/img/logo-micro.gif">
-</head>
-<body>
-    <div id="loading">...Loading...</div>
-    <div id="wrapper">
-    <!-- ApplicationView -->
-    </div>
-    <footer>
-        <div class="container">
-            <a href="http://www.apache.org/licenses/LICENSE-2.0" target="_blank">Licensed under the Apache License, Version 2.0</a>.<br>
-            <a href="/licenses/NOTICE.txt" target="_blank">See third-party tools/resources that Ambari uses and their respective authors</a>
-         </div>
-    </footer>
-</body>
-</html>
diff --git a/branch-1.2/ambari-web/app/assets/licenses/NOTICE.txt b/branch-1.2/ambari-web/app/assets/licenses/NOTICE.txt
deleted file mode 100644
index 7a464dc..0000000
--- a/branch-1.2/ambari-web/app/assets/licenses/NOTICE.txt
+++ /dev/null
@@ -1,42 +0,0 @@
-Apache Ambari Web
-Copyright 2012 The Apache Software Foundation
-
-This product includes software developed by The Apache Software Foundation (http://www.apache.org/).
-
-This product includes jQuery (http://jquery.org - MIT license)
-Copyright (c) 2012, John Resig.
-
-This product includes Ember.js (http://emberjs.com - https://github.com/emberjs/ember.js/blob/master/LICENSE)
-Copyright (c) 2011, Yehuda Katz, Tom Dale, Charles Jolley and Ember.js contributors
-
-This product was generated using Brunch (https://github.com/brunch/brunch/blob/master/LICENSE)
-Copyright (c) 2011, Allan Berger, Jan Monschke, Martin Schürrer, Thomas Schranz, Nik Graf, Paul Miller
-
-This product includes Twitter Bootstrap 2 (http://twitter.github.com/bootstrap/ - Apache License v2.0.)
-
-This product, as part of Twitter Bootstrap 2, includes GLYPHICONS FREE (http://glyphicons.com - Apache License v2.0.)
-
-This product includes LESS (http://lesscss.org - Apache License v2.0.)
-
-This product includes Sinon.JS (http://sinonjs.org - BSD license)
-Copyright (c) 2010-2012, Christian Johansen.
-
-This product includes ember-i18n (http://github.com/zendesk/ember-i18n - MIT license)
-Copyright (c) 2011 by James A. Rosen; Zendesk, Inc.
-
-This product includes D3.js (http://d3js.org - BSD license)
-Copyright (c) 2012, Michael Bostock.
-
-This product includes bootstrap-datepicker.js (http://www.eyecon.ro/bootstrap-datepicker - Apache License, Version 2.0)
-Copyright (c) 2012 Stefan Petre
-
-This product includes Font Awesome 2.0 (http://fortawesome.github.com/Font-Awesome - Creative Commons 3.0)
-
-This product includes Rickshaw 1.1.2 (http://code.shutterstock.com/rickshaw/ - MIT License)
-Copyright (C) 2011 by Shutterstock Images, LLC
-
-This product includes Timeago (http://timeago.yarp.com/ - MIT License)
-Copyright (c) 2008-2012, Ryan McGeary (ryan -[at]- mcgeary [*dot*] org)
-
-This product includes Spin.js (http://fgnass.github.com/spin.js/ - MIT license)
-Copyright (c) 2011 Felix Gnass [fgnass at neteye dot de]
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/classes/job_class.js b/branch-1.2/ambari-web/app/classes/job_class.js
deleted file mode 100644
index 378708a..0000000
--- a/branch-1.2/ambari-web/app/classes/job_class.js
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-var App = require('app');
-var date = require('utils/date');
-var misc = require('utils/misc');
-
-App.Job2 = Ember.Object.extend({
-
-  id: "", //string
-  jobName: "", //string
-  workflowEntityName: "", //string
-  maps: 0, //number
-  reduces: 0, //number
-  status: "", //string
-  input: 0, //number
-  output: 0, //number
-  elapsed_time: 0, //number
-
-  duration: function() {
-    return date.timingFormat(parseInt(this.get('elapsed_time')));
-  }.property('elapsed_time'),
-
-  inputFormatted: function () {
-    return misc.formatBandwidth(this.get('input'));
-  }.property('input'),
-
-  outputFormatted: function () {
-    return misc.formatBandwidth(this.get('output'));
-  }.property('output')
-
-});
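The model above leans on the classic pre-1.0 Ember computed-property idiom: a plain function becomes reactive by declaring its dependent keys through .property(). A minimal usage sketch, assuming the App.Job2 class defined above and Ember's prototype extensions (enabled in this codebase):

    var job = App.Job2.create({ input: 2048, elapsed_time: 61000 });
    job.get('inputFormatted');        // derived from 'input' via misc.formatBandwidth
    job.set('elapsed_time', 120000);  // invalidates the cached 'duration'
    job.get('duration');              // recomputed on next access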
diff --git a/branch-1.2/ambari-web/app/classes/run_class.js b/branch-1.2/ambari-web/app/classes/run_class.js
deleted file mode 100644
index 801c550..0000000
--- a/branch-1.2/ambari-web/app/classes/run_class.js
+++ /dev/null
@@ -1,103 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-var App = require('app');
-var date = require('utils/date');
-var misc = require('utils/misc');
-
-App.Run2 = Ember.Object.extend({
-  id: null, //string
-  appName: null, //string
-  userName: null, //string
-  numJobsTotal: 0, //number
-  numJobsCompleted: 0, //number
-  startTime: 0, //number
-  elapsedTime: 0, //number
-  workflowContext: null, //string
-  input: 0, //number
-  output: 0, //number
-
-  /**
-   * Set to true once all jobs related to this run have been loaded
-   */
-  loadAllJobs : false,
-
-  /**
-   * Short part of the runId (first 20 characters)
-   */
-  idFormatted: function() {
-    return this.get('id').substr(0, 20);
-  }.property('id'),
-
-  /**
-   * Run duration
-   */
-  duration: function() {
-    return date.timingFormat(this.get('elapsedTime'));
-  }.property('elapsedTime'),
-
-  /**
-   * Whether the run still has unfinished jobs
-   */
-  isRunning: function () {
-    return this.get('numJobsTotal') !== this.get('numJobsCompleted');
-  }.property('numJobsTotal', 'numJobsCompleted'),
-
-  /**
-   * Total input for all jobs, formatted with an appropriate unit
-   */
-  inputFormatted: function () {
-    return misc.formatBandwidth(this.get('input'));
-  }.property('input'),
-
-  /**
-   * Total output for all jobs, formatted with an appropriate unit
-   */
-  outputFormatted: function () {
-    return misc.formatBandwidth(this.get('output'));
-  }.property('output'),
-
-  lastUpdateTime: function() {
-    return this.get('startTime') + this.get('elapsedTime');
-  }.property('elapsedTime', 'startTime'),
-
-  lastUpdateTimeFormatted: function() {
-    return date.dateFormat(this.get('lastUpdateTime'));
-  }.property('lastUpdateTime'),
-
-  lastUpdateTimeFormattedShort: function(){
-    return date.dateFormatShort(this.get('lastUpdateTime'));
-  }.property('lastUpdateTime'),
-
-  /**
-   * Type value based on first part of id
-   */
-  type: function() {
-    if (this.get('id').indexOf('pig_') === 0) {
-      return 'Pig';
-    }
-    if (this.get('id').indexOf('hive_') === 0) {
-      return 'Hive';
-    }
-    if (this.get('id').indexOf('mr_') === 0) {
-      return 'MapReduce';
-    }
-    return 'Undefined';
-  }.property('id')
-});
\ No newline at end of file
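The type property above resolves the run type by testing the id prefix with a chain of indexOf calls. A framework-free restatement as a lookup table, keeping the same 'Undefined' fallback (RUN_TYPES and runType are illustrative names, not part of the patch):

    var RUN_TYPES = { 'pig_': 'Pig', 'hive_': 'Hive', 'mr_': 'MapReduce' };
    function runType(id) {
      for (var prefix in RUN_TYPES) {
        if (id.indexOf(prefix) === 0) { return RUN_TYPES[prefix]; }
      }
      return 'Undefined';
    }
    runType('hive_2013_04_01');  // 'Hive'
    runType('oozie_123');        // 'Undefined'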
diff --git a/branch-1.2/ambari-web/app/controllers.js b/branch-1.2/ambari-web/app/controllers.js
deleted file mode 100644
index 24d93f2..0000000
--- a/branch-1.2/ambari-web/app/controllers.js
+++ /dev/null
@@ -1,73 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-// load all controllers here
-
-require('controllers/application');
-require('controllers/login_controller');
-require('controllers/wizard');
-require('controllers/installer');
-require('controllers/global/background_operations_controller');
-require('controllers/main');
-require('controllers/main/admin');
-require('controllers/main/admin/user');
-require('controllers/main/admin/user/edit');
-require('controllers/main/admin/user/create');
-require('controllers/main/admin/advanced');
-require('controllers/main/admin/authentication');
-require('controllers/main/service');
-require('controllers/main/service/item');
-require('controllers/main/service/info/summary');
-require('controllers/main/service/info/configs');
-require('controllers/main/service/info/audit');
-require('controllers/main/service/add_controller');
-require('controllers/main/host');
-require('controllers/main/host/details');
-require('controllers/main/host/add_controller');
-require('controllers/main/charts');
-require('controllers/main/charts/heatmap_metrics/heatmap_metric');
-require('controllers/main/charts/heatmap_metrics/heatmap_metric_processrun');
-require('controllers/main/charts/heatmap_metrics/heatmap_metric_diskspaceused');
-require('controllers/main/charts/heatmap_metrics/heatmap_metric_memoryused');
-require('controllers/main/charts/heatmap_metrics/heatmap_metric_dfs');
-require('controllers/main/charts/heatmap_metrics/heatmap_metric_dfs_bytesread');
-require('controllers/main/charts/heatmap_metrics/heatmap_metric_dfs_byteswritten');
-require('controllers/main/charts/heatmap_metrics/heatmap_metric_dfs_gctime');
-require('controllers/main/charts/heatmap_metrics/heatmap_metric_dfs_memHeapUsed');
-require('controllers/main/charts/heatmap_metrics/heatmap_metric_mapreduce');
-require('controllers/main/charts/heatmap_metrics/heatmap_metric_mapreduce_gctime');
-require('controllers/main/charts/heatmap_metrics/heatmap_metric_mapreduce_mapsRunning');
-require('controllers/main/charts/heatmap_metrics/heatmap_metric_mapreduce_reducesRunning');
-require('controllers/main/charts/heatmap_metrics/heatmap_metric_mapreduce_memHeapUsed');
-require('controllers/main/charts/heatmap');
-require('controllers/main/apps_controller');
-require('controllers/main/apps/item_controller');
-require('controllers/wizard/slave_component_groups_controller');
-require('controllers/wizard/step1_controller');
-require('controllers/wizard/step2_controller');
-require('controllers/wizard/step3_controller');
-require('controllers/wizard/step4_controller');
-require('controllers/wizard/step5_controller');
-require('controllers/wizard/step6_controller');
-require('controllers/wizard/step7_controller');
-require('controllers/wizard/step8_controller');
-require('controllers/wizard/step9_controller');
-require('controllers/wizard/step10_controller');
-require('controllers/global/cluster_controller');
-require('controllers/global/update_controller');
\ No newline at end of file
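controllers.js is a load manifest: under Brunch's CommonJS-style module wrapping, each require() above runs purely for its side effect of executing the module body, which attaches a controller class to App. A hedged sketch of the convention (file and class names hypothetical):

    // controllers/foo_controller.js
    var App = require('app');
    App.FooController = Em.Controller.extend({ name: 'fooController' });

    // controllers.js: no assignment needed; requiring registers App.FooController
    require('controllers/foo_controller');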
diff --git a/branch-1.2/ambari-web/app/controllers/application.js b/branch-1.2/ambari-web/app/controllers/application.js
deleted file mode 100644
index 52a7bde..0000000
--- a/branch-1.2/ambari-web/app/controllers/application.js
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-var App = require('app');
-
-App.ApplicationController = Em.Controller.extend({
-
-  name: 'applicationController',
-
-  clusterName: function () {
-    return (App.router.get('clusterController.clusterName') || 'My Cluster').capitalize();
-  }.property('App.router.clusterController.clusterName'),
-
-  clusterDisplayName: function () {
-    var name = this.get('clusterName');
-    var displayName = name.length > 13 ? name.substr(0, 10) + "..." : name;
-    return displayName.capitalize();
-  }.property('clusterName'),
-
-  isClusterDataLoaded: function() {
-    return App.router.get('clusterController.isLoaded');
-  }.property('App.router.clusterController.isLoaded'),
-
-  init: function(){
-    this._super();
-  }
-});
\ No newline at end of file
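clusterDisplayName truncates anything longer than 13 characters down to its first 10 plus an ellipsis; shorter names pass through unchanged. The same rule in isolation:

    function truncate(name) {
      return name.length > 13 ? name.substr(0, 10) + "..." : name;
    }
    truncate('MyVeryLongClusterName');  // 'MyVeryLong...'
    truncate('Prod Cluster');           // 'Prod Cluster' (12 chars, kept as is)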
diff --git a/branch-1.2/ambari-web/app/controllers/global/background_operations_controller.js b/branch-1.2/ambari-web/app/controllers/global/background_operations_controller.js
deleted file mode 100644
index 5e3205e..0000000
--- a/branch-1.2/ambari-web/app/controllers/global/background_operations_controller.js
+++ /dev/null
@@ -1,267 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.BackgroundOperationsController = Em.Controller.extend({
-  name: 'backgroundOperationsController',
-
-  /**
-   * Whether we need to refresh background operations or not
-   */
-  isWorking : false,
-
-  allOperations: [],
-  allOperationsCount : 0,
-  executeTasks: [],
-
-  getTasksByRole: function (role) {
-    return this.get('allOperations').filterProperty('role', role);
-  },
-
-  getOperationsForRequestId: function(requestId){
-    return this.get('allOperations').filterProperty('request_id', requestId);
-  },
-
-  updateInterval: App.bgOperationsUpdateInterval,
-  url : '',
-
-  generateUrl: function(){
-    var url = App.testMode ?
-      '/data/background_operations/list_on_start.json' :
-      App.apiPrefix + '/clusters/' + App.router.getClusterName() + '/requests/?fields=tasks/*';
-
-    this.set('url', url);
-    return url;
-  },
-
-  timeoutId : null,
-
-  /**
-   * Polling stops once <code>attemptsCount</code> consecutive responses come back with errors
-   */
-  attemptsCount: 20,
-
-  errorsCount: 0,
-
-  /**
-   * Call this.loadOperations with delay
-   * @param delay time in milliseconds (updateInterval by default)
-   * @param reason why the call was made (used to count consecutive errors)
-   */
-  loadOperationsDelayed: function(delay, reason){
-    delay = delay || this.get('updateInterval');
-    var self = this;
-
-    if(reason && reason.indexOf('error:clusterName') === 0){
-      var errors = this.get('errorsCount') + 1;
-      this.set('errorsCount', errors);
-      if(errors > this.get('attemptsCount')){
-        console.log('Stop loading background operations: clusterName is undefined');
-        return;
-      }
-    }
-
-    this.set('timeoutId',
-      setTimeout(function(){
-        self.loadOperations();
-      }, delay)
-    );
-  },
-
-  /**
-   * Reload operations
-   * We can call it manually <code>controller.loadOperations();</code>
-   * or it fires automatically, when <code>isWorking</code> becomes <code>true</code>
-   */
-  loadOperations : function(){
-
-    var timeoutId = this.get('timeoutId');
-    if(timeoutId){
-      clearTimeout(timeoutId);
-      this.set('timeoutId', null);
-    }
-
-    if(!this.get('isWorking')){
-      return;
-    }
-    var self = this;
-
-    if(!App.router.getClusterName()){
-      this.loadOperationsDelayed(this.get('updateInterval')/2, 'error:clusterName');
-      return;
-    }
-
-    var url = this.get('url');
-    if(!url){
-      url = this.generateUrl();
-    }
-
-    $.ajax({
-      type: "GET",
-      url: url,
-      dataType: 'json',
-      timeout: App.timeout,
-      success: function (data) {
-        //refresh model
-        self.updateBackgroundOperations(data);
-
-        self.loadOperationsDelayed();
-      },
-
-      error: function (request, ajaxOptions, error) {
-        self.loadOperationsDelayed(null, 'error:response error');
-      },
-
-      statusCode: require('data/statusCodes')
-    });
-
-  }.observes('isWorking'),
-
-  /**
-   * Update info about background operations
-   * Put all tasks with command 'EXECUTE' into <code>executeTasks</code>; other tasks that are still running go into <code>runningTasks</code>
-   * Put all task that should be shown in popup modal window into <code>this.allOperations</code>
-   * @param data json loaded from server
-   */
-  updateBackgroundOperations: function (data) {
-    var runningTasks = [];
-    var executeTasks = this.get('executeTasks');
-    data.items.forEach(function (item) {
-      item.tasks.forEach(function (task) {
-        if (task.Tasks.command == 'EXECUTE') {
-          if (!executeTasks.someProperty('id', task.Tasks.id)) {
-            executeTasks.push(task.Tasks);
-          }
-        } else {
-          if (task.Tasks.status == 'QUEUED' || task.Tasks.status == 'PENDING' || task.Tasks.status == 'IN_PROGRESS') {
-            runningTasks.push(task.Tasks);
-          }
-        }
-      });
-    });
-
-    for (var i = 0; i < executeTasks.length; i++) {
-      if (executeTasks[i].status == 'QUEUED' || executeTasks[i].status == 'PENDING' || executeTasks[i].status == 'IN_PROGRESS') {
-        var url = App.testMode ? '/data/background_operations/list_on_start.json' :
-            App.apiPrefix + '/clusters/' + App.router.getClusterName() + '/requests/' + executeTasks[i].request_id + '/tasks/' + executeTasks[i].id;
-        $.ajax({
-          type: "GET",
-          url: url,
-          dataType: 'json',
-          timeout: App.timeout,
-          success: function (data) {
-            if (data) {
-              for(var i = 0;i < executeTasks.length; i++){
-                if(data.Tasks.id == executeTasks[i].id){
-                  executeTasks[i] = data.Tasks;
-                }
-              }
-            }
-          },
-          error: function () {
-            console.log('ERROR: error during executeTask update');
-          },
-
-          statusCode: require('data/statusCodes')
-        });
-      }
-    }
-    var currentTasks;
-    currentTasks = runningTasks.concat(executeTasks);
-    currentTasks = currentTasks.sort(function (a, b) {
-      return a.id - b.id;
-    });
-
-    // If the server is returning 999 as the return code, display blank and not 999
-    currentTasks.forEach( function (task) {
-      if (task.exit_code == 999) {
-        task.display_exit_code = false;
-      } else {
-        task.display_exit_code = true;
-      }
-    });
-
-    this.get('allOperations').filterProperty('isOpen').mapProperty('id').forEach(function(id){
-      if (currentTasks.someProperty('id', id)) {
-        currentTasks.findProperty('id', id).isOpen = true;
-      }
-    });
-
-    this.set('allOperations', currentTasks);
-    this.set('allOperationsCount', runningTasks.length + executeTasks.filterProperty('status', 'PENDING').length + executeTasks.filterProperty('status', 'QUEUED').length + executeTasks.filterProperty('status', 'IN_PROGRESS').length);
-
-    var eventsArray = this.get('eventsArray');
-    if (eventsArray.length) {
-
-      var itemsToRemove = [];
-      eventsArray.forEach(function(item){
-        //if when returns true
-        if(item.when(this)){
-          //fire do method
-          item.do();
-          //and remove it
-          itemsToRemove.push(item);
-        }
-      }, this);
-
-      itemsToRemove.forEach(function(item){
-        eventsArray.splice(eventsArray.indexOf(item), 1);
-      });
-    }
-  },
-
-  /**
-   * Click handler for the background-operations counter to the right of the logo
-   */
-  showPopup: function(){
-    this.set('executeTasks', []);
-    this.loadOperations();
-    App.ModalPopup.show({
-      headerClass: Ember.View.extend({
-        controllerBinding: 'App.router.backgroundOperationsController',
-        template:Ember.Handlebars.compile('{{allOperationsCount}} Background Operations Running')
-      }),
-      bodyClass: Ember.View.extend({
-        controllerBinding: 'App.router.backgroundOperationsController',
-        templateName: require('templates/main/background_operations_popup')
-      }),
-      onPrimary: function() {
-        this.hide();
-      },
-      secondary : null
-    });
-  },
-
-  /**
-   * Example of data inside:
-   * {
-   *   when : function(backgroundOperationsController){
-   *     return backgroundOperationsController.getOperationsForRequestId(requestId).length == 0;
-   *   },
-   *   do : function(){
-   *     component.set('status', 'cool');
-   *   }
-   * }
-   *
-   * Function <code>do</code> will be fired once, when <code>when</code> returns true.
-   * For a usage example, see app\controllers\main\host\details.js
-   */
-  eventsArray : []
-
-});
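loadOperations and loadOperationsDelayed above implement polling with an error budget: failures to resolve the cluster name are counted, the retry fires at half the normal interval, and after attemptsCount consecutive errors the loop gives up. A simplified, framework-free sketch of that control flow (the real controller also distinguishes error reasons):

    function makePoller(load, interval, maxErrors) {
      var errors = 0, timer = null;
      function schedule(delay) {
        timer = setTimeout(tick, delay || interval);
      }
      function tick() {
        load(function (ok) {
          if (ok) { schedule(); return; }       // healthy: normal cadence
          if (++errors > maxErrors) { return; } // budget exhausted: stop polling
          schedule(interval / 2);               // error: retry sooner
        });
      }
      return { start: tick, stop: function () { clearTimeout(timer); } };
    }
    // usage, with a hypothetical fetchOps(cb) that reports success via cb(true)
    var poller = makePoller(fetchOps, 6000, 20);
    poller.start();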
diff --git a/branch-1.2/ambari-web/app/controllers/global/cluster_controller.js b/branch-1.2/ambari-web/app/controllers/global/cluster_controller.js
deleted file mode 100644
index 262e50a..0000000
--- a/branch-1.2/ambari-web/app/controllers/global/cluster_controller.js
+++ /dev/null
@@ -1,321 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.ClusterController = Em.Controller.extend({
-  name:'clusterController',
-  cluster:null,
-  isLoaded:false,
-  /**
-   * Whether we need to update statuses automatically or not
-   */
-  isWorking: false,
-  updateLoadStatus:function (item) {
-    var loadList = this.get('dataLoadList');
-    var loaded = true;
-    loadList.set(item, true);
-    for (var i in loadList) {
-      if (loadList.hasOwnProperty(i) && !loadList[i] && loaded) {
-        loaded = false;
-      }
-    }
-    this.set('isLoaded', loaded);
-  },
-
-  dataLoadList:Em.Object.create({
-    'hosts':false,
-    'services':false,
-    'cluster':false,
-    'racks':false,
-    'alerts':false,
-    'users':false
-  }),
-
-  /**
-   * load cluster name
-   */
-  loadClusterName:function (reload) {
-    if (this.get('clusterName') && !reload) {
-      return;
-    }
-    var self = this;
-    var url = (App.testMode) ? '/data/clusters/info.json' : App.apiPrefix + '/clusters';
-    $.ajax({
-      async:false,
-      type:"GET",
-      url:url,
-      dataType:'json',
-      timeout:App.timeout,
-      success:function (data) {
-        self.set('cluster', data.items[0]);
-      },
-      error:function (request, ajaxOptions, error) {
-        console.log('failed on loading cluster name');
-        self.set('isLoaded', true);
-      },
-      statusCode:require('data/statusCodes')
-    });
-  },
-
-  getUrl:function (testUrl, url) {
-    return (App.testMode) ? testUrl : App.apiPrefix + '/clusters/' + this.get('clusterName') + url;
-  },
-
-  /**
-   * Provides the URL to use for Ganglia server. This URL
-   * is helpful in populating links in UI.
-   *
-   * If null is returned, it means GANGLIA service is not installed.
-   */
-  gangliaUrl: function () {
-    if (App.testMode) {
-      return 'http://gangliaserver/ganglia/?t=yes';
-    } else {
-      // We want live data here
-      var svcs = App.Service.find();
-      var gangliaSvc = svcs.findProperty("serviceName", "GANGLIA");
-      if (gangliaSvc) {
-        var svcComponents = gangliaSvc.get('hostComponents');
-        if (svcComponents) {
-          var gangliaSvcComponent = svcComponents.findProperty("componentName", "GANGLIA_SERVER");
-          if (gangliaSvcComponent) {
-            var hostName = gangliaSvcComponent.get('host.hostName');
-            if (hostName) {
-              var host = App.Host.find(hostName);
-              if (host) {
-                hostName = host.get('publicHostName');
-              }
-              return "http://" + hostName + "/ganglia";
-            }
-          }
-        }
-      }
-      return null;
-    }
-  }.property('App.router.updateController.isUpdated', 'dataLoadList.hosts'),
-
-  /**
-   * Provides the URL to use for NAGIOS server. This URL
-   * is helpful in getting alerts data from server and also
-   * in populating links in UI.
-   *
-   * If null is returned, it means NAGIOS service is not installed.
-   */
-  nagiosUrl:function () {
-    if (App.testMode) {
-      return 'http://nagiosserver/nagios';
-    } else {
-      // We want live data here
-      var svcs = App.Service.find();
-      var nagiosSvc = svcs.findProperty("serviceName", "NAGIOS");
-      if (nagiosSvc) {
-        var svcComponents = nagiosSvc.get('hostComponents');
-        if (svcComponents) {
-          var nagiosSvcComponent = svcComponents.findProperty("componentName", "NAGIOS_SERVER");
-          if (nagiosSvcComponent) {
-            var hostName = nagiosSvcComponent.get('host.hostName');
-            if (hostName) {
-              var host = App.Host.find(hostName);
-              if (host) {
-                hostName = host.get('publicHostName');
-              }
-              return "http://" + hostName + "/nagios";
-            }
-          }
-        }
-      }
-      return null;
-    }
-  }.property('App.router.updateController.isUpdated', 'dataLoadList.services', 'dataLoadList.hosts'),
-
-  isNagiosInstalled:function () {
-    if (App.testMode) {
-      return true;
-    } else {
-      var svcs = App.Service.find();
-      var nagiosSvc = svcs.findProperty("serviceName", "NAGIOS");
-      return nagiosSvc != null;
-    }
-  }.property('App.router.updateController.isUpdated', 'dataLoadList.services'),
-
-  /**
-   * Sorted list of alerts.
-   * Changes whenever alerts are loaded.
-   */
-  alerts:[],
-  updateAlerts: function(){
-    var alerts = App.Alert.find();
-    var alertsArray = alerts.toArray();
-    var sortedArray = alertsArray.sort(function (left, right) {
-      var statusDiff = right.get('status') - left.get('status');
-      if (statusDiff == 0) { // same error severity - sort by time
-        var rightTime = right.get('date');
-        var leftTime = left.get('date');
-        rightTime = rightTime ? rightTime.getTime() : 0;
-        leftTime = leftTime ? leftTime.getTime() : 0;
-        statusDiff = rightTime - leftTime;
-      }
-      return statusDiff;
-    });
-    this.set('alerts', sortedArray);
-  },
-  
-  /**
-   * Nagios presence is known only after App.Service is loaded from the
-   * server. At that point nothing triggers an alerts load, so alerts would
-   * not be loaded or shown until the next polling cycle.
-   * This method immediately loads alerts once Nagios presence is known.
-   */
-  isNagiosInstalledListener: function () {
-    var self = this;
-    self.loadAlerts(function () {
-      self.updateLoadStatus('alerts');
-    });
-  }.observes('isNagiosInstalled'),
-
-  /**
-   * Load alerts from server
-   * @param callback Slave function, should be called to fire delayed update.
-   * Look at <code>App.updater.run</code> for more information.
-   * Also used to set <code>dataLoadList.alerts</code> status during app loading
-   */
-  loadAlerts:function (callback) {
-    if (this.get('isNagiosInstalled')) {
-      var dataUrl = this.getUrl('/data/alerts/alerts.json', '/host_components?fields=HostRoles/nagios_alerts&HostRoles/component_name=NAGIOS_SERVER');
-      var self = this;
-      var ajaxOptions = {
-        dataType:"json",
-        complete:function () {
-          self.updateAlerts();
-          callback();
-        },
-        error: function(jqXHR, testStatus, error) {
-          console.log('Nagios $.ajax() response:', error);
-        }
-      };
-      App.HttpClient.get(dataUrl, App.alertsMapper, ajaxOptions);
-    } else {
-      console.log("No Nagios URL provided.")
-      callback();
-    }
-  },
-
-  /**
-   * Send request to server to load components updated statuses
-   * @param callback Slave function, should be called to fire delayed update.
-   * Look at <code>App.updater.run</code> for more information
-   * @return {Boolean} Whether we have errors
-   */
-  loadUpdatedStatus: function(callback){
-
-    if(!this.get('clusterName')){
-      callback();
-      return false;
-    }
-    
-    var servicesUrl = this.getUrl('/data/dashboard/services.json', '/services?fields=ServiceInfo,components/host_components/HostRoles/desired_state,components/host_components/HostRoles/state');
-
-    App.HttpClient.get(servicesUrl, App.statusMapper, {
-      complete: callback
-    });
-    return true;
-  },
-
-  loadUpdatedStatusDelayed: function(delay){
-    setTimeout(function(){
-      App.updater.immediateRun('loadUpdatedStatus');
-    }, delay);
-  },
-
-  /**
-   * Start polling, when <code>isWorking</code> become true
-   */
-  startPolling: function(){
-    if(!this.get('isWorking')){
-      return false;
-    }
-    App.updater.run(this, 'loadUpdatedStatus', 'isWorking'); //update will not run it immediately
-    App.updater.run(this, 'loadAlerts', 'isWorking'); //update will not run it immediately
-    return true;
-  }.observes('isWorking'),
-  /**
-   *
-   *  load all data and update load status
-   */
-  loadClusterData:function () {
-    var self = this;
-    if (!this.get('clusterName')) {
-      return;
-    }
-
-    if(this.get('isLoaded')) { // do not load data repeatedly
-      return;
-    }
-
-    var clusterUrl = this.getUrl('/data/clusters/cluster.json', '?fields=Clusters');
-    var hostsUrl = this.getUrl('/data/hosts/hosts.json', '/hosts?fields=Hosts/host_name,Hosts/public_host_name,Hosts/disk_info,Hosts/cpu_count,Hosts/total_mem,Hosts/host_status,Hosts/last_heartbeat_time,Hosts/os_arch,Hosts/os_type,Hosts/ip,host_components,metrics/disk,metrics/load/load_one');
-    var usersUrl = App.testMode ? '/data/users/users.json' : App.apiPrefix + '/users/?fields=*';
-    var racksUrl = "/data/racks/racks.json";
-
-    App.HttpClient.get(racksUrl, App.racksMapper, {
-      complete:function (jqXHR, textStatus) {
-        self.updateLoadStatus('racks');
-      }
-    }, function (jqXHR, textStatus) {
-      self.updateLoadStatus('racks');
-    });
-
-    App.HttpClient.get(clusterUrl, App.clusterMapper, {
-      complete:function (jqXHR, textStatus) {
-        self.updateLoadStatus('cluster');
-      }
-    }, function (jqXHR, textStatus) {
-      self.updateLoadStatus('cluster');
-    });
-
-    App.HttpClient.get(hostsUrl, App.hostsMapper, {
-      complete:function (jqXHR, textStatus) {
-        self.updateLoadStatus('hosts');
-      }
-    }, function (jqXHR, textStatus) {
-      self.updateLoadStatus('hosts');
-    });
-
-    App.HttpClient.get(usersUrl, App.usersMapper, {
-      complete:function (jqXHR, textStatus) {
-        self.updateLoadStatus('users');
-      }
-    }, function (jqXHR, textStatus) {
-      self.updateLoadStatus('users');
-    });
-
-    App.router.get('updateController').updateServiceMetric(function(){
-      self.updateLoadStatus('services');
-    }, true);
-
-    this.loadAlerts(function(){
-      self.updateLoadStatus('alerts');
-    });
-
-  },
-
-  clusterName:function () {
-    return (this.get('cluster')) ? this.get('cluster').Clusters.cluster_name : null;
-  }.property('cluster')
-})
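updateLoadStatus above flips isLoaded only once every flag in dataLoadList is true, so the application waits on hosts, services, cluster, racks, alerts and users alike. The aggregation, reduced to plain objects:

    var loadList = { hosts: false, services: false, cluster: false };
    function updateLoadStatus(item) {
      loadList[item] = true;
      for (var k in loadList) {
        if (!loadList[k]) { return false; }  // something is still pending
      }
      return true;                           // every source has loaded
    }
    updateLoadStatus('hosts');     // false
    updateLoadStatus('services');  // false
    updateLoadStatus('cluster');   // true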
diff --git a/branch-1.2/ambari-web/app/controllers/global/update_controller.js b/branch-1.2/ambari-web/app/controllers/global/update_controller.js
deleted file mode 100644
index bbd2190..0000000
--- a/branch-1.2/ambari-web/app/controllers/global/update_controller.js
+++ /dev/null
@@ -1,93 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.UpdateController = Em.Controller.extend({
-  name:'updateController',
-  isUpdated:false,
-  cluster:null,
-  isWorking: false,
-  timeIntervalId: null,
-  clusterName:function () {
-    return App.router.get('clusterController.clusterName');
-  }.property('App.router.clusterController.clusterName'),
-
-  getUrl:function (testUrl, url) {
-    return (App.testMode) ? testUrl : App.apiPrefix + '/clusters/' + this.get('clusterName') + url;
-  },
-
-  /**
-   * Start polling, when <code>isWorking</code> become true
-   */
-  updateAll:function(){
-    if(this.get('isWorking')) {
-      App.updater.run(this, 'updateHost', 'isWorking');
-      App.updater.run(this, 'updateServiceMetric', 'isWorking');
-      App.updater.run(this, 'graphsUpdate', 'isWorking');
-    }
-  }.observes('isWorking'),
-
-  updateHost:function(callback) {
-    var self = this;
-      var hostsUrl = this.getUrl('/data/hosts/hosts.json', '/hosts?fields=Hosts/host_name,Hosts/public_host_name,Hosts/disk_info,Hosts/cpu_count,Hosts/total_mem,Hosts/host_status,Hosts/last_heartbeat_time,Hosts/os_arch,Hosts/os_type,Hosts/ip,host_components,metrics/disk,metrics/load/load_one');
-      App.HttpClient.get(hostsUrl, App.hostsMapper, {
-        complete: callback
-      });
-  },
-  graphs: [],
-  graphsUpdate: function (callback) {
-      var existedGraphs = [];
-      this.get('graphs').forEach(function (_graph) {
-        var view = Em.View.views[_graph.id];
-        if (view) {
-          existedGraphs.push(_graph);
-          //console.log('updated graph', _graph.name);
-          view.loadData();
-          //if the graph is opened as a modal popup, update it too
-          if($(".modal-graph-line .modal-body #" + _graph.popupId + "-container-popup").length) {
-            view.loadData();
-          }
-        }
-      });
-    callback();
-    this.set('graphs', existedGraphs);
-  },
-
-  /**
-   * Updates the services information. 
-   *
-   * @param callback
-   * @param isInitialLoad  If true, only basic information is loaded.
-   */
-  updateServiceMetric: function (callback, isInitialLoad) {
-    var self = this;
-    self.set('isUpdated', false);
-    var servicesUrl = isInitialLoad ? 
-        this.getUrl('/data/dashboard/services.json', '/services?fields=components/ServiceComponentInfo,components/host_components,components/host_components/HostRoles') : 
-        this.getUrl('/data/dashboard/services.json', '/services?fields=components/ServiceComponentInfo,components/host_components,components/host_components/HostRoles,components/host_components/metrics/jvm/memHeapUsedM,components/host_components/metrics/jvm/memHeapCommittedM,components/host_components/metrics/mapred/jobtracker/trackers_decommissioned');
-    var callback = callback || function (jqXHR, textStatus) {
-      self.set('isUpdated', true);
-    };
-      App.HttpClient.get(servicesUrl, App.servicesMapper, {
-        complete: callback
-      });
-  }
-
-
-});
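getUrl is the test seam these controllers share: in testMode it returns a static fixture path, otherwise it builds a live REST URL from apiPrefix and the cluster name. Assuming apiPrefix is '/api/v1', its usual value in the ambari-web config:

    function getUrl(testMode, testUrl, apiPrefix, clusterName, suffix) {
      return testMode ? testUrl : apiPrefix + '/clusters/' + clusterName + suffix;
    }
    getUrl(true, '/data/hosts/hosts.json', '/api/v1', 'c1', '/hosts');
    // '/data/hosts/hosts.json'        (static fixture)
    getUrl(false, '/data/hosts/hosts.json', '/api/v1', 'c1', '/hosts');
    // '/api/v1/clusters/c1/hosts'     (live API)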
diff --git a/branch-1.2/ambari-web/app/controllers/installer.js b/branch-1.2/ambari-web/app/controllers/installer.js
deleted file mode 100644
index 1f29f00..0000000
--- a/branch-1.2/ambari-web/app/controllers/installer.js
+++ /dev/null
@@ -1,446 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-var App = require('app');
-
-App.InstallerController = App.WizardController.extend({
-
-  name: 'installerController',
-
-  totalSteps: 10,
-
-  content: Em.Object.create({
-    cluster: null,
-    installOptions: null,
-    hosts: null,
-    services: null,
-    slaveComponentHosts: null,
-    masterComponentHosts: null,
-    serviceConfigProperties: null,
-    advancedServiceConfig: null,
-    slaveGroupProperties: null,
-    controllerName: 'installerController'
-  }),
-
-  getCluster: function(){
-    return jQuery.extend({}, this.get('clusterStatusTemplate'));
-  },
-
-  getInstallOptions: function(){
-    return jQuery.extend({}, this.get('installOptionsTemplate'));
-  },
-
-  getHosts: function(){
-    return [];
-  },
-
-  /**
-   * Remove host from model. Used at the <code>Confirm Hosts (step 2)</code> step
-   * @param hosts Array of hosts, which we want to delete
-   */
-  removeHosts: function (hosts) {
-    //todo Replace this code with real logic
-    App.db.removeHosts(hosts);
-  },
-
-  /**
-   * Save data, which user filled, to main controller
-   * @param stepController App.WizardStep3Controller
-   */
-  saveConfirmedHosts: function (stepController) {
-    var hostInfo = {};
-    stepController.get('content.hosts').forEach(function (_host) {
-      hostInfo[_host.name] = {
-        name: _host.name,
-        cpu: _host.cpu,
-        memory: _host.memory,
-        disk_info: _host.disk_info,
-        bootStatus: _host.bootStatus,
-        isInstalled: false
-      };
-    });
-    this.set('content.hosts', hostInfo);
-    this.save('hosts');
-  },
-  /**
-   * Load confirmed hosts.
-   * Will be used at the <code>Assign Masters (step 5)</code> step
-   */
-  loadConfirmedHosts: function () {
-    this.set('content.hosts', App.db.getHosts() || []);
-  },
-
-  /**
-   * Save data after installation to main controller
-   * @param stepController App.WizardStep9Controller
-   */
-  saveInstalledHosts: function (stepController) {
-    var hosts = stepController.get('hosts');
-    var hostInfo = App.db.getHosts();
-
-    for (var index in hostInfo) {
-      var host = hosts.findProperty('name', hostInfo[index].name);
-      if (host) {
-        hostInfo[index].status = host.status;
-        //tasks should be empty because they loads from the server
-        //hostInfo[index].tasks = host.tasks;
-        hostInfo[index].message = host.message;
-        hostInfo[index].progress = host.progress;
-      }
-    }
-    this.set('content.hosts', hostInfo);
-    this.save('hosts');
-  },
-
-  /**
-   * Load services data. Will be used at the <code>Select Services (step 4)</code> step
-   */
-  loadServices: function () {
-    var servicesInfo = App.db.getService();
-    servicesInfo.forEach(function (item, index) {
-      servicesInfo[index] = Em.Object.create(item);
-      servicesInfo[index].isInstalled = false;
-    });
-    this.set('content.services', servicesInfo);
-    console.log('installerController.loadServices: loaded data ', JSON.stringify(servicesInfo));
-    console.log("The type odf serviceInfo: " + typeof servicesInfo);
-    console.log('selected services ', servicesInfo.filterProperty('isSelected', true).mapProperty('serviceName'));
-  },
-
-  /**
-   * Save data to model
-   * @param stepController App.WizardStep4Controller
-   */
-  saveServices: function (stepController) {
-    var serviceNames = [];
-    App.db.setService(stepController.get('content'));
-    stepController.filterProperty('isSelected', true).forEach(function (item) {
-      serviceNames.push(item.serviceName);
-    });
-    this.set('content.selectedServiceNames', serviceNames);
-    App.db.setSelectedServiceNames(serviceNames);
-    console.log('installerController.saveServices: saved data ', serviceNames);
-  },
-
-  /**
-   * Save Master Component Hosts data to Main Controller
-   * @param stepController App.WizardStep5Controller
-   */
-  saveMasterComponentHosts: function (stepController) {
-    var obj = stepController.get('selectedServicesMasters');
-
-    var masterComponentHosts = [];
-    obj.forEach(function (_component) {
-      masterComponentHosts.push({
-        display_name: _component.get('display_name'),
-        component: _component.get('component_name'),
-        hostName: _component.get('selectedHost'),
-        serviceId: _component.get('serviceId'),
-        isInstalled: false
-      });
-    });
-
-    console.log("installerController.saveMasterComponentHosts: saved hosts ", masterComponentHosts);
-    App.db.setMasterComponentHosts(masterComponentHosts);
-    this.set('content.masterComponentHosts', masterComponentHosts);
-  },
-
-  /**
-   * Load master component hosts data for using in required step controllers
-   */
-  loadMasterComponentHosts: function () {
-    var masterComponentHosts = App.db.getMasterComponentHosts() || [];
-    this.set("content.masterComponentHosts", masterComponentHosts);
-    console.log("InstallerController.loadMasterComponentHosts: loaded hosts ", masterComponentHosts);
-  },
-
-  /**
-   * Save slaveHostComponents to main controller
-   * @param stepController App.WizardStep6Controller; called on submission of step 6
-   */
-  saveSlaveComponentHosts: function (stepController) {
-
-    var hosts = stepController.get('hosts');
-    var isMrSelected = stepController.get('isMrSelected');
-    var isHbSelected = stepController.get('isHbSelected');
-
-    var dataNodeHosts = [];
-    var taskTrackerHosts = [];
-    var regionServerHosts = [];
-    var clientHosts = [];
-
-    hosts.forEach(function (host) {
-      if (host.get('isDataNode')) {
-        dataNodeHosts.push({
-          hostName: host.hostName,
-          group: 'Default',
-          isInstalled: false
-        });
-      }
-      if (isMrSelected && host.get('isTaskTracker')) {
-        taskTrackerHosts.push({
-          hostName: host.hostName,
-          group: 'Default',
-          isInstalled: false
-        });
-      }
-      if (isHbSelected && host.get('isRegionServer')) {
-        regionServerHosts.push({
-          hostName: host.hostName,
-          group: 'Default',
-          isInstalled: false
-        });
-      }
-      if (host.get('isClient')) {
-        clientHosts.pushObject({
-          hostName: host.hostName,
-          group: 'Default',
-          isInstalled: false
-        });
-      }
-    }, this);
-
-    var slaveComponentHosts = [];
-    slaveComponentHosts.push({
-      componentName: 'DATANODE',
-      displayName: 'DataNode',
-      hosts: dataNodeHosts
-    });
-    if (isMrSelected) {
-      slaveComponentHosts.push({
-        componentName: 'TASKTRACKER',
-        displayName: 'TaskTracker',
-        hosts: taskTrackerHosts
-      });
-    }
-    if (isHbSelected) {
-      slaveComponentHosts.push({
-        componentName: 'HBASE_REGIONSERVER',
-        displayName: 'RegionServer',
-        hosts: regionServerHosts
-      });
-    }
-    slaveComponentHosts.pushObject({
-      componentName: 'CLIENT',
-      displayName: 'client',
-      hosts: clientHosts
-    });
-
-    App.db.setSlaveComponentHosts(slaveComponentHosts);
-    this.set('content.slaveComponentHosts', slaveComponentHosts);
-    console.log("InstallerController.saveSlaveComponentHosts: saved hosts ", slaveComponentHosts);
-  },
-
-  /**
-   * Load master component hosts data for using in required step controllers
-   */
-  loadSlaveComponentHosts: function () {
-    var slaveComponentHosts = App.db.getSlaveComponentHosts() || null;
-    this.set("content.slaveComponentHosts", slaveComponentHosts);
-    console.log("InstallerController.loadSlaveComponentHosts: loaded hosts ", slaveComponentHosts);
-  },
-
-  /**
-   * Save config properties
-   * @param stepController Step7WizardController
-   */
-  saveServiceConfigProperties: function (stepController) {
-    var serviceConfigProperties = [];
-    stepController.get('stepConfigs').forEach(function (_content) {
-      _content.get('configs').forEach(function (_configProperties) {
-        var displayType = _configProperties.get('displayType');
-        if (displayType === 'directories' || displayType === 'directory') {
-          var value = _configProperties.get('value').trim().split(/\s+/g).join(',');
-          _configProperties.set('value', value);
-        }
-        var configProperty = {
-          id: _configProperties.get('id'),
-          name: _configProperties.get('name'),
-          value: _configProperties.get('value'),
-          defaultValue: _configProperties.get('defaultValue'),
-          service: _configProperties.get('serviceName'),
-          domain:  _configProperties.get('domain'),
-          filename: _configProperties.get('filename')
-        };
-        serviceConfigProperties.push(configProperty);
-      }, this);
-
-    }, this);
-
-    App.db.setServiceConfigProperties(serviceConfigProperties);
-    this.set('content.serviceConfigProperties', serviceConfigProperties);
-
-    //TODO: Uncomment the code below to enable slave configuration
- /*
-    var slaveConfigProperties = [];
-    stepController.get('stepConfigs').forEach(function (_content) {
-      if (_content.get('configCategories').someProperty('isForSlaveComponent', true)) {
-        var slaveCategory = _content.get('configCategories').findProperty('isForSlaveComponent', true);
-        slaveCategory.get('slaveConfigs.groups').forEach(function (_group) {
-          _group.get('properties').forEach(function (_property) {
-            var displayType = _property.get('displayType');
-            if (displayType === 'directories' || displayType === 'directory') {
-              var value = _property.get('value').trim().split(/\s+/g).join(',');
-              _property.set('value', value);
-            }
-            _property.set('storeValue', _property.get('value'));
-          }, this);
-        }, this);
-        slaveConfigProperties.pushObject(slaveCategory.get('slaveConfigs'));
-      }
-    }, this);
-    App.db.setSlaveProperties(slaveConfigProperties);
-    this.set('content.slaveGroupProperties', slaveConfigProperties);
-    */
-  },
-
-  /**
-   * Load serviceConfigProperties to model
-   */
-  loadServiceConfigProperties: function () {
-    var serviceConfigProperties = App.db.getServiceConfigProperties();
-    this.set('content.serviceConfigProperties', serviceConfigProperties);
-    console.log("InstallerController.loadServiceConfigProperties: loaded config ", serviceConfigProperties);
-
-    this.set('content.advancedServiceConfig', App.db.getAdvancedServiceConfig());
-  },
-
-  /**
-   * Load information about hosts with client components
-   */
-  loadClients: function () {
-    var clients = App.db.getClientsForSelectedServices();
-    this.set('content.clients', clients);
-    console.log("InstallerController.loadClients: loaded list ", clients);
-  },
-
-  /**
-   * Generate clients list for selected services and save it to model
-   * @param stepController step4WizardController
-   */
-  saveClients: function (stepController) {
-    var clients = [];
-    var serviceComponents = require('data/service_components');
-
-    stepController.get('content').filterProperty('isSelected', true).forEach(function (_service) {
-      var client = serviceComponents.filterProperty('service_name', _service.serviceName).findProperty('isClient', true);
-      if (client) {
-        clients.pushObject({
-          component_name: client.component_name,
-          display_name: client.display_name,
-          isInstalled: false
-        });
-      }
-    }, this);
-
-    App.db.setClientsForSelectedServices(clients);
-    this.set('content.clients', clients);
-    console.log("InstallerController.saveClients: saved list ", clients);
-  },
-
-  /**
-   * Load data for all steps until <code>current step</code>
-   */
-  loadAllPriorSteps: function () {
-    var step = this.get('currentStep');
-    switch (step) {
-      case '10':
-      case '9':
-      case '8':
-      case '7':
-        this.loadServiceConfigProperties();
-        // loadSlaveGroupProperties depends on loadSlaveComponentHosts; call loadSlaveComponentHosts first
-       // this.loadSlaveComponentHosts();
-       // this.loadSlaveGroupProperties();
-      case '6':
-        this.loadSlaveComponentHosts();
-        this.loadClients();
-      case '5':
-        this.loadMasterComponentHosts();
-        this.loadConfirmedHosts();
-      case '4':
-        this.loadServices();
-      case '3':
-        this.loadConfirmedHosts();
-      case '2':
-        this.load('installOptions');
-      case '1':
-        this.load('cluster');
-    }
-  },
-
-
-
-  loadAdvancedConfigs: function () {
-    var configs = [];
-    App.db.getSelectedServiceNames().forEach(function (_serviceName) {
-      var serviceComponents = this.loadAdvancedConfig(_serviceName);
-      configs = configs.concat(serviceComponents);
-    }, this);
-    this.set('content.advancedServiceConfig', configs);
-    App.db.setAdvancedServiceConfig(configs);
-  },
-
-  /**
-   * Generate serviceProperties and save it to local storage;
-   * called from stepController (step6WizardController)
-   */
-
-  loadAdvancedConfig: function (serviceName) {
-    var self = this;
-    var url = (App.testMode) ? '/data/wizard/stack/hdp/version01/' + serviceName + '.json' : App.apiPrefix + App.get('stackVersionURL') + '/services/' + serviceName; // TODO: get this url from the stack selected by the user in Install Options page
-    var method = 'GET';
-    var serviceComponents;
-    $.ajax({
-      type: method,
-      url: url,
-      async: false,
-      dataType: 'text',
-      timeout: App.timeout,
-      success: function (data) {
-        var jsonData = jQuery.parseJSON(data);
-        console.log("TRACE: Step6 submit -> In success function for the loadAdvancedConfig call");
-        console.log("TRACE: Step6 submit -> value of the url is: " + url);
-        serviceComponents = jsonData.properties;
-        serviceComponents.setEach('serviceName', serviceName);
-        console.log('TRACE: servicename: ' + serviceName);
-      },
-
-      error: function (request, ajaxOptions, error) {
-        console.log("TRACE: STep6 submit -> In error function for the loadAdvancedConfig call");
-        console.log("TRACE: STep6 submit-> value of the url is: " + url);
-        console.log("TRACE: STep6 submit-> error code status is: " + request.status);
-        console.log('Step6 submit: Error message is: ' + request.responseText);
-      },
-
-      statusCode: require('data/statusCodes')
-    });
-    return serviceComponents;
-  },
-
-  /**
-   * Clear all temporary data
-   */
-  finish: function () {
-    this.setCurrentStep('1');
-    this.clearStorageData();
-  }
-
-});
-
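loadAllPriorSteps above relies on deliberate switch fall-through: the cases carry no break, so entering at '7' also runs the loaders for '6' down to '1'. The idiom in isolation (loader names hypothetical):

    switch (step) {
      case '3':
        loadStepThree();  // no break: intentionally falls through
      case '2':
        loadStepTwo();
      case '1':
        loadStepOne();
    }
    // step === '3' runs all three loaders; step === '1' runs only loadStepOne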
diff --git a/branch-1.2/ambari-web/app/controllers/login_controller.js b/branch-1.2/ambari-web/app/controllers/login_controller.js
deleted file mode 100644
index 329d8ed..0000000
--- a/branch-1.2/ambari-web/app/controllers/login_controller.js
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.LoginController = Em.Object.extend({
-
-  name: 'loginController',
-
-  loginName: '',
-  password: '',
-
-  errorMessage: '',
-
-  submit: function (e) {
-    this.set('errorMessage', '');
-
-    var self = this;
-
-    App.get('router').login(function (isAuthenticated) {
-      if (!isAuthenticated) {
-        console.log('Failed to login as: ' + self.get('loginName'));
-        self.set('errorMessage', Em.I18n.t('login.error'));
-      }
-    });
-  }
-
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/controllers/main.js b/branch-1.2/ambari-web/app/controllers/main.js
deleted file mode 100644
index 7d78d6d..0000000
--- a/branch-1.2/ambari-web/app/controllers/main.js
+++ /dev/null
@@ -1,70 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-require('models/background_operation');
-
-App.MainController = Em.Controller.extend({
-  name: 'mainController',
-
-  updateTitle: function(){
-    var name = App.router.get('clusterController.clusterName');
-    if (name) {
-      name = name.length > 13 ? name.substr(0, 10) + "..." : name;
-      name = name.capitalize();
-    } else {
-      name = 'Loading';
-    }
-    $('title').text('Ambari - ' + name);
-  }.observes('App.router.clusterController.clusterName'),
-
-  isClusterDataLoaded: function(){
-      return App.router.get('clusterController.isLoaded');
-  }.property('App.router.clusterController.isLoaded'),
-  /**
-   * start all polling processes and load the cluster's data
-   */
-  initialize: function(){
-    App.router.get('clusterController').loadClusterData();
-    this.startPolling();
-  },
-  startPolling: function(){
-    App.router.get('updateController').set('isWorking', true);
-    App.router.get('backgroundOperationsController').set('isWorking', true);
-    App.router.get('clusterController').set('isWorking', true);
-  },
-  stopPolling: function(){
-    App.router.get('updateController').set('isWorking', false);
-    App.router.get('backgroundOperationsController').set('isWorking', false);
-    App.router.get('clusterController').set('isWorking', false);
-  },
-
-  reloadTimeOut: null,
-
-  pageReload: function () {
-
-    clearTimeout(this.get("reloadTimeOut"));
-
-    this.set('reloadTimeOut',
-        setTimeout(function () {
-          location.reload()
-        }, App.pageReloadTime)
-    );
-  }.observes("App.router.location.lastSetURL")
-
-})
\ No newline at end of file
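pageReload observes lastSetURL and resets its timer on every navigation, so the full page reload fires only after App.pageReloadTime of routing inactivity, in effect a debounce. The same shape without Ember:

    function makeDebouncedReload(delayMs) {
      var timer = null;
      return function onRouteChange() {
        clearTimeout(timer);            // any navigation restarts the countdown
        timer = setTimeout(function () {
          location.reload();
        }, delayMs);
      };
    }
    var onRouteChange = makeDebouncedReload(600000);  // e.g. ten minutes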
diff --git a/branch-1.2/ambari-web/app/controllers/main/admin.js b/branch-1.2/ambari-web/app/controllers/main/admin.js
deleted file mode 100644
index 410422a..0000000
--- a/branch-1.2/ambari-web/app/controllers/main/admin.js
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.MainAdminController = Em.Controller.extend({
-  name:'mainAdminController',
-  category:'user'
-})
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/controllers/main/admin/advanced.js b/branch-1.2/ambari-web/app/controllers/main/admin/advanced.js
deleted file mode 100644
index 3fdab5b..0000000
--- a/branch-1.2/ambari-web/app/controllers/main/admin/advanced.js
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.MainAdminAdvancedController = Em.Controller.extend({
-  name:'mainAdminAdvancedController',
-  uninstall: function(event){
-    var params = event.context;
-    App.ModalPopup.show({
-      uninstallParams: params,
-      header: Em.I18n.t('admin.advanced.popup.header'),
-      bodyClass: App.MainAdminAdvancedPasswordView.reopen({}), // layout: Em.Handlebars.compile()
-      onPrimary: function(){
-        var form = this.getForm();
-        if(form) {
-          if(form.isValid()) {
-            console.warn("TODO: request for cluster uninstall");
-          }
-        }
-        this.onClose();
-      },
-      onSecondary: function(){
-        this.onClose();
-      },
-
-      getForm: function(){
-        var form = false;
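-        // find the child view bound to 'bodyClass' and take its first child as the password form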
-        $.each(this.get('_childViews'), function(){
-          if(this.get('path') == "bodyClass") {
-            return form = this.get('_childViews')[0];
-          }
-        });
-
-        return form;
-      }
-    })
-  }
-})
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/controllers/main/admin/authentication.js b/branch-1.2/ambari-web/app/controllers/main/admin/authentication.js
deleted file mode 100644
index 5826fda..0000000
--- a/branch-1.2/ambari-web/app/controllers/main/admin/authentication.js
+++ /dev/null
@@ -1,42 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.MainAdminAuthenticationController = Em.Controller.extend({
-  name:'mainAdminAuthenticationController',
-  /**
-   * save user form after editing
-   * @param event
-   */
-  save:function (event) {
-    var form = event.context;
-    if (form.isValid()) {
-      form.save();
-      App.ModalPopup.show({
-        header:Em.I18n.t('admin.authentication.form.testConfiguration'),
-        body:form.get('resultText'),
-        secondary:false,
-        onPrimary:function () {
-          this.hide();
-        }
-      });
-    }
-  },
-  content:App.Authentication.find(1)
-})
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/controllers/main/admin/user.js b/branch-1.2/ambari-web/app/controllers/main/admin/user.js
deleted file mode 100644
index 73fc248..0000000
--- a/branch-1.2/ambari-web/app/controllers/main/admin/user.js
+++ /dev/null
@@ -1,109 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.MainAdminUserController = Em.Controller.extend({
-  name:'mainAdminUserController',
-
-  /**
-   * send a request to the server to delete the user, unless the selected user is the current user
-   * @param event
-   */
-  deleteRecord:function (event) {
-    var self = this;
-    if (event.context.get('userName') == App.get('router').getLoginName()) {
-      App.ModalPopup.show({
-        header:Em.I18n.t('admin.users.delete.yourself.header'),
-        body:Em.I18n.t('admin.users.delete.yourself.message'),
-        onPrimary:function (event) {
-          this.hide();
-        },
-        secondary:false
-      });
-
-      return;
-    }
-
-    App.ModalPopup.show({
-      header:Em.I18n.t('admin.users.delete.header').format(event.context.get('userName')),
-      body:Em.I18n.t('question.sure'),
-      primary:Em.I18n.t('yes'),
-      secondary:Em.I18n.t('no'),
-
-      onPrimary:function () {
-        self.sendCommandToServer('/users/' +  event.context.get("userName"), "DELETE" ,{},
-          function (success) {
-
-            if (!success) {
-              return;
-            }
-
-            event.context.deleteRecord();
-
-            try {
-              App.store.commit()
-            } catch (err) {
-
-            }
-          })
-        this.hide();
-      },
-      onSecondary:function () {
-        this.hide();
-      }
-    });
-  },
-
-  /**
-   * send a request to the server and invoke the callback with true on success or false on failure
-   * @param url
-   * @param method
-   * @param postData
-   * @param callback
-   */
-  sendCommandToServer : function(url, method, postData, callback){
-    var url =  (App.testMode) ?
-        '/data/wizard/deploy/poll_1.json' : //content is the same as ours
-        App.apiPrefix + url;
-
-    var method = App.testMode ? 'GET' : method;
-
-    $.ajax({
-      type: method,
-      url: url,
-      data: JSON.stringify(postData),
-      dataType: 'json',
-      timeout: App.timeout,
-      success: function(data){
-          callback(true, '');
-      },
-
-      error: function (request, ajaxOptions, error) {
-        // surface the server's error message to the callback, minus the exception class prefix
-        var message = $.parseJSON(request.responseText).message;
-        message = message.substr(message.indexOf(':') + 1);
-        callback(false, message);
-        console.log('error during user delete request');
-      },
-
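-      // jQuery statusCode map: handlers for specific HTTP codes are shared via data/statusCodes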
-      statusCode: require('data/statusCodes')
-    });
-  }
-})
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/controllers/main/admin/user/create.js b/branch-1.2/ambari-web/app/controllers/main/admin/user/create.js
deleted file mode 100644
index 97cfc87..0000000
--- a/branch-1.2/ambari-web/app/controllers/main/admin/user/create.js
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.MainAdminUserCreateController = Em.Controller.extend({
-  name:'mainAdminUserCreateController',
-  content:false
-})
diff --git a/branch-1.2/ambari-web/app/controllers/main/admin/user/edit.js b/branch-1.2/ambari-web/app/controllers/main/admin/user/edit.js
deleted file mode 100644
index 7ceab28..0000000
--- a/branch-1.2/ambari-web/app/controllers/main/admin/user/edit.js
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.MainAdminUserEditController = Em.Controller.extend({
-  name:'mainAdminUserEditController',
-  content:false
-})
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/controllers/main/apps/item_controller.js b/branch-1.2/ambari-web/app/controllers/main/apps/item_controller.js
deleted file mode 100644
index 8659169..0000000
--- a/branch-1.2/ambari-web/app/controllers/main/apps/item_controller.js
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.MainAppsItemController = Em.Controller.extend({
-  name:'mainAppsItemController',
-  /**
-   * Set from outside in App.MainAppsView.
-   * Holds an instance of the App.Run model.
-   */
-  content: [],
-  jobsLoaded:false,
-
-  lastJobId : null,
-  gettingJobs:function(){
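-    // fetch jobs only once per run: bail if this run was already requested or fully loaded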
-    var currentId = this.get('content.id');
-    if(currentId == this.get('lastJobId')){
-      return;
-    }
-    if(this.get('content.loadAllJobs')){
-      return;
-    }
-    this.set('lastJobId', currentId);
-    var self = this;
-
-    var url = App.testMode ? '/data/apps/jobs/'+ currentId +'.json' :
-      App.apiPrefix + "/jobhistory/job?workflowId=" + currentId;
-
-    var mapper = App.jobsMapper;
-    mapper.set('controller', this);
-    App.HttpClient.get(url, mapper,{
-      complete:function(jqXHR, textStatus) {
-        self.set('content.loadAllJobs', true);
-      }
-    });
-  }.observes('content')
-
-})
diff --git a/branch-1.2/ambari-web/app/controllers/main/apps_controller.js b/branch-1.2/ambari-web/app/controllers/main/apps_controller.js
deleted file mode 100644
index badcd82..0000000
--- a/branch-1.2/ambari-web/app/controllers/main/apps_controller.js
+++ /dev/null
@@ -1,498 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-var misc = require('utils/misc');
-var date = require('utils/date');
-
-App.MainAppsController = Em.ArrayController.extend({
-
-  name:'mainAppsController',
-  content: [],
-
-  loaded : false,
-  loading : false,
-
-  /**
-   * List of users.
-   * Will be used for filtering in user column.
-   * See App.MainAppsView.userFilterView for more information.
-   */
-  users: function () {
-    return this.get('content').mapProperty("userName").uniq().map(function(userName){
-      return {
-        name: userName,
-        checked: false
-      };
-    });
-  }.property('content.length'),
-
-  loadRuns:function () {
-
-    this.set('loading', true);
-    var self = this;
-
-    //var runsUrl = App.testMode ? "/data/apps/runs.json" : App.apiPrefix + "/jobhistory/workflow?orderBy=startTime&sortDir=DESC&limit=" + App.maxRunsForAppBrowser;
-    var runsUrl = App.testMode ? "/data/apps/runs.json" : App.apiPrefix + this.get("runUrl");
-
-    App.HttpClient.get(runsUrl, App.runsMapper, {
-      complete:function (jqXHR, textStatus) {
-        self.set('loading', false);
-        self.set('loaded', true);
-      }
-    });
-  },
-
-  //Pagination Object
-
-  paginationObject:{
-    iTotalDisplayRecords :null,
-    iTotalRecords:null,
-    startIndex:null,
-    endIndex:null
-  },
-
-  /*
-   Set number of filtered jobs when switching to all jobs
-   */
-  iTotalDisplayRecordsObserver:function(){
-    if(this.get("filterObject.allFilterActivated")){
-      this.set("filterObject.allFilterActivated", false);
-    }else{
-      this.set("filterObject.filteredDisplayRecords",this.get("paginationObject.iTotalDisplayRecords"));
-    }
-  }.observes("paginationObject.iTotalDisplayRecords"),
-
-
-  //Filter object
-
-  filterObject : Ember.Object.create({
-    sSearch_0:"",
-    sSearch_1:"",
-    sSearch_2:"",
-    sSearch_3:"",
-    minJobs:"",
-    maxJobs:"",
-    minInputBytes:"",
-    maxInputBytes:"",
-    minOutputBytes:"",
-    maxOutputBytes:"",
-    minDuration:"",
-    maxDuration:"",
-    minStartTime:"",
-    maxStartTime:"",
-    sSearch:"",
-    iDisplayLength:"",
-    iDisplayStart:"",
-    iSortCol_0:"",
-    sSortDir_0:"",
-
-    allFilterActivated:false,
-    filteredDisplayRecords:null,
-
-    viewType:"all",
-    viewTypeClickEvent:false,
-
-    /**
-     * Direct binding to job filter field
-     */
-    runType:"",
-    onRunTypeChange:function(){
-      if(this.runType == "MapReduce"){
-        this.set("sSearch_2","mr");
-      }else if(this.runType == "Hive"){
-        this.set("sSearch_2","hive");
-      }else if(this.runType == "Pig"){
-        this.set("sSearch_2","pig");
-      }else{
-        this.set("sSearch_2","");
-      }
-    }.observes("runType"),
-
-    /**
-     * Direct binding to job filter field
-     */
-    jobs:"",
-    onJobsChange:function(){
-      var minMaxTmp = this.parseNumber(this.jobs);
-      this.set("minJobs", minMaxTmp.min);
-      this.set("maxJobs", minMaxTmp.max);
-    }.observes("jobs"),
-
-    /**
-     * Direct binding to Input filter field
-     */
-    input:"",
-    onInputChange:function(){
-      var minMaxTmp = this.parseBandWidth(this.input);
-      this.set("minInputBytes", minMaxTmp.min);
-      this.set("maxInputBytes", minMaxTmp.max);
-    }.observes("input"),
-
-    /**
-     * Direct binding to Output filter field
-     */
-    output:"",
-    onOutputChange:function(){
-      var minMaxTmp = this.parseBandWidth(this.output);
-      this.set("minOutputBytes", minMaxTmp.min);
-      this.set("maxOutputBytes", minMaxTmp.max);
-    }.observes("output"),
-
-    /**
-     * Direct binding to Duration filter field
-     */
-    duration:"",
-    onDurationChange:function(){
-      var minMaxTmp = this.parseDuration(this.duration);
-      this.set("minDuration", minMaxTmp.min);
-      this.set("maxDuration", minMaxTmp.max);
-    }.observes("duration"),
-
-    /**
-     * Direct binding to Run Date filter field
-     */
-    runDate:"",
-    onRunDateChange:function(){
-      var minMaxTmp = this.parseDate(this.runDate);
-      this.set("minStartTime", minMaxTmp.min);
-      this.set("maxStartTime", minMaxTmp.max);
-    }.observes("runDate"),
-
-    parseDuration:function(value){
-      var tmp={
-        min:"",
-        max:""
-      };
-
-      var compareChar = isNaN(value.charAt(0)) ? value.charAt(0) : false;
-      var compareScale = value.match(/s|m|h/);
-      compareScale = compareScale ? compareScale[0] : "";
-      var compareValue = compareChar ? parseFloat(value.substr(1, value.length)) : parseFloat(value.substr(0, value.length));
-      if(isNaN(compareValue)){
-        return tmp;
-      }
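-      // widen each bound by a small epsilon so rounded display values like '1.5m' still match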
-      switch (compareScale) {
-        case 'h':
-        tmp.min = Math.ceil((parseFloat(compareValue)-0.0001)*1000*60*60);
-        tmp.max = Math.floor((parseFloat(compareValue)+0.0001)*1000*60*60);
-        break;
-        case 'm':
-        tmp.min = Math.ceil((parseFloat(compareValue)-0.001)*1000*60);
-        tmp.max = Math.floor((parseFloat(compareValue)+0.001)*1000*60);
-        break;
-        case 's':
-        tmp.min = Math.ceil((parseFloat(compareValue)-0.01)*1000);
-        tmp.max = Math.floor((parseFloat(compareValue)+0.01)*1000);
-        break;
-        default:
-          tmp.min = Math.ceil((parseFloat(compareValue)-0.01)*1000);
-          tmp.max = Math.floor((parseFloat(compareValue)+0.01)*1000);
-      }
-      switch (compareChar) {
-        case '<':
-          tmp.min="";
-          break;
-        case '>':
-          tmp.max="";
-          break;
-      }
-      return tmp;
-    },
-
-    parseDate:function(value){
-      var tmp={
-        min:"",
-        max:""
-      };
-      var nowTime = new Date().getTime();
-
-      switch (value){
-        case 'Any':
-          break;
-        case 'Past 1 Day':
-          tmp.min= nowTime - 86400000;
-          break;
-        case 'Past 2 Days':
-          tmp.min= nowTime - 172800000;
-          break;
-        case 'Past 7 Days':
-          tmp.min= nowTime - 604800000;
-          break;
-        case 'Past 14 Days':
-          tmp.min= nowTime - 1209600000;
-          break;
-        case 'Past 30 Days':
-          tmp.min= nowTime - 2592000000;
-          break;
-        case 'Running Now':
-          tmp.min= nowTime;
-          break;
-      }
-      return tmp;
-    },
-
-    parseBandWidth:function(value){
-      var tmp={
-        min:"",
-        max:""
-      };
-
-      var compareChar = isNaN(value.charAt(0)) ? value.charAt(0) : false;
-      var compareScale = value.match(/kb|k|mb|m|gb|g/);
-      compareScale = compareScale ? compareScale[0] : "";
-      var compareValue = compareChar ? parseFloat(value.substr(1, value.length)) : parseFloat(value.substr(0, value.length));
-      if(isNaN(compareValue)){
-        return tmp;
-      }
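-      // convert the value to a byte range, clamping to unit boundaries so adjacent units don't overlap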
-      switch (compareScale) {
-        case 'g': case 'gb':
-          tmp.min = Math.max(1073741824,Math.ceil((compareValue-0.005)*1073741824));
-          tmp.max = Math.floor((compareValue+0.005)*1073741824);
-          break;
-        case 'm': case 'mb':
-          tmp.min = Math.max(1048576,Math.ceil((compareValue-0.05)*1048576));
-          tmp.max = Math.min(1073741823,Math.floor((compareValue+0.05)*1048576));
-          break;
-        case 'k': case 'kb':
-          tmp.min = Math.max(1024,Math.ceil((compareValue-0.05)*1024));
-          tmp.max = Math.min(1048575,Math.floor((compareValue+0.05)*1024));
-          break;
-        default:
-          tmp.min = Math.max(1024,Math.ceil((compareValue-0.05)*1024));
-          tmp.max = Math.min(1048575,Math.floor((compareValue+0.05)*1024));
-      }
-      switch (compareChar) {
-        case '<':
-          tmp.min="";
-          break;
-        case '>':
-          tmp.max="";
-          break;
-      }
-      return tmp;
-    },
-    parseNumber:function(value){
-      var tmp={
-        min:"",
-        max:""
-      };
-      switch (value.charAt(0)) {
-        case '<':
-          tmp.max=value.substr(1);
-          break;
-        case '>':
-          tmp.min=value.substr(1);
-          break;
-        case '=':
-          tmp.min=value.substr(1);
-          tmp.max=value.substr(1);
-          break;
-        default:
-          tmp.min=value;
-          tmp.max=value;
-      }
-      return tmp;
-    },
-
-    /**
-     * Create link for server request
-     * @return {String}
-     */
-    createAppLink:function(){
-      var link = "/jobhistory/datatable?";
-
-
-      var arr = [
-        "sSearch_0", "sSearch_1", "sSearch_2", "sSearch_3", "minJobs",
-        "maxJobs", "minInputBytes", "maxInputBytes", "minOutputBytes",
-        "maxOutputBytes", "minDuration", "maxDuration", "minStartTime",
-        "maxStartTime", "sSearch", "iDisplayLength", "iDisplayStart",
-        "iSortCol_0", "sSortDir_0"
-      ];
-
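-      // serialize only the filters that are actually set into DataTables-style query params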
-      for (var n=0; n<arr.length;n++) {
-        if(this.get(arr[n])){
-          link += arr[n] + "=" + this.get(arr[n]) + "&";
-        }
-      }
-
-      link = link.slice(0,link.length-1);
-
-      var valueInString=link.match(/&/g);
-
-      if(!this.get("viewTypeClickEvent"))
-      if(valueInString != null){
-        this.set("viewType","filtered");
-      }else{
-        this.set("viewType","all");
-      }
-
-      return link;
-    }
-  }),
-
-  /**
-   * reset all filters in table
-   *
-   */
-  clearFilters: function () {
-    var obj=this.get("filterObject");
-    obj.set("sSearch_0","");
-    obj.set("sSearch_1","");
-    obj.set("sSearch_2","");
-    obj.set("sSearch_3","");
-    obj.set("runType","Any");
-    obj.set("jobs","");
-    obj.set("input","");
-    obj.set("output","");
-    obj.set("duration","");
-    obj.set("runDate","Any");
-  },
-
-
-  runUrl : "/jobhistory/datatable",
-  runTimeout : null,
-
-  valueObserver: function(){
-    var link = this.get('filterObject').createAppLink();
-
-    if(this.get("filterObject.viewType") == "filtered"){
-      this.set("runUrl", link);
-    }else{
-      this.set("runUrl",  "/jobhistory/datatable?iDisplayLength="+this.get('filterObject.iDisplayLength'));
-    }
-
-    var timeout = this.get('runTimeout');
-    var self = this;
-
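-    // debounce: restarting the 300 ms timer on each change means rapid filter edits trigger one reload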
-    clearTimeout(timeout);
-    timeout = setTimeout(function(){
-      console.log(self.get("runUrl"));
-      self.loadRuns();
-    }, 300);
-
-    this.set('runTimeout', timeout);
-
-  }.observes(
-      'filterObject.sSearch_0',
-      'filterObject.sSearch_1',
-      'filterObject.sSearch_2',
-      'filterObject.sSearch_3',
-      'filterObject.minJobs',
-      'filterObject.maxJobs',
-      'filterObject.minInputBytes',
-      'filterObject.maxInputBytes',
-      'filterObject.minOutputBytes',
-      'filterObject.maxOutputBytes',
-      'filterObject.minDuration',
-      'filterObject.maxDuration',
-      'filterObject.minStartTime',
-      'filterObject.maxStartTime',
-      'filterObject.sSearch',
-      'filterObject.iDisplayLength',
-      'filterObject.iDisplayStart',
-      'filterObject.iSortCol_0',
-      'filterObject.sSortDir_0',
-      'filterObject.viewType'
-  ),
-
-  serverData: "",
-  summary: null,
-
-  /**
-   * Observer for summary data from server
-   */
-  summaryInfo: function(){
-    var tmp;
-    var summary = this.get('serverData');
-    if(!summary){
-      tmp = {
-        'jobs': {
-          'avg': '-',
-          'min': '-',
-          'max': '-'
-        },
-        'input': {
-          'avg': '-',
-          'min': '-',
-          'max': '-'
-        },
-        'output': {
-          'avg': '-',
-          'min': '-',
-          'max': '-'
-        },
-        'duration': {
-          'avg': '-',
-          'min': '-',
-          'max': '-'
-        },
-        'times': {
-          'oldest': '-',
-          'youngest': '-'
-        }
-      };
-    }else{
-      tmp = {
-        'jobs': {
-          'avg': summary.jobs.avg.toFixed(2),
-          'min': summary.jobs.min,
-          'max': summary.jobs.max
-        },
-        'input': {
-          'avg': misc.formatBandwidth(summary.input.avg),
-          'min': misc.formatBandwidth(summary.input.min),
-          'max': misc.formatBandwidth(summary.input.max)
-        },
-        'output': {
-          'avg': misc.formatBandwidth(summary.output.avg),
-          'min': misc.formatBandwidth(summary.output.min),
-          'max': misc.formatBandwidth(summary.output.max)
-        },
-        'duration': {
-          'avg': date.timingFormat(Math.round(summary.duration.avg)),
-          'min': date.timingFormat(summary.duration.min),
-          'max': date.timingFormat(summary.duration.max)
-        },
-        'times': {
-          'oldest': new Date(summary.times.oldest).toDateString(),
-          'youngest': new Date(summary.times.youngest).toDateString()
-        }
-      };
-    }
-    this.set("summary",tmp);
-  }.observes('serverData'),
-
-
-  columnsName: Ember.ArrayController.create({
-    content: [
-      { name: 'App ID', index: 0 },
-      { name: 'Name', index: 1 },
-      { name: 'Type', index: 2 },
-      { name: 'User', index: 3 },
-      { name: 'Jobs', index: 4 },
-      { name: 'Input', index: 5 },
-      { name: 'Output', index: 6 },
-      { name: 'Duration', index: 7 },
-      { name: 'Run Date', index: 8 }
-    ]
-  })
-
-
-})
diff --git a/branch-1.2/ambari-web/app/controllers/main/charts.js b/branch-1.2/ambari-web/app/controllers/main/charts.js
deleted file mode 100644
index f364554..0000000
--- a/branch-1.2/ambari-web/app/controllers/main/charts.js
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.MainChartsController = Em.ArrayController.extend({
-  name:'mainChartsController'
-})
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/controllers/main/charts/heatmap.js b/branch-1.2/ambari-web/app/controllers/main/charts/heatmap.js
deleted file mode 100644
index 1f79707..0000000
--- a/branch-1.2/ambari-web/app/controllers/main/charts/heatmap.js
+++ /dev/null
@@ -1,101 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-App.MainChartsHeatmapController = Em.Controller.extend({
-  name: 'mainChartsHeatmapController',
-  cluster: function() {
-    return App.Cluster.find().objectAt(0);
-  }.property(),
-  allMetrics: function(){
-    var metrics = [
-      Em.Object.create({
-        label: Em.I18n.t('charts.heatmap.category.host'),
-        category: 'host',
-        items: [ App.MainChartHeatmapDiskSpaceUsedMetric.create(),
-          App.MainChartHeatmapMemoryUsedMetric.create()
-          /*, App.MainChartHeatmapProcessRunMetric.create()*/ ]
-      }),
-      Em.Object.create({
-        label: Em.I18n.t('charts.heatmap.category.hdfs'),
-        category: 'hdfs',
-        items: [ App.MainChartHeatmapDFSBytesReadMetric.create(),
-          App.MainChartHeatmapDFSBytesWrittenMetric.create(),
-          App.MainChartHeatmapDFSGCTimeMillisMetric.create(),
-          App.MainChartHeatmapDFSMemHeapUsedMetric.create() ]
-      }),
-    ];
-
-    if(App.MapReduceService.find().get('length')) {
-      metrics.push(
-        Em.Object.create({
-          label: Em.I18n.t('charts.heatmap.category.mapreduce'),
-          category: 'mapreduce',
-          items: [ App.MainChartHeatmapMapreduceMapsRunningMetric.create(),
-            App.MainChartHeatmapMapreduceReducesRunningMetric.create(),
-            App.MainChartHeatmapMapreduceGCTimeMillisMetric.create(),
-            App.MainChartHeatmapMapreduceMemHeapUsedMetric.create() ]
-        })
-      );
-    }
-
-    return metrics;
-  }.property(),
-
-  selectedMetric: null,
-  /**
-   * Route to the host detail page.
-   * @param event
-   */
-  routeHostDetail: function(event){
-    App.router.transitionTo('main.hosts.hostDetails.summary', event.context)
-  },
-  showHeatMapMetric: function (event) {
-    var metricItem = event.context;
-    if (metricItem) {
-      this.set('selectedMetric', metricItem);
-    }
-  },
-
-  hostToSlotMap: function () {
-    return this.get('selectedMetric.hostToSlotMap');
-  }.property('selectedMetric.hostToSlotMap'),
-
-  loadMetrics: function () {
-    var selectedMetric = this.get('selectedMetric');
-    if (selectedMetric) {
-      selectedMetric.refreshHostSlots();
-    }
-  }.observes('selectedMetric'),
-
-  /**
-   * Return the CSS class name used for the container of each rack.
-   * 
-   * @this App.MainChartsHeatmapController
-   */
-  rackClass: function () {
-    var rackCount = this.get('cluster.racks.length');
-    if (rackCount < 2) {
-      return "span12";
-    } else if (rackCount == 2) {
-      return "span6";
-    } else {
-      return "span4";
-    }
-  }.property('cluster')
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric.js b/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric.js
deleted file mode 100644
index 04d7434..0000000
--- a/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric.js
+++ /dev/null
@@ -1,314 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-var date = require('utils/date');
-
-/**
- * Base class for any heatmap metric.
- * 
- * This class basically provides the following for each heatmap metric.
- * <ul>
- * <li> Provides number of slots in which temperature can fall.
- * <li> Maintains the maximum value so as to scale slot ranges.
- * <li> Gets JSON data from server and maps response for all hosts into above
- * slots.
- * </ul>
- * 
- */
-App.MainChartHeatmapMetric = Em.Object.extend({
-  /**
-   * Name of this metric
-   */
-  name: null,
-
-  /**
-   * Number of slots this metric will be mapped into. When changing this value,
-   * the color count in 'slotColors' should also be changed.
-   */
-  numberOfSlots: 5,
-
-  /**
-   * Colors for the each of the number of slots defined above. When changing the
-   * number of slots, the number of colors also should be updated.
-   * 
-   * @type {Array}
-   */
-  slotColors: [ {
-    r: 0x00,
-    g: 0xcc,
-    b: 0x00
-  }, // Green
-  {
-    r: 0x9f,
-    g: 0xee,
-    b: 0x00
-  }, {
-    r: 0xff,
-    g: 0xff,
-    b: 0x00
-  }, // Yellow
-  {
-    r: 0xff,
-    g: 0xc0,
-    b: 0x00
-  }, // Orange
-  {
-    r: 0xff,
-    g: 0x00,
-    b: 0x00
-  } ],// Red
-
-  /**
-   * Minimum value of this metric. Default is 0.
-   */
-  minimumValue: 0,
-
-  /**
-   * Maximum value of this metric. This has to be specified by extending classes
-   * so that the range from 'minimumValue' to 'maximumValue' can be split among
-   * 'numberOfSlots'. It is recommended that this value be a multiple of
-   * 'numberOfSlots'.
-   */
-  maximumValue: 100,
-
-  /**
-   * Units of the maximum value, shown in the UI. {String}
-   */
-  units: '',
-
-  /**
-   * Indicates whether this metric is currently loading data from the server.
-   * {Boolean}
-   */
-  loading: false,
-
-  /**
-   * Provides following information about slots in an array of objects.
-   * <ul>
-   * <li> from: {number} Slot starts from this value
-   * <li> to: {number} Slot ends at this value (inclusive)
-   * <li> label: {String} Slot name to be shown
-   * <li> cssStyle: {String} style to be embedded on hosts which fall into this
-   * slot.
-   * </ul>
-   * 
-   * Slot count will be the same as specified in 'numberOfSlots'. Slot
-   * definitions will be given in increasing temperature from 'minimumValue' to
-   * 'maximumValue'.
-   * 
-   */
-  slotDefinitions: function () {
-    var min = this.get('minimumValue');
-    var max = this.get('maximumValue');
-    var slotCount = this.get('numberOfSlots');
-    var labelSuffix = this.get('slotDefinitionLabelSuffix');
-    var delta = (max - min) / slotCount;
-    var defs = [];
-    var fractions = max < 5;
-    var slotColors = this.get('slotColors');
-    var slotColorIndex = 0;
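-    // emit one legend entry per slot; the last real slot is built after the loop so its 'to' lands exactly on max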
-    for ( var c = 0; c < slotCount - 1; c++) {
-      var from = this.formatLegendNumber(c * delta);
-      var to = this.formatLegendNumber((c + 1) * delta);
-      if ($.trim(labelSuffix) == 'ms') {
-        var label = date.timingFormat(from) + " - " + date.timingFormat(to);
-      } else {
-        var label = from + labelSuffix + " - " + to + labelSuffix;
-      }
-      var slotColor = slotColors[slotColorIndex++];
-      defs.push(Em.Object.create({
-        from: from,
-        to: to,
-        label: label,
-        cssStyle: "background-color:rgb(" + slotColor.r + "," + slotColor.g + "," + slotColor.b + ")"
-      }));
-    }
-    from = this.formatLegendNumber((slotCount - 1) * delta);
-    to = this.formatLegendNumber(max);
-
-    if ($.trim(labelSuffix) == 'ms') {
-      var label = date.timingFormat(from) + " - " + date.timingFormat(to);
-    } else {
-      var label = from + labelSuffix + " - " + to + labelSuffix;
-    }
-
-    slotColor = slotColors[slotColorIndex++];
-    defs.push(Em.Object.create({
-      from: from,
-      to: to,
-      label: label,
-      cssStyle: "background-color:rgb(" + slotColor.r + "," + slotColor.g + "," + slotColor.b + ")"
-    }));
-    var hatchStyle = "background-color:rgb(135, 206, 250)";
-    if(jQuery.browser.webkit){
-      hatchStyle = "background-image:-webkit-repeating-linear-gradient(-45deg, #FF1E10, #FF1E10 3px, #ff6c00 3px, #ff6c00 6px)";
-    }else if(jQuery.browser.mozilla){
-      hatchStyle = "background-image:repeating-linear-gradient(-45deg, #FF1E10, #FF1E10 3px, #ff6c00 3px, #ff6c00 6px)";
-    }else if(jQuery.browser.msie && jQuery.browser.version){
-      var majorVersion =  parseInt(jQuery.browser.version.split('.')[0]);
-      if(majorVersion>9){
-        hatchStyle = "background-image:repeating-linear-gradient(-45deg, #FF1E10, #FF1E10 3px, #ff6c00 3px, #ff6c00 6px)";
-      }
-    }
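-    // two sentinel legend entries follow the real slots: NaN catches unparseable values, -1 marks hosts with no data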
-    defs.push(Em.Object.create({
-      from: NaN,
-      to: NaN,
-      label: "Invalid data",
-      cssStyle: hatchStyle
-    }));
-    defs.push(Em.Object.create({
-      from: -1,
-      to: -1,
-      label: "Not Applicable",
-      cssStyle: "background-color:rgb(200, 200, 200)"
-    }));
-    return defs;
-  }.property('minimumValue', 'maximumValue', 'numberOfSlots'),
-
-  /**
-   * In slot definitions this value is used to construct the label by appending
-   * it to slot min-max values. For example giving '%' here would result in slot
-   * definition label being '0% - 10%'.
-   */
-  slotDefinitionLabelSuffix: '',
-
-  /**
-   * URL template from which metrics will be gotten for all hosts. The
-   * {metricName} param will be replaced by the 'defaultMetric' value.
-   */
-  metricUrlTemplate: "/clusters/{clusterName}/hosts?fields={metricName}",
-
-  /**
-   * URL from which data for this metric is fetched. Extending classes should
-   * override this to provide the correct value.
-   */
-  metricUrl: function () {
-    var clusterName = App.router.get('clusterController.clusterName');
-    var fixedMetricName = this.get('defaultMetric');
-    fixedMetricName = fixedMetricName.replace(/\./g, "/");
-    return App.formatUrl(App.apiPrefix + this.get('metricUrlTemplate'), {
-      clusterName: clusterName,
-      metricName: fixedMetricName
-    }, "/data/cluster_metrics/cpu_1hr.json");
-  }.property('App.router.clusterController.clusterName', 'defaultMetric'),
-
-  defaultMetric: '',
-
-  /**
-   * Maps server JSON into an object where keys are hostnames and values are the
-   * true metric values. This function by default will map 'defaultMetric' into
-   * its corresponding value.
-   * 
-   * @type Function
-   */
-  metricMapper: function (json) {
-    var hostToValueMap = {};
-    var metricName = this.get('defaultMetric');
-    if (json.items) {
-      var props = metricName.split('.');
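-      // walk the dotted metric path into each item, yielding null if any segment is missing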
-      json.items.forEach(function (item) {
-        var value = item;
-        props.forEach(function (prop) {
-          if (value != null && prop in value) {
-            value = value[prop];
-          } else {
-            value = null;
-          }
-        });
-        if (value != null) {
-          var hostName = item.Hosts.host_name;
-          hostToValueMap[hostName] = value;
-        }
-      });
-    }
-    return hostToValueMap;
-  },
-
-  hostToValueMap: null,
-
-  hostToSlotMap: function(){
-    var hostToValueMap = this.get('hostToValueMap');
-    var slotDefs = this.get('slotDefinitions');
-    var allHosts = App.Host.find();
-    var hostToSlotMap = {};
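-    // slots 0..n-3 are value ranges, slot n-2 flags invalid (NaN) values, slot n-1 means no value was reported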
-    if (hostToValueMap && allHosts) {
-      allHosts.forEach(function(host, index, list){
-        var slot = -1;
-        var key = host.get('hostName');
-        if (key in hostToValueMap) {
-          var value = hostToValueMap[key];
-          if (isNaN(value)) {
-            slot = slotDefs.length - 2;
-          } else {
-            for ( var slotIndex = 0; slotIndex < slotDefs.length - 2; slotIndex++) {
-              var slotDef = slotDefs[slotIndex];
-              if (value >= slotDef.from && value <= slotDef.to) {
-                slot = slotIndex;
-              }
-            }
-            if(slot < 0){
-              // Assign it to the last legend
-              slot = slotDefs.length - 3;
-            }
-          }
-        } else {
-          slot = slotDefs.length - 1;
-        }
-        if (slot > -1) {
-          hostToSlotMap[key] = slot;
-        }
-      });
-    }
-    return hostToSlotMap;
-  }.property('hostToValueMap', 'slotDefinitions'),
-
-  /**
-   * Fetches metric values for all hosts and stores them in 'hostToValueMap',
-   * from which 'hostToSlotMap' derives each host's slot index.
-   */
-  refreshHostSlots: function () {
-    this.set('loading', true);
-    jQuery.ajax({
-      url: this.get('metricUrl'),
-      dataType: 'json',
-      error: jQuery.proxy(function () {
-        this.set('loading', false);
-      }, this),
-      success: jQuery.proxy(function (data) {
-        var hostToValueMap = this.metricMapper(data);
-        this.set('hostToValueMap', hostToValueMap);
-        this.set('loading', false);
-      }, this)
-    });
-  }.observes('slotDefinitions'),
-
-  /**
-   * Turns numbers into displayable values. For example 24.345432425 into 24.3
-   * etc.
-   * 
-   * @private
-   */
-  formatLegendNumber: function (num) {
-    var fraction = num % 1;
-    if (fraction > 0) {
-      return num.toFixed(1);
-    }
-    return num;
-  }
-})
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_dfs.js b/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_dfs.js
deleted file mode 100644
index 5692188..0000000
--- a/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_dfs.js
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * Base class for any HDFS metric.
- */
-App.MainChartHeatmapDFSMetrics = App.MainChartHeatmapMetric.extend({
-  metricUrlTemplate: "/clusters/{clusterName}/services/HDFS/components/DATANODE?fields=host_components/{metricName}",
-
-  /**
-   * Custom mapper for DFS metrics
-   */
-  metricMapper: function (json) {
-    var hostToValueMap = {};
-    var metricName = this.get('defaultMetric');
-    if (json.host_components) {
-      var props = metricName.split('.');
-      var transformValueFunction = this.get('transformValue');
-      json.host_components.forEach(function (hc) {
-        var value = hc;
-        props.forEach(function (prop) {
-          if (value != null && prop in value) {
-            value = value[prop];
-          } else {
-            value = null;
-          }
-        });
-        if (value != null) {
-          if (transformValueFunction) {
-            value = transformValueFunction(value);
-          }
-          var hostName = hc.HostRoles.host_name;
-          hostToValueMap[hostName] = value;
-        }
-      });
-    }
-    return hostToValueMap;
-  },
-
-  /**
-   * Utility function which allows extending classes to transform the value
-   * assigned to a host.
-   * 
-   * @type Function
-   */
-  transformValue: null
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_dfs_bytesread.js b/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_dfs_bytesread.js
deleted file mode 100644
index 901184e..0000000
--- a/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_dfs_bytesread.js
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * Heatmap metric: bytes read per DataNode.
- */
-App.MainChartHeatmapDFSBytesReadMetric = App.MainChartHeatmapDFSMetrics.extend({
-  name: 'Bytes Read',
-  maximumValue: 1024, // 1GB
-  defaultMetric: 'metrics.dfs.datanode.bytes_read',
-  units: 'MB',
-  slotDefinitionLabelSuffix: 'MB',
-  transformValue: function (value) {
-    return value / (1 << 20); // convert bytes to MB (1 MB = 2^20 bytes)
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_dfs_byteswritten.js b/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_dfs_byteswritten.js
deleted file mode 100644
index 94200f5..0000000
--- a/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_dfs_byteswritten.js
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * Heatmap metric: bytes written per DataNode.
- */
-App.MainChartHeatmapDFSBytesWrittenMetric = App.MainChartHeatmapDFSMetrics.extend({
-  name: 'Bytes Written',
-  maximumValue: 1024, // 1GB
-  defaultMetric: 'metrics.dfs.datanode.bytes_written',
-  units: 'MB',
-  slotDefinitionLabelSuffix: 'MB',
-  transformValue: function (value) {
-    return value / (1 << 20); // convert bytes to MB (1 MB = 2^20 bytes)
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_dfs_gctime.js b/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_dfs_gctime.js
deleted file mode 100644
index 754c2ee..0000000
--- a/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_dfs_gctime.js
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * Heatmap metric: JVM garbage collection time per DataNode.
- */
-App.MainChartHeatmapDFSGCTimeMillisMetric = App.MainChartHeatmapDFSMetrics.extend({
-  name: 'Garbage Collection Time',
-  maximumValue: 10000,
-  defaultMetric: 'metrics.jvm.gcTimeMillis',
-  units: ' ms',
-  slotDefinitionLabelSuffix: ' ms'
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_dfs_memHeapUsed.js b/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_dfs_memHeapUsed.js
deleted file mode 100644
index 0ced10e..0000000
--- a/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_dfs_memHeapUsed.js
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * Heatmap metric: JVM heap memory used per DataNode.
- */
-App.MainChartHeatmapDFSMemHeapUsedMetric = App.MainChartHeatmapDFSMetrics.extend({
-  name: 'JVM Heap Memory Used',
-  maximumValue: 512,
-  defaultMetric: 'metrics.jvm.memHeapUsedM',
-  units: 'MB',
-  slotDefinitionLabelSuffix: 'MB'
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_diskspaceused.js b/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_diskspaceused.js
deleted file mode 100644
index b99c178..0000000
--- a/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_diskspaceused.js
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * Heatmap metric: percentage of disk space used per host.
- */
-App.MainChartHeatmapDiskSpaceUsedMetric = App.MainChartHeatmapMetric.extend({
-  name: 'Disk Space Used %',
-  maximumValue: 100,
-  defaultMetric: 'metrics.disk',
-  units: '%',
-  slotDefinitionLabelSuffix: '%',
-  metricMapper: function (json) {
-    var hostToValueMap = {};
-    var metricName = this.get('defaultMetric');
-    if (json.items) {
-      var props = metricName.split('.');
-      json.items.forEach(function (item) {
-        var value = item;
-        props.forEach(function (prop) {
-          if (value != null && prop in value) {
-            value = value[prop];
-          } else {
-            value = null;
-          }
-        });
-        if (value != null) {
-          var total = value.disk_total;
-          var free = value.disk_free;
-          value = (((total - free) * 100) / total).toFixed(1);
-          var hostName = item.Hosts.host_name;
-          hostToValueMap[hostName] = value;
-        }
-      });
-    }
-    return hostToValueMap;
-  }
-})
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_mapreduce.js b/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_mapreduce.js
deleted file mode 100644
index 6b910cf..0000000
--- a/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_mapreduce.js
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * Base class for any MapReduce metric.
- */
-App.MainChartHeatmapMapreduceMetrics = App.MainChartHeatmapMetric.extend({
-  metricUrlTemplate: "/clusters/{clusterName}/services/MAPREDUCE/components/TASKTRACKER?fields=host_components/{metricName}",
-
-  /**
-   * Custom mapper for MapReduce metrics
-   */
-  metricMapper: function (json) {
-    var hostToValueMap = {};
-    var metricName = this.get('defaultMetric');
-    if (json.host_components) {
-      var props = metricName.split('.');
-      var transformValueFunction = this.get('transformValue');
-      json.host_components.forEach(function (hc) {
-        var value = hc;
-        props.forEach(function (prop) {
-          if (value != null && prop in value) {
-            value = value[prop];
-          } else {
-            value = null;
-          }
-        });
-        if (value != null) {
-          if (transformValueFunction) {
-            value = transformValueFunction(value);
-          }
-          var hostName = hc.HostRoles.host_name;
-          hostToValueMap[hostName] = value;
-        }
-      });
-    }
-    return hostToValueMap;
-  },
-
-  /**
-   * Utility function which allows extending classes to transform the value
-   * assigned to a host.
-   * 
-   * @type Function
-   */
-  transformValue: null
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_mapreduce_gctime.js b/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_mapreduce_gctime.js
deleted file mode 100644
index 7486441..0000000
--- a/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_mapreduce_gctime.js
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * Heatmap metric showing JVM garbage collection time (ms) per TaskTracker.
- */
-App.MainChartHeatmapMapreduceGCTimeMillisMetric = App.MainChartHeatmapMapreduceMetrics.extend({
-  name: 'Garbage Collection Time',
-  maximumValue: 10000,
-  defaultMetric: 'metrics.jvm.gcTimeMillis',
-  units: ' ms',
-  slotDefinitionLabelSuffix: ' ms'
-});
\ No newline at end of file
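The metric subclasses in this directory carry configuration only; fetching
and mapping live in the base class. A hypothetical new metric would follow
the same shape (display name and metric path invented for illustration):

    App.MainChartHeatmapMapreduceMapSlotsMetric = App.MainChartHeatmapMapreduceMetrics.extend({
      name: 'Map Slots Occupied',                            // hypothetical label
      maximumValue: 100,
      defaultMetric: 'metrics.mapred.tasktracker.mapSlots',  // hypothetical metric path
      units: ' slots',
      slotDefinitionLabelSuffix: ' slots'
    });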
diff --git a/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_mapreduce_mapsRunning.js b/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_mapreduce_mapsRunning.js
deleted file mode 100644
index 2248eae..0000000
--- a/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_mapreduce_mapsRunning.js
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * Heatmap metric showing the number of running map tasks per TaskTracker.
- */
-App.MainChartHeatmapMapreduceMapsRunningMetric = App.MainChartHeatmapMapreduceMetrics.extend({
-  name: 'Maps Running',
-  maximumValue: 100,
-  defaultMetric: 'metrics.mapred.tasktracker.maps_running',
-  units: ' maps',
-  slotDefinitionLabelSuffix: ' Maps'
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_mapreduce_memHeapUsed.js b/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_mapreduce_memHeapUsed.js
deleted file mode 100644
index c3d4adf..0000000
--- a/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_mapreduce_memHeapUsed.js
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * Heatmap metric showing JVM heap memory used (MB) per TaskTracker.
- */
-App.MainChartHeatmapMapreduceMemHeapUsedMetric = App.MainChartHeatmapMapreduceMetrics.extend({
-  name: 'JVM Heap Memory Used',
-  maximumValue: 512,
-  defaultMetric: 'metrics.jvm.memHeapUsedM',
-  units: 'MB',
-  slotDefinitionLabelSuffix: 'MB'
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_mapreduce_reducesRunning.js b/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_mapreduce_reducesRunning.js
deleted file mode 100644
index 702d3f4..0000000
--- a/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_mapreduce_reducesRunning.js
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * Heatmap metric showing the number of running reduce tasks per TaskTracker.
- */
-App.MainChartHeatmapMapreduceReducesRunningMetric = App.MainChartHeatmapMapreduceMetrics.extend({
-  name: 'Reduces Running',
-  maximumValue: 100,
-  defaultMetric: 'metrics.mapred.tasktracker.reduces_running',
-  units: ' reduces',
-  slotDefinitionLabelSuffix: ' Reduces'
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_memoryused.js b/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_memoryused.js
deleted file mode 100644
index 3bcc355..0000000
--- a/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_memoryused.js
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * Heatmap metric showing the percentage of memory used on each host,
- * computed from the host's mem_total and mem_free metrics.
- */
-App.MainChartHeatmapMemoryUsedMetric = App.MainChartHeatmapMetric.extend({
-  name: 'Memory Used %',
-  maximumValue: 100,
-  defaultMetric: 'metrics.memory',
-  units: '%',
-  slotDefinitionLabelSuffix: '%',
-  metricMapper: function (json) {
-    var hostToValueMap = {};
-    var metricName = this.get('defaultMetric');
-    if (json.items) {
-      var props = metricName.split('.');
-      json.items.forEach(function (item) {
-        var value = item;
-        props.forEach(function (prop) {
-          if (value != null && prop in value) {
-            value = value[prop];
-          } else {
-            value = null;
-          }
-        });
-        if (value != null) {
-          var total = value.mem_total;
-          var used = value.mem_total - value.mem_free;
-          value = ((used * 100) / total).toFixed(1);
-          var hostName = item.Hosts.host_name;
-          hostToValueMap[hostName] = value;
-        }
-      });
-    }
-    return hostToValueMap;
-  }
-});
\ No newline at end of file
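The memory mapper derives a percentage from the two raw gauges it reads;
the arithmetic in isolation (sample figures assumed):

    var value = { mem_total: 16384, mem_free: 4096 };
    var total = value.mem_total;
    var used = value.mem_total - value.mem_free;   // 12288
    var pct = ((used * 100) / total).toFixed(1);   // '75.0'
    console.log(pct + '%');                        // 75.0%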
diff --git a/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_processrun.js b/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_processrun.js
deleted file mode 100644
index fcb64a6..0000000
--- a/branch-1.2/ambari-web/app/controllers/main/charts/heatmap_metrics/heatmap_metric_processrun.js
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * Heatmap metric showing the total number of running processes on each
- * host, with values rounded to one decimal place.
- */
-App.MainChartHeatmapProcessRunMetric = App.MainChartHeatmapMetric.extend({
-  name: 'Total Running Processes',
-  maximumValue: 1,
-  defaultMetric: 'metrics.process.proc_run',
-  units: 'Processes',
-  metricMapper: function (json) {
-    var map = this._super(json);
-    for ( var host in map) {
-      if (host in map) {
-        var val = map[host];
-        map[host] = val.toFixed(1);
-      }
-    }
-    return map;
-  }
-});
\ No newline at end of file
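The process-run metric shows the override pattern used by these mappers:
delegate to the parent's metricMapper via this._super(json), then
post-process the returned map. A plain-JavaScript sketch of the same idea
(objects and values hypothetical):

    var base = {
      metricMapper: function (json) { return { host1: 3.14159, host2: 2 }; }
    };
    var processRun = Object.create(base);
    processRun.metricMapper = function (json) {
      var map = base.metricMapper.call(this, json); // stands in for this._super(json)
      for (var host in map) {
        if (map.hasOwnProperty(host)) {
          map[host] = map[host].toFixed(1);
        }
      }
      return map;
    };
    console.log(processRun.metricMapper({})); // { host1: '3.1', host2: '2.0' }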
diff --git a/branch-1.2/ambari-web/app/controllers/main/host.js b/branch-1.2/ambari-web/app/controllers/main/host.js
deleted file mode 100644
index 43aa1e4..0000000
--- a/branch-1.2/ambari-web/app/controllers/main/host.js
+++ /dev/null
@@ -1,184 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-var validator = require('utils/validator');
-var componentHelper = require('utils/component');
-
-App.MainHostController = Em.ArrayController.extend({
-  name:'mainHostController',
-  content: App.Host.find(),
-  comeWithFilter: false,
-
-  alerts: function () {
-    return App.router.get('clusterController.alerts').filterProperty('isOk', false).filterProperty('ignoredForHosts', false);
-  }.property('App.router.clusterController.alerts.length'),
-
-  /**
-   * Components which will be shown in component filter
-   */
-  componentsForFilter:function() {
-    var installedComponents = componentHelper.getInstalledComponents();
-    installedComponents.setEach('checkedForHostFilter', false);
-    return installedComponents;
-  }.property('App.router.clusterController.isLoaded'),
-
-  masterComponents:function () {
-    return this.get('componentsForFilter').filterProperty('isMaster', true);
-  }.property('componentsForFilter'),
-
-  slaveComponents:function () {
-    return this.get('componentsForFilter').filterProperty('isSlave', true);
-  }.property('componentsForFilter'),
-
-  clientComponents: function() {
-    return this.get('componentsForFilter').filterProperty('isClient', true);
-  }.property('componentsForFilter'),
-
-  /**
-   * Is true if the alerts filter is active
-   */
-  filteredByAlerts:false,
-
-  /**
-   * Is true if Hosts page was opened by clicking on alerts count badge
-   */
-  comeWithAlertsFilter: false,
-
-  /**
-   * Enable or disable filtering by alerts
-   */
-  filterByAlerts: function () {
-    if (App.router.get('currentState.name') == 'index') {
-      this.set('filteredByAlerts', !this.get('filteredByAlerts'));
-    } else {
-      App.router.transitionTo('hosts.index');
-      this.set('comeWithAlertsFilter', true);
-    }
-  },
-
-  /**
-   * Filter hosts by componentName of <code>component</code>
-   * @param component App.HostComponent
-   */
-  filterByComponent:function (component) {
-    var id = component.get('componentName');
-
-    this.get('componentsForFilter').setEach('checkedForHostFilter', false);
-    this.get('componentsForFilter').filterProperty('id', id).setEach('checkedForHostFilter', true);
-
-    this.set('comeWithFilter', true);
-  },
-
-  /**
-   * On click callback for the decommission button
-   */
-  decommissionButtonPopup:function () {
-    var self = this;
-    App.ModalPopup.show({
-      header:Em.I18n.t('hosts.decommission.popup.header'),
-      body:Em.I18n.t('hosts.decommission.popup.body'),
-      primary:'Yes',
-      secondary:'No',
-      onPrimary:function () {
-        alert('do');
-        this.hide();
-      },
-      onSecondary:function () {
-        this.hide();
-      }
-    });
-  },
-
-  /**
-   * On click callback for the delete button
-   */
-  deleteButtonPopup:function () {
-    var self = this;
-    App.ModalPopup.show({
-      header:Em.I18n.t('hosts.delete.popup.header'),
-      body:Em.I18n.t('hosts.delete.popup.body'),
-      primary:'Yes',
-      secondary:'No',
-      onPrimary:function () {
-        self.removeHosts();
-        this.hide();
-      },
-      onSecondary:function () {
-        this.hide();
-      }
-    });
-  },
-
-  showAlertsPopup: function (event) {
-    var host = event.context;
-    App.ModalPopup.show({
-      header: this.t('services.alerts.headingOfList'),
-      bodyClass: Ember.View.extend({
-        hostAlerts: function () {
-          var allAlerts = App.router.get('clusterController.alerts').filterProperty('ignoredForHosts', false);
-          if (host) {
-            return allAlerts.filterProperty('hostName', host.get('hostName'));
-          }
-          return [];
-        }.property('App.router.clusterController.alerts'),
-
-        closePopup: function () {
-          this.get('parentView').hide();
-        },
-
-        templateName: require('templates/main/host/alerts_popup')
-      }),
-      primary: 'Close',
-      onPrimary: function() {
-        this.hide();
-      },
-      secondary : null,
-      didInsertElement: function () {
-        this.$().find('.modal-footer').addClass('align-center');
-        this.$().children('.modal').css({'margin-top': '-350px'});
-      }
-    });
-    event.stopPropagation();
-  },
-
-  /**
-   * remove selected hosts
-   */
-  removeHosts:function () {
-    var hosts = this.get('content');
-    var selectedHosts = hosts.filterProperty('isChecked', true);
-    selectedHosts.forEach(function (_hostInfo) {
-      console.log('Removing:  ' + _hostInfo.hostName);
-    });
-    this.get('fullContent').removeObjects(selectedHosts);
-  },
-
-  /**
-   * remove hosts whose id equals host_id
-   * @param host_id
-   */
-  checkRemoved:function (host_id) {
-    var hosts = this.get('content');
-    var selectedHosts = hosts.filterProperty('id', host_id);
-    this.get('fullContent').removeObjects(selectedHosts);
-  }
-
-});
\ No newline at end of file
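The alerts property above chains two filterProperty calls to keep only
failing, non-ignored alerts. The same filtering over plain arrays (sample
alert objects hypothetical):

    var alerts = [
      { isOk: true,  ignoredForHosts: false },
      { isOk: false, ignoredForHosts: false },
      { isOk: false, ignoredForHosts: true }
    ];
    var visible = alerts
      .filter(function (a) { return a.isOk === false; })
      .filter(function (a) { return a.ignoredForHosts === false; });
    console.log(visible.length); // 1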
diff --git a/branch-1.2/ambari-web/app/controllers/main/host/add_controller.js b/branch-1.2/ambari-web/app/controllers/main/host/add_controller.js
deleted file mode 100644
index 98ad89b..0000000
--- a/branch-1.2/ambari-web/app/controllers/main/host/add_controller.js
+++ /dev/null
@@ -1,560 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-var App = require('app');
-
-App.AddHostController = App.WizardController.extend({
-
-  name: 'addHostController',
-
-  totalSteps: 6,
-
-  /**
-   * Used for hiding back button in wizard
-   */
-  hideBackButton: true,
-
-  /**
-   * All wizards data will be stored in this variable
-   *
-   * cluster - cluster name
-   * hosts - hosts, ssh key, repo info, etc.
-   * services - services list
-   * hostsInfo - list of selected hosts
-   * slaveComponentHosts, hostSlaveComponents - info about slave hosts
-   * masterComponentHosts - info about master hosts
-   * config??? - to be described later
-   */
-  content: Em.Object.create({
-    cluster: null,
-    hosts: null,
-    installOptions: null,
-    services: null,
-    slaveComponentHosts: null,
-    masterComponentHosts: null,
-    serviceConfigProperties: null,
-    advancedServiceConfig: null,
-    controllerName: 'addHostController',
-    isWizard: true
-  }),
-
-  /**
-   * return new object extended from clusterStatusTemplate
-   * @return Object
-   */
-  getCluster: function(){
-    return jQuery.extend({}, this.get('clusterStatusTemplate'), {name: App.router.getClusterName()});
-  },
-
-  /**
-   * return new object extended from installOptionsTemplate
-   * @return Object
-   */
-  getInstallOptions: function(){
-    return jQuery.extend({}, this.get('installOptionsTemplate'));
-  },
-
-  /**
-   * return empty hosts array
-   * @return Array
-   */
-  getHosts: function(){
-    return [];
-  },
-
-  /**
-   * Remove host from model. Used at <code>Confirm hosts(step2)</code> step
-   * @param hosts Array of hosts, which we want to delete
-   */
-  removeHosts: function (hosts) {
-    //todo Replace this code with real logic
-    App.db.removeHosts(hosts);
-  },
-
-  /**
-   * Save data, which user filled, to main controller
-   * @param stepController App.WizardStep3Controller
-   */
-  saveConfirmedHosts: function (stepController) {
-    var hostInfo = {};
-
-    stepController.get('content.hosts').forEach(function (_host) {
-      hostInfo[_host.name] = {
-        name: _host.name,
-        cpu: _host.cpu,
-        memory: _host.memory,
-        disk_info: _host.disk_info,
-        bootStatus: _host.bootStatus,
-        isInstalled: false
-      };
-    });
-
-
-    console.log('addHostController:saveConfirmedHosts: save hosts ', hostInfo);
-    App.db.setHosts(hostInfo);
-    this.set('content.hosts', hostInfo);
-  },
-
-  /**
-   * Save data after installation to main controller
-   * @param stepController App.WizardStep9Controller
-   */
-  saveInstalledHosts: function (stepController) {
-    var hosts = stepController.get('hosts');
-    var hostInfo = App.db.getHosts();
-
-    for (var index in hostInfo) {
-      var host = hosts.findProperty('name', hostInfo[index].name);
-      if (host) {
-        hostInfo[index].status = host.status;
-        //tasks should be empty because they are loaded from the server
-        //hostInfo[index].tasks = host.tasks;
-        hostInfo[index].message = host.message;
-        hostInfo[index].progress = host.progress;
-      }
-    }
-    App.db.setHosts(hostInfo);
-    this.set('content.hosts', hostInfo);
-    console.log('addHostController:saveInstalledHosts: save hosts ', hostInfo);
-  },
-
-  /**
-   * Load services data from server.
-   */
-  loadServicesFromServer: function() {
-    var displayOrderConfig = require('data/services');
-    var apiUrl = App.get('stackVersionURL');
-    var apiService = this.loadServiceComponents(displayOrderConfig, apiUrl);
-    apiService.forEach(function(item, index){
-      apiService[index].isSelected = App.Service.find().someProperty('id', item.serviceName);
-      apiService[index].isDisabled = apiService[index].isSelected;
-      apiService[index].isInstalled = apiService[index].isSelected;
-    });
-    this.set('content.services', apiService);
-    App.db.setService(apiService);
-  },
-
-  /**
-   * Load services data. Will be used at <code>Select services(step4)</code> step
-   */
-  loadServices: function () {
-    var servicesInfo = App.db.getService();
-
-    servicesInfo.forEach(function (item, index) {
-      servicesInfo[index] = Em.Object.create(item);
-    });
-    this.set('content.services', servicesInfo);
-    console.log('AddHostController.loadServices: loaded data ', servicesInfo);
-    console.log('selected services ', servicesInfo.filterProperty('isSelected', true).filterProperty('isDisabled', false).mapProperty('serviceName'));
-  },
-
-  /**
-   * Load master component hosts data for using in required step controllers
-   */
-  loadMasterComponentHosts: function () {
-    var masterComponentHosts = App.db.getMasterComponentHosts();
-    if (!masterComponentHosts) {
-      masterComponentHosts = [];
-      App.HostComponent.find().filterProperty('isMaster', true).forEach(function (item) {
-        masterComponentHosts.push({
-          component: item.get('componentName'),
-          hostName: item.get('host.hostName'),
-          isInstalled: true,
-          serviceId: item.get('service.id'),
-          display_name: item.get('displayName')
-        })
-      });
-      App.db.setMasterComponentHosts(masterComponentHosts);
-    }
-    this.set("content.masterComponentHosts", masterComponentHosts);
-    console.log("AddHostController.loadMasterComponentHosts: loaded hosts ", masterComponentHosts);
-  },
-
-  /**
-   * Save slaveHostComponents to main controller
-   * @param stepController
-   */
-  saveSlaveComponentHosts: function (stepController) {
-
-    var hosts = stepController.get('hosts');
-    var isMrSelected = stepController.get('isMrSelected');
-    var isHbSelected = stepController.get('isHbSelected');
-
-    var dataNodeHosts = [];
-    var taskTrackerHosts = [];
-    var regionServerHosts = [];
-    var clientHosts = [];
-
-    hosts.forEach(function (host) {
-
-      if (host.get('isDataNode')) {
-        dataNodeHosts.push({
-          hostName: host.hostName,
-          group: 'Default',
-          isInstalled: host.get('isDataNodeInstalled')
-        });
-      }
-      if (isMrSelected && host.get('isTaskTracker')) {
-        taskTrackerHosts.push({
-          hostName: host.hostName,
-          group: 'Default',
-          isInstalled: host.get('isTaskTrackerInstalled')
-        });
-      }
-      if (isHbSelected && host.get('isRegionServer')) {
-        regionServerHosts.push({
-          hostName: host.hostName,
-          group: 'Default',
-          isInstalled: host.get('isRegionServerInstalled')
-        });
-      }
-      if (host.get('isClient')) {
-        clientHosts.pushObject({
-          hostName: host.hostName,
-          group: 'Default',
-          isInstalled: host.get('isClientInstalled')
-        });
-      }
-    }, this);
-
-    var slaveComponentHosts = [];
-    slaveComponentHosts.push({
-      componentName: 'DATANODE',
-      displayName: 'DataNode',
-      hosts: dataNodeHosts
-    });
-    if (isMrSelected) {
-      slaveComponentHosts.push({
-        componentName: 'TASKTRACKER',
-        displayName: 'TaskTracker',
-        hosts: taskTrackerHosts
-      });
-    }
-    if (isHbSelected) {
-      slaveComponentHosts.push({
-        componentName: 'HBASE_REGIONSERVER',
-        displayName: 'RegionServer',
-        hosts: regionServerHosts
-      });
-    }
-    slaveComponentHosts.pushObject({
-      componentName: 'CLIENT',
-      displayName: 'client',
-      hosts: clientHosts
-    });
-
-    App.db.setSlaveComponentHosts(slaveComponentHosts);
-    console.log('addHostController.slaveComponentHosts: saved hosts', slaveComponentHosts);
-    this.set('content.slaveComponentHosts', slaveComponentHosts);
-  },
-
-  /**
-   * return slaveComponents bound to hosts
-   * @return {Array}
-   */
-  getSlaveComponentHosts: function () {
-    var components = [
-      {
-        name: 'DATANODE',
-        service: 'HDFS'
-      },
-      {
-        name: 'TASKTRACKER',
-        service: 'MAPREDUCE'
-      },
-      {
-        name: 'HBASE_REGIONSERVER',
-        service: 'HBASE'
-      }
-    ];
-
-    var result = [];
-    var services = App.Service.find();
-    var selectedServices = this.get('content.services').filterProperty('isSelected', true).mapProperty('serviceName');
-    for (var index = 0; index < components.length; index++) {
-      var comp = components[index];
-      if (!selectedServices.contains(comp.service)) {
-        continue;
-      }
-
-
-      var service = services.findProperty('id', comp.service);
-      var hosts = [];
-
-      service.get('hostComponents').filterProperty('componentName', comp.name).forEach(function (host_component) {
-        hosts.push({
-          group: "Default",
-          hostName: host_component.get('host.id'),
-          isInstalled: true
-        });
-      }, this);
-
-      result.push({
-        componentName: comp.name,
-        displayName: App.format.role(comp.name),
-        hosts: hosts,
-        isInstalled: true
-      })
-    }
-
-    var clientsHosts = App.HostComponent.find().filterProperty('componentName', 'HDFS_CLIENT');
-    var hosts = [];
-
-    clientsHosts.forEach(function (host_component) {
-      hosts.push({
-        group: "Default",
-        hostName: host_component.get('host.id'),
-        isInstalled: true
-      });
-    }, this);
-
-    result.push({
-      componentName: 'CLIENT',
-      displayName: 'client',
-      hosts: hosts,
-      isInstalled: true
-    })
-
-    return result;
-  },
-
-  /**
-   * Load master component hosts data for using in required step controllers
-   */
-  loadSlaveComponentHosts: function () {
-    var slaveComponentHosts = App.db.getSlaveComponentHosts();
-    if (!slaveComponentHosts) {
-      slaveComponentHosts = this.getSlaveComponentHosts();
-    }
-    this.set("content.slaveComponentHosts", slaveComponentHosts);
-    console.log("AddHostController.loadSlaveComponentHosts: loaded hosts ", slaveComponentHosts);
-  },
-
-  /**
-   * Save config properties
-   * @param stepController App.WizardStep7Controller
-   */
-  saveServiceConfigProperties: function (stepController) {
-    var serviceConfigProperties = [];
-    stepController.get('stepConfigs').forEach(function (_content) {
-      _content.get('configs').forEach(function (_configProperties) {
-        var displayType = _configProperties.get('displayType');
-        if (displayType === 'directories' || displayType === 'directory') {
-          var value = _configProperties.get('value').trim().split(/\s+/g).join(',');
-          _configProperties.set('value', value);
-        }
-        var configProperty = {
-          id: _configProperties.get('id'),
-          name: _configProperties.get('name'),
-          value: _configProperties.get('value'),
-          defaultValue: _configProperties.get('defaultValue'),
-          service: _configProperties.get('serviceName'),
-          domain:  _configProperties.get('domain'),
-          filename: _configProperties.get('filename')
-        };
-        serviceConfigProperties.push(configProperty);
-      }, this);
-
-    }, this);
-
-    App.db.setServiceConfigProperties(serviceConfigProperties);
-    this.set('content.serviceConfigProperties', serviceConfigProperties);
-
-    //TODO: Uncomment below code to enable slave Configuration
-
-    /*var slaveConfigProperties = [];
-    stepController.get('stepConfigs').forEach(function (_content) {
-      if (_content.get('configCategories').someProperty('isForSlaveComponent', true)) {
-        var slaveCategory = _content.get('configCategories').findProperty('isForSlaveComponent', true);
-        slaveCategory.get('slaveConfigs.groups').forEach(function (_group) {
-          _group.get('properties').forEach(function (_property) {
-            var displayType = _property.get('displayType');
-            if (displayType === 'directories' || displayType === 'directory') {
-              var value = _property.get('value').trim().split(/\s+/g).join(',');
-              _property.set('value', value);
-            }
-            _property.set('storeValue', _property.get('value'));
-          }, this);
-        }, this);
-        slaveConfigProperties.pushObject(slaveCategory.get('slaveConfigs'));
-      }
-    }, this);
-    App.db.setSlaveProperties(slaveConfigProperties);
-    this.set('content.slaveGroupProperties', slaveConfigProperties);*/
-  },
-
-  /**
-   * Load serviceConfigProperties to model
-   */
-  loadServiceConfigProperties: function () {
-    var serviceConfigProperties = App.db.getServiceConfigProperties();
-    this.set('content.serviceConfigProperties', serviceConfigProperties);
-    console.log("AddHostController.loadServiceConfigProperties: loaded config ", serviceConfigProperties);
-  },
-
-  /**
-   * Load information about hosts with clients components
-   */
-  loadClients: function () {
-    var clients = App.db.getClientsForSelectedServices();
-    this.set('content.clients', clients);
-    console.log("AddHostController.loadClients: loaded list ", clients);
-  },
-
-  /**
-   * Return a promise that resolves once cluster data is loaded
-   */
-  dataLoading: function () {
-    var dfd = $.Deferred();
-    this.connectOutlet('loading');
-    var interval = setInterval(function () {
-      if (App.router.get('clusterController.isLoaded')) {
-        dfd.resolve();
-        clearInterval(interval);
-      }
-    }, 50);
-    return dfd.promise();
-  },
-
-  /**
-   * Generate the clients list for selected services and save it to the model
-   */
-  saveClients: function () {
-    var clients = [];
-    var serviceComponents = require('data/service_components');
-    var hostComponents = App.HostComponent.find();
-
-    this.get('content.services').filterProperty('isSelected', true).forEach(function (_service) {
-      var client = serviceComponents.filterProperty('service_name', _service.serviceName).findProperty('isClient', true);
-      if (client) {
-        clients.pushObject({
-          component_name: client.component_name,
-          display_name: client.display_name,
-          isInstalled: hostComponents.filterProperty('componentName', client.component_name).length > 0
-        });
-      }
-    }, this);
-
-    App.db.setClientsForSelectedServices(clients);
-    this.set('content.clients', clients);
-    console.log("AddHostController.saveClients: saved list ", clients);
-  },
-
-  /**
-   * Load data for all steps until <code>current step</code>
-   */
-  loadAllPriorSteps: function () {
-    var step = this.get('currentStep');
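-    // Intentional fall-through: each case also runs the loaders of all earlier steps.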
-    switch (step) {
-      case '8':
-      case '7':
-      case '6':
-      case '5':
-      case '4':
-        this.loadServiceConfigProperties();
-      case '3':
-        this.loadClients();
-        this.loadServices();
-        this.loadMasterComponentHosts();
-        this.loadSlaveComponentHosts();
-        this.load('hosts');
-      case '2':
-        this.loadServices();
-      case '1':
-        this.load('hosts');
-        this.load('installOptions');
-        this.load('cluster');
-    }
-  },
-
-  /**
-   * load advanced configs for all selected services
-   */
-  loadAdvancedConfigs: function () {
-    this.get('content.services').filterProperty('isSelected', true).mapProperty('serviceName').forEach(function (_serviceName) {
-      this.loadAdvancedConfig(_serviceName);
-    }, this);
-  },
-
-  /**
-   * load advanced config for one service
-   * @param serviceName
-   */
-  loadAdvancedConfig: function (serviceName) {
-    var self = this;
-    var url = (App.testMode) ? '/data/wizard/stack/hdp/version01/' + serviceName + '.json' : App.apiPrefix + '/stacks/HDP/version/1.2.0/services/' + serviceName; // TODO: get this url from the stack selected by the user in Install Options page
-    var method = 'GET';
-    $.ajax({
-      type: method,
-      url: url,
-      async: false,
-      dataType: 'text',
-      timeout: App.timeout,
-      success: function (data) {
-        var jsonData = jQuery.parseJSON(data);
-        console.log("TRACE: Step6 submit -> In success function for the loadAdvancedConfig call");
-        console.log("TRACE: Step6 submit -> value of the url is: " + url);
-        var serviceComponents = jsonData.properties;
-        serviceComponents.setEach('serviceName', serviceName);
-        var configs;
-        if (App.db.getAdvancedServiceConfig()) {
-          configs = App.db.getAdvancedServiceConfig();
-        } else {
-          configs = [];
-        }
-        configs = configs.concat(serviceComponents);
-        self.set('content.advancedServiceConfig', configs);
-        App.db.setAdvancedServiceConfig(configs);
-        console.log('TRACE: servicename: ' + serviceName);
-      },
-
-      error: function (request, ajaxOptions, error) {
-        console.log("TRACE: STep6 submit -> In error function for the loadAdvancedConfig call");
-        console.log("TRACE: STep6 submit-> value of the url is: " + url);
-        console.log("TRACE: STep6 submit-> error code status is: " + request.status);
-        console.log('Step6 submit: Error message is: ' + request.responseText);
-      },
-
-      statusCode: require('data/statusCodes')
-    });
-  },
-
-  /**
-   * Remove all loaded data.
-   * Created as a copy of App.router.clearAllSteps
-   */
-  clearAllSteps: function () {
-    this.clearInstallOptions();
-    // clear temporary information stored during the install
-    this.set('content.cluster', this.getCluster());
-  },
-
-  /**
-   * Clear all temporary data
-   */
-  finish: function () {
-    this.setCurrentStep('1');
-    this.clearAllSteps();
-    this.clearStorageData();
-    App.router.get('updateController').updateAll();
-  }
-
-});
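loadAllPriorSteps above relies on deliberate fall-through between case
labels: starting from the current step, every earlier step's loaders also
run because no case ends with a break. A self-contained sketch of the
pattern (step names and loaders hypothetical):

    function loadAllPriorSteps(step, loaders) {
      switch (step) {
        case '3':
          loaders.step3();
          // falls through
        case '2':
          loaders.step2();
          // falls through
        case '1':
          loaders.step1();
      }
    }

    loadAllPriorSteps('3', {
      step3: function () { console.log('load clients, services, hosts'); },
      step2: function () { console.log('load services'); },
      step1: function () { console.log('load hosts, installOptions, cluster'); }
    });
    // Prints all three messages.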
diff --git a/branch-1.2/ambari-web/app/controllers/main/host/details.js b/branch-1.2/ambari-web/app/controllers/main/host/details.js
deleted file mode 100644
index bf79891..0000000
--- a/branch-1.2/ambari-web/app/controllers/main/host/details.js
+++ /dev/null
@@ -1,414 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.MainHostDetailsController = Em.Controller.extend({
-  name: 'mainHostDetailsController',
-  content: null,
-  isFromHosts: false,
-
-  /**
-   * open dashboard page
-   */
-  routeHome: function () {
-    App.router.transitionTo('main.dashboard');
-  },
-
-  /**
-   * open summary page of the selected service
-   * @param event
-   */
-  routeToService: function(event){
-    var service = event.context;
-    App.router.transitionTo('main.services.service.summary',service);
-  },
-
-  /**
-   * set new value to isFromHosts property
-   * @param isFromHosts new value
-   */
-  setBack: function(isFromHosts){
-    this.set('isFromHosts', isFromHosts);
-  },
-
-  /**
-   * Send specific command to server
-   * @param url
-   * @param data Object to send
-   */
-  sendCommandToServer : function(url, postData, callback){
-    var fullUrl = (App.testMode) ?
-      '/data/wizard/deploy/poll_1.json' : //content is the same as ours
-      App.apiPrefix + '/clusters/' + App.router.getClusterName() + url;
-
-    var method = App.testMode ? 'GET' : 'PUT';
-
-    $.ajax({
-      type: method,
-      url: fullUrl,
-      data: JSON.stringify(postData),
-      dataType: 'json',
-      timeout: App.timeout,
-      success: function(data){
-        if(data && data.Requests){
-          callback(data.Requests.id);
-        } else{
-          callback(null);
-          console.log('cannot get request id from ', data);
-        }
-      },
-
-      error: function (request, ajaxOptions, error) {
-        //request failed: report the error and pass null to the callback
-        callback(null);
-        console.log('error on changing component host state');
-      },
-
-      statusCode: require('data/statusCodes')
-    });
-  },
-
-  /**
-   * send command to server to start selected host component
-   * @param event
-   */
-  startComponent: function (event) {
-    var self = this;
-    App.ModalPopup.show({
-      header: Em.I18n.t('hosts.host.start.popup.header'),
-      body: Em.I18n.t('hosts.host.start.popup.body'),
-      primary: 'Yes',
-      secondary: 'No',
-      onPrimary: function() {
-        var component = event.context;
-
-        self.sendCommandToServer('/hosts/' + self.get('content.hostName') + '/host_components/' + component.get('componentName').toUpperCase(),{
-          HostRoles:{
-            state: 'STARTED'
-          }
-        }, function(requestId){
-
-          if(!requestId){
-            return;
-          }
-
-          console.log('Send request for STARTING successfully');
-
-          if (App.testMode) {
-            component.set('workStatus', App.HostComponentStatus.starting);
-            setTimeout(function(){
-              component.set('workStatus', App.HostComponentStatus.started);
-            },10000);
-          } else {
-            App.router.get('clusterController').loadUpdatedStatusDelayed(500);
-            App.router.get('backgroundOperationsController.eventsArray').push({
-              "when" : function(controller){
-                var result = (controller.getOperationsForRequestId(requestId).length == 0);
-                console.log('startComponent.when = ', result)
-                return result;
-              },
-              "do" : function(){
-                App.router.get('clusterController').loadUpdatedStatus();
-              }
-            });
-          }
-
-          App.router.get('backgroundOperationsController').showPopup();
-
-        });
-
-        this.hide();
-      },
-      onSecondary: function() {
-        this.hide();
-      }
-    });
-  },
-
-  /**
-   * send command to server to stop selected host component
-   * @param event
-   */
-  stopComponent: function (event) {
-    var self = this;
-    App.ModalPopup.show({
-      header: Em.I18n.t('hosts.host.start.popup.header'),
-      body: Em.I18n.t('hosts.host.start.popup.body'),
-      primary: 'Yes',
-      secondary: 'No',
-      onPrimary: function() {
-        var component = event.context;
-
-        self.sendCommandToServer('/hosts/' + self.get('content.hostName') + '/host_components/' + component.get('componentName').toUpperCase(),{
-          HostRoles:{
-            state: 'INSTALLED'
-          }
-        }, function(requestId){
-          if(!requestId){
-            return;
-          }
-
-          console.log('Send request for STOPPING successfully');
-
-          if (App.testMode) {
-            component.set('workStatus', App.HostComponentStatus.stopping);
-            setTimeout(function(){
-              component.set('workStatus', App.HostComponentStatus.stopped);
-            },10000);
-          } else {
-            App.router.get('clusterController').loadUpdatedStatus();
-            App.router.get('backgroundOperationsController.eventsArray').push({
-              "when" : function(controller){
-                var result = (controller.getOperationsForRequestId(requestId).length == 0);
-                console.log('stopComponent.when = ', result)
-                return result;
-              },
-              "do" : function(){
-                App.router.get('clusterController').loadUpdatedStatus();
-              }
-            });
-          }
-
-          App.router.get('backgroundOperationsController').showPopup();
-
-        });
-
-        this.hide();
-      },
-      onSecondary: function() {
-        this.hide();
-      }
-    });
-  },
-
-  /**
-   * send command to server to run decommission on DATANODE
-   * @param event
-   */
-  decommission: function(event){
-    var self = this;
-    var decommissionHostNames = this.get('view.decommissionDataNodeHostNames');
-    if (decommissionHostNames == null) {
-      decommissionHostNames = [];
-    }
-    App.ModalPopup.show({
-      header: Em.I18n.t('hosts.host.start.popup.header'),
-      body: Em.I18n.t('hosts.host.start.popup.body'),
-      primary: 'Yes',
-      secondary: 'No',
-      onPrimary: function(){
-        var component = event.context;
-        // Only HDFS service as of now
-        var svcName = component.get('service.serviceName');
-        if (svcName === "HDFS") {
-          var hostName = self.get('content.hostName');
-          var index = decommissionHostNames.indexOf(hostName);
-          if (index < 0) {
-            decommissionHostNames.push(hostName);
-          }
-          self.doDatanodeDecommission(decommissionHostNames);
-        }
-        App.router.get('backgroundOperationsController').showPopup();
-        this.hide();
-      },
-      onSecondary: function() {
-        this.hide();
-      }
-    });
-  },
-
-  /**
-   * Performs either decommission or recommission by updating the hosts list
-   * on the server.
-   */
-  doDatanodeDecommission: function(decommissionHostNames){
-    var self = this;
-    if (decommissionHostNames == null) {
-      decommissionHostNames = [];
-    }
-    var invocationTag = String(new Date().getTime());
-    var clusterName = App.router.get('clusterController.clusterName');
-    var clusterUrl = App.apiPrefix + '/clusters/' + clusterName;
-    var configsUrl = clusterUrl + '/configurations';
-    var configsData = {
-      type: "hdfs-exclude-file",
-      tag: invocationTag,
-      properties: {
-        datanodes: decommissionHostNames.join(',')
-      }
-    };
-    var configsAjax = {
-      type: 'POST',
-      url: configsUrl,
-      dataType: 'json',
-      data: JSON.stringify(configsData),
-      timeout: App.timeout,
-      success: function(){
-        var actionsUrl = clusterUrl + '/services/HDFS/actions/DECOMMISSION_DATANODE';
-        var actionsData = {
-          parameters: {
-            excludeFileTag: invocationTag
-          }
-        }
-        var actionsAjax = {
-          type: 'POST',
-          url: actionsUrl,
-          dataType: 'json',
-          data: JSON.stringify(actionsData),
-          timeout: App.timeout,
-          success: function(){
-            var persistUrl = App.apiPrefix + '/persist';
-            var persistData = {
-              "decommissionDataNodesTag": invocationTag
-            };
-            var persistPutAjax = {
-              type: 'POST',
-              url: persistUrl,
-              dataType: 'json',
-              data: JSON.stringify(persistData),
-              timeout: App.timeout,
-              success: function(){
-                var view = self.get('view');
-                view.loadDecommissionNodesList();
-              }
-            };
-            jQuery.ajax(persistPutAjax);
-          },
-          error: function(xhr, textStatus, errorThrown){
-            console.log(textStatus);
-            console.log(errorThrown);
-          }
-        };
-        jQuery.ajax(actionsAjax);
-      },
-      error: function(xhr, textStatus, errorThrown){
-        console.log(textStatus);
-        console.log(errorThrown);
-      }
-    }
-    jQuery.ajax(configsAjax);
-  },
-
-  /**
-   * send command to server to run recommission on DATANODE
-   * @param event
-   */
-  recommission: function(event){
-    var self = this;
-    var decommissionHostNames = this.get('view.decommissionDataNodeHostNames');
-    if (decommissionHostNames == null) {
-      decommissionHostNames = [];
-    }
-    App.ModalPopup.show({
-      header: Em.I18n.t('hosts.host.start.popup.header'),
-      body: Em.I18n.t('hosts.host.start.popup.body'),
-      primary: 'Yes',
-      secondary: 'No',
-      onPrimary: function(){
-        var component = event.context;
-        // Only HDFS service as of now
-        var svcName = component.get('service.serviceName');
-        if (svcName === "HDFS") {
-          var hostName = self.get('content.hostName');
-          var index = decommissionHostNames.indexOf(hostName);
-          decommissionHostNames.splice(index, 1);
-          self.doDatanodeDecommission(decommissionHostNames);
-        }
-        App.router.get('backgroundOperationsController').showPopup();
-        this.hide();
-      },
-      onSecondary: function(){
-        this.hide();
-      }
-    });
-  },
-
-  /**
-   * Deletion of hosts is not supported in this version.
-   *
-   * validateDeletion: function () {
-   *   var slaveComponents = ['DataNode', 'TaskTracker', 'RegionServer'];
-   *   var masterComponents = [];
-   *   var workingComponents = [];
-   *
-   *   var components = this.get('content.components');
-   *   components.forEach(function (cInstance) {
-   *     var cName = cInstance.get('componentName');
-   *     if (slaveComponents.contains(cName)) {
-   *       if (cInstance.get('workStatus') === App.HostComponentStatus.stopped &&
-   *           !cInstance.get('decommissioned')) {
-   *         workingComponents.push(cName);
-   *       }
-   *     } else {
-   *       masterComponents.push(cName);
-   *     }
-   *   });
-   *   if (workingComponents.length || masterComponents.length) {
-   *     this.raiseWarning(workingComponents, masterComponents);
-   *   } else {
-   *     this.deleteButtonPopup();
-   *   }
-   * },
-   */
-
-  raiseWarning: function (workingComponents, masterComponents) {
-    var self = this;
-    var masterString = '';
-    var workingString = '';
-    if(masterComponents && masterComponents.length) {
-      var masterList = masterComponents.join(', ');
-      var ml_text = Em.I18n.t('hosts.cant.do.popup.masterList.body');
-      masterString = ml_text.format(masterList);
-    }
-    if(workingComponents && workingComponents.length) {
-      var workingList = workingComponents.join(', ');
-      var wl_text = Em.I18n.t('hosts.cant.do.popup.workingList.body');
-      workingString = wl_text.format(workingList);
-    }
-    App.ModalPopup.show({
-      header: Em.I18n.t('hosts.cant.do.popup.header'),
-      html: true,
-      body: masterString + workingString,
-      primary: "OK",
-      secondary: null,
-      onPrimary: function() {
-        this.hide();
-      }
-    })
-  },
-
-  /**
-   * show confirmation popup to delete host
-   */
-  deleteButtonPopup: function() {
-    var self = this;
-    App.ModalPopup.show({
-      header: Em.I18n.t('hosts.delete.popup.header'),
-      body: Em.I18n.t('hosts.delete.popup.body'),
-      primary: 'Yes',
-      secondary: 'No',
-      onPrimary: function() {
-        self.removeHost();
-        this.hide();
-      },
-      onSecondary: function() {
-        this.hide();
-      }
-    });
-  },
-
-  /**
-   * remove host and open hosts page
-   */
-  removeHost: function () {
-    App.router.get('mainHostController').checkRemoved(this.get('content.id'));
-    App.router.transitionTo('hosts');
-  }
-
-})
\ No newline at end of file
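doDatanodeDecommission above chains three POSTs: write an hdfs-exclude-file
configuration under a fresh tag, trigger the DECOMMISSION_DATANODE action
pointing at that tag, then persist the tag so a later recommission can
update it. The same sequence flattened into a sketch (cluster name and host
list hypothetical):

    var tag = String(new Date().getTime());
    var clusterUrl = App.apiPrefix + '/clusters/MyCluster'; // hypothetical cluster
    jQuery.ajax({
      type: 'POST',
      url: clusterUrl + '/configurations',
      data: JSON.stringify({
        type: 'hdfs-exclude-file',
        tag: tag,
        properties: { datanodes: 'host1,host2' } // hypothetical host list
      }),
      success: function () {
        jQuery.ajax({
          type: 'POST',
          url: clusterUrl + '/services/HDFS/actions/DECOMMISSION_DATANODE',
          data: JSON.stringify({ parameters: { excludeFileTag: tag } }),
          success: function () {
            jQuery.ajax({
              type: 'POST',
              url: App.apiPrefix + '/persist',
              data: JSON.stringify({ decommissionDataNodesTag: tag })
            });
          }
        });
      }
    });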
diff --git a/branch-1.2/ambari-web/app/controllers/main/service.js b/branch-1.2/ambari-web/app/controllers/main/service.js
deleted file mode 100644
index 2147a4e..0000000
--- a/branch-1.2/ambari-web/app/controllers/main/service.js
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.MainServiceController = Em.ArrayController.extend({
-  name:'mainServiceController',
-  content: function(){
-    if(!App.router.get('clusterController.isLoaded')){
-      return [];
-    }
-    return App.Service.find();
-  }.property('App.router.clusterController.isLoaded'),
-
-  hdfsService: function () {
-    var hdfsSvcs = App.HDFSService.find();
-    if (hdfsSvcs && hdfsSvcs.get('length') > 0) {
-      return hdfsSvcs.objectAt(0);
-    }
-    return null;
-  }.property('App.router.clusterController.isLoaded', 'App.router.updateController.isUpdated')
-})
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/controllers/main/service/add_controller.js b/branch-1.2/ambari-web/app/controllers/main/service/add_controller.js
deleted file mode 100644
index 3789887..0000000
--- a/branch-1.2/ambari-web/app/controllers/main/service/add_controller.js
+++ /dev/null
@@ -1,589 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-var App = require('app');
-
-App.AddServiceController = App.WizardController.extend({
-
-  name: 'addServiceController',
-
-  totalSteps: 7,
-
-  /**
-   * Used for hiding back button in wizard
-   */
-  hideBackButton: true,
-
-  /**
-   * All wizards data will be stored in this variable
-   *
-   * cluster - cluster name
-   * installOptions - ssh key, repo info, etc.
-   * services - services list
-   * hosts - list of selected hosts
-   * slaveComponentHosts, - info about slave hosts
-   * masterComponentHosts - info about master hosts
-   * config??? - to be described later
-   */
-  content: Em.Object.create({
-    cluster: null,
-    hosts: null,
-    installOptions: null,
-    services: null,
-    slaveComponentHosts: null,
-    masterComponentHosts: null,
-    serviceConfigProperties: null,
-    advancedServiceConfig: null,
-    controllerName: 'addServiceController',
-    isWizard: true
-  }),
-
-  /**
-   * return new object extended from clusterStatusTemplate
-   * @return Object
-   */
-  getCluster: function(){
-    return jQuery.extend({}, this.get('clusterStatusTemplate'), {name: App.router.getClusterName()});
-  },
-
-  /**
-   * Load confirmed hosts.
-   * Will be used at <code>Assign Masters(step5)</code> step
-   */
-  loadConfirmedHosts: function(){
-    var hosts = App.db.getHosts();
-    if(!hosts){
-      hosts = {};
-
-      App.Host.find().forEach(function(item){
-        hosts[item.get('id')] = {
-          name: item.get('id'),
-          cpu: item.get('cpu'),
-          memory: item.get('memory'),
-          disk_info: item.get('diskInfo'),
-          bootStatus: "REGISTERED",
-          isInstalled: true
-        };
-      });
-      App.db.setHosts(hosts);
-    }
-
-    this.set('content.hosts', hosts);
-    console.log('AddServiceController.loadConfirmedHosts: loaded hosts', hosts);
-  },
-
-  /**
-   * Save data after installation to main controller
-   * @param stepController App.WizardStep9Controller
-   */
-  saveInstalledHosts: function (stepController) {
-    var hosts = stepController.get('hosts');
-    var hostInfo = App.db.getHosts();
-
-    for (var index in hostInfo) {
-      hostInfo[index].status = "pending";
-      var host = hosts.findProperty('name', hostInfo[index].name);
-      if (host) {
-        hostInfo[index].status = host.status;
-        hostInfo[index].message = host.message;
-        hostInfo[index].progress = host.progress;
-      }
-    }
-    this.set('content.hosts', hostInfo);
-    this.save('hosts');
-    console.log('AddServiceController:saveInstalledHosts: save hosts ', hostInfo);
-  },
-
-  /**
-   * Load services data from server.
-   */
-  loadServicesFromServer: function() {
-    var displayOrderConfig = require('data/services');
-    var apiUrl = App.get('stackVersionURL');
-    var apiService = this.loadServiceComponents(displayOrderConfig, apiUrl);
-    apiService.forEach(function(item, index){
-      apiService[index].isSelected = App.Service.find().someProperty('id', item.serviceName);
-      apiService[index].isDisabled = apiService[index].isSelected;
-      apiService[index].isInstalled = apiService[index].isSelected;
-    });
-    this.set('content.services', apiService);
-    App.db.setService(apiService);
-  },
-
-  /**
-   * Load services data. Will be used at <code>Select services(step4)</code> step
-   */
-  loadServices: function () {
-    var servicesInfo = App.db.getService();
-    servicesInfo.forEach(function (item, index) {
-      servicesInfo[index] = Em.Object.create(item);
-    });
-    this.set('content.services', servicesInfo);
-    console.log('AddServiceController.loadServices: loaded data ', servicesInfo);
-
-    var serviceNames = servicesInfo.filterProperty('isSelected', true).filterProperty('isDisabled', false).mapProperty('serviceName');
-    console.log('selected services ', serviceNames);
-
-    this.set('content.missSlavesStep', !serviceNames.contains('MAPREDUCE') && !serviceNames.contains('HBASE'));
-  },
-
-  /**
-   * Save data to model
-   * @param stepController App.WizardStep4Controller
-   */
-  saveServices: function (stepController) {
-    var serviceNames = [];
-    App.db.setService(stepController.get('content'));
-    console.log('AddServiceController.saveServices: saved data', stepController.get('content'));
-    stepController.filterProperty('isSelected', true).filterProperty('isInstalled', false).forEach(function (item) {
-      serviceNames.push(item.serviceName);
-    });
-    this.set('content.selectedServiceNames', serviceNames);
-    App.db.setSelectedServiceNames(serviceNames);
-    console.log('AddServiceController.selectedServiceNames:', serviceNames);
-
-    this.set('content.missSlavesStep', !serviceNames.contains('MAPREDUCE') && !serviceNames.contains('HBASE'));
-  },
-
-  /**
-   * Save Master Component Hosts data to Main Controller
-   * @param stepController App.WizardStep5Controller
-   */
-  saveMasterComponentHosts: function (stepController) {
-    var obj = stepController.get('selectedServicesMasters');
-    var masterComponentHosts = [];
-    var installedComponents = App.HostComponent.find();
-
-    obj.forEach(function (_component) {
-        masterComponentHosts.push({
-          display_name: _component.display_name,
-          component: _component.component_name,
-          hostName: _component.selectedHost,
-          serviceId: _component.serviceId,
-          isInstalled: installedComponents.someProperty('componentName', _component.component_name)
-        });
-    });
-
-    console.log("AddServiceController.saveMasterComponentHosts: saved hosts ", masterComponentHosts);
-    App.db.setMasterComponentHosts(masterComponentHosts);
-    this.set('content.masterComponentHosts', masterComponentHosts);
-
-    this.set('content.missMasterStep', this.get('content.masterComponentHosts').everyProperty('isInstalled', true));
-  },
-
-  /**
-   * Load master component hosts data for use in the required step controllers
-   */
-  loadMasterComponentHosts: function () {
-    var masterComponentHosts = App.db.getMasterComponentHosts();
-    if(!masterComponentHosts){
-      masterComponentHosts = [];
-      App.HostComponent.find().filterProperty('isMaster', true).forEach(function(item){
-        masterComponentHosts.push({
-          component: item.get('componentName'),
-          hostName: item.get('host.hostName'),
-          isInstalled: true
-        });
-      });
-    }
-    this.set("content.masterComponentHosts", masterComponentHosts);
-    console.log("AddServiceController.loadMasterComponentHosts: loaded hosts ", masterComponentHosts);
-
-    this.set('content.missMasterStep', this.get('content.masterComponentHosts').everyProperty('isInstalled', true));
-  },
-
-  /**
-   * Save slaveHostComponents to main controller
-   * @param stepController
-   */
-  saveSlaveComponentHosts: function (stepController) {
-
-    var hosts = stepController.get('hosts');
-    var isMrSelected = stepController.get('isMrSelected');
-    var isHbSelected = stepController.get('isHbSelected');
-
-    var dataNodeHosts = [];
-    var taskTrackerHosts = [];
-    var regionServerHosts = [];
-    var clientHosts = [];
-
-    hosts.forEach(function (host) {
-
-      if (host.get('isDataNode')) {
-        dataNodeHosts.push({
-          hostName: host.hostName,
-          group: 'Default',
-          isInstalled: host.get('isDataNodeInstalled')
-        });
-      }
-      if (isMrSelected && host.get('isTaskTracker')) {
-        taskTrackerHosts.push({
-          hostName: host.hostName,
-          group: 'Default',
-          isInstalled: host.get('isTaskTrackerInstalled')
-        });
-      }
-      if (isHbSelected && host.get('isRegionServer')) {
-        regionServerHosts.push({
-          hostName: host.hostName,
-          group: 'Default',
-          isInstalled: host.get('isRegionServerInstalled')
-        });
-      }
-      if (host.get('isClient')) {
-        clientHosts.pushObject({
-          hostName: host.hostName,
-          group: 'Default',
-          isInstalled: host.get('isClientInstalled')
-        });
-      }
-    }, this);
-
-    var slaveComponentHosts = [];
-    slaveComponentHosts.push({
-      componentName: 'DATANODE',
-      displayName: 'DataNode',
-      hosts: dataNodeHosts
-    });
-    if (isMrSelected) {
-      slaveComponentHosts.push({
-        componentName: 'TASKTRACKER',
-        displayName: 'TaskTracker',
-        hosts: taskTrackerHosts
-      });
-    }
-    if (isHbSelected) {
-      slaveComponentHosts.push({
-        componentName: 'HBASE_REGIONSERVER',
-        displayName: 'RegionServer',
-        hosts: regionServerHosts
-      });
-    }
-    slaveComponentHosts.pushObject({
-      componentName: 'CLIENT',
-      displayName: 'client',
-      hosts: clientHosts
-    });
-
-    App.db.setSlaveComponentHosts(slaveComponentHosts);
-    console.log('addServiceController.slaveComponentHosts: saved hosts', slaveComponentHosts);
-    this.set('content.slaveComponentHosts', slaveComponentHosts);
-  },
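-
-  /*
-   * A minimal sketch (host names hypothetical) of the structure persisted
-   * above; each slave component carries its own host list:
-   *
-   *   [{ componentName: 'DATANODE', displayName: 'DataNode',
-   *      hosts: [{ hostName: 'host1', group: 'Default', isInstalled: false }] },
-   *    { componentName: 'CLIENT', displayName: 'client',
-   *      hosts: [{ hostName: 'host2', group: 'Default', isInstalled: true }] }]
-   */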
-
-  /**
-   * return slaveComponents bound to hosts
-   * @return {Array}
-   */
-  getSlaveComponentHosts: function () {
-    var components = [
-      { name: 'DATANODE', service: 'HDFS' },
-      { name: 'TASKTRACKER', service: 'MAPREDUCE' },
-      { name: 'HBASE_REGIONSERVER', service: 'HBASE' }
-    ];
-
-    var result = [];
-    var services = App.Service.find();
-    var selectedServices = this.get('content.services').filterProperty('isSelected', true).mapProperty('serviceName');
-    for(var index=0; index < components.length; index++){
-      var comp = components[index];
-      if(!selectedServices.contains(comp.service)){
-        continue;
-      }
-
-      var service = services.findProperty('id', comp.service);
-      var hosts = [];
-
-      if(!service){
-        service = services.findProperty('id', 'HDFS');
-        service.get('hostComponents').filterProperty('componentName', 'DATANODE').forEach(function (host_component) {
-          hosts.push({
-            group: "Default",
-            hostName: host_component.get('host.id'),
-            isInstalled: false
-          });
-        }, this);
-      } else {
-        service.get('hostComponents').filterProperty('componentName', comp.name).forEach(function (host_component) {
-          hosts.push({
-            group: "Default",
-            hostName: host_component.get('host.id'),
-            isInstalled: true
-          });
-        }, this);
-      }
-
-      result.push({
-        componentName: comp.name,
-        displayName: App.format.role(comp.name),
-        hosts: hosts
-      });
-    }
-
-    var clientsHosts = App.HostComponent.find().filterProperty('componentName', 'HDFS_CLIENT');
-    var hosts = [];
-
-    clientsHosts.forEach(function (host_component) {
-        hosts.push({
-          group: "Default",
-          hostName: host_component.get('host.id'),
-          isInstalled: true
-        });
-    }, this);
-
-    result.push({
-      componentName: 'CLIENT',
-      displayName: 'client',
-      hosts: hosts
-    });
-
-    return result;
-  },
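-
-  /*
-   * Note on the fallback above: a selected service that is not installed yet
-   * has no hostComponents of its own, so its slaves default to the current
-   * DATANODE hosts with isInstalled: false; installed services use their
-   * actual component hosts with isInstalled: true.
-   */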
-
-  /**
-   * Load slave component hosts data for use in the required step controllers
-   */
-  loadSlaveComponentHosts: function () {
-    var slaveComponentHosts = App.db.getSlaveComponentHosts();
-    if(!slaveComponentHosts){
-      slaveComponentHosts = this.getSlaveComponentHosts();
-    }
-    this.set("content.slaveComponentHosts", slaveComponentHosts);
-    console.log("AddServiceController.loadSlaveComponentHosts: loaded hosts ", slaveComponentHosts);
-  },
-
-  /**
-   * Save config properties
-   * @param stepController Step7WizardController
-   */
-  saveServiceConfigProperties: function (stepController) {
-    var serviceConfigProperties = [];
-    stepController.get('stepConfigs').forEach(function (_content) {
-      _content.get('configs').forEach(function (_configProperties) {
-        var displayType = _configProperties.get('displayType');
-        if (displayType === 'directories' || displayType === 'directory') {
-          var value = _configProperties.get('value').trim().split(/\s+/g).join(',');
-          _configProperties.set('value', value);
-        }
-        var configProperty = {
-          id: _configProperties.get('id'),
-          name: _configProperties.get('name'),
-          value: _configProperties.get('value'),
-          defaultValue: _configProperties.get('defaultValue'),
-          service: _configProperties.get('serviceName'),
-          domain:  _configProperties.get('domain'),
-          filename: _configProperties.get('filename')
-        };
-        serviceConfigProperties.push(configProperty);
-      }, this);
-
-    }, this);
-
-    App.db.setServiceConfigProperties(serviceConfigProperties);
-    this.set('content.serviceConfigProperties', serviceConfigProperties);
-
-    //TODO: Uncomment below code to enable slave Configuration
-
-    /*var slaveConfigProperties = [];
-    stepController.get('stepConfigs').forEach(function (_content) {
-      if (_content.get('configCategories').someProperty('isForSlaveComponent', true)) {
-        var slaveCategory = _content.get('configCategories').findProperty('isForSlaveComponent', true);
-        slaveCategory.get('slaveConfigs.groups').forEach(function (_group) {
-          _group.get('properties').forEach(function (_property) {
-            var displayType = _property.get('displayType');
-            if (displayType === 'directories' || displayType === 'directory') {
-              var value = _property.get('value').trim().split(/\s+/g).join(',');
-              _property.set('value', value);
-            }
-            _property.set('storeValue', _property.get('value'));
-          }, this);
-        }, this);
-        slaveConfigProperties.pushObject(slaveCategory.get('slaveConfigs'));
-      }
-    }, this);
-    App.db.setSlaveProperties(slaveConfigProperties);
-    this.set('content.slaveGroupProperties', slaveConfigProperties);*/
-  },
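-
-  /*
-   * Sketch of the directory normalization above (paths hypothetical): a
-   * 'directories' value of '/grid/0/hdfs\n/grid/1/hdfs' is collapsed to
-   * '/grid/0/hdfs,/grid/1/hdfs' before the property is persisted.
-   */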
-
-  /**
-   * Load serviceConfigProperties to model
-   */
-  loadServiceConfigProperties: function () {
-    var serviceConfigProperties = App.db.getServiceConfigProperties();
-    this.set('content.serviceConfigProperties', serviceConfigProperties);
-    console.log("AddServiceController.loadServiceConfigProperties: loaded config ", serviceConfigProperties);
-  },
-
-  /**
-   * Load information about hosts with clients components
-   */
-  loadClients: function(){
-    var clients = App.db.getClientsForSelectedServices();
-    this.set('content.clients', clients);
-    console.log("AddServiceController.loadClients: loaded list ", clients);
-  },
-
-  /**
-   * return a promise that resolves once the cluster data is loaded
-   */
-  dataLoading: function(){
-    var dfd = $.Deferred();
-    this.connectOutlet('loading');
-    if (App.router.get('clusterController.isLoaded')){
-      dfd.resolve();
-    } else{
-      var interval = setInterval(function(){
-        if (App.router.get('clusterController.isLoaded')){
-          dfd.resolve();
-          clearInterval(interval);
-        }
-      },50);
-    }
-    return dfd.promise();
-  },
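-
-  /*
-   * Illustrative usage (route name hypothetical): callers wait on the
-   * returned promise instead of polling clusterController.isLoaded themselves:
-   *
-   *   controller.dataLoading().done(function () {
-   *     router.transitionTo('step1');
-   *   });
-   */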
-
-  /**
-   * Generate clients list for selected services and save it to model
-   * @param stepController step4WizardController
-   */
-  saveClients: function(stepController){
-    var clients = [];
-    var serviceComponents = require('data/service_components');
-    var hostComponents = App.HostComponent.find();
-
-    stepController.get('content').filterProperty('isSelected',true).forEach(function (_service) {
-      var client = serviceComponents.filterProperty('service_name', _service.serviceName).findProperty('isClient', true);
-      if (client) {
-        clients.pushObject({
-          component_name: client.component_name,
-          display_name: client.display_name,
-          isInstalled: hostComponents.filterProperty('componentName', client.component_name).length > 0
-        });
-      }
-    }, this);
-
-    App.db.setClientsForSelectedServices(clients);
-    this.set('content.clients', clients);
-    console.log("AddServiceController.saveClients: saved list ", clients);
-  },
-
-  /**
-   * Load data for all steps until <code>current step</code>
-   */
-  loadAllPriorSteps: function () {
-    var step = this.get('currentStep');
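-    // NOTE: the cases below intentionally fall through (no break), so
-    // entering step N also loads the data of every earlier step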
-    switch (step) {
-      case '7':
-      case '6':
-      case '5':
-        this.load('cluster');
-      case '4':
-        this.loadServiceConfigProperties();
-      case '3':
-        this.loadServices();
-        this.loadClients();
-        this.loadSlaveComponentHosts();//depends on loadServices
-      case '2':
-        this.loadMasterComponentHosts();
-        this.loadConfirmedHosts();
-      case '1':
-        this.loadServices();
-    }
-  },
-
-  /**
-   * load advanced configs for all selected services
-   */
-  loadAdvancedConfigs: function () {
-    App.db.getSelectedServiceNames().forEach(function (_serviceName) {
-      this.loadAdvancedConfig(_serviceName);
-    }, this);
-  },
-
-  /**
-   * load advanced config for one service
-   * @param serviceName
-   */
-  loadAdvancedConfig: function (serviceName) {
-    var self = this;
-    var url = (App.testMode) ? '/data/wizard/stack/hdp/version01/' + serviceName + '.json' : App.apiPrefix + App.get('stackVersionURL') +'/services/' + serviceName; // TODO: get this url from the stack selected by the user in Install Options page
-    var method = 'GET';
-    $.ajax({
-      type: method,
-      url: url,
-      async: false,
-      dataType: 'text',
-      timeout: App.timeout,
-      success: function (data) {
-        var jsonData = jQuery.parseJSON(data);
-        console.log("TRACE: Step6 submit -> In success function for the loadAdvancedConfig call");
-        console.log("TRACE: Step6 submit -> value of the url is: " + url);
-        var serviceComponents = jsonData.properties;
-        serviceComponents.setEach('serviceName', serviceName);
-        var configs = App.db.getAdvancedServiceConfig() || [];
-        configs = configs.concat(serviceComponents);
-        self.set('content.advancedServiceConfig', configs);
-        App.db.setAdvancedServiceConfig(configs);
-        console.log('TRACE: servicename: ' + serviceName);
-      },
-
-      error: function (request, ajaxOptions, error) {
-        console.log("TRACE: STep6 submit -> In error function for the loadAdvancedConfig call");
-        console.log("TRACE: STep6 submit-> value of the url is: " + url);
-        console.log("TRACE: STep6 submit-> error code status is: " + request.status);
-        console.log('Step6 submit: Error message is: ' + request.responseText);
-      },
-
-      statusCode: require('data/statusCodes')
-    });
-  },
-
-  /**
-   * Remove all loaded data.
-   * Created as copy for App.router.clearAllSteps
-   */
-  clearAllSteps: function () {
-    this.clearInstallOptions();
-    // clear temporary information stored during the install
-    this.set('content.cluster', this.getCluster());
-  },
-
-  /**
-   * Clear all temporary data
-   */
-  finish: function () {
-    this.setCurrentStep('1');
-    this.clearAllSteps();
-    this.clearStorageData();
-    App.router.get('updateController').updateAll();
-  }
-
-});
diff --git a/branch-1.2/ambari-web/app/controllers/main/service/info/audit.js b/branch-1.2/ambari-web/app/controllers/main/service/info/audit.js
deleted file mode 100644
index 1a17ac2..0000000
--- a/branch-1.2/ambari-web/app/controllers/main/service/info/audit.js
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.MainServiceInfoAuditController = Em.Controller.extend({
-  name: 'mainServiceInfoAuditController'
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/controllers/main/service/info/configs.js b/branch-1.2/ambari-web/app/controllers/main/service/info/configs.js
deleted file mode 100644
index 229864e..0000000
--- a/branch-1.2/ambari-web/app/controllers/main/service/info/configs.js
+++ /dev/null
@@ -1,1107 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-require('controllers/wizard/slave_component_groups_controller');
-
-App.MainServiceInfoConfigsController = Em.Controller.extend({
-  name: 'mainServiceInfoConfigsController',
-  dataIsLoaded: false,
-  stepConfigs: [], //contains all field properties that are viewed in this service
-  selectedService: null,
-  serviceConfigTags: null,
-  globalConfigs: [],
-  uiConfigs: [],
-  customConfig: [],
-  isApplyingChanges: false,
-  serviceConfigs: require('data/service_configs'),
-  configs: require('data/config_properties').configProperties,
-  configMapping: require('data/config_mapping'),
-  customConfigs: require('data/custom_configs'),
-
-  isSubmitDisabled: function () {
-    return (!(this.stepConfigs.everyProperty('errorCount', 0)) || this.get('isApplyingChanges'));
-  }.property('stepConfigs.@each.errorCount', 'isApplyingChanges'),
-
-  slaveComponentGroups: null,
-
-  /**
-   * clear and set properties to default value
-   */
-  clearStep: function () {
-    this.set('dataIsLoaded', false);
-    this.get('stepConfigs').clear();
-    this.get('globalConfigs').clear();
-    this.get('uiConfigs').clear();
-    this.get('customConfig').clear();
-    if (this.get('serviceConfigTags')) {
-      this.set('serviceConfigTags', null);
-    }
-  },
-
-  serviceConfigProperties: function () {
-    return App.db.getServiceConfigProperties();
-  }.property('content'),
-
-  /**
-   * On load function
-   */
-  loadStep: function () {
-    console.log("TRACE: Loading configure for service");
-    this.clearStep();
-    // this.set('serviceConfigs',require('data/service_configs'));
-    //STEP 1: set the present state of the service properties. State is the set of unique (type, tag) combinations (e.g. core-site, version01) derived from the service's desired_configs
-    this.loadMasterComponents();
-    //this.loadSlaveComponentVersion();
-  },
-
-  /**
-   * loads Master component properties
-   */
-  loadMasterComponents: function () {
-    this.setServiceConfigs();
-  },
-
-
-  /**
-   * loads slave Group Version from Ambari UI Database
-   */
-  loadSlaveComponentVersion: function () {
-    var self = this;
-    var url = App.apiPrefix + '/persist/current_version';
-    $.ajax({
-      type: 'GET',
-      url: url,
-      timeout: 10000,
-
-      success: function (data) {
-        var jsonData = jQuery.parseJSON(data);
-        console.log("TRACE: In success function for the GET loadSlaveComponentGroup call");
-        console.log("TRACE: The url is: " + url);
-        self.loadSlaveComponentGroup(jsonData["current_version"]);
-      },
-
-      error: function (request, ajaxOptions, error) {
-        console.log("TRACE: In error function for the getServciceConfigs call");
-        console.log("TRACE: value of the url is: " + url);
-        console.log("TRACE: error code status is: " + request.status);
-
-      },
-
-      statusCode: require('data/statusCodes')
-    });
-
-  },
-
-  /**
-   * loads slave Group properties of the currently applied version from the Ambari UI Database
-   */
-  loadSlaveComponentGroup: function (version) {
-    var self = this;
-    var url = App.apiPrefix + '/persist/' + version;
-    $.ajax({
-      type: 'GET',
-      url: url,
-      timeout: 10000,
-      success: function (data) {
-        var jsonData = jQuery.parseJSON(data);
-        console.log("TRACE: In success function for the GET loadSlaveComponentGroup call");
-        console.log("TRACE: The url is: " + url);
-        self.set('slaveComponentGroups', jsonData[version]);
-      },
-
-      error: function (request, ajaxOptions, error) {
-        console.log("TRACE: In error function for the getServciceConfigs call");
-        console.log("TRACE: value of the url is: " + url);
-        console.log("TRACE: error code status is: " + request.status);
-
-      },
-
-      statusCode: require('data/statusCodes')
-    });
-  },
-
-  /**
-   * Get the current applied slave configuration version from Ambari UI Database
-   */
-  getCurrentSlaveConfiguration: function () {
-
-  },
-
-  /**
-   *  Loads the advanced configs fetched from the server metadata library
-   */
-  loadAdvancedConfig: function (serviceConfigs, advancedConfig) {
-    advancedConfig.forEach(function (_config) {
-      if (_config) {
-        if (!this.get('configMapping').someProperty('name', _config.name) &&
-            !(serviceConfigs.someProperty('name', _config.name))) {
-          _config.id = "site property";
-          _config.category = 'Advanced';
-          _config.displayName = _config.name;
-          _config.defaultValue = _config.value;
-          _config.isRequired = false;
-          _config.isVisible = true;
-          _config.displayType = 'advanced';
-          _config.serviceName = this.get('content.serviceName');
-          serviceConfigs.pushObject(_config);
-        }
-      }
-    }, this);
-  },
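-
-  /*
-   * Sketch of the effect above (property name hypothetical): a stack property
-   * { name: 'dfs.datanode.du.reserved', value: '1073741824' } that is neither
-   * mapped in configMapping nor already present is surfaced as a visible
-   * 'Advanced' site property whose defaultValue is the stack value.
-   */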
-
-  /**
-   * Get configuration for the *-site.xml
-   */
-  setServiceConfigs: function () {
-    var self = this;
-    var url = App.apiPrefix + '/clusters/' + App.router.getClusterName() + '/services/' + this.get('content.serviceName');
-    $.ajax({
-      type: 'GET',
-      url: url,
-      timeout: 10000,
-      dataType: 'text',
-      success: function (data) {
-        console.log("TRACE: In success function for the GET getServciceConfigs call");
-        console.log("TRACE: The url is: " + url);
-        var jsonData = jQuery.parseJSON(data);
-        self.set('serviceConfigTags', jsonData.ServiceInfo.desired_configs);
-        //STEP 2: Create an array of objects defining tagnames to be polled and new tagnames to be set after submit
-        self.setServiceTagNames();
-        //STEP 5: Add the advanced configs to the serviceConfigs property
-        var advancedConfig = App.router.get('installerController').loadAdvancedConfig(self.get('content.serviceName')) || [];
-        //STEP 3: Set globalConfigs and Get an array of serviceProperty objects
-        var serviceConfigs = self.getSitesConfigProperties(advancedConfig);
-        self.loadAdvancedConfig(serviceConfigs, advancedConfig);
-        self.loadCustomConfig(serviceConfigs);
-        var serviceConfig = self.get('serviceConfigs').findProperty('serviceName', self.get('content.serviceName'));
-        self.addHostNamesToGlobalConfig();
-        serviceConfig.configs = self.get('globalConfigs').concat(serviceConfigs);
-
-        self.renderServiceConfigs(serviceConfig);
-        self.set('dataIsLoaded', true);
-      },
-
-      error: function (request, ajaxOptions, error) {
-        console.log("TRACE: In error function for the getServciceConfigs call");
-        console.log("TRACE: value of the url is: " + url);
-        console.log("TRACE: error code status is: " + request.status);
-
-      },
-
-      statusCode: require('data/statusCodes')
-    });
-  },
-
-
-  /**
-   * set tagnames for configuration of the *-site.xml
-   */
-  setServiceTagNames: function () {
-    console.log("TRACE: In setServiceTagNames function:");
-    var newServiceConfigTags = [];
-    var serviceConfigTags = this.get('serviceConfigTags');
-    for (var index in serviceConfigTags) {
-      console.log("The value of serviceConfigTags[index]: " + serviceConfigTags[index]);
-      newServiceConfigTags.pushObject({
-        siteName: index,
-        tagName: serviceConfigTags[index],
-        newTagName: null
-      }, this);
-    }
-    this.set('serviceConfigTags', newServiceConfigTags);
-  },
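-
-  /*
-   * Illustrative transformation (tag values hypothetical): the desired_configs
-   * site-to-tag map is rewritten into an array of tag descriptors:
-   *
-   *   { 'core-site': 'version1', 'hdfs-site': 'version1' }
-   *     -> [{ siteName: 'core-site', tagName: 'version1', newTagName: null },
-   *         { siteName: 'hdfs-site', tagName: 'version1', newTagName: null }]
-   */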
-
-  /**
-   * Render a custom conf-site box for entering properties that will be written in *-site.xml files of the services
-   */
-  loadCustomConfig: function (serviceConfigs) {
-    if (this.get('customConfigs').findProperty('serviceName', this.get('content.serviceName'))) {
-      var customConfigs = this.get('customConfigs').filterProperty('serviceName', this.get('content.serviceName'));
-      customConfigs.forEach(function (_customConfig) {
-        var customValue = '';
-        var length = this.get('customConfig').length;
-        this.get('customConfig').forEach(function (_config, index) {
-          if ((_config.filename !== 'core-site.xml' && _customConfig.name !== 'core-site') || (_config.filename === 'core-site.xml' && _customConfig.name === 'core-site')) {
-            customValue += _config.name + '=' + _config.value;
-            if (index !== length - 1) {
-              customValue += '\n';
-            }
-          }
-        }, this);
-        _customConfig.value = customValue;
-        serviceConfigs.pushObject(_customConfig);
-
-      }, this);
-
-    }
-  },
-
-
-  /**
-   * load the configs from the server
-   */
-  getSitesConfigProperties: function (advancedConfig) {
-    var serviceConfigs = [];
-    var globalConfigs = [];
-    var localServiceConfigs = this.get('serviceConfigs').findProperty('serviceName', this.get('content.serviceName'));
-
-    this.get('serviceConfigTags').forEach(function (_tag) {
-      var properties = this.getSiteConfigProperties(_tag.siteName, _tag.tagName);
-      for (var index in properties) {
-        var serviceConfigObj = {
-          name: index,
-          value: properties[index],
-          defaultValue: properties[index],
-          filename: _tag.siteName + ".xml"
-        };
-        if (this.get('configs').someProperty('name', index)) {
-          var configProperty = this.get('configs').findProperty('name', index);
-          serviceConfigObj.displayType = configProperty.displayType;
-          serviceConfigObj.isRequired = configProperty.isRequired ? configProperty.isRequired : true;
-          serviceConfigObj.isReconfigurable = (configProperty.isReconfigurable !== undefined) ? configProperty.isReconfigurable : true;
-          serviceConfigObj.isVisible = (configProperty.isVisible !== undefined) ? configProperty.isVisible : true;
-          serviceConfigObj.unit = (configProperty.unit !== undefined) ? configProperty.unit : undefined;
-          serviceConfigObj.description = (configProperty.description !== undefined) ? configProperty.description : undefined;
-
-        }
-        serviceConfigObj.displayType = this.get('configs').someProperty('name', index) ? this.get('configs').findProperty('name', index).displayType : null;
-
-        serviceConfigObj.isRequired = this.get('configs').someProperty('name', index) ? this.get('configs').findProperty('name', index).isRequired : null;
-
-        if (_tag.siteName === 'global') {
-          if (this.get('configs').someProperty('name', index)) {
-            var item = this.get('configs').findProperty('name', index);
-            if (item.displayType === 'int') {
-              if (/\d+m$/.test(properties[index])) {
-
-                serviceConfigObj.value = properties[index].slice(0, properties[index].length - 1);
-                serviceConfigObj.defaultValue = serviceConfigObj.value;
-              }
-            }
-            if (item.displayType === 'checkbox') {
-              switch (properties[index]) {
-                case 'true' :
-                  serviceConfigObj.value = true;
-                  serviceConfigObj.defaultValue = true;
-                  break;
-                case 'false' :
-                  serviceConfigObj.value = false;
-                  serviceConfigObj.defaultValue = false;
-                  break;
-              }
-            }
-          }
-          serviceConfigObj.id = 'puppet var';
-          serviceConfigObj.serviceName = this.get('configs').someProperty('name', index) ? this.get('configs').findProperty('name', index).serviceName : null;
-          serviceConfigObj.displayName = this.get('configs').someProperty('name', index) ? this.get('configs').findProperty('name', index).displayName : null;
-          serviceConfigObj.category = this.get('configs').someProperty('name', index) ? this.get('configs').findProperty('name', index).category : null;
-          serviceConfigObj.options = this.get('configs').someProperty('name', index) ? this.get('configs').findProperty('name', index).options : null;
-          globalConfigs.pushObject(serviceConfigObj);
-        } else if (!this.get('configMapping').someProperty('name', index)) {
-          if (advancedConfig.someProperty('name', index)) {
-            serviceConfigObj.id = 'site property';
-            serviceConfigObj.serviceName = this.get('content.serviceName');
-            serviceConfigObj.category = 'Advanced';
-            serviceConfigObj.displayName = index;
-            serviceConfigObj.displayType = 'advanced';
-            if (advancedConfig.findProperty('name', index).filename) {
-              serviceConfigObj.filename = advancedConfig.findProperty('name', index).filename;
-            }
-            serviceConfigs.pushObject(serviceConfigObj);
-          } else {
-            serviceConfigObj.id = 'conf-site';
-            serviceConfigObj.serviceName = this.get('content.serviceName');
-            this.get('customConfig').pushObject(serviceConfigObj);
-          }
-
-        }
-
-      }
-    }, this);
-    this.set('globalConfigs', globalConfigs);
-    return serviceConfigs;
-  },
-
-  /**
-   * return site config properties
-   * @param sitename
-   * @param tagname
-   * @return {Object}
-   */
-  getSiteConfigProperties: function (sitename, tagname) {
-    var self = this;
-    var properties = {};
-    var url = App.apiPrefix + '/clusters/' + App.router.getClusterName() + '/configurations/?type=' + sitename + '&tag=' + tagname;
-    $.ajax({
-      type: 'GET',
-      url: url,
-      async: false,
-      timeout: 10000,
-      dataType: 'json',
-      success: function (data) {
-        console.log("TRACE: In success function for the GET getSiteConfigProperties call");
-        console.log("TRACE: The url is: " + url);
-        properties = data.items.findProperty('tag', tagname).properties;
-
-        console.log("The value of config properties is: " + properties);
-      },
-
-      error: function (request, ajaxOptions, error) {
-        console.log("TRACE: In error function for the getServciceConfigs call");
-        console.log("TRACE: value of the url is: " + url);
-        console.log("TRACE: error code status is: " + request.status);
-
-      },
-
-      statusCode: require('data/statusCodes')
-    });
-    return properties;
-  },
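-
-  /*
-   * A sketch of the response consumed above (values hypothetical); only the
-   * properties of the item matching the requested tag are returned:
-   *
-   *   { "items": [{ "type": "core-site", "tag": "version1",
-   *                 "properties": { "fs.default.name": "hdfs://host1:8020" } }] }
-   */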
-
-  /**
-   * Render configs for active services
-   * @param serviceConfigs
-   */
-  renderServiceConfigs: function (serviceConfigs) {
-
-    var serviceConfig = App.ServiceConfig.create({
-      filename: serviceConfigs.filename,
-      serviceName: serviceConfigs.serviceName,
-      displayName: serviceConfigs.displayName,
-      configCategories: serviceConfigs.configCategories,
-      configs: []
-    });
-
-    if ((this.get('content.serviceName') && this.get('content.serviceName').toUpperCase() === serviceConfig.serviceName) || serviceConfig.serviceName === 'MISC') {
-
-      this.loadComponentConfigs(serviceConfigs, serviceConfig);
-
-      console.log('pushing ' + serviceConfig.serviceName);
-      this.get('stepConfigs').pushObject(serviceConfig);
-
-    } else {
-      console.log('skipping ' + serviceConfig.serviceName);
-    }
-
-    this.set('selectedService', this.get('stepConfigs').objectAt(0));
-  },
-
-  /**
-   * Load child components to service config object
-   * @param _componentConfig
-   * @param componentConfig
-   */
-  loadComponentConfigs: function (_componentConfig, componentConfig) {
-    _componentConfig.configs.forEach(function (_serviceConfigProperty) {
-      console.log("config", _serviceConfigProperty);
-      if (!_serviceConfigProperty) return;
-      var serviceConfigProperty = App.ServiceConfigProperty.create(_serviceConfigProperty);
-      if (serviceConfigProperty.get('serviceName') === this.get('content.serviceName')) {
-        // serviceConfigProperty.serviceConfig = componentConfig;
-        if (App.db.getUser().admin) {
-          serviceConfigProperty.set('isEditable', serviceConfigProperty.get('isReconfigurable'));
-        } else {
-          serviceConfigProperty.set('isEditable', false);
-        }
-
-        console.log("config result", serviceConfigProperty);
-      } else {
-        serviceConfigProperty.set('isVisible', false);
-      }
-      componentConfig.configs.pushObject(serviceConfigProperty);
-      serviceConfigProperty.validate();
-    }, this);
-  },
-
-  /**
-   * open popup with appropriate message
-   */
-  restartServicePopup: function (event) {
-    var header;
-    var message;
-    var value;
-    var flag;
-    if ((this.get('content.serviceName') !== 'HDFS' && this.get('content.isStopped') === true) ||
-        (this.get('content.serviceName') === 'HDFS' && this.get('content.isStopped') === true && App.Service.find('MAPREDUCE').get('isStopped'))) {
-      var result = this.saveServiceConfigProperties();
-      flag = result.flag;
-      if (flag === true) {
-        header = 'Start Service';
-        message = 'Service configuration applied successfully';
-      } else {
-        header = 'Failure';
-        message = result.message;
-        value = result.value;
-      }
-
-    } else {
-      if (this.get('content.serviceName') !== 'HDFS') {
-        header = 'Stop Service';
-        message = 'Stop the service and wait until it has stopped completely. Then you can apply configuration changes';
-      } else {
-        header = 'Stop Services';
-        message = 'Stop HDFS and MapReduce and wait until both have stopped completely. Then you can apply configuration changes';
-      }
-    }
-    App.ModalPopup.show({
-      header: header,
-      primary: 'OK',
-      secondary: null,
-      onPrimary: function () {
-        this.hide();
-      },
-      bodyClass: Ember.View.extend({
-        flag: flag,
-        message: message,
-        siteProperties: value,
-        getDisplayMessage: function () {
-          var displayMsg = [];
-          var siteProperties = this.get('siteProperties');
-          if (siteProperties) {
-            siteProperties.forEach(function (_siteProperty) {
-              var displayProperty = _siteProperty.siteProperty;
-              var displayNames = _siteProperty.displayNames;
-              if (displayNames && displayNames.length) {
-                if (displayNames.length === 1) {
-                  displayMsg.push(displayProperty + ' as ' + displayNames[0]);
-                } else {
-                  var name;
-                  displayNames.forEach(function (_name, index) {
-                    if (index === 0) {
-                      name = _name;
-                    } else if (index === displayNames.length - 1) {
-                      name = name + ' and ' + _name;
-                    } else {
-                      name = name + ', ' + _name;
-                    }
-                  }, this);
-                  displayMsg.push(displayProperty + ' as ' + name);
-
-                }
-              } else {
-                displayMsg.push(displayProperty);
-              }
-            }, this);
-          }
-          return displayMsg;
-
-        }.property('siteProperties'),
-        template: Ember.Handlebars.compile([
-          '<h5>{{view.message}}</h5>',
-          '{{#unless view.flag}}',
-          '<br/>',
-          '<div class="pre-scrollable" style="max-height: 250px;">',
-          '<ul>',
-          '{{#each val in view.getDisplayMessage}}',
-          '<li>',
-          '{{val}}',
-          '</li>',
-          '{{/each}}',
-          '</ul>',
-          '</div>',
-          '{{/unless}}'
-        ].join('\n'))
-      })
-    });
-  },
-
-  /**
-   * Save config properties
-   */
-  saveServiceConfigProperties: function () {
-    var result = {
-      flag: false,
-      message: null,
-      value: null
-    };
-    var configs = this.get('stepConfigs').findProperty('serviceName', this.get('content.serviceName')).get('configs');
-    this.saveGlobalConfigs(configs);
-    this.saveSiteConfigs(configs);
-    var customConfigResult = this.setCustomConfigs();
-    result.flag = customConfigResult.flag;
-    result.value = customConfigResult.value;
-    /*
-    For now, we are skipping validation checks to see if the user is overriding already-defined parameters, as
-    the user needs this flexibility.  We may turn this back on as a warning in the future...
-    if (result.flag !== true) {
-      result.message = 'Error in custom configuration. Some properties entered in the box are already exposed on this page';
-      return result;
-    }
-    */
-    result.flag = result.flag && this.createConfigurations();
-    if (result.flag === true) {
-      if (this.get('content.serviceName') !== 'HDFS') {
-        result.flag = this.applyCreatedConfToService(this.get('content.serviceName'));
-      } else {
-        result.flag = this.applyCreatedConfToService(this.get('content.serviceName')) && this.applyCreatedConfToService('MAPREDUCE');
-      }
-    } else {
-      result.message = 'Failure in applying service configuration';
-    }
-    console.log("The result from applyCreatdConfToService is: " + result);
-    return result;
-  },
-
-  /**
-   * save new or change exist configs in global configs
-   * @param configs
-   */
-  saveGlobalConfigs: function (configs) {
-    var globalConfigs = this.get('globalConfigs');
-    configs.filterProperty('id', 'puppet var').forEach(function (_config) {
-      if (globalConfigs.someProperty('name', _config.name)) {
-        globalConfigs.findProperty('name', _config.name).value = _config.value;
-      } else {
-        globalConfigs.pushObject({
-          name: _config.name,
-          value: _config.value
-        });
-      }
-    }, this);
-
-    this.setHiveHostName(globalConfigs);
-    this.set('globalConfigs', globalConfigs);
-  },
-
-  /**
-   * set hive hostnames in global configs
-   * @param globals
-   */
-  setHiveHostName: function (globals) {
-    if (globals.someProperty('name', 'hive_database')) {
-      //TODO: Hive host depends on the type of db selected. Change puppet variable name if postgres is not the default db
-      var hiveDb = globals.findProperty('name', 'hive_database');
-      if (hiveDb.value === 'New MySQL Database') {
-        if (globals.someProperty('name', 'hive_ambari_host')) {
-          globals.findProperty('name', 'hive_ambari_host').name = 'hive_mysql_hostname';
-        }
-        globals = globals.without(globals.findProperty('name', 'hive_existing_host'));
-        globals = globals.without(globals.findProperty('name', 'hive_existing_database'));
-      } else {
-        globals.findProperty('name', 'hive_existing_host').name = 'hive_mysql_hostname';
-        globals = globals.without(globals.findProperty('name', 'hive_ambari_host'));
-        globals = globals.without(globals.findProperty('name', 'hive_ambari_database'));
-      }
-    }
-  },
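-
-  /*
-   * Sketch, assuming the puppet variable names used above: choosing
-   * 'New MySQL Database' renames hive_ambari_host to hive_mysql_hostname and
-   * drops the hive_existing_* entries; any other choice renames
-   * hive_existing_host instead and drops the hive_ambari_* entries.
-   */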
-
-  /**
-   * save site configs
-   * @param configs
-   */
-  saveSiteConfigs: function (configs) {
-    var storedConfigs = configs.filterProperty('id', 'site property').filterProperty('value');
-    var uiConfigs = this.loadUiSideConfigs();
-    this.set('uiConfigs', storedConfigs.concat(uiConfigs));
-  },
-
-  /**
-   * return configs from the UI side
-   * @return {Array}
-   */
-  loadUiSideConfigs: function () {
-    var uiConfig = [];
-    var configs = this.get('configMapping').filterProperty('foreignKey', null);
-    configs.forEach(function (_config) {
-      var value = this.getGlobConfigValue(_config.templateName, _config.value, _config.name);
-      if (value !== null) {
-        uiConfig.pushObject({
-          "id": "site property",
-          "name": _config.name,
-          "value": value,
-          "filename": _config.filename
-        });
-      }
-    }, this);
-    var dependentConfig = this.get('configMapping').filterProperty('foreignKey');
-    dependentConfig.forEach(function (_config) {
-      this.setConfigValue(uiConfig, _config);
-      uiConfig.pushObject({
-        "id": "site property",
-        "name": _config._name || _config.name,
-        "value": _config.value,
-        "filename": _config.filename
-      });
-    }, this);
-    return uiConfig;
-  },
-
-  /**
-   * return global config value
-   * @param templateName
-   * @param expression
-   * @param name
-   * @return {*}
-   */
-  getGlobConfigValue: function (templateName, expression, name) {
-    var express = expression.match(/<(.*?)>/g);
-    var value = expression;
-    if (express == null) {
-      return expression;
-    }
-
-    express.forEach(function (_express) {
-      //console.log("The value of template is: " + _express);
-      var index = parseInt(_express.match(/\[([\d]*)(?=\])/)[1]);
-      if (this.get('globalConfigs').someProperty('name', templateName[index])) {
-        //console.log("The name of the variable is: " + this.get('content.serviceConfigProperties').findProperty('name', templateName[index]).name);
-        var globValue = this.get('globalConfigs').findProperty('name', templateName[index]).value;
-        // Hack for templeton.zookeeper.hosts
-        if (value !== null) {   // if the property depends on more than one template name like <templateName[0]>/<templateName[1]> then don't proceed to the next if the prior is null or not found in the global configs
-          if (name === "templeton.zookeeper.hosts" || name === 'hbase.zookeeper.quorum') {
-            var zooKeeperPort = '2181';
-            if (typeof globValue === 'string') {
-              var temp = [];
-              temp.push(globValue);
-              globValue = temp;
-            }
-            if (name === "templeton.zookeeper.hosts") {
-              var temp = [];
-              globValue.forEach(function (_host, index) {
-                temp.push(globValue[index] + ':' + zooKeeperPort);
-              }, this);
-              globValue = temp;
-            }
-            value = value.replace(_express, globValue.toString());
-          } else {
-            value = value.replace(_express, globValue);
-          }
-        }
-      } else {
-        /*
-         console.log("ERROR: The variable name is: " + templateName[index]);
-         console.log("ERROR: mapped config from configMapping file has no corresponding variable in " +
-         "content.serviceConfigProperties. Two possible reasons for the error could be: 1) The service is not selected. " +
-         "and/OR 2) The service_config metadata file has no corresponding global var for the site property variable");
-         */
-        value = null;
-      }
-    }, this);
-    return value;
-  },
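-
-  /*
-   * Worked example (host names hypothetical): for templeton.zookeeper.hosts
-   * with expression '<templateName[0]>' and templateName
-   * ['zookeeperserver_hosts'], a global value of ['zk1', 'zk2'] yields
-   * 'zk1:2181,zk2:2181'; a template name missing from globalConfigs makes the
-   * whole value null.
-   */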
-  /**
-   * Set all site property that are derived from other site-properties
-   */
-  setConfigValue: function (uiConfig, config) {
-    if (config.value == null) {
-      return;
-    }
-    var fkValue = config.value.match(/<(foreignKey.*?)>/g);
-    if (fkValue) {
-      fkValue.forEach(function (_fkValue) {
-        var index = parseInt(_fkValue.match(/\[([\d]*)(?=\])/)[1]);
-        if (uiConfig.someProperty('name', config.foreignKey[index])) {
-          var globalValue = uiConfig.findProperty('name', config.foreignKey[index]).value;
-          config.value = config.value.replace(_fkValue, globalValue);
-        } else if (this.get('globalConfigs').someProperty('name', config.foreignKey[index])) {
-          var globalValue;
-          if (this.get('globalConfigs').findProperty('name', config.foreignKey[index]).value === '') {
-            globalValue = this.get('globalConfigs').findProperty('name', config.foreignKey[index]).defaultValue;
-          } else {
-            globalValue = this.get('globalConfigs').findProperty('name', config.foreignKey[index]).value;
-          }
-          config.value = config.value.replace(_fkValue, globalValue);
-        }
-      }, this);
-    }
-    fkValue = config.name.match(/<(foreignKey.*?)>/g);
-    if (fkValue) {
-      fkValue.forEach(function (_fkValue) {
-        var index = parseInt(_fkValue.match(/\[([\d]*)(?=\])/)[1]);
-        if (uiConfig.someProperty('name', config.foreignKey[index])) {
-          var globalValue = uiConfig.findProperty('name', config.foreignKey[index]).value;
-          config._name = config.name.replace(_fkValue, globalValue);
-        } else if (this.get('globalConfigs').someProperty('name', config.foreignKey[index])) {
-          var globalValue;
-          if (this.get('globalConfigs').findProperty('name', config.foreignKey[index]).value === '') {
-            globalValue = this.get('globalConfigs').findProperty('name', config.foreignKey[index]).defaultValue;
-          } else {
-            globalValue = this.get('globalConfigs').findProperty('name', config.foreignKey[index]).value;
-          }
-          config._name = config.name.replace(_fkValue, globalValue);
-        }
-      }, this);
-    }
-    //For properties in the configMapping file having foreignKey and templateName properties.
-    var templateValue = config.value.match(/<(templateName.*?)>/g);
-    if (templateValue) {
-      templateValue.forEach(function (_value) {
-        var index = parseInt(_value.match(/\[([\d]*)(?=\])/)[1]);
-        if (this.get('globalConfigs').someProperty('name', config.templateName[index])) {
-          var globalValue = this.get('globalConfigs').findProperty('name', config.templateName[index]).value;
-          config.value = config.value.replace(_value, globalValue);
-        } else {
-          config.value = null;
-        }
-      }, this);
-    }
-  },
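-
-  /*
-   * Worked example (names hypothetical): a mapping entry with name
-   * 'hadoop.proxyuser.<foreignKey[0]>.hosts' and foreignKey ['hive_user']
-   * gets _name 'hadoop.proxyuser.hive.hosts' once the hive_user global
-   * resolves to 'hive'; an unresolved templateName reference nulls the
-   * value out instead.
-   */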
-
-  /**
-   * try to create configuration and return true for success or false for failure
-   * @return {Boolean}
-   */
-  createConfigurations: function () {
-    var result = true;
-    var serviceConfigTags = this.get('serviceConfigTags');
-    this.setNewTagNames(serviceConfigTags);
-    serviceConfigTags.forEach(function (_serviceTags) {
-      if (_serviceTags.siteName === 'global') {
-        console.log("TRACE: Inside global");
-        result = result && this.createConfigSite(this.createGlobalSiteObj(_serviceTags.newTagName));
-      } else if (_serviceTags.siteName === 'core-site') {
-        console.log("TRACE: Inside core-site");
-        if (this.get('content.serviceName') === 'HDFS') {
-          result = result && this.createConfigSite(this.createCoreSiteObj(_serviceTags.newTagName));
-        }
-      } else {
-        result = result && this.createConfigSite(this.createSiteObj(_serviceTags.siteName, _serviceTags.newTagName));
-      }
-    }, this);
-    return result;
-  },
-
-  /**
-   * add newTagName property to each config in serviceConfigs
-   * @param serviceConfigs
-   */
-  setNewTagNames: function (serviceConfigs) {
-    var time = (new Date).getTime();
-    serviceConfigs.forEach(function (_serviceConfigs) {
-      _serviceConfigs.newTagName = 'version' + time;
-    }, this);
-  },
-
-  /**
-   * send request to the server to create configs and return true for success and false for failure
-   * @param data
-   * @return {*}
-   */
-  createConfigSite: function (data) {
-    var result;
-    var realData = data;
-    console.log("Inside createConfigSite");
-    var clusterName = App.router.getClusterName();
-    var url = App.apiPrefix + '/clusters/' + clusterName + '/configurations';
-    $.ajax({
-      type: 'POST',
-      url: url,
-      data: JSON.stringify(data),
-      async: false,
-      dataType: 'text',
-      timeout: 5000,
-      success: function (data) {
-        var jsonData = jQuery.parseJSON(data);
-        result = true;
-        console.log("TRACE: In success function for the createConfigSite");
-        console.log("TRACE: value of the url is: " + url);
-        console.log("TRACE: value of the received data is: " + jsonData);
-      },
-
-      error: function (request, ajaxOptions, error) {
-        result = false;
-        console.log('TRACE: In Error ');
-        console.log("The original data was: " + JSON.stringify(realData));
-        console.log('TRACE: Error message is: ' + request.responseText);
-        console.log("TRACE: value of the url is: " + url);
-      },
-
-      statusCode: require('data/statusCodes')
-    });
-    console.log("Exiting createConfigSite");
-    console.log("Value of result is: " + result);
-    return result;
-  },
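-
-  /*
-   * Illustrative request body for the POST above (tag and property values
-   * hypothetical):
-   *
-   *   { "type": "global", "tag": "version1365000000000",
-   *     "properties": { "dtnode_heapsize": "1024m" } }
-   */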
-
-  /**
-   * create global site object
-   * @param tagName
-   * @return {Object}
-   */
-  createGlobalSiteObj: function (tagName) {
-    var globalSiteProperties = {};
-    this.get('globalConfigs').forEach(function (_globalSiteObj) {
-      // do not pass any globalConfigs whose name ends with _host or _hosts
-      if (!/_hosts?$/.test(_globalSiteObj.name)) {
-        // append "m" to JVM memory options except for hadoop_heapsize
-        if (/_(heapsize|newsize|maxnewsize)$/.test(_globalSiteObj.name) && _globalSiteObj.name !== 'hadoop_heapsize') {
-          _globalSiteObj.value += "m";
-        }
-        globalSiteProperties[_globalSiteObj.name] = _globalSiteObj.value;
-        //console.log("TRACE: name of the global property is: " + _globalSiteObj.name);
-        //console.log("TRACE: value of the global property is: " + _globalSiteObj.value);
-      }
-    }, this);
-    return {"type": "global", "tag": tagName, "properties": globalSiteProperties};
-  },
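-
-  /*
-   * Sketch of the filtering above (names hypothetical): 'namenode_host' is
-   * dropped (matches /_hosts?$/), 'hadoop_heapsize' keeps its raw value, and
-   * a 'dtnode_heapsize' of '1024' is sent as '1024m'.
-   */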
-
-  /**
-   * create core site object
-   * @param tagName
-   * @return {Object}
-   */
-  createCoreSiteObj: function (tagName) {
-    var coreSiteObj = this.get('uiConfigs').filterProperty('filename', 'core-site.xml');
-    var coreSiteProperties = {};
-    // hadoop.proxyuser.oozie.hosts needs to be skipped if oozie is not selected
-    var isOozieSelected = App.Service.find().someProperty('serviceName', 'OOZIE');
-    var oozieUser = this.get('globalConfigs').someProperty('name', 'oozie_user') ? this.get('globalConfigs').findProperty('name', 'oozie_user').value : null;
-    var isHiveSelected = App.Service.find().someProperty('serviceName', 'HIVE');
-    var hiveUser = this.get('globalConfigs').someProperty('name', 'hive_user') ? this.get('globalConfigs').findProperty('name', 'hive_user').value : null;
-    var isHcatSelected = App.Service.find().someProperty('serviceName', 'WEBHCAT');
-    var hcatUser = this.get('globalConfigs').someProperty('name', 'hcat_user') ? this.get('globalConfigs').findProperty('name', 'hcat_user').value : null;
-    coreSiteObj.forEach(function (_coreSiteObj) {
-      if ((isOozieSelected || (_coreSiteObj.name != 'hadoop.proxyuser.' + oozieUser + '.hosts' && _coreSiteObj.name != 'hadoop.proxyuser.' + oozieUser + '.groups')) &&
-          (isHiveSelected || (_coreSiteObj.name != 'hadoop.proxyuser.' + hiveUser + '.hosts' && _coreSiteObj.name != 'hadoop.proxyuser.' + hiveUser + '.groups')) &&
-          (isHcatSelected || (_coreSiteObj.name != 'hadoop.proxyuser.' + hcatUser + '.hosts' && _coreSiteObj.name != 'hadoop.proxyuser.' + hcatUser + '.groups'))) {
-        coreSiteProperties[_coreSiteObj.name] = _coreSiteObj.value;
-      }
-      //console.log("TRACE: name of the property is: " + _coreSiteObj.name);
-      //console.log("TRACE: value of the property is: " + _coreSiteObj.value);
-    }, this);
-    return {"type": "core-site", "tag": tagName, "properties": coreSiteProperties};
-  },
-
-  /**
-   * create site object
-   * @param siteName
-   * @param tagName
-   * @return {Object}
-   */
-  createSiteObj: function (siteName, tagName) {
-    var siteObj = this.get('uiConfigs').filterProperty('filename', siteName + '.xml');
-    var siteProperties = {};
-    siteObj.forEach(function (_siteObj) {
-      siteProperties[_siteObj.name] = _siteObj.value;
-    }, this);
-    return {"type": siteName, "tag": tagName, "properties": siteProperties};
-  },
-
-  /**
-   * apply created configs to service and return true for success and false for failure
-   * @param serviceName
-   * @return {*}
-   */
-  applyCreatedConfToService: function (serviceName) {
-    var result;
-    var clusterName = App.router.getClusterName();
-    var url = App.apiPrefix + '/clusters/' + clusterName + '/services/' + serviceName;
-    var data = this.getConfigForService(serviceName);
-    var realData = data;
-    $.ajax({
-      type: 'PUT',
-      url: url,
-      async: false,
-      dataType: 'text',
-      data: JSON.stringify(data),
-      timeout: 5000,
-      success: function (data) {
-        var jsonData = jQuery.parseJSON(data);
-        console.log("TRACE: In success function for the applyCreatedConfToService call");
-        console.log("TRACE: value of the url is: " + url);
-        result = true;
-      },
-
-      error: function (request, ajaxOptions, error) {
-        console.log('Error: In Error of apply');
-        console.log("The original data was: " + JSON.stringify(realData));
-        console.log('Error: Error message is: ' + request.responseText);
-        result = false;
-      },
-
-      statusCode: require('data/statusCodes')
-    });
-    console.log("Exiting applyCreatedConfToService");
-    console.log("Value of result is: " + result);
-    return result;
-  },
-
-  /**
-   * return config for service
-   * @param serviceName
-   * @return {Object}
-   */
-  getConfigForService: function (serviceName) {
-    var data = {config: {}};
-    this.get('serviceConfigTags').forEach(function (_serviceTag) {
-      if (_serviceTag.siteName === 'core-site') {
-        if (this.get('content.serviceName') === 'HDFS') {
-          data.config[_serviceTag.siteName] = _serviceTag.newTagName;
-        } else {
-          data.config[_serviceTag.siteName] = _serviceTag.tagName;
-        }
-      } else if (this.get('content.serviceName') === serviceName) {
-        data.config[_serviceTag.siteName] = _serviceTag.newTagName;
-      }
-    }, this);
-    return data;
-  },
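-
-  /*
-   * Illustrative PUT payload (tags hypothetical): for MAPREDUCE this pairs
-   * the existing core-site tag with the newly created service tag:
-   *
-   *   { "config": { "core-site": "version1", "mapred-site": "version1365000000000" } }
-   */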
-
-  /**
-   * return custom config
-   * @return {Object}
-   */
-  setCustomConfigs: function () {
-    var site = this.get('stepConfigs').findProperty('serviceName', this.get('content.serviceName')).get('configs').filterProperty('id', 'conf-site');
-    var siteProperties = [];
-    var flag = true;
-    site.forEach(function (_site) {
-      var keyValue = _site.value.split(/\n+/);
-      if (keyValue) {
-        keyValue.forEach(function (_keyValue) {
-          console.log("The value of the keyValue is: " + _keyValue.trim());
-          _keyValue = _keyValue.trim();
-
-          // split on the first = encountered (the value may contain ='s)
-          var matches = _keyValue.match(/^([^=]+)=(.*)$/);
-          if (matches) {
-            var key = matches[1];
-            var value = matches[2];
-            // Check that entered config is allowed to reconfigure
-            if (this.get('uiConfigs').filterProperty('filename', _site.name + '.xml').someProperty('name', key)) {
-              var property = {
-                siteProperty: null,
-                displayNames: []
-              };
-              if (_site.name !== 'core-site') {
-                property.siteProperty = key;
-
-                if (this.get('configMapping').someProperty('name', key)) {
-                  this.setPropertyDisplayNames(property.displayNames, this.get('configMapping').findProperty('name', key).templateName);
-                }
-                siteProperties.push(property);
-                flag = false;
-              } else {
-                this.setSiteProperty(key, value, _site.name + '.xml');
-              }
-            } else if (flag) {
-              this.setSiteProperty(key, value, _site.name + '.xml');
-            }
-          }
-        }, this);
-      }
-    }, this);
-
-    var result = {
-      flag: flag,
-      value: siteProperties
-    };
-    return result;
-  },
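-
-  /*
-   * The first-'=' split above keeps any further '='s in the value; e.g. a
-   * hypothetical entry 'mapred.child.java.opts=-Xmx512m -Dx=y' parses to key
-   * 'mapred.child.java.opts' and value '-Xmx512m -Dx=y'.
-   */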
-
-  /**
-   * Set display names of the property from the puppet/global names
-   * @param displayNames a field to be set with displayNames
-   * @param names array of property puppet/global names
-   */
-  setPropertyDisplayNames: function (displayNames, names) {
-    var stepConfigs = this.get('stepConfigs').findProperty('serviceName', this.get('content.serviceName')).configs;
-    names.forEach(function (_name, index) {
-      if (stepConfigs.someProperty('name', _name)) {
-        displayNames.push(stepConfigs.findProperty('name', _name).displayName);
-      }
-    }, this);
-  },
-
-  /**
-   * Set property of the site variable
-   */
-  setSiteProperty: function (key, value, filename) {
-    if (filename === 'core-site.xml' && this.get('uiConfigs').filterProperty('filename', 'core-site.xml').someProperty('name', key)) {
-      this.get('uiConfigs').filterProperty('filename', 'core-site.xml').findProperty('name', key).value = value;
-      return;
-    }
-    this.get('uiConfigs').pushObject({
-      "id": "site property",
-      "name": key,
-      "value": value,
-      "filename": filename
-    });
-  },
-
-  /**
-   * Return testUrl when testMode is enabled; otherwise build the cluster-specific request url
-   * @param testUrl
-   * @param url
-   * @return {*}
-   */
-  getUrl: function (testUrl, url) {
-    return (App.testMode) ? testUrl : App.apiPrefix + '/clusters/' + App.router.getClusterName() + url;
-  },
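-
-  /*
-   * Editor's sketch (hypothetical cluster name 'c1'): outside test mode,
-   * getUrl('/data/x.json', '/services/HDFS') returns
-   * App.apiPrefix + '/clusters/c1/services/HDFS';
-   * with App.testMode it returns '/data/x.json' unchanged.
-   */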
-
-  /**
-   * Adds host name of master component to global config;
-   */
-  addHostNamesToGlobalConfig: function () {
-    var serviceName = this.get('content.serviceName');
-    var globalConfigs = this.get('globalConfigs');
-    var serviceConfigs = this.get('serviceConfigs').findProperty('serviceName', serviceName).configs;
-    // namenode_host is required to derive "fs.default.name", a property of core-site
-    var nameNodeHost = this.get('serviceConfigs').findProperty('serviceName', 'HDFS').configs.findProperty('name', 'namenode_host');
-    nameNodeHost.defaultValue = App.Service.find('HDFS').get('hostComponents').findProperty('componentName', 'NAMENODE').get('host.hostName');
-    globalConfigs.push(nameNodeHost);
-
-    // zookeeperserver_hosts
-    var zooKeeperHost = this.get('serviceConfigs').findProperty('serviceName', 'ZOOKEEPER').configs.findProperty('name', 'zookeeperserver_hosts');
-    if (serviceName === 'ZOOKEEPER' || serviceName === 'HBASE' || serviceName === 'WEBHCAT') {
-      zooKeeperHost.defaultValue = App.Service.find('ZOOKEEPER').get('hostComponents').filterProperty('componentName', 'ZOOKEEPER_SERVER').mapProperty('host.hostName');
-      globalConfigs.push(zooKeeperHost);
-    }
-
-    switch (serviceName) {
-      case 'HDFS':
-        var sNameNodeHost = serviceConfigs.findProperty('name', 'snamenode_host');
-        sNameNodeHost.defaultValue = this.get('content.hostComponents').findProperty('componentName', 'SECONDARY_NAMENODE').get('host.hostName');
-        globalConfigs.push(sNameNodeHost);
-        break;
-      case 'MAPREDUCE':
-        var jobTrackerHost = serviceConfigs.findProperty('name', 'jobtracker_host');
-        jobTrackerHost.defaultValue = this.get('content.hostComponents').findProperty('componentName', 'JOBTRACKER').get('host.hostName');
-        globalConfigs.push(jobTrackerHost);
-        break;
-      case 'HIVE':
-        var hiveMetastoreHost = serviceConfigs.findProperty('name', 'hivemetastore_host');
-        hiveMetastoreHost.defaultValue = this.get('content.hostComponents').findProperty('componentName', 'HIVE_SERVER').get('host.hostName');
-        globalConfigs.push(hiveMetastoreHost);
-        break;
-      case 'OOZIE':
-        var oozieServerHost = serviceConfigs.findProperty('name', 'oozieserver_host');
-        oozieServerHost.defaultValue = this.get('content.hostComponents').findProperty('componentName', 'OOZIE_SERVER').get('host.hostName');
-        globalConfigs.push(oozieServerHost);
-        break;
-      case 'HBASE':
-        var hbaseMasterHost = serviceConfigs.findProperty('name', 'hbasemaster_host');
-        hbaseMasterHost.defaultValue = this.get('content.hostComponents').findProperty('componentName', 'HBASE_MASTER').get('host.hostName');
-        globalConfigs.push(hbaseMasterHost);
-        break;
-    }
-  }
-
-});
-
-
-App.MainServiceSlaveComponentGroupsController = App.SlaveComponentGroupsController.extend({
-  name: 'mainServiceSlaveComponentGroupsController',
-  contentBinding: 'App.router.mainServiceInfoConfigsController.slaveComponentGroups',
-  stepConfigsBinding: 'App.router.mainServiceInfoConfigsController.stepConfigs',
-  serviceBinding: 'App.router.mainServiceInfoConfigsController.selectedService'
-
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/controllers/main/service/info/summary.js b/branch-1.2/ambari-web/app/controllers/main/service/info/summary.js
deleted file mode 100644
index e5f07a2..0000000
--- a/branch-1.2/ambari-web/app/controllers/main/service/info/summary.js
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-App.MainServiceInfoSummaryController = Em.Controller.extend({
-  name: 'mainServiceInfoSummaryController',
-  allAlerts: function(){
-    return App.router.get('clusterController.alerts');
-  }.property('App.router.clusterController.alerts'),
-
-  alerts: function () {
-    var serviceId = this.get('content.serviceName');
-    if (serviceId) {
-      return this.get('allAlerts').filter(function (item) {
-        return item.get('serviceType').toLowerCase() == serviceId.toLowerCase() && !item.get('ignoredForServices');
-      });
-    }
-    return [];
-  }.property('allAlerts', 'content.serviceName'),
-  
-  nagiosUrl: function(){
-    return App.router.get('clusterController.nagiosUrl');
-  }.property('App.router.clusterController.nagiosUrl'),
-  
-  isNagiosInstalled: function(){
-    return App.router.get('clusterController.isNagiosInstalled');
-  }.property('App.router.clusterController.isNagiosInstalled')
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/controllers/main/service/item.js b/branch-1.2/ambari-web/app/controllers/main/service/item.js
deleted file mode 100644
index e1759f5..0000000
--- a/branch-1.2/ambari-web/app/controllers/main/service/item.js
+++ /dev/null
@@ -1,276 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.MainServiceItemController = Em.Controller.extend({
-  name: 'mainServiceItemController',
-
-  /**
-   * Send a specific command to the server
-   * @param url relative resource url, appended to the cluster url
-   * @param method HTTP method (forced to GET in test mode)
-   * @param postData Object to send as the request body
-   * @param callback invoked with the request id on success, null on failure
-   */
-  sendCommandToServer : function(url, method, postData, callback){
-    url = (App.testMode) ?
-      '/data/wizard/deploy/poll_1.json' : //content is the same as ours
-      App.apiPrefix + '/clusters/' + App.router.getClusterName() + url;
-
-    method = App.testMode ? 'GET' : method;
-
-    $.ajax({
-      type: method,
-      url: url,
-      data: (postData != null) ? JSON.stringify(postData) : null,
-      dataType: 'json',
-      timeout: App.timeout,
-      success: function(data){
-        if(data && data.Requests){
-          callback(data.Requests.id);
-        } else{
-          callback(null);
-          console.log('cannot get request id from ', data);
-        }
-      },
-
-      error: function (request, ajaxOptions, error) {
-        callback(null);
-        console.log('error on changing component host status');
-      },
-
-      statusCode: require('data/statusCodes')
-    });
-  },
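-
-  /*
-   * Editor's sketch of a typical call (values mirror startService below):
-   *   this.sendCommandToServer('/services/HDFS', 'PUT',
-   *     {ServiceInfo: {state: 'STARTED'}},
-   *     function (requestId) { ... });
-   * The callback receives data.Requests.id on success and null on failure.
-   */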
-
-  /**
-   * On click callback for <code>start service</code> button
-   * @param event
-   */
-  startService: function (event) {
-    if ($(event.target).hasClass('disabled') || $(event.target.parentElement).hasClass('disabled')) {
-      return;
-    }
-
-    var self = this;
-    App.ModalPopup.show({
-      header: Em.I18n.t('services.service.confirmation.header'),
-      body: Em.I18n.t('services.service.confirmation.body'),
-      primary: 'Yes',
-      secondary: 'No',
-      onPrimary: function () {
-        self.sendCommandToServer('/services/' + self.get('content.serviceName').toUpperCase(), "PUT", {
-          ServiceInfo: {
-            state: 'STARTED'
-          }
-        }, function (requestId) {
-
-          if (!requestId) {
-            return;
-          }
-          console.log('Request for STARTING sent successfully');
-
-          if (App.testMode) {
-            self.set('content.workStatus', App.Service.Health.starting);
-            self.get('content.hostComponents').setEach('workStatus', App.HostComponentStatus.starting);
-            setTimeout(function () {
-              self.set('content.workStatus', App.Service.Health.live);
-              self.get('content.hostComponents').setEach('workStatus', App.HostComponentStatus.started);
-            }, 10000);
-          } else {
-            App.router.get('clusterController').loadUpdatedStatusDelayed(500);
-            App.router.get('backgroundOperationsController.eventsArray').push({
-              "when": function (controller) {
-                var result = (controller.getOperationsForRequestId(requestId).length == 0);
-                console.log('startService.when = ', result);
-                return result;
-              },
-              "do": function () {
-                App.router.get('clusterController').loadUpdatedStatus();
-              }
-            });
-          }
-          App.router.get('backgroundOperationsController').showPopup();
-        });
-        self.set('content.isStopDisabled',true);
-        self.set('content.isStartDisabled',true);
-        this.hide();
-      },
-      onSecondary: function() {
-        this.hide();
-      }
-    });
-  },
-
-  /**
-   * On click callback for <code>stop service</code> button
-   * @param event
-   */
-  stopService: function (event) {
-    if ($(event.target).hasClass('disabled') || $(event.target.parentElement).hasClass('disabled')) {
-      return;
-    }
-
-    var self = this;
-    App.ModalPopup.show({
-      header: Em.I18n.t('services.service.confirmation.header'),
-      body: Em.I18n.t('services.service.confirmation.body'),
-      primary: 'Yes',
-      secondary: 'No',
-      onPrimary: function() {
-        self.sendCommandToServer('/services/' + self.get('content.serviceName').toUpperCase(), "PUT",{
-          ServiceInfo:{
-            state: 'INSTALLED'
-          }
-        }, function (requestId) {
-          if (!requestId) {
-            return;
-          }
-          console.log('Request for STOPPING sent successfully');
-          if (App.testMode) {
-            self.set('content.workStatus', App.Service.Health.stopping);
-            self.get('content.hostComponents').setEach('workStatus', App.HostComponentStatus.stopping);
-            setTimeout(function () {
-              self.set('content.workStatus', App.Service.Health.dead);
-              self.get('content.hostComponents').setEach('workStatus', App.HostComponentStatus.stopped);
-            }, 10000);
-          } else {
-            App.router.get('clusterController').loadUpdatedStatusDelayed(500);
-            App.router.get('backgroundOperationsController.eventsArray').push({
-              "when": function (controller) {
-                var result = (controller.getOperationsForRequestId(requestId).length == 0);
-                console.log('stopService.when = ', result);
-                return result;
-              },
-              "do": function () {
-                App.router.get('clusterController').loadUpdatedStatus();
-              }
-            });
-          }
-          App.router.get('backgroundOperationsController').showPopup();
-        });
-        self.set('content.isStopDisabled',true);
-        self.set('content.isStartDisabled',true);
-        this.hide();
-      },
-      onSecondary: function () {
-        this.hide();
-      }
-    });
-  },
-
-  /**
-   * On click callback for <code>run rebalancer</code> button
-   * @param event
-   */
-  runRebalancer: function (event) {
-    var self = this;
-    App.ModalPopup.show({
-      header: Em.I18n.t('services.service.confirmation.header'),
-      body: Em.I18n.t('services.service.confirmation.body'),
-      primary: 'Yes',
-      secondary: 'No',
-      onPrimary: function() {
-        self.content.set('runRebalancer', true);
-        App.router.get('backgroundOperationsController').showPopup();
-        this.hide();
-      },
-      onSecondary: function() {
-        this.hide();
-      }
-    });
-  },
-
-  /**
-   * On click callback for <code>run compaction</code> button
-   * @param event
-   */
-  runCompaction: function (event) {
-    var self = this;
-    App.ModalPopup.show({
-      header: Em.I18n.t('services.service.confirmation.header'),
-      body: Em.I18n.t('services.service.confirmation.body'),
-      primary: 'Yes',
-      secondary: 'No',
-      onPrimary: function() {
-        self.content.set('runCompaction', true);
-        App.router.get('backgroundOperationsController').showPopup();
-        this.hide();
-      },
-      onSecondary: function() {
-        this.hide();
-      }
-    });
-  },
-
-  /**
-   * On click callback for <code>run smoke test</code> button
-   * @param event
-   */
-  runSmokeTest: function (event) {
-    var self = this;
-    App.ModalPopup.show({
-      header: Em.I18n.t('services.service.confirmation.header'),
-      body: Em.I18n.t('services.service.confirmation.body'),
-      primary: 'Yes',
-      secondary: 'No',
-      onPrimary: function() {
-
-        var serviceName = self.get('content.serviceName').toUpperCase();
-        var smokeName = serviceName + "_SERVICE_CHECK";
-        self.sendCommandToServer('/services/' + serviceName + '/actions/' + smokeName, "POST",
-            null,
-            function (requestId) {
-
-              if (!requestId) {
-                return;
-              }
-              self.content.set('runSmokeTest', true);
-              App.router.get('backgroundOperationsController').showPopup();
-            }
-        );
-        this.hide();
-      },
-      onSecondary: function () {
-        this.hide();
-      }
-    });
-  },
-
-  /**
-   * On click callback for <code>action</code> dropdown menu
-   * @param event
-   */
-  doAction: function (event) {
-    if ($(event.target).hasClass('disabled') || $(event.target.parentElement).hasClass('disabled')) {
-      return;
-    }
-    var methodName = event.context;
-    switch (methodName) {
-      case 'runRebalancer':
-        this.runRebalancer();
-        break;
-      case 'runCompaction':
-        this.runCompaction();
-        break;
-      case 'runSmokeTest':
-        this.runSmokeTest();
-        break;
-    }
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/controllers/wizard.js b/branch-1.2/ambari-web/app/controllers/wizard.js
deleted file mode 100644
index b8177f1..0000000
--- a/branch-1.2/ambari-web/app/controllers/wizard.js
+++ /dev/null
@@ -1,556 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-var App = require('app');
-
-App.WizardController = Em.Controller.extend({
-
-  isStepDisabled: null,
-
-  init: function () {
-    this.set('isStepDisabled', []);
-    this.clusters = App.Cluster.find();
-    this.isStepDisabled.pushObject(Ember.Object.create({
-      step: 1,
-      value: false
-    }));
-    for (var i = 2; i <= this.get('totalSteps'); i++) {
-      this.isStepDisabled.pushObject(Ember.Object.create({
-        step: i,
-        value: true
-      }));
-    }
-    // window.onbeforeunload = function () {
-    // return "You have not saved your document yet.  If you continue, your work will not be saved."
-    //}
-  },
-
-  setStepsEnable: function () {
-    for (var i = 1; i <= this.totalSteps; i++) {
-      var step = this.get('isStepDisabled').findProperty('step', i);
-      if (i <= this.get('currentStep')) {
-        step.set('value', false);
-      } else {
-        step.set('value', true);
-      }
-    }
-  }.observes('currentStep'),
-
-  setLowerStepsDisable: function (stepNo) {
-    for (var i = 1; i < stepNo; i++) {
-      var step = this.get('isStepDisabled').findProperty('step', i);
-      step.set('value', true);
-    }
-  },
-
-  prevInstallStatus: function () {
-    console.log('Inside the prevInstallStatus function: The name is ' + App.router.get('loginController.loginName'));
-    var result = App.db.isCompleted();
-    return result == '1';
-  }.property('App.router.loginController.loginName'),
-
-  /**
-   * Get the current step of this wizard.
-   * The wizard id is derived by stripping the trailing 'Controller' suffix from this controller's name.
-   */
-  currentStep: function () {
-    return App.get('router').getWizardCurrentStep(this.get('name').substr(0, this.get('name').length - 10));
-  }.property(),
-
-  /**
-   * Set current step to new value.
-   * Method moved from App.router.setInstallerCurrentStep
-   * @param currentStep
-   * @param completed
-   */
-  setCurrentStep: function (currentStep, completed) {
-    App.db.setWizardCurrentStep(this.get('name').substr(0, this.get('name').length - 10), currentStep, completed);
-    this.set('currentStep', currentStep);
-  },
-
-  clusters: null,
-
-  isStep1: function () {
-    return this.get('currentStep') == 1;
-  }.property('currentStep'),
-
-  isStep2: function () {
-    return this.get('currentStep') == 2;
-  }.property('currentStep'),
-
-  isStep3: function () {
-    return this.get('currentStep') == 3;
-  }.property('currentStep'),
-
-  isStep4: function () {
-    return this.get('currentStep') == 4;
-  }.property('currentStep'),
-
-  isStep5: function () {
-    return this.get('currentStep') == 5;
-  }.property('currentStep'),
-
-  isStep6: function () {
-    return this.get('currentStep') == 6;
-  }.property('currentStep'),
-
-  isStep7: function () {
-    return this.get('currentStep') == 7;
-  }.property('currentStep'),
-
-  isStep8: function () {
-    return this.get('currentStep') == 8;
-  }.property('currentStep'),
-
-  isStep9: function () {
-    return this.get('currentStep') == 9;
-  }.property('currentStep'),
-
-  isStep10: function () {
-    return this.get('currentStep') == 10;
-  }.property('currentStep'),
-
-  gotoStep: function (step) {
-    if (this.get('isStepDisabled').findProperty('step', step).get('value') !== false) {
-      return;
-    }
-    // if going back from Step 9 in Install Wizard, delete the checkpoint so that the user is not redirected
-    // to Step 9
-    if (this.get('content.controllerName') == 'installerController' && this.get('currentStep') === '9' && step < 9) {
-      App.clusterStatus.setClusterStatus({
-        clusterName: this.get('clusterName'),
-        clusterState: 'CLUSTER_NOT_CREATED_1',
-        wizardControllerName: 'installerController',
-        localdb: App.db.data
-      });
-    }
-    if ((this.get('currentStep') - step) > 1) {
-      App.ModalPopup.show({
-        header: Em.I18n.t('installer.navigation.warning.header'),
-        onPrimary: function () {
-          App.router.send('gotoStep' + step);
-          this.hide();
-        },
-        body: "If you proceed to go back to Step " + step + ", you will lose any changes you have made beyond this step"
-      });
-    } else {
-      App.router.send('gotoStep' + step);
-    }
-  },
-
-  gotoStep1: function () {
-    this.gotoStep(1);
-  },
-
-  gotoStep2: function () {
-    this.gotoStep(2);
-  },
-
-  gotoStep3: function () {
-    this.gotoStep(3);
-  },
-
-  gotoStep4: function () {
-    this.gotoStep(4);
-  },
-
-  gotoStep5: function () {
-    this.gotoStep(5);
-  },
-
-  gotoStep6: function () {
-    this.gotoStep(6);
-  },
-
-  gotoStep7: function () {
-    this.gotoStep(7);
-  },
-
-  gotoStep8: function () {
-    this.gotoStep(8);
-  },
-
-  gotoStep9: function () {
-    this.gotoStep(9);
-  },
-
-  gotoStep10: function () {
-    this.gotoStep(10);
-  },
-
-  /**
-   * Temporary function for wizardStep9, before back-end integration
-   */
-  setInfoForStep9: function () {
-
-    var hostInfo = App.db.getHosts();
-    for (var index in hostInfo) {
-      hostInfo[index].status = "pending";
-      hostInfo[index].message = 'Waiting';
-      hostInfo[index].logTasks = [];
-      hostInfo[index].tasks = [];
-      hostInfo[index].progress = '0';
-    }
-    App.db.setHosts(hostInfo);
-  },
-
-  /**
-   * Remove all data for installOptions step
-   */
-  clearInstallOptions: function () {
-    var installOptions = jQuery.extend({}, this.get('installOptionsTemplate'));
-    this.set('content.installOptions', installOptions);
-    this.save('installOptions');
-    this.set('content.hosts', []);
-    this.save('hosts');
-  },
-
-  toObject: function(object){
-    var result = {};
-    for(var i in object){
-      if(object.hasOwnProperty(i)){
-        result[i] = object[i];
-      }
-    }
-    return result;
-  },
-
-  /**
-   * Save the status of the cluster. Called from step8 and step9 to persist the install and start requestId.
-   * @param clusterStatus object with status, isCompleted, requestId, isInstallError and isStartError fields.
-   */
-  saveClusterStatus: function (clusterStatus) {
-    var oldStatus = this.toObject(this.get('content.cluster'));
-    clusterStatus = jQuery.extend(oldStatus, clusterStatus);
-    if (clusterStatus.requestId &&
-      clusterStatus.oldRequestsId.indexOf(clusterStatus.requestId) === -1){
-      clusterStatus.oldRequestsId.push(clusterStatus.requestId);
-    }
-    this.set('content.cluster', clusterStatus);
-    this.save('cluster');
-  },
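-
-  /*
-   * Editor's sketch (hypothetical values): saveClusterStatus({status: 'PENDING',
-   * requestId: 2}) merges into the existing content.cluster via jQuery.extend
-   * and appends 2 to oldRequestsId unless it was already recorded.
-   */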
-
-  /**
-   * Invoke installation of selected services to the server and saves the request id returned by the server.
-   * @param isRetry
-   */
-  installServices: function (isRetry) {
-
-    // clear requests since we are installing services
-    // and we don't want to get tasks for previous install attempts
-    this.set('content.cluster.oldRequestsId', []);
-    this.set('content.cluster.requestId', null);
-
-    var self = this;
-    var clusterName = this.get('content.cluster.name');
-    var url;
-    var method = (App.testMode) ? 'GET' : 'PUT';
-    var data;
-
-    switch (this.get('content.controllerName')) {
-      case 'addHostController':
-        if (isRetry) {
-          url = App.apiPrefix + '/clusters/' + clusterName + '/host_components?HostRoles/state=INSTALLED';
-          data = '{"HostRoles": {"state": "INSTALLED"}}';
-        } else {
-          url = App.apiPrefix + '/clusters/' + clusterName + '/host_components?HostRoles/state=INIT';
-          data = '{"HostRoles": {"state": "INSTALLED"}}';
-        }
-        break;
-      case 'installerController':
-      default:
-        if (isRetry) {
-          url = (App.testMode) ? '/data/wizard/deploy/2_hosts/poll_1.json' : App.apiPrefix + '/clusters/' + clusterName + '/host_components?HostRoles/state!=INSTALLED';
-          data = '{"HostRoles": {"state": "INSTALLED"}}';
-        } else {
-          url = (App.testMode) ? '/data/wizard/deploy/2_hosts/poll_1.json' : App.apiPrefix + '/clusters/' + clusterName + '/services?ServiceInfo/state=INIT';
-          data = '{"ServiceInfo": {"state": "INSTALLED"}}';
-        }
-        break;
-    }
-
-    $.ajax({
-      type: method,
-      url: url,
-      data: data,
-      async: false,
-      dataType: 'text',
-      timeout: App.timeout,
-      success: function (data) {
-        var jsonData = jQuery.parseJSON(data);
-        var installStartTime = new Date().getTime();
-        console.log("TRACE: In success function for the installService call");
-        console.log("TRACE: value of the url is: " + url);
-        if (jsonData) {
-          var requestId = jsonData.Requests.id;
-          console.log('requestId is: ' + requestId);
-          var clusterStatus = {
-            status: 'PENDING',
-            requestId: requestId,
-            isInstallError: false,
-            isCompleted: false,
-            installStartTime: installStartTime
-          };
-          self.saveClusterStatus(clusterStatus);
-        } else {
-          console.log('ERROR: Error occurred in parsing JSON data');
-        }
-      },
-
-      error: function (request, ajaxOptions, error) {
-        console.log("TRACE: In error function for the installService call");
-        console.log("TRACE: value of the url is: " + url);
-        console.log("TRACE: error code status is: " + request.status);
-        console.log('Error message is: ' + request.responseText);
-        var clusterStatus = {
-          status: 'PENDING',
-          isInstallError: false,
-          isCompleted: false
-        };
-
-        self.saveClusterStatus(clusterStatus);
-      },
-
-      statusCode: require('data/statusCodes')
-    });
-  },
-
-  /*
-   Bootstrap selected hosts.
-   */
-  launchBootstrap: function (bootStrapData) {
-    var self = this;
-    var requestId = null;
-    var method = App.testMode ? 'GET' : 'POST';
-    var url = App.testMode ? '/data/wizard/bootstrap/bootstrap.json' : App.apiPrefix + '/bootstrap';
-    $.ajax({
-      type: method,
-      url: url,
-      async: false,
-      data: bootStrapData,
-      timeout: App.timeout,
-      contentType: 'application/json',
-      success: function (data) {
-        console.log("TRACE: POST bootstrap succeeded");
-        requestId = data.requestId;
-      },
-      error: function () {
-        console.log("ERROR: POST bootstrap failed");
-        alert('Bootstrap call failed.  Please try again.');
-      },
-      statusCode: require('data/statusCodes')
-    });
-    return requestId;
-  },
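-
-  /*
-   * Editor's note: launchBootstrap posts bootStrapData to App.apiPrefix +
-   * '/bootstrap' synchronously (async: false) so the server-assigned
-   * requestId can be returned directly; null indicates the call failed.
-   */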
-
-  /**
-   * Load the <code>content.<name></code> variable from localStorage if it wasn't loaded before.
-   * If <code>reload</code> is true, the value is reloaded.
-   * @param name
-   * @param reload
-   * @return {Boolean}
-   */
-  load: function (name, reload) {
-    if (this.get('content.' + name) && !reload) {
-      return false;
-    }
-    var result = App.db['get' + name.capitalize()]();
-    if (!result){
-      result = this['get' + name.capitalize()]();
-      App.db['set' + name.capitalize()](result);
-      console.log(this.get('name') + ": created " + name, result);
-    }
-    this.set('content.' + name, result);
-    console.log(this.get('name') + ": loaded " + name, result);
-  },
-
-  save: function(name){
-    var value = this.toObject(this.get('content.' + name));
-    App.db['set' + name.capitalize()](value);
-    console.log(this.get('name') + ": saved " + name, value);
-  },
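-
-  /*
-   * Editor's sketch of the load/save naming convention (hypothetical key):
-   *   this.load('cluster') reads App.db.getCluster() into content.cluster;
-   *   this.save('cluster') writes toObject(content.cluster) via App.db.setCluster.
-   * The db accessor is always 'get'/'set' + name.capitalize().
-   */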
-
-  clear: function () {
-    this.set('content', Ember.Object.create({
-      'controllerName': this.get('content.controllerName'),
-      'isWizard': !(this.get('content.controllerName') === 'installerController')
-    }));
-    this.set('currentStep', 0);
-    this.clearStorageData();
-  },
-
-  clusterStatusTemplate : {
-    name: "",
-    status: "PENDING",
-    isCompleted: false,
-    requestId: null,
-    installStartTime: null,
-    installTime: null,
-    isInstallError: false,
-    isStartError: false,
-    oldRequestsId: []
-  },
-
-  clearStorageData: function(){
-    App.db.setService(undefined); //not to use this data at AddService page
-    App.db.setHosts(undefined);
-    App.db.setMasterComponentHosts(undefined);
-    App.db.setSlaveComponentHosts(undefined);
-    App.db.setCluster(undefined);
-    App.db.setAllHostNames(undefined);
-    App.db.setSlaveProperties(undefined);
-    App.db.setInstallOptions(undefined);
-    App.db.setAllHostNamesPattern(undefined);
-  },
-
-  installOptionsTemplate: {
-    hostNames: "", //string
-    manualInstall: false, //true, false
-    useSsh: true, //bool
-    isJavaHome : false, //bool
-    javaHome: App.defaultJavaHome, //string
-    localRepo: false, //true, false
-    sshKey: "", //string
-    bootRequestId: null //string
-  },
-  /**
-   * Generate serviceComponents as per the stack definition and save them to local data.
-   * Called from stepController step4WizardController.
-   */
-  loadServiceComponents: function (displayOrderConfig, apiUrl) {
-    var result = null;
-    var method = 'GET';
-    var testUrl = '/data/wizard/stack/hdp/version/1.2.0.json';
-    var url = (App.testMode) ? testUrl : App.apiPrefix + apiUrl;
-    $.ajax({
-      type: method,
-      url: url,
-      async: false,
-      dataType: 'text',
-      timeout: App.timeout,
-      success: function (data) {
-        var jsonData = jQuery.parseJSON(data);
-        console.log("TRACE: getService ajax call  -> In success function for the getServiceComponents call");
-        console.log("TRACE: jsonData.services : " + jsonData.services);
-
-        // Creating Model
-        var Service = Ember.Object.extend({
-          serviceName: null,
-          displayName: null,
-          isDisabled: true,
-          isSelected: true,
-          isInstalled: false,
-          description: null,
-          version: null
-        });
-
-        // use a distinct name so the response parameter 'data' is not shadowed
-        var servicesData = [];
-
-        // loop through all the service components
-        for (var i = 0; i < displayOrderConfig.length; i++) {
-          var entry = jsonData.services.findProperty("name", displayOrderConfig[i].serviceName);
-
-          var myService = Service.create({
-            serviceName: entry.name,
-            displayName: displayOrderConfig[i].displayName,
-            isDisabled: i === 0,
-            isSelected: true,
-            isInstalled: false,
-            isHidden: displayOrderConfig[i].isHidden,
-            description: entry.comment,
-            version: entry.version
-          });
-
-          servicesData.push(myService);
-        }
-
-        result = servicesData;
-        console.log('TRACE: service components: ' + JSON.stringify(servicesData));
-
-      },
-
-      error: function (request, ajaxOptions, error) {
-        console.log("TRACE: STep5 -> In error function for the getServiceComponents call");
-        console.log("TRACE: STep5 -> value of the url is: " + url);
-        console.log("TRACE: STep5 -> error code status is: " + request.status);
-        console.log('Step8: Error message is: ' + request.responseText);
-      },
-
-      statusCode: require('data/statusCodes')
-    });
-    return result;
-  },
-
-  loadServicesFromServer: function() {
-    var services = App.db.getService();
-    if (services) {
-      return;
-    }
-    var displayOrderConfig = require('data/services');
-    var apiUrl = App.get('stackVersionURL');
-    var apiService = this.loadServiceComponents(displayOrderConfig, apiUrl);
-    this.set('content.services', apiService);
-    App.db.setService(apiService);
-  },
-
-  /**
-   * Load properties for group of slaves to model
-   */
-  loadSlaveGroupProperties: function () {
-    var groupConfigProperties = App.db.getSlaveProperties() ? App.db.getSlaveProperties() : this.get('content.slaveComponentHosts');
-    if (groupConfigProperties) {
-      groupConfigProperties.forEach(function (_slaveComponentObj) {
-        if (_slaveComponentObj.groups) {
-          var groups = [];
-          _slaveComponentObj.groups.forEach(function (_group) {
-            var properties = [];
-            _group.properties.forEach(function (_property) {
-              var property = App.ServiceConfigProperty.create(_property);
-              property.set('value', _property.storeValue);
-              properties.pushObject(property);
-            }, this);
-            _group.properties = properties;
-            groups.pushObject(App.Group.create(_group));
-          }, this);
-          _slaveComponentObj.groups = groups;
-        }
-      }, this);
-    }
-    this.set('content.slaveGroupProperties', groupConfigProperties);
-  },
-
-  registerErrPopup: function (header, message) {
-    App.ModalPopup.show({
-      header: header,
-      secondary: false,
-      onPrimary: function () {
-        this.hide();
-      },
-      bodyClass: Ember.View.extend({
-        template: Ember.Handlebars.compile(['<p>{{view.message}}</p>'].join('\n')),
-        message: message
-      })
-    });
-  }
-
-});
diff --git a/branch-1.2/ambari-web/app/controllers/wizard/slave_component_groups_controller.js b/branch-1.2/ambari-web/app/controllers/wizard/slave_component_groups_controller.js
deleted file mode 100644
index 7d5a5d6..0000000
--- a/branch-1.2/ambari-web/app/controllers/wizard/slave_component_groups_controller.js
+++ /dev/null
@@ -1,364 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-var App = require('app');
-/**
- * Used to manage slave component config. The user can create different settings for separate groups.
- * @type {*}
- */
-App.SlaveComponentGroupsController = Em.ArrayController.extend({
-
-  name: 'slaveComponentGroupsController',
-
-  contentBinding: 'App.router.wizardStep7Controller.slaveComponentHosts',
-
-  stepConfigsBinding: 'App.router.wizardStep7Controller.stepConfigs',
-
-  serviceBinding: 'App.router.wizardStep7Controller.selectedService',
-
-  servicesBinding: 'App.router.wizardStep7Controller.content.services',
-
-  clearStep: function () {
-
-  },
-
-  loadStep: function () {
-    this.clearStep();
-    this.loadGroups();
-  },
-
-  loadGroups: function () {
-
-    this.get('stepConfigs').forEach(function (_serviceConfig) {
-      var categoryConfig = _serviceConfig.get('configCategories');
-      if (categoryConfig.someProperty('isForSlaveComponent', true)) {
-        var slaveCategory = categoryConfig.findProperty('isForSlaveComponent', true);
-        // this.get('content') -> Output of Step 6: Mapping of each slave component and set of hosts it runs on
-        if (this.get('content')) {
-          if (this.get('content').someProperty('componentName', slaveCategory.get('primaryName'))) {
-            // component --> each column in Step 6 is a component ( slave component )
-            var component = this.get('content').findProperty('componentName', slaveCategory.get('primaryName'));
-            // slaveConfigs --> originally set as null in the class App.SlaveCategory in model/service_config.js
-            var slaveConfigs = slaveCategory.get('slaveConfigs');
-            
-            slaveCategory.set('slaveConfigs', App.SlaveConfigs.create(component));
-            var slaveGroups = [];
-            if (component.groups) {
-              component.groups.forEach(function (_group) {
-                slaveGroups.pushObject(_group);
-              }, this);
-              slaveCategory.set('slaveConfigs.groups', slaveGroups);
-            }
-            slaveCategory.set('slaveConfigs.componentName', component.componentName);
-            slaveCategory.set('slaveConfigs.displayName', component.displayName);
-            /*slaveCategory.set('slaveConfigs.groups.name', component.get('name'));
-             slaveCategory.set('slaveConfigs.groups.index', component.get('index'));
-             slaveCategory.set('slaveConfigs.groups.type', component.get('type'));
-             slaveCategory.set('slaveConfigs.groups.active', component.get('active'));*/
-            if (!slaveCategory.get('slaveConfigs.groups')) {
-              slaveCategory.set('slaveConfigs.groups', []);
-              var componentProperties = this.componentProperties(_serviceConfig.serviceName);
-              var defaultGroup = {name: 'Default', index: 'default', type: 'default', active: true, properties: componentProperties};
-              slaveCategory.get('slaveConfigs.groups').pushObject(App.Group.create(defaultGroup));
-            }
-          }
-        }
-      }
-    }, this);
-  },
-
-  // Returns key-value pairs, i.e. all slave component fields for this specific service.
-  componentProperties: function (serviceName) {
-
-    var serviceConfigs = require('data/service_configs').findProperty('serviceName', serviceName);
-
-    var configs = [];
-    var componentName = null;
-    switch (serviceName) {
-      case 'HDFS':
-        componentName = 'DataNode';
-        break;
-      case 'MAPREDUCE':
-        componentName = 'TaskTracker';
-        break;
-      case 'HBASE':
-        componentName = 'RegionServer';
-    }
-    var slaveConfigs = serviceConfigs.configs.filterProperty('category', componentName);
-    slaveConfigs.forEach(function (_serviceConfigProperty) {
-      var serviceConfigProperty = App.ServiceConfigProperty.create(_serviceConfigProperty);
-
-      switch(serviceConfigProperty.name){
-        case 'dfs_data_dir' :
-          serviceConfigProperty.initialValue();
-          break;
-        case 'mapred_local_dir' :
-          serviceConfigProperty.initialValue();
-          break;
-      }
-      configs.pushObject(serviceConfigProperty);
-      serviceConfigProperty.validate();
-    }, this);
-    return configs;
-  },
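-
-  /*
-   * Editor's sketch: componentProperties('HDFS') filters data/service_configs
-   * for category 'DataNode', so the returned array holds properties such as
-   * 'dfs_data_dir' (handled specially above), each wrapped in an
-   * App.ServiceConfigProperty and validated.
-   */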
-
-  selectedComponentName: function () {
-    switch (App.router.get('wizardStep7Controller.selectedService.serviceName')) {
-      case 'HDFS':
-        return { name: 'DATANODE',
-          displayName: 'DataNode'};
-      case 'MAPREDUCE':
-        return { name: 'TASKTRACKER',
-          displayName: 'TaskTracker'};
-
-      case 'HBASE':
-        return { name: 'HBASE_REGIONSERVER',
-          displayName: 'RegionServer'};
-      default:
-        return null;
-    }
-
-  }.property('service'),
-
-  selectedComponentDisplayName: function() {
-    return App.format.role(this.get('selectedComponentName'));
-  }.property('selectedComponentName'),
-
-  selectedSlaveComponent: function () {
-    var selectedComponentName = this.get('selectedComponentName') ? this.get('selectedComponentName').displayName : null;
-    var configs = null;
-    if (selectedComponentName) {
-      App.router.get('wizardStep7Controller.stepConfigs').forEach(function (_serviceConfig) {
-        var categoryConfig = _serviceConfig.get('configCategories');
-        if (categoryConfig.someProperty('name', selectedComponentName)) {
-          configs = categoryConfig.findProperty('name', selectedComponentName).get('slaveConfigs');
-        }
-      }, this);
-    }
-    return configs;
-  }.property('selectedComponentName', 'stepConfigs.@each.configCategories', 'stepConfigs.@each.configCategories.@each.slaveConfigs'),
-
-  hosts: function () {
-    if (this.get('selectedSlaveComponent')) {
-      return this.get('selectedSlaveComponent').get('hosts');
-    }
-  }.property('selectedSlaveComponent'),
-
-  groups: function () {
-    var hosts = this.get('hosts');
-    if(hosts){
-      return hosts.mapProperty('group').uniq();
-    }
-  }.property('hosts'),
-
-  componentGroups: function () {
-    var component = this.get('selectedSlaveComponent');
-    if (component && component.get('groups')) {
-      return component.get('groups');
-    }
-    return [];
-  }.property('selectedSlaveComponent', 'selectedSlaveComponent.groups', 'stepConfigs.@each.configCategories.@each.slaveConfigs.groups.@each.properties.@each.value'),
-
-
-  getGroupsForDropDown: function () {
-    return this.get('componentGroups').getEach('name');
-  }.property('selectedComponentName', 'componentGroups.@each.name'),
-
-  activeGroup: function () {
-    var componentGroups = this.get('componentGroups');
-    if (componentGroups) {
-      var active = componentGroups.findProperty('active', true);
-      if (active){
-        return active;
-      }
-    }
-    return null;
-  }.property('componentGroups.@each.active', 'componentGroups.@each.name', 'componentGroups.@each.properties.@each.value'),
-
-
-  /**
-   * Show slave hosts to groups popup
-   * @param event
-   */
-  showAddSlaveComponentGroup: function (event) {
-    var componentName = event.context;
-    var component = this.get('selectedSlaveComponent');
-    App.ModalPopup.show({
-      header: componentName + ' Groups',
-      bodyClass: Ember.View.extend({
-        controllerBinding: 'App.router.slaveComponentGroupsController',
-        header: Em.I18n.t('installer.slaveComponentHostsPopup.header').format(this.get('selectedComponentDisplayName')),
-        templateName: require('templates/wizard/slave_component_hosts_popup')
-      }),
-      onPrimary: function (event) {
-        if (component.tempSelectedGroups && component.tempSelectedGroups.length) {
-          component.tempSelectedGroups.forEach(function (item) {
-            var changed = component.get('hosts').filterProperty('hostName', item.hostName);
-            changed.setEach('group', item.groupName);
-          })
-        }
-        delete component.tempSelectedGroups;
-        this.hide();
-      },
-      onSecondary: function (event) {
-        delete component.tempSelectedGroups;
-        this.hide();
-      },
-      onClose: function (event) {
-        delete component.tempSelectedGroups;
-        this.hide();
-      }
-    });
-  },
-
-  /**
-   * Utility method. Save temporary info about changes in <code>slave hosts to groups</code> popup
-   * @param host
-   * @param groupName
-   */
-  changeHostGroup: function (host, groupName) {
-    var component = this.get('selectedSlaveComponent');
-    if (component.tempSelectedGroups === undefined) {
-      component.tempSelectedGroups = [];
-    }
-    var values = component.tempSelectedGroups.filterProperty('hostName', host.hostName);
-    if (values.length === 0) {
-      component.tempSelectedGroups.pushObject({hostName: host.hostName, groupName: groupName});
-    } else {
-      values.setEach('groupName', groupName);
-    }
-
-  },
-
-  /**
-   * Add a new group to the component (on button click)
-   */
-  addSlaveComponentGroup: function () {
-    var component = this.get('selectedSlaveComponent');
-    var newGroupName = 'New Group';
-    component.get('groups').setEach('active', false);
-    var newGroups = component.get('groups').filterProperty('name', newGroupName);
-    if (newGroups.length === 0){
-      component.newGroupIndex = 0;
-    }
-    else {
-      component.newGroupIndex = component.newGroupIndex || 0;
-      this.checkGroupName();
-      newGroupName = 'New Group ' + component.newGroupIndex;
-    }
-    var newGroup = {name: newGroupName, index: component.newGroupIndex, type: 'new', active: true, properties: this.componentProperties(App.router.get('wizardStep7Controller.selectedService.serviceName'))};
-    component.groups.pushObject(App.Group.create(newGroup));
-    $('.remove-group-error').hide();
-  },
-
-  checkGroupName: function () {
-    var component = this.get('selectedSlaveComponent');
-    component.newGroupIndex++;
-    var newGroupName = 'New Group ' + component.newGroupIndex;
-    var groups = component.get('groups').filterProperty('name', newGroupName);
-    if (groups.length !== 0) {
-      this.checkGroupName();
-    }
-  },
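-
-  /*
-   * Editor's sketch: with existing groups ['New Group', 'New Group 1'],
-   * addSlaveComponentGroup calls checkGroupName to bump newGroupIndex until
-   * an unused name is found, yielding 'New Group 2' for the added group.
-   */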
-
-  /**
-   * Onclick handler for <code>choose hosts for slave group</code> link
-   * @param event
-   */
-  showEditSlaveComponentGroups: function (event) {
-    this.showAddSlaveComponentGroup(event);
-  },
-
-  getHostsByGroup: function (group) {
-    var hosts = this.get('hosts');
-    if(hosts){
-      return hosts.filterProperty('group', group.name);
-    }
-  },
-
-  /**
-   * Change tab
-   * @param event
-   */
-  showSlaveComponentGroup: function (event) {
-    var component = this.get('selectedSlaveComponent');
-    // guard: nothing to toggle if the component has no groups yet
-    if (!component.groups) {
-      return;
-    }
-    component.get('groups').setEach('active', false);
-    var group = component.get('groups').filterProperty('name', event.context.name);
-    group.setEach('active', true);
-    var assignedHosts = component.get('hosts').filterProperty('group', event.context.name);
-    if (assignedHosts.length === 0) {
-      $('.remove-group-error').hide();
-    }
-  },
-
-  /**
-   * Remove tab
-   * @param event
-   */
-  removeSlaveComponentGroup: function (event) {
-    var group = event.context;
-    var component = this.get('selectedSlaveComponent');
-    var assignedHosts = component.get('hosts').filterProperty('group', group.name);
-    if (assignedHosts.length !== 0) {
-      $('.remove-group-error').show();
-    } else {
-      $('.remove-group-error').hide();
-      var key = component.groups.indexOf(group);
-      component.groups.removeObject(component.groups[key]);
-
-      var newGroups = component.groups.filterProperty('type', 'new');
-      if (newGroups.length === 0) {
-        component.newGroupIndex = 0;
-      } else {
-        var lastNewGroup = newGroups[newGroups.length - 1];
-        component.newGroupIndex = lastNewGroup.index;
-      }
-      if (group.active) {
-        var lastGroup;
-        if (key === component.groups.length) {
-          lastGroup = component.groups.slice(key - 1, key);
-        } else {
-          lastGroup = component.groups.slice(key, key + 1);
-        }
-        lastGroup.setEach('active', true);
-      }
-    }
-  },
-
-  /**
-   * Change the group name of a slave component
-   * @param group
-   * @param newGroupName
-   * @return {Boolean} true if a group with newGroupName already exists (rename rejected), false otherwise
-   */
-  changeSlaveGroupName: function (group, newGroupName) {
-    var component = this.get('selectedSlaveComponent');
-    var isExist = component.get('groups').filterProperty('name', newGroupName);
-    if (isExist.length !== 0) {
-      return true;
-    } else {
-      var assignedHosts = component.get('hosts').filterProperty('group', group.name);
-      if (assignedHosts.length !== 0){
-        assignedHosts.setEach('group', newGroupName);
-      }
-      var groupFilter = component.get('groups').filterProperty('name', group.name);
-      groupFilter.setEach('name', newGroupName);
-    }
-    return false;
-  }
-
-});
diff --git a/branch-1.2/ambari-web/app/controllers/wizard/step10_controller.js b/branch-1.2/ambari-web/app/controllers/wizard/step10_controller.js
deleted file mode 100644
index 70ea90d..0000000
--- a/branch-1.2/ambari-web/app/controllers/wizard/step10_controller.js
+++ /dev/null
@@ -1,370 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.WizardStep10Controller = Em.Controller.extend({
-  clusterInfo: [],
-
-  /**
-   * Check whether RegionServers were added; if so, the NAGIOS service should be
-   * restarted to update the number of RegionServers it monitors
-   */
-  isNagiosRestartRequired: function() {
-    return this.get('content.controllerName') !== 'installerController' && App.Service.find('NAGIOS').get('isLoaded');
-  }.property(),
-
-  clearStep: function () {
-    this.get('clusterInfo').clear();
-  },
-
-  loadStep: function () {
-    console.log("TRACE: Loading step10: Summary Page");
-    this.clearStep();
-    this.loadInstalledHosts(this.loadRegisteredHosts());
-    var installFlag = true;
-    var startFlag = true;
-    if (this.get('content.controllerName') == 'installerController') {
-      installFlag = this.loadMasterComponents();
-      startFlag = this.loadStartedServices();
-    }
-    if (installFlag && startFlag) {
-      this.loadInstallTime();
-    }
-  },
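-
-  /*
-   * Editor's note: loadStep builds the summary top-down -- registered hosts
-   * (id 1) first, then, for a fresh install only, master components (id 2)
-   * and started services (ids 3 and 4); install time is reported only when
-   * both of the latter succeed.
-   */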
-
-  loadRegisteredHosts: function () {
-    var masterHosts = this.get('content.masterComponentHosts').mapProperty('hostName').uniq();
-    var slaveHosts = this.get('content.slaveComponentHosts');
-    var hostObj = [];
-    slaveHosts.forEach(function (_hosts) {
-      hostObj = hostObj.concat(_hosts.hosts);
-    }, this);
-    slaveHosts = hostObj.mapProperty('hostName').uniq();
-    var registeredHosts = App.Host.find().mapProperty('hostName').concat(masterHosts.concat(slaveHosts)).uniq();
-    var registerHostsStatement = 'The cluster consists of ' + registeredHosts.length + ' hosts';
-    var registerHostsObj = Ember.Object.create({
-      id: 1,
-      color: 'text-info',
-      displayStatement: registerHostsStatement,
-      status: []
-    });
-    this.get('clusterInfo').pushObject(registerHostsObj);
-
-    return registerHostsObj;
-  },
-
-  loadInstalledHosts: function (host) {
-    var hosts = this.get('content.hosts');
-    var hostsInfo = [];
-    for (var index in hosts) {
-      hostsInfo.pushObject(hosts[index]);
-      console.log('Step10 SUMMARY: value of hosts is: ' + hosts[index].status);
-    }
-    var succeededHosts = hostsInfo.filterProperty('status', 'success');
-    var warnedHosts = hostsInfo.filterProperty('status', 'warning').concat(hostsInfo.filterProperty('status', 'failed'));
-    if (succeededHosts.length) {
-      var successStatement = 'Installed and started services successfully on ' + succeededHosts.length + ' new ' + ((succeededHosts.length > 1) ? 'hosts' : 'host');
-      this.get('clusterInfo').findProperty('id', 1).get('status').pushObject(Ember.Object.create({
-        id: 1,
-        color: 'text-success',
-        displayStatement: successStatement
-      }));
-    }
-
-    if (warnedHosts.length) {
-      var warnStatement = warnedHosts.length + ' warnings';
-      this.get('clusterInfo').findProperty('id', 1).get('status').pushObject(Ember.Object.create({
-        id: 2,
-        color: 'text-warning',
-        displayStatement: warnStatement,
-        statements: []
-      }));
-
-      warnedHosts.forEach(function (_host) {
-        var clusterState;
-        console.log("Content.cluster.status is: " + this.get('content.cluster.status'));
-        if (this.get('content.cluster.status') === 'INSTALL FAILED') {
-          clusterState = 'Installing ';
-        } else if (this.get('content.cluster.status') === 'START FAILED') {
-          clusterState = 'Starting ';
-        }
-        console.log('host value is: ' + JSON.stringify(_host));
-        var failedTasks = _host.tasks.filterProperty('Tasks.status', 'FAILED');
-        failedTasks.forEach(function (_task) {
-          var taskStatement = clusterState + App.format.role(_task.Tasks.role) + ' failed on ' + _host.name;
-          this.get('clusterInfo').findProperty('id', 1).get('status').findProperty('id', 2).get('statements').pushObject(Ember.Object.create({
-            status: 'failed',
-            color: 'text-info',
-            displayStatement: taskStatement
-          }));
-        }, this);
-
-        var abortedTasks = _host.tasks.filterProperty('Tasks.status', 'ABORTED');
-        abortedTasks.forEach(function (_task) {
-          var abortStatement = clusterState + App.format.role(_task.Tasks.role) + ' aborted on ' + _host.name;
-          this.get('clusterInfo').findProperty('id', 1).get('status').findProperty('id', 2).get('statements').pushObject(Ember.Object.create({
-            status: 'aborted',
-            color: 'text-info',
-            displayStatement: abortStatement
-          }));
-        }, this);
-
-        var timedOutTasks = _host.tasks.filterProperty('Tasks.status', 'TIMEDOUT');
-        timedOutTasks.forEach(function (_task) {
-          var timedOutStatement = clusterState + App.format.role(_task.Tasks.role) + ' timed out on ' + _host.name;
-          this.get('clusterInfo').findProperty('id', 1).get('status').findProperty('id', 2).get('statements').pushObject(Ember.Object.create({
-            status: 'timedout',
-            color: 'text-info',
-            displayStatement: timedOutStatement
-          }));
-        }, this);
-      }, this);
-    }
-  },
-
-  loadMasterComponents: function () {
-    var components = this.get('content.masterComponentHosts');
-    if (this.get('content.cluster.status') === 'INSTALL FAILED') {
-      this.get('clusterInfo').pushObject(Ember.Object.create({
-        id: 2,
-        displayStatement: 'Installing master services failed',
-        color: 'text-error',
-        status: []
-      }));
-      return false;
-    } else {
-      this.get('clusterInfo').pushObject(Ember.Object.create({
-        id: 2,
-        displayStatement: 'Master services installed',
-        color: 'text-success',
-        status: []
-      }));
-    }
-
-    console.log('STEP10 master components:  ' + JSON.stringify(components));
-    components.forEach(function (_component) {
-      var component = Ember.Object.create(_component);
-      switch (component.component) {
-        case 'NAMENODE':
-          this.loadNn(component);
-          break;
-        case 'SECONDARY_NAMENODE':
-          this.loadSnn(component);
-          break;
-        case 'JOBTRACKER' :
-          this.loadJt(component);
-          break;
-        case 'ZOOKEEPER_SERVER' :
-          // TODO: Fix this; redundant entries and wrong number
-          //this.loadZk(component);
-          break;
-        case 'HBASE_MASTER':
-          this.loadHb(component);
-          break;
-        case 'HIVE_SERVER':
-          this.loadHiveServer(component);
-          break;
-        case 'OOZIE_SERVER':
-          this.loadOozieServer(component);
-          break;
-        case 'GANGLIA_SERVER':
-          this.loadGanglia(component);
-          break;
-        case 'NAGIOS_SERVER':
-          this.loadNagios(component);
-          break;
-      }
-    }, this);
-    return true;
-  },
-
-  loadNn: function (component) {
-    if (component.get('hostName')) {
-      var statement = 'NameNode installed on ' + component.get('hostName');
-      this.get('clusterInfo').findProperty('id', 2).get('status').pushObject(Ember.Object.create({
-        id: 1,
-        color: 'text-info',
-        displayStatement: statement
-      }));
-    } else {
-      console.log('ERROR: no host name assigned to NameNode component');
-    }
-  },
-
-  loadSnn: function (component) {
-    if (component.get('hostName')) {
-      var statement = 'SecondaryNameNode installed on ' + component.get('hostName');
-      this.get('clusterInfo').findProperty('id', 2).get('status').pushObject(Ember.Object.create({
-        id: 1,
-        color: 'text-info',
-        displayStatement: statement
-      }));
-    } else {
-      console.log('ERROR: no host name assigned to SecondaryNameNode component');
-    }
-  },
-
-  loadJt: function (component) {
-    if (component.get('hostName')) {
-      var statement = 'JobTracker installed on ' + component.get('hostName');
-      this.get('clusterInfo').findProperty('id', 2).get('status').pushObject(Ember.Object.create({
-        id: 1,
-        color: 'text-info',
-        displayStatement: statement
-      }));
-    } else {
-      console.log('ERROR: no host name assigned to JobTracker component');
-    }
-  },
-
-  loadZk: function (component) {
-    var hostLength = component.get('hostName').length;
-    if (hostLength) {
-      var hostVal;
-      if (hostLength === 1) {
-        hostVal = 'host';
-      } else {
-        hostVal = 'hosts';
-      }
-      var statement = 'ZooKeeper installed on ' + component.get('hostName').length + ' ' + hostVal;
-      this.get('clusterInfo').findProperty('id', 2).get('status').pushObject(Ember.Object.create({
-        id: 1,
-        color: 'text-info',
-        displayStatement: statement
-      }));
-    } else {
-      console.log('ERROR: no host name assigned to Zookeeper component');
-    }
-  },
-
-  loadHb: function (component) {
-    if (component.get('hostName')) {
-      var statement = 'HBase Master installed on ' + component.get('hostName');
-      this.get('clusterInfo').findProperty('id', 2).get('status').pushObject(Ember.Object.create({
-        id: 1,
-        color: 'text-info',
-        displayStatement: statement
-      }));
-    } else {
-      console.log('ERROR: no host name assigned to HBase Master component');
-    }
-  },
-
-  loadHiveServer: function (component) {
-    if (component.get('hostName')) {
-      var statement = 'Hive Server installed on ' + component.get('hostName');
-      this.get('clusterInfo').findProperty('id', 2).get('status').pushObject(Ember.Object.create({
-        id: 1,
-        color: 'text-info',
-        displayStatement: statement
-      }));
-    } else {
-      console.log('ERROR: no host name assigned to Hive server component');
-    }
-  },
-
-  loadOozieServer: function (component) {
-    if (component.get('hostName')) {
-      var statement = 'Oozie Server installed on ' + component.get('hostName');
-      this.get('clusterInfo').findProperty('id', 2).get('status').pushObject(Ember.Object.create({
-        id: 1,
-        color: 'text-info',
-        displayStatement: statement
-      }));
-    } else {
-      console.log('ERROR: no host name assigned to Oozie server component');
-    }
-  },
-
-  loadGanglia: function (component) {
-    if (component.get('hostName')) {
-      var statement = 'Ganglia Server installed on ' + component.get('hostName');
-      this.get('clusterInfo').findProperty('id', 2).get('status').pushObject(Ember.Object.create({
-        id: 1,
-        color: 'text-info',
-        displayStatement: statement
-      }));
-    } else {
-      console.log('ERROR: no host name assigned to Ganglia server component');
-    }
-  },
-
-  loadNagios: function (component) {
-    if (component.get('hostName')) {
-      var statement = 'Nagios Server installed on ' + component.get('hostName');
-      this.get('clusterInfo').findProperty('id', 2).get('status').pushObject(Ember.Object.create({
-        id: 1,
-        color: 'text-info',
-        displayStatement: statement
-      }));
-    } else {
-      console.log('ERROR: no host name assigned to Nagios server component');
-    }
-  },
-
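-  /**
-   * Add summary entries for service startup (id 3) and smoke tests (id 4),
-   * based on the cluster status persisted by the previous steps.
-   */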
-  loadStartedServices: function () {
-    if (this.get('content.cluster.status') === 'STARTED') {
-      this.get('clusterInfo').pushObject(Ember.Object.create({
-        id: 3,
-        color: 'text-success',
-        displayStatement: 'All services started',
-        status: []
-      }));
-      this.get('clusterInfo').pushObject(Ember.Object.create({
-        id: 4,
-        color: 'text-success',
-        displayStatement: 'All tests passed',
-        status: []
-      }));
-      return true;
-    } else {
-      this.get('clusterInfo').pushObject(Ember.Object.create({
-        id: 3,
-        color: 'text-error',
-        displayStatement: 'Starting services failed',
-        status: []
-      }));
-      return false;
-    }
-  },
-
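-  /**
-   * Add a summary entry (id 5) with the total install and start time;
-   * the persisted installTime value is a fractional number of minutes.
-   */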
-  loadInstallTime: function () {
-    var secondsPerMinute = 60;
-    if (this.get('content.cluster.installTime')) {
-      var minutes = Math.floor(this.get('content.cluster.installTime'));
-      var seconds = Math.floor((this.get('content.cluster.installTime') - minutes) * secondsPerMinute);
-      var statement;
-      if (minutes !== 0) {
-        statement = 'Install and start completed in ' + minutes + ' minutes and ' + seconds + ' seconds';
-      } else {
-        statement = 'Install and start completed in ' + seconds + ' seconds';
-      }
-      this.get('clusterInfo').pushObject(Ember.Object.create({
-        id: 5,
-        color: 'text-info',
-        displayStatement: statement,
-        status: []
-      }));
-    }
-  }
-});
-
diff --git a/branch-1.2/ambari-web/app/controllers/wizard/step1_controller.js b/branch-1.2/ambari-web/app/controllers/wizard/step1_controller.js
deleted file mode 100644
index 58069ce..0000000
--- a/branch-1.2/ambari-web/app/controllers/wizard/step1_controller.js
+++ /dev/null
@@ -1,73 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.WizardStep1Controller = Em.Controller.extend({
-  name: 'wizardStep1Controller',
-
-  hasSubmitted : false,
-
-  loadStep: function () {
-    this.set('hasSubmitted',false);
-  },
-  /**
-   * validate cluster name
-   * set clusterNameError if validation fails
-   */
-  invalidClusterName : function(){
-    if(!this.get('hasSubmitted')){
-      this.set('clusterNameError', '');
-      return false;
-    }
-
-    var clusterName = this.get('content.cluster.name');
-    if (clusterName == '') {
-      this.set('clusterNameError', Em.I18n.t('installer.step1.clusterName.error.required'));
-      return true;
-    } else if (/\s/.test(clusterName)) {
-      this.set('clusterNameError', Em.I18n.t('installer.step1.clusterName.error.whitespaces'));
-      return true;
-    } else if (/[^\w\s]/gi.test(clusterName)) {
-      this.set('clusterNameError', Em.I18n.t('installer.step1.clusterName.error.specialChar'));
-      return true;
-    } else {
-      this.set('clusterNameError', '');
-      return false;
-    }
-  }.property('hasSubmitted', 'content.cluster.name').cacheable(),
-
-  /**
-   * calculated by the <code>invalidClusterName</code> property
-   */
-  clusterNameError: '',
-
-  /**
-   * Onclick handler for <code>next</code> button
-   */
-  submit: function () {
-    this.set('hasSubmitted', true);
-    if (!this.get('invalidClusterName')) {
-      App.clusterStatus.set('clusterName', this.get('content.cluster.name'));
-      this.set('content.cluster.status', 'PENDING');
-      this.set('content.cluster.isCompleted', false);
-      App.router.send('next');
-    }
-  }
-
-});
diff --git a/branch-1.2/ambari-web/app/controllers/wizard/step2_controller.js b/branch-1.2/ambari-web/app/controllers/wizard/step2_controller.js
deleted file mode 100644
index bb17f28..0000000
--- a/branch-1.2/ambari-web/app/controllers/wizard/step2_controller.js
+++ /dev/null
@@ -1,304 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-var validator = require('utils/validator');
-
-App.WizardStep2Controller = Em.Controller.extend({
-  name: 'wizardStep2Controller',
-  hostNameArr: [],
-  isPattern: false,
-  bootRequestId:  null,
-  hasSubmitted: false,
-  inputtedAgainHostNames: [],
-  hostNames: function () {
-    return this.get('content.installOptions.hostNames');
-  }.property('content.installOptions.hostNames'),
-
-  manualInstall: function () {
-    return this.get('content.installOptions.manualInstall');
-  }.property('content.installOptions.manualInstall'),
-
-  sshKey: function () {
-    return this.get('content.installOptions.sshKey');
-  }.property('content.installOptions.sshKey'),
-
-  installType: function () {
-    return this.get('manualInstall') ? 'manualDriven' : 'ambariDriven';
-  }.property('manualInstall'),
-
-  isHostNameValid: function (hostname) {
-    // hostname validation is disabled because we don't want to be too
-    // restrictive and reject users' real hostnames
-    // return validator.isHostname(hostname) && (!(/^\-/.test(hostname) || /\-$/.test(hostname)));
-    return true;
-  },
-  /**
-   * set hostNameArr to the hosts that have not already been installed
-   */
-  updateHostNameArr: function(){
-    this.hostNameArr = this.get('hostNames').trim().split(new RegExp("\\s+", "g"));
-    this.patternExpression();
-    this.get('inputtedAgainHostNames').clear();
-    var installedHostNames = App.Host.find().mapProperty('hostName');
-    var tempArr = [];
-    for (var i = 0; i < this.hostNameArr.length; i++) {
-      if (!installedHostNames.contains(this.hostNameArr[i])) {
-        tempArr.push(this.hostNameArr[i]);
-      } else {
-        this.get('inputtedAgainHostNames').push(this.hostNameArr[i]);
-      }
-    }
-    this.set('hostNameArr', tempArr);
-  },
-  /**
-   * validate host names
-   * @return {Boolean}
-   */
-  isAllHostNamesValid: function () {
-    var self = this;
-    var result = true;
-    this.updateHostNameArr();
-
-    this.hostNameArr.forEach(function(hostName){
-      if (!self.isHostNameValid(hostName)) {
-        result = false;
-      }
-    });
-
-    return result;
-  },
-
-  hostsError: null,
-  /**
-   * set hostsError if host names don't pass validation
-   */
-  checkHostError: function () {
-    if (this.get('hostNames').trim() === '') {
-      this.set('hostsError', Em.I18n.t('installer.step2.hostName.error.required'));
-    }
-    else {
-      if (this.isAllHostNamesValid() === false) {
-        this.set('hostsError', Em.I18n.t('installer.step2.hostName.error.invalid'));
-      }
-      else {
-        this.set('hostsError', null);
-      }
-    }
-  },
-
-  checkHostAfterSubmitHandler: function() {
-    if (this.get('hasSubmitted')) {
-      this.checkHostError();
-    }
-  }.observes('hasSubmitted', 'hostNames'),
-
-  sshKeyError: function () {
-    if (this.get('hasSubmitted') && this.get('manualInstall') === false && this.get('sshKey').trim() === '') {
-      return Em.I18n.t('installer.step2.sshKey.error.required');
-    }
-    return null;
-  }.property('sshKey', 'manualInstall', 'hasSubmitted'),
-
-  /**
-   * Get host info, which will be saved in the parent controller
-   */
-  getHostInfo: function () {
-
-    var hostNameArr = this.get('hostNameArr');
-    var hostInfo = {};
-    for (var i = 0; i < hostNameArr.length; i++) {
-      hostInfo[hostNameArr[i]] = {
-        name: hostNameArr[i],
-        installType: this.get('installType'),
-        bootStatus: 'PENDING'
-      };
-    }
-
-    return hostInfo;
-  },
-
-  /**
-   * Used to set sshKey from FileUploader
-   * @param sshKey
-   */
-  setSshKey: function(sshKey){
-    this.set("content.installOptions.sshKey", sshKey);
-  },
-
-  /**
-   * Onclick handler for the <code>next</code> button. Does all UI work except saving data;
-   * that work is done by the router.
-   * @return {Boolean}
-   */
-  evaluateStep: function () {
-    console.log('TRACE: Entering controller:WizardStep2:evaluateStep function');
-
-    if (this.get('isSubmitDisabled')) {
-      return false;
-    }
-
-    this.set('hasSubmitted', true);
-
-    this.checkHostError();
-    if (this.get('hostsError')) {
-      return false;
-    }
-
-    if (this.get('sshKeyError')) {
-      return false;
-    }
-
-    this.updateHostNameArr();
-
-    if (!this.hostNameArr.length) {
-      this.set('hostsError', Em.I18n.t('installer.step2.hostName.error.already_installed'));
-      return false;
-    }
-
-    if (this.isPattern) {
-      this.hostNamePatternPopup(this.hostNameArr);
-      return false;
-    }
-    if (this.get('inputtedAgainHostNames').length) {
-      var self = this;
-      App.ModalPopup.show({
-        header: 'Warning',
-        onPrimary: function () {
-          self.proceedNext();
-          this.hide();
-        },
-        bodyClass: Ember.View.extend({
-          template: Ember.Handlebars.compile('<p>These hosts are already installed on the cluster and will be ignored:</p><p>' + self.get('inputtedAgainHostNames').join(', ') + '</p><p>Do you want to continue?</p>')
-        })
-      });
-    } else {
-      this.proceedNext();
-    }
-  },
-  /**
-   * check whether the host name textarea contains a pattern expression and,
-   * if so, expand the matching patterns into hostNameArr
-   */
-  patternExpression: function(){
-    this.isPattern = false;
-    var self = this;
-    var hostNames = [];
-    $.each(this.hostNameArr, function(e,a){
-      var start, end, extra = {0:""};
-      if(/\[\d*\-\d*\]/.test(a)){
-        start = a.match(/\[\d*/);
-        end = a.match(/\-\d*]/);
-
-        start = start[0].substr(1);
-        end = end[0].substr(1);
-
-        if(parseInt(start) <= parseInt(end) && parseInt(start) >= 0){
-          self.isPattern = true;
-
-          if(start[0] == "0" && start.length > 1) {
-            extra = start.match(/0*/);
-          }
-
-          for (var i = parseInt(start); i < parseInt(end) + 1; i++) {
-            hostNames.push(a.replace(/\[\d*\-\d*\]/,extra[0].substring(1,1+extra[0].length-i.toString().length)+i))
-          }
-
-        }else{
-          hostNames.push(a);
-        }
-      }else{
-        hostNames.push(a);
-      }
-    });
-    this.hostNameArr =  hostNames;
-  },
-  /**
-   * launch bootstrap for the hosts
-   * and save them once the request is accepted
-   * @return {Boolean}
-   */
-  proceedNext: function(){
-    if (this.get('manualInstall') === true) {
-      this.manualInstallPopup();
-      return false;
-    }
-
-    var bootStrapData = JSON.stringify({'verbose': true, 'sshKey': this.get('sshKey'), hosts: this.get('hostNameArr')});
-
-    if (App.skipBootstrap) {
-      this.saveHosts();
-      return true;
-    }
-
-    var requestId = App.router.get(this.get('content.controllerName')).launchBootstrap(bootStrapData);
-    if (requestId == '0') {
-      var controller = App.router.get(App.clusterStatus.wizardControllerName);
-      controller.registerErrPopup('Information', 'Host Registration is currently in progress.  Please try again later.');
-    } else if (requestId) {
-      this.set('content.installOptions.bootRequestId', requestId);
-      this.saveHosts();
-    }
-  },
-  /**
-   * show popup with hosts generated by pattern
-   * @param hostNames
-   */
-  hostNamePatternPopup: function (hostNames) {
-    var self = this;
-    App.ModalPopup.show({
-      header: Em.I18n.t('installer.step2.hostName.pattern.header'),
-      onPrimary: function () {
-        self.proceedNext();
-        this.hide();
-      },
-      bodyClass: Ember.View.extend({
-        template: Ember.Handlebars.compile(['{{#each host in view.hostNames}}<p>{{host}}</p>{{/each}}'].join('\n')),
-        hostNames: hostNames
-      })
-    });
-  },
-  /**
-   * show a notification that the installation is manual,
-   * then save hosts
-   */
-  manualInstallPopup: function () {
-    var self = this;
-    App.ModalPopup.show({
-      header: Em.I18n.t('installer.step2.manualInstall.popup.header'),
-      onPrimary: function () {
-        this.hide();
-        self.saveHosts();
-      },
-      bodyClass: Ember.View.extend({
-        templateName: require('templates/wizard/step2ManualInstallPopup')
-      })
-    });
-  },
-
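-  // The Next button stays disabled while either validation error is present.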
-  isSubmitDisabled: function () {
-    return (this.get('hostsError') || this.get('sshKeyError'));
-  }.property('hostsError', 'sshKeyError'),
-
-  saveHosts: function(){
-    this.set('content.hosts', this.getHostInfo());
-    App.router.send('next');
-  }
-
-});
diff --git a/branch-1.2/ambari-web/app/controllers/wizard/step3_controller.js b/branch-1.2/ambari-web/app/controllers/wizard/step3_controller.js
deleted file mode 100644
index d63df1f..0000000
--- a/branch-1.2/ambari-web/app/controllers/wizard/step3_controller.js
+++ /dev/null
@@ -1,950 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.WizardStep3Controller = Em.Controller.extend({
-  name: 'wizardStep3Controller',
-  hosts: [],
-  content: [],
-  bootHosts: [],
-  registrationStartedAt: null,
-  registrationTimeoutSecs: 120,
-  stopBootstrap: false,
-  isSubmitDisabled: true,
-
-  categoryObject: Em.Object.extend({
-    hostsCount: function () {
-      var category = this;
-      var hosts = this.get('controller.hosts').filter(function(_host) {
-        if (_host.get('bootStatus') == category.get('hostsBootStatus')) {
-          return true;
-        } else if (_host.get('bootStatus') == 'DONE' && category.get('hostsBootStatus') == 'REGISTERING') {
-          return true;
-        } else {
-          return false;
-        }
-      }, this);
-      return hosts.get('length');
-    }.property('controller.hosts.@each.bootStatus'), // 'hosts.@each.bootStatus'
-    label: function () {
-      return "%@ (%@)".fmt(this.get('value'), this.get('hostsCount'));
-    }.property('value', 'hostsCount')
-  }),
-  getCategory: function(field, value){
-    return this.get('categories').find(function(item){
-      return item.get(field) == value;
-    });
-  },
-
-  categories: function () {
-    var self = this;
-    self.categoryObject.reopen({
-      controller: self,
-      isActive: function(){
-        return this.get('controller.category') == this;
-      }.property('controller.category'),
-      itemClass: function(){
-        return this.get('isActive') ? 'active' : '';
-      }.property('isActive')
-    });
-
-    var categories = [
-      self.categoryObject.create({value: 'All', hostsCount: function () {
-        return this.get('controller.hosts.length');
-      }.property('controller.hosts.length') }),
-      self.categoryObject.create({value: 'Installing', hostsBootStatus: 'RUNNING'}),
-      self.categoryObject.create({value: 'Registering', hostsBootStatus: 'REGISTERING'}),
-      self.categoryObject.create({value: 'Success', hostsBootStatus: 'REGISTERED' }),
-      self.categoryObject.create({value: 'Fail', hostsBootStatus: 'FAILED', last: true })
-    ];
-
-    this.set('category', categories.get('firstObject'));
-
-    return categories;
-  }.property(),
-
-  category: false,
-
-  allChecked: false,
-
-  onAllChecked: function () {
-    var hosts = this.get('visibleHosts');
-    hosts.setEach('isChecked', this.get('allChecked'));
-  }.observes('allChecked'),
-
-  noHostsSelected: function () {
-    return !(this.hosts.someProperty('isChecked', true));
-  }.property('hosts.@each.isChecked'),
-
-  isRetryDisabled: true,
-
-  mockData: require('data/mock/step3_hosts'),
-  mockRetryData: require('data/mock/step3_pollData'),
-
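-  /**
-   * Entry point for this step: kicks off bootstrap for Ambari-driven installs,
-   * or goes straight to host registration when the install is manual.
-   */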
-  navigateStep: function () {
-    this.loadStep();
-    if (this.get('content.installOptions.manualInstall') !== true) {
-      if (!App.db.getBootStatus()) {
-        this.startBootstrap();
-      }
-    } else {
-      this.set('bootHosts', this.get('hosts'));
-      if (App.testMode) {
-        this.getHostInfo();
-        this.get('bootHosts').setEach('bootStatus', 'REGISTERED');
-        this.get('bootHosts').setEach('cpu', '2');
-        this.get('bootHosts').setEach('memory', '2000000');
-        this.set('isSubmitDisabled', false);
-      } else {
-        this.set('registrationStartedAt', null);
-        this.get('bootHosts').setEach('bootStatus', 'DONE');
-        this.startRegistration();
-      }
-    }
-  },
-
-  clearStep: function () {
-    this.set('stopBootstrap', false);
-    this.hosts.clear();
-    this.bootHosts.clear();
-    App.db.setBootStatus(false);
-    this.set('isSubmitDisabled', true);
-    this.set('isRetryDisabled', true);
-  },
-
-  loadStep: function () {
-    console.log("TRACE: Loading step3: Confirm Hosts");
-    this.set('registrationStartedAt', null);
-
-    this.clearStep();
-    var hosts = this.loadHosts();
-    // hosts.setEach('bootStatus', 'RUNNING');
-    this.renderHosts(hosts);
-  },
-
-  /* Loads the host info from localStorage when the view is inserted. Called from the view. */
-  loadHosts: function () {
-    var hostInfo = this.get('content.hosts');
-    var hosts = new Ember.Set();
-    for (var index in hostInfo) {
-      hosts.add(hostInfo[index]);
-      console.log("TRACE: host name is: " + hostInfo[index].name);
-    }
-    return hosts;
-  },
-
-  /* Renders the set of passed hosts */
-  renderHosts: function (hostsInfo) {
-    var self = this;
-    hostsInfo.forEach(function (_hostInfo) {
-      var hostInfo = App.HostInfo.create({
-        name: _hostInfo.name,
-        bootStatus: _hostInfo.bootStatus,
-        isChecked: false
-      });
-
-      console.log('pushing ' + hostInfo.name);
-      self.hosts.pushObject(hostInfo);
-    });
-  },
-
-  /**
-   * Parses and updates the content based on bootstrap API response.
-   * Returns true if polling should continue (some hosts are in "RUNNING" state); false otherwise
-   */
-  parseHostInfo: function (hostsStatusFromServer) {
-    hostsStatusFromServer.forEach(function (_hostStatus) {
-      var host = this.get('bootHosts').findProperty('name', _hostStatus.hostName);
-      // check if hostname extracted from REST API data matches any hostname in content
-      // also, make sure that bootStatus modified by isHostsRegistered call does not get overwritten
-      // since these calls are being made in parallel
-      if (host && !['REGISTERED', 'REGISTERING'].contains(host.get('bootStatus'))) {
-        host.set('bootStatus', _hostStatus.status);
-        host.set('bootLog', _hostStatus.log);
-      }
-    }, this);
-    // if the data rendered by REST API has hosts in "RUNNING" state, polling will continue
-    return this.get('bootHosts').length != 0 && this.get('bootHosts').someProperty('bootStatus', 'RUNNING');
-  },
-
-  /* Returns the set of hosts currently visible in the view, based on the selected category */
-  visibleHosts: function () {
-    var self = this;
-    if (this.get('category.hostsBootStatus')) {
-      return this.hosts.filterProperty('bootStatus', self.get('category.hostsBootStatus'));
-    } else { // if (this.get('category') === 'All Hosts')
-      return this.hosts;
-    }
-  }.property('category', 'hosts.@each.bootStatus'),
-
-  removeHosts: function (hosts) {
-    var self = this;
-
-    App.ModalPopup.show({
-      header: Em.I18n.t('installer.step3.hosts.remove.popup.header'),
-      onPrimary: function () {
-        App.router.send('removeHosts', hosts);
-        self.hosts.removeObjects(hosts);
-        if (!self.hosts.length) {
-          self.set('isSubmitDisabled', true);
-        }
-        this.hide();
-      },
-      body: Em.I18n.t('installer.step3.hosts.remove.popup.body')
-    });
-
-  },
-
-  /* Removes a single element on the trash icon click. Called from View */
-  removeHost: function (hostInfo) {
-    this.removeHosts([hostInfo]);
-  },
-
-  removeSelectedHosts: function () {
-    if (!this.get('noHostsSelected')) {
-      var selectedHosts = this.get('visibleHosts').filterProperty('isChecked', true);
-      selectedHosts.forEach(function (_hostInfo) {
-        console.log('Removing:  ' + _hostInfo.name);
-      });
-      this.removeHosts(selectedHosts);
-    }
-  },
-
-  retryHost: function (hostInfo) {
-    this.retryHosts([hostInfo]);
-  },
-
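-  /**
-   * Re-run bootstrap (or, for manual installs, registration) for the given hosts.
-   */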
-  retryHosts: function (hosts) {
-    var bootStrapData = JSON.stringify({'verbose': true, 'sshKey': this.get('content.installOptions.sshKey'), hosts: hosts.mapProperty('name')});
-    this.numPolls = 0;
-    if (this.get('content.installOptions.manualInstall') !== true) {
-      var requestId = App.router.get('installerController').launchBootstrap(bootStrapData);
-      this.set('content.installOptions.bootRequestId', requestId);
-      this.set('registrationStartedAt', null);
-      this.doBootstrap();
-    } else {
-      this.set('registrationStartedAt', null);
-      this.get('bootHosts').setEach('bootStatus', 'DONE');
-      this.startRegistration();
-    }
-  },
-
-  retrySelectedHosts: function () {
-    //to display all hosts
-    this.set('category', 'All');
-    if (!this.get('isRetryDisabled')) {
-      this.set('isRetryDisabled', true);
-      var selectedHosts = this.get('bootHosts').filterProperty('bootStatus', 'FAILED');
-      selectedHosts.forEach(function (_host) {
-        _host.set('bootStatus', 'RUNNING');
-        _host.set('bootLog', 'Retrying ...');
-      }, this);
-      this.retryHosts(selectedHosts);
-    }
-  },
-
-  numPolls: 0,
-
-  startBootstrap: function () {
-    //this.set('isSubmitDisabled', true);    //TODO: uncomment after actual hookup
-    this.numPolls = 0;
-    this.set('registrationStartedAt', null);
-    this.set('bootHosts', this.get('hosts'));
-    this.get('bootHosts').setEach('bootStatus', 'PENDING');
-    this.doBootstrap();
-  },
-
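-  // True while any host is still bootstrapping or registering.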
-  isInstallInProgress: function(){
-    var bootStatuses = this.get('bootHosts').getEach('bootStatus');
-    if(bootStatuses.length &&
-      (bootStatuses.contains('REGISTERING') ||
-        bootStatuses.contains('DONE') ||
-        bootStatuses.contains('RUNNING') ||
-        bootStatuses.contains('PENDING'))){
-      return true;
-    }
-    return false;
-  }.property('bootHosts.@each.bootStatus'),
-
-  disablePreviousSteps: function(){
-    if(this.get('isInstallInProgress')){
-      App.router.get('installerController').setLowerStepsDisable(3);
-      this.set('isSubmitDisabled', true);
-    } else {
-      App.router.get('installerController.isStepDisabled').findProperty('step', 1).set('value', false);
-      App.router.get('installerController.isStepDisabled').findProperty('step', 2).set('value', false);
-    }
-  }.observes('isInstallInProgress'),
-
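-  /**
-   * Poll the bootstrap API every 3 seconds, updating each host's bootStatus,
-   * and kick off registration polling once at least one host has finished
-   * (or failed) bootstrapping.
-   */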
-  doBootstrap: function () {
-    if (this.get('stopBootstrap')) {
-      return;
-    }
-    this.numPolls++;
-    var self = this;
-    var url = App.testMode ? '/data/wizard/bootstrap/poll_' + this.numPolls + '.json' : App.apiPrefix + '/bootstrap/' + this.get('content.installOptions.bootRequestId');
-    $.ajax({
-      type: 'GET',
-      url: url,
-      timeout: App.timeout,
-      success: function (data) {
-        if (data.hostsStatus !== null) {
-          // in case of bootstrapping just one host, the server returns an object rather than an array, so
-          // force into an array
-          if (!(data.hostsStatus instanceof Array)) {
-            data.hostsStatus = [ data.hostsStatus ];
-          }
-          console.log("TRACE: In success function for the GET bootstrap call");
-          var keepPolling = self.parseHostInfo(data.hostsStatus);
-
-          // Single host : if the only hostname is invalid (data.status == 'ERROR')
-          // Multiple hosts : if one or more hostnames are invalid
-          // following check will mark the bootStatus as 'FAILED' for the invalid hostname
-          if (data.status == 'ERROR' || data.hostsStatus.length != self.get('bootHosts').length) {
-
-            var hosts = self.get('bootHosts');
-
-            for (var i = 0; i < hosts.length; i++) {
-
-              var isValidHost = data.hostsStatus.someProperty('hostName', hosts[i].get('name'));
-              if(hosts[i].get('bootStatus') !== 'REGISTERED'){
-                if (!isValidHost) {
-                  hosts[i].set('bootStatus', 'FAILED');
-                  hosts[i].set('bootLog', 'Registration with the server failed.');
-                }
-              }
-            }
-          }
-
-          if (data.hostsStatus.someProperty('status', 'DONE') || data.hostsStatus.someProperty('status', 'FAILED')) {
-            // kicking off registration polls after at least one host has succeeded
-            self.startRegistration();
-          }
-          if (keepPolling) {
-            window.setTimeout(function () {
-              self.doBootstrap();
-            }, 3000);
-            return;
-          }
-        }
-      },
-      statusCode: require('data/statusCodes')
-    }).retry({times: App.maxRetries, timeout: App.timeout}).then(null,
-      function () {
-        App.showReloadPopup();
-        console.log('Bootstrap failed');
-      }
-    );
-
-  },
-
-  /*
-   stopBootstrap: function () {
-   console.log('stopBootstrap() called');
-   Ember.run.later(this, function () {
-   this.startRegistration();
-   }, 1000);
-   },
-   */
-
-  startRegistration: function () {
-    if (this.get('registrationStartedAt') == null) {
-      this.set('registrationStartedAt', new Date().getTime());
-      console.log('registration started at ' + this.get('registrationStartedAt'));
-      this.isHostsRegistered();
-    }
-  },
-
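-  /**
-   * Poll the hosts API every 3 seconds until all hosts have registered or
-   * failed, or until registrationTimeoutSecs elapses after the last host
-   * finished bootstrapping.
-   */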
-  isHostsRegistered: function () {
-    if (this.get('stopBootstrap')) {
-      return;
-    }
-    var self = this;
-    var hosts = this.get('bootHosts');
-    var url = App.testMode ? '/data/wizard/bootstrap/single_host_registration.json' : App.apiPrefix + '/hosts';
-
-    $.ajax({
-      type: 'GET',
-      url: url,
-      timeout: App.timeout,
-      success: function (data) {
-        console.log('registration attempt...');
-        var jsonData = App.testMode ? data : jQuery.parseJSON(data);
-        if (!jsonData) {
-          console.log("Error: jsonData is null");
-          return;
-        }
-
-        // keep polling until all hosts have registered/failed, or registrationTimeout seconds after the last host finished bootstrapping
-        var stopPolling = true;
-        hosts.forEach(function (_host, index) {
-          // Change name of first host for test mode.
-          if (App.testMode) {
-            if (index == 0) {
-              _host.set('name', 'localhost.localdomain');
-            }
-          }
-          // actions to take depending on the host's current bootStatus
-          // RUNNING - bootstrap is running; leave it alone
-          // DONE - bootstrap is done; transition to REGISTERING
-          // REGISTERING - bootstrap is done but has not registered; transition to REGISTERED if host found in polling API result
-          // REGISTERED - bootstrap and registration is done; leave it alone
-          // FAILED - either bootstrap or registration failed; leave it alone
-          console.log(_host.name + ' bootStatus=' + _host.get('bootStatus'));
-          switch (_host.get('bootStatus')) {
-            case 'DONE':
-              _host.set('bootStatus', 'REGISTERING');
-              _host.set('bootLog', (_host.get('bootLog') != null ? _host.get('bootLog') : '') + '\nRegistering with the server...');
-              // update registration timestamp so that the timeout is computed from the last host that finished bootstrapping
-              self.set('registrationStartedAt', new Date().getTime());
-              stopPolling = false;
-              break;
-            case 'REGISTERING':
-              if (jsonData.items.someProperty('Hosts.host_name', _host.name)) {
-                console.log(_host.name + ' has been registered');
-                _host.set('bootStatus', 'REGISTERED');
-                _host.set('bootLog', (_host.get('bootLog') != null ? _host.get('bootLog') : '') + '\nRegistration with the server succeeded.');
-              } else {
-                console.log(_host.name + ' is registering...');
-                stopPolling = false;
-              }
-              break;
-            case 'RUNNING':
-              stopPolling = false;
-              break;
-            case 'REGISTERED':
-            case 'FAILED':
-            default:
-              break;
-          }
-        }, this);
-
-        if (stopPolling) {
-          self.getHostInfo();
-        } else if (hosts.someProperty('bootStatus', 'RUNNING') || new Date().getTime() - self.get('registrationStartedAt') < self.get('registrationTimeoutSecs') * 1000) {
-          // we want to keep polling for registration status if any of the hosts are still bootstrapping (so we check for RUNNING).
-          window.setTimeout(function () {
-            self.isHostsRegistered();
-          }, 3000);
-        } else {
-          // registration timed out; mark all REGISTERING hosts as FAILED
-          console.log('registration timed out');
-          hosts.filterProperty('bootStatus', 'REGISTERING').forEach(function (_host) {
-            _host.set('bootStatus', 'FAILED');
-            _host.set('bootLog', (_host.get('bootLog') != null ? _host.get('bootLog') : '') + '\nRegistration with the server failed.');
-          });
-          self.getHostInfo();
-        }
-      },
-      statusCode: require('data/statusCodes')
-    }).retry({times: App.maxRetries, timeout: App.timeout}).then(null, function () {
-        App.showReloadPopup();
-        console.log('Error: Getting registered host information from the server');
-      });
-  },
-
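-  // Show a simple modal popup with the given header and message.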
-  registerErrPopup: function (header, message) {
-    App.ModalPopup.show({
-      header: header,
-      secondary: false,
-      onPrimary: function () {
-        this.hide();
-      },
-      bodyClass: Ember.View.extend({
-        template: Ember.Handlebars.compile(['<p>{{view.message}}</p>'].join('\n')),
-        message: message
-      })
-    });
-  },
-
-  /**
-   * Get disk info, memory, and cpu count of the booted hosts from the server
-   */
-  getHostInfo: function () {
-    var self = this;
-    var kbPerGb = 1024;
-    var hosts = this.get('bootHosts');
-    var url = App.testMode ? '/data/wizard/bootstrap/two_hosts_information.json' : App.apiPrefix + '/hosts?fields=Hosts/total_mem,Hosts/cpu_count,Hosts/disk_info,Hosts/last_agent_env';
-    $.ajax({
-      type: 'GET',
-      url: url,
-      contentType: 'application/json',
-      timeout: App.timeout,
-      success: function (data) {
-        var jsonData = (App.testMode) ? data : jQuery.parseJSON(data);
-        self.parseWarnings(jsonData);
-        hosts.forEach(function (_host) {
-          var host = (App.testMode) ? jsonData.items[0] : jsonData.items.findProperty('Hosts.host_name', _host.name);
-          if (App.skipBootstrap) {
-            _host.cpu = 2;
-            _host.memory = ((parseInt(2000000))).toFixed(2);
-            _host.disk_info = [{"mountpoint": "/", "type":"ext4"},{"mountpoint": "/grid/0", "type":"ext4"}, {"mountpoint": "/grid/1", "type":"ext4"}, {"mountpoint": "/grid/2", "type":"ext4"}];
-          } else if (host) {
-            _host.cpu = host.Hosts.cpu_count;
-            _host.memory = ((parseInt(host.Hosts.total_mem))).toFixed(2);
-            _host.disk_info = host.Hosts.disk_info;
-
-            console.log("The value of memory is: " + _host.memory);
-          }
-        });
-        self.set('bootHosts', hosts);
-        console.log("The value of hosts: " + JSON.stringify(hosts));
-        self.stopRegistration();
-      },
-
-      error: function () {
-        console.log('INFO: Getting host information(cpu_count and total_mem) from the server failed');
-        self.registerErrPopup(Em.I18n.t('installer.step3.hostInformation.popup.header'), Em.I18n.t('installer.step3.hostInformation.popup.body'));
-      },
-      statusCode: require('data/statusCodes')
-    });
-  },
-
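-  // Enable Next once at least one host has registered; enable Retry if any failed.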
-  stopRegistration: function () {
-    this.set('isSubmitDisabled', !this.get('bootHosts').someProperty('bootStatus', 'REGISTERED'));
-    this.set('isRetryDisabled', !this.get('bootHosts').someProperty('bootStatus', 'FAILED'));
-  },
-
-  selectCategory: function(event, context){
-    this.set('category', event.context);
-  },
-
-  submit: function () {
-    if (!this.get('isSubmitDisabled')) {
-      this.set('content.hosts', this.get('bootHosts'));
-      App.router.send('next');
-    }
-  },
-
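-  /**
-   * Show a modal with the given host's boot log; the text trigger toggles a
-   * selectable textarea so the log can be copied easily.
-   */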
-  hostLogPopup: function (event, context) {
-    var host = event.context;
-
-    App.ModalPopup.show({
-
-      header: Em.I18n.t('installer.step3.hostLog.popup.header').format(host.get('name')),
-      secondary: null,
-
-      onPrimary: function () {
-        this.hide();
-      },
-
-      bodyClass: Ember.View.extend({
-        templateName: require('templates/wizard/step3_host_log_popup'),
-        host: host,
-        didInsertElement: function () {
-          var self = this;
-          var button = $(this.get('element')).find('.textTrigger');
-          button.click(function () {
-            if (self.get('isTextArea')) {
-              $(this).text('click to highlight');
-            } else {
-              $(this).text('press CTRL+C');
-            }
-            self.set('isTextArea', !self.get('isTextArea'));
-          });
-          $(this.get('element')).find('.content-area').mouseenter(
-            function () {
-              var element = $(this);
-              element.css('border', '1px solid #dcdcdc');
-              button.css('visibility', 'visible');
-            }).mouseleave(
-            function () {
-              var element = $(this);
-              element.css('border', 'none');
-              button.css('visibility', 'hidden');
-            })
-        },
-        isTextArea: false,
-        textArea: Em.TextArea.extend({
-          didInsertElement: function () {
-            var element = $(this.get('element'));
-            element.width($(this.get('parentView').get('element')).width() - 10);
-            element.height($(this.get('parentView').get('element')).height());
-            element.select();
-            element.css('resize', 'none');
-          },
-          readOnly: true,
-          value: function () {
-            return this.get('content');
-          }.property('content')
-        })
-      })
-    });
-  },
-  /**
-   * re-fetch warnings from the server and parse them
-   */
-  rerunChecks: function(){
-    var self = this;
-    var url = App.testMode ? '/data/wizard/bootstrap/two_hosts_information.json' : App.apiPrefix + '/hosts?fields=Hosts/last_agent_env';
-    var currentProgress = 0;
-    var interval = setInterval(function(){
-      self.set('checksUpdateProgress', Math.ceil((++currentProgress/60)*100));
-    }, 1000);
-    setTimeout(function(){
-      clearInterval(interval);
-      $.ajax({
-        type: 'GET',
-        url: url,
-        contentType: 'application/json',
-        timeout: App.timeout,
-        success: function (data) {
-          var jsonData = (App.testMode) ? data : jQuery.parseJSON(data);
-          self.set('checksUpdateProgress', 100);
-          self.set('checksUpdateStatus', 'SUCCESS');
-          self.parseWarnings(jsonData);
-        },
-        error: function () {
-          self.set('checksUpdateProgress', 100);
-          self.set('checksUpdateStatus', 'FAILED');
-          console.log('INFO: Getting host information(last_agent_env) from the server failed');
-        },
-        statusCode: require('data/statusCodes')
-      })
-    }, this.get('warningsTimeInterval'));
-
-  },
-  warnings: [],
-  warningsTimeInterval: 60000,
-  /**
-   * check whether any host has warnings
-   */
-  isHostHaveWarnings: function(){
-    var isWarning = false;
-    this.get('warnings').forEach(function(warning){
-      if(!isWarning && (warning.directoriesFiles.someProperty('isWarn', true) ||
-      warning.packages.someProperty('isWarn', true) ||
-      warning.processes.someProperty('isWarn', true))){
-        isWarning = true;
-      }
-    }, this);
-    return isWarning;
-  }.property('warnings'),
-  isWarningsBoxVisible: function(){
-    return (App.testMode) ? true : !this.get('isSubmitDisabled');
-  }.property('isSubmitDisabled'),
-  checksUpdateProgress:0,
-  checksUpdateStatus: null,
-  /**
-   * filter the warnings data down to hosts
-   * that are part of the bootstrap set
-   * @param data
-   * @return {Object}
-   */
-  filterBootHosts: function(data){
-    var bootHosts = this.get('bootHosts');
-    var filteredData = {
-      href: data.href,
-      items: []
-    };
-    bootHosts.forEach(function(bootHost){
-      data.items.forEach(function(host){
-        if(host.Hosts.host_name == bootHost.get('name')){
-          filteredData.items.push(host);
-        }
-      });
-    });
-    return filteredData;
-  },
-  /**
-   * parse warnings data for each host and total
-   * @param data
-   */
-  parseWarnings: function(data){
-    data = this.filterBootHosts(data);
-    var warnings = [];
-    var totalWarnings = {
-      hostName: 'All Hosts',
-      directoriesFiles: [],
-      packages: [],
-      processes: []
-    }
-    //alphabetical sorting
-    var sortingFunc = function(a, b){
-      var a1 = a.name, b1 = b.name;
-      if (a1 == b1) return 0;
-      return a1 > b1 ? 1 : -1;
-    };
-    data.items.forEach(function(host){
-      var warningsByHost = {
-        hostName: host.Hosts.host_name,
-        directoriesFiles: [],
-        packages: [],
-        processes: []
-      };
-
-      //render all directories and files for each host
-      if (!host.Hosts.last_agent_env) {
-        // in some unusual circumstances when last_agent_env is not available from the host,
-        // skip the host and proceed to process the rest of the hosts.
-        console.log("last_agent_env is missing for " + host.Hosts.host_name + ".  Skipping host check.");
-        return;
-      }
-      host.Hosts.last_agent_env.paths.forEach(function(path){
-        var parsedPath = {
-          name: path.name,
-          isWarn: (path.type == 'not_exist') ? false : true,
-          message: (path.type == 'not_exist') ? 'OK' : 'WARN: already exists on host'
-        }
-        warningsByHost.directoriesFiles.push(parsedPath);
-        // parsing total warnings
-        if(!totalWarnings.directoriesFiles.someProperty('name', parsedPath.name)){
-          totalWarnings.directoriesFiles.push({
-            name:parsedPath.name,
-            isWarn: parsedPath.isWarn,
-            message: (parsedPath.isWarn) ? 'WARN: already exists on 1 host': 'OK',
-            warnCount: (parsedPath.isWarn) ? 1 : 0
-          })
-        } else if(parsedPath.isWarn){
-            totalWarnings.directoriesFiles.forEach(function(item, index){
-              if(item.name == parsedPath.name){
-                totalWarnings.directoriesFiles[index].isWarn = true;
-                totalWarnings.directoriesFiles[index].warnCount++;
-                totalWarnings.directoriesFiles[index].message = 'WARN: already exists on '+ totalWarnings.directoriesFiles[index].warnCount +' hosts';
-              }
-            });
-        }
-      }, this);
-
-      //render all packages for each host
-      host.Hosts.last_agent_env.rpms.forEach(function(_package){
-        var parsedPackage = {
-          name: _package.name,
-          isWarn: _package.installed,
-          message: (_package.installed) ? 'WARN: already installed on host' : 'OK'
-        }
-        warningsByHost.packages.push(parsedPackage);
-        // parsing total warnings
-        if(!totalWarnings.packages.someProperty('name', parsedPackage.name)){
-          totalWarnings.packages.push({
-            name:parsedPackage.name,
-            isWarn: parsedPackage.isWarn,
-            message: (parsedPackage.isWarn) ? 'WARN: already exists on 1 host': 'OK',
-            warnCount: (parsedPackage.isWarn) ? 1 : 0
-          })
-        } else if(parsedPackage.isWarn){
-          totalWarnings.packages.forEach(function(item, index){
-            if(item.name == parsedPackage.name){
-              totalWarnings.packages[index].isWarn = true;
-              totalWarnings.packages[index].warnCount++;
-              totalWarnings.packages[index].message = 'WARN: already exists on '+ totalWarnings.packages[index].warnCount +' hosts';
-            }
-          });
-        }
-      }, this);
-
-      // render all process for each host
-      host.Hosts.last_agent_env.javaProcs.forEach(function(process){
-          var parsedProcess = {
-            user: process.user,
-            isWarn: process.hadoop,
-            pid: process.pid,
-            command: process.command,
-            shortCommand: (process.command.substr(0, 15)+'...'),
-            message: (process.hadoop) ? 'WARN: running on host' : 'OK'
-          }
-          warningsByHost.processes.push(parsedProcess);
-          // parsing total warnings
-          if(!totalWarnings.processes.someProperty('pid', parsedProcess.pid)){
-            totalWarnings.processes.push({
-              user: process.user,
-              pid: process.pid,
-              command: process.command,
-              shortCommand: (process.command.substr(0, 15)+'...'),
-              isWarn: parsedProcess.isWarn,
-              message: (parsedProcess.isWarn) ? 'WARN: running on 1 host': 'OK',
-              warnCount: (parsedProcess.isWarn) ? 1 : 0
-            })
-          } else if(parsedProcess.isWarn){
-            totalWarnings.processes.forEach(function(item, index){
-              if(item.pid == parsedProcess.pid){
-                totalWarnings.processes[index].isWarn = true;
-                totalWarnings.processes[index].warnCount++;
-                totalWarnings.processes[index].message = 'WARN: running on '+ totalWarnings.processes[index].warnCount +' hosts';
-              }
-            });
-          }
-      }, this);
-      warningsByHost.directoriesFiles.sort(sortingFunc);
-      warningsByHost.packages.sort(sortingFunc);
-      warnings.push(warningsByHost);
-    }, this);
-
-    totalWarnings.directoriesFiles.sort(sortingFunc);
-    totalWarnings.packages.sort(sortingFunc);
-    warnings.unshift(totalWarnings);
-    this.set('warnings', warnings);
-  },
-  /**
-   * open a popup that contains the hosts' warnings
-   * @param event
-   */
-  hostWarningsPopup: function(event){
-    var self = this;
-    App.ModalPopup.show({
-
-      header: Em.I18n.t('installer.step3.warnings.popup.header'),
-      secondary: 'Rerun Checks',
-      primary: 'Close',
-      onPrimary: function () {
-        self.set('checksUpdateStatus', null);
-        this.hide();
-      },
-      onClose: function(){
-        self.set('checksUpdateStatus', null);
-        this.hide();
-      },
-      onSecondary: function() {
-        self.rerunChecks();
-      },
-
-      footerClass: Ember.View.extend({
-        template: Ember.Handlebars.compile([
-          '<div class="update-progress pull-left">',
-          '{{#if view.isUpdateInProgress}}',
-          '<div class="progress-info active progress">',
-          '<div class="bar" {{bindAttr style="view.progressWidth"}}></div></div>',
-          '{{else}}<label {{bindAttr class="view.updateStatusClass"}}>{{view.updateStatus}}</label>',
-          '{{/if}}</div>',
-          '{{#if view.parentView.secondary}}<button type="button" class="btn btn-info" {{bindAttr disabled="view.isUpdateInProgress"}} {{action onSecondary target="view.parentView"}}><i class="icon-repeat"></i>&nbsp;{{view.parentView.secondary}}</button>{{/if}}',
-          '{{#if view.parentView.primary}}<button type="button" class="btn" {{action onPrimary target="view.parentView"}}>{{view.parentView.primary}}</button>{{/if}}'
-        ].join('')),
-        classNames: ['modal-footer', 'host-checks-update'],
-        progressWidth: function(){
-          return 'width:'+App.router.get('wizardStep3Controller.checksUpdateProgress')+'%';
-        }.property('App.router.wizardStep3Controller.checksUpdateProgress'),
-        isUpdateInProgress: function(){
-          if((App.router.get('wizardStep3Controller.checksUpdateProgress') > 0) &&
-             (App.router.get('wizardStep3Controller.checksUpdateProgress') < 100)){
-            return true;
-          }
-        }.property('App.router.wizardStep3Controller.checksUpdateProgress'),
-        updateStatusClass:function(){
-          var status = App.router.get('wizardStep3Controller.checksUpdateStatus');
-          if(status === 'SUCCESS'){
-            return 'text-success';
-          } else if(status === 'FAILED'){
-            return 'text-error';
-          } else {
-            return null;
-          }
-        }.property('App.router.wizardStep3Controller.checksUpdateStatus'),
-        updateStatus:function(){
-          var status = App.router.get('wizardStep3Controller.checksUpdateStatus');
-          if(status === 'SUCCESS'){
-            return Em.I18n.t('installer.step3.warnings.updateChecks.success');
-          } else if(status === 'FAILED'){
-            return Em.I18n.t('installer.step3.warnings.updateChecks.failed');
-          } else {
-            return null;
-          }
-        }.property('App.router.wizardStep3Controller.checksUpdateStatus')
-      }),
-
-      bodyClass: Ember.View.extend({
-        templateName: require('templates/wizard/step3_host_warnings_popup'),
-        warnings: function(){
-          return App.router.get('wizardStep3Controller.warnings');
-        }.property('App.router.wizardStep3Controller.warnings'),
-        categories: function(){
-          var categories = this.get('warnings').getEach('hostName');
-          return categories;
-        }.property('warnings'),
-        category: 'All Hosts',
-        content: function(){
-          return this.get('warnings').findProperty('hostName', this.get('category'));
-        }.property('category', 'warnings'),
-        /**
-         * generate detailed content to show in a new window
-         */
-        contentInDetails: function(){
-          var content = this.get('content');
-          var newContent = '';
-          if(content.hostName == 'All Hosts'){
-            newContent += '<h4>Warnings across all hosts</h4>';
-          } else {
-            newContent += '<h4>Warnings on ' + content.hostName + '</h4>';
-          }
-          newContent += '<div>DIRECTORIES AND FILES</div><div>';
-          content.directoriesFiles.filterProperty('isWarn', true).forEach(function(path){
-              newContent += path.name + '&nbsp;'
-          });
-          if(content.directoriesFiles.filterProperty('isWarn', true).length == 0){
-            newContent += 'No warnings';
-          }
-          newContent += '</div><br/><div>PACKAGES</div><div>';
-          content.packages.filterProperty('isWarn', true).forEach(function(_package){
-              newContent += _package.name + '&nbsp;'
-          });
-          if(content.packages.filterProperty('isWarn', true).length == 0){
-            newContent += 'No warnings';
-          }
-          newContent += '</div><br/><div>PROCESSES</div><div>';
-          content.processes.filterProperty('isWarn', true).forEach(function(process, index){
-              newContent += '(' + content.hostName + ',' + process.pid + ',' + process.user + ')';
-              newContent += (index != (content.processes.filterProperty('isWarn', true).length-1)) ? ',' : '';
-          })
-          if(content.processes.filterProperty('isWarn', true).length == 0){
-            newContent += 'No warnings';
-          }
-          return newContent;
-        }.property('content'),
-        /**
-         * open new browser tab with detailed content
-         */
-        openWarningsInDialog: function(){
-          var newWindow = window.open('', this.get('category')+' warnings');
-          var newDocument = newWindow.document;
-          newDocument.write(this.get('contentInDetails'));
-          newWindow.focus();
-        }
-      })
-    })
-  },
-
-  // TODO: dummy button. Remove this after the hook up with actual REST API.
-  mockBtn: function () {
-    this.set('isSubmitDisabled', false);
-    this.hosts.clear();
-    var hostInfo = this.mockData;
-    this.renderHosts(hostInfo);
-  },
-
-  pollBtn: function () {
-    if (this.get('isSubmitDisabled')) {
-      return;
-    }
-    var hosts = this.get('visibleHosts');
-    var selectedHosts = hosts.filterProperty('isChecked', true);
-    selectedHosts.forEach(function (_host) {
-      console.log('Retrying:  ' + _host.name);
-    });
-
-    var mockHosts = this.mockRetryData;
-    mockHosts.forEach(function (_host) {
-      console.log('Retrying:  ' + _host.name);
-    });
-    if (this.parseHostInfo(mockHosts, selectedHosts)) {
-      // this.saveHostInfoToDb();
-    }
-  }
-
-});
-
diff --git a/branch-1.2/ambari-web/app/controllers/wizard/step4_controller.js b/branch-1.2/ambari-web/app/controllers/wizard/step4_controller.js
deleted file mode 100644
index 16f57a0..0000000
--- a/branch-1.2/ambari-web/app/controllers/wizard/step4_controller.js
+++ /dev/null
@@ -1,144 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.WizardStep4Controller = Em.ArrayController.extend({
-
-  name: 'wizardStep4Controller',
-  content: [],
-
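-  // Next is disabled until at least one not-yet-installed service is selected.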
-  isSubmitDisabled:function(){
-    return this.filterProperty('isSelected', true).filterProperty('isInstalled', false).length === 0;
-  }.property("@each.isSelected"),
-
-  /**
-   * Check whether all services are selected
-   */
-  isAll: function () {
-    return this.everyProperty('isSelected', true);
-  }.property('@each.isSelected'),
-
-  /**
-   * Check whether no selectable services are selected (the minimum set)
-   */
-  isMinimum: function () {
-    return this.filterProperty('isDisabled', false).everyProperty('isSelected', false);
-  }.property('@each.isSelected'),
-
-  /**
-   * Update hidden services so they take the same status as the services they depend on.
-   */
-  checkDependencies: function () {
-    var hbase = this.findProperty('serviceName', 'HBASE');
-    var zookeeper = this.findProperty('serviceName', 'ZOOKEEPER');
-    var hive = this.findProperty('serviceName', 'HIVE');
-    var hcatalog = this.findProperty('serviceName', 'HCATALOG');
-    var webhcat = this.findProperty('serviceName', 'WEBHCAT');
-
-    // guard against errors when not all elements have been loaded yet
-    if (hbase && zookeeper && hive && hcatalog && webhcat) {
-      zookeeper.set('isSelected', hbase.get('isSelected') || hive.get('isSelected'));
-      hcatalog.set('isSelected', hive.get('isSelected'));
-      webhcat.set('isSelected', hive.get('isSelected'));
-    }
-  }.observes('@each.isSelected'),
-
-  /**
-   * Onclick handler for <code>select all</code> link
-   */
-  selectAll: function () {
-    this.setEach('isSelected', true);
-  },
-
-  /**
-   * onclick handler for <code>select minimum</code> link
-   */
-  selectMinimum: function () {
-    this.filterProperty('isDisabled', false).setEach('isSelected', false);
-  },
-
-  /**
-   * Check whether we should turn on <code>MapReduce</code> service
-   * @return {Boolean}
-   */
-  needToAddMapReduce: function () {
-    if (this.findProperty('serviceName', 'MAPREDUCE').get('isSelected') === false) {
-      var mapreduceDependentServices = this.filter(function (item) {
-        return ['PIG', 'OOZIE', 'HIVE'].contains(item.get('serviceName')) && item.get('isSelected');
-      });
-      return (mapreduceDependentServices.get('length') > 0);
-    }
-
-    return false;
-  },
-
-  /**
-   * Check whether Ganglia or Nagios is left unselected
-   * @return {Boolean}
-   */
-  gangliaOrNagiosNotSelected: function () {
-    return (this.findProperty('serviceName', 'GANGLIA').get('isSelected') === false || this.findProperty('serviceName', 'NAGIOS').get('isSelected') === false);
-  },
-
-  /**
-   * Warn the user if monitoring services are turned off, then go to the next step
-   */
-  validateMonitoring: function () {
-    if (this.gangliaOrNagiosNotSelected()) {
-      App.ModalPopup.show({
-        header: Em.I18n.t('installer.step4.monitoringCheck.popup.header'),
-        body: Em.I18n.t('installer.step4.monitoringCheck.popup.body'),
-        onPrimary: function () {
-          this.hide();
-          App.router.send('next');
-        },
-        onSecondary: function () {
-          this.hide();
-        }
-      });
-    } else {
-      App.router.send('next');
-    }
-  },
-
-  /**
-   * Onclick handler for <code>Next</code> button
-   */
-  submit: function () {
-    if(!this.get("isSubmitDisabled")){
-      var self = this;
-      if (this.needToAddMapReduce()) {
-        App.ModalPopup.show({
-          header: Em.I18n.t('installer.step4.mapreduceCheck.popup.header'),
-          body: Em.I18n.t('installer.step4.mapreduceCheck.popup.body'),
-          onPrimary: function () {
-            self.findProperty('serviceName', 'MAPREDUCE').set('isSelected', true);
-            this.hide();
-            self.validateMonitoring();
-          },
-          onSecondary: function () {
-            this.hide();
-          }
-        });
-      } else {
-        self.validateMonitoring();
-      }
-    }
-  }
-})
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/controllers/wizard/step5_controller.js b/branch-1.2/ambari-web/app/controllers/wizard/step5_controller.js
deleted file mode 100644
index 305bf27..0000000
--- a/branch-1.2/ambari-web/app/controllers/wizard/step5_controller.js
+++ /dev/null
@@ -1,618 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.WizardStep5Controller = Em.Controller.extend({
-
-  name:"wizardStep5Controller",
-
-  hosts:[],
-
-  selectedServices:[],
-  selectedServicesMasters:[],
-  zId:0,
-
-  hasHiveServer: function () {
-    return this.get('selectedServicesMasters').findProperty('component_name', 'HIVE_SERVER');
-  }.property('selectedServicesMasters'),
-
-  updateHiveCoHosts: function () {
-    var hiveServer =  this.get('selectedServicesMasters').findProperty('component_name', 'HIVE_SERVER');
-    var hiveMetastore = this.get('selectedServicesMasters').findProperty('component_name', 'HIVE_METASTORE');
-    var webHCatServer = this.get('selectedServicesMasters').findProperty('component_name', 'WEBHCAT_SERVER');
-    if (hiveServer && hiveMetastore && webHCatServer) {
-      this.get('selectedServicesMasters').findProperty('component_name', 'HIVE_METASTORE').set('selectedHost', hiveServer.get('selectedHost'));
-      this.get('selectedServicesMasters').findProperty('component_name', 'WEBHCAT_SERVER').set('selectedHost', hiveServer.get('selectedHost'));
-    }
-  }.observes('selectedServicesMasters.@each.selectedHost'),
-
-  components:require('data/service_components'),
-
-  clearStep:function () {
-    this.set('hosts', []);
-    this.set('selectedServices', []);
-    this.set('selectedServicesMasters', []);
-    this.set('zId', 0);
-  },
-
-  loadStep:function () {
-    console.log("WizardStep5Controller: Loading step5: Assign Masters");
-    this.clearStep();
-    this.renderHostInfo();
-    this.renderComponents(this.loadComponents());
-
-    if (!this.get("selectedServicesMasters").filterProperty('isInstalled', false).length) {
-      console.log('no master components to add');
-      App.router.send('next');
-    }
-  },
-
-  /**
-   * Load the active host list into the <code>hosts</code> variable
-   */
-  renderHostInfo:function () {
-
-    var hostInfo = this.get('content.hosts');
-
-    for (var index in hostInfo) {
-      var _host = hostInfo[index];
-      if (_host.bootStatus === 'REGISTERED') {
-        var hostObj = Ember.Object.create({
-          host_name:_host.name,
-
-          cpu:_host.cpu,
-          memory:_host.memory,
-          disk_info:_host.disk_info,
-          host_info:"%@ (%@, %@ cores)".fmt(_host.name, (_host.memory * 1024).bytesToSize(1, 'parseFloat'), _host.cpu)
-
-//          Uncomment to test sorting with random cpu, memory, host_info
-//          cpu:function () {
-//            return parseInt(2 + Math.random() * 4);
-//          }.property(),
-//          memory:function () {
-//            return parseInt((Math.random() * 4000000000) + 4000000000);
-//          }.property(),
-//
-//          host_info:function () {
-//            return "%@ (%@, %@ cores)".fmt(this.get('host_name'), (this.get('memory') * 1024).bytesToSize(1, 'parseFloat'), this.get('cpu'));
-//          }.property('cpu', 'memory')
-
-        });
-
-        this.get("hosts").pushObject(hostObj);
-      }
-    }
-  },
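-
-  /* Editor's sketch (not part of the original file): for a registered host
-   * { name: 'host1', cpu: 4, memory: 4096 }, the template above produces a
-   * host_info label of the form "host1 (<memory>, 4 cores)", where <memory>
-   * is formatted by the project's bytesToSize() utility.
-   */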
-
-  /**
-   * Load services info into <code>selectedServices</code> and return the set of master components
-   * @return {Ember.Set}
-   */
-  loadComponents:function () {
-
-    var services = this.get('content.services')
-      .filterProperty('isSelected', true).mapProperty('serviceName'); //list of shown services
-
-    services.forEach(function (item) {
-      this.get("selectedServices").pushObject(Ember.Object.create({service_name:item}));
-    }, this);
-
-    var masterHosts = this.get('content.masterComponentHosts'); //info saved to local storage
-
-    var resultComponents = new Ember.Set();
-
-    var masterComponents = this.get('components').filterProperty('isMaster', true); //get full list from mock data
-
-    var servicesLength = services.length;
-    for (var index = 0; index < servicesLength; index++) {
-      var componentInfo = masterComponents.filterProperty('service_name', services[index]);
-
-      componentInfo.forEach(function (_componentInfo) {
-        if (_componentInfo.component_name == 'ZOOKEEPER_SERVER') {
-          var savedComponents = masterHosts.filterProperty('component', _componentInfo.component_name);
-          if (savedComponents.length) {
-
-            savedComponents.forEach(function (item) {
-              var zooKeeperHost = {};
-              zooKeeperHost.display_name = _componentInfo.display_name;
-              zooKeeperHost.component_name = _componentInfo.component_name;
-              zooKeeperHost.selectedHost = item.hostName;
-              zooKeeperHost.availableHosts = [];
-              zooKeeperHost.serviceId = services[index];
-              zooKeeperHost.isInstalled = item.isInstalled;
-              resultComponents.add(zooKeeperHost);
-            })
-
-          } else {
-
-            var zooHosts = this.selectHost(_componentInfo.component_name);
-            zooHosts.forEach(function (_host) {
-              var zooKeeperHost = {};
-              zooKeeperHost.display_name = _componentInfo.display_name;
-              zooKeeperHost.component_name = _componentInfo.component_name;
-              zooKeeperHost.selectedHost = _host;
-              zooKeeperHost.availableHosts = [];
-              zooKeeperHost.serviceId = services[index];
-              zooKeeperHost.isInstalled = false;
-              zooKeeperHost.isHiveCoHost = false;
-              resultComponents.add(zooKeeperHost);
-            });
-
-          }
-        } else {
-          var savedComponent = masterHosts.findProperty('component', _componentInfo.component_name);
-          var componentObj = {};
-          componentObj.component_name = _componentInfo.component_name;
-          componentObj.display_name = _componentInfo.display_name;
-          componentObj.selectedHost = savedComponent ? savedComponent.hostName : this.selectHost(_componentInfo.component_name);   // call the method that plays selectNode algorithm or fetches from server
-          componentObj.isInstalled = savedComponent ? savedComponent.isInstalled : App.HostComponent.find().someProperty('componentName', _componentInfo.component_name);
-          componentObj.serviceId = services[index];
-          componentObj.availableHosts = [];
-          componentObj.isHiveCoHost = ['HIVE_METASTORE', 'WEBHCAT_SERVER'].contains(_componentInfo.component_name);
-          resultComponents.add(componentObj);
-        }
-      }, this);
-    }
-
-    return resultComponents;
-  },
-
-  /**
-   * Put master components into <code>selectedServicesMasters</code>, which is automatically rendered in the template
-   * @param masterComponents
-   */
-  renderComponents:function (masterComponents) {
-    var zookeeperComponent = null, componentObj = null;
-    var services = this.get('selectedServicesMasters').slice(0);
-    if (services.length) {
-      this.set('selectedServicesMasters', []);
-    }
-
-    var countZookeeper = masterComponents.filterProperty('display_name', 'ZooKeeper').length;
-
-    masterComponents.forEach(function (item) {
-      //add the ZooKeeper component at the end if it exists
-      console.log("TRACE: render master component name is: " + item.component_name);
-      if (item.display_name === "ZooKeeper") {
-        if (services.length) {
-          services.forEach(function (_service) {
-            this.get('selectedServicesMasters').pushObject(_service);
-          }, this);
-        }
-        this.set('zId', parseInt(this.get('zId')) + 1);
-        zookeeperComponent = Ember.Object.create(item);
-        zookeeperComponent.set('zId', this.get('zId'));
-        zookeeperComponent.set("showRemoveControl", countZookeeper > 1);
-        zookeeperComponent.set("availableHosts", this.get("hosts").slice(0));
-        this.get("selectedServicesMasters").pushObject(zookeeperComponent);
-
-      } else {
-        componentObj = Ember.Object.create(item);
-        componentObj.set("availableHosts", this.get("hosts").slice(0));
-        this.get("selectedServicesMasters").pushObject(componentObj);
-      }
-    }, this);
-
-  },
-
-  getKerberosServer:function (noOfHosts) {
-    var hosts = this.get('hosts');
-    if (noOfHosts === 1) {
-      return hosts[0];
-    } else if (noOfHosts < 3) {
-      return hosts[1];
-    } else if (noOfHosts <= 5) {
-      return hosts[1];
-    } else if (noOfHosts <= 30) {
-      return hosts[3];
-    } else {
-      return hosts[5];
-    }
-  },
-
-  getNameNode:function (noOfHosts) {
-    return this.get('hosts')[0];
-  },
-
-  getSNameNode:function (noOfHosts) {
-    var hosts = this.get('hosts');
-    if (noOfHosts === 1) {
-      return hosts[0];
-    } else {
-      return hosts[1];
-    }
-  },
-
-  getJobTracker:function (noOfHosts) {
-    var hosts = this.get('hosts');
-    if (noOfHosts === 1) {
-      return hosts[0];
-    } else if (noOfHosts < 3) {
-      return hosts[1];
-    } else if (noOfHosts <= 5) {
-      return hosts[1];
-    } else if (noOfHosts <= 30) {
-      return hosts[1];
-    } else {
-      return hosts[2];
-    }
-  },
-
-  getHBaseMaster:function (noOfHosts) {
-    var hosts = this.get('hosts');
-    if (noOfHosts === 1) {
-      return hosts[0];
-    } else if (noOfHosts < 3) {
-      return hosts[0];
-    } else if (noOfHosts <= 5) {
-      return hosts[0];
-    } else if (noOfHosts <= 30) {
-      return hosts[2];
-    } else {
-      return hosts[3];
-    }
-  },
-
-  getOozieServer:function (noOfHosts) {
-    var hosts = this.get('hosts');
-    if (noOfHosts === 1) {
-      return hosts[0];
-    } else if (noOfHosts < 3) {
-      return hosts[1];
-    } else if (noOfHosts <= 5) {
-      return hosts[1];
-    } else if (noOfHosts <= 30) {
-      return hosts[2];
-    } else {
-      return hosts[3];
-    }
-  },
-
-  getHiveServer:function (noOfHosts) {
-    var hosts = this.get('hosts');
-    if (noOfHosts === 1) {
-      return hosts[0];
-    } else if (noOfHosts < 3) {
-      return hosts[1];
-    } else if (noOfHosts <= 5) {
-      return hosts[1];
-    } else if (noOfHosts <= 30) {
-      return hosts[2];
-    } else {
-      return hosts[4];
-    }
-  },
-
-  getHiveMetastore:function (noOfHosts) {
-    return this.getHiveServer(noOfHosts);
-  },
-
-  getWebHCatServer:function (noOfHosts) {
-    return this.getHiveServer(noOfHosts);
-  },
-
-  getZooKeeperServer:function (noOfHosts) {
-    var hosts = this.get('hosts');
-    if (noOfHosts < 3) {
-      return [hosts[0].host_name];
-    } else {
-      return [hosts[0].host_name, hosts[1].host_name, hosts[2].host_name];
-    }
-  },
-
-  getGangliaServer:function (noOfHosts) {
-    var hosts = this.get('hosts');
-    var hostnames = [];
-    var inc = 0;
-    hosts.forEach(function (_hostname) {
-      hostnames[inc] = _hostname.host_name;
-      inc++;
-    });
-    var hostExcAmbari = hostnames.without(location.hostname);
-    if (noOfHosts > 1) {
-      return hostExcAmbari[0];
-    } else {
-      return hostnames[0];
-    }
-  },
-
-  getNagiosServer:function (noOfHosts) {
-    var hosts = this.get('hosts');
-    var hostnames = [];
-    var inc = 0;
-    hosts.forEach(function (_hostname) {
-      hostnames[inc] = _hostname.host_name;
-      inc++;
-    });
-    var hostExcAmbari = hostnames.without(location.hostname);
-    if (noOfHosts > 1) {
-      return hostExcAmbari[0];
-    } else {
-      return hostnames[0];
-    }
-  },
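-
-  /* Editor's note (not part of the original file): both selectors above use
-   * hostnames.without(location.hostname) to avoid placing the monitoring
-   * server on the host that serves this UI (the Ambari server), falling back
-   * to the first host when only one host is registered.
-   */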
-
-
-  /**
-   * Return the host name (or names, for ZooKeeper) of the master node for the specified component
-   * @param componentName
-   * @return {*}
-   */
-  selectHost:function (componentName) {
-    var noOfHosts = this.get('hosts').length;
-    switch (componentName) {
-      case 'KERBEROS_SERVER':
-        return this.getKerberosServer(noOfHosts).host_name;
-      case 'NAMENODE':
-        return this.getNameNode(noOfHosts).host_name;
-      case 'SECONDARY_NAMENODE':
-        return this.getSNameNode(noOfHosts).host_name;
-      case 'JOBTRACKER':
-        return this.getJobTracker(noOfHosts).host_name;
-      case 'HBASE_MASTER':
-        return this.getHBaseMaster(noOfHosts).host_name;
-      case 'OOZIE_SERVER':
-        return this.getOozieServer(noOfHosts).host_name;
-      case 'HIVE_SERVER':
-        return this.getHiveServer(noOfHosts).host_name;
-      case 'HIVE_METASTORE':
-        return this.getHiveMetastore(noOfHosts).host_name;
-      case 'WEBHCAT_SERVER':
-        return this.getWebHCatServer(noOfHosts).host_name;
-      case 'ZOOKEEPER_SERVER':
-        return this.getZooKeeperServer(noOfHosts);
-      case 'GANGLIA_SERVER':
-        return this.getGangliaServer(noOfHosts);
-      case 'NAGIOS_SERVER':
-        return this.getNagiosServer(noOfHosts);
-    }
-  },
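-
-  /* Editor's sketch (not part of the original file): with five registered
-   * hosts, selectHost('NAMENODE') returns hosts[0].host_name, while
-   * selectHost('ZOOKEEPER_SERVER') returns an array of three host names,
-   * since getZooKeeperServer() is the only selector that returns a list.
-   */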
-
-  masterHostMapping:function () {
-    var mapping = [], mappingObject, self = this, mappedHosts, hostObj, hostInfo;
-    //get the unique assigned hosts and find the master services assigned to them
-
-    mappedHosts = this.get("selectedServicesMasters").mapProperty("selectedHost").uniq();
-
-    mappedHosts.forEach(function (item) {
-      hostObj = self.get("hosts").findProperty("host_name", item);
-      console.log("Name of the host is: " + hostObj.host_name);
-
-      mappingObject = Ember.Object.create({
-        host_name:item,
-        hostInfo:hostObj.host_info,
-        masterServices:self.get("selectedServicesMasters").filterProperty("selectedHost", item)
-      });
-
-      mapping.pushObject(mappingObject);
-    }, this);
-
-    mapping.sort(this.sortHostsByName);
-
-    return mapping;
-
-  }.property("selectedServicesMasters.@each.selectedHost"),
-
-  remainingHosts:function () {
-    return (this.get("hosts.length") - this.get("masterHostMapping.length"));
-  }.property("selectedServicesMasters.@each.selectedHost"),
-
-  hasZookeeper:function () {
-    return this.selectedServices.findProperty("service_name", "ZooKeeper");
-  }.property("selectedServices"),
-
-  //methods
-  getAvailableHosts:function (componentName) {
-    var assignableHosts = [],
-      zookeeperHosts = null;
-
-    if (componentName === "ZooKeeper") {
-      zookeeperHosts = this.get("selectedServicesMasters").filterProperty("display_name", "ZooKeeper").mapProperty("selectedHost").uniq();
-      this.get("hosts").forEach(function (item) {
-        if (!(zookeeperHosts.contains(item.get("host_name")))) {
-          assignableHosts.pushObject(item);
-        }
-      }, this);
-      return assignableHosts;
-
-    } else {
-      return this.get("hosts");
-    }
-  },
-
-  assignHostToMaster:function (masterService, selectedHost, zId) {
-    if (selectedHost && masterService) {
-      if ((masterService === "ZooKeeper") && zId) {
-        this.get('selectedServicesMasters').findProperty("zId", zId).set("selectedHost", selectedHost);
-        this.rebalanceZookeeperHosts();
-      }
-      else {
-        this.get('selectedServicesMasters').findProperty("display_name", masterService).set("selectedHost", selectedHost);
-      }
-
-    }
-  },
-
-  lastZooKeeper:function () {
-    var currentZooKeepers = this.get("selectedServicesMasters").filterProperty("display_name", "ZooKeeper");
-    if (currentZooKeepers) {
-      return currentZooKeepers.get("lastObject");
-    }
-
-    return null;
-  },
-
-  addZookeepers:function () {
-    /*
-     * Logic: if the ZooKeeper service is selected, there must be at least
-     * one ZooKeeper master in total, and at most one ZooKeeper per host.
-     */
-
-    var maxNumZooKeepers = this.get("hosts.length"),
-      currentZooKeepers = this.get("selectedServicesMasters").filterProperty("display_name", "ZooKeeper"),
-      newZookeeper = null,
-      zookeeperHosts = null,
-      suggestedHost = null,
-      i = 0,
-      lastZoo = null;
-    console.log('hosts length is: ' + maxNumZooKeepers);
-    //work only if the Zookeeper service is selected in previous step
-    if (!this.get("selectedServices").mapProperty("service_name").contains("ZOOKEEPER")) {
-      console.log('ALERT: Zookeeper service was not selected');
-      return false;
-    }
-
-    if (currentZooKeepers.get("length") < maxNumZooKeepers) {
-      console.log('currentZooKeepers length is less than the maximum. It is: ' + currentZooKeepers.get("length"));
-      currentZooKeepers.set("lastObject.showAddControl", false);
-      if (currentZooKeepers.get("length") >= 1) {
-        currentZooKeepers.set("lastObject.showRemoveControl", true);
-      }
-
-      //create a new zookeeper based on an existing one
-      newZookeeper = Ember.Object.create({});
-      lastZoo = currentZooKeepers.get("lastObject");
-      newZookeeper.set("display_name", lastZoo.get("display_name"));
-      newZookeeper.set("component_name", lastZoo.get("component_name"));
-      newZookeeper.set("selectedHost", lastZoo.get("selectedHost"));
-      newZookeeper.set("availableHosts", this.getAvailableHosts("ZooKeeper"));
-
-      if (currentZooKeepers.get("length") === (maxNumZooKeepers - 1)) {
-        newZookeeper.set("showAddControl", false);
-      } else {
-        newZookeeper.set("showAddControl", true);
-      }
-      newZookeeper.set("showRemoveControl", true);
-
-      //get recommended host for the new Zookeeper server
-      zookeeperHosts = currentZooKeepers.mapProperty("selectedHost").uniq();
-
-      for (i = 0; i < this.get("hosts.length"); i++) {
-        if (!(zookeeperHosts.contains(this.get("hosts")[i].get("host_name")))) {
-          suggestedHost = this.get("hosts")[i].get("host_name");
-          break;
-        }
-      }
-
-      newZookeeper.set("selectedHost", suggestedHost);
-      newZookeeper.set("zId", (currentZooKeepers.get("lastObject.zId") + 1));
-      this.set('zId', parseInt(this.get('zId')) + 1);
-
-      this.get("selectedServicesMasters").pushObject(newZookeeper);
-
-      this.rebalanceZookeeperHosts();
-
-      return true;
-    }
-    return false; // no more ZooKeepers can be added
-  },
-
-  removeZookeepers:function (zId) {
-    var currentZooKeepers;
-
-    //work only if the Zookeeper service is selected in previous step
-    if (!this.get("selectedServices").mapProperty("service_name").contains("ZOOKEEPER")) {
-      return false;
-    }
-
-    currentZooKeepers = this.get("selectedServicesMasters").filterProperty("display_name", "ZooKeeper");
-
-    if (currentZooKeepers.get("length") > 1) {
-      this.get("selectedServicesMasters").removeAt(this.get("selectedServicesMasters").indexOf(this.get("selectedServicesMasters").findProperty("zId", zId)));
-
-      currentZooKeepers = this.get("selectedServicesMasters").filterProperty("display_name", "ZooKeeper");
-      if (currentZooKeepers.get("length") < this.get("hosts.length")) {
-        currentZooKeepers.set("lastObject.showAddControl", true);
-      }
-
-      if (currentZooKeepers.get("length") === 1) {
-        currentZooKeepers.set("lastObject.showRemoveControl", false);
-      }
-      this.set('zId', parseInt(this.get('zId')) - 1);
-      this.rebalanceZookeeperHosts();
-
-      return true;
-    }
-
-    return false;
-
-  },
-
-  rebalanceZookeeperHosts:function () {
-    //when a ZooKeeper's host changes, update the available hosts for the other ZooKeepers
-
-    var currentZooKeepers = this.get("selectedServicesMasters").filterProperty("display_name", "ZooKeeper"),
-      zooHosts = currentZooKeepers.mapProperty("selectedHost"),
-      availableZooHosts = [],
-      preparedAvailableHosts = null;
-
-    //get all hosts available for zookeepers
-    this.get("hosts").forEach(function (item) {
-      if (!zooHosts.contains(item.get("host_name"))) {
-        availableZooHosts.pushObject(item);
-      }
-    }, this);
-
-    currentZooKeepers.forEach(function (item) {
-      preparedAvailableHosts = availableZooHosts.slice(0);
-      preparedAvailableHosts.pushObject(this.get("hosts").findProperty("host_name", item.get("selectedHost")));
-      preparedAvailableHosts.sort(this.sortHostsByConfig, this);
-      item.set("availableHosts", preparedAvailableHosts);
-    }, this);
-  },
-
-  sortHostsByConfig:function (a, b) {
-    //currently handles only total memory on the host; sorts descending
-    return b.memory - a.memory;
-  },
-
-  sortHostsByName:function (a, b) {
-    if (a.host_name === b.host_name) {
-      return 0;
-    }
-    return (a.host_name > b.host_name) ? 1 : -1;
-  }
-});
-
-
-
diff --git a/branch-1.2/ambari-web/app/controllers/wizard/step6_controller.js b/branch-1.2/ambari-web/app/controllers/wizard/step6_controller.js
deleted file mode 100644
index 4285e5a..0000000
--- a/branch-1.2/ambari-web/app/controllers/wizard/step6_controller.js
+++ /dev/null
@@ -1,346 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-var db = require('utils/db');
-
-/**
- * By Step 6, we have the following information stored in App.db and set on this
- * controller by the router:
- *
- *   hosts: App.db.hosts (list of all hosts the user selected in Step 3)
- *   selectedServiceNames: App.db.selectedServiceNames (the services that the user selected in Step 4)
- *   masterComponentHosts: App.db.masterComponentHosts (master-components-to-hosts mapping the user selected in Step 5)
- *
- * Step 6 will set the following information in App.db:
- *   slaveComponentHosts: App.db.slaveComponentHosts (slave-components-to-hosts mapping the user selected in Step 6)
- *
- */
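-
-/* Editor's sketch (not part of the original file): the slaveComponentHosts
- * structure written by this step, as consumed by renderSlaveHosts() below,
- * looks roughly like:
- *
- *   [{ componentName: 'DATANODE',
- *      hosts: [{ hostName: 'host1', isInstalled: false }] },
- *    { componentName: 'CLIENT',
- *      hosts: [{ hostName: 'host2', isInstalled: false }] }]
- */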
-App.WizardStep6Controller = Em.Controller.extend({
-
-  hosts: [],
-
-  isAddHostWizard: function(){
-    return this.get('content.controllerName') === 'addHostController';
-  }.property('content.controllerName'),
-
-  isAllDataNodes: function () {
-    return this.get('hosts').everyProperty('isDataNode', true);
-  }.property('hosts.@each.isDataNode'),
-
-  isAllTaskTrackers: function () {
-    return this.get('hosts').everyProperty('isTaskTracker', true);
-  }.property('hosts.@each.isTaskTracker'),
-
-  isAllRegionServers: function () {
-    return this.get('hosts').everyProperty('isRegionServer', true);
-  }.property('hosts.@each.isRegionServer'),
-
-  isAllClients: function () {
-    return this.get('hosts').everyProperty('isClient', true);
-  }.property('hosts.@each.isClient'),
-
-  isNoDataNodes: function () {
-    return this.get('hosts').everyProperty('isDataNode', false);
-  }.property('hosts.@each.isDataNode'),
-
-  isNoTaskTrackers: function () {
-    return this.get('hosts').everyProperty('isTaskTracker', false);
-  }.property('hosts.@each.isTaskTracker'),
-
-  isNoRegionServers: function () {
-    return this.get('hosts').everyProperty('isRegionServer', false);
-  }.property('hosts.@each.isRegionServer'),
-
-  isNoClients: function () {
-    return this.get('hosts').everyProperty('isClient', false);
-  }.property('hosts.@each.isClient'),
-
-  /**
-   * Return whether the HBase service was selected,
-   * calculated from the <code>content.services</code> variable
-   * @return Boolean
-   */
-  isHbSelected: function () {
-    return this.get('content.services').findProperty('serviceName', 'HBASE').get('isSelected');
-  }.property('content.services'),
-
-  /**
-   * Return whether the MapReduce service was selected,
-   * calculated from the <code>content.services</code> variable
-   * @return Boolean
-   */
-  isMrSelected: function () {
-    return this.get('content.services').findProperty('serviceName', 'MAPREDUCE').get('isSelected');
-  }.property('content.services'),
-
-  clearError: function () {
-    var isError = false;
-    var hosts = this.get('hosts');
-    if (this.get('isNoDataNodes') === false &&
-      (this.get('isNoTaskTrackers') === false || this.get('isMrSelected') === false) &&
-      (this.get('isNoRegionServers') === false || this.get('isHbSelected') === false) &&
-      this.get('isNoClients') === false) {
-      this.set('errorMessage', '');
-    }
-    if(this.get('isAddHostWizard')){
-      for(var i = 0; i < hosts.length; i++){
-        isError = !(hosts[i].get('isDataNode') || hosts[i].get('isClient')
-          || ( this.get('isMrSelected') && hosts[i].get('isTaskTracker'))
-          || ( this.get('isHbSelected') && hosts[i].get('isRegionServer')));
-        if (isError) {
-          break;
-        } else {
-          this.set('errorMessage', '');
-        }
-      }
-    }
-  }.observes('isNoDataNodes', 'isNoTaskTrackers', 'isNoRegionServers', 'isNoClients'),
-
-  /**
-   * Check whether the given host is currently selected as a master
-   * @param hostName
-   * @return {Boolean}
-   */
-  hasMasterComponents: function (hostName) {
-    return this.get('content.masterComponentHosts').someProperty('hostName', hostName);
-  },
-
-  selectAllDataNodes: function () {
-    var forFilter = this.get('hosts').filterProperty('isDataNodeInstalled', false);
-    forFilter.setEach('isDataNode', true);
-  },
-
-  selectAllTaskTrackers: function () {
-    var forFilter = this.get('hosts').filterProperty('isTaskTrackerInstalled', false);
-    forFilter.setEach('isTaskTracker', true);
-  },
-
-  selectAllRegionServers: function () {
-    var forFilter = this.get('hosts').filterProperty('isRegionServerInstalled', false);
-    forFilter.setEach('isRegionServer', true);
-  },
-
-  selectAllClients: function () {
-    var forFilter = this.get('hosts').filterProperty('isClientInstalled', false);
-    forFilter.setEach('isClient', true);
-  },
-
-  deselectAllDataNodes: function () {
-    var forFilter = this.get('hosts').filterProperty('isDataNodeInstalled', false);
-    forFilter.setEach('isDataNode', false);
-  },
-
-  deselectAllTaskTrackers: function () {
-    var forFilter = this.get('hosts').filterProperty('isTaskTrackerInstalled', false);
-    forFilter.setEach('isTaskTracker', false);
-  },
-
-  deselectAllRegionServers: function () {
-    var forFilter = this.get('hosts').filterProperty('isRegionServerInstalled', false);
-    forFilter.setEach('isRegionServer', false);
-  },
-
-  deselectAllClients: function () {
-    var forFilter = this.get('hosts').filterProperty('isClientInstalled', false);
-    forFilter.setEach('isClient', false);
-  },
-
-  clearStep: function () {
-    this.set('hosts', []);
-    this.clearError();
-  },
-
-  loadStep: function () {
-    console.log("WizardStep6Controller: Loading step6: Assign Slaves");
-    this.clearStep();
-    this.renderSlaveHosts();
-
-    if(this.get('content.missSlavesStep')){
-      App.router.send('next');
-    }
-  },
-
-  /**
-   * Get active host names
-   * @return {Array}
-   */
-  getHostNames: function () {
-    var hostInfo = this.get('content.hosts');
-    var hostNames = [];
-    for (var index in hostInfo) {
-      if (hostInfo[index].bootStatus === 'REGISTERED') {
-        hostNames.push(hostInfo[index].name);
-      }
-    }
-    return hostNames;
-  },
-
-  /**
-   * Load all data needed for this step; the bound template renders it automatically
-   */
-  renderSlaveHosts: function () {
-    var hostsObj = Em.Set.create();
-    var allHosts = this.getHostNames();
-    // TODO - Hard coding should be removed.
-    var maxNoofHostComponents = 11;
-    var slaveComponents = this.get('content.slaveComponentHosts');
-
-    allHosts.forEach(function (_hostName) {
-      hostsObj.push(Em.Object.create({
-        hostName: _hostName,
-        isMaster: false,
-        isDataNode: false,
-        isTaskTracker: false,
-        isRegionServer: false,
-        isClient: false,
-        isDataNodeInstalled: false,
-        isTaskTrackerInstalled: false,
-        isRegionServerInstalled: false,
-        isClientInstalled: false
-      }));
-    });
-
-    if (!slaveComponents) { // we are at this page for the first time
-      if (allHosts.length > 3) {             //multiple nodes scenario
-        hostsObj.forEach(function (host) {
-          host.isMaster = this.hasMasterComponents(host.hostName);
-          host.isDataNode = host.isTaskTracker
-            = host.isRegionServer = !host.isMaster;
-        }, this);
-
-        if (hostsObj.someProperty('isDataNode', true)) {
-          hostsObj.findProperty('isDataNode', true).set('isClient', true);
-        }
-      } else {
-        var masterObj = {
-          host: null,
-          masterComponents: maxNoofHostComponents
-        };
-        hostsObj.forEach(function (host) {
-          host.isMaster = this.hasMasterComponents(host.hostName);
-          var countMasterComp = this.getMasterComponentsForHost(host.hostName).length;
-          if (countMasterComp <= masterObj.masterComponents) {
-            masterObj.masterComponents = countMasterComp;
-            masterObj.host = host;
-          }
-        }, this);
-        masterObj.host.set('isClient', true);
-        masterObj.host.set('isDataNode', true);
-        masterObj.host.set('isTaskTracker', true);
-        masterObj.host.set('isRegionServer', true);
-
-      }
-
-    } else {
-
-      var dataNodes = slaveComponents.findProperty('componentName', 'DATANODE');
-      dataNodes.hosts.forEach(function (_dataNode) {
-        var dataNode = hostsObj.findProperty('hostName', _dataNode.hostName);
-        if (dataNode) {
-          dataNode.set('isDataNode', true);
-          dataNode.set('isDataNodeInstalled', _dataNode.isInstalled);
-        }
-      });
-
-      if (this.get('isMrSelected')) {
-        var taskTrackers = slaveComponents.findProperty('componentName', 'TASKTRACKER');
-        taskTrackers.hosts.forEach(function (_taskTracker) {
-          var taskTracker = hostsObj.findProperty('hostName', _taskTracker.hostName);
-          if (taskTracker) {
-            taskTracker.set('isTaskTracker', true);
-            taskTracker.set('isTaskTrackerInstalled', _taskTracker.isInstalled);
-          }
-        });
-      }
-
-      if (this.get('isHbSelected')) {
-        var regionServers = slaveComponents.findProperty('componentName', 'HBASE_REGIONSERVER');
-        regionServers.hosts.forEach(function (_regionServer) {
-          var regionServer = hostsObj.findProperty('hostName', _regionServer.hostName);
-          if (regionServer) {
-            regionServer.set('isRegionServer', true);
-            regionServer.set('isRegionServerInstalled', _regionServer.isInstalled);
-          }
-        });
-      }
-
-      var clients = slaveComponents.findProperty('componentName', 'CLIENT');
-      clients.hosts.forEach(function (_client) {
-        var client = hostsObj.findProperty('hostName', _client.hostName);
-        if (client) {
-          client.set('isClient', true);
-          client.set('isClientInstalled', _client.isInstalled);
-        }
-      }, this);
-
-      allHosts.forEach(function (_hostname) {
-        var host = hostsObj.findProperty('hostName', _hostname);
-        if (host) {
-          host.set('isMaster', this.hasMasterComponents(_hostname));
-        }
-      }, this);
-
-    }
-
-    hostsObj.forEach(function (host) {
-      this.get('hosts').pushObject(host);
-    }, this);
-  },
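-
-  /* Editor's note (not part of the original file): the defaulting above means
-   * that with more than three registered hosts every non-master host is
-   * preselected as DataNode, TaskTracker and RegionServer (and the first
-   * DataNode also gets the client); with three or fewer hosts, a host
-   * carrying the fewest master components receives all slave roles plus
-   * the client.
-   */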
-
-  /**
-   * Return the list of master components for the specified <code>hostName</code>
-   * @param hostName
-   * @return {*}
-   */
-  getMasterComponentsForHost: function (hostName) {
-    return this.get('content.masterComponentHosts').filterProperty('hostName', hostName).mapProperty('component');
-  },
-
-
-  /**
-   * Validate the form; returns true when there are no errors
-   * @return {Boolean}
-   */
-  validate: function () {
-    var isError = false;
-    var hosts = this.get('hosts');
-    if(this.get('isAddHostWizard')){
-      for(var i = 0; i < hosts.length; i++){
-        isError = !(hosts[i].get('isDataNode') || hosts[i].get('isClient')
-          || ( this.get('isMrSelected') && hosts[i].get('isTaskTracker'))
-          || ( this.get('isHbSelected') && hosts[i].get('isRegionServer')));
-        if (isError) {
-          this.set('errorMessage', Ember.I18n.t('installer.step6.error.mustSelectOneForHost'));
-          break;
-        }
-      }
-    } else {
-      isError = this.get('isNoDataNodes') || this.get('isNoClients')
-        || ( this.get('isMrSelected') && this.get('isNoTaskTrackers'))
-        || ( this.get('isHbSelected') && this.get('isNoRegionServers'));
-      if (isError) {
-        this.set('errorMessage', Ember.I18n.t('installer.step6.error.mustSelectOne'));
-      }
-    }
-
-    return !isError;
-  }
-
-});
diff --git a/branch-1.2/ambari-web/app/controllers/wizard/step7_controller.js b/branch-1.2/ambari-web/app/controllers/wizard/step7_controller.js
deleted file mode 100644
index cc8bc80..0000000
--- a/branch-1.2/ambari-web/app/controllers/wizard/step7_controller.js
+++ /dev/null
@@ -1,364 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-/**
- * By Step 7, we have the following information stored in App.db and set on this
- * controller by the router.
- *
- *   selectedServices: App.db.selectedServices (the services that the user selected in Step 4)
- *   masterComponentHosts: App.db.masterComponentHosts (master-components-to-hosts mapping the user selected in Step 5)
- *   slaveComponentHosts: App.db.slaveComponentHosts (slave-components-to-hosts mapping the user selected in Step 6)
- *
- */
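-
-/* Editor's sketch (not part of the original file): entries of
- * App.db.serviceConfigProperties, as read back in loadStep() below, carry at
- * least { id: 'puppet var', name: 'hive_database', value: 'New MySQL Database' };
- * the field names come from this controller's usage, the values are
- * illustrative only.
- */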
-
-App.WizardStep7Controller = Em.Controller.extend({
-
-  name: 'wizardStep7Controller',
-
-  stepConfigs: [], //contains all field properties that are viewed in this step
-
-  selectedService: null,
-
-  slaveHostToGroup: null,
-
-  isSubmitDisabled: function () {
-    return !this.stepConfigs.filterProperty('showConfig', true).everyProperty('errorCount', 0);
-  }.property('stepConfigs.@each.errorCount'),
-
-  selectedServiceNames: function () {
-    return this.get('content.services').filterProperty('isSelected', true).filterProperty('isInstalled', false).mapProperty('serviceName');
-  }.property('content.services').cacheable(),
-
-  allInstalledServiceNames: function () {
-    return this.get('content.services').filterProperty('isSelected', true).mapProperty('serviceName');
-  }.property('content.services').cacheable(),
-
-  masterComponentHosts: function () {
-    return this.get('content.masterComponentHosts');
-  }.property('content.masterComponentHosts'),
-
-  slaveComponentHosts: function () {
-    return this.get('content.slaveGroupProperties');
-  }.property('content.slaveGroupProperties', 'content.slaveComponentHosts'),
-
-  serviceConfigs: require('data/service_configs'),
-  configMapping: require('data/config_mapping'),
-  customConfigs: require('data/custom_configs'),
-  customData: [],
-
-  clearStep: function () {
-    this.get('stepConfigs').clear();
-  },
-
-  /**
-   * On load function
-   */
-  loadStep: function () {
-    console.log("TRACE: Loading step7: Configure Services");
-    this.clearStep();
-    var serviceConfigs = this.get('serviceConfigs');
-    var advancedConfig = this.get('content.advancedServiceConfig') || [];
-    this.loadAdvancedConfig(serviceConfigs, advancedConfig);
-    this.loadCustomConfig();
-    this.renderServiceConfigs(serviceConfigs);
-    var storedServices = this.get('content.serviceConfigProperties');
-    if (storedServices) {
-      var configs = new Ember.Set();
-
-      // for all services
-      this.get('stepConfigs').forEach(function (_content) {
-        //for all components
-        _content.get('configs').forEach(function (_config) {
-
-          var componentVal = storedServices.findProperty('name', _config.get('name'));
-          //if we have config for specified component
-          if (componentVal) {
-            //set it
-            _config.set('value', componentVal.value)
-          }
-
-        }, this);
-      }, this);
-
-    }
-  },
-
-  /*
-   Loads the advanced configs fetched from the server metadata library
-   */
-
-  loadAdvancedConfig: function (serviceConfigs, advancedConfig) {
-    advancedConfig.forEach(function (_config) {
-      if (_config) {
-        var service = serviceConfigs.findProperty('serviceName', _config.serviceName);
-        if (service) {
-          if (this.get('configMapping').someProperty('name', _config.name)) {
-            // already handled via configMapping; skip it here
-          } else if (!(service.configs.someProperty('name', _config.name))) {
-            _config.id = "site property";
-            _config.category = 'Advanced';
-            _config.displayName = _config.name;
-            _config.defaultValue = _config.value;
-            // make all advanced configs optional and populated by default
-            /*
-             if (/\${.*}/.test(_config.value) || (service.serviceName !== 'OOZIE' && service.serviceName !== 'HBASE')) {
-             _config.isRequired = false;
-             _config.value = '';
-             } else if (/^\s+$/.test(_config.value)) {
-             _config.isRequired = false;
-             }
-             */
-            _config.isRequired = false;
-            _config.isVisible = true;
-            _config.displayType = 'advanced';
-            service.configs.pushObject(_config);
-          }
-        }
-      }
-    }, this);
-  },
-
-
-  /**
-   * Render a custom conf-site box for entering properties that will be written to the services' *-site.xml files
-   */
-  loadCustomConfig: function () {
-    var serviceConfigs = this.get('serviceConfigs');
-    this.get('customConfigs').forEach(function (_config) {
-      var service = serviceConfigs.findProperty('serviceName', _config.serviceName);
-      if (service) {
-        if (!(service.configs.someProperty('name', _config.name))) {
-          service.configs.pushObject(_config);
-        }
-      }
-    }, this);
-  },
-
-  /**
-   * Render configs for active services
-   * @param serviceConfigs
-   */
-  renderServiceConfigs: function (serviceConfigs) {
-    serviceConfigs.forEach(function (_serviceConfig) {
-
-      var serviceConfig = App.ServiceConfig.create({
-        filename: _serviceConfig.filename,
-        serviceName: _serviceConfig.serviceName,
-        displayName: _serviceConfig.displayName,
-        configCategories: _serviceConfig.configCategories,
-        showConfig: false,
-        configs: []
-      });
-
-      if (this.get('allInstalledServiceNames').contains(serviceConfig.serviceName) || serviceConfig.serviceName === 'MISC') {
-
-        this.loadComponentConfigs(_serviceConfig, serviceConfig);
-
-        console.log('pushing ' + serviceConfig.serviceName, serviceConfig);
-
-        if (this.get('selectedServiceNames').contains(serviceConfig.serviceName) || serviceConfig.serviceName === 'MISC') {
-          serviceConfig.showConfig = true;
-        }
-
-        this.get('stepConfigs').pushObject(serviceConfig);
-
-      } else {
-        console.log('skipping ' + serviceConfig.serviceName);
-      }
-    }, this);
-
-    var miscConfigs = this.get('stepConfigs').findProperty('serviceName', 'MISC').configs;
-    var showProxyGroup = this.get('selectedServiceNames').contains('HIVE') ||
-      this.get('selectedServiceNames').contains('HCATALOG') ||
-      this.get('selectedServiceNames').contains('OOZIE');
-    miscConfigs.findProperty('name', 'proxyuser_group').set('isVisible', showProxyGroup);
-    miscConfigs.findProperty('name', 'hbase_user').set('isVisible', this.get('selectedServiceNames').contains('HBASE'));
-    miscConfigs.findProperty('name', 'mapred_user').set('isVisible', this.get('selectedServiceNames').contains('MAPREDUCE'));
-    miscConfigs.findProperty('name', 'hive_user').set('isVisible', this.get('selectedServiceNames').contains('HIVE'));
-    miscConfigs.findProperty('name', 'hcat_user').set('isVisible', this.get('selectedServiceNames').contains('HCATALOG'));
-    miscConfigs.findProperty('name', 'webhcat_user').set('isVisible', this.get('selectedServiceNames').contains('WEBHCAT'));
-    miscConfigs.findProperty('name', 'oozie_user').set('isVisible', this.get('selectedServiceNames').contains('OOZIE'));
-    miscConfigs.findProperty('name', 'zk_user').set('isVisible', this.get('selectedServiceNames').contains('ZOOKEEPER'));
-
-    this.set('selectedService', this.get('stepConfigs').filterProperty('showConfig', true).objectAt(0));
-  },
-
-  /**
-   * Load child components to service config object
-   * @param _componentConfig
-   * @param componentConfig
-   */
-  loadComponentConfigs: function (_componentConfig, componentConfig) {
-    _componentConfig.configs.forEach(function (_serviceConfigProperty) {
-      var serviceConfigProperty = App.ServiceConfigProperty.create(_serviceConfigProperty);
-      serviceConfigProperty.serviceConfig = componentConfig;
-      serviceConfigProperty.initialValue();
-      componentConfig.configs.pushObject(serviceConfigProperty);
-      serviceConfigProperty.validate();
-    }, this);
-  },
-
-  validateCustomConfig: function () {
-    var flag = true;
-    var serviceProperties = [];
-    this.get('stepConfigs').forEach(function (_serviceContent) {
-      var configProperties = _serviceContent.get('configs');
-      if (configProperties.someProperty('id', 'conf-site')) {
-        var serviceProperty = {};
-        serviceProperty.serviceName = _serviceContent.get("serviceName");
-        serviceProperty.siteProperties = [];
-        var customSite = configProperties.findProperty('id', 'conf-site');
-        var keyValue = customSite.value.split(/\n+/);
-        if (keyValue) {
-          keyValue.forEach(function (_keyValue) {
-            console.log("The value of the keyValue is: " + _keyValue.trim());
-            _keyValue = _keyValue.trim();
-            var key = _keyValue.match(/(.+)=/);
-            var value = _keyValue.match(/=(.*)/);
-            if (key) {
-              // Check that entered config is allowed to reconfigure
-              if (configProperties.someProperty('name', key[1]) || this.get('configMapping').someProperty('name', key[1])) {
-                var property = {
-                  siteProperty: key[1],
-                  displayNames: [],
-                  displayMsg: null
-                };
-                if (this.get('configMapping').someProperty('name', key[1])) {
-                  this.setPropertyDisplayNames(property.displayNames, this.get('configMapping').findProperty('name', key[1]).templateName, configProperties);
-                }
-                property.displayMsg = this.setDisplayMessage(property.siteProperty, property.displayNames);
-                serviceProperty.siteProperties.push(property);
-                flag = false;
-              }
-            }
-          }, this);
-        }
-        serviceProperties.push(serviceProperty);
-      }
-    }, this);
-    var result = {
-      flag: flag,
-      value: serviceProperties
-    };
-    return result;
-  },
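-
-  /* Editor's sketch (not part of the original file): the regexes above split
-   * each line of the conf-site textarea into key and value around '=', e.g.
-   *
-   *   'dfs.block.size=134217728'.match(/(.+)=/)[1]  // 'dfs.block.size'
-   *   'dfs.block.size=134217728'.match(/=(.*)/)[1]  // '134217728'
-   */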
-
-  /**
-   * Build a display message from a site property and its display names
-   * @param siteProperty
-   * @param displayNames: an array of display names
-   */
-  setDisplayMessage: function (siteProperty, displayNames) {
-    var displayMsg = null;
-    if (displayNames && displayNames.length) {
-      if (displayNames.length === 1) {
-        displayMsg = siteProperty + ' as ' + displayNames[0];
-      } else {
-        var name = null;
-        displayNames.forEach(function (_name, index) {
-          if (index === 0) {
-            name = _name;
-          } else if (index === displayNames.length - 1) {
-            name = name + ' and ' + _name;
-          } else {
-            name = name + ', ' + _name;
-          }
-        }, this);
-        displayMsg = siteProperty + ' as ' + name;
-      }
-    } else {
-      displayMsg = siteProperty;
-    }
-    return displayMsg;
-  },
-
-  /**
-   * Set display names of the property from the puppet/global names
-   * @param displayNames: a field to be set with the display names
-   * @param names: array of property puppet/global names
-   * @param configProperties: array of config properties of the service the names belong to
-   */
-  setPropertyDisplayNames: function (displayNames, names, configProperties) {
-    names.forEach(function (_name, index) {
-      if (configProperties.someProperty('name', _name)) {
-        displayNames.push(configProperties.findProperty('name', _name).displayName);
-      }
-    }, this);
-  },
-
-  /**
-   * Display an error message with the service name, its custom configuration name and display names on the page
-   * @param customConfig: array with the custom configuration, serviceName and displayNames relative to the custom configuration
-   */
-  showCustomConfigErrMsg: function (customConfig) {
-
-    App.ModalPopup.show({
-      header: 'Custom configuration error: ',
-      primary: 'OK',
-      secondary: null,
-      onPrimary: function () {
-        this.hide();
-      },
-      bodyClass: Ember.View.extend({
-        message: 'Error in custom configuration. Some properties entered in the box are already exposed on this page',
-        siteProperties: customConfig,
-        getDisplayMessage: function () {
-
-        }.property('customConfig.@each.siteProperties.@each.siteProperty'),
-        customConfig: customConfig,
-        template: Ember.Handlebars.compile([
-          '<h5>{{view.message}}</h5>',
-          '<br/>',
-          '<div class="pre-scrollable" style="max-height: 250px;">',
-          '<ul>',
-          '{{#each val in view.customConfig}}',
-          '{{#if val.siteProperties}}',
-          '<li>',
-          '{{val.serviceName}}',
-          '<ul>',
-          '{{#each item in val.siteProperties}}',
-          '<li>',
-          '{{item.displayMsg}}',
-          '</li>',
-          '{{/each}}',
-          '</ul>',
-          '</li>',
-          '{{/if}}',
-          '{{/each}}',
-          '</ul>',
-          '</div>'
-        ].join('\n'))
-      })
-    });
-  },
-
-  submit: function () {
-    if (!this.get('isSubmitDisabled')) {
-      App.router.send('next');
-      /*
-      var result = {};
-      result = this.validateCustomConfig();
-      if (result.flag === true) {
-        App.router.send('next');
-      } else {
-        this.showCustomConfigErrMsg(result.value);
-      }
-      */
-    }
-  }
-
-});
diff --git a/branch-1.2/ambari-web/app/controllers/wizard/step8_controller.js b/branch-1.2/ambari-web/app/controllers/wizard/step8_controller.js
deleted file mode 100644
index 3e0ea39..0000000
--- a/branch-1.2/ambari-web/app/controllers/wizard/step8_controller.js
+++ /dev/null
@@ -1,1560 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.WizardStep8Controller = Em.Controller.extend({
-  name: 'wizardStep8Controller',
-  rawContent: require('data/review_configs'),
-  totalHosts: [],
-  clusterInfo: [],
-  services: [],
-  configs: [],
-  globals: [],
-  configMapping: require('data/config_mapping'),
-  slaveComponentConfig: null,
-  isSubmitDisabled: false,
-  hasErrorOccurred: false,
-  servicesInstalled: false,
-
-  selectedServices: function () {
-    return this.get('content.services').filterProperty('isSelected', true).filterProperty('isInstalled', false);
-  }.property('content.services').cacheable(),
-
-  clearStep: function () {
-    this.get('services').clear();
-    this.get('configs').clear();
-    this.get('globals').clear();
-    this.get('clusterInfo').clear();
-    this.set('servicesInstalled', false);
-  },
-
-  loadStep: function () {
-    console.log("TRACE: Loading step8: Review Page");
-    this.clearStep();
-    this.loadGlobals();
-    this.loadConfigs();
-    this.setCustomConfigs();
-    //this.loadSlaveConfiguration();
-    this.loadClusterInfo();
-    this.loadServices();
-    this.set('isSubmitDisabled', false);
-  },
-
-  loadGlobals: function () {
-    var globals = this.get('content.serviceConfigProperties').filterProperty('id', 'puppet var');
-    if (globals.someProperty('name', 'hive_database')) {
-      //TODO: Hive host depends on the type of db selected. Change puppet variable name if postgres is not the default db
-      var hiveDb = globals.findProperty('name', 'hive_database');
-      if (hiveDb.value === 'New MySQL Database') {
-        if (globals.someProperty('name', 'hive_ambari_host')) {
-          globals.findProperty('name', 'hive_ambari_host').name = 'hive_mysql_hostname';
-        }
-        globals = globals.without(globals.findProperty('name', 'hive_existing_host'));
-        globals = globals.without(globals.findProperty('name', 'hive_existing_database'));
-      } else {
-        globals.findProperty('name', 'hive_existing_host').name = 'hive_mysql_hostname';
-        globals = globals.without(globals.findProperty('name', 'hive_ambari_host'));
-        globals = globals.without(globals.findProperty('name', 'hive_ambari_database'));
-      }
-    }
-    this.set('globals', globals);
-  },
-
-  loadConfigs: function () {
-    var storedConfigs = this.get('content.serviceConfigProperties').filterProperty('id', 'site property').filterProperty('value');
-    var uiConfigs = this.loadUiSideConfigs();
-    this.set('configs', storedConfigs.concat(uiConfigs));
-  },
-
-  loadUiSideConfigs: function () {
-    var uiConfig = [];
-    var configs = this.get('configMapping').filterProperty('foreignKey', null);
-    configs.forEach(function (_config) {
-      var value = this.getGlobConfigValue(_config.templateName, _config.value, _config.name);
-      uiConfig.pushObject({
-        "id": "site property",
-        "name": _config.name,
-        "value": value,
-        "filename": _config.filename
-      });
-    }, this);
-    var dependentConfig = this.get('configMapping').filterProperty('foreignKey');
-    dependentConfig.forEach(function (_config) {
-      this.setConfigValue(uiConfig, _config);
-      uiConfig.pushObject({
-        "id": "site property",
-        "name": _config.name,
-        "value": _config.value,
-        "filename": _config.filename
-      });
-    }, this);
-    return uiConfig;
-  },
-
-  getRegisteredHosts: function () {
-    var allHosts = this.get('content.hosts');
-    var hosts = [];
-    for (var hostName in allHosts) {
-      if (allHosts[hostName].bootStatus == 'REGISTERED') {
-        allHosts[hostName].hostName = allHosts[hostName].name;
-        hosts.pushObject(allHosts[hostName]);
-      }
-    }
-    return hosts;
-  },
-
-  /**
-   * Set all site properties that are derived from other puppet variables
-   */
-
-  getGlobConfigValue: function (templateName, expression, name) {
-    var express = expression.match(/<(.*?)>/g);
-    var value = expression;
-    if (express == null) {
-      return expression;
-    }
-    express.forEach(function (_express) {
-      //console.log("The value of template is: " + _express);
-      var index = parseInt(_express.match(/\[([\d]*)(?=\])/)[1]);
-      if (this.get('globals').someProperty('name', templateName[index])) {
-        //console.log("The name of the variable is: " + this.get('content.serviceConfigProperties').findProperty('name', templateName[index]).name);
-        var globValue = this.get('globals').findProperty('name', templateName[index]).value;
-        // Hack for templeton.zookeeper.hosts
-        if (value !== null) {   // when a property depends on several template names (e.g. <templateName[0]>/<templateName[1]>), stop substituting once a prior one was null or missing from the globals
-          if (name === "templeton.zookeeper.hosts" || name === 'hbase.zookeeper.quorum') {
-            // globValue is an array of ZooKeeper Server hosts
-            var zooKeeperPort = '2181';
-            if (name === "templeton.zookeeper.hosts") {
-              var zooKeeperServers = globValue.map(function (item) {
-                return item + ':' + zooKeeperPort;
-              }).join(',');
-              value = value.replace(_express, zooKeeperServers);
-            } else {
-              value = value.replace(_express, globValue.join(','));
-            }
-          } else {
-            value = value.replace(_express, globValue);
-          }
-        }
-      } else {
-        /*
-         console.log("ERROR: The variable name is: " + templateName[index]);
-         console.log("ERROR: mapped config from configMapping file has no corresponding variable in " +
-         "content.serviceConfigProperties. Two possible reasons for the error could be: 1) The service is not selected. " +
-         "and/OR 2) The service_config metadata file has no corresponding global var for the site property variable");
-         */
-        value = null;
-      }
-    }, this);
-    return value;
-  },
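-  // Illustrative example (names and values hypothetical): with a global
-  // {name: 'zookeeperserver_hosts', value: ['h1', 'h2']}, a configMapping entry
-  // {name: 'templeton.zookeeper.hosts', templateName: ['zookeeperserver_hosts'],
-  //  value: '<templateName[0]>'} resolves to 'h1:2181,h2:2181' through the
-  // ZooKeeper-port special case above.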
-  /**
-   * Set all site properties that are derived from other site properties
-   */
-  setConfigValue: function (uiConfig, config) {
-    if (config.value == null) {
-      return;
-    }
-    var fkValue = config.value.match(/<(foreignKey.*?)>/g);
-    if (fkValue) {
-      fkValue.forEach(function (_fkValue) {
-        var index = parseInt(_fkValue.match(/\[([\d]*)(?=\])/)[1]);
-        if (uiConfig.someProperty('name', config.foreignKey[index])) {
-          var globalValue = uiConfig.findProperty('name', config.foreignKey[index]).value;
-          config.value = config.value.replace(_fkValue, globalValue);
-        } else if (this.get('content.serviceConfigProperties').someProperty('name', config.foreignKey[index])) {
-          var globalValue;
-          if (this.get('content.serviceConfigProperties').findProperty('name', config.foreignKey[index]).value === '') {
-            globalValue = this.get('content.serviceConfigProperties').findProperty('name', config.foreignKey[index]).defaultValue;
-          } else {
-            globalValue = this.get('content.serviceConfigProperties').findProperty('name', config.foreignKey[index]).value;
-          }
-          config.value = config.value.replace(_fkValue, globalValue);
-        }
-      }, this);
-    }
-    if ((fkValue = config.name.match(/<(foreignKey.*?)>/g))) { // intentional assignment: reuse fkValue for matches in the name
-      fkValue.forEach(function (_fkValue) {
-        var index = parseInt(_fkValue.match(/\[([\d]*)(?=\])/)[1]);
-        if (uiConfig.someProperty('name', config.foreignKey[index])) {
-          var globalValue = uiConfig.findProperty('name', config.foreignKey[index]).value;
-          config.name = config.name.replace(_fkValue, globalValue);
-        } else if (this.get('content.serviceConfigProperties').someProperty('name', config.foreignKey[index])) {
-          var globalValue;
-          if (this.get('content.serviceConfigProperties').findProperty('name', config.foreignKey[index]).value === '') {
-            globalValue = this.get('content.serviceConfigProperties').findProperty('name', config.foreignKey[index]).defaultValue;
-          } else {
-            globalValue = this.get('content.serviceConfigProperties').findProperty('name', config.foreignKey[index]).value;
-          }
-          config.name = config.name.replace(_fkValue, globalValue);
-        }
-      }, this);
-    }
-    //For properties in the configMapping file having foreignKey and templateName properties.
-
-    var templateValue = config.value.match(/<(templateName.*?)>/g);
-    if (templateValue) {
-      templateValue.forEach(function (_value) {
-        var index = parseInt(_value.match(/\[([\d]*)(?=\])/)[1]);
-        if (this.get('globals').someProperty('name', config.templateName[index])) {
-          var globalValue = this.get('globals').findProperty('name', config.templateName[index]).value;
-          config.value = config.value.replace(_value, globalValue);
-        } else {
-          config.value = null;
-        }
-      }, this);
-    }
-  },
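-  // Illustrative example (names hypothetical): an entry with
-  // foreignKey: ['hive_user'] and name: 'hadoop.proxyuser.<foreignKey[0]>.groups'
-  // has <foreignKey[0]> replaced by the resolved 'hive_user' value, yielding a
-  // final property name such as 'hadoop.proxyuser.hive.groups'.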
-
-  /**
-   * Override site properties with the user-entered key-value pairs for each *-site.xml
-   */
-  setCustomConfigs: function () {
-    var site = this.get('content.serviceConfigProperties').filterProperty('id', 'conf-site');
-    site.forEach(function (_site) {
-      var keyValue = _site.value.split(/\n+/);
-      if (keyValue) {
-        keyValue.forEach(function (_keyValue) {
-          _keyValue = _keyValue.trim();
-          console.log("The value of the keyValue is: " + _keyValue);
-          // split on the first = encountered (the value may contain ='s)
-          var matches = _keyValue.match(/^([^=]+)=(.*)$/);
-          if (matches) {
-            var key = matches[1];
-            var value = matches[2];
-            if (key) {
-              this.setSiteProperty(key, value, _site.name + '.xml');
-            }
-          }
-        }, this);
-      }
-    }, this);
-  },
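-  // Example of the first-'=' split above: the line
-  // 'fs.default.name=hdfs://nn.example.com:8020' yields key 'fs.default.name'
-  // and value 'hdfs://nn.example.com:8020'; any later '=' stays in the value.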
-
-  /**
-   * Add a single site property to <code>configs</code>
-   */
-  setSiteProperty: function (key, value, filename) {
-    this.get('configs').pushObject({
-      "id": "site property",
-      "name": key,
-      "value": value,
-      "filename": filename
-    });
-  },
-
-  loadSlaveConfiguration: function () {
-
-    var slaveComponentConfig = this.convertSlaveConfig(this.get('content.slaveGroupProperties'));
-    this.set("slaveComponentConfig", slaveComponentConfig);
-  },
-
-  convertSlaveConfig: function (slaveContent) {
-    var dest = {
-      "version": "1.0",
-      "components": [
-      ],
-      "slaveHostComponents": []
-    };
-
-    slaveContent.forEach(function (_slaveContent) {
-      var newComponent = {};
-      newComponent.componentName = _slaveContent.componentName;
-      newComponent.serviceName = this.getServiceInfo(newComponent.componentName).name;
-      newComponent.groups = [];
-      var index = 2;
-      if (_slaveContent.groups) {
-        _slaveContent.groups.forEach(function (_group) {
-          var newGroup = {};
-          newGroup.groupName = _group.name;
-          newGroup.configVersion = {config: {'global': 'version1', 'core-site': 'version1'}}; // TODO: a new version should be generated each time
-          if (this.getServiceInfo(_slaveContent.componentName)) {
-            newGroup.configVersion.config[this.getServiceInfo(_slaveContent.componentName).domain] = 'version' + index;
-            newGroup.configVersion.config[this.getServiceInfo(_slaveContent.componentName).siteName] = 'version' + index;
-          }
-          newGroup.siteVersion = 'version' + index;
-          newGroup.hostNames = _slaveContent.hosts.filterProperty("group", newGroup.groupName).mapProperty('hostName');
-          newGroup.properties = _group.properties;
-          if (!Ember.empty(newGroup.hostNames)) {
-            newComponent.groups.push(newGroup);
-          }
-          index++;
-        }, this);
-      }
-      dest.components.push(newComponent);
-    }, this);
-    var hostsInfo = this.get('content.hosts');
-
-    for (var index in hostsInfo) {
-      var slaveHost = {name: null, configVersion: null, slaveComponents: []};
-      dest.components.forEach(function (_component) {
-        _component.groups.forEach(function (_group) {
-          if (_group.hostNames.contains(hostsInfo[index].name)) {
-            var slaveComponent = {};
-            slaveHost.name = hostsInfo[index].name;
-            slaveComponent.componentName = _component.componentName;
-            slaveComponent.groupName = _group.groupName;
-            slaveComponent.properties = _group.properties;
-            slaveHost.slaveComponents.pushObject(slaveComponent);
-          }
-        }, this);
-      }, this);
-      if (!Ember.none(slaveHost.name)) {
-        dest.slaveHostComponents.pushObject(slaveHost);
-      }
-
-    }
-    return dest;
-  },
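-  // Sketch of the structure built above (values illustrative):
-  // { version: '1.0',
-  //   components: [{ componentName: 'DATANODE', serviceName: 'HDFS',
-  //                  groups: [{ groupName: ..., configVersion: { config: {...} },
-  //                             siteVersion: 'version2', hostNames: [...], properties: [...] }] }],
-  //   slaveHostComponents: [{ name: 'host1', slaveComponents: [...] }] }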
-
-  getServiceInfo: function (componentName) {
-    var serviceConfig;
-    switch (componentName) {
-      case 'DATANODE':
-        serviceConfig = {
-          name: 'HDFS',
-          siteName: 'hdfs-site',
-          domain: 'datanode-global'
-        };
-        break;
-      case 'TASKTRACKER':
-        serviceConfig = {
-          name: 'MAPREDUCE',
-          siteName: 'mapred-site',
-          domain: 'tasktracker-global'
-        };
-        break;
-      case 'HBASE_REGIONSERVER':
-        serviceConfig = {
-          name: 'HBASE',
-          siteName: 'hbase-site',
-          domain: 'regionserver-global'
-        };
-        break;
-      default:
-        serviceConfig = {};
-    }
-    return serviceConfig;
-  },
-
-  /**
-   * Load all info about cluster to <code>clusterInfo</code> variable
-   */
-  loadClusterInfo: function () {
-
-    //Admin name
-    var admin = this.rawContent.findProperty('config_name', 'Admin');
-    admin.config_value = App.db.getLoginName();
-    console.log("STEP8: the value of content cluster name: " + App.db.getLoginName());
-    if (admin.config_value) {
-      this.get('clusterInfo').pushObject(Ember.Object.create(admin));
-    }
-
-    // cluster name
-    var cluster = this.rawContent.findProperty('config_name', 'cluster');
-    cluster.config_value = this.get('content.cluster.name');
-    console.log("STEP8: the value of content cluster name: " + this.get('content.cluster.name'));
-    this.get('clusterInfo').pushObject(Ember.Object.create(cluster));
-
-    //hosts
-    var masterHosts = this.get('content.masterComponentHosts').mapProperty('hostName').uniq();
-    var slaveHosts = this.get('content.slaveComponentHosts');
-
-    var hostObj = [];
-    slaveHosts.forEach(function (_hosts) {
-      hostObj = hostObj.concat(_hosts.hosts);
-    }, this);
-
-    slaveHosts = hostObj.mapProperty('hostName').uniq();
-
-    var componentHosts = masterHosts.concat(slaveHosts).uniq();
-    var totalHosts = App.Host.find().mapProperty('hostName').concat(componentHosts).uniq();
-    var newHostsCount = totalHosts.length - App.Host.find().content.length;
-    this.set('totalHosts', totalHosts);
-    var totalHostsObj = this.rawContent.findProperty('config_name', 'hosts');
-    totalHostsObj.config_value = totalHosts.length + ' (' + newHostsCount + ' new)';
-    this.get('clusterInfo').pushObject(Ember.Object.create(totalHostsObj));
-
-    //repo
-    var repoOption = this.get('content.installOptions.localRepo');
-    var repoObj = this.rawContent.findProperty('config_name', 'Repo');
-    if (repoOption) {
-      repoObj.config_value = 'Yes';
-    } else {
-      repoObj.config_value = 'No';
-    }
-    this.get('clusterInfo').pushObject(Ember.Object.create(repoObj));
-  },
-
-
-  /**
-   * Load all info about services to <code>services</code> variable
-   */
-  loadServices: function () {
-    var selectedServices = this.get('selectedServices');
-    this.set('services', selectedServices.mapProperty('serviceName'));
-
-    selectedServices.forEach(function (_service) {
-      console.log('INFO: step8: Name of the service from getService function: ' + _service.serviceName);
-      var reviewService = this.rawContent.findProperty('config_name', 'services');
-      var serviceObj = reviewService.config_value.findProperty('service_name', _service.serviceName);
-
-      if (serviceObj) {
-        switch (serviceObj.service_name) {
-          case 'HDFS':
-            this.loadHDFS(serviceObj);
-            break;
-          case 'MAPREDUCE':
-            this.loadMapReduce(serviceObj);
-            break;
-          case 'HIVE':
-            this.loadHive(serviceObj);
-            break;
-          case 'HBASE':
-            this.loadHbase(serviceObj);
-            break;
-          case 'ZOOKEEPER':
-            this.loadZk(serviceObj);
-            break;
-          case 'OOZIE':
-            this.loadOozie(serviceObj);
-            break;
-          case 'NAGIOS':
-            this.loadNagios(serviceObj);
-            break;
-          case 'GANGLIA':
-            this.loadGanglia(serviceObj);
-            break;
-          /* case 'PIG':
-           this.loadPig(serviceObj);
-           break;
-           case 'SQOOP':
-           this.loadSqoop(serviceObj);
-           break;
-           */
-          case 'HCATALOG':
-            break;
-          default:
-        }
-      }
-    }, this);
-  },
-
-  /**
-   * load all info about HDFS service
-   * @param hdfsObj
-   */
-  loadHDFS: function (hdfsObj) {
-    hdfsObj.get('service_components').forEach(function (_component) {
-      switch (_component.get('display_name')) {
-        case 'NameNode':
-          this.loadNnValue(_component);
-          break;
-        case 'SecondaryNameNode':
-          this.loadSnnValue(_component);
-          break;
-        case 'DataNodes':
-          this.loadDnValue(_component);
-          break;
-        default:
-      }
-    }, this);
-    //var
-    this.get('services').pushObject(hdfsObj);
-  },
-
-  loadNnValue: function (nnComponent) {
-    var nnHostName = this.get('content.masterComponentHosts').findProperty('display_name', nnComponent.display_name);
-    nnComponent.set('component_value', nnHostName.hostName);
-  },
-
-  loadSnnValue: function (snnComponent) {
-    var snnHostName = this.get('content.masterComponentHosts').findProperty('display_name', 'SNameNode');
-    snnComponent.set('component_value', snnHostName.hostName);
-  },
-
-  loadDnValue: function (dnComponent) {
-    var dnHosts = this.get('content.slaveComponentHosts').findProperty('displayName', 'DataNode');
-    var totalDnHosts = dnHosts.hosts.length;
-    /* var totalGroups = this.get('slaveComponentConfig.components').findProperty('componentName', 'DATANODE').groups.length;
-     var groupLabel;
-     if (totalGroups == 1) {
-     groupLabel = 'group';
-     } else {
-     groupLabel = 'groups';
-     }
-     */
-    dnComponent.set('component_value', totalDnHosts + ' hosts');
-  },
-
-
-  /**
-   * Load all info about mapReduce service
-   * @param mrObj
-   */
-  loadMapReduce: function (mrObj) {
-    mrObj.get('service_components').forEach(function (_component) {
-      switch (_component.get('display_name')) {
-        case 'JobTracker':
-          this.loadJtValue(_component);
-          break;
-        case 'TaskTrackers':
-          this.loadTtValue(_component);
-          break;
-        default:
-      }
-    }, this);
-    this.get('services').pushObject(mrObj);
-  },
-
-  loadJtValue: function (jtComponent) {
-    var jtHostName = this.get('content.masterComponentHosts').findProperty('display_name', jtComponent.display_name);
-    jtComponent.set('component_value', jtHostName.hostName);
-  },
-
-  loadTtValue: function (ttComponent) {
-    var ttHosts = this.get('content.slaveComponentHosts').findProperty('displayName', 'TaskTracker');
-    var totalTtHosts = ttHosts.hosts.length;
-    /* var totalGroups = this.get('slaveComponentConfig.components').findProperty('componentName', 'TASKTRACKER').groups.length;
-     var groupLabel;
-     if (totalGroups == 1) {
-     groupLabel = 'group';
-     } else {
-     groupLabel = 'groups';
-     }
-     */
-    ttComponent.set('component_value', totalTtHosts + ' hosts');
-  },
-
-  /**
-   * Load all info about Hive service
-   * @param hiveObj
-   */
-  loadHive: function (hiveObj) {
-    hiveObj.get('service_components').forEach(function (_component) {
-      switch (_component.get('display_name')) {
-        case 'Hive Metastore':
-          this.loadHiveMetaStoreValue(_component);
-          break;
-        case 'Database':
-          this.loadHiveDbValue(_component);
-          break;
-        default:
-      }
-    }, this);
-    this.get('services').pushObject(hiveObj);
-
-  },
-
-  loadHiveMetaStoreValue: function (metaStoreComponent) {
-    var hiveHostName = this.get('content.masterComponentHosts').findProperty('display_name', 'HiveServer2');
-    metaStoreComponent.set('component_value', hiveHostName.hostName);
-  },
-
-  loadHiveDbValue: function (dbComponent) {
-    var hiveDb = App.db.getServiceConfigProperties().findProperty('name', 'hive_database');
-
-    if (hiveDb.value === 'New MySQL Database') {
-      dbComponent.set('component_value', 'MySQL (New Database)');
-    } else {
-      var db = App.db.getServiceConfigProperties().findProperty('name', 'hive_existing_database');
-      dbComponent.set('component_value', db.value + ' (' + hiveDb.value + ')');
-    }
-  },
-
-  /**
-   * Load all info about Hbase
-   * @param hbaseObj
-   */
-  loadHbase: function (hbaseObj) {
-    hbaseObj.service_components.forEach(function (_component) {
-      switch (_component.display_name) {
-        case 'Master':
-          this.loadMasterValue(_component);
-          break;
-        case 'Region Servers':
-          this.loadRegionServerValue(_component);
-          break;
-        default:
-      }
-    }, this);
-    this.get('services').pushObject(hbaseObj);
-  },
-
-  loadMasterValue: function (hbaseMaster) {
-    var hbaseHostName = this.get('content.masterComponentHosts').findProperty('display_name', 'HBase Master');
-    hbaseMaster.set('component_value', hbaseHostName.hostName);
-  },
-
-  loadRegionServerValue: function (rsComponent) {
-    var rsHosts = this.get('content.slaveComponentHosts').findProperty('displayName', 'RegionServer');
-    var totalRsHosts = rsHosts.hosts.length;
-    /* var totalGroups = this.get('slaveComponentConfig.components').findProperty('componentName', 'HBASE_REGIONSERVER').groups.length;
-     var groupLabel;
-     if (totalGroups == 1) {
-     groupLabel = 'group';
-     } else {
-     groupLabel = 'groups';
-     } */
-    rsComponent.set('component_value', totalRsHosts + ' hosts');
-  },
-
-  /**
-   * Load all info about ZooKeeper service
-   * @param zkObj
-   */
-  loadZk: function (zkObj) {
-    zkObj.get('service_components').forEach(function (_component) {
-      switch (_component.get('display_name')) {
-        case 'Servers':
-          this.loadZkServerValue(_component);
-          break;
-        default:
-      }
-    }, this);
-    this.get('services').pushObject(zkObj);
-  },
-
-  loadZkServerValue: function (serverComponent) {
-    var zkHostNames = this.get('content.masterComponentHosts').filterProperty('display_name', 'ZooKeeper').length;
-    var hostSuffix;
-    if (zkHostNames === 1) {
-      hostSuffix = 'host';
-    } else {
-      hostSuffix = 'hosts';
-    }
-    serverComponent.set('component_value', zkHostNames + ' ' + hostSuffix);
-  },
-
-  /**
-   * Load all info about Oozie services
-   * @param oozieObj
-   */
-  loadOozie: function (oozieObj) {
-    oozieObj.get('service_components').forEach(function (_component) {
-      switch (_component.get('display_name')) {
-        case 'Server':
-          this.loadOozieServerValue(_component);
-          break;
-        case 'Database':
-          // TODO: uncomment when ready to integrate with Oozie Database other than Derby
-          // this.loadOozieDbValue(_component);
-          break;
-        default:
-      }
-    }, this);
-    this.get('services').pushObject(oozieObj);
-  },
-
-  loadOozieServerValue: function (oozieServer) {
-    var oozieServerName = this.get('content.masterComponentHosts').findProperty('display_name', 'Oozie Server');
-    oozieServer.set('component_value', oozieServerName.hostName);
-  },
-
-  loadOozieDbValue: function (dbComponent) {
-    var oozieDb = App.db.getServiceConfigProperties().findProperty('name', 'oozie_database');
-    if (oozieDb.value === 'New PostgreSQL Database') {
-      dbComponent.set('component_value', 'PostgreSQL (New Database)');
-    } else {
-      var db = App.db.getServiceConfigProperties().findProperty('name', 'oozie_existing_database');
-      dbComponent.set('component_value', db.value + ' (' + oozieDb.value + ')');
-    }
-  },
-
-
-  /**
-   * Load all info about Nagios service
-   * @param nagiosObj
-   */
-  loadNagios: function (nagiosObj) {
-    nagiosObj.service_components.forEach(function (_component) {
-      switch (_component.display_name) {
-        case 'Server':
-          this.loadNagiosServerValue(_component);
-          break;
-        case 'Administrator':
-          this.loadNagiosAdminValue(_component);
-          break;
-        default:
-      }
-    }, this);
-    this.get('services').pushObject(nagiosObj);
-  },
-
-  loadNagiosServerValue: function (nagiosServer) {
-    var nagiosServerName = this.get('content.masterComponentHosts').findProperty('display_name', 'Nagios Server');
-    nagiosServer.set('component_value', nagiosServerName.hostName);
-  },
-
-  loadNagiosAdminValue: function (nagiosAdmin) {
-    var config = this.get('content.serviceConfigProperties');
-    var adminLoginName = config.findProperty('name', 'nagios_web_login');
-    var adminEmail = config.findProperty('name', 'nagios_contact');
-    nagiosAdmin.set('component_value', adminLoginName.value + ' / (' + adminEmail.value + ')');
-  },
-
-  /**
-   * Load all info about ganglia
-   * @param gangliaObj
-   */
-  loadGanglia: function (gangliaObj) {
-    gangliaObj.get('service_components').forEach(function (_component) {
-      switch (_component.get('display_name')) {
-        case 'Server':
-          this.loadGangliaServerValue(_component);
-          break;
-        default:
-      }
-    }, this);
-    this.get('services').pushObject(gangliaObj);
-  },
-
-  loadGangliaServerValue: function (gangliaServer) {
-    var gangliaServerName = this.get('content.masterComponentHosts').findProperty('display_name', 'Ganglia Collector');
-    gangliaServer.set('component_value', gangliaServerName.hostName);
-  },
-
-  loadSqoop: function (sqoopObj) {
-    this.get('services').pushObject(sqoopObj);
-  },
-
-  loadPig: function (pigObj) {
-    this.get('services').pushObject(pigObj);
-  },
-
-  /**
-   * Onclick handler for <code>next</code> button
-   */
-  submit: function () {
-
-    if (this.get('isSubmitDisabled')) {
-      return;
-    }
-
-    this.set('isSubmitDisabled', true);
-
-    // checkpoint the cluster status on the server so that the user can resume from where they left off
-    switch (this.get('content.controllerName')) {
-      case 'installerController':
-        App.clusterStatus.setClusterStatus({
-          clusterName: this.get('clusterName'),
-          clusterState: 'CLUSTER_DEPLOY_PREP_2',
-          wizardControllerName: this.get('content.controllerName'),
-          localdb: App.db.data
-        });
-        break;
-      case 'addHostController':
-        App.clusterStatus.setClusterStatus({
-          clusterName: this.get('clusterName'),
-          clusterState: 'ADD_HOSTS_DEPLOY_PREP_2',
-          wizardControllerName: this.get('content.controllerName'),
-          localdb: App.db.data
-        });
-        break;
-      default:
-        break;
-    }
-
-    // delete any existing clusters to start from a clean slate
-    // before creating a new cluster in install wizard
-    // TODO: modify for multi-cluster support
-    if (this.get('content.controllerName') == 'installerController') {
-      var clusterNames = this.getExistingClusterNames();
-      this.deleteClusters(clusterNames);
-    }
-
-    this.createCluster();
-    this.createSelectedServices();
-    this.createConfigurations();
-    this.applyCreatedConfToServices();
-    this.createComponents();
-    this.registerHostsToCluster();
-    this.createAllHostComponents();
-
-    this.ajaxQueueFinished = function () {
-      console.log('everything is loaded');
-      App.router.send('next');
-    };
-    this.doNextAjaxCall();
-
-  },
-
-  /**
-   * Used in progress bar
-   */
-  ajaxQueueLength: function() {
-    return this.get('ajaxQueue').length;
-  }.property('ajaxQueue.length'),
-
-  /**
-   * Used in progress bar
-   */
-  ajaxQueueLeft: 0,
-
-  setAmbariUIDb: function () {
-    var dbContent = this.get('content.slaveGroupProperties');
-    var slaveComponentConfig = this.get("slaveComponentConfig");
-    this.persistKeyValues(slaveComponentConfig.version, dbContent);
-    this.persistKeyValues('current_version', slaveComponentConfig.version);
-  },
-
-  persistKeyValues: function (key, value) {
-
-    // build the payload object directly; the previous eval-based construction
-    // broke whenever the serialized value contained a quote character
-    var obj = {};
-    obj[key] = JSON.stringify(value);
-
-    this.ajax({
-      type: "POST",
-      url: App.apiPrefix + '/persist',
-      data: JSON.stringify(obj),
-      beforeSend: function () {
-        console.log('BeforeSend: persistKeyValues', obj);
-      }
-    });
-  },
-
-  clusterName: function () {
-    return this.get('content.cluster.name');
-  }.property('content.cluster.name'),
-
-  // returns an array of existing cluster names.
-  // returns an empty array if there are no existing clusters.
-  getExistingClusterNames: function () {
-    var url = App.apiPrefix + '/clusters';
-
-    var clusterNames = [];
-
-    $.ajax({
-      type: 'GET',
-      url: url,
-      async: false,
-      success: function(data) {
-        var jsonData = jQuery.parseJSON(data);
-        clusterNames = jsonData.items.mapProperty('Clusters.cluster_name');
-        console.log("Got existing cluster names: " + clusterNames);
-      },
-      error: function () {
-        console.log("Failed to get existing cluster names");
-      }
-    });
-
-    return clusterNames;
-  },
-
-  deleteClusters: function (clusterNames) {
-    clusterNames.forEach(function(clusterName) {
-
-      var url = App.apiPrefix + '/clusters/' + clusterName;
-
-      $.ajax({
-        type: 'DELETE',
-        url: url,
-        async: false,
-        success: function () {
-          console.log('DELETE cluster ' + clusterName + ' succeeded');
-        },
-        error: function () {
-          console.log('DELETE cluster ' + clusterName + ' failed');
-        }
-      });
-    });
-  },
-
-  /**
-   *  The following create* functions are called upon submitting Step 8.
-   */
-
-  createCluster: function () {
-
-    if (this.get('content.isWizard')) {
-      return false;
-    }
-
-    var clusterName = this.get('clusterName');
-    var url = App.apiPrefix + '/clusters/' + clusterName;
-
-    var stackVersion = (this.get('content.installOptions.localRepo')) ? App.defaultLocalStackVersion : App.defaultStackVersion;
-
-    this.ajax({
-      type: 'POST',
-      url: url,
-      data: JSON.stringify({ "Clusters": {"version": stackVersion }}),
-      beforeSend: function () {
-        console.log("BeforeSend: createCluster for " + clusterName);
-      }
-    });
-
-  },
-
-  createSelectedServices: function () {
-
-    var url = App.apiPrefix + '/clusters/' + this.get('clusterName') + '/services';
-    var data = this.createServiceData();
-    var httpMethod = 'POST';
-
-    if (!data.length) {
-      return;
-    }
-
-    this.ajax({
-      type: httpMethod,
-      url: url,
-      data: JSON.stringify(data),
-      beforeSend: function () {
-        console.log('BeforeSend: createSelectedServices ', data);
-      }
-    });
-  },
-
-  createServiceData: function () {
-    var services = this.get('selectedServices').mapProperty('serviceName');
-    var data = [];
-    services.forEach(function (_service) {
-      data.pushObject({"ServiceInfo": { "service_name": _service }});
-    }, this);
-    return data;
-  },
-
-  createComponents: function () {
-
-    var serviceComponents = require('data/service_components');
-    var services = this.get('selectedServices').mapProperty('serviceName');
-    services.forEach(function (_service) {
-      var components = serviceComponents.filterProperty('service_name', _service);
-      var componentsData = components.map(function (_component) {
-        return { "ServiceComponentInfo": { "component_name": _component.component_name } };
-      });
-
-      // Service must be specified in terms of a query for creating multiple components at the same time.
-      // See AMBARI-1018.
-      var url = App.apiPrefix + '/clusters/' + this.get('clusterName') + '/services?ServiceInfo/service_name=' + _service;
-      var data = {
-        "components": componentsData
-      };
-
-      this.ajax({
-        type: 'POST',
-        url: url,
-        data: JSON.stringify(data),
-        beforeSend: function () {
-          console.log('BeforeSend: createComponents for ' + _service, componentsData);
-        }
-      });
-    }, this);
-
-  },
-
-  registerHostsToCluster: function () {
-
-    var url = App.apiPrefix + '/clusters/' + this.get('clusterName') + '/hosts';
-    var data = this.createRegisterHostData();
-
-    if (data.length == 0) {
-      return;
-    }
-
-    this.ajax({
-      type: 'POST',
-      url: url,
-      data: JSON.stringify(data),
-      beforeSend: function () {
-        console.log('BeforeSend: registerHostsToCluster', data);
-      }
-    });
-  },
-
-  createRegisterHostData: function () {
-    var hosts = this.getRegisteredHosts().filterProperty('isInstalled', false);
-    if (!hosts.length) {
-      return [];
-    }
-    return hosts.map(function (host) {
-      return {"Hosts": { "host_name": host.hostName}};
-    });
-  },
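-  // The resulting POST body to /clusters/{name}/hosts is an array such as
-  // [{"Hosts": {"host_name": "host1"}}, {"Hosts": {"host_name": "host2"}}]
-  // (host names illustrative).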
-
-  // TODO: review the code for add hosts / add services scenarios...
-  createAllHostComponents: function () {
-
-    var masterHosts = this.get('content.masterComponentHosts');
-    var slaveHosts = this.get('content.slaveComponentHosts');
-    var clients = this.get('content.clients');
-
-    // note: masterHosts has 'component' vs slaveHosts has 'componentName'
-    var masterComponents = masterHosts.mapProperty('component').uniq();
-
-    masterComponents.forEach(function (component) {
-      var hostNames = masterHosts.filterProperty('component', component).filterProperty('isInstalled', false).mapProperty('hostName');
-      this.registerHostsToComponent(hostNames, component);
-    }, this);
-
-    slaveHosts.forEach(function (_slave) {
-      if (_slave.componentName !== 'CLIENT') {
-        var hostNames = _slave.hosts.filterProperty('isInstalled', false).mapProperty('hostName');
-        this.registerHostsToComponent(hostNames, _slave.componentName);
-      } else {
-        clients.forEach(function (_client) {
-
-            var hostNames = _slave.hosts.mapProperty('hostName');
-            switch (_client.component_name) {
-              case 'HDFS_CLIENT':
-                // install HDFS_CLIENT on HBASE_MASTER, HBASE_REGIONSERVER, and WEBHCAT_SERVER hosts
-                masterHosts.filterProperty('component', 'HBASE_MASTER').filterProperty('isInstalled', false).forEach(function (_masterHost) {
-                  hostNames.pushObject(_masterHost.hostName);
-                }, this);
-                masterHosts.filterProperty('component', 'HBASE_REGIONSERVER').filterProperty('isInstalled', false).forEach(function (_masterHost) {
-                  hostNames.pushObject(_masterHost.hostName);
-                }, this);
-                masterHosts.filterProperty('component', 'WEBHCAT_SERVER').filterProperty('isInstalled', false).forEach(function (_masterHost) {
-                  hostNames.pushObject(_masterHost.hostName);
-                }, this);
-                break;
-              case 'MAPREDUCE_CLIENT':
-                // install MAPREDUCE_CLIENT on HIVE_SERVER, OOZIE_SERVER, NAGIOS_SERVER, and WEBHCAT_SERVER hosts
-                masterHosts.filterProperty('component', 'HIVE_SERVER').filterProperty('isInstalled', false).forEach(function (_masterHost) {
-                  hostNames.pushObject(_masterHost.hostName);
-                }, this);
-                masterHosts.filterProperty('component', 'OOZIE_SERVER').filterProperty('isInstalled', false).forEach(function (_masterHost) {
-                  hostNames.pushObject(_masterHost.hostName);
-                }, this);
-                masterHosts.filterProperty('component', 'NAGIOS_SERVER').filterProperty('isInstalled', false).forEach(function (_masterHost) {
-                  hostNames.pushObject(_masterHost.hostName);
-                }, this);
-                masterHosts.filterProperty('component', 'WEBHCAT_SERVER').filterProperty('isInstalled', false).forEach(function (_masterHost) {
-                  hostNames.pushObject(_masterHost.hostName);
-                }, this);
-                break;
-              case 'OOZIE_CLIENT':
-                // install OOZIE_CLIENT on NAGIOS_SERVER host
-                masterHosts.filterProperty('component', 'NAGIOS_SERVER').filterProperty('isInstalled', false).forEach(function (_masterHost) {
-                  hostNames.pushObject(_masterHost.hostName);
-                }, this);
-                break;
-              case 'ZOOKEEPER_CLIENT':
-                // install ZOOKEEPER_CLIENT on WEBHCAT_SERVER host
-                masterHosts.filterProperty('component', 'WEBHCAT_SERVER').filterProperty('isInstalled', false).forEach(function (_masterHost) {
-                  hostNames.pushObject(_masterHost.hostName);
-                }, this);
-                break;
-
-              case 'HIVE_CLIENT':
-                //install HIVE client on NAGIOS_SERVER host
-                masterHosts.filterProperty('component', 'NAGIOS_SERVER').filterProperty('isInstalled', false).forEach(function (_masterHost) {
-                  hostNames.pushObject(_masterHost.hostName);
-                }, this);
-                break;
-
-              case 'HCAT':
-                // install HCAT (client) on NAGIOS_SERVER host
-                masterHosts.filterProperty('component', 'NAGIOS_SERVER').filterProperty('isInstalled', false).forEach(function (_masterHost) {
-                  hostNames.pushObject(_masterHost.hostName);
-                }, this);
-                break;
-            }
-            hostNames = hostNames.uniq();
-
-            if(_client.isInstalled){
-              // exclude hosts where this client is already installed (e.g. on selected master hosts)
-              var installedHosts = _slave.hosts.filterProperty('isInstalled', true).mapProperty('hostName');
-              installedHosts.forEach(function(host){
-                if(hostNames.contains(host)){
-                  hostNames.splice(hostNames.indexOf(host), 1);
-                }
-              }, this);
-            }
-
-            this.registerHostsToComponent(hostNames, _client.component_name);
-
-        }, this);
-      }
-    }, this);
-
-    // add Ganglia Monitor (Slave) to all hosts if Ganglia service is selected
-    var gangliaService = this.get('content.services').filterProperty('isSelected', true).findProperty('serviceName', 'GANGLIA');
-    if (gangliaService) {
-      var hosts = this.getRegisteredHosts();
-      if (gangliaService.get('isInstalled')) {
-        hosts = hosts.filterProperty('isInstalled', false);
-      }
-      if (hosts.length) {
-        this.registerHostsToComponent(hosts.mapProperty('hostName'), 'GANGLIA_MONITOR');
-      }
-    }
-    // add MySQL Server if Hive is selected
-    var hiveService = this.get('content.services').filterProperty('isSelected', true).filterProperty('isInstalled', false).findProperty('serviceName', 'HIVE');
-    if (hiveService) {
-      this.registerHostsToComponent(masterHosts.filterProperty('component', 'HIVE_SERVER').mapProperty('hostName'), 'MYSQL_SERVER');
-    }
-  },
-
-  registerHostsToComponent: function (hostNames, componentName) {
-
-    if (hostNames.length == 0) {
-      return;
-    }
-    console.log('registering ' + componentName + ' to ' + JSON.stringify(hostNames));
-
-    // currently we are specifying the predicate as a query string.
-    // this can hit a ~4000-character limit in Jetty server.
-    // chunk to multiple calls if needed
-    // var hostsPredicate = hostNames.map(function (hostName) {
-    //   return 'Hosts/host_name=' + hostName;
-    // }).join('|');
-
-    var queryStrArr = [];
-    var queryStr = '';
-    hostNames.forEach(function (hostName) {
-      queryStr += 'Hosts/host_name=' + hostName + '|';
-      if (queryStr.length > 3500) {
-        queryStrArr.push(queryStr.slice(0, -1));
-        queryStr = '';
-      }
-    });
-
-    if (queryStr.length > 0) {
-      queryStrArr.push(queryStr.slice(0, -1));
-    }
-
-    queryStrArr.forEach(function (queryStr) {
-      // console.log('creating host components for ' + queryStr);
-      var url = App.apiPrefix + '/clusters/' + this.get('clusterName') + '/hosts?' + queryStr;
-      var data = {
-        "host_components": [
-          {
-            "HostRoles": {
-              "component_name": componentName
-            }
-          }
-        ]
-      };
-
-      this.ajax({
-        type: 'POST',
-        url: url,
-        data: JSON.stringify(data),
-        beforeSend: function () {
-          console.log('BeforeSend: registerHostsToComponent for ' + queryStr + ' and component ' + componentName);
-        }
-      });
-    }, this);
-  },
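-  // With the ~3500-character cap above, a long host list becomes several query
-  // strings of the form 'Hosts/host_name=h1|Hosts/host_name=h2|...', each sent
-  // as its own POST so that no single URL trips Jetty's ~4000-character limit.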
-
-  createConfigurations: function () {
-    var selectedServices = this.get('selectedServices');
-    if (!this.get('content.isWizard')) {
-      this.createConfigSiteForService(this.createGlobalSiteObj());
-      // this.createGlobalSitePerSlaveGroup();
-      this.createConfigSiteForService(this.createCoreSiteObj());
-      this.createConfigSiteForService(this.createHdfsSiteObj());
-      //this.createHdfsSitePerSlaveGroup('HDFS');
-    }
-    if (selectedServices.someProperty('serviceName', 'MAPREDUCE')) {
-      this.createConfigSiteForService(this.createMrSiteObj());
-      //this.createMrSitePerSlaveGroup('MAPREDUCE');
-    }
-    if (selectedServices.someProperty('serviceName', 'HBASE')) {
-      this.createConfigSiteForService(this.createHbaseSiteObj());
-      //this.createHbaseSitePerSlaveGroup('HBASE');
-    }
-    if (selectedServices.someProperty('serviceName', 'OOZIE')) {
-      this.createConfigSiteForService(this.createOozieSiteObj('OOZIE'));
-    }
-    if (selectedServices.someProperty('serviceName', 'HIVE')) {
-      this.createConfigSiteForService(this.createHiveSiteObj('HIVE'));
-    }
-    if (selectedServices.someProperty('serviceName', 'WEBHCAT')) {
-      this.createConfigSiteForService(this.createWebHCatSiteObj('WEBHCAT'));
-    }
-  },
-
-  createConfigSiteForService: function (data) {
-    console.log("Inside createConfigSiteForService");
-
-    var url = App.apiPrefix + '/clusters/' + this.get('clusterName') + '/configurations';
-
-    this.ajax({
-      type: 'POST',
-      url: url,
-      data: JSON.stringify(data),
-      beforeSend: function () {
-        console.log("BeforeSend: createConfigSiteForService for " + data.type);
-      }
-    });
-  },
-
-  createGlobalSiteObj: function () {
-    var globalSiteProperties = {};
-    //this.get('globals').filterProperty('domain', 'global').forEach(function (_globalSiteObj) {
-    this.get('globals').forEach(function (_globalSiteObj) {
-      // do not pass any globals whose name ends with _host or _hosts
-      if (!/_hosts?$/.test(_globalSiteObj.name)) {
-        // append "m" to JVM memory options except for hadoop_heapsize
-        // group the alternation so the '$' anchors every alternative
-        if (/(_heapsize|_newsize|_maxnewsize)$/.test(_globalSiteObj.name) && _globalSiteObj.name !== 'hadoop_heapsize') {
-          globalSiteProperties[_globalSiteObj.name] =  _globalSiteObj.value + "m";
-        } else {
-          globalSiteProperties[_globalSiteObj.name] = _globalSiteObj.value;
-        }
-        console.log("STEP8: name of the global property is: " + _globalSiteObj.name);
-        console.log("STEP8: value of the global property is: " + _globalSiteObj.value);
-      }
-      if (_globalSiteObj.name == 'java64_home') {
-        globalSiteProperties['java64_home'] = this.get('content.installOptions.javaHome');
-      }
-    }, this);
-    return {"type": "global", "tag": "version1", "properties": globalSiteProperties};
-  },
-
-  createGlobalSitePerSlaveGroup: function () {
-    this.get('slaveComponentConfig.components').forEach(function (_component) {
-      _component.groups.forEach(function (_group) {
-        var globalSiteProperties = {};
-        var properties = _group.properties;
-        properties.forEach(function (_property) {
-          if (!/_hosts?$/.test(_property.name)) {
-            // append "m" to JVM memory options except for hadoop_heapsize
-            if (/(_heapsize|_newsize|_maxnewsize)$/.test(_property.name) && _property.name !== 'hadoop_heapsize') {
-              globalSiteProperties[_property.name] = _property.value + "m";
-            } else {
-              globalSiteProperties[_property.name] = _property.storeValue;
-            }
-            console.log("STEP8: name of the global property is: " + _property.name);
-            console.log("STEP8: value of the global property is: " + _property.storeValue);
-          }
-        }, this);
-        var config = _group.configVersion.config;
-        for (var index in config) {
-          if (index === 'datanode-global' || index === 'tasktracker-global' || index === 'regionserver-global') {
-            var data = {"type": index, "tag": config[index], "properties": globalSiteProperties};
-            this.createConfigSiteForService(data);
-          }
-        }
-      }, this);
-    }, this);
-  },
-
-  createCoreSiteObj: function () {
-    var coreSiteObj = this.get('configs').filterProperty('filename', 'core-site.xml');
-    var coreSiteProperties = {};
-    // hadoop.proxyuser.oozie.hosts needs to be skipped if oozie is not selected
-    var isOozieSelected = this.get('selectedServices').someProperty('serviceName', 'OOZIE');
-    var oozieUser = this.get('globals').someProperty('name','oozie_user')  ? this.get('globals').findProperty('name','oozie_user').value : null;
-    var isHiveSelected = this.get('selectedServices').someProperty('serviceName', 'HIVE');
-    var hiveUser = this.get('globals').someProperty('name','hive_user') ? this.get('globals').findProperty('name','hive_user').value : null;
-    var isHcatSelected = this.get('selectedServices').someProperty('serviceName', 'WEBHCAT');
-    var hcatUser = this.get('globals').someProperty('name','hcat_user') ? this.get('globals').findProperty('name','hcat_user').value : null;
-    coreSiteObj.forEach(function (_coreSiteObj) {
-      if ((isOozieSelected || (_coreSiteObj.name != 'hadoop.proxyuser.' + oozieUser + '.hosts' && _coreSiteObj.name != 'hadoop.proxyuser.' + oozieUser + '.groups')) &&
-          (isHiveSelected || (_coreSiteObj.name != 'hadoop.proxyuser.' + hiveUser + '.hosts' && _coreSiteObj.name != 'hadoop.proxyuser.' + hiveUser + '.groups')) &&
-          (isHcatSelected || (_coreSiteObj.name != 'hadoop.proxyuser.' + hcatUser + '.hosts' && _coreSiteObj.name != 'hadoop.proxyuser.' + hcatUser + '.groups'))) {
-        coreSiteProperties[_coreSiteObj.name] = _coreSiteObj.value;
-      }
-      console.log("STEP*: name of the property is: " + _coreSiteObj.name);
-      console.log("STEP8: value of the property is: " + _coreSiteObj.value);
-    }, this);
-    return {"type": "core-site", "tag": "version1", "properties": coreSiteProperties};
-  },
-
-  createHdfsSiteObj: function () {
-    var hdfsSiteObj = this.get('configs').filterProperty('filename', 'hdfs-site.xml');
-    var hdfsProperties = {};
-    hdfsSiteObj.forEach(function (_configProperty) {
-      hdfsProperties[_configProperty.name] = _configProperty.value;
-      console.log("STEP*: name of the property is: " + _configProperty.name);
-      console.log("STEP8: value of the property is: " + _configProperty.value);
-    }, this);
-    return {"type": "hdfs-site", "tag": "version1", "properties": hdfsProperties };
-  },
-
-  createHdfsSitePerSlaveGroup: function (serviceName) {
-    var hdfsSite = this.createHdfsSiteObj();
-    var component = this.get('slaveComponentConfig.components').findProperty('serviceName', serviceName);
-    component.groups.forEach(function (_group) {
-      var siteProperties = hdfsSite.properties;
-      _group.properties.forEach(function (_property) {
-        this.get('configMapping').forEach(function (_config) {
-          if (_config.templateName.contains(_property.name)) {
-            this.get('globals').findProperty('name', _property.name).value = _property.storeValue;
-            var value = this.getGlobConfigValue(_config.templateName, _config.value);
-            if (siteProperties[_config.name]) {
-              siteProperties[_config.name] = value;
-            }
-          }
-        }, this);
-      }, this);
-      var data = {"type": hdfsSite.type, "tag": _group.siteVersion, "properties": siteProperties};
-      console.log("The value of globalConfig is: " + JSON.stringify(siteProperties));
-      this.createConfigSiteForService(data);
-    }, this);
-  },
-
-  createMrSiteObj: function () {
-    var configs = this.get('configs').filterProperty('filename', 'mapred-site.xml');
-    var mrProperties = {};
-    configs.forEach(function (_configProperty) {
-      mrProperties[_configProperty.name] = _configProperty.value;
-      console.log("STEP*: name of the property is: " + _configProperty.name);
-      console.log("STEP8: value of the property is: " + _configProperty.value);
-    }, this);
-    return {type: 'mapred-site', tag: 'version1', properties: mrProperties};
-  },
-
-  createMrSitePerSlaveGroup: function (serviceName) {
-    var mrSite = this.createMrSiteObj();
-    var component = this.get('slaveComponentConfig.components').findProperty('serviceName', serviceName);
-    component.groups.forEach(function (_group) {
-      var siteProperties = mrSite.properties;
-      _group.properties.forEach(function (_property) {
-        this.get('configMapping').forEach(function (_config) {
-          if (_config.templateName.contains(_property.name)) {
-            this.get('globals').findProperty('name', _property.name).value = _property.storeValue;
-            var value = this.getGlobConfigValue(_config.templateName, _config.value);
-            if (siteProperties[_config.name]) {
-              siteProperties[_config.name] = value;
-            }
-          }
-        }, this);
-      }, this);
-      var data = {"type": mrSite.type, "tag": _group.siteVersion, "properties": siteProperties};
-      this.createConfigSiteForService(data);
-    }, this);
-  },
-
-  createHbaseSiteObj: function () {
-    var configs = this.get('configs').filterProperty('filename', 'hbase-site.xml');
-    var hbaseProperties = {};
-    configs.forEach(function (_configProperty) {
-      hbaseProperties[_configProperty.name] = _configProperty.value;
-    }, this);
-    return {type: 'hbase-site', tag: 'version1', properties: hbaseProperties};
-  },
-
-  createHbaseSitePerSlaveGroup: function (serviceName) {
-    var hbaseSite = this.createHbaseSiteObj();
-    var component = this.get('slaveComponentConfig.components').findProperty('serviceName', serviceName);
-    component.groups.forEach(function (_group) {
-      var siteProperties = hbaseSite.properties;
-      _group.properties.forEach(function (_property) {
-        this.get('configMapping').forEach(function (_config) {
-          if (_config.templateName.contains(_property.name)) {
-            this.get('globals').findProperty('name', _property.name).value = _property.storeValue;
-            var value = this.getGlobConfigValue(_config.templateName, _config.value);
-            if (siteProperties[_config.name]) {
-              siteProperties[_config.name] = value;
-            }
-          }
-        }, this);
-      }, this);
-      var data = {"type": hbaseSite.type, "tag": _group.siteVersion, "properties": siteProperties};
-      this.createConfigSiteForService(data);
-    }, this);
-  },
-
-  createOozieSiteObj: function (serviceName) {
-    var configs = this.get('configs').filterProperty('filename', 'oozie-site.xml');
-    var oozieProperties = {};
-    configs.forEach(function (_configProperty) {
-      oozieProperties[_configProperty.name] = _configProperty.value;
-    }, this);
-    return {type: 'oozie-site', tag: 'version1', properties: oozieProperties};
-  },
-
-  createHiveSiteObj: function (serviceName) {
-    var configs = this.get('configs').filterProperty('filename', 'hive-site.xml');
-    var hiveProperties = {};
-    configs.forEach(function (_configProperty) {
-      hiveProperties[_configProperty.name] = _configProperty.value;
-    }, this);
-    return {type: 'hive-site', tag: 'version1', properties: hiveProperties};
-  },
-
-  createWebHCatSiteObj: function (serviceName) {
-    var configs = this.get('configs').filterProperty('filename', 'webhcat-site.xml');
-    var webHCatProperties = {};
-    configs.forEach(function (_configProperty) {
-      webHCatProperties[_configProperty.name] = _configProperty.value;
-    }, this);
-    return {type: 'webhcat-site', tag: 'version1', properties: webHCatProperties};
-  },
-
-  applyCreatedConfToServices: function () {
-    var services = this.get('selectedServices').mapProperty('serviceName');
-    services.forEach(function (_service) {
-      var data = this.getConfigForService(_service);
-      this.applyCreatedConfToService(_service, 'PUT', data);
-    }, this);
-  },
-
-  applyCreatedConfToService: function (service, httpMethod, data) {
-    console.log("Inside applyCreatedConfToService");
-
-    var url = App.apiPrefix + '/clusters/' + this.get('clusterName') + '/services/' + service;
-
-    this.ajax({
-      type: httpMethod,
-      url: url,
-      data: JSON.stringify(data),
-      beforeSend: function () {
-        console.log("BeforeSend: applyCreatedConfToService for " + service);
-      }
-    });
-  },
-
-  applyCreatedConfToSlaveGroups: function () {
-    this.get('slaveComponentConfig.components').forEach(function (_component) {
-      _component.groups.forEach(function (_group) {
-        var aggregatedHostNames = '';
-        _group.hostNames.forEach(function (_hostName, index) {
-          aggregatedHostNames += 'HostRoles/host_name=' + _hostName;
-          if (index !== _group.hostNames.length - 1) {
-            aggregatedHostNames += '|';
-          }
-        }, this);
-        console.log("The aggregated hostNames value is: " + aggregatedHostNames);
-        this.applyCreatedConfToSlaveGroup(aggregatedHostNames, 'PUT', _group.configVersion, _group.groupName);
-      }, this);
-    }, this);
-  },
-
-  applyCreatedConfToSlaveGroup: function (aggregatedHostNames, httpMethod, data, groupName) {
-    console.log("Inside applyCreatedConfToHost");
-    var url = App.apiPrefix + '/clusters/' + this.get('clusterName') + '/host_components?' + aggregatedHostNames;
-
-    this.ajax({
-      type: httpMethod,
-      url: url,
-      data: JSON.stringify(data),
-      beforeSend: function () {
-        console.log("BeforeSend: applyCreatedConfToSlaveGroup for group: " + groupName);
-      }
-    });
-  },
-
-  getConfigForService: function (serviceName) {
-    switch (serviceName) {
-      case 'HDFS':
-        return {config: {'global': 'version1', 'core-site': 'version1', 'hdfs-site': 'version1'}};
-      case 'MAPREDUCE':
-        return {config: {'global': 'version1', 'core-site': 'version1', 'mapred-site': 'version1'}};
-      case 'HBASE':
-        return {config: {'global': 'version1', 'hbase-site': 'version1'}};
-      case 'OOZIE':
-        return {config: {'global': 'version1', 'oozie-site': 'version1'}};
-      case 'HIVE':
-        return {config: {'global': 'version1', 'hive-site': 'version1'}};
-      case 'WEBHCAT':
-        return {config: {'global': 'version1', 'webhcat-site': 'version1'}};
-      default:
-        return {config: {'global': 'version1'}};
-    }
-  },
-
-  ajaxQueue: [],
-
-  ajaxQueueFinished: function () {
-    //do something
-  },
-
-  doNextAjaxCall: function () {
-
-    if (this.get('ajaxBusy')) {
-      return;
-    }
-
-    var queue = this.get('ajaxQueue');
-    if (!queue.length) {
-      this.ajaxQueueFinished();
-      return;
-    }
-
-    var first = queue[0];
-    this.set('ajaxQueue', queue.slice(1));
-    this.set('ajaxQueueLeft', this.get('ajaxQueue').length);
-
-    this.set('ajaxBusy', true);
-    console.log('AJAX send ' + first.url);
-    $.ajax(first);
-
-  },
-
-  /**
-   * We need to make many async AJAX calls in a specific order.
-   * To do this, we queue the ajax option objects and send the requests one at a time.
-   * All queued objects are stored in <code>ajaxQueue</code>.
-   * @param params
-   */
-  ajax: function (params) {
-    if (App.testMode) return;
-
-    var self = this;
-    params = jQuery.extend({
-      async: true,
-      dataType: 'text',
-      statusCode: require('data/statusCodes'),
-      timeout: App.timeout,
-      error: function (request, ajaxOptions, error) {
-        console.log('Step8: In Error ');
-        // console.log('Step8: Error message is: ' + request.responseText);
-      },
-      success: function (data) {
-        var jsonData = jQuery.parseJSON(data);
-        console.log("TRACE: STep8 -> In success function");
-        console.log("TRACE: STep8 -> value of the url is: " + params.url);
-        console.log("TRACE: STep8 -> value of the received data is: " + jsonData);
-      }
-    }, params);
-
-    var success = params.success;
-    var error = params.error;
-
-    params.success = function (data) {
-      // forward the response data so the original success callback can parse it
-      if (success) {
-        success(data);
-      }
-
-      self.set('ajaxBusy', false);
-      self.doNextAjaxCall();
-    };
-
-    params.error = function (xhr, status, error) {
-      var responseText = JSON.parse(xhr.responseText);
-      var controller = App.router.get(App.clusterStatus.wizardControllerName);
-      controller.registerErrPopup("Error", responseText.message);
-      self.set('hasErrorOccurred', true);
-      // an error will break the ajax call chain and allow submission again
-      self.set('isSubmitDisabled', false);
-      self.get('ajaxQueue').clear();
-      self.set('ajaxBusy', false);
-    };
-    this.get('ajaxQueue').pushObject(params);
-  }
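-  // Illustrative usage of the queue above (hypothetical call site, not part of
-  // the original flow): both requests are queued, then sent strictly one at a
-  // time because the wrapped success handler re-invokes doNextAjaxCall().
-  //
-  //   this.ajax({type: 'POST', url: App.apiPrefix + '/clusters/c1', data: '{}'});
-  //   this.ajax({type: 'PUT',  url: App.apiPrefix + '/clusters/c1', data: '{}'});
-  //   this.doNextAjaxCall(); // sends the first; its success triggers the second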
-
-})
-
-
-
-
-
diff --git a/branch-1.2/ambari-web/app/controllers/wizard/step9_controller.js b/branch-1.2/ambari-web/app/controllers/wizard/step9_controller.js
deleted file mode 100644
index 171f172..0000000
--- a/branch-1.2/ambari-web/app/controllers/wizard/step9_controller.js
+++ /dev/null
@@ -1,718 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-var App = require('app');
-
-App.WizardStep9Controller = Em.Controller.extend({
-  name: 'wizardStep9Controller',
-  hosts: [],
-  progress: '0',
-
-  isStepCompleted: false,
-
-  isSubmitDisabled: function () {
-    // return !this.get('isStepCompleted');
-    return !['STARTED','START FAILED'].contains(this.get('content.cluster.status'));
-  }.property('content.cluster.status'),
-
-  // links to previous steps are enabled iff install failed in installer
-  togglePreviousSteps: function () {
-    if ('INSTALL FAILED' === this.get('content.cluster.status') && this.get('content.controllerName') == 'installerController') {
-      App.router.get('installerController').setStepsEnable();
-    } else {
-      App.router.get('installerController').setLowerStepsDisable(9);
-    }
-  }.observes('content.cluster.status', 'content.controllerName'),
-
-  mockHostData: require('data/mock/step9_hosts'),
-  mockDataPrefix: '/data/wizard/deploy/5_hosts',
-  pollDataCounter: 0,
-  polledData: [],
-
-  status: function () {
-    if (this.hosts.someProperty('status', 'failed')) {
-      return 'failed';
-    }
-    if (this.hosts.someProperty('status', 'warning')) {
-      if (this.isStepFailed()) {
-        return 'failed';
-      } else {
-        return 'warning';
-      }
-    }
-    if(this.get('progress') == '100') {
-      this.set('isStepCompleted', true);
-      return 'success';
-    }
-    return 'info';
-  }.property('hosts.@each.status', 'progress'),
-
-  showRetry: function () {
-    return this.get('content.cluster.status') == 'INSTALL FAILED';
-  }.property('content.cluster.status'),
-
-  // content.cluster.status can be:
-  // PENDING: set upon successful transition from step 1 to step 2
-  // INSTALLED: set upon successful completion of install phase as well as successful invocation of start services API
-  // STARTED: set upon successful completion of the start phase
-  // INSTALL FAILED: set upon encountering a failure in the install phase
-  // START FAILED: set upon unsuccessful invocation of start services API and also upon encountering a failure
-  // during start phase
-
-  // content.cluster.isCompleted
-  // set to false upon successful transition from step 1 to step 2
-  // set to true upon successful start of services in this step
-  // note: looks like this is the same thing as checking content.cluster.status == 'STARTED'
-
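-  // For example, a fully successful run walks PENDING -> INSTALLED -> STARTED;
-  // a failure during the install phase ends at INSTALL FAILED, and a failure
-  // while starting services ends at START FAILED (see finishState below).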
-
-  // navigateStep is called by App.WizardStep9View's didInsertElement and "retry" from router.
-  navigateStep: function () {
-    if (App.testMode) {
-      // this is for repeatedly testing out installs in test mode
-      this.set('content.cluster.status', 'PENDING');
-      this.set('content.cluster.isCompleted', false);
-    }
-    var clusterStatus = this.get('content.cluster.status');
-    console.log('navigateStep: clusterStatus = ' + clusterStatus);
-    if (this.get('content.cluster.isCompleted') === false) {
-      // the cluster has not yet successfully installed and started
-      if (clusterStatus === 'INSTALL FAILED') {
-        this.loadStep();
-        this.loadLogData(this.get('content.cluster.requestId'));
-        this.set('isStepCompleted', true);
-      } else if (clusterStatus === 'START FAILED') {
-        this.loadStep();
-        this.loadLogData(this.get('content.cluster.requestId'));
-        // this.hosts.setEach('status', 'info');
-        this.set('isStepCompleted', true);
-      } else {
-        // handle PENDING, INSTALLED
-        this.loadStep();
-        this.loadLogData(this.get('content.cluster.requestId'));
-        this.startPolling();
-      }
-    } else {
-      // handle STARTED
-      // the cluster has successfully installed and started
-      this.loadStep();
-      this.loadLogData(this.get('content.cluster.requestId'));
-      this.set('isStepCompleted', true);
-      this.set('progress', '100');
-    }
-  },
-  clearStep: function () {
-    this.hosts.clear();
-    this.set('status', 'info');
-    this.set('progress', '0');
-    this.set('isStepCompleted', false);
-    this.numPolls = 0;
-  },
-
-  loadStep: function () {
-    console.log("TRACE: Loading step9: Install, Start and Test");
-    this.clearStep();
-    this.renderHosts(this.loadHosts());
-  },
-  /**
-   * reset status and message of all hosts when retry install
-   */
-  resetHostsForRetry: function(){
-    var hosts = this.get('content.hosts');
-    for (var name in hosts) {
-      hosts[name].status = "pending";
-      hosts[name].message = 'Waiting';
-    }
-    this.set('content.hosts', hosts);
-  },
-
-  loadHosts: function () {
-    var hostInfo = this.get('content.hosts');
-    var hosts = new Ember.Set();
-    for (var index in hostInfo) {
-      var obj = Em.Object.create(hostInfo[index]);
-      obj.message = (obj.message) ? obj.message : 'Waiting';
-      obj.progress = 0;
-      obj.status = (obj.status) ? obj.status : 'info';
-      obj.tasks = [];
-      obj.logTasks = [];
-      hosts.add(obj);
-      console.log("TRACE: host name is: " + hostInfo[index].name);
-    }
-    return hosts.filterProperty('bootStatus', 'REGISTERED');
-  },
-
-  // sets this.hosts, where each element holds status and progress info for a host
-  renderHosts: function (hostsInfo) {
-    hostsInfo.forEach(function (_hostInfo) {
-      var hostInfo = App.HostInfo.create({
-        name: _hostInfo.name,
-        status: _hostInfo.status,
-        tasks: _hostInfo.tasks,
-        logTasks: _hostInfo.logTasks,
-        message: _hostInfo.message,
-        progress: _hostInfo.progress
-      });
-      console.log('pushing ' + hostInfo.name);
-      this.hosts.pushObject(hostInfo);
-    }, this);
-  },
-
-  replacePolledData: function (polledData) {
-    this.polledData.clear();
-    this.set('polledData', polledData);
-  },
-
-  displayMessage: function (task) {
-    var role = App.format.role(task.role);
-    console.log("In display message with task command value: " + task.command);
-    switch (task.command) {
-      case 'INSTALL':
-        switch (task.status) {
-          case 'PENDING':
-            return 'Preparing to install ' + role;
-          case 'QUEUED' :
-            return 'Waiting to install ' + role;
-          case 'IN_PROGRESS':
-            return 'Installing ' + role;
-          case 'COMPLETED' :
-            return 'Successfully installed ' + role;
-          case 'FAILED':
-            return 'Failed to install ' + role;
-        }
-      case 'UNINSTALL':
-        switch (task.status) {
-          case 'PENDING':
-            return 'Preparing to uninstall ' + role;
-          case 'QUEUED' :
-            return 'Waiting to uninstall ' + role;
-          case 'IN_PROGRESS':
-            return 'Uninstalling ' + role;
-          case 'COMPLETED' :
-            return 'Successfully uninstalled ' + role;
-          case 'FAILED':
-            return 'Failed to uninstall ' + role;
-        }
-      case 'START' :
-        switch (task.status) {
-          case 'PENDING':
-            return 'Preparing to start ' + role;
-          case 'QUEUED' :
-            return 'Waiting to start ' + role;
-          case 'IN_PROGRESS':
-            return 'Starting ' + role;
-          case 'COMPLETED' :
-            return role + ' started successfully';
-          case 'FAILED':
-            return role + ' failed to start';
-        }
-      case 'STOP' :
-        switch (task.status) {
-          case 'PENDING':
-            return 'Preparing to stop ' + role;
-          case 'QUEUED' :
-            return 'Waiting to stop ' + role;
-          case 'IN_PROGRESS':
-            return 'Stopping ' + role;
-          case 'COMPLETED' :
-            return role + ' stopped successfully';
-          case 'FAILED':
-            return role + ' failed to stop';
-        }
-      case 'EXECUTE' :
-        switch (task.status) {
-          case 'PENDING':
-            return 'Preparing to execute ' + role;
-          case 'QUEUED' :
-            return 'Waiting to execute ' + role;
-          case 'IN_PROGRESS':
-            return 'Executing ' + role;
-          case 'COMPLETED' :
-            return role + ' executed successfully';
-          case 'FAILED':
-            return role + ' failed to execute';
-        }
-      case 'ABORT' :
-        switch (task.status) {
-          case 'PENDING':
-            return 'Preparing to abort ' + role;
-          case 'QUEUED' :
-            return 'Waiting to abort ' + role;
-          case 'IN_PROGRESS':
-            return 'Aborting ' + role;
-          case 'COMPLETED' :
-            return role + ' aborted successfully';
-          case 'FAILED':
-            return role + ' failed to abort';
-        }
-    }
-  },
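-  // Example: displayMessage({command: 'INSTALL', status: 'IN_PROGRESS', role: 'DATANODE'})
-  // returns 'Installing ' + App.format.role('DATANODE'), e.g. 'Installing DataNode'
-  // (assuming App.format.role maps the DATANODE role to its display name).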
-
-  /**
-   * run start/check services after installation phase
-   */
-  launchStartServices: function () {
-    var self = this;
-    var clusterName = this.get('content.cluster.name');
-    var url = App.apiPrefix + '/clusters/' + clusterName + '/services?ServiceInfo/state=INSTALLED';
-    var data = '{"ServiceInfo": {"state": "STARTED"}}';
-    var method = 'PUT';
-
-    if (this.get('content.controllerName') === 'addHostController') {
-      url = App.apiPrefix + '/clusters/' + clusterName + '/host_components?(HostRoles/component_name=GANGLIA_MONITOR|HostRoles/component_name=HBASE_REGIONSERVER|HostRoles/component_name=DATANODE|HostRoles/component_name=TASKTRACKER)&(HostRoles/state=INSTALLED)';
-      data = '{"HostRoles": {"state": "STARTED"}}';
-    }
-
-    if (App.testMode) {
-      url = this.get('mockDataPrefix') + '/poll_6.json';
-      method = 'GET';
-      this.numPolls = 6;
-    }
-
-    $.ajax({
-      type: method,
-      url: url,
-      async: false,
-      data: data,
-      dataType: 'text',
-      timeout: App.timeout,
-      success: function (data) {
-        var jsonData = jQuery.parseJSON(data);
-        console.log("TRACE: Step9 -> In success function for the startService call");
-        console.log("TRACE: Step9 -> value of the url is: " + url);
-        console.log("TRACE: Step9 -> value of the received data is: " + jsonData);
-        var requestId = jsonData.Requests.id;
-        console.log('requestId is: ' + requestId);
-        var clusterStatus = {
-          status: 'INSTALLED',
-          requestId: requestId,
-          isStartError: false,
-          isCompleted: false
-        };
-
-        App.router.get(self.get('content.controllerName')).saveClusterStatus(clusterStatus);
-
-        // We need to do recovery if there is a browser crash
-        App.clusterStatus.setClusterStatus({
-          clusterState: 'SERVICE_STARTING_3',
-          localdb: App.db.data
-        });
-
-        self.startPolling();
-      },
-
-      error: function () {
-        console.log("ERROR");
-        var clusterStatus = {
-          status: 'START FAILED',
-          isStartError: true,
-          isCompleted: false
-        };
-
-        App.router.get(self.get('content.controllerName')).saveClusterStatus(clusterStatus);
-      },
-
-      statusCode: require('data/statusCodes')
-    });
-  },
-
-  // marks a host's status as "success" if all tasks are in COMPLETED state
-  onSuccessPerHost: function (actions, contentHost) {
-    if (actions.everyProperty('Tasks.status', 'COMPLETED') && this.get('content.cluster.status') === 'INSTALLED') {
-      contentHost.set('status', 'success');
-    }
-  },
-
-  // marks a host's status as "warning" if at least one of the tasks is FAILED, ABORTED, or TIMEDOUT,
-  // and marks the host's status as "failed" if at least one master component install task is FAILED.
-  onErrorPerHost: function (actions, contentHost) {
-    if (actions.someProperty('Tasks.status', 'FAILED') || actions.someProperty('Tasks.status', 'ABORTED') || actions.someProperty('Tasks.status', 'TIMEDOUT')) {
-      contentHost.set('status', 'warning');
-    }
-    if (this.get('content.cluster.status') === 'PENDING' && actions.someProperty('Tasks.status', 'FAILED')) {
-      contentHost.set('status', 'failed');
-    }
-  },
-
-  onInProgressPerHost: function (tasks, contentHost) {
-    var runningAction = tasks.findProperty('Tasks.status', 'IN_PROGRESS');
-    if (runningAction === undefined || runningAction === null) {
-      runningAction = tasks.findProperty('Tasks.status', 'QUEUED');
-    }
-    if (runningAction === undefined || runningAction === null) {
-      runningAction = tasks.findProperty('Tasks.status', 'PENDING');
-    }
-    if (runningAction !== null && runningAction !== undefined) {
-      contentHost.set('message', this.displayMessage(runningAction.Tasks));
-    }
-  },
-
-  /**
-   * calculate progress of tasks per host
-   * @param actions
-   * @param contentHost
-   * @return {Number}
-   */
-  progressPerHost: function (actions, contentHost) {
-    var progress = 0;
-    var actionsPerHost = actions.length;
-    // TODO: consolidate to a single filter function for better performance
-    var completedActions = actions.filterProperty('Tasks.status', 'COMPLETED').length
-      + actions.filterProperty('Tasks.status', 'FAILED').length
-      + actions.filterProperty('Tasks.status', 'ABORTED').length
-      + actions.filterProperty('Tasks.status', 'TIMEDOUT').length;
-    var queuedActions = actions.filterProperty('Tasks.status', 'QUEUED').length;
-    var inProgressActions = actions.filterProperty('Tasks.status', 'IN_PROGRESS').length;
-    /** For the install phase (PENDING), % completed per host goes up to 33% (floor(100 / 3));
-     * for the start phase (INSTALLED), % completed starts from 34%.
-     * For weighting purposes, a QUEUED task counts as 9% complete,
-     * an IN_PROGRESS task as 35%, and a COMPLETED task as 100%.
-     */
-    switch (this.get('content.cluster.status')) {
-      case 'PENDING':
-        progress = Math.ceil(((queuedActions * 0.09) + (inProgressActions * 0.35) + completedActions ) / actionsPerHost * 33);
-        break;
-      case 'INSTALLED':
-        progress = 34 + Math.ceil(((queuedActions * 0.09) + (inProgressActions * 0.35) + completedActions ) / actionsPerHost * 66);
-        break;
-      default:
-        progress = 100;
-        break;
-    }
-    console.log('INFO: progressPerHost is: ' + progress);
-    contentHost.set('progress', progress.toString());
-    return progress;
-  },
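-  // Worked example (hypothetical counts): in the install phase (PENDING), a host
-  // with 10 tasks of which 3 are COMPLETED, 1 is IN_PROGRESS and 2 are QUEUED gives
-  // Math.ceil(((2 * 0.09) + (1 * 0.35) + 3) / 10 * 33) = Math.ceil(11.649) = 12,
-  // i.e. the host reports 12% of the 33% allotted to the install phase.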
-
-  isSuccess: function (polledData) {
-    return polledData.everyProperty('Tasks.status', 'COMPLETED');
-  },
-
-  // return true if at least 50% of the slave host components for a particular service component fail to install
-  isStepFailed: function () {
-    var failed = false;
-    var polledData = this.get('polledData');
-    polledData.filterProperty('Tasks.command', 'INSTALL').mapProperty('Tasks.role').uniq().forEach(function (role) {
-      if (['DATANODE', 'TASKTRACKER', 'HBASE_REGIONSERVER', 'GANGLIA_MONITOR'].contains(role)) {
-        var actionsPerRole = polledData.filterProperty('Tasks.role', role);
-        var actionsFailed = actionsPerRole.filterProperty('Tasks.status', 'FAILED');
-        var actionsAborted = actionsPerRole.filterProperty('Tasks.status', 'ABORTED');
-        var actionsTimedOut = actionsPerRole.filterProperty('Tasks.status', 'TIMEDOUT');
-        if ((((actionsFailed.length + actionsAborted.length + actionsTimedOut.length) / actionsPerRole.length) * 100) > 50) {
-          failed = true;
-        }
-      }
-    }, this);
-    return failed;
-  },
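-  // Example (hypothetical data): if 3 of 5 DATANODE install tasks end in FAILED,
-  // ABORTED or TIMEDOUT, the failure rate is 60% > 50%, so isStepFailed() returns
-  // true and the step's overall status computes to 'failed'.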
-
-  // makes a state transition
-  // PENDING -> INSTALLED
-  // PENDING -> INSTALL FAILED
-  // INSTALLED -> STARTED
-  // INSTALLED -> START FAILED
-  // returns true if polling should stop; false otherwise
-  // polling from ui stops only when no action has 'PENDING', 'QUEUED' or 'IN_PROGRESS' status
-  finishState: function (polledData) {
-    var clusterStatus = {};
-    var requestId = this.get('content.cluster.requestId');
-    if (this.get('content.cluster.status') === 'INSTALLED') {
-      if (!polledData.someProperty('Tasks.status', 'PENDING') && !polledData.someProperty('Tasks.status', 'QUEUED') && !polledData.someProperty('Tasks.status', 'IN_PROGRESS')) {
-        this.set('progress', '100');
-        clusterStatus = {
-          status: 'INSTALLED',
-          requestId: requestId,
-          isCompleted: true
-        };
-        if (this.isSuccess(polledData)) {
-          clusterStatus.status = 'STARTED';
-          var serviceStartTime = new Date().getTime();
-          var timeToStart = ((parseInt(serviceStartTime) - parseInt(this.get('content.cluster.installStartTime'))) / 60000).toFixed(2);
-          clusterStatus.installTime = timeToStart;
-        } else {
-            clusterStatus.status = 'START FAILED'; // 'START FAILED' implies to step10 that installation was successful but start failed
-        }
-        App.router.get(this.get('content.controllerName')).saveClusterStatus(clusterStatus);
-        this.set('isStepCompleted', true);
-        this.setTasksPerHost();
-        App.router.get(this.get('content.controllerName')).saveInstalledHosts(this);
-        return true;
-      }
-    } else if (this.get('content.cluster.status') === 'PENDING') {
-      if (!polledData.someProperty('Tasks.status', 'PENDING') && !polledData.someProperty('Tasks.status', 'QUEUED') && !polledData.someProperty('Tasks.status', 'IN_PROGRESS')) {
-        clusterStatus = {
-          status: 'PENDING',
-          requestId: requestId,
-          isCompleted: false
-        };
-        if (this.get('status') === 'failed') {
-          clusterStatus.status = 'INSTALL FAILED';
-          this.set('progress', '100');
-          this.get('hosts').setEach('progress', '100');
-          App.router.get(this.get('content.controllerName')).saveClusterStatus(clusterStatus);
-          this.set('isStepCompleted', true);
-        } else {
-          clusterStatus.status = 'INSTALLED';
-          this.set('progress', '34');
-          this.launchStartServices();
-        }
-        this.setTasksPerHost();
-        App.router.get(this.get('content.controllerName')).saveInstalledHosts(this);
-        return true;
-      }
-    } else if (this.get('content.cluster.status') === 'INSTALL FAILED' || this.get('content.cluster.status') === 'START FAILED' || this.get('content.cluster.status') === 'STARTED') {
-      this.set('progress', '100');
-      return true;
-    }
-    return false;
-  },
-
-  setTasksPerHost: function () {
-    var tasksData = this.get('polledData');
-    this.get('hosts').forEach(function (_host) {
-      var tasksPerHost = tasksData.filterProperty('Tasks.host_name', _host.name); // retrieved from polled Data
-      if (tasksPerHost.length === 0) {
-        //alert('For testing with mockData follow the sequence: hit refresh, "mockData btn", "pollData btn", again "pollData btn"');
-        //exit();
-      }
-      if (tasksPerHost !== null && tasksPerHost !== undefined && tasksPerHost.length !== 0) {
-        tasksPerHost.forEach(function (_taskPerHost) {
-          console.log('In step9 _taskPerHost function.');
-          //if (_taskPerHost.Tasks.status !== 'PENDING' && _taskPerHost.Tasks.status !== 'QUEUED' &&  _taskPerHost.Tasks.status !== 'IN_PROGRESS') {
-          _host.tasks.pushObject(_taskPerHost);
-          //}
-        }, this);
-      }
-    }, this);
-  },
-
-  logTasksChangesCounter: 0,
-
-  // This is done at HostRole level.
-  setLogTasksStatePerHost: function (tasksPerHost, host) {
-    console.log('In step9 setTasksStatePerHost function.');
-    tasksPerHost.forEach(function (_task) {
-      console.log('In step9 _taskPerHost function.');
-      //if (_task.Tasks.status !== 'PENDING' && _task.Tasks.status !== 'QUEUED') {
-      var task = host.logTasks.findProperty('Tasks.id', _task.Tasks.id);
-      if (task) {
-        host.logTasks.removeObject(task);
-      }
-      host.logTasks.pushObject(_task);
-      //}
-    }, this);
-    this.set('logTasksChangesCounter', this.get('logTasksChangesCounter') + 1);
-  },
-
-  parseHostInfo: function (polledData) {
-    console.log('TRACE: Entering host info function');
-    var self = this;
-    var totalProgress = 0;
-    var tasksData = polledData.tasks;
-    console.log("The value of tasksData is: ", tasksData);
-    if (!tasksData) {
-      console.log("Step9: ERROR: NO tasks available to process");
-    }
-    var requestId = this.get('content.cluster.requestId');
-    if(polledData.Requests && polledData.Requests.id && polledData.Requests.id!=requestId){
-      // We don't want to use a non-current requestId's task data to
-      // determine the current install status.
-      // Also, we don't want to keep polling if it is not the
-      // current requestId.
-      return false;
-    }
-    this.replacePolledData(tasksData);
-    this.hosts.forEach(function (_host) {
-      var actionsPerHost = tasksData.filterProperty('Tasks.host_name', _host.name); // retrieved from polled Data
-      if (actionsPerHost.length === 0) {
-        _host.set('message', this.t('installer.step9.host.status.nothingToInstall'));
-        console.log("INFO: No task is hosted on the host");
-      }
-      if (actionsPerHost !== null && actionsPerHost !== undefined && actionsPerHost.length !== 0) {
-        this.setLogTasksStatePerHost(actionsPerHost, _host);
-        this.onSuccessPerHost(actionsPerHost, _host);     // every action should be a success
-        this.onErrorPerHost(actionsPerHost, _host);     // any action should be a failure
-        this.onInProgressPerHost(actionsPerHost, _host);  // current running action for a host
-        totalProgress += self.progressPerHost(actionsPerHost, _host);
-      }
-    }, this);
-    totalProgress = Math.floor(totalProgress / this.hosts.length);
-    this.set('progress', totalProgress.toString());
-    console.log("INFO: right now the progress is: " + this.get('progress'));
-    return this.finishState(tasksData);
-  },
-
-  startPolling: function () {
-    this.set('isSubmitDisabled', true);
-    this.doPolling();
-  },
-
-  numPolls: 0,
-
-  getUrl: function (requestId) {
-    var clusterName = this.get('content.cluster.name');
-    requestId = requestId || this.get('content.cluster.requestId');
-    var url = App.apiPrefix + '/clusters/' + clusterName + '/requests/' + requestId + '?fields=tasks/*';
-    console.log("URL for step9 is: " + url);
-    return url;
-  },
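-  // e.g. for a cluster named 'c1' and request id 2 (hypothetical values) this
-  // yields: <App.apiPrefix>/clusters/c1/requests/2?fields=tasks/*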
-
-  POLL_INTERVAL: 4000,
-
-  loadLogData: function(requestId) {
-    var url = this.getUrl(requestId);
-    var requestsId = App.db.getCluster().oldRequestsId;
-    if (App.testMode) {
-      this.POLL_INTERVAL = 1;
-      this.numPolls++;
-    }
-
-    requestsId.forEach(function(requestId) {
-      url = this.getUrl(requestId);
-      if (App.testMode) {
-        this.POLL_INTERVAL = 1;
-
-        url = this.get('mockDataPrefix') + '/poll_' + this.numPolls + '.json';
-      }
-      this.getLogsByRequest(url, false);
-    }, this);
-  },
-
-  // polling: whether to continue polling for status or not
-  getLogsByRequest: function(url, polling){
-    var self = this;
-    $.ajax({
-      type: 'GET',
-      url: url,
-      async: true,
-      timeout: App.timeout,
-      dataType: 'text',
-      success: function (data) {
-        console.log("TRACE: In success function for the GET logs data");
-        console.log("TRACE: STep9 -> The value is: ", jQuery.parseJSON(data));
-        var result = self.parseHostInfo(jQuery.parseJSON(data));
-        if (!polling) {
-          return;
-        }
-        if (result !== true) {
-          window.setTimeout(function () {
-            self.doPolling();
-          }, self.POLL_INTERVAL);
-        } else {
-          self.stopPolling();
-        }
-      },
-
-      error: function (request, ajaxOptions, error) {
-        console.log("TRACE: STep9 -> In error function for the GET logs data");
-        console.log("TRACE: STep9 -> value of the url is: " + url);
-        console.log("TRACE: STep9 -> error code status is: " + request.status);
-        self.stopPolling();
-      },
-
-      statusCode: require('data/statusCodes')
-    }).retry({times: App.maxRetries, timeout: App.timeout}).then(null,
-      function () {
-        App.showReloadPopup();
-        console.log('Install services all retries failed');
-      }
-    );
-  },
-
-  doPolling: function () {
-    var url = this.getUrl();
-
-    if (App.testMode) {
-      this.numPolls++;
-      url = this.get('mockDataPrefix') + '/poll_' + this.get('numPolls') + '.json';
-
-    }
-    this.getLogsByRequest(url, true);
-  },
-
-  stopPolling: function () {
-    //TODO: uncomment following line after the hook up with the API call
-    // this.set('isStepCompleted',true);
-  },
-
-  submit: function () {
-    if (!this.get('isSubmitDisabled')) {
-      App.router.send('next');
-    }
-  },
-
-  back: function () {
-    if (!this.get('isSubmitDisabled')) {
-      App.router.send('back');
-    }
-  },
-
-  mockBtn: function () {
-    this.set('isSubmitDisabled', false);
-    this.hosts.clear();
-    var hostInfo = this.mockHostData;
-    this.renderHosts(hostInfo);
-  },
-
-  pollBtn: function () {
-    this.set('isSubmitDisabled', false);
-    var data1 = require('data/mock/step9PolledData/pollData_1');
-    var data2 = require('data/mock/step9PolledData/pollData_2');
-    var data3 = require('data/mock/step9PolledData/pollData_3');
-    var data4 = require('data/mock/step9PolledData/pollData_4');
-    var data5 = require('data/mock/step9PolledData/pollData_5');
-    var data6 = require('data/mock/step9PolledData/pollData_6');
-    var data7 = require('data/mock/step9PolledData/pollData_7');
-    var data8 = require('data/mock/step9PolledData/pollData_8');
-    var data9 = require('data/mock/step9PolledData/pollData_9');
-    console.log("TRACE: In pollBtn function data1");
-    var counter = parseInt(this.get('pollDataCounter')) + 1;
-    this.set('pollDataCounter', counter.toString());
-    switch (this.get('pollDataCounter')) {
-      case '1':
-        this.parseHostInfo(data1);
-        break;
-      case '2':
-        this.parseHostInfo(data2);
-        break;
-      case '3':
-        this.parseHostInfo(data3);
-        break;
-      case '4':
-        this.parseHostInfo(data4);
-        break;
-      case '5':
-        this.parseHostInfo(data5);
-        break;
-      case '6':
-        this.set('content.cluster.status', 'INSTALLED');
-        this.parseHostInfo(data6);
-        break;
-      case '7':
-        this.parseHostInfo(data7);
-        break;
-      case '8':
-        this.parseHostInfo(data8);
-        break;
-      case '9':
-        this.parseHostInfo(data9);
-        break;
-      default:
-        break;
-    }
-  }
-
-});
diff --git a/branch-1.2/ambari-web/app/data/config_mapping.js b/branch-1.2/ambari-web/app/data/config_mapping.js
deleted file mode 100644
index 74f03b3..0000000
--- a/branch-1.2/ambari-web/app/data/config_mapping.js
+++ /dev/null
@@ -1,797 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-module.exports = [
-  {
-    "name": "fs.default.name",
-    "templateName": ["namenode_host"],
-    "foreignKey": null,
-    "value": "hdfs://<templateName[0]>:8020",
-    "filename": "core-site.xml"
-  },
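-  // For instance, with namenode_host resolved to a hypothetical 'nn.example.com',
-  // the entry above renders fs.default.name as hdfs://nn.example.com:8020
-  // in core-site.xml.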
-  {
-    "name": "fs.checkpoint.dir",
-    "templateName": ["fs_checkpoint_dir"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "core-site.xml"
-  },
-  {
-    "name": "fs.checkpoint.period",
-    "templateName": ["fs_checkpoint_period"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "core-site.xml"
-  },
-  {
-    "name": "fs.checkpoint.size",
-    "templateName": ["fs_checkpoint_size"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "core-site.xml"
-  },
-  {
-    "name": "fs.checkpoint.edits.dir",
-    "templateName": [],
-    "foreignKey": ["fs.checkpoint.dir"],
-    "value": "<foreignKey[0]>",
-    "filename": "core-site.xml"
-  },
-  {
-    "name": "hadoop.proxyuser.<foreignKey[0]>.groups",
-    "templateName": ["proxyuser_group"],
-    "foreignKey": ["hive_user"],
-    "value": "<templateName[0]>",
-    "filename": "core-site.xml"
-  },
-  {
-    "name": "hadoop.proxyuser.<foreignKey[0]>.hosts",
-    "templateName": ["hivemetastore_host"],
-    "foreignKey": ["hive_user"],
-    "value": "<templateName[0]>",
-    "filename": "core-site.xml"
-  },
-  {
-    "name": "hadoop.proxyuser.<foreignKey[0]>.groups",
-    "templateName": ["proxyuser_group"],
-    "foreignKey": ["oozie_user"],
-    "value": "<templateName[0]>",
-    "filename": "core-site.xml"
-  },
-  {
-    "name": "hadoop.proxyuser.<foreignKey[0]>.hosts",
-    "templateName": ["oozieserver_host"],
-    "foreignKey": ["oozie_user"],
-    "value": "<templateName[0]>",
-    "filename": "core-site.xml"
-  },
-  {
-    "name": "hadoop.proxyuser.<foreignKey[0]>.groups",
-    "templateName": ["proxyuser_group"],
-    "foreignKey": ["webhcat_user"],
-    "value": "<templateName[0]>",
-    "filename": "core-site.xml"
-  },
-  {
-    "name": "hadoop.proxyuser.<foreignKey[0]>.hosts",
-    "templateName": ["hivemetastore_host"],
-    "foreignKey": ["webhcat_user"],
-    "value": "<templateName[0]>",
-    "filename": "core-site.xml"
-  },
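-  // Note: for the proxyuser entries above, the <foreignKey[0]> placeholder is
-  // substituted into the property *name* as well; e.g. with a hypothetical
-  // hive_user of 'hive', "hadoop.proxyuser.<foreignKey[0]>.hosts" becomes
-  // "hadoop.proxyuser.hive.hosts".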
-  {
-    "name": "dfs.name.dir",
-    "templateName": ["dfs_name_dir"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "hdfs-site.xml"
-  },
-  /*
-  {
-    "name": "dfs.support.append",
-    "templateName": ["dfs_support_append"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "hdfs-site.xml"
-  },
-  */
-  {
-    "name": "dfs.webhdfs.enabled",
-    "templateName": ["dfs_webhdfs_enabled"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "hdfs-site.xml"
-  },
-  {
-    "name": "dfs.datanode.failed.volumes.tolerated",
-    "templateName": ["dfs_datanode_failed_volume_tolerated"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "hdfs-site.xml"
-  },
-  {
-    "name": "dfs.block.local-path-access.user",
-    "templateName": ["dfs_block_local_path_access_user"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "hdfs-site.xml"
-  },
-  {
-    "name": "dfs.data.dir",
-    "templateName": ["dfs_data_dir"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "hdfs-site.xml"
-  },
-  {
-    "name": "dfs.hosts.exclude",
-    "templateName": ["hadoop_conf_dir", "dfs_exclude"],
-    "foreignKey": null,
-    "value": "<templateName[0]>\/<templateName[1]>",
-    "filename": "hdfs-site.xml"
-  },
-  {
-    "name": "dfs.hosts",
-    "templateName": ["hadoop_conf_dir", "dfs_include"],
-    "foreignKey": null,
-    "value": "<templateName[0]>\/<templateName[1]>",
-    "filename": "hdfs-site.xml"
-  },
-  {
-    "name": "dfs.replication",
-    "templateName": ["dfs_replication"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "hdfs-site.xml"
-  },
-  {
-    "name": "dfs.datanode.address",
-    "templateName": ["dfs_datanode_address"],
-    "foreignKey": null,
-    "value": "0.0.0.0:<templateName[0]>",
-    "filename": "hdfs-site.xml"
-  },
-  {
-    "name": "dfs.datanode.http.address",
-    "templateName": ["dfs_datanode_http_address"],
-    "foreignKey": null,
-    "value": "0.0.0.0:<templateName[0]>",
-    "filename": "hdfs-site.xml"
-  },
-  {
-    "name": "dfs.http.address",
-    "templateName": ["namenode_host"],
-    "foreignKey": null,
-    "value": "<templateName[0]>:50070",
-    "filename": "hdfs-site.xml"
-  },
-  {
-    "name": "dfs.datanode.du.reserved",
-    "templateName": ["datanode_du_reserved"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "hdfs-site.xml"
-  },
-  {
-    "name": "dfs.namenode.kerberos.principal",
-    "templateName": ["kerberos_domain"],
-    "foreignKey": null,
-    "value": "nn\/_HOST@<templateName[0]>",
-    "filename": "hdfs-site.xml"
-  },
-  {
-    "name": "dfs.secondary.namenode.kerberos.principal",
-    "templateName": ["kerberos_domain"],
-    "foreignKey": null,
-    "value": "nn\/_HOST@<templateName[0]>",
-    "filename": "hdfs-site.xml"
-  },
-  {
-    "name": "dfs.namenode.kerberos.https.principal",
-    "templateName": ["kerberos_domain"],
-    "foreignKey": null,
-    "value": "host\/_HOST@<templateName[0]>",
-    "filename": "hdfs-site.xml"
-  },
-  {
-    "name": "dfs.secondary.namenode.kerberos.https.principal",
-    "templateName": ["kerberos_domain"],
-    "foreignKey": null,
-    "value": "host\/_HOST@<templateName[0]>",
-    "filename": "hdfs-site.xml"
-  },
-  {
-    "name": "dfs.secondary.http.address",
-    "templateName": ["snamenode_host"],
-    "foreignKey": null,
-    "value": "<templateName[0]>:50090",
-    "filename": "hdfs-site.xml"
-  },
-  {
-    "name": "dfs.web.authentication.kerberos.keytab",
-    "templateName": ["keytab_path"],
-    "foreignKey": null,
-    "value": "<templateName[0]>\/spnego.service.keytab",
-    "filename": "hdfs-site.xml"
-  },
-  {
-    "name": "dfs.datanode.kerberos.principal",
-    "templateName": ["kerberos_domain"],
-    "foreignKey": null,
-    "value": "dn\/_HOST@<templateName[0]>",
-    "filename": "hdfs-site.xml"
-  },
-  {
-    "name": "dfs.namenode.keytab.file",
-    "templateName": ["keytab_path"],
-    "foreignKey": null,
-    "value": "<templateName[0]>\/nn.service.keytab",
-    "filename": "hdfs-site.xml"
-  },
-  {
-    "name": "dfs.secondary.namenode.keytab.file",
-    "templateName": ["keytab_path"],
-    "foreignKey": null,
-    "value": "<templateName[0]>\/nn.service.keytab",
-    "filename": "hdfs-site.xml"
-  },
-  {
-    "name": "dfs.datanode.keytab.file",
-    "templateName": ["keytab_path"],
-    "foreignKey": null,
-    "value": "<templateName[0]>\/dn.service.keytab",
-    "filename": "hdfs-site.xml"
-  },
-  {
-    "name": "dfs.https.address",
-    "templateName": ["namenode_host"],
-    "foreignKey": null,
-    "value": "<templateName[0]>:50470",
-    "filename": "hdfs-site.xml"
-  },
-  {
-    "name": "dfs.datanode.data.dir.perm",
-    "templateName": ["dfs_datanode_data_dir_perm"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "hdfs-site.xml"
-  },
-  /*
-   {
-   "name": "dfs.namenode.kerberos.internal.spnego.principal",
-   "templateName": [],
-   "foreignKey": ["dfs.web.authentication.kerberos.principal"],
-   "value": "<foreignKey[0]>",
-   "filename": "hdfs-site.xml"
-   },
-   {
-   "name": "dfs.secondary.namenode.kerberos.internal.spnego.principal",
-   "templateName": [],
-   "foreignKey": ["dfs.web.authentication.kerberos.principal"],
-   "value": "<foreignKey[0]>",
-   "filename": "hdfs-site.xml"
-   },
-   */
-  {
-    "name": "mapred.local.dir",
-    "templateName": ["mapred_local_dir"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "mapred-site.xml"
-  },
-  /*
-   {
-   "name": "oozie.service.StoreService.jdbc.url",
-   "templateName": ["oozie_data_dir"],
-   "foreignKey": ["oozie.db.schema.name"],
-   "value": "<templateName[0]>\/<foreignKey[0]>",
-   "filename": "oozie-site.xml"
-   },
-   */
-  {
-    "name": "oozie.base.url",
-    "templateName": ["oozieserver_host"],
-    "foreignKey": null,
-    "value": "http://<templateName[0]>:11000/oozie",
-    "filename": "oozie-site.xml"
-  },
-  /*
-   {
-   "name": "oozie.service.JPAService.jdbc.password",
-   "templateName": [],
-   "foreignKey": null,
-   "value": " ",
-   "filename": "oozie-site.xml"
-   },
-   {
-   "name": "oozie.db.schema.name",
-   "templateName": [],
-   "foreignKey": null,
-   "value": "oozie",
-   "filename": "oozie-site.xml"
-   },
-   {
-   "name": "oozie.service.JPAService.jdbc.url",
-   "templateName": [],
-   "foreignKey": null,
-   "value": "jdbc:derby:/var/data/oozie/oozie-db;create=true",
-   "filename": "oozie-site.xml"
-   },
-   {
-   "name": "oozie.action.ssh.http.command.post.options",
-   "templateName": [],
-   "foreignKey": null,
-   "value": " ",
-   "filename": "oozie-site.xml"
-   },
-   */
-  {
-    "name": "javax.jdo.option.ConnectionURL",
-    "templateName": ["hive_mysql_hostname", "hive_database_name"],
-    "foreignKey": null,
-    "value": "jdbc:mysql://<templateName[0]>/<templateName[1]>?createDatabaseIfNotExist=true",
-    "filename": "hive-site.xml"
-  },
-  {
-    "name": "javax.jdo.option.ConnectionUserName",
-    "templateName": ["hive_metastore_user_name"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "hive-site.xml"
-  },
-  {
-    "name": "javax.jdo.option.ConnectionPassword",
-    "templateName": ["hive_metastore_user_passwd"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "hive-site.xml"
-  },
-  {
-    "name": "hive.metastore.uris",
-    "templateName": ["hivemetastore_host"],
-    "foreignKey": null,
-    "value": "thrift://<templateName[0]>:9083",
-    "filename": "hive-site.xml"
-  },
-  {
-    "name": "mapred.jobtracker.taskScheduler",
-    "templateName": ["scheduler_name"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "mapred-site.xml"
-  },
-  {
-    "name": "mapred.tasktracker.map.tasks.maximum",
-    "templateName": ["mapred_map_tasks_max"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "mapred-site.xml"
-  },
-  {
-    "name": "mapred.tasktracker.reduce.tasks.maximum",
-    "templateName": ["mapred_red_tasks_max"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "mapred-site.xml"
-  },
-  {
-    "name": "mapred.cluster.reduce.memory.mb",
-    "templateName": ["mapred_cluster_red_mem_mb"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "mapred-site.xml"
-  },
-  {
-    "name": "mapred.job.map.memory.mb",
-    "templateName": ["mapred_job_map_mem_mb"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "mapred-site.xml"
-  },
-  {
-    "name": "mapred.cluster.max.map.memory.mb",
-    "templateName": ["mapred_cluster_max_map_mem_mb"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "mapred-site.xml"
-  },
-  {
-    "name": "mapred.cluster.max.reduce.memory.mb",
-    "templateName": ["mapred_cluster_max_red_mem_mb"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "mapred-site.xml"
-  },
-  {
-    "name": "mapred.job.reduce.memory.mb",
-    "templateName": ["mapred_job_red_mem_mb"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "mapred-site.xml"
-  },
-  {
-    "name": "mapred.hosts",
-    "templateName": ["hadoop_conf_dir", "mapred_hosts_include"],
-    "foreignKey": null,
-    "value": "<templateName[0]>/<templateName[1]>",
-    "filename": "mapred-site.xml"
-  },
-  {
-    "name": "mapred.hosts.exclude",
-    "templateName": ["hadoop_conf_dir", "mapred_hosts_exclude"],
-    "foreignKey": null,
-    "value": "<templateName[0]>/<templateName[1]>",
-    "filename": "mapred-site.xml"
-  },
-  {
-    "name": "mapred.healthChecker.script.path",
-    "templateName": ["mapred_jobstatus_dir"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "mapred-site.xml"
-  },
-  {
-    "name": "mapred.job.tracker.persist.jobstatus.dir",
-    "templateName": ["hadoop_conf_dir"],
-    "foreignKey": null,
-    "value": "<templateName[0]>/health_check",
-    "filename": "mapred-site.xml"
-  },
-  {
-    "name": "mapred.child.java.opts",
-    "templateName": ["mapred_child_java_opts_sz"],
-    "foreignKey": null,
-    "value": "-server -Xmx<templateName[0]>m -Djava.net.preferIPv4Stack=true",
-    "filename": "mapred-site.xml"
-  },
-  {
-    "name": "mapred.cluster.map.memory.mb",
-    "templateName": ["mapred_cluster_map_mem_mb"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "mapred-site.xml"
-  },
-  {
-    "name": "io.sort.mb",
-    "templateName": ["io_sort_mb"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "mapred-site.xml"
-  },
-  {
-    "name": "io.sort.spill.percent",
-    "templateName": ["io_sort_spill_percent"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "mapred-site.xml"
-  },
-  {
-    "name": "mapred.system.dir",
-    "templateName": ["mapred_system_dir"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "mapred-site.xml"
-  },
-  {
-    "name": "mapred.job.tracker",
-    "templateName": ["jobtracker_host"],
-    "foreignKey": null,
-    "value": "<templateName[0]>:50300",
-    "filename": "mapred-site.xml"
-  },
-  {
-    "name": "mapred.job.tracker.http.address",
-    "templateName": ["jobtracker_host"],
-    "foreignKey": null,
-    "value": "<templateName[0]>:50030",
-    "filename": "mapred-site.xml"
-  },
-  {
-    "name": "mapred.userlog.retain.hours",
-    "templateName": ["mapreduce_userlog_retainhours"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "mapred-site.xml"
-  },
-  {
-    "name": "mapred.jobtracker.maxtasks.per.job",
-    "templateName": ["maxtasks_per_job"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "mapred-site.xml"
-  },
-  {
-    "name": "mapred.task.tracker.task-controller",
-    "templateName": ["task_controller"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "mapred-site.xml"
-  },
-  {
-    "name": "mapreduce.jobtracker.kerberos.principal",
-    "templateName": ["kerberos_domain"],
-    "foreignKey": null,
-    "value": "jt/_HOST@<templateName[0]>",
-    "filename": "mapred-site.xml"
-  },
-  {
-    "name": "mapreduce.tasktracker.kerberos.principal",
-    "templateName": ["kerberos_domain"],
-    "foreignKey": null,
-    "value": "tt/_HOST@<templateName[0]>",
-    "filename": "mapred-site.xml"
-  },
-  {
-    "name": "mapreduce.jobtracker.keytab.file",
-    "templateName": ["keytab_path"],
-    "foreignKey": null,
-    "value": "<templateName[0]>/jt.service.keytab",
-    "filename": "mapred-site.xml"
-  },
-  {
-    "name": "mapreduce.tasktracker.keytab.file",
-    "templateName": ["keytab_path"],
-    "foreignKey": null,
-    "value": "<templateName[0]>/tt.service.keytab",
-    "filename": "mapred-site.xml"
-  },
-  {
-    "name": "mapreduce.history.server.embedded",
-    "templateName": [],
-    "foreignKey": null,
-    "value": "false",
-    "filename": "mapred-site.xml"
-  },
-  {
-    "name": "mapreduce.history.server.http.address",
-    "templateName": ["jobtracker_host"],
-    "foreignKey": null,
-    "value": "<templateName[0]>:51111",
-    "filename": "mapred-site.xml"
-  },
-  {
-    "name": "mapreduce.jobhistory.kerberos.principal",
-    "templateName": ["kerberos_domain"],
-    "foreignKey": null,
-    "value": "jt/_HOST@<templateName[0]>",
-    "filename": "mapred-site.xml"
-  },
-  {
-    "name": "mapreduce.jobhistory.keytab.file",
-    "templateName": ["keytab_path"],
-    "foreignKey": null,
-    "value": "<templateName[0]>/jt.service.keytab",
-    "filename": "mapred-site.xml"
-  },
-  {
-    "name": "hbase.rootdir",
-    "templateName": ["namenode_host", "hbase_hdfs_root_dir"],
-    "foreignKey": null,
-    "value": "hdfs://<templateName[0]>:8020<templateName[1]>",
-    "filename": "hbase-site.xml"
-  },
-  {
-    "name": "hbase.tmp.dir",
-    "templateName": ["hbase_tmp_dir"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "hbase-site.xml"
-  },
-  /*
-   {
-   "name": "hbase.master.info.bindAddress",
-   "templateName": ["hbasemaster.host"],
-   "foreignKey": null,
-   "value": "<templateName[0]>",
-   "filename": "hbase-site.xml"
-   },
-   */
-  {
-    "name": "hbase.regionserver.global.memstore.upperLimit",
-    "templateName": ["regionserver_memstore_upperlimit"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "hbase-site.xml"
-  },
-  {
-    "name": "hbase.hstore.blockingStoreFiles",
-    "templateName": ["hstore_blockingstorefiles"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "hbase-site.xml"
-  },
-  {
-    "name": "hbase.hstore.compactionThreshold",
-    "templateName": ["hstore_compactionthreshold"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "hbase-site.xml"
-  },
-  {
-    "name": "hfile.block.cache.size",
-    "templateName": ["hfile_blockcache_size"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "hbase-site.xml"
-  },
-  {
-    "name": "hbase.hregion.max.filesize",
-    "templateName": ["hstorefile_maxsize"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "hbase-site.xml"
-  },
-  {
-    "name": "hbase.regionserver.handler.count",
-    "templateName": ["regionserver_handlers"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "hbase-site.xml"
-  },
-  {
-    "name": "hbase.hregion.majorcompaction",
-    "templateName": ["hregion_majorcompaction"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "hbase-site.xml"
-  },
-  {
-    "name": "hbase.regionserver.global.memstore.lowerLimit",
-    "templateName": ["regionserver_memstore_lowerlimit"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "hbase-site.xml"
-  },
-  {
-    "name": "hbase.hregion.memstore.block.multiplier",
-    "templateName": ["hregion_blockmultiplier"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "hbase-site.xml"
-  },
-  {
-    "name": "hbase.hregion.memstore.mslab.enabled",
-    "templateName": ["regionserver_memstore_lab"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "hbase-site.xml"
-  },
-  {
-    "name": "hbase.hregion.memstore.flush.size",
-    "templateName": ["hregion_memstoreflushsize"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "hbase-site.xml"
-  },
-  {
-    "name": "hbase.client.scanner.caching",
-    "templateName": ["client_scannercaching"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "hbase-site.xml"
-  },
-  {
-    "name": "hbase.cluster.distributed",
-    "templateName": [],
-    "foreignKey": null,
-    "value": "true",
-    "filename": "hbase-site.xml"
-  },
-  {
-    "name": "hbase.zookeeper.property.clientPort",
-    "templateName": [],
-    "foreignKey": null,
-    "value": "2181",
-    "filename": "hbase-site.xml"
-  },
-  {
-    "name": "zookeeper.session.timeout",
-    "templateName": ["zookeeper_sessiontimeout"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "hbase-site.xml"
-  },
-  {
-    "name": "hbase.client.keyvalue.maxsize",
-    "templateName": ["hfile_max_keyvalue_size"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "hbase-site.xml"
-  },
-  {
-    "name": "hbase.master.keytab.file",
-    "templateName": ["keytab_path"],
-    "foreignKey": null,
-    "value": "<templateName[0]>/hm.service.keytab",
-    "filename": "hbase-site.xml"
-  },
-  {
-    "name": "hbase.master.kerberos.principal",
-    "templateName": ["kerberos_domain"],
-    "foreignKey": null,
-    "value": "hm/_HOST@<templateName[0]>",
-    "filename": "hbase-site.xml"
-  },
-  {
-    "name": "hbase.regionserver.kerberos.principal",
-    "templateName": ["kerberos_domain"],
-    "foreignKey": null,
-    "value": "rs/_HOST@<templateName[0]>",
-    "filename": "hbase-site.xml"
-  },
-  /*
-  {
-    "name": "hbase.coprocessor.region.classes",
-    "templateName": ["preloaded_regioncoprocessor_classes"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "hbase-site.xml"
-  },
-  */
-
-  {
-    "name": "dfs.support.append",
-    "templateName": ["hdfs_support_append"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "hbase-site.xml"
-  },
-
-  {
-    "name": "dfs.client.read.shortcircuit",
-    "templateName": ["hdfs_enable_shortcircuit_read"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "hbase-site.xml"
-  },
-  {
-    "name": "dfs.client.read.shortcircuit.skip.checksum",
-    "templateName": ["hdfs_enable_shortcircuit_skipchecksum"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "hbase-site.xml"
-  },
-  {
-    "name": "templeton.hive.properties",
-    "templateName": ["hivemetastore_host"],
-    "foreignKey": null,
-    "value": "hive.metastore.local=false,hive.metastore.uris=thrift://<templateName[0]>:9083,hive.metastore.sasl.enabled=yes,hive.metastore.execute.setugi=true",
-    "filename": "webhcat-site.xml"
-  },
-  {
-    "name": "templeton.zookeeper.hosts",
-    "templateName": ["zookeeperserver_hosts"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "webhcat-site.xml"
-  },
-  {
-    "name": "hbase.zookeeper.quorum",
-    "templateName": ["zookeeperserver_hosts"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "hbase-site.xml"
-  }
-];
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/data/config_properties.js b/branch-1.2/ambari-web/app/data/config_properties.js
deleted file mode 100644
index a423ab8..0000000
--- a/branch-1.2/ambari-web/app/data/config_properties.js
+++ /dev/null
@@ -1,2305 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Defines service configuration properties.
- *   name:
- *     The name of the config property that is understood by Ambari server and agent.
- *     E.g., "datanode_du_reserved"
- *
- *   displayName:
- *     The human-friendly display name of the config property.
- *     E.g., "Reserved space for HDFS"
- *
- *   description:
- *     The description of the config property.
- *     E.g., "Reserved space in GB per volume"
- *
- *   defaultValue:
- *     The default value of the config property.
- *     E.g., "1"
- *
- *   isReconfigurable:
- *     Whether the config property can be reconfigured after it has been initially set and deployed.
- *     If this is unspecified, true is assumed.
- *     E.g., true, false
- *
- *   isRequired:
- *     Whether the config property is required or not.
- *     If this is unspecified, true is assumed.
- *     E.g., true, false
- *
- *   displayType:
- *     How the config property is to be rendered for user input.
- *     If this is left unspecified, "string" is assumed
- *     E.g., "string", "int", "float", "checkbox", "directories", "custom", "email", "masterHost", "slaveHosts"
- *
- *   unit:
- *     The unit for the config property.
- *     E.g., "ms", "MB", "bytes"
- *
- *   serviceName:
- *     The service that the config property belongs to.
- *     E.g., "HDFS", "MAPREDUCE", "ZOOKEEPER", etc.
- *
- *   category:
- *     The category that the config property belongs to (used for grouping config properties in the UI).
- *     If unspecified, "General" is assumed.
- *     E.g., "General", "Advanced", "NameNode", "DataNode"
- */
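-/**
- * Example entry following the schema above (hypothetical values, reusing the
- * samples given in the field descriptions):
- *
- *   {
- *     "name": "datanode_du_reserved",
- *     "displayName": "Reserved space for HDFS",
- *     "description": "Reserved space in GB per volume",
- *     "defaultValue": "1",
- *     "displayType": "int",
- *     "unit": "GB",
- *     "serviceName": "HDFS",
- *     "category": "General"
- *   }
- */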
-
-module.exports =
-{
-  "configProperties": [
-    {
-      "id": "puppet var",
-      "name": "hbasemaster_host",
-      "displayName": "HBase Master host",
-      "value": "",
-      "defaultValue": "",
-      "description": "The host that has been assigned to run HBase Master",
-      "displayType": "masterHost",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "HBASE",
-      "category": "HBase Master"
-    },
-    {
-      "id": "puppet var",
-      "name": "regionserver_hosts",
-      "displayName": "RegionServer hosts",
-      "value": "",
-      "defaultValue": "",
-      "description": "The hosts that have been assigned to run RegionServer",
-      "displayType": "slaveHosts",
-      "isVisible": true,
-      "isRequired": false,
-      "domain": "regionserver-global",
-      "serviceName": "HBASE",
-      "category": "RegionServer"
-    },
-    {
-      "id": "puppet var",
-      "name": "hbase_log_dir",
-      "displayName": "HBase Log Dir",
-      "description": "Directory for HBase logs",
-      "defaultValue": "/var/log/hbase",
-      "isReconfigurable": false,
-      "displayType": "directory",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "HBASE",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "hbase_pid_dir",
-      "displayName": "HBase PID Dir",
-      "description": "Directory in which the pid files for HBase processes will be created",
-      "defaultValue": "/var/run/hbase",
-      "isReconfigurable": false,
-      "displayType": "directory",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "HBASE",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "hbase_regionserver_heapsize",
-      "displayName": "HBase Region Servers maximum Java heap size",
-      "description": "Maximum Java heap size for HBase Region Servers (Java option -Xmx)",
-      "defaultValue": "1024",
-      "displayType": "int",
-      "unit": "MB",
-      "isVisible": true,
-      "domain": "regionserver-global",
-      "serviceName": "HBASE",
-      "category": "RegionServer"
-    },
-    {
-      "id": "puppet var",
-      "name": "hbase_master_heapsize",
-      "displayName": "HBase Master Maximum Java heap size",
-      "description": "Maximum Java heap size for HBase master (Java option -Xmx)",
-      "defaultValue": "1024",
-      "displayType": "int",
-      "unit": "MB",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "HBASE",
-      "category": "HBase Master"
-    },
-    {
-      "id": "puppet var",
-      "name": "hstore_compactionthreshold",
-      "displayName": "HBase HStore compaction threshold",
-      "description": "If more than this number of HStoreFiles in any one HStore then a compaction is run to rewrite all HStoreFiles files as one.",
-      "defaultValue": "3",
-      "displayType": "int",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "HBASE"
-    },
-    {
-      "id": "puppet var",
-      "name": "hfile_blockcache_size",
-      "displayName": "HFile block cache size ",
-      "description": "Percentage of maximum heap (-Xmx setting) to allocate to block cache used by HFile/StoreFile. Set to 0 to disable but it's not recommended.",
-      "defaultValue": "0.25",
-      "displayType": "float",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "HBASE"
-    },
-    {
-      "id": "puppet var",
-      "name": "hstorefile_maxsize",
-      "displayName": "Maximum HStoreFile Size",
-      "description": "If any one of a column families' HStoreFiles has grown to exceed this value, the hosting HRegion is split in two.",
-      "defaultValue": "1073741824",
-      "displayType": "int",
-      "unit": "bytes",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "HBASE"
-    },
-    {
-      "id": "puppet var",
-      "name": "regionserver_handlers",
-      "displayName": "HBase Region Server Handler",
-      "description": "Count of RPC Listener instances spun up on RegionServers",
-      "defaultValue": "30",
-      "displayType": "int",
-      "isVisible": true,
-      "domain": "regionserver-global",
-      "serviceName": "HBASE",
-      "category": "RegionServer"
-    },
-    {
-      "id": "puppet var",
-      "name": "hregion_majorcompaction",
-      "displayName": "HBase Region Major Compaction",
-      "description": "The time between major compactions of all HStoreFiles in a region. Set to 0 to disable automated major compactions.",
-      "defaultValue": "86400000",
-      "displayType": "int",
-      "unit": "ms",
-      "isVisible": true,
-      "domain": "regionserver-global",
-      "serviceName": "HBASE",
-      "category": "RegionServer"
-    },
-    {
-      "id": "puppet var",
-      "name": "hregion_blockmultiplier",
-      "displayName": "HBase Region Block Multiplier",
-      "description": "Block updates if memstore has \"Multiplier * HBase Region Memstore Flush Size\" bytes. Useful preventing runaway memstore during spikes in update traffic",
-      "defaultValue": "2",
-      "displayType": "int",
-      "isVisible": true,
-      "domain": "regionserver-global",
-      "serviceName": "HBASE",
-      "category": "RegionServer"
-    },
-    {
-      "id": "puppet var",
-      "name": "hregion_memstoreflushsize",
-      "displayName": "HBase Region Memstore Flush Size",
-      "description": "Memstore will be flushed to disk if size of the memstore exceeds this number of bytes.",
-      "defaultValue": "134217728",
-      "displayType": "int",
-      "unit": "bytes",
-      "isVisible": true,
-      "domain": "regionserver-global",
-      "serviceName": "HBASE",
-      "category": "RegionServer"
-    },
-    {
-      "id": "puppet var",
-      "name": "client_scannercaching",
-      "displayName": "HBase Client Scanner Caching",
-      "description": "Number of rows that will be fetched when calling next on a scanner if it is not served from (local, client) memory. Do not set this value such that the time between invocations is greater than the scanner timeout",
-      "defaultValue": "100",
-      "displayType": "int",
-      "unit": "rows",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "HBASE"
-    },
-    {
-      "id": "puppet var",
-      "name": "zookeeper_sessiontimeout",
-      "displayName": "Zookeeper timeout for HBase Session",
-      "description": "HBase passes this to the zk quorum as suggested maximum time for a session",
-      "defaultValue": "60000",
-      "displayType": "int",
-      "unit": "ms",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "HBASE"
-    },
-    {
-      "id": "puppet var",
-      "name": "hfile_max_keyvalue_size",
-      "displayName": "HBase Client Maximum key-value Size",
-      "description": "Specifies the combined maximum allowed size of a KeyValue instance. It should be set to a fraction of the maximum region size.",
-      "defaultValue": "10485760",
-      "displayType": "int",
-      "unit": "bytes",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "HBASE"
-    },
-    {
-      "id": "puppet var",
-      "name": "hbase_hdfs_root_dir",
-      "displayName": "Hbase relative path to HDFS",
-      "description": "Hbase relative directory to HDFS",
-      "defaultValue": "/apps/hbase/data",
-      "isRequired": true,
-      "displayType": "advanced",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "HBASE",
-      "category": "Advanced"
-    },
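-    // Note: this path is resolved against the cluster's default filesystem;
-    // assuming a NameNode on the customary port 8020, it would expand to
-    // something like hdfs://<namenode-host>:8020/apps/hbase/data.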
-    {
-      "id": "puppet var",
-      "name": "hbase_tmp_dir",
-      "displayName": "Hbase temp directory",
-      "description": "",
-      "defaultValue": "/var/log/hbase",
-      "isRequired": false,
-      "displayType": "advanced",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "HBASE",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "hdfs_enable_shortcircuit_read",
-      "displayName": "HDFS Short-circuit read",
-      "description": "",
-      "defaultValue": true,
-      "isRequired": false,
-      "displayType": "checkbox",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "HBASE",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "hdfs_enable_shortcircuit_skipchecksum",
-      "displayName": "HDFS shortcircuit skip checksum",
-      "description": "skip checksum for short circuit read",
-      "defaultValue": false,
-      "isRequired": false,
-      "displayType": "checkbox",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "HBASE",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "hdfs_support_append",
-      "displayName": "HDFS append support",
-      "description": "HDFS append support",
-      "defaultValue": true,
-      "isRequired": false,
-      "displayType": "checkbox",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "HBASE",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "hstore_blockingstorefiles",
-      "displayName": "hstore blocking storefiles",
-      "description": "If more than this number of StoreFiles in any one Store (one StoreFile is written per flush of " +
-        "MemStore) then updates are blocked for this HRegion until a compaction is completed, or until " +
-        "hbase.hstore.blockingWaitTime has been exceeded.",
-      "defaultValue": 7,
-      "isRequired": true,
-      "displayType": "init",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "HBASE",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "regionserver_memstore_lab",
-      "displayName": "regionserver_memstore_lab",
-      "description": "",
-      "defaultValue": true,
-      "isRequired": false,
-      "displayType": "checkbox",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "HBASE",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "regionserver_memstore_lowerlimit",
-      "displayName": "regionserver_memstore_lowerlimit",
-      "description": "",
-      "defaultValue": "0.35",
-      "isRequired": false,
-      "displayType": "float",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "HBASE",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "regionserver_memstore_upperlimit",
-      "displayName": "regionserver_memstore_upperlimit",
-      "description": "",
-      "defaultValue": "0.4",
-      "isRequired": true,
-      "displayType": "float",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "HBASE",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "hbase_conf_dir",
-      "displayName": "HBase conf dir",
-      "description": "",
-      "defaultValue": "/etc/hbase",
-      "isRequired": false,
-      "displayType": "directory",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "MISC",
-      "category": "General"
-    },
-    {
-      "id": "puppet var",
-      "name": "namenode_host",
-      "displayName": "NameNode host",
-      "value": "",
-      "defaultValue": "",
-      "description": "The host that has been assigned to run NameNode",
-      "displayType": "masterHost",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "HDFS",
-      "category": "NameNode"
-    },
-    {
-      "id": "puppet var",
-      "name": "dfs_name_dir",
-      "displayName": "NameNode directories",
-      "description": "NameNode directories for HDFS to store the file system image",
-      "defaultValue": "",
-      "defaultDirectory": "/hadoop/hdfs/namenode",
-      "isReconfigurable": false,
-      "displayType": "directories",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "HDFS",
-      "category": "NameNode"
-    },
-    {
-      "id": "puppet var",
-      "name": "snamenode_host",
-      "displayName": "SNameNode host",
-      "value": "",
-      "defaultValue": "",
-      "description": "The host that has been assigned to run SecondaryNameNode",
-      "displayType": "masterHost",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "HDFS",
-      "category": "SNameNode"
-    },
-    {
-      "id": "puppet var",
-      "name": "fs_checkpoint_dir",
-      "displayName": "SecondaryNameNode Checkpoint directory",
-      "description": "Directory on the local filesystem where the Secondary NameNode should store the temporary images to merge",
-      "defaultValue": "",
-      "defaultDirectory": "/hadoop/hdfs/namesecondary",
-      "isReconfigurable": false,
-      "displayType": "directory",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "HDFS",
-      "category": "SNameNode"
-    },
-    {
-      "id": "puppet var",
-      "name": "datanode_hosts", //not in the schema. For UI purpose
-      "displayName": "DataNode hosts",
-      "value": "",
-      "defaultValue": "",
-      "description": "The hosts that have been assigned to run DataNode",
-      "displayType": "slaveHosts",
-      "isRequired": false,
-      "isVisible": true,
-      "domain": "datanode-global",
-      "serviceName": "HDFS",
-      "category": "DataNode"
-    },
-    {
-      "id": "puppet var",
-      "name": "dfs_data_dir",
-      "displayName": "DataNode directories",
-      "description": "DataNode directories for HDFS to store the data blocks",
-      "defaultValue": "",
-      "defaultDirectory": "/hadoop/hdfs/data",
-      "isReconfigurable": false,
-      "displayType": "directories",
-      "isVisible": true,
-      "domain": "datanode-global",
-      "serviceName": "HDFS",
-      "category": "DataNode"
-    },
-    {
-      "id": "puppet var",
-      "name": "hdfs_log_dir_prefix",
-      "displayName": "Hadoop Log Dir Prefix",
-      "description": "The parent directory for Hadoop log files.  The HDFS log directory will be ${hadoop_log_dir_prefix} / ${hdfs_user} and the MapReduce log directory will be ${hadoop_log_dir_prefix} / ${mapred_user}.",
-      "defaultValue": "/var/log/hadoop",
-      "displayType": "directory",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "HDFS",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "hadoop_pid_dir_prefix",
-      "displayName": "Hadoop PID Dir Prefix",
-      "description": "The parent directory in which the PID files for Hadoop processes will be created.  The HDFS PID directory will be ${hadoop_pid_dir_prefix} / ${hdfs_user} and the MapReduce PID directory will be ${hadoop_pid_dir_prefix} / ${mapred_user}.",
-      "defaultValue": "/var/run/hadoop",
-      "displayType": "directory",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "HDFS",
-      "category": "Advanced"
-    },
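-    // Example expansion of the two prefixes above: with the default users
-    // (hdfs_user = "hdfs", mapred_user = "mapred", defined further below),
-    // HDFS logs and PIDs land in /var/log/hadoop/hdfs and /var/run/hadoop/hdfs,
-    // and the MapReduce equivalents in /var/log/hadoop/mapred and
-    // /var/run/hadoop/mapred.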
-    /*
-    {
-      "id": "puppet var",
-      "name": "dfs_support_append",
-      "displayName": "Append enabled",
-      "description": "Whether to enable HDFS Append feature",
-      "defaultValue": true,
-      "displayType": "checkbox",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "HDFS"
-    },
-    */
-    {
-      "id": "puppet var",
-      "name": "dfs_webhdfs_enabled",
-      "displayName": "WebHDFS enabled",
-      "description": "Whether to enable WebHDFS feature",
-      "defaultValue": false,
-      "displayType": "checkbox",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "HDFS"
-    },
-    {
-      "id": "puppet var",
-      "name": "hadoop_heapsize",
-      "displayName": "Hadoop maximum Java heap size",
-      "description": "Maximum Java heap size for daemons such as Balancer (Java option -Xmx)",
-      "defaultValue": "1024",
-      "displayType": "int",
-      "unit": "MB",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "HDFS"
-    },
-    {
-      "id": "puppet var",
-      "name": "namenode_heapsize",
-      "displayName": "NameNode Java heap size",
-      "description": "Initial and maximum Java heap size for NameNode (Java options -Xms and -Xmx)",
-      "defaultValue": "1024",
-      "displayType": "int",
-      "unit": "MB",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "HDFS",
-      "category": "NameNode"
-    },
-    {
-      "id": "puppet var",
-      "name": "namenode_opt_newsize",
-      "displayName": "NameNode new generation size",
-      "description": "Default size of Java new generation for NameNode (Java option -XX:NewSize)",
-      "defaultValue": "200",
-      "displayType": "int",
-      "unit": "MB",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "HDFS",
-      "category": "NameNode"
-    },
-    {
-      "id": "puppet var",
-      "name": "namenode_opt_maxnewsize",
-      "displayName": "NameNode maximum new generation size",
-      "description": "",
-      "defaultValue": "640",
-      "displayType": "int",
-      "unit": "MB",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "HDFS",
-      "category": "NameNode"
-    },
-    {
-      "id": "puppet var",
-      "name": "datanode_du_reserved",
-      "displayName": "Reserved space for HDFS",
-      "description": "Reserved space in GB per volume",
-      "defaultValue": "1",
-      "displayType": "int",
-      "unit": "GB",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "HDFS"
-    },
-    {
-      "id": "puppet var",
-      "name": "dtnode_heapsize",
-      "displayName": "DataNode maximum Java heap size",
-      "description": "Maximum Java heap size for DataNode (Java option -Xmx)",
-      "defaultValue": "1024",
-      "displayType": "int",
-      "unit": "MB",
-      "isVisible": true,
-      "domain": "datanode-global",
-      "serviceName": "HDFS",
-      "category": "DataNode"
-    },
-    {
-      "id": "puppet var",
-      "name": "dfs_datanode_failed_volume_tolerated",
-      "displayName": "DataNode volumes failure toleration",
-      "description": "The number of volumes that are allowed to fail before a DataNode stops offering service",
-      "defaultValue": "0",
-      "displayType": "int",
-      "isVisible": true,
-      "domain": "datanode-global",
-      "serviceName": "HDFS",
-      "category": "DataNode"
-    },
-    {
-      "id": "puppet var",
-      "name": "fs_checkpoint_period",
-      "displayName": "HDFS Maximum Checkpoint Delay",
-      "description": "Maximum delay between two consecutive checkpoints for HDFS",
-      "defaultValue": "21600",
-      "displayType": "int",
-      "unit": "seconds",
-      "isVisible": true,
-      "domain": "global",
-      "filename": "core-site.xml",
-      "serviceName": "HDFS"
-    },
-
-    {
-      "id": "puppet var",
-      "name": "fs_checkpoint_size",
-      "displayName": "HDFS Maximum Edit Log Size for Checkpointing",
-      "description": "Maximum size of the edits log file that forces an urgent checkpoint even if the maximum checkpoint delay is not reached",
-      "defaultValue": "0.5",
-      "displayType": "float",
-      "unit": "GB",
-      "isVisible": true,
-      "domain": "global",
-      "filename": "core-site.xml",
-      "serviceName": "HDFS"
-    },
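-    // The two checkpoint settings above interact: the SecondaryNameNode
-    // checkpoints every fs_checkpoint_period seconds (21600 s = 6 h), or
-    // earlier if the edits log outgrows fs_checkpoint_size (0.5 GB, roughly
-    // 536870912 bytes), whichever comes first.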
-    {
-      "id": "puppet var",
-      "name": "proxyuser_group",
-      "displayName": "Proxy group for Hive, WebHCat, and Oozie",
-      "description": "",
-      "defaultValue": "users",
-      "isReconfigurable": false,
-      "displayType": "user",
-      "isVisible": true,
-      "filename": "core-site.xml",
-      "domain": "global",
-      "serviceName": "MISC",
-      "category": "Users and Groups"
-    },
-    {
-      "id": "puppet var",
-      "name": "dfs_exclude",
-      "displayName": "Exclude hosts",
-      "description": "Names a file that contains a list of hosts that are not permitted to connect to the namenode.  This file will be placed inside the Hadoop conf directory.",
-      "defaultValue": "dfs.exclude",
-      "displayType": "advanced",
-      "isVisible": true,
-      "filename": "hdfs-site.xml",
-      "domain": "global",
-      "serviceName": "HDFS",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "dfs_include",
-      "displayName": "Include hosts",
-      "description": "Names a file that contains a list of hosts that are permitted to connect to the namenode.  This file will be placed inside the Hadoop conf directory.",
-      "defaultValue": "dfs.include",
-      "displayType": "advanced",
-      "isVisible": true,
-      "filename": "hdfs-site.xml",
-      "domain": "global",
-      "serviceName": "HDFS",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "dfs_replication",
-      "displayName": "Block replication",
-      "description": "Default block replication.",
-      "displayType": "int",
-      "defaultValue": "3",
-      "isVisible": true,
-      "filename": "hdfs-site.xml",
-      "domain": "global",
-      "serviceName": "HDFS",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "dfs_block_local_path_access_user",
-      "displayName": "dfs.block.local-path-access.user",
-      "description": "the user who is allowed to perform short circuit reads",
-      "displayType": "advanced",
-      "defaultValue": "hbase",
-      "isVisible": true,
-      "filename": "hdfs-site.xml",
-      "domain": "global",
-      "serviceName": "HDFS",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "dfs_datanode_address",
-      "displayName": "dfs_datanode_address",
-      "description": "",
-      "defaultValue": "50010",
-      "isReconfigurable": true,
-      "displayType": "int",
-      "isVisible": true,
-      "filename": "hdfs-site.xml",
-      "domain": "global",
-      "serviceName": "MISC",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "dfs_datanode_http_address",
-      "displayName": "dfs_datanode_http_address",
-      "description": "",
-      "defaultValue": "50075",
-      "isReconfigurable": true,
-      "displayType": "int",
-      "isVisible": true,
-      "filename": "hdfs-site.xml",
-      "domain": "global",
-      "serviceName": "MISC",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "dfs_datanode_data_dir_perm",
-      "displayName": "dfs_datanode_data_dir_perm",
-      "description": "",
-      "defaultValue": "750",
-      "isReconfigurable": true,
-      "displayType": "int",
-      "isVisible": false,
-      "filename": "hdfs-site.xml",
-      "domain": "global",
-      "serviceName": "HDFS",
-      "category": "Advanced"
-    },
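-    // "750" above is an octal mode: owner rwx, group r-x, other none,
-    // applied to each DataNode data directory on the local filesystem.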
-    {
-      "id": "puppet var",
-      "name": "security_enabled",
-      "displayName": "Hadoop Security",
-      "description": "Enable hadoop security",
-      "defaultValue": false,
-      "isRequired": false,
-      "displayType": "checkbox",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "HDFS",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "kerberos_domain",
-      "displayName": "Kerberos realm",
-      "description": "Kerberos realm",
-      "defaultValue": "EXAMPLE.COM",
-      "isRequired": true,
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "HDFS",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "kadmin_pw",
-      "displayName": "password",
-      "description": "Kerberos admin password",
-      "defaultValue": "",
-      "isRequired": true,
-      "displayType": "password",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "HDFS",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "keytab_path",
-      "displayName": "Keytab directory",
-      "description": "Kerberos admin password",
-      "defaultValue": "/etc/security/keytabs",
-      "isRequired": true,
-      "displayType": "advanced",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "HDFS",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "hivemetastore_host",
-      "displayName": "Hive Metastore host",
-      "value": "",
-      "defaultValue": "",
-      "description": "The host that has been assigned to run Hive Metastore",
-      "displayType": "masterHost",
-      "isVisible": true,
-      "serviceName": "HIVE",
-      "category": "Hive Metastore"
-    },
-    /* Comment out the other hive_database and uncomment this for Hive with Postgres support
-    {
-      "id": "puppet var",
-      "name": "hive_database",
-      "displayName": "Hive Database",
-      "value": "",
-      "defaultValue": "New PostgreSQL Database",
-      "options": [
-        {
-          displayName: 'New PostgreSQL Database',
-          foreignKeys: ['hive_ambari_database', 'hive_ambari_host']
-        },
-        {
-          displayName: 'Existing Database',
-          foreignKeys: ['hive_existing_database', 'hive_existing_host']
-        }
-      ],
-      "description": "PostgreSQL will be installed by Ambari. Any other database will have to be installed by the user.",
-      "displayType": "radio button",
-      "radioName": "hive-database",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "HIVE",
-      "category": "Hive Metastore"
-    },
-    */
-    {
-      "id": "puppet var",
-      "name": "hive_database",
-      "displayName": "Hive Database",
-      "value": "",
-      "defaultValue": "New MySQL Database",
-      "options": [
-        {
-          displayName: 'New MySQL Database',
-          foreignKeys: ['hive_ambari_database', 'hive_ambari_host']
-        },
-        {
-          displayName: 'Existing MySQL Database',
-          foreignKeys: ['hive_existing_database', 'hive_existing_host']
-        }
-      ],
-      "description": "MySQL will be installed by Ambari",
-      "displayType": "radio button",
-      "isReconfigurable": false,
-      "radioName": "hive-database",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "HIVE",
-      "category": "Hive Metastore"
-    },
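-    // The "options"/"foreignKeys" pattern above presumably works as follows:
-    // each radio option lists the companion properties (e.g.
-    // hive_ambari_database / hive_ambari_host, defined below) that the UI
-    // reveals when that option is selected.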
-    {
-      "id": "puppet var",
-      "name": "hive_existing_database",
-      "displayName": "Database Type",
-      "value": "",
-      "defaultValue": "MySQL",
-      "description": "Using an existing database for Hive Metastore",
-      "displayType": "masterHost",
-      "isVisible": false,
-      "isReconfigurable": false,
-      "domain": "global",
-      "serviceName": "HIVE",
-      "category": "Hive Metastore"
-    },
-    /* Uncomment for Hive with Postgres support
-    {
-      "id": "puppet var",
-      "name": "hive_existing_database",
-      "displayName": "Hive Database",
-      "value": "",
-      "defaultValue": "MySQL",
-      "description": "Select the database, if you already have existing one for Hive Metastore.",
-      "displayType": "string",
-      "isVisible": false,
-      "options": ['MySQL', 'PostgreSQL'],
-      "domain": "global",
-      "serviceName": "HIVE",
-      "category": "Hive Metastore"
-    },
-    {
-      "id": "puppet var",
-      "name": "hive_existing_database",
-      "displayName": "Hive Database",
-      "value": "",
-      "defaultValue": "MySQL",
-      "description": "Select the database, if you already have existing one for Hive Metastore.",
-      "displayType": "combobox",
-      "isVisible": false,
-      "options": ['MySQL', 'PostgreSQL'],
-      "domain": "global",
-      "serviceName": "HIVE",
-      "category": "Hive Metastore"
-    },
-    */
-    {
-      "id": "puppet var",
-      "name": "hive_existing_host",
-      "displayName": "Database host",
-      "description": "Specify the host on which the existing database is hosted",
-      "defaultValue": "",
-      "isReconfigurable": false,
-      "displayType": "host",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "HIVE",
-      "category": "Hive Metastore"
-    },
-    {
-      "id": "puppet var",
-      "name": "hive_ambari_database",
-      "displayName": "Database Type",
-      "value": "",
-      "defaultValue": "MySQL",
-      "description": "MySQL will be installed by Ambari",
-      "displayType": "masterHost",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "HIVE",
-      "category": "Hive Metastore"
-    },
-    {
-      "id": "puppet var",
-      "name": "hive_ambari_host",
-      "value": "",
-      "defaultValue": "",
-      "displayName": "Database host",
-      "description": "Host on which the database will be created by Ambari",
-      "isReconfigurable": false,
-      "displayType": "masterHost",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "HIVE",
-      "category": "Hive Metastore"
-    },
-    {
-      "id": "puppet var",
-      "name": "hive_database_name",
-      "displayName": "Database name",
-      "description": "Database name used as the Hive Metastore",
-      "defaultValue": "hive",
-      "isReconfigurable": false,
-      "displayType": "host",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "HIVE",
-      "category": "Hive Metastore"
-    },
-    {
-      "id": "puppet var",
-      "name": "hive_metastore_user_name",
-      "displayName": "Database user",
-      "description": "Database user name to use to connect to the database",
-      "defaultValue": "hive",
-      "isReconfigurable": false,
-      "displayType": "user",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "HIVE",
-      "category": "Hive Metastore"
-    },
-    {
-      "id": "puppet var",
-      "name": "hive_metastore_user_passwd",
-      "displayName": "Database password",
-      "description": "Database password to use to connect to the PostgreSQL database",
-      "defaultValue": "",
-      "isReconfigurable": false,
-      "displayType": "password",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "HIVE",
-      "category": "Hive Metastore"
-    },
-    {
-      "id": "puppet var",
-      "name": "hive_metastore_port",
-      "displayName": "Hive metastore port",
-      "description": "",
-      "defaultValue": "9083",
-      "isReconfigurable": false,
-      "displayType": "int",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "HIVE",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "hive_lib",
-      "displayName": "Hive library",
-      "description": "",
-      "defaultValue": "/usr/lib/hive/lib/",
-      "isReconfigurable": false,
-      "displayType": "directory",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "HIVE",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "hive_conf_dir",
-      "displayName": "Hive lconf directory",
-      "description": "",
-      "defaultValue": "/etc/hive/conf",
-      "isReconfigurable": false,
-      "displayType": "directory",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "HIVE",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "hive_dbroot",
-      "displayName": "Hive lconf directory",
-      "description": "",
-      "defaultValue": "/usr/lib/hive/lib",
-      "isReconfigurable": false,
-      "displayType": "directory",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "HIVE",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "hive_log_dir",
-      "displayName": "Hive Log Dir",
-      "description": "Directory for Hive log files",
-      "defaultValue": "/var/log/hive",
-      "isReconfigurable": false,
-      "displayType": "directory",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "HIVE",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "hive_pid_dir",
-      "displayName": "Hive PID Dir",
-      "description": "Directory in which the PID files for Hive processes will be created",
-      "defaultValue": "/var/run/hive",
-      "isReconfigurable": false,
-      "displayType": "directory",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "HIVE",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "gpl_artifacts_download_url",
-      "displayName": "gpl artifact download url",
-      "description": "",
-      "defaultValue": "",
-      "isReconfigurable": false,
-      "displayType": "advanced",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "MISC",
-      "category": "General"
-    },
-    {
-      "id": "puppet var",
-      "name": "apache_artifacts_download_url",
-      "displayName": "apache artifact download url",
-      "description": "",
-      "defaultValue": "",
-      "isReconfigurable": false,
-      "displayType": "advanced",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "MISC",
-      "category": "General"
-    },
-    {
-      "id": "puppet var",
-      "name": "mysql_connector_url",
-      "displayName": "MySQL connector url",
-      "description": "",
-      "defaultValue": "${download_url}/mysql-connector-java-5.1.18.zip",
-      "isReconfigurable": false,
-      "displayType": "directory",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "HIVE",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "hive_aux_jars_path",
-      "displayName": "Hive auxilary jar path",
-      "description": "",
-      "defaultValue": "/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar",
-      "isReconfigurable": false,
-      "displayType": "directory",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "HIVE",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "ganglia_conf_dir",
-      "displayName": "Ganglia conf directory",
-      "description": "",
-      "defaultValue": "/etc/ganglia/hdp",
-      "isReconfigurable": false,
-      "displayType": "directory",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "GANGLIA",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "ganglia_runtime_dir",
-      "displayName": "Ganglia runtime directory",
-      "description": "",
-      "defaultValue": "/var/run/ganglia/hdp",
-      "isReconfigurable": false,
-      "displayType": "directory",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "MISC",
-      "category": "General"
-    },
-    {
-      "id": "puppet var",
-      "name": "gmetad_user",
-      "displayName": "gmetad_user",
-      "description": "",
-      "defaultValue": "nobody",
-      "isReconfigurable": false,
-      "displayType": "advanced",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "MISC",
-      "category": "Users and Groups"
-    },
-    {
-      "id": "puppet var",
-      "name": "gmond_user",
-      "displayName": "gmond_user",
-      "description": "",
-      "defaultValue": "nobody",
-      "isReconfigurable": false,
-      "displayType": "advanced",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName":"MISC",
-      "category": "Users and Groups"
-    },
-    /*
-    {
-      "id": "puppet var",
-      "name": "ganglia_shell_cmds_dir",
-      "displayName": "ganglia_shell_cmds_dir",
-      "description": "",
-      "defaultValue": "/usr/libexec/hdp/ganglia",
-      "isReconfigurable": false,
-      "displayType": "directory",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "MISC",
-      "category": "General"
-    },
-    {
-      "id": "puppet var",
-      "name": "webserver_group",
-      "displayName": "ganglia_shell_cmds_dir",
-      "description": "",
-      "defaultValue": "apache",
-      "isReconfigurable": false,
-      "displayType": "advanced",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "MISC",
-      "category": "General"
-    },
-    */
-    {
-      "id": "puppet var",
-      "name": "hcat_log_dir",
-      "displayName": "WebHCat Log Dir",
-      "description": "Directory for WebHCat log files",
-      "defaultValue": "/var/log/webhcat",
-      "isReconfigurable": false,
-      "displayType": "directory",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "HIVE",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "hcat_pid_dir",
-      "displayName": "WebHCat PID Dir",
-      "description": "Directory in which the PID files for WebHCat processes will be created",
-      "defaultValue": "/var/run/webhcat",
-      "isReconfigurable": false,
-      "displayType": "directory",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "HIVE",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "jobtracker_host",
-      "displayName": "JobTracker host",
-      "value": "",
-      "defaultValue": "",
-      "description": "The host that has been assigned to run JobTracker",
-      "displayType": "masterHost",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "MAPREDUCE",
-      "category": "JobTracker"
-    },
-    {
-      "id": "puppet var",
-      "name": "tasktracker_hosts",
-      "displayName": "TaskTracer hosts",
-      "value": "",
-      "defaultValue": "",
-      "description": "The hosts that have been assigned to run TaskTracker",
-      "displayType": "slaveHosts",
-      "isVisible": true,
-      "isRequired": false,
-      "domain": "tasktracker-global",
-      "serviceName": "MAPREDUCE",
-      "category": "TaskTracker"
-    },
-    {
-      "id": "puppet var",
-      "name": "mapred_local_dir",
-      "displayName": "MapReduce local directories",
-      "description": "Directories for MapReduce to store intermediate data files",
-      "defaultValue": "",
-      "defaultDirectory": "/hadoop/mapred",
-      "displayType": "directories",
-      "isReconfigurable": false,
-      "isVisible": true,
-      "domain": "tasktracker-global",
-      "serviceName": "MAPREDUCE",
-      "category": "TaskTracker"
-    },
-    {
-      "id": "puppet var",
-      "name": "mapred_system_dir",
-      "displayName": "MapReduce system directories",
-      "description": "",
-      "defaultValue": "/mapred/system",
-      "displayType": "directories",
-      "isReconfigurable": false,
-      "isVisible": true,
-      "isRequired": false,
-      "domain": "global",
-      "serviceName": "MAPREDUCE",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "scheduler_name",
-      "displayName": "MapReduce Capacity Scheduler",
-      "description": "The scheduler to use for scheduling of MapReduce jobs",
-      "defaultValue": "org.apache.hadoop.mapred.CapacityTaskScheduler",
-      //"displayType": "directory",
-      "isVisible": true,
-      "serviceName": "MAPREDUCE"
-    },
-    {
-      "id": "puppet var",
-      "name": "jtnode_opt_newsize",
-      "displayName": "JobTracker new generation size",
-      "description": "Default size of Java new generation size for JobTracker in MB (Java option -XX:NewSize)",
-      "defaultValue": "200",
-      "displayType": "int",
-      "unit": "MB",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "MAPREDUCE",
-      "category": "JobTracker"
-    },
-    {
-      "id": "puppet var",
-      "name": "jtnode_opt_maxnewsize",
-      "displayName": "JobTracker maximum new generation size",
-      "description": "Maximum size of Java new generation for JobTracker in MB (Java option -XX:MaxNewSize)",
-      "defaultValue": "200",
-      "displayType": "int",
-      "unit": "MB",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "MAPREDUCE",
-      "category": "JobTracker"
-    },
-    {
-      "id": "puppet var",
-      "name": "jtnode_heapsize",
-      "displayName": "JobTracker maximum Java heap size",
-      "description": "Maximum Java heap size for JobTracker in MB (Java option -Xmx)",
-      "defaultValue": "1024",
-      "displayType": "int",
-      "unit": "MB",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "MAPREDUCE",
-      "category": "JobTracker"
-    },
-    {
-      "id": "puppet var",
-      "name": "mapred_map_tasks_max",
-      "displayName": "Number of Map slots per node",
-      "description": "Number of slots that Map tasks that run simultaneously can occupy on a TaskTracker",
-      "defaultValue": "4",
-      "displayType": "int",
-      "isVisible": true,
-      "domain": "tasktracker-global",
-      "serviceName": "MAPREDUCE",
-      "category": "TaskTracker"
-    },
-    {
-      "id": "puppet var",
-      "name": "mapred_red_tasks_max",
-      "displayName": "Number of Reduce slots per node",
-      "description": "Number of slots that Reduce tasks that run simultaneously can occupy on a TaskTracker.",
-      "defaultValue": "2",
-      "displayType": "int",
-      "isVisible": true,
-      "isRequired": false,
-      "domain": "tasktracker-global",
-      "serviceName": "MAPREDUCE",
-      "category": "TaskTracker"
-    },
-    {
-      "id": "puppet var",
-      "name": "mapred_cluster_map_mem_mb",
-      "displayName": "Cluster's Map slot size (virtual memory)",
-      "description": "The virtual memory size of a single Map slot in the MapReduce framework",
-      "defaultValue": "-1",
-      "displayType": "int",
-      "unit": "MB",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "MAPREDUCE"
-    },
-    {
-      "id": "puppet var",
-      "name": "mapred_cluster_red_mem_mb",
-      "displayName": "Cluster's Reduce slot size (virtual memory)",
-      "description": "The virtual memory size of a single Reduce slot in the MapReduce framework",
-      "defaultValue": "-1",
-      "displayType": "int",
-      "unit": "MB",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "MAPREDUCE"
-    },
-    {
-      "id": "puppet var",
-      "name": "mapred_cluster_max_map_mem_mb",
-      "displayName": "Upper limit on virtual memory for single Map task",
-      "description": "Upper limit on virtual memory size for a single Map task of any MapReduce job",
-      "defaultValue": "-1",
-      "displayType": "int",
-      "unit": "MB",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "MAPREDUCE"
-    },
-    {
-      "id": "puppet var",
-      "name": "mapred_cluster_max_red_mem_mb",
-      "displayName": "Upper limit on virtual memory for single Reduce task",
-      "description": "Upper limit on virtual memory size for a single Reduce task of any MapReduce job",
-      "defaultValue": "-1",
-      "displayType": "int",
-      "unit": "MB",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "MAPREDUCE"
-    },
-    {
-      "id": "puppet var",
-      "name": "mapred_job_map_mem_mb",
-      "displayName": "Default virtual memory for a job's map-task",
-      "description": "Virtual memory for single Map task",
-      "defaultValue": "-1",
-      "displayType": "int",
-      "unit": "MB",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "MAPREDUCE"
-    },
-    {
-      "id": "puppet var",
-      "name": "mapred_job_red_mem_mb",
-      "displayName": "Default virtual memory for a job's reduce-task",
-      "description": "Virtual memory for single Reduce task",
-      "defaultValue": "-1",
-      "displayType": "int",
-      "unit": "MB",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "MAPREDUCE"
-    },
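-    // All six virtual-memory settings above default to -1, which by Hadoop
-    // convention leaves the memory limits disabled until an administrator
-    // supplies real values.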
-    {
-      "id": "puppet var",
-      "name": "mapred_child_java_opts_sz",
-      "displayName": "Java options for MapReduce tasks",
-      "description": "Java options for the TaskTracker child processes.",
-      "defaultValue": "768",
-      "displayType": "int",
-      "unit": "MB",
-      "isVisible": true,
-      "domain": "tasktracker-global",
-      "serviceName": "MAPREDUCE",
-      "category": "TaskTracker"
-    },
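-    // Back-of-the-envelope check using the defaults above: a TaskTracker
-    // with 4 map slots + 2 reduce slots, each child JVM capped at 768 MB,
-    // can commit up to 6 * 768 = 4608 MB of task heap on one node.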
-    {
-      "id": "puppet var",
-      "name": "io_sort_mb",
-      "displayName": "Map-side sort buffer memory",
-      "description": "The total amount of Map-side buffer memory to use while sorting files (Expert-only configuration)",
-      "defaultValue": "200",
-      "displayType": "int",
-      "unit": "MB",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "MAPREDUCE"
-    },
-    {
-      "id": "puppet var",
-      "name": "io_sort_spill_percent",
-      "displayName": "Limit on buffer",
-      "description": "Percentage of sort buffer used for record collection (Expert-only configuration)",
-      "defaultValue": "0.9",
-      "displayType": "float",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "MAPREDUCE"
-    },
-    {
-      "id": "puppet var",
-      "name": "mapreduce_userlog_retainhours",
-      "displayName": "Job log retention (hours)",
-      "description": "The maximum time, in hours, for which the user-logs are to be retained after the job completion.",
-      "defaultValue": "24",
-      "displayType": "int",
-      "unit": "hours",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "MAPREDUCE"
-    },
-    {
-      "id": "puppet var",
-      "name": "maxtasks_per_job",
-      "displayName": "Maximum number tasks for a Job",
-      "description": "Maximum number of tasks for a single Job",
-      "defaultValue": "-1",
-      "displayType": "int",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "MAPREDUCE"
-    },
-    {
-      "id": "puppet var",
-      "name": "lzo_enabled",
-      "displayName": "LZO compression",
-      "description": "LZO compression enabled",
-      "defaultValue": false,
-      "displayType": "checkbox",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "MAPREDUCE"
-    },
-    {
-      "id": "puppet var",
-      "name": "snappy_enabled",
-      "displayName": "Snappy compression",
-      "description": "Snappy compression enabled",
-      "defaultValue": true,
-      "isReconfigurable": false,
-      "displayType": "checkbox",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "MAPREDUCE"
-    },
-    {
-      "id": "puppet var",
-      "name": "rca_enabled",
-      "displayName": "Enable Job Diagnostics",
-      "description": "Tools for tracing the path and troubleshooting the performance of MapReduce jobs",
-      "defaultValue": true,
-      "isReconfigurable": true,
-      "displayType": "checkbox",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "MAPREDUCE"
-    },
-    {
-      "id": "puppet var",
-      "name": "mapred_hosts_exclude",
-      "displayName": "Exclude hosts",
-      "description": "Exclude entered hosts",
-      "defaultValue": "mapred.exclude",
-      "displayType": "directories",
-      "isVisible": false,
-      "serviceName": "MAPREDUCE",
-      "domain": "global",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "mapred_hosts_include",
-      "displayName": "Include hosts",
-      "description": "Include enetered hosts",
-      "defaultValue": "mapred.include",
-      "displayType": "directories",
-      "isVisible": false,
-      "serviceName": "MAPREDUCE",
-      "domain": "global",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "mapred_jobstatus_dir",
-      "displayName": "Job Status directory",
-      "description": "Directory path to view job status",
-      "defaultValue": "file:////mapred/jobstatus",
-      "displayType": "advanced",
-      "isVisible": false,
-      "serviceName": "MAPREDUCE",
-      "domain": "global",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "task_controller",
-      "displayName": "task_controller",
-      "description": "",
-      "defaultValue": "org.apache.hadoop.mapred.DefaultTaskController",
-      "displayType": "advanced",
-      "isVisible": false,
-      "serviceName": "MAPREDUCE",
-      "domain": "global",
-      "category": "Advanced"
-    },
-    /*
-    {
-      "id": "puppet var",
-      "name": "jdk_location",
-      "displayName": "URL to download 64-bit JDK",
-      "description": "URL from where the 64-bit JDK binary can be downloaded",
-      "defaultValue": "",
-      "isRequired": false,
-      "isReconfigurable": false,
-      "displayType": "url",
-      "isVisible": true,
-      "serviceName": "MISC"
-    },
-    */
-    {
-      "id": "puppet var",
-      "name": "java64_home",
-      "displayName": "Path to 64-bit JAVA_HOME",
-      "description": "Path to 64-bit JAVA_HOME.  /usr/jdk/jdk1.6.0_31 is the default used by Ambari.  You can override this to a specific path that contains the JDK.  Note that the path must be valid on ALL hosts in your cluster.",
-      "defaultValue": "/usr/jdk64/jdk1.6.0_31",
-      "isRequired": true,
-      "isReconfigurable": false,
-      "displayType": "directory",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "MISC"
-    },
-    {
-      "id": "puppet var",
-      "name": "run_dir",
-      "displayName": "Hadoop run directory",
-      "description": "",
-      "defaultValue": "/var/run/hadoop",
-      "isRequired": false,
-      "isReconfigurable": false,
-      "displayType": "directory",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "MISC",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "hadoop_conf_dir",
-      "displayName": "Hadoop conf directory",
-      "description": "",
-      "defaultValue": "/etc/hadoop",
-      "isRequired": false,
-      "isReconfigurable": false,
-      "displayType": "directory",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "MISC",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "namenode_formatted_mark_dir",
-      "displayName": "Hadoop formatted mark directory",
-      "description": "",
-      "defaultValue": "/var/run/hadoop/hdfs/namenode/formatted/",
-      "isRequired": false,
-      "isReconfigurable": false,
-      "displayType": "directory",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "HDFS",
-      "category": "NameNode"
-    },
-    {
-      "id": "puppet var",
-      "name": "hcat_conf_dir",
-      "displayName": "HCat conf directory",
-      "description": "",
-      "defaultValue": "",
-      "isRequired": false,
-      "isReconfigurable": false,
-      "displayType": "directory",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "HDFS",
-      "category": "Advanced"
-    },
-    /*
-    {
-      "id": "puppet var",
-      "name": "hcat_metastore_port",
-      "displayName": "hcat_metastore_port",
-      "description": "",
-      "defaultValue": "/usr/lib/hcatalog/share/hcatalog",
-      "isRequired": true,
-      "isReconfigurable": false,
-      "displayType": "directory",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "MISC"
-    },
-    {
-      "id": "puppet var",
-      "name": "hcat_lib",
-      "displayName": "hcat_lib",
-      "description": "",
-      "defaultValue": "/usr/lib/hcatalog/share/hcatalog",
-      "isRequired": true,
-      "isReconfigurable": false,
-      "displayType": "directory",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "MISC"
-    },
-
-    {
-      "id": "puppet var",
-      "name": "hcat_dbroot",
-      "displayName": "hcat_dbroot",
-      "description": "",
-      "defaultValue": "/usr/lib/hcatalog/share/hcatalog",
-      "isRequired": true,
-      "isReconfigurable": false,
-      "displayType": "directory",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "MISC"
-    },
-
-    {
-      "id": "puppet var",
-      "name": "hadoop_log_dir",
-      "displayName": "Hadoop Log Dir",
-      "description": "Directory for Hadoop log files",
-      "defaultValue": "/var/log/hadoop",
-      "isReconfigurable": false,
-      "displayType": "directory",
-      "isVisible": true,
-      "serviceName": "MISC",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "hadoop_pid_dir",
-      "displayName": "Hadoop PID Dir",
-      "description": "Directory in which the pid files for Hadoop processes will be created",
-      "defaultValue": "/var/run/hadoop",
-      "isReconfigurable": false,
-      "displayType": "directory",
-      "isVisible": true,
-      "serviceName": "MISC",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "using_local_repo",
-      "displayName": "Whether a local repo is being used",
-      "description": "Whether a local repo is being used",
-      "defaultValue": false,
-      "isReconfigurable": false,
-      "displayType": "checkbox",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "MISC"
-    },
-    {
-      "id": "puppet var",
-      "name": "yum_repo_file",
-      "displayName": "Path to local repo file",
-      "description": "Path to local repository file that configures from where to download software packages",
-      "defaultValue": "/etc/yum.repos.d/hdp.repo",
-      "isReconfigurable": false,
-      "displayType": "directory",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "MISC"
-    },
-    */
-    {
-      "id": "puppet var",
-      "name": "hdfs_user",
-      "displayName": "HDFS User",
-      "description": "User to run HDFS as",
-      "defaultValue": "hdfs",
-      "isReconfigurable": false,
-      "displayType": "user",
-      "isVisible": true,
-      "serviceName": "MISC",
-      "domain": "global",
-      "category": "Users and Groups"
-    },
-    {
-      "id": "puppet var",
-      "name": "mapred_user",
-      "displayName": "MapReduce User",
-      "description": "User to run MapReduce as",
-      "defaultValue": "mapred",
-      "isReconfigurable": false,
-      "displayType": "user",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "MISC",
-      "category": "Users and Groups"
-    },
-    {
-      "id": "puppet var",
-      "name": "hbase_user",
-      "displayName": "HBase User",
-      "description": "User to run HBase as",
-      "defaultValue": "hbase",
-      "isReconfigurable": false,
-      "displayType": "user",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "MISC",
-      "category": "Users and Groups"
-    },
-    {
-      "id": "puppet var",
-      "name": "hive_user",
-      "displayName": "Hive User",
-      "description": "User to run Hive as",
-      "defaultValue": "hive",
-      "isReconfigurable": false,
-      "displayType": "user",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "MISC",
-      "category": "Users and Groups"
-    },
-    {
-      "id": "puppet var",
-      "name": "hcat_user",
-      "displayName": "HCat User",
-      "description": "User to run HCatalog as",
-      "defaultValue": "hcat",
-      "isReconfigurable": false,
-      "displayType": "user",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "MISC",
-      "category": "Users and Groups"
-    },
-    {
-      "id": "puppet var",
-      "name": "webhcat_user",
-      "displayName": "WebHCat User",
-      "description": "User to run WebHCat as",
-      "defaultValue": "hcat",
-      "isReconfigurable": false,
-      "displayType": "user",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "MISC",
-      "category": "Users and Groups"
-    },
-    {
-      "id": "puppet var",
-      "name": "oozie_user",
-      "displayName": "Oozie User",
-      "description": "User to run Oozie as",
-      "defaultValue": "oozie",
-      "isReconfigurable": false,
-      "displayType": "user",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "MISC",
-      "category": "Users and Groups"
-    },
-    /*
-    {
-      "id": "puppet var",
-      "name": "oozie_conf_dir",
-      "displayName": "Oozie conf dir",
-      "description": "",
-      "defaultValue": "/etc/oozie",
-      "isReconfigurable": false,
-      "displayType": "directory",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "MISC",
-      "category": "General"
-    },
-
-    {
-      "id": "puppet var",
-      "name": "pig_conf_dir",
-      "displayName": "Pig conf dir",
-      "description": "",
-      "defaultValue": "/etc/pig",
-      "isReconfigurable": false,
-      "displayType": "directory",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "MISC",
-      "category": "General"
-    },
-    */
-    /*
-    {
-      "id": "puppet var",
-      "name": "sqoop_conf_dir",
-      "displayName": "sqoop conf dir",
-      "description": "",
-      "defaultValue": "/etc/sqoop",
-      "isReconfigurable": false,
-      "displayType": "directory",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "MISC",
-      "category": "General"
-    },
-    {
-      "id": "puppet var",
-      "name": "sqoop_lib",
-      "displayName": "sqoop conf dir",
-      "description": "",
-      "defaultValue": "/usr/lib/sqoop/lib/",
-      "isReconfigurable": false,
-      "displayType": "directory",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "MISC",
-      "category": "General"
-    },
-    */
-    {
-      "id": "puppet var",
-      "name": "zk_user",
-      "displayName": "ZooKeeper User",
-      "description": "User to run ZooKeeper as",
-      "defaultValue": "zookeeper",
-      "isReconfigurable": false,
-      "displayType": "user",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "MISC",
-      "category": "Users and Groups"
-    },
-    {
-      "id": "puppet var",
-      "name": "user_group",
-      "displayName": "Group",
-      "description": "Group that the users specified above belong to",
-      "defaultValue": "hadoop",
-      "isReconfigurable": false,
-      "displayType": "user",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "MISC",
-      "category": "Users and Groups"
-    },
-    {
-      "id": "puppet var",
-      "name": "nagios_user",
-      "displayName": "Nagios username",
-      "description": "Nagios username",
-      "defaultValue": "nagios",
-      "isReconfigurable": false,
-      "displayType": "user",
-      "domain": "global",
-      "isVisible": false,
-      "serviceName": "NAGIOS"
-    },
-    {
-      "id": "puppet var",
-      "name": "nagios_group",
-      "displayName": "Nagios username",
-      "description": "Nagios username",
-      "defaultValue": "nagios",
-      "isReconfigurable": false,
-      "displayType": "user",
-      "domain": "global",
-      "isVisible": false,
-      "serviceName": "NAGIOS"
-    },
-    {
-      "id": "puppet var",
-      "name": "nagios_web_login",
-      "displayName": "Nagios Admin username",
-      "description": "Nagios Web UI Admin username",
-      "defaultValue": "nagiosadmin",
-      "isReconfigurable": false,
-      "displayType": "user",
-      "domain": "global",
-      "isVisible": true,
-      "serviceName": "NAGIOS"
-    },
-    {
-      "id": "puppet var",
-      "name": "nagios_web_password",
-      "displayName": "Nagios Admin password",
-      "description": "Nagios Web UI Admin password",
-      "defaultValue": "",
-      "isReconfigurable": false,
-      "displayType": "password",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "NAGIOS"
-    },
-    {
-      "id": "puppet var",
-      "name": "nagios_contact",
-      "displayName": "Hadoop Admin email",
-      "description": "Hadoop Administrator email for alert notification",
-      "defaultValue": "",
-      "displayType": "email",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "NAGIOS"
-    },
-    {
-      "id": "puppet var",
-      "name": "oozieserver_host",
-      "displayName": "Oozie Server host",
-      "value": "",
-      "defaultValue": "",
-      "description": "The host that has been assigned to run Oozie Server",
-      "displayType": "masterHost",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "OOZIE",
-      "category": "Oozie Server"
-    },
-    /*
-    {
-      "id": "puppet var",
-      "name": "oozie_database",
-      "displayName": "Oozie Database",
-      "value": "",
-      "defaultValue": "New PostgreSQL Database",
-      "options": [
-        {
-          displayName: 'New PostgreSQL Database',
-          foreignKeys: ['oozie_ambari_database', 'oozie_ambari_host']
-        },
-        {
-          displayName: 'Existing Database',
-          foreignKeys: ['oozie_existing_database', 'oozie_existing_host']
-        }
-      ],
-      "description": "PostgreSQL will be installed by ambari. Any other database will have to be installed by the user.",
-      "displayType": "radio button",
-      "radioName": "oozie-database",
-      "isVisible": true,
-      "serviceName": "OOZIE",
-      "category": "Oozie Server"
-    },
-    {
-      "id": "puppet var",
-      "name": "oozie_existing_database",
-      "displayName": "Oozie Database",
-      "value": "",
-      "defaultValue": "MySQL",
-      "description": "Select the database, if you already have existing one for Oozie.",
-      "displayType": "combobox",
-      "isVisible": false,
-      "options": ['MySQL', 'PostgreSQL'],
-      "serviceName": "OOZIE",
-      "category": "Oozie Server"
-    },
-    {
-      "id": "puppet var",
-      "name": "oozie_existing_host",
-      "displayName": "Database host",
-      "description": "Select the host on which the existing database is hosted.",
-      "defaultValue": "",
-      "isReconfigurable": false,
-      "displayType": "host",
-      "isVisible": false,
-      "serviceName": "OOZIE",
-      "category": "Oozie Server"
-    },
-    {
-      "id": "puppet var",
-      "name": "oozie_ambari_database",
-      "displayName": "Oozie Database",
-      "value": "",
-      "defaultValue": "PostgreSQL",
-      "description": "PostgreSQL will be installed by ambari.",
-      "displayType": "masterHost",
-      "isVisible": true,
-      "serviceName": "OOZIE",
-      "category": "Oozie Server"
-    },
-    {
-      "id": "puppet var",
-      "name": "oozie_ambari_host",
-      "value": "",
-      "defaultValue": "",
-      "displayName": "PostgreSQL host",
-      "description": "Host  on which the PostgreSQL database will be created by ambari. ",
-      "isReconfigurable": false,
-      "displayType": "masterHost",
-      "isVisible": true,
-      "serviceName": "OOZIE",
-      "category": "Oozie Server"
-    },
-    {
-      "id": "puppet var",
-      "name": "oozie_database_name",
-      "displayName": "Database name",
-      "description": "Database name used for the Oozie",
-      "defaultValue": "oozie",
-      "isReconfigurable": false,
-      "displayType": "host",
-      "isVisible": true,
-      "serviceName": "OOZIE",
-      "category": "Oozie Server"
-    },
-    {
-      "id": "puppet var",
-      "name": "oozie_metastore_user_name",
-      "displayName": "Database user",
-      "description": "Database user name to use to connect to the database",
-      "defaultValue": "oozie",
-      "isReconfigurable": false,
-      "displayType": "user",
-      "isVisible": true,
-      "serviceName": "OOZIE",
-      "category": "Oozie Server"
-    },
-    {
-      "id": "puppet var",
-      "name": "oozie_metastore_user_passwd",
-      "displayName": "Database password",
-      "description": "Database password to use to connect to the PostgreSQL database",
-      "defaultValue": "",
-      "isReconfigurable": false,
-      "displayType": "password",
-      "isVisible": true,
-      "serviceName": "OOZIE",
-      "category": "Oozie Server"
-    },
-    */
-    {
-      "id": "puppet var",
-      "name": "oozie_data_dir",
-      "displayName": "Oozie Data Dir",
-      "description": "Data directory in which the Oozie DB exists",
-      "defaultValue": "",
-      "defaultDirectory": "/hadoop/oozie/data",
-      "isReconfigurable": false,
-      "displayType": "directory",
-      "isVisible": true,
-      "isRequired": false,
-      "domain": "global",
-      "serviceName": "OOZIE",
-      "category": "Oozie Server"
-    },
-    {
-      "id": "puppet var",
-      "name": "oozie_log_dir",
-      "displayName": "Oozie Log Dir",
-      "description": "Directory for oozie logs",
-      "defaultValue": "/var/log/oozie",
-      "isReconfigurable": false,
-      "displayType": "directory",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "OOZIE",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "oozie_pid_dir",
-      "displayName": "Oozie PID Dir",
-      "description": "Directory in which the pid files for oozie processes will be created",
-      "defaultValue": "/var/run/oozie",
-      "isReconfigurable": false,
-      "displayType": "directory",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "OOZIE",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "zookeeperserver_hosts",
-      "displayName": "ZooKeeper Server hosts",
-      "value": "",
-      "defaultValue": "",
-      "description": "The host that has been assigned to run ZooKeeper Server",
-      "displayType": "slaveHosts",
-      "isVisible": true,
-      "isRequired": false,
-      "serviceName": "ZOOKEEPER",
-      "category": "ZooKeeper Server"
-    },
-    {
-      "id": "puppet var",
-      "name": "zk_data_dir",
-      "displayName": "ZooKeeper directory",
-      "description": "Data directory for ZooKeeper",
-      "defaultValue": "",
-      "defaultDirectory": "/hadoop/zookeeper",
-      "isReconfigurable": false,
-      "displayType": "directory",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "ZOOKEEPER",
-      "category": "ZooKeeper Server"
-    },
-    {
-      "id": "puppet var",
-      "name": "zk_log_dir",
-      "displayName": "ZooKeeper Log Dir",
-      "description": "Directory for ZooKeeper log files",
-      "defaultValue": "/var/log/zookeeper",
-      "isReconfigurable": false,
-      "displayType": "directory",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "ZOOKEEPER",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "zk_pid_dir",
-      "displayName": "ZooKeeper PID Dir",
-      "description": "Directory in which the pid files for zookeeper processes will be created",
-      "defaultValue": "/var/run/zookeeper",
-      "isReconfigurable": false,
-      "displayType": "directory",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "ZOOKEEPER",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "zk_pid_file",
-      "displayName": "ZooKeeper PID File",
-      "description": "",
-      "defaultValue": "/var/run/zookeeper/zookeeper_server.pid",
-      "isReconfigurable": false,
-      "displayType": "directory",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "ZOOKEEPER",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "tickTime",
-      "displayName": "Length of single Tick",
-      "description": "The length of a single tick in milliseconds, which is the basic time unit used by ZooKeeper",
-      "defaultValue": "2000",
-      "displayType": "int",
-      "unit": "ms",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "ZOOKEEPER",
-      "category": "ZooKeeper Server"
-    },
-    {
-      "id": "puppet var",
-      "name": "initLimit",
-      "displayName": "Ticks to allow for sync at Init",
-      "description": "Amount of time, in ticks to allow followers to connect and sync to a leader",
-      "defaultValue": "10",
-      "displayType": "int",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "ZOOKEEPER",
-      "category": "ZooKeeper Server"
-    },
-    {
-      "id": "puppet var",
-      "name": "syncLimit",
-      "displayName": "Ticks to allow for sync at Runtime",
-      "description": "Amount of time, in ticks to allow followers to connect",
-      "defaultValue": "5",
-      "displayType": "int",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "ZOOKEEPER",
-      "category": "ZooKeeper Server"
-    },
-    {
-      "id": "puppet var",
-      "name": "clientPort",
-      "displayName": "Port for running ZK Server",
-      "description": "Port for running ZooKeeper server",
-      "defaultValue": "2181",
-      "displayType": "int",
-      "isVisible": true,
-      "domain": "global",
-      "serviceName": "ZOOKEEPER",
-      "category": "ZooKeeper Server"
-    }
-    /*
-    {
-      "id": "puppet var",
-      "name": "zk_conf_dir",
-      "displayName": "zk_conf_dir",
-      "description": "",
-      "defaultValue": "/etc/conf/",
-      "displayType": "directory",
-      "isVisible": false,
-      "domain": "global",
-      "serviceName": "MISC",
-      "category": "General"
-    },
-    {
-      "id": "puppet var",
-      "name": "rrdcached_base_dir",
-      "displayName": "Ganglia rrd cached base directory",
-      "description": "Default directory for saving the rrd files on ganglia server",
-      "defaultValue": "/var/lib/ganglia/rrds",
-      "displayType": "directory",
-      "domain": "global",
-      "serviceName": "MISC",
-      "category": "General"
-    }
-     */
-  ]
-};
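Note for reviewers: each entry deleted above is a declarative descriptor; the wizard renders only what serviceName, isVisible, displayType and category select for. A minimal sketch of that selection logic, assuming the array is loaded as plain objects shaped like the entries above (the require path and the helper name visiblePropertiesFor are illustrative, not Ambari's actual API):

var configProperties = require('data/config_properties'); // hypothetical module path

// Collect the descriptors the wizard would render for one service,
// grouped by their "category" field ("General" when none is set).
function visiblePropertiesFor(serviceName) {
  return configProperties
    .filter(function (p) { return p.serviceName === serviceName && p.isVisible; })
    .reduce(function (groups, p) {
      var category = p.category || 'General';
      (groups[category] = groups[category] || []).push(p);
      return groups;
    }, {});
}

// Example: visiblePropertiesFor('ZOOKEEPER') groups zk_data_dir, tickTime,
// initLimit, syncLimit and clientPort under "ZooKeeper Server".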
diff --git a/branch-1.2/ambari-web/app/data/custom_configs.js b/branch-1.2/ambari-web/app/data/custom_configs.js
deleted file mode 100644
index 302c680..0000000
--- a/branch-1.2/ambari-web/app/data/custom_configs.js
+++ /dev/null
@@ -1,110 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-module.exports =
-  [
-    {
-      "id": "conf-site",
-      "name": "core-site",
-      "displayName": "Custom Hadoop Configs",
-      "value": "",
-      "defaultValue": "",
-      "description": "Enter in \"key=value\" format to set core-site.xml parameters not exposed through this page.<br> New line is the delimiter for every key-value pair.",
-      "displayType": "custom",
-      "isVisible": true,
-      "isRequired": false,
-      "isReconfigurable": false,
-      "serviceName": "HDFS",
-      "category": "Advanced"
-    },
-    {
-      "id": "conf-site",
-      "name": "hdfs-site",
-      "displayName": "Custom HDFS Configs",
-      "value": "",
-      "defaultValue": "",
-      "description": "Enter in \"key=value\" format to set hdfs-site.xml parameters not exposed through this page.<br> New line is the delimiter for every key-value pair.",
-      "displayType": "custom",
-      "isVisible": true,
-      "isRequired": false,
-      "serviceName": "HDFS",
-      "category": "Advanced"
-    },
-    {
-      "id": "conf-site",
-      "name": "mapred-site",
-      "displayName": "Custom MapReduce Configs",
-      "value": "",
-      "defaultValue": "",
-      "description": "Enter in \"key=value\" format to set mapred-site.xml parameters not exposed through this page.<br> New line is the delimiter for every key-value pair.",
-      "displayType": "custom",
-      "isVisible": true,
-      "isRequired": false,
-      "serviceName": "MAPREDUCE",
-      "category": "Advanced"
-    },
-    {
-      "id": "conf-site",
-      "name": "hbase-site",
-      "displayName": "Custom HBase Configs",
-      "description": "Enter in \"key=value\" format to set hbase-site.xml parameters not exposed through this page.<br> New line is the delimiter for every key-value pair.",
-      "defaultValue": "",
-      "isRequired": false,
-      "displayType": "custom",
-      "isVisible": true,
-      "serviceName": "HBASE",
-      "category": "Advanced"
-    },
-    {
-      "id": "conf-site",
-      "name": "hive-site",
-      "displayName": "Custom Hive Configs",
-      "description": "Enter in \"key=value\" format to set hive-site.xml parameters not exposed through this page.<br> New line is the delimiter for every key-value pair.",
-      "defaultValue": "",
-      "isRequired": false,
-      "displayType": "custom",
-      "isVisible": true,
-      "serviceName": "HIVE",
-      "category": "Advanced"
-    },
-    {
-      "id": "conf-site",
-      "name": "webhcat-site",
-      "displayName": "Custom WebHCat Configs",
-      "description": "Enter in \"key=value\" format to set webhcat-site.xml parameters not exposed through this page.<br> New line is the delimiter for every key-value pair.",
-      "defaultValue": "",
-      "isRequired": false,
-      "displayType": "custom",
-      "isVisible": true,
-      "serviceName": "WEBHCAT",
-      "category": "Advanced"
-    },
-    {
-      "id": "conf-site",
-      "name": "oozie-site",
-      "displayName": "Custom Oozie Configs",
-      "description": "Enter in \"key=value\" format to set oozie-site.xml parameters not exposed through this page.<br> New line is the delimiter for every key-value pair.",
-      "defaultValue": "",
-      "isRequired": false,
-      "displayType": "custom",
-      "isVisible": true,
-      "serviceName": "OOZIE",
-      "category": "Advanced"
-    }
-  ]
\ No newline at end of file
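The descriptions above state the contract for these custom config boxes: newline-delimited "key=value" pairs for settings not exposed elsewhere in the UI. A minimal sketch of a parser honoring that contract (illustrative only; not the parser ambari-web actually ships):

// Parse newline-delimited "key=value" text into a plain object.
function parseCustomConfig(text) {
  var result = {};
  text.split('\n').forEach(function (line) {
    line = line.trim();
    if (!line) { return; }   // skip blank lines
    var eq = line.indexOf('=');
    if (eq < 1) { return; }  // ignore malformed lines; a real UI would flag them
    result[line.slice(0, eq).trim()] = line.slice(eq + 1).trim();
  });
  return result;
}

// parseCustomConfig('dfs.replication=3\nio.file.buffer.size=131072')
// => { 'dfs.replication': '3', 'io.file.buffer.size': '131072' }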
diff --git a/branch-1.2/ambari-web/app/data/mock/hosts.js b/branch-1.2/ambari-web/app/data/mock/hosts.js
deleted file mode 100644
index 2a5b6f3..0000000
--- a/branch-1.2/ambari-web/app/data/mock/hosts.js
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-module.exports = [
-  {
-    hostname: 'host0001.company.com'
-  },
-  {
-    hostname: 'host0002.company.com'
-  },
-  {
-    hostname: 'host0003.company.com'
-  },
-  {
-    hostname: 'host0004.company.com'
-  },
-  {
-    hostname: 'host0005.company.com'
-  }
-];
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/data/mock/master_component_hosts.js b/branch-1.2/ambari-web/app/data/mock/master_component_hosts.js
deleted file mode 100644
index 09fb9f9..0000000
--- a/branch-1.2/ambari-web/app/data/mock/master_component_hosts.js
+++ /dev/null
@@ -1,76 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-module.exports = [
-  {
-    componentName: 'NameNode',
-    hosts: [
-      'host0001.company.com'
-    ]
-  },
-  {
-    componentName: 'SNameNode',
-    hosts: [
-      'host0002.company.com'
-    ]
-  },
-  {
-    componentName: 'JobTracker',
-    hosts: [
-      'host0003.company.com'
-    ]
-  },
-  {
-    componentName: 'Hive Metastore',
-    hosts: [
-      'host0003.company.com'
-    ]
-  },
-  {
-    componentName: 'HBase Master',
-    hosts: [
-      'host0003.company.com'
-    ]
-  },
-  {
-    componentName: 'Oozie Server',
-    hosts: [
-      'host0002.company.com'
-    ]
-  },
-  {
-    componentName: 'Nagios Server',
-    hosts: [
-      'host0002.company.com'
-    ]
-  },
-  {
-    componentName: 'Ganglia Collector',
-    hosts: [
-      'host0002.company.com'
-    ]
-  },
-  {
-    componentName: 'ZooKeeper Server',
-    hosts: [
-      'host0001.company.com',
-      'host0002.company.com',
-      'host0003.company.com'
-    ]
-  }
-]
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/data/mock/slave_component_hosts.js b/branch-1.2/ambari-web/app/data/mock/slave_component_hosts.js
deleted file mode 100644
index 48a1c38..0000000
--- a/branch-1.2/ambari-web/app/data/mock/slave_component_hosts.js
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-module.exports = [
-  {
-    componentName: 'DataNode',
-    hosts: [
-      {
-        hostname: 'host0001.company.com',
-        group: 'Default'
-      },
-      {
-        hostname: 'host0002.company.com',
-        group: 'Default'
-      },
-      {
-        hostname: 'host0003.company.com',
-        group: 'Default'
-      }
-    ]
-  },
-  {
-    componentName: 'RegionServer',
-    hosts: [
-      {
-        hostname: 'host0001.company.com',
-        group: 'Default'
-      },
-      {
-        hostname: 'host0002.company.com',
-        group: 'Default'
-      }
-    ]
-  },
-  {
-    componentName: 'TaskTracker',
-    hosts: [
-      {
-        hostname: 'host0002.company.com',
-        group: 'Default'
-      },
-      {
-        hostname: 'host0003.company.com',
-        group: 'Default'
-      }
-    ]
-  }
-]
\ No newline at end of file
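Taken together, the two mock fixtures above map components to hosts in opposite shapes: master hosts are plain strings, slave hosts are { hostname, group } objects. Inverting both into the per-host view a review screen would show is a one-pass fold; a sketch under those shape assumptions (componentsByHost is a hypothetical helper):

function componentsByHost(masterComponents, slaveComponents) {
  var byHost = {};
  function add(host, component) {
    (byHost[host] = byHost[host] || []).push(component);
  }
  masterComponents.forEach(function (c) {
    c.hosts.forEach(function (h) { add(h, c.componentName); });           // hosts are strings
  });
  slaveComponents.forEach(function (c) {
    c.hosts.forEach(function (h) { add(h.hostname, c.componentName); });  // hosts are objects
  });
  return byHost;
}

// With the fixtures above, 'host0003.company.com' maps to JobTracker,
// Hive Metastore, HBase Master, ZooKeeper Server, DataNode and TaskTracker.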
diff --git a/branch-1.2/ambari-web/app/data/mock/step3_hosts.js b/branch-1.2/ambari-web/app/data/mock/step3_hosts.js
deleted file mode 100644
index fa6e71f..0000000
--- a/branch-1.2/ambari-web/app/data/mock/step3_hosts.js
+++ /dev/null
@@ -1,99 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-module.exports = new Ember.Set([
-  {
-    name: '192.168.1.1',
-    bootStatus: 'pending',
-    cpu: '2',
-    memory: '2'
-  },
-  {
-    name: '192.168.1.2',
-    bootStatus: 'success',
-    cpu: '4',
-    memory: '4'
-  },
-  {
-    name: '192.168.1.3',
-    bootStatus: 'pending',
-    cpu: '2',
-    memory: '2'
-  },
-  {
-    name: '192.168.1.4',
-    bootStatus: 'pending',
-    cpu: '2',
-    memory: '4'
-  },
-  {
-    name: '192.168.1.5',
-    bootStatus: 'success',
-    cpu: '2',
-    memory: '4'
-  },
-  {
-    name: '192.168.1.6',
-    bootStatus: 'pending',
-    cpu: '4',
-    memory: '8'
-  },
-  {
-    name: '192.168.1.7',
-    bootStatus: 'success',
-    cpu: '4',
-    memory: '4'
-  },
-  {
-    name: '192.168.1.8',
-    bootStatus: 'success',
-    cpu: '4',
-    memory: '2'
-  },
-  {
-    name: '192.168.1.9',
-    bootStatus: 'success',
-    cpu: '2',
-    memory: '4'
-  },
-  {
-    name: '192.168.1.10',
-    bootStatus: 'pending',
-    cpu: '4',
-    memory: '2'
-  },
-  {
-    name: '192.168.1.11',
-    bootStatus: 'success',
-    cpu: '2',
-    memory: '2'
-  },
-  {
-    name: '192.168.1.12',
-    bootStatus: 'pending',
-    cpu: '2',
-    memory: '4'
-  },
-  {
-    name: '192.168.1.13',
-    bootStatus: 'success',
-    cpu: '4',
-    memory: '8'
-  }
-]);
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/data/mock/step3_pollData.js b/branch-1.2/ambari-web/app/data/mock/step3_pollData.js
deleted file mode 100644
index 34f299a..0000000
--- a/branch-1.2/ambari-web/app/data/mock/step3_pollData.js
+++ /dev/null
@@ -1,99 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-module.exports = new Ember.Set([
-  {
-    name: '192.168.1.1',
-    status: 'error',
-    cpu: '2',
-    memory: '2'
-  },
-  {
-    name: '192.168.1.2',
-    status: 'success',
-    cpu: '4',
-    memory: '4'
-  },
-  {
-    name: '192.168.1.3',
-    status: 'error',
-    cpu: '2',
-    memory: '2'
-  },
-  {
-    name: '192.168.1.4',
-    status: 'success',
-    cpu: '2',
-    memory: '4'
-  },
-  {
-    name: '192.168.1.5',
-    status: 'success',
-    cpu: '2',
-    memory: '4'
-  },
-  {
-    name: '192.168.1.6',
-    status: 'success',
-    cpu: '4',
-    memory: '8'
-  },
-  {
-    name: '192.168.1.7',
-    status: 'success',
-    cpu: '4',
-    memory: '4'
-  },
-  {
-    name: '192.168.1.8',
-    status: 'success',
-    cpu: '4',
-    memory: '2'
-  },
-  {
-    name: '192.168.1.9',
-    status: 'success',
-    cpu: '2',
-    memory: '4'
-  },
-  {
-    name: '192.168.1.10',
-    status: 'success',
-    cpu: '4',
-    memory: '2'
-  },
-  {
-    name: '192.168.1.11',
-    status: 'success',
-    cpu: '2',
-    memory: '2'
-  },
-  {
-    name: '192.168.1.12',
-    status: 'success',
-    cpu: '2',
-    memory: '4'
-  },
-  {
-    name: '192.168.1.13',
-    status: 'success',
-    cpu: '4',
-    memory: '8'
-  }
-]);
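This poll fixture drives the step-3 host confirmation UI, where polling can stop once no host is still in a transient state. A small summary helper in that spirit (a sketch; the field names come from the fixture, the helper itself is hypothetical, and Ember.Set exposes forEach, so the same code works on a plain array or on the set exported above):

function summarizeBootstrap(hosts) {
  var counts = { success: 0, error: 0, pending: 0 };
  hosts.forEach(function (h) {
    counts[h.status] = (counts[h.status] || 0) + 1;
  });
  counts.done = counts.pending === 0; // no host still registering
  return counts;
}

// For the fixture above: { success: 11, error: 2, pending: 0, done: true }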
diff --git a/branch-1.2/ambari-web/app/data/mock/step3_single_host.js b/branch-1.2/ambari-web/app/data/mock/step3_single_host.js
deleted file mode 100644
index 32f9214..0000000
--- a/branch-1.2/ambari-web/app/data/mock/step3_single_host.js
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-
-module.exports = new Ember.Set([
-  {
-    name: '192.168.1.2',
-    bootStatus: 'success',
-    cpu: '4',
-    memory: '4'
-  }
-]);
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/data/mock/step9PolledData/pollData_1.js b/branch-1.2/ambari-web/app/data/mock/step9PolledData/pollData_1.js
deleted file mode 100644
index cf7d5e0..0000000
--- a/branch-1.2/ambari-web/app/data/mock/step9PolledData/pollData_1.js
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-
-module.exports = {
-  "href" : "http://localhost:8080/api/clusters/mycluster/requests/1",
-  "Requests" : {
-    "id" : 1
-  },
-  "tasks" : [
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/1/tasks/1",
-      "Tasks" : {
-        "id" : "1",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "PENDING",
-        "command" : "INSTALL",
-        "start_time" : "-1",
-        "role" : "DATANODE",
-        "stderr" : "",
-        "host_name" : "localhost.localdomain",
-        "stage_id" : "1"
-      }
-    },
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/1/tasks/2",
-      "Tasks" : {
-        "id" : "2",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "PENDING",
-        "command" : "INSTALL",
-        "start_time" : "-1",
-        "role" : "NAMENODE",
-        "stderr" : "",
-        "host_name" : "localhost.localdomain",
-        "stage_id" : "1"
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/data/mock/step9PolledData/pollData_2.js b/branch-1.2/ambari-web/app/data/mock/step9PolledData/pollData_2.js
deleted file mode 100644
index aeae74a..0000000
--- a/branch-1.2/ambari-web/app/data/mock/step9PolledData/pollData_2.js
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-
-module.exports = {
-  "href" : "http://localhost:8080/api/clusters/mycluster/requests/1",
-  "Requests" : {
-    "id" : 1
-  },
-  "tasks" : [
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/1/tasks/1",
-      "Tasks" : {
-        "id" : "1",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "QUEUED",
-        "command" : "INSTALL",
-        "start_time" : "-1",
-        "role" : "DATANODE",
-        "stderr" : "",
-        "host_name" : "localhost.localdomain",
-        "stage_id" : "1"
-      }
-    },
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/1/tasks/2",
-      "Tasks" : {
-        "id" : "2",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "QUEUED",
-        "command" : "INSTALL",
-        "start_time" : "-1",
-        "role" : "NAMENODE",
-        "stderr" : "",
-        "host_name" : "localhost.localdomain",
-        "stage_id" : "1"
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/data/mock/step9PolledData/pollData_3.js b/branch-1.2/ambari-web/app/data/mock/step9PolledData/pollData_3.js
deleted file mode 100644
index 0b30b76..0000000
--- a/branch-1.2/ambari-web/app/data/mock/step9PolledData/pollData_3.js
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-
-module.exports = {
-  "href" : "http://localhost:8080/api/clusters/mycluster/requests/1",
-  "Requests" : {
-    "id" : 1
-  },
-  "tasks" : [
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/1/tasks/1",
-      "Tasks" : {
-        "id" : "1",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "QUEUED",
-        "command" : "INSTALL",
-        "start_time" : "-1",
-        "role" : "DATANODE",
-        "stderr" : "",
-        "host_name" : "localhost.localdomain",
-        "stage_id" : "1"
-      }
-    },
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/1/tasks/2",
-      "Tasks" : {
-        "id" : "2",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "IN_PROGRESS",
-        "command" : "INSTALL",
-        "start_time" : "-1",
-        "role" : "NAMENODE",
-        "stderr" : "",
-        "host_name" : "localhost.localdomain",
-        "stage_id" : "1"
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/data/mock/step9PolledData/pollData_4.js b/branch-1.2/ambari-web/app/data/mock/step9PolledData/pollData_4.js
deleted file mode 100644
index ff9c15d..0000000
--- a/branch-1.2/ambari-web/app/data/mock/step9PolledData/pollData_4.js
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-
-module.exports = {
-  "href" : "http://localhost:8080/api/clusters/mycluster/requests/1",
-  "Requests" : {
-    "id" : 1
-  },
-  "tasks" : [
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/1/tasks/1",
-      "Tasks" : {
-        "id" : "1",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "IN_PROGRESS",
-        "command" : "INSTALL",
-        "start_time" : "-1",
-        "role" : "DATANODE",
-        "stderr" : "",
-        "host_name" : "localhost.localdomain",
-        "stage_id" : "1"
-      }
-    },
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/1/tasks/2",
-      "Tasks" : {
-        "id" : "2",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "COMPLETED",
-        "command" : "INSTALL",
-        "start_time" : "-1",
-        "role" : "NAMENODE",
-        "stderr" : "",
-        "host_name" : "localhost.localdomain",
-        "stage_id" : "1"
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/data/mock/step9PolledData/pollData_5.js b/branch-1.2/ambari-web/app/data/mock/step9PolledData/pollData_5.js
deleted file mode 100644
index be5d504..0000000
--- a/branch-1.2/ambari-web/app/data/mock/step9PolledData/pollData_5.js
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-
-module.exports = {
-  "href" : "http://localhost:8080/api/clusters/mycluster/requests/1",
-  "Requests" : {
-    "id" : 1
-  },
-  "tasks" : [
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/1/tasks/1",
-      "Tasks" : {
-        "id" : "1",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "COMPLETED",
-        "command" : "INSTALL",
-        "start_time" : "-1",
-        "role" : "DATANODE",
-        "stderr" : "",
-        "host_name" : "localhost.localdomain",
-        "stage_id" : "1"
-      }
-    },
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/1/tasks/2",
-      "Tasks" : {
-        "id" : "2",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "COMPLETED",
-        "command" : "INSTALL",
-        "start_time" : "-1",
-        "role" : "NAMENODE",
-        "stderr" : "",
-        "host_name" : "localhost.localdomain",
-        "stage_id" : "1"
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/data/mock/step9PolledData/pollData_6.js b/branch-1.2/ambari-web/app/data/mock/step9PolledData/pollData_6.js
deleted file mode 100644
index 8598e11..0000000
--- a/branch-1.2/ambari-web/app/data/mock/step9PolledData/pollData_6.js
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-
-module.exports = {
-  "href" : "http://localhost:8080/api/clusters/mycluster/requests/1",
-  "Requests" : {
-    "id" : 1
-  },
-  "tasks" : [
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/1/tasks/1",
-      "Tasks" : {
-        "id" : "1",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "QUEUED",
-        "command" : "START",
-        "start_time" : "-1",
-        "role" : "DATANODE",
-        "stderr" : "",
-        "host_name" : "localhost.localdomain",
-        "stage_id" : "1"
-      }
-    },
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/1/tasks/2",
-      "Tasks" : {
-        "id" : "2",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "QUEUED",
-        "command" : "START",
-        "start_time" : "-1",
-        "role" : "NAMENODE",
-        "stderr" : "",
-        "host_name" : "localhost.localdomain",
-        "stage_id" : "1"
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/data/mock/step9PolledData/pollData_7.js b/branch-1.2/ambari-web/app/data/mock/step9PolledData/pollData_7.js
deleted file mode 100644
index 0dd6ac5..0000000
--- a/branch-1.2/ambari-web/app/data/mock/step9PolledData/pollData_7.js
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-
-module.exports = {
-  "href" : "http://localhost:8080/api/clusters/mycluster/requests/1",
-  "Requests" : {
-    "id" : 1
-  },
-  "tasks" : [
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/1/tasks/1",
-      "Tasks" : {
-        "id" : "1",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "QUEUED",
-        "command" : "START",
-        "start_time" : "-1",
-        "role" : "DATANODE",
-        "stderr" : "",
-        "host_name" : "localhost.localdomain",
-        "stage_id" : "1"
-      }
-    },
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/1/tasks/2",
-      "Tasks" : {
-        "id" : "2",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "IN_PROGRESS",
-        "command" : "START",
-        "start_time" : "-1",
-        "role" : "NAMENODE",
-        "stderr" : "",
-        "host_name" : "localhost.localdomain",
-        "stage_id" : "1"
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/data/mock/step9PolledData/pollData_8.js b/branch-1.2/ambari-web/app/data/mock/step9PolledData/pollData_8.js
deleted file mode 100644
index d578d9f..0000000
--- a/branch-1.2/ambari-web/app/data/mock/step9PolledData/pollData_8.js
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-
-module.exports = {
-  "href" : "http://localhost:8080/api/clusters/mycluster/requests/1",
-  "Requests" : {
-    "id" : 1
-  },
-  "tasks" : [
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/1/tasks/1",
-      "Tasks" : {
-        "id" : "1",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "IN_PROGRESS",
-        "command" : "START",
-        "start_time" : "-1",
-        "role" : "DATANODE",
-        "stderr" : "",
-        "host_name" : "localhost.localdomain",
-        "stage_id" : "1"
-      }
-    },
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/1/tasks/2",
-      "Tasks" : {
-        "id" : "2",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "COMPLETED",
-        "command" : "START",
-        "start_time" : "-1",
-        "role" : "NAMENODE",
-        "stderr" : "",
-        "host_name" : "localhost.localdomain",
-        "stage_id" : "1"
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/data/mock/step9PolledData/pollData_9.js b/branch-1.2/ambari-web/app/data/mock/step9PolledData/pollData_9.js
deleted file mode 100644
index 3435574..0000000
--- a/branch-1.2/ambari-web/app/data/mock/step9PolledData/pollData_9.js
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-
-module.exports = {
-  "href" : "http://localhost:8080/api/clusters/mycluster/requests/1",
-  "Requests" : {
-    "id" : 1
-  },
-  "tasks" : [
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/1/tasks/1",
-      "Tasks" : {
-        "id" : "1",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "COMPLETED",
-        "command" : "START",
-        "start_time" : "-1",
-        "role" : "DATANODE",
-        "stderr" : "",
-        "host_name" : "localhost.localdomain",
-        "stage_id" : "1"
-      }
-    },
-    {
-      "href" : "http://localhost:8080/api/clusters/mycluster/requests/1/tasks/2",
-      "Tasks" : {
-        "id" : "2",
-        "attempt_cnt" : "0",
-        "exit_code" : "999",
-        "stdout" : "",
-        "status" : "COMPLETED",
-        "command" : "START",
-        "start_time" : "-1",
-        "role" : "NAMENODE",
-        "stderr" : "",
-        "host_name" : "localhost.localdomain",
-        "stage_id" : "1"
-      }
-    }
-  ]
-}
\ No newline at end of file
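Read in sequence, pollData_1 through pollData_9 script a full step-9 run: both tasks move PENDING, QUEUED, IN_PROGRESS, COMPLETED for the INSTALL command, then repeat the cycle for START. Turning one such payload into a progress percentage can be sketched as below (the per-status weights are illustrative; the real step-9 controller uses its own weighting):

function requestProgress(pollData) {
  var weight = { PENDING: 0, QUEUED: 0.1, IN_PROGRESS: 0.35, COMPLETED: 1 };
  var tasks = pollData.tasks.map(function (t) { return t.Tasks; });
  var done = tasks.reduce(function (sum, t) {
    return sum + (weight[t.status] || 0);
  }, 0);
  return Math.round((done / tasks.length) * 100);
}

// requestProgress(pollData_1) => 0   (both tasks PENDING)
// requestProgress(pollData_4) => 68  (IN_PROGRESS + COMPLETED)
// requestProgress(pollData_5) => 100 (both COMPLETED)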
diff --git a/branch-1.2/ambari-web/app/data/mock/step9_hosts.js b/branch-1.2/ambari-web/app/data/mock/step9_hosts.js
deleted file mode 100644
index 753392a..0000000
--- a/branch-1.2/ambari-web/app/data/mock/step9_hosts.js
+++ /dev/null
@@ -1,87 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-
-module.exports = new Ember.Set([
-  {
-    name: '192.168.1.1',
-    progress: '0',
-    message: 'starting'
-  },
-  {
-    name: '192.168.1.2',
-    progress: '0',
-    message: 'starting'
-  },
-  {
-    name: '192.168.1.3',
-    progress: '0',
-    message: 'starting'
-  },
-  {
-    name: '192.168.1.4',
-    progress: '0',
-    message: 'starting'
-  },
-  {
-    name: '192.168.1.5',
-    progress: '0',
-    message: 'starting'
-  },
-  {
-    name: '192.168.1.6',
-    progress: '0',
-    message: 'starting'
-  },
-  {
-    name: '192.168.1.7',
-    progress: '0',
-    message: 'starting'
-  },
-  {
-    name: '192.168.1.8',
-    progress: '0',
-    message: 'starting'
-  },
-  {
-    name: '192.168.1.9',
-    progress: '0',
-    message: 'starting'
-  },
-  {
-    name: '192.168.1.10',
-    progress: '0',
-    message: 'starting'
-  },
-  {
-    name: '192.168.1.11',
-    progress: '0',
-    message: 'starting'
-  },
-  {
-    name: '192.168.1.12',
-    progress: '0',
-    message: 'starting'
-  },
-  {
-    name: '192.168.1.13',
-    progress: '0',
-    message: 'starting'
-  }
-]);
diff --git a/branch-1.2/ambari-web/app/data/review_configs.js b/branch-1.2/ambari-web/app/data/review_configs.js
deleted file mode 100644
index 20cd44d..0000000
--- a/branch-1.2/ambari-web/app/data/review_configs.js
+++ /dev/null
@@ -1,176 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-module.exports = [
-
-  {
-    config_name: 'Admin',
-    display_name: 'Admin Name',
-    config_value: ''
-  },
-  {
-    config_name: 'cluster',
-    display_name: 'Cluster Name',
-    config_value: ''
-  },
-  {
-    config_name: 'hosts',
-    display_name: 'Total Hosts',
-    config_value: ''
-  },
-  {
-    config_name: 'Repo',
-    display_name: 'Local Repository',
-    config_value: ''
-  },
-  {
-    config_name: 'services',
-    display_name: 'Services',
-    config_value: [
-      Ember.Object.create({
-        service_name: 'HDFS',
-        display_name: 'HDFS',
-        service_components: [
-          Ember.Object.create({
-            display_name: 'NameNode',
-            component_value: ''
-          }),
-          Ember.Object.create({
-            display_name: 'SecondaryNameNode',
-            component_value: ''
-          }),
-          Ember.Object.create({
-            display_name: 'DataNodes',
-            component_value: ''
-          })
-        ]
-      }),
-      Ember.Object.create({
-        service_name: 'MAPREDUCE',
-        display_name: 'MapReduce',
-        service_components: [
-          Ember.Object.create({
-            display_name: 'JobTracker',
-            component_value: ''
-          }),
-          Ember.Object.create({
-            display_name: 'TaskTrackers',
-            component_value: ''
-          })
-        ]
-      }),
-      Ember.Object.create({
-        service_name: 'HIVE',
-        display_name: 'Hive + HCatalog',
-        service_components: [
-          Ember.Object.create({
-            display_name: 'Hive Metastore',
-            component_value: ''
-          }),
-          Ember.Object.create({
-            display_name: 'Database',
-            component_value: ''
-          })
-        ]
-      }),
-      Ember.Object.create({
-        service_name: 'HBASE',
-        display_name: 'HBase',
-        service_components: [
-          Ember.Object.create({
-            display_name: 'Master',
-            component_value: ''
-          }),
-          Ember.Object.create({
-            display_name: 'Region Servers',
-            component_value: ''
-          })
-        ]
-      }),
-      Ember.Object.create({
-        service_name: 'ZOOKEEPER',
-        display_name: 'ZooKeeper',
-        service_components: [
-          Ember.Object.create({
-            display_name: 'Servers',
-            component_value: ''
-          })
-        ]
-      }),
-      Ember.Object.create({
-        service_name: 'OOZIE',
-        display_name: 'Oozie',
-        service_components: [
-          Ember.Object.create({
-            display_name: 'Server',
-            component_value: ''
-          })
-          // TODO: uncomment when ready to integrate with database other than Derby
-          // Ember.Object.create({
-          //   display_name: 'Database',
-          //   component_value: ''
-          // })
-        ]
-      }),
-      Ember.Object.create({
-        service_name: 'NAGIOS',
-        display_name: 'Nagios',
-        service_components: [
-          Ember.Object.create({
-            display_name: 'Server',
-            component_value: ''
-          }),
-          Ember.Object.create({
-            display_name: 'Administrator',
-            component_value: ''
-          })
-        ]
-      }),
-      Ember.Object.create({
-        service_name: 'GANGLIA',
-        display_name: 'Ganglia',
-        service_components: [
-          Ember.Object.create({
-            display_name: 'Server',
-            component_value: ''
-          })
-        ]
-      }),
-      Ember.Object.create({
-        service_name: 'PIG',
-        display_name: 'Pig',
-        service_components: []
-      }),
-      Ember.Object.create({
-        service_name: 'SQOOP',
-        display_name: 'Sqoop',
-        service_components: []
-      }),
-      Ember.Object.create({
-        service_name: 'HCATALOG',
-        display_name: 'HCatalog',
-        service_components: [
-          Ember.Object.create({
-
-          })
-        ]
-      })
-
-    ]
-  }
-];
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/data/service_components.js b/branch-1.2/ambari-web/app/data/service_components.js
deleted file mode 100644
index 1d66a10..0000000
--- a/branch-1.2/ambari-web/app/data/service_components.js
+++ /dev/null
@@ -1,253 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-module.exports = new Ember.Set([
-
-  {
-    service_name: 'HDFS',
-    component_name: 'NAMENODE',
-    display_name: 'NameNode',
-    isMaster: true,
-    isClient: false,
-    description: 'Master server that manages the file system namespace and regulates access to files by clients'
-  },
-  {
-    service_name: 'HDFS',
-    component_name: 'SECONDARY_NAMENODE',
-    display_name: 'SNameNode',
-    isMaster: true,
-    isClient: false,
-    description: 'Helper to the primary NameNode that is responsible for supporting periodic checkpoints of the HDFS metadata'
-  },
-  {
-    service_name: 'HDFS',
-    component_name: 'DATANODE',
-    display_name: 'DataNode',
-    isMaster: false,
-    isClient: false,
-    description: 'The slave for HDFS'
-  },
-  {
-    service_name: 'HDFS',
-    component_name: 'HDFS_CLIENT',
-    display_name: 'HDFS Client',
-    isMaster: false,
-    isClient: true,
-    description: 'Client component for HDFS'
-  },
-  {
-    service_name: 'MAPREDUCE',
-    component_name: 'JOBTRACKER',
-    display_name: 'JobTracker',
-    isMaster: true,
-    isClient: false,
-    description: 'Central Master service that pushes work (MR tasks) out to available TaskTracker nodes in the cluster'
-  },
-  {
-    service_name: 'MAPREDUCE',
-    component_name: 'TASKTRACKER',
-    display_name: 'TaskTracker',
-    isMaster: false,
-    isClient: false,
-    description: 'The slave for MapReduce'
-  },
-  {
-    service_name: 'MAPREDUCE',
-    component_name: 'MAPREDUCE_CLIENT',
-    display_name: 'MapReduce Client',
-    isMaster: false,
-    isClient: true,
-    description: 'Client component for MapReduce'
-  },
-  {
-    service_name: 'ZOOKEEPER',
-    component_name: 'ZOOKEEPER_SERVER',
-    display_name: 'ZooKeeper',
-    isMaster: true,
-    isClient: false,
-    description: ''
-  },
-  {
-    service_name: 'ZOOKEEPER',
-    component_name: 'ZOOKEEPER_CLIENT',
-    display_name: 'ZooKeeper Client',
-    isMaster: false,
-    isClient: true,
-    description: ''
-  },
-  {
-    service_name: 'HBASE',
-    component_name: 'HBASE_MASTER',
-    display_name: 'HBase Master',
-    isMaster: true,
-    isClient: false,
-    description: ''
-  },
-  {
-    service_name: 'HBASE',
-    component_name: 'HBASE_REGIONSERVER',
-    display_name: 'HBase Region Server',
-    isMaster: false,
-    isClient: false,
-    description: 'The slave for HBase'
-  },
-  {
-    service_name: 'HBASE',
-    component_name: 'HBASE_CLIENT',
-    display_name: 'HBase Client',
-    isMaster: false,
-    isClient: true,
-    description: 'Client component for HBase'
-  },
-  {
-    service_name: 'PIG',
-    component_name: 'PIG',
-    display_name: 'Pig',
-    isMaster: false,
-    isClient: true,
-    description: ''
-  },
-  {
-    service_name: 'SQOOP',
-    component_name: 'SQOOP',
-    display_name: 'Sqoop',
-    isMaster: false,
-    isClient: true,
-    description: ''
-  },
-  {
-    service_name: 'OOZIE',
-    component_name: 'OOZIE_SERVER',
-    display_name: 'Oozie Server',
-    isMaster: true,
-    isClient: false,
-    description: ''
-  },
-  {
-    service_name: 'OOZIE',
-    component_name: 'OOZIE_CLIENT',
-    display_name: 'Oozie Client',
-    isMaster: false,
-    isClient: true,
-    description: ''
-  },
-  {
-    service_name: 'HIVE',
-    component_name: 'HIVE_SERVER',
-    display_name: 'HiveServer2',
-    isMaster: true,
-    isClient: false,
-    description: ''
-  },
-  {
-    service_name: 'HIVE',
-    component_name: 'HIVE_METASTORE',
-    display_name: 'Hive Metastore',
-    isMaster: true,
-    isClient: false,
-    description: ''
-  },
-  {
-    service_name: 'HIVE',
-    component_name: 'HIVE_CLIENT',
-    display_name: 'Hive Client',
-    isMaster: false,
-    isClient: true,
-    description: ''
-  },
-  {
-    service_name: 'HIVE',
-    component_name: 'MYSQL_SERVER',
-    display_name: 'MySQL Server for Hive',
-    isMaster: false,
-    isClient: false,
-    description: ''
-  },
-  {
-    service_name: 'HCATALOG',
-    component_name: 'HCAT',
-    display_name: 'HCat Client',
-    isMaster: false,
-    isClient: true,
-    description: ''
-  },
-  {
-    service_name: 'WEBHCAT',
-    component_name: 'WEBHCAT_SERVER',
-    display_name: 'WebHCat Server',
-    isMaster: true,
-    isClient: false,
-    description: ''
-  },
-  {
-    service_name: 'DASHBOARD',
-    component_name: 'DASHBOARD',
-    display_name: 'Monitoring Dashboard',
-    isMaster: false,
-    isClient: false,
-    description: ''
-  },
-  {
-    service_name: 'NAGIOS',
-    component_name: 'NAGIOS_SERVER',
-    display_name: 'Nagios Server',
-    isMaster: true,
-    isClient: false,
-    description: ''
-  },
-  {
-    service_name: 'GANGLIA',
-    component_name: 'GANGLIA_SERVER',
-    display_name: 'Ganglia Collector',
-    isMaster: true,
-    isClient: false,
-    description: ''
-  },
-  {
-    service_name: 'GANGLIA',
-    component_name: 'GANGLIA_MONITOR',
-    display_name: 'Ganglia Slave',
-    isMaster: false,
-    isClient: false,
-    description: ''
-  },
-  {
-    service_name: 'KERBEROS',
-    component_name: 'KERBEROS_SERVER',
-    display_name: 'Kerberos Server',
-    isMaster: true,
-    isClient: false,
-    description: ''
-  },
-  {
-    service_name: 'KERBEROS',
-    component_name: 'KERBEROS_ADMIN_CLIENT',
-    display_name: 'Kerberos Admin Client',
-    isMaster: false,
-    isClient: true,
-    description: ''
-  },
-  {
-    service_name: 'KERBEROS',
-    component_name: 'KERBEROS_CLIENT',
-    display_name: 'Kerberos Client',
-    isMaster: false,
-    isClient: true,
-    description: ''
-  }
-]);
\ No newline at end of file
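For context on how metadata like the array removed above is consumed elsewhere in ambari-web: the isMaster/isClient flags let views partition components with Ember's enumerable helpers. A minimal sketch, assuming the array is exported as an Ember array (Em.A) and that the require path shown is only illustrative:

// Minimal sketch (assumptions noted above): partition component metadata
// into master and client display names using Ember enumerable helpers.
var components = require('data/service_components'); // hypothetical require path
var masterNames = components.filterProperty('isMaster', true).mapProperty('display_name');
var clientNames = components.filterProperty('isClient', true).mapProperty('display_name');
console.log('Masters: ' + masterNames.join(', '));
console.log('Clients: ' + clientNames.join(', '));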
diff --git a/branch-1.2/ambari-web/app/data/service_configs.js b/branch-1.2/ambari-web/app/data/service_configs.js
deleted file mode 100644
index e488a26..0000000
--- a/branch-1.2/ambari-web/app/data/service_configs.js
+++ /dev/null
@@ -1,126 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-require('models/service_config');
-
-var configProperties = App.ConfigProperties.create();
-
-module.exports = [
-  {
-    serviceName: 'HDFS',
-    displayName: 'HDFS',
-    filename: 'hdfs-site',
-    configCategories: [
-      App.ServiceConfigCategory.create({ name: 'NameNode'}),
-      App.ServiceConfigCategory.create({ name: 'SNameNode'}),
-      App.ServiceConfigCategory.create({ name: 'DataNode'}),
-      App.ServiceConfigCategory.create({ name: 'General'}),
-      App.ServiceConfigCategory.create({ name: 'Advanced'})
-    ],
-    configs: configProperties.filterProperty('serviceName', 'HDFS')
-  },
-
-  {
-    serviceName: 'MAPREDUCE',
-    displayName: 'MapReduce',
-    filename: 'mapred-site',
-    configCategories: [
-      App.ServiceConfigCategory.create({ name: 'JobTracker'}),
-      App.ServiceConfigCategory.create({ name: 'TaskTracker'}),
-      App.ServiceConfigCategory.create({ name: 'General'}),
-      App.ServiceConfigCategory.create({ name: 'Advanced'})
-    ],
-    configs: configProperties.filterProperty('serviceName', 'MAPREDUCE')
-  },
-
-  {
-    serviceName: 'HIVE',
-    displayName: 'Hive/HCat',
-    filename: 'hive-site',
-    configCategories: [
-      App.ServiceConfigCategory.create({ name: 'Hive Metastore'}),
-      App.ServiceConfigCategory.create({ name: 'Advanced'})
-    ],
-    configs: configProperties.filterProperty('serviceName', 'HIVE')
-  },
-
-  {
-    serviceName: 'WEBHCAT',
-    displayName: 'WebHCat',
-    filename: 'webhcat-site',
-    configCategories: [
-      App.ServiceConfigCategory.create({ name: 'Advanced'})
-    ],
-    configs: configProperties.filterProperty('serviceName', 'WEBHCAT')
-  },
-
-  {
-    serviceName: 'HBASE',
-    displayName: 'HBase',
-    filename: 'hbase-site',
-    configCategories: [
-      App.ServiceConfigCategory.create({ name: 'HBase Master'}),
-      App.ServiceConfigCategory.create({ name: 'RegionServer'}),
-      App.ServiceConfigCategory.create({ name: 'General'}),
-      App.ServiceConfigCategory.create({ name: 'Advanced'})
-    ],
-    configs: configProperties.filterProperty('serviceName', 'HBASE')
-  },
-
-  {
-    serviceName: 'ZOOKEEPER',
-    displayName: 'ZooKeeper',
-    configCategories: [
-      App.ServiceConfigCategory.create({ name: 'ZooKeeper Server'}),
-      App.ServiceConfigCategory.create({ name: 'Advanced'})
-    ],
-    configs: configProperties.filterProperty('serviceName', 'ZOOKEEPER')
-  },
-
-  {
-    serviceName: 'OOZIE',
-    displayName: 'Oozie',
-    filename: 'oozie-site',
-    configCategories: [
-      App.ServiceConfigCategory.create({ name: 'Oozie Server'}),
-      App.ServiceConfigCategory.create({ name: 'Advanced'})
-    ],
-    configs: configProperties.filterProperty('serviceName', 'OOZIE')
-  },
-
-  {
-    serviceName: 'NAGIOS',
-    displayName: 'Nagios',
-    configCategories: [
-      App.ServiceConfigCategory.create({ name: 'General'})
-    ],
-    configs: configProperties.filterProperty('serviceName', 'NAGIOS')
-  },
-
-  {
-    serviceName: 'MISC',
-    displayName: 'Misc',
-    configCategories: [
-     // App.ServiceConfigCategory.create({ name: 'General'}),
-      App.ServiceConfigCategory.create({ name: 'Users and Groups'})
-    ],
-    configs: configProperties.filterProperty('serviceName', 'MISC')
-  }
-
-]
\ No newline at end of file
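Each entry above pairs a category list with configProperties.filterProperty('serviceName', ...). A hedged sketch of how such an entry could be bucketed per category; the 'category' attribute and the function name are assumptions for illustration, not the actual rendering code:

// Hedged sketch: bucket a service's config properties by an assumed
// 'category' attribute so each App.ServiceConfigCategory gets its own list.
function groupByCategory(serviceConfig) {
  var buckets = {};
  serviceConfig.configCategories.forEach(function (cat) {
    // filterProperty is available because Ember extends Array.prototype by default
    buckets[cat.get('name')] = serviceConfig.configs.filterProperty('category', cat.get('name'));
  });
  return buckets;
}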
diff --git a/branch-1.2/ambari-web/app/data/services.js b/branch-1.2/ambari-web/app/data/services.js
deleted file mode 100644
index 33edd03..0000000
--- a/branch-1.2/ambari-web/app/data/services.js
+++ /dev/null
@@ -1,104 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-module.exports = [
-  {
-    serviceName: 'HDFS',
-    displayName: 'HDFS',
-    isDisabled: true,
-    isSelected: true,
-    description: Em.I18n.t('services.hdfs.description')
-  },
-  {
-    serviceName: 'MAPREDUCE',
-    displayName: 'MapReduce',
-    isDisabled: false,
-    isSelected: true,
-    description: Em.I18n.t('services.mapreduce.description')
-  },
-  {
-    serviceName: 'NAGIOS',
-    displayName: 'Nagios',
-    isDisabled: false,
-    isSelected: true,
-    description: Em.I18n.t('services.nagios.description')
-  },
-  {
-    serviceName: 'GANGLIA',
-    displayName: 'Ganglia',
-    isDisabled: false,
-    isSelected: true,
-    description: Em.I18n.t('services.ganglia.description')
-  },
-  {
-    serviceName: 'HIVE',
-    displayName: 'Hive + HCat + ZooKeeper',
-    isDisabled: false,
-    isSelected: true,
-    description: Em.I18n.t('services.hive.description')
-  },
-  {
-    serviceName: 'HCATALOG',
-    displayName: 'HCatalog',
-    isDisabled: false,
-    isSelected: true,
-    isHidden: true
-  },
-  {
-    serviceName: 'WEBHCAT',
-    displayName: 'WebHCat',
-    isDisabled: false,
-    isSelected: true,
-    isHidden: true
-  },
-  {
-    serviceName: 'HBASE',
-    displayName: 'HBase + ZooKeeper',
-    isDisabled: false,
-    isSelected: true,
-    description: Em.I18n.t('services.hbase.description')
-  },
-  {
-    serviceName: 'PIG',
-    displayName: 'Pig',
-    isDisabled: false,
-    isSelected: true,
-    description: Em.I18n.t('services.pig.description')
-  },
-  {
-    serviceName: 'SQOOP',
-    displayName: 'Sqoop',
-    isDisabled: false,
-    isSelected: true,
-    description: Em.I18n.t('services.sqoop.description')
-  },
-  {
-    serviceName: 'OOZIE',
-    displayName: 'Oozie',
-    isDisabled: false,
-    isSelected: true,
-    description: Em.I18n.t('services.oozie.description')
-  },
-  {
-    serviceName: 'ZOOKEEPER',
-    displayName: 'ZooKeeper',
-    isDisabled: false,
-    isSelected: true,
-    isHidden: true
-  }
-]
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/data/statusCodes.js b/branch-1.2/ambari-web/app/data/statusCodes.js
deleted file mode 100644
index df6a1d8..0000000
--- a/branch-1.2/ambari-web/app/data/statusCodes.js
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-module.exports = {
-  200: function () {
-    console.log("Status code 200: Success.");
-  },
-  202: function () {
-    console.log("Status code 202: Success for creation.");
-  },
-  400: function () {
-    console.log("Error code 400: Bad Request.");
-  },
-  401: function () {
-    console.log("Error code 401: Unauthorized.");
-  },
-  402: function () {
-    console.log("Error code 402: Payment Required.");
-  },
-  403: function () {
-    console.log("Error code 403: Forbidden.");
-    App.router.logOff();
-  },
-  404: function () {
-    console.log("Error code 404: URI not found.");
-  },
-  500: function () {
-    console.log("Error code 500: Internal Server Error.");
-  },
-  501: function () {
-    console.log("Error code 501: Not implemented yet.");
-  },
-  502: function () {
-    console.log("Error code 502: Bad Gateway.");
-  },
-  503: function () {
-    console.log("Error code 503: Service Unavailable.");
-  }
-};
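A map like this is typically wired into jQuery's ajax failure handling so each response status dispatches to its handler. A minimal hedged sketch; the require path and the surrounding call are assumptions for illustration:

// Hedged sketch: dispatch an AJAX failure to the status-code handler map above.
var statusCodes = require('data/statusCodes'); // hypothetical require path
$.ajax({ url: App.apiPrefix + '/clusters' }).fail(function (xhr) {
  var handler = statusCodes[xhr.status];
  if (handler) {
    handler();
  } else {
    console.log('Unhandled status code: ' + xhr.status);
  }
});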
diff --git a/branch-1.2/ambari-web/app/ember.js b/branch-1.2/ambari-web/app/ember.js
deleted file mode 100644
index 7320f89..0000000
--- a/branch-1.2/ambari-web/app/ember.js
+++ /dev/null
@@ -1,19 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-module.exports = Ember;
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/initialize.js b/branch-1.2/ambari-web/app/initialize.js
deleted file mode 100644
index a9c7c4a..0000000
--- a/branch-1.2/ambari-web/app/initialize.js
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-window.App = require('app');
-
-App.testMode = false;
-App.skipBootstrap = false;
-App.alwaysGoToInstaller = false;
-App.apiPrefix = '/api/v1';
-App.defaultStackVersion = 'HDP-1.2.1';
-App.defaultLocalStackVersion = 'HDPLocal-1.2.1';
-App.defaultJavaHome = '/usr/jdk/jdk1.6.0_31';
-App.addServicesEnabled = false;
-// default AJAX timeout
-App.timeout = 180000;
-// max number of retries for certain AJAX calls
-App.maxRetries = 3;
-App.bgOperationsUpdateInterval = 6000;
-App.componentsUpdateInterval = 6000;
-App.contentUpdateInterval = 15000;
-App.maxRunsForAppBrowser = 500;
-App.pageReloadTime = 3600000;
-
-// this is to make sure that IE does not cache data when making AJAX calls to the server
-$.ajaxSetup({
-  cache: false
-});
-
-require('messages');
-require('utils/base64');
-require('utils/db');
-require('utils/helper');
-require('models');
-require('controllers');
-require('templates');
-require('views');
-require('router');
-
-require('utils/updater');
-
-require('mappers/server_data_mapper');
-require('mappers/status_mapper');
-require('mappers/hosts_mapper');
-require('mappers/cluster_mapper');
-require('mappers/jobs_mapper');
-require('mappers/runs_mapper');
-require('mappers/racks_mapper');
-require('mappers/alerts_mapper');
-require('mappers/users_mapper');
-require('mappers/service_mapper');
-
-require('utils/http_client');
-
-App.initialize();
-
-/**
- * Test Mode values
- */
-App.test_hostname = 'hostname';
-
-console.log('after initialize');
-console.log('TRACE: app.js-> localStorage:Ambari.authenticated=' + localStorage.getItem('Ambari' + 'authenticated'));
-console.log('TRACE: app.js-> localStorage:currentStep=' + localStorage.getItem(App.get('router').getLoginName() + 'Installer' + 'currentStep'));
-console.log('TRACE: app.js-> router.authenticated=' + App.get('router.loggedIn'));
diff --git a/branch-1.2/ambari-web/app/mappers/alerts_mapper.js b/branch-1.2/ambari-web/app/mappers/alerts_mapper.js
deleted file mode 100644
index 65d053c..0000000
--- a/branch-1.2/ambari-web/app/mappers/alerts_mapper.js
+++ /dev/null
@@ -1,138 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-App.alertsMapper = App.QuickDataMapper.create({
-  model: App.Alert,
-  config:{
-    $alert_id:'' ,
-    title: "service_description",
-    service_type: "service_type",
-    date: "last_hard_state_change",
-    status: "current_state",
-    message: "plugin_output",
-    host_name: "host_name",
-    current_attempt: "current_attempt",
-    last_hard_state_change: "last_hard_state_change",
-    last_hard_state: "last_hard_state",
-    last_time_ok: "last_time_ok",
-    last_time_warning: "last_time_warning",
-    last_time_unknown: "last_time_unknown",
-    last_time_critical: "last_time_critical",
-    is_flapping: "is_flapping",
-    last_check: "last_check"
-  },
-  map: function (json) {
-    if (!this.get('model')) {
-      return;
-    }
-    if (json && json.items && json.items.length>0 && json.items[0].HostRoles && json.items[0].HostRoles.nagios_alerts) {
-      var alertsString = json.items[0].HostRoles.nagios_alerts;
-      var alerts = jQuery.parseJSON(alertsString).alerts;
-      if (App.Alert.find().content.length > 0) {
-        this.update(alerts);
-      } else {
-        var result = [];
-        alerts.forEach(function(item){
-          var applyConfig = jQuery.extend({}, this.config);
-          if (item.current_state && item.last_hard_state && item.current_state != item.last_hard_state) {
-            switch (item.current_state) {
-              case "0":
-                applyConfig['date'] = 'last_time_ok';
-                break;
-              case "1":
-                applyConfig['date'] = 'last_time_warning';
-                break;
-              case "2":
-                applyConfig['date'] = 'last_time_critical';
-                break;
-            }
-          }
-          result.push(this.parseIt(item, applyConfig));
-        }, this);
-        App.store.loadMany(this.get('model'), result);
-      }
-    }
-  },
-  update: function(alerts){
-    var alertsList = App.Alert.find();
-    var titleToAlertMap = {};
-    alertsList.forEach(function(alert){
-      titleToAlertMap[alert.get('serviceType') + alert.get('title') + alert.get('hostName')] = alert;
-    });
-    var newRecords = [];
-    alerts.forEach(function(item){
-      var existAlert = titleToAlertMap[item.service_type + item.service_description + item.host_name];
-      if (existAlert == null) {
-        var applyConfig = jQuery.extend({}, this.config);
-        if (item.current_state && item.last_hard_state && item.current_state != item.last_hard_state) {
-          switch (item.current_state) {
-            case "0":
-              applyConfig['date'] = 'last_time_ok';
-              break;
-            case "1":
-              applyConfig['date'] = 'last_time_warning';
-              break;
-            case "2":
-              applyConfig['date'] = 'last_time_critical';
-              break;
-          }
-        }
-        newRecords.push(this.parseIt(item, applyConfig));
-      } else {
-        // update record
-        existAlert.set('serviceType', item.service_type);
-        if (item.current_state && item.last_hard_state && item.current_state != item.last_hard_state) {
-          switch (item.current_state) {
-            case "0":
-              existAlert.set('date', DS.attr.transforms.date.from(item.last_time_ok));
-              break;
-            case "1":
-              existAlert.set('date', DS.attr.transforms.date.from(item.last_time_warning));
-              break;
-            case "2":
-              existAlert.set('date', DS.attr.transforms.date.from(item.last_time_critical));
-              break;
-            default:
-              existAlert.set('date', DS.attr.transforms.date.from(item.last_hard_state_change));
-              break;
-          }
-        }else{
-          existAlert.set('date', DS.attr.transforms.date.from(item.last_hard_state_change));
-        }
-        existAlert.set('status', item.current_state);
-        existAlert.set('message', item.plugin_output);
-        existAlert.set('lastHardStateChange', item.last_hard_state_change);
-        existAlert.set('lastHardState', item.last_hard_state);
-        existAlert.set('lastTimeOk', item.last_time_ok);
-        existAlert.set('lastTimeWarning', item.last_time_warning);
-        existAlert.set('lastTimeUnknown', item.last_time_unknown);
-        existAlert.set('lastTimeCritical', item.last_time_critical);
-        existAlert.set('lastCheck', item.last_check);
-        existAlert.set('isFlapping', item.is_flapping);
-        delete titleToAlertMap[item.service_type + item.service_description + item.host_name];
-      }
-    }, this);
-    for ( var e in titleToAlertMap) {
-      titleToAlertMap[e].deleteRecord();
-    }
-    if (newRecords.length > 0) {
-      App.store.loadMany(this.get('model'), newRecords); // Add new records
-    }
-  }
-});
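The update() path above is an instance of a keyed merge: index existing records by a composite key, update matches, insert misses, and delete leftovers. A generic sketch of the pattern (all names here are illustrative, not Ambari API):

// Generic keyed-merge sketch mirroring the alerts update() strategy above.
function mergeByKey(existing, incoming, keyOfExisting, keyOfIncoming, handlers) {
  var index = {};
  existing.forEach(function (rec) {
    index[keyOfExisting(rec)] = rec;
  });
  incoming.forEach(function (item) {
    var key = keyOfIncoming(item);
    var match = index[key];
    if (match) {
      handlers.update(match, item); // refresh fields on the existing record
      delete index[key];            // consume it so leftovers can be removed
    } else {
      handlers.insert(item);        // brand-new record
    }
  });
  for (var key in index) {
    handlers.remove(index[key]);    // present before, absent in this payload
  }
}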
diff --git a/branch-1.2/ambari-web/app/mappers/cluster_mapper.js b/branch-1.2/ambari-web/app/mappers/cluster_mapper.js
deleted file mode 100644
index 0296859..0000000
--- a/branch-1.2/ambari-web/app/mappers/cluster_mapper.js
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-App.clusterMapper = App.QuickDataMapper.create({
-    model : App.Cluster,
-    map:function(json){
-      if(!this.get('model')) {return;}
-      if(json){
-        var result = json;
-        result = this.parseIt(result, this.config);
-        App.store.load(this.get('model'), result);
-      }
-    },
-    config : {
-      id:'Clusters.cluster_id',
-      cluster_name: 'Clusters.cluster_name',
-      stack_name: 'Clusters.stack_name',
-      version: 'Clusters.version',
-      //$hosts: [1, 2, 3, 4],
-      $racks: [1],
-      max_hosts_per_rack: 'Clusters.max_hosts_per_rack'
-    }
-
-});
diff --git a/branch-1.2/ambari-web/app/mappers/hosts_mapper.js b/branch-1.2/ambari-web/app/mappers/hosts_mapper.js
deleted file mode 100644
index b178e62..0000000
--- a/branch-1.2/ambari-web/app/mappers/hosts_mapper.js
+++ /dev/null
@@ -1,101 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-App.hostsMapper = App.QuickDataMapper.create({
-
-  model: App.Host,
-  config: {
-    id: 'Hosts.host_name',
-    host_name: 'Hosts.host_name',
-    public_host_name: 'Hosts.public_host_name',
-    cluster_id: 'Hosts.cluster_name',
-    rack: 'Hosts.rack_info',
-    host_components_key: 'host_components',
-    host_components_type: 'array',
-    host_components: {
-      item: 'id'
-    },
-    cpu: 'Hosts.cpu_count',
-    memory: 'Hosts.total_mem',
-    disk_info: 'Hosts.disk_info',
-    disk_total: 'metrics.disk.disk_total',
-    disk_free: 'metrics.disk.disk_free',
-    health_status: 'Hosts.host_status',
-    load_one: 'metrics.load.load_one',
-    load_five: 'metrics.load.load_five',
-    load_fifteen: 'metrics.load.load_fifteen',
-    cpu_usage: 'cpu_usage',
-    memory_usage: 'memory_usage',
-    last_heart_beat_time: "Hosts.last_heartbeat_time",
-    os_arch: 'Hosts.os_arch',
-    os_type: 'Hosts.os_type',
-    ip: 'Hosts.ip'
-  },
-  map: function (json) {
-
-    if (json.items) {
-
-      var result = [];
-      json.items.forEach(function (item) {
-
-        // Disk Usage
-        if (item.metrics && item.metrics.disk && item.metrics.disk.disk_total && item.metrics.disk.disk_free) {
-          var diskUsed = item.metrics.disk.disk_total - item.metrics.disk.disk_free;
-          var diskUsedPercent = (100 * diskUsed) / item.metrics.disk.disk_total;
-          item.disk_usage = diskUsedPercent.toFixed(1);
-        }
-        // CPU Usage
-        if (item.metrics && item.metrics.cpu && item.metrics.cpu.cpu_system && item.metrics.cpu.cpu_user) {
-          var cpuUsedPercent = item.metrics.cpu.cpu_system + item.metrics.cpu.cpu_user;
-          item.cpu_usage = cpuUsedPercent.toFixed(1);
-        }
-        // Memory Usage
-        if (item.metrics && item.metrics.memory && item.metrics.memory.mem_free && item.metrics.memory.mem_total) {
-          var memUsed = item.metrics.memory.mem_total - item.metrics.memory.mem_free;
-          var memUsedPercent = (100 * memUsed) / item.metrics.memory.mem_total;
-          item.memory_usage = memUsedPercent.toFixed(1);
-        }
-
-        item.host_components.forEach(function (host_component) {
-          host_component.id = host_component.HostRoles.component_name + "_" + host_component.HostRoles.host_name;
-        }, this);
-        result.push(this.parseIt(item, this.config));
-
-      }, this);
-      result = this.sortByPublicHostName(result);
-      App.store.loadMany(this.get('model'), result);
-    }
-  },
-  /**
-   * Default data sorting by public_host_name field
-   * @param data
-   * @return {Array}
-   */
-  sortByPublicHostName: function(data) {
-    data.sort(function(a, b) {
-      var ap = a.public_host_name;
-      var bp = b.public_host_name;
-      if (ap > bp) return 1;
-      if (ap < bp) return -1;
-      return 0;
-    });
-    return data;
-  }
-
-});
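A worked instance of the usage math above, with made-up metric values: usage is (total - free) / total, scaled to a percentage and fixed to one decimal, exactly as the disk, CPU and memory branches do.

// Worked example (assumed values) of the disk-usage computation above.
var disk_total = 1024.0; // e.g. GB reported by metrics.disk.disk_total
var disk_free = 384.0;   // e.g. GB reported by metrics.disk.disk_free
var diskUsedPercent = (100 * (disk_total - disk_free)) / disk_total;
console.log(diskUsedPercent.toFixed(1)); // "62.5"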
diff --git a/branch-1.2/ambari-web/app/mappers/jobs_mapper.js b/branch-1.2/ambari-web/app/mappers/jobs_mapper.js
deleted file mode 100644
index 4a2594a..0000000
--- a/branch-1.2/ambari-web/app/mappers/jobs_mapper.js
+++ /dev/null
@@ -1,91 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.jobsMapper = App.QuickDataMapper.create({
-  model:App.Job,
-  map:function (json) {
-    if (!this.get('model')) {
-      return;
-    }
-    if (json.jobs) {
-      var result = [];
-      json.jobs.forEach(function (item) {
-        result.push(this.parseIt(item, this.config));
-      }, this);
-
-      var r = Ember.ArrayProxy.create({"content":[]});
-      result.forEach(function(item){
-        r.content.push(App.Job2.create(item));
-      });
-
-      this.set('controller.content.jobs', r.content);
-    }
-  },
-  config:{
-    id:'jobId',
-    run_id:'workflowId',
-    job_name:'jobName',
-    workflow_entity_name:'workflowEntityName',
-    user_name:'userName',
-    submit_time:'submitTime',
-    maps:'maps',
-    reduces:'reduces',
-    status:'status',
-    input:'inputBytes',
-    output:'outputBytes',
-    elapsed_time:'elapsedTime'
-  }
-});
-
-App.jobTimeLineMapper = App.QuickDataMapper.create({
-  model: null, //model will be set outside of mapper
-  config:{
-    map:'map',
-    shuffle:'shuffle',
-    reduce:'reduce'
-  },
-  map:function (json) {
-    var job = this.get('model'); // @model App.MainAppsItemBarView
-    var parseResult = this.parseIt(json, this.config);
-
-    $.each(parseResult, function (field, value) {
-      job.set(field, value);
-    });
-  }
-});
-
-App.jobTasksMapper = App.QuickDataMapper.create({
-  model: null, //model will be set outside of mapper
-  config:{
-    mapNodeLocal:'mapNodeLocal',
-    mapRackLocal:'mapRackLocal',
-    mapOffSwitch:'mapOffSwitch',
-    reduceOffSwitch:'reduceOffSwitch',
-    submit:'submitTime',
-    finish:'finishTime'
-  },
-  map:function (json) {
-    var job = this.get('model'); // @model App.MainAppsItemBarView
-    var parseResult = this.parseIt(json, this.config);
-    $.each(parseResult, function (field, value) {
-      job.set(field, value);
-    });
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/mappers/racks_mapper.js b/branch-1.2/ambari-web/app/mappers/racks_mapper.js
deleted file mode 100644
index 47abc4d..0000000
--- a/branch-1.2/ambari-web/app/mappers/racks_mapper.js
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-
-App.racksMapper = App.QuickDataMapper.create({
-  model: App.Rack,
-  config: {
-    id: "Racks.id",
-    name: "Racks.name"
-    //$hosts: ["host01", "host06", "host05"],
-    //status: "Racks.status",
-    //live_hosts_count: "Racks.live_hosts_count",
-    //critical_hosts_count: "Racks.critical_hosts_count",
-    //dead_hosts_count: "Racks.dead_hosts_count"
-  }
-});
diff --git a/branch-1.2/ambari-web/app/mappers/runs_mapper.js b/branch-1.2/ambari-web/app/mappers/runs_mapper.js
deleted file mode 100644
index ff98679..0000000
--- a/branch-1.2/ambari-web/app/mappers/runs_mapper.js
+++ /dev/null
@@ -1,86 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.runsMapper = App.QuickDataMapper.create({
-  model : App.Run,
-  map : function(json) {
-    if(!this.get('model')) {
-      return;
-    }
-    if(json && json.aaData) {
-      var result = [];
-
-      var pagination_info = {
-        iTotalDisplayRecords: json.iTotalDisplayRecords,
-        iTotalRecords: json.iTotalRecords,
-        startIndex: parseInt(json.startIndex, 10) + 1,
-        endIndex: parseInt(json.endIndex, 10) + 1
-      };
-
-      json.aaData.forEach(function(item, index) {
-        var o = this.parseIt(item, this.config);
-
-        var r = '{dag: {';
-        item.workflowContext.workflowDag.entries.forEach(function(item) {
-          r += '"' + item.source + '": [';
-          // if a standalone MapReduce job, there won't be any targets
-          if (item.targets) {
-            item.targets.forEach(function(target) {
-              r += '"' + target + '",';
-            });
-            if(item.targets.length){
-              r = r.substr(0, r.length - 1);
-            }
-          } else {
-            r += '"' + item.source + '"';
-          }
-          r += '],';
-        });
-        r = r.substr(0, r.length - 1);
-        r += '}}';
-        o.workflowContext = r;
-        o.index = index + 1;
-        result.push(o);
-      }, this);
-
-      var r = [];
-      result.forEach(function(item){
-        r.push(App.Run2.create(item));
-      });
-
-      App.router.get('mainAppsController').set('content', r);
-      App.router.get('mainAppsController').set('serverData', json.summary);
-      App.router.get('mainAppsController').set('paginationObject', pagination_info);
-    }
-
-
-  },
-  config : {
-    id: 'workflowId',
-    appName: 'workflowName',
-    numJobsTotal: 'numJobsTotal',
-    numJobsCompleted: 'numJobsCompleted',
-    userName:'userName',
-    startTime: 'startTime',
-    elapsedTime: 'elapsedTime',
-    input: 'inputBytes',
-    output: 'outputBytes'
-  }
-});
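The string assembled above is a DAG in a JSON-like shape, {dag: {source: [targets...]}}. A standalone sketch that reproduces the serialization for sample input; for brevity it treats a missing targets list as empty, whereas the mapper above emits a self-reference for standalone jobs:

// Standalone sketch (sample data) of the workflow DAG serialization above.
function serializeDag(entries) {
  var r = '{dag: {';
  entries.forEach(function (e) {
    r += '"' + e.source + '": [';
    if (e.targets && e.targets.length) {
      r += e.targets.map(function (t) { return '"' + t + '"'; }).join(',');
    }
    r += '],';
  });
  r = r.substr(0, r.length - 1); // drop the trailing comma
  return r + '}}';
}
console.log(serializeDag([
  { source: 'job_1', targets: ['job_2', 'job_3'] },
  { source: 'job_2', targets: [] }
]));
// -> {dag: {"job_1": ["job_2","job_3"],"job_2": []}}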
diff --git a/branch-1.2/ambari-web/app/mappers/server_data_mapper.js b/branch-1.2/ambari-web/app/mappers/server_data_mapper.js
deleted file mode 100644
index 59dd781..0000000
--- a/branch-1.2/ambari-web/app/mappers/server_data_mapper.js
+++ /dev/null
@@ -1,127 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.ServerDataMapper = Em.Object.extend({
-  jsonKey: false,
-  map: function (json) {
-    if (json) {
-      var model = this.get('model');
-      var jsonKey = this.get('jsonKey');
-
-      if (jsonKey && json[jsonKey]) { // if data come as { hdfs: {...} }
-        json = json[jsonKey];
-      }
-
-      $.each(json, function (field, value) {
-        model.set(field, value);
-      })
-    }
-  }
-});
-
-
-App.QuickDataMapper = App.ServerDataMapper.extend({
-  config: {},
-  model: null,
-  map: function (json) {
-    if (!this.get('model')) {
-      return;
-    }
-
-    if (json.items) {
-      var result = [];
-
-      json.items.forEach(function (item) {
-        result.push(this.parseIt(item, this.config));
-      }, this)
-
-      //console.log(this.get('model'), result);
-      App.store.loadMany(this.get('model'), result);
-    }
-  },
-
-  parseIt: function (data, config) {
-    var result = {};
-    for ( var i in config) {
-      if (i.substr(0, 1) === '$') {
-        i = i.substr(1, i.length);
-        result[i] = config['$' + i];
-      } else {
-        var isSpecial = false;
-        if (i.substr(-5) == '_type') {
-          var prefix = i.substr(0, i.length - 5);
-          isSpecial = config[prefix + '_key'] != null;
-        } else if (i.substr(-4) == '_key') {
-          var prefix = i.substr(0, i.length - 4);
-          isSpecial = config[prefix + '_type'] != null;
-        }
-        if (!isSpecial && typeof config[i] == 'string') {
-          result[i] = this.getJsonProperty(data, config[i]);
-        } else if (typeof config[i] == 'object') {
-          result[i] = [];
-          var _data = this.getJsonProperty(data, config[i + '_key']);
-          var _type = config[i + '_type'];
-          var l = _data.length;
-          for ( var index = 0; index < l; index++) {
-            if (_type == 'array') {
-              result[i].push(this.getJsonProperty(_data[index], config[i].item));
-            } else {
-              result[i].push(this.parseIt(_data[index], config[i]));
-            }
-          }
-          if(_type == 'array'){
-            result[i] = result[i].sort();
-          }
-        }
-      }
-    }
-    return result;
-  },
-
-  getJsonProperty: function (json, path) {
-    var pathArr = path.split('.');
-    var current = json;
-    while (pathArr.length && current) {
-      if (pathArr[0].substr(-1) == ']') {
-        var index = parseInt(pathArr[0].substr(-2, 1));
-        var attr = pathArr[0].substr(0, pathArr[0].length - 3);
-        if (attr in current) {
-          current = current[attr][index];
-        }
-      } else {
-        current = current[pathArr[0]];
-      }
-      pathArr.splice(0, 1);
-    }
-    return current;
-  },
-
-  calculateState: function (json) {
-//    var stateEqual = (json.desired_status != json.work_status);
-//    if (stateEqual) {
-//      if (json.desired_status == 'STARTED' && json.work_status == 'INSTALLED') {
-//        json.work_status = 'STARTING';
-//      } else if (json.desired_status == 'INSTALLED' && json.work_status == 'STARTED') {
-//        json.work_status = 'STOPPING';
-//      }
-//    }
-    return json;
-  }
-});
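The parseIt() config conventions above are a small mapping DSL: plain string values are dotted JSON paths, keys starting with '$' are literals copied as-is, and an object value with sibling '<name>_key'/'<name>_type' entries maps arrays. A usage sketch with assumed sample data, calling parseIt directly on a fresh instance of the class defined above:

// Usage sketch for the QuickDataMapper config conventions (assumed sample data).
var sample = {
  Hosts: { host_name: 'host1', cpu_count: 4 },
  host_components: [
    { HostRoles: { component_name: 'DATANODE' } },
    { HostRoles: { component_name: 'TASKTRACKER' } }
  ]
};
var config = {
  id: 'Hosts.host_name',            // dotted path into the JSON
  cpu: 'Hosts.cpu_count',
  $cluster_id: 'c1',                // '$' prefix: literal value, copied as-is
  components_key: 'host_components',
  components_type: 'array',
  components: { item: 'HostRoles.component_name' } // path applied per array item
};
console.log(App.QuickDataMapper.create().parseIt(sample, config));
// -> { id: 'host1', cpu: 4, cluster_id: 'c1',
//      components: ['DATANODE', 'TASKTRACKER'] }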
diff --git a/branch-1.2/ambari-web/app/mappers/service_mapper.js b/branch-1.2/ambari-web/app/mappers/service_mapper.js
deleted file mode 100644
index bb424cd..0000000
--- a/branch-1.2/ambari-web/app/mappers/service_mapper.js
+++ /dev/null
@@ -1,322 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-App.servicesMapper = App.QuickDataMapper.create({
-  servicesSortOrder: [
-    'HDFS',
-    'MAPREDUCE',
-    'HBASE',
-    'HIVE',
-    'HCATALOG',
-    'WEBHCAT',
-    'OOZIE',
-    'GANGLIA',
-    'NAGIOS',
-    'ZOOKEEPER',
-    'PIG',
-    'SQOOP'
-  ],
-  sortByOrder: function (sortOrder, array) {
-    var sorted = [];
-    for (var i = 0; i < sortOrder.length; i++)
-      for (var j = 0; j < array.length; j++) {
-        if (sortOrder[i] == array[j].id) {
-          sorted.push(array[j]);
-        }
-      }
-    return sorted;
-  },
-
-  model: App.Service,
-  config: {
-    id: 'ServiceInfo.service_name',
-    service_name: 'ServiceInfo.service_name',
-    work_status: 'ServiceInfo.state',
-    $alerts: [ 1, 2, 3 ],
-    host_components: 'host_components'
-  },
-  hdfsConfig: {
-    version: 'nameNodeComponent.ServiceComponentInfo.Version',
-    name_node_id: 'nameNodeComponent.host_components[0].HostRoles.host_name',
-    sname_node_id: 'snameNodeComponent.host_components[0].HostRoles.host_name',
-    data_nodes: 'data_nodes',
-    name_node_start_time: 'nameNodeComponent.ServiceComponentInfo.StartTime',
-    jvm_memory_heap_used: 'nameNodeComponent.host_components[0].metrics.jvm.memHeapUsedM',
-    jvm_memory_heap_committed: 'nameNodeComponent.host_components[0].metrics.jvm.memHeapCommittedM',
-    live_data_nodes: 'live_data_nodes',
-    dead_data_nodes: 'dead_data_nodes',
-    decommission_data_nodes: 'decommission_data_nodes',
-    capacity_used: 'nameNodeComponent.ServiceComponentInfo.CapacityUsed',
-    capacity_total: 'nameNodeComponent.ServiceComponentInfo.CapacityTotal',
-    capacity_remaining: 'nameNodeComponent.ServiceComponentInfo.CapacityRemaining',
-    dfs_total_blocks: 'nameNodeComponent.ServiceComponentInfo.BlocksTotal',
-    dfs_corrupt_blocks: 'nameNodeComponent.ServiceComponentInfo.CorruptBlocks',
-    dfs_missing_blocks: 'nameNodeComponent.ServiceComponentInfo.MissingBlocks',
-    dfs_under_replicated_blocks: 'nameNodeComponent.ServiceComponentInfo.UnderReplicatedBlocks',
-    dfs_total_files: 'nameNodeComponent.ServiceComponentInfo.TotalFiles',
-    upgrade_status: 'nameNodeComponent.ServiceComponentInfo.UpgradeFinalized',
-    safe_mode_status: 'nameNodeComponent.ServiceComponentInfo.Safemode'
-  },
-  mapReduceConfig: {
-    version: 'jobTrackerComponent.ServiceComponentInfo.Version',
-    job_tracker_id: 'jobTrackerComponent.host_components[0].HostRoles.host_name',
-    task_trackers: 'task_trackers',
-    job_tracker_start_time: 'jobTrackerComponent.ServiceComponentInfo.StartTime',
-    job_tracker_heap_used: 'jobTrackerComponent.ServiceComponentInfo.HeapMemoryUsed',
-    job_tracker_heap_max: 'jobTrackerComponent.ServiceComponentInfo.HeapMemoryMax',
-    alive_trackers: 'alive_trackers',
-    black_list_trackers: 'black_list_trackers',
-    gray_list_trackers: 'gray_list_trackers',
-    map_slots: 'map_slots',
-    reduce_slots: 'reduce_slots',
-    jobs_submitted: 'jobTrackerComponent.ServiceComponentInfo.jobtracker.jobs_submitted',
-    jobs_completed: 'jobTrackerComponent.ServiceComponentInfo.jobtracker.jobs_completed',
-    map_slots_occupied: 'jobTrackerComponent.ServiceComponentInfo.jobtracker.occupied_map_slots',
-    map_slots_reserved: 'jobTrackerComponent.ServiceComponentInfo.jobtracker.reserved_map_slots',
-    reduce_slots_occupied: 'jobTrackerComponent.ServiceComponentInfo.jobtracker.occupied_reduce_slots',
-    reduce_slots_reserved: 'jobTrackerComponent.ServiceComponentInfo.jobtracker.reserved_reduce_slots',
-    maps_running: 'jobTrackerComponent.ServiceComponentInfo.jobtracker.running_maps',
-    maps_waiting: 'jobTrackerComponent.ServiceComponentInfo.jobtracker.waiting_maps',
-    reduces_running: 'jobTrackerComponent.ServiceComponentInfo.jobtracker.running_reduces',
-    reduces_waiting: 'jobTrackerComponent.ServiceComponentInfo.jobtracker.waiting_reduces',
-    trackers_decommissioned: 'jobTrackerComponent.host_components[0].metrics.mapred.jobtracker.trackers_decommissioned'
-  },
-  hbaseConfig: {
-    version: 'masterComponent.ServiceComponentInfo.Version',
-    master_id: 'masterComponent.host_components[0].HostRoles.host_name',
-    region_servers: 'region_servers',
-    master_start_time: 'masterComponent.ServiceComponentInfo.MasterStartTime',
-    master_active_time: 'masterComponent.ServiceComponentInfo.MasterActiveTime',
-    average_load: 'masterComponent.ServiceComponentInfo.AverageLoad',
-    regions_in_transition: 'regions_in_transition',
-    revision: 'masterComponent.ServiceComponentInfo.Revision',
-    heap_memory_used: 'masterComponent.ServiceComponentInfo.HeapMemoryUsed',
-    heap_memory_max: 'masterComponent.ServiceComponentInfo.HeapMemoryMax'
-  },
-
-  model3: App.HostComponent,
-  config3: {
-    id: 'id',
-    work_status: 'HostRoles.state',
-    desired_status: 'HostRoles.desired_state',
-    component_name: 'HostRoles.component_name',
-    host_id: 'HostRoles.host_name',
-    $service_id: 'none' /* will be set outside of parse function */
-  },
-
-  map: function (json) {
-    if (!this.get('model')) {
-      return;
-    }
-
-    var start = new Date().getTime();
-    console.log('in service mapper');
-
-    if (json.items) {
-      var result = [];
-      json.items.forEach(function (item) {
-        var finalConfig = jQuery.extend({}, this.config);
-        var finalJson = [];
-        item.host_components = [];
-        item.components.forEach(function (component) {
-          component.host_components.forEach(function (host_component) {
-            host_component.id = host_component.HostRoles.component_name + "_" + host_component.HostRoles.host_name;
-            item.host_components.push(host_component.id);
-          }, this);
-        }, this);
-        item.host_components.sort();
-
-        if (item && item.ServiceInfo && item.ServiceInfo.service_name == "HDFS") {
-          // Change the JSON so that it is easy to map
-          finalJson = this.hdfsMapper(item);
-          result.push(finalJson);
-          App.store.load(App.HDFSService, finalJson);
-        } else if (item && item.ServiceInfo && item.ServiceInfo.service_name == "MAPREDUCE") {
-          finalJson = this.mapreduceMapper(item);
-          result.push(finalJson);
-          App.store.load(App.MapReduceService, finalJson);
-        } else if (item && item.ServiceInfo && item.ServiceInfo.service_name == "HBASE") {
-          finalJson = this.hbaseMapper(item);
-          result.push(finalJson);
-          App.store.load(App.HBaseService, finalJson);
-        } else {
-          result.push(this.parseIt(item, this.config));
-        }
-      }, this);
-
-
-      result = this.sortByOrder(this.get('servicesSortOrder'), result);
-      App.store.loadMany(this.get('model'), result);
-
-      // Host components
-      result = [];
-      json.items.forEach(function(item){
-        item.components.forEach(function(component){
-          var service = component.ServiceComponentInfo.service_name;
-          component.host_components.forEach(function(host_component){
-            var comp = this.parseIt(host_component, this.config3);
-            comp.service_id = service;
-            result.push(comp);
-          }, this)
-        }, this)
-      }, this);
-
-      result.forEach(function(hcJson){
-        this.calculateState(hcJson);
-      }, this);
-
-      App.store.loadMany(this.get('model3'), result);
-    }
-
-    console.log('out service mapper.  Took ' + (new Date().getTime() - start) + 'ms');
-  },
-
-  hdfsMapper: function (item) {
-    var result = [];
-    var finalConfig = jQuery.extend({}, this.config);
-    // Change the JSON so that it is easy to map
-    var hdfsConfig = this.hdfsConfig;
-    item.components.forEach(function (component) {
-      if (component.ServiceComponentInfo && component.ServiceComponentInfo.component_name == "NAMENODE") {
-        item.nameNodeComponent = component;
-        finalConfig = jQuery.extend(finalConfig, hdfsConfig);
-        // Get the live, dead & decommission nodes from string json
-        var liveNodesJson = App.parseJSON(component.ServiceComponentInfo.LiveNodes);
-        var deadNodesJson = App.parseJSON(component.ServiceComponentInfo.DeadNodes);
-        var decommissionNodesJson = App.parseJSON(component.ServiceComponentInfo.DecomNodes);
-        item.live_data_nodes = [];
-        item.dead_data_nodes = [];
-        item.decommission_data_nodes = [];
-        for (var ln in liveNodesJson) {
-          item.live_data_nodes.push(ln);
-        }
-        for (var dn in deadNodesJson) {
-          item.dead_data_nodes.push(dn);
-        }
-        for (var dcn in decommissionNodesJson) {
-          item.decommission_data_nodes.push(dcn);
-        }
-      }
-      if (component.ServiceComponentInfo && component.ServiceComponentInfo.component_name == "SECONDARY_NAMENODE") {
-        item.snameNodeComponent = component;
-      }
-      if (component.ServiceComponentInfo && component.ServiceComponentInfo.component_name == "DATANODE") {
-        if (!item.data_nodes) {
-          item.data_nodes = [];
-        }
-        if (component.host_components) {
-          component.host_components.forEach(function (hc) {
-            item.data_nodes.push(hc.HostRoles.host_name);
-          });
-        }
-      }
-    });
-    // Map
-    var finalJson = this.parseIt(item, finalConfig);
-    finalJson.quick_links = [1, 2, 3, 4];
-
-    return finalJson;
-  },
-  mapreduceMapper: function (item) {
-    // Change the JSON so that it is easy to map
-    var result = [];
-    var finalConfig = jQuery.extend({}, this.config);
-    var mapReduceConfig = this.mapReduceConfig;
-    item.components.forEach(function (component) {
-      if (component.ServiceComponentInfo && component.ServiceComponentInfo.component_name == "JOBTRACKER") {
-        item.jobTrackerComponent = component;
-        finalConfig = jQuery.extend(finalConfig, mapReduceConfig);
-        // Get the live, gray & black nodes from string json
-        item.map_slots = 0;
-        item.reduce_slots = 0;
-        var liveNodesJson = App.parseJSON(component.ServiceComponentInfo.AliveNodes);
-        var grayNodesJson = App.parseJSON(component.ServiceComponentInfo.GrayListedNodes);
-        var blackNodesJson = App.parseJSON(component.ServiceComponentInfo.BlackListedNodes);
-        item.alive_trackers = [];
-        item.gray_list_trackers = [];
-        item.black_list_trackers = [];
-        if (liveNodesJson != null) {
-          liveNodesJson.forEach(function (nj) {
-            item.alive_trackers.push(nj.hostname);
-            if (nj.slots && nj.slots.map_slots)
-              item.map_slots += nj.slots.map_slots;
-            if (nj.slots && nj.slots.map_slots_used)
-              item.map_slots_used += nj.slots.map_slots_used;
-            if (nj.slots && nj.slots.reduce_slots)
-              item.reduce_slots += nj.slots.reduce_slots;
-            if (nj.slots && nj.slots.reduce_slots_used)
-              item.reduce_slots_used += nj.slots.reduce_slots_used;
-          });
-        }
-        if (grayNodesJson != null) {
-          grayNodesJson.forEach(function (nj) {
-            item.gray_list_trackers.push(nj.hostname);
-          });
-        }
-        if (blackNodesJson != null) {
-          blackNodesJson.forEach(function (nj) {
-            item.black_list_trackers.push(nj.hostname);
-          });
-        }
-      }
-      if (component.ServiceComponentInfo && component.ServiceComponentInfo.component_name == "TASKTRACKER") {
-        if (!item.task_trackers) {
-          item.task_trackers = [];
-        }
-        if (component.host_components) {
-          component.host_components.forEach(function (hc) {
-            item.task_trackers.push(hc.HostRoles.host_name);
-          });
-        }
-      }
-    });
-    // Map
-    var finalJson = this.parseIt(item, finalConfig);
-    finalJson.quick_links = [5, 6, 7, 8, 9, 10, 11, 12];
-    return finalJson;
-  },
-  hbaseMapper: function (item) {
-    // Change the JSON so that it is easy to map
-    var result = [];
-    var finalConfig = jQuery.extend({}, this.config);
-    var hbaseConfig = this.hbaseConfig;
-    item.components.forEach(function (component) {
-      if (component.ServiceComponentInfo && component.ServiceComponentInfo.component_name == "HBASE_MASTER") {
-        item.masterComponent = component;
-        finalConfig = jQuery.extend(finalConfig, hbaseConfig);
-        var regionsArray = App.parseJSON(component.ServiceComponentInfo.RegionsInTransition);
-        item.regions_in_transition = regionsArray == null ? 0 : regionsArray.length;
-      }
-      if (component.ServiceComponentInfo && component.ServiceComponentInfo.component_name == "HBASE_REGIONSERVER") {
-        if (!item.region_servers) {
-          item.region_servers = [];
-        }
-        if (component.host_components) {
-          component.host_components.forEach(function (hc) {
-            item.region_servers.push(hc.HostRoles.host_name);
-          });
-        }
-      }
-    });
-    // Map
-    var finalJson = this.parseIt(item, finalConfig);
-    finalJson.quick_links = [13, 14, 15, 16, 17, 18];
-    return finalJson;
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/mappers/status_mapper.js b/branch-1.2/ambari-web/app/mappers/status_mapper.js
deleted file mode 100644
index 2eb0d63..0000000
--- a/branch-1.2/ambari-web/app/mappers/status_mapper.js
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-App.statusMapper = App.QuickDataMapper.create({
-
-  config:{
-    id:'ServiceInfo.service_name',
-    work_status:'ServiceInfo.state'
-  },
-
-  config3:{
-    id:'id',
-    work_status:'HostRoles.state',
-    desired_status: 'HostRoles.desired_state'
-  },
-
-  map:function (json) {
-    var start = new Date().getTime();
-    console.log('in status mapper');
-
-    if (json.items) {
-      var result = {};
-      json.items.forEach(function (item) {
-        item = this.parseIt(item, this.config);
-        result[item.id] = item;
-      }, this);
-
-      var services = App.Service.find();
-      services.forEach(function(service) {
-        var item = result[service.get('id')];
-        if (item) {
-          service.set('workStatus', item.work_status);
-        }
-      });
-
-      //host_components
-      result = {};
-      json.items.forEach(function (item) {
-        item.components.forEach(function (component) {
-          component.host_components.forEach(function (host_component) {
-            host_component.id = host_component.HostRoles.component_name + "_" + host_component.HostRoles.host_name;
-            result[host_component.id] = this.parseIt(host_component, this.config3);
-          }, this)
-        }, this)
-      }, this);
-
-      // console.profile("App.statusMapper.map() profile");
-
-      var hostComponents = App.HostComponent.find();
-
-      hostComponents.forEach(function(hostComponent) {
-        var item = result[hostComponent.get('id')];
-        if (item) {
-         hostComponent.set('workStatus', item.work_status);
-        }
-      });
-
-      // console.profileEnd();
-
-      console.log('out status mapper.  Took ' + (new Date().getTime() - start) + 'ms');
-    }
-  }
-});
diff --git a/branch-1.2/ambari-web/app/mappers/users_mapper.js b/branch-1.2/ambari-web/app/mappers/users_mapper.js
deleted file mode 100644
index 1a476a1..0000000
--- a/branch-1.2/ambari-web/app/mappers/users_mapper.js
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-App.usersMapper = App.QuickDataMapper.create({
-  model : App.User,
-  config : {
-    id : 'Users.user_name',
-    user_name : 'Users.user_name',
-    roles : 'Users.roles',
-    is_ldap: 'Users.ldap_user',
-    admin: 'Users.admin'
-  },
-  map: function (json) {
-    var self = this;
-    json.items.forEach(function (item) {
-      var result = [];
-      if(!App.User.find().someProperty("userName", item.Users.user_name))
-      {
-        if(item.Users.roles.indexOf("admin") >= 0){
-          item.Users.admin = true;
-        }else{
-          item.Users.admin = false;
-        }
-        result.push(self.parseIt(item, self.config));
-        App.store.loadMany(self.get('model'), result);
-      }
-    });
-  }
-});
diff --git a/branch-1.2/ambari-web/app/messages.js b/branch-1.2/ambari-web/app/messages.js
deleted file mode 100644
index 81184f2..0000000
--- a/branch-1.2/ambari-web/app/messages.js
+++ /dev/null
@@ -1,653 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-Em.I18n.translations = {
-
-  'app.name':'Ambari',
-  'app.reloadPopup.link': 'Reload Page',
-  'app.reloadPopup.text': 'Trying to connect to server...',
-  'app.reloadPopup.header': 'Reload Page',
-
-  'app.loadingPlaceholder': 'Loading...',
-  'app.sighout':'Sign out',
-
-  'apply':'apply',
-  'and':'and',
-  'none':'none',
-  'all':'all',
-  'minimum':'minimum',
-  'from':'From',
-  'to':'To',
-
-  'common.learnMore':'Learn more',
-  'common.back':'Back',
-  'common.prev':'Prev',
-  'common.next':'Next',
-  'common.host':'Host',
-  'common.group':'Group',
-  'common.progress':'Progress',
-  'common.status':'Status',
-  'common.action':'Action',
-  'common.remove':'Remove',
-  'common.retry':'Retry',
-  'common.show':'Show',
-  'common.hide':'Hide',
-  'common.cancel':'Cancel',
-  'common.apply':'Apply',
-  'common.service': 'Service',
-  'common.version':'Version',
-  'common.description':'Description',
-  'common.client':'Client',
-  'common.regionServer':'RegionServer',
-  'common.taskTracker':'TaskTracker',
-  'common.dataNode':'DataNode',
-  'common.print':'Print',
-  'common.deploy':'Deploy',
-  'common.message':'Message',
-  'common.tasks':'Tasks',
-  'common.open':'Open',
-  'common.copy':'Copy',
-  'common.complete':'Complete',
-  'common.metrics':'Metrics',
-  'common.timeRange':'Time Range',
-  'common.name':'Name',
-  'common.ipAddress':'IP Address',
-  'common.cpu':'CPU',
-  'common.ram':'RAM',
-  'common.disk':'Disk',
-  'common.diskUsage':'Disk Usage',
-  'common.loadAvg':'Load Avg',
-  'common.components':'Components',
-  'common.quickLinks':'Quick Links',
-  'common.save':'Save',
-  'common.servers':'Servers',
-  'common.clients':'Clients',
-  'common.user': 'User',
-  'common.os':'OS',
-  'common.memory':'Memory',
-  'common.maximum':'Maximum',
-  'common.start':'Start',
-  'common.stop':'Stop',
-  'common.decommission':'Decommission',
-  'common.recommission':'Recommission',
-
-  'popup.highlight':'click to highlight',
-
-  'login.header':'Sign in',
-  'login.username':'Username',
-  'login.password':'Password',
-  'login.loginButton':'Sign in',
-  'login.error':'Invalid username/password combination.',
-  
-  'graphs.noData.title': 'No Data',
-  'graphs.noData.message': 'No data was available. One possible cause is an inaccessible Ganglia service.',
-  'graphs.noDataAtTime.message': 'No data available for the selected time period.',
-  'graphs.error.title': 'Error',
-  'graphs.error.message': 'There was a problem getting data for the chart ({0}: {1})',
-  'graphs.timeRange.hour': 'Last 1 hour',
-  'graphs.timeRange.twoHours': 'Last 2 hours',
-  'graphs.timeRange.fourHours': 'Last 4 hours',
-  'graphs.timeRange.twelveHours': 'Last 12 hours',
-  'graphs.timeRange.day': 'Last 24 hours',
-  'graphs.timeRange.week': 'Last 1 week',
-  'graphs.timeRange.month': 'Last 1 month',
-  'graphs.timeRange.year': 'Last 1 year',
-
-  'users.userName.validationFail': 'Value must contain only letters and digits and must start with a letter',
-
-  'services.nagios.description':'Nagios Monitoring and Alerting system',
-  'services.ganglia.description':'Ganglia Metrics Collection system',
-  'services.hdfs.description':'Apache Hadoop Distributed File System',
-  'services.mapreduce.description':'Apache Hadoop Distributed Processing Framework',
-  'services.sqoop.description':'Tool for transferring bulk data between Apache Hadoop and structured data stores such as relational databases',
-  'services.pig.description':'Scripting platform for analyzing large datasets',
-  'services.hive.description':'Data warehouse system for ad-hoc queries & analysis of large datasets and table & storage management service',
-  'services.oozie.description':'System for workflow coordination and execution of Apache Hadoop jobs',
-  'services.zookeeper.description':'Centralized service which provides highly reliable distributed coordination',
-  'services.hbase.description':'Non-relational distributed database and centralized service for configuration management & synchronization',
-  'services.hive.databaseComponent':'Database Server',
-
-  'services.alerts.head':'You have {0} critical alert notification(s).',
-  'services.alerts.OK.timePrefix': 'OK for ',
-  'services.alerts.WARN.timePrefix': 'WARN for ',
-  'services.alerts.CRIT.timePrefix': 'CRIT for ',
-  'services.alerts.headingOfList': 'Alerts and Health Checks',
-  'services.alerts.goToService': 'Go to Service',
-  'services.alerts.goToNagios': 'Go to Nagios Web UI',
-
-  'topnav.logo.href':'http://incubator.apache.org/ambari/',
-  'topnav.help.href':'https://cwiki.apache.org/confluence/display/AMBARI/Ambari',
-
-  'installer.header':'Cluster Install Wizard',
-  'installer.navigation.warning.header':'Navigation Warning',
-
-  'installer.mockData':'mockData',
-  'installer.pollData':'pollData',
-  'installer.noHostsAssigned':'No hosts assigned',
-  'installer.slaveComponentHosts.selectHosts':'select hosts for this group',
-  'installer.slaveComponentHostsPopup.header':'Select which hosts should belong to which {0} group',
-
-  'installer.controls.serviceConfigPopover.title':'{0}<br><small>{1}</small>',
-  'installer.controls.serviceConfigMultipleHosts.other':'1 other',
-  'installer.controls.serviceConfigMultipleHosts.others':'{0} others',
-  'installer.controls.serviceConfigMasterHosts.header':'{0} Hosts',
-  'installer.controls.addSlaveComponentGroupButton.title':'Add a {0} Group',
-  'installer.controls.addSlaveComponentGroupButton.content':'If you need different settings on certain {0}s, you can add a {1} group.<br>All {2}s within the same group will have the same set of settings.  You can create multiple groups.',
-  'installer.controls.slaveComponentChangeGroupName.error':'A group with this name already exists',
-
-  'installer.step1.header':'Welcome',
-  'installer.step1.body.header':'Welcome to Apache Ambari!',
-  'installer.step1.body':'Ambari makes it easy to install, manage, and monitor Hadoop clusters.<br>' +
-    'We will walk you through the cluster installation process with this step-by-step wizard.',
-  'installer.step1.clusterName':'Name your cluster',
-  'installer.step1.clusterName.tooltip.title':'Cluster Name',
-  'installer.step1.clusterName.tooltip.content':'Enter a unique cluster name. Cluster name cannot be changed later.',
-  'installer.step1.clusterName.error.required':'Cluster Name is required',
-  'installer.step1.clusterName.error.whitespaces':'Cluster Name cannot contain white spaces',
-  'installer.step1.clusterName.error.specialChar':'Cluster Name cannot contain special characters',
-
-  'installer.step2.header':'Install Options',
-  'installer.step2.body':'Enter the list of hosts to be included in the cluster and provide your SSH key.',
-  'installer.step2.targetHosts':'Target Hosts',
-  'installer.step2.targetHosts.info':'Enter a list of host names, one per line',
-  'installer.step2.hostPattern.tooltip.title':'Pattern Expressions',
-  'installer.step2.hostPattern.tooltip.content':'You can use pattern expressions to specify a number of target hosts. For example, to specify host01.domain thru host10.domain, enter host[01-10].domain in the target hosts textarea.',
-  'installer.step2.hostName.error.required':'You must specify at least one host name',
-  'installer.step2.hostName.error.already_installed':'All these hosts are already part of the cluster',
-  'installer.step2.hostName.error.notRequired':'Host Names will be ignored if not using SSH to automatically configure hosts',
-  'installer.step2.hostName.error.invalid':'Invalid Host Name(s)',
-  'installer.step2.hostName.pattern.header':'Host name pattern expressions',
-  'installer.step2.sshKey':'Host Registration Information',
-  'installer.step2.sshKey.error.required':'SSH Private Key is required',
-  'installer.step2.passphrase.error.match':'Passphrases do not match',
-  'installer.step2.manualInstall.label':'Do not use SSH to automatically configure hosts ',
-  'installer.step2.manualInstall.info':'By not using SSH to connect to the target hosts, you must manually install the ' +
-    'Ambari Agent on each host in order for the wizard to perform the necessary configurations and software installs.',
-  'installer.step2.advancedOption':'Advanced Options',
-  'installer.step2.repoConf':'Software Repository Configuration File Path',
-  'installer.step2.advancedOptions.header':'Advanced Options',
-  'installer.step2.localRepo.label_use':'Use a',
-  'installer.step2.localRepo.label_instead':'instead of downloading software packages from the Internet',
-  'installer.step2.localRepo.error.required':'Local repository file path is required',
-  'installer.step2.localRepo.tooltip.title':'Local Software Repository',
-  'installer.step2.localRepo.tooltip.content': 'The cluster install requires access to the Internet to fetch software ' +
-    'from a remote repository. In some cases, adequate bandwidth is not available and you want to prevent downloading ' +
-    'packages from the remote repository over and over again. Other times, Internet access is not available from the ' +
-    'hosts in your cluster. In these situations, you must set up a version of the repository that your machines can ' +
-    'access locally and this is called a <b>Local Software Repository</b>',
-  'installer.step2.javaHome.label' : 'Path to 64-bit JDK',
-  'installer.step2.javaHome.tooltip.title' : 'JAVA_HOME',
-  'installer.step2.javaHome.tooltip.content' : 'Path to 64-bit JAVA_HOME. /usr/jdk/jdk1.6.0_31 is the default used by Ambari. You can override this to a specific path that contains the JDK. <br/> Note: the path must be valid on <b>ALL</b> hosts in your cluster.',
-  'installer.step2.useSsh.provide' : 'Provide your',
-  'installer.step2.useSsh.provide_id_rsa' : '(id_rsa for root) and use SSH to automatically register hosts',
-  'installer.step2.useSsh.tooltip.title':'SSH Private Key',
-  'installer.step2.useSsh.tooltip.content':'The <b>SSH Private Key File</b> is used for the root access to the target hosts in your cluster.',
-  'installer.step2.manualInstall.perform':'Perform',
-  'installer.step2.manualInstall.perform_on_hosts':'on hosts and do not use SSH',
-  'installer.step2.manualInstall.tooltip.title':'manual registration',
-  'installer.step2.manualInstall.tooltip.content':'Manually registering the Ambari Agent on each host eliminates the need for SSH and should be performed prior to continuing cluster installation.',
-  'installer.step2.manualInstall.popup.header':'Before You Proceed',
-  'installer.step2.manualInstall.popup.body':'You must install Ambari Agents on each host you want to manage before you proceed.',
-  'installer.step2.orUse':'Or use',
-  'installer.step2.registerAndConfirm':'Register and Confirm',
-
-  'installer.step3.header':'Confirm Hosts',
-  'installer.step3.body':'Registering your hosts.<br>' +
-    'Please confirm the host list and remove any hosts that you do not want to include in the cluster.',
-  'installer.step3.hostLog.popup.header':'Registration log for {0}',
-  'installer.step3.hosts.remove.popup.header':'Remove Hosts',
-  'installer.step3.hosts.remove.popup.body':'Are you sure you want to remove the selected host(s)?',
-  'installer.step3.hosts.retry.popup.header':'Retry Host Discovery',
-  'installer.step3.hosts.retry.popup.body':'Are you sure you want to retry discovery of the selected host(s)?',
-  'installer.step3.hostInformation.popup.header':'Error in retrieving host Information',
-  'installer.step3.hostInformation.popup.body' : 'All bootstrapped hosts registered, but CPU and memory information could not be retrieved',
-  'installer.step3.hostWarningsPopup.details':'Show Details',
-  'installer.step3.hostWarningsPopup.directoriesAndFiles':'DIRECTORIES & FILES',
-  'installer.step3.hostWarningsPopup.packages':'PACKAGES',
-  'installer.step3.hostWarningsPopup.processes':'PROCESSES',
-  'installer.step3.hostWarningsPopup.noProcesses':'No processes to display',
-  'installer.step3.hosts.noHosts':'No hosts to display',
-  'installer.step3.warnings.popup.header':'Host Checks',
-  'installer.step3.warnings.description':'Some warnings were encountered while performing checks against the above hosts.',
-  'installer.step3.warnings.linkText':'Click here to see the warnings.',
-  'installer.step3.noWarnings.linkText':'Click here to see the check results.',
-  'installer.step3.warnings.noWarnings':'All host checks were successful.',
-  'installer.step3.warnings.updateChecks.success':'Host Checks successfully updated',
-  'installer.step3.warnings.updateChecks.failed':'Host Checks update failed',
-  'installer.step3.removeSelected':'Remove Selected',
-  'installer.step3.retryFailed':'Retry Failed',
-
-  'installer.step4.header':'Choose Services',
-  'installer.step4.body':'Choose which services you want to install on your cluster.',
-  'installer.step4.mapreduceCheck.popup.header':'MapReduce Needed',
-  'installer.step4.mapreduceCheck.popup.body':'You did not select MapReduce, but it is needed by other services you selected.  We will automatically add MapReduce.  Is this OK?',
-  'installer.step4.monitoringCheck.popup.header':'Limited Functionality Warning',
-  'installer.step4.monitoringCheck.popup.body':'You did not select Nagios and/or Ganglia.  If both are not selected, monitoring and alerts will not function properly.  Is this OK?',
-
-  'installer.step5.header':'Assign Masters',
-  'installer.step5.attention':' hosts not running master services',
-  'installer.step5.body':'Assign master components to hosts you want to run them on.',
-  'installer.step5.body.hive':'<i class="icon-asterisks">&#10037</i> HiveServer2, Hive Metastore, and WebHCat Server will be co-hosted on the same server.',
-
-  'installer.step6.header':'Assign Slaves and Clients',
-  'installer.step6.body':'Assign slave and client components to hosts you want to run them on.<br/>Hosts that are assigned master components are shown with <i class=icon-asterisks>&#10037</i>. <br/>&quot;Client&quot; will install ',
-  'installer.step6.error.mustSelectOne':'You must assign at least one host to each.',
-  'installer.step6.error.mustSelectOneForHost':'You must assign at least one slave/client component to each.',
-  'installer.step6.wizardStep6Host.title':'master components hosted on {0}',
-
-  'installer.step7.header':'Customize Services',
-  'installer.step7.body':'We have come up with recommended configurations for the services you selected. Customize them as you see fit.',
-  'installer.step7.attentionNeeded':'<b>Attention:</b> Some configurations need your attention before you can proceed.',
-
-  'installer.step8.header':'Review',
-  'installer.step8.body':'Please review the configuration before installation',
-  'installer.step8.deployPopup.message':'Preparing to Deploy: {0} of {1} tasks completed.',
-
-  'installer.step9.header':'Install, Start and Test',
-  'installer.step9.body':'Please wait while the selected services are installed and started.',
-  'installer.step9.status.success':'Successfully installed and started the services.',
-  'installer.step9.status.warning':'Installed and started the services with some warnings.',
-  'installer.step9.status.failed':'Failed to install/start the services.',
-  'installer.step9.host.status.success':'Success',
-  'installer.step9.host.status.warning':'Warnings encountered',
-  'installer.step9.host.status.failed':'Failures encountered',
-  'installer.step9.host.status.nothingToInstall':'Waiting (Nothing to install)',
-  'installer.step9.hostLog.popup.header':'Tasks executed on ',
-  'installer.step9.hostLog.popup.categories.all':'All',
-  'installer.step9.hostLog.popup.categories.pending':'Queued / Pending',
-  'installer.step9.hostLog.popup.categories.in_progress':'In Progress',
-  'installer.step9.hostLog.popup.categories.failed':'Failed',
-  'installer.step9.hostLog.popup.categories.completed':'Success',
-  'installer.step9.hostLog.popup.categories.aborted':'Cancelled',
-  'installer.step9.hostLog.popup.categories.timedout':'Timed Out',
-  'installer.step9.hostLog.popup.noTasksToShow':'No tasks to show',
-  'installer.step9.overallProgress':'{0} % overall',
-
-  'installer.step10.header':'Summary',
-  'installer.step10.body':'Here is the summary of the install process.',
-  'installer.step10.nagiosRestartRequired':'<b>Important!</b> Restarting Nagios service is required for the alerts and notifications to work properly.  After clicking on the Complete button to dismiss this wizard, go to Services -> Nagios to restart the Nagios service.',
-
-  'form.create':'Create',
-  'form.save':'Save',
-  'form.cancel':'Cancel',
-  'form.password':'Password',
-  'form.passwordRetype':'Retype Password',
-  'form.saveSuccess':'Successfully saved.',
-  'form.saveError':'Sorry, errors occurred.',
-
-  'form.validator.invalidIp':'Please enter a valid IP address',
-
-  'admin.advanced.title':'Advanced',
-  'admin.advanced.caution':'This section is for advanced users only.<br/>Proceed with caution.',
-  'admin.advanced.button.uninstallIncludingData':'Uninstall cluster including all data.',
-  'admin.advanced.button.uninstallKeepData':'Uninstall cluster but keep data.',
-
-  'admin.advanced.popup.header':'Uninstall Cluster',
-
-  /*'admin.advanced.popup.text':'Uninstall Cluster',*/
-
-  'admin.audit.grid.date':"Date/Time",
-  'admin.audit.grid.category':"Category",
-  'admin.audit.grid.operationName':"Operation",
-  'admin.audit.grid.performedBy':"Performed By",
-  'admin.audit.grid.service':"Category",
-
-  'admin.authentication.form.method.database':'Use Ambari Database to authenticate users',
-  'admin.authentication.form.method.ldap':'Use LDAP/Active Directory to authenticate',
-  'admin.authentication.form.primaryServer':'Primary Server',
-  'admin.authentication.form.secondaryServer':'Secondary Server',
-  'admin.authentication.form.useSsl':'Use SSL',
-  'admin.authentication.form.bind.anonymously':"Bind Anonymously",
-  'admin.authentication.form.bind.useCredentials':"Use Credentials To Bind",
-  'admin.authentication.form.bindUserDN':'Bind User DN',
-  'admin.authentication.form.searchBaseDN':'Search Base DN',
-  'admin.authentication.form.usernameAttribute':'Username Attribute',
-
-  'admin.authentication.form.userDN':'User DN',
-  'admin.authentication.form.password':'Password',
-  'admin.authentication.form.configurationTest':'Configuration Test',
-  'admin.authentication.form.testConfiguration':'Test Configuration',
-
-  'admin.authentication.form.test.success':'The configuration passes the test',
-  'admin.authentication.form.test.fail':'The configuration fails the test',
-
-  'admin.security.title':'Kerberos Security has not been enabled on this cluster.',
-  'admin.security.button.enable':'Enable Kerberos Security on this cluster',
-
-  'admin.users.ldapAuthentionUsed':'LDAP Authentication is being used to authenticate users',
-  'admin.users.delete.yourself.message':'You can\'t delete yourself',
-  'admin.users.delete.yourself.header':'Delete Warning',
-
-  'admin.users.delete.header':'Delete {0}',
-
-  'admin.users.addButton':'Add Local User',
-  'admin.users.editButton': 'Edit Local User',
-  'admin.users.delete':'delete',
-  'admin.users.edit':'edit',
-  'admin.users.privileges':'Admin',
-  'admin.users.type':'Type',
-  'admin.users.action':'Action',
-  'admin.users.password':'Password',
-  'admin.users.passwordRetype':'Retype Password',
-  'admin.users.username':'Username',
-  'admin.users.createSuccess': 'User successfully created.',
-  'admin.users.createError': 'An error occurred while creating the user.',
-  'admin.users.createError.passwordValidation': 'Passwords do not match',
-  'admin.users.createError.userNameExists': 'A user with the same name already exists',
-  'admin.users.editError.requiredField': 'This is required',
-
-  'admin.confirmUninstall':'Confirm Uninstall',
-
-  'question.sure':'Are you sure?',
-  'yes':'Yes',
-  'no':'No',
-
-  'services.service.start':'Start',
-  'services.service.stop':'Stop',
-  'services.service.confirmation.header':'Confirmation',
-  'services.service.confirmation.body':'Are you sure?',
-  'services.service.summary.version':'Version',
-  'services.service.summary.nameNode':'NameNode Web UI',
-  'services.service.summary.nameNodeUptime':'NameNode Uptime',
-  'services.service.summary.nameNodeHeap':'NameNode Heap',
-  'services.service.summary.pendingUpgradeStatus':'Upgrade Status',
-  'services.service.summary.pendingUpgradeStatus.pending':'Pending upgrade',
-  'services.service.summary.pendingUpgradeStatus.notPending':'No pending upgrade',
-  'services.service.summary.safeModeStatus':'Safe Mode Status',
-  'services.service.summary.safeModeStatus.inSafeMode':'In safe mode',
-  'services.service.summary.safeModeStatus.notInSafeMode':'Not in safe mode',
-  'services.service.summary.dataNodes':'DataNodes',
-  'services.service.summary.diskCapacity':'HDFS Disk Capacity',
-  'services.service.summary.blocksTotal':'Blocks (total)',
-  'services.service.summary.blockErrors':'Block Errors',
-  'services.service.summary.totalFiles':'Total Files + Dirs',
-  'services.service.summary.jobTracker':'JobTracker',
-  'services.service.summary.jobTrackerWebUI':'JobTracker Web UI',
-  'services.service.summary.jobTrackerUptime':'JobTracker Uptime',
-  'services.service.summary.trackersLiveTotal':'Trackers',
-  'services.service.summary.trackersBlacklistGraylist':'Trackers',
-  'services.service.summary.jobTrackerHeap':'JobTracker Heap',
-  'services.service.summary.totalSlotsCapacity':'Total Slots Capacity',
-  'services.service.summary.totalJobs':'Total Jobs',
-  'services.service.summary.currentSlotUtiliMaps':'Map Slots',
-  'services.service.summary.currentSlotUtiliReduces':'Reduce Slots',
-  'services.service.summary.tasksMaps':'Tasks: Maps',
-  'services.service.summary.tasksReduces':'Tasks: Reduces',
-  'services.service.summary.hbaseMaster':'HBase Master Web UI',
-  'services.service.summary.regionServerCount':'Region Server Count',
-  'services.service.summary.regionInTransition':'Regions In Transition',
-  'services.service.summary.masterStarted':'Master Started',
-  'services.service.summary.masterActivated':'Master Activated',
-  'services.service.summary.averageLoad':'Average Load',
-  'services.service.summary.masterHeap':'Master Heap',
-  'services.service.summary.moreStats':'more stats here',
-  'services.service.summary.clientCount': '{0} Client Hosts',
-  'services.service.actions.run.rebalancer':'Run Rebalancer',
-  'services.service.actions.run.compaction':'Run Compaction',
-  'services.service.actions.run.smoke':'Run Smoke Test',
-  'services.service.actions.maintenance':'Maintenance',
-  'services.service.summary.unknown':'unknown',
-  'services.service.summary.notRunning':'Not Running',
-  'services.service.summary.notAvailable':'n/a',
-
-  'services.service.info.metrics.hbase.clusterRequests':'Cluster Requests',
-  'services.service.info.metrics.hbase.hlogSplitSize':'HLog Split Size',
-  'services.service.info.metrics.hbase.hlogSplitTime':'HLog Split Time',
-  'services.service.info.metrics.hbase.regionServerQueueSize':'RegionServer Queue Size',
-  'services.service.info.metrics.hbase.regionServerRegions':'RegionServer Regions',
-  'services.service.info.metrics.hbase.regionServerRequests':'Cumulative Requests',
-
-  'services.service.info.metrics.hdfs.blockStatus':'Block Status',
-  'services.service.info.metrics.hdfs.fileOperations':'File Operations',
-  'services.service.info.metrics.hdfs.gc':'Garbage Collection',
-  'services.service.info.metrics.hdfs.io':'HDFS I/O',
-  'services.service.info.metrics.hdfs.jvmHeap':'JVM Memory Status',
-  'services.service.info.metrics.hdfs.jvmThreads':'JVM Thread Status',
-  'services.service.info.metrics.hdfs.rpc':'RPC',
-  'services.service.info.metrics.hdfs.spaceUtilization':'Total Space Utilization',
-
-  'services.service.info.metrics.mapreduce.gc':'Garbage Collection',
-  'services.service.info.metrics.mapreduce.jobsStatus':'Jobs Status',
-  'services.service.info.metrics.mapreduce.jvmHeap':'JVM Memory Status',
-  'services.service.info.metrics.mapreduce.jvmThreads':'JVM Thread Status',
-  'services.service.info.metrics.mapreduce.mapSlots':'Map Slots Utilization',
-  'services.service.info.metrics.mapreduce.reduceSlots':'Reduce Slots Utilization',
-  'services.service.info.metrics.mapreduce.rpc':'RPC',
-  'services.service.info.metrics.mapreduce.tasksRunningWaiting':'Tasks (Running/Waiting)',
-
-  'services.service.info.menu.summary':'Summary',
-  'services.service.info.menu.configs':'Configs',
-  'services.service.info.summary.hostsRunningMonitor':'{0} hosts running monitor',
-  'services.service.info.summary.hostRunningMonitor':'1 host running monitor',
-  'services.service.info.summary.serversHostCount':'{0} more',
-  'services.service.info.summary.nagiosWebUI':'Nagios Web UI',
-  'services.service.info.summary.nagios.noAlerts':'No alerts',
-  'services.service.info.summary.nagios.alerts':'Nagios service required for viewing alerts',
-
-  'services.add.header':'Add Service Wizard',
-  'services.service.add':'Add Service',
-
-
-  'hosts.host.add':'Add New Hosts',
-  'hosts.host.back':'Back to Hosts',
-  'hosts.table.noHosts':'No hosts to display',
-  'hosts.table.Search': 'Search:',
-
-  'hosts.host.metrics.cpu':'CPU Usage',
-  'hosts.host.metrics.disk':'Disk Usage',
-  'hosts.host.metrics.load':'Load',
-  'hosts.host.metrics.memory':'Memory Usage',
-  'hosts.host.metrics.network':'Network Usage',
-  'hosts.host.metrics.processes':'Processes',
-
-  'hosts.host.summary.header':'Summary',
-  'hosts.host.summary.hostname':'Hostname',
-  'hosts.host.summary.agentHeartbeat':'Agent <br/> Heartbeat',
-  'hosts.host.summary.hostMetrics':'Host Metrics',
-
-  'hosts.host.details.deleteHost':'Delete Host',
-
-  'host.host.componentFilter.master':'Master Components',
-  'host.host.componentFilter.slave':'Slave Components',
-  'host.host.componentFilter.client':'Client Components',
-
-  'hosts.host.start.popup.header':'Confirmation',
-  'hosts.host.stop.popup.header':'Confirmation',
-  'hosts.host.start.popup.body':'Are you sure?',
-  'hosts.host.stop.popup.body':'Are you sure?',
-  'hosts.host.alert.noAlerts':'No alerts',
-  'hosts.host.alert.noAlerts.message':'There are no alerts for this host.',
-  'hosts.host.healthStatus.heartBeatNotReceived':'The server has not received a heartbeat from this host for more than 3 minutes.',
-  'hosts.host.healthStatus.mastersDown':"The following master components are down:\n",
-  'hosts.host.healthStatus.slavesDown':"The following slave components are down:\n",
-  'hosts.decommission.popup.body':'Are you sure?',
-  'hosts.decommission.popup.header':'Confirmation',
-  'hosts.delete.popup.body':'Are you sure?',
-  'hosts.delete.popup.header':'Confirmation',
-  'hosts.cant.do.popup.header':'Operation not allowed',
-  'hosts.cant.do.popup.masterList.body':'You cannot delete this host because it is hosting the following master services: {0}.',
-  'hosts.cant.do.popup.workingList.body':'You cannot delete this host because the following slave services are not fully stopped or decommissioned: {0}.',
-  'hosts.add.header':'Add Host Wizard',
-  'hosts.assignRack':'Assign Rack',
-
-  'charts.horizon.chart.showText':'show',
-  'charts.horizon.chart.hideText':'hide',
-  'charts.horizon.chart.attributes.cpu':'CPU',
-  'charts.horizon.chart.attributes.memory':'Memory',
-  'charts.horizon.chart.attributes.network':'Network',
-  'charts.horizon.chart.attributes.io':'I/O',
-
-  'charts.heatmap.selectMetric':'Select Metric...',
-
-  'charts.heatmap.category.host':'Host',
-  'charts.heatmap.item.host.memory':'Memory Used',
-  'charts.heatmap.item.host.disk':'Disk Space Used',
-  'charts.heatmap.item.host.process':'Total Running Processes',
-  'charts.heatmap.category.hdfs':'HDFS',
-  'charts.heatmap.category.mapreduce': 'MapReduce',
-  'charts.heatmap.unknown': 'Unknown',
-
-  'metric.notFound':'no items found',
-  'metric.default':'combined',
-  'metric.cpu':'cpu',
-  'metric.memory':'disk used',
-  'metric.network':'network',
-  'metric.io':'io',
-  'metric.more':'more',
-  'metric.more.cpu':'CPU',
-  'metric.more.disk':'Disk',
-  'metric.more.load':'Load',
-  'metric.more.memory':'Memory',
-  'metric.more.network':'Network',
-  'metric.more.process':'Process',
-
-  'dashboard.clusterMetrics':'Cluster Metrics',
-
-  'dashboard.clusterMetrics.cpu':'CPU Usage',
-  'dashboard.clusterMetrics.load':'Cluster Load',
-  'dashboard.clusterMetrics.memory':'Memory Usage',
-  'dashboard.clusterMetrics.network':'Network Usage',
-
-
-  'dashboard.services':'Services',
-  'dashboard.services.hosts':'Hosts',
-  'dashboard.services.uptime':'{0}',
-  'dashboard.services.hdfs.summary':'{0} of {1} nodes live, {2}% capacity used',
-  'dashboard.services.hdfs.nanmenode':'NameNode',
-  'dashboard.services.hdfs.snanmenode':'Secondary NameNode',
-  'dashboard.services.hdfs.capacity':'HDFS Disk Capacity',
-  'dashboard.services.hdfs.capacityUsed':'{0} / {1} ({2}% used)',
-  'dashboard.services.hdfs.totalFilesAndDirs':'Total Files + Directories',
-  'dashboard.services.hdfs.datanodes':'DataNodes',
-  'dashboard.services.hdfs.datanodecounts':'DataNodes Status',
-  'dashboard.services.hdfs.version':'Version',
-  'dashboard.services.hdfs.nameNodeWebUI':'NameNode Web UI',
-  'dashboard.services.hdfs.nodes.live':'live',
-  'dashboard.services.hdfs.nodes.dead':'dead',
-  'dashboard.services.hdfs.nodes.decom':'decommissioning',
-  'dashboard.services.hdfs.nodes.uptime':'NameNode Uptime',
-  'dashboard.services.hdfs.nodes.heap':'NameNode Heap',
-  'dashboard.services.hdfs.nodes.heapUsed':'{0} / {1} ({2}% used)',
-  'dashboard.services.hdfs.chart.label':'Capacity (Used/Total)',
-  'dashboard.services.hdfs.blockErrors':'{0} corrupt / {1} missing / {2} under replicated',
-
-  'dashboard.services.mapreduce.summary':'{0} of {1} trackers live, {2} jobs running, {3} jobs waiting',
-  'dashboard.services.mapreduce.taskTrackers':'TaskTrackers',
-  'dashboard.services.mapreduce.taskTrackerCounts':'TaskTrackers Status',
-  'dashboard.services.mapreduce.trackers':'Trackers',
-  'dashboard.services.mapreduce.nodes.blacklist':'blacklist',
-  'dashboard.services.mapreduce.nodes.graylist':'graylist',
-  'dashboard.services.mapreduce.slotCapacity':'Total Slots Capacity',
-  'dashboard.services.mapreduce.trackersSummary':'{0} live / {1} total',
-  'dashboard.services.mapreduce.jobs':'Total Jobs',
-  'dashboard.services.mapreduce.jobsSummary':'{0} submitted / {1} completed',
-  'dashboard.services.mapreduce.mapSlots':'Map Slots',
-  'dashboard.services.mapreduce.mapSlotsSummary':'{0} occupied / {1} reserved',
-  'dashboard.services.mapreduce.reduceSlots':'Reduce Slots',
-  'dashboard.services.mapreduce.tasks.maps':'Tasks: Maps',
-  'dashboard.services.mapreduce.tasks.reduces':'Tasks: Reduces',
-  'dashboard.services.mapreduce.reduceSlotsSummary':'{0} occupied / {1} reserved',
-  'dashboard.services.mapreduce.tasksSummary':'{0} running / {1} waiting',
-  'dashboard.services.mapreduce.slotCapacitySummary':'{0} maps / {1} reduces / {2} avg per node',
-  'dashboard.services.mapreduce.jobTrackerHeap':'JobTracker Heap',
-  'dashboard.services.mapreduce.jobTrackerHeapSummary':'{0} of {1} ({2}% used)',
-  'dashboard.services.mapreduce.jobTrackerUptime':'JobTracker Uptime',
-  'dashboard.services.mapreduce.chart.label':'Jobs Running',
-
-  'dashboard.services.hbase.summary':'{0} region servers with {1} average load',
-  'dashboard.services.hbase.masterServer':'HBase Master',
-  'dashboard.services.hbase.masterServerHeap':'Master Heap',
-  'dashboard.services.hbase.masterServerHeap.summary':'{0} / {1} ({2}% used)',
-  'dashboard.services.hbase.masterServerUptime':'Master Server Uptime',
-  'dashboard.services.hbase.averageLoad':'Average Load',
-  'dashboard.services.hbase.averageLoadPerServer':'{0} regions per Region Server',
-  'dashboard.services.hbase.regionServers':'Region Servers',
-  'dashboard.services.hbase.regionServersSummary':'{0} live / {1} total',
-  'dashboard.services.hbase.chart.label':'Request Count',
-  'dashboard.services.hbase.version':'Version',
-  'dashboard.services.hbase.masterWebUI':'Master Web UI',
-  'dashboard.services.hbase.regions.transition':'Regions In Transition',
-  'dashboard.services.hbase.masterStarted':'Master Started',
-  'dashboard.services.hbase.masterActivated':'Master Activated',
-
-  'dashboard.services.hive.client': '{0} Hive Client',
-  'dashboard.services.hive.clients': '{0} Hive Clients',
-
-  'dashboard.services.oozie.client': '{0} Oozie Client',
-  'dashboard.services.oozie.clients': '{0} Oozie Clients',
-  'dashboard.services.oozie.webUi': 'Oozie Web UI',
-
-  'dashboard.services.ganglia.webUi': 'Ganglia Web UI',
-  'dashboard.services.ganglia.monitors': 'Ganglia Monitors',
-
-  'dashboard.services.zookeeper.prefix': '{0} of',
-  'dashboard.services.zookeeper.title': '{0} ZooKeepers',
-  'dashboard.services.zookeeper.postfix': 'running',
-
-  'dashboard.services.configs.popup.stopService.header':'Stop service',
-  'dashboard.services.configs.popup.stopService.body' : 'Service needs to be stopped for reconfiguration',
-  'dashboard.services.configs.popup.restartService.header' : 'Restart service',
-  'dashboard.services.configs.popup.restartService.body' : 'Service needs to be restarted for reconfiguration',
-  'timeRange.presets.1hour':'1h',
-  'timeRange.presets.12hour':'12h',
-  'timeRange.presets.1day':'1d',
-  'timeRange.presets.1week':'1wk',
-  'timeRange.presets.1month':'1mo',
-  'timeRange.presets.1year':'1yr',
-
-  'apps.item.dag.job': 'Job',
-  'apps.item.dag.jobId': 'Job Id',
-  'apps.item.dag.status': 'Status',
-  'apps.item.dag.maps': 'Maps',
-  'apps.item.dag.reduces': 'Reduces',
-  'apps.item.dag.input': 'Input',
-  'apps.item.dag.output': 'Output',
-  'apps.item.dag.duration': 'Duration',
-
-  'apps.avgTable.avg': 'Avg',
-  'apps.avgTable.min': 'Min',
-  'apps.avgTable.max': 'Max',
-  'apps.avgTable.jobs': 'Jobs',
-  'apps.avgTable.input': 'Input',
-  'apps.avgTable.output': 'Output',
-  'apps.avgTable.duration': 'Duration',
-  'apps.avgTable.oldest': 'Oldest',
-  'apps.avgTable.mostRecent': 'Most Recent',
-  'apps.filters.all': 'All',
-  'apps.filters.filtered': 'Filtered',
-  'apps.filters.clearFilters': 'Clear filters',
-  'apps.filters.paginationInfo': '{0} - {1} of {2}',
-  'apps.filters.customRunDate':'Run Date custom filter',
-  'apps.filters.nothingToShow': 'No jobs to display',
-  'apps.dagCharts.popup':'Job Charts',
-  'apps.dagCharts.popup.job': 'Job',
-  'apps.dagCharts.popup.dag':'DAG',
-  'apps.dagCharts.popup.tasks':'Timeline & Tasks',
-  'apps.dagCharts.popup.tasks.timeline':'Job Timeline',
-  'apps.dagCharts.popup.tasks.tasks':'Job Tasks',
-  'apps.isRunning.popup.title':'Is running',
-  'apps.isRunning.popup.content':'Job is running now',
-
-  'menu.item.dashboard':'Dashboard',
-  'menu.item.heatmaps':'Heatmaps',
-  'menu.item.services':'Services',
-  'menu.item.hosts':'Hosts',
-  'menu.item.jobs':'Jobs',
-  'menu.item.admin':'Admin'
-
-};
\ No newline at end of file
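Many of the strings above carry positional placeholders ({0}, {1}, ...) that the app fills in at render time. The helper below is only an illustrative stand-in for that substitution step, not the app's actual formatter.

    function format(template /*, arg0, arg1, ... */) {
      var args = Array.prototype.slice.call(arguments, 1);
      return template.replace(/\{(\d+)\}/g, function (match, index) {
        return args[index] !== undefined ? args[index] : match;
      });
    }

    format('{0} - {1} of {2}', 1, 10, 653);   // -> '1 - 10 of 653'
    format('{0} % overall', 42);              // -> '42 % overall'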
diff --git a/branch-1.2/ambari-web/app/models.js b/branch-1.2/ambari-web/app/models.js
deleted file mode 100644
index a4e9987..0000000
--- a/branch-1.2/ambari-web/app/models.js
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-// load all models here
-
-require('models/form'); // should be the 1st
-require('models/authentication');
-require('models/cluster');
-require('models/cluster_states');
-require('models/hosts');
-require('models/quick_links');
-require('models/service');
-require('models/service_config');
-require('models/service_audit');
-require('models/service/hdfs');
-require('models/service/mapreduce');
-require('models/service/hbase');
-require('models/alert');
-require('models/user');
-require('models/pagination');
-require('models/host');
-require('models/rack');
-require('models/job');
-require('models/run');
-require('models/app');
-require('models/background_operation');
-require('models/host_component');
-
-require('classes/run_class');
-require('classes/job_class');
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/models/alert.js b/branch-1.2/ambari-web/app/models/alert.js
deleted file mode 100644
index 451804b..0000000
--- a/branch-1.2/ambari-web/app/models/alert.js
+++ /dev/null
@@ -1,187 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.AlertStatus = {
-  negative: 'corrupt',
-  positive: 'ok'
-};
-
-/**
- * Defines structure for App.Alert class. Keys mentioned here are for JSON data
- * which comes back from NAGIOS server.
- */
-App.Alert = DS.Model.extend({
-  alertId: DS.attr('string'),
-  primaryKey: 'alertId',
-  title: DS.attr('string'),//service_description in ajax response
-  serviceType: DS.attr('string'),
-  date: DS.attr('date'),
-  status: DS.attr('string'),//current_state in ajax response
-  message: DS.attr('string'),//plugin_output in ajax response
-  hostName: DS.attr('string'),
-  currentAttempt: DS.attr('string'),
-  lastHardStateChange: DS.attr('number'),
-  lastHardState: DS.attr('number'),
-  lastTimeOk: DS.attr('number'),
-  lastTimeWarning: DS.attr('number'),
-  lastTimeUnknown: DS.attr('number'),
-  lastTimeCritical: DS.attr('number'),
-  isFlapping: DS.attr('number'),
-  lastCheck: DS.attr('number'),
-
-  /**
-   * Used to show correct icon in UI
-   */
-  isOk: function () {
-    return this.get('status') == "0";
-  }.property('status'),
-
-  /**
-   * Used to show correct icon in UI
-   */
-  isWarning: function () {
-    return this.get('status') == "1";
-  }.property('status'),
-
-  /**
-   * Used to show only required alerts at the service level
-   */
-  ignoredForServices: function() {
-    return ['TaskTracker process down', 'RegionServer process down', 'DataNode process down', 'DataNode storage full', 'ZooKeeper Server process down'].contains(this.get('title'));
-  }.property('title'),
-
-  /**
-   * Used to show only required alerts at the host level
-   */
-  ignoredForHosts: function() {
-    return this.get('title').indexOf('Percent') != -1;
-  }.property('title'),
-
-  /**
-   * Provides how long ago this alert happened.
-   * 
-   * @type {String}
-   */
-  timeSinceAlert: function () {
-    var d = this.get('date');
-    if (d) {
-      var prefix = this.t('services.alerts.OK.timePrefix');
-      switch (this.get('status')) {
-        case "1":
-          prefix = this.t('services.alerts.WARN.timePrefix');
-          break;
-        case "2":
-          prefix = this.t('services.alerts.CRIT.timePrefix');
-          break;
-      }
-      var prevSuffix = $.timeago.settings.strings.suffixAgo;
-      $.timeago.settings.strings.suffixAgo = '';
-      var since = prefix + $.timeago(this.makeTimeAtleastMinuteAgo(d));
-      $.timeago.settings.strings.suffixAgo = prevSuffix;
-      return since;
-    }
-    return "";
-  }.property('date', 'status'),
-  
-  makeTimeAtleastMinuteAgo: function(d){
-    var diff = new Date().getTime() - d.getTime();
-    if (diff < 60000) {
-      diff = 60000 - diff;
-      var newD = new Date(d.getTime() - diff );
-      //console.log("Making time more than 1 minute. New time=",newD,", Old time=",d);
-      return newD;
-    }
-    return d;
-  },
-
-  /**
-   * Provides more details about when this alert happened.
-   * 
-   * @type {String}
-   */
-  timeSinceAlertDetails: function () {
-    var details = "";
-    var date = this.get('date');
-    if (date) {
-      var dateString = date.toDateString();
-      dateString = dateString.substr(dateString.indexOf(" ") + 1);
-      dateString = "Occurred on " + dateString + ", " + date.toLocaleTimeString();
-      details += dateString;
-    }
-    var lastCheck = this.get('lastCheck');
-    if (lastCheck) {
-      lastCheck = new Date(lastCheck * 1000);
-      details = details + "<br>Last checked " + $.timeago(lastCheck);
-    }
-    return details;
-  }.property('lastCheck', 'date'),
-
-  /**
-   * Used to show appropriate service label in UI
-   */
-  serviceName: function () {
-    if (this.get('serviceType')) {
-      var type = this.get('serviceType').toLowerCase();
-      switch (type) {
-        case 'mapreduce':
-          return 'MapReduce';
-        case 'hdfs':
-          return 'HDFS';
-        case 'hbase':
-          return "HBase";
-        case 'zookeeper':
-          return "Zookeeper";
-        case 'oozie':
-          return "Oozie";
-        case 'hive':
-          return 'Hive';
-      }
-    }
-    return null;
-  }.property('serviceType'),
-
-  /**
-   * Used to provide appropriate service link in UI
-   */
-  serviceLink: function () {
-    if (this.get('serviceType')) {
-      var type = this.get('serviceType').toLowerCase();
-      switch (type) {
-        case 'mapreduce':
-          return '#/main/services/MAPREDUCE/summary';
-        case 'hdfs':
-          return '#/main/services/HDFS/summary';
-        case 'hbase':
-          return '#/main/services/HBASE/summary';
-        case 'zookeeper':
-          return '#/main/services/ZOOKEEPER/summary';
-        case 'oozie':
-          return '#/main/services/OOZIE/summary';
-        case 'hive':
-          return '#/main/services/HIVE/summary';
-      }
-    }
-    return null;
-  }.property('serviceType')
-
-});
-
-App.Alert.FIXTURES = [
-];
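The computed properties above key off Nagios current_state codes delivered as strings: "0" is OK, "1" is WARNING, and "2" is CRITICAL (see the timeSinceAlert switch). A standalone sketch of the same mapping, with an invented helper name:

    function alertSeverity(status) {
      switch (status) {
        case '0': return 'ok';
        case '1': return 'warning';
        case '2': return 'critical';
        default:  return 'unknown';
      }
    }

    alertSeverity('1'); // -> 'warning'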
diff --git a/branch-1.2/ambari-web/app/models/app.js b/branch-1.2/ambari-web/app/models/app.js
deleted file mode 100644
index 2220cd7..0000000
--- a/branch-1.2/ambari-web/app/models/app.js
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-var App = require('app');
-
-App.App = DS.Model.extend({
-  appName: DS.attr('string'),
-  type: DS.attr('string'),
-  numJobsTotal: DS.attr('number'),
-  userName: DS.attr('string'),
-  executionTime: DS.attr('string'),
-  runs: DS.hasMany('App.Run')
-});
-
-App.App.FIXTURES = [
-  {
-    id: 1,
-    app_name: 'pigs.sh',
-    type: 'Hive',
-    num_jobs_total: 5,
-    user_name: 'root',
-    execution_time: '1347629541543',
-    runs: [1, 2, 3]
-  },
-  {
-    id: 2,
-    app_name: 'pigsm.sh',
-    type: 'pig',
-    num_jobs_total: 3,
-    user_name: 'user1',
-    execution_time: '1347656741515',
-    runs: [6, 4, 5]
-  },
-  {
-    id: 3,
-    app_name: 'pigsmo.sh',
-    type: 'pig',
-    num_jobs_total: 4,
-    user_name: 'user3',
-    execution_time: '1347629587687',
-    runs: [7, 8, 9, 10, 11]
-  },
-  {
-    id: 4,
-    app_name: 'pigsmok.sh',
-    type: 'MapReduce',
-    num_jobs_total: 0,
-    user_name: 'root',
-    execution_time: '134762957834',
-    runs: []
-  }
-]
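Note the naming split above: the fixtures use snake_case keys (app_name, num_jobs_total) while the model declares camelCase attributes (appName, numJobsTotal); the data layer appears to bridge the two by a conventional rename. A standalone sketch of that conversion (helper name invented):

    function camelize(key) {
      return key.replace(/_([a-z])/g, function (m, c) {
        return c.toUpperCase();
      });
    }

    camelize('num_jobs_total'); // -> 'numJobsTotal'
    camelize('app_name');       // -> 'appName'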
diff --git a/branch-1.2/ambari-web/app/models/authentication.js b/branch-1.2/ambari-web/app/models/authentication.js
deleted file mode 100644
index a56e151..0000000
--- a/branch-1.2/ambari-web/app/models/authentication.js
+++ /dev/null
@@ -1,108 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-App.Authentication = DS.Model.extend({
-  method:DS.attr('boolean'), // use LDAP
-  primaryServer:DS.attr('string'),
-  secondaryServer:DS.attr('string'),
-  useSsl:DS.attr('boolean'),
-  bindMethod:DS.attr('boolean'), // use credentials
-  bindUser:DS.attr('string'),
-  password:DS.attr('string'),
-  passwordRetype:DS.attr('string'),
-  searchBaseDn:DS.attr('string'),
-  usernameAttribute:DS.attr('string')
-});
-
-App.Authentication.FIXTURES = [
-  {
-    id:1,
-    method:0,
-    primary_server:"",
-    secondary_server:"",
-    use_ssl:false,
-    bind_method:0,
-    bind_user:"",
-    password:"",
-    password_retype:"",
-    search_base_dn:"",
-    username_attribute:""
-  }
-]
-
-App.AuthenticationForm = App.Form.extend({
-  testResult:false,
-  fieldsOptions:[
-    { name:"method", displayName:"", isRequired:false, displayType:"select",
-      values:[
-        {value:0, label:Em.I18n.t("admin.authentication.form.method.database")},
-        {value:1, label:Em.I18n.t("admin.authentication.form.method.ldap")}
-      ]
-    },
-    { name:"primaryServer", displayName:Em.I18n.t("admin.authentication.form.primaryServer"), /*validator:'ipaddress',*/
-      isRequired:function () {
-        return this.get('form.field.method.value');
-      }.property('form.field.method.value')
-    },
-    { name:"secondaryServer", displayName:Em.I18n.t("admin.authentication.form.secondaryServer"), /*validator:'ipaddress',*/ isRequired:false},
-    { name:"useSsl", displayName:Em.I18n.t("admin.authentication.form.useSsl"), displayType:"checkbox", isRequired:false },
-    { name:"bindMethod", displayName:'', displayType:"select", isRequired:false,
-      values:[
-        {value:0, label:Em.I18n.t("admin.authentication.form.bind.anonymously")},
-        {value:1, label:Em.I18n.t("admin.authentication.form.bind.useCrenedtials")}
-      ]},
-    { name:"bindUser", displayName:Em.I18n.t('admin.authentication.form.bindUserDN'), isRequired:function () {
-      return this.get('form.field.bindMethod.value');
-    }.property('form.field.bindMethod.value')},
-    { name:"password", displayName:Em.I18n.t('form.password'), displayType:"password",
-      isRequired:function () {
-        return this.get('form.field.bindMethod.value');
-      }.property('form.field.bindMethod.value') },
-    { name:"passwordRetype", displayName:Em.I18n.t('form.passwordRetype'), displayType:"password",
-      validator: "passwordRetype",
-      isRequired:function () {
-        return this.get('form.field.bindMethod.value');
-      }.property('form.field.bindMethod.value')},
-    { name:"searchBaseDn", displayName:Em.I18n.t('admin.authentication.form.searchBaseDN'),
-      isRequired:function () {
-        return this.get('form.field.method.value');
-      }.property('form.field.method.value')
-    },
-    { name:"usernameAttribute", displayName:Em.I18n.t('admin.authentication.form.usernameAttribute'),
-      isRequired:function () {
-        return this.get('form.field.method.value');
-      }.property('form.field.method.value')
-    },
-
-    { name:"userDN", displayName:Em.I18n.t('admin.authentication.form.userDN') },
-    { name:"userPassword", displayName:Em.I18n.t('admin.authentication.form.password'), displayType:'password'}
-  ],
-  fields:[],
-  testConfiguration:function () {
-    console.warn('Configuration test is randomized');
-    this.set('testResult', Math.floor(Math.random() * 2)); // 0 or 1
-    return true;
-  },
-  testConfigurationMessage:function () {
-    return this.get('testResult') ? Em.I18n.t('admin.authentication.form.test.success') : Em.I18n.t('admin.authentication.form.test.fail');
-  }.property('testResult'),
-  testConfigurationClass:function () {
-    return this.get('testResult') ? "text-success" : "text-error";
-  }.property('testConfigurationMessage')
-});
\ No newline at end of file
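A recurring trick in the form above: isRequired is not a plain boolean but a computed property that reads another field, so switching the authentication method (or bind method) flips which fields are mandatory. A minimal plain-JS sketch of the idea, with Ember's .property() dependency tracking omitted and names simplified:

    function makeDependentField(form) {
      return {
        form: form,
        // required only when the LDAP method (value 1) is selected
        isRequired: function () {
          return !!this.form.field.method.value;
        }
      };
    }

    var primaryServer = makeDependentField({ field: { method: { value: 1 } } });
    primaryServer.isRequired(); // -> true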
diff --git a/branch-1.2/ambari-web/app/models/background_operation.js b/branch-1.2/ambari-web/app/models/background_operation.js
deleted file mode 100644
index 67b52cf..0000000
--- a/branch-1.2/ambari-web/app/models/background_operation.js
+++ /dev/null
@@ -1,75 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-var App = require('app');
-
-App.BackgroundOperation = DS.Model.extend({
-  operationName:DS.attr('string'),
-  events: DS.hasMany('App.BackgroundOperationEvent'),
-  cluster:DS.belongsTo('App.Cluster'),
-  host:DS.belongsTo('App.Host'),
-  operationLog: DS.attr('string')
-});
-
-App.BackgroundOperation.FIXTURES = [
-  {
-    id:1,
-    operation_name:'Decommissioning host1',
-    operation_log:'Decommissioning log',
-    events:[1,2],
-    cluster_id:1,
-    host_id:1
-  },
-  {
-    id:2,
-    operation_name:'Starting DataNode on host4',
-    operation_log:'Starting DataNode log',
-    events:[3],
-    cluster_id:1,
-    host_id:1
-  }
-];
-
-App.BackgroundOperationEvent = DS.Model.extend({
-  eventName:DS.attr('string'),
-  operation:DS.belongsTo('App.BackgroundOperation'),
-  eventDate: DS.attr('string')
-});
-
-App.BackgroundOperationEvent.FIXTURES = [
-  {
-    id:1,
-    event_name:'Some intermediate operation',
-    operation_id:1,
-    event_date:'4 min ago'
-  },
-  {
-    id:2,
-    event_name:'Operation started',
-    operation_id:1,
-    event_date:'5 min ago'
-  },
-  {
-    id:3,
-    event_name:'Operation started',
-    operation_id:2,
-    event_date:'5 min ago'
-  }
-];
-
diff --git a/branch-1.2/ambari-web/app/models/cluster.js b/branch-1.2/ambari-web/app/models/cluster.js
deleted file mode 100644
index 5cf0f45..0000000
--- a/branch-1.2/ambari-web/app/models/cluster.js
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-var App = require('app');
-
-App.Cluster = DS.Model.extend({
-  clusterName: DS.attr('string'),
-  stackName: DS.attr('string'),
-  version: DS.attr('string'),
-  hosts: DS.hasMany('App.Host'),
-  racks: DS.hasMany('App.Rack'),
-  maxHostsPerRack: DS.attr('number')
-});
-
-App.Cluster.FIXTURES = [/*
-  {
-    id: 1,
-    cluster_name: 'cluster1',
-    stack_name: 'HDP',
-    hosts: [1, 2, 3, 4],
-    racks: [1, 2, 3, 4, 5, 6],
-    max_hosts_per_rack: 10
-  }*/
-];
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/models/cluster_states.js b/branch-1.2/ambari-web/app/models/cluster_states.js
deleted file mode 100644
index a74a5e0..0000000
--- a/branch-1.2/ambari-web/app/models/cluster_states.js
+++ /dev/null
@@ -1,123 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-var App = require('app');
-
-App.clusterStatus = Ember.Object.create({
-  clusterName: '',
-  validStates: ['CLUSTER_NOT_CREATED_1', 'CLUSTER_DEPLOY_PREP_2', 'CLUSTER_INSTALLING_3', 'SERVICE_STARTING_3', 'CLUSTER_INSTALLED_4',  'CLUSTER_STARTED_5', 'ADD_HOSTS_DEPLOY_PREP_2', 'ADD_HOSTS_INSTALLING_3', 'ADD_HOSTS_INSTALLED_4', 'ADD_HOSTS_COMPLETED_5'],
-  clusterState: 'CLUSTER_NOT_CREATED_1',
-  wizardControllerName: null,
-  localdb: null,
-  key: 'CLUSTER_CURRENT_STATUS',
-  /**
-   * get cluster data from server and update cluster status
-   */
-  updateFromServer: function(){
-    var url = App.apiPrefix + '/persist/' + this.get('key');
-    jQuery.ajax(
-      {
-        url: url,
-        context: this,
-        async: false,
-        success: function (response) {
-          if (response) {
-            var newValue = jQuery.parseJSON(response);
-            if (newValue.clusterState) {
-              this.set('clusterState', newValue.clusterState);
-            }
-            if (newValue.clusterName) {
-              this.set('clusterName', newValue.clusterName);
-            }
-            if (newValue.wizardControllerName) {
-              this.set('wizardControllerName', newValue.wizardControllerName);
-            }
-            if (newValue.localdb) {
-              this.set('localdb', newValue.localdb);
-            }
-          } else {
-            // default status already set
-          }
-        },
-        error: function (xhr) {
-          if (xhr.status == 404) {
-            // default status already set
-            console.log('Persist API did NOT find the key CLUSTER_CURRENT_STATUS');
-          }
-        },
-        statusCode: require('data/statusCodes')
-      }
-    );
-  },
-  /**
-   * Update the cluster status and POST it to the server
-   * @param newValue
-   * @return {*}
-   */
-  setClusterStatus: function(newValue){
-    if (newValue) {
-      //setter
-      if (newValue.clusterState) {
-        this.set('clusterState', newValue.clusterState);
-      }
-      if (newValue.clusterName) {
-        this.set('clusterName', newValue.clusterName);
-      }
-      if (newValue.wizardControllerName) {
-        this.set('wizardControllerName', newValue.wizardControllerName);
-      }
-      if (newValue.localdb) {
-        this.set('localdb', newValue.localdb);
-      }
-
-      var url = App.apiPrefix + '/persist/';
-      var keyValuePair = {};
-      var val = {
-        clusterName: this.get('clusterName'),
-        clusterState: this.get('clusterState'),
-        wizardControllerName: this.get('wizardControllerName'),
-        localdb: this.get('localdb')
-      };
-      keyValuePair[this.get('key')] = JSON.stringify(val);
-
-
-      jQuery.ajax({
-        async: false,
-        context: this,
-        type: "POST",
-        url: url,
-        data: JSON.stringify(keyValuePair),
-        beforeSend: function () {
-          console.log('BeforeSend: persistKeyValues', keyValuePair);
-        }
-      });
-      return newValue;
-    }
-  },
-  /**
-   * general info about cluster
-   */
-  value: function () {
-      return {
-        clusterName: this.get('clusterName'),
-        clusterState: this.get('clusterState'),
-        wizardControllerName: this.get('wizardControllerName'),
-        localdb: this.get('localdb')
-      };
-  }.property('clusterName', 'clusterState', 'localdb', 'wizardControllerName')
-
-});
\ No newline at end of file
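[editor's note] A minimal sketch of the round trip this object implements against the /persist endpoint: pull the saved status, then advance it and POST it back. The wizard controller name here is an illustrative placeholder:

// Hypothetical round trip against the persist API used above.
App.clusterStatus.updateFromServer();            // pull CLUSTER_CURRENT_STATUS, if any
var current = App.clusterStatus.get('value');    // snapshot of the tracked fields
App.clusterStatus.setClusterStatus({             // advance the wizard state and POST it back
  clusterName: current.clusterName,
  clusterState: 'CLUSTER_INSTALLING_3',
  wizardControllerName: 'installerController'    // illustrative placeholder
});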
diff --git a/branch-1.2/ambari-web/app/models/form.js b/branch-1.2/ambari-web/app/models/form.js
deleted file mode 100644
index 76f60ba..0000000
--- a/branch-1.2/ambari-web/app/models/form.js
+++ /dev/null
@@ -1,240 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-var App = require('app');
-var validator = require('utils/validator');
-
-// moved into models because keeping it elsewhere caused some errors
-App.Form = Em.View.extend({
-  /**
-   * generating fields from fieldsOptions
-   */
-  classNames:["form-horizontal"],
-  i18nprefix:'form.',
-  fields:[],
-  field:{},
-  messages:[],
-  object:false,
-  result:0, // result of the save operation (-1 = error; 0 = initial; 1 = success)
-  templateName:require('templates/common/form'),
-  tagName:'form',
-
-  init:function () {
-    var thisForm = this;
-    if (!this.fields.length) {
-      this.fieldsOptions.forEach(
-        function (options) {
-          var field = App.FormField.create(options);
-          field.set('form', thisForm);
-          thisForm.fields.push(field);
-          thisForm.set("field." + field.get('name'), field);
-        }
-      );
-    }
-    this._super();
-  },
-
-  getField:function (name) {
-    var field = false;
-    $.each(this.fields, function () {
-      if (this.get('name') == name) {
-        field = this;
-        return false; // stop iterating once the field is found
-      }
-    });
-    return field;
-  },
-
-  isValid:function () {
-    var isValid = true;
-    $.each(this.fields, function () {
-      this.validate();
-      if (!this.get('isValid')) {
-        isValid = false;
-        console.warn(this.get('name') + " IS INVALID : " + this.get('errorMessage'));
-      }
-    });
-
-    return isValid;
-  },
-
-  updateValues:function () {
-    var object = this.get('object');
-    if (object instanceof Em.Object) {
-      $.each(this.fields, function () {
-        this.set('value', (this.get('displayType') == 'password') ? '' : object.get(this.get('name')));
-      });
-    } else {
-      this.clearValues();
-    }
-
-  }.observes("object"),
-
-  clearValues:function () {
-    $.each(this.fields, function () {
-      this.set('value', '');
-    });
-  },
-
-  visibleFields:function () {
-    var fields = this.get('fields');
-    var visible = [];
-    fields.forEach(function (field) {
-      if (!field.get('isHiddenField')) {
-        visible.push(field);
-      }
-    });
-    return visible;
-  }.property('fields'),
-
-  resultText:function () {
-    var text = "";
-    switch (this.get('result')) {
-      case -1:
-        text = this.t("form.saveError");
-        break;
-      case 1:
-        text = this.t("form.saveSuccess");
-        break;
-    }
-
-    return text;
-  }.property('result')
-});
-
-App.FormField = Em.Object.extend({ // TODO: consider implementing this as a view
-  name:'',
-  displayName:'',
-//  defaultValue:'', NOT IMPLEMENTED YET
-  description:'',
-  disabled:false,
-  displayType:'string', // string, digits, number, directories, textarea, checkbox
-  disableRequiredOnPresent:false,
-  errorMessage:'',
-  form:false,
-  isRequired:true, // by default a config property is required
-  unit:'',
-  value:'',
-
-  observeValue:function () {
-
-    if (this.get('displayType') == 'hidden')
-      console.warn(" FORM FIELD VALUE: ", this.get('value'));
-
-  }.observes('value'),
-
-  isValid:function () {
-    return this.get('errorMessage') === '';
-  }.property('errorMessage'),
-
-  viewClass:function () {
-    var options = {};
-    var element = Em.TextField;
-    switch (this.get('displayType')) {
-      case 'checkbox':
-        element = Em.Checkbox;
-        options.checkedBinding = "value";
-        break;
-      case 'select':
-        element = Em.Select;
-        options.content = this.get('values');
-        options.valueBinding = "value";
-        options.optionValuePath = "content.value";
-        options.optionLabelPath = "content.label";
-        break;
-      case 'password':
-        options['type'] = 'password';
-        break;
-      case 'textarea':
-        element = Em.TextArea;
-        break;
-      case 'hidden':
-        options.type = "hidden";
-        break;
-    }
-
-    return element.extend(options);
-  }.property('displayType'),
-
-  validate:function () {
-    var digitsRegex = /^\d+$/;
-    var numberRegex = /^[-,+]?(?:\d+|\d{1,3}(?:,\d{3})+)?(?:\.\d+)?$/;
-    var value = this.get('value');
-    var isError = false;
-    this.set('errorMessage', '');
-
-    if (this.get('isRequired') && (typeof value === 'string' && value.trim().length === 0)) {
-      this.set('errorMessage', 'This is required');
-      isError = true;
-    }
-
-    if (typeof value === 'string' && value.trim().length === 0) { // skip the type-specific validation below for empty values
-      isError = true;
-    }
-
-    if (!isError) {
-      switch (this.get('validator')) {
-        case 'ipaddress':
-          if (!validator.isIpAddress(value) && !validator.isDomainName(value)) {
-            isError = true;
-            this.set('errorMessage', Em.I18n.t("form.validator.invalidIp"));
-          }
-          break;
-        case 'passwordRetype':
-          var form = this.get('form');
-          var passwordField = form.getField('password');
-          if (passwordField.get('isValid')
-            && (passwordField.get('value') != this.get('value'))
-            && passwordField.get('value') && this.get('value')
-            ) {
-            this.set('errorMessage', "Passwords are different");
-            isError = true;
-          }
-          break;
-        default:
-          break;
-      }
-
-      switch (this.get('displayType')) {
-        case 'digits':
-          if (!digitsRegex.test(value)) {
-            this.set('errorMessage', 'Must contain digits only');
-            isError = true;
-          }
-          break;
-        case 'number':
-          if (!numberRegex.test(value)) {
-            this.set('errorMessage', 'Must be a valid number');
-            isError = true;
-          }
-          break;
-        case 'directories':
-          break;
-        case 'custom':
-          break;
-        case 'password':
-          break;
-      }
-    }
-    if (!isError) {
-      this.set('errorMessage', '');
-    }
-  },
-
-  isHiddenField:function () {
-    return this.get('displayType') == 'hidden';
-  }.property('displayType')
-});
\ No newline at end of file
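[editor's note] App.Form generates its App.FormField instances from a fieldsOptions array in init(). A hypothetical subclass (names are illustrative, not from the deleted file) showing that wiring, including the passwordRetype validator:

// Hypothetical form definition driving field generation in App.Form.init().
App.LoginForm = App.Form.extend({
  fieldsOptions: [
    { name: 'username', displayName: 'Username' },
    { name: 'password', displayName: 'Password', displayType: 'password' },
    { name: 'passwordRetype', displayName: 'Retype password',
      displayType: 'password', validator: 'passwordRetype' }
  ],
  fields: []  // fresh array so fields are not shared with other form classes
});

var form = App.LoginForm.create();
form.getField('username').set('value', 'admin');
console.log(form.isValid());  // false until both password fields are filled consistently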
diff --git a/branch-1.2/ambari-web/app/models/host.js b/branch-1.2/ambari-web/app/models/host.js
deleted file mode 100644
index 0c3f4cd..0000000
--- a/branch-1.2/ambari-web/app/models/host.js
+++ /dev/null
@@ -1,195 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-var App = require('app');
-var misc = require('utils/misc');
-
-DS.attr.transforms.object = {
-  from: function(serialized) {
-    return Ember.none(serialized) ? null : Object(serialized);
-  },
-
-  to: function(deserialized) {
-    return Ember.none(deserialized) ? null : Object(deserialized);
-  }
-};
-
-App.Host = DS.Model.extend({
-  hostName: DS.attr('string'),
-  publicHostName: DS.attr('string'),
-  cluster: DS.belongsTo('App.Cluster'),
-  hostComponents: DS.hasMany('App.HostComponent'),
-  cpu: DS.attr('string'),
-  memory: DS.attr('string'),
-  diskTotal: DS.attr('number'),
-  diskFree: DS.attr('number'),
-  osArch: DS.attr('string'),
-  ip: DS.attr('string'),
-  rack: DS.attr('string'),
-  healthStatus: DS.attr('string'),
-  cpuUsage: DS.attr('number'),
-  memoryUsage: DS.attr('number'),
-  lastHeartBeatTime: DS.attr('number'),
-  osType: DS.attr("string"),
-  diskInfo: DS.attr('object'),
-  loadOne:DS.attr('number'),
-  loadFive:DS.attr('number'),
-  loadFifteen:DS.attr('number'),
-
-  criticalAlertsCount: function () {
-    return App.router.get('clusterController.alerts').filterProperty('hostName', this.get('hostName')).filterProperty('isOk', false).filterProperty('ignoredForHosts', false).length;
-  }.property('App.router.clusterController.alerts.length'),
-
-  publicHostNameFormatted: function() {
-    var name = this.get('publicHostName');
-    return name.length > 20 ? name.substr(0, 20) + ' ...' : name;
-  }.property('publicHostName'),
-  /**
-   * The API returns diskTotal and diskFree, so we need to derive their difference
-   */
-  diskUsed: function(){
-    return this.get('diskTotal') - this.get('diskFree');
-  }.property('diskFree', 'diskTotal'),
-  /**
-   * Format diskUsed to two decimal places, with a GB suffix
-   */
-  diskUsedFormatted: function() {
-    return Math.round(this.get('diskUsed') * Math.pow(10, 2)) / Math.pow(10, 2) + 'GB';
-  }.property('diskUsed'),
-  /**
-   * Format diskTotal to two decimal places, with a GB suffix
-   */
-  diskTotalFormatted: function() {
-    return Math.round(this.get('diskTotal') * Math.pow(10, 2)) / Math.pow(10, 2) + 'GB';
-  }.property('diskTotal'),
-  /**
-   * Percent value of used disk space
-   */
-  diskUsage: function() {
-    return (this.get('diskUsed')) / this.get('diskTotal') * 100;
-  }.property('diskUsed', 'diskTotal'),
-  /**
-   * Format diskUsage to two decimal places
-   */
-  diskUsageFormatted: function() {
-    if (isNaN(this.get('diskUsage')) || this.get('diskUsage') < 0) {
-      return 'Data Unavailable';
-    }
-    var s = Math.round(this.get('diskUsage') * Math.pow(10, 2)) / Math.pow(10, 2);
-    if (isNaN(s)) {
-      s = 0;
-    }
-    return s + '%';
-  }.property('diskUsage'),
-
-  diskInfoBar: function() {
-    if (isNaN(this.get('diskUsage')) || this.get('diskUsage') < 0) {
-      return this.get('diskUsageFormatted');
-    }
-    return this.get('diskUsedFormatted') + '/' + this.get('diskTotalFormatted') + ' (' + this.get('diskUsageFormatted') + ' used)';
-  }.property('diskUsedFormatted', 'diskTotalFormatted', 'diskUsageFormatted'),
-  /**
-   * Memory formatted with the appropriate unit
-   */
-  memoryFormatted: function () {
-    return misc.formatBandwidth(this.get('memory') * 1000);
-  }.property('memory'),
-  /**
-   * Return true if the host has not sent a heartbeat in the last 180 seconds
-   */
-  isNotHeartBeating : function(){
-    return ((new Date()).getTime() - this.get('lastHeartBeatTime')) > 180 * 1000;
-  }.property('lastHeartBeatTime'),
-
-  loadAvg: function() {
-    if (this.get('loadOne') != null) return this.get('loadOne').toFixed(2);
-    if (this.get('loadFive') != null) return this.get('loadFive').toFixed(2);
-    if (this.get('loadFifteen') != null) return this.get('loadFifteen').toFixed(2);
-  }.property('loadOne', 'loadFive', 'loadFifteen'),
-
-  // Instead of making healthStatus a computed property that listens on hostComponents.@each.workStatus,
-  // we are creating a separate observer _updateHealthStatus.  This is so that healthStatus is updated
-  // only once after the run loop.  This is because Ember invokes the computed property every time
-  // a property that it depends on changes.  For example, App.statusMapper's map function would invoke
-  // the computed property too many times and freezes the UI without this hack.
-  // See http://stackoverflow.com/questions/12467345/ember-js-collapsing-deferring-expensive-observers-or-computed-properties
-  healthClass: '',
-
-  _updateHealthClass: function(){
-    Ember.run.once(this, 'updateHealthClass');
-  }.observes('healthStatus', 'hostComponents.@each.workStatus'),
-
-  updateHealthClass: function(){
-    var healthStatus = this.get('healthStatus');
-    /**
-     * Do nothing until the record has loaded
-     */
-    if (this.get('isLoaded') && !this.get('isSaving')) {
-      var status;
-      var masterComponents = this.get('hostComponents').filterProperty('isMaster');
-      var masterComponentsRunning = masterComponents.everyProperty('workStatus', App.HostComponentStatus.started);
-      var slaveComponents = this.get('hostComponents').filterProperty('isSlave');
-      var slaveComponentsRunning = slaveComponents.everyProperty('workStatus', App.HostComponentStatus.started);
-      if (this.get('isNotHeartBeating')) {
-        status = 'DEAD-YELLOW';
-      } else if (masterComponentsRunning && slaveComponentsRunning) {
-        status = 'LIVE';
-      } else if (masterComponents.length > 0 && !masterComponentsRunning) {
-        status = 'DEAD';
-      } else {
-        status = 'DEAD-ORANGE';
-      }
-      if (status) {
-        healthStatus = status;
-      }
-    }
-    this.set('healthClass', 'health-status-' + healthStatus);
-  },
-
-  healthToolTip: function(){
-    var hostComponents = this.get('hostComponents').filter(function(item){
-      if(item.get('workStatus') !== App.HostComponentStatus.started){
-        return true;
-      }
-    });
-    var output = '';
-    switch (this.get('healthClass')){
-      case 'health-status-DEAD':
-        hostComponents = hostComponents.filterProperty('isMaster', true);
-        output = Em.I18n.t('hosts.host.healthStatus.mastersDown');
-        hostComponents.forEach(function(hc, index){
-          output += (index == (hostComponents.length-1)) ? hc.get('displayName') : (hc.get('displayName')+", ");
-        }, this);
-        break;
-      case 'health-status-DEAD-YELLOW':
-        output = Em.I18n.t('hosts.host.healthStatus.heartBeatNotReceived');
-        break;
-      case 'health-status-DEAD-ORANGE':
-        hostComponents = hostComponents.filterProperty('isSlave', true);
-        output = Em.I18n.t('hosts.host.healthStatus.slavesDown');
-        hostComponents.forEach(function(hc, index){
-          output += (index == (hostComponents.length-1)) ? hc.get('displayName') : (hc.get('displayName')+", ");
-        }, this);
-        break;
-    }
-    return output;
-  }.property('healthClass')
-});
-
-App.Host.FIXTURES = [];
\ No newline at end of file
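[editor's note] The _updateHealthClass/updateHealthClass split above collapses bursts of workStatus changes into one recomputation per run loop via Ember.run.once. A generic sketch of the same pattern (all names illustrative):

// Hypothetical example of the observer-coalescing pattern used by App.Host.
App.Expensive = Em.Object.extend({
  input: null,
  result: null,
  _schedule: function () {
    Ember.run.once(this, 'recompute');  // coalesces repeated observer firings
  }.observes('input'),
  recompute: function () {
    // runs once per run loop, however many times 'input' changed
    this.set('result', this.get('input'));
  }
});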
diff --git a/branch-1.2/ambari-web/app/models/host_component.js b/branch-1.2/ambari-web/app/models/host_component.js
deleted file mode 100644
index daa24e5..0000000
--- a/branch-1.2/ambari-web/app/models/host_component.js
+++ /dev/null
@@ -1,121 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.HostComponent = DS.Model.extend({
-  workStatus: DS.attr('string'),
-  componentName: DS.attr('string'),
-  host: DS.belongsTo('App.Host'),
-  service: DS.belongsTo('App.Service'),
-  isClient:function () {
-    if(['PIG', 'SQOOP', 'HCAT'].contains(this.get('componentName'))){
-      return true;
-    }
-
-    return Boolean(this.get('componentName').match(/_client/gi));
-  }.property('componentName'),
-  isRunning: function(){
-    return (this.get('workStatus') == 'STARTED' || this.get('workStatus') == 'STARTING');
-  }.property('workStatus'),
-  displayName: function () {
-    return App.format.role(this.get('componentName'));
-  }.property('componentName'),
-  isMaster: function () {
-    switch (this.get('componentName')) {
-      case 'NAMENODE':
-      case 'SECONDARY_NAMENODE':
-      case 'SNAMENODE':
-      case 'JOBTRACKER':
-      case 'ZOOKEEPER_SERVER':
-      case 'HIVE_SERVER':
-      case 'HIVE_METASTORE':
-      case 'MYSQL_SERVER':
-      case 'HBASE_MASTER':
-      case 'NAGIOS_SERVER':
-      case 'GANGLIA_SERVER':
-      case 'OOZIE_SERVER':
-      case 'WEBHCAT_SERVER':
-        return true;
-      default:
-        return false;
-    }
-  }.property('componentName'),
-  isSlave: function(){
-    switch (this.get('componentName')) {
-      case 'DATANODE':
-      case 'TASKTRACKER':
-      case 'HBASE_REGIONSERVER':
-      case 'GANGLIA_MONITOR':
-        return true;
-      default:
-        return false;
-    }
-  }.property('componentName'),
-  /**
-   * A host-component is decommissioning when it is in HDFS service's list of
-   * decomNodes.
-   */
-  isDecommissioning: function () {
-    var decommissioning = false;
-    var hostName = this.get('host.hostName');
-    var componentName = this.get('componentName');
-    if (componentName == 'DATANODE') {
-      var hdfsSvc = App.router.get('mainServiceController.hdfsService');
-      if (hdfsSvc) {
-        var decomNodes = hdfsSvc.get('decommissionDataNodes');
-        var decomNode = decomNodes != null ? decomNodes.findProperty("hostName", hostName) : null;
-        decommissioning = decomNode != null;
-      }
-    }
-    return decommissioning;
-  }.property('componentName', 'host.hostName', 'App.router.mainServiceController.hdfsService.decommissionDataNodes.@each.hostName')
-});
-
-App.HostComponent.FIXTURES = [];
-
-App.HostComponentStatus = {
-  started: "STARTED",
-  starting: "STARTING",
-  stopped: "INSTALLED",
-  stopping: "STOPPING",
-  stop_failed: "STOP_FAILED",
-  start_failed: "START_FAILED",
-  install_failed: "INSTALL_FAILED",
-
-  getKeyName:function(value){
-    switch(value){
-      case this.started:
-        return 'started';
-      case this.starting:
-        return 'starting';
-      case this.stopped:
-        return 'installed';
-      case this.stopping:
-        return 'stopping';
-      case this.stop_failed:
-        return 'stop_failed';
-      case this.start_failed:
-        return 'start_failed';
-      case this.install_failed:
-        return 'install_failed';
-    }
-    return 'none';
-  }
-}
-
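[editor's note] A short, hypothetical sketch of how the status map above might feed an i18n lookup; the translation key layout is an assumption for illustration only:

// Hypothetical translated-label lookup via getKeyName.
var status = App.HostComponentStatus.started;           // "STARTED"
var key = App.HostComponentStatus.getKeyName(status);   // "started"
var label = Em.I18n.t('hosts.host.status.' + key);      // assumed key naming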
diff --git a/branch-1.2/ambari-web/app/models/hosts.js b/branch-1.2/ambari-web/app/models/hosts.js
deleted file mode 100644
index 85feab5..0000000
--- a/branch-1.2/ambari-web/app/models/hosts.js
+++ /dev/null
@@ -1,93 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.HostInfo = Ember.Object.extend({
-  elementId: 'host',
-  name: '',
-  cpu: null,
-  memory: null,
-  message: 'Information',
-  barColor: 'progress-info',
-  isChecked: true,
-  bootLog:null,
-  bootStatus: 'PENDING',
-  
-  bootStatusForDisplay:function () {
-    switch (this.get('bootStatus')) {
-      case 'PENDING':
-        return 'Preparing';
-      case 'REGISTERED':
-        return 'Success';
-      case 'FAILED':
-        return 'Failed';
-      case 'RUNNING':
-        return 'Installing';
-      case 'DONE':
-      case 'REGISTERING':
-      default:
-        return 'Registering';
-    }
-  }.property('bootStatus'),
-
-  bootBarColor:function () {
-    switch (this.get('bootStatus')) {
-      case 'REGISTERED':
-        return 'progress-success';
-      case 'FAILED':
-        return 'progress-danger';
-      case 'PENDING':
-      case 'RUNNING':
-      case 'DONE':
-      case 'REGISTERING':
-      default:
-        return 'progress-info';
-    }
-  }.property('bootStatus'),
-
-  bootStatusColor:function () {
-    switch (this.get('bootStatus')) {
-      case 'REGISTERED':
-        return 'text-success';
-      case 'FAILED':
-        return 'text-error';
-      case 'PENDING':
-      case 'RUNNING':
-      case 'DONE':
-      case 'REGISTERING':
-      default:
-        return 'text-info';
-    }
-  }.property('bootStatus'),
-
-  isBootDone:function () {
-    switch (this.get('bootStatus')) {
-      case 'REGISTERED':
-      case 'FAILED':
-        return true;
-      case 'PENDING':
-      case 'RUNNING':
-      case 'DONE':
-      case 'REGISTERING':
-      default:
-        return false;
-    }
-
-  }.property('bootStatus')
-});
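[editor's note] A minimal sketch of wizard-side usage, showing that a single bootStatus value drives all three display properties; the host name is illustrative:

// Hypothetical App.HostInfo usage during host bootstrap.
var info = App.HostInfo.create({ name: 'host01.example.com' });
info.set('bootStatus', 'RUNNING');
console.log(info.get('bootStatusForDisplay'));  // "Installing"
console.log(info.get('bootBarColor'));          // "progress-info"
console.log(info.get('isBootDone'));            // false until REGISTERED or FAILED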
diff --git a/branch-1.2/ambari-web/app/models/job.js b/branch-1.2/ambari-web/app/models/job.js
deleted file mode 100644
index d77518f..0000000
--- a/branch-1.2/ambari-web/app/models/job.js
+++ /dev/null
@@ -1,61 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-var App = require('app');
-var date = require('utils/date');
-var misc = require('utils/misc');
-
-App.Job = DS.Model.extend({
-
-  run: DS.belongsTo('App.Run'),
-
-  jobName:DS.attr('string'),
-  workflowEntityName:DS.attr('string'),
-  userName:DS.attr('string'),
-  confPath:DS.attr('string'),
-  submitTime:DS.attr('number'),
-  maps:DS.attr('number'),
-  reduces:DS.attr('number'),
-  status:DS.attr('string'),
-  input:DS.attr('number'),
-  output:DS.attr('number'),
-  elapsedTime:DS.attr('number'),
-  duration: function() {
-    return date.timingFormat(parseInt(this.get('elapsedTime')));
-  }.property('elapsedTime'),
-  jobTimeLine:DS.attr('string'),
-  jobTaskView:DS.attr('string'),
-  /**
-   * Input size of the job, formatted with the appropriate unit
-   */
-  inputFormatted: function () {
-    var input = this.get('input');
-    return misc.formatBandwidth(input);
-  }.property('input'),
-  /**
-   * Output size of the job, formatted with the appropriate unit
-   */
-  outputFormatted: function () {
-    var output = this.get('output');
-    return misc.formatBandwidth(output);
-  }.property('output')
-
-});
-
-App.Job.FIXTURES = [];
diff --git a/branch-1.2/ambari-web/app/models/pagination.js b/branch-1.2/ambari-web/app/models/pagination.js
deleted file mode 100644
index f48eb3f..0000000
--- a/branch-1.2/ambari-web/app/models/pagination.js
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-var App = require('app');
-
-/**
- @extends Ember.Mixin
-
- Implements common pagination management properties for controllers.
- */
-App.Pagination = Em.Mixin.create({
-
-  total: 0,
-  rangeStart: 0,
-  pageSize: 0,
-
-  rangeStop: function() {
-    var rangeStop = this.get('rangeStart') + this.get('pageSize'),
-      total = this.get('total');
-    if (rangeStop < total) {
-      return rangeStop;
-    }
-    return total;
-  }.property('total', 'rangeStart', 'pageSize').cacheable(),
-
-  hasPrevious: function() {
-    return this.get('rangeStart') > 0;
-  }.property('rangeStart').cacheable(),
-
-  hasNext: function() {
-    return this.get('rangeStop') < this.get('total');
-  }.property('rangeStop', 'total').cacheable(),
-
-  nextPage: function() {
-    if (this.get('hasNext')) {
-      this.incrementProperty('rangeStart', this.get('pageSize'));
-    }
-  },
-
-  previousPage: function() {
-    if (this.get('hasPrevious')) {
-      this.decrementProperty('rangeStart', this.get('pageSize'));
-    }
-  },
-
-  currentPage: function () {
-    return this.get('rangeStop') / this.get('pageSize');
-  }.property('rangeStop', 'pageSize').cacheable(),
-
-  startPosition: function() {
-    if (this.get('total') == 0)
-      return 0;
-    return this.get('rangeStart')  + 1;
-  }.property('rangeStart', 'total').cacheable(),
-
-  totalPages: function() {
-    return Math.ceil(this.get('total') / this.get('pageSize'));
-  }.property('total', 'pageSize').cacheable(),
-
-//  changeContent: function() {
-////    this.didRequestRange(this.get('rangeStart'), this.get('rangeStop'));
-//  }.observes('total', 'rangeStart', 'rangeStop'),
-
-  pageSizeChange: function() {
-    this.set('rangeStart', 0);
-  }.observes('pageSize')
-
-});
\ No newline at end of file
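[editor's note] A minimal sketch of mixing App.Pagination into a controller; content loading is out of scope here, so only the range bookkeeping is exercised (controller name and counts are illustrative):

// Hypothetical paginated controller.
App.SomeListController = Em.ArrayController.extend(App.Pagination, {
  total: 95,
  pageSize: 10
});

var c = App.SomeListController.create();
console.log(c.get('totalPages'));   // 10
c.nextPage();                       // rangeStart: 0 -> 10
console.log(c.get('currentPage'));  // 2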
diff --git a/branch-1.2/ambari-web/app/models/quick_links.js b/branch-1.2/ambari-web/app/models/quick_links.js
deleted file mode 100644
index 745e9be..0000000
--- a/branch-1.2/ambari-web/app/models/quick_links.js
+++ /dev/null
@@ -1,136 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.QuickLinks = DS.Model.extend({
-  label: DS.attr('string'),
-  url: DS.attr('string'),
-  service_id: DS.attr('string')
-});
-
-App.QuickLinks.FIXTURES = [
-  {
-    id:1,
-    label:'NameNode UI',
-    url:'http://%@:50070/dfshealth.jsp',
-    service_id: 'HDFS'
-  },
-  {
-    id:2,
-    label:'NameNode logs',
-    url:'http://%@:50070/logs',
-    service_id: 'HDFS'
-  },
-  {
-    id:3,
-    label:'NameNode JMX',
-    url:'http://%@:50070/jmx',
-    service_id: 'HDFS'
-  },
-  {
-    id:4,
-    label:'Thread Stacks',
-    url:'http://%@:50070/stacks',
-    service_id: 'HDFS'
-  },
-  {
-    id:5,
-    label:'JobTracker UI',
-    url:'http://%@:50030/jobtracker.jsp',
-    service_id: 'MAPREDUCE'
-  },
-  {
-    id:6,
-    label:'Scheduling Info',
-    url:'http://%@:50030/scheduler',
-    service_id: 'MAPREDUCE'
-  },
-  {
-    id:7,
-    label:'Running Jobs',
-    url:'http://%@:50030/jobtracker.jsp#running_jobs',
-    service_id: 'MAPREDUCE'
-  },
-  {
-    id:8,
-    label:'Retired Jobs',
-    url:'http://%@:50030/jobtracker.jsp#retired_jobs',
-    service_id: 'MAPREDUCE'
-  },
-  {
-    id:9,
-    label:'JobHistory Server',
-    url:'http://%@:51111/jobhistoryhome.jsp',
-    service_id: 'MAPREDUCE'
-  },
-  {
-    id:10,
-    label:'JobTracker Logs',
-    url:'http://%@:50030/logs',
-    service_id: 'MAPREDUCE'
-  },
-  {
-    id:11,
-    label:'JobTracker JMX',
-    url:'http://%@:50030/jmx',
-    service_id: 'MAPREDUCE'
-  },
-  {
-    id:12,
-    label:'Thread Stacks',
-    url:'http://%@:50030/stacks',
-    service_id: 'MAPREDUCE'
-  },
-  {
-    id:13,
-    label:'HBase Master UI',
-    url:'http://%@:60010/master-status',
-    service_id: 'HBASE'
-  },
-  {
-    id:14,
-    label:'HBase Logs',
-    url:'http://%@:60010/logs',
-    service_id: 'HBASE'
-  },
-  {
-    id:15,
-    label:'Zookeeper Info',
-    url:'http://%@:60010/zk.jsp',
-    service_id: 'HBASE'
-  },
-  {
-    id:16,
-    label:'HBase Master JMX',
-    url:'http://%@:60010/jmx',
-    service_id: 'HBASE'
-  },
-  {
-    id:17,
-    label:'Debug Dump',
-    url:'http://%@:60010/dump',
-    service_id: 'HBASE'
-  },
-  {
-    id:18,
-    label:'Thread Stacks',
-    url:'http://%@:60010/stacks',
-    service_id: 'HBASE'
-  }
-];
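[editor's note] The '%@' in each url is an Ember string-format placeholder. A hypothetical resolution against a master host, assuming Ember's string prototype extensions are enabled:

// Hypothetical quick-link URL resolution.
var link = App.QuickLinks.find().findProperty('label', 'NameNode UI');
var url = link.get('url').fmt('namenode.example.com');
// -> "http://namenode.example.com:50070/dfshealth.jsp"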
diff --git a/branch-1.2/ambari-web/app/models/rack.js b/branch-1.2/ambari-web/app/models/rack.js
deleted file mode 100644
index dd87654..0000000
--- a/branch-1.2/ambari-web/app/models/rack.js
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-var App = require('app');
-
-App.Rack = DS.Model.extend({
-  name: DS.attr('string'),
-  status: DS.attr('string'),
-  criticalHostsCount: DS.attr('number'),
-  deadHostsCount: DS.attr('number'),
-  hosts: function(){
-    return App.Host.find();
-  }.property('name'),
-  liveHostsCount: function(){
-    var count = 0;
-    this.get('hosts').forEach(function(host){
-      if(host.get('healthStatus')=="HEALTHY"){
-        count++;
-      }
-    });
-    return count;
-  }.property('hosts')
-});
-
-App.Rack.FIXTURES = [
-  //here example of data
-  /*{
-    id: 1,
-    name: 'Rack-0',
-    hosts: ['host01', 'host06', 'host05'],
-    status: 'LIVE',
-    live_hosts_count: 5,
-    critical_hosts_count: 0,
-    dead_hosts_count: 2
-  }*/
-];
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/models/run.js b/branch-1.2/ambari-web/app/models/run.js
deleted file mode 100644
index 0960ddd..0000000
--- a/branch-1.2/ambari-web/app/models/run.js
+++ /dev/null
@@ -1,116 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-var App = require('app');
-var date = require('utils/date');
-var misc = require('utils/misc');
-
-App.Run = DS.Model.extend({
-  appName: DS.attr('string'),
-  type: DS.attr('string'),
-  userName:DS.attr('string'),
-  numJobsTotal: DS.attr('number'),
-  numJobsCompleted: DS.attr('number'),
-  startTime:DS.attr('string'),
-  elapsedTime:DS.attr('string'),
-  workflowContext:DS.attr('string'),
-  input: DS.attr('number'),
-  output: DS.attr('number'),
-
-  loadAllJobs : false,
-
-  isStared: false,
-  isFiltered: false,
-
-  /**
-   * Short prefix of the runId
-   */
-  idFormatted: function() {
-    return this.get('id').substr(0, 20);
-  }.property('id'),
-
-  /**
-   * Jobs in the current run
-   */
-  jobs: function() {
-    return App.Job.find().filterProperty('run.id', this.get('id'));
-  }.property('loadAllJobs'),
-
-  /**
-   * Run duration
-   */
-  duration: function() {
-    return date.timingFormat(parseInt(this.get('elapsedTime')));
-  }.property('elapsedTime'),
-  /**
-   * Status of running jobs
-   */
-  isRunning: function () {
-    // note: '!==' avoids the precedence bug in '!a == b', which negates 'a' before comparing
-    return this.get('numJobsTotal') !== this.get('numJobsCompleted');
-  }.property('numJobsTotal', 'numJobsCompleted'),
-  /**
-   * Sum of input bandwidth for all jobs with appropriate measure
-   */
-  inputFormatted: function () {
-    var input = this.get('input');
-    input = misc.formatBandwidth(input);
-    return input;
-  }.property('input'),
-
-  /**
-   *  Sum of output bandwidth for all jobs with appropriate measure
-   */
-  outputFormatted: function () {
-    var output = this.get('output');
-    output = misc.formatBandwidth(output);
-    return output;
-  }.property('output'),
-
-  /**
-   * Time of the last update: startTime plus elapsedTime
-   */
-  lastUpdateTime: function() {
-    return parseInt(this.get('startTime')) + parseInt(this.get('elapsedTime'));
-  }.property('elapsedTime', 'startTime'),
-  /**
-   * lastUpdateTime formatted for display
-   */
-  lastUpdateTimeFormatted: function() {
-    return date.dateFormat(this.get('lastUpdateTime'));
-  }.property('lastUpdateTime'),
-  lastUpdateTimeFormattedShort: function(){
-    return date.dateFormatShort(this.get('lastUpdateTime'));
-  }.property('lastUpdateTime'),
-  /**
-   * Type derived from the id prefix (overrides the 'type' attribute declared above)
-   */
-  type: function() {
-    if (this.get('id').indexOf('pig_') === 0) {
-      return 'Pig';
-    }
-    if (this.get('id').indexOf('hive_') === 0) {
-      return 'Hive';
-    }
-    if (this.get('id').indexOf('mr_') === 0) {
-      return 'MapReduce';
-    }
-  }.property('id')
-});
-
-App.Run.FIXTURES = [];
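[editor's note] A hypothetical reading of the computed fields above; the run id is illustrative, and the id prefix is what encodes the run type:

// Hypothetical App.Run usage.
var run = App.Run.find('pig_0001');   // illustrative id
console.log(run.get('type'));         // "Pig"
console.log(run.get('idFormatted'));  // first 20 characters of the id
console.log(run.get('isRunning'));    // true while numJobsCompleted != numJobsTotal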
diff --git a/branch-1.2/ambari-web/app/models/service.js b/branch-1.2/ambari-web/app/models/service.js
deleted file mode 100644
index 62aba87..0000000
--- a/branch-1.2/ambari-web/app/models/service.js
+++ /dev/null
@@ -1,176 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-var App = require('app');
-
-App.Service = DS.Model.extend({
-
-  serviceName: DS.attr('string'),
-
-  workStatus: DS.attr('string'),
-  alerts: DS.hasMany('App.Alert'),
-  quickLinks: DS.hasMany('App.QuickLinks'),
-  hostComponents: DS.hasMany('App.HostComponent'),
-  isStartDisabled: function () {
-    return !(this.get('healthStatus') == 'red');
-  }.property('healthStatus'),
-
-  isStopDisabled: function () {
-    return !(this.get('healthStatus') == 'green');
-  }.property('healthStatus'),
-
-  // Instead of making healthStatus a computed property that listens on hostComponents.@each.workStatus,
-  // we are creating a separate observer _updateHealthStatus.  This is so that healthStatus is updated
-  // only once after the run loop.  This is because Ember invokes the computed property every time
-  // a property that it depends on changes.  For example, App.statusMapper's map function would invoke
-  // the computed property too many times and freezes the UI without this hack.
-  // See http://stackoverflow.com/questions/12467345/ember-js-collapsing-deferring-expensive-observers-or-computed-properties
-  healthStatus: '',
-
-  updateHealthStatus: function () {
-    // console.log('model:service.healthStatus ' + this.get('serviceName'));
-    var components = this.get('hostComponents').filterProperty('isMaster', true);
-    var isGreen =
-      components.everyProperty('workStatus', App.HostComponentStatus.started);
-
-    if (isGreen) {
-      this.set('healthStatus', 'green');
-    } else if (components.someProperty('workStatus', App.HostComponentStatus.starting)) {
-      this.set('healthStatus', 'green-blinking');
-    } else if (components.someProperty('workStatus', App.HostComponentStatus.stopped)) {
-      this.set('healthStatus', 'red');
-    } else {
-      this.set('healthStatus', 'red-blinking');
-    }
-  },
-
-  /**
-   * Whenever the workStatus of any component changes, schedule a recalculation of the values derived from it
-   */
-  _updateHealthStatus: (function() {
-    Ember.run.once(this, 'updateHealthStatus');
-    Ember.run.once(this, 'updateIsStopped');
-    Ember.run.once(this, 'updateIsStarted');
-  }).observes('hostComponents.@each.workStatus'),
-
-  isStopped: false,
-  isStarted: false,
-
-  updateIsStopped: function () {
-    var components = this.get('hostComponents');
-    var flag = true;
-    components.forEach(function (_component) {
-      if (_component.get('workStatus') !== App.HostComponentStatus.stopped && _component.get('workStatus') !== App.HostComponentStatus.install_failed) {
-        flag = false;
-      }
-    }, this);
-    this.set('isStopped', flag);
-  },
-
-  updateIsStarted: function () {
-    var components = this.get('hostComponents').filterProperty('isMaster', true);
-    this.set('isStarted',
-      components.everyProperty('workStatus', App.HostComponentStatus.started)
-    );
-  },
-
-  isMaintained: function () {
-    var maintainedServices = [
-      "HDFS",
-      "MAPREDUCE",
-      "HBASE",
-      "OOZIE",
-      "HIVE",
-      "WEBHCAT",
-      "ZOOKEEPER",
-      "PIG",
-      "SQOOP"
-    ];
-    return maintainedServices.contains(this.get('serviceName'));
-  }.property('serviceName'),
-
-  isConfigurable: function () {
-    var configurableServices = [
-      "HDFS",
-      "MAPREDUCE",
-      "HBASE",
-      "OOZIE",
-      "HIVE",
-      "WEBHCAT",
-      "ZOOKEEPER",
-      "PIG",
-      "SQOOP",
-      "NAGIOS"
-    ];
-    return configurableServices.contains(this.get('serviceName'));
-  }.property('serviceName'),
-
-  displayName: function () {
-    switch (this.get('serviceName').toLowerCase()) {
-      case 'hdfs':
-        return 'HDFS';
-      case 'mapreduce':
-        return 'MapReduce';
-      case 'hbase':
-        return 'HBase';
-      case 'oozie':
-        return 'Oozie';
-      case 'hive':
-        return 'Hive/HCat';
-      case 'hcatalog':
-        return 'HCat';
-      case 'zookeeper':
-        return 'ZooKeeper';
-      case 'pig':
-        return 'Pig';
-      case 'sqoop':
-        return 'Sqoop';
-      case 'webhcat':
-        return 'WebHCat';
-      case 'ganglia':
-        return 'Ganglia';
-      case 'nagios':
-        return 'Nagios';
-    }
-    return this.get('serviceName');
-  }.property('serviceName')
-});
-
-App.Service.Health = {
-  live: "LIVE",
-  dead: "DEAD",
-  starting: "STARTING",
-  stopping: "STOPPING",
-
-  getKeyName: function (value) {
-    switch (value) {
-      case this.live:
-        return 'live';
-      case this.dead:
-        return 'dead';
-      case this.starting:
-        return 'starting';
-      case this.stopping:
-        return 'stopping';
-    }
-    return 'none';
-  }
-};
-
-App.Service.FIXTURES = [];
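[editor's note] A minimal sketch of how a view might consume the computed healthStatus and the start/stop guards above; 'green' means all master components are STARTED:

// Hypothetical consumer of App.Service health state.
var service = App.Service.find().findProperty('serviceName', 'HDFS');
if (service.get('healthStatus') === 'green') {
  console.log('Stop enabled:', !service.get('isStopDisabled'));    // true
} else if (service.get('healthStatus') === 'red') {
  console.log('Start enabled:', !service.get('isStartDisabled'));  // true
}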
diff --git a/branch-1.2/ambari-web/app/models/service/hbase.js b/branch-1.2/ambari-web/app/models/service/hbase.js
deleted file mode 100644
index 5d0e40c..0000000
--- a/branch-1.2/ambari-web/app/models/service/hbase.js
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-App.HBaseService = App.Service.extend({
-  version: DS.attr('string'),
-  master: DS.belongsTo('App.Host'),
-  regionServers: DS.hasMany('App.Host'),
-  masterStartTime: DS.attr('number'),
-  masterActiveTime: DS.attr('number'),
-  averageLoad: DS.attr('number'),
-  regionsInTransition: DS.attr('number'),
-  revision: DS.attr('string'),
-  heapMemoryUsed: DS.attr('number'),
-  heapMemoryMax: DS.attr('number')
-});
-
-App.HBaseService.FIXTURES = [];
diff --git a/branch-1.2/ambari-web/app/models/service/hdfs.js b/branch-1.2/ambari-web/app/models/service/hdfs.js
deleted file mode 100644
index f7951f1..0000000
--- a/branch-1.2/ambari-web/app/models/service/hdfs.js
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-App.HDFSService = App.Service.extend({
-  version: DS.attr('string'),
-  nameNode: DS.belongsTo('App.Host'),
-  snameNode: DS.belongsTo('App.Host'),
-  dataNodes: DS.hasMany('App.Host'),
-  nameNodeStartTime: DS.attr('number'),
-  jvmMemoryHeapUsed: DS.attr('number'),
-  jvmMemoryHeapCommitted: DS.attr('number'),
-  liveDataNodes: DS.hasMany('App.Host'),
-  deadDataNodes: DS.hasMany('App.Host'),
-  decommissionDataNodes: DS.hasMany('App.Host'),
-  capacityUsed: DS.attr('number'),
-  capacityTotal: DS.attr('number'),
-  capacityRemaining: DS.attr('number'),
-  dfsTotalBlocks: DS.attr('number'),
-  dfsCorruptBlocks: DS.attr('number'),
-  dfsMissingBlocks: DS.attr('number'),
-  dfsUnderReplicatedBlocks: DS.attr('number'),
-  dfsTotalFiles: DS.attr('number'),
-  upgradeStatus: DS.attr('boolean'),
-  safeModeStatus: DS.attr('string')
-});
-
-App.HDFSService.FIXTURES = [];
diff --git a/branch-1.2/ambari-web/app/models/service/mapreduce.js b/branch-1.2/ambari-web/app/models/service/mapreduce.js
deleted file mode 100644
index 758a704..0000000
--- a/branch-1.2/ambari-web/app/models/service/mapreduce.js
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-App.MapReduceService = App.Service.extend({
-  version: DS.attr('string'),
-  jobTracker: DS.belongsTo('App.Host'),
-  taskTrackers: DS.hasMany('App.Host'),
-  jobTrackerStartTime: DS.attr('number'),
-  jobTrackerHeapUsed: DS.attr('number'),
-  jobTrackerHeapMax: DS.attr('number'),
-  aliveTrackers: DS.hasMany('App.Host'),
-  blackListTrackers: DS.hasMany('App.Host'),
-  grayListTrackers: DS.hasMany('App.Host'),
-  mapSlots: DS.attr('number'),
-  reduceSlots: DS.attr('number'),
-  jobsSubmitted: DS.attr('number'),
-  jobsCompleted: DS.attr('number'),
-  mapSlotsOccupied: DS.attr('number'),
-  mapSlotsReserved: DS.attr('number'),
-  reduceSlotsOccupied: DS.attr('number'),
-  reduceSlotsReserved: DS.attr('number'),
-  mapsRunning: DS.attr('number'),
-  mapsWaiting: DS.attr('number'),
-  reducesRunning: DS.attr('number'),
-  reducesWaiting: DS.attr('number'),
-  trackersDecommissioned: DS.attr('number')
-});
-
-App.MapReduceService.FIXTURES = [];
diff --git a/branch-1.2/ambari-web/app/models/service_audit.js b/branch-1.2/ambari-web/app/models/service_audit.js
deleted file mode 100644
index 49270b6..0000000
--- a/branch-1.2/ambari-web/app/models/service_audit.js
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.ServiceAudit = DS.Model.extend({
-  date: DS.attr('date'),
-  service: DS.belongsTo('App.Service'),
-  operationName: DS.attr('string'),
-  user: DS.belongsTo('App.User')
-});
-
-App.ServiceAudit.FIXTURES = [
-  {
-    id: 1,
-    date: 'September 12, 2012 17:00',
-    operation_name: 'Reconfigure',
-    user_id: 2,
-    service_id: 1
-  },
-  {
-    id: 2,
-    date: 'September 13, 2012 17:00',
-    operation_name: 'Start',
-    user_id: 1,
-    service_id: 1
-  },
-  {
-    id: 3,
-    date: 'September 14, 2012 17:00',
-    operation_name: 'Install',
-    user_id: 1,
-    service_id: 1
-  },
-  {
-    id: 4,
-    date: 'September 12, 2012 17:00',
-    operation_name: 'Reconfigure',
-    user_id: 2,
-    service_id: 2
-  },
-  {
-    id: 5,
-    date: 'September 13, 2012 17:00',
-    operation_name: 'Start',
-    user_id: 1,
-    service_id: 2
-  },
-  {
-    id: 6,
-    date: 'September 14, 2012 17:00',
-    operation_name: 'Install',
-    user_id: 1,
-    service_id: 2
-  }
-];
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/models/service_config.js b/branch-1.2/ambari-web/app/models/service_config.js
deleted file mode 100644
index 76cc095..0000000
--- a/branch-1.2/ambari-web/app/models/service_config.js
+++ /dev/null
@@ -1,405 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-var validator = require('utils/validator');
-
-App.ConfigProperties = Ember.ArrayProxy.extend({
-  content: require('data/config_properties').configProperties
-});
-
-App.ServiceConfig = Ember.Object.extend({
-  serviceName: '',
-  configCategories: [],
-  configs: null,
-
-  errorCount: function () {
-    var masterErrors = this.get('configs').filterProperty('isValid', false).filterProperty('isVisible', true).get('length');
-    var slaveErrors = 0;
-    this.get('configCategories').forEach(function(_category){
-      slaveErrors += _category.get('slaveErrorCount');
-    },this);
-    return masterErrors + slaveErrors;
-  }.property('configs.@each.isValid', 'configs.@each.isVisible', 'configCategories.@each.slaveErrorCount')
-});
-
-App.ServiceConfigCategory = Ember.Object.extend({
-  name: null,
-
-  slaveConfigs: null,
-  primaryName: function () {
-    switch (this.get('name')) {
-      case 'DataNode':
-        return 'DATANODE';
-      case 'TaskTracker':
-        return 'TASKTRACKER';
-      case 'RegionServer':
-        return 'HBASE_REGIONSERVER';
-    }
-  }.property('name'),
-
-
-  isForMasterComponent: function () {
-    var masterServices = [ 'NameNode', 'SNameNode', 'JobTracker', 'HBase Master', 'Oozie Master',
-      'Hive Metastore', 'WebHCat Server', 'ZooKeeper Server', 'Nagios', 'Ganglia' ];
-
-    return (masterServices.contains(this.get('name')));
-  }.property('name'),
-
-  isForSlaveComponent: function () {
-    return this.get('name') === 'DataNode' || this.get('name') === 'TaskTracker' ||
-      this.get('name') === 'RegionServer';
-  }.property('name'),
-
-  slaveErrorCount: function () {
-    var length = 0;
-    if (this.get('slaveConfigs.groups')) {
-      this.get('slaveConfigs.groups').forEach(function (_group) {
-        length += _group.get('errorCount');
-      }, this);
-    }
-    return length;
-  }.property('slaveConfigs.groups.@each.errorCount')
-});
-
-
-App.SlaveConfigs = Ember.Object.extend({
-  componentName: null,
-  displayName: null,
-  hosts: null,
-  groups: null
-});
-
-App.Group = Ember.Object.extend({
-  name: null,
-  hostNames: null,
-  properties: null,
-  errorCount: function () {
-    if (this.get('properties')) {
-      return this.get('properties').filterProperty('isValid', false).filterProperty('isVisible', true).get('length');
-    }
-  }.property('properties.@each.isValid', 'properties.@each.isVisible')
-});
-
-
-App.ServiceConfigProperty = Ember.Object.extend({
-
-  id: '', //either 'puppet var' or 'site property'
-  name: '',
-  displayName: '',
-  value: '',
-  defaultValue: '',
-  defaultDirectory: '',
-  description: '',
-  displayType: 'string', // string, digits, number, directories, custom
-  unit: '',
-  category: 'General',
-  isRequired: true, // by default a config property is required
-  isReconfigurable: true, // by default a config property is reconfigurable
-  isEditable: true, // by default a config property is editable
-  isVisible: true,
-  errorMessage: '',
-  serviceConfig: null, // points to the parent App.ServiceConfig object
-  filename: '',
-
-  init: function () {
-    if (this.get('id') === 'puppet var') {
-      this.set('value', this.get('defaultValue'));
-    }
-    // TODO: remove mock data
-  },
-
-  initialValue: function () {
-    var masterComponentHostsInDB = App.db.getMasterComponentHosts();
-    //console.log("value in initialvalue: " + JSON.stringify(masterComponentHostsInDB));
-    var hostsInfo = App.db.getHosts(); // which we are setting in installerController in step3.
-    var slaveComponentHostsInDB = App.db.getSlaveComponentHosts();
-    var isOnlyFirstOneNeeded = true;
-    switch (this.get('name')) {
-      case 'namenode_host':
-        var temp = masterComponentHostsInDB.findProperty('component', 'NAMENODE');
-        this.set('value', temp.hostName);
-        break;
-      case 'snamenode_host':
-        this.set('value', masterComponentHostsInDB.findProperty('component', 'SECONDARY_NAMENODE').hostName);
-        break;
-      case 'datanode_hosts':
-        this.set('value', slaveComponentHostsInDB.findProperty('componentName', 'DATANODE').hosts.mapProperty('hostName'));
-        break;
-      case 'jobtracker_host':
-        this.set('value', masterComponentHostsInDB.findProperty('component', 'JOBTRACKER').hostName);
-        break;
-      case 'tasktracker_hosts':
-        this.set('value', slaveComponentHostsInDB.findProperty('componentName', 'TASKTRACKER').hosts.mapProperty('hostName'));
-        break;
-      case 'hbasemaster_host':
-        this.set('value', masterComponentHostsInDB.findProperty('component', 'HBASE_MASTER').hostName);
-        break;
-      case 'regionserver_hosts':
-        this.set('value', slaveComponentHostsInDB.findProperty('componentName', 'HBASE_REGIONSERVER').hosts.mapProperty('hostName'));
-        break;
-      case 'hivemetastore_host':
-      case 'hive_ambari_host':
-        this.set('value', masterComponentHostsInDB.findProperty('component', 'HIVE_SERVER').hostName);
-        break;
-      case 'oozieserver_host':
-      case 'oozie_ambari_host':
-        this.set('value', masterComponentHostsInDB.findProperty('component', 'OOZIE_SERVER').hostName);
-        break;
-      case 'zookeeperserver_hosts':
-        this.set('value', masterComponentHostsInDB.filterProperty('component', 'ZOOKEEPER_SERVER').mapProperty('hostName'));
-        break;
-      case 'dfs_name_dir':
-      case 'dfs_data_dir':
-      case 'mapred_local_dir':
-        this.unionAllMountPoints(!isOnlyFirstOneNeeded);
-        break;
-      case 'fs_checkpoint_dir':
-      case 'zk_data_dir':
-      case 'oozie_data_dir':
-        this.unionAllMountPoints(isOnlyFirstOneNeeded);
-        break;
-    }
-  },
-  
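-  /**
-   * Collect the usable mount points of every host that runs the relevant
-   * component, excluding system mount points ('/', '/home', '/boot') and
-   * pseudo filesystems (devtmpfs, tmpfs, vboxsf); the root mount point is
-   * used as a fallback when nothing else remains.
-   * @param isOnlyFirstOneNeeded when true, only the first mount point is used
-   */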
-  unionAllMountPoints: function (isOnlyFirstOneNeeded) {
-    var hostname = '';
-    var mountPointsPerHost = [];
-    var mountPointAsRoot;
-    var masterComponentHostsInDB = App.db.getMasterComponentHosts();
-    var slaveComponentHostsInDB = App.db.getSlaveComponentHosts();
-    var hostsInfo = App.db.getHosts(); // populated by installerController during step 3.
-    App.Host.find().forEach(function(item){
-      if(!hostsInfo[item.get('id')]){
-        hostsInfo[item.get('id')] = {
-          name: item.get('id'),
-          cpu: item.get('cpu'),
-          memory: item.get('memory'),
-          disk_info: item.get('diskInfo'),
-          bootStatus: "REGISTERED",
-          isInstalled: true
-        };
-      }
-    });
-    var temp = '';
-    var setOfHostNames = [];
-    switch (this.get('name')) {
-      case 'dfs_name_dir':
-        var components = masterComponentHostsInDB.filterProperty('component', 'NAMENODE');
-        components.forEach(function (component) {
-          setOfHostNames.push(component.hostName);
-        }, this);
-        break;
-      case 'fs_checkpoint_dir':
-        var components = masterComponentHostsInDB.filterProperty('component', 'SECONDARY_NAMENODE');
-        components.forEach(function (component) {
-          setOfHostNames.push(component.hostName);
-        }, this);
-        break;
-      case 'dfs_data_dir':
-        temp = slaveComponentHostsInDB.findProperty('componentName', 'DATANODE');
-        temp.hosts.forEach(function (host) {
-          setOfHostNames.push(host.hostName);
-        }, this);
-        break;
-      case 'mapred_local_dir':
-        temp = slaveComponentHostsInDB.findProperty('componentName', 'TASKTRACKER');
-        temp.hosts.forEach(function (host) {
-          setOfHostNames.push(host.hostName);
-        }, this);
-        break;
-      case 'zk_data_dir':
-        var components = masterComponentHostsInDB.filterProperty('component', 'ZOOKEEPER_SERVER');
-        components.forEach(function (component) {
-          setOfHostNames.push(component.hostName);
-        }, this);
-        break;
-      case 'oozie_data_dir':
-        var components = masterComponentHostsInDB.filterProperty('component', 'OOZIE_SERVER');
-        components.forEach(function (component) {
-          setOfHostNames.push(component.hostName);
-        }, this);
-        break;
-    }
-
-    // In the Add Host wizard, if this component was not assigned to any host, there is nothing to process.
-    if (setOfHostNames.length === 0) {
-      return;
-    }
-
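-    // Union the usable mount points across all selected hosts, skipping duplicates.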
-    var allMountPoints = [];
-    for (var i = 0; i < setOfHostNames.length; i++) {
-      hostname = setOfHostNames[i];
-
-      mountPointsPerHost = hostsInfo[hostname].disk_info;
-
-      mountPointAsRoot = mountPointsPerHost.findProperty('mountpoint', '/');
-
-      mountPointsPerHost = mountPointsPerHost.filter(function (mPoint) {
-        return !(['/', '/home', '/boot'].contains(mPoint.mountpoint) || ['devtmpfs', 'tmpfs', 'vboxsf'].contains(mPoint.type));
-      });
-
-      mountPointsPerHost.forEach(function (mPoint) {
-        if( !allMountPoints.findProperty("mountpoint", mPoint.mountpoint)) {
-          allMountPoints.push(mPoint);
-        }
-      }, this);
-    }
-    if (allMountPoints.length === 0) {
-      allMountPoints.push(mountPointAsRoot);
-    }
-    this.set('value', '');
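-    // Build a newline-separated list of directories: defaultDirectory is
-    // appended to each mount point (the bare defaultDirectory is used for '/').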
-    if (!isOnlyFirstOneNeeded) {
-      allMountPoints.forEach(function (eachDrive) {
-        var mPoint = this.get('value');
-        if (!mPoint) {
-          mPoint = "";
-        }
-        if (eachDrive.mountpoint === "/") {
-          mPoint += this.get('defaultDirectory') + "\n";
-        } else {
-          mPoint += eachDrive.mountpoint + this.get('defaultDirectory') + "\n";
-        }
-        this.set('value', mPoint);
-        this.set('defaultValue', mPoint);
-      }, this);
-    } else {
-      var mPoint = allMountPoints[0].mountpoint;
-      if (mPoint === "/") {
-        mPoint = this.get('defaultDirectory') + "\n";
-      } else {
-        mPoint = mPoint + this.get('defaultDirectory') + "\n";
-      }
-      this.set('value', mPoint);
-      this.set('defaultValue', mPoint);
-    }
-  },
-
-  isValid: function () {
-    return this.get('errorMessage') === '';
-  }.property('errorMessage'),
-
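-  // Maps the property's displayType to the view class used to render it.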
-  viewClass: function () {
-    switch (this.get('displayType')) {
-      case 'checkbox':
-        return App.ServiceConfigCheckbox;
-      case 'password':
-        return App.ServiceConfigPasswordField;
-      case 'combobox':
-        return App.ServiceConfigComboBox;
-      case 'radio button':
-        return App.ServiceConfigRadioButtons;
-      case 'directories':
-        return App.ServiceConfigTextArea;
-      case 'custom':
-        return App.ServiceConfigBigTextArea;
-      case 'masterHost':
-        return App.ServiceConfigMasterHostView;
-      case 'masterHosts':
-        return App.ServiceConfigMasterHostsView;
-      case 'slaveHosts':
-        return App.ServiceConfigSlaveHostsView;
-      default:
-        if (this.get('unit')) {
-          return App.ServiceConfigTextFieldWithUnit;
-        } else {
-          return App.ServiceConfigTextField;
-        }
-    }
-  }.property('displayType'),
-
-  validate: function () {
-
-    var value = this.get('value');
-
-    var isError = false;
-
-    if (typeof value === 'string' && value.trim().length === 0) {
-      if (this.get('isRequired')) {
-        this.set('errorMessage', 'This is required');
-        isError = true;
-      } else {
-        return;
-      }
-    }
-
-    if (!isError) {
-      switch (this.get('displayType')) {
-        case 'int':
-          if (!validator.isValidInt(value)) {
-            this.set('errorMessage', 'Must contain digits only');
-            isError = true;
-          }
-          break;
-        case 'float':
-          if (!validator.isValidFloat(value)) {
-            this.set('errorMessage', 'Must be a valid number');
-            isError = true;
-          }
-          break;
-        case 'checkbox':
-          break;
-        case 'directories':
-        case 'directory':
-          if (!validator.isValidDir(value)) {
-            this.set('errorMessage', 'Must start with a slash');
-            isError = true;
-          }
-          break;
-        case 'custom':
-          break;
-        case 'user':
-          if (!validator.isValidUserName(value)) {
-            this.set('errorMessage', Em.I18n.t('users.userName.validationFail'));
-            isError = true;
-          }
-          break;
-        case 'email':
-          if (!validator.isValidEmail(value)) {
-            this.set('errorMessage', 'Must be a valid email address');
-            isError = true;
-          }
-          break;
-        case 'password':
-          // retypedPassword is set by the retypePasswordView child view of App.ServiceConfigPasswordField
-          if (value !== this.get('retypedPassword')) {
-            this.set('errorMessage', 'Passwords do not match');
-            isError = true;
-          }
-      }
-    }
-    if (!isError) {
-      this.set('errorMessage', '');
-    }
-  }.observes('value', 'retypedPassword')
-
-});
-
diff --git a/branch-1.2/ambari-web/app/models/user.js b/branch-1.2/ambari-web/app/models/user.js
deleted file mode 100644
index bdaff22..0000000
--- a/branch-1.2/ambari-web/app/models/user.js
+++ /dev/null
@@ -1,171 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-var validator = require('utils/validator');
-
-App.UserModel = Em.Object.extend({
-  userName:null,
-  id:0
-});
-
-App.User = DS.Model.extend({
-  userName:DS.attr('string'),
-  id:function(){
-    return this.get('userName');
-  }.property('userName'),
-  roles:DS.attr('string'),
-  isLdap:DS.attr('boolean'),
-  type: function(){
-    if(this.get('isLdap')){
-      return 'LDAP';
-    }
-    return 'Local';
-  }.property('isLdap'),
-  auditItems:DS.hasMany('App.ServiceAudit'),
-  admin: DS.attr('boolean')
-});
-
-App.EditUserForm = App.Form.extend({
-  className:App.User,
-  object:function () {
-    return App.router.get('mainAdminUserEditController.content');
-  }.property('App.router.mainAdminUserEditController.content'),
-
-  fieldsOptions:[
-    { name:"userName", displayName:"Username" },
-    { name:"old_password", displayName:"Current Password", displayType:"password", isRequired: false },
-    { name:"new_password", displayName:"New Password", displayType:"password",  isRequired: false },
-    { name:"admin", displayName:"Admin", displayType:"checkbox", isRequired:false },
-    { name:"roles", displayName:"Role", isRequired:false, isHidden:true },
-    { name:"isLdap", displayName:"Type", isRequired:false, isHidden:true }
-  ],
-  fields:[],
-  disableUsername:function () {
-    this.getField("userName").set("disabled", "disabled");
-  }.observes('object'),
-  disableAdminCheckbox:function () {
-    var object = this.get('object');
-    if (object) {
-      this.getField("admin").set("disabled", (object.get('userName') == App.get('router').getLoginName()) ? "disabled" : false);
-    }
-  }.observes('object'),
-
-  isValid:function () {
-
-    var isValid = this._super();
-
-    var newPass = this.get('field.new_password');
-    var oldPass = this.get('field.old_password');
-
-    if (!validator.empty(newPass.get('value')) && validator.empty(oldPass.get('value'))) {
-      oldPass.set('errorMessage', this.t('admin.users.editError.requiredField'));
-      isValid = false;
-    }
-
-    return isValid;
-  },
-
-  save: function () {
-    var object = this.get('object');
-    var formValues = {};
-    $.each(this.get('fields'), function () {
-      formValues[this.get('name')] = this.get('value');
-    });
-
-    $.each(formValues, function (k, v) {
-      object.set(k, v);
-    });
-
-    //App.store.commit();
-    this.set('result', 1);
-
-    return true;
-  }
-});
-App.CreateUserForm = App.Form.extend({
-  className:App.User,
-  object:function () {
-    return App.router.get('mainAdminUserCreateController.content');
-  }.property('App.router.mainAdminUserCreateController.content'),
-
-  fieldsOptions:[
-    { name:"userName", displayName:"Username", toLowerCase: function(){var v = this.get('value'); this.set('value', v.toLowerCase())}.observes('value') },
-    { name:"password", displayName:"Password", displayType:"password", isRequired: true },
-    { name:"passwordRetype", displayName:"Retype Password", displayType:"password", validator:"passwordRetype", isRequired: true },
-    { name:"admin", displayName:"Admin", displayType:"checkbox", isRequired:false },
-    { name:"roles", displayName:"Role", isRequired:false, isHidden:true }
-  ],
-  fields:[],
-
-  isValid:function () {
-    var isValid = this._super();
-
-    var passField = this.get('field.password');
-    var passRetype = this.get('field.passwordRetype');
-
-    if (!validator.empty(passField.get('value'))) {
-      if (passField.get('value') != passRetype.get('value')) {
-        passRetype.set('errorMessage', this.t('admin.users.createError.passwordValidation'));
-        isValid = false;
-      }
-    }
-
-    if (isValid) {
-      var users = App.User.find();
-      var userNameField = this.getField('userName');
-      var userName = userNameField.get('value');
-
-      if (!validator.isValidUserName(userName)) {
-        userNameField.set('errorMessage', this.t('users.userName.validationFail'));
-        isValid = false;
-      }
-
-      if (users.mapProperty('userName').contains(userName)) {
-        userNameField.set('errorMessage', this.t('admin.users.createError.userNameExists'));
-        return false;
-      }
-    }
-
-    return isValid;
-  },
-
-  save: function () {
-
-    var object = this.get('object');
-    var formValues = {};
-    $.each(this.get('fields'), function () {
-      formValues[this.get('name')] = this.get('value');
-    });
-
-    if (this.get('className')) {
-      App.store.createRecord(this.get('className'), formValues);
-    }
-    else {
-      console.log("Please define class name for your form " + this.constructor);
-    }
-
-    //App.store.commit();
-    this.set('result', 1);
-
-    return true;
-  }
-});
-App.User.FIXTURES = [];
-
diff --git a/branch-1.2/ambari-web/app/router.js b/branch-1.2/ambari-web/app/router.js
deleted file mode 100644
index 13fb914..0000000
--- a/branch-1.2/ambari-web/app/router.js
+++ /dev/null
@@ -1,353 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-App.Router = Em.Router.extend({
-
-  enableLogging: true,
-  isFwdNavigation: true,
-  backBtnForHigherStep: false,
-
-  setNavigationFlow: function (step) {
-    var matches = step.match(/\d+$/);
-    var newStep;
-    if (matches) {
-      newStep = parseInt(matches[0], 10);
-    }
-    var previousStep = parseInt(this.getInstallerCurrentStep(), 10);
-    this.set('isFwdNavigation', newStep >= previousStep);
-  },
-
-
-  clearAllSteps: function() {
-    this.get('installerController').clear();
-    this.get('addHostController').clear();
-    this.get('addServiceController').clear();
-    for (var i = 1; i < 11; i++) {
-      this.set('wizardStep' + i + 'Controller.hasSubmitted', false);
-      this.set('wizardStep' + i + 'Controller.isDisabled', true);
-    }
-  },
-
-  /**
-   * Temporary fix for getting cluster name
-   * @return {*}
-   */
-
-  getClusterName: function(){
-    return App.router.get('clusterController').get('clusterName');
-  },
-
-
-  /**
-   * Get current step of Installer wizard
-   * @return {*}
-   */
-  getInstallerCurrentStep: function () {
-    return this.getWizardCurrentStep('installer');
-  },
-
-  /**
-   * Get current step for <code>wizardType</code> wizard
-   * @param wizardType one of <code>installer</code>, <code>addHost</code>, <code>addServices</code>
-   */
-  getWizardCurrentStep: function (wizardType) {
-    var loginName = this.getLoginName();
-    var currentStep = App.db.getWizardCurrentStep(wizardType);
-    console.log('getWizardCurrentStep: loginName=' + loginName + ", currentStep=" + currentStep);
-    if (!currentStep) {
-      currentStep = '1';
-    }
-    console.log('returning currentStep=' + currentStep);
-    return currentStep;
-  },
-
-  loggedIn: false,
-
-  loginName: function() {
-    return this.getLoginName();
-  }.property('loggedIn'),
-
-  getAuthenticated: function () {
-    var auth = App.db.getAuthenticated();
-    var authResp = (auth === true);
-    this.set('loggedIn', authResp);
-    return authResp;
-  },
-
-  setAuthenticated: function (authenticated) {
-    console.log("TRACE: Entering router:setAuthenticated function");
-    App.db.setAuthenticated(authenticated);
-    this.set('loggedIn', authenticated);
-  },
-
-  getLoginName: function () {
-    return App.db.getLoginName();
-  },
-
-  setLoginName: function (loginName) {
-    App.db.setLoginName(loginName);
-  },
-
-  /**
-   * Set user model to local storage
-   * @param user
-   */
-  setUser: function (user) {
-    App.db.setUser(user);
-  },
-
-  /**
-   * Get user model from local storage
-   * @return {*}
-   */
-  getUser: function () {
-    return App.db.getUser();
-  },
-
-  resetAuth: function (authenticated) {
-    if (!authenticated) {
-      App.db.cleanUp();
-      this.set('loggedIn', false);
-      this.set('loginController.loginName', '');
-      this.set('loginController.password', '');
-      this.transitionTo('login');
-    }
-    return authenticated;
-  },
-
-  login: function (postLogin) {
-    var controller = this.get('loginController');
-    var loginName = controller.get('loginName').toLowerCase();
-    controller.set('loginName', loginName);
-    var hash = window.btoa(loginName + ":" + controller.get('password'));
-    var router = this;
-    var url = '';
-
-    if (App.testMode) {
-      if (loginName === "admin" && controller.get('password') === 'admin') {
-        url = '/data/users/user_admin.json';
-      } else if (loginName === 'user' && controller.get('password') === 'user') {
-        url = '/data/users/user_user.json';
-      }
-    }
-
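-    // Authenticate with HTTP Basic auth against the users endpoint. Admins
-    // proceed directly; non-admin users are admitted only when a cluster
-    // already exists.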
-    $.ajax({
-      url : (App.testMode) ? url  : App.apiPrefix + '/users/' + loginName ,
-      dataType : 'json',
-      type: 'GET',
-      beforeSend: function (xhr) {
-        xhr.setRequestHeader("Authorization", "Basic " + hash);
-      },
-      statusCode: {
-        200: function () {
-          console.log("Status code 200: Success.");
-        },
-        401: function () {
-          console.log("Error code 401: Unauthorized.");
-        },
-        403: function () {
-          console.log("Error code 403: Forbidden.");
-        }
-      },
-      success: function (data) {
-        console.log('login success');
-
-        var resp = data;
-        var isAdmin = resp.Users.roles.indexOf('admin') >= 0;
-        if(isAdmin){
-          router.setAuthenticated(true);
-          router.setLoginName(loginName);
-          App.usersMapper.map({"items":[data]});
-          router.setUser(App.User.find(loginName));
-          router.transitionTo(router.getSection());
-          postLogin(true);
-        } else {
-          $.ajax({
-            url:  (App.testMode) ? '/data/clusters/info.json' : App.apiPrefix + '/clusters',
-            dataType: 'text',
-            type: 'GET',
-            success: function (data) {
-              var clusterResp = $.parseJSON(data);
-              if (clusterResp.items.length) {
-                router.setAuthenticated(true);
-                router.setLoginName(loginName);
-                App.usersMapper.map({"items":[resp]});
-                router.setUser(App.User.find(loginName));
-                router.transitionTo(router.getSection());
-                postLogin(true);
-              } else {
-                controller.set('errorMessage', "Your administrator has not set up a Hadoop cluster yet.");
-              }
-            },
-            error: function (req) {
-              console.log("Server not responding: " + req.statusCode);
-            }
-          });
-        }
-      },
-      error: function (req) {
-        console.log("login error: " + req.statusCode);
-        router.setAuthenticated(false);
-        postLogin(false);
-      }
-    });
-
-  },
-
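-  // Synchronously fetches the available stacks and caches their name/version
-  // pairs in local storage via App.db.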
-  setAmbariStacks: function () {
-    var self = this;
-    var method = 'GET';
-    var url = (App.testMode) ? '/data/wizard/stack/stacks.json' : App.apiPrefix + '/stacks';
-    $.ajax({
-      type: method,
-      url: url,
-      async: false,
-      dataType: 'text',
-      timeout: App.timeout,
-      success: function (data) {
-        var jsonData = jQuery.parseJSON(data);
-        console.log("TRACE: In success function for the setAmbariStacks call");
-        console.log("TRACE: value of the url is: " + url);
-        var stacks = [];
-        jsonData.forEach(function (_stack) {
-         stacks.pushObject({
-           name:_stack.name,
-           version: _stack.version
-         });
-        }, this);
-        App.db.setAmbariStacks(stacks);
-        console.log('TRACE: ambariStacks: ' + JSON.stringify(App.db.getAmbariStacks()));
-      },
-
-      error: function (request, ajaxOptions, error) {
-        console.log("TRACE: In error function for the setAmbariStacks call");
-        console.log("TRACE: value of the url is: " + url);
-        console.log("TRACE: error code status is: " + request.status);
-        console.log('Error message is: ' + request.responseText);
-      },
-
-      statusCode: require('data/statusCodes')
-    });
-  },
-
-  getSection: function () {
-    if (App.alwaysGoToInstaller) {
-      return 'installer';
-    }
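-    // Resume where the user left off, based on the cluster state persisted
-    // on the server.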
-    App.clusterStatus.updateFromServer();
-    var clusterStatusOnServer = App.clusterStatus.get('value');
-    if (clusterStatusOnServer && (clusterStatusOnServer.clusterState === 'CLUSTER_STARTED_5' || clusterStatusOnServer.clusterState === 'ADD_HOSTS_COMPLETED_5' )) {
-      return 'main.index';
-    } else if (clusterStatusOnServer && clusterStatusOnServer.wizardControllerName === App.router.get('addHostController.name')) {
-      // wizardControllerName === "addHostController" means the browser was closed or crashed while the Add Hosts wizard was in progress
-      return 'main.hostAdd';
-    } else {
-      // wizardControllerName === "installerController" means the browser was closed or crashed while the Installer wizard was in progress
-      return 'installer';
-    }
-  },
-
-  logOff: function(context){
-    $('title').text('Ambari');
-    var hash = window.btoa(this.get('loginController.loginName') + ":" + this.get('loginController.password'));
-
-    App.router.get('mainController').stopPolling();
-    // App.db.cleanUp() must be called before router.clearAllSteps();
-    // otherwise, this.set('installerController.currentStep', 0) would have no effect,
-    // since currentStep is a computed property that is not declared as dependent on App.db.
-    App.db.cleanUp();
-    this.clearAllSteps();
-    console.log("Log off: " + App.router.getClusterName());
-    this.set('loginController.loginName', '');
-    this.set('loginController.password', '');
-
-    if (!App.testMode) {
-      $.ajax({
-        url: App.apiPrefix + '/logout',
-        dataType: 'json',
-        type: 'GET',
-        beforeSend: function (xhr) {
-          xhr.setRequestHeader("Authorization", "Basic " + hash);
-        },
-        statusCode: {
-          200: function () {
-            console.log("Status code 200: Success.");
-          },
-          401: function () {
-            console.log("Error code 401: Unauthorized.");
-          },
-          403: function () {
-            console.log("Error code 403: Forbidden.");
-          }
-        },
-        success: function (data) {
-          console.log("invoked logout on the server successfully");
-        },
-        error: function (data) {
-          console.log("failed to invoke logout on the server");
-        },
-        complete: function () {
-          console.log('done');
-        }
-      });
-    }
-
-    this.transitionTo('login', context);
-  },
-
-  root: Em.Route.extend({
-    index: Em.Route.extend({
-      route: '/',
-      redirectsTo: 'login'
-    }),
-
-    login: Em.Route.extend({
-      route: '/login',
-
-      /**
-       *  If the user is already logged in, redirect to where the user was previously
-       */
-      enter: function (router, context) {
-        if (router.getAuthenticated()) {
-          Ember.run.next(function () {
-            console.log(router.getLoginName() + ' already authenticated.  Redirecting...');
-            router.transitionTo(router.getSection(), context);
-          });
-        }
-      },
-
-      connectOutlets: function (router, context) {
-        $('title').text('Ambari');
-        console.log('/login:connectOutlet');
-        console.log('currentStep is: ' + router.getInstallerCurrentStep());
-        console.log('authenticated is: ' + router.getAuthenticated());
-        router.get('applicationController').connectOutlet('login', App.LoginView);
-      }
-    }),
-
-    installer: require('routes/installer'),
-
-    main: require('routes/main'),
-
-    logoff: function (router, context) {
-      router.logOff(context);
-    }
-
-  })
-});
diff --git a/branch-1.2/ambari-web/app/routes/add_host_routes.js b/branch-1.2/ambari-web/app/routes/add_host_routes.js
deleted file mode 100644
index b603ac1..0000000
--- a/branch-1.2/ambari-web/app/routes/add_host_routes.js
+++ /dev/null
@@ -1,341 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-module.exports = Em.Route.extend({
-  route: '/hosts/add',
-
-  enter: function (router) {
-    console.log('in /hosts/add:enter');
-
-    Ember.run.next(function () {
-      var addHostController = router.get('addHostController');
-      App.router.get('updateController').set('isWorking', false);
-      App.ModalPopup.show({
-        classNames: ['full-width-modal'],
-        header:Em.I18n.t('hosts.add.header'),
-        bodyClass:  App.AddHostView.extend({
-          controllerBinding: 'App.router.addHostController'
-        }),
-        primary:Em.I18n.t('form.cancel'),
-        secondary: null,
-        showFooter: false,
-
-        onPrimary:function () {
-          this.hide();
-          App.router.get('updateController').set('isWorking', true);
-          router.transitionTo('hosts.index');
-        },
-        onClose: function() {
-          this.hide();
-          App.router.get('updateController').set('isWorking', true);
-          router.transitionTo('hosts.index');
-        },
-        didInsertElement: function(){
-          this.fitHeight();
-        }
-      });
-      App.clusterStatus.updateFromServer();
-      var currentClusterStatus = App.clusterStatus.get('value');
-
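-      // Resume the Add Host wizard at the step recorded in the persisted
-      // cluster state, restoring the wizard's local storage snapshot.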
-      if (currentClusterStatus) {
-        switch (currentClusterStatus.clusterState) {
-          case 'ADD_HOSTS_DEPLOY_PREP_2' :
-            addHostController.setCurrentStep('4');
-            App.db.data = currentClusterStatus.localdb;
-            break;
-          case 'ADD_HOSTS_INSTALLING_3' :
-          case 'SERVICE_STARTING_3' :
-            addHostController.setCurrentStep('5');
-            App.db.data = currentClusterStatus.localdb;
-            break;
-          case 'ADD_HOSTS_INSTALLED_4' :
-            addHostController.setCurrentStep('6');
-            App.db.data = currentClusterStatus.localdb;
-            break;
-          default:
-            break;
-        }
-      }
-
-      router.transitionTo('step' + addHostController.get('currentStep'));
-    });
-
-  },
-
-  /*connectOutlets: function (router, context) {
-    console.log('in /hosts/add:connectOutlets');
-    router.get('mainController').connectOutlet('addHost');
-  },*/
-
-  step1: Em.Route.extend({
-    route: '/step1',
-    connectOutlets: function (router) {
-      console.log('in addHost.step1:connectOutlets');
-      var controller = router.get('addHostController');
-      controller.setCurrentStep('1');
-      controller.set('hideBackButton', true);
-      controller.dataLoading().done(function () {
-        controller.loadServicesFromServer();
-        controller.loadAllPriorSteps();
-        controller.connectOutlet('wizardStep2', controller.get('content'));
-      })
-    },
-
-    next: function (router) {
-      var controller = router.get('addHostController');
-      controller.save('installOptions');
-      //hosts was saved to content.hosts inside wizardStep2Controller
-      controller.save('hosts');
-      router.transitionTo('step2');
-      App.db.setBootStatus(false);
-    },
-    evaluateStep: function (router) {
-      console.log('in addHost.step1:evaluateStep');
-      var addHostController = router.get('addHostController');
-      var wizardStep2Controller = router.get('wizardStep2Controller');
-
-      wizardStep2Controller.set('hasSubmitted', true);
-
-      if (!wizardStep2Controller.get('isSubmitDisabled')) {
-        wizardStep2Controller.evaluateStep();
-      }
-    }
-  }),
-
-  step2: Em.Route.extend({
-    route: '/step2',
-    connectOutlets: function (router) {
-      console.log('in addHost.step2:connectOutlets');
-      var controller = router.get('addHostController');
-      controller.setCurrentStep('2');
-      controller.dataLoading().done(function () {
-        controller.loadAllPriorSteps();
-        controller.connectOutlet('wizardStep3', controller.get('content'));
-      })
-    },
-    back: function(router, event){
-      //if install not in progress
-      if(!$(event.target).attr('disabled')){
-        router.transitionTo('step1');
-      }
-    },
-    exit: function (router) {
-      router.get('wizardStep3Controller').set('stopBootstrap', true);
-    },
-    next: function (router, context) {
-      var addHostController = router.get('addHostController');
-      var wizardStep3Controller = router.get('wizardStep3Controller');
-      addHostController.saveConfirmedHosts(wizardStep3Controller);
-      addHostController.saveClients();
-
-      App.db.setBootStatus(true);
-      router.transitionTo('step3');
-    },
-    /**
-     * Wrapper for remove host action.
-     * Since saving data stored in addHostController, we should call this from router
-     * @param router
-     * @param context Array of hosts to delete
-     */
-    removeHosts: function (router, context) {
-      console.log('in addHost.step2.removeHosts:hosts to delete ', context);
-      var controller = router.get('addHostController');
-      controller.removeHosts(context);
-    }
-  }),
-
-  step3: Em.Route.extend({
-    route: '/step3',
-    connectOutlets: function (router, context) {
-      console.log('in addHost.step3:connectOutlets');
-      var controller = router.get('addHostController');
-      controller.setCurrentStep('3');
-      controller.dataLoading().done(function () {
-        controller.loadAllPriorSteps();
-        controller.connectOutlet('wizardStep6', controller.get('content'));
-      })
-    },
-    back: Em.Router.transitionTo('step2'),
-    next: function (router) {
-      var addHostController = router.get('addHostController');
-      var wizardStep6Controller = router.get('wizardStep6Controller');
-
-      if (wizardStep6Controller.validate()) {
-        addHostController.saveSlaveComponentHosts(wizardStep6Controller);
-        addHostController.get('content').set('serviceConfigProperties', null);
-        App.db.setServiceConfigProperties(null);
-        addHostController.loadAdvancedConfigs();
-        var wizardStep7Controller = router.get('wizardStep7Controller');
-        wizardStep7Controller.set('content', addHostController.get('content'));
-        wizardStep7Controller.loadStep();
-        addHostController.saveServiceConfigProperties(wizardStep7Controller);
-        router.transitionTo('step4');
-      }
-    }
-  }),
-
-  step4: Em.Route.extend({
-    route: '/step4',
-    connectOutlets: function (router, context) {
-      console.log('in addHost.step4:connectOutlets');
-      var controller = router.get('addHostController');
-      controller.setCurrentStep('4');
-      controller.dataLoading().done(function () {
-        controller.loadAllPriorSteps();
-        controller.connectOutlet('wizardStep8', controller.get('content'));
-      })
-    },
-    back: Em.Router.transitionTo('step3'),
-    next: function (router) {
-      var addHostController = router.get('addHostController');
-      var wizardStep8Controller = router.get('wizardStep8Controller');
-      addHostController.installServices();
-      addHostController.setInfoForStep9();
-
-      // We need to do recovery based on whether we are in Add Host or Installer wizard
-      App.clusterStatus.setClusterStatus({
-        clusterName: this.get('clusterName'),
-        clusterState: 'ADD_HOSTS_INSTALLING_3',
-        wizardControllerName: App.router.get('addHostController.name'),
-        localdb: App.db.data
-      });
-      wizardStep8Controller.set('servicesInstalled', true);
-      router.transitionTo('step5');
-    }
-  }),
-
-  step5: Em.Route.extend({
-    route: '/step5',
-    connectOutlets: function (router, context) {
-      console.log('in addHost.step5:connectOutlets');
-      var controller = router.get('addHostController');
-      controller.setCurrentStep('5');
-      controller.dataLoading().done(function () {
-        controller.loadAllPriorSteps();
-        if (!App.testMode) { // when test mode is ON, links to prior steps stay enabled
-          controller.setLowerStepsDisable(5);
-        }
-        controller.connectOutlet('wizardStep9', controller.get('content'));
-      })
-    },
-    back: Em.Router.transitionTo('step4'),
-    retry: function(router,context) {
-      var addHostController = router.get('addHostController');
-      var wizardStep9Controller = router.get('wizardStep9Controller');
-      if (wizardStep9Controller.get('showRetry')) {
-        if (wizardStep9Controller.get('content.cluster.status') === 'INSTALL FAILED') {
-          var isRetry = true;
-          addHostController.installServices(isRetry);
-          addHostController.setInfoForStep9();
-          wizardStep9Controller.resetHostsForRetry();
-          // We need to do recovery based on whether we are in Add Host or Installer wizard
-          App.clusterStatus.setClusterStatus({
-            clusterName: this.get('clusterName'),
-            clusterState: 'ADD_HOSTS_INSTALLING_3',
-            wizardControllerName: App.router.get('addHostController.name'),
-            localdb: App.db.data
-          });
-        }
-        wizardStep9Controller.navigateStep();
-      }
-    },
-    unroutePath: function() {
-      return false;
-    },
-    next: function (router) {
-      var addHostController = router.get('addHostController');
-      var wizardStep9Controller = router.get('wizardStep9Controller');
-      addHostController.saveInstalledHosts(wizardStep9Controller);
-
-      // We need to do recovery based on whether we are in Add Host or Installer wizard
-      App.clusterStatus.setClusterStatus({
-        clusterName: this.get('clusterName'),
-        clusterState: 'ADD_HOSTS_INSTALLED_4',
-        wizardControllerName: App.router.get('addHostController.name'),
-        localdb: App.db.data
-      });
-
-      router.transitionTo('step6');
-    }
-  }),
-
-  step6: Em.Route.extend({
-    route: '/step6',
-    connectOutlets: function (router, context) {
-      console.log('in addHost.step6:connectOutlets');
-      var controller = router.get('addHostController');
-      controller.setCurrentStep('6');
-      controller.dataLoading().done(function () {
-        controller.loadAllPriorSteps();
-        if (!App.testMode) { // when test mode is ON, links to prior steps stay enabled
-          controller.setLowerStepsDisable(6);
-        }
-        controller.connectOutlet('wizardStep10', controller.get('content'));
-      })
-    },
-    back: Em.Router.transitionTo('step5'),
-    complete: function (router, context) {
-      if (true) { // TODO: move this validation into installerController
-        var addHostController = router.get('addHostController');
-        App.router.get('updateController').updateAll();
-        addHostController.finish();
-        $(context.currentTarget).parents("#modal").find(".close").trigger('click');
-
-
-        // We need to do recovery based on whether we are in Add Host or Installer wizard
-        App.clusterStatus.setClusterStatus({
-          clusterName: this.get('clusterName'),
-          clusterState: 'ADD_HOSTS_COMPLETED_5',
-          wizardControllerName: App.router.get('addHostController.name'),
-          localdb: App.db.data
-        });
-
-        router.transitionTo('main.index');
-      } else {
-        console.log('cluster installation failure');
-        //$(context.currentTarget).parents("#modal").find(".close").trigger('click');
-      }
-    }
-  }),
-
-  backToHostsList: function (router, event) {
-    App.router.get('updateController').set('isWorking', true);
-    router.transitionTo('hosts.index');
-  },
-
-  gotoStep1: Em.Router.transitionTo('step1'),
-
-  gotoStep2: Em.Router.transitionTo('step2'),
-
-  gotoStep3: Em.Router.transitionTo('step3'),
-
-  gotoStep4: Em.Router.transitionTo('step4'),
-
-  gotoStep5: Em.Router.transitionTo('step5'),
-
-  gotoStep6: Em.Router.transitionTo('step6'),
-
-  gotoStep7: Em.Router.transitionTo('step7'),
-
-  gotoStep8: Em.Router.transitionTo('step8'),
-
-  gotoStep9: Em.Router.transitionTo('step9'),
-
-  gotoStep10: Em.Router.transitionTo('step10')
-
-});
diff --git a/branch-1.2/ambari-web/app/routes/add_service_routes.js b/branch-1.2/ambari-web/app/routes/add_service_routes.js
deleted file mode 100644
index 1574f0c..0000000
--- a/branch-1.2/ambari-web/app/routes/add_service_routes.js
+++ /dev/null
@@ -1,276 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-module.exports = Em.Route.extend({
-  route: '/services/add',
-
-  enter: function (router) {
-    console.log('in /service/add:enter');
-    if (App.db.getUser().admin) {
-      Em.run.next(function () {
-        var addServiceController = router.get('addServiceController');
-        App.router.get('updateController').set('isWorking', false);
-        App.ModalPopup.show({
-          classNames: ['full-width-modal'],
-          header:Em.I18n.t('services.add.header'),
-          bodyClass:  App.AddServiceView.extend({
-            controllerBinding: 'App.router.addServiceController'
-          }),
-          primary:Em.I18n.t('form.cancel'),
-          showFooter: false,
-          secondary: null,
-
-          onPrimary:function () {
-            this.hide();
-            App.router.get('updateController').set('isWorking', true);
-            App.router.transitionTo('main.services');
-          },
-          onClose: function() {
-            this.hide();
-            App.router.get('updateController').set('isWorking', true);
-            App.router.transitionTo('main.services')
-          },
-          didInsertElement: function(){
-            this.fitHeight();
-          }
-        });
-        router.transitionTo('step' + addServiceController.get('currentStep'));
-      });
-    } else {
-      Em.run.next(function () {
-        App.router.transitionTo('main.services');
-      });
-    }
-
-  },
-
-  /*connectOutlets: function (router) {
-    console.log('in /service/add:connectOutlets');
-    router.get('mainController').connectOutlet('addService');
-  },*/
-
-  step1: Em.Route.extend({
-    route: '/step1',
-    connectOutlets: function (router) {
-      console.log('in addService.step1:connectOutlets');
-      var controller = router.get('addServiceController');
-      controller.setCurrentStep('1');
-      controller.set('hideBackButton', true);
-      controller.dataLoading().done(function () {
-        controller.loadServicesFromServer();
-        controller.loadAllPriorSteps();
-        controller.connectOutlet('wizardStep4', controller.get('content.services'));
-      })
-    },
-    next: function (router) {
-      var addServiceController = router.get('addServiceController');
-      var wizardStep4Controller = router.get('wizardStep4Controller');
-      addServiceController.saveServices(wizardStep4Controller);
-      addServiceController.saveClients(wizardStep4Controller);
-      App.db.setMasterComponentHosts(undefined);
-      router.transitionTo('step2');
-    }
-  }),
-
-  step2: Em.Route.extend({
-    route: '/step2',
-    connectOutlets: function (router) {
-      console.log('in addService.step2:connectOutlets');
-      var controller = router.get('addServiceController');
-      controller.setCurrentStep('2');
-      controller.set('hideBackButton', false);
-      controller.dataLoading().done(function () {
-        controller.loadAllPriorSteps();
-        controller.connectOutlet('wizardStep5', controller.get('content'));
-      })
-
-    },
-    back: Em.Router.transitionTo('step1'),
-    next: function (router) {
-      var addServiceController = router.get('addServiceController');
-      var wizardStep5Controller = router.get('wizardStep5Controller');
-      addServiceController.saveMasterComponentHosts(wizardStep5Controller);
-      App.db.setSlaveComponentHosts(undefined);
-      router.transitionTo('step3');
-    }
-  }),
-
-  step3: Em.Route.extend({
-    route: '/step3',
-    connectOutlets: function (router) {
-      console.log('in addService.step3:connectOutlets');
-      var controller = router.get('addServiceController');
-      controller.setCurrentStep('3');
-      controller.dataLoading().done(function () {
-        controller.loadAllPriorSteps();
-        controller.connectOutlet('wizardStep6', controller.get('content'));
-      })
-    },
-    back: function(router){
-      var controller = router.get('addServiceController');
-      if(!controller.get('content.missMasterStep')){
-        router.transitionTo('step2');
-      } else {
-        router.transitionTo('step1');
-      }
-    },
-    next: function (router) {
-      var addServiceController = router.get('addServiceController');
-      var wizardStep6Controller = router.get('wizardStep6Controller');
-
-      if (wizardStep6Controller.validate()) {
-        addServiceController.saveSlaveComponentHosts(wizardStep6Controller);
-        addServiceController.get('content').set('serviceConfigProperties', null);
-        App.db.setServiceConfigProperties(null);
-        App.db.setSlaveProperties(null);
-        //addServiceController.loadSlaveGroupProperties();
-        addServiceController.loadAdvancedConfigs();
-        router.transitionTo('step4');
-      }
-    }
-  }),
-
-  step4: Em.Route.extend({
-    route: '/step4',
-    connectOutlets: function (router) {
-      console.log('in addService.step4:connectOutlets');
-      var controller = router.get('addServiceController');
-      controller.setCurrentStep('4');
-      controller.dataLoading().done(function () {
-        controller.loadAllPriorSteps();
-        controller.connectOutlet('wizardStep7', controller.get('content'));
-      })
-    },
-    back: function(router){
-      var controller = router.get('addServiceController');
-      if(!controller.get('content.missSlavesStep')){
-        router.transitionTo('step3');
-      } else if(!controller.get('content.missMasterStep')) {
-        router.transitionTo('step2');
-      } else {
-        router.transitionTo('step1');
-      }
-    },
-    next: function (router) {
-      var addServiceController = router.get('addServiceController');
-      var wizardStep7Controller = router.get('wizardStep7Controller');
-      addServiceController.saveServiceConfigProperties(wizardStep7Controller);
-      router.transitionTo('step5');
-    }
-  }),
-
-  step5: Em.Route.extend({
-    route: '/step5',
-    connectOutlets: function (router, context) {
-      console.log('in addService.step5:connectOutlets');
-      var controller = router.get('addServiceController');
-      controller.setCurrentStep('5');
-      controller.dataLoading().done(function () {
-        controller.loadAllPriorSteps();
-        controller.connectOutlet('wizardStep8', controller.get('content'));
-      })
-    },
-    back: Em.Router.transitionTo('step4'),
-    next: function (router) {
-      var addServiceController = router.get('addServiceController');
-      var wizardStep8Controller = router.get('wizardStep8Controller');
-      addServiceController.installServices();
-      addServiceController.setInfoForStep9();
-      router.transitionTo('step6');
-    }
-  }),
-
-  step6: Em.Route.extend({
-    route: '/step6',
-    connectOutlets: function (router, context) {
-      console.log('in addService.step6:connectOutlets');
-      var controller = router.get('addServiceController');
-      controller.setCurrentStep('6');
-      controller.dataLoading().done(function () {
-        controller.loadAllPriorSteps();
-        if (!App.testMode) { // when test mode is ON, links to prior steps stay enabled
-          controller.setLowerStepsDisable(6);
-        }
-        controller.connectOutlet('wizardStep9', controller.get('content'));
-      })
-    },
-    back: Em.Router.transitionTo('step5'),
-    retry: function(router,context) {
-      var addServiceController = router.get('addServiceController');
-      var wizardStep9Controller = router.get('wizardStep9Controller');
-      if (!wizardStep9Controller.get('isSubmitDisabled')) {
-        if (wizardStep9Controller.get('content.cluster.status') !== 'START FAILED') {
-          addServiceController.installServices(true);
-          addServiceController.setInfoForStep9();
-        } else {
-          wizardStep9Controller.set('content.cluster.isCompleted', false);
-        }
-        wizardStep9Controller.navigateStep();
-      }
-    },
-    unroutePath: function() {
-      return false;
-    },
-    next: function (router) {
-      var addServiceController = router.get('addServiceController');
-      var wizardStep9Controller = router.get('wizardStep9Controller');
-      addServiceController.saveInstalledHosts(wizardStep9Controller);
-      router.transitionTo('step7');
-    }
-  }),
-
-  step7: Em.Route.extend({
-    route: '/step7',
-    connectOutlets: function (router, context) {
-      console.log('in addService.step7:connectOutlets');
-      var controller = router.get('addServiceController');
-      controller.setCurrentStep('7');
-      controller.dataLoading().done(function () {
-        controller.loadAllPriorSteps();
-        controller.connectOutlet('wizardStep10', controller.get('content'));
-      })
-    },
-    back: Em.Router.transitionTo('step6'),
-    complete: function (router, context) {
-      if (true) { // TODO: move this validation into installerController
-        router.get('addServiceController').finish();
-        $(context.currentTarget).parents("#modal").find(".close").trigger('click');
-      }
-    }
-  }),
-
-  gotoStep1: Em.Router.transitionTo('step1'),
-
-  gotoStep2: Em.Router.transitionTo('step2'),
-
-  gotoStep3: Em.Router.transitionTo('step3'),
-
-  gotoStep4: Em.Router.transitionTo('step4'),
-
-  gotoStep5: Em.Router.transitionTo('step5'),
-
-  gotoStep6: Em.Router.transitionTo('step6'),
-
-  gotoStep7: Em.Router.transitionTo('step7'),
-
-  backToServices: function (router) {
-    App.router.get('updateController').set('isWorking', true);
-    router.transitionTo('services');
-  }
-
-});
diff --git a/branch-1.2/ambari-web/app/routes/installer.js b/branch-1.2/ambari-web/app/routes/installer.js
deleted file mode 100644
index 99acb32..0000000
--- a/branch-1.2/ambari-web/app/routes/installer.js
+++ /dev/null
@@ -1,406 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-module.exports = Em.Route.extend({
-  route: '/installer',
-  App: require('app'),
-
-  enter: function (router) {
-    console.log('in /installer:enter');
-
-    App.clusterStatus.set('wizardControllerName',App.router.get('installerController.name'));
-
-    if (router.getAuthenticated()) {
-      var name = 'Cluster Install Wizard';
-      $('title').text('Ambari - ' + name);
-
-      if (App.db.getUser().admin) {
-        router.get('mainController').stopPolling();
-        console.log('In installer with successful authenticated');
-        console.log('current step=' + router.get('installerController.currentStep'));
-        Ember.run.next(function () {
-          var installerController = router.get('installerController');
-
-            App.clusterStatus.updateFromServer();
-            var currentClusterStatus = App.clusterStatus.get('value');
-
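-            // Resume the installer at the step recorded in the persisted
-            // cluster state, restoring the wizard's local storage snapshot.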
-            if (currentClusterStatus) {
-              switch (currentClusterStatus.clusterState) {
-                case 'CLUSTER_DEPLOY_PREP_2' :
-                  installerController.setCurrentStep('8');
-                  App.db.data = currentClusterStatus.localdb;
-                  break;
-                case 'CLUSTER_INSTALLING_3' :
-                case 'SERVICE_STARTING_3' :
-                  if(!installerController.get('isStep9')){
-                    installerController.setCurrentStep('9');
-                  }
-                  App.db.data = currentClusterStatus.localdb;
-                  break;
-                case 'CLUSTER_INSTALLED_4' :
-                  if(!installerController.get('isStep10')){
-                    installerController.setCurrentStep('10');
-                  }
-                  App.db.data = currentClusterStatus.localdb;
-                  break;
-                case 'CLUSTER_STARTED_5' :
-                  router.transitionTo('main.index');
-                  break;
-                default:
-                  break;
-              }
-            }
-          router.transitionTo('step' + installerController.get('currentStep'));
-        });
-      } else {
-        Em.run.next(function () {
-          App.router.transitionTo('main.services');
-        });
-      }
-    } else {
-      console.log('In installer but its not authenticated');
-      console.log('value of authenticated is: ' + router.getAuthenticated());
-      Ember.run.next(function () {
-        router.transitionTo('login');
-      });
-    }
-  },
-
-  routePath: function (router, event) {
-    console.log("INFO: value of router is: " + router);
-    console.log("INFO: value of event is: " + event);
-    router.setNavigationFlow(event);
-    if (!router.get('isFwdNavigation')) {
-      this._super(router, event);
-    } else {
-      router.set('backBtnForHigherStep', true);
-
-      var installerController = router.get('installerController');
-      router.transitionTo('step' + installerController.get('currentStep'));
-    }
-  },
-
-  connectOutlets: function (router, context) {
-    console.log('in /installer:connectOutlets');
-    router.get('applicationController').connectOutlet('installer');
-  },
-
-  step1: Em.Route.extend({
-    route: '/step1',
-    connectOutlets: function (router) {
-      console.log('in installer.step1:connectOutlets');
-      var controller = router.get('installerController');
-      controller.setCurrentStep('1');
-      controller.loadAllPriorSteps();
-      controller.connectOutlet('wizardStep1', controller.get('content'));
-    },
-
-    next: function (router) {
-      var installerController = router.get('installerController');
-      installerController.save('cluster');
-      installerController.clearInstallOptions();
-      router.transitionTo('step2');
-    }
-  }),
-
-  step2: Em.Route.extend({
-    route: '/step2',
-    connectOutlets: function (router, context) {
-      router.setNavigationFlow('step2');
-
-      var controller = router.get('installerController');
-      controller.setCurrentStep('2');
-      controller.loadAllPriorSteps();
-      controller.connectOutlet('wizardStep2', controller.get('content'));
-    },
-    back: Em.Router.transitionTo('step1'),
-    next: function (router) {
-      var controller = router.get('installerController');
-      controller.save('installOptions');
-      //hosts was saved to content.hosts inside wizardStep2Controller
-      controller.save('hosts');
-      router.transitionTo('step3');
-    }
-  }),
-
-  step3: Em.Route.extend({
-    route: '/step3',
-    connectOutlets: function (router) {
-      console.log('in installer.step3:connectOutlets');
-      var controller = router.get('installerController');
-      controller.setCurrentStep('3');
-      controller.loadAllPriorSteps();
-      controller.connectOutlet('wizardStep3', controller.get('content'));
-    },
-    back: function(router, event){
-      //if install not in progress
-      if(!$(event.target).attr('disabled')){
-        router.transitionTo('step2');
-      }
-    },
-    next: function (router, context) {
-      var installerController = router.get('installerController');
-      var wizardStep3Controller = router.get('wizardStep3Controller');
-      installerController.saveConfirmedHosts(wizardStep3Controller);
-      App.db.setBootStatus(true);
-      installerController.loadServicesFromServer();
-      router.transitionTo('step4');
-    },
-    exit: function (router) {
-      router.get('wizardStep3Controller').set('stopBootstrap', true);
-    },
-    /**
-     * Wrapper for remove host action.
-     * Since saving data stored in installerController, we should call this from router
-     * @param router
-     * @param context Array of hosts to delete
-     */
-    removeHosts: function (router, context) {
-      console.log('in installer.step2.removeHosts:hosts to delete ', context);
-      var controller = router.get('installerController');
-      controller.removeHosts(context);
-    }
-  }),
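
Because the saved host list lives in installerController, views do not delete hosts directly; they bubble the action up to the router, which owns the wrapper above. A hedged usage sketch (the isChecked filter is hypothetical):

// inside a step-3 view or controller action:
var hostsToDelete = this.get('content').filterProperty('isChecked', true);
App.router.send('removeHosts', hostsToDelete); // lands in removeHosts(router, context) above
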
-
-  step4: Em.Route.extend({
-    route: '/step4',
-    connectOutlets: function (router, context) {
-      router.setNavigationFlow('step4');
-      var controller = router.get('installerController');
-      controller.setCurrentStep('4');
-      controller.loadAllPriorSteps();
-      controller.connectOutlet('wizardStep4', controller.get('content.services'));
-    },
-    back: Em.Router.transitionTo('step3'),
-
-    next: function (router) {
-      var controller = router.get('installerController');
-      var wizardStep4Controller = router.get('wizardStep4Controller');
-      controller.saveServices(wizardStep4Controller);
-      controller.saveClients(wizardStep4Controller);
-
-      App.db.setMasterComponentHosts(undefined);
-      router.transitionTo('step5');
-    }
-  }),
-
-  step5: Em.Route.extend({
-    route: '/step5',
-    connectOutlets: function (router, context) {
-      router.setNavigationFlow('step5');
-
-      var controller = router.get('installerController');
-      var wizardStep5Controller = router.get('wizardStep5Controller');
-      controller.setCurrentStep('5');
-      controller.loadAllPriorSteps();
-      controller.connectOutlet('wizardStep5', controller.get('content'));
-    },
-    back: Em.Router.transitionTo('step4'),
-    next: function (router) {
-      var controller = router.get('installerController');
-      var wizardStep5Controller = router.get('wizardStep5Controller');
-      controller.saveMasterComponentHosts(wizardStep5Controller);
-      App.db.setSlaveComponentHosts(undefined);
-      router.transitionTo('step6');
-    }
-  }),
-
-  step6: Em.Route.extend({
-    route: '/step6',
-    connectOutlets: function (router, context) {
-      router.setNavigationFlow('step6');
-
-      var controller = router.get('installerController');
-      controller.setCurrentStep('6');
-      controller.loadAllPriorSteps();
-      controller.connectOutlet('wizardStep6', controller.get('content'));
-    },
-    back: Em.Router.transitionTo('step5'),
-
-    next: function (router) {
-      var controller = router.get('installerController');
-      var wizardStep6Controller = router.get('wizardStep6Controller');
-
-      if (wizardStep6Controller.validate()) {
-        controller.saveSlaveComponentHosts(wizardStep6Controller);
-        controller.get('content').set('serviceConfigProperties', null);
-        App.db.setServiceConfigProperties(null);
-        App.db.setSlaveProperties(null);
-        controller.loadSlaveGroupProperties();
-        controller.loadAdvancedConfigs();
-        router.transitionTo('step7');
-      }
-    }
-  }),
-
-  step7: Em.Route.extend({
-    route: '/step7',
-    enter: function (router) {
-      console.log('in installer.step7:enter');
-      var controller = router.get('installerController');
-      controller.setCurrentStep('7');
-      controller.loadAllPriorSteps();
-    },
-    connectOutlets: function (router, context) {
-      var controller = router.get('installerController');
-      controller.connectOutlet('wizardStep7', controller.get('content'));
-    },
-    back: Em.Router.transitionTo('step6'),
-    next: function (router) {
-      var installerController = router.get('installerController');
-      var wizardStep7Controller = router.get('wizardStep7Controller');
-      installerController.saveServiceConfigProperties(wizardStep7Controller);
-      router.transitionTo('step8');
-    }
-  }),
-
-  step8: Em.Route.extend({
-    route: '/step8',
-    connectOutlets: function (router, context) {
-      console.log('in installer.step8:connectOutlets');
-      var controller = router.get('installerController');
-      controller.setCurrentStep('8');
-      controller.loadAllPriorSteps();
-      controller.connectOutlet('wizardStep8', controller.get('content'));
-    },
-    back: Em.Router.transitionTo('step7'),
-    next: function (router) {
-      var installerController = router.get('installerController');
-      var wizardStep8Controller = router.get('wizardStep8Controller');
-      // invoke API call to install selected services
-      installerController.installServices();
-      installerController.setInfoForStep9();
-      // Persist cluster state so recovery can resume the right wizard (Add Host vs. Installer)
-      App.clusterStatus.setClusterStatus({
-        clusterName: this.get('clusterName'),
-        clusterState: 'CLUSTER_INSTALLING_3',
-        wizardControllerName: App.router.get('installerController.name'),
-        localdb: App.db.data
-      });
-      wizardStep8Controller.set('servicesInstalled', true);
-      router.transitionTo('step9');
-    }
-  }),
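
setClusterStatus persists a recovery snapshot before leaving step 8, so a browser reload can restore the wizard mid-install. An illustrative payload (field values here are examples, not captured output):

App.clusterStatus.setClusterStatus({
  clusterName: 'MyCluster',                    // example name
  clusterState: 'CLUSTER_INSTALLING_3',        // lifecycle marker, as above
  wizardControllerName: 'installerController', // which wizard to resume
  localdb: App.db.data                         // snapshot of browser-local storage
});
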
-
-  step9: Em.Route.extend({
-    route: '/step9',
-    connectOutlets: function (router, context) {
-      console.log('in installer.step9:connectOutlets');
-      var controller = router.get('installerController');
-      controller.setCurrentStep('9');
-      controller.loadAllPriorSteps();
-      if (!App.testMode) {
-        controller.setLowerStepsDisable(9);
-      }
-      controller.connectOutlet('wizardStep9', controller.get('content'));
-    },
-    back: Em.Router.transitionTo('step8'),
-    retry: function (router) {
-      var installerController = router.get('installerController');
-      var wizardStep9Controller = router.get('wizardStep9Controller');
-      if (wizardStep9Controller.get('showRetry')) {
-        if (wizardStep9Controller.get('content.cluster.status') === 'INSTALL FAILED') {
-          var isRetry = true;
-          installerController.installServices(isRetry);
-          installerController.setInfoForStep9();
-          wizardStep9Controller.resetHostsForRetry();
-          // Persist cluster state so recovery can resume the right wizard (Add Host vs. Installer)
-          App.clusterStatus.setClusterStatus({
-            clusterName: this.get('clusterName'),
-            clusterState: 'CLUSTER_INSTALLING_3',
-            wizardControllerName: App.router.get('installerController.name'),
-            localdb: App.db.data
-          });
-        }
-        wizardStep9Controller.navigateStep();
-      }
-    },
-    unroutePath: function () {
-      return false;
-    },
-    next: function (router) {
-      var installerController = router.get('installerController');
-      var wizardStep9Controller = router.get('wizardStep9Controller');
-      installerController.saveInstalledHosts(wizardStep9Controller);
-
-      App.clusterStatus.setClusterStatus({
-        clusterName: this.get('clusterName'),
-        clusterState: 'CLUSTER_INSTALLED_4',
-        wizardControllerName: App.router.get('installerController.name'),
-        localdb: App.db.data
-      });
-      router.transitionTo('step10');
-    }
-  }),
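
Step 9 combines two guards: retry re-invokes the install API only from a failed cluster state, and unroutePath() returning false vetoes URL-driven attempts to leave while deployment runs. A condensed sketch of that logic, using the same controller fields as above:

var status = wizardStep9Controller.get('content.cluster.status');
if (wizardStep9Controller.get('showRetry') && status === 'INSTALL FAILED') {
  installerController.installServices(true); // true === isRetry, as above
}
// unroutePath() { return false; } keeps the route pinned during the install
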
-
-  step10: Em.Route.extend({
-    route: '/step10',
-    connectOutlets: function (router, context) {
-      console.log('in installer.step10:connectOutlets');
-      var controller = router.get('installerController');
-      controller.setCurrentStep('10');
-      controller.loadAllPriorSteps();
-      if (!App.testMode) {
-        controller.setLowerStepsDisable(10);
-      }
-      controller.connectOutlet('wizardStep10', controller.get('content'));
-    },
-    back: Em.Router.transitionTo('step9'),
-    complete: function (router, context) {
-      if (true) {   // TODO: move validation into installerController; always passes for now
-        var controller = router.get('installerController');
-        controller.finish();
-
-        // Persist cluster state so recovery can resume the right wizard (Add Host vs. Installer)
-        App.clusterStatus.setClusterStatus({
-          clusterName: this.get('clusterName'),
-          clusterState: 'CLUSTER_STARTED_5',
-          wizardControllerName: App.router.get('installerController.name'),
-          localdb: App.db.data
-        });
-
-        router.transitionTo('main.index');
-      } else {
-        console.log('cluster installation failure');
-      }
-    }
-  }),
-
-  gotoStep1: Em.Router.transitionTo('step1'),
-
-  gotoStep2: Em.Router.transitionTo('step2'),
-
-  gotoStep3: Em.Router.transitionTo('step3'),
-
-  gotoStep4: Em.Router.transitionTo('step4'),
-
-  gotoStep5: Em.Router.transitionTo('step5'),
-
-  gotoStep6: Em.Router.transitionTo('step6'),
-
-  gotoStep7: Em.Router.transitionTo('step7'),
-
-  gotoStep8: Em.Router.transitionTo('step8'),
-
-  gotoStep9: Em.Router.transitionTo('step9'),
-
-  gotoStep10: Em.Router.transitionTo('step10')
-
-});
\ No newline at end of file
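
The ten gotoStepN handlers above are identical Em.Router.transitionTo macros; a hedged sketch of generating them in a loop instead (the gotoSteps object is hypothetical):

var gotoSteps = {};
for (var i = 1; i <= 10; i++) {
  gotoSteps['gotoStep' + i] = Em.Router.transitionTo('step' + i);
}
// Em.Route.extend(gotoSteps, { /* routes and handlers */ }) would mix them in
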
diff --git a/branch-1.2/ambari-web/app/routes/main.js b/branch-1.2/ambari-web/app/routes/main.js
deleted file mode 100644
index a717bf8..0000000
--- a/branch-1.2/ambari-web/app/routes/main.js
+++ /dev/null
@@ -1,356 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-module.exports = Em.Route.extend({
-  route:'/main',
-  enter:function (router) {
-    console.log('in /main:enter');
-    if (router.getAuthenticated()) {
-      App.router.get('clusterController').loadClusterName(false);
-      router.get('mainController').initialize();
-      // TODO: redirect to last known state
-    } else {
-      Ember.run.next(function () {
-        router.transitionTo('login');
-      });
-    }
-  },
-
-  index:Ember.Route.extend({
-    route:'/',
-    redirectsTo:'dashboard'
-  }),
-
-  test:Em.Route.extend({
-    route:'/test',
-    connectOutlets:function (router, context) {
-      router.get('mainController').connectOutlet('mainTest');
-    }
-  }),
-
-  connectOutlets:function (router, context) {
-    router.get('applicationController').connectOutlet('main');
-  },
-
-  charts:Em.Route.extend({
-    route:'/charts',
-    connectOutlets:function (router, context) {
-      router.get('mainController').connectOutlet('mainCharts');
-    },
-    enter:function (router) {
-      Em.run.next(function () {
-        router.transitionTo('heatmap');
-      });
-    },
-    index:Ember.Route.extend({
-      route:'/',
-      redirectsTo:'heatmap'
-    }),
-    heatmap:Em.Route.extend({
-      route:'/heatmap',
-      connectOutlets:function (router, context) {
-        router.get('mainChartsController').connectOutlet('mainChartsHeatmap');
-      }
-    }),
-    horizon_chart:Em.Route.extend({
-      route:'/horizon_chart',
-      connectOutlets:function (router, context) {
-        router.get('mainChartsController').connectOutlet('mainChartsHorizon');
-      }
-    }),
-    showChart:function (router, event) {
-      var parent = event.view._parentView;
-      parent.deactivateChildViews();
-      event.view.set('active', "active");
-      router.transitionTo(event.context);
-    }
-  }),
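
The activate-a-tab idiom in showChart above reappears verbatim in hostNavigate and showInfo below. A hedged sketch of a shared helper (the name activateTabAndTransition is hypothetical):

var activateTabAndTransition = function (router, event) {
  var parent = event.view._parentView;
  parent.deactivateChildViews();        // clear 'active' from sibling tabs
  event.view.set('active', 'active');   // highlight the clicked tab
  router.transitionTo(event.context);   // event context names the target route
};
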
-  apps:Em.Route.extend({
-    route:'/apps',
-    connectOutlets:function (router) {
-      //router.get('clusterController').loadRuns();
-      router.get('mainAppsController').loadRuns();
-      router.get('mainController').connectOutlet('mainApps');
-    }
-  }),
-
-  hosts:Em.Route.extend({
-    route:'/hosts',
-    index:Ember.Route.extend({
-      route:'/',
-      connectOutlets:function (router, context) {
-        router.get('mainController').connectOutlet('mainHost');
-      }
-    }),
-
-    hostDetails:Em.Route.extend({
-      route:'/:host_id',
-      connectOutlets:function (router, host) {
-        router.get('mainController').connectOutlet('mainHostDetails', host);
-      },
-
-      index:Ember.Route.extend({
-        route:'/',
-        redirectsTo:'summary'
-      }),
-
-      summary:Em.Route.extend({
-        route:'/summary',
-        connectOutlets:function (router, context) {
-          router.get('mainHostDetailsController').connectOutlet('mainHostSummary');
-        }
-      }),
-
-      metrics:Em.Route.extend({
-        route:'/metrics',
-        connectOutlets:function (router, context) {
-          router.get('mainHostDetailsController').connectOutlet('mainHostMetrics');
-        }
-      }),
-
-      audit:Em.Route.extend({
-        route:'/audit',
-        connectOutlets:function (router, context) {
-          router.get('mainHostDetailsController').connectOutlet('mainHostAudit');
-        }
-      }),
-
-      hostNavigate:function (router, event) {
-        var parent = event.view._parentView;
-        parent.deactivateChildViews();
-        event.view.set('active', "active");
-        router.transitionTo(event.context);
-      }
-    }),
-
-    backToHostsList:function (router, event) {
-      router.transitionTo('hosts.index');
-    },
-
-    showDetails:function (router, event) {
-      router.get('mainHostDetailsController').setBack(true);
-      router.transitionTo('hostDetails.summary', event.context);
-    },
-
-    addHost:function (router) {
-      if(App.clusterStatus){
-        App.clusterStatus.updateFromServer();
-        var currentClusterStatus = App.clusterStatus.get('value');
-        if(currentClusterStatus && currentClusterStatus.clusterState === 'ADD_HOSTS_COMPLETED_5'){
-          // The last run of the Add Hosts wizard left the status in this
-          // state; clear it so the hosts page starts fresh.
-          currentClusterStatus.clusterState = 'CLUSTER_STARTED_5';
-        }
-      }
-      router.transitionTo('hostAdd');
-    }
-
-  }),
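
The reset in addHost above exists because these routes persist numbered lifecycle markers; collected here for reference are the markers that appear in this file (the trailing digit appears to order the phases):

var CLUSTER_STATES = [
  'CLUSTER_INSTALLING_3',  // set while installer step 8/9 deploys
  'CLUSTER_INSTALLED_4',   // set when step 9 completes
  'CLUSTER_STARTED_5',     // set at step 10 / steady state
  'ADD_HOSTS_COMPLETED_5'  // left by the Add Hosts wizard; cleared above
];
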
-
-  hostAdd:require('routes/add_host_routes'),
-
-  admin:Em.Route.extend({
-    route:'/admin',
-    enter: function(){
-      if(!App.db.getUser().admin){
-        Em.run.next(function () {
-          App.router.transitionTo('main.dashboard');
-        });
-      }
-    },
-    connectOutlets:function (router, context) {
-      router.get('mainController').connectOutlet('mainAdmin');
-    },
-
-    index:Ember.Route.extend({
-      route:'/',
-      redirectsTo:'adminUser'
-    }),
-
-    adminUser:Em.Route.extend({
-      route:'/user',
-      enter:function (router) {
-        router.set('mainAdminController.category', "user");
-        Em.run.next(function () {
-          router.transitionTo('allUsers');
-        });
-      },
-
-      // events
-      gotoUsers:Em.Router.transitionTo("allUsers"),
-      gotoCreateUser:Em.Router.transitionTo("createUser"),
-      gotoEditUser:function (router, event) {
-        router.transitionTo("editUser", event.context)
-      },
-
-      // states
-      allUsers:Em.Route.extend({
-        route:'/',
-        connectOutlets:function (router) {
-          router.get('mainAdminController').connectOutlet('mainAdminUser');
-        }
-      }),
-
-      createUser:Em.Route.extend({
-        route:'/create',
-        connectOutlets:function (router) {
-          router.get('mainAdminController').connectOutlet('mainAdminUserCreate', {});
-        }
-      }),
-
-      editUser:Em.Route.extend({
-        route:'/edit/:user_id',
-        connectOutlets:function (router, user) {
-          router.get('mainAdminController').connectOutlet('mainAdminUserEdit', user);
-        }
-      })
-    }),
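
The editUser route above declares a :user_id dynamic segment; a transition supplies a model object whose id fills the segment. A hedged sketch (App.User.find('admin') is illustrative; the actual model lookup may differ):

router.transitionTo('editUser', App.User.find('admin')); // -> /admin/user/edit/admin
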
-
-    adminAuthentication:Em.Route.extend({
-      route:'/authentication',
-      connectOutlets:function (router) {
-        router.set('mainAdminController.category', "authentication");
-        router.get('mainAdminController').connectOutlet('mainAdminAuthentication');
-      }
-    }),
-
-    adminSecurity:Em.Route.extend({
-      route:'/security',
-      connectOutlets:function (router) {
-        router.set('mainAdminController.category', "security");
-        router.get('mainAdminController').connectOutlet('mainAdminSecurity');
-      }
-    }),
-
-    adminAdvanced:Em.Route.extend({
-      route:'/advanced',
-      connectOutlets:function (router) {
-        router.set('mainAdminController.category', "advanced");
-        router.get('mainAdminController').connectOutlet('mainAdminAdvanced');
-      }
-    }),
-
-    adminAudit:Em.Route.extend({
-      route:'/audit',
-      connectOutlets:function (router) {
-        router.set('mainAdminController.category', "audit");
-        router.get('mainAdminController').connectOutlet('mainAdminAudit');
-      }
-    }),
-
-    adminNavigate:function (router, object) {
-      Em.run.next(function () {
-        router.transitionTo('admin' + object.context.capitalize());
-      });
-    }
-  }),
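
adminNavigate above derives the target route name from the event context via Ember's String#capitalize prototype extension. A hedged usage illustration:

// {context: 'security'} -> 'admin' + 'Security' -> the adminSecurity route above
router.send('adminNavigate', { context: 'security' });
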
-
-  dashboard:Em.Route.extend({
-    route:'/dashboard',
-    connectOutlets:function (router, context) {
-      router.get('mainController').connectOutlet('mainDashboard');
-    },
-    showDetails:function (router, event) {
-      router.get('mainHostDetailsController').setBack(true);
-      router.transitionTo('hosts.hostDetails.summary', event.context);
-    }
-  }),
-
-  services:Em.Route.extend({
-    route:'/services',
-    index:Ember.Route.extend({
-      route:'/'
-    }),
-    enter:function (router) {
-      Ember.run.next(function () {
-        var service = router.get('mainServiceItemController.content');
-        if (!service) {
-          service = App.Service.find().objectAt(0); // getting the first service to display
-        }
-        router.transitionTo('service.summary', service);
-      });
-    },
-    connectOutlets:function (router, context) {
-      router.get('mainController').connectOutlet('mainService');
-    },
-    service:Em.Route.extend({
-      route:'/:service_id',
-      connectOutlets:function (router, service) {
-        router.get('mainServiceController').connectOutlet('mainServiceItem', service);
-        router.transitionTo('summary');
-      },
-      index:Ember.Route.extend({
-        route:'/'
-      }),
-      summary:Em.Route.extend({
-        route:'/summary',
-        connectOutlets:function (router, context) {
-          var item = router.get('mainServiceItemController.content');
-          var viewName = 'mainServiceInfoSummary';
-          router.get('mainServiceItemController').connectOutlet(viewName, item);
-        }
-      }),
-      metrics:Em.Route.extend({
-        route:'/metrics',
-        connectOutlets:function (router, context) {
-          var item = router.get('mainServiceItemController.content');
-          router.get('mainServiceItemController').connectOutlet('mainServiceInfoMetrics', item);
-        }
-      }),
-      configs:Em.Route.extend({
-        route:'/configs',
-        connectOutlets:function (router, context) {
-          var item = router.get('mainServiceItemController.content');
-          router.get('mainServiceItemController').connectOutlet('mainServiceInfoConfigs', item);
-        }
-      }),
-      audit:Em.Route.extend({
-        route:'/audit',
-        connectOutlets:function (router, context) {
-          var item = router.get('mainServiceItemController.content');
-          router.get('mainServiceItemController').connectOutlet('mainServiceInfoAudit', item);
-        }
-      }),
-      showInfo:function (router, event) {
-        var parent = event.view._parentView;
-        parent.deactivateChildViews();
-        event.view.set('active', "active");
-        router.transitionTo(event.context);
-      },
-      showDetails:function (router, event) {
-        router.get('mainHostDetailsController').setBack(true);
-        router.transitionTo('hosts.hostDetails.summary', event.context);
-      }
-    }),
-    showService:Em.Router.transitionTo('service'),
-    addService:Em.Router.transitionTo('serviceAdd')
-  }),
-
-  serviceAdd:require('routes/add_service_routes'),
-
-  selectService:Em.Router.transitionTo('services.service'),
-  selectHost:function (router, event) {
-    router.get('mainHostDetailsController').setBack(false);
-    router.transitionTo('hosts.hostDetails.index', event.context);
-  },
-  filterHosts:function (router, component) {
-    router.get('mainHostController').filterByComponent(component.context);
-    router.transitionTo('hosts.index');
-  }
-});
\ No newline at end of file
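
hostAdd and serviceAdd above mount entire sub-wizards from separate modules; each module exports a complete Em.Route that the parent router composes like an inline route. A shape-only sketch (the '/hosts/add' path is illustrative, not copied from add_host_routes):

// routes/add_host_routes.js, shape only:
module.exports = Em.Route.extend({
  route: '/hosts/add',
  step1: Em.Route.extend({ route: '/step1' /* ...same step pattern as the installer */ })
});
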
diff --git a/branch-1.2/ambari-web/app/styles/application.less b/branch-1.2/ambari-web/app/styles/application.less
deleted file mode 100644
index db5ad2d..0000000
--- a/branch-1.2/ambari-web/app/styles/application.less
+++ /dev/null
@@ -1,2799 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-.gradient(@color: #FAFAFA, @start: #FFFFFF, @stop: #F2F2F2) {
-  background: @color;
-  background: -webkit-gradient(linear, left top, left bottom, color-stop(0, @start), color-stop(1, @stop));
-  background: -ms-linear-gradient(top, @start, @stop);
-  background: -moz-linear-gradient(center top, @start 0%, @stop 100%);
-}
-
-html {
-  overflow-y: scroll;
-}
-html, body {
-  height: 100%;
-}
-
-#wrapper {
-  min-height: 100%;
-}
-
-@footer-height: 100px;
-
-#main {
-  overflow: visible;
-  padding-bottom: @footer-height;
-  min-width: 980px;
-}
-
-footer {
-  position: relative;
-  border-top: 1px solid #e5e5e5;
-  padding: 30px 0;
-  background-color: #fff;
-  color: #666666;
-  margin-top: -1 * @footer-height - 1;
-  height: @footer-height - 60px;
-}
-
-#content {
-  padding: 20px 0;
-}
-
-#top-nav {
-  .navbar {
-    font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
-  }
-
-  .navbar {
-    .logo {
-      float: left;
-      padding-top: 2px;
-      img{
-        height: 32px;
-      }
-    }
-
-    .brand {
-      color: #666666;
-      font-size: 16px;
-      font-weight: normal;
-      line-height: 32px;
-      margin-left: 0;
-      padding: 2px 5px 0 10px;
-    }
-
-    .brand.cluster-name {
-      margin-left: 10px;
-
-      .operations-count {
-        background-color: #006DCC;
-      }
-    }
-
-
-  }
-
-  .navbar .nav {
-    float: none;
-    overflow: hidden;
-  }
-
-  .navbar-inner{
-    min-height: 40px;
-  }
-
-  .navbar .nav .active > a, .navbar .nav .active > a:hover {
-    color: #FFFFFF;
-    text-decoration: none;
-  }
-
-  .navbar .nav > li > a {
-    border-radius: 8px;
-    -webkit-border-radius: 8px;
-    -moz-border-radius: 8px;
-    font-size: 13px;
-    font-weight: bold;
-    line-height: 19px;
-    margin: 1px 10px 2px 0;
-    padding: 7px 14px;
-    text-decoration: none;
-    text-shadow: none;
-  }
-
-  .navbar .nav > li > a:hover {
-    background-color: transparent;
-    color: #999999;
-    text-decoration: none;
-  }
-
-  .navbar .nav > li.right {
-    float: right;
-  }
-}
-
-#main-nav {
-
-  li {
-    font-size: 15px;
-    border-left: 1px solid #fefefe;
-    border-right: 1px solid #f0f0f0;
-    text-align: center;
-  }
-  li.span2 {
-    padding: 0;
-    margin: 0;
-    width: 140px;
-  }
-
-  li:first-child{
-    border-left: none;
-    a{
-      border-radius: 4px 0 0 4px;
-    }
-  }
-
-  .navbar-inner{
-    padding-left: 0;
-  }
-
-  margin-bottom: 20px;
-}
-
-.pre-scrollable {
-  overflow-y: auto;
-}
-
-@green: #69BE28;
-@blue: #0572ff;
-
-h1 {
-  color: @green;
-}
-
-.login.well.span4 {
-  margin: 20px auto;
-  padding: 25px;
-  float: none;
-}
-
-.login {
-  h2 {
-    margin-top: 0;
-    margin-bottom: 20px;
-  }
-  .btn {
-    margin-top: 15px;
-  }
-}
-
-.hide {
-  visibility: hidden;
-}
-
-.show {
-  visibility: visible;
-}
-
-/***************
- * Ambari wide icon colors
- ***************/
-.icon-ok {
-  color: #5AB400;
-}
-
-.icon-warning-sign {
-  color: #FDB82F;
-}
-
-.icon-empty {
-  height: 21px;
-  display: inline-block;
-  width: 8px;
-}
-
-.icon-caret-right {
-  min-width:8px;
-  padding-top: 2px;
-}
-
-.icon-caret-left {
-  min-width:8px;
-  padding-top: 2px;
-}
-
-.icon-remove {
-  color: #FF4B4B;
-}
-.tooltip {
-  z-index: 1050;
-}
-.tooltip-inner {
-  text-align: left;
-}
-
-.popover {
-  z-index: 1050;
-}
-
-.arrow-right {
-  display: inline-block;
-  margin: 0 0 20px 70px;
-  border-top: 70px solid transparent;
-  border-bottom: 70px solid transparent;
-  border-left: 20px solid transparent;
-}
-
-.arrow-left {
-  display: inline-block;
-  margin: 0 -10px 20px 3px;
-  border-top: 70px solid transparent;
-  border-bottom: 70px solid transparent;
-  border-right:20px solid transparent;
-}
-
-.visibleArrow {
-  border-right-color: #dedede;
-  border-left-color: #dedede;
-}
-
-#installer, #add-host, #add-service {
-  h2 {
-    margin-top: 0;
-  }
-  .btn.btn-success {
-  /* float: right; */
-  }
-  .btn-area {
-    margin-top: 20px;
-  }
-  #installer-content, #add-host-content, #add-service-content {
-    padding: 25px;
-    background-color: #fff;
-
-  }
-  .nav-header {
-    font-size: 13px;
-    padding-left: 0;
-  }
-  #installOptions {
-    .sshKey-file-view {
-      width: 486px;
-      height: 200px;
-      overflow-y: auto;
-      border: 1px solid #ccc;
-      margin-top: 5px;
-      padding: 8px;
-      font-family: Consolas, "Liberation Mono", Courier, monospace;
-    }
-    .sshKey-file-view.disabled {
-      background-color: #eee;
-    }
-    .manual-install {
-      margin-top: 10px;
-      width: 504px;
-      height: auto;
-    }
-    #targetHosts {
-      .target-hosts-input {
-        padding-left: 18px;
-      }
-    }
-    .span6 {
-      min-width: 504px;
-    }
-    #hostConnectivity {
-      margin-top: 20px;
-      .control-group {
-        margin-bottom: 0;
-      }
-    }
-    .advancedOptions {
-      margin-top: 20px;
-      margin-bottom: 30px;
-      label{
-        margin-bottom: 10px;
-      }
-      label.disabled{
-        color: #ccc;
-      }
-    }
-    .java-home {
-      margin-bottom: 10px
-    }
-    .ssh-key-input {
-      padding-left: 18px;
-    }
-    .ssh-key-input-indentation {
-      margin-bottom: 5px;
-    }
-    input[type="radio"]:focus {
-      outline: none;
-    }
-  }
-  #confirm-hosts {
-    #host-filter {
-      margin-top: 3px;
-      ul {
-        margin: 3px;
-        font-size: 12px;
-        li {
-          list-style: none;
-          display: block;
-          float: left;
-          padding: 0 2px;
-          a {
-            text-decoration: underline;
-          }
-        }
-        li.first {
-          font-weight: bold;
-        }
-        li.active a {
-          text-decoration: none;
-          color: #000000;
-        }
-      }
-    }
-    .progress {
-      margin-bottom: 0;
-    }
-  }
-  #step4, #step6 {
-    a.selected {
-      color: #333;
-    }
-    a.deselected {
-    }
-    i.icon-asterisks {
-      color: #FF4B4B;
-    }
-  }
-  #step6 .pre-scrollable {
-    max-height: 440px;
-  }
-  #deploy {
-    #overallProgress {
-    }
-    #deploy-status-by-host {
-      th.host {
-        width: 30%;
-      }
-      th.status {
-        width: 30%;
-      }
-      th.messgage {
-        width: 40%;
-      }
-      .progress-bar {
-        width: 80%;
-        .progress {
-          margin-bottom: 0;
-        }
-      }
-      .progress-percentage {
-        margin-left: 10px;
-        width: calc(~"20% - 10px"); // escaped so LESS doesn't fold this to 10%
-      }
-    }
-  }
-}
-
-#host-log {
-  .stderr, .stdout {
-    background-color: #f0f0f0;
-    /* max-height: 300px;
-    overflow-y: auto; */
-    border-radius: 4px;
-    padding: 8px;
-  }
-}
-
-
-#serviceConfig {
-  margin-top: 20px;
-  .spinner {
-    width:36px;
-    height:36px;
-    background: url("/img/spinner.gif");
-    margin: 0 auto;
-  }
-  .directories{
-    min-width: 280px;
-  }
-  .accordion-heading {
-    background-color: #f0f0f0;
-  }
-  .accordion-group {
-    margin-bottom: 20px;
-    .control-label {
-      text-align: left;
-    }
-    .control-group {
-      margin: 10px 0;
-    }
-    form {
-      margin-bottom: 0;
-    }
-  }
-  .accordion-group.Advanced {
-    .control-label {
-      float: none;
-      text-align: left;
-      width: auto;
-    }
-    .controls {
-      margin-left: 0;
-    }
-  }
-  .badge {
-    margin-left: 4px;
-  }
-  .slave-component-group-menu {
-    float: left;
-    .nav {
-      margin-bottom: 10px;
-    }
-    .nav li {
-      position: relative;
-      a {
-        padding-right: 24px;
-      }
-      .icon-remove {
-        border: 1px solid white;
-        position: absolute;
-        right: 7px;
-        top: 10px;
-        z-index: 2;
-        cursor: default;
-        color: #555555;
-      }
-      .icon-remove:hover {
-        border: 1px solid grey;
-      }
-    }
-  }
-  .remove-group-error {
-    display: none;
-  }
-  .add-slave-component-group {
-    margin-bottom: 10px;
-  }
-  .master-host, .master-hosts, .slave-hosts {
-    padding-top: 5px;
-    line-height: 20px;
-  }
-  #attention {
-    margin: 20px 0;
-  }
-  .retyped-password {
-    margin-left: 14px;
-  }
-  #slave-hosts-popup {
-    ul {
-      list-style-type: none;
-    }
-  }
-}
-
-a:focus {
-  outline: none;
-}
-
-@status-live-marker: url("/img/health-status-live.png");
-@status-dead-marker: url("/img/health-status-dead.png");
-@status-dead-orange-marker: url("/img/health-status-dead-orange.png");
-@status-dead-yellow-marker: url("/img/health-status-dead-yellow.png");
-@status-ok-marker: url("/img/status-ok.jpg");
-@status-corrupt-marker: url("/img/status-corrupt.jpg");
-@arrow-right: url("/img/arrow-right.png");
-
-/*Rack images*/
-@rack-status-live: url("/img/rack-status-live.png");
-@rack-status-critical: url("/img/rack-status-critical.png");
-@rack-status-dead: url("/img/rack-status-dead.png");
-@rack-state-toggler: url("/img/rack-state-toggler.png");
-@rack-state-plus: url("/img/rack-state-plus.png");
-@rack-state-minus: url("/img/rack-state-minus.png");
-
-/*****start styles for boxes*****/
-.box {
-  border: 1px solid #D4D4D4;
-  border-radius: 4px;
-  margin-bottom: 20px;
-
-  .box-header {
-    border-bottom: 1px solid #D4D4D4;
-    border-radius: 4px 4px 0 0;
-  }
-  .box-header,
-  .box-footer {
-    padding: 4px 4px 4px 10px;
-  /*background: #dedede;*/
-    .gradient(#dedede, #ffffff, #dedede)
-  }
-  .box-header:after,
-  .box-footer:after {
-    content: "";
-    display: table;
-    clear: both;
-  }
-  .box-header {
-    .btn-group {
-      float: right;
-    }
-    h4 {
-      float: left;
-      margin: 5px;
-      font-size: 15px;
-      color: #777;
-    }
-  }
-  .box-footer {
-    hr {
-      margin: 0px;
-    }
-  }
-}
-
-/*****end styles for boxes*****/
-
-
-/*90% width modal window start*/
-.full-width-modal {
-  .modal {
-    width: 90%;
-    margin: -350px 0 0 -45%;
-  }
-
-  .clear {
-    clear:both;
-  }
-  > div > .dataTable {
-    border: 1px solid silver;
-    th {
-      border-top:none;
-    }
-  }
-  .content {
-    padding: 0;
-  }
-
-//fix striping of the inner table
-  .table-striped tbody tr:nth-child(odd)
-  td .table-striped tbody
-  tr:nth-child(odd) td,
-  tr:nth-child(even) th{
-    background-color: transparent;
-  }
-
-}
-/*90% width modal window end*/
-
-/*60% width modal window start*/
-.sixty-percent-width-modal {
-  .modal {
-    width: 60%;
-    margin: 0 0 0 -30%;
-    max-height: 544px;
-    top:5%;
-  }
-
-  .modal-body{
-    max-height: 403px;
-  }
-
-  .clear {
-    clear:both;
-  }
-  > div > .dataTable {
-    border: 1px solid silver;
-    th {
-      border-top:none;
-    }
-  }
-  .content {
-    padding: 0;
-  }
-
-//fix striping of the inner table
-  .table-striped tbody tr:nth-child(odd)
-  td .table-striped tbody
-  tr:nth-child(odd) td,
-  tr:nth-child(even) th{
-    background-color: transparent;
-  }
-
-}
-/*60% width modal window end*/
-
-/*****start styles for install tasks logs*****/
-.task-list-main-warp, .task-detail-info {
-  i {
-    font-size: 20px;
-    vertical-align: middle;
-  }
-  .pending, .queued{
-    color: #999999;
-  }
-  .in_progress{
-    color: #367FE6;
-  }
-  .completed{
-    color: #0EA31C;
-  }
-  .failed {
-    color: #DF5F5F;
-  }
-  .aborted{
-    color: #FF9C09;
-  }
-  .timedout{
-    color: #FF9C09;
-  }
-}
-.task-list-main-warp{
-
-  .task-list-line-cursor{
-    cursor:pointer;
-  }
-
-  .task-top-wrap{
-
-    width:100%;
-    border-bottom: 1px solid #CCC;
-    text-align:center;
-    font-size:15px;
-    padding: 0 0 10px 0;
-
-    .select-wrap{
-      float: right;
-      margin-top: -8px;
-      select{
-        width:140px;
-      }
-    }
-  }
-
-  #host-log {
-    .log-list-wrap{
-      padding: 10px 10px 10px 20px;
-      border-top: 1px solid #CCC;
-      border-bottom: 1px solid #CCC;
-
-      .show-details{
-        float: right;
-        cursor: pointer;
-        font-size: 16px;
-
-        i{
-          color: #333;
-        }
-      }
-
-    }
-  }
-}
-
-.task-detail-info{
-
-  .task-detail-log-clipboard{
-    display: none;
-    resize: none;
-    overflow: hidden;
-  }
-
-  .task-top-wrap{
-    width:100%;
-    border-bottom: 1px solid #CCC;
-    text-align:center;
-    font-size:15px;
-    padding: 0 0 10px 0;
-
-    .task-detail-back{
-      float: left;
-    }
-
-    .task-detail-log-rolename{
-      position: relative;
-      left: 0;
-      top: 0;
-    }
-    .task-detail-status-ico{
-      content: "";
-      margin-top: -4px;
-      box-shadow: none;
-      margin-bottom: 0px;
-      font-size:24px;
-    }
-
-    .task-detail-ico-wrap{
-      font-size: 14px;
-      float:right;
-      margin-top:-20px;
-
-      div{
-        cursor: pointer;
-      }
-      .task-detail-copy{
-        margin-right: 12px;
-        float: left;
-      }
-      .task-detail-open-dialog{
-        float: right;
-      }
-    }
-  }
-}
-/*****end styles for install tasks logs*****/
-
-/*****start styles for dashboard page*****/
-
-/*start services summary*/
-.services {
-  margin-left: 0;
-  margin-top: 0;
-  position: relative;
-
-  .tab-marker-position {
-    background-position: 6px 5px;
-    background-repeat: no-repeat;
-    list-style: none;
-    float:left;
-    min-height: 20px;
-    min-width: 20px;
-    margin-left: 0;
-  }
-  .health-status-LIVE, .health-status-STARTING {
-    .tab-marker-position;
-    background-image: @status-live-marker;
-  }
-  .health-status-DEAD, .health-status-STOPPING {
-    .tab-marker-position;
-    background-image: @status-dead-marker;
-  }
-  .health-status-DEAD-ORANGE {
-    .tab-marker-position;
-    background-image: @status-dead-orange-marker;
-  }
-  .health-status-DEAD-YELLOW {
-    .tab-marker-position;
-    background-image: @status-dead-yellow-marker;
-  }
-  dt {
-    text-align: left;
-    width: 120px;
-  }
-  dd {
-    margin-left: 145px;
-  }
-  .like_pointer {
-    cursor: pointer;
-  }
-  .service {
-    position: relative;
-    margin-top: 10px;
-    border-bottom: 1px solid #b8b8b8;
-    padding-left: 10px;
-    margin-right: 20px;
-
-    .name {
-      line-height: 21px;
-      margin-left: 0;
-      width: 145px;
-      a {
-        margin-left: 5px
-      }
-    }
-    .summary {
-      line-height: 21px;
-    }
-    .clearfix {
-      padding-bottom: 8px;
-    }
-    .service-body {
-      display: none;
-      position: relative;
-    }
-    table.table {
-      margin-top: 14px;
-      color: #666;
-      font-size: 13px;
-      //width: 80%;
-      tr > td:first-child {
-        padding-right: 10px;
-        text-align: right !important;
-      }
-      th, td {
-        padding: 4px;
-      }
-    }
-    .dashboard-mini-chart > div{
-      width: 172px;
-    }
-    .dashboard-mini-chart {
-      right: 0;
-      float: right;
-      top: 7px;
-      position: absolute;
-      overflow: visible; // for quick links
-      text-align: center;
-      //width: 180px;
-      height: 200px;
-      .dropdown-menu {
-        text-align: left;
-      }
-      .chart-container{
-        .chart-x-axis{
-          left: 0%;
-          width: 100%;
-          text-align: left;
-        }
-      }
-      .chartLabel {
-        font-size: 11px;
-        color: #7b7b7b;
-      }
-      .chart-legend {
-        left: -205px;
-        text-align: left;
-        top: 0;
-        width: 185px;
-      }
-    }
-  }
-}
-
-#summary-info {
-  border-top: none;
-  border-collapse: collapse;
-  color: #666;
-  font-size: 13px;
-
-  td.summary-label {
-    width: 180px;
-    text-align: right;
-  }
-
-/*
-  td {
-    border-top: 1px solid #eee;
-  }
-*/
-
-  tr td:first-child {
-    text-align: right;
-  }
-
-  a {
-    text-decoration: underline;
-    &:hover {
-      text-decoration: none;
-    }
-  }
-}
-
-.more-stats {
-  display: block;
-  width: 100%;
-  padding: 7px 0;
-  text-align: center;
-  color: #333333;
-  &:hover {
-    background-color: #eee;
-    color: #333;
-    text-decoration: none;
-  }
-}
-
-/*end services summary*/
-
-/*start alerts summary*/
-.alerts {
-  border: 1px solid #ddd;
-  margin: 0px;
-  max-height: 500px;
-  overflow-y: auto;
-  li {
-    border-bottom: 1px solid #eee;
-    list-style: none;
-    padding: 5px 5px 5px 5px;
-    background-position: 14px 9px;
-    background-repeat: no-repeat;
-    .date-time {
-      float: right;
-    }
-    p {
-      margin-bottom: 2px;
-    }
-    .container-fluid {
-      padding-left: 10px;
-      padding-right: 10px;
-    }
-    .title {
-      font-weight: normal;
-      font-size: 14px;
-    }
-    .row-fluid [class*="span"] {
-      min-height: 0px;
-    }
-    .status-icon {
-      padding-top: 7px;
-    }
-    .date-time {
-      color: #999;
-      font-style: italic;
-      font-size: small;
-      text-align: right;
-    }
-    .message {
-      font-size: 13px;
-      color: #777;
-    }
-    .serviceLink {
-      padding-left: 7px;
-    }
-  }
-  .alert{
-    margin: 7px;
-  }
-}
-
-.go-to {
-  float: right;
-  background-position: right center;
-  background-repeat: no-repeat;
-  background-image: @arrow-right;
-  padding-right: 40px;
-  margin-top: 4px;
-}
-
-.modal-graph-line {
-  width: 810px;
-  .modal-body {
-    min-height: 420px !important;
-    overflow: hidden;
-  }
-}
-
-/*end alerts summary*/
-
-/*Alerts popup*/
-.alertsPopupLinks {
-  padding: 20px 40px 0px;
-  text-align: right;
-
-  a:first-child {
-    float: left;
-  }
-}
-.modal-footer.align-center {
-  text-align: center;
-}
-
-/*start chart/style graphs*/
-.chart-container {
-  cursor: pointer;
-  cursor:-moz-zoom-in;
-  cursor:-webkit-zoom-in;
-
-  &.chart-container-popup {
-    cursor: auto !important;
-    margin-left: 0;
-    overflow: visible;
-    width: 650px;
-    .chart {
-      left: 60px;
-      overflow: visible;
-      position: relative;
-    }
-    .chart-y-axis {
-      position: absolute;
-      top: -15px;
-      bottom: 0;
-      width: 60px;
-    }
-    .chart-legend {
-      left: 60px;
-      top: 245px;
-    }
-    .x_tick {
-      .title {
-        margin-top: 35px !important;
-      }
-    }
-  }
-  position: relative;
-  margin: 20px 15px 0px 15px;
-
-  .chart {
-    position: relative;
-    z-index: 1;
-  }
-  .chart-y-axis {
-    position: absolute;
-    top: 0;
-    bottom: 0px;
-    width: 100px;
-    z-index: 2;
-    margin-top: 15px;
-  }
-  .chart-x-axis {
-    position: absolute;
-    top: 180px;
-    left: 35%;
-    width: 30%;
-    z-index: 2;
-  }
-  .x_tick {
-    margin-top: 5px;
-    .title {
-      padding: 0 2px 0 2px;
-      opacity: 1 !important;
-      top: 148px;
-    }
-  }
-  svg {
-    g {
-      g:nth-child(1) {
-        display: none;
-      }
-    }
-  }
-  text{
-    font-weight: 700;
-    opacity: 1 !important;
-  }
-  .chart-legend {
-    font-family: 'Courier New';
-    position: absolute;
-    top: 180px;
-    z-index: 3;
-  }
-  .rickshaw_legend {
-    background-color: #999 !important;
-    li:hover {
-      background-color: #999 !important;
-    }
-  }
-  .rickshaw_legend:empty {
-    padding: 0;
-  }
-  .rickshaw_graph {
-    .x_tick{
-      .title {
-        bottom: -6px;
-        opacity: 0.75;
-      }
-    }
-  }
-  .chart-overlay {
-    position: absolute;
-    top: 0;
-    bottom: 0;
-    width: 100%;
-    z-index: 5;
-  }
-  .chart-title {
-    text-align: center;
-    font-size: small;
-  }
-}
-
-.modal-body{
-  .show {
-    display: inline-block;
-  }
-  .time-label {
-    text-align: center;
-  }
-}
-
-.mini-chart{
-  position: absolute;
-  .chart-container{
-    width: 130px;
-    height: 130px;
-  }
-}
-
-/*end chart/graph styles*/
-
-/*****end styles for dashboard page*****/
-
-/*Services*/
-#services-menu {
-  .nav-list {
-    .tab-marker-position {
-      background-position: 6px 5px;
-      background-repeat: no-repeat;
-      list-style: none;
-      float: left;
-      height: 20px;
-      width: 20px;
-      margin-left: 0;
-      // padding-left: 30px;
-      // padding-right: 30px;
-      // background-position: 12px 9px;
-      // background-repeat: no-repeat;
-    }
-    .health-status-LIVE , .health-status-STARTING {
-      .tab-marker-position;
-      background-image: @status-live-marker;
-    }
-    .health-status-DEAD, .health-status-STOPPING {
-      .tab-marker-position;
-      background-image: @status-dead-marker;
-    }
-    .health-status-undefined {
-      .tab-marker-position;
-    }
-    li {
-      line-height: 24px;
-      a {
-        padding: 3px 10px;
-      }
-    }
-
-    li.clients {
-      a {
-        padding-left: 37px;
-      }
-    }
-  }
-  .add-service-button {
-    margin: 20px 20px 10px;
-  }
-  .operations-count{
-    background: #953B39;
-  }
-}
-
-.nav-pills.move {
-  float: right;
-  margin-top: -48px;
-}
-
-.service-content {
-  #summary-info {
-    margin-bottom: 0;
-  }
-}
-
-.service-block {
-  margin-top: 20px;
-}
-
-.service-configuration {
-  padding: 10px;
-}
-
-.service-summary {
-  background: #F6FAFD;
-  .service-block.span8 {
-    margin-left: 0;
-    border-right: 1px solid #5fa3c3;
-  }
-  .service-block.span3 {
-    padding-left: 0;
-  }
-  .service-content {
-    padding: 5px 0 0 10px;
-    .service-links {
-      padding: 5px 0;
-    }
-  }
-  h5 {
-    color: #0088CC;
-    font-size: 14px;
-  }
-  .service-links {
-    padding: 5px 0 10px 0;
-  }
-  .service-configuration .dl-horizontal {
-    dt {
-      width: 90px;
-      line-height: 19px;
-    }
-    dd {
-      margin-left: 100px;
-      line-height: 19px;
-    }
-  }
-}
-
-.service-button {
-  text-align: right;
-  margin-bottom: 5px;
-  margin-top: -55px;
-  ul.dropdown-menu {
-    li {
-      text-align: left;
-    }
-    a {
-      cursor: pointer;
-    }
-  }
-}
-
-.summary-metric-graphs {
-  [class*="span"] {
-    float: left;
-    margin-left: 10px;
-  }
-  .chart-container {
-    .chart-x-axis {
-      left: 0%;
-      width: 100%;
-    }
-  }
-}
-
-/*End Services*/
-table.graphs {
-  table-layout: fixed;
-  width: 100%;
-}
-/*Hosts*/
-#hosts {
-  .page-bar {
-    border: 1px solid silver;
-    text-align:right;
-    div {
-      display: inline-block;
-      margin:0 10px;
-    }
-    .items-on-page {
-      label {
-        display:inline;
-      }
-      select {
-        margin-bottom: 4px;
-        margin-top: 4px;
-        width:70px;
-      }
-    }
-    .paging_two_button {
-      a.paginate_disabled_next, a.paginate_disabled_previous {
-        color: gray;
-        &:hover {
-          color: gray;
-          text-decoration: none;
-          cursor: default;
-        }
-      }
-
-      a.paginate_next, a.paginate_previous {
-        &:hover {
-          text-decoration: none;
-          cursor: pointer;
-        }
-      }
-      a {
-        padding:0 5px;
-      }
-    }
-  }
-
-  .health-status-HEALTHY{
-    background-image: @status-live-marker;
-  }
-  .health-status-LIVE {
-    background-image: @status-live-marker;
-  }
-  .health-status-DEAD {
-    background-image: @status-dead-marker;
-  }
-  .health-status-DEAD-ORANGE {
-    background-image: @status-dead-orange-marker;
-  }
-  .health-status-DEAD-YELLOW {
-    background-image: @status-dead-yellow-marker;
-  }
-  .host-name-search {
-    position: relative;
-    top: 0px;
-    left: 10px;
-  }
-  .host-name-pos {
-    position: relative;
-    top: 10px;
-  }
-  .box-header {
-    margin-left: 0;
-    .btn-group {
-      float: left;
-    }
-    .btn.decommission {
-      margin-left: 5px;
-    }
-    .btn.add-host-button {
-      float: right;
-    }
-  }
-  .progress{
-    margin-bottom: 0;
-  }
-  .table {
-    //margin-bottom: 0;
-    thead {
-      //background: #EDF5FC;
-    }
-    th {
-      border-top: none;
-    }
-    th, td {
-      width: 82px;
-      border-left-width: 0;
-    }
-    th.first, td.first {
-      width: 10px !important;
-      border-left-width: 1px;
-    }
-    td.first label {
-      padding-top: 3px;
-    }
-    td.first span {
-      display: block;
-      float: left;
-      height: 13px;
-      margin: 4px 5px 0 0;
-      width: 13px;
-    }
-    ul.filter-components {
-      padding: 5px 0;
-      li {
-        display: block;
-        padding: 3px 0 3px 5px;
-        line-height: 20px;
-
-        label.checkbox {
-          padding-left: 3px;
-        }
-
-        input[type="checkbox"] {
-          margin: 4px 4px 2px 2px;
-        }
-      }
-      &>li {
-        &>ul {
-          height: 250px;
-          margin-left: 0;
-          overflow-y: scroll;
-        }
-      }
-    }
-    .sorting_asc { background: url(data:image/jpeg;base64,/9j/4AAQSkZJRgABAgAAZABkAAD/7AARRHVja3kAAQAEAAAAZAAA/+4ADkFkb2JlAGTAAAAAAf/bAIQAAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQICAgICAgICAgICAwMDAwMDAwMDAwEBAQEBAQECAQECAgIBAgIDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMD/8AAEQgAEwATAwERAAIRAQMRAf/EAHgAAAMBAQAAAAAAAAAAAAAAAAAFCAYKAQACAQUAAAAAAAAAAAAAAAAABQMCBAYHCBAAAQUAAQMEAwAAAAAAAAAAAwECBAUGABESByExIghBMxQRAAIBAwMDAwUAAAAAAAAAAAECAwAEBRESBiExUUHhB2GBIhMU/9oADAMBAAIRAxEAPwDvA8k+Qc54sxGj32qlNi0ucrjTj/JqGlmROyJXQ2u/bOsZTmBExPd70/HXmQcW41lOX5+145h0L391KEHhR3Z28Ii6sx9AKgubiO1gaeU6Io19h9TUg/S/7eP+wia3NbBIFbuqiyn3VTCjIMArHHTJarEDGGiNU8vOKVsc7/VxBuGR3yV683X86/Cq/GpssrhP2S8emiSKRm1JS5VfyLH0WfQug7KwZR0CilWHy39++ObQTgkgeV9ux+xq9uc6U8pLfZzP6mClZpKWrvq1DilJAt4Mewh/0hRyBOsaUMoVKLvXtVU6t6+nL/HZTJYi4/rxU81tdbSu+N2Rtp7jcpB0OnUa9aoeOOVdsgDL4I1pFS+NPHmcsQ2+fw+UpLWOwwwWNVQ1kCaIcgaiONkmLGEZrDDXtcnXo5PfjC+5VybKWrWWSyF5cWbEEpJNI6kqdQSrMRqD1B9KjS2t423xoqt5AArb8QVPRwoo4UUcKK//2Q==) no-repeat right 50%; }
-    .sorting_desc { background: url(data:image/jpeg;base64,/9j/4AAQSkZJRgABAgAAZABkAAD/7AARRHVja3kAAQAEAAAAZAAA/+4ADkFkb2JlAGTAAAAAAf/bAIQAAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQICAgICAgICAgICAwMDAwMDAwMDAwEBAQEBAQECAQECAgIBAgIDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMD/8AAEQgAEwATAwERAAIRAQMRAf/EAIEAAAIDAQAAAAAAAAAAAAAAAAAGBwgJCgEBAAIDAQAAAAAAAAAAAAAAAAMFBAYHCBAAAAUDAwMFAAAAAAAAAAAAAQIDBAUABgcSNTYRFQgTZFUWZhEAAAQEAggGAwAAAAAAAAAAAAECAxEhBAYSMjFBYRMzFDQFUZFSYmMHJFRk/9oADAMBAAIRAxEAPwDv4oAKACgCKc1tMmusb3Eph6cSgsgx7fucEZxGRks2llGIGVWgVm8q1dt0+6ogKaapSgdNbQPXTqAdwsN602bopk3vTnUW24rduwccbU2S5E8Sm1JM92czSZwNOKUYDFrCqTp1corDUFMpEcYap+Ipb4P5O8n81y9xXXlG50yY+thR3AEivqFvRDmduvSUrhuLtrFNXqCFvJm1LAQ5RMuchB6gBy13f7+tP6lsOipuz2jSGdy1ZJeNzmXnEtU+pWFTikmbxyTEjgglKKZpMU3ZanudYtTtSr8dMoYSKKvKMte0aUV5YGxgoASbD2iQ4Tyi6uB7Rvz/AHD9R8r7/wBWr64uta6/pKfq+JwUZP5/1/hwCFjIeTMrLo0np93q2xDtVCJh/9k=) no-repeat right 50%; }
-    .sorting { background: url(data:image/jpeg;base64,/9j/4AAQSkZJRgABAgAAZABkAAD/7AARRHVja3kAAQAEAAAAZAAA/+4ADkFkb2JlAGTAAAAAAf/bAIQAAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQICAgICAgICAgICAwMDAwMDAwMDAwEBAQEBAQECAQECAgIBAgIDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMD/8AAEQgAEwATAwERAAIRAQMRAf/EAGgAAAIDAQAAAAAAAAAAAAAAAAUHAAYICgEBAQAAAAAAAAAAAAAAAAAAAAEQAAEEAQIFAgcAAAAAAAAAAAECAwQFABEGIRI0NQcTFDFBMmNUZRYRAQEBAQAAAAAAAAAAAAAAAAABEUH/2gAMAwEAAhEDEQA/AO93cd/XbXpLC9tHQ1Dr46nljUBby/gzGZB+p+Q6QhA+ZOApfDnllW/ha1tv6Ee7iyH5kRlvlbTIqHndWkNJ0HO7XFQbWeJUkpUeOpySrZh65UUnyFUW1ztaexRmIbaPyzoLE6vg2UWW9GC1e0XHnsSGEqfQohCwApK9OIGuAjfBP9VuG0m39vGqINVUe4r2xF21TVsuXZOI9N9lMmLBYkttQ21auBKhqtSUngCMkW5xqjKiYASh6SR2Tulr2HpOvf6j9p+V9/mwDeB//9k=) no-repeat right 50%; }
-
-    div.view-wrapper {
-      .btn-group{
-        margin-bottom: 9px;
-      }
-    }
-
-    a.ui-icon-circle-close {
-      float: right;
-      opacity: 0.2;
-      padding: 1px;
-      position: relative;
-      right: -8px;
-      margin-top: 6px;
-      z-index: 10;
-      &:hover {
-        opacity: 0.7;
-      }
-    }
-    .notActive {
-      a.ui-icon-circle-close {
-        visibility: hidden;
-      }
-    }
-  }
-
-  .page-bar {
-    border: 1px solid silver;
-    text-align:right;
-    div {
-      display: inline-block;
-      margin:0 10px;
-    }
-    .dataTables_length {
-      label {
-        display:inline;
-      }
-      select {
-        margin-bottom: 4px;
-        margin-top: 4px;
-        width:70px;
-      }
-    }
-    .dataTables_paginate {
-      a {
-        padding:0 5px;
-      }
-    }
-  }
-
-  .open-group > .dropdown-menu {
-    display: block;
-  }
-  .nav-pills li.disabled {
-    display: block;
-    margin: 2px 0;
-    padding: 8px 12px;
-    line-height: 14px;
-  }
-  .box-footer .footer-pagination {
-    float: right;
-    .nav {
-      margin-bottom: 0;
-    }
-    .dropdown {
-      margin-top: 3px;
-    }
-    .dropdown {
-      margin-top: 3px;
-    }
-    .dropdown select {
-      width: 60px;
-    }
-    .page-listing a {
-      line-height: 0;
-      border: none;
-      margin: 0;
-      margin-right: 10px;
-      cursor: pointer;
-      color: #0088CC;
-      padding: 8px 0;
-      float: left;
-      text-decoration: underline;
-    }
-    .page-listing a:hover {
-      text-decoration: none;
-    }
-    .page-listing {
-      width: 100px;
-      .table {
-        th.name {
-          width: 300px;
-          a.filter-label {
-            width: 57px;
-            display: block;
-            float: left;
-          }
-        }
-      }
-    }
-  }
-}
-
-#host-warnings {
-  .warnings-list{
-    table {
-      margin-bottom: 0;
-      margin-top: 5px;
-      td {
-        width: 50%;
-        i {
-          font-size:16px;
-          margin: 2px;
-        }
-      }
-    }
-    .category-title {
-      padding: 3px;
-      text-align: left;
-      background-color: rgb(196, 193, 193);
-    }
-  }
-}
-.host-checks-update {
-  button {
-    margin-left: 5px;
-  }
-  .update-progress {
-    width: 230px;
-    .progress {
-      margin-bottom: 0;
-    }
-  }
-}
-
-#host-details {
-
-  margin-top: 27px;
-
-  /*
-  .component-operation-button {
-    background-color: #E5E5E5;
-    background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#E5E5E5), to(#F1F1F1));
-    background-image: -webkit-linear-gradient(top, #E5E5E5, #F1F1F1);
-    background-image: -o-linear-gradient(top, #E5E5E5, #F1F1F1);
-    background-image: linear-gradient(to bottom, #E5E5E5, #F1F1F1);
-    background-image: -moz-linear-gradient(top, #E5E5E5, #F1F1F1);
-    background-repeat: repeat-x;
-    color: #000000;
-  }
-  */
-  .caret {
-    border-top-color: #000000;
-    border-bottom-color: #000000;
-  }
-  // hack: apply this style only in Firefox
-  @-moz-document url-prefix() {
-    .host-components .caret{
-      margin-top:-12px !important;
-    }
-  }
-  .marker {
-    background-repeat: no-repeat;
-    display: inline-block;
-    height: 13px;
-    width: 13px;
-  }
-  .health-status-started, .health-status-starting {
-    background-image: @status-live-marker;
-    .marker;
-  }
-  .health-status-installed, .health-status-stopping {
-    background-image: @status-dead-marker;
-    .marker;
-  }
-
-  .health-status-LIVE {
-    background-image: @status-live-marker;
-    .marker;
-  }
-  .health-status-DEAD {
-    background-image: @status-dead-marker;
-    .marker;
-  }
-  .health-status-DEAD-ORANGE {
-    background-image: @status-dead-orange-marker;
-    .marker;
-  }
-  .health-status-DEAD-YELLOW {
-    background-image: @status-dead-yellow-marker;
-    .marker;
-  }
-  .back {
-    display: block;
-    width: 105px;
-    margin-bottom: 5px;
-  }
-  .box-header .host-title {
-    margin: 0;
-    padding-left: 17px;
-  }
-  .box-header .button-section {
-    margin-bottom: 5px;
-  }
-  hr {
-    margin-bottom: 0;
-    clear: both;
-  }
-  .content {
-    padding: 10px;
-  }
-  .host-configuration .dl-horizontal dt {
-    width: 90px;
-    line-height: 20px;
-  }
-  .host-configuration .dl-horizontal dd {
-    margin-left: 100px;
-    line-height: 20px;
-  }
-  .host-metrics {
-    [class*="span"] {
-      float: left;
-      margin: 0;
-    }
-    .chart-container {
-      .chart-x-axis {
-        left: 30%;
-        width: 40%;
-      }
-    }
-  }
-
-  .host-components {
-    padding: 10px;
-    padding-bottom: 0;
-    border: 1px solid #DEDEDE;
-    border-radius: 4px;
-    background: #FFF;
-  }
-  .host-components .btn-group {
-    margin: 0 5px 10px 0;
-  }
-}
-
-.background-operations {
-  .progress {
-    margin-bottom: 5px;
-    .bar {
-      width: 100%;
-    }
-  }
-  .open-details {
-    clear: left;
-    display: block;
-    float: left;
-    text-decoration: none;
-    width: 16px;
-  }
-  .operation-details {
-    padding-left: 16px;
-    padding-top: 5px;
-    display: none;
-  }
-  margin-bottom: 10px;
-}
-
-.background-operations.is-open {
-  .operation-details {
-    display: block;
-  }
-}
-
-/*End Hosts*/
-
-/*assign masters*/
-
-.assign-masters {
-  .select-hosts {
-    width: 50%;
-    float: left;
-    white-space: nowrap;
-  }
-
-  label.host-name {
-    padding-top: 5px;
-  }
-
-  .round-corners {
-    border-radius: 8px;
-    -webkit-border-radius: 8px;
-    -moz-border-radius: 8px;
-  }
-
-  .host-assignments {
-    float: right;
-    width: 45%;
-  }
-
-  .remaining-hosts {
-    padding: 25px;
-    border-top: solid 1px #cccccc;
-    border-left: solid 1px #cccccc;
-    border-right: groove 5px #cccccc;
-    border-bottom: groove 5px #cccccc;
-    margin-top: 20px;
-    background-color: #FCF8E3;
-    color: #C09853;
-  }
-
-  .host-assignments .mapping-box {
-    border: solid 1px #cccccc;
-    padding: 8px;
-    margin-bottom: 10px;
-    background-color: #fafafa;
-  }
-
-  .host-assignments .assignedService {
-    padding: 2px 8px;
-    border: solid 1px #cccccc;
-    margin: 2px;
-    background-color: @green;
-    color: white;
-    white-space: nowrap;
-    font-size: 0.9em;
-    display: inline-block;
-  }
-
-  .form-horizontal .controls {
-    margin-left: 110px;
-  }
-
-  .form-horizontal .control-label {
-    width: 120px;
-    padding-right: 10px;
-  }
-
-  .form-horizontal .control-group select {
-    width: 75%;
-    min-width: 100px;
-    max-width: 250px;
-  }
-
-  .hostString {
-    margin-bottom: 5px;
-  }
-
-  .controls .badge {
-    background-color: @green;
-    color: #ffffff;
-    cursor: pointer;
-    font-weight: bold;
-    text-shadow: 0 1px 0 rgba(255, 255, 255, 0.5);
-  }
-
-  .assign-master  .controls .badge:hover {
-    background-color: @green;
-  }
-
-  .alertFlag {
-    font-size: 1.3em;
-    color: #B94A48;
-    font-weight: bold;
-    vertical-align: middle;
-  }
-}
-
-/*end assign masters*/
-
-.assign-clients {
-  .round-corners {
-    border-radius: 8px;
-    -webkit-border-radius: 8px;
-    -moz-border-radius: 8px;
-  }
-  .assignedService {
-    padding: 2px 8px;
-    border: solid 1px #cccccc;
-    margin: 2px;
-    background-color: @blue;
-    color: white;
-    white-space: nowrap;
-    font-size: 0.9em;
-    display: inline-block;
-  }
-}
-
-/*Dashboard*/
-.alerts-count {
-  cursor: pointer;
-  margin-left: 5px;
-}
-
-ul.filter {
-  background: #ffffff;
-  list-style: none;
-  position: absolute;
-  padding: 10px;
-}
-
-#main-admin-menu {
-  padding: 8px 0;
-  ul {
-    margin-bottom: 0;
-    li {
-      line-height: 24px;
-    }
-  }
-}
-
-#user-auth-method select {
-  width: 320px;
-}
-
-/*start charts rack*/
-.rack {
-  width: 98%;
-  display: inline-block;
-  vertical-align: top;
-  border: 1px solid #D4D4D4;
-  border-radius: 5px;
-  margin-right: 4px;
-  margin-bottom: 10px;
-
-  .rackHeader {
-    background-color: #F7F7F7;
-    border-top-right-radius: 5px;
-    border-top-left-radius: 5px;
-    padding: 5px 5px 5px 10px;
-    border-bottom: 1px solid #D4D4D4;
-
-    .statusName {
-      font-size: 15px;
-      color: #006F9F;
-      text-shadow: #ffffff 0px 0px 1px;
-      font-weight: bold;
-      vertical-align: top;
-    }
-    .toggler {
-      background-repeat: no-repeat;
-      background-image: @rack-state-toggler;
-      display: block;
-      height: 22px;
-      position: relative;
-      width: 32px;
-      float: right;
-
-      span {
-        background-repeat: no-repeat;
-        background-image: @rack-state-plus;
-        display: block;
-        height: 17px;
-        left: 7px;
-        position: absolute;
-        top: 3px;
-        width: 18px;
-      }
-
-      span.isActive {
-        background-image: @rack-state-minus !important;
-        top: 10px;
-      }
-    }
-  }
-  .hostsSummary {
-    border-left: 1px solid #CDCDCD;
-    border-right: 1px solid #CDCDCD;
-    border-bottom: 1px solid #9f9f9f;
-    background-color: #f7f8fa;
-    font-size: 12px;
-    padding: 4px 0 4px 9px;
-    .textBlock {
-      color: #000000;
-      padding-right: 10px;
-
-      div {
-        height: 12px;
-        margin: 0 3px 0 0;
-        width: 12px;
-        display: inline-block;
-      }
-    }
-  }
-  .indicatorR {
-    background-color: #E2001A;
-  }
-
-  .indicatorY {
-    background-color: #F29400;
-  }
-
-  .indicatorG {
-    background-color: #88BF67;
-  }
-  .statusIndicator {
-    display: inline-block;
-    width: 20px;
-    height: 19px;
-    margin-right: 10px;
-    margin-top: 4px;
-    float: left;
-    background-repeat: no-repeat;
-  }
-  .rackName {
-    margin-top: 4px;
-    float: left;
-  }
-  .statusIndicator.LIVE {
-    background-image: @rack-status-live;
-  }
-
-  .statusIndicator.CRITICAL {
-    background-image: @rack-status-critical;
-  }
-
-  .statusIndicator.DEAD {
-    background-image: @rack-status-dead;
-  }
-
-  .hosts {
-    padding: 4px;
-    overflow: hidden;
-    display: none;
-
-    .ember-view {
-      float: left;
-      width: 100%;
-      height: 43px;
-
-      .hostBlock {
-        height: 38px;
-        position: relative;
-        border: 1px solid #D4D4D4;
-        margin-right: 3px;
-      }
-    }
-    .hostBlock {
-      border-radius: 4px;
-    }
-    .hostBlock.HEALTHY {
-      background-color: #87BE73;
-    }
-    .hostBlock.UNHEALTHY {
-      background-color: #E40024;
-    }
-    .hostBlock.CRITICAL {
-      background: #F39236;
-    }
-    .ember-view:hover {
-    }
-  }
-
-  .hosts.isActive {
-    display: block;
-  }
-}
-
-/*Start Heatmap*/
-.heatmap {
-  #heatmap-metric-title{
-    margin-left: 23px;
-  }
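-  // host tiles (.hosts .ember-view) are 43px tall, so each .rack-5-N height reserves N rows (N * 43px)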
-  .rack.rack-5-2 {
-    .hosts {
-      height: 86px;
-    }
-  }
-  .rack.rack-5-4 {
-    .hosts {
-      height: 172px;
-    }
-  }
-  .rack.rack-5-6 {
-    .hosts {
-      height: 258px;
-    }
-  }
-  .rack.rack-5-8 {
-    .hosts {
-      height: 344px;
-    }
-  }
-  .rack.rack-5-10 {
-    .hosts {
-      height: 430px;
-    }
-  }
-  .legend-column {
-    min-width: 160px;
-  }
-  .heatmap_host_details {
-    font-size: 12px;
-    line-height: 1.6em;
-    border: 1px solid #D9D9D9;
-    background: #f9f9f9;
-    width: 280px;
-    padding: 10px 10px;
-    position: absolute;
-    z-index: 1000;
-  }
-  .container-fluid{
-    padding: 0px;
-  }
-  .row-fluid [class*="span"]{
-    margin-left: 0px;
-  }
-  .legend{
-    margin-top: 20px;
-    margin-bottom: 20px;
-    .tile{
-      width: 50px;
-      height: 1em;
-      padding: 4px;
-      border: 1px solid #D4D4D4;
-      border-radius: 5px;
-      margin-right: 10px;
-    }
-  }
-
-  h4{
-    color: #777777;
-    margin-top: 5px;
-  }
-  .heatmap-host {
-    display: block;
-    width: 100%;
-    height: 100%;
-  }
-}
-
-/*End Heatmap*/
-.noDisplay {
-  display: none !important;
-}
-
-.display {
-  display: block !important;
-}
-
-.display-inline-block {
-  display: inline-block !important;
-}
-
-/* CHARTS */
-.chart {
-  overflow: hidden;
-  /*padding-bottom: 25px;*/
-
-  .attributes {
-    width: 75px;
-    float: left;
-    margin: 45px 0 0 0;
-
-    p {
-      margin-bottom: 9px;
-    }
-  }
-
-  .hostName {
-    font-weight: bold;
-  }
-
-  .horizon {
-    border-top: 1px solid #000000;
-    border-bottom: 1px solid #000000;
-    overflow: hidden;
-    position: relative;
-  }
-  .horizon + .horizon {
-    border-top: medium none;
-  }
-  .horizon canvas {
-    display: block;
-  }
-  .horizon .title, .horizon .value {
-    bottom: 0;
-    line-height: 30px;
-    margin: 0 6px;
-    position: absolute;
-    text-shadow: 0 1px 0 rgba(255, 255, 255, 0.5);
-    white-space: nowrap;
-  }
-  .horizon .title {
-    left: 0;
-  }
-  .horizon .value {
-    right: 0;
-  }
-
-  .rule {
-    display: none;
-    width: 30px;
-    height: 124px;
-    padding-top: 27px;
-    position: absolute;
-    border-left: 1px dashed #a52a2a;
-
-    .stateValue {
-      margin: 15px 0 0 3px;
-      line-height: 15px;
-    }
-  }
-
-  .axis {
-    font: 10px sans-serif;
-
-    path {
-      display: none;
-    }
-
-    line {
-      shape-rendering: crispEdges;
-      stroke: #000000;
-    }
-  }
-
-  .line {
-    background: none repeat scroll 0 0 #000000;
-  /* opacity: 0.2; */
-    z-index: 2;
-  }
-
-  ul.dropdown-menu {
-    li {
-      text-align: left;
-    }
-  }
-}
-
-.linear {
-  path {
-    stroke: steelblue;
-    stroke-width: 2;
-    fill: none;
-  }
-
-  line {
-    stroke: black;
-  }
-
-  text {
-    font-family: Arial;
-    font-size: 9pt;
-  }
-}
-
-/* CHARTS END */
-
-/* UNIVERSAL STYLES */
-.align-right {
-  text-align: right;
-}
-
-.left {
-  float: left;
-}
-
-.no-borders {
-  border: none !important;
-}
-
-ul.noStyle {
-  list-style: none;
-}
-
-ul.inline li {
-  display: inline;
-}
-
-.table.no-borders th, .table.no-borders td {
-  border-top: none;
-}
-
-/* UNIVERSAL STYLES END */
-
-/* METRIC FILTERING WIDGET */
-.metric-filtering-widget {
-
-  .title {
-    padding-top: 4px;
-  }
-
-  .accordion {
-    background: none repeat scroll 0 0 #FFFFFF;
-  /*border: 1px solid;*/
-    font-size: 12px;
-  /*padding: 5px 0;*/
-    position: absolute;
-    z-index: 1000;
-
-    .accordion-group {
-      .accordion-heading {
-        i {
-          text-decoration: none;
-        }
-      }
-
-      .accordion-body {
-        .accordion-inner {
-        /*border: none;
-        padding: 0 8px;
-        width: 160px;*/
-
-          ul.items {
-            list-style: none;
-            li {
-              a:hover {
-                cursor: pointer;
-              }
-            }
-            li.disabled {
-              a {
-                color: #999999;
-              }
-            }
-
-          }
-        }
-      }
-    }
-  }
-}
-
-/* METRIC FILTERING WIDGET END */
-
-/* TIME RANGE WIDGET */
-
-/* start css for timepicker */
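-/* the ui-timepicker-* rules below style the jQuery UI Timepicker Addon (inferred from the class names) */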
-.ui-timepicker-div .ui-widget-header {
-  margin-bottom: 8px;
-}
-
-.ui-timepicker-div dl {
-  text-align: left;
-}
-
-.ui-timepicker-div dl dt {
-  height: 25px;
-  margin-bottom: -25px;
-}
-
-.ui-timepicker-div dl dd {
-  margin: 0 10px 10px 65px;
-}
-
-.ui-timepicker-div td {
-  font-size: 90%;
-}
-
-.ui-tpicker-grid-label {
-  background: none;
-  border: none;
-  margin: 0;
-  padding: 0;
-}
-
-.ui-timepicker-rtl {
-  direction: rtl;
-}
-
-.ui-timepicker-rtl dl {
-  text-align: right;
-}
-
-.ui-timepicker-rtl dl dd {
-  margin: 0 65px 10px 10px;
-}
-
-/* end css for timepicker */
-
-#slider {
-  margin: 10px 0 40px;
-  width: 330px;
-
-  .now-marker {
-    position: absolute;
-    right: -15px;
-    bottom: -23px;
-  }
-  .period-marker {
-    position: absolute;
-    left: -15px;
-    bottom: -23px;
-  }
-}
-
-.screensaver{
-  width: 90%;
-  height: 157px;
-  border: 1px solid silver;
-  margin: 20px 15px 10px 15px;
-  background: url(/img/spinner.gif) no-repeat center center;
-}
-
-/* TIME RANGE WIDGET END */
-
-#host-details .host-components .btn-group > .btn {
-  min-width: 130px;
-}
-
-#host-details > .host-maintenance {
-  text-align: right;
-  margin-top: -28px;
-}
-
-#host-details > .host-maintenance > div {
-  text-align: left;
-}
-
-#host-details > .host-title {
-  font-size: 18px;
-  font-weight: 700;
-  background-position: 0px center;
-  padding-left: 17px;
-}
-
-/* fieldset begin */
-.fieldset {
-  border: 2px solid black;
-  padding: 10px;
-}
-
-.fieldset legend {
-  border-bottom: none;
-  width: auto;
-  font-size: 14px;
-}
-
-/* fieldset end */
-
-/* Start Carousel */
-.carousel-inner {
-  height: 80px;
-}
-
-/* End Carousel*/
-
-#add-host .back {
-  display: block;
-  width: 105px;
-  margin-bottom: 10px;
-}
-
-#add-service .back {
-  display: block;
-  width: 130px;
-  margin-bottom: 10px;
-}
-
-#step8-content,
-#step10-content {
-  max-height: 440px;
-}
-.content-area {
-  position: relative;
-  .textTrigger {
-    cursor: pointer;
-    position: absolute;
-    right: 0;
-    top: 0;
-    visibility: hidden;
-    background-color: #dcdcdc;
-  }
-}
-
-
-//bootstrap
-//.dropdown-menu .active > a, .dropdown-menu .active > a:hover {
-//  background-color: #49AFCD;
-//  background-image: -moz-linear-gradient(center top, #5BC0DE, #2F96B4);
-//}
-//bootstrap end
-
-// COMBOBOX FIXES
-.combobox-container .btn:hover {
-  background-position: 0;
-}
-// COMBOBOX FIXES END
-@media all and (max-width: 1200px) {
-  #main-nav {
-    li.span2 {
-      width: 120px;
-    }
-  }
-}
-
-
-//
-// Gray palette
-//
-.nav-pills > .active > a, .nav-pills > .active > a:hover {
-  background-color: #666666;
-}
-
-.nav-list > .active > a, .nav-list > .active > a:hover {
-  background-color: #666666;
-}
-
-.dropdown-menu li > a:hover, .dropdown-menu li > a:focus, .dropdown-submenu:hover > a {
-  background-color: #666666;
-  background-image: linear-gradient(to bottom, #666666, #555555);
-}
-
-.alert-info {
-  background-color: #E6F1F6;
-  border-color: #D2D9DD;
-  color: #4E575B;
-  text-shadow: none;
-  .spinner {
-    padding: 8px 35px 8px 42px;
-    background: url(/img/spinner.gif) no-repeat;
-  }
-}
-
-/*
-.progress-striped .bar {
-  background-color: #A5A5A5;
-}
-
-.progress-info.progress-striped .bar, .progress-striped .bar-info {
-  background-color: #A5A5A5;
-}
-*/
-
-.btn-primary:active, .btn-primary.active, .btn-primary.disabled, .btn-primary[disabled] {
-  background-color: #555555;
-}
-
-.modal-body {
-  max-height: none;
-}
-
-i.icon-asterisks {
-  color: #FF4B4B;
-}
-
-.rickshaw_legend {
-  .action {
-    opacity: 1;
-    color: #ffffff;
-  }
-  .action:hover {
-    opacity: 0.8;
-    text-decoration: none;
-  }
-}
-.hidden {
-  display: none;
-  visibility: hidden;
-}
-
-// styles for screens wider than 1200px
-@media (min-width: 1200px) {
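-  // these grid rules mirror Bootstrap 2's wide-screen (1200px) grid; the *-prefixed declarations are IE7 star-hack fallbacks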
-  .row {
-    margin-left: -30px;
-    *zoom: 1;
-  }
-  .row:before,
-  .row:after {
-    display: table;
-    line-height: 0;
-    content: "";
-  }
-  .row:after {
-    clear: both;
-  }
-  [class*="span"] {
-    float: left;
-    min-height: 1px;
-    margin-left: 30px;
-  }
-  .container,
-  .navbar-static-top .container,
-  .navbar-fixed-top .container,
-  .navbar-fixed-bottom .container {
-    width: 1170px;
-  }
-  .span12 {
-    width: 1170px;
-  }
-  .span11 {
-    width: 1070px;
-  }
-  .span10 {
-    width: 970px;
-  }
-  .span9 {
-    width: 870px;
-  }
-  .span8 {
-    width: 770px;
-  }
-  .span7 {
-    width: 670px;
-  }
-  .span6 {
-    width: 570px;
-  }
-  .span5 {
-    width: 470px;
-  }
-  .span4 {
-    width: 370px;
-  }
-  .span3 {
-    width: 270px;
-  }
-  .span2 {
-    width: 170px;
-  }
-  .span1 {
-    width: 70px;
-  }
-  .offset12 {
-    margin-left: 1230px;
-  }
-  .offset11 {
-    margin-left: 1130px;
-  }
-  .offset10 {
-    margin-left: 1030px;
-  }
-  .offset9 {
-    margin-left: 930px;
-  }
-  .offset8 {
-    margin-left: 830px;
-  }
-  .offset7 {
-    margin-left: 730px;
-  }
-  .offset6 {
-    margin-left: 630px;
-  }
-  .offset5 {
-    margin-left: 530px;
-  }
-  .offset4 {
-    margin-left: 430px;
-  }
-  .offset3 {
-    margin-left: 330px;
-  }
-  .offset2 {
-    margin-left: 230px;
-  }
-  .offset1 {
-    margin-left: 130px;
-  }
-  .row-fluid {
-    width: 100%;
-    *zoom: 1;
-  }
-  .row-fluid:before,
-  .row-fluid:after {
-    display: table;
-    line-height: 0;
-    content: "";
-  }
-  .row-fluid:after {
-    clear: both;
-  }
-  .row-fluid [class*="span"] {
-    display: block;
-    float: left;
-    width: 100%;
-    min-height: 30px;
-    margin-left: 2.564102564102564%;
-    *margin-left: 2.5109110747408616%;
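-    // 2.5641% = 30px gutter / 1170px container width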
-    -webkit-box-sizing: border-box;
-    -moz-box-sizing: border-box;
-    box-sizing: border-box;
-  }
-  .row-fluid [class*="span"]:first-child {
-    margin-left: 0;
-  }
-  .row-fluid .span12 {
-    width: 100%;
-    *width: 99.94680851063829%;
-  }
-  .row-fluid .span11 {
-    width: 91.45299145299145%;
-    *width: 91.39979996362975%;
-  }
-  .row-fluid .span10 {
-    width: 82.90598290598291%;
-    *width: 82.8527914166212%;
-  }
-  .row-fluid .span9 {
-    width: 74.35897435897436%;
-    *width: 74.30578286961266%;
-  }
-  .row-fluid .span8 {
-    width: 65.81196581196582%;
-    *width: 65.75877432260411%;
-  }
-  .row-fluid .span7 {
-    width: 57.26495726495726%;
-    *width: 57.21176577559556%;
-  }
-  .row-fluid .span6 {
-    width: 48.717948717948715%;
-    *width: 48.664757228587014%;
-  }
-  .row-fluid .span5 {
-    width: 40.17094017094017%;
-    *width: 40.11774868157847%;
-  }
-  .row-fluid .span4 {
-    width: 31.623931623931625%;
-    *width: 31.570740134569924%;
-  }
-  .row-fluid .span3 {
-    width: 23.076923076923077%;
-    *width: 23.023731587561375%;
-  }
-  .row-fluid .span2 {
-    width: 14.52991452991453%;
-    *width: 14.476723040552828%;
-  }
-  .row-fluid .span1 {
-    width: 5.982905982905983%;
-    *width: 5.929714493544281%;
-  }
-  .row-fluid .offset12 {
-    margin-left: 105.12820512820512%;
-    *margin-left: 105.02182214948171%;
-  }
-  .row-fluid .offset12:first-child {
-    margin-left: 102.56410256410257%;
-    *margin-left: 102.45771958537915%;
-  }
-  .row-fluid .offset11 {
-    margin-left: 96.58119658119658%;
-    *margin-left: 96.47481360247316%;
-  }
-  .row-fluid .offset11:first-child {
-    margin-left: 94.01709401709402%;
-    *margin-left: 93.91071103837061%;
-  }
-  .row-fluid .offset10 {
-    margin-left: 88.03418803418803%;
-    *margin-left: 87.92780505546462%;
-  }
-  .row-fluid .offset10:first-child {
-    margin-left: 85.47008547008548%;
-    *margin-left: 85.36370249136206%;
-  }
-  .row-fluid .offset9 {
-    margin-left: 79.48717948717949%;
-    *margin-left: 79.38079650845607%;
-  }
-  .row-fluid .offset9:first-child {
-    margin-left: 76.92307692307693%;
-    *margin-left: 76.81669394435352%;
-  }
-  .row-fluid .offset8 {
-    margin-left: 70.94017094017094%;
-    *margin-left: 70.83378796144753%;
-  }
-  .row-fluid .offset8:first-child {
-    margin-left: 68.37606837606839%;
-    *margin-left: 68.26968539734497%;
-  }
-  .row-fluid .offset7 {
-    margin-left: 62.393162393162385%;
-    *margin-left: 62.28677941443899%;
-  }
-  .row-fluid .offset7:first-child {
-    margin-left: 59.82905982905982%;
-    *margin-left: 59.72267685033642%;
-  }
-  .row-fluid .offset6 {
-    margin-left: 53.84615384615384%;
-    *margin-left: 53.739770867430444%;
-  }
-  .row-fluid .offset6:first-child {
-    margin-left: 51.28205128205128%;
-    *margin-left: 51.175668303327875%;
-  }
-  .row-fluid .offset5 {
-    margin-left: 45.299145299145295%;
-    *margin-left: 45.1927623204219%;
-  }
-  .row-fluid .offset5:first-child {
-    margin-left: 42.73504273504273%;
-    *margin-left: 42.62865975631933%;
-  }
-  .row-fluid .offset4 {
-    margin-left: 36.75213675213675%;
-    *margin-left: 36.645753773413354%;
-  }
-  .row-fluid .offset4:first-child {
-    margin-left: 34.18803418803419%;
-    *margin-left: 34.081651209310785%;
-  }
-  .row-fluid .offset3 {
-    margin-left: 28.205128205128204%;
-    *margin-left: 28.0987452264048%;
-  }
-  .row-fluid .offset3:first-child {
-    margin-left: 25.641025641025642%;
-    *margin-left: 25.53464266230224%;
-  }
-  .row-fluid .offset2 {
-    margin-left: 19.65811965811966%;
-    *margin-left: 19.551736679396257%;
-  }
-  .row-fluid .offset2:first-child {
-    margin-left: 17.094017094017094%;
-    *margin-left: 16.98763411529369%;
-  }
-  .row-fluid .offset1 {
-    margin-left: 11.11111111111111%;
-    *margin-left: 11.004728132387708%;
-  }
-  .row-fluid .offset1:first-child {
-    margin-left: 8.547008547008547%;
-    *margin-left: 8.440625568285142%;
-  }
-  input,
-  textarea,
-  .uneditable-input {
-    margin-left: 0;
-  }
-  .controls-row [class*="span"] + [class*="span"] {
-    margin-left: 30px;
-  }
-  input.span12,
-  textarea.span12,
-  .uneditable-input.span12 {
-    width: 1156px;
-  }
-  input.span11,
-  textarea.span11,
-  .uneditable-input.span11 {
-    width: 1056px;
-  }
-  input.span10,
-  textarea.span10,
-  .uneditable-input.span10 {
-    width: 956px;
-  }
-  input.span9,
-  textarea.span9,
-  .uneditable-input.span9 {
-    width: 856px;
-  }
-  input.span8,
-  textarea.span8,
-  .uneditable-input.span8 {
-    width: 756px;
-  }
-  input.span7,
-  textarea.span7,
-  .uneditable-input.span7 {
-    width: 656px;
-  }
-  input.span6,
-  textarea.span6,
-  .uneditable-input.span6 {
-    width: 556px;
-  }
-  input.span5,
-  textarea.span5,
-  .uneditable-input.span5 {
-    width: 456px;
-  }
-  input.span4,
-  textarea.span4,
-  .uneditable-input.span4 {
-    width: 356px;
-  }
-  input.span3,
-  textarea.span3,
-  .uneditable-input.span3 {
-    width: 256px;
-  }
-  input.span2,
-  textarea.span2,
-  .uneditable-input.span2 {
-    width: 156px;
-  }
-  input.span1,
-  textarea.span1,
-  .uneditable-input.span1 {
-    width: 56px;
-  }
-  .thumbnails {
-    margin-left: -30px;
-  }
-  .thumbnails > li {
-    margin-left: 30px;
-  }
-  .row-fluid .thumbnails {
-    margin-left: 0;
-  }
-  .summary-metric-graphs {
-    [class*="span"] {
-      float: left;
-      margin-left: 10px;
-    }
-    .chart-container {
-      .chart-x-axis {
-        left: 0%;
-        width: 100%;
-      }
-    }
-  }
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/styles/apps.less b/branch-1.2/ambari-web/app/styles/apps.less
deleted file mode 100644
index 6ad3467..0000000
--- a/branch-1.2/ambari-web/app/styles/apps.less
+++ /dev/null
@@ -1,473 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#apps{
-
-  td .red {
-    color: red;
-  }
-  .table thead th{
-    vertical-align:top;
-    padding-bottom: 0px;
-  }
-  .avg-table {
-    table-layout: fixed;
-    background-color: #F9F9F9;
-    border-left:1px solid #DDD;
-    td {
-      text-align:center;
-      border:none;
-    }
-  }
-
-  #filter_buttons a.selected{
-    cursor: default;
-  }
-  #filter_buttons a{
-    cursor: pointer;
-  }
-
-  .clear_filter{
-    width:46%;
-    a {
-      cursor: pointer;
-    }
-  }
-
-  .runsList {
-    table-layout: fixed;
-    border: 1px solid silver;
-    th {
-      border-top:none;
-    }
-    td {
-      word-wrap: break-word;
-    }
-
-    input, select{
-      width: 76%;
-    }
-
-    .no-data{
-      text-align: center;
-    }
-
-    input.input-super-mini{
-      width: 47px;
-      max-width: 57%;
-    }
-
-    label.checkbox input {
-      width: auto;
-    }
-    .col0,
-    td:first-child,
-    th:first-child {
-      width: 16%;
-    }
-    .col1,
-    td:first-child + td + td,
-    th:first-child + th + th{
-      width: 15%;
-    }
-    .col2,
-    td:first-child + td + td + td,
-    th:first-child + th + th + th{
-      width: 11%;
-    }
-    .col3,
-    td:first-child + td + td + td + td,
-    th:first-child + th + th + th + th{
-      width: 11%;
-    }
-    .col4,.col5,.col6,.col7,
-    td:first-child + td + td + td + td + td,
-    th:first-child + th + th + th + th + th,
-    td:first-child + td + td + td + td + td + td,
-    th:first-child + th + th + th + th + th + th,
-    td:first-child + td + td + td + td + td + td + td,
-    th:first-child + th + th + th + th + th + th + th,
-    td:first-child + td + td + td + td + td + td + td + td,
-    th:first-child + th + th + th + th + th + th + th + th
-    {
-      width: 9%;
-    }
-    .col8,
-    td:first-child + td + td + td + td + td + td + td + td + td,
-    th:first-child + th + th + th + th + th + th + th + th + th{
-      width: 13%;
-    }
-  }
-
-  .dropdown-menu label.checkbox {
-    margin-left: 10px;
-  }
-  .icon-star{
-    color: gray;
-    &.stared {
-      color: inherit;
-    }
-    &:hover {
-      text-decoration: none;
-    }
-  }
-  a.a {
-    cursor: pointer;
-    width:25px;
-    height: 25px;
-    display: block;
-    background-position: center center;
-    background-repeat: no-repeat;
-    position: relative;
-    left:50%;
-    margin-left:-13px;
-    margin-top: 36px;
-    font-size:30px;
-    color: gray;
-    &.active {
-      color: #08C;
-    }
-  }
-  .avg-info {
-    font-size:16px;
-    font-weight:700;
-  }
-  .compare-info {
-    font-size:12px;
-  }
-  .search-bar {
-
-  }
-  .clear {
-    clear:both;
-  }
-  .content {
-    padding: 0;
-  }
-  .app-table-row.hover{
-    opacity: 0.8;
-  }
-  .app-table-row{
-    cursor: pointer;
-  }
-  .filter_info > .span4 > a.selected{
-      cursor: default;
-      text-decoration: none;
-      color: #000;
-  }
-  .page-bar {
-    border: 1px solid silver;
-    text-align:right;
-    div {
-      display: inline-block;
-      margin:0 10px;
-    }
-    .items-on-page {
-      label {
-        display:inline;
-      }
-      select {
-        margin-bottom: 4px;
-        margin-top: 4px;
-        width:70px;
-      }
-    }
-    .paging_two_button {
-      a {
-        padding:0 5px;
-      }
-    }
-  }
-  #graph1 {
-    margin-left: 30px;
-    width: 440px;
-    #legend_container {
-      margin: 40px 0 0 20px;
-    }
-  }
-  #graph2 {
-    margin-right: 30px;
-    width: 500px;
-    #tasks_legend_container {
-      margin: 40px 0 0 20px;
-    }
-  }
-
-  tr.containerRow > td{
-    background: #f9f9f9;
-  }
-  button {
-    margin: 0 2px;
-  }
-  svg{
-    vertical-align: top; // remove the extra space below inline svg elements
-  }
-
-  ul.nav-tabs{
-    margin-bottom: 0;
-  }
-
-  #jobs, #bars{
-    border: 1px solid #ddd;
-    border-top: none;
-    background: #fff;
-    padding: 10px;
-    box-sizing: border-box;
-    width: auto;
-  }
-
-  #jobs h2{
-    margin-top: 0;
-  }
-// fix striping in inner tables
-  .table-striped tbody .even td,
-  .table-striped tbody .even th {
-    background-color: #fff;
-  }
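-  // DataTables column-sort indicators; the arrow icons are inlined below as base64 JPEG data URIs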
-  .sorting_asc { background: url(data:image/jpeg;base64,/9j/4AAQSkZJRgABAgAAZABkAAD/7AARRHVja3kAAQAEAAAAZAAA/+4ADkFkb2JlAGTAAAAAAf/bAIQAAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQICAgICAgICAgICAwMDAwMDAwMDAwEBAQEBAQECAQECAgIBAgIDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMD/8AAEQgAEwATAwERAAIRAQMRAf/EAHgAAAMBAQAAAAAAAAAAAAAAAAAFCAYKAQACAQUAAAAAAAAAAAAAAAAABQMCBAYHCBAAAQUAAQMEAwAAAAAAAAAAAwECBAUGABESByExIghBMxQRAAIBAwMDAwUAAAAAAAAAAAECAwAEBRESBiExUUHhB2GBIhMU/9oADAMBAAIRAxEAPwDvA8k+Qc54sxGj32qlNi0ucrjTj/JqGlmROyJXQ2u/bOsZTmBExPd70/HXmQcW41lOX5+145h0L391KEHhR3Z28Ii6sx9AKgubiO1gaeU6Io19h9TUg/S/7eP+wia3NbBIFbuqiyn3VTCjIMArHHTJarEDGGiNU8vOKVsc7/VxBuGR3yV683X86/Cq/GpssrhP2S8emiSKRm1JS5VfyLH0WfQug7KwZR0CilWHy39++ObQTgkgeV9ux+xq9uc6U8pLfZzP6mClZpKWrvq1DilJAt4Mewh/0hRyBOsaUMoVKLvXtVU6t6+nL/HZTJYi4/rxU81tdbSu+N2Rtp7jcpB0OnUa9aoeOOVdsgDL4I1pFS+NPHmcsQ2+fw+UpLWOwwwWNVQ1kCaIcgaiONkmLGEZrDDXtcnXo5PfjC+5VybKWrWWSyF5cWbEEpJNI6kqdQSrMRqD1B9KjS2t423xoqt5AArb8QVPRwoo4UUcKK//2Q==) no-repeat right 50%; }
-  .sorting_desc { background: url(data:image/jpeg;base64,/9j/4AAQSkZJRgABAgAAZABkAAD/7AARRHVja3kAAQAEAAAAZAAA/+4ADkFkb2JlAGTAAAAAAf/bAIQAAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQICAgICAgICAgICAwMDAwMDAwMDAwEBAQEBAQECAQECAgIBAgIDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMD/8AAEQgAEwATAwERAAIRAQMRAf/EAIEAAAIDAQAAAAAAAAAAAAAAAAAGBwgJCgEBAAIDAQAAAAAAAAAAAAAAAAMFBAYHCBAAAAUDAwMFAAAAAAAAAAAAAQIDBAUABgcSNTYRFQgTZFUWZhEAAAQEAggGAwAAAAAAAAAAAAECAxEhBAYSMjFBYRMzFDQFUZFSYmMHJFRk/9oADAMBAAIRAxEAPwDv4oAKACgCKc1tMmusb3Eph6cSgsgx7fucEZxGRks2llGIGVWgVm8q1dt0+6ogKaapSgdNbQPXTqAdwsN602bopk3vTnUW24rduwccbU2S5E8Sm1JM92czSZwNOKUYDFrCqTp1corDUFMpEcYap+Ipb4P5O8n81y9xXXlG50yY+thR3AEivqFvRDmduvSUrhuLtrFNXqCFvJm1LAQ5RMuchB6gBy13f7+tP6lsOipuz2jSGdy1ZJeNzmXnEtU+pWFTikmbxyTEjgglKKZpMU3ZanudYtTtSr8dMoYSKKvKMte0aUV5YGxgoASbD2iQ4Tyi6uB7Rvz/AHD9R8r7/wBWr64uta6/pKfq+JwUZP5/1/hwCFjIeTMrLo0np93q2xDtVCJh/9k=) no-repeat right 50%; }
-  .sorting { background: url(data:image/jpeg;base64,/9j/4AAQSkZJRgABAgAAZABkAAD/7AARRHVja3kAAQAEAAAAZAAA/+4ADkFkb2JlAGTAAAAAAf/bAIQAAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQICAgICAgICAgICAwMDAwMDAwMDAwEBAQEBAQECAQECAgIBAgIDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMD/8AAEQgAEwATAwERAAIRAQMRAf/EAGgAAAIDAQAAAAAAAAAAAAAAAAUHAAYICgEBAQAAAAAAAAAAAAAAAAAAAAEQAAEEAQIFAgcAAAAAAAAAAAECAwQFABEGIRI0NQcTFDFBMmNUZRYRAQEBAQAAAAAAAAAAAAAAAAABEUH/2gAMAwEAAhEDEQA/AO93cd/XbXpLC9tHQ1Dr46nljUBby/gzGZB+p+Q6QhA+ZOApfDnllW/ha1tv6Ee7iyH5kRlvlbTIqHndWkNJ0HO7XFQbWeJUkpUeOpySrZh65UUnyFUW1ztaexRmIbaPyzoLE6vg2UWW9GC1e0XHnsSGEqfQohCwApK9OIGuAjfBP9VuG0m39vGqINVUe4r2xF21TVsuXZOI9N9lMmLBYkttQ21auBKhqtSUngCMkW5xqjKiYASh6SR2Tulr2HpOvf6j9p+V9/mwDeB//9k=) no-repeat right 50%; }
-
-  a.paginate_disabled_next, a.paginate_disabled_previous {
-    color: gray;
-    &:hover {
-      color: gray;
-      text-decoration: none;
-    }
-  }
-
-  a.paginate_enabled_next, a.paginate_enabled_previous {
-    &:hover {
-      text-decoration: none;
-    }
-  }
-
-  a.ui-icon-circle-close {
-    float: right;
-    opacity: 0.2;
-    padding: 1px;
-    position: relative;
-    right: -4px;
-    margin-top: 6px;
-    z-index:10;
-    &:hover {
-      opacity: 0.7;
-    }
-  }
-  .notActive {
-    a.ui-icon-circle-close {
-      visibility: hidden;
-    }
-  }
-}
-.btn-group button.single-btn-group{
-  -webkit-border-radius: 4px;
-  border-radius: 4px;
-  -moz-border-radius: 4px;
-}
-
-/*Big modal window*/
-.big-modal {
-  .modal {
-    top: -999px;
-    left: -999px;
-    .modal-body {
-      min-height: 430px;
-      max-height: none;
-    }
-    width: 1150px;
-    margin: 0;
-  }
-
-  .clear {
-    clear:both;
-  }
-  > div > .dataTable {
-    border: 1px solid silver;
-    th {
-      border-top:none;
-    }
-  }
-  .content {
-    padding: 0;
-  }
-
-  .page-bar {
-    border: 1px solid silver;
-    text-align:right;
-    div {
-      display: inline-block;
-      margin:0 10px;
-    }
-    .dataTables_length {
-      label {
-        display:inline;
-      }
-      select {
-        margin-bottom: 4px;
-        margin-top: 4px;
-        width:70px;
-      }
-    }
-    .dataTables_paginate {
-      a {
-        padding:0 5px;
-      }
-    }
-  }
-
-  .rickshaw_legend {
-    background-color: #999 !important;
-    li:hover {
-      background-color: #999 !important;
-    }
-  }
-  #graph1, #graph2 {
-    svg {
-      g {
-        g:nth-child(1) {
-          display: none;
-        }
-      }
-    }
-  }
-  #graph1 {
-    margin-left: 30px;
-    width: 440px;
-    #chart {
-      left: 30px;
-      overflow: visible;
-      position: relative;
-    }
-    #legend_container {
-      margin: 40px 0 0 20px;
-    }
-    #y-axis {
-      position: absolute;
-      top: 182px;
-      bottom: 0;
-      width: 30px;
-    }
-  }
-
-  #graph2 {
-    margin-right: 30px;
-    width: 500px;
-    #job_tasks {
-      left: 30px;
-      overflow: visible;
-      position: relative;
-    }
-    #tasks_legend_container {
-      margin: 40px 0 0 20px;
-    }
-    #y-axis2 {
-      position: absolute;
-      top: 182px;
-      bottom: 0;
-      width: 30px;
-      overflow: hidden;
-      height: 210px;
-    }
-  }
-
-  ul.nav-tabs{
-    margin-bottom: 0;
-  }
-
-  #bars {
-    height: 350px;
-  }
-
-  #jobs, #bars{
-    border: 1px solid #ddd;
-    border-top: none;
-    background: #fff;
-    padding: 10px;
-    box-sizing: border-box;
-    width: auto;
-  }
-
-  #jobs h2{
-    margin-top: 0;
-  }
-// fix striping in inner tables
-  .table-striped tbody tr:nth-child(odd) td .table-striped tbody tr:nth-child(odd) td,
-  tr:nth-child(even) th {
-    background-color: transparent;
-  }
-
-  .sorting_asc { background: url(data:image/jpeg;base64,/9j/4AAQSkZJRgABAgAAZABkAAD/7AARRHVja3kAAQAEAAAAZAAA/+4ADkFkb2JlAGTAAAAAAf/bAIQAAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQICAgICAgICAgICAwMDAwMDAwMDAwEBAQEBAQECAQECAgIBAgIDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMD/8AAEQgAEwATAwERAAIRAQMRAf/EAHgAAAMBAQAAAAAAAAAAAAAAAAAFCAYKAQACAQUAAAAAAAAAAAAAAAAABQMCBAYHCBAAAQUAAQMEAwAAAAAAAAAAAwECBAUGABESByExIghBMxQRAAIBAwMDAwUAAAAAAAAAAAECAwAEBRESBiExUUHhB2GBIhMU/9oADAMBAAIRAxEAPwDvA8k+Qc54sxGj32qlNi0ucrjTj/JqGlmROyJXQ2u/bOsZTmBExPd70/HXmQcW41lOX5+145h0L391KEHhR3Z28Ii6sx9AKgubiO1gaeU6Io19h9TUg/S/7eP+wia3NbBIFbuqiyn3VTCjIMArHHTJarEDGGiNU8vOKVsc7/VxBuGR3yV683X86/Cq/GpssrhP2S8emiSKRm1JS5VfyLH0WfQug7KwZR0CilWHy39++ObQTgkgeV9ux+xq9uc6U8pLfZzP6mClZpKWrvq1DilJAt4Mewh/0hRyBOsaUMoVKLvXtVU6t6+nL/HZTJYi4/rxU81tdbSu+N2Rtp7jcpB0OnUa9aoeOOVdsgDL4I1pFS+NPHmcsQ2+fw+UpLWOwwwWNVQ1kCaIcgaiONkmLGEZrDDXtcnXo5PfjC+5VybKWrWWSyF5cWbEEpJNI6kqdQSrMRqD1B9KjS2t423xoqt5AArb8QVPRwoo4UUcKK//2Q==) no-repeat right 50%; }
-  .sorting_desc { background: url(data:image/jpeg;base64,/9j/4AAQSkZJRgABAgAAZABkAAD/7AARRHVja3kAAQAEAAAAZAAA/+4ADkFkb2JlAGTAAAAAAf/bAIQAAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQICAgICAgICAgICAwMDAwMDAwMDAwEBAQEBAQECAQECAgIBAgIDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMD/8AAEQgAEwATAwERAAIRAQMRAf/EAIEAAAIDAQAAAAAAAAAAAAAAAAAGBwgJCgEBAAIDAQAAAAAAAAAAAAAAAAMFBAYHCBAAAAUDAwMFAAAAAAAAAAAAAQIDBAUABgcSNTYRFQgTZFUWZhEAAAQEAggGAwAAAAAAAAAAAAECAxEhBAYSMjFBYRMzFDQFUZFSYmMHJFRk/9oADAMBAAIRAxEAPwDv4oAKACgCKc1tMmusb3Eph6cSgsgx7fucEZxGRks2llGIGVWgVm8q1dt0+6ogKaapSgdNbQPXTqAdwsN602bopk3vTnUW24rduwccbU2S5E8Sm1JM92czSZwNOKUYDFrCqTp1corDUFMpEcYap+Ipb4P5O8n81y9xXXlG50yY+thR3AEivqFvRDmduvSUrhuLtrFNXqCFvJm1LAQ5RMuchB6gBy13f7+tP6lsOipuz2jSGdy1ZJeNzmXnEtU+pWFTikmbxyTEjgglKKZpMU3ZanudYtTtSr8dMoYSKKvKMte0aUV5YGxgoASbD2iQ4Tyi6uB7Rvz/AHD9R8r7/wBWr64uta6/pKfq+JwUZP5/1/hwCFjIeTMrLo0np93q2xDtVCJh/9k=) no-repeat right 50%; }
-  .sorting { background: url(data:image/jpeg;base64,/9j/4AAQSkZJRgABAgAAZABkAAD/7AARRHVja3kAAQAEAAAAZAAA/+4ADkFkb2JlAGTAAAAAAf/bAIQAAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQICAgICAgICAgICAwMDAwMDAwMDAwEBAQEBAQECAQECAgIBAgIDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMD/8AAEQgAEwATAwERAAIRAQMRAf/EAGgAAAIDAQAAAAAAAAAAAAAAAAUHAAYICgEBAQAAAAAAAAAAAAAAAAAAAAEQAAEEAQIFAgcAAAAAAAAAAAECAwQFABEGIRI0NQcTFDFBMmNUZRYRAQEBAQAAAAAAAAAAAAAAAAABEUH/2gAMAwEAAhEDEQA/AO93cd/XbXpLC9tHQ1Dr46nljUBby/gzGZB+p+Q6QhA+ZOApfDnllW/ha1tv6Ee7iyH5kRlvlbTIqHndWkNJ0HO7XFQbWeJUkpUeOpySrZh65UUnyFUW1ztaexRmIbaPyzoLE6vg2UWW9GC1e0XHnsSGEqfQohCwApK9OIGuAjfBP9VuG0m39vGqINVUe4r2xF21TVsuXZOI9N9lMmLBYkttQ21auBKhqtSUngCMkW5xqjKiYASh6SR2Tulr2HpOvf6j9p+V9/mwDeB//9k=) no-repeat right 50%; }
-
-  a.paginate_disabled_next, a.paginate_disabled_previous {
-    color: gray;
-    &:hover {
-      color: gray;
-      text-decoration: none;
-    }
-  }
-
-  a.paginate_enabled_next, a.paginate_enabled_previous {
-    &:hover {
-      text-decoration: none;
-    }
-  }
-
-  div.view-wrapper {
-    float: left;
-  }
-
-  a.ui-icon-circle-close {
-    float: right;
-    opacity: 0.2;
-    padding: 1px;
-    position: relative;
-    top: -32px;
-    z-index: 10;
-    &:hover {
-      opacity: 0.7;
-    }
-  }
-  .notActive {
-    a.ui-icon-circle-close {
-      visibility: hidden;
-    }
-  }
-}
-
-
-
-@media all and (max-width: 1024px) {
-  .big-modal {
-    #graph1 {
-      width: 300px;
-    }
-    #graph2 {
-      width: 400px;
-    }
-  }
-}
-
-/*Big modal window end*/
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/templates.js b/branch-1.2/ambari-web/app/templates.js
deleted file mode 100644
index 86b79f4..0000000
--- a/branch-1.2/ambari-web/app/templates.js
+++ /dev/null
@@ -1,25 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-
-// load templates here
-
-require('templates/main/service/info/summary/ganglia');
-require('templates/main/service/info/summary/oozie');
-require('templates/main/service/info/summary/zookeeper');
diff --git a/branch-1.2/ambari-web/app/templates/application.hbs b/branch-1.2/ambari-web/app/templates/application.hbs
deleted file mode 100644
index 3449835..0000000
--- a/branch-1.2/ambari-web/app/templates/application.hbs
+++ /dev/null
@@ -1,61 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<div id="main">
-  <div id="top-nav">
-    <div class="navbar navbar-static-top">
-      <div class="navbar-inner">
-        <div class="container">
-          <a {{translateAttr href="topnav.logo.href"}} class="logo" target="_blank"><img src="/img/logo-small.png" alt="Apache Ambari" title="Apache Ambari"></a>
-          <a class="brand" {{translateAttr href="topnav.logo.href"}} target="_blank" alt="Apache Ambari" title="Apache Ambari">{{t app.name}}</a>
-
-          {{#if isClusterDataLoaded}}
-              <a class="brand cluster-name" href="javascript:void(null);" {{bindAttr title="clusterName"}}>
-                {{clusterDisplayName}}
-
-                {{#with App.router.backgroundOperationsController}}
-                  {{#if allOperationsCount}}
-                      <span class="label operations-count" {{action "showPopup" target="App.router.backgroundOperationsController"}}>{{allOperationsCount}}</span>
-                  {{/if}}
-                {{/with}}
-
-              </a>
-          {{/if}}
-
-          {{#if App.router.loggedIn}}
-            <div class="btn-group pull-right usermenu-wrapper">
-              <button class="btn btn-group dropdown-toggle"  data-toggle="dropdown">
-                {{App.router.loginName}}&nbsp;<span class="caret"></span>
-              </button>
-              <ul class="dropdown-menu">
-                <li><a href="" {{action logoff}}>{{t app.sighout}}</a></li>
-              </ul>
-            </div>
-          {{/if}}
-        </div>
-      </div>
-    </div>
-  </div>
-  <div class="container">
-    <div id="content">
-      {{outlet}}
-    </div>
-  </div>
-</div>
-
-
diff --git a/branch-1.2/ambari-web/app/templates/common/form.hbs b/branch-1.2/ambari-web/app/templates/common/form.hbs
deleted file mode 100644
index 98909d5..0000000
--- a/branch-1.2/ambari-web/app/templates/common/form.hbs
+++ /dev/null
@@ -1,21 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-{{#each field in view.fields}}
-  {{view App.FormFieldTemplate fieldBinding="field"}}
-{{/each}}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/templates/common/form/checkbox.hbs b/branch-1.2/ambari-web/app/templates/common/form/checkbox.hbs
deleted file mode 100644
index c22e107..0000000
--- a/branch-1.2/ambari-web/app/templates/common/form/checkbox.hbs
+++ /dev/null
@@ -1,24 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<div class="controls">
-  <label class="checkbox" for="input{{unbound view.field.name}}">
-    {{view view.field.viewClass valueBinding="view.field.value" }} {{unbound view.field.displayName}}
-  </label>
-  <span class="help-inline">{{view.field.errorMessage}}</span>
-</div>
diff --git a/branch-1.2/ambari-web/app/templates/common/form/field.hbs b/branch-1.2/ambari-web/app/templates/common/form/field.hbs
deleted file mode 100644
index d89ed70..0000000
--- a/branch-1.2/ambari-web/app/templates/common/form/field.hbs
+++ /dev/null
@@ -1,25 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<label class="control-label"
-       for="input{{unbound view.field.name}}">{{unbound view.field.displayName}}</label>
-
-<div class="controls">
-  {{view view.field.viewClass valueBinding="view.field.value" }}
-  <span class="help-inline">{{view.field.errorMessage}}</span>
-</div>
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/templates/common/grid/filter.hbs b/branch-1.2/ambari-web/app/templates/common/grid/filter.hbs
deleted file mode 100644
index 609c977..0000000
--- a/branch-1.2/ambari-web/app/templates/common/grid/filter.hbs
+++ /dev/null
@@ -1,25 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-{{#each filter in view.filters}}
-<li>
-  <label class="checkbox">
-    {{view Em.Checkbox checkedBinding="filter.checked"}} {{filter.label}}
-  </label>
-</li>
-{{/each}}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/templates/common/grid/header.hbs b/branch-1.2/ambari-web/app/templates/common/grid/header.hbs
deleted file mode 100644
index c4d51dc..0000000
--- a/branch-1.2/ambari-web/app/templates/common/grid/header.hbs
+++ /dev/null
@@ -1,23 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-{{view.label}}<i {{action toggleFilter target="view"}} class="icon-filter"></i>
-{{#if view.showFilter}}
-{{view view.filter}}
-  <a {{action applyFilter target="view"}}>{{t apply}}</a>
-{{/if}}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/templates/common/grid/pager.hbs b/branch-1.2/ambari-web/app/templates/common/grid/pager.hbs
deleted file mode 100644
index 131004c..0000000
--- a/branch-1.2/ambari-web/app/templates/common/grid/pager.hbs
+++ /dev/null
@@ -1,25 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<ul>
-    <li {{bindAttr class="view.prevPageDisabled"}}><a {{action activatePrevPage target="view"}} href="#">{{t common.prev}}</a></li>
-  {{#each page in view.pages}}
-    <li {{bindAttr class="page.activeClass"}} ><a {{action activatePage page target="view" }} href="#">{{page.number}}</a></li>
-  {{/each}}
-    <li {{bindAttr class="view.nextPageDisabled"}}><a {{action activateNextPage target="view"}} href="#">{{t common.next}}</a></li>
-</ul>
diff --git a/branch-1.2/ambari-web/app/templates/common/metric.hbs b/branch-1.2/ambari-web/app/templates/common/metric.hbs
deleted file mode 100644
index b91fa1a..0000000
--- a/branch-1.2/ambari-web/app/templates/common/metric.hbs
+++ /dev/null
@@ -1,61 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<div class="span title">
-  {{t common.metrics}} <i class="icon-question-sign"></i>
-</div>
-<ul class="display-inline-block nav nav-pills">
-  {{#each metric in view.metrics}}
-  {{view view.itemView metricBinding="metric" widgetBinding="view"}}
-  {{/each}}
-
-  <li class="dropdown">
-    <a {{action toggleMore target="view"}} href="#">
-      {{t metric.more}}
-      <b class="caret"></b>
-    </a>
-
-    {{#if view.showMore}}
-    <div class="accordion" id="metricAccordion">
-      {{#each view.moreMetrics}}
-      <div class="accordion-group">
-        <div class="accordion-heading">
-          <a class="accordion-toggle" data-toggle="collapse" data-parent="#metricAccordion"
-             href="#{{unbound code}}Collapse">
-            <i class="icon-play"></i>{{unbound label}}
-          </a>
-        </div>
-        <div id="{{unbound code}}Collapse" class="accordion-body collapse">
-          <div class="accordion-inner">
-            {{#if items.length }}
-            <ul class="items">
-              {{#each metric in items}}
-              {{view view.moreItemView metricBinding="metric" widgetBinding="view"}}
-              {{/each}}
-            </ul>
-            {{else}}
-            {{t metric.notFound}}
-            {{/if}}
-          </div>
-        </div>
-      </div>
-      {{/each}}
-    </div>
-    {{/if}}
-  </li>
-</ul>
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/templates/common/time_range.hbs b/branch-1.2/ambari-web/app/templates/common/time_range.hbs
deleted file mode 100644
index 66ce3f9..0000000
--- a/branch-1.2/ambari-web/app/templates/common/time_range.hbs
+++ /dev/null
@@ -1,32 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<div class="span title">
-  {{t common.timeRange}} <i class="icon-question-sign"></i>
-</div>
-<ul class="display-inline-block nav nav-pills">
-  {{#each preset in view.presets}}
-    {{view view.presetView presetBinding="preset" widgetBinding="view"}}
-  {{/each}}
-  {{t from}} {{view view.dateFromView}}
-  {{t to}} {{view view.dateToView}}
-</ul>
-<div id="slider">
-  <sapn class="period-marker">{{view.rangeLabel}}</sapn>
-  <sapn class="now-marker">{{view.nowLabel}}</sapn>
-</div>
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/templates/installer.hbs b/branch-1.2/ambari-web/app/templates/installer.hbs
deleted file mode 100644
index 2415c1c..0000000
--- a/branch-1.2/ambari-web/app/templates/installer.hbs
+++ /dev/null
@@ -1,47 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<div id="installer">
-  <div class="container">
-    <div class="container-fluid">
-      <div class="row-fluid">
-        <div class="span3">
-          <!--Sidebar content-->
-          <div class="well">
-            <ul class="nav nav-pills nav-stacked">
-              <li class="nav-header">{{t installer.header}}</li>
-              <li {{bindAttr class="isStep1:active view.isStep1Disabled:disabled"}}><a href="javascript:void(null);"  {{action gotoStep1 target="controller"}}>{{t installer.step1.header}}</a></li>
-              <li {{bindAttr class="isStep2:active view.isStep2Disabled:disabled"}}><a href="javascript:void(null);"  {{action gotoStep2 target="controller"}}>{{t installer.step2.header}}</a></li>
-              <li {{bindAttr class="isStep3:active view.isStep3Disabled:disabled"}}><a href="javascript:void(null);"  {{action gotoStep3 target="controller"}}>{{t installer.step3.header}}</a></li>
-              <li {{bindAttr class="isStep4:active view.isStep4Disabled:disabled"}}><a href="javascript:void(null);"  {{action gotoStep4 target="controller"}}>{{t installer.step4.header}}</a></li>
-              <li {{bindAttr class="isStep5:active view.isStep5Disabled:disabled"}}><a href="javascript:void(null);"  {{action gotoStep5 target="controller"}}>{{t installer.step5.header}}</a></li>
-              <li {{bindAttr class="isStep6:active view.isStep6Disabled:disabled"}}><a href="javascript:void(null);"  {{action gotoStep6 target="controller"}}>{{t installer.step6.header}}</a></li>
-              <li {{bindAttr class="isStep7:active view.isStep7Disabled:disabled"}}><a href="javascript:void(null);"  {{action gotoStep7 target="controller"}}>{{t installer.step7.header}}</a></li>
-              <li {{bindAttr class="isStep8:active view.isStep8Disabled:disabled"}}><a href="javascript:void(null);"  {{action gotoStep8 target="controller"}}>{{t installer.step8.header}}</a></li>
-              <li {{bindAttr class="isStep9:active view.isStep9Disabled:disabled"}}><a href="javascript:void(null);"  {{action gotoStep9 target="controller"}}>{{t installer.step9.header}}</a></li>
-              <li {{bindAttr class="isStep10:active view.isStep10Disabled:disabled"}}><a href="javascript:void(null);" {{action gotoStep10 target="controller"}}>{{t installer.step10.header}}</a></li>
-            </ul>
-          </div>
-        </div>
-        <div id="installer-content" class="well span9">
-          {{outlet}}
-        </div>
-      </div>
-    </div>
-  </div>
-</div>
diff --git a/branch-1.2/ambari-web/app/templates/login.hbs b/branch-1.2/ambari-web/app/templates/login.hbs
deleted file mode 100644
index d6d6059..0000000
--- a/branch-1.2/ambari-web/app/templates/login.hbs
+++ /dev/null
@@ -1,31 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<div class="well login span4">
-    <h2>{{t login.header}}</h2>
-    {{#if errorMessage}}
-    <div class="alert alert-error">
-        {{errorMessage}}
-    </div>
-    {{/if}}
-    <label>{{t login.username}}</label>
-    {{view view.loginTextField valueBinding="loginName" class="span4"}}
-    <label>{{t login.password}}</label>
-    {{view view.passTextField type="password" valueBinding="password" class="span4"}}
-        <button class="btn btn-success" {{action "submit" target="controller"}}>{{t login.loginButton}}</button>
-</div>
diff --git a/branch-1.2/ambari-web/app/templates/main.hbs b/branch-1.2/ambari-web/app/templates/main.hbs
deleted file mode 100644
index 914842f..0000000
--- a/branch-1.2/ambari-web/app/templates/main.hbs
+++ /dev/null
@@ -1,30 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-{{#if isClusterDataLoaded}}
-<div id="main-nav">
-  <div class="navbar">
-    <div class="navbar-inner">
-      {{view App.MainMenuView}}
-    </div>
-  </div>
-</div>
-{{outlet}}
-{{else}}
-<h2>{{t app.loadingPlaceholder}}</h2>
-{{/if}}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/templates/main/admin.hbs b/branch-1.2/ambari-web/app/templates/main/admin.hbs
deleted file mode 100644
index b2e0416..0000000
--- a/branch-1.2/ambari-web/app/templates/main/admin.hbs
+++ /dev/null
@@ -1,28 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<div class="row-fluid">
-  <div id="main-admin-menu" class="well span2">
-  {{view App.MainAdminMenuView}}
-  </div>
-  <div class="span10">
-    <div class="row-fluid">
-      {{outlet}}
-    </div>
-  </div>
-</div>
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/templates/main/admin/advanced.hbs b/branch-1.2/ambari-web/app/templates/main/admin/advanced.hbs
deleted file mode 100644
index 053884c..0000000
--- a/branch-1.2/ambari-web/app/templates/main/admin/advanced.hbs
+++ /dev/null
@@ -1,27 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<h5>{{t admin.advanced.title}}<i class="icon-question-sign"></i></h5>
-<div class="row">
-  {{t admin.advanced.caution}}
-</div>
-<div class="row">
-  <button {{action uninstall target="App.router.mainAdminAdvancedController"}} class="btn"><i class="icon-trash"></i>&nbsp;{{t admin.advanced.button.uninstallIncludingData}}</button>
-  <br/>
-  <button {{action uninstall view.params target="App.router.mainAdminAdvancedController"}} class="btn"><i class="icon-trash"></i>&nbsp;{{t admin.advanced.button.uninstallKeepData}}</button>
-</div>
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/templates/main/admin/advanced/uninstall.hbs b/branch-1.2/ambari-web/app/templates/main/admin/advanced/uninstall.hbs
deleted file mode 100644
index 57a41ca..0000000
--- a/branch-1.2/ambari-web/app/templates/main/admin/advanced/uninstall.hbs
+++ /dev/null
@@ -1,19 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-{{t admin.confirmUninstall}}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/templates/main/admin/audit.hbs b/branch-1.2/ambari-web/app/templates/main/admin/audit.hbs
deleted file mode 100644
index 80c2a5c..0000000
--- a/branch-1.2/ambari-web/app/templates/main/admin/audit.hbs
+++ /dev/null
@@ -1,35 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<table class="table table-striped">
-  <thead>
-  <tr>
-    {{#each column in view.columns}}
-    {{view column}}
-    {{/each}}
-  </tr>
-  </thead>
-  <tbody>
-  {{#each row in view.rows}}
-  {{view row}}
-  {{/each}}
-  </tbody>
-</table>
-{{#if view.pager}}
-  {{view view.pager}}
-{{/if}}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/templates/main/admin/authentication.hbs b/branch-1.2/ambari-web/app/templates/main/admin/authentication.hbs
deleted file mode 100644
index 9ea974b..0000000
--- a/branch-1.2/ambari-web/app/templates/main/admin/authentication.hbs
+++ /dev/null
@@ -1,48 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-{{view App.FormFieldTemplate fieldBinding="view.form.field.method" id="user-auth-method"}}
-
-{{#if view.ldapChecked}}
-  {{view App.FormFieldTemplate fieldBinding="view.form.field.primaryServer"}}
-  {{view App.FormFieldTemplate fieldBinding="view.form.field.secondaryServer"}}
-  {{view App.FormFieldTemplate fieldBinding="view.form.field.useSsl"}}
-  {{view App.FormFieldTemplate fieldBinding="view.form.field.bindMethod"}}
-  {{#if view.useCredentials}}
-    {{view App.FormFieldTemplate fieldBinding="view.form.field.bindUser"}}
-    {{view App.FormFieldTemplate fieldBinding="view.form.field.password"}}
-    {{view App.FormFieldTemplate fieldBinding="view.form.field.passwordRetype"}}
-  {{/if}}
-
-  {{view App.FormFieldTemplate fieldBinding="view.form.field.searchBaseDn"}}
-  {{view App.FormFieldTemplate fieldBinding="view.form.field.usernameAttribute"}}
-
-  <h5>{{t admin.authentication.form.configurationTest}}</h5>
-  {{view App.FormFieldTemplate fieldBinding="view.form.field.userDN"}}
-  {{view App.FormFieldTemplate fieldBinding="view.form.field.userPassword"}}
-
-  <button {{action testConfiguration on="click" target="view.form"}} class="btn">{{t admin.authentication.form.testConfiguration}}</button>
-
-  {{#if view.form.testConfigurationMessage}}
-    <p {{bindAttr class="view.form.testConfigurationClass"}}>{{view.form.testConfigurationMessage}}</p>
-  {{/if}}
-{{/if}}
-<div style="margin:40px 0">
-  <button {{action updateValues target="view.form"}} class="btn">{{t form.cancel}}</button>
-  <button {{action save view.form target="App.router.mainAdminAuthenticationController"}} class="btn btn-primary">{{t form.save}}</button>
-</div>
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/templates/main/admin/user.hbs b/branch-1.2/ambari-web/app/templates/main/admin/user.hbs
deleted file mode 100644
index bff8eb8..0000000
--- a/branch-1.2/ambari-web/app/templates/main/admin/user.hbs
+++ /dev/null
@@ -1,42 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-{{#if view.ldapUser}}
-<p class="text-info">{{t admin.users.ldapAuthentionUsed}}.</p>
-{{else}}
-<table class="table table-bordered table-striped span6">
-  <thead>
-  <tr>
-    <th style="width:50%">{{t admin.users.username}}</th>
-    <th style="width:10%">{{t admin.users.privileges}}</th>
-    <th style="width:20%">{{t admin.users.type}}</th>
-    <th style="width:20%">{{t admin.users.action}}</th>
-  </tr>
-  </thead>
-  <tbody>
-  {{#each user in view.users}}
-  {{view App.MainAdminUserRowView userBinding="user"}}
-  {{/each}}
-  </tbody>
-</table>
-<div class="span2">
-  <button class="btn" {{action gotoCreateUser on="click"}} >
-    <i class="icon-plus"></i>{{t admin.users.addButton}}
-  </button>
-</div>
-{{/if}}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/templates/main/admin/user/create.hbs b/branch-1.2/ambari-web/app/templates/main/admin/user/create.hbs
deleted file mode 100644
index 82b51b5..0000000
--- a/branch-1.2/ambari-web/app/templates/main/admin/user/create.hbs
+++ /dev/null
@@ -1,38 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<form class="form-horizontal">
-  {{#each field in view.userForm.fields}}
-  {{#unless field.isHidden}}
-  <div {{bindAttr class="field.errorMessage:error :control-group"}}>
-    <label class="control-label" for="input{{unbound field.name}}">{{unbound field.displayName}}</label>
-    <div class="controls">
-        {{view field.viewClass valueBinding="field.value" disabledBinding="field.disabled"}}
-      <span class="help-inline">{{field.errorMessage}}</span>
-    </div>
-  </div>
-  {{/unless}}
-  {{/each}}
-  <div class="control-group">
-    <div class="controls">
-      <button type="submit" class="btn" {{action gotoUsers}}>{{t form.cancel}}</button>
-      <button type="submit"
-              class="btn btn-primary" {{action create target="view"}}>Create</button>
-    </div>
-  </div>
-</form>
diff --git a/branch-1.2/ambari-web/app/templates/main/admin/user/edit.hbs b/branch-1.2/ambari-web/app/templates/main/admin/user/edit.hbs
deleted file mode 100644
index 437fea1..0000000
--- a/branch-1.2/ambari-web/app/templates/main/admin/user/edit.hbs
+++ /dev/null
@@ -1,38 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<form class="form-horizontal">
-  {{#each field in view.userForm.fields}}
-  {{#unless field.isHidden}}
-  <div {{bindAttr class="field.errorMessage:error :control-group"}}>
-    <label class="control-label" for="input{{unbound field.name}}">{{unbound field.displayName}}</label>
-    <div class="controls">
-        {{view field.viewClass valueBinding="field.value" disabledBinding="field.disabled"}}
-      <span class="help-inline">{{field.errorMessage}}</span>
-    </div>
-  </div>
-  {{/unless}}
-  {{/each}}
-  <div class="control-group">
-    <div class="controls">
-      <button type="submit" class="btn" {{action gotoUsers}}>{{t form.cancel}}</button>
-      <button type="submit"
-              class="btn btn-primary" {{action edit target="view"}}>{{t common.save}}</button>
-    </div>
-  </div>
-</form>
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/templates/main/admin/user/row.hbs b/branch-1.2/ambari-web/app/templates/main/admin/user/row.hbs
deleted file mode 100644
index 3cf2148..0000000
--- a/branch-1.2/ambari-web/app/templates/main/admin/user/row.hbs
+++ /dev/null
@@ -1,27 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<tr>
-  <td>{{user.userName}}</td>
-  <td>{{view Ember.Checkbox disabledBinding="view.disableCheckBoxes" checkedBinding="user.admin"}}</td>
-  <td>{{user.type}}</td>
-  <td>
-    <a href="#" {{action gotoEditUser user on="click"}}>{{t admin.users.edit}}</a>&nbsp;
-    <a href="#" {{action deleteRecord user target="App.router.mainAdminUserController" }}>{{t admin.users.delete}}</a>
-  </td>
-</tr>
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/templates/main/apps.hbs b/branch-1.2/ambari-web/app/templates/main/apps.hbs
deleted file mode 100644
index 9da61b3..0000000
--- a/branch-1.2/ambari-web/app/templates/main/apps.hbs
+++ /dev/null
@@ -1,113 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<div id="apps">
-  <table class="table table-bordered avg-table">
-    <tbody>
-    <tr>
-      <td></td>
-      <td>{{t apps.avgTable.jobs}}</td>
-      <td>{{t apps.avgTable.input}}</td>
-      <td>{{t apps.avgTable.output}}</td>
-      <td>{{t apps.avgTable.duration}}</td>
-      <td>{{t apps.avgTable.oldest}}</td>
-      <td>{{t apps.avgTable.mostRecent}}</td>
-    </tr>
-    <tr class="avg-info">
-      <td>{{t apps.avgTable.avg}}</td>
-      <td>{{summary.jobs.avg}}</td>
-      <td>{{summary.input.avg}}</td>
-      <td>{{summary.output.avg}}</td>
-      <td>{{summary.duration.avg}}</td>
-      <td>{{summary.times.oldest}}</td>
-      <td>{{summary.times.youngest}}</td>
-
-    </tr>
-    <tr class="compare-info">
-      <td>{{t apps.avgTable.min}} / {{t apps.avgTable.max}}</td>
-      <td>{{summary.jobs.min}} / {{summary.jobs.max}}</td>
-      <td>{{summary.input.min}} / {{summary.input.max}}</td>
-      <td>{{summary.output.min}} / {{summary.output.max}}</td>
-      <td>{{summary.duration.min}} / {{summary.duration.max}}</td>
-      <td></td>
-      <td></td>
-    </tr>
-    </tbody>
-  </table>
-
-  <div class="filter_info">
-
-    <div class="span4" id="filter_buttons">{{t common.show}}:
-      <a class="all selected" {{action "clickViewType" target="view"}} data-view-type="all">{{t apps.filters.all}}
-        ({{controller.paginationObject.iTotalRecords}})</a> &#124;
-      <a class="filtered" {{action "clickViewType" target="view"}} data-view-type="filtered">{{t apps.filters.filtered}}
-        ({{controller.filterObject.filteredDisplayRecords}})</a>
-    </div>
-    <div class="span2 clear_filter">
-        <a {{action "clearFilters" target="controller"}}>{{t apps.filters.clearFilters}}</a>
-    </div>
-    <div class="search-bar">
-        {{view view.appSearchThrough valueBinding="controller.filterObject.sSearch"}}
-    </div>
-  </div>
-
-  <table class="table table-striped runsList">
-    <thead>
-    {{#view view.wrapSorting}}
-      {{#each controller.columnsName}}
-        {{#view view.parentView.sortingColumns contentBinding="this"}}
-          {{name}}
-        {{/view}}
-      {{/each}}
-    {{/view}}
-    <tr>
-      <th>{{view view.appIdFilterView}}</th>
-      <th>{{view view.nameFilterView}}</th>
-      <th>{{view view.typeFilterView}}</th>
-      <th>{{view view.userFilterView}}</th>
-      <th>{{view view.jobsFilterView}}</th>
-      <th>{{view view.inputFilterView}}</th>
-      <th>{{view view.outputFilterView}}</th>
-      <th>{{view view.durationFilterView}}</th>
-      <th>{{view view.runDateFilterView}}</th>
-    </tr>
-    </thead>
-    <tbody>
-       {{#if view.emptyData}}
-         <tr>
-           <td class="no-data" colspan="9">{{t apps.filters.nothingToShow}}</td>
-         </tr>
-       {{else}}
-         {{#each run in content}}
-           {{view view.containerRow runBinding="run" currentViewBinding="view.appTableRow"}}
-         {{/each}}
-       {{/if}}
-    </tbody>
-  </table>
-
-  <div class="page-bar">
-      <div class="items-on-page">
-          <label>{{t common.show}}: {{view view.runPerPageSelectView viewName="runPerPageSelectView" selectionBinding="controller.filterObject.iDisplayLength"}}</label>
-      </div>
-      <div class="info">{{view.paginationInfo}}</div>
-      <div class="paging_two_button">
-          {{view view.paginationLeft}}
-          {{view view.paginationRight}}
-      </div>
-  </div>
-</div>
diff --git a/branch-1.2/ambari-web/app/templates/main/apps/item.hbs b/branch-1.2/ambari-web/app/templates/main/apps/item.hbs
deleted file mode 100644
index c6f1c6d..0000000
--- a/branch-1.2/ambari-web/app/templates/main/apps/item.hbs
+++ /dev/null
@@ -1,34 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<td colspan="10">
-  <ul class="nav nav-tabs">
-    {{#each tab in view.menuTabs}}
-    <li {{bindAttr class="tab.active"}} {{action "switchTab" tab target="view" }}>
-      <a href="javascript:void(0)">{{tab.label}}</a>
-    </li>
-    {{/each}}
-  </ul>
-  <div class="content">
-    {{view view.containerView}}
-  </div>
- </td>
-
-
-
-
diff --git a/branch-1.2/ambari-web/app/templates/main/apps/item/bar.hbs b/branch-1.2/ambari-web/app/templates/main/apps/item/bar.hbs
deleted file mode 100644
index 37dc395..0000000
--- a/branch-1.2/ambari-web/app/templates/main/apps/item/bar.hbs
+++ /dev/null
@@ -1,57 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<div class="btn-group">
-  <button class="btn btn-primary dropdown-toggle" data-toggle="dropdown">
-    {{t apps.dagCharts.popup.job}} {{view.activeJob.id}} - {{view.activeJob.run.id}}
-    <span class="caret"></span>
-  </button>
-  <ul class="dropdown-menu">
-    {{#each job in view.content}}
-    <li><a {{action "selectJob" job target="view"}} href="javascript:void(null)">{{t apps.dagCharts.popup.job}} {{job.id}} - {{job.run.id}}</a></li>
-    {{/each}}
-  </ul>
-</div>
-<div></div>
-<div id="graph1" class="pull-left">
-  <div class="pull-left">
-    <div id="graph1_desc" class="graph_desc">
-      <h4>{{t apps.dagCharts.popup.tasks.timeline}}</h4>
-    </div>
-    <div id="y-axis"></div>
-    <div id="chart"></div>
-    <div id="timeline1"></div>
-  </div>
-  <div id="legend_container" class="pull-left">
-    <div id="legend"></div>
-  </div>
-</div>
-<div id="graph2" class="pull-right">
-  <div class="pull-left">
-    <div id="graph2_desc" class="graph_desc">
-      <h4>{{t apps.dagCharts.popup.tasks.tasks}}</h4>
-    </div>
-    <div id="y-axis2"></div>
-    <div id="job_tasks"></div>
-    <div id="timeline2"></div>
-  </div>
-  <div id="tasks_legend_container" class="pull-left">
-    <div id="tasks_legend"></div>
-  </div>
-</div>
-<div class="clearfix"></div>
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/templates/main/apps/item/dag.hbs b/branch-1.2/ambari-web/app/templates/main/apps/item/dag.hbs
deleted file mode 100644
index f9806cb..0000000
--- a/branch-1.2/ambari-web/app/templates/main/apps/item/dag.hbs
+++ /dev/null
@@ -1,51 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-    <div id="dag_viewer"></div>
-
-    <table class="table table-bordered table-striped" id="innerTable">
-      <thead>
-      <tr>
-        {{#view view.sortView contentBinding="controller.content.jobs"}}
-          {{#if view.parentView.hasManyJobs}}{{view view.parentView.nameSort}}{{/if}}
-          {{view view.parentView.idSort}}
-          {{view view.parentView.statusSort}}
-          {{view view.parentView.mapsSort}}
-          {{view view.parentView.reducesSort}}
-          {{view view.parentView.inputSort}}
-          {{view view.parentView.outputSort}}
-          {{view view.parentView.durationSort}}
-        {{/view}}
-      </tr>
-      </thead>
-      <tbody>
-      {{#each job in controller.content.jobs}}
-      <tr>
-        {{#if view.hasManyJobs}}<td>{{job.workflow_entity_name}}</td>{{/if}}
-        <td>{{job.id}}</td>
-        <td>{{job.status}}</td>
-        <td>{{job.maps}}</td>
-        <td>{{job.reduces}}</td>
-        <td>{{job.inputFormatted}}</td>
-        <td>{{job.outputFormatted}}</td>
-        <td>{{job.duration}}</td>
-      </tr>
-      {{/each}}
-      </tbody>
-    </table>
-
-
diff --git a/branch-1.2/ambari-web/app/templates/main/apps/list_row.hbs b/branch-1.2/ambari-web/app/templates/main/apps/list_row.hbs
deleted file mode 100644
index a926248..0000000
--- a/branch-1.2/ambari-web/app/templates/main/apps/list_row.hbs
+++ /dev/null
@@ -1,35 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<td class="appId" title="{{unbound run.id}}">{{unbound run.idFormatted}}</td>
-<td>{{unbound run.appName}}</td>
-<td>{{unbound run.type}}</td>
-<td>{{unbound run.userName}}</td>
-<td>{{unbound run.numJobsTotal}}</td>
-<td>{{unbound run.inputFormatted}}</td>
-<td>{{unbound run.outputFormatted}}</td>
-<td>{{unbound run.duration}}</td>
-{{#if run.isRunning}}
-<td rel="popover" {{translateAttr title="apps.isRunning.popup.title" data-content="apps.isRunning.popup.content"}}>
-    <span title="{{unbound run.lastUpdateTimeFormatted}}">{{unbound run.lastUpdateTimeFormattedShort}}</span>
-    <b class='red'>*</b></td>
-{{else}}
-<td>
-    <span title="{{unbound run.lastUpdateTimeFormatted}}">{{unbound run.lastUpdateTimeFormattedShort}}</span>
-</td>
-{{/if}}
diff --git a/branch-1.2/ambari-web/app/templates/main/apps/user_filter.hbs b/branch-1.2/ambari-web/app/templates/main/apps/user_filter.hbs
deleted file mode 100644
index 16f6046..0000000
--- a/branch-1.2/ambari-web/app/templates/main/apps/user_filter.hbs
+++ /dev/null
@@ -1,31 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-<button class="btn btn-info single-btn-group" {{action "clickFilterButton" target="view"}}>
-  {{t common.user}}&nbsp;<span class="caret"></span>
-</button>
-<ul class="dropdown-menu filter-components">
-  {{#each user in view.users}}
-    <li>
-      <label class="checkbox">{{view Ember.Checkbox checkedBinding="user.checked"}} {{user.name}}</label>
-    </li>
-  {{/each}}
-  <li>
-    <button class="btn" {{action "closeFilter" target="view"}}>{{t common.cancel}}</button>
-    <button class="btn btn-primary" {{action "applyFilter" target="view"}}>{{t common.apply}}</button>
-  </li>
-</ul>
diff --git a/branch-1.2/ambari-web/app/templates/main/background_operations_popup.hbs b/branch-1.2/ambari-web/app/templates/main/background_operations_popup.hbs
deleted file mode 100644
index 1b08e4e..0000000
--- a/branch-1.2/ambari-web/app/templates/main/background_operations_popup.hbs
+++ /dev/null
@@ -1,53 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-{{#each operation in allOperations}}
-  {{#view App.MainBackgroundOperation contentBinding="operation"}}
-    <div>
-      <a {{action showOperationLog target="view"}} href="#">
-        <i {{bindAttr class="view.iconClass"}}></i>
-        {{#if view.isOpen}}{{t common.hide}}{{else}}{{t common.show}}{{/if}} info about {{operation.command}} {{operation.role}}
-        on {{operation.host_name}}
-      </a>
-      {{#if view.hasProgressBar}}
-        <div {{bindAttr class="view.isInProgress:progress-striped :active view.barColor :progress"}}>
-          <div class="bar"></div>
-        </div>
-      {{/if}}
-      <div class="content-area">
-        <div class="textTrigger">{{t popup.highlight}}</div>
-        {{#if view.isOpen}}
-          {{#if view.isTextArea}}
-            <div>
-              {{view view.textArea contentBinding="operation"}}
-            </div>
-          {{else}}
-            <div>
-              <h5>exitcode:</h5>
-              <pre class="stderr">{{#if operation.display_exit_code}}{{operation.exit_code}}{{/if}}</pre>
-              <h5>stderr:</h5>
-              <pre class="stderr">{{highlight operation.stderr [err;fail]}}</pre>
-              <h5>stdout:</h5>
-              <pre class="stdout">{{highlight operation.stdout [err;fail]}}</pre>
-            </div>
-          {{/if}}
-        {{/if}}
-      </div>
-    </div>
-  {{/view}}
-{{/each}}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/templates/main/charts.hbs b/branch-1.2/ambari-web/app/templates/main/charts.hbs
deleted file mode 100644
index 941bb77..0000000
--- a/branch-1.2/ambari-web/app/templates/main/charts.hbs
+++ /dev/null
@@ -1,19 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-{{outlet}}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/templates/main/charts/heatmap.hbs b/branch-1.2/ambari-web/app/templates/main/charts/heatmap.hbs
deleted file mode 100644
index 907b7f4..0000000
--- a/branch-1.2/ambari-web/app/templates/main/charts/heatmap.hbs
+++ /dev/null
@@ -1,78 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<div class="heatmap">
-
-  <div class="container-fluid">
-	  <div class="row-fluid">
-	    <div class="span2 legend-column">
-		     <div class="btn-group">
-				  <button class="btn">{{t charts.heatmap.selectMetric}}</button>
-				  <button class="btn dropdown-toggle" data-toggle="dropdown">
-				    <span class="caret"></span>
-				  </button>
-				  <ul class="dropdown-menu">
-				    {{#each category in controller.allMetrics}}
-               <li class="dropdown-submenu">
-                <a tabindex="-1" >{{category.label}}</a>
-                <ul class="dropdown-menu">
-                  {{#each metric in category.items}}
-                    <li>
-                      <a tabindex="-1" {{action showHeatMapMetric metric target="controller"}}>{{metric.name}}</a>
-                    </li>
-                  {{/each}}
-                </ul>
-               </li>
-            {{/each}}
-				  </ul>
-				</div>
-        {{#if controller.selectedMetric}}
-					<table class="legend">
-					  {{#each slot in controller.selectedMetric.slotDefinitions}}
-					    <tr>
-					      <td>
-					        <div class="tile" {{bindAttr style="slot.cssStyle"}}></div>
-					      </td>
-					      <td>{{slot.label}}</td>
-					    </tr>
-					  {{/each}}
-					</table>
-	        {{t common.maximum}}:
-	        <div>
-	          {{view Ember.TextField type="text" valueBinding="controller.selectedMetric.maximumValue" class="span6"}}
-	          {{controller.selectedMetric.units}}
-	        </div>
-        {{/if}}
-	    </div>
-	    <div class="span10">
-	      <h4 id="heatmap-metric-loading">
-	        <span id="heatmap-metric-title">{{controller.selectedMetric.name}}</span>
-	      </h4>
-	      <div class="row-fluid">
-				  {{#each rack in controller.cluster.racks}}
-				    <div {{bindAttr class="controller.rackClass"}}>
-				      {{view App.MainChartsHeatmapRackView rackBinding="rack" }}
-				    </div>
-				  {{/each}}
-			  </div>
-			  {{view App.MainChartsHeatmapHostDetailView}}
-	    </div>
-	  </div>
-  </div>
-
-</div>
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/templates/main/charts/heatmap/heatmap_host.hbs b/branch-1.2/ambari-web/app/templates/main/charts/heatmap/heatmap_host.hbs
deleted file mode 100644
index dfdc914..0000000
--- a/branch-1.2/ambari-web/app/templates/main/charts/heatmap/heatmap_host.hbs
+++ /dev/null
@@ -1,21 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<div {{bindAttr class="view.hostClass"}} {{bindAttr style="view.hostTemperatureStyle"}}>
-  <a href="#" class="heatmap-host" {{action "routeHostDetail" view.content target="controller"}}></a>
-</div>
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/templates/main/charts/heatmap/heatmap_host_detail.hbs b/branch-1.2/ambari-web/app/templates/main/charts/heatmap/heatmap_host_detail.hbs
deleted file mode 100644
index e9aba0e..0000000
--- a/branch-1.2/ambari-web/app/templates/main/charts/heatmap/heatmap_host_detail.hbs
+++ /dev/null
@@ -1,36 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<b>{{view.details.publicHostName}}</b><br/>
-{{t common.os}}: {{view.details.osType}}<br/>
-{{t common.ipAddress}}: {{view.details.ip}}<br/>
-{{#if view.details.metricName}}
-  {{view.details.metricName}}: {{view.details.metricValue}}<br/>
-{{/if}}
-{{#if view.details.diskUsage}}
-  {{t common.disk}}: {{view.details.diskUsage}}% <br/>
-{{/if}}
-{{#if view.details.cpuUsage}}
-  {{t common.cpu}}: {{view.details.cpuUsage}}%<br/>
-{{/if}}
-{{#if view.details.memoryUsage}}
-  {{t common.memory}}: {{view.details.memoryUsage}}%<br/>
-{{/if}}
-{{#if view.details.hostComponents}}
-  {{t common.components}}: {{view.details.hostComponents}}<br/>
-{{/if}}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/templates/main/charts/heatmap/heatmap_rack.hbs b/branch-1.2/ambari-web/app/templates/main/charts/heatmap/heatmap_rack.hbs
deleted file mode 100644
index 0bb176b..0000000
--- a/branch-1.2/ambari-web/app/templates/main/charts/heatmap/heatmap_rack.hbs
+++ /dev/null
@@ -1,51 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<div class="rackHeader">
-  <div class="statusName clearfix">
-    <div {{bindAttr class="view.statusIndicator rack.status"}}></div>
-    <div class="rackName">{{rack.name}}</div>
-    <!--<a href="#" class="toggler" {{!action toggleRack target="view" on="click" }}>-->
-      <!--<span {{!bindAttr class="view.heatmapTogglerClass"}}></span>-->
-    <!--</a>-->
-  </div>
-</div>
-<!--<div class="hostsSummary clearfix">-->
-  <!--<div class="textBlock">Total hosts: {{!rack.hosts.length}}</div>-->
-  <!--{{!#if view.heatmapIsOpened}}-->
-    <!--<div class="textBlock">-->
-      <!--<div class="indicatorG"></div>-->
-      <!--Hosts in live state: {{!rack.liveHostsCount}}-->
-    <!--</div>-->
-    <!--<div class="textBlock">-->
-      <!--<div class="indicatorY"></div>-->
-      <!--Hosts in critical state: {{!rack.criticalHostsCount}}-->
-    <!--</div>-->
-    <!--<div class="textBlock">-->
-      <!--<div class="indicatorR"></div>-->
-      <!--Hosts in dead state: {{!rack.deadHostsCount}}-->
-    <!--</div>-->
-  <!--{{!/if}}-->
-<!--</div>-->
-<div {{bindAttr class="view.heatmapTogglerClass view.hostsBlockClass"}}>
-  {{#each rack.hosts}}
-    <div {{bindAttr style="view.hostCssStyle"}}>
-      {{view App.MainChartsHeatmapHostView contentBinding="this"}}
-    </div>
-  {{/each}}
-</div>
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/templates/main/charts/linear_time.hbs b/branch-1.2/ambari-web/app/templates/main/charts/linear_time.hbs
deleted file mode 100644
index b4ed659..0000000
--- a/branch-1.2/ambari-web/app/templates/main/charts/linear_time.hbs
+++ /dev/null
@@ -1,30 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<div class="screensaver chart-container" {{bindAttr class="view.isReady:hide"}} >
-  <div id="{{unbound view.id}}-title" class="chart-title">{{view.title}}</div>
-</div>
-<div id="{{unbound view.id}}-container" class="chart-container hide" {{bindAttr class="view.isReady:show"}} {{action showGraphInPopup target="view"}} title="Click to zoom">
-  <div id="{{unbound view.id}}-yaxis" class="chart-y-axis"></div>
-  <div id="{{unbound view.id}}-xaxis" class="chart-x-axis"></div>
-  <div id="{{unbound view.id}}-legend" class="chart-legend"></div>
-  <div id="{{unbound view.id}}-chart" class="chart"></div>
-  <div id="{{unbound view.id}}-timeline" class="timeline"></div>
-
-  <div id="{{unbound view.id}}-title" class="chart-title">{{view.title}}</div>
-</div>
diff --git a/branch-1.2/ambari-web/app/templates/main/dashboard.hbs b/branch-1.2/ambari-web/app/templates/main/dashboard.hbs
deleted file mode 100644
index 093a2c9..0000000
--- a/branch-1.2/ambari-web/app/templates/main/dashboard.hbs
+++ /dev/null
@@ -1,68 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<div class="row">
-  <div class="span6">
-    <div class="row">
-      <div class="span6">
-        <div class="box">
-          <div class="box-header">
-            <h4>{{t dashboard.services}}</h4>
-          </div>
-          <dl class="dl-horizontal services">
-            {{#each item in view.content}}
-                {{view item.viewName serviceBinding="item.model"}}
-            {{/each}}
-          </dl>
-        </div>
-      </div>
-    </div>
-  </div>
-  <div class="span6">
-		<div class="box">
-			<div class="box-header">
-				<h4>{{t dashboard.clusterMetrics}}</h4>
-				<div class="btn-group">
-        </div>
-				<div class="btn-group">
-          <a class="btn" target="_blank" rel="tooltip" title="Go to Ganglia" {{bindAttr href="view.gangliaUrl"}}><i class="icon-link"></i></a>
-        </div>
-			</div>
-			<div class="graphs-container">
-        <table class="graphs">
-          <tr>
-            <td>
-              {{view App.ChartClusterMetricsNetwork}}
-            </td>
-            <td>
-              {{view App.ChartClusterMetricsLoad}}
-            </td>
-          </tr>
-          <tr>
-            <td>
-              {{view App.ChartClusterMetricsMemory}}
-            </td>
-            <td>
-              {{view App.ChartClusterMetricsCPU}}
-            </td>
-          </tr>
-        </table>
-			</div>
-		</div>
-	</div>
-</div>
diff --git a/branch-1.2/ambari-web/app/templates/main/dashboard/alert_notification_popup.hbs b/branch-1.2/ambari-web/app/templates/main/dashboard/alert_notification_popup.hbs
deleted file mode 100644
index 6ca9b58..0000000
--- a/branch-1.2/ambari-web/app/templates/main/dashboard/alert_notification_popup.hbs
+++ /dev/null
@@ -1,30 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<p>{{view.warnAlertsMessage}}</p>
-{{#if view.warnAlertsCount}}
-  <ul id='summary-alerts-list' class="alerts">
-    {{#each view.warnAlerts}}
-      {{view App.AlertItemView contentBinding="this"}}
-    {{/each}}
-  </ul>
-{{/if}}
-<p class="alertsPopupLinks">
-    <a href="#" {{action selectService}}>{{t services.alerts.goToService}}</a>
-    <a href="#" {{action "viewNagiosUrl" on="mouseUp"}}>{{t services.alerts.goToNagios}}</a>
-</p>
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/templates/main/dashboard/service/hbase.hbs b/branch-1.2/ambari-web/app/templates/main/dashboard/service/hbase.hbs
deleted file mode 100644
index 8b4452e..0000000
--- a/branch-1.2/ambari-web/app/templates/main/dashboard/service/hbase.hbs
+++ /dev/null
@@ -1,115 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-{{#unless view.showOnlyRows}}
-<div class="clearfix like_pointer" {{action toggleInfoView target="view"}}>
-  <div class="name span2">
-    {{#if view.isCollapsed}}
-    <i class="icon-caret-down pull-left"></i>
-    {{else}}
-    <i class="icon-caret-right pull-left"></i>
-    {{/if}}
-    {{view App.MainDashboardServiceHealthView serviceBinding="view.service"}}
-    <a {{action selectService view.service href=true}}>{{view.service.displayName}}</a>
-    {{#if view.criticalAlertsCount}}
-      <a href="#" class="label label-important" {{action "showAlertsPopup" view.service target="view.parentView"}}>
-        {{view.criticalAlertsCount}}
-      </a>
-    {{/if}}
-  </div>
-  <div class="summary span">
-    {{view.summaryHeader}}
-  </div>
-</div>
-<div class="service-body">
-  <div class="span4">
-    <table class="table no-borders">
-      <tbody>
-    {{/unless}}
-      <!-- HBase Master Server -->
-      <tr>
-        <td>{{t dashboard.services.hbase.masterServer}}</td>
-        <td><a href="#" {{action showDetails view.service.master}}>{{view.service.master.publicHostName}}</a></td>
-      </tr>
-      <!-- RegionServers -->
-      <tr>
-        <td>{{t dashboard.services.hbase.regionServers}}</td>
-        <td><a href="#" {{action filterHosts view.regionServerComponent}}>{{view.service.regionServers.length}} {{t dashboard.services.hbase.regionServers}}</a></td>
-      </tr>
-      <!-- Version -->
-      <tr>
-        <td>{{t dashboard.services.hbase.version}}</td>
-        <td>{{view.version}} {{view.service.revision}}</td>
-      </tr>
-      <!-- HBaseMaster Web UI -->
-      <tr>
-        <td>{{t dashboard.services.hbase.masterWebUI}}</td>
-        <td><a {{bindAttr href="view.hbaseMasterWebUrl"}} target="_blank">{{view.service.master.publicHostName}}:60010</a>
-        </td>
-      </tr>
-      <!-- Regions in Transition -->
-      <tr>
-        <td>{{t dashboard.services.hbase.regions.transition}}</td>
-        <td>{{view.service.regionsInTransition}}</td>
-      </tr>
-      <!-- HBase Master Started Time -->
-      <tr>
-        <td>{{t dashboard.services.hbase.masterStarted}}</td>
-        <td>{{view.masterStartedTime}}</td>
-      </tr>
-      <!-- HBase Master Activated Time -->
-      <tr>
-        <td>{{t dashboard.services.hbase.masterActivated}}</td>
-        <td>{{view.masterStartedTime}}</td>
-      </tr>
-      <!-- Average Load -->
-      <tr>
-        <td>{{t dashboard.services.hbase.averageLoad}}</td>
-        <td>{{view.averageLoad}}</td>
-      </tr>
-      <!-- Master Server Heap -->
-      <tr>
-        <td>{{t dashboard.services.hbase.masterServerHeap}}</td>
-        <td>{{view.masterServerHeapSummary}}</td>
-      </tr>
-
-    {{#unless view.showOnlyRows}}
-      </tbody>
-    </table>
-  </div>
-<div class="dashboard-mini-chart span2">
-  {{view App.ChartServiceMetricsHBASE_RegionServerReadWriteRequests}}
-  
-  {{#if view.service.quickLinks.length}}
-  {{#view App.QuickViewLinks contentBinding="view.service"}}
-  <div class="btn-group">
-    <a class="btn btn-mini dropdown-toggle" data-toggle="dropdown" href="#">
-      {{t common.quickLinks}}
-      <span class="caret"></span>
-    </a>
-    <ul class="dropdown-menu">
-      {{#each view.quickLinks}}
-        <li><a {{bindAttr href="url"}} target="_blank">{{label}}</a></li>
-      {{/each}}
-    </ul>
-  </div>
-  {{/view}}
-  {{/if}}
-</div>
-</div>
-{{/unless}}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/templates/main/dashboard/service/hdfs.hbs b/branch-1.2/ambari-web/app/templates/main/dashboard/service/hdfs.hbs
deleted file mode 100644
index 9609d21..0000000
--- a/branch-1.2/ambari-web/app/templates/main/dashboard/service/hdfs.hbs
+++ /dev/null
@@ -1,163 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-{{#unless view.showOnlyRows}}
-<div class="clearfix like_pointer" {{action toggleInfoView target="view"}}>
-  <div class="name span2">
-    {{#if view.isCollapsed}}
-    <i class="icon-caret-down pull-left"></i>
-    {{else}}
-    <i class="icon-caret-right pull-left"></i>
-    {{/if}}
-    {{view App.MainDashboardServiceHealthView serviceBinding="view.service"}}
-    <a {{action selectService view.service href=true}}>{{view.service.displayName}}</a>
-    {{#if view.criticalAlertsCount}}
-      <a href="#" class="label label-important" {{action "showAlertsPopup" view.service target="view.parentView"}}>
-        {{view.criticalAlertsCount}}
-      </a>
-    {{/if}}
-  </div>
-  <div class="summary span">
-    {{view.summaryHeader}}
-  </div>
-</div>
-<div class="service-body">
-  <div class="span4">
-    <table class="table no-borders">
-      <tbody>
-    {{/unless}}
-
-      <!-- NameNode -->
-      <tr>
-        <td>{{t dashboard.services.hdfs.nanmenode}}</td>
-        <td><a href="#" {{action showDetails view.service.nameNode}}>{{view.service.nameNode.publicHostName}}</a></td>
-      </tr>
-      <!-- SecondaryNameNode -->
-      <tr>
-        <td>{{t dashboard.services.hdfs.snanmenode}}</td>
-        <td><a href="#" {{action showDetails view.service.snameNode}}>{{view.service.snameNode.publicHostName}}</a></td>
-      </tr>
-      <!-- Data Nodes -->
-      <tr>
-        <td>{{t dashboard.services.hdfs.datanodes}}</td>
-        <td>
-          <a href="#" {{action filterHosts view.dataNodeComponent}}>{{view.service.dataNodes.length}} {{t dashboard.services.hdfs.datanodes}}</a>
-        </td>
-      </tr>
-      <!-- Version -->
-      <tr>
-        <td>{{t dashboard.services.hdfs.version}}</td>
-        <td>{{view.version}}</td>
-      </tr>
-      <!-- NameNode Web UI -->
-      <tr>
-        <td>{{t dashboard.services.hdfs.nameNodeWebUI}}</td>
-        <td><a {{bindAttr href="view.nodeWebUrl"}} target="_blank">{{view.service.nameNode.publicHostName}}:50070</a>
-        </td>
-      </tr>
-      <!-- NameNode Uptime -->
-      <tr>
-        <td>{{t dashboard.services.hdfs.nodes.uptime}}</td>
-        <td>{{view.nodeUptime}}</td>
-      </tr>
-      <!-- NameNode Heap -->
-      <tr>
-        <td>{{t dashboard.services.hdfs.nodes.heap}}</td>
-        <td>{{view.nodeHeap}}</td>
-      </tr>
-      <!-- Data Node Counts -->
-      <tr>
-        <td>{{t dashboard.services.hdfs.datanodecounts}}</td>
-        <td>
-          {{view.service.liveDataNodes.length}} {{t dashboard.services.hdfs.nodes.live}} /
-          {{view.service.deadDataNodes.length}} {{t dashboard.services.hdfs.nodes.dead}} /
-          {{view.service.decommissionDataNodes.length}} {{t dashboard.services.hdfs.nodes.decom}}
-        </td>
-      </tr>
-      <!-- HDFS Capacity -->
-      <tr>
-        <td>{{t dashboard.services.hdfs.capacity}}</td>
-        <td>{{view.capacity}}</td>
-      </tr>
-      <!-- Blocks Total -->
-      <tr>
-        <td>{{t services.service.summary.blocksTotal}}</td>
-        <td>{{view.dfsTotalBlocks}}</td>
-      </tr>
-      <!-- Block Errors -->
-      <tr>
-        <td>{{t services.service.summary.blockErrors}}</td>
-        <td>
-          {{view.blockErrorsMessage}}
-        </td>
-      </tr>
-      <!-- Total Files And Directories -->
-      <tr>
-        <td>{{t dashboard.services.hdfs.totalFilesAndDirs}}</td>
-        <td>{{view.dfsTotalFiles}}</td>
-      </tr>
-      <!-- Upgrade Status -->
-      <tr>
-        <td>{{t services.service.summary.pendingUpgradeStatus}}</td>
-        <td>
-          {{#if view.service.upgradeStatus}}
-            {{t services.service.summary.pendingUpgradeStatus.notPending}}
-          {{else}}
-            {{t services.service.summary.pendingUpgradeStatus.pending}}
-          {{/if}}
-        </td>
-      </tr>
-      <!-- Safe Mode Status -->
-      <tr>
-        <td>{{t services.service.summary.safeModeStatus}}</td>
-        <td>
-          {{#if view.isSafeMode}}
-            {{t services.service.summary.safeModeStatus.inSafeMode}}
-          {{else}}
-            {{t services.service.summary.safeModeStatus.notInSafeMode}}
-          {{/if}}
-        </td>
-      </tr>
-
-    {{#unless view.showOnlyRows}}
-      </tbody>
-    </table>
-  </div>
-<div class="dashboard-mini-chart span2">
-  <div {{bindAttr title="view.capacity"}}>
-    {{view view.Chart serviceBinding="view.service"}}
-  </div>
-  <div class="chartLabel">{{t dashboard.services.hdfs.chart.label}}</div>
-  {{#if view.service.quickLinks.length}}
-    {{#view App.QuickViewLinks contentBinding="view.service"}}
-      <div class="btn-group">
-        <a class="btn btn-mini dropdown-toggle" data-toggle="dropdown" href="#">
-          {{t common.quickLinks}}
-          <span class="caret"></span>
-        </a>
-        <ul class="dropdown-menu">
-          {{#each view.quickLinks}}
-            <li><a {{bindAttr href="url"}} target="_blank">{{label}}</a></li>
-          {{/each}}
-        </ul>
-      </div>
-    {{/view}}
-  {{/if}}
-</div>
-</div>
-{{/unless}}
\ No newline at end of file
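
Note on the HDFS rows above: values such as view.isSafeMode and view.nodeUptime are computed properties on the backing Ember view, which this hunk does not touch. A minimal sketch in the Ember 1.x style ambari-web uses; the base class and the service.* dependency names are assumptions:

    // Sketch only: hypothetical computed properties behind the HDFS summary rows.
    // App.MainDashboardServiceView as the base class is an assumption.
    App.MainDashboardServiceHdfsView = App.MainDashboardServiceView.extend({
      // Safe Mode row: treat any non-empty safemode status string as "in safe mode".
      isSafeMode: function () {
        var status = this.get('service.safeModeStatus');
        return !!status && status.length > 0;
      }.property('service.safeModeStatus'),

      // NameNode Uptime row: coarse hours-since-start formatting for the sketch.
      nodeUptime: function () {
        var started = this.get('service.nameNodeStartTime');
        if (!started || started < 0) {
          return 'Not Running';
        }
        var hours = Math.floor((new Date().getTime() - started) / 3600000);
        return hours + ' hrs';
      }.property('service.nameNodeStartTime')
    });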
diff --git a/branch-1.2/ambari-web/app/templates/main/dashboard/service/hive.hbs b/branch-1.2/ambari-web/app/templates/main/dashboard/service/hive.hbs
deleted file mode 100644
index 1cb4412..0000000
--- a/branch-1.2/ambari-web/app/templates/main/dashboard/service/hive.hbs
+++ /dev/null
@@ -1,37 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<div class="clearfix">
-  <div class="name span2">
-    <i class="pull-left icon-empty"></i>
-    {{view App.MainDashboardServiceHealthView serviceBinding="view.service"}}
-    <a {{action selectService view.service href=true}}>{{view.service.displayName}}</a>
-    {{#if view.criticalAlertsCount}}
-      <a href="#" class="label label-important" {{action "showAlertsPopup" view.service target="view.parentView"}}>
-        {{view.criticalAlertsCount}}
-      </a>
-    {{/if}}
-  </div>
-  <div class="summary span">
-    {{#each component in view.titleMasters}}
-      <a href="#" {{action showDetails component.host}}>{{component.displayName}}</a>,
-    {{/each}}
-
-    <a href="#" {{action filterHosts view.clients.component}}>{{view.clients.title}}</a>
-  </div>
-</div>
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/templates/main/dashboard/service/mapreduce.hbs b/branch-1.2/ambari-web/app/templates/main/dashboard/service/mapreduce.hbs
deleted file mode 100644
index 8959600..0000000
--- a/branch-1.2/ambari-web/app/templates/main/dashboard/service/mapreduce.hbs
+++ /dev/null
@@ -1,144 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-{{#unless view.showOnlyRows}}
-<div class="clearfix like_pointer" {{action toggleInfoView target="view"}}>
-  <div class="name span2">
-    {{#if view.isCollapsed}}
-    <i class="icon-caret-down pull-left"></i>
-    {{else}}
-    <i class="icon-caret-right pull-left"></i>
-    {{/if}}
-    {{view App.MainDashboardServiceHealthView serviceBinding="view.service"}}
-    <a {{action selectService view.service href=true}}>{{view.service.displayName}}</a>
-    {{#if view.criticalAlertsCount}}
-      <a href="#" class="label label-important" {{action "showAlertsPopup" view.service target="view.parentView"}}>
-        {{view.criticalAlertsCount}}
-      </a>
-    {{/if}}
-  </div>
-  <div class="summary span">
-    {{view.summaryHeader}}
-  </div>
-</div>
-<div class="service-body">
-  <div class="span4">
-    <table class="table no-borders">
-      <tbody>
-    {{/unless}}
-      <!-- JobTracker -->
-      <tr>
-        <td>{{t services.service.summary.jobTracker}}</td>
-        <td><a href="#" {{action showDetails view.service.jobTracker}}>{{view.service.jobTracker.publicHostName}}</a></td>
-      </tr>
-      <!-- TaskTrackers -->
-      <tr>
-        <td>{{t dashboard.services.mapreduce.taskTrackers}}</td>
-        <td><a href="#" {{action filterHosts view.taskTrackerComponent}}>{{view.service.taskTrackers.length}} {{t dashboard.services.mapreduce.taskTrackers}}</a></td>
-      </tr>
-      <!-- Version -->
-      <tr>
-        <td>{{t dashboard.services.hdfs.version}}</td>
-        <td>{{view.version}}</td>
-      </tr>
-      <!-- JobTracker Web UI -->
-      <tr>
-        <td>{{t services.service.summary.jobTrackerWebUI}}</td>
-        <td><a {{bindAttr href="view.jobTrackerWebUrl"}} target="_blank">{{view.service.jobTracker.publicHostName}}:50030</a>
-        </td>
-      </tr>
-      <!-- Job Tracker Uptime -->
-      <tr>
-        <td>{{t dashboard.services.mapreduce.jobTrackerUptime}}</td>
-        <td>{{view.jobTrackerUptime}}</td>
-      </tr>
-      <!-- Trackers -->
-      <tr>
-        <td>{{t dashboard.services.mapreduce.trackers}}</td>
-        <td>{{view.trackersSummary}}</td>
-      </tr>
-      <!-- TaskTracker Counts -->
-      <tr>
-        <td>{{t dashboard.services.mapreduce.taskTrackerCounts}}</td>
-        <td>
-          {{view.service.grayListTrackers.length}} {{t dashboard.services.mapreduce.nodes.graylist}} /
-          {{view.service.blackListTrackers.length}} {{t dashboard.services.mapreduce.nodes.blacklist}} /
-          {{view.service.trackersDecommissioned}} {{t dashboard.services.hdfs.nodes.decom}}
-        </td>
-      </tr>
-      <!-- JobTracker Heap -->
-      <tr>
-        <td>{{t dashboard.services.mapreduce.jobTrackerHeap}}</td>
-        <td>{{view.trackersHeapSummary}}</td>
-      </tr>
-      <!-- Total slots capacity -->
-      <tr>
-        <td>{{t dashboard.services.mapreduce.slotCapacity}}</td>
-        <td>{{view.slotsCapacitySummary}}</td>
-      </tr>
-      <!-- Jobs -->
-      <tr>
-        <td>{{t dashboard.services.mapreduce.jobs}}</td>
-        <td>{{view.jobsSummary}}</td>
-      </tr>
-      <!-- Map Slots -->
-      <tr>
-        <td>{{t dashboard.services.mapreduce.mapSlots}}</td>
-        <td>{{view.mapSlotsSummary}}</td>
-      </tr>
-      <!-- Reduce Slots -->
-      <tr>
-        <td>{{t dashboard.services.mapreduce.reduceSlots}}</td>
-        <td>{{view.reduceSlotsSummary}}</td>
-      </tr>
-      <!-- Tasks:Maps -->
-      <tr>
-        <td>{{t dashboard.services.mapreduce.tasks.maps}}</td>
-        <td>{{view.mapTasksSummary}}</td>
-      </tr>
-      <!-- Tasks:Reduces -->
-      <tr>
-        <td>{{t dashboard.services.mapreduce.tasks.reduces}}</td>
-        <td>{{view.reduceTasksSummary}}</td>
-      </tr>
-
-    {{#unless view.showOnlyRows}}
-      </tbody>
-    </table>
-  </div>
-<div class="dashboard-mini-chart span2">
-  {{view App.ChartServiceMetricsMapReduce_TasksRunningWaiting}}
-  
-  {{#if view.service.quickLinks.length}}
-  {{#view App.QuickViewLinks contentBinding="view.service"}}
-  <div class="btn-group">
-    <a class="btn btn-mini dropdown-toggle" data-toggle="dropdown" href="#">
-      {{t common.quickLinks}}
-      <span class="caret"></span>
-    </a>
-    <ul class="dropdown-menu">
-      {{#each view.quickLinks}}
-      <li><a {{bindAttr href="url"}} target="_blank">{{label}}</a></li>
-      {{/each}}
-    </ul>
-  </div>
-  {{/view}}
-  {{/if}}
-</div>
-</div>
-{{/unless}}
\ No newline at end of file
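
The Trackers row above binds view.trackersSummary; a sketch of one plausible shape (the aliveTrackers property name is an assumption, not confirmed by this hunk):

    // Sketch only: a "live / total" trackers summary like the row above binds.
    App.MainDashboardServiceMapreduceView = App.MainDashboardServiceView.extend({
      trackersSummary: function () {
        var live = this.get('service.aliveTrackers.length') || 0;
        var total = this.get('service.taskTrackers.length') || 0;
        return live + '/' + total + ' live';
      }.property('service.aliveTrackers.length', 'service.taskTrackers.length')
    });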
diff --git a/branch-1.2/ambari-web/app/templates/main/dashboard/service/oozie.hbs b/branch-1.2/ambari-web/app/templates/main/dashboard/service/oozie.hbs
deleted file mode 100644
index e0db00b..0000000
--- a/branch-1.2/ambari-web/app/templates/main/dashboard/service/oozie.hbs
+++ /dev/null
@@ -1,40 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<div class="clearfix">
-  <div class="name span2">
-    <i class="pull-left icon-empty"></i>
-    {{view App.MainDashboardServiceHealthView serviceBinding="view.service"}}
-    <a {{action selectService view.service href=true}}>{{view.service.displayName}}</a>
-    {{#if view.criticalAlertsCount}}
-      <a href="#" class="label label-important" {{action "showAlertsPopup" view.service target="view.parentView"}}>
-        {{view.criticalAlertsCount}}
-      </a>
-    {{/if}}
-  </div>
-  <div class="summary span">
-    {{#each component in view.masters}}
-      <a href="#" {{action showDetails component.host}}>{{component.displayName}}</a>,
-    {{/each}}
-
-    <a href="#" {{action filterHosts view.clients.component}}>{{view.clients.title}}</a>,
-
-    <a href="{{unbound view.webUi}}" target="_blank">{{t dashboard.services.oozie.webUi}}</a>
-
-  </div>
-</div>
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/templates/main/dashboard/service/zookeeper.hbs b/branch-1.2/ambari-web/app/templates/main/dashboard/service/zookeeper.hbs
deleted file mode 100644
index 3a9a22c..0000000
--- a/branch-1.2/ambari-web/app/templates/main/dashboard/service/zookeeper.hbs
+++ /dev/null
@@ -1,35 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<div class="clearfix">
-  <div class="name span2">
-    <i class="pull-left icon-empty"></i>
-    {{view App.MainDashboardServiceHealthView serviceBinding="view.service"}}
-    <a {{action selectService view.service href=true}}>{{view.service.displayName}}</a>
-    {{#if view.criticalAlertsCount}}
-      <a href="#" class="label label-important" {{action "showAlertsPopup" view.service target="view.parentView"}}>
-        {{view.criticalAlertsCount}}
-      </a>
-    {{/if}}
-  </div>
-  <div class="summary span">
-    {{view.titleInfo.pre}}
-    <a href="#" {{action filterHosts view.titleInfo.component}}>{{view.titleInfo.title}}</a>
-    {{t dashboard.services.zookeeper.postfix}}
-  </div>
-</div>
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/templates/main/host.hbs b/branch-1.2/ambari-web/app/templates/main/host.hbs
deleted file mode 100644
index 0ec5e0f..0000000
--- a/branch-1.2/ambari-web/app/templates/main/host.hbs
+++ /dev/null
@@ -1,108 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<div id="hosts">
-
-  <div class="box-header row">
-    <div class="pull-left">
-        <span>{{t hosts.table.Search}}&nbsp;</span>{{view Ember.TextField valueBinding="view.globalSearchValue"}}
-    </div>
-    {{#if App.isAdmin}}
-    <div class="button-section pull-right">
-      <button class="btn btn-inverse add-host-button" {{action addHost}}>
-        <i class="icon-plus icon-white"></i>
-        {{t hosts.host.add}}
-      </button>
-    </div>
-    {{/if}}
-  </div>
-
-  <table class="datatable table table-bordered table-striped" id="hosts-table">
-    <thead>
-    <tr>
-        {{#view view.sortView contentBinding="view.filteredContent"}}
-          <th class="first"></th>
-          {{view view.parentView.nameSort}}
-          {{view view.parentView.ipSort}}
-          {{view view.parentView.cpuSort}}
-          {{view view.parentView.memorySort}}
-          {{view view.parentView.diskUsageSort}}
-          {{view view.parentView.loadAvgSort}}
-          <th>{{t common.components}}</th>
-        {{/view}}
-    </tr>
-    <tr>
-      <th class="first">&nbsp;</th>
-      <th>{{view view.nameFilterView}}</th>
-      <th>{{view view.ipFilterView}}</th>
-      <th>{{view view.cpuFilterView}}</th>
-      <th>{{view view.ramFilterView}}</th>
-      <th></th>
-      <th>{{view view.loadAvgFilterView}}</th>
-      <th>{{view view.componentsFilterView}}</th>
-    </tr>
-    </thead>
-    <tbody>
-    {{#if view.pageContent}}
-    {{#each host in view.pageContent}}
-    {{#view view.HostView contentBinding="host"}}
-      <td class="first">
-        <span {{bindAttr class="host.healthClass"}} {{bindAttr title="host.healthToolTip" }}></span>
-      </td>
-      <td class="name">
-        <a title="{{unbound host.publicHostName}}" href="#" {{action "showDetails" host}}>{{unbound host.publicHostNameFormatted}}</a>
-        {{#if host.criticalAlertsCount}}
-          <span class="label label-important alerts-count" {{action "showAlertsPopup" host target="controller"}}>{{host.criticalAlertsCount}}</span>
-        {{/if}}
-      </td>
-      <td>{{host.ip}}</td>
-      <td>{{host.cpu}}</td>
-      <td>{{host.memoryFormatted}}</td>
-      <td>
-        <div class="progress progress-info" title="{{unbound host.diskInfoBar}}">
-          <div class="bar" {{bindAttr style="view.usageStyle"}}></div>
-        </div>
-      </td>
-      <td>{{host.loadAvg}}</td>
-      <td>
-        <span title="{{unbound view.labels}}">{{view.shortLabels}}</span>
-      </td>
-    {{/view}}
-    {{/each}}
-    {{else}}
-    <tr>
-        <td class="first"></td>
-        <td colspan="7">
-            {{t hosts.table.noHosts}}
-        </td>
-    </tr>
-    {{/if}}
-    </tbody>
-  </table>
-
-  <div class="page-bar">
-    <div class="items-on-page">
-      <label>{{t common.show}}: {{view view.hostPerPageSelectView selectionBinding="view.displayLength"}}</label>
-    </div>
-    <div class="info">{{view.paginationInfo}}</div>
-    <div class="paging_two_button">
-      {{view view.paginationLeft}}
-      {{view view.paginationRight}}
-    </div>
-  </div>
-</div>
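
The page bar above binds view.paginationInfo alongside the per-page selector; a sketch of the summary property (App.TableView as a base class and the index property names are assumptions):

    // Sketch only: the "start - end of total" pagination summary bound above.
    App.MainHostView = App.TableView.extend({
      paginationInfo: function () {
        var total = this.get('filteredContent.length') || 0;
        return this.get('startIndex') + ' - ' + this.get('endIndex') + ' of ' + total;
      }.property('startIndex', 'endIndex', 'filteredContent.length')
    });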
diff --git a/branch-1.2/ambari-web/app/templates/main/host/add.hbs b/branch-1.2/ambari-web/app/templates/main/host/add.hbs
deleted file mode 100644
index a70e059..0000000
--- a/branch-1.2/ambari-web/app/templates/main/host/add.hbs
+++ /dev/null
@@ -1,46 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<div id="add-host">
-  <div class="container">
-    <div class="container-fluid">
-
-      <!--<a class="btn back" {{action backToHostsList}}>← Back to Hosts</a>-->
-
-      <div class="row-fluid">
-        <div class="span3">
-          <!--Sidebar content-->
-          <div class="well">
-            <ul class="nav nav-pills nav-stacked">
-              <li class="nav-header">{{t hosts.add.header}}</li>
-              <li {{bindAttr class="isStep1:active view.isStep1Disabled:disabled"}}><a href="javascript:void(null);"  {{action gotoStep1 target="controller"}}>{{t installer.step2.header}}</a></li>
-              <li {{bindAttr class="isStep2:active view.isStep2Disabled:disabled"}}><a href="javascript:void(null);"  {{action gotoStep2 target="controller"}}>{{t installer.step3.header}}</a></li>
-              <li {{bindAttr class="isStep3:active view.isStep3Disabled:disabled"}}><a href="javascript:void(null);"  {{action gotoStep3 target="controller"}}>{{t installer.step6.header}}</a></li>
-              <li {{bindAttr class="isStep4:active view.isStep4Disabled:disabled"}}><a href="javascript:void(null);"  {{action gotoStep4 target="controller"}}>{{t installer.step8.header}}</a></li>
-              <li {{bindAttr class="isStep5:active view.isStep5Disabled:disabled"}}><a href="javascript:void(null);"  {{action gotoStep5 target="controller"}}>{{t installer.step9.header}}</a></li>
-              <li {{bindAttr class="isStep6:active view.isStep6Disabled:disabled"}}><a href="javascript:void(null);"  {{action gotoStep6 target="controller"}}>{{t installer.step10.header}}</a></li>
-            </ul>
-          </div>
-        </div>
-        <div id="add-host-content" class="well span9">
-          {{outlet}}
-        </div>
-      </div>
-    </div>
-  </div>
-</div>
diff --git a/branch-1.2/ambari-web/app/templates/main/host/alerts_popup.hbs b/branch-1.2/ambari-web/app/templates/main/host/alerts_popup.hbs
deleted file mode 100644
index 338a225..0000000
--- a/branch-1.2/ambari-web/app/templates/main/host/alerts_popup.hbs
+++ /dev/null
@@ -1,27 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-{{#if view.hostAlerts.length}}
-<ul id='summary-alerts-list' class="alerts">
-  {{#each view.hostAlerts}}
-  {{view App.AlertItemView contentBinding="this"}}
-  {{/each}}
-</ul>
-{{else}}
-  {{t hosts.host.alert.noAlerts.message}}
-{{/if}}
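
view.hostAlerts above would simply proxy the host model's alert records; a sketch (the view class name and the 'host' binding are assumptions):

    // Sketch only: exposing a host's alert records to the popup body.
    App.HostAlertsPopupBodyView = Em.View.extend({
      hostAlerts: function () {
        var host = this.get('host');
        return host ? host.get('alerts') : [];
      }.property('host.alerts')
    });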
diff --git a/branch-1.2/ambari-web/app/templates/main/host/component_filter.hbs b/branch-1.2/ambari-web/app/templates/main/host/component_filter.hbs
deleted file mode 100644
index 593be59..0000000
--- a/branch-1.2/ambari-web/app/templates/main/host/component_filter.hbs
+++ /dev/null
@@ -1,73 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-    <button class="btn btn-info single-btn-group" {{action "clickFilterButton" target="view"}}>
-        {{t common.components}} <span class="caret"></span>
-    </button>
-    <ul class="dropdown-menu filter-components">
-        <li>
-            <ul>
-                <li>
-                    <label class="checkbox">
-                        {{view Ember.Checkbox checkedBinding="view.masterComponentsChecked"}} {{t host.host.componentFilter.master}}:
-                    </label>
-                    <ul>
-                        {{#each component in masterComponents}}
-                            <li>
-                                <label class="checkbox">
-                                    {{view Ember.Checkbox checkedBinding="component.checkedForHostFilter" }} {{unbound component.displayName}}
-                                </label>
-                            </li>
-                        {{/each}}
-                    </ul>
-                </li>
-                <li>
-                    <label class="checkbox">
-                        {{view Ember.Checkbox checkedBinding="view.slaveComponentsChecked"}} {{t host.host.componentFilter.slave}}:
-                    </label>
-                    <ul>
-                        {{#each component in slaveComponents}}
-                            <li>
-                                <label class="checkbox">
-                                    {{view Ember.Checkbox checkedBinding="component.checkedForHostFilter" }} {{unbound component.displayName}}
-                                </label>
-                            </li>
-                        {{/each}}
-                    </ul>
-                </li>
-                <li>
-                    <label class="checkbox">
-                        {{view Ember.Checkbox checkedBinding="view.clientComponentsChecked"}} {{t host.host.componentFilter.client}}:
-                    </label>
-                    <ul>
-                        {{#each component in clientComponents}}
-                            <li>
-                                <label class="checkbox">
-                                    {{view Ember.Checkbox checkedBinding="component.checkedForHostFilter" }} {{unbound component.displayName}}
-                                </label>
-                            </li>
-                        {{/each}}
-                    </ul>
-                </li>
-            </ul>
-        </li>
-        <li>
-            <button class="btn" {{action "closeFilter" target="view"}}>{{t common.cancel}}</button>
-            <button class="btn btn-primary" {{action "applyFilter" target="view"}}>{{t common.apply}}</button>
-        </li>
-    </ul>
\ No newline at end of file
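
The Apply and Cancel buttons above target the filter view; a sketch of the handlers (the view class name and observable names are assumptions):

    // Sketch only: Apply writes the checked component ids into the table filter,
    // Cancel just closes the dropdown.
    App.ComponentsFilterView = Em.View.extend({
      applyFilter: function () {
        var checked = this.get('components').filterProperty('checkedForHostFilter', true);
        this.set('value', checked.mapProperty('id'));
        this.closeFilter();
      },
      closeFilter: function () {
        this.set('isFilterOpen', false);
      }
    });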
diff --git a/branch-1.2/ambari-web/app/templates/main/host/details.hbs b/branch-1.2/ambari-web/app/templates/main/host/details.hbs
deleted file mode 100644
index 9c7a1e4..0000000
--- a/branch-1.2/ambari-web/app/templates/main/host/details.hbs
+++ /dev/null
@@ -1,49 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<div id="host-details">
-  <span {{bindAttr class="view.content.healthClass"}} {{bindAttr title="view.content.healthToolTip" }}></span><span class='host-title'>{{unbound view.content.publicHostName}}</span>
-  {{#if view.content.criticalAlertsCount}}
-    <span class="label label-important alerts-count" {{action "showAlertsPopup" content target="App.router.mainHostController"}}>{{view.content.criticalAlertsCount}}</span>
-  {{else}}
-    <span class="label label-success alerts-count" {{action "showAlertsPopup" content target="App.router.mainHostController"}}>{{t hosts.host.alert.noAlerts}}</span>
-  {{/if}}
-  <div><a href="javascript:void(null)" data-toggle="modal" {{action backToHostsList}}><i class="icon-arrow-left"></i>&nbsp;{{t hosts.host.back}}</a></div>
-<!--   {{#if App.isAdmin}} -->
-<!--   <div class="host-maintenance"> -->
-<!--     <div class="host-maintenance-btn btn-group display-inline-block"> -->
-<!--       <a class="btn dropdown-toggle" data-toggle="dropdown" href="#"> -->
-<!--         {{t services.service.actions.maintenance}} -->
-<!--         <span class="caret"></span> -->
-<!--       </a> -->
-<!--       <ul class="dropdown-menu"> -->
-<!--       dropdown menu links -->
-<!--         {{#each option in view.maintenance}} -->
-<!--         <li> -->
-<!--         <a {{action validateDeletion target="controller"}} href="#">{{option.label}}</a> -->
-<!--         </li> -->
-<!--         {{/each}} -->
-<!--       </ul> -->
-<!--     </div> -->
-<!--   </div> -->
-<!--   {{/if}} -->
-  <div class="content">
-    {{view App.MainHostMenuView}}
-    {{outlet}}
-  </div>
-</div>
diff --git a/branch-1.2/ambari-web/app/templates/main/host/metrics.hbs b/branch-1.2/ambari-web/app/templates/main/host/metrics.hbs
deleted file mode 100644
index 7c91783..0000000
--- a/branch-1.2/ambari-web/app/templates/main/host/metrics.hbs
+++ /dev/null
@@ -1,46 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<div class="host-metrics">
-    <table class="graphs">
-        <tr>
-            <td>
-              {{view App.ChartHostMetricsCPU contentBinding="view.content"}}
-            </td>
-            <td>
-              {{view App.ChartHostMetricsDisk contentBinding="view.content"}}
-            </td>
-        </tr>
-        <tr>
-            <td>
-              {{view App.ChartHostMetricsLoad contentBinding="view.content"}}
-            </td>
-            <td>
-              {{view App.ChartHostMetricsMemory contentBinding="view.content"}}
-            </td>
-        </tr>
-        <tr>
-            <td>
-              {{view App.ChartHostMetricsNetwork contentBinding="view.content"}}
-            </td>
-            <td>
-              {{view App.ChartHostMetricsProcesses contentBinding="view.content"}}
-            </td>
-        </tr>
-    </table>
-</div>
\ No newline at end of file
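
Each App.ChartHostMetrics* view above specializes a shared time-series chart for one metric group; a sketch, assuming a base class along the lines of App.ChartLinearTimeView and a hypothetical URL shape:

    // Sketch only: one of the six host-metric charts laid out above.
    App.ChartHostMetricsCPU = App.ChartLinearTimeView.extend({
      id: 'host-metrics-cpu',
      title: 'CPU Usage',
      sourceUrl: function () {
        // content is the host model bound in the template above
        return '/api/v1/hosts/' + this.get('content.hostName') + '?fields=metrics/cpu';
      }.property('content.hostName')
    });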
diff --git a/branch-1.2/ambari-web/app/templates/main/host/summary.hbs b/branch-1.2/ambari-web/app/templates/main/host/summary.hbs
deleted file mode 100644
index e79101b..0000000
--- a/branch-1.2/ambari-web/app/templates/main/host/summary.hbs
+++ /dev/null
@@ -1,129 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<div class="row-fluid">
-  <div class="span12">
-  <div class="span6">
-  <div class="host-configuration">
-    <div class="box">
-		  <div class="box-header">
-		    <h4>{{t hosts.host.summary.header}}</h4>
-		  </div>
-		  <div class="host-summary-content">
-		    <dl class="dl-horizontal">
-          <dt>{{t hosts.host.summary.hostname}}:</dt><dd>&nbsp;{{view.content.publicHostName}}</dd>
-		      <dt>{{t common.ipAddress}}:</dt><dd>&nbsp;{{view.content.ip}}</dd>
-          <dt>{{t common.os}}:</dt><dd>&nbsp;{{view.content.osType}}&nbsp;({{view.content.osArch}})</dd>
-		      <dt>{{t common.cpu}}:</dt><dd>&nbsp;{{view.content.cpu}}</dd>
-          <dt>{{t common.disk}}:</dt><dd>&nbsp;{{view.content.diskInfoBar}}</dd>
-          <dt>{{t common.memory}}:</dt><dd>&nbsp;{{view.content.memoryFormatted}}</dd>
-		      <dt>{{t common.loadAvg}}:</dt><dd>&nbsp;{{view.content.loadAvg}}</dd>
-		      <dt>{{t hosts.host.summary.agentHeartbeat}}:</dt><dd>{{view.timeSinceHeartBeat}}</dd>
-		    </dl>
-		  </div>
-	  </div>
-  </div>
-    {{!components}}
-      <div class="box">
-        <div class="box-header">
-          <h4>Components</h4>
-        </div>
-        {{#if view.sortedComponents.length}}
-        <div class="host-components">
-          {{#each component in view.sortedComponents}}
-          <div class="row-fluid">
-          {{#view view.ComponentView contentBinding="component" decommissionDataNodeHostNamesBinding="view.decommissionDataNodeHostNames"}}
-            <div class="span7">
-              <span {{bindAttr class="view.statusClass :components-health"}}></span>&nbsp;
-              {{component.displayName}}&nbsp;/&nbsp;
-              <a href="#" {{action routeToService component.service target="controller"  }}>{{component.service.displayName}}</a>
-            </div>
-            <div class="span5">
-              {{#if App.isAdmin}}
-              <div class="btn-group">
-                <a {{ bindAttr class=":btn :dropdown-toggle view.disabledClass"}} data-toggle="dropdown">
-                  {{t common.action}}
-                  <span class="caret pull-right"></span>
-                </a>
-                <ul class="dropdown-menu">
-                  {{#if view.isDataNode}}
-                  <li {{bindAttr class="view.isDataNodeDecommissionAvailable::hidden"}}>
-                    <a href="javascript:void(null)" data-toggle="modal" {{action "decommission" view.content target="controller"}}>
-                      {{t common.decommission}}
-                    </a>
-                  </li>
-                  <li {{bindAttr class="view.isDataNodeRecommissionAvailable::hidden"}}>
-                    <a href="javascript:void(null)" data-toggle="modal" {{action "recommission" view.content target="controller"}}>
-                      {{t common.recommission}}
-                    </a>
-                  </li>
-                  {{/if}}
-                  <li {{bindAttr class="view.isStart::hidden"}}>
-                    <a href="javascript:void(null)" data-toggle="modal" {{action "stopComponent" view.content target="controller"}}>
-                      {{t common.stop}}
-                    </a>
-                  </li>
-                  <li {{bindAttr class="view.isStart:hidden:"}}>
-                    <a href="javascript:void(null)" data-toggle="modal" {{action "startComponent" view.content target="controller"}}>
-                      {{t common.start}}
-                    </a>
-                  </li>
-                </ul>
-              </div>
-              {{/if}}
-            </div>
-          {{/view}}
-          </div>
-          {{/each}}
-          {{#if view.clients.length}}
-          <div class="clients row-fluid">
-            <div class="span8 row">
-              <div class="span2">{{t common.clients}}&nbsp;/&nbsp;</div>
-              <div class="span8">
-                {{#each component in view.clients}}
-                {{#if component.isLast}}
-                {{component.displayName}}
-                {{else}}
-                {{component.displayName}},
-                {{/if}}
-                {{/each}}
-              </div>
-            </div>
-          </div>
-          {{/if}}
-
-        </div>
-        {{/if}}
-      </div>
-    </div>
-  {{!metrics}}
- <div class="span6">
-    <div class="box">
-      <div class="box-header">
-        <h4>{{t hosts.host.summary.hostMetrics}}</h4>
-        <div class="btn-group">
-          <a class="btn" rel="tooltip" title="Go to Ganglia" {{action "showGangliaCharts" target="view"}}><i class="icon-link"></i></a>
-        </div>
-      </div>
-      <div>
-	     {{view App.MainHostMetricsView contentBinding="view.content"}}
-	    </div>
-	  </div>
-  </div>
-</div>
-</div>
\ No newline at end of file
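
view.timeSinceHeartBeat above renders the elapsed time since the agent last checked in; a sketch (the view class name and lastHeartBeatTime property name are assumptions):

    // Sketch only: human-readable time since the agent's last heartbeat.
    App.MainHostSummaryView = Em.View.extend({
      timeSinceHeartBeat: function () {
        var last = this.get('content.lastHeartBeatTime');
        if (!last) {
          return 'unknown';
        }
        var seconds = Math.round((new Date().getTime() - last) / 1000);
        return seconds + ' seconds ago';
      }.property('content.lastHeartBeatTime')
    });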
diff --git a/branch-1.2/ambari-web/app/templates/main/menu.hbs b/branch-1.2/ambari-web/app/templates/main/menu.hbs
deleted file mode 100644
index 141290e..0000000
--- a/branch-1.2/ambari-web/app/templates/main/menu.hbs
+++ /dev/null
@@ -1,23 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<ul class="nav nav-tabs">
-    {{#each view.items}}
-    <li {{bindAttr class="active"}} ><a {{action navigate routing }} href="#">{{unbound label}}</a></li>
-    {{/each}}
-</ul>
\ No newline at end of file
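
The tabs above iterate view.items, binding each item's 'active' value as the li class; a sketch with illustrative entries (not the actual menu definition):

    // Sketch only: the nav-item collection behind {{#each view.items}}.
    App.MainMenuView = Em.View.extend({
      items: [
        Em.Object.create({ label: 'Dashboard', routing: 'dashboard', active: 'active' }),
        Em.Object.create({ label: 'Services',  routing: 'services',  active: '' }),
        Em.Object.create({ label: 'Hosts',     routing: 'hosts',     active: '' })
      ]
    });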
diff --git a/branch-1.2/ambari-web/app/templates/main/menu_item.hbs b/branch-1.2/ambari-web/app/templates/main/menu_item.hbs
deleted file mode 100644
index 93e2a39..0000000
--- a/branch-1.2/ambari-web/app/templates/main/menu_item.hbs
+++ /dev/null
@@ -1,31 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<a href="#/main/{{unbound view.content.routing}}">
-  {{unbound view.content.label}}
-  {{#if view.alertsCount}}
-    <span {{action "filterByAlerts" target="App.router.mainHostController"}} class="label label-important alerts-count">
-      {{view.alertsCount}}
-    </span>
-  {{/if}}
-  <!--{{#if view.hostDetailsOperationsCount}}-->
-    <!--<span class="label operations-count" {{action "showBackgroundOperationsPopup" target="App.router.mainHostDetailsController"}}>-->
-      <!--{{view.hostDetailsOperationsCount}}-->
-    <!--</span>-->
-  <!--{{/if}}-->
-</a>
diff --git a/branch-1.2/ambari-web/app/templates/main/service.hbs b/branch-1.2/ambari-web/app/templates/main/service.hbs
deleted file mode 100644
index 7424675..0000000
--- a/branch-1.2/ambari-web/app/templates/main/service.hbs
+++ /dev/null
@@ -1,36 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<div class="row-fluid">
-  <div id="services-menu" class="well span2" style="padding: 8px 0">
-    {{view App.MainServiceMenuView}}
-    {{#if App.isAdmin}}
-    {{#if App.addServicesEnabled}}
-    <div class="add-service-button">
-      <a class="btn" {{action addService href="true"}}>
-        <i class="icon-plus"></i>
-        {{t services.service.add}}
-      </a>
-    </div>
-    {{/if}}
-    {{/if}}
-  </div>
-  <div class="span10">
-    {{outlet}}
-  </div>
-</div>
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/templates/main/service/add.hbs b/branch-1.2/ambari-web/app/templates/main/service/add.hbs
deleted file mode 100644
index 5726147..0000000
--- a/branch-1.2/ambari-web/app/templates/main/service/add.hbs
+++ /dev/null
@@ -1,47 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<div id="add-service">
-  <div class="container">
-    <div class="container-fluid">
-
-      <!--<a class="btn back" {{action backToServices}}>← Back to Services</a>-->
-
-      <div class="row-fluid">
-        <div class="span3">
-          <!--Sidebar content-->
-          <div class="well">
-            <ul class="nav nav-pills nav-stacked">
-              <li class="nav-header">{{t services.add.header}}</li>
-              <li {{bindAttr class="isStep1:active view.isStep1Disabled:disabled"}}><a href="javascript:void(null);"  {{action gotoStep1 target="controller"}}>{{t installer.step4.header}}</a></li>
-              <li {{bindAttr class="isStep2:active view.isStep2Disabled:disabled"}}><a href="javascript:void(null);"  {{action gotoStep2 target="controller"}}>{{t installer.step5.header}}</a></li>
-              <li {{bindAttr class="isStep3:active view.isStep3Disabled:disabled"}}><a href="javascript:void(null);"  {{action gotoStep3 target="controller"}}>{{t installer.step6.header}}</a></li>
-              <li {{bindAttr class="isStep4:active view.isStep4Disabled:disabled"}}><a href="javascript:void(null);"  {{action gotoStep4 target="controller"}}>{{t installer.step7.header}}</a></li>
-              <li {{bindAttr class="isStep5:active view.isStep5Disabled:disabled"}}><a href="javascript:void(null);"  {{action gotoStep5 target="controller"}}>{{t installer.step8.header}}</a></li>
-              <li {{bindAttr class="isStep6:active view.isStep6Disabled:disabled"}}><a href="javascript:void(null);"  {{action gotoStep6 target="controller"}}>{{t installer.step9.header}}</a></li>
-              <li {{bindAttr class="isStep7:active view.isStep7Disabled:disabled"}}><a href="javascript:void(null);"  {{action gotoStep7 target="controller"}}>{{t installer.step10.header}}</a></li>
-            </ul>
-          </div>
-        </div>
-        <div id="add-service-content" class="well span9">
-          {{outlet}}
-        </div>
-      </div>
-    </div>
-  </div>
-</div>
diff --git a/branch-1.2/ambari-web/app/templates/main/service/info/configs.hbs b/branch-1.2/ambari-web/app/templates/main/service/info/configs.hbs
deleted file mode 100644
index a6bfb45..0000000
--- a/branch-1.2/ambari-web/app/templates/main/service/info/configs.hbs
+++ /dev/null
@@ -1,67 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<div id="serviceConfig">
-  {{#if dataIsLoaded}}
-    <div class="accordion">
-      {{#each category in selectedService.configCategories}}
-          <div class="accordion-group {{unbound category.name}}">
-              <div class="accordion-heading" {{action "onToggleBlock" category target="view"}}>
-                {{#if category.isCollapsed}}
-                    <i class='icon-caret-right pull-left accordion-toggle'></i>
-                {{else}}
-                    <i class='icon-caret-down pull-left accordion-toggle'></i>
-                {{/if}}
-                  <a class="accordion-toggle">
-                    {{category.name}}
-                  </a>
-              </div>
-
-
-            {{#view App.ServiceConfigsByCategoryView categoryBinding="category" serviceConfigsBinding="selectedService.configs"}}
-                <form class="form-horizontal">
-
-                  {{#each view.categoryConfigs}}
-                    {{#if isVisible}}
-                        <div {{bindAttr class="errorMessage:error: :control-group"}}>
-                            <label class="control-label">{{displayName}}</label>
-
-                            <div class="controls">
-                              {{view viewClass serviceConfigBinding="this" categoryConfigsBinding="view.categoryConfigs"}}
-                                <span class="help-inline">{{errorMessage}}</span>
-                            </div>
-                        </div>
-                    {{/if}}
-                  {{/each}}
-
-                </form>
-            {{/view}}
-          </div>
-      {{/each}}
-    </div>
-    {{#if App.isAdmin}}
-    <p class="pull-right">
-        <!--<input class="btn btn-primary" type="button" value="Save" {{!bindAttr disabled="isSubmitDisabled"}} />-->
-        <a class="btn btn-primary" {{bindAttr disabled="isSubmitDisabled"}}
-          {{action restartServicePopup target="controller"}}>{{t common.save}}</a>
-    </p>
-    {{/if}}
-  {{else}}
-    <div class="spinner"></div>
-  {{/if}}
-</div>
\ No newline at end of file
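
The accordion heading above fires onToggleBlock with the category as action context; a sketch of the handler (the view class name is an assumption; in Ember of this vintage the context arrives on the event object):

    // Sketch only: flipping a config category's collapsed flag, which the
    // template's {{#if category.isCollapsed}} caret logic reacts to.
    App.MainServiceInfoConfigsView = Em.View.extend({
      onToggleBlock: function (event) {
        var category = event.context;
        category.set('isCollapsed', !category.get('isCollapsed'));
      }
    });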
diff --git a/branch-1.2/ambari-web/app/templates/main/service/info/summary.hbs b/branch-1.2/ambari-web/app/templates/main/service/info/summary.hbs
deleted file mode 100644
index 42ea448..0000000
--- a/branch-1.2/ambari-web/app/templates/main/service/info/summary.hbs
+++ /dev/null
@@ -1,172 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<div class="row-fluid service-block">
-<div class="span6">
-{{#if view.service.quickLinks.length}}
-{{#view App.QuickViewLinks contentBinding="view.service"}}
-<ul class="nav nav-pills move">
-  <li class="dropdown">
-    <a class="dropdown-toggle" data-toggle="dropdown" href="#">{{t common.quickLinks}}<b class="caret"></b></a>
-    <ul class="dropdown-menu">
-      {{#each view.quickLinks}}
-      <li><a {{bindAttr href="url"}} {{bindAttr target="view.linkTarget"}}>{{label}}</a></li>
-      {{/each}}
-    </ul>
-  </li>
-</ul>
-{{/view}}
-{{/if}}
-
-<div class="box">
-  <div class="box-header">
-    <h4>{{controller.content.label}} {{t services.service.info.menu.summary}}</h4>
-  </div>
-  <div class="service-content">
-    <table id="summary-info" class="table no-borders table-condensed">
-      <tbody>
-      {{#unless view.serviceStatus.oozie}}
-      {{#unless view.serviceStatus.hive}}
-      {{#unless view.serviceStatus.zookeeper}}
-      {{#unless view.serviceStatus.hdfs}}
-      {{#unless view.serviceStatus.mapreduce}}
-      {{#unless view.serviceStatus.hbase}}
-      {{#unless view.serviceStatus.ganglia}}
-      {{#each component in controller.content.hostComponents}}
-        <tr>
-          {{#if component.isMaster}}
-            <td class="summary-label">{{component.displayName}}</td>
-            <td><a {{action selectHost component.host}} href="javascript:void(null)">{{component.host.publicHostName}}</a></td>
-          {{else}}
-            <td class="summary-label">{{component.displayName}}s</td>
-            <td><a {{action filterHosts component}} href="javascript:void(null)">{{component.displayName}}s</a></td>
-          {{/if}}
-        </tr>
-      {{/each}}
-      {{#if view.serviceStatus.nagios}}
-        <tr>
-          <td class="summary-label">{{t services.service.info.summary.nagiosWebUI}}</td>
-          <td><a target="_blank" href="http://{{unbound view.nagiosServer}}/nagios">{{view.nagiosServer}}/nagios</a></td>
-        </tr>
-      {{/if}}
-      {{/unless}}
-      {{/unless}}
-      {{/unless}}
-      {{/unless}}
-      {{/unless}}
-      {{/unless}}
-      {{/unless}}
-      {{#if view.serviceStatus.hdfs}}
-        {{view App.MainDashboardServiceHdfsView showOnlyRows=true serviceBinding="view.service"}}
-      {{/if}}
-      {{#if view.serviceStatus.mapreduce}}
-        {{view App.MainDashboardServiceMapreduceView showOnlyRows=true serviceBinding="view.service"}}
-      {{/if}}
-      {{#if view.serviceStatus.hbase}}
-        {{view App.MainDashboardServiceHbaseView showOnlyRows=true serviceBinding="view.service"}}
-      {{/if}}
-      {{#if view.serviceStatus.zookeeper}}
-        {{template "templates/main/service/info/summary/zookeeper"}}
-      {{/if}}
-      {{#if view.serviceStatus.oozie}}
-        {{template "templates/main/service/info/summary/oozie"}}
-      {{/if}}
-      {{#if view.serviceStatus.ganglia}}
-        {{template "templates/main/service/info/summary/ganglia"}}
-      {{/if}}
-      {{#if view.serviceStatus.hive}}
-		    {{#each component in view.hiveComponentsInfo}}
-		    <tr>
-		      <td>
-		        {{component.label}}
-		      </td>
-		      <td>
-	          <a href="#" {{action selectHost component.host}}>{{component.host.publicHostName}}</a>
-		      </td>
-		    </tr>
-		    {{/each}}
-      {{/if}}
-      </tbody>
-    </table>
-    {{!view view.moreStatsView}}
-  </div>
-  {{!
-      <div class="box-footer">
-      </div>
-      }}
-</div>
-</div>
-<div class="span6">
-	<div class="box">
-		<div class="box-header">
-			<h4>{{t services.alerts.headingOfList}}</h4>
-			<div class="btn-group">
-        <a class="btn" target="_blank" rel="tooltip" title="Go to Nagios" {{bindAttr href="controller.nagiosUrl"}}><i class="icon-link"></i></a>
-			</div>
-		</div>
-		<ul id='summary-alerts-list' class="alerts">
-		  {{#if controller.alerts.length}}
-				{{#each controller.alerts}}
-          {{view App.AlertItemView contentBinding="this"}}
-				{{/each}}
-			{{else}}
-		    {{#if controller.isNagiosInstalled}}
-			    <div class="alert alert-info">
-			      {{t services.service.info.summary.nagios.noAlerts}}
-			    </div>
-			  {{else}}
-			    <div class="alert">
-			      {{t services.service.info.summary.nagios.alerts}}
-			    </div>
-			  {{/if}}
-			{{/if}}
-		</ul>
-	</div>
-</div>
-</div>
-{{#if view.serviceMetricGraphs.length}}
-<div class="row-fluid">
-  <div class="span12">
-    <div class="box">
-      <div class="box-header">
-        <h4>{{controller.content.label}} {{t common.metrics}}</h4>
-        <div class="btn-group">
-          <a class="btn" target="_blank" rel="tooltip" title="Go to Ganglia" {{bindAttr href="view.gangliaUrl"}}><i class="icon-link"></i></a>
-        </div>
-      </div>
-      
-      <div class="">
-        <table class="graphs">
-          {{#each graphs in view.serviceMetricGraphs}}
-            <tr>
-              {{#each graph in graphs}}
-                <td>
-                  <div class="">
-                    {{view graph}}
-                  </div>
-                </td>
-              {{/each}}
-            </tr>
-          {{/each}}
-        </table>
-      </div>
-      
-    </div>
-  </div>
-</div>
-{{/if}}
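
The cascade of {{#unless}}/{{#if}} blocks above keys off view.serviceStatus flags; a sketch of how such flags could be derived (the mapping and class name are assumptions):

    // Sketch only: per-service boolean flags matching the template's
    // view.serviceStatus.* checks, derived from the selected service's name.
    App.MainServiceInfoSummaryView = Em.View.extend({
      serviceStatus: function () {
        var name = (this.get('controller.content.serviceName') || '').toLowerCase();
        return {
          hdfs: name === 'hdfs',
          mapreduce: name === 'mapreduce',
          hbase: name === 'hbase',
          zookeeper: name === 'zookeeper',
          oozie: name === 'oozie',
          hive: name === 'hive',
          ganglia: name === 'ganglia',
          nagios: name === 'nagios'
        };
      }.property('controller.content.serviceName')
    });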
diff --git a/branch-1.2/ambari-web/app/templates/main/service/info/summary/ganglia.hbs b/branch-1.2/ambari-web/app/templates/main/service/info/summary/ganglia.hbs
deleted file mode 100644
index 1c62565..0000000
--- a/branch-1.2/ambari-web/app/templates/main/service/info/summary/ganglia.hbs
+++ /dev/null
@@ -1,33 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-{{#each component in controller.content.hostComponents}}
-  {{#if component.isMaster}}
-    <tr>
-        <td class="summary-label">{{component.displayName}}</td>
-        <td><a {{action selectHost component.host}} href="javascript:void(null)">{{component.host.publicHostName}}</a></td>
-    </tr>
-  {{/if}}
-{{/each}}
-<tr>
-  <td class="summary-label">{{t dashboard.services.ganglia.monitors}}</td>
-  <td><a {{action filterHosts view.monitorsObj}} href="javascript:void(null)">{{view.monitors}}</a></td>
-</tr>
-<tr>
-  <td class="summary-label">{{t dashboard.services.ganglia.webUi}}</td>
-  <td><a target=_blank href="http://{{unbound view.gangliaServer}}/ganglia">{{view.gangliaServer}}/ganglia</a></td>
-</tr>
diff --git a/branch-1.2/ambari-web/app/templates/main/service/info/summary/oozie.hbs b/branch-1.2/ambari-web/app/templates/main/service/info/summary/oozie.hbs
deleted file mode 100644
index 685c39b..0000000
--- a/branch-1.2/ambari-web/app/templates/main/service/info/summary/oozie.hbs
+++ /dev/null
@@ -1,36 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-{{#each component in controller.content.hostComponents}}
-  {{#if component.isMaster}}
-    <tr>
-        <td class="summary-label">{{t common.servers}}</td>
-        <td><a {{action selectHost component.host}} href="javascript:void(null)">{{component.host.publicHostName}}</a></td>
-    </tr>
-  {{/if}}
-{{/each}}
-<tr>
-  <td class="summary-label">{{t common.clients}}</td>
-  <td>
-    <a {{action filterHosts view.clientObj}} href="javascript:void(null)">{{view.clients.length}} Oozie Client{{#if view.hasManyClients}}s{{/if}}</a>
-  </td>
-</tr>
-<tr>
-  <td class="summary-label">{{t dashboard.services.oozie.webUi}}</td>
-  <td><a target="_blank" href="http://{{unbound view.oozieServer}}:11000/oozie">{{view.oozieServer}}/oozie</a></td>
-</tr>
diff --git a/branch-1.2/ambari-web/app/templates/main/service/info/summary/zookeeper.hbs b/branch-1.2/ambari-web/app/templates/main/service/info/summary/zookeeper.hbs
deleted file mode 100644
index 98bd5b8..0000000
--- a/branch-1.2/ambari-web/app/templates/main/service/info/summary/zookeeper.hbs
+++ /dev/null
@@ -1,31 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<tr>
-  <td class="summary-label">{{t common.servers}}</td>
-  <td>
-      <a {{action filterHosts view.serversHost}} href="javascript:void(null)">{{view.servers.length}} ZooKeeper Server{{#if view.hasManyServers}}s{{/if}}</a>
-  </td>
-</tr>
-
-<tr>
-  <td class="summary-label">{{t common.clients}}</td>
-  <td>
-      <a {{action filterHosts view.clientObj}} href="javascript:void(null)">{{view.clients.length}} ZooKeeper Client{{#if view.hasManyClients}}s{{/if}}</a>
-  </td>
-</tr>
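
The hasManyServers / hasManyClients flags above drive the manual pluralization; a sketch (the view class name is an assumption):

    // Sketch only: pluralization flags for the ZooKeeper summary rows above.
    App.MainServiceInfoZookeeperView = Em.View.extend({
      hasManyServers: function () {
        return this.get('servers.length') > 1;
      }.property('servers.length'),
      hasManyClients: function () {
        return this.get('clients.length') > 1;
      }.property('clients.length')
    });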
diff --git a/branch-1.2/ambari-web/app/templates/main/service/info/summary_alert.hbs b/branch-1.2/ambari-web/app/templates/main/service/info/summary_alert.hbs
deleted file mode 100644
index ebd9365..0000000
--- a/branch-1.2/ambari-web/app/templates/main/service/info/summary_alert.hbs
+++ /dev/null
@@ -1,41 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<div class="container-fluid">
-    <div class="row-fluid">
-        <div class="span1 status-icon">
-          {{#if isOk}}
-              <i class="icon-ok icon-large"></i>
-          {{else}}
-            {{#if isWarning}}
-                <i class="icon-warning-sign icon-large"></i>
-            {{else}}
-                <i class="icon-remove icon-large"></i>
-            {{/if}}
-          {{/if}}
-        </div>
-        <div class="span11">
-            <div class="row-fluid">
-                <div class="span7 title">{{title}}
-                </div>
-                <div rel="tooltip" {{bindAttr data-title="timeSinceAlertDetails"}} data-placement="right" class="span5 date-time">{{timeSinceAlert}}</div>
-            </div>
-            <div class="row-fluid message">{{message}}</div>
-        </div>
-    </div>
-</div>
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/templates/main/service/item.hbs b/branch-1.2/ambari-web/app/templates/main/service/item.hbs
deleted file mode 100644
index b634238..0000000
--- a/branch-1.2/ambari-web/app/templates/main/service/item.hbs
+++ /dev/null
@@ -1,50 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-{{view App.MainServiceInfoMenuView configTabBinding="view.hasConfigTab"}}
-{{#if App.isAdmin}}
-<div class="service-button">
-  {{#if view.hasMaintenanceControl}}
-  <div class="btn-group display-inline-block">
-    <a class="btn dropdown-toggle" data-toggle="dropdown" href="#">
-      {{t services.service.actions.maintenance}}
-      <span class="caret"></span>
-    </a>
-    <ul class="dropdown-menu">
-      <!-- dropdown menu links -->
-      {{#each option in view.maintenance}}
-      <li {{bindAttr class="controller.content.isStopDisabled:disabled"}}>
-        <a {{action "doAction" option.action target="controller" href=true}}>{{option.label}}</a>
-      </li>
-      {{/each}}
-    </ul>
-  </div>
-  {{/if}}
-  <a href="javascript:void(null)" {{bindAttr class=":btn controller.content.isStartDisabled:disabled:btn-success" }}
-     data-toggle="modal" {{action "startService" target="controller"}}>
-    <i class="icon-play"></i>
-    {{t services.service.start}}
-  </a>
-  <a href="javascript:void(null)" {{bindAttr class=":btn controller.content.isStopDisabled:disabled:btn-danger" }}
-     data-toggle="modal" {{action "stopService" target="controller"}}>
-    <i class="icon-stop icon-white"></i>
-    {{t services.service.stop}}
-  </a>
-</div>
-{{/if}}
-{{outlet}}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/templates/main/service/menu_item.hbs b/branch-1.2/ambari-web/app/templates/main/service/menu_item.hbs
deleted file mode 100644
index 3784482..0000000
--- a/branch-1.2/ambari-web/app/templates/main/service/menu_item.hbs
+++ /dev/null
@@ -1,27 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<a href="#/main/services/{{unbound view.content.id}}/summary">
-  {{view App.MainDashboardServiceHealthView class="service-health" serviceBinding="view.content"}}&nbsp;
-  <span>{{unbound view.content.displayName}}</span>
-  {{#if view.alertsCount}}
-    <span class="label operations-count">
-      {{view.alertsCount}}
-    </span>
-  {{/if}}
-</a>
diff --git a/branch-1.2/ambari-web/app/templates/wizard/master_hosts.hbs b/branch-1.2/ambari-web/app/templates/wizard/master_hosts.hbs
deleted file mode 100644
index c147528..0000000
--- a/branch-1.2/ambari-web/app/templates/wizard/master_hosts.hbs
+++ /dev/null
@@ -1,27 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-{{#if view.hasNoHosts}}
-  {{t installer.noHostsAssigned}}
-{{/if}}
-{{#if view.hasOneHost}}
-  {{value}}
-{{/if}}
-{{#if view.hasMultipleHosts}}
-  <a href="#" {{action showHosts target="view"}}>{{value.firstObject}} {{t and}} {{view.otherLength}}</a>
-{{/if}}
diff --git a/branch-1.2/ambari-web/app/templates/wizard/master_hosts_popup.hbs b/branch-1.2/ambari-web/app/templates/wizard/master_hosts_popup.hbs
deleted file mode 100644
index cd5b466..0000000
--- a/branch-1.2/ambari-web/app/templates/wizard/master_hosts_popup.hbs
+++ /dev/null
@@ -1,23 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<ul>
-  {{#each host in view.serviceConfig.value}}
-    <li>{{host}}</li>
-  {{/each}}
-</ul>
diff --git a/branch-1.2/ambari-web/app/templates/wizard/slave_component_hosts.hbs b/branch-1.2/ambari-web/app/templates/wizard/slave_component_hosts.hbs
deleted file mode 100644
index 28e0872..0000000
--- a/branch-1.2/ambari-web/app/templates/wizard/slave_component_hosts.hbs
+++ /dev/null
@@ -1,32 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-{{#if view.hasNoHosts}}
-{{t none}} -
-<a href="#" {{action showEditSlaveComponentGroups view.serviceConfig.category target="controller"}}>
-  {{t installer.slaveComponentHosts.selectHosts}}
-</a>
-{{else}}
-<a href="#" {{action showEditSlaveComponentGroups view.serviceConfig.category target="controller"}}>
-  {{#if view.hasMultipleHosts}}
-  {{view.hosts.firstObject.hostName}} {{t and}} {{view.otherLength}}
-  {{else}}
-  {{view.hosts.firstObject.hostName}}
-  {{/if}}
-</a>
-{{/if}}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/templates/wizard/slave_component_hosts_popup.hbs b/branch-1.2/ambari-web/app/templates/wizard/slave_component_hosts_popup.hbs
deleted file mode 100644
index abf6bfc..0000000
--- a/branch-1.2/ambari-web/app/templates/wizard/slave_component_hosts_popup.hbs
+++ /dev/null
@@ -1,49 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<div id="slave-hosts-popup" class="alert alert-info">{{header}}</div>
-<table class="table table-striped">
-  <thead>
-    <tr>
-      <th>{{t common.host}}</th>
-      <th>{{t common.group}}</th>
-    </tr>
-  </thead>
-  <tbody>
-  {{#each host in hosts}}
-    <tr>
-      <td>
-        <label>{{host.hostName}}</label>
-      </td>
-      <td>
-        {{#view App.SlaveComponentDropDownGroupView contentBinding="host"}}
-        <select {{action changeGroup target="view" on="change"}}>
-          {{#each groupName in controller.getGroupsForDropDown}}
-            {{#view view.optionTag contentBinding="groupName"}}
-              <option value="{{unbound groupName}}" {{bindAttr selected="view.selected"}}>
-                {{groupName}}
-              </option>
-            {{/view}}
-          {{/each}}
-        </select>
-        {{/view}}
-      </td>
-    </tr>
-  {{/each}}
-  </tbody>
-</table>
diff --git a/branch-1.2/ambari-web/app/templates/wizard/slave_hosts.hbs b/branch-1.2/ambari-web/app/templates/wizard/slave_hosts.hbs
deleted file mode 100644
index 557fe73..0000000
--- a/branch-1.2/ambari-web/app/templates/wizard/slave_hosts.hbs
+++ /dev/null
@@ -1,27 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-{{#if view.hasNoHosts}}
-  {{t installer.noHostsAssigned}}
-{{else}}
-  {{#if view.hasMultipleHosts}}
-    {{view.value.[0]}} {{t and}} {{view.otherLength}}
-  {{else}}
-    {{view.value.[0]}}
-  {{/if}}
-{{/if}}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/templates/wizard/step1.hbs b/branch-1.2/ambari-web/app/templates/wizard/step1.hbs
deleted file mode 100644
index d948184..0000000
--- a/branch-1.2/ambari-web/app/templates/wizard/step1.hbs
+++ /dev/null
@@ -1,42 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<h2>{{t installer.step1.body.header}}</h2>
-<p class="alert alert-info">
-  {{t installer.step1.body}}
-</p>
-<div {{bindAttr class="view.onError:error :control-group"}}>
-  <label class="control-label" for="cluster-name">{{t installer.step1.clusterName}}
-    <a href="javascript:void(null)"
-       rel="popover"
-      {{translateAttr title="installer.step1.clusterName.tooltip.title"
-       data-content="installer.step1.clusterName.tooltip.content"}}>{{t common.learnMore}}</a>
-  </label>
-
-  <div class="controls">
-    {{view Ember.TextField id="cluster-name" valueBinding="content.cluster.name" placeholder="cluster name" target="controller"}}
-    <p class="help-inline">{{clusterNameError}}</p>
-  </div>
-</div>
-
-<div class="btn-area">
-  <a class="btn btn-success pull-right" {{bindAttr disabled="invalidClusterName"}} {{action "submit" target="controller"}}>{{t common.next}} &rarr;</a>
-</div>
-
-
-
diff --git a/branch-1.2/ambari-web/app/templates/wizard/step10.hbs b/branch-1.2/ambari-web/app/templates/wizard/step10.hbs
deleted file mode 100644
index 44a8e05..0000000
--- a/branch-1.2/ambari-web/app/templates/wizard/step10.hbs
+++ /dev/null
@@ -1,48 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<h2>{{t installer.step10.header}}</h2>
-{{#if isNagiosRestartRequired}}
-  <div class="alert alert-danger">
-    {{t installer.step10.nagiosRestartRequired}}
-  </div>
-{{/if}}
-<div class="alert alert-info">
-  {{t installer.step10.body}}
-</div>
-<div id="step10-content" class="well pre-scrollable">
-  {{#each item in clusterInfo}}
-  <ul>
-    <span {{bindAttr class="item.color"}}>{{item.displayStatement}}</span>
-    {{#each status in item.status}}
-    <ul>
-      <span {{bindAttr class="status.color"}}>{{status.displayStatement}}</span>
-      {{#each statement in status.statements}}
-      <ul>
-        <span {{bindAttr class="status.color"}}>{{statement.displayStatement}}</span>
-      </ul>
-      {{/each}}
-    </ul>
-    {{/each}}
-  </ul>
-  {{/each}}
-</div>
-<div class="btn-area">
-  <a class="btn pull-left" {{bindAttr disabled="isSubmitDisabled"}} {{action back}}>&larr; {{t common.back}}</a>
-  <a class="btn btn-success pull-right" {{bindAttr disabled="isSubmitDisabled"}} {{action complete}}>{{t common.complete}} &rarr;</a>
-</div>
diff --git a/branch-1.2/ambari-web/app/templates/wizard/step2.hbs b/branch-1.2/ambari-web/app/templates/wizard/step2.hbs
deleted file mode 100644
index 9c25b39..0000000
--- a/branch-1.2/ambari-web/app/templates/wizard/step2.hbs
+++ /dev/null
@@ -1,119 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<div id="installOptions">
-  <h2>{{t installer.step2.header}}</h2>
-
-  <p class="alert alert-info">{{t installer.step2.body}}</p>
-
-  <div id="targetHosts">
-    <h5>{{t installer.step2.targetHosts}}</h5>
-
-    <div {{bindAttr class="hostsError:error :control-group :target-hosts-input"}}>
-      <p>{{t installer.step2.targetHosts.info}}. {{t installer.step2.orUse}}
-        <a href="javascript:void(null)"
-           rel="popover"
-          {{translateAttr title="installer.step2.hostPattern.tooltip.title" data-content="installer.step2.hostPattern.tooltip.content"}}>
-          {{t installer.step2.hostPattern.tooltip.title}}
-        </a>
-      </p>
-
-      <div class="controls">
-        {{view Ember.TextArea class="span6" valueBinding="content.installOptions.hostNames" rows="5" placeholder="host names"}}
-        {{#if hostsError}}
-        <p class="help-inline">{{hostsError}}</p>
-        {{/if}}
-      </div>
-    </div>
-  </div>
-
-  <div id="hostConnectivity">
-    <div class="ambari-agents">
-      <h5>{{t installer.step2.sshKey}}</h5>
-
-      <label class="radio">
-        {{view view.providingSSHKeyRadioButton}}
-        {{t installer.step2.useSsh.provide}}
-          <a href="javascript:void(null)"
-             rel="popover"
-            {{translateAttr title="installer.step2.useSsh.tooltip.title" data-content="installer.step2.useSsh.tooltip.content"}}>
-            {{t installer.step2.useSsh.tooltip.title}}</a>
-        {{t installer.step2.useSsh.provide_id_rsa}}
-      </label>
-      <div {{bindAttr class="sshKeyError:error :control-group :ssh-key-input"}}>
-        {{#if view.isFileApi}}
-          {{view App.SshKeyFileUploader disabledBinding="view.sshKeyState"}}
-        {{/if}}
-          <div class="controls">
-            {{view Ember.TextArea class="span6" rows="3" id="sshKey"
-            placeholder="ssh private key" disabledBinding="view.sshKeyState" valueBinding="content.installOptions.sshKey"}}
-            {{#if sshKeyError}}
-                <span class="help-inline">{{sshKeyError}}</span>
-            {{/if}}
-          </div>
-      </div>
-
-      <label class="radio">
-        {{view view.manualRegistrationRadioButton}}
-        {{t installer.step2.manualInstall.perform}}
-          <a href="javascript:void(null)"
-             rel="popover"
-            {{translateAttr title="installer.step2.manualInstall.tooltip.title" data-content="installer.step2.manualInstall.tooltip.content"}}>
-            {{t installer.step2.manualInstall.tooltip.title}}</a>
-        {{t installer.step2.manualInstall.perform_on_hosts}}
-      </label>
-
-    </div>
-  </div>
-
-  <div class="advancedOptions">
-    <h5>{{t installer.step2.advancedOptions.header}}</h5>
-
-    <label {{bindAttr class=":checkbox"}}>
-      {{view Ember.Checkbox checkedBinding="content.installOptions.localRepo"}}
-
-      {{t installer.step2.localRepo.label_use}}
-
-      <a href="javascript:void(null)"
-         rel="popover"
-        {{translateAttr title="installer.step2.localRepo.tooltip.title" data-content="installer.step2.localRepo.tooltip.content"}}>
-        {{t installer.step2.localRepo.tooltip.title}}</a>
-      {{t installer.step2.localRepo.label_instead}}
-    </label>
-
-    <label {{bindAttr class=":checkbox"}}>
-      <div class="java-home">
-      {{view Ember.Checkbox checkedBinding="content.installOptions.isJavaHome"}}
-        {{t installer.step2.javaHome.label}}
-        <a href="javascript:void(null)"
-           rel="popover"
-          {{translateAttr title="installer.step2.javaHome.tooltip.title" data-content="installer.step2.javaHome.tooltip.content"}}>
-            {{t installer.step2.javaHome.tooltip.title}}</a>
-      </div>
-      {{view App.WizardTextField valueBinding="content.installOptions.javaHome" placeholder="/usr/jdk/jdk1.6.0_31"}}
-    </label>
-  </div>
-  <div class="btn-area">
-    {{#unless view.parentView.controller.hideBackButton}}
-    <a class="btn pull-left" {{action back}}>&larr; {{t common.back}}</a>
-    {{/unless}}
-    <a class="btn btn-success pull-right" {{bindAttr disabled="isSubmitDisabled"}} {{action evaluateStep target="controller"}}>
-      {{t installer.step2.registerAndConfirm}} &rarr;</a>
-  </div>
-
-</div>
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/templates/wizard/step2ManualInstallPopup.hbs b/branch-1.2/ambari-web/app/templates/wizard/step2ManualInstallPopup.hbs
deleted file mode 100644
index b57e1ae..0000000
--- a/branch-1.2/ambari-web/app/templates/wizard/step2ManualInstallPopup.hbs
+++ /dev/null
@@ -1,19 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<p>{{t installer.step2.manualInstall.popup.body}}</p>
diff --git a/branch-1.2/ambari-web/app/templates/wizard/step3.hbs b/branch-1.2/ambari-web/app/templates/wizard/step3.hbs
deleted file mode 100644
index 9ec9661..0000000
--- a/branch-1.2/ambari-web/app/templates/wizard/step3.hbs
+++ /dev/null
@@ -1,145 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
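-{{! Host confirmation table: one row per registering host showing bootstrap progress and status, with remove/retry actions and category filters. }}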
-<div id="confirm-hosts">
-  <h2>{{t installer.step3.header}}</h2>
-
-  <p class="alert alert-info">{{t installer.step3.body}}</p>
-  <div class="box">
-    <div class="box-header">
-      <div class="button-section">
-        <a class="btn btn-primary" {{bindAttr disabled="noHostsSelected"}}
-           href="#" {{action removeSelectedHosts target="controller" }}><i
-                class="icon-trash icon-white"></i>
-          {{t installer.step3.removeSelected}}
-        </a>
-        {{#unless isRetryDisabled}}
-        <a class="btn btn-primary decommission"
-           href="#" {{action retrySelectedHosts target="controller"}}><i
-          class="icon-repeat icon-white"></i>
-          {{t installer.step3.retryFailed}}
-        </a>
-        {{/unless}}
-        {{#if App.testMode}}
-            <a class="btn btn-info"
-               href="#" {{action mockBtn target="controller"}}>
-              {{t installer.mockData}}
-            </a>
-            <a class="btn btn-info"
-               href="#" {{action pollBtn target="controller"}}>
-                {{t installer.pollData}}
-            </a>
-        {{/if}}
-
-        <div id="host-filter" class="pull-right">
-          <ul class="clearfix">
-            <li class="first">{{t common.show}}:</li>
-            {{#each category in controller.categories}}
-              <li {{bindAttr class="category.itemClass"}}>
-                <a {{action selectCategory category target="controller"}} href="#">
-                  {{category.label}}
-                </a>
-              </li>
-              {{#unless category.last}}
-                <li>|</li>
-              {{/unless}}
-            {{/each}}
-          </ul>
-        </div>
-      </div>
-    </div>
-
-    <div class="pre-scrollable" style="max-height: 440px;">
-      <table class="table table-bordered table-striped">
-        <thead>
-        <tr>
-          <th class="span1">{{view Ember.Checkbox checkedBinding="allChecked"}}</th>
-          <th class="span3">{{t common.host}}</th>
-          <!-- retrieved from local storage initially -->
-          <th class="span3">{{t common.progress}}</th>
-          <th class="span2">{{t common.status}}</th>
-          <!-- given by the parsing function that parses data from bootstrap call, dynamically assign the color -->
-          <th class="span3">{{t common.action}}</th>
-          <!-- trash icon -->
-          <!-- retry icon -->
-        </tr>
-        </thead>
-
-        <tbody>
-        {{#if visibleHosts.length}}
-        {{#each host in visibleHosts}}
-        {{#view App.WizardHostView categoryBinding="controller.category" hostInfoBinding="host"}}
-        <td>
-          {{view Ember.Checkbox checkedBinding="host.isChecked"}}
-        </td>
-        <td>
-          {{host.name}}
-        </td>
-        <td>
-          <div {{bindAttr class="host.bootBarColor host.isBootDone::progress-striped host.isBootDone::active :progress"}}>
-            <div class="bar" style="width:100%">
-            </div>
-          </div>
-        </td>
-        <td>
-          <a href="javascript:void(null)"
-             data-toggle="modal" {{action hostLogPopup host target="controller"}}><span  {{bindAttr class="host.bootStatusColor"}}>{{host.bootStatusForDisplay}}</span></a>
-        </td>
-        <td>
-          {{#if view.isRemovable}}<a class="btn btn-mini" {{action remove target="view"}}><i class="icon-trash"></i>
-          {{t common.remove}}</a>{{/if}}
-          {{#if view.isRetryable}}<a class="btn btn-mini" {{action retry target="view"}}><i class="icon-repeat"></i>
-          {{t common.retry}}</a>{{/if}}
-        </td>
-        {{/view}}
-        {{/each}}
-        {{else}}
-        <tr>
-            <td colspan="5"><p>{{t installer.step3.hosts.noHosts}}</p></td>
-        </tr>
-        {{/if}}
-
-        </tbody>
-
-      </table>
-    </div>
-    <div class="box-footer">
-      <hr/>
-      <div class="footer-pagination">
-      </div>
-    </div>
-  </div>
-    {{#if isWarningsBoxVisible}}
-      {{#if isHostHaveWarnings}}
-      <div class="alert alert-warn">
-        {{t installer.step3.warnings.description}}<br>
-        <a href="javascript:void(0)" {{action hostWarningsPopup warnings target="controller"}}>{{t installer.step3.warnings.linkText}}</a>
-      </div>
-      {{else}}
-      <div class="alert alert-success">
-        {{t installer.step3.warnings.noWarnings}}
-        <a href="javascript:void(0)" {{action hostWarningsPopup warnings target="controller"}}>{{t installer.step3.noWarnings.linkText}}</a>
-      </div>
-      {{/if}}
-    {{/if}}
-  <div class="btn-area">
-    <a class="btn pull-left" {{bindAttr disabled="isInstallInProgress"}} {{action back}}>&larr; {{t common.back}}</a>
-    <a class="btn btn-success pull-right" {{bindAttr disabled="isSubmitDisabled"}} {{action submit target="controller"}}>{{t common.next}} &rarr;</a>
-  </div>
-</div>
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/templates/wizard/step3_host_log_popup.hbs b/branch-1.2/ambari-web/app/templates/wizard/step3_host_log_popup.hbs
deleted file mode 100644
index f009e35..0000000
--- a/branch-1.2/ambari-web/app/templates/wizard/step3_host_log_popup.hbs
+++ /dev/null
@@ -1,31 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-
-<div id="host-log">
-  <div class="content-area">
-      <div class="textTrigger">{{t popup.highlight}}</div>
-    {{#if view.isTextArea}}
-    <div>
-      {{view view.textArea contentBinding="view.host.bootLog"}}
-    </div>
-    {{else}}
-    <pre class="bootLog">{{view.host.bootLog}}</pre>
-    {{/if}}
-  </div>
-</div>
diff --git a/branch-1.2/ambari-web/app/templates/wizard/step3_host_warnings_popup.hbs b/branch-1.2/ambari-web/app/templates/wizard/step3_host_warnings_popup.hbs
deleted file mode 100644
index 664c59e..0000000
--- a/branch-1.2/ambari-web/app/templates/wizard/step3_host_warnings_popup.hbs
+++ /dev/null
@@ -1,112 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-
-<div id="host-warnings">
-    <div class="row-fluid">
-            <div class="span6">
-              {{view Ember.Select
-              contentBinding="view.categories"
-              selectionBinding="view.category"
-              }}
-            </div>
-        <div class="span3 offset3">
-            <a href="javascript.void(0)" title="Show Details" {{action openWarningsInDialog target="view"}} class="task-detail-open-dialog"><i class="icon-external-link"></i> {{t installer.step3.hostWarningsPopup.details}}</a>
-        </div>
-    </div>
-    <div class="content-area">
-          <div class="warnings-list">
-              <table class="table">
-                  <thead>
-                  <tr>
-                      <th class="category-title" colspan="2">{{t installer.step3.hostWarningsPopup.directoriesAndFiles}}</th>
-                  </tr>
-                  </thead>
-                  <tbody>
-                  {{#each path in view.content.directoriesFiles}}
-                  <tr>
-                      <td>{{path.name}}</td>
-                      <td>
-                        <span>
-                        {{#if path.isWarn}}
-                          <i class="icon-warning-sign"></i>
-                        {{else}}
-                          <i class="icon-ok"></i>
-                        {{/if}}
-                        {{path.message}}
-                        </span>
-                      </td>
-                  </tr>
-                  {{/each}}
-                  </tbody>
-              </table>
-              <table class="table">
-                  <thead>
-                  <tr>
-                      <th class="category-title" colspan="2">{{t installer.step3.hostWarningsPopup.packages}}</th>
-                  </tr>
-                  </thead>
-                  <tbody>
-                    {{#each package in view.content.packages}}
-                    <tr>
-                        <td>{{package.name}}</td>
-                        <td>
-                          <span>
-                          {{#if package.isWarn}}
-                            <i class="icon-warning-sign"></i>
-                          {{else}}
-                            <i class="icon-ok"></i>
-                          {{/if}}
-                          {{package.message}}
-                          </span>
-                        </td>
-                    </tr>
-                    {{/each}}
-                  </tbody>
-              </table>
-              <table class="table">
-                  <thead>
-                  <tr>
-                      <th class="category-title" colspan="2">{{t installer.step3.hostWarningsPopup.processes}}</th>
-                  </tr>
-                  </thead>
-                  <tbody>
-                  {{#if view.content.processes.length}}
-                    {{#each process in view.content.processes}}
-                    <tr>
-                        <td {{bindAttr title="process.command"}}>{{process.shortCommand}}}</td>
-                        <td>
-                          <span>
-                          {{#if process.isWarn}}
-                            <i class="icon-warning-sign"></i>
-                          {{else}}
-                            <i class="icon-ok"></i>
-                          {{/if}}
-                          {{process.message}}
-                          </span>
-                        </td>
-                    </tr>
-                    {{/each}}
-                  {{else}}
-                  <tr><td colspan="2">{{t installer.step3.hostWarningsPopup.noProcesses}}</td></tr>
-                  {{/if}}
-                  </tbody>
-              </table>
-          </div>
-    </div>
-</div>
diff --git a/branch-1.2/ambari-web/app/templates/wizard/step4.hbs b/branch-1.2/ambari-web/app/templates/wizard/step4.hbs
deleted file mode 100644
index e82fc4f..0000000
--- a/branch-1.2/ambari-web/app/templates/wizard/step4.hbs
+++ /dev/null
@@ -1,62 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<div id="step4">
-  <h2>{{t installer.step4.header}}</h2>
-
-  <div class="alert alert-info">
-    {{t installer.step4.body}}
-  </div>
-  <table class="table table-striped">
-    <thead>
-    <tr>
-      <th class="span3">{{t common.service}}
-        <span style="margin-left:10px">
-          <a href="#" {{action selectAll target="controller"}} {{bindAttr class="isAll:selected:deselected"}}>{{t all}}</a>
-                |
-                <a
-                  href="#" {{action selectMinimum target="controller"}} {{bindAttr class="isMinimum:selected:deselected"}}>{{t minimum}}</a>
-        </span>
-      </th>
-      <th>{{t common.version}}</th>
-      <th>{{t common.description}}</th>
-    </tr>
-    </thead>
-    <tbody>
-    {{#each controller}}
-    {{#unless isHidden}}
-    <tr {{bindAttr class="isSelected:success:"}}>
-      <td><label
-        class="checkbox">{{view Ember.Checkbox disabledBinding="isDisabled" checkedBinding="isSelected"}}{{displayName}}</label>
-      </td>
-      <td>{{version}}</td>
-      <td>{{description}}</td>
-    </tr>
-    {{/unless}}
-    {{/each}}
-    </tbody>
-  </table>
-
-  <div class="btn-area">
-    {{#unless view.parentView.controller.hideBackButton}}
-    <a class="btn pull-left" {{action back}}>&larr; {{t common.back}}
-    </a>
-    {{/unless}}
-    <a class="btn btn-success pull-right" {{bindAttr disabled="isSubmitDisabled"}} {{action submit target="controller"}}> {{t common.next}} &rarr;</a>
-  </div>
-</div>
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/templates/wizard/step5.hbs b/branch-1.2/ambari-web/app/templates/wizard/step5.hbs
deleted file mode 100644
index 64a04f3..0000000
--- a/branch-1.2/ambari-web/app/templates/wizard/step5.hbs
+++ /dev/null
@@ -1,84 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<h2>{{t installer.step5.header}}</h2>
-<div class="alert alert-info">
-  {{t installer.step5.body}}
-  {{#if hasHiveServer}}
-    <br>
-    {{t installer.step5.body.hive}}
-  {{/if}}
-</div>
-<div class="assign-masters">
-  <div class="select-hosts">
-    <form class="form-horizontal">
-      <!-- View for array controller -->
-      {{#each selectedServicesMasters}}
-      <div class="control-group">
-        <label class="control-label">{{display_name}}:</label>
-        {{#if isHiveCoHost}}
-          <label class="host-name">{{selectedHost}}<i class="icon-asterisks">&#10037</i></label>
-        {{else}}
-        <div class="controls">
-          {{view App.SelectHostView
-            contentBinding="availableHosts"
-            optionValuePath="content.host_name"
-            optionLabelPath="content.host_info"
-            selectedHostBinding="selectedHost"
-            serviceNameBinding="display_name"
-            zIdBinding="zId"
-            disabledBinding="isInstalled"
-          }}
-          {{#if showAddControl}}
-          {{view App.AddControlView
-            componentNameBinding="display_name"
-          }}
-          {{/if}}
-          {{#if showRemoveControl}}
-          {{view App.RemoveControlView
-            zIdBinding="zId"
-          }}
-          {{/if}}
-        </div>
-        {{/if}}
-      </div>
-      {{/each}}
-    </form>
-  </div>
-
-  <div class="host-assignments">
-    {{#each masterHostMapping}}
-    <div class="mapping-box round-corners well">
-      <div class="hostString"><span>{{hostInfo}}</span></div>
-      {{#each masterServices}}
-      <span class="assignedService round-corners">{{display_name}}</span>
-      {{/each}}
-    </div>
-    {{/each}}
-
-    {{#if remainingHosts}}
-    <div class="remaining-hosts round-corners well">
-      <span><strong>{{remainingHosts}}</strong> {{t installer.step5.attention}}</span></div>
-    {{/if}}
-  </div>
-  <div style="clear: both;"></div>
-</div>
-<div class="btn-area">
-  <a class="btn pull-left" {{action back href="true"}}>&larr; {{t common.back}}</a>
-  <a class="btn btn-success pull-right" {{action next}}>{{t common.next}} &rarr;</a>
-</div>
diff --git a/branch-1.2/ambari-web/app/templates/wizard/step6.hbs b/branch-1.2/ambari-web/app/templates/wizard/step6.hbs
deleted file mode 100644
index 5dc713d..0000000
--- a/branch-1.2/ambari-web/app/templates/wizard/step6.hbs
+++ /dev/null
@@ -1,92 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
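-{{! Slave/client assignment matrix: one row per host, one column per selected slave component, with all/none toggles in each column header. }}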
-<div id="step6">
-  <h2>{{t installer.step6.header}}</h2>
-
-  <div class="alert alert-info">{{{view.label}}}</div>
-  {{#if errorMessage}}
-  <div class="alert alert-error">{{errorMessage}}</div>
-  {{/if}}
-
-  <div class="pre-scrollable">
-  <table class="table table-striped">
-    <thead>
-      <tr>
-        <th>{{t common.host}}</th>
-        <th>
-          <a href="#" {{bindAttr class="isAllDataNodes:selected:deselected"}}
-            {{action selectAllDataNodes target="controller"}}>{{t all}}</a> |
-
-          <a href="#" {{bindAttr class="isNoDataNodes:selected:deselected"}}
-            {{action deselectAllDataNodes target="controller"}}>{{t none}}</a>
-        </th>
-      {{#if controller.isMrSelected}}
-        <th>
-          <a href="#" {{bindAttr class="isAllTaskTrackers:selected:deselected"}}
-            {{action selectAllTaskTrackers target="controller"}}>{{t all}}</a> |
-
-          <a href="#" {{bindAttr class="isNoTaskTrackers:selected:deselected"}}
-            {{action deselectAllTaskTrackers target="controller"}}>{{t none}}</a>
-        </th>
-      {{/if}}
-      {{#if controller.isHbSelected}}
-        <th>
-          <a href="#" {{bindAttr class="isAllRegionServers:selected:deselected"}}
-            {{action selectAllRegionServers target="controller"}}>{{t all}}</a> |
-
-          <a href="#" {{bindAttr class="isNoRegionServers:selected:deselected"}}
-            {{action deselectAllRegionServers target="controller"}}>{{t none}}</a>
-        </th>
-      {{/if}}
-        <th>
-          <a href="#" {{bindAttr class="isAllClients:selected:deselected"}} {{action selectAllClients target="controller"}}>{{t all}}</a>
-          |
-          <a href="#" {{bindAttr class="isNoClients:selected:deselected"}} {{action deselectAllClients target="controller"}}>{{t none}}</a>
-        </th>
-      </tr>
-    </thead>
-    <tbody>
-
-    {{#each hosts}}
-    <tr>
-        {{#view App.WizardStep6HostView hostBinding = "this" }}
-          {{hostName}}
-          {{#if isMaster}}
-            <i class="icon-asterisks">&#10037;</i>
-          {{/if}}
-        {{/view}}
-      <td><label class="checkbox">{{view Ember.Checkbox disabledBinding="isDataNodeInstalled" checkedBinding="isDataNode"}}{{t common.dataNode}}</label></td>
-      {{#if controller.isMrSelected}}
-      <td><label class="checkbox">{{view Ember.Checkbox disabledBinding="isTaskTrackerInstalled" checkedBinding="isTaskTracker"}}{{t common.taskTracker}}</label></td>
-      {{/if}}
-      {{#if controller.isHbSelected}}
-      <td><label class="checkbox">{{view Ember.Checkbox disabledBinding="isRegionServerInstalled" checkedBinding="isRegionServer"}}{{t common.regionServer}}</label></td>
-      {{/if}}
-      <td><label class="checkbox">{{view Ember.Checkbox disabledBinding="isClientInstalled" checkedBinding="isClient"}}{{t common.client}}</label></td>
-    </tr>
-    {{/each}}
-    </tbody>
-  </table>
-  </div>
-  <div class="btn-area">
-    <a class="btn" {{action back}}>&larr; {{t common.back}}</a>
-    <a class="btn btn-success pull-right" {{action next}}>{{t common.next}} &rarr;</a>
-  </div>
-</div>
diff --git a/branch-1.2/ambari-web/app/templates/wizard/step7.hbs b/branch-1.2/ambari-web/app/templates/wizard/step7.hbs
deleted file mode 100644
index 4f37472..0000000
--- a/branch-1.2/ambari-web/app/templates/wizard/step7.hbs
+++ /dev/null
@@ -1,87 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<div id="serviceConfig">
-    <h2>{{t installer.step7.header}}</h2>
-
-    <div class="alert alert-info">
-      {{t installer.step7.body}}
-    </div>
-
-    <ul class="nav nav-tabs">
-      {{#each service in controller.stepConfigs}}
-        {{#if service.showConfig}}
-          {{#view App.WizardStep7.ServiceConfigTab}}
-              <a class="active" href="#{{unbound service.serviceName}}"
-                 data-toggle="tab" {{action selectService service target="view"}}>
-                {{service.displayName}}{{#if service.errorCount}}<span
-                      class="badge badge-important">{{service.errorCount}}</span>{{/if}}</a>
-          {{/view}}
-        {{/if}}
-      {{/each}}
-    </ul>
-
-    <div class="accordion">
-      {{#each category in selectedService.configCategories}}
-          <div class="accordion-group {{unbound category.name}}">
-              <div class="accordion-heading" {{action "onToggleBlock" category target="view"}}>
-                {{#if category.isCollapsed}}
-                    <i class='icon-caret-right pull-left accordion-toggle'></i>
-                {{else}}
-                    <i class='icon-caret-down pull-left accordion-toggle'></i>
-                {{/if}}
-                  <a class="accordion-toggle">
-                    {{category.name}}
-                  </a>
-              </div>
-
-
-              {{#view App.WizardStep7.ServiceConfigsByCategoryView categoryBinding="category" serviceConfigsBinding="selectedService.configs"}}
-                  <form class="form-horizontal">
-
-                    {{#each view.categoryConfigs}}
-                      {{#if isVisible}}
-                          <div {{bindAttr class="errorMessage:error: :control-group"}}>
-                              <label class="control-label">{{displayName}}</label>
-
-                              <div class="controls">
-                                {{view viewClass serviceConfigBinding="this" categoryConfigsBinding="view.categoryConfigs"}}
-                                  <span class="help-inline">{{errorMessage}}</span>
-                              </div>
-                          </div>
-                      {{/if}}
-                    {{/each}}
-
-                  </form>
-              {{/view}}
-          </div>
-      {{/each}}
-    </div>
-
-  {{#if isSubmitDisabled}}
-      <div class="alert">{{t installer.step7.attentionNeeded}}</div>
-  {{/if}}
-
-    <div class="btn-area">
-        <a class="btn" {{action back}}>&larr; {{t common.back}}</a>
-
-        <a
-                class="btn btn-success pull-right" {{bindAttr disabled="isSubmitDisabled"}}
-          {{action submit target="controller"}}>{{t common.next}} &rarr;</a>
-    </div>
-</div>
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/templates/wizard/step8.hbs b/branch-1.2/ambari-web/app/templates/wizard/step8.hbs
deleted file mode 100644
index da99b07..0000000
--- a/branch-1.2/ambari-web/app/templates/wizard/step8.hbs
+++ /dev/null
@@ -1,58 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<h2>{{t installer.step8.header}}</h2>
-
-<div class="alert alert-info">
-  {{t installer.step8.body}}
-</div>
-
-<div id="step8-content" class="well pre-scrollable">
-    <div id="printReview">
-        <a class="btn btn-info pull-right" {{action printReview target="view"}}>{{t common.print}}</a> <br/>
-    </div>
-    <div id="step8-info">
-      {{#each item in controller.clusterInfo}}
-          <p>
-              <b>{{item.display_name}}</b> : {{item.config_value}}
-          </p>
-      {{/each}}
-
-        <div>
-            <p><b>{{t menu.item.services}}</b></p>
-          {{#each controller.services}}
-              <div>
-                  <ul><em><b>{{display_name}}</b></em>
-
-                      <div>
-                        {{#each component in this.service_components}}
-                            <ul><span class="text text-info">{{component.display_name }}
-                                : </span>{{component.component_value}}</ul>
-                        {{/each}}
-                      </div>
-                  </ul>
-              </div>
-          {{/each}}
-        </div>
-    </div>
-</div>
-<div class="btn-area">
-    <a class="btn pull-left" {{action back href="true"}}>&larr; {{t common.back}}</a>
-    <a class="btn btn-success pull-right"
-       id="spinner" {{bindAttr disabled="controller.isSubmitDisabled"}} {{action submit target="controller"}}>{{t common.deploy}} &rarr;</a>
-</div>
diff --git a/branch-1.2/ambari-web/app/templates/wizard/step8_log_popup.hbs b/branch-1.2/ambari-web/app/templates/wizard/step8_log_popup.hbs
deleted file mode 100644
index c6abe08..0000000
--- a/branch-1.2/ambari-web/app/templates/wizard/step8_log_popup.hbs
+++ /dev/null
@@ -1,22 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-<p>{{view.message}}</p>
-<div class="progress">
-    <div class="bar" {{bindAttr style="view.barWidth"}}>
-    </div>
-</div>
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/templates/wizard/step9.hbs b/branch-1.2/ambari-web/app/templates/wizard/step9.hbs
deleted file mode 100644
index b57e8d8..0000000
--- a/branch-1.2/ambari-web/app/templates/wizard/step9.hbs
+++ /dev/null
@@ -1,116 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-<div id="deploy">
-
-  <h2>{{t installer.step9.header}}</h2>
-
-  <p class="alert alert-info">{{t installer.step9.body}}</p>
-
-  <div id="overallProgress">
-    {{view.isStepCompleted}}
-    <div class="row-fluid">
-      <div class="span10">
-        <div {{bindAttr class="isStepCompleted::progress-striped isStepCompleted::active view.barColor :progress"}}>
-          <div class="bar" {{bindAttr style="view.barWidth"}}>
-          </div>
-        </div>
-      </div>
-      <div class="span2">{{view.progressMessage}}</div>
-    </div>
-  </div>
-
-  <div class="box">
-    <div class="box-header">
-      <div class="pull-left">
-        {{#if controller.showRetry}}
-        <a class="btn btn-primary"
-           href="#" {{action retry}}><i class="icon-repeat icon-white"></i>
-          {{t common.retry}}
-        </a>
-        {{/if}}
-        {{#if App.testMode}}
-        <a class="btn btn-info" href="#" {{action mockBtn target="controller"}}>
-          {{t installer.mockData}}
-        </a>
-        <a class="btn btn-primary"
-           href="#" {{action pollBtn target="controller"}}><i class="icon-repeat icon-white"></i>
-          {{t installer.pollData}}
-        </a>
-        {{/if}}
-      </div>
-
-    </div>
-    <div class="pre-scrollable" style="max-height: 750px;">
-      <table id="deploy-status-by-host" class="table table-bordered table-striped">
-        <thead>
-        <tr>
-          <th class="host">
-            {{t common.host}}
-          </th>
-          <th class="status">{{t common.status}}</th>
-          <!--  given by the parsing function that parses data from bootstrap call -->
-          <th class="message">{{t common.message}}</th>
-          <!-- retrieved from local storage initially -->
-        </tr>
-        </thead>
-
-        <tbody>
-        {{#each host in controller.hosts}}
-        {{#view App.HostStatusView objBinding="host"}}
-        <td>
-          {{host.name}}
-        </td>
-        <td>
-          <div class="progress-bar pull-left">
-            <div {{bindAttr class="view.isHostCompleted::progress-striped view.isHostCompleted::active view.barColor :progress"}}>
-              <div class="bar" {{bindAttr style="view.barWidth"}}>
-              </div>
-            </div>
-          </div>
-          <div class="progress-percentage pull-left">{{host.progress}}%</div>
-        </td>
-        <td>
-          <a {{bindAttr class="view.isFailed:text-error view.isSuccess:text-success view.isWarning:text-warning"}}
-            href="javascript:void(null)"
-            data-toggle="modal" {{action hostLogPopup host target="view"}}>{{host.message}}</a>
-        </td>
-
-        {{/view}}
-        {{/each}}
-
-        </tbody>
-      </table>
-    </div>
-    <div class="box-footer">
-      <hr/>
-      <div class="footer-pagination">
-      </div>
-    </div>
-  </div>
-
-  <div>
-    {{#if view.resultMsg}}
-    <p {{bindAttr class="view.resultMsgColor :alert"}}>{{view.resultMsg}}</p>
-    {{/if}}
-    <div class="btn-area">
-      <a class="btn btn-success pull-right" {{bindAttr disabled="isSubmitDisabled"}} {{action submit target="controller"}}>{{t common.next}} &rarr;</a>
-    </div>
-  </div>
-
-</div>
diff --git a/branch-1.2/ambari-web/app/templates/wizard/step9HostTasksLogPopup.hbs b/branch-1.2/ambari-web/app/templates/wizard/step9HostTasksLogPopup.hbs
deleted file mode 100644
index 9765ee8..0000000
--- a/branch-1.2/ambari-web/app/templates/wizard/step9HostTasksLogPopup.hbs
+++ /dev/null
@@ -1,73 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-<div {{bindAttr class="view.isLogWrapHidden::hidden :task-list-main-warp"}}>
-  <div class="task-top-wrap">
-     {{t common.tasks}}
-    <div class="select-wrap">
-      {{t common.show}}:
-      {{view Ember.Select
-        contentBinding="view.categories"
-        optionValuePath="content.value"
-        optionLabelPath="content.label"
-        selectionBinding="view.category"
-      }}
-    </div>
-  </div>
-  <div id="host-log">
-    {{#each taskInfo in view.tasks}}
-    <div {{bindAttr class="taskInfo.isVisible::hidden :log-list-wrap"}}>
-      <div {{action toggleTaskLog taskInfo}} class="task-list-line-cursor">
-        <i {{bindAttr class="taskInfo.status taskInfo.icon"}}></i>
-        <a href="#" class="" >
-          {{{taskInfo.role}} {{taskInfo.command}}
-        </a>
-        <div class="show-details"><i class="icon-caret-right"></i></div>
-      </div>
-    </div>
-    {{/each}}
-    {{#if view.isEmptyList}}
-      <div class="log-list-wrap">{{t installer.step9.hostLog.popup.noTasksToShow}}</div>
-    {{/if}}
-  </div>
-</div>
-
-<div {{bindAttr class="view.isLogWrapHidden:hidden :task-detail-info"}}>
-  <div class="task-top-wrap">
-    <a class="task-detail-back" href="javascript:void(null)" {{action backToTaskList}} ><i class="icon-arrow-left"></i>&nbsp;{{t common.tasks}}</a>
-    <div>
-      <!--<img src="data:image/png;base64,R0lGODlhFAAUAIAAAP///wAAACH5BAEAAAAALAAAAAAUABQAAAIRhI+py+0Po5y02ouz3rz7rxUAOw==" class="task-detail-status-ico" />-->
-      <i {{bindAttr class="view.openedTask.status :task-detail-status-ico view.openedTask.icon"}} class="task-detail-status-ico"></i>
-      <span class="task-detail-log-rolename" >{{{view.openedTask.role}} {{view.openedTask.command}}</span>
-    </div>
-    <div class="task-detail-ico-wrap">
-      <div title="Click to Copy" {{action "textTrigger" taskInfo target="view"}} class="task-detail-copy"><i class="icon-copy"></i> {{t common.copy}}</div>
-      <div title="Open in New Window" {{action openTaskLogInDialog}} class="task-detail-open-dialog"><i class="icon-external-link"></i> {{t common.open}}</div>
-    </div>
-  </div>
-  <div class="task-detail-log-info">
-    <div class="content-area" >
-      <div class="task-detail-log-clipboard-wrap" ></div>
-      <div class="task-detail-log-maintext">
-        <h5>stderr:</h5>
-        <pre class="stderr">{{view.openedTask.stderr}}</pre>
-        <h5>stdout:</h5>
-        <pre class="stdout">{{view.openedTask.stdout}}</pre>
-      </div>
-    </div>
-  </div>
-</div>
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/utils/base64.js b/branch-1.2/ambari-web/app/utils/base64.js
deleted file mode 100644
index fe2dfc9..0000000
--- a/branch-1.2/ambari-web/app/utils/base64.js
+++ /dev/null
@@ -1,78 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-(function () {
-
-  var
-    object = typeof window != 'undefined' ? window : exports,
-    chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=',
-    INVALID_CHARACTER_ERR = (function () {
-      // fabricate a suitable error object
-      try {
-        document.createElement('$');
-      }
-      catch (error) {
-        return error;
-      }
-    }());
-
-  // encoder
-  // [https://gist.github.com/999166] by [https://github.com/nignag]
-  object.btoa || (
-    object.btoa = function (input) {
-      for (
-        // initialize result and counter
-        var block, charCode, idx = 0, map = chars, output = '';
-        // if the next input index does not exist:
-        // change the mapping table to "="
-        // check if d has no fractional digits
-        input.charAt(idx | 0) || (map = '=', idx % 1);
-        // "8 - idx % 1 * 8" generates the sequence 2, 4, 6, 8
-        output += map.charAt(63 & block >> 8 - idx % 1 * 8)
-        ) {
-        charCode = input.charCodeAt(idx += 3 / 4);
-        if (charCode > 0xFF) throw INVALID_CHARACTER_ERR;
-        block = block << 8 | charCode;
-      }
-      return output;
-    });
-
-  // decoder
-  // [https://gist.github.com/1020396] by [https://github.com/atk]
-  object.atob || (
-    object.atob = function (input) {
-      input = input.replace(/=+$/, '')
-      if (input.length % 4 == 1) throw INVALID_CHARACTER_ERR;
-      for (
-        // initialize result and counters
-        var bc = 0, bs, buffer, idx = 0, output = '';
-        // get next character
-        buffer = input.charAt(idx++);
-        // character found in table? initialize bit storage and add its ascii value;
-        ~buffer && (bs = bc % 4 ? bs * 64 + buffer : buffer,
-          // and if not first of each 4 characters,
-          // convert the first 8 bits to one ascii character
-          bc++ % 4) ? output += String.fromCharCode(255 & bs >> (-2 * bc & 6)) : 0
-        ) {
-        // try to find character in table (0-63, not found => -1)
-        buffer = chars.indexOf(buffer);
-      }
-      return output;
-    });
-
-}());
\ No newline at end of file
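
A quick usage sketch for the polyfill above, assuming a browser-like environment where window exists; the require path follows the utils/* convention used elsewhere in this tree, and the expected outputs follow the standard Base64 alphabet:

    require('utils/base64');       // registers btoa/atob if the browser lacks them
    var encoded = btoa('Ambari');  // "QW1iYXJp"
    var decoded = atob(encoded);   // "Ambari"
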
diff --git a/branch-1.2/ambari-web/app/utils/component.js b/branch-1.2/ambari-web/app/utils/component.js
deleted file mode 100644
index e8f27ed..0000000
--- a/branch-1.2/ambari-web/app/utils/component.js
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Helper functions related to components
- * @type {Object}
- */
-module.exports = {
-
-  /**
-   * Return list of installed components. Syntax is:
-   *
-   * [{
-   *    id : 'DATANODE',
-   *    displayName : 'DataNode',
-   *    isMaster : true,
-   *    isSlave : false,
-   *    isClient : false
-   * }]
-   *
-   */
-  getInstalledComponents : function(){
-    var components = App.HostComponent.find();
-    var names = components.mapProperty('componentName').uniq();
-    var result = [];
-
-    names.forEach(function(componentName){
-      var component = components.findProperty('componentName', componentName);
-      result.push(Ember.Object.create({
-        id: componentName,
-        isMaster: component.get('isMaster'),
-        isSlave: component.get('isSlave'),
-        isClient: component.get('isClient'),
-        displayName: component.get('displayName')
-      }));
-    });
-
-    return result;
-  }
-};
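
A minimal usage sketch for getInstalledComponents, assuming App.HostComponent data has already been loaded into the store:

    var componentUtils = require('utils/component');
    componentUtils.getInstalledComponents().forEach(function (c) {
      // each entry is an Ember.Object with id/displayName and role flags
      console.log(c.get('id') + ' isMaster=' + c.get('isMaster'));
    });
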
diff --git a/branch-1.2/ambari-web/app/utils/date.js b/branch-1.2/ambari-web/app/utils/date.js
deleted file mode 100644
index a5f7c83..0000000
--- a/branch-1.2/ambari-web/app/utils/date.js
+++ /dev/null
@@ -1,166 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var validator = require('utils/validator');
-
-module.exports = {
-  dateMonths:['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'],
-  dateDays:['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'],
-  dateFormatZeroFirst:function (time) {
-    if (time < 10) return '0' + time;
-    return time;
-  },
-  /**
-   * Convert timestamp to date-string 'DAY_OF_THE_WEEK, MONTH DAY, YEAR HOURS:MINUTES'
-   * @param timestamp
-   * @return string date
-   */
-  dateFormat:function (timestamp) {
-    if (!validator.isValidInt(timestamp)) return timestamp;
-    var date = new Date(timestamp * 1);
-    var months = this.dateMonths;
-    var days = this.dateDays;
-    return days[date.getDay()] + ', ' + months[date.getMonth()] + ' ' + this.dateFormatZeroFirst(date.getDate()) + ', ' + date.getFullYear() + ' ' + this.dateFormatZeroFirst(date.getHours()) + ':' + this.dateFormatZeroFirst(date.getMinutes());
-  },
-  /**
-   * Convert date-string 'DAY_OF_THE_WEEK, MONTH DAY, YEAR HOURS:MINUTES' to timestamp
-   * @param date_string
-   * @return {String}
-   */
-  dateUnformat: function(date_string) {
-    var date = date_string.substring(4);
-    var month = date.substring(1, 4);
-    var day = date.substring(5, 7);
-    var year = date.substring(9, 13);
-    var hours = date.substring(14, 16);
-    var minutes = date.substring(17, 19);
-
-    var months = this.dateMonths;
-    month = months.indexOf(month) + 1;
-    if (month < 10) month = '0' + month;
-    return year + month + day + hours + minutes;
-  },
-  /**
-   * Convert timestamp to date-string 'DAY_OF_THE_WEEK MONTH DAY YEAR'
-   * @param timestamp
-   * @return {*}
-   */
-  dateFormatShort: function(timestamp) {
-    if (!validator.isValidInt(timestamp)) return timestamp;
-
-    var date = new Date(timestamp*1);
-    var today = new Date();
-    if (date.toDateString() === today.toDateString()) {
-      return 'Today ' + date.toLocaleTimeString();
-    }
-    return date.toDateString();
-  },
-  /**
-   * Convert date-string 'DAY_OF_THE_WEEK MONTH DAY YEAR' to the timestamp
-   * @param date_string
-   * @return {Number}
-   */
-  dateUnformatShort: function(date_string) {
-    var date = new Date(date_string);
-    return date.getTime();
-  },
-  /**
-   * Convert time in milliseconds to 'HOURS:MINUTES:SECONDS'
-   * @param ms_interval
-   * @return string formatted date
-   */
-  dateFormatInterval:function (ms_interval) {
-    if (!validator.isValidInt(ms_interval)) return ms_interval;
-    var hours = Math.floor(ms_interval / (60 * 60000));
-    var divisor_for_minutes = ms_interval % (60 * 60000);
-    var minutes = Math.floor(divisor_for_minutes / 60000);
-    var divisor_for_seconds = divisor_for_minutes % 60000;
-    var seconds = (divisor_for_seconds / 1000).toFixed(2);
-
-    return (hours < 10 ? '0' : '') + hours + ':' + (minutes < 10 ? '0' : '') + minutes + ':' + (seconds < 10 ? '0' : '') + seconds;
-  },
-  /**
-   * Convert a formatted interval such as '30 secs' or '5 mins' to time in milliseconds
-   * @param formattedDate date string of the form '<value> <unit>'
-   * @return time in milliseconds
-   */
-  dateUnformatInterval: function(formattedDate) {
-    var formattedDateArray = formattedDate.split(' ');
-
-    if (Object.prototype.toString.call( formattedDateArray ) === '[object Array]' && formattedDateArray.length == 2) {
-      var oneMinMs = 60000;
-      var oneHourMs = 3600000;
-      var oneDayMs = 86400000;
-
-      if (formattedDateArray['1'] == 'ms') {
-        return formattedDateArray['0'];
-      } else if (formattedDateArray['1'] == 'secs') {
-        return formattedDateArray['0'] * 1000;
-      } else if (formattedDateArray['1'] == 'mins') {
-        return formattedDateArray['0'] * oneMinMs;
-      } else if (formattedDateArray['1'] == 'hours') {
-        return formattedDateArray['0'] * oneHourMs;
-      } else if (formattedDateArray['1'] == 'days') {
-        return formattedDateArray['0'] * oneDayMs;
-      } else {
-        console.warn('function dateUnformatInterval: Undefined format');
-      }
-    } else {
-      console.warn('function dateUnformatInterval: unexpected format of formattedDate');
-    }
-  },
-  /**
-   * Convert time in milliseconds to a human-readable string:
-   * 30 ms = 30 ms
-   * 300 ms = 300 ms
-   * 999 ms = 999 ms
-   * 1000 ms = 1.00 secs
-   * 3000 ms = 3.00 secs
-   * 35000 ms = 35.00 secs
-   * 350000 ms = 350.00 secs
-   * 999999 ms = 999.99 secs
-   * 1000000 ms = 16.66 mins
-   * 3500000 ms = 58.33 mins
-   * @param time
-   * @return string formatted date
-   */
-  timingFormat:function (time) {
-    var intTime  = parseInt(time);
-    var timeStr = intTime.toString();
-    var lengthOfNumber = timeStr.length;
-    var oneMinMs = 60000;
-    var oneHourMs = 3600000;
-    var oneDayMs = 86400000;
-
-    if (lengthOfNumber < 4) {
-      return time + ' ms';
-    } else if (lengthOfNumber < 7) {
-      time = (time / 1000).toFixed(2);
-      return time + ' secs';
-    } else if (time < oneHourMs) {
-      time = (time / oneMinMs).toFixed(2);
-      return time + ' mins';
-    } else if (time < oneDayMs) {
-      time = (time / oneHourMs).toFixed(2);
-      return time + ' hours';
-    } else {
-      time = (time / oneDayMs).toFixed(2);
-      return time + ' days';
-    }
-  }
-}
\ No newline at end of file
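
Worked examples for the formatters above; the values follow the conversion table in the timingFormat comment (require path per the utils/* convention used in this tree):

    var date = require('utils/date');
    date.dateFormatInterval(125000);  // "00:02:05.00"
    date.timingFormat(999);           // "999 ms"
    date.timingFormat(35000);         // "35.00 secs"
    date.timingFormat(3500000);       // "58.33 mins"
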
diff --git a/branch-1.2/ambari-web/app/utils/db.js b/branch-1.2/ambari-web/app/utils/db.js
deleted file mode 100644
index 96e27b5..0000000
--- a/branch-1.2/ambari-web/app/utils/db.js
+++ /dev/null
@@ -1,359 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-var App = require('app');
-App.db = {};
-
-if (typeof Storage !== 'undefined') {
-  Storage.prototype.setObject = function (key, value) {
-    this.setItem(key, JSON.stringify(value));
-  }
-
-  Storage.prototype.getObject = function (key) {
-    var value = this.getItem(key);
-    return value && JSON.parse(value);
-  }
-} else {
-  // stub for unit testing purposes
-  window.localStorage = {};
-  localStorage.setItem = function (key, val) {
-    this[key] = val;
-  }
-  localStorage.getItem = function (key) {
-    return this[key];
-  }
-  window.localStorage.setObject = function (key, value) {
-    this[key] = value;
-  };
-  window.localStorage.getObject = function (key, value) {
-    return this[key];
-  };
-}
-
-App.db.cleanUp = function () {
-  console.log('TRACE: Entering db:cleanup function');
-  App.db.data = {
-    'app': {
-      'loginName': '',
-      'authenticated': false
-    },
-    'Installer' : {},
-    'AddHost' : {},
-    'AddService' : {}
-  };
-  console.log("In cleanup./..");
-  localStorage.setObject('ambari', App.db.data);
-};
-
-// initialize local storage on first load (and after a cleanup)
-if (localStorage.getObject('ambari') == null) {
-  console.log('doing a cleanup');
-  App.db.cleanUp();
-}
-
-/*
- * setter methods
- */
-
-App.db.setLoginName = function (name) {
-  console.log('TRACE: Entering db:setLoginName function');
-  App.db.data = localStorage.getObject('ambari');
-  App.db.data.app.loginName = name;
-  localStorage.setObject('ambari', App.db.data);
-};
-
-App.db.setAmbariStacks = function (stacks) {
-  console.log('TRACE: Entering db:setAmbariStacks function');
-  App.db.data = localStorage.getObject('ambari');
-  App.db.data.app.stacks = stacks;
-  localStorage.setObject('ambari', App.db.data);
-};
-
-/**
- * Set user model to db
- * @param user
- */
-App.db.setUser = function (user) {
-  console.log('TRACE: Entering db:setUser function');
-  App.db.data = localStorage.getObject('ambari');
-  App.db.data.app.user = user;
-  localStorage.setObject('ambari', App.db.data);
-};
-
-App.db.setAuthenticated = function (authenticated) {
-  console.log('TRACE: Entering db:setAuthenticated function');
-
-  App.db.data = localStorage.getObject('ambari');
-  console.log('present value of authentication is: ' + App.db.data.app.authenticated);
-  console.log('desired value of authentication is: ' + authenticated);
-  App.db.data.app.authenticated = authenticated;
-  localStorage.setObject('ambari', App.db.data);
-  App.db.data = localStorage.getObject('ambari');
-  console.log('Now present value of authentication is: ' + App.db.data.app.authenticated);
-};
-
-App.db.setAllHostNames = function (hostNames) {
-  console.log('TRACE: Entering db:setAllHostNames function');
-  App.db.data = localStorage.getObject('ambari');
-  App.db.data.Installer.hostNames = hostNames;
-  localStorage.setObject('ambari', App.db.data);
-};
-
-App.db.setAllHostNamesPattern = function (hostNames) {
-  console.log('TRACE: Entering db:setAllHostNamesPattern function');
-  App.db.data = localStorage.getObject('ambari');
-  App.db.data.Installer.hostNamesPattern = hostNames;
-  localStorage.setObject('ambari', App.db.data);
-};
-
-App.db.setHosts = function (hostInfo) {
-  console.log('TRACE: Entering db:setHosts function');
-  App.db.data = localStorage.getObject('ambari');
-  App.db.data.Installer.hostInfo = hostInfo;
-  localStorage.setObject('ambari', App.db.data);
-};
-
-App.db.setInstallOptions = function(installOptions){
-  console.log('TRACE: Entering db:setInstallOptions function');
-  App.db.data = localStorage.getObject('ambari');
-  App.db.data.Installer.installOptions = installOptions;
-  localStorage.setObject('ambari', App.db.data);
-};
-
-App.db.setBootStatus = function (status) {
-  console.log('TRACE: Entering db:setBootStatus function');
-  App.db.data = localStorage.getObject('ambari');
-  App.db.data.Installer.bootStatus = status;
-  localStorage.setObject('ambari', App.db.data);
-};
-
-App.db.removeHosts = function (hostInfo) {
-  console.log('TRACE: Entering db:removeHosts function');
-  var hostList = App.db.getHosts();
-  hostInfo.forEach(function (_hostInfo) {
-    var host = _hostInfo.hostName;
-    delete hostList[host];
-  });
-  App.db.setHosts(hostList);
-};
-
-App.db.setService = function (serviceInfo) {
-  App.db.data = localStorage.getObject('ambari');
-  App.db.data.Installer.serviceInfo = serviceInfo;
-  localStorage.setObject('ambari', App.db.data);
-};
-
-App.db.setSelectedServiceNames = function (serviceNames) {
-  App.db.data = localStorage.getObject('ambari');
-  App.db.data.Installer.selectedServiceNames = serviceNames;
-  localStorage.setObject('ambari', App.db.data);
-};
-
-App.db.setClientsForSelectedServices = function (clientInfo) {
-  App.db.data = localStorage.getObject('ambari');
-  App.db.data.Installer.clientInfo = clientInfo;
-  localStorage.setObject('ambari', App.db.data);
-};
-
-App.db.setMasterComponentHosts = function (masterComponentHosts) {
-  App.db.data = localStorage.getObject('ambari');
-  App.db.data.Installer.masterComponentHosts = masterComponentHosts;
-  localStorage.setObject('ambari', App.db.data);
-};
-
-App.db.setSlaveComponentHosts = function (slaveComponentHosts) {
-  App.db.data = localStorage.getObject('ambari');
-  App.db.data.Installer.slaveComponentHosts = slaveComponentHosts;
-  localStorage.setObject('ambari', App.db.data);
-};
-
-App.db.setSlaveProperties = function (slaveProperties) {
-  App.db.data = localStorage.getObject('ambari');
-  App.db.data.Installer.slaveProperties = slaveProperties;
-  localStorage.setObject('ambari', App.db.data);
-};
-
-App.db.setServiceConfigs = function (serviceConfigs) {
-  App.db.data = localStorage.getObject('ambari');
-  App.db.data.Installer.serviceConfigs = serviceConfigs;
-  localStorage.setObject('ambari', App.db.data);
-};
-
-App.db.setAdvancedServiceConfig = function(serviceConfigs) {
-  App.db.data = localStorage.getObject('ambari');
-  App.db.data.Installer.advanceServiceConfigs = serviceConfigs;
-  localStorage.setObject('ambari', App.db.data);
-};
-
-App.db.setServiceConfigProperties = function (configProperties) {
-  App.db.data = localStorage.getObject('ambari');
-  App.db.data.Installer.configProperties = configProperties;
-  localStorage.setObject('ambari', App.db.data);
-};
-
-App.db.setCluster = function (status) {
-  App.db.data = localStorage.getObject('ambari');
-  App.db.data.Installer.clusterStatus = status;
-  console.log('db.setClusterStatus called: ' + JSON.stringify(status));
-  localStorage.setObject('ambari', App.db.data);
-};
-
-/**
- * Set current step value for specified Wizard Type
- * @param wizardType
- * @param currentStep
- */
-App.db.setWizardCurrentStep = function (wizardType, currentStep) {
-  console.log('TRACE: Entering db:setWizardCurrentStep function');
-
-  App.db.data[wizardType.capitalize()].currentStep = currentStep;
-
-  localStorage.setObject('ambari', App.db.data);
-};
-
-
-/*
- *  getter methods
- */
-
-/**
- * Get user model from db
- * @return {*}
- */
-App.db.getUser = function () {
-  console.log('TRACE: Entering db:getUser function');
-  App.db.data = localStorage.getObject('ambari');
-  return App.db.data.app.user;
-};
-
-App.db.getLoginName = function () {
-  console.log('Trace: Entering db:getLoginName function');
-  App.db.data = localStorage.getObject('ambari');
-  return App.db.data.app.loginName;
-};
-
-App.db.getAuthenticated = function () {
-  console.log('Trace: Entering db:getAuthenticated function');
-  App.db.data = localStorage.getObject('ambari');
-  return App.db.data.app.authenticated;
-};
-
-App.db.getAmbariStacks = function () {
-  console.log('TRACE: Entering db:getAmbariStacks function');
-  App.db.data = localStorage.getObject('ambari');
-  return App.db.data.app.stacks;
-};
-
-/**
- * Return current step for specified Wizard Type
- * @param wizardType
- * @return {*}
- */
-App.db.getWizardCurrentStep = function (wizardType) {
-  console.log('Trace: Entering db:getWizardCurrentStep function for ', wizardType);
-  if (App.db.data[wizardType.capitalize()]) {
-    return App.db.data[wizardType.capitalize()].currentStep;
-  }
-  return 0;
-};
-
-App.db.getAllHostNames = function () {
-  console.log('TRACE: Entering db:getHostNames function');
-  App.db.data = localStorage.getObject('ambari');
-  return App.db.data.Installer.hostNames;
-};
-
-App.db.getAllHostNamesPattern = function () {
-  console.log('TRACE: Entering db:getHostNamesPattern function');
-  App.db.data = localStorage.getObject('ambari');
-  return App.db.data.Installer.hostNamesPattern;
-};
-App.db.getInstallOptions = function () {
-  console.log('TRACE: Entering db:getInstallOptions function');
-  App.db.data = localStorage.getObject('ambari');
-  return App.db.data.Installer.installOptions;
-};
-
-App.db.isCompleted = function () {
-  App.db.data = localStorage.getObject('ambari');
-  return App.db.data.Installer.completed;
-};
-
-App.db.getHosts = function () {
-  console.log('TRACE: Entering db:getHosts function');
-  App.db.data = localStorage.getObject('ambari');
-  return App.db.data.Installer.hostInfo;
-};
-
-App.db.getBootStatus = function () {
-  console.log('TRACE: Entering db:getBootStatus function');
-  App.db.data = localStorage.getObject('ambari');
-  return App.db.data.Installer.bootStatus;
-};
-
-App.db.getService = function () {
-  App.db.data = localStorage.getObject('ambari');
-  return App.db.data.Installer.serviceInfo;
-};
-
-App.db.getSelectedServiceNames = function () {
-  App.db.data = localStorage.getObject('ambari');
-  return App.db.data.Installer.selectedServiceNames;
-};
-
-App.db.getClientsForSelectedServices = function () {
-  App.db.data = localStorage.getObject('ambari');
-  return App.db.data.Installer.clientInfo;
-};
-
-App.db.getMasterComponentHosts = function () {
-  App.db.data = localStorage.getObject('ambari');
-  return App.db.data.Installer.masterComponentHosts;
-};
-
-App.db.getSlaveComponentHosts = function () {
-  App.db.data = localStorage.getObject('ambari');
-  return App.db.data.Installer.slaveComponentHosts;
-};
-
-App.db.getServiceConfigs = function () {
-  App.db.data = localStorage.getObject('ambari');
-  return App.db.data.Installer.serviceConfigs;
-};
-
-App.db.getAdvancedServiceConfig = function() {
-  App.db.data = localStorage.getObject('ambari');
-  return App.db.data.Installer.advanceServiceConfigs;
-};
-
-App.db.getServiceConfigProperties = function () {
-  App.db.data = localStorage.getObject('ambari');
-  return App.db.data.Installer.configProperties;
-};
-
-App.db.getSlaveProperties = function () {
-  App.db.data = localStorage.getObject('ambari');
-  return App.db.data.Installer.slaveProperties;
-};
-
-App.db.getCluster = function () {
-  console.log('TRACE: Entering db:getCluster function');
-  App.db.data = localStorage.getObject('ambari');
-  return App.db.data.Installer.clusterStatus;
-};
-
-module.exports = App.db;
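
Every setter and getter above follows the same read-modify-write cycle against the single 'ambari' key in localStorage; a minimal sketch:

    var db = require('utils/db');
    db.setLoginName('admin');              // reads 'ambari', mutates app.loginName, writes back
    db.getLoginName();                     // "admin"
    localStorage.getObject('ambari').app;  // { loginName: "admin", authenticated: ..., ... }
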
diff --git a/branch-1.2/ambari-web/app/utils/graph.js b/branch-1.2/ambari-web/app/utils/graph.js
deleted file mode 100644
index 392daef..0000000
--- a/branch-1.2/ambari-web/app/utils/graph.js
+++ /dev/null
@@ -1,338 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-module.exports = {
-  uniformSeries: function () {
-    var series_min_length = 100000000;
-    for (i=0; i<arguments.length; i++) {
-      if (arguments[i].length < series_min_length) {
-        series_min_length = arguments[i].length;
-      }
-    }
-    for (i=0; i<arguments.length; i++) {
-      if (arguments[i].length > series_min_length) {
-        arguments[i].length = series_min_length;
-      }
-    }
-  },
-  /**
-   * Get min, max for X and Y for provided series
-   * @param series
-   * @return {Object}
-   */
-  getExtInSeries: function(series) {
-    var maxY = 0;
-    var maxX = 0;
-    var minX = 2147465647; // sentinel: larger than any expected value
-    var minY = 2147465647;
-    if (series.length > 0) {
-      series.forEach(function(item){
-        if (item.y > maxY) {
-          maxY = item.y;
-        }
-        if (item.y < minY) {
-          minY = item.y;
-        }
-        if (item.x > maxX) {
-          maxX = item.x;
-        }
-        if (item.x < minX) {
-          minX = item.x;
-        }
-      });
-    }
-    return {maxX: maxX, minX: minX, maxY: maxY, minY: minY};
-  },
-  /**
-   * Get min, max for x and Y for all provided series
-   * @param args array of series
-   * @return {Object}
-   */
-  getExtInAllSeries: function(args) {
-    var maxx = [];
-    var minx = [];
-    var maxy = [];
-    var miny = [];
-    for (var i = 0; i < args.length; i++) {
-      var localExt = this.getExtInSeries(args[i]);
-      maxx.push(localExt.maxX);
-      minx.push(localExt.minX);
-      maxy.push(localExt.maxY);
-      miny.push(localExt.minY);
-    }
-    return {
-      maxX: Math.max.apply(null, maxx),
-      minX: Math.min.apply(null, minx),
-      maxY: Math.max.apply(null, maxy),
-      minY: Math.min.apply(null, miny)
-    };
-  },
-  /**
-   * Get coordinates for new circle in the graph
-   * New circle needed to prevent cut on the borders of the graph
-   * List of arguments - series arrays
-   * @return {Object}
-   */
-  getNewCircle: function() {
-    var ext = this.getExtInAllSeries(arguments);
-    var newX;
-    if (ext.minX != 2147465647) {
-      newX = ext.maxX + Math.round((ext.maxX - ext.minX) * 0.2);
-    }
-    else {
-      newX = (new Date()).getTime();
-    }
-    var newY = ext.maxY * 1.2;
-    return {
-      x: newX,
-      y: newY,
-      r: 0,
-      io: 0
-    };
-  },
-  /**
-   *
-   * @param map
-   * @param shuffle
-   * @param reduce
-   * @param w
-   * @param h
-   * @param element
-   * @param legend_id
-   * @param timeline_id
-   */
-  drawJobTimeLine:function (map, shuffle, reduce, w, h, element, legend_id, timeline_id) {
-    map = $.parseJSON(map);
-    shuffle = $.parseJSON(shuffle);
-    reduce = $.parseJSON(reduce);
-    if (!map || !shuffle || !reduce) {
-      console.warn('drawJobTimeLine');
-      return;
-    }
-    this.uniformSeries(map, reduce, shuffle);
-    var ext = this.getExtInAllSeries([map, reduce, shuffle]);
-    var submitTime = ext.minX;
-    var maxX = ext.maxX; // Used on X-axis time stamps
-
-    var graph = new Rickshaw.Graph({
-      width:w,
-      height:h,
-      element:document.querySelector(element),
-      renderer:'area',
-      interpolation: 'step-after',
-      strokeWidth: 2,
-      stroke:true,
-      series:[
-        {
-          data:map,
-          color:'green',
-          name:'maps'
-        },
-        {
-          data:shuffle,
-          color:'lightblue',
-          name:'shuffles'
-        },
-        {
-          data:reduce,
-          color:'steelblue',
-          name:'reduces'
-        }
-      ]
-      }
-    );
-    graph.render();
-
-    var legend = new Rickshaw.Graph.Legend({
-      graph:graph,
-      element:document.getElementById(legend_id)
-    });
-
-    var shelving = new Rickshaw.Graph.Behavior.Series.Toggle({
-      graph:graph,
-      legend:legend
-    });
-
-    var order = new Rickshaw.Graph.Behavior.Series.Order({
-      graph:graph,
-      legend:legend
-    });
-
-    var highlight = new Rickshaw.Graph.Behavior.Series.Highlight({
-      graph:graph,
-      legend:legend
-    });
-
-    var xAxis = new Rickshaw.Graph.Axis.Time({
-      graph:graph,
-      timeUnit: {
-        name: 'Custom',
-        seconds: Math.round((maxX - submitTime) / 2),
-        formatter: function(d) { return (new Date(d)).getTime() / 1000 - submitTime + 's'; }
-      }
-    });
-    xAxis.render();
-
-    var yAxis = new Rickshaw.Graph.Axis.Y({
-      orientation: 'left',
-      element: document.querySelector('#y-axis'),
-      graph:graph
-    });
-    yAxis.render();
-
-    var hoverDetail = new Rickshaw.Graph.HoverDetail({
-      graph:graph,
-      yFormatter:function (y) {
-        return Math.floor(y) + " tasks"
-      }
-    });
-
-    /*var annotator = new Rickshaw.Graph.Annotate({
-      graph:graph,
-      //element:document.getElementById(timeline_id)
-    });*/
-  },
-  /**
-   *
-   * @param mapNodeLocal
-   * @param mapRackLocal
-   * @param mapOffSwitch
-   * @param reduceOffSwitch
-   * @param submitTime
-   * @param w
-   * @param h
-   * @param element
-   * @param legend_id
-   * @param timeline_id
-   */
-  drawJobTasks:function (mapNodeLocal, mapRackLocal, mapOffSwitch, reduceOffSwitch, submitTime, w, h, element, legend_id, timeline_id) {
-    mapNodeLocal = $.parseJSON(mapNodeLocal);
-    mapRackLocal = $.parseJSON(mapRackLocal);
-    mapOffSwitch = $.parseJSON(mapOffSwitch);
-    reduceOffSwitch = $.parseJSON(reduceOffSwitch);
-    if (!mapNodeLocal || !mapRackLocal || !mapOffSwitch || !reduceOffSwitch) {
-      console.warn('drawJobTasks');
-      return;
-    }
-    this.uniformSeries(mapNodeLocal, mapRackLocal, mapOffSwitch, reduceOffSwitch);
-    var newC = this.getNewCircle(mapNodeLocal, mapRackLocal, mapOffSwitch, reduceOffSwitch);
-    var ext = this.getExtInAllSeries([mapNodeLocal, mapRackLocal, mapOffSwitch, reduceOffSwitch]);
-    var maxX = ext.maxX; // Used on X-axis time stamps
-    mapNodeLocal.push(newC);
-    mapRackLocal.push(newC);
-    mapOffSwitch.push(newC);
-    reduceOffSwitch.push(newC);
-    var graph = new Rickshaw.Graph({
-      width:w,
-      height:h,
-      element:document.querySelector(element),
-      renderer:'scatterplot',
-      stroke:true,
-      series:[
-        {
-          data:mapNodeLocal,
-          color:'green',
-          name:'node_local_map'
-        },
-        {
-          data:mapRackLocal,
-          color:'#66B366',
-          name:'rack_local_map'
-        },
-        {
-          data:mapOffSwitch,
-          color:'brown',
-          name:'off_switch_map'
-        },
-        {
-          data:reduceOffSwitch,
-          color:'steelblue',
-          name:'reduce'
-        }
-      ]
-    });
-    graph.render();
-    var legend = new Rickshaw.Graph.Legend({
-      graph:graph,
-      element:document.getElementById(legend_id)
-    });
-
-    var shelving = new Rickshaw.Graph.Behavior.Series.Toggle({
-      graph:graph,
-      legend:legend
-    });
-
-    var order = new Rickshaw.Graph.Behavior.Series.Order({
-      graph:graph,
-      legend:legend
-    });
-
-    var highlight = new Rickshaw.Graph.Behavior.Series.Highlight({
-      graph:graph,
-      legend:legend
-    });
-
-    var ticksTreatment = 'glow';
-
-    var xAxis = new Rickshaw.Graph.Axis.Time({
-      graph:graph,
-      timeUnit: {
-        name: 'Custom',
-        seconds: Math.round((maxX - submitTime) / 2),
-        formatter: function(d) { return (new Date(d)).getTime() / 1000 - submitTime + 's'; }
-      },
-      ticksTreatment:ticksTreatment
-    });
-    xAxis.render();
-
-    var yAxis = new Rickshaw.Graph.Axis.Y({
-      graph:graph,
-      ticksTreatment:ticksTreatment,
-      orientation: 'left',
-      element: document.querySelector('#y-axis2'),
-      tickFormat: function(y) { return y / 1000 + 's' }
-    });
-    yAxis.render();
-
-    var hoverDetail = new Rickshaw.Graph.HoverDetail({
-      graph:graph,
-      xFormatter:function (x) {
-        return (x - submitTime) + 's'
-      },
-      yFormatter:function (y) {
-        return y / 1000 + 's'
-      },
-      formatter:function (series, x, y, formattedX, formattedY, d) {
-        var bytesFormatter = function(y) {
-          if (y >= 1125899906842624)  { return Math.floor(10 * y / 1125899906842624)/10 + " PB" }
-          else if (y >= 1099511627776){ return Math.floor(10 * y / 1099511627776)/10 + " TB" }
-          else if (y >= 1073741824)   { return Math.floor(10 * y / 1073741824)/10 + " GB" }
-          else if (y >= 1048576)      { return Math.floor(10 * y / 1048576)/10 + " MB" }
-          else if (y >= 1024)         { return Math.floor(10 * y / 1024)/10 + " KB" }
-          else                        { return y + " B"}
-        };
-        var swatch = '<span class="detail_swatch" style="background-color: ' + series.color + '"></span>';
-        return swatch + d.value.label +
-          '<br>Run-time: ' + formattedY + '<br>Wait-time: ' + formattedX +
-          '<br>I/O: ' + bytesFormatter(d.value.io) + '<br>Status: ' + d.value.status;
-      }
-
-    });
-  }
-}
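
uniformSeries trims every series in place to the shortest length so that Rickshaw receives aligned data; a small demonstration:

    var graph = require('utils/graph');
    var maps = [{x: 1, y: 1}, {x: 2, y: 2}, {x: 3, y: 3}];
    var reduces = [{x: 1, y: 4}, {x: 2, y: 5}];
    graph.uniformSeries(maps, reduces);
    console.log(maps.length, reduces.length);  // 2 2
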
diff --git a/branch-1.2/ambari-web/app/utils/helper.js b/branch-1.2/ambari-web/app/utils/helper.js
deleted file mode 100644
index 1d70c8a..0000000
--- a/branch-1.2/ambari-web/app/utils/helper.js
+++ /dev/null
@@ -1,354 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-String.prototype.trim = function () {
-  return this.replace(/^\s\s*/, '').replace(/\s\s*$/, '');
-};
-
-/**
- * convert ip address string to long int
- * @return {*}
- */
-String.prototype.ip2long = function () {
-  // *     example 1: ip2long('192.0.34.166');
-  // *     returns 1: 3221234342
-  // *     example 2: ip2long('0.0xABCDEF');
-  // *     returns 2: 11259375
-  // *     example 3: ip2long('255.255.255.256');
-  // *     returns 3: false
-  var i = 0;
-  // PHP allows decimal, octal, and hexadecimal IP components.
-  // PHP allows from 1 (e.g. 127) to 4 (e.g. 127.0.0.1) components.
-  var IP = this.match(/^([1-9]\d*|0[0-7]*|0x[\da-f]+)(?:\.([1-9]\d*|0[0-7]*|0x[\da-f]+))?(?:\.([1-9]\d*|0[0-7]*|0x[\da-f]+))?(?:\.([1-9]\d*|0[0-7]*|0x[\da-f]+))?$/i); // Verify IP format.
-  if (!IP) {
-    return false; // Invalid format.
-  }
-  // Reuse IP variable for component counter.
-  IP[0] = 0;
-  for (i = 1; i < 5; i += 1) {
-    IP[0] += !!((IP[i] || '').length);
-    IP[i] = parseInt(IP[i]) || 0;
-  }
-  // Continue to use IP for overflow values.
-  // PHP does not allow any component to overflow.
-  IP.push(256, 256, 256, 256);
-  // Recalculate overflow of last component supplied to make up for missing components.
-  IP[4 + IP[0]] *= Math.pow(256, 4 - IP[0]);
-  if (IP[1] >= IP[5] || IP[2] >= IP[6] || IP[3] >= IP[7] || IP[4] >= IP[8]) {
-    return false;
-  }
-  return IP[1] * (IP[0] === 1 || 16777216) + IP[2] * (IP[0] <= 2 || 65536) + IP[3] * (IP[0] <= 3 || 256) + IP[4] * 1;
-};
-
-String.prototype.capitalize = function () {
-  return this.charAt(0).toUpperCase() + this.slice(1);
-}
-
-Em.CoreObject.reopen({
-  t:function (key, attrs) {
-    return Em.I18n.t(key, attrs)
-  }
-});
-
-Em.Handlebars.registerHelper('log', function (variable) {
-  console.log(variable);
-});
-
-Em.Handlebars.registerHelper('warn', function (variable) {
-  console.warn(variable);
-});
-
-Em.Handlebars.registerHelper('highlight', function (property, words, fn) {
-  var context = (fn.contexts && fn.contexts[0]) || this;
-  property = Em.Handlebars.getPath(context, property, fn);
-
-  words = words.split(";");
-
-//  if (highlightTemplate == undefined) {
-  var highlightTemplate = "<b>{0}</b>";
-//  }
-
-  words.forEach(function (word) {
-    var searchRegExp = new RegExp("\\b" + word + "\\b", "gi");
-    property = property.replace(searchRegExp, function (found) {
-      return highlightTemplate.format(found);
-    });
-  });
-
-  return new Em.Handlebars.SafeString(property);
-})
-/**
- * Replace {i} with argument. where i is number of argument to replace with
- * @return {String}
- */
-String.prototype.format = function () {
-  var args = arguments;
-  return this.replace(/{(\d+)}/g, function (match, number) {
-    return typeof args[number] != 'undefined' ? args[number] : match;
-  });
-};
-
-String.prototype.highlight = function (words, highlightTemplate) {
-  var self = this;
-  if (highlightTemplate == undefined) {
-    highlightTemplate = "<b>{0}</b>";
-  }
-
-  words.forEach(function (word) {
-    var searchRegExp = new RegExp("\\b" + word + "\\b", "gi");
-    self = self.replace(searchRegExp, function (found) {
-      return highlightTemplate.format(found);
-    });
-  });
-
-  return self;
-};
-
-/**
- * Convert byte size to other metrics.
- * @param {Number} precision  Number to adjust precision of return value. Default is 0.
- * @param {String} parseType  JS method name for parse string to number. Default is "parseInt".
- * @remarks The parseType argument can be "parseInt" or "parseFloat".
- * @return {String} Returns converted value with abbreviation.
- */
-Number.prototype.bytesToSize = function (precision, parseType/* = 'parseInt' */) {
-  if (arguments[1] === undefined) {
-    parseType = 'parseInt';
-  }
-
-  var value = this;
-  var sizes = ['Bytes', 'KB', 'MB', 'GB', 'TB', 'PB'];
-  var posttxt = 0;
-  if (this == 0) return 'n/a';
-  while (value >= 1024) {
-    posttxt++;
-    value = value / 1024;
-  }
-  var parsedValue = window[parseType](value);
-
-  return parsedValue.toFixed(precision) + " " + sizes[posttxt];
-}
-
-Number.prototype.toDaysHoursMinutes = function () {
-  var formatted = {},
-    dateDiff = this,
-    secK = 1000, //ms
-    minK = 60 * secK, // sec
-    hourK = 60 * minK, // sec
-    dayK = 24 * hourK;
-
-  dateDiff = parseInt(dateDiff);
-  formatted.d = Math.floor(dateDiff / dayK);
-  dateDiff -= formatted.d * dayK;
-  formatted.h = Math.floor(dateDiff / hourK);
-  dateDiff -= formatted.h * hourK;
-  formatted.m = (dateDiff / minK).toFixed(2);
-
-  return formatted;
-}
-
-Number.prototype.countPercentageRatio = function (maxValue) {
-  var usedValue = this;
-  return Math.round((usedValue / maxValue) * 100) + "%";
-}
-
-Number.prototype.long2ip = function () {
-  // http://kevin.vanzonneveld.net
-  // +   original by: Waldo Malqui Silva
-  // *     example 1: long2ip( 3221234342 );
-  // *     returns 1: '192.0.34.166'
-  if (!isFinite(this))
-    return false;
-
-  return [this >>> 24, this >>> 16 & 0xFF, this >>> 8 & 0xFF, this & 0xFF].join('.');
-}
-
-/**
- * Formats the given URL template by replacing keys in 'substitutes'
- * with their values. If not in App.testMode, the testUrl is used.
- *
- * The substitution points in urlTemplate should be of format "...{key}..."
- * For example "http://apache.org/{projectName}".
- * The substitutes can then be{projectName: "Ambari"}.
- *
- * Keys which will be automatically taken care of are:
- * {
- *  hostName: App.test_hostname,
- *  fromSeconds: ..., // 1 hour back from now
- *  toSeconds: ..., // now
- *  stepSeconds: ..., // 15 seconds by default
- * }
- *
- * @param {String} urlTemplate  URL template on which substitutions are to be made
- * @param substitutes Object containing keys to be replaced with respective values
- * @param {String} testUrl  URL to be used if app is not in test mode (!App.testMode)
- * @return {String} Formatted URL
- */
-App.formatUrl = function (urlTemplate, substitutes, testUrl) {
-  var formatted = urlTemplate;
-  if (urlTemplate) {
-    if (!App.testMode) {
-      var toSeconds = Math.round(new Date().getTime() / 1000);
-      var allSubstitutes = {
-        toSeconds:toSeconds,
-        fromSeconds:toSeconds - 3600, // 1 hour back
-        stepSeconds:15, // 15 seconds
-        hostName:App.test_hostname
-      };
-      jQuery.extend(allSubstitutes, substitutes);
-      for (var key in allSubstitutes) {
-        var useKey = '{' + key + '}';
-        formatted = formatted.replace(new RegExp(useKey, 'g'), allSubstitutes[key]);
-      }
-    } else {
-      formatted = testUrl;
-    }
-  }
-  return formatted;
-}
-
-/**
- * Certain variables can have JSON in string
- * format, or in JSON format itself.
- */
-App.parseJSON = function (value) {
-  if (typeof value == "string") {
-    return jQuery.parseJSON(value);
-  }
-  return value;
-};
-
-App.format = {
-  role:function (role) {
-    switch (role) {
-      case 'ZOOKEEPER_SERVER':
-        return 'ZooKeeper Server';
-      case 'ZOOKEEPER_CLIENT':
-        return 'ZooKeeper Client';
-      case 'NAMENODE':
-        return 'NameNode';
-      case 'NAMENODE_SERVICE_CHECK':
-        return 'NameNode Check';
-      case 'DATANODE':
-        return 'DataNode';
-      case 'HDFS_SERVICE_CHECK':
-        return 'HDFS Check';
-      case 'SECONDARY_NAMENODE':
-        return 'SNameNode';
-      case 'HDFS_CLIENT':
-        return 'HDFS Client';
-      case 'HBASE_MASTER':
-        return 'HBase Master';
-      case 'HBASE_REGIONSERVER':
-        return 'HBase RegionServer';
-      case 'HBASE_CLIENT':
-        return 'HBase Client';
-      case 'JOBTRACKER':
-        return 'JobTracker';
-      case 'TASKTRACKER':
-        return 'TaskTracker';
-      case 'MAPREDUCE_CLIENT':
-        return 'MapReduce Client';
-      case 'JAVA_JCE':
-        return 'Java JCE';
-      case 'KERBEROS_SERVER':
-        return 'Kerberos Server';
-      case 'KERBEROS_CLIENT':
-        return 'Kerberos Client';
-      case 'KERBEROS_ADMIN_CLIENT':
-        return 'Kerberos Admin Client';
-      case 'HADOOP_CLIENT':
-        return 'Hadoop Client';
-      case 'JOBTRACKER_SERVICE_CHECK':
-        return 'JobTracker Check';
-      case 'MAPREDUCE_SERVICE_CHECK':
-        return 'MapReduce Check';
-      case 'ZOOKEEPER_SERVICE_CHECK':
-        return 'ZooKeeper Check';
-      case 'ZOOKEEPER_QUORUM_SERVICE_CHECK':
-        return 'ZK Quorum Check';
-      case  'HBASE_SERVICE_CHECK':
-        return 'HBase Check';
-      case 'MYSQL_SERVER':
-        return 'MySQL Server';
-      case 'HIVE_SERVER':
-        return 'HiveServer2';
-      case 'HIVE_METASTORE':
-        return 'Hive Metastore';
-      case 'HIVE_CLIENT':
-        return 'Hive Client';
-      case 'HIVE_SERVICE_CHECK':
-        return 'Hive Check';
-      case 'HCAT':
-        return 'HCat';
-      case 'HCAT_SERVICE_CHECK':
-        return 'HCat Check';
-      case 'OOZIE_CLIENT':
-        return 'Oozie Client';
-      case 'OOZIE_SERVER':
-        return 'Oozie Server';
-      case 'OOZIE_SERVICE_CHECK':
-        return 'Oozie Check';
-      case 'PIG':
-        return 'Pig';
-      case 'PIG_SERVICE_CHECK':
-        return 'Pig Check';
-      case 'SQOOP':
-        return 'Sqoop';
-      case 'SQOOP_SERVICE_CHECK':
-        return 'Sqoop Check';
-      case 'WEBHCAT_SERVER':
-        return 'WebHCat Server';
-      case 'WEBHCAT_SERVICE_CHECK':
-        return 'WebHCat Check';
-      case 'NAGIOS_SERVER':
-        return 'Nagios Server';
-      case 'GANGLIA_SERVER':
-        return 'Ganglia Server';
-      case 'GANGLIA_MONITOR':
-        return 'Ganglia Monitor';
-      case 'GMOND_SERVICE_CHECK':
-        return 'Gmond Check'
-      case 'GMETAD_SERVICE_CHECK':
-        return 'Gmetad Check';
-      case 'DECOMMISSION_DATANODE':
-        return 'Decommission DataNode';
-    }
-  },
-
-  /**
-   * PENDING - Not queued yet for a host
-   * QUEUED - Queued for a host
-   * IN_PROGRESS - Host reported it is working
-   * COMPLETED - Host reported success
-   * FAILED - Failed
-   * TIMEDOUT - Host did not respond in time
-   * ABORTED - Operation was abandoned
-   */
-  taskStatus:function (_taskStatus) {
-    return _taskStatus.toLowerCase();
-  }
-};
-
-Array.prototype.removeAll = function(array){
-  var temp = array;
-  for(var i = 0 ; i < array.length ; i++ ){
-    temp = temp.without(array[i]);
-  }
-  return temp;
-};
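
Worked examples for the prototype extensions above; the ip2long values repeat those given in its own comment:

    '{0} of {1}'.format(2, 3);               // "2 of 3"
    '192.0.34.166'.ip2long();                // 3221234342
    (3221234342).long2ip();                  // "192.0.34.166"
    (1048576).bytesToSize(1, 'parseFloat');  // "1.0 MB"
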
diff --git a/branch-1.2/ambari-web/app/utils/http_client.js b/branch-1.2/ambari-web/app/utils/http_client.js
deleted file mode 100644
index d95f93d..0000000
--- a/branch-1.2/ambari-web/app/utils/http_client.js
+++ /dev/null
@@ -1,151 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-/**
- * App.HttpClient perform an ajax request
- */
-App.HttpClient = Em.Object.create({
-
-  /**
-   *
-   * @param jqXHR
-   * @param textStatus
-   * @param errorThrown
-   */
-  defaultErrorHandler: function (jqXHR, textStatus, errorThrown) {
-    var json = $.parseJSON(jqXHR.responseText);
-    if (json) {
-      Em.assert("HttpClient:", json);
-    } else {
-      Em.assert("HttpClient:", errorThrown);
-    }
-  },
-
-  emptyFunc: function () {
-  },
-
-  /**
-   * @param {string} url
-   * @param {Object} ajaxOptions
-   * @param {App.ServerDataMapper} mapper - json processor
-   * @param {function} errorHandler
-   * @param {number} interval - request frequency (not used by this method)
-   */
-  request: function (url, ajaxOptions, mapper, errorHandler) {
-
-    if (!errorHandler) {
-      errorHandler = this.defaultErrorHandler;
-    }
-
-    var xhr = new XMLHttpRequest();
-    var curTime = new Date().getTime();
-
-    xhr.open('GET', url + (url.indexOf('?') >= 0 ? '&_=' : '?_=') + curTime, true);
-    xhr.send(null);
-
-    this.onReady(xhr, "", ajaxOptions, mapper, errorHandler);
-  },
-
-  /*
-   This function checks if we get response from server
-   Not using onreadystatechange cuz of possible closure
-   */
-  onReady: function (xhr, tm, tmp_val, mapper, errorHandler) {
-    var self = this;
-    clearTimeout(tm);
-    var timeout = setTimeout(function () {
-      if (xhr.readyState == 4) {
-        if (xhr.status == 200) {
-          try {
-            App.store.commit();
-          } catch (err) {}
-          mapper.map($.parseJSON(xhr.responseText));
-          tmp_val.complete.call(self);
-          xhr.abort();
-        } else {
-          errorHandler(xhr , "error", xhr.statusText);
-        }
-
-        tmp_val = null;
-        xhr = self.emptyFunc();
-        clearTimeout(timeout);
-        timeout = null;
-
-      } else {
-        self.onReady(xhr, timeout, tmp_val, mapper, errorHandler);
-      }
-    }, 10);
-  },
-
-  /**
-   * @param {string} url
-   * @param {App.ServerDataMapper} mapper - json processor
-   * @param {Object} data - ajax data property
-   * @param {function} errorHandler
-   * @param {number} interval - request polling period in ms
-   */
-  get: function (url, mapper, data, errorHandler, interval) {
-    var eHandler = data.complete; // captured but unused
-    if (!errorHandler && data.error) {
-      errorHandler = data.error;
-    }
-    var client = this;
-    var request = function () {
-      client.request(url, data, mapper, errorHandler);
-      url=null;
-      data=null;
-      mapper=null;
-      errorHandler=null;
-    }
-
-    interval = "" + interval;
-    if (interval.match(/\d+/)) {
-      $.periodic({period: interval}, request);
-    } else {
-      request();
-    }
-  },
-
-  /**
-   * @param {string} url
-   * @param {Object} data - ajax data property
-   * @param {App.ServerDataMapper} mapper - json processor
-   * @param {function} errorHandler
-   * @param {number} interval - request polling period in ms
-   */
-  post: function (url, data, mapper, errorHandler, interval) {
-    this.get(url, data, mapper, errorHandler, interval);
-  }
-
-//  not realized yet
-//  put:function (url, mapper, errorHandler) {
-//    this.request(url, {}, mapper, errorHandler);
-//  },
-//
-//  delete:function (url, mapper, errorHandler) {
-//    this.request(url, {}, mapper, errorHandler);
-//  }
-});
-
-/*App.HttpClient.get(
- 'http://nagiosserver/hdp/nagios/nagios_alerts.php?q1=alerts&alert_type=all',
- App.alertsMapper,
- { dataType: 'jsonp', jsonp: 'jsonp' }
- );*/
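
A hypothetical polling call building on the commented example above; the URL and mapper are placeholders, and a numeric interval routes the request through $.periodic:

    App.HttpClient.get(
      '/api/clusters/c1/hosts',                              // placeholder URL
      App.hostsMapper,                                       // placeholder mapper
      { complete: function () { console.log('mapped'); } },  // required by onReady
      null,                                                  // falls back to the default error handler
      15000                                                  // repeat the request every 15 seconds
    );
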
diff --git a/branch-1.2/ambari-web/app/utils/misc.js b/branch-1.2/ambari-web/app/utils/misc.js
deleted file mode 100644
index 6830a73..0000000
--- a/branch-1.2/ambari-web/app/utils/misc.js
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-module.exports = {
-  /**
-   * Convert value from bytes to appropriate measure
-   */
-  formatBandwidth: function (value) {
-    if (value) {
-      if (value < 1024) {
-        value = '<1KB';
-      } else {
-        if (value < 1048576) {
-          value = (value / 1024).toFixed(1) + 'KB';
-        } else  if (value >= 1048576 && value < 1073741824){
-          value = (value / 1048576).toFixed(1) + 'MB';
-        } else {
-          value = (value / 1073741824).toFixed(2) + 'GB';
-        }
-      }
-    }
-    return value;
-  },
-  /**
-   * Convert ip address to integer
-   * @param ip
-   * @return integer
-   */
-  ipToInt: function(ip){
-    // *     example 1: ipToInt('192.0.34.166');
-    // *     returns 1: 3221234342
-    // *     example 2: ipToInt('255.255.255.256');
-    // *     returns 2: false
-    // Verify IP format.
-    if (!/^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$/.test(ip)) {
-      return false; // Invalid format.
-    }
-    // Reuse ip variable for component counter.
-    var d = ip.split('.');
-    return ((((((+d[0])*256)+(+d[1]))*256)+(+d[2]))*256)+(+d[3]);
-  }
-};
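
Examples matching the comments above:

    var misc = require('utils/misc');
    misc.ipToInt('192.0.34.166');     // 3221234342
    misc.ipToInt('255.255.255.256');  // false (invalid format)
    misc.formatBandwidth(2048);       // "2.0KB"
    misc.formatBandwidth(512);        // "<1KB"
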
diff --git a/branch-1.2/ambari-web/app/utils/string_utils.js b/branch-1.2/ambari-web/app/utils/string_utils.js
deleted file mode 100644
index 20a153b..0000000
--- a/branch-1.2/ambari-web/app/utils/string_utils.js
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-module.exports = {
-
-  pad: function(str, len, pad, dir) {
-
-    var STR_PAD_LEFT = 1;
-    var STR_PAD_RIGHT = 2;
-    var STR_PAD_BOTH = 3;
-
-    if (typeof len == "undefined") { len = 0; }
-    if (typeof pad == "undefined") { pad = ' '; }
-    if (typeof dir == "undefined") { dir = STR_PAD_RIGHT; }
-
-    if (len + 1 >= str.length) {
-
-      switch (dir){
-
-        case STR_PAD_LEFT:
-          str = Array(len + 1 - str.length).join(pad) + str;
-          break;
-
-        case STR_PAD_BOTH:
-          var padlen = len - str.length;
-          var right = Math.ceil(padlen / 2);
-          var left = padlen - right;
-          str = Array(left+1).join(pad) + str + Array(right+1).join(pad);
-          break;
-
-        default:
-          str = str + Array(len + 1 - str.length).join(pad);
-          break;
-
-      } // switch
-
-    }
-    return str;
-
-  },
-  underScoreToCamelCase: function(name){
-    var new_name = name.replace(/_\w/g,replacer);
-    function replacer(str, p1, p2, offset, s)
-    {
-      return str[1].toUpperCase();
-    }
-    return new_name;
-  }
-}
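
Usage sketch for the helpers above (direction codes per the constants inside pad: 1 = left, 2 = right, 3 = both):

    var string_utils = require('utils/string_utils');
    string_utils.pad('abc', 7, '*', 1);                 // '****abc'
    string_utils.pad('abc', 7, '*', 2);                 // 'abc****' (the default)
    string_utils.underScoreToCamelCase('hello_world');  // 'helloWorld'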
diff --git a/branch-1.2/ambari-web/app/utils/updater.js b/branch-1.2/ambari-web/app/utils/updater.js
deleted file mode 100644
index 52c573a..0000000
--- a/branch-1.2/ambari-web/app/utils/updater.js
+++ /dev/null
@@ -1,112 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-var App = require('app');
-
-var states = {};
-
-function update(obj, name, isWorking){
-  if(typeof isWorking == 'string' && !obj.get(isWorking)){
-    return false;
-  }
-
-  var state = states[name];
-
-  if(!state){
-    var callback = function(){
-      update(obj, name, isWorking);
-    };
-    states[name] = state = {
-      timeout: null,
-      func: function(){
-        obj[name](callback);
-      },
-      callback: callback
-    };
-  }
-
-  clearTimeout(state.timeout);
-
-  state.timeout = setTimeout(state.func, App.contentUpdateInterval);
-  return true;
-};
-
-function rerun(name){
-  var state = states[name];
-  if(state){
-    clearTimeout(state.timeout);
-    state.func();
-  }
-};
-
-App.updater = {
-
-  /**
-   * Run function periodically with <code>App.contentUpdateInterval</code> delay.
-   * Example 1 (wrong way; updating will not keep running):
-   *    var obj = {
-   *      method: function(callback){
-   *        //do something
-   *      }
-   *    };
-   *    App.updater.run(obj, 'method');
-   *
-   * The method will be called only once, because <code>callback</code> is never invoked. The right way is shown below:
-   *
-   * Example 2:
-   *    var obj = {
-   *      method: function(callback){
-   *        //do something
-   *        callback();
-   *      }
-   *    };
-   *    App.updater.run(obj, 'method');
-   *
-   * The method will be called repeatedly.
-   *
-   * Example 3:
-   *    var obj = {
-   *      method: function(callback){
-   *          //do something
-   *          callback();
-   *      },
-   *      isWorking: true
-   *    };
-   *    App.updater.run(obj, 'method', 'isWorking');
-   *
-   * <code>obj.method</code> will be called automatically.
-   * Warning: you must invoke the <code>callback</code> parameter when the function finishes an iteration;
-   * otherwise the next iteration will never be scheduled.
-   * If <code>isWorking</code> is provided, <code>obj.isWorking</code> is checked before each iteration and
-   * updating stops once it becomes false. Otherwise the method is always called.
-   *
-   * @param obj Object
-   * @param name Method name
-   * @param isWorking Name of a boolean property that is checked before each iteration
-   * @return {*}
-   */
-  run: function(obj, name, isWorking){
-    return update(obj, name, isWorking);
-  },
-
-  immediateRun: function(name){
-    return rerun(name);
-  }
-
-}
diff --git a/branch-1.2/ambari-web/app/utils/validator.js b/branch-1.2/ambari-web/app/utils/validator.js
deleted file mode 100644
index c4a5c2c..0000000
--- a/branch-1.2/ambari-web/app/utils/validator.js
+++ /dev/null
@@ -1,107 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-module.exports = {
-
-  isValidEmail: function(value) {
-    var emailRegex = /^((([a-z]|\d|[!#\$%&'\*\+\-\/=\?\^_`{\|}~]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])+(\.([a-z]|\d|[!#\$%&'\*\+\-\/=\?\^_`{\|}~]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])+)*)|((\x22)((((\x20|\x09)*(\x0d\x0a))?(\x20|\x09)+)?(([\x01-\x08\x0b\x0c\x0e-\x1f\x7f]|\x21|[\x23-\x5b]|[\x5d-\x7e]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(\\([\x01-\x09\x0b\x0c\x0d-\x7f]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF]))))*(((\x20|\x09)*(\x0d\x0a))?(\x20|\x09)+)?(\x22)))@((([a-z]|\d|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(([a-z]|\d|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])*([a-z]|\d|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])))\.)+(([a-z]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(([a-z]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])*([a-z]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])))$/i;
-    return emailRegex.test(value);
-  },
-
-  isValidInt: function(value) {
-    var intRegex = /^-?\d+$/;
-    return intRegex.test(value);
-  },
-
-  isValidFloat: function(value) {
-    if (typeof value === 'string' && value.trim() === '') {
-      return false;
-    }
-    var floatRegex = /^-?(?:\d+|\d{1,3}(?:,\d{3})+)?(?:\.\d+)?$/;
-    return floatRegex.test(value);
-  },
-  /**
-   * Validate that every directory in the list starts with a slash
-   * @param value
-   * @return {Boolean}
-   */
-  isValidDir: function(value){
-    var dirRegex = /^\/[0-9a-z]*/;
-    var dirs = value.replace(/,/g,' ').trim().split(new RegExp("\\s+", "g"));
-    for(var i = 0; i < dirs.length; i++){
-      if(!dirRegex.test(dirs[i])){
-        return false;
-      }
-    }
-    return true;
-  },
-
-  /**
-   * validate ip address with port
-   * @param value
-   * @return {Boolean}
-   */
-  isIpAddress: function(value) {
-    var ipRegex = /^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}(\:[0-9]{1,5})?$/;
-    return ipRegex.test(value);
-  },
-
-  /**
-   * validate hostname
-   * @param value
-   * @return {Boolean}
-   */
-  isHostname: function(value) {
-    var regex = /^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])$/;
-    return regex.test(value);
-  },
-
-  /**
-   * validate domain name with port
-   * @param value
-   * @return {Boolean}
-   */
-  isDomainName: function(value) {
-    var domainRegex = /^([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]{2,6}$/;
-    return domainRegex.test(value);
-  },
-
-  /**
-   * validate username
-   * @param value
-   * @return {Boolean}
-   */
-  isValidUserName: function(value) {
-    var usernameRegex = /^[a-z]([-a-z0-9]{0,30})\$?$/;
-    return usernameRegex.test(value);
-  },
-
-  empty:function (e) {
-    switch (e) {
-      case "":
-      case 0:
-      case "0":
-      case null:
-      case false:
-      case undefined:
-        return true;
-      default:
-        return false;
-    }
-  }
-};
\ No newline at end of file
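
Expected behavior of the validators above, as a short sketch (values are illustrative):

    var validator = require('utils/validator');
    validator.isValidEmail('user@example.com');  // true
    validator.isIpAddress('10.0.0.1:8080');      // true (the port part is optional)
    validator.isValidDir('/hadoop,/data');       // true (comma- or whitespace-separated list)
    validator.isHostname('node-1.example.com');  // true
    validator.empty('0');                        // true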
diff --git a/branch-1.2/ambari-web/app/views.js b/branch-1.2/ambari-web/app/views.js
deleted file mode 100644
index 469439f..0000000
--- a/branch-1.2/ambari-web/app/views.js
+++ /dev/null
@@ -1,121 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-
-// load all views here
-
-require('views/application');
-require('views/common/chart');
-require('views/common/chart/pie');
-require('views/common/chart/linear');
-require('views/common/chart/linear_time');
-require('views/common/modal_popup');
-require('views/common/metric');
-require('views/common/time_range');
-require('views/common/form/field');
-require('views/common/quick_view_link_view');
-require('views/login');
-require('views/main');
-require('views/main/menu');
-require('views/main/charts');
-require('views/main/host');
-require('views/main/host/details');
-require('views/main/host/menu');
-require('views/main/host/summary');
-require('views/main/host/metrics');
-require('views/main/host/add_view');
-require('views/main/host/metrics/cpu');
-require('views/main/host/metrics/disk');
-require('views/main/host/metrics/load');
-require('views/main/host/metrics/memory');
-require('views/main/host/metrics/network');
-require('views/main/host/metrics/processes');
-require('views/main/admin');
-require('views/main/admin/advanced');
-require('views/main/admin/advanced/password');
-require('views/main/admin/audit');
-require('views/main/admin/authentication');
-require('views/main/admin/menu');
-require('views/main/admin/user');
-require('views/main/admin/user/create');
-require('views/main/admin/user/edit');
-require('views/main/admin/user/row');
-require('views/main/dashboard');
-require('views/main/dashboard/service');
-require('views/main/dashboard/service/hdfs');
-require('views/main/dashboard/service/mapreduce');
-require('views/main/dashboard/service/hbase');
-require('views/main/dashboard/service/hive');
-require('views/main/dashboard/service/zookeeper');
-require('views/main/dashboard/service/oozie');
-require('views/main/dashboard/cluster_metrics/cpu');
-require('views/main/dashboard/cluster_metrics/load');
-require('views/main/dashboard/cluster_metrics/memory');
-require('views/main/dashboard/cluster_metrics/network');
-require('views/main/service');
-require('views/main/service/menu');
-require('views/main/service/item');
-require('views/main/service/info/menu');
-require('views/main/service/info/summary');
-require('views/main/service/info/configs');
-require('views/main/service/info/metrics/hdfs/jvm_threads');
-require('views/main/service/info/metrics/hdfs/jvm_heap');
-require('views/main/service/info/metrics/hdfs/io');
-require('views/main/service/info/metrics/hdfs/rpc');
-require('views/main/service/info/metrics/hdfs/file_operations');
-require('views/main/service/info/metrics/hdfs/gc');
-require('views/main/service/info/metrics/hdfs/space_utilization');
-require('views/main/service/info/metrics/hdfs/block_status');
-require('views/main/service/info/metrics/mapreduce/gc');
-require('views/main/service/info/metrics/mapreduce/jvm_threads');
-require('views/main/service/info/metrics/mapreduce/jvm_heap');
-require('views/main/service/info/metrics/mapreduce/rpc');
-require('views/main/service/info/metrics/mapreduce/tasks_running_waiting');
-require('views/main/service/info/metrics/mapreduce/jobs_status');
-require('views/main/service/info/metrics/mapreduce/map_slots');
-require('views/main/service/info/metrics/mapreduce/reduce_slots');
-require('views/main/service/info/metrics/hbase/cluster_requests');
-require('views/main/service/info/metrics/hbase/regionserver_rw_requests');
-require('views/main/service/info/metrics/hbase/regionserver_regions');
-require('views/main/service/info/metrics/hbase/regionserver_queuesize');
-require('views/main/service/info/metrics/hbase/hlog_split_time');
-require('views/main/service/info/metrics/hbase/hlog_split_size');
-require('views/main/service/add_view');
-require('views/main/charts/menu');
-require('views/main/charts/heatmap');
-require('views/main/charts/heatmap/heatmap_rack');
-require('views/main/charts/heatmap/heatmap_host');
-require('views/main/charts/heatmap/heatmap_host_detail');
-require('views/main/apps_view');
-require('views/main/apps/item_view');
-require('views/main/apps/item/bar_view');
-require('views/main/apps/item/dag_view');
-require('views/installer');
-require('views/wizard/controls_view');
-require('views/wizard/step1_view');
-require('views/wizard/step2_view');
-require('views/wizard/step3_view');
-require('views/wizard/step4_view');
-require('views/wizard/step5_view');
-require('views/wizard/step6_view');
-require('views/wizard/step7_view');
-require('views/wizard/step8_view');
-require('views/wizard/step9_view');
-require('views/wizard/step10_view');
-require('views/loading');
diff --git a/branch-1.2/ambari-web/app/views/application.js b/branch-1.2/ambari-web/app/views/application.js
deleted file mode 100644
index d0a7950..0000000
--- a/branch-1.2/ambari-web/app/views/application.js
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-var App = require('app');
-
-App.ApplicationView = Em.View.extend({
-    templateName: require('templates/application')
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/common/chart.js b/branch-1.2/ambari-web/app/views/common/chart.js
deleted file mode 100644
index a565617..0000000
--- a/branch-1.2/ambari-web/app/views/common/chart.js
+++ /dev/null
@@ -1,290 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.ChartView = Em.View.extend({
-  dateFormat:'dd/mm/yy',
-  timeFormat:'h:m',
-  w:900,
-  p:30, // axis padding
-  shift:30,
-  ticksCount:10,
-  pointsLimit:300,
-  areaHeight:30, // px
-  axis:false,
-  x:false,
-  y:false,
-
-  init:function () {
-    this._super();
-    var renderer = this;
-
-    this.x = d3.time.scale().domain([renderer.getMinDate({}), renderer.getMaxDate({})]).range([0, this.get('w')]);
-    this.y = d3.scale.linear().domain([0, 50]).range([this.get('h'), 0]);
-    this.axis = d3.svg.axis().orient("top").scale(this.x).ticks(this.get('ticksCount'));
-  },
-
-  h: function(){
-    return this.get('p') + this.get('nodeAttributes').length * this.get('areaHeight'); //default: 160
-  }.property('nodeAttributes', 'p'),
-
-  activeH: function(){
-    return this.get('nodeAttributes').length * this.get('areaHeight'); // default 160;
-  }.property('h'),
-
-  ruleHeight: function(){
-    return this.get('nodeAttributes').length * this.get('areaHeight');
-  }.property('nodeAttributes'),
-
-  updateY: function(){
-    this.y = d3.scale.linear().domain([0, 50]).range([this.get('h'), 0]);
-  }.observes('h'),
-
-  getMinDate:function (data) {
-    if (data.length)
-      return new Date(Date.parse(data[0]['date']));
-
-    return new Date();
-  },
-
-  getMaxDate:function (data) {
-    if (data.length)
-      return new Date(Date.parse(data[data.length - 1]['date']));
-
-    return new Date();
-  },
-
-  area:function () {
-    var renderer = this;
-    var area = d3.svg.area().x(function (d) {
-      return renderer.x(renderer.getDate(d));
-    });
-
-    area.y1(function (d) {
-      return renderer.get('h') - (renderer.get('h') - renderer.y(d[$(this).attr("getter")])) / renderer.get('koef');
-    });
-
-    area.y0(function (d) {
-      return renderer.get('h');
-    });
-
-    return area;
-  },
-
-  line:function () {
-    var renderer = this;
-    var area = d3.svg.line().x(function (d) {
-      return renderer.x(renderer.getDate(d));
-    })
-      .interpolate("basis");
-
-    area.y(function (d) {
-      return renderer.get('h');
-    });
-
-    return area;
-  },
-
-  /**
-   * Coefficient of vertical compression.
-   * @todo: calculate this
-   * @return {Number}
-   */
-
-  koef:function () {
-    // max value divided by the area height
-    return 2 * (this.get('nodeAttributes').length + 1);
-  }.property('h'),
-
-  getDate:function (d) {
-    return new Date(Date.parse(d.date));
-  },
-
-  dateTimeToDateObject:function (string) {
-    var ren = this;
-    return new Date($.datepicker.parseDateTime(ren.dateFormat, ren.timeFormat, string));
-  },
-
-  getDefaultShift:function () {
-    return  -1 * this.get('areaHeight') * (this.get('nodeAttributes').length - 1);
-  },
-
-  percentScaleXDefaultTranslate:function () {
-    return this.w + 3
-  },
-
-  clearPlot: function(){
-    d3.select(this.get('chartContainerSelector')).selectAll("*").remove();
-  },
-
-  drawPlot:function () {
-    this.clearPlot();
-
-    var renderer = this;
-    this.x.domain([renderer.getMinDate({}), renderer.getMaxDate({})]);
-
-    var rule = $('<div></div>').addClass("rule").css('height', renderer.get('ruleHeight')).mouseenter(function () { $(this).hide(); });
-    $(this.get('chartContainerSelector')).prepend(rule);
-
-    var vis = d3.select(this.get('chartContainerSelector'))
-      .append("svg:svg")
-      .attr("width", renderer.get('w') + 5)
-      .attr("height", renderer.get('h'))
-      .attr("rendererId", this.get('elementId'))
-      .on("mousemove", function () {
-
-        var area = d3.select(this).select("path.line");
-        var d = area.data()[0];
-        var x = d3.mouse(this)[0];
-
-        var renderer = Em.View.views[d3.select(this).attr('rendererId')];
-        var container = $(this).parent();
-        var scale = renderer.x;
-
-        // first move rule
-        var rule = $(container).children("div.rule");
-        rule.css("left", (168 + x) + "px"); // 168 - left container margin
-        rule.show();
-
-        x = x + 5; // some correction
-        var selectedDate = scale.invert(x);
-
-        // search date between this coordinates
-        var prevVal = false;
-        var nextVal = d[0];
-
-        $.each(d, function (i, point) {
-          if (renderer.getDate(point).getTime() <= selectedDate.getTime()) {
-            prevVal = nextVal;
-            nextVal = point;
-          } else {
-            return;
-          }
-        });
-
-        var len1 = Math.abs(x - scale(renderer.getDate(prevVal)));
-        var len2 = Math.abs(x - scale(renderer.getDate(nextVal)));
-
-        var clearing = 5;
-        var pointToShow = false;
-        // if the first point is closer
-        if ((len1 < len2) && (len1 <= clearing)) {
-          pointToShow = prevVal;
-        } else if (len2 <= clearing) { // the second point is closer
-          pointToShow = nextVal;
-        }
-
-        $.each(renderer.get('nodeAttributes'), function (i, v) {
-          var value = !pointToShow ? "" : pointToShow[v] + "%";
-          $(rule).children("div." + v).html(value);
-        });
-      });
-
-    vis.append("svg:g")
-      .attr("class", "axis")
-      .attr("transform", "translate(0," + this.get('p') + ")")
-      .call(renderer.axis)
-
-    $.each(this.get('nodeAttributes'), function (i, v) {
-      var element = $('<div></div>').addClass(v).addClass("stateValue").html("");
-      rule.append(element);
-    });
-
-    var shift = this.getDefaultShift();
-    vis.append("svg:path")
-      .attr("class", "horizontal-line")
-      .data([
-      {}
-    ])
-      .attr("transform", "translate(0," + (shift - this.get('areaHeight')) + ")")
-      .attr("d", renderer.line())
-      .style("stroke", "#000");
-
-    $.each(this.get('nodeAttributes'), function (i, v) {
-      vis.append("svg:path").data([
-        {}
-      ])
-        .attr("class", "line")
-        .attr("getter", v)
-        .attr("transform", "translate(0, " + shift + ")")
-        .attr("d", renderer.area())
-        .style("fill", function () {
-          return "#31a354";
-        });
-
-      vis.append("svg:path")
-        .attr("class", "horizontal-line")
-        .data([
-        {}
-      ])
-        .attr("transform", "translate(0," + shift + ")")
-        .attr("d", renderer.line())
-        .style("stroke", "#000");
-
-      shift += renderer.get('areaHeight');
-    });
-  },
-
-  getData:function (containerId) {
-    return (d3.select(containerId + " path.line").data())[0];
-  },
-
-  drawChart:function () {
-    var containerSel = this.get('chartContainerSelector');
-    var data = this.get('data');
-
-    while (data.length > this.get('pointsLimit')) {
-      data.shift();
-    }
-
-    var renderer = this;
-    var minDate = this.getMinDate(data);
-    var maxDate = this.getMaxDate(data);
-
-    this.x.domain([minDate, maxDate]);
-
-    var ticks = data.length > 10 ? 10 : data.length;
-    this.axis.scale(renderer.x).ticks(ticks);
-
-    // remove the old axis tick marks
-    $(containerSel + " svg g.axis g").remove();
-    d3.select(containerSel + " svg g.axis")
-      .call(this.axis);
-
-    $.each(this.get('nodeAttributes'), function (i, v) {
-      d3.select(containerSel + " path.line[getter='" + v + "']")
-        .data([data])
-        .transition()
-        .attr("d", renderer.area());
-    });
-
-    // lines between charts
-    $(containerSel + " path.horizontal-line").each(
-      function (i, path) {
-        d3.select(path).data([
-          [
-            {date:minDate},
-            {date:maxDate}
-          ]
-        ]).attr("d", renderer.line());
-      }
-    );
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/common/chart/linear.js b/branch-1.2/ambari-web/app/views/common/chart/linear.js
deleted file mode 100644
index 6729263..0000000
--- a/branch-1.2/ambari-web/app/views/common/chart/linear.js
+++ /dev/null
@@ -1,70 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.ChartLinearView = Em.View.extend({
-  w:90,
-  h:90,
-  margin:2,
-
-  classNames: ['linear'],
-
-  init:function () {
-    this._super();
-
-    this.x = d3.scale.linear().domain([0, this.get('data').length]).range([0 + this.get('margin'), this.get('w') - this.get('margin')]);
-    this.y = d3.scale.linear().domain([0, d3.max(this.get('data'))]).range([0 + this.get('margin'), this.get('h') - this.get('margin')]);
-    this.line = d3.svg.line().x(function (d, i) { return this.x(i); }).y(function (d) {return -1 * this.y(d); })
-  },
-
-  didInsertElement:function () {
-    this._super();
-    this.appendSvg();
-  },
-
-  selector:function () {
-    return '#' + this.get('elementId');
-  }.property('elementId'),
-
-  appendSvg:function () {
-    var thisChart = this;
-
-    this.set('svg', d3.select(this.get('selector'))
-      .append("svg:svg")
-      .attr("width", thisChart.get('w'))
-      .attr("height", thisChart.get('h')));
-
-    this.set('g', thisChart.get('svg').append("svg:g").attr("transform", "translate(0, " + thisChart.get('h') + ")"));
-    this.get('g').append("svg:path").attr("d", thisChart.line(thisChart.get('data')));
-
-
-    // axis
-    this.get('g').append("svg:line")
-      .attr("x1", thisChart.x(0))
-      .attr("y1", -1 * thisChart.y(0))
-      .attr("x2", thisChart.x(this.get('data').length))
-      .attr("y2", -1 * thisChart.y(0))
-
-    this.get('g').append("svg:line")
-      .attr("x1", thisChart.x(0))
-      .attr("y1", -1 * thisChart.y(0))
-      .attr("x2", thisChart.x(0))
-      .attr("y2", -1 * thisChart.y(d3.max(thisChart.get('data'))))
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/common/chart/linear_time.js b/branch-1.2/ambari-web/app/views/common/chart/linear_time.js
deleted file mode 100644
index d4e8a13..0000000
--- a/branch-1.2/ambari-web/app/views/common/chart/linear_time.js
+++ /dev/null
@@ -1,838 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-var string_utils = require('utils/string_utils');
-
-/**
- * @class
- * 
- * This is a view which GETs data from a URL and shows it as a time based line
- * graph. Time is shown on the X axis with data series shown on Y axis. It
- * optionally also has the ability to auto refresh itself over a given time
- * interval.
- * 
- * This is an abstract class which is meant to be extended.
- * 
- * Extending classes should override the following:
- * <ul>
- * <li>url - from where the data can be retrieved
- * <li>title - Title to be displayed when showing the chart
- * <li>id - which uniquely identifies this chart in any page
- * <li>#transformToSeries(jsonData) - function to map server data into graph
- * series
- * </ul>
- * 
- * Extending classes could optionally override the following:
- * <ul>
- * <li>#colorForSeries(series) - function to get custom colors per series
- * </ul>
- * 
- * @extends Ember.Object
- * @extends Ember.View
- */
-App.ChartLinearTimeView = Ember.View.extend({
-  templateName: require('templates/main/charts/linear_time'),
-
-  /**
-   * The URL from which data can be retrieved.
-   *
-   * This property must be provided for the graph to show properly.
-   *
-   * @type String
-   * @default null
-   */
-  url: null,
-
-  /**
-   * A unique ID for this chart.
-   *
-   * @type String
-   * @default null
-   */
-  id: null,
-
-  /**
-   * Title to be shown under the chart.
-   *
-   * @type String
-   * @default null
-   */
-  title: null,
-
-  /**
-   * @private
-   *
-   * @type Rickshaw.Graph
-   * @default null
-   */
-  _graph: null,
-
-  _popupGraph: null,
-
-  _seriesProperties: null,
-
-  renderer: 'area',
-
-  popupSuffix: '-popup',
-
-  isPopup: false,
-
-  isReady: false,
-
-  isPopupReady: false,
-
-  hasData: true,
-  /**
-   * Current cluster name
-   */
-  clusterName: function() {
-    return App.router.get('clusterController.clusterName');
-  }.property('App.router.clusterController.clusterName'),
-  /**
-   * Url prefix common for all child views
-   */
-  urlPrefix: function() {
-    return App.apiPrefix + "/clusters/" + this.get('clusterName');
-  }.property('clusterName'),
-
-  /**
-   * Color palette used for this chart
-   *
-   * @private
-   * @type String[]
-   */
-   /*
-  _paletteScheme: [ 'rgba(181,182,169,0.4)', 'rgba(133,135,114,0.4)',
-      'rgba(120,95,67,0.4)', 'rgba(150,85,126,0.4)',
-      'rgba(70,130,180,0.4)', 'rgba(0,255,204,0.4)',
-      'rgba(255,105,180,0.4)', 'rgba(101,185,172,0.4)',
-      'rgba(115,192,58,0.4)', 'rgba(203,81,58,0.4)' ].reverse(),
-  */
-
-  selector: function () {
-    return '#' + this.get('elementId');
-  }.property('elementId'),
-
-  didInsertElement: function () {
-    this.loadData();
-    this.registerGraph();
-  },
-  registerGraph: function(){
-    var graph = {
-      name: this.get('title'),
-      id: this.get('elementId'),
-      popupId: this.get('id')
-    };
-    App.router.get('updateController.graphs').push(graph);
-  },
-
-  loadData: function() {
-    var validUrl = this.getFormattedUrl();
-    if (validUrl) {
-      var hash = {};
-      hash.url = validUrl;
-      hash.type = 'GET';
-      hash.dataType = 'json';
-      hash.contentType = 'application/json; charset=utf-8';
-      hash.context = this;
-      hash.success = this._refreshGraph;
-      hash.error = function(xhr, textStatus, errorThrown){
-        this.set('isReady', true);
-        if (xhr.readyState == 4 && xhr.status) {
-          textStatus = xhr.status + " " + textStatus;
-        }
-        this._showMessage('warn', this.t('graphs.error.title'), this.t('graphs.error.message').format(textStatus, errorThrown));
-        this.set('isPopup', false);
-        this.set('hasData', false);
-      };
-      jQuery.ajax(hash);
-    }
-  },
-
-  /**
-   * Shows a yellow warning message in place of the chart.
-   *
-   * @param type  Can be any of 'warn', 'error', 'info', 'success'
-   * @param title Bolded title for the message
-   * @param message String representing the message
-   * @type Function
-   */
-  _showMessage: function(type, title, message){
-    var chartOverlayId = '#' + this.id + '-chart';
-    if (this.get('isPopup')) {
-      chartOverlayId += this.get('popupSuffix');
-    }
-    var typeClass;
-    switch (type) {
-      case 'error':
-        typeClass = 'alert-error';
-        break;
-      case 'success':
-        typeClass = 'alert-success';
-        break;
-      case 'info':
-        typeClass = 'alert-info';
-        break;
-      default:
-        typeClass = '';
-        break;
-    }
-    $(chartOverlayId).html('');
-    $(chartOverlayId).append('<div class=\"alert '+typeClass+'\"><strong>'+title+'</strong> '+message+'</div>');
-  },
-
-  /**
-   * Transforms the JSON data retrieved from the server into the series
-   * format that Rickshaw.Graph understands.
-   *
-   * The series object is generally in the following format: [ { name :
-   * "Series 1", data : [ { x : 0, y : 0 }, { x : 1, y : 1 } ] } ]
-   *
-   * Extending classes should override this method.
-   *
-   * @param seriesData
-   *          Data retrieved from the server
-   * @param displayName
-   *          Graph title
-   * @type Function
-   *
-   */
-  transformData: function (seriesData, displayName) {
-    var seriesArray = [];
-    if (seriesData) {
-      // Is it a string?
-      if ("string" == typeof seriesData) {
-        seriesData = JSON.parse(seriesData);
-      }
-      // We have valid data
-      var series = {};
-      series.name = displayName;
-      series.data = [];
-      for ( var index = 0; index < seriesData.length; index++) {
-        series.data.push({
-          x: seriesData[index][1],
-          y: seriesData[index][0]
-        });
-      }
-      return series;
-    }
-  },
-
-  /**
-   * Provides the formatter to use in displaying Y axis.
-   *
-   * The default is Rickshaw.Fixtures.Number.formatKMBT which shows 10K,
-   * 300M etc.
-   *
-   * @type Function
-   */
-  yAxisFormatter: function(y) {
-    var value = Rickshaw.Fixtures.Number.formatKMBT(y);
-    if (value == '') return '0';
-    value = String(value);
-    var c = value[value.length - 1];
-    if (!isNaN(parseInt(c))) {
-      // c is digit
-      value = parseFloat(value).toFixed(3).replace(/0+$/, '').replace(/\.$/, '');
-    }
-    else {
-      // c is not a digit
-      value = parseFloat(value.substr(0, value.length - 1)).toFixed(3).replace(/0+$/, '').replace(/\.$/, '') + c;
-    }
-    return value;
-  },
-
-  /**
-   * Provides the color (in any HTML color format) to use for a particular
-   * series.
-   *
-   * @param series
-   *          Series for which color is being requested
-   * @return color String. Returning null allows this chart to pick a color
-   *         from palette.
-   * @default null
-   * @type Function
-   */
-  colorForSeries: function (series) {
-    return null;
-  },
-
-  /**
-  * Check whether seriesData is correct data for chart drawing
-  * @param seriesData
-  * @return {Boolean}
-  */
-  checkSeries : function(seriesData){
-    if(!seriesData || !seriesData.length){
-      return false;
-    }
-    var result = true;
-    seriesData.forEach(function(item){
-      if(!item.data.length || !item.data[0] || typeof item.data[0].x === 'undefined'){
-        result = false;
-      }
-    });
-    return result;
-  },
-
-  /**
-   * @private
-   *
-   * Refreshes the graph with the latest JSON data.
-   *
-   * @type Function
-   */
-  _refreshGraph: function (jsonData) {
-    if(this.get('isDestroyed')){
-      return;
-    }
-    var seriesData = this.transformToSeries(jsonData);
-
-    // if the graph is opened as a modal popup
-    var popup_path = $("#" + this.id + "-container" + this.get('popupSuffix'));
-    var graph_container = $("#" + this.id + "-container");
-    if (popup_path.length) {
-      popup_path.children().each(function () {
-        $(this).children().remove();
-      });
-      this.set('isPopup', true);
-    }
-    else {
-      graph_container.children().each(function (index, value) {
-        $(value).children().remove();
-      });
-    }
-    if (this.checkSeries(seriesData)) {
-      // Check container exists (may be not, if we go to another page and wait while graphs loading)
-      if (graph_container.length) {
-        this.draw(seriesData);
-        this.set('hasData', true);
-      }
-    }
-    else {
-      this.set('isReady', true);
-      // if the X axis time interval is the default (60 minutes)
-      if(this.get('timeUnitSeconds') === 3600){
-        this._showMessage('info', this.t('graphs.noData.title'), this.t('graphs.noData.message'));
-        this.set('hasData', false);
-      } else {
-        this._showMessage('info', this.t('graphs.noData.title'), this.t('graphs.noDataAtTime.message'));
-      }
-      this.set('isPopup', false);
-    }
-  },
-
-  /**
-   * Returns a custom time unit for the graph's X axis that depends on the interval length.
-   * This is needed as Rickshaw's default time X axis uses UTC time, which can be confusing
-   * for users expecting locale specific time.
-   *
-   * If <code>null</code> is returned, Rickshaw's default time unit is used.
-   *
-   * @type Function
-   * @return Rickshaw.Fixtures.Time
-   */
-  localeTimeUnit: function(timeUnitSeconds){
-    var timeUnit = new Rickshaw.Fixtures.Time();
-    switch (timeUnitSeconds){
-      case 604800:
-        timeUnit = timeUnit.unit('day');
-        break;
-      case 2592000:
-        timeUnit = timeUnit.unit('week');
-        break;
-      case 31104000:
-        timeUnit = timeUnit.unit('month');
-        break;
-      default:
-        timeUnit = {
-          name: timeUnitSeconds / 240 + ' minute',
-          seconds: timeUnitSeconds / 4,
-          formatter: function (d) {
-            return d.toLocaleString().match(/(\d+:\d+):/)[1];
-          }
-        };
-    }
-    return timeUnit;
-  },
-
-  /**
-   * @private
-   *
-   * When a graph is given a particular width and height, the lines are drawn
-   * in a slightly bigger area, thereby chopping off some of the UI. Hence,
-   * after rendering, we adjust the SVG's size in the DOM to compensate.
-   *
-   * Opened https://github.com/shutterstock/rickshaw/issues/141
-   *
-   * @type Function
-   */
-  _adjustSVGHeight: function () {
-    if (this._graph && this._graph.element
-        && this._graph.element.firstChild) {
-      var svgElement = this._graph.element.firstChild;
-      svgElement.setAttribute('height', $(this._graph.element).height()
-          + "px");
-      svgElement.setAttribute('width', $(this._graph.element).width()
-          + "px");
-    }
-  },
-  /**
-   * Temporary fix for incoming graph data:
-   * shifts series timestamps to the correct (current) time point.
-   */
-  dataShiftFix: function(data){
-    var nowTime = Math.round(new Date().getTime() / 1000);
-    data.forEach(function(series){
-      var l = series.data.length;
-      var shiftDiff = nowTime - series.data[l - 1].x;
-      if(shiftDiff > 3600){
-        for(var i = 0;i < l;i++){
-          series.data[i].x = series.data[i].x + shiftDiff;
-        }
-        series.data.unshift({
-          x: nowTime - this.get('timeUnitSeconds'),
-          y: 0
-        });
-      }
-    }, this);
-  },
-
-  draw: function(seriesData) {
-    var isPopup = this.get('isPopup');
-    var p = '';
-    if (isPopup) {
-      p = this.get('popupSuffix');
-    }
-    var palette = new Rickshaw.Color.Palette({ scheme: 'munin'});
-
-    this.dataShiftFix(seriesData);
-
-    // var palette = new Rickshaw.Color.Palette({
-    //   scheme: this._paletteScheme
-    // });
-
-    var self = this;
-    var series_min_length = 100000000;
-    seriesData.forEach(function (series, index) {
-      var seriesColor = self.colorForSeries(series);
-      if (seriesColor == null) {
-        seriesColor = palette.color();
-      }
-      series.color = seriesColor;
-      series.stroke = 'rgba(0,0,0,0.3)';
-      if (isPopup) {
-        // calculate statistics for the popup legend
-        var avg = 0;
-        var min = Number.MAX_VALUE;
-        var max = Number.MIN_VALUE;
-        for (var i = 0; i < series.data.length; i++) {
-          avg += series.data[i]['y'];
-          if (series.data[i]['y'] < min) {
-            min = series.data[i]['y'];
-          }
-          else {
-            if (series.data[i]['y'] > max) {
-              max = series.data[i]['y'];
-            }
-          }
-        }
-        series.name = string_utils.pad(series.name, 30, '&nbsp;', 2) + string_utils.pad('min', 5, '&nbsp;', 3) + string_utils.pad(this.get('yAxisFormatter')(min), 12, '&nbsp;', 3) + string_utils.pad('avg', 5, '&nbsp;', 3) + string_utils.pad(this.get('yAxisFormatter')(avg/series.data.length), 12, '&nbsp;', 3) + string_utils.pad('max', 12, '&nbsp;', 3) + string_utils.pad(this.get('yAxisFormatter')(max), 5, '&nbsp;', 3);
-      }
-      if (series.data.length < series_min_length) {
-        series_min_length = series.data.length;
-      }
-    }.bind(this));
-    seriesData.forEach(function(series, index) {
-      if (series.data.length > series_min_length) {
-        series.data.length = series_min_length;
-      }
-    });
-    var chartId = "#" + this.id + "-chart" + p;
-    var chartOverlayId = "#" + this.id + "-container" + p;
-    var xaxisElementId = "#" + this.id + "-xaxis" + p;
-    var yaxisElementId = "#" + this.id + "-yaxis" + p;
-    var legendElementId = "#" + this.id + "-legend" + p;
-
-    var chartElement = document.querySelector(chartId);
-    var overlayElement = document.querySelector(chartOverlayId);
-    var xaxisElement = document.querySelector(xaxisElementId);
-    var yaxisElement = document.querySelector(yaxisElementId);
-    var legendElement = document.querySelector(legendElementId);
-
-    var strokeWidth = 1;
-    if (this.get('renderer') != 'area') {
-      strokeWidth = 2;
-    }
-
-    var height = 150;
-    var width = 400;
-    if (isPopup) {
-      height = 180;
-      width = 670;
-    } else {
-      // If not in popup, the width could vary.
-      // We determine width based on div's size.
-      var thisElement = this.get('element');
-      if (thisElement!=null) {
-        var calculatedWidth = $(thisElement).width();
-        if (calculatedWidth > 32) {
-          width = calculatedWidth-32;
-        }
-      }
-    }
-    var _graph = new Rickshaw.Graph({
-      height: height,
-      width: width,
-      element: chartElement,
-      series: seriesData,
-      interpolation: 'step-after',
-      stroke: true,
-      renderer: this.get('renderer'),
-      strokeWidth: strokeWidth
-    });
-    if (this.get('renderer') === 'area') {
-      _graph.renderer.unstack = false;
-    }
-
-    var xAxis = new Rickshaw.Graph.Axis.Time({
-      graph: _graph,
-      timeUnit: this.localeTimeUnit(this.get('timeUnitSeconds'))
-    });
-
-    var orientation = 'right';
-    if (isPopup) {
-      orientation = 'left';
-    }
-    var yAxis = new Rickshaw.Graph.Axis.Y({
-      tickFormat: this.yAxisFormatter,
-      element: yaxisElement,
-      orientation: orientation,
-      graph: _graph
-    });
-
-    var legend = new Rickshaw.Graph.Legend({
-      graph: _graph,
-      element: legendElement
-    });
-
-    if (!isPopup) {
-      overlayElement.addEventListener('mousemove', function () {
-        $(xaxisElement).removeClass('hide');
-        $(legendElement).removeClass('hide');
-        $(chartElement).children("div").removeClass('hide');
-      });
-      overlayElement.addEventListener('mouseout', function () {
-        $(legendElement).addClass('hide');
-      });
-      _graph.onUpdate(function () {
-        $(legendElement).addClass('hide');
-      });
-    }
-
-    var shelving = new Rickshaw.Graph.Behavior.Series.Toggle({
-      graph: _graph,
-      legend: legend
-    });
-
-    var order = new Rickshaw.Graph.Behavior.Series.Order({
-      graph: _graph,
-      legend: legend
-    });
-    //show the graph when it's loaded
-    _graph.onUpdate(function(){
-      self.set('isReady', true);
-    });
-    _graph.render();
-
-    if (isPopup) {
-      var self = this;
-      var hoverDetail = new Rickshaw.Graph.HoverDetail({
-        graph: _graph,
-        yFormatter:function (y) {
-          return self.yAxisFormatter(y);
-        },
-        xFormatter:function (x) {
-          return (new Date(x * 1000)).toLocaleTimeString();
-        },
-        formatter:function (series, x, y, formattedX, formattedY, d) {
-          return formattedY + '<br />' + formattedX;
-        }
-      });
-    }
-
-    if (isPopup) {
-      var self = this;
-      // In the popup, save the selected metrics and show only them after a data update
-      _graph.series.forEach(function(series, index) {
-        if (self.get('_seriesProperties') !== null && self.get('_seriesProperties')[index] !== null) {
-          if(self.get('_seriesProperties')[self.get('_seriesProperties').length - index - 1].length > 1) {
-            $('#'+self.get('id')+'-container'+self.get('popupSuffix')+' a.action:eq('+(self.get('_seriesProperties').length - index - 1)+')').parent('li').addClass('disabled');
-            series.disable();
-          }
-        }
-      });
-      //show the graph when it's loaded
-      _graph.onUpdate(function(){
-        self.set('isPopupReady', true);
-      });
-      _graph.update();
-
-      $('li.line').click(function() {
-        var series = [];
-        $('#'+self.get('id')+'-container'+self.get('popupSuffix')+' a.action').each(function(index, v) {
-          series[index] = v.parentNode.classList;
-        });
-        self.set('_seriesProperties', series);
-      });
-
-      this.set('_popupGraph', _graph);
-    }
-    else {
-      this.set('_graph', _graph);
-    }
-  },
-
-
-  showGraphInPopup: function() {
-    if(!this.get('hasData')){
-      return;
-    }
-
-    this.set('isPopup', true);
-    var self = this;
-    App.ModalPopup.show({
-      template: Ember.Handlebars.compile([
-        '<div class="modal-backdrop"></div><div class="modal modal-graph-line" id="modal" tabindex="-1" role="dialog" aria-labelledby="modal-label" aria-hidden="true">',
-        '<div class="modal-header">',
-        '<a class="close" {{action onClose target="view"}}>x</a>',
-        '<h3 id="modal-label">',
-        '{{#if headerClass}}{{view headerClass}}',
-        '{{else}}{{header}}{{/if}}',
-        '</h3>',
-        '</div>',
-        '<div class="modal-body">',
-        '{{#if bodyClass}}{{view bodyClass}}',
-        '{{else}}',
-          '<div class="screensaver no-borders chart-container" {{bindAttr class="view.isReady:hide"}} ></div>',
-          '<div class="time-label" {{bindAttr class="view.isReady::hidden"}}>{{view.currentTimeState.name}}</div>',
-          '{{#if view.isTimePagingEnable}}<div class="arrow-left" {{bindAttr class="view.leftArrowVisible:visibleArrow"}} {{action "switchTimeBack" target="view"}}></div>{{/if}}',
-          '<div id="'+this.get('id')+'-container'+this.get('popupSuffix')+'" class="chart-container chart-container'+this.get('popupSuffix')+' hide" {{bindAttr class="view.isReady:show"}} >',
-            '<div id="'+this.get('id')+'-yaxis'+this.get('popupSuffix')+'" class="'+this.get('id')+'-yaxis chart-y-axis"></div>',
-            '<div id="'+this.get('id')+'-xaxis'+this.get('popupSuffix')+'" class="'+this.get('id')+'-xaxis chart-x-axis"></div>',
-            '<div id="'+this.get('id')+'-legend'+this.get('popupSuffix')+'" class="'+this.get('id')+'-legend chart-legend"></div>',
-            '<div id="'+this.get('id')+'-chart'+this.get('popupSuffix')+'" class="'+this.get('id')+'-chart chart"></div>',
-            '<div id="'+this.get('id')+'-title'+this.get('popupSuffix')+'" class="'+this.get('id')+'-title chart-title">{{view.title}}</div>',
-          '</div>',
-        '{{#if view.isTimePagingEnable}}<div class="arrow-right" {{bindAttr class="view.rightArrowVisible:visibleArrow"}} {{action "switchTimeForward" "forward" target="view"}}></div>{{/if}}',
-        '{{/if}}',
-        '</div>',
-        '<div class="modal-footer">',
-        '{{#if view.primary}}<a class="btn btn-success" {{action onPrimary target="view"}}>{{view.primary}}</a>{{/if}}',
-        '</div>',
-        '</div>'
-      ].join('\n')),
-
-      header: this.get('title'),
-      self: self,
-      isReady: function(){
-        return this.get('self.isPopupReady');
-      }.property('self.isPopupReady'),
-      primary: 'OK',
-      onPrimary: function() {
-        this.hide();
-        self.set('isPopup', false);
-        self.set('timeUnitSeconds', 3600);
-      },
-      onClose: function(){
-        this.hide();
-        self.set('isPopup', false);
-        self.set('timeUnitSeconds', 3600);
-      },
-      /**
-       * check whether the time paging feature is enabled for the graph
-       */
-      isTimePagingEnable: function(){
-        return !self.get('isTimePagingDisable');
-      }.property(),
-      rightArrowVisible: function(){
-        return this.get('isReady') && this.get('currentTimeIndex') != 0;
-      }.property('isReady', 'currentTimeIndex'),
-      leftArrowVisible: function(){
-        return this.get('isReady') && this.get('currentTimeIndex') != 7;
-      }.property('isReady', 'currentTimeIndex'),
-      /**
-       * move graph back by time
-       * @param event
-       */
-      switchTimeBack: function(event){
-        var index = this.get('currentTimeIndex');
-        // 7 - number of last time state
-        if(index < 7){
-          this.reloadGraphByTime(++index);
-        }
-      },
-      /**
-       * Move the graph one step forward in time.
-       * @param event
-       */
-      switchTimeForward: function(event){
-        var index = this.get('currentTimeIndex');
-        if(index > 0){
-          this.reloadGraphByTime(--index);
-        }
-      },
-      /**
-       * Reload the graph for the selected time range.
-       * @param index
-       */
-      reloadGraphByTime: function(index){
-        this.set('currentTimeIndex', index);
-        self.set('timeUnitSeconds', this.get('timeStates')[index].seconds);
-        self.loadData();
-      },
-      timeStates: [
-        {name: Em.I18n.t('graphs.timeRange.hour'), seconds: 3600},
-        {name: Em.I18n.t('graphs.timeRange.twoHours'), seconds: 7200},
-        {name: Em.I18n.t('graphs.timeRange.fourHours'), seconds: 14400},
-        {name: Em.I18n.t('graphs.timeRange.twelveHours'), seconds: 43200},
-        {name: Em.I18n.t('graphs.timeRange.day'), seconds: 86400},
-        {name: Em.I18n.t('graphs.timeRange.week'), seconds: 604800},
-        {name: Em.I18n.t('graphs.timeRange.month'), seconds: 2592000},
-        {name: Em.I18n.t('graphs.timeRange.year'), seconds: 31104000}
-      ],
-      currentTimeIndex: 0,
-      currentTimeState: function(){
-        return this.get('timeStates').objectAt(this.get('currentTimeIndex'));
-      }.property('currentTimeIndex')
-    });
-    Ember.run.next(function() {
-      self.loadData();
-      self.set('isPopupReady', false);
-    });
-  },
-  /**
-   * Return a formatted URL that depends on the time unit;
-   * for host metrics it also depends on the host name,
-   * and for MapReduce metrics on the JobTracker node.
-   * @return {String}
-   */
-  getFormattedUrl:function(){
-    var toSeconds = Math.round(new Date().getTime() / 1000);
-    var hostName = (this.get('content')) ? this.get('content.hostName') : "";
-    var nameNodeName = (App.HDFSService.find().objectAt(0)) ?
-      App.HDFSService.find().objectAt(0).get('nameNode').get('hostName') :
-      "";
-    var jobTrackerNode = (App.MapReduceService.find().objectAt(0))
-      ? App.MapReduceService.find().objectAt(0).get('jobTracker').get('hostName')
-      : "";
-    var timeUnit = this.get('timeUnitSeconds');
-
-    return App.formatUrl(
-      this.get('urlPrefix') + this.get('sourceUrl'),
-      {
-        toSeconds: toSeconds,
-        fromSeconds: toSeconds - timeUnit,
-        stepSeconds: 15,
-        hostName: hostName,
-        nameNodeName: nameNodeName,
-        jobTrackerNode: jobTrackerNode
-      },
-      this.get('mockUrl')
-    );
-  },
-  //60 minute interval on X axis.
-  timeUnitSeconds: 3600
-});
-
-/**
- * A formatter which will turn a number into computer storage sizes of the
- * format '23 GB' etc.
- * 
- * @type Function
- */
-App.ChartLinearTimeView.BytesFormatter = function (y) {
-  if (y == 0) return '0 B';
-  var value = Rickshaw.Fixtures.Number.formatBase1024KMGTP(y);
-  if (!y || y.length < 1) {
-    value = '0 B';
-  }
-  else {
-    if ("number" == typeof value) {
-      value = String(value);
-    }
-    if ("string" == typeof value) {
-      value = value.replace(/\.\d(\d+)/, function($0, $1){ // keep only one digit after the decimal point
-        return $0.replace($1, '');
-      }); 
-      // Either it ends with digit or ends with character
-      value = value.replace(/(\d$)/, '$1 '); // Ends with digit like '120'
-      value = value.replace(/([a-zA-Z]$)/, ' $1'); // Ends with character like
-      // '120M'
-      value = value + 'B'; // Append B to make B, MB, GB etc.
-    }
-  }
-  return value;
-};
-
-/**
- * A formatter which will turn a number into percentage display like '42%'
- * 
- * @type Function
- */
-App.ChartLinearTimeView.PercentageFormatter = function (percentage) {
-  var value = percentage;
-  if (!value || value.length < 1) {
-    value = '0 %';
-  } else {
-    value = value.toFixed(3).replace(/0+$/, '').replace(/\.$/, '') + '%';
-  }
-  return value;
-};
-
-/**
- * A formatter which will turn elapsed time into display time like '50 ms',
- * '5s', '10 m', '3 hr' etc. Time is expected to be provided in milliseconds.
- * 
- * @type Function
- */
-App.ChartLinearTimeView.TimeElapsedFormatter = function (millis) {
-  var value = millis;
-  if (!value || value.length < 1) {
-    value = '0 ms';
-  } else if ("number" == typeof millis) {
-    var seconds = millis > 1000 ? Math.round(millis / 1000) : 0;
-    var minutes = seconds > 60 ? Math.round(seconds / 60) : 0;
-    var hours = minutes > 60 ? Math.round(minutes / 60) : 0;
-    var days = hours > 24 ? Math.round(hours / 24) : 0;
-    if (days > 0) {
-      value = days + ' d';
-    } else if (hours > 0) {
-      value = hours + ' hr';
-    } else if (minutes > 0) {
-      value = minutes + ' m';
-    } else if (seconds > 0) {
-      value = seconds + ' s';
-    } else if (millis > 0) {
-      value = millis.toFixed(3).replace(/0+$/, '').replace(/\.$/, '') + ' ms';
-    } else {
-      value = millis.toFixed(3).replace(/0+$/, '').replace(/\.$/, '') + ' ms';
-    }
-  }
-  return value;
-};
\ No newline at end of file
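
The two pure formatters above can be exercised standalone (BytesFormatter additionally needs Rickshaw); illustrative inputs and outputs:

    App.ChartLinearTimeView.PercentageFormatter(42.5);    // '42.5%'
    App.ChartLinearTimeView.TimeElapsedFormatter(93000);  // '2 m'
    App.ChartLinearTimeView.TimeElapsedFormatter(800);    // '800 ms'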
diff --git a/branch-1.2/ambari-web/app/views/common/chart/pie.js b/branch-1.2/ambari-web/app/views/common/chart/pie.js
deleted file mode 100644
index e317297..0000000
--- a/branch-1.2/ambari-web/app/views/common/chart/pie.js
+++ /dev/null
@@ -1,74 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.ChartPieView = Em.View.extend({
-  w:90,
-  h:90,
-  data:[300, 500],
-  palette: new Rickshaw.Color.Palette({ scheme: 'munin'}),
-  stroke: 'black',
-  strokeWidth: 2,
-  donut:d3.layout.pie().sort(null),
-
-  r:function () {
-    return Math.min(this.get('w'), this.get('h')) / 2 - this.get('strokeWidth');
-  }.property('w', 'h'),
-
-  outerR:function () {
-    return this.get('r'); // - 10;
-  }.property('r'),
-
-  innerR:function () {
-    return 0; // this.get('r') - 20;
-  }.property('r'),
-
-  arc:function () {
-    return d3.svg.arc().innerRadius(this.get('innerR')).outerRadius(this.get('outerR'));
-  }.property(),
-
-  didInsertElement:function () {
-    this._super();
-    this.appendSvg();
-  },
-
-  selector:function () {
-    return '#' + this.get('elementId');
-  }.property('elementId'),
-
-  appendSvg:function () {
-    var thisChart = this;
-
-    this.set('svg', d3.select(this.get('selector')).append("svg:svg")
-      .attr("width", thisChart.get('w'))
-      .attr("height", thisChart.get('h'))
-      .attr("stroke", this.get('stroke'))
-      .attr("stroke-width", this.get('strokeWidth'))
-      .append("svg:g")
-      .attr("transform", "translate(" + thisChart.get('w') / 2 + "," + thisChart.get('h') / 2 + ")"));
-
-    this.set('arcs', this.get('svg').selectAll("path")
-      .data(thisChart.donut(thisChart.get('data')))
-      .enter().append("svg:path")
-      .attr("fill", function (d, i) {
-        return thisChart.palette.color(i);
-      })
-      .attr("d", this.get('arc')));
-  }
-});
\ No newline at end of file
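For context, the view above is the standard d3 donut recipe; a minimal standalone sketch of the same pattern (the '#chart' container id is hypothetical, d3 v2-era API as used by this code):

    var donut = d3.layout.pie().sort(null);                 // maps values to start/end angles
    var arc = d3.svg.arc().innerRadius(0).outerRadius(43);  // 90x90 box minus the stroke width
    d3.select('#chart').append('svg:svg')
      .attr('width', 90).attr('height', 90)
      .append('svg:g')
      .attr('transform', 'translate(45,45)')                // center the pie
      .selectAll('path')
      .data(donut([300, 500]))
      .enter().append('svg:path')
      .attr('d', arc);                                      // angles -> SVG path data
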
diff --git a/branch-1.2/ambari-web/app/views/common/filter_view.js b/branch-1.2/ambari-web/app/views/common/filter_view.js
deleted file mode 100644
index 61912c3..0000000
--- a/branch-1.2/ambari-web/app/views/common/filter_view.js
+++ /dev/null
@@ -1,382 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Wrapper View for all filter components. The layout template and common actions are located inside of it.
- * Logic specific to the data component (input, select, or custom multi-select, which fires any change on the
- * interface) is located in the inner view - <code>filterView</code>.
- *
- * If we want to have an input filter, put <code>textFieldView</code> into it.
- * All inner views are implemented below this view.
- * @type {*}
- */
-var wrapperView = Ember.View.extend({
-  classNames: ['view-wrapper'],
-  layout: Ember.Handlebars.compile('<a href="#" {{action "clearFilter" target="view"}} class="ui-icon ui-icon-circle-close"></a> {{yield}}'),
-  template: Ember.Handlebars.compile('{{#if view.fieldId}}<input type="hidden" id="{{unbound view.fieldId}}" value="" />{{/if}} {{view view.filterView}}'),
-
-  value: null,
-
-  /**
-   * If this field exists, we dynamically create a hidden input element and set the value there.
-   * Used for cases where this value will be used outside of the component
-   */
-  fieldId: null,
-
-  clearFilter: function(){
-    this.set('value', this.get('emptyValue'));
-    return false;
-  },
-
-  /**
-   * Used to determine whether the filter is cleared. Also used when we want to set an empty value
-   */
-  emptyValue: '',
-
-  /**
-   * Whether our <code>value</code> is empty or not
-   * @return {Boolean}
-   */
-  isEmpty: function(){
-    if(this.get('value') === null){
-      return true;
-    }
-    return this.get('value').toString() === this.get('emptyValue').toString();
-  },
-
-  /**
-   * Show/hide the <code>Clear filter</code> button.
-   * This method also updates the hidden input related to <code>fieldId</code> if it exists,
-   * and calls the <code>onChangeValue</code> callback when everything is done.
-   */
-  showClearFilter: function(){
-    if(!this.get('parentNode')){
-      return;
-    }
-
-    if(this.isEmpty()){
-      this.get('parentNode').addClass('notActive');
-    } else {
-      this.get('parentNode').removeClass('notActive');
-    }
-
-    if(this.get('fieldId')){
-      this.$('> input').eq(0).val(this.get('value'));
-    }
-
-    this.onChangeValue();
-  }.observes('value'),
-
-  /**
-   * Callback for value changes
-   */
-  onChangeValue: function(){
-
-  },
-
-  /**
-   * The filter component is located here. Should be redefined in subclasses
-   */
-  filterView: Em.View,
-
-  /**
-   * Update the class of parentNode (hide the clear filter button) on page load
-   */
-  didInsertElement: function(){
-    var parent = this.$().parent();
-    this.set('parentNode', parent);
-    parent.addClass('notActive');
-  }
-});
-
-/**
- * Simple input control for wrapperView
- */
-var textFieldView = Ember.TextField.extend({
-  type:'text',
-  placeholder: 'Any',
-  valueBinding: "parentView.value"
-});
-
-/**
- * Simple multi-select control for wrapperView.
- * Used to render a blue button and a popup which opens on button click.
- * All content-related logic should be implemented manually outside of it
- */
-var componentFieldView = Ember.View.extend({
-  classNames: ['btn-group'],
-  classNameBindings: ['isFilterOpen:open:'],
-
-  /**
-   * Whether popup is shown or not
-   */
-  isFilterOpen: false,
-
-  /**
-   * We have a <code>value</code> property similar to an input's <code>value</code> property
-   */
-  valueBinding: 'parentView.value',
-
-  /**
-   * Clear filter to initial state
-   */
-  clearFilter: function(){
-    this.set('value', '');
-  },
-
-  /**
-   * Onclick handler for <code>cancel filter</code> button
-   */
-  closeFilter:function () {
-    $(document).unbind('click');
-    this.set('isFilterOpen', false);
-  },
-
-  /**
-   * Onclick handler for <code>apply filter</code> button
-   */
-  applyFilter:function() {
-    this.closeFilter();
-  },
-
-  /**
-   * Onclick handler for the <code>show component filter</code> button.
-   * This function is also used in some other places
-   */
-  clickFilterButton:function () {
-    var self = this;
-    this.set('isFilterOpen', !this.get('isFilterOpen'));
-    if (this.get('isFilterOpen')) {
-
-      var dropDown = this.$('.filter-components');
-      var firstClick = true;
-      $(document).bind('click', function (e) {
-        if (!firstClick && $(e.target).closest(dropDown).length == 0) {
-          self.set('isFilterOpen', false);
-          $(document).unbind('click');
-        }
-        firstClick = false;
-      });
-    }
-  }
-});
-
-/**
- * Simple select control for wrapperView
- */
-var selectFieldView = Ember.Select.extend({
-  selectionBinding: 'parentView.value',
-  contentBinding: 'parentView.content'
-});
-
-/**
- * Result object, which will be accessible outside
- * @type {Object}
- */
-module.exports = {
-  /**
-   * You can access wrapperView outside
-   */
-  wrapperView : wrapperView,
-
-  /**
-   * And also the control views, if needed
-   */
-  textFieldView : textFieldView,
-  selectFieldView: selectFieldView,
-  componentFieldView: componentFieldView,
-
-  /**
-   * Quickly create input filters
-   * @param config parameters of <code>wrapperView</code>
-   */
-  createTextView : function(config){
-
-    config.fieldType = config.fieldType || 'input-medium';
-    config.filterView = textFieldView.extend({
-      classNames : [ config.fieldType ]
-    });
-
-    return wrapperView.extend(config);
-  },
-
-  /**
-   * Quickly create multi-select filters
-   * @param config parameters of <code>wrapperView</code>
-   */
-  createComponentView : function(config){
-    config.clearFilter = function(){
-      this.forEachChildView(function(item){
-        if(item.clearFilter){
-          item.clearFilter();
-        }
-      });
-      return false;
-    };
-
-    return wrapperView.extend(config);
-  },
-
-  /**
-   * Quickly create select filters
-   * @param config parameters of <code>wrapperView</code>
-   */
-  createSelectView: function(config){
-
-    config.fieldType = config.fieldType || 'input-medium';
-    config.filterView = selectFieldView.extend({
-      classNames : [ config.fieldType ]
-    });
-    config.emptyValue = 'Any';
-
-    return wrapperView.extend(config);
-  },
-  /**
-   * Returns the filter function, which depends on the type of the property
-   * @param type
-   * @param isGlobal whether the search is global
-   * @return {Function}
-   */
-  getFilterByType: function(type, isGlobal){
-    switch (type){
-      case 'ambari-bandwidth':
-        return function(rowValue, rangeExp){
-          var compareChar = isNaN(rangeExp.charAt(0)) ? rangeExp.charAt(0) : false;
-          var compareScale = rangeExp.charAt(rangeExp.length - 1);
-          var compareValue = compareChar ? parseFloat(rangeExp.substr(1, rangeExp.length)) : parseFloat(rangeExp.substr(0, rangeExp.length));
-          var match = false;
-          if (rangeExp.length == 1 && compareChar !== false) {
-            // User types only '=' or '>' or '<', so don't filter column values
-            match = true;
-            return match;
-          }
-          switch (compareScale) {
-            case 'g':
-              compareValue *= 1073741824;
-              break;
-            case 'm':
-              compareValue *= 1048576;
-              break;
-            case 'k':
-              compareValue *= 1024;
-              break;
-            default:
-              //default value in GB
-              compareValue *= 1073741824;
-          }
-          rowValue = (jQuery(rowValue).text()) ? jQuery(rowValue).text() : rowValue;
-
-          var convertedRowValue;
-          if (rowValue === '<1KB') {
-            convertedRowValue = 1;
-          } else {
-            var rowValueScale = rowValue.substr(rowValue.length - 2, 2);
-            switch (rowValueScale) {
-              case 'KB':
-                convertedRowValue = parseFloat(rowValue)*1024;
-                break;
-              case 'MB':
-                convertedRowValue = parseFloat(rowValue)*1048576;
-                break;
-              case 'GB':
-                convertedRowValue = parseFloat(rowValue)*1073741824;
-                break;
-            }
-          }
-
-          switch (compareChar) {
-            case '<':
-              if (compareValue > convertedRowValue) match = true;
-              break;
-            case '>':
-              if (compareValue < convertedRowValue) match = true;
-              break;
-            case false:
-            case '=':
-              if (compareValue == convertedRowValue) match = true;
-              break;
-          }
-          return match;
-        }
-      case 'number':
-        return function(rowValue, rangeExp){
-          var compareChar = rangeExp.charAt(0);
-          var compareValue;
-          var match = false;
-          if (rangeExp.length == 1) {
-            if (isNaN(parseInt(compareChar))) {
-              // User types only '=' or '>' or '<', so don't filter column values
-              match = true;
-              return match;
-            }
-            else {
-              compareValue = parseFloat(parseFloat(rangeExp).toFixed(2));
-            }
-          }
-          else {
-            if (isNaN(parseInt(compareChar))) {
-              compareValue = parseFloat(parseFloat(rangeExp.substr(1, rangeExp.length)).toFixed(2));
-            }
-            else {
-              compareValue = parseFloat(parseFloat(rangeExp.substr(0, rangeExp.length)).toFixed(2));
-            }
-          }
-          rowValue = parseFloat((jQuery(rowValue).text()) ? jQuery(rowValue).text() : rowValue);
-          match = false;
-          switch (compareChar) {
-            case '<':
-              if (compareValue > rowValue) match = true;
-              break;
-            case '>':
-              if (compareValue < rowValue) match = true;
-              break;
-            case '=':
-              if (compareValue == rowValue) match = true;
-              break;
-            default:
-              if (rangeExp == rowValue) match = true;
-          }
-          return match;
-        }
-      case 'multiple':
-        return function(origin, compareValue){
-          var options = compareValue.split(',');
-          var rowValue = origin.mapProperty('componentName').join(" ");
-          var str = new RegExp(compareValue, "i");
-          for (var i = 0; i < options.length; i++) {
-            if(!isGlobal) {
-              str = new RegExp('(\\W|^)' + options[i] + '(\\W|$)');
-            }
-            if (rowValue.search(str) !== -1) {
-              return true;
-            }
-          }
-          return false;
-        }
-      case 'string':
-      default:
-        return function(origin, compareValue){
-          var regex = new RegExp(compareValue,"i");
-          return regex.test(origin);
-        }
-    }
-  }
-};
\ No newline at end of file
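To make the 'ambari-bandwidth' parsing above concrete: for a range expression like '>2g', compareChar is '>', compareScale is 'g', and compareValue becomes 2 * 1073741824 bytes; row values such as '3.5GB' are converted the same way before the comparison. A hedged sketch (the require path matches this file; in the app, rowValue comes from rendered table cells):

    var filters = require('views/common/filter_view');
    var byBandwidth = filters.getFilterByType('ambari-bandwidth');
    // byBandwidth(rowValue, '>2g') with a row value of '3.5GB':
    //   compareValue      = 2   * 1073741824
    //   convertedRowValue = 3.5 * 1073741824  -> '>' matches, returns true
    // with a row value of '512MB':
    //   convertedRowValue = 512 * 1048576     -> below 2 GB, returns false
    // with a bare comparator such as '>', every row matches by design
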
diff --git a/branch-1.2/ambari-web/app/views/common/form/field.js b/branch-1.2/ambari-web/app/views/common/form/field.js
deleted file mode 100644
index 9919e85..0000000
--- a/branch-1.2/ambari-web/app/views/common/form/field.js
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.FormFieldTemplate = Em.View.extend({
-  templateDir: 'templates/common/form/',
-  defaultTemplate: 'field',
-  classNames: ["control-group"],
-  init: function(){
-    this._super();
-    this.updateTemplate();
-  },
-  updateTemplate: function(){
-    var field = this.get('field');
-    if(field) {
-      var templateName = '';
-      switch(field.get('displayType')) {
-        case 'checkbox':
-          templateName = field.get('displayType');
-          break;
-        default:
-          templateName = this.get('defaultTemplate');
-          break;
-      }
-
-      this.set('templateName', require(this.get('templateDir') + templateName));
-    }
-  }.observes('field')
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/common/grid.js b/branch-1.2/ambari-web/app/views/common/grid.js
deleted file mode 100644
index eb27c8f..0000000
--- a/branch-1.2/ambari-web/app/views/common/grid.js
+++ /dev/null
@@ -1,309 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-var validator = require('utils/validator');
-
-App.GridFilterObject = Em.Object.extend({
-  checked:false
-});
-
-App.GridFilter = Em.View.extend({
-  tagName:"ul",
-  classNames:['filter'],
-  templateName:require('templates/common/grid/filter'),
-  attributeBindings:['style'],
-  getHeader:function () {
-    return this.get('header')
-  },
-  filters:function () {
-    return this.get('header._filters');
-  }.property('header._filters')
-});
-
-App.GridHeader = Em.View.extend({
-  templateName:require('templates/common/grid/header'),
-  tagName:'th',
-  filterable:true,
-  showFilter:false,
-  getGrid:function () {
-    return this.get('grid');
-  },
-  _filters:[],
-  doFilter:function () {
-    console.log(this.get('grid'));
-  },
-  toggleFilter:function () {
-    this.set('showFilter', !this.get('showFilter'));
-  },
-  applyFilter:function () {
-    console.warn('APPLYING FILTERS');
-
-    var filters = this.get('_filters');
-    var filterValues = [];
-    $.each(filters, function(){
-      if(this.get('checked')) {
-        filterValues.push(this.get('value'));
-      }
-    });
-
-    var grid = this.get('grid');
-    grid.addFilters(this.get('name'), filterValues);
-    this.set('showFilter', false);
-  },
-  init:function () {
-    this._super();
-    if (!this.get('_filters').length) {
-      this.filterValues();
-      var thisHeader = this;
-      this.set('filter', App.GridFilter.extend({ header:thisHeader }));
-    }
-  },
-
-  filterValues:function () {
-    var gridFilters = this.get('grid._filters');
-    if (gridFilters && gridFilters[this.get('name')]) {
-      var filters = this.get('grid._filters')[this.get('name')];
-      // there should be something like filter preparing
-      var newFilters = [];
-      $.each(filters, function (i, v) {
-        newFilters.push(App.GridFilterObject.create({label:v, value:v}));
-      });
-
-      this.set('_filters', newFilters);
-    }
-  }.observes('grid._filters')
-});
-
-App.GridRow = Em.View.extend({
-  tagName:'tr',
-  init:function (options) {
-    var object = this.get('object');
-    var grid = this.get('grid');
-    var fieldNames = grid.get('fieldNames');
-    var template = '';
-
-    if (fieldNames) {
-      $.each(grid.get('fieldNames'), function (i, field) {
-        template += "<td>" + object.get(field) + "</td>";
-      });
-
-      this.set('template', Em.Handlebars.compile(template));
-    }
-    return this._super();
-  }
-});
-
-App.GridPage = Em.Object.extend({
-  activeClass:function () {
-    return this.get('active') ? "active" : "";
-  }.property('active'),
-  active:function () {
-    return parseInt(this.get('number')) == parseInt(this.get('pager.grid.currentPage'));
-  }.property('pager.grid.currentPage')
-});
-
-App.GridPager = Em.View.extend({
-
-  pages:[],
-  templateName:require('templates/common/grid/pager'),
-  classNames:['pagination'],
-
-  activatePrevPage:function () {
-    var current = this.get('grid.currentPage');
-    if (current > 1) this.set('grid.currentPage', current - 1);
-  },
-  activateNextPage:function () {
-    var current = this.get('grid.currentPage');
-    if (current < this.get('pages').length) this.set('grid.currentPage', current + 1);
-  },
-
-  prevPageDisabled:function () {
-    return this.get('grid.currentPage') > 1 ? false : "disabled";
-  }.property('grid.currentPage'),
-
-  nextPageDisabled:function () {
-    return this.get('grid.currentPage') < this.get('pages').length ? false : "disabled";
-  }.property('grid.currentPage'),
-
-  init:function () {
-    this._super();
-    this.clearPages()
-    this.pushPages();
-  },
-
-  activatePage:function (event) {
-    var page = event.context;
-    this.get('grid').set('currentPage', parseInt(event.context.get('number')));
-  },
-
-  clearPages:function () {
-    this.set('pages', []);
-  },
-
-  pushPages:function () {
-    var thisPager = this;
-    var pages = this.get('grid._pager.pages');
-    $.each(pages, function () {
-      var thisNumber = this;
-      thisPager.get('pages').push(App.GridPage.create({
-        number:thisNumber,
-        pager:thisPager
-      }));
-    })
-  }.observes('grid._pager')
-});
-
-App.Grid = Em.View.extend({
-  _columns:{}, // not used
-  _filters:{}, // prepared filters from data values
-  _pager:{pages:[1, 2, 3, 4, 5]}, // observed by pager to config it
-
-  _collection:{className:false, staticOptions:{}}, // collection config
-  currentPage:1,
-  fieldNames:[],
-  appliedFilters:{},
-  filteredArray:[],
-  columns:[],
-  collection:[],
-  initCompleted:false,
-  rows:[],
-  templateName:require('templates/main/admin/audit'),
-
-  init:function () {
-    console.warn("  Grid INIT  ");
-    this._super();
-    this.prepareColumns(); // should be called first
-    this.prepareCollection();
-    this.preparePager();
-  },
-
-  preparePager:function () {
-//    this.set('pager', App.GridPager.extend({ grid:this })); ask to hide
-  },
-
-  addFilters: function(field, values){
-    var filters = this.get('appliedFilters');
-    filters[field] = values;
-
-    var collection = this.get('_collection.className');
-    collection = collection.find();
-    var arrayCollection = collection.filter(function(data) {
-      var oneFilterFail = false;
-      $.each(filters, function(fieldname, values){
-        if(values.length && values.indexOf(data.get(fieldname)) == -1) {
-          return oneFilterFail = true;
-        }
-      });
-      return !oneFilterFail;
-    });
-
-    this.set('filteredArray', arrayCollection);
-  },
-
-  prepareCollection:function () {
-    if (validator.empty(this.get('_collection.className'))) {
-      throw "_collection.className field is not defined";
-    }
-    var collection = this.get('_collection.className');
-    this.set('collection', collection.find(this.get('_collection.staticOptions')));
-  },
-
-  addColumn:function (options) {
-    options.grid = this;
-    if (validator.empty(options.name)) {
-      throw "define column name";
-    }
-
-    if (this.get('_columns.' + options.name)) {
-      throw "column with this '" + options.name + "' already exists";
-    }
-
-    var field = App.GridHeader.extend(options);
-    this.columns.push(field);
-
-    if (field.filterable || 1) { // the .filterable flag is not working, so treat every column as filterable
-      this.fieldNames.push(options.name);
-    }
-  },
-
-  clearColumns:function () {
-    this.set('_columns', {});
-    this.set('columns', []);
-    this.set('fieldNames', []);
-  },
-
-  prepareColumns:function () {
-    this.clearColumns();
-  },
-
-  prepareFilters:function () {
-    var thisGrid = this;
-    var collection = this.get('collection');
-    var fieldNames = this.get('fieldNames');
-    var options = {};
-
-    if (collection && collection.content) {
-      collection.forEach(function (object, i) {
-        $.each(fieldNames, function (j, field) {
-          if (!options[field]) {
-            options[field] = [];
-          }
-
-          var filter = object.get(field);
-          if (options[field].indexOf(filter) == -1) {
-            options[field].push(filter);
-          }
-        });
-      })
-
-      thisGrid.set('_filters', options);
-    }
-  }.observes('collection.length'),
-
-
-  clearRows:function () {
-    this.set('rows', [])
-  },
-
-  prepareRows:function () {
-    var collection = this.get('collection');
-    var thisGrid = this;
-    this.clearRows();
-    console.warn("PREPARE ROWS LEN:", collection.get('length'));
-
-    if (collection && collection.content) {
-      collection.forEach(function (object, i) {
-        var row = App.GridRow.extend({grid:thisGrid, object:object});
-        thisGrid.rows.push(row);
-      });
-    }
-  }.observes('collection.length'),
-
-  filteredRows:function () {
-    var collection = this.get('filteredArray');
-    var thisGrid = this;
-    this.clearRows();
-
-    collection.forEach(function (object) {
-      var row = App.GridRow.extend({grid:thisGrid, object:object});
-      thisGrid.rows.push(row);
-    });
-  }.observes('filteredArray')
-});
\ No newline at end of file
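A hedged sketch of how a concrete grid is assembled from the hooks above (the App.ServiceAudit model and the column names are hypothetical):

    App.AuditGrid = App.Grid.extend({
      // collection config consumed by prepareCollection()
      _collection: { className: App.ServiceAudit, staticOptions: {} },
      prepareColumns: function () {
        this.clearColumns(); // resets _columns, columns and fieldNames
        this.addColumn({ name: 'date', displayName: 'Date' });
        this.addColumn({ name: 'service', displayName: 'Service' });
      }
    });
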
diff --git a/branch-1.2/ambari-web/app/views/common/metric.js b/branch-1.2/ambari-web/app/views/common/metric.js
deleted file mode 100644
index bae2ce4..0000000
--- a/branch-1.2/ambari-web/app/views/common/metric.js
+++ /dev/null
@@ -1,171 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-/**
- * Usage: {{view App.MetricFilteringWidget controllerBinding="App.router.mainChartsController"}}
- * Sets the controller.metric field with the metric value.
- * The widget assigns itself to the controller as metricWidget (controller.get('metricWidget'))
- * @type {*}
- */
-App.MetricFilteringWidget = Em.View.extend({
-  classNames:['metric-filtering-widget'],
-  /**
-   * chosen metric value
-   */
-  chosenMetric:null,
-  chosenMoreMetric:null,
-  showMore:0, // toggle more metrics indicator
-  /**
-   * metrics
-   */
-  metrics:[
-    Em.Object.create({ label:Em.I18n.t('metric.default'), value:null}),
-    Em.Object.create({ label:Em.I18n.t('metric.cpu'), value:'cpu'}),
-    Em.Object.create({ label:Em.I18n.t('metric.memory'), value:'memory'}),
-    Em.Object.create({ label:Em.I18n.t('metric.network'), value:'network'}),
-    Em.Object.create({ label:Em.I18n.t('metric.io'), value:'io'})
-  ],
-
-
-  moreMetrics:[
-    Em.Object.create({ label:Em.I18n.t('metric.more.cpu'), code:'cpu', items:[
-      Em.Object.create({value:"cpu_nice"}),
-      Em.Object.create({value:"cpu_wio"}),
-      Em.Object.create({value:"cpu_user"}),
-      Em.Object.create({value:"cpu_idle"}),
-      Em.Object.create({value:"cpu_system"}),
-      Em.Object.create({value:"cpu_aidle"})
-    ] }),
-
-    Em.Object.create({ label:Em.I18n.t('metric.more.disk'), code:'disk',
-      items:[
-        Em.Object.create({value:'disk_free'}),
-        Em.Object.create({value:'disk_total'}),
-        Em.Object.create({value:'part_max_used'})
-      ]
-    }),
-
-    Em.Object.create({ label:Em.I18n.t('metric.more.load'), code:'load',
-      items:[
-        Em.Object.create({value:'load_one'}),
-        Em.Object.create({value:'load_five'}),
-        Em.Object.create({value:'load_fifteen'})
-      ]
-    }),
-
-    Em.Object.create({ label:Em.I18n.t('metric.more.memory'), code:'memory',
-      items:[
-        Em.Object.create({value:'swap_free'}),
-        Em.Object.create({value:'cpu'})
-      ]
-    }),
-
-    Em.Object.create({ label:Em.I18n.t('metric.more.network'), code:'network',
-      items:[
-        Em.Object.create({value:'bytes_out'}),
-        Em.Object.create({value:'bytes_in'}),
-        Em.Object.create({value:'pkts_in'}),
-        Em.Object.create({value:'pkts_out'})
-      ]
-    }),
-
-    Em.Object.create({ label:Em.I18n.t('metric.more.process'), code:'process',
-      items:[
-        Em.Object.create({value:'proc_run'}),
-        Em.Object.create({value:'proc_total'})
-      ]
-    })
-
-  ],
-
-  /**
-   * return array of chosen metrics
-   */
-  chosenMetrics:function () {
-    return this.get('chosenMetric') ? [this.get('chosenMetric')] : this.get('defaultMetrics');
-  }.property('chosenMetric'),
-
-  /**
-   * metric item view
-   */
-  itemView:Em.View.extend({
-    tagName:'li',
-    classNameBindings:['disabled'],
-    disabled:function () {
-      return this.get('isActive') ? "disabled" : false;
-    }.property('isActive'),
-    isActive:function () {
-      return this.get('metric.value') == this.get('widget.chosenMetric');
-    }.property('widget.chosenMetric'),
-    label:function () {
-      return this.get('metric.label');
-    }.property('metric.label'),
-    template:Em.Handlebars.compile('<a {{action activate view.metric.value target="view.widget" href="#" }}>{{unbound view.label}}</a>')
-  }),
-
-  moreItemView:function () {
-    return this.get('itemView').extend({
-      label:function () {
-        return this.get('metric.value');
-      }.property('metric.value')
-    });
-  }.property(),
-
-  /**
-   * return default selected metrics (currently - all)
-   */
-  defaultMetrics:function () {
-    var values = [];
-    $.each(this.get('metrics'), function () {
-      if (this.value) {
-        values.push(this.value);
-      }
-    });
-    return values;
-  }.property(),
-
-  bindToController:function () {
-    var thisW = this;
-    var controller = this.get('controller');
-    controller.set('metricWidget', thisW);
-  },
-
-  toggleMore:function () {
-    this.set('showMore', !this.get('showMore'));
-  },
-
-  /**
-   * assign this widget to controller, prepare items by metricsConfig
-   */
-  init:function () {
-    this._super();
-    this.bindToController();
-  },
-
-  /**
-   * write active metric to widget
-   * @param event
-   */
-  activate:function (event) {
-    this.set('chosenMetric', event.context);
-  },
-
-  templateName:require('templates/common/metric')
-})
\ No newline at end of file
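Given bindToController() above, the controller side can drive the widget; a minimal sketch (the controller lookup is an assumption):

    var controller = App.router.get('mainChartsController');
    var widget = controller.get('metricWidget'); // assigned in bindToController()
    widget.get('chosenMetrics');                 // all metric values while nothing is chosen
    widget.activate({ context: 'cpu' });         // what clicking the 'cpu' item does
    widget.get('chosenMetrics');                 // -> ['cpu']
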
diff --git a/branch-1.2/ambari-web/app/views/common/modal_popup.js b/branch-1.2/ambari-web/app/views/common/modal_popup.js
deleted file mode 100644
index 429d8a0..0000000
--- a/branch-1.2/ambari-web/app/views/common/modal_popup.js
+++ /dev/null
@@ -1,121 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.ModalPopup = Ember.View.extend({
-
-  template: Ember.Handlebars.compile([
-    '<div class="modal-backdrop"></div><div class="modal" id="modal" tabindex="-1" role="dialog" aria-labelledby="modal-label" aria-hidden="true">',
-    '<div class="modal-header">',
-    '{{#if showCloseButton}}<a class="close" {{action onClose target="view"}}>x</a>{{/if}}',
-    '<h3 id="modal-label">',
-    '{{#if headerClass}}{{view headerClass}}',
-    '{{else}}{{header}}{{/if}}',
-    '</h3>',
-    '</div>',
-    '<div class="modal-body">',
-    '{{#if bodyClass}}{{view bodyClass}}',
-    '{{else}}{{#if encodeBody}}{{body}}{{else}}{{{body}}}{{/if}}{{/if}}',
-    '</div>',
-    '{{#if showFooter}}',
-    '{{#if footerClass}}{{view footerClass}}',
-    '{{else}}',
-    '<div class="modal-footer">',
-    '{{#if view.secondary}}<a class="btn" {{action onSecondary target="view"}}>{{view.secondary}}</a>{{/if}}',
-    '{{#if view.primary}}<a class="btn btn-success" {{action onPrimary target="view"}}>{{view.primary}}</a>{{/if}}',
-    '</div>',
-    '{{/if}}',
-    '{{/if}}',
-
-    '</div>'
-  ].join('\n')),
-
-  header: '&nbsp;',
-  body: '&nbsp;',
-  encodeBody: true,
-  // define bodyClass which extends Ember.View to use an arbitrary Handlebars template as the body
-  primary: 'OK',
-  secondary: 'Cancel',
-  autoHeight: true,
-
-  onPrimary: function() {
-  },
-
-  onSecondary: function() {
-    this.hide();
-  },
-
-  onClose: function() {
-    this.hide();
-  },
-
-  hide: function() {
-    this.destroy();
-  },
-
-  showFooter: true,
-
-  /**
-   * Hide or show 'X' button for closing popup
-   */
-  showCloseButton: true,
-
-  didInsertElement: function(){
-    if(this.autoHeight){
-      this._super();
-      var block = this.$().find('#modal > .modal-body').first();
-      block.css('max-height', $(window).height() - block.offset().top - 300 + $(window).scrollTop()); // fix popup height
-    }
-  },
-
-  fitHeight: function(){
-    var popup = this.$().find('#modal');
-    var block = this.$().find('#modal > .modal-body');
-    var wh = $(window).height();
-
-    var top = wh * .05;
-    popup.css({
-      'top' : top + 'px',
-      'marginTop' : 0
-    });
-
-    block.css('max-height', $(window).height()- top * 2 - 100);
-  }
-});
-
-App.ModalPopup.reopenClass({
-
-  show: function(options) {
-    var popup = this.create(options);
-    popup.appendTo('#wrapper');
-    return popup;
-  }
-
-})
-
-App.showReloadPopup = function(){
-  return App.ModalPopup.show({
-    primary: null,
-    secondary: null,
-    showFooter: false,
-    header: this.t('app.reloadPopup.header'),
-    body: "<div class='alert alert-info'><div class='spinner'>" + this.t('app.reloadPopup.text') + "</div></div><div><a href='#' onclick='location.reload();'>" + this.t('app.reloadPopup.link') + "</a></div>",
-    encodeBody: false
-  });
-}
\ No newline at end of file
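A typical call site for the popup above, via the static show() helper (header and body strings are placeholders):

    App.ModalPopup.show({
      header: 'Confirm restart',
      body: 'Restart all affected services?',
      onPrimary: function () {
        // do the work, then dismiss the popup
        this.hide();
      }
      // for arbitrary markup, pass a bodyClass view instead of a body string
    });
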
diff --git a/branch-1.2/ambari-web/app/views/common/quick_view_link_view.js b/branch-1.2/ambari-web/app/views/common/quick_view_link_view.js
deleted file mode 100644
index 65f92db..0000000
--- a/branch-1.2/ambari-web/app/views/common/quick_view_link_view.js
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.QuickViewLinks = Em.View.extend({
-
-  /**
-   * Updated quick links. Here we put the correct hostname into the url
-   */
-  quickLinks:function () {
-    var serviceName = this.get('content.serviceName');
-    var components = this.get('content.hostComponents');
-    var host;
-
-    if (serviceName === 'HDFS') {
-      host = components.findProperty('componentName', 'NAMENODE').get('host.publicHostName');
-    } else if (serviceName === 'MAPREDUCE') {
-      host = components.findProperty('componentName', 'JOBTRACKER').get('host.publicHostName');
-    } else if (serviceName === 'HBASE') {
-      host = components.findProperty('componentName', 'HBASE_MASTER').get('host.publicHostName');
-    }
-    if (!host) {
-      return [];
-    }
-    return this.get('content.quickLinks').map(function (item) {
-      if (item.get('url')) {
-        item.set('url', item.get('url').fmt(host));
-      }
-      return item;
-    });
-  }.property('content.quickLinks.@each.label'),
-
-  linkTarget:function () {
-    switch (this.get('content.serviceName').toLowerCase()) {
-      case "hdfs":
-      case "mapreduce":
-      case "hbase":
-        return "_blank";
-        break;
-      default:
-        return "";
-        break;
-    }
-  }.property('service')
-
-});
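The url substitution above relies on Ember's String.fmt with a %@ placeholder; for example (hostname and port are illustrative):

    var host = 'c6401.ambari.apache.org';
    'http://%@:50070'.fmt(host); // -> 'http://c6401.ambari.apache.org:50070'
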
diff --git a/branch-1.2/ambari-web/app/views/common/sort_view.js b/branch-1.2/ambari-web/app/views/common/sort_view.js
deleted file mode 100644
index 28f17b0..0000000
--- a/branch-1.2/ambari-web/app/views/common/sort_view.js
+++ /dev/null
@@ -1,125 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var misc = require('utils/misc');
-
-/**
- * Wrapper View for all sort components. The layout template and common actions are located inside of it.
- * Logic specific to sort fields
- * is located in the inner view - <code>fieldView</code>.
- *
- * @type {*}
- */
-var wrapperView = Em.View.extend({
-  tagName: 'tr',
-  /**
-   * sort content by property
-   * @param property
-   * @param order: true - DESC, false - ASC
-   */
-  sort: function(property, order){
-    var content = this.get('content').toArray();
-    var sortFunc = this.getSortFunc(property, order);
-    this.resetSort();
-    content.sort(sortFunc);
-    this.set('content', content);
-  },
-  /**
-   * reset all sort fields
-   */
-  resetSort: function(){
-    this.get('childViews').setEach('status', 'sorting');
-  },
-  /**
-   * determines sort function depending on the type of sort field
-   * @param property
-   * @param order
-   * @return {*}
-   */
-  getSortFunc: function(property, order){
-    var func;
-    switch (property.get('type')){
-      case 'ip':
-        func = function (a, b) {
-          var aIp = misc.ipToInt(a.get(property.get('name')));
-          var bIp = misc.ipToInt(b.get(property.get('name')));
-          if(order){
-            return bIp - aIp;
-          } else {
-            return aIp - bIp;
-          }
-        };
-        break;
-      default:
-        func = function(a,b){
-          if(order){
-            if (a.get(property.get('name')) > b.get(property.get('name')))
-              return -1;
-            if (a.get(property.get('name')) < b.get(property.get('name')))
-              return 1;
-            return 0;
-          } else {
-            if (a.get(property.get('name')) < b.get(property.get('name')))
-              return -1;
-            if (a.get(property.get('name')) > b.get(property.get('name')))
-              return 1;
-            return 0;
-          }
-        }
-    }
-    return func;
-  }
-});
-/**
- * Particular view that contains the sort field properties:
- * name - name of the property in the content table
- * type (optional) - specific type to sort by
- * displayName - label to display
- * @type {*}
- */
-var fieldView = Em.View.extend({
-  template:Em.Handlebars.compile('{{view.displayName}}'),
-  classNameBindings: ['status'],
-  tagName: 'th',
-  name: null,
-  displayName: null,
-  status: 'sorting',
-  type: null,
-  /**
-   * callback that runs sorting and defines the sort order
-   * @param event
-   */
-  click: function(event){
-    if(this.get('status') === 'sorting_desc'){
-      this.get('parentView').sort(this, false);
-      this.set('status', 'sorting_asc');
-    } else {
-      this.get('parentView').sort(this, true);
-      this.set('status', 'sorting_desc');
-    }
-  }
-});
-
-/**
- * Result object, which will be accessible outside
- * @type {Object}
- */
-module.exports = {
-  wrapperView: wrapperView,
-  fieldView: fieldView
-};
\ No newline at end of file
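A hedged sketch of wiring a sortable header row from the two exported views (the content binding and column fields are hypothetical):

    var sort = require('views/common/sort_view');
    var hostsSortRow = sort.wrapperView.extend({
      contentBinding: 'controller.hosts' // rows to be sorted
    });
    var ipHeader = sort.fieldView.extend({
      name: 'ip',                        // property in the content rows
      type: 'ip',                        // selects the ipToInt comparator above
      displayName: 'IP Address'
    });
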
diff --git a/branch-1.2/ambari-web/app/views/common/time_range.js b/branch-1.2/ambari-web/app/views/common/time_range.js
deleted file mode 100644
index 3983d11..0000000
--- a/branch-1.2/ambari-web/app/views/common/time_range.js
+++ /dev/null
@@ -1,221 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-/**
- * Usage: {{view App.TimeRangeWidget controllerBinding="App.router.mainChartsController"}}
- * Sets the controller.preset field with the preset value.
- * The widget assigns itself to the controller as presetWidget (controller.get('presetWidget'))
- * @type {*}
- */
-App.TimeRangeWidget = Em.View.extend({
-  classNames:['time-range-widget'],
-  templateName:require('templates/common/time_range'),
-  dateFrom: null,
-  dateTo: null,
-
-  /**
-   * presets
-   */
-  presets:[
-    Em.Object.create({ label:Em.I18n.t('timeRange.presets.1hour'), value:'1h', period: 3600000, step: 300000}),
-    Em.Object.create({ label:Em.I18n.t('timeRange.presets.12hour'), value:'12h', period: 43200000, step: 3600000}),
-    Em.Object.create({ label:Em.I18n.t('timeRange.presets.1day'), value:'1d', period: 86400000, step: 3600000}),
-    Em.Object.create({ label:Em.I18n.t('timeRange.presets.1week'), value:'1wk', period: 604800000, step: 86400000}),
-    Em.Object.create({ label:Em.I18n.t('timeRange.presets.1month'), value:'1mo', period: 2592000000, step: 86400000}),
-    Em.Object.create({ label:Em.I18n.t('timeRange.presets.1year'), value:'1yr', period: 31536000000, step: 2592000000})
-  ],
-  /**
-   * chosen preset value
-   */
-  chosenPreset: null,
-
-  /**
-   * return array of chosen presets
-   */
-  chosenPresets:function () {
-    return this.get('chosenPreset') ? [this.get('chosenPreset')] : this.get('defaultPresets');
-  }.property('chosenPreset'),
-
-  /**
-   * preset item view
-   */
-  presetView:Em.View.extend({
-    tagName:'li',
-    classNameBindings:['disabled'],
-    disabled:function () {
-      return this.get('isActive') ? "disabled" : false;
-    }.property('isActive'),
-    isActive:function () {
-      return this.get('preset.value') == this.get('widget.chosenPreset.value');
-    }.property('widget.chosenPreset'),
-    template:Em.Handlebars.compile('<a {{action activate view.preset target="view.widget" href="true" }}>{{unbound view.preset.label}}</a>')
-  }),
-
-  /**
-   * return default selected presets (currently - all)
-   */
-  defaultPresets:function () {
-    var values = [];
-    $.each(this.get('presets'), function () {
-      if (this.value) {
-        values.push(this.value);
-      }
-    });
-    return values;
-  }.property(),
-
-  bindToController:function () {
-    var thisW = this;
-    var controller = this.get('controller');
-    controller.set('presetWidget', thisW);
-  },
-
-  /**
-   * assign this widget to controller, prepare items by presetsConfig
-   */
-  init:function () {
-    this._super();
-    this.bindToController();
-  },
-
-  /**
-   * write active preset to widget
-   * @param event
-   */
-  activate:function (event) {
-    if (event.context == this.get('chosenPreset')) {
-      this.set('chosenPreset', null);
-    } else {
-      this.set('chosenPreset', event.context);
-    }
-  },
-
-  dateFromView: Ember.TextField.extend({
-    elementId: 'timeRangeFrom',
-    classNames: 'timeRangeFrom',
-    attributeBindings:['readonly'],
-    readonly: true,
-    didInsertElement: function() {
-      var self = this;
-      this.$().datetimepicker({
-        dateFormat: 'dd/mm/yy',
-        timeFormat: 'hh:mm',
-        maxDate: new Date(),
-        onClose:function (dateText, inst) {
-          var endDateTextBox = $('#timeRangeTo');
-          if (endDateTextBox.val() != '') {
-            var testStartDate = new Date(dateText);
-            var testEndDate = new Date(endDateTextBox.val());
-            if (testStartDate > testEndDate)
-              endDateTextBox.val(dateText);
-          } else {
-            endDateTextBox.val(dateText);
-          }
-          self.set('dateFrom', dateText);
-        },
-        onSelect:function (selectedDateTime) {
-          var start = $(this).datetimepicker('getDate');
-          $('#timeRangeTo').datetimepicker('option', 'minDate', new Date(start.getTime()));
-        }
-      });
-      self.set('dateFrom', this.get('value'));
-    }
-  }),
-
-  dateToView: Ember.TextField.extend({
-    elementId: 'timeRangeTo',
-    classNames: 'timeRangeTo',
-    attributeBindings:['readonly'],
-    readonly: true,
-    didInsertElement: function() {
-      var self = this;
-      this.$().datetimepicker({
-        dateFormat: 'dd/mm/yy',
-        timeFormat: 'hh:mm',
-        maxDate: new Date(),
-        onClose:function (dateText, inst) {
-          var startDateTextBox = $('#timeRangeFrom');
-          if (startDateTextBox.val() != '') {
-            var testStartDate = new Date(startDateTextBox.val());
-            var testEndDate = new Date(dateText);
-            if (testStartDate > testEndDate)
-              startDateTextBox.val(dateText);
-          } else {
-            startDateTextBox.val(dateText);
-          }
-          self.set('dateTo', dateText);
-        },
-        onSelect:function (selectedDateTime) {
-          var end = $(this).datetimepicker('getDate');
-          $('#timeRangeFrom').datetimepicker('option', 'maxDate', new Date(end.getTime()));
-        }
-      });
-      self.set('dateTo', this.get('value'));
-    }
-  }),
-
-  sliderOptions: Ember.Object.extend({
-    end: null,
-    period: null,
-    start: function() {
-      return this.get('end') - this.get('period');
-    }.property('end', 'period')
-  }),
-  nowLabel: null,
-  rangeLabel: null,
-  buildSlider: function() {
-    if (this.get('chosenPreset')) {
-      var sliderOptions = this.sliderOptions.create({
-        end: function() {
-          var endDate = new Date();
-          return endDate.getTime();
-        }.property(),
-        period: this.get('chosenPreset.period'),
-        step: this.get('chosenPreset.step'),
-        countTimeAgo: function(stepValue) {
-          var msAgo = this.get('end') - stepValue;
-          return msAgo.toDaysHoursMinutes();
-        }
-      });
-      this.set('nowLabel', 'Now');
-      this.set('rangeLabel', new Date(sliderOptions.get('start')));
-
-      var self = this;
-      $('#slider').slider({
-        range: "max",
-        min: sliderOptions.get('start'),
-        max: sliderOptions.get('end'),
-        value: sliderOptions.get('start'),
-        step: sliderOptions.get('step'),
-        stop: function(event, ui) {
-          self.set('rangeLabel', new Date(ui.value));
-//          self.set('rangeLabel', sliderOptions.countTimeAgo(ui.value).h);
-        },
-        slide: function(event, ui){
-          self.set('rangeLabel', new Date(ui.value));
-//          self.set('rangeLabel', sliderOptions.countTimeAgo(ui.value).h);
-        }
-      });
-    } else {
-      console.log(this.get('chosenPreset'));
-      $("#slider").slider("destroy");
-    }
-  }.observes('chosenPreset')
-})
\ No newline at end of file
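The preset numbers above are plain millisecond arithmetic; taking the '1h' preset as a worked example:

    var HOUR = 60 * 60 * 1000;    // 3600000 ms -> the '1h' period
    var FIVE_MIN = 5 * 60 * 1000; // 300000 ms  -> its slider step
    // buildSlider() then runs from start = now - HOUR to end = now in 5-minute steps
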
diff --git a/branch-1.2/ambari-web/app/views/installer.js b/branch-1.2/ambari-web/app/views/installer.js
deleted file mode 100644
index 595dd35..0000000
--- a/branch-1.2/ambari-web/app/views/installer.js
+++ /dev/null
@@ -1,66 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-var App = require('app');
-
-App.InstallerView = Em.View.extend({
-
-  templateName: require('templates/installer'),
-
-  isStep1Disabled: function () {
-    return this.get('controller.isStepDisabled').findProperty('step',1).get('value');
-  }.property('controller.isStepDisabled.@each.value').cacheable(),
-
-  isStep2Disabled: function () {
-    return this.get('controller.isStepDisabled').findProperty('step',2).get('value');
-  }.property('controller.isStepDisabled.@each.value').cacheable(),
-
-  isStep3Disabled: function () {
-    return this.get('controller.isStepDisabled').findProperty('step',3).get('value');
-  }.property('controller.isStepDisabled.@each.value').cacheable(),
-
-  isStep4Disabled: function () {
-    return this.get('controller.isStepDisabled').findProperty('step',4).get('value');
-  }.property('controller.isStepDisabled.@each.value').cacheable(),
-
-  isStep5Disabled: function () {
-    return this.get('controller.isStepDisabled').findProperty('step',5).get('value');
-  }.property('controller.isStepDisabled.@each.value').cacheable(),
-
-  isStep6Disabled: function () {
-    return this.get('controller.isStepDisabled').findProperty('step',6).get('value');
-  }.property('controller.isStepDisabled.@each.value').cacheable(),
-
-  isStep7Disabled: function () {
-    return this.get('controller.isStepDisabled').findProperty('step',7).get('value');
-  }.property('controller.isStepDisabled.@each.value').cacheable(),
-
-  isStep8Disabled: function () {
-    return this.get('controller.isStepDisabled').findProperty('step',8).get('value');
-  }.property('controller.isStepDisabled.@each.value').cacheable(),
-
-  isStep9Disabled: function () {
-    return this.get('controller.isStepDisabled').findProperty('step',9).get('value');
-  }.property('controller.isStepDisabled.@each.value').cacheable(),
-
-  isStep10Disabled: function () {
-    return this.get('controller.isStepDisabled').findProperty('step',10).get('value');
-  }.property('controller.isStepDisabled.@each.value').cacheable()
-
-});
\ No newline at end of file
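The ten computed properties above differ only in their step number; they could equally be generated in a loop, e.g. (a sketch of an alternative, not what the code does):

    var steps = {};
    for (var i = 1; i <= 10; i++) {
      (function (step) { // capture the step number per iteration
        steps['isStep' + step + 'Disabled'] = function () {
          return this.get('controller.isStepDisabled').findProperty('step', step).get('value');
        }.property('controller.isStepDisabled.@each.value').cacheable();
      })(i);
    }
    App.InstallerView.reopen(steps);
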
diff --git a/branch-1.2/ambari-web/app/views/loading.js b/branch-1.2/ambari-web/app/views/loading.js
deleted file mode 100644
index a8fd389..0000000
--- a/branch-1.2/ambari-web/app/views/loading.js
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.LoadingView = Em.View.extend({
-    tagName: 'h2',
-    template: Ember.Handlebars.compile('Loading...')
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/login.js b/branch-1.2/ambari-web/app/views/login.js
deleted file mode 100644
index 3c3c9fd..0000000
--- a/branch-1.2/ambari-web/app/views/login.js
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-var App = require('app');
-
-App.LoginView = Em.View.extend({
-
-  templateName: require('templates/login'),
-  loginTextField: Em.TextField.extend({
-    didInsertElement: function(){
-      this._super();
-      this.$().focus();
-    }
-  }),
-  passTextField : Em.TextField.extend({
-    insertNewline: function(){
-      this.get("controller").submit();
-    }
-  })
-
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main.js b/branch-1.2/ambari-web/app/views/main.js
deleted file mode 100644
index 0e0da78..0000000
--- a/branch-1.2/ambari-web/app/views/main.js
+++ /dev/null
@@ -1,101 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-var App = require('app');
-
-App.MainView = Em.View.extend({
-  templateName:require('templates/main')
-});
-
-App.MainBackgroundOperation = Em.View.extend({
-  content: null,
-  classNames: ['background-operations'],
-  classNameBindings: ['isOpen'],
-  isOpen: function () {
-    return this.get('content.isOpen');
-  }.property('content.isOpen'),
-  iconClass: function(){
-    return this.get('isOpen') ? 'icon-minus' : 'icon-plus';
-  }.property('isOpen'),
-  showOperationLog:function(){
-    this.set('content.isOpen', !this.get('content.isOpen'));
-    this.set('isTextArea', false);
-  },
-  hasProgressBar: function () {
-    return this.get('content.command') == 'EXECUTE';
-  }.property('content.command'),
-  isInProgress: function () {
-    var status = this.get('content.status');
-    return status == 'IN_PROGRESS' || status == 'QUEUED' || status == 'PENDING';
-  }.property('content.status'),
-  barColor: function () {
-    if (this.get('isInProgress')) {
-      return 'progress-info';
-    } else {
-      if (this.get('content.status') == 'COMPLETED') return 'progress-success';
-      return 'progress-danger';
-    }
-  }.property('isInProgress'),
-  buttonLabel:function(){
-    var button = $(this.get('element')).find('.textTrigger');
-    if(this.get('isTextArea')){
-      button.text('press CTRL+C');
-    } else {
-      button.text('click to highlight');
-    }
-  }.observes('isTextArea'),
-  didInsertElement: function () {
-    var self = this;
-    var button = $(this.get('element')).find('.textTrigger');
-    button.click(function () {
-      self.set('isTextArea', !self.get('isTextArea'));
-    });
-    $(this.get('element')).find('.content-area').mouseenter(
-      function () {
-        var element = $(this);
-        element.css('border', '1px solid #dcdcdc');
-        button.css('visibility', 'visible');
-      }).mouseleave(
-      function () {
-        var element = $(this);
-        element.css('border', 'none');
-        button.css('visibility', 'hidden');
-      })
-  },
-  isTextArea: false,
-  textArea: Em.TextArea.extend({
-    didInsertElement: function(){
-      var element = $(this.get('element'));
-      element.width($(this.get('parentView').get('element')).width() - 10);
-      element.height($(this.get('parentView').get('element')).height());
-      element.select();
-      element.css('resize', 'none');
-    },
-    readOnly: true,
-    value: function(){
-      var operation = this.get('content');
-      var content = "";
-      content += operation.command + " " + operation.role + " on " + operation.host_name + "\n";
-      content += "exitcode: " + operation.exit_code + "\n";
-      content += "stderr: " + operation.stderr + "\n";
-      content += "stdout: " + operation.stdout + "\n";
-      return content;
-    }.property('content')
-  })
-});
\ No newline at end of file
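
Two Ember primitives carry most of the view above: computed properties (.property(...)) that derive values such as isInProgress and barColor from content.status, and observers (.observes(...)) that run side effects such as relabeling the copy trigger. A reduced sketch of the idiom, assuming Ember's prototype extensions are enabled as elsewhere in this codebase:

  var StatusView = Em.View.extend({
    content: null,
    // Recomputed whenever content.status changes.
    isInProgress: function () {
      var status = this.get('content.status');
      return status === 'IN_PROGRESS' || status === 'QUEUED' || status === 'PENDING';
    }.property('content.status'),
    // Both dependent keys are declared so a status flip while idle
    // (e.g. COMPLETED to FAILED) still recomputes the CSS class.
    barColor: function () {
      if (this.get('isInProgress')) return 'progress-info';
      return this.get('content.status') === 'COMPLETED' ? 'progress-success' : 'progress-danger';
    }.property('isInProgress', 'content.status')
  });
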
diff --git a/branch-1.2/ambari-web/app/views/main/admin.js b/branch-1.2/ambari-web/app/views/main/admin.js
deleted file mode 100644
index 3ebb139..0000000
--- a/branch-1.2/ambari-web/app/views/main/admin.js
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.MainAdminView = Em.View.extend({
-    templateName: require('templates/main/admin')
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/admin/advanced.js b/branch-1.2/ambari-web/app/views/main/admin/advanced.js
deleted file mode 100644
index 8e0dcf2..0000000
--- a/branch-1.2/ambari-web/app/views/main/admin/advanced.js
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.MainAdminAdvancedView = Em.View.extend({
-  templateName: require('templates/main/admin/advanced'),
-  params: {keepData: 1}
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/admin/advanced/password.js b/branch-1.2/ambari-web/app/views/main/admin/advanced/password.js
deleted file mode 100644
index f15a8b4..0000000
--- a/branch-1.2/ambari-web/app/views/main/admin/advanced/password.js
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.MainAdminAdvancedPasswordView = App.Form.extend({
-  fieldsOptions:[
-    { name:"password", displayName:"Password", displayType:"password", disableRequiredOnExistent:true },
-    { name:"passwordRetype", displayName:"Retype Password", displayType:"passwordRetype", disableRequiredOnExistent:true }
-  ],
-  fields: []
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/admin/audit.js b/branch-1.2/ambari-web/app/views/main/admin/audit.js
deleted file mode 100644
index d9c40d8..0000000
--- a/branch-1.2/ambari-web/app/views/main/admin/audit.js
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-require('views/common/grid');
-
-App.MainAdminAuditView = App.Grid.extend({
-  _collection: {className: App.ServiceAudit},
-  prepareColumns:function () {
-    this._super();
-
-    this.addColumn({
-      name:"date",
-      label:Em.I18n.t("admin.audit.grid.date")
-    });
-    this.addColumn({
-      name:"service.label",
-      label:Em.I18n.t("admin.audit.grid.service")
-    });
-    this.addColumn({
-      name:"operationName",
-      label:Em.I18n.t("admin.audit.grid.operationName")
-    });
-    this.addColumn({
-      name:"user.userName",
-      label:Em.I18n.t("admin.audit.grid.performedBy")
-    });
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/admin/authentication.js b/branch-1.2/ambari-web/app/views/main/admin/authentication.js
deleted file mode 100644
index c0a1d1e..0000000
--- a/branch-1.2/ambari-web/app/views/main/admin/authentication.js
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.MainAdminAuthenticationView = Em.View.extend({
-  templateName:require('templates/main/admin/authentication'),
-  form:App.AuthenticationForm.create({}),
-  pushAuthenticationToForm:function () {
-    var auth = App.Authentication.find(1);
-    App.router.set('mainAdminAuthenticationController.content', auth);
-    this.form.set('object', auth);
-  },
-  didInsertElement: function (){
-    this._super();
-    this.pushAuthenticationToForm();
-  },
-
-  ldapChecked:function () {
-    return this.get('form.field.method.value');
-  }.property('form.field.method.value'),
-
-  useCredentials:function () {
-    return this.get('form.field.bindMethod.value');
-  }.property('form.field.bindMethod.value')
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/admin/menu.js b/branch-1.2/ambari-web/app/views/main/admin/menu.js
deleted file mode 100644
index 1994ba9..0000000
--- a/branch-1.2/ambari-web/app/views/main/admin/menu.js
+++ /dev/null
@@ -1,66 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.MainAdminMenuView = Em.CollectionView.extend({
-  content:[
-    {
-      route:'user',
-      label:'Users'
-    }/*,
-    {
-      route:'authentication',
-      label:'Authentication'
-    },
-    {
-      route:'security',
-      label:'Security'
-    },
-    {
-      route:'audit',
-      label:'Audit'
-    }*/
-    /*,
-    {
-      route:'advanced',
-      label:'Advanced'
-    }
-    */
-  ],
-  tagName: "ul",
-  classNames: ["nav", "nav-list"],
-
-  init: function(){
-    this._super();
-    this.activateView(); // default selected menu
-  },
-
-  activateView:function () {
-    var route = App.get('router.mainAdminController.category');
-    $.each(this._childViews, function () {
-      this.set('active', (this.get('content.route') == route ? "active" : ""));
-    });
-  }.observes('App.router.mainAdminController.category'),
-
-  itemViewClass:Em.View.extend({
-    classNameBindings:["active"],
-    active:"",
-    template:Ember.Handlebars.compile('<a {{action adminNavigate view.content.route }} href="#"> {{unbound view.content.label}}</a>')
-  })
-});
\ No newline at end of file
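
This menu and the charts menu later in this patch share one pattern: an Em.CollectionView whose item views toggle an 'active' CSS class whenever an observed router property changes. A condensed sketch (the route name comes from the code above; treat the rest as illustrative):

  App.SimpleMenuView = Em.CollectionView.extend({
    tagName: 'ul',
    content: [ { route: 'user', label: 'Users' } ],

    activateView: function () {
      var route = App.get('router.mainAdminController.category');
      // _childViews is the private child list the original code also iterates
      $.each(this._childViews, function () {
        this.set('active', this.get('content.route') === route ? 'active' : '');
      });
    }.observes('App.router.mainAdminController.category'),

    itemViewClass: Em.View.extend({
      classNameBindings: ['active'],
      active: ''
    })
  });
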
diff --git a/branch-1.2/ambari-web/app/views/main/admin/user.js b/branch-1.2/ambari-web/app/views/main/admin/user.js
deleted file mode 100644
index 5a7b9ae..0000000
--- a/branch-1.2/ambari-web/app/views/main/admin/user.js
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.MainAdminUserView = Em.View.extend({
-  templateName: require('templates/main/admin/user'),
-  users: App.User.find(),
-  delete: function(event, context){
-    console.log("EVENT:");
-    console.log(event);
-
-    console.log("CONTEXT:");
-    console.log(context);
-  },
-  ldapUser: function(){
-    var auth = App.router.get('mainAdminAuthenticationController.content');
-    return auth.get('method');
-  }.property('App.router.mainAdminAuthenticationController.content')
-
-
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/admin/user/create.js b/branch-1.2/ambari-web/app/views/main/admin/user/create.js
deleted file mode 100644
index 16e6810..0000000
--- a/branch-1.2/ambari-web/app/views/main/admin/user/create.js
+++ /dev/null
@@ -1,75 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.MainAdminUserCreateView = Em.View.extend({
-  templateName: require('templates/main/admin/user/create'),
-  userId: false,
-  create: function(event){
-    var parent_controller=this.get("controller").controllers.mainAdminUserController;
-    var form = this.get("userForm");
-    if(form.isValid()) {
-      form.getField("userName").set('value', form.getField("userName").get('value').toLowerCase());
-      if(form.getField("admin").get('value') === "" || form.getField("admin").get('value') == true) {
-        form.getField("roles").set("value","admin,user");
-        form.getField("admin").set("value","true");
-      } else{
-        form.getField("roles").set("value","user");
-      }
-      parent_controller.sendCommandToServer('/users/' + form.getField("userName").get('value'), "POST" , {
-        Users: {
-          password: form.getField("password").get('value'),
-          roles: form.getField("roles").get('value')
-        }
-      }, function (success) {
-
-        if (!success) {
-          App.ModalPopup.show({
-            header: Em.I18n.t('admin.users.addButton'),
-            body: Em.I18n.t('admin.users.createError'),
-            primary: 'Ok',
-            secondary: null,
-            onPrimary: function() {
-              this.hide();
-            }
-          });
-          return;
-        }
-        App.ModalPopup.show({
-          header: Em.I18n.t('admin.users.addButton'),
-          body: Em.I18n.t('admin.users.createSuccess'),
-          primary: 'Ok',
-          secondary: null,
-          onPrimary: function() {
-            this.hide();
-          }
-        });
-        form.save();
-
-        App.router.transitionTo("allUsers");
-      })
-    }
-  },
-
-  userForm: App.CreateUserForm.create({}),
-
-  didInsertElement: function(){
-    this.get('userForm').propertyDidChange('object');
-  }
-});
diff --git a/branch-1.2/ambari-web/app/views/main/admin/user/edit.js b/branch-1.2/ambari-web/app/views/main/admin/user/edit.js
deleted file mode 100644
index 4a807a9..0000000
--- a/branch-1.2/ambari-web/app/views/main/admin/user/edit.js
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.MainAdminUserEditView = Em.View.extend({
-  templateName: require('templates/main/admin/user/edit'),
-  userId: false,
-  edit: function(event){
-    var parent_controller=this.get("controller").controllers.mainAdminUserController;
-    var form = this.get("userForm");
-    if(form.isValid()) {
-      var Users={};
-      if(form.getField("admin").get('value') === "" || form.getField("admin").get('value') == true) {
-        form.getField("roles").set("value","admin,user");
-        form.getField("admin").set("value", true);
-      } else{
-        form.getField("roles").set("value","user");
-      }
-
-      Users.roles = form.getField("roles").get('value');
-
-      if(form.getField("new_password").get('value') != "" && form.getField("old_password").get('value') != "") {
-        Users.password = form.getField("new_password").get('value');
-        Users.old_password = form.getField("old_password").get('value');
-      }
-
-      parent_controller.sendCommandToServer('/users/' + form.getField("userName").get('value'), "PUT" , {
-       Users:Users
-      }, function (success, message) {
-        if (!success) {
-          App.ModalPopup.show({
-            header: Em.I18n.t('admin.users.editButton'),
-            body: message,
-            primary: 'Ok',
-            secondary: null,
-            onPrimary: function() {
-              this.hide();
-            }
-          });
-          return;
-        }
-
-        form.save();
-
-        App.router.transitionTo("allUsers");
-      })
-    }
-  },
-
-  userForm: App.EditUserForm.create({}),
-
-  didInsertElement: function() {
-    var form = this.get('userForm');
-    if(form.getField("isLdap").get("value")) {
-      form.getField("old_password").set("disabled",true);
-      form.getField("new_password").set("disabled",true);
-    }
-    else {
-      form.getField("old_password").set("disabled",false);
-      form.getField("new_password").set("disabled",false);
-    }
-    form.propertyDidChange('object');
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/admin/user/row.js b/branch-1.2/ambari-web/app/views/main/admin/user/row.js
deleted file mode 100644
index db4c8b3..0000000
--- a/branch-1.2/ambari-web/app/views/main/admin/user/row.js
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.MainAdminUserRowView = Em.View.extend({
-  templateName: require('templates/main/admin/user/row'),
-  disableCheckBoxes: "disabled"
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/apps/item/bar_view.js b/branch-1.2/ambari-web/app/views/main/apps/item/bar_view.js
deleted file mode 100644
index 9621332..0000000
--- a/branch-1.2/ambari-web/app/views/main/apps/item/bar_view.js
+++ /dev/null
@@ -1,128 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-var graph = require('utils/graph');
-
-App.MainAppsItemBarView = Em.View.extend({
-  elementId:'bars',
-  templateName:require('templates/main/apps/item/bar'),
-  width:300,
-  height:210,
-  /**
-   * Jobs list. Sorted by job id
-   */
-  content:function () {
-    return this.get('controller.content.jobs').sort(function(a, b) {
-      var jobIdA = a.get('id').toLowerCase(), jobIdB = b.get('id').toLowerCase();
-      if (jobIdA < jobIdB)
-        return -1;
-      if (jobIdA > jobIdB)
-        return 1;
-      return 0;
-    });
-  }.property('controller.content.jobs'),
-  firstJob:function () {
-    return this.get('content').get('firstObject');
-  }.property('content'),
-  activeJob:null,
-  selectJob:function (event) {
-    this.set('activeJob', event.context);
-
-  },
-  onLoad:function () {
-    if (!this.get('controller.content.loadAllJobs') || this.get('activeJob')) {
-      return;
-    }
-
-    this.set('activeJob', this.get('firstJob'));
-  }.observes('controller.content.loadAllJobs'),
-  didInsertElement:function () {
-    this.onLoad();
-  },
-  draw:function () {
-    var self = this;
-    if (!this.get('activeJob')) {
-      return; // nothing to draw until a job is selected
-    }
-
-    var desc1 = $('#graph1_desc');
-    var desc2 = $('#graph2_desc');
-    $('.rickshaw_graph, .rickshaw_legend, .rickshaw_annotation_timeline').html('');
-    if (null == desc1.html() || null == desc2.html()) return;
-    desc1.css('display', 'block');
-    desc2.css('display', 'block');
-
-    this.propertyDidChange('getChartData');
-
-  }.observes('activeJob'),
-
-  map:false,
-  shuffle:false,
-  reduce:false,
-
-  mapNodeLocal:false,
-  mapRackLocal:false,
-  mapOffSwitch:false,
-  reduceOffSwitch:false,
-  submit:false,
-  finish:false,
-
-  updateTimeLine:function () {
-    var url = App.testMode ? '/data/apps/jobs/timeline.json' : App.apiPrefix + "/jobhistory/task?jobId=" + this.get('activeJob').get('id') + 
-      "&width=" + this.get('width');
-    var mapper = App.jobTimeLineMapper;
-    mapper.set('model', this);
-    App.HttpClient.get(url, mapper,{
-      complete:function(jqXHR, textStatus) {
-        console.log("updateTimeLine");
-      }
-    });
-  }.observes('getChartData'),
-
-  updateTasksView:function () {
-    var url = App.testMode ? '/data/apps/jobs/taskview.json' : App.apiPrefix + "/jobhistory/tasklocality?jobId=" + this.get('activeJob').get('id');
-    var mapper = App.jobTasksMapper;
-    mapper.set('model', this);
-    App.HttpClient.get(url, mapper,{
-      complete:function(jqXHR, textStatus) {
-        console.log("updateTasksView");
-      }
-    });
-  }.observes('getChartData'),
-
-  drawJobTimeline:function () {
-    var map = JSON.stringify(this.get('map'));
-    var shuffle = JSON.stringify(this.get('shuffle'));
-    var reduce = JSON.stringify(this.get('reduce'));
-    if (!this.get('map') || !this.get('shuffle') || !this.get('reduce')) {return;}
-    $('#chart, #legend, #timeline1, #y-axis').html('');
-    graph.drawJobTimeLine(map, shuffle, reduce, this.get('width'), this.get('height'), '#chart', 'legend', 'timeline1');
-  }.observes('map', 'shuffle', 'reduce'),
-
-  drawJobTasks:function () {
-    var mapNodeLocal = JSON.stringify(this.get('mapNodeLocal'));
-    var mapRackLocal = JSON.stringify(this.get('mapRackLocal'));
-    var mapOffSwitch = JSON.stringify(this.get('mapOffSwitch'));
-    var reduceOffSwitch = JSON.stringify(this.get('reduceOffSwitch'));
-    if (!this.get('mapNodeLocal') || !this.get('mapRackLocal') || !this.get('mapOffSwitch') || !this.get('reduceOffSwitch')) {return;}
-    $('#job_tasks, #tasks_legend, #timeline2, #y-axis2').html('');
-    graph.drawJobTasks(mapNodeLocal, mapRackLocal, mapOffSwitch, reduceOffSwitch, this.get('submit'), this.get('width'), this.get('height'), '#job_tasks', 'tasks_legend', 'timeline2');
-  }.observes('mapNodeLocal', 'mapRackLocal', 'mapOffSwitch', 'reduceOffSwitch', 'submit')
-
-});
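
A side note on the refresh trick used above: draw() calls this.propertyDidChange('getChartData') even though no such property is ever assigned; in Ember this manually notifies every handler declared with .observes('getChartData'), so updateTimeLine and updateTasksView re-fetch their data. A minimal sketch of the idiom (names hypothetical):

  var Refreshable = Em.Object.extend({
    // No real 'tick' property exists; calling refresh() simply notifies
    // everything that observes 'tick', forcing those handlers to re-run.
    refresh: function () {
      this.propertyDidChange('tick');
    },
    reload: function () {
      console.log('reloading chart data...');
    }.observes('tick')
  });
  // Refreshable.create().refresh();  // logs 'reloading chart data...'
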
diff --git a/branch-1.2/ambari-web/app/views/main/apps/item/dag_view.js b/branch-1.2/ambari-web/app/views/main/apps/item/dag_view.js
deleted file mode 100644
index b8bd117..0000000
--- a/branch-1.2/ambari-web/app/views/main/apps/item/dag_view.js
+++ /dev/null
@@ -1,145 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-var sort = require('views/common/sort_view');
-
-App.MainAppsItemDagView = Em.View.extend({
-  templateName: require('templates/main/apps/item/dag'),
-  elementId : 'jobs',
-  content:function(){
-    return this.get('controller.content.jobs');
-  }.property('controller.content.jobs'),
-
-  classNames:['table','dataTable'],
-  /**
-   * Converts content into the jobs structure expected by DagViewer.
-   */
-  jobs: function(){
-    var c = this.get('content');
-    var result = [];
-    c.forEach(function(item, index){
-      result[index] = {
-        'name' : item.get('id'),
-        'entityName' : item.get('workflow_entity_name'),
-        'status' : item.get('status') == 'SUCCESS',
-        'info' : [],
-        'input' : item.get('input'),
-        'output' : item.get('output'),
-        'submitTime' : item.get('submit_time'),
-        'elapsedTime' : item.get('elapsed_time')
-      };
-    });
-    return result;
-  }.property('content'),
-
-  loaded : false,
-
-  hasManyJobs: function(){
-    return (this.get('content') && this.get('content').length > 1);
-  }.property('content'),
-
-  onLoad:function (){
-    if(!this.get('controller.content.loadAllJobs') || this.get('loaded')){
-      return;
-    }
-
-    this.set('loaded', true);
-
-    var self = this;
-
-    Ember.run.next(function(){
-      self.draw();
-    });
-
-  }.observes('controller.content.loadAllJobs'),
-
-  resizeModal: function () {
-    var modal = $('.modal');
-    var body = $('body');
-    modal.find('.modal-body').first().css('max-height', 'none');
-    var modalHeight = modal.height() + 300;
-    var bodyHeight = body.height();
-    if (modalHeight > bodyHeight) {
-      modal.css('top', '20px');
-      $('.modal-body').height(bodyHeight - 180);
-    } else {
-      modal.css('top', (bodyHeight - modalHeight) / 2 + 'px');
-    }
-
-    var modalWidth = modal.width();
-    var bodyWidth = body.width();
-    if (modalWidth > bodyWidth) {
-      modal.css('left', '10px');
-      modal.width(bodyWidth - 20);
-    } else {
-      modal.css('left', (bodyWidth - modalWidth) / 2 + 'px');
-    }
-  },
-
-  didInsertElement: function(){
-    this.onLoad();
-  },
-
-  draw: function(){
-    var dagSchema = this.get('controller.content.workflowContext');
-    var jobs = this.get('jobs');
-    this.resizeModal();
-    var graph = new DagViewer('dag_viewer')
-        .setData(dagSchema, jobs)
-        .drawDag(this.$().width(), 300, 20);
-  },
-  sortView: sort.wrapperView,
-  nameSort: sort.fieldView.extend({
-    name:'workflow_entity_name',
-    displayName: Em.I18n.t('apps.item.dag.job')
-  }),
-  idSort: sort.fieldView.extend({
-    name:'id',
-    displayName: Em.I18n.t('apps.item.dag.jobId')
-  }),
-  statusSort: sort.fieldView.extend({
-    name:'status',
-    displayName: Em.I18n.t('apps.item.dag.status')
-  }),
-  mapsSort: sort.fieldView.extend({
-    name:'maps',
-    displayName: Em.I18n.t('apps.item.dag.maps')
-  }),
-  reducesSort: sort.fieldView.extend({
-    name:'reduces',
-    displayName: Em.I18n.t('apps.item.dag.reduces')
-  }),
-  inputSort: sort.fieldView.extend({
-    name:'input',
-    displayName: Em.I18n.t('apps.item.dag.input')
-  }),
-  outputSort: sort.fieldView.extend({
-    name:'output',
-    displayName: Em.I18n.t('apps.item.dag.output')
-  }),
-  durationSort: sort.fieldView.extend({
-    name:'elapsed_time',
-    displayName: Em.I18n.t('apps.item.dag.duration')
-  })
-});
diff --git a/branch-1.2/ambari-web/app/views/main/apps/item_view.js b/branch-1.2/ambari-web/app/views/main/apps/item_view.js
deleted file mode 100644
index 29300d1..0000000
--- a/branch-1.2/ambari-web/app/views/main/apps/item_view.js
+++ /dev/null
@@ -1,70 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.MainAppsItemView = Em.View.extend({
-  tagName: 'tr',
-  classNames : ['containerRow'],
-  templateName:require('templates/main/apps/item'),
-  menuTabs:[
-    Em.Object.create({
-      label:Em.I18n.t('apps.dagCharts.popup.dag'),
-      active:'active',
-      content:'App.MainAppsItemDagView'
-    }),
-    Em.Object.create({
-      label:Em.I18n.t('apps.dagCharts.popup.tasks'),
-      active:'',
-      content:'App.MainAppsItemBarView'
-    })
-  ],
-  activeTab:null,
-  switchTab:function(event){
-    var tabs = this.get('menuTabs');
-    for(var i = 0; i < tabs.length; i++){
-      if(tabs[i] === event.context){
-        tabs[i].set('active', 'active');
-      }
-      else {
-        tabs[i].set('active', '');
-      }
-    }
-    this.set('activeTab', event.context);
-  },
-  didInsertElement: function(){
-    var tabs = this.get('menuTabs');
-    tabs[0].set('active', 'active');
-    tabs[1].set('active', '');
-    this.set('activeTab', tabs[0]);
-  },
-  containerView: Em.ContainerView.extend({
-    onchange:function(){
-
-      if(this.get('childViews').length){
-        this.get('childViews').get('firstObject').destroy();
-      }
-
-      var view = this.get('parentView.activeTab.content').split('.')[1];
-      view = App[view].create({
-        controllerBinding: 'App.router.mainAppsItemController'
-      });
-      this.get('childViews').pushObject(view);
-    }.observes('parentView.activeTab')
-  })
-});
diff --git a/branch-1.2/ambari-web/app/views/main/apps_view.js b/branch-1.2/ambari-web/app/views/main/apps_view.js
deleted file mode 100644
index 74eaa4e..0000000
--- a/branch-1.2/ambari-web/app/views/main/apps_view.js
+++ /dev/null
@@ -1,349 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-var filters = require('views/common/filter_view');
-
-App.MainAppsView = Em.View.extend({
-  templateName: require('templates/main/apps'),
-  paginationInfo: function() {
-    return this.t('apps.filters.paginationInfo').format(this.get('controller.paginationObject.startIndex'), this.get('controller.paginationObject.endIndex'), this.get('controller.paginationObject.iTotalDisplayRecords'));
-  }.property('controller.paginationObject.startIndex', 'controller.paginationObject.endIndex', 'controller.paginationObject.iTotalDisplayRecords'),
-  //Pagination left/right buttons css class
-  paginationLeft: Ember.View.extend({
-    tagName: 'a',
-    template: Ember.Handlebars.compile('<i class="icon-arrow-left"></i>'),
-    classNameBindings: ['class'],
-    class: "",
-    calculateClass: function () {
-      if (parseInt(this.get("controller.paginationObject.startIndex")) > 1) {
-        this.set("class", "paginate_previous");
-      } else {
-        this.set("class", "paginate_disabled_previous");
-      }
-    }.observes("controller.paginationObject"),
-    click: function (event) {
-      if (this.class == "paginate_previous") {
-        var startIndex = parseInt(this.get("controller.paginationObject.startIndex")) - 1;
-        var showRows = parseInt(this.get("controller.filterObject.iDisplayLength"));
-        var startDisplayValue = Math.max(0, startIndex - showRows);
-        this.set("controller.filterObject.iDisplayStart", startDisplayValue);
-      }
-    }
-  }),
-  paginationRight: Ember.View.extend({
-    tagName: 'a',
-    template: Ember.Handlebars.compile('<i class="icon-arrow-right"></i>'),
-    classNameBindings: ['class'],
-    class: "",
-    calculateClass: function () {
-      if ((parseInt(this.get("controller.paginationObject.endIndex")) + 1) < parseInt(this.get("controller.paginationObject.iTotalDisplayRecords"))) {
-        this.set("class", "paginate_next");
-      } else {
-        this.set("class", "paginate_disabled_next");
-      }
-    }.observes("controller.paginationObject"),
-    click: function (event) {
-      if (this.class == "paginate_next") {
-        var startDisplayValue = parseInt(this.get("controller.paginationObject.endIndex"));
-        this.set("controller.filterObject.iDisplayStart", startDisplayValue);
-      }
-    }
-  }),
-
-  /**
-   * Whether there are no table rows with runs to display.
-   */
-  emptyData:true,
-
-  /**
-   * If there are no runs to display, set emptyData to true and reset the
-   * Avg table data; otherwise set emptyData to false.
-   */
-  emptyDataObserver:function(){
-    if(this.get("controller.paginationObject.iTotalRecords") != null && this.get("controller.paginationObject.iTotalDisplayRecords")>0){
-      this.set("emptyData",false);
-    }else{
-      this.set("emptyData",true);
-      this.set("controller.serverData",null);
-    }
-  }.observes("controller.paginationObject.iTotalDisplayRecords","controller.paginationObject.iTotalRecords"),
-
-
-  /**
-   * View for RunPerPage select component
-   */
-  runPerPageSelectView: Em.Select.extend({
-    selected: '10',
-    content: ['10', '25', '50', '100']
-  }),
-
-  wrapSorting: Ember.View.extend({
-    tagName: 'tr'
-  }),
-
-  sortingColumns: Ember.View.extend({
-    tagName: 'th',
-    classNameBindings: ['class', 'widthClass'],
-    class: "sorting",
-    widthClass: "",
-    content: null,
-    defaultColumn: 8,
-
-    didInsertElement: function () {
-      this.set("widthClass", "col" + this.content.index);
-      if (this.content.index == this.defaultColumn) {
-        this.setControllerObj(this.content.index, "DESC");
-        this.set("class", "sorting_desc");
-      }
-    },
-    click: function (event) {
-      console.log(this.class);
-      if (this.class == "sorting") {
-        this.resetSortClass();
-        this.setControllerObj(this.content.index, "ASC");
-        this.set("class", "sorting_asc");
-      } else if (this.class == "sorting_asc") {
-        this.setControllerObj(this.content.index, "DESC");
-        this.set("class", "sorting_desc");
-      } else if (this.class == "sorting_desc") {
-        this.setControllerObj(this.content.index, "ASC");
-        this.set("class", "sorting_asc");
-      }
-    },
-    resetSortClass: function () {
-      this.get("parentView.childViews").map(function (a, e) {
-        a.get("childViews")[0].set("class", "sorting")
-      });
-    },
-    setControllerObj: function (col, dir) {
-      this.set("controller.filterObject.iSortCol_0", col);
-      this.set("controller.filterObject.sSortDir_0", dir);
-    }
-  }),
-
-  /**
-   * Filter-field for Search
-   */
-  appSearchThrough: Em.TextField.extend({
-    classNames: ['input-medium'],
-    type: 'text',
-    placeholder: 'Search'
-  }),
-  /**
-   * Filter-field for App ID.
-   * Based on <code>filters</code> library
-   */
-  appIdFilterView: filters.createTextView({
-    valueBinding: "controller.filterObject.sSearch_0"
-  }),
-  /**
-   * Filter-field for name.
-   * Based on <code>filters</code> library
-   */
-  nameFilterView: filters.createTextView({
-    valueBinding: "controller.filterObject.sSearch_1",
-    fieldType: 'input-small'
-  }),
-  /**
-   * Filter-field for type.
-   * Based on <code>filters</code> library
-   */
-  typeFilterView: filters.createSelectView({
-    fieldType: 'input-small',
-    valueBinding: "controller.filterObject.runType",
-    content: ['Any', 'Pig', 'Hive', 'MapReduce']
-  }),
-
-  /**
-   * Filter-list for User.
-   * Based on <code>filters</code> library
-   */
-  userFilterView: filters.createComponentView({
-    /**
-     * Inner FilterView, used just to render the component. Its value is bound
-     * to the <code>mainview.value</code> property. Base methods are implemented
-     * in <code>filters.componentFieldView</code>.
-     */
-    filterView: filters.componentFieldView.extend({
-      templateName:require('templates/main/apps/user_filter'),
-
-      usersBinding: 'controller.users',
-
-      allComponentsChecked:false,
-      toggleAllComponents:function () {
-        var checked = this.get('allComponentsChecked');
-        this.get('users').setEach('checked', checked);
-      }.observes('allComponentsChecked'),
-
-      clearFilter:function() {
-        this.set('allComponentsChecked', false);
-
-        this.get('users').setEach('checked', false);
-
-        this._super();
-      },
-
-      applyFilter:function() {
-        this._super();
-
-        var chosenUsers = this.get('users').filterProperty('checked', true).mapProperty('name');
-        this.set('value', chosenUsers.toString());
-      }
-    }),
-
-    valueBinding: 'controller.filterObject.sSearch_3'
-  }),
-  /**
-   * Filter-field for jobs.
-   * Based on <code>filters</code> library
-   */
-  jobsFilterView: filters.createTextView({
-    fieldType: 'input-super-mini',
-    valueBinding: "controller.filterObject.jobs"
-  }),
-  /**
-   * Filter-field for Input.
-   * Based on <code>filters</code> library
-   */
-  inputFilterView: filters.createTextView({
-    fieldType: 'input-super-mini',
-    valueBinding: "controller.filterObject.input"
-  }),
-  /**
-   * Filter-field for Output.
-   * Based on <code>filters</code> library
-   */
-  outputFilterView: filters.createTextView({
-    fieldType: 'input-super-mini',
-    valueBinding: "controller.filterObject.output"
-  }),
-  /**
-   * Filter-field for Duration.
-   * Based on <code>filters</code> library
-   */
-  durationFilterView: filters.createTextView({
-    fieldType: 'input-super-mini',
-    valueBinding: "controller.filterObject.duration"
-  }),
-  /**
-   * Filter-field for RunDate.
-   * Based on <code>filters</code> library
-   */
-  runDateFilterView: filters.createSelectView({
-    fieldType: 'input-medium',
-    valueBinding: "controller.filterObject.runDate",
-    content: ['Any', 'Past 1 Day', 'Past 2 Days', 'Past 7 Days', 'Past 14 Days', 'Past 30 Days']
-  }),
-
-  /**
-   * Onclick handler for Show All/Filtered buttons
-   */
-  clickViewType: function (event) {
-    this.set("controller.filterObject.viewTypeClickEvent", true);
-    if ($(event.target).hasClass("filtered")) {
-      this.set("controller.filterObject.viewType", "filtered");
-    } else {
-      this.set("controller.filterObject.allFilterActivated", true);
-      this.set("controller.filterObject.viewType", "all");
-    }
-  },
-  /**
-   * Clears the last job ID when entering the page fresh.
-   * Without this, job data (and consequently the popup
-   * dialog) fails to load.
-   */
-  didInsertElement: function(){
-    App.router.get('mainAppsItemController').set('lastJobId', null);
-  },
-  /**
-   * Keeps the Show All/Filtered buttons' "selected" state in sync with
-   * the current view type.
-   */
-  onChangeViewType: function () {
-    var tmpViewType = this.get('controller.filterObject.viewType');
-    var filterButtons = $("#filter_buttons").children();
-    filterButtons.each(function (index, element) {
-      $(element).removeClass('selected');
-      if (tmpViewType == $(element).data('view-type')) {
-        $(element).addClass('selected');
-      }
-    });
-    this.set("controller.filterObject.viewTypeClickEvent", false);
-  }.observes("controller.filterObject.viewType"),
-
-  /**
-   * This container view renders the static table row (appTableRow) plus additional dynamic content.
-   */
-  containerRow: Em.ContainerView.extend({
-
-    /**
-     * Unique row id
-     */
-    id: function () {
-      return this.get('run.id');
-    }.property("run.id"),
-
-    /**
-     * Show/hide additional content appropriated for this row
-     */
-    expandToggle: function () {
-      //App.router.get('mainAppsItemController').set('jobsLoaded', false);
-      App.router.get('mainAppsItemController').set('content', this.get('run'));
-      App.ModalPopup.show({
-        classNames: ['big-modal'],
-        header: Em.I18n.t('apps.dagCharts.popup'),
-        bodyClass: App.MainAppsItemView.extend({
-          controllerBinding: 'App.router.mainAppsItemController'
-        }),
-        onPrimary: function () {
-          this.hide();
-        },
-        secondary: null
-      });
-    }
-  }),
-  /**
-   * Table-row view
-   */
-  appTableRow: Em.View.extend({
-    templateName: require('templates/main/apps/list_row'),
-    classNames: ['app-table-row'],
-    tagName: "tr",
-    onLoad: function() {
-      var run = this.get('parentView.run');
-      if (run.index) {
-        var strip = (run.index % 2) ? 'odd' : 'even';
-        this.$().addClass(strip);
-      }
-    }.observes('parentView.run'),
-
-    didInsertElement: function() {
-      this.onLoad();
-    },
-    mouseEnter: function (event, view) {
-      $(event.currentTarget).addClass("hover")
-    },
-    mouseLeave: function (event, view) {
-      $(event.currentTarget).removeClass("hover");
-    },
-    click: function (event, view) {
-      this.get('parentView').expandToggle();
-    }
-
-  })
-
-});
\ No newline at end of file
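
The paginationLeft/paginationRight handlers above boil down to index arithmetic on the server-reported pagination object: the previous page starts at max(0, (startIndex - 1) - pageSize) and the next page starts at endIndex. A plain-JS restatement (hypothetical helpers, not part of the patch):

  // Given 1-based startIndex/endIndex from the pagination object, compute
  // the 0-based iDisplayStart for the previous/next page.
  function previousPageStart(startIndex, pageSize) {
    return Math.max(0, (startIndex - 1) - pageSize);
  }
  function nextPageStart(endIndex) {
    return endIndex; // endIndex is the last row shown; the next page starts right after it
  }
  // e.g. rows 11-20 with pageSize 10: previousPageStart(11, 10) === 0, nextPageStart(20) === 20
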
diff --git a/branch-1.2/ambari-web/app/views/main/charts.js b/branch-1.2/ambari-web/app/views/main/charts.js
deleted file mode 100644
index 95c24b5..0000000
--- a/branch-1.2/ambari-web/app/views/main/charts.js
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.MainChartsView = Em.View.extend({
-    templateName: require('templates/main/charts')
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/charts/heatmap.js b/branch-1.2/ambari-web/app/views/main/charts/heatmap.js
deleted file mode 100644
index a25ad49..0000000
--- a/branch-1.2/ambari-web/app/views/main/charts/heatmap.js
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-App.MainChartsHeatmapView = Em.View.extend({
-  templateName: require('templates/main/charts/heatmap'),
-  spinner: null,
-  didInsertElement: function () {
-    this._super();
-    // set default metric
-    this.set('controller.selectedMetric', this.get('controller.allMetrics')[0].get('items')[0]);
-    $("#heatmapDetailsBlock").hide();
-  },
-  showLoading: function () {
-    if (this.get('controller.selectedMetric.loading')) {
-      var e = document.getElementById("heatmap-metric-loading");
-      if (e) {
-        var spinOpts = {
-          lines: 9,
-          length: 4,
-          width: 2,
-          radius: 3,
-          top: '0',
-          left: '0'
-        };
-        this.set('spinner', new Spinner(spinOpts).spin(e));
-      }
-    } else {
-      var spinner = this.get('spinner');
-      if (spinner) {
-        spinner.stop();
-      }
-      this.set('spinner', null);
-    }
-  }.observes('controller.selectedMetric.loading')
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/charts/heatmap/heatmap_host.js b/branch-1.2/ambari-web/app/views/main/charts/heatmap/heatmap_host.js
deleted file mode 100644
index b926fad..0000000
--- a/branch-1.2/ambari-web/app/views/main/charts/heatmap/heatmap_host.js
+++ /dev/null
@@ -1,123 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var date = require('utils/date');
-
-var App = require('app');
-
-App.MainChartsHeatmapHostView = Em.View.extend({
-  templateName: require('templates/main/charts/heatmap/heatmap_host'),
-  /** @private */
-  hostClass: 'hostBlock',
-
-  /**
-   * show Host details block and move it near the cursor
-   * 
-   * @param {Object}
-   *          e
-   * @this App.MainChartsHeatmapHostView
-   */
-  mouseEnter: function (e) {
-    var host = this.get('content');
-    var view = App.MainChartsHeatmapHostDetailView.create();
-    $.each(view.get('details'), function(i){
-      var val = host.get(i);
-      if (i == 'diskUsage') {
-        if (val == undefined || isNaN(val) || val == Infinity || val == -Infinity) {
-          val = null;
-        } else {
-          val = val.toFixed(1);
-        }
-      } else if (i == 'cpuUsage') {
-        if (val == undefined || isNaN(val)) {
-          val = null;
-        } else {
-          val = val.toFixed(1);
-        }
-      } else if (i == 'memoryUsage') {
-        if (val == undefined || isNaN(val) || val == Infinity || val == -Infinity) {
-          val = null;
-        } else {
-          val = val.toFixed(1);
-        }
-      } else if (i == 'hostComponents') {
-        if (val == undefined) {
-          val = null;
-        } else {
-          val = val.filterProperty('isMaster').concat(val.filterProperty('isSlave')).mapProperty('displayName').join(', ');
-        }
-      }
-      view.set('details.' + i, val);
-    });
-    var selectedMetric = this.get('controller.selectedMetric');
-    if (selectedMetric) {
-      var metricName = selectedMetric.get('name');
-      var h2vMap = selectedMetric.get('hostToValueMap');
-      if (h2vMap && metricName) {
-        var value = h2vMap[host.get('hostName')];
-        if (value == undefined || value == null) {
-          value = this.t('charts.heatmap.unknown');
-        } else {
-          if (metricName == 'Garbage Collection Time') {
-            value = date.timingFormat(parseInt(value));
-          } else {
-            if (isNaN(value)) {
-              value = this.t('charts.heatmap.unknown');
-            } else {
-              value = value + selectedMetric.get('units');
-            }
-          }
-        }
-        view.set('details.metricName', metricName);
-        view.set('details.metricValue', value);
-      }
-    }
-    var detailsBlock = $("#heatmapDetailsBlock");
-    detailsBlock.css('top', e.pageY + 10);
-    detailsBlock.css('left', e.pageX + 10);
-    detailsBlock.show();
-  },
-
-  /**
-   * hide Host details block
-   * 
-   * @param {Object}
-   *          e
-   * @this App.MainChartsHeatmapHostView
-   */
-  mouseLeave: function (e) {
-    $("#heatmapDetailsBlock").hide();
-  },
-
-  hostTemperatureStyle: function () {
-    var controller = this.get('controller');
-    var h2sMap = controller.get('hostToSlotMap');
-    if (h2sMap) {
-      var hostname = this.get('content.hostName');
-      if (hostname) {
-        var slot = h2sMap[hostname];
-        if (slot > -1) {
-          var slotDefs = controller.get('selectedMetric.slotDefinitions');
-          if (slotDefs && slotDefs.length > slot) {
-            return slotDefs[slot].get('cssStyle');
-          }
-        }
-      }
-    }
-    return '';
-  }.property('controller.hostToSlotMap')
-});
\ No newline at end of file
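
The repeated undefined/NaN/Infinity guards in mouseEnter above could be factored into a single helper; a sketch of the equivalent check (hypothetical, for illustration):

  // Returns the usage value formatted to one decimal place, or null when
  // the input is missing or not a finite number (same guards as above).
  function formatUsage(val) {
    return (val == null || isNaN(val) || !isFinite(val)) ? null : Number(val).toFixed(1);
  }
  // formatUsage(37.246) === '37.2'; formatUsage(Infinity) === null
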
diff --git a/branch-1.2/ambari-web/app/views/main/charts/heatmap/heatmap_host_detail.js b/branch-1.2/ambari-web/app/views/main/charts/heatmap/heatmap_host_detail.js
deleted file mode 100644
index db2fc41..0000000
--- a/branch-1.2/ambari-web/app/views/main/charts/heatmap/heatmap_host_detail.js
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.MainChartsHeatmapHostDetailView = Em.View.extend({
-  templateName: require('templates/main/charts/heatmap/heatmap_host_detail'),
-  /** @private */ classNames:['heatmap_host_details'],
-  /** @private */ elementId:'heatmapDetailsBlock',
-  /** @private */ details:{
-    hostName:'test node',
-    publicHostName:'test node',
-    osType: 'OS',
-    ip: '192.168.0.0',
-    metricName: 'metric-name',
-    metricValue: 'metric-value',
-    diskUsage: '10',
-    cpuUsage: '10',
-    memoryUsage: '10',
-    hostComponents: 'host components'
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/charts/heatmap/heatmap_rack.js b/branch-1.2/ambari-web/app/views/main/charts/heatmap/heatmap_rack.js
deleted file mode 100644
index 9783643..0000000
--- a/branch-1.2/ambari-web/app/views/main/charts/heatmap/heatmap_rack.js
+++ /dev/null
@@ -1,66 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.MainChartsHeatmapRackView = Em.View.extend({
-  templateName: require('templates/main/charts/heatmap/heatmap_rack'),
-  classNames: ['rack'],
-  classNameBindings: ['visualSchema'],
-  /**
-   * CSS class bound to the hosts block.
-   * @private
-   */
-  hostsBlockClass:"hosts clearfix",
-  heatmapIsOpened: 1,
-
-  /** rack status block class */
-  statusIndicator:'statusIndicator',
-
-  toggleRack: function(){
-    var newHeatmapStatus = 1 - this.get('heatmapIsOpened');
-    this.set('heatmapIsOpened', newHeatmapStatus);
-  },
-
-  /**
-   * Toggler CSS class, derived from the heatmapIsOpened property
-   * @this App.MainChartsHeatmapRackView
-   */
-  heatmapTogglerClass:function () {
-    return this.get('heatmapIsOpened') ? "isActive" : "";
-  }.property("heatmapIsOpened"),
-  
-  /**
-   * Provides the CSS style for an individual host.
-   * This can be used as the 'style' attribute of an element.
-   */
-  hostCssStyle: function(){
-    var rack = this.get('rack');
-    var widthPercent = 100;
-    var hostCount = rack.get('hosts.length');
-    if(hostCount<11){
-      widthPercent = (100/hostCount)-0.5;
-    }else{
-      widthPercent = 10; // max out at 10%
-    }
-    return "width:"+widthPercent+"%;float:left;";
-  }.property('rack')
-});
\ No newline at end of file
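
For reference, hostCssStyle above sizes host cells so that at most ten fit per row: with fewer than eleven hosts each cell gets (100 / hostCount) - 0.5 percent (the half percent leaves room for spacing), and from eleven hosts up the width is capped at 10%. Restated as a tiny function:

  // Width (in %) of one host cell in a rack of hostCount hosts.
  function hostWidthPercent(hostCount) {
    return hostCount < 11 ? (100 / hostCount) - 0.5 : 10; // cap at 10% => max 10 per row
  }
  // hostWidthPercent(4) === 24.5; hostWidthPercent(40) === 10
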
diff --git a/branch-1.2/ambari-web/app/views/main/charts/menu.js b/branch-1.2/ambari-web/app/views/main/charts/menu.js
deleted file mode 100644
index b60b2dd..0000000
--- a/branch-1.2/ambari-web/app/views/main/charts/menu.js
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.MainChartsMenuView = Em.CollectionView.extend({
-  tagName: 'ul',
-  classNames: ["nav", "nav-tabs"],
-  content:[
-    /*{ label:'Heatmap', routing:'heatmap', active:"active"},
-    { label:'Horizon Chart', routing:'horizon_chart'}*/
-  ],
-
-  init: function(){ this._super(); this.activateView(); },
-
-  activateView:function () {
-    $.each(this._childViews, function () {
-      this.set('active', (this.get('content.routing') == 'heatmap' ? "active" : ""));
-    });
-  },
-
-  deactivateChildViews: function() {
-    $.each(this._childViews, function(){
-      this.set('active', "");
-    });
-  },
-
-  itemViewClass: Em.View.extend({
-    classNameBindings: ["active"],
-    active: "",
-    template: Ember.Handlebars.compile('<a {{action showChart view.content.routing }} href="#"> {{unbound view.content.label}}</a>')
-  })
-});
\ No newline at end of file
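The activation rule in activateView marks exactly one tab, the heatmap, as active. A sketch of that rule over plain routing strings (hypothetical function, outside Ember):

// Returns the CSS class for each menu entry; only 'heatmap' is active.
function activeClassFor(routing) {
  return routing === 'heatmap' ? 'active' : '';
}

console.log(['heatmap', 'horizon_chart'].map(activeClassFor)); // [ 'active', '' ]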
diff --git a/branch-1.2/ambari-web/app/views/main/dashboard.js b/branch-1.2/ambari-web/app/views/main/dashboard.js
deleted file mode 100644
index f0ed1ef..0000000
--- a/branch-1.2/ambari-web/app/views/main/dashboard.js
+++ /dev/null
@@ -1,143 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.MainDashboardView = Em.View.extend({
-  templateName:require('templates/main/dashboard'),
-  didInsertElement:function () {
-    this.services();
-  },
-  content:[],
-  updateServices: function(){
-    var services = App.Service.find();
-    services.forEach(function (item) {
-      var view;
-      switch (item.get('serviceName')) {
-        case "HDFS":
-          view = this.get('content').filterProperty('viewName', App.MainDashboardServiceHdfsView);
-          view.objectAt(0).set('model', App.HDFSService.find(item.get('id')));
-          break;
-        case "MAPREDUCE":
-          view = this.get('content').filterProperty('viewName', App.MainDashboardServiceMapreduceView);
-          view.objectAt(0).set('model', App.MapReduceService.find(item.get('id')));
-          break;
-        case "HBASE":
-          view = this.get('content').filterProperty('viewName', App.MainDashboardServiceHbaseView);
-          view.objectAt(0).set('model', App.HBaseService.find(item.get('id')));
-      }
-    }, this);
-  }.observes('App.router.updateController.isUpdate'),
-  services: function () {
-    var services = App.Service.find();
-    if (this.get('content').length > 0) {
-      return false
-    }
-    services.forEach(function (item) {
-      var vName;
-      var item2;
-      switch (item.get('serviceName')) {
-        case "HDFS":
-          vName = App.MainDashboardServiceHdfsView;
-          item2 = App.HDFSService.find(item.get('id'));
-          break;
-        case "MAPREDUCE":
-          vName = App.MainDashboardServiceMapreduceView;
-          item2 = App.MapReduceService.find(item.get('id'));
-          break;
-        case "HBASE":
-          vName = App.MainDashboardServiceHbaseView;
-          item2 = App.HBaseService.find(item.get('id'));
-          break;
-        case "HIVE":
-          vName = App.MainDashboardServiceHiveView;
-          break;
-        case "ZOOKEEPER":
-          vName = App.MainDashboardServiceZookeperView;
-          break;
-        case "OOZIE":
-          vName = App.MainDashboardServiceOozieView;
-          break;
-        default:
-          vName = Em.View;
-      }
-      this.get('content').pushObject(Em.Object.create({
-        viewName: vName,
-        model: item2 || item
-      }))
-    }, this);
-  },
-
-  gangliaUrl: function () {
-    return App.router.get('clusterController.gangliaUrl') + "/?r=hour&cs=&ce=&m=&s=by+name&c=HDPSlaves&tab=m&vn=";
-  }.property('App.router.clusterController.gangliaUrl'),
-
-  showAlertsPopup: function (event) {
-    App.ModalPopup.show({
-      header: this.t('services.alerts.headingOfList'),
-      bodyClass: Ember.View.extend({
-        service: event.context,
-        warnAlerts: function () {
-          var allAlerts = App.router.get('clusterController.alerts');
-          var serviceId = this.get('service.serviceName');
-          if (serviceId) {
-            return allAlerts.filterProperty('serviceType', serviceId).filterProperty('isOk', false).filterProperty('ignoredForServices', false);
-          }
-          return []; // no service selected: empty list keeps warnAlertsCount at 0
-        }.property('App.router.clusterController.alerts'),
-
-        warnAlertsCount: function () {
-          return this.get('warnAlerts').length;
-        }.property('warnAlerts'),
-
-        warnAlertsMessage: function() {
-          return Em.I18n.t('services.alerts.head').format(this.get('warnAlertsCount'));
-        }.property('warnAlertsCount'),
-
-        nagiosUrl: function () {
-          return App.router.get('clusterController.nagiosUrl');
-        }.property('App.router.clusterController.nagiosUrl'),
-
-        closePopup: function () {
-          this.get('parentView').hide();
-        },
-
-        viewNagiosUrl: function () {
-          window.open(this.get('nagiosUrl'), "_blank");
-          this.closePopup();
-        },
-
-        selectService: function () {
-          App.router.transitionTo('services.service.summary', event.context)
-          this.closePopup();
-        },
-        templateName: require('templates/main/dashboard/alert_notification_popup')
-      }),
-      primary: 'Close',
-      onPrimary: function() {
-        this.hide();
-      },
-      secondary : null,
-      didInsertElement: function () {
-        this.$().find('.modal-footer').addClass('align-center');
-        this.$().children('.modal').css({'margin-top': '-350px'});
-      }
-    });
-    event.stopPropagation();
-  }
-});
\ No newline at end of file
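warnAlerts above chains three filterProperty calls. The same predicate as one plain-JavaScript filter (a sketch; the field names are taken from the code above):

// Alerts for one service that are failing and not ignored for services.
function warnAlertsFor(allAlerts, serviceId) {
  return allAlerts.filter(function (alert) {
    return alert.serviceType === serviceId &&
           alert.isOk === false &&
           alert.ignoredForServices === false;
  });
}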
diff --git a/branch-1.2/ambari-web/app/views/main/dashboard/cluster_metrics/cpu.js b/branch-1.2/ambari-web/app/views/main/dashboard/cluster_metrics/cpu.js
deleted file mode 100644
index 6a5a2b3..0000000
--- a/branch-1.2/ambari-web/app/views/main/dashboard/cluster_metrics/cpu.js
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * @class
- * 
- * This is a view for showing cluster CPU metrics
- * 
- * @extends App.ChartLinearTimeView
- * @extends Ember.Object
- * @extends Ember.View
- */
-App.ChartClusterMetricsCPU = App.ChartLinearTimeView.extend({
-  id: "cluster-metrics-cpu",
-  sourceUrl: "?fields=metrics/cpu[{fromSeconds},{toSeconds},{stepSeconds}]",
-  mockUrl: "/data/cluster_metrics/cpu_1hr.json",
-
-  title: Em.I18n.t('dashboard.clusterMetrics.cpu'),
-  yAxisFormatter: App.ChartLinearTimeView.PercentageFormatter,
-  isTimePagingDisable: true,
-  transformToSeries: function (jsonData) {
-    var seriesArray = [];
-    if (jsonData && jsonData.metrics && jsonData.metrics.cpu) {
-      var cpu_idle;
-      for ( var name in jsonData.metrics.cpu) {
-        var displayName = name;
-        var seriesData = jsonData.metrics.cpu[name];
-        if (seriesData) {
-          var s = this.transformData(seriesData, displayName);
-          if ('Idle' == s.name) {
-            cpu_idle = s;
-          }
-          else {
-            seriesArray.push(s);
-          }
-        }
-      }
-      if (cpu_idle) { seriesArray.push(cpu_idle); } // some payloads have no Idle series
-    }
-    return seriesArray;
-  },
-  
-  colorForSeries: function (series) {
-    if ("Idle" == series.name){
-      return '#CFECEC';
-    }
-    return null;
-  }
-});
\ No newline at end of file
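transformToSeries above pulls the 'Idle' series out and appends it last so it renders on top of the stack. A guarded sketch of that reordering as a plain function (the guard covers payloads with no Idle series):

// Move the 'Idle' series to the end; skip it only if absent.
function orderCpuSeries(seriesList) {
  var idle = null, ordered = [];
  seriesList.forEach(function (s) {
    if (s.name === 'Idle') { idle = s; } else { ordered.push(s); }
  });
  if (idle) { ordered.push(idle); }
  return ordered;
}

console.log(orderCpuSeries([{ name: 'Idle' }, { name: 'User' }]));
// [ { name: 'User' }, { name: 'Idle' } ]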
diff --git a/branch-1.2/ambari-web/app/views/main/dashboard/cluster_metrics/load.js b/branch-1.2/ambari-web/app/views/main/dashboard/cluster_metrics/load.js
deleted file mode 100644
index 678928b..0000000
--- a/branch-1.2/ambari-web/app/views/main/dashboard/cluster_metrics/load.js
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-/**
- * @class
- * 
- * This is a view for showing cluster load
- * 
- * @extends App.ChartLinearTimeView
- * @extends Ember.Object
- * @extends Ember.View
- */
-App.ChartClusterMetricsLoad = App.ChartLinearTimeView.extend({
-  id: "cluster-metrics-load",
-  sourceUrl: "?fields=metrics/load[{fromSeconds},{toSeconds},{stepSeconds}]",
-  mockUrl: "/data/cluster_metrics/load_1hr.json",
-  isTimePagingDisable: true,
-  renderer: 'line',
-  title: Em.I18n.t('dashboard.clusterMetrics.load'),
-  
-  transformToSeries: function(jsonData){
-    var seriesArray = [];
-    if (jsonData && jsonData.metrics && jsonData.metrics.load) {
-      for ( var name in jsonData.metrics.load) {
-        var displayName = name;
-        var seriesData = jsonData.metrics.load[name];
-        if (seriesData) {
-          seriesArray.push(this.transformData(seriesData, displayName));
-        }
-      }
-    }
-    return seriesArray;
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/dashboard/cluster_metrics/memory.js b/branch-1.2/ambari-web/app/views/main/dashboard/cluster_metrics/memory.js
deleted file mode 100644
index 5bc02f7..0000000
--- a/branch-1.2/ambari-web/app/views/main/dashboard/cluster_metrics/memory.js
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * @class
- * 
- * This is a view for showing cluster memory metrics
- * 
- * @extends App.ChartLinearTimeView
- * @extends Ember.Object
- * @extends Ember.View
- */
-App.ChartClusterMetricsMemory = App.ChartLinearTimeView.extend({
-  id: "cluster-metrics-memory",
-  sourceUrl: "?fields=metrics/memory[{fromSeconds},{toSeconds},{stepSeconds}]",
-  mockUrl: "/data/cluster_metrics/memory_1hr.json",
-  isTimePagingDisable: true,
-  title: Em.I18n.t('dashboard.clusterMetrics.memory'),
-  yAxisFormatter: App.ChartLinearTimeView.BytesFormatter,
-  renderer: 'line',
-  transformToSeries: function (jsonData) {
-    var seriesArray = [];
-    if (jsonData && jsonData.metrics && jsonData.metrics.memory) {
-      for ( var name in jsonData.metrics.memory) {
-        var displayName = name;
-        var seriesData = jsonData.metrics.memory[name];
-        if (seriesData) {
-          seriesArray.push(this.transformData(seriesData, displayName));
-        }
-      }
-    }
-    return seriesArray;
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/dashboard/cluster_metrics/network.js b/branch-1.2/ambari-web/app/views/main/dashboard/cluster_metrics/network.js
deleted file mode 100644
index fe09e24..0000000
--- a/branch-1.2/ambari-web/app/views/main/dashboard/cluster_metrics/network.js
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-/**
- * @class
- * 
- * This is a view for showing cluster network metrics
- * 
- * @extends App.ChartLinearTimeView
- * @extends Ember.Object
- * @extends Ember.View
- */
-App.ChartClusterMetricsNetwork = App.ChartLinearTimeView.extend({
-  id: "cluster-metrics-network",
-  sourceUrl: "?fields=metrics/network[{fromSeconds},{toSeconds},{stepSeconds}]",
-  mockUrl: "/data/cluster_metrics/network_1hr.json",
-  isTimePagingDisable: true,
-  title: Em.I18n.t('dashboard.clusterMetrics.network'),
-  yAxisFormatter: App.ChartLinearTimeView.BytesFormatter,
-  renderer: 'line',
-
-  transformToSeries : function (jsonData) {
-    var seriesArray = [];
-    if (jsonData && jsonData.metrics && jsonData.metrics.network) {
-      for ( var name in jsonData.metrics.network) {
-        var displayName = name;
-        var seriesData = jsonData.metrics.network[name];
-        if (seriesData) {
-          seriesArray.push(this.transformData(seriesData, displayName));
-        }
-      }
-    }
-    return seriesArray;
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/dashboard/service.js b/branch-1.2/ambari-web/app/views/main/dashboard/service.js
deleted file mode 100644
index 26f7f0b..0000000
--- a/branch-1.2/ambari-web/app/views/main/dashboard/service.js
+++ /dev/null
@@ -1,123 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-require('models/alert');
-
-App.MainDashboardServiceHealthView = Em.View.extend({
-  classNameBindings: ["healthStatus"],
-  //template: Em.Handlebars.compile(""),
-  blink: false,
-  tagName: 'span',
-  
-  /**
-   * When set to true, extending classes should
-   * show only tabular rows as they will be 
-   * embedded into other tables.
-   */
-  showOnlyRows: false,
-
-  startBlink: function () {
-    this.set('blink', true);
-  },
-
-  doBlink: function () {
-    if (this.get('blink') && (this.get("state") == "inDOM")) {
-      this.$().effect("pulsate", { times: 1 }, "slow", function () {
-        var view = Em.View.views[$(this).attr('id')];
-        view.doBlink();
-      });
-    }
-  }.observes('blink'),
-
-  stopBlink: function () {
-    this.set('blink', false);
-  },
-
-  healthStatus: function () {
-    var status = this.get('service.healthStatus');
-    switch (status) {
-      case 'green':
-        status = App.Service.Health.live;
-        this.stopBlink();
-        break;
-      case 'green-blinking':
-        status = App.Service.Health.live;
-        this.startBlink();
-        break;
-      case 'red-blinking':
-        status = App.Service.Health.dead;
-        this.startBlink();
-        break;
-      default:
-        status = App.Service.Health.dead;
-        this.stopBlink();
-        break;
-    }
-
-    return 'health-status-' + status;
-  }.property('service.healthStatus'),
-
-  didInsertElement: function () {
-    this.doBlink(); // check for blink availability
-  }
-});
-
-App.MainDashboardServiceView = Em.View.extend({
-  classNames: ['service', 'clearfix'],
-
-  data: function () {
-    return this.get('controller.data.' + this.get('serviceName'));
-  }.property('controller.data'),
-
-  formatUnavailable: function(value){
-    return (value || value == 0) ? value : this.t('services.service.summary.notAvailable');
-  },
-
-  criticalAlertsCount: function () {
-    var alerts = App.router.get('clusterController.alerts');
-    return alerts.filterProperty('serviceType', this.get('service.id')).filterProperty('isOk', false).filterProperty('ignoredForServices', false).length;
-  }.property('App.router.clusterController.alerts'),
-
-  isCollapsed: false,
-
-  toggleInfoView: function () {
-    this.$('.service-body').toggle('blind', 200);
-    this.set('isCollapsed', !this.isCollapsed);
-  },
-
-  masters: function(){
-    return this.get('service.hostComponents').filterProperty('isMaster', true);
-  }.property('service'),
-
-  clients: function(){
-    var clients = this.get('service.hostComponents').filterProperty('isClient', true);
-    var len = clients.length;
-    var template = 'dashboard.services.{0}.client'.format(this.get('serviceName').toLowerCase());
-    if(len > 1){
-      template += 's';
-    }
-
-    return {
-      title: this.t(template).format(len),
-      component: clients.objectAt(0)
-    };
-  }.property('service')
-
-});
\ No newline at end of file
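The healthStatus property boils down to a four-way mapping from the model's status string to a health state plus a blink flag. A sketch of that table (the LIVE/DEAD tokens stand in for App.Service.Health values, which are defined elsewhere in the app):

function mapHealthStatus(status) {
  switch (status) {
    case 'green':          return { health: 'LIVE', blink: false };
    case 'green-blinking': return { health: 'LIVE', blink: true };
    case 'red-blinking':   return { health: 'DEAD', blink: true };
    default:               return { health: 'DEAD', blink: false };
  }
}

console.log(mapHealthStatus('green-blinking')); // { health: 'LIVE', blink: true }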
diff --git a/branch-1.2/ambari-web/app/views/main/dashboard/service/hbase.js b/branch-1.2/ambari-web/app/views/main/dashboard/service/hbase.js
deleted file mode 100644
index 2079086..0000000
--- a/branch-1.2/ambari-web/app/views/main/dashboard/service/hbase.js
+++ /dev/null
@@ -1,88 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-var date = require('utils/date');
-
-App.MainDashboardServiceHbaseView = App.MainDashboardServiceView.extend({
-  templateName: require('templates/main/dashboard/service/hbase'),
-  serviceName: 'hbase',
-
-  masterServerHeapSummary: function () {
-    var heapUsed = this.get('service').get('heapMemoryUsed');
-    var heapMax = this.get('service').get('heapMemoryMax');
-    var percent = heapMax > 0 ? 100 * heapUsed / heapMax : 0;
-    var heapString = heapUsed > 0 ? heapUsed.bytesToSize(1, "parseFloat") : 0;
-    var heapMaxString = heapMax > 0 ? heapMax.bytesToSize(1, "parseFloat") : 0;
-    return this.t('dashboard.services.hbase.masterServerHeap.summary').format(heapString, heapMaxString, percent.toFixed(1));
-  }.property('service.heapMemoryUsed', 'service.heapMemoryMax'),
-
-  version: function(){
-    return this.formatUnavailable(this.get('service.version'));
-  }.property('service.version'),
-
-  summaryHeader: function () {
-    var avgLoad = this.get('service.averageLoad');
-    if (avgLoad == null) {
-      avgLoad = this.t("services.service.summary.unknown");
-    }
-    return this.t("dashboard.services.hbase.summary").format(this.get('service.regionServers.length'), avgLoad);
-  }.property('service.regionServers', 'service.averageLoad'),
-
-  hbaseMasterWebUrl: function () {
-    return "http://" + this.get('service').get('master').get('publicHostName') + ":60010";
-  }.property('service.master'),
-
-  averageLoad: function () {
-    var avgLoad = this.get('service.averageLoad');
-    if (avgLoad == null) {
-      avgLoad = this.t('services.service.summary.notAvailable');
-    }
-    return this.t('dashboard.services.hbase.averageLoadPerServer').format(avgLoad);
-  }.property("service.averageLoad"),
-
-  masterStartedTime: function () {
-    var uptime = this.get('service').get('masterStartTime');
-    if (uptime && uptime > 0) {
-      var diff = (new Date()).getTime() - uptime;
-      if (diff < 0) {
-        diff = 0;
-      }
-      var formatted = date.timingFormat(diff);
-      return this.t('dashboard.services.uptime').format(formatted);
-    }
-    return this.t('services.service.summary.notRunning');
-  }.property("service.masterStartTime"),
-
-  masterActivatedTime: function () {
-    var uptime = this.get('service').get('masterActiveTime');
-    if (uptime && uptime > 0) {
-      var diff = (new Date()).getTime() - uptime;
-      if (diff < 0) {
-        diff = 0;
-      }
-      var formatted = date.timingFormat(diff);
-      return this.t('dashboard.services.uptime').format(formatted);
-    }
-    return this.t('services.service.summary.notRunning');
-  }.property("service.masterActiveTime"),
-
-  regionServerComponent: function () {
-    return App.HostComponent.find().findProperty('componentName', 'HBASE_REGIONSERVER');
-  }.property()
-
-});
\ No newline at end of file
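masterStartedTime and masterActivatedTime share one uptime computation: take the delta from now and clamp negatives (clock skew) to zero before formatting. The shared core as a sketch (hypothetical helper; returns null when the service is not running):

function uptimeMillis(startTime, now) {
  if (!startTime || startTime <= 0) { return null; } // not running
  var diff = now - startTime;
  return diff < 0 ? 0 : diff;
}

console.log(uptimeMillis(0, Date.now()));                 // null
console.log(uptimeMillis(Date.now() + 5000, Date.now())); // 0 (skew clamped)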
diff --git a/branch-1.2/ambari-web/app/views/main/dashboard/service/hdfs.js b/branch-1.2/ambari-web/app/views/main/dashboard/service/hdfs.js
deleted file mode 100644
index eec736b..0000000
--- a/branch-1.2/ambari-web/app/views/main/dashboard/service/hdfs.js
+++ /dev/null
@@ -1,128 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-var date = require('utils/date');
-
-App.MainDashboardServiceHdfsView = App.MainDashboardServiceView.extend({
-  templateName: require('templates/main/dashboard/service/hdfs'),
-  serviceName: 'HDFS',
-  Chart: App.ChartPieView.extend({
-    service: null,
-    color: '#0066B3',
-    stroke: '#0066B3',
-    palette: new Rickshaw.Color.Palette({
-      scheme: [ 'rgba(0,102,179,0)', 'rgba(0,102,179,1)'].reverse()
-    }),
-    data: function () {
-      var total = this.get('service.capacityTotal') + 0;
-      var remaining = (this.get('service.capacityRemaining') + 0);
-      var used = total - remaining;
-      return [ used, remaining ];
-    }.property('service.capacityUsed', 'service.capacityTotal')
-  }),
-
-  version: function(){
-    return this.formatUnavailable(this.get('service.version'));
-  }.property('service.version'),
-  dfsTotalBlocks: function(){
-    return this.formatUnavailable(this.get('service.dfsTotalBlocks'));
-  }.property('service.dfsTotalBlocks'),
-  dfsTotalFiles: function(){
-    return this.formatUnavailable(this.get('service.dfsTotalFiles'));
-  }.property('service.dfsTotalFiles'),
-  dfsCorruptBlocks: function(){
-    return this.formatUnavailable(this.get('service.dfsCorruptBlocks'));
-  }.property('service.dfsCorruptBlocks'),
-  dfsMissingBlocks: function(){
-    return this.formatUnavailable(this.get('service.dfsMissingBlocks'));
-  }.property('service.dfsMissingBlocks'),
-  dfsUnderReplicatedBlocks: function(){
-    return this.formatUnavailable(this.get('service.dfsUnderReplicatedBlocks'));
-  }.property('service.dfsUnderReplicatedBlocks'),
-
-  blockErrorsMessage: function() {
-    return Em.I18n.t('dashboard.services.hdfs.blockErrors').format(this.get('dfsCorruptBlocks'), this.get('dfsMissingBlocks'), this.get('dfsUnderReplicatedBlocks'));
-  }.property('dfsCorruptBlocks','dfsMissingBlocks','dfsUnderReplicatedBlocks'),
-
-  nodeUptime: function () {
-    var uptime = this.get('service').get('nameNodeStartTime');
-    if (uptime && uptime > 0){
-      var diff = (new Date()).getTime() - uptime;
-      if (diff < 0) {
-        diff = 0;
-      }
-      var formatted = date.timingFormat(diff);
-      return this.t('dashboard.services.uptime').format(formatted);
-    }
-    return this.t('services.service.summary.notRunning');
-  }.property("service.nameNodeStartTime"),
-
-  nodeWebUrl: function () {
-    return "http://" + this.get('service').get('nameNode').get('publicHostName') + ":50070";
-  }.property('service.nameNode'),
-
-  nodeHeap: function () {
-    var memUsed = this.get('service').get('jvmMemoryHeapUsed') * 1000000;
-    var memCommitted = this.get('service').get('jvmMemoryHeapCommitted') * 1000000;
-    var percent = memCommitted > 0 ? ((100 * memUsed) / memCommitted) : 0;
-    return this.t('dashboard.services.hdfs.nodes.heapUsed').format(memUsed.bytesToSize(1, 'parseFloat'), memCommitted.bytesToSize(1, 'parseFloat'), percent.toFixed(1));
-
-  }.property('service.jvmMemoryHeapUsed', 'service.jvmMemoryHeapCommitted'),
-
-  summaryHeader: function () {
-    var text = this.t("dashboard.services.hdfs.summary");
-    var svc = this.get('service');
-    var liveCount = svc.get('liveDataNodes').get('length');
-    var totalCount = svc.get('dataNodes').get('length');
-    var total = this.get('service.capacityTotal') + 0;
-    var remaining = this.get('service.capacityRemaining') + 0;
-    var used = total - remaining;
-    var percent = total > 0 ? ((used * 100) / total).toFixed(1) : 0;
-    if (percent == "NaN" || percent < 0) {
-      percent = "n/a ";
-    }
-    return text.format(liveCount, totalCount, percent);
-  }.property('service.liveDataNodes', 'service.dataNodes', 'service.capacityUsed', 'service.capacityTotal'),
-
-  capacity: function () {
-    var text = this.t("dashboard.services.hdfs.capacityUsed");
-    var total = this.get('service.capacityTotal') + 0;
-    var remaining = this.get('service.capacityRemaining') + 0;
-    var used = total - remaining;
-    var percent = total > 0 ? ((used * 100) / total).toFixed(1) : 0;
-    if (percent == "NaN" || percent < 0) {
-      percent = "n/a ";
-    }
-    if (used < 0) {
-      used = 0;
-    }
-    if (total < 0) {
-      total = 0;
-    }
-    return text.format(used.bytesToSize(1, 'parseFloat'), total.bytesToSize(1, 'parseFloat'), percent);
-  }.property('service.capacityUsed', 'service.capacityTotal'),
-
-  dataNodeComponent: function () {
-    return App.HostComponent.find().findProperty('componentName', 'DATANODE');
-  }.property(),
-
-  isSafeMode: function () {
-    var safeMode = this.get('service.safeModeStatus');
-    return safeMode != null && safeMode.length > 0;
-  }.property('service.safeModeStatus')
-});
\ No newline at end of file
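summaryHeader and capacity above repeat the same percentage math. Extracted as one sketch (hypothetical helper; mirrors the "n/a " fallback for zero or inconsistent totals):

function capacityUsedPercent(total, remaining) {
  var used = total - remaining;
  if (total <= 0 || used < 0) { return 'n/a '; }
  return ((used * 100) / total).toFixed(1);
}

console.log(capacityUsedPercent(1000, 250)); // '75.0'
console.log(capacityUsedPercent(0, 0));      // 'n/a '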
diff --git a/branch-1.2/ambari-web/app/views/main/dashboard/service/hive.js b/branch-1.2/ambari-web/app/views/main/dashboard/service/hive.js
deleted file mode 100644
index 7dbd323..0000000
--- a/branch-1.2/ambari-web/app/views/main/dashboard/service/hive.js
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.MainDashboardServiceHiveView = App.MainDashboardServiceView.extend({
-  templateName: require('templates/main/dashboard/service/hive'),
-  serviceName: 'hive',
-
-  titleMasters: function(){
-    var masters = this.get('masters');
-    return [masters.findProperty('componentName', 'HIVE_SERVER'), masters.findProperty('componentName', 'HIVE_METASTORE')];
-  }.property('service')
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/dashboard/service/mapreduce.js b/branch-1.2/ambari-web/app/views/main/dashboard/service/mapreduce.js
deleted file mode 100644
index 9abd8c9..0000000
--- a/branch-1.2/ambari-web/app/views/main/dashboard/service/mapreduce.js
+++ /dev/null
@@ -1,122 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-var date = require('utils/date');
-
-App.MainDashboardServiceMapreduceView = App.MainDashboardServiceView.extend({
-  templateName: require('templates/main/dashboard/service/mapreduce'),
-  serviceName: 'MAPREDUCE',
-  jobTrackerWebUrl: function () {
-    return "http://" + this.get('service').get('jobTracker').get('publicHostName') + ":50030";
-  }.property('service.jobTracker'),
-
-  Chart: App.ChartLinearView.extend({
-    data: function () {
-      return this.get('_parentView.data.chart');
-    }.property('_parentView.data.chart')
-  }),
-
-  version: function(){
-    return this.formatUnavailable(this.get('service.version'));
-  }.property('service.version'),
-
-  jobTrackerUptime: function () {
-    var uptime = this.get('service').get('jobTrackerStartTime');
-    if (uptime && uptime > 0){
-      var diff = (new Date()).getTime() - uptime;
-      if (diff < 0) {
-        diff = 0;
-      }
-      var formatted = date.timingFormat(diff);
-      return this.t('dashboard.services.uptime').format(formatted);
-
-    }
-    return this.t('services.service.summary.notRunning');
-  }.property("service.jobTrackerStartTime"),
-
-  summaryHeader: function () {
-    var svc = this.get('service');
-    var liveCount = svc.get('aliveTrackers').get('length');
-    var allCount = svc.get('taskTrackers').get('length');
-    var runningCount = svc.get('mapsRunning') + svc.get('reducesRunning');
-    var waitingCount = svc.get('mapsWaiting') + svc.get('reducesWaiting');
-    var template = this.t('dashboard.services.mapreduce.summary');
-    return template.format(liveCount, allCount, runningCount, waitingCount);
-  }.property('service.aliveTrackers', 'service.taskTrackers','service.mapsRunning', 'service.mapsWaiting', 'service.reducesRunning', 'service.reducesWaiting'),
-
-  trackersSummary: function () {
-    var svc = this.get('service');
-    var liveCount = svc.get('aliveTrackers').get('length');
-    var totalCount = svc.get('taskTrackers').get('length');
-    var template = this.t('dashboard.services.mapreduce.trackersSummary');
-    return template.format(liveCount, totalCount);
-  }.property('service.aliveTrackers.length', 'service.taskTrackers.length'),
-
-  trackersHeapSummary: function () {
-    var heapUsed = this.get('service').get('jobTrackerHeapUsed') || 0;
-    var heapMax = this.get('service').get('jobTrackerHeapMax') || 0;
-    var percent = heapMax > 0 ? 100 * heapUsed / heapMax : 0;
-    return this.t('dashboard.services.mapreduce.jobTrackerHeapSummary').format(heapUsed.bytesToSize(1, "parseFloat"), heapMax.bytesToSize(1, "parseFloat"), percent.toFixed(1));
-  }.property('service.jobTrackerHeapUsed', 'service.jobTrackerHeapMax'),
-
-  jobsSummary: function () {
-    var svc = this.get('service');
-    var template = this.t('dashboard.services.mapreduce.jobsSummary');
-    return template.format(this.formatUnavailable(svc.get('jobsSubmitted')), this.formatUnavailable(svc.get('jobsCompleted')));
-  }.property('service.jobsSubmitted', 'service.jobsCompleted'),
-
-  mapSlotsSummary: function () {
-    var svc = this.get('service');
-    var template = this.t('dashboard.services.mapreduce.mapSlotsSummary');
-    return template.format(this.formatUnavailable(svc.get('mapSlotsOccupied')), this.formatUnavailable(svc.get('mapSlotsReserved')));
-  }.property('service.mapSlotsOccupied', 'service.mapSlotsReserved'),
-
-  reduceSlotsSummary: function () {
-    var svc = this.get('service');
-    var template = this.t('dashboard.services.mapreduce.reduceSlotsSummary');
-    return template.format(this.formatUnavailable(svc.get('reduceSlotsOccupied')), this.formatUnavailable(svc.get('reduceSlotsReserved')));
-  }.property('service.reduceSlotsOccupied', 'service.reduceSlotsReserved'),
-
-  mapTasksSummary: function () {
-    var svc = this.get('service');
-    var template = this.t('dashboard.services.mapreduce.tasksSummary');
-    return template.format(this.formatUnavailable(svc.get('mapsRunning')), this.formatUnavailable(svc.get('mapsWaiting')));
-  }.property('service.mapsRunning', 'service.mapsWaiting'),
-
-  reduceTasksSummary: function () {
-    var svc = this.get('service');
-    var template = this.t('dashboard.services.mapreduce.tasksSummary');
-    return template.format(this.formatUnavailable(svc.get('reducesRunning')), this.formatUnavailable(svc.get('reducesWaiting')));
-  }.property('service.reducesRunning', 'service.reducesWaiting'),
-
-  slotsCapacitySummary: function () {
-    var mapSlots = this.get('service').get('mapSlots');
-    var reduceSlots = this.get('service').get('reduceSlots');
-    var liveNodeCount = this.get('service').get('aliveTrackers').get('length');
-    if(liveNodeCount != 0){
-      var avg = (mapSlots + reduceSlots) / liveNodeCount;
-    }else{
-      avg = "n/a ";
-    }
-    return this.t('dashboard.services.mapreduce.slotCapacitySummary').format(mapSlots, reduceSlots, avg);
-  }.property('service.mapSlots', 'service.reduceSlots', 'service.aliveTrackers'),
-
-  taskTrackerComponent: function () {
-    return App.HostComponent.find().findProperty('componentName', 'TASKTRACKER');
-  }.property()
-});
\ No newline at end of file
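slotsCapacitySummary divides total slots by live trackers, falling back to "n/a " when none are alive. The same guard as a sketch:

function averageSlotsPerNode(mapSlots, reduceSlots, liveNodeCount) {
  if (liveNodeCount === 0) { return 'n/a '; }
  return (mapSlots + reduceSlots) / liveNodeCount;
}

console.log(averageSlotsPerNode(40, 20, 6)); // 10
console.log(averageSlotsPerNode(40, 20, 0)); // 'n/a '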
diff --git a/branch-1.2/ambari-web/app/views/main/dashboard/service/oozie.js b/branch-1.2/ambari-web/app/views/main/dashboard/service/oozie.js
deleted file mode 100644
index 9c7d8db..0000000
--- a/branch-1.2/ambari-web/app/views/main/dashboard/service/oozie.js
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.MainDashboardServiceOozieView = App.MainDashboardServiceView.extend({
-  serviceName: 'oozie',
-  templateName: require('templates/main/dashboard/service/oozie'),
-
-  webUi: function () {
-    var hostName = this.get('service.hostComponents').findProperty('componentName', 'OOZIE_SERVER').get('host.publicHostName');
-    return "http://{0}:11000/oozie".format(hostName);
-  }.property('service')
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/dashboard/service/zookeeper.js b/branch-1.2/ambari-web/app/views/main/dashboard/service/zookeeper.js
deleted file mode 100644
index 16939d8..0000000
--- a/branch-1.2/ambari-web/app/views/main/dashboard/service/zookeeper.js
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.MainDashboardServiceZookeperView = App.MainDashboardServiceView.extend({
-  templateName: require('templates/main/dashboard/service/zookeeper'),
-  serviceName: 'zookeeper',
-
-  titleInfo: function(){
-    var components = this.get('service.hostComponents').filterProperty('componentName', 'ZOOKEEPER_SERVER');
-    var running = 0;
-    components.forEach(function(item){
-      if(item.get('workStatus') === App.HostComponentStatus.started){
-        running += 1;
-      }
-    });
-
-    return {
-      pre: this.t('dashboard.services.zookeeper.prefix').format(running),
-      title: this.t('dashboard.services.zookeeper.title').format(components.length),
-      component: components.objectAt(0)
-    };
-  }.property('service')
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/host.js b/branch-1.2/ambari-web/app/views/main/host.js
deleted file mode 100644
index abba203..0000000
--- a/branch-1.2/ambari-web/app/views/main/host.js
+++ /dev/null
@@ -1,440 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-var filters = require('views/common/filter_view');
-var sort = require('views/common/sort_view');
-var date = require('utils/date');
-
-App.MainHostView = Em.View.extend({
-  templateName:require('templates/main/host'),
-  content:function () {
-    return App.router.get('mainHostController.content');
-  }.property('App.router.mainHostController.content'),
-  oTable: null,
-
-  didInsertElement:function () {
-    this.filter();
-    if (this.get('controller.comeWithAlertsFilter')) {
-      this.set('controller.comeWithAlertsFilter', false);
-      this.set('controller.filteredByAlerts', true);
-    } else {
-      this.set('controller.filteredByAlerts', false);
-    }
-  },
-
-  /**
-   * return pagination information displayed on the hosts page
-   */
-  paginationInfo: function () {
-    return this.t('apps.filters.paginationInfo').format(this.get('startIndex'), this.get('endIndex'), this.get('filteredContent.length'));
-  }.property('displayLength', 'filteredContent.length', 'startIndex', 'endIndex'),
-
-  paginationLeft: Ember.View.extend({
-    tagName: 'a',
-    template: Ember.Handlebars.compile('<i class="icon-arrow-left"></i>'),
-    classNameBindings: ['class'],
-    class: function () {
-      if (this.get("parentView.startIndex") > 1) {
-       return "paginate_previous";
-      }
-      return "paginate_disabled_previous";
-    }.property("parentView.startIndex", 'filteredContent.length'),
-
-    click: function () {
-      this.get('parentView').previousPage();
-    }
-  }),
-
-  paginationRight: Ember.View.extend({
-    tagName: 'a',
-    template: Ember.Handlebars.compile('<i class="icon-arrow-right"></i>'),
-    classNameBindings: ['class'],
-    class: function () {
-      if ((this.get("parentView.endIndex")) < this.get("parentView.filteredContent.length")) {
-       return "paginate_next";
-      }
-      return "paginate_disabled_next";
-    }.property("parentView.endIndex", 'filteredContent.length'),
-
-    click: function () {
-      this.get('parentView').nextPage();
-    }
-  }),
-
-  hostPerPageSelectView: Em.Select.extend({
-    content: ['10', '25', '50']
-  }),
-
-  // start index for displayed content on the hosts page
-  startIndex: 1,
-
-  // calculate end index for displayed content on the hosts page
-  endIndex: function () {
-    return Math.min(this.get('filteredContent.length'), this.get('startIndex') + parseInt(this.get('displayLength')) - 1);
-  }.property('startIndex', 'displayLength', 'filteredContent.length'),
-
-  /**
-   * onclick handler for previous page button on the hosts page
-   */
-  previousPage: function () {
-    var result = this.get('startIndex') - parseInt(this.get('displayLength'));
-    if (result < 2) {
-      result = 1;
-    }
-    this.set('startIndex', result);
-  },
-
-  /**
-   * onclick handler for next page button on the hosts page
-   */
-  nextPage: function () {
-    var result = this.get('startIndex') + parseInt(this.get('displayLength'));
-    if (result - 1 < this.get('filteredContent.length')) {
-      this.set('startIndex', result);
-    }
-  },
-
-  // the number of hosts to show on every page of the hosts page view
-  displayLength: null,
-
-  // calculates default value for startIndex property after applying filter or changing displayLength
-  updatePaging: function () {
-      this.set('startIndex', Math.min(1, this.get('filteredContent.length')));
-  }.observes('displayLength', 'filteredContent.length'),
-
-  sortView: sort.wrapperView,
-  nameSort: sort.fieldView.extend({
-    name:'publicHostName',
-    displayName: Em.I18n.t('common.name')
-  }),
-  ipSort: sort.fieldView.extend({
-    name:'ip',
-    displayName: Em.I18n.t('common.ipAddress'),
-    type: 'ip'
-  }),
-  cpuSort: sort.fieldView.extend({
-    name:'cpu',
-    displayName: Em.I18n.t('common.cpu')
-  }),
-  memorySort: sort.fieldView.extend({
-    name:'memory',
-    displayName: Em.I18n.t('common.ram')
-  }),
-  diskUsageSort: sort.fieldView.extend({
-    name:'diskUsage',
-    displayName: Em.I18n.t('common.diskUsage')
-  }),
-  loadAvgSort: sort.fieldView.extend({
-    name:'loadAvg',
-    displayName: Em.I18n.t('common.loadAvg')
-  }),
-  HostView:Em.View.extend({
-    content:null,
-    tagName: 'tr',
-    shortLabels: function() {
-      var labels = this.get('content.hostComponents').getEach('displayName');
-      var shortLabels = '';
-      var c = 0;
-      labels.forEach(function(label) {
-        if (label) {
-          if (c < 2) {
-            shortLabels += label.replace(/[^A-Z]/g, '') + ', ';
-            c++;
-          }
-        }
-      });
-      shortLabels = shortLabels.substr(0, shortLabels.length - 2);
-      if (labels.length > 2) {
-        shortLabels += ' and ' + (labels.length - 2) + ' more';
-      }
-      return shortLabels;
-    }.property('labels'),
-
-    labels: function(){
-      return this.get('content.hostComponents').getEach('displayName').join('\n');
-    }.property('content.hostComponents.@each'),
-
-    usageStyle:function () {
-      return "width:" + this.get('content.diskUsage') + "%";
-      //return "width:" + (25+Math.random()*50) + "%"; // Just for tests purposes
-    }.property('content.diskUsage')
-
-  }),
-
-  /**
-   * Filter view for name column
-   * Based on <code>filters</code> library
-   */
-  nameFilterView: filters.createTextView({
-    onChangeValue: function(){
-      this.get('parentView').updateFilter(1, this.get('value'), 'string');
-    }
-  }),
-
-  /**
-   * Filter view for ip column
-   * Based on <code>filters</code> library
-   */
-  ipFilterView: filters.createTextView({
-    onChangeValue: function(){
-      this.get('parentView').updateFilter(2, this.get('value'), 'string');
-    }
-  }),
-
-  /**
-   * Filter view for Cpu column
-   * Based on <code>filters</code> library
-   */
-  cpuFilterView: filters.createTextView({
-    fieldType: 'input-mini',
-    fieldId: 'cpu_filter',
-    onChangeValue: function(){
-      this.get('parentView').updateFilter(3, this.get('value'), 'number');
-    }
-  }),
-
-  /**
-   * Filter view for LoadAverage column
-   * Based on <code>filters</code> library
-   */
-  loadAvgFilterView: filters.createTextView({
-    fieldType: 'input-mini',
-    fieldId: 'load_avg_filter',
-    onChangeValue: function(){
-      this.get('parentView').updateFilter(5, this.get('value'), 'number');
-    }
-  }),
-
-  /**
-   * Filter view for Ram column
-   * Based on <code>filters</code> library
-   */
-  ramFilterView: filters.createTextView({
-    fieldType: 'input-mini',
-    fieldId: 'ram_filter',
-    onChangeValue: function(){
-      this.get('parentView').updateFilter(4, this.get('value'), 'ambari-bandwidth');
-    }
-  }),
-
-  /**
-   * Filter view for HostComponents column
-   * Based on <code>filters</code> library
-   */
-  componentsFilterView: filters.createComponentView({
-    /**
-     * Inner FilterView. Used just to render the component. Its value is bound to the <code>mainview.value</code> property.
-     * Base methods are implemented in <code>filters.componentFieldView</code>.
-     */
-    filterView: filters.componentFieldView.extend({
-      templateName: require('templates/main/host/component_filter'),
-
-      /**
-       * Next three lines bind data to this view
-       */
-      masterComponentsBinding: 'controller.masterComponents',
-      slaveComponentsBinding: 'controller.slaveComponents',
-      clientComponentsBinding: 'controller.clientComponents',
-
-      /**
-       * Checkbox for quick selecting/deselecting of master components
-       */
-      masterComponentsChecked:false,
-      toggleMasterComponents:function () {
-        this.get('masterComponents').setEach('checkedForHostFilter', this.get('masterComponentsChecked'));
-      }.observes('masterComponentsChecked'),
-
-      /**
-       * Checkbox for quick selecting/deselecting of slave components
-       */
-      slaveComponentsChecked:false,
-      toggleSlaveComponents:function () {
-        this.get('slaveComponents').setEach('checkedForHostFilter', this.get('slaveComponentsChecked'));
-      }.observes('slaveComponentsChecked'),
-
-      /**
-       * Checkbox for quick selecting/deselecting of client components
-       */
-      clientComponentsChecked: false,
-      toggleClientComponents: function() {
-        this.get('clientComponents').setEach('checkedForHostFilter', this.get('clientComponentsChecked'));
-      }.observes('clientComponentsChecked'),
-
-      /**
-       * Clear filter.
-       * Called by parent view, when user clicks on <code>x</code> button(clear button)
-       */
-      clearFilter:function() {
-        this.set('masterComponentsChecked', false);
-        this.set('slaveComponentsChecked', false);
-        this.set('clientComponentsChecked', false);
-
-        this.get('masterComponents').setEach('checkedForHostFilter', false);
-        this.get('slaveComponents').setEach('checkedForHostFilter', false);
-        this.get('clientComponents').setEach('checkedForHostFilter', false);
-
-        this._super();
-      },
-
-      /**
-       * Onclick handler for <code>Apply filter</code> button
-       */
-      applyFilter:function() {
-        this._super();
-
-        var chosenComponents = [];
-
-        this.get('masterComponents').filterProperty('checkedForHostFilter', true).forEach(function(item){
-          chosenComponents.push(item.get('id'));
-        });
-        this.get('slaveComponents').filterProperty('checkedForHostFilter', true).forEach(function(item){
-          chosenComponents.push(item.get('id'));
-        });
-        this.get('clientComponents').filterProperty('checkedForHostFilter', true).forEach(function(item){
-          chosenComponents.push(item.get('id'));
-        });
-        this.set('value', chosenComponents.toString());
-      },
-
-      didInsertElement:function () {
-        if (this.get('controller.comeWithFilter')) {
-          this.applyFilter();
-          this.set('controller.comeWithFilter', false);
-        } else {
-          this.clearFilter();
-        }
-      }
-
-    }),
-    onChangeValue: function(){
-      this.get('parentView').updateFilter(6, this.get('value'), 'multiple');
-    }
-  }),
-
-  /**
-   * Filter hosts by hosts with at least one alert
-   */
-  filterByAlerts:function() {
-    if (this.get('controller.filteredByAlerts')) {
-      this.updateFilter(7, '>0', 'number')
-    } else {
-      this.updateFilter(7, '', 'number')
-    }
-  }.observes('controller.filteredByAlerts'),
-
-  /**
-   * Apply each filter to host
-   *
-   * @param iColumn number of the column to filter by
-   * @param value filter value
-   */
-  updateFilter: function(iColumn, value, type){
-    var filterCondition = this.get('filterConditions').findProperty('iColumn', iColumn);
-    if(filterCondition) {
-      filterCondition.value = value;
-    } else {
-      filterCondition = {
-        iColumn: iColumn,
-        value: value,
-        type: type
-      }
-      this.get('filterConditions').push(filterCondition);
-    }
-    this.filter();
-  },
-  /**
-   * associations between host property and column index
-   */
-  colPropAssoc: function(){
-    var associations = [];
-    associations[1] = 'publicHostName';
-    associations[2] = 'ip';
-    associations[3] = 'cpu';
-    associations[4] = 'memoryFormatted';
-    associations[5] = 'loadAvg';
-    associations[6] = 'hostComponents';
-    associations[7] = 'criticalAlertsCount';
-    return associations;
-  }.property(),
-  globalSearchValue:null,
-  /**
-   * filter table by all fields
-   */
-  globalFilter: function(){
-    var content = this.get('content');
-    var searchValue = this.get('globalSearchValue');
-    var result;
-    if(searchValue){
-      result = content.filter(function(host){
-        var match = false;
-        this.get('colPropAssoc').forEach(function(item){
-          var filterFunc = filters.getFilterByType('string', false);
-          if(item === 'hostComponents'){
-            filterFunc = filters.getFilterByType('multiple', true);
-          }
-          if(!match){
-            match = filterFunc(host.get(item), searchValue);
-          }
-        });
-        return match;
-      }, this);
-      this.set('filteredContent', result);
-    } else {
-      this.filter();
-    }
-  }.observes('globalSearchValue', 'content'),
-  /**
-   * contain filter conditions for each column
-   */
-  filterConditions: [],
-  filteredContent: [],
-
-  // contain content to show on the current page of hosts page view
-  pageContent: function () {
-    return this.get('filteredContent').slice(this.get('startIndex') - 1, this.get('endIndex'));
-  }.property('filteredContent.length', 'startIndex', 'endIndex'),
-
-  /**
-   * filter table by filterConditions
-   */
-  filter: function(){
-    var content = this.get('content');
-    var filterConditions = this.get('filterConditions').filterProperty('value');
-    var result;
-    var self = this;
-    var assoc = this.get('colPropAssoc');
-    if(!this.get('globalSearchValue')){
-      if(filterConditions.length){
-        result = content.filter(function(host){
-          var match = true;
-          filterConditions.forEach(function(condition){
-            var filterFunc = filters.getFilterByType(condition.type, false);
-            if(match){
-              match = filterFunc(host.get(assoc[condition.iColumn]), condition.value);
-            }
-          });
-          return match;
-        });
-        this.set('filteredContent', result);
-      } else {
-        this.set('filteredContent', content.toArray());
-      }
-    }
-  }.observes('content')
-});
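The paging window in App.MainHostView is defined by a 1-based startIndex and an endIndex clamped to the filtered list length. A sketch of the window math (hypothetical helper):

function pageWindow(startIndex, displayLength, filteredLength) {
  return {
    start: startIndex,
    end: Math.min(filteredLength, startIndex + displayLength - 1)
  };
}

console.log(pageWindow(11, 10, 14)); // { start: 11, end: 14 } -- short last page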
diff --git a/branch-1.2/ambari-web/app/views/main/host/add_view.js b/branch-1.2/ambari-web/app/views/main/host/add_view.js
deleted file mode 100644
index 300af3e..0000000
--- a/branch-1.2/ambari-web/app/views/main/host/add_view.js
+++ /dev/null
@@ -1,70 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-var App = require('app');
-
-App.AddHostView = Em.View.extend({
-
-  templateName: require('templates/main/host/add'),
-
-  isStep1Disabled: function () {
-    return this.isStepDisabled(1);
-  }.property('controller.isStepDisabled.@each.value').cacheable(),
-
-  isStep2Disabled: function () {
-    return this.isStepDisabled(2);
-  }.property('controller.isStepDisabled.@each.value').cacheable(),
-
-  isStep3Disabled: function () {
-    return this.isStepDisabled(3);
-  }.property('controller.isStepDisabled.@each.value').cacheable(),
-
-  isStep4Disabled: function () {
-    return this.isStepDisabled(4);
-  }.property('controller.isStepDisabled.@each.value').cacheable(),
-
-  isStep5Disabled: function () {
-    return this.isStepDisabled(5);
-  }.property('controller.isStepDisabled.@each.value').cacheable(),
-
-  isStep6Disabled: function () {
-    return this.isStepDisabled(6);
-  }.property('controller.isStepDisabled.@each.value').cacheable(),
-
-  isStep7Disabled: function () {
-    return this.isStepDisabled(7);
-  }.property('controller.isStepDisabled.@each.value').cacheable(),
-
-  isStep8Disabled: function () {
-    return this.isStepDisabled(8);
-  }.property('controller.isStepDisabled.@each.value').cacheable(),
-
-  isStep9Disabled: function () {
-    return this.isStepDisabled(9);
-  }.property('controller.isStepDisabled.@each.value').cacheable(),
-
-  isStep10Disabled: function () {
-    return this.isStepDisabled(10);
-  }.property('controller.isStepDisabled.@each.value').cacheable(),
-
-  isStepDisabled: function (index) {
-    return this.get('controller.isStepDisabled').findProperty('step', index).get('value');
-  }
-
-});
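The ten isStepNDisabled properties above differ only in the step index; a hedged sketch of a factory that would generate them in the same Ember 1.x style (stepDisabledProperty is a hypothetical helper, not part of the original view):

  var stepDisabledProperty = function (index) {
    return function () {
      return this.isStepDisabled(index);
    }.property('controller.isStepDisabled.@each.value').cacheable();
  };

  // Usage: isStep1Disabled: stepDisabledProperty(1), ..., isStep10Disabled: stepDisabledProperty(10)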
diff --git a/branch-1.2/ambari-web/app/views/main/host/details.js b/branch-1.2/ambari-web/app/views/main/host/details.js
deleted file mode 100644
index caccb9c..0000000
--- a/branch-1.2/ambari-web/app/views/main/host/details.js
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-var date = require('utils/date');
-
-App.MainHostDetailsView = Em.View.extend({
-  templateName: require('templates/main/host/details'),
-
-  content: function(){
-    return App.router.get('mainHostDetailsController.content');
-  }.property('App.router.mainHostDetailsController.content'),
-
-  maintenance: function(){
-    var options = [{action: 'deleteHost', label: this.t('hosts.host.details.deleteHost')}];
-    return options;
-  }.property('controller.content')
-});
diff --git a/branch-1.2/ambari-web/app/views/main/host/menu.js b/branch-1.2/ambari-web/app/views/main/host/menu.js
deleted file mode 100644
index a2a9570..0000000
--- a/branch-1.2/ambari-web/app/views/main/host/menu.js
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.MainHostMenuView = Em.CollectionView.extend({
-  tagName: 'ul',
-  classNames: ["nav", "nav-tabs"],
-  content:[
- /*   { label:'Summary', routing:'summary'},
-    { label:'Audit', routing:'audit'}*/
-  ],
-
-  init: function(){ this._super(); this.activateView(); },
-
-  activateView:function () {
-    $.each(this._childViews, function () {
-      this.set('active', (this.get('content.routing') == 'summary' ? "active" : ""));
-    });
-  },
-
-  deactivateChildViews: function() {
-    $.each(this._childViews, function(){
-      this.set('active', "");
-    });
-  },
-
-  itemViewClass: Em.View.extend({
-    classNameBindings: ["active"],
-    active: "",
-    template: Ember.Handlebars.compile('<a {{action hostNavigate view.content.routing }} href="#"> {{unbound view.content.label}}</a>')
-  })
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/host/metrics.js b/branch-1.2/ambari-web/app/views/main/host/metrics.js
deleted file mode 100644
index 3fb0a14..0000000
--- a/branch-1.2/ambari-web/app/views/main/host/metrics.js
+++ /dev/null
@@ -1,26 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.MainHostMetricsView = Em.View.extend({
-  templateName: require('templates/main/host/metrics'),
-  content:function(){
-    return App.router.get('mainHostDetailsController.content');
-  }.property('App.router.mainHostDetailsController.content')
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/host/metrics/cpu.js b/branch-1.2/ambari-web/app/views/main/host/metrics/cpu.js
deleted file mode 100644
index a775653..0000000
--- a/branch-1.2/ambari-web/app/views/main/host/metrics/cpu.js
+++ /dev/null
@@ -1,87 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * @class
- * 
- * This is a view for showing Host CPU metrics
- * 
- * @extends App.ChartLinearTimeView
- * @extends Ember.Object
- * @extends Ember.View
- */
-App.ChartHostMetricsCPU = App.ChartLinearTimeView.extend({
-  id: "host-metrics-cpu",
-  title: Em.I18n.t('hosts.host.metrics.cpu'),
-  yAxisFormatter: App.ChartLinearTimeView.PercentageFormatter,
-
-  sourceUrl: "/hosts/{hostName}?fields=metrics/cpu/cpu_user[{fromSeconds},{toSeconds},{stepSeconds}],metrics/cpu/cpu_wio[{fromSeconds},{toSeconds},{stepSeconds}],metrics/cpu/cpu_nice[{fromSeconds},{toSeconds},{stepSeconds}],metrics/cpu/cpu_aidle[{fromSeconds},{toSeconds},{stepSeconds}],metrics/cpu/cpu_system[{fromSeconds},{toSeconds},{stepSeconds}],metrics/cpu/cpu_idle[{fromSeconds},{toSeconds},{stepSeconds}]",
-  mockUrl: "/data/hosts/metrics/cpu.json",
-
-  transformToSeries: function (jsonData) {
-    var seriesArray = [];
-    if (jsonData && jsonData.metrics && jsonData.metrics.cpu) {
-      var cpu_idle;
-      for ( var name in jsonData.metrics.cpu) {
-        var displayName;
-        var seriesData = jsonData.metrics.cpu[name];
-        switch (name) {
-          case "cpu_wio":
-            displayName = "CPU I/O Idle";
-            break;
-          case "cpu_idle":
-            displayName = "CPU Idle";
-            break;
-          case "cpu_nice":
-            displayName = "CPU Nice";
-            break;
-          case "cpu_aidle":
-            displayName = "CPU Boot Idle";
-            break;
-          case "cpu_system":
-            displayName = "CPU System";
-            break;
-          case "cpu_user":
-            displayName = "CPU User";
-            break;
-          default:
-            break;
-        }
-        if (seriesData) {
-          var s = this.transformData(seriesData, displayName);
-          if ('CPU Idle' == s.name) {
-            cpu_idle = s;
-          }
-          else {
-            seriesArray.push(s);
-          }
-        }
-      }
-      if (cpu_idle) {
-        seriesArray.push(cpu_idle);
-      }
-    }
-    return seriesArray;
-  },
-
-  colorForSeries: function (series) {
-    if ("CPU Idle" == series.name) {
-      return '#CFECEC';
-    }
-    return null;
-  }
-});
\ No newline at end of file
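For reference, the shapes transformToSeries works with here, under assumed Ganglia-style data (the sample values are hypothetical):

  // Assumed input:
  //   { "metrics": { "cpu": { "cpu_user": <seriesData>, "cpu_idle": <seriesData>, ... } } }
  // transformData(seriesData, displayName) is assumed to return
  //   { name: displayName, data: [ { x: <timestamp>, y: <value> }, ... ] }
  // The "CPU Idle" series is held back and appended last so it sits on top of
  // the stacked chart, and it is pushed only when present so the series array
  // never contains undefined.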
diff --git a/branch-1.2/ambari-web/app/views/main/host/metrics/disk.js b/branch-1.2/ambari-web/app/views/main/host/metrics/disk.js
deleted file mode 100644
index bf7dec6..0000000
--- a/branch-1.2/ambari-web/app/views/main/host/metrics/disk.js
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * @class
- * 
- * This is a view for showing host disk usage
- * 
- * @extends App.ChartLinearTimeView
- * @extends Ember.Object
- * @extends Ember.View
- */
-App.ChartHostMetricsDisk = App.ChartLinearTimeView.extend({
-  id: "host-metrics-disk",
-  title: Em.I18n.t('hosts.host.metrics.disk'),
-  yAxisFormatter: App.ChartLinearTimeView.BytesFormatter,
-  renderer: 'line',
-  sourceUrl: "/hosts/{hostName}?fields=metrics/disk/disk_total[{fromSeconds},{toSeconds},{stepSeconds}],metrics/disk/disk_free[{fromSeconds},{toSeconds},{stepSeconds}]",
-  mockUrl: "/data/hosts/metrics/disk.json",
-
-  transformToSeries: function (jsonData) {
-    var seriesArray = [];
-    var GB = Math.pow(2, 30);
-    if (jsonData && jsonData.metrics && jsonData.metrics.disk) {
-      if(jsonData.metrics.part_max_used){
-        jsonData.metrics.disk.part_max_used = jsonData.metrics.part_max_used;
-      }
-      for ( var name in jsonData.metrics.disk) {
-        var displayName;
-        var seriesData = jsonData.metrics.disk[name];
-        switch (name) {
-          case "disk_total":
-            displayName = "Total";
-            break;
-          case "disk_free":
-            displayName = "Available";
-            break;
-          default:
-            break;
-        }
-        if (seriesData) {
-          var s = this.transformData(seriesData, displayName);
-          for (var i = 0; i < s.data.length; i++) {
-            s.data[i].y *= GB;
-          }
-          seriesArray.push(s);
-        }
-      }
-    }
-    return seriesArray;
-  }
-});
\ No newline at end of file
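The y *= GB loop rescales the gigabyte figures the API is assumed to report into bytes for BytesFormatter; a worked example with a hypothetical value:

  // disk_total of 845.5 (GB) becomes 845.5 * Math.pow(2, 30) ≈ 9.08e11 bytes,
  // which a bytes formatter would render back as roughly "845.5 GB" on the y-axis.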
diff --git a/branch-1.2/ambari-web/app/views/main/host/metrics/load.js b/branch-1.2/ambari-web/app/views/main/host/metrics/load.js
deleted file mode 100644
index a01ddfd..0000000
--- a/branch-1.2/ambari-web/app/views/main/host/metrics/load.js
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * @class
- * 
- * This is a view for showing host load
- * 
- * @extends App.ChartLinearTimeView
- * @extends Ember.Object
- * @extends Ember.View
- */
-App.ChartHostMetricsLoad = App.ChartLinearTimeView.extend({
-  id: "host-metrics-load",
-  title: Em.I18n.t('hosts.host.metrics.load'),
-  renderer: 'line',
-  sourceUrl: "/hosts/{hostName}?fields=metrics/load/load_fifteen[{fromSeconds},{toSeconds},{stepSeconds}],metrics/load/load_one[{fromSeconds},{toSeconds},{stepSeconds}],metrics/load/load_five[{fromSeconds},{toSeconds},{stepSeconds}]",
-  mockUrl: "/data/hosts/metrics/load.json",
-
-  transformToSeries: function (jsonData) {
-    var seriesArray = [];
-    if (jsonData && jsonData.metrics && jsonData.metrics.load) {
-      for ( var name in jsonData.metrics.load) {
-        var displayName;
-        var seriesData = jsonData.metrics.load[name];
-        switch (name) {
-          case "load_fifteen":
-            displayName = "15 Minute Load";
-            break;
-          case "load_one":
-            displayName = "1 Minute Load";
-            break;
-          case "load_five":
-            displayName = "5 Minute Load";
-            break;
-          default:
-            break;
-        }
-        if (seriesData) {
-          seriesArray.push(this.transformData(seriesData, displayName));
-        }
-      }
-    }
-    return seriesArray;
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/host/metrics/memory.js b/branch-1.2/ambari-web/app/views/main/host/metrics/memory.js
deleted file mode 100644
index b78fde6..0000000
--- a/branch-1.2/ambari-web/app/views/main/host/metrics/memory.js
+++ /dev/null
@@ -1,74 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * @class
- * 
- * This is a view for showing host memory metrics
- * 
- * @extends App.ChartLinearTimeView
- * @extends Ember.Object
- * @extends Ember.View
- */
-App.ChartHostMetricsMemory = App.ChartLinearTimeView.extend({
-  id: "host-metrics-memory",
-  title: Em.I18n.t('hosts.host.metrics.memory'),
-  yAxisFormatter: App.ChartLinearTimeView.BytesFormatter,
-  renderer: 'line',
-  sourceUrl: "/hosts/{hostName}?fields=metrics/memory/swap_free[{fromSeconds},{toSeconds},{stepSeconds}],metrics/memory/mem_shared[{fromSeconds},{toSeconds},{stepSeconds}],metrics/memory/mem_free[{fromSeconds},{toSeconds},{stepSeconds}],metrics/memory/mem_cached[{fromSeconds},{toSeconds},{stepSeconds}],metrics/memory/mem_buffers[{fromSeconds},{toSeconds},{stepSeconds}]",
-  mockUrl: "/data/hosts/metrics/memory.json",
-
-  transformToSeries: function (jsonData) {
-    var seriesArray = [];
-    var KB = Math.pow(2, 10);
-    if (jsonData && jsonData.metrics && jsonData.metrics.memory) {
-      for ( var name in jsonData.metrics.memory) {
-        var displayName;
-        var seriesData = jsonData.metrics.memory[name];
-        switch (name) {
-          case "mem_shared":
-            displayName = "Shared";
-            break;
-          case "swap_free":
-            displayName = "Swap";
-            break;
-          case "mem_buffers":
-            displayName = "Buffers";
-            break;
-          case "mem_free":
-            displayName = "Free";
-            break;
-          case "mem_cached":
-            displayName = "Cached";
-            break;
-          default:
-            break;
-        }
-        if (seriesData) {
-          var s = this.transformData(seriesData, displayName);
-          for (var i = 0; i < s.data.length; i++) {
-            s.data[i].y *= KB;
-          }
-          seriesArray.push(s);
-        }
-      }
-    }
-    return seriesArray;
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/host/metrics/network.js b/branch-1.2/ambari-web/app/views/main/host/metrics/network.js
deleted file mode 100644
index 64cc466..0000000
--- a/branch-1.2/ambari-web/app/views/main/host/metrics/network.js
+++ /dev/null
@@ -1,66 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * @class
- * 
- * This is a view for showing host network metrics
- * 
- * @extends App.ChartLinearTimeView
- * @extends Ember.Object
- * @extends Ember.View
- */
-App.ChartHostMetricsNetwork = App.ChartLinearTimeView.extend({
-  id: "host-metrics-network",
-  title: Em.I18n.t('hosts.host.metrics.network'),
-  yAxisFormatter: App.ChartLinearTimeView.BytesFormatter,
-  renderer: 'line',
-  sourceUrl: "/hosts/{hostName}?fields=metrics/network/bytes_in[{fromSeconds},{toSeconds},{stepSeconds}],metrics/network/bytes_out[{fromSeconds},{toSeconds},{stepSeconds}],metrics/network/pkts_in[{fromSeconds},{toSeconds},{stepSeconds}],metrics/network/pkts_out[{fromSeconds},{toSeconds},{stepSeconds}]",
-  mockUrl: "/data/hosts/metrics/network.json",
-
-  transformToSeries: function (jsonData) {
-    var seriesArray = [];
-    if (jsonData && jsonData.metrics && jsonData.metrics.network) {
-      for ( var name in jsonData.metrics.network) {
-        var displayName;
-        var seriesData = jsonData.metrics.network[name];
-        switch (name) {
-          case "pkts_out":
-            displayName = "Packets Out";
-            break;
-          case "bytes_in":
-            displayName = "Bytes In";
-            break;
-          case "bytes_out":
-            displayName = "Bytes Out";
-            break;
-          case "pkts_in":
-            displayName = "Packets In";
-            break;
-          default:
-            break;
-        }
-        if (seriesData) {
-          seriesArray.push(this.transformData(seriesData, displayName));
-        }
-      }
-    }
-    return seriesArray;
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/host/metrics/processes.js b/branch-1.2/ambari-web/app/views/main/host/metrics/processes.js
deleted file mode 100644
index d3f09a3..0000000
--- a/branch-1.2/ambari-web/app/views/main/host/metrics/processes.js
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * @class
- * 
- * This is a view for showing host process counts
- * 
- * @extends App.ChartLinearTimeView
- * @extends Ember.Object
- * @extends Ember.View
- */
-App.ChartHostMetricsProcesses = App.ChartLinearTimeView.extend({
-  id: "host-metrics-processes",
-  title: Em.I18n.t('hosts.host.metrics.processes'),
-  renderer: 'line',
-  sourceUrl: "/hosts/{hostName}?fields=metrics/process/proc_total[{fromSeconds},{toSeconds},{stepSeconds}],metrics/process/proc_run[{fromSeconds},{toSeconds},{stepSeconds}]",
-  mockUrl: "/data/hosts/metrics/processes.json",
-
-  transformToSeries: function (jsonData) {
-    var seriesArray = [];
-    if (jsonData && jsonData.metrics && jsonData.metrics.process) {
-      for ( var name in jsonData.metrics.process) {
-        var displayName;
-        var seriesData = jsonData.metrics.process[name];
-        switch (name) {
-          case "proc_total":
-            displayName = "Total Processes";
-            break;
-          case "proc_run":
-            displayName = "Processes Run";
-            break;
-          default:
-            break;
-        }
-        if (seriesData) {
-          seriesArray.push(this.transformData(seriesData, displayName));
-        }
-      }
-    }
-    return seriesArray;
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/host/summary.js b/branch-1.2/ambari-web/app/views/main/host/summary.js
deleted file mode 100644
index 070fbc3..0000000
--- a/branch-1.2/ambari-web/app/views/main/host/summary.js
+++ /dev/null
@@ -1,238 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.MainHostSummaryView = Em.View.extend({
-  templateName: require('templates/main/host/summary'),
-
-  content: function () {
-    return App.router.get('mainHostDetailsController.content');
-  }.property('App.router.mainHostDetailsController.content'),
-
-  showGangliaCharts: function () {
-    var name = this.get('content.hostName');
-    var gangliaMobileUrl = App.router.get('clusterController.gangliaUrl') + "/mobile_helper.php?show_host_metrics=1&h=" + name + "&c=HDPNameNode&r=hour&cs=&ce=";
-    window.open(gangliaMobileUrl);
-  },
-
-  /**
-   * @type {String[]}
-   */
-  decommissionDataNodeHostNames: null,
-
-  loadDecommissionNodesList: function () {
-    var self = this;
-    var clusterName = App.router.get('clusterController.clusterName');
-    var persistUrl = App.apiPrefix + '/persist';
-    var clusterUrl = App.apiPrefix + '/clusters/' + clusterName;
-    var getConfigAjax = {
-      type: 'GET',
-      url: persistUrl,
-      dataType: 'json',
-      timeout: App.timeout,
-      success: function (data) {
-        if (data && data.decommissionDataNodesTag) {
-          // We know the tag which contains the decommissioned nodes.
-          var configsUrl = clusterUrl + '/configurations?type=hdfs-exclude-file&tag=' + data.decommissionDataNodesTag;
-          var decomNodesAjax = {
-            type: 'GET',
-            url: configsUrl,
-            dataType: 'json',
-            timeout: App.timeout,
-            success: function (data) {
-              if (data && data.items) {
-                var csv = data.items[0].properties.datanodes;
-                self.set('decommissionDataNodeHostNames', csv.split(','));
-              }
-            },
-            error: function (xhr, textStatus, errorThrown) {
-              console.log(textStatus);
-              console.log(errorThrown);
-            }
-          };
-          jQuery.ajax(decomNodesAjax);
-        }
-      },
-      error: function (xhr, textStatus, errorThrown) {
-        // No tag pointer in persist. Rely on service's decomNodes.
-        var hdfsSvcs = App.HDFSService.find();
-        if (hdfsSvcs && hdfsSvcs.get('length') > 0) {
-          var hdfsSvc = hdfsSvcs.objectAt(0);
-          if (hdfsSvc) {
-            var hostNames = [];
-            var decomNodes = hdfsSvc.get('decommissionDataNodes');
-            decomNodes.forEach(function (decomNode) {
-              hostNames.push(decomNode.get('hostName'));
-            });
-            self.set('decommissionDataNodeHostNames', hostNames);
-          }
-        }
-      }
-    };
-    jQuery.ajax(getConfigAjax);
-  },
-  didInsertElement: function () {
-    this.loadDecommissionNodesList();
-  },
-  sortedComponents: function() {
-    var slaveComponents = [];
-    var masterComponents = [];
-    this.get('content.hostComponents').forEach(function(component){
-      if(component.get('isMaster')){
-        masterComponents.push(component);
-      } else if(component.get('isSlave')) {
-        slaveComponents.push(component);
-      }
-    }, this);
-    return masterComponents.concat(slaveComponents);
-  }.property('content'),
-  clients: function(){
-    var clients = [];
-    this.get('content.hostComponents').forEach(function(component){
-      if(!component.get('componentName')){
-        // Temporary fix: hostComponents and serviceComponents expose different data.
-        return;
-      }
-      if (!component.get('isSlave') && !component.get('isMaster')) {
-        if (clients.length) {
-          clients[clients.length-1].set('isLast', false);
-        }
-        component.set('isLast', true);
-        clients.push(component);
-      }
-    }, this);
-    return clients;
-  }.property('content'),
-
-  ComponentView: Em.View.extend({
-    content: null,
-    didInsertElement: function () {
-      if (this.get('isInProgress')) {
-        this.doBlinking();
-      }
-    },
-    hostComponent: function(){
-      var hostComponent = null;
-      var serviceComponent = this.get('content');
-      var host = App.router.get('mainHostDetailsController.content');
-      if(host){
-        hostComponent = host.get('hostComponents').findProperty('componentName', serviceComponent.get('componentName'));
-      }
-      return hostComponent;
-    }.property('content', 'App.router.mainHostDetailsController.content'),
-    workStatus: function(){
-      var workStatus = this.get('content.workStatus');
-      var hostComponent = this.get('hostComponent');
-      if(hostComponent){
-        workStatus = hostComponent.get('workStatus');
-      }
-      return workStatus;
-    }.property('content.workStatus','hostComponent.workStatus'),
-    statusClass: function(){
-      var statusClass = null;
-      if(this.get('isDataNode')){
-        if(this.get('isDataNodeRecommissionAvailable') && this.get('isStart')){
-          // Orange is shown only when service is started/starting and it is decommissioned.
-          return 'health-status-DEAD-ORANGE';
-        }
-      }
-      return 'health-status-' + App.HostComponentStatus.getKeyName(this.get('workStatus'));
-    }.property('workStatus', 'isDataNodeRecommissionAvailable'),
-    /**
-     * Disable element while component is starting/stopping
-     */
-    disabledClass:function(){
-      var workStatus = this.get('workStatus');
-      if([App.HostComponentStatus.starting, App.HostComponentStatus.stopping].contains(workStatus) ){
-        return 'disabled';
-      } else {
-        return '';
-      }
-    }.property('workStatus'),
-    /**
-     * Do blinking for 1 minute
-     */
-    doBlinking : function(){
-      var workStatus = this.get('workStatus');
-      var self = this;
-      var pulsate = [ App.HostComponentStatus.starting, App.HostComponentStatus.stopping ].contains(workStatus);
-      if (!pulsate && this.get('isDataNode')) {
-        var dataNodeComponent = this.get('content');
-        if (dataNodeComponent)
-          pulsate = dataNodeComponent.get('isDecommissioning');
-      }
-      if (pulsate) {
-        this.$('.components-health').effect("pulsate", null, 1000, function () {
-          self.doBlinking();
-        });
-      }
-    },
-    /**
-     * Start blinking when host component is starting/stopping
-     */
-    startBlinking:function(){
-      this.$('.components-health').stop(true, true);
-      this.$('.components-health').css({opacity: 1.0});
-      this.doBlinking();
-    }.observes('workStatus'),
-
-    isStart : function() {
-      return (this.get('workStatus') === App.HostComponentStatus.started || this.get('workStatus') === App.HostComponentStatus.starting);
-    }.property('workStatus'),
-
-    isInProgress : function() {
-      return (this.get('workStatus') === App.HostComponentStatus.stopping || this.get('workStatus') === App.HostComponentStatus.starting);
-    }.property('workStatus'),
-    /**
-     * Shows whether we need to show the Decommission/Recommission buttons
-     */
-    isDataNode: function() {
-      return this.get('content.componentName') === 'DATANODE';
-    }.property('content'),
-
-    /**
-     * Set in template via binding from parent view
-     */
-    decommissionDataNodeHostNames: null,
-    /**
-     * Decommission is available whenever the service is started.
-     */
-    isDataNodeDecommissionAvailable: function () {
-      return this.get('isStart') && !this.get('isDataNodeRecommissionAvailable');
-    }.property('isStart', 'isDataNodeRecommissionAvailable'),
-
-    /**
-     * Recommission is available only when this hostname shows up in the
-     * 'decommissionDataNodeHostNames'
-     */
-    isDataNodeRecommissionAvailable: function () {
-      var decommissionHostNames = this.get('decommissionDataNodeHostNames');
-      var hostName = App.router.get('mainHostDetailsController.content.hostName');
-      return decommissionHostNames!=null && decommissionHostNames.contains(hostName);
-    }.property('App.router.mainHostDetailsController.content', 'decommissionDataNodeHostNames')
-
-  }),
-  timeSinceHeartBeat: function () {
-    var d = this.get('content.lastHeartBeatTime');
-    if (d) {
-      return $.timeago(d);
-    }
-    return "";
-  }.property('content.lastHeartBeatTime')
-});
\ No newline at end of file
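A hedged sketch of the two response shapes loadDecommissionNodesList relies on; both payloads below are illustrative assumptions:

  // 1) GET {App.apiPrefix}/persist
  //    { "decommissionDataNodesTag": "1365011330" }
  // 2) GET {clusterUrl}/configurations?type=hdfs-exclude-file&tag=1365011330
  //    { "items": [ { "properties": { "datanodes": "host1.example.com,host2.example.com" } } ] }
  // If the persist call fails, the view instead falls back to the
  // decommissionDataNodes association on App.HDFSService.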
diff --git a/branch-1.2/ambari-web/app/views/main/menu.js b/branch-1.2/ambari-web/app/views/main/menu.js
deleted file mode 100644
index 8fd4b07..0000000
--- a/branch-1.2/ambari-web/app/views/main/menu.js
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-/**
- * This menu is extended by other menus by overriding content and itemViewClass.template.
- * @type {*}
- */
-App.MainMenuView = Em.CollectionView.extend({
-  tagName:'ul',
-  classNames:['nav'],
-  content:function(){
-    var result = [
-      { label:Em.I18n.t('menu.item.dashboard'), routing:'dashboard', active:'active'},
-      { label:Em.I18n.t('menu.item.heatmaps'), routing:'charts'},
-      { label:Em.I18n.t('menu.item.services'), routing:'services'},
-      { label:Em.I18n.t('menu.item.hosts'), routing:'hosts'},
-      { label:Em.I18n.t('menu.item.jobs'), routing:'apps'}
-
-    ];
-    if (App.db.getUser().admin) result.push({ label:Em.I18n.t('menu.item.admin'), routing:'admin'});
-    return result;
-  }.property(),
-  /**
-   * Adds an observer on lastSetURL and calls the navigation sync procedure
-   */
-  didInsertElement:function () {
-    App.router.location.addObserver('lastSetURL', this, 'renderOnRoute');
-    this.renderOnRoute();
-  },
-
-  /**
-   * Syncs the navigation menu with the requested URL
-   */
-  renderOnRoute:function () {
-    var last_url = App.router.location.lastSetURL || location.href.replace(/^[^#]*#/, '');
-    if (last_url.substr(1, 4) !== 'main' || !this._childViews) {
-      return;
-    }
-    var reg = /^\/main\/([a-z]+)/g;
-    var sub_url = reg.exec(last_url);
-    var chunk = (null != sub_url) ? sub_url[1] : 'dashboard';
-    $.each(this._childViews, function () {
-      this.set('active', this.get('content.routing') == chunk ? "active" : "");
-    });
-  },
-
-  itemViewClass:Em.View.extend({
-
-    classNameBindings:['active', ':span2'],
-    active:'',
-
-    alertsCount:function () {
-      if (this.get('content').routing == 'hosts') {
-        return App.router.get('mainHostController.alerts').length;
-      }
-    }.property('App.router.mainHostController.alerts.length'),
-
-    templateName: require('templates/main/menu_item')
-  })
-});
\ No newline at end of file
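How renderOnRoute resolves the active tab, worked through on sample URLs (the values are hypothetical):

  // location.href '.../#/main/hosts/host1/summary' → last_url '/main/hosts/host1/summary'
  // /^\/main\/([a-z]+)/g.exec(last_url) → ['/main/hosts', 'hosts'] → chunk 'hosts'
  // last_url '/main' → exec returns null → chunk defaults to 'dashboard'
  // Any URL not starting with '/main' (e.g. '/login') is ignored entirely.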
diff --git a/branch-1.2/ambari-web/app/views/main/service.js b/branch-1.2/ambari-web/app/views/main/service.js
deleted file mode 100644
index 9ed9d61..0000000
--- a/branch-1.2/ambari-web/app/views/main/service.js
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.MainServiceView = Em.View.extend({
-  templateName:require('templates/main/service')
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/service/add_view.js b/branch-1.2/ambari-web/app/views/main/service/add_view.js
deleted file mode 100644
index a78cced..0000000
--- a/branch-1.2/ambari-web/app/views/main/service/add_view.js
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-var App = require('app');
-
-App.AddServiceView = Em.View.extend({
-
-  templateName: require('templates/main/service/add'),
-
-  isStep1Disabled: function () {
-    return this.isStepDisabled(1);
-  }.property('controller.isStepDisabled.@each.value').cacheable(),
-
-  isStep2Disabled: function () {
-    return this.isStepDisabled(2);
-  }.property('controller.isStepDisabled.@each.value').cacheable(),
-
-  isStep3Disabled: function () {
-    return this.isStepDisabled(3);
-  }.property('controller.isStepDisabled.@each.value').cacheable(),
-
-  isStep4Disabled: function () {
-    return this.isStepDisabled(4);
-  }.property('controller.isStepDisabled.@each.value').cacheable(),
-
-  isStep5Disabled: function () {
-    return this.isStepDisabled(5);
-  }.property('controller.isStepDisabled.@each.value').cacheable(),
-
-  isStep6Disabled: function () {
-    return this.isStepDisabled(6);
-  }.property('controller.isStepDisabled.@each.value').cacheable(),
-
-  isStep7Disabled: function () {
-    return this.isStepDisabled(7);
-  }.property('controller.isStepDisabled.@each.value').cacheable(),
-
-  isStepDisabled: function (index) {
-    return this.get('controller.isStepDisabled').findProperty('step', index).get('value');
-  }
-
-});
diff --git a/branch-1.2/ambari-web/app/views/main/service/info/configs.js b/branch-1.2/ambari-web/app/views/main/service/info/configs.js
deleted file mode 100644
index 3367dc2..0000000
--- a/branch-1.2/ambari-web/app/views/main/service/info/configs.js
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.MainServiceInfoConfigsView = Em.View.extend({
-  templateName: require('templates/main/service/info/configs'),
-  didInsertElement: function () {
-    var controller = this.get('controller');
-    controller.loadStep();
-  },
-  onToggleBlock: function(event){
-    $(document.getElementById(event.context.name)).toggle('blind', 500);
-    event.context.set('isCollapsed', !event.context.get('isCollapsed'));
-  }
-});
-
-App.ServiceConfigsByCategoryView = Ember.View.extend({
-
-  content: null,
-
-  category: null,
-  serviceConfigs: null, // General, Advanced, NameNode, SNameNode, DataNode, etc.
-
-  categoryConfigs: function () {
-    return this.get('serviceConfigs').filterProperty('category', this.get('category.name'));
-  }.property('serviceConfigs.@each').cacheable(),
-  didInsertElement: function () {
-    if (this.get('category.name') == 'Advanced') {
-      this.set('category.isCollapsed', true);
-      $("#Advanced").hide();
-    } else {
-      this.set('category.isCollapsed', false);
-    }
-  },
-  layout: Ember.Handlebars.compile('<div {{bindAttr id="view.category.name"}} class="accordion-body collapse in"><div class="accordion-inner">{{yield}}</div></div>')
-});
\ No newline at end of file
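The collapse behaviour ties the category name to the DOM id via the layout's bindAttr; a worked example (the markup is illustrative):

  // category.name 'Advanced' renders as:
  //   <div id="Advanced" class="accordion-body collapse in"> ... </div>
  // didInsertElement then hides it ($("#Advanced").hide()) and marks it collapsed,
  // and onToggleBlock reopens it with the jQuery UI 'blind' slide over 500 ms.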
diff --git a/branch-1.2/ambari-web/app/views/main/service/info/menu.js b/branch-1.2/ambari-web/app/views/main/service/info/menu.js
deleted file mode 100644
index c38aa53..0000000
--- a/branch-1.2/ambari-web/app/views/main/service/info/menu.js
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.MainServiceInfoMenuView = Em.CollectionView.extend({
-  tagName: 'ul',
-  classNames: ["nav", "nav-tabs"],
-  content:function(){
-    var menuItems = [
-      { label: Em.I18n.t('services.service.info.menu.summary'), routing:'summary', active:"active"}
-      //{ label:'Audit', routing:'audit'}
-    ];
-    if(this.get('configTab')) menuItems.push({ label: Em.I18n.t('services.service.info.menu.configs'), routing:'configs'});
-    return menuItems;
-  }.property(),
-
-  init: function(){ this._super(); this.activateView(); },
-
-  activateView:function () {
-    $.each(this._childViews, function () {
-      this.set('active', (this.get('content.routing') == 'summary' ? "active" : ""));
-    });
-  },
-
-  deactivateChildViews: function() {
-    $.each(this._childViews, function(){
-      this.set('active', "");
-    });
-  },
-
-  itemViewClass: Em.View.extend({
-    classNameBindings: ["active"],
-    active: "",
-    template: Ember.Handlebars.compile('<a {{action showInfo view.content.routing }} href="#"> {{unbound view.content.label}}</a>')
-  })
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/service/info/metrics/hbase/cluster_requests.js b/branch-1.2/ambari-web/app/views/main/service/info/metrics/hbase/cluster_requests.js
deleted file mode 100644
index 1fe841a..0000000
--- a/branch-1.2/ambari-web/app/views/main/service/info/metrics/hbase/cluster_requests.js
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * @class
- * 
- * This is a view for showing HBase Cluster Requests
- * 
- * @extends App.ChartLinearTimeView
- * @extends Ember.Object
- * @extends Ember.View
- */
-App.ChartServiceMetricsHBASE_ClusterRequests = App.ChartLinearTimeView.extend({
-  id: "service-metrics-hbase-cluster-requests",
-  title: Em.I18n.t('services.service.info.metrics.hbase.clusterRequests'),
-  sourceUrl: "/services/HBASE/components/HBASE_MASTER?fields=metrics/hbase/master/cluster_requests[{fromSeconds},{toSeconds},{stepSeconds}]",
-  mockUrl: "/data/services/metrics/hbase/cluster_requests.json",
-
-  transformToSeries: function (jsonData) {
-    var seriesArray = [];
-    if (jsonData && jsonData.metrics && jsonData.metrics.hbase && jsonData.metrics.hbase.master) {
-      for ( var name in jsonData.metrics.hbase.master) {
-        var displayName;
-        var seriesData = jsonData.metrics.hbase.master[name];
-        switch (name) {
-          case "cluster_requests":
-            displayName = "Request Count";
-            break;
-          default:
-            break;
-        }
-        if (seriesData) {
-          seriesArray.push(this.transformData(seriesData, displayName));
-        }
-      }
-    }
-    return seriesArray;
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/service/info/metrics/hbase/hlog_split_size.js b/branch-1.2/ambari-web/app/views/main/service/info/metrics/hbase/hlog_split_size.js
deleted file mode 100644
index 42d97ae..0000000
--- a/branch-1.2/ambari-web/app/views/main/service/info/metrics/hbase/hlog_split_size.js
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * @class
- * 
- * This is a view for showing HBase HLog Split Size
- * 
- * @extends App.ChartLinearTimeView
- * @extends Ember.Object
- * @extends Ember.View
- */
-App.ChartServiceMetricsHBASE_HlogSplitSize = App.ChartLinearTimeView.extend({
-  id: "service-metrics-hbase-hlog-split-size",
-  title: Em.I18n.t('services.service.info.metrics.hbase.hlogSplitSize'),
-  yAxisFormatter: App.ChartLinearTimeView.BytesFormatter,
-
-  sourceUrl: "/services/HBASE/components/HBASE_MASTER?fields=metrics/hbase/master/splitSize_avg_time[{fromSeconds},{toSeconds},{stepSeconds}]",
-  mockUrl: "/data/services/metrics/hbase/hlog_split_size.json",
-
-  transformToSeries: function (jsonData) {
-    var seriesArray = [];
-    if (jsonData && jsonData.metrics && jsonData.metrics.hbase && jsonData.metrics.hbase.master) {
-      for ( var name in jsonData.metrics.hbase.master) {
-        var displayName;
-        var seriesData = jsonData.metrics.hbase.master[name];
-        switch (name) {
-          case "splitSize_avg_time":
-            displayName = "Split Size";
-            break;
-          default:
-            break;
-        }
-        if (seriesData) {
-          seriesArray.push(this.transformData(seriesData, displayName));
-        }
-      }
-    }
-    return seriesArray;
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/service/info/metrics/hbase/hlog_split_time.js b/branch-1.2/ambari-web/app/views/main/service/info/metrics/hbase/hlog_split_time.js
deleted file mode 100644
index e091436..0000000
--- a/branch-1.2/ambari-web/app/views/main/service/info/metrics/hbase/hlog_split_time.js
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * @class
- * 
- * This is a view for showing HBase HLog Split Time
- * 
- * @extends App.ChartLinearTimeView
- * @extends Ember.Object
- * @extends Ember.View
- */
-App.ChartServiceMetricsHBASE_HlogSplitTime = App.ChartLinearTimeView.extend({
-  id: "service-metrics-hbase-hlog-split-time",
-  title: Em.I18n.t('services.service.info.metrics.hbase.hlogSplitTime'),
-  yAxisFormatter: App.ChartLinearTimeView.TimeElapsedFormatter,
-
-  sourceUrl: "/services/HBASE/components/HBASE_MASTER?fields=metrics/hbase/master/splitTime_avg_time[{fromSeconds},{toSeconds},{stepSeconds}]",
-  mockUrl: "/data/services/metrics/hbase/hlog_split_time.json",
-
-  transformToSeries: function (jsonData) {
-    var seriesArray = [];
-    if (jsonData && jsonData.metrics && jsonData.metrics.hbase && jsonData.metrics.hbase.master) {
-      for ( var name in jsonData.metrics.hbase.master) {
-        var displayName;
-        var seriesData = jsonData.metrics.hbase.master[name];
-        switch (name) {
-          case "splitTime_avg_time":
-            displayName = "Split Time";
-            break;
-          default:
-            break;
-        }
-        if (seriesData) {
-          seriesArray.push(this.transformData(seriesData, displayName));
-        }
-      }
-    }
-    return seriesArray;
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/service/info/metrics/hbase/regionserver_queuesize.js b/branch-1.2/ambari-web/app/views/main/service/info/metrics/hbase/regionserver_queuesize.js
deleted file mode 100644
index 309c997..0000000
--- a/branch-1.2/ambari-web/app/views/main/service/info/metrics/hbase/regionserver_queuesize.js
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * @class
- * 
- * This is a view for showing HBase RegionServer Queue Size
- * 
- * @extends App.ChartLinearTimeView
- * @extends Ember.Object
- * @extends Ember.View
- */
-App.ChartServiceMetricsHBASE_RegionServerQueueSize = App.ChartLinearTimeView.extend({
-  id: "service-metrics-hbase-regionserver-queuesize",
-  title: Em.I18n.t('services.service.info.metrics.hbase.regionServerQueueSize'),
-  renderer: 'line',
-  sourceUrl: "/services/HBASE/components/HBASE_REGIONSERVER?fields=metrics/hbase/regionserver/flushQueueSize[{fromSeconds},{toSeconds},{stepSeconds}],metrics/hbase/regionserver/compactionQueueSize[{fromSeconds},{toSeconds},{stepSeconds}]",
-  mockUrl: "/data/services/metrics/hbase/regionserver_queuesize.json",
-
-  transformToSeries: function (jsonData) {
-    var seriesArray = [];
-    if (jsonData && jsonData.metrics && jsonData.metrics.hbase && jsonData.metrics.hbase.regionserver) {
-      for ( var name in jsonData.metrics.hbase.regionserver) {
-        var displayName;
-        var seriesData = jsonData.metrics.hbase.regionserver[name];
-        switch (name) {
-          case "compactionQueueSize":
-            displayName = "Compaction Queue Size";
-            break;
-          case "flushQueueSize":
-            displayName = "Flush Queue Size";
-            break;
-          default:
-            break;
-        }
-        if (seriesData) {
-          seriesArray.push(this.transformData(seriesData, displayName));
-        }
-      }
-    }
-    return seriesArray;
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/service/info/metrics/hbase/regionserver_regions.js b/branch-1.2/ambari-web/app/views/main/service/info/metrics/hbase/regionserver_regions.js
deleted file mode 100644
index 6041ef2..0000000
--- a/branch-1.2/ambari-web/app/views/main/service/info/metrics/hbase/regionserver_regions.js
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * @class
- * 
- * This is a view for showing HBase RegionServer Regions
- * 
- * @extends App.ChartLinearTimeView
- * @extends Ember.Object
- * @extends Ember.View
- */
-App.ChartServiceMetricsHBASE_RegionServerRegions = App.ChartLinearTimeView.extend({
-  id: "service-metrics-hbase-regionserver-regions",
-  title: Em.I18n.t('services.service.info.metrics.hbase.regionServerRegions'),
-
-  sourceUrl: "/services/HBASE/components/HBASE_REGIONSERVER?fields=metrics/hbase/regionserver/regions[{fromSeconds},{toSeconds},{stepSeconds}]",
-  mockUrl: "/data/services/metrics/hbase/regionserver_regions.json",
-
-  transformToSeries: function (jsonData) {
-    var seriesArray = [];
-    if (jsonData && jsonData.metrics && jsonData.metrics.hbase && jsonData.metrics.hbase.regionserver) {
-      for ( var name in jsonData.metrics.hbase.regionserver) {
-        var displayName;
-        var seriesData = jsonData.metrics.hbase.regionserver[name];
-        switch (name) {
-          case "regions":
-            displayName = "Regions";
-            break;
-          default:
-            break;
-        }
-        if (seriesData) {
-          seriesArray.push(this.transformData(seriesData, displayName));
-        }
-      }
-    }
-    return seriesArray;
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/service/info/metrics/hbase/regionserver_rw_requests.js b/branch-1.2/ambari-web/app/views/main/service/info/metrics/hbase/regionserver_rw_requests.js
deleted file mode 100644
index d033339..0000000
--- a/branch-1.2/ambari-web/app/views/main/service/info/metrics/hbase/regionserver_rw_requests.js
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * @class
- * 
- * This is a view for showing HBase RegionServer read/write requests
- * 
- * @extends App.ChartLinearTimeView
- * @extends Ember.Object
- * @extends Ember.View
- */
-App.ChartServiceMetricsHBASE_RegionServerReadWriteRequests = App.ChartLinearTimeView.extend({
-  id: "service-metrics-hbase-regionserver-rw-requests",
-  title: Em.I18n.t('services.service.info.metrics.hbase.regionServerRequests'),
-  renderer: 'line',
-  sourceUrl: "/services/HBASE/components/HBASE_REGIONSERVER?fields=metrics/hbase/regionserver/readRequestsCount[{fromSeconds},{toSeconds},{stepSeconds}],metrics/hbase/regionserver/writeRequestsCount[{fromSeconds},{toSeconds},{stepSeconds}]",
-  mockUrl: "/data/services/metrics/hbase/regionserver_rw_requests.json",
-
-  transformToSeries: function (jsonData) {
-    var seriesArray = [];
-    if (jsonData && jsonData.metrics && jsonData.metrics.hbase && jsonData.metrics.hbase.regionserver) {
-      for ( var name in jsonData.metrics.hbase.regionserver) {
-        var displayName;
-        var seriesData = jsonData.metrics.hbase.regionserver[name];
-        switch (name) {
-          case "writeRequestsCount":
-            displayName = "Write Requests";
-            break;
-          case "readRequestsCount":
-            displayName = "Read Requests";
-            break;
-          default:
-            break;
-        }
-        if (seriesData) {
-          seriesArray.push(this.transformData(seriesData, displayName));
-        }
-      }
-    }
-    return seriesArray;
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/service/info/metrics/hdfs/block_status.js b/branch-1.2/ambari-web/app/views/main/service/info/metrics/hdfs/block_status.js
deleted file mode 100644
index bf91e2c..0000000
--- a/branch-1.2/ambari-web/app/views/main/service/info/metrics/hdfs/block_status.js
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * @class
- * 
- * This is a view for showing HDFS block status (pending and under-replicated blocks)
- * 
- * @extends App.ChartLinearTimeView
- * @extends Ember.Object
- * @extends Ember.View
- */
-App.ChartServiceMetricsHDFS_BlockStatus = App.ChartLinearTimeView.extend({
-  id: "service-metrics-hdfs-block-status",
-  title: Em.I18n.t('services.service.info.metrics.hdfs.blockStatus'),
-  renderer: 'line',
-  sourceUrl: "/hosts/{nameNodeName}/host_components/NAMENODE?fields=metrics/dfs/FSNamesystem/PendingReplicationBlocks[{fromSeconds},{toSeconds},{stepSeconds}],metrics/dfs/FSNamesystem/UnderReplicatedBlocks[{fromSeconds},{toSeconds},{stepSeconds}]",
-  mockUrl:"/data/services/metrics/hdfs/block_status.json",
-
-  transformToSeries: function (jsonData) {
-    var seriesArray = [];
-    if (jsonData && jsonData.metrics && jsonData.metrics.dfs && jsonData.metrics.dfs.FSNamesystem) {
-      for ( var name in jsonData.metrics.dfs.FSNamesystem) {
-        var displayName;
-        var seriesData = jsonData.metrics.dfs.FSNamesystem[name];
-        switch (name) {
-          case "PendingReplicationBlocks":
-            displayName = "Pending Replication Blocks";
-            break;
-          case "UnderReplicatedBlocks":
-            displayName = "Under Replicated Blocks";
-            break;
-          default:
-            break;
-        }
-        if (seriesData) {
-          seriesArray.push(this.transformData(seriesData, displayName));
-        }
-      }
-    }
-    return seriesArray;
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/service/info/metrics/hdfs/file_operations.js b/branch-1.2/ambari-web/app/views/main/service/info/metrics/hdfs/file_operations.js
deleted file mode 100644
index 133dba2..0000000
--- a/branch-1.2/ambari-web/app/views/main/service/info/metrics/hdfs/file_operations.js
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * @class
- * 
- * This is a view for showing NameNode file operations
- * 
- * @extends App.ChartLinearTimeView
- * @extends Ember.Object
- * @extends Ember.View
- */
-App.ChartServiceMetricsHDFS_FileOperations = App.ChartLinearTimeView.extend({
-  id: "service-metrics-hdfs-file-operations",
-  title: Em.I18n.t('services.service.info.metrics.hdfs.fileOperations'),
-  renderer: 'line',
-  sourceUrl: "/hosts/{nameNodeName}/host_components/NAMENODE?fields=metrics/dfs/namenode/FileInfoOps[{fromSeconds},{toSeconds},{stepSeconds}],metrics/dfs/namenode/CreateFileOps[{fromSeconds},{toSeconds},{stepSeconds}]",
-  mockUrl: "/data/services/metrics/hdfs/file_operations.json",
-
-  transformToSeries: function (jsonData) {
-    var seriesArray = [];
-    if (jsonData && jsonData.metrics && jsonData.metrics.dfs && jsonData.metrics.dfs.namenode) {
-      for ( var name in jsonData.metrics.dfs.namenode) {
-        var displayName;
-        var seriesData = jsonData.metrics.dfs.namenode[name];
-        switch (name) {
-          case "FileInfoOps":
-            displayName = "File Information Operations";
-            break;
-          case "DeleteFileOps":
-            displayName = "Delete File Operations";
-            break;
-          case "CreateFileOps":
-            displayName = "Create File Operations";
-            break;
-          default:
-            break;
-        }
-        if (seriesData) {
-          seriesArray.push(this.transformData(seriesData, displayName));
-        }
-      }
-    }
-    return seriesArray;
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/service/info/metrics/hdfs/gc.js b/branch-1.2/ambari-web/app/views/main/service/info/metrics/hdfs/gc.js
deleted file mode 100644
index 5857ca7..0000000
--- a/branch-1.2/ambari-web/app/views/main/service/info/metrics/hdfs/gc.js
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * @class
- * 
- * This is a view for showing NameNode garbage collection time
- * 
- * @extends App.ChartLinearTimeView
- * @extends Ember.Object
- * @extends Ember.View
- */
-App.ChartServiceMetricsHDFS_GC = App.ChartLinearTimeView.extend({
-  id: "service-metrics-hdfs-gc",
-  title: Em.I18n.t('services.service.info.metrics.hdfs.gc'),
-  yAxisFormatter: App.ChartLinearTimeView.TimeElapsedFormatter,
-
-  sourceUrl: "/hosts/{nameNodeName}/host_components/NAMENODE?fields=metrics/jvm/gcTimeMillis[{fromSeconds},{toSeconds},{stepSeconds}]",
-  mockUrl: "/data/services/metrics/hdfs/gc.json",
-
-  transformToSeries: function (jsonData) {
-    var seriesArray = [];
-    if (jsonData && jsonData.metrics && jsonData.metrics.jvm) {
-      for ( var name in jsonData.metrics.jvm) {
-        var displayName;
-        var seriesData = jsonData.metrics.jvm[name];
-        switch (name) {
-          case "gcTimeMillis":
-            displayName = "Time";
-            break;
-          default:
-            break;
-        }
-        if (seriesData) {
-          seriesArray.push(this.transformData(seriesData, displayName));
-        }
-      }
-    }
-    return seriesArray;
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/service/info/metrics/hdfs/io.js b/branch-1.2/ambari-web/app/views/main/service/info/metrics/hdfs/io.js
deleted file mode 100644
index 4797d01..0000000
--- a/branch-1.2/ambari-web/app/views/main/service/info/metrics/hdfs/io.js
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * @class
- * 
- * This is a view for showing HDFS I/O (DataNode bytes read and written)
- * 
- * @extends App.ChartLinearTimeView
- * @extends Ember.Object
- * @extends Ember.View
- */
-App.ChartServiceMetricsHDFS_IO = App.ChartLinearTimeView.extend({
-  id: "service-metrics-hdfs-io",
-  title: Em.I18n.t('services.service.info.metrics.hdfs.io'),
-  yAxisFormatter: App.ChartLinearTimeView.BytesFormatter,
-  renderer: 'line',
-  sourceUrl: "/services/HDFS/components/DATANODE?fields=metrics/dfs/datanode/bytes_written[{fromSeconds},{toSeconds},{stepSeconds}],metrics/dfs/datanode/bytes_read[{fromSeconds},{toSeconds},{stepSeconds}]",
-  mockUrl: "/data/services/metrics/hdfs/io.json",
-
-  transformToSeries: function (jsonData) {
-    var seriesArray = [];
-    if (jsonData && jsonData.metrics && jsonData.metrics.dfs && jsonData.metrics.dfs.datanode) {
-      for ( var name in jsonData.metrics.dfs.datanode) {
-        var displayName;
-        var seriesData = jsonData.metrics.dfs.datanode[name];
-        switch (name) {
-          case "bytes_written":
-            displayName = "Bytes Written";
-            break;
-          case "bytes_read":
-            displayName = "Bytes Read";
-            break;
-          default:
-            break;
-        }
-        if (seriesData) {
-          seriesArray.push(this.transformData(seriesData, displayName));
-        }
-      }
-    }
-    return seriesArray;
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/service/info/metrics/hdfs/jvm_heap.js b/branch-1.2/ambari-web/app/views/main/service/info/metrics/hdfs/jvm_heap.js
deleted file mode 100644
index c36d48c..0000000
--- a/branch-1.2/ambari-web/app/views/main/service/info/metrics/hdfs/jvm_heap.js
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * @class
- * 
- * This is a view for showing NameNode JVM heap usage
- * 
- * @extends App.ChartLinearTimeView
- * @extends Ember.Object
- * @extends Ember.View
- */
-App.ChartServiceMetricsHDFS_JVMHeap = App.ChartLinearTimeView.extend({
-  id: "service-metrics-hdfs-jvm-heap",
-  title: Em.I18n.t('services.service.info.metrics.hdfs.jvmHeap'),
-  yAxisFormatter: App.ChartLinearTimeView.BytesFormatter,
-  renderer: 'line',
-  sourceUrl:"/hosts/{nameNodeName}/host_components/NAMENODE?fields=metrics/jvm/memNonHeapUsedM[{fromSeconds},{toSeconds},{stepSeconds}],metrics/jvm/memNonHeapCommittedM[{fromSeconds},{toSeconds},{stepSeconds}],metrics/jvm/memHeapUsedM[{fromSeconds},{toSeconds},{stepSeconds}],metrics/jvm/memHeapCommittedM[{fromSeconds},{toSeconds},{stepSeconds}]",
-  mockUrl: "/data/services/metrics/hdfs/jvm_heap.json",
-
-  transformToSeries: function (jsonData) {
-    var seriesArray = [];
-    var MB = Math.pow(2, 20);
-    if (jsonData && jsonData.metrics && jsonData.metrics.jvm) {
-      for ( var name in jsonData.metrics.jvm) {
-        var displayName;
-        var seriesData = jsonData.metrics.jvm[name];
-        switch (name) {
-          case "memHeapCommittedM":
-            displayName = "Heap Memory Committed";
-            break;
-          case "memNonHeapUsedM":
-            displayName = "Non Heap Memory Used";
-            break;
-          case "memHeapUsedM":
-            displayName = "Heap Memory Used";
-            break;
-          case "memNonHeapCommittedM":
-            displayName = "Non Heap Memory Committed";
-            break;
-          default:
-            break;
-        }
-        if (seriesData) {
-          var s = this.transformData(seriesData, displayName);
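-          // JVM heap metrics are reported in megabytes (the trailing 'M' in the
-          // metric names); convert to bytes so the BytesFormatter y-axis is correct.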
-          for (var i = 0; i < s.data.length; i++) {
-            s.data[i].y *= MB;
-          }
-          seriesArray.push(s);
-        }
-      }
-    }
-    return seriesArray;
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/service/info/metrics/hdfs/jvm_threads.js b/branch-1.2/ambari-web/app/views/main/service/info/metrics/hdfs/jvm_threads.js
deleted file mode 100644
index e35d1ee..0000000
--- a/branch-1.2/ambari-web/app/views/main/service/info/metrics/hdfs/jvm_threads.js
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * @class
- * 
- * This is a view for showing NameNode JVM thread states
- * 
- * @extends App.ChartLinearTimeView
- * @extends Ember.Object
- * @extends Ember.View
- */
-App.ChartServiceMetricsHDFS_JVMThreads = App.ChartLinearTimeView.extend({
-  id: "service-metrics-hdfs-jvm-threads",
-  title: Em.I18n.t('services.service.info.metrics.hdfs.jvmThreads'),
-  renderer: 'line',
-  sourceUrl: "/hosts/{nameNodeName}/host_components/NAMENODE?fields=metrics/jvm/threadsRunnable[{fromSeconds},{toSeconds},{stepSeconds}],metrics/jvm/threadsBlocked[{fromSeconds},{toSeconds},{stepSeconds}],metrics/jvm/threadsWaiting[{fromSeconds},{toSeconds},{stepSeconds}],metrics/jvm/threadsTimedWaiting[{fromSeconds},{toSeconds},{stepSeconds}]",
-  mockUrl: "/data/services/metrics/hdfs/jvm_threads.json",
-
-  transformToSeries: function (jsonData) {
-    var seriesArray = [];
-    if (jsonData && jsonData.metrics && jsonData.metrics.jvm) {
-      for ( var name in jsonData.metrics.jvm) {
-        var displayName;
-        var seriesData = jsonData.metrics.jvm[name];
-        switch (name) {
-          case "threadsBlocked":
-            displayName = "Threads Blocked";
-            break;
-          case "threadsWaiting":
-            displayName = "Threads Waiting";
-            break;
-          case "threadsTimedWaiting":
-            displayName = "Threads Timed Waiting";
-            break;
-          case "threadsRunnable":
-            displayName = "Threads Runnable";
-            break;
-          default:
-            break;
-        }
-        if (seriesData) {
-          seriesArray.push(this.transformData(seriesData, displayName));
-        }
-      }
-    }
-    return seriesArray;
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/service/info/metrics/hdfs/rpc.js b/branch-1.2/ambari-web/app/views/main/service/info/metrics/hdfs/rpc.js
deleted file mode 100644
index f8f91e7..0000000
--- a/branch-1.2/ambari-web/app/views/main/service/info/metrics/hdfs/rpc.js
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * @class
- * 
- * This is a view for showing NameNode RPC queue wait time
- * 
- * @extends App.ChartLinearTimeView
- * @extends Ember.Object
- * @extends Ember.View
- */
-App.ChartServiceMetricsHDFS_RPC = App.ChartLinearTimeView.extend({
-  id: "service-metrics-hdfs-rpc",
-  title: Em.I18n.t('services.service.info.metrics.hdfs.rpc'),
-  yAxisFormatter: App.ChartLinearTimeView.TimeElapsedFormatter,
-  sourceUrl: "/hosts/{nameNodeName}/host_components/NAMENODE?fields=metrics/rpc/RpcQueueTime_avg_time[{fromSeconds},{toSeconds},{stepSeconds}]",
-  mockUrl: "/data/services/metrics/hdfs/rpc.json",
-
-  transformToSeries: function (jsonData) {
-    var seriesArray = [];
-    if (jsonData && jsonData.metrics && jsonData.metrics.rpc) {
-      for ( var name in jsonData.metrics.rpc) {
-        var displayName;
-        var seriesData = jsonData.metrics.rpc[name];
-        switch (name) {
-          case "RpcQueueTime_avg_time":
-            displayName = "Queue Average Wait Time";
-            break;
-          default:
-            break;
-        }
-        if (seriesData) {
-          seriesArray.push(this.transformData(seriesData, displayName));
-        }
-      }
-    }
-    return seriesArray;
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/service/info/metrics/hdfs/space_utilization.js b/branch-1.2/ambari-web/app/views/main/service/info/metrics/hdfs/space_utilization.js
deleted file mode 100644
index e5131f7..0000000
--- a/branch-1.2/ambari-web/app/views/main/service/info/metrics/hdfs/space_utilization.js
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * @class
- * 
- * This is a view for showing HDFS space utilization
- * 
- * @extends App.ChartLinearTimeView
- * @extends Ember.Object
- * @extends Ember.View
- */
-App.ChartServiceMetricsHDFS_SpaceUtilization = App.ChartLinearTimeView.extend({
-  id: "service-metrics-hdfs-space-utilization",
-  title: Em.I18n.t('services.service.info.metrics.hdfs.spaceUtilization'),
-  yAxisFormatter: App.ChartLinearTimeView.BytesFormatter,
-  renderer: 'line',
-  sourceUrl: "/hosts/{nameNodeName}/host_components/NAMENODE?fields=metrics/dfs/FSNamesystem/CapacityRemainingGB[{fromSeconds},{toSeconds},{stepSeconds}],metrics/dfs/FSNamesystem/CapacityUsedGB[{fromSeconds},{toSeconds},{stepSeconds}],metrics/dfs/FSNamesystem/CapacityTotalGB[{fromSeconds},{toSeconds},{stepSeconds}]",
-  mockUrl: "/data/services/metrics/hdfs/space_utilization.json",
-
-  transformToSeries: function (jsonData) {
-    var seriesArray = [];
-    var GB = Math.pow(2, 30);
-    if (jsonData && jsonData.metrics && jsonData.metrics.dfs && jsonData.metrics.dfs.FSNamesystem) {
-      for ( var name in jsonData.metrics.dfs.FSNamesystem) {
-        var displayName;
-        var seriesData = jsonData.metrics.dfs.FSNamesystem[name];
-        switch (name) {
-          case "CapacityRemainingGB":
-            displayName = "Capacity Remaining";
-            break;
-          case "CapacityUsedGB":
-            displayName = "Capacity Used";
-            break;
-          case "CapacityTotalGB":
-            displayName = "Capacity Total";
-            break;
-          default:
-            break;
-        }
-        if (seriesData) {
-          var s = this.transformData(seriesData, displayName);
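-          // Capacity metrics are reported in gigabytes (the 'GB' suffix); convert
-          // to bytes so the BytesFormatter y-axis is correct.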
-          for (var i = 0; i < s.data.length; i++) {
-            s.data[i].y *= GB;
-          }
-          seriesArray.push(s);
-        }
-      }
-    }
-    return seriesArray;
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/service/info/metrics/mapreduce/gc.js b/branch-1.2/ambari-web/app/views/main/service/info/metrics/mapreduce/gc.js
deleted file mode 100644
index 1055d45..0000000
--- a/branch-1.2/ambari-web/app/views/main/service/info/metrics/mapreduce/gc.js
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * @class
- * 
- * This is a view for showing JobTracker garbage collection time
- * 
- * @extends App.ChartLinearTimeView
- * @extends Ember.Object
- * @extends Ember.View
- */
-App.ChartServiceMetricsMapReduce_GC = App.ChartLinearTimeView.extend({
-  id: "service-metrics-mapreduce-gc",
-  title: Em.I18n.t('services.service.info.metrics.mapreduce.gc'),
-  yAxisFormatter: App.ChartLinearTimeView.TimeElapsedFormatter,
-  sourceUrl: "/hosts/{jobTrackerNode}/host_components/JOBTRACKER?fields=metrics/jvm/gcTimeMillis[{fromSeconds},{toSeconds},{stepSeconds}]",
-  mockUrl: "/data/services/metrics/mapreduce/gc.json",
-
-  transformToSeries: function (jsonData) {
-    var seriesArray = [];
-    if (jsonData && jsonData.metrics && jsonData.metrics.jvm) {
-      for ( var name in jsonData.metrics.jvm) {
-        var displayName;
-        var seriesData = jsonData.metrics.jvm[name];
-        switch (name) {
-          case "gcTimeMillis":
-            displayName = "Time";
-            break;
-          default:
-            break;
-        }
-        if (seriesData) {
-          seriesArray.push(this.transformData(seriesData, displayName));
-        }
-      }
-    }
-    return seriesArray;
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/service/info/metrics/mapreduce/jobs_status.js b/branch-1.2/ambari-web/app/views/main/service/info/metrics/mapreduce/jobs_status.js
deleted file mode 100644
index f4cfa0a..0000000
--- a/branch-1.2/ambari-web/app/views/main/service/info/metrics/mapreduce/jobs_status.js
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * @class
- * 
- * This is a view for showing MapReduce job status
- * 
- * @extends App.ChartLinearTimeView
- * @extends Ember.Object
- * @extends Ember.View
- */
-App.ChartServiceMetricsMapReduce_JobsStatus = App.ChartLinearTimeView.extend({
-  id: "service-metrics-mapreduce-jobs-status",
-  title: Em.I18n.t('services.service.info.metrics.mapreduce.jobsStatus'),
-  renderer: 'line',
-  sourceUrl: "/services/MAPREDUCE/components/JOBTRACKER?fields=metrics/mapred/jobtracker/jobs_completed[{fromSeconds},{toSeconds},{stepSeconds}],metrics/mapred/jobtracker/jobs_preparing[{fromSeconds},{toSeconds},{stepSeconds}],metrics/mapred/jobtracker/jobs_failed[{fromSeconds},{toSeconds},{stepSeconds}],metrics/mapred/jobtracker/jobs_submitted[{fromSeconds},{toSeconds},{stepSeconds}],metrics/mapred/jobtracker/jobs_failed[{fromSeconds},{toSeconds},{stepSeconds}],metrics/mapred/jobtracker/jobs_running[{fromSeconds},{toSeconds},{stepSeconds}]",
-  mockUrl: "/data/services/metrics/mapreduce/jobs_status.json",
-
-  transformToSeries: function (jsonData) {
-    var seriesArray = [];
-    if (jsonData && jsonData.metrics && jsonData.metrics.mapred && jsonData.metrics.mapred.jobtracker) {
-      for ( var name in jsonData.metrics.mapred.jobtracker) {
-        var displayName;
-        var seriesData = jsonData.metrics.mapred.jobtracker[name];
-        switch (name) {
-          case "jobs_running":
-            displayName = "Running";
-            break;
-          case "jobs_failed":
-            displayName = "Failed";
-            break;
-          case "jobs_completed":
-            displayName = "Succeeded";
-            break;
-          case "jobs_preparing":
-            displayName = "Preparing";
-            break;
-          case "jobs_submitted":
-            displayName = "Submitted";
-            break;
-          default:
-            break;
-        }
-        if (seriesData) {
-          seriesArray.push(this.transformData(seriesData, displayName));
-        }
-      }
-    }
-    return seriesArray;
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/service/info/metrics/mapreduce/jvm_heap.js b/branch-1.2/ambari-web/app/views/main/service/info/metrics/mapreduce/jvm_heap.js
deleted file mode 100644
index 6f9ae9a..0000000
--- a/branch-1.2/ambari-web/app/views/main/service/info/metrics/mapreduce/jvm_heap.js
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * @class
- * 
- * This is a view for showing JobTracker JVM heap usage
- * 
- * @extends App.ChartLinearTimeView
- * @extends Ember.Object
- * @extends Ember.View
- */
-App.ChartServiceMetricsMapReduce_JVMHeap = App.ChartLinearTimeView.extend({
-  id: "service-metrics-mapreduce-jvm-heap",
-  title: Em.I18n.t('services.service.info.metrics.mapreduce.jvmHeap'),
-  yAxisFormatter: App.ChartLinearTimeView.BytesFormatter,
-  renderer: 'line',
-  sourceUrl: "/hosts/{jobTrackerNode}/host_components/JOBTRACKER?fields=metrics/jvm/memNonHeapUsedM[{fromSeconds},{toSeconds},{stepSeconds}],metrics/jvm/memNonHeapCommittedM[{fromSeconds},{toSeconds},{stepSeconds}],metrics/jvm/memHeapUsedM[{fromSeconds},{toSeconds},{stepSeconds}],metrics/jvm/memHeapCommittedM[{fromSeconds},{toSeconds},{stepSeconds}]",
-  mockUrl: "/data/services/metrics/mapreduce/jvm_heap.json",
-
-  transformToSeries: function (jsonData) {
-    var seriesArray = [];
-    var MB = Math.pow(2, 20);
-    if (jsonData && jsonData.metrics && jsonData.metrics.jvm) {
-      for ( var name in jsonData.metrics.jvm) {
-        var displayName;
-        var seriesData = jsonData.metrics.jvm[name];
-        switch (name) {
-          case "memHeapCommittedM":
-            displayName = "Heap Memory Committed";
-            break;
-          case "memNonHeapUsedM":
-            displayName = "Non Heap Memory Used";
-            break;
-          case "memHeapUsedM":
-            displayName = "Heap Memory Used";
-            break;
-          case "memNonHeapCommittedM":
-            displayName = "Non Heap Memory Committed";
-            break;
-          default:
-            break;
-        }
-        if (seriesData) {
-          var s = this.transformData(seriesData, displayName);
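-          // JVM heap metrics are reported in megabytes (the trailing 'M' in the
-          // metric names); convert to bytes so the BytesFormatter y-axis is correct.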
-          for (var i = 0; i < s.data.length; i++) {
-            s.data[i].y *= MB;
-          }
-          seriesArray.push(s);
-        }
-      }
-    }
-    return seriesArray;
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/service/info/metrics/mapreduce/jvm_threads.js b/branch-1.2/ambari-web/app/views/main/service/info/metrics/mapreduce/jvm_threads.js
deleted file mode 100644
index a71c616..0000000
--- a/branch-1.2/ambari-web/app/views/main/service/info/metrics/mapreduce/jvm_threads.js
+++ /dev/null
@@ -1,66 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * @class
- * 
- * This is a view for showing JobTracker JVM thread states
- * 
- * @extends App.ChartLinearTimeView
- * @extends Ember.Object
- * @extends Ember.View
- */
-App.ChartServiceMetricsMapReduce_JVMThreads = App.ChartLinearTimeView.extend({
-  id: "service-metrics-mapreduce-jvm-threads",
-  title: Em.I18n.t('services.service.info.metrics.mapreduce.jvmThreads'),
-  renderer: 'line',
-  sourceUrl: "/hosts/{jobTrackerNode}/host_components/JOBTRACKER?fields=metrics/jvm/threadsRunnable[{fromSeconds},{toSeconds},{stepSeconds}],metrics/jvm/threadsBlocked[{fromSeconds},{toSeconds},{stepSeconds}],metrics/jvm/threadsWaiting[{fromSeconds},{toSeconds},{stepSeconds}],metrics/jvm/threadsTimedWaiting[{fromSeconds},{toSeconds},{stepSeconds}]",
-  mockUrl: "/data/services/metrics/mapreduce/jvm_threads.json",
-
-  transformToSeries: function (jsonData) {
-    var seriesArray = [];
-    if (jsonData && jsonData.metrics && jsonData.metrics.jvm) {
-      for ( var name in jsonData.metrics.jvm) {
-        var displayName;
-        var seriesData = jsonData.metrics.jvm[name];
-        switch (name) {
-          case "threadsBlocked":
-            displayName = "Threads Blocked";
-            break;
-          case "threadsWaiting":
-            displayName = "Threads Waiting";
-            break;
-          case "threadsTimedWaiting":
-            displayName = "Threads Timed Waiting";
-            break;
-          case "threadsRunnable":
-            displayName = "Threads Runnable";
-            break;
-          default:
-            break;
-        }
-
-        if (seriesData) {
-          seriesArray.push(this.transformData(seriesData, displayName));
-        }
-      }
-    }
-    return seriesArray;
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/service/info/metrics/mapreduce/map_slots.js b/branch-1.2/ambari-web/app/views/main/service/info/metrics/mapreduce/map_slots.js
deleted file mode 100644
index 03dab99..0000000
--- a/branch-1.2/ambari-web/app/views/main/service/info/metrics/mapreduce/map_slots.js
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * @class
- * 
- * This is a view for showing MapReduce map slot usage
- * 
- * @extends App.ChartLinearTimeView
- * @extends Ember.Object
- * @extends Ember.View
- */
-App.ChartServiceMetricsMapReduce_MapSlots = App.ChartLinearTimeView.extend({
-  id: "service-metrics-mapreduce-map-slots",
-  title: Em.I18n.t('services.service.info.metrics.mapreduce.mapSlots'),
-  renderer: 'line',
-  sourceUrl: "/services/MAPREDUCE/components/JOBTRACKER?fields=metrics/mapred/jobtracker/occupied_map_slots[{fromSeconds},{toSeconds},{stepSeconds}],metrics/mapred/jobtracker/reserved_map_slots[{fromSeconds},{toSeconds},{stepSeconds}]",
-  mockUrl: "/data/services/metrics/mapreduce/map_slots.json",
-
-  transformToSeries: function (jsonData) {
-    var seriesArray = [];
-    if (jsonData && jsonData.metrics && jsonData.metrics.mapred && jsonData.metrics.mapred.jobtracker) {
-      for ( var name in jsonData.metrics.mapred.jobtracker) {
-        var displayName;
-        var seriesData = jsonData.metrics.mapred.jobtracker[name];
-        switch (name) {
-          case "reserved_map_slots":
-            displayName = "Map Slots Reserved";
-            break;
-          case "occupied_map_slots":
-            displayName = "Map Slots Occupied";
-            break;
-          default:
-            break;
-        }
-        if (seriesData) {
-          seriesArray.push(this.transformData(seriesData, displayName));
-        }
-      }
-    }
-    return seriesArray;
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/service/info/metrics/mapreduce/reduce_slots.js b/branch-1.2/ambari-web/app/views/main/service/info/metrics/mapreduce/reduce_slots.js
deleted file mode 100644
index f29b7a3..0000000
--- a/branch-1.2/ambari-web/app/views/main/service/info/metrics/mapreduce/reduce_slots.js
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * @class
- * 
- * This is a view for showing MapReduce reduce slot usage
- * 
- * @extends App.ChartLinearTimeView
- * @extends Ember.Object
- * @extends Ember.View
- */
-App.ChartServiceMetricsMapReduce_ReduceSlots = App.ChartLinearTimeView.extend({
-  id: "service-metrics-mapreduce-reduce-slots",
-  title: Em.I18n.t('services.service.info.metrics.mapreduce.reduceSlots'),
-  renderer: 'line',
-  sourceUrl: "/services/MAPREDUCE/components/JOBTRACKER?fields=metrics/mapred/jobtracker/occupied_reduce_slots[{fromSeconds},{toSeconds},{stepSeconds}],metrics/mapred/jobtracker/reserved_reduce_slots[{fromSeconds},{toSeconds},{stepSeconds}]",
-  mockUrl: "/data/services/metrics/mapreduce/reduce_slots.json",
-
-  transformToSeries: function (jsonData) {
-    var seriesArray = [];
-    if (jsonData && jsonData.metrics && jsonData.metrics.mapred && jsonData.metrics.mapred.jobtracker) {
-      for ( var name in jsonData.metrics.mapred.jobtracker) {
-        var displayName;
-        var seriesData = jsonData.metrics.mapred.jobtracker[name];
-        switch (name) {
-          case "reserved_reduce_slots":
-            displayName = "Reduce Slots Reserved";
-            break;
-          case "occupied_reduce_slots":
-            displayName = "Reduce Slots Occupied";
-            break;
-          default:
-            break;
-        }
-        if (seriesData) {
-          seriesArray.push(this.transformData(seriesData, displayName));
-        }
-      }
-    }
-    return seriesArray;
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/service/info/metrics/mapreduce/rpc.js b/branch-1.2/ambari-web/app/views/main/service/info/metrics/mapreduce/rpc.js
deleted file mode 100644
index 189104f..0000000
--- a/branch-1.2/ambari-web/app/views/main/service/info/metrics/mapreduce/rpc.js
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * @class
- * 
- * This is a view for showing JobTracker RPC queue wait time
- * 
- * @extends App.ChartLinearTimeView
- * @extends Ember.Object
- * @extends Ember.View
- */
-App.ChartServiceMetricsMapReduce_RPC = App.ChartLinearTimeView.extend({
-  id: "service-metrics-mapreduce-rpc",
-  title: Em.I18n.t('services.service.info.metrics.mapreduce.rpc'),
-  yAxisFormatter: App.ChartLinearTimeView.TimeElapsedFormatter,
-  sourceUrl: "/hosts/{jobTrackerNode}/host_components/JOBTRACKER?fields=metrics/rpc/RpcQueueTime_avg_time[{fromSeconds},{toSeconds},{stepSeconds}]",
-  mockUrl: "/data/services/metrics/mapreduce/rpc.json",
-
-  transformToSeries: function (jsonData) {
-    var seriesArray = [];
-    if (jsonData && jsonData.metrics && jsonData.metrics.rpc) {
-      for ( var name in jsonData.metrics.rpc) {
-        var displayName;
-        var seriesData = jsonData.metrics.rpc[name];
-        switch (name) {
-          case "RpcQueueTime_avg_time":
-            displayName = "Queue Average Wait Time";
-            break;
-          default:
-            break;
-        }
-        if (seriesData) {
-          seriesArray.push(this.transformData(seriesData, displayName));
-        }
-      }
-    }
-    return seriesArray;
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/service/info/metrics/mapreduce/tasks_running_waiting.js b/branch-1.2/ambari-web/app/views/main/service/info/metrics/mapreduce/tasks_running_waiting.js
deleted file mode 100644
index 001dc10..0000000
--- a/branch-1.2/ambari-web/app/views/main/service/info/metrics/mapreduce/tasks_running_waiting.js
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-/**
- * @class
- * 
- * This is a view for showing running and waiting MapReduce tasks
- * 
- * @extends App.ChartLinearTimeView
- * @extends Ember.Object
- * @extends Ember.View
- */
-App.ChartServiceMetricsMapReduce_TasksRunningWaiting = App.ChartLinearTimeView.extend({
-  id: "service-metrics-mapreduce-tasks-running-waiting",
-  title: Em.I18n.t('services.service.info.metrics.mapreduce.tasksRunningWaiting'),
-  renderer: 'line',
-  sourceUrl: "/services/MAPREDUCE/components/JOBTRACKER?fields=metrics/mapred/jobtracker/running_maps[{fromSeconds},{toSeconds},{stepSeconds}],metrics/mapred/jobtracker/running_reduces[{fromSeconds},{toSeconds},{stepSeconds}],metrics/mapred/jobtracker/waiting_maps[{fromSeconds},{toSeconds},{stepSeconds}],metrics/mapred/jobtracker/waiting_reduces[{fromSeconds},{toSeconds},{stepSeconds}]",
-  mockUrl: "/data/services/metrics/mapreduce/tasks_running_waiting.json",
-
-  transformToSeries: function (jsonData) {
-    var seriesArray = [];
-    if (jsonData && jsonData.metrics && jsonData.metrics.mapred && jsonData.metrics.mapred.jobtracker) {
-      for ( var name in jsonData.metrics.mapred.jobtracker) {
-        var displayName;
-        var seriesData = jsonData.metrics.mapred.jobtracker[name];
-        switch (name) {
-          case "running_maps":
-            displayName = "Running Map Tasks";
-            break;
-          case "running_reduces":
-            displayName = "Running Reduce Tasks";
-            break;
-          case "waiting_maps":
-            displayName = "Waiting Map Tasks";
-            break;
-          case "waiting_reduces":
-            displayName = "Waiting Reduce Tasks";
-            break;
-          default:
-            break;
-        }
-        if (seriesData) {
-          seriesArray.push(this.transformData(seriesData, displayName));
-        }
-      }
-    }
-    return seriesArray;
-  }
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/main/service/info/summary.js b/branch-1.2/ambari-web/app/views/main/service/info/summary.js
deleted file mode 100644
index 30e1a04..0000000
--- a/branch-1.2/ambari-web/app/views/main/service/info/summary.js
+++ /dev/null
@@ -1,398 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-var App = require('app');
-
-App.AlertItemView = Em.View.extend({
-  tagName:"li",
-  templateName: require('templates/main/service/info/summary_alert'),
-  classNameBindings: ["status"],
-  status: function () {
-    return "status-" + this.get("content.status");
-  }.property('content'),
-  didInsertElement: function () {
-    // Tooltips for alerts need to be enabled.
-    $("div[rel=tooltip]").tooltip();
-    $(".tooltip").remove();
-  }
-});
-
-App.MainServiceInfoSummaryView = Em.View.extend({
-  templateName: require('templates/main/service/info/summary'),
-  attributes:null,
-  serviceStatus:{
-    hdfs:false,
-    mapreduce:false,
-    hbase:false,
-    zookeeper:false,
-    oozie:false,
-    hive:false,
-    ganglia:false,
-    nagios:false
-  },
-
-  clients: function () {
-    var service = this.get('controller.content');
-    if (service.get("id") == "OOZIE" || service.get("id") == "ZOOKEEPER") {
-      return service.get('hostComponents').filterProperty('isClient');
-    }
-    return [];
-  }.property('controller.content'),
-
-  hasManyServers: function () {
-    return this.get('servers').length > 1;
-  }.property('servers'),
-
-  hasManyClients: function () {
-    return this.get('clients').length > 1;
-  }.property('clients'),
-
-  servers: function () {
-    var result = [];
-    var service = this.get('controller.content');
-    if (service.get("id") == "ZOOKEEPER") {
-      var servers = service.get('hostComponents').filterProperty('isMaster');
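-      // Show the first two ZooKeeper servers by name; collapse any remaining
-      // servers into a count. The isComma/isAnd flags drive the separators
-      // rendered by the template.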
-      if (servers.length > 0) {
-        result = [{
-          'host': servers[0].get('displayName'),
-          'isComma': false,
-          'isAnd': false
-        }];
-      }
-      if (servers.length > 1) {
-        result[0].isComma = true;
-        result.push({
-          'host': servers[1].get('displayName'),
-          'isComma': false,
-          'isAnd': false
-        });
-      }
-      if (servers.length > 2) {
-        result[1].isAnd = true;
-        result.push({
-          'host': Em.I18n.t('services.service.info.summary.serversHostCount').format(servers.length - 2),
-          'isComma': false,
-          'isAnd': false
-        });
-      }
-    }
-    return result;
-  }.property('controller.content'),
-
-  monitors: function () {
-    var result = '';
-    var service = this.get('controller.content');
-    if (service.get("id") == "GANGLIA") {
-      var monitors = service.get('hostComponents').filterProperty('isMaster', false);
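-      // Pick the singular or plural label depending on how many monitors are running.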
-      if (monitors.length) {
-        result = monitors.length - 1 ? Em.I18n.t('services.service.info.summary.hostsRunningMonitor').format(monitors.length) : Em.I18n.t('services.service.info.summary.hostRunningMonitor');
-      }
-    }
-    return result;
-  }.property('controller.content'),
-
-  /**
-   * Property related to GANGLIA service, is unused for other services
-   * @return {Object}
-   */
-  monitorsObj: function(){
-    var service = this.get('controller.content');
-    if (service.get("id") == "GANGLIA") {
-      var monitors = service.get('hostComponents').filterProperty('isMaster', false);
-      if (monitors.length) {
-        return monitors[0];
-      }
-    }
-    return {};
-  }.property('controller.content'),
-
-  /**
-   * Property related to ZOOKEEPER service, is unused for other services
-   * @return {Object}
-   */
-  serversHost: function() {
-    var service = this.get('controller.content');
-    if (service.get("id") == "ZOOKEEPER") {
-      var servers = service.get('hostComponents').filterProperty('isMaster');
-      if (servers.length > 0) {
-        return servers[0];
-      }
-    }
-    return {};
-  }.property('controller.content'),
-
-  /**
-   * Property related to OOZIE and ZOOKEEPER services, is unused for other services
-   * @return {Object}
-   */
-  clientObj: function() {
-    var service = this.get('controller.content');
-    if (service.get("id") == "OOZIE" || service.get("id") == "ZOOKEEPER") {
-      var clients = service.get('hostComponents').filterProperty('isMaster', false);
-      if (clients.length > 0) {
-        return clients[0];
-      }
-    }
-    return {};
-  }.property('controller.content'),
-
-  data:{
-    hive:{
-      "database":"PostgreSQL",
-      "databaseName":"hive",
-      "user":"hive"
-    }
-  },
-  gangliaServer: function () {
-    var service = this.get('controller.content');
-    if (service.get("id") == "GANGLIA") {
-      return service.get("hostComponents").findProperty('isMaster', true).get("host").get("publicHostName");
-    } else {
-      return "";
-    }
-  }.property('controller.content'),
-  nagiosServer: function () {
-    var service = this.get('controller.content');
-    if (service.get("id") == "NAGIOS") {
-      return service.get("hostComponents").findProperty('isMaster', true).get("host").get("publicHostName");
-    } else {
-      return "";
-    }
-  }.property('controller.content'),
-  oozieServer: function () {
-    var service = this.get('controller.content');
-    if (service.get("id") == "OOZIE") {
-      return service.get("hostComponents").findProperty('isMaster', true).get("host").get("publicHostName");
-    } else {
-      return "";
-    }
-  }.property('controller.content'),
-  /**
-   * Returns Hive component information in the following format:
-   * {
-   *   label: "Component Name",
-   *   host: Host
-   * }
-   */
-  hiveComponentsInfo: function () {
-    var componentInfos = [];
-    var service = this.get('controller.content');
-    if (service.get("id") == "HIVE") {
-      var self = this;
-      var components = service.get("hostComponents");
-      if (components) {
-        components.forEach(function (component) {
-          var ci = {
-            label: component.get('displayName'),
-            host: component.get('host')
-          };
-          if (component.get('id') == 'MYSQL_SERVER') {
-            ci.label = self.t('services.hive.databaseComponent');
-          }
-          componentInfos.push(ci);
-        });
-      }
-    }
-    return componentInfos;
-  }.property('controller.content'),
-  service:function () {
-    var svc = this.get('controller.content');
-    var svcName = svc.get('serviceName');
-    if (svcName) {
-      switch (svcName.toLowerCase()) {
-        case 'hdfs':
-          svc = App.HDFSService.find().objectAt(0);
-          break;
-        case 'mapreduce':
-          svc = App.MapReduceService.find().objectAt(0);
-          break;
-        case 'hbase':
-          svc = App.HBaseService.find().objectAt(0);
-          break;
-        default:
-          break;
-      }
-    }
-    return svc;
-  }.property('controller.content.serviceName').volatile(),
-
-  isHide:true,
-  moreStatsView:Em.View.extend({
-    tagName:"a",
-    template:Ember.Handlebars.compile('{{t services.service.summary.moreStats}}'),
-    attributeBindings:[ 'href' ],
-    classNames:[ 'more-stats' ],
-    click:function (event) {
-      this._parentView._parentView.set('isHide', false);
-      this.remove();
-    },
-    href:'javascript:void(null)'
-  }),
-
-  serviceName:function () {
-    return this.get('service.serviceName');
-  }.property('service'),
-
-  oldServiceName:'',
-
-  /**
-   * Contains graphs for this particular service
-   */
-  serviceMetricGraphs:function () {
-    var svcName = this.get('service.serviceName');
-    var graphs = [];
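-    // graphs is a two-dimensional array: each inner array is one row of charts on the page.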
-    if (svcName) {
-      switch (svcName.toLowerCase()) {
-        case 'hdfs':
-          graphs = [ [App.ChartServiceMetricsHDFS_SpaceUtilization.extend(),
-            App.ChartServiceMetricsHDFS_FileOperations.extend(),
-            App.ChartServiceMetricsHDFS_BlockStatus.extend(),
-            App.ChartServiceMetricsHDFS_IO.extend()],
-            [App.ChartServiceMetricsHDFS_RPC.extend(),
-            App.ChartServiceMetricsHDFS_GC.extend(),
-            App.ChartServiceMetricsHDFS_JVMHeap.extend(),
-            App.ChartServiceMetricsHDFS_JVMThreads.extend()]];
-          break;
-        case 'mapreduce':
-          graphs = [ [App.ChartServiceMetricsMapReduce_JobsStatus.extend(),
-            App.ChartServiceMetricsMapReduce_TasksRunningWaiting.extend(),
-            App.ChartServiceMetricsMapReduce_MapSlots.extend(),
-            App.ChartServiceMetricsMapReduce_ReduceSlots.extend()],
-            [App.ChartServiceMetricsMapReduce_GC.extend(),
-            App.ChartServiceMetricsMapReduce_RPC.extend(),
-            App.ChartServiceMetricsMapReduce_JVMHeap.extend(),
-            App.ChartServiceMetricsMapReduce_JVMThreads.extend()]];
-          break;
-        case 'hbase':
-          graphs = [  [App.ChartServiceMetricsHBASE_ClusterRequests.extend(),
-            App.ChartServiceMetricsHBASE_RegionServerReadWriteRequests.extend(),
-            App.ChartServiceMetricsHBASE_RegionServerRegions.extend(),
-            App.ChartServiceMetricsHBASE_RegionServerQueueSize.extend()],
-            [App.ChartServiceMetricsHBASE_HlogSplitTime.extend(),
-            App.ChartServiceMetricsHBASE_HlogSplitSize.extend()]];
-          break;
-        default:
-          break;
-      }
-    }
-    return graphs;
-  }.property('service.serviceName'),
-
-  loadServiceSummary:function () {
-
-    var serviceName = this.get('serviceName');
-    if (!serviceName) {
-      return;
-    }
-
-    if (this.get('oldServiceName')) {
-      // do not delete it!
-      return;
-    }
-
-    var summaryView = this;
-    var serviceStatus = summaryView.get('serviceStatus');
-    $.each(serviceStatus, function (key, value) {
-      if (key.toUpperCase() == serviceName) {
-        summaryView.set('serviceStatus.' + key, true);
-      } else {
-        summaryView.set('serviceStatus.' + key, false);
-      }
-    });
-
-    console.log('load ', serviceName, ' info');
-    this.set('oldServiceName', serviceName);
-    serviceName = serviceName.toLowerCase();
-  }.observes('serviceName'),
-
-  gangliaUrl:function () {
-    var gangliaUrl = App.router.get('clusterController.gangliaUrl');
-    var svcName = this.get('service.serviceName');
-    if (svcName) {
-      switch (svcName.toLowerCase()) {
-        case 'hdfs':
-          gangliaUrl += "/?r=hour&cs=&ce=&m=&s=by+name&c=HDPNameNode&tab=m&vn=";
-          break;
-        case 'mapreduce':
-          gangliaUrl += "/?r=hour&cs=&ce=&m=&s=by+name&c=HDPJobTracker&tab=m&vn=";
-          break;
-        case 'hbase':
-          gangliaUrl += "?r=hour&cs=&ce=&m=&s=by+name&c=HDPHBaseMaster&tab=m&vn=";
-          break;
-        default:
-          break;
-      }
-    }
-    return gangliaUrl;
-  }.property('App.router.clusterController.gangliaUrl', 'service.serviceName'),
-
-  didInsertElement:function () {
-    // We have to make the height of the Alerts section
-    // match the height of the Summary section.
-    var summaryTable = document.getElementById('summary-info');
-    var alertsList = document.getElementById('summary-alerts-list');
-    if (summaryTable && alertsList) {
-      var rows = $(summaryTable).find('tr');
-      if (rows != null && rows.length > 0) {
-        var minimumHeight = 50;
-        var calculatedHeight = summaryTable.clientHeight;
-        if (calculatedHeight < minimumHeight) {
-          $(alertsList).attr('style', "height:" + minimumHeight + "px;");
-          $(summaryTable).append('<tr><td></td></tr>');
-          $(summaryTable).attr('style', "height:" + minimumHeight + "px;");
-        } else {
-          $(alertsList).attr('style', "height:" + calculatedHeight + "px;");
-        }
-      } else if (alertsList.clientHeight > 0) {
-        $(summaryTable).append('<tr><td></td></tr>');
-        $(summaryTable).attr('style', "height:" + alertsList.clientHeight + "px;");
-      }
-    }
-  },
-
-  clientHosts:App.Host.find(),
-
-  clientHostsLength:function () {
-    var text = this.t('services.service.summary.clientCount');
-    return text.format(this.get('clientHosts.length'));
-  }.property('clientHosts'),
-
-  clientComponents:function () {
-    return App.HostComponent.find().filterProperty('isClient', true);
-  }.property(),
-
-  clientComponentsString:function () {
-    var components = this.get('clientComponents');
-    var names = [];
-    components.forEach(function (component) {
-      if (names.indexOf(component.get('displayName')) == -1) {
-        names.push(component.get('displayName'));
-      }
-    });
-
-    return names.length ? names.join(', ') : false;
-  }.property('clientComponents')
-});
\ No newline at end of file
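
The clientComponentsString property above deduplicates component display names before joining them. A minimal standalone sketch of that dedup-and-join step in plain JavaScript (the sample component list is illustrative):

    function clientComponentsString(components) {
      // Collect each display name once, preserving first-seen order.
      var names = [];
      components.forEach(function (component) {
        if (names.indexOf(component.displayName) === -1) {
          names.push(component.displayName);
        }
      });
      // Mirror the view's contract: a joined string, or false when empty.
      return names.length ? names.join(', ') : false;
    }

    // Example: duplicate "HDFS Client" entries collapse to one.
    clientComponentsString([
      { displayName: 'HDFS Client' },
      { displayName: 'HDFS Client' },
      { displayName: 'Pig' }
    ]); // => "HDFS Client, Pig"
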
diff --git a/branch-1.2/ambari-web/app/views/main/service/item.js b/branch-1.2/ambari-web/app/views/main/service/item.js
deleted file mode 100644
index f6dd4ba..0000000
--- a/branch-1.2/ambari-web/app/views/main/service/item.js
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.MainServiceItemView = Em.View.extend({
-  templateName: require('templates/main/service/item'),
-  maintenance: function(){
-    var options = [];
-    var service = this.get('controller.content');
-    switch(service.get('serviceName')) {
-//      case 'HDFS':
-//        options.push({action: 'runRebalancer', 'label': Em.I18n.t('services.service.actions.run.rebalancer')});
-//        break;
-//      case 'HBASE':
-//        options.push({action: 'runCompaction', 'label': Em.I18n.t('services.service.actions.run.compaction')});
-//        break;
-      default:
-        options.push({action: 'runSmokeTest', 'label': Em.I18n.t('services.service.actions.run.smoke')});
-    }
-    return options;
-  }.property('controller.content'),
-  hasMaintenanceControl: function(){
-    return this.get("controller.content.isMaintained");
-  }.property('controller.content.isMaintained'),
-  hasConfigTab: function(){
-    return this.get("controller.content.isConfigurable");
-  }.property('controller.content.isConfigurable')
-});
-
-App.MainServiceItemOperations = Em.View.extend({
-  content: null,
-  classNames: ['background-operations'],
-  classNameBindings: ['isOpen'],
-  isOpen: false,
-  logDetails: null,
-  isOpenShowLog: false,
-  iconClass: function(){
-    return this.get('isOpen') ? 'icon-minus' : 'icon-plus';
-  }.property('isOpen'),
-  openDetails: function(){
-    this.set('isOpen', !this.get('isOpen'));
-  },
-  showOperationLog:function(){
-    this.set('isOpenShowLog', !this.get('isOpenShowLog'));
-  }
-});
\ No newline at end of file
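
The maintenance property above builds a service's action menu from its serviceName; the commented-out cases show per-service actions that were disabled in this release. A plain-JavaScript sketch of the same dispatch (labels hard-coded here instead of Em.I18n lookups):

    function maintenanceOptions(serviceName) {
      var options = [];
      switch (serviceName) {
        // Per-service actions (e.g. HDFS rebalancer, HBase compaction)
        // were disabled; every service falls through to the smoke test.
        default:
          options.push({ action: 'runSmokeTest', label: 'Run Smoke Test' });
      }
      return options;
    }

    maintenanceOptions('HDFS');
    // => [{ action: 'runSmokeTest', label: 'Run Smoke Test' }]
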
diff --git a/branch-1.2/ambari-web/app/views/main/service/menu.js b/branch-1.2/ambari-web/app/views/main/service/menu.js
deleted file mode 100644
index c911e84..0000000
--- a/branch-1.2/ambari-web/app/views/main/service/menu.js
+++ /dev/null
@@ -1,74 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.MainServiceMenuView = Em.CollectionView.extend({
-  content:function () {
-    var items = App.router.get('mainServiceController.content').filter(function(item){
-      if(['PIG', 'SQOOP', 'HCATALOG'].contains(item.get('id'))){
-        return false;
-      }
-      return true;
-    });
-    return items;
-  }.property('App.router.mainServiceController.content'),
-
-
-  didInsertElement:function () {
-    App.router.location.addObserver('lastSetURL', this, 'renderOnRoute');
-    this.renderOnRoute();
-  },
-
-  activeServiceId:null,
-
-  /**
-   *    Syncs navigation menu with requested URL
-   */
-  renderOnRoute:function () {
-    var last_url = App.router.location.lastSetURL || location.href.replace(/^[^#]*#/, '');
-    if (last_url.substr(1, 4) !== 'main' || !this._childViews) {
-      return;
-    }
-    var reg = /^\/main\/services\/(\S+)\//g;
-    var sub_url = reg.exec(last_url);
-    var service_id = (null != sub_url) ? sub_url[1] : 1;
-    this.set('activeServiceId', service_id);
-
-  },
-
-  tagName:'ul',
-  classNames:["nav", "nav-list", "nav-services"],
-
-  itemViewClass:Em.View.extend({
-    classNameBindings:["active", "clients"],
-    active:function () {
-      return this.get('content.id') == this.get('parentView.activeServiceId') ? 'active' : '';
-    }.property('parentView.activeServiceId'),
-    alertsCount: function () {
-      var allAlerts = App.router.get('clusterController.alerts');
-      var serviceId = this.get('content.serviceName');
-      if (serviceId) {
-        return allAlerts.filterProperty('serviceType', serviceId).filterProperty('isOk', false).filterProperty('ignoredForServices', false).length;
-      }
-      return 0;
-    }.property('App.router.clusterController.alerts'),
-
-    templateName:require('templates/main/service/menu_item')
-  })
-});
\ No newline at end of file
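
renderOnRoute above extracts the active service id from the last-set URL with the regex /^\/main\/services\/(\S+)\//. A small standalone check of that extraction (sample URLs are illustrative; lastIndex is reset here because this sketch reuses one global-flagged regex, where the view builds a fresh one per call):

    var reg = /^\/main\/services\/(\S+)\//g;

    function activeServiceId(lastUrl) {
      reg.lastIndex = 0; // reset since the regex carries the g flag
      var match = reg.exec(lastUrl);
      return match !== null ? match[1] : 1; // same fallback as the view
    }

    activeServiceId('/main/services/HDFS/summary'); // => "HDFS"
    activeServiceId('/main/dashboard');             // => 1
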
diff --git a/branch-1.2/ambari-web/app/views/wizard/controls_view.js b/branch-1.2/ambari-web/app/views/wizard/controls_view.js
deleted file mode 100644
index ac7e394..0000000
--- a/branch-1.2/ambari-web/app/views/wizard/controls_view.js
+++ /dev/null
@@ -1,470 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-/**
- * Abstract view for config fields.
- * Add popover support to control
- */
-App.ServiceConfigPopoverSupport = Ember.Mixin.create({
-
-  /**
-   * Config object. It will be an instance of App.ServiceConfigProperty.
-   */
-  serviceConfig: null,
-  placeholderBinding: 'serviceConfig.defaultValue',
-  isPopoverEnabled: true,
-
-  didInsertElement: function () {
-    if (this.get('isPopoverEnabled') !== 'false') {
-      this.$().popover({
-        title: Em.I18n.t('installer.controls.serviceConfigPopover.title').format(this.get('serviceConfig.displayName'), this.get('serviceConfig.name')),
-        content: this.get('serviceConfig.description'),
-        placement: 'right',
-        trigger: 'hover'
-      });
-    }
-  }
-});
-
-/**
- * Default input control
- * @type {*}
- */
-App.ServiceConfigTextField = Ember.TextField.extend(App.ServiceConfigPopoverSupport, {
-
-  valueBinding: 'serviceConfig.value',
-  classNameBindings: 'textFieldClassName',
-  placeholderBinding: 'serviceConfig.defaultValue',
-
-  textFieldClassName: function () {
-    // sets the width of the field depending on display type
-    if (['directory', 'url', 'email', 'user', 'host'].contains(this.get('serviceConfig.displayType'))) {
-      return ['span6'];
-    } else if (this.get('serviceConfig.displayType') === 'advanced') {
-      return ['span6'];
-    } else {
-      return ['input-small'];
-    }
-  }.property('serviceConfig.displayType'),
-
-  disabled: function () {
-    return !this.get('serviceConfig.isEditable');
-  }.property('serviceConfig.isEditable')
-
-});
-
-/**
- * Customized input control with a unit type specified
- * @type {*}
- */
-App.ServiceConfigTextFieldWithUnit = Ember.View.extend(App.ServiceConfigPopoverSupport, {
-  valueBinding: 'serviceConfig.value',
-  classNames: [ 'input-append' ],
-  placeholderBinding: 'serviceConfig.defaultValue',
-
-  template: Ember.Handlebars.compile('{{view App.ServiceConfigTextField serviceConfigBinding="view.serviceConfig" isPopoverEnabled="false"}}<span class="add-on">{{view.serviceConfig.unit}}</span>'),
-
-  disabled: function () {
-    return !this.get('serviceConfig.isEditable');
-  }.property('serviceConfig.isEditable')
-
-});
-
-/**
- * Password control
- * @type {*}
- */
-App.ServiceConfigPasswordField = Ember.TextField.extend({
-
-  serviceConfig: null,
-  type: 'password',
-  valueBinding: 'serviceConfig.value',
-  classNames: [ 'span3' ],
-  placeholder: 'Type password',
-
-  template: Ember.Handlebars.compile('{{view view.retypePasswordView placeholder="Retype password"}}'),
-
-  retypePasswordView: Ember.TextField.extend({
-    type: 'password',
-    classNames: [ 'span3', 'retyped-password' ],
-    valueBinding: 'parentView.serviceConfig.retypedPassword'
-  })
-
-});
-
-/**
- * Textarea control
- * @type {*}
- */
-App.ServiceConfigTextArea = Ember.TextArea.extend(App.ServiceConfigPopoverSupport, {
-
-  valueBinding: 'serviceConfig.value',
-  rows: 4,
-  classNames: ['span6', 'directories'],
-  placeholderBinding: 'serviceConfig.defaultValue',
-
-  disabled: function () {
-    return !this.get('serviceConfig.isEditable');
-  }.property('serviceConfig.isEditable')
-
-});
-
-/**
- * Textarea control with bigger height
- * @type {*}
- */
-App.ServiceConfigBigTextArea = App.ServiceConfigTextArea.extend({
-  rows: 10
-});
-
-/**
- * Checkbox control
- * @type {*}
- */
-App.ServiceConfigCheckbox = Ember.Checkbox.extend(App.ServiceConfigPopoverSupport, {
-
-  checkedBinding: 'serviceConfig.value',
-
-  disabled: function () {
-    return !this.get('serviceConfig.isEditable');
-  }.property('serviceConfig.isEditable')
-
-});
-
-App.ServiceConfigRadioButtons = Ember.View.extend({
-  template: Ember.Handlebars.compile([
-    '{{#each option in view.options}}',
-    '<label class="radio">',
-    '{{#view App.ServiceConfigRadioButton nameBinding = "view.name" valueBinding = "option.displayName"}}',
-    '{{/view}}',
-    '{{option.displayName}} &nbsp;',
-    '</label>',
-    '{{/each}}'
-  ].join('\n')),
-  serviceConfig: null,
-  categoryConfigs: null,
-  nameBinding: 'serviceConfig.radioName',
-  optionsBinding: 'serviceConfig.options',
-  disabled: function () {
-    return !this.get('serviceConfig.isEditable');
-  }.property('serviceConfig.isEditable')
-});
-
-App.ServiceConfigRadioButton = Ember.Checkbox.extend({
-  tagName: 'input',
-  attributeBindings: ['type', 'name', 'value', 'checked'],
-  checked: false,
-  type: 'radio',
-  name: null,
-  value: null,
-
-  didInsertElement: function () {
-    if (this.get('parentView.serviceConfig.value') === this.get('value')) {
-      this.set('checked', true);
-    }
-  },
-
-  click: function () {
-    this.set('checked', true);
-    this.onChecked();
-  },
-
-  onChecked: function () {
-    this.set('parentView.serviceConfig.value', this.get('value'));
-    var components = this.get('parentView.serviceConfig.options');
-    components.forEach(function (_component) {
-      _component.foreignKeys.forEach(function (_componentName) {
-        if (this.get('parentView.categoryConfigs').someProperty('name', _componentName)) {
-          var component = this.get('parentView.categoryConfigs').findProperty('name', _componentName);
-          if (_component.displayName === this.get('value')) {
-            component.set('isVisible', true);
-          } else {
-            component.set('isVisible', false);
-          }
-        }
-      }, this);
-    }, this);
-  }.observes('checked'),
-
-  disabled: function () {
-    return !this.get('parentView.serviceConfig.isEditable');
-  }.property('parentView.serviceConfig.isEditable')
-});
-
-App.ServiceConfigComboBox = Ember.Select.extend(App.ServiceConfigPopoverSupport, {
-  contentBinding: 'serviceConfig.options',
-  selectionBinding: 'serviceConfig.value',
-  classNames: [ 'span3' ],
-  disabled: function () {
-    return !this.get('serviceConfig.isEditable');
-  }.property('serviceConfig.isEditable')
-});
-
-
-/**
- * Base component for host config with popover support
- */
-App.ServiceConfigHostPopoverSupport = Ember.Mixin.create({
-
-  /**
-   * Config object. It will be an instance of App.ServiceConfigProperty.
-   */
-  serviceConfig: null,
-
-  didInsertElement: function () {
-    this.$().popover({
-      title: this.get('serviceConfig.displayName'),
-      content: this.get('serviceConfig.description'),
-      placement: 'right',
-      trigger: 'hover'
-    });
-  }
-});
-
-/**
- * Master host component.
- * Show the hostname without the ability to edit it
- * @type {*}
- */
-App.ServiceConfigMasterHostView = Ember.View.extend(App.ServiceConfigHostPopoverSupport, {
-
-  classNames: ['master-host', 'span6'],
-  valueBinding: 'serviceConfig.value',
-
-  template: Ember.Handlebars.compile('{{value}}')
-
-});
-
-/**
- * Base component to display Multiple hosts
- * @type {*}
- */
-App.ServiceConfigMultipleHostsDisplay = Ember.Mixin.create(App.ServiceConfigHostPopoverSupport, {
-
-  hasNoHosts: function () {
-    console.log('view', this.get('viewName')); //to know which View cause errors
-    console.log('controller', this.get('controller').name); //should be slaveComponentGroupsController
-    if (!this.get('value')) {
-      // debugger;
-      return true;
-    }
-    return this.get('value').length === 0;
-  }.property('value'),
-
-  hasOneHost: function () {
-    return this.get('value').length > 0;
-  }.property('value'),
-
-  hasMultipleHosts: function () {
-    return (this.get('value').length > 1 && typeof(this.get('value')) == 'object');
-  }.property('value'),
-
-  otherLength: function () {
-    var len = this.get('value').length;
-    if (len > 2) {
-      return Em.I18n.t('installer.controls.serviceConfigMultipleHosts.others').format(len - 1);
-    } else {
-      return Em.I18n.t('installer.controls.serviceConfigMultipleHosts.other');
-    }
-  }.property('value')
-
-});
-
-
-/**
- * Multiple master host component.
- * Show hostnames without the ability to edit them
- * @type {*}
- */
-App.ServiceConfigMasterHostsView = Ember.View.extend(App.ServiceConfigMultipleHostsDisplay, {
-
-  viewName: "serviceConfigMasterHostsView",
-  valueBinding: 'serviceConfig.value',
-
-  classNames: ['master-hosts', 'span6'],
-  templateName: require('templates/wizard/master_hosts'),
-
-  /**
-   * Onclick handler for link
-   */
-  showHosts: function () {
-    var serviceConfig = this.get('serviceConfig');
-    App.ModalPopup.show({
-      header: Em.I18n.t('installer.controls.serviceConfigMasterHosts.header').format(serviceConfig.category),
-      bodyClass: Ember.View.extend({
-        serviceConfig: serviceConfig,
-        templateName: require('templates/wizard/master_hosts_popup')
-      }),
-      onPrimary: function () {
-        this.hide();
-      },
-      secondary: null
-    });
-  }
-
-});
-
-/**
- * Show tabs list for slave hosts
- * @type {*}
- */
-App.SlaveComponentGroupsMenu = Em.CollectionView.extend({
-
-  content: function () {
-    return this.get('controller.componentGroups');
-  }.property('controller.componentGroups'),
-
-  tagName: 'ul',
-  classNames: ["nav", "nav-tabs"],
-
-  itemViewClass: Em.View.extend({
-    classNameBindings: ["active"],
-
-    active: function () {
-      return this.get('content.active');
-    }.property('content.active'),
-
-    errorCount: function () {
-      return this.get('content.properties').filterProperty('isValid', false).filterProperty('isVisible', true).get('length');
-    }.property('content.properties.@each.isValid', 'content.properties.@each.isVisible'),
-
-    template: Ember.Handlebars.compile('<a {{action showSlaveComponentGroup view.content target="controller"}} href="#"> {{view.content.name}}{{#if view.errorCount}}<span class="badge badge-important">{{view.errorCount}}</span>{{/if}}</a><i {{action removeSlaveComponentGroup view.content target="controller"}} class="icon-remove"></i>')
-  })
-});
-
-/**
- * <code>Add group</code> button
- * @type {*}
- */
-App.AddSlaveComponentGroupButton = Ember.View.extend({
-
-  tagName: 'span',
-  slaveComponentName: null,
-
-  didInsertElement: function () {
-    this.$().popover({
-      title: Em.I18n.t('installer.controls.addSlaveComponentGroupButton.title').format(this.get('slaveComponentName')),
-      content: Em.I18n.t('installer.controls.addSlaveComponentGroupButton.content').format(this.get('slaveComponentName'), this.get('slaveComponentName'), this.get('slaveComponentName')),
-      placement: 'right',
-      trigger: 'hover'
-    });
-  }
-
-});
-
-/**
- * Multiple Slave Hosts component
- * @type {*}
- */
-App.ServiceConfigSlaveHostsView = Ember.View.extend(App.ServiceConfigMultipleHostsDisplay, {
-
-  viewName: 'serviceConfigSlaveHostsView',
-
-  classNames: ['slave-hosts', 'span6'],
-
-  valueBinding: 'serviceConfig.value',
-
-  templateName: require('templates/wizard/slave_hosts')
-
-});
-
-/**
- * Properties for the currently active slave group
- * @type {*}
- */
-App.SlaveGroupPropertiesView = Ember.View.extend({
-
-  viewName: 'slaveGroupPropertiesView',
-
-  group: function () {
-    return this.get('controller.activeGroup');
-  }.property('controller.activeGroup'),
-
-  groupConfigs: function () {
-    console.log("************************************************************************");
-    console.log("The value of group is: " + this.get('group'));
-    console.log("************************************************************************");
-    return this.get('group.properties');
-  }.property('group.properties.@each').cacheable(),
-
-  errorCount: function () {
-    return this.get('group.properties').filterProperty('isValid', false).filterProperty('isVisible', true).get('length');
-  }.property('group.properties.@each.isValid', 'group.properties.@each.isVisible')
-});
-
-/**
- * DropDown component for <code>select hosts for groups</code> popup
- * @type {*}
- */
-App.SlaveComponentDropDownGroupView = Ember.View.extend({
-
-  viewName: "slaveComponentDropDownGroupView",
-
-  /**
-   * On change handler for <code>select hosts for groups</code> popup
-   * @param event
-   */
-  changeGroup: function (event) {
-    var host = this.get('content');
-    var groupName = $('#' + this.get('elementId') + ' select').val();
-    this.get('controller').changeHostGroup(host, groupName);
-  },
-
-  optionTag: Ember.View.extend({
-
-    /**
-     * Whether the current value (the OptionTag value) equals the host value (assigned to SlaveComponentDropDownGroupView.content)
-     */
-    selected: function () {
-      return this.get('parentView.content.group') === this.get('content');
-    }.property('content')
-  })
-});
-
-/**
- * Show info about current group
- * @type {*}
- */
-App.SlaveComponentChangeGroupNameView = Ember.View.extend({
-
-  contentBinding: 'controller.activeGroup',
-  classNames: ['control-group'],
-  classNameBindings: 'error',
-  error: false,
-  setError: function () {
-    this.set('error', false);
-  }.observes('controller.activeGroup'),
-  errorMessage: function () {
-    return this.get('error') ? Em.I18n.t('installer.controls.slaveComponentChangeGroupName.error') : '';
-  }.property('error'),
-
-  /**
-   * Onclick handler for saving updated group name
-   * @param event
-   */
-  changeGroupName: function (event) {
-    var inputVal = $('#' + this.get('elementId') + ' input[type="text"]').val();
-    if (inputVal !== this.get('content.name')) {
-      var result = this.get('controller').changeSlaveGroupName(this.get('content'), inputVal);
-      this.set('error', result);
-    }
-  }
-});
-
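
onChecked above walks each option's foreignKeys and shows only the configs tied to the selected option. A plain-object sketch of that visibility pass (the option names, foreignKeys, and config names are illustrative):

    function applyVisibility(selectedValue, options, categoryConfigs) {
      options.forEach(function (option) {
        option.foreignKeys.forEach(function (configName) {
          categoryConfigs.forEach(function (config) {
            if (config.name === configName) {
              // Visible only when its owning option is the selected one.
              config.isVisible = (option.displayName === selectedValue);
            }
          });
        });
      });
    }

    var configs = [{ name: 'mysql_host', isVisible: false },
                   { name: 'oracle_host', isVisible: true }];
    applyVisibility('New MySQL Database',
      [{ displayName: 'New MySQL Database', foreignKeys: ['mysql_host'] },
       { displayName: 'Existing Oracle Database', foreignKeys: ['oracle_host'] }],
      configs);
    // configs => mysql_host visible, oracle_host hidden
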
diff --git a/branch-1.2/ambari-web/app/views/wizard/step10_view.js b/branch-1.2/ambari-web/app/views/wizard/step10_view.js
deleted file mode 100644
index a93d982..0000000
--- a/branch-1.2/ambari-web/app/views/wizard/step10_view.js
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-var App = require('app');
-
-App.WizardStep10View = Em.View.extend({
-
-  templateName: require('templates/wizard/step10'),
-
-  didInsertElement: function () {
-    var controller = this.get('controller');
-    controller.loadStep();
-  }
-
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/wizard/step1_view.js b/branch-1.2/ambari-web/app/views/wizard/step1_view.js
deleted file mode 100644
index 955eb5c..0000000
--- a/branch-1.2/ambari-web/app/views/wizard/step1_view.js
+++ /dev/null
@@ -1,42 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-var App = require('app');
-
-App.WizardStep1View = Em.View.extend({
-
-  tagName: "form",
-  templateName: require('templates/wizard/step1'),
-
-  didInsertElement: function () {
-    $("[rel=popover]").popover({'placement': 'right', 'trigger': 'hover'});
-    console.log("The value of cluster controller is: "+ this.get('controller.name'));
-    this.get('controller').loadStep();
-  },
-
-  onError: function () {
-    return this.get('controller.clusterNameError') !== '';
-  }.property('controller.clusterNameError'),
-
-  submit: function(event) {
-    this.get('controller').submit();
-    return false;
-  }
-
-});
diff --git a/branch-1.2/ambari-web/app/views/wizard/step2_view.js b/branch-1.2/ambari-web/app/views/wizard/step2_view.js
deleted file mode 100644
index 47ad8e0..0000000
--- a/branch-1.2/ambari-web/app/views/wizard/step2_view.js
+++ /dev/null
@@ -1,131 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * 'License'); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an 'AS IS' BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-var App = require('app');
-
-App.SshKeyFileUploader = Ember.View.extend({
-  template:Ember.Handlebars.compile('<input type="file" {{bindAttr disabled="view.disabled"}} />'),
-  classNames: ['ssh-key-input-indentation'],
-
-  change: function (e) {
-    var self=this;
-    if (e.target.files && e.target.files.length == 1) {
-      var file = e.target.files[0];
-      var reader = new FileReader();
-
-      reader.onload = (function(theFile) {
-        return function(e) {
-          $('#sshKey').html(e.target.result);
-          self.get("controller").setSshKey(e.target.result);
-        };
-      })(file);
-      reader.readAsText(file);
-    }
-  }
-});
-
-App.WizardTextField = Ember.TextField.extend({
-  disabled: function(){
-    return !this.get('controller.content.installOptions.isJavaHome');
-  }.property('controller.content.installOptions.isJavaHome'),
-  click: function(){
-    return false;
-  }
-});
-
-App.WizardStep2View = Em.View.extend({
-
-  templateName: require('templates/wizard/step2'),
-  hostNameErr: false,
-  hostsInfo: null,
-
-  didInsertElement: function () {
-    $("[rel=popover]").popover({'placement': 'right', 'trigger': 'hover'});
-    this.set('hostNameErr', false);
-    this.set('controller.hostsError',null);
-    this.set('controller.sshKeyError',null);
-    this.loadHostsInfo();
-  },
-
-  loadHostsInfo: function(){
-    var hostsInfo = Em.Object.create();
-    this.set('hostsInfo', hostsInfo);
-  },
-
-  onHostNameErr: function () {
-    if (this.get('controller.hostNameEmptyError') === false && this.get('controller.hostNameNotRequiredErr') === false && this.get('controller.hostNameErr') === false) {
-      this.set('hostNameErr', false);
-    } else {
-      this.set('hostNameErr', true);
-    }
-  }.observes('controller.hostNameEmptyError', 'controller.hostNameNotRequiredErr', 'controller.hostNameErr'),
-
-  sshKeyState: function(){
-    return this.get("controller.content.installOptions.manualInstall");
-  }.property("controller.content.installOptions.manualInstall"),
-
-  isFileApi: function () {
-    return !!(window.File && window.FileReader && window.FileList);
-  }.property(),
-
-  manualInstallPopup: function(){
-    if(!this.get('controller.content.installOptions.useSsh')){
-      App.ModalPopup.show({
-        header: "Warning",
-        body: Em.I18n.t('installer.step2.manualInstall.info'),
-        encodeBody: false,
-        onPrimary: function () {
-          this.hide();
-        },
-        secondary: null
-      });
-    }
-    this.set('controller.content.installOptions.manualInstall', !this.get('controller.content.installOptions.useSsh'));
-  }.observes('controller.content.installOptions.useSsh'),
-
-  providingSSHKeyRadioButton: Ember.Checkbox.extend({
-    tagName: 'input',
-    attributeBindings: ['type', 'checked'],
-    checked: function () {
-      return this.get('controller.content.installOptions.useSsh');
-    }.property('controller.content.installOptions.useSsh'),
-    type: 'radio',
-
-    click: function () {
-      this.set('controller.content.installOptions.useSsh', true);
-      this.set('controller.content.installOptions.manualInstall', false);
-    }
-  }),
-
-  manualRegistrationRadioButton: Ember.Checkbox.extend({
-    tagName: 'input',
-    attributeBindings: ['type', 'checked'],
-    checked: function () {
-      return this.get('controller.content.installOptions.manualInstall');
-    }.property('controller.content.installOptions.manualInstall'),
-    type: 'radio',
-
-    click: function () {
-      this.set('controller.content.installOptions.manualInstall', true);
-      this.set('controller.content.installOptions.useSsh', false);
-    }
-  })
-});
-
-
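
SshKeyFileUploader above reads the chosen key file with the HTML5 FileReader API and hands the text to the controller. A minimal sketch of that read-then-callback flow (onKeyText is a hypothetical stand-in for controller.setSshKey; the DOM update to #sshKey is omitted):

    function readSshKey(fileInput, onKeyText) {
      var files = fileInput.files;
      if (files && files.length === 1) {
        var reader = new FileReader();
        // Fires once the file's text is fully loaded.
        reader.onload = function (e) {
          onKeyText(e.target.result);
        };
        reader.readAsText(files[0]);
      }
    }

    // Usage: wire to a file input's change event.
    // input.addEventListener('change', function () {
    //   readSshKey(input, function (key) { console.log(key.length); });
    // });
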
diff --git a/branch-1.2/ambari-web/app/views/wizard/step3_view.js b/branch-1.2/ambari-web/app/views/wizard/step3_view.js
deleted file mode 100644
index af29376..0000000
--- a/branch-1.2/ambari-web/app/views/wizard/step3_view.js
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-var App = require('app');
-
-App.WizardStep3View = Em.View.extend({
-
-  templateName: require('templates/wizard/step3'),
-  category: '',
-
-  didInsertElement: function () {
-    this.get('controller').navigateStep();
-  }
-});
-
-App.WizardHostView = Em.View.extend({
-
-  tagName: 'tr',
-  classNameBindings: ['hostInfo.bootStatus'],
-  hostInfo: null,
-
-  remove: function () {
-    this.get('controller').removeHost(this.get('hostInfo'));
-  },
-
-  retry: function() {
-    this.get('controller').retryHost(this.get('hostInfo'));
-  },
-
-  isRemovable: function () {
-    return true;
-  }.property(),
-
-  isRetryable: function() {
-    // return ['FAILED'].contains(this.get('hostInfo.bootStatus'));
-    return false;
-  }.property('hostInfo.bootStatus')
-
-});
-
-
diff --git a/branch-1.2/ambari-web/app/views/wizard/step4_view.js b/branch-1.2/ambari-web/app/views/wizard/step4_view.js
deleted file mode 100644
index 95c721b..0000000
--- a/branch-1.2/ambari-web/app/views/wizard/step4_view.js
+++ /dev/null
@@ -1,26 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-var App = require('app');
-
-App.WizardStep4View = Em.View.extend({
-
-  templateName: require('templates/wizard/step4')
-
-});
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/app/views/wizard/step5_view.js b/branch-1.2/ambari-web/app/views/wizard/step5_view.js
deleted file mode 100644
index c38e0b2..0000000
--- a/branch-1.2/ambari-web/app/views/wizard/step5_view.js
+++ /dev/null
@@ -1,112 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-var App = require('app');
-
-App.WizardStep5View = Em.View.extend({
-
-  templateName:require('templates/wizard/step5'),
-
-  didInsertElement:function () {
-    var controller = this.get('controller');
-    controller.loadStep();
-
-    if (controller.lastZooKeeper()) {
-      if (controller.get("selectedServicesMasters").filterProperty("display_name", "ZooKeeper").length < controller.get("hosts.length")) {
-        controller.lastZooKeeper().set('showAddControl', true);
-      } else {
-        controller.lastZooKeeper().set('showRemoveControl', false);
-      }
-      controller.rebalanceZookeeperHosts();
-    }
-  }
-
-});
-
-App.SelectHostView = Em.Select.extend({
-  content:[],
-  zId:null,
-  selectedHost:null,
-  serviceName:null,
-  attributeBindings:['disabled'],
-
-  filterContent:function () {
-    this.get('content').sort(function (a, b) {
-      if (a.get('memory') == b.get('memory')) {
-        if (a.get('cpu') == b.get('cpu')) {
-
-//          try to compare as ipaddresses
-          if (a.get('host_name').ip2long() && b.get('host_name').ip2long()) {
-            return a.get('host_name').ip2long() - b.get('host_name').ip2long(); // hostname asc
-          }
-
-//          try to compare as strings
-          if (a.get('host_name') > b.get('host_name')) {
-            return 1;
-          }
-
-          if (b.get('host_name') > a.get('host_name')) {
-            return -1;
-          }
-
-          return 0;
-        }
-        return b.get('cpu') - a.get('cpu'); // cores desc
-      }
-
-      return b.get('memory') - a.get('memory'); // ram desc
-    });
-
-  }.observes('content'),
-
-  init:function () {
-    this._super();
-    this.propertyDidChange('content');
-  },
-
-  change:function () {
-    this.get('controller').assignHostToMaster(this.get("serviceName"), this.get("value"), this.get("zId"));
-  },
-
-  didInsertElement:function () {
-    this.set("value", this.get("selectedHost"));
-  }
-});
-
-App.AddControlView = Em.View.extend({
-  componentName:null,
-  tagName:"span",
-  classNames:["badge", "badge-important"],
-  template:Ember.Handlebars.compile('+'),
-
-  click:function () {
-    this.get('controller').addZookeepers();
-  }
-});
-
-App.RemoveControlView = Em.View.extend({
-  zId:null,
-  tagName:"span",
-  classNames:["badge", "badge-important"],
-  template:Ember.Handlebars.compile('-'),
-
-  click:function () {
-    this.get('controller').removeZookeepers(this.get("zId"));
-  }
-});
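
filterContent above orders candidate hosts by memory descending, then CPU cores descending, then hostname ascending, preferring a numeric comparison when both names parse as IPv4 addresses. A standalone comparator capturing that ordering (ipToLong is a hypothetical stand-in for the app's String ip2long extension):

    // Hypothetical helper: IPv4 dotted-quad to integer, 0 if not an address.
    function ipToLong(name) {
      var m = /^(\d+)\.(\d+)\.(\d+)\.(\d+)$/.exec(name);
      return m ? ((+m[1] * 256 + +m[2]) * 256 + +m[3]) * 256 + +m[4] : 0;
    }

    function compareHosts(a, b) {
      if (a.memory !== b.memory) return b.memory - a.memory; // RAM desc
      if (a.cpu !== b.cpu)       return b.cpu - a.cpu;       // cores desc
      var ipA = ipToLong(a.host_name), ipB = ipToLong(b.host_name);
      if (ipA && ipB) return ipA - ipB;                      // numeric IP asc
      return a.host_name > b.host_name ? 1 :
             b.host_name > a.host_name ? -1 : 0;             // name asc
    }

    [{ memory: 8, cpu: 4, host_name: '10.0.0.2' },
     { memory: 8, cpu: 4, host_name: '10.0.0.10' }].sort(compareHosts);
    // => 10.0.0.2 sorts before 10.0.0.10 (numeric, not lexicographic)
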
diff --git a/branch-1.2/ambari-web/app/views/wizard/step6_view.js b/branch-1.2/ambari-web/app/views/wizard/step6_view.js
deleted file mode 100644
index db6f9d5..0000000
--- a/branch-1.2/ambari-web/app/views/wizard/step6_view.js
+++ /dev/null
@@ -1,74 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-var App = require('app');
-
-App.WizardStep6View = Em.View.extend({
-
-  templateName: require('templates/wizard/step6'),
-
-  didInsertElement: function () {
-    var controller = this.get('controller');
-    this.setLabel();
-    $('body').tooltip({
-      selector: '[rel=tooltip]'
-    });
-    controller.loadStep();
-  },
-
-  setLabel: function () {
-    var label = Em.I18n.t('installer.step6.body');
-    var clients = this.get('controller.content.clients');
-    clients.forEach(function (_client) {
-      if (clients.length === 1) {
-        label = label + ' ' + _client.display_name;
-      } else if (_client !== clients[clients.length - 1]) {
-        label = label + ' ' + _client.display_name;
-        if (_client !== clients[clients.length - 2]) {
-          label = label + ',';
-        }
-      } else {
-        label = label + ' and ' + _client.display_name + '.';
-      }
-    }, this);
-    this.set('label', label);
-  }
-});
-
-App.WizardStep6HostView = Em.View.extend({
-
-  host: null,
-  tagName: 'td',
-
-  didInsertElement: function () {
-    var components = this.get('controller').getMasterComponentsForHost(this.get('host.hostName'));
-    if (components && components.length > 0) {
-      components = components.map(function(_component) {
-        return App.format.role(_component);
-      });
-      components = components.join(" /\n");
-      this.$().popover({
-        title: Em.I18n.t('installer.step6.wizardStep6Host.title').format(this.get('host.hostName')),
-        content: components,
-        placement: 'right',
-        trigger: 'hover'
-      });
-    }
-  }
-});
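
setLabel above appends client names to the step body text, comma-separating all but the last and joining the final one with "and". The same joining rule as a standalone function (the base label is illustrative):

    function clientLabel(base, clients) {
      var label = base;
      clients.forEach(function (client, i) {
        if (clients.length === 1) {
          label += ' ' + client;
        } else if (i < clients.length - 1) {
          label += ' ' + client;
          if (i < clients.length - 2) label += ','; // no comma before "and"
        } else {
          label += ' and ' + client + '.';
        }
      });
      return label;
    }

    clientLabel('installs', ['Pig', 'Sqoop', 'HCat']);
    // => "installs Pig, Sqoop and HCat."
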
diff --git a/branch-1.2/ambari-web/app/views/wizard/step7_view.js b/branch-1.2/ambari-web/app/views/wizard/step7_view.js
deleted file mode 100644
index dcd48a4..0000000
--- a/branch-1.2/ambari-web/app/views/wizard/step7_view.js
+++ /dev/null
@@ -1,78 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-var App = require('app');
-
-App.WizardStep7View = Em.View.extend({
-
-  templateName: require('templates/wizard/step7'),
-
-  didInsertElement: function () {
-    var controller = this.get('controller');
-    var slaveController = App.router.get('slaveComponentGroupsController');
-    controller.loadStep();
-    //slaveController.loadStep();  // TODO: remove it to enable slaveConfiguration
-  },
-  onToggleBlock: function(event){
-    $(document.getElementById(event.context.name)).toggle('blind', 500);
-    event.context.set('isCollapsed', !event.context.get('isCollapsed'));
-  }
-
-});
-
-/**
- * Since we need to use local Views and Controllers we should put them into separate context
- * @type {*|Object}
- */
-App.WizardStep7 = App.WizardStep7 || {};
-
-App.WizardStep7.ServiceConfigsByCategoryView = Ember.View.extend({
-
-  content: null,
-
-  category: null,
-  serviceConfigs: null, // General, Advanced, NameNode, SNameNode, DataNode, etc.
-
-  categoryConfigs: function () {
-    return this.get('serviceConfigs').filterProperty('category', this.get('category.name'));
-  }.property('serviceConfigs.@each').cacheable(),
-  didInsertElement: function () {
-    if (this.get('category.name') == 'Advanced') {
-      this.set('category.isCollapsed', true);
-      $("#Advanced").hide();
-    } else {
-      this.set('category.isCollapsed', false);
-    }
-  },
-  layout: Ember.Handlebars.compile('<div {{bindAttr id="view.category.name"}} class="accordion-body collapse in"><div class="accordion-inner">{{yield}}</div></div>')
-});
-
-App.WizardStep7.ServiceConfigTab = Ember.View.extend({
-
-  tagName: 'li',
-
-  selectService: function (event) {
-    this.set('controller.selectedService', event.context);
-  },
-
-  didInsertElement: function () {
-    var serviceName = this.get('controller.selectedService.serviceName');
-    this.$('a[href="#' + serviceName + '"]').tab('show');
-  }
-});
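
ServiceConfigsByCategoryView above narrows the full serviceConfigs list to the rows whose category matches its own. The same filter as plain JavaScript (the config objects are illustrative):

    function categoryConfigs(serviceConfigs, categoryName) {
      // Equivalent of Ember's filterProperty('category', categoryName).
      return serviceConfigs.filter(function (config) {
        return config.category === categoryName;
      });
    }

    categoryConfigs(
      [{ name: 'dfs.name.dir', category: 'NameNode' },
       { name: 'dfs.data.dir', category: 'DataNode' }],
      'NameNode'
    ); // => [{ name: 'dfs.name.dir', category: 'NameNode' }]
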
diff --git a/branch-1.2/ambari-web/app/views/wizard/step8_view.js b/branch-1.2/ambari-web/app/views/wizard/step8_view.js
deleted file mode 100644
index 4301768..0000000
--- a/branch-1.2/ambari-web/app/views/wizard/step8_view.js
+++ /dev/null
@@ -1,98 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-var App = require('app');
-
-App.WizardStep8View = Em.View.extend({
-
-  templateName: require('templates/wizard/step8'),
-
-  didInsertElement: function () {
-    var controller = this.get('controller');
-    controller.loadStep();
-  },
-
-  spinner: null,
-
-  printReview: function() {
-    var o = $("#step8-info");
-    o.jqprint();
-  },
-
-  ajaxQueueLength: function() {
-    return this.get('controller.ajaxQueueLength');
-  }.property('controller.ajaxQueueLength'),
-
-  ajaxQueueLeft: function() {
-    return this.get('controller.ajaxQueueLeft');
-  }.property('controller.ajaxQueueLeft'),
-
-  // reference to modalPopup to make sure only one instance is created
-  modalPopup: null,
-
-  showLoadingIndicator: function() {
-    if (!this.get('controller.isSubmitDisabled')) {
-      if (this.get('modalPopup')) {
-        this.get('modalPopup').hide();
-        this.set('modalPopup', null);
-      }
-      return;
-    }
-    // don't create popup if it already exists
-    if (this.get('modalPopup')) {
-      return;
-    }
-    this.set('modalPopup', App.ModalPopup.show({
-      header: '',
-
-      showFooter: false,
-
-      showCloseButton: false,
-
-      bodyClass: Ember.View.extend({
-        templateName: require('templates/wizard/step8_log_popup'),
-
-        message: function() {
-          return Em.I18n.t('installer.step8.deployPopup.message').format(this.get('ajaxQueueComplete'), this.get('ajaxQueueLength'));
-        }.property('ajaxQueueComplete', 'ajaxQueueLength'),
-
-        controllerBinding: 'App.router.wizardStep8Controller',
-
-        ajaxQueueLength: function() {
-          return this.get('controller.ajaxQueueLength');
-        }.property(),
-
-        ajaxQueueComplete: function() {
-          return this.get('ajaxQueueLength') - this.get('controller.ajaxQueueLeft');
-        }.property('controller.ajaxQueueLeft', 'ajaxQueueLength'),
-
-        barWidth: function () {
-          return 'width: ' + (this.get('ajaxQueueComplete') / this.get('ajaxQueueLength') * 100) + '%;';
-        }.property('ajaxQueueComplete', 'ajaxQueueLength'),
-
-        autoHide: function() {
-          if (this.get('controller.servicesInstalled')) {
-            this.get('parentView').hide();
-          }
-        }.observes('controller.servicesInstalled')
-      })
-    }));
-  }.observes('controller.isSubmitDisabled')
-});
-
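
The deploy popup's progress bar width above is derived from the ratio of completed to total queued ajax calls (ajaxQueueComplete over ajaxQueueLength). The same computation as a small helper (names mirror the view's properties):

    function barWidth(ajaxQueueComplete, ajaxQueueLength) {
      // Same formula as the popup body's barWidth property.
      return 'width: ' + (ajaxQueueComplete / ajaxQueueLength * 100) + '%;';
    }

    barWidth(3, 12); // => "width: 25%;"
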
diff --git a/branch-1.2/ambari-web/app/views/wizard/step9_view.js b/branch-1.2/ambari-web/app/views/wizard/step9_view.js
deleted file mode 100644
index c8c740e..0000000
--- a/branch-1.2/ambari-web/app/views/wizard/step9_view.js
+++ /dev/null
@@ -1,340 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-App.WizardStep9View = Em.View.extend({
-
-  templateName:require('templates/wizard/step9'),
-  barColor:'',
-  resultMsg:'',
-  resultMsgColor:'',
-
-  didInsertElement:function () {
-    var controller = this.get('controller');
-    this.get('controller.hosts').setEach('status', 'info');
-    this.onStatus();
-    controller.navigateStep();
-  },
-
-  barWidth:function () {
-    var controller = this.get('controller');
-    var barWidth = 'width: ' + controller.get('progress') + '%;';
-    return barWidth;
-  }.property('controller.progress'),
-
-  progressMessage: function() {
-    return Em.I18n.t('installer.step9.overallProgress').format(this.get('controller.progress'));
-  }.property('controller.progress'),
-
-  onStatus:function () {
-    if (this.get('controller.status') === 'info') {
-      this.set('resultMsg', '');
-      this.set('barColor', 'progress-info');
-    } else if (this.get('controller.status') === 'warning') {
-      this.set('barColor', 'progress-warning');
-      this.set('resultMsg', Em.I18n.t('installer.step9.status.warning'));
-      this.set('resultMsgColor', 'alert-warning');
-    } else if (this.get('controller.status') === 'failed') {
-      this.set('barColor', 'progress-danger');
-      console.log('TRACE: Inside error view step9');
-      this.set('resultMsg', Em.I18n.t('installer.step9.status.failed'));
-      this.set('resultMsgColor', 'alert-error');
-    } else if (this.get('controller.status') === 'success') {
-      console.log('TRACE: Inside success view step9');
-      this.set('barColor', 'progress-success');
-      this.set('resultMsg', Em.I18n.t('installer.step9.status.success'));
-      this.set('resultMsgColor', 'alert-success');
-    }
-  }.observes('controller.status')
-});
-
-App.HostStatusView = Em.View.extend({
-  tagName:'tr',
-  obj: null,
-  barColor:'',
-
-  didInsertElement:function () {
-    this.onStatus();
-  },
-
-  barWidth:function () {
-    var barWidth = 'width: ' + this.get('obj.progress') + '%;';
-    return barWidth;
-  }.property('obj.progress'),
-
-  onStatus:function () {
-    if (this.get('obj.status') === 'info') {
-      this.set('barColor', 'progress-info');
-    } else if (this.get('obj.status') === 'warning') {
-      this.set('barColor', 'progress-warning');
-      if (this.get('obj.progress') === '100') {
-        this.set('obj.message', Em.I18n.t('installer.step9.host.status.warning'));
-      }
-    } else if (this.get('obj.status') === 'failed') {
-      this.set('barColor', 'progress-danger');
-      if (this.get('obj.progress') === '100') {
-        this.set('obj.message', Em.I18n.t('installer.step9.host.status.failed'));
-      }
-    } else if (this.get('obj.status') === 'success') {
-      this.set('barColor', 'progress-success');
-      if (this.get('obj.progress') === '100') {
-        this.set('obj.message', Em.I18n.t('installer.step9.host.status.success'));
-      }
-    }
-  }.observes('obj.status', 'obj.progress'),
-
-  isFailed:function () {
-    return this.get('controller.isStepCompleted') === true && this.get('obj.status') === 'failed';
-  }.property('controller.isStepCompleted', 'obj.status'),
-
-  isSuccess:function () {
-    return this.get('controller.isStepCompleted') === true && this.get('obj.status') === 'success';
-  }.property('controller.isStepCompleted', 'obj.status'),
-
-  isWarning:function () {
-    return this.get('controller.isStepCompleted') === true && this.get('obj.status') === 'warning';
-  }.property('controller.isStepCompleted', 'obj.status'),
-
-  isHostCompleted:function () {
-    return this.get('obj.progress') == 100 || this.get('controller.isStepCompleted');
-  }.property('controller.isStepCompleted', 'obj.progress'),
-
-  hostLogPopup:function (event, context) {
-    var self = this;
-    var host = event.context;
-    App.ModalPopup.show({
-      header: event.context.get('name'),
-      classNames: ['sixty-percent-width-modal'],
-      autoHeight: false,
-      onPrimary:function () {
-        this.hide();
-      },
-      secondary:null,
-
-      bodyClass:Ember.View.extend({
-        templateName:require('templates/wizard/step9HostTasksLogPopup'),
-        isLogWrapHidden: true,
-        showTextArea: false,
-        isEmptyList:true,
-        controllerBinding:context,
-        hostObj:function () {
-          return this.get('parentView.obj');
-        }.property('parentView.obj'),
-
-        task: null, // set in showTaskLog; contains task info including stdout and stderr
-        /**
-         * sort task array by Id
-         * @param tasks
-         * @return {Array}
-         */
-
-        sortTasksById: function(tasks){
-          var result = [];
-          var id = 1;
-          for(var i = 0; i < tasks.length; i++){
-            id = (tasks[i].Tasks.id > id) ? tasks[i].Tasks.id : id;
-          }
-          while(id >= 1){
-            for(var j = 0; j < tasks.length; j++){
-              if(id == tasks[j].Tasks.id){
-                result.push(tasks[j]);
-              }
-            }
-            id--;
-          }
-          result.reverse();
-          return result;
-        },
-
-        groupTasksByRole: function (tasks) {
-          var sortedTasks = [];
-          var taskRoles = tasks.mapProperty('Tasks.role').uniq();
-          for (var i = 0; i < taskRoles.length; i++) {
-            sortedTasks = sortedTasks.concat(tasks.filterProperty('Tasks.role', taskRoles[i]))
-          }
-          return sortedTasks;
-        },
-
-        visibleTasks: function () {
-          var self = this;
-          self.set("isEmptyList", true);
-          if (this.get('category.value')) {
-            var filter = this.get('category.value');
-            var tasks = this.get('tasks');
-            tasks.setEach("isVisible", false);
-
-            if (filter == "all") {
-              tasks.setEach("isVisible", true);
-            }
-            else if (filter == "pending") {
-              tasks.filterProperty("status", "pending").setEach("isVisible", true);
-              tasks.filterProperty("status", "queued").setEach("isVisible", true);
-            }
-            else if (filter == "in_progress") {
-              tasks.filterProperty("status", "in_progress").setEach("isVisible", true);
-            }
-            else if (filter == "failed") {
-              tasks.filterProperty("status", "failed").setEach("isVisible", true);
-            }
-            else if (filter == "completed") {
-              tasks.filterProperty("status", "completed").setEach("isVisible", true);
-            }
-            else if (filter == "aborted") {
-              tasks.filterProperty("status", "aborted").setEach("isVisible", true);
-            }
-            else if (filter == "timedout") {
-              tasks.filterProperty("status", "timedout").setEach("isVisible", true);
-            }
-
-            if (tasks.filterProperty("isVisible", true).length > 0) {
-              self.set("isEmptyList", false);
-            }
-          }
-        }.observes('category', 'tasks'),
-
-        categories: [
-            Ember.Object.create({value: 'all', label: Em.I18n.t('installer.step9.hostLog.popup.categories.all') }),
-            Ember.Object.create({value: 'pending', label: Em.I18n.t('installer.step9.hostLog.popup.categories.pending')}),
-            Ember.Object.create({value: 'in_progress', label: Em.I18n.t('installer.step9.hostLog.popup.categories.in_progress')}),
-            Ember.Object.create({value: 'failed', label: Em.I18n.t('installer.step9.hostLog.popup.categories.failed') }),
-            Ember.Object.create({value: 'completed', label: Em.I18n.t('installer.step9.hostLog.popup.categories.completed') }),
-            Ember.Object.create({value: 'aborted', label: Em.I18n.t('installer.step9.hostLog.popup.categories.aborted') }),
-            Ember.Object.create({value: 'timedout', label: Em.I18n.t('installer.step9.hostLog.popup.categories.timedout') })
-        ],
-
-        category: null,
-
-        tasks: function () {
-          var tasksArr = [];
-          var tasks = this.getStartedTasks(host);
-          tasks = this.sortTasksById(tasks);
-          tasks = this.groupTasksByRole(tasks);
-          if (tasks.length) {
-            tasks.forEach(function (_task) {
-              var taskInfo = Ember.Object.create({});
-              taskInfo.set('id', _task.Tasks.id);
-              taskInfo.set('command', _task.Tasks.command.toLowerCase());
-              taskInfo.set('status', App.format.taskStatus(_task.Tasks.status));
-              taskInfo.set('role', App.format.role(_task.Tasks.role));
-              taskInfo.set('stderr', _task.Tasks.stderr);
-              taskInfo.set('stdout', _task.Tasks.stdout);
-              taskInfo.set('isVisible', true);
-              taskInfo.set('icon', '');
-              if (taskInfo.get('status') == 'pending' || taskInfo.get('status') == 'queued') {
-                taskInfo.set('icon', 'icon-cog');
-              } else if (taskInfo.get('status') == 'in_progress') {
-                taskInfo.set('icon', 'icon-cogs');
-              } else if (taskInfo.get('status') == 'completed') {
-                taskInfo.set('icon', ' icon-ok');
-              } else if (taskInfo.get('status') == 'failed') {
-                taskInfo.set('icon', 'icon-exclamation-sign');
-              } else if (taskInfo.get('status') == 'aborted') {
-                taskInfo.set('icon', 'icon-minus');
-              } else if (taskInfo.get('status') == 'timedout') {
-                taskInfo.set('icon', 'icon-time');
-              }
-              tasksArr.push(taskInfo);
-            }, this);
-          }
-          return tasksArr;
-        }.property('App.router.wizardStep9Controller.logTasksChangesCounter'),
-
-        backToTaskList: function(event, context) {
-          this.destroyClipBoard();
-          this.set("isLogWrapHidden",true);
-        },
-
-        getStartedTasks:function (host) {
-          var startedTasks = host.logTasks.filter(function (task) {
-            return task.Tasks.status;
-            //return task.Tasks.status != 'PENDING' && task.Tasks.status != 'QUEUED';
-          });
-          return startedTasks;
-        },
-
-        openTaskLogInDialog: function () {
-          var newwindow = window.open();
-          var newdocument = newwindow.document;
-          newdocument.write($(".task-detail-log-info").html());
-          newdocument.close();
-        },
-
-        openedTaskId: 0,
-
-        openedTask: function () {
-          if (!this.get('openedTaskId')) {
-            return Ember.Object.create();
-          }
-          return this.get('tasks').findProperty('id', this.get('openedTaskId'));
-        }.property('tasks', 'openedTaskId'),
-
-        toggleTaskLog: function (event, context) {
-          if (this.isLogWrapHidden) {
-            var taskInfo = event.context;
-            this.set("isLogWrapHidden", false);
-            this.set('openedTaskId', taskInfo.id);
-            $(".modal").scrollTop(0);
-            $(".modal-body").scrollTop(0);
-          } else {
-            this.set("isLogWrapHidden", true);
-            this.set('openedTaskId', 0);
-          }
-        },
-
-        textTrigger: function (event) {
-          if ($(".task-detail-log-clipboard").length > 0) {
-            this.destroyClipBoard();
-          } else {
-            this.createClipBoard();
-          }
-        },
-        createClipBoard:function(){
-          $(".task-detail-log-clipboard-wrap").html('<textarea class="task-detail-log-clipboard"></textarea>');
-          $(".task-detail-log-clipboard")
-              .html("stderr: \n"+$(".stderr").html()+"\n stdout:\n"+$(".stdout").html())
-              .css("display","block")
-              .width($(".task-detail-log-maintext").width())
-              .height($(".task-detail-log-maintext").height())
-              .select();
-          $(".task-detail-log-maintext").css("display","none")
-        },
-        destroyClipBoard:function(){
-          $(".task-detail-log-clipboard").remove();
-          $(".task-detail-log-maintext").css("display","block");
-        }
-      })
-    });
-  }
-
-});
\ No newline at end of file
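
The `sortTasksById` helper deleted above builds its result by scanning for the maximum id, collecting tasks in descending order, and then reversing. A single comparator sort expresses the same ascending order more directly; a minimal sketch, assuming plain task objects carrying a numeric `Tasks.id`:

    function sortTasksById(tasks) {
      // ascending by id; slice() leaves the input array untouched
      return tasks.slice().sort(function (a, b) {
        return a.Tasks.id - b.Tasks.id;
      });
    }
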
diff --git a/branch-1.2/ambari-web/config.coffee b/branch-1.2/ambari-web/config.coffee
deleted file mode 100644
index 1342cb3..0000000
--- a/branch-1.2/ambari-web/config.coffee
+++ /dev/null
@@ -1,90 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-fs   = require 'fs'
-path = require 'path'
-
-# See docs at http://brunch.readthedocs.org/en/latest/config.html.
-
-exports.config =
-
-  files:
-
-    javascripts:
-      joinTo:
-        'javascripts/app.js': /^app/
-        'javascripts/vendor.js': /^vendor/
-        'test/javascripts/test.js': /^test(\/|\\)(?!vendor)/
-        'test/javascripts/test-vendor.js': /^test(\/|\\)(?=vendor)/
-      order:
-        before: [
-          'vendor/scripts/console-helper.js',
-          'vendor/scripts/jquery-1.7.2.min.js',
-          'vendor/scripts/handlebars-1.0.0.beta.6.js',
-          'vendor/scripts/ember-latest.js',
-          'vendor/scripts/ember-data-latest.js',
-          'vendor/scripts/ember-i18n-1.2.0.js',
-          'vendor/scripts/bootstrap.js',
-          'vendor/scripts/bootstrap-combobox.js',
-          'vendor/scripts/d3.v2.js',
-          'vendor/scripts/sinon-1.4.2.js',
-          'vendor/scripts/cubism.v1.js',
-          'vendor/scripts/jquery.ui.core.js',
-          'vendor/scripts/jquery.ui.widget.js',
-          'vendor/scripts/jquery.ui.mouse.js',
-          'vendor/scripts/jquery.ui.datepicker.js',
-          'vendor/scripts/jquery-ui-timepicker-addon.js',
-          'vendor/scripts/jquery.ui.slider.js',
-          'vendor/scripts/jquery.ui.sortable.js',
-          'vendor/scripts/jquery.ui.custom-effects.js',
-          'vendor/scripts/jquery.timeago.js',
-          'vendor/scripts/jquery.ajax-retry.js',
-          'vendor/scripts/workflow_visualization.js',
-          'vendor/scripts/rickshaw.js',
-          'vendor/scripts/spin.js',
-          'vendor/scripts/jquery.flexibleArea.js'
-          ]
-
-    stylesheets:
-      defaultExtension: 'css'
-      joinTo: 'stylesheets/app.css'
-      order:
-        before: [
-          'vendor/styles/bootstrap.css',
-#          'vendor/styles/datepicker.css'
-          'vendor/styles/font-awesome.css',
-          'vendor/styles/font-awesome-ie7.css',
-          'vendor/styles/cubism.css',
-          'vendor/styles/rickshaw.css',
-          'vendor/styles/bootstrap-combobox.css'
-        ]
-
-    templates:
-      precompile: true
-      defaultExtension: 'hbs'
-      joinTo: 'javascripts/app.js': /^app/
-      paths:
-        jquery: 'vendor/scripts/jquery-1.7.2.min.js'
-        handlebars: 'vendor/scripts/handlebars-1.0.0.beta.6.js'
-        ember: 'vendor/scripts/ember-latest.js'
-
-  server:
-    port: 3333
-    base: '/'
-    run: no
-
-
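
The `joinTo` maps in the deleted config above route source files into output bundles by regular expression: paths under app/ concatenate into javascripts/app.js, paths under vendor/ into javascripts/vendor.js, and test sources split on whether they sit under test/vendor. A sketch of that routing decision in plain JavaScript (hypothetical helper, not part of brunch's API):

    // returns the bundle a given source path would be joined into
    function bundleFor(path) {
      var joinTo = {
        'javascripts/app.js': /^app/,
        'javascripts/vendor.js': /^vendor/
      };
      for (var bundle in joinTo) {
        if (joinTo[bundle].test(path)) {
          return bundle;
        }
      }
      return null;
    }
    bundleFor('app/controllers/login_controller.js'); // 'javascripts/app.js'
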
diff --git a/branch-1.2/ambari-web/package.json b/branch-1.2/ambari-web/package.json
deleted file mode 100644
index f2d826a..0000000
--- a/branch-1.2/ambari-web/package.json
+++ /dev/null
@@ -1,31 +0,0 @@
-{
-  "name":"Ambari",
-  "description":"Front-end package for the Apache Ambari Project",
-  "version":"1.0.0",
-  "homepage":"",
-  "repository":{
-    "type":"git",
-    "url":""
-  },
-  "engines":{
-    "node":"~0.6.10 || 0.8 || 0.9"
-  },
-  "scripts":{
-    "start":"brunch watch --server"
-  },
-  "dependencies":{
-    "javascript-brunch":">= 1.0 < 1.5",
-    "css-brunch":">= 1.0 < 1.5",
-    "uglify-js-brunch":">= 1.0 < 1.5",
-    "clean-css-brunch":">= 1.0 < 1.5",
-    "ember-precompiler-brunch":">= 1.0 < 1.5",
-    "less-brunch":">= 1.0 < 1.5"
-  },
-  "devDependencies":{
-    "mocha":"0.14.0",
-    "chai":"1.2.0",
-    "sinon":"1.4.2",
-    "sinon-chai":"2.1.2",
-    "express":"2.5.8"
-  }
-}
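
The dependency ranges in the deleted package.json (">= 1.0 < 1.5") pin each brunch plugin to the 1.x line below 1.5 while still accepting minor and patch updates. One way to check what such a range admits, assuming the npm "semver" package is available:

    var semver = require('semver');
    semver.satisfies('1.4.2', '>= 1.0 < 1.5'); // true
    semver.satisfies('1.5.0', '>= 1.0 < 1.5'); // false
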
diff --git a/branch-1.2/ambari-web/pom.xml b/branch-1.2/ambari-web/pom.xml
deleted file mode 100644
index 23cf6b4..0000000
--- a/branch-1.2/ambari-web/pom.xml
+++ /dev/null
@@ -1,153 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-  <parent>
-    <groupId>org.apache.ambari</groupId>
-    <artifactId>ambari-project</artifactId>
-    <version>1.2.2-SNAPSHOT</version>
-    <relativePath>../ambari-project</relativePath>
-  </parent>
-  <modelVersion>4.0.0</modelVersion>
-  <groupId>org.apache.ambari</groupId>
-  <artifactId>ambari-web</artifactId>
-  <packaging>pom</packaging>
-  <name>Ambari Web</name>
-  <version>1.2.2-SNAPSHOT</version>
-  <description>Ambari Web</description>
-  <properties>
-    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
-  </properties>
-  <build>
-    <plugins>
-      <plugin>
-        <artifactId>maven-compiler-plugin</artifactId>
-        <version>3.0</version>
-      </plugin>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>rpm-maven-plugin</artifactId>
-        <version>2.0.1</version>
-        <executions>
-          <execution>
-            <!-- unbinds rpm creation from maven lifecycle -->
-            <phase>none</phase>
-            <goals>
-              <goal>attached-rpm</goal>
-            </goals>
-          </execution>
-        </executions>
-        <configuration>
-          <copyright>2012, Apache Software Foundation</copyright>
-          <group>Development</group>
-          <description>Maven Recipe: RPM Package.</description>
-          <mappings/>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-antrun-plugin</artifactId>
-        <version>1.7</version>
-        <executions>
-          <execution>
-            <id>clean</id>
-            <phase>clean</phase>
-            <goals>
-              <goal>run</goal>
-            </goals>
-            <configuration>
-              <target name="ambari-web-clean">
-                <exec dir="${basedir}" executable="rm" failonerror="false">
-                  <arg value="-rf"/>
-                  <arg value="public"/>
-                </exec>
-              </target>
-            </configuration>
-          </execution>
-          <execution>
-            <id>compile</id>
-            <phase>compile</phase>
-            <goals>
-              <goal>run</goal>
-            </goals>
-            <configuration>
-              <target name="ambari-web-compile">
-                <exec dir="${basedir}" executable="npm" failonerror="false">
-                  <env key="PYTHON" value="python2.6" />
-                  <arg value="install"/>
-                </exec>
-                <exec dir="${basedir}" executable="brunch" failonerror="false">
-                  <arg value="build"/>
-                </exec>
-                <exec dir="${basedir}" executable="gzip" failonerror="false">
-                  <arg value="public/javascripts/app.js"/>
-                  <arg value="public/javascripts/vendor.js"/>
-                  <arg value="public/stylesheets/app.css"/>
-                </exec>
-              </target>
-            </configuration>
-          </execution>
-          <execution>
-            <id>test</id>
-            <phase>test</phase>
-            <goals>
-              <goal>run</goal>
-            </goals>
-            <configuration>
-              <target name="ambari-web-test">
-                <!-- 
-                <exec dir="${basedir}" executable="brunch" failonerror="false">
-                  <arg value="test"/>
-                </exec>
-                -->
-              </target>
-            </configuration>
-          </execution>
-          <execution>
-            <id>package</id>
-            <phase>package</phase>
-            <goals>
-              <goal>run</goal>
-            </goals>
-            <configuration>
-              <target name="ambari-web-package">
-                <!--
-                <copy toDir="${project.build.directory}/ambari-server-${project.version}-dist/ambari-server-${project.version}/web/">
-                  <fileset dir="${basedir}/../ambari-web/public"/>
-                </copy>
-                -->
-              </target>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.rat</groupId>
-        <artifactId>apache-rat-plugin</artifactId>
-        <configuration>
-          <excludes>
-            <exclude>package.json</exclude>
-            <exclude>public/**</exclude>
-            <exclude>app/assets/**</exclude>
-            <exclude>vendor/**</exclude>
-            <exclude>node_modules/**</exclude>
-          </excludes>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
-</project>
diff --git a/branch-1.2/ambari-web/test/installer/step1_test.js b/branch-1.2/ambari-web/test/installer/step1_test.js
deleted file mode 100644
index 589badb..0000000
--- a/branch-1.2/ambari-web/test/installer/step1_test.js
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-require('controllers/wizard/step1_controller');
-
-/*
-describe('App.InstallerStep1Controller', function () {
-
-  describe('#validateStep1()', function () {
-    it('should return false and sets invalidClusterName to true if cluster name is empty', function () {
-      var controller = App.InstallerStep1Controller.create();
-      controller.set('clusterName', '');
-      expect(controller.validateStep1()).to.equal(false);
-      expect(controller.get('invalidClusterName')).to.equal(true);
-    })
-    it('should return false and sets invalidClusterName to true if cluster name has whitespaces', function () {
-      var controller = App.InstallerStep1Controller.create();
-      controller.set('clusterName', 'My Cluster');
-      expect(controller.validateStep1()).to.equal(false);
-      expect(controller.get('invalidClusterName')).to.equal(true);
-    })
-    it('should return false and sets invalidClusterName to true if cluster name has special characters', function () {
-      var controller = App.InstallerStep1Controller.create();
-      controller.set('clusterName', 'my-cluster');
-      expect(controller.validateStep1()).to.equal(false);
-      expect(controller.get('invalidClusterName')).to.equal(true);
-    })
-    it('should return true, sets invalidClusterName to false if cluster name is valid', function () {
-      var controller = App.InstallerStep1Controller.create();
-      var clusterName = 'mycluster1';
-      controller.set('clusterName', clusterName);
-      expect(controller.validateStep1()).to.equal(true);
-      expect(controller.get('invalidClusterName')).to.equal(false);
-    })
-  })
-
-})*/
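
The commented-out cases above pin down the cluster-name rule: empty names, names with whitespace, and names with special characters are all rejected, while a plain alphanumeric name passes. A minimal predicate consistent with those expectations (hypothetical helper, not the controller's actual implementation):

    function isValidClusterName(name) {
      // non-empty, alphanumeric only -- no spaces, hyphens, or other symbols
      return /^[a-zA-Z0-9]+$/.test(name);
    }
    isValidClusterName('mycluster1'); // true
    isValidClusterName('my-cluster'); // false
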
diff --git a/branch-1.2/ambari-web/test/installer/step2_test.js b/branch-1.2/ambari-web/test/installer/step2_test.js
deleted file mode 100644
index 9551519..0000000
--- a/branch-1.2/ambari-web/test/installer/step2_test.js
+++ /dev/null
@@ -1,162 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-var Ember = require('ember');
-require('controllers/wizard/step2_controller');
-
-describe.skip('App.WizardStep2Controller', function () {
-
-  /*describe('#hostsError()', function () {
-
-    it('should return t(installer.step2.hostName.error.required) if manualInstall is false, hostNames is empty, and hasSubmitted is true', function () {
-      var controller = App.InstallerStep2Controller.create();
-      controller.set('manualInstall', false);
-      controller.set('hostNames', '');
-      controller.set('hasSubmitted', true);
-      expect(controller.get('hostsError')).to.equal(Ember.I18n.t('installer.step2.hostName.error.required'));
-    })
-
-    it('should return null if manualInstall is false, hostNames is not empty, and hasSubmitted is true', function () {
-      var controller = App.InstallerStep2Controller.create();
-      controller.set('manualInstall', false);
-      controller.set('hostNames', 'ambari');
-      controller.set('hasSubmitted', true);
-      expect(controller.get('hostsError')).to.equal(null);
-    })
-
-    it('should return t(installer.step2.hostName.error.invalid) if manualInstall is false and hostNames has an element ' +
-      'that starts with a hyphen', function () {
-      var controller = App.InstallerStep2Controller.create();
-      controller.set('manualInstall', false);
-      controller.set('hostNames', "-apache");
-      expect(controller.get('hostsError')).to.equal(Ember.I18n.t('installer.step2.hostName.error.invalid'));
-    })
-
-    it('should return t(installer.step2.hostName.error.invalid) if manualInstall is false and hostNames has an element ' +
-      'that ends with a hyphen', function () {
-      var controller = App.InstallerStep2Controller.create();
-      controller.set('manualInstall', false);
-      controller.set('hostNames', 'apache-');
-      expect(controller.get('hostsError')).to.equal(Ember.I18n.t('installer.step2.hostName.error.invalid'));
-    })
-
-    it('should return t(installer.step2.hostName.error.required) if manualInstall is true, hostNames is empty, and ' +
-      'hasSubmitted is true', function () {
-      var controller = App.InstallerStep2Controller.create();
-      controller.set('manualInstall', true);
-      controller.set('hostNames', '');
-      controller.set('hasSubmitted', true);
-      expect(controller.get('hostsError')).to.equal(Ember.I18n.t('installer.step2.hostName.error.required'));
-    })
-
-  })
-
-  describe('#sshKeyError()', function () {
-    it('should return t(installer.step2.sshKey.error.required) if manualInstall is false, sshKey is empty, ' +
-      'and hasSubmitted is true', function () {
-      var controller = App.InstallerStep2Controller.create();
-      controller.set('manualInstall', false);
-      controller.set('sshKey', '');
-      controller.set('hasSubmitted', true);
-      expect(controller.get('sshKeyError')).to.equal(Ember.I18n.t('installer.step2.sshKey.error.required'));
-    })
-
-    it('should return null if manualInstall is true, sshKey is empty, and hasSubmitted is true', function () {
-      var controller = App.InstallerStep2Controller.create();
-      controller.set('sshKey', '');
-      controller.set('manualInstall', true);
-      controller.set('hasSubmitted', true);
-      expect(controller.get('sshKeyError')).to.equal(null);
-    })
-
-    it('should return null if sshKey is not null and hasSubmitted is true', function () {
-      var controller = App.InstallerStep2Controller.create();
-      controller.set('sshKey', 'ambari');
-      controller.set('hasSubmitted', true);
-      expect(controller.get('sshKeyError')).to.equal(null);
-    })
-
-  })*/
-    /* Passphrase has been disabled, so commenting out tests
-    it('should set passphraseMatchErr to true if ' +
-      'passphrase and confirmPassphrase don\'t match', function () {
-      var controller = App.InstallerStep2Controller.create();
-      controller.set('manualInstall', false);
-      controller.set('passphrase', 'apache ambari');
-      controller.set('confirmPassphrase', 'ambari');
-      controller.validateStep2();
-      expect(controller.get('passphraseMatchErr')).to.equal(true);
-    })
-
-    it('should set passphraseMatchErr to false if passphrase and ' +
-      'confirmPassphrase don\'t match but manualInstall is true', function () {
-      var controller = App.InstallerStep2Controller.create();
-      controller.set('passphrase', 'apache ambari');
-      controller.set('confirmPassphrase', 'ambari');
-      controller.set('manualInstall', true);
-      controller.validateStep2();
-      expect(controller.get('passphraseMatchErr')).to.equal(false);
-    })
-
-    it('should set passphraseMatchErr to false if passphrase and ' +
-      'confirmPassphrase match', function () {
-      var controller = App.InstallerStep2Controller.create();
-      controller.set('passphrase', 'apache ambari');
-      controller.set('confirmPassphrase', 'apache ambari');
-      controller.validateStep2();
-      expect(controller.get('passphraseMatchErr')).to.equal(false);
-    })
-    */
-
-  /*describe('#localRepoError()', function() {
-
-    it('should return t(installer.step2.localRepo.error.required) if localRepo is true, localRepoPath is empty, and hasSubmitted is true', function () {
-      var controller = App.InstallerStep2Controller.create();
-      controller.set('localRepo', true);
-      controller.set('localRepoPath', '');
-      controller.set('hasSubmitted', true);
-      expect(controller.get('localRepoError')).to.equal(Ember.I18n.t('installer.step2.localRepo.error.required'));
-    })
-
-    it('should return null if localRepo is true, localRepoPath is not empty, and hasSubmitted is true', function () {
-      var controller = App.InstallerStep2Controller.create();
-      controller.set('localRepo', true);
-      controller.set('localRepoPath', '/etc/');
-      controller.set('hasSubmitted', true);
-      expect(controller.get('localRepoError')).to.equal(null);
-    })
-
-    it('should return null if localRepo is false, localRepoPath is empty, and hasSubmitted is true', function () {
-      var controller = App.InstallerStep2Controller.create();
-      controller.set('localRepo', false);
-      controller.set('localRepoPath', '');
-      controller.set('hasSubmitted', true);
-      expect(controller.get('localRepoError')).to.equal(null);
-    })
-  })
-
-  describe('#evaluateStep2(): On hitting step2 \"next\" button', function () {
-    it('should return false if isSubmitDisabled is true ', function () {
-      var controller = App.InstallerStep2Controller.create();
-      controller.set('isSubmitDisabled', true);
-      expect(controller.evaluateStep2()).to.equal(false);
-    })
-  })*/
-
-})
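
The skipped hostname cases above reject names that start or end with a hyphen and require a non-empty host list unless manual install is chosen. A sketch of a matching single-label check (hypothetical helper, under the same assumptions):

    function isValidHostName(host) {
      // letters, digits, and inner hyphens; no leading or trailing hyphen
      return /^[a-zA-Z0-9](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$/.test(host);
    }
    isValidHostName('apache');  // true
    isValidHostName('-apache'); // false
    isValidHostName('apache-'); // false
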
diff --git a/branch-1.2/ambari-web/test/installer/step3_test.js b/branch-1.2/ambari-web/test/installer/step3_test.js
deleted file mode 100644
index 532b300..0000000
--- a/branch-1.2/ambari-web/test/installer/step3_test.js
+++ /dev/null
@@ -1,110 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-var Ember = require('ember');
-var App = require('app');
-require('models/hosts');
-require('controllers/wizard/step3_controller');
-
-/*
-describe('App.InstallerStep3Controller', function () {
-  //var controller = App.InstallerStep3Controller.create();
-
-  describe('#parseHostInfo', function () {
-    var controller = App.InstallerStep3Controller.create();
-    it('should return true if there is no host with pending status in the data provided by REST bootstrap call.  It should also update the status on the client side', function () {
-      var hostFromServer = [
-        {
-          name: '192.168.1.1',
-          status: 'error'
-        },
-        {
-          name: '192.168.1.2',
-          status: 'success'
-        },
-        {
-          name: '192.168.1.3',
-          status: 'error'
-        },
-        {
-          name: '192.168.1.4',
-          status: 'success'
-        }
-      ];
-      controller.content.pushObject(App.HostInfo.create({
-        name: '192.168.1.1',
-        status: 'error'
-      }));
-      controller.content.pushObject(App.HostInfo.create({
-        name: '192.168.1.2',
-        status: 'success'
-      }));
-      controller.content.pushObject(App.HostInfo.create({
-        name: '192.168.1.3',
-        status: 'pending'        //status should be overridden to 'error' after the parseHostInfo call
-      }));
-      controller.content.pushObject(App.HostInfo.create({
-        name: '192.168.1.4',
-        status: 'success'
-      }));
-
-      var result = controller.parseHostInfo(hostFromServer, controller.content);
-      var host = controller.content.findProperty('name', '192.168.1.3');
-      expect(result).to.equal(true);
-      expect(host.bootStatus).to.equal('error');
-    })
-  })
-
-
-  describe('#onAllChecked', function () {
-    var controller = App.InstallerStep3Controller.create();
-    it('should set all visible hosts\'s isChecked to true upon checking the "all" checkbox', function () {
-      controller.set('category', 'All Hosts');
-      controller.set('allChecked', true);
-      controller.content.pushObject(App.HostInfo.create({
-        name: '192.168.1.1',
-        status: 'error',
-        isChecked: false
-      }));
-      controller.content.pushObject(App.HostInfo.create({
-        name: '192.168.1.2',
-        status: 'success',
-        isChecked: false
-      }));
-      controller.content.pushObject(App.HostInfo.create({
-        name: '192.168.1.3',
-        status: 'pending', //status should be overridden to 'error' after the parseHostInfo call
-        isChecked: true
-      }));
-      controller.content.pushObject(App.HostInfo.create({
-        name: '192.168.1.4',
-        status: 'success',
-        isChecked: false
-      }));
-      controller.onAllChecked();
-      controller.content.forEach(function (host) {
-        var result = host.get('isChecked');
-        expect(result).to.equal(true);
-      });
-
-    })
-  })
-})
-
-*/
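
The disabled test above expects parseHostInfo to copy each server-reported status onto the matching client host and to report completion once no host is still pending. A behavior sketch under those assumptions (hypothetical signature, relying on Ember's array prototype extensions):

    function parseHostInfo(hostsFromServer, clientHosts) {
      hostsFromServer.forEach(function (serverHost) {
        var clientHost = clientHosts.findProperty('name', serverHost.name);
        if (clientHost) {
          clientHost.set('bootStatus', serverHost.status);
        }
      });
      // polling is done once no server-side host is still pending
      return !hostsFromServer.someProperty('status', 'pending');
    }
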
diff --git a/branch-1.2/ambari-web/test/installer/step4_test.js b/branch-1.2/ambari-web/test/installer/step4_test.js
deleted file mode 100644
index 8a9ecc7..0000000
--- a/branch-1.2/ambari-web/test/installer/step4_test.js
+++ /dev/null
@@ -1,167 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var Ember = require('ember');
-var App = require('app');
-require('controllers/wizard/step4_controller');
-
-/*
-describe('App.InstallerStep4Controller', function () {
-
-  var DEFAULT_SERVICES = ['HDFS'];
-  var OPTIONAL_SERVICES = ['MAPREDUCE', 'NAGIOS', 'GANGLIA', 'OOZIE', 'HIVE', 'HBASE', 'PIG', 'SQOOP', 'ZOOKEEPER', 'HCATALOG'];
-
-  var controller = App.InstallerStep4Controller.create();
-  controller.rawContent.forEach(function(item){
-    item.isSelected = true;
-    controller.pushObject(Ember.Object.create(item));
-    });
-
-  describe('#selectMinimum()', function () {
-    it('should set isSelected to false on all non-default services and to true on all default services', function() {
-      controller.selectMinimum();
-      DEFAULT_SERVICES.forEach(function (serviceName) {
-        expect(controller.findProperty('serviceName', serviceName).get('isSelected')).to.equal(true);
-      });
-      OPTIONAL_SERVICES.forEach(function (serviceName) {
-        expect(controller.findProperty('serviceName', serviceName).get('isSelected')).to.equal(false);
-      });
-    })
-  })
-
-  describe('#selectAll()', function () {
-    it('should set isSelected to true on all non-default and all default services', function() {
-      controller.selectAll();
-      DEFAULT_SERVICES.forEach(function (serviceName) {
-        expect(controller.findProperty('serviceName', serviceName).get('isSelected')).to.equal(true);
-      });
-      OPTIONAL_SERVICES.forEach(function (serviceName) {
-        expect(controller.findProperty('serviceName', serviceName).get('isSelected')).to.equal(true);
-      });
-    })
-  })
-
-  describe('#isAll()', function () {
-
-    beforeEach(function() {
-      DEFAULT_SERVICES.forEach(function(serviceName) {
-        controller.findProperty('serviceName', serviceName).set('isSelected', true);
-      });
-      OPTIONAL_SERVICES.forEach(function(serviceName) {
-        controller.findProperty('serviceName', serviceName).set('isSelected', true);
-      });
-    });
-
-    it('should return true if isSelected is true for all services', function() {
-      expect(controller.get('isAll')).to.equal(true);
-    })
-
-    it('should return false if isSelected is false for one of the services', function() {
-      controller.findProperty('serviceName', 'HBASE').set('isSelected', false);
-      expect(controller.get('isAll')).to.equal(false);
-    })
-  })
-
-  describe('#isMinimum()', function () {
-
-    beforeEach(function() {
-      DEFAULT_SERVICES.forEach(function(serviceName) {
-        controller.findProperty('serviceName', serviceName).set('isSelected', true);
-      });
-      OPTIONAL_SERVICES.forEach(function(serviceName) {
-        controller.findProperty('serviceName', serviceName).set('isSelected', false);
-      });
-    });
-
-    it('should return true if isSelected is true for all default services and isSelected is false for all optional services', function() {
-      expect(controller.get('isMinimum')).to.equal(true);
-    })
-
-    it('should return false if isSelected is true for all default services and for one of the optional services', function() {
-      controller.findProperty('serviceName', 'HBASE').set('isSelected', true);
-      expect(controller.get('isMinimum')).to.equal(false);
-    })
-
-  })
-
-  describe('#needToAddMapReduce', function() {
-
-    describe('mapreduce not selected', function() {
-      beforeEach(function() {
-        controller.findProperty('serviceName', 'MAPREDUCE').set('isSelected', false);
-      })
-
-      it('should return true if Hive is selected and MapReduce is not selected', function() {
-        controller.findProperty('serviceName', 'HIVE').set('isSelected', true);
-        expect(controller.needToAddMapReduce()).to.equal(true);
-      })
-      it('should return true if Pig is selected and MapReduce is not selected', function() {
-        controller.findProperty('serviceName', 'PIG').set('isSelected', true);
-        expect(controller.needToAddMapReduce()).to.equal(true);
-      })
-      it('should return true if Oozie is selected and MapReduce is not selected', function() {
-        controller.findProperty('serviceName', 'OOZIE').set('isSelected', true);
-        expect(controller.needToAddMapReduce()).to.equal(true);
-      })
-    })
-
-    describe('mapreduce selected', function() {
-      beforeEach(function() {
-        controller.findProperty('serviceName', 'MAPREDUCE').set('isSelected', true);
-      })
-
-      it('should return false if Hive is selected and MapReduce is selected', function() {
-        controller.findProperty('serviceName', 'HIVE').set('isSelected', true);
-        expect(controller.needToAddMapReduce()).to.equal(false);
-      })
-      it('should return false if Pig is selected and MapReduce is selected', function() {
-        controller.findProperty('serviceName', 'PIG').set('isSelected', true);
-        expect(controller.needToAddMapReduce()).to.equal(false);
-      })
-      it('should return false if Oozie is selected and MapReduce is selected', function() {
-        controller.findProperty('serviceName', 'OOZIE').set('isSelected', true);
-        expect(controller.needToAddMapReduce()).to.equal(false);
-      })
-    })
-
-  })
-
-  describe('#saveSelectedServiceNamesToDB', function() {
-
-    beforeEach(function() {
-      DEFAULT_SERVICES.forEach(function(serviceName) {
-        controller.findProperty('serviceName', serviceName).set('isSelected', true);
-      });
-      OPTIONAL_SERVICES.forEach(function(serviceName) {
-        controller.findProperty('serviceName', serviceName).set('isSelected', true);
-      });
-    });
-
-    it('should store the selected service names in App.db.selectedServiceNames', function() {
-      App.db.setLoginName('tester');
-      App.db.setClusterName('test');
-      controller.saveSelectedServiceNamesToDB();
-      // console.log('controller length=' + controller.get('length'));
-      var selectedServiceNames = App.db.getSelectedServiceNames();
-      // console.log('service length=' + selectedServiceNames.get('length'));
-      expect(selectedServiceNames.length === DEFAULT_SERVICES.length + OPTIONAL_SERVICES.length).to.equal(true);
-    })
-
-  })
-
-})*/
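
The MapReduce cases above encode a single dependency rule: Hive, Pig, and Oozie each run on MapReduce, so the wizard must add it whenever one of them is selected without it. A sketch of that rule (hypothetical helper, assuming Ember's findProperty array extension):

    function needToAddMapReduce(services) {
      var dependentSelected = ['HIVE', 'PIG', 'OOZIE'].some(function (name) {
        var service = services.findProperty('serviceName', name);
        return service && service.get('isSelected');
      });
      var mapReduce = services.findProperty('serviceName', 'MAPREDUCE');
      return dependentSelected && !(mapReduce && mapReduce.get('isSelected'));
    }
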
diff --git a/branch-1.2/ambari-web/test/installer/step5_test.js b/branch-1.2/ambari-web/test/installer/step5_test.js
deleted file mode 100644
index 2368937..0000000
--- a/branch-1.2/ambari-web/test/installer/step5_test.js
+++ /dev/null
@@ -1,184 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var Ember = require('ember');
-var App = require('app');
-require('controllers/wizard/step5_controller');
-
-/*
-describe('App.InstallerStep5Controller', function () {
-  var controller = App.InstallerStep5Controller.create();
-  controller.get("selectedServices").pushObject({service_name: 'ZOOKEEPER'});
-  var cpu = 2, memory = 4;
-  var HOST = ['host1', 'host2', 'host3', 'host4', 'host5'];
-  var hosts = [];
-  HOST.forEach(function (_host) {
-    controller.get('hosts').pushObject(Ember.Object.create({
-      host_name: _host,
-      cpu: cpu,
-      memory: memory
-    }));
-  });
-
-  var componentObj = Ember.Object.create({
-    component_name: 'ZooKeeper',
-    selectedHost: 'host2', // call the method that plays selectNode algorithm or fetches from server
-    availableHosts: []
-  });
-  componentObj.set('availableHosts', controller.get('hosts').slice(0));
-  componentObj.set('zId', 1);
-  componentObj.set("showAddControl", true);
-  componentObj.set("showRemoveControl", false);
-  controller.get("selectedServicesMasters").pushObject(componentObj);
-
-
-  describe('#getAvailableHosts()', function () {
-    it('should generate available hosts for a new zookeeper service', function () {
-      var hostsForNewZookeepers = controller.getAvailableHosts("ZooKeeper"),
-        ok = true, i = 0, masters = null;
-
-      //test that the hosts found do not have a ZooKeeper master assigned to them
-      for (i = 0; i < hostsForNewZookeepers.get("length"); i++) {
-        masters = controller.get("selectedServicesMasters").filterProperty(hostsForNewZookeepers[i].get("host_name"));
-        if (masters.findProperty("component_name", "ZooKeeper")) {
-          ok = false;
-          break;
-        }
-      }
-
-      expect(ok).to.equal(true);
-    })
-
-    it('should return all hosts for services other than ZooKeeper', function () {
-      var hostsForNewZookeepers = controller.getAvailableHosts("");
-
-      expect(hostsForNewZookeepers.get("length")).to.equal(controller.get("hosts.length"));
-    })
-  })
-
-  describe('#assignHostToMaster()', function () {
-    it('should assign the selected host to the non-ZooKeeper master service', function () {
-      //test non-zookeeper master
-      var SERVICE_MASTER = "NameNode",
-        HOST = "host4", ZID, status;
-      var nonZookeeperObj = Ember.Object.create({
-        component_name: SERVICE_MASTER,
-        selectedHost: HOST, // call the method that plays selectNode algorithm or fetches from server
-        availableHosts: []
-      });
-      controller.get("selectedServicesMasters").pushObject(nonZookeeperObj);
-      controller.assignHostToMaster(SERVICE_MASTER, HOST);
-      expect(controller.get("selectedServicesMasters").findProperty("component_name", SERVICE_MASTER).get("selectedHost")).to.equal(HOST);
-    })
-
-    it('should assign the selected host to the ZooKeeper master service', function () {
-      //test zookeeper master
-      var SERVICE_MASTER = "ZooKeeper",
-        HOST = "host4", ZID = 2;
-
-      //test zookeeper master assignment with
-      if (controller.addZookeepers()) {
-        controller.assignHostToMaster(SERVICE_MASTER, HOST, ZID);
-        expect(controller.get("selectedServicesMasters").filterProperty("component_name", "ZooKeeper").findProperty("zId", ZID).get("selectedHost")).to.equal(HOST);
-      }
-    })
-  })
-
-  describe('#addZookeepers()', function () {
-
-    it('should add a new ZooKeeper', function () {
-      var newLength = 0;
-      if (controller.get("selectedServices").mapProperty("service_name").contains("ZOOKEEPER")
-        && controller.get("selectedServicesMasters").filterProperty("component_name", "ZooKeeper").get("length") < controller.get("hosts.length")) {
-        newLength = controller.get("selectedServicesMasters").filterProperty("component_name", "ZooKeeper").get("length");
-        controller.addZookeepers();
-        expect(controller.get("selectedServicesMasters").filterProperty("component_name", "ZooKeeper").get("length")).to.equal(newLength + 1);
-      }
-    })
-
-    it('should add ZooKeepers up to the number of hosts', function () {
-
-      var currentZooKeepers = controller.get("selectedServicesMasters").filterProperty("component_name", "ZooKeeper").length,
-        success = true;
-
-      //add ZooKeepers as long as possible
-      if (currentZooKeepers) {
-
-        while (success) {
-          success = controller.addZookeepers();
-        }
-        var services = controller.get("selectedServicesMasters").filterProperty("component_name", "ZooKeeper");
-        var length = services.length;
-        expect(controller.get("selectedServicesMasters").filterProperty("component_name", "ZooKeeper").length).to.equal(controller.get("hosts.length"));
-      }
-    })
-  })
-
-  describe('#removeZookeepers()', function () {
-    it('should remove a ZooKeeper', function () {
-      if (controller.get("selectedServices").mapProperty("service_name").contains("ZOOKEEPER")) {
-        if (controller.addZookeepers()) {
-          var newLength = controller.get("selectedServicesMasters").filterProperty("component_name", "ZooKeeper").get("length");
-          controller.removeZookeepers(2);
-          expect(controller.get("selectedServicesMasters").filterProperty("component_name", "ZooKeeper").get("length")).to.equal(newLength - 1);
-        }
-      }
-    })
-
-    it('should fail to remove a ZooKeeper if there is only 1', function () {
-      var currentZooKeepers = controller.get("selectedServicesMasters").filterProperty("component_name", "ZooKeeper").length,
-        success = true;
-      //remove ZooKeepers as long as possible
-
-      if (currentZooKeepers) {
-        while (success) {
-          success = controller.removeZookeepers(controller.get("selectedServicesMasters").filterProperty("component_name", "ZooKeeper").get("lastObject.zId"));
-        }
-        expect(controller.get("selectedServicesMasters").filterProperty("component_name", "ZooKeeper").get("length")).to.equal(1);
-      }
-    })
-
-  })
-
-  describe('#rebalanceZookeeperHosts()', function () {
-
-    it('should rebalance hosts for ZooKeeper', function () {
-      //assign a host to a zookeeper and then rebalance the available hosts for the other zookeepers
-      var zookeepers = controller.get("selectedServicesMasters").filterProperty("component_name", "ZooKeeper"),
-        aZookeeper = null, aHost = null, i = 0, ok = true;
-
-      if (zookeepers.get("length") > 1) {
-        aZookeeper = controller.get("selectedServicesMasters").filterProperty("component_name", "ZooKeeper").findProperty("zId", 1);
-        aHost = aZookeeper.get("availableHosts")[0];
-        aZookeeper.set("selectedHost", aHost.get("host_name"));
-
-        controller.rebalanceZookeeperHosts();
-
-        for (i = 0; i < zookeepers.get("length"); i++) {
-          if (zookeepers[i].get("availableHosts").mapProperty("host_name").contains(aHost)) {
-            ok = false;
-            break;
-          }
-        }
-
-        expect(ok).to.equal(true);
-      }
-    })
-  })
-
-})*/
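
The ZooKeeper cases above all turn on one invariant: a host carries at most one ZooKeeper master, so the pool of assignable hosts shrinks as masters are added, and rebalancing must drop already-taken hosts from every other master's candidate list. A sketch of the host-pool computation (hypothetical helper):

    function availableZooKeeperHosts(hosts, masters) {
      var taken = masters
        .filterProperty('component_name', 'ZooKeeper')
        .mapProperty('selectedHost');
      return hosts.filter(function (host) {
        return taken.indexOf(host.get('host_name')) === -1;
      });
    }
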
diff --git a/branch-1.2/ambari-web/test/installer/step6_test.js b/branch-1.2/ambari-web/test/installer/step6_test.js
deleted file mode 100644
index 393c9bf..0000000
--- a/branch-1.2/ambari-web/test/installer/step6_test.js
+++ /dev/null
@@ -1,154 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var Ember = require('ember');
-var App = require('app');
-require('controllers/wizard/step6_controller');
-
-/*
-describe('App.InstallerStep6Controller', function () {
-
-  var HOSTS = [ 'host1', 'host2', 'host3', 'host4' ];
-  //App.InstallerStep6Controller.set.rawHosts = HOSTS;
-  var controller = App.InstallerStep6Controller.create();
-  controller.set('showHbase', false);
-  HOSTS.forEach(function (_hostName) {
-    controller.get('hosts').pushObject(Ember.Object.create({
-      hostname: _hostName,
-      isDataNode: true,
-      isTaskTracker: true,
-      isRegionServer: true
-    }));
-  });
-
-
-
-describe('#selectAllDataNodes()', function () {
-  controller.get('hosts').setEach('isDataNode', false);
-
-  it('should set isDataNode to true on all hosts', function () {
-    controller.selectAllDataNodes();
-    expect(controller.get('hosts').everyProperty('isDataNode', true)).to.equal(true);
-  })
-})
-
-describe('#selectAllTaskTrackers()', function () {
-  it('should set isTaskTracker to true on all hosts', function () {
-    controller.selectAllTaskTrackers();
-    expect(controller.get('hosts').everyProperty('isTaskTracker', true)).to.equal(true);
-  })
-})
-
-describe('#selectAllRegionServers()', function () {
-  it('should set isRegionServer to true on all hosts', function () {
-    controller.selectAllRegionServers();
-    expect(controller.get('hosts').everyProperty('isRegionServer', true)).to.equal(true);
-  })
-})
-
-describe('#isAllDataNodes()', function () {
-
-  beforeEach(function () {
-    controller.get('hosts').setEach('isDataNode', true);
-  })
-
-  it('should return true if isDataNode is true for all services', function () {
-    expect(controller.get('isAllDataNodes')).to.equal(true);
-  })
-
-  it('should return false if isDataNode is false for one host', function () {
-    controller.get('hosts')[0].set('isDataNode', false);
-    expect(controller.get('isAllDataNodes')).to.equal(false);
-  })
-})
-
-describe('#isAllTaskTrackers()', function () {
-
-  beforeEach(function () {
-    controller.get('hosts').setEach('isTaskTracker', true);
-  })
-
-  it('should return true if isTaskTracker is true for all hosts', function () {
-    expect(controller.get('isAllTaskTrackers')).to.equal(true);
-  })
-
-  it('should return false if isTaskTracker is false for one host', function () {
-    controller.get('hosts')[0].set('isTaskTracker', false);
-    expect(controller.get('isAllTaskTrackers')).to.equal(false);
-  })
-
-})
-
-describe('#isAllRegionServers()', function () {
-
-  beforeEach(function () {
-    controller.get('hosts').setEach('isRegionServer', true);
-  });
-
-  it('should return true if isRegionServer is true for all hosts', function () {
-    expect(controller.get('isAllRegionServers')).to.equal(true);
-  })
-
-  it('should return false if isRegionServer is false for one host', function () {
-    controller.get('hosts')[0].set('isRegionServer', false);
-    expect(controller.get('isAllRegionServers')).to.equal(false);
-  })
-
-})
-
-describe('#validate()', function () {
-
-  beforeEach(function () {
-    controller.get('hosts').setEach('isDataNode', true);
-    controller.get('hosts').setEach('isTaskTracker', true);
-    controller.get('hosts').setEach('isRegionServer', true);
-  });
-
-  it('should return false if isDataNode is false for all hosts', function () {
-    controller.get('hosts').setEach('isDataNode', false);
-    expect(controller.validate()).to.equal(false);
-  })
-
-  it('should return false if isTaskTracker is false for all hosts', function () {
-    controller.get('hosts').setEach('isTaskTracker', false);
-    expect(controller.validate()).to.equal(false);
-  })
-
-  it('should return false if isRegionServer is false for all hosts', function () {
-    controller.get('hosts').setEach('isRegionServer', false);
-    expect(controller.validate()).to.equal(false);
-  })
-
-  it('should return true if isDataNode, isTaskTracker, and isRegionServer are true for all hosts', function () {
-    expect(controller.validate()).to.equal(true);
-  })
-
-  it('should return true if isDataNode, isTaskTracker, and isRegionServer are true for only one host', function () {
-    controller.get('hosts').setEach('isDataNode', false);
-    controller.get('hosts').setEach('isTaskTracker', false);
-    controller.get('hosts').setEach('isRegionServer', false);
-    var host = controller.get('hosts')[0];
-    host.set('isDataNode', true);
-    host.set('isTaskTracker', true);
-    host.set('isRegionServer', true);
-    expect(controller.validate()).to.equal(true);
-  })
-
-})
-
-})*/
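
The validation cases above reduce to one condition: the step passes as long as every slave role has at least one host assigned, even if a single host carries all three. A sketch, assuming Ember's someProperty array extension:

    function validateSlaveAssignments(hosts) {
      return hosts.someProperty('isDataNode', true)
          && hosts.someProperty('isTaskTracker', true)
          && hosts.someProperty('isRegionServer', true);
    }
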
diff --git a/branch-1.2/ambari-web/test/installer/step7_test.js b/branch-1.2/ambari-web/test/installer/step7_test.js
deleted file mode 100644
index 5be7d50..0000000
--- a/branch-1.2/ambari-web/test/installer/step7_test.js
+++ /dev/null
@@ -1,25 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-require('controllers/wizard/step7_controller');
-
-/*
-describe('App.InstallerStep7Controller', function () {
-
-})*/
diff --git a/branch-1.2/ambari-web/test/installer/step9_test.js b/branch-1.2/ambari-web/test/installer/step9_test.js
deleted file mode 100644
index 580e6b0..0000000
--- a/branch-1.2/ambari-web/test/installer/step9_test.js
+++ /dev/null
@@ -1,165 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-var Ember = require('ember');
-var App = require('app');
-require('models/hosts');
-require('controllers/wizard/step9_controller');
-
-/*describe('App.InstallerStep9Controller', function () {
-  //var controller = App.InstallerStep3Controller.create();
-
-  describe('#isStepFailed', function () {
-    var controller = App.InstallerStep9Controller.create();
-    it('should return true if even a single action of a role with 100% success factor fails', function () {
-      var polledData = new Ember.Set([
-        {
-          actionId: '1',
-          name: '192.168.1.1',
-          status: 'completed',
-          sf: '100',
-          role: 'DataNode',
-          message: 'completed 30%'
-        },
-        {
-          actionId: '2',
-          name: '192.168.1.2',
-          status: 'completed',
-          sf: '100',
-          role: 'DataNode',
-          message: 'completed 20%'
-        },
-        {
-          actionId: '3',
-          name: '192.168.1.3',
-          status: 'completed',
-          sf: '100',
-          role: 'DataNode',
-          message: 'completed 30%'
-        },
-        {
-          actionId: '4',
-          name: '192.168.1.4',
-          status: 'failed',
-          sf: '100',
-          role: 'DataNode',
-          message: 'completed 40%'
-        }
-      ]);
-
-
-      expect(controller.isStepFailed(polledData)).to.equal(true);
-
-    })
-
-    it('should return false if action of a role fails but with less percentage than success factor of the role', function () {
-      var polledData = new Ember.Set([
-        {
-          actionId: '1',
-          name: '192.168.1.1',
-          status: 'failed',
-          sf: '30',
-          role: 'DataNode',
-          message: 'completed 30%'
-        },
-        {
-          actionId: '2',
-          name: '192.168.1.2',
-          status: 'failed',
-          sf: '30',
-          role: 'DataNode',
-          message: 'completed 20%'
-        },
-        {
-          actionId: '3',
-          name: '192.168.1.3',
-          status: 'completed',
-          sf: '30',
-          role: 'DataNode',
-          message: 'completed 30%'
-        },
-        {
-          actionId: '4',
-          name: '192.168.1.4',
-          status: 'completed',
-          sf: '30',
-          role: 'DataNode',
-          message: 'completed 40%'
-        }
-      ]);
-
-      expect(controller.isStepFailed(polledData)).to.equal(false);
-
-    })
-
-  })
-
-  describe('#setHostsStatus', function () {
-    var controller = App.InstallerStep9Controller.create();
-    it('sets the status of all hosts in the content to the passed status value', function () {
-      var mockData = new Ember.Set([
-        {
-          actionId: '1',
-          name: '192.168.1.1',
-          status: 'completed',
-          sf: '100',
-          role: 'DataNode',
-          message: 'completed 30%'
-        },
-        {
-          actionId: '2',
-          name: '192.168.1.2',
-          status: 'completed',
-          sf: '100',
-          role: 'DataNode',
-          message: 'completed 20%'
-        },
-        {
-          actionId: '3',
-          name: '192.168.1.3',
-          status: 'completed',
-          sf: '100',
-          role: 'DataNode',
-          message: 'completed 30%'
-        },
-        {
-          actionId: '4',
-          name: '192.168.1.4',
-          status: 'completed',
-          sf: '100',
-          role: 'DataNode',
-          message: 'completed 40%'
-        }
-      ]);
-      mockData.forEach(function(_polledData){
-        controller.content.pushObject(_polledData);
-      });
-
-      controller.setHostsStatus(mockData,'finish');
-      var result = controller.content.everyProperty('status','finish');
-      //console.log('value of pop is: '+ result.pop.actionId);
-      expect(result).to.equal(true);
-
-    })
-  })
-
-
-})*/
-
-
diff --git a/branch-1.2/ambari-web/test/login_test.js b/branch-1.2/ambari-web/test/login_test.js
deleted file mode 100644
index 23c0711..0000000
--- a/branch-1.2/ambari-web/test/login_test.js
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-require('controllers/login_controller');
-
-describe('App.LoginController', function () {
-
-  var loginController = App.LoginController.create();
-
-  describe('#validateCredentials()', function () {
-    /*
-    it('should return undefined if no username is present', function () {
-      loginController.set('loginName', '');
-      expect(loginController.validateCredentials()).to.equal(undefined);
-    })
-    it('should return undefined if no password is present', function () {
-      loginController.set('password', '');
-      expect(loginController.validateCredentials()).to.equal(undefined);
-    })
-    it('should return the user object with the specified username and password (dummy until actual integration)', function () {
-      loginController.set('loginName', 'admin');
-      loginController.set('password', 'admin');
-      expect(loginController.validateCredentials().get('loginName')).to.equal('admin');
-    })
-    */
-  })
-})
diff --git a/branch-1.2/ambari-web/test/main/dashboard_test.js b/branch-1.2/ambari-web/test/main/dashboard_test.js
deleted file mode 100644
index a731ef0..0000000
--- a/branch-1.2/ambari-web/test/main/dashboard_test.js
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
-var App = require('app');
-
-require('models/alert');
-App.Alert.FIXTURES = [{ status: 'ok' }, { status: 'corrupt' }, { status: 'corrupt' }];
-require('controllers/main/dashboard');
- 
-describe('MainDashboard', function () {
- 
-  var controller = App.MainDashboardController.create();
-  
-  describe('#alertsCount', function () {
-    it('should return 2 if 2 alerts have status corrupt', function () {
-      expect(controller.get('alertsCount')).to.equal(2);
-    })
-  })
-})
-*/
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/test/main/host/details_test.js b/branch-1.2/ambari-web/test/main/host/details_test.js
deleted file mode 100644
index 3660f34..0000000
--- a/branch-1.2/ambari-web/test/main/host/details_test.js
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
-var Ember = require('ember');
-var App = require('app');
- 
-require('controllers/main/host/details');
-
-describe('MainHostDetails', function () {
-  var controller = App.MainHostDetailsController.create();
-  controller.content = Ember.Object.create({});
-   
-  describe('#setBack(value)', function () {
-    it('should return true if value is true', function () {
-      controller.setBack(true);
-      expect(controller.get('isFromHosts')).to.equal(true);
-    })
-  })
-  describe('#workStatus positive', function () {
-    it('isStarting should be true if workStatus is true', function () {
-      controller.content.set('workStatus', true);
-      expect(controller.get('isStarting')).to.equal(true);
-    })
-    it('isStopping should be false if workStatus is true', function () {
-      expect(controller.get('isStopping')).to.equal(false);
-    })
-    it('isStarting should be false if workStatus is false', function () {
-      controller.content.set('workStatus', false);
-      expect(controller.get('isStarting')).to.equal(false);
-    })
-    it('isStopping should be true if workStatus is false', function () {
-      expect(controller.get('isStopping')).to.equal(true);
-    })
-  })
-})
-*/
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/test/main/host_test.js b/branch-1.2/ambari-web/test/main/host_test.js
deleted file mode 100644
index 1260935..0000000
--- a/branch-1.2/ambari-web/test/main/host_test.js
+++ /dev/null
@@ -1,85 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
-var App = require('app');
-require('models/cluster');
-require('models/service');
-require('models/pagination');
-require('controllers/main/host');
-
-describe('MainHostController', function () {
-    describe('#sortByName()', function () {
-        it('should change isSort value to true', function () {
-            var mainHostController = App.MainHostController.create();
-            mainHostController.set('isSort', false);
-            mainHostController.sortByName();
-            expect(mainHostController.get('isSort')).to.equal(true);
-        });
-
-
-        it('should invert sortingAsc', function () {
-            var mainHostController = App.MainHostController.create();
-            mainHostController.set('sortingAsc', false);
-            mainHostController.sortByName();
-            expect(mainHostController.get('sortingAsc')).to.equal(true);
-            mainHostController.sortByName();
-            expect(mainHostController.get('sortingAsc')).to.equal(false);
-        })
-    });
-
-
-    describe('#showNextPage, #showPreviousPage()', function () {
-        it('should change rangeStart according to page', function () {
-            var mainHostController = App.MainHostController.create();
-            mainHostController.set('pageSize', 3);
-            mainHostController.showNextPage();
-            expect(mainHostController.get('rangeStart')).to.equal(3);
-            mainHostController.showPreviousPage();
-            expect(mainHostController.get('rangeStart')).to.equal(0);
-        })
-    });
-
-
-    describe('#sortClass()', function () {
-        it('should return \'icon-arrow-down\' if sortingAsc is true', function () {
-            var mainHostController = App.MainHostController.create({});
-            mainHostController.set('sortingAsc', true);
-            expect(mainHostController.get('sortClass')).to.equal('icon-arrow-down');
-        });
-        it('should return \'icon-arrow-up\' if sortingAsc is false', function () {
-            var mainHostController = App.MainHostController.create({});
-            mainHostController.set('sortingAsc', false);
-            expect(mainHostController.get('sortClass')).to.equal('icon-arrow-up');
-        })
-    });
-
-
-    describe('#allChecked', function () {
-        it('should fill the selectedHostsIds array', function () {
-            var mainHostController = App.MainHostController.create();
-            mainHostController.set('allChecked', false);
-            expect(mainHostController.get('selectedHostsIds').length).to.equal(0);
-            mainHostController.set('allChecked', true);
-            expect(!!(mainHostController.get('selectedHostsIds').length)).to.equal(true);
-        })
-    });
-
-
-});
-*/
diff --git a/branch-1.2/ambari-web/test/main/item_test.js b/branch-1.2/ambari-web/test/main/item_test.js
deleted file mode 100644
index 3068bba..0000000
--- a/branch-1.2/ambari-web/test/main/item_test.js
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
-var App = require('app');
-require('views/common/modal_popup');
-require('controllers/main/service/item');
-
-describe('App.MainServiceItemController', function () {
-
-    describe('#showRebalancer', function () {
-        it('should return true if serviceName is hdfs', function () {
-            var mainServiceItemController = App.MainServiceItemController.create({});
-            mainServiceItemController.content.set('serviceName', 'hdfs');
-            expect(mainServiceItemController.get('showRebalancer')).to.equal(true);
-        })
-    })
-})
-*/
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/test/test-helpers.coffee b/branch-1.2/ambari-web/test/test-helpers.coffee
deleted file mode 100644
index 2f136b8..0000000
--- a/branch-1.2/ambari-web/test/test-helpers.coffee
+++ /dev/null
@@ -1,26 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# This file will be automatically required when using `brunch test` command.
-chai = require 'chai'
-sinonChai = require 'sinon-chai'
-chai.use sinonChai
-
-module.exports =
-  expect: chai.expect
-  sinon: require 'sinon'
diff --git a/branch-1.2/ambari-web/test/utils/form_field_test.js b/branch-1.2/ambari-web/test/utils/form_field_test.js
deleted file mode 100644
index 5da13b8..0000000
--- a/branch-1.2/ambari-web/test/utils/form_field_test.js
+++ /dev/null
@@ -1,164 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-require('models/form');
-
-
-/*
- * The formField.isValid property does not update reliably here, so these
- * tests assert on the errorMessage property instead: empty means valid.
- */
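-/*
- * Editor's sketch (hypothetical, not part of the original suite) of the
- * pattern every case below follows:
- *
- *   var f = App.FormField.create();
- *   f.set('displayType', 'number');
- *   f.set('value', '12x');
- *   f.validate();
- *   expect(f.get('errorMessage') === '').to.equal(false); // non-empty => invalid
- */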
-describe('App.FormField', function () {
-
-  describe('#validate()', function () {
-    /*DIGITS TYPE*/
-    it('123456789 is correct digits', function () {
-      var formField = App.FormField.create();
-      formField.set('displayType', 'digits');
-      formField.set('value', 123456789);
-      formField.validate();
-      expect(formField.get('errorMessage') === '').to.equal(true);
-    })
-    it('"a33bc" is incorrect digits', function () {
-      var formField = App.FormField.create();
-      formField.set('displayType', 'digits');
-      formField.set('value', 'a33bc');
-      formField.validate();
-      expect(formField.get('errorMessage') === '').to.equal(false);
-    })
-    /*DIGITS TYPE END*/
-    /*NUMBER TYPE*/
-    it('+1234 is correct number', function () {
-      var formField = App.FormField.create();
-      formField.set('displayType', 'number');
-      formField.set('value', '+1234');
-      formField.validate();
-      expect(formField.get('errorMessage') === '').to.equal(true);
-    })
-    it('-1234 is correct number', function () {
-      var formField = App.FormField.create();
-      formField.set('displayType', 'number');
-      formField.set('value', '-1234');
-      formField.validate();
-      expect(formField.get('errorMessage') === '').to.equal(true);
-    })
-    it('-1.23.6 is incorrect number', function () {
-      var formField = App.FormField.create();
-      formField.set('displayType', 'number');
-      formField.set('value', '-1.23.6');
-      formField.validate();
-      expect(formField.get('errorMessage') === '').to.equal(false);
-    })
-    it('+1.6 is correct number', function () {
-      var formField = App.FormField.create();
-      formField.set('displayType', 'number');
-      formField.set('value', +1.6);
-      formField.validate();
-      expect(formField.get('errorMessage') === '').to.equal(true);
-    })
-    it('-1.6 is correct number', function () {
-      var formField = App.FormField.create();
-      formField.set('displayType', 'number');
-      formField.set('value', -1.6);
-      formField.validate();
-      expect(formField.get('errorMessage') === '').to.equal(true);
-    })
-    it('1.6 is correct number', function () {
-      var formField = App.FormField.create();
-      formField.set('displayType', 'number');
-      formField.set('value', 1.6);
-      formField.validate();
-      expect(formField.get('errorMessage') === '').to.equal(true);
-    })
-    it('-.356 is correct number', function () {
-      var formField = App.FormField.create();
-      formField.set('displayType', 'number');
-      formField.set('value', '-.356');
-      formField.validate();
-      expect(formField.get('errorMessage') === '').to.equal(true);
-    })
-    it('+.356 is correct number', function () {
-      var formField = App.FormField.create();
-      formField.set('displayType', 'number');
-      formField.set('value', '+.356');
-      formField.validate();
-      expect(formField.get('errorMessage') === '').to.equal(true);
-    })
-    it('-1. is incorrect number', function () {
-      var formField = App.FormField.create();
-      formField.set('displayType', 'number');
-      formField.set('value', '-1.');
-      formField.validate();
-      expect(formField.get('errorMessage') === '').to.equal(false);
-    })
-    it('+1. is incorrect number', function () {
-      var formField = App.FormField.create();
-      formField.set('displayType', 'number');
-      formField.set('value', '+1.');
-      formField.validate();
-      expect(formField.get('errorMessage') === '').to.equal(false);
-    })
-    it('1. is incorrect number', function () {
-      var formField = App.FormField.create();
-      formField.set('displayType', 'number');
-      formField.set('value', '1.');
-      formField.validate();
-      expect(formField.get('errorMessage') === '').to.equal(false);
-    })
-    it('-1,23,6 is incorrect number', function () {
-      var formField = App.FormField.create();
-      formField.set('displayType', 'number');
-      formField.set('value', '-1,23,6');
-      formField.validate();
-      expect(formField.get('errorMessage') === '').to.equal(false);
-    })
-    it('-1234567890 is correct number', function () {
-      var formField = App.FormField.create();
-      formField.set('displayType', 'number');
-      formField.set('value', '-1234567890');
-      formField.validate();
-      expect(formField.get('errorMessage') === '').to.equal(true);
-    })
-    it('+1234567890 is correct number', function () {
-      var formField = App.FormField.create();
-      formField.set('displayType', 'number');
-      formField.set('value', '+1234567890');
-      formField.validate();
-      expect(formField.get('errorMessage') === '').to.equal(true);
-    })
-    it('123eed is incorrect number', function () {
-      var formField = App.FormField.create();
-      formField.set('displayType', 'number');
-      formField.set('value', '123eed');
-      formField.validate();
-      expect(formField.get('errorMessage') === '').to.equal(false);
-    })
-    /*NUMBER TYPE END*/
-    /*REQUIRE*/
-    it('Required field shouldn\'t be empty', function () {
-      var formField = App.FormField.create();
-      formField.set('displayType', 'string');
-      formField.set('value', '');
-      formField.set('isRequired', true);
-      formField.validate();
-      expect(formField.get('errorMessage') === '').to.equal(false);
-    })
-    /*REQUIRE END*/
-
-  })
-})
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/test/utils/validator_test.js b/branch-1.2/ambari-web/test/utils/validator_test.js
deleted file mode 100644
index d5d6a3f..0000000
--- a/branch-1.2/ambari-web/test/utils/validator_test.js
+++ /dev/null
@@ -1,202 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var validator = require('utils/validator');
-
-describe('validator', function () {
-
-  describe('#isValidEmail(value)', function () {
-    it('should return false if value is null', function () {
-      expect(validator.isValidEmail(null)).to.equal(false);
-    })
-    it('should return false if value is ""', function () {
-      expect(validator.isValidEmail('')).to.equal(false);
-    })
-    it('should return false if value is "a.com"', function () {
-      expect(validator.isValidEmail('a.com')).to.equal(false);
-    })
-    it('should return false if value is "@a.com"', function () {
-      expect(validator.isValidEmail('@a.com')).to.equal(false);
-    })
-    it('should return false if value is "a@.com"', function () {
-      expect(validator.isValidEmail('a@.com')).to.equal(false);
-    })
-    it('should return true if value is "a@a.com"', function () {
-      expect(validator.isValidEmail('a@a.com')).to.equal(true);
-    })
-    it('should return true if value is "user@a.b.com"', function () {
-      expect(validator.isValidEmail('user@a.b.com')).to.equal(true);
-    })
-  })
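-
-  /*
-   * Editor's sketch (hypothetical, not the shipped utils/validator): the
-   * email cases above fit a minimal local@domain.tld check such as:
-   *
-   *   var EMAIL_RE = /^[^@\s]+@[^@\s.]+(\.[^@\s.]+)+$/;
-   */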
-
-  describe('#isValidInt(value)', function () {
-    it('should return false if value is null', function () {
-      expect(validator.isValidInt(null)).to.equal(false);
-    })
-    it('should return false if value is ""', function () {
-      expect(validator.isValidInt('')).to.equal(false);
-    })
-    it('should return false if value is "abc"', function () {
-      expect(validator.isValidInt('abc')).to.equal(false);
-    })
-    it('should return false if value is "0xff"', function () {
-      expect(validator.isValidInt('0xff')).to.equal(false);
-    })
-    it('should return false if value is " 1""', function () {
-      expect(validator.isValidInt(' 1')).to.equal(false);
-    })
-    it('should return false if value is "1 "', function () {
-      expect(validator.isValidInt('1 ')).to.equal(false);
-    })
-    it('should return true if value is "10"', function () {
-      expect(validator.isValidInt('10')).to.equal(true);
-    })
-    it('should return true if value is "-123"', function () {
-      expect(validator.isValidInt('-123')).to.equal(true);
-    })
-    it('should return true if value is "0"', function () {
-      expect(validator.isValidInt('0')).to.equal(true);
-    })
-    it('should return true if value is 10', function () {
-      expect(validator.isValidInt(10)).to.equal(true);
-    })
-    it('should return true if value is -123', function () {
-      expect(validator.isValidInt(-123)).to.equal(true);
-    })
-    it('should return true if value is 0', function () {
-      expect(validator.isValidInt(0)).to.equal(true);
-    })
-  })
-
-  describe('#isValidFloat(value)', function () {
-    it('should return false if value is null', function () {
-      expect(validator.isValidFloat(null)).to.equal(false);
-    })
-    it('should return false if value is ""', function () {
-      expect(validator.isValidFloat('')).to.equal(false);
-    })
-    it('should return false if value is "abc"', function () {
-      expect(validator.isValidFloat('abc')).to.equal(false);
-    })
-    it('should return false if value is "0xff"', function () {
-      expect(validator.isValidFloat('0xff')).to.equal(false);
-    })
-    it('should return false if value is " 1""', function () {
-      expect(validator.isValidFloat(' 1')).to.equal(false);
-    })
-    it('should return false if value is "1 "', function () {
-      expect(validator.isValidFloat('1 ')).to.equal(false);
-    })
-    it('should return true if value is "10"', function () {
-      expect(validator.isValidFloat('10')).to.equal(true);
-    })
-    it('should return true if value is "-123"', function () {
-      expect(validator.isValidFloat('-123')).to.equal(true);
-    })
-    it('should return true if value is "0"', function () {
-      expect(validator.isValidFloat('0')).to.equal(true);
-    })
-    it('should return true if value is 10', function () {
-      expect(validator.isValidFloat(10)).to.equal(true);
-    })
-    it('should return true if value is -123', function () {
-      expect(validator.isValidFloat(-123)).to.equal(true);
-    })
-    it('should return true if value is 0', function () {
-      expect(validator.isValidFloat(0)).to.equal(true);
-    })
-    it('should return true if value is "0.0"', function () {
-      expect(validator.isValidFloat("0.0")).to.equal(true);
-    })
-    it('should return true if value is "10.123"', function () {
-      expect(validator.isValidFloat("10.123")).to.equal(true);
-    })
-    it('should return true if value is "-10.123"', function () {
-      expect(validator.isValidFloat("-10.123")).to.equal(true);
-    })
-    it('should return true if value is 10.123', function () {
-      expect(validator.isValidFloat(10.123)).to.equal(true);
-    })
-    it('should return true if value is -10.123', function () {
-      expect(validator.isValidFloat(-10.123)).to.equal(true);
-    })
-
-  })
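-
-  /*
-   * Editor's sketch (hypothetical, not the actual utils/validator source):
-   * the int/float cases above are consistent with strict whole-string
-   * patterns applied to the stringified value, e.g.:
-   *
-   *   var INT_RE = /^-?\d+$/;            // rejects ' 1', '1 ', '0xff', 'abc'
-   *   var FLOAT_RE = /^-?\d+(\.\d+)?$/;  // also accepts '10.123', '-10.123'
-   */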
-  /*describe('#isIpAddress(value)', function () {
-    it('"127.0.0.1" - valid IP', function () {
-      expect(validator.isIpAddress('127.0.0.1')).to.equal(true);
-    })
-    it('"227.3.67.196" - valid IP', function () {
-      expect(validator.isIpAddress('227.3.67.196')).to.equal(true);
-    })
-    it('"327.0.0.0" - invalid IP', function () {
-      expect(validator.isIpAddress('327.0.0.0')).to.equal(false);
-    })
-    it('"127.0.0." - invalid IP', function () {
-      expect(validator.isIpAddress('127.0.0.')).to.equal(false);
-    })
-    it('"127.0." - invalid IP', function () {
-      expect(validator.isIpAddress('127.0.')).to.equal(false);
-    })
-    it('"127" - invalid IP', function () {
-      expect(validator.isIpAddress('127')).to.equal(false);
-    })
-    it('"127.333.0.1" - invalid IP', function () {
-      expect(validator.isIpAddress('127.333.0.1')).to.equal(false);
-    })
-    it('"127.0.333.1" - invalid IP', function () {
-      expect(validator.isIpAddress('127.0.333.1')).to.equal(false);
-    })
-    it('"127.0.1.333" - invalid IP', function () {
-      expect(validator.isIpAddress('127.0.1.333')).to.equal(false);
-    })
-    it('"127.0.0.0:45555" - valid IP', function () {
-      expect(validator.isIpAddress('127.0.0.0:45555')).to.equal(true);
-    })
-    it('"327.0.0.0:45555" - invalid IP', function () {
-      expect(validator.isIpAddress('327.0.0.0:45555')).to.equal(false);
-    })
-    it('"0.0.0.0" - invalid IP', function () {
-      expect(validator.isIpAddress('0.0.0.0')).to.equal(false);
-    })
-    it('"0.0.0.0:12" - invalid IP', function () {
-      expect(validator.isIpAddress('0.0.0.0:12')).to.equal(false);
-    })
-    it('"1.0.0.0:0" - invalid IP', function () {
-      expect(validator.isIpAddress('1.0.0.0:0')).to.equal(false);
-    })
-  })*/
-  describe('#isDomainName(value)', function () {
-    it('"google.com" - valid Domain Name', function () {
-      expect(validator.isDomainName('google.com')).to.equal(true);
-    })
-    it('"google" - invalid Domain Name', function () {
-      expect(validator.isDomainName('google')).to.equal(false);
-    })
-    it('"123.123" - invalid Domain Name', function () {
-      expect(validator.isDomainName('123.123')).to.equal(false);
-    })
-    it('"4goog.le" - valid Domain Name', function () {
-      expect(validator.isDomainName('4goog.le')).to.equal(true);
-    })
-    it('"55454" - invalid Domain Name', function () {
-      expect(validator.isDomainName('55454')).to.equal(false);
-    })
-  })
-
-})
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/vendor/scripts/bootstrap-combobox.js b/branch-1.2/ambari-web/vendor/scripts/bootstrap-combobox.js
deleted file mode 100644
index 09c2fba..0000000
--- a/branch-1.2/ambari-web/vendor/scripts/bootstrap-combobox.js
+++ /dev/null
@@ -1,250 +0,0 @@
-/* =============================================================
- * bootstrap-combobox.js v1.0.0
- * =============================================================
- * Copyright 2012 Daniel Farrell
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============================================================ */
-
-!function ($) {
-
-  "use strict"
-
-  var Combobox = function (element, options) {
-    this.options = $.extend({}, $.fn.combobox.defaults, options)
-    this.$container = this.setup(element)
-    this.$element = this.$container.find('input')
-    this.$button = this.$container.find('.dropdown-toggle')
-    this.$target = this.$container.find('select')
-    this.matcher = this.options.matcher || this.matcher
-    this.sorter = this.options.sorter || this.sorter
-    this.highlighter = this.options.highlighter || this.highlighter
-    this.$menu = $(this.options.menu).appendTo('body')
-    this.placeholder = this.options.placeholder || this.$target.attr('data-placeholder')
-    this.$element.attr('placeholder', this.placeholder)
-    this.shown = false
-    this.selected = false
-    this.refresh()
-    this.listen()
-  }
-
-  /* NOTE: COMBOBOX EXTENDS BOOTSTRAP-TYPEAHEAD.js
-   ========================================== */
-
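-  /* Typical initialization (editor's sketch; the markup is assumed): the
-   * plugin defined at the bottom of this file wraps an existing <select>:
-   *
-   *   $('select').combobox()                              // default options
-   *   $('select').combobox({placeholder: 'Pick a host'})  // option read in the constructor
-   */
-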
-  Combobox.prototype = $.extend({}, $.fn.typeahead.Constructor.prototype, {
-
-    constructor:Combobox, setup:function (element) {
-      var select = $(element)
-        , combobox = $(this.options.template)
-      select.before(combobox)
-      select.detach()
-      combobox.append(select)
-      return combobox
-    },
-    parse:function () {
-      var map = {}
-        , source = []
-        , selected = false
-      this.$target.find('option').each(function () {
-        var option = $(this)
-        map[option.text()] = option.val()
-        source.push(option.text())
-        if (option.attr('selected')) selected = option.html()
-      })
-      this.map = map
-      if (selected) {
-        this.$element.val(selected)
-        this.$container.addClass('combobox-selected')
-        this.selected = true
-      }
-      return source
-    },
-    toggle:function () {
-      if (this.$container.hasClass('combobox-selected')) {
-        this.clearTarget()
-        this.$element.val('').focus()
-      } else {
-        if (this.shown) {
-          this.hide()
-        } else {
-          this.lookup()
-        }
-      }
-    },
-    clearTarget:function () {
-      this.$target.val('')
-      this.$container.removeClass('combobox-selected')
-      this.selected = false
-      this.$target.trigger('change')
-    },
-    refresh:function () {
-      this.source = this.parse()
-      this.options.items = this.source.length
-    }
-
-    // modified typeahead function adding container and target handling
-    , select:function () {
-      var val = this.$menu.find('.active').attr('data-value')
-      this.$element.val(val)
-      this.$container.addClass('combobox-selected')
-      this.$target.val(this.map[val])
-      this.$target.trigger('change')
-      this.selected = true
-      return this.hide()
-    }
-
-    // modified typeahead function removing the blank handling
-    , lookup:function (event) {
-      var that = this
-        , items
-        , q
-
-      this.query = this.$element.val()
-
-      items = $.grep(this.source, function (item) {
-        if (that.matcher(item)) return item
-      })
-
-      items = this.sorter(items)
-
-      if (!items.length) {
-        return this.shown ? this.hide() : this
-      }
-
-      return this.render(items.slice(0, this.options.items)).show()
-    }
-
-    // modified typeahead function adding button handling
-    , listen:function () {
-      this.$element
-        .on('blur', $.proxy(this.blur, this))
-        .on('keypress', $.proxy(this.keypress, this))
-        .on('keyup', $.proxy(this.keyup, this))
-
-      if ($.browser.webkit || $.browser.msie) {
-        this.$element.on('keydown', $.proxy(this.keypress, this))
-      }
-
-//      hide menu hack
-      this.$button.on('mouseenter', $.proxy(this.addClassOnMouseEnter, this.$button))
-        .on('mouseleave', $.proxy(this.addClassOnMouseLeave, this.$button));
-
-      $(window).on('click', $.proxy(this.hideList, this));
-//      hide menu hack end
-
-
-      this.$menu
-        .on('click', $.proxy(this.click, this))
-        .on('mouseenter', 'li', $.proxy(this.mouseenter, this))
-
-      this.$button
-        .on('click', $.proxy(this.toggle, this))
-    }
-
-    // modified typeahead function to clear on type and prevent on moving around
-    , keyup:function (e) {
-      switch (e.keyCode) {
-        case 40: // down arrow
-        case 39: // right arrow
-        case 38: // up arrow
-        case 37: // left arrow
-        case 36: // home
-        case 35: // end
-        case 16: // shift
-          break
-
-        case 9: // tab
-        case 13: // enter
-          if (!this.shown) return
-          this.select()
-          break
-
-        case 27: // escape
-          if (!this.shown) return
-          this.hide()
-          break
-
-        default:
-          this.clearTarget()
-          this.lookup()
-      }
-
-      e.stopPropagation()
-      e.preventDefault()
-    },
-
-    addClassOnMouseEnter:function (e) {
-      console.warn("Enter");
-      this.addClass('hover');
-    },
-
-    addClassOnMouseLeave:function (e) {
-      console.warn("Leave");
-      this.removeClass('hover');
-    },
-
-    // modified typeahead function to only hide menu if it is visible
-    blur:function (e) {
-      var that = this
-      e.stopPropagation()
-      e.preventDefault()
-      var val = this.$element.val()
-      if (!this.selected && val != "") {
-        this.$element.val("")
-        this.$target.val("").trigger('change')
-      }
-      if (this.shown) {
-        setTimeout(function () {
-          that.hide()
-        }, 150)
-      }
-    },
-
-    /**
-     * hide list
-     * @param e
-     */
-    hideList:function (e) {
-      if (!this.$button.is(".hover")) {
-        var that = this;
-        if (this.shown) {
-          setTimeout(function () {
-            that.hide()
-          }, 150)
-        }
-      }
-    }
-  })
-
-  /* COMBOBOX PLUGIN DEFINITION
-   * =========================== */
-
-  $.fn.combobox = function (option) {
-    return this.each(function () {
-      var $this = $(this)
-        , data = $this.data('combobox')
-        , options = typeof option == 'object' && option
-      if (!data) $this.data('combobox', (data = new Combobox(this, options)))
-      if (typeof option == 'string') data[option]()
-    })
-  }
-
-  $.fn.combobox.defaults = {
-    template:'<div class="combobox-container"><input type="text" autocomplete="off" /><span class="add-on btn dropdown-toggle" data-dropdown="dropdown"><span class="caret"/><span class="combobox-clear"><i class="icon-remove"/></span></span></div>',
-    menu:'<ul class="typeahead typeahead-long dropdown-menu"></ul>',
-    item:'<li><a href="#"></a></li>', placeholder:null
-  }
-
-  $.fn.combobox.Constructor = Combobox
-
-}(window.jQuery);
diff --git a/branch-1.2/ambari-web/vendor/scripts/bootstrap.js b/branch-1.2/ambari-web/vendor/scripts/bootstrap.js
deleted file mode 100644
index f73fcb8..0000000
--- a/branch-1.2/ambari-web/vendor/scripts/bootstrap.js
+++ /dev/null
@@ -1,2027 +0,0 @@
-/* ===================================================
- * bootstrap-transition.js v2.1.1
- * http://twitter.github.com/bootstrap/javascript.html#transitions
- * ===================================================
- * Copyright 2012 Twitter, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ========================================================== */
-
-
-!function ($) {
-
-  $(function () {
-
-    "use strict"; // jshint ;_;
-
-
-    /* CSS TRANSITION SUPPORT (http://www.modernizr.com/)
-     * ======================================================= */
-
-    $.support.transition = (function () {
-
-      var transitionEnd = (function () {
-
-        var el = document.createElement('bootstrap')
-          , transEndEventNames = {
-               'WebkitTransition' : 'webkitTransitionEnd'
-            ,  'MozTransition'    : 'transitionend'
-            ,  'OTransition'      : 'oTransitionEnd otransitionend'
-            ,  'transition'       : 'transitionend'
-            }
-          , name
-
-        for (name in transEndEventNames){
-          if (el.style[name] !== undefined) {
-            return transEndEventNames[name]
-          }
-        }
-
-      }())
-
-      return transitionEnd && {
-        end: transitionEnd
-      }
-
-    })()
-
-  })
-
-}(window.jQuery);/* ==========================================================
- * bootstrap-alert.js v2.1.1
- * http://twitter.github.com/bootstrap/javascript.html#alerts
- * ==========================================================
- * Copyright 2012 Twitter, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ========================================================== */
-
-
-!function ($) {
-
-  "use strict"; // jshint ;_;
-
-
- /* ALERT CLASS DEFINITION
-  * ====================== */
-
-  var dismiss = '[data-dismiss="alert"]'
-    , Alert = function (el) {
-        $(el).on('click', dismiss, this.close)
-      }
-
-  Alert.prototype.close = function (e) {
-    var $this = $(this)
-      , selector = $this.attr('data-target')
-      , $parent
-
-    if (!selector) {
-      selector = $this.attr('href')
-      selector = selector && selector.replace(/.*(?=#[^\s]*$)/, '') //strip for ie7
-    }
-
-    $parent = $(selector)
-
-    e && e.preventDefault()
-
-    $parent.length || ($parent = $this.hasClass('alert') ? $this : $this.parent())
-
-    $parent.trigger(e = $.Event('close'))
-
-    if (e.isDefaultPrevented()) return
-
-    $parent.removeClass('in')
-
-    function removeElement() {
-      $parent
-        .trigger('closed')
-        .remove()
-    }
-
-    $.support.transition && $parent.hasClass('fade') ?
-      $parent.on($.support.transition.end, removeElement) :
-      removeElement()
-  }
-
-
- /* ALERT PLUGIN DEFINITION
-  * ======================= */
-
-  $.fn.alert = function (option) {
-    return this.each(function () {
-      var $this = $(this)
-        , data = $this.data('alert')
-      if (!data) $this.data('alert', (data = new Alert(this)))
-      if (typeof option == 'string') data[option].call($this)
-    })
-  }
-
-  $.fn.alert.Constructor = Alert
-
-
- /* ALERT DATA-API
-  * ============== */
-
-  $(function () {
-    $('body').on('click.alert.data-api', dismiss, Alert.prototype.close)
-  })
-
-}(window.jQuery);/* ============================================================
- * bootstrap-button.js v2.1.1
- * http://twitter.github.com/bootstrap/javascript.html#buttons
- * ============================================================
- * Copyright 2012 Twitter, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============================================================ */
-
-
-!function ($) {
-
-  "use strict"; // jshint ;_;
-
-
- /* BUTTON PUBLIC CLASS DEFINITION
-  * ============================== */
-
-  var Button = function (element, options) {
-    this.$element = $(element)
-    this.options = $.extend({}, $.fn.button.defaults, options)
-  }
-
-  Button.prototype.setState = function (state) {
-    var d = 'disabled'
-      , $el = this.$element
-      , data = $el.data()
-      , val = $el.is('input') ? 'val' : 'html'
-
-    state = state + 'Text'
-    data.resetText || $el.data('resetText', $el[val]())
-
-    $el[val](data[state] || this.options[state])
-
-    // push to event loop to allow forms to submit
-    setTimeout(function () {
-      state == 'loadingText' ?
-        $el.addClass(d).attr(d, d) :
-        $el.removeClass(d).removeAttr(d)
-    }, 0)
-  }
-
-  Button.prototype.toggle = function () {
-    var $parent = this.$element.closest('[data-toggle="buttons-radio"]')
-
-    $parent && $parent
-      .find('.active')
-      .removeClass('active')
-
-    this.$element.toggleClass('active')
-  }
-
-
- /* BUTTON PLUGIN DEFINITION
-  * ======================== */
-
-  $.fn.button = function (option) {
-    return this.each(function () {
-      var $this = $(this)
-        , data = $this.data('button')
-        , options = typeof option == 'object' && option
-      if (!data) $this.data('button', (data = new Button(this, options)))
-      if (option == 'toggle') data.toggle()
-      else if (option) data.setState(option)
-    })
-  }
-
-  $.fn.button.defaults = {
-    loadingText: 'loading...'
-  }
-
-  $.fn.button.Constructor = Button
-
-
- /* BUTTON DATA-API
-  * =============== */
-
-  $(function () {
-    $('body').on('click.button.data-api', '[data-toggle^=button]', function ( e ) {
-      var $btn = $(e.target)
-      if (!$btn.hasClass('btn')) $btn = $btn.closest('.btn')
-      $btn.button('toggle')
-    })
-  })
-
-}(window.jQuery);/* ==========================================================
- * bootstrap-carousel.js v2.1.1
- * http://twitter.github.com/bootstrap/javascript.html#carousel
- * ==========================================================
- * Copyright 2012 Twitter, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ========================================================== */
-
-
-!function ($) {
-
-  "use strict"; // jshint ;_;
-
-
- /* CAROUSEL CLASS DEFINITION
-  * ========================= */
-
-  var Carousel = function (element, options) {
-    this.$element = $(element)
-    this.options = options
-    this.options.slide && this.slide(this.options.slide)
-    this.options.pause == 'hover' && this.$element
-      .on('mouseenter', $.proxy(this.pause, this))
-      .on('mouseleave', $.proxy(this.cycle, this))
-  }
-
-  Carousel.prototype = {
-
-    cycle: function (e) {
-      if (!e) this.paused = false
-      this.options.interval
-        && !this.paused
-        && (this.interval = setInterval($.proxy(this.next, this), this.options.interval))
-      return this
-    }
-
-  , to: function (pos) {
-      var $active = this.$element.find('.item.active')
-        , children = $active.parent().children()
-        , activePos = children.index($active)
-        , that = this
-
-      if (pos > (children.length - 1) || pos < 0) return
-
-      if (this.sliding) {
-        return this.$element.one('slid', function () {
-          that.to(pos)
-        })
-      }
-
-      if (activePos == pos) {
-        return this.pause().cycle()
-      }
-
-      return this.slide(pos > activePos ? 'next' : 'prev', $(children[pos]))
-    }
-
-  , pause: function (e) {
-      if (!e) this.paused = true
-      if (this.$element.find('.next, .prev').length && $.support.transition.end) {
-        this.$element.trigger($.support.transition.end)
-        this.cycle()
-      }
-      clearInterval(this.interval)
-      this.interval = null
-      return this
-    }
-
-  , next: function () {
-      if (this.sliding) return
-      return this.slide('next')
-    }
-
-  , prev: function () {
-      if (this.sliding) return
-      return this.slide('prev')
-    }
-
-  , slide: function (type, next) {
-      var $active = this.$element.find('.item.active')
-        , $next = next || $active[type]()
-        , isCycling = this.interval
-        , direction = type == 'next' ? 'left' : 'right'
-        , fallback  = type == 'next' ? 'first' : 'last'
-        , that = this
-        , e = $.Event('slide', {
-            relatedTarget: $next[0]
-          })
-
-      this.sliding = true
-
-      isCycling && this.pause()
-
-      $next = $next.length ? $next : this.$element.find('.item')[fallback]()
-
-      if ($next.hasClass('active')) return
-
-      if ($.support.transition && this.$element.hasClass('slide')) {
-        this.$element.trigger(e)
-        if (e.isDefaultPrevented()) return
-        $next.addClass(type)
-        $next[0].offsetWidth // force reflow
-        $active.addClass(direction)
-        $next.addClass(direction)
-        this.$element.one($.support.transition.end, function () {
-          $next.removeClass([type, direction].join(' ')).addClass('active')
-          $active.removeClass(['active', direction].join(' '))
-          that.sliding = false
-          setTimeout(function () { that.$element.trigger('slid') }, 0)
-        })
-      } else {
-        this.$element.trigger(e)
-        if (e.isDefaultPrevented()) return
-        $active.removeClass('active')
-        $next.addClass('active')
-        this.sliding = false
-        this.$element.trigger('slid')
-      }
-
-      isCycling && this.cycle()
-
-      return this
-    }
-
-  }
-
-
- /* CAROUSEL PLUGIN DEFINITION
-  * ========================== */
-
-  $.fn.carousel = function (option) {
-    return this.each(function () {
-      var $this = $(this)
-        , data = $this.data('carousel')
-        , options = $.extend({}, $.fn.carousel.defaults, typeof option == 'object' && option)
-        , action = typeof option == 'string' ? option : options.slide
-      if (!data) $this.data('carousel', (data = new Carousel(this, options)))
-      if (typeof option == 'number') data.to(option)
-      else if (action) data[action]()
-      else if (options.interval) data.cycle()
-    })
-  }
-
-  $.fn.carousel.defaults = {
-    interval: 5000
-  , pause: 'hover'
-  }
-
-  $.fn.carousel.Constructor = Carousel
-
-
- /* CAROUSEL DATA-API
-  * ================= */
-
-  $(function () {
-    $('body').on('click.carousel.data-api', '[data-slide]', function ( e ) {
-      var $this = $(this), href
-        , $target = $($this.attr('data-target') || (href = $this.attr('href')) && href.replace(/.*(?=#[^\s]+$)/, '')) //strip for ie7
-        , options = !$target.data('modal') && $.extend({}, $target.data(), $this.data())
-      $target.carousel(options)
-      e.preventDefault()
-    })
-  })
-
-}(window.jQuery);/* =============================================================
- * bootstrap-collapse.js v2.1.1
- * http://twitter.github.com/bootstrap/javascript.html#collapse
- * =============================================================
- * Copyright 2012 Twitter, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============================================================ */
-
-
-!function ($) {
-
-  "use strict"; // jshint ;_;
-
-
- /* COLLAPSE PUBLIC CLASS DEFINITION
-  * ================================ */
-
-  var Collapse = function (element, options) {
-    this.$element = $(element)
-    this.options = $.extend({}, $.fn.collapse.defaults, options)
-
-    if (this.options.parent) {
-      this.$parent = $(this.options.parent)
-    }
-
-    this.options.toggle && this.toggle()
-  }
-
-  Collapse.prototype = {
-
-    constructor: Collapse
-
-  , dimension: function () {
-      var hasWidth = this.$element.hasClass('width')
-      return hasWidth ? 'width' : 'height'
-    }
-
-  , show: function () {
-      var dimension
-        , scroll
-        , actives
-        , hasData
-
-      if (this.transitioning) return
-
-      dimension = this.dimension()
-      scroll = $.camelCase(['scroll', dimension].join('-'))
-      actives = this.$parent && this.$parent.find('> .accordion-group > .in')
-
-      if (actives && actives.length) {
-        hasData = actives.data('collapse')
-        if (hasData && hasData.transitioning) return
-        actives.collapse('hide')
-        hasData || actives.data('collapse', null)
-      }
-
-      this.$element[dimension](0)
-      this.transition('addClass', $.Event('show'), 'shown')
-      $.support.transition && this.$element[dimension](this.$element[0][scroll])
-    }
-
-  , hide: function () {
-      var dimension
-      if (this.transitioning) return
-      dimension = this.dimension()
-      this.reset(this.$element[dimension]())
-      this.transition('removeClass', $.Event('hide'), 'hidden')
-      this.$element[dimension](0)
-    }
-
-  , reset: function (size) {
-      var dimension = this.dimension()
-
-      this.$element
-        .removeClass('collapse')
-        [dimension](size || 'auto')
-        [0].offsetWidth
-
-      this.$element[size !== null ? 'addClass' : 'removeClass']('collapse')
-
-      return this
-    }
-
-  , transition: function (method, startEvent, completeEvent) {
-      var that = this
-        , complete = function () {
-            if (startEvent.type == 'show') that.reset()
-            that.transitioning = 0
-            that.$element.trigger(completeEvent)
-          }
-
-      this.$element.trigger(startEvent)
-
-      if (startEvent.isDefaultPrevented()) return
-
-      this.transitioning = 1
-
-      this.$element[method]('in')
-
-      $.support.transition && this.$element.hasClass('collapse') ?
-        this.$element.one($.support.transition.end, complete) :
-        complete()
-    }
-
-  , toggle: function () {
-      this[this.$element.hasClass('in') ? 'hide' : 'show']()
-    }
-
-  }
-
-
- /* COLLAPSIBLE PLUGIN DEFINITION
-  * ============================== */
-
-  $.fn.collapse = function (option) {
-    return this.each(function () {
-      var $this = $(this)
-        , data = $this.data('collapse')
-        , options = typeof option == 'object' && option
-      if (!data) $this.data('collapse', (data = new Collapse(this, options)))
-      if (typeof option == 'string') data[option]()
-    })
-  }
-
-  $.fn.collapse.defaults = {
-    toggle: true
-  }
-
-  $.fn.collapse.Constructor = Collapse
-
-
- /* COLLAPSIBLE DATA-API
-  * ==================== */
-
-  $(function () {
-    $('body').on('click.collapse.data-api', '[data-toggle=collapse]', function (e) {
-      var $this = $(this), href
-        , target = $this.attr('data-target')
-          || e.preventDefault()
-          || (href = $this.attr('href')) && href.replace(/.*(?=#[^\s]+$)/, '') //strip for ie7
-        , option = $(target).data('collapse') ? 'toggle' : $this.data()
-      $this[$(target).hasClass('in') ? 'addClass' : 'removeClass']('collapsed')
-      $(target).collapse(option)
-    })
-  })
-
-}(window.jQuery);/* ============================================================
- * bootstrap-dropdown.js v2.1.1
- * http://twitter.github.com/bootstrap/javascript.html#dropdowns
- * ============================================================
- * Copyright 2012 Twitter, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============================================================ */
-
-
-!function ($) {
-
-  "use strict"; // jshint ;_;
-
-
- /* DROPDOWN CLASS DEFINITION
-  * ========================= */
-
-  var toggle = '[data-toggle=dropdown]'
-    , Dropdown = function (element) {
-        var $el = $(element).on('click.dropdown.data-api', this.toggle)
-        $('html').on('click.dropdown.data-api', function () {
-          $el.parent().removeClass('open')
-        })
-      }
-
-  Dropdown.prototype = {
-
-    constructor: Dropdown
-
-  , toggle: function (e) {
-      var $this = $(this)
-        , $parent
-        , isActive
-
-      if ($this.is('.disabled, :disabled')) return
-
-      $parent = getParent($this)
-
-      isActive = $parent.hasClass('open')
-
-      clearMenus()
-
-      if (!isActive) {
-        $parent.toggleClass('open')
-        $this.focus()
-      }
-
-      return false
-    }
-
-  , keydown: function (e) {
-      var $this
-        , $items
-        , $active
-        , $parent
-        , isActive
-        , index
-
-      if (!/(38|40|27)/.test(e.keyCode)) return
-
-      $this = $(this)
-
-      e.preventDefault()
-      e.stopPropagation()
-
-      if ($this.is('.disabled, :disabled')) return
-
-      $parent = getParent($this)
-
-      isActive = $parent.hasClass('open')
-
-      if (!isActive || (isActive && e.keyCode == 27)) return $this.click()
-
-      $items = $('[role=menu] li:not(.divider) a', $parent)
-
-      if (!$items.length) return
-
-      index = $items.index($items.filter(':focus'))
-
-      if (e.keyCode == 38 && index > 0) index--                                        // up
-      if (e.keyCode == 40 && index < $items.length - 1) index++                        // down
-      if (!~index) index = 0
-
-      $items
-        .eq(index)
-        .focus()
-    }
-
-  }
-
-  function clearMenus() {
-    getParent($(toggle))
-      .removeClass('open')
-  }
-
-  function getParent($this) {
-    var selector = $this.attr('data-target')
-      , $parent
-
-    if (!selector) {
-      selector = $this.attr('href')
-      selector = selector && /#/.test(selector) && selector.replace(/.*(?=#[^\s]*$)/, '') //strip for ie7
-    }
-
-    $parent = $(selector)
-    $parent.length || ($parent = $this.parent())
-
-    return $parent
-  }
-
-
-  /* DROPDOWN PLUGIN DEFINITION
-   * ========================== */
-
-  $.fn.dropdown = function (option) {
-    return this.each(function () {
-      var $this = $(this)
-        , data = $this.data('dropdown')
-      if (!data) $this.data('dropdown', (data = new Dropdown(this)))
-      if (typeof option == 'string') data[option].call($this)
-    })
-  }
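-
-  // Example: a toggle can be wired up programmatically, e.g.
-  // $('.dropdown-toggle').dropdown(), or declaratively through the data-api
-  // below by giving the trigger data-toggle="dropdown"; getParent() resolves
-  // the menu container from data-target, the href hash, or the parent element.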
-
-  $.fn.dropdown.Constructor = Dropdown
-
-
-  /* APPLY TO STANDARD DROPDOWN ELEMENTS
-   * =================================== */
-
-  $(function () {
-    $('html')
-      .on('click.dropdown.data-api touchstart.dropdown.data-api', clearMenus)
-    $('body')
-      .on('click.dropdown touchstart.dropdown.data-api', '.dropdown form', function (e) { e.stopPropagation() })
-      .on('click.dropdown.data-api touchstart.dropdown.data-api'  , toggle, Dropdown.prototype.toggle)
-      .on('keydown.dropdown.data-api touchstart.dropdown.data-api', toggle + ', [role=menu]' , Dropdown.prototype.keydown)
-  })
-
-}(window.jQuery);/* =========================================================
- * bootstrap-modal.js v2.1.1
- * http://twitter.github.com/bootstrap/javascript.html#modals
- * =========================================================
- * Copyright 2012 Twitter, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ========================================================= */
-
-
-!function ($) {
-
-  "use strict"; // jshint ;_;
-
-
- /* MODAL CLASS DEFINITION
-  * ====================== */
-
-  var Modal = function (element, options) {
-    this.options = options
-    this.$element = $(element)
-      .delegate('[data-dismiss="modal"]', 'click.dismiss.modal', $.proxy(this.hide, this))
-    this.options.remote && this.$element.find('.modal-body').load(this.options.remote)
-  }
-
-  Modal.prototype = {
-
-      constructor: Modal
-
-    , toggle: function () {
-        return this[!this.isShown ? 'show' : 'hide']()
-      }
-
-    , show: function () {
-        var that = this
-          , e = $.Event('show')
-
-        this.$element.trigger(e)
-
-        if (this.isShown || e.isDefaultPrevented()) return
-
-        $('body').addClass('modal-open')
-
-        this.isShown = true
-
-        this.escape()
-
-        this.backdrop(function () {
-          var transition = $.support.transition && that.$element.hasClass('fade')
-
-          if (!that.$element.parent().length) {
-            that.$element.appendTo(document.body) //don't move modals dom position
-          }
-
-          that.$element
-            .show()
-
-          if (transition) {
-            that.$element[0].offsetWidth // force reflow
-          }
-
-          that.$element
-            .addClass('in')
-            .attr('aria-hidden', false)
-            .focus()
-
-          that.enforceFocus()
-
-          transition ?
-            that.$element.one($.support.transition.end, function () { that.$element.trigger('shown') }) :
-            that.$element.trigger('shown')
-
-        })
-      }
-
-    , hide: function (e) {
-        e && e.preventDefault()
-
-        var that = this
-
-        e = $.Event('hide')
-
-        this.$element.trigger(e)
-
-        if (!this.isShown || e.isDefaultPrevented()) return
-
-        this.isShown = false
-
-        $('body').removeClass('modal-open')
-
-        this.escape()
-
-        $(document).off('focusin.modal')
-
-        this.$element
-          .removeClass('in')
-          .attr('aria-hidden', true)
-
-        $.support.transition && this.$element.hasClass('fade') ?
-          this.hideWithTransition() :
-          this.hideModal()
-      }
-
-    , enforceFocus: function () {
-        var that = this
-        $(document).on('focusin.modal', function (e) {
-          if (that.$element[0] !== e.target && !that.$element.has(e.target).length) {
-            that.$element.focus()
-          }
-        })
-      }
-
-    , escape: function () {
-        var that = this
-        if (this.isShown && this.options.keyboard) {
-          this.$element.on('keyup.dismiss.modal', function ( e ) {
-            e.which == 27 && that.hide()
-          })
-        } else if (!this.isShown) {
-          this.$element.off('keyup.dismiss.modal')
-        }
-      }
-
-    , hideWithTransition: function () {
-        var that = this
-          , timeout = setTimeout(function () {
-              that.$element.off($.support.transition.end)
-              that.hideModal()
-            }, 500)
-
-        this.$element.one($.support.transition.end, function () {
-          clearTimeout(timeout)
-          that.hideModal()
-        })
-      }
-
-    , hideModal: function (that) {
-        this.$element
-          .hide()
-          .trigger('hidden')
-
-        this.backdrop()
-      }
-
-    , removeBackdrop: function () {
-        this.$backdrop.remove()
-        this.$backdrop = null
-      }
-
-    , backdrop: function (callback) {
-        var that = this
-          , animate = this.$element.hasClass('fade') ? 'fade' : ''
-
-        if (this.isShown && this.options.backdrop) {
-          var doAnimate = $.support.transition && animate
-
-          this.$backdrop = $('<div class="modal-backdrop ' + animate + '" />')
-            .appendTo(document.body)
-
-          if (this.options.backdrop != 'static') {
-            this.$backdrop.click($.proxy(this.hide, this))
-          }
-
-          if (doAnimate) this.$backdrop[0].offsetWidth // force reflow
-
-          this.$backdrop.addClass('in')
-
-          doAnimate ?
-            this.$backdrop.one($.support.transition.end, callback) :
-            callback()
-
-        } else if (!this.isShown && this.$backdrop) {
-          this.$backdrop.removeClass('in')
-
-          $.support.transition && this.$element.hasClass('fade')?
-            this.$backdrop.one($.support.transition.end, $.proxy(this.removeBackdrop, this)) :
-            this.removeBackdrop()
-
-        } else if (callback) {
-          callback()
-        }
-      }
-  }
-
-
- /* MODAL PLUGIN DEFINITION
-  * ======================= */
-
-  $.fn.modal = function (option) {
-    return this.each(function () {
-      var $this = $(this)
-        , data = $this.data('modal')
-        , options = $.extend({}, $.fn.modal.defaults, $this.data(), typeof option == 'object' && option)
-      if (!data) $this.data('modal', (data = new Modal(this, options)))
-      if (typeof option == 'string') data[option]()
-      else if (options.show) data.show()
-    })
-  }
-
-  $.fn.modal.defaults = {
-      backdrop: true
-    , keyboard: true
-    , show: true
-  }
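-
-  // Example: $('#myModal').modal({ keyboard: false }) builds a Modal with the
-  // defaults above and shows it; a string argument, e.g. .modal('hide'),
-  // dispatches to the method of the same name on the cached instance.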
-
-  $.fn.modal.Constructor = Modal
-
-
- /* MODAL DATA-API
-  * ============== */
-
-  $(function () {
-    $('body').on('click.modal.data-api', '[data-toggle="modal"]', function ( e ) {
-      var $this = $(this)
-        , href = $this.attr('href')
-        , $target = $($this.attr('data-target') || (href && href.replace(/.*(?=#[^\s]+$)/, ''))) //strip for ie7
-        , option = $target.data('modal') ? 'toggle' : $.extend({ remote: !/#/.test(href) && href }, $target.data(), $this.data())
-
-      e.preventDefault()
-
-      $target
-        .modal(option)
-        .one('hide', function () {
-          $this.focus()
-        })
-    })
-  })
-
-}(window.jQuery);/* ===========================================================
- * bootstrap-tooltip.js v2.1.1
- * http://twitter.github.com/bootstrap/javascript.html#tooltips
- * Inspired by the original jQuery.tipsy by Jason Frame
- * ===========================================================
- * Copyright 2012 Twitter, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ========================================================== */
-
-
-!function ($) {
-
-  "use strict"; // jshint ;_;
-
-
- /* TOOLTIP PUBLIC CLASS DEFINITION
-  * =============================== */
-
-  var Tooltip = function (element, options) {
-    this.init('tooltip', element, options)
-  }
-
-  Tooltip.prototype = {
-
-    constructor: Tooltip
-
-  , init: function (type, element, options) {
-      var eventIn
-        , eventOut
-
-      this.type = type
-      this.$element = $(element)
-      this.options = this.getOptions(options)
-      this.enabled = true
-
-      if (this.options.trigger == 'click') {
-        this.$element.on('click.' + this.type, this.options.selector, $.proxy(this.toggle, this))
-      } else if (this.options.trigger != 'manual') {
-        eventIn = this.options.trigger == 'hover' ? 'mouseenter' : 'focus'
-        eventOut = this.options.trigger == 'hover' ? 'mouseleave' : 'blur'
-        this.$element.on(eventIn + '.' + this.type, this.options.selector, $.proxy(this.enter, this))
-        this.$element.on(eventOut + '.' + this.type, this.options.selector, $.proxy(this.leave, this))
-      }
-
-      this.options.selector ?
-        (this._options = $.extend({}, this.options, { trigger: 'manual', selector: '' })) :
-        this.fixTitle()
-    }
-
-  , getOptions: function (options) {
-      options = $.extend({}, $.fn[this.type].defaults, options, this.$element.data())
-
-      if (options.delay && typeof options.delay == 'number') {
-        options.delay = {
-          show: options.delay
-        , hide: options.delay
-        }
-      }
-
-      return options
-    }
-
-  , enter: function (e) {
-      var self = $(e.currentTarget)[this.type](this._options).data(this.type)
-
-      if (!self.options.delay || !self.options.delay.show) return self.show()
-
-      clearTimeout(this.timeout)
-      self.hoverState = 'in'
-      this.timeout = setTimeout(function() {
-        if (self.hoverState == 'in') self.show()
-      }, self.options.delay.show)
-    }
-
-  , leave: function (e) {
-      var self = $(e.currentTarget)[this.type](this._options).data(this.type)
-
-      if (this.timeout) clearTimeout(this.timeout)
-      if (!self.options.delay || !self.options.delay.hide) return self.hide()
-
-      self.hoverState = 'out'
-      this.timeout = setTimeout(function() {
-        if (self.hoverState == 'out') self.hide()
-      }, self.options.delay.hide)
-    }
-
-  , show: function () {
-      var $tip
-        , inside
-        , pos
-        , actualWidth
-        , actualHeight
-        , placement
-        , tp
-
-      if (this.hasContent() && this.enabled) {
-        $tip = this.tip()
-        this.setContent()
-
-        if (this.options.animation) {
-          $tip.addClass('fade')
-        }
-
-        placement = typeof this.options.placement == 'function' ?
-          this.options.placement.call(this, $tip[0], this.$element[0]) :
-          this.options.placement
-
-        inside = /in/.test(placement)
-
-        $tip
-          .remove()
-          .css({ top: 0, left: 0, display: 'block' })
-          .appendTo(inside ? this.$element : document.body)
-
-        pos = this.getPosition(inside)
-
-        actualWidth = $tip[0].offsetWidth
-        actualHeight = $tip[0].offsetHeight
-
-        switch (inside ? placement.split(' ')[1] : placement) {
-          case 'bottom':
-            tp = {top: pos.top + pos.height, left: pos.left + pos.width / 2 - actualWidth / 2}
-            break
-          case 'top':
-            tp = {top: pos.top - actualHeight, left: pos.left + pos.width / 2 - actualWidth / 2}
-            break
-          case 'left':
-            tp = {top: pos.top + pos.height / 2 - actualHeight / 2, left: pos.left - actualWidth}
-            break
-          case 'right':
-            tp = {top: pos.top + pos.height / 2 - actualHeight / 2, left: pos.left + pos.width}
-            break
-        }
-
-        $tip
-          .css(tp)
-          .addClass(placement)
-          .addClass('in')
-      }
-    }
-
-  , setContent: function () {
-      var $tip = this.tip()
-        , title = this.getTitle()
-
-      $tip.find('.tooltip-inner')[this.options.html ? 'html' : 'text'](title)
-      $tip.removeClass('fade in top bottom left right')
-    }
-
-  , hide: function () {
-      var that = this
-        , $tip = this.tip()
-
-      $tip.removeClass('in')
-
-      function removeWithAnimation() {
-        var timeout = setTimeout(function () {
-          $tip.off($.support.transition.end).remove()
-        }, 500)
-
-        $tip.one($.support.transition.end, function () {
-          clearTimeout(timeout)
-          $tip.remove()
-        })
-      }
-
-      $.support.transition && this.$tip.hasClass('fade') ?
-        removeWithAnimation() :
-        $tip.remove()
-
-      return this
-    }
-
-  , fixTitle: function () {
-      var $e = this.$element
-      if ($e.attr('title') || typeof($e.attr('data-original-title')) != 'string') {
-        $e.attr('data-original-title', $e.attr('title') || '').removeAttr('title')
-      }
-    }
-
-  , hasContent: function () {
-      return this.getTitle()
-    }
-
-  , getPosition: function (inside) {
-      return $.extend({}, (inside ? {top: 0, left: 0} : this.$element.offset()), {
-        width: this.$element[0].offsetWidth
-      , height: this.$element[0].offsetHeight
-      })
-    }
-
-  , getTitle: function () {
-      var title
-        , $e = this.$element
-        , o = this.options
-
-      title = $e.attr('data-original-title')
-        || (typeof o.title == 'function' ? o.title.call($e[0]) :  o.title)
-
-      return title
-    }
-
-  , tip: function () {
-      return this.$tip = this.$tip || $(this.options.template)
-    }
-
-  , validate: function () {
-      if (!this.$element[0].parentNode) {
-        this.hide()
-        this.$element = null
-        this.options = null
-      }
-    }
-
-  , enable: function () {
-      this.enabled = true
-    }
-
-  , disable: function () {
-      this.enabled = false
-    }
-
-  , toggleEnabled: function () {
-      this.enabled = !this.enabled
-    }
-
-  , toggle: function () {
-      this[this.tip().hasClass('in') ? 'hide' : 'show']()
-    }
-
-  , destroy: function () {
-      this.hide().$element.off('.' + this.type).removeData(this.type)
-    }
-
-  }
-
-
- /* TOOLTIP PLUGIN DEFINITION
-  * ========================= */
-
-  $.fn.tooltip = function ( option ) {
-    return this.each(function () {
-      var $this = $(this)
-        , data = $this.data('tooltip')
-        , options = typeof option == 'object' && option
-      if (!data) $this.data('tooltip', (data = new Tooltip(this, options)))
-      if (typeof option == 'string') data[option]()
-    })
-  }
-
-  $.fn.tooltip.Constructor = Tooltip
-
-  $.fn.tooltip.defaults = {
-    animation: true
-  , placement: 'top'
-  , selector: false
-  , template: '<div class="tooltip"><div class="tooltip-arrow"></div><div class="tooltip-inner"></div></div>'
-  , trigger: 'hover'
-  , title: ''
-  , delay: 0
-  , html: true
-  }
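-
-  // Example: $('[rel=tooltip]').tooltip({ placement: 'bottom', delay: 300 })
-  // uses each element's title attribute (stashed as data-original-title) for
-  // content; a numeric delay is expanded by getOptions() into { show, hide }.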
-
-}(window.jQuery);
-/* ===========================================================
- * bootstrap-popover.js v2.1.1
- * http://twitter.github.com/bootstrap/javascript.html#popovers
- * ===========================================================
- * Copyright 2012 Twitter, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * =========================================================== */
-
-
-!function ($) {
-
-  "use strict"; // jshint ;_;
-
-
- /* POPOVER PUBLIC CLASS DEFINITION
-  * =============================== */
-
-  var Popover = function (element, options) {
-    this.init('popover', element, options)
-  }
-
-
-  /* NOTE: POPOVER EXTENDS BOOTSTRAP-TOOLTIP.js
-     ========================================== */
-
-  Popover.prototype = $.extend({}, $.fn.tooltip.Constructor.prototype, {
-
-    constructor: Popover
-
-  , setContent: function () {
-      var $tip = this.tip()
-        , title = this.getTitle()
-        , content = this.getContent()
-
-      $tip.find('.popover-title')[this.options.html ? 'html' : 'text'](title)
-      $tip.find('.popover-content > *')[this.options.html ? 'html' : 'text'](content)
-
-      $tip.removeClass('fade top bottom left right in')
-    }
-
-  , hasContent: function () {
-      return this.getTitle() || this.getContent()
-    }
-
-  , getContent: function () {
-      var content
-        , $e = this.$element
-        , o = this.options
-
-      content = $e.attr('data-content')
-        || (typeof o.content == 'function' ? o.content.call($e[0]) :  o.content)
-
-      return content
-    }
-
-  , tip: function () {
-      if (!this.$tip) {
-        this.$tip = $(this.options.template)
-      }
-      return this.$tip
-    }
-
-  , destroy: function () {
-      this.hide().$element.off('.' + this.type).removeData(this.type)
-    }
-
-  })
-
-
- /* POPOVER PLUGIN DEFINITION
-  * ======================= */
-
-  $.fn.popover = function (option) {
-    return this.each(function () {
-      var $this = $(this)
-        , data = $this.data('popover')
-        , options = typeof option == 'object' && option
-      if (!data) $this.data('popover', (data = new Popover(this, options)))
-      if (typeof option == 'string') data[option]()
-    })
-  }
-
-  $.fn.popover.Constructor = Popover
-
-  $.fn.popover.defaults = $.extend({} , $.fn.tooltip.defaults, {
-    placement: 'right'
-  , trigger: 'click'
-  , content: ''
-  , template: '<div class="popover"><div class="arrow"></div><div class="popover-inner"><h3 class="popover-title"></h3><div class="popover-content"><p></p></div></div></div>'
-  })
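-
-  // Example: $('.btn').popover({ title: 'Hint', content: 'More detail...' });
-  // content may also come from a data-content attribute or a function, and,
-  // unlike tooltip, the default trigger is 'click' with placement 'right'.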
-
-}(window.jQuery);/* =============================================================
- * bootstrap-scrollspy.js v2.1.1
- * http://twitter.github.com/bootstrap/javascript.html#scrollspy
- * =============================================================
- * Copyright 2012 Twitter, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============================================================== */
-
-
-!function ($) {
-
-  "use strict"; // jshint ;_;
-
-
- /* SCROLLSPY CLASS DEFINITION
-  * ========================== */
-
-  function ScrollSpy(element, options) {
-    var process = $.proxy(this.process, this)
-      , $element = $(element).is('body') ? $(window) : $(element)
-      , href
-    this.options = $.extend({}, $.fn.scrollspy.defaults, options)
-    this.$scrollElement = $element.on('scroll.scroll-spy.data-api', process)
-    this.selector = (this.options.target
-      || ((href = $(element).attr('href')) && href.replace(/.*(?=#[^\s]+$)/, '')) //strip for ie7
-      || '') + ' .nav li > a'
-    this.$body = $('body')
-    this.refresh()
-    this.process()
-  }
-
-  ScrollSpy.prototype = {
-
-      constructor: ScrollSpy
-
-    , refresh: function () {
-        var self = this
-          , $targets
-
-        this.offsets = $([])
-        this.targets = $([])
-
-        $targets = this.$body
-          .find(this.selector)
-          .map(function () {
-            var $el = $(this)
-              , href = $el.data('target') || $el.attr('href')
-              , $href = /^#\w/.test(href) && $(href)
-            return ( $href
-              && $href.length
-              && [[ $href.position().top, href ]] ) || null
-          })
-          .sort(function (a, b) { return a[0] - b[0] })
-          .each(function () {
-            self.offsets.push(this[0])
-            self.targets.push(this[1])
-          })
-      }
-
-    , process: function () {
-        var scrollTop = this.$scrollElement.scrollTop() + this.options.offset
-          , scrollHeight = this.$scrollElement[0].scrollHeight || this.$body[0].scrollHeight
-          , maxScroll = scrollHeight - this.$scrollElement.height()
-          , offsets = this.offsets
-          , targets = this.targets
-          , activeTarget = this.activeTarget
-          , i
-
-        if (scrollTop >= maxScroll) {
-          return activeTarget != (i = targets.last()[0])
-            && this.activate ( i )
-        }
-
-        for (i = offsets.length; i--;) {
-          activeTarget != targets[i]
-            && scrollTop >= offsets[i]
-            && (!offsets[i + 1] || scrollTop <= offsets[i + 1])
-            && this.activate( targets[i] )
-        }
-      }
-
-    , activate: function (target) {
-        var active
-          , selector
-
-        this.activeTarget = target
-
-        $(this.selector)
-          .parent('.active')
-          .removeClass('active')
-
-        selector = this.selector
-          + '[data-target="' + target + '"],'
-          + this.selector + '[href="' + target + '"]'
-
-        active = $(selector)
-          .parent('li')
-          .addClass('active')
-
-        if (active.parent('.dropdown-menu').length)  {
-          active = active.closest('li.dropdown').addClass('active')
-        }
-
-        active.trigger('activate')
-      }
-
-  }
-
-
- /* SCROLLSPY PLUGIN DEFINITION
-  * =========================== */
-
-  $.fn.scrollspy = function (option) {
-    return this.each(function () {
-      var $this = $(this)
-        , data = $this.data('scrollspy')
-        , options = typeof option == 'object' && option
-      if (!data) $this.data('scrollspy', (data = new ScrollSpy(this, options)))
-      if (typeof option == 'string') data[option]()
-    })
-  }
-
-  $.fn.scrollspy.Constructor = ScrollSpy
-
-  $.fn.scrollspy.defaults = {
-    offset: 10
-  }
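-
-  // Example: <body data-spy="scroll" data-target=".navbar"> activates the
-  // data-api below, or call $('body').scrollspy({ offset: 60 }) directly;
-  // nav links must target #section anchors so refresh() can record offsets.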
-
-
- /* SCROLLSPY DATA-API
-  * ================== */
-
-  $(window).on('load', function () {
-    $('[data-spy="scroll"]').each(function () {
-      var $spy = $(this)
-      $spy.scrollspy($spy.data())
-    })
-  })
-
-}(window.jQuery);/* ========================================================
- * bootstrap-tab.js v2.1.1
- * http://twitter.github.com/bootstrap/javascript.html#tabs
- * ========================================================
- * Copyright 2012 Twitter, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ======================================================== */
-
-
-!function ($) {
-
-  "use strict"; // jshint ;_;
-
-
- /* TAB CLASS DEFINITION
-  * ==================== */
-
-  var Tab = function (element) {
-    this.element = $(element)
-  }
-
-  Tab.prototype = {
-
-    constructor: Tab
-
-  , show: function () {
-      var $this = this.element
-        , $ul = $this.closest('ul:not(.dropdown-menu)')
-        , selector = $this.attr('data-target')
-        , previous
-        , $target
-        , e
-
-      if (!selector) {
-        selector = $this.attr('href')
-        selector = selector && selector.replace(/.*(?=#[^\s]*$)/, '') //strip for ie7
-      }
-
-      if ( $this.parent('li').hasClass('active') ) return
-
-      previous = $ul.find('.active a').last()[0]
-
-      e = $.Event('show', {
-        relatedTarget: previous
-      })
-
-      $this.trigger(e)
-
-      if (e.isDefaultPrevented()) return
-
-      $target = $(selector)
-
-      this.activate($this.parent('li'), $ul)
-      this.activate($target, $target.parent(), function () {
-        $this.trigger({
-          type: 'shown'
-        , relatedTarget: previous
-        })
-      })
-    }
-
-  , activate: function ( element, container, callback) {
-      var $active = container.find('> .active')
-        , transition = callback
-            && $.support.transition
-            && $active.hasClass('fade')
-
-      function next() {
-        $active
-          .removeClass('active')
-          .find('> .dropdown-menu > .active')
-          .removeClass('active')
-
-        element.addClass('active')
-
-        if (transition) {
-          element[0].offsetWidth // reflow for transition
-          element.addClass('in')
-        } else {
-          element.removeClass('fade')
-        }
-
-        if ( element.parent('.dropdown-menu').length ) {
-          element.closest('li.dropdown').addClass('active')
-        }
-
-        callback && callback()
-      }
-
-      transition ?
-        $active.one($.support.transition.end, next) :
-        next()
-
-      $active.removeClass('in')
-    }
-  }
-
-
- /* TAB PLUGIN DEFINITION
-  * ===================== */
-
-  $.fn.tab = function ( option ) {
-    return this.each(function () {
-      var $this = $(this)
-        , data = $this.data('tab')
-      if (!data) $this.data('tab', (data = new Tab(this)))
-      if (typeof option == 'string') data[option]()
-    })
-  }
-
-  $.fn.tab.Constructor = Tab
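-
-  // Example: $('#myTab a[href="#profile"]').tab('show') activates a pane
-  // programmatically; the data-api below does the same for clicks on
-  // data-toggle="tab" or data-toggle="pill" links, firing 'show'/'shown'
-  // events whose relatedTarget is the previously active tab.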
-
-
- /* TAB DATA-API
-  * ============ */
-
-  $(function () {
-    $('body').on('click.tab.data-api', '[data-toggle="tab"], [data-toggle="pill"]', function (e) {
-      e.preventDefault()
-      $(this).tab('show')
-    })
-  })
-
-}(window.jQuery);/* =============================================================
- * bootstrap-typeahead.js v2.1.1
- * http://twitter.github.com/bootstrap/javascript.html#typeahead
- * =============================================================
- * Copyright 2012 Twitter, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============================================================ */
-
-
-!function($){
-
-  "use strict"; // jshint ;_;
-
-
- /* TYPEAHEAD PUBLIC CLASS DEFINITION
-  * ================================= */
-
-  var Typeahead = function (element, options) {
-    this.$element = $(element)
-    this.options = $.extend({}, $.fn.typeahead.defaults, options)
-    this.matcher = this.options.matcher || this.matcher
-    this.sorter = this.options.sorter || this.sorter
-    this.highlighter = this.options.highlighter || this.highlighter
-    this.updater = this.options.updater || this.updater
-    this.$menu = $(this.options.menu).appendTo('body')
-    this.source = this.options.source
-    this.shown = false
-    this.listen()
-  }
-
-  Typeahead.prototype = {
-
-    constructor: Typeahead
-
-  , select: function () {
-      var val = this.$menu.find('.active').attr('data-value')
-      this.$element
-        .val(this.updater(val))
-        .change()
-      return this.hide()
-    }
-
-  , updater: function (item) {
-      return item
-    }
-
-  , show: function () {
-      var pos = $.extend({}, this.$element.offset(), {
-        height: this.$element[0].offsetHeight
-      })
-
-      this.$menu.css({
-        top: pos.top + pos.height
-      , left: pos.left
-      })
-
-      this.$menu.show()
-      this.shown = true
-      return this
-    }
-
-  , hide: function () {
-      this.$menu.hide()
-      this.shown = false
-      return this
-    }
-
-  , lookup: function (event) {
-      var items
-
-      this.query = this.$element.val()
-
-      if (!this.query || this.query.length < this.options.minLength) {
-        return this.shown ? this.hide() : this
-      }
-
-      items = $.isFunction(this.source) ? this.source(this.query, $.proxy(this.process, this)) : this.source
-
-      return items ? this.process(items) : this
-    }
-
-  , process: function (items) {
-      var that = this
-
-      items = $.grep(items, function (item) {
-        return that.matcher(item)
-      })
-
-      items = this.sorter(items)
-
-      if (!items.length) {
-        return this.shown ? this.hide() : this
-      }
-
-      return this.render(items.slice(0, this.options.items)).show()
-    }
-
-  , matcher: function (item) {
-      return ~item.toLowerCase().indexOf(this.query.toLowerCase())
-    }
-
-  , sorter: function (items) {
-      var beginswith = []
-        , caseSensitive = []
-        , caseInsensitive = []
-        , item
-
-      while (item = items.shift()) {
-        if (!item.toLowerCase().indexOf(this.query.toLowerCase())) beginswith.push(item)
-        else if (~item.indexOf(this.query)) caseSensitive.push(item)
-        else caseInsensitive.push(item)
-      }
-
-      return beginswith.concat(caseSensitive, caseInsensitive)
-    }
-
-  , highlighter: function (item) {
-      var query = this.query.replace(/[\-\[\]{}()*+?.,\\\^$|#\s]/g, '\\$&')
-      return item.replace(new RegExp('(' + query + ')', 'ig'), function ($1, match) {
-        return '<strong>' + match + '</strong>'
-      })
-    }
-
-  , render: function (items) {
-      var that = this
-
-      items = $(items).map(function (i, item) {
-        i = $(that.options.item).attr('data-value', item)
-        i.find('a').html(that.highlighter(item))
-        return i[0]
-      })
-
-      items.first().addClass('active')
-      this.$menu.html(items)
-      return this
-    }
-
-  , next: function (event) {
-      var active = this.$menu.find('.active').removeClass('active')
-        , next = active.next()
-
-      if (!next.length) {
-        next = $(this.$menu.find('li')[0])
-      }
-
-      next.addClass('active')
-    }
-
-  , prev: function (event) {
-      var active = this.$menu.find('.active').removeClass('active')
-        , prev = active.prev()
-
-      if (!prev.length) {
-        prev = this.$menu.find('li').last()
-      }
-
-      prev.addClass('active')
-    }
-
-  , listen: function () {
-      this.$element
-        .on('blur',     $.proxy(this.blur, this))
-        .on('keypress', $.proxy(this.keypress, this))
-        .on('keyup',    $.proxy(this.keyup, this))
-
-      if ($.browser.chrome || $.browser.webkit || $.browser.msie) {
-        this.$element.on('keydown', $.proxy(this.keydown, this))
-      }
-
-      this.$menu
-        .on('click', $.proxy(this.click, this))
-        .on('mouseenter', 'li', $.proxy(this.mouseenter, this))
-    }
-
-  , move: function (e) {
-      if (!this.shown) return
-
-      switch(e.keyCode) {
-        case 9: // tab
-        case 13: // enter
-        case 27: // escape
-          e.preventDefault()
-          break
-
-        case 38: // up arrow
-          e.preventDefault()
-          this.prev()
-          break
-
-        case 40: // down arrow
-          e.preventDefault()
-          this.next()
-          break
-      }
-
-      e.stopPropagation()
-    }
-
-  , keydown: function (e) {
-      this.suppressKeyPressRepeat = !~$.inArray(e.keyCode, [40,38,9,13,27])
-      this.move(e)
-    }
-
-  , keypress: function (e) {
-      if (this.suppressKeyPressRepeat) return
-      this.move(e)
-    }
-
-  , keyup: function (e) {
-      switch(e.keyCode) {
-        case 40: // down arrow
-        case 38: // up arrow
-          break
-
-        case 9: // tab
-        case 13: // enter
-          if (!this.shown) return
-          this.select()
-          break
-
-        case 27: // escape
-          if (!this.shown) return
-          this.hide()
-          break
-
-        default:
-          this.lookup()
-      }
-
-      e.stopPropagation()
-      e.preventDefault()
-    }
-
-  , blur: function (e) {
-      var that = this
-      setTimeout(function () { that.hide() }, 150)
-    }
-
-  , click: function (e) {
-      e.stopPropagation()
-      e.preventDefault()
-      this.select()
-    }
-
-  , mouseenter: function (e) {
-      this.$menu.find('.active').removeClass('active')
-      $(e.currentTarget).addClass('active')
-    }
-
-  }
-
-
-  /* TYPEAHEAD PLUGIN DEFINITION
-   * =========================== */
-
-  $.fn.typeahead = function (option) {
-    return this.each(function () {
-      var $this = $(this)
-        , data = $this.data('typeahead')
-        , options = typeof option == 'object' && option
-      if (!data) $this.data('typeahead', (data = new Typeahead(this, options)))
-      if (typeof option == 'string') data[option]()
-    })
-  }
-
-  $.fn.typeahead.defaults = {
-    source: []
-  , items: 8
-  , menu: '<ul class="typeahead dropdown-menu"></ul>'
-  , item: '<li><a href="#"></a></li>'
-  , minLength: 1
-  }
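-
-  // Example: $('#search').typeahead({ source: ['Alpha', 'Beta', 'Gamma'] })
-  // matches case-insensitively, sorts prefix matches first and bolds the
-  // match via highlighter(); source may instead be a function
-  // (query, process) that fetches items asynchronously.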
-
-  $.fn.typeahead.Constructor = Typeahead
-
-
- /* TYPEAHEAD DATA-API
-  * ================== */
-
-  $(function () {
-    $('body').on('focus.typeahead.data-api', '[data-provide="typeahead"]', function (e) {
-      var $this = $(this)
-      if ($this.data('typeahead')) return
-      e.preventDefault()
-      $this.typeahead($this.data())
-    })
-  })
-
-}(window.jQuery);
-/* ==========================================================
- * bootstrap-affix.js v2.1.1
- * http://twitter.github.com/bootstrap/javascript.html#affix
- * ==========================================================
- * Copyright 2012 Twitter, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ========================================================== */
-
-
-!function ($) {
-
-  "use strict"; // jshint ;_;
-
-
- /* AFFIX CLASS DEFINITION
-  * ====================== */
-
-  var Affix = function (element, options) {
-    this.options = $.extend({}, $.fn.affix.defaults, options)
-    this.$window = $(window).on('scroll.affix.data-api', $.proxy(this.checkPosition, this))
-    this.$element = $(element)
-    this.checkPosition()
-  }
-
-  Affix.prototype.checkPosition = function () {
-    if (!this.$element.is(':visible')) return
-
-    var scrollHeight = $(document).height()
-      , scrollTop = this.$window.scrollTop()
-      , position = this.$element.offset()
-      , offset = this.options.offset
-      , offsetBottom = offset.bottom
-      , offsetTop = offset.top
-      , reset = 'affix affix-top affix-bottom'
-      , affix
-
-    if (typeof offset != 'object') offsetBottom = offsetTop = offset
-    if (typeof offsetTop == 'function') offsetTop = offset.top()
-    if (typeof offsetBottom == 'function') offsetBottom = offset.bottom()
-
-    affix = this.unpin != null && (scrollTop + this.unpin <= position.top) ?
-      false    : offsetBottom != null && (position.top + this.$element.height() >= scrollHeight - offsetBottom) ?
-      'bottom' : offsetTop != null && scrollTop <= offsetTop ?
-      'top'    : false
-
-    if (this.affixed === affix) return
-
-    this.affixed = affix
-    this.unpin = affix == 'bottom' ? position.top - scrollTop : null
-
-    this.$element.removeClass(reset).addClass('affix' + (affix ? '-' + affix : ''))
-  }
-
-
- /* AFFIX PLUGIN DEFINITION
-  * ======================= */
-
-  $.fn.affix = function (option) {
-    return this.each(function () {
-      var $this = $(this)
-        , data = $this.data('affix')
-        , options = typeof option == 'object' && option
-      if (!data) $this.data('affix', (data = new Affix(this, options)))
-      if (typeof option == 'string') data[option]()
-    })
-  }
-
-  $.fn.affix.Constructor = Affix
-
-  $.fn.affix.defaults = {
-    offset: 0
-  }
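-
-  // Example: <div data-spy="affix" data-offset-top="200"> pins an element by
-  // swapping the affix/affix-top/affix-bottom classes as the window scrolls;
-  // equivalently, $('#sidebar').affix({ offset: { top: 200 } }).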
-
-
- /* AFFIX DATA-API
-  * ============== */
-
-  $(window).on('load', function () {
-    $('[data-spy="affix"]').each(function () {
-      var $spy = $(this)
-        , data = $spy.data()
-
-      data.offset = data.offset || {}
-
-      data.offsetBottom && (data.offset.bottom = data.offsetBottom)
-      data.offsetTop && (data.offset.top = data.offsetTop)
-
-      $spy.affix(data)
-    })
-  })
-
-
-}(window.jQuery);
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/vendor/scripts/console-helper.js b/branch-1.2/ambari-web/vendor/scripts/console-helper.js
deleted file mode 100644
index 9fd4e3d..0000000
--- a/branch-1.2/ambari-web/vendor/scripts/console-helper.js
+++ /dev/null
@@ -1,11 +0,0 @@
-// Make it safe to do console.log() always.
-(function (con) {
-  var method;
-  var dummy = function() {};
-  var methods = ('assert,count,debug,dir,dirxml,error,exception,group,' +
-     'groupCollapsed,groupEnd,info,log,markTimeline,profile,profileEnd,' + 
-     'time,timeEnd,trace,warn').split(',');
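-  // Replace any missing method with a no-op so calls such as
-  // console.debug(...) never throw in browsers without a console
-  // (e.g. IE8/9 with the developer tools closed).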
-  while (method = methods.pop()) {
-    con[method] = con[method] || dummy;
-  }
-})(window.console = window.console || {});
diff --git a/branch-1.2/ambari-web/vendor/scripts/cubism.v1.js b/branch-1.2/ambari-web/vendor/scripts/cubism.v1.js
deleted file mode 100644
index be9ff4d..0000000
--- a/branch-1.2/ambari-web/vendor/scripts/cubism.v1.js
+++ /dev/null
@@ -1,1085 +0,0 @@
-(function (exports) {
-  var cubism = exports.cubism = {version:"1.2.0"};
-  var cubism_id = 0;
-
-  function cubism_identity(d) {
-    return d;
-  }
-
-  cubism.option = function (name, defaultValue) {
-    var values = cubism.options(name);
-    return values.length ? values[0] : defaultValue;
-  };
-
-  cubism.options = function (name, defaultValues) {
-    var options = location.search.substring(1).split("&"),
-      values = [],
-      i = -1,
-      n = options.length,
-      o;
-    while (++i < n) {
-      if ((o = options[i].split("="))[0] == name) {
-        values.push(decodeURIComponent(o[1]));
-      }
-    }
-    return values.length || arguments.length < 2 ? values : defaultValues;
-  };
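-
-  // Example: with location.search of "?step=30000", cubism.option("step", 1e4)
-  // returns the string "30000"; cubism.options("step") collects every
-  // occurrence of the key as decoded strings.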
-  cubism.context = function () {
-    var context = new cubism_context,
-      step = 1e4, // ten seconds, in milliseconds
-      size = 1440, // four hours at ten seconds, in pixels
-      start0, stop0, // the start and stop for the previous change event
-      start1, stop1, // the start and stop for the next prepare event
-      serverDelay = 5e3,
-      clientDelay = 5e3,
-      event = d3.dispatch("prepare", "beforechange", "change", "focus"),
-      scale = context.scale = d3.time.scale().range([0, size]),
-      timeout,
-      focus;
-
-    function update() {
-      var now = Date.now();
-      stop0 = new Date(Math.floor((now - serverDelay - clientDelay) / step) * step);
-      start0 = new Date(stop0 - size * step);
-      stop1 = new Date(Math.floor((now - serverDelay) / step) * step);
-      start1 = new Date(stop1 - size * step);
-      scale.domain([start0, stop0]);
-      return context;
-    }
-
-    context.start = function () {
-      if (timeout) clearTimeout(timeout);
-      var delay = +stop1 + serverDelay - Date.now();
-
-      // If we're too late for the first prepare event, skip it.
-      if (delay < clientDelay) delay += step;
-
-      timeout = setTimeout(function prepare() {
-        stop1 = new Date(Math.floor((Date.now() - serverDelay) / step) * step);
-        start1 = new Date(stop1 - size * step);
-        event.prepare.call(context, start1, stop1);
-
-        setTimeout(function () {
-          scale.domain([start0 = start1, stop0 = stop1]);
-          event.beforechange.call(context, start1, stop1);
-          event.change.call(context, start1, stop1);
-          event.focus.call(context, focus);
-        }, clientDelay);
-
-        timeout = setTimeout(prepare, step);
-      }, delay);
-      return context;
-    };
-
-    context.stop = function () {
-      timeout = clearTimeout(timeout);
-      return context;
-    };
-
-    timeout = setTimeout(context.start, 10);
-
-    // Set or get the step interval in milliseconds.
-    // Defaults to ten seconds.
-    context.step = function (_) {
-      if (!arguments.length) return step;
-      step = +_;
-      return update();
-    };
-
-    // Set or get the context size (the count of metric values).
-    // Defaults to 1440 (four hours at ten seconds).
-    context.size = function (_) {
-      if (!arguments.length) return size;
-      scale.range([0, size = +_]);
-      return update();
-    };
-
-    // The server delay is the amount of time we wait for the server to compute a
-    // metric. This delay may result from clock skew or from delays collecting
-    // metrics from various hosts. Defaults to 5 seconds.
-    context.serverDelay = function (_) {
-      if (!arguments.length) return serverDelay;
-      serverDelay = +_;
-      return update();
-    };
-
-    // The client delay is the amount of additional time we wait to fetch those
-    // metrics from the server. The client and server delay combined represent the
-    // age of the most recent displayed metric. Defaults to 5 seconds.
-    context.clientDelay = function (_) {
-      if (!arguments.length) return clientDelay;
-      clientDelay = +_;
-      return update();
-    };
-
-    // Sets the focus to the specified index, and dispatches a "focus" event.
-    context.focus = function (i) {
-      event.focus.call(context, focus = i);
-      return context;
-    };
-
-    // Add, remove or get listeners for events.
-    context.on = function (type, listener) {
-      if (arguments.length < 2) return event.on(type);
-
-      event.on(type, listener);
-
-      // Notify the listener of the current start and stop time, as appropriate.
-      // This way, metrics can make requests for data immediately,
-      // and likewise the axis can display itself synchronously.
-      if (listener != null) {
-        if (/^prepare(\.|$)/.test(type)) listener.call(context, start1, stop1);
-        if (/^beforechange(\.|$)/.test(type)) listener.call(context, start0, stop0);
-        if (/^change(\.|$)/.test(type)) listener.call(context, start0, stop0);
-        if (/^focus(\.|$)/.test(type)) listener.call(context, focus);
-      }
-
-      return context;
-    };
-
-    d3.select(window).on("keydown.context-" + ++cubism_id, function () {
-      switch (!d3.event.metaKey && d3.event.keyCode) {
-        case 37: // left
-          if (focus == null) focus = size - 1;
-          if (focus > 0) context.focus(--focus);
-          break;
-        case 39: // right
-          if (focus == null) focus = size - 2;
-          if (focus < size - 1) context.focus(++focus);
-          break;
-        default:
-          return;
-      }
-      d3.event.preventDefault();
-    });
-
-    return update();
-  };
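-
-  // Example: cubism.context().step(3e4).size(1440) polls every 30 seconds and
-  // retains 1440 values per metric; start()/stop() control the polling loop,
-  // and the "prepare", "beforechange", "change" and "focus" events above
-  // drive metrics and charts.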
-
-  function cubism_context() {
-  }
-
-  var cubism_contextPrototype = cubism.context.prototype = cubism_context.prototype;
-
-  cubism_contextPrototype.constant = function (value) {
-    return new cubism_metricConstant(this, +value);
-  };
-  cubism_contextPrototype.cube = function (host) {
-    if (!arguments.length) host = "";
-    var source = {},
-      context = this;
-
-    source.metric = function (expression) {
-      return context.metric(function (start, stop, step, callback) {
-        d3.json(host + "/1.0/metric"
-          + "?expression=" + encodeURIComponent(expression)
-          + "&start=" + cubism_cubeFormatDate(start)
-          + "&stop=" + cubism_cubeFormatDate(stop)
-          + "&step=" + step, function (data) {
-          if (!data) return callback(new Error("unable to load data"));
-          callback(null, data.map(function (d) {
-            return d.value;
-          }));
-        });
-      }, expression += "");
-    };
-
-    // Returns the Cube host.
-    source.toString = function () {
-      return host;
-    };
-
-    return source;
-  };
-
-  var cubism_cubeFormatDate = d3.time.format.iso;
-  cubism_contextPrototype.graphite = function (host) {
-    if (!arguments.length) host = "";
-    var source = {},
-      context = this;
-
-    source.metric = function (expression) {
-      var sum = "sum";
-
-      var metric = context.metric(function (start, stop, step, callback) {
-        var target = expression;
-
-        // Apply the summarize, if necessary.
-        if (step !== 1e4) target = "summarize(" + target + ",'"
-          + (!(step % 36e5) ? step / 36e5 + "hour" : !(step % 6e4) ? step / 6e4 + "min" : step + "sec")
-          + "','" + sum + "')";
-
-        d3.text(host + "/render?format=raw"
-          + "&target=" + encodeURIComponent("alias(" + target + ",'')")
-          + "&from=" + cubism_graphiteFormatDate(start - 2 * step) // off-by-two?
-          + "&until=" + cubism_graphiteFormatDate(stop - 1000), function (text) {
-          if (!text) return callback(new Error("unable to load data"));
-          callback(null, cubism_graphiteParse(text));
-        });
-      }, expression += "");
-
-      metric.summarize = function (_) {
-        sum = _;
-        return metric;
-      };
-
-      return metric;
-    };
-
-    source.find = function (pattern, callback) {
-      d3.json(host + "/metrics/find?format=completer"
-        + "&query=" + encodeURIComponent(pattern), function (result) {
-        if (!result) return callback(new Error("unable to find metrics"));
-        callback(null, result.metrics.map(function (d) {
-          return d.path;
-        }));
-      });
-    };
-
-    // Returns the graphite host.
-    source.toString = function () {
-      return host;
-    };
-
-    return source;
-  };
-
-// Graphite understands seconds since UNIX epoch.
-  function cubism_graphiteFormatDate(time) {
-    return Math.floor(time / 1000);
-  }
-
-// Helper method for parsing graphite's raw format.
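-// Each line has the form "target,start,end,step|v1,v2,...", e.g.
-// "a.b.c,1355000000,1355000600,10|1.0,2.0,None"; the metadata before the bar
-// is split off and the remaining comma-separated values are coerced to numbers.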
-  function cubism_graphiteParse(text) {
-    var i = text.indexOf("|"),
-      meta = text.substring(0, i),
-      c = meta.lastIndexOf(","),
-      b = meta.lastIndexOf(",", c - 1),
-      a = meta.lastIndexOf(",", b - 1),
-      start = meta.substring(a + 1, b) * 1000,
-      step = meta.substring(c + 1) * 1000;
-    return text
-      .substring(i + 1)
-      .split(",")
-      .slice(1) // the first value is always None?
-      .map(function (d) {
-        return +d;
-      });
-  }
-
-  function cubism_metric(context) {
-    if (!(context instanceof cubism_context)) throw new Error("invalid context");
-    this.context = context;
-  }
-
-  var cubism_metricPrototype = cubism_metric.prototype;
-
-  cubism.metric = cubism_metric;
-
-  cubism_metricPrototype.valueAt = function () {
-    return NaN;
-  };
-
-  cubism_metricPrototype.alias = function (name) {
-    this.toString = function () {
-      return name;
-    };
-    return this;
-  };
-
-  cubism_metricPrototype.extent = function () {
-    var i = 0,
-      n = this.context.size(),
-      value,
-      min = Infinity,
-      max = -Infinity;
-    while (++i < n) {
-      value = this.valueAt(i);
-      if (value < min) min = value;
-      if (value > max) max = value;
-    }
-    return [min, max];
-  };
-
-  cubism_metricPrototype.on = function (type, listener) {
-    return arguments.length < 2 ? null : this;
-  };
-
-  cubism_metricPrototype.shift = function () {
-    return this;
-  };
-
-  cubism_contextPrototype.metric = function (request, name) {
-    var context = this,
-      metric = new cubism_metric(context),
-      id = ".metric-" + ++cubism_id,
-      start = -Infinity,
-      stop,
-      step = context.step(),
-      size = context.size(),
-      values = [],
-      event = d3.dispatch("change"),
-      listening = 0,
-      fetching;
-
-    // Prefetch new data into a temporary array.
-    function prepare(start1, stop) {
-      var steps = Math.min(size, Math.round((start1 - start) / step));
-      if (!steps || fetching) return; // already fetched, or fetching!
-      fetching = true;
-      steps = Math.min(size, steps + cubism_metricOverlap);
-      var start0 = new Date(stop - steps * step);
-      request(start0, stop, step, function (error, data) {
-        fetching = false;
-        if (error) return console.warn(error);
-        var i = isFinite(start) ? Math.round((start0 - start) / step) : 0;
-        for (var j = 0, m = data.length; j < m; ++j) values[j + i] = data[j];
-        event.change.call(metric, start, stop);
-      });
-    }
-
-    // When the context changes, switch to the new data, ready-or-not!
-    function beforechange(start1, stop1) {
-      if (!isFinite(start)) start = start1;
-      values.splice(0, Math.max(0, Math.min(size, Math.round((start1 - start) / step))));
-      start = start1;
-      stop = stop1;
-    }
-
-    // Returns the fetched value at the given index, if available.
-    metric.valueAt = function (i) {
-      return values[i];
-    };
-
-    // Returns a new metric whose requests are shifted in time by offset ms.
-    metric.shift = function (offset) {
-      return context.metric(cubism_metricShift(request, +offset));
-    };
-
-    // Adds, removes or gets listeners for the "change" event.
-    metric.on = function (type, listener) {
-      if (!arguments.length) return event.on(type);
-
-      // If there are no listeners, then stop listening to the context,
-      // and avoid unnecessary fetches.
-      if (listener == null) {
-        if (event.on(type) != null && --listening == 0) {
-          context.on("prepare" + id, null).on("beforechange" + id, null);
-        }
-      } else {
-        if (event.on(type) == null && ++listening == 1) {
-          context.on("prepare" + id, prepare).on("beforechange" + id, beforechange);
-        }
-      }
-
-      event.on(type, listener);
-
-      // Notify the listener of the current start and stop time, as appropriate.
-      // This way, charts can display synchronous metrics immediately.
-      if (listener != null) {
-        if (/^change(\.|$)/.test(type)) listener.call(context, start, stop);
-      }
-
-      return metric;
-    };
-
-    // If a name was supplied, report it from toString.
-    if (arguments.length > 1) metric.toString = function () {
-      return name;
-    };
-
-    return metric;
-  };
-
-// Number of metrics to refetch each period, in case of lag.
-  var cubism_metricOverlap = 6;
-
-// Wraps the specified request implementation, and shifts time by the given offset.
-  function cubism_metricShift(request, offset) {
-    return function (start, stop, step, callback) {
-      request(new Date(+start + offset), new Date(+stop + offset), step, callback);
-    };
-  }
-
-  function cubism_metricConstant(context, value) {
-    cubism_metric.call(this, context);
-    value = +value;
-    var name = value + "";
-    this.valueOf = function () {
-      return value;
-    };
-    this.toString = function () {
-      return name;
-    };
-  }
-
-  var cubism_metricConstantPrototype = cubism_metricConstant.prototype = Object.create(cubism_metric.prototype);
-
-  cubism_metricConstantPrototype.valueAt = function () {
-    return +this;
-  };
-
-  cubism_metricConstantPrototype.extent = function () {
-    return [+this, +this];
-  };
-  function cubism_metricOperator(name, operate) {
-
-    function cubism_metricOperator(left, right) {
-      if (!(right instanceof cubism_metric)) right = new cubism_metricConstant(left.context, right);
-      else if (left.context !== right.context) throw new Error("mismatch context");
-      cubism_metric.call(this, left.context);
-      this.left = left;
-      this.right = right;
-      this.toString = function () {
-        return left + " " + name + " " + right;
-      };
-    }
-
-    var cubism_metricOperatorPrototype = cubism_metricOperator.prototype = Object.create(cubism_metric.prototype);
-
-    cubism_metricOperatorPrototype.valueAt = function (i) {
-      return operate(this.left.valueAt(i), this.right.valueAt(i));
-    };
-
-    cubism_metricOperatorPrototype.shift = function (offset) {
-      return new cubism_metricOperator(this.left.shift(offset), this.right.shift(offset));
-    };
-
-    cubism_metricOperatorPrototype.on = function (type, listener) {
-      if (arguments.length < 2) return this.left.on(type);
-      this.left.on(type, listener);
-      this.right.on(type, listener);
-      return this;
-    };
-
-    return function (right) {
-      return new cubism_metricOperator(this, right);
-    };
-  }
-
-  cubism_metricPrototype.add = cubism_metricOperator("+", function (left, right) {
-    return left + right;
-  });
-
-  cubism_metricPrototype.subtract = cubism_metricOperator("-", function (left, right) {
-    return left - right;
-  });
-
-  cubism_metricPrototype.multiply = cubism_metricOperator("*", function (left, right) {
-    return left * right;
-  });
-
-  cubism_metricPrototype.divide = cubism_metricOperator("/", function (left, right) {
-    return left / right;
-  });
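-
-  // Example: given metrics a and b from the same context, a.subtract(b) is a
-  // derived metric whose valueAt(i) is computed lazily from both operands;
-  // a.add(2) first wraps the number in a cubism_metricConstant.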
-  cubism_contextPrototype.horizon = function () {
-    var context = this,
-      mode = "offset",
-      buffer = document.createElement("canvas"),
-      width = buffer.width = context.size(),
-      height = buffer.height = 30,
-      scale = d3.scale.linear().interpolate(d3.interpolateRound),
-      metric = cubism_identity,
-      extent = null,
-      title = cubism_identity,
-      format = d3.format(".2s"),
-      colors = ["#08519c", "#3182bd", "#6baed6", "#bdd7e7", "#bae4b3", "#74c476", "#31a354", "#006d2c"];
-
-    function horizon(selection) {
-
-      selection
-        .on("mousemove.horizon", function () {
-          context.focus(d3.mouse(this)[0]);
-        })
-        .on("mouseout.horizon", function () {
-          context.focus(null);
-        });
-
-      selection.append("canvas")
-        .attr("width", width)
-        .attr("height", height);
-
-      selection.append("span")
-        .attr("class", "title")
-        .text(title);
-
-      selection.append("span")
-        .attr("class", "value");
-
-      selection.each(function (d, i) {
-        var that = this,
-          id = ++cubism_id,
-          metric_ = typeof metric === "function" ? metric.call(that, d, i) : metric,
-          colors_ = typeof colors === "function" ? colors.call(that, d, i) : colors,
-          extent_ = typeof extent === "function" ? extent.call(that, d, i) : extent,
-          start = -Infinity,
-          step = context.step(),
-          canvas = d3.select(that).select("canvas"),
-          span = d3.select(that).select(".value"),
-          max_,
-          m = colors_.length >> 1,
-          ready;
-
-        canvas.datum({id:id, metric:metric_});
-        canvas = canvas.node().getContext("2d");
-
-        function change(start1, stop) {
-          canvas.save();
-
-          // compute the new extent and ready flag
-          var extent = metric_.extent();
-          ready = extent.every(isFinite);
-          if (extent_ != null) extent = extent_;
-
-          // if this is an update (with no extent change), copy old values!
-          var i0 = 0, max = Math.max(-extent[0], extent[1]);
-          if (this === context) {
-            if (max == max_) {
-              i0 = width - cubism_metricOverlap;
-              var dx = (start1 - start) / step;
-              if (dx < width) {
-                var canvas0 = buffer.getContext("2d");
-                canvas0.clearRect(0, 0, width, height);
-                canvas0.drawImage(canvas.canvas, dx, 0, width - dx, height, 0, 0, width - dx, height);
-                canvas.clearRect(0, 0, width, height);
-                canvas.drawImage(canvas0.canvas, 0, 0);
-              }
-            }
-            start = start1;
-          }
-
-          // update the domain
-          scale.domain([0, max_ = max]);
-
-          // clear for the new data
-          canvas.clearRect(i0, 0, width - i0, height);
-
-          // record whether there are negative values to display
-          var negative;
-
-          // positive bands
-          for (var j = 0; j < m; ++j) {
-            canvas.fillStyle = colors_[m + j];
-
-            // Adjust the range based on the current band index.
-            var y0 = (j - m + 1) * height;
-            scale.range([m * height + y0, y0]);
-            y0 = scale(0);
-
-            for (var i = i0, n = width, y1; i < n; ++i) {
-              y1 = metric_.valueAt(i);
-              if (y1 <= 0) {
-                negative = true;
-                continue;
-              }
-              canvas.fillRect(i, y1 = scale(y1), 1, y0 - y1);
-            }
-          }
-
-          if (negative) {
-            // enable offset mode
-            if (mode === "offset") {
-              canvas.translate(0, height);
-              canvas.scale(1, -1);
-            }
-
-            // negative bands
-            for (var j = 0; j < m; ++j) {
-              canvas.fillStyle = colors_[m - 1 - j];
-
-              // Adjust the range based on the current band index.
-              var y0 = (j - m + 1) * height;
-              scale.range([m * height + y0, y0]);
-              y0 = scale(0);
-
-              for (var i = i0, n = width, y1; i < n; ++i) {
-                y1 = metric_.valueAt(i);
-                if (y1 >= 0) continue;
-                canvas.fillRect(i, scale(-y1), 1, y0 - scale(-y1));
-              }
-            }
-          }
-
-          canvas.restore();
-        }
-
-        function focus(i) {
-          if (i == null) i = width - 1;
-          var value = metric_.valueAt(i);
-          span.datum(value).text(isNaN(value) ? null : format);
-        }
-
-        // Update the chart when the context changes.
-        context.on("change.horizon-" + id, change);
-        context.on("focus.horizon-" + id, focus);
-
-        // Display the first metric change immediately,
-        // but defer subsequent updates to the canvas change.
-        // Note that someone still needs to listen to the metric,
-        // so that it continues to update automatically.
-        metric_.on("change.horizon-" + id, function (start, stop) {
-          change(start, stop), focus();
-          if (ready) metric_.on("change.horizon-" + id, cubism_identity);
-        });
-      });
-    }
-
-    horizon.remove = function (selection) {
-
-      selection
-        .on("mousemove.horizon", null)
-        .on("mouseout.horizon", null);
-
-      selection.selectAll("canvas")
-        .each(remove)
-        .remove();
-
-      selection.selectAll(".title,.value")
-        .remove();
-
-      function remove(d) {
-        d.metric.on("change.horizon-" + d.id, null);
-        context.on("change.horizon-" + d.id, null);
-        context.on("focus.horizon-" + d.id, null);
-      }
-    };
-
-    horizon.mode = function (_) {
-      if (!arguments.length) return mode;
-      mode = _ + "";
-      return horizon;
-    };
-
-    horizon.height = function (_) {
-      if (!arguments.length) return height;
-      buffer.height = height = +_;
-      return horizon;
-    };
-
-    horizon.metric = function (_) {
-      if (!arguments.length) return metric;
-      metric = _;
-      return horizon;
-    };
-
-    horizon.scale = function (_) {
-      if (!arguments.length) return scale;
-      scale = _;
-      return horizon;
-    };
-
-    horizon.extent = function (_) {
-      if (!arguments.length) return extent;
-      extent = _;
-      return horizon;
-    };
-
-    horizon.title = function (_) {
-      if (!arguments.length) return title;
-      title = _;
-      return horizon;
-    };
-
-    horizon.format = function (_) {
-      if (!arguments.length) return format;
-      format = _;
-      return horizon;
-    };
-
-    horizon.colors = function (_) {
-      if (!arguments.length) return colors;
-      colors = _;
-      return horizon;
-    };
-
-    return horizon;
-  };
-  cubism_contextPrototype.comparison = function () {
-    var context = this,
-      width = context.size(),
-      height = 120,
-      scale = d3.scale.linear().interpolate(d3.interpolateRound),
-      primary = function (d) {
-        return d[0];
-      },
-      secondary = function (d) {
-        return d[1];
-      },
-      extent = null,
-      title = cubism_identity,
-      formatPrimary = cubism_comparisonPrimaryFormat,
-      formatChange = cubism_comparisonChangeFormat,
-      colors = ["#9ecae1", "#225b84", "#a1d99b", "#22723a"],
-      strokeWidth = 1.5;
-
-    function comparison(selection) {
-
-      selection
-        .on("mousemove.comparison", function () {
-          context.focus(d3.mouse(this)[0]);
-        })
-        .on("mouseout.comparison", function () {
-          context.focus(null);
-        });
-
-      selection.append("canvas")
-        .attr("width", width)
-        .attr("height", height);
-
-      selection.append("span")
-        .attr("class", "title")
-        .text(title);
-
-      selection.append("span")
-        .attr("class", "value primary");
-
-      selection.append("span")
-        .attr("class", "value change");
-
-      selection.each(function (d, i) {
-        var that = this,
-          id = ++cubism_id,
-          primary_ = typeof primary === "function" ? primary.call(that, d, i) : primary,
-          secondary_ = typeof secondary === "function" ? secondary.call(that, d, i) : secondary,
-          extent_ = typeof extent === "function" ? extent.call(that, d, i) : extent,
-          div = d3.select(that),
-          canvas = div.select("canvas"),
-          spanPrimary = div.select(".value.primary"),
-          spanChange = div.select(".value.change"),
-          ready;
-
-        canvas.datum({id:id, primary:primary_, secondary:secondary_});
-        canvas = canvas.node().getContext("2d");
-
-        function change(start, stop) {
-          canvas.save();
-          canvas.clearRect(0, 0, width, height);
-
-          // update the scale
-          var primaryExtent = primary_.extent(),
-            secondaryExtent = secondary_.extent(),
-            extent = extent_ == null ? primaryExtent : extent_;
-          scale.domain(extent).range([height, 0]);
-          ready = primaryExtent.concat(secondaryExtent).every(isFinite);
-
-          // consistent overplotting
-          var round = start / context.step() & 1
-            ? cubism_comparisonRoundOdd
-            : cubism_comparisonRoundEven;
-
-          // positive changes
-          canvas.fillStyle = colors[2];
-          for (var i = 0, n = width; i < n; ++i) {
-            var y0 = scale(primary_.valueAt(i)),
-              y1 = scale(secondary_.valueAt(i));
-            if (y0 < y1) canvas.fillRect(round(i), y0, 1, y1 - y0);
-          }
-
-          // negative changes
-          canvas.fillStyle = colors[0];
-          for (i = 0; i < n; ++i) {
-            var y0 = scale(primary_.valueAt(i)),
-              y1 = scale(secondary_.valueAt(i));
-            if (y0 > y1) canvas.fillRect(round(i), y1, 1, y0 - y1);
-          }
-
-          // positive values
-          canvas.fillStyle = colors[3];
-          for (i = 0; i < n; ++i) {
-            var y0 = scale(primary_.valueAt(i)),
-              y1 = scale(secondary_.valueAt(i));
-            if (y0 <= y1) canvas.fillRect(round(i), y0, 1, strokeWidth);
-          }
-
-          // negative values
-          canvas.fillStyle = colors[1];
-          for (i = 0; i < n; ++i) {
-            var y0 = scale(primary_.valueAt(i)),
-              y1 = scale(secondary_.valueAt(i));
-            if (y0 > y1) canvas.fillRect(round(i), y0 - strokeWidth, 1, strokeWidth);
-          }
-
-          canvas.restore();
-        }
-
-        function focus(i) {
-          if (i == null) i = width - 1;
-          var valuePrimary = primary_.valueAt(i),
-            valueSecondary = secondary_.valueAt(i),
-            valueChange = (valuePrimary - valueSecondary) / valueSecondary;
-
-          spanPrimary
-            .datum(valuePrimary)
-            .text(isNaN(valuePrimary) ? null : formatPrimary);
-
-          spanChange
-            .datum(valueChange)
-            .text(isNaN(valueChange) ? null : formatChange)
-            .attr("class", "value change " + (valueChange > 0 ? "positive" : valueChange < 0 ? "negative" : ""));
-        }
-
-        // Display the first primary change immediately,
-        // but defer subsequent updates to the context change.
-        // Note that someone still needs to listen to the metric,
-        // so that it continues to update automatically.
-        primary_.on("change.comparison-" + id, firstChange);
-        secondary_.on("change.comparison-" + id, firstChange);
-        function firstChange(start, stop) {
-          change(start, stop), focus();
-          if (ready) {
-            primary_.on("change.comparison-" + id, cubism_identity);
-            secondary_.on("change.comparison-" + id, cubism_identity);
-          }
-        }
-
-        // Update the chart when the context changes.
-        context.on("change.comparison-" + id, change);
-        context.on("focus.comparison-" + id, focus);
-      });
-    }
-
-    comparison.remove = function (selection) {
-
-      selection
-        .on("mousemove.comparison", null)
-        .on("mouseout.comparison", null);
-
-      selection.selectAll("canvas")
-        .each(remove)
-        .remove();
-
-      selection.selectAll(".title,.value")
-        .remove();
-
-      function remove(d) {
-        d.primary.on("change.comparison-" + d.id, null);
-        d.secondary.on("change.comparison-" + d.id, null);
-        context.on("change.comparison-" + d.id, null);
-        context.on("focus.comparison-" + d.id, null);
-      }
-    };
-
-    comparison.height = function (_) {
-      if (!arguments.length) return height;
-      height = +_;
-      return comparison;
-    };
-
-    comparison.primary = function (_) {
-      if (!arguments.length) return primary;
-      primary = _;
-      return comparison;
-    };
-
-    comparison.secondary = function (_) {
-      if (!arguments.length) return secondary;
-      secondary = _;
-      return comparison;
-    };
-
-    comparison.scale = function (_) {
-      if (!arguments.length) return scale;
-      scale = _;
-      return comparison;
-    };
-
-    comparison.extent = function (_) {
-      if (!arguments.length) return extent;
-      extent = _;
-      return comparison;
-    };
-
-    comparison.title = function (_) {
-      if (!arguments.length) return title;
-      title = _;
-      return comparison;
-    };
-
-    comparison.formatPrimary = function (_) {
-      if (!arguments.length) return formatPrimary;
-      formatPrimary = _;
-      return comparison;
-    };
-
-    comparison.formatChange = function (_) {
-      if (!arguments.length) return formatChange;
-      formatChange = _;
-      return comparison;
-    };
-
-    comparison.colors = function (_) {
-      if (!arguments.length) return colors;
-      colors = _;
-      return comparison;
-    };
-
-    comparison.strokeWidth = function (_) {
-      if (!arguments.length) return strokeWidth;
-      strokeWidth = _;
-      return comparison;
-    };
-
-    return comparison;
-  };
-
-  var cubism_comparisonPrimaryFormat = d3.format(".2s"),
-    cubism_comparisonChangeFormat = d3.format("+.0%");
-
-  function cubism_comparisonRoundEven(i) {
-    return i & 0xfffffe;
-  }
-
-  function cubism_comparisonRoundOdd(i) {
-    return ((i + 1) & 0xfffffe) - 1;
-  }
-
-  cubism_contextPrototype.axis = function () {
-    var context = this,
-      scale = context.scale,
-      axis_ = d3.svg.axis().scale(scale);
-
-    var format = context.step() < 6e4 ? cubism_axisFormatSeconds
-      : context.step() < 864e5 ? cubism_axisFormatMinutes
-      : cubism_axisFormatDays;
-
-    function axis(selection) {
-      var id = ++cubism_id,
-        tick;
-
-      var g = selection.append("svg")
-        .datum({id:id})
-        .attr("width", context.size())
-        .attr("height", Math.max(28, -axis.tickSize()))
-        .append("g")
-        .attr("transform", "translate(0," + (axis_.orient() === "top" ? 27 : 4) + ")")
-        .call(axis_);
-
-      context.on("change.axis-" + id, function () {
-        g.call(axis_);
-        if (!tick) tick = d3.select(g.node().appendChild(g.selectAll("text").node().cloneNode(true)))
-          .style("display", "none")
-          .text(null);
-      });
-
-      context.on("focus.axis-" + id, function (i) {
-        if (tick) {
-          if (i == null) {
-            tick.style("display", "none");
-            g.selectAll("text").style("fill-opacity", null);
-          } else {
-            tick.style("display", null).attr("x", i).text(format(scale.invert(i)));
-            var dx = tick.node().getComputedTextLength() + 6;
-            g.selectAll("text").style("fill-opacity", function (d) {
-              return Math.abs(scale(d) - i) < dx ? 0 : 1;
-            });
-          }
-        }
-      });
-    }
-
-    axis.remove = function (selection) {
-
-      selection.selectAll("svg")
-        .each(remove)
-        .remove();
-
-      function remove(d) {
-        context.on("change.axis-" + d.id, null);
-        context.on("focus.axis-" + d.id, null);
-      }
-    };
-
-    return d3.rebind(axis, axis_,
-      "orient",
-      "ticks",
-      "tickSubdivide",
-      "tickSize",
-      "tickPadding",
-      "tickFormat");
-  };
-
-  var cubism_axisFormatSeconds = d3.time.format("%I:%M:%S %p"),
-    cubism_axisFormatMinutes = d3.time.format("%I:%M %p"),
-    cubism_axisFormatDays = d3.time.format("%B %d");
-  cubism_contextPrototype.rule = function () {
-    var context = this,
-      metric = cubism_identity;
-
-    function rule(selection) {
-      var id = ++cubism_id;
-
-      var line = selection.append("div")
-        .datum({id:id})
-        .attr("class", "line")
-        .call(cubism_ruleStyle);
-
-      selection.each(function (d, i) {
-        var that = this,
-          id = ++cubism_id,
-          metric_ = typeof metric === "function" ? metric.call(that, d, i) : metric;
-
-        if (!metric_) return;
-
-        function change(start, stop) {
-          var values = [];
-
-          for (var i = 0, n = context.size(); i < n; ++i) {
-            if (metric_.valueAt(i)) {
-              values.push(i);
-            }
-          }
-
-          var lines = selection.selectAll(".metric").data(values);
-          lines.exit().remove();
-          lines.enter().append("div").attr("class", "metric line").call(cubism_ruleStyle);
-          lines.style("left", cubism_ruleLeft);
-        }
-
-        context.on("change.rule-" + id, change);
-        metric_.on("change.rule-" + id, change);
-      });
-
-      context.on("focus.rule-" + id, function (i) {
-        line.datum(i)
-          .style("display", i == null ? "none" : null)
-          .style("left", cubism_ruleLeft);
-      });
-    }
-
-    rule.remove = function (selection) {
-
-      selection.selectAll(".line")
-        .each(remove)
-        .remove();
-
-      function remove(d) {
-        context.on("focus.rule-" + d.id, null);
-      }
-    };
-
-    rule.metric = function (_) {
-      if (!arguments.length) return metric;
-      metric = _;
-      return rule;
-    };
-
-    return rule;
-  };
-
-  function cubism_ruleStyle(line) {
-    line
-      .style("position", "absolute")
-      .style("top", 0)
-      .style("bottom", 0)
-      .style("width", "1px")
-      .style("pointer-events", "none");
-  }
-
-  function cubism_ruleLeft(i) {
-    return i + "px";
-  }
-})(this);
diff --git a/branch-1.2/ambari-web/vendor/scripts/d3.v2.js b/branch-1.2/ambari-web/vendor/scripts/d3.v2.js
deleted file mode 100644
index 714656b..0000000
--- a/branch-1.2/ambari-web/vendor/scripts/d3.v2.js
+++ /dev/null
@@ -1,7033 +0,0 @@
-(function() {
-  function d3_class(ctor, properties) {
-    try {
-      for (var key in properties) {
-        Object.defineProperty(ctor.prototype, key, {
-          value: properties[key],
-          enumerable: false
-        });
-      }
-    } catch (e) {
-      ctor.prototype = properties;
-    }
-  }
-  function d3_arrayCopy(pseudoarray) {
-    var i = -1, n = pseudoarray.length, array = [];
-    while (++i < n) array.push(pseudoarray[i]);
-    return array;
-  }
-  function d3_arraySlice(pseudoarray) {
-    return Array.prototype.slice.call(pseudoarray);
-  }
-  function d3_Map() {}
-  function d3_identity(d) {
-    return d;
-  }
-  function d3_this() {
-    return this;
-  }
-  function d3_true() {
-    return true;
-  }
-  function d3_functor(v) {
-    return typeof v === "function" ? v : function() {
-      return v;
-    };
-  }
-  function d3_rebind(target, source, method) {
-    return function() {
-      var value = method.apply(source, arguments);
-      return arguments.length ? target : value;
-    };
-  }
-  function d3_number(x) {
-    return x != null && !isNaN(x);
-  }
-  function d3_zipLength(d) {
-    return d.length;
-  }
-  function d3_splitter(d) {
-    return d == null;
-  }
-  function d3_collapse(s) {
-    return s.trim().replace(/\s+/g, " ");
-  }
-  function d3_range_integerScale(x) {
-    var k = 1;
-    while (x * k % 1) k *= 10;
-    return k;
-  }
-  function d3_dispatch() {}
-  function d3_dispatch_event(dispatch) {
-    function event() {
-      var z = listeners, i = -1, n = z.length, l;
-      while (++i < n) if (l = z[i].on) l.apply(this, arguments);
-      return dispatch;
-    }
-    var listeners = [], listenerByName = new d3_Map;
-    event.on = function(name, listener) {
-      var l = listenerByName.get(name), i;
-      if (arguments.length < 2) return l && l.on;
-      if (l) {
-        l.on = null;
-        listeners = listeners.slice(0, i = listeners.indexOf(l)).concat(listeners.slice(i + 1));
-        listenerByName.remove(name);
-      }
-      if (listener) listeners.push(listenerByName.set(name, {
-        on: listener
-      }));
-      return dispatch;
-    };
-    return event;
-  }
-  function d3_format_precision(x, p) {
-    return p - (x ? 1 + Math.floor(Math.log(x + Math.pow(10, 1 + Math.floor(Math.log(x) / Math.LN10) - p)) / Math.LN10) : 1);
-  }
-  function d3_format_typeDefault(x) {
-    return x + "";
-  }
-  function d3_format_group(value) {
-    var i = value.lastIndexOf("."), f = i >= 0 ? value.substring(i) : (i = value.length, ""), t = [];
-    while (i > 0) t.push(value.substring(i -= 3, i + 3));
-    return t.reverse().join(",") + f;
-  }
-  function d3_formatPrefix(d, i) {
-    var k = Math.pow(10, Math.abs(8 - i) * 3);
-    return {
-      scale: i > 8 ? function(d) {
-        return d / k;
-      } : function(d) {
-        return d * k;
-      },
-      symbol: d
-    };
-  }
-  function d3_ease_clamp(f) {
-    return function(t) {
-      return t <= 0 ? 0 : t >= 1 ? 1 : f(t);
-    };
-  }
-  function d3_ease_reverse(f) {
-    return function(t) {
-      return 1 - f(1 - t);
-    };
-  }
-  function d3_ease_reflect(f) {
-    return function(t) {
-      return .5 * (t < .5 ? f(2 * t) : 2 - f(2 - 2 * t));
-    };
-  }
-  function d3_ease_identity(t) {
-    return t;
-  }
-  function d3_ease_poly(e) {
-    return function(t) {
-      return Math.pow(t, e);
-    };
-  }
-  function d3_ease_sin(t) {
-    return 1 - Math.cos(t * Math.PI / 2);
-  }
-  function d3_ease_exp(t) {
-    return Math.pow(2, 10 * (t - 1));
-  }
-  function d3_ease_circle(t) {
-    return 1 - Math.sqrt(1 - t * t);
-  }
-  function d3_ease_elastic(a, p) {
-    var s;
-    if (arguments.length < 2) p = .45;
-    if (arguments.length < 1) {
-      a = 1;
-      s = p / 4;
-    } else s = p / (2 * Math.PI) * Math.asin(1 / a);
-    return function(t) {
-      return 1 + a * Math.pow(2, 10 * -t) * Math.sin((t - s) * 2 * Math.PI / p);
-    };
-  }
-  function d3_ease_back(s) {
-    if (!s) s = 1.70158;
-    return function(t) {
-      return t * t * ((s + 1) * t - s);
-    };
-  }
-  function d3_ease_bounce(t) {
-    return t < 1 / 2.75 ? 7.5625 * t * t : t < 2 / 2.75 ? 7.5625 * (t -= 1.5 / 2.75) * t + .75 : t < 2.5 / 2.75 ? 7.5625 * (t -= 2.25 / 2.75) * t + .9375 : 7.5625 * (t -= 2.625 / 2.75) * t + .984375;
-  }
-  function d3_eventCancel() {
-    d3.event.stopPropagation();
-    d3.event.preventDefault();
-  }
-  function d3_eventSource() {
-    var e = d3.event, s;
-    while (s = e.sourceEvent) e = s;
-    return e;
-  }
-  function d3_eventDispatch(target) {
-    var dispatch = new d3_dispatch, i = 0, n = arguments.length;
-    while (++i < n) dispatch[arguments[i]] = d3_dispatch_event(dispatch);
-    dispatch.of = function(thiz, argumentz) {
-      return function(e1) {
-        try {
-          var e0 = e1.sourceEvent = d3.event;
-          e1.target = target;
-          d3.event = e1;
-          dispatch[e1.type].apply(thiz, argumentz);
-        } finally {
-          d3.event = e0;
-        }
-      };
-    };
-    return dispatch;
-  }
-  function d3_transform(m) {
-    var r0 = [ m.a, m.b ], r1 = [ m.c, m.d ], kx = d3_transformNormalize(r0), kz = d3_transformDot(r0, r1), ky = d3_transformNormalize(d3_transformCombine(r1, r0, -kz)) || 0;
-    if (r0[0] * r1[1] < r1[0] * r0[1]) {
-      r0[0] *= -1;
-      r0[1] *= -1;
-      kx *= -1;
-      kz *= -1;
-    }
-    this.rotate = (kx ? Math.atan2(r0[1], r0[0]) : Math.atan2(-r1[0], r1[1])) * d3_transformDegrees;
-    this.translate = [ m.e, m.f ];
-    this.scale = [ kx, ky ];
-    this.skew = ky ? Math.atan2(kz, ky) * d3_transformDegrees : 0;
-  }
-  function d3_transformDot(a, b) {
-    return a[0] * b[0] + a[1] * b[1];
-  }
-  function d3_transformNormalize(a) {
-    var k = Math.sqrt(d3_transformDot(a, a));
-    if (k) {
-      a[0] /= k;
-      a[1] /= k;
-    }
-    return k;
-  }
-  function d3_transformCombine(a, b, k) {
-    a[0] += k * b[0];
-    a[1] += k * b[1];
-    return a;
-  }
-  function d3_interpolateByName(name) {
-    return name == "transform" ? d3.interpolateTransform : d3.interpolate;
-  }
-  function d3_uninterpolateNumber(a, b) {
-    b = b - (a = +a) ? 1 / (b - a) : 0;
-    return function(x) {
-      return (x - a) * b;
-    };
-  }
-  function d3_uninterpolateClamp(a, b) {
-    b = b - (a = +a) ? 1 / (b - a) : 0;
-    return function(x) {
-      return Math.max(0, Math.min(1, (x - a) * b));
-    };
-  }
-  function d3_rgb(r, g, b) {
-    return new d3_Rgb(r, g, b);
-  }
-  function d3_Rgb(r, g, b) {
-    this.r = r;
-    this.g = g;
-    this.b = b;
-  }
-  function d3_rgb_hex(v) {
-    return v < 16 ? "0" + Math.max(0, v).toString(16) : Math.min(255, v).toString(16);
-  }
-  function d3_rgb_parse(format, rgb, hsl) {
-    var r = 0, g = 0, b = 0, m1, m2, name;
-    m1 = /([a-z]+)\((.*)\)/i.exec(format);
-    if (m1) {
-      m2 = m1[2].split(",");
-      switch (m1[1]) {
-       case "hsl":
-        {
-          return hsl(parseFloat(m2[0]), parseFloat(m2[1]) / 100, parseFloat(m2[2]) / 100);
-        }
-       case "rgb":
-        {
-          return rgb(d3_rgb_parseNumber(m2[0]), d3_rgb_parseNumber(m2[1]), d3_rgb_parseNumber(m2[2]));
-        }
-      }
-    }
-    if (name = d3_rgb_names.get(format)) return rgb(name.r, name.g, name.b);
-    if (format != null && format.charAt(0) === "#") {
-      if (format.length === 4) {
-        r = format.charAt(1);
-        r += r;
-        g = format.charAt(2);
-        g += g;
-        b = format.charAt(3);
-        b += b;
-      } else if (format.length === 7) {
-        r = format.substring(1, 3);
-        g = format.substring(3, 5);
-        b = format.substring(5, 7);
-      }
-      r = parseInt(r, 16);
-      g = parseInt(g, 16);
-      b = parseInt(b, 16);
-    }
-    return rgb(r, g, b);
-  }
-  function d3_rgb_hsl(r, g, b) {
-    var min = Math.min(r /= 255, g /= 255, b /= 255), max = Math.max(r, g, b), d = max - min, h, s, l = (max + min) / 2;
-    if (d) {
-      s = l < .5 ? d / (max + min) : d / (2 - max - min);
-      if (r == max) h = (g - b) / d + (g < b ? 6 : 0); else if (g == max) h = (b - r) / d + 2; else h = (r - g) / d + 4;
-      h *= 60;
-    } else {
-      s = h = 0;
-    }
-    return d3_hsl(h, s, l);
-  }
-  function d3_rgb_lab(r, g, b) {
-    r = d3_rgb_xyz(r);
-    g = d3_rgb_xyz(g);
-    b = d3_rgb_xyz(b);
-    var x = d3_xyz_lab((.4124564 * r + .3575761 * g + .1804375 * b) / d3_lab_X), y = d3_xyz_lab((.2126729 * r + .7151522 * g + .072175 * b) / d3_lab_Y), z = d3_xyz_lab((.0193339 * r + .119192 * g + .9503041 * b) / d3_lab_Z);
-    return d3_lab(116 * y - 16, 500 * (x - y), 200 * (y - z));
-  }
-  function d3_rgb_xyz(r) {
-    return (r /= 255) <= .04045 ? r / 12.92 : Math.pow((r + .055) / 1.055, 2.4);
-  }
-  function d3_rgb_parseNumber(c) {
-    var f = parseFloat(c);
-    return c.charAt(c.length - 1) === "%" ? Math.round(f * 2.55) : f;
-  }
-  function d3_hsl(h, s, l) {
-    return new d3_Hsl(h, s, l);
-  }
-  function d3_Hsl(h, s, l) {
-    this.h = h;
-    this.s = s;
-    this.l = l;
-  }
-  function d3_hsl_rgb(h, s, l) {
-    function v(h) {
-      if (h > 360) h -= 360; else if (h < 0) h += 360;
-      if (h < 60) return m1 + (m2 - m1) * h / 60;
-      if (h < 180) return m2;
-      if (h < 240) return m1 + (m2 - m1) * (240 - h) / 60;
-      return m1;
-    }
-    function vv(h) {
-      return Math.round(v(h) * 255);
-    }
-    var m1, m2;
-    h = h % 360;
-    if (h < 0) h += 360;
-    s = s < 0 ? 0 : s > 1 ? 1 : s;
-    l = l < 0 ? 0 : l > 1 ? 1 : l;
-    m2 = l <= .5 ? l * (1 + s) : l + s - l * s;
-    m1 = 2 * l - m2;
-    return d3_rgb(vv(h + 120), vv(h), vv(h - 120));
-  }
-  function d3_hcl(h, c, l) {
-    return new d3_Hcl(h, c, l);
-  }
-  function d3_Hcl(h, c, l) {
-    this.h = h;
-    this.c = c;
-    this.l = l;
-  }
-  function d3_hcl_lab(h, c, l) {
-    return d3_lab(l, Math.cos(h *= Math.PI / 180) * c, Math.sin(h) * c);
-  }
-  function d3_lab(l, a, b) {
-    return new d3_Lab(l, a, b);
-  }
-  function d3_Lab(l, a, b) {
-    this.l = l;
-    this.a = a;
-    this.b = b;
-  }
-  function d3_lab_rgb(l, a, b) {
-    var y = (l + 16) / 116, x = y + a / 500, z = y - b / 200;
-    x = d3_lab_xyz(x) * d3_lab_X;
-    y = d3_lab_xyz(y) * d3_lab_Y;
-    z = d3_lab_xyz(z) * d3_lab_Z;
-    return d3_rgb(d3_xyz_rgb(3.2404542 * x - 1.5371385 * y - .4985314 * z), d3_xyz_rgb(-.969266 * x + 1.8760108 * y + .041556 * z), d3_xyz_rgb(.0556434 * x - .2040259 * y + 1.0572252 * z));
-  }
-  function d3_lab_hcl(l, a, b) {
-    return d3_hcl(Math.atan2(b, a) / Math.PI * 180, Math.sqrt(a * a + b * b), l);
-  }
-  function d3_lab_xyz(x) {
-    return x > .206893034 ? x * x * x : (x - 4 / 29) / 7.787037;
-  }
-  function d3_xyz_lab(x) {
-    return x > .008856 ? Math.pow(x, 1 / 3) : 7.787037 * x + 4 / 29;
-  }
-  function d3_xyz_rgb(r) {
-    return Math.round(255 * (r <= .00304 ? 12.92 * r : 1.055 * Math.pow(r, 1 / 2.4) - .055));
-  }
-  function d3_selection(groups) {
-    d3_arraySubclass(groups, d3_selectionPrototype);
-    return groups;
-  }
-  function d3_selection_selector(selector) {
-    return function() {
-      return d3_select(selector, this);
-    };
-  }
-  function d3_selection_selectorAll(selector) {
-    return function() {
-      return d3_selectAll(selector, this);
-    };
-  }
-  function d3_selection_attr(name, value) {
-    function attrNull() {
-      this.removeAttribute(name);
-    }
-    function attrNullNS() {
-      this.removeAttributeNS(name.space, name.local);
-    }
-    function attrConstant() {
-      this.setAttribute(name, value);
-    }
-    function attrConstantNS() {
-      this.setAttributeNS(name.space, name.local, value);
-    }
-    function attrFunction() {
-      var x = value.apply(this, arguments);
-      if (x == null) this.removeAttribute(name); else this.setAttribute(name, x);
-    }
-    function attrFunctionNS() {
-      var x = value.apply(this, arguments);
-      if (x == null) this.removeAttributeNS(name.space, name.local); else this.setAttributeNS(name.space, name.local, x);
-    }
-    name = d3.ns.qualify(name);
-    return value == null ? name.local ? attrNullNS : attrNull : typeof value === "function" ? name.local ? attrFunctionNS : attrFunction : name.local ? attrConstantNS : attrConstant;
-  }
-  function d3_selection_classedRe(name) {
-    return new RegExp("(?:^|\\s+)" + d3.requote(name) + "(?:\\s+|$)", "g");
-  }
-  function d3_selection_classed(name, value) {
-    function classedConstant() {
-      var i = -1;
-      while (++i < n) name[i](this, value);
-    }
-    function classedFunction() {
-      var i = -1, x = value.apply(this, arguments);
-      while (++i < n) name[i](this, x);
-    }
-    name = name.trim().split(/\s+/).map(d3_selection_classedName);
-    var n = name.length;
-    return typeof value === "function" ? classedFunction : classedConstant;
-  }
-  function d3_selection_classedName(name) {
-    var re = d3_selection_classedRe(name);
-    return function(node, value) {
-      if (c = node.classList) return value ? c.add(name) : c.remove(name);
-      var c = node.className, cb = c.baseVal != null, cv = cb ? c.baseVal : c;
-      if (value) {
-        re.lastIndex = 0;
-        if (!re.test(cv)) {
-          cv = d3_collapse(cv + " " + name);
-          if (cb) c.baseVal = cv; else node.className = cv;
-        }
-      } else if (cv) {
-        cv = d3_collapse(cv.replace(re, " "));
-        if (cb) c.baseVal = cv; else node.className = cv;
-      }
-    };
-  }
-  function d3_selection_style(name, value, priority) {
-    function styleNull() {
-      this.style.removeProperty(name);
-    }
-    function styleConstant() {
-      this.style.setProperty(name, value, priority);
-    }
-    function styleFunction() {
-      var x = value.apply(this, arguments);
-      if (x == null) this.style.removeProperty(name); else this.style.setProperty(name, x, priority);
-    }
-    return value == null ? styleNull : typeof value === "function" ? styleFunction : styleConstant;
-  }
-  function d3_selection_property(name, value) {
-    function propertyNull() {
-      delete this[name];
-    }
-    function propertyConstant() {
-      this[name] = value;
-    }
-    function propertyFunction() {
-      var x = value.apply(this, arguments);
-      if (x == null) delete this[name]; else this[name] = x;
-    }
-    return value == null ? propertyNull : typeof value === "function" ? propertyFunction : propertyConstant;
-  }
-  function d3_selection_dataNode(data) {
-    return {
-      __data__: data
-    };
-  }
-  function d3_selection_filter(selector) {
-    return function() {
-      return d3_selectMatches(this, selector);
-    };
-  }
-  function d3_selection_sortComparator(comparator) {
-    if (!arguments.length) comparator = d3.ascending;
-    return function(a, b) {
-      return comparator(a && a.__data__, b && b.__data__);
-    };
-  }
-  function d3_selection_on(type, listener, capture) {
-    function onRemove() {
-      var wrapper = this[name];
-      if (wrapper) {
-        this.removeEventListener(type, wrapper, wrapper.$);
-        delete this[name];
-      }
-    }
-    function onAdd() {
-      function wrapper(e) {
-        var o = d3.event;
-        d3.event = e;
-        args[0] = node.__data__;
-        try {
-          listener.apply(node, args);
-        } finally {
-          d3.event = o;
-        }
-      }
-      var node = this, args = arguments;
-      onRemove.call(this);
-      this.addEventListener(type, this[name] = wrapper, wrapper.$ = capture);
-      wrapper._ = listener;
-    }
-    var name = "__on" + type, i = type.indexOf(".");
-    if (i > 0) type = type.substring(0, i);
-    return listener ? onAdd : onRemove;
-  }
-  function d3_selection_each(groups, callback) {
-    for (var j = 0, m = groups.length; j < m; j++) {
-      for (var group = groups[j], i = 0, n = group.length, node; i < n; i++) {
-        if (node = group[i]) callback(node, i, j);
-      }
-    }
-    return groups;
-  }
-  function d3_selection_enter(selection) {
-    d3_arraySubclass(selection, d3_selection_enterPrototype);
-    return selection;
-  }
-  function d3_transition(groups, id, time) {
-    d3_arraySubclass(groups, d3_transitionPrototype);
-    var tweens = new d3_Map, event = d3.dispatch("start", "end"), ease = d3_transitionEase;
-    groups.id = id;
-    groups.time = time;
-    groups.tween = function(name, tween) {
-      if (arguments.length < 2) return tweens.get(name);
-      if (tween == null) tweens.remove(name); else tweens.set(name, tween);
-      return groups;
-    };
-    groups.ease = function(value) {
-      if (!arguments.length) return ease;
-      ease = typeof value === "function" ? value : d3.ease.apply(d3, arguments);
-      return groups;
-    };
-    groups.each = function(type, listener) {
-      if (arguments.length < 2) return d3_transition_each.call(groups, type);
-      event.on(type, listener);
-      return groups;
-    };
-    d3.timer(function(elapsed) {
-      return d3_selection_each(groups, function(node, i, j) {
-        function start(elapsed) {
-          if (lock.active > id) return stop();
-          lock.active = id;
-          tweens.forEach(function(key, value) {
-            if (value = value.call(node, d, i)) {
-              tweened.push(value);
-            }
-          });
-          event.start.call(node, d, i);
-          if (!tick(elapsed)) d3.timer(tick, 0, time);
-          return 1;
-        }
-        function tick(elapsed) {
-          if (lock.active !== id) return stop();
-          var t = (elapsed - delay) / duration, e = ease(t), n = tweened.length;
-          while (n > 0) {
-            tweened[--n].call(node, e);
-          }
-          if (t >= 1) {
-            stop();
-            d3_transitionId = id;
-            event.end.call(node, d, i);
-            d3_transitionId = 0;
-            return 1;
-          }
-        }
-        function stop() {
-          if (!--lock.count) delete node.__transition__;
-          return 1;
-        }
-        var tweened = [], delay = node.delay, duration = node.duration, lock = (node = node.node).__transition__ || (node.__transition__ = {
-          active: 0,
-          count: 0
-        }), d = node.__data__;
-        ++lock.count;
-        delay <= elapsed ? start(elapsed) : d3.timer(start, delay, time);
-      });
-    }, 0, time);
-    return groups;
-  }
-  function d3_transition_each(callback) {
-    var id = d3_transitionId, ease = d3_transitionEase, delay = d3_transitionDelay, duration = d3_transitionDuration;
-    d3_transitionId = this.id;
-    d3_transitionEase = this.ease();
-    d3_selection_each(this, function(node, i, j) {
-      d3_transitionDelay = node.delay;
-      d3_transitionDuration = node.duration;
-      callback.call(node = node.node, node.__data__, i, j);
-    });
-    d3_transitionId = id;
-    d3_transitionEase = ease;
-    d3_transitionDelay = delay;
-    d3_transitionDuration = duration;
-    return this;
-  }
-  function d3_tweenNull(d, i, a) {
-    return a != "" && d3_tweenRemove;
-  }
-  function d3_tweenByName(b, name) {
-    return d3.tween(b, d3_interpolateByName(name));
-  }
-  function d3_timer_step() {
-    var elapsed, now = Date.now(), t1 = d3_timer_queue;
-    while (t1) {
-      elapsed = now - t1.then;
-      if (elapsed >= t1.delay) t1.flush = t1.callback(elapsed);
-      t1 = t1.next;
-    }
-    var delay = d3_timer_flush() - now;
-    if (delay > 24) {
-      if (isFinite(delay)) {
-        clearTimeout(d3_timer_timeout);
-        d3_timer_timeout = setTimeout(d3_timer_step, delay);
-      }
-      d3_timer_interval = 0;
-    } else {
-      d3_timer_interval = 1;
-      d3_timer_frame(d3_timer_step);
-    }
-  }
-  function d3_timer_flush() {
-    var t0 = null, t1 = d3_timer_queue, then = Infinity;
-    while (t1) {
-      if (t1.flush) {
-        t1 = t0 ? t0.next = t1.next : d3_timer_queue = t1.next;
-      } else {
-        then = Math.min(then, t1.then + t1.delay);
-        t1 = (t0 = t1).next;
-      }
-    }
-    return then;
-  }
-  function d3_mousePoint(container, e) {
-    var svg = container.ownerSVGElement || container;
-    if (svg.createSVGPoint) {
-      var point = svg.createSVGPoint();
-      if (d3_mouse_bug44083 < 0 && (window.scrollX || window.scrollY)) {
-        svg = d3.select(document.body).append("svg").style("position", "absolute").style("top", 0).style("left", 0);
-        var ctm = svg[0][0].getScreenCTM();
-        d3_mouse_bug44083 = !(ctm.f || ctm.e);
-        svg.remove();
-      }
-      if (d3_mouse_bug44083) {
-        point.x = e.pageX;
-        point.y = e.pageY;
-      } else {
-        point.x = e.clientX;
-        point.y = e.clientY;
-      }
-      point = point.matrixTransform(container.getScreenCTM().inverse());
-      return [ point.x, point.y ];
-    }
-    var rect = container.getBoundingClientRect();
-    return [ e.clientX - rect.left - container.clientLeft, e.clientY - rect.top - container.clientTop ];
-  }
-  function d3_noop() {}
-  function d3_scaleExtent(domain) {
-    var start = domain[0], stop = domain[domain.length - 1];
-    return start < stop ? [ start, stop ] : [ stop, start ];
-  }
-  function d3_scaleRange(scale) {
-    return scale.rangeExtent ? scale.rangeExtent() : d3_scaleExtent(scale.range());
-  }
-  function d3_scale_nice(domain, nice) {
-    var i0 = 0, i1 = domain.length - 1, x0 = domain[i0], x1 = domain[i1], dx;
-    if (x1 < x0) {
-      dx = i0, i0 = i1, i1 = dx;
-      dx = x0, x0 = x1, x1 = dx;
-    }
-    if (nice = nice(x1 - x0)) {
-      domain[i0] = nice.floor(x0);
-      domain[i1] = nice.ceil(x1);
-    }
-    return domain;
-  }
-  function d3_scale_niceDefault() {
-    return Math;
-  }
-  function d3_scale_linear(domain, range, interpolate, clamp) {
-    function rescale() {
-      var linear = Math.min(domain.length, range.length) > 2 ? d3_scale_polylinear : d3_scale_bilinear, uninterpolate = clamp ? d3_uninterpolateClamp : d3_uninterpolateNumber;
-      output = linear(domain, range, uninterpolate, interpolate);
-      input = linear(range, domain, uninterpolate, d3.interpolate);
-      return scale;
-    }
-    function scale(x) {
-      return output(x);
-    }
-    var output, input;
-    scale.invert = function(y) {
-      return input(y);
-    };
-    scale.domain = function(x) {
-      if (!arguments.length) return domain;
-      domain = x.map(Number);
-      return rescale();
-    };
-    scale.range = function(x) {
-      if (!arguments.length) return range;
-      range = x;
-      return rescale();
-    };
-    scale.rangeRound = function(x) {
-      return scale.range(x).interpolate(d3.interpolateRound);
-    };
-    scale.clamp = function(x) {
-      if (!arguments.length) return clamp;
-      clamp = x;
-      return rescale();
-    };
-    scale.interpolate = function(x) {
-      if (!arguments.length) return interpolate;
-      interpolate = x;
-      return rescale();
-    };
-    scale.ticks = function(m) {
-      return d3_scale_linearTicks(domain, m);
-    };
-    scale.tickFormat = function(m) {
-      return d3_scale_linearTickFormat(domain, m);
-    };
-    scale.nice = function() {
-      d3_scale_nice(domain, d3_scale_linearNice);
-      return rescale();
-    };
-    scale.copy = function() {
-      return d3_scale_linear(domain, range, interpolate, clamp);
-    };
-    return rescale();
-  }
-  function d3_scale_linearRebind(scale, linear) {
-    return d3.rebind(scale, linear, "range", "rangeRound", "interpolate", "clamp");
-  }
-  function d3_scale_linearNice(dx) {
-    dx = Math.pow(10, Math.round(Math.log(dx) / Math.LN10) - 1);
-    return dx && {
-      floor: function(x) {
-        return Math.floor(x / dx) * dx;
-      },
-      ceil: function(x) {
-        return Math.ceil(x / dx) * dx;
-      }
-    };
-  }
-  function d3_scale_linearTickRange(domain, m) {
-    var extent = d3_scaleExtent(domain), span = extent[1] - extent[0], step = Math.pow(10, Math.floor(Math.log(span / m) / Math.LN10)), err = m / span * step;
-    if (err <= .15) step *= 10; else if (err <= .35) step *= 5; else if (err <= .75) step *= 2;
-    extent[0] = Math.ceil(extent[0] / step) * step;
-    extent[1] = Math.floor(extent[1] / step) * step + step * .5;
-    extent[2] = step;
-    return extent;
-  }
-  function d3_scale_linearTicks(domain, m) {
-    return d3.range.apply(d3, d3_scale_linearTickRange(domain, m));
-  }
-  function d3_scale_linearTickFormat(domain, m) {
-    return d3.format(",." + Math.max(0, -Math.floor(Math.log(d3_scale_linearTickRange(domain, m)[2]) / Math.LN10 + .01)) + "f");
-  }
-  function d3_scale_bilinear(domain, range, uninterpolate, interpolate) {
-    var u = uninterpolate(domain[0], domain[1]), i = interpolate(range[0], range[1]);
-    return function(x) {
-      return i(u(x));
-    };
-  }
-  function d3_scale_polylinear(domain, range, uninterpolate, interpolate) {
-    var u = [], i = [], j = 0, k = Math.min(domain.length, range.length) - 1;
-    if (domain[k] < domain[0]) {
-      domain = domain.slice().reverse();
-      range = range.slice().reverse();
-    }
-    while (++j <= k) {
-      u.push(uninterpolate(domain[j - 1], domain[j]));
-      i.push(interpolate(range[j - 1], range[j]));
-    }
-    return function(x) {
-      var j = d3.bisect(domain, x, 1, k) - 1;
-      return i[j](u[j](x));
-    };
-  }
-  function d3_scale_log(linear, log) {
-    function scale(x) {
-      return linear(log(x));
-    }
-    var pow = log.pow;
-    scale.invert = function(x) {
-      return pow(linear.invert(x));
-    };
-    scale.domain = function(x) {
-      if (!arguments.length) return linear.domain().map(pow);
-      log = x[0] < 0 ? d3_scale_logn : d3_scale_logp;
-      pow = log.pow;
-      linear.domain(x.map(log));
-      return scale;
-    };
-    scale.nice = function() {
-      linear.domain(d3_scale_nice(linear.domain(), d3_scale_niceDefault));
-      return scale;
-    };
-    scale.ticks = function() {
-      var extent = d3_scaleExtent(linear.domain()), ticks = [];
-      if (extent.every(isFinite)) {
-        var i = Math.floor(extent[0]), j = Math.ceil(extent[1]), u = pow(extent[0]), v = pow(extent[1]);
-        if (log === d3_scale_logn) {
-          ticks.push(pow(i));
-          for (; i++ < j; ) for (var k = 9; k > 0; k--) ticks.push(pow(i) * k);
-        } else {
-          for (; i < j; i++) for (var k = 1; k < 10; k++) ticks.push(pow(i) * k);
-          ticks.push(pow(i));
-        }
-        for (i = 0; ticks[i] < u; i++) {}
-        for (j = ticks.length; ticks[j - 1] > v; j--) {}
-        ticks = ticks.slice(i, j);
-      }
-      return ticks;
-    };
-    scale.tickFormat = function(n, format) {
-      if (arguments.length < 2) format = d3_scale_logFormat;
-      if (arguments.length < 1) return format;
-      var k = Math.max(.1, n / scale.ticks().length), f = log === d3_scale_logn ? (e = -1e-12, Math.floor) : (e = 1e-12, Math.ceil), e;
-      return function(d) {
-        return d / pow(f(log(d) + e)) <= k ? format(d) : "";
-      };
-    };
-    scale.copy = function() {
-      return d3_scale_log(linear.copy(), log);
-    };
-    return d3_scale_linearRebind(scale, linear);
-  }
-  function d3_scale_logp(x) {
-    return Math.log(x < 0 ? 0 : x) / Math.LN10;
-  }
-  function d3_scale_logn(x) {
-    return -Math.log(x > 0 ? 0 : -x) / Math.LN10;
-  }
-  function d3_scale_pow(linear, exponent) {
-    function scale(x) {
-      return linear(powp(x));
-    }
-    var powp = d3_scale_powPow(exponent), powb = d3_scale_powPow(1 / exponent);
-    scale.invert = function(x) {
-      return powb(linear.invert(x));
-    };
-    scale.domain = function(x) {
-      if (!arguments.length) return linear.domain().map(powb);
-      linear.domain(x.map(powp));
-      return scale;
-    };
-    scale.ticks = function(m) {
-      return d3_scale_linearTicks(scale.domain(), m);
-    };
-    scale.tickFormat = function(m) {
-      return d3_scale_linearTickFormat(scale.domain(), m);
-    };
-    scale.nice = function() {
-      return scale.domain(d3_scale_nice(scale.domain(), d3_scale_linearNice));
-    };
-    scale.exponent = function(x) {
-      if (!arguments.length) return exponent;
-      var domain = scale.domain();
-      powp = d3_scale_powPow(exponent = x);
-      powb = d3_scale_powPow(1 / exponent);
-      return scale.domain(domain);
-    };
-    scale.copy = function() {
-      return d3_scale_pow(linear.copy(), exponent);
-    };
-    return d3_scale_linearRebind(scale, linear);
-  }
-  function d3_scale_powPow(e) {
-    return function(x) {
-      return x < 0 ? -Math.pow(-x, e) : Math.pow(x, e);
-    };
-  }
-  function d3_scale_ordinal(domain, ranger) {
-    function scale(x) {
-      return range[((index.get(x) || index.set(x, domain.push(x))) - 1) % range.length];
-    }
-    function steps(start, step) {
-      return d3.range(domain.length).map(function(i) {
-        return start + step * i;
-      });
-    }
-    var index, range, rangeBand;
-    scale.domain = function(x) {
-      if (!arguments.length) return domain;
-      domain = [];
-      index = new d3_Map;
-      var i = -1, n = x.length, xi;
-      while (++i < n) if (!index.has(xi = x[i])) index.set(xi, domain.push(xi));
-      return scale[ranger.t].apply(scale, ranger.a);
-    };
-    scale.range = function(x) {
-      if (!arguments.length) return range;
-      range = x;
-      rangeBand = 0;
-      ranger = {
-        t: "range",
-        a: arguments
-      };
-      return scale;
-    };
-    scale.rangePoints = function(x, padding) {
-      if (arguments.length < 2) padding = 0;
-      var start = x[0], stop = x[1], step = (stop - start) / (Math.max(1, domain.length - 1) + padding);
-      range = steps(domain.length < 2 ? (start + stop) / 2 : start + step * padding / 2, step);
-      rangeBand = 0;
-      ranger = {
-        t: "rangePoints",
-        a: arguments
-      };
-      return scale;
-    };
-    scale.rangeBands = function(x, padding, outerPadding) {
-      if (arguments.length < 2) padding = 0;
-      if (arguments.length < 3) outerPadding = padding;
-      var reverse = x[1] < x[0], start = x[reverse - 0], stop = x[1 - reverse], step = (stop - start) / (domain.length - padding + 2 * outerPadding);
-      range = steps(start + step * outerPadding, step);
-      if (reverse) range.reverse();
-      rangeBand = step * (1 - padding);
-      ranger = {
-        t: "rangeBands",
-        a: arguments
-      };
-      return scale;
-    };
-    scale.rangeRoundBands = function(x, padding, outerPadding) {
-      if (arguments.length < 2) padding = 0;
-      if (arguments.length < 3) outerPadding = padding;
-      var reverse = x[1] < x[0], start = x[reverse - 0], stop = x[1 - reverse], step = Math.floor((stop - start) / (domain.length - padding + 2 * outerPadding)), error = stop - start - (domain.length - padding) * step;
-      range = steps(start + Math.round(error / 2), step);
-      if (reverse) range.reverse();
-      rangeBand = Math.round(step * (1 - padding));
-      ranger = {
-        t: "rangeRoundBands",
-        a: arguments
-      };
-      return scale;
-    };
-    scale.rangeBand = function() {
-      return rangeBand;
-    };
-    scale.rangeExtent = function() {
-      return d3_scaleExtent(ranger.a[0]);
-    };
-    scale.copy = function() {
-      return d3_scale_ordinal(domain, ranger);
-    };
-    return scale.domain(domain);
-  }
-  function d3_scale_quantile(domain, range) {
-    function rescale() {
-      var k = 0, n = domain.length, q = range.length;
-      thresholds = [];
-      while (++k < q) thresholds[k - 1] = d3.quantile(domain, k / q);
-      return scale;
-    }
-    function scale(x) {
-      if (isNaN(x = +x)) return NaN;
-      return range[d3.bisect(thresholds, x)];
-    }
-    var thresholds;
-    scale.domain = function(x) {
-      if (!arguments.length) return domain;
-      domain = x.filter(function(d) {
-        return !isNaN(d);
-      }).sort(d3.ascending);
-      return rescale();
-    };
-    scale.range = function(x) {
-      if (!arguments.length) return range;
-      range = x;
-      return rescale();
-    };
-    scale.quantiles = function() {
-      return thresholds;
-    };
-    scale.copy = function() {
-      return d3_scale_quantile(domain, range);
-    };
-    return rescale();
-  }
-  function d3_scale_quantize(x0, x1, range) {
-    function scale(x) {
-      return range[Math.max(0, Math.min(i, Math.floor(kx * (x - x0))))];
-    }
-    function rescale() {
-      kx = range.length / (x1 - x0);
-      i = range.length - 1;
-      return scale;
-    }
-    var kx, i;
-    scale.domain = function(x) {
-      if (!arguments.length) return [ x0, x1 ];
-      x0 = +x[0];
-      x1 = +x[x.length - 1];
-      return rescale();
-    };
-    scale.range = function(x) {
-      if (!arguments.length) return range;
-      range = x;
-      return rescale();
-    };
-    scale.copy = function() {
-      return d3_scale_quantize(x0, x1, range);
-    };
-    return rescale();
-  }
-  function d3_scale_threshold(domain, range) {
-    function scale(x) {
-      return range[d3.bisect(domain, x)];
-    }
-    scale.domain = function(_) {
-      if (!arguments.length) return domain;
-      domain = _;
-      return scale;
-    };
-    scale.range = function(_) {
-      if (!arguments.length) return range;
-      range = _;
-      return scale;
-    };
-    scale.copy = function() {
-      return d3_scale_threshold(domain, range);
-    };
-    return scale;
-  }
-  function d3_scale_identity(domain) {
-    function identity(x) {
-      return +x;
-    }
-    identity.invert = identity;
-    identity.domain = identity.range = function(x) {
-      if (!arguments.length) return domain;
-      domain = x.map(identity);
-      return identity;
-    };
-    identity.ticks = function(m) {
-      return d3_scale_linearTicks(domain, m);
-    };
-    identity.tickFormat = function(m) {
-      return d3_scale_linearTickFormat(domain, m);
-    };
-    identity.copy = function() {
-      return d3_scale_identity(domain);
-    };
-    return identity;
-  }
-  function d3_svg_arcInnerRadius(d) {
-    return d.innerRadius;
-  }
-  function d3_svg_arcOuterRadius(d) {
-    return d.outerRadius;
-  }
-  function d3_svg_arcStartAngle(d) {
-    return d.startAngle;
-  }
-  function d3_svg_arcEndAngle(d) {
-    return d.endAngle;
-  }
-  function d3_svg_line(projection) {
-    function line(data) {
-      function segment() {
-        segments.push("M", interpolate(projection(points), tension));
-      }
-      var segments = [], points = [], i = -1, n = data.length, d, fx = d3_functor(x), fy = d3_functor(y);
-      while (++i < n) {
-        if (defined.call(this, d = data[i], i)) {
-          points.push([ +fx.call(this, d, i), +fy.call(this, d, i) ]);
-        } else if (points.length) {
-          segment();
-          points = [];
-        }
-      }
-      if (points.length) segment();
-      return segments.length ? segments.join("") : null;
-    }
-    var x = d3_svg_lineX, y = d3_svg_lineY, defined = d3_true, interpolate = d3_svg_lineLinear, interpolateKey = interpolate.key, tension = .7;
-    line.x = function(_) {
-      if (!arguments.length) return x;
-      x = _;
-      return line;
-    };
-    line.y = function(_) {
-      if (!arguments.length) return y;
-      y = _;
-      return line;
-    };
-    line.defined = function(_) {
-      if (!arguments.length) return defined;
-      defined = _;
-      return line;
-    };
-    line.interpolate = function(_) {
-      if (!arguments.length) return interpolateKey;
-      if (typeof _ === "function") interpolateKey = interpolate = _; else interpolateKey = (interpolate = d3_svg_lineInterpolators.get(_) || d3_svg_lineLinear).key;
-      return line;
-    };
-    line.tension = function(_) {
-      if (!arguments.length) return tension;
-      tension = _;
-      return line;
-    };
-    return line;
-  }
-  function d3_svg_lineX(d) {
-    return d[0];
-  }
-  function d3_svg_lineY(d) {
-    return d[1];
-  }
-  function d3_svg_lineLinear(points) {
-    return points.join("L");
-  }
-  function d3_svg_lineLinearClosed(points) {
-    return d3_svg_lineLinear(points) + "Z";
-  }
-  function d3_svg_lineStepBefore(points) {
-    var i = 0, n = points.length, p = points[0], path = [ p[0], ",", p[1] ];
-    while (++i < n) path.push("V", (p = points[i])[1], "H", p[0]);
-    return path.join("");
-  }
-  function d3_svg_lineStepAfter(points) {
-    var i = 0, n = points.length, p = points[0], path = [ p[0], ",", p[1] ];
-    while (++i < n) path.push("H", (p = points[i])[0], "V", p[1]);
-    return path.join("");
-  }
-  function d3_svg_lineCardinalOpen(points, tension) {
-    return points.length < 4 ? d3_svg_lineLinear(points) : points[1] + d3_svg_lineHermite(points.slice(1, points.length - 1), d3_svg_lineCardinalTangents(points, tension));
-  }
-  function d3_svg_lineCardinalClosed(points, tension) {
-    return points.length < 3 ? d3_svg_lineLinear(points) : points[0] + d3_svg_lineHermite((points.push(points[0]), points), d3_svg_lineCardinalTangents([ points[points.length - 2] ].concat(points, [ points[1] ]), tension));
-  }
-  function d3_svg_lineCardinal(points, tension, closed) {
-    return points.length < 3 ? d3_svg_lineLinear(points) : points[0] + d3_svg_lineHermite(points, d3_svg_lineCardinalTangents(points, tension));
-  }
-  function d3_svg_lineHermite(points, tangents) {
-    if (tangents.length < 1 || points.length != tangents.length && points.length != tangents.length + 2) {
-      return d3_svg_lineLinear(points);
-    }
-    var quad = points.length != tangents.length, path = "", p0 = points[0], p = points[1], t0 = tangents[0], t = t0, pi = 1;
-    if (quad) {
-      path += "Q" + (p[0] - t0[0] * 2 / 3) + "," + (p[1] - t0[1] * 2 / 3) + "," + p[0] + "," + p[1];
-      p0 = points[1];
-      pi = 2;
-    }
-    if (tangents.length > 1) {
-      t = tangents[1];
-      p = points[pi];
-      pi++;
-      path += "C" + (p0[0] + t0[0]) + "," + (p0[1] + t0[1]) + "," + (p[0] - t[0]) + "," + (p[1] - t[1]) + "," + p[0] + "," + p[1];
-      for (var i = 2; i < tangents.length; i++, pi++) {
-        p = points[pi];
-        t = tangents[i];
-        path += "S" + (p[0] - t[0]) + "," + (p[1] - t[1]) + "," + p[0] + "," + p[1];
-      }
-    }
-    if (quad) {
-      var lp = points[pi];
-      path += "Q" + (p[0] + t[0] * 2 / 3) + "," + (p[1] + t[1] * 2 / 3) + "," + lp[0] + "," + lp[1];
-    }
-    return path;
-  }
-  function d3_svg_lineCardinalTangents(points, tension) {
-    var tangents = [], a = (1 - tension) / 2, p0, p1 = points[0], p2 = points[1], i = 1, n = points.length;
-    while (++i < n) {
-      p0 = p1;
-      p1 = p2;
-      p2 = points[i];
-      tangents.push([ a * (p2[0] - p0[0]), a * (p2[1] - p0[1]) ]);
-    }
-    return tangents;
-  }
-  function d3_svg_lineBasis(points) {
-    if (points.length < 3) return d3_svg_lineLinear(points);
-    var i = 1, n = points.length, pi = points[0], x0 = pi[0], y0 = pi[1], px = [ x0, x0, x0, (pi = points[1])[0] ], py = [ y0, y0, y0, pi[1] ], path = [ x0, ",", y0 ];
-    d3_svg_lineBasisBezier(path, px, py);
-    while (++i < n) {
-      pi = points[i];
-      px.shift();
-      px.push(pi[0]);
-      py.shift();
-      py.push(pi[1]);
-      d3_svg_lineBasisBezier(path, px, py);
-    }
-    i = -1;
-    while (++i < 2) {
-      px.shift();
-      px.push(pi[0]);
-      py.shift();
-      py.push(pi[1]);
-      d3_svg_lineBasisBezier(path, px, py);
-    }
-    return path.join("");
-  }
-  function d3_svg_lineBasisOpen(points) {
-    if (points.length < 4) return d3_svg_lineLinear(points);
-    var path = [], i = -1, n = points.length, pi, px = [ 0 ], py = [ 0 ];
-    while (++i < 3) {
-      pi = points[i];
-      px.push(pi[0]);
-      py.push(pi[1]);
-    }
-    path.push(d3_svg_lineDot4(d3_svg_lineBasisBezier3, px) + "," + d3_svg_lineDot4(d3_svg_lineBasisBezier3, py));
-    --i;
-    while (++i < n) {
-      pi = points[i];
-      px.shift();
-      px.push(pi[0]);
-      py.shift();
-      py.push(pi[1]);
-      d3_svg_lineBasisBezier(path, px, py);
-    }
-    return path.join("");
-  }
-  function d3_svg_lineBasisClosed(points) {
-    var path, i = -1, n = points.length, m = n + 4, pi, px = [], py = [];
-    while (++i < 4) {
-      pi = points[i % n];
-      px.push(pi[0]);
-      py.push(pi[1]);
-    }
-    path = [ d3_svg_lineDot4(d3_svg_lineBasisBezier3, px), ",", d3_svg_lineDot4(d3_svg_lineBasisBezier3, py) ];
-    --i;
-    while (++i < m) {
-      pi = points[i % n];
-      px.shift();
-      px.push(pi[0]);
-      py.shift();
-      py.push(pi[1]);
-      d3_svg_lineBasisBezier(path, px, py);
-    }
-    return path.join("");
-  }
-  function d3_svg_lineBundle(points, tension) {
-    var n = points.length - 1;
-    if (n) {
-      var x0 = points[0][0], y0 = points[0][1], dx = points[n][0] - x0, dy = points[n][1] - y0, i = -1, p, t;
-      while (++i <= n) {
-        p = points[i];
-        t = i / n;
-        p[0] = tension * p[0] + (1 - tension) * (x0 + t * dx);
-        p[1] = tension * p[1] + (1 - tension) * (y0 + t * dy);
-      }
-    }
-    return d3_svg_lineBasis(points);
-  }
-  function d3_svg_lineDot4(a, b) {
-    return a[0] * b[0] + a[1] * b[1] + a[2] * b[2] + a[3] * b[3];
-  }
-  function d3_svg_lineBasisBezier(path, x, y) {
-    path.push("C", d3_svg_lineDot4(d3_svg_lineBasisBezier1, x), ",", d3_svg_lineDot4(d3_svg_lineBasisBezier1, y), ",", d3_svg_lineDot4(d3_svg_lineBasisBezier2, x), ",", d3_svg_lineDot4(d3_svg_lineBasisBezier2, y), ",", d3_svg_lineDot4(d3_svg_lineBasisBezier3, x), ",", d3_svg_lineDot4(d3_svg_lineBasisBezier3, y));
-  }
-  function d3_svg_lineSlope(p0, p1) {
-    return (p1[1] - p0[1]) / (p1[0] - p0[0]);
-  }
-  function d3_svg_lineFiniteDifferences(points) {
-    var i = 0, j = points.length - 1, m = [], p0 = points[0], p1 = points[1], d = m[0] = d3_svg_lineSlope(p0, p1);
-    while (++i < j) {
-      m[i] = (d + (d = d3_svg_lineSlope(p0 = p1, p1 = points[i + 1]))) / 2;
-    }
-    m[i] = d;
-    return m;
-  }
-  function d3_svg_lineMonotoneTangents(points) {
-    var tangents = [], d, a, b, s, m = d3_svg_lineFiniteDifferences(points), i = -1, j = points.length - 1;
-    while (++i < j) {
-      d = d3_svg_lineSlope(points[i], points[i + 1]);
-      if (Math.abs(d) < 1e-6) {
-        m[i] = m[i + 1] = 0;
-      } else {
-        a = m[i] / d;
-        b = m[i + 1] / d;
-        s = a * a + b * b;
-        if (s > 9) {
-          s = d * 3 / Math.sqrt(s);
-          m[i] = s * a;
-          m[i + 1] = s * b;
-        }
-      }
-    }
-    i = -1;
-    while (++i <= j) {
-      s = (points[Math.min(j, i + 1)][0] - points[Math.max(0, i - 1)][0]) / (6 * (1 + m[i] * m[i]));
-      tangents.push([ s || 0, m[i] * s || 0 ]);
-    }
-    return tangents;
-  }
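-  // This is the Fritsch-Carlson monotone scheme: average the neighboring
-  // finite-difference slopes, zero them across flat runs, and rescale any
-  // pair with a * a + b * b > 9 so the cubic cannot overshoot the data.
-  // It backs the "monotone" interpolator:
-  //   d3.svg.line().interpolate("monotone")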
-  function d3_svg_lineMonotone(points) {
-    return points.length < 3 ? d3_svg_lineLinear(points) : points[0] + d3_svg_lineHermite(points, d3_svg_lineMonotoneTangents(points));
-  }
-  function d3_svg_lineRadial(points) {
-    var point, i = -1, n = points.length, r, a;
-    while (++i < n) {
-      point = points[i];
-      r = point[0];
-      a = point[1] + d3_svg_arcOffset;
-      point[0] = r * Math.cos(a);
-      point[1] = r * Math.sin(a);
-    }
-    return points;
-  }
-  function d3_svg_area(projection) {
-    function area(data) {
-      function segment() {
-        segments.push("M", interpolate(projection(points1), tension), L, interpolateReverse(projection(points0.reverse()), tension), "Z");
-      }
-      var segments = [], points0 = [], points1 = [], i = -1, n = data.length, d, fx0 = d3_functor(x0), fy0 = d3_functor(y0), fx1 = x0 === x1 ? function() {
-        return x;
-      } : d3_functor(x1), fy1 = y0 === y1 ? function() {
-        return y;
-      } : d3_functor(y1), x, y;
-      while (++i < n) {
-        if (defined.call(this, d = data[i], i)) {
-          points0.push([ x = +fx0.call(this, d, i), y = +fy0.call(this, d, i) ]);
-          points1.push([ +fx1.call(this, d, i), +fy1.call(this, d, i) ]);
-        } else if (points0.length) {
-          segment();
-          points0 = [];
-          points1 = [];
-        }
-      }
-      if (points0.length) segment();
-      return segments.length ? segments.join("") : null;
-    }
-    var x0 = d3_svg_lineX, x1 = d3_svg_lineX, y0 = 0, y1 = d3_svg_lineY, defined = d3_true, interpolate = d3_svg_lineLinear, interpolateKey = interpolate.key, interpolateReverse = interpolate, L = "L", tension = .7;
-    area.x = function(_) {
-      if (!arguments.length) return x1;
-      x0 = x1 = _;
-      return area;
-    };
-    area.x0 = function(_) {
-      if (!arguments.length) return x0;
-      x0 = _;
-      return area;
-    };
-    area.x1 = function(_) {
-      if (!arguments.length) return x1;
-      x1 = _;
-      return area;
-    };
-    area.y = function(_) {
-      if (!arguments.length) return y1;
-      y0 = y1 = _;
-      return area;
-    };
-    area.y0 = function(_) {
-      if (!arguments.length) return y0;
-      y0 = _;
-      return area;
-    };
-    area.y1 = function(_) {
-      if (!arguments.length) return y1;
-      y1 = _;
-      return area;
-    };
-    area.defined = function(_) {
-      if (!arguments.length) return defined;
-      defined = _;
-      return area;
-    };
-    area.interpolate = function(_) {
-      if (!arguments.length) return interpolateKey;
-      if (typeof _ === "function") interpolateKey = interpolate = _; else interpolateKey = (interpolate = d3_svg_lineInterpolators.get(_) || d3_svg_lineLinear).key;
-      interpolateReverse = interpolate.reverse || interpolate;
-      L = interpolate.closed ? "M" : "L";
-      return area;
-    };
-    area.tension = function(_) {
-      if (!arguments.length) return tension;
-      tension = _;
-      return area;
-    };
-    return area;
-  }
-  function d3_svg_chordSource(d) {
-    return d.source;
-  }
-  function d3_svg_chordTarget(d) {
-    return d.target;
-  }
-  function d3_svg_chordRadius(d) {
-    return d.radius;
-  }
-  function d3_svg_chordStartAngle(d) {
-    return d.startAngle;
-  }
-  function d3_svg_chordEndAngle(d) {
-    return d.endAngle;
-  }
-  function d3_svg_diagonalProjection(d) {
-    return [ d.x, d.y ];
-  }
-  function d3_svg_diagonalRadialProjection(projection) {
-    return function() {
-      var d = projection.apply(this, arguments), r = d[0], a = d[1] + d3_svg_arcOffset;
-      return [ r * Math.cos(a), r * Math.sin(a) ];
-    };
-  }
-  function d3_svg_symbolSize() {
-    return 64;
-  }
-  function d3_svg_symbolType() {
-    return "circle";
-  }
-  function d3_svg_symbolCircle(size) {
-    var r = Math.sqrt(size / Math.PI);
-    return "M0," + r + "A" + r + "," + r + " 0 1,1 0," + -r + "A" + r + "," + r + " 0 1,1 0," + r + "Z";
-  }
-  function d3_svg_axisX(selection, x) {
-    selection.attr("transform", function(d) {
-      return "translate(" + x(d) + ",0)";
-    });
-  }
-  function d3_svg_axisY(selection, y) {
-    selection.attr("transform", function(d) {
-      return "translate(0," + y(d) + ")";
-    });
-  }
-  function d3_svg_axisSubdivide(scale, ticks, m) {
-    var subticks = [];
-    if (m && ticks.length > 1) {
-      var extent = d3_scaleExtent(scale.domain()), i = -1, n = ticks.length, d = (ticks[1] - ticks[0]) / ++m, j, v;
-      while (++i < n) {
-        for (j = m; --j > 0; ) {
-          if ((v = +ticks[i] - j * d) >= extent[0]) {
-            subticks.push(v);
-          }
-        }
-      }
-      for (--i, j = 0; ++j < m && (v = +ticks[i] + j * d) < extent[1]; ) {
-        subticks.push(v);
-      }
-    }
-    return subticks;
-  }
-  function d3_behavior_zoomDelta() {
-    if (!d3_behavior_zoomDiv) {
-      d3_behavior_zoomDiv = d3.select("body").append("div").style("visibility", "hidden").style("top", 0).style("height", 0).style("width", 0).style("overflow-y", "scroll").append("div").style("height", "2000px").node().parentNode;
-    }
-    var e = d3.event, delta;
-    try {
-      d3_behavior_zoomDiv.scrollTop = 1e3;
-      d3_behavior_zoomDiv.dispatchEvent(e);
-      delta = 1e3 - d3_behavior_zoomDiv.scrollTop;
-    } catch (error) {
-      delta = e.wheelDelta || -e.detail * 5;
-    }
-    return delta;
-  }
-  function d3_layout_bundlePath(link) {
-    var start = link.source, end = link.target, lca = d3_layout_bundleLeastCommonAncestor(start, end), points = [ start ];
-    while (start !== lca) {
-      start = start.parent;
-      points.push(start);
-    }
-    var k = points.length;
-    while (end !== lca) {
-      points.splice(k, 0, end);
-      end = end.parent;
-    }
-    return points;
-  }
-  function d3_layout_bundleAncestors(node) {
-    var ancestors = [], parent = node.parent;
-    while (parent != null) {
-      ancestors.push(node);
-      node = parent;
-      parent = parent.parent;
-    }
-    ancestors.push(node);
-    return ancestors;
-  }
-  function d3_layout_bundleLeastCommonAncestor(a, b) {
-    if (a === b) return a;
-    var aNodes = d3_layout_bundleAncestors(a), bNodes = d3_layout_bundleAncestors(b), aNode = aNodes.pop(), bNode = bNodes.pop(), sharedNode = null;
-    while (aNode === bNode) {
-      sharedNode = aNode;
-      aNode = aNodes.pop();
-      bNode = bNodes.pop();
-    }
-    return sharedNode;
-  }
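-  // The least common ancestor is found by popping both root-to-node ancestor
-  // stacks in lock-step from the root downward: the last node on which the
-  // stacks agree is the LCA, and null is returned for nodes from disjoint
-  // trees. Sketch:
-  //   d3_layout_bundleLeastCommonAncestor(leafA, leafB)  // deepest shared ancestor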
-  function d3_layout_forceDragstart(d) {
-    d.fixed |= 2;
-  }
-  function d3_layout_forceDragend(d) {
-    d.fixed &= 1;
-  }
-  function d3_layout_forceMouseover(d) {
-    d.fixed |= 4;
-  }
-  function d3_layout_forceMouseout(d) {
-    d.fixed &= 3;
-  }
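-  // d.fixed is a bitmask: 1 is the user-settable pin, 2 is set for the
-  // duration of a drag, and 4 while the pointer hovers the node; the layout
-  // treats a node as fixed whenever any bit is on. For example:
-  //   node.fixed = true;              // pin permanently
-  //   d3_layout_forceDragstart(node); // adds the transient drag bit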
-  function d3_layout_forceAccumulate(quad, alpha, charges) {
-    var cx = 0, cy = 0;
-    quad.charge = 0;
-    if (!quad.leaf) {
-      var nodes = quad.nodes, n = nodes.length, i = -1, c;
-      while (++i < n) {
-        c = nodes[i];
-        if (c == null) continue;
-        d3_layout_forceAccumulate(c, alpha, charges);
-        quad.charge += c.charge;
-        cx += c.charge * c.cx;
-        cy += c.charge * c.cy;
-      }
-    }
-    if (quad.point) {
-      if (!quad.leaf) {
-        quad.point.x += Math.random() - .5;
-        quad.point.y += Math.random() - .5;
-      }
-      var k = alpha * charges[quad.point.index];
-      quad.charge += quad.pointCharge = k;
-      cx += k * quad.point.x;
-      cy += k * quad.point.y;
-    }
-    quad.cx = cx / quad.charge;
-    quad.cy = cy / quad.charge;
-  }
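-  // Barnes-Hut accumulation: every quadtree cell sums its descendants'
-  // charges and their charge-weighted centroid (cx, cy), so the force pass
-  // can approximate all nodes in a distant cell by a single point charge,
-  // roughly F ~ quad.charge / d^2 evaluated once per cell rather than once
-  // per node. The Math.random() jitter separates exactly coincident points.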
-  function d3_layout_forceLinkDistance(link) {
-    return 20;
-  }
-  function d3_layout_forceLinkStrength(link) {
-    return 1;
-  }
-  function d3_layout_stackX(d) {
-    return d.x;
-  }
-  function d3_layout_stackY(d) {
-    return d.y;
-  }
-  function d3_layout_stackOut(d, y0, y) {
-    d.y0 = y0;
-    d.y = y;
-  }
-  function d3_layout_stackOrderDefault(data) {
-    return d3.range(data.length);
-  }
-  function d3_layout_stackOffsetZero(data) {
-    var j = -1, m = data[0].length, y0 = [];
-    while (++j < m) y0[j] = 0;
-    return y0;
-  }
-  function d3_layout_stackMaxIndex(array) {
-    var i = 1, j = 0, v = array[0][1], k, n = array.length;
-    for (; i < n; ++i) {
-      if ((k = array[i][1]) > v) {
-        j = i;
-        v = k;
-      }
-    }
-    return j;
-  }
-  function d3_layout_stackReduceSum(d) {
-    return d.reduce(d3_layout_stackSum, 0);
-  }
-  function d3_layout_stackSum(p, d) {
-    return p + d[1];
-  }
-  function d3_layout_histogramBinSturges(range, values) {
-    return d3_layout_histogramBinFixed(range, Math.ceil(Math.log(values.length) / Math.LN2 + 1));
-  }
-  function d3_layout_histogramBinFixed(range, n) {
-    var x = -1, b = +range[0], m = (range[1] - b) / n, f = [];
-    while (++x <= n) f[x] = m * x + b;
-    return f;
-  }
-  function d3_layout_histogramRange(values) {
-    return [ d3.min(values), d3.max(values) ];
-  }
-  function d3_layout_hierarchyRebind(object, hierarchy) {
-    d3.rebind(object, hierarchy, "sort", "children", "value");
-    object.links = d3_layout_hierarchyLinks;
-    object.nodes = function(d) {
-      d3_layout_hierarchyInline = true;
-      return (object.nodes = object)(d);
-    };
-    return object;
-  }
-  function d3_layout_hierarchyChildren(d) {
-    return d.children;
-  }
-  function d3_layout_hierarchyValue(d) {
-    return d.value;
-  }
-  function d3_layout_hierarchySort(a, b) {
-    return b.value - a.value;
-  }
-  function d3_layout_hierarchyLinks(nodes) {
-    return d3.merge(nodes.map(function(parent) {
-      return (parent.children || []).map(function(child) {
-        return {
-          source: parent,
-          target: child
-        };
-      });
-    }));
-  }
-  function d3_layout_packSort(a, b) {
-    return a.value - b.value;
-  }
-  function d3_layout_packInsert(a, b) {
-    var c = a._pack_next;
-    a._pack_next = b;
-    b._pack_prev = a;
-    b._pack_next = c;
-    c._pack_prev = b;
-  }
-  function d3_layout_packSplice(a, b) {
-    a._pack_next = b;
-    b._pack_prev = a;
-  }
-  function d3_layout_packIntersects(a, b) {
-    var dx = b.x - a.x, dy = b.y - a.y, dr = a.r + b.r;
-    return dr * dr - dx * dx - dy * dy > .001;
-  }
-  function d3_layout_packSiblings(node) {
-    function bound(node) {
-      xMin = Math.min(node.x - node.r, xMin);
-      xMax = Math.max(node.x + node.r, xMax);
-      yMin = Math.min(node.y - node.r, yMin);
-      yMax = Math.max(node.y + node.r, yMax);
-    }
-    if (!(nodes = node.children) || !(n = nodes.length)) return;
-    var nodes, xMin = Infinity, xMax = -Infinity, yMin = Infinity, yMax = -Infinity, a, b, c, i, j, k, n;
-    nodes.forEach(d3_layout_packLink);
-    a = nodes[0];
-    a.x = -a.r;
-    a.y = 0;
-    bound(a);
-    if (n > 1) {
-      b = nodes[1];
-      b.x = b.r;
-      b.y = 0;
-      bound(b);
-      if (n > 2) {
-        c = nodes[2];
-        d3_layout_packPlace(a, b, c);
-        bound(c);
-        d3_layout_packInsert(a, c);
-        a._pack_prev = c;
-        d3_layout_packInsert(c, b);
-        b = a._pack_next;
-        for (i = 3; i < n; i++) {
-          d3_layout_packPlace(a, b, c = nodes[i]);
-          var isect = 0, s1 = 1, s2 = 1;
-          for (j = b._pack_next; j !== b; j = j._pack_next, s1++) {
-            if (d3_layout_packIntersects(j, c)) {
-              isect = 1;
-              break;
-            }
-          }
-          if (isect == 1) {
-            for (k = a._pack_prev; k !== j._pack_prev; k = k._pack_prev, s2++) {
-              if (d3_layout_packIntersects(k, c)) {
-                break;
-              }
-            }
-          }
-          if (isect) {
-            if (s1 < s2 || s1 == s2 && b.r < a.r) d3_layout_packSplice(a, b = j); else d3_layout_packSplice(a = k, b);
-            i--;
-          } else {
-            d3_layout_packInsert(a, c);
-            b = c;
-            bound(c);
-          }
-        }
-      }
-    }
-    var cx = (xMin + xMax) / 2, cy = (yMin + yMax) / 2, cr = 0;
-    for (i = 0; i < n; i++) {
-      c = nodes[i];
-      c.x -= cx;
-      c.y -= cy;
-      cr = Math.max(cr, c.r + Math.sqrt(c.x * c.x + c.y * c.y));
-    }
-    node.r = cr;
-    nodes.forEach(d3_layout_packUnlink);
-  }
-  function d3_layout_packLink(node) {
-    node._pack_next = node._pack_prev = node;
-  }
-  function d3_layout_packUnlink(node) {
-    delete node._pack_next;
-    delete node._pack_prev;
-  }
-  function d3_layout_packTransform(node, x, y, k) {
-    var children = node.children;
-    node.x = x += k * node.x;
-    node.y = y += k * node.y;
-    node.r *= k;
-    if (children) {
-      var i = -1, n = children.length;
-      while (++i < n) d3_layout_packTransform(children[i], x, y, k);
-    }
-  }
-  function d3_layout_packPlace(a, b, c) {
-    var db = a.r + c.r, dx = b.x - a.x, dy = b.y - a.y;
-    if (db && (dx || dy)) {
-      var da = b.r + c.r, dc = dx * dx + dy * dy;
-      da *= da;
-      db *= db;
-      var x = .5 + (db - da) / (2 * dc), y = Math.sqrt(Math.max(0, 2 * da * (db + dc) - (db -= dc) * db - da * da)) / (2 * dc);
-      c.x = a.x + x * dx + y * dy;
-      c.y = a.y + x * dy - y * dx;
-    } else {
-      c.x = a.x + db;
-      c.y = a.y;
-    }
-  }
-  function d3_layout_clusterY(children) {
-    return 1 + d3.max(children, function(child) {
-      return child.y;
-    });
-  }
-  function d3_layout_clusterX(children) {
-    return children.reduce(function(x, child) {
-      return x + child.x;
-    }, 0) / children.length;
-  }
-  function d3_layout_clusterLeft(node) {
-    var children = node.children;
-    return children && children.length ? d3_layout_clusterLeft(children[0]) : node;
-  }
-  function d3_layout_clusterRight(node) {
-    var children = node.children, n;
-    return children && (n = children.length) ? d3_layout_clusterRight(children[n - 1]) : node;
-  }
-  function d3_layout_treeSeparation(a, b) {
-    return a.parent == b.parent ? 1 : 2;
-  }
-  function d3_layout_treeLeft(node) {
-    var children = node.children;
-    return children && children.length ? children[0] : node._tree.thread;
-  }
-  function d3_layout_treeRight(node) {
-    var children = node.children, n;
-    return children && (n = children.length) ? children[n - 1] : node._tree.thread;
-  }
-  function d3_layout_treeSearch(node, compare) {
-    var children = node.children;
-    if (children && (n = children.length)) {
-      var child, n, i = -1;
-      while (++i < n) {
-        if (compare(child = d3_layout_treeSearch(children[i], compare), node) > 0) {
-          node = child;
-        }
-      }
-    }
-    return node;
-  }
-  function d3_layout_treeRightmost(a, b) {
-    return a.x - b.x;
-  }
-  function d3_layout_treeLeftmost(a, b) {
-    return b.x - a.x;
-  }
-  function d3_layout_treeDeepest(a, b) {
-    return a.depth - b.depth;
-  }
-  function d3_layout_treeVisitAfter(node, callback) {
-    function visit(node, previousSibling) {
-      var children = node.children;
-      if (children && (n = children.length)) {
-        var child, previousChild = null, i = -1, n;
-        while (++i < n) {
-          child = children[i];
-          visit(child, previousChild);
-          previousChild = child;
-        }
-      }
-      callback(node, previousSibling);
-    }
-    visit(node, null);
-  }
-  function d3_layout_treeShift(node) {
-    var shift = 0, change = 0, children = node.children, i = children.length, child;
-    while (--i >= 0) {
-      child = children[i]._tree;
-      child.prelim += shift;
-      child.mod += shift;
-      shift += child.shift + (change += child.change);
-    }
-  }
-  function d3_layout_treeMove(ancestor, node, shift) {
-    ancestor = ancestor._tree;
-    node = node._tree;
-    var change = shift / (node.number - ancestor.number);
-    ancestor.change += change;
-    node.change -= change;
-    node.shift += shift;
-    node.prelim += shift;
-    node.mod += shift;
-  }
-  function d3_layout_treeAncestor(vim, node, ancestor) {
-    return vim._tree.ancestor.parent == node.parent ? vim._tree.ancestor : ancestor;
-  }
-  function d3_layout_treemapPadNull(node) {
-    return {
-      x: node.x,
-      y: node.y,
-      dx: node.dx,
-      dy: node.dy
-    };
-  }
-  function d3_layout_treemapPad(node, padding) {
-    var x = node.x + padding[3], y = node.y + padding[0], dx = node.dx - padding[1] - padding[3], dy = node.dy - padding[0] - padding[2];
-    if (dx < 0) {
-      x += dx / 2;
-      dx = 0;
-    }
-    if (dy < 0) {
-      y += dy / 2;
-      dy = 0;
-    }
-    return {
-      x: x,
-      y: y,
-      dx: dx,
-      dy: dy
-    };
-  }
-  function d3_dsv(delimiter, mimeType) {
-    function dsv(url, callback) {
-      d3.text(url, mimeType, function(text) {
-        callback(text && dsv.parse(text));
-      });
-    }
-    function formatRow(row) {
-      return row.map(formatValue).join(delimiter);
-    }
-    function formatValue(text) {
-      return reFormat.test(text) ? '"' + text.replace(/\"/g, '""') + '"' : text;
-    }
-    var reParse = new RegExp("\r\n|[" + delimiter + "\r\n]", "g"), reFormat = new RegExp('["' + delimiter + "\n]"), delimiterCode = delimiter.charCodeAt(0);
-    dsv.parse = function(text) {
-      var header;
-      return dsv.parseRows(text, function(row, i) {
-        if (i) {
-          var o = {}, j = -1, m = header.length;
-          while (++j < m) o[header[j]] = row[j];
-          return o;
-        } else {
-          header = row;
-          return null;
-        }
-      });
-    };
-    dsv.parseRows = function(text, f) {
-      function token() {
-        if (reParse.lastIndex >= text.length) return EOF;
-        if (eol) {
-          eol = false;
-          return EOL;
-        }
-        var j = reParse.lastIndex;
-        if (text.charCodeAt(j) === 34) {
-          var i = j;
-          while (i++ < text.length) {
-            if (text.charCodeAt(i) === 34) {
-              if (text.charCodeAt(i + 1) !== 34) break;
-              i++;
-            }
-          }
-          reParse.lastIndex = i + 2;
-          var c = text.charCodeAt(i + 1);
-          if (c === 13) {
-            eol = true;
-            if (text.charCodeAt(i + 2) === 10) reParse.lastIndex++;
-          } else if (c === 10) {
-            eol = true;
-          }
-          return text.substring(j + 1, i).replace(/""/g, '"');
-        }
-        var m = reParse.exec(text);
-        if (m) {
-          eol = m[0].charCodeAt(0) !== delimiterCode;
-          return text.substring(j, m.index);
-        }
-        reParse.lastIndex = text.length;
-        return text.substring(j);
-      }
-      var EOL = {}, EOF = {}, rows = [], n = 0, t, eol;
-      reParse.lastIndex = 0;
-      while ((t = token()) !== EOF) {
-        var a = [];
-        while (t !== EOL && t !== EOF) {
-          a.push(t);
-          t = token();
-        }
-        if (f && !(a = f(a, n++))) continue;
-        rows.push(a);
-      }
-      return rows;
-    };
-    dsv.format = function(rows) {
-      return rows.map(formatRow).join("\n");
-    };
-    return dsv;
-  }
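-  // d3_dsv is the factory behind d3.csv and d3.tsv, binding a delimiter and
-  // mime type to a loader plus parse/parseRows/format. In this version,
-  // format expects rows as arrays, e.g.:
-  //   d3.csv.parse("a,b\n1,2")              // => [{a: "1", b: "2"}]
-  //   d3.csv.format([["a", "b"], [1, 2]])   // => "a,b\n1,2"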
-  function d3_geo_type(types, defaultValue) {
-    return function(object) {
-      return object && types.hasOwnProperty(object.type) ? types[object.type](object) : defaultValue;
-    };
-  }
-  function d3_path_circle(radius) {
-    return "m0," + radius + "a" + radius + "," + radius + " 0 1,1 0," + -2 * radius + "a" + radius + "," + radius + " 0 1,1 0," + +2 * radius + "z";
-  }
-  function d3_geo_bounds(o, f) {
-    if (d3_geo_boundsTypes.hasOwnProperty(o.type)) d3_geo_boundsTypes[o.type](o, f);
-  }
-  function d3_geo_boundsFeature(o, f) {
-    d3_geo_bounds(o.geometry, f);
-  }
-  function d3_geo_boundsFeatureCollection(o, f) {
-    for (var a = o.features, i = 0, n = a.length; i < n; i++) {
-      d3_geo_bounds(a[i].geometry, f);
-    }
-  }
-  function d3_geo_boundsGeometryCollection(o, f) {
-    for (var a = o.geometries, i = 0, n = a.length; i < n; i++) {
-      d3_geo_bounds(a[i], f);
-    }
-  }
-  function d3_geo_boundsLineString(o, f) {
-    for (var a = o.coordinates, i = 0, n = a.length; i < n; i++) {
-      f.apply(null, a[i]);
-    }
-  }
-  function d3_geo_boundsMultiLineString(o, f) {
-    for (var a = o.coordinates, i = 0, n = a.length; i < n; i++) {
-      for (var b = a[i], j = 0, m = b.length; j < m; j++) {
-        f.apply(null, b[j]);
-      }
-    }
-  }
-  function d3_geo_boundsMultiPolygon(o, f) {
-    for (var a = o.coordinates, i = 0, n = a.length; i < n; i++) {
-      for (var b = a[i][0], j = 0, m = b.length; j < m; j++) {
-        f.apply(null, b[j]);
-      }
-    }
-  }
-  function d3_geo_boundsPoint(o, f) {
-    f.apply(null, o.coordinates);
-  }
-  function d3_geo_boundsPolygon(o, f) {
-    for (var a = o.coordinates[0], i = 0, n = a.length; i < n; i++) {
-      f.apply(null, a[i]);
-    }
-  }
-  function d3_geo_greatArcSource(d) {
-    return d.source;
-  }
-  function d3_geo_greatArcTarget(d) {
-    return d.target;
-  }
-  function d3_geo_greatArcInterpolator() {
-    function interpolate(t) {
-      var B = Math.sin(t *= d) * k, A = Math.sin(d - t) * k, x = A * kx0 + B * kx1, y = A * ky0 + B * ky1, z = A * sy0 + B * sy1;
-      return [ Math.atan2(y, x) / d3_geo_radians, Math.atan2(z, Math.sqrt(x * x + y * y)) / d3_geo_radians ];
-    }
-    var x0, y0, cy0, sy0, kx0, ky0, x1, y1, cy1, sy1, kx1, ky1, d, k;
-    interpolate.distance = function() {
-      if (d == null) k = 1 / Math.sin(d = Math.acos(Math.max(-1, Math.min(1, sy0 * sy1 + cy0 * cy1 * Math.cos(x1 - x0)))));
-      return d;
-    };
-    interpolate.source = function(_) {
-      var cx0 = Math.cos(x0 = _[0] * d3_geo_radians), sx0 = Math.sin(x0);
-      cy0 = Math.cos(y0 = _[1] * d3_geo_radians);
-      sy0 = Math.sin(y0);
-      kx0 = cy0 * cx0;
-      ky0 = cy0 * sx0;
-      d = null;
-      return interpolate;
-    };
-    interpolate.target = function(_) {
-      var cx1 = Math.cos(x1 = _[0] * d3_geo_radians), sx1 = Math.sin(x1);
-      cy1 = Math.cos(y1 = _[1] * d3_geo_radians);
-      sy1 = Math.sin(y1);
-      kx1 = cy1 * cx1;
-      ky1 = cy1 * sx1;
-      d = null;
-      return interpolate;
-    };
-    return interpolate;
-  }
-  function d3_geo_greatArcInterpolate(a, b) {
-    var i = d3_geo_greatArcInterpolator().source(a).target(b);
-    i.distance();
-    return i;
-  }
-  function d3_geom_contourStart(grid) {
-    var x = 0, y = 0;
-    while (true) {
-      if (grid(x, y)) {
-        return [ x, y ];
-      }
-      if (x === 0) {
-        x = y + 1;
-        y = 0;
-      } else {
-        x = x - 1;
-        y = y + 1;
-      }
-    }
-  }
-  function d3_geom_hullCCW(i1, i2, i3, v) {
-    var t, a, b, c, d, e, f;
-    t = v[i1];
-    a = t[0];
-    b = t[1];
-    t = v[i2];
-    c = t[0];
-    d = t[1];
-    t = v[i3];
-    e = t[0];
-    f = t[1];
-    return (f - b) * (c - a) - (d - b) * (e - a) > 0;
-  }
-  function d3_geom_polygonInside(p, a, b) {
-    return (b[0] - a[0]) * (p[1] - a[1]) < (b[1] - a[1]) * (p[0] - a[0]);
-  }
-  function d3_geom_polygonIntersect(c, d, a, b) {
-    var x1 = c[0], x2 = d[0], x3 = a[0], x4 = b[0], y1 = c[1], y2 = d[1], y3 = a[1], y4 = b[1], x13 = x1 - x3, x21 = x2 - x1, x43 = x4 - x3, y13 = y1 - y3, y21 = y2 - y1, y43 = y4 - y3, ua = (x43 * y13 - y43 * x13) / (y43 * x21 - x43 * y21);
-    return [ x1 + ua * x21, y1 + ua * y21 ];
-  }
-  function d3_voronoi_tessellate(vertices, callback) {
-    var Sites = {
-      list: vertices.map(function(v, i) {
-        return {
-          index: i,
-          x: v[0],
-          y: v[1]
-        };
-      }).sort(function(a, b) {
-        return a.y < b.y ? -1 : a.y > b.y ? 1 : a.x < b.x ? -1 : a.x > b.x ? 1 : 0;
-      }),
-      bottomSite: null
-    };
-    var EdgeList = {
-      list: [],
-      leftEnd: null,
-      rightEnd: null,
-      init: function() {
-        EdgeList.leftEnd = EdgeList.createHalfEdge(null, "l");
-        EdgeList.rightEnd = EdgeList.createHalfEdge(null, "l");
-        EdgeList.leftEnd.r = EdgeList.rightEnd;
-        EdgeList.rightEnd.l = EdgeList.leftEnd;
-        EdgeList.list.unshift(EdgeList.leftEnd, EdgeList.rightEnd);
-      },
-      createHalfEdge: function(edge, side) {
-        return {
-          edge: edge,
-          side: side,
-          vertex: null,
-          l: null,
-          r: null
-        };
-      },
-      insert: function(lb, he) {
-        he.l = lb;
-        he.r = lb.r;
-        lb.r.l = he;
-        lb.r = he;
-      },
-      leftBound: function(p) {
-        var he = EdgeList.leftEnd;
-        do {
-          he = he.r;
-        } while (he != EdgeList.rightEnd && Geom.rightOf(he, p));
-        he = he.l;
-        return he;
-      },
-      del: function(he) {
-        he.l.r = he.r;
-        he.r.l = he.l;
-        he.edge = null;
-      },
-      right: function(he) {
-        return he.r;
-      },
-      left: function(he) {
-        return he.l;
-      },
-      leftRegion: function(he) {
-        return he.edge == null ? Sites.bottomSite : he.edge.region[he.side];
-      },
-      rightRegion: function(he) {
-        return he.edge == null ? Sites.bottomSite : he.edge.region[d3_voronoi_opposite[he.side]];
-      }
-    };
-    var Geom = {
-      bisect: function(s1, s2) {
-        var newEdge = {
-          region: {
-            l: s1,
-            r: s2
-          },
-          ep: {
-            l: null,
-            r: null
-          }
-        };
-        var dx = s2.x - s1.x, dy = s2.y - s1.y, adx = dx > 0 ? dx : -dx, ady = dy > 0 ? dy : -dy;
-        newEdge.c = s1.x * dx + s1.y * dy + (dx * dx + dy * dy) * .5;
-        if (adx > ady) {
-          newEdge.a = 1;
-          newEdge.b = dy / dx;
-          newEdge.c /= dx;
-        } else {
-          newEdge.b = 1;
-          newEdge.a = dx / dy;
-          newEdge.c /= dy;
-        }
-        return newEdge;
-      },
-      intersect: function(el1, el2) {
-        var e1 = el1.edge, e2 = el2.edge;
-        if (!e1 || !e2 || e1.region.r == e2.region.r) {
-          return null;
-        }
-        var d = e1.a * e2.b - e1.b * e2.a;
-        if (Math.abs(d) < 1e-10) {
-          return null;
-        }
-        var xint = (e1.c * e2.b - e2.c * e1.b) / d, yint = (e2.c * e1.a - e1.c * e2.a) / d, e1r = e1.region.r, e2r = e2.region.r, el, e;
-        if (e1r.y < e2r.y || e1r.y == e2r.y && e1r.x < e2r.x) {
-          el = el1;
-          e = e1;
-        } else {
-          el = el2;
-          e = e2;
-        }
-        var rightOfSite = xint >= e.region.r.x;
-        if (rightOfSite && el.side === "l" || !rightOfSite && el.side === "r") {
-          return null;
-        }
-        return {
-          x: xint,
-          y: yint
-        };
-      },
-      rightOf: function(he, p) {
-        var e = he.edge, topsite = e.region.r, rightOfSite = p.x > topsite.x;
-        if (rightOfSite && he.side === "l") {
-          return 1;
-        }
-        if (!rightOfSite && he.side === "r") {
-          return 0;
-        }
-        if (e.a === 1) {
-          var dyp = p.y - topsite.y, dxp = p.x - topsite.x, fast = 0, above = 0;
-          if (!rightOfSite && e.b < 0 || rightOfSite && e.b >= 0) {
-            above = fast = dyp >= e.b * dxp;
-          } else {
-            above = p.x + p.y * e.b > e.c;
-            if (e.b < 0) {
-              above = !above;
-            }
-            if (!above) {
-              fast = 1;
-            }
-          }
-          if (!fast) {
-            var dxs = topsite.x - e.region.l.x;
-            above = e.b * (dxp * dxp - dyp * dyp) < dxs * dyp * (1 + 2 * dxp / dxs + e.b * e.b);
-            if (e.b < 0) {
-              above = !above;
-            }
-          }
-        } else {
-          var yl = e.c - e.a * p.x, t1 = p.y - yl, t2 = p.x - topsite.x, t3 = yl - topsite.y;
-          above = t1 * t1 > t2 * t2 + t3 * t3;
-        }
-        return he.side === "l" ? above : !above;
-      },
-      endPoint: function(edge, side, site) {
-        edge.ep[side] = site;
-        if (!edge.ep[d3_voronoi_opposite[side]]) return;
-        callback(edge);
-      },
-      distance: function(s, t) {
-        var dx = s.x - t.x, dy = s.y - t.y;
-        return Math.sqrt(dx * dx + dy * dy);
-      }
-    };
-    var EventQueue = {
-      list: [],
-      insert: function(he, site, offset) {
-        he.vertex = site;
-        he.ystar = site.y + offset;
-        for (var i = 0, list = EventQueue.list, l = list.length; i < l; i++) {
-          var next = list[i];
-          if (he.ystar > next.ystar || he.ystar == next.ystar && site.x > next.vertex.x) {
-            continue;
-          } else {
-            break;
-          }
-        }
-        list.splice(i, 0, he);
-      },
-      del: function(he) {
-        for (var i = 0, ls = EventQueue.list, l = ls.length; i < l && ls[i] != he; ++i) {}
-        ls.splice(i, 1);
-      },
-      empty: function() {
-        return EventQueue.list.length === 0;
-      },
-      nextEvent: function(he) {
-        for (var i = 0, ls = EventQueue.list, l = ls.length; i < l; ++i) {
-          if (ls[i] == he) return ls[i + 1];
-        }
-        return null;
-      },
-      min: function() {
-        var elem = EventQueue.list[0];
-        return {
-          x: elem.vertex.x,
-          y: elem.ystar
-        };
-      },
-      extractMin: function() {
-        return EventQueue.list.shift();
-      }
-    };
-    EdgeList.init();
-    Sites.bottomSite = Sites.list.shift();
-    var newSite = Sites.list.shift(), newIntStar;
-    var lbnd, rbnd, llbnd, rrbnd, bisector;
-    var bot, top, temp, p, v;
-    var e, pm;
-    while (true) {
-      if (!EventQueue.empty()) {
-        newIntStar = EventQueue.min();
-      }
-      if (newSite && (EventQueue.empty() || newSite.y < newIntStar.y || newSite.y == newIntStar.y && newSite.x < newIntStar.x)) {
-        lbnd = EdgeList.leftBound(newSite);
-        rbnd = EdgeList.right(lbnd);
-        bot = EdgeList.rightRegion(lbnd);
-        e = Geom.bisect(bot, newSite);
-        bisector = EdgeList.createHalfEdge(e, "l");
-        EdgeList.insert(lbnd, bisector);
-        p = Geom.intersect(lbnd, bisector);
-        if (p) {
-          EventQueue.del(lbnd);
-          EventQueue.insert(lbnd, p, Geom.distance(p, newSite));
-        }
-        lbnd = bisector;
-        bisector = EdgeList.createHalfEdge(e, "r");
-        EdgeList.insert(lbnd, bisector);
-        p = Geom.intersect(bisector, rbnd);
-        if (p) {
-          EventQueue.insert(bisector, p, Geom.distance(p, newSite));
-        }
-        newSite = Sites.list.shift();
-      } else if (!EventQueue.empty()) {
-        lbnd = EventQueue.extractMin();
-        llbnd = EdgeList.left(lbnd);
-        rbnd = EdgeList.right(lbnd);
-        rrbnd = EdgeList.right(rbnd);
-        bot = EdgeList.leftRegion(lbnd);
-        top = EdgeList.rightRegion(rbnd);
-        v = lbnd.vertex;
-        Geom.endPoint(lbnd.edge, lbnd.side, v);
-        Geom.endPoint(rbnd.edge, rbnd.side, v);
-        EdgeList.del(lbnd);
-        EventQueue.del(rbnd);
-        EdgeList.del(rbnd);
-        pm = "l";
-        if (bot.y > top.y) {
-          temp = bot;
-          bot = top;
-          top = temp;
-          pm = "r";
-        }
-        e = Geom.bisect(bot, top);
-        bisector = EdgeList.createHalfEdge(e, pm);
-        EdgeList.insert(llbnd, bisector);
-        Geom.endPoint(e, d3_voronoi_opposite[pm], v);
-        p = Geom.intersect(llbnd, bisector);
-        if (p) {
-          EventQueue.del(llbnd);
-          EventQueue.insert(llbnd, p, Geom.distance(p, bot));
-        }
-        p = Geom.intersect(bisector, rrbnd);
-        if (p) {
-          EventQueue.insert(bisector, p, Geom.distance(p, bot));
-        }
-      } else {
-        break;
-      }
-    }
-    for (lbnd = EdgeList.right(EdgeList.leftEnd); lbnd != EdgeList.rightEnd; lbnd = EdgeList.right(lbnd)) {
-      callback(lbnd.edge);
-    }
-  }
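-  // This is Fortune's sweepline algorithm: site events (sorted by y, then x)
-  // insert bisector half-edge pairs into the beach line, while circle events
-  // pulled from the priority queue close arcs at Voronoi vertices, and every
-  // completed edge is handed to the callback. The public wrapper is:
-  //   d3.geom.voronoi([[0, 0], [10, 0], [5, 8]])  // one polygon per site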
-  function d3_geom_quadtreeNode() {
-    return {
-      leaf: true,
-      nodes: [],
-      point: null
-    };
-  }
-  function d3_geom_quadtreeVisit(f, node, x1, y1, x2, y2) {
-    if (!f(node, x1, y1, x2, y2)) {
-      var sx = (x1 + x2) * .5, sy = (y1 + y2) * .5, children = node.nodes;
-      if (children[0]) d3_geom_quadtreeVisit(f, children[0], x1, y1, sx, sy);
-      if (children[1]) d3_geom_quadtreeVisit(f, children[1], sx, y1, x2, sy);
-      if (children[2]) d3_geom_quadtreeVisit(f, children[2], x1, sy, sx, y2);
-      if (children[3]) d3_geom_quadtreeVisit(f, children[3], sx, sy, x2, y2);
-    }
-  }
-  function d3_geom_quadtreePoint(p) {
-    return {
-      x: p[0],
-      y: p[1]
-    };
-  }
-  function d3_time_utc() {
-    this._ = new Date(arguments.length > 1 ? Date.UTC.apply(this, arguments) : arguments[0]);
-  }
-  function d3_time_formatAbbreviate(name) {
-    return name.substring(0, 3);
-  }
-  function d3_time_parse(date, template, string, j) {
-    var c, p, i = 0, n = template.length, m = string.length;
-    while (i < n) {
-      if (j >= m) return -1;
-      c = template.charCodeAt(i++);
-      if (c == 37) {
-        p = d3_time_parsers[template.charAt(i++)];
-        if (!p || (j = p(date, string, j)) < 0) return -1;
-      } else if (c != string.charCodeAt(j++)) {
-        return -1;
-      }
-    }
-    return j;
-  }
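-  // The parser walks the template character by character: "%x" directives
-  // dispatch into d3_time_parsers, anything else must match the input
-  // literally, and each step returns the next string index or -1 on failure.
-  // For example:
-  //   d3.time.format("%Y-%m-%d").parse("2012-06-01")  // => June 1, 2012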
-  function d3_time_formatRe(names) {
-    return new RegExp("^(?:" + names.map(d3.requote).join("|") + ")", "i");
-  }
-  function d3_time_formatLookup(names) {
-    var map = new d3_Map, i = -1, n = names.length;
-    while (++i < n) map.set(names[i].toLowerCase(), i);
-    return map;
-  }
-  function d3_time_parseWeekdayAbbrev(date, string, i) {
-    d3_time_dayAbbrevRe.lastIndex = 0;
-    var n = d3_time_dayAbbrevRe.exec(string.substring(i));
-    return n ? i += n[0].length : -1;
-  }
-  function d3_time_parseWeekday(date, string, i) {
-    d3_time_dayRe.lastIndex = 0;
-    var n = d3_time_dayRe.exec(string.substring(i));
-    return n ? i += n[0].length : -1;
-  }
-  function d3_time_parseMonthAbbrev(date, string, i) {
-    d3_time_monthAbbrevRe.lastIndex = 0;
-    var n = d3_time_monthAbbrevRe.exec(string.substring(i));
-    return n ? (date.m = d3_time_monthAbbrevLookup.get(n[0].toLowerCase()), i += n[0].length) : -1;
-  }
-  function d3_time_parseMonth(date, string, i) {
-    d3_time_monthRe.lastIndex = 0;
-    var n = d3_time_monthRe.exec(string.substring(i));
-    return n ? (date.m = d3_time_monthLookup.get(n[0].toLowerCase()), i += n[0].length) : -1;
-  }
-  function d3_time_parseLocaleFull(date, string, i) {
-    return d3_time_parse(date, d3_time_formats.c.toString(), string, i);
-  }
-  function d3_time_parseLocaleDate(date, string, i) {
-    return d3_time_parse(date, d3_time_formats.x.toString(), string, i);
-  }
-  function d3_time_parseLocaleTime(date, string, i) {
-    return d3_time_parse(date, d3_time_formats.X.toString(), string, i);
-  }
-  function d3_time_parseFullYear(date, string, i) {
-    d3_time_numberRe.lastIndex = 0;
-    var n = d3_time_numberRe.exec(string.substring(i, i + 4));
-    return n ? (date.y = +n[0], i += n[0].length) : -1;
-  }
-  function d3_time_parseYear(date, string, i) {
-    d3_time_numberRe.lastIndex = 0;
-    var n = d3_time_numberRe.exec(string.substring(i, i + 2));
-    return n ? (date.y = d3_time_expandYear(+n[0]), i += n[0].length) : -1;
-  }
-  function d3_time_expandYear(d) {
-    return d + (d > 68 ? 1900 : 2e3);
-  }
-  function d3_time_parseMonthNumber(date, string, i) {
-    d3_time_numberRe.lastIndex = 0;
-    var n = d3_time_numberRe.exec(string.substring(i, i + 2));
-    return n ? (date.m = n[0] - 1, i += n[0].length) : -1;
-  }
-  function d3_time_parseDay(date, string, i) {
-    d3_time_numberRe.lastIndex = 0;
-    var n = d3_time_numberRe.exec(string.substring(i, i + 2));
-    return n ? (date.d = +n[0], i += n[0].length) : -1;
-  }
-  function d3_time_parseHour24(date, string, i) {
-    d3_time_numberRe.lastIndex = 0;
-    var n = d3_time_numberRe.exec(string.substring(i, i + 2));
-    return n ? (date.H = +n[0], i += n[0].length) : -1;
-  }
-  function d3_time_parseMinutes(date, string, i) {
-    d3_time_numberRe.lastIndex = 0;
-    var n = d3_time_numberRe.exec(string.substring(i, i + 2));
-    return n ? (date.M = +n[0], i += n[0].length) : -1;
-  }
-  function d3_time_parseSeconds(date, string, i) {
-    d3_time_numberRe.lastIndex = 0;
-    var n = d3_time_numberRe.exec(string.substring(i, i + 2));
-    return n ? (date.S = +n[0], i += n[0].length) : -1;
-  }
-  function d3_time_parseMilliseconds(date, string, i) {
-    d3_time_numberRe.lastIndex = 0;
-    var n = d3_time_numberRe.exec(string.substring(i, i + 3));
-    return n ? (date.L = +n[0], i += n[0].length) : -1;
-  }
-  function d3_time_parseAmPm(date, string, i) {
-    var n = d3_time_amPmLookup.get(string.substring(i, i += 2).toLowerCase());
-    return n == null ? -1 : (date.p = n, i);
-  }
-  function d3_time_zone(d) {
-    var z = d.getTimezoneOffset(), zs = z > 0 ? "-" : "+", zh = ~~(Math.abs(z) / 60), zm = Math.abs(z) % 60;
-    return zs + d3_time_zfill2(zh) + d3_time_zfill2(zm);
-  }
-  function d3_time_formatIsoNative(date) {
-    return date.toISOString();
-  }
-  function d3_time_interval(local, step, number) {
-    function round(date) {
-      var d0 = local(date), d1 = offset(d0, 1);
-      return date - d0 < d1 - date ? d0 : d1;
-    }
-    function ceil(date) {
-      step(date = local(new d3_time(date - 1)), 1);
-      return date;
-    }
-    function offset(date, k) {
-      step(date = new d3_time(+date), k);
-      return date;
-    }
-    function range(t0, t1, dt) {
-      var time = ceil(t0), times = [];
-      if (dt > 1) {
-        while (time < t1) {
-          if (!(number(time) % dt)) times.push(new Date(+time));
-          step(time, 1);
-        }
-      } else {
-        while (time < t1) times.push(new Date(+time)), step(time, 1);
-      }
-      return times;
-    }
-    function range_utc(t0, t1, dt) {
-      try {
-        d3_time = d3_time_utc;
-        var utc = new d3_time_utc;
-        utc._ = t0;
-        return range(utc, t1, dt);
-      } finally {
-        d3_time = Date;
-      }
-    }
-    local.floor = local;
-    local.round = round;
-    local.ceil = ceil;
-    local.offset = offset;
-    local.range = range;
-    var utc = local.utc = d3_time_interval_utc(local);
-    utc.floor = utc;
-    utc.round = d3_time_interval_utc(round);
-    utc.ceil = d3_time_interval_utc(ceil);
-    utc.offset = d3_time_interval_utc(offset);
-    utc.range = range_utc;
-    return local;
-  }
-  function d3_time_interval_utc(method) {
-    return function(date, k) {
-      try {
-        d3_time = d3_time_utc;
-        var utc = new d3_time_utc;
-        utc._ = date;
-        return method(utc, k)._;
-      } finally {
-        d3_time = Date;
-      }
-    };
-  }
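-  // Every d3.time interval is built from a local floor function and a step:
-  // round, ceil, offset and range are derived from them, and the UTC
-  // variants temporarily swap the Date implementation for d3_time_utc. E.g.:
-  //   d3.time.day.offset(new Date(2012, 0, 31), 1)  // => Feb 1, 2012
-  //   d3.time.day.range(new Date(2012, 0, 1), new Date(2012, 0, 4)).length  // => 3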
-  function d3_time_scale(linear, methods, format) {
-    function scale(x) {
-      return linear(x);
-    }
-    scale.invert = function(x) {
-      return d3_time_scaleDate(linear.invert(x));
-    };
-    scale.domain = function(x) {
-      if (!arguments.length) return linear.domain().map(d3_time_scaleDate);
-      linear.domain(x);
-      return scale;
-    };
-    scale.nice = function(m) {
-      return scale.domain(d3_scale_nice(scale.domain(), function() {
-        return m;
-      }));
-    };
-    scale.ticks = function(m, k) {
-      var extent = d3_time_scaleExtent(scale.domain());
-      if (typeof m !== "function") {
-        var span = extent[1] - extent[0], target = span / m, i = d3.bisect(d3_time_scaleSteps, target);
-        if (i == d3_time_scaleSteps.length) return methods.year(extent, m);
-        if (!i) return linear.ticks(m).map(d3_time_scaleDate);
-        if (Math.log(target / d3_time_scaleSteps[i - 1]) < Math.log(d3_time_scaleSteps[i] / target)) --i;
-        m = methods[i];
-        k = m[1];
-        m = m[0].range;
-      }
-      return m(extent[0], new Date(+extent[1] + 1), k);
-    };
-    scale.tickFormat = function() {
-      return format;
-    };
-    scale.copy = function() {
-      return d3_time_scale(linear.copy(), methods, format);
-    };
-    return d3.rebind(scale, linear, "range", "rangeRound", "interpolate", "clamp");
-  }
-  function d3_time_scaleExtent(domain) {
-    var start = domain[0], stop = domain[domain.length - 1];
-    return start < stop ? [ start, stop ] : [ stop, start ];
-  }
-  function d3_time_scaleDate(t) {
-    return new Date(t);
-  }
-  function d3_time_scaleFormat(formats) {
-    return function(date) {
-      var i = formats.length - 1, f = formats[i];
-      while (!f[1](date)) f = formats[--i];
-      return f[0](date);
-    };
-  }
-  function d3_time_scaleSetYear(y) {
-    var d = new Date(y, 0, 1);
-    d.setFullYear(y);
-    return d;
-  }
-  function d3_time_scaleGetYear(d) {
-    var y = d.getFullYear(), d0 = d3_time_scaleSetYear(y), d1 = d3_time_scaleSetYear(y + 1);
-    return y + (d - d0) / (d1 - d0);
-  }
-  function d3_time_scaleUTCSetYear(y) {
-    var d = new Date(Date.UTC(y, 0, 1));
-    d.setUTCFullYear(y);
-    return d;
-  }
-  function d3_time_scaleUTCGetYear(d) {
-    var y = d.getUTCFullYear(), d0 = d3_time_scaleUTCSetYear(y), d1 = d3_time_scaleUTCSetYear(y + 1);
-    return y + (d - d0) / (d1 - d0);
-  }
-  if (!Date.now) Date.now = function() {
-    return +(new Date);
-  };
-  try {
-    document.createElement("div").style.setProperty("opacity", 0, "");
-  } catch (error) {
-    var d3_style_prototype = CSSStyleDeclaration.prototype, d3_style_setProperty = d3_style_prototype.setProperty;
-    d3_style_prototype.setProperty = function(name, value, priority) {
-      d3_style_setProperty.call(this, name, value + "", priority);
-    };
-  }
-  d3 = {
-    version: "2.10.2"
-  };
-  var d3_array = d3_arraySlice;
-  try {
-    d3_array(document.documentElement.childNodes)[0].nodeType;
-  } catch (e) {
-    d3_array = d3_arrayCopy;
-  }
-  var d3_arraySubclass = [].__proto__ ? function(array, prototype) {
-    array.__proto__ = prototype;
-  } : function(array, prototype) {
-    for (var property in prototype) array[property] = prototype[property];
-  };
-  d3.map = function(object) {
-    var map = new d3_Map;
-    for (var key in object) map.set(key, object[key]);
-    return map;
-  };
-  d3_class(d3_Map, {
-    has: function(key) {
-      return d3_map_prefix + key in this;
-    },
-    get: function(key) {
-      return this[d3_map_prefix + key];
-    },
-    set: function(key, value) {
-      return this[d3_map_prefix + key] = value;
-    },
-    remove: function(key) {
-      key = d3_map_prefix + key;
-      return key in this && delete this[key];
-    },
-    keys: function() {
-      var keys = [];
-      this.forEach(function(key) {
-        keys.push(key);
-      });
-      return keys;
-    },
-    values: function() {
-      var values = [];
-      this.forEach(function(key, value) {
-        values.push(value);
-      });
-      return values;
-    },
-    entries: function() {
-      var entries = [];
-      this.forEach(function(key, value) {
-        entries.push({
-          key: key,
-          value: value
-        });
-      });
-      return entries;
-    },
-    forEach: function(f) {
-      for (var key in this) {
-        if (key.charCodeAt(0) === d3_map_prefixCode) {
-          f.call(this, key.substring(1), this[key]);
-        }
-      }
-    }
-  });
-  var d3_map_prefix = "\0", d3_map_prefixCode = d3_map_prefix.charCodeAt(0);
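-  // d3_Map stores entries as own properties under a "\0" prefix, so user keys
-  // such as "hasOwnProperty" or "__proto__" cannot collide with Object's
-  // prototype members. Typical use:
-  //   var m = d3.map({foo: 1});
-  //   m.set("bar", 2);
-  //   m.get("foo");  // => 1
-  //   m.has("baz");  // => false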
-  d3.functor = d3_functor;
-  d3.rebind = function(target, source) {
-    var i = 1, n = arguments.length, method;
-    while (++i < n) target[method = arguments[i]] = d3_rebind(target, source, source[method]);
-    return target;
-  };
-  d3.ascending = function(a, b) {
-    return a < b ? -1 : a > b ? 1 : a >= b ? 0 : NaN;
-  };
-  d3.descending = function(a, b) {
-    return b < a ? -1 : b > a ? 1 : b >= a ? 0 : NaN;
-  };
-  d3.mean = function(array, f) {
-    var n = array.length, a, m = 0, i = -1, j = 0;
-    if (arguments.length === 1) {
-      while (++i < n) if (d3_number(a = array[i])) m += (a - m) / ++j;
-    } else {
-      while (++i < n) if (d3_number(a = f.call(array, array[i], i))) m += (a - m) / ++j;
-    }
-    return j ? m : undefined;
-  };
-  d3.median = function(array, f) {
-    if (arguments.length > 1) array = array.map(f);
-    array = array.filter(d3_number);
-    return array.length ? d3.quantile(array.sort(d3.ascending), .5) : undefined;
-  };
-  d3.min = function(array, f) {
-    var i = -1, n = array.length, a, b;
-    if (arguments.length === 1) {
-      while (++i < n && ((a = array[i]) == null || a != a)) a = undefined;
-      while (++i < n) if ((b = array[i]) != null && a > b) a = b;
-    } else {
-      while (++i < n && ((a = f.call(array, array[i], i)) == null || a != a)) a = undefined;
-      while (++i < n) if ((b = f.call(array, array[i], i)) != null && a > b) a = b;
-    }
-    return a;
-  };
-  d3.max = function(array, f) {
-    var i = -1, n = array.length, a, b;
-    if (arguments.length === 1) {
-      while (++i < n && ((a = array[i]) == null || a != a)) a = undefined;
-      while (++i < n) if ((b = array[i]) != null && b > a) a = b;
-    } else {
-      while (++i < n && ((a = f.call(array, array[i], i)) == null || a != a)) a = undefined;
-      while (++i < n) if ((b = f.call(array, array[i], i)) != null && b > a) a = b;
-    }
-    return a;
-  };
-  d3.extent = function(array, f) {
-    var i = -1, n = array.length, a, b, c;
-    if (arguments.length === 1) {
-      while (++i < n && ((a = c = array[i]) == null || a != a)) a = c = undefined;
-      while (++i < n) if ((b = array[i]) != null) {
-        if (a > b) a = b;
-        if (c < b) c = b;
-      }
-    } else {
-      while (++i < n && ((a = c = f.call(array, array[i], i)) == null || a != a)) a = undefined;
-      while (++i < n) if ((b = f.call(array, array[i], i)) != null) {
-        if (a > b) a = b;
-        if (c < b) c = b;
-      }
-    }
-    return [ a, c ];
-  };
-  d3.random = {
-    normal: function(µ, σ) {
-      var n = arguments.length;
-      if (n < 2) σ = 1;
-      if (n < 1) µ = 0;
-      return function() {
-        var x, y, r;
-        do {
-          x = Math.random() * 2 - 1;
-          y = Math.random() * 2 - 1;
-          r = x * x + y * y;
-        } while (!r || r > 1);
-        return µ + σ * x * Math.sqrt(-2 * Math.log(r) / r);
-      };
-    },
-    logNormal: function(µ, σ) {
-      var n = arguments.length;
-      if (n < 2) σ = 1;
-      if (n < 1) µ = 0;
-      var random = d3.random.normal();
-      return function() {
-        return Math.exp(µ + σ * random());
-      };
-    },
-    irwinHall: function(m) {
-      return function() {
-        for (var s = 0, j = 0; j < m; j++) s += Math.random();
-        return s / m;
-      };
-    }
-  };
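-  // Each generator closes over its parameters: normal uses the Marsaglia
-  // polar variant of Box-Muller (note the rejection loop on r), logNormal
-  // exponentiates a normal deviate, and irwinHall sums m uniforms and
-  // rescales by 1 / m.
-  //   var rng = d3.random.normal(0, 1);
-  //   rng(); rng();  // two independent N(0, 1) samples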
-  d3.sum = function(array, f) {
-    var s = 0, n = array.length, a, i = -1;
-    if (arguments.length === 1) {
-      while (++i < n) if (!isNaN(a = +array[i])) s += a;
-    } else {
-      while (++i < n) if (!isNaN(a = +f.call(array, array[i], i))) s += a;
-    }
-    return s;
-  };
-  d3.quantile = function(values, p) {
-    var H = (values.length - 1) * p + 1, h = Math.floor(H), v = values[h - 1], e = H - h;
-    return e ? v + e * (values[h] - v) : v;
-  };
-  d3.transpose = function(matrix) {
-    return d3.zip.apply(d3, matrix);
-  };
-  d3.zip = function() {
-    if (!(n = arguments.length)) return [];
-    for (var i = -1, m = d3.min(arguments, d3_zipLength), zips = new Array(m); ++i < m; ) {
-      for (var j = -1, n, zip = zips[i] = new Array(n); ++j < n; ) {
-        zip[j] = arguments[j][i];
-      }
-    }
-    return zips;
-  };
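-  // d3.zip truncates to the shortest argument, and d3.transpose is just zip
-  // applied to a matrix's rows:
-  //   d3.zip([1, 2], [3, 4])         // => [[1, 3], [2, 4]]
-  //   d3.transpose([[1, 2], [3, 4]]) // => [[1, 3], [2, 4]]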
-  d3.bisector = function(f) {
-    return {
-      left: function(a, x, lo, hi) {
-        if (arguments.length < 3) lo = 0;
-        if (arguments.length < 4) hi = a.length;
-        while (lo < hi) {
-          var mid = lo + hi >>> 1;
-          if (f.call(a, a[mid], mid) < x) lo = mid + 1; else hi = mid;
-        }
-        return lo;
-      },
-      right: function(a, x, lo, hi) {
-        if (arguments.length < 3) lo = 0;
-        if (arguments.length < 4) hi = a.length;
-        while (lo < hi) {
-          var mid = lo + hi >>> 1;
-          if (x < f.call(a, a[mid], mid)) hi = mid; else lo = mid + 1;
-        }
-        return lo;
-      }
-    };
-  };
-  var d3_bisector = d3.bisector(function(d) {
-    return d;
-  });
-  d3.bisectLeft = d3_bisector.left;
-  d3.bisect = d3.bisectRight = d3_bisector.right;
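-  // d3.bisector generalizes binary search over sorted input via an accessor;
-  // right (the d3.bisect default) returns the insertion point after equal
-  // values, left before them:
-  //   d3.bisect([1, 2, 2, 3], 2)      // => 3
-  //   d3.bisectLeft([1, 2, 2, 3], 2)  // => 1
-  //   d3.bisector(function(d) { return d.x; }).left(rows, x0)  // rows sorted by x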
-  d3.first = function(array, f) {
-    var i = 0, n = array.length, a = array[0], b;
-    if (arguments.length === 1) f = d3.ascending;
-    while (++i < n) {
-      if (f.call(array, a, b = array[i]) > 0) {
-        a = b;
-      }
-    }
-    return a;
-  };
-  d3.last = function(array, f) {
-    var i = 0, n = array.length, a = array[0], b;
-    if (arguments.length === 1) f = d3.ascending;
-    while (++i < n) {
-      if (f.call(array, a, b = array[i]) <= 0) {
-        a = b;
-      }
-    }
-    return a;
-  };
-  d3.nest = function() {
-    function map(array, depth) {
-      if (depth >= keys.length) return rollup ? rollup.call(nest, array) : sortValues ? array.sort(sortValues) : array;
-      var i = -1, n = array.length, key = keys[depth++], keyValue, object, valuesByKey = new d3_Map, values, o = {};
-      while (++i < n) {
-        if (values = valuesByKey.get(keyValue = key(object = array[i]))) {
-          values.push(object);
-        } else {
-          valuesByKey.set(keyValue, [ object ]);
-        }
-      }
-      valuesByKey.forEach(function(keyValue, values) {
-        o[keyValue] = map(values, depth);
-      });
-      return o;
-    }
-    function entries(map, depth) {
-      if (depth >= keys.length) return map;
-      var a = [], sortKey = sortKeys[depth++], key;
-      for (key in map) {
-        a.push({
-          key: key,
-          values: entries(map[key], depth)
-        });
-      }
-      if (sortKey) a.sort(function(a, b) {
-        return sortKey(a.key, b.key);
-      });
-      return a;
-    }
-    var nest = {}, keys = [], sortKeys = [], sortValues, rollup;
-    nest.map = function(array) {
-      return map(array, 0);
-    };
-    nest.entries = function(array) {
-      return entries(map(array, 0), 0);
-    };
-    nest.key = function(d) {
-      keys.push(d);
-      return nest;
-    };
-    nest.sortKeys = function(order) {
-      sortKeys[keys.length - 1] = order;
-      return nest;
-    };
-    nest.sortValues = function(order) {
-      sortValues = order;
-      return nest;
-    };
-    nest.rollup = function(f) {
-      rollup = f;
-      return nest;
-    };
-    return nest;
-  };
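-  // d3.nest groups a flat array by successive key functions; map() yields
-  // nested objects and entries() the equivalent {key, values} arrays (note
-  // that keys are stringified). Sketch:
-  //   d3.nest()
-  //       .key(function(d) { return d.year; })
-  //       .entries([{year: 2012}, {year: 2012}, {year: 2013}])
-  //   // => [{key: "2012", values: [..2..]}, {key: "2013", values: [..1..]}]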
-  d3.keys = function(map) {
-    var keys = [];
-    for (var key in map) keys.push(key);
-    return keys;
-  };
-  d3.values = function(map) {
-    var values = [];
-    for (var key in map) values.push(map[key]);
-    return values;
-  };
-  d3.entries = function(map) {
-    var entries = [];
-    for (var key in map) entries.push({
-      key: key,
-      value: map[key]
-    });
-    return entries;
-  };
-  d3.permute = function(array, indexes) {
-    var permutes = [], i = -1, n = indexes.length;
-    while (++i < n) permutes[i] = array[indexes[i]];
-    return permutes;
-  };
-  d3.merge = function(arrays) {
-    return Array.prototype.concat.apply([], arrays);
-  };
-  d3.split = function(array, f) {
-    var arrays = [], values = [], value, i = -1, n = array.length;
-    if (arguments.length < 2) f = d3_splitter;
-    while (++i < n) {
-      if (f.call(values, value = array[i], i)) {
-        values = [];
-      } else {
-        if (!values.length) arrays.push(values);
-        values.push(value);
-      }
-    }
-    return arrays;
-  };
-  d3.range = function(start, stop, step) {
-    if (arguments.length < 3) {
-      step = 1;
-      if (arguments.length < 2) {
-        stop = start;
-        start = 0;
-      }
-    }
-    if ((stop - start) / step === Infinity) throw new Error("infinite range");
-    var range = [], k = d3_range_integerScale(Math.abs(step)), i = -1, j;
-    start *= k, stop *= k, step *= k;
-    if (step < 0) while ((j = start + step * ++i) > stop) range.push(j / k); else while ((j = start + step * ++i) < stop) range.push(j / k);
-    return range;
-  };
-  d3.requote = function(s) {
-    return s.replace(d3_requote_re, "\\$&");
-  };
-  var d3_requote_re = /[\\\^\$\*\+\?\|\[\]\(\)\.\{\}]/g;
-  d3.round = function(x, n) {
-    return n ? Math.round(x * (n = Math.pow(10, n))) / n : Math.round(x);
-  };
-  d3.xhr = function(url, mime, callback) {
-    var req = new XMLHttpRequest;
-    if (arguments.length < 3) callback = mime, mime = null; else if (mime && req.overrideMimeType) req.overrideMimeType(mime);
-    req.open("GET", url, true);
-    if (mime) req.setRequestHeader("Accept", mime);
-    req.onreadystatechange = function() {
-      if (req.readyState === 4) {
-        var s = req.status;
-        callback(!s && req.response || s >= 200 && s < 300 || s === 304 ? req : null);
-      }
-    };
-    req.send(null);
-  };
-  d3.text = function(url, mime, callback) {
-    function ready(req) {
-      callback(req && req.responseText);
-    }
-    if (arguments.length < 3) {
-      callback = mime;
-      mime = null;
-    }
-    d3.xhr(url, mime, ready);
-  };
-  d3.json = function(url, callback) {
-    d3.text(url, "application/json", function(text) {
-      callback(text ? JSON.parse(text) : null);
-    });
-  };
-  d3.html = function(url, callback) {
-    d3.text(url, "text/html", function(text) {
-      if (text != null) {
-        var range = document.createRange();
-        range.selectNode(document.body);
-        text = range.createContextualFragment(text);
-      }
-      callback(text);
-    });
-  };
-  d3.xml = function(url, mime, callback) {
-    function ready(req) {
-      callback(req && req.responseXML);
-    }
-    if (arguments.length < 3) {
-      callback = mime;
-      mime = null;
-    }
-    d3.xhr(url, mime, ready);
-  };
-  var d3_nsPrefix = {
-    svg: "http://www.w3.org/2000/svg",
-    xhtml: "http://www.w3.org/1999/xhtml",
-    xlink: "http://www.w3.org/1999/xlink",
-    xml: "http://www.w3.org/XML/1998/namespace",
-    xmlns: "http://www.w3.org/2000/xmlns/"
-  };
-  d3.ns = {
-    prefix: d3_nsPrefix,
-    qualify: function(name) {
-      var i = name.indexOf(":"), prefix = name;
-      if (i >= 0) {
-        prefix = name.substring(0, i);
-        name = name.substring(i + 1);
-      }
-      return d3_nsPrefix.hasOwnProperty(prefix) ? {
-        space: d3_nsPrefix[prefix],
-        local: name
-      } : name;
-    }
-  };
-  d3.dispatch = function() {
-    var dispatch = new d3_dispatch, i = -1, n = arguments.length;
-    while (++i < n) dispatch[arguments[i]] = d3_dispatch_event(dispatch);
-    return dispatch;
-  };
-  d3_dispatch.prototype.on = function(type, listener) {
-    var i = type.indexOf("."), name = "";
-    if (i > 0) {
-      name = type.substring(i + 1);
-      type = type.substring(0, i);
-    }
-    return arguments.length < 2 ? this[type].on(name) : this[type].on(name, listener);
-  };
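-  // Listener types may carry a namespace ("type.name") so several listeners
-  // can share one event type and be removed independently, e.g.:
-  //   var dispatch = d3.dispatch("load");
-  //   dispatch.on("load.chart", function() { /* redraw */ });
-  //   dispatch.on("load.chart", null);  // detaches only this listener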
-  d3.format = function(specifier) {
-    var match = d3_format_re.exec(specifier), fill = match[1] || " ", sign = match[3] || "", zfill = match[5], width = +match[6], comma = match[7], precision = match[8], type = match[9], scale = 1, suffix = "", integer = false;
-    if (precision) precision = +precision.substring(1);
-    if (zfill) {
-      fill = "0";
-      if (comma) width -= Math.floor((width - 1) / 4);
-    }
-    switch (type) {
-     case "n":
-      comma = true;
-      type = "g";
-      break;
-     case "%":
-      scale = 100;
-      suffix = "%";
-      type = "f";
-      break;
-     case "p":
-      scale = 100;
-      suffix = "%";
-      type = "r";
-      break;
-     case "d":
-      integer = true;
-      precision = 0;
-      break;
-     case "s":
-      scale = -1;
-      type = "r";
-      break;
-    }
-    if (type == "r" && !precision) type = "g";
-    type = d3_format_types.get(type) || d3_format_typeDefault;
-    return function(value) {
-      if (integer && value % 1) return "";
-      var negative = value < 0 && (value = -value) ? "-" : sign;
-      if (scale < 0) {
-        var prefix = d3.formatPrefix(value, precision);
-        value = prefix.scale(value);
-        suffix = prefix.symbol;
-      } else {
-        value *= scale;
-      }
-      value = type(value, precision);
-      if (zfill) {
-        var length = value.length + negative.length;
-        if (length < width) value = (new Array(width - length + 1)).join(fill) + value;
-        if (comma) value = d3_format_group(value);
-        value = negative + value;
-      } else {
-        if (comma) value = d3_format_group(value);
-        value = negative + value;
-        var length = value.length;
-        if (length < width) value = (new Array(width - length + 1)).join(fill) + value;
-      }
-      return value + suffix;
-    };
-  };
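-  // d3.format compiles a Python-style specifier (fill, align, sign,
-  // zero-fill, width, comma grouping, precision, type) into a reusable
-  // formatter:
-  //   d3.format(",.2f")(12345.678); // => "12,345.68"
-  //   d3.format("08d")(42);         // => "00000042"
-  //   d3.format("d")(4.2);          // => "" (the "d" type rejects non-integers)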
-  var d3_format_re = /(?:([^{])?([<>=^]))?([+\- ])?(#)?(0)?([0-9]+)?(,)?(\.[0-9]+)?([a-zA-Z%])?/;
-  var d3_format_types = d3.map({
-    g: function(x, p) {
-      return x.toPrecision(p);
-    },
-    e: function(x, p) {
-      return x.toExponential(p);
-    },
-    f: function(x, p) {
-      return x.toFixed(p);
-    },
-    r: function(x, p) {
-      return d3.round(x, p = d3_format_precision(x, p)).toFixed(Math.max(0, Math.min(20, p)));
-    }
-  });
-  var d3_formatPrefixes = [ "y", "z", "a", "f", "p", "n", "μ", "m", "", "k", "M", "G", "T", "P", "E", "Z", "Y" ].map(d3_formatPrefix);
-  d3.formatPrefix = function(value, precision) {
-    var i = 0;
-    if (value) {
-      if (value < 0) value *= -1;
-      if (precision) value = d3.round(value, d3_format_precision(value, precision));
-      i = 1 + Math.floor(1e-12 + Math.log(value) / Math.LN10);
-      i = Math.max(-24, Math.min(24, Math.floor((i <= 0 ? i + 1 : i - 1) / 3) * 3));
-    }
-    return d3_formatPrefixes[8 + i / 3];
-  };
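-  // d3.formatPrefix picks the nearest SI prefix for a value; the returned
-  // object carries the prefix symbol and a matching scale function:
-  //   var p = d3.formatPrefix(1.21e9);
-  //   p.symbol;        // => "G"
-  //   p.scale(1.21e9); // => 1.21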
-  var d3_ease_quad = d3_ease_poly(2), d3_ease_cubic = d3_ease_poly(3), d3_ease_default = function() {
-    return d3_ease_identity;
-  };
-  var d3_ease = d3.map({
-    linear: d3_ease_default,
-    poly: d3_ease_poly,
-    quad: function() {
-      return d3_ease_quad;
-    },
-    cubic: function() {
-      return d3_ease_cubic;
-    },
-    sin: function() {
-      return d3_ease_sin;
-    },
-    exp: function() {
-      return d3_ease_exp;
-    },
-    circle: function() {
-      return d3_ease_circle;
-    },
-    elastic: d3_ease_elastic,
-    back: d3_ease_back,
-    bounce: function() {
-      return d3_ease_bounce;
-    }
-  });
-  var d3_ease_mode = d3.map({
-    "in": d3_ease_identity,
-    out: d3_ease_reverse,
-    "in-out": d3_ease_reflect,
-    "out-in": function(f) {
-      return d3_ease_reflect(d3_ease_reverse(f));
-    }
-  });
-  d3.ease = function(name) {
-    var i = name.indexOf("-"), t = i >= 0 ? name.substring(0, i) : name, m = i >= 0 ? name.substring(i + 1) : "in";
-    t = d3_ease.get(t) || d3_ease_default;
-    m = d3_ease_mode.get(m) || d3_ease_identity;
-    return d3_ease_clamp(m(t.apply(null, Array.prototype.slice.call(arguments, 1))));
-  };
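-  // d3.ease looks up an easing function by "name-mode" (mode defaults to
-  // "in") and clamps it at the ends of the [0, 1] domain; extra arguments
-  // parameterize families such as poly and elastic:
-  //   d3.ease("cubic-in-out")(0.5); // => 0.5
-  //   d3.ease("poly", 4);           // a quartic ease-in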
-  d3.event = null;
-  d3.transform = function(string) {
-    var g = document.createElementNS(d3.ns.prefix.svg, "g");
-    return (d3.transform = function(string) {
-      g.setAttribute("transform", string);
-      var t = g.transform.baseVal.consolidate();
-      return new d3_transform(t ? t.matrix : d3_transformIdentity);
-    })(string);
-  };
-  d3_transform.prototype.toString = function() {
-    return "translate(" + this.translate + ")rotate(" + this.rotate + ")skewX(" + this.skew + ")scale(" + this.scale + ")";
-  };
-  var d3_transformDegrees = 180 / Math.PI, d3_transformIdentity = {
-    a: 1,
-    b: 0,
-    c: 0,
-    d: 1,
-    e: 0,
-    f: 0
-  };
-  d3.interpolate = function(a, b) {
-    var i = d3.interpolators.length, f;
-    while (--i >= 0 && !(f = d3.interpolators[i](a, b))) ;
-    return f;
-  };
-  d3.interpolateNumber = function(a, b) {
-    b -= a;
-    return function(t) {
-      return a + b * t;
-    };
-  };
-  d3.interpolateRound = function(a, b) {
-    b -= a;
-    return function(t) {
-      return Math.round(a + b * t);
-    };
-  };
-  d3.interpolateString = function(a, b) {
-    var m, i, j, s0 = 0, s1 = 0, s = [], q = [], n, o;
-    d3_interpolate_number.lastIndex = 0;
-    for (i = 0; m = d3_interpolate_number.exec(b); ++i) {
-      if (m.index) s.push(b.substring(s0, s1 = m.index));
-      q.push({
-        i: s.length,
-        x: m[0]
-      });
-      s.push(null);
-      s0 = d3_interpolate_number.lastIndex;
-    }
-    if (s0 < b.length) s.push(b.substring(s0));
-    for (i = 0, n = q.length; (m = d3_interpolate_number.exec(a)) && i < n; ++i) {
-      o = q[i];
-      if (o.x == m[0]) {
-        if (o.i) {
-          if (s[o.i + 1] == null) {
-            s[o.i - 1] += o.x;
-            s.splice(o.i, 1);
-            for (j = i + 1; j < n; ++j) q[j].i--;
-          } else {
-            s[o.i - 1] += o.x + s[o.i + 1];
-            s.splice(o.i, 2);
-            for (j = i + 1; j < n; ++j) q[j].i -= 2;
-          }
-        } else {
-          if (s[o.i + 1] == null) {
-            s[o.i] = o.x;
-          } else {
-            s[o.i] = o.x + s[o.i + 1];
-            s.splice(o.i + 1, 1);
-            for (j = i + 1; j < n; ++j) q[j].i--;
-          }
-        }
-        q.splice(i, 1);
-        n--;
-        i--;
-      } else {
-        o.x = d3.interpolateNumber(parseFloat(m[0]), parseFloat(o.x));
-      }
-    }
-    while (i < n) {
-      o = q.pop();
-      if (s[o.i + 1] == null) {
-        s[o.i] = o.x;
-      } else {
-        s[o.i] = o.x + s[o.i + 1];
-        s.splice(o.i + 1, 1);
-      }
-      n--;
-    }
-    if (s.length === 1) {
-      return s[0] == null ? q[0].x : function() {
-        return b;
-      };
-    }
-    return function(t) {
-      for (i = 0; i < n; ++i) s[(o = q[i]).i] = o.x(t);
-      return s.join("");
-    };
-  };
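-  // d3.interpolateString pairs up the embedded numbers of two strings and
-  // interpolates each pair while preserving the surrounding text, which is
-  // how values like "10px" or "rotate(45)" animate:
-  //   d3.interpolateString("10px sans", "20px sans")(0.5); // => "15px sans"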
-  d3.interpolateTransform = function(a, b) {
-    var s = [], q = [], n, A = d3.transform(a), B = d3.transform(b), ta = A.translate, tb = B.translate, ra = A.rotate, rb = B.rotate, wa = A.skew, wb = B.skew, ka = A.scale, kb = B.scale;
-    if (ta[0] != tb[0] || ta[1] != tb[1]) {
-      s.push("translate(", null, ",", null, ")");
-      q.push({
-        i: 1,
-        x: d3.interpolateNumber(ta[0], tb[0])
-      }, {
-        i: 3,
-        x: d3.interpolateNumber(ta[1], tb[1])
-      });
-    } else if (tb[0] || tb[1]) {
-      s.push("translate(" + tb + ")");
-    } else {
-      s.push("");
-    }
-    if (ra != rb) {
-      if (ra - rb > 180) rb += 360; else if (rb - ra > 180) ra += 360;
-      q.push({
-        i: s.push(s.pop() + "rotate(", null, ")") - 2,
-        x: d3.interpolateNumber(ra, rb)
-      });
-    } else if (rb) {
-      s.push(s.pop() + "rotate(" + rb + ")");
-    }
-    if (wa != wb) {
-      q.push({
-        i: s.push(s.pop() + "skewX(", null, ")") - 2,
-        x: d3.interpolateNumber(wa, wb)
-      });
-    } else if (wb) {
-      s.push(s.pop() + "skewX(" + wb + ")");
-    }
-    if (ka[0] != kb[0] || ka[1] != kb[1]) {
-      n = s.push(s.pop() + "scale(", null, ",", null, ")");
-      q.push({
-        i: n - 4,
-        x: d3.interpolateNumber(ka[0], kb[0])
-      }, {
-        i: n - 2,
-        x: d3.interpolateNumber(ka[1], kb[1])
-      });
-    } else if (kb[0] != 1 || kb[1] != 1) {
-      s.push(s.pop() + "scale(" + kb + ")");
-    }
-    n = q.length;
-    return function(t) {
-      var i = -1, o;
-      while (++i < n) s[(o = q[i]).i] = o.x(t);
-      return s.join("");
-    };
-  };
-  d3.interpolateRgb = function(a, b) {
-    a = d3.rgb(a);
-    b = d3.rgb(b);
-    var ar = a.r, ag = a.g, ab = a.b, br = b.r - ar, bg = b.g - ag, bb = b.b - ab;
-    return function(t) {
-      return "#" + d3_rgb_hex(Math.round(ar + br * t)) + d3_rgb_hex(Math.round(ag + bg * t)) + d3_rgb_hex(Math.round(ab + bb * t));
-    };
-  };
-  d3.interpolateHsl = function(a, b) {
-    a = d3.hsl(a);
-    b = d3.hsl(b);
-    var h0 = a.h, s0 = a.s, l0 = a.l, h1 = b.h - h0, s1 = b.s - s0, l1 = b.l - l0;
-    if (h1 > 180) h1 -= 360; else if (h1 < -180) h1 += 360;
-    return function(t) {
-      return d3_hsl_rgb(h0 + h1 * t, s0 + s1 * t, l0 + l1 * t) + "";
-    };
-  };
-  d3.interpolateLab = function(a, b) {
-    a = d3.lab(a);
-    b = d3.lab(b);
-    var al = a.l, aa = a.a, ab = a.b, bl = b.l - al, ba = b.a - aa, bb = b.b - ab;
-    return function(t) {
-      return d3_lab_rgb(al + bl * t, aa + ba * t, ab + bb * t) + "";
-    };
-  };
-  d3.interpolateHcl = function(a, b) {
-    a = d3.hcl(a);
-    b = d3.hcl(b);
-    var ah = a.h, ac = a.c, al = a.l, bh = b.h - ah, bc = b.c - ac, bl = b.l - al;
-    if (bh > 180) bh -= 360; else if (bh < -180) bh += 360;
-    return function(t) {
-      return d3_hcl_lab(ah + bh * t, ac + bc * t, al + bl * t) + "";
-    };
-  };
-  d3.interpolateArray = function(a, b) {
-    var x = [], c = [], na = a.length, nb = b.length, n0 = Math.min(a.length, b.length), i;
-    for (i = 0; i < n0; ++i) x.push(d3.interpolate(a[i], b[i]));
-    for (; i < na; ++i) c[i] = a[i];
-    for (; i < nb; ++i) c[i] = b[i];
-    return function(t) {
-      for (i = 0; i < n0; ++i) c[i] = x[i](t);
-      return c;
-    };
-  };
-  d3.interpolateObject = function(a, b) {
-    var i = {}, c = {}, k;
-    for (k in a) {
-      if (k in b) {
-        i[k] = d3_interpolateByName(k)(a[k], b[k]);
-      } else {
-        c[k] = a[k];
-      }
-    }
-    for (k in b) {
-      if (!(k in a)) {
-        c[k] = b[k];
-      }
-    }
-    return function(t) {
-      for (k in i) c[k] = i[k](t);
-      return c;
-    };
-  };
-  var d3_interpolate_number = /[-+]?(?:\d+\.?\d*|\.?\d+)(?:[eE][-+]?\d+)?/g;
-  d3.interpolators = [ d3.interpolateObject, function(a, b) {
-    return b instanceof Array && d3.interpolateArray(a, b);
-  }, function(a, b) {
-    return (typeof a === "string" || typeof b === "string") && d3.interpolateString(a + "", b + "");
-  }, function(a, b) {
-    return (typeof b === "string" ? d3_rgb_names.has(b) || /^(#|rgb\(|hsl\()/.test(b) : b instanceof d3_Rgb || b instanceof d3_Hsl) && d3.interpolateRgb(a, b);
-  }, function(a, b) {
-    return !isNaN(a = +a) && !isNaN(b = +b) && d3.interpolateNumber(a, b);
-  } ];
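-  // d3.interpolate walks this list from the end backwards, i.e. from most
-  // to least specific: number, color, string, array, then object. Sketches:
-  //   d3.interpolate(0, 100)(0.25);           // => 25
-  //   d3.interpolate("red", "blue")(0.5);     // => "#800080"
-  //   d3.interpolate([0, 10], [10, 20])(0.5); // => [5, 15]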
-  d3.rgb = function(r, g, b) {
-    return arguments.length === 1 ? r instanceof d3_Rgb ? d3_rgb(r.r, r.g, r.b) : d3_rgb_parse("" + r, d3_rgb, d3_hsl_rgb) : d3_rgb(~~r, ~~g, ~~b);
-  };
-  d3_Rgb.prototype.brighter = function(k) {
-    k = Math.pow(.7, arguments.length ? k : 1);
-    var r = this.r, g = this.g, b = this.b, i = 30;
-    if (!r && !g && !b) return d3_rgb(i, i, i);
-    if (r && r < i) r = i;
-    if (g && g < i) g = i;
-    if (b && b < i) b = i;
-    return d3_rgb(Math.min(255, Math.floor(r / k)), Math.min(255, Math.floor(g / k)), Math.min(255, Math.floor(b / k)));
-  };
-  d3_Rgb.prototype.darker = function(k) {
-    k = Math.pow(.7, arguments.length ? k : 1);
-    return d3_rgb(Math.floor(k * this.r), Math.floor(k * this.g), Math.floor(k * this.b));
-  };
-  d3_Rgb.prototype.hsl = function() {
-    return d3_rgb_hsl(this.r, this.g, this.b);
-  };
-  d3_Rgb.prototype.toString = function() {
-    return "#" + d3_rgb_hex(this.r) + d3_rgb_hex(this.g) + d3_rgb_hex(this.b);
-  };
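-  // d3.rgb parses color names, "#rgb"/"#rrggbb" hex and "rgb(...)" strings;
-  // brighter and darker rescale each channel by a factor of 0.7 per step:
-  //   d3.rgb("steelblue") + "";          // => "#4682b4"
-  //   d3.rgb("steelblue").darker() + ""; // => "#315b7e"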
-  var d3_rgb_names = d3.map({
-    aliceblue: "#f0f8ff",
-    antiquewhite: "#faebd7",
-    aqua: "#00ffff",
-    aquamarine: "#7fffd4",
-    azure: "#f0ffff",
-    beige: "#f5f5dc",
-    bisque: "#ffe4c4",
-    black: "#000000",
-    blanchedalmond: "#ffebcd",
-    blue: "#0000ff",
-    blueviolet: "#8a2be2",
-    brown: "#a52a2a",
-    burlywood: "#deb887",
-    cadetblue: "#5f9ea0",
-    chartreuse: "#7fff00",
-    chocolate: "#d2691e",
-    coral: "#ff7f50",
-    cornflowerblue: "#6495ed",
-    cornsilk: "#fff8dc",
-    crimson: "#dc143c",
-    cyan: "#00ffff",
-    darkblue: "#00008b",
-    darkcyan: "#008b8b",
-    darkgoldenrod: "#b8860b",
-    darkgray: "#a9a9a9",
-    darkgreen: "#006400",
-    darkgrey: "#a9a9a9",
-    darkkhaki: "#bdb76b",
-    darkmagenta: "#8b008b",
-    darkolivegreen: "#556b2f",
-    darkorange: "#ff8c00",
-    darkorchid: "#9932cc",
-    darkred: "#8b0000",
-    darksalmon: "#e9967a",
-    darkseagreen: "#8fbc8f",
-    darkslateblue: "#483d8b",
-    darkslategray: "#2f4f4f",
-    darkslategrey: "#2f4f4f",
-    darkturquoise: "#00ced1",
-    darkviolet: "#9400d3",
-    deeppink: "#ff1493",
-    deepskyblue: "#00bfff",
-    dimgray: "#696969",
-    dimgrey: "#696969",
-    dodgerblue: "#1e90ff",
-    firebrick: "#b22222",
-    floralwhite: "#fffaf0",
-    forestgreen: "#228b22",
-    fuchsia: "#ff00ff",
-    gainsboro: "#dcdcdc",
-    ghostwhite: "#f8f8ff",
-    gold: "#ffd700",
-    goldenrod: "#daa520",
-    gray: "#808080",
-    green: "#008000",
-    greenyellow: "#adff2f",
-    grey: "#808080",
-    honeydew: "#f0fff0",
-    hotpink: "#ff69b4",
-    indianred: "#cd5c5c",
-    indigo: "#4b0082",
-    ivory: "#fffff0",
-    khaki: "#f0e68c",
-    lavender: "#e6e6fa",
-    lavenderblush: "#fff0f5",
-    lawngreen: "#7cfc00",
-    lemonchiffon: "#fffacd",
-    lightblue: "#add8e6",
-    lightcoral: "#f08080",
-    lightcyan: "#e0ffff",
-    lightgoldenrodyellow: "#fafad2",
-    lightgray: "#d3d3d3",
-    lightgreen: "#90ee90",
-    lightgrey: "#d3d3d3",
-    lightpink: "#ffb6c1",
-    lightsalmon: "#ffa07a",
-    lightseagreen: "#20b2aa",
-    lightskyblue: "#87cefa",
-    lightslategray: "#778899",
-    lightslategrey: "#778899",
-    lightsteelblue: "#b0c4de",
-    lightyellow: "#ffffe0",
-    lime: "#00ff00",
-    limegreen: "#32cd32",
-    linen: "#faf0e6",
-    magenta: "#ff00ff",
-    maroon: "#800000",
-    mediumaquamarine: "#66cdaa",
-    mediumblue: "#0000cd",
-    mediumorchid: "#ba55d3",
-    mediumpurple: "#9370db",
-    mediumseagreen: "#3cb371",
-    mediumslateblue: "#7b68ee",
-    mediumspringgreen: "#00fa9a",
-    mediumturquoise: "#48d1cc",
-    mediumvioletred: "#c71585",
-    midnightblue: "#191970",
-    mintcream: "#f5fffa",
-    mistyrose: "#ffe4e1",
-    moccasin: "#ffe4b5",
-    navajowhite: "#ffdead",
-    navy: "#000080",
-    oldlace: "#fdf5e6",
-    olive: "#808000",
-    olivedrab: "#6b8e23",
-    orange: "#ffa500",
-    orangered: "#ff4500",
-    orchid: "#da70d6",
-    palegoldenrod: "#eee8aa",
-    palegreen: "#98fb98",
-    paleturquoise: "#afeeee",
-    palevioletred: "#db7093",
-    papayawhip: "#ffefd5",
-    peachpuff: "#ffdab9",
-    peru: "#cd853f",
-    pink: "#ffc0cb",
-    plum: "#dda0dd",
-    powderblue: "#b0e0e6",
-    purple: "#800080",
-    red: "#ff0000",
-    rosybrown: "#bc8f8f",
-    royalblue: "#4169e1",
-    saddlebrown: "#8b4513",
-    salmon: "#fa8072",
-    sandybrown: "#f4a460",
-    seagreen: "#2e8b57",
-    seashell: "#fff5ee",
-    sienna: "#a0522d",
-    silver: "#c0c0c0",
-    skyblue: "#87ceeb",
-    slateblue: "#6a5acd",
-    slategray: "#708090",
-    slategrey: "#708090",
-    snow: "#fffafa",
-    springgreen: "#00ff7f",
-    steelblue: "#4682b4",
-    tan: "#d2b48c",
-    teal: "#008080",
-    thistle: "#d8bfd8",
-    tomato: "#ff6347",
-    turquoise: "#40e0d0",
-    violet: "#ee82ee",
-    wheat: "#f5deb3",
-    white: "#ffffff",
-    whitesmoke: "#f5f5f5",
-    yellow: "#ffff00",
-    yellowgreen: "#9acd32"
-  });
-  d3_rgb_names.forEach(function(key, value) {
-    d3_rgb_names.set(key, d3_rgb_parse(value, d3_rgb, d3_hsl_rgb));
-  });
-  d3.hsl = function(h, s, l) {
-    return arguments.length === 1 ? h instanceof d3_Hsl ? d3_hsl(h.h, h.s, h.l) : d3_rgb_parse("" + h, d3_rgb_hsl, d3_hsl) : d3_hsl(+h, +s, +l);
-  };
-  d3_Hsl.prototype.brighter = function(k) {
-    k = Math.pow(.7, arguments.length ? k : 1);
-    return d3_hsl(this.h, this.s, this.l / k);
-  };
-  d3_Hsl.prototype.darker = function(k) {
-    k = Math.pow(.7, arguments.length ? k : 1);
-    return d3_hsl(this.h, this.s, k * this.l);
-  };
-  d3_Hsl.prototype.rgb = function() {
-    return d3_hsl_rgb(this.h, this.s, this.l);
-  };
-  d3_Hsl.prototype.toString = function() {
-    return this.rgb().toString();
-  };
-  d3.hcl = function(h, c, l) {
-    return arguments.length === 1 ? h instanceof d3_Hcl ? d3_hcl(h.h, h.c, h.l) : h instanceof d3_Lab ? d3_lab_hcl(h.l, h.a, h.b) : d3_lab_hcl((h = d3_rgb_lab((h = d3.rgb(h)).r, h.g, h.b)).l, h.a, h.b) : d3_hcl(+h, +c, +l);
-  };
-  d3_Hcl.prototype.brighter = function(k) {
-    return d3_hcl(this.h, this.c, Math.min(100, this.l + d3_lab_K * (arguments.length ? k : 1)));
-  };
-  d3_Hcl.prototype.darker = function(k) {
-    return d3_hcl(this.h, this.c, Math.max(0, this.l - d3_lab_K * (arguments.length ? k : 1)));
-  };
-  d3_Hcl.prototype.rgb = function() {
-    return d3_hcl_lab(this.h, this.c, this.l).rgb();
-  };
-  d3_Hcl.prototype.toString = function() {
-    return this.rgb() + "";
-  };
-  d3.lab = function(l, a, b) {
-    return arguments.length === 1 ? l instanceof d3_Lab ? d3_lab(l.l, l.a, l.b) : l instanceof d3_Hcl ? d3_hcl_lab(l.l, l.c, l.h) : d3_rgb_lab((l = d3.rgb(l)).r, l.g, l.b) : d3_lab(+l, +a, +b);
-  };
-  var d3_lab_K = 18;
-  var d3_lab_X = .95047, d3_lab_Y = 1, d3_lab_Z = 1.08883;
-  d3_Lab.prototype.brighter = function(k) {
-    return d3_lab(Math.min(100, this.l + d3_lab_K * (arguments.length ? k : 1)), this.a, this.b);
-  };
-  d3_Lab.prototype.darker = function(k) {
-    return d3_lab(Math.max(0, this.l - d3_lab_K * (arguments.length ? k : 1)), this.a, this.b);
-  };
-  d3_Lab.prototype.rgb = function() {
-    return d3_lab_rgb(this.l, this.a, this.b);
-  };
-  d3_Lab.prototype.toString = function() {
-    return this.rgb() + "";
-  };
-  var d3_select = function(s, n) {
-    return n.querySelector(s);
-  }, d3_selectAll = function(s, n) {
-    return n.querySelectorAll(s);
-  }, d3_selectRoot = document.documentElement, d3_selectMatcher = d3_selectRoot.matchesSelector || d3_selectRoot.webkitMatchesSelector || d3_selectRoot.mozMatchesSelector || d3_selectRoot.msMatchesSelector || d3_selectRoot.oMatchesSelector, d3_selectMatches = function(n, s) {
-    return d3_selectMatcher.call(n, s);
-  };
-  if (typeof Sizzle === "function") {
-    d3_select = function(s, n) {
-      return Sizzle(s, n)[0] || null;
-    };
-    d3_selectAll = function(s, n) {
-      return Sizzle.uniqueSort(Sizzle(s, n));
-    };
-    d3_selectMatches = Sizzle.matchesSelector;
-  }
-  var d3_selectionPrototype = [];
-  d3.selection = function() {
-    return d3_selectionRoot;
-  };
-  d3.selection.prototype = d3_selectionPrototype;
-  d3_selectionPrototype.select = function(selector) {
-    var subgroups = [], subgroup, subnode, group, node;
-    if (typeof selector !== "function") selector = d3_selection_selector(selector);
-    for (var j = -1, m = this.length; ++j < m; ) {
-      subgroups.push(subgroup = []);
-      subgroup.parentNode = (group = this[j]).parentNode;
-      for (var i = -1, n = group.length; ++i < n; ) {
-        if (node = group[i]) {
-          subgroup.push(subnode = selector.call(node, node.__data__, i));
-          if (subnode && "__data__" in node) subnode.__data__ = node.__data__;
-        } else {
-          subgroup.push(null);
-        }
-      }
-    }
-    return d3_selection(subgroups);
-  };
-  d3_selectionPrototype.selectAll = function(selector) {
-    var subgroups = [], subgroup, node;
-    if (typeof selector !== "function") selector = d3_selection_selectorAll(selector);
-    for (var j = -1, m = this.length; ++j < m; ) {
-      for (var group = this[j], i = -1, n = group.length; ++i < n; ) {
-        if (node = group[i]) {
-          subgroups.push(subgroup = d3_array(selector.call(node, node.__data__, i)));
-          subgroup.parentNode = node;
-        }
-      }
-    }
-    return d3_selection(subgroups);
-  };
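-  // select and selectAll are the traversal primitives: select maps each
-  // node to at most one match and propagates the node's bound data to it,
-  // while selectAll regroups, making every matched node the parent of a
-  // new group (so data must be rebound). Sketch:
-  //   d3.select("body").selectAll("p"); // every <p> beneath <body>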
-  d3_selectionPrototype.attr = function(name, value) {
-    if (arguments.length < 2) {
-      if (typeof name === "string") {
-        var node = this.node();
-        name = d3.ns.qualify(name);
-        return name.local ? node.getAttributeNS(name.space, name.local) : node.getAttribute(name);
-      }
-      for (value in name) this.each(d3_selection_attr(value, name[value]));
-      return this;
-    }
-    return this.each(d3_selection_attr(name, value));
-  };
-  d3_selectionPrototype.classed = function(name, value) {
-    if (arguments.length < 2) {
-      if (typeof name === "string") {
-        var node = this.node(), n = (name = name.trim().split(/^|\s+/g)).length, i = -1;
-        if (value = node.classList) {
-          while (++i < n) if (!value.contains(name[i])) return false;
-        } else {
-          value = node.className;
-          if (value.baseVal != null) value = value.baseVal;
-          while (++i < n) if (!d3_selection_classedRe(name[i]).test(value)) return false;
-        }
-        return true;
-      }
-      for (value in name) this.each(d3_selection_classed(value, name[value]));
-      return this;
-    }
-    return this.each(d3_selection_classed(name, value));
-  };
-  d3_selectionPrototype.style = function(name, value, priority) {
-    var n = arguments.length;
-    if (n < 3) {
-      if (typeof name !== "string") {
-        if (n < 2) value = "";
-        for (priority in name) this.each(d3_selection_style(priority, name[priority], value));
-        return this;
-      }
-      if (n < 2) return window.getComputedStyle(this.node(), null).getPropertyValue(name);
-      priority = "";
-    }
-    return this.each(d3_selection_style(name, value, priority));
-  };
-  d3_selectionPrototype.property = function(name, value) {
-    if (arguments.length < 2) {
-      if (typeof name === "string") return this.node()[name];
-      for (value in name) this.each(d3_selection_property(value, name[value]));
-      return this;
-    }
-    return this.each(d3_selection_property(name, value));
-  };
-  d3_selectionPrototype.text = function(value) {
-    return arguments.length < 1 ? this.node().textContent : this.each(typeof value === "function" ? function() {
-      var v = value.apply(this, arguments);
-      this.textContent = v == null ? "" : v;
-    } : value == null ? function() {
-      this.textContent = "";
-    } : function() {
-      this.textContent = value;
-    });
-  };
-  d3_selectionPrototype.html = function(value) {
-    return arguments.length < 1 ? this.node().innerHTML : this.each(typeof value === "function" ? function() {
-      var v = value.apply(this, arguments);
-      this.innerHTML = v == null ? "" : v;
-    } : value == null ? function() {
-      this.innerHTML = "";
-    } : function() {
-      this.innerHTML = value;
-    });
-  };
-  d3_selectionPrototype.append = function(name) {
-    function append() {
-      return this.appendChild(document.createElementNS(this.namespaceURI, name));
-    }
-    function appendNS() {
-      return this.appendChild(document.createElementNS(name.space, name.local));
-    }
-    name = d3.ns.qualify(name);
-    return this.select(name.local ? appendNS : append);
-  };
-  d3_selectionPrototype.insert = function(name, before) {
-    function insert() {
-      return this.insertBefore(document.createElementNS(this.namespaceURI, name), d3_select(before, this));
-    }
-    function insertNS() {
-      return this.insertBefore(document.createElementNS(name.space, name.local), d3_select(before, this));
-    }
-    name = d3.ns.qualify(name);
-    return this.select(name.local ? insertNS : insert);
-  };
-  d3_selectionPrototype.remove = function() {
-    return this.each(function() {
-      var parent = this.parentNode;
-      if (parent) parent.removeChild(this);
-    });
-  };
-  d3_selectionPrototype.data = function(value, key) {
-    function bind(group, groupData) {
-      var i, n = group.length, m = groupData.length, n0 = Math.min(n, m), n1 = Math.max(n, m), updateNodes = [], enterNodes = [], exitNodes = [], node, nodeData;
-      if (key) {
-        var nodeByKeyValue = new d3_Map, keyValues = [], keyValue, j = groupData.length;
-        for (i = -1; ++i < n; ) {
-          keyValue = key.call(node = group[i], node.__data__, i);
-          if (nodeByKeyValue.has(keyValue)) {
-            exitNodes[j++] = node;
-          } else {
-            nodeByKeyValue.set(keyValue, node);
-          }
-          keyValues.push(keyValue);
-        }
-        for (i = -1; ++i < m; ) {
-          keyValue = key.call(groupData, nodeData = groupData[i], i);
-          if (nodeByKeyValue.has(keyValue)) {
-            updateNodes[i] = node = nodeByKeyValue.get(keyValue);
-            node.__data__ = nodeData;
-            enterNodes[i] = exitNodes[i] = null;
-          } else {
-            enterNodes[i] = d3_selection_dataNode(nodeData);
-            updateNodes[i] = exitNodes[i] = null;
-          }
-          nodeByKeyValue.remove(keyValue);
-        }
-        for (i = -1; ++i < n; ) {
-          if (nodeByKeyValue.has(keyValues[i])) {
-            exitNodes[i] = group[i];
-          }
-        }
-      } else {
-        for (i = -1; ++i < n0; ) {
-          node = group[i];
-          nodeData = groupData[i];
-          if (node) {
-            node.__data__ = nodeData;
-            updateNodes[i] = node;
-            enterNodes[i] = exitNodes[i] = null;
-          } else {
-            enterNodes[i] = d3_selection_dataNode(nodeData);
-            updateNodes[i] = exitNodes[i] = null;
-          }
-        }
-        for (; i < m; ++i) {
-          enterNodes[i] = d3_selection_dataNode(groupData[i]);
-          updateNodes[i] = exitNodes[i] = null;
-        }
-        for (; i < n1; ++i) {
-          exitNodes[i] = group[i];
-          enterNodes[i] = updateNodes[i] = null;
-        }
-      }
-      enterNodes.update = updateNodes;
-      enterNodes.parentNode = updateNodes.parentNode = exitNodes.parentNode = group.parentNode;
-      enter.push(enterNodes);
-      update.push(updateNodes);
-      exit.push(exitNodes);
-    }
-    var i = -1, n = this.length, group, node;
-    if (!arguments.length) {
-      value = new Array(n = (group = this[0]).length);
-      while (++i < n) {
-        if (node = group[i]) {
-          value[i] = node.__data__;
-        }
-      }
-      return value;
-    }
-    var enter = d3_selection_enter([]), update = d3_selection([]), exit = d3_selection([]);
-    if (typeof value === "function") {
-      while (++i < n) {
-        bind(group = this[i], value.call(group, group.parentNode.__data__, i));
-      }
-    } else {
-      while (++i < n) {
-        bind(group = this[i], value);
-      }
-    }
-    update.enter = function() {
-      return enter;
-    };
-    update.exit = function() {
-      return exit;
-    };
-    return update;
-  };
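-  // .data joins an array to the selection, yielding the enter/update/exit
-  // pattern; the optional key function matches data to nodes by identity
-  // instead of by index. Sketch, binding three numbers to <p> elements:
-  //   var p = d3.select("body").selectAll("p").data([4, 8, 15]);
-  //   p.enter().append("p");             // one <p> per surplus datum
-  //   p.text(function(d) { return d; }); // update every bound node
-  //   p.exit().remove();                 // drop nodes left without data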
-  d3_selectionPrototype.datum = d3_selectionPrototype.map = function(value) {
-    return arguments.length < 1 ? this.property("__data__") : this.property("__data__", value);
-  };
-  d3_selectionPrototype.filter = function(filter) {
-    var subgroups = [], subgroup, group, node;
-    if (typeof filter !== "function") filter = d3_selection_filter(filter);
-    for (var j = 0, m = this.length; j < m; j++) {
-      subgroups.push(subgroup = []);
-      subgroup.parentNode = (group = this[j]).parentNode;
-      for (var i = 0, n = group.length; i < n; i++) {
-        if ((node = group[i]) && filter.call(node, node.__data__, i)) {
-          subgroup.push(node);
-        }
-      }
-    }
-    return d3_selection(subgroups);
-  };
-  d3_selectionPrototype.order = function() {
-    for (var j = -1, m = this.length; ++j < m; ) {
-      for (var group = this[j], i = group.length - 1, next = group[i], node; --i >= 0; ) {
-        if (node = group[i]) {
-          if (next && next !== node.nextSibling) next.parentNode.insertBefore(node, next);
-          next = node;
-        }
-      }
-    }
-    return this;
-  };
-  d3_selectionPrototype.sort = function(comparator) {
-    comparator = d3_selection_sortComparator.apply(this, arguments);
-    for (var j = -1, m = this.length; ++j < m; ) this[j].sort(comparator);
-    return this.order();
-  };
-  d3_selectionPrototype.on = function(type, listener, capture) {
-    var n = arguments.length;
-    if (n < 3) {
-      if (typeof type !== "string") {
-        if (n < 2) listener = false;
-        for (capture in type) this.each(d3_selection_on(capture, type[capture], listener));
-        return this;
-      }
-      if (n < 2) return (n = this.node()["__on" + type]) && n._;
-      capture = false;
-    }
-    return this.each(d3_selection_on(type, listener, capture));
-  };
-  d3_selectionPrototype.each = function(callback) {
-    return d3_selection_each(this, function(node, i, j) {
-      callback.call(node, node.__data__, i, j);
-    });
-  };
-  d3_selectionPrototype.call = function(callback) {
-    callback.apply(this, (arguments[0] = this, arguments));
-    return this;
-  };
-  d3_selectionPrototype.empty = function() {
-    return !this.node();
-  };
-  d3_selectionPrototype.node = function(callback) {
-    for (var j = 0, m = this.length; j < m; j++) {
-      for (var group = this[j], i = 0, n = group.length; i < n; i++) {
-        var node = group[i];
-        if (node) return node;
-      }
-    }
-    return null;
-  };
-  d3_selectionPrototype.transition = function() {
-    var subgroups = [], subgroup, node;
-    for (var j = -1, m = this.length; ++j < m; ) {
-      subgroups.push(subgroup = []);
-      for (var group = this[j], i = -1, n = group.length; ++i < n; ) {
-        subgroup.push((node = group[i]) ? {
-          node: node,
-          delay: d3_transitionDelay,
-          duration: d3_transitionDuration
-        } : null);
-      }
-    }
-    return d3_transition(subgroups, d3_transitionId || ++d3_transitionNextId, Date.now());
-  };
-  var d3_selectionRoot = d3_selection([ [ document ] ]);
-  d3_selectionRoot[0].parentNode = d3_selectRoot;
-  d3.select = function(selector) {
-    return typeof selector === "string" ? d3_selectionRoot.select(selector) : d3_selection([ [ selector ] ]);
-  };
-  d3.selectAll = function(selector) {
-    return typeof selector === "string" ? d3_selectionRoot.selectAll(selector) : d3_selection([ d3_array(selector) ]);
-  };
-  var d3_selection_enterPrototype = [];
-  d3.selection.enter = d3_selection_enter;
-  d3.selection.enter.prototype = d3_selection_enterPrototype;
-  d3_selection_enterPrototype.append = d3_selectionPrototype.append;
-  d3_selection_enterPrototype.insert = d3_selectionPrototype.insert;
-  d3_selection_enterPrototype.empty = d3_selectionPrototype.empty;
-  d3_selection_enterPrototype.node = d3_selectionPrototype.node;
-  d3_selection_enterPrototype.select = function(selector) {
-    var subgroups = [], subgroup, subnode, upgroup, group, node;
-    for (var j = -1, m = this.length; ++j < m; ) {
-      upgroup = (group = this[j]).update;
-      subgroups.push(subgroup = []);
-      subgroup.parentNode = group.parentNode;
-      for (var i = -1, n = group.length; ++i < n; ) {
-        if (node = group[i]) {
-          subgroup.push(upgroup[i] = subnode = selector.call(group.parentNode, node.__data__, i));
-          subnode.__data__ = node.__data__;
-        } else {
-          subgroup.push(null);
-        }
-      }
-    }
-    return d3_selection(subgroups);
-  };
-  var d3_transitionPrototype = [], d3_transitionNextId = 0, d3_transitionId = 0, d3_transitionDefaultDelay = 0, d3_transitionDefaultDuration = 250, d3_transitionDefaultEase = d3.ease("cubic-in-out"), d3_transitionDelay = d3_transitionDefaultDelay, d3_transitionDuration = d3_transitionDefaultDuration, d3_transitionEase = d3_transitionDefaultEase;
-  d3_transitionPrototype.call = d3_selectionPrototype.call;
-  d3.transition = function(selection) {
-    return arguments.length ? d3_transitionId ? selection.transition() : selection : d3_selectionRoot.transition();
-  };
-  d3.transition.prototype = d3_transitionPrototype;
-  d3_transitionPrototype.select = function(selector) {
-    var subgroups = [], subgroup, subnode, node;
-    if (typeof selector !== "function") selector = d3_selection_selector(selector);
-    for (var j = -1, m = this.length; ++j < m; ) {
-      subgroups.push(subgroup = []);
-      for (var group = this[j], i = -1, n = group.length; ++i < n; ) {
-        if ((node = group[i]) && (subnode = selector.call(node.node, node.node.__data__, i))) {
-          if ("__data__" in node.node) subnode.__data__ = node.node.__data__;
-          subgroup.push({
-            node: subnode,
-            delay: node.delay,
-            duration: node.duration
-          });
-        } else {
-          subgroup.push(null);
-        }
-      }
-    }
-    return d3_transition(subgroups, this.id, this.time).ease(this.ease());
-  };
-  d3_transitionPrototype.selectAll = function(selector) {
-    var subgroups = [], subgroup, subnodes, node;
-    if (typeof selector !== "function") selector = d3_selection_selectorAll(selector);
-    for (var j = -1, m = this.length; ++j < m; ) {
-      for (var group = this[j], i = -1, n = group.length; ++i < n; ) {
-        if (node = group[i]) {
-          subnodes = selector.call(node.node, node.node.__data__, i);
-          subgroups.push(subgroup = []);
-          for (var k = -1, o = subnodes.length; ++k < o; ) {
-            subgroup.push({
-              node: subnodes[k],
-              delay: node.delay,
-              duration: node.duration
-            });
-          }
-        }
-      }
-    }
-    return d3_transition(subgroups, this.id, this.time).ease(this.ease());
-  };
-  d3_transitionPrototype.filter = function(filter) {
-    var subgroups = [], subgroup, group, node;
-    if (typeof filter !== "function") filter = d3_selection_filter(filter);
-    for (var j = 0, m = this.length; j < m; j++) {
-      subgroups.push(subgroup = []);
-      for (var group = this[j], i = 0, n = group.length; i < n; i++) {
-        if ((node = group[i]) && filter.call(node.node, node.node.__data__, i)) {
-          subgroup.push(node);
-        }
-      }
-    }
-    return d3_transition(subgroups, this.id, this.time).ease(this.ease());
-  };
-  d3_transitionPrototype.attr = function(name, value) {
-    if (arguments.length < 2) {
-      for (value in name) this.attrTween(value, d3_tweenByName(name[value], value));
-      return this;
-    }
-    return this.attrTween(name, d3_tweenByName(value, name));
-  };
-  d3_transitionPrototype.attrTween = function(nameNS, tween) {
-    function attrTween(d, i) {
-      var f = tween.call(this, d, i, this.getAttribute(name));
-      return f === d3_tweenRemove ? (this.removeAttribute(name), null) : f && function(t) {
-        this.setAttribute(name, f(t));
-      };
-    }
-    function attrTweenNS(d, i) {
-      var f = tween.call(this, d, i, this.getAttributeNS(name.space, name.local));
-      return f === d3_tweenRemove ? (this.removeAttributeNS(name.space, name.local), null) : f && function(t) {
-        this.setAttributeNS(name.space, name.local, f(t));
-      };
-    }
-    var name = d3.ns.qualify(nameNS);
-    return this.tween("attr." + nameNS, name.local ? attrTweenNS : attrTween);
-  };
-  d3_transitionPrototype.style = function(name, value, priority) {
-    var n = arguments.length;
-    if (n < 3) {
-      if (typeof name !== "string") {
-        if (n < 2) value = "";
-        for (priority in name) this.styleTween(priority, d3_tweenByName(name[priority], priority), value);
-        return this;
-      }
-      priority = "";
-    }
-    return this.styleTween(name, d3_tweenByName(value, name), priority);
-  };
-  d3_transitionPrototype.styleTween = function(name, tween, priority) {
-    if (arguments.length < 3) priority = "";
-    return this.tween("style." + name, function(d, i) {
-      var f = tween.call(this, d, i, window.getComputedStyle(this, null).getPropertyValue(name));
-      return f === d3_tweenRemove ? (this.style.removeProperty(name), null) : f && function(t) {
-        this.style.setProperty(name, f(t), priority);
-      };
-    });
-  };
-  d3_transitionPrototype.text = function(value) {
-    return this.tween("text", function(d, i) {
-      this.textContent = typeof value === "function" ? value.call(this, d, i) : value;
-    });
-  };
-  d3_transitionPrototype.remove = function() {
-    return this.each("end.transition", function() {
-      var p;
-      if (!this.__transition__ && (p = this.parentNode)) p.removeChild(this);
-    });
-  };
-  d3_transitionPrototype.delay = function(value) {
-    return d3_selection_each(this, typeof value === "function" ? function(node, i, j) {
-      node.delay = value.call(node = node.node, node.__data__, i, j) | 0;
-    } : (value = value | 0, function(node) {
-      node.delay = value;
-    }));
-  };
-  d3_transitionPrototype.duration = function(value) {
-    return d3_selection_each(this, typeof value === "function" ? function(node, i, j) {
-      node.duration = Math.max(1, value.call(node = node.node, node.__data__, i, j) | 0);
-    } : (value = Math.max(1, value | 0), function(node) {
-      node.duration = value;
-    }));
-  };
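-  // Transition timing is resolved per node: delay and duration accept
-  // constants or functions of the bound data, which makes staggered
-  // animations one-liners. Sketch:
-  //   d3.selectAll("rect").transition()
-  //       .delay(function(d, i) { return i * 50; })
-  //       .duration(500)
-  //       .attr("width", 100);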
-  d3_transitionPrototype.transition = function() {
-    return this.select(d3_this);
-  };
-  d3.tween = function(b, interpolate) {
-    function tweenFunction(d, i, a) {
-      var v = b.call(this, d, i);
-      return v == null ? a != "" && d3_tweenRemove : a != v && interpolate(a, v);
-    }
-    function tweenString(d, i, a) {
-      return a != b && interpolate(a, b);
-    }
-    return typeof b === "function" ? tweenFunction : b == null ? d3_tweenNull : (b += "", tweenString);
-  };
-  var d3_tweenRemove = {};
-  var d3_timer_queue = null, d3_timer_interval, d3_timer_timeout;
-  d3.timer = function(callback, delay, then) {
-    var found = false, t0, t1 = d3_timer_queue;
-    if (arguments.length < 3) {
-      if (arguments.length < 2) delay = 0; else if (!isFinite(delay)) return;
-      then = Date.now();
-    }
-    while (t1) {
-      if (t1.callback === callback) {
-        t1.then = then;
-        t1.delay = delay;
-        found = true;
-        break;
-      }
-      t0 = t1;
-      t1 = t1.next;
-    }
-    if (!found) d3_timer_queue = {
-      callback: callback,
-      then: then,
-      delay: delay,
-      next: d3_timer_queue
-    };
-    if (!d3_timer_interval) {
-      d3_timer_timeout = clearTimeout(d3_timer_timeout);
-      d3_timer_interval = 1;
-      d3_timer_frame(d3_timer_step);
-    }
-  };
-  d3.timer.flush = function() {
-    var elapsed, now = Date.now(), t1 = d3_timer_queue;
-    while (t1) {
-      elapsed = now - t1.then;
-      if (!t1.delay) t1.flush = t1.callback(elapsed);
-      t1 = t1.next;
-    }
-    d3_timer_flush();
-  };
-  var d3_timer_frame = window.requestAnimationFrame || window.webkitRequestAnimationFrame || window.mozRequestAnimationFrame || window.oRequestAnimationFrame || window.msRequestAnimationFrame || function(callback) {
-    setTimeout(callback, 17);
-  };
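-  // d3.timer drives animation via requestAnimationFrame (falling back to a
-  // 17ms setTimeout); the callback receives elapsed milliseconds and is
-  // retired once it returns a truthy value. Sketch:
-  //   d3.timer(function(elapsed) {
-  //     console.log(elapsed);
-  //     return elapsed > 1000; // stop after roughly one second
-  //   });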
-  d3.mouse = function(container) {
-    return d3_mousePoint(container, d3_eventSource());
-  };
-  var d3_mouse_bug44083 = /WebKit/.test(navigator.userAgent) ? -1 : 0;
-  d3.touches = function(container, touches) {
-    if (arguments.length < 2) touches = d3_eventSource().touches;
-    return touches ? d3_array(touches).map(function(touch) {
-      var point = d3_mousePoint(container, touch);
-      point.identifier = touch.identifier;
-      return point;
-    }) : [];
-  };
-  d3.scale = {};
-  d3.scale.linear = function() {
-    return d3_scale_linear([ 0, 1 ], [ 0, 1 ], d3.interpolate, false);
-  };
-  d3.scale.log = function() {
-    return d3_scale_log(d3.scale.linear(), d3_scale_logp);
-  };
-  var d3_scale_logFormat = d3.format(".0e");
-  d3_scale_logp.pow = function(x) {
-    return Math.pow(10, x);
-  };
-  d3_scale_logn.pow = function(x) {
-    return -Math.pow(10, -x);
-  };
-  d3.scale.pow = function() {
-    return d3_scale_pow(d3.scale.linear(), 1);
-  };
-  d3.scale.sqrt = function() {
-    return d3.scale.pow().exponent(.5);
-  };
-  d3.scale.ordinal = function() {
-    return d3_scale_ordinal([], {
-      t: "range",
-      a: [ [] ]
-    });
-  };
-  d3.scale.category10 = function() {
-    return d3.scale.ordinal().range(d3_category10);
-  };
-  d3.scale.category20 = function() {
-    return d3.scale.ordinal().range(d3_category20);
-  };
-  d3.scale.category20b = function() {
-    return d3.scale.ordinal().range(d3_category20b);
-  };
-  d3.scale.category20c = function() {
-    return d3.scale.ordinal().range(d3_category20c);
-  };
-  var d3_category10 = [ "#1f77b4", "#ff7f0e", "#2ca02c", "#d62728", "#9467bd", "#8c564b", "#e377c2", "#7f7f7f", "#bcbd22", "#17becf" ];
-  var d3_category20 = [ "#1f77b4", "#aec7e8", "#ff7f0e", "#ffbb78", "#2ca02c", "#98df8a", "#d62728", "#ff9896", "#9467bd", "#c5b0d5", "#8c564b", "#c49c94", "#e377c2", "#f7b6d2", "#7f7f7f", "#c7c7c7", "#bcbd22", "#dbdb8d", "#17becf", "#9edae5" ];
-  var d3_category20b = [ "#393b79", "#5254a3", "#6b6ecf", "#9c9ede", "#637939", "#8ca252", "#b5cf6b", "#cedb9c", "#8c6d31", "#bd9e39", "#e7ba52", "#e7cb94", "#843c39", "#ad494a", "#d6616b", "#e7969c", "#7b4173", "#a55194", "#ce6dbd", "#de9ed6" ];
-  var d3_category20c = [ "#3182bd", "#6baed6", "#9ecae1", "#c6dbef", "#e6550d", "#fd8d3c", "#fdae6b", "#fdd0a2", "#31a354", "#74c476", "#a1d99b", "#c7e9c0", "#756bb1", "#9e9ac8", "#bcbddc", "#dadaeb", "#636363", "#969696", "#bdbdbd", "#d9d9d9" ];
-  d3.scale.quantile = function() {
-    return d3_scale_quantile([], []);
-  };
-  d3.scale.quantize = function() {
-    return d3_scale_quantize(0, 1, [ 0, 1 ]);
-  };
-  d3.scale.threshold = function() {
-    return d3_scale_threshold([ .5 ], [ 0, 1 ]);
-  };
-  d3.scale.identity = function() {
-    return d3_scale_identity([ 0, 1 ]);
-  };
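-  // Quantitative scales map a continuous numeric domain onto a range;
-  // ordinal scales map discrete values, and the category* constructors
-  // preload them with the color palettes above. Sketch:
-  //   var x = d3.scale.linear().domain([0, 10]).range([0, 400]);
-  //   x(5); // => 200
-  //   var color = d3.scale.category10();
-  //   color("a"); // => "#1f77b4" (new keys claim palette colors in order)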
-  d3.svg = {};
-  d3.svg.arc = function() {
-    function arc() {
-      var r0 = innerRadius.apply(this, arguments), r1 = outerRadius.apply(this, arguments), a0 = startAngle.apply(this, arguments) + d3_svg_arcOffset, a1 = endAngle.apply(this, arguments) + d3_svg_arcOffset, da = (a1 < a0 && (da = a0, a0 = a1, a1 = da), a1 - a0), df = da < Math.PI ? "0" : "1", c0 = Math.cos(a0), s0 = Math.sin(a0), c1 = Math.cos(a1), s1 = Math.sin(a1);
-      return da >= d3_svg_arcMax ? r0 ? "M0," + r1 + "A" + r1 + "," + r1 + " 0 1,1 0," + -r1 + "A" + r1 + "," + r1 + " 0 1,1 0," + r1 + "M0," + r0 + "A" + r0 + "," + r0 + " 0 1,0 0," + -r0 + "A" + r0 + "," + r0 + " 0 1,0 0," + r0 + "Z" : "M0," + r1 + "A" + r1 + "," + r1 + " 0 1,1 0," + -r1 + "A" + r1 + "," + r1 + " 0 1,1 0," + r1 + "Z" : r0 ? "M" + r1 * c0 + "," + r1 * s0 + "A" + r1 + "," + r1 + " 0 " + df + ",1 " + r1 * c1 + "," + r1 * s1 + "L" + r0 * c1 + "," + r0 * s1 + "A" + r0 + "," + r0 + " 0 " + df + ",0 " + r0 * c0 + "," + r0 * s0 + "Z" : "M" + r1 * c0 + "," + r1 * s0 + "A" + r1 + "," + r1 + " 0 " + df + ",1 " + r1 * c1 + "," + r1 * s1 + "L0,0" + "Z";
-    }
-    var innerRadius = d3_svg_arcInnerRadius, outerRadius = d3_svg_arcOuterRadius, startAngle = d3_svg_arcStartAngle, endAngle = d3_svg_arcEndAngle;
-    arc.innerRadius = function(v) {
-      if (!arguments.length) return innerRadius;
-      innerRadius = d3_functor(v);
-      return arc;
-    };
-    arc.outerRadius = function(v) {
-      if (!arguments.length) return outerRadius;
-      outerRadius = d3_functor(v);
-      return arc;
-    };
-    arc.startAngle = function(v) {
-      if (!arguments.length) return startAngle;
-      startAngle = d3_functor(v);
-      return arc;
-    };
-    arc.endAngle = function(v) {
-      if (!arguments.length) return endAngle;
-      endAngle = d3_functor(v);
-      return arc;
-    };
-    arc.centroid = function() {
-      var r = (innerRadius.apply(this, arguments) + outerRadius.apply(this, arguments)) / 2, a = (startAngle.apply(this, arguments) + endAngle.apply(this, arguments)) / 2 + d3_svg_arcOffset;
-      return [ Math.cos(a) * r, Math.sin(a) * r ];
-    };
-    return arc;
-  };
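-  // d3.svg.arc produces SVG path data for an annulus sector; every accessor
-  // takes a constant or a function of the bound datum, and centroid gives
-  // the sector midpoint for label placement. Sketch:
-  //   var arc = d3.svg.arc().innerRadius(40).outerRadius(60)
-  //       .startAngle(0).endAngle(Math.PI / 2);
-  //   d3.select("svg").append("path").attr("d", arc);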
-  var d3_svg_arcOffset = -Math.PI / 2, d3_svg_arcMax = 2 * Math.PI - 1e-6;
-  d3.svg.line = function() {
-    return d3_svg_line(d3_identity);
-  };
-  var d3_svg_lineInterpolators = d3.map({
-    linear: d3_svg_lineLinear,
-    "linear-closed": d3_svg_lineLinearClosed,
-    "step-before": d3_svg_lineStepBefore,
-    "step-after": d3_svg_lineStepAfter,
-    basis: d3_svg_lineBasis,
-    "basis-open": d3_svg_lineBasisOpen,
-    "basis-closed": d3_svg_lineBasisClosed,
-    bundle: d3_svg_lineBundle,
-    cardinal: d3_svg_lineCardinal,
-    "cardinal-open": d3_svg_lineCardinalOpen,
-    "cardinal-closed": d3_svg_lineCardinalClosed,
-    monotone: d3_svg_lineMonotone
-  });
-  d3_svg_lineInterpolators.forEach(function(key, value) {
-    value.key = key;
-    value.closed = /-closed$/.test(key);
-  });
-  var d3_svg_lineBasisBezier1 = [ 0, 2 / 3, 1 / 3, 0 ], d3_svg_lineBasisBezier2 = [ 0, 1 / 3, 2 / 3, 0 ], d3_svg_lineBasisBezier3 = [ 0, 1 / 6, 2 / 3, 1 / 6 ];
-  d3.svg.line.radial = function() {
-    var line = d3_svg_line(d3_svg_lineRadial);
-    line.radius = line.x, delete line.x;
-    line.angle = line.y, delete line.y;
-    return line;
-  };
-  d3_svg_lineStepBefore.reverse = d3_svg_lineStepAfter;
-  d3_svg_lineStepAfter.reverse = d3_svg_lineStepBefore;
-  d3.svg.area = function() {
-    return d3_svg_area(d3_identity);
-  };
-  d3.svg.area.radial = function() {
-    var area = d3_svg_area(d3_svg_lineRadial);
-    area.radius = area.x, delete area.x;
-    area.innerRadius = area.x0, delete area.x0;
-    area.outerRadius = area.x1, delete area.x1;
-    area.angle = area.y, delete area.y;
-    area.startAngle = area.y0, delete area.y0;
-    area.endAngle = area.y1, delete area.y1;
-    return area;
-  };
-  d3.svg.chord = function() {
-    function chord(d, i) {
-      var s = subgroup(this, source, d, i), t = subgroup(this, target, d, i);
-      return "M" + s.p0 + arc(s.r, s.p1, s.a1 - s.a0) + (equals(s, t) ? curve(s.r, s.p1, s.r, s.p0) : curve(s.r, s.p1, t.r, t.p0) + arc(t.r, t.p1, t.a1 - t.a0) + curve(t.r, t.p1, s.r, s.p0)) + "Z";
-    }
-    function subgroup(self, f, d, i) {
-      var subgroup = f.call(self, d, i), r = radius.call(self, subgroup, i), a0 = startAngle.call(self, subgroup, i) + d3_svg_arcOffset, a1 = endAngle.call(self, subgroup, i) + d3_svg_arcOffset;
-      return {
-        r: r,
-        a0: a0,
-        a1: a1,
-        p0: [ r * Math.cos(a0), r * Math.sin(a0) ],
-        p1: [ r * Math.cos(a1), r * Math.sin(a1) ]
-      };
-    }
-    function equals(a, b) {
-      return a.a0 == b.a0 && a.a1 == b.a1;
-    }
-    function arc(r, p, a) {
-      return "A" + r + "," + r + " 0 " + +(a > Math.PI) + ",1 " + p;
-    }
-    function curve(r0, p0, r1, p1) {
-      return "Q 0,0 " + p1;
-    }
-    var source = d3_svg_chordSource, target = d3_svg_chordTarget, radius = d3_svg_chordRadius, startAngle = d3_svg_arcStartAngle, endAngle = d3_svg_arcEndAngle;
-    chord.radius = function(v) {
-      if (!arguments.length) return radius;
-      radius = d3_functor(v);
-      return chord;
-    };
-    chord.source = function(v) {
-      if (!arguments.length) return source;
-      source = d3_functor(v);
-      return chord;
-    };
-    chord.target = function(v) {
-      if (!arguments.length) return target;
-      target = d3_functor(v);
-      return chord;
-    };
-    chord.startAngle = function(v) {
-      if (!arguments.length) return startAngle;
-      startAngle = d3_functor(v);
-      return chord;
-    };
-    chord.endAngle = function(v) {
-      if (!arguments.length) return endAngle;
-      endAngle = d3_functor(v);
-      return chord;
-    };
-    return chord;
-  };
-  d3.svg.diagonal = function() {
-    function diagonal(d, i) {
-      var p0 = source.call(this, d, i), p3 = target.call(this, d, i), m = (p0.y + p3.y) / 2, p = [ p0, {
-        x: p0.x,
-        y: m
-      }, {
-        x: p3.x,
-        y: m
-      }, p3 ];
-      p = p.map(projection);
-      return "M" + p[0] + "C" + p[1] + " " + p[2] + " " + p[3];
-    }
-    var source = d3_svg_chordSource, target = d3_svg_chordTarget, projection = d3_svg_diagonalProjection;
-    diagonal.source = function(x) {
-      if (!arguments.length) return source;
-      source = d3_functor(x);
-      return diagonal;
-    };
-    diagonal.target = function(x) {
-      if (!arguments.length) return target;
-      target = d3_functor(x);
-      return diagonal;
-    };
-    diagonal.projection = function(x) {
-      if (!arguments.length) return projection;
-      projection = x;
-      return diagonal;
-    };
-    return diagonal;
-  };
-  d3.svg.diagonal.radial = function() {
-    var diagonal = d3.svg.diagonal(), projection = d3_svg_diagonalProjection, projection_ = diagonal.projection;
-    diagonal.projection = function(x) {
-      return arguments.length ? projection_(d3_svg_diagonalRadialProjection(projection = x)) : projection;
-    };
-    return diagonal;
-  };
-  d3.svg.mouse = d3.mouse;
-  d3.svg.touches = d3.touches;
-  d3.svg.symbol = function() {
-    function symbol(d, i) {
-      return (d3_svg_symbols.get(type.call(this, d, i)) || d3_svg_symbolCircle)(size.call(this, d, i));
-    }
-    var type = d3_svg_symbolType, size = d3_svg_symbolSize;
-    symbol.type = function(x) {
-      if (!arguments.length) return type;
-      type = d3_functor(x);
-      return symbol;
-    };
-    symbol.size = function(x) {
-      if (!arguments.length) return size;
-      size = d3_functor(x);
-      return symbol;
-    };
-    return symbol;
-  };
-  var d3_svg_symbols = d3.map({
-    circle: d3_svg_symbolCircle,
-    cross: function(size) {
-      var r = Math.sqrt(size / 5) / 2;
-      return "M" + -3 * r + "," + -r + "H" + -r + "V" + -3 * r + "H" + r + "V" + -r + "H" + 3 * r + "V" + r + "H" + r + "V" + 3 * r + "H" + -r + "V" + r + "H" + -3 * r + "Z";
-    },
-    diamond: function(size) {
-      var ry = Math.sqrt(size / (2 * d3_svg_symbolTan30)), rx = ry * d3_svg_symbolTan30;
-      return "M0," + -ry + "L" + rx + ",0" + " 0," + ry + " " + -rx + ",0" + "Z";
-    },
-    square: function(size) {
-      var r = Math.sqrt(size) / 2;
-      return "M" + -r + "," + -r + "L" + r + "," + -r + " " + r + "," + r + " " + -r + "," + r + "Z";
-    },
-    "triangle-down": function(size) {
-      var rx = Math.sqrt(size / d3_svg_symbolSqrt3), ry = rx * d3_svg_symbolSqrt3 / 2;
-      return "M0," + ry + "L" + rx + "," + -ry + " " + -rx + "," + -ry + "Z";
-    },
-    "triangle-up": function(size) {
-      var rx = Math.sqrt(size / d3_svg_symbolSqrt3), ry = rx * d3_svg_symbolSqrt3 / 2;
-      return "M0," + -ry + "L" + rx + "," + ry + " " + -rx + "," + ry + "Z";
-    }
-  });
-  d3.svg.symbolTypes = d3_svg_symbols.keys();
-  var d3_svg_symbolSqrt3 = Math.sqrt(3), d3_svg_symbolTan30 = Math.tan(30 * Math.PI / 180);
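-  // d3.svg.symbol renders one of the shapes above as path data, sized by
-  // area in square pixels:
-  //   d3.select("svg").append("path")
-  //       .attr("d", d3.svg.symbol().type("triangle-up").size(64));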
-  d3.svg.axis = function() {
-    function axis(g) {
-      g.each(function() {
-        var g = d3.select(this);
-        var ticks = tickValues == null ? scale.ticks ? scale.ticks.apply(scale, tickArguments_) : scale.domain() : tickValues, tickFormat = tickFormat_ == null ? scale.tickFormat ? scale.tickFormat.apply(scale, tickArguments_) : String : tickFormat_;
-        var subticks = d3_svg_axisSubdivide(scale, ticks, tickSubdivide), subtick = g.selectAll(".minor").data(subticks, String), subtickEnter = subtick.enter().insert("line", "g").attr("class", "tick minor").style("opacity", 1e-6), subtickExit = d3.transition(subtick.exit()).style("opacity", 1e-6).remove(), subtickUpdate = d3.transition(subtick).style("opacity", 1);
-        var tick = g.selectAll("g").data(ticks, String), tickEnter = tick.enter().insert("g", "path").style("opacity", 1e-6), tickExit = d3.transition(tick.exit()).style("opacity", 1e-6).remove(), tickUpdate = d3.transition(tick).style("opacity", 1), tickTransform;
-        var range = d3_scaleRange(scale), path = g.selectAll(".domain").data([ 0 ]), pathEnter = path.enter().append("path").attr("class", "domain"), pathUpdate = d3.transition(path);
-        var scale1 = scale.copy(), scale0 = this.__chart__ || scale1;
-        this.__chart__ = scale1;
-        tickEnter.append("line").attr("class", "tick");
-        tickEnter.append("text");
-        var lineEnter = tickEnter.select("line"), lineUpdate = tickUpdate.select("line"), text = tick.select("text").text(tickFormat), textEnter = tickEnter.select("text"), textUpdate = tickUpdate.select("text");
-        switch (orient) {
-         case "bottom":
-          {
-            tickTransform = d3_svg_axisX;
-            subtickEnter.attr("y2", tickMinorSize);
-            subtickUpdate.attr("x2", 0).attr("y2", tickMinorSize);
-            lineEnter.attr("y2", tickMajorSize);
-            textEnter.attr("y", Math.max(tickMajorSize, 0) + tickPadding);
-            lineUpdate.attr("x2", 0).attr("y2", tickMajorSize);
-            textUpdate.attr("x", 0).attr("y", Math.max(tickMajorSize, 0) + tickPadding);
-            text.attr("dy", ".71em").attr("text-anchor", "middle");
-            pathUpdate.attr("d", "M" + range[0] + "," + tickEndSize + "V0H" + range[1] + "V" + tickEndSize);
-            break;
-          }
-         case "top":
-          {
-            tickTransform = d3_svg_axisX;
-            subtickEnter.attr("y2", -tickMinorSize);
-            subtickUpdate.attr("x2", 0).attr("y2", -tickMinorSize);
-            lineEnter.attr("y2", -tickMajorSize);
-            textEnter.attr("y", -(Math.max(tickMajorSize, 0) + tickPadding));
-            lineUpdate.attr("x2", 0).attr("y2", -tickMajorSize);
-            textUpdate.attr("x", 0).attr("y", -(Math.max(tickMajorSize, 0) + tickPadding));
-            text.attr("dy", "0em").attr("text-anchor", "middle");
-            pathUpdate.attr("d", "M" + range[0] + "," + -tickEndSize + "V0H" + range[1] + "V" + -tickEndSize);
-            break;
-          }
-         case "left":
-          {
-            tickTransform = d3_svg_axisY;
-            subtickEnter.attr("x2", -tickMinorSize);
-            subtickUpdate.attr("x2", -tickMinorSize).attr("y2", 0);
-            lineEnter.attr("x2", -tickMajorSize);
-            textEnter.attr("x", -(Math.max(tickMajorSize, 0) + tickPadding));
-            lineUpdate.attr("x2", -tickMajorSize).attr("y2", 0);
-            textUpdate.attr("x", -(Math.max(tickMajorSize, 0) + tickPadding)).attr("y", 0);
-            text.attr("dy", ".32em").attr("text-anchor", "end");
-            pathUpdate.attr("d", "M" + -tickEndSize + "," + range[0] + "H0V" + range[1] + "H" + -tickEndSize);
-            break;
-          }
-         case "right":
-          {
-            tickTransform = d3_svg_axisY;
-            subtickEnter.attr("x2", tickMinorSize);
-            subtickUpdate.attr("x2", tickMinorSize).attr("y2", 0);
-            lineEnter.attr("x2", tickMajorSize);
-            textEnter.attr("x", Math.max(tickMajorSize, 0) + tickPadding);
-            lineUpdate.attr("x2", tickMajorSize).attr("y2", 0);
-            textUpdate.attr("x", Math.max(tickMajorSize, 0) + tickPadding).attr("y", 0);
-            text.attr("dy", ".32em").attr("text-anchor", "start");
-            pathUpdate.attr("d", "M" + tickEndSize + "," + range[0] + "H0V" + range[1] + "H" + tickEndSize);
-            break;
-          }
-        }
-        if (scale.ticks) {
-          tickEnter.call(tickTransform, scale0);
-          tickUpdate.call(tickTransform, scale1);
-          tickExit.call(tickTransform, scale1);
-          subtickEnter.call(tickTransform, scale0);
-          subtickUpdate.call(tickTransform, scale1);
-          subtickExit.call(tickTransform, scale1);
-        } else {
-          var dx = scale1.rangeBand() / 2, x = function(d) {
-            return scale1(d) + dx;
-          };
-          tickEnter.call(tickTransform, x);
-          tickUpdate.call(tickTransform, x);
-        }
-      });
-    }
-    var scale = d3.scale.linear(), orient = "bottom", tickMajorSize = 6, tickMinorSize = 6, tickEndSize = 6, tickPadding = 3, tickArguments_ = [ 10 ], tickValues = null, tickFormat_, tickSubdivide = 0;
-    axis.scale = function(x) {
-      if (!arguments.length) return scale;
-      scale = x;
-      return axis;
-    };
-    axis.orient = function(x) {
-      if (!arguments.length) return orient;
-      orient = x;
-      return axis;
-    };
-    axis.ticks = function() {
-      if (!arguments.length) return tickArguments_;
-      tickArguments_ = arguments;
-      return axis;
-    };
-    axis.tickValues = function(x) {
-      if (!arguments.length) return tickValues;
-      tickValues = x;
-      return axis;
-    };
-    axis.tickFormat = function(x) {
-      if (!arguments.length) return tickFormat_;
-      tickFormat_ = x;
-      return axis;
-    };
-    axis.tickSize = function(x, y, z) {
-      if (!arguments.length) return tickMajorSize;
-      var n = arguments.length - 1;
-      tickMajorSize = +x;
-      tickMinorSize = n > 1 ? +y : tickMajorSize;
-      tickEndSize = n > 0 ? +arguments[n] : tickMajorSize;
-      return axis;
-    };
-    axis.tickPadding = function(x) {
-      if (!arguments.length) return tickPadding;
-      tickPadding = +x;
-      return axis;
-    };
-    axis.tickSubdivide = function(x) {
-      if (!arguments.length) return tickSubdivide;
-      tickSubdivide = +x;
-      return axis;
-    };
-    return axis;
-  };
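For reference, a minimal usage sketch of the d3.svg.axis component deleted above, assuming d3 v2 is loaded and `svg` is an existing d3 selection of an <svg> element (both names are illustrative):

    var x = d3.scale.linear().domain([0, 100]).range([0, 400]);
    var xAxis = d3.svg.axis().scale(x).orient("bottom").ticks(5);
    svg.append("g")
        .attr("class", "x axis")
        .attr("transform", "translate(0,300)")   // push the axis to the bottom edge
        .call(xAxis);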
-  d3.svg.brush = function() {
-    function brush(g) {
-      g.each(function() {
-        var g = d3.select(this), bg = g.selectAll(".background").data([ 0 ]), fg = g.selectAll(".extent").data([ 0 ]), tz = g.selectAll(".resize").data(resizes, String), e;
-        g.style("pointer-events", "all").on("mousedown.brush", brushstart).on("touchstart.brush", brushstart);
-        bg.enter().append("rect").attr("class", "background").style("visibility", "hidden").style("cursor", "crosshair");
-        fg.enter().append("rect").attr("class", "extent").style("cursor", "move");
-        tz.enter().append("g").attr("class", function(d) {
-          return "resize " + d;
-        }).style("cursor", function(d) {
-          return d3_svg_brushCursor[d];
-        }).append("rect").attr("x", function(d) {
-          return /[ew]$/.test(d) ? -3 : null;
-        }).attr("y", function(d) {
-          return /^[ns]/.test(d) ? -3 : null;
-        }).attr("width", 6).attr("height", 6).style("visibility", "hidden");
-        tz.style("display", brush.empty() ? "none" : null);
-        tz.exit().remove();
-        if (x) {
-          e = d3_scaleRange(x);
-          bg.attr("x", e[0]).attr("width", e[1] - e[0]);
-          redrawX(g);
-        }
-        if (y) {
-          e = d3_scaleRange(y);
-          bg.attr("y", e[0]).attr("height", e[1] - e[0]);
-          redrawY(g);
-        }
-        redraw(g);
-      });
-    }
-    function redraw(g) {
-      g.selectAll(".resize").attr("transform", function(d) {
-        return "translate(" + extent[+/e$/.test(d)][0] + "," + extent[+/^s/.test(d)][1] + ")";
-      });
-    }
-    function redrawX(g) {
-      g.select(".extent").attr("x", extent[0][0]);
-      g.selectAll(".extent,.n>rect,.s>rect").attr("width", extent[1][0] - extent[0][0]);
-    }
-    function redrawY(g) {
-      g.select(".extent").attr("y", extent[0][1]);
-      g.selectAll(".extent,.e>rect,.w>rect").attr("height", extent[1][1] - extent[0][1]);
-    }
-    function brushstart() {
-      function mouse() {
-        var touches = d3.event.changedTouches;
-        return touches ? d3.touches(target, touches)[0] : d3.mouse(target);
-      }
-      function keydown() {
-        if (d3.event.keyCode == 32) {
-          if (!dragging) {
-            center = null;
-            origin[0] -= extent[1][0];
-            origin[1] -= extent[1][1];
-            dragging = 2;
-          }
-          d3_eventCancel();
-        }
-      }
-      function keyup() {
-        if (d3.event.keyCode == 32 && dragging == 2) {
-          origin[0] += extent[1][0];
-          origin[1] += extent[1][1];
-          dragging = 0;
-          d3_eventCancel();
-        }
-      }
-      function brushmove() {
-        var point = mouse(), moved = false;
-        if (offset) {
-          point[0] += offset[0];
-          point[1] += offset[1];
-        }
-        if (!dragging) {
-          if (d3.event.altKey) {
-            if (!center) center = [ (extent[0][0] + extent[1][0]) / 2, (extent[0][1] + extent[1][1]) / 2 ];
-            origin[0] = extent[+(point[0] < center[0])][0];
-            origin[1] = extent[+(point[1] < center[1])][1];
-          } else center = null;
-        }
-        if (resizingX && move1(point, x, 0)) {
-          redrawX(g);
-          moved = true;
-        }
-        if (resizingY && move1(point, y, 1)) {
-          redrawY(g);
-          moved = true;
-        }
-        if (moved) {
-          redraw(g);
-          event_({
-            type: "brush",
-            mode: dragging ? "move" : "resize"
-          });
-        }
-      }
-      function move1(point, scale, i) {
-        var range = d3_scaleRange(scale), r0 = range[0], r1 = range[1], position = origin[i], size = extent[1][i] - extent[0][i], min, max;
-        if (dragging) {
-          r0 -= position;
-          r1 -= size + position;
-        }
-        min = Math.max(r0, Math.min(r1, point[i]));
-        if (dragging) {
-          max = (min += position) + size;
-        } else {
-          if (center) position = Math.max(r0, Math.min(r1, 2 * center[i] - min));
-          if (position < min) {
-            max = min;
-            min = position;
-          } else {
-            max = position;
-          }
-        }
-        if (extent[0][i] !== min || extent[1][i] !== max) {
-          extentDomain = null;
-          extent[0][i] = min;
-          extent[1][i] = max;
-          return true;
-        }
-      }
-      function brushend() {
-        brushmove();
-        g.style("pointer-events", "all").selectAll(".resize").style("display", brush.empty() ? "none" : null);
-        d3.select("body").style("cursor", null);
-        w.on("mousemove.brush", null).on("mouseup.brush", null).on("touchmove.brush", null).on("touchend.brush", null).on("keydown.brush", null).on("keyup.brush", null);
-        event_({
-          type: "brushend"
-        });
-        d3_eventCancel();
-      }
-      var target = this, eventTarget = d3.select(d3.event.target), event_ = event.of(target, arguments), g = d3.select(target), resizing = eventTarget.datum(), resizingX = !/^(n|s)$/.test(resizing) && x, resizingY = !/^(e|w)$/.test(resizing) && y, dragging = eventTarget.classed("extent"), center, origin = mouse(), offset;
-      var w = d3.select(window).on("mousemove.brush", brushmove).on("mouseup.brush", brushend).on("touchmove.brush", brushmove).on("touchend.brush", brushend).on("keydown.brush", keydown).on("keyup.brush", keyup);
-      if (dragging) {
-        origin[0] = extent[0][0] - origin[0];
-        origin[1] = extent[0][1] - origin[1];
-      } else if (resizing) {
-        var ex = +/w$/.test(resizing), ey = +/^n/.test(resizing);
-        offset = [ extent[1 - ex][0] - origin[0], extent[1 - ey][1] - origin[1] ];
-        origin[0] = extent[ex][0];
-        origin[1] = extent[ey][1];
-      } else if (d3.event.altKey) center = origin.slice();
-      g.style("pointer-events", "none").selectAll(".resize").style("display", null);
-      d3.select("body").style("cursor", eventTarget.style("cursor"));
-      event_({
-        type: "brushstart"
-      });
-      brushmove();
-      d3_eventCancel();
-    }
-    var event = d3_eventDispatch(brush, "brushstart", "brush", "brushend"), x = null, y = null, resizes = d3_svg_brushResizes[0], extent = [ [ 0, 0 ], [ 0, 0 ] ], extentDomain;
-    brush.x = function(z) {
-      if (!arguments.length) return x;
-      x = z;
-      resizes = d3_svg_brushResizes[!x << 1 | !y];
-      return brush;
-    };
-    brush.y = function(z) {
-      if (!arguments.length) return y;
-      y = z;
-      resizes = d3_svg_brushResizes[!x << 1 | !y];
-      return brush;
-    };
-    brush.extent = function(z) {
-      var x0, x1, y0, y1, t;
-      if (!arguments.length) {
-        z = extentDomain || extent;
-        if (x) {
-          x0 = z[0][0], x1 = z[1][0];
-          if (!extentDomain) {
-            x0 = extent[0][0], x1 = extent[1][0];
-            if (x.invert) x0 = x.invert(x0), x1 = x.invert(x1);
-            if (x1 < x0) t = x0, x0 = x1, x1 = t;
-          }
-        }
-        if (y) {
-          y0 = z[0][1], y1 = z[1][1];
-          if (!extentDomain) {
-            y0 = extent[0][1], y1 = extent[1][1];
-            if (y.invert) y0 = y.invert(y0), y1 = y.invert(y1);
-            if (y1 < y0) t = y0, y0 = y1, y1 = t;
-          }
-        }
-        return x && y ? [ [ x0, y0 ], [ x1, y1 ] ] : x ? [ x0, x1 ] : y && [ y0, y1 ];
-      }
-      extentDomain = [ [ 0, 0 ], [ 0, 0 ] ];
-      if (x) {
-        x0 = z[0], x1 = z[1];
-        if (y) x0 = x0[0], x1 = x1[0];
-        extentDomain[0][0] = x0, extentDomain[1][0] = x1;
-        if (x.invert) x0 = x(x0), x1 = x(x1);
-        if (x1 < x0) t = x0, x0 = x1, x1 = t;
-        extent[0][0] = x0 | 0, extent[1][0] = x1 | 0;
-      }
-      if (y) {
-        y0 = z[0], y1 = z[1];
-        if (x) y0 = y0[1], y1 = y1[1];
-        extentDomain[0][1] = y0, extentDomain[1][1] = y1;
-        if (y.invert) y0 = y(y0), y1 = y(y1);
-        if (y1 < y0) t = y0, y0 = y1, y1 = t;
-        extent[0][1] = y0 | 0, extent[1][1] = y1 | 0;
-      }
-      return brush;
-    };
-    brush.clear = function() {
-      extentDomain = null;
-      extent[0][0] = extent[0][1] = extent[1][0] = extent[1][1] = 0;
-      return brush;
-    };
-    brush.empty = function() {
-      return x && extent[0][0] === extent[1][0] || y && extent[0][1] === extent[1][1];
-    };
-    return d3.rebind(brush, event, "on");
-  };
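The brush component above is driven entirely through its accessor and event API; a minimal one-dimensional sketch, reusing the illustrative `x` scale and `svg` selection from before:

    var brush = d3.svg.brush()
        .x(x)                                    // one scale => horizontal brush
        .on("brushend", function() {
          if (!brush.empty()) console.log("selected range:", brush.extent());
        });
    svg.append("g").attr("class", "brush").call(brush)
        .selectAll("rect").attr("height", 100);  // give the extent rect a height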
-  var d3_svg_brushCursor = {
-    n: "ns-resize",
-    e: "ew-resize",
-    s: "ns-resize",
-    w: "ew-resize",
-    nw: "nwse-resize",
-    ne: "nesw-resize",
-    se: "nwse-resize",
-    sw: "nesw-resize"
-  };
-  var d3_svg_brushResizes = [ [ "n", "e", "s", "w", "nw", "ne", "se", "sw" ], [ "e", "w" ], [ "n", "s" ], [] ];
-  d3.behavior = {};
-  d3.behavior.drag = function() {
-    function drag() {
-      this.on("mousedown.drag", mousedown).on("touchstart.drag", mousedown);
-    }
-    function mousedown() {
-      function point() {
-        var p = target.parentNode;
-        return touchId ? d3.touches(p).filter(function(p) {
-          return p.identifier === touchId;
-        })[0] : d3.mouse(p);
-      }
-      function dragmove() {
-        if (!target.parentNode) return dragend();
-        var p = point(), dx = p[0] - origin_[0], dy = p[1] - origin_[1];
-        moved |= dx | dy;
-        origin_ = p;
-        d3_eventCancel();
-        event_({
-          type: "drag",
-          x: p[0] + offset[0],
-          y: p[1] + offset[1],
-          dx: dx,
-          dy: dy
-        });
-      }
-      function dragend() {
-        event_({
-          type: "dragend"
-        });
-        if (moved) {
-          d3_eventCancel();
-          if (d3.event.target === eventTarget) w.on("click.drag", click, true);
-        }
-        w.on(touchId ? "touchmove.drag-" + touchId : "mousemove.drag", null).on(touchId ? "touchend.drag-" + touchId : "mouseup.drag", null);
-      }
-      function click() {
-        d3_eventCancel();
-        w.on("click.drag", null);
-      }
-      var target = this, event_ = event.of(target, arguments), eventTarget = d3.event.target, touchId = d3.event.touches && d3.event.changedTouches[0].identifier, offset, origin_ = point(), moved = 0;
-      var w = d3.select(window).on(touchId ? "touchmove.drag-" + touchId : "mousemove.drag", dragmove).on(touchId ? "touchend.drag-" + touchId : "mouseup.drag", dragend, true);
-      if (origin) {
-        offset = origin.apply(target, arguments);
-        offset = [ offset.x - origin_[0], offset.y - origin_[1] ];
-      } else {
-        offset = [ 0, 0 ];
-      }
-      if (!touchId) d3_eventCancel();
-      event_({
-        type: "dragstart"
-      });
-    }
-    var event = d3_eventDispatch(drag, "drag", "dragstart", "dragend"), origin = null;
-    drag.origin = function(x) {
-      if (!arguments.length) return origin;
-      origin = x;
-      return drag;
-    };
-    return d3.rebind(drag, event, "on");
-  };
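A sketch of how d3.behavior.drag is typically attached, assuming `circles` is an illustrative selection whose bound data carry x/y fields:

    var drag = d3.behavior.drag()
        .origin(function(d) { return d; })       // report the datum's own x/y as origin
        .on("drag", function(d) {
          d.x = d3.event.x;                      // drag events expose x, y, dx, dy
          d.y = d3.event.y;
          d3.select(this).attr("cx", d.x).attr("cy", d.y);
        });
    circles.call(drag);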
-  d3.behavior.zoom = function() {
-    function zoom() {
-      this.on("mousedown.zoom", mousedown).on("mousewheel.zoom", mousewheel).on("mousemove.zoom", mousemove).on("DOMMouseScroll.zoom", mousewheel).on("dblclick.zoom", dblclick).on("touchstart.zoom", touchstart).on("touchmove.zoom", touchmove).on("touchend.zoom", touchstart);
-    }
-    function location(p) {
-      return [ (p[0] - translate[0]) / scale, (p[1] - translate[1]) / scale ];
-    }
-    function point(l) {
-      return [ l[0] * scale + translate[0], l[1] * scale + translate[1] ];
-    }
-    function scaleTo(s) {
-      scale = Math.max(scaleExtent[0], Math.min(scaleExtent[1], s));
-    }
-    function translateTo(p, l) {
-      l = point(l);
-      translate[0] += p[0] - l[0];
-      translate[1] += p[1] - l[1];
-    }
-    function dispatch(event) {
-      if (x1) x1.domain(x0.range().map(function(x) {
-        return (x - translate[0]) / scale;
-      }).map(x0.invert));
-      if (y1) y1.domain(y0.range().map(function(y) {
-        return (y - translate[1]) / scale;
-      }).map(y0.invert));
-      d3.event.preventDefault();
-      event({
-        type: "zoom",
-        scale: scale,
-        translate: translate
-      });
-    }
-    function mousedown() {
-      function mousemove() {
-        moved = 1;
-        translateTo(d3.mouse(target), l);
-        dispatch(event_);
-      }
-      function mouseup() {
-        if (moved) d3_eventCancel();
-        w.on("mousemove.zoom", null).on("mouseup.zoom", null);
-        if (moved && d3.event.target === eventTarget) w.on("click.zoom", click, true);
-      }
-      function click() {
-        d3_eventCancel();
-        w.on("click.zoom", null);
-      }
-      var target = this, event_ = event.of(target, arguments), eventTarget = d3.event.target, moved = 0, w = d3.select(window).on("mousemove.zoom", mousemove).on("mouseup.zoom", mouseup), l = location(d3.mouse(target));
-      window.focus();
-      d3_eventCancel();
-    }
-    function mousewheel() {
-      if (!translate0) translate0 = location(d3.mouse(this));
-      scaleTo(Math.pow(2, d3_behavior_zoomDelta() * .002) * scale);
-      translateTo(d3.mouse(this), translate0);
-      dispatch(event.of(this, arguments));
-    }
-    function mousemove() {
-      translate0 = null;
-    }
-    function dblclick() {
-      var p = d3.mouse(this), l = location(p);
-      scaleTo(d3.event.shiftKey ? scale / 2 : scale * 2);
-      translateTo(p, l);
-      dispatch(event.of(this, arguments));
-    }
-    function touchstart() {
-      var touches = d3.touches(this), now = Date.now();
-      scale0 = scale;
-      translate0 = {};
-      touches.forEach(function(t) {
-        translate0[t.identifier] = location(t);
-      });
-      d3_eventCancel();
-      if (touches.length === 1) {
-        if (now - touchtime < 500) {
-          var p = touches[0], l = location(touches[0]);
-          scaleTo(scale * 2);
-          translateTo(p, l);
-          dispatch(event.of(this, arguments));
-        }
-        touchtime = now;
-      }
-    }
-    function touchmove() {
-      var touches = d3.touches(this), p0 = touches[0], l0 = translate0[p0.identifier];
-      if (p1 = touches[1]) {
-        var p1, l1 = translate0[p1.identifier];
-        p0 = [ (p0[0] + p1[0]) / 2, (p0[1] + p1[1]) / 2 ];
-        l0 = [ (l0[0] + l1[0]) / 2, (l0[1] + l1[1]) / 2 ];
-        scaleTo(d3.event.scale * scale0);
-      }
-      translateTo(p0, l0);
-      touchtime = null;
-      dispatch(event.of(this, arguments));
-    }
-    var translate = [ 0, 0 ], translate0, scale = 1, scale0, scaleExtent = d3_behavior_zoomInfinity, event = d3_eventDispatch(zoom, "zoom"), x0, x1, y0, y1, touchtime;
-    zoom.translate = function(x) {
-      if (!arguments.length) return translate;
-      translate = x.map(Number);
-      return zoom;
-    };
-    zoom.scale = function(x) {
-      if (!arguments.length) return scale;
-      scale = +x;
-      return zoom;
-    };
-    zoom.scaleExtent = function(x) {
-      if (!arguments.length) return scaleExtent;
-      scaleExtent = x == null ? d3_behavior_zoomInfinity : x.map(Number);
-      return zoom;
-    };
-    zoom.x = function(z) {
-      if (!arguments.length) return x1;
-      x1 = z;
-      x0 = z.copy();
-      return zoom;
-    };
-    zoom.y = function(z) {
-      if (!arguments.length) return y1;
-      y1 = z;
-      y0 = z.copy();
-      return zoom;
-    };
-    return d3.rebind(zoom, event, "on");
-  };
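And the matching pattern for d3.behavior.zoom, which dispatches a "zoom" event carrying the current scale and translate (assuming an illustrative `container` group inside `svg`):

    var zoom = d3.behavior.zoom()
        .scaleExtent([1, 8])                     // clamp zooming between 1x and 8x
        .on("zoom", function() {
          container.attr("transform",
              "translate(" + d3.event.translate + ")scale(" + d3.event.scale + ")");
        });
    svg.call(zoom);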
-  var d3_behavior_zoomDiv, d3_behavior_zoomInfinity = [ 0, Infinity ];
-  d3.layout = {};
-  d3.layout.bundle = function() {
-    return function(links) {
-      var paths = [], i = -1, n = links.length;
-      while (++i < n) paths.push(d3_layout_bundlePath(links[i]));
-      return paths;
-    };
-  };
-  d3.layout.chord = function() {
-    function relayout() {
-      var subgroups = {}, groupSums = [], groupIndex = d3.range(n), subgroupIndex = [], k, x, x0, i, j;
-      chords = [];
-      groups = [];
-      k = 0, i = -1;
-      while (++i < n) {
-        x = 0, j = -1;
-        while (++j < n) {
-          x += matrix[i][j];
-        }
-        groupSums.push(x);
-        subgroupIndex.push(d3.range(n));
-        k += x;
-      }
-      if (sortGroups) {
-        groupIndex.sort(function(a, b) {
-          return sortGroups(groupSums[a], groupSums[b]);
-        });
-      }
-      if (sortSubgroups) {
-        subgroupIndex.forEach(function(d, i) {
-          d.sort(function(a, b) {
-            return sortSubgroups(matrix[i][a], matrix[i][b]);
-          });
-        });
-      }
-      k = (2 * Math.PI - padding * n) / k;
-      x = 0, i = -1;
-      while (++i < n) {
-        x0 = x, j = -1;
-        while (++j < n) {
-          var di = groupIndex[i], dj = subgroupIndex[di][j], v = matrix[di][dj], a0 = x, a1 = x += v * k;
-          subgroups[di + "-" + dj] = {
-            index: di,
-            subindex: dj,
-            startAngle: a0,
-            endAngle: a1,
-            value: v
-          };
-        }
-        groups[di] = {
-          index: di,
-          startAngle: x0,
-          endAngle: x,
-          value: (x - x0) / k
-        };
-        x += padding;
-      }
-      i = -1;
-      while (++i < n) {
-        j = i - 1;
-        while (++j < n) {
-          var source = subgroups[i + "-" + j], target = subgroups[j + "-" + i];
-          if (source.value || target.value) {
-            chords.push(source.value < target.value ? {
-              source: target,
-              target: source
-            } : {
-              source: source,
-              target: target
-            });
-          }
-        }
-      }
-      if (sortChords) resort();
-    }
-    function resort() {
-      chords.sort(function(a, b) {
-        return sortChords((a.source.value + a.target.value) / 2, (b.source.value + b.target.value) / 2);
-      });
-    }
-    var chord = {}, chords, groups, matrix, n, padding = 0, sortGroups, sortSubgroups, sortChords;
-    chord.matrix = function(x) {
-      if (!arguments.length) return matrix;
-      n = (matrix = x) && matrix.length;
-      chords = groups = null;
-      return chord;
-    };
-    chord.padding = function(x) {
-      if (!arguments.length) return padding;
-      padding = x;
-      chords = groups = null;
-      return chord;
-    };
-    chord.sortGroups = function(x) {
-      if (!arguments.length) return sortGroups;
-      sortGroups = x;
-      chords = groups = null;
-      return chord;
-    };
-    chord.sortSubgroups = function(x) {
-      if (!arguments.length) return sortSubgroups;
-      sortSubgroups = x;
-      chords = null;
-      return chord;
-    };
-    chord.sortChords = function(x) {
-      if (!arguments.length) return sortChords;
-      sortChords = x;
-      if (chords) resort();
-      return chord;
-    };
-    chord.chords = function() {
-      if (!chords) relayout();
-      return chords;
-    };
-    chord.groups = function() {
-      if (!groups) relayout();
-      return groups;
-    };
-    return chord;
-  };
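d3.layout.chord is purely computational: it turns a square matrix into arc and ribbon descriptors without touching the DOM. A minimal sketch:

    var chord = d3.layout.chord()
        .padding(0.05)
        .sortSubgroups(d3.descending)
        .matrix([ [0, 10], [10, 0] ]);
    var groups = chord.groups();   // one {startAngle, endAngle, value} per matrix row
    var chords = chord.chords();   // one {source, target} ribbon per nonzero pair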
-  d3.layout.force = function() {
-    function repulse(node) {
-      return function(quad, x1, y1, x2, y2) {
-        if (quad.point !== node) {
-          var dx = quad.cx - node.x, dy = quad.cy - node.y, dn = 1 / Math.sqrt(dx * dx + dy * dy);
-          if ((x2 - x1) * dn < theta) {
-            var k = quad.charge * dn * dn;
-            node.px -= dx * k;
-            node.py -= dy * k;
-            return true;
-          }
-          if (quad.point && isFinite(dn)) {
-            var k = quad.pointCharge * dn * dn;
-            node.px -= dx * k;
-            node.py -= dy * k;
-          }
-        }
-        return !quad.charge;
-      };
-    }
-    function dragmove(d) {
-      d.px = d3.event.x;
-      d.py = d3.event.y;
-      force.resume();
-    }
-    var force = {}, event = d3.dispatch("start", "tick", "end"), size = [ 1, 1 ], drag, alpha, friction = .9, linkDistance = d3_layout_forceLinkDistance, linkStrength = d3_layout_forceLinkStrength, charge = -30, gravity = .1, theta = .8, interval, nodes = [], links = [], distances, strengths, charges;
-    force.tick = function() {
-      if ((alpha *= .99) < .005) {
-        event.end({
-          type: "end",
-          alpha: alpha = 0
-        });
-        return true;
-      }
-      var n = nodes.length, m = links.length, q, i, o, s, t, l, k, x, y;
-      for (i = 0; i < m; ++i) {
-        o = links[i];
-        s = o.source;
-        t = o.target;
-        x = t.x - s.x;
-        y = t.y - s.y;
-        if (l = x * x + y * y) {
-          l = alpha * strengths[i] * ((l = Math.sqrt(l)) - distances[i]) / l;
-          x *= l;
-          y *= l;
-          t.x -= x * (k = s.weight / (t.weight + s.weight));
-          t.y -= y * k;
-          s.x += x * (k = 1 - k);
-          s.y += y * k;
-        }
-      }
-      if (k = alpha * gravity) {
-        x = size[0] / 2;
-        y = size[1] / 2;
-        i = -1;
-        if (k) while (++i < n) {
-          o = nodes[i];
-          o.x += (x - o.x) * k;
-          o.y += (y - o.y) * k;
-        }
-      }
-      if (charge) {
-        d3_layout_forceAccumulate(q = d3.geom.quadtree(nodes), alpha, charges);
-        i = -1;
-        while (++i < n) {
-          if (!(o = nodes[i]).fixed) {
-            q.visit(repulse(o));
-          }
-        }
-      }
-      i = -1;
-      while (++i < n) {
-        o = nodes[i];
-        if (o.fixed) {
-          o.x = o.px;
-          o.y = o.py;
-        } else {
-          o.x -= (o.px - (o.px = o.x)) * friction;
-          o.y -= (o.py - (o.py = o.y)) * friction;
-        }
-      }
-      event.tick({
-        type: "tick",
-        alpha: alpha
-      });
-    };
-    force.nodes = function(x) {
-      if (!arguments.length) return nodes;
-      nodes = x;
-      return force;
-    };
-    force.links = function(x) {
-      if (!arguments.length) return links;
-      links = x;
-      return force;
-    };
-    force.size = function(x) {
-      if (!arguments.length) return size;
-      size = x;
-      return force;
-    };
-    force.linkDistance = function(x) {
-      if (!arguments.length) return linkDistance;
-      linkDistance = d3_functor(x);
-      return force;
-    };
-    force.distance = force.linkDistance;
-    force.linkStrength = function(x) {
-      if (!arguments.length) return linkStrength;
-      linkStrength = d3_functor(x);
-      return force;
-    };
-    force.friction = function(x) {
-      if (!arguments.length) return friction;
-      friction = x;
-      return force;
-    };
-    force.charge = function(x) {
-      if (!arguments.length) return charge;
-      charge = typeof x === "function" ? x : +x;
-      return force;
-    };
-    force.gravity = function(x) {
-      if (!arguments.length) return gravity;
-      gravity = x;
-      return force;
-    };
-    force.theta = function(x) {
-      if (!arguments.length) return theta;
-      theta = x;
-      return force;
-    };
-    force.alpha = function(x) {
-      if (!arguments.length) return alpha;
-      if (alpha) {
-        if (x > 0) alpha = x; else alpha = 0;
-      } else if (x > 0) {
-        event.start({
-          type: "start",
-          alpha: alpha = x
-        });
-        d3.timer(force.tick);
-      }
-      return force;
-    };
-    force.start = function() {
-      function position(dimension, size) {
-        var neighbors = neighbor(i), j = -1, m = neighbors.length, x;
-        while (++j < m) if (!isNaN(x = neighbors[j][dimension])) return x;
-        return Math.random() * size;
-      }
-      function neighbor() {
-        if (!neighbors) {
-          neighbors = [];
-          for (j = 0; j < n; ++j) {
-            neighbors[j] = [];
-          }
-          for (j = 0; j < m; ++j) {
-            var o = links[j];
-            neighbors[o.source.index].push(o.target);
-            neighbors[o.target.index].push(o.source);
-          }
-        }
-        return neighbors[i];
-      }
-      var i, j, n = nodes.length, m = links.length, w = size[0], h = size[1], neighbors, o;
-      for (i = 0; i < n; ++i) {
-        (o = nodes[i]).index = i;
-        o.weight = 0;
-      }
-      distances = [];
-      strengths = [];
-      for (i = 0; i < m; ++i) {
-        o = links[i];
-        if (typeof o.source == "number") o.source = nodes[o.source];
-        if (typeof o.target == "number") o.target = nodes[o.target];
-        distances[i] = linkDistance.call(this, o, i);
-        strengths[i] = linkStrength.call(this, o, i);
-        ++o.source.weight;
-        ++o.target.weight;
-      }
-      for (i = 0; i < n; ++i) {
-        o = nodes[i];
-        if (isNaN(o.x)) o.x = position("x", w);
-        if (isNaN(o.y)) o.y = position("y", h);
-        if (isNaN(o.px)) o.px = o.x;
-        if (isNaN(o.py)) o.py = o.y;
-      }
-      charges = [];
-      if (typeof charge === "function") {
-        for (i = 0; i < n; ++i) {
-          charges[i] = +charge.call(this, nodes[i], i);
-        }
-      } else {
-        for (i = 0; i < n; ++i) {
-          charges[i] = charge;
-        }
-      }
-      return force.resume();
-    };
-    force.resume = function() {
-      return force.alpha(.1);
-    };
-    force.stop = function() {
-      return force.alpha(0);
-    };
-    force.drag = function() {
-      if (!drag) drag = d3.behavior.drag().origin(d3_identity).on("dragstart", d3_layout_forceDragstart).on("drag", dragmove).on("dragend", d3_layout_forceDragend);
-      this.on("mouseover.force", d3_layout_forceMouseover).on("mouseout.force", d3_layout_forceMouseout).call(drag);
-    };
-    return d3.rebind(force, event, "on");
-  };
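The force layout runs its simulation on a timer and reports positions via "tick" events; a minimal sketch, assuming illustrative `nodes` and `links` arrays (links may reference nodes by index or by object):

    var force = d3.layout.force()
        .nodes(nodes)
        .links(links)
        .size([960, 500])
        .charge(-120)                            // negative charge repels nodes
        .linkDistance(30)
        .on("tick", function() {
          // reposition the bound SVG elements from node.x / node.y here
        })
        .start();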
-  d3.layout.partition = function() {
-    function position(node, x, dx, dy) {
-      var children = node.children;
-      node.x = x;
-      node.y = node.depth * dy;
-      node.dx = dx;
-      node.dy = dy;
-      if (children && (n = children.length)) {
-        var i = -1, n, c, d;
-        dx = node.value ? dx / node.value : 0;
-        while (++i < n) {
-          position(c = children[i], x, d = c.value * dx, dy);
-          x += d;
-        }
-      }
-    }
-    function depth(node) {
-      var children = node.children, d = 0;
-      if (children && (n = children.length)) {
-        var i = -1, n;
-        while (++i < n) d = Math.max(d, depth(children[i]));
-      }
-      return 1 + d;
-    }
-    function partition(d, i) {
-      var nodes = hierarchy.call(this, d, i);
-      position(nodes[0], 0, size[0], size[1] / depth(nodes[0]));
-      return nodes;
-    }
-    var hierarchy = d3.layout.hierarchy(), size = [ 1, 1 ];
-    partition.size = function(x) {
-      if (!arguments.length) return size;
-      size = x;
-      return partition;
-    };
-    return d3_layout_hierarchyRebind(partition, hierarchy);
-  };
-  d3.layout.pie = function() {
-    function pie(data, i) {
-      var values = data.map(function(d, i) {
-        return +value.call(pie, d, i);
-      });
-      var a = +(typeof startAngle === "function" ? startAngle.apply(this, arguments) : startAngle);
-      var k = ((typeof endAngle === "function" ? endAngle.apply(this, arguments) : endAngle) - startAngle) / d3.sum(values);
-      var index = d3.range(data.length);
-      if (sort != null) index.sort(sort === d3_layout_pieSortByValue ? function(i, j) {
-        return values[j] - values[i];
-      } : function(i, j) {
-        return sort(data[i], data[j]);
-      });
-      var arcs = [];
-      index.forEach(function(i) {
-        var d;
-        arcs[i] = {
-          data: data[i],
-          value: d = values[i],
-          startAngle: a,
-          endAngle: a += d * k
-        };
-      });
-      return arcs;
-    }
-    var value = Number, sort = d3_layout_pieSortByValue, startAngle = 0, endAngle = 2 * Math.PI;
-    pie.value = function(x) {
-      if (!arguments.length) return value;
-      value = x;
-      return pie;
-    };
-    pie.sort = function(x) {
-      if (!arguments.length) return sort;
-      sort = x;
-      return pie;
-    };
-    pie.startAngle = function(x) {
-      if (!arguments.length) return startAngle;
-      startAngle = x;
-      return pie;
-    };
-    pie.endAngle = function(x) {
-      if (!arguments.length) return endAngle;
-      endAngle = x;
-      return pie;
-    };
-    return pie;
-  };
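d3.layout.pie likewise only maps raw values to angle spans; for example:

    var pie = d3.layout.pie().value(function(d) { return d.count; });
    var arcs = pie([ {count: 4}, {count: 8}, {count: 15} ]);
    // each arc: {data, value, startAngle, endAngle}, sorted by descending value by default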
-  var d3_layout_pieSortByValue = {};
-  d3.layout.stack = function() {
-    function stack(data, index) {
-      var series = data.map(function(d, i) {
-        return values.call(stack, d, i);
-      });
-      var points = series.map(function(d, i) {
-        return d.map(function(v, i) {
-          return [ x.call(stack, v, i), y.call(stack, v, i) ];
-        });
-      });
-      var orders = order.call(stack, points, index);
-      series = d3.permute(series, orders);
-      points = d3.permute(points, orders);
-      var offsets = offset.call(stack, points, index);
-      var n = series.length, m = series[0].length, i, j, o;
-      for (j = 0; j < m; ++j) {
-        out.call(stack, series[0][j], o = offsets[j], points[0][j][1]);
-        for (i = 1; i < n; ++i) {
-          out.call(stack, series[i][j], o += points[i - 1][j][1], points[i][j][1]);
-        }
-      }
-      return data;
-    }
-    var values = d3_identity, order = d3_layout_stackOrderDefault, offset = d3_layout_stackOffsetZero, out = d3_layout_stackOut, x = d3_layout_stackX, y = d3_layout_stackY;
-    stack.values = function(x) {
-      if (!arguments.length) return values;
-      values = x;
-      return stack;
-    };
-    stack.order = function(x) {
-      if (!arguments.length) return order;
-      order = typeof x === "function" ? x : d3_layout_stackOrders.get(x) || d3_layout_stackOrderDefault;
-      return stack;
-    };
-    stack.offset = function(x) {
-      if (!arguments.length) return offset;
-      offset = typeof x === "function" ? x : d3_layout_stackOffsets.get(x) || d3_layout_stackOffsetZero;
-      return stack;
-    };
-    stack.x = function(z) {
-      if (!arguments.length) return x;
-      x = z;
-      return stack;
-    };
-    stack.y = function(z) {
-      if (!arguments.length) return y;
-      y = z;
-      return stack;
-    };
-    stack.out = function(z) {
-      if (!arguments.length) return out;
-      out = z;
-      return stack;
-    };
-    return stack;
-  };
-  var d3_layout_stackOrders = d3.map({
-    "inside-out": function(data) {
-      var n = data.length, i, j, max = data.map(d3_layout_stackMaxIndex), sums = data.map(d3_layout_stackReduceSum), index = d3.range(n).sort(function(a, b) {
-        return max[a] - max[b];
-      }), top = 0, bottom = 0, tops = [], bottoms = [];
-      for (i = 0; i < n; ++i) {
-        j = index[i];
-        if (top < bottom) {
-          top += sums[j];
-          tops.push(j);
-        } else {
-          bottom += sums[j];
-          bottoms.push(j);
-        }
-      }
-      return bottoms.reverse().concat(tops);
-    },
-    reverse: function(data) {
-      return d3.range(data.length).reverse();
-    },
-    "default": d3_layout_stackOrderDefault
-  });
-  var d3_layout_stackOffsets = d3.map({
-    silhouette: function(data) {
-      var n = data.length, m = data[0].length, sums = [], max = 0, i, j, o, y0 = [];
-      for (j = 0; j < m; ++j) {
-        for (i = 0, o = 0; i < n; i++) o += data[i][j][1];
-        if (o > max) max = o;
-        sums.push(o);
-      }
-      for (j = 0; j < m; ++j) {
-        y0[j] = (max - sums[j]) / 2;
-      }
-      return y0;
-    },
-    wiggle: function(data) {
-      var n = data.length, x = data[0], m = x.length, max = 0, i, j, k, s1, s2, s3, dx, o, o0, y0 = [];
-      y0[0] = o = o0 = 0;
-      for (j = 1; j < m; ++j) {
-        for (i = 0, s1 = 0; i < n; ++i) s1 += data[i][j][1];
-        for (i = 0, s2 = 0, dx = x[j][0] - x[j - 1][0]; i < n; ++i) {
-          for (k = 0, s3 = (data[i][j][1] - data[i][j - 1][1]) / (2 * dx); k < i; ++k) {
-            s3 += (data[k][j][1] - data[k][j - 1][1]) / dx;
-          }
-          s2 += s3 * data[i][j][1];
-        }
-        y0[j] = o -= s1 ? s2 / s1 * dx : 0;
-        if (o < o0) o0 = o;
-      }
-      for (j = 0; j < m; ++j) y0[j] -= o0;
-      return y0;
-    },
-    expand: function(data) {
-      var n = data.length, m = data[0].length, k = 1 / n, i, j, o, y0 = [];
-      for (j = 0; j < m; ++j) {
-        for (i = 0, o = 0; i < n; i++) o += data[i][j][1];
-        if (o) for (i = 0; i < n; i++) data[i][j][1] /= o; else for (i = 0; i < n; i++) data[i][j][1] = k;
-      }
-      for (j = 0; j < m; ++j) y0[j] = 0;
-      return y0;
-    },
-    zero: d3_layout_stackOffsetZero
-  });
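d3.layout.stack mutates its input in place, writing a y0 baseline onto every point; the string forms of order() and offset() resolve through the maps above. A sketch:

    var layers = d3.layout.stack().offset("wiggle")([
      [ {x: 0, y: 2}, {x: 1, y: 3} ],
      [ {x: 0, y: 1}, {x: 1, y: 2} ]
    ]);
    // layers[i][j].y0 now holds the running baseline, e.g. for streamgraph-style areas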
-  d3.layout.histogram = function() {
-    function histogram(data, i) {
-      var bins = [], values = data.map(valuer, this), range = ranger.call(this, values, i), thresholds = binner.call(this, range, values, i), bin, i = -1, n = values.length, m = thresholds.length - 1, k = frequency ? 1 : 1 / n, x;
-      while (++i < m) {
-        bin = bins[i] = [];
-        bin.dx = thresholds[i + 1] - (bin.x = thresholds[i]);
-        bin.y = 0;
-      }
-      if (m > 0) {
-        i = -1;
-        while (++i < n) {
-          x = values[i];
-          if (x >= range[0] && x <= range[1]) {
-            bin = bins[d3.bisect(thresholds, x, 1, m) - 1];
-            bin.y += k;
-            bin.push(data[i]);
-          }
-        }
-      }
-      return bins;
-    }
-    var frequency = true, valuer = Number, ranger = d3_layout_histogramRange, binner = d3_layout_histogramBinSturges;
-    histogram.value = function(x) {
-      if (!arguments.length) return valuer;
-      valuer = x;
-      return histogram;
-    };
-    histogram.range = function(x) {
-      if (!arguments.length) return ranger;
-      ranger = d3_functor(x);
-      return histogram;
-    };
-    histogram.bins = function(x) {
-      if (!arguments.length) return binner;
-      binner = typeof x === "number" ? function(range) {
-        return d3_layout_histogramBinFixed(range, x);
-      } : d3_functor(x);
-      return histogram;
-    };
-    histogram.frequency = function(x) {
-      if (!arguments.length) return frequency;
-      frequency = !!x;
-      return histogram;
-    };
-    return histogram;
-  };
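d3.layout.histogram bins an array of values; each bin is itself an array of its members, annotated with x, dx and y:

    var bins = d3.layout.histogram().bins(4)([1, 2, 2, 3, 5, 8, 13]);
    bins.forEach(function(bin) {
      console.log(bin.x, bin.x + bin.dx, bin.y, bin.length);
    });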
-  d3.layout.hierarchy = function() {
-    function recurse(data, depth, nodes) {
-      var childs = children.call(hierarchy, data, depth), node = d3_layout_hierarchyInline ? data : {
-        data: data
-      };
-      node.depth = depth;
-      nodes.push(node);
-      if (childs && (n = childs.length)) {
-        var i = -1, n, c = node.children = [], v = 0, j = depth + 1, d;
-        while (++i < n) {
-          d = recurse(childs[i], j, nodes);
-          d.parent = node;
-          c.push(d);
-          v += d.value;
-        }
-        if (sort) c.sort(sort);
-        if (value) node.value = v;
-      } else if (value) {
-        node.value = +value.call(hierarchy, data, depth) || 0;
-      }
-      return node;
-    }
-    function revalue(node, depth) {
-      var children = node.children, v = 0;
-      if (children && (n = children.length)) {
-        var i = -1, n, j = depth + 1;
-        while (++i < n) v += revalue(children[i], j);
-      } else if (value) {
-        v = +value.call(hierarchy, d3_layout_hierarchyInline ? node : node.data, depth) || 0;
-      }
-      if (value) node.value = v;
-      return v;
-    }
-    function hierarchy(d) {
-      var nodes = [];
-      recurse(d, 0, nodes);
-      return nodes;
-    }
-    var sort = d3_layout_hierarchySort, children = d3_layout_hierarchyChildren, value = d3_layout_hierarchyValue;
-    hierarchy.sort = function(x) {
-      if (!arguments.length) return sort;
-      sort = x;
-      return hierarchy;
-    };
-    hierarchy.children = function(x) {
-      if (!arguments.length) return children;
-      children = x;
-      return hierarchy;
-    };
-    hierarchy.value = function(x) {
-      if (!arguments.length) return value;
-      value = x;
-      return hierarchy;
-    };
-    hierarchy.revalue = function(root) {
-      revalue(root, 0);
-      return root;
-    };
-    return hierarchy;
-  };
-  var d3_layout_hierarchyInline = false;
-  d3.layout.pack = function() {
-    function pack(d, i) {
-      var nodes = hierarchy.call(this, d, i), root = nodes[0];
-      root.x = 0;
-      root.y = 0;
-      d3_layout_treeVisitAfter(root, function(d) {
-        d.r = Math.sqrt(d.value);
-      });
-      d3_layout_treeVisitAfter(root, d3_layout_packSiblings);
-      var w = size[0], h = size[1], k = Math.max(2 * root.r / w, 2 * root.r / h);
-      if (padding > 0) {
-        var dr = padding * k / 2;
-        d3_layout_treeVisitAfter(root, function(d) {
-          d.r += dr;
-        });
-        d3_layout_treeVisitAfter(root, d3_layout_packSiblings);
-        d3_layout_treeVisitAfter(root, function(d) {
-          d.r -= dr;
-        });
-        k = Math.max(2 * root.r / w, 2 * root.r / h);
-      }
-      d3_layout_packTransform(root, w / 2, h / 2, 1 / k);
-      return nodes;
-    }
-    var hierarchy = d3.layout.hierarchy().sort(d3_layout_packSort), padding = 0, size = [ 1, 1 ];
-    pack.size = function(x) {
-      if (!arguments.length) return size;
-      size = x;
-      return pack;
-    };
-    pack.padding = function(_) {
-      if (!arguments.length) return padding;
-      padding = +_;
-      return pack;
-    };
-    return d3_layout_hierarchyRebind(pack, hierarchy);
-  };
-  d3.layout.cluster = function() {
-    function cluster(d, i) {
-      var nodes = hierarchy.call(this, d, i), root = nodes[0], previousNode, x = 0, kx, ky;
-      d3_layout_treeVisitAfter(root, function(node) {
-        var children = node.children;
-        if (children && children.length) {
-          node.x = d3_layout_clusterX(children);
-          node.y = d3_layout_clusterY(children);
-        } else {
-          node.x = previousNode ? x += separation(node, previousNode) : 0;
-          node.y = 0;
-          previousNode = node;
-        }
-      });
-      var left = d3_layout_clusterLeft(root), right = d3_layout_clusterRight(root), x0 = left.x - separation(left, right) / 2, x1 = right.x + separation(right, left) / 2;
-      d3_layout_treeVisitAfter(root, function(node) {
-        node.x = (node.x - x0) / (x1 - x0) * size[0];
-        node.y = (1 - (root.y ? node.y / root.y : 1)) * size[1];
-      });
-      return nodes;
-    }
-    var hierarchy = d3.layout.hierarchy().sort(null).value(null), separation = d3_layout_treeSeparation, size = [ 1, 1 ];
-    cluster.separation = function(x) {
-      if (!arguments.length) return separation;
-      separation = x;
-      return cluster;
-    };
-    cluster.size = function(x) {
-      if (!arguments.length) return size;
-      size = x;
-      return cluster;
-    };
-    return d3_layout_hierarchyRebind(cluster, hierarchy);
-  };
-  d3.layout.tree = function() {
-    function tree(d, i) {
-      function firstWalk(node, previousSibling) {
-        var children = node.children, layout = node._tree;
-        if (children && (n = children.length)) {
-          var n, firstChild = children[0], previousChild, ancestor = firstChild, child, i = -1;
-          while (++i < n) {
-            child = children[i];
-            firstWalk(child, previousChild);
-            ancestor = apportion(child, previousChild, ancestor);
-            previousChild = child;
-          }
-          d3_layout_treeShift(node);
-          var midpoint = .5 * (firstChild._tree.prelim + child._tree.prelim);
-          if (previousSibling) {
-            layout.prelim = previousSibling._tree.prelim + separation(node, previousSibling);
-            layout.mod = layout.prelim - midpoint;
-          } else {
-            layout.prelim = midpoint;
-          }
-        } else {
-          if (previousSibling) {
-            layout.prelim = previousSibling._tree.prelim + separation(node, previousSibling);
-          }
-        }
-      }
-      function secondWalk(node, x) {
-        node.x = node._tree.prelim + x;
-        var children = node.children;
-        if (children && (n = children.length)) {
-          var i = -1, n;
-          x += node._tree.mod;
-          while (++i < n) {
-            secondWalk(children[i], x);
-          }
-        }
-      }
-      function apportion(node, previousSibling, ancestor) {
-        if (previousSibling) {
-          var vip = node, vop = node, vim = previousSibling, vom = node.parent.children[0], sip = vip._tree.mod, sop = vop._tree.mod, sim = vim._tree.mod, som = vom._tree.mod, shift;
-          while (vim = d3_layout_treeRight(vim), vip = d3_layout_treeLeft(vip), vim && vip) {
-            vom = d3_layout_treeLeft(vom);
-            vop = d3_layout_treeRight(vop);
-            vop._tree.ancestor = node;
-            shift = vim._tree.prelim + sim - vip._tree.prelim - sip + separation(vim, vip);
-            if (shift > 0) {
-              d3_layout_treeMove(d3_layout_treeAncestor(vim, node, ancestor), node, shift);
-              sip += shift;
-              sop += shift;
-            }
-            sim += vim._tree.mod;
-            sip += vip._tree.mod;
-            som += vom._tree.mod;
-            sop += vop._tree.mod;
-          }
-          if (vim && !d3_layout_treeRight(vop)) {
-            vop._tree.thread = vim;
-            vop._tree.mod += sim - sop;
-          }
-          if (vip && !d3_layout_treeLeft(vom)) {
-            vom._tree.thread = vip;
-            vom._tree.mod += sip - som;
-            ancestor = node;
-          }
-        }
-        return ancestor;
-      }
-      var nodes = hierarchy.call(this, d, i), root = nodes[0];
-      d3_layout_treeVisitAfter(root, function(node, previousSibling) {
-        node._tree = {
-          ancestor: node,
-          prelim: 0,
-          mod: 0,
-          change: 0,
-          shift: 0,
-          number: previousSibling ? previousSibling._tree.number + 1 : 0
-        };
-      });
-      firstWalk(root);
-      secondWalk(root, -root._tree.prelim);
-      var left = d3_layout_treeSearch(root, d3_layout_treeLeftmost), right = d3_layout_treeSearch(root, d3_layout_treeRightmost), deep = d3_layout_treeSearch(root, d3_layout_treeDeepest), x0 = left.x - separation(left, right) / 2, x1 = right.x + separation(right, left) / 2, y1 = deep.depth || 1;
-      d3_layout_treeVisitAfter(root, function(node) {
-        node.x = (node.x - x0) / (x1 - x0) * size[0];
-        node.y = node.depth / y1 * size[1];
-        delete node._tree;
-      });
-      return nodes;
-    }
-    var hierarchy = d3.layout.hierarchy().sort(null).value(null), separation = d3_layout_treeSeparation, size = [ 1, 1 ];
-    tree.separation = function(x) {
-      if (!arguments.length) return separation;
-      separation = x;
-      return tree;
-    };
-    tree.size = function(x) {
-      if (!arguments.length) return size;
-      size = x;
-      return tree;
-    };
-    return d3_layout_hierarchyRebind(tree, hierarchy);
-  };
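The tree layout (a Buchheim-style tidy tree, per the firstWalk/secondWalk/apportion passes above) consumes a nested object through the shared hierarchy accessors; a sketch with an illustrative input:

    var tree = d3.layout.tree().size([300, 200]);
    var nodes = tree({name: "root", children: [ {name: "a"}, {name: "b"} ]});
    var links = tree.links(nodes);   // parent/child pairs for drawing edges
    // every node now carries x/y coordinates normalized to the given size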
-  d3.layout.treemap = function() {
-    function scale(children, k) {
-      var i = -1, n = children.length, child, area;
-      while (++i < n) {
-        area = (child = children[i]).value * (k < 0 ? 0 : k);
-        child.area = isNaN(area) || area <= 0 ? 0 : area;
-      }
-    }
-    function squarify(node) {
-      var children = node.children;
-      if (children && children.length) {
-        var rect = pad(node), row = [], remaining = children.slice(), child, best = Infinity, score, u = Math.min(rect.dx, rect.dy), n;
-        scale(remaining, rect.dx * rect.dy / node.value);
-        row.area = 0;
-        while ((n = remaining.length) > 0) {
-          row.push(child = remaining[n - 1]);
-          row.area += child.area;
-          if ((score = worst(row, u)) <= best) {
-            remaining.pop();
-            best = score;
-          } else {
-            row.area -= row.pop().area;
-            position(row, u, rect, false);
-            u = Math.min(rect.dx, rect.dy);
-            row.length = row.area = 0;
-            best = Infinity;
-          }
-        }
-        if (row.length) {
-          position(row, u, rect, true);
-          row.length = row.area = 0;
-        }
-        children.forEach(squarify);
-      }
-    }
-    function stickify(node) {
-      var children = node.children;
-      if (children && children.length) {
-        var rect = pad(node), remaining = children.slice(), child, row = [];
-        scale(remaining, rect.dx * rect.dy / node.value);
-        row.area = 0;
-        while (child = remaining.pop()) {
-          row.push(child);
-          row.area += child.area;
-          if (child.z != null) {
-            position(row, child.z ? rect.dx : rect.dy, rect, !remaining.length);
-            row.length = row.area = 0;
-          }
-        }
-        children.forEach(stickify);
-      }
-    }
-    function worst(row, u) {
-      var s = row.area, r, rmax = 0, rmin = Infinity, i = -1, n = row.length;
-      while (++i < n) {
-        if (!(r = row[i].area)) continue;
-        if (r < rmin) rmin = r;
-        if (r > rmax) rmax = r;
-      }
-      s *= s;
-      u *= u;
-      return s ? Math.max(u * rmax * ratio / s, s / (u * rmin * ratio)) : Infinity;
-    }
-    function position(row, u, rect, flush) {
-      var i = -1, n = row.length, x = rect.x, y = rect.y, v = u ? round(row.area / u) : 0, o;
-      if (u == rect.dx) {
-        if (flush || v > rect.dy) v = rect.dy;
-        while (++i < n) {
-          o = row[i];
-          o.x = x;
-          o.y = y;
-          o.dy = v;
-          x += o.dx = Math.min(rect.x + rect.dx - x, v ? round(o.area / v) : 0);
-        }
-        o.z = true;
-        o.dx += rect.x + rect.dx - x;
-        rect.y += v;
-        rect.dy -= v;
-      } else {
-        if (flush || v > rect.dx) v = rect.dx;
-        while (++i < n) {
-          o = row[i];
-          o.x = x;
-          o.y = y;
-          o.dx = v;
-          y += o.dy = Math.min(rect.y + rect.dy - y, v ? round(o.area / v) : 0);
-        }
-        o.z = false;
-        o.dy += rect.y + rect.dy - y;
-        rect.x += v;
-        rect.dx -= v;
-      }
-    }
-    function treemap(d) {
-      var nodes = stickies || hierarchy(d), root = nodes[0];
-      root.x = 0;
-      root.y = 0;
-      root.dx = size[0];
-      root.dy = size[1];
-      if (stickies) hierarchy.revalue(root);
-      scale([ root ], root.dx * root.dy / root.value);
-      (stickies ? stickify : squarify)(root);
-      if (sticky) stickies = nodes;
-      return nodes;
-    }
-    var hierarchy = d3.layout.hierarchy(), round = Math.round, size = [ 1, 1 ], padding = null, pad = d3_layout_treemapPadNull, sticky = false, stickies, ratio = .5 * (1 + Math.sqrt(5));
-    treemap.size = function(x) {
-      if (!arguments.length) return size;
-      size = x;
-      return treemap;
-    };
-    treemap.padding = function(x) {
-      function padFunction(node) {
-        var p = x.call(treemap, node, node.depth);
-        return p == null ? d3_layout_treemapPadNull(node) : d3_layout_treemapPad(node, typeof p === "number" ? [ p, p, p, p ] : p);
-      }
-      function padConstant(node) {
-        return d3_layout_treemapPad(node, x);
-      }
-      if (!arguments.length) return padding;
-      var type;
-      pad = (padding = x) == null ? d3_layout_treemapPadNull : (type = typeof x) === "function" ? padFunction : type === "number" ? (x = [ x, x, x, x ], padConstant) : padConstant;
-      return treemap;
-    };
-    treemap.round = function(x) {
-      if (!arguments.length) return round != Number;
-      round = x ? Math.round : Number;
-      return treemap;
-    };
-    treemap.sticky = function(x) {
-      if (!arguments.length) return sticky;
-      sticky = x;
-      stickies = null;
-      return treemap;
-    };
-    treemap.ratio = function(x) {
-      if (!arguments.length) return ratio;
-      ratio = x;
-      return treemap;
-    };
-    return d3_layout_hierarchyRebind(treemap, hierarchy);
-  };
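d3.layout.treemap (squarified by default, with a golden-ratio target aspect) assigns each node a rectangular cell; value() and children() are rebound from the underlying hierarchy. A sketch:

    var treemap = d3.layout.treemap()
        .size([400, 300])
        .value(function(d) { return d.size; });
    var nodes = treemap({name: "root", children: [ {name: "a", size: 4},
                                                   {name: "b", size: 6} ]});
    // each node gains x, y, dx, dy describing its rectangle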
-  d3.csv = d3_dsv(",", "text/csv");
-  d3.tsv = d3_dsv("	", "text/tab-separated-values");
-  d3.geo = {};
-  var d3_geo_radians = Math.PI / 180;
-  d3.geo.azimuthal = function() {
-    function azimuthal(coordinates) {
-      var x1 = coordinates[0] * d3_geo_radians - x0, y1 = coordinates[1] * d3_geo_radians, cx1 = Math.cos(x1), sx1 = Math.sin(x1), cy1 = Math.cos(y1), sy1 = Math.sin(y1), cc = mode !== "orthographic" ? sy0 * sy1 + cy0 * cy1 * cx1 : null, c, k = mode === "stereographic" ? 1 / (1 + cc) : mode === "gnomonic" ? 1 / cc : mode === "equidistant" ? (c = Math.acos(cc), c ? c / Math.sin(c) : 0) : mode === "equalarea" ? Math.sqrt(2 / (1 + cc)) : 1, x = k * cy1 * sx1, y = k * (sy0 * cy1 * cx1 - cy0 * sy1);
-      return [ scale * x + translate[0], scale * y + translate[1] ];
-    }
-    var mode = "orthographic", origin, scale = 200, translate = [ 480, 250 ], x0, y0, cy0, sy0;
-    azimuthal.invert = function(coordinates) {
-      var x = (coordinates[0] - translate[0]) / scale, y = (coordinates[1] - translate[1]) / scale, p = Math.sqrt(x * x + y * y), c = mode === "stereographic" ? 2 * Math.atan(p) : mode === "gnomonic" ? Math.atan(p) : mode === "equidistant" ? p : mode === "equalarea" ? 2 * Math.asin(.5 * p) : Math.asin(p), sc = Math.sin(c), cc = Math.cos(c);
-      return [ (x0 + Math.atan2(x * sc, p * cy0 * cc + y * sy0 * sc)) / d3_geo_radians, Math.asin(cc * sy0 - (p ? y * sc * cy0 / p : 0)) / d3_geo_radians ];
-    };
-    azimuthal.mode = function(x) {
-      if (!arguments.length) return mode;
-      mode = x + "";
-      return azimuthal;
-    };
-    azimuthal.origin = function(x) {
-      if (!arguments.length) return origin;
-      origin = x;
-      x0 = origin[0] * d3_geo_radians;
-      y0 = origin[1] * d3_geo_radians;
-      cy0 = Math.cos(y0);
-      sy0 = Math.sin(y0);
-      return azimuthal;
-    };
-    azimuthal.scale = function(x) {
-      if (!arguments.length) return scale;
-      scale = +x;
-      return azimuthal;
-    };
-    azimuthal.translate = function(x) {
-      if (!arguments.length) return translate;
-      translate = [ +x[0], +x[1] ];
-      return azimuthal;
-    };
-    return azimuthal.origin([ 0, 0 ]);
-  };
-  d3.geo.albers = function() {
-    function albers(coordinates) {
-      var t = n * (d3_geo_radians * coordinates[0] - lng0), p = Math.sqrt(C - 2 * n * Math.sin(d3_geo_radians * coordinates[1])) / n;
-      return [ scale * p * Math.sin(t) + translate[0], scale * (p * Math.cos(t) - p0) + translate[1] ];
-    }
-    function reload() {
-      var phi1 = d3_geo_radians * parallels[0], phi2 = d3_geo_radians * parallels[1], lat0 = d3_geo_radians * origin[1], s = Math.sin(phi1), c = Math.cos(phi1);
-      lng0 = d3_geo_radians * origin[0];
-      n = .5 * (s + Math.sin(phi2));
-      C = c * c + 2 * n * s;
-      p0 = Math.sqrt(C - 2 * n * Math.sin(lat0)) / n;
-      return albers;
-    }
-    var origin = [ -98, 38 ], parallels = [ 29.5, 45.5 ], scale = 1e3, translate = [ 480, 250 ], lng0, n, C, p0;
-    albers.invert = function(coordinates) {
-      var x = (coordinates[0] - translate[0]) / scale, y = (coordinates[1] - translate[1]) / scale, p0y = p0 + y, t = Math.atan2(x, p0y), p = Math.sqrt(x * x + p0y * p0y);
-      return [ (lng0 + t / n) / d3_geo_radians, Math.asin((C - p * p * n * n) / (2 * n)) / d3_geo_radians ];
-    };
-    albers.origin = function(x) {
-      if (!arguments.length) return origin;
-      origin = [ +x[0], +x[1] ];
-      return reload();
-    };
-    albers.parallels = function(x) {
-      if (!arguments.length) return parallels;
-      parallels = [ +x[0], +x[1] ];
-      return reload();
-    };
-    albers.scale = function(x) {
-      if (!arguments.length) return scale;
-      scale = +x;
-      return albers;
-    };
-    albers.translate = function(x) {
-      if (!arguments.length) return translate;
-      translate = [ +x[0], +x[1] ];
-      return albers;
-    };
-    return reload();
-  };
-  d3.geo.albersUsa = function() {
-    function albersUsa(coordinates) {
-      var lon = coordinates[0], lat = coordinates[1];
-      return (lat > 50 ? alaska : lon < -140 ? hawaii : lat < 21 ? puertoRico : lower48)(coordinates);
-    }
-    var lower48 = d3.geo.albers();
-    var alaska = d3.geo.albers().origin([ -160, 60 ]).parallels([ 55, 65 ]);
-    var hawaii = d3.geo.albers().origin([ -160, 20 ]).parallels([ 8, 18 ]);
-    var puertoRico = d3.geo.albers().origin([ -60, 10 ]).parallels([ 8, 18 ]);
-    albersUsa.scale = function(x) {
-      if (!arguments.length) return lower48.scale();
-      lower48.scale(x);
-      alaska.scale(x * .6);
-      hawaii.scale(x);
-      puertoRico.scale(x * 1.5);
-      return albersUsa.translate(lower48.translate());
-    };
-    albersUsa.translate = function(x) {
-      if (!arguments.length) return lower48.translate();
-      var dz = lower48.scale() / 1e3, dx = x[0], dy = x[1];
-      lower48.translate(x);
-      alaska.translate([ dx - 400 * dz, dy + 170 * dz ]);
-      hawaii.translate([ dx - 190 * dz, dy + 200 * dz ]);
-      puertoRico.translate([ dx + 580 * dz, dy + 430 * dz ]);
-      return albersUsa;
-    };
-    return albersUsa.scale(lower48.scale());
-  };
-  d3.geo.bonne = function() {
-    function bonne(coordinates) {
-      var x = coordinates[0] * d3_geo_radians - x0, y = coordinates[1] * d3_geo_radians - y0;
-      if (y1) {
-        var p = c1 + y1 - y, E = x * Math.cos(y) / p;
-        x = p * Math.sin(E);
-        y = p * Math.cos(E) - c1;
-      } else {
-        x *= Math.cos(y);
-        y *= -1;
-      }
-      return [ scale * x + translate[0], scale * y + translate[1] ];
-    }
-    var scale = 200, translate = [ 480, 250 ], x0, y0, y1, c1;
-    bonne.invert = function(coordinates) {
-      var x = (coordinates[0] - translate[0]) / scale, y = (coordinates[1] - translate[1]) / scale;
-      if (y1) {
-        var c = c1 + y, p = Math.sqrt(x * x + c * c);
-        y = c1 + y1 - p;
-        x = x0 + p * Math.atan2(x, c) / Math.cos(y);
-      } else {
-        y *= -1;
-        x /= Math.cos(y);
-      }
-      return [ x / d3_geo_radians, y / d3_geo_radians ];
-    };
-    bonne.parallel = function(x) {
-      if (!arguments.length) return y1 / d3_geo_radians;
-      c1 = 1 / Math.tan(y1 = x * d3_geo_radians);
-      return bonne;
-    };
-    bonne.origin = function(x) {
-      if (!arguments.length) return [ x0 / d3_geo_radians, y0 / d3_geo_radians ];
-      x0 = x[0] * d3_geo_radians;
-      y0 = x[1] * d3_geo_radians;
-      return bonne;
-    };
-    bonne.scale = function(x) {
-      if (!arguments.length) return scale;
-      scale = +x;
-      return bonne;
-    };
-    bonne.translate = function(x) {
-      if (!arguments.length) return translate;
-      translate = [ +x[0], +x[1] ];
-      return bonne;
-    };
-    return bonne.origin([ 0, 0 ]).parallel(45);
-  };
-  d3.geo.equirectangular = function() {
-    function equirectangular(coordinates) {
-      var x = coordinates[0] / 360, y = -coordinates[1] / 360;
-      return [ scale * x + translate[0], scale * y + translate[1] ];
-    }
-    var scale = 500, translate = [ 480, 250 ];
-    equirectangular.invert = function(coordinates) {
-      var x = (coordinates[0] - translate[0]) / scale, y = (coordinates[1] - translate[1]) / scale;
-      return [ 360 * x, -360 * y ];
-    };
-    equirectangular.scale = function(x) {
-      if (!arguments.length) return scale;
-      scale = +x;
-      return equirectangular;
-    };
-    equirectangular.translate = function(x) {
-      if (!arguments.length) return translate;
-      translate = [ +x[0], +x[1] ];
-      return equirectangular;
-    };
-    return equirectangular;
-  };
-  d3.geo.mercator = function() {
-    function mercator(coordinates) {
-      var x = coordinates[0] / 360, y = -(Math.log(Math.tan(Math.PI / 4 + coordinates[1] * d3_geo_radians / 2)) / d3_geo_radians) / 360;
-      return [ scale * x + translate[0], scale * Math.max(-.5, Math.min(.5, y)) + translate[1] ];
-    }
-    var scale = 500, translate = [ 480, 250 ];
-    mercator.invert = function(coordinates) {
-      var x = (coordinates[0] - translate[0]) / scale, y = (coordinates[1] - translate[1]) / scale;
-      return [ 360 * x, 2 * Math.atan(Math.exp(-360 * y * d3_geo_radians)) / d3_geo_radians - 90 ];
-    };
-    mercator.scale = function(x) {
-      if (!arguments.length) return scale;
-      scale = +x;
-      return mercator;
-    };
-    mercator.translate = function(x) {
-      if (!arguments.length) return translate;
-      translate = [ +x[0], +x[1] ];
-      return mercator;
-    };
-    return mercator;
-  };
-  d3.geo.path = function() {
-    function path(d, i) {
-      if (typeof pointRadius === "function") pointCircle = d3_path_circle(pointRadius.apply(this, arguments));
-      pathType(d);
-      var result = buffer.length ? buffer.join("") : null;
-      buffer = [];
-      return result;
-    }
-    function project(coordinates) {
-      return projection(coordinates).join(",");
-    }
-    function polygonArea(coordinates) {
-      var sum = area(coordinates[0]), i = 0, n = coordinates.length;
-      while (++i < n) sum -= area(coordinates[i]);
-      return sum;
-    }
-    function polygonCentroid(coordinates) {
-      var polygon = d3.geom.polygon(coordinates[0].map(projection)), area = polygon.area(), centroid = polygon.centroid(area < 0 ? (area *= -1, 1) : -1), x = centroid[0], y = centroid[1], z = area, i = 0, n = coordinates.length;
-      while (++i < n) {
-        polygon = d3.geom.polygon(coordinates[i].map(projection));
-        area = polygon.area();
-        centroid = polygon.centroid(area < 0 ? (area *= -1, 1) : -1);
-        x -= centroid[0];
-        y -= centroid[1];
-        z -= area;
-      }
-      return [ x, y, 6 * z ];
-    }
-    function area(coordinates) {
-      return Math.abs(d3.geom.polygon(coordinates.map(projection)).area());
-    }
-    var pointRadius = 4.5, pointCircle = d3_path_circle(pointRadius), projection = d3.geo.albersUsa(), buffer = [];
-    var pathType = d3_geo_type({
-      FeatureCollection: function(o) {
-        var features = o.features, i = -1, n = features.length;
-        while (++i < n) buffer.push(pathType(features[i].geometry));
-      },
-      Feature: function(o) {
-        pathType(o.geometry);
-      },
-      Point: function(o) {
-        buffer.push("M", project(o.coordinates), pointCircle);
-      },
-      MultiPoint: function(o) {
-        var coordinates = o.coordinates, i = -1, n = coordinates.length;
-        while (++i < n) buffer.push("M", project(coordinates[i]), pointCircle);
-      },
-      LineString: function(o) {
-        var coordinates = o.coordinates, i = -1, n = coordinates.length;
-        buffer.push("M");
-        while (++i < n) buffer.push(project(coordinates[i]), "L");
-        buffer.pop();
-      },
-      MultiLineString: function(o) {
-        var coordinates = o.coordinates, i = -1, n = coordinates.length, subcoordinates, j, m;
-        while (++i < n) {
-          subcoordinates = coordinates[i];
-          j = -1;
-          m = subcoordinates.length;
-          buffer.push("M");
-          while (++j < m) buffer.push(project(subcoordinates[j]), "L");
-          buffer.pop();
-        }
-      },
-      Polygon: function(o) {
-        var coordinates = o.coordinates, i = -1, n = coordinates.length, subcoordinates, j, m;
-        while (++i < n) {
-          subcoordinates = coordinates[i];
-          j = -1;
-          if ((m = subcoordinates.length - 1) > 0) {
-            buffer.push("M");
-            while (++j < m) buffer.push(project(subcoordinates[j]), "L");
-            buffer[buffer.length - 1] = "Z";
-          }
-        }
-      },
-      MultiPolygon: function(o) {
-        var coordinates = o.coordinates, i = -1, n = coordinates.length, subcoordinates, j, m, subsubcoordinates, k, p;
-        while (++i < n) {
-          subcoordinates = coordinates[i];
-          j = -1;
-          m = subcoordinates.length;
-          while (++j < m) {
-            subsubcoordinates = subcoordinates[j];
-            k = -1;
-            if ((p = subsubcoordinates.length - 1) > 0) {
-              buffer.push("M");
-              while (++k < p) buffer.push(project(subsubcoordinates[k]), "L");
-              buffer[buffer.length - 1] = "Z";
-            }
-          }
-        }
-      },
-      GeometryCollection: function(o) {
-        var geometries = o.geometries, i = -1, n = geometries.length;
-        while (++i < n) buffer.push(pathType(geometries[i]));
-      }
-    });
-    var areaType = path.area = d3_geo_type({
-      FeatureCollection: function(o) {
-        var area = 0, features = o.features, i = -1, n = features.length;
-        while (++i < n) area += areaType(features[i]);
-        return area;
-      },
-      Feature: function(o) {
-        return areaType(o.geometry);
-      },
-      Polygon: function(o) {
-        return polygonArea(o.coordinates);
-      },
-      MultiPolygon: function(o) {
-        var sum = 0, coordinates = o.coordinates, i = -1, n = coordinates.length;
-        while (++i < n) sum += polygonArea(coordinates[i]);
-        return sum;
-      },
-      GeometryCollection: function(o) {
-        var sum = 0, geometries = o.geometries, i = -1, n = geometries.length;
-        while (++i < n) sum += areaType(geometries[i]);
-        return sum;
-      }
-    }, 0);
-    var centroidType = path.centroid = d3_geo_type({
-      Feature: function(o) {
-        return centroidType(o.geometry);
-      },
-      Polygon: function(o) {
-        var centroid = polygonCentroid(o.coordinates);
-        return [ centroid[0] / centroid[2], centroid[1] / centroid[2] ];
-      },
-      MultiPolygon: function(o) {
-        var area = 0, coordinates = o.coordinates, centroid, x = 0, y = 0, z = 0, i = -1, n = coordinates.length;
-        while (++i < n) {
-          centroid = polygonCentroid(coordinates[i]);
-          x += centroid[0];
-          y += centroid[1];
-          z += centroid[2];
-        }
-        return [ x / z, y / z ];
-      }
-    });
-    path.projection = function(x) {
-      projection = x;
-      return path;
-    };
-    path.pointRadius = function(x) {
-      if (typeof x === "function") pointRadius = x; else {
-        pointRadius = +x;
-        pointCircle = d3_path_circle(pointRadius);
-      }
-      return path;
-    };
-    return path;
-  };
-  d3.geo.bounds = function(feature) {
-    var left = Infinity, bottom = Infinity, right = -Infinity, top = -Infinity;
-    d3_geo_bounds(feature, function(x, y) {
-      if (x < left) left = x;
-      if (x > right) right = x;
-      if (y < bottom) bottom = y;
-      if (y > top) top = y;
-    });
-    return [ [ left, bottom ], [ right, top ] ];
-  };
-  var d3_geo_boundsTypes = {
-    Feature: d3_geo_boundsFeature,
-    FeatureCollection: d3_geo_boundsFeatureCollection,
-    GeometryCollection: d3_geo_boundsGeometryCollection,
-    LineString: d3_geo_boundsLineString,
-    MultiLineString: d3_geo_boundsMultiLineString,
-    MultiPoint: d3_geo_boundsLineString,
-    MultiPolygon: d3_geo_boundsMultiPolygon,
-    Point: d3_geo_boundsPoint,
-    Polygon: d3_geo_boundsPolygon
-  };
-  d3.geo.circle = function() {
-    function circle() {}
-    function visible(point) {
-      return arc.distance(point) < radians;
-    }
-    function clip(coordinates) {
-      var i = -1, n = coordinates.length, clipped = [], p0, p1, p2, d0, d1;
-      while (++i < n) {
-        d1 = arc.distance(p2 = coordinates[i]);
-        if (d1 < radians) {
-          if (p1) clipped.push(d3_geo_greatArcInterpolate(p1, p2)((d0 - radians) / (d0 - d1)));
-          clipped.push(p2);
-          p0 = p1 = null;
-        } else {
-          p1 = p2;
-          if (!p0 && clipped.length) {
-            clipped.push(d3_geo_greatArcInterpolate(clipped[clipped.length - 1], p1)((radians - d0) / (d1 - d0)));
-            p0 = p1;
-          }
-        }
-        d0 = d1;
-      }
-      p0 = coordinates[0];
-      p1 = clipped[0];
-      if (p1 && p2[0] === p0[0] && p2[1] === p0[1] && !(p2[0] === p1[0] && p2[1] === p1[1])) {
-        clipped.push(p1);
-      }
-      return resample(clipped);
-    }
-    function resample(coordinates) {
-      var i = 0, n = coordinates.length, j, m, resampled = n ? [ coordinates[0] ] : coordinates, resamples, origin = arc.source();
-      while (++i < n) {
-        resamples = arc.source(coordinates[i - 1])(coordinates[i]).coordinates;
-        for (j = 0, m = resamples.length; ++j < m; ) resampled.push(resamples[j]);
-      }
-      arc.source(origin);
-      return resampled;
-    }
-    var origin = [ 0, 0 ], degrees = 90 - .01, radians = degrees * d3_geo_radians, arc = d3.geo.greatArc().source(origin).target(d3_identity);
-    circle.clip = function(d) {
-      if (typeof origin === "function") arc.source(origin.apply(this, arguments));
-      return clipType(d) || null;
-    };
-    var clipType = d3_geo_type({
-      FeatureCollection: function(o) {
-        var features = o.features.map(clipType).filter(d3_identity);
-        return features && (o = Object.create(o), o.features = features, o);
-      },
-      Feature: function(o) {
-        var geometry = clipType(o.geometry);
-        return geometry && (o = Object.create(o), o.geometry = geometry, o);
-      },
-      Point: function(o) {
-        return visible(o.coordinates) && o;
-      },
-      MultiPoint: function(o) {
-        var coordinates = o.coordinates.filter(visible);
-        return coordinates.length && {
-          type: o.type,
-          coordinates: coordinates
-        };
-      },
-      LineString: function(o) {
-        var coordinates = clip(o.coordinates);
-        return coordinates.length && (o = Object.create(o), o.coordinates = coordinates, o);
-      },
-      MultiLineString: function(o) {
-        var coordinates = o.coordinates.map(clip).filter(function(d) {
-          return d.length;
-        });
-        return coordinates.length && (o = Object.create(o), o.coordinates = coordinates, o);
-      },
-      Polygon: function(o) {
-        var coordinates = o.coordinates.map(clip);
-        return coordinates[0].length && (o = Object.create(o), o.coordinates = coordinates, o);
-      },
-      MultiPolygon: function(o) {
-        var coordinates = o.coordinates.map(function(d) {
-          return d.map(clip);
-        }).filter(function(d) {
-          return d[0].length;
-        });
-        return coordinates.length && (o = Object.create(o), o.coordinates = coordinates, o);
-      },
-      GeometryCollection: function(o) {
-        var geometries = o.geometries.map(clipType).filter(d3_identity);
-        return geometries.length && (o = Object.create(o), o.geometries = geometries, o);
-      }
-    });
-    circle.origin = function(x) {
-      if (!arguments.length) return origin;
-      origin = x;
-      if (typeof origin !== "function") arc.source(origin);
-      return circle;
-    };
-    circle.angle = function(x) {
-      if (!arguments.length) return degrees;
-      radians = (degrees = +x) * d3_geo_radians;
-      return circle;
-    };
-    return d3.rebind(circle, arc, "precision");
-  };
-  d3.geo.greatArc = function() {
-    function greatArc() {
-      var d = greatArc.distance.apply(this, arguments), t = 0, dt = precision / d, coordinates = [ p0 ];
-      while ((t += dt) < 1) coordinates.push(interpolate(t));
-      coordinates.push(p1);
-      return {
-        type: "LineString",
-        coordinates: coordinates
-      };
-    }
-    var source = d3_geo_greatArcSource, p0, target = d3_geo_greatArcTarget, p1, precision = 6 * d3_geo_radians, interpolate = d3_geo_greatArcInterpolator();
-    greatArc.distance = function() {
-      if (typeof source === "function") interpolate.source(p0 = source.apply(this, arguments));
-      if (typeof target === "function") interpolate.target(p1 = target.apply(this, arguments));
-      return interpolate.distance();
-    };
-    greatArc.source = function(_) {
-      if (!arguments.length) return source;
-      source = _;
-      if (typeof source !== "function") interpolate.source(p0 = source);
-      return greatArc;
-    };
-    greatArc.target = function(_) {
-      if (!arguments.length) return target;
-      target = _;
-      if (typeof target !== "function") interpolate.target(p1 = target);
-      return greatArc;
-    };
-    greatArc.precision = function(_) {
-      if (!arguments.length) return precision / d3_geo_radians;
-      precision = _ * d3_geo_radians;
-      return greatArc;
-    };
-    return greatArc;
-  };
-  d3.geo.greatCircle = d3.geo.circle;
-  d3.geom = {};
-  d3.geom.contour = function(grid, start) {
-    var s = start || d3_geom_contourStart(grid), c = [], x = s[0], y = s[1], dx = 0, dy = 0, pdx = NaN, pdy = NaN, i = 0;
-    do {
-      i = 0;
-      if (grid(x - 1, y - 1)) i += 1;
-      if (grid(x, y - 1)) i += 2;
-      if (grid(x - 1, y)) i += 4;
-      if (grid(x, y)) i += 8;
-      if (i === 6) {
-        dx = pdy === -1 ? -1 : 1;
-        dy = 0;
-      } else if (i === 9) {
-        dx = 0;
-        dy = pdx === 1 ? -1 : 1;
-      } else {
-        dx = d3_geom_contourDx[i];
-        dy = d3_geom_contourDy[i];
-      }
-      if (dx != pdx && dy != pdy) {
-        c.push([ x, y ]);
-        pdx = dx;
-        pdy = dy;
-      }
-      x += dx;
-      y += dy;
-    } while (s[0] != x || s[1] != y);
-    return c;
-  };
-  var d3_geom_contourDx = [ 1, 0, 1, 1, -1, 0, -1, 1, 0, 0, 0, 0, -1, 0, -1, NaN ], d3_geom_contourDy = [ 0, -1, 0, 0, 0, -1, 0, 0, 1, -1, 1, 1, 0, -1, 0, NaN ];
-  d3.geom.hull = function(vertices) {
-    if (vertices.length < 3) return [];
-    var len = vertices.length, plen = len - 1, points = [], stack = [], i, j, h = 0, x1, y1, x2, y2, u, v, a, sp;
-    for (i = 1; i < len; ++i) {
-      if (vertices[i][1] < vertices[h][1]) {
-        h = i;
-      } else if (vertices[i][1] == vertices[h][1]) {
-        h = vertices[i][0] < vertices[h][0] ? i : h;
-      }
-    }
-    for (i = 0; i < len; ++i) {
-      if (i === h) continue;
-      y1 = vertices[i][1] - vertices[h][1];
-      x1 = vertices[i][0] - vertices[h][0];
-      points.push({
-        angle: Math.atan2(y1, x1),
-        index: i
-      });
-    }
-    points.sort(function(a, b) {
-      return a.angle - b.angle;
-    });
-    a = points[0].angle;
-    v = points[0].index;
-    u = 0;
-    for (i = 1; i < plen; ++i) {
-      j = points[i].index;
-      if (a == points[i].angle) {
-        x1 = vertices[v][0] - vertices[h][0];
-        y1 = vertices[v][1] - vertices[h][1];
-        x2 = vertices[j][0] - vertices[h][0];
-        y2 = vertices[j][1] - vertices[h][1];
-        if (x1 * x1 + y1 * y1 >= x2 * x2 + y2 * y2) {
-          points[i].index = -1;
-        } else {
-          points[u].index = -1;
-          a = points[i].angle;
-          u = i;
-          v = j;
-        }
-      } else {
-        a = points[i].angle;
-        u = i;
-        v = j;
-      }
-    }
-    stack.push(h);
-    for (i = 0, j = 0; i < 2; ++j) {
-      if (points[j].index !== -1) {
-        stack.push(points[j].index);
-        i++;
-      }
-    }
-    sp = stack.length;
-    for (; j < plen; ++j) {
-      if (points[j].index === -1) continue;
-      while (!d3_geom_hullCCW(stack[sp - 2], stack[sp - 1], points[j].index, vertices)) {
-        --sp;
-      }
-      stack[sp++] = points[j].index;
-    }
-    var poly = [];
-    for (i = 0; i < sp; ++i) {
-      poly.push(vertices[stack[i]]);
-    }
-    return poly;
-  };
-  d3.geom.polygon = function(coordinates) {
-    coordinates.area = function() {
-      var i = 0, n = coordinates.length, a = coordinates[n - 1][0] * coordinates[0][1], b = coordinates[n - 1][1] * coordinates[0][0];
-      while (++i < n) {
-        a += coordinates[i - 1][0] * coordinates[i][1];
-        b += coordinates[i - 1][1] * coordinates[i][0];
-      }
-      return (b - a) * .5;
-    };
-    coordinates.centroid = function(k) {
-      var i = -1, n = coordinates.length, x = 0, y = 0, a, b = coordinates[n - 1], c;
-      if (!arguments.length) k = -1 / (6 * coordinates.area());
-      while (++i < n) {
-        a = b;
-        b = coordinates[i];
-        c = a[0] * b[1] - b[0] * a[1];
-        x += (a[0] + b[0]) * c;
-        y += (a[1] + b[1]) * c;
-      }
-      return [ x * k, y * k ];
-    };
-    coordinates.clip = function(subject) {
-      var input, i = -1, n = coordinates.length, j, m, a = coordinates[n - 1], b, c, d;
-      while (++i < n) {
-        input = subject.slice();
-        subject.length = 0;
-        b = coordinates[i];
-        c = input[(m = input.length) - 1];
-        j = -1;
-        while (++j < m) {
-          d = input[j];
-          if (d3_geom_polygonInside(d, a, b)) {
-            if (!d3_geom_polygonInside(c, a, b)) {
-              subject.push(d3_geom_polygonIntersect(c, d, a, b));
-            }
-            subject.push(d);
-          } else if (d3_geom_polygonInside(c, a, b)) {
-            subject.push(d3_geom_polygonIntersect(c, d, a, b));
-          }
-          c = d;
-        }
-        a = b;
-      }
-      return subject;
-    };
-    return coordinates;
-  };
-  d3.geom.voronoi = function(vertices) {
-    var polygons = vertices.map(function() {
-      return [];
-    });
-    d3_voronoi_tessellate(vertices, function(e) {
-      var s1, s2, x1, x2, y1, y2;
-      if (e.a === 1 && e.b >= 0) {
-        s1 = e.ep.r;
-        s2 = e.ep.l;
-      } else {
-        s1 = e.ep.l;
-        s2 = e.ep.r;
-      }
-      if (e.a === 1) {
-        y1 = s1 ? s1.y : -1e6;
-        x1 = e.c - e.b * y1;
-        y2 = s2 ? s2.y : 1e6;
-        x2 = e.c - e.b * y2;
-      } else {
-        x1 = s1 ? s1.x : -1e6;
-        y1 = e.c - e.a * x1;
-        x2 = s2 ? s2.x : 1e6;
-        y2 = e.c - e.a * x2;
-      }
-      var v1 = [ x1, y1 ], v2 = [ x2, y2 ];
-      polygons[e.region.l.index].push(v1, v2);
-      polygons[e.region.r.index].push(v1, v2);
-    });
-    return polygons.map(function(polygon, i) {
-      var cx = vertices[i][0], cy = vertices[i][1];
-      polygon.forEach(function(v) {
-        v.angle = Math.atan2(v[0] - cx, v[1] - cy);
-      });
-      return polygon.sort(function(a, b) {
-        return a.angle - b.angle;
-      }).filter(function(d, i) {
-        return !i || d.angle - polygon[i - 1].angle > 1e-10;
-      });
-    });
-  };
-  var d3_voronoi_opposite = {
-    l: "r",
-    r: "l"
-  };
-  d3.geom.delaunay = function(vertices) {
-    var edges = vertices.map(function() {
-      return [];
-    }), triangles = [];
-    d3_voronoi_tessellate(vertices, function(e) {
-      edges[e.region.l.index].push(vertices[e.region.r.index]);
-    });
-    edges.forEach(function(edge, i) {
-      var v = vertices[i], cx = v[0], cy = v[1];
-      edge.forEach(function(v) {
-        v.angle = Math.atan2(v[0] - cx, v[1] - cy);
-      });
-      edge.sort(function(a, b) {
-        return a.angle - b.angle;
-      });
-      for (var j = 0, m = edge.length - 1; j < m; j++) {
-        triangles.push([ v, edge[j], edge[j + 1] ]);
-      }
-    });
-    return triangles;
-  };
-  d3.geom.quadtree = function(points, x1, y1, x2, y2) {
-    function insert(n, p, x1, y1, x2, y2) {
-      if (isNaN(p.x) || isNaN(p.y)) return;
-      if (n.leaf) {
-        var v = n.point;
-        if (v) {
-          if (Math.abs(v.x - p.x) + Math.abs(v.y - p.y) < .01) {
-            insertChild(n, p, x1, y1, x2, y2);
-          } else {
-            n.point = null;
-            insertChild(n, v, x1, y1, x2, y2);
-            insertChild(n, p, x1, y1, x2, y2);
-          }
-        } else {
-          n.point = p;
-        }
-      } else {
-        insertChild(n, p, x1, y1, x2, y2);
-      }
-    }
-    function insertChild(n, p, x1, y1, x2, y2) {
-      var sx = (x1 + x2) * .5, sy = (y1 + y2) * .5, right = p.x >= sx, bottom = p.y >= sy, i = (bottom << 1) + right;
-      n.leaf = false;
-      n = n.nodes[i] || (n.nodes[i] = d3_geom_quadtreeNode());
-      if (right) x1 = sx; else x2 = sx;
-      if (bottom) y1 = sy; else y2 = sy;
-      insert(n, p, x1, y1, x2, y2);
-    }
-    var p, i = -1, n = points.length;
-    if (n && isNaN(points[0].x)) points = points.map(d3_geom_quadtreePoint);
-    if (arguments.length < 5) {
-      if (arguments.length === 3) {
-        y2 = x2 = y1;
-        y1 = x1;
-      } else {
-        x1 = y1 = Infinity;
-        x2 = y2 = -Infinity;
-        while (++i < n) {
-          p = points[i];
-          if (p.x < x1) x1 = p.x;
-          if (p.y < y1) y1 = p.y;
-          if (p.x > x2) x2 = p.x;
-          if (p.y > y2) y2 = p.y;
-        }
-        var dx = x2 - x1, dy = y2 - y1;
-        if (dx > dy) y2 = y1 + dx; else x2 = x1 + dy;
-      }
-    }
-    var root = d3_geom_quadtreeNode();
-    root.add = function(p) {
-      insert(root, p, x1, y1, x2, y2);
-    };
-    root.visit = function(f) {
-      d3_geom_quadtreeVisit(f, root, x1, y1, x2, y2);
-    };
-    points.forEach(root.add);
-    return root;
-  };
-  d3.time = {};
-  var d3_time = Date, d3_time_daySymbols = [ "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday" ];
-  d3_time_utc.prototype = {
-    getDate: function() {
-      return this._.getUTCDate();
-    },
-    getDay: function() {
-      return this._.getUTCDay();
-    },
-    getFullYear: function() {
-      return this._.getUTCFullYear();
-    },
-    getHours: function() {
-      return this._.getUTCHours();
-    },
-    getMilliseconds: function() {
-      return this._.getUTCMilliseconds();
-    },
-    getMinutes: function() {
-      return this._.getUTCMinutes();
-    },
-    getMonth: function() {
-      return this._.getUTCMonth();
-    },
-    getSeconds: function() {
-      return this._.getUTCSeconds();
-    },
-    getTime: function() {
-      return this._.getTime();
-    },
-    getTimezoneOffset: function() {
-      return 0;
-    },
-    valueOf: function() {
-      return this._.valueOf();
-    },
-    setDate: function() {
-      d3_time_prototype.setUTCDate.apply(this._, arguments);
-    },
-    setDay: function() {
-      d3_time_prototype.setUTCDay.apply(this._, arguments);
-    },
-    setFullYear: function() {
-      d3_time_prototype.setUTCFullYear.apply(this._, arguments);
-    },
-    setHours: function() {
-      d3_time_prototype.setUTCHours.apply(this._, arguments);
-    },
-    setMilliseconds: function() {
-      d3_time_prototype.setUTCMilliseconds.apply(this._, arguments);
-    },
-    setMinutes: function() {
-      d3_time_prototype.setUTCMinutes.apply(this._, arguments);
-    },
-    setMonth: function() {
-      d3_time_prototype.setUTCMonth.apply(this._, arguments);
-    },
-    setSeconds: function() {
-      d3_time_prototype.setUTCSeconds.apply(this._, arguments);
-    },
-    setTime: function() {
-      d3_time_prototype.setTime.apply(this._, arguments);
-    }
-  };
-  var d3_time_prototype = Date.prototype;
-  var d3_time_formatDateTime = "%a %b %e %H:%M:%S %Y", d3_time_formatDate = "%m/%d/%y", d3_time_formatTime = "%H:%M:%S";
-  var d3_time_days = d3_time_daySymbols, d3_time_dayAbbreviations = d3_time_days.map(d3_time_formatAbbreviate), d3_time_months = [ "January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December" ], d3_time_monthAbbreviations = d3_time_months.map(d3_time_formatAbbreviate);
-  d3.time.format = function(template) {
-    function format(date) {
-      var string = [], i = -1, j = 0, c, f;
-      while (++i < n) {
-        if (template.charCodeAt(i) == 37) {
-          string.push(template.substring(j, i), (f = d3_time_formats[c = template.charAt(++i)]) ? f(date) : c);
-          j = i + 1;
-        }
-      }
-      string.push(template.substring(j, i));
-      return string.join("");
-    }
-    var n = template.length;
-    format.parse = function(string) {
-      var d = {
-        y: 1900,
-        m: 0,
-        d: 1,
-        H: 0,
-        M: 0,
-        S: 0,
-        L: 0
-      }, i = d3_time_parse(d, template, string, 0);
-      if (i != string.length) return null;
-      if ("p" in d) d.H = d.H % 12 + d.p * 12;
-      var date = new d3_time;
-      date.setFullYear(d.y, d.m, d.d);
-      date.setHours(d.H, d.M, d.S, d.L);
-      return date;
-    };
-    format.toString = function() {
-      return template;
-    };
-    return format;
-  };
-  var d3_time_zfill2 = d3.format("02d"), d3_time_zfill3 = d3.format("03d"), d3_time_zfill4 = d3.format("04d"), d3_time_sfill2 = d3.format("2d");
-  var d3_time_dayRe = d3_time_formatRe(d3_time_days), d3_time_dayAbbrevRe = d3_time_formatRe(d3_time_dayAbbreviations), d3_time_monthRe = d3_time_formatRe(d3_time_months), d3_time_monthLookup = d3_time_formatLookup(d3_time_months), d3_time_monthAbbrevRe = d3_time_formatRe(d3_time_monthAbbreviations), d3_time_monthAbbrevLookup = d3_time_formatLookup(d3_time_monthAbbreviations);
-  var d3_time_formats = {
-    a: function(d) {
-      return d3_time_dayAbbreviations[d.getDay()];
-    },
-    A: function(d) {
-      return d3_time_days[d.getDay()];
-    },
-    b: function(d) {
-      return d3_time_monthAbbreviations[d.getMonth()];
-    },
-    B: function(d) {
-      return d3_time_months[d.getMonth()];
-    },
-    c: d3.time.format(d3_time_formatDateTime),
-    d: function(d) {
-      return d3_time_zfill2(d.getDate());
-    },
-    e: function(d) {
-      return d3_time_sfill2(d.getDate());
-    },
-    H: function(d) {
-      return d3_time_zfill2(d.getHours());
-    },
-    I: function(d) {
-      return d3_time_zfill2(d.getHours() % 12 || 12);
-    },
-    j: function(d) {
-      return d3_time_zfill3(1 + d3.time.dayOfYear(d));
-    },
-    L: function(d) {
-      return d3_time_zfill3(d.getMilliseconds());
-    },
-    m: function(d) {
-      return d3_time_zfill2(d.getMonth() + 1);
-    },
-    M: function(d) {
-      return d3_time_zfill2(d.getMinutes());
-    },
-    p: function(d) {
-      return d.getHours() >= 12 ? "PM" : "AM";
-    },
-    S: function(d) {
-      return d3_time_zfill2(d.getSeconds());
-    },
-    U: function(d) {
-      return d3_time_zfill2(d3.time.sundayOfYear(d));
-    },
-    w: function(d) {
-      return d.getDay();
-    },
-    W: function(d) {
-      return d3_time_zfill2(d3.time.mondayOfYear(d));
-    },
-    x: d3.time.format(d3_time_formatDate),
-    X: d3.time.format(d3_time_formatTime),
-    y: function(d) {
-      return d3_time_zfill2(d.getFullYear() % 100);
-    },
-    Y: function(d) {
-      return d3_time_zfill4(d.getFullYear() % 1e4);
-    },
-    Z: d3_time_zone,
-    "%": function(d) {
-      return "%";
-    }
-  };
-  var d3_time_parsers = {
-    a: d3_time_parseWeekdayAbbrev,
-    A: d3_time_parseWeekday,
-    b: d3_time_parseMonthAbbrev,
-    B: d3_time_parseMonth,
-    c: d3_time_parseLocaleFull,
-    d: d3_time_parseDay,
-    e: d3_time_parseDay,
-    H: d3_time_parseHour24,
-    I: d3_time_parseHour24,
-    L: d3_time_parseMilliseconds,
-    m: d3_time_parseMonthNumber,
-    M: d3_time_parseMinutes,
-    p: d3_time_parseAmPm,
-    S: d3_time_parseSeconds,
-    x: d3_time_parseLocaleDate,
-    X: d3_time_parseLocaleTime,
-    y: d3_time_parseYear,
-    Y: d3_time_parseFullYear
-  };
-  var d3_time_numberRe = /^\s*\d+/;
-  var d3_time_amPmLookup = d3.map({
-    am: 0,
-    pm: 1
-  });
-  d3.time.format.utc = function(template) {
-    function format(date) {
-      try {
-        d3_time = d3_time_utc;
-        var utc = new d3_time;
-        utc._ = date;
-        return local(utc);
-      } finally {
-        d3_time = Date;
-      }
-    }
-    var local = d3.time.format(template);
-    format.parse = function(string) {
-      try {
-        d3_time = d3_time_utc;
-        var date = local.parse(string);
-        return date && date._;
-      } finally {
-        d3_time = Date;
-      }
-    };
-    format.toString = local.toString;
-    return format;
-  };
-  var d3_time_formatIso = d3.time.format.utc("%Y-%m-%dT%H:%M:%S.%LZ");
-  d3.time.format.iso = Date.prototype.toISOString ? d3_time_formatIsoNative : d3_time_formatIso;
-  d3_time_formatIsoNative.parse = function(string) {
-    var date = new Date(string);
-    return isNaN(date) ? null : date;
-  };
-  d3_time_formatIsoNative.toString = d3_time_formatIso.toString;
-  d3.time.second = d3_time_interval(function(date) {
-    return new d3_time(Math.floor(date / 1e3) * 1e3);
-  }, function(date, offset) {
-    date.setTime(date.getTime() + Math.floor(offset) * 1e3);
-  }, function(date) {
-    return date.getSeconds();
-  });
-  d3.time.seconds = d3.time.second.range;
-  d3.time.seconds.utc = d3.time.second.utc.range;
-  d3.time.minute = d3_time_interval(function(date) {
-    return new d3_time(Math.floor(date / 6e4) * 6e4);
-  }, function(date, offset) {
-    date.setTime(date.getTime() + Math.floor(offset) * 6e4);
-  }, function(date) {
-    return date.getMinutes();
-  });
-  d3.time.minutes = d3.time.minute.range;
-  d3.time.minutes.utc = d3.time.minute.utc.range;
-  d3.time.hour = d3_time_interval(function(date) {
-    var timezone = date.getTimezoneOffset() / 60;
-    return new d3_time((Math.floor(date / 36e5 - timezone) + timezone) * 36e5);
-  }, function(date, offset) {
-    date.setTime(date.getTime() + Math.floor(offset) * 36e5);
-  }, function(date) {
-    return date.getHours();
-  });
-  d3.time.hours = d3.time.hour.range;
-  d3.time.hours.utc = d3.time.hour.utc.range;
-  d3.time.day = d3_time_interval(function(date) {
-    var day = new d3_time(1970, 0);
-    day.setFullYear(date.getFullYear(), date.getMonth(), date.getDate());
-    return day;
-  }, function(date, offset) {
-    date.setDate(date.getDate() + offset);
-  }, function(date) {
-    return date.getDate() - 1;
-  });
-  d3.time.days = d3.time.day.range;
-  d3.time.days.utc = d3.time.day.utc.range;
-  d3.time.dayOfYear = function(date) {
-    var year = d3.time.year(date);
-    return Math.floor((date - year - (date.getTimezoneOffset() - year.getTimezoneOffset()) * 6e4) / 864e5);
-  };
-  d3_time_daySymbols.forEach(function(day, i) {
-    day = day.toLowerCase();
-    i = 7 - i;
-    var interval = d3.time[day] = d3_time_interval(function(date) {
-      (date = d3.time.day(date)).setDate(date.getDate() - (date.getDay() + i) % 7);
-      return date;
-    }, function(date, offset) {
-      date.setDate(date.getDate() + Math.floor(offset) * 7);
-    }, function(date) {
-      var day = d3.time.year(date).getDay();
-      return Math.floor((d3.time.dayOfYear(date) + (day + i) % 7) / 7) - (day !== i);
-    });
-    d3.time[day + "s"] = interval.range;
-    d3.time[day + "s"].utc = interval.utc.range;
-    d3.time[day + "OfYear"] = function(date) {
-      var day = d3.time.year(date).getDay();
-      return Math.floor((d3.time.dayOfYear(date) + (day + i) % 7) / 7);
-    };
-  });
-  d3.time.week = d3.time.sunday;
-  d3.time.weeks = d3.time.sunday.range;
-  d3.time.weeks.utc = d3.time.sunday.utc.range;
-  d3.time.weekOfYear = d3.time.sundayOfYear;
-  d3.time.month = d3_time_interval(function(date) {
-    date = d3.time.day(date);
-    date.setDate(1);
-    return date;
-  }, function(date, offset) {
-    date.setMonth(date.getMonth() + offset);
-  }, function(date) {
-    return date.getMonth();
-  });
-  d3.time.months = d3.time.month.range;
-  d3.time.months.utc = d3.time.month.utc.range;
-  d3.time.year = d3_time_interval(function(date) {
-    date = d3.time.day(date);
-    date.setMonth(0, 1);
-    return date;
-  }, function(date, offset) {
-    date.setFullYear(date.getFullYear() + offset);
-  }, function(date) {
-    return date.getFullYear();
-  });
-  d3.time.years = d3.time.year.range;
-  d3.time.years.utc = d3.time.year.utc.range;
-  var d3_time_scaleSteps = [ 1e3, 5e3, 15e3, 3e4, 6e4, 3e5, 9e5, 18e5, 36e5, 108e5, 216e5, 432e5, 864e5, 1728e5, 6048e5, 2592e6, 7776e6, 31536e6 ];
-  var d3_time_scaleLocalMethods = [ [ d3.time.second, 1 ], [ d3.time.second, 5 ], [ d3.time.second, 15 ], [ d3.time.second, 30 ], [ d3.time.minute, 1 ], [ d3.time.minute, 5 ], [ d3.time.minute, 15 ], [ d3.time.minute, 30 ], [ d3.time.hour, 1 ], [ d3.time.hour, 3 ], [ d3.time.hour, 6 ], [ d3.time.hour, 12 ], [ d3.time.day, 1 ], [ d3.time.day, 2 ], [ d3.time.week, 1 ], [ d3.time.month, 1 ], [ d3.time.month, 3 ], [ d3.time.year, 1 ] ];
-  var d3_time_scaleLocalFormats = [ [ d3.time.format("%Y"), function(d) {
-    return true;
-  } ], [ d3.time.format("%B"), function(d) {
-    return d.getMonth();
-  } ], [ d3.time.format("%b %d"), function(d) {
-    return d.getDate() != 1;
-  } ], [ d3.time.format("%a %d"), function(d) {
-    return d.getDay() && d.getDate() != 1;
-  } ], [ d3.time.format("%I %p"), function(d) {
-    return d.getHours();
-  } ], [ d3.time.format("%I:%M"), function(d) {
-    return d.getMinutes();
-  } ], [ d3.time.format(":%S"), function(d) {
-    return d.getSeconds();
-  } ], [ d3.time.format(".%L"), function(d) {
-    return d.getMilliseconds();
-  } ] ];
-  var d3_time_scaleLinear = d3.scale.linear(), d3_time_scaleLocalFormat = d3_time_scaleFormat(d3_time_scaleLocalFormats);
-  d3_time_scaleLocalMethods.year = function(extent, m) {
-    return d3_time_scaleLinear.domain(extent.map(d3_time_scaleGetYear)).ticks(m).map(d3_time_scaleSetYear);
-  };
-  d3.time.scale = function() {
-    return d3_time_scale(d3.scale.linear(), d3_time_scaleLocalMethods, d3_time_scaleLocalFormat);
-  };
-  var d3_time_scaleUTCMethods = d3_time_scaleLocalMethods.map(function(m) {
-    return [ m[0].utc, m[1] ];
-  });
-  var d3_time_scaleUTCFormats = [ [ d3.time.format.utc("%Y"), function(d) {
-    return true;
-  } ], [ d3.time.format.utc("%B"), function(d) {
-    return d.getUTCMonth();
-  } ], [ d3.time.format.utc("%b %d"), function(d) {
-    return d.getUTCDate() != 1;
-  } ], [ d3.time.format.utc("%a %d"), function(d) {
-    return d.getUTCDay() && d.getUTCDate() != 1;
-  } ], [ d3.time.format.utc("%I %p"), function(d) {
-    return d.getUTCHours();
-  } ], [ d3.time.format.utc("%I:%M"), function(d) {
-    return d.getUTCMinutes();
-  } ], [ d3.time.format.utc(":%S"), function(d) {
-    return d.getUTCSeconds();
-  } ], [ d3.time.format.utc(".%L"), function(d) {
-    return d.getUTCMilliseconds();
-  } ] ];
-  var d3_time_scaleUTCFormat = d3_time_scaleFormat(d3_time_scaleUTCFormats);
-  d3_time_scaleUTCMethods.year = function(extent, m) {
-    return d3_time_scaleLinear.domain(extent.map(d3_time_scaleUTCGetYear)).ticks(m).map(d3_time_scaleUTCSetYear);
-  };
-  d3.time.scale.utc = function() {
-    return d3_time_scale(d3.scale.linear(), d3_time_scaleUTCMethods, d3_time_scaleUTCFormat);
-  };
-})();
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/vendor/scripts/ember-data-latest.js b/branch-1.2/ambari-web/vendor/scripts/ember-data-latest.js
deleted file mode 100644
index 9194f48..0000000
--- a/branch-1.2/ambari-web/vendor/scripts/ember-data-latest.js
+++ /dev/null
@@ -1,4176 +0,0 @@
-(function() {
-window.DS = Ember.Namespace.create({
-  CURRENT_API_REVISION: 4
-});
-
-})();
-
-
-
-(function() {
-var get = Ember.get, set = Ember.set;
-
-/**
-  A record array is an array that contains records of a certain type. The record
-  array materializes records as needed when they are retrieved for the first
-  time. You should not create record arrays yourself. Instead, an instance of
-  DS.RecordArray or its subclasses will be returned by your application's store
-  in response to queries.
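-
-  For example (an illustrative sketch only; `Person` is a hypothetical model,
-  and a `findAll` query method is assumed on the store):
-
-      var people = App.store.findAll(Person); // returns a DS.RecordArray
-      people.objectAt(0); // materializes the first record on access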
-*/
-
-DS.RecordArray = Ember.ArrayProxy.extend({
-
-  /**
-    The model type contained by this record array.
-
-    @type DS.Model
-  */
-  type: null,
-
-  // The array of client ids backing the record array. When a
-  // record is requested from the record array, the record
-  // for the client id at the same index is materialized, if
-  // necessary, by the store.
-  content: null,
-
-  // The store that created this record array.
-  store: null,
-
-  objectAtContent: function(index) {
-    var content = get(this, 'content'),
-        clientId = content.objectAt(index),
-        store = get(this, 'store');
-
-    if (clientId !== undefined) {
-      return store.findByClientId(get(this, 'type'), clientId);
-    }
-  }
-});
-
-})();
-
-
-
-(function() {
-var get = Ember.get;
-
-DS.FilteredRecordArray = DS.RecordArray.extend({
-  filterFunction: null,
-
-  replace: function() {
-    var type = get(this, 'type').toString();
-    throw new Error("The result of a client-side filter (on " + type + ") is immutable.");
-  },
-
-  updateFilter: Ember.observer(function() {
-    var store = get(this, 'store');
-    store.updateRecordArrayFilter(this, get(this, 'type'), get(this, 'filterFunction'));
-  }, 'filterFunction')
-});
-
-})();
-
-
-
-(function() {
-var get = Ember.get, set = Ember.set;
-
-DS.AdapterPopulatedRecordArray = DS.RecordArray.extend({
-  query: null,
-  isLoaded: false,
-
-  replace: function() {
-    var type = get(this, 'type').toString();
-    throw new Error("The result of a server query (on " + type + ") is immutable.");
-  },
-
-  load: function(array) {
-    var store = get(this, 'store'), type = get(this, 'type');
-
-    var clientIds = store.loadMany(type, array).clientIds;
-
-    this.beginPropertyChanges();
-    set(this, 'content', Ember.A(clientIds));
-    set(this, 'isLoaded', true);
-    this.endPropertyChanges();
-  }
-});
-
-
-})();
-
-
-
-(function() {
-var get = Ember.get, set = Ember.set, guidFor = Ember.guidFor;
-
-var Set = function() {
-  this.hash = {};
-  this.list = [];
-};
-
-Set.prototype = {
-  add: function(item) {
-    var hash = this.hash,
-        guid = guidFor(item);
-
-    if (hash.hasOwnProperty(guid)) { return; }
-
-    hash[guid] = true;
-    this.list.push(item);
-  },
-
-  remove: function(item) {
-    var hash = this.hash,
-        guid = guidFor(item);
-
-    if (!hash.hasOwnProperty(guid)) { return; }
-
-    delete hash[guid];
-    var list = this.list,
-        index = Ember.EnumerableUtils.indexOf(list, item);
-
-    list.splice(index, 1);
-  },
-
-  isEmpty: function() {
-    return this.list.length === 0;
-  }
-};
-
-var LoadedState = Ember.State.extend({
-  recordWasAdded: function(manager, record) {
-    var dirty = manager.dirty, observer;
-    dirty.add(record);
-
-    observer = function() {
-      if (!get(record, 'isDirty')) {
-        record.removeObserver('isDirty', observer);
-        manager.send('childWasSaved', record);
-      }
-    };
-
-    record.addObserver('isDirty', observer);
-  },
-
-  recordWasRemoved: function(manager, record) {
-    var dirty = manager.dirty, observer;
-    dirty.add(record);
-
-    observer = function() {
-      record.removeObserver('isDirty', observer);
-      if (!get(record, 'isDirty')) { manager.send('childWasSaved', record); }
-    };
-
-    record.addObserver('isDirty', observer);
-  }
-});
-
-var states = {
-  loading: Ember.State.create({
-    isLoaded: false,
-    isDirty: false,
-
-    loadedRecords: function(manager, count) {
-      manager.decrement(count);
-    },
-
-    becameLoaded: function(manager) {
-      manager.transitionTo('clean');
-    }
-  }),
-
-  clean: LoadedState.create({
-    isLoaded: true,
-    isDirty: false,
-
-    recordWasAdded: function(manager, record) {
-      this._super(manager, record);
-      manager.goToState('dirty');
-    },
-
-    update: function(manager, clientIds) {
-      var manyArray = manager.manyArray;
-      set(manyArray, 'content', clientIds);
-    }
-  }),
-
-  dirty: LoadedState.create({
-    isLoaded: true,
-    isDirty: true,
-
-    childWasSaved: function(manager, child) {
-      var dirty = manager.dirty;
-      dirty.remove(child);
-
-      if (dirty.isEmpty()) { manager.send('arrayBecameSaved'); }
-    },
-
-    arrayBecameSaved: function(manager) {
-      manager.goToState('clean');
-    }
-  })
-};
-
-DS.ManyArrayStateManager = Ember.StateManager.extend({
-  manyArray: null,
-  initialState: 'loading',
-  states: states,
-
-  /**
-   This number is used to keep track of the number of outstanding
-   records that must be loaded before the array is considered
-   loaded. As results stream in, this number is decremented until
-   it becomes zero, at which point the `isLoaded` flag will be set
-   to true.
-  */
-  counter: 0,
-
-  init: function() {
-    this._super();
-    this.dirty = new Set();
-    this.counter = get(this, 'manyArray.length');
-  },
-
-  decrement: function(count) {
-    var counter = this.counter = this.counter - count;
-
-    Ember.assert("Somehow the ManyArray loaded counter went below 0. This is probably an ember-data bug. Please report it at https://github.com/emberjs/data/issues", counter >= 0);
-
-    if (counter === 0) {
-      this.send('becameLoaded');
-    }
-  }
-});
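-
-// Illustrative trace only (hypothetical counts): a ManyArray with three
-// unloaded records starts in the 'loading' state; each 'loadedRecords'
-// event decrements the counter, and reaching zero sends 'becameLoaded',
-// transitioning the manager to 'clean'.
-//
-//     manager.send('loadedRecords', 2);  // counter: 3 -> 1
-//     manager.send('loadedRecords', 1);  // counter: 1 -> 0, now 'clean'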
-
-})();
-
-
-
-(function() {
-var get = Ember.get, set = Ember.set;
-
-DS.ManyArray = DS.RecordArray.extend({
-  init: function() {
-    set(this, 'stateManager', DS.ManyArrayStateManager.create({ manyArray: this }));
-
-    return this._super();
-  },
-
-  parentRecord: null,
-
-  isDirty: Ember.computed(function() {
-    return get(this, 'stateManager.currentState.isDirty');
-  }).property('stateManager.currentState').cacheable(),
-
-  isLoaded: Ember.computed(function() {
-    return get(this, 'stateManager.currentState.isLoaded');
-  }).property('stateManager.currentState').cacheable(),
-
-  send: function(event, context) {
-    this.get('stateManager').send(event, context);
-  },
-
-  fetch: function() {
-    var clientIds = get(this, 'content'),
-        store = get(this, 'store'),
-        type = get(this, 'type');
-
-    store.fetchUnloadedClientIds(type, clientIds);
-  },
-
-  // Overrides Ember.Array's replace method to implement dirty-tracking
-  // when records are added to or removed from this association.
-  replaceContent: function(index, removed, added) {
-    var parentRecord = get(this, 'parentRecord');
-    var pendingParent = parentRecord && !get(parentRecord, 'id');
-    var stateManager = get(this, 'stateManager');
-
-    // Map the array of record objects into an array of client ids.
-    added = added.map(function(record) {
-      Ember.assert("You can only add records of " + (get(this, 'type') && get(this, 'type').toString()) + " to this association.", !get(this, 'type') || (get(this, 'type') === record.constructor));
-
-      // If the record to which this many array belongs does not yet
-      // have an id, notify the newly-added record that it must wait
-      // for the parent to receive an id before the child can be
-      // saved.
-      if (pendingParent) {
-        record.send('waitingOn', parentRecord);
-      }
-
-      var oldParent = this.assignInverse(record, parentRecord);
-
-      record.get('transaction')
-        .relationshipBecameDirty(record, oldParent, parentRecord);
-
-      stateManager.send('recordWasAdded', record);
-
-      return record.get('clientId');
-    }, this);
-
-    var store = this.store;
-
-    var len = index+removed, record;
-    for (var i = index; i < len; i++) {
-      // TODO: null out inverse FK
-      record = this.objectAt(i);
-      var oldParent = this.assignInverse(record, parentRecord, true);
-
-      record.get('transaction')
-        .relationshipBecameDirty(record, parentRecord, null);
-
-      // If we put the child record into a pending state because
-      // we were waiting on the parent record to get an id, we
-      // can tell the child it no longer needs to wait.
-      if (pendingParent) {
-        record.send('doneWaitingOn', parentRecord);
-      }
-
-      stateManager.send('recordWasAdded', record);
-    }
-
-    this._super(index, removed, added);
-  },
-
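-  // Finds the belongsTo association on `record` whose type matches
-  // `parentRecord`, sets it to `parentRecord` (or clears it when `remove`
-  // is truthy), and returns the previous parent so that callers can mark
-  // the relationship dirty on the transaction.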
-  assignInverse: function(record, parentRecord, remove) {
-    var associationMap = get(record.constructor, 'associations'),
-        possibleAssociations = associationMap.get(parentRecord.constructor),
-        possible, actual, oldParent;
-
-    if (!possibleAssociations) { return; }
-
-    for (var i = 0, l = possibleAssociations.length; i < l; i++) {
-      possible = possibleAssociations[i];
-
-      if (possible.kind === 'belongsTo') {
-        actual = possible;
-        break;
-      }
-    }
-
-    if (actual) {
-      oldParent = get(record, actual.name);
-      set(record, actual.name, remove ? null : parentRecord);
-      return oldParent;
-    }
-  },
-
-  // Create a child record within the parentRecord
-  createRecord: function(hash, transaction) {
-    var parentRecord = get(this, 'parentRecord'),
-        store = get(parentRecord, 'store'),
-        type = get(this, 'type'),
-        record;
-
-    transaction = transaction || get(parentRecord, 'transaction');
-
-    record = store.createRecord.call(store, type, hash, transaction);
-    this.pushObject(record);
-
-    return record;
-  }
-});
-
-})();
-
-
-
-(function() {
-
-})();
-
-
-
-(function() {
-var get = Ember.get, set = Ember.set, fmt = Ember.String.fmt,
-    removeObject = Ember.EnumerableUtils.removeObject;
-
-/**
-  A transaction allows you to collect multiple records into a unit of work
-  that can be committed or rolled back as a group.
-
-  For example, if a record has local modifications that have not yet
-  been saved, calling `commit()` on its transaction will cause those
-  modifications to be sent to the adapter to be saved. Calling
-  `rollback()` on its transaction would cause all of the modifications to
-  be discarded and the record to return to the last known state before
-  changes were made.
-
-  If a newly created record's transaction is rolled back, it will
-  immediately transition to the deleted state.
-
-  If you do not explicitly create a transaction, a record is assigned to
-  an implicit transaction called the default transaction. In these cases,
-  you can treat your application's instance of `DS.Store` as a transaction
-  and call the `commit()` and `rollback()` methods on the store itself.
-
-  Once a record has been successfully committed or rolled back, it will
-  be moved back to the implicit transaction. Because it will now be in
-  a clean state, it can be moved to a new transaction if you wish.
-
-  ### Creating a Transaction
-
-  To create a new transaction, call the `transaction()` method of your
-  application's `DS.Store` instance:
-
-      var transaction = App.store.transaction();
-
-  This will return a new instance of `DS.Transaction` with no records
-  yet assigned to it.
-
-  ### Adding Existing Records
-
-  Add records to a transaction using the `add()` method:
-
-      record = App.store.find(Person, 1);
-      transaction.add(record);
-
-  Note that only records whose `isDirty` flag is `false` may be added
-  to a transaction. Once modifications to a record have been made
-  (its `isDirty` flag is `true`), it is no longer able to be added to
-  a transaction.
-
-  ### Creating New Records
-
-  Because newly created records are dirty from the time they are created,
-  and because dirty records cannot be added to a transaction, you must
-  use the `createRecord()` method to assign new records to a transaction.
-
-  For example, instead of this:
-
-    var transaction = store.transaction();
-    var person = Person.createRecord({ name: "Steve" });
-
-    // won't work because person is dirty
-    transaction.add(person);
-
-  Call `createRecord()` on the transaction directly:
-
-    var transaction = store.transaction();
-    transaction.createRecord(Person, { name: "Steve" });
-
-  ### Asynchronous Commits
-
-  Typically, all of the records in a transaction will be committed
-  together. However, new records that have a dependency on other new
-  records need to wait for their parent record to be saved and assigned an
-  ID. In that case, the child record will continue to live in the
-  transaction until its parent is saved, at which time the transaction will
-  attempt to commit again.
-
-  For this reason, you should not re-use transactions once you have committed
-  them. Always make a new transaction and move the desired records to it before
-  calling commit.
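-
-  For example (an illustrative sketch using the same names as the examples
-  above):
-
-      var transaction = App.store.transaction();
-      transaction.createRecord(Person, { name: "Steve" });
-      transaction.commit();
-
-      // After committing, create a fresh transaction for further work:
-      var nextTransaction = App.store.transaction();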
-*/
-
-DS.Transaction = Ember.Object.extend({
-  /**
-    @private
-
-    Creates the bucket data structure used to segregate records by
-    type.
-  */
-  init: function() {
-    set(this, 'buckets', {
-      clean:    Ember.Map.create(),
-      created:  Ember.Map.create(),
-      updated:  Ember.Map.create(),
-      deleted:  Ember.Map.create(),
-      inflight: Ember.Map.create()
-    });
-
-    this.dirtyRelationships = {
-      byChild: Ember.Map.create(),
-      byNewParent: Ember.Map.create(),
-      byOldParent: Ember.Map.create()
-    };
-  },
-
-  /**
-    Creates a new record of the given type and assigns it to the transaction
-    on which the method was called.
-
-    This is useful because only clean records can be added to a transaction,
-    and new records created using other methods immediately become dirty.
-
-    @param {DS.Model} type the model type to create
-    @param {Object} hash the data hash to assign the new record
-  */
-  createRecord: function(type, hash) {
-    var store = get(this, 'store');
-
-    return store.createRecord(type, hash, this);
-  },
-
-  /**
-    Adds an existing record to this transaction. Only records without
-    modifications (i.e., records whose `isDirty` property is `false`)
-    can be added to a transaction.
-
-    @param {DS.Model} record the record to add to the transaction
-  */
-  add: function(record) {
-    // we could probably make this work if someone has a valid use case. Do you?
-    Ember.assert("Once a record has changed, you cannot move it into a different transaction", !get(record, 'isDirty'));
-
-    var recordTransaction = get(record, 'transaction'),
-        defaultTransaction = get(this, 'store.defaultTransaction');
-
-    Ember.assert("Models cannot belong to more than one transaction at a time.", recordTransaction === defaultTransaction);
-
-    this.adoptRecord(record);
-  },
-
-  /**
-    Commits the transaction, which causes all of the modified records that
-    belong to the transaction to be sent to the adapter to be saved.
-
-    Once you call `commit()` on a transaction, you should not re-use it.
-
-    When a record is saved, it will be removed from this transaction and
-    moved back to the store's default transaction.
-  */
-  commit: function() {
-    var self = this,
-        iterate;
-
-    iterate = function(bucketType, fn, binding) {
-      var dirty = self.bucketForType(bucketType);
-
-      dirty.forEach(function(type, records) {
-        if (records.isEmpty()) { return; }
-
-        var array = [];
-
-        records.forEach(function(record) {
-          record.send('willCommit');
-
-          if (get(record, 'isPending') === false) {
-            array.push(record);
-          }
-        });
-
-        fn.call(binding, type, array);
-      });
-    };
-
-    var commitDetails = {
-      updated: {
-        eachType: function(fn, binding) { iterate('updated', fn, binding); }
-      },
-
-      created: {
-        eachType: function(fn, binding) { iterate('created', fn, binding); }
-      },
-
-      deleted: {
-        eachType: function(fn, binding) { iterate('deleted', fn, binding); }
-      }
-    };
-
-    var store = get(this, 'store');
-    var adapter = get(store, '_adapter');
-
-    this.removeCleanRecords();
-
-    if (adapter && adapter.commit) { adapter.commit(store, commitDetails); }
-    else { throw fmt("Adapter is either null or does not implement `commit` method", this); }
-  },
-
-  /**
-    Rolling back a transaction resets the records that belong to
-    that transaction.
-
-    Updated records have their properties reset to the last known
-    value from the persistence layer. Deleted records are reverted
-    to a clean, non-deleted state. Newly created records immediately
-    become deleted, and are not sent to the adapter to be persisted.
-
-    After the transaction is rolled back, any records that belong
-    to it will return to the store's default transaction, and the
-    current transaction should not be used again.
-  */
-  rollback: function() {
-    var store = get(this, 'store'),
-        dirty;
-
-    // Loop through all of the records in each of the dirty states
-    // and initiate a rollback on them. As a side effect of telling
-    // the record to roll back, it should also move itself out of
-    // the dirty bucket and into the clean bucket.
-    ['created', 'updated', 'deleted', 'inflight'].forEach(function(bucketType) {
-      dirty = this.bucketForType(bucketType);
-
-      dirty.forEach(function(type, records) {
-        records.forEach(function(record) {
-          record.send('rollback');
-        });
-      });
-    }, this);
-
-    // Now that all records in the transaction are guaranteed to be
-    // clean, migrate them all to the store's default transaction.
-    this.removeCleanRecords();
-  },
-
-  /**
-    @private
-
-    Removes a record from this transaction and back to the store's
-    default transaction.
-
-    Note: This method is private for now, but should probably be exposed
-    in the future once we have stricter error checking (for example, in the
-    case of the record being dirty).
-
-    @param {DS.Model} record
-  */
-  remove: function(record) {
-    var defaultTransaction = get(this, 'store.defaultTransaction');
-    defaultTransaction.adoptRecord(record);
-  },
-
-  /**
-    @private
-
-    Removes all of the records in the transaction's clean bucket.
-  */
-  removeCleanRecords: function() {
-    var clean = this.bucketForType('clean'),
-        self = this;
-
-    clean.forEach(function(type, records) {
-      records.forEach(function(record) {
-        self.remove(record);
-      });
-    });
-  },
-
-  /**
-    @private
-
-    Returns the bucket for the given bucket type. For example, you might call
-    `this.bucketForType('updated')` to get the `Ember.Map` that contains all
-    of the records that have changes pending.
-
-    @param {String} bucketType the type of bucket
-    @returns Ember.Map
-  */
-  bucketForType: function(bucketType) {
-    var buckets = get(this, 'buckets');
-
-    return get(buckets, bucketType);
-  },
-
-  /**
-    @private
-
-    This method moves a record into a different transaction without the normal
-    checks that ensure that the user is not doing something weird, like moving
-    a dirty record into a new transaction.
-
-    It is designed for internal use, such as when we are moving a clean record
-    into a new transaction when the transaction is committed.
-
-    This method must not be called unless the record is clean.
-
-    @param {DS.Model} record
-  */
-  adoptRecord: function(record) {
-    var oldTransaction = get(record, 'transaction');
-
-    if (oldTransaction) {
-      oldTransaction.removeFromBucket('clean', record);
-    }
-
-    this.addToBucket('clean', record);
-    set(record, 'transaction', this);
-  },
-
-  /**
-    @private
-
-    Adds a record to the named bucket.
-
-    @param {String} bucketType one of `clean`, `created`, `updated`, or `deleted`
-  */
-  addToBucket: function(bucketType, record) {
-    var bucket = this.bucketForType(bucketType),
-        type = record.constructor;
-
-    var records = bucket.get(type);
-
-    if (!records) {
-      records = Ember.OrderedSet.create();
-      bucket.set(type, records);
-    }
-
-    records.add(record);
-  },
-
-  /**
-    @private
-
-    Removes a record from the named bucket.
-
-    @param {String} bucketType one of `clean`, `created`, `updated`, or `deleted`
-  */
-  removeFromBucket: function(bucketType, record) {
-    var bucket = this.bucketForType(bucketType),
-        type = record.constructor;
-
-    var records = bucket.get(type);
-    records.remove(record);
-  },
-
-  /**
-    @private
-
-    Called by a ManyArray when a new record is added to it. This
-    method will index a relationship description by the child
-    record, its old parent, and its new parent.
-
-    The store will provide this description to the adapter's
-    shouldCommit method, so it can determine whether any of
-    the records is pending another record. The store will also
-    provide a list of these descriptions to the adapter's commit
-    method.
-
-    @param {DS.Model} record the new child record
-    @param {DS.Model} oldParent the parent that the child is
-      moving from, or null
-    @param {DS.Model} newParent the parent that the child is
-      moving to, or null
-  */
-  relationshipBecameDirty: function(child, oldParent, newParent) {
-    var relationships = this.dirtyRelationships, relationship;
-
-    var relationshipsForChild = relationships.byChild.get(child),
-        possibleRelationship,
-        needsNewEntries = true;
-
-    // If the child has any existing dirty relationships in this
-    // transaction, we need to collapse the old relationship
-    // into the new one. For example, if we change the parent of
-    // a child record before saving, there is no need to save the
-    // record that was its parent temporarily.
-    if (relationshipsForChild) {
-
-      // Loop through all of the relationships we know about that
-      // contain the same child as the new relationship.
-      for (var i=0, l=relationshipsForChild.length; i<l; i++) {
-        relationship = relationshipsForChild[i];
-
-        // If the parent of the child record has changed, there is
-        // no need to update the old parent that had not yet been saved.
-        //
-        // This case is two changes in a record's parent:
-        //
-        //   A -> B
-        //   B -> C
-        //
-        // In this case, there is no need to remember the A->B
-        // change. We can collapse both changes into:
-        //
-        //   A -> C
-        //
-        // Another possible case is:
-        //
-        //   A -> B
-        //   B -> A
-        //
-        // In this case, we don't need to do anything. We can
-        // simply remove the original A->B change and call it
-        // a day.
-        if (relationship.newParent === oldParent) {
-          oldParent = relationship.oldParent;
-          this.removeRelationship(relationship);
-
-          // This is the case of A->B followed by B->A.
-          if (relationship.oldParent === newParent) {
-            needsNewEntries = false;
-          }
-        }
-      }
-    }
-
-    relationship = {
-      child: child,
-      oldParent: oldParent,
-      newParent: newParent
-    };
-
-    // If we didn't go A->B and then B->A, add new dirty relationship
-    // entries.
-    if (needsNewEntries) {
-      this.addRelationshipTo('byChild', child, relationship);
-      this.addRelationshipTo('byOldParent', oldParent, relationship);
-      this.addRelationshipTo('byNewParent', newParent, relationship);
-    }
-  },
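-
-  // Illustration of the collapsing described above (hypothetical
-  // records `child`, `a`, `b`, and `c`):
-  //
-  //     child moves a -> b   // entry: { child, oldParent: a, newParent: b }
-  //     child moves b -> c   // collapsed: { child, oldParent: a, newParent: c }
-  //
-  //     child moves a -> b   // entry: { child, oldParent: a, newParent: b }
-  //     child moves b -> a   // entry removed; nothing left to save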
-
-  removeRelationship: function(relationship) {
-    var relationships = this.dirtyRelationships;
-
-    removeObject(relationships.byOldParent.get(relationship.oldParent), relationship);
-    removeObject(relationships.byNewParent.get(relationship.newParent), relationship);
-    removeObject(relationships.byChild.get(relationship.child), relationship);
-  },
-
-  addRelationshipTo: function(type, record, description) {
-    var map = this.dirtyRelationships[type];
-
-    var relationships = map.get(record);
-
-    if (!relationships) {
-      relationships = [ description ];
-      map.set(record, relationships);
-    } else {
-      relationships.push(description);
-    }
-  },
-
-  /**
-    @private
-
-    Called by a record's state manager to indicate that the record has entered
-    a dirty state. The record will be moved from the `clean` bucket and into
-    the appropriate dirty bucket.
-
-    @param {String} bucketType one of `created`, `updated`, or `deleted`
-  */
-  recordBecameDirty: function(bucketType, record) {
-    this.removeFromBucket('clean', record);
-    this.addToBucket(bucketType, record);
-  },
-
-  /**
-    @private
-
-    Called by a record's state manager to indicate that the record has entered
-    the in-flight state. The record will be moved from its current dirty bucket and into
-    the `inflight` bucket.
-
-    @param {String} bucketType one of `created`, `updated`, or `deleted`
-  */
-  recordBecameInFlight: function(kind, record) {
-    this.removeFromBucket(kind, record);
-    this.addToBucket('inflight', record);
-  },
-
-  /**
-    @private
-
-    Called by a record's state manager to indicate that the record has entered
-    a clean state. The record will be moved from its current dirty or inflight bucket and into
-    the `clean` bucket.
-
-    @param {String} bucketType one of `created`, `updated`, or `deleted`
-  */
-  recordBecameClean: function(kind, record) {
-    this.removeFromBucket(kind, record);
-
-    this.remove(record);
-  }
-});
-
-})();
-
-
-
-(function() {
-/*globals Ember*/
-var get = Ember.get, set = Ember.set;
-
-var DATA_PROXY = {
-  get: function(name) {
-    return this.savedData[name];
-  }
-};
-
-// These values are used in the data cache when clientIds are
-// needed but the underlying data has not yet been loaded by
-// the server.
-var UNLOADED = 'unloaded';
-var LOADING = 'loading';
-
-// Implementors Note:
-//
-//   The variables in this file are consistently named according to the following
-//   scheme:
-//
-//   * +id+ means an identifier managed by an external source, provided inside the
-//     data hash provided by that source.
-//   * +clientId+ means a transient numerical identifier generated at runtime by
-//     the data store. It is important primarily because newly created objects may
-//     not yet have an externally generated id.
-//   * +type+ means a subclass of DS.Model.
-
-/**
-  The store contains all of the hashes for records loaded from the server.
-  It is also responsible for creating instances of DS.Model when you request one
-  of these data hashes, so that they can be bound to in your Handlebars templates.
-
-  Create a new store like this:
-
-       MyApp.store = DS.Store.create();
-
-  You can retrieve DS.Model instances from the store in several ways. To retrieve
-  a record for a specific id, use the `find()` method:
-
-       var record = MyApp.store.find(MyApp.Contact, 123);
-
-  By default, the store will talk to your backend using a standard REST mechanism.
-  You can customize how the store talks to your backend by specifying a custom adapter:
-
-       MyApp.store = DS.Store.create({
-         adapter: 'MyApp.CustomAdapter'
-       });
-
-  You can learn more about writing a custom adapter by reading the `DS.Adapter`
-  documentation.
-*/
-DS.Store = Ember.Object.extend({
-
-  /**
-    Many methods can be invoked without specifying which store should be used.
-    In those cases, the first store created will be used as the default. If
-    an application has multiple stores, it should specify which store to use
-    when performing actions, such as finding records by id.
-
-    The init method registers this store as the default if none is specified.
-  */
-  init: function() {
-    // Enforce API revisioning. See BREAKING_CHANGES.md for more.
-    var revision = get(this, 'revision');
-
-    if (revision !== DS.CURRENT_API_REVISION && !Ember.ENV.TESTING) {
-      throw new Error("Error: The Ember Data library has had breaking API changes since the last time you updated the library. Please review the list of breaking changes at https://github.com/emberjs/data/blob/master/BREAKING_CHANGES.md, then update your store's `revision` property to " + DS.CURRENT_API_REVISION);
-    }
-
-    if (!get(DS, 'defaultStore') || get(this, 'isDefaultStore')) {
-      set(DS, 'defaultStore', this);
-    }
-
-    // internal bookkeeping; not observable
-    this.typeMaps = {};
-    this.recordCache = [];
-    this.clientIdToId = {};
-    this.recordArraysByClientId = {};
-
-    // Internally, we maintain a map of all unloaded IDs requested by
-    // a ManyArray. As the adapter loads hashes into the store, the
-    // store notifies any interested ManyArrays. When the ManyArray's
-    // total number of loading records drops to zero, it becomes
-    // `isLoaded` and fires a `didLoad` event.
-    this.loadingRecordArrays = {};
-
-    set(this, 'defaultTransaction', this.transaction());
-
-    return this._super();
-  },
-
-  /**
-    Returns a new transaction scoped to this store.
-
-    @see {DS.Transaction}
-    @returns DS.Transaction
-  */
-  transaction: function() {
-    return DS.Transaction.create({ store: this });
-  },
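-
-  // A hedged usage sketch (model name `App.Person` is assumed): records
-  // created through an explicit transaction are committed together when
-  // that transaction commits, independently of the default transaction.
-  //
-  //     var transaction = store.transaction();
-  //     transaction.createRecord(App.Person, { name: "Tom" });
-  //     transaction.commit();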
-
-  /**
-    @private
-
-    This is used only by the record's DataProxy. Do not use this directly.
-  */
-  dataForRecord: function(record) {
-    var type = record.constructor,
-        clientId = get(record, 'clientId'),
-        typeMap = this.typeMapFor(type);
-
-    return typeMap.cidToHash[clientId];
-  },
-
-  /**
-    The adapter to use to communicate to a backend server or other persistence layer.
-
-    This can be specified as an instance, a class, or a property path that specifies
-    where the adapter can be located.
-
-    @property {DS.Adapter|String}
-  */
-  adapter: null,
-
-  /**
-    @private
-
-    This property returns the adapter, after resolving a possible String.
-
-    @returns DS.Adapter
-  */
-  _adapter: Ember.computed(function() {
-    var adapter = get(this, 'adapter');
-    if (typeof adapter === 'string') {
-      return get(this, adapter, false) || get(window, adapter);
-    }
-    return adapter;
-  }).property('adapter').cacheable(),
-
-  // A monotonically increasing number to be used to uniquely identify
-  // data hashes and records.
-  clientIdCounter: 1,
-
-  // .....................
-  // . CREATE NEW RECORD .
-  // .....................
-
-  /**
-    Create a new record in the current store. The properties passed
-    to this method are set on the newly created record.
-
-    @param {subclass of DS.Model} type
-    @param {Object} properties a hash of properties to set on the
-      newly created record.
-    @returns DS.Model
-  */
-  createRecord: function(type, properties, transaction) {
-    properties = properties || {};
-
-    // Create a new instance of the model `type` and put it
-    // into the specified `transaction`. If no transaction is
-    // specified, the default transaction will be used.
-    //
-    // NOTE: A `transaction` is specified when the
-    // `transaction.createRecord` API is used.
-    var record = type._create({
-      store: this
-    });
-
-    transaction = transaction || get(this, 'defaultTransaction');
-    transaction.adoptRecord(record);
-
-    // Extract the primary key from the `properties` hash,
-    // based on the `primaryKey` for the model type.
-    var primaryKey = get(record, 'primaryKey'),
-        id = properties[primaryKey] || null;
-
-    // If the passed properties do not include a primary key,
-    // give the adapter an opportunity to generate one.
-    var adapter;
-    if (Ember.none(id)) {
-      adapter = get(this, 'adapter');
-      if (adapter && adapter.generateIdForRecord) {
-        id = adapter.generateIdForRecord(this, record);
-        properties.id = id;
-      }
-    }
-
-    var hash = {}, clientId;
-
-    // Push the hash into the store. If present, associate the
-    // extracted `id` with the hash.
-    clientId = this.pushHash(hash, id, type);
-
-    record.send('didChangeData');
-
-    var recordCache = get(this, 'recordCache');
-
-    // Now that we have a clientId, attach it to the record we
-    // just created.
-    set(record, 'clientId', clientId);
-
-    // Store the record we just created in the record cache for
-    // this clientId.
-    recordCache[clientId] = record;
-
-    // Set the properties specified on the record.
-    record.setProperties(properties);
-
-    this.updateRecordArrays(type, clientId, get(record, 'data'));
-
-    return record;
-  },
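-
-  // A hedged usage sketch (model name `App.Person` is assumed):
-  //
-  //     var person = store.createRecord(App.Person, { name: "Yehuda" });
-  //     person.get('isNew');  //=> true
-  //     store.commit();       // hands the new record to the adapter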
-
-  // .................
-  // . DELETE RECORD .
-  // .................
-
-  /**
-    For symmetry, a record can be deleted via the store.
-
-    @param {DS.Model} record
-  */
-  deleteRecord: function(record) {
-    record.send('deleteRecord');
-  },
-
-  // ................
-  // . FIND RECORDS .
-  // ................
-
-  /**
-    This is the main entry point into finding records. The first
-    parameter to this method is always a subclass of `DS.Model`.
-
-    You can use the `find` method on a subclass of `DS.Model`
-    directly if your application only has one store. For
-    example, instead of `store.find(App.Person, 1)`, you could
-    say `App.Person.find(1)`.
-
-    ---
-
-    To find a record by ID, pass the `id` as the second parameter:
-
-        store.find(App.Person, 1);
-        App.Person.find(1);
-
-    If the record with that `id` has not previously been loaded,
-    the store will return an empty record immediately and ask
-    the adapter to find the data by calling the adapter's `find`
-    method.
-
-    The `find` method will always return the same object for a
-    given type and `id`. To check whether the adapter has populated
-    a record, you can check its `isLoaded` property.
-
-    ---
-
-    To find all records for a type, call `find` with no additional
-    parameters:
-
-        store.find(App.Person);
-        App.Person.find();
-
-    This will return a `RecordArray` representing all known records
-    for the given type and kick off a request to the adapter's
-    `findAll` method to load any additional records for the type.
-
-    The `RecordArray` returned by `find()` is live. If any more
-    records for the type are added at a later time through any
-    mechanism, it will automatically update to reflect the change.
-
-    ---
-
-    To find a record by a query, call `find` with a hash as the
-    second parameter:
-
-        store.find(App.Person, { page: 1 });
-        App.Person.find({ page: 1 });
-
-    This will return a `RecordArray` immediately, but it will always
-    be an empty `RecordArray` at first. It will call the adapter's
-    `findQuery` method, which will populate the `RecordArray` once
-    the server has returned results.
-
-    You can check whether a query's `RecordArray` has loaded by
-    checking its `isLoaded` property.
-  */
-  find: function(type, id, query) {
-    if (id === undefined) {
-      return this.findAll(type);
-    }
-
-    if (query !== undefined) {
-      return this.findMany(type, id, query);
-    } else if (Ember.typeOf(id) === 'object') {
-      return this.findQuery(type, id);
-    }
-
-    if (Ember.isArray(id)) {
-      return this.findMany(type, id);
-    }
-
-    var clientId = this.typeMapFor(type).idToCid[id];
-
-    return this.findByClientId(type, clientId, id);
-  },
-
-  findByClientId: function(type, clientId, id) {
-    var recordCache = get(this, 'recordCache'),
-        dataCache, record;
-
-    // If there is already a clientId assigned for this
-    // type/id combination, try to find an existing
-    // record for that id and return. Otherwise,
-    // materialize a new record and set its data to the
-    // value we already have.
-    if (clientId !== undefined) {
-      record = recordCache[clientId];
-
-      if (!record) {
-        // create a new instance of the model type in the
-        // 'isLoading' state
-        record = this.materializeRecord(type, clientId);
-
-        dataCache = this.typeMapFor(type).cidToHash;
-
-        if (typeof dataCache[clientId] === 'object') {
-          record.send('didChangeData');
-        }
-      }
-    } else {
-      clientId = this.pushHash(LOADING, id, type);
-
-      // create a new instance of the model type in the
-      // 'isLoading' state
-      record = this.materializeRecord(type, clientId, id);
-
-      // let the adapter set the data, possibly async
-      var adapter = get(this, '_adapter');
-      if (adapter && adapter.find) { adapter.find(this, type, id); }
-      else { throw new Error("Adapter is either null or does not implement the `find` method"); }
-    }
-
-    return record;
-  },
-
-  /**
-    @private
-
-    Given a type and array of `clientId`s, determines which of those
-    `clientId`s has not yet been loaded.
-
-    In preparation for loading, this method also marks any unloaded
-    `clientId`s as loading.
-  */
-  neededClientIds: function(type, clientIds) {
-    var neededClientIds = [],
-        typeMap = this.typeMapFor(type),
-        dataCache = typeMap.cidToHash,
-        clientId;
-
-    for (var i=0, l=clientIds.length; i<l; i++) {
-      clientId = clientIds[i];
-      if (dataCache[clientId] === UNLOADED) {
-        neededClientIds.push(clientId);
-        dataCache[clientId] = LOADING;
-      }
-    }
-
-    return neededClientIds;
-  },
-
-  /**
-    @private
-
-    This method is the entry point that associations use to update
-    themselves when their underlying data changes.
-
-    First, it determines which of its `clientId`s are still unloaded,
-    then converts the needed `clientId`s to IDs and invokes `findMany`
-    on the adapter.
-  */
-  fetchUnloadedClientIds: function(type, clientIds) {
-    var neededClientIds = this.neededClientIds(type, clientIds);
-    this.fetchMany(type, neededClientIds);
-  },
-
-  /**
-    @private
-
-    This method takes a type and list of `clientId`s, converts the
-    `clientId`s into IDs, and then invokes the adapter's `findMany`
-    method.
-
-    It is used both by a brand new association (via the `findMany`
-    method) and when the data underlying an existing association
-    changes (via the `fetchUnloadedClientIds` method).
-  */
-  fetchMany: function(type, clientIds) {
-    var clientIdToId = this.clientIdToId;
-
-    var neededIds = Ember.EnumerableUtils.map(clientIds, function(clientId) {
-      return clientIdToId[clientId];
-    });
-
-    if (!neededIds.length) { return; }
-
-    var adapter = get(this, '_adapter');
-    if (adapter && adapter.findMany) { adapter.findMany(this, type, neededIds); }
-    else { throw new Error("Adapter is either null or does not implement the `findMany` method"); }
-  },
-
-  /**
-    @private
-
-    `findMany` is the entry point that associations use to generate a
-    new `ManyArray` for the list of IDs specified by the server for
-    the association.
-
-    Its responsibilities are:
-
-    * convert the IDs into clientIds
-    * determine which of the clientIds still need to be loaded
-    * create a new ManyArray whose content is *all* of the clientIds
-    * notify the ManyArray of the number of its elements that are
-      already loaded
-    * insert the unloaded clientIds into the `loadingRecordArrays`
-      bookkeeping structure, which will allow the `ManyArray` to know
-      when all of its loading elements are loaded from the server.
-    * ask the adapter to load the unloaded elements, by invoking
-      findMany with the still-unloaded IDs.
-  */
-  findMany: function(type, ids) {
-    // 1. Convert ids to client ids
-    // 2. Determine which of the client ids need to be loaded
-    // 3. Create a new ManyArray whose content is ALL of the clientIds
-    // 4. Decrement the ManyArray's counter by the number of loaded clientIds
-    // 5. Put the ManyArray into our bookkeeping data structure, keyed on
-    //    the needed clientIds
-    // 6. Ask the adapter to load the records for the unloaded clientIds (but
-    //    convert them back to ids)
-
-    var clientIds = this.clientIdsForIds(type, ids);
-
-    var neededClientIds = this.neededClientIds(type, clientIds),
-        manyArray = this.createManyArray(type, Ember.A(clientIds)),
-        loadedCount = clientIds.length - neededClientIds.length,
-        loadingRecordArrays = this.loadingRecordArrays,
-        clientId, i, l;
-
-    manyArray.send('loadedRecords', loadedCount);
-
-    if (neededClientIds.length) {
-      for (i=0, l=neededClientIds.length; i<l; i++) {
-        clientId = neededClientIds[i];
-        if (loadingRecordArrays[clientId]) {
-          loadingRecordArrays[clientId].push(manyArray);
-        } else {
-          this.loadingRecordArrays[clientId] = [ manyArray ];
-        }
-      }
-
-      this.fetchMany(type, neededClientIds);
-    }
-
-    return manyArray;
-  },
-
-  findQuery: function(type, query) {
-    var array = DS.AdapterPopulatedRecordArray.create({ type: type, content: Ember.A([]), store: this });
-    var adapter = get(this, '_adapter');
-    if (adapter && adapter.findQuery) { adapter.findQuery(this, type, query, array); }
-    else { throw new Error("Adapter is either null or does not implement the `findQuery` method"); }
-    return array;
-  },
-
-  findAll: function(type) {
-
-    var typeMap = this.typeMapFor(type),
-        findAllCache = typeMap.findAllCache;
-
-    if (findAllCache) { return findAllCache; }
-
-    var array = DS.RecordArray.create({ type: type, content: Ember.A([]), store: this });
-    this.registerRecordArray(array, type);
-
-    var adapter = get(this, '_adapter');
-    if (adapter && adapter.findAll) { adapter.findAll(this, type); }
-
-    typeMap.findAllCache = array;
-    return array;
-  },
-
-  filter: function(type, query, filter) {
-    // allow an optional server query
-    if (arguments.length === 3) {
-      this.findQuery(type, query);
-    } else if (arguments.length === 2) {
-      filter = query;
-    }
-
-    var array = DS.FilteredRecordArray.create({ type: type, content: Ember.A([]), store: this, filterFunction: filter });
-
-    this.registerRecordArray(array, type, filter);
-
-    return array;
-  },
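-
-  // A hedged usage sketch: the filter function receives a data proxy,
-  // so attributes are read with `get` (model and attribute assumed):
-  //
-  //     var adults = store.filter(App.Person, function(data) {
-  //       return data.get('age') >= 18;
-  //     });
-  //
-  // The returned array stays live: records are added or removed as
-  // their underlying data starts or stops matching the filter.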
-
-  recordIsLoaded: function(type, id) {
-    return !Ember.none(this.typeMapFor(type).idToCid[id]);
-  },
-
-  // ............
-  // . UPDATING .
-  // ............
-
-  hashWasUpdated: function(type, clientId, record) {
-    // Because hash updates are invoked at the end of the run loop,
-    // it is possible that a record might be deleted after its hash
-    // has been modified and this method was scheduled to be called.
-    //
-    // If that's the case, the record would have already been removed
-    // from all record arrays; calling updateRecordArrays would just
-    // add it back. If the record is deleted, just bail. It shouldn't
-    // give us any more trouble after this.
-
-    if (get(record, 'isDeleted')) { return; }
-    this.updateRecordArrays(type, clientId, get(record, 'data'));
-  },
-
-  // ..............
-  // . PERSISTING .
-  // ..............
-
-  commit: function() {
-    var defaultTransaction = get(this, 'defaultTransaction');
-    set(this, 'defaultTransaction', this.transaction());
-
-    defaultTransaction.commit();
-  },
-
-  didUpdateRecords: function(array, hashes) {
-    if (hashes) {
-      array.forEach(function(record, idx) {
-        this.didUpdateRecord(record, hashes[idx]);
-      }, this);
-    } else {
-      array.forEach(function(record) {
-        this.didUpdateRecord(record);
-      }, this);
-    }
-  },
-
-  didUpdateRecord: function(record, hash) {
-    if (hash) {
-      var clientId = get(record, 'clientId'),
-          dataCache = this.typeMapFor(record.constructor).cidToHash;
-
-      dataCache[clientId] = hash;
-      record.send('didChangeData');
-      record.hashWasUpdated();
-    } else {
-      record.send('didSaveData');
-    }
-
-    record.send('didCommit');
-  },
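-
-  // A hedged sketch of how an adapter might call back into the store
-  // after a successful save. The `updateRecord` hook, URL, and
-  // `jQuery.ajax` usage are assumptions, not part of this file:
-  //
-  //     updateRecord: function(store, type, record) {
-  //       jQuery.ajax({
-  //         url: '/people/' + record.get('id'),
-  //         type: 'PUT',
-  //         data: record.toJSON(),
-  //         success: function(json) {
-  //           // passing the server's hash replaces the cached data
-  //           // before the record is marked clean
-  //           store.didUpdateRecord(record, json);
-  //         }
-  //       });
-  //     }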
-
-  didDeleteRecords: function(array) {
-    array.forEach(function(record) {
-      record.send('didCommit');
-    });
-  },
-
-  didDeleteRecord: function(record) {
-    record.send('didCommit');
-  },
-
-  _didCreateRecord: function(record, hash, typeMap, clientId, primaryKey) {
-    var recordData = get(record, 'data'), id;
-
-    if (hash) {
-      typeMap.cidToHash[clientId] = hash;
-
-      // If the server returns a hash, we assume that the server's version
-      // of the data supersedes the local changes.
-      record.beginPropertyChanges();
-      record.send('didChangeData');
-      recordData.adapterDidUpdate();
-      record.hashWasUpdated();
-      record.endPropertyChanges();
-
-      id = hash[primaryKey];
-
-      typeMap.idToCid[id] = clientId;
-      this.clientIdToId[clientId] = id;
-    } else {
-      recordData.commit();
-    }
-
-    record.send('didCommit');
-  },
-
-
-  didCreateRecords: function(type, array, hashes) {
-    var primaryKey = type.proto().primaryKey,
-        typeMap = this.typeMapFor(type),
-        clientId;
-
-    for (var i=0, l=get(array, 'length'); i<l; i++) {
-      var record = array[i], hash = hashes[i];
-      clientId = get(record, 'clientId');
-
-      this._didCreateRecord(record, hash, typeMap, clientId, primaryKey);
-    }
-  },
-
-  didCreateRecord: function(record, hash) {
-    var type = record.constructor,
-        typeMap = this.typeMapFor(type),
-        clientId, primaryKey;
-
-    // The hash is optional, but if it is not provided, the client must have
-    // provided a primary key.
-
-    primaryKey = type.proto().primaryKey;
-
-    // TODO: Make Ember.assert more flexible
-    if (hash) {
-      Ember.assert("The server must provide a primary key: " + primaryKey, get(hash, primaryKey));
-    } else {
-      Ember.assert("The server did not return data, and you did not create a primary key (" + primaryKey + ") on the client", get(get(record, 'data'), primaryKey));
-    }
-
-    clientId = get(record, 'clientId');
-
-    this._didCreateRecord(record, hash, typeMap, clientId, primaryKey);
-  },
-
-  recordWasInvalid: function(record, errors) {
-    record.send('becameInvalid', errors);
-  },
-
-  // .................
-  // . RECORD ARRAYS .
-  // .................
-
-  registerRecordArray: function(array, type, filter) {
-    var recordArrays = this.typeMapFor(type).recordArrays;
-
-    recordArrays.push(array);
-
-    this.updateRecordArrayFilter(array, type, filter);
-  },
-
-  createManyArray: function(type, clientIds) {
-    var array = DS.ManyArray.create({ type: type, content: clientIds, store: this });
-
-    clientIds.forEach(function(clientId) {
-      var recordArrays = this.recordArraysForClientId(clientId);
-      recordArrays.add(array);
-    }, this);
-
-    return array;
-  },
-
-  updateRecordArrayFilter: function(array, type, filter) {
-    var typeMap = this.typeMapFor(type),
-        dataCache = typeMap.cidToHash,
-        clientIds = typeMap.clientIds,
-        clientId, hash, proxy;
-
-    var recordCache = get(this, 'recordCache'),
-        foundRecord,
-        record;
-
-    for (var i=0, l=clientIds.length; i<l; i++) {
-      clientId = clientIds[i];
-      foundRecord = false;
-
-      hash = dataCache[clientId];
-      if (typeof hash === 'object') {
-        if (record = recordCache[clientId]) {
-          if (!get(record, 'isDeleted')) {
-            proxy = get(record, 'data');
-            foundRecord = true;
-          }
-        } else {
-          DATA_PROXY.savedData = hash;
-          proxy = DATA_PROXY;
-          foundRecord = true;
-        }
-
-        if (foundRecord) { this.updateRecordArray(array, filter, type, clientId, proxy); }
-      }
-    }
-  },
-
-  updateRecordArrays: function(type, clientId, dataProxy) {
-    var recordArrays = this.typeMapFor(type).recordArrays,
-        filter;
-
-    recordArrays.forEach(function(array) {
-      filter = get(array, 'filterFunction');
-      this.updateRecordArray(array, filter, type, clientId, dataProxy);
-    }, this);
-
-    // loop through all manyArrays containing an unloaded copy of this
-    // clientId and notify them that the record was loaded.
-    var manyArrays = this.loadingRecordArrays[clientId], manyArray;
-
-    if (manyArrays) {
-      for (var i=0, l=manyArrays.length; i<l; i++) {
-        manyArrays[i].send('loadedRecords', 1);
-      }
-
-      this.loadingRecordArrays[clientId] = null;
-    }
-  },
-
-  updateRecordArray: function(array, filter, type, clientId, dataProxy) {
-    var shouldBeInArray;
-
-    if (!filter) {
-      shouldBeInArray = true;
-    } else {
-      shouldBeInArray = filter(dataProxy);
-    }
-
-    var content = get(array, 'content');
-    var alreadyInArray = content.indexOf(clientId) !== -1;
-
-    var recordArrays = this.recordArraysForClientId(clientId);
-
-    if (shouldBeInArray && !alreadyInArray) {
-      recordArrays.add(array);
-      content.pushObject(clientId);
-    } else if (!shouldBeInArray && alreadyInArray) {
-      recordArrays.remove(array);
-      content.removeObject(clientId);
-    }
-  },
-
-  removeFromRecordArrays: function(record) {
-    var clientId = get(record, 'clientId');
-    var recordArrays = this.recordArraysForClientId(clientId);
-
-    recordArrays.forEach(function(array) {
-      var content = get(array, 'content');
-      content.removeObject(clientId);
-    });
-  },
-
-  // ............
-  // . INDEXING .
-  // ............
-
-  recordArraysForClientId: function(clientId) {
-    var recordArrays = get(this, 'recordArraysByClientId');
-    var ret = recordArrays[clientId];
-
-    if (!ret) {
-      ret = recordArrays[clientId] = Ember.OrderedSet.create();
-    }
-
-    return ret;
-  },
-
-  typeMapFor: function(type) {
-    var typeMaps = get(this, 'typeMaps');
-    var guidForType = Ember.guidFor(type);
-
-    var typeMap = typeMaps[guidForType];
-
-    if (typeMap) {
-      return typeMap;
-    } else {
-      return (typeMaps[guidForType] =
-        {
-          idToCid: {},
-          clientIds: [],
-          cidToHash: {},
-          recordArrays: []
-      });
-    }
-  },
-
-  /** @private
-
-    For a given type and id combination, returns the client id used by the store.
-    If no client id has been assigned yet, one will be created and returned.
-
-    @param {DS.Model} type
-    @param {String|Number} id
-  */
-  clientIdForId: function(type, id) {
-    var clientId = this.typeMapFor(type).idToCid[id];
-
-    if (clientId !== undefined) { return clientId; }
-
-    return this.pushHash(UNLOADED, id, type);
-  },
-
-  /**
-    @private
-
-    This method works exactly like `clientIdForId`, but does not
-    require looking up the `typeMap` for every `clientId` and
-    invoking a method per `clientId`.
-  */
-  clientIdsForIds: function(type, ids) {
-    var typeMap = this.typeMapFor(type),
-        idToClientIdMap = typeMap.idToCid;
-
-    return Ember.EnumerableUtils.map(ids, function(id) {
-      var clientId = idToClientIdMap[id];
-      if (clientId) { return clientId; }
-      return this.pushHash(UNLOADED, id, type);
-    }, this);
-  },
-
-  // ................
-  // . LOADING DATA .
-  // ................
-
-  /**
-    Load a new data hash into the store for a given id and type combination.
-    If data for that record had been loaded previously, the new information
-    overwrites the old.
-
-    If the record you are loading data for has outstanding changes that have not
-    yet been saved, an exception will be thrown.
-
-    @param {DS.Model} type
-    @param {String|Number} id
-    @param {Object} hash the data hash to load
-  */
-  load: function(type, id, hash) {
-    if (hash === undefined) {
-      hash = id;
-      var primaryKey = type.proto().primaryKey;
-      Ember.assert("A data hash was loaded for a record of type " + type.toString() + " but no primary key '" + primaryKey + "' was provided.", primaryKey in hash);
-      id = hash[primaryKey];
-    }
-
-    var typeMap = this.typeMapFor(type),
-        dataCache = typeMap.cidToHash,
-        clientId = typeMap.idToCid[id],
-        recordCache = get(this, 'recordCache');
-
-    if (clientId !== undefined) {
-      dataCache[clientId] = hash;
-
-      var record = recordCache[clientId];
-      if (record) {
-        record.send('didChangeData');
-      }
-    } else {
-      clientId = this.pushHash(hash, id, type);
-    }
-
-    DATA_PROXY.savedData = hash;
-    this.updateRecordArrays(type, clientId, DATA_PROXY);
-
-    return { id: id, clientId: clientId };
-  },
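-
-  // Usage sketch (model name assumed). The id may be passed explicitly
-  // or extracted from the hash via the model's `primaryKey`:
-  //
-  //     store.load(App.Person, 1, { id: 1, name: "Tom" });
-  //     store.load(App.Person, { id: 2, name: "Yehuda" }); // id inferred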
-
-  loadMany: function(type, ids, hashes) {
-    var clientIds = Ember.A([]);
-
-    if (hashes === undefined) {
-      hashes = ids;
-      ids = [];
-      var primaryKey = type.proto().primaryKey;
-
-      ids = Ember.EnumerableUtils.map(hashes, function(hash) {
-        return hash[primaryKey];
-      });
-    }
-
-    for (var i=0, l=get(ids, 'length'); i<l; i++) {
-      var loaded = this.load(type, ids[i], hashes[i]);
-      clientIds.pushObject(loaded.clientId);
-    }
-
-    return { clientIds: clientIds, ids: ids };
-  },
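-
-  // Usage sketch (model name assumed): the ids argument may be omitted
-  // when each hash carries the primary key.
-  //
-  //     store.loadMany(App.Person, [
-  //       { id: 1, name: "Tom" },
-  //       { id: 2, name: "Yehuda" }
-  //     ]);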
-
-  /** @private
-
-    Stores a data hash for the specified type and id combination and returns
-    the client id.
-
-    @param {Object} hash
-    @param {String|Number} id
-    @param {DS.Model} type
-    @returns {Number}
-  */
-  pushHash: function(hash, id, type) {
-    var typeMap = this.typeMapFor(type);
-
-    var idToClientIdMap = typeMap.idToCid,
-        clientIdToIdMap = this.clientIdToId,
-        clientIds = typeMap.clientIds,
-        dataCache = typeMap.cidToHash;
-
-    var clientId = ++this.clientIdCounter;
-
-    dataCache[clientId] = hash;
-
-    // if we're creating an item, this process will be done
-    // later, once the object has been persisted.
-    if (id) {
-      idToClientIdMap[id] = clientId;
-      clientIdToIdMap[clientId] = id;
-    }
-
-    clientIds.push(clientId);
-
-    return clientId;
-  },
-
-  // ..........................
-  // . RECORD MATERIALIZATION .
-  // ..........................
-
-  materializeRecord: function(type, clientId, id) {
-    var record;
-
-    get(this, 'recordCache')[clientId] = record = type._create({
-      store: this,
-      clientId: clientId,
-      _id: id
-    });
-
-    get(this, 'defaultTransaction').adoptRecord(record);
-
-    record.send('loadingData');
-    return record;
-  },
-
-  destroy: function() {
-    if (get(DS, 'defaultStore') === this) {
-      set(DS, 'defaultStore', null);
-    }
-
-    return this._super();
-  }
-});
-
-})();
-
-
-
-(function() {
-var get = Ember.get, set = Ember.set, guidFor = Ember.guidFor;
-
-/**
-  This file encapsulates the various states that a record can transition
-  through during its lifecycle.
-
-  ### State Manager
-
-  A record's state manager explicitly tracks what state a record is in
-  at any given time. For instance, if a record is newly created and has
-  not yet been sent to the adapter to be saved, it would be in the
-  `created.uncommitted` state.  If a record has had local modifications
-  made to it that are in the process of being saved, the record would be
-  in the `updated.inFlight` state. (These state paths will be explained
-  in more detail below.)
-
-  Events are sent by the record or its store to the record's state manager.
-  How the state manager reacts to these events is dependent on which state
-  it is in. In some states, certain events will be invalid and will cause
-  an exception to be raised.
-
-  States are hierarchical. For example, a record can be in the
-  `deleted.start` state, then transition into the `deleted.inFlight` state.
-  If a child state does not implement an event handler, the state manager
-  will attempt to invoke the event on all parent states until the root state is
-  reached. The state hierarchy of a record is described in terms of a path
-  string. You can determine a record's current state by getting its manager's
-  current state path:
-
-        record.get('stateManager.currentState.path');
-        //=> "created.uncommitted"
-
-  The `DS.Model` states are themselves stateless. What we mean is that,
-  though each instance of a record also has a unique instance of a
-  `DS.StateManager`, the hierarchical states that each of *those* points
-  to is a shared data structure. For performance reasons, instead of each
-  record getting its own copy of the hierarchy of states, each state
-  manager points to this global, immutable shared instance. How does a
-  state know which record it should be acting on?  We pass a reference to
-  the current state manager as the first parameter to every method invoked
-  on a state.
-
-  The state manager passed as the first parameter is where you should stash
-  state about the record if needed; you should never store data on the state
-  object itself. If you need access to the record being acted on, you can
-  retrieve the state manager's `record` property. For example, if you had
-  an event handler `myEvent`:
-
-      myEvent: function(manager) {
-        var record = manager.get('record');
-        record.doSomething();
-      }
-
-  For more information about state managers in general, see the Ember.js
-  documentation on `Ember.StateManager`.
-
-  ### Events, Flags, and Transitions
-
-  A state may implement zero or more events, flags, or transitions.
-
-  #### Events
-
-  Events are named functions that are invoked when sent to a record. The
-  state manager will first look for a method with the given name on the
-  current state. If no method is found, it will search the current state's
-  parent, and then its grandparent, and so on until reaching the top of
-  the hierarchy. If the root is reached without an event handler being found,
-  an exception will be raised. This can be very helpful when debugging new
-  features.
-
-  Here's an example implementation of a state with a `myEvent` event handler:
-
-      aState: DS.State.create({
-        myEvent: function(manager, param) {
-          console.log("Received myEvent with "+param);
-        }
-      })
-
-  To trigger this event:
-
-      record.send('myEvent', 'foo');
-      //=> "Received myEvent with foo"
-
-  Note that an optional parameter can be sent to a record's `send()` method,
-  which will be passed as the second parameter to the event handler.
-
-  Events should transition to a different state if appropriate. This can be
-  done by calling the state manager's `goToState()` method with a path to the
-  desired state. The state manager will attempt to resolve the state path
-  relative to the current state. If no state is found at that path, it will
-  attempt to resolve it relative to the current state's parent, and then its
-  parent, and so on until the root is reached. For example, imagine a hierarchy
-  like this:
-
-      * created
-        * start <-- currentState
-        * inFlight
-      * updated
-        * inFlight
-
-  If we are currently in the `start` state, calling
-  `goToState('inFlight')` would transition to the `created.inFlight` state,
-  while calling `goToState('updated.inFlight')` would transition to
-  the `updated.inFlight` state.
-
-  Remember that *only events* should ever cause a state transition. You should
-  never call `goToState()` from outside a state's event handler. If you are
-  tempted to do so, create a new event and send that to the state manager.
-
-  #### Flags
-
-  Flags are Boolean values that can be used to introspect a record's current
-  state in a more user-friendly way than examining its state path. For example,
-  instead of doing this:
-
-      var statePath = record.get('stateManager.currentState.path');
-      if (statePath === 'created.inFlight') {
-        doSomething();
-      }
-
-  You can say:
-
-      if (record.get('isNew') && record.get('isSaving')) {
-        doSomething();
-      }
-
-  If your state does not set a value for a given flag, the value will
-  be inherited from its parent (or the first place in the state hierarchy
-  where it is defined).
-
-  The current set of flags are defined below. If you want to add a new flag,
-  in addition to the area below, you will also need to declare it in the
-  `DS.Model` class.
-
-  #### Transitions
-
-  Transitions are like event handlers but are called automatically upon
-  entering or exiting a state. To implement a transition, define a method
-  named either `enter` or `exit`:
-
-      myState: DS.State.create({
-        // Gets called automatically when entering
-        // this state.
-        enter: function(manager) {
-          console.log("Entered myState");
-        }
-      })
-
-   Note that `enter` and `exit` are called once per transition. If the
-   current state changes to a sibling state (another child of the same
-   parent), the parent's transition events will not be triggered.
-*/
-
-var stateProperty = Ember.computed(function(key) {
-  var parent = get(this, 'parentState');
-  if (parent) {
-    return get(parent, key);
-  }
-}).property();
-
-var isEmptyObject = function(object) {
-  for (var name in object) {
-    if (object.hasOwnProperty(name)) { return false; }
-  }
-
-  return true;
-};
-
-var hasDefinedProperties = function(object) {
-  for (var name in object) {
-    if (object.hasOwnProperty(name) && object[name]) { return true; }
-  }
-
-  return false;
-};
-
-DS.State = Ember.State.extend({
-  isLoaded: stateProperty,
-  isDirty: stateProperty,
-  isSaving: stateProperty,
-  isDeleted: stateProperty,
-  isError: stateProperty,
-  isNew: stateProperty,
-  isValid: stateProperty,
-  isPending: stateProperty,
-
-  // For states that are substates of a
-  // DirtyState (updated or created), it is
-  // useful to be able to determine which
-  // type of dirty state it is.
-  dirtyType: stateProperty
-});
-
-var setProperty = function(manager, context) {
-  var key = context.key, value = context.value;
-
-  var record = get(manager, 'record'),
-      data = get(record, 'data');
-
-  set(data, key, value);
-};
-
-var setAssociation = function(manager, context) {
-  var key = context.key, value = context.value;
-
-  var record = get(manager, 'record'),
-      data = get(record, 'data');
-
-  data.setAssociation(key, value);
-};
-
-var didChangeData = function(manager) {
-  var record = get(manager, 'record'),
-      data = get(record, 'data');
-
-  data._savedData = null;
-  record.notifyPropertyChange('data');
-};
-
-// The waitingOn event shares common functionality
-// between the different dirty states, but each is
-// treated slightly differently. This method is exposed
-// so that each implementation can invoke the common
-// behavior, and then implement the behavior specific
-// to the state.
-var waitingOn = function(manager, object) {
-  var record = get(manager, 'record'),
-      pendingQueue = get(record, 'pendingQueue'),
-      objectGuid = guidFor(object);
-
-  var observer = function() {
-    if (get(object, 'id')) {
-      manager.send('doneWaitingOn', object);
-      Ember.removeObserver(object, 'id', observer);
-    }
-  };
-
-  pendingQueue[objectGuid] = [object, observer];
-  Ember.addObserver(object, 'id', observer);
-};
-
-// Implementation notes:
-//
-// Each state has a boolean value for all of the following flags:
-//
-// * isLoaded: The record has a populated `data` property. When a
-//   record is loaded via `store.find`, `isLoaded` is false
-//   until the adapter sets it. When a record is created locally,
-//   its `isLoaded` property is always true.
-// * isDirty: The record has local changes that have not yet been
-//   saved by the adapter. This includes records that have been
-//   created (but not yet saved) or deleted.
-// * isSaving: The record's transaction has been committed, but
-//   the adapter has not yet acknowledged that the changes have
-//   been persisted to the backend.
-// * isDeleted: The record was marked for deletion. When `isDeleted`
-//   is true and `isDirty` is true, the record is deleted locally
-//   but the deletion was not yet persisted. When `isSaving` is
-//   true, the change is in-flight. When both `isDirty` and
-//   `isSaving` are false, the change has persisted.
-// * isError: The adapter reported that it was unable to save
-//   local changes to the backend. This may also result in the
-//   record having its `isValid` property become false if the
-//   adapter reported that server-side validations failed.
-// * isNew: The record was created on the client and the adapter
-//   did not yet report that it was successfully saved.
-// * isValid: No client-side validations have failed and the
-//   adapter did not report any server-side validation failures.
-// * isPending: A record `isPending` when it belongs to an
-//   association on another record and that record has not been
-//   saved. A record in this state cannot be saved because it
-//   lacks a "foreign key" that will be supplied by its parent
-//   association when the parent record has been created. When
-//   the adapter reports that the parent has saved, the
-//   `isPending` property on all children will become `false`
-//   and the transaction will try to commit the records.
-
-// This mixin is mixed into various uncommitted states. Make
-// sure to mix it in *after* the class definition, so its
-// super points to the class definition.
-var Uncommitted = Ember.Mixin.create({
-  setProperty: setProperty,
-  setAssociation: setAssociation
-});
-
-// These mixins are mixed into substates of the concrete
-// subclasses of DirtyState.
-
-var CreatedUncommitted = Ember.Mixin.create({
-  deleteRecord: function(manager) {
-    var record = get(manager, 'record');
-    this._super(manager);
-
-    record.withTransaction(function(t) {
-      t.recordBecameClean('created', record);
-    });
-    manager.goToState('deleted.saved');
-  }
-});
-
-var UpdatedUncommitted = Ember.Mixin.create({
-  deleteRecord: function(manager) {
-    this._super(manager);
-
-    var record = get(manager, 'record');
-
-    record.withTransaction(function(t) {
-      t.recordBecameClean('updated', record);
-    });
-
-    manager.goToState('deleted');
-  }
-});
-
-// The dirty state is an abstract state whose functionality is
-// shared between the `created` and `updated` states.
-//
-// The deleted state shares the `isDirty` flag with the
-// subclasses of `DirtyState`, but with a very different
-// implementation.
-var DirtyState = DS.State.extend({
-  initialState: 'uncommitted',
-
-  // FLAGS
-  isDirty: true,
-
-  // SUBSTATES
-
-  // When a record first becomes dirty, it is `uncommitted`.
-  // This means that there are local pending changes,
-  // but they have not yet begun to be saved.
-  uncommitted: DS.State.extend({
-    // TRANSITIONS
-    enter: function(manager) {
-      var dirtyType = get(this, 'dirtyType'),
-          record = get(manager, 'record');
-
-      record.withTransaction(function (t) {
-        t.recordBecameDirty(dirtyType, record);
-      });
-    },
-
-    // EVENTS
-    deleteRecord: Ember.K,
-
-    waitingOn: function(manager, object) {
-      waitingOn(manager, object);
-      manager.goToState('pending');
-    },
-
-    willCommit: function(manager) {
-      manager.goToState('inFlight');
-    },
-
-    becameInvalid: function(manager) {
-      var dirtyType = get(this, 'dirtyType'),
-          record = get(manager, 'record');
-
-      record.withTransaction(function (t) {
-        t.recordBecameInFlight(dirtyType, record);
-      });
-
-      manager.goToState('invalid');
-    },
-
-    rollback: function(manager) {
-      var record = get(manager, 'record'),
-          dirtyType = get(this, 'dirtyType'),
-          data = get(record, 'data');
-
-      data.rollback();
-
-      record.withTransaction(function(t) {
-        t.recordBecameClean(dirtyType, record);
-      });
-
-      manager.goToState('saved');
-    }
-  }, Uncommitted),
-
-  // Once a record has been handed off to the adapter to be
-  // saved, it is in the 'in flight' state. Changes to the
-  // record cannot be made during this window.
-  inFlight: DS.State.extend({
-    // FLAGS
-    isSaving: true,
-
-    // TRANSITIONS
-    enter: function(manager) {
-      var dirtyType = get(this, 'dirtyType'),
-          record = get(manager, 'record');
-
-      record.withTransaction(function (t) {
-        t.recordBecameInFlight(dirtyType, record);
-      });
-    },
-
-    // EVENTS
-    didCommit: function(manager) {
-      var dirtyType = get(this, 'dirtyType'),
-          record = get(manager, 'record');
-
-      record.withTransaction(function(t) {
-        t.recordBecameClean('inflight', record);
-      });
-
-      manager.goToState('saved');
-      manager.send('invokeLifecycleCallbacks', dirtyType);
-    },
-
-    becameInvalid: function(manager, errors) {
-      var record = get(manager, 'record');
-
-      set(record, 'errors', errors);
-
-      manager.goToState('invalid');
-      manager.send('invokeLifecycleCallbacks');
-    },
-
-    becameError: function(manager) {
-      manager.goToState('error');
-      manager.send('invokeLifecycleCallbacks');
-    },
-
-    didChangeData: didChangeData
-  }),
-
-  // If a record becomes associated with a newly created
-  // parent record, it will be `pending` until the parent
-  // record has successfully persisted. Once this happens,
-  // this record can use the parent's primary key as its
-  // foreign key.
-  //
-  // If the record's transaction had already started to
-  // commit, the record will transition to the `inFlight`
-  // state. If it had not, the record will transition to
-  // the `uncommitted` state.
-  pending: DS.State.extend({
-    initialState: 'uncommitted',
-
-    // FLAGS
-    isPending: true,
-
-    // SUBSTATES
-
-    // A pending record whose transaction has not yet
-    // started to commit is in this state.
-    uncommitted: DS.State.extend({
-      // EVENTS
-      deleteRecord: function(manager) {
-        var record = get(manager, 'record'),
-            pendingQueue = get(record, 'pendingQueue'),
-            tuple;
-
-        // since we are leaving the pending state, remove any
-        // observers we have registered on other records.
-        for (var prop in pendingQueue) {
-          if (!pendingQueue.hasOwnProperty(prop)) { continue; }
-
-          tuple = pendingQueue[prop];
-          Ember.removeObserver(tuple[0], 'id', tuple[1]);
-        }
-      },
-
-      willCommit: function(manager) {
-        manager.goToState('committing');
-      },
-
-      doneWaitingOn: function(manager, object) {
-        var record = get(manager, 'record'),
-            pendingQueue = get(record, 'pendingQueue'),
-            objectGuid = guidFor(object);
-
-        delete pendingQueue[objectGuid];
-
-        if (isEmptyObject(pendingQueue)) {
-          manager.send('doneWaiting');
-        }
-      },
-
-      doneWaiting: function(manager) {
-        var dirtyType = get(this, 'dirtyType');
-        manager.goToState(dirtyType + '.uncommitted');
-      }
-    }, Uncommitted),
-
-    // A pending record whose transaction has started
-    // to commit is in this state. Since it has not yet
-    // been sent to the adapter, it is not `inFlight`
-    // until all of its dependencies have been committed.
-    committing: DS.State.extend({
-      // FLAGS
-      isSaving: true,
-
-      // EVENTS
-      doneWaitingOn: function(manager, object) {
-        var record = get(manager, 'record'),
-            pendingQueue = get(record, 'pendingQueue'),
-            objectGuid = guidFor(object);
-
-        delete pendingQueue[objectGuid];
-
-        if (isEmptyObject(pendingQueue)) {
-          manager.send('doneWaiting');
-        }
-      },
-
-      doneWaiting: function(manager) {
-        var record = get(manager, 'record'),
-            transaction = get(record, 'transaction');
-
-        // Now that the record is no longer pending, schedule
-        // the transaction to commit.
-        Ember.run.once(transaction, transaction.commit);
-      },
-
-      willCommit: function(manager) {
-        var record = get(manager, 'record'),
-            pendingQueue = get(record, 'pendingQueue');
-
-        if (isEmptyObject(pendingQueue)) {
-          var dirtyType = get(this, 'dirtyType');
-          manager.goToState(dirtyType + '.inFlight');
-        }
-      }
-    })
-  }),
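-
-  // A hedged sketch of the pending flow above (models and an
-  // association that triggers `waitingOn` are assumptions):
-  //
-  //     var post    = store.createRecord(App.Post);
-  //     var comment = store.createRecord(App.Comment);
-  //     comment.set('post', post);  // comment now waits on post's id
-  //
-  //     store.commit();             // post saves first
-  //     comment.get('isPending');   //=> true until post's id arrives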
-
-  // A record is in the `invalid` state when its client-side
-  // validations have failed, or the adapter has indicated that
-  // the record failed server-side validations.
-  invalid: DS.State.extend({
-    // FLAGS
-    isValid: false,
-
-    exit: function(manager) {
-      var record = get(manager, 'record');
-
-      record.withTransaction(function (t) {
-        t.recordBecameClean('inflight', record);
-      });
-    },
-
-    // EVENTS
-    deleteRecord: function(manager) {
-      manager.goToState('deleted');
-    },
-
-    setAssociation: setAssociation,
-
-    setProperty: function(manager, context) {
-      setProperty(manager, context);
-
-      var record = get(manager, 'record'),
-          errors = get(record, 'errors'),
-          key = context.key;
-
-      set(errors, key, null);
-
-      if (!hasDefinedProperties(errors)) {
-        manager.send('becameValid');
-      }
-    },
-
-    rollback: function(manager) {
-      manager.send('becameValid');
-      manager.send('rollback');
-    },
-
-    becameValid: function(manager) {
-      manager.goToState('uncommitted');
-    },
-
-    invokeLifecycleCallbacks: function(manager) {
-      var record = get(manager, 'record');
-      record.trigger('becameInvalid', record);
-    }
-  })
-});
-
-// The created and updated states are created outside the state
-// chart so we can reopen their substates and add mixins as
-// necessary.
-
-var createdState = DirtyState.create({
-  dirtyType: 'created',
-
-  // FLAGS
-  isNew: true
-});
-
-var updatedState = DirtyState.create({
-  dirtyType: 'updated'
-});
-
-// The created.uncommitted state and created.pending.uncommitted share
-// some logic defined in CreatedUncommitted.
-createdState.states.uncommitted.reopen(CreatedUncommitted);
-createdState.states.pending.states.uncommitted.reopen(CreatedUncommitted);
-
-// The created.uncommitted state needs to immediately transition to the
-// deleted state if it is rolled back.
-createdState.states.uncommitted.reopen({
-  rollback: function(manager) {
-    this._super(manager);
-    manager.goToState('deleted.saved');
-  }
-});
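-
-// Illustrative sketch (store and model names assumed): rolling back a
-// record that was created but never saved discards it entirely.
-//
-//     var person = store.createRecord(App.Person);
-//     person.send('rollback');  // state manager event, as shown above
-//     person.get('isDeleted');  //=> true (now in `deleted.saved`)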
-
-// The updated.uncommitted state and updated.pending.uncommitted share
-// some logic defined in UpdatedUncommitted.
-updatedState.states.uncommitted.reopen(UpdatedUncommitted);
-updatedState.states.pending.states.uncommitted.reopen(UpdatedUncommitted);
-updatedState.states.inFlight.reopen({
-  didSaveData: function(manager) {
-    var record = get(manager, 'record'),
-        data = get(record, 'data');
-
-    data.saveData();
-    data.adapterDidUpdate();
-  }
-});
-
-var states = {
-  rootState: Ember.State.create({
-    // FLAGS
-    isLoaded: false,
-    isDirty: false,
-    isSaving: false,
-    isDeleted: false,
-    isError: false,
-    isNew: false,
-    isValid: true,
-    isPending: false,
-
-    // SUBSTATES
-
-    // A record begins its lifecycle in the `empty` state.
-    // If its data will come from the adapter, it will
-    // transition into the `loading` state. Otherwise, if
-    // the record is being created on the client, it will
-    // transition into the `created` state.
-    empty: DS.State.create({
-      // EVENTS
-      loadingData: function(manager) {
-        manager.goToState('loading');
-      },
-
-      didChangeData: function(manager) {
-        didChangeData(manager);
-
-        manager.goToState('loaded.created');
-      }
-    }),
-
-    // A record enters this state when the store asks
-    // the adapter for its data. It remains in this state
-    // until the adapter provides the requested data.
-    //
-    // Usually, this process is asynchronous, using an
-    // XHR to retrieve the data.
-    loading: DS.State.create({
-      // TRANSITIONS
-      exit: function(manager) {
-        var record = get(manager, 'record');
-        record.trigger('didLoad');
-      },
-
-      // EVENTS
-      didChangeData: function(manager, data) {
-        didChangeData(manager);
-        manager.send('loadedData');
-      },
-
-      loadedData: function(manager) {
-        manager.goToState('loaded');
-      }
-    }),
-
-    // A record enters this state when its data is populated.
-    // Most of a record's lifecycle is spent inside substates
-    // of the `loaded` state.
-    loaded: DS.State.create({
-      initialState: 'saved',
-
-      // FLAGS
-      isLoaded: true,
-
-      // SUBSTATES
-
-      // If there are no local changes to a record, it remains
-      // in the `saved` state.
-      saved: DS.State.create({
-
-        // EVENTS
-        setProperty: function(manager, context) {
-          setProperty(manager, context);
-          manager.goToState('updated');
-        },
-
-        setAssociation: function(manager, context) {
-          setAssociation(manager, context);
-          manager.goToState('updated');
-        },
-
-        didChangeData: didChangeData,
-
-        deleteRecord: function(manager) {
-          manager.goToState('deleted');
-        },
-
-        waitingOn: function(manager, object) {
-          waitingOn(manager, object);
-          manager.goToState('updated.pending');
-        },
-
-        invokeLifecycleCallbacks: function(manager, dirtyType) {
-          var record = get(manager, 'record');
-          if (dirtyType === 'created') {
-            record.trigger('didCreate', record);
-          } else {
-            record.trigger('didUpdate', record);
-          }
-        }
-      }),
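-
-      // Illustrative sketch: setting a DS.attr property on a clean
-      // record sends the `setProperty` event and moves it into the
-      // dirty states (record and attribute names assumed):
-      //
-      //     person.set('name', "Peter");
-      //     person.get('stateManager.currentState.path');
-      //     //=> "updated.uncommitted"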
-
-      // A record is in this state after it has been locally
-      // created but before the adapter has indicated that
-      // it has been saved.
-      created: createdState,
-
-      // A record is in this state if it has already been
-      // saved to the server, but there are new local changes
-      // that have not yet been saved.
-      updated: updatedState
-    }),
-
-    // A record is in this state if it was deleted from the store.
-    deleted: DS.State.create({
-      // FLAGS
-      isDeleted: true,
-      isLoaded: true,
-      isDirty: true,
-
-      // TRANSITIONS
-      enter: function(manager) {
-        var record = get(manager, 'record'),
-            store = get(record, 'store');
-
-        store.removeFromRecordArrays(record);
-      },
-
-      // SUBSTATES
-
-      // When a record is deleted, it enters the `start`
-      // state. It will exit this state when the record's
-      // transaction starts to commit.
-      start: DS.State.create({
-        // TRANSITIONS
-        enter: function(manager) {
-          var record = get(manager, 'record');
-
-          record.withTransaction(function(t) {
-            t.recordBecameDirty('deleted', record);
-          });
-        },
-
-        // EVENTS
-        willCommit: function(manager) {
-          manager.goToState('inFlight');
-        },
-
-        rollback: function(manager) {
-          var record = get(manager, 'record'),
-              data = get(record, 'data');
-
-          data.rollback();
-          record.withTransaction(function(t) {
-            t.recordBecameClean('deleted', record);
-          });
-          manager.goToState('loaded');
-        }
-      }),
-
-      // Once a record's transaction has started committing, but
-      // before the adapter indicates that the deletion
-      // has been saved to the server, a record is in the
-      // `inFlight` substate of `deleted`.
-      inFlight: DS.State.create({
-        // FLAGS
-        isSaving: true,
-
-        // TRANSITIONS
-        enter: function(manager) {
-          var record = get(manager, 'record');
-
-          record.withTransaction(function (t) {
-            t.recordBecameInFlight('deleted', record);
-          });
-        },
-
-        // EVENTS
-        didCommit: function(manager) {
-          var record = get(manager, 'record');
-
-          record.withTransaction(function(t) {
-            t.recordBecameClean('inflight', record);
-          });
-
-          manager.goToState('saved');
-
-          manager.send('invokeLifecycleCallbacks');
-        }
-      }),
-
-      // Once the adapter indicates that the deletion has
-      // been saved, the record enters the `saved` substate
-      // of `deleted`.
-      saved: DS.State.create({
-        // FLAGS
-        isDirty: false,
-
-        invokeLifecycleCallbacks: function(manager) {
-          var record = get(manager, 'record');
-          record.trigger('didDelete', record);
-        }
-      })
-    }),
-
-    // If the adapter indicates that there was an unknown
-    // error saving a record, the record enters the `error`
-    // state.
-    error: DS.State.create({
-      isError: true,
-
-      // EVENTS
-
-      invokeLifecycleCallbacks: function(manager) {
-        var record = get(manager, 'record');
-        record.trigger('becameError', record);
-      }
-    })
-  })
-};
-
-DS.StateManager = Ember.StateManager.extend({
-  record: null,
-  initialState: 'rootState',
-  states: states
-});
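-
-// A minimal usage sketch (illustrative only): a record creates its own
-// state manager and drives the chart above by sending events, e.g.
-//
-//     var manager = DS.StateManager.create({ record: record });
-//     manager.goToState('empty');
-//     manager.send('loadingData');   // empty   -> loading
-//     manager.send('didChangeData'); // loading -> loaded (via 'loadedData')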
-
-})();
-
-
-
-(function() {
-var get = Ember.get, set = Ember.set;
-
-//  When a record is changed on the client, it is considered "dirty"--there are
-//  pending changes that need to be saved to a persistence layer, such as a
-//  server.
-//
-//  If the record is rolled back, it re-enters a clean state, any changes are
-//  discarded, and its attributes are reset back to the last known good copy
-//  of the data that came from the server.
-//
-//  If the record is committed, the changes are sent to the server to be saved,
-//  and once the server confirms that they are valid, the record's "canonical"
-//  data becomes the original canonical data plus the changes merged in.
-//
-//  A DataProxy is an object that encapsulates this change tracking. It
-//  contains three buckets:
-//
-//  * `savedData` - the last-known copy of the data from the server
-//  * `unsavedData` - a hash that contains any changes that have not yet
-//     been committed
-//  * `associations` - this is similar to `savedData`, but holds the client
-//    ids of associated records
-//
-//  When setting a property on the object, the value is placed into the
-//  `unsavedData` bucket:
-//
-//      proxy.set('key', 'value');
-//
-//      // unsavedData:
-//      {
-//        key: "value"
-//      }
-//
-//  When retrieving a property from the object, it first looks to see
-//  if that value exists in the `unsavedData` bucket, and returns it if so.
-//  Otherwise, it returns the value from the `savedData` bucket.
-//
-//  When the adapter notifies a record that it has been saved, it merges the
-//  `unsavedData` bucket into the `savedData` bucket. If the record's
-//  transaction is rolled back, the `unsavedData` hash is simply discarded.
-//
-//  This object is a regular JS object for performance. It is only
-//  used internally for bookkeeping purposes.
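-//
-//  For example (illustrative; assume the server returned { title: "Old" }):
-//
-//      proxy.get('title');         // "Old" -- falls through to `savedData`
-//      proxy.set('title', 'New');  // lands in `unsavedData`
-//      proxy.get('title');         // "New" -- `unsavedData` wins
-//      proxy.rollback();
-//      proxy.get('title');         // "Old" -- `unsavedData` was discarded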
-
-var DataProxy = DS._DataProxy = function(record) {
-  this.record = record;
-
-  this.unsavedData = {};
-
-  this.associations = {};
-};
-
-DataProxy.prototype = {
-  get: function(key) { return Ember.get(this, key); },
-  set: function(key, value) { return Ember.set(this, key, value); },
-
-  setAssociation: function(key, value) {
-    this.associations[key] = value;
-  },
-
-  savedData: function() {
-    var savedData = this._savedData;
-    if (savedData) { return savedData; }
-
-    var record = this.record,
-        clientId = get(record, 'clientId'),
-        store = get(record, 'store');
-
-    if (store) {
-      savedData = store.dataForRecord(record);
-      this._savedData = savedData;
-      return savedData;
-    }
-  },
-
-  unknownProperty: function(key) {
-    var unsavedData = this.unsavedData,
-        associations = this.associations,
-        savedData = this.savedData(),
-        store;
-
-    var value = unsavedData[key], association;
-
-    // if this is a belongsTo association, this will
-    // be a clientId.
-    association = associations[key];
-
-    if (association !== undefined) {
-      store = get(this.record, 'store');
-      return store.clientIdToId[association];
-    }
-
-    if (savedData && value === undefined) {
-      value = savedData[key];
-    }
-
-    return value;
-  },
-
-  setUnknownProperty: function(key, value) {
-    var record = this.record,
-        unsavedData = this.unsavedData;
-
-    unsavedData[key] = value;
-
-    record.hashWasUpdated();
-
-    return value;
-  },
-
-  commit: function() {
-    this.saveData();
-
-    this.record.notifyPropertyChange('data');
-  },
-
-  rollback: function() {
-    this.unsavedData = {};
-
-    this.record.notifyPropertyChange('data');
-  },
-
-  saveData: function() {
-    var record = this.record;
-
-    var unsavedData = this.unsavedData;
-    var savedData = this.savedData();
-
-    for (var prop in unsavedData) {
-      if (unsavedData.hasOwnProperty(prop)) {
-        savedData[prop] = unsavedData[prop];
-        delete unsavedData[prop];
-      }
-    }
-  },
-
-  adapterDidUpdate: function() {
-    this.unsavedData = {};
-  }
-};
-
-})();
-
-
-
-(function() {
-var get = Ember.get, set = Ember.set, none = Ember.none;
-
-var retrieveFromCurrentState = Ember.computed(function(key) {
-  return get(get(this, 'stateManager.currentState'), key);
-}).property('stateManager.currentState').cacheable();
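-
-// `retrieveFromCurrentState` makes each flag below delegate to the record's
-// current DS.State, e.g. (illustrative):
-//
-//     record.get('isDeleted'); // false while loaded, true after deleteRecord()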
-
-DS.Model = Ember.Object.extend(Ember.Evented, {
-  isLoaded: retrieveFromCurrentState,
-  isDirty: retrieveFromCurrentState,
-  isSaving: retrieveFromCurrentState,
-  isDeleted: retrieveFromCurrentState,
-  isError: retrieveFromCurrentState,
-  isNew: retrieveFromCurrentState,
-  isPending: retrieveFromCurrentState,
-  isValid: retrieveFromCurrentState,
-
-  clientId: null,
-  transaction: null,
-  stateManager: null,
-  pendingQueue: null,
-  errors: null,
-
-  // because unknownProperty is used, any internal property
-  // must be initialized here.
-  primaryKey: 'id',
-  id: Ember.computed(function(key, value) {
-    var primaryKey = get(this, 'primaryKey'),
-        data = get(this, 'data');
-
-    if (arguments.length === 2) {
-      set(data, primaryKey, value);
-      return value;
-    }
-
-    var id = get(data, primaryKey);
-    return id ? id : this._id;
-  }).property('primaryKey', 'data'),
-
-  // The following methods are callbacks invoked by `toJSON`. You
-  // can override one of the callbacks to override specific behavior,
-  // or toJSON itself.
-  //
-  // If you override toJSON, you can invoke these callbacks manually
-  // to get the default behavior.
-
-  /**
-    Add the record's primary key to the JSON hash.
-
-    The default implementation uses the record's specified `primaryKey`
-    and the `id` computed property, which are passed in as parameters.
-
-    @param {Object} json the JSON hash being built
-    @param {Number|String} id the record's id
-    @param {String} key the primaryKey for the record
-  */
-  addIdToJSON: function(json, id, key) {
-    if (id) { json[key] = id; }
-  },
-
-  /**
-    Add the attributes' current values to the JSON hash.
-
-    The default implementation gets the current value of each
-    attribute from the `data`, and uses a `defaultValue` if
-    specified in the `DS.attr` definition.
-
-    @param {Object} json the JSON hash being built
-    @param {Ember.Map} attributes a Map of attributes
-    @param {DataProxy} data the record's data, accessed with `get` and `set`.
-  */
-  addAttributesToJSON: function(json, attributes, data) {
-    attributes.forEach(function(name, meta) {
-      var key = meta.key(this.constructor),
-          value = get(data, key);
-
-      if (value === undefined) {
-        value = meta.options.defaultValue;
-      }
-
-      json[key] = value;
-    }, this);
-  },
-
-  /**
-    Add the value of a `hasMany` association to the JSON hash.
-
-    The default implementation honors the `embedded` option
-    passed to `DS.hasMany`. If embedded, `toJSON` is recursively
-    called on the child records. If not, the `id` of each
-    record is added.
-
-    Note that if a record is not embedded and does not
-    yet have an `id` (usually provided by the server), it
-    will not be included in the output.
-
-    @param {Object} json the JSON hash being built
-    @param {DataProxy} data the record's data, accessed with `get` and `set`.
-    @param {Object} meta information about the association
-    @param {Object} options options passed to `toJSON`
-  */
-  addHasManyToJSON: function(json, data, meta, options) {
-    var key = meta.key,
-        manyArray = get(this, key),
-        records = [], i, l,
-        clientId, id;
-
-    if (meta.options.embedded) {
-      // TODO: Avoid materializing embedded hashes if possible
-      manyArray.forEach(function(record) {
-        records.push(record.toJSON(options));
-      });
-    } else {
-      var clientIds = get(manyArray, 'content');
-
-      for (i=0, l=clientIds.length; i<l; i++) {
-        clientId = clientIds[i];
-        id = get(this, 'store').clientIdToId[clientId];
-
-        if (id !== undefined) {
-          records.push(id);
-        }
-      }
-    }
-
-    key = meta.options.key || get(this, 'namingConvention').keyToJSONKey(key);
-    json[key] = records;
-  },
-
-  /**
-    Add the value of a `belongsTo` association to the JSON hash.
-
-    The default implementation always includes the `id`.
-
-    @param {Object} json the JSON hash being built
-    @param {DataProxy} data the record's data, accessed with `get` and `set`.
-    @param {Object} meta information about the association
-    @param {Object} options options passed to `toJSON`
-  */
-  addBelongsToToJSON: function(json, data, meta, options) {
-    var key = meta.key, value, id;
-
-    if (meta.options.embedded) {
-      key = meta.options.key || get(this, 'namingConvention').keyToJSONKey(key);
-      value = get(data.record, key);
-      json[key] = value ? value.toJSON(options) : null;
-    } else {
-      key = meta.options.key || get(this, 'namingConvention').foreignKey(key);
-      id = data.get(key);
-      json[key] = none(id) ? null : id;
-    }
-  },
-  /**
-    Create a JSON representation of the record, including its `id`,
-    attributes and associations. Honor any settings defined on the
-    attributes or associations (such as `embedded` or `key`).
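-
-    For example (an illustrative sketch: assumes a `title` attribute and a
-    non-embedded `belongsTo` author, using the default naming convention):
-
-        record.toJSON();
-        // => { id: "1", title: "Hello", author_id: "2" }
-
-    Pass `{ associations: true }` to also serialize `hasMany` keys.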
-  */
-  toJSON: function(options) {
-    var data = get(this, 'data'),
-        result = {},
-        type = this.constructor,
-        attributes = get(type, 'attributes'),
-        primaryKey = get(this, 'primaryKey'),
-        id = get(this, 'id'),
-        store = get(this, 'store'),
-        associations;
-
-    options = options || {};
-
-    // delegate to `addIdToJSON` callback
-    this.addIdToJSON(result, id, primaryKey);
-
-    // delegate to `addAttributesToJSON` callback
-    this.addAttributesToJSON(result, attributes, data);
-
-    associations = get(type, 'associationsByName');
-
-    // add associations, delegating to `addHasManyToJSON` and
-    // `addBelongsToToJSON`.
-    associations.forEach(function(key, meta) {
-      if (options.associations && meta.kind === 'hasMany') {
-        this.addHasManyToJSON(result, data, meta, options);
-      } else if (meta.kind === 'belongsTo') {
-        this.addBelongsToToJSON(result, data, meta, options);
-      }
-    }, this);
-
-    return result;
-  },
-
-  data: Ember.computed(function() {
-    return new DS._DataProxy(this);
-  }).cacheable(),
-
-  didLoad: Ember.K,
-  didUpdate: Ember.K,
-  didCreate: Ember.K,
-  didDelete: Ember.K,
-  becameInvalid: Ember.K,
-  becameError: Ember.K,
-
-  init: function() {
-    var stateManager = DS.StateManager.create({
-      record: this
-    });
-
-    set(this, 'pendingQueue', {});
-
-    set(this, 'stateManager', stateManager);
-    stateManager.goToState('empty');
-  },
-
-  destroy: function() {
-    if (!get(this, 'isDeleted')) {
-      this.deleteRecord();
-    }
-    this._super();
-  },
-
-  send: function(name, context) {
-    return get(this, 'stateManager').send(name, context);
-  },
-
-  withTransaction: function(fn) {
-    var transaction = get(this, 'transaction');
-    if (transaction) { fn(transaction); }
-  },
-
-  setProperty: function(key, value) {
-    this.send('setProperty', { key: key, value: value });
-  },
-
-  deleteRecord: function() {
-    this.send('deleteRecord');
-  },
-
-  waitingOn: function(record) {
-    this.send('waitingOn', record);
-  },
-
-  notifyHashWasUpdated: function() {
-    var store = get(this, 'store');
-    if (store) {
-      store.hashWasUpdated(this.constructor, get(this, 'clientId'), this);
-    }
-  },
-
-  unknownProperty: function(key) {
-    var data = get(this, 'data');
-
-    if (data && key in data) {
-      Ember.assert("You attempted to access the " + key + " property on a record without defining an attribute.", false);
-    }
-  },
-
-  setUnknownProperty: function(key, value) {
-    var data = get(this, 'data');
-
-    if (data && key in data) {
-      Ember.assert("You attempted to set the " + key + " property on a record without defining an attribute.", false);
-    } else {
-      return this._super(key, value);
-    }
-  },
-
-  namingConvention: {
-    keyToJSONKey: function(key) {
-      // TODO: Strip off `is` from the front. Example: `isHipster` becomes `hipster`
-      return Ember.String.decamelize(key);
-    },
-
-    foreignKey: function(key) {
-      return Ember.String.decamelize(key) + '_id';
-    }
-  },
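-
-  // e.g. with this default convention, keyToJSONKey('firstName') returns
-  // "first_name" and foreignKey('homeTown') returns "home_town_id".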
-
-  /** @private */
-  hashWasUpdated: function() {
-    // At the end of the run loop, notify record arrays that
-    // this record has changed so they can re-evaluate its contents
-    // to determine membership.
-    Ember.run.once(this, this.notifyHashWasUpdated);
-  },
-
-  dataDidChange: Ember.observer(function() {
-    var associations = get(this.constructor, 'associationsByName'),
-        data = get(this, 'data'), store = get(this, 'store'),
-        idToClientId = store.idToClientId,
-        cachedValue;
-
-    associations.forEach(function(name, association) {
-      if (association.kind === 'hasMany') {
-        cachedValue = this.cacheFor(name);
-
-        if (cachedValue) {
-          var key = association.options.key || get(this, 'namingConvention').keyToJSONKey(name),
-              ids = data.get(key) || [];
-
-          var clientIds;
-          if (association.options.embedded) {
-            clientIds = store.loadMany(association.type, ids).clientIds;
-          } else {
-            clientIds = Ember.EnumerableUtils.map(ids, function(id) {
-              return store.clientIdForId(association.type, id);
-            });
-          }
-
-          set(cachedValue, 'content', Ember.A(clientIds));
-          cachedValue.fetch();
-        }
-      }
-    }, this);
-  }, 'data'),
-
-  /**
-    @private
-
-    Override the default event firing from Ember.Evented to
-    also call methods with the given name.
-  */
-  trigger: function(name) {
-    Ember.tryInvoke(this, name, [].slice.call(arguments, 1));
-    this._super.apply(this, arguments);
-  }
-});
-
-// Helper function to generate store aliases.
-// This returns a function that invokes the named alias
-// on the default store, but injects the class as the
-// first parameter.
-var storeAlias = function(methodName) {
-  return function() {
-    var store = get(DS, 'defaultStore'),
-        args = [].slice.call(arguments);
-
-    args.unshift(this);
-    return store[methodName].apply(store, args);
-  };
-};
-
-DS.Model.reopenClass({
-  isLoaded: storeAlias('recordIsLoaded'),
-  find: storeAlias('find'),
-  filter: storeAlias('filter'),
-
-  _create: DS.Model.create,
-
-  create: function() {
-    throw new Ember.Error("You should not call `create` on a model. Instead, call `createRecord` with the attributes you would like to set.");
-  },
-
-  createRecord: storeAlias('createRecord')
-});
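-
-// So, for example (illustrative), App.Person.find(1) is shorthand for
-// get(DS, 'defaultStore').find(App.Person, 1).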
-
-})();
-
-
-
-(function() {
-var get = Ember.get;
-DS.Model.reopenClass({
-  attributes: Ember.computed(function() {
-    var map = Ember.Map.create();
-
-    this.eachComputedProperty(function(name, meta) {
-      if (meta.isAttribute) { map.set(name, meta); }
-    });
-
-    return map;
-  }).cacheable(),
-
-  processAttributeKeys: function() {
-    if (this.processedAttributeKeys) { return; }
-
-    var namingConvention = this.proto().namingConvention;
-
-    this.eachComputedProperty(function(name, meta) {
-      if (meta.isAttribute && !meta.options.key) {
-        meta.options.key = namingConvention.keyToJSONKey(name, this);
-      }
-    }, this);
-  }
-});
-
-function getAttr(record, options, key) {
-  var data = get(record, 'data');
-  var value = get(data, key);
-
-  if (value === undefined) {
-    value = options.defaultValue;
-  }
-
-  return value;
-}
-
-DS.attr = function(type, options) {
-  var transform = DS.attr.transforms[type];
-  Ember.assert("Could not find model attribute of type " + type, !!transform);
-
-  var transformFrom = transform.from;
-  var transformTo = transform.to;
-
-  options = options || {};
-
-  var meta = {
-    type: type,
-    isAttribute: true,
-    options: options,
-
-    // this will ensure that the key always takes naming
-    // conventions into consideration.
-    key: function(recordType) {
-      recordType.processAttributeKeys();
-      return options.key;
-    }
-  };
-
-  return Ember.computed(function(key, value) {
-    var data;
-
-    key = meta.key(this.constructor);
-
-    if (arguments.length === 2) {
-      value = transformTo(value);
-
-      if (value !== getAttr(this, options, key)) {
-        this.setProperty(key, value);
-      }
-    } else {
-      value = getAttr(this, options, key);
-    }
-
-    return transformFrom(value);
-  // `data` is never set directly. However, it may be
-  // invalidated from the state manager's setData
-  // event.
-  }).property('data').cacheable().meta(meta);
-};
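-
-// An illustrative sketch of declaring attributes (each type name must
-// match a key in DS.attr.transforms below):
-//
-//     App.Person = DS.Model.extend({
-//       name:     DS.attr('string'),
-//       age:      DS.attr('number', { defaultValue: 0 }),
-//       birthday: DS.attr('date')
-//     });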
-
-DS.attr.transforms = {
-  string: {
-    from: function(serialized) {
-      return Ember.none(serialized) ? null : String(serialized);
-    },
-
-    to: function(deserialized) {
-      return Ember.none(deserialized) ? null : String(deserialized);
-    }
-  },
-
-  number: {
-    from: function(serialized) {
-      return Ember.none(serialized) ? null : Number(serialized);
-    },
-
-    to: function(deserialized) {
-      return Ember.none(deserialized) ? null : Number(deserialized);
-    }
-  },
-
-  'boolean': {
-    from: function(serialized) {
-      return Boolean(serialized);
-    },
-
-    to: function(deserialized) {
-      return Boolean(deserialized);
-    }
-  },
-
-  date: {
-    from: function(serialized) {
-      var type = typeof serialized;
-
-      if (type === "string" || type === "number") {
-        return new Date(serialized);
-      } else if (serialized === null || serialized === undefined) {
-        // if the value is not present in the data,
-        // return undefined, not null.
-        return serialized;
-      } else {
-        return null;
-      }
-    },
-
-    to: function(date) {
-      if (date instanceof Date) {
-        var days = ["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"];
-        var months = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"];
-
-        var pad = function(num) {
-          return num < 10 ? "0"+num : ""+num;
-        };
-
-        var utcYear = date.getUTCFullYear(),
-            utcMonth = date.getUTCMonth(),
-            utcDayOfMonth = date.getUTCDate(),
-            utcDay = date.getUTCDay(),
-            utcHours = date.getUTCHours(),
-            utcMinutes = date.getUTCMinutes(),
-            utcSeconds = date.getUTCSeconds();
-
-
-        var dayOfWeek = days[utcDay];
-        var dayOfMonth = pad(utcDayOfMonth);
-        var month = months[utcMonth];
-
-        return dayOfWeek + ", " + dayOfMonth + " " + month + " " + utcYear + " " +
-               pad(utcHours) + ":" + pad(utcMinutes) + ":" + pad(utcSeconds) + " GMT";
-      } else if (date === undefined) {
-        return undefined;
-      } else {
-        return null;
-      }
-    }
-  }
-};
-
-
-})();
-
-
-
-(function() {
-
-})();
-
-
-
-(function() {
-var get = Ember.get, set = Ember.set,
-    none = Ember.none;
-
-var embeddedFindRecord = function(store, type, data, key, one) {
-  var association = get(data, key);
-  return none(association) ? undefined : store.load(type, association).id;
-};
-
-var referencedFindRecord = function(store, type, data, key, one) {
-  return get(data, key);
-};
-
-var hasAssociation = function(type, options, one) {
-  options = options || {};
-
-  var embedded = options.embedded,
-      findRecord = embedded ? embeddedFindRecord : referencedFindRecord;
-
-  var meta = { type: type, isAssociation: true, options: options, kind: 'belongsTo' };
-
-  return Ember.computed(function(key, value) {
-    var data = get(this, 'data'), ids, id, association,
-        store = get(this, 'store');
-
-    if (typeof type === 'string') {
-      type = get(this, type, false) || get(window, type);
-    }
-
-    if (arguments.length === 2) {
-      key = options.key || get(this, 'namingConvention').foreignKey(key);
-      this.send('setAssociation', { key: key, value: Ember.none(value) ? null : get(value, 'clientId') });
-      //data.setAssociation(key, get(value, 'clientId'));
-      // i.e. store the associated record's client id under `key` in the data hash
-      return value;
-    } else {
-      // Embedded belongsTo associations should not look for
-      // a foreign key.
-      if (embedded) {
-        key = options.key || get(this, 'namingConvention').keyToJSONKey(key);
-
-      // Non-embedded associations should look for a foreign key.
-      // For example, instead of person, we might look for person_id
-      } else {
-        key = options.key || get(this, 'namingConvention').foreignKey(key);
-      }
-      id = findRecord(store, type, data, key, true);
-      association = id ? store.find(type, id) : null;
-    }
-
-    return association;
-  }).property('data').cacheable().meta(meta);
-};
-
-DS.belongsTo = function(type, options) {
-  Ember.assert("The type passed to DS.belongsTo must be defined", !!type);
-  return hasAssociation(type, options);
-};
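-
-// Illustrative sketch:
-//
-//     App.Comment = DS.Model.extend({
-//       post: DS.belongsTo('App.Post')
-//     });
-//
-// By default the data hash is expected to hold a `post_id` foreign key;
-// with { embedded: true }, a nested `post` hash is loaded instead.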
-
-})();
-
-
-
-(function() {
-var get = Ember.get, set = Ember.set;
-var embeddedFindRecord = function(store, type, data, key) {
-  var association = get(data, key);
-  return association ? store.loadMany(type, association).ids : [];
-};
-
-var referencedFindRecord = function(store, type, data, key, one) {
-  return get(data, key);
-};
-
-var hasAssociation = function(type, options) {
-  options = options || {};
-
-  var embedded = options.embedded,
-      findRecord = embedded ? embeddedFindRecord : referencedFindRecord;
-
-  var meta = { type: type, isAssociation: true, options: options, kind: 'hasMany' };
-
-  return Ember.computed(function(key, value) {
-    var data = get(this, 'data'),
-        store = get(this, 'store'),
-        ids, id, association;
-
-    if (typeof type === 'string') {
-      type = get(this, type, false) || get(window, type);
-    }
-
-    key = options.key || get(this, 'namingConvention').keyToJSONKey(key);
-    ids = findRecord(store, type, data, key);
-    association = store.findMany(type, ids || []);
-    set(association, 'parentRecord', this);
-
-    return association;
-  }).property().cacheable().meta(meta);
-};
-
-DS.hasMany = function(type, options) {
-  Ember.assert("The type passed to DS.hasMany must be defined", !!type);
-  return hasAssociation(type, options);
-};
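-
-// Illustrative sketch:
-//
-//     App.Post = DS.Model.extend({
-//       comments: DS.hasMany('App.Comment')
-//     });
-//
-// By default the data hash is expected to hold an array of ids under
-// `comments`; with { embedded: true }, an array of nested hashes is
-// loaded via the store's loadMany.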
-
-})();
-
-
-
-(function() {
-var get = Ember.get;
-
-DS.Model.reopenClass({
-  typeForAssociation: function(name) {
-    var association = get(this, 'associationsByName').get(name);
-    return association && association.type;
-  },
-
-  associations: Ember.computed(function() {
-    var map = Ember.Map.create();
-
-    this.eachComputedProperty(function(name, meta) {
-      if (meta.isAssociation) {
-        var type = meta.type,
-            typeList = map.get(type);
-
-        if (typeof type === 'string') {
-          type = get(this, type, false) || get(window, type);
-          meta.type = type;
-        }
-
-        if (!typeList) {
-          typeList = [];
-          map.set(type, typeList);
-        }
-
-        typeList.push({ name: name, kind: meta.kind });
-      }
-    });
-
-    return map;
-  }).cacheable(),
-
-  associationsByName: Ember.computed(function() {
-    var map = Ember.Map.create(), type;
-
-    this.eachComputedProperty(function(name, meta) {
-      if (meta.isAssociation) {
-        meta.key = name;
-        type = meta.type;
-
-        if (typeof type === 'string') {
-          type = get(this, type, false) || get(window, type);
-          meta.type = type;
-        }
-
-        map.set(name, meta);
-      }
-    });
-
-    return map;
-  }).cacheable()
-});
-
-})();
-
-
-
-(function() {
-
-})();
-
-
-
-(function() {
-/**
-  An adapter is an object that receives requests from a store and
-  translates them into the appropriate action to take against your
-  persistence layer. The persistence layer is usually an HTTP API, but may
-  be anything, such as the browser's local storage.
-
-  ### Creating an Adapter
-
-  First, create a new subclass of `DS.Adapter`:
-
-      App.MyAdapter = DS.Adapter.extend({
-        // ...your code here
-      });
-
-  To tell your store which adapter to use, set its `adapter` property:
-
-      App.store = DS.Store.create({
-        revision: 3,
-        adapter: App.MyAdapter.create()
-      });
-
-  `DS.Adapter` is an abstract base class that you should override in your
-  application to customize it for your backend. The minimum set of methods
-  that you should implement is:
-
-    * `find()`
-    * `createRecord()`
-    * `updateRecord()`
-    * `deleteRecord()`
-
-  To improve the network performance of your application, you can optimize
-  your adapter by overriding these lower-level methods:
-
-    * `findMany()`
-    * `createRecords()`
-    * `updateRecords()`
-    * `deleteRecords()`
-    * `commit()`
-
-  For more information about the adapter API, please see `README.md`.
-*/
-
-DS.Adapter = Ember.Object.extend({
-  /**
-    The `find()` method is invoked when the store is asked for a record that
-    has not previously been loaded. In response to `find()` being called, you
-    should query your persistence layer for a record with the given ID. Once
-    found, you can asynchronously call the store's `load()` method to load
-    the record.
-
-    Here is an example `find` implementation:
-
-      find: function(store, type, id) {
-        var url = type.url;
-        url = url.fmt(id);
-
-        jQuery.getJSON(url, function(data) {
-            // data is a Hash of key/value pairs. If your server returns a
-            // root, simply do something like:
-            // store.load(type, id, data.person)
-            store.load(type, id, data);
-        });
-      }
-  */
-  find: null,
-
-  /**
-    If the globally unique IDs for your records should be generated on the client,
-    implement the `generateIdForRecord()` method. This method will be invoked
-    each time you create a new record, and the value returned from it will be
-    assigned to the record's `primaryKey`.
-
-    Most traditional REST-like HTTP APIs will not use this method. Instead, the ID
-    of the record will be set by the server, and your adapter will update the store
-    with the new ID when it calls `didCreateRecord()`. Only implement this method if
-    you intend to generate record IDs on the client-side.
-
-    The `generateIdForRecord()` method will be invoked with the requesting store as
-    the first parameter and the newly created record as the second parameter:
-
-        generateIdForRecord: function(store, record) {
-          var uuid = App.generateUUIDWithStatisticallyLowOddsOfCollision();
-          return uuid;
-        }
-  */
-  generateIdForRecord: null,
-
-  commit: function(store, commitDetails) {
-    commitDetails.updated.eachType(function(type, array) {
-      this.updateRecords(store, type, array.slice());
-    }, this);
-
-    commitDetails.created.eachType(function(type, array) {
-      this.createRecords(store, type, array.slice());
-    }, this);
-
-    commitDetails.deleted.eachType(function(type, array) {
-      this.deleteRecords(store, type, array.slice());
-    }, this);
-  },
-
-  createRecords: function(store, type, records) {
-    records.forEach(function(record) {
-      this.createRecord(store, type, record);
-    }, this);
-  },
-
-  updateRecords: function(store, type, records) {
-    records.forEach(function(record) {
-      this.updateRecord(store, type, record);
-    }, this);
-  },
-
-  deleteRecords: function(store, type, records) {
-    records.forEach(function(record) {
-      this.deleteRecord(store, type, record);
-    }, this);
-  },
-
-  findMany: function(store, type, ids) {
-    ids.forEach(function(id) {
-      this.find(store, type, id);
-    }, this);
-  }
-});
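-
-// A minimal adapter sketch (illustrative only; the URL scheme is assumed):
-//
-//     App.MyAdapter = DS.Adapter.extend({
-//       find: function(store, type, id) {
-//         jQuery.getJSON('/people/' + id, function(data) {
-//           store.load(type, id, data);
-//         });
-//       }
-//       // ...plus createRecord(), updateRecord() and deleteRecord()
-//     });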
-
-})();
-
-
-
-(function() {
-var set = Ember.set;
-
-Ember.onLoad('application', function(app) {
-  app.registerInjection({
-    name: "store",
-    before: "controllers",
-
-    injection: function(app, stateManager, property) {
-      if (property === 'Store') {
-        set(stateManager, 'store', app[property].create());
-      }
-    }
-  });
-
-  app.registerInjection({
-    name: "giveStoreToControllers",
-
-    injection: function(app, stateManager, property) {
-      if (property.match(/Controller$/)) {
-        var controllerName = property.charAt(0).toLowerCase() + property.substr(1);
-        var store = stateManager.get('store');
-        var controller = stateManager.get(controllerName);
-
-        controller.set('store', store);
-      }
-    }
-  });
-});
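-
-// In effect (illustrative): an app that defines `App.Store` has an instance
-// created and set as `store` on its state manager, and every
-// `App.FooController` then receives that same store instance.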
-
-})();
-
-
-
-(function() {
-var get = Ember.get;
-
-DS.FixtureAdapter = DS.Adapter.extend({
-
-  simulateRemoteResponse: true,
-
-  latency: 50,
-
-  /*
-    Implement this method in order to provide data associated with a type
-  */
-  fixturesForType: function(type) {
-    return type.FIXTURES ? Ember.A(type.FIXTURES) : null;
-  },
-
-  /*
-    Implement this method in order to query the fixture data
-  */
-  queryFixtures: function(fixtures, query) {
-    return fixtures;
-  },
-
-  /*
-    Implement this method in order to provide JSON for the CRUD methods
-  */
-  mockJSON: function(type, record) {
-    return record.toJSON({associations: true});
-  },
-
-  /*
-    Adapter methods
-  */
-  generateIdForRecord: function(store, record) {
-    return Ember.guidFor(record);
-  },
-
-  find: function(store, type, id) {
-    var fixtures = this.fixturesForType(type);
-
-    Ember.assert("Unable to find fixtures for model type "+type.toString(), !!fixtures);
-
-    if (fixtures) {
-      fixtures = fixtures.findProperty('id', id);
-    }
-
-    if (fixtures) {
-      this.simulateRemoteCall(function() {
-        store.load(type, fixtures);
-      }, store, type);
-    }
-  },
-
-  findMany: function(store, type, ids) {
-    var fixtures = this.fixturesForType(type);
-
-    Ember.assert("Unable to find fixtures for model type "+type.toString(), !!fixtures);
-
-    if (fixtures) {
-      fixtures = fixtures.filter(function(item) {
-        return ids.indexOf(item.id) !== -1;
-      });
-    }
-
-    if (fixtures) {
-      this.simulateRemoteCall(function() {
-        store.loadMany(type, fixtures);
-      }, store, type);
-    }
-  },
-
-  findAll: function(store, type) {
-    var fixtures = this.fixturesForType(type);
-
-    Ember.assert("Unable to find fixtures for model type "+type.toString(), !!fixtures);
-
-    this.simulateRemoteCall(function() {
-      store.loadMany(type, fixtures);
-    }, store, type);
-  },
-
-  findQuery: function(store, type, query, array) {
-    var fixtures = this.fixturesForType(type);
-
-    Ember.assert("Unable to find fixtures for model type "+type.toString(), !!fixtures);
-
-    fixtures = this.queryFixtures(fixtures, query);
-
-    if (fixtures) {
-      this.simulateRemoteCall(function() {
-        array.load(fixtures);
-      }, store, type);
-    }
-  },
-
-  createRecord: function(store, type, record) {
-    var fixture = this.mockJSON(type, record);
-
-    fixture.id = this.generateIdForRecord(store, record);
-
-    this.simulateRemoteCall(function() {
-      store.didCreateRecord(record, fixture);
-    }, store, type, record);
-  },
-
-  updateRecord: function(store, type, record) {
-    var fixture = this.mockJSON(type, record);
-
-    this.simulateRemoteCall(function() {
-      store.didUpdateRecord(record, fixture);
-    }, store, type, record);
-  },
-
-  deleteRecord: function(store, type, record) {
-    this.simulateRemoteCall(function() {
-      store.didDeleteRecord(record);
-    }, store, type, record);
-  },
-
-  /*
-    @private
-  */
-  simulateRemoteCall: function(callback, store, type, record) {
-    if (get(this, 'simulateRemoteResponse')) {
-      setTimeout(callback, get(this, 'latency'));
-    } else {
-      callback();
-    }
-  }
-});
-
-DS.fixtureAdapter = DS.FixtureAdapter.create();
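-
-// Illustrative usage -- fixtures come from a FIXTURES array on the type:
-//
-//     App.Person = DS.Model.extend({ name: DS.attr('string') });
-//     App.Person.FIXTURES = [ { id: '1', name: 'Alice' } ];
-//
-//     App.store = DS.Store.create({
-//       revision: 3,
-//       adapter: DS.fixtureAdapter
-//     });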
-
-})();
-
-
-
-(function() {
-/*global jQuery*/
-
-var get = Ember.get, set = Ember.set;
-
-DS.RESTAdapter = DS.Adapter.extend({
-  bulkCommit: false,
-
-  createRecord: function(store, type, record) {
-    var root = this.rootForType(type);
-
-    var data = {};
-    data[root] = record.toJSON();
-
-    this.ajax(this.buildURL(root), "POST", {
-      data: data,
-      context: this,
-      success: function(json) {
-        this.didCreateRecord(store, type, record, json);
-      }
-    });
-  },
-
-  didCreateRecord: function(store, type, record, json) {
-    var root = this.rootForType(type);
-
-    this.sideload(store, type, json, root);
-    store.didCreateRecord(record, json[root]);
-  },
-
-  createRecords: function(store, type, records) {
-    if (get(this, 'bulkCommit') === false) {
-      return this._super(store, type, records);
-    }
-
-    var root = this.rootForType(type),
-        plural = this.pluralize(root);
-
-    var data = {};
-    data[plural] = records.map(function(record) {
-      return record.toJSON();
-    });
-
-    this.ajax(this.buildURL(root), "POST", {
-      data: data,
-      context: this,
-      success: function(json) {
-        this.didCreateRecords(store, type, records, json);
-      }
-    });
-  },
-
-  didCreateRecords: function(store, type, records, json) {
-    var root = this.pluralize(this.rootForType(type));
-
-    this.sideload(store, type, json, root);
-    store.didCreateRecords(type, records, json[root]);
-  },
-
-  updateRecord: function(store, type, record) {
-    var id = get(record, 'id');
-    var root = this.rootForType(type);
-
-    var data = {};
-    data[root] = record.toJSON();
-
-    this.ajax(this.buildURL(root, id), "PUT", {
-      data: data,
-      context: this,
-      success: function(json) {
-        this.didUpdateRecord(store, type, record, json);
-      }
-    });
-  },
-
-  didUpdateRecord: function(store, type, record, json) {
-    var root = this.rootForType(type);
-
-    this.sideload(store, type, json, root);
-    store.didUpdateRecord(record, json && json[root]);
-  },
-
-  updateRecords: function(store, type, records) {
-    if (get(this, 'bulkCommit') === false) {
-      return this._super(store, type, records);
-    }
-
-    var root = this.rootForType(type),
-        plural = this.pluralize(root);
-
-    var data = {};
-    data[plural] = records.map(function(record) {
-      return record.toJSON();
-    });
-
-    this.ajax(this.buildURL(root, "bulk"), "PUT", {
-      data: data,
-      context: this,
-      success: function(json) {
-        this.didUpdateRecords(store, type, records, json);
-      }
-    });
-  },
-
-  didUpdateRecords: function(store, type, records, json) {
-    var root = this.pluralize(this.rootForType(type));
-
-    this.sideload(store, type, json, root);
-    store.didUpdateRecords(records, json[root]);
-  },
-
-  deleteRecord: function(store, type, record) {
-    var id = get(record, 'id');
-    var root = this.rootForType(type);
-
-    this.ajax(this.buildURL(root, id), "DELETE", {
-      context: this,
-      success: function(json) {
-        this.didDeleteRecord(store, type, record, json);
-      }
-    });
-  },
-
-  didDeleteRecord: function(store, type, record, json) {
-    if (json) { this.sideload(store, type, json); }
-    store.didDeleteRecord(record);
-  },
-
-  deleteRecords: function(store, type, records) {
-    if (get(this, 'bulkCommit') === false) {
-      return this._super(store, type, records);
-    }
-
-    var root = this.rootForType(type),
-        plural = this.pluralize(root);
-
-    var data = {};
-    data[plural] = records.map(function(record) {
-      return get(record, 'id');
-    });
-
-    this.ajax(this.buildURL(root, 'bulk'), "DELETE", {
-      data: data,
-      context: this,
-      success: function(json) {
-        this.didDeleteRecords(store, type, records, json);
-      }
-    });
-  },
-
-  didDeleteRecords: function(store, type, records, json) {
-    if (json) { this.sideload(store, type, json); }
-    store.didDeleteRecords(records);
-  },
-
-  find: function(store, type, id) {
-    var root = this.rootForType(type);
-
-    this.ajax(this.buildURL(root, id), "GET", {
-      success: function(json) {
-        this.sideload(store, type, json, root);
-        store.load(type, json[root]);
-      }
-    });
-  },
-
-  findMany: function(store, type, ids) {
-    var root = this.rootForType(type), plural = this.pluralize(root);
-
-    this.ajax(this.buildURL(root), "GET", {
-      data: { ids: ids },
-      success: function(json) {
-        this.sideload(store, type, json, plural);
-        store.loadMany(type, json[plural]);
-      }
-    });
-  },
-
-  findAll: function(store, type) {
-    var root = this.rootForType(type), plural = this.pluralize(root);
-
-    this.ajax(this.buildURL(root), "GET", {
-      success: function(json) {
-        this.sideload(store, type, json, plural);
-        store.loadMany(type, json[plural]);
-      }
-    });
-  },
-
-  findQuery: function(store, type, query, recordArray) {
-    var root = this.rootForType(type), plural = this.pluralize(root);
-
-    this.ajax(this.buildURL(root), "GET", {
-      data: query,
-      success: function(json) {
-        this.sideload(store, type, json, plural);
-        recordArray.load(json[plural]);
-      }
-    });
-  },
-
-  // HELPERS
-
-  plurals: {},
-
-  // define a `plurals` hash in your subclass to handle
-  // special-case pluralization
-  pluralize: function(name) {
-    return this.plurals[name] || name + "s";
-  },
-
-  rootForType: function(type) {
-    if (type.url) { return type.url; }
-
-    // use the last part of the name as the URL
-    var parts = type.toString().split(".");
-    var name = parts[parts.length - 1];
-    return name.replace(/([A-Z])/g, '_$1').toLowerCase().slice(1);
-  },
-
-  ajax: function(url, type, hash) {
-    hash.url = url;
-    hash.type = type;
-    hash.dataType = 'json';
-    hash.contentType = 'application/json; charset=utf-8';
-    hash.context = this;
-
-    if (hash.data && type !== 'GET') {
-      hash.data = JSON.stringify(hash.data);
-    }
-
-    jQuery.ajax(hash);
-  },
-
-  sideload: function(store, type, json, root) {
-    var sideloadedType, mappings, loaded = {};
-
-    loaded[root] = true;
-
-    for (var prop in json) {
-      if (!json.hasOwnProperty(prop)) { continue; }
-      if (prop === root) { continue; }
-
-      sideloadedType = type.typeForAssociation(prop);
-
-      if (!sideloadedType) {
-        mappings = get(this, 'mappings');
-        Ember.assert("Your server returned a hash with the key " + prop + " but you have no mappings", !!mappings);
-
-        sideloadedType = get(mappings, prop);
-
-        if (typeof sideloadedType === 'string') {
-          sideloadedType = get(window, sideloadedType);
-        }
-
-        Ember.assert("Your server returned a hash with the key " + prop + " but you have no mapping for it", !!sideloadedType);
-      }
-
-      this.sideloadAssociations(store, sideloadedType, json, prop, loaded);
-    }
-  },
-
-  sideloadAssociations: function(store, type, json, prop, loaded) {
-    loaded[prop] = true;
-
-    get(type, 'associationsByName').forEach(function(key, meta) {
-      key = meta.key || key;
-      if (meta.kind === 'belongsTo') {
-        key = this.pluralize(key);
-      }
-      if (json[key] && !loaded[key]) {
-        this.sideloadAssociations(store, meta.type, json, key, loaded);
-      }
-    }, this);
-
-    this.loadValue(store, type, json[prop]);
-  },
-
-  loadValue: function(store, type, value) {
-    if (value instanceof Array) {
-      store.loadMany(type, value);
-    } else {
-      store.load(type, value);
-    }
-  },
-
-  buildURL: function(record, suffix) {
-    var url = [""];
-
-    Ember.assert("Namespace URL (" + this.namespace + ") must not start with slash", !this.namespace || this.namespace.toString().charAt(0) !== "/");
-    Ember.assert("Record URL (" + record + ") must not start with slash", !record || record.toString().charAt(0) !== "/");
-    Ember.assert("URL suffix (" + suffix + ") must not start with slash", !suffix || suffix.toString().charAt(0) !== "/");
-
-    if (this.namespace !== undefined) {
-      url.push(this.namespace);
-    }
-
-    url.push(this.pluralize(record));
-    if (suffix !== undefined) {
-      url.push(suffix);
-    }
-
-    return url.join("/");
-  }
-});
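-
-// Illustrative examples of the helpers above:
-//
-//     rootForType(App.BlogPost)  // => "blog_post"
-//     pluralize("blog_post")     // => "blog_posts"
-//     buildURL("blog_post", 5)   // => "/blog_posts/5"
-//
-// Subclasses can supply `plurals: { "person": "people" }` for irregular
-// nouns and a `namespace` to prefix every generated URL.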
-
-
-})();
-
-
-
-(function() {
-//Copyright (C) 2011 by Living Social, Inc.
-
-//Permission is hereby granted, free of charge, to any person obtaining a copy of
-//this software and associated documentation files (the "Software"), to deal in
-//the Software without restriction, including without limitation the rights to
-//use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-//of the Software, and to permit persons to whom the Software is furnished to do
-//so, subject to the following conditions:
-
-//The above copyright notice and this permission notice shall be included in all
-//copies or substantial portions of the Software.
-
-//THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-//IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-//FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-//AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-//LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-//OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-//SOFTWARE.
-
-})();
-
diff --git a/branch-1.2/ambari-web/vendor/scripts/ember-i18n-1.2.0.js b/branch-1.2/ambari-web/vendor/scripts/ember-i18n-1.2.0.js
deleted file mode 100644
index 6ab38d6..0000000
--- a/branch-1.2/ambari-web/vendor/scripts/ember-i18n-1.2.0.js
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
-Copyright (C) 2011 by James A. Rosen; Zendesk, Inc.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-*/
-(function() {
-  var I18n, findTemplate, getPath, isBinding, isTranslatedAttribute, pluralForm;
-
-  isTranslatedAttribute = /(.+)Translation$/;
-
-  getPath = Ember.Handlebars.getPath || Ember.getPath;
-
-  if (typeof CLDR !== "undefined" && CLDR !== null) pluralForm = CLDR.pluralForm;
-
-  if (pluralForm == null) {
-    Ember.Logger.warn("CLDR.pluralForm not found. Em.I18n will not support count-based inflection.");
-  }
-
-  findTemplate = function(key, setOnMissing) {
-    var result;
-    Ember.assert("You must provide a translation key string, not %@".fmt(key), typeof key === 'string');
-    result = I18n.translations[key];
-    if (setOnMissing) {
-      if (result == null) {
-        result = I18n.translations[key] = I18n.compile("Missing translation: " + key);
-      }
-    }
-    if ((result != null) && !$.isFunction(result)) {
-      result = I18n.translations[key] = I18n.compile(result);
-    }
-    return result;
-  };
-
-  I18n = {
-    compile: Handlebars.compile,
-    translations: {},
-    template: function(key, count) {
-      var interpolatedKey, result, suffix;
-      if ((count != null) && (pluralForm != null)) {
-        suffix = pluralForm(count);
-        interpolatedKey = "%@.%@".fmt(key, suffix);
-        result = findTemplate(interpolatedKey, false);
-      }
-      return result != null ? result : result = findTemplate(key, true);
-    },
-    t: function(key, context) {
-      var template;
-      if (context == null) context = {};
-      template = I18n.template(key, context.count);
-      return template(context);
-    },
-    TranslateableAttributes: Em.Mixin.create({
-      didInsertElement: function() {
-        var attribute, isTranslatedAttributeMatch, key, path, result, translatedValue;
-        result = this._super.apply(this, arguments);
-        for (key in this) {
-          path = this[key];
-          isTranslatedAttributeMatch = key.match(isTranslatedAttribute);
-          if (isTranslatedAttributeMatch) {
-            attribute = isTranslatedAttributeMatch[1];
-            translatedValue = I18n.t(path);
-            this.$().attr(attribute, translatedValue);
-          }
-        }
-        return result;
-      }
-    })
-  };
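-
-  // Illustrative usage -- translations compile to Handlebars templates:
-  //
-  //     I18n.translations['greeting'] = 'Hello, {{name}}!';
-  //     I18n.t('greeting', { name: 'Ada' });  // => "Hello, Ada!"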
-
-  // SC.I18n = I18n;
-
-  Em.I18n = I18n;
-
-  Ember.I18n = I18n;
-
-  isBinding = /(.+)Binding$/;
-
-  Handlebars.registerHelper('t', function(key, options) {
-    var attrs, context, elementID, result, tagName, view;
-    context = this;
-    attrs = options.hash;
-    view = options.data.view;
-    tagName = attrs.tagName || 'span';
-    delete attrs.tagName;
-    elementID = "i18n-" + (jQuery.uuid++);
-    Em.keys(attrs).forEach(function(property) {
-      var bindPath, currentValue, invoker, isBindingMatch, observer, propertyName;
-      isBindingMatch = property.match(isBinding);
-      if (isBindingMatch) {
-        propertyName = isBindingMatch[1];
-        bindPath = attrs[property];
-        currentValue = getPath(bindPath);
-        attrs[propertyName] = currentValue;
-        invoker = null;
-        observer = function() {
-          var elem, newValue;
-          newValue = getPath(context, bindPath);
-          elem = view.$("#" + elementID);
-          if (elem.length === 0) {
-            Em.removeObserver(context, bindPath, invoker);
-            return;
-          }
-          attrs[propertyName] = newValue;
-          return elem.html(I18n.t(key, attrs));
-        };
-        invoker = function() {
-          return Em.run.once(observer);
-        };
-        return Em.addObserver(context, bindPath, invoker);
-      }
-    });
-    result = '<%@ id="%@">%@</%@>'.fmt(tagName, elementID, I18n.t(key, attrs), tagName);
-    return new Handlebars.SafeString(result);
-  });
-
-  Handlebars.registerHelper('translateAttr', function(options) {
-    var attrs, result;
-    attrs = options.hash;
-    result = [];
-    Em.keys(attrs).forEach(function(property) {
-      var translatedValue;
-      translatedValue = I18n.t(attrs[property]);
-      return result.push('%@="%@"'.fmt(property, translatedValue));
-    });
-    return new Handlebars.SafeString(result.join(' '));
-  });
-
-}).call(this);
diff --git a/branch-1.2/ambari-web/vendor/scripts/ember-latest.js b/branch-1.2/ambari-web/vendor/scripts/ember-latest.js
deleted file mode 100644
index fc0e4ab..0000000
--- a/branch-1.2/ambari-web/vendor/scripts/ember-latest.js
+++ /dev/null
@@ -1,20709 +0,0 @@
-// Version: v1.0.pre
-// Last commit: 7955b85 (2012-08-03 14:50:17 -0700)
-
-
-(function() {
-/*global __fail__*/
-
-if ('undefined' === typeof Ember) {
-  Ember = {};
-
-  if ('undefined' !== typeof window) {
-    window.Em = window.Ember = Em = Ember;
-  }
-}
-
-Ember.ENV = 'undefined' === typeof ENV ? {} : ENV;
-
-if (!('MANDATORY_SETTER' in Ember.ENV)) {
-  Ember.ENV.MANDATORY_SETTER = true; // default to true for debug dist
-}
-
-/**
-  Define an assertion that will throw an exception if the condition is not
-  met.  Ember build tools will remove any calls to Ember.assert() when
-  doing a production build. Example:
-
-      // Test for truthiness
-      Ember.assert('Must pass a valid object', obj);
-      // Fail unconditionally
-      Ember.assert('This code path should never be run');
-
-  @static
-  @function
-  @param {String} desc
-    A description of the assertion.  This will become the text of the Error
-    thrown if the assertion fails.
-
-  @param {Boolean} test
-    Must be truthy for the assertion to pass. If falsy, an exception will be
-    thrown.
-*/
-Ember.assert = function(desc, test) {
-  if (!test) throw new Error("assertion failed: "+desc);
-};
-
-
-/**
-  Display a warning with the provided message. Ember build tools will
-  remove any calls to Ember.warn() when doing a production build.
-
-  @static
-  @function
-  @param {String} message
-    A warning to display.
-
-  @param {Boolean} test
-    An optional boolean. If falsy, the warning will be displayed.
-*/
-Ember.warn = function(message, test) {
-  if (!test) {
-    Ember.Logger.warn("WARNING: "+message);
-    if ('trace' in Ember.Logger) Ember.Logger.trace();
-  }
-};
-
-/**
-  Display a deprecation warning with the provided message and a stack trace
-  (Chrome and Firefox only). Ember build tools will remove any calls to
-  Ember.deprecate() when doing a production build.
-
-  @static
-  @function
-  @param {String} message
-    A description of the deprecation.
-
-  @param {Boolean} test
-    An optional boolean. If falsy, the deprecation will be displayed.
-*/
-Ember.deprecate = function(message, test) {
-  if (Ember && Ember.TESTING_DEPRECATION) { return; }
-
-  if (arguments.length === 1) { test = false; }
-  if (test) { return; }
-
-  if (Ember && Ember.ENV.RAISE_ON_DEPRECATION) { throw new Error(message); }
-
-  var error;
-
-  // When using new Error, we can't do the arguments check for Chrome. Alternatives are welcome
-  try { __fail__.fail(); } catch (e) { error = e; }
-
-  if (Ember.LOG_STACKTRACE_ON_DEPRECATION && error.stack) {
-    var stack, stackStr = '';
-    if (error['arguments']) {
-      // Chrome
-      stack = error.stack.replace(/^\s+at\s+/gm, '').
-                          replace(/^([^\(]+?)([\n$])/gm, '{anonymous}($1)$2').
-                          replace(/^Object.<anonymous>\s*\(([^\)]+)\)/gm, '{anonymous}($1)').split('\n');
-      stack.shift();
-    } else {
-      // Firefox
-      stack = error.stack.replace(/(?:\n@:0)?\s+$/m, '').
-                          replace(/^\(/gm, '{anonymous}(').split('\n');
-    }
-
-    stackStr = "\n    " + stack.slice(2).join("\n    ");
-    message = message + stackStr;
-  }
-
-  Ember.Logger.warn("DEPRECATION: "+message);
-};
-
-
-
-/**
-  Display a deprecation warning with the provided message and a stack trace
-  (Chrome and Firefox only) when the wrapped method is called.
-
-  Ember build tools will not remove calls to Ember.deprecateFunc(), though
-  no warnings will be shown in production.
-
-  @static
-  @function
-  @param {String} message
-    A description of the deprecation.
-
-  @param {Function} func
-    The function to be deprecated.
-*/
-Ember.deprecateFunc = function(message, func) {
-  return function() {
-    Ember.deprecate(message);
-    return func.apply(this, arguments);
-  };
-};
-
-
-window.ember_assert         = Ember.deprecateFunc("ember_assert is deprecated. Please use Ember.assert instead.",               Ember.assert);
-window.ember_warn           = Ember.deprecateFunc("ember_warn is deprecated. Please use Ember.warn instead.",                   Ember.warn);
-window.ember_deprecate      = Ember.deprecateFunc("ember_deprecate is deprecated. Please use Ember.deprecate instead.",         Ember.deprecate);
-window.ember_deprecateFunc  = Ember.deprecateFunc("ember_deprecateFunc is deprecated. Please use Ember.deprecateFunc instead.", Ember.deprecateFunc);
-
-})();
-
-// Version: v1.0.pre
-// Last commit: 7955b85 (2012-08-03 14:50:17 -0700)
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Metal
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-/*globals Em:true ENV */
-
-if ('undefined' === typeof Ember) {
-  // Create core object. Make it act like an instance of Ember.Namespace so that
-  // objects assigned to it are given a sane string representation.
-  Ember = {};
-}
-
-/**
-  @namespace
-  @name Ember
-  @version 1.0.pre
-
-  All Ember methods and functions are defined inside of this namespace.
-  You generally should not add new properties to this namespace as it may be
-  overwritten by future versions of Ember.
-
-  You can also use the shorthand "Em" instead of "Ember".
-
-  Ember-Runtime is a framework that provides core functions for
-  Ember including cross-platform functions, support for property
-  observing and objects. Its focus is on small size and performance. You can
-  use this in place of or along-side other cross-platform libraries such as
-  jQuery.
-
-  The core Runtime framework is based on the jQuery API with a number of
-  performance optimizations.
-*/
-
-// aliases needed to keep minifiers from removing the global context
-if ('undefined' !== typeof window) {
-  window.Em = window.Ember = Em = Ember;
-}
-
-// Make sure these are set whether Ember was already defined or not
-
-Ember.isNamespace = true;
-
-Ember.toString = function() { return "Ember"; };
-
-
-/**
-  @static
-  @type String
-  @default '1.0.pre'
-  @constant
-*/
-Ember.VERSION = '1.0.pre';
-
-/**
-  @static
-  @type Hash
-  @constant
-
-  Standard environmental variables.  You can define these in a global `ENV`
-  variable before loading Ember to control various configuration
-  settings.
-*/
-Ember.ENV = Ember.ENV || ('undefined' === typeof ENV ? {} : ENV);
-
-Ember.config = Ember.config || {};
-
-// ..........................................................
-// BOOTSTRAP
-//
-
-/**
-  @static
-  @type Boolean
-  @default true
-  @constant
-
-  Determines whether Ember should enhance some built-in object
-  prototypes to provide a more friendly API.  If enabled, a few methods
-  will be added to Function, String, and Array.  Object.prototype will not be
-  enhanced, since it is the one that causes the most trouble for people.
-
-  In general we recommend leaving this option set to true since it rarely
-  conflicts with other code.  If you need to turn it off, however, you can
-  set ENV.EXTEND_PROTOTYPES to false before Ember loads to disable it.
-*/
-Ember.EXTEND_PROTOTYPES = (Ember.ENV.EXTEND_PROTOTYPES !== false);
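-
-// For example (assuming Ember is loaded via a plain script tag), you can
-// opt out of the prototype extensions by defining ENV first:
-//
-//     <script>window.ENV = { EXTEND_PROTOTYPES: false };</script>
-//     <script src="ember.js"></script>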
-
-/**
-  @static
-  @type Boolean
-  @default true
-  @constant
-
-  Determines whether Ember logs a full stack trace during deprecation warnings.
-*/
-Ember.LOG_STACKTRACE_ON_DEPRECATION = (Ember.ENV.LOG_STACKTRACE_ON_DEPRECATION !== false);
-
-/**
-  @static
-  @type Boolean
-  @default Ember.EXTEND_PROTOTYPES
-  @constant
-
-  Determines whether Ember should add ECMAScript 5 shims to older browsers.
-*/
-Ember.SHIM_ES5 = (Ember.ENV.SHIM_ES5 === false) ? false : Ember.EXTEND_PROTOTYPES;
-
-
-/**
-  @static
-  @type Boolean
-  @default true
-  @constant
-
-  Determines whether computed properties are cacheable by default.
-  This option will be removed for the 1.1 release.
-
-  When caching is enabled by default, you can use `volatile()` to disable
-  caching on individual computed properties.
-*/
-Ember.CP_DEFAULT_CACHEABLE = (Ember.ENV.CP_DEFAULT_CACHEABLE !== false);
-
-/**
-  @static
-  @type Boolean
-  @default true
-  @constant
-
-  Determines whether views render their templates using themselves
-  as the context, or whether the context is inherited from the parent. This
-  option will be removed in the 1.1 release.
-
-  If you need to update your application to use the new context rules, simply
-  prefix property access with `view.`:
-
-      // Before:
-      {{#each App.photosController}}
-        Photo Title: {{title}}
-        {{#view App.InfoView contentBinding="this"}}
-          {{content.date}}
-          {{content.cameraType}}
-          {{otherViewProperty}}
-        {{/view}}
-      {{/each}}
-
-      // After:
-      {{#each App.photosController}}
-        Photo Title: {{title}}
-        {{#view App.InfoView}}
-          {{date}}
-          {{cameraType}}
-          {{view.otherViewProperty}}
-        {{/view}}
-      {{/each}}
-*/
-Ember.VIEW_PRESERVES_CONTEXT = (Ember.ENV.VIEW_PRESERVES_CONTEXT !== false);
-
-/**
-  Empty function that simply returns `this`.  Useful as a default no-op
-  callback or method stub.
-
-  @returns {Object}
-  @private
-*/
-Ember.K = function() { return this; };
-
-/**
-  @namespace
-  @name window
-  @description The global window object
-*/
-
-
-// Stub out the methods defined by the ember-debug package in case it's not loaded
-
-if ('undefined' === typeof Ember.assert) { Ember.assert = Ember.K; }
-if ('undefined' === typeof Ember.warn) { Ember.warn = Ember.K; }
-if ('undefined' === typeof Ember.deprecate) { Ember.deprecate = Ember.K; }
-if ('undefined' === typeof Ember.deprecateFunc) {
-  Ember.deprecateFunc = function(_, func) { return func; };
-}
-
-// These are deprecated but still supported
-
-if ('undefined' === typeof ember_assert) { window.ember_assert = Ember.K; }
-if ('undefined' === typeof ember_warn) { window.ember_warn = Ember.K; }
-if ('undefined' === typeof ember_deprecate) { window.ember_deprecate = Ember.K; }
-if ('undefined' === typeof ember_deprecateFunc) {
-  /** @private */
-  window.ember_deprecateFunc = function(_, func) { return func; };
-}
-
-
-// ..........................................................
-// LOGGER
-//
-
-/**
-  @class
-
-  Inside Ember-Metal, this simply uses the window.console object, falling
-  back to no-op stubs when no console is available.  Override it to provide
-  more robust logging functionality.
-*/
-Ember.Logger = window.console || { log: Ember.K, warn: Ember.K, error: Ember.K, info: Ember.K, debug: Ember.K };
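-
-// For example, both of these delegate to window.console when it exists
-// and fall back to the no-op stubs otherwise:
-//
-//     Ember.Logger.log('application booted');
-//     Ember.Logger.warn('template missing; rendering nothing');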
-
-})();
-
-
-
-(function() {
-/*jshint newcap:false*/
-
-// NOTE: There is a bug in jshint that doesn't recognize `Object()` without `new`
-// as being ok unless both `newcap:false` and not `use strict`.
-// https://github.com/jshint/jshint/issues/392
-
-// Testing this is not ideal, but we want to use native functions
-// if available while avoiding versions created by libraries like Prototype
-/** @private */
-var isNativeFunc = function(func) {
-  // This should probably work in all browsers likely to have ES5 array methods
-  return func && Function.prototype.toString.call(func).indexOf('[native code]') > -1;
-};
-
-// From: https://developer.mozilla.org/en/JavaScript/Reference/Global_Objects/array/map
-/** @private */
-var arrayMap = isNativeFunc(Array.prototype.map) ? Array.prototype.map : function(fun /*, thisp */) {
-  //"use strict";
-
-  if (this === void 0 || this === null) {
-    throw new TypeError();
-  }
-
-  var t = Object(this);
-  var len = t.length >>> 0;
-  if (typeof fun !== "function") {
-    throw new TypeError();
-  }
-
-  var res = new Array(len);
-  var thisp = arguments[1];
-  for (var i = 0; i < len; i++) {
-    if (i in t) {
-      res[i] = fun.call(thisp, t[i], i, t);
-    }
-  }
-
-  return res;
-};
-
-// From: https://developer.mozilla.org/en/JavaScript/Reference/Global_Objects/array/foreach
-/** @private */
-var arrayForEach = isNativeFunc(Array.prototype.forEach) ? Array.prototype.forEach : function(fun /*, thisp */) {
-  //"use strict";
-
-  if (this === void 0 || this === null) {
-    throw new TypeError();
-  }
-
-  var t = Object(this);
-  var len = t.length >>> 0;
-  if (typeof fun !== "function") {
-    throw new TypeError();
-  }
-
-  var thisp = arguments[1];
-  for (var i = 0; i < len; i++) {
-    if (i in t) {
-      fun.call(thisp, t[i], i, t);
-    }
-  }
-};
-
-/** @private */
-var arrayIndexOf = isNativeFunc(Array.prototype.indexOf) ? Array.prototype.indexOf : function (obj, fromIndex) {
-  if (fromIndex === null || fromIndex === undefined) { fromIndex = 0; }
-  else if (fromIndex < 0) { fromIndex = Math.max(0, this.length + fromIndex); }
-  for (var i = fromIndex, j = this.length; i < j; i++) {
-    if (this[i] === obj) { return i; }
-  }
-  return -1;
-};
-
-Ember.ArrayPolyfills = {
-  map: arrayMap,
-  forEach: arrayForEach,
-  indexOf: arrayIndexOf
-};
-
-var utils = Ember.EnumerableUtils = {
-  map: function(obj, callback, thisArg) {
-    return obj.map ? obj.map.call(obj, callback, thisArg) : arrayMap.call(obj, callback, thisArg);
-  },
-
-  forEach: function(obj, callback, thisArg) {
-    return obj.forEach ? obj.forEach.call(obj, callback, thisArg) : arrayForEach.call(obj, callback, thisArg);
-  },
-
-  indexOf: function(obj, element, index) {
-    return obj.indexOf ? obj.indexOf.call(obj, element, index) : arrayIndexOf.call(obj, element, index);
-  },
-
-  indexesOf: function(obj, elements) {
-    return elements === undefined ? [] : utils.map(elements, function(item) {
-      return utils.indexOf(obj, item);
-    });
-  },
-
-  removeObject: function(array, item) {
-    var index = utils.indexOf(array, item);
-    if (index !== -1) { array.splice(index, 1); }
-  }
-};
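-
-// A minimal usage sketch: each helper prefers the object's own method
-// (e.g. on an Ember.Array) and falls back to the polyfills above for
-// plain array-likes:
-//
-//     Ember.EnumerableUtils.map([1, 2, 3], function(x) { return x * 2; });
-//     // => [2, 4, 6]
-//
-//     var letters = ['a', 'b'];
-//     Ember.EnumerableUtils.removeObject(letters, 'a'); // letters is ['b']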
-
-
-if (Ember.SHIM_ES5) {
-  if (!Array.prototype.map) {
-    /** @private */
-    Array.prototype.map = arrayMap;
-  }
-
-  if (!Array.prototype.forEach) {
-    /** @private */
-    Array.prototype.forEach = arrayForEach;
-  }
-
-  if (!Array.prototype.indexOf) {
-    /** @private */
-    Array.prototype.indexOf = arrayIndexOf;
-  }
-}
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Metal
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-/*globals Node */
-/**
-  @class
-
-  Platform specific methods and feature detectors needed by the framework.
-
-  @name Ember.platform
-*/
-var platform = Ember.platform = {};
-
-/**
-  Identical to Object.create().  A simulated implementation is provided
-  if it is not available natively.
-  @memberOf Ember.platform
-  @name create
-*/
-Ember.create = Object.create;
-
-if (!Ember.create) {
-  /** @private */
-  var K = function() {};
-
-  Ember.create = function(obj, props) {
-    K.prototype = obj;
-    obj = new K();
-    if (props) {
-      K.prototype = obj;
-      for (var prop in props) {
-        K.prototype[prop] = props[prop].value;
-      }
-      obj = new K();
-    }
-    K.prototype = null;
-
-    return obj;
-  };
-
-  Ember.create.isSimulated = true;
-}
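-
-// A minimal sketch of the simulated Ember.create: `props` uses the same
-// descriptor shape as Object.create, but only `value` is honored here:
-//
-//     var base  = { greet: function() { return 'hi'; } };
-//     var child = Ember.create(base, { name: { value: 'Tom' } });
-//     child.greet(); // => 'hi'  (inherited from base)
-//     child.name;    // => 'Tom'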
-
-/** @private */
-var defineProperty = Object.defineProperty;
-var canRedefineProperties, canDefinePropertyOnDOM;
-
-// Catch IE8 where Object.defineProperty exists but only works on DOM elements
-if (defineProperty) {
-  try {
-    defineProperty({}, 'a',{get:function(){}});
-  } catch (e) {
-    /** @private */
-    defineProperty = null;
-  }
-}
-
-if (defineProperty) {
-  // Detects a bug in Android <3.2 where you cannot redefine a property using
-  // Object.defineProperty once accessors have already been set.
-  /** @private */
-  canRedefineProperties = (function() {
-    var obj = {};
-
-    defineProperty(obj, 'a', {
-      configurable: true,
-      enumerable: true,
-      get: function() { },
-      set: function() { }
-    });
-
-    defineProperty(obj, 'a', {
-      configurable: true,
-      enumerable: true,
-      writable: true,
-      value: true
-    });
-
-    return obj.a === true;
-  })();
-
-  // This is for Safari 5.0, which supports Object.defineProperty, but not
-  // on DOM nodes.
-  /** @private */
-  canDefinePropertyOnDOM = (function(){
-    try {
-      defineProperty(document.createElement('div'), 'definePropertyOnDOM', {});
-      return true;
-    } catch(e) { }
-
-    return false;
-  })();
-
-  if (!canRedefineProperties) {
-    /** @private */
-    defineProperty = null;
-  } else if (!canDefinePropertyOnDOM) {
-    /** @private */
-    defineProperty = function(obj, keyName, desc){
-      var isNode;
-
-      if (typeof Node === "object") {
-        isNode = obj instanceof Node;
-      } else {
-        isNode = typeof obj === "object" && typeof obj.nodeType === "number" && typeof obj.nodeName === "string";
-      }
-
-      if (isNode) {
-        // TODO: Should we have a warning here?
-        return (obj[keyName] = desc.value);
-      } else {
-        return Object.defineProperty(obj, keyName, desc);
-      }
-    };
-  }
-}
-
-/**
-  Identical to Object.defineProperty().  Implements as much of the
-  functionality as possible if it is not available natively.
-
-  @memberOf Ember.platform
-  @name defineProperty
-  @param {Object} obj The object to modify
-  @param {String} keyName property name to modify
-  @param {Object} desc descriptor hash
-  @returns {void}
-*/
-platform.defineProperty = defineProperty;
-
-/**
-  Set to true if the platform supports native getters and setters.
-
-  @memberOf Ember.platform
-  @name hasPropertyAccessors
-*/
-platform.hasPropertyAccessors = true;
-
-if (!platform.defineProperty) {
-  platform.hasPropertyAccessors = false;
-
-  platform.defineProperty = function(obj, keyName, desc) {
-    if (!desc.get) { obj[keyName] = desc.value; }
-  };
-
-  platform.defineProperty.isSimulated = true;
-}
-
-if (Ember.ENV.MANDATORY_SETTER && !platform.hasPropertyAccessors) {
-  Ember.ENV.MANDATORY_SETTER = false;
-}
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Metal
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-var o_defineProperty = Ember.platform.defineProperty,
-    o_create = Ember.create,
-    // Used for guid generation...
-    GUID_KEY = '__ember'+ (+ new Date()),
-    uuid         = 0,
-    numberCache  = [],
-    stringCache  = {};
-
-var MANDATORY_SETTER = Ember.ENV.MANDATORY_SETTER;
-
-/**
-  @private
-  @static
-  @type String
-  @constant
-
-  A unique key used to assign guids and other private metadata to objects.
-  If you inspect an object in your browser debugger you will often see these.
-  They can be safely ignored.
-
-  On browsers that support it, these properties are added with enumeration
-  disabled so they won't show up when you iterate over your properties.
-*/
-Ember.GUID_KEY = GUID_KEY;
-
-var GUID_DESC = {
-  writable:    false,
-  configurable: false,
-  enumerable:  false,
-  value: null
-};
-
-/**
-  @private
-
-  Generates a new guid, optionally saving the guid to the object that you
-  pass in.  You will rarely need to use this method.  Instead you should
-  call Ember.guidFor(obj), which returns an existing guid if available.
-
-  @param {Object} obj
-    Optional object the guid will be used for.  If passed in, the guid will
-    be saved on the object and reused whenever you pass the same object
-    again.
-
-    If no object is passed, just generate a new guid.
-
-  @param {String} prefix
-    Optional prefix to place in front of the guid.  Useful when you want to
-    separate the guid into separate namespaces.
-
-  @returns {String} the guid
-*/
-Ember.generateGuid = function generateGuid(obj, prefix) {
-  if (!prefix) prefix = 'ember';
-  var ret = (prefix + (uuid++));
-  if (obj) {
-    GUID_DESC.value = ret;
-    o_defineProperty(obj, GUID_KEY, GUID_DESC);
-  }
-  return ret ;
-};
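-
-// A minimal sketch (the exact counter values vary per run):
-//
-//     Ember.generateGuid();            // e.g. "ember1"
-//     Ember.generateGuid(null, 'ax');  // e.g. "ax2" -- custom namespace
-//
-//     var obj = {};
-//     Ember.generateGuid(obj);         // guid is also saved on obj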
-
-/**
-  @private
-
-  Returns a unique id for the object.  If the object does not yet have
-  a guid, one will be assigned to it.  You can call this on any object,
-  Ember.Object-based or not, but be aware that it will add a hidden guid
-  property (keyed by Ember.GUID_KEY) to that object.
-
-  You can also use this method on DOM Element objects.
-
-  @method
-  @param obj {Object} any object, string, number, Element, or primitive
-  @returns {String} the unique guid for this instance.
-*/
-Ember.guidFor = function guidFor(obj) {
-
-  // special cases where we don't want to add a key to object
-  if (obj === undefined) return "(undefined)";
-  if (obj === null) return "(null)";
-
-  var cache, ret;
-  var type = typeof obj;
-
-  // Don't allow prototype changes to String etc. to change the guidFor
-  switch(type) {
-    case 'number':
-      ret = numberCache[obj];
-      if (!ret) ret = numberCache[obj] = 'nu'+obj;
-      return ret;
-
-    case 'string':
-      ret = stringCache[obj];
-      if (!ret) ret = stringCache[obj] = 'st'+(uuid++);
-      return ret;
-
-    case 'boolean':
-      return obj ? '(true)' : '(false)';
-
-    default:
-      if (obj[GUID_KEY]) return obj[GUID_KEY];
-      if (obj === Object) return '(Object)';
-      if (obj === Array)  return '(Array)';
-      ret = 'ember'+(uuid++);
-      GUID_DESC.value = ret;
-      o_defineProperty(obj, GUID_KEY, GUID_DESC);
-      return ret;
-  }
-};
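-
-// A minimal sketch of the caching behavior (exact guid strings vary):
-//
-//     var obj = {};
-//     Ember.guidFor(obj) === Ember.guidFor(obj); // => true
-//     Ember.guidFor('x');    // stable per string, e.g. "st3"
-//     Ember.guidFor(42);     // => "nu42"
-//     Ember.guidFor(null);   // => "(null)"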
-
-// ..........................................................
-// META
-//
-
-var META_DESC = {
-  writable:    true,
-  configurable: false,
-  enumerable:  false,
-  value: null
-};
-
-var META_KEY = Ember.GUID_KEY+'_meta';
-
-/**
-  The key used to store meta information on object for property observing.
-
-  @static
-  @type String
-*/
-Ember.META_KEY = META_KEY;
-
-// Placeholder for non-writable metas.
-var EMPTY_META = {
-  descs: {},
-  watching: {}
-};
-
-if (MANDATORY_SETTER) { EMPTY_META.values = {}; }
-
-Ember.EMPTY_META = EMPTY_META;
-
-if (Object.freeze) Object.freeze(EMPTY_META);
-
-var isDefinePropertySimulated = Ember.platform.defineProperty.isSimulated;
-
-function Meta(obj) {
-  this.descs = {};
-  this.watching = {};
-  this.cache = {};
-  this.source = obj;
-}
-
-if (isDefinePropertySimulated) {
-  // on platforms that don't support enumerable false
-  // make meta fail jQuery.isPlainObject() to hide from
-  // jQuery.extend() by having a property that fails
-  // hasOwnProperty check.
-  Meta.prototype.__preventPlainObject__ = true;
-}
-
-/**
-  @private
-  @function
-
-  Retrieves the meta hash for an object.  If 'writable' is true, this
-  ensures the hash is writable for this object as well.
-
-  The meta object contains information about computed property descriptors as
-  well as any watched properties and other information.  You generally will
-  not access this information directly but instead work with higher level
-  methods that manipulate this hash indirectly.
-
-  @param {Object} obj
-    The object to retrieve meta for
-
-  @param {Boolean} writable
-    Pass false if you do not intend to modify the meta hash, allowing the
-    method to avoid making an unnecessary copy.
-
-  @returns {Hash}
-*/
-Ember.meta = function meta(obj, writable) {
-
-  var ret = obj[META_KEY];
-  if (writable===false) return ret || EMPTY_META;
-
-  if (!ret) {
-    if (!isDefinePropertySimulated) o_defineProperty(obj, META_KEY, META_DESC);
-
-    ret = new Meta(obj);
-
-    if (MANDATORY_SETTER) { ret.values = {}; }
-
-    obj[META_KEY] = ret;
-
-    // make sure we don't accidentally try to create constructor like desc
-    ret.descs.constructor = null;
-
-  } else if (ret.source !== obj) {
-    if (!isDefinePropertySimulated) o_defineProperty(obj, META_KEY, META_DESC);
-
-    ret = o_create(ret);
-    ret.descs    = o_create(ret.descs);
-    ret.watching = o_create(ret.watching);
-    ret.cache    = {};
-    ret.source   = obj;
-
-    if (MANDATORY_SETTER) { ret.values = o_create(ret.values); }
-
-    obj[META_KEY] = ret;
-  }
-  return ret;
-};
-
-Ember.getMeta = function getMeta(obj, property) {
-  var meta = Ember.meta(obj, false);
-  return meta[property];
-};
-
-Ember.setMeta = function setMeta(obj, property, value) {
-  var meta = Ember.meta(obj, true);
-  meta[property] = value;
-  return value;
-};
-
-/**
-  @private
-
-  In order to store defaults for a class, a prototype may need to create
-  a default meta object, which will be inherited by any objects instantiated
-  from the class's constructor.
-
-  However, the properties of that meta object are only shallow-cloned,
-  so if a property is a hash (like the event system's `listeners` hash),
-  it will by default be shared across all instances of that class.
-
-  This method allows extensions to deeply clone a series of nested hashes or
-  other complex objects. For instance, the event system might pass
-  ['listeners', 'foo:change', 'ember157'] to `metaPath`, which will
-  walk down the keys provided.
-
-  For each key, if the key does not exist, it is created. If it already
-  exists and it was inherited from its constructor, the constructor's
-  key is cloned.
-
-  You can also pass false for `writable`, in which case `metaPath` will
-  simply return undefined if it discovers any part of the path that is
-  shared or undefined.
-
-  @param {Object} obj The object whose meta we are examining
-  @param {Array} path An array of keys to walk down
-  @param {Boolean} writable whether or not to create a new meta
-    (or meta property) if one does not already exist or if it's
-    shared with its constructor
-*/
-Ember.metaPath = function metaPath(obj, path, writable) {
-  var meta = Ember.meta(obj, writable), keyName, value;
-
-  for (var i=0, l=path.length; i<l; i++) {
-    keyName = path[i];
-    value = meta[keyName];
-
-    if (!value) {
-      if (!writable) { return undefined; }
-      value = meta[keyName] = { __ember_source__: obj };
-    } else if (value.__ember_source__ !== obj) {
-      if (!writable) { return undefined; }
-      value = meta[keyName] = o_create(value);
-      value.__ember_source__ = obj;
-    }
-
-    meta = value;
-  }
-
-  return value;
-};
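-
-// A minimal sketch, reusing the listeners example from the comment above
-// ('ember157' stands in for a real guid):
-//
-//     var obj = {};
-//     // creates meta.listeners['foo:change']['ember157'] as needed
-//     Ember.metaPath(obj, ['listeners', 'foo:change', 'ember157'], true);
-//     // read-only probe: returns undefined rather than creating the path
-//     Ember.metaPath(obj, ['listeners', 'missing'], false);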
-
-/**
-  @private
-
-  Wraps the passed function so that `this._super` will point to the superFunc
-  when the function is invoked.  This is the primitive we use to implement
-  calls to super.
-
-  @param {Function} func
-    The function to call
-
-  @param {Function} superFunc
-    The super function.
-
-  @returns {Function} wrapped function.
-*/
-Ember.wrap = function(func, superFunc) {
-
-  function K() {}
-
-  var newFunc = function() {
-    var ret, sup = this._super;
-    this._super = superFunc || K;
-    ret = func.apply(this, arguments);
-    this._super = sup;
-    return ret;
-  };
-
-  newFunc.base = func;
-  return newFunc;
-};
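-
-// A minimal sketch of how Ember.wrap powers this._super:
-//
-//     var parent = function() { return 'parent'; };
-//     var child  = Ember.wrap(function() {
-//       return this._super() + ' -> child';
-//     }, parent);
-//
-//     child.call({}); // => 'parent -> child'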
-
-/**
-  Returns true if the passed object is an array or Array-like.
-
-  Ember Array Protocol:
-
-    - the object has an objectAt property
-    - the object is a native Array
-    - the object is an Object, and has a length property
-
-  Unlike Ember.typeOf this method returns true even if the passed object is
-  not formally an array but appears to be array-like (i.e. implements Ember.Array)
-
-      Ember.isArray(); // false
-      Ember.isArray([]); // true
-      Ember.isArray( Ember.ArrayProxy.create({ content: [] }) ); // true
-
-  @param {Object} obj The object to test
-  @returns {Boolean}
-*/
-Ember.isArray = function(obj) {
-  if (!obj || obj.setInterval) { return false; }
-  if (Array.isArray && Array.isArray(obj)) { return true; }
-  if (Ember.Array && Ember.Array.detect(obj)) { return true; }
-  if ((obj.length !== undefined) && 'object'===typeof obj) { return true; }
-  return false;
-};
-
-/**
-  Forces the passed object to be part of an array.  If the object is already
-  an array or array-like, returns the object.  Otherwise adds the object to
-  an array.  If obj is null or undefined, returns an empty array.
-
-      Ember.makeArray();          => []
-      Ember.makeArray(null);      => []
-      Ember.makeArray(undefined); => []
-      Ember.makeArray('lindsay'); => ['lindsay']
-      Ember.makeArray([1,2,42]);  => [1,2,42]
-
-      var controller = Ember.ArrayProxy.create({ content: [] });
-      Ember.makeArray(controller) === controller;   => true
-
-  @param {Object} obj the object
-  @returns {Array}
-*/
-Ember.makeArray = function(obj) {
-  if (obj === null || obj === undefined) { return []; }
-  return Ember.isArray(obj) ? obj : [obj];
-};
-
-function canInvoke(obj, methodName) {
-  return !!(obj && typeof obj[methodName] === 'function');
-}
-
-/**
-  Checks to see if the `methodName` exists on the `obj`.
-
-  @function
-
-  @param {Object} obj The object to check for the method
-  @param {String} methodName The method name to check for
-*/
-Ember.canInvoke = canInvoke;
-
-/**
-  Checks to see if the `methodName` exists on the `obj`,
-  and if it does, invokes it with the arguments passed.
-
-  @function
-
-  @param {Object} obj The object to check for the method
-  @param {String} methodName The method name to check for
-  @param {Array} args The arguments to pass to the method
-
-  @returns {anything} the return value of the invoked method, or undefined
-    if the method does not exist
-*/
-Ember.tryInvoke = function(obj, methodName, args) {
-  if (canInvoke(obj, methodName)) {
-    return obj[methodName].apply(obj, args);
-  }
-};
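-
-// A minimal usage sketch:
-//
-//     var person = { greet: function(name) { return 'Hello ' + name; } };
-//     Ember.tryInvoke(person, 'greet', ['Yehuda']); // => 'Hello Yehuda'
-//     Ember.tryInvoke(person, 'poke');              // => undefined (no-op)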
-
-})();
-
-
-
-(function() {
-/**
-  JavaScript (before ES6) does not have a Map implementation. Objects,
-  which are often used as dictionaries, may only have Strings as keys.
-
-  Because Ember has a way to get a unique identifier for every object
-  via `Ember.guidFor`, we can implement a performant Map with arbitrary
-  keys. Because it is commonly used in low-level bookkeeping, Map is
-  implemented as a pure JavaScript object for performance.
-
-  This implementation follows the current iteration of the ES6 proposal
-  for maps (http://wiki.ecmascript.org/doku.php?id=harmony:simple_maps_and_sets),
-  with two exceptions. First, because we need our implementation to be
-  pleasant on older browsers, we do not use the `delete` name (using
-  `remove` instead). Second, as we do not have the luxury of in-VM
-  iteration, we implement a forEach method for iteration.
-
-  Map is mocked out to look like an Ember object, so you can do
-  `Ember.Map.create()` for symmetry with other Ember classes.
-*/
-/** @private */
-var guidFor = Ember.guidFor,
-    indexOf = Ember.ArrayPolyfills.indexOf;
-
-var copy = function(obj) {
-  var output = {};
-
-  for (var prop in obj) {
-    if (obj.hasOwnProperty(prop)) { output[prop] = obj[prop]; }
-  }
-
-  return output;
-};
-
-var copyMap = function(original, newObject) {
-  var keys = original.keys.copy(),
-      values = copy(original.values);
-
-  newObject.keys = keys;
-  newObject.values = values;
-
-  return newObject;
-};
-
-// This class is used internally by Ember.js and Ember Data.
-// Please do not use it at this time. We plan to clean it up
-// and add many tests soon.
-var OrderedSet = Ember.OrderedSet = function() {
-  this.clear();
-};
-
-OrderedSet.create = function() {
-  return new OrderedSet();
-};
-
-OrderedSet.prototype = {
-  clear: function() {
-    this.presenceSet = {};
-    this.list = [];
-  },
-
-  add: function(obj) {
-    var guid = guidFor(obj),
-        presenceSet = this.presenceSet,
-        list = this.list;
-
-    if (guid in presenceSet) { return; }
-
-    presenceSet[guid] = true;
-    list.push(obj);
-  },
-
-  remove: function(obj) {
-    var guid = guidFor(obj),
-        presenceSet = this.presenceSet,
-        list = this.list;
-
-    delete presenceSet[guid];
-
-    var index = indexOf.call(list, obj);
-    if (index > -1) {
-      list.splice(index, 1);
-    }
-  },
-
-  isEmpty: function() {
-    return this.list.length === 0;
-  },
-
-  forEach: function(fn, self) {
-    // allow mutation during iteration
-    var list = this.list.slice();
-
-    for (var i = 0, j = list.length; i < j; i++) {
-      fn.call(self, list[i]);
-    }
-  },
-
-  toArray: function() {
-    return this.list.slice();
-  },
-
-  copy: function() {
-    var set = new OrderedSet();
-
-    set.presenceSet = copy(this.presenceSet);
-    set.list = this.list.slice();
-
-    return set;
-  }
-};
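-
-// A minimal usage sketch (internal API, per the note above):
-//
-//     var set = Ember.OrderedSet.create();
-//     set.add('a'); set.add('b'); set.add('a'); // duplicate 'a' is ignored
-//     set.toArray();  // => ['a', 'b']  (insertion order preserved)
-//     set.remove('a');
-//     set.isEmpty();  // => false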
-
-/**
-  A Map stores values indexed by keys. Unlike JavaScript's
-  default Objects, the keys of a Map can be any JavaScript
-  object.
-
-  Internally, a Map has two data structures:
-
-    `keys`: an OrderedSet of all of the existing keys
-    `values`: a JavaScript Object indexed by the
-      Ember.guidFor(key)
-
-  When a key/value pair is added for the first time, we
-  add the key to the `keys` OrderedSet, and create or
-  replace an entry in `values`. When an entry is deleted,
-  we delete its entry in `keys` and `values`.
-*/
-
-/** @private */
-var Map = Ember.Map = function() {
-  this.keys = Ember.OrderedSet.create();
-  this.values = {};
-};
-
-Map.create = function() {
-  return new Map();
-};
-
-Map.prototype = {
-  /**
-    Retrieve the value associated with a given key.
-
-    @param {anything} key
-    @return {anything} the value associated with the key, or undefined
-  */
-  get: function(key) {
-    var values = this.values,
-        guid = guidFor(key);
-
-    return values[guid];
-  },
-
-  /**
-    Adds a value to the map. If a value for the given key has already been
-    provided, the new value will replace the old value.
-
-    @param {anything} key
-    @param {anything} value
-  */
-  set: function(key, value) {
-    var keys = this.keys,
-        values = this.values,
-        guid = guidFor(key);
-
-    keys.add(key);
-    values[guid] = value;
-  },
-
-  /**
-    Removes a value from the map for an associated key.
-
-    @param {anything} key
-    @returns {Boolean} true if an item was removed, false otherwise
-  */
-  remove: function(key) {
-    // don't use ES6 "delete" because it will be annoying
-    // to use in browsers that are not ES6 friendly;
-    var keys = this.keys,
-        values = this.values,
-        guid = guidFor(key),
-        value;
-
-    if (values.hasOwnProperty(guid)) {
-      keys.remove(key);
-      value = values[guid];
-      delete values[guid];
-      return true;
-    } else {
-      return false;
-    }
-  },
-
-  /**
-    Check whether a key is present.
-
-    @param {anything} key
-    @returns {Boolean} true if the item was present, false otherwise
-  */
-  has: function(key) {
-    var values = this.values,
-        guid = guidFor(key);
-
-    return values.hasOwnProperty(guid);
-  },
-
-  /**
-    Iterate over all the keys and values. Calls the function once
-    for each key, passing in the key and value, in that order.
-
-    The keys are guaranteed to be iterated over in insertion order.
-
-    @param {Function} callback
-    @param {anything} self if passed, the `this` value inside the
-      callback. By default, `this` is the map.
-  */
-  forEach: function(callback, self) {
-    var keys = this.keys,
-        values = this.values;
-
-    keys.forEach(function(key) {
-      var guid = guidFor(key);
-      callback.call(self, key, values[guid]);
-    });
-  },
-
-  copy: function() {
-    return copyMap(this, new Map());
-  }
-};
-
-var MapWithDefault = Ember.MapWithDefault = function(options) {
-  Map.call(this);
-  this.defaultValue = options.defaultValue;
-};
-
-MapWithDefault.create = function(options) {
-  if (options) {
-    return new MapWithDefault(options);
-  } else {
-    return new Map();
-  }
-};
-
-MapWithDefault.prototype = Ember.create(Map.prototype);
-
-MapWithDefault.prototype.get = function(key) {
-  var hasValue = this.has(key);
-
-  if (hasValue) {
-    return Map.prototype.get.call(this, key);
-  } else {
-    var defaultValue = this.defaultValue(key);
-    this.set(key, defaultValue);
-    return defaultValue;
-  }
-};
-
-MapWithDefault.prototype.copy = function() {
-  return copyMap(this, new MapWithDefault({
-    defaultValue: this.defaultValue
-  }));
-};
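-
-// A minimal usage sketch: defaultValue lazily fills in missing entries
-// on first get:
-//
-//     var counts = Ember.MapWithDefault.create({
-//       defaultValue: function(key) { return 0; }
-//     });
-//     counts.get('clicks');                            // => 0 (and stored)
-//     counts.set('clicks', counts.get('clicks') + 1);
-//     counts.get('clicks');                            // => 1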
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Metal
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-var META_KEY = Ember.META_KEY, get, set;
-
-var MANDATORY_SETTER = Ember.ENV.MANDATORY_SETTER;
-
-/** @private */
-var IS_GLOBAL = /^([A-Z$]|([0-9][A-Z$]))/;
-var IS_GLOBAL_PATH = /^([A-Z$]|([0-9][A-Z$])).*[\.\*]/;
-var HAS_THIS  = /^this[\.\*]/;
-var FIRST_KEY = /^([^\.\*]+)/;
-
-// ..........................................................
-// GET AND SET
-//
-// If we are on a platform that supports accessors we can just use those.
-// Otherwise simulate accessors by looking up the property directly on the
-// object.
-
-/** @private */
-get = function get(obj, keyName) {
-  // Helpers that operate with 'this' within an #each
-  if (keyName === '') {
-    return obj;
-  }
-
-  if (!keyName && 'string'===typeof obj) {
-    keyName = obj;
-    obj = null;
-  }
-
-  if (!obj || keyName.indexOf('.') !== -1) {
-    return getPath(obj, keyName);
-  }
-
-  Ember.assert("You need to provide an object and key to `get`.", !!obj && keyName);
-
-  var meta = obj[META_KEY], desc = meta && meta.descs[keyName], ret;
-  if (desc) {
-    return desc.get(obj, keyName);
-  } else {
-    if (MANDATORY_SETTER && meta && meta.watching[keyName] > 0) {
-      ret = meta.values[keyName];
-    } else {
-      ret = obj[keyName];
-    }
-
-    if (ret === undefined &&
-        'object' === typeof obj && !(keyName in obj) && 'function' === typeof obj.unknownProperty) {
-      return obj.unknownProperty(keyName);
-    }
-
-    return ret;
-  }
-};
-
-/** @private */
-set = function set(obj, keyName, value, tolerant) {
-  if (typeof obj === 'string') {
-    Ember.assert("Path '" + obj + "' must be global if no obj is given.", IS_GLOBAL.test(obj));
-    value = keyName;
-    keyName = obj;
-    obj = null;
-  }
-
-  if (!obj || keyName.indexOf('.') !== -1) {
-    return setPath(obj, keyName, value, tolerant);
-  }
-
-  Ember.assert("You need to provide an object and key to `set`.", !!obj && keyName !== undefined);
-  Ember.assert('calling set on destroyed object', !obj.isDestroyed);
-
-  var meta = obj[META_KEY], desc = meta && meta.descs[keyName],
-      isUnknown, currentValue;
-  if (desc) {
-    desc.set(obj, keyName, value);
-  }
-  else {
-    isUnknown = 'object' === typeof obj && !(keyName in obj);
-
-    // setUnknownProperty is called if `obj` is an object,
-    // the property does not already exist, and the
-    // `setUnknownProperty` method exists on the object
-    if (isUnknown && 'function' === typeof obj.setUnknownProperty) {
-      obj.setUnknownProperty(keyName, value);
-    } else if (meta && meta.watching[keyName] > 0) {
-      if (MANDATORY_SETTER) {
-        currentValue = meta.values[keyName];
-      } else {
-        currentValue = obj[keyName];
-      }
-      // only trigger a change if the value has changed
-      if (value !== currentValue) {
-        Ember.propertyWillChange(obj, keyName);
-        if (MANDATORY_SETTER) {
-          if (currentValue === undefined && !(keyName in obj)) {
-            Ember.defineProperty(obj, keyName, null, value); // setup mandatory setter
-          } else {
-            meta.values[keyName] = value;
-          }
-        } else {
-          obj[keyName] = value;
-        }
-        Ember.propertyDidChange(obj, keyName);
-      }
-    } else {
-      obj[keyName] = value;
-    }
-  }
-  return value;
-};
-
-/** @private */
-function firstKey(path) {
-  return path.match(FIRST_KEY)[0];
-}
-
-// assumes path is already normalized
-/** @private */
-function normalizeTuple(target, path) {
-  var hasThis  = HAS_THIS.test(path),
-      isGlobal = !hasThis && IS_GLOBAL_PATH.test(path),
-      key;
-
-  if (!target || isGlobal) target = window;
-  if (hasThis) path = path.slice(5);
-
-  if (target === window) {
-    key = firstKey(path);
-    target = get(target, key);
-    path   = path.slice(key.length+1);
-  }
-
-  // must return some kind of path to be valid; otherwise other things will break.
-  if (!path || path.length===0) throw new Error('Invalid Path');
-
-  return [ target, path ];
-}
-
-/** @private */
-function getPath(root, path) {
-  var hasThis, parts, tuple, idx, len;
-
-  // If there is no root and path is a key name, return that
-  // property from the global object.
-  // E.g. get('Ember') -> Ember
-  if (root === null && path.indexOf('.') === -1) { return get(window, path); }
-
-  // detect complicated paths and normalize them
-  hasThis  = HAS_THIS.test(path);
-
-  if (!root || hasThis) {
-    tuple = normalizeTuple(root, path);
-    root = tuple[0];
-    path = tuple[1];
-    tuple.length = 0;
-  }
-
-  parts = path.split(".");
-  len = parts.length;
-  for (idx=0; root && idx<len; idx++) {
-    root = get(root, parts[idx], true);
-    if (root && root.isDestroyed) { return undefined; }
-  }
-  return root;
-}
-
-/** @private */
-function setPath(root, path, value, tolerant) {
-  var keyName;
-
-  // get the last part of the path
-  keyName = path.slice(path.lastIndexOf('.') + 1);
-
-  // get the first part of the path
-  path    = path.slice(0, path.length-(keyName.length+1));
-
-  // unless the path is this, look up the first part to
-  // get the root
-  if (path !== 'this') {
-    root = getPath(root, path);
-  }
-
-  if (!keyName || keyName.length === 0) {
-    throw new Error('You passed an empty path');
-  }
-
-  if (!root) {
-    if (tolerant) { return; }
-    else { throw new Error('Object in path '+path+' could not be found or was destroyed.'); }
-  }
-
-  return set(root, keyName, value);
-}
-
-/**
-  @private
-
-  Normalizes a target/path pair to reflect the actual target/path that should
-  be observed, etc.  This takes into account passing in global property
-  paths (i.e. a path beginning with a capital letter not defined on the
-  target) and * separators.
-
-  @param {Object} target
-    The current target.  May be null.
-
-  @param {String} path
-    A path on the target or a global property path.
-
-  @returns {Array} a temporary array with the normalized target/path pair.
-*/
-Ember.normalizeTuple = function(target, path) {
-  return normalizeTuple(target, path);
-};
-
-Ember.getWithDefault = function(root, key, defaultValue) {
-  var value = get(root, key);
-
-  if (value === undefined) { return defaultValue; }
-  return value;
-};
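-
-// A minimal usage sketch:
-//
-//     var person = { name: 'Peter' };
-//     Ember.getWithDefault(person, 'name', 'anonymous'); // => 'Peter'
-//     Ember.getWithDefault(person, 'age', 0);            // => 0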
-
-
-/**
-  @function
-
-  Gets the value of a property on an object.  If the property is computed,
-  the function will be invoked.  If the property is not defined but the
-  object implements the unknownProperty() method then that will be invoked.
-
-  If you plan to run on IE8 and older browsers then you should use this
-  method anytime you want to retrieve a property on an object that you don't
-  know for sure is private.  (By convention, only properties beginning with
-  an underscore '_' are considered private.)
-
-  On all newer browsers, you only need to use this method to retrieve
-  properties if the property might not be defined on the object and you want
-  to respect the unknownProperty() handler.  Otherwise you can ignore this
-  method.
-
-  Note that if the obj itself is null, this method will simply return
-  undefined.
-
-  @param {Object} obj
-    The object to retrieve from.
-
-  @param {String} keyName
-    The property key to retrieve
-
-  @returns {Object} the property value or undefined.
-*/
-Ember.get = get;
-Ember.getPath = Ember.deprecateFunc('getPath is deprecated since get now supports paths', Ember.get);
-
-/**
-  @function
-
-  Sets the value of a property on an object, respecting computed properties
-  and notifying observers and other listeners of the change.  If the
-  property is not defined but the object implements the unknownProperty()
-  method then that will be invoked as well.
-
-  If you plan to run on IE8 and older browsers then you should use this
-  method anytime you want to set a property on an object that you don't
-  know for sure is private.  (By convention, only properties beginning with
-  an underscore '_' are considered private.)
-
-  On all newer browsers, you only need to use this method to set
-  properties if the property might not be defined on the object and you want
-  to respect the unknownProperty() handler.  Otherwise you can ignore this
-  method.
-
-  @param {Object} obj
-    The object to modify.
-
-  @param {String} keyName
-    The property key to set
-
-  @param {Object} value
-    The value to set
-
-  @returns {Object} the passed value.
-*/
-Ember.set = set;
-Ember.setPath = Ember.deprecateFunc('setPath is deprecated since set now supports paths', Ember.set);
-
-/**
-  Error-tolerant form of Ember.set. Will not blow up if any part of the
-  chain is undefined, null, or destroyed.
-
-  This is primarily used when syncing bindings, which may try to update after
-  an object has been destroyed.
-*/
-Ember.trySet = function(root, path, value) {
-  return set(root, path, value, true);
-};
-Ember.trySetPath = Ember.deprecateFunc('trySetPath has been renamed to trySet', Ember.trySet);
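-
-// A minimal sketch (assuming no global `foo` is defined): the tolerant
-// flag turns a broken path into a silent no-op instead of a throw.
-//
-//     Ember.trySet(null, 'foo.bar', 42);  // => undefined, no exception
-//     Ember.set(null, 'foo.bar', 42);     // throws: object not found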
-
-/**
-  Returns true if the provided path is global (e.g., "MyApp.fooController.bar")
-  instead of local ("foo.bar.baz").
-
-  @param {String} path
-  @returns Boolean
-*/
-Ember.isGlobalPath = function(path) {
-  return IS_GLOBAL.test(path);
-};
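-
-// A minimal sketch:
-//
-//     Ember.isGlobalPath('MyApp.fooController.bar'); // => true
-//     Ember.isGlobalPath('foo.bar.baz');             // => false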
-
-
-
-if (Ember.config.overrideAccessors) {
-  Ember.config.overrideAccessors();
-  get = Ember.get;
-  set = Ember.set;
-}
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Metal
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-var GUID_KEY = Ember.GUID_KEY,
-    META_KEY = Ember.META_KEY,
-    EMPTY_META = Ember.EMPTY_META,
-    metaFor = Ember.meta,
-    o_create = Ember.create,
-    objectDefineProperty = Ember.platform.defineProperty;
-
-var MANDATORY_SETTER = Ember.ENV.MANDATORY_SETTER;
-
-// ..........................................................
-// DESCRIPTOR
-//
-
-/**
-  @private
-  @constructor
-
-  Objects of this type can implement an interface to respond to requests to
-  get and set.  The default implementation handles simple properties.
-
-  You generally won't need to create or subclass this directly.
-*/
-var Descriptor = Ember.Descriptor = function() {};
-
-// ..........................................................
-// DEFINING PROPERTIES API
-//
-
-/**
-  @private
-
-  NOTE: This is a low-level method used by other parts of the API.  You almost
-  never want to call this method directly.  Instead you should use Ember.mixin()
-  to define new properties.
-
-  Defines a property on an object.  This method works much like the ES5
-  Object.defineProperty() method except that it can also accept computed
-  properties and other special descriptors.
-
-  Normally this method takes only three parameters.  However if you pass an
-  instance of Ember.Descriptor as the third param then you can pass an optional
-  value as the fourth parameter.  This is often more efficient than creating
-  new descriptor hashes for each property.
-
-  ## Examples
-
-      // ES5 compatible mode
-      Ember.defineProperty(contact, 'firstName', {
-        writable: true,
-        configurable: false,
-        enumerable: true,
-        value: 'Charles'
-      });
-
-      // define a simple property
-      Ember.defineProperty(contact, 'lastName', undefined, 'Jolley');
-
-      // define a computed property
-      Ember.defineProperty(contact, 'fullName', Ember.computed(function() {
-        return this.firstName+' '+this.lastName;
-      }).property('firstName', 'lastName').cacheable());
-*/
-Ember.defineProperty = function(obj, keyName, desc, data, meta) {
-  // The first two parameters to defineProperty are mandatory:
-  //
-  // * obj: the object to define this property on. This may be
-  //   a prototype.
-  // * keyName: the name of the property
-  //
-  // One and only one of the following two parameters must be
-  // provided:
-  //
-  // * desc: an instance of Ember.Descriptor (typically a
-  //   computed property) or an ES5 descriptor.
-  // * data: something other than a descriptor, that will
-  //   become the explicit value of this property.
-
-  var descs, existingDesc, watching, value;
-
-  if (!meta) meta = metaFor(obj);
-  descs = meta.descs;
-  existingDesc = meta.descs[keyName];
-  watching = meta.watching[keyName] > 0;
-
-  if (existingDesc instanceof Ember.Descriptor) {
-    existingDesc.teardown(obj, keyName);
-  }
-
-  if (desc instanceof Ember.Descriptor) {
-    value = desc;
-
-    descs[keyName] = desc;
-    if (MANDATORY_SETTER && watching) {
-      objectDefineProperty(obj, keyName, {
-        configurable: true,
-        enumerable: true,
-        writable: true,
-        value: undefined // make enumerable
-      });
-    } else {
-      obj[keyName] = undefined; // make enumerable
-    }
-    desc.setup(obj, keyName);
-  } else {
-    descs[keyName] = undefined; // shadow descriptor in proto
-    if (desc == null) {
-      value = data;
-
-      if (MANDATORY_SETTER && watching) {
-        meta.values[keyName] = data;
-        objectDefineProperty(obj, keyName, {
-          configurable: true,
-          enumerable: true,
-          set: function() {
-            Ember.assert('Must use Ember.set() to access this property', false);
-          },
-          get: function() {
-            var meta = this[META_KEY];
-            return meta && meta.values[keyName];
-          }
-        });
-      } else {
-        obj[keyName] = data;
-      }
-    } else {
-      value = desc;
-
-      // compatibility with ES5
-      objectDefineProperty(obj, keyName, desc);
-    }
-  }
-
-  // if key is being watched, override chains that
-  // were initialized with the prototype
-  if (watching) { Ember.overrideChains(obj, keyName, meta); }
-
-  // The `value` passed to the `didDefineProperty` hook is
-  // either the descriptor or data, whichever was passed.
-  if (obj.didDefineProperty) { obj.didDefineProperty(obj, keyName, value); }
-
-  return this;
-};
-
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Metal
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-var AFTER_OBSERVERS = ':change';
-var BEFORE_OBSERVERS = ':before';
-var guidFor = Ember.guidFor;
-
-var deferred = 0;
-var array_Slice = [].slice;
-
-/** @private */
-var ObserverSet = function () {
-  this.targetSet = {};
-};
-ObserverSet.prototype.add = function (target, path) {
-  var targetSet = this.targetSet,
-    targetGuid = Ember.guidFor(target),
-    pathSet = targetSet[targetGuid];
-  if (!pathSet) {
-    targetSet[targetGuid] = pathSet = {};
-  }
-  if (pathSet[path]) {
-    return false;
-  } else {
-    return pathSet[path] = true;
-  }
-};
-ObserverSet.prototype.clear = function () {
-  this.targetSet = {};
-};
-
-/** @private */
-var DeferredEventQueue = function() {
-  this.targetSet = {};
-  this.queue = [];
-};
-
-DeferredEventQueue.prototype.push = function(target, eventName, keyName) {
-  var targetSet = this.targetSet,
-    queue = this.queue,
-    targetGuid = Ember.guidFor(target),
-    eventNameSet = targetSet[targetGuid],
-    index;
-
-  if (!eventNameSet) {
-    targetSet[targetGuid] = eventNameSet = {};
-  }
-  index = eventNameSet[eventName];
-  if (index === undefined) {
-    eventNameSet[eventName] = queue.push(Ember.deferEvent(target, eventName, [target, keyName])) - 1;
-  } else {
-    queue[index] = Ember.deferEvent(target, eventName, [target, keyName]);
-  }
-};
-
-DeferredEventQueue.prototype.flush = function() {
-  var queue = this.queue;
-  this.queue = [];
-  this.targetSet = {};
-  for (var i=0, len=queue.length; i < len; ++i) {
-    queue[i]();
-  }
-};
-
-var queue = new DeferredEventQueue(), beforeObserverSet = new ObserverSet();
-
-/** @private */
-function notifyObservers(obj, eventName, keyName, forceNotification) {
-  if (deferred && !forceNotification) {
-    queue.push(obj, eventName, keyName);
-  } else {
-    Ember.sendEvent(obj, eventName, [obj, keyName]);
-  }
-}
-
-/** @private */
-function flushObserverQueue() {
-  beforeObserverSet.clear();
-
-  queue.flush();
-}
-
-Ember.beginPropertyChanges = function() {
-  deferred++;
-  return this;
-};
-
-Ember.endPropertyChanges = function() {
-  deferred--;
-  if (deferred<=0) flushObserverQueue();
-};
-
-/**
-  Make a series of property changes together in an
-  exception-safe way.
-
-      Ember.changeProperties(function() {
-        obj1.set('foo', mayBlowUpWhenSet);
-        obj2.set('bar', baz);
-      });
-*/
-Ember.changeProperties = function(cb, binding){
-  Ember.beginPropertyChanges();
-  try {
-    cb.call(binding);
-  } finally {
-    Ember.endPropertyChanges();
-  }
-};
-
-/**
-  Set a list of properties on an object. These properties are set inside
-  a single `beginPropertyChanges` and `endPropertyChanges` batch, so
-  observers will be buffered.
-*/
-Ember.setProperties = function(self, hash) {
-  Ember.changeProperties(function(){
-    for(var prop in hash) {
-      if (hash.hasOwnProperty(prop)) Ember.set(self, prop, hash[prop]);
-    }
-  });
-  return self;
-};
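-
-// A minimal usage sketch (`contact` is a hypothetical object): both keys
-// are set inside one batch, so observers fire only after the batch ends.
-//
-//     Ember.setProperties(contact, {
-//       firstName: 'Charles',
-//       lastName:  'Jolley'
-//     });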
-
-
-/** @private */
-function changeEvent(keyName) {
-  return keyName+AFTER_OBSERVERS;
-}
-
-/** @private */
-function beforeEvent(keyName) {
-  return keyName+BEFORE_OBSERVERS;
-}
-
-Ember.addObserver = function(obj, path, target, method) {
-  Ember.addListener(obj, changeEvent(path), target, method);
-  Ember.watch(obj, path);
-  return this;
-};
-
-/** @private */
-Ember.observersFor = function(obj, path) {
-  return Ember.listenersFor(obj, changeEvent(path));
-};
-
-Ember.removeObserver = function(obj, path, target, method) {
-  Ember.unwatch(obj, path);
-  Ember.removeListener(obj, changeEvent(path), target, method);
-  return this;
-};
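-
-// A minimal sketch of the observer API above (`didChange` is a
-// hypothetical handler name):
-//
-//     var obj = { firstName: 'Yehuda' };
-//     var didChange = function(sender, key) {
-//       Ember.Logger.log(key + ' is now ' + Ember.get(sender, key));
-//     };
-//     Ember.addObserver(obj, 'firstName', null, didChange);
-//     Ember.set(obj, 'firstName', 'Tom');  // fires didChange
-//     Ember.removeObserver(obj, 'firstName', null, didChange);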
-
-Ember.addBeforeObserver = function(obj, path, target, method) {
-  Ember.addListener(obj, beforeEvent(path), target, method);
-  Ember.watch(obj, path);
-  return this;
-};
-
-// Suspend observer during callback.
-//
-// This should only be used by the target of the observer
-// while it is setting the observed path.
-/** @private */
-Ember._suspendBeforeObserver = function(obj, path, target, method, callback) {
-  return Ember._suspendListener(obj, beforeEvent(path), target, method, callback);
-};
-
-Ember._suspendObserver = function(obj, path, target, method, callback) {
-  return Ember._suspendListener(obj, changeEvent(path), target, method, callback);
-};
-
-/** @private */
-Ember.beforeObserversFor = function(obj, path) {
-  return Ember.listenersFor(obj, beforeEvent(path));
-};
-
-Ember.removeBeforeObserver = function(obj, path, target, method) {
-  Ember.unwatch(obj, path);
-  Ember.removeListener(obj, beforeEvent(path), target, method);
-  return this;
-};
-
-/** @private */
-Ember.notifyObservers = function(obj, keyName) {
-  if (obj.isDestroying) { return; }
-
-  notifyObservers(obj, changeEvent(keyName), keyName);
-};
-
-/** @private */
-Ember.notifyBeforeObservers = function(obj, keyName) {
-  if (obj.isDestroying) { return; }
-
-  var guid, set, forceNotification = false;
-
-  if (deferred) {
-    if (beforeObserverSet.add(obj, keyName)) {
-      forceNotification = true;
-    } else {
-      return;
-    }
-  }
-
-  notifyObservers(obj, beforeEvent(keyName), keyName, forceNotification);
-};
-
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Metal
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-var guidFor = Ember.guidFor, // utils.js
-    metaFor = Ember.meta, // utils.js
-    get = Ember.get, // accessors.js
-    set = Ember.set, // accessors.js
-    normalizeTuple = Ember.normalizeTuple, // accessors.js
-    GUID_KEY = Ember.GUID_KEY, // utils.js
-    META_KEY = Ember.META_KEY, // utils.js
-    // circular reference observer depends on Ember.watch
-    // we should move change events to this file or its own property_events.js
-    notifyObservers = Ember.notifyObservers, // observer.js
-    forEach = Ember.ArrayPolyfills.forEach, // array.js
-    FIRST_KEY = /^([^\.\*]+)/,
-    IS_PATH = /[\.\*]/;
-
-var MANDATORY_SETTER = Ember.ENV.MANDATORY_SETTER,
-o_defineProperty = Ember.platform.defineProperty;
-
-/** @private */
-function firstKey(path) {
-  return path.match(FIRST_KEY)[0];
-}
-
-// returns true if the passed path is just a keyName
-/** @private */
-function isKeyName(path) {
-  return path==='*' || !IS_PATH.test(path);
-}
-
-// ..........................................................
-// DEPENDENT KEYS
-//
-
-var DEP_SKIP = { __emberproto__: true }; // skip some keys and toString
-
-/** @private */
-function iterDeps(method, obj, depKey, seen, meta) {
-
-  var guid = guidFor(obj);
-  if (!seen[guid]) seen[guid] = {};
-  if (seen[guid][depKey]) return;
-  seen[guid][depKey] = true;
-
-  var deps = meta.deps;
-  deps = deps && deps[depKey];
-  if (deps) {
-    for(var key in deps) {
-      if (DEP_SKIP[key]) continue;
-      method(obj, key);
-    }
-  }
-}
-
-
-var WILL_SEEN, DID_SEEN;
-
-// called whenever a property is about to change to clear the cache of any dependent keys (and notify those properties of changes, etc...)
-/** @private */
-function dependentKeysWillChange(obj, depKey, meta) {
-  if (obj.isDestroying) { return; }
-
-  var seen = WILL_SEEN, top = !seen;
-  if (top) { seen = WILL_SEEN = {}; }
-  iterDeps(propertyWillChange, obj, depKey, seen, meta);
-  if (top) { WILL_SEEN = null; }
-}
-
-// called whenever a property has just changed to update dependent keys
-/** @private */
-function dependentKeysDidChange(obj, depKey, meta) {
-  if (obj.isDestroying) { return; }
-
-  var seen = DID_SEEN, top = !seen;
-  if (top) { seen = DID_SEEN = {}; }
-  iterDeps(propertyDidChange, obj, depKey, seen, meta);
-  if (top) { DID_SEEN = null; }
-}
-
-// ..........................................................
-// CHAIN
-//
-
-/** @private */
-function addChainWatcher(obj, keyName, node) {
-  if (!obj || ('object' !== typeof obj)) return; // nothing to do
-  var m = metaFor(obj);
-  var nodes = m.chainWatchers;
-  if (!nodes || nodes.__emberproto__ !== obj) {
-    nodes = m.chainWatchers = { __emberproto__: obj };
-  }
-
-  if (!nodes[keyName]) { nodes[keyName] = {}; }
-  nodes[keyName][guidFor(node)] = node;
-  Ember.watch(obj, keyName);
-}
-
-/** @private */
-function removeChainWatcher(obj, keyName, node) {
-  if (!obj || 'object' !== typeof obj) { return; } // nothing to do
-  var m = metaFor(obj, false),
-      nodes = m.chainWatchers;
-  if (!nodes || nodes.__emberproto__ !== obj) { return; } //nothing to do
-  if (nodes[keyName]) { delete nodes[keyName][guidFor(node)]; }
-  Ember.unwatch(obj, keyName);
-}
-
-var pendingQueue = [];
-
-// attempts to add the pendingQueue chains again.  If some of them end up
-// back in the queue and reschedule is true, schedules a timeout to try
-// again.
-/** @private */
-function flushPendingChains() {
-  if (pendingQueue.length === 0) { return; } // nothing to do
-
-  var queue = pendingQueue;
-  pendingQueue = [];
-
-  forEach.call(queue, function(q) { q[0].add(q[1]); });
-
-  Ember.warn('Watching an undefined global, Ember expects watched globals to be setup by the time the run loop is flushed, check for typos', pendingQueue.length === 0);
-}
-
-/** @private */
-function isProto(pvalue) {
-  return metaFor(pvalue, false).proto === pvalue;
-}
-
-// A ChainNode watches a single key on an object.  If you provide a starting
-// value for the key then the node won't actually watch it.  For a root node
-// pass null for parent and key and object for value.
-/** @private */
-var ChainNode = function(parent, key, value, separator) {
-  var obj;
-  this._parent = parent;
-  this._key    = key;
-
-  // _watching is true when calling get(this._parent, this._key) will
-  // return the value of this node.
-  //
-  // It is false for the root of a chain (because we have no parent)
-  // and for global paths (because the parent node is the object with
-  // the observer on it)
-  this._watching = value===undefined;
-
-  this._value  = value;
-  this._separator = separator || '.';
-  this._paths = {};
-  if (this._watching) {
-    this._object = parent.value();
-    if (this._object) { addChainWatcher(this._object, this._key, this); }
-  }
-
-  // Special-case: the EachProxy relies on immediate evaluation to
-  // establish its observers.
-  //
-  // TODO: Replace this with an efficient callback that the EachProxy
-  // can implement.
-  if (this._parent && this._parent._key === '@each') {
-    this.value();
-  }
-};
-
-var ChainNodePrototype = ChainNode.prototype;
-
-ChainNodePrototype.value = function() {
-  if (this._value === undefined && this._watching) {
-    var obj = this._parent.value();
-    this._value = (obj && !isProto(obj)) ? get(obj, this._key) : undefined;
-  }
-  return this._value;
-};
-
-ChainNodePrototype.destroy = function() {
-  if (this._watching) {
-    var obj = this._object;
-    if (obj) { removeChainWatcher(obj, this._key, this); }
-    this._watching = false; // so future calls do nothing
-  }
-};
-
-// copies a top level object only
-ChainNodePrototype.copy = function(obj) {
-  var ret = new ChainNode(null, null, obj, this._separator),
-      paths = this._paths, path;
-  for (path in paths) {
-    if (paths[path] <= 0) { continue; } // this check will also catch non-number vals.
-    ret.add(path);
-  }
-  return ret;
-};
-
-// called on the root node of a chain to setup watchers on the specified
-// path.
-ChainNodePrototype.add = function(path) {
-  var obj, tuple, key, src, separator, paths;
-
-  paths = this._paths;
-  paths[path] = (paths[path] || 0) + 1;
-
-  obj = this.value();
-  tuple = normalizeTuple(obj, path);
-
-  // the path was a local path
-  if (tuple[0] && tuple[0] === obj) {
-    path = tuple[1];
-    key  = firstKey(path);
-    path = path.slice(key.length+1);
-
-  // global path, but object does not exist yet.
-  // put into a queue and try to connect later.
-  } else if (!tuple[0]) {
-    pendingQueue.push([this, path]);
-    tuple.length = 0;
-    return;
-
-  // global path, and object already exists
-  } else {
-    src  = tuple[0];
-    key  = path.slice(0, 0-(tuple[1].length+1));
-    separator = path.slice(key.length, key.length+1);
-    path = tuple[1];
-  }
-
-  tuple.length = 0;
-  this.chain(key, path, src, separator);
-};
-
-// called on the root node of a chain to teardown watcher on the specified
-// path
-ChainNodePrototype.remove = function(path) {
-  var obj, tuple, key, src, paths;
-
-  paths = this._paths;
-  if (paths[path] > 0) { paths[path]--; }
-
-  obj = this.value();
-  tuple = normalizeTuple(obj, path);
-  if (tuple[0] === obj) {
-    path = tuple[1];
-    key  = firstKey(path);
-    path = path.slice(key.length+1);
-  } else {
-    src  = tuple[0];
-    key  = path.slice(0, 0-(tuple[1].length+1));
-    path = tuple[1];
-  }
-
-  tuple.length = 0;
-  this.unchain(key, path);
-};
-
-ChainNodePrototype.count = 0;
-
-ChainNodePrototype.chain = function(key, path, src, separator) {
-  var chains = this._chains, node;
-  if (!chains) { chains = this._chains = {}; }
-
-  node = chains[key];
-  if (!node) { node = chains[key] = new ChainNode(this, key, src, separator); }
-  node.count++; // count chains...
-
-  // chain rest of path if there is one
-  if (path && path.length>0) {
-    key = firstKey(path);
-    path = path.slice(key.length+1);
-    node.chain(key, path); // NOTE: no src means it will observe changes...
-  }
-};
-
-ChainNodePrototype.unchain = function(key, path) {
-  var chains = this._chains, node = chains[key];
-
-  // unchain rest of path first...
-  if (path && path.length>1) {
-    key  = firstKey(path);
-    path = path.slice(key.length+1);
-    node.unchain(key, path);
-  }
-
-  // delete node if needed.
-  node.count--;
-  if (node.count<=0) {
-    delete chains[node._key];
-    node.destroy();
-  }
-
-};
-
-ChainNodePrototype.willChange = function() {
-  var chains = this._chains;
-  if (chains) {
-    for(var key in chains) {
-      if (!chains.hasOwnProperty(key)) { continue; }
-      chains[key].willChange();
-    }
-  }
-
-  if (this._parent) { this._parent.chainWillChange(this, this._key, 1); }
-};
-
-ChainNodePrototype.chainWillChange = function(chain, path, depth) {
-  if (this._key) { path = this._key + this._separator + path; }
-
-  if (this._parent) {
-    this._parent.chainWillChange(this, path, depth+1);
-  } else {
-    if (depth > 1) { Ember.propertyWillChange(this.value(), path); }
-    path = 'this.' + path;
-    if (this._paths[path] > 0) { Ember.propertyWillChange(this.value(), path); }
-  }
-};
-
-ChainNodePrototype.chainDidChange = function(chain, path, depth) {
-  if (this._key) { path = this._key + this._separator + path; }
-  if (this._parent) {
-    this._parent.chainDidChange(this, path, depth+1);
-  } else {
-    if (depth > 1) { Ember.propertyDidChange(this.value(), path); }
-    path = 'this.' + path;
-    if (this._paths[path] > 0) { Ember.propertyDidChange(this.value(), path); }
-  }
-};
-
-ChainNodePrototype.didChange = function(suppressEvent) {
-  // invalidate my own value first.
-  if (this._watching) {
-    var obj = this._parent.value();
-    if (obj !== this._object) {
-      removeChainWatcher(this._object, this._key, this);
-      this._object = obj;
-      addChainWatcher(obj, this._key, this);
-    }
-    this._value  = undefined;
-
-    // Special-case: the EachProxy relies on immediate evaluation to
-    // establish its observers.
-    if (this._parent && this._parent._key === '@each')
-      this.value();
-  }
-
-  // then notify chains...
-  var chains = this._chains;
-  if (chains) {
-    for(var key in chains) {
-      if (!chains.hasOwnProperty(key)) { continue; }
-      chains[key].didChange(suppressEvent);
-    }
-  }
-
-  if (suppressEvent) { return; }
-
-  // and finally tell parent about my path changing...
-  if (this._parent) { this._parent.chainDidChange(this, this._key, 1); }
-};
-
-// get the chains for the current object.  If the current object has
-// chains inherited from the proto they will be cloned and reconfigured for
-// the current object.
-/** @private */
-function chainsFor(obj) {
-  var m = metaFor(obj), ret = m.chains;
-  if (!ret) {
-    ret = m.chains = new ChainNode(null, null, obj);
-  } else if (ret.value() !== obj) {
-    ret = m.chains = ret.copy(obj);
-  }
-  return ret;
-}
-
-/** @private */
-function notifyChains(obj, m, keyName, methodName, arg) {
-  var nodes = m.chainWatchers;
-
-  if (!nodes || nodes.__emberproto__ !== obj) { return; } // nothing to do
-
-  nodes = nodes[keyName];
-  if (!nodes) { return; }
-
-  for(var key in nodes) {
-    if (!nodes.hasOwnProperty(key)) { continue; }
-    nodes[key][methodName](arg);
-  }
-}
-
-Ember.overrideChains = function(obj, keyName, m) {
-  notifyChains(obj, m, keyName, 'didChange', true);
-};
-
-/** @private */
-function chainsWillChange(obj, keyName, m) {
-  notifyChains(obj, m, keyName, 'willChange');
-}
-
-/** @private */
-function chainsDidChange(obj, keyName, m) {
-  notifyChains(obj, m, keyName, 'didChange');
-}
-
-// ..........................................................
-// WATCH
-//
-
-/**
-  @private
-
-  Starts watching a property on an object.  Whenever the property changes,
-  invokes Ember.propertyWillChange and Ember.propertyDidChange.  This is the
-  primitive used by observers and dependent keys; usually you will never call
-  this method directly but instead use higher level methods like
-  Ember.addObserver().
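-
-  A minimal sketch of the primitive (the object and key are illustrative):
-
-      var obj = { name: 'foo' };
-      Ember.watch(obj, 'name');
-      // changes made via Ember.set(obj, 'name', ...) will now invoke
-      // Ember.propertyWillChange and Ember.propertyDidChange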
-*/
-Ember.watch = function(obj, keyName) {
-  // can't watch length on Array - it is special...
-  if (keyName === 'length' && Ember.typeOf(obj) === 'array') { return this; }
-
-  var m = metaFor(obj), watching = m.watching, desc;
-
-  // activate watching first time
-  if (!watching[keyName]) {
-    watching[keyName] = 1;
-    if (isKeyName(keyName)) {
-      desc = m.descs[keyName];
-      if (desc && desc.willWatch) { desc.willWatch(obj, keyName); }
-
-      if ('function' === typeof obj.willWatchProperty) {
-        obj.willWatchProperty(keyName);
-      }
-
-      if (MANDATORY_SETTER && keyName in obj) {
-        m.values[keyName] = obj[keyName];
-        o_defineProperty(obj, keyName, {
-          configurable: true,
-          enumerable: true,
-          set: function() {
-            Ember.assert('Must use Ember.set() to access this property', false);
-          },
-          get: function() {
-            var meta = this[META_KEY];
-            return meta && meta.values[keyName];
-          }
-        });
-      }
-    } else {
-      chainsFor(obj).add(keyName);
-    }
-
-  } else {
-    watching[keyName] = (watching[keyName] || 0) + 1;
-  }
-  return this;
-};
-
-Ember.isWatching = function isWatching(obj, key) {
-  var meta = obj[META_KEY];
-  return (meta && meta.watching[key]) > 0;
-};
-
-Ember.watch.flushPending = flushPendingChains;
-
-/** @private */
-Ember.unwatch = function(obj, keyName) {
-  // can't watch length on Array - it is special...
-  if (keyName === 'length' && Ember.typeOf(obj) === 'array') { return this; }
-
-  var m = metaFor(obj), watching = m.watching, desc;
-
-  if (watching[keyName] === 1) {
-    watching[keyName] = 0;
-
-    if (isKeyName(keyName)) {
-      desc = m.descs[keyName];
-      if (desc && desc.didUnwatch) { desc.didUnwatch(obj, keyName); }
-
-      if ('function' === typeof obj.didUnwatchProperty) {
-        obj.didUnwatchProperty(keyName);
-      }
-
-      if (MANDATORY_SETTER && keyName in obj) {
-        o_defineProperty(obj, keyName, {
-          configurable: true,
-          enumerable: true,
-          writable: true,
-          value: m.values[keyName]
-        });
-        delete m.values[keyName];
-      }
-    } else {
-      chainsFor(obj).remove(keyName);
-    }
-
-  } else if (watching[keyName]>1) {
-    watching[keyName]--;
-  }
-
-  return this;
-};
-
-/**
-  @private
-
-  Call on an object when you first beget it from another object.  This will
-  set up any chained watchers on the object instance as needed.  This method is
-  safe to call multiple times.
-*/
-Ember.rewatch = function(obj) {
-  var m = metaFor(obj, false), chains = m.chains;
-
-  // make sure the object has its own guid.
-  if (GUID_KEY in obj && !obj.hasOwnProperty(GUID_KEY)) {
-    Ember.generateGuid(obj, 'ember');
-  }
-
-  // make sure any chained watchers update.
-  if (chains && chains.value() !== obj) {
-    m.chains = chains.copy(obj);
-  }
-
-  return this;
-};
-
-Ember.finishChains = function(obj) {
-  var m = metaFor(obj, false), chains = m.chains;
-  if (chains) {
-    if (chains.value() !== obj) {
-      m.chains = chains = chains.copy(obj);
-    }
-    chains.didChange(true);
-  }
-};
-
-// ..........................................................
-// PROPERTY CHANGES
-//
-
-/**
-  This function is called just before an object property is about to change.
-  It will notify any before observers and prepare caches among other things.
-
-  Normally you will not need to call this method directly but if for some
-  reason you can't directly watch a property you can invoke this method
-  manually along with `Ember.propertyDidChange()` which you should call just
-  after the property value changes.
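-
-  A sketch of that manual pattern (`recompute` is a hypothetical helper):
-
-      Ember.propertyWillChange(obj, 'total');
-      obj.total = recompute(obj); // a direct mutation Ember cannot observe
-      Ember.propertyDidChange(obj, 'total');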
-
-  @memberOf Ember
-
-  @param {Object} obj
-    The object with the property that will change
-
-  @param {String} keyName
-    The property key (or path) that will change.
-
-  @returns {void}
-*/
-function propertyWillChange(obj, keyName, value) {
-  var m = metaFor(obj, false),
-      watching = m.watching[keyName] > 0 || keyName === 'length',
-      proto = m.proto,
-      desc = m.descs[keyName];
-
-  if (!watching) { return; }
-  if (proto === obj) { return; }
-  if (desc && desc.willChange) { desc.willChange(obj, keyName); }
-  dependentKeysWillChange(obj, keyName, m);
-  chainsWillChange(obj, keyName, m);
-  Ember.notifyBeforeObservers(obj, keyName);
-}
-
-Ember.propertyWillChange = propertyWillChange;
-
-/**
-  This function is called just after an object property has changed.
-  It will notify any observers and clear caches among other things.
-
-  Normally you will not need to call this method directly but if for some
-  reason you can't directly watch a property you can invoke this method
-  manually along with `Ember.propertyWillChange()` which you should call just
-  before the property value changes.
-
-  @memberOf Ember
-
-  @param {Object} obj
-    The object with the property that changed
-
-  @param {String} keyName
-    The property key (or path) that changed.
-
-  @returns {void}
-*/
-function propertyDidChange(obj, keyName) {
-  var m = metaFor(obj, false),
-      watching = m.watching[keyName] > 0 || keyName === 'length',
-      proto = m.proto,
-      desc = m.descs[keyName];
-
-  if (proto === obj) { return; }
-
-  // shouldn't this mean that we're watching this key?
-  if (desc && desc.didChange) { desc.didChange(obj, keyName); }
-  if (!watching && keyName !== 'length') { return; }
-
-  dependentKeysDidChange(obj, keyName, m);
-  chainsDidChange(obj, keyName, m);
-  Ember.notifyObservers(obj, keyName);
-}
-
-Ember.propertyDidChange = propertyDidChange;
-
-var NODE_STACK = [];
-
-/**
-  Tears down the meta on an object so that it can be garbage collected.
-  Multiple calls will have no effect.
-
-  @param {Object} obj  the object to destroy
-  @returns {void}
-*/
-Ember.destroy = function (obj) {
-  var meta = obj[META_KEY], node, nodes, key, nodeObject;
-  if (meta) {
-    obj[META_KEY] = null;
-    // remove chainWatchers to remove circular references that would prevent GC
-    node = meta.chains;
-    if (node) {
-      NODE_STACK.push(node);
-      // process tree
-      while (NODE_STACK.length > 0) {
-        node = NODE_STACK.pop();
-        // push children
-        nodes = node._chains;
-        if (nodes) {
-          for (key in nodes) {
-            if (nodes.hasOwnProperty(key)) {
-              NODE_STACK.push(nodes[key]);
-            }
-          }
-        }
-        // remove chainWatcher in node object
-        if (node._watching) {
-          nodeObject = node._object;
-          if (nodeObject) {
-            removeChainWatcher(nodeObject, node._key, node);
-          }
-        }
-      }
-    }
-  }
-};
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Metal
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-Ember.warn("Computed properties will soon be cacheable by default. To enable this in your app, set `ENV.CP_DEFAULT_CACHEABLE = true`.", Ember.CP_DEFAULT_CACHEABLE);
-
-
-var get = Ember.get,
-    metaFor = Ember.meta,
-    guidFor = Ember.guidFor,
-    a_slice = [].slice,
-    o_create = Ember.create,
-    META_KEY = Ember.META_KEY,
-    watch = Ember.watch,
-    unwatch = Ember.unwatch;
-
-// ..........................................................
-// DEPENDENT KEYS
-//
-
-// data structure:
-//  meta.deps = {
-//   'depKey': {
-//     'keyName': count,
-//     __emberproto__: SRC_OBJ [to detect clones]
-//     },
-//   __emberproto__: SRC_OBJ
-//  }
-
-/**
-  @private
-
-  This function returns a map of unique dependencies for a
-  given object and key.
-*/
-function keysForDep(obj, depsMeta, depKey) {
-  var keys = depsMeta[depKey];
-  if (!keys) {
-    // if there are no dependencies yet for the given key
-    // create a new empty list of dependencies for the key
-    keys = depsMeta[depKey] = { __emberproto__: obj };
-  } else if (keys.__emberproto__ !== obj) {
-    // otherwise if the dependency list is inherited from
-    // a superclass, clone the hash
-    keys = depsMeta[depKey] = o_create(keys);
-    keys.__emberproto__ = obj;
-  }
-  return keys;
-}
-
-/**
-  @private
-
-  return obj[META_KEY].deps
-  */
-function metaForDeps(obj, meta) {
-  var deps = meta.deps;
-  // If the current object has no dependencies...
-  if (!deps) {
-    // initialize the dependencies with a pointer back to
-    // the current object
-    deps = meta.deps = { __emberproto__: obj };
-  } else if (deps.__emberproto__ !== obj) {
-    // otherwise if the dependencies are inherited from the
-    // object's superclass, clone the deps
-    deps = meta.deps = o_create(deps);
-    deps.__emberproto__ = obj;
-  }
-  return deps;
-}
-
-/** @private */
-function addDependentKeys(desc, obj, keyName, meta) {
-  // the descriptor has a list of dependent keys, so
-  // add all of its dependent keys.
-  var depKeys = desc._dependentKeys, depsMeta, idx, len, depKey, keys;
-  if (!depKeys) return;
-
-  depsMeta = metaForDeps(obj, meta);
-
-  for(idx = 0, len = depKeys.length; idx < len; idx++) {
-    depKey = depKeys[idx];
-    // Lookup keys meta for depKey
-    keys = keysForDep(obj, depsMeta, depKey);
-    // Increment the number of times depKey depends on keyName.
-    keys[keyName] = (keys[keyName] || 0) + 1;
-    // Watch the depKey
-    watch(obj, depKey);
-  }
-}
-
-/** @private */
-function removeDependentKeys(desc, obj, keyName, meta) {
-  // the descriptor has a list of dependent keys, so
-  // remove all of its dependent keys.
-  var depKeys = desc._dependentKeys, depsMeta, idx, len, depKey, keys;
-  if (!depKeys) return;
-
-  depsMeta = metaForDeps(obj, meta);
-
-  for(idx = 0, len = depKeys.length; idx < len; idx++) {
-    depKey = depKeys[idx];
-    // Lookup keys meta for depKey
-    keys = keysForDep(obj, depsMeta, depKey);
-    // Decrement the number of times depKey depends on keyName.
-    keys[keyName] = (keys[keyName] || 0) - 1;
-    // Unwatch the depKey
-    unwatch(obj, depKey);
-  }
-}
-
-// ..........................................................
-// COMPUTED PROPERTY
-//
-
-/** @private */
-function ComputedProperty(func, opts) {
-  this.func = func;
-  this._cacheable = (opts && opts.cacheable !== undefined) ? opts.cacheable : Ember.CP_DEFAULT_CACHEABLE;
-  this._dependentKeys = opts && opts.dependentKeys;
-}
-
-/**
-  @constructor
-*/
-Ember.ComputedProperty = ComputedProperty;
-ComputedProperty.prototype = new Ember.Descriptor();
-
-/**
-  @extends Ember.ComputedProperty
-  @private
-*/
-var ComputedPropertyPrototype = ComputedProperty.prototype;
-
-/**
-  Call on a computed property to set it into cacheable mode.  When in this
-  mode the computed property will automatically cache the return value of
-  your function until one of the dependent keys changes.
-
-      MyApp.president = Ember.Object.create({
-        fullName: function() {
-          return this.get('firstName') + ' ' + this.get('lastName');
-
-          // After calculating the value of this function, Ember.js will
-          // return that value without re-executing this function until
-          // one of the dependent properties changes.
-        }.property('firstName', 'lastName').cacheable()
-      });
-
-  Properties are cacheable by default.
-
-  @memberOf Ember.ComputedProperty.prototype
-  @name cacheable
-  @function
-  @param {Boolean} aFlag optional; set to false to disable caching
-  @returns {Ember.ComputedProperty} receiver
-*/
-ComputedPropertyPrototype.cacheable = function(aFlag) {
-  this._cacheable = aFlag !== false;
-  return this;
-};
-
-/**
-  Call on a computed property to set it into non-cached mode.  When in this
-  mode the computed property will not automatically cache the return value.
-
-      MyApp.outsideService = Ember.Object.create({
-        value: function() {
-          return OutsideService.getValue();
-        }.property().volatile()
-      });
-
-  @memberOf Ember.ComputedProperty.prototype
-  @name volatile
-  @function
-  @returns {Ember.ComputedProperty} receiver
-*/
-ComputedPropertyPrototype.volatile = function() {
-  return this.cacheable(false);
-};
-
-/**
-  Sets the dependent keys on this computed property.  Pass any number of
-  arguments containing key paths that this computed property depends on.
-
-      MyApp.president = Ember.Object.create({
-        fullName: Ember.computed(function() {
-          return this.get('firstName') + ' ' + this.get('lastName');
-
-          // Tell Ember.js that this computed property depends on firstName
-          // and lastName
-        }).property('firstName', 'lastName')
-      });
-
-  @memberOf Ember.ComputedProperty.prototype
-  @name property
-  @function
-  @param {String} path... zero or more property paths
-  @returns {Ember.ComputedProperty} receiver
-*/
-ComputedPropertyPrototype.property = function() {
-  var args = [];
-  for (var i = 0, l = arguments.length; i < l; i++) {
-    args.push(arguments[i]);
-  }
-  this._dependentKeys = args;
-  return this;
-};
-
-/**
-  In some cases, you may want to annotate computed properties with additional
-  metadata about how they function or what values they operate on. For example,
-  computed property functions may close over variables that are then no longer
-  available for introspection.
-
-  You can pass a hash of these values to a computed property like this:
-
-      person: function() {
-        var personId = this.get('personId');
-        return App.Person.create({ id: personId });
-      }.property().meta({ type: App.Person })
-
-  The hash that you pass to the `meta()` function will be saved on the
-  computed property descriptor under the `_meta` key. Ember runtime
-  exposes a public API for retrieving these values from classes,
-  via the `metaForProperty()` function.
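-
-  For example (illustrative only; `SomeClass` is a hypothetical class that
-  defines the `person` property above):
-
-      SomeClass.metaForProperty('person'); // => { type: App.Person }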
-
-  @memberOf Ember.ComputedProperty.prototype
-  @name meta
-  @function
-  @param {Hash} meta
-  @returns {Ember.ComputedProperty} property descriptor instance
-*/
-
-ComputedPropertyPrototype.meta = function(meta) {
-  if (arguments.length === 0) {
-    return this._meta || {};
-  } else {
-    this._meta = meta;
-    return this;
-  }
-};
-
-/** @private - impl descriptor API */
-ComputedPropertyPrototype.willWatch = function(obj, keyName) {
-  // watch already creates meta for this instance
-  var meta = obj[META_KEY];
-  Ember.assert('watch should have setup meta to be writable', meta.source === obj);
-  if (!(keyName in meta.cache)) {
-    addDependentKeys(this, obj, keyName, meta);
-  }
-};
-
-ComputedPropertyPrototype.didUnwatch = function(obj, keyName) {
-  var meta = obj[META_KEY];
-  Ember.assert('unwatch should have setup meta to be writable', meta.source === obj);
-  if (!(keyName in meta.cache)) {
-    // unwatch already creates meta for this instance
-    removeDependentKeys(this, obj, keyName, meta);
-  }
-};
-
-/** @private - impl descriptor API */
-ComputedPropertyPrototype.didChange = function(obj, keyName) {
-  // _suspended is set via a CP.set to ensure we don't clear
-  // the cached value set by the setter
-  if (this._cacheable && this._suspended !== obj) {
-    var meta = metaFor(obj);
-    if (keyName in meta.cache) {
-      delete meta.cache[keyName];
-      if (!meta.watching[keyName]) {
-        removeDependentKeys(this, obj, keyName, meta);
-      }
-    }
-  }
-};
-
-/** @private - impl descriptor API */
-ComputedPropertyPrototype.get = function(obj, keyName) {
-  var ret, cache, meta;
-  if (this._cacheable) {
-    meta = metaFor(obj);
-    cache = meta.cache;
-    if (keyName in cache) { return cache[keyName]; }
-    ret = cache[keyName] = this.func.call(obj, keyName);
-    if (!meta.watching[keyName]) {
-      addDependentKeys(this, obj, keyName, meta);
-    }
-  } else {
-    ret = this.func.call(obj, keyName);
-  }
-  return ret;
-};
-
-/** @private - impl descriptor API */
-ComputedPropertyPrototype.set = function(obj, keyName, value) {
-  var cacheable = this._cacheable,
-      meta = metaFor(obj, cacheable),
-      watched = meta.watching[keyName],
-      oldSuspended = this._suspended,
-      hadCachedValue,
-      ret;
-
-  this._suspended = obj;
-
-  if (watched) { Ember.propertyWillChange(obj, keyName); }
-  if (cacheable) {
-    if (keyName in meta.cache) {
-      delete meta.cache[keyName];
-      hadCachedValue = true;
-    }
-  }
-  ret = this.func.call(obj, keyName, value);
-  if (cacheable) {
-    if (!watched && !hadCachedValue) {
-      addDependentKeys(this, obj, keyName, meta);
-    }
-    meta.cache[keyName] = ret;
-  }
-  if (watched) { Ember.propertyDidChange(obj, keyName); }
-  this._suspended = oldSuspended;
-  return ret;
-};
-
-/** @private - called when property is defined */
-ComputedPropertyPrototype.setup = function(obj, keyName) {
-  var meta = obj[META_KEY];
-  if (meta && meta.watching[keyName]) {
-    addDependentKeys(this, obj, keyName, metaFor(obj));
-  }
-};
-
-/** @private - called before property is overridden */
-ComputedPropertyPrototype.teardown = function(obj, keyName) {
-  var meta = metaFor(obj);
-
-  if (meta.watching[keyName] || keyName in meta.cache) {
-    removeDependentKeys(this, obj, keyName, meta);
-  }
-
-  if (this._cacheable) { delete meta.cache[keyName]; }
-
-  return null; // no value to restore
-};
-
-/**
-  This helper returns a new property descriptor that wraps the passed
-  computed property function.  You can use this helper to define properties
-  with mixins or via Ember.defineProperty().
-
-  The function you pass will be used to both get and set property values.
-  The function should accept two parameters, key and value.  If value is not
-  undefined you should set the value first.  In either case return the
-  current value of the property.
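-
-  A sketch of that contract (`_fullName` is a hypothetical backing property):
-
-      fullName: Ember.computed(function(key, value) {
-        if (value !== undefined) { this._fullName = value; } // set first
-        return this._fullName; // always return the current value
-      }).property()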
-
-  @param {Function} func
-    The computed property function.
-
-  @returns {Ember.ComputedProperty} property descriptor instance
-*/
-Ember.computed = function(func) {
-  var args;
-
-  if (arguments.length > 1) {
-    args = a_slice.call(arguments, 0, -1);
-    func = a_slice.call(arguments, -1)[0];
-  }
-
-  var cp = new ComputedProperty(func);
-
-  if (args) {
-    cp.property.apply(cp, args);
-  }
-
-  return cp;
-};
-
-/**
-  Returns the cached value for a property, if one exists.
-  This can be useful for peeking at the value of a computed
-  property that is generated lazily, without accidentally causing
-  it to be created.
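-
-  For example (illustrative only; `person` is a hypothetical object):
-
-      // undefined if 'fullName' has never been computed and cached
-      var cached = Ember.cacheFor(person, 'fullName');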
-
-  @param {Object} obj the object whose property you want to check
-  @param {String} key the name of the property whose cached value you want
-                      to return
-
-*/
-Ember.cacheFor = function cacheFor(obj, key) {
-  var cache = metaFor(obj, false).cache;
-
-  if (cache && key in cache) {
-    return cache[key];
-  }
-};
-
-Ember.computed.not = function(dependentKey) {
-  return Ember.computed(dependentKey, function(key) {
-    return !get(this, dependentKey);
-  }).cacheable();
-};
-
-Ember.computed.empty = function(dependentKey) {
-  return Ember.computed(dependentKey, function(key) {
-    var val = get(this, dependentKey);
-    return val === undefined || val === null || val === '' || (Ember.isArray(val) && get(val, 'length') === 0);
-  }).cacheable();
-};
-
-Ember.computed.bool = function(dependentKey) {
-  return Ember.computed(dependentKey, function(key) {
-    return !!get(this, dependentKey);
-  }).cacheable();
-};
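-
-// Illustrative usage of the three helpers above (the keys are hypothetical):
-//
-//     isDisabled: Ember.computed.not('isEnabled'),
-//     hasNoItems: Ember.computed.empty('items'),
-//     isLoaded:   Ember.computed.bool('content')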
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Metal
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-var o_create = Ember.create,
-    meta = Ember.meta,
-    metaPath = Ember.metaPath,
-    guidFor = Ember.guidFor,
-    a_slice = [].slice;
-
-/**
-  The event system uses a series of nested hashes to store listeners on an
-  object. When a listener is registered, or when an event arrives, these
-  hashes are consulted to determine which target and action pair to invoke.
-
-  The hashes are stored in the object's meta hash, and look like this:
-
-      // Object's meta hash
-      {
-        listeners: {               // variable name: `listenerSet`
-          "foo:changed": {         // variable name: `targetSet`
-            [targetGuid]: {        // variable name: `actionSet`
-              [methodGuid]: {      // variable name: `action`
-                target: [Object object],
-                method: [Function function]
-              }
-            }
-          }
-        }
-      }
-
-*/
-
-// Gets the set of all actions, keyed on the guid of each action's
-// method property.
-/** @private */
-function actionSetFor(obj, eventName, target, writable) {
-  return metaPath(obj, ['listeners', eventName, guidFor(target)], writable);
-}
-
-// Gets the set of all targets, keyed on the guid of each action's
-// target property.
-/** @private */
-function targetSetFor(obj, eventName) {
-  var listenerSet = meta(obj, false).listeners;
-  if (!listenerSet) { return false; }
-
-  return listenerSet[eventName] || false;
-}
-
-// TODO: This knowledge should really be a part of the
-// meta system.
-var SKIP_PROPERTIES = { __ember_source__: true };
-
-/** @private */
-function iterateSet(obj, eventName, callback, params) {
-  var targetSet = targetSetFor(obj, eventName);
-  if (!targetSet) { return false; }
-  // Iterate through all elements of the target set
-  for(var targetGuid in targetSet) {
-    if (SKIP_PROPERTIES[targetGuid]) { continue; }
-
-    var actionSet = targetSet[targetGuid];
-    if (actionSet) {
-      // Iterate through the elements of the action set
-      for(var methodGuid in actionSet) {
-        if (SKIP_PROPERTIES[methodGuid]) { continue; }
-
-        var action = actionSet[methodGuid];
-        if (action) {
-          if (callback(action, params, obj) === true) {
-            return true;
-          }
-        }
-      }
-    }
-  }
-  return false;
-}
-
-/** @private */
-function invokeAction(action, params, sender) {
-  var method = action.method, target = action.target;
-  // If there is no target, the target is the object
-  // on which the event was fired.
-  if (!target) { target = sender; }
-  if ('string' === typeof method) { method = target[method]; }
-  if (params) {
-    method.apply(target, params);
-  } else {
-    method.apply(target);
-  }
-}
-
-/**
-  Adds a listener for the named event.  Arguments passed to Ember.sendEvent
-  beyond the object and event name are passed on to the listener.
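-
-  A minimal sketch (the event name is illustrative):
-
-      Ember.addListener(obj, 'didLoad', null, function() {
-        // invoked whenever Ember.sendEvent(obj, 'didLoad') fires
-      });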
-
-  @memberOf Ember
-*/
-function addListener(obj, eventName, target, method) {
-  Ember.assert("You must pass at least an object and event name to Ember.addListener", !!obj && !!eventName);
-
-  if (!method && 'function' === typeof target) {
-    method = target;
-    target = null;
-  }
-
-  var actionSet = actionSetFor(obj, eventName, target, true),
-      methodGuid = guidFor(method);
-
-  if (!actionSet[methodGuid]) {
-    actionSet[methodGuid] = { target: target, method: method };
-  }
-
-  if ('function' === typeof obj.didAddListener) {
-    obj.didAddListener(eventName, target, method);
-  }
-}
-
-/** @memberOf Ember */
-function removeListener(obj, eventName, target, method) {
-  Ember.assert("You must pass at least an object and event name to Ember.removeListener", !!obj && !!eventName);
-
-  if (!method && 'function' === typeof target) {
-    method = target;
-    target = null;
-  }
-
-  var actionSet = actionSetFor(obj, eventName, target, true),
-      methodGuid = guidFor(method);
-
-  // we can't simply delete this property, because if we do, we might
-  // re-expose the property from the prototype chain.
-  if (actionSet && actionSet[methodGuid]) { actionSet[methodGuid] = null; }
-
-  if ('function' === typeof obj.didRemoveListener) {
-    obj.didRemoveListener(eventName, target, method);
-  }
-}
-
-// Suspend listener during callback.
-//
-// This should only be used by the target of the event listener
-// when it is taking an action that would cause the event, e.g.
-// an object might suspend its property change listener while it is
-// setting that property.
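-//
-// A sketch of that pattern (the event name and handler are illustrative):
-//
-//     suspendListener(obj, 'someEvent', this, this.handler, function() {
-//       // work done here will not re-trigger this.handler for 'someEvent'
-//     });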
-/** @private */
-function suspendListener(obj, eventName, target, method, callback) {
-  if (!method && 'function' === typeof target) {
-    method = target;
-    target = null;
-  }
-
-  var actionSet = actionSetFor(obj, eventName, target, true),
-      methodGuid = guidFor(method),
-      action = actionSet && actionSet[methodGuid];
-
-  actionSet[methodGuid] = null;
-  try {
-    return callback.call(target);
-  } finally {
-    actionSet[methodGuid] = action;
-  }
-}
-
-// returns a list of currently watched events
-/** @memberOf Ember */
-function watchedEvents(obj) {
-  var listeners = meta(obj, false).listeners, ret = [];
-
-  if (listeners) {
-    for(var eventName in listeners) {
-      if (!SKIP_PROPERTIES[eventName] && listeners[eventName]) {
-        ret.push(eventName);
-      }
-    }
-  }
-  return ret;
-}
-
-/** @memberOf Ember */
-function sendEvent(obj, eventName, params) {
-  // first give object a chance to handle it
-  if (obj !== Ember && 'function' === typeof obj.sendEvent) {
-    obj.sendEvent(eventName, params);
-  }
-
-  iterateSet(obj, eventName, invokeAction, params);
-  return true;
-}
-
-/** @memberOf Ember */
-function deferEvent(obj, eventName, params) {
-  var actions = [];
-  iterateSet(obj, eventName, function (action) {
-    actions.push(action);
-  });
-
-  return function() {
-    if (obj.isDestroyed) { return; }
-
-    if (obj !== Ember && 'function' === typeof obj.sendEvent) {
-      obj.sendEvent(eventName, params);
-    }
-
-    for (var i=0, len=actions.length; i < len; ++i) {
-      invokeAction(actions[i], params, obj);
-    }
-  };
-}
-
-/** @memberOf Ember */
-function hasListeners(obj, eventName) {
-  if (iterateSet(obj, eventName, function() { return true; })) {
-    return true;
-  }
-
-  // no listeners!  might as well clean this up so it is faster later.
-  var set = metaPath(obj, ['listeners'], true);
-  set[eventName] = null;
-
-  return false;
-}
-
-/** @memberOf Ember */
-function listenersFor(obj, eventName) {
-  var ret = [];
-  iterateSet(obj, eventName, function (action) {
-    ret.push([action.target, action.method]);
-  });
-  return ret;
-}
-
-Ember.addListener = addListener;
-Ember.removeListener = removeListener;
-Ember._suspendListener = suspendListener;
-Ember.sendEvent = sendEvent;
-Ember.hasListeners = hasListeners;
-Ember.watchedEvents = watchedEvents;
-Ember.listenersFor = listenersFor;
-Ember.deferEvent = deferEvent;
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Runtime
-// Copyright: ©2006-2011 Strobe Inc. and contributors.
-//            Portions ©2008-2010 Apple Inc. All rights reserved.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-// Ember.Logger
-// Ember.watch.flushPending
-// Ember.beginPropertyChanges, Ember.endPropertyChanges
-// Ember.guidFor
-
-// ..........................................................
-// HELPERS
-//
-
-var slice = [].slice,
-    forEach = Ember.ArrayPolyfills.forEach;
-
-// invokes passed params - normalizing so you can pass target/func,
-// target/string or just func
-/** @private */
-function invoke(target, method, args, ignore) {
-
-  if (method === undefined) {
-    method = target;
-    target = undefined;
-  }
-
-  if ('string' === typeof method) { method = target[method]; }
-  if (args && ignore > 0) {
-    args = args.length > ignore ? slice.call(args, ignore) : null;
-  }
-
-  // Unfortunately in some browsers we lose the backtrace if we rethrow the existing error,
-  // so in the event that we don't have an `onerror` handler we don't wrap in a try/catch
-  if ('function' === typeof Ember.onerror) {
-    try {
-      // IE8's Function.prototype.apply doesn't accept undefined/null arguments.
-      return method.apply(target || this, args || []);
-    } catch (error) {
-      Ember.onerror(error);
-    }
-  } else {
-    // IE8's Function.prototype.apply doesn't accept undefined/null arguments.
-    return method.apply(target || this, args || []);
-  }
-}
-
-
-// ..........................................................
-// RUNLOOP
-//
-
-var timerMark; // used by timers...
-
-/** @private */
-var RunLoop = function(prev) {
-  this._prev = prev || null;
-  this.onceTimers = {};
-};
-
-RunLoop.prototype = {
-  end: function() {
-    this.flush();
-  },
-
-  prev: function() {
-    return this._prev;
-  },
-
-  // ..........................................................
-  // Delayed Actions
-  //
-
-  schedule: function(queueName, target, method) {
-    var queues = this._queues, queue;
-    if (!queues) { queues = this._queues = {}; }
-    queue = queues[queueName];
-    if (!queue) { queue = queues[queueName] = []; }
-
-    var args = arguments.length > 3 ? slice.call(arguments, 3) : null;
-    queue.push({ target: target, method: method, args: args });
-    return this;
-  },
-
-  flush: function(queueName) {
-    var queueNames, idx, len, queue, log;
-
-    if (!this._queues) { return this; } // nothing to do
-
-    function iter(item) {
-      invoke(item.target, item.method, item.args);
-    }
-
-    Ember.watch.flushPending(); // make sure all chained watchers are setup
-
-    if (queueName) {
-      while (this._queues && (queue = this._queues[queueName])) {
-        this._queues[queueName] = null;
-
-        // the sync phase is to allow property changes to propagate.  don't
-        // invoke observers until that is finished.
-        if (queueName === 'sync') {
-          log = Ember.LOG_BINDINGS;
-          if (log) { Ember.Logger.log('Begin: Flush Sync Queue'); }
-
-          Ember.beginPropertyChanges();
-          try {
-            forEach.call(queue, iter);
-          } finally {
-            Ember.endPropertyChanges();
-          }
-
-          if (log) { Ember.Logger.log('End: Flush Sync Queue'); }
-
-        } else {
-          forEach.call(queue, iter);
-        }
-      }
-
-    } else {
-      queueNames = Ember.run.queues;
-      len = queueNames.length;
-      idx = 0;
-
-      outerloop:
-      while (idx < len) {
-        queueName = queueNames[idx];
-        queue = this._queues && this._queues[queueName];
-        delete this._queues[queueName];
-
-        if (queue) {
-          // the sync phase is to allow property changes to propagate.  don't
-          // invoke observers until that is finished.
-          if (queueName === 'sync') {
-            log = Ember.LOG_BINDINGS;
-            if (log) { Ember.Logger.log('Begin: Flush Sync Queue'); }
-
-            Ember.beginPropertyChanges();
-            try {
-              forEach.call(queue, iter);
-            } finally {
-              Ember.endPropertyChanges();
-            }
-
-            if (log) { Ember.Logger.log('End: Flush Sync Queue'); }
-          } else {
-            forEach.call(queue, iter);
-          }
-        }
-
-        // Loop through prior queues
-        for (var i = 0; i <= idx; i++) {
-          if (this._queues && this._queues[queueNames[i]]) {
-            // Start over at the first queue with contents
-            idx = i;
-            continue outerloop;
-          }
-        }
-
-        idx++;
-      }
-    }
-
-    timerMark = null;
-
-    return this;
-  }
-
-};
-
-Ember.RunLoop = RunLoop;
-
-// ..........................................................
-// Ember.run - this is ideally the only public API the dev sees
-//
-/**
-* @namespace Ember.run is both a function and a namespace for
-* RunLoop-related functions.
-* @name Ember.run
-*/
-
-/**
-  Runs the passed target and method inside of a RunLoop, ensuring any
-  deferred actions including bindings and views updates are flushed at the
-  end.
-
-  Normally you should not need to invoke this method yourself.  However if
-  you are implementing raw event handlers when interfacing with other
-  libraries or plugins, you should probably wrap all of your code inside this
-  call.
-
-      Ember.run(function(){
-        // code to be executed within a RunLoop
-      });
-
-  @name run
-  @methodOf Ember.run
-  @param {Object} target
-    (Optional) target of method to call
-
-  @param {Function|String} method
-    Method to invoke.  May be a function or a string.  If you pass a string
-    then it will be looked up on the passed target.
-
-  @param {Object...} args
-    Any additional arguments you wish to pass to the method.
-
-  @returns {Object} return value from invoking the passed function.
-*/
-Ember.run = function(target, method) {
-  var ret, loop;
-  run.begin();
-  try {
-    if (target || method) { ret = invoke(target, method, arguments, 2); }
-  } finally {
-    run.end();
-  }
-  return ret;
-};
-
-/** @private */
-var run = Ember.run;
-
-
-/**
-  Begins a new RunLoop.  Any deferred actions invoked after the begin will
-  be buffered until you invoke a matching call to Ember.run.end().  This is
-  a lower-level way to use a RunLoop instead of using Ember.run().
-
-      Ember.run.begin();
-      // code to be executed within a RunLoop
-      Ember.run.end();
-
-
-  @returns {void}
-*/
-Ember.run.begin = function() {
-  run.currentRunLoop = new RunLoop(run.currentRunLoop);
-};
-
-/**
-  Ends a RunLoop.  This must be called sometime after you call Ember.run.begin()
-  to flush any deferred actions.  This is a lower-level way to use a RunLoop
-  instead of using Ember.run().
-
-      Ember.run.begin();
-      // code to be executed within a RunLoop
-      Ember.run.end();
-
-  @returns {void}
-*/
-Ember.run.end = function() {
-  Ember.assert('must have a current run loop', run.currentRunLoop);
-  try {
-    run.currentRunLoop.end();
-  }
-  finally {
-    run.currentRunLoop = run.currentRunLoop.prev();
-  }
-};
-
-/**
-  Array of named queues.  This array determines the order in which queues
-  are flushed at the end of the RunLoop.  You can define your own queues by
-  simply adding the queue name to this array.  Normally you should not need
-  to inspect or modify this property.
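-
-      // Illustrative only: insert a custom 'render' queue after 'actions'
-      Ember.run.queues.splice(Ember.run.queues.indexOf('actions') + 1, 0, 'render');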
-
-  @type Array
-  @default ['sync', 'actions', 'destroy', 'timers']
-*/
-Ember.run.queues = ['sync', 'actions', 'destroy', 'timers'];
-
-/**
-  Adds the passed target/method and any optional arguments to the named
-  queue to be executed at the end of the RunLoop.  If you have not already
-  started a RunLoop when calling this method one will be started for you
-  automatically.
-
-  At the end of a RunLoop, any methods scheduled in this way will be invoked.
-  Methods will be invoked in an order matching the named queues defined in
-  the run.queues property.
-
-      Ember.run.schedule('timers', this, function(){
-        // this will be executed at the end of the RunLoop, when timers are run
-        console.log("scheduled on timers queue");
-      });
-      Ember.run.schedule('sync', this, function(){
-        // this will be executed at the end of the RunLoop, when bindings are synced
-        console.log("scheduled on sync queue");
-      });
-      // Note the functions will be run in order based on the run queues order. Output would be:
-      //   scheduled on sync queue
-      //   scheduled on timers queue
-
-  @param {String} queue
-    The name of the queue to schedule against.  Default queues are 'sync' and
-    'actions'
-
-  @param {Object} target
-    (Optional) target object to use as the context when invoking a method.
-
-  @param {String|Function} method
-    The method to invoke.  If you pass a string it will be resolved on the
-    target object at the time the scheduled item is invoked allowing you to
-    change the target function.
-
-  @param {Object} arguments...
-    Optional arguments to be passed to the queued method.
-
-  @returns {void}
-*/
-Ember.run.schedule = function(queue, target, method) {
-  var loop = run.autorun();
-  loop.schedule.apply(loop, arguments);
-};
-
-var scheduledAutorun;
-/** @private */
-function autorun() {
-  scheduledAutorun = null;
-  if (run.currentRunLoop) { run.end(); }
-}
-
-// Used by global test teardown
-/** @private */
-Ember.run.hasScheduledTimers = function() {
-  return !!(scheduledAutorun || scheduledLater || scheduledNext);
-};
-
-// Used by global test teardown
-/** @private */
-Ember.run.cancelTimers = function () {
-  if (scheduledAutorun) {
-    clearTimeout(scheduledAutorun);
-    scheduledAutorun = null;
-  }
-  if (scheduledLater) {
-    clearTimeout(scheduledLater);
-    scheduledLater = null;
-  }
-  if (scheduledNext) {
-    clearTimeout(scheduledNext);
-    scheduledNext = null;
-  }
-  timers = {};
-};
-
-/**
-  Begins a new RunLoop if necessary and schedules a timer to flush the
-  RunLoop at a later time.  This method is used by parts of Ember to
-  ensure the RunLoop always finishes.  You normally do not need to call this
-  method directly.  Instead use Ember.run().
-
-      Ember.run.autorun();
-
-  @returns {Ember.RunLoop} the new current RunLoop
-*/
-Ember.run.autorun = function() {
-  if (!run.currentRunLoop) {
-    Ember.assert("You have turned on testing mode, which disabled the run-loop's autorun. You will need to wrap any code with asynchronous side-effects in an Ember.run", !Ember.testing);
-
-    run.begin();
-
-    if (!scheduledAutorun) {
-      scheduledAutorun = setTimeout(autorun, 1);
-    }
-  }
-
-  return run.currentRunLoop;
-};
-
-/**
-  Immediately flushes any events scheduled in the 'sync' queue.  Bindings
-  use this queue so this method is a useful way to immediately force all
-  bindings in the application to sync.
-
-  You should call this method anytime you need any changed state to propagate
-  throughout the app immediately without repainting the UI.
-
-      Ember.run.sync();
-
-  @returns {void}
-*/
-Ember.run.sync = function() {
-  run.autorun();
-  run.currentRunLoop.flush('sync');
-};
-
-// ..........................................................
-// TIMERS
-//
-
-var timers = {}; // active timers...
-
-var scheduledLater;
-/** @private */
-function invokeLaterTimers() {
-  scheduledLater = null;
-  var now = (+ new Date()), earliest = -1;
-  for (var key in timers) {
-    if (!timers.hasOwnProperty(key)) { continue; }
-    var timer = timers[key];
-    if (timer && timer.expires) {
-      if (now >= timer.expires) {
-        delete timers[key];
-        invoke(timer.target, timer.method, timer.args, 2);
-      } else {
-        if (earliest<0 || (timer.expires < earliest)) earliest=timer.expires;
-      }
-    }
-  }
-
-  // schedule next timeout to fire...
-  if (earliest > 0) { scheduledLater = setTimeout(invokeLaterTimers, earliest-(+ new Date())); }
-}
-
-/**
-  Invokes the passed target/method and optional arguments after a specified
-  period of time.  The last parameter of this method must always be a number
-  of milliseconds.
-
-  You should use this method whenever you need to run some action after a
-  period of time instead of using setTimeout().  This method will ensure that
-  items that expire during the same script execution cycle all execute
-  together, which is often more efficient than using a real setTimeout.
-
-      Ember.run.later(myContext, function(){
-        // code here will execute within a RunLoop in about 500ms with this == myContext
-      }, 500);
-
-  @param {Object} target
-    (optional) target of method to invoke
-
-  @param {Function|String} method
-    The method to invoke.  If you pass a string it will be resolved on the
-    target at the time the method is invoked.
-
-  @param {Object...} args
-    Optional arguments to pass to the timeout.
-
-  @param {Number} wait
-    Number of milliseconds to wait.
-
-  @returns {String} a string you can use to cancel the timer in Ember.run.cancel() later.
-*/
-Ember.run.later = function(target, method) {
-  var args, expires, timer, guid, wait;
-
-  // setTimeout compatibility...
-  if (arguments.length===2 && 'function' === typeof target) {
-    wait   = method;
-    method = target;
-    target = undefined;
-    args   = [target, method];
-  } else {
-    args = slice.call(arguments);
-    wait = args.pop();
-  }
-
-  expires = (+ new Date()) + wait;
-  timer   = { target: target, method: method, expires: expires, args: args };
-  guid    = Ember.guidFor(timer);
-  timers[guid] = timer;
-  run.once(timers, invokeLaterTimers);
-  return guid;
-};
-
-/** @private */
-function invokeOnceTimer(guid, onceTimers) {
-  if (onceTimers[this.tguid]) { delete onceTimers[this.tguid][this.mguid]; }
-  if (timers[guid]) { invoke(this.target, this.method, this.args, 2); }
-  delete timers[guid];
-}
-
-/**
-  Schedules an item to run one time during the current RunLoop.  Calling
-  this method with the same target/method combination will have no effect.
-
-  Note that although you can pass optional arguments these will not be
-  considered when looking for duplicates.  New arguments will replace previous
-  calls.
-
-      Ember.run(function(){
-        var doFoo = function() { foo(); }
-        Ember.run.once(myContext, doFoo);
-        Ember.run.once(myContext, doFoo);
-        // doFoo will only be executed once at the end of the RunLoop
-      });
-
-  @param {Object} target
-    (optional) target of method to invoke
-
-  @param {Function|String} method
-    The method to invoke.  If you pass a string it will be resolved on the
-    target at the time the method is invoked.
-
-  @param {Object...} args
-    Optional arguments to pass to the timeout.
-
-
-  @returns {Object} timer
-*/
-Ember.run.once = function(target, method) {
-  var tguid = Ember.guidFor(target),
-      mguid = Ember.guidFor(method),
-      onceTimers = run.autorun().onceTimers,
-      guid = onceTimers[tguid] && onceTimers[tguid][mguid],
-      timer;
-
-  if (guid && timers[guid]) {
-    timers[guid].args = slice.call(arguments); // replace args
-  } else {
-    timer = {
-      target: target,
-      method: method,
-      args:   slice.call(arguments),
-      tguid:  tguid,
-      mguid:  mguid
-    };
-
-    guid  = Ember.guidFor(timer);
-    timers[guid] = timer;
-    if (!onceTimers[tguid]) { onceTimers[tguid] = {}; }
-    onceTimers[tguid][mguid] = guid; // so it isn't scheduled more than once
-
-    run.schedule('actions', timer, invokeOnceTimer, guid, onceTimers);
-  }
-
-  return guid;
-};
-
-var scheduledNext;
-/** @private */
-function invokeNextTimers() {
-  scheduledNext = null;
-  for(var key in timers) {
-    if (!timers.hasOwnProperty(key)) { continue; }
-    var timer = timers[key];
-    if (timer.next) {
-      delete timers[key];
-      invoke(timer.target, timer.method, timer.args, 2);
-    }
-  }
-}
-
-/**
-  Schedules an item to run after control has been returned to the system.
-  This is often equivalent to calling setTimeout(function...,1).
-
-      Ember.run.next(myContext, function(){
-        // code to be executed in the next RunLoop, which will be scheduled after the current one
-      });
-
-  @param {Object} target
-    (optional) target of method to invoke
-
-  @param {Function|String} method
-    The method to invoke.  If you pass a string it will be resolved on the
-    target at the time the method is invoked.
-
-  @param {Object...} args
-    Optional arguments to pass to the timeout.
-
-  @returns {Object} timer
-*/
-Ember.run.next = function(target, method) {
-  var guid,
-      timer = {
-        target: target,
-        method: method,
-        args: slice.call(arguments),
-        next: true
-      };
-
-  guid = Ember.guidFor(timer);
-  timers[guid] = timer;
-
-  if (!scheduledNext) { scheduledNext = setTimeout(invokeNextTimers, 1); }
-  return guid;
-};
-
-/**
-  Cancels a scheduled item.  Must be a value returned by `Ember.run.later()`,
-  `Ember.run.once()`, or `Ember.run.next()`.
-
-      var runNext = Ember.run.next(myContext, function(){
-        // will not be executed
-      });
-      Ember.run.cancel(runNext);
-
-      var runLater = Ember.run.later(myContext, function(){
-        // will not be executed
-      }, 500);
-      Ember.run.cancel(runLater);
-
-      var runOnce = Ember.run.once(myContext, function(){
-        // will not be executed
-      });
-      Ember.run.cancel(runOnce);
-
-  @param {Object} timer
-    Timer object to cancel
-
-  @returns {void}
-*/
-Ember.run.cancel = function(timer) {
-  delete timers[timer];
-};
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Runtime
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-// Ember.Logger
-// get, set, trySet
-// guidFor, isArray, meta
-// addObserver, removeObserver
-// Ember.run.schedule
-// ..........................................................
-// CONSTANTS
-//
-
-/**
-  @static
-
-  Debug parameter you can turn on. This will log all bindings that fire to
-  the console. This should be disabled in production code. Note that you
-  can also enable this from the console, even just temporarily.
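-
-      // e.g. from the browser console:
-      Ember.LOG_BINDINGS = true;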
-
-  @type Boolean
-  @default false
-*/
-Ember.LOG_BINDINGS = false || !!Ember.ENV.LOG_BINDINGS;
-
-var get     = Ember.get,
-    set     = Ember.set,
-    guidFor = Ember.guidFor,
-    isGlobalPath = Ember.isGlobalPath;
-
-
-/** @private */
-function getWithGlobals(obj, path) {
-  return get(isGlobalPath(path) ? window : obj, path);
-}
-
-// ..........................................................
-// BINDING
-//
-
-/** @private */
-var Binding = function(toPath, fromPath) {
-  this._direction = 'fwd';
-  this._from = fromPath;
-  this._to   = toPath;
-  this._directionMap = Ember.Map.create();
-};
-
-Binding.prototype = /** @scope Ember.Binding.prototype */ {
-  /**
-    This copies the Binding so it can be connected to another object.
-    @returns {Ember.Binding}
-  */
-  copy: function () {
-    var copy = new Binding(this._to, this._from);
-    if (this._oneWay) { copy._oneWay = true; }
-    return copy;
-  },
-
-  // ..........................................................
-  // CONFIG
-  //
-
-  /**
-    This will set "from" property path to the specified value. It will not
-    attempt to resolve this property path to an actual object until you
-    connect the binding.
-
-    The binding will search for the property path starting at the root object
-    you pass when you connect() the binding.  It follows the same rules as
-    `get()` - see that method for more information.
-
-    @param {String} propertyPath the property path to connect to
-    @returns {Ember.Binding} receiver
-  */
-  from: function(path) {
-    this._from = path;
-    return this;
-  },
-
-  /**
-    This will set the "to" property path to the specified value. It will not
-    attempt to resolve this property path to an actual object until you
-    connect the binding.
-
-    The binding will search for the property path starting at the root object
-    you pass when you connect() the binding.  It follows the same rules as
-    `get()` - see that method for more information.
-
-    @param {String|Tuple} propertyPath A property path or tuple
-    @returns {Ember.Binding} this
-  */
-  to: function(path) {
-    this._to = path;
-    return this;
-  },
-
-  /**
-    Configures the binding as one way. A one-way binding will relay changes
-    on the "from" side to the "to" side, but not the other way around. This
-    means that if you change the "to" side directly, the "from" side may have
-    a different value.
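-
-    For example (illustrative; the paths are hypothetical):
-
-        var binding = Ember.Binding.from('MyApp.prefs.title').to('title').oneWay();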
-
-    @returns {Ember.Binding} receiver
-  */
-  oneWay: function() {
-    this._oneWay = true;
-    return this;
-  },
-
-  /** @private */
-  toString: function() {
-    var oneWay = this._oneWay ? '[oneWay]' : '';
-    return "Ember.Binding<" + guidFor(this) + ">(" + this._from + " -> " + this._to + ")" + oneWay;
-  },
-
-  // ..........................................................
-  // CONNECT AND SYNC
-  //
-
-  /**
-    Attempts to connect this binding instance so that it can receive and relay
-    changes. This method will raise an exception if you have not set the
-    from/to properties yet.
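-
-    For example (illustrative; the paths are hypothetical):
-
-        Ember.Binding.from('MyApp.user.name').to('name').connect(obj);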
-
-    @param {Object} obj The root object for this binding.
-    @returns {Ember.Binding} this
-  */
-  connect: function(obj) {
-    Ember.assert('Must pass a valid object to Ember.Binding.connect()', !!obj);
-
-    var fromPath = this._from, toPath = this._to;
-    Ember.trySet(obj, toPath, getWithGlobals(obj, fromPath));
-
-    // add an observer on the object to be notified when the binding should be updated
-    Ember.addObserver(obj, fromPath, this, this.fromDidChange);
-
-    // if the binding is a two-way binding, also set up an observer on the target
-    if (!this._oneWay) { Ember.addObserver(obj, toPath, this, this.toDidChange); }
-
-    this._readyToSync = true;
-
-    return this;
-  },
-
-  /**
-    Disconnects the binding instance. Changes will no longer be relayed. You
-    will not usually need to call this method.
-
-    @param {Object} obj
-      The root object you passed when connecting the binding.
-
-    @returns {Ember.Binding} this
-  */
-  disconnect: function(obj) {
-    Ember.assert('Must pass a valid object to Ember.Binding.disconnect()', !!obj);
-
-    var twoWay = !this._oneWay;
-
-    // remove an observer on the object so we're no longer notified of
-    // changes that should update bindings.
-    Ember.removeObserver(obj, this._from, this, this.fromDidChange);
-
-    // if the binding is two-way, remove the observer from the target as well
-    if (twoWay) { Ember.removeObserver(obj, this._to, this, this.toDidChange); }
-
-    this._readyToSync = false; // disable scheduled syncs...
-    return this;
-  },
-
-  // ..........................................................
-  // PRIVATE
-  //
-
-  /** @private - called when the from side changes */
-  fromDidChange: function(target) {
-    this._scheduleSync(target, 'fwd');
-  },
-
-  /** @private - called when the to side changes */
-  toDidChange: function(target) {
-    this._scheduleSync(target, 'back');
-  },
-
-  /** @private */
-  _scheduleSync: function(obj, dir) {
-    var directionMap = this._directionMap;
-    var existingDir = directionMap.get(obj);
-
-    // if we haven't scheduled the binding yet, schedule it
-    if (!existingDir) {
-      Ember.run.schedule('sync', this, this._sync, obj);
-      directionMap.set(obj, dir);
-    }
-
-    // If both a 'back' and 'fwd' sync have been scheduled on the same object,
-    // default to a 'fwd' sync so that it remains deterministic.
-    if (existingDir === 'back' && dir === 'fwd') {
-      directionMap.set(obj, 'fwd');
-    }
-  },
-
-  /** @private */
-  _sync: function(obj) {
-    var log = Ember.LOG_BINDINGS;
-
-    // don't synchronize destroyed objects or disconnected bindings
-    if (obj.isDestroyed || !this._readyToSync) { return; }
-
-    // get the direction of the binding for the object we are
-    // synchronizing from
-    var directionMap = this._directionMap;
-    var direction = directionMap.get(obj);
-
-    var fromPath = this._from, toPath = this._to;
-
-    directionMap.remove(obj);
-
-    // if we're synchronizing from the remote object...
-    if (direction === 'fwd') {
-      var fromValue = getWithGlobals(obj, this._from);
-      if (log) {
-        Ember.Logger.log(' ', this.toString(), '->', fromValue, obj);
-      }
-      if (this._oneWay) {
-        Ember.trySet(obj, toPath, fromValue);
-      } else {
-        Ember._suspendObserver(obj, toPath, this, this.toDidChange, function () {
-          Ember.trySet(obj, toPath, fromValue);
-        });
-      }
-    // if we're synchronizing *to* the remote object
-    } else if (direction === 'back') {
-      var toValue = get(obj, this._to);
-      if (log) {
-        Ember.Logger.log(' ', this.toString(), '<-', toValue, obj);
-      }
-      Ember._suspendObserver(obj, fromPath, this, this.fromDidChange, function () {
-        Ember.trySet(Ember.isGlobalPath(fromPath) ? window : obj, fromPath, toValue);
-      });
-    }
-  }
-
-};
-
-/** @private */
-function mixinProperties(to, from) {
-  for (var key in from) {
-    if (from.hasOwnProperty(key)) {
-      to[key] = from[key];
-    }
-  }
-}
-
-mixinProperties(Binding,
-/** @scope Ember.Binding */ {
-
-  /**
-    @see Ember.Binding.prototype.from
-  */
-  from: function() {
-    var C = this, binding = new C();
-    return binding.from.apply(binding, arguments);
-  },
-
-  /**
-    @see Ember.Binding.prototype.to
-  */
-  to: function() {
-    var C = this, binding = new C();
-    return binding.to.apply(binding, arguments);
-  },
-
-  /**
-    Creates a new Binding instance and makes it apply in a single direction.
-    A one-way binding will relay changes from the "from" side object (supplied
-    as the `from` argument) to the "to" side, but not the other way around.
-    This means that if you change the "to" side directly, the "from" side may
-    have a different value.
-
-    @param {String} from from path.
-    @param {Boolean} [flag] (Optional) passing nothing here will make the binding one-way.  You can
-    instead pass false to disable oneWay, making the binding two-way again.
-
-    @see Ember.Binding.prototype.oneWay
-  */
-  oneWay: function(from, flag) {
-    var C = this, binding = new C(null, from);
-    return binding.oneWay(flag);
-  }
-
-});
-
-/**
-  @class
-
-  An Ember.Binding connects the properties of two objects so that whenever the
-  value of one property changes, the other property will be changed also.
-
-  ## Automatic Creation of Bindings with `*Binding`-named Properties
-
-  You do not usually create Binding objects directly but instead describe
-  bindings in your class or object definition using automatic binding detection.
-
-  Properties ending in a `Binding` suffix will be converted to Ember.Binding instances.
-  The value of this property should be a string representing a path to another object or
-  a custom binding instance created using the Binding helpers (see "Customizing Your Bindings"):
-
-      valueBinding: "MyApp.someController.title"
-
-  This will create a binding from `MyApp.someController.title` to the `value`
-  property of your object instance automatically. Now the two values will be
-  kept in sync.
-
-  ## One Way Bindings
-
-  One especially useful binding customization you can use is the `oneWay()`
-  helper. This helper tells Ember that you are only interested in
-  receiving changes on the object you are binding from. For example, if you
-  are binding to a preference and you want to be notified if the preference
-  has changed, but your object will not be changing the preference itself, you
-  could do:
-
-      bigTitlesBinding: Ember.Binding.oneWay("MyApp.preferencesController.bigTitles")
-
-  This way, if the value of MyApp.preferencesController.bigTitles changes, the
-  "bigTitles" property of your object will change also. However, if you
-  change the value of your "bigTitles" property, it will not update the
-  preferencesController.
-
-  One-way bindings are almost twice as fast to set up and twice as fast to
-  execute because the binding only has to worry about changes to one side.
-
-  You should consider using one-way bindings any time you have an object that
-  may be created frequently and you do not intend to change a property, only
-  to monitor it for changes (as in the example above).
-
-  ## Adding Bindings Manually
-
-  All of the examples above show you how to configure a custom binding, but
-  the result of these customizations will be a binding template, not a fully
-  active Binding instance. The binding will actually become active only when you
-  instantiate the object the binding belongs to. It is useful however, to
-  understand what actually happens when the binding is activated.
-
-  For a binding to function it must have at least a "from" property and a "to"
-  property. The from property path points to the object/key that you want to
-  bind from while the to path points to the object/key you want to bind to.
-
-  When you define a custom binding, you are usually describing the property
-  you want to bind from (such as "MyApp.someController.value" in the examples
-  above). When your object is created, it will automatically assign the value
-  you want to bind "to" based on the name of your binding key. In the
-  examples above, during init, Ember objects will effectively call
-  something like this on your binding:
-
-      binding = Ember.Binding.from(this.valueBinding).to("value");
-
-  This creates a new binding instance based on the template you provide, and
-  sets the to path to the "value" property of the new object. Now that the
-  binding is fully configured with a "from" and a "to", it simply needs to be
-  connected to become active. This is done through the connect() method:
-
-      binding.connect(this);
-
-  Note that when you connect a binding you pass the object you want it to be
-  connected to.  This object will be used as the root for both the from and
-  to side of the binding when inspecting relative paths.  This allows the
-  binding to be automatically inherited by subclassed objects as well.
-
-  Now that the binding is connected, it will observe both the from and to side
-  and relay changes.
-
-  If you ever needed to do so (you almost never will, but it is useful to
-  understand this anyway), you could manually create an active binding by
-  using the Ember.bind() helper method. (This is the same method Ember uses
-  to set up the bindings on your objects):
-
-      Ember.bind(MyApp.anotherObject, "value", "MyApp.someController.value");
-
-  Both of these code fragments have the same effect as the friendlier
-  declarative form of binding creation:
-
-      MyApp.anotherObject = Ember.Object.create({
-        valueBinding: "MyApp.someController.value",
-
-        // OTHER CODE FOR THIS OBJECT...
-
-      });
-
-  Ember's built-in binding creation method makes it easy to automatically
-  create bindings for you. You should always use the highest-level APIs
-  available, even if you understand how they work underneath.
-
-  @since Ember 0.9
-*/
-Ember.Binding = Binding;
-
-/**
-  Global helper method to create a new binding.  Just pass the root object
-  along with a to and from path to create and connect the binding.
-
-  @param {Object} obj
-    The root object of the binding.
-
-  @param {String} to
-    The path to the 'to' side of the binding.  Must be relative to obj.
-
-  @param {String} from
-    The path to the 'from' side of the binding.  Must be relative to obj or
-    a global path.
-
-  @returns {Ember.Binding} binding instance
-*/
-Ember.bind = function(obj, to, from) {
-  return new Ember.Binding(to, from).connect(obj);
-};
-
-Ember.oneWay = function(obj, to, from) {
-  return new Ember.Binding(to, from).oneWay().connect(obj);
-};
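-
-/*
-  A minimal usage sketch (not from the original source; the `MyApp` objects
-  are hypothetical): Ember.bind wires a two-way binding, Ember.oneWay a
-  one-way binding.
-
-      MyApp.controller = Ember.Object.create({ value: 'hello' });
-      MyApp.view = Ember.Object.create({ value: null });
-
-      // two-way: changes on either side propagate to the other
-      Ember.bind(MyApp.view, 'value', 'MyApp.controller.value');
-
-      // one-way: only changes on the controller propagate to the view
-      Ember.oneWay(MyApp.view, 'value', 'MyApp.controller.value');
-*/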
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Runtime
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-var Mixin, REQUIRED, Alias,
-    classToString, superClassString,
-    a_map = Ember.ArrayPolyfills.map,
-    a_indexOf = Ember.ArrayPolyfills.indexOf,
-    a_forEach = Ember.ArrayPolyfills.forEach,
-    a_slice = [].slice,
-    EMPTY_META = {}, // dummy for non-writable meta
-    META_SKIP = { __emberproto__: true, __ember_count__: true },
-    o_create = Ember.create,
-    defineProperty = Ember.defineProperty,
-    guidFor = Ember.guidFor;
-
-/** @private */
-function mixinsMeta(obj) {
-  var m = Ember.meta(obj, true), ret = m.mixins;
-  if (!ret) {
-    ret = m.mixins = { __emberproto__: obj };
-  } else if (ret.__emberproto__ !== obj) {
-    ret = m.mixins = o_create(ret);
-    ret.__emberproto__ = obj;
-  }
-  return ret;
-}
-
-/** @private */
-function initMixin(mixin, args) {
-  if (args && args.length > 0) {
-    mixin.mixins = a_map.call(args, function(x) {
-      if (x instanceof Mixin) { return x; }
-
-      // Note: Manually setup a primitive mixin here.  This is the only
-      // way to actually get a primitive mixin.  This way normal creation
-      // of mixins will give you combined mixins...
-      var mixin = new Mixin();
-      mixin.properties = x;
-      return mixin;
-    });
-  }
-  return mixin;
-}
-
-/** @private */
-function isMethod(obj) {
-  return 'function' === typeof obj &&
-         obj.isMethod !== false &&
-         obj !== Boolean && obj !== Object && obj !== Number && obj !== Array && obj !== Date && obj !== String;
-}
-
-/** @private */
-function mergeMixins(mixins, m, descs, values, base) {
-  var len = mixins.length, idx, mixin, guid, props, value, key, ovalue, concats;
-
-  /** @private */
-  function removeKeys(keyName) {
-    delete descs[keyName];
-    delete values[keyName];
-  }
-
-  for(idx=0; idx < len; idx++) {
-    mixin = mixins[idx];
-    Ember.assert('Expected hash or Mixin instance, got ' + Object.prototype.toString.call(mixin), typeof mixin === 'object' && mixin !== null && Object.prototype.toString.call(mixin) !== '[object Array]');
-
-    if (mixin instanceof Mixin) {
-      guid = guidFor(mixin);
-      if (m[guid]) { continue; }
-      m[guid] = mixin;
-      props = mixin.properties;
-    } else {
-      props = mixin; // apply anonymous mixin properties
-    }
-
-    if (props) {
-      // reset before adding each new mixin to pick up concats from previous
-      concats = values.concatenatedProperties || base.concatenatedProperties;
-      if (props.concatenatedProperties) {
-        concats = concats ? concats.concat(props.concatenatedProperties) : props.concatenatedProperties;
-      }
-
-      for (key in props) {
-        if (!props.hasOwnProperty(key)) { continue; }
-        value = props[key];
-        if (value instanceof Ember.Descriptor) {
-          if (value === REQUIRED && descs[key]) { continue; }
-
-          descs[key]  = value;
-          values[key] = undefined;
-        } else {
-          // impl super if needed...
-          if (isMethod(value)) {
-            ovalue = descs[key] === undefined && values[key];
-            if (!ovalue) { ovalue = base[key]; }
-            if ('function' !== typeof ovalue) { ovalue = null; }
-            if (ovalue) {
-              var o = value.__ember_observes__, ob = value.__ember_observesBefore__;
-              value = Ember.wrap(value, ovalue);
-              value.__ember_observes__ = o;
-              value.__ember_observesBefore__ = ob;
-            }
-          } else if ((concats && a_indexOf.call(concats, key) >= 0) || key === 'concatenatedProperties') {
-            var baseValue = values[key] || base[key];
-            value = baseValue ? baseValue.concat(value) : Ember.makeArray(value);
-          }
-
-          descs[key] = undefined;
-          values[key] = value;
-        }
-      }
-
-      // manually copy toString() because some JS engines do not enumerate it
-      if (props.hasOwnProperty('toString')) {
-        base.toString = props.toString;
-      }
-
-    } else if (mixin.mixins) {
-      mergeMixins(mixin.mixins, m, descs, values, base);
-      if (mixin._without) { a_forEach.call(mixin._without, removeKeys); }
-    }
-  }
-}
-
-/** @private */
-function writableReq(obj) {
-  var m = Ember.meta(obj), req = m.required;
-  if (!req || req.__emberproto__ !== obj) {
-    req = m.required = req ? o_create(req) : { __ember_count__: 0 };
-    req.__emberproto__ = obj;
-  }
-  return req;
-}
-
-var IS_BINDING = Ember.IS_BINDING = /^.+Binding$/;
-
-/** @private */
-function detectBinding(obj, key, value, m) {
-  if (IS_BINDING.test(key)) {
-    var bindings = m.bindings;
-    if (!bindings) {
-      bindings = m.bindings = { __emberproto__: obj };
-    } else if (bindings.__emberproto__ !== obj) {
-      bindings = m.bindings = o_create(m.bindings);
-      bindings.__emberproto__ = obj;
-    }
-    bindings[key] = value;
-  }
-}
-
-/** @private */
-function connectBindings(obj, m) {
-  // TODO Mixin.apply(instance) should disconnect binding if exists
-  var bindings = m.bindings, key, binding, to;
-  if (bindings) {
-    for (key in bindings) {
-      binding = key !== '__emberproto__' && bindings[key];
-      if (binding) {
-        to = key.slice(0, -7); // strip Binding off end
-        if (binding instanceof Ember.Binding) {
-          binding = binding.copy(); // copy prototypes' instance
-          binding.to(to);
-        } else { // binding is string path
-          binding = new Ember.Binding(to, binding);
-        }
-        binding.connect(obj);
-        obj[key] = binding;
-      }
-    }
-    // mark as applied
-    m.bindings = { __emberproto__: obj };
-  }
-}
-
-function finishPartial(obj, m) {
-  connectBindings(obj, m || Ember.meta(obj));
-  return obj;
-}
-
-/** @private */
-function applyMixin(obj, mixins, partial) {
-  var descs = {}, values = {}, m = Ember.meta(obj), req = m.required,
-      key, value, desc, prevValue, paths, len, idx;
-
-  // Go through all mixins and hashes passed in, and:
-  //
-  // * Handle concatenated properties
-  // * Set up _super wrapping if necessary
-  // * Set up computed property descriptors
-  // * Copying `toString` in broken browsers
-  mergeMixins(mixins, mixinsMeta(obj), descs, values, obj);
-
-  for(key in values) {
-    if (key === 'constructor') { continue; } // never overwrite constructor
-    if (!values.hasOwnProperty(key)) { continue; }
-
-    desc = descs[key];
-    value = values[key];
-
-    if (desc === REQUIRED) {
-      if (!(key in obj)) {
-        Ember.assert('Required property not defined: '+key, !!partial);
-
-        // for partial applies add to hash of required keys
-        req = writableReq(obj);
-        req.__ember_count__++;
-        req[key] = true;
-      }
-    } else {
-      while (desc && desc instanceof Alias) {
-        var altKey = desc.methodName;
-        if (descs[altKey] || values[altKey]) {
-          value = values[altKey];
-          desc  = descs[altKey];
-        } else if (m.descs[altKey]) {
-          desc  = m.descs[altKey];
-          value = undefined;
-        } else {
-          desc = undefined;
-          value = obj[altKey];
-        }
-      }
-
-      if (desc === undefined && value === undefined) { continue; }
-
-      prevValue = obj[key];
-
-      if ('function' === typeof prevValue) {
-        if ((paths = prevValue.__ember_observesBefore__)) {
-          len = paths.length;
-          for (idx=0; idx < len; idx++) {
-            Ember.removeBeforeObserver(obj, paths[idx], null, key);
-          }
-        } else if ((paths = prevValue.__ember_observes__)) {
-          len = paths.length;
-          for (idx=0; idx < len; idx++) {
-            Ember.removeObserver(obj, paths[idx], null, key);
-          }
-        }
-      }
-
-      detectBinding(obj, key, value, m);
-
-      defineProperty(obj, key, desc, value, m);
-
-      if ('function' === typeof value) {
-        if (paths = value.__ember_observesBefore__) {
-          len = paths.length;
-          for (idx=0; idx < len; idx++) {
-            Ember.addBeforeObserver(obj, paths[idx], null, key);
-          }
-        } else if (paths = value.__ember_observes__) {
-          len = paths.length;
-          for (idx=0; idx < len; idx++) {
-            Ember.addObserver(obj, paths[idx], null, key);
-          }
-        }
-      }
-
-      if (req && req[key]) {
-        req = writableReq(obj);
-        req.__ember_count__--;
-        req[key] = false;
-      }
-    }
-  }
-
-  if (!partial) { // don't apply to prototype
-    finishPartial(obj, m);
-  }
-
-  // Make sure no required attrs remain
-  if (!partial && req && req.__ember_count__>0) {
-    var keys = [];
-    for (key in req) {
-      if (META_SKIP[key]) { continue; }
-      keys.push(key);
-    }
-    // TODO: Remove surrounding if clause from production build
-    Ember.assert('Required properties not defined: '+keys.join(','));
-  }
-  return obj;
-}
-
-Ember.mixin = function(obj) {
-  var args = a_slice.call(arguments, 1);
-  applyMixin(obj, args, false);
-  return obj;
-};
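-
-/*
-  A minimal usage sketch (the target object here is hypothetical):
-  Ember.mixin applies one or more property hashes or Mixin instances
-  directly to an existing object.
-
-      var obj = {};
-      Ember.mixin(obj, { greet: function() { return 'hello'; } });
-      obj.greet(); // => 'hello'
-*/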
-
-/**
-  @class
-
-  The `Ember.Mixin` class allows you to create mixins, whose properties can be
-  added to other classes. For instance,
-
-      App.Editable = Ember.Mixin.create({
-        edit: function() {
-          console.log('starting to edit');
-          this.set('isEditing', true);
-        },
-        isEditing: false
-      });
-
-      // Mix mixins into classes by passing them as the first arguments to
-      // .extend or .create.
-      App.CommentView = Ember.View.extend(App.Editable, {
-        template: Ember.Handlebars.compile('{{#if isEditing}}...{{else}}...{{/if}}')
-      });
-
-      commentView = App.CommentView.create();
-      commentView.edit(); // => outputs 'starting to edit'
-
-  Note that Mixins are created with `Ember.Mixin.create`, not
-  `Ember.Mixin.extend`.
-*/
-Ember.Mixin = function() { return initMixin(this, arguments); };
-
-/** @private */
-Mixin = Ember.Mixin;
-
-/** @private */
-Mixin._apply = applyMixin;
-
-Mixin.applyPartial = function(obj) {
-  var args = a_slice.call(arguments, 1);
-  return applyMixin(obj, args, true);
-};
-
-Mixin.finishPartial = finishPartial;
-
-Mixin.create = function() {
-  classToString.processed = false;
-  var M = this;
-  return initMixin(new M(), arguments);
-};
-
-var MixinPrototype = Mixin.prototype;
-
-MixinPrototype.reopen = function() {
-  var mixin, tmp;
-
-  if (this.properties) {
-    mixin = Mixin.create();
-    mixin.properties = this.properties;
-    delete this.properties;
-    this.mixins = [mixin];
-  } else if (!this.mixins) {
-    this.mixins = [];
-  }
-
-  var len = arguments.length, mixins = this.mixins, idx;
-
-  for(idx=0; idx < len; idx++) {
-    mixin = arguments[idx];
-    Ember.assert('Expected hash or Mixin instance, got ' + Object.prototype.toString.call(mixin), typeof mixin === 'object' && mixin !== null && Object.prototype.toString.call(mixin) !== '[object Array]');
-
-    if (mixin instanceof Mixin) {
-      mixins.push(mixin);
-    } else {
-      tmp = Mixin.create();
-      tmp.properties = mixin;
-      mixins.push(tmp);
-    }
-  }
-
-  return this;
-};
-
-MixinPrototype.apply = function(obj) {
-  return applyMixin(obj, [this], false);
-};
-
-MixinPrototype.applyPartial = function(obj) {
-  return applyMixin(obj, [this], true);
-};
-
-/** @private */
-function _detect(curMixin, targetMixin, seen) {
-  var guid = guidFor(curMixin);
-
-  if (seen[guid]) { return false; }
-  seen[guid] = true;
-
-  if (curMixin === targetMixin) { return true; }
-  var mixins = curMixin.mixins, loc = mixins ? mixins.length : 0;
-  while (--loc >= 0) {
-    if (_detect(mixins[loc], targetMixin, seen)) { return true; }
-  }
-  return false;
-}
-
-MixinPrototype.detect = function(obj) {
-  if (!obj) { return false; }
-  if (obj instanceof Mixin) { return _detect(obj, this, {}); }
-  var mixins = Ember.meta(obj, false).mixins;
-  if (mixins) {
-    return !!mixins[guidFor(this)];
-  }
-  return false;
-};
-
-MixinPrototype.without = function() {
-  var ret = new Mixin(this);
-  ret._without = a_slice.call(arguments);
-  return ret;
-};
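-
-/*
-  A minimal sketch of without(), reusing the App.Editable mixin from the
-  example above: the returned mixin omits the named properties.
-
-      var ViewOnly = App.Editable.without('edit');
-      // applying ViewOnly adds isEditing but not edit()
-*/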
-
-/** @private */
-function _keys(ret, mixin, seen) {
-  if (seen[guidFor(mixin)]) { return; }
-  seen[guidFor(mixin)] = true;
-
-  if (mixin.properties) {
-    var props = mixin.properties;
-    for (var key in props) {
-      if (props.hasOwnProperty(key)) { ret[key] = true; }
-    }
-  } else if (mixin.mixins) {
-    a_forEach.call(mixin.mixins, function(x) { _keys(ret, x, seen); });
-  }
-}
-
-MixinPrototype.keys = function() {
-  var keys = {}, seen = {}, ret = [];
-  _keys(keys, this, seen);
-  for(var key in keys) {
-    if (keys.hasOwnProperty(key)) { ret.push(key); }
-  }
-  return ret;
-};
-
-/** @private - make Mixin's have nice displayNames */
-
-var NAME_KEY = Ember.GUID_KEY+'_name';
-var get = Ember.get;
-
-/** @private */
-function processNames(paths, root, seen) {
-  var idx = paths.length;
-  for(var key in root) {
-    if (!root.hasOwnProperty || !root.hasOwnProperty(key)) { continue; }
-    var obj = root[key];
-    paths[idx] = key;
-
-    if (obj && obj.toString === classToString) {
-      obj[NAME_KEY] = paths.join('.');
-    } else if (obj && get(obj, 'isNamespace')) {
-      if (seen[guidFor(obj)]) { continue; }
-      seen[guidFor(obj)] = true;
-      processNames(paths, obj, seen);
-    }
-  }
-  paths.length = idx; // cut out last item
-}
-
-/** @private */
-function findNamespaces() {
-  var Namespace = Ember.Namespace, obj, isNamespace;
-
-  if (Namespace.PROCESSED) { return; }
-
-  for (var prop in window) {
-    // get(window.globalStorage, 'isNamespace') would try to read the storage for domain isNamespace and cause an exception in Firefox.
-    // globalStorage is a storage mechanism obsoleted by the WHATWG storage specification. See https://developer.mozilla.org/en/DOM/Storage#globalStorage
-    if (prop === "globalStorage" && window.StorageList && window.globalStorage instanceof window.StorageList) { continue; }
-    // Unfortunately, some versions of IE don't support window.hasOwnProperty
-    if (window.hasOwnProperty && !window.hasOwnProperty(prop)) { continue; }
-
-    // At times we are not allowed to access certain properties for security reasons.
-    // There are also times where even if we can access them, we are not allowed to access their properties.
-    try {
-      obj = window[prop];
-      isNamespace = obj && get(obj, 'isNamespace');
-    } catch (e) {
-      continue;
-    }
-
-    if (isNamespace) {
-      Ember.deprecate("Namespaces should not begin with lowercase.", /^[A-Z]/.test(prop));
-      obj[NAME_KEY] = prop;
-    }
-  }
-}
-
-Ember.identifyNamespaces = findNamespaces;
-
-/** @private */
-superClassString = function(mixin) {
-  var superclass = mixin.superclass;
-  if (superclass) {
-    if (superclass[NAME_KEY]) { return superclass[NAME_KEY]; }
-    else { return superClassString(superclass); }
-  } else {
-    return;
-  }
-};
-
-/** @private */
-classToString = function() {
-  var Namespace = Ember.Namespace, namespace;
-
-  // TODO: Namespace should really be in Metal
-  if (Namespace) {
-    if (!this[NAME_KEY] && !classToString.processed) {
-      if (!Namespace.PROCESSED) {
-        findNamespaces();
-        Namespace.PROCESSED = true;
-      }
-
-      classToString.processed = true;
-
-      var namespaces = Namespace.NAMESPACES;
-      for (var i=0, l=namespaces.length; i<l; i++) {
-        namespace = namespaces[i];
-        processNames([namespace.toString()], namespace, {});
-      }
-    }
-  }
-
-  if (this[NAME_KEY]) {
-    return this[NAME_KEY];
-  } else {
-    var str = superClassString(this);
-    if (str) {
-      return "(subclass of " + str + ")";
-    } else {
-      return "(unknown mixin)";
-    }
-  }
-};
-
-MixinPrototype.toString = classToString;
-
-// returns the mixins currently applied to the specified object
-// TODO: Make Ember.mixin
-Mixin.mixins = function(obj) {
-  var ret = [], mixins = Ember.meta(obj, false).mixins, key, mixin;
-  if (mixins) {
-    for(key in mixins) {
-      if (META_SKIP[key]) { continue; }
-      mixin = mixins[key];
-
-      // skip primitive mixins since these are always anonymous
-      if (!mixin.properties) { ret.push(mixins[key]); }
-    }
-  }
-  return ret;
-};
-
-REQUIRED = new Ember.Descriptor();
-REQUIRED.toString = function() { return '(Required Property)'; };
-
-Ember.required = function() {
-  return REQUIRED;
-};
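-
-/*
-  A minimal sketch (the App.Serializable mixin is hypothetical): a property
-  set to Ember.required() must be defined by whatever the mixin is applied
-  to, or an assertion fires.
-
-      App.Serializable = Ember.Mixin.create({
-        serialize: Ember.required()
-      });
-*/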
-
-/** @private */
-Alias = function(methodName) {
-  this.methodName = methodName;
-};
-Alias.prototype = new Ember.Descriptor();
-
-Ember.alias = function(methodName) {
-  return new Alias(methodName);
-};
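-
-/*
-  A minimal sketch (the App.Walkable mixin is hypothetical): Ember.alias
-  exposes an existing method under a second name when the mixin is applied.
-
-      App.Walkable = Ember.Mixin.create({
-        walk: function() { return 'walking'; },
-        stroll: Ember.alias('walk')
-      });
-*/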
-
-// ..........................................................
-// OBSERVER HELPER
-//
-
-Ember.observer = function(func) {
-  var paths = a_slice.call(arguments, 1);
-  func.__ember_observes__ = paths;
-  return func;
-};
-
-// If observers ever become asynchronous, Ember.immediateObserver
-// must remain synchronous.
-Ember.immediateObserver = function() {
-  for (var i=0, l=arguments.length; i<l; i++) {
-    var arg = arguments[i];
-    Ember.assert("Immediate observers must observe internal properties only, not properties on other objects.", typeof arg !== "string" || arg.indexOf('.') === -1);
-  }
-
-  return Ember.observer.apply(this, arguments);
-};
-
-Ember.beforeObserver = function(func) {
-  var paths = a_slice.call(arguments, 1);
-  func.__ember_observesBefore__ = paths;
-  return func;
-};
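-
-/*
-  A minimal sketch (the App.Person class is hypothetical): these helpers tag
-  a function with the paths it observes so that mixin/object creation can
-  wire the observers up automatically.
-
-      App.Person = Ember.Object.extend({
-        nameWillChange: Ember.beforeObserver(function() {
-          // runs just before 'name' changes
-        }, 'name'),
-
-        nameDidChange: Ember.observer(function() {
-          // runs after 'name' changes
-        }, 'name')
-      });
-*/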
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Metal
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-
-})();
-
-(function() {
-/**
- * @license
- * ==========================================================================
- * Ember
- * Copyright ©2006-2011, Strobe Inc. and contributors.
- * Portions copyright ©2008-2011 Apple Inc. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * For more information about Ember, visit http://www.emberjs.com
- *
- * ==========================================================================
- */
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Runtime
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-/*globals ENV */
-var indexOf = Ember.EnumerableUtils.indexOf;
-
-// ........................................
-// TYPING & ARRAY MESSAGING
-//
-
-var TYPE_MAP = {};
-var t = "Boolean Number String Function Array Date RegExp Object".split(" ");
-Ember.ArrayPolyfills.forEach.call(t, function(name) {
-  TYPE_MAP[ "[object " + name + "]" ] = name.toLowerCase();
-});
-
-var toString = Object.prototype.toString;
-
-/**
-  Returns a consistent type for the passed item.
-
-  Use this instead of the built-in `typeof` operator to get the type of an item.
-  It will return the same result across all browsers and includes a bit
-  more detail.  Here is what will be returned:
-
-      | Return Value  | Meaning                                              |
-      |---------------|------------------------------------------------------|
-      | 'string'      | String primitive                                     |
-      | 'number'      | Number primitive                                     |
-      | 'boolean'     | Boolean primitive                                    |
-      | 'null'        | Null value                                           |
-      | 'undefined'   | Undefined value                                      |
-      | 'function'    | A function                                           |
-      | 'array'       | An instance of Array                                 |
-      | 'class'       | An Ember class (created using Ember.Object.extend()) |
-      | 'instance'    | An Ember object instance                             |
-      | 'error'       | An instance of the Error object                      |
-      | 'object'      | A JavaScript object not inheriting from Ember.Object |
-
-  Examples:
-
-      Ember.typeOf();                      => 'undefined'
-      Ember.typeOf(null);                  => 'null'
-      Ember.typeOf(undefined);             => 'undefined'
-      Ember.typeOf('michael');             => 'string'
-      Ember.typeOf(101);                   => 'number'
-      Ember.typeOf(true);                  => 'boolean'
-      Ember.typeOf(Ember.makeArray);       => 'function'
-      Ember.typeOf([1,2,90]);              => 'array'
-      Ember.typeOf(Ember.Object.extend()); => 'class'
-      Ember.typeOf(Ember.Object.create()); => 'instance'
-      Ember.typeOf(new Error('teamocil')); => 'error'
-
-      // "normal" JavaScript object
-      Ember.typeOf({a: 'b'});              => 'object'
-
-  @param item {Object} the item to check
-  @returns {String} the type
-*/
-Ember.typeOf = function(item) {
-  var ret;
-
-  ret = (item === null || item === undefined) ? String(item) : TYPE_MAP[toString.call(item)] || 'object';
-
-  if (ret === 'function') {
-    if (Ember.Object && Ember.Object.detect(item)) ret = 'class';
-  } else if (ret === 'object') {
-    if (item instanceof Error) ret = 'error';
-    else if (Ember.Object && item instanceof Ember.Object) ret = 'instance';
-    else ret = 'object';
-  }
-
-  return ret;
-};
-
-/**
-  Returns true if the passed value is null or undefined.  This avoids errors
-  from JSLint complaining about use of ==, which can be technically
-  confusing.
-
-      Ember.none();             => true
-      Ember.none(null);         => true
-      Ember.none(undefined);    => true
-      Ember.none('');           => false
-      Ember.none([]);           => false
-      Ember.none(function(){}); => false
-
-  @param {Object} obj Value to test
-  @returns {Boolean}
-*/
-Ember.none = function(obj) {
-  return obj === null || obj === undefined;
-};
-
-/**
-  Verifies that a value is null, undefined, or empty (a zero-length string
-  or array).
-
-  This extends the rules of `Ember.none` by also returning true for an
-  empty string and an empty array.
-
-      Ember.empty();               => true
-      Ember.empty(null);           => true
-      Ember.empty(undefined);      => true
-      Ember.empty('');             => true
-      Ember.empty([]);             => true
-      Ember.empty('tobias fünke'); => false
-      Ember.empty([0,1,2]);        => false
-
-  @param {Object} obj Value to test
-  @returns {Boolean}
-*/
-Ember.empty = function(obj) {
-  return obj === null || obj === undefined || (obj.length === 0 && typeof obj !== 'function');
-};
-
-/**
- This will compare two JavaScript values of possibly different types.
- It tells you which one is greater than the other by returning:
-
-  - -1 if the first is smaller than the second,
-  - 0 if both are equal,
-  - 1 if the first is greater than the second.
-
- If the types are different, the order is calculated based on Ember.ORDER_DEFINITION.
- If they have the same type, an appropriate comparison for that type is made.
-
-    Ember.compare('hello', 'hello');  => 0
-    Ember.compare('abc', 'dfg');      => -1
-    Ember.compare(2, 1);              => 1
-
- @param {Object} v First value to compare
- @param {Object} w Second value to compare
- @returns {Number} -1 if v < w, 0 if v = w and 1 if v > w.
-*/
-Ember.compare = function compare(v, w) {
-  if (v === w) { return 0; }
-
-  var type1 = Ember.typeOf(v);
-  var type2 = Ember.typeOf(w);
-
-  var Comparable = Ember.Comparable;
-  if (Comparable) {
-    if (type1==='instance' && Comparable.detect(v.constructor)) {
-      return v.constructor.compare(v, w);
-    }
-
-    if (type2 === 'instance' && Comparable.detect(w.constructor)) {
-      return 1-w.constructor.compare(w, v);
-    }
-  }
-
-  // If we haven't yet generated a reverse-mapping of Ember.ORDER_DEFINITION,
-  // do so now.
-  var mapping = Ember.ORDER_DEFINITION_MAPPING;
-  if (!mapping) {
-    var order = Ember.ORDER_DEFINITION;
-    mapping = Ember.ORDER_DEFINITION_MAPPING = {};
-    var idx, len;
-    for (idx = 0, len = order.length; idx < len;  ++idx) {
-      mapping[order[idx]] = idx;
-    }
-
-    // We no longer need Ember.ORDER_DEFINITION.
-    delete Ember.ORDER_DEFINITION;
-  }
-
-  var type1Index = mapping[type1];
-  var type2Index = mapping[type2];
-
-  if (type1Index < type2Index) { return -1; }
-  if (type1Index > type2Index) { return 1; }
-
-  // types are equal - so we have to check values now
-  switch (type1) {
-    case 'boolean':
-    case 'number':
-      if (v < w) { return -1; }
-      if (v > w) { return 1; }
-      return 0;
-
-    case 'string':
-      var comp = v.localeCompare(w);
-      if (comp < 0) { return -1; }
-      if (comp > 0) { return 1; }
-      return 0;
-
-    case 'array':
-      var vLen = v.length;
-      var wLen = w.length;
-      var l = Math.min(vLen, wLen);
-      var r = 0;
-      var i = 0;
-      while (r === 0 && i < l) {
-        r = compare(v[i],w[i]);
-        i++;
-      }
-      if (r !== 0) { return r; }
-
-      // all elements are equal now
-      // shorter array should be ordered first
-      if (vLen < wLen) { return -1; }
-      if (vLen > wLen) { return 1; }
-      // arrays are equal now
-      return 0;
-
-    case 'instance':
-      if (Ember.Comparable && Ember.Comparable.detect(v)) {
-        return v.compare(v, w);
-      }
-      return 0;
-
-    case 'date':
-      var vNum = v.getTime();
-      var wNum = w.getTime();
-      if (vNum < wNum) { return -1; }
-      if (vNum > wNum) { return 1; }
-      return 0;
-
-    default:
-      return 0;
-  }
-};
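-
-/*
-  A minimal sketch of the array and mixed-type cases handled above:
-
-      Ember.compare([1, 2], [1, 2, 3]); // => -1 (shorter array orders first)
-      Ember.compare('a', 1);            // => 1  ('string' sorts after
-                                        //        'number' in ORDER_DEFINITION)
-*/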
-
-/** @private */
-function _copy(obj, deep, seen, copies) {
-  var ret, loc, key;
-
-  // primitive data types are immutable, just return them.
-  if ('object' !== typeof obj || obj===null) return obj;
-
-  // avoid cyclical loops
-  if (deep && (loc=indexOf(seen, obj))>=0) return copies[loc];
-
-  Ember.assert('Cannot clone an Ember.Object that does not implement Ember.Copyable', !(obj instanceof Ember.Object) || (Ember.Copyable && Ember.Copyable.detect(obj)));
-
-  // IMPORTANT: this specific test will detect a native array only.  Any other
-  // object will need to implement Copyable.
-  if (Ember.typeOf(obj) === 'array') {
-    ret = obj.slice();
-    if (deep) {
-      loc = ret.length;
-      while(--loc>=0) ret[loc] = _copy(ret[loc], deep, seen, copies);
-    }
-  } else if (Ember.Copyable && Ember.Copyable.detect(obj)) {
-    ret = obj.copy(deep, seen, copies);
-  } else {
-    ret = {};
-    for(key in obj) {
-      if (!obj.hasOwnProperty(key)) continue;
-      ret[key] = deep ? _copy(obj[key], deep, seen, copies) : obj[key];
-    }
-  }
-
-  if (deep) {
-    seen.push(obj);
-    copies.push(ret);
-  }
-
-  return ret;
-}
-
-/**
-  Creates a clone of the passed object. This function can take just about
-  any type of object and create a clone of it, including primitive values
-  (which are not actually cloned because they are immutable).
-
-  If the passed object implements the copy() method (see Ember.Copyable),
-  this function will simply call that method and return the result.
-
-  @param {Object} object The object to clone
-  @param {Boolean} deep If true, a deep copy of the object is made
-  @returns {Object} The cloned object
-*/
-Ember.copy = function(obj, deep) {
-  // fast paths
-  if ('object' !== typeof obj || obj===null) return obj; // can't copy primitives
-  if (Ember.Copyable && Ember.Copyable.detect(obj)) return obj.copy(deep);
-  return _copy(obj, deep, deep ? [] : null, deep ? [] : null);
-};
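-
-/*
-  A minimal sketch of shallow vs. deep copies:
-
-      var original = { list: [1, 2, 3] };
-
-      var shallow = Ember.copy(original);
-      shallow.list === original.list; // => true (same array is shared)
-
-      var deep = Ember.copy(original, true);
-      deep.list === original.list;    // => false (array was cloned too)
-*/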
-
-/**
-  Convenience method to inspect an object. This method will attempt to
-  convert the object into a useful string description.
-
-  @param {Object} obj The object you want to inspect.
-  @returns {String} A description of the object
-*/
-Ember.inspect = function(obj) {
-  var v, ret = [];
-  for(var key in obj) {
-    if (obj.hasOwnProperty(key)) {
-      v = obj[key];
-      if (v === 'toString') { continue; } // ignore useless items
-      if (Ember.typeOf(v) === 'function') { v = "function() { ... }"; }
-      ret.push(key + ": " + v);
-    }
-  }
-  return "{" + ret.join(" , ") + "}";
-};
-
-/**
-  Compares two objects, returning true if they are logically equal.  This is
-  a deeper comparison than a simple triple equal. For sets it will compare the
-  internal objects.  For any other object that implements `isEqual()` it will 
-  respect that method.
-
-      Ember.isEqual('hello', 'hello');  => true
-      Ember.isEqual(1, 2);              => false
-      Ember.isEqual([4,2], [4,2]);      => false
-
-  @param {Object} a first object to compare
-  @param {Object} b second object to compare
-  @returns {Boolean}
-*/
-Ember.isEqual = function(a, b) {
-  if (a && 'function'===typeof a.isEqual) return a.isEqual(b);
-  return a === b;
-};
-
-/**
-  @private
-  Used by Ember.compare
-*/
-Ember.ORDER_DEFINITION = Ember.ENV.ORDER_DEFINITION || [
-  'undefined',
-  'null',
-  'boolean',
-  'number',
-  'string',
-  'array',
-  'object',
-  'instance',
-  'function',
-  'class',
-  'date'
-];
-
-/**
-  Returns all of the keys defined on an object or hash. This is useful
-  when inspecting objects for debugging.  On browsers that support it, this
-  uses the native Object.keys implementation.
-
-  @function
-  @param {Object} obj
-  @returns {Array} Array containing keys of obj
-*/
-Ember.keys = Object.keys;
-
-if (!Ember.keys) {
-  Ember.keys = function(obj) {
-    var ret = [];
-    for(var key in obj) {
-      if (obj.hasOwnProperty(key)) { ret.push(key); }
-    }
-    return ret;
-  };
-}
-
-// ..........................................................
-// ERROR
-//
-
-/**
-  @class
-
-  A subclass of the JavaScript Error object for use in Ember.
-*/
-Ember.Error = function() {
-  var tmp = Error.prototype.constructor.apply(this, arguments);
-
-  for (var p in tmp) {
-    if (tmp.hasOwnProperty(p)) { this[p] = tmp[p]; }
-  }
-  this.message = tmp.message;
-};
-
-Ember.Error.prototype = Ember.create(Error.prototype);
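-
-/*
-  A minimal sketch: Ember.Error behaves like a native Error and can be
-  thrown and caught the same way.
-
-      throw new Ember.Error('description of the error');
-*/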
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Runtime
-// Copyright: ©2011 Strobe Inc.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-
-/** @private **/
-var STRING_DASHERIZE_REGEXP = (/[ _]/g);
-var STRING_DASHERIZE_CACHE = {};
-var STRING_DECAMELIZE_REGEXP = (/([a-z])([A-Z])/g);
-var STRING_CAMELIZE_REGEXP = (/(\-|_|\s)+(.)?/g);
-var STRING_UNDERSCORE_REGEXP_1 = (/([a-z\d])([A-Z]+)/g);
-var STRING_UNDERSCORE_REGEXP_2 = (/\-|\s+/g);
-
-/**
-  Defines the hash of localized strings for the current language.  Used by
-  the `Ember.String.loc()` helper.  To localize, add string values to this
-  hash.
-
-  @type Hash
-*/
-Ember.STRINGS = {};
-
-/**
-  Defines string helper methods including string formatting and localization.
-  Unless Ember.EXTEND_PROTOTYPES is set to false, these methods will also be
-  added to String.prototype.
-
-  @namespace
-*/
-Ember.String = {
-
-  /**
-    Apply formatting options to the string.  This will look for occurrences
-    of %@ in your string and substitute them with the arguments you pass into
-    this method.  If you want to control the specific order of replacement,
-    you can add a number after the key as well to indicate which argument
-    you want to insert.
-
-    Ordered insertions are most useful when building loc strings where values
-    you need to insert may appear in different orders.
-
-        "Hello %@ %@".fmt('John', 'Doe') => "Hello John Doe"
-        "Hello %@2, %@1".fmt('John', 'Doe') => "Hello Doe, John"
-
-    @param {Object...} [args]
-    @returns {String} formatted string
-  */
-  fmt: function(str, formats) {
-    // first, replace any ORDERED replacements.
-    var idx  = 0; // the current index for non-numerical replacements
-    return str.replace(/%@([0-9]+)?/g, function(s, argIndex) {
-      argIndex = (argIndex) ? parseInt(argIndex, 10) - 1 : idx++ ;
-      s = formats[argIndex];
-      return ((s === null) ? '(null)' : (s === undefined) ? '' : s).toString();
-    }) ;
-  },
-
-  /**
-    Formats the passed string, but first looks up the string in the localized
-    strings hash.  This is a convenient way to localize text.  See
-    `Ember.String.fmt()` for more information on formatting.
-
-    Note that it is traditional but not required to prefix localized string
-    keys with an underscore or other character so you can easily identify
-    localized strings.
-
-        Ember.STRINGS = {
-          '_Hello World': 'Bonjour le monde',
-          '_Hello %@ %@': 'Bonjour %@ %@'
-        };
-
-        Ember.String.loc("_Hello World");
-        => 'Bonjour le monde';
-
-        Ember.String.loc("_Hello %@ %@", ["John", "Smith"]);
-        => "Bonjour John Smith";
-
-    @param {String} str
-      The string to format
-
-    @param {Array} formats
-      Optional array of parameters to interpolate into string.
-
-    @returns {String} formatted string
-  */
-  loc: function(str, formats) {
-    str = Ember.STRINGS[str] || str;
-    return Ember.String.fmt(str, formats) ;
-  },
-
-  /**
-    Splits a string into an array of separate units at whitespace.  This is a
-    convenience method for split that is mostly useful when applied to the
-    String.prototype.
-
-        Ember.String.w("alpha beta gamma").forEach(function(key) {
-          console.log(key);
-        });
-        > alpha
-        > beta
-        > gamma
-
-    @param {String} str
-      The string to split
-
-    @returns {Array} array of split strings
-  */
-  w: function(str) { return str.split(/\s+/); },
-
-  /**
-    Converts a camelized string into all lower case separated by underscores.
-    
-        'innerHTML'.decamelize()         => 'inner_html'
-        'action_name'.decamelize()       => 'action_name'
-        'css-class-name'.decamelize()    => 'css-class-name'
-        'my favorite items'.decamelize() => 'my favorite items'
-
-    @param {String} str
-      The string to decamelize.
-
-    @returns {String} the decamelized string.
-  */
-  decamelize: function(str) {
-    return str.replace(STRING_DECAMELIZE_REGEXP, '$1_$2').toLowerCase();
-  },
-
-  /**
-    Replaces underscores or spaces with dashes.
-    
-        'innerHTML'.dasherize()         => 'inner-html'
-        'action_name'.dasherize()       => 'action-name'
-        'css-class-name'.dasherize()    => 'css-class-name'
-        'my favorite items'.dasherize() => 'my-favorite-items'
-
-    @param {String} str
-      The string to dasherize.
-
-    @returns {String} the dasherized string.
-  */
-  dasherize: function(str) {
-    var cache = STRING_DASHERIZE_CACHE,
-        ret   = cache[str];
-
-    if (ret) {
-      return ret;
-    } else {
-      ret = Ember.String.decamelize(str).replace(STRING_DASHERIZE_REGEXP,'-');
-      cache[str] = ret;
-    }
-
-    return ret;
-  },
-
-  /**
-    Returns the lowerCaseCamel form of a string.
-
-        'innerHTML'.camelize()         => 'innerHTML'
-        'action_name'.camelize()       => 'actionName'
-        'css-class-name'.camelize()    => 'cssClassName'
-        'my favorite items'.camelize() => 'myFavoriteItems'
-
-    @param {String} str
-      The string to camelize.
-
-    @returns {String} the camelized string.
-  */
-  camelize: function(str) {
-    return str.replace(STRING_CAMELIZE_REGEXP, function(match, separator, chr) {
-      return chr ? chr.toUpperCase() : '';
-    });
-  },
-
-  /**
-    Returns the UpperCamelCase form of a string.
-
-        'innerHTML'.classify()         => 'InnerHTML'
-        'action_name'.classify()       => 'ActionName'
-        'css-class-name'.classify()    => 'CssClassName'
-        'my favorite items'.classify() => 'MyFavoriteItems'
-  */
-  classify: function(str) {
-    var camelized = Ember.String.camelize(str);
-    return camelized.charAt(0).toUpperCase() + camelized.substr(1);
-  },
-
-  /**
-    More general than decamelize. Returns the lower_case_and_underscored
-    form of a string.
-
-        'innerHTML'.underscore()         => 'inner_html'
-        'action_name'.underscore()       => 'action_name'
-        'css-class-name'.underscore()    => 'css_class_name'
-        'my favorite items'.underscore() => 'my_favorite_items'
-
-    @param {String} str
-      The string to underscore.
-
-    @returns {String} the underscored string.
-  */
-  underscore: function(str) {
-    return str.replace(STRING_UNDERSCORE_REGEXP_1, '$1_$2').
-      replace(STRING_UNDERSCORE_REGEXP_2, '_').toLowerCase();
-  }
-};
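-
-/*
-  A minimal sketch combining the helpers above, called through Ember.String
-  (these work whether or not the String.prototype extensions are enabled):
-
-      Ember.String.fmt('Hello %@ %@', ['John', 'Doe']); // => 'Hello John Doe'
-      Ember.String.w('alpha beta');                     // => ['alpha', 'beta']
-      Ember.String.camelize('action_name');             // => 'actionName'
-      Ember.String.underscore('innerHTML');             // => 'inner_html'
-*/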
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Runtime
-// Copyright: ©2006-2011 Strobe Inc. and contributors.
-//            Portions ©2008-2011 Apple Inc. All rights reserved.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-var fmt = Ember.String.fmt,
-    w   = Ember.String.w,
-    loc = Ember.String.loc,
-    camelize = Ember.String.camelize,
-    decamelize = Ember.String.decamelize,
-    dasherize = Ember.String.dasherize,
-    underscore = Ember.String.underscore;
-
-if (Ember.EXTEND_PROTOTYPES) {
-
-  /**
-    @see Ember.String.fmt
-  */
-  String.prototype.fmt = function() {
-    return fmt(this, arguments);
-  };
-
-  /**
-    @see Ember.String.w
-  */
-  String.prototype.w = function() {
-    return w(this);
-  };
-
-  /**
-    @see Ember.String.loc
-  */
-  String.prototype.loc = function() {
-    return loc(this, arguments);
-  };
-
-  /**
-    @see Ember.String.camelize
-  */
-  String.prototype.camelize = function() {
-    return camelize(this);
-  };
-
-  /**
-    @see Ember.String.decamelize
-  */
-  String.prototype.decamelize = function() {
-    return decamelize(this);
-  };
-
-  /**
-    @see Ember.String.dasherize
-  */
-  String.prototype.dasherize = function() {
-    return dasherize(this);
-  };
-
-  /**
-    @see Ember.String.underscore
-  */
-  String.prototype.underscore = function() {
-    return underscore(this);
-  };
-
-}
-
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Runtime
-// Copyright: ©2006-2011 Strobe Inc. and contributors.
-//            Portions ©2008-2011 Apple Inc. All rights reserved.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-var a_slice = Array.prototype.slice;
-
-if (Ember.EXTEND_PROTOTYPES) {
-
-  /**
-    The `property` extension of JavaScript's Function prototype is available
-    when Ember.EXTEND_PROTOTYPES is true, which is the default.
-
-    Computed properties allow you to treat a function like a property:
-
-        MyApp.president = Ember.Object.create({
-          firstName: "Barack",
-          lastName: "Obama",
-
-          fullName: function() {
-            return this.get('firstName') + ' ' + this.get('lastName');
-
-            // Call this flag to mark the function as a property
-          }.property()
-        });
-
-        MyApp.president.get('fullName');    => "Barack Obama"
-
-    Treating a function like a property is useful because it can work with
-    bindings, just like any other property.
-
-    Many computed properties have dependencies on other properties. For
-    example, in the above example, the `fullName` property depends on
-    `firstName` and `lastName` to determine its value. You can tell Ember.js
-    about these dependencies like this:
-
-        MyApp.president = Ember.Object.create({
-          firstName: "Barack",
-          lastName: "Obama",
-
-          fullName: function() {
-            return this.get('firstName') + ' ' + this.get('lastName');
-
-            // Tell Ember.js that this computed property depends on firstName
-            // and lastName
-          }.property('firstName', 'lastName')
-        });
-
-    Make sure you list these dependencies so Ember.js knows when to update
-    bindings that connect to a computed property. Changing a dependency
-    will not immediately trigger an update of the computed property, but
-    will instead clear the cache so that it is updated when the next `get`
-    is called on the property.
-
-    Note: you will usually want to use `property(...)` with `cacheable()`.
-
-    @see Ember.ComputedProperty
-    @see Ember.computed
-  */
-  Function.prototype.property = function() {
-    var ret = Ember.computed(this);
-    return ret.property.apply(ret, arguments);
-  };
-
-  /**
-    The `observes` extension of JavaScript's Function prototype is available
-    when Ember.EXTEND_PROTOTYPES is true, which is the default.
-
-    You can observe property changes simply by adding the `observes`
-    call to the end of your method declarations in classes that you write.
-    For example:
-
-        Ember.Object.create({
-          valueObserver: function() {
-            // Executes whenever the "value" property changes
-          }.observes('value')
-        });
-    
-    @see Ember.Observable
-  */
-  Function.prototype.observes = function() {
-    this.__ember_observes__ = a_slice.call(arguments);
-    return this;
-  };
-
-  /**
-    The `observesBefore` extension of JavaScript's Function prototype is
-    available when Ember.EXTEND_PROTOTYPES is true, which is the default.
-
-    You can get notified when a property change is about to happen by adding
-    the `observesBefore` call to the end of your method declarations in
-    classes that you write. For example:
-
-        Ember.Object.create({
-          valueObserver: function() {
-            // Executes whenever the "value" property is about to change
-          }.observesBefore('value')
-        });
-    
-    @see Ember.Observable
-  */
-  Function.prototype.observesBefore = function() {
-    this.__ember_observesBefore__ = a_slice.call(arguments);
-    return this;
-  };
-
-}
-
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Runtime
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Runtime
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-
-
-
-
-
-// ..........................................................
-// HELPERS
-//
-
-var get = Ember.get, set = Ember.set;
-var a_slice = Array.prototype.slice;
-var a_indexOf = Ember.EnumerableUtils.indexOf;
-
-var contexts = [];
-/** @private */
-function popCtx() {
-  return contexts.length===0 ? {} : contexts.pop();
-}
-
-/** @private */
-function pushCtx(ctx) {
-  contexts.push(ctx);
-  return null;
-}
-
-/** @private */
-function iter(key, value) {
-  var valueProvided = arguments.length === 2;
-
-  function i(item) {
-    var cur = get(item, key);
-    return valueProvided ? value===cur : !!cur;
-  }
-  return i ;
-}
-
-/**
-  @class
-
-  This mixin defines the common interface implemented by enumerable objects
-  in Ember.  Most of these methods follow the standard Array iteration
-  API defined up to JavaScript 1.8 (excluding language-specific features that
-  cannot be emulated in older versions of JavaScript).
-
-  This mixin is applied automatically to the Array class on page load, so you
-  can use any of these methods on simple arrays.  If Array already implements
-  one of these methods, the mixin will not override them.
-
-  h3. Writing Your Own Enumerable
-
-  To make your own custom class enumerable, you need two items:
-
-  1. You must have a length property.  This property should change whenever
-     the number of items in your enumerable object changes.  If you are using
-     this with an Ember.Object subclass, you should be sure to change the
-     length property using set().
-
-  2. You must implement nextObject().  See the documentation below.
-
-  Once you have implemented these two items, apply the Ember.Enumerable mixin
-  to your class and you will be able to enumerate the contents of your object
-  like any other collection.
-
-  h3. Using Ember Enumeration with Other Libraries
-
-  Many other libraries provide some kind of iterator or enumeration-like
-  facility.  This is often where the most common API conflicts occur.
-  Ember's API is designed to be as friendly as possible with other
-  libraries by implementing only methods that mostly correspond to the
-  JavaScript 1.8 API.
-
-  @extends Ember.Mixin
-  @since Ember 0.9
-*/
-Ember.Enumerable = Ember.Mixin.create(
-  /** @scope Ember.Enumerable.prototype */ {
-
-  /** @private - compatibility */
-  isEnumerable: true,
-
-  /**
-    Implement this method to make your class enumerable.
-
-    This method will be called repeatedly during enumeration.  The index value
-    will always begin with 0 and increment monotonically.  You don't have to
-    rely on the index value to determine what object to return, but you should
-    always check the value and start from the beginning when you see the
-    requested index is 0.
-
-    The previousObject is the object that was returned from the last call
-    to nextObject for the current iteration.  This is a useful way to
-    manage iteration if you are tracing a linked list, for example.
-
-    Finally the context parameter will always contain a hash you can use as
-    a "scratchpad" to maintain any other state you need in order to iterate
-    properly.  The context object is reused and is not reset between
-    iterations, so make sure you set up the context with a fresh state
-    whenever the index parameter is 0.
-
-    Generally iterators will continue to call nextObject until the index
-    reaches your current length minus one.  If you run out of data before
-    then, you should simply return undefined.
-
-    The default implementation of this method simply looks up the index.
-    This works great on any Array-like objects.
-
-    @param {Number} index the current index of the iteration
-    @param {Object} previousObject the value returned by the last call to nextObject.
-    @param {Object} context a context object you can use to maintain state.
-    @returns {Object} the next object in the iteration or undefined
-  */
-  nextObject: Ember.required(Function),
-
-  /**
-    Helper method returns the first object from a collection.  This is usually
-    used by bindings and other parts of the framework to extract a single
-    object if the enumerable contains only one item.
-
-    If you override this method, you should implement it so that it will
-    always return the same value each time it is called.  If your enumerable
-    contains only one object, this method should always return that object.
-    If your enumerable is empty, this method should return undefined.
-
-        var arr = ["a", "b", "c"];
-        arr.firstObject(); => "a"
-
-        var arr = [];
-        arr.firstObject(); => undefined
-
-    @returns {Object} the object or undefined
-  */
-  firstObject: Ember.computed(function() {
-    if (get(this, 'length')===0) return undefined ;
-
-    // handle generic enumerables
-    var context = popCtx(), ret;
-    ret = this.nextObject(0, null, context);
-    pushCtx(context);
-    return ret ;
-  }).property('[]').cacheable(),
-
-  /**
-    Helper method returns the last object from a collection. If your enumerable
-    contains only one object, this method should always return that object.
-    If your enumerable is empty, this method should return undefined.
-
-        var arr = ["a", "b", "c"];
-        arr.lastObject(); => "c"
-
-        var arr = [];
-        arr.lastObject(); => undefined
-
-    @returns {Object} the last object or undefined
-  */
-  lastObject: Ember.computed(function() {
-    var len = get(this, 'length');
-    if (len===0) return undefined ;
-    var context = popCtx(), idx=0, cur, last = null;
-    do {
-      last = cur;
-      cur = this.nextObject(idx++, last, context);
-    } while (cur !== undefined);
-    pushCtx(context);
-    return last;
-  }).property('[]').cacheable(),
-
-  /**
-    Returns true if the passed object can be found in the receiver.  The
-    default version will iterate through the enumerable until the object
-    is found.  You may want to override this with a more efficient version.
-
-        var arr = ["a", "b", "c"];
-        arr.contains("a"); => true
-        arr.contains("z"); => false
-
-    @param {Object} obj
-      The object to search for.
-
-    @returns {Boolean} true if object is found in enumerable.
-  */
-  contains: function(obj) {
-    return this.find(function(item) { return item===obj; }) !== undefined;
-  },
-
-  /**
-    Iterates through the enumerable, calling the passed function on each
-    item. This method corresponds to the forEach() method defined in
-    JavaScript 1.6.
-
-    The callback method you provide should have the following signature (all
-    parameters are optional):
-
-          function(item, index, enumerable);
-
-    - *item* is the current item in the iteration.
-    - *index* is the current index in the iteration.
-    - *enumerable* is the enumerable object itself.
-
-    Note that in addition to a callback, you can also pass an optional target
-    object that will be set as "this" on the context. This is a good way
-    to give your iterator function access to the current object.
-
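-    For example (illustrative; assumes Ember's Array prototype extensions so
-    that native arrays pick up this mixin):
-
-        var logger = { prefix: "item: " };
-        ["a", "b"].forEach(function(item) {
-          console.log(this.prefix + item);  // `this` is the target
-        }, logger);
-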
-    @param {Function} callback The callback to execute
-    @param {Object} [target] The target object to use
-    @returns {Object} receiver
-  */
-  forEach: function(callback, target) {
-    if (typeof callback !== "function") throw new TypeError() ;
-    var len = get(this, 'length'), last = null, context = popCtx();
-
-    if (target === undefined) target = null;
-
-    for(var idx=0;idx<len;idx++) {
-      var next = this.nextObject(idx, last, context) ;
-      callback.call(target, next, idx, this);
-      last = next ;
-    }
-    last = null ;
-    context = pushCtx(context);
-    return this ;
-  },
-
-  /**
-    Alias for mapProperty
-
-    @param {String} key name of the property
-    @returns {Array} The mapped array.
-  */
-  getEach: function(key) {
-    return this.mapProperty(key);
-  },
-
-  /**
-    Sets the value on the named property for each member. This is more
-    efficient than using other methods defined on this helper. If the object
-    implements Ember.Observable, the value will be changed via set(); otherwise
-    it will be set directly.  Null objects are skipped.
-
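-    For example (an illustrative sketch):
-
-        var people = [Ember.Object.create({ isBusy: false }),
-                      Ember.Object.create({ isBusy: false })];
-        people.setEach('isBusy', true); // every member's isBusy is now true
-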
-    @param {String} key The key to set
-    @param {Object} value The object to set
-    @returns {Object} receiver
-  */
-  setEach: function(key, value) {
-    return this.forEach(function(item) {
-      set(item, key, value);
-    });
-  },
-
-  /**
-    Maps all of the items in the enumeration to another value, returning
-    a new array. This method corresponds to map() defined in JavaScript 1.6.
-
-    The callback method you provide should have the following signature (all
-    parameters are optional):
-
-        function(item, index, enumerable);
-
-    - *item* is the current item in the iteration.
-    - *index* is the current index in the iteration.
-    - *enumerable* is the enumerable object itself.
-
-    It should return the mapped value.
-
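-    For example:
-
-        var arr = [1, 2, 3];
-        arr.map(function(item) { return item * 2; }); => [2, 4, 6]
-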
-    Note that in addition to a callback, you can also pass an optional target
-    object that will be set as "this" on the context. This is a good way
-    to give your iterator function access to the current object.
-
-    @param {Function} callback The callback to execute
-    @param {Object} [target] The target object to use
-    @returns {Array} The mapped array.
-  */
-  map: function(callback, target) {
-    var ret = [];
-    this.forEach(function(x, idx, i) {
-      ret[idx] = callback.call(target, x, idx,i);
-    });
-    return ret ;
-  },
-
-  /**
-    Similar to map, this specialized function returns the value of the named
-    property on all items in the enumeration.
-
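-    For example (illustrative):
-
-        var people = [{ name: "John" }, { name: "Jane" }];
-        people.mapProperty('name'); => ["John", "Jane"]
-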
-    @param {String} key name of the property
-    @returns {Array} The mapped array.
-  */
-  mapProperty: function(key) {
-    return this.map(function(next) {
-      return get(next, key);
-    });
-  },
-
-  /**
-    Returns an array with all of the items in the enumeration that the passed
-    function returns true for. This method corresponds to filter() defined in
-    JavaScript 1.6.
-
-    The callback method you provide should have the following signature (all
-    parameters are optional):
-
-          function(item, index, enumerable);
-
-    - *item* is the current item in the iteration.
-    - *index* is the current index in the iteration.
-    - *enumerable* is the enumerable object itself.
-
-    It should return true to include the item in the results, false otherwise.
-
-    Note that in addition to a callback, you can also pass an optional target
-    object that will be set as "this" on the context. This is a good way
-    to give your iterator function access to the current object.
-
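-    For example:
-
-        var arr = [1, 2, 3, 4];
-        arr.filter(function(item) { return item > 2; }); => [3, 4]
-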
-    @param {Function} callback The callback to execute
-    @param {Object} [target] The target object to use
-    @returns {Array} A filtered array.
-  */
-  filter: function(callback, target) {
-    var ret = [];
-    this.forEach(function(x, idx, i) {
-      if (callback.call(target, x, idx, i)) ret.push(x);
-    });
-    return ret ;
-  },
-
-  /**
-    Returns an array with just the items with the matched property.  You
-    can pass an optional second argument with the target value.  Otherwise
-    this will match any property that evaluates to true.
-
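-    For example (illustrative):
-
-        var people = [{ name: "John", isAdmin: true },
-                      { name: "Jane", isAdmin: false }];
-        people.filterProperty('isAdmin', true); => [{ name: "John", isAdmin: true }]
-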
-    @param {String} key the property to test
-    @param {String} [value] optional value to test against.
-    @returns {Array} filtered array
-  */
-  filterProperty: function(key, value) {
-    return this.filter(iter.apply(this, arguments));
-  },
-
-  /**
-    Returns the first item in the array for which the callback returns true.
-    This method works similarly to the filter() method defined in JavaScript 1.6,
-    except that it stops iterating over the array once a match is found.
-
-    The callback method you provide should have the following signature (all
-    parameters are optional):
-
-          function(item, index, enumerable);
-
-    - *item* is the current item in the iteration.
-    - *index* is the current index in the iteration.
-    - *enumerable* is the enumerable object itself.
-
-    It should return true to indicate a match, false otherwise.
-
-    Note that in addition to a callback, you can also pass an optional target
-    object that will be set as "this" on the context. This is a good way
-    to give your iterator function access to the current object.
-
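-    For example:
-
-        var arr = [1, 2, 3, 4];
-        arr.find(function(item) { return item > 2; }); => 3
-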
-    @param {Function} callback The callback to execute
-    @param {Object} [target] The target object to use
-    @returns {Object} Found item or undefined.
-  */
-  find: function(callback, target) {
-    var len = get(this, 'length') ;
-    if (target === undefined) target = null;
-
-    var last = null, next, found = false, ret ;
-    var context = popCtx();
-    for(var idx=0;idx<len && !found;idx++) {
-      next = this.nextObject(idx, last, context) ;
-      if (found = callback.call(target, next, idx, this)) ret = next ;
-      last = next ;
-    }
-    next = last = null ;
-    context = pushCtx(context);
-    return ret ;
-  },
-
-  /**
-    Returns the first item with a property matching the passed value.  You
-    can pass an optional second argument with the target value.  Otherwise
-    this will match any property that evaluates to true.
-
-    This method works much like the more generic find() method.
-
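-    For example (illustrative):
-
-        var people = [{ name: "John", isAdmin: false },
-                      { name: "Jane", isAdmin: true }];
-        people.findProperty('isAdmin', true); => { name: "Jane", isAdmin: true }
-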
-    @param {String} key the property to test
-    @param {String} [value] optional value to test against.
-    @returns {Object} found item or undefined
-  */
-  findProperty: function(key, value) {
-    return this.find(iter.apply(this, arguments));
-  },
-
-  /**
-    Returns true if the passed function returns true for every item in the
-    enumeration. This corresponds with the every() method in JavaScript 1.6.
-
-    The callback method you provide should have the following signature (all
-    parameters are optional):
-
-          function(item, index, enumerable);
-
-    - *item* is the current item in the iteration.
-    - *index* is the current index in the iteration.
-    - *enumerable* is the enumerable object itself.
-
-    It should return true or false.
-
-    Note that in addition to a callback, you can also pass an optional target
-    object that will be set as "this" on the context. This is a good way
-    to give your iterator function access to the current object.
-
-    Example Usage:
-
-          if (people.every(isEngineer)) { Paychecks.addBigBonus(); }
-
-    @param {Function} callback The callback to execute
-    @param {Object} [target] The target object to use
-    @returns {Boolean}
-  */
-  every: function(callback, target) {
-    return !this.find(function(x, idx, i) {
-      return !callback.call(target, x, idx, i);
-    });
-  },
-
-  /**
-    Returns true if the passed property resolves to true for all items in the
-    enumerable.  This method is often simpler/faster than using a callback.
-
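-    For example (illustrative):
-
-        var people = [{ name: "John", isHappy: true },
-                      { name: "Jane", isHappy: false }];
-        people.everyProperty('isHappy', true); => false
-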
-    @param {String} key the property to test
-    @param {String} [value] optional value to test against.
-    @returns {Boolean}
-  */
-  everyProperty: function(key, value) {
-    return this.every(iter.apply(this, arguments));
-  },
-
-
-  /**
-    Returns true if the passed function returns true for any item in the
-    enumeration. This corresponds with the some() method in JavaScript 1.6.
-
-    The callback method you provide should have the following signature (all
-    parameters are optional):
-
-          function(item, index, enumerable);
-
-    - *item* is the current item in the iteration.
-    - *index* is the current index in the iteration.
-    - *enumerable* is the enumerable object itself.
-
-    It should return true to indicate a match, false otherwise.
-
-    Note that in addition to a callback, you can also pass an optional target
-    object that will be set as "this" on the context. This is a good way
-    to give your iterator function access to the current object.
-
-    Usage Example:
-
-          if (people.some(isManager)) { Paychecks.addBiggerBonus(); }
-
-    @param {Function} callback The callback to execute
-    @param {Object} [target] The target object to use
-    @returns {Boolean} true if the callback returned true for any item.
-  */
-  some: function(callback, target) {
-    return !!this.find(function(x, idx, i) {
-      return !!callback.call(target, x, idx, i);
-    });
-  },
-
-  /**
-    Returns true if the passed property resolves to true for any item in the
-    enumerable.  This method is often simpler/faster than using a callback.
-
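-    For example (illustrative):
-
-        var people = [{ name: "John", isHappy: true },
-                      { name: "Jane", isHappy: false }];
-        people.someProperty('isHappy', true); => true
-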
-    @param {String} key the property to test
-    @param {String} [value] optional value to test against.
-    @returns {Boolean} true if the property matches for any item.
-  */
-  someProperty: function(key, value) {
-    return this.some(iter.apply(this, arguments));
-  },
-
-  /**
-    This will combine the values of the enumerator into a single value. It
-    is a useful way to collect a summary value from an enumeration. This
-    corresponds to the reduce() method defined in JavaScript 1.8.
-
-    The callback method you provide should have the following signature (all
-    parameters are optional):
-
-          function(previousValue, item, index, enumerable);
-
-    - *previousValue* is the value returned by the last call to the iterator.
-    - *item* is the current item in the iteration.
-    - *index* is the current index in the iteration.
-    - *enumerable* is the enumerable object itself.
-
-    Return the new cumulative value.
-
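-    For example:
-
-        var arr = [1, 2, 3];
-        arr.reduce(function(sum, item) { return sum + item; }, 0); => 6
-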
-    In addition to the callback you can also pass an initialValue. An error
-    will be raised if you do not pass an initial value and the enumerator is
-    empty.
-
-    Note that unlike the other methods, this method does not allow you to
-    pass a target object to set as this for the callback. It's part of the
-    spec. Sorry.
-
-    @param {Function} callback The callback to execute
-    @param {Object} initialValue Initial value for the reduce
-    @param {String} reducerProperty internal use only.
-    @returns {Object} The reduced value.
-  */
-  reduce: function(callback, initialValue, reducerProperty) {
-    if (typeof callback !== "function") { throw new TypeError(); }
-
-    var ret = initialValue;
-
-    this.forEach(function(item, i) {
-      ret = callback.call(null, ret, item, i, this, reducerProperty);
-    }, this);
-
-    return ret;
-  },
-
-  /**
-    Invokes the named method on every object in the receiver that
-    implements it.  This method corresponds to the implementation in
-    Prototype 1.6.
-
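-    For example (illustrative; members lacking the method are skipped):
-
-        var arr = ["a", "b"];
-        arr.invoke('toUpperCase'); => ["A", "B"]
-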
-    @param {String} methodName the name of the method
-    @param {Object...} args optional arguments to pass as well.
-    @returns {Array} return values from calling invoke.
-  */
-  invoke: function(methodName) {
-    var args, ret = [];
-    if (arguments.length>1) args = a_slice.call(arguments, 1);
-
-    this.forEach(function(x, idx) {
-      var method = x && x[methodName];
-      if ('function' === typeof method) {
-        ret[idx] = args ? method.apply(x, args) : method.call(x);
-      }
-    }, this);
-
-    return ret;
-  },
-
-  /**
-    Simply converts the enumerable into a genuine array.  The order is not
-    guaranteed.  Corresponds to the method implemented by Prototype.
-
-    @returns {Array} the enumerable as an array.
-  */
-  toArray: function() {
-    var ret = [];
-    this.forEach(function(o, idx) { ret[idx] = o; });
-    return ret ;
-  },
-
-  /**
-    Returns a copy of the array with all null elements removed.
-
-        var arr = ["a", null, "c", null];
-        arr.compact(); => ["a", "c"]
-
-    @returns {Array} the array without null elements.
-  */
-  compact: function() { return this.without(null); },
-
-  /**
-    Returns a new enumerable that excludes the passed value.  The default
-    implementation returns an array regardless of the receiver type unless
-    the receiver does not contain the value.
-
-        var arr = ["a", "b", "a", "c"];
-        arr.without("a"); => ["b", "c"]
-
-    @param {Object} value
-    @returns {Ember.Enumerable}
-  */
-  without: function(value) {
-    if (!this.contains(value)) return this; // nothing to do
-    var ret = [] ;
-    this.forEach(function(k) {
-      if (k !== value) ret[ret.length] = k;
-    }) ;
-    return ret ;
-  },
-
-  /**
-    Returns a new enumerable that contains only unique values.  The default
-    implementation returns an array regardless of the receiver type.
-
-        var arr = ["a", "a", "b", "b"];
-        arr.uniq(); => ["a", "b"]
-
-    @returns {Ember.Enumerable}
-  */
-  uniq: function() {
-    var ret = [];
-    this.forEach(function(k){
-      if (a_indexOf(ret, k)<0) ret.push(k);
-    });
-    return ret;
-  },
-
-  /**
-    This property will trigger anytime the enumerable's content changes.
-    You can observe this property to be notified of changes to the enumerable's
-    content.
-
-    For plain enumerables, this property is read only.  Ember.Array overrides
-    this property.
-
-    @type Ember.Array
-  */
-  '[]': Ember.computed(function(key, value) {
-    return this;
-  }).property().cacheable(),
-
-  // ..........................................................
-  // ENUMERABLE OBSERVERS
-  //
-
-  /**
-    Registers an enumerable observer.  The target must implement the
-    Ember.EnumerableObserver mixin.
-  */
-  addEnumerableObserver: function(target, opts) {
-    var willChange = (opts && opts.willChange) || 'enumerableWillChange',
-        didChange  = (opts && opts.didChange) || 'enumerableDidChange';
-
-    var hasObservers = get(this, 'hasEnumerableObservers');
-    if (!hasObservers) Ember.propertyWillChange(this, 'hasEnumerableObservers');
-    Ember.addListener(this, '@enumerable:before', target, willChange);
-    Ember.addListener(this, '@enumerable:change', target, didChange);
-    if (!hasObservers) Ember.propertyDidChange(this, 'hasEnumerableObservers');
-    return this;
-  },
-
-  /**
-    Removes a registered enumerable observer.
-  */
-  removeEnumerableObserver: function(target, opts) {
-    var willChange = (opts && opts.willChange) || 'enumerableWillChange',
-        didChange  = (opts && opts.didChange) || 'enumerableDidChange';
-
-    var hasObservers = get(this, 'hasEnumerableObservers');
-    if (hasObservers) Ember.propertyWillChange(this, 'hasEnumerableObservers');
-    Ember.removeListener(this, '@enumerable:before', target, willChange);
-    Ember.removeListener(this, '@enumerable:change', target, didChange);
-    if (hasObservers) Ember.propertyDidChange(this, 'hasEnumerableObservers');
-    return this;
-  },
-
-  /**
-    Becomes true whenever the array currently has observers watching changes
-    on the array.
-
-    @type Boolean
-  */
-  hasEnumerableObservers: Ember.computed(function() {
-    return Ember.hasListeners(this, '@enumerable:change') || Ember.hasListeners(this, '@enumerable:before');
-  }).property().cacheable(),
-
-
-  /**
-    Invoke this method just before the contents of your enumerable change.
-    You can either omit the parameters completely, pass the objects to be
-    removed or added if available, or pass just a count.
-
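-    For example, a custom addObject implementation might bracket its mutation
-    like this (an illustrative sketch; the underlying storage is up to you):
-
-        addObject: function(obj) {
-          if (this.contains(obj)) return obj;
-          this.enumerableContentWillChange(0, [obj]);
-          // ... insert obj into the underlying storage ...
-          this.enumerableContentDidChange(0, [obj]);
-          return obj;
-        }
-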
-    @param {Ember.Enumerable|Number} removing
-      An enumerable of the objects to be removed or the number of items to
-      be removed.
-
-    @param {Ember.Enumerable|Number} adding
-      An enumerable of the objects to be added or the number of items to be
-      added.
-
-    @returns {Ember.Enumerable} receiver
-  */
-  enumerableContentWillChange: function(removing, adding) {
-
-    var removeCnt, addCnt, hasDelta;
-
-    if ('number' === typeof removing) removeCnt = removing;
-    else if (removing) removeCnt = get(removing, 'length');
-    else removeCnt = removing = -1;
-
-    if ('number' === typeof adding) addCnt = adding;
-    else if (adding) addCnt = get(adding,'length');
-    else addCnt = adding = -1;
-
-    hasDelta = addCnt<0 || removeCnt<0 || addCnt-removeCnt!==0;
-
-    if (removing === -1) removing = null;
-    if (adding   === -1) adding   = null;
-
-    Ember.propertyWillChange(this, '[]');
-    if (hasDelta) Ember.propertyWillChange(this, 'length');
-    Ember.sendEvent(this, '@enumerable:before', [this, removing, adding]);
-
-    return this;
-  },
-
-  /**
-    Invoke this method when the contents of your enumerable has changed.
-    This will notify any observers watching for content changes.  If you are
-    implementing an ordered enumerable (such as an array), see also
-    arrayContentDidChange(), which additionally passes the start index of the
-    change so that range observers can be notified.
-
-    @param {Ember.Enumerable|Number} removing
-      An enumerable of the objects to be removed or the number of items to
-      be removed.
-
-    @param {Ember.Enumerable|Number} adding
-      An enumerable of the objects to be added or the number of items to be
-      added.
-
-    @returns {Object} receiver
-  */
-  enumerableContentDidChange: function(removing, adding) {
-    var notify = this.propertyDidChange, removeCnt, addCnt, hasDelta;
-
-    if ('number' === typeof removing) removeCnt = removing;
-    else if (removing) removeCnt = get(removing, 'length');
-    else removeCnt = removing = -1;
-
-    if ('number' === typeof adding) addCnt = adding;
-    else if (adding) addCnt = get(adding, 'length');
-    else addCnt = adding = -1;
-
-    hasDelta = addCnt<0 || removeCnt<0 || addCnt-removeCnt!==0;
-
-    if (removing === -1) removing = null;
-    if (adding   === -1) adding   = null;
-
-    Ember.sendEvent(this, '@enumerable:change', [this, removing, adding]);
-    if (hasDelta) Ember.propertyDidChange(this, 'length');
-    Ember.propertyDidChange(this, '[]');
-
-    return this ;
-  }
-
-}) ;
-
-
-
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Runtime
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-// ..........................................................
-// HELPERS
-//
-
-var get = Ember.get, set = Ember.set, meta = Ember.meta, map = Ember.EnumerableUtils.map, cacheFor = Ember.cacheFor;
-
-/** @private */
-function none(obj) { return obj===null || obj===undefined; }
-
-// ..........................................................
-// ARRAY
-//
-/**
-  @namespace
-
-  This module implements Observer-friendly Array-like behavior.  This mixin is
-  picked up by the Array class as well as other controllers, etc. that want to
-  appear to be arrays.
-
-  Unlike Ember.Enumerable, this mixin defines methods specifically for
-  collections that provide index-ordered access to their contents.  When you
-  are designing code that needs to accept any kind of Array-like object, you
-  should use these methods instead of Array primitives because these will
-  properly notify observers of changes to the array.
-
-  Although these methods are efficient, they do add a layer of indirection to
-  your application so it is a good idea to use them only when you need the
-  flexibility of using both true JavaScript arrays and "virtual" arrays such
-  as controllers and collections.
-
-  You can use the methods defined in this module to access and modify array
-  contents in a KVO-friendly way.  You can also be notified whenever the
-  membership of an array changes by changing the syntax of the property to
-  .observes('*myProperty.[]').
-
-  To support Ember.Array in your own class, you must override two
-  primitives to use it: replace() and objectAt().
-
-  Note that the Ember.Array mixin also incorporates the Ember.Enumerable mixin.  All
-  Ember.Array-like objects are also enumerable.
-
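-  For example, a minimal read-only Ember.Array implementation might look like
-  this (an illustrative sketch; _content is a hypothetical backing store):
-
-      var Words = Ember.Object.extend(Ember.Array, {
-        _content: null,
-        length: Ember.computed(function() {
-          return this._content ? this._content.length : 0;
-        }).property('_content').cacheable(),
-        objectAt: function(idx) {
-          return this._content ? this._content[idx] : undefined;
-        }
-      });
-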
-  @extends Ember.Enumerable
-  @since Ember 0.9.0
-*/
-Ember.Array = Ember.Mixin.create(Ember.Enumerable, /** @scope Ember.Array.prototype */ {
-
-  /** @private - compatibility */
-  isSCArray: true,
-
-  /**
-    @field {Number} length
-
-    Your array must support the length property. Your replace methods should
-    set this property whenever it changes.
-  */
-  length: Ember.required(),
-
-  /**
-    Returns the object at the given index. If the given index is negative or
-    is greater than or equal to the array length, returns `undefined`.
-
-    This is one of the primitives you must implement to support `Ember.Array`.
-    If your object supports retrieving the value of an array item using `get()`
-    (i.e. `myArray.get(0)`), then you do not need to implement this method
-    yourself.
-
-        var arr = ['a', 'b', 'c', 'd'];
-        arr.objectAt(0);  => "a"
-        arr.objectAt(3);  => "d"
-        arr.objectAt(-1); => undefined
-        arr.objectAt(4);  => undefined
-        arr.objectAt(5);  => undefined
-
-    @param {Number} idx
-      The index of the item to return.
-  */
-  objectAt: function(idx) {
-    if ((idx < 0) || (idx>=get(this, 'length'))) return undefined ;
-    return get(this, idx);
-  },
-
-  /**
-    This returns the objects at the specified indexes, using `objectAt`.
-
-        var arr = ['a', 'b', 'c', 'd'];
-        arr.objectsAt([0, 1, 2]) => ["a", "b", "c"]
-        arr.objectsAt([2, 3, 4]) => ["c", "d", undefined]
-
-    @param {Array} indexes
-      An array of indexes of items to return.
-   */
-  objectsAt: function(indexes) {
-    var self = this;
-    return map(indexes, function(idx){ return self.objectAt(idx); });
-  },
-
-  /** @private (nodoc) - overrides Ember.Enumerable version */
-  nextObject: function(idx) {
-    return this.objectAt(idx);
-  },
-
-  /**
-    @field []
-
-    This is the handler for the special array content property.  If you get
-    this property, it will return this.  If you set this property to a new
-    array, it will replace the current content.
-
-    This property overrides the default property defined in Ember.Enumerable.
-  */
-  '[]': Ember.computed(function(key, value) {
-    if (value !== undefined) this.replace(0, get(this, 'length'), value) ;
-    return this ;
-  }).property().cacheable(),
-
-  firstObject: Ember.computed(function() {
-    return this.objectAt(0);
-  }).property().cacheable(),
-
-  lastObject: Ember.computed(function() {
-    return this.objectAt(get(this, 'length')-1);
-  }).property().cacheable(),
-
-  /** @private (nodoc) - optimized version from Enumerable */
-  contains: function(obj){
-    return this.indexOf(obj) >= 0;
-  },
-
-  // Add any extra methods to Ember.Array that are native to the built-in Array.
-  /**
-    Returns a new array that is a slice of the receiver. This implementation
-    uses the observable array methods to retrieve the objects for the new
-    slice.
-
-        var arr = ['red', 'green', 'blue'];
-        arr.slice(0);      => ['red', 'green', 'blue']
-        arr.slice(0, 2);   => ['red', 'green']
-        arr.slice(1, 100); => ['green', 'blue']
-
-    @param beginIndex {Integer} (Optional) index to begin slicing from.
-    @param endIndex {Integer} (Optional) index to end the slice at.
-    @returns {Array} New array with specified slice
-  */
-  slice: function(beginIndex, endIndex) {
-    var ret = [];
-    var length = get(this, 'length') ;
-    if (none(beginIndex)) beginIndex = 0 ;
-    if (none(endIndex) || (endIndex > length)) endIndex = length ;
-    while(beginIndex < endIndex) {
-      ret[ret.length] = this.objectAt(beginIndex++) ;
-    }
-    return ret ;
-  },
-
-  /**
-    Returns the index of the given object's first occurrence.
-    If no startAt argument is given, the starting location to
-    search is 0. If it's negative, will count backward from
-    the end of the array. Returns -1 if no match is found.
-
-        var arr = ["a", "b", "c", "d", "a"];
-        arr.indexOf("a");      =>  0
-        arr.indexOf("z");      => -1
-        arr.indexOf("a", 2);   =>  4
-        arr.indexOf("a", -1);  =>  4
-        arr.indexOf("b", 3);   => -1
-        arr.indexOf("a", 100); => -1
-
-    @param {Object} object the item to search for
-    @param {Number} startAt optional starting location to search, default 0
-    @returns {Number} index or -1 if not found
-  */
-  indexOf: function(object, startAt) {
-    var idx, len = get(this, 'length');
-
-    if (startAt === undefined) startAt = 0;
-    if (startAt < 0) startAt += len;
-
-    for(idx=startAt;idx<len;idx++) {
-      if (this.objectAt(idx, true) === object) return idx ;
-    }
-    return -1;
-  },
-
-  /**
-    Returns the index of the given object's last occurrence.
-    If no startAt argument is given, the search starts from
-    the last position. If it's negative, will count backward
-    from the end of the array. Returns -1 if no match is found.
-
-        var arr = ["a", "b", "c", "d", "a"];
-        arr.lastIndexOf("a");      =>  4
-        arr.lastIndexOf("z");      => -1
-        arr.lastIndexOf("a", 2);   =>  0
-        arr.lastIndexOf("a", -1);  =>  4
-        arr.lastIndexOf("b", 3);   =>  1
-        arr.lastIndexOf("a", 100); =>  4
-
-    @param {Object} object the item to search for
-    @param {Number} startAt optional starting location to search, default 0
-    @returns {Number} index or -1 if not found
-  */
-  lastIndexOf: function(object, startAt) {
-    var idx, len = get(this, 'length');
-
-    if (startAt === undefined || startAt >= len) startAt = len-1;
-    if (startAt < 0) startAt += len;
-
-    for(idx=startAt;idx>=0;idx--) {
-      if (this.objectAt(idx) === object) return idx ;
-    }
-    return -1;
-  },
-
-  // ..........................................................
-  // ARRAY OBSERVERS
-  //
-
-  /**
-    Adds an array observer to the receiving array.  The array observer object
-    normally must implement two methods:
-
-    * `arrayWillChange(observedObj, start, removeCount, addCount)` - This
-      method will be called just before the array is modified.
-    * `arrayDidChange(observedObj, start, removeCount, addCount)` - This
-      method will be called just after the array is modified.
-
-    Both callbacks will be passed the starting index of the change as well as
-    a count of the items to be removed and added.  You can use these callbacks
-    to optionally inspect the array during the change, clear caches, or do
-    any other bookkeeping necessary.
-
-    In addition to passing a target, you can also include an options hash
-    which you can use to override the method names that will be invoked on the
-    target.
-
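-    For example (an illustrative observer object; words is assumed to be an
-    Ember.Array):
-
-        var observer = {
-          arrayWillChange: function(observedObj, start, removeCount, addCount) {
-            // inspect the array before the change
-          },
-          arrayDidChange: function(observedObj, start, removeCount, addCount) {
-            // respond to the change
-          }
-        };
-        words.addArrayObserver(observer);
-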
-    @param {Object} target
-      The observer object.
-
-    @param {Hash} opts
-      Optional hash of configuration options including willChange, didChange,
-      and a context option.
-
-    @returns {Ember.Array} receiver
-  */
-  addArrayObserver: function(target, opts) {
-    var willChange = (opts && opts.willChange) || 'arrayWillChange',
-        didChange  = (opts && opts.didChange) || 'arrayDidChange';
-
-    var hasObservers = get(this, 'hasArrayObservers');
-    if (!hasObservers) Ember.propertyWillChange(this, 'hasArrayObservers');
-    Ember.addListener(this, '@array:before', target, willChange);
-    Ember.addListener(this, '@array:change', target, didChange);
-    if (!hasObservers) Ember.propertyDidChange(this, 'hasArrayObservers');
-    return this;
-  },
-
-  /**
-    Removes an array observer from the object if the observer is currently
-    registered.  Calling this method multiple times with the same object will
-    have no effect.
-
-    @param {Object} target
-      The object observing the array.
-
-    @returns {Ember.Array} receiver
-  */
-  removeArrayObserver: function(target, opts) {
-    var willChange = (opts && opts.willChange) || 'arrayWillChange',
-        didChange  = (opts && opts.didChange) || 'arrayDidChange';
-
-    var hasObservers = get(this, 'hasArrayObservers');
-    if (hasObservers) Ember.propertyWillChange(this, 'hasArrayObservers');
-    Ember.removeListener(this, '@array:before', target, willChange);
-    Ember.removeListener(this, '@array:change', target, didChange);
-    if (hasObservers) Ember.propertyDidChange(this, 'hasArrayObservers');
-    return this;
-  },
-
-  /**
-    Becomes true whenever the array currently has observers watching changes
-    on the array.
-
-    @type Boolean
-  */
-  hasArrayObservers: Ember.computed(function() {
-    return Ember.hasListeners(this, '@array:change') || Ember.hasListeners(this, '@array:before');
-  }).property().cacheable(),
-
-  /**
-    If you are implementing an object that supports Ember.Array, call this
-    method just before the array content changes to notify any observers and
-    invalidate any related properties.  Pass the starting index of the change
-    as well as a delta of the amounts to change.
-
-    @param {Number} startIdx
-      The starting index in the array that will change.
-
-    @param {Number} removeAmt
-      The number of items that will be removed.  If you pass null, 0 is assumed.
-
-    @param {Number} addAmt
-      The number of items that will be added.  If you pass null, 0 is assumed.
-
-    @returns {Ember.Array} receiver
-  */
-  arrayContentWillChange: function(startIdx, removeAmt, addAmt) {
-
-    // if no args are passed assume everything changes
-    if (startIdx===undefined) {
-      startIdx = 0;
-      removeAmt = addAmt = -1;
-    } else {
-      if (removeAmt === undefined) removeAmt=-1;
-      if (addAmt    === undefined) addAmt=-1;
-    }
-
-    // Make sure the @each proxy is set up if anyone is observing @each
-    if (Ember.isWatching(this, '@each')) { get(this, '@each'); }
-
-    Ember.sendEvent(this, '@array:before', [this, startIdx, removeAmt, addAmt]);
-
-    var removing, lim;
-    if (startIdx>=0 && removeAmt>=0 && get(this, 'hasEnumerableObservers')) {
-      removing = [];
-      lim = startIdx+removeAmt;
-      for(var idx=startIdx;idx<lim;idx++) removing.push(this.objectAt(idx));
-    } else {
-      removing = removeAmt;
-    }
-
-    this.enumerableContentWillChange(removing, addAmt);
-
-    return this;
-  },
-
-  arrayContentDidChange: function(startIdx, removeAmt, addAmt) {
-
-    // if no args are passed assume everything changes
-    if (startIdx===undefined) {
-      startIdx = 0;
-      removeAmt = addAmt = -1;
-    } else {
-      if (removeAmt === undefined) removeAmt=-1;
-      if (addAmt    === undefined) addAmt=-1;
-    }
-
-    var adding, lim;
-    if (startIdx>=0 && addAmt>=0 && get(this, 'hasEnumerableObservers')) {
-      adding = [];
-      lim = startIdx+addAmt;
-      for(var idx=startIdx;idx<lim;idx++) adding.push(this.objectAt(idx));
-    } else {
-      adding = addAmt;
-    }
-
-    this.enumerableContentDidChange(removeAmt, adding);
-    Ember.sendEvent(this, '@array:change', [this, startIdx, removeAmt, addAmt]);
-
-    var length      = get(this, 'length'),
-        cachedFirst = cacheFor(this, 'firstObject'),
-        cachedLast  = cacheFor(this, 'lastObject');
-    if (this.objectAt(0) !== cachedFirst) {
-      Ember.propertyWillChange(this, 'firstObject');
-      Ember.propertyDidChange(this, 'firstObject');
-    }
-    if (this.objectAt(length-1) !== cachedLast) {
-      Ember.propertyWillChange(this, 'lastObject');
-      Ember.propertyDidChange(this, 'lastObject');
-    }
-
-    return this;
-  },
-
-  // ..........................................................
-  // ENUMERATED PROPERTIES
-  //
-
-  /**
-    Returns a special object that can be used to observe individual properties
-    on the array.  Just get an equivalent property on this object and it will
-    return an enumerable that maps automatically to the named key on the
-    member objects.
-  */
-  '@each': Ember.computed(function() {
-    if (!this.__each) this.__each = new Ember.EachProxy(this);
-    return this.__each;
-  }).property().cacheable()
-
-}) ;
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Runtime
-// Copyright: ©2006-2011 Strobe Inc. and contributors.
-//            Portions ©2008-2011 Apple Inc. All rights reserved.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-/**
-  @namespace
-
-  Implements some standard methods for comparing objects. Add this mixin to
-  any class you create that can compare its instances.
-
-  You should implement the compare() method.
-
-  @extends Ember.Mixin
-  @since Ember 0.9
-*/
-Ember.Comparable = Ember.Mixin.create( /** @scope Ember.Comparable.prototype */{
-
-  /**
-    Walk like a duck.  Indicates that the object can be compared.
-
-    @type Boolean
-    @default true
-    @constant
-  */
-  isComparable: true,
-
-  /**
-    Override to return the result of the comparison of the two parameters. The
-    compare method should return:
-
-      - `-1` if `a < b`
-      - `0` if `a == b`
-      - `1` if `a > b`
-
-    Default implementation raises an exception.
-
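-    For example (an illustrative sketch):
-
-        var Money = Ember.Object.extend(Ember.Comparable, {
-          amount: 0,
-          compare: function(a, b) {
-            var av = a.get('amount'), bv = b.get('amount');
-            return av < bv ? -1 : (av > bv ? 1 : 0);
-          }
-        });
-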
-    @param a {Object} the first object to compare
-    @param b {Object} the second object to compare
-    @returns {Integer} the result of the comparison
-  */
-  compare: Ember.required(Function)
-
-});
-
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Runtime
-// Copyright: ©2006-2011 Strobe Inc. and contributors.
-//            Portions ©2008-2010 Apple Inc. All rights reserved.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-var get = Ember.get, set = Ember.set;
-
-/**
-  @namespace
-
-  Implements some standard methods for copying an object.  Add this mixin to
-  any object you create that can create a copy of itself.  This mixin is
-  added automatically to the built-in array.
-
-  You should generally implement the copy() method to return a copy of the
-  receiver.
-
-  Note that frozenCopy() will only work if you also implement Ember.Freezable.
-
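-  For example (an illustrative sketch):
-
-      var Point = Ember.Object.extend(Ember.Copyable, {
-        x: 0, y: 0,
-        copy: function(deep) {
-          return Point.create({ x: this.get('x'), y: this.get('y') });
-        }
-      });
-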
-  @extends Ember.Mixin
-  @since Ember 0.9
-*/
-Ember.Copyable = Ember.Mixin.create(
-/** @scope Ember.Copyable.prototype */ {
-
-  /**
-    Override to return a copy of the receiver.  Default implementation raises
-    an exception.
-
-    @function
-    @param deep {Boolean} if true, a deep copy of the object should be made
-    @returns {Object} copy of receiver
-  */
-  copy: Ember.required(Function),
-
-  /**
-    If the object implements Ember.Freezable, then this will return a new copy
-    if the object is not frozen and the receiver if the object is frozen.
-
-    Raises an exception if you try to call this method on an object that does
-    not support freezing.
-
-    You should use this method whenever you want a copy of a freezable object
-    since a freezable object can simply return itself without actually
-    consuming more memory.
-
-    @returns {Object} copy of receiver or receiver
-  */
-  frozenCopy: function() {
-    if (Ember.Freezable && Ember.Freezable.detect(this)) {
-      return get(this, 'isFrozen') ? this : this.copy().freeze();
-    } else {
-      throw new Error(Ember.String.fmt("%@ does not support freezing", [this]));
-    }
-  }
-});
-
-
-
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Runtime
-// Copyright: ©2006-2011 Strobe Inc. and contributors.
-//            Portions ©2008-2010 Apple Inc. All rights reserved.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-
-
-
-
-
-var get = Ember.get, set = Ember.set;
-
-/**
-  @namespace
-
-  The Ember.Freezable mixin implements some basic methods for marking an object
-  as frozen. Once an object is frozen it should be read only. No changes
-  may be made to the internal state of the object.
-
-  ## Enforcement
-
-  To fully support freezing in your subclass, you must include this mixin and
-  override any method that might alter any property on the object to instead
-  raise an exception. You can check the state of an object by checking the
-  isFrozen property.
-
-  Although future versions of JavaScript may support language-level freezing
-  of objects, that is not the case today. Even if an object is freezable,
-  it is still technically possible to modify the object, even though it could
-  break other parts of your application that do not expect a frozen object to
-  change. It is, therefore, very important that you always respect the
-  isFrozen property on all freezable objects.
-
-  ## Example Usage
-
-  The example below shows a simple object that implements the Ember.Freezable
-  protocol.
-
-        Contact = Ember.Object.extend(Ember.Freezable, {
-
-          firstName: null,
-
-          lastName: null,
-
-          // swaps the names
-          swapNames: function() {
-            if (this.get('isFrozen')) throw Ember.FROZEN_ERROR;
-            var tmp = this.get('firstName');
-            this.set('firstName', this.get('lastName'));
-            this.set('lastName', tmp);
-            return this;
-          }
-
-        });
-
-        c = Contact.create({ firstName: "John", lastName: "Doe" });
-        c.swapNames();  => returns c
-        c.freeze();
-        c.swapNames();  => EXCEPTION
-
-  ## Copying
-
-  Usually the Ember.Freezable protocol is implemented in cooperation with the
-  Ember.Copyable protocol, which defines a frozenCopy() method that will return
-  a frozen object, if the object implements this method as well.
-
-  @extends Ember.Mixin
-  @since Ember 0.9
-*/
-Ember.Freezable = Ember.Mixin.create(
-/** @scope Ember.Freezable.prototype */ {
-
-  /**
-    Set to true when the object is frozen.  Use this property to detect whether
-    your object is frozen or not.
-
-    @type Boolean
-  */
-  isFrozen: false,
-
-  /**
-    Freezes the object.  Once this method has been called the object should
-    no longer allow any properties to be edited.
-
-    @returns {Object} receiver
-  */
-  freeze: function() {
-    if (get(this, 'isFrozen')) return this;
-    set(this, 'isFrozen', true);
-    return this;
-  }
-
-});
-
-Ember.FROZEN_ERROR = "Frozen object cannot be modified.";
-
-
-
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Runtime
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-var forEach = Ember.EnumerableUtils.forEach;
-
-/**
-  @class
-
-  This mixin defines the API for modifying generic enumerables.  These methods
-  can be applied to an object regardless of whether it is ordered or
-  unordered.
-
-  Note that an Enumerable can change even if it does not implement this mixin.
-  For example, a MappedEnumerable cannot be directly modified but if its
-  underlying enumerable changes, it will change also.
-
-  ## Adding Objects
-
-  To add an object to an enumerable, use the addObject() method.  This
-  method will only add the object to the enumerable if the object is not
-  already present and the object is of a type supported by the enumerable.
-
-      set.addObject(contact);
-
-  ## Removing Objects
-
-  To remove an object from an enumerable, use the removeObject() method.  This
-  will only remove the object if it is already in the enumerable, otherwise
-  this method has no effect.
-
-      set.removeObject(contact);
-
-  ## Implementing In Your Own Code
-
-  If you are implementing an object and want to support this API, just include
-  this mixin in your class and implement the required methods.  In your unit
-  tests, be sure to apply the Ember.MutableEnumerableTests to your object.
-
-  @extends Ember.Mixin
-  @extends Ember.Enumerable
-*/
-Ember.MutableEnumerable = Ember.Mixin.create(Ember.Enumerable,
-  /** @scope Ember.MutableEnumerable.prototype */ {
-
-  /**
-    __Required.__ You must implement this method to apply this mixin.
-
-    Attempts to add the passed object to the receiver if the object is not
-    already present in the collection. If the object is present, this method
-    has no effect.
-
-    If the passed object is of a type not supported by the receiver
-    then this method should raise an exception.
-
-    @function
-
-    @param {Object} object
-      The object to add to the enumerable.
-
-    @returns {Object} the passed object
-  */
-  addObject: Ember.required(Function),
-
-  /**
-    Adds each object in the passed enumerable to the receiver.
-
-    @param {Ember.Enumerable} objects the objects to add.
-    @returns {Object} receiver
-  */
-  addObjects: function(objects) {
-    Ember.beginPropertyChanges(this);
-    forEach(objects, function(obj) { this.addObject(obj); }, this);
-    Ember.endPropertyChanges(this);
-    return this;
-  },
-
-  /**
-    __Required.__ You must implement this method to apply this mixin.
-
-    Attempts to remove the passed object from the receiver collection if the
-    object is present in the collection.  If the object is not present,
-    this method has no effect.
-
-    If the passed object is of a type not supported by the receiver
-    then this method should raise an exception.
-
-    @function
-
-    @param {Object} object
-      The object to remove from the enumerable.
-
-    @returns {Object} the passed object
-  */
-  removeObject: Ember.required(Function),
-
-
-  /**
-    Removes each object in the passed enumerable from the receiver.
-
-    @param {Ember.Enumerable} objects the objects to remove
-    @returns {Object} receiver
-  */
-  removeObjects: function(objects) {
-    Ember.beginPropertyChanges(this);
-    forEach(objects, function(obj) { this.removeObject(obj); }, this);
-    Ember.endPropertyChanges(this);
-    return this;
-  }
-
-});
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Runtime
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-// ..........................................................
-// CONSTANTS
-//
-
-var OUT_OF_RANGE_EXCEPTION = "Index out of range" ;
-var EMPTY = [];
-
-// ..........................................................
-// HELPERS
-//
-
-var get = Ember.get, set = Ember.set, forEach = Ember.EnumerableUtils.forEach;
-
-/**
-  @class
-
-  This mixin defines the API for modifying array-like objects.  These methods
-  can be applied only to a collection that keeps its items in an ordered set.
-
-  Note that an Array can change even if it does not implement this mixin.
-  For example, one might implement a SparseArray that cannot be directly
-  modified, but if its underlying enumerable changes, it will change also.
-
-  @extends Ember.Mixin
-  @extends Ember.Array
-  @extends Ember.MutableEnumerable
-*/
-Ember.MutableArray = Ember.Mixin.create(Ember.Array, Ember.MutableEnumerable,
-  /** @scope Ember.MutableArray.prototype */ {
-
-  /**
-    __Required.__ You must implement this method to apply this mixin.
-
-    This is one of the primitives you must implement to support Ember.Array.  You
-    should replace amt objects starting at idx with the objects in the passed
-    array.  You should also call this.enumerableContentDidChange().
-
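-    For example, a wrapper around a native array might implement it along
-    these lines (an illustrative sketch; _content is a hypothetical backing
-    store, and arrayContentWillChange/DidChange notify enumerable observers
-    for you):
-
-        replace: function(idx, amt, objects) {
-          var addCnt = objects ? objects.length : 0;
-          this.arrayContentWillChange(idx, amt, addCnt);
-          this._content.splice.apply(this._content, [idx, amt].concat(objects || []));
-          this.arrayContentDidChange(idx, amt, addCnt);
-        }
-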
-    @function
-
-    @param {Number} idx
-      Starting index in the array to replace.  If idx >= length, then append
-      to the end of the array.
-
-    @param {Number} amt
-      Number of elements that should be removed from the array, starting at
-      *idx*.
-
-    @param {Array} objects
-      An array of zero or more objects that should be inserted into the array
-      at *idx*
-  */
-  replace: Ember.required(),
-
-  /**
-    Remove all elements from self. This is useful if you
-    want to reuse an existing array without having to recreate it.
-
-        var colors = ["red", "green", "blue"];
-        colors.get('length');  => 3
-        colors.clear();  => []
-        colors.get('length'); => 0
-
-    @returns {Ember.Array} receiver, now empty.
-  */
-  clear: function () {
-    var len = get(this, 'length');
-    if (len === 0) return this;
-    this.replace(0, len, EMPTY);
-    return this;
-  },
-
-  /**
-    This will use the primitive replace() method to insert an object at the
-    specified index.
-
-        var colors = ["red", "green", "blue"];
-        colors.insertAt(2, "yellow"); => ["red", "green", "yellow", "blue"]
-        colors.insertAt(5, "orange"); => Error: Index out of range
-
-    @param {Number} idx index to insert the object at.
-    @param {Object} object object to insert
-  */
-  insertAt: function(idx, object) {
-    if (idx > get(this, 'length')) throw new Error(OUT_OF_RANGE_EXCEPTION) ;
-    this.replace(idx, 0, [object]) ;
-    return this ;
-  },
-
-  /**
-    Remove an object at the specified index using the replace() primitive
-    method.  You can pass either a single index, or a start and a length.
-
-    If you pass a start and length that is beyond the length of the array,
-    this method will throw an "Index out of range" error.
-
-        var colors = ["red", "green", "blue", "yellow", "orange"];
-        colors.removeAt(0); => ["green", "blue", "yellow", "orange"]
-        colors.removeAt(2, 2); => ["green", "blue"]
-        colors.removeAt(4, 2); => Error: Index out of range
-
-    @param {Number} start index, start of range
-    @param {Number} len length of the range to remove
-    @returns {Object} receiver
-  */
-  removeAt: function(start, len) {
-
-    var delta = 0;
-
-    if ('number' === typeof start) {
-
-      if ((start < 0) || (start >= get(this, 'length'))) {
-        throw new Error(OUT_OF_RANGE_EXCEPTION);
-      }
-
-      // fast case
-      if (len === undefined) len = 1;
-      this.replace(start, len, EMPTY);
-    }
-
-    return this ;
-  },
-
-  /**
-    Push the object onto the end of the array.  Works just like push() but it
-    is KVO-compliant.
-
-        var colors = ["red", "green", "blue"];
-        colors.pushObject("black"); => ["red", "green", "blue", "black"]
-        colors.pushObject(["yellow", "orange"]); => ["red", "green", "blue", "black", ["yellow", "orange"]]
-
-  */
-  pushObject: function(obj) {
-    this.insertAt(get(this, 'length'), obj) ;
-    return obj ;
-  },
-
-  /**
-    Add the objects in the passed enumerable to the end of the array.  Defers
-    notifying observers of the change until all objects are added.
-
-        var colors = ["red", "green", "blue"];
-        colors.pushObjects("black"); => ["red", "green", "blue", "black"]
-        colors.pushObjects(["yellow", "orange"]); => ["red", "green", "blue", "black", "yellow", "orange"]
-
-    @param {Ember.Enumerable} objects the objects to add
-    @returns {Ember.Array} receiver
-  */
-  pushObjects: function(objects) {
-    this.replace(get(this, 'length'), 0, objects);
-    return this;
-  },
-
-  /**
-    Pop an object from the end of the array, or return null if the array is
-    empty.  Works just like pop() but it is KVO-compliant.
-
-        var colors = ["red", "green", "blue"];
-        colors.popObject(); => "blue"
-        console.log(colors); => ["red", "green"]
-
-  */
-  popObject: function() {
-    var len = get(this, 'length') ;
-    if (len === 0) return null ;
-
-    var ret = this.objectAt(len-1) ;
-    this.removeAt(len-1, 1) ;
-    return ret ;
-  },
-
-  /**
-    Shift an object from the start of the array, or return null if the array
-    is empty.  Works just like shift() but it is KVO-compliant.
-
-        var colors = ["red", "green", "blue"];
-        colors.shiftObject(); => "red"
-        console.log(colors); => ["green", "blue"]
-
-  */
-  shiftObject: function() {
-    if (get(this, 'length') === 0) return null ;
-    var ret = this.objectAt(0) ;
-    this.removeAt(0) ;
-    return ret ;
-  },
-
-  /**
-    Unshift an object to the start of the array.  Works just like unshift() but it is
-    KVO-compliant.
-
-        var colors = ["red", "green", "blue"];
-        colors.unshiftObject("yellow"); => ["yellow", "red", "green", "blue"]
-        colors.unshiftObject(["black", "white"]); => [["black", "white"], "yellow", "red", "green", "blue"]
-
-  */
-  unshiftObject: function(obj) {
-    this.insertAt(0, obj) ;
-    return obj ;
-  },
-
-  /**
-    Adds the named objects to the beginning of the array.  Defers notifying
-    observers until all objects have been added.
-
-        var colors = ["red", "green", "blue"];
-        colors.unshiftObjects(["black", "white"]); => ["black", "white", "red", "green", "blue"]
-        colors.unshiftObjects("yellow"); => Type Error: 'undefined' is not a function
-
-    @param {Ember.Enumerable} objects the objects to add
-    @returns {Ember.Array} receiver
-  */
-  unshiftObjects: function(objects) {
-    this.replace(0, 0, objects);
-    return this;
-  },
-
-  /**
-    Reverse objects in the array.  Works just like reverse() but it is
-    KVO-compliant.
-
-    @return {Ember.Array} receiver
-   */
-  reverseObjects: function() {
-    var len = get(this, 'length');
-    if (len === 0) return this;
-    var objects = this.toArray().reverse();
-    this.replace(0, len, objects);
-    return this;
-  },
-
-  // ..........................................................
-  // IMPLEMENT Ember.MutableEnumerable
-  //
-
-  /** @private (nodoc) */
-  removeObject: function(obj) {
-    var loc = get(this, 'length') || 0;
-    while(--loc >= 0) {
-      var curObject = this.objectAt(loc) ;
-      if (curObject === obj) this.removeAt(loc) ;
-    }
-    return this ;
-  },
-
-  /** @private (nodoc) */
-  addObject: function(obj) {
-    if (!this.contains(obj)) this.pushObject(obj);
-    return this ;
-  }
-
-});
-
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Runtime
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-
-var get = Ember.get, set = Ember.set, defineProperty = Ember.defineProperty;
-
-/**
-  @class
-
-  ## Overview
-  
-  This mixin provides properties and property observing functionality, core
-  features of the Ember object model.
-  
-  Properties and observers allow one object to observe changes to a
-  property on another object. This is one of the fundamental ways that
-  models, controllers and views communicate with each other in an Ember
-  application.
-  
-  Any object that has this mixin applied can be used in observer
-  operations. That includes Ember.Object and most objects you will
-  interact with as you write your Ember application.
-
-  Note that you will not generally apply this mixin to classes yourself,
-  but you will use the features provided by this module frequently, so it
-  is important to understand how to use it.
-  
-  ## Using get() and set()
-  
-  Because of Ember's support for bindings and observers, you will always
-  access properties using the get method, and set properties using the
-  set method. This allows the observing objects to be notified and
-  computed properties to be handled properly.
-  
-  More documentation about `get` and `set` is below.
-  
-  ## Observing Property Changes
-
-  You typically observe property changes simply by adding the `observes`
-  call to the end of your method declarations in classes that you write.
-  For example:
-
-      Ember.Object.create({
-        valueObserver: function() {
-          // Executes whenever the "value" property changes
-        }.observes('value')
-      });
-    
-  Although this is the most common way to add an observer, this capability
-  is actually built into the Ember.Object class on top of two methods
-  defined in this mixin: `addObserver` and `removeObserver`. You can use
-  these two methods to add and remove observers yourself if you need to
-  do so at runtime.
-
-  To add an observer for a property, call:
-
-      object.addObserver('propertyKey', targetObject, targetAction)
-
-  This will cause the `targetAction` method on the `targetObject` to be called
-  whenever the value of the `propertyKey` changes.
-  
-  Note that if `propertyKey` is a computed property, the observer will be 
-  called when any of the property dependencies are changed, even if the 
-  resulting value of the computed property is unchanged. This is necessary
-  because computed properties are not computed until `get` is called.
-  
-  @extends Ember.Mixin
-*/
-Ember.Observable = Ember.Mixin.create(/** @scope Ember.Observable.prototype */ {
-
-  /** @private - compatibility */
-  isObserverable: true,
-
-  /**
-    Retrieves the value of a property from the object.
-
-    This method is usually similar to using object[keyName] or object.keyName,
-    however it supports both computed properties and the unknownProperty
-    handler.
-    
-    Because `get` unifies the syntax for accessing all these kinds
-    of properties, it can make many refactorings easier, such as replacing a
-    simple property with a computed property, or vice versa.
-
-    ### Computed Properties
-
-    Computed properties are methods defined with the `property` modifier
-    declared at the end, such as:
-
-          fullName: function() {
-            return this.getEach('firstName', 'lastName').compact().join(' ');
-          }.property('firstName', 'lastName')
-
-    When you call `get` on a computed property, the function will be
-    called and the return value will be returned instead of the function
-    itself.
-
-    ### Unknown Properties
-
-    Likewise, if you try to call `get` on a property whose value is
-    undefined, the unknownProperty() method will be called on the object.
-    If this method returns any value other than undefined, it will be returned
-    instead. This allows you to implement "virtual" properties that are
-    not defined upfront.
-
-    @param {String} key The property to retrieve
-    @returns {Object} The property value or undefined.
-  */
-  get: function(keyName) {
-    return get(this, keyName);
-  },
-
-  /**
-    To get multiple properties at once, call getProperties
-    with a list of strings or an array:
-
-          record.getProperties('firstName', 'lastName', 'zipCode'); // => { firstName: 'John', lastName: 'Doe', zipCode: '10011' }
-
-   is equivalent to:
-
-          record.getProperties(['firstName', 'lastName', 'zipCode']); // => { firstName: 'John', lastName: 'Doe', zipCode: '10011' }
-
-    @param {String...|Array} list of keys to get
-    @returns {Hash}
-  */
-  getProperties: function() {
-    var ret = {};
-    var propertyNames = arguments;
-    if (arguments.length === 1 && Ember.typeOf(arguments[0]) === 'array') {
-      propertyNames = arguments[0];
-    }
-    for(var i = 0; i < propertyNames.length; i++) {
-      ret[propertyNames[i]] = get(this, propertyNames[i]);
-    }
-    return ret;
-  },
-
-  /**
-    Sets the provided key or path to the value.
-
-    This method is generally very similar to calling object[key] = value or
-    object.key = value, except that it provides support for computed
-    properties, the unknownProperty() method and property observers.
-
-    ### Computed Properties
-
-    If you try to set a value on a key that has a computed property handler
-    defined (see the get() method for an example), then set() will call
-    that method, passing both the value and key instead of simply changing
-    the value itself. This is useful for those times when you need to
-    implement a property that is composed of one or more member
-    properties.
-
-    ### Unknown Properties
-
-    If you try to set a value on a key that is undefined in the target
-    object, then the unknownProperty() handler will be called instead. This
-    gives you an opportunity to implement complex "virtual" properties that
-    are not predefined on the object. If unknownProperty() returns
-    undefined, then set() will simply set the value on the object.
-
-    ### Property Observers
-
-    In addition to changing the property, set() will also register a
-    property change with the object. Unless you have placed this call
-    inside of a beginPropertyChanges() and endPropertyChanges(), any "local"
-    observers (i.e. observer methods declared on the same object), will be
-    called immediately. Any "remote" observers (i.e. observer methods
-    declared on another object) will be placed in a queue and called at a
-    later time in a coalesced manner.
-
-    ### Chaining
-
-    In addition to property changes, set() returns the value of the object
-    itself so you can do chaining like this:
-
-          record.set('firstName', 'Charles').set('lastName', 'Jolley');
-
-    @param {String} key The property to set
-    @param {Object} value The value to set or null.
-    @returns {Ember.Observable}
-  */
-  set: function(keyName, value) {
-    set(this, keyName, value);
-    return this;
-  },
-
-  /**
-    To set multiple properties at once, call setProperties
-    with a Hash:
-
-          record.setProperties({ firstName: 'Charles', lastName: 'Jolley' });
-
-    @param {Hash} hash the hash of keys and values to set
-    @returns {Ember.Observable}
-  */
-  setProperties: function(hash) {
-    return Ember.setProperties(this, hash);
-  },
-
-  /**
-    Begins a grouping of property changes.
-
-    You can use this method to group property changes so that notifications
-    will not be sent until the changes are finished. If you plan to make a
-    large number of changes to an object at one time, you should call this
-    method at the beginning of the changes to begin deferring change
-    notifications. When you are done making changes, call endPropertyChanges()
-    to deliver the deferred change notifications and end deferring.
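-
-    A minimal sketch (assuming `record` mixes in `Ember.Observable`):
-
-        record.beginPropertyChanges();
-        record.set('firstName', 'Charles');
-        record.set('lastName', 'Jolley');
-        record.endPropertyChanges(); // deferred notifications fire here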
-
-    @returns {Ember.Observable}
-  */
-  beginPropertyChanges: function() {
-    Ember.beginPropertyChanges();
-    return this;
-  },
-
-  /**
-    Ends a grouping of property changes.
-
-    You can use this method to group property changes so that notifications
-    will not be sent until the changes are finished. If you plan to make a
-    large number of changes to an object at one time, you should call
-    beginPropertyChanges() at the beginning of the changes to defer change
-    notifications. When you are done making changes, call this method to
-    deliver the deferred change notifications and end deferring.
-
-    @returns {Ember.Observable}
-  */
-  endPropertyChanges: function() {
-    Ember.endPropertyChanges();
-    return this;
-  },
-
-  /**
-    Notify the observer system that a property is about to change.
-
-    Sometimes you need to change a value directly or indirectly without
-    actually calling get() or set() on it. In this case, you can use this
-    method and propertyDidChange() instead. Calling these two methods
-    together will notify all observers that the property has potentially
-    changed value.
-
-    Note that you must always call propertyWillChange and propertyDidChange as
-    a pair. If you do not, the property change groups may get out of order,
-    causing notifications to be delivered more often than you would like.
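-
-    A sketch of the usual pattern (`_items` and `newItem` are illustrative
-    placeholders for backing data changed without `set()`):
-
-        obj.propertyWillChange('total');
-        obj._items.push(newItem); // mutate the backing data directly
-        obj.propertyDidChange('total');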
-
-    @param {String} key The property key that is about to change.
-    @returns {Ember.Observable}
-  */
-  propertyWillChange: function(keyName){
-    Ember.propertyWillChange(this, keyName);
-    return this;
-  },
-
-  /**
-    Notify the observer system that a property has just changed.
-
-    Sometimes you need to change a value directly or indirectly without
-    actually calling get() or set() on it. In this case, you can use this
-    method and propertyWillChange() instead. Calling these two methods
-    together will notify all observers that the property has potentially
-    changed value.
-
-    Note that you must always call propertyWillChange and propertyDidChange as
-    a pair. If you do not, the property change groups may get out of order,
-    causing notifications to be delivered more often than you would like.
-
-    @param {String} keyName The property key that has just changed.
-    @returns {Ember.Observable}
-  */
-  propertyDidChange: function(keyName) {
-    Ember.propertyDidChange(this, keyName);
-    return this;
-  },
-  
-  /**
-    Convenience method to call `propertyWillChange` and `propertyDidChange` in
-    succession.
-  
-    @param {String} keyName The property key to be notified about.
-    @returns {Ember.Observable}
-  */
-  notifyPropertyChange: function(keyName) {
-    this.propertyWillChange(keyName);
-    this.propertyDidChange(keyName);
-    return this;
-  },
-
-  addBeforeObserver: function(key, target, method) {
-    Ember.addBeforeObserver(this, key, target, method);
-  },
-
-  /**
-    Adds an observer on a property.
-
-    This is the core method used to register an observer for a property.
-
-    Once you call this method, anytime the key's value is set, your observer
-    will be notified. Note that the observers are triggered anytime the
-    value is set, regardless of whether it has actually changed. Your
-    observer should be prepared to handle that.
-
-    You can also pass an optional context parameter to this method. The
-    context will be passed to your observer method whenever it is triggered.
-    Note that if you add the same target/method pair on a key multiple times
-    with different context parameters, your observer will only be called once
-    with the last context you passed.
-
-    ### Observer Methods
-
-    Observer methods you pass should generally have the following signature if
-    you do not pass a "context" parameter:
-
-          fooDidChange: function(sender, key, value, rev);
-
-    The sender is the object that changed. The key is the property that
-    changed. The value parameter is currently reserved and unused. The rev
-    is the last property revision of the object when it changed, which you can
-    use to detect if the key value has really changed or not.
-
-    If you pass a "context" parameter, the context will be passed before the
-    revision like so:
-
-          fooDidChange: function(sender, key, value, context, rev);
-
-    Usually you will not need the value, context or revision parameters at
-    the end. In this case, it is common to write observer methods that take
-    only a sender and key value as parameters or, if you aren't interested in
-    any of these values, to write an observer that has no parameters at all.
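-
-    For example (a sketch; the property and handler names are illustrative):
-
-        var handler = function(sender, key) {
-          console.log(key + ' changed to ' + sender.get(key));
-        };
-        object.addObserver('firstName', null, handler);
-        // later, when no longer needed:
-        object.removeObserver('firstName', null, handler);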
-
-    @param {String} key The key to observe
-    @param {Object} target The target object to invoke
-    @param {String|Function} method The method to invoke.
-    @returns {Ember.Object} self
-  */
-  addObserver: function(key, target, method) {
-    Ember.addObserver(this, key, target, method);
-  },
-
-  /**
-    Remove an observer you have previously registered on this object. Pass
-    the same key, target, and method you passed to addObserver() and your
-    target will no longer receive notifications.
-
-    @param {String} key The key to observe
-    @param {Object} target The target object to invoke
-    @param {String|Function} method The method to invoke.
-    @returns {Ember.Observable} receiver
-  */
-  removeObserver: function(key, target, method) {
-    Ember.removeObserver(this, key, target, method);
-  },
-
-  /**
-    Returns true if the object currently has observers registered for a
-    particular key. You can use this method to potentially defer performing
-    an expensive action until someone begins observing a particular property
-    on the object.
-
-    @param {String} key Key to check
-    @returns {Boolean}
-  */
-  hasObserverFor: function(key) {
-    return Ember.hasListeners(this, key+':change');
-  },
-
-  /**
-    This method will be called when a client attempts to get the value of a
-    property that has not been defined in one of the typical ways. Override
-    this method to create "virtual" properties.
-    
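-    For example (a minimal sketch of a catch-all fallback):
-
-        var dict = Ember.Object.create({
-          unknownProperty: function(key) {
-            return 'no value for ' + key;
-          }
-        });
-
-        dict.get('anything'); // => 'no value for anything'
-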
-    @param {String} key The name of the unknown property that was requested.
-    @returns {Object} The property value or undefined. Default is undefined.
-  */
-  unknownProperty: function(key) {
-    return undefined;
-  },
-
-  /**
-    This method will be called when a client attempts to set the value of a
-    property that has not been defined in one of the typical ways. Override
-    this method to create "virtual" properties.
-    
-    @param {String} key The name of the unknown property to be set.
-    @param {Object} value The value the unknown property is to be set to.
-  */
-  setUnknownProperty: function(key, value) {
-    defineProperty(this, key);
-    set(this, key, value);
-  },
-
-  /**
-    @deprecated
-    @param {String} path The property path to retrieve
-    @returns {Object} The property value or undefined.
-  */
-  getPath: function(path) {
-    Ember.deprecate("getPath is deprecated since get now supports paths");
-    return this.get(path);
-  },
-
-  /**
-    @deprecated
-    @param {String} path The path to the property that will be set
-    @param {Object} value The value to set or null.
-    @returns {Ember.Observable}
-  */
-  setPath: function(path, value) {
-    Ember.deprecate("setPath is deprecated since set now supports paths");
-    return this.set(path, value);
-  },
-
-  /**
-    Retrieves the value of a property, or a default value in the case that the property
-    returns undefined.
-    
-        person.getWithDefault('lastName', 'Doe');
-    
-    @param {String} keyName The name of the property to retrieve
-    @param {Object} defaultValue The value to return if the property value is undefined
-    @returns {Object} The property value or the defaultValue.
-  */
-  getWithDefault: function(keyName, defaultValue) {
-    return Ember.getWithDefault(this, keyName, defaultValue);
-  },
-
-  /**
-    Set the value of a property to the current value plus some amount.
-    
-        person.incrementProperty('age');
-        team.incrementProperty('score', 2);
-    
-    @param {String} keyName The name of the property to increment
-    @param {Object} increment The amount to increment by. Defaults to 1
-    @returns {Object} The new property value
-  */
-  incrementProperty: function(keyName, increment) {
-    if (!increment) { increment = 1; }
-    set(this, keyName, (get(this, keyName) || 0)+increment);
-    return get(this, keyName);
-  },
-  
-  /**
-    Set the value of a property to the current value minus some amount.
-    
-        player.decrementProperty('lives');
-        orc.decrementProperty('health', 5);
-    
-    @param {String} keyName The name of the property to decrement
-    @param {Object} increment The amount to decrement by. Defaults to 1
-    @returns {Object} The new property value
-  */
-  decrementProperty: function(keyName, increment) {
-    if (!increment) { increment = 1; }
-    set(this, keyName, (get(this, keyName) || 0)-increment);
-    return get(this, keyName);
-  },
-
-  /**
-    Set the value of a boolean property to the opposite of its
-    current value.
-
-        starship.toggleProperty('warpDriveEngaged');
-    
-    @param {String} keyName The name of the property to toggle
-    @returns {Object} The new property value
-  */
-  toggleProperty: function(keyName) {
-    set(this, keyName, !get(this, keyName));
-    return get(this, keyName);
-  },
-
-  /**
-    Returns the cached value of a computed property, if it exists.
-    This allows you to inspect the value of a computed property
-    without accidentally invoking it if it is intended to be
-    generated lazily.
-
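-    A sketch (assuming `fullName` is a cacheable computed property):
-
-        obj.cacheFor('fullName'); // => undefined (not yet computed)
-        obj.get('fullName');      // computes and caches the value
-        obj.cacheFor('fullName'); // => the cached value
-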
-    @param {String} keyName
-    @returns {Object} The cached value of the computed property, if any
-  */
-  cacheFor: function(keyName) {
-    return Ember.cacheFor(this, keyName);
-  },
-
-  /** @private - intended for debugging purposes */
-  observersForKey: function(keyName) {
-    return Ember.observersFor(this, keyName);
-  }
-});
-
-
-
-
-})();
-
-
-
-(function() {
-var get = Ember.get, set = Ember.set;
-
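-/**
-  @class
-
-  Mixin for objects that can fire a named `action` on a `target`. A minimal
-  sketch (`someController` and its `save` method are illustrative):
-
-      var button = Ember.Object.extend(Ember.TargetActionSupport, {
-        target: someController,
-        action: 'save'
-      }).create();
-
-      button.triggerAction(); // calls someController.save(button)
-
-  @extends Ember.Mixin
-*/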
-Ember.TargetActionSupport = Ember.Mixin.create({
-  target: null,
-  action: null,
-
-  targetObject: Ember.computed(function() {
-    var target = get(this, 'target');
-
-    if (Ember.typeOf(target) === "string") {
-      var value = get(this, target);
-      if (value === undefined) { value = get(window, target); }
-      return value;
-    } else {
-      return target;
-    }
-  }).property('target').cacheable(),
-
-  triggerAction: function() {
-    var action = get(this, 'action'),
-        target = get(this, 'targetObject');
-
-    if (target && action) {
-      var ret;
-
-      if (typeof target.send === 'function') {
-        ret = target.send(action, this);
-      } else {
-        if (typeof action === 'string') {
-          action = target[action];
-        }
-        ret = action.call(target, this);
-      }
-      if (ret !== false) ret = true;
-
-      return ret;
-    } else {
-      return false;
-    }
-  }
-});
-
-})();
-
-
-
-(function() {
-/**
- @class
-
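- Mixin that provides a simple publish/subscribe API for arbitrary named
- events. A minimal sketch:
-
-     var person = Ember.Object.extend(Ember.Evented).create();
-     person.on('greet', function() { console.log('hello'); });
-     person.trigger('greet'); // logs 'hello'
-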
- @extends Ember.Mixin
- */
-Ember.Evented = Ember.Mixin.create(
-  /** @scope Ember.Evented.prototype */ {
-  on: function(name, target, method) {
-    Ember.addListener(this, name, target, method);
-  },
-
-  one: function(name, target, method) {
-    if (!method) {
-      method = target;
-      target = null;
-    }
-
-    var self = this;
-    var wrapped = function() {
-      Ember.removeListener(self, name, target, wrapped);
-
-      if ('string' === typeof method) { method = this[method]; }
-
-      // Internally, a `null` target means that the target is
-      // the first parameter to addListener. That means that
-      // the `this` passed into this function is the target
-      // determined by the event system.
-      method.apply(this, arguments);
-    };
-
-    this.on(name, target, wrapped);
-  },
-
-  trigger: function(name) {
-    var args = [], i, l;
-    for (i = 1, l = arguments.length; i < l; i++) {
-      args.push(arguments[i]);
-    }
-    Ember.sendEvent(this, name, args);
-  },
-
-  fire: function(name) {
-    Ember.deprecate("Ember.Evented#fire() has been deprecated in favor of trigger() for compatibility with jQuery. It will be removed in 1.0. Please update your code to call trigger() instead.");
-    this.trigger.apply(this, arguments);
-  },
-
-  off: function(name, target, method) {
-    Ember.removeListener(this, name, target, method);
-  },
-
-  has: function(name) {
-    return Ember.hasListeners(this, name);
-  }
-});
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Runtime
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Runtime
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-
-
-
-// NOTE: this object should never be included directly.  Instead use
-// Ember.Object.  We only define this separately so that Ember.Set can
-// depend on it.
-
-
-
-var classToString = Ember.Mixin.prototype.toString;
-var set = Ember.set, get = Ember.get;
-var o_create = Ember.create,
-    o_defineProperty = Ember.platform.defineProperty,
-    a_slice = Array.prototype.slice,
-    meta = Ember.meta,
-    rewatch = Ember.rewatch,
-    finishChains = Ember.finishChains,
-    finishPartial = Ember.Mixin.finishPartial,
-    reopen = Ember.Mixin.prototype.reopen;
-
-var undefinedDescriptor = {
-  configurable: true,
-  writable: true,
-  enumerable: false,
-  value: undefined
-};
-
-/** @private */
-function makeCtor() {
-
-  // Note: avoid accessing any properties on the object since it makes the
-  // method a lot faster.  This is glue code so we want it to be as fast as
-  // possible.
-
-  var wasApplied = false, initMixins;
-
-  var Class = function() {
-    if (!wasApplied) {
-      Class.proto(); // prepare prototype...
-    }
-    var m = Ember.meta(this);
-    m.proto = this;
-    if (initMixins) {
-      this.reopen.apply(this, initMixins);
-      initMixins = null;
-    }
-    o_defineProperty(this, Ember.GUID_KEY, undefinedDescriptor);
-    o_defineProperty(this, '_super', undefinedDescriptor);
-    finishPartial(this, m);
-    delete m.proto;
-    finishChains(this);
-    this.init.apply(this, arguments);
-  };
-
-  Class.toString = classToString;
-  Class.willReopen = function() {
-    if (wasApplied) {
-      Class.PrototypeMixin = Ember.Mixin.create(Class.PrototypeMixin);
-    }
-
-    wasApplied = false;
-  };
-  Class._initMixins = function(args) { initMixins = args; };
-
-  Class.proto = function() {
-    var superclass = Class.superclass;
-    if (superclass) { superclass.proto(); }
-
-    if (!wasApplied) {
-      wasApplied = true;
-      Class.PrototypeMixin.applyPartial(Class.prototype);
-      rewatch(Class.prototype);
-    }
-
-    return this.prototype;
-  };
-
-  return Class;
-
-}
-
-var CoreObject = makeCtor();
-
-CoreObject.PrototypeMixin = Ember.Mixin.create(
-/** @scope Ember.CoreObject.prototype */ {
-
-  reopen: function() {
-    Ember.Mixin._apply(this, arguments, true);
-    return this;
-  },
-
-  isInstance: true,
-
-  /** @private */
-  init: function() {},
-
-  /** @field */
-  isDestroyed: false,
-
-  /** @field */
-  isDestroying: false,
-
-  /**
-    Destroys an object by setting the isDestroyed flag and removing its
-    metadata, which effectively destroys observers and bindings.
-
-    If you try to set a property on a destroyed object, an exception will be
-    raised.
-
-    Note that destruction is scheduled for the end of the run loop and does not
-    happen immediately.
-
-    @returns {Ember.Object} receiver
-  */
-  destroy: function() {
-    if (this.isDestroying) { return; }
-
-    this.isDestroying = true;
-
-    if (this.willDestroy) { this.willDestroy(); }
-
-    set(this, 'isDestroyed', true);
-    Ember.run.schedule('destroy', this, this._scheduledDestroy);
-    return this;
-  },
-
-  /**
-    Invoked by the run loop to actually destroy the object. This is
-    scheduled for execution by the `destroy` method.
-
-    @private
-  */
-  _scheduledDestroy: function() {
-    Ember.destroy(this);
-    if (this.didDestroy) { this.didDestroy(); }
-  },
-
-  bind: function(to, from) {
-    if (!(from instanceof Ember.Binding)) { from = Ember.Binding.from(from); }
-    from.to(to).connect(this);
-    return from;
-  },
-
-  toString: function() {
-    return '<'+this.constructor.toString()+':'+Ember.guidFor(this)+'>';
-  }
-});
-
-if (Ember.config.overridePrototypeMixin) {
-  Ember.config.overridePrototypeMixin(CoreObject.PrototypeMixin);
-}
-
-CoreObject.__super__ = null;
-
-var ClassMixin = Ember.Mixin.create(
-/** @scope Ember.ClassMixin.prototype */ {
-
-  ClassMixin: Ember.required(),
-
-  PrototypeMixin: Ember.required(),
-
-  isClass: true,
-
-  isMethod: false,
-
-  extend: function() {
-    var Class = makeCtor(), proto;
-    Class.ClassMixin = Ember.Mixin.create(this.ClassMixin);
-    Class.PrototypeMixin = Ember.Mixin.create(this.PrototypeMixin);
-
-    Class.ClassMixin.ownerConstructor = Class;
-    Class.PrototypeMixin.ownerConstructor = Class;
-
-    reopen.apply(Class.PrototypeMixin, arguments);
-
-    Class.superclass = this;
-    Class.__super__  = this.prototype;
-
-    proto = Class.prototype = o_create(this.prototype);
-    proto.constructor = Class;
-    Ember.generateGuid(proto, 'ember');
-    meta(proto).proto = proto; // this will disable observers on prototype
-
-    Class.ClassMixin.apply(Class);
-    return Class;
-  },
-
-  create: function() {
-    var C = this;
-    if (arguments.length>0) { this._initMixins(arguments); }
-    return new C();
-  },
-
-  reopen: function() {
-    this.willReopen();
-    reopen.apply(this.PrototypeMixin, arguments);
-    return this;
-  },
-
-  reopenClass: function() {
-    reopen.apply(this.ClassMixin, arguments);
-    Ember.Mixin._apply(this, arguments, false);
-    return this;
-  },
-
-  detect: function(obj) {
-    if ('function' !== typeof obj) { return false; }
-    while(obj) {
-      if (obj===this) { return true; }
-      obj = obj.superclass;
-    }
-    return false;
-  },
-
-  detectInstance: function(obj) {
-    return obj instanceof this;
-  },
-
-  /**
-    In some cases, you may want to annotate computed properties with additional
-    metadata about how they function or what values they operate on. For example,
-    computed property functions may close over variables that are then no longer
-    available for introspection.
-
-    You can pass a hash of these values to a computed property like this:
-
-        person: function() {
-          var personId = this.get('personId');
-          return App.Person.create({ id: personId });
-        }.property().meta({ type: App.Person })
-
-    Once you've done this, you can retrieve the values saved to the computed
-    property from your class like this:
-
-        MyClass.metaForProperty('person');
-
-    This will return the original hash that was passed to `meta()`.
-  */
-  metaForProperty: function(key) {
-    var desc = meta(this.proto(), false).descs[key];
-
-    Ember.assert("metaForProperty() could not find a computed property with key '"+key+"'.", !!desc && desc instanceof Ember.ComputedProperty);
-    return desc._meta || {};
-  },
-
-  /**
-    Iterate over each computed property for the class, passing its name
-    and any associated metadata (see `metaForProperty`) to the callback.
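-
-    A sketch (reusing the `person` computed property shown under
-    `metaForProperty` above):
-
-        MyClass.eachComputedProperty(function(name, meta) {
-          console.log(name, meta.type); // e.g. 'person', App.Person
-        });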
-  */
-  eachComputedProperty: function(callback, binding) {
-    var proto = this.proto(),
-        descs = meta(proto).descs,
-        empty = {},
-        property;
-
-    for (var name in descs) {
-      property = descs[name];
-
-      if (property instanceof Ember.ComputedProperty) {
-        callback.call(binding || this, name, property._meta || empty);
-      }
-    }
-  }
-
-});
-
-if (Ember.config.overrideClassMixin) {
-  Ember.config.overrideClassMixin(ClassMixin);
-}
-
-CoreObject.ClassMixin = ClassMixin;
-ClassMixin.apply(CoreObject);
-
-/**
-  @class
-*/
-Ember.CoreObject = CoreObject;
-
-
-
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Runtime
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-var get = Ember.get, set = Ember.set, guidFor = Ember.guidFor, none = Ember.none;
-
-/**
-  @class
-
-  An unordered collection of objects.
-
-  A Set works a bit like an array except that its items are not ordered.
-  You can use a set to efficiently test an object for membership. You
-  can also iterate through a set just like an array, even accessing objects
-  by index; however, there is no guarantee as to their order.
-
-  All Sets are observable via the Enumerable Observer API - which works
-  on any enumerable object including both Sets and Arrays.
-
-  ## Creating a Set
-
-  You can create a set like you would most objects using
-  `new Ember.Set()`.  Most new sets you create will be empty, but you can
-  also initialize the set with some content by passing an array or other
-  enumerable of objects to the constructor.
-
-  Finally, you can pass in an existing set and the set will be copied. You
-  can also create a copy of a set by calling `Ember.Set#copy()`.
-
-      #js
-      // creates a new empty set
-      var foundNames = new Ember.Set();
-
-      // creates a set with four names in it.
-      var names = new Ember.Set(["Charles", "Tom", "Juan", "Alex"]); // :P
-
-      // creates a copy of the names set.
-      var namesCopy = new Ember.Set(names);
-
-      // same as above.
-      var anotherNamesCopy = names.copy();
-
-  ## Adding/Removing Objects
-
-  You generally add or remove objects from a set using `add()` or
-  `remove()`. You can add any type of object including primitives such as
-  numbers, strings, and booleans.
-
-  Unlike arrays, objects can only exist one time in a set. If you call `add()`
-  on a set with the same object multiple times, the object will only be added
-  once. Likewise, calling `remove()` with the same object multiple times will
-  remove the object the first time and have no effect on future calls until
-  you add the object to the set again.
-
-  NOTE: You cannot add null or undefined to a set, or remove them from one.
-  Any attempt to do so will be ignored.
-
-  In addition to add/remove, you can also call `push()`/`pop()`. Push behaves
-  just like `add()`, but `pop()`, unlike `remove()`, will pick an arbitrary
-  object, remove it, and return it. This makes a set a good fit for a job
-  queue when you don't care about the order in which jobs are executed, as
-  sketched below.
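-
-  For example (a sketch; `job1`/`job2` and their `run()` method are
-  illustrative):
-
-      var jobs = new Ember.Set([job1, job2]);
-      while (jobs.length > 0) {
-        jobs.pop().run(); // executes jobs in arbitrary order
-      }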
-
-  ## Testing for an Object
-
-  To test for an object's presence in a set you simply call
-  `Ember.Set#contains()`.
-
-  ## Observing changes
-
-  When using `Ember.Set`, you can observe the `"[]"` property to be
-  alerted whenever the content changes.  You can also add an enumerable
-  observer to the set to be notified of specific objects that are added and
-  removed from the set.  See `Ember.Enumerable` for more information on
-  enumerables.
-
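-  For example (a sketch using a plain function observer):
-
-      Ember.addObserver(names, '[]', null, function() {
-        // the set's content changed
-      });
-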
-  This is often unhelpful. If you are filtering sets of objects, for instance,
-  it is very inefficient to re-filter all of the items each time the set
-  changes. It would be better if you could just adjust the filtered set based
-  on what was changed on the original set. The same issue applies to merging
-  sets, as well.
-
-  ## Other Methods
-
-  `Ember.Set` primarily implements other mixin APIs.  For a complete reference
-  on the methods you will use with `Ember.Set`, please consult these mixins.
-  The most useful ones will be `Ember.Enumerable` and
-  `Ember.MutableEnumerable` which implement most of the common iterator
-  methods you are used to on Array.
-
-  Note that you can also use the `Ember.Copyable` and `Ember.Freezable`
-  APIs on `Ember.Set` as well.  Once a set is frozen it can no longer be
-  modified.  The benefit of this is that when you call frozenCopy() on it,
-  Ember will avoid making copies of the set.  This allows you to write
-  code that can know with certainty when the underlying set data will or
-  will not be modified.
-
-  @extends Ember.Enumerable
-  @extends Ember.MutableEnumerable
-  @extends Ember.Copyable
-  @extends Ember.Freezable
-
-  @since Ember 0.9
-*/
-Ember.Set = Ember.CoreObject.extend(Ember.MutableEnumerable, Ember.Copyable, Ember.Freezable,
-  /** @scope Ember.Set.prototype */ {
-
-  // ..........................................................
-  // IMPLEMENT ENUMERABLE APIS
-  //
-
-  /**
-    This property will change as the number of objects in the set changes.
-
-    @type number
-    @default 0
-  */
-  length: 0,
-
-  /**
-    Clears the set. This is useful if you want to reuse an existing set
-    without having to recreate it.
-
-        var colors = new Ember.Set(["red", "green", "blue"]);
-        colors.length;  => 3
-        colors.clear();
-        colors.length;  => 0
-
-    @returns {Ember.Set} An empty Set
-  */
-  clear: function() {
-    if (this.isFrozen) { throw new Error(Ember.FROZEN_ERROR); }
-
-    var len = get(this, 'length');
-    if (len === 0) { return this; }
-
-    var guid;
-
-    this.enumerableContentWillChange(len, 0);
-    Ember.propertyWillChange(this, 'firstObject');
-    Ember.propertyWillChange(this, 'lastObject');
-
-    for (var i=0; i < len; i++){
-      guid = guidFor(this[i]);
-      delete this[guid];
-      delete this[i];
-    }
-
-    set(this, 'length', 0);
-
-    Ember.propertyDidChange(this, 'firstObject');
-    Ember.propertyDidChange(this, 'lastObject');
-    this.enumerableContentDidChange(len, 0);
-
-    return this;
-  },
-
-  /**
-    Returns true if the passed object is also an enumerable that contains the
-    same objects as the receiver.
-
-        var colors = ["red", "green", "blue"],
-            same_colors = new Ember.Set(colors);
-        same_colors.isEqual(colors); => true
-        same_colors.isEqual(["purple", "brown"]); => false
-
-    @param {Ember.Set} obj the other object.
-    @returns {Boolean}
-  */
-  isEqual: function(obj) {
-    // fail fast
-    if (!Ember.Enumerable.detect(obj)) return false;
-
-    var loc = get(this, 'length');
-    if (get(obj, 'length') !== loc) return false;
-
-    while(--loc >= 0) {
-      if (!obj.contains(this[loc])) return false;
-    }
-
-    return true;
-  },
-
-  /**
-    Adds an object to the set. Only non-null objects can be added to a set
-    and those can only be added once. If the object is already in the set or
-    the passed value is null this method will have no effect.
-
-    This is an alias for `Ember.MutableEnumerable.addObject()`.
-
-        var colors = new Ember.Set();
-        colors.add("blue");    => ["blue"]
-        colors.add("blue");    => ["blue"]
-        colors.add("red");     => ["blue", "red"]
-        colors.add(null);      => ["blue", "red"]
-        colors.add(undefined); => ["blue", "red"]
-
-    @function
-    @param {Object} obj The object to add.
-    @returns {Ember.Set} The set itself.
-  */
-  add: Ember.alias('addObject'),
-
-  /**
-    Removes the object from the set if it is found.  If you pass a null value
-    or an object that is already not in the set, this method will have no
-    effect. This is an alias for `Ember.MutableEnumerable.removeObject()`.
-
-        var colors = new Ember.Set(["red", "green", "blue"]);
-        colors.remove("red");    => ["blue", "green"]
-        colors.remove("purple"); => ["blue", "green"]
-        colors.remove(null);     => ["blue", "green"]
-
-    @function
-    @param {Object} obj The object to remove
-    @returns {Ember.Set} The set itself.
-  */
-  remove: Ember.alias('removeObject'),
-
-  /**
-    Removes the last element from the set and returns it, or null if the set is empty.
-
-        var colors = new Ember.Set(["green", "blue"]);
-        colors.pop(); => "blue"
-        colors.pop(); => "green"
-        colors.pop(); => null
-
-    @returns {Object} The removed object from the set or null.
-  */
-  pop: function() {
-    if (get(this, 'isFrozen')) throw new Error(Ember.FROZEN_ERROR);
-    var obj = this.length > 0 ? this[this.length-1] : null;
-    this.remove(obj);
-    return obj;
-  },
-
-  /**
-    Inserts the given object onto the end of the set. It returns
-    the set itself.
-
-    This is an alias for `Ember.MutableEnumerable.addObject()`.
-
-        var colors = new Ember.Set();
-        colors.push("red");   => ["red"]
-        colors.push("green"); => ["red", "green"]
-        colors.push("blue");  => ["red", "green", "blue"]
-
-    @function
-    @returns {Ember.Set} The set itself.
-  */
-  push: Ember.alias('addObject'),
-
-  /**
-    Removes the last element from the set and returns it, or null if the set is empty.
-
-    This is an alias for `Ember.Set.pop()`.
-
-        var colors = new Ember.Set(["green", "blue"]);
-        colors.shift(); => "blue"
-        colors.shift(); => "green"
-        colors.shift(); => null
-
-    @function
-    @returns {Object} The removed object from the set or null.
-  */
-  shift: Ember.alias('pop'),
-
-  /**
-    Inserts the given object onto the end of the set. It returns
-    the set itself.
-
-    This is an alias of `Ember.Set.push()`
-
-        var colors = new Ember.Set();
-        colors.unshift("red");   => ["red"]
-        colors.unshift("green"); => ["red", "green"]
-        colors.unshift("blue");  => ["red", "green", "blue"]
-
-    @function
-    @returns {Ember.Set} The set itself.
-  */
-  unshift: Ember.alias('push'),
-
-  /**
-    Adds each object in the passed enumerable to the set.
-
-    This is an alias of `Ember.MutableEnumerable.addObjects()`
-
-        var colors = new Ember.Set();
-        colors.addEach(["red", "green", "blue"]); => ["red", "green", "blue"]
-
-    @function
-    @param {Ember.Enumerable} objects the objects to add.
-    @returns {Ember.Set} The set itself.
-  */
-  addEach: Ember.alias('addObjects'),
-
-  /**
-    Removes each object in the passed enumerable from the set.
-
-    This is an alias of `Ember.MutableEnumerable.removeObjects()`
-
-        var colors = new Ember.Set(["red", "green", "blue"]);
-        colors.removeEach(["red", "blue"]); => ["green"]
-
-    @function
-    @param {Ember.Enumerable} objects the objects to remove.
-    @returns {Ember.Set} The set itself.
-  */
-  removeEach: Ember.alias('removeObjects'),
-
-  // ..........................................................
-  // PRIVATE ENUMERABLE SUPPORT
-  //
-
-  /** @private */
-  init: function(items) {
-    this._super();
-    if (items) this.addObjects(items);
-  },
-
-  /** @private (nodoc) - implement Ember.Enumerable */
-  nextObject: function(idx) {
-    return this[idx];
-  },
-
-  /** @private - more optimized version */
-  firstObject: Ember.computed(function() {
-    return this.length > 0 ? this[0] : undefined;
-  }).property().cacheable(),
-
-  /** @private - more optimized version */
-  lastObject: Ember.computed(function() {
-    return this.length > 0 ? this[this.length-1] : undefined;
-  }).property().cacheable(),
-
-  /** @private (nodoc) - implements Ember.MutableEnumerable */
-  addObject: function(obj) {
-    if (get(this, 'isFrozen')) throw new Error(Ember.FROZEN_ERROR);
-    if (none(obj)) return this; // nothing to do
-
-    var guid = guidFor(obj),
-        idx  = this[guid],
-        len  = get(this, 'length'),
-        added ;
-
-    if (idx>=0 && idx<len && (this[idx] === obj)) return this; // added
-
-    added = [obj];
-
-    this.enumerableContentWillChange(null, added);
-    Ember.propertyWillChange(this, 'lastObject');
-
-    len = get(this, 'length');
-    this[guid] = len;
-    this[len] = obj;
-    set(this, 'length', len+1);
-
-    Ember.propertyDidChange(this, 'lastObject');
-    this.enumerableContentDidChange(null, added);
-
-    return this;
-  },
-
-  /** @private (nodoc) - implements Ember.MutableEnumerable */
-  removeObject: function(obj) {
-    if (get(this, 'isFrozen')) throw new Error(Ember.FROZEN_ERROR);
-    if (none(obj)) return this; // nothing to do
-
-    var guid = guidFor(obj),
-        idx  = this[guid],
-        len = get(this, 'length'),
-        isFirst = idx === 0,
-        isLast = idx === len-1,
-        last, removed;
-
-
-    if (idx>=0 && idx<len && (this[idx] === obj)) {
-      removed = [obj];
-
-      this.enumerableContentWillChange(removed, null);
-      if (isFirst) { Ember.propertyWillChange(this, 'firstObject'); }
-      if (isLast)  { Ember.propertyWillChange(this, 'lastObject'); }
-
-      // swap items - basically move the item to the end so it can be removed
-      if (idx < len-1) {
-        last = this[len-1];
-        this[idx] = last;
-        this[guidFor(last)] = idx;
-      }
-
-      delete this[guid];
-      delete this[len-1];
-      set(this, 'length', len-1);
-
-      if (isFirst) { Ember.propertyDidChange(this, 'firstObject'); }
-      if (isLast)  { Ember.propertyDidChange(this, 'lastObject'); }
-      this.enumerableContentDidChange(removed, null);
-    }
-
-    return this;
-  },
-
-  /** @private (nodoc) - optimized version */
-  contains: function(obj) {
-    return this[guidFor(obj)]>=0;
-  },
-
-  /** @private (nodoc) */
-  copy: function() {
-    var C = this.constructor, ret = new C(), loc = get(this, 'length');
-    set(ret, 'length', loc);
-    while(--loc>=0) {
-      ret[loc] = this[loc];
-      ret[guidFor(this[loc])] = loc;
-    }
-    return ret;
-  },
-
-  /** @private */
-  toString: function() {
-    var len = this.length, idx, array = [];
-    for(idx = 0; idx < len; idx++) {
-      array[idx] = this[idx];
-    }
-    return "Ember.Set<%@>".fmt(array.join(','));
-  }
-
-});
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Runtime
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-/**
-  @class
-
-  `Ember.Object` is the main base class for all Ember objects. It is a subclass
-  of `Ember.CoreObject` with the `Ember.Observable` mixin applied. For details,
-  see the documentation for each of these.
-
-  @extends Ember.CoreObject
-  @extends Ember.Observable
-*/
-Ember.Object = Ember.CoreObject.extend(Ember.Observable);
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Runtime
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-var indexOf = Ember.ArrayPolyfills.indexOf;
-
-/**
-  @private
-  A Namespace is an object usually used to contain other objects or methods
-  such as an application or framework.  Create a namespace anytime you want
-  to define one of these new containers.
-
-  # Example Usage
-
-      MyFramework = Ember.Namespace.create({
-        VERSION: '1.0.0'
-      });
-
-*/
-Ember.Namespace = Ember.Object.extend({
-  isNamespace: true,
-
-  init: function() {
-    Ember.Namespace.NAMESPACES.push(this);
-    Ember.Namespace.PROCESSED = false;
-  },
-
-  toString: function() {
-    Ember.identifyNamespaces();
-    return this[Ember.GUID_KEY+'_name'];
-  },
-
-  destroy: function() {
-    var namespaces = Ember.Namespace.NAMESPACES;
-    window[this.toString()] = undefined;
-    namespaces.splice(indexOf.call(namespaces, this), 1);
-    this._super();
-  }
-});
-
-Ember.Namespace.NAMESPACES = [Ember];
-Ember.Namespace.PROCESSED = false;
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Runtime
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-/**
-  @private
-
-  Defines a namespace that will contain an executable application.  This is
-  very similar to a normal namespace except that it is expected to include at
-  least a 'ready' function which can be run to initialize the application.
-
-  Currently Ember.Application is very similar to Ember.Namespace.  However, this
-  class may be augmented by additional frameworks so it is important to use
-  this instance when building new applications.
-
-  # Example Usage
-
-      MyApp = Ember.Application.create({
-        VERSION: '1.0.0',
-        store: Ember.Store.create().from(Ember.fixtures)
-      });
-
-      MyApp.ready = function() {
-        //..init code goes here...
-      }
-
-*/
-Ember.Application = Ember.Namespace.extend();
-
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Runtime
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-var get = Ember.get, set = Ember.set;
-
-/**
-  @class
-
-  An ArrayProxy wraps any other object that implements Ember.Array and/or
-  Ember.MutableArray, forwarding all requests. This makes it very useful for
-  a number of binding use cases or other cases where being able to swap
-  out the underlying array is useful.
-
-  A simple example of usage:
-
-      var pets = ['dog', 'cat', 'fish'];
-      var ap = Ember.ArrayProxy.create({ content: Ember.A(pets) });
-      ap.get('firstObject'); // => 'dog'
-      ap.set('content', ['amoeba', 'paramecium']);
-      ap.get('firstObject'); // => 'amoeba'
-
-  This class can also be useful as a layer to transform the contents of
-  an array, as they are accessed. This can be done by overriding
-  `objectAtContent`:
-
-      var pets = ['dog', 'cat', 'fish'];
-      var ap = Ember.ArrayProxy.create({
-          content: Ember.A(pets),
-          objectAtContent: function(idx) {
-              return this.get('content').objectAt(idx).toUpperCase();
-          }
-      });
-      ap.get('firstObject'); // => 'DOG'
-
-
-  @extends Ember.Object
-  @extends Ember.Array
-  @extends Ember.MutableArray
-*/
-Ember.ArrayProxy = Ember.Object.extend(Ember.MutableArray,
-/** @scope Ember.ArrayProxy.prototype */ {
-
-  /**
-    The content array.  Must be an object that implements Ember.Array and/or
-    Ember.MutableArray.
-
-    @type Ember.Array
-  */
-  content: null,
-
-  /**
-   The array that the proxy pretends to be. In the default `ArrayProxy`
-   implementation, this and `content` are the same. Subclasses of `ArrayProxy`
-   can override this property to provide things like sorting and filtering.
-  */
-  arrangedContent: Ember.computed('content', function() {
-    return get(this, 'content');
-  }).cacheable(),
-
-  /**
-    Should actually retrieve the object at the specified index from the
-    content. You can override this method in subclasses to transform the
-    content item to something new.
-
-    This method will only be called if content is non-null.
-
-    @param {Number} idx
-      The index to retrieve.
-
-    @returns {Object} the value or undefined if none found
-  */
-  objectAtContent: function(idx) {
-    return get(this, 'arrangedContent').objectAt(idx);
-  },
-
-  /**
-    Should actually replace the specified objects on the content array.
-    You can override this method in subclasses to transform the content item
-    into something new.
-
-    This method will only be called if content is non-null.
-
-    @param {Number} idx
-      The starting index
-
-    @param {Number} amt
-      The number of items to remove from the content.
-
-    @param {Array} objects
-      Optional array of objects to insert or null if no objects.
-
-    @returns {void}
-  */
-  replaceContent: function(idx, amt, objects) {
-    get(this, 'arrangedContent').replace(idx, amt, objects);
-  },
-
-  /**
-    Invoked when the content property is about to change. Notifies observers that the
-    entire array content will change.
-  */
-  _contentWillChange: Ember.beforeObserver(function() {
-    var content = get(this, 'content');
-
-    if (content) {
-      content.removeArrayObserver(this, {
-        willChange: 'contentArrayWillChange',
-        didChange: 'contentArrayDidChange'
-      });
-    }
-  }, 'content'),
-
-
-  contentArrayWillChange: Ember.K,
-  contentArrayDidChange: Ember.K,
-
-  /**
-    Invoked when the content property changes.  Notifies observers that the
-    entire array content has changed.
-  */
-  _contentDidChange: Ember.observer(function() {
-    var content = get(this, 'content'),
-        len     = content ? get(content, 'length') : 0;
-
-    Ember.assert("Can't set ArrayProxy's content to itself", content !== this);
-
-    if (content) {
-      content.addArrayObserver(this, {
-        willChange: 'contentArrayWillChange',
-        didChange: 'contentArrayDidChange'
-      });
-    }
-  }, 'content'),
-
-  _arrangedContentWillChange: Ember.beforeObserver(function() {
-    var arrangedContent = get(this, 'arrangedContent'),
-        len = arrangedContent ? get(arrangedContent, 'length') : 0;
-
-    this.arrangedContentArrayWillChange(this, 0, len, undefined);
-
-    if (arrangedContent) {
-      arrangedContent.removeArrayObserver(this, {
-        willChange: 'arrangedContentArrayWillChange',
-        didChange: 'arrangedContentArrayDidChange'
-      });
-    }
-  }, 'arrangedContent'),
-
-  _arrangedContentDidChange: Ember.observer(function() {
-    var arrangedContent = get(this, 'arrangedContent'),
-        len = arrangedContent ? get(arrangedContent, 'length') : 0;
-
-    Ember.assert("Can't set ArrayProxy's content to itself", arrangedContent !== this);
-
-    if (arrangedContent) {
-      arrangedContent.addArrayObserver(this, {
-        willChange: 'arrangedContentArrayWillChange',
-        didChange: 'arrangedContentArrayDidChange'
-      });
-    }
-
-    this.arrangedContentArrayDidChange(this, 0, undefined, len);
-  }, 'arrangedContent'),
-
-  /** @private (nodoc) */
-  objectAt: function(idx) {
-    return get(this, 'content') && this.objectAtContent(idx);
-  },
-
-  /** @private (nodoc) */
-  length: Ember.computed(function() {
-    var arrangedContent = get(this, 'arrangedContent');
-    return arrangedContent ? get(arrangedContent, 'length') : 0;
-    // No dependencies since Enumerable notifies length of change
-  }).property().cacheable(),
-
-  /** @private (nodoc) */
-  replace: function(idx, amt, objects) {
-    if (get(this, 'content')) this.replaceContent(idx, amt, objects);
-    return this;
-  },
-
-  /** @private (nodoc) */
-  arrangedContentArrayWillChange: function(item, idx, removedCnt, addedCnt) {
-    this.arrayContentWillChange(idx, removedCnt, addedCnt);
-  },
-
-  /** @private (nodoc) */
-  arrangedContentArrayDidChange: function(item, idx, removedCnt, addedCnt) {
-    this.arrayContentDidChange(idx, removedCnt, addedCnt);
-  },
-
-  /** @private (nodoc) */
-  init: function() {
-    this._super();
-    this._contentWillChange();
-    this._contentDidChange();
-    this._arrangedContentWillChange();
-    this._arrangedContentDidChange();
-  }
-
-});
-
-
-
-
-})();
-
-
-
-(function() {
-var get = Ember.get,
-    set = Ember.set,
-    fmt = Ember.String.fmt,
-    addBeforeObserver = Ember.addBeforeObserver,
-    addObserver = Ember.addObserver,
-    removeBeforeObserver = Ember.removeBeforeObserver,
-    removeObserver = Ember.removeObserver,
-    propertyWillChange = Ember.propertyWillChange,
-    propertyDidChange = Ember.propertyDidChange;
-
-function contentPropertyWillChange(content, contentKey) {
-  var key = contentKey.slice(8); // remove "content."
-  if (key in this) { return; }  // if shadowed in proxy
-  propertyWillChange(this, key);
-}
-
-function contentPropertyDidChange(content, contentKey) {
-  var key = contentKey.slice(8); // remove "content."
-  if (key in this) { return; } // if shadowed in proxy
-  propertyDidChange(this, key);
-}
-
-/**
-  @class
-
-  `Ember.ObjectProxy` forwards all properties not defined by the proxy itself
-  to a proxied `content` object.
-
-      object = Ember.Object.create({
-        name: 'Foo'
-      });
-      proxy = Ember.ObjectProxy.create({
-        content: object
-      });
-
-      // Access and change existing properties
-      proxy.get('name') // => 'Foo'
-      proxy.set('name', 'Bar');
-      object.get('name') // => 'Bar'
-
-      // Create new 'description' property on `object`
-      proxy.set('description', 'Foo is a whizboo baz');
-      object.get('description') // => 'Foo is a whizboo baz'
-
-  While `content` is unset, setting a property to be delegated will throw an Error.
-
-      proxy = Ember.ObjectProxy.create({
-        content: null,
-        flag: null
-      });
-      proxy.set('flag', true);
-      proxy.get('flag'); // => true
-      proxy.get('foo'); // => undefined
-      proxy.set('foo', 'data'); // throws Error
-
-  Delegated properties can be bound to and will change when content is updated.
-
-  Computed properties on the proxy itself can depend on delegated properties.
-
-      ProxyWithComputedProperty = Ember.ObjectProxy.extend({
-        fullName: function () {
-          var firstName = this.get('firstName'),
-              lastName = this.get('lastName');
-          if (firstName && lastName) {
-            return firstName + ' ' + lastName;
-          }
-          return firstName || lastName;
-        }.property('firstName', 'lastName')
-      });
-      proxy = ProxyWithComputedProperty.create();
-      proxy.get('fullName'); // => undefined
-      proxy.set('content', {
-        firstName: 'Tom', lastName: 'Dale'
-      }); // triggers property change for fullName on proxy
-      proxy.get('fullName'); // => 'Tom Dale'
-*/
-Ember.ObjectProxy = Ember.Object.extend(
-/** @scope Ember.ObjectProxy.prototype */ {
-  /**
-    The object whose properties will be forwarded.
-
-    @type Ember.Object
-    @default null
-  */
-  content: null,
-  _contentDidChange: Ember.observer(function() {
-    Ember.assert("Can't set ObjectProxy's content to itself", this.get('content') !== this);
-  }, 'content'),
-  /** @private */
-  willWatchProperty: function (key) {
-    var contentKey = 'content.' + key;
-    addBeforeObserver(this, contentKey, null, contentPropertyWillChange);
-    addObserver(this, contentKey, null, contentPropertyDidChange);
-  },
-  /** @private */
-  didUnwatchProperty: function (key) {
-    var contentKey = 'content.' + key;
-    removeBeforeObserver(this, contentKey, null, contentPropertyWillChange);
-    removeObserver(this, contentKey, null, contentPropertyDidChange);
-  },
-  /** @private */
-  unknownProperty: function (key) {
-    var content = get(this, 'content');
-    if (content) {
-      return get(content, key);
-    }
-  },
-  /** @private */
-  setUnknownProperty: function (key, value) {
-    var content = get(this, 'content');
-    Ember.assert(fmt("Cannot delegate set('%@', %@) to the 'content' property of object proxy %@: its 'content' is undefined.", [key, value, this]), content);
-    return set(content, key, value);
-  }
-});
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Runtime
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-var set = Ember.set, get = Ember.get, guidFor = Ember.guidFor;
-var forEach = Ember.EnumerableUtils.forEach;
-
-var EachArray = Ember.Object.extend(Ember.Array, {
-
-  init: function(content, keyName, owner) {
-    this._super();
-    this._keyName = keyName;
-    this._owner   = owner;
-    this._content = content;
-  },
-
-  objectAt: function(idx) {
-    var item = this._content.objectAt(idx);
-    return item && get(item, this._keyName);
-  },
-
-  length: Ember.computed(function() {
-    var content = this._content;
-    return content ? get(content, 'length') : 0;
-  }).property().cacheable()
-
-});
-
-var IS_OBSERVER = /^.+:(before|change)$/;
-
-/** @private */
-function addObserverForContentKey(content, keyName, proxy, idx, loc) {
-  var objects = proxy._objects, guid;
-  if (!objects) objects = proxy._objects = {};
-
-  while(--loc>=idx) {
-    var item = content.objectAt(loc);
-    if (item) {
-      Ember.addBeforeObserver(item, keyName, proxy, 'contentKeyWillChange');
-      Ember.addObserver(item, keyName, proxy, 'contentKeyDidChange');
-
-      // keep track of the indices each item was found at so we can map
-      // it back when the obj changes.
-      guid = guidFor(item);
-      if (!objects[guid]) objects[guid] = [];
-      objects[guid].push(loc);
-    }
-  }
-}
-
-/** @private */
-function removeObserverForContentKey(content, keyName, proxy, idx, loc) {
-  var objects = proxy._objects;
-  if (!objects) objects = proxy._objects = {};
-  var indices, guid;
-
-  while(--loc>=idx) {
-    var item = content.objectAt(loc);
-    if (item) {
-      Ember.removeBeforeObserver(item, keyName, proxy, 'contentKeyWillChange');
-      Ember.removeObserver(item, keyName, proxy, 'contentKeyDidChange');
-
-      guid = guidFor(item);
-      indices = objects[guid];
-      indices[indices.indexOf(loc)] = null;
-    }
-  }
-}
-
-/**
-  @private
-  @class
-
-  This is the object instance returned when you get the @each property on an
-  array.  It uses the unknownProperty handler to automatically create
-  EachArray instances for property names.
-
-  @extends Ember.Object
-*/
-Ember.EachProxy = Ember.Object.extend({
-
-  init: function(content) {
-    this._super();
-    this._content = content;
-    content.addArrayObserver(this);
-
-    // in case someone is already observing some keys make sure they are
-    // added
-    forEach(Ember.watchedEvents(this), function(eventName) {
-      this.didAddListener(eventName);
-    }, this);
-  },
-
-  /**
-    You can directly access mapped properties by simply requesting them.
-    The unknownProperty handler will generate an EachArray of each item.
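-
-    For example (a sketch; `people` is an illustrative Ember array):
-
-        people.get('@each.name'); // => an EachArray of each item's `name`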
-  */
-  unknownProperty: function(keyName, value) {
-    var ret;
-    ret = new EachArray(this._content, keyName, this);
-    Ember.defineProperty(this, keyName, null, ret);
-    this.beginObservingContentKey(keyName);
-    return ret;
-  },
-
-  // ..........................................................
-  // ARRAY CHANGES
-  // Invokes whenever the content array itself changes.
-
-  arrayWillChange: function(content, idx, removedCnt, addedCnt) {
-    var keys = this._keys, key, array, lim;
-
-    lim = removedCnt>0 ? idx+removedCnt : -1;
-    Ember.beginPropertyChanges(this);
-
-    for(key in keys) {
-      if (!keys.hasOwnProperty(key)) { continue; }
-
-      if (lim>0) removeObserverForContentKey(content, key, this, idx, lim);
-
-      Ember.propertyWillChange(this, key);
-    }
-
-    Ember.propertyWillChange(this._content, '@each');
-    Ember.endPropertyChanges(this);
-  },
-
-  arrayDidChange: function(content, idx, removedCnt, addedCnt) {
-    var keys = this._keys, key, array, lim;
-
-    lim = addedCnt>0 ? idx+addedCnt : -1;
-    Ember.beginPropertyChanges(this);
-
-    for(key in keys) {
-      if (!keys.hasOwnProperty(key)) { continue; }
-
-      if (lim>0) addObserverForContentKey(content, key, this, idx, lim);
-
-      Ember.propertyDidChange(this, key);
-    }
-
-    Ember.propertyDidChange(this._content, '@each');
-    Ember.endPropertyChanges(this);
-  },
-
-  // ..........................................................
-  // LISTEN FOR NEW OBSERVERS AND OTHER EVENT LISTENERS
-  // Start monitoring keys based on who is listening...
-
-  didAddListener: function(eventName) {
-    if (IS_OBSERVER.test(eventName)) {
-      this.beginObservingContentKey(eventName.slice(0, -7));
-    }
-  },
-
-  didRemoveListener: function(eventName) {
-    if (IS_OBSERVER.test(eventName)) {
-      this.stopObservingContentKey(eventName.slice(0, -7));
-    }
-  },
-
-  // ..........................................................
-  // CONTENT KEY OBSERVING
-  // Actual watch keys on the source content.
-
-  beginObservingContentKey: function(keyName) {
-    var keys = this._keys;
-    if (!keys) keys = this._keys = {};
-    if (!keys[keyName]) {
-      keys[keyName] = 1;
-      var content = this._content,
-          len = get(content, 'length');
-      addObserverForContentKey(content, keyName, this, 0, len);
-    } else {
-      keys[keyName]++;
-    }
-  },
-
-  stopObservingContentKey: function(keyName) {
-    var keys = this._keys;
-    if (keys && (keys[keyName]>0) && (--keys[keyName]<=0)) {
-      var content = this._content,
-          len     = get(content, 'length');
-      removeObserverForContentKey(content, keyName, this, 0, len);
-    }
-  },
-
-  contentKeyWillChange: function(obj, keyName) {
-    Ember.propertyWillChange(this, keyName);
-  },
-
-  contentKeyDidChange: function(obj, keyName) {
-    Ember.propertyDidChange(this, keyName);
-  }
-
-});
-
-
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Runtime
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-var get = Ember.get, set = Ember.set;
-
-// Add Ember.Array to Array.prototype.  Remove methods with native
-// implementations and supply some more optimized versions of generic methods
-// because they are so common.
-var NativeArray = Ember.Mixin.create(Ember.MutableArray, Ember.Observable, Ember.Copyable, {
-
-  // because length is a built-in property, we need to bypass the normal
-  // lookup and just get the original property.
-  get: function(key) {
-    if (key==='length') return this.length;
-    else if ('number' === typeof key) return this[key];
-    else return this._super(key);
-  },
-
-  objectAt: function(idx) {
-    return this[idx];
-  },
-
-  // primitive for array support.
-  replace: function(idx, amt, objects) {
-
-    if (this.isFrozen) throw Ember.FROZEN_ERROR ;
-
-    // notify listeners of the change, passing the starting index, the
-    // number of items removed, and the number of items added
-    var len = objects ? get(objects, 'length') : 0;
-    this.arrayContentWillChange(idx, amt, len);
-
-    if (!objects || objects.length === 0) {
-      this.splice(idx, amt) ;
-    } else {
-      var args = [idx, amt].concat(objects) ;
-      this.splice.apply(this,args) ;
-    }
-
-    this.arrayContentDidChange(idx, amt, len);
-    return this ;
-  },
-
-  // If you ask for an unknown property, then try to collect the value
-  // from member items.
-  unknownProperty: function(key, value) {
-    var ret;// = this.reducedProperty(key, value) ;
-    if ((value !== undefined) && ret === undefined) {
-      ret = this[key] = value;
-    }
-    return ret ;
-  },
-
-  // If the browser did not implement indexOf natively, override it with a
-  // specialized version
-  indexOf: function(object, startAt) {
-    var idx, len = this.length;
-
-    if (startAt === undefined) startAt = 0;
-    else startAt = (startAt < 0) ? Math.ceil(startAt) : Math.floor(startAt);
-    if (startAt < 0) startAt += len;
-
-    for(idx=startAt;idx<len;idx++) {
-      if (this[idx] === object) return idx ;
-    }
-    return -1;
-  },
-
-  lastIndexOf: function(object, startAt) {
-    var idx, len = this.length;
-
-    if (startAt === undefined) startAt = len-1;
-    else startAt = (startAt < 0) ? Math.ceil(startAt) : Math.floor(startAt);
-    if (startAt < 0) startAt += len;
-
-    for(idx=startAt;idx>=0;idx--) {
-      if (this[idx] === object) return idx ;
-    }
-    return -1;
-  },
-
-  copy: function() {
-    return this.slice();
-  }
-});
-
-// Remove any methods implemented natively so we don't override them
-var ignore = ['length'];
-Ember.EnumerableUtils.forEach(NativeArray.keys(), function(methodName) {
-  if (Array.prototype[methodName]) ignore.push(methodName);
-});
-
-if (ignore.length>0) {
-  NativeArray = NativeArray.without.apply(NativeArray, ignore);
-}
-
-/**
-  The NativeArray mixin contains the properties needed to make the native
-  Array support Ember.MutableArray and all of its dependent APIs.  Unless you
-  have Ember.EXTEND_PROTOTYPES set to false, this will be applied automatically.
-  Otherwise you can apply the mixin at any time by calling
-  `Ember.NativeArray.activate`.
-
-  @namespace
-  @extends Ember.MutableArray
-  @extends Ember.Array
-  @extends Ember.Enumerable
-  @extends Ember.MutableEnumerable
-  @extends Ember.Copyable
-  @extends Ember.Freezable
-*/
-Ember.NativeArray = NativeArray;
-
-/**
-  Creates an Ember.NativeArray from an Array-like object.
-  Does not modify the original object.
-
-  @returns {Ember.NativeArray}
-*/
-Ember.A = function(arr){
-  if (arr === undefined) { arr = []; }
-  return Ember.NativeArray.apply(arr);
-};
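-
-// Illustrative usage sketch (not part of the original source; the sample
-// values are hypothetical). Ember.A wraps a plain array so that the
-// KVO-aware MutableArray API is available even when EXTEND_PROTOTYPES
-// is false:
-//
-//     var colors = Ember.A(['red', 'green']);
-//     colors.pushObject('blue');   // observers of 'length' fire
-//     colors.objectAt(2);          // => 'blue'
-//     colors.get('length');        // => 3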
-
-/**
-  Activates the mixin on the Array.prototype if not already applied.  Calling
-  this method more than once is safe.
-
-  @returns {void}
-*/
-Ember.NativeArray.activate = function() {
-  NativeArray.apply(Array.prototype);
-
-  Ember.A = function(arr) { return arr || []; };
-};
-
-if (Ember.EXTEND_PROTOTYPES) Ember.NativeArray.activate();
-
-
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Runtime
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-var get = Ember.get, set = Ember.set;
-
-Ember._PromiseChain = Ember.Object.extend({
-  promises: null,
-  failureCallback: Ember.K,
-  successCallback: Ember.K,
-  abortCallback: Ember.K,
-  promiseSuccessCallback: Ember.K,
-
-  /**
-    @private
-  */
-  runNextPromise: function() {
-    if (get(this, 'isDestroyed')) { return; }
-
-    var item = get(this, 'promises').shiftObject();
-    if (item) {
-      var promise = get(item, 'promise') || item;
-      Ember.assert("Cannot find promise to invoke", Ember.canInvoke(promise, 'then'));
-
-      var self = this;
-
-      var successCallback = function() {
-        self.promiseSuccessCallback.call(this, item, arguments);
-        self.runNextPromise();
-      };
-
-      var failureCallback = get(self, 'failureCallback');
-
-      promise.then(successCallback, failureCallback);
-     } else {
-      this.successCallback();
-    }
-  },
-
-  start: function() {
-    this.runNextPromise();
-    return this;
-  },
-
-  abort: function() {
-    this.abortCallback();
-    this.destroy();
-  },
-
-  init: function() {
-    set(this, 'promises', Ember.A(get(this, 'promises')));
-    this._super();
-  }
-});
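-
-// Hedged usage sketch (not part of the original source; `promiseA` and
-// `promiseB` are hypothetical objects exposing a `then` method). The chain
-// runs its promises in order and invokes the supplied callbacks:
-//
-//     Ember._PromiseChain.create({
-//       promises: [promiseA, promiseB],
-//       successCallback: function() { console.log('all resolved'); },
-//       failureCallback: function() { console.log('a promise failed'); }
-//     }).start();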
-
-
-})();
-
-
-
-(function() {
-var loadHooks = {};
-var loaded = {};
-
-Ember.onLoad = function(name, callback) {
-  var object;
-
-  loadHooks[name] = loadHooks[name] || Ember.A();
-  loadHooks[name].pushObject(callback);
-
-  if (object = loaded[name]) {
-    callback(object);
-  }
-};
-
-Ember.runLoadHooks = function(name, object) {
-  var hooks;
-
-  loaded[name] = object;
-
-  if (hooks = loadHooks[name]) {
-    loadHooks[name].forEach(function(callback) {
-      callback(object);
-    });
-  }
-};
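-
-// Hedged usage sketch (not part of the original source). A hook registered
-// with Ember.onLoad fires when runLoadHooks is invoked for the same name,
-// or immediately if that name has already loaded; this file itself calls
-// Ember.runLoadHooks('application', ...) from Application#initialize:
-//
-//     Ember.onLoad('application', function(app) {
-//       // configure the (hypothetical) application instance here
-//     });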
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Runtime
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-
-})();
-
-
-
-(function() {
-Ember.ControllerMixin = Ember.Mixin.create({
-  /**
-    The object to which events from the view should be sent.
-
-    For example, when a Handlebars template uses the `{{action}}` helper,
-    it will attempt to send the event to the view's controller's `target`.
-
-    By default, a controller's `target` is set to the router after it is
-    instantiated by `Ember.Application#initialize`.
-  */
-  target: null,
-  store: null
-});
-
-Ember.Controller = Ember.Object.extend(Ember.ControllerMixin);
-
-})();
-
-
-
-(function() {
-var get = Ember.get, set = Ember.set, forEach = Ember.EnumerableUtils.forEach;
-
-/**
- @class
-
- Ember.SortableMixin provides a standard interface for array proxies to
- specify a sort order, keeping `arrangedContent` sorted as items are
- added, removed, or modified.
-
- @extends Ember.Mixin
- @extends Ember.MutableEnumerable
-*/
-Ember.SortableMixin = Ember.Mixin.create(Ember.MutableEnumerable,
-  /** @scope Ember.SortableMixin.prototype */ {
-  sortProperties: null,
-  sortAscending: true,
-
-  addObject: function(obj) {
-    var content = get(this, 'content');
-    content.pushObject(obj);
-  },
-
-  removeObject: function(obj) {
-    var content = get(this, 'content');
-    content.removeObject(obj);
-  },
-
-  orderBy: function(item1, item2) {
-    var result = 0,
-        sortProperties = get(this, 'sortProperties'),
-        sortAscending = get(this, 'sortAscending');
-
-    Ember.assert("you need to define `sortProperties`", !!sortProperties);
-
-    forEach(sortProperties, function(propertyName) {
-      if (result === 0) {
-        result = Ember.compare(get(item1, propertyName), get(item2, propertyName));
-        if ((result !== 0) && !sortAscending) {
-          result = (-1) * result;
-        }
-      }
-    });
-
-    return result;
-  },
-
-  destroy: function() {
-    var content = get(this, 'content'),
-        sortProperties = get(this, 'sortProperties');
-
-    if (content && sortProperties) {
-      forEach(content, function(item) {
-        forEach(sortProperties, function(sortProperty) {
-          Ember.removeObserver(item, sortProperty, this, 'contentItemSortPropertyDidChange');
-        }, this);
-      }, this);
-    }
-
-    return this._super();
-  },
-
-  isSorted: Ember.computed('sortProperties', function() {
-    return !!get(this, 'sortProperties');
-  }),
-
-  arrangedContent: Ember.computed('content', 'sortProperties.@each', function(key, value) {
-    var content = get(this, 'content'),
-        isSorted = get(this, 'isSorted'),
-        sortProperties = get(this, 'sortProperties'),
-        self = this;
-
-    if (content && isSorted) {
-      content = content.slice();
-      content.sort(function(item1, item2) {
-        return self.orderBy(item1, item2);
-      });
-      forEach(content, function(item) {
-        forEach(sortProperties, function(sortProperty) {
-          Ember.addObserver(item, sortProperty, this, 'contentItemSortPropertyDidChange');
-        }, this);
-      }, this);
-      return Ember.A(content);
-    }
-
-    return content;
-  }).cacheable(),
-
-  _contentWillChange: Ember.beforeObserver(function() {
-    var content = get(this, 'content'),
-        sortProperties = get(this, 'sortProperties');
-
-    if (content && sortProperties) {
-      forEach(content, function(item) {
-        forEach(sortProperties, function(sortProperty) {
-          Ember.removeObserver(item, sortProperty, this, 'contentItemSortPropertyDidChange');
-        }, this);
-      }, this);
-    }
-
-    this._super();
-  }, 'content'),
-
-  sortAscendingWillChange: Ember.beforeObserver(function() {
-    this._lastSortAscending = get(this, 'sortAscending');
-  }, 'sortAscending'),
-
-  sortAscendingDidChange: Ember.observer(function() {
-    if (get(this, 'sortAscending') !== this._lastSortAscending) {
-      var arrangedContent = get(this, 'arrangedContent');
-      arrangedContent.reverseObjects();
-    }
-  }, 'sortAscending'),
-
-  contentArrayWillChange: function(array, idx, removedCount, addedCount) {
-    var isSorted = get(this, 'isSorted');
-
-    if (isSorted) {
-      var arrangedContent = get(this, 'arrangedContent');
-      var removedObjects = array.slice(idx, idx+removedCount);
-      var sortProperties = get(this, 'sortProperties');
-
-      forEach(removedObjects, function(item) {
-        arrangedContent.removeObject(item);
-
-        forEach(sortProperties, function(sortProperty) {
-          Ember.removeObserver(item, sortProperty, this, 'contentItemSortPropertyDidChange');
-        }, this);
-      });
-    }
-
-    return this._super(array, idx, removedCount, addedCount);
-  },
-
-  contentArrayDidChange: function(array, idx, removedCount, addedCount) {
-    var isSorted = get(this, 'isSorted'),
-        sortProperties = get(this, 'sortProperties');
-
-    if (isSorted) {
-      var addedObjects = array.slice(idx, idx+addedCount);
-      var arrangedContent = get(this, 'arrangedContent');
-
-      forEach(addedObjects, function(item) {
-        this.insertItemSorted(item);
-
-        forEach(sortProperties, function(sortProperty) {
-          Ember.addObserver(item, sortProperty, this, 'contentItemSortPropertyDidChange');
-        }, this);
-      }, this);
-    }
-
-    return this._super(array, idx, removedCount, addedCount);
-  },
-
-  insertItemSorted: function(item) {
-    var arrangedContent = get(this, 'arrangedContent');
-    var length = get(arrangedContent, 'length');
-
-    var idx = this._binarySearch(item, 0, length);
-    arrangedContent.insertAt(idx, item);
-  },
-
-  contentItemSortPropertyDidChange: function(item) {
-    var arrangedContent = get(this, 'arrangedContent'),
-        index = arrangedContent.indexOf(item);
-
-    arrangedContent.removeObject(item);
-    this.insertItemSorted(item);
-  },
-
-  _binarySearch: function(item, low, high) {
-    var mid, midItem, res, arrangedContent;
-
-    if (low === high) {
-      return low;
-    }
-
-    arrangedContent = get(this, 'arrangedContent');
-
-    mid = low + Math.floor((high - low) / 2);
-    midItem = arrangedContent.objectAt(mid);
-
-    res = this.orderBy(midItem, item);
-
-    if (res < 0) {
-      return this._binarySearch(item, mid+1, high);
-    } else if (res > 0) {
-      return this._binarySearch(item, low, mid);
-    }
-
-    return mid;
-  }
-});
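-
-// Hedged usage sketch (not part of the original source; `App.PeopleController`
-// and the property names are hypothetical). SortableMixin keeps
-// `arrangedContent` ordered by `sortProperties` and re-sorts an item when
-// one of its observed sort properties changes:
-//
-//     App.PeopleController = Ember.ArrayController.extend({
-//       sortProperties: ['lastName', 'firstName'],
-//       sortAscending: true
-//     });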
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Runtime
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-var get = Ember.get, set = Ember.set;
-
-/**
-  @class
-
-  Ember.ArrayController provides a way for you to publish a collection of objects
-  so that you can easily bind to the collection from a Handlebars #each helper,
-  an Ember.CollectionView, or other controllers.
-
-  The advantage of using an ArrayController is that you only have to set up
-  your view bindings once; to change what's displayed, simply swap out the
-  `content` property on the controller.
-
-  For example, imagine you wanted to display a list of items fetched via an XHR
-  request. Create an Ember.ArrayController and set its `content` property:
-
-      MyApp.listController = Ember.ArrayController.create();
-
-      $.get('people.json', function(data) {
-        MyApp.listController.set('content', data);
-      });
-
-  Then, create a view that binds to your new controller:
-
-      {{#each MyApp.listController}}
-        {{firstName}} {{lastName}}
-      {{/each}}
-
-  Although you are binding to the controller, the behavior of this controller
-  is to pass through any methods or properties to the underlying array. This
-  capability comes from `Ember.ArrayProxy`, which this class inherits from.
-
-  Note: As of this writing, `ArrayController` does not add any functionality
-  to its superclass, `ArrayProxy`. The Ember team plans to add additional
-  controller-specific functionality in the future, e.g. single or multiple
-  selection support. If you are creating something that is conceptually a
-  controller, use this class.
-
-  @extends Ember.ArrayProxy
-*/
-
-Ember.ArrayController = Ember.ArrayProxy.extend(Ember.ControllerMixin,
-  Ember.SortableMixin);
-
-})();
-
-
-
-(function() {
-Ember.ObjectController = Ember.ObjectProxy.extend(Ember.ControllerMixin);
-
-})();
-
-
-
-(function() {
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Runtime
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-
-})();
-
-(function() {
-// ==========================================================================
-// Project:   Ember - JavaScript Application Framework
-// Copyright: ©2006-2011 Strobe Inc. and contributors.
-//            Portions ©2008-2011 Apple Inc. All rights reserved.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-
-var get = Ember.get, set = Ember.set;
-
-/**
-  @class
-
-  An Ember.Application instance serves as the namespace in which you define your
-  application's classes. You can also override the configuration of your
-  application.
-
-  By default, Ember.Application will begin listening for events on the document.
-  If your application is embedded inside a page, instead of controlling the
-  entire document, you can specify which DOM element to attach to by setting
-  the `rootElement` property:
-
-      MyApp = Ember.Application.create({
-        rootElement: $('#my-app')
-      });
-
-  The root of an Ember.Application must not be removed during the course of the
-  page's lifetime. If you have only a single conceptual application for the
-  entire page, and are not embedding any third-party Ember applications
-  in your page, use the default document root for your application.
-
-  You only need to specify the root if your page contains multiple instances
-  of Ember.Application.
-
-  @extends Ember.Object
-*/
-Ember.Application = Ember.Namespace.extend(
-/** @scope Ember.Application.prototype */{
-
-  /**
-    The root DOM element of the Application.
-
-    Can be specified as DOMElement or a selector string.
-
-    @type DOMElement
-    @default 'body'
-  */
-  rootElement: 'body',
-
-  /**
-    @type Ember.EventDispatcher
-    @default null
-  */
-  eventDispatcher: null,
-
-  /**
-    @type Object
-    @default null
-  */
-  customEvents: null,
-
-  /** @private */
-  init: function() {
-    var eventDispatcher,
-        rootElement = get(this, 'rootElement');
-    this._super();
-
-    eventDispatcher = Ember.EventDispatcher.create({
-      rootElement: rootElement
-    });
-
-    set(this, 'eventDispatcher', eventDispatcher);
-
-    // jQuery 1.7 doesn't call the ready callback if already ready
-    if (Ember.$.isReady) {
-      Ember.run.once(this, this.didBecomeReady);
-    } else {
-      var self = this;
-      Ember.$(document).ready(function() {
-        Ember.run.once(self, self.didBecomeReady);
-      });
-    }
-  },
-
-  /**
-    Instantiate all controllers currently available on the namespace
-    and inject them onto a router.
-
-    Example:
-
-        App.PostsController = Ember.ArrayController.extend();
-        App.CommentsController = Ember.ArrayController.extend();
-
-        var router = Ember.Router.create({
-          ...
-        });
-
-        App.initialize(router);
-
-        router.get('postsController')     // <App.PostsController:ember1234>
-        router.get('commentsController')  // <App.CommentsController:ember1235>
-
-        router.get('postsController.router') // router
-  */
-  initialize: function(router) {
-    var properties = Ember.A(Ember.keys(this)),
-        injections = get(this.constructor, 'injections'),
-        namespace = this, controller, name;
-
-    if (!router && Ember.Router.detect(namespace['Router'])) {
-      router = namespace['Router'].create();
-      this._createdRouter = router;
-    }
-
-    if (router) {
-      set(this, 'router', router);
-
-      // By default, the router's namespace is the current application.
-      //
-      // This allows it to find model classes when a state has a
-      // route like `/posts/:post_id`. In that case, it would first
-      // convert `post_id` into `Post`, and then look it up on its
-      // namespace.
-      set(router, 'namespace', this);
-    }
-
-    Ember.runLoadHooks('application', this);
-
-    injections.forEach(function(injection) {
-      properties.forEach(function(property) {
-        injection[1](namespace, router, property);
-      });
-    });
-
-    if (router && router instanceof Ember.Router) {
-      this.startRouting(router);
-    }
-  },
-
-  /** @private */
-  didBecomeReady: function() {
-    var eventDispatcher = get(this, 'eventDispatcher'),
-        customEvents    = get(this, 'customEvents');
-
-    eventDispatcher.setup(customEvents);
-
-    this.ready();
-  },
-
-  /**
-    @private
-
-    If the application has a router, use it to route to the current URL, and
-    trigger a new call to `route` whenever the URL changes.
-  */
-  startRouting: function(router) {
-    var location = get(router, 'location'),
-        rootElement = get(this, 'rootElement'),
-        applicationController = get(router, 'applicationController');
-
-    Ember.assert("ApplicationView and ApplicationController must be defined on your application", (this.ApplicationView && applicationController) );
-
-    var applicationView = this.ApplicationView.create({
-      controller: applicationController
-    });
-    this._createdApplicationView = applicationView;
-
-    applicationView.appendTo(rootElement);
-
-    router.route(location.getURL());
-    location.onUpdateURL(function(url) {
-      router.route(url);
-    });
-  },
-
-  /**
-    Called when the Application has become ready.
-    The call will be delayed until the DOM has become ready.
-  */
-  ready: Ember.K,
-
-  /** @private */
-  willDestroy: function() {
-    get(this, 'eventDispatcher').destroy();
-    if (this._createdRouter)          { this._createdRouter.destroy(); }
-    if (this._createdApplicationView) { this._createdApplicationView.destroy(); }
-  },
-
-  registerInjection: function(options) {
-    this.constructor.registerInjection(options);
-  }
-});
-
-Ember.Application.reopenClass({
-  concatenatedProperties: ['injections'],
-  injections: Ember.A(),
-  registerInjection: function(options) {
-    var injections = get(this, 'injections'),
-        before = options.before,
-        name = options.name,
-        injection = options.injection,
-        location;
-
-    if (before) {
-      location = injections.find(function(item) {
-        if (item[0] === before) { return true; }
-      });
-      location = injections.indexOf(location);
-    } else {
-      location = get(injections, 'length');
-    }
-
-    injections.splice(location, 0, [name, injection]);
-  }
-});
-
-Ember.Application.registerInjection({
-  name: 'controllers',
-  injection: function(app, router, property) {
-    if (!/^[A-Z].*Controller$/.test(property)) { return; }
-
-    var name = property.charAt(0).toLowerCase() + property.substr(1),
-        controller = app[property].create();
-
-    router.set(name, controller);
-
-    controller.setProperties({
-      target: router,
-      controllers: router,
-      namespace: app
-    });
-  }
-});
-
-})();
-
-
-
-(function() {
-var get = Ember.get, set = Ember.set;
-
-/**
-  This file implements the `location` API used by Ember's router.
-
-  That API is:
-
-  getURL: returns the current URL
-  setURL(path): sets the current URL
-  onUpdateURL(callback): triggers the callback when the URL changes
-  formatURL(url): formats `url` to be placed into `href` attribute
-
-  Calling setURL will not trigger onUpdateURL callbacks.
-
-  TODO: This, as well as the Ember.Location documentation below, should
-  perhaps be moved so that it's visible in the JsDoc output.
-*/
-/**
-  @class
-
-  Ember.Location returns an instance of the correct implementation of
-  the `location` API.
-
-  You can pass it an `implementation` option ('hash', 'history', or 'none')
-  to force a particular implementation.
-*/
-Ember.Location = {
-  create: function(options) {
-    var implementation = options && options.implementation;
-    Ember.assert("Ember.Location.create: you must specify a 'implementation' option", !!implementation);
-
-    var implementationClass = this.implementations[implementation];
-    Ember.assert("Ember.Location.create: " + implementation + " is not a valid implementation", !!implementationClass);
-
-    return implementationClass.create.apply(implementationClass, arguments);
-  },
-
-  registerImplementation: function(name, implementation) {
-    this.implementations[name] = implementation;
-  },
-
-  implementations: {}
-};
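-
-// Hedged usage sketch (not part of the original source). Implementations
-// register themselves by name, and Ember.Location.create instantiates the
-// one requested via the `implementation` option:
-//
-//     var loc = Ember.Location.create({ implementation: 'hash' });
-//     loc.setURL('/posts');
-//     loc.getURL();   // => '/posts'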
-
-})();
-
-
-
-(function() {
-var get = Ember.get, set = Ember.set;
-
-/**
-  @class
-
-  Ember.HashLocation implements the location API using the browser's
-  hash. At present, it relies on a hashchange event existing in the
-  browser.
-
-  @extends Ember.Object
-*/
-Ember.HashLocation = Ember.Object.extend(
-/** @scope Ember.HashLocation.prototype */ {
-
-  /** @private */
-  init: function() {
-    set(this, 'location', get(this, 'location') || window.location);
-  },
-
-  /**
-    @private
-
-    Returns the current `location.hash`, minus the '#' at the front.
-  */
-  getURL: function() {
-    return get(this, 'location').hash.substr(1);
-  },
-
-  /**
-    @private
-
-    Sets the `location.hash` and remembers what was set. This prevents
-    `onUpdateURL` callbacks from triggering when the hash was set by
-    `HashLocation`.
-  */
-  setURL: function(path) {
-    get(this, 'location').hash = path;
-    set(this, 'lastSetURL', path);
-  },
-
-  /**
-    @private
-
-    Register a callback to be invoked when the hash changes. These
-    callbacks will execute when the user presses the back or forward
-    button, but not after `setURL` is invoked.
-  */
-  onUpdateURL: function(callback) {
-    var self = this;
-    var guid = Ember.guidFor(this);
-
-    Ember.$(window).bind('hashchange.ember-location-'+guid, function() {
-      var path = location.hash.substr(1);
-      if (get(self, 'lastSetURL') === path) { return; }
-
-      set(self, 'lastSetURL', null);
-
-      callback(path);
-    });
-  },
-
-  /**
-    @private
-
-    Given a URL, formats it to be placed into the page as part
-    of an element's `href` attribute.
-
-    This is used, for example, when using the {{action}} helper
-    to generate a URL based on an event.
-  */
-  formatURL: function(url) {
-    return '#'+url;
-  },
-
-  /** @private */
-  willDestroy: function() {
-    var guid = Ember.guidFor(this);
-
-    Ember.$(window).unbind('hashchange.ember-location-'+guid);
-  }
-});
-
-Ember.Location.registerImplementation('hash', Ember.HashLocation);
-
-})();
-
-
-
-(function() {
-var get = Ember.get, set = Ember.set;
-
-/**
-  @class
-
-  Ember.HistoryLocation implements the location API using the browser's
-  history.pushState API.
-
-  @extends Ember.Object
-*/
-Ember.HistoryLocation = Ember.Object.extend(
-/** @scope Ember.HistoryLocation.prototype */ {
-
-  /** @private */
-  init: function() {
-    set(this, 'location', get(this, 'location') || window.location);
-    set(this, '_initialURL', get(this, 'location').pathname);
-  },
-
-  /**
-    Will be prepended to the path upon a state change
-   */
-  rootURL: '/',
-
-  /**
-    @private
-
-    Used to give history a starting reference
-   */
-  _initialURL: null,
-
-  /**
-    @private
-
-    Returns the current `location.pathname`.
-  */
-  getURL: function() {
-    return get(this, 'location').pathname;
-  },
-
-  /**
-    @private
-
-    Uses `history.pushState` to update the url without a page reload.
-  */
-  setURL: function(path) {
-    var state = window.history.state,
-        initialURL = get(this, '_initialURL');
-
-    path = this.formatPath(path);
-
-    if ((initialURL !== path && !state) || (state && state.path !== path)) {
-      window.history.pushState({ path: path }, null, path);
-    }
-  },
-
-  /**
-    @private
-
-    Register a callback to be invoked whenever the browser
-    history changes, including using forward and back buttons.
-  */
-  onUpdateURL: function(callback) {
-    var guid = Ember.guidFor(this);
-
-    Ember.$(window).bind('popstate.ember-location-'+guid, function(e) {
-      callback(location.pathname);
-    });
-  },
-
-  /**
-    @private
-
-    Returns the given path appended to rootURL.
-   */
-  formatPath: function(path) {
-    var rootURL = get(this, 'rootURL');
-
-    if (path !== '') {
-      rootURL = rootURL.replace(/\/$/, '');
-    }
-
-    return rootURL + path;
-  },
-
-  /**
-    @private
-
-    Used by the {{action}} helper.  Since no formatting
-    is required, we just return the URL given.
-  */
-  formatURL: function(url) {
-    return url;
-  },
-
-  /** @private */
-  willDestroy: function() {
-    var guid = Ember.guidFor(this);
-
-    Ember.$(window).unbind('popstate.ember-location-'+guid);
-  }
-});
-
-Ember.Location.registerImplementation('history', Ember.HistoryLocation);
-
-})();
-
-
-
-(function() {
-var get = Ember.get, set = Ember.set;
-
-/**
-  @class
-
-  Ember.NoneLocation does not interact with the browser. It is useful for
-  testing, or when you need to manage state with your Router, but temporarily
-  don't want it to muck with the URL (for example when you embed your
-  application in a larger page).
-
-  @extends Ember.Object
-*/
-Ember.NoneLocation = Ember.Object.extend(
-/** @scope Ember.NoneLocation.prototype */ {
-  path: '',
-
-  getURL: function() {
-    return get(this, 'path');
-  },
-
-  setURL: function(path) {
-    set(this, 'path', path);
-  },
-
-  onUpdateURL: function(callback) {
-    // We are not wired up to the browser, so we'll never trigger the callback.
-  },
-
-  formatURL: function(url) {
-    // The return value is not overly meaningful, but we do not want to throw
-    // errors when test code renders templates containing {{action href=true}}
-    // helpers.
-    return url;
-  }
-});
-
-Ember.Location.registerImplementation('none', Ember.NoneLocation);
-
-})();
-
-
-
-(function() {
-
-})();
-
-
-
-(function() {
-
-})();
-
-(function() {
-// ==========================================================================
-// Project:   Ember - JavaScript Application Framework
-// Copyright: ©2006-2011 Strobe Inc. and contributors.
-//            Portions ©2008-2011 Apple Inc. All rights reserved.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-
-Ember.assert("Ember Views require jQuery 1.7", window.jQuery && (window.jQuery().jquery.match(/^1\.7(\.\d+)?(pre|rc\d?)?/) || Ember.ENV.FORCE_JQUERY));
-Ember.$ = window.jQuery;
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:   Ember - JavaScript Application Framework
-// Copyright: ©2006-2011 Strobe Inc. and contributors.
-//            Portions ©2008-2011 Apple Inc. All rights reserved.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-
-// http://www.whatwg.org/specs/web-apps/current-work/multipage/dnd.html#dndevents
-var dragEvents = Ember.String.w('dragstart drag dragenter dragleave dragover drop dragend');
-
-// Copies the `dataTransfer` property from a browser event object onto the
-// jQuery event object for the specified events
-Ember.EnumerableUtils.forEach(dragEvents, function(eventName) {
-  Ember.$.event.fixHooks[eventName] = { props: ['dataTransfer'] };
-});
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:   Ember - JavaScript Application Framework
-// Copyright: ©2006-2011 Strobe Inc. and contributors.
-//            Portions ©2008-2011 Apple Inc. All rights reserved.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-
-var get = Ember.get, set = Ember.set;
-var indexOf = Ember.ArrayPolyfills.indexOf;
-
-/** @private */
-var ClassSet = function() {
-  this.seen = {};
-  this.list = [];
-};
-
-ClassSet.prototype = {
-  add: function(string) {
-    if (string in this.seen) { return; }
-    this.seen[string] = true;
-
-    this.list.push(string);
-  },
-
-  toDOM: function() {
-    return this.list.join(" ");
-  }
-};
-
-/**
-  @class
-
-  Ember.RenderBuffer gathers information regarding a view and generates the
-  final representation. Ember.RenderBuffer will generate HTML which can be pushed
-  to the DOM.
-
-  @extends Ember.Object
-*/
-Ember.RenderBuffer = function(tagName) {
-  return new Ember._RenderBuffer(tagName);
-};
-
-Ember._RenderBuffer = function(tagName) {
-  this.elementTag = tagName;
-  this.childBuffers = [];
-};
-
-Ember._RenderBuffer.prototype =
-/** @scope Ember.RenderBuffer.prototype */ {
-
-  /**
-    Array of class-names which will be applied in the class="" attribute
-
-    You should not maintain this array yourself; rather, you should use
-    the addClass() method of Ember.RenderBuffer.
-
-    @type Array
-    @default []
-  */
-  elementClasses: null,
-
-  /**
-    The id of the element, to be applied in the id="" attribute
-
-    You should not set this property yourself; rather, you should use
-    the id() method of Ember.RenderBuffer.
-
-    @type String
-    @default null
-  */
-  elementId: null,
-
-  /**
-    A hash keyed on the name of the attribute and whose value will be
-    applied to that attribute. For example, if you wanted to apply a
-    data-view="Foo.bar" property to an element, you would set the
-    elementAttributes hash to {'data-view':'Foo.bar'}
-
-    You should not maintain this hash yourself; rather, you should use
-    the attr() method of Ember.RenderBuffer.
-
-    @type Hash
-    @default {}
-  */
-  elementAttributes: null,
-
-  /**
-    The tagname of the element an instance of Ember.RenderBuffer represents.
-
-    Usually, this gets set as the first parameter to Ember.RenderBuffer. For
-    example, if you wanted to create a `p` tag, then you would call
-
-      Ember.RenderBuffer('p')
-
-    @type String
-    @default null
-  */
-  elementTag: null,
-
-  /**
-    A hash keyed on the name of the style attribute and whose value will
-    be applied to that attribute. For example, if you wanted to apply a
-    background-color:black;" style to an element, you would set the
-    elementStyle hash to {'background-color':'black'}
-
-    You should not maintain this hash yourself; rather, you should use
-    the style() method of Ember.RenderBuffer.
-
-    @type Hash
-    @default {}
-  */
-  elementStyle: null,
-
-  /**
-    Nested RenderBuffers will set this to their parent RenderBuffer
-    instance.
-
-    @type Ember._RenderBuffer
-  */
-  parentBuffer: null,
-
-  /**
-    Adds a string of HTML to the RenderBuffer.
-
-    @param {String} string HTML to push into the buffer
-    @returns {Ember.RenderBuffer} this
-  */
-  push: function(string) {
-    this.childBuffers.push(String(string));
-    return this;
-  },
-
-  /**
-    Adds a class to the buffer, which will be rendered to the class attribute.
-
-    @param {String} className Class name to add to the buffer
-    @returns {Ember.RenderBuffer} this
-  */
-  addClass: function(className) {
-    // lazily create elementClasses
-    var elementClasses = this.elementClasses = (this.elementClasses || new ClassSet());
-    this.elementClasses.add(className);
-
-    return this;
-  },
-
-  /**
-    Sets the elementID to be used for the element.
-
-    @param {String} id
-    @returns {Ember.RenderBuffer} this
-  */
-  id: function(id) {
-    this.elementId = id;
-    return this;
-  },
-
-  // duck type attribute functionality like jQuery so a render buffer
-  // can be used like a jQuery object in attribute binding scenarios.
-
-  /**
-    Adds an attribute which will be rendered to the element.
-
-    @param {String} name The name of the attribute
-    @param {String} value The value to add to the attribute
-    @returns {Ember.RenderBuffer|String} this or the current attribute value
-  */
-  attr: function(name, value) {
-    var attributes = this.elementAttributes = (this.elementAttributes || {});
-
-    if (arguments.length === 1) {
-      return attributes[name];
-    } else {
-      attributes[name] = value;
-    }
-
-    return this;
-  },
-
-  /**
-    Remove an attribute from the list of attributes to render.
-
-    @param {String} name The name of the attribute
-    @returns {Ember.RenderBuffer} this
-  */
-  removeAttr: function(name) {
-    var attributes = this.elementAttributes;
-    if (attributes) { delete attributes[name]; }
-
-    return this;
-  },
-
-  /**
-    Adds a style to the style attribute which will be rendered to the element.
-
-    @param {String} name Name of the style
-    @param {String} value
-    @returns {Ember.RenderBuffer} this
-  */
-  style: function(name, value) {
-    var style = this.elementStyle = (this.elementStyle || {});
-
-    this.elementStyle[name] = value;
-    return this;
-  },
-
-  /**
-    Create a new child render buffer from a parent buffer. Optionally set
-    additional properties on the buffer. Optionally invoke a callback
-    with the newly created buffer.
-
-    This is a primitive method used by other public methods: `begin`,
-    `prepend`, `replaceWith`, `insertAfter`.
-
-    @private
-    @param {String} tagName Tag name to use for the child buffer's element
-    @param {Ember._RenderBuffer} parent The parent render buffer that this
-      buffer should be appended to.
-    @param {Function} fn A callback to invoke with the newly created buffer.
-    @param {Object} other Additional properties to add to the newly created
-      buffer.
-  */
-  newBuffer: function(tagName, parent, fn, other) {
-    var buffer = new Ember._RenderBuffer(tagName);
-    buffer.parentBuffer = parent;
-
-    if (other) { Ember.$.extend(buffer, other); }
-    if (fn) { fn.call(this, buffer); }
-
-    return buffer;
-  },
-
-  /**
-    Replace the current buffer with a new buffer. This is a primitive
-    used by `remove`, which passes `null` for `newBuffer`, and `replaceWith`,
-    which passes the new buffer it created.
-
-    @private
-    @param {Ember._RenderBuffer} buffer The buffer to insert in place of
-      the existing buffer.
-  */
-  replaceWithBuffer: function(newBuffer) {
-    var parent = this.parentBuffer;
-    if (!parent) { return; }
-
-    var childBuffers = parent.childBuffers;
-
-    var index = indexOf.call(childBuffers, this);
-
-    if (newBuffer) {
-      childBuffers.splice(index, 1, newBuffer);
-    } else {
-      childBuffers.splice(index, 1);
-    }
-  },
-
-  /**
-    Creates a new Ember.RenderBuffer object with the provided tagName as
-    the element tag and with its parentBuffer property set to the current
-    Ember.RenderBuffer.
-
-    @param {String} tagName Tag name to use for the child buffer's element
-    @returns {Ember.RenderBuffer} A new RenderBuffer object
-  */
-  begin: function(tagName) {
-    return this.newBuffer(tagName, this, function(buffer) {
-      this.childBuffers.push(buffer);
-    });
-  },
-
-  /**
-    Prepend a new child buffer to the current render buffer.
-
-    @param {String} tagName Tag name to use for the child buffer's element
-  */
-  prepend: function(tagName) {
-    return this.newBuffer(tagName, this, function(buffer) {
-      this.childBuffers.splice(0, 0, buffer);
-    });
-  },
-
-  /**
-    Replace the current buffer with a new render buffer.
-
-    @param {String} tagName Tag name to use for the new buffer's element
-  */
-  replaceWith: function(tagName) {
-    var parentBuffer = this.parentBuffer;
-
-    return this.newBuffer(tagName, parentBuffer, function(buffer) {
-      this.replaceWithBuffer(buffer);
-    });
-  },
-
-  /**
-    Insert a new render buffer after the current render buffer.
-
-    @param {String} tagName Tag name to use for the new buffer's element
-  */
-  insertAfter: function(tagName) {
-    var parentBuffer = get(this, 'parentBuffer');
-
-    return this.newBuffer(tagName, parentBuffer, function(buffer) {
-      var siblings = parentBuffer.childBuffers;
-      var index = indexOf.call(siblings, this);
-      siblings.splice(index + 1, 0, buffer);
-    });
-  },
-
-  /**
-    Closes the current buffer and adds its content to the parentBuffer.
-
-    @returns {Ember.RenderBuffer} The parentBuffer, if one exists. Otherwise, this
-  */
-  end: function() {
-    var parent = this.parentBuffer;
-    return parent || this;
-  },
-
-  remove: function() {
-    this.replaceWithBuffer(null);
-  },
-
-  /**
-    @returns {DOMElement} The element corresponding to the generated HTML
-      of this buffer
-  */
-  element: function() {
-    return Ember.$(this.string())[0];
-  },
-
-  /**
-    Generates the HTML content for this buffer.
-
-    @returns {String} The generated HTML
-  */
-  string: function() {
-    var content = '', tag = this.elementTag, openTag;
-
-    if (tag) {
-      var id = this.elementId,
-          classes = this.elementClasses,
-          attrs = this.elementAttributes,
-          style = this.elementStyle,
-          styleBuffer = '', prop;
-
-      openTag = ["<" + tag];
-
-      if (id) { openTag.push('id="' + this._escapeAttribute(id) + '"'); }
-      if (classes) { openTag.push('class="' + this._escapeAttribute(classes.toDOM()) + '"'); }
-
-      if (style) {
-        for (prop in style) {
-          if (style.hasOwnProperty(prop)) {
-            styleBuffer += (prop + ':' + this._escapeAttribute(style[prop]) + ';');
-          }
-        }
-
-        openTag.push('style="' + styleBuffer + '"');
-      }
-
-      if (attrs) {
-        for (prop in attrs) {
-          if (attrs.hasOwnProperty(prop)) {
-            openTag.push(prop + '="' + this._escapeAttribute(attrs[prop]) + '"');
-          }
-        }
-      }
-
-      openTag = openTag.join(" ") + '>';
-    }
-
-    var childBuffers = this.childBuffers;
-
-    Ember.ArrayPolyfills.forEach.call(childBuffers, function(buffer) {
-      var stringy = typeof buffer === 'string';
-      content += (stringy ? buffer : buffer.string());
-    });
-
-    if (tag) {
-      return openTag + content + "</" + tag + ">";
-    } else {
-      return content;
-    }
-  },
-
-  _escapeAttribute: function(value) {
-    // Stolen shamelessly from Handlebars
-
-    var escape = {
-      "<": "&lt;",
-      ">": "&gt;",
-      '"': "&quot;",
-      "'": "&#x27;",
-      "`": "&#x60;"
-    };
-
-    var badChars = /&(?!\w+;)|[<>"'`]/g;
-    var possible = /[&<>"'`]/;
-
-    var escapeChar = function(chr) {
-      return escape[chr] || "&amp;";
-    };
-
-    var string = value.toString();
-
-    if(!possible.test(string)) { return string; }
-    return string.replace(badChars, escapeChar);
-  }
-
-};
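-
-// Hedged usage sketch (not part of the original source). A buffer collects
-// tag metadata and child content, then serializes everything in string():
-//
-//     var buffer = Ember.RenderBuffer('div');
-//     buffer.id('greeting').addClass('ember-view').push('Hello');
-//     buffer.string();
-//     // => '<div id="greeting" class="ember-view">Hello</div>'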
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:   Ember - JavaScript Application Framework
-// Copyright: ©2006-2011 Strobe Inc. and contributors.
-//            Portions ©2008-2011 Apple Inc. All rights reserved.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-
-var get = Ember.get, set = Ember.set, fmt = Ember.String.fmt;
-
-/**
-  @ignore
-
-  Ember.EventDispatcher handles delegating browser events to their corresponding
-  Ember.Views. For example, when you click on a view, Ember.EventDispatcher ensures
-  that the view's `mouseDown` method gets called.
-*/
-Ember.EventDispatcher = Ember.Object.extend(
-/** @scope Ember.EventDispatcher.prototype */{
-
-  /**
-    @private
-
-    The root DOM element to which event listeners should be attached. Event
-    listeners will be attached to the document unless this is overridden.
-
-    Can be specified as a DOMElement or a selector string.
-
-    The default is the string 'body' because this may be evaluated before
-    document.body exists in the DOM.
-
-    @type DOMElement
-    @default 'body'
-  */
-  rootElement: 'body',
-
-  /**
-    @private
-
-    Sets up event listeners for standard browser events.
-
-    This will be called after the browser sends a DOMContentReady event. By
-    default, it will set up all of the listeners on the document body. If you
-    would like to register the listeners on a different element, set the event
-    dispatcher's `root` property.
-  */
-  setup: function(addedEvents) {
-    var event, events = {
-      touchstart  : 'touchStart',
-      touchmove   : 'touchMove',
-      touchend    : 'touchEnd',
-      touchcancel : 'touchCancel',
-      keydown     : 'keyDown',
-      keyup       : 'keyUp',
-      keypress    : 'keyPress',
-      mousedown   : 'mouseDown',
-      mouseup     : 'mouseUp',
-      contextmenu : 'contextMenu',
-      click       : 'click',
-      dblclick    : 'doubleClick',
-      mousemove   : 'mouseMove',
-      focusin     : 'focusIn',
-      focusout    : 'focusOut',
-      mouseenter  : 'mouseEnter',
-      mouseleave  : 'mouseLeave',
-      submit      : 'submit',
-      input       : 'input',
-      change      : 'change',
-      dragstart   : 'dragStart',
-      drag        : 'drag',
-      dragenter   : 'dragEnter',
-      dragleave   : 'dragLeave',
-      dragover    : 'dragOver',
-      drop        : 'drop',
-      dragend     : 'dragEnd'
-    };
-
-    Ember.$.extend(events, addedEvents || {});
-
-    var rootElement = Ember.$(get(this, 'rootElement'));
-
-    Ember.assert(fmt('You cannot use the same root element (%@) multiple times in an Ember.Application', [rootElement.selector || rootElement[0].tagName]), !rootElement.is('.ember-application'));
-    Ember.assert('You cannot make a new Ember.Application using a root element that is a descendent of an existing Ember.Application', !rootElement.closest('.ember-application').length);
-    Ember.assert('You cannot make a new Ember.Application using a root element that is an ancestor of an existing Ember.Application', !rootElement.find('.ember-application').length);
-
-    rootElement.addClass('ember-application');
-
-    Ember.assert('Unable to add "ember-application" class to rootElement. Make sure you set rootElement to the body or an element in the body.', rootElement.is('.ember-application'));
-
-    for (event in events) {
-      if (events.hasOwnProperty(event)) {
-        this.setupHandler(rootElement, event, events[event]);
-      }
-    }
-  },
-
-  /**
-    @private
-
-    Registers an event listener on the document. If the given event is
-    triggered, the provided event handler will be triggered on the target
-    view.
-
-    If the target view does not implement the event handler, or if the handler
-    returns false, the parent view will be called. The event will continue to
-    bubble to each successive parent view until it reaches the top.
-
-    For example, to have the `mouseDown` method called on the target view when
-    a `mousedown` event is received from the browser, do the following:
-
-        setupHandler(rootElement, 'mousedown', 'mouseDown');
-
-    @param {String} event the browser-originated event to listen to
-    @param {String} eventName the name of the method to call on the view
-  */
-  setupHandler: function(rootElement, event, eventName) {
-    var self = this;
-
-    rootElement.delegate('.ember-view', event + '.ember', function(evt, triggeringManager) {
-
-      var view = Ember.View.views[this.id],
-          result = true, manager = null;
-
-      manager = self._findNearestEventManager(view,eventName);
-
-      if (manager && manager !== triggeringManager) {
-        result = self._dispatchEvent(manager, evt, eventName, view);
-      } else if (view) {
-        result = self._bubbleEvent(view,evt,eventName);
-      } else {
-        evt.stopPropagation();
-      }
-
-      return result;
-    });
-
-    rootElement.delegate('[data-ember-action]', event + '.ember', function(evt) {
-      var actionId = Ember.$(evt.currentTarget).attr('data-ember-action'),
-          action   = Ember.Handlebars.ActionHelper.registeredActions[actionId],
-          handler  = action.handler;
-
-      if (action.eventName === eventName) {
-        return handler(evt);
-      }
-    });
-  },
-
-  /** @private */
-  _findNearestEventManager: function(view, eventName) {
-    var manager = null;
-
-    while (view) {
-      manager = get(view, 'eventManager');
-      if (manager && manager[eventName]) { break; }
-
-      view = get(view, 'parentView');
-    }
-
-    return manager;
-  },
-
-  /** @private */
-  _dispatchEvent: function(object, evt, eventName, view) {
-    var result = true;
-
-    var handler = object[eventName];
-    if (Ember.typeOf(handler) === 'function') {
-      result = handler.call(object, evt, view);
-      // Do not preventDefault in eventManagers.
-      evt.stopPropagation();
-    }
-    else {
-      result = this._bubbleEvent(view, evt, eventName);
-    }
-
-    return result;
-  },
-
-  /** @private */
-  _bubbleEvent: function(view, evt, eventName) {
-    return Ember.run(function() {
-      return view.handleEvent(eventName, evt);
-    });
-  },
-
-  /** @private */
-  destroy: function() {
-    var rootElement = get(this, 'rootElement');
-    Ember.$(rootElement).undelegate('.ember').removeClass('ember-application');
-    return this._super();
-  }
-});
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:   Ember - JavaScript Application Framework
-// Copyright: ©2006-2011 Strobe Inc. and contributors.
-//            Portions ©2008-2011 Apple Inc. All rights reserved.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-
-// Add a new named queue for rendering views that happens
-// after bindings have synced.
-var queues = Ember.run.queues;
-queues.splice(Ember.$.inArray('actions', queues)+1, 0, 'render');
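-
-// Hedged usage sketch (not part of the original source; `someView` is a
-// hypothetical target). Work scheduled into the new queue runs after
-// bindings have synced, within the same run loop:
-//
-//     Ember.run.schedule('render', someView, function() {
-//       // flush view-rendering work here
-//     });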
-
-})();
-
-
-
-(function() {
-var get = Ember.get, set = Ember.set;
-
-Ember.ControllerMixin.reopen({
-
-  target: null,
-  controllers: null,
-  namespace: null,
-  view: null,
-
-  /**
-    `connectOutlet` creates a new instance of a provided view
-    class, wires it up to its associated controller, and
-    assigns the new view to a property on the current controller.
-
-    The purpose of this method is to enable views that use
-    outlets to quickly assign new views for a given outlet.
-
-    For example, an application view's template may look like
-    this:
-
-        <h1>My Blog</h1>
-        {{outlet}}
-
-    The view for this outlet is specified by assigning a
-    `view` property to the application's controller. The
-    following code will assign a new `App.PostsView` to
-    that outlet:
-
-        applicationController.connectOutlet('posts');
-
-    In general, you will also want to assign a controller
-    to the newly created view. By convention, a controller
-    named `postsController` will be assigned as the view's
-    controller.
-
-    In an application initialized using `app.initialize(router)`,
-    `connectOutlet` will look for `postsController` on the
-    router. The initialization process will automatically
-    create an instance of `App.PostsController` called
-    `postsController`, so you don't need to do anything
-    beyond `connectOutlet` to assign your view and wire it
-    up to its associated controller.
-
-    You can supply a `content` for the controller by supplying
-    a final argument after the view class:
-
-        applicationController.connectOutlet('posts', App.Post.find());
-
-    You can specify a particular outlet to use. For example, if your main
-    template looks like:
-
-        <h1>My Blog</h1>
-        {{outlet master}}
-        {{outlet detail}}
-
-    You can assign an `App.PostsView` to the master outlet:
-
-        applicationController.connectOutlet({
-          name: 'posts',
-          outletName: 'master',
-          context: App.Post.find()
-        });
-
-    You can write this as:
-
-        applicationController.connectOutlet('master', 'posts', App.Post.find());
-
-    @param {String} outletName a name for the outlet to set
-    @param {String} name a view/controller pair name
-    @param {Object} context a context object to assign to the
-      controller's `content` property, if a controller can be
-      found (optional)
-  */
-  connectOutlet: function(name, context) {
-    // Normalize arguments. Supported arguments:
-    //
-    // name
-    // name, context
-    // outletName, name
-    // outletName, name, context
-    // options
-    //
-    // The options hash has the following keys:
-    //
-    //   name: the name of the controller and view
-    //     to use. If this is passed, the name
-    //     determines the view and controller.
-    //   outletName: the name of the outlet to
-    //     fill in. default: 'view'
-    //   viewClass: the class of the view to instantiate
-    //   controller: the controller instance to pass
-    //     to the view
-    //   context: an object that should become the
-    //     controller's `content` and thus the
-    //     template's context.
-
-    var outletName, viewClass, view, controller, options;
-
-    if (Ember.typeOf(context) === 'string') {
-      outletName = name;
-      name = context;
-      context = arguments[2];
-    }
-
-    if (arguments.length === 1) {
-      if (Ember.typeOf(name) === 'object') {
-        options = name;
-        outletName = options.outletName;
-        name = options.name;
-        viewClass = options.viewClass;
-        controller = options.controller;
-        context = options.context;
-      }
-    } else {
-      options = {};
-    }
-
-    outletName = outletName || 'view';
-
-    Ember.assert("You must supply a name or a view class to connectOutlets, but not both", (!!name && !viewClass && !controller) || (!name && !!viewClass));
-
-    if (name) {
-      var namespace = get(this, 'namespace'),
-          controllers = get(this, 'controllers');
-
-      var viewClassName = name.charAt(0).toUpperCase() + name.substr(1) + "View";
-      viewClass = get(namespace, viewClassName);
-      controller = get(controllers, name + 'Controller');
-
-      Ember.assert("The name you supplied " + name + " did not resolve to a view " + viewClassName, !!viewClass);
-      Ember.assert("The name you supplied " + name + " did not resolve to a controller " + name + 'Controller', (!!controller && !!context) || !context);
-    }
-
-    if (controller && context) { controller.set('content', context); }
-    view = viewClass.create();
-    if (controller) { set(view, 'controller', controller); }
-    set(this, outletName, view);
-
-    return view;
-  },
-
-  /**
-    Convenience method to connect controllers. This method makes other controllers
-    available on the controller the method was invoked on.
-
-    For example, to make the `personController` and the `postController` available
-    on the `overviewController`, you would call:
-
-        overviewController.connectControllers('person', 'post');
-
-    @param {String...} controllerNames the controllers to make available
-  */
-  connectControllers: function() {
-    var controllers = get(this, 'controllers'),
-        controllerNames = Array.prototype.slice.apply(arguments),
-        controllerName;
-
-    for (var i=0, l=controllerNames.length; i<l; i++) {
-      controllerName = controllerNames[i] + 'Controller';
-      set(this, controllerName, get(controllers, controllerName));
-    }
-  }
-});
-
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:   Ember - JavaScript Application Framework
-// Copyright: ©2006-2011 Strobe Inc. and contributors.
-//            Portions ©2008-2011 Apple Inc. All rights reserved.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:   Ember - JavaScript Application Framework
-// Copyright: ©2006-2011 Strobe Inc. and contributors.
-//            Portions ©2008-2011 Apple Inc. All rights reserved.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-var get = Ember.get, set = Ember.set, addObserver = Ember.addObserver;
-var meta = Ember.meta, fmt = Ember.String.fmt;
-var a_slice = [].slice;
-var a_forEach = Ember.EnumerableUtils.forEach;
-
-var childViewsProperty = Ember.computed(function() {
-  var childViews = this._childViews;
-
-  var ret = Ember.A();
-
-  a_forEach(childViews, function(view) {
-    if (view.isVirtual) {
-      ret.pushObjects(get(view, 'childViews'));
-    } else {
-      ret.push(view);
-    }
-  });
-
-  return ret;
-}).property().cacheable();
-
-var VIEW_PRESERVES_CONTEXT = Ember.VIEW_PRESERVES_CONTEXT;
-Ember.warn("The way that the {{view}} helper affects templates is about to change. Previously, templates inside child views would use the new view as the context. Soon, views will preserve their parent context when rendering their template. You can opt-in early to the new behavior by setting `ENV.VIEW_PRESERVES_CONTEXT = true`. For more information, see https://gist.github.com/2494968. You should update your templates as soon as possible; this default will change soon, and the option will be eliminated entirely before the 1.0 release.", VIEW_PRESERVES_CONTEXT);
-
-/**
-  @static
-
-  Global hash of shared templates. This will automatically be populated
-  by the build tools so that you can store your Handlebars templates in
-  separate files that get loaded into JavaScript at buildtime.
-
-  @type Hash
-*/
-Ember.TEMPLATES = {};
-
-var invokeForState = {
-  preRender: {},
-  inBuffer: {},
-  hasElement: {},
-  inDOM: {},
-  destroyed: {}
-};
-
-/**
-  @class
-
-  `Ember.View` is the class in Ember responsible for encapsulating templates of HTML
-  content, combining templates with data to render as sections of a page's DOM, and
-  registering and responding to user-initiated events.
-  
-  ## HTML Tag
-  The default HTML tag name used for a view's DOM representation is `div`. This can be
-  customized by setting the `tagName` property. The following view class:
-
-      ParagraphView = Ember.View.extend({
-        tagName: 'em'
-      })
-
-  Would result in instances with the following HTML:
-
-      <em id="ember1" class="ember-view"></em>
-
-  ## HTML `class` Attribute
-  The HTML `class` attribute of a view's tag can be set by providing a `classNames` property
-  that is set to an array of strings:
-
-      MyView = Ember.View.extend({
-        classNames: ['my-class', 'my-other-class']
-      })
-
-  Will result in view instances with an HTML representation of:
-
-      <div id="ember1" class="ember-view my-class my-other-class"></div>
-
-  `class` attribute values can also be set by providing a `classNameBindings` property
-  set to an array of property names for the view. The return value of these properties
-  will be added as part of the value for the view's `class` attribute. These properties
-  can be computed properties:
-
-      MyView = Ember.View.extend({
-        classNameBindings: ['propertyA', 'propertyB'],
-        propertyA: 'from-a',
-        propertyB: function(){
-          if(someLogic){ return 'from-b'; }
-        }.property()
-      })
-
-  Will result in view instances with an HTML representation of:
-
-      <div id="ember1" class="ember-view from-a from-b"></div>
-
-  If the value of a class name binding returns a boolean, the property name itself
-  will be used as the class name if the property is true. The class name will
-  not be added if the value is `false` or `undefined`.
-
-      MyView = Ember.View.extend({
-        classNameBindings: ['hovered'],
-        hovered: true
-      })
-
-  Will result in view instances with an HTML representation of:
-
-      <div id="ember1" class="ember-view hovered"></div>
-
-  When using boolean class name bindings you can supply a string value other than the 
-  property name for use as the `class` HTML attribute by appending the preferred value after
-  a ":" character when defining the binding:
-
-      MyView = Ember.View.extend({
-        classNameBindings: ['awesome:so-very-cool'],
-        awesome: true
-      })
-
-  Will result in view instances with an HTML representation of:
-
-      <div id="ember1" class="ember-view so-very-cool"></div>
-
-
-  Boolean value class name bindings whose property names are in a camelCase-style
-  format will be converted to a dasherized format:
-
-      MyView = Ember.View.extend({
-        classNameBindings: ['isUrgent'],
-        isUrgent: true
-      })
-
-  Will result in view instances with an HTML representation of:
-
-      <div id="ember1" class="ember-view is-urgent"></div>
-
-
-  Class name bindings can also refer to object values that are found by
-  traversing a path relative to the view itself:
-
-      MyView = Ember.View.extend({
-        classNameBindings: ['messages.empty'],
-        messages: Ember.Object.create({
-          empty: true
-        })
-      })
-
-  Will result in view instances with an HTML representation of:
-
-      <div id="ember1" class="ember-view empty"></div>
-
-
-  If you want to add a class name for a property which evaluates to true
-  and a different class name if it evaluates to false, you can pass a binding
-  like this:
-
-      // Applies 'enabled' class when isEnabled is true and 'disabled' when isEnabled is false
-      Ember.View.create({
-        classNameBindings: ['isEnabled:enabled:disabled'],
-        isEnabled: true
-      });
-
-  Will result in view instances with an HTML representation of:
-
-      <div id="ember1" class="ember-view enabled"></div>
-
-  When isEnabled is `false`, the resulting HTML representation looks like this:
-
-      <div id="ember1" class="ember-view disabled"></div>
-
-  This syntax can also be used to add a class only when a property is `false`:
-
-      // Applies no class when isEnabled is true and the 'disabled' class when isEnabled is false
-      Ember.View.create({
-        classNameBindings: ['isEnabled::disabled'],
-        isEnabled: true
-      });
-
-  Will result in view instances with an HTML representation of:
-
-      <div id="ember1" class="ember-view"></div>
-
-  When the `isEnabled` property on the view is set to `false`, it will result
-  in view instances with an HTML representation of:
-
-      <div id="ember1" class="ember-view disabled"></div>
-
-
-  Updates to the value of a class name binding will result in automatic updates
-  of the HTML `class` attribute in the view's rendered HTML representation.
-  If the value becomes `false` or `undefined` the class name will be removed.
-
-  Both `classNames` and `classNameBindings` are concatenated properties. 
-  See `Ember.Object` documentation for more information about concatenated properties.
-
-  ## HTML Attributes
-  The HTML attribute section of a view's tag can be set by providing an `attributeBindings`
-  property set to an array of property names on the view. The return value of these properties
-  will be used as the value of the view's associated HTML attribute:
-
-      AnchorView = Ember.View.extend({
-        tagName: 'a',
-        attributeBindings: ['href'],
-        href: 'http://google.com'
-      })
-
-  Will result in view instances with an HTML representation of:
-
-      <a id="ember1" class="ember-view" href="http://google.com"></a>
-
-  If the return value of an `attributeBindings` monitored property is a boolean
-  the property will follow HTML's pattern of repeating the attribute's name as
-  its value:
-
-      MyTextInput = Ember.View.extend({
-        tagName: 'input',
-        attributeBindings: ['disabled'],
-        disabled: true
-      })
-
-  Will result in view instances with an HTML representation of:
-
-      <input id="ember1" class="ember-view" disabled="disabled" />
-
-  `attributeBindings` can refer to computed properties:
-
-      MyTextInput = Ember.View.extend({
-        tagName: 'input',
-        attributeBindings: ['disabled'],
-        disabled: function(){
-          if (someLogic) {
-            return true;
-          } else {
-            return false;
-          }
-        }.property()
-      })
-
-  Updates to the property of an attribute binding will result in automatic updates
-  of the HTML attribute in the view's rendered HTML representation.
-
-  `attributeBindings` is a concatenated property. See `Ember.Object` documentation
-  for more information about concatenated properties.
-
-  ## Templates
-  The HTML contents of a view's rendered representation are determined by its template.
-  Templates can be any function that accepts an optional context parameter and returns
-  a string of HTML that will be inserted within the view's tag. Most
-  typically in Ember this function will be a compiled Ember.Handlebars template.
-
-      AView = Ember.View.extend({
-        template: Ember.Handlebars.compile('I am the template')
-      })
-
-  Will result in view instances with an HTML representation of:
-
-      <div id="ember1" class="ember-view">I am the template</div>
-
-  The default context of the compiled template will be the view instance itself:
-
-      AView = Ember.View.extend({
-        template: Ember.Handlebars.compile('Hello {{excitedGreeting}}')
-      })
-
-      aView = AView.create({
-        content: Ember.Object.create({
-          firstName: 'Barry'
-        }),
-        excitedGreeting: function(){
-          return this.get("content.firstName") + "!!!";
-        }.property('content.firstName')
-      })
-
-  Will result in an HTML representation of:
-
-      <div id="ember1" class="ember-view">Hello Barry!!!</div>
-
-  Within an Ember application it is more common to define Handlebars templates as
-  part of a page:
-
-      <script type='text/x-handlebars' data-template-name='some-template'>
-        Hello
-      </script>
-
-  And associate it by name using a view's `templateName` property:
-
-      AView = Ember.View.extend({
-        templateName: 'some-template'
-      })
-
-  Using a value for `templateName` that does not have a Handlebars template with a
-  matching `data-template-name` attribute will throw an error.
-
-  Assigning a value to both `template` and `templateName` properties will throw an error.
-
-  For view classes that may have a template later defined (e.g. as the block portion of a `{{view}}`
-  Handlebars helper call in another template or in a subclass), you can provide a `defaultTemplate`
-  property set to a compiled template function. If a template is not later provided for the view
-  instance, the `defaultTemplate` value will be used:
-
-      AView = Ember.View.extend({
-        defaultTemplate: Ember.Handlebars.compile('I was the default'),
-        template: null,
-        templateName: null
-      })
-
-  Will result in instances with an HTML representation of:
-
-      <div id="ember1" class="ember-view">I was the default</div>
-
-  If a `template` or `templateName` is provided it will take precedence over `defaultTemplate`:
-
-      AView = Ember.View.extend({
-        defaultTemplate: Ember.Handlebars.compile('I was the default')
-      })
-
-      aView = AView.create({
-        template: Ember.Handlebars.compile('I was the template, not default')
-      })
-
-  Will result in the following HTML representation when rendered:
-
-      <div id="ember1" class="ember-view">I was the template, not default</div>
-
-  ## Layouts
-  Views can have a secondary template that wraps their main template. Like
-  primary templates, layouts can be any function that accepts an optional context
-  parameter and returns a string of HTML that will be inserted inside the view's tag.
-  Views whose HTML element is self-closing (e.g. `<input />`) cannot have a layout
-  and this property will be ignored.
-  
-  Most typically in Ember a layout will be a compiled Ember.Handlebars template.
-
-  A view's layout can be set directly with the `layout` property or reference an
-  existing Handlebars template by name with the `layoutName` property.
-
-  A template used as a layout must contain a single use of the Handlebars `{{yield}}`
-  helper. The HTML contents of a view's rendered `template` will be inserted at this location:
-
-      AViewWithLayout = Ember.View.extend({
-        layout: Ember.Handlebars.compile("<div class='my-decorative-class'>{{yield}}</div>"),
-        template: Ember.Handlebars.compile("I got wrapped")
-      })
-
-  Will result in view instances with an HTML representation of:
-
-      <div id="ember1" class="ember-view">
-        <div class="my-decorative-class">
-          I got wrapped
-        </div>
-      </div>
-
-  See `Handlebars.helpers.yield` for more information.
-
-  ## Responding to Browser Events
-  Views can respond to user-initiated events in one of three ways: by implementing a method,
-  through an event manager, or through `{{action}}` helper use in their template or layout.
-
-  ### Method Implementation
-  Views can respond to user-initiated events by implementing a method that matches the
-  event name. A `jQuery.Event` object will be passed as the argument to this method.
-
-      AView = Ember.View.extend({
-        click: function(event){
-          // will be called when an instance's
-          // rendered element is clicked
-        }
-      })
-
-  ### Event Managers
-  Views can define an object as their `eventManager` property. This object can then
-  implement methods that match the desired event names. Matching events that occur
-  on the view's rendered HTML or the rendered HTML of any of its DOM descendants 
-  will trigger this method.  A `jQuery.Event` object will be passed as the first 
-  argument to the method and an  `Ember.View` object as the second. The `Ember.View`
-  will be the view whose rendered HTML was interacted with. This may be the view with
-  the `eventManager` property or one of its descendent views.
-
-      AView = Ember.View.extend({
-        eventManager: Ember.Object.create({
-          doubleClick: function(event, view){
-            // will be called when an instance's
-            // rendered element or the rendered
-            // element of any of its descendent
-            // views is double-clicked
-          }
-        })
-      })
-
-
-  An event defined for an event manager takes precedence over events of the same
-  name handled through methods on the view.
-
-
-      AView = Ember.View.extend({
-        mouseEnter: function(event){
-          // will never trigger.
-        },
-        eventManager: Ember.Object.create({
-          mouseEnter: function(event, view){
-            // takes precedence over AView#mouseEnter
-          }
-        })
-      })
-
-  Similarly a view's event manager will take precedence for events of any views
-  rendered as a descendent. A method name that matches an event name will not be called
-  if the view instance was rendered inside the HTML representation of a view that has
-  an `eventManager` property defined that handles events of that name. Events not handled
-  by the event manager will still trigger method calls on the descendent.
-
-      OuterView = Ember.View.extend({
-        template: Ember.Handlebars.compile("outer {{#view InnerView}}inner{{/view}} outer"),
-        eventManager: Ember.Object.create({
-          mouseEnter: function(event, view){
-            // view might be an instance of either
-            // OuterView or InnerView depending on
-            // where on the page the user interaction occurred
-          }
-        })
-      })
-
-      InnerView = Ember.View.extend({
-        click: function(event){
-          // will be called if rendered inside
-          // an OuterView because OuterView's
-          // eventManager doesn't handle click events
-        },
-        mouseEnter: function(event){
-          // will never be called if rendered inside 
-          // an OuterView.
-        }
-      })
-
-  ### Handlebars `{{action}}` Helper
-  See `Handlebars.helpers.action`.
-
-  ### Event Names
-  Possible event names for any of the responding approaches described above are:
-
-  Touch events: 'touchStart', 'touchMove', 'touchEnd', 'touchCancel'
-
-  Keyboard events: 'keyDown', 'keyUp', 'keyPress'
-
-  Mouse events: 'mouseDown', 'mouseUp', 'contextMenu', 'click', 'doubleClick', 'mouseMove',
-  'focusIn', 'focusOut', 'mouseEnter', 'mouseLeave'
-
-  Form events: 'submit', 'change', 'focusIn', 'focusOut', 'input'
-
-  HTML5 drag and drop events: 'dragStart', 'drag', 'dragEnter', 'dragLeave', 'drop', 'dragEnd'
-  
-  ## Handlebars `{{view}}` Helper
-  Other `Ember.View` instances can be included as part of a view's template by using the `{{view}}`
-  Handlebars helper. See `Handlebars.helpers.view` for additional information.
-
-  @extends Ember.Object
-  @extends Ember.Evented
-*/
-Ember.View = Ember.Object.extend(Ember.Evented,
-/** @scope Ember.View.prototype */ {
-
-  /** @private */
-  concatenatedProperties: ['classNames', 'classNameBindings', 'attributeBindings'],
-
-  /**
-    @type Boolean
-    @default true
-    @constant
-  */
-  isView: true,
-
-  // ..........................................................
-  // TEMPLATE SUPPORT
-  //
-
-  /**
-    The name of the template to look up if no template is provided.
-
-    Ember.View will look for a template with this name in this view's
-    `templates` object. By default, this will be a global object
-    shared in `Ember.TEMPLATES`.
-
-    @type String
-    @default null
-  */
-  templateName: null,
-
-  /**
-    The name of the layout to look up if no layout is provided.
-
-    Ember.View will look for a template with this name in this view's
-    `templates` object. By default, this will be a global object
-    shared in `Ember.TEMPLATES`.
-
-    @type String
-    @default null
-  */
-  layoutName: null,
-
-  /**
-    The hash in which to look for `templateName`.
-
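-    As an illustrative sketch (the template name `greeting` is hypothetical),
-    a precompiled template could be registered on the default hash directly:
-
-        Ember.TEMPLATES['greeting'] = Ember.Handlebars.compile('Hello!');
-
-        AView = Ember.View.extend({
-          templateName: 'greeting'
-        })
-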
-    @type Ember.Object
-    @default Ember.TEMPLATES
-  */
-  templates: Ember.TEMPLATES,
-
-  /**
-    The template used to render the view. This should be a function that
-    accepts an optional context parameter and returns a string of HTML that
-    will be inserted into the DOM relative to its parent view.
-
-    In general, you should set the `templateName` property instead of setting
-    the template yourself.
-
-    @field
-    @type Function
-  */
-  template: Ember.computed(function(key, value) {
-    if (value !== undefined) { return value; }
-
-    var templateName = get(this, 'templateName'),
-        template = this.templateForName(templateName, 'template');
-
-    return template || get(this, 'defaultTemplate');
-  }).property('templateName').cacheable(),
-
-  /**
-    The controller managing this view. If this property is set, it will be
-    made available for use by the template.
-
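-    A minimal sketch, assuming a hypothetical `personController` object
-    with a `name` property:
-
-        aView = Ember.View.create({
-          controller: personController,
-          template: Ember.Handlebars.compile('{{controller.name}}')
-        })
-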
-    @type Object
-  */
-  controller: Ember.computed(function(key, value) {
-    var parentView;
-
-    if (arguments.length === 2) {
-      return value;
-    } else {
-      parentView = get(this, 'parentView');
-      return parentView ? get(parentView, 'controller') : null;
-    }
-  }).property().cacheable(),
-
-  /**
-    A view may contain a layout. A layout is a regular template but
-    supersedes the `template` property during rendering. It is the
-    responsibility of the layout template to retrieve the `template`
-    property from the view (or alternatively, call `Handlebars.helpers.yield`,
-    `{{yield}}`) to render it in the correct location.
-
-    This is useful for a view that has a shared wrapper, but which delegates
-    the rendering of the contents of the wrapper to the `template` property
-    on a subclass.
-
-    @field
-    @type Function
-  */
-  layout: Ember.computed(function(key, value) {
-    if (arguments.length === 2) { return value; }
-
-    var layoutName = get(this, 'layoutName'),
-        layout = this.templateForName(layoutName, 'layout');
-
-    return layout || get(this, 'defaultLayout');
-  }).property('layoutName').cacheable(),
-
-  templateForName: function(name, type) {
-    if (!name) { return; }
-
-    var templates = get(this, 'templates'),
-        template = get(templates, name);
-
-    if (!template) {
-      throw new Ember.Error(fmt('%@ - Unable to find %@ "%@".', [this, type, name]));
-    }
-
-    return template;
-  },
-
-  /**
-    The object from which templates should access properties.
-
-    This object will be passed to the template function each time the render
-    method is called, but it is up to the individual function to decide what
-    to do with it.
-
-    By default, this will be the view itself.
-
-    @type Object
-  */
-  context: Ember.computed(function(key, value) {
-    if (arguments.length === 2) {
-      set(this, '_context', value);
-      return value;
-    } else {
-      return get(this, '_context');
-    }
-  }).cacheable(),
-
-  /**
-    @private
-
-    Private copy of the view's template context. This can be set directly
-    by Handlebars without triggering the observer that causes the view
-    to be re-rendered.
-
-    The context of a view is looked up as follows:
-
-    1. Specified controller
-    2. Supplied context (usually by Handlebars)
-    3. `parentView`'s context (for a child of a ContainerView)
-
-    The code in Handlebars that overrides the `_context` property first
-    checks to see whether the view has a specified controller. This is
-    something of a hack and should be revisited.
-  */
-  _context: Ember.computed(function(key, value) {
-    var parentView, controller, context;
-
-    if (arguments.length === 2) {
-      return value;
-    }
-
-    if (VIEW_PRESERVES_CONTEXT) {
-      if (controller = get(this, 'controller')) {
-        return controller;
-      }
-
-      parentView = get(this, '_parentView');
-      if (parentView) {
-        return get(parentView, '_context');
-      }
-    }
-
-    return this;
-  }).cacheable(),
-
-  /**
-    If a value that affects template rendering changes, the view should be
-    re-rendered to reflect the new value.
-
-    @private
-  */
-  _displayPropertyDidChange: Ember.observer(function() {
-    this.rerender();
-  }, 'context', 'controller'),
-
-  /**
-    If the view is currently inserted into the DOM of a parent view, this
-    property will point to the parent of the view.
-
-    @type Ember.View
-    @default null
-  */
-  parentView: Ember.computed(function() {
-    var parent = get(this, '_parentView');
-
-    if (parent && parent.isVirtual) {
-      return get(parent, 'parentView');
-    } else {
-      return parent;
-    }
-  }).property('_parentView').volatile(),
-
-  _parentView: null,
-
-  // return the current view, not including virtual views
-  concreteView: Ember.computed(function() {
-    if (!this.isVirtual) { return this; }
-    else { return get(this, 'parentView'); }
-  }).property('_parentView').volatile(),
-
-  /**
-    If `false`, the view will appear hidden in the DOM.
-
-    @type Boolean
-    @default true
-  */
-  isVisible: true,
-
-  /**
-    Array of child views. You should never edit this array directly.
-    Instead, use appendChild and removeFromParent.
-
-    @private
-    @type Array
-    @default []
-  */
-  childViews: childViewsProperty,
-
-  _childViews: [],
-
-  /**
-    When it's a virtual view, we need to notify the parent that their
-    childViews will change.
-  */
-  _childViewsWillChange: Ember.beforeObserver(function() {
-    if (this.isVirtual) {
-      var parentView = get(this, 'parentView');
-      if (parentView) { Ember.propertyWillChange(parentView, 'childViews'); }
-    }
-  }, 'childViews'),
-
-  /**
-    When it's a virtual view, we need to notify the parent that their
-    childViews did change.
-  */
-  _childViewsDidChange: Ember.observer(function() {
-    if (this.isVirtual) {
-      var parentView = get(this, 'parentView');
-      if (parentView) { Ember.propertyDidChange(parentView, 'childViews'); }
-    }
-  }, 'childViews'),
-
-  /**
-    Return the nearest ancestor that is an instance of the provided
-    class.
-
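-    An illustrative sketch, called from within a child view:
-
-        var collectionView = this.nearestInstanceOf(Ember.CollectionView);
-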
-    @param {Class} klass Subclass of Ember.View (or Ember.View itself)
-    @returns Ember.View
-  */
-  nearestInstanceOf: function(klass) {
-    var view = get(this, 'parentView');
-
-    while (view) {
-      if(view instanceof klass) { return view; }
-      view = get(view, 'parentView');
-    }
-  },
-
-  /**
-    Return the nearest ancestor that has a given property.
-
-    @param {String} property A property name
-    @returns Ember.View
-  */
-  nearestWithProperty: function(property) {
-    var view = get(this, 'parentView');
-
-    while (view) {
-      if (property in view) { return view; }
-      view = get(view, 'parentView');
-    }
-  },
-
-  /**
-    Return the nearest ancestor whose parent is an instance of
-    `klass`.
-
-    @param {Class} klass Subclass of Ember.View (or Ember.View itself)
-    @returns Ember.View
-  */
-  nearestChildOf: function(klass) {
-    var view = get(this, 'parentView');
-
-    while (view) {
-      if(get(view, 'parentView') instanceof klass) { return view; }
-      view = get(view, 'parentView');
-    }
-  },
-
-  /**
-    Return the nearest ancestor that is an Ember.CollectionView
-
-    @returns Ember.CollectionView
-  */
-  collectionView: Ember.computed(function() {
-    return this.nearestInstanceOf(Ember.CollectionView);
-  }).cacheable(),
-
-  /**
-    Return the nearest ancestor that is a direct child of
-    an Ember.CollectionView
-
-    @returns Ember.View
-  */
-  itemView: Ember.computed(function() {
-    return this.nearestChildOf(Ember.CollectionView);
-  }).cacheable(),
-
-  /**
-    Return the nearest ancestor that has the property
-    `content`.
-
-    @returns Ember.View
-  */
-  contentView: Ember.computed(function() {
-    return this.nearestWithProperty('content');
-  }).cacheable(),
-
-  /**
-    @private
-
-    When the parent view changes, recursively invalidate
-    collectionView, itemView, and contentView
-  */
-  _parentViewDidChange: Ember.observer(function() {
-    if (this.isDestroying) { return; }
-
-    this.invokeRecursively(function(view) {
-      view.propertyDidChange('collectionView');
-      view.propertyDidChange('itemView');
-      view.propertyDidChange('contentView');
-    });
-
-    if (get(this, 'parentView.controller') && !get(this, 'controller')) {
-      this.notifyPropertyChange('controller');
-    }
-  }, '_parentView'),
-
-  _controllerDidChange: Ember.observer(function() {
-    if (this.isDestroying) { return; }
-
-    this.forEachChildView(function(view) {
-      view.propertyDidChange('controller');
-    });
-  }, 'controller'),
-
-  cloneKeywords: function() {
-    var templateData = get(this, 'templateData');
-
-    var keywords = templateData ? Ember.copy(templateData.keywords) : {};
-    set(keywords, 'view', get(this, 'concreteView'));
-    set(keywords, 'controller', get(this, 'controller'));
-
-    return keywords;
-  },
-
-  /**
-    Called on your view when it should push strings of HTML into a
-    Ember.RenderBuffer. Most users will want to override the `template`
-    or `templateName` properties instead of this method.
-
-    By default, Ember.View will look for a function in the `template`
-    property and invoke it with the value of `context`. The value of
-    `context` will be the view's controller unless you override it.
-
-    @param {Ember.RenderBuffer} buffer The render buffer
-  */
-  render: function(buffer) {
-    // If this view has a layout, it is the responsibility of the
-    // the layout to render the view's template. Otherwise, render the template
-    // directly.
-    var template = get(this, 'layout') || get(this, 'template');
-
-    if (template) {
-      var context = get(this, '_context');
-      var keywords = this.cloneKeywords();
-
-      var data = {
-        view: this,
-        buffer: buffer,
-        isRenderData: true,
-        keywords: keywords
-      };
-
-      // Invoke the template with the provided template context, which
-      // is the view by default. A hash of data is also passed that provides
-      // the template with access to the view and render buffer.
-
-      Ember.assert('template must be a function. Did you mean to call Ember.Handlebars.compile("...") or specify templateName instead?', typeof template === 'function');
-      // The template should write directly to the render buffer instead
-      // of returning a string.
-      var output = template(context, { data: data });
-
-      // If the template returned a string instead of writing to the buffer,
-      // push the string onto the buffer.
-      if (output !== undefined) { buffer.push(output); }
-    }
-  },
-
-  invokeForState: function(name) {
-    var stateName = this.state, args, fn;
-
-    // try to find the function for the state in the cache
-    if (fn = invokeForState[stateName][name]) {
-      args = a_slice.call(arguments);
-      args[0] = this;
-
-      return fn.apply(this, args);
-    }
-
-    // otherwise, find and cache the function for this state
-    var parent = this, states = parent.states, state;
-
-    while (states) {
-      state = states[stateName];
-
-      while (state) {
-        fn = state[name];
-
-        if (fn) {
-          invokeForState[stateName][name] = fn;
-
-          args = a_slice.call(arguments, 1);
-          args.unshift(this);
-
-          return fn.apply(this, args);
-        }
-
-        state = state.parentState;
-      }
-
-      states = states.parent;
-    }
-  },
-
-  /**
-    Renders the view again. This will work regardless of whether the
-    view is already in the DOM or not. If the view is in the DOM, the
-    rendering process will be deferred to give bindings a chance
-    to synchronize.
-
-    If children were added during the rendering process using `appendChild`,
-    `rerender` will remove them, because they will be added again
-    if needed by the next `render`.
-
-    In general, if the display of your view changes, you should modify
-    the DOM element directly instead of manually calling `rerender`, which can
-    be slow.
-  */
-  rerender: function() {
-    return this.invokeForState('rerender');
-  },
-
-  clearRenderedChildren: function() {
-    var lengthBefore = this.lengthBeforeRender,
-        lengthAfter  = this.lengthAfterRender;
-
-    // If there were child views created during the last call to render(),
-    // remove them under the assumption that they will be re-created when
-    // we re-render.
-
-    // VIEW-TODO: Unit test this path.
-    var childViews = this._childViews;
-    for (var i=lengthAfter-1; i>=lengthBefore; i--) {
-      if (childViews[i]) { childViews[i].destroy(); }
-    }
-  },
-
-  /**
-    @private
-
-    Iterates over the view's `classNameBindings` array, inserts the value
-    of the specified property into the `classNames` array, then creates an
-    observer to update the view's element if the bound property ever changes
-    in the future.
-  */
-  _applyClassNameBindings: function() {
-    var classBindings = get(this, 'classNameBindings'),
-        classNames = get(this, 'classNames'),
-        elem, newClass, dasherizedClass;
-
-    if (!classBindings) { return; }
-
-    // Loop through all of the configured bindings. These will be either
-    // property names ('isUrgent') or property paths relative to the view
-    // ('content.isUrgent')
-    a_forEach(classBindings, function(binding) {
-
-      // Variable in which the old class value is saved. The observer function
-      // closes over this variable, so it knows which string to remove when
-      // the property changes.
-      var oldClass;
-
-      // Set up an observer on the context. If the property changes, toggle the
-      // class name.
-      var observer = function() {
-        // Get the current value of the property
-        newClass = this._classStringForProperty(binding);
-        elem = this.$();
-
-        // If we had previously added a class to the element, remove it.
-        if (oldClass) {
-          elem.removeClass(oldClass);
-          // Also remove from classNames so that if the view gets rerendered,
-          // the class doesn't get added back to the DOM.
-          classNames.removeObject(oldClass);
-        }
-
-        // If necessary, add a new class. Make sure we keep track of it so
-        // it can be removed in the future.
-        if (newClass) {
-          elem.addClass(newClass);
-          oldClass = newClass;
-        } else {
-          oldClass = null;
-        }
-      };
-
-      // Get the class name for the property at its current value
-      dasherizedClass = this._classStringForProperty(binding);
-
-      if (dasherizedClass) {
-        // Ensure that it gets into the classNames array
-        // so it is displayed when we render.
-        classNames.push(dasherizedClass);
-
-        // Save a reference to the class name so we can remove it
-        // if the observer fires. Remember that this variable has
-        // been closed over by the observer.
-        oldClass = dasherizedClass;
-      }
-
-      // Extract just the property name from bindings like 'foo:bar'
-      var parsedPath = Ember.View._parsePropertyPath(binding);
-      addObserver(this, parsedPath.path, observer);
-    }, this);
-  },
-
-  /**
-    Iterates through the view's attribute bindings, sets up observers for each,
-    then applies the current value of the attributes to the passed render buffer.
-
-    @param {Ember.RenderBuffer} buffer
-  */
-  _applyAttributeBindings: function(buffer) {
-    var attributeBindings = get(this, 'attributeBindings'),
-        attributeValue, elem, type;
-
-    if (!attributeBindings) { return; }
-
-    a_forEach(attributeBindings, function(binding) {
-      var split = binding.split(':'),
-          property = split[0],
-          attributeName = split[1] || property;
-
-      // Create an observer to add/remove/change the attribute if the
-      // JavaScript property changes.
-      var observer = function() {
-        elem = this.$();
-        if (!elem) { return; }
-
-        attributeValue = get(this, property);
-
-        Ember.View.applyAttributeBindings(elem, attributeName, attributeValue);
-      };
-
-      addObserver(this, property, observer);
-
-      // Determine the current value and add it to the render buffer
-      // if necessary.
-      attributeValue = get(this, property);
-      Ember.View.applyAttributeBindings(buffer, attributeName, attributeValue);
-    }, this);
-  },
-
-  /**
-    @private
-
-    Given a property name, returns a dasherized version of that
-    property name if the property evaluates to a non-falsy value.
-
-    For example, if the view has property `isUrgent` that evaluates to true,
-    passing `isUrgent` to this method will return `"is-urgent"`.
-  */
-  _classStringForProperty: function(property) {
-    var parsedPath = Ember.View._parsePropertyPath(property);
-    var path = parsedPath.path;
-
-    var val = get(this, path);
-    if (val === undefined && Ember.isGlobalPath(path)) {
-      val = get(window, path);
-    }
-
-    return Ember.View._classStringForValue(path, val, parsedPath.className, parsedPath.falsyClassName);
-  },
-
-  // ..........................................................
-  // ELEMENT SUPPORT
-  //
-
-  /**
-    Returns the current DOM element for the view.
-
-    @field
-    @type DOMElement
-  */
-  element: Ember.computed(function(key, value) {
-    if (value !== undefined) {
-      return this.invokeForState('setElement', value);
-    } else {
-      return this.invokeForState('getElement');
-    }
-  }).property('_parentView').cacheable(),
-
-  /**
-    Returns a jQuery object for this view's element. If you pass in a selector
-    string, this method will return a jQuery object, using the current element
-    as its buffer.
-
-    For example, calling `view.$('li')` will return a jQuery object containing
-    all of the `li` elements inside the DOM element of this view.
-
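-    A short sketch (the `li.selected` selector is illustrative):
-
-        var selectedItems = view.$('li.selected');
-        view.$().addClass('active');
-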
-    @param {String} [selector] a jQuery-compatible selector string
-    @returns {jQuery} the jQuery object for the DOM node
-  */
-  $: function(sel) {
-    return this.invokeForState('$', sel);
-  },
-
-  /** @private */
-  mutateChildViews: function(callback) {
-    var childViews = this._childViews,
-        idx = childViews.length,
-        view;
-
-    while(--idx >= 0) {
-      view = childViews[idx];
-      callback.call(this, view, idx);
-    }
-
-    return this;
-  },
-
-  /** @private */
-  forEachChildView: function(callback) {
-    var childViews = this._childViews;
-
-    if (!childViews) { return this; }
-
-    var len = childViews.length,
-        view, idx;
-
-    for(idx = 0; idx < len; idx++) {
-      view = childViews[idx];
-      callback.call(this, view);
-    }
-
-    return this;
-  },
-
-  /**
-    Appends the view's element to the specified parent element.
-
-    If the view does not have an HTML representation yet, `createElement()`
-    will be called automatically.
-
-    Note that this method just schedules the view to be appended; the DOM
-    element will not be appended to the given element until all bindings have
-    finished synchronizing.
-
-    This is not typically a function that you will need to call directly
-    when building your application. You might consider using Ember.ContainerView
-    instead. If you do need to use appendTo, be sure that the target element you
-    are providing is associated with an Ember.Application and does not have an
-    ancestor element that is associated with an Ember view.
-
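-    A minimal sketch (the `#sidebar` selector is hypothetical):
-
-        aView = Ember.View.create();
-        aView.appendTo('#sidebar');
-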
-    @param {String|DOMElement|jQuery} target A selector, element, HTML string, or jQuery object
-    @returns {Ember.View} receiver
-  */
-  appendTo: function(target) {
-    // Schedule the DOM element to be created and appended to the given
-    // element after bindings have synchronized.
-    this._insertElementLater(function() {
-      Ember.assert("You cannot append to an existing Ember.View. Consider using Ember.ContainerView instead.", !Ember.$(target).is('.ember-view') && !Ember.$(target).parents().is('.ember-view'));
-      this.$().appendTo(target);
-    });
-
-    return this;
-  },
-
-  /**
-    Replaces the content of the specified parent element with this view's element.
-    If the view does not have an HTML representation yet, `createElement()`
-    will be called automatically.
-
-    Note that this method just schedules the view to be appended; the DOM
-    element will not be appended to the given element until all bindings have
-    finished synchronizing.
-
-    @param {String|DOMElement|jQuery} target A selector, element, HTML string, or jQuery object
-    @returns {Ember.View} receiver
-  */
-  replaceIn: function(target) {
-    Ember.assert("You cannot replace an existing Ember.View. Consider using Ember.ContainerView instead.", !Ember.$(target).is('.ember-view') && !Ember.$(target).parents().is('.ember-view'));
-
-    this._insertElementLater(function() {
-      Ember.$(target).empty();
-      this.$().appendTo(target);
-    });
-
-    return this;
-  },
-
-  /**
-    @private
-
-    Schedules a DOM operation to occur during the next render phase. This
-    ensures that all bindings have finished synchronizing before the view is
-    rendered.
-
-    To use, pass a function that performs a DOM operation.
-
-    Before your function is called, this view and all child views will receive
-    the `willInsertElement` event. After your function is invoked, this view
-    and all of its child views will receive the `didInsertElement` event.
-
-        view._insertElementLater(function() {
-          this.createElement();
-          this.$().appendTo('body');
-        });
-
-    @param {Function} fn the function that inserts the element into the DOM
-  */
-  _insertElementLater: function(fn) {
-    this._lastInsert = Ember.guidFor(fn);
-    Ember.run.schedule('render', this, this.invokeForState, 'insertElement', fn);
-  },
-
-  /**
-    Appends the view's element to the document body. If the view does
-    not have an HTML representation yet, `createElement()` will be called
-    automatically.
-
-    Note that this method just schedules the view to be appended; the DOM
-    element will not be appended to the document body until all bindings have
-    finished synchronizing.
-
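-    For example (illustrative only):
-
-        aView = Ember.View.create({
-          template: Ember.Handlebars.compile('Hi')
-        });
-        aView.append();
-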
-    @returns {Ember.View} receiver
-  */
-  append: function() {
-    return this.appendTo(document.body);
-  },
-
-  /**
-    Removes the view's element from the element to which it is attached.
-
-    @returns {Ember.View} receiver
-  */
-  remove: function() {
-    // What we should really do here is wait until the end of the run loop
-    // to determine if the element has been re-appended to a different
-    // element.
-    // In the interim, we will just re-render if that happens. It is more
-    // important that elements get garbage collected.
-    this.destroyElement();
-    this.invokeRecursively(function(view) {
-      view.clearRenderedChildren();
-    });
-  },
-
-  /**
-    The ID to use when trying to locate the element in the DOM. If you do not
-    set the elementId explicitly, then the view's GUID will be used instead.
-    This ID must be set at the time the view is created.
-
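-    An illustrative sketch (the ID value is hypothetical):
-
-        aView = Ember.View.create({
-          elementId: 'sidebar-view'
-        });
-        // renders as <div id="sidebar-view" class="ember-view"></div>
-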
-    @type String
-    @readOnly
-  */
-  elementId: Ember.computed(function(key, value) {
-    return value !== undefined ? value : Ember.guidFor(this);
-  }).cacheable(),
-
-  /**
-    @private
-
-    TODO: Perhaps this should be removed from the production build somehow.
-  */
-  _elementIdDidChange: Ember.beforeObserver(function() {
-    throw "Changing a view's elementId after creation is not allowed.";
-  }, 'elementId'),
-
-  /**
-    Attempts to discover the element in the parent element. The default
-    implementation looks for an element with an ID of elementId (or the view's
-    guid if elementId is null). You can override this method to provide your
-    own form of lookup; for example, to discover your element using a CSS
-    class name instead of an ID, as sketched below.
-
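-    An illustrative override using a hypothetical marker class name:
-
-        findElementInParentElement: function(parentElem) {
-          return Ember.$('.my-view-marker', parentElem)[0];
-        }
-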
-    @param {DOMElement} parentElement The parent's DOM element
-    @returns {DOMElement} The discovered element
-  */
-  findElementInParentElement: function(parentElem) {
-    var id = "#" + get(this, 'elementId');
-    return Ember.$(id)[0] || Ember.$(id, parentElem)[0];
-  },
-
-  /**
-    Creates a new renderBuffer with the passed tagName. You can override this
-    method to provide further customization to the buffer if needed. Normally
-    you will not need to call or override this method.
-
-    @returns {Ember.RenderBuffer}
-  */
-  renderBuffer: function(tagName) {
-    tagName = tagName || get(this, 'tagName');
-
-    // Explicitly check for null or undefined, as tagName
-    // may be an empty string, which would evaluate to false.
-    if (tagName === null || tagName === undefined) {
-      tagName = 'div';
-    }
-
-    return Ember.RenderBuffer(tagName);
-  },
-
-  /**
-    Creates a DOM representation of the view and all of its
-    child views by recursively calling the `render()` method.
-
-    After the element has been created, `didInsertElement` will
-    be called on this view and all of its child views.
-
-    @returns {Ember.View} receiver
-  */
-  createElement: function() {
-    if (get(this, 'element')) { return this; }
-
-    var buffer = this.renderToBuffer();
-    set(this, 'element', buffer.element());
-
-    return this;
-  },
-
-  /**
-    Called when a view is going to insert an element into the DOM.
-  */
-  willInsertElement: Ember.K,
-
-  /**
-    Called when the element of the view has been inserted into the DOM.
-    Override this function to do any set up that requires an element in the
-    document body.
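-
-    A common sketch; the jQuery `fadeIn` call stands in for any
-    element-dependent setup:
-
-        AView = Ember.View.extend({
-          didInsertElement: function() {
-            // this.$() is safe to use here; the element is in the DOM
-            this.$().hide().fadeIn();
-          }
-        })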
-  */
-  didInsertElement: Ember.K,
-
-  /**
-    Called when the view is about to rerender, but before anything has
-    been torn down. This is a good opportunity to tear down any manual
-    observers you have installed based on the DOM state.
-  */
-  willRerender: Ember.K,
-
-  /**
-    Run this callback on the current view and recursively on child views.
-
-    @private
-  */
-  invokeRecursively: function(fn) {
-    fn.call(this, this);
-
-    this.forEachChildView(function(view) {
-      view.invokeRecursively(fn);
-    });
-  },
-
-  /**
-    Invalidates the cache for a property on all child views.
-  */
-  invalidateRecursively: function(key) {
-    this.forEachChildView(function(view) {
-      view.propertyDidChange(key);
-    });
-  },
-
-  /**
-    @private
-
-    Invokes the receiver's willInsertElement() method if it exists and then
-    invokes the same on all child views.
-
-    NOTE: In some cases this was called when the element existed. This no longer
-    works so we let people know. We can remove this warning code later.
-  */
-  _notifyWillInsertElement: function() {
-    this.invokeRecursively(function(view) {
-      view.trigger('willInsertElement');
-    });
-  },
-
-  /**
-    @private
-
-    Invokes the receiver's didInsertElement() method if it exists and then
-    invokes the same on all child views.
-  */
-  _notifyDidInsertElement: function() {
-    this.invokeRecursively(function(view) {
-      view.trigger('didInsertElement');
-    });
-  },
-
-  /**
-    @private
-
-    Invokes the receiver's willRerender() method if it exists and then
-    invokes the same on all child views.
-  */
-  _notifyWillRerender: function() {
-    this.invokeRecursively(function(view) {
-      view.trigger('willRerender');
-    });
-  },
-
-  /**
-    Destroys any existing element along with the element for any child views
-    as well. If the view does not currently have an element, then this method
-    will do nothing.
-
-    If you implement willDestroyElement() on your view, then this method will
-    be invoked on your view before your element is destroyed to give you a
-    chance to clean up any event handlers, etc.
-
-    If you write a willDestroyElement() handler, you can assume that your
-    didInsertElement() handler was called earlier for the same element.
-
-    Normally you will not call or override this method yourself, but you may
-    want to implement the above callbacks when it is run.
-
-    @returns {Ember.View} receiver
-  */
-  destroyElement: function() {
-    return this.invokeForState('destroyElement');
-  },
-
-  /**
-    Called when the element of the view is going to be destroyed. Override
-    this function to do any teardown that requires an element, like removing
-    event listeners.
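-
-    A sketch pairing this with `didInsertElement`; the `windowDidResize`
-    handler is hypothetical:
-
-        AView = Ember.View.extend({
-          didInsertElement: function() {
-            this._resizeHandler = Ember.$.proxy(this.windowDidResize, this);
-            Ember.$(window).bind('resize', this._resizeHandler);
-          },
-          willDestroyElement: function() {
-            Ember.$(window).unbind('resize', this._resizeHandler);
-          }
-        })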
-  */
-  willDestroyElement: function() {},
-
-  /**
-    @private
-
-    Invokes the `willDestroyElement` callback on the view and child views.
-  */
-  _notifyWillDestroyElement: function() {
-    this.invokeRecursively(function(view) {
-      view.trigger('willDestroyElement');
-    });
-  },
-
-  /** @private (nodoc) */
-  _elementWillChange: Ember.beforeObserver(function() {
-    this.forEachChildView(function(view) {
-      Ember.propertyWillChange(view, 'element');
-    });
-  }, 'element'),
-
-  /**
-    @private
-
-    If this view's element changes, we need to invalidate the caches of our
-    child views so that we do not retain references to DOM elements that are
-    no longer needed.
-
-    @observes element
-  */
-  _elementDidChange: Ember.observer(function() {
-    this.forEachChildView(function(view) {
-      Ember.propertyDidChange(view, 'element');
-    });
-  }, 'element'),
-
-  /**
-    Called when the parentView property has changed.
-
-    @function
-  */
-  parentViewDidChange: Ember.K,
-
-  /**
-    @private
-
-    Invoked by the view system when this view needs to produce an HTML
-    representation. This method will create a new render buffer, if needed,
-    then apply any default attributes, such as class names and visibility.
-    Finally, the `render()` method is invoked, which is responsible for
-    doing the bulk of the rendering.
-
-    You should not need to override this method; instead, implement the
-    `template` property, or if you need more control, override the `render`
-    method.
-
-    @param {Ember.RenderBuffer} buffer the render buffer. If no buffer is
-      passed, a default buffer, using the current view's `tagName`, will
-      be used.
-  */
-  renderToBuffer: function(parentBuffer, bufferOperation) {
-    var buffer;
-
-    Ember.run.sync();
-
-    // Determine where in the parent buffer to start the new buffer.
-    // By default, a new buffer will be appended to the parent buffer.
-    // The buffer operation may be changed if the child views array is
-    // mutated by Ember.ContainerView.
-    bufferOperation = bufferOperation || 'begin';
-
-    // If this is the top-most view, start a new buffer. Otherwise,
-    // create a new buffer relative to the original using the
-    // provided buffer operation (for example, `insertAfter` will
-    // insert a new buffer after the "parent buffer").
-    if (parentBuffer) {
-      var tagName = get(this, 'tagName');
-      if (tagName === null || tagName === undefined) {
-        tagName = 'div';
-      }
-
-      buffer = parentBuffer[bufferOperation](tagName);
-    } else {
-      buffer = this.renderBuffer();
-    }
-
-    this.buffer = buffer;
-    this.transitionTo('inBuffer', false);
-
-    this.lengthBeforeRender = this._childViews.length;
-
-    this.beforeRender(buffer);
-    this.render(buffer);
-    this.afterRender(buffer);
-
-    this.lengthAfterRender = this._childViews.length;
-
-    return buffer;
-  },
-
-  beforeRender: function(buffer) {
-    this.applyAttributesToBuffer(buffer);
-  },
-
-  afterRender: Ember.K,
-
-  /**
-    @private
-  */
-  applyAttributesToBuffer: function(buffer) {
-    // Creates observers for all registered class name and attribute bindings,
-    // then adds them to the element.
-    this._applyClassNameBindings();
-
-    // Pass the render buffer so the method can apply attributes directly.
-    // This isn't needed for class name bindings because they use the
-    // existing classNames infrastructure.
-    this._applyAttributeBindings(buffer);
-
-    a_forEach(get(this, 'classNames'), function(name){ buffer.addClass(name); });
-    buffer.id(get(this, 'elementId'));
-
-    var role = get(this, 'ariaRole');
-    if (role) {
-      buffer.attr('role', role);
-    }
-
-    if (get(this, 'isVisible') === false) {
-      buffer.style('display', 'none');
-    }
-  },
-
-  // ..........................................................
-  // STANDARD RENDER PROPERTIES
-  //
-
-  /**
-    Tag name for the view's outer element. The tag name is only used when
-    an element is first created. If you change the tagName for an element, you
-    must destroy and recreate the view element.
-
-    By default, the render buffer will use a `<div>` tag for views.
-
-    @type String
-    @default null
-  */
-
-  // We leave this null by default so we can tell the difference between
-  // the default case and a user-specified tag.
-  tagName: null,
-
-  /**
-    The WAI-ARIA role of the control represented by this view. For example, a
-    button may have a role of type 'button', or a pane may have a role of
-    type 'alertdialog'. This property is used by assistive software to help
-    visually challenged users navigate rich web applications.
-
-    The full list of valid WAI-ARIA roles is available at:
-    http://www.w3.org/TR/wai-aria/roles#roles_categorization
-
-    @type String
-    @default null
-  */
-  ariaRole: null,
-
-  /**
-    Standard CSS class names to apply to the view's outer element. This
-    property automatically inherits any class names defined by the view's
-    superclasses as well.
-
-    @type Array
-    @default ['ember-view']
-  */
-  classNames: ['ember-view'],
-
-  /**
-    A list of properties of the view to apply as class names. If the property
-    is a string value, the value of that string will be applied as a class
-    name.
-
-        // Applies the 'high' class to the view element
-        Ember.View.create({
-          classNameBindings: ['priority']
-          priority: 'high'
-        });
-
-    If the value of the property is a Boolean, the name of that property is
-    added as a dasherized class name.
-
-        // Applies the 'is-urgent' class to the view element
-        Ember.View.create({
-          classNameBindings: ['isUrgent']
-          isUrgent: true
-        });
-
-    If you would prefer to use a custom value instead of the dasherized
-    property name, you can pass a binding like this:
-
-        // Applies the 'urgent' class to the view element
-        Ember.View.create({
-          classNameBindings: ['isUrgent:urgent']
-          isUrgent: true
-        });
-
-    This list of properties is inherited from the view's superclasses as well.
-
-    @type Array
-    @default []
-  */
-  classNameBindings: [],
-
-  /**
-    A list of properties of the view to apply as attributes. If the property is
-    a string value, the value of that string will be applied as the attribute.
-
-        // Applies the type attribute to the element
-        // with the value "button", like <div type="button">
-        Ember.View.create({
-          attributeBindings: ['type'],
-          type: 'button'
-        });
-
-    If the value of the property is a Boolean, the name of that property is
-    added as an attribute.
-
-        // Renders something like <div enabled="enabled">
-        Ember.View.create({
-          attributeBindings: ['enabled'],
-          enabled: true
-        });
-  */
-  attributeBindings: [],
-
-  state: 'preRender',
-
-  // .......................................................
-  // CORE DISPLAY METHODS
-  //
-
-  /**
-    @private
-
-    Setup a view, but do not finish waking it up.
-    - configure childViews
-    - register the view with the global views hash, which is used for event
-      dispatch
-  */
-  init: function() {
-    this._super();
-
-    // Register the view for event handling. This hash is used by
-    // Ember.EventDispatcher to dispatch incoming events.
-    if (!this.isVirtual) Ember.View.views[get(this, 'elementId')] = this;
-
-    // setup child views. be sure to clone the child views array first
-    this._childViews = this._childViews.slice();
-
-    Ember.assert("Only arrays are allowed for 'classNameBindings'", Ember.typeOf(this.classNameBindings) === 'array');
-    this.classNameBindings = Ember.A(this.classNameBindings.slice());
-
-    Ember.assert("Only arrays are allowed for 'classNames'", Ember.typeOf(this.classNames) === 'array');
-    this.classNames = Ember.A(this.classNames.slice());
-
-    var viewController = get(this, 'viewController');
-    if (viewController) {
-      viewController = get(viewController);
-      if (viewController) {
-        set(viewController, 'view', this);
-      }
-    }
-  },
-
-  appendChild: function(view, options) {
-    return this.invokeForState('appendChild', view, options);
-  },
-
-  /**
-    Removes the child view from the parent view.
-
-    @param {Ember.View} view
-    @returns {Ember.View} receiver
-  */
-  removeChild: function(view) {
-    // If we're destroying, the entire subtree will be
-    // freed, and the DOM will be handled separately,
-    // so no need to mess with childViews.
-    if (this.isDestroying) { return; }
-
-    // update parent node
-    set(view, '_parentView', null);
-
-    // remove view from childViews array.
-    var childViews = this._childViews;
-
-    Ember.EnumerableUtils.removeObject(childViews, view);
-
-    this.propertyDidChange('childViews'); // HUH?! what happened to will change?
-
-    return this;
-  },
-
-  /**
-    Removes all children from the parentView.
-
-    @returns {Ember.View} receiver
-  */
-  removeAllChildren: function() {
-    return this.mutateChildViews(function(view) {
-      this.removeChild(view);
-    });
-  },
-
-  destroyAllChildren: function() {
-    return this.mutateChildViews(function(view) {
-      view.destroy();
-    });
-  },
-
-  /**
-    Removes the view from its parentView, if one is found. Otherwise
-    does nothing.
-
-    @returns {Ember.View} receiver
-  */
-  removeFromParent: function() {
-    var parent = get(this, '_parentView');
-
-    // Remove DOM element from parent
-    this.remove();
-
-    if (parent) { parent.removeChild(this); }
-    return this;
-  },
-
-  /**
-    You must call `destroy` on a view to destroy the view (and all of its
-    child views). This will remove the view from any parent node, then make
-    sure that the DOM element managed by the view can be released by the
-    memory manager.
-  */
-  willDestroy: function() {
-    // calling this._super() will nuke computed properties and observers,
-    // so collect any information we need before calling super.
-    var childViews = this._childViews,
-        parent     = get(this, '_parentView'),
-        childLen;
-
-    // destroy the element -- this will avoid each child view destroying
-    // the element over and over again...
-    if (!this.removedFromDOM) { this.destroyElement(); }
-
-    // remove from non-virtual parent view if viewName was specified
-    if (this.viewName) {
-      var nonVirtualParentView = get(this, 'parentView');
-      if (nonVirtualParentView) {
-        set(nonVirtualParentView, this.viewName, null);
-      }
-    }
-
-    // remove from parent if found. Don't call removeFromParent,
-    // as removeFromParent will try to remove the element from
-    // the DOM again.
-    if (parent) { parent.removeChild(this); }
-
-    this.state = 'destroyed';
-
-    childLen = childViews.length;
-    for (var i=childLen-1; i>=0; i--) {
-      childViews[i].removedFromDOM = true;
-      childViews[i].destroy();
-    }
-
-    // next remove view from global hash
-    if (!this.isVirtual) delete Ember.View.views[get(this, 'elementId')];
-  },
-
-  /**
-    Instantiates a view to be added to the childViews array during view
-    initialization. You generally will not call this method directly unless
-    you are overriding createChildViews(). Note that this method will
-    automatically configure the correct settings on the new view instance to
-    act as a child of the parent.
-
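-    An illustrative sketch, from within a method on the parent view
-    (the template name is hypothetical):
-
-        var child = this.createChildView(Ember.View, {
-          templateName: 'child-template'
-        });
-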
-    @param {Class} viewClass
-    @param {Hash} [attrs] Attributes to add
-    @returns {Ember.View} new instance
-    @test in createChildViews
-  */
-  createChildView: function(view, attrs) {
-    if (Ember.View.detect(view)) {
-      attrs = attrs || {};
-      attrs._parentView = this;
-      attrs.templateData = attrs.templateData || get(this, 'templateData');
-
-      view = view.create(attrs);
-
-      // don't set the property on a virtual view, as they are invisible to
-      // consumers of the view API
-      if (view.viewName) { set(get(this, 'concreteView'), view.viewName, view); }
-    } else {
-      Ember.assert('You must pass instance or subclass of View', view instanceof Ember.View);
-      Ember.assert("You can only pass attributes when a class is provided", !attrs);
-
-      if (!get(view, 'templateData')) {
-        set(view, 'templateData', get(this, 'templateData'));
-      }
-
-      set(view, '_parentView', this);
-    }
-
-    return view;
-  },
-
-  becameVisible: Ember.K,
-  becameHidden: Ember.K,
-
-  /**
-    @private
-
-    When the view's `isVisible` property changes, toggle the visibility
-    of the actual DOM element to match.
-  */
-  _isVisibleDidChange: Ember.observer(function() {
-    var $el = this.$();
-    if (!$el) { return; }
-
-    var isVisible = get(this, 'isVisible');
-
-    $el.toggle(isVisible);
-
-    if (this._isAncestorHidden()) { return; }
-
-    if (isVisible) {
-      this._notifyBecameVisible();
-    } else {
-      this._notifyBecameHidden();
-    }
-  }, 'isVisible'),
-
-  _notifyBecameVisible: function() {
-    this.trigger('becameVisible');
-
-    this.forEachChildView(function(view) {
-      var isVisible = get(view, 'isVisible');
-
-      if (isVisible || isVisible === null) {
-        view._notifyBecameVisible();
-      }
-    });
-  },
-
-  _notifyBecameHidden: function() {
-    this.trigger('becameHidden');
-    this.forEachChildView(function(view) {
-      var isVisible = get(view, 'isVisible');
-
-      if (isVisible || isVisible === null) {
-        view._notifyBecameHidden();
-      }
-    });
-  },
-
-  _isAncestorHidden: function() {
-    var parent = get(this, 'parentView');
-
-    while (parent) {
-      if (get(parent, 'isVisible') === false) { return true; }
-
-      parent = get(parent, 'parentView');
-    }
-
-    return false;
-  },
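-
-  // A sketch of the visibility flow above, assuming `aView` is a view
-  // that is already rendered and visible:
-  //
-  //     aView.set('isVisible', false);
-  //     // -> the element is hidden via jQuery's `toggle`, and (unless an
-  //     //    ancestor is already hidden) `becameHidden` fires on `aView`
-  //     //    and on each child view whose own `isVisible` is true or null.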
-
-  clearBuffer: function() {
-    this.invokeRecursively(function(view) {
-      this.buffer = null;
-    });
-  },
-
-  transitionTo: function(state, children) {
-    this.state = state;
-
-    if (children !== false) {
-      this.forEachChildView(function(view) {
-        view.transitionTo(state);
-      });
-    }
-  },
-
-  /**
-    @private
-
-    Override the default event firing from Ember.Evented to
-    also call methods with the given name.
-  */
-  trigger: function(name) {
-    this._super.apply(this, arguments);
-    var method = this[name];
-    if (method) {
-      var args = [], i, l;
-      for (i = 1, l = arguments.length; i < l; i++) {
-        args.push(arguments[i]);
-      }
-      return method.apply(this, args);
-    }
-  },
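-
-  // Sketch of the `trigger` override above; the `didSomething` hook is
-  // hypothetical, for illustration only:
-  //
-  //     var aView = Ember.View.create({
-  //       didSomething: function(amount) { return 'got ' + amount; }
-  //     });
-  //     aView.trigger('didSomething', 42);
-  //     // => 'got 42', after any Ember.Evented listeners have fired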
-
-  has: function(name) {
-    return Ember.typeOf(this[name]) === 'function' || this._super(name);
-  },
-
-  // .......................................................
-  // EVENT HANDLING
-  //
-
-  /**
-    @private
-
-    Handle events from `Ember.EventDispatcher`
-  */
-  handleEvent: function(eventName, evt) {
-    return this.invokeForState('handleEvent', eventName, evt);
-  }
-
-});
-
-/**
-  Describe how the specified actions should behave in the various
-  states that a view can exist in. Possible states:
-
-  * preRender: when a view is first instantiated, and after its
-    element was destroyed, it is in the preRender state
-  * inBuffer: once a view has been rendered, but before it has
-    been inserted into the DOM, it is in the inBuffer state
-  * inDOM: once a view has been inserted into the DOM it is in
-    the inDOM state. A view spends the vast majority of its
-    existence in this state.
-  * destroyed: once a view has been destroyed (using the destroy
-    method), it is in this state. No further actions can be invoked
-    on a destroyed view.
-*/
-
-  // in the destroyed state, everything is illegal
-
-  // before rendering has begun, all legal manipulations are noops.
-
-  // inside the buffer, legal manipulations are done on the buffer
-
-  // once the view has been inserted into the DOM, legal manipulations
-  // are done on the DOM element.
-
-/** @private */
-var DOMManager = {
-  prepend: function(view, childView) {
-    childView._insertElementLater(function() {
-      var element = view.$();
-      element.prepend(childView.$());
-    });
-  },
-
-  after: function(view, nextView) {
-    nextView._insertElementLater(function() {
-      var element = view.$();
-      element.after(nextView.$());
-    });
-  },
-
-  replace: function(view) {
-    var element = get(view, 'element');
-
-    set(view, 'element', null);
-
-    view._insertElementLater(function() {
-      Ember.$(element).replaceWith(get(view, 'element'));
-    });
-  },
-
-  remove: function(view) {
-    var elem = get(view, 'element');
-
-    set(view, 'element', null);
-    view._lastInsert = null;
-
-    Ember.$(elem).remove();
-  },
-
-  empty: function(view) {
-    view.$().empty();
-  }
-};
-
-Ember.View.reopen({
-  states: Ember.View.states,
-  domManager: DOMManager
-});
-
-Ember.View.reopenClass({
-
-  /**
-    @private
-
-    Parse a path and return an object which holds the parsed properties.
-
-    For example, a path like "content.isEnabled:enabled:disabled" will return the
-    following object:
-
-        {
-          path: "content.isEnabled",
-          className: "enabled",
-          falsyClassName: "disabled",
-          classNames: ":enabled:disabled"
-        }
-
-  */
-  _parsePropertyPath: function(path) {
-    var split = path.split(/:/),
-        propertyPath = split[0],
-        classNames = "",
-        className,
-        falsyClassName;
-
-    // check if the property is defined as prop:class or prop:trueClass:falseClass
-    if (split.length > 1) {
-      className = split[1];
-      if (split.length === 3) { falsyClassName = split[2]; }
-
-      classNames = ':' + className;
-      if (falsyClassName) { classNames += ":" + falsyClassName; }
-    }
-
-    return {
-      path: propertyPath,
-      classNames: classNames,
-      className: (className === '') ? undefined : className,
-      falsyClassName: falsyClassName
-    };
-  },
-
-  /**
-    @private
-
-    Get the class name for a given value, based on the path, optional className
-    and optional falsyClassName.
-
-    - if the value is truthy and a className is defined, the className is returned
-    - if the value is true, the dasherized last part of the supplied path is returned
-    - if the value is false and a falsyClassName is supplied, the falsyClassName is returned
-    - if the value is truthy, the value is returned
-    - if none of the above rules apply, null is returned
-
-  */
-  _classStringForValue: function(path, val, className, falsyClassName) {
-    // If the value is truthy and we're using the colon syntax,
-    // we should return the className directly
-    if (!!val && className) {
-      return className;
-
-    // If value is a Boolean and true, return the dasherized property
-    // name.
-    } else if (val === true) {
-      // catch syntax like isEnabled::not-enabled
-      if (val === true && !className && falsyClassName) { return null; }
-
-      // Normalize property path to be suitable for use
-      // as a class name. For example, content.foo.barBaz
-      // becomes bar-baz.
-      var parts = path.split('.');
-      return Ember.String.dasherize(parts[parts.length-1]);
-
-    // If the value is false and a falsyClassName is specified, return it
-    } else if (val === false && falsyClassName) {
-      return falsyClassName;
-
-    // If the value is not false, undefined, or null, return the current
-    // value of the property.
-    } else if (val !== false && val !== undefined && val !== null) {
-      return val;
-
-    // Nothing to display. Return null so that the old class is removed
-    // but no new class is added.
-    } else {
-      return null;
-    }
-  }
-});
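-
-// Worked examples of what the `_classStringForValue` rules above imply
-// (illustrative only; this is a private class method):
-//
-//     Ember.View._classStringForValue('content.isEnabled', true);
-//     // => 'is-enabled' (dasherized last path segment)
-//     Ember.View._classStringForValue('content.isEnabled', true, 'on');
-//     // => 'on' (truthy value with an explicit className)
-//     Ember.View._classStringForValue('content.isEnabled', false, null, 'off');
-//     // => 'off' (false value with a falsyClassName)
-//     Ember.View._classStringForValue('content.count', 5);
-//     // => 5 (non-boolean truthy values pass through)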
-
-// Create a global view hash.
-Ember.View.views = {};
-
-// If someone overrides the child views computed property when
-// defining their class, we want to be able to process the user's
-// supplied childViews and then restore the original computed property
-// at view initialization time. This happens in Ember.ContainerView's init
-// method.
-Ember.View.childViewsProperty = childViewsProperty;
-
-Ember.View.applyAttributeBindings = function(elem, name, value) {
-  var type = Ember.typeOf(value);
-  var currentValue = elem.attr(name);
-
-  // if this changes, also change the logic in ember-handlebars/lib/helpers/binding.js
-  if ((type === 'string' || (type === 'number' && !isNaN(value))) && value !== currentValue) {
-    elem.attr(name, value);
-  } else if (value && type === 'boolean') {
-    elem.attr(name, name);
-  } else if (!value) {
-    elem.removeAttr(name);
-  }
-};
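-
-// A sketch of `applyAttributeBindings`, assuming `elem` is a jQuery object
-// wrapping an `<input>` element:
-//
-//     Ember.View.applyAttributeBindings(elem, 'disabled', true);
-//     // boolean true -> elem.attr('disabled', 'disabled')
-//     Ember.View.applyAttributeBindings(elem, 'title', 'hi');
-//     // string       -> elem.attr('title', 'hi'), but only if it changed
-//     Ember.View.applyAttributeBindings(elem, 'title', false);
-//     // falsy        -> elem.removeAttr('title')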
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:   Ember - JavaScript Application Framework
-// Copyright: ©2006-2011 Strobe Inc. and contributors.
-//            Portions ©2008-2011 Apple Inc. All rights reserved.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-var get = Ember.get, set = Ember.set;
-
-Ember.View.states = {
-  _default: {
-    // appendChild is only legal while rendering the buffer.
-    appendChild: function() {
-      throw "You can't use appendChild outside of the rendering process";
-    },
-
-    $: function() {
-      return undefined;
-    },
-
-    getElement: function() {
-      return null;
-    },
-
-    // Handle events from `Ember.EventDispatcher`
-    handleEvent: function() {
-      return true; // continue event propagation
-    },
-
-    destroyElement: function(view) {
-      set(view, 'element', null);
-      view._lastInsert = null;
-      return view;
-    }
-  }
-};
-
-Ember.View.reopen({
-  states: Ember.View.states
-});
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:   Ember - JavaScript Application Framework
-// Copyright: ©2006-2011 Strobe Inc. and contributors.
-//            Portions ©2008-2011 Apple Inc. All rights reserved.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-Ember.View.states.preRender = {
-  parentState: Ember.View.states._default,
-
-  // a view leaves the preRender state once its element has been
-  // created (createElement).
-  insertElement: function(view, fn) {
-    if (view._lastInsert !== Ember.guidFor(fn)){
-      return;
-    }
-    view.createElement();
-    view._notifyWillInsertElement();
-    // after createElement, the view will be in the hasElement state.
-    fn.call(view);
-    view.transitionTo('inDOM');
-    view._notifyDidInsertElement();
-  },
-
-  empty: Ember.K,
-
-  setElement: function(view, value) {
-    if (value !== null) {
-      view.transitionTo('hasElement');
-    }
-    return value;
-  }
-};
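-
-// Lifecycle order implied by `insertElement` above:
-//   createElement -> willInsertElement -> (fn performs the DOM insertion)
-//   -> transitionTo('inDOM') -> didInsertElement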
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:   Ember - JavaScript Application Framework
-// Copyright: ©2006-2011 Strobe Inc. and contributors.
-//            Portions ©2008-2011 Apple Inc. All rights reserved.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-var get = Ember.get, set = Ember.set, meta = Ember.meta;
-
-Ember.View.states.inBuffer = {
-  parentState: Ember.View.states._default,
-
-  $: function(view, sel) {
-    // if we don't have an element yet, someone calling this.$() is
-    // trying to update an element that isn't in the DOM. Instead,
-    // rerender the view to allow the render method to reflect the
-    // changes.
-    view.rerender();
-    return Ember.$();
-  },
-
-  // when a view is rendered in a buffer, rerendering it simply
-  // replaces the existing buffer with a new one
-  rerender: function(view) {
-    Ember.deprecate("Something you did caused a view to re-render after it rendered but before it was inserted into the DOM. Because this is avoidable and the cause of significant performance issues in applications, this behavior is deprecated. If you want to use the debugger to find out what caused this, you can set ENV.RAISE_ON_DEPRECATION to true.");
-
-    view._notifyWillRerender();
-
-    view.clearRenderedChildren();
-    view.renderToBuffer(view.buffer, 'replaceWith');
-  },
-
-  // when a view is rendered in a buffer, appending a child
-  // view will render that view and append the resulting
-  // buffer into its buffer.
-  appendChild: function(view, childView, options) {
-    var buffer = view.buffer;
-
-    childView = this.createChildView(childView, options);
-    view._childViews.push(childView);
-
-    childView.renderToBuffer(buffer);
-
-    view.propertyDidChange('childViews');
-
-    return childView;
-  },
-
-  // when a view is rendered in a buffer, destroying the
-  // element will simply destroy the buffer and put the
-  // state back into the preRender state.
-  destroyElement: function(view) {
-    view.clearBuffer();
-    view._notifyWillDestroyElement();
-    view.transitionTo('preRender');
-
-    return view;
-  },
-
-  empty: function() {
-    Ember.assert("Emptying a view in the inBuffer state is not allowed and should not happen under normal circumstances. Most likely there is a bug in your application. This may be due to excessive property change notifications.");
-  },
-
-  // It should be impossible for a rendered view to be scheduled for
-  // insertion.
-  insertElement: function() {
-    throw "You can't insert an element that has already been rendered";
-  },
-
-  setElement: function(view, value) {
-    if (value === null) {
-      view.transitionTo('preRender');
-    } else {
-      view.clearBuffer();
-      view.transitionTo('hasElement');
-    }
-
-    return value;
-  }
-};
-
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:   Ember - JavaScript Application Framework
-// Copyright: ©2006-2011 Strobe Inc. and contributors.
-//            Portions ©2008-2011 Apple Inc. All rights reserved.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-var get = Ember.get, set = Ember.set, meta = Ember.meta;
-
-Ember.View.states.hasElement = {
-  parentState: Ember.View.states._default,
-
-  $: function(view, sel) {
-    var elem = get(view, 'element');
-    return sel ? Ember.$(sel, elem) : Ember.$(elem);
-  },
-
-  getElement: function(view) {
-    var parent = get(view, 'parentView');
-    if (parent) { parent = get(parent, 'element'); }
-    if (parent) { return view.findElementInParentElement(parent); }
-    return Ember.$("#" + get(view, 'elementId'))[0];
-  },
-
-  setElement: function(view, value) {
-    if (value === null) {
-      view.transitionTo('preRender');
-    } else {
-      throw "You cannot set an element to a non-null value when the element is already in the DOM.";
-    }
-
-    return value;
-  },
-
-  // once the view has been inserted into the DOM, rerendering is
-  // deferred to allow bindings to synchronize.
-  rerender: function(view) {
-    view._notifyWillRerender();
-
-    view.clearRenderedChildren();
-
-    view.domManager.replace(view);
-    return view;
-  },
-
-  // once the view is already in the DOM, destroying it removes it
-  // from the DOM, nukes its element, and puts it back into the
-  // preRender state.
-
-  destroyElement: function(view) {
-    view._notifyWillDestroyElement();
-    view.domManager.remove(view);
-    return view;
-  },
-
-  empty: function(view) {
-    var _childViews = view._childViews, len, idx;
-    if (_childViews) {
-      len = _childViews.length;
-      for (idx = 0; idx < len; idx++) {
-        _childViews[idx]._notifyWillDestroyElement();
-      }
-    }
-    view.domManager.empty(view);
-  },
-
-  // Handle events from `Ember.EventDispatcher`
-  handleEvent: function(view, eventName, evt) {
-    if (view.has(eventName)) {
-      // Handler should be able to re-dispatch events, so we don't
-      // preventDefault or stopPropagation.
-      return view.trigger(eventName, evt);
-    } else {
-      return true; // continue event propagation
-    }
-  }
-};
-
-Ember.View.states.inDOM = {
-  parentState: Ember.View.states.hasElement,
-
-  insertElement: function(view, fn) {
-    if (view._lastInsert !== Ember.guidFor(fn)){
-      return;
-    }
-    throw "You can't insert an element into the DOM that has already been inserted";
-  }
-};
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:   Ember - JavaScript Application Framework
-// Copyright: ©2006-2011 Strobe Inc. and contributors.
-//            Portions ©2008-2011 Apple Inc. All rights reserved.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-var destroyedError = "You can't call %@ on a destroyed view", fmt = Ember.String.fmt;
-
-Ember.View.states.destroyed = {
-  parentState: Ember.View.states._default,
-
-  appendChild: function() {
-    throw fmt(destroyedError, ['appendChild']);
-  },
-  rerender: function() {
-    throw fmt(destroyedError, ['rerender']);
-  },
-  destroyElement: function() {
-    throw fmt(destroyedError, ['destroyElement']);
-  },
-  empty: function() {
-    throw fmt(destroyedError, ['empty']);
-  },
-
-  setElement: function() {
-    throw fmt(destroyedError, ["set('element', ...)"]);
-  },
-
-  // Since element insertion is scheduled, don't do anything if
-  // the view has been destroyed between scheduling and execution
-  insertElement: Ember.K
-};
-
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:   Ember - JavaScript Application Framework
-// Copyright: ©2006-2011 Strobe Inc. and contributors.
-//            Portions ©2008-2011 Apple Inc. All rights reserved.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:   Ember - JavaScript Application Framework
-// Copyright: ©2006-2011 Strobe Inc. and contributors.
-//            Portions ©2008-2011 Apple Inc. All rights reserved.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-var get = Ember.get, set = Ember.set, meta = Ember.meta;
-var forEach = Ember.EnumerableUtils.forEach;
-
-var childViewsProperty = Ember.computed(function() {
-  return get(this, '_childViews');
-}).property('_childViews').cacheable();
-
-/**
-  @class
-
-  A `ContainerView` is an `Ember.View` subclass that allows for manual or programmatic
-  management of a view's `childViews` array; changes to that array will correctly update
-  the `ContainerView` instance's rendered DOM representation.
-
-  ## Setting Initial Child Views
-  The initial array of child views can be set in one of two ways. You can provide
-  a `childViews` property at creation time that contains instances of `Ember.View`:
-
-
-        aContainer = Ember.ContainerView.create({
-          childViews: [Ember.View.create(), Ember.View.create()]
-        })
-
-  You can also provide a list of property names whose values are instances of `Ember.View`:
-
-        aContainer = Ember.ContainerView.create({
-          childViews: ['aView', 'bView', 'cView'],
-          aView: Ember.View.create(),
-          bView: Ember.View.create(),
-          cView: Ember.View.create()
-        })
-
-  The two strategies can be combined:
-
-        aContainer = Ember.ContainerView.create({
-          childViews: ['aView', Ember.View.create()],
-          aView: Ember.View.create()
-        })
-
-  Each child view's rendering will be inserted into the container's rendered HTML in the same
-  order as its position in the `childViews` property.
-
-  ## Adding and Removing Child Views
-  The views in a container's `childViews` array should be added and removed by manipulating
-  the `childViews` property directly.
-
-  To remove a view, pass that view into a `removeObject` call on the container's `childViews` property.
-
-  Given an empty `<body>` the following code
-
-        aContainer = Ember.ContainerView.create({
-          classNames: ['the-container'],
-          childViews: ['aView', 'bView'],
-          aView: Ember.View.create({
-            template: Ember.Handlebars.compile("A")
-          }),
-          bView: Ember.View.create({
-            template: Ember.Handlebars.compile("B")
-          })
-        })
-
-        aContainer.appendTo('body')
-
-  Results in the HTML
-
-        <div class="ember-view the-container">
-          <div class="ember-view">A</div>
-          <div class="ember-view">B</div>
-        </div>
-
-  Removing a view
-
-        aContainer.get('childViews') // [aContainer.aView, aContainer.bView]
-        aContainer.get('childViews').removeObject(aContainer.get('bView'))
-        aContainer.get('childViews') // [aContainer.aView]
-
-  Will result in the following HTML
-
-        <div class="ember-view the-container">
-          <div class="ember-view">A</div>
-        </div>
-
-
-  Similarly, adding a child view is accomplished by adding `Ember.View` instances to the
-  container's `childViews` property.
-
-  Given an empty `<body>` the following code
-
-        aContainer = Ember.ContainerView.create({
-          classNames: ['the-container'],
-          childViews: ['aView', 'bView'],
-          aView: Ember.View.create({
-            template: Ember.Handlebars.compile("A")
-          }),
-          bView: Ember.View.create({
-            template: Ember.Handlebars.compile("B")
-          })
-        })
-
-        aContainer.appendTo('body')
-
-  Results in the HTML
-
-        <div class="ember-view the-container">
-          <div class="ember-view">A</div>
-          <div class="ember-view">B</div>
-        </div>
-
-  Adding a view
-
-        AnotherViewClass = Ember.View.extend({
-          template: Ember.Handlebars.compile("Another view")
-        })
-
-        aContainer.get('childViews') // [aContainer.aView, aContainer.bView]
-        aContainer.get('childViews').pushObject(AnotherViewClass.create())
-        aContainer.get('childViews') // [aContainer.aView, aContainer.bView, <AnotherViewClass instance>]
-
-  Will result in the following HTML
-
-        <div class="ember-view the-container">
-          <div class="ember-view">A</div>
-          <div class="ember-view">B</div>
-          <div class="ember-view">Another view</div>
-        </div>
-
-
-  Manipulating a child view's presence in the DOM directly, via calls to `remove`
-  or `removeFromParent`, or via calls to a container's `removeChild`, may not behave
-  correctly.
-
-  Calling `remove()` on a child view will remove the view's HTML, but the view will
-  remain part of its container's `childViews` property.
-
-  Calling `removeChild()` on the container will remove the passed view instance from
-  the container's `childViews` but keep its HTML within the container's rendered view.
-
-  Calling `removeFromParent()` behaves as expected, but should be avoided in favor of
-  direct manipulation of a container's `childViews` property.
-
-        aContainer = Ember.ContainerView.create({
-          classNames: ['the-container'],
-          childViews: ['aView', 'bView'],
-          aView: Ember.View.create({
-            template: Ember.Handlebars.compile("A")
-          }),
-          bView: Ember.View.create({
-            template: Ember.Handlebars.compile("B")
-          })
-        })
-
-        aContainer.appendTo('body')
-
-  Results in the HTML
-
-        <div class="ember-view the-container">
-          <div class="ember-view">A</div>
-          <div class="ember-view">B</div>
-        </div>
-
-  Calling `aContainer.get('aView').removeFromParent()` will result in the following HTML
-
-        <div class="ember-view the-container">
-          <div class="ember-view">B</div>
-        </div>
-
-  And the `Ember.View` instance stored in `aContainer.aView` will be removed from `aContainer`'s
-  `childViews` array.
-
-  ## Templates and Layout
-  A `template`, `templateName`, `defaultTemplate`, `layout`, `layoutName` or `defaultLayout`
-  property on a container view will not result in the template or layout being rendered.
-  The HTML contents of an `Ember.ContainerView`'s DOM representation will only be the rendered HTML
-  of its child views.
-
-  ## Binding a View to Display
-
-  If you would like to display a single view in your ContainerView, you can set its `currentView`
-  property. When the `currentView` property is set to a view instance, it will be added to the
-  ContainerView's `childViews` array. If the `currentView` property is later changed to a
-  different view, the new view will replace the old view. If `currentView` is set to `null`, the
-  last `currentView` will be removed.
-
-  This functionality is useful for cases where you want to bind the display of a ContainerView to
-  a controller or state manager. For example, you can bind the `currentView` of a container to
-  a controller like this:
-
-      // Controller
-      App.appController = Ember.Object.create({
-        view: Ember.View.create({
-          templateName: 'person_template'
-        })
-      });
-
-      // Handlebars template
-      {{view Ember.ContainerView currentViewBinding="App.appController.view"}}
-
-  @extends Ember.View
-*/
-
-Ember.ContainerView = Ember.View.extend({
-
-  init: function() {
-    this._super();
-
-    var childViews = get(this, 'childViews');
-    Ember.defineProperty(this, 'childViews', childViewsProperty);
-
-    var _childViews = this._childViews;
-
-    forEach(childViews, function(viewName, idx) {
-      var view;
-
-      if ('string' === typeof viewName) {
-        view = get(this, viewName);
-        view = this.createChildView(view);
-        set(this, viewName, view);
-      } else {
-        view = this.createChildView(viewName);
-      }
-
-      _childViews[idx] = view;
-    }, this);
-
-    var currentView = get(this, 'currentView');
-    if (currentView) _childViews.push(this.createChildView(currentView));
-
-    // Make the _childViews array observable
-    Ember.A(_childViews);
-
-    // Sets up an array observer on the child views array. This
-    // observer will detect when child views are added or removed
-    // and update the DOM to reflect the mutation.
-    get(this, 'childViews').addArrayObserver(this, {
-      willChange: 'childViewsWillChange',
-      didChange: 'childViewsDidChange'
-    });
-  },
-
-  /**
-    Instructs each child view to render to the passed render buffer.
-
-    @param {Ember.RenderBuffer} buffer the buffer to render to
-    @private
-  */
-  render: function(buffer) {
-    this.forEachChildView(function(view) {
-      view.renderToBuffer(buffer);
-    });
-  },
-
-  /**
-    When the container view is destroyed, tear down the child views
-    array observer.
-
-    @private
-  */
-  willDestroy: function() {
-    get(this, 'childViews').removeArrayObserver(this, {
-      willChange: 'childViewsWillChange',
-      didChange: 'childViewsDidChange'
-    });
-
-    this._super();
-  },
-
-  /**
-    When a child view is removed, destroy its element so that
-    it is removed from the DOM.
-
-    The array observer that triggers this action is set up in the
-    `init` method.
-
-    @private
-    @param {Ember.Array} views the child views array before mutation
-    @param {Number} start the start position of the mutation
-    @param {Number} removed the number of child views removed
-  */
-  childViewsWillChange: function(views, start, removed) {
-    if (removed === 0) { return; }
-
-    var changedViews = views.slice(start, start+removed);
-    this.initializeViews(changedViews, null, null);
-
-    this.invokeForState('childViewsWillChange', views, start, removed);
-  },
-
-  /**
-    When a child view is added, make sure the DOM gets updated appropriately.
-
-    If the view has already rendered an element, we tell the child view to
-    create an element and insert it into the DOM. If the enclosing container view
-    has already written to a buffer, but not yet converted that buffer into an
-    element, we insert the string representation of the child into the appropriate
-    place in the buffer.
-
-    @private
-    @param {Ember.Array} views the array of child views after the mutation has occurred
-    @param {Number} start the start position of the mutation
-    @param {Number} removed the number of child views removed
-    @param {Number} added the number of child views added
-  */
-  childViewsDidChange: function(views, start, removed, added) {
-    var len = get(views, 'length');
-
-    // No new child views were added; bail out.
-    if (added === 0) return;
-
-    var changedViews = views.slice(start, start+added);
-    this.initializeViews(changedViews, this, get(this, 'templateData'));
-
-    // Let the current state handle the changes
-    this.invokeForState('childViewsDidChange', views, start, added);
-  },
-
-  initializeViews: function(views, parentView, templateData) {
-    forEach(views, function(view) {
-      set(view, '_parentView', parentView);
-
-      if (!get(view, 'templateData')) {
-        set(view, 'templateData', templateData);
-      }
-    });
-  },
-
-  /**
-    Schedules a child view to be inserted into the DOM after bindings have
-    finished syncing for this run loop.
-
-    @param {Ember.View} view the child view to insert
-    @param {Ember.View} prev the child view after which the specified view should
-                     be inserted
-    @private
-  */
-  _scheduleInsertion: function(view, prev) {
-    if (prev) {
-      prev.domManager.after(prev, view);
-    } else {
-      this.domManager.prepend(this, view);
-    }
-  },
-
-  currentView: null,
-
-  _currentViewWillChange: Ember.beforeObserver(function() {
-    var childViews = get(this, 'childViews'),
-        currentView = get(this, 'currentView');
-
-    if (currentView) {
-      childViews.removeObject(currentView);
-      currentView.destroy();
-    }
-  }, 'currentView'),
-
-  _currentViewDidChange: Ember.observer(function() {
-    var childViews = get(this, 'childViews'),
-        currentView = get(this, 'currentView');
-
-    if (currentView) {
-      childViews.pushObject(currentView);
-    }
-  }, 'currentView')
-});
-
-// Ember.ContainerView extends the default view states to provide different
-// behavior for childViewsWillChange and childViewsDidChange.
-Ember.ContainerView.states = {
-  parent: Ember.View.states,
-
-  inBuffer: {
-    childViewsDidChange: function(parentView, views, start, added) {
-      var buffer = parentView.buffer,
-          startWith, prev, prevBuffer, view;
-
-      // Determine where to begin inserting the child view(s) in the
-      // render buffer.
-      if (start === 0) {
-        // If views were inserted at the beginning, prepend the first
-        // view to the render buffer, then begin inserting any
-        // additional views at the beginning.
-        view = views[start];
-        startWith = start + 1;
-        view.renderToBuffer(buffer, 'prepend');
-      } else {
-        // Otherwise, just insert them at the same place as the child
-        // views mutation.
-        view = views[start - 1];
-        startWith = start;
-      }
-
-      for (var i=startWith; i<start+added; i++) {
-        prev = view;
-        view = views[i];
-        prevBuffer = prev.buffer;
-        view.renderToBuffer(prevBuffer, 'insertAfter');
-      }
-    }
-  },
-
-  hasElement: {
-    childViewsWillChange: function(view, views, start, removed) {
-      for (var i=start; i<start+removed; i++) {
-        views[i].remove();
-      }
-    },
-
-    childViewsDidChange: function(view, views, start, added) {
-      // If the DOM element for this container view already exists,
-      // schedule each child view to insert its DOM representation after
-      // bindings have finished syncing.
-      var prev = start === 0 ? null : views[start-1];
-
-      for (var i=start; i<start+added; i++) {
-        view = views[i];
-        this._scheduleInsertion(view, prev);
-        prev = view;
-      }
-    }
-  }
-};
-
-Ember.ContainerView.states.inDOM = {
-  parentState: Ember.ContainerView.states.hasElement
-};
-
-Ember.ContainerView.reopen({
-  states: Ember.ContainerView.states
-});
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:   Ember - JavaScript Application Framework
-// Copyright: ©2006-2011 Strobe Inc. and contributors.
-//            Portions ©2008-2011 Apple Inc. All rights reserved.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-var get = Ember.get, set = Ember.set, fmt = Ember.String.fmt;
-
-/**
-  @class
-
-  `Ember.CollectionView` is an `Ember.View` descendant responsible for managing a
-  collection (an array or array-like object) by maintaining a child view object and
-  associated DOM representation for each item in the array and ensuring that child
-  views and their associated rendered HTML are updated when items in the array
-  are added, removed, or replaced.
-
-  ## Setting content
-  The managed collection of objects is referenced as the `Ember.CollectionView` instance's
-  `content` property.
-
-      someItemsView = Ember.CollectionView.create({
-        content: ['A', 'B','C']
-      })
-
-  The view for each item in the collection will have its `content` property set
-  to the item.
-
-  ## Specifying itemViewClass
-  By default the view class for each item in the managed collection will be an instance
-  of `Ember.View`. You can supply a different class by setting the `CollectionView`'s
-  `itemViewClass` property.
-
-  Given an empty `<body>` and the following code:
-
-
-        someItemsView = Ember.CollectionView.create({
-          classNames: ['a-collection'],
-          content: ['A','B','C'],
-          itemViewClass: Ember.View.extend({
-            template: Ember.Handlebars.compile("the letter: {{view.content}}")
-          })
-        })
-
-        someItemsView.appendTo('body')
-
-  Will result in the following HTML structure
-
-        <div class="ember-view a-collection">
-          <div class="ember-view">the letter: A</div>
-          <div class="ember-view">the letter: B</div>
-          <div class="ember-view">the letter: C</div>
-        </div>
-
-  ## Automatic matching of parent/child tagNames
-
-  Setting the `tagName` property of a `CollectionView` to any of 
-  "ul", "ol", "table", "thead", "tbody", "tfoot", "tr", or "select" will result
-  in the item views receiving an appropriately matched `tagName` property.
-
-
-  Given an empty `<body>` and the following code:
-
-        anUnorderedListView = Ember.CollectionView.create({
-          tagName: 'ul',
-          content: ['A','B','C'],
-          itemViewClass: Ember.View.extend({
-            template: Ember.Handlebars.compile("the letter: {{view.content}}")
-          })
-        })
-
-        anUnorderedListView.appendTo('body')
-
-  Will result in the following HTML structure
-
-        <ul class="ember-view a-collection">
-          <li class="ember-view">the letter: A</li>
-          <li class="ember-view">the letter: B</li>
-          <li class="ember-view">the letter: C</li>
-        </ul>
-
-  Additional tagName pairs can be provided by adding to `Ember.CollectionView.CONTAINER_MAP`:
-
-        Ember.CollectionView.CONTAINER_MAP['article'] = 'section'
-
-
-  ## Empty View
-  You can provide an `Ember.View` subclass to the `Ember.CollectionView` instance as its
-  `emptyView` property. If the `content` property of a `CollectionView` is set to `null`
-  or an empty array, an instance of this view will be the `CollectionView`'s only child.
-
-        aListWithNothing = Ember.CollectionView.create({
-          classNames: ['nothing'],
-          content: null,
-          emptyView: Ember.View.extend({
-            template: Ember.Handlebars.compile("The collection is empty")
-          })
-        })
-
-        aListWithNothing.appendTo('body')
-
-  Will result in the following HTML structure
-
-        <div class="ember-view nothing">
-          <div class="ember-view">
-            The collection is empty
-          </div>
-        </div>
-
-  ## Adding and Removing items
-  The `childViews` property of a `CollectionView` should not be directly manipulated. Instead,
-  add, remove, or replace items in its `content` property. This will trigger
-  appropriate changes to its rendered HTML.
-
-  ## Use in templates via the `{{collection}}` Ember.Handlebars helper
-  Ember.Handlebars provides a helper specifically for adding `CollectionView`s to templates.
-  See `Ember.Handlebars.collection` for more details.
-
-  @since Ember 0.9
-  @extends Ember.ContainerView
-*/
-Ember.CollectionView = Ember.ContainerView.extend(
-/** @scope Ember.CollectionView.prototype */ {
-
-  /**
-    A list of items to be displayed by the Ember.CollectionView.
-
-    @type Ember.Array
-    @default null
-  */
-  content: null,
-
-  /**
-    @private
-
-    This provides metadata about what kind of empty view class this
-    collection would like if it is being instantiated from another
-    system (like Handlebars)
-  */
-  emptyViewClass: Ember.View,
-
-  /**
-    An optional view to display if content is set to an empty array.
-
-    @type Ember.View
-    @default null
-  */
-  emptyView: null,
-
-  /**
-    @type Ember.View
-    @default Ember.View
-  */
-  itemViewClass: Ember.View,
-
-  /** @private */
-  init: function() {
-    var ret = this._super();
-    this._contentDidChange();
-    return ret;
-  },
-
-  _contentWillChange: Ember.beforeObserver(function() {
-    var content = this.get('content');
-
-    if (content) { content.removeArrayObserver(this); }
-    var len = content ? get(content, 'length') : 0;
-    this.arrayWillChange(content, 0, len);
-  }, 'content'),
-
-  /**
-    @private
-
-    Check to make sure that the content has changed, and if so,
-    update the children directly. This is always scheduled
-    asynchronously, to allow the element to be created before
-    bindings have synchronized and vice versa.
-  */
-  _contentDidChange: Ember.observer(function() {
-    var content = get(this, 'content');
-
-    if (content) {
-      Ember.assert(fmt("an Ember.CollectionView's content must implement Ember.Array. You passed %@", [content]), Ember.Array.detect(content));
-      content.addArrayObserver(this);
-    }
-
-    var len = content ? get(content, 'length') : 0;
-    this.arrayDidChange(content, 0, null, len);
-  }, 'content'),
-
-  willDestroy: function() {
-    var content = get(this, 'content');
-    if (content) { content.removeArrayObserver(this); }
-
-    this._super();
-  },
-
-  arrayWillChange: function(content, start, removedCount) {
-    // If the contents were empty before and this template collection has an
-    // empty view remove it now.
-    var emptyView = get(this, 'emptyView');
-    if (emptyView && emptyView instanceof Ember.View) {
-      emptyView.removeFromParent();
-    }
-
-    // Loop through child views that correspond with the removed items.
-    // Note that we loop from the end of the array to the beginning because
-    // we are mutating it as we go.
-    var childViews = get(this, 'childViews'), childView, idx, len;
-
-    len = get(childViews, 'length');
-
-    var removingAll = removedCount === len;
-
-    if (removingAll) {
-      this.invokeForState('empty');
-    }
-
-    for (idx = start + removedCount - 1; idx >= start; idx--) {
-      childView = childViews[idx];
-      if (removingAll) { childView.removedFromDOM = true; }
-      childView.destroy();
-    }
-  },
-
-  /**
-    Called when a mutation to the underlying content array occurs.
-
-    This method will replay that mutation against the views that compose the
-    Ember.CollectionView, ensuring that the view reflects the model.
-
-    This array observer is added in `_contentDidChange`.
-
-    @param {Array} content
-      the content array that was mutated
-
-    @param {Number} start
-      the index at which the mutation occurred
-
-    @param {Number} removed
-      the number of items removed from the content
-
-    @param {Number} added
-      the number of items added to the content
-  */
-  arrayDidChange: function(content, start, removed, added) {
-    var itemViewClass = get(this, 'itemViewClass'),
-        childViews = get(this, 'childViews'),
-        addedViews = [], view, item, idx, len, itemTagName;
-
-    if ('string' === typeof itemViewClass) {
-      itemViewClass = get(itemViewClass);
-    }
-
-    Ember.assert(fmt("itemViewClass must be a subclass of Ember.View, not %@", [itemViewClass]), Ember.View.detect(itemViewClass));
-
-    len = content ? get(content, 'length') : 0;
-    if (len) {
-      for (idx = start; idx < start+added; idx++) {
-        item = content.objectAt(idx);
-
-        view = this.createChildView(itemViewClass, {
-          content: item,
-          contentIndex: idx
-        });
-
-        addedViews.push(view);
-      }
-    } else {
-      var emptyView = get(this, 'emptyView');
-      if (!emptyView) { return; }
-
-      emptyView = this.createChildView(emptyView);
-      addedViews.push(emptyView);
-      set(this, 'emptyView', emptyView);
-    }
-    childViews.replace(start, 0, addedViews);
-  },
-
-  createChildView: function(view, attrs) {
-    view = this._super(view, attrs);
-
-    var itemTagName = get(view, 'tagName');
-    var tagName = (itemTagName === null || itemTagName === undefined) ? Ember.CollectionView.CONTAINER_MAP[get(this, 'tagName')] : itemTagName;
-
-    set(view, 'tagName', tagName);
-
-    return view;
-  }
-});
-
-/**
-  @static
-
-  A map of parent tags to their default child tags. You can add
-  additional parent tags if you want collection views that use
-  a particular parent tag to default to a specific child tag.
-
-  @type Hash
-  @constant
-*/
-Ember.CollectionView.CONTAINER_MAP = {
-  ul: 'li',
-  ol: 'li',
-  table: 'tr',
-  thead: 'tr',
-  tbody: 'tr',
-  tfoot: 'tr',
-  tr: 'td',
-  select: 'option'
-};
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:   Ember - JavaScript Application Framework
-// Copyright: ©2006-2011 Strobe Inc. and contributors.
-//            Portions ©2008-2011 Apple Inc. All rights reserved.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:   Ember - JavaScript Application Framework
-// Copyright: ©2006-2011 Strobe Inc. and contributors.
-//            Portions ©2008-2011 Apple Inc. All rights reserved.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-
-/*globals jQuery*/
-
-})();
-
-(function() {
-var get = Ember.get, set = Ember.set;
-
-/**
-  @class
-
-  @extends Ember.Object
-*/
-Ember.State = Ember.Object.extend(Ember.Evented,
-/** @scope Ember.State.prototype */{
-  isState: true,
-
-  /**
-    A reference to the parent state.
-
-    @type Ember.State
-  */
-  parentState: null,
-  start: null,
-
-  /**
-    The name of this state.
-
-    @type String
-  */
-  name: null,
-
-  /**
-    The full path to this state.
-
-    @type String
-    @readOnly
-  */
-  path: Ember.computed(function() {
-    var parentPath = get(this, 'parentState.path'),
-        path = get(this, 'name');
-
-    if (parentPath) {
-      path = parentPath + '.' + path;
-    }
-
-    return path;
-  }).property().cacheable(),
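-
-  // For example (state names assumed for illustration): a state named
-  // 'charging' whose parentState is named 'poweredDown' reports a `path`
-  // of 'poweredDown.charging'.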
-
-  /**
-    @private
-
-    Override the default event firing from Ember.Evented to
-    also call methods with the given name.
-  */
-  trigger: function(name) {
-    if (this[name]) {
-      this[name].apply(this, [].slice.call(arguments, 1));
-    }
-    this._super.apply(this, arguments);
-  },
-
-  /** @private */
-  init: function() {
-    var states = get(this, 'states');
-    set(this, 'childStates', Ember.A());
-    set(this, 'eventTransitions', get(this, 'eventTransitions') || {});
-
-    var name, value, transitionTarget;
-
-    // As a convenience, loop over the properties
-    // of this state and look for any that are other
-    // Ember.State instances or classes, and move them
-    // to the `states` hash. This avoids having to
-    // create an explicit separate hash.
-
-    if (!states) {
-      states = {};
-
-      for (name in this) {
-        if (name === "constructor") { continue; }
-
-        if (value = this[name]) {
-          if (transitionTarget = value.transitionTarget) {
-            this.eventTransitions[name] = transitionTarget;
-          }
-
-          this.setupChild(states, name, value);
-        }
-      }
-
-      set(this, 'states', states);
-    } else {
-      for (name in states) {
-        this.setupChild(states, name, states[name]);
-      }
-    }
-
-    set(this, 'pathsCache', {});
-    set(this, 'pathsCacheNoContext', {});
-  },
-
-  /** @private */
-  setupChild: function(states, name, value) {
-    if (!value) { return false; }
-
-    if (value.isState) {
-      set(value, 'name', name);
-    } else if (Ember.State.detect(value)) {
-      value = value.create({
-        name: name
-      });
-    }
-
-    if (value.isState) {
-      set(value, 'parentState', this);
-      get(this, 'childStates').pushObject(value);
-      states[name] = value;
-    }
-  },
-
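-  /**
-    @private
-
-    Walks up the `parentState` chain and returns the first transition
-    target registered for `name` in a state's `eventTransitions` hash,
-    if any.
-  */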
-  lookupEventTransition: function(name) {
-    var path, state = this;
-
-    while(state && !path) {
-      path = state.eventTransitions[name];
-      state = state.get('parentState');
-    }
-
-    return path;
-  },
-
-  /**
-    A Boolean value indicating whether the state is a leaf state
-    in the state hierarchy. This is false if the state has child
-    states; otherwise it is true.
-
-    @type Boolean
-  */
-  isLeaf: Ember.computed(function() {
-    return !get(this, 'childStates').length;
-  }).cacheable(),
-
-  /**
-    A boolean value indicating whether the state takes a context.
-    By default we assume all states take contexts.
-  */
-  hasContext: true,
-
-  /**
-    This is the default transition event.
-
-    @event
-    @param {Ember.StateManager} manager
-    @param context
-    @see Ember.StateManager#transitionEvent
-  */
-  setup: Ember.K,
-
-  /**
-    This event fires when the state is entered.
-
-    @event
-    @param {Ember.StateManager} manager
-  */
-  enter: Ember.K,
-
-  /**
-    This event fires when the state is exited.
-
-    @event
-    @param {Ember.StateManager} manager
-  */
-  exit: Ember.K
-});
-
-var Event = Ember.$ && Ember.$.Event;
-
-Ember.State.reopenClass(
-/** @scope Ember.State */{
-
-  /**
-  @static
-
-  Creates an action function for transitioning to the named state while preserving context.
-
-  The following example StateManagers are equivalent:
-
-      aManager = Ember.StateManager.create({
-        stateOne: Ember.State.create({
-          changeToStateTwo: Ember.State.transitionTo('stateTwo')
-        }),
-        stateTwo: Ember.State.create({})
-      })
-
-      bManager = Ember.StateManager.create({
-        stateOne: Ember.State.create({
-          changeToStateTwo: function(manager, context){
-            manager.transitionTo('stateTwo', context)
-          }
-        }),
-        stateTwo: Ember.State.create({})
-      })
-
-  @param {String} target
-  */
-  transitionTo: function(target) {
-    var event = function(stateManager, context) {
-      if (Event && context instanceof Event) {
-        if (context.hasOwnProperty('context')) {
-          context = context.context;
-        } else {
-          // If we received an event and it doesn't contain
-          // a context, don't pass along a superfluous
-          // context to the target of the event.
-          return stateManager.transitionTo(target);
-        }
-      }
-
-      stateManager.transitionTo(target, context);
-    };
-
-    event.transitionTarget = target;
-
-    return event;
-  }
-});
-
-})();
-
-
-
-(function() {
-var get = Ember.get, set = Ember.set, fmt = Ember.String.fmt;
-var arrayForEach = Ember.ArrayPolyfills.forEach;
-/**
-  @private
-
-  A Transition takes the enter, exit and resolve states and normalizes
-  them:
-
-  * takes any passed in contexts into consideration
-  * adds in `initialState`s
-*/
-var Transition = function(raw) {
-  this.enterStates = raw.enterStates.slice();
-  this.exitStates = raw.exitStates.slice();
-  this.resolveState = raw.resolveState;
-
-  this.finalState = raw.enterStates[raw.enterStates.length - 1] || raw.resolveState;
-};
-
-Transition.prototype = {
-  /**
-    @private
-
-    Normalize the passed in enter, exit and resolve states.
-
-    This process also adds `finalState` and `contexts` to the Transition object.
-
-    @param {Ember.StateManager} manager the state manager running the transition
-    @param {Array} contexts a list of contexts passed into `transitionTo`
-  */
-  normalize: function(manager, contexts) {
-    this.matchContextsToStates(contexts);
-    this.addInitialStates();
-    this.removeUnchangedContexts(manager);
-    return this;
-  },
-
-  /**
-    @private
-
-    Match each of the contexts passed to `transitionTo` to a state.
-    This process may also require adding additional enter and exit
-    states if there are more contexts than enter states.
-
-    @param {Array} contexts a list of contexts passed into `transitionTo`
-  */
-  matchContextsToStates: function(contexts) {
-    var stateIdx = this.enterStates.length - 1,
-        matchedContexts = [],
-        state,
-        context;
-
-    // Next, we will match the passed in contexts to the states they
-    // represent.
-    //
-    // First, assign a context to each enter state in reverse order. If
-    // any contexts are left, add a parent state to the list of states
-    // to enter and exit, and assign a context to the parent state.
-    //
-    // If there are still contexts left when the state manager is
-    // reached, raise an exception.
-    //
-    // This allows the following:
-    //
-    // |- root
-    // | |- post
-    // | | |- comments
-    // | |- about (* current state)
-    //
-    // For `transitionTo('post.comments', post, post.get('comments'))`,
-    // the first context (`post`) will be assigned to `root.post`, and
-    // the second context (`post.get('comments')`) will be assigned
-    // to `root.post.comments`.
-    //
-    // For the following:
-    //
-    // |- root
-    // | |- post
-    // | | |- index (* current state)
-    // | | |- comments
-    //
-    // For `transitionTo('post.comments', otherPost, otherPost.get('comments'))`,
-    // the `<root.post>` state will be added to the list of enter and exit
-    // states because its context has changed.
-
-    while (contexts.length > 0) {
-      if (stateIdx >= 0) {
-        state = this.enterStates[stateIdx--];
-      } else {
-        if (this.enterStates.length) {
-          state = get(this.enterStates[0], 'parentState');
-          if (!state) { throw "Cannot match all contexts to states"; }
-        } else {
-          // If re-entering the current state with a context, the resolve
-          // state will be the current state.
-          state = this.resolveState;
-        }
-
-        this.enterStates.unshift(state);
-        this.exitStates.unshift(state);
-      }
-
-      // in routers, only states with dynamic segments have a context
-      if (get(state, 'hasContext')) {
-        context = contexts.pop();
-      } else {
-        context = null;
-      }
-
-      matchedContexts.unshift(context);
-    }
-
-    this.contexts = matchedContexts;
-  },
-
-  /**
-    @private
-
-    Add any `initialState`s to the list of enter states.
-  */
-  addInitialStates: function() {
-    var finalState = this.finalState, initialState;
-
-    while(true) {
-      initialState = get(finalState, 'initialState') || 'start';
-      finalState = get(finalState, 'states.' + initialState);
-
-      if (!finalState) { break; }
-
-      this.finalState = finalState;
-      this.enterStates.push(finalState);
-      this.contexts.push(undefined);
-    }
-  },
-
-  /**
-    @private
-
-    Remove any states that were added because the number of contexts
-    exceeded the number of explicit enter states, but the context has
-    not changed since the last time the state was entered.
-
-    @param {Ember.StateManager} manager passed in to look up the last
-      context for a state
-  */
-  removeUnchangedContexts: function(manager) {
-    // Start from the beginning of the enter states. If the state was added
-    // to the list during the context matching phase, make sure the context
-    // has actually changed since the last time the state was entered.
-    while (this.enterStates.length > 0) {
-      if (this.enterStates[0] !== this.exitStates[0]) { break; }
-
-      if (this.enterStates.length === this.contexts.length) {
-        if (manager.getStateMeta(this.enterStates[0], 'context') !== this.contexts[0]) { break; }
-        this.contexts.shift();
-      }
-
-      this.resolveState = this.enterStates.shift();
-      this.exitStates.shift();
-    }
-  }
-};
-
-/**
-  @class
-
-  StateManager is part of Ember's implementation of a finite state machine. A StateManager
-  instance manages a number of properties that are instances of `Ember.State`,
-  tracks the current active state, and triggers callbacks when states have changed.
-
-  ## Defining States
-
-  The states of StateManager can be declared in one of two ways. First, you can define
-  a `states` property that contains all the states:
-
-      managerA = Ember.StateManager.create({
-        states: {
-          stateOne: Ember.State.create(),
-          stateTwo: Ember.State.create()
-        }
-      })
-
-      managerA.get('states')
-      // {
-      //   stateOne: Ember.State.create(),
-      //   stateTwo: Ember.State.create()
-      // }
-
-  You can also add instances of `Ember.State` (or an `Ember.State` subclass) directly as properties
-  of a StateManager. These states will be collected into the `states` property for you.
-
-      managerA = Ember.StateManager.create({
-        stateOne: Ember.State.create(),
-        stateTwo: Ember.State.create()
-      })
-
-      managerA.get('states')
-      // {
-      //   stateOne: Ember.State.create(),
-      //   stateTwo: Ember.State.create()
-      // }
-
-  ## The Initial State
-  When created, a StateManager instance will immediately enter the state
-  defined as its `start` property or the state referenced by name in its
-  `initialState` property:
-
-      managerA = Ember.StateManager.create({
-        start: Ember.State.create({})
-      })
-
-      managerA.get('currentState.name') // 'start'
-
-      managerB = Ember.StateManager.create({
-        initialState: 'beginHere',
-        beginHere: Ember.State.create({})
-      })
-
-      managerB.get('currentState.name') // 'beginHere'
-
-  Because it is a property, you may also provide a computed function if you wish to derive
-  an `initialState` programmatically:
-
-      managerC = Ember.StateManager.create({
-        initialState: function(){
-          if (someLogic) {
-            return 'active';
-          } else {
-            return 'passive';
-          }
-        }.property(),
-        active: Ember.State.create({}),
-        passive: Ember.State.create({})
-      })
-
-  ## Moving Between States
-  A StateManager can have any number of Ember.State objects as properties
-  and can have a single one of these states as its current state.
-
-  Calling `transitionTo` transitions between states:
-
-      robotManager = Ember.StateManager.create({
-        initialState: 'poweredDown',
-        poweredDown: Ember.State.create({}),
-        poweredUp: Ember.State.create({})
-      })
-
-      robotManager.get('currentState.name') // 'poweredDown'
-      robotManager.transitionTo('poweredUp')
-      robotManager.get('currentState.name') // 'poweredUp'
-
-  Before transitioning into a new state the existing `currentState` will have its
-  `exit` method called with the StateManager instance as its first argument and
-  an object representing the transition as its second argument.
-
-  After transitioning into a new state the new `currentState` will have its
-  `enter` method called with the StateManager instance as its first argument and
-  an object representing the transition as its second argument.
-
-      robotManager = Ember.StateManager.create({
-        initialState: 'poweredDown',
-        poweredDown: Ember.State.create({
-          exit: function(stateManager){
-            console.log("exiting the poweredDown state")
-          }
-        }),
-        poweredUp: Ember.State.create({
-          enter: function(stateManager){
-            console.log("entering the poweredUp state. Destroy all humans.")
-          }
-        })
-      })
-
-      robotManager.get('currentState.name') // 'poweredDown'
-      robotManager.transitionTo('poweredUp')
-      // will log
-      // 'exiting the poweredDown state'
-      // 'entering the poweredUp state. Destroy all humans.'
-
-
-  Once a StateManager is already in a state, subsequent attempts to enter that state will
-  not trigger enter or exit method calls. Attempts to transition into a state that the
-  manager does not have will result in no changes in the StateManager's current state:
-
-      robotManager = Ember.StateManager.create({
-        initialState: 'poweredDown',
-        poweredDown: Ember.State.create({
-          exit: function(stateManager){
-            console.log("exiting the poweredDown state")
-          }
-        }),
-        poweredUp: Ember.State.create({
-          enter: function(stateManager){
-            console.log("entering the poweredUp state. Destroy all humans.")
-          }
-        })
-      })
-
-      robotManager.get('currentState.name') // 'poweredDown'
-      robotManager.transitionTo('poweredUp')
-      // will log
-      // 'exiting the poweredDown state'
-      // 'entering the poweredUp state. Destroy all humans.'
-      robotManager.transitionTo('poweredUp') // no logging, no state change
-
-      robotManager.transitionTo('someUnknownState') // silently fails
-      robotManager.get('currentState.name') // 'poweredUp'
-
-
-  Each state property may itself contain properties that are instances of Ember.State.
-  The StateManager can transition to specific sub-states in a series of transitionTo method calls or
-  via a single transitionTo with the full path to the specific state. The StateManager will also
-  keep track of the full path to its `currentState`:
-
-      robotManager = Ember.StateManager.create({
-        initialState: 'poweredDown',
-        poweredDown: Ember.State.create({
-          charging: Ember.State.create(),
-          charged: Ember.State.create()
-        }),
-        poweredUp: Ember.State.create({
-          mobile: Ember.State.create(),
-          stationary: Ember.State.create()
-        })
-      })
-
-      robotManager.get('currentState.name') // 'poweredDown'
-
-      robotManager.transitionTo('poweredUp')
-      robotManager.get('currentState.name') // 'poweredUp'
-
-      robotManager.transitionTo('mobile')
-      robotManager.get('currentState.name') // 'mobile'
-
-      // transition via a state path
-      robotManager.transitionTo('poweredDown.charging')
-      robotManager.get('currentState.name') // 'charging'
-
-      robotManager.get('currentState.path') // 'poweredDown.charging'
-
-  Enter transition methods will be called for each state and nested child state in their
-  hierarchical order.  Exit methods will be called for each state and its nested states in
-  reverse hierarchical order.
-
-  Exit transitions for a parent state are not called when entering into one of its child states,
-  only when transitioning to a new section of possible states in the hierarchy.
-
-      robotManager = Ember.StateManager.create({
-        initialState: 'poweredDown',
-        poweredDown: Ember.State.create({
-          enter: function(){},
-          exit: function(){
-            console.log("exited poweredDown state")
-          },
-          charging: Ember.State.create({
-            enter: function(){},
-            exit: function(){}
-          }),
-          charged: Ember.State.create({
-            enter: function(){
-              console.log("entered charged state")
-            },
-            exit: function(){
-              console.log("exited charged state")
-            }
-          })
-        }),
-        poweredUp: Ember.State.create({
-          enter: function(){
-            console.log("entered poweredUp state")
-          },
-          exit: function(){},
-          mobile: Ember.State.create({
-            enter: function(){
-              console.log("entered mobile state")
-            },
-            exit: function(){}
-          }),
-          stationary: Ember.State.create({
-            enter: function(){},
-            exit: function(){}
-          })
-        })
-      })
-
-
-      robotManager.get('currentState.path') // 'poweredDown'
-      robotManager.transitionTo('charged')
-      // logs 'entered charged state'
-      // but does *not* log  'exited poweredDown state'
-      robotManager.get('currentState.name') // 'charged'
-
-      robotManager.transitionTo('poweredUp.mobile')
-      // logs
-      // 'exited charged state'
-      // 'exited poweredDown state'
-      // 'entered poweredUp state'
-      // 'entered mobile state'
-
-  During development you can set a StateManager's `enableLogging` property to `true` to
-  receive console messages of state transitions.
-
-      robotManager = Ember.StateManager.create({
-        enableLogging: true
-      })
-
-  ## Managing currentState with Actions
-  To control which transitions between states are possible for a given state, StateManager
-  can receive and route action messages to its states via the `send` method. Calling `send` with
-  an action name will begin searching for a method with the same name starting at the current state
-  and moving up through the parent states in the state hierarchy until an appropriate method is found
-  or the StateManager instance itself is reached.
-
-  If an appropriately named method is found it will be called with the state manager as the first
-  argument and an optional `context` object as the second argument.
-
-      managerA = Ember.StateManager.create({
-        initialState: 'stateOne.substateOne.subsubstateOne',
-        stateOne: Ember.State.create({
-          substateOne: Ember.State.create({
-            anAction: function(manager, context){
-              console.log("an action was called")
-            },
-            subsubstateOne: Ember.State.create({})
-          })
-        })
-      })
-
-      managerA.get('currentState.name') // 'subsubstateOne'
-      managerA.send('anAction')
-      // 'stateOne.substateOne.subsubstateOne' has no anAction method
-      // so the 'anAction' method of 'stateOne.substateOne' is called
-      // and logs "an action was called"
-      // with managerA as the first argument
-      // and no second argument
-
-      someObject = {}
-      managerA.send('anAction', someObject)
-      // the 'anAction' method of 'stateOne.substateOne' is called again
-      // with managerA as the first argument and
-      // someObject as the second argument.
-
-
-  If the StateManager attempts to send an action but does not find an appropriately named
-  method in the current state or while moving upwards through the state hierarchy,
-  it will throw a new Ember.Error. Action detection only moves upwards through the state hierarchy
-  from the current state. It does not search in other portions of the hierarchy.
-
-      managerB = Ember.StateManager.create({
-        initialState: 'stateOne.substateOne.subsubstateOne',
-        stateOne: Ember.State.create({
-          substateOne: Ember.State.create({
-            subsubstateOne: Ember.State.create({})
-          })
-        }),
-        stateTwo: Ember.State.create({
-         anAction: function(manager, context){
-           // will not be called below because it is
-           // not a parent of the current state
-         }
-        })
-      })
-
-      managerB.get('currentState.name') // 'subsubstateOne'
-      managerB.send('anAction')
-      // Error: <Ember.StateManager:ember132> could not
-      // respond to event anAction in state stateOne.substateOne.subsubstateOne.
-
-  Inside an action method, a state should delegate state changes by calling `transitionTo`
-  on its StateManager.
-
-      robotManager = Ember.StateManager.create({
-        initialState: 'poweredDown.charging',
-        poweredDown: Ember.State.create({
-          charging: Ember.State.create({
-            chargeComplete: function(manager, context){
-              manager.transitionTo('charged')
-            }
-          }),
-          charged: Ember.State.create({
-            boot: function(manager, context){
-              manager.transitionTo('poweredUp')
-            }
-          })
-        }),
-        poweredUp: Ember.State.create({
-          beginExtermination: function(manager, context){
-            manager.transitionTo('rampaging')
-          },
-          rampaging: Ember.State.create()
-        })
-      })
-
-      robotManager.get('currentState.name') // 'charging'
-      robotManager.send('boot') // throws error, no boot action
-                                // in current hierarchy
-      robotManager.get('currentState.name') // remains 'charging'
-
-      robotManager.send('beginExtermination') // throws error, no beginExtermination
-                                              // action in current hierarchy
-      robotManager.get('currentState.name') // remains 'charging'
-
-      robotManager.send('chargeComplete')
-      robotManager.get('currentState.name') // 'charged'
-
-      robotManager.send('boot')
-      robotManager.get('currentState.name') // 'poweredUp'
-
-      robotManager.send('beginExtermination', allHumans)
-      robotManager.get('currentState.name') // 'rampaging'
-
-  Transition actions can also be created using the `transitionTo` method of the Ember.State class. The
-  following example StateManagers are equivalent:
-
-      aManager = Ember.StateManager.create({
-        stateOne: Ember.State.create({
-          changeToStateTwo: Ember.State.transitionTo('stateTwo')
-        }),
-        stateTwo: Ember.State.create({})
-      })
-
-      bManager = Ember.StateManager.create({
-        stateOne: Ember.State.create({
-          changeToStateTwo: function(manager, context){
-            manager.transitionTo('stateTwo', context)
-          }
-        }),
-        stateTwo: Ember.State.create({})
-      })
-**/
-Ember.StateManager = Ember.State.extend(
-/** @scope Ember.StateManager.prototype */ {
-
-  /**
-    When creating a new statemanager, look for a default state to transition
-    into. This state can either be named `start`, or can be specified using the
-    `initialState` property.
-  */
-  init: function() {
-    this._super();
-
-    set(this, 'stateMeta', Ember.Map.create());
-
-    var initialState = get(this, 'initialState');
-
-    if (!initialState && get(this, 'states.start')) {
-      initialState = 'start';
-    }
-
-    if (initialState) {
-      this.transitionTo(initialState);
-      Ember.assert('Failed to transition to initial state "' + initialState + '"', !!get(this, 'currentState'));
-    }
-  },
-
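-  /**
-    Per-state scratch storage. `stateMetaFor` lazily creates and returns
-    a metadata hash for the given state; `setStateMeta` and `getStateMeta`
-    write and read keys on it. For example (illustrative):
-
-        manager.setStateMeta(someState, 'context', aContext)
-        manager.getStateMeta(someState, 'context') // aContext
-  */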
-  stateMetaFor: function(state) {
-    var meta = get(this, 'stateMeta'),
-        stateMeta = meta.get(state);
-
-    if (!stateMeta) {
-      stateMeta = {};
-      meta.set(state, stateMeta);
-    }
-
-    return stateMeta;
-  },
-
-  setStateMeta: function(state, key, value) {
-    return set(this.stateMetaFor(state), key, value);
-  },
-
-  getStateMeta: function(state, key) {
-    return get(this.stateMetaFor(state), key);
-  },
-
-  /**
-    The current state from among the manager's possible states. This property should
-    not be set directly.  Use `transitionTo` to move between states by name.
-
-    @type Ember.State
-    @readOnly
-  */
-  currentState: null,
-
-  /**
-    The name of the transition event that this StateManager will dispatch to states it enters.
-
-    @property {String}
-    @default 'setup'
-  */
-  transitionEvent: 'setup',
-
-  /**
-    If set to true, `errorOnUnhandledEvent` will cause an exception to be
-    raised if you attempt to send an event to a state manager that is not
-    handled by the current state or any of its parent states.
-
-    @type Boolean
-    @default true
-  */
-  errorOnUnhandledEvent: true,
-
-  send: function(event, context) {
-    Ember.assert('Cannot send event "' + event + '" while currentState is ' + get(this, 'currentState'), get(this, 'currentState'));
-    return this.sendRecursively(event, get(this, 'currentState'), context);
-  },
-
-  sendRecursively: function(event, currentState, context) {
-    var log = this.enableLogging,
-        action = currentState[event];
-
-    // Test to see if the action is a method that
-    // can be invoked. Don't blindly check just for
-    // existence, because it is possible the state
-    // manager has a child state of the given name,
-    // and we should still raise an exception in that
-    // case.
-    if (typeof action === 'function') {
-      if (log) { Ember.Logger.log(fmt("STATEMANAGER: Sending event '%@' to state %@.", [event, get(currentState, 'path')])); }
-      return action.call(currentState, this, context);
-    } else {
-      var parentState = get(currentState, 'parentState');
-      if (parentState) {
-        return this.sendRecursively(event, parentState, context);
-      } else if (get(this, 'errorOnUnhandledEvent')) {
-        throw new Ember.Error(this.toString() + " could not respond to event " + event + " in state " + get(this, 'currentState.path') + ".");
-      }
-    }
-  },
-
-  /**
-    Finds a state by its state path.
-
-    Example:
-
-        manager = Ember.StateManager.create({
-          root: Ember.State.create({
-            dashboard: Ember.State.create()
-          })
-        });
-
-        manager.getStateByPath(manager, "root.dashboard")
-
-        // returns the dashboard state
-
-    @param {Ember.State} root the state to start searching from
-    @param {String} path the state path to follow
-    @returns {Ember.State} the state at the end of the path
-  */
-  getStateByPath: function(root, path) {
-    var parts = path.split('.'),
-        state = root;
-
-    for (var i=0, l=parts.length; i<l; i++) {
-      state = get(get(state, 'states'), parts[i]);
-      if (!state) { break; }
-    }
-
-    return state;
-  },
-
-  findStateByPath: function(state, path) {
-    var possible;
-
-    while (!possible && state) {
-      possible = this.getStateByPath(state, path);
-      state = get(state, 'parentState');
-    }
-
-    return possible;
-  },
-
-  /**
-    @private
-
-    A state stores its child states in its `states` hash.
-    This code takes a path like `posts.show` and looks
-    up `origin.states.posts.states.show`.
-
-    It returns a list of all of the states from the
-    origin, which is the list of states to call `enter`
-    on.
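-
-    For example (illustrative), `findStatesByPath(router, 'posts.show')`
-    returns `[<posts>, <posts.show>]`, or `undefined` if any segment of
-    the path is missing.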
-  */
-  findStatesByPath: function(origin, path) {
-    if (!path || path === "") { return undefined; }
-    var r = path.split('.'),
-        ret = [];
-
-    for (var i=0, len = r.length; i < len; i++) {
-      var states = get(origin, 'states');
-
-      if (!states) { return undefined; }
-
-      var s = get(states, r[i]);
-      if (s) { origin = s; ret.push(s); }
-      else { return undefined; }
-    }
-
-    return ret;
-  },
-
-  goToState: function() {
-    // not deprecating this yet so people don't constantly need to
-    // make trivial changes for little reason.
-    return this.transitionTo.apply(this, arguments);
-  },
-
-  transitionTo: function(path, context) {
-    // XXX When is transitionTo called with no path
-    if (Ember.empty(path)) { return; }
-
-    // The ES6 signature of this function is `path, ...contexts`
-    var contexts = context ? Array.prototype.slice.call(arguments, 1) : [],
-        currentState = get(this, 'currentState') || this;
-
-    // First, get the enter, exit and resolve states for the current state
-    // and specified path. If possible, use an existing cache.
-    var hash = this.contextFreeTransition(currentState, path);
-
-    // Next, process the raw state information for the contexts passed in.
-    var transition = new Transition(hash).normalize(this, contexts);
-
-    this.enterState(transition);
-    this.triggerSetupContext(transition);
-  },
-
-  contextFreeTransition: function(currentState, path) {
-    var cache = currentState.pathsCache[path];
-    if (cache) { return cache; }
-
-    var enterStates = this.findStatesByPath(currentState, path),
-        exitStates = [],
-        resolveState = currentState;
-
-    // Walk up the states. For each state, check whether a state matching
-    // the `path` is nested underneath. This will find the closest
-    // parent state containing `path`.
-    //
-    // This allows the user to pass in a relative path. For example, for
-    // the following state hierarchy:
-    //
-    //    | |root
-    //    | |- posts
-    //    | | |- show (* current)
-    //    | |- comments
-    //    | | |- show
-    //
-    // If the current state is `<root.posts.show>`, an attempt to
-    // transition to `comments.show` will match `<root.comments.show>`.
-    //
-    // First, this code will look for root.posts.show.comments.show.
-    // Next, it will look for root.posts.comments.show. Finally,
-    // it will look for `root.comments.show`, and find the state.
-    //
-    // After this process, the following variables will exist:
-    //
-    // * resolveState: a common parent state between the current
-    //   and target state. In the above example, `<root>` is the
-    //   `resolveState`.
-    // * enterStates: a list of all of the states represented
-    //   by the path from the `resolveState`. For example, for
-    //   the path `root.comments.show`, `enterStates` would have
-    //   `[<root.comments>, <root.comments.show>]`
-    // * exitStates: a list of all of the states from the
-    //   `resolveState` to the `currentState`. In the above
-    //   example, `exitStates` would have
-    //   `[<root.posts>`, `<root.posts.show>]`.
-    while (resolveState && !enterStates) {
-      exitStates.unshift(resolveState);
-
-      resolveState = get(resolveState, 'parentState');
-      if (!resolveState) {
-        enterStates = this.findStatesByPath(this, path);
-        if (!enterStates) {
-          Ember.assert('Could not find state for path: "'+path+'"');
-          return;
-        }
-      }
-      enterStates = this.findStatesByPath(resolveState, path);
-    }
-
-    // If the path contains some states that are parents of both the
-    // current state and the target state, remove them.
-    //
-    // For example, in the following hierarchy:
-    //
-    // |- root
-    // | |- post
-    // | | |- index (* current)
-    // | | |- show
-    //
-    // If the `path` is `root.post.show`, the three variables will
-    // be:
-    //
-    // * resolveState: `<state manager>`
-    // * enterStates: `[<root>, <root.post>, <root.post.show>]`
-    // * exitStates: `[<root>, <root.post>, <root.post.index>]`
-    //
-    // The goal of this code is to remove the common states, so we
-    // have:
-    //
-    // * resolveState: `<root.post>`
-    // * enterStates: `[<root.post.show>]`
-    // * exitStates: `[<root.post.index>]`
-    //
-    // This avoids unnecessary calls to the enter and exit transitions.
-    while (enterStates.length > 0 && enterStates[0] === exitStates[0]) {
-      resolveState = enterStates.shift();
-      exitStates.shift();
-    }
-
-    // Cache the enterStates, exitStates, and resolveState for the
-    // current state and the `path`.
-    var transitions = currentState.pathsCache[path] = {
-      exitStates: exitStates,
-      enterStates: enterStates,
-      resolveState: resolveState
-    };
-
-    return transitions;
-  },
-
-  triggerSetupContext: function(transitions) {
-    var contexts = transitions.contexts,
-        offset = transitions.enterStates.length - contexts.length,
-        enterStates = transitions.enterStates,
-        transitionEvent = get(this, 'transitionEvent');
-
-    Ember.assert("More contexts provided than states", offset >= 0);
-
-    arrayForEach.call(enterStates, function(state, idx) {
-      state.trigger(transitionEvent, this, contexts[idx-offset]);
-    }, this);
-  },
-
-  getState: function(name) {
-    var state = get(this, name),
-        parentState = get(this, 'parentState');
-
-    if (state) {
-      return state;
-    } else if (parentState) {
-      return parentState.getState(name);
-    }
-  },
-
-  enterState: function(transition) {
-    var log = this.enableLogging;
-
-    var exitStates = transition.exitStates.slice(0).reverse();
-    arrayForEach.call(exitStates, function(state) {
-      state.trigger('exit', this);
-    }, this);
-
-    arrayForEach.call(transition.enterStates, function(state) {
-      if (log) { Ember.Logger.log("STATEMANAGER: Entering " + get(state, 'path')); }
-      state.trigger('enter', this);
-    }, this);
-
-    set(this, 'currentState', transition.finalState);
-  }
-});
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Statecharts
-// Copyright: ©2011 Living Social Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-
-})();
-
-(function() {
-var get = Ember.get;
-
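-// A (state, URL match) pair produced while resolving a path. `object` is
-// lazily deserialized from the match's hash, `hasPromise`/`promise`
-// normalize promise and plain values to a then-able, and `transition`
-// moves the manager to the resolved state with that object as context.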
-Ember._ResolvedState = Ember.Object.extend({
-  manager: null,
-  state: null,
-  match: null,
-
-  object: Ember.computed(function(key, value) {
-    if (arguments.length === 2) {
-      this._object = value;
-      return value;
-    } else {
-      if (this._object) {
-        return this._object;
-      } else {
-        var state = get(this, 'state'),
-            match = get(this, 'match'),
-            manager = get(this, 'manager');
-        return state.deserialize(manager, match.hash);
-      }
-    }
-  }).property(),
-
-  hasPromise: Ember.computed(function() {
-    return Ember.canInvoke(get(this, 'object'), 'then');
-  }).property('object'),
-
-  promise: Ember.computed(function() {
-    var object = get(this, 'object');
-    if (Ember.canInvoke(object, 'then')) {
-      return object;
-    } else {
-      return {
-        then: function(success) { success(object); }
-      };
-    }
-  }).property('object'),
-
-  transition: function() {
-    var manager = get(this, 'manager'),
-        path = get(this, 'state.path'),
-        object = get(this, 'object');
-    manager.transitionTo(path, object);
-  }
-});
-
-})();
-
-
-
-(function() {
-var get = Ember.get;
-
-// The Ember Routable mixin assumes the existence of a simple
-// routing shim that supports the following three behaviors:
-//
-// * .getURL() - this is called when the page loads
-// * .setURL(newURL) - this is called from within the state
-//   manager when the state changes to a routable state
-// * .onURLChange(callback) - this happens when the user presses
-//   the back or forward button
-
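-// Derives the dynamic segment name for a model class: the last part of
-// the class name, underscored, with "_id" appended. For example
-// (illustrative), App.BlogPost yields "blog_post_id".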
-var paramForClass = function(classObject) {
-  var className = classObject.toString(),
-      parts = className.split("."),
-      last = parts[parts.length - 1];
-
-  return Ember.String.underscore(last) + "_id";
-};
-
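-// Copies properties from `hash` onto `original` without overwriting
-// keys that `original` already has (a non-clobbering merge).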
-var merge = function(original, hash) {
-  for (var prop in hash) {
-    if (!hash.hasOwnProperty(prop)) { continue; }
-    if (original.hasOwnProperty(prop)) { continue; }
-
-    original[prop] = hash[prop];
-  }
-};
-
-/**
-  @class
-  @extends Ember.Mixin
-*/
-Ember.Routable = Ember.Mixin.create({
-  init: function() {
-    var redirection;
-    this.on('connectOutlets', this, this.stashContext);
-
-    if (redirection = get(this, 'redirectsTo')) {
-      Ember.assert("You cannot use `redirectsTo` if you already have a `connectOutlets` method", this.connectOutlets === Ember.K);
-
-      this.connectOutlets = function(router) {
-        router.transitionTo(redirection);
-      };
-    }
-
-    // normalize empty route to '/'
-    var route = get(this, 'route');
-    if (route === '') {
-      route = '/';
-    }
-
-    this._super();
-
-    Ember.assert("You cannot use `redirectsTo` on a state that has child states", !redirection || (!!redirection && !!get(this, 'isLeaf')));
-  },
-
-  /**
-    @private
-
-    Whenever a routable state is entered, the context it was entered with
-    is stashed so that we can regenerate the state's `absoluteURL` on
-    demand.
-  */
-  stashContext: function(manager, context) {
-    var serialized = this.serialize(manager, context);
-    Ember.assert('serialize must return a hash', !serialized || typeof serialized === 'object');
-
-    manager.setStateMeta(this, 'context', context);
-    manager.setStateMeta(this, 'serialized', serialized);
-
-    if (get(this, 'isRoutable') && !get(manager, 'isRouting')) {
-      this.updateRoute(manager, get(manager, 'location'));
-    }
-  },
-
-  /**
-    @private
-
-    Whenever a routable state is entered, the router's location object
-    is notified to set the URL to the current absolute path.
-
-    In general, this will update the browser's URL.
-  */
-  updateRoute: function(manager, location) {
-    if (get(this, 'isLeafRoute')) {
-      var path = this.absoluteRoute(manager);
-      location.setURL(path);
-    }
-  },
-
-  /**
-    @private
-
-    Get the absolute route for the current state and a given
-    hash.
-
-    This method is private, as it expects a serialized hash,
-    not the original context object.
-  */
-  absoluteRoute: function(manager, hash) {
-    var parentState = get(this, 'parentState');
-    var path = '', generated;
-
-    // If the parent state is routable, use its current path
-    // as this route's prefix.
-    if (get(parentState, 'isRoutable')) {
-      path = parentState.absoluteRoute(manager, hash);
-    }
-
-    var matcher = get(this, 'routeMatcher'),
-        serialized = manager.getStateMeta(this, 'serialized');
-
-    // merge the existing serialized object in with the passed
-    // in hash.
-    hash = hash || {};
-    merge(hash, serialized);
-
-    generated = matcher && matcher.generate(hash);
-
-    if (generated) {
-      path = path + '/' + generated;
-    }
-
-    return path;
-  },
-
-  /**
-    @private
-
-    At the moment, a state is routable if it has a string `route`
-    property. This heuristic may change.
-  */
-  isRoutable: Ember.computed(function() {
-    return typeof get(this, 'route') === 'string';
-  }).cacheable(),
-
-  /**
-    @private
-
-    Determine if this is the last routable state.
-  */
-  isLeafRoute: Ember.computed(function() {
-    if (get(this, 'isLeaf')) { return true; }
-    return !get(this, 'childStates').findProperty('isRoutable');
-  }).cacheable(),
-
-  /**
-    @private
-
-    A _RouteMatcher object generated from the current route's `route`
-    string property.
-  */
-  routeMatcher: Ember.computed(function() {
-    var route = get(this, 'route');
-    if (route) {
-      return Ember._RouteMatcher.create({ route: route });
-    }
-  }).cacheable(),
-
-  /**
-    @private
-
-    Check whether the route has dynamic segments and therefore takes
-    a context.
-  */
-  hasContext: Ember.computed(function() {
-    var routeMatcher = get(this, 'routeMatcher');
-    if (routeMatcher) {
-      return routeMatcher.identifiers.length > 0;
-    }
-  }).cacheable(),
-
-  /**
-    @private
-
-    The model class associated with the current state. This property
-    uses the `modelType` property in order to allow it to be
-    specified as a String.
-  */
-  modelClass: Ember.computed(function() {
-    var modelType = get(this, 'modelType');
-
-    if (typeof modelType === 'string') {
-      return Ember.get(window, modelType);
-    } else {
-      return modelType;
-    }
-  }).cacheable(),
-
-  /**
-    @private
-
-    Get the model class for the state. The heuristic is:
-
-    * The state must have a single dynamic segment
-    * The dynamic segment must end in `_id`
-    * A dynamic segment like `blog_post_id` is converted into `BlogPost`
-    * The name is then looked up on the passed in namespace
-
-    The process of initializing an application with a router will
-    pass the application's namespace into the router, which will be
-    used here.
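-
-    For example (illustrative), a route of '/posts/:blog_post_id' under
-    the namespace `App` resolves to `App.BlogPost`.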
-  */
-  modelClassFor: function(namespace) {
-    var modelClass, routeMatcher, identifiers, match, className;
-
-    // if an explicit modelType was specified, use that
-    if (modelClass = get(this, 'modelClass')) { return modelClass; }
-
-    // if the router has no lookup namespace, we won't be able to guess
-    // the modelType
-    if (!namespace) { return; }
-
-    // make sure this state is actually a routable state
-    routeMatcher = get(this, 'routeMatcher');
-    if (!routeMatcher) { return; }
-
-    // only guess modelType for states with a single dynamic segment
-    // (no more, no fewer)
-    identifiers = routeMatcher.identifiers;
-    if (identifiers.length !== 2) { return; }
-
-    // extract the `_id` from the end of the dynamic segment; if the
-    // dynamic segment does not end in `_id`, we can't guess the
-    // modelType
-    match = identifiers[1].match(/^(.*)_id$/);
-    if (!match) { return; }
-
-    // convert the underscored type into a class form and look it up
-    // on the router's namespace
-    className = Ember.String.classify(match[1]);
-    return get(namespace, className);
-  },
-
-  /**
-    The default method that takes a `params` object and converts
-    it into an object.
-
-    By default, a params hash that looks like `{ post_id: 1 }`
-    will be looked up as `namespace.Post.find(1)`. This is
-    designed to work seamlessly with Ember Data, but will work
-    fine with any class that has a `find` method.
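-
-    For example (an illustrative sketch, assuming an `App.Post` model):
-
-        // route: '/posts/:post_id', URL: '#/posts/1'
-        // deserialize(router, { post_id: '1' })
-        // => App.Post.find('1')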
-  */
-  deserialize: function(manager, params) {
-    var modelClass, routeMatcher, param;
-
-    if (modelClass = this.modelClassFor(get(manager, 'namespace'))) {
-      Ember.assert("Expected "+modelClass.toString()+" to implement `find` for use in '"+this.get('path')+"' `deserialize`. Please implement the `find` method or overwrite `deserialize`.", modelClass.find);
-      return modelClass.find(params[paramForClass(modelClass)]);
-    }
-
-    return params;
-  },
-
-  /**
-    The default method that takes an object and converts it into
-    a params hash.
-
-    By default, if there is a single dynamic segment named
-    `blog_post_id` and the object is a `BlogPost` with an
-    `id` of `12`, the serialize method will produce:
-
-        { blog_post_id: 12 }
-  */
-  serialize: function(manager, context) {
-    var modelClass, routeMatcher, namespace, param, id;
-
-    if (Ember.empty(context)) { return ''; }
-
-    if (modelClass = this.modelClassFor(get(manager, 'namespace'))) {
-      param = paramForClass(modelClass);
-      id = get(context, 'id');
-      context = {};
-      context[param] = id;
-    }
-
-    return context;
-  },
-
-  /**
-    @private
-  */
-  resolvePath: function(manager, path) {
-    if (get(this, 'isLeafRoute')) { return Ember.A(); }
-
-    var childStates = get(this, 'childStates'), match;
-
-    childStates = Ember.A(childStates.filterProperty('isRoutable'));
-
-    childStates = childStates.sort(function(a, b) {
-      var aDynamicSegments = get(a, 'routeMatcher.identifiers.length'),
-          bDynamicSegments = get(b, 'routeMatcher.identifiers.length'),
-          aRoute = get(a, 'route'),
-          bRoute = get(b, 'route');
-
-      if (aRoute.indexOf(bRoute) === 0) {
-        return -1;
-      } else if (bRoute.indexOf(aRoute) === 0) {
-        return 1;
-      }
-
-      if (aDynamicSegments !== bDynamicSegments) {
-        return aDynamicSegments - bDynamicSegments;
-      }
-
-      return get(b, 'route.length') - get(a, 'route.length');
-    });
-
-    var state = childStates.find(function(state) {
-      var matcher = get(state, 'routeMatcher');
-      if (match = matcher.match(path)) { return true; }
-    });
-
-    Ember.assert("Could not find state for path " + path, !!state);
-
-    var resolvedState = Ember._ResolvedState.create({
-      manager: manager,
-      state: state,
-      match: match
-    });
-
-    var states = state.resolvePath(manager, match.remaining);
-
-    return Ember.A([resolvedState]).pushObjects(states);
-  },
-
-  /**
-    @private
-
-    Once `unroute` has finished unwinding, `routePath` will be called
-    with the remainder of the route.
-
-    For example, if you were in the /posts/1/comments state, and you
-    moved into the /posts/2/comments state, `routePath` will be called
-    on the state whose path is `/posts` with the path `/2/comments`.
-  */
-  routePath: function(manager, path) {
-    if (get(this, 'isLeafRoute')) { return; }
-
-    var resolvedStates = this.resolvePath(manager, path),
-        hasPromises = resolvedStates.some(function(s) { return get(s, 'hasPromise'); });
-
-    function runTransition() {
-      resolvedStates.forEach(function(rs) { rs.transition(); });
-    }
-
-    if (hasPromises) {
-      manager.transitionTo('loading');
-
-      Ember.assert('Loading state should be the child of a route', Ember.Routable.detect(get(manager, 'currentState.parentState')));
-      Ember.assert('Loading state should not be a route', !Ember.Routable.detect(get(manager, 'currentState')));
-
-      manager.handleStatePromises(resolvedStates, runTransition);
-    } else {
-      runTransition();
-    }
-  },
-
-  /**
-    @private
-
-    When you move to a new route by pressing the back
-    or forward button, this method is called first.
-
-    Its job is to move the state manager into a parent
-    state of the state it will eventually move into.
-  */
-  unroutePath: function(router, path) {
-    var parentState = get(this, 'parentState');
-
-    // If we're at the root state, we're done
-    if (parentState === router) {
-      return;
-    }
-
-    path = path.replace(/^(?=[^\/])/, "/");
-    var absolutePath = this.absoluteRoute(router);
-
-    var route = get(this, 'route');
-
-    // If the current path is empty, move up one state,
-    // because the index ('/') state must be a leaf node.
-    if (route !== '/') {
-      // If the current path is a prefix of the path we're trying
-      // to go to, we're done.
-      var index = path.indexOf(absolutePath),
-          next = path.charAt(absolutePath.length);
-
-      if (index === 0 && (next === "/" || next === "")) {
-        return;
-      }
-    }
-
-    // Transition to the parent and call unroute again.
-    router.enterState({
-      exitStates: [this],
-      enterStates: [],
-      finalState: parentState
-    });
-
-    router.send('unroutePath', path);
-  },
-
-  /**
-    The `connectOutlets` event will be triggered once a
-    state has been entered. It will be called with the
-    route's context.
-  */
-  connectOutlets: Ember.K,
-
-  /**
-   The `navigateAway` event will be triggered when the
-   URL changes due to the back/forward button
-  */
-  navigateAway: Ember.K
-});
-
-})();
-
-
-
-(function() {
-/**
-  @class
-  @extends Ember.Routable
-*/
-Ember.Route = Ember.State.extend(Ember.Routable);
-
-})();
-
-
-
-(function() {
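-// Escapes regex metacharacters in `text` so it can be embedded in a
-// RegExp as a literal string.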
-var escapeForRegex = function(text) {
-  return text.replace(/[\-\[\]{}()*+?.,\\\^\$|#\s]/g, "\\$&");
-};
-
-Ember._RouteMatcher = Ember.Object.extend({
-  state: null,
-
-  init: function() {
-    var route = this.route,
-        identifiers = [],
-        count = 1,
-        escaped;
-
-    // Strip off leading slash if present
-    if (route.charAt(0) === '/') {
-      route = this.route = route.substr(1);
-    }
-
-    escaped = escapeForRegex(route);
-
-    var regex = escaped.replace(/:([a-z_]+)(?=$|\/)/gi, function(match, id) {
-      identifiers[count++] = id;
-      return "([^/]+)";
-    });
-
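-    // ids are stored at their capture-group indices (1-based), so
-    // identifiers[0] is always unused.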
-    this.identifiers = identifiers;
-    this.regex = new RegExp("^/?" + regex);
-  },
-
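-  // Matches `path` against the compiled route regex, returning the
-  // unmatched remainder and a hash of dynamic segment values. For example
-  // (illustrative), matching '/posts/1/comments' against 'posts/:post_id'
-  // yields { remaining: '/comments', hash: { post_id: '1' } }.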
-  match: function(path) {
-    var match = path.match(this.regex);
-
-    if (match) {
-      var identifiers = this.identifiers,
-          hash = {};
-
-      for (var i=1, l=identifiers.length; i<l; i++) {
-        hash[identifiers[i]] = match[i];
-      }
-
-      return {
-        remaining: path.substr(match[0].length),
-        hash: identifiers.length > 0 ? hash : null
-      };
-    }
-  },
-
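-  // Fills the route's dynamic segments from `hash`. For example
-  // (illustrative), generate({ post_id: 1 }) on 'posts/:post_id'
-  // returns 'posts/1'.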
-  generate: function(hash) {
-    var identifiers = this.identifiers, route = this.route, id;
-    for (var i=1, l=identifiers.length; i<l; i++) {
-      id = identifiers[i];
-      route = route.replace(new RegExp(":" + id), hash[id]);
-    }
-    return route;
-  }
-});
-
-})();
-
-
-
-(function() {
-var get = Ember.get, set = Ember.set;
-
-var merge = function(original, hash) {
-  for (var prop in hash) {
-    if (!hash.hasOwnProperty(prop)) { continue; }
-    if (original.hasOwnProperty(prop)) { continue; }
-
-    original[prop] = hash[prop];
-  }
-};
-
-/**
-  @class
-
-  `Ember.Router` is the subclass of `Ember.StateManager` responsible for providing URL-based
-  application state detection. The `Ember.Router` instance of an application detects the browser URL
-  at application load time and attempts to match it to a specific application state. Additionally
-  the router will update the URL to reflect an application's state changes over time.
-
-  ## Adding a Router Instance to Your Application
-  An instance of Ember.Router can be associated with an instance of Ember.Application in one of two ways:
-
-  You can provide a subclass of Ember.Router as the `Router` property of your application. An instance
-  of this Router class will be instantiated and route detection will be enabled when the application's
-  `initialize` method is called. The Router instance will be available as the `router` property
-  of the application:
-
-      App = Ember.Application.create({
-        Router: Ember.Router.extend({ ... })
-      });
-
-      App.initialize();
-      App.get('router') // an instance of App.Router
-
-  If you want to define a Router instance elsewhere, you can pass the instance to the application's
-  `initialize` method:
-
-      App = Ember.Application.create();
-      aRouter = Ember.Router.create({ ... });
-
-      App.initialize(aRouter);
-      App.get('router') // aRouter
-
-  ## Adding Routes to a Router
-  The `initialState` property of Ember.Router instances is named `root`. The state stored in this
-  property must be a subclass of Ember.Route. The `root` route acts as the container for the
-  set of routable states but is not routable itself. It should have states that are also subclasses
-  of Ember.Route which each have a `route` property describing the URL pattern you would like to detect.
-
-      App = Ember.Application.create({
-        Router: Ember.Router.extend({
-          root: Ember.Route.extend({
-            index: Ember.Route.extend({
-              route: '/'
-            }),
-            ... additional Ember.Routes ...
-          })
-        })
-      });
-      App.initialize();
-
-
-  When an application loads, Ember will parse the URL and attempt to find an Ember.Route within
-  the application's states that matches. (The example URL-matching below will use the default
-  'hash syntax' provided by `Ember.HashLocation`.)
-
-  In the following route structure:
-
-      App = Ember.Application.create({
-        Router: Ember.Router.extend({
-          root: Ember.Route.extend({
-            aRoute: Ember.Route.extend({
-              route: '/'
-            }),
-            bRoute: Ember.Route.extend({
-              route: '/alphabeta'
-            })
-          })
-        })
-      });
-      App.initialize();
-
-  Loading the page at the URL '#/' will detect the route property of 'root.aRoute' ('/') and
-  transition the router first to the state named 'root' and then to the substate 'aRoute'.
-
-  Similarly, loading the page at the URL '#/alphabeta' would detect the route property of
-  'root.bRoute' ('/alphabeta') and transition the router first to the state named 'root' and
-  then to the substate 'bRoute'.
-  
-  ## Adding Nested Routes to a Router
-  Routes can contain nested subroutes each with their own `route` property describing the nested
-  portion of the URL they would like to detect and handle. Router, like all instances of StateManager,
-  cannot call `transitionTo` with an intermediary state. To avoid transitioning the Router into an
-  intermediary state when detecting URLs, a Route with nested routes must define both a base `route`
-  property for itself and a child Route with a `route` property of `'/'` which will be transitioned
-  to when the base route is detected in the URL:
-  
-  Given the following application code:
-
-      App = Ember.Application.create({
-        Router: Ember.Router.extend({
-          root: Ember.Route.extend({
-            aRoute: Ember.Route.extend({
-              route: '/theBaseRouteForThisSet', 
-              
-              indexSubRoute: Ember.Route.extend({
-                route: '/',
-              }),
-              
-              subRouteOne: Ember.Route.extend({
-                route: '/subroute1'
-              }),
-              
-              subRouteTwo: Ember.Route.extend({
-                route: '/subRoute2'
-              })
-              
-            })
-          })
-        })
-      });
-      App.initialize();
-
-  When the application is loaded at '/theBaseRouteForThisSet' the Router will transition to the route
-  at path 'root.aRoute' and then transition to state 'indexSubRoute'.
-  
-  When the application is loaded at '/theBaseRouteForThisSet/subRoute1' the Router will transition to
-  the route at path 'root.aRoute' and then transition to state 'subRouteOne'.
-  
-  ## Route Transition Events
-  Transitioning between Ember.Route instances (including the transition into the detected
-  route when loading the application)  triggers the same transition events as state transitions for
-  base `Ember.State`s. However, the default `setup` transition event is named `connectOutlets` on
-  Ember.Router instances (see 'Changing View Hierarchy in Response To State Change').
-
-  The following route structure when loaded with the URL "#/"
-
-      App = Ember.Application.create({
-        Router: Ember.Router.extend({
-          root: Ember.Route.extend({
-            aRoute: Ember.Route.extend({
-              route: '/',
-              enter: function(router) {
-                console.log("entering root.aRoute from", router.get('currentState.name'));
-              },
-              connectOutlets: function(router) {
-                console.log("entered root.aRoute, fully transitioned to", router.get('currentState.path'));
-              }
-            })
-          })
-        })
-      });
-      App.initialize();
-
-  Will result in console output of:
-
-      'entering root.aRoute from root'
-      'entered root.aRoute, fully transitioned to root.aRoute'
-
-  Ember.Route has two additional callbacks for handling URL serialization and deserialization. See
-  'Serializing/Deserializing URLs'
-
-  ## Routes With Dynamic Segments
-  An Ember.Route's `route` property can reference dynamic sections of the URL by prefacing a URL segment
-  with the ':' character.  The values of these dynamic segments will be passed as a hash to the
-  `deserialize` method of the matching Route (see 'Serializing/Deserializing URLs').
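-
-  For example, the following route (an illustrative sketch) matches URLs
-  like '#/posts/42' and passes `{ post_id: '42' }` to `deserialize`:
-
-      aRoute: Ember.Route.extend({
-        route: '/posts/:post_id'
-      })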
-
-  ## Serializing/Deserializing URLs
-  Ember.Route has two callbacks for associating a particular object context with a URL: `serialize`
-  for converting an object into a parameters hash to fill dynamic segments of a URL and `deserialize`
-  for converting a hash of dynamic segments from the URL into the appropriate object.
-
-  ### Deserializing A URL's Dynamic Segments
-  When an application is first loaded or the URL is changed manually (e.g. through the browser's
-  back button) the `deserialize` method of the URL's matching Ember.Route will be called with
-  the application's router as its first argument and a hash of the URL's dynamic segments and values
-  as its second argument.
-
-  The following route structure when loaded with the URL "#/fixed/thefirstvalue/anotherFixed/thesecondvalue":
-
-      App = Ember.Application.create({
-        Router: Ember.Router.extend({
-          root: Ember.Route.extend({
-            aRoute: Ember.Route.extend({
-              route: '/fixed/:dynamicSectionA/anotherFixed/:dynamicSectionB',
-              deserialize: function(router, params) {}
-            })
-          })
-        })
-      });
-      App.initialize();
-
-  Will call the 'deserialize' method of the Route instance at the path 'root.aRoute' with the
-  following hash as its second argument:
-
-      {
-        dynamicSectionA: 'thefirstvalue',
-        dynamicSectionB: 'thesecondvalue'
-      }
-
-  Within `deserialize` you should use this information to retrieve or create an appropriate context
-  object for the given URL (e.g. by loading from a remote API or accessing the browser's
-  `localStorage`). This object must be the `return` value of `deserialize` and will be
-  passed to the Route's `connectOutlets` and `serialize` methods.
-
-  When an application's state is changed from within the application itself, the context provided for
-  the transition will be passed and `deserialize` is not called (see 'Transitions Between States').
-
-  ### Serializing An Object For URLs with Dynamic Segments
-  When transitioning into a Route whose `route` property contains dynamic segments the Route's
-  `serialize` method is called with the Route's router as the first argument and the Route's
-  context as the second argument. The return value of `serialize` will be used to populate the
-  dynamic segments and should be an object with keys that match the names of the dynamic sections.
-
-  Given the following route structure:
-
-      App = Ember.Application.create({
-        Router: Ember.Router.extend({
-          root: Ember.Route.extend({
-            aRoute: Ember.Route.extend({
-              route: '/'
-            }),
-            bRoute: Ember.Route.extend({
-              route: '/staticSection/:someDynamicSegment',
-              serialize: function(router, context) {
-                return {
-                  someDynamicSegment: context.get('name')
-                }
-              }
-            })
-          })
-        })
-      });
-      App.initialize();
-
-
-  Transitioning to "root.bRoute" with a context of `Ember.Object.create({name: 'Yehuda'})` will call
-  the Route's `serialize` method with the context as its second argument and update the URL to
-  '#/staticSection/Yehuda'.
-
-  ## Transitions Between States
-  Once a routed application has initialized its state based on the entry URL, subsequent transitions to other
-  states will update the URL if the entered Route has a `route` property. Given the following route structure
-  loaded at the URL '#/':
-
-      App = Ember.Application.create({
-        Router: Ember.Router.extend({
-          root: Ember.Route.extend({
-            aRoute: Ember.Route.extend({
-              route: '/',
-              moveElsewhere: Ember.Route.transitionTo('bRoute')
-            }),
-            bRoute: Ember.Route.extend({
-              route: '/someOtherLocation'
-            })
-          })
-        })
-      });
-      App.initialize();
-
-  And application code:
-
-      App.get('router').send('moveElsewhere');
-
-  Will transition the application's state to 'root.bRoute' and trigger an update of the URL to
-  '#/someOtherLocation'.
-
-  For URL patterns with dynamic segments a context can be supplied as the second argument to `send`.
-  The router will match dynamic segment names to keys on this object and fill in the URL with the
-  supplied values. Given the following state structure loaded at the URL '#/':
-
-      App = Ember.Application.create({
-        Router: Ember.Router.extend({
-          root: Ember.Route.extend({
-            aRoute: Ember.Route.extend({
-              route: '/',
-              moveElsewhere: Ember.Route.transitionTo('bRoute')
-            }),
-            bRoute: Ember.Route.extend({
-              route: '/a/route/:dynamicSection/:anotherDynamicSection',
-              connectOutlets: function(router, context) {}
-            })
-          })
-        })
-      });
-      App.initialize();
-
-  And application code:
-
-      App.get('router').send('moveElsewhere', {
-        dynamicSection: '42',
-        anotherDynamicSection: 'Life'
-      });
-
-  Will transition the application's state to 'root.bRoute' and trigger an update of the URL to
-  '#/a/route/42/Life'.
-
-  The context argument will also be passed as the second argument to the `serialize` method call.
-
-  ## Injection of Controller Singletons
-  During application initialization Ember will detect properties of the application ending in 'Controller',
-  create singleton instances of each class, and assign them as properties on the router. The property name
-  will be the UpperCamel name converted to lowerCamel format. These controller classes should be subclasses
-  of Ember.ObjectController, Ember.ArrayController, Ember.Controller, or a custom Ember.Object that includes the
-  Ember.ControllerMixin mixin.
-
-      App = Ember.Application.create({
-        FooController: Ember.Object.extend(Ember.ControllerMixin),
-        Router: Ember.Router.extend({ ... })
-      });
-
-      App.get('router.fooController'); // instance of App.FooController
-
-  The controller singletons will have their `namespace` property set to the application and their `target`
-  property set to the application's router singleton for easy integration with Ember's user event system.
-  See 'Changing View Hierarchy in Response To State Change' and 'Responding to User-initiated Events'
-
-  ## Responding to User-initiated Events
-  Controller instances injected into the router at application initialization have their `target` property
-  set to the application's router instance. These controllers will also be the default `context` for their
-  associated views.  Uses of the `{{action}}` helper will automatically target the application's router.
-
-  Given the following application entered at the URL '#/':
-
-      App = Ember.Application.create({
-        Router: Ember.Router.extend({
-          root: Ember.Route.extend({
-            aRoute: Ember.Route.extend({
-              route: '/',
-              anActionOnTheRouter: function(router, context) {
-                router.transitionTo('anotherState', context);
-              }
-            }),
-            anotherState: Ember.Route.extend({
-              route: '/differentUrl',
-              connectOutlets: function(router, context) {
-
-              }
-            })
-          })
-        })
-      });
-      App.initialize();
-
-  The following template:
-
-      <script type="text/x-handlebars" data-template-name="aView">
-          <h1><a {{action anActionOnTheRouter}}>{{title}}</a></h1>
-      </script>
-
-  Will delegate `click` events on the rendered `h1` to the application's router instance. In this case the
-  `anActionOnTheRouter` method of the state at 'root.aRoute' will be called with the view's controller
-  as the context argument. This context will be passed to the `connectOutlets` as its second argument.
-
-  A different `context` can be supplied from within the `{{action}}` helper, allowing specific context passing
-  between application states:
-
-      <script type="text/x-handlebars" data-template-name="photos">
-        {{#each photo in controller}}
-          <h1><a {{action showPhoto photo}}>{{title}}</a></h1>
-        {{/each}}
-      </script>
-
-  See Handlebars.helpers.action for additional usage examples.
-
-
-  ## Changing View Hierarchy in Response To State Change
-  Changes in application state that change the URL should be accompanied by associated changes in view
-  hierarchy.  This can be accomplished by calling 'connectOutlet' on the injected controller singletons from
-  within the 'connectOutlets' event of an Ember.Route:
-
-      App = Ember.Application.create({
-        OneController: Ember.ObjectController.extend(),
-        OneView: Ember.View.extend(),
-
-        AnotherController: Ember.ObjectController.extend(),
-        AnotherView: Ember.View.extend(),
-
-        Router: Ember.Router.extend({
-          root: Ember.Route.extend({
-            aRoute: Ember.Route.extend({
-              route: '/',
-              connectOutlets: function(router, context) {
-                router.get('oneController').connectOutlet('another');
-              }
-            })
-          })
-        })
-      });
-      App.initialize();
-
-
-  This will detect the '{{outlet}}' portion of `oneController`'s view (an instance of `App.OneView`) and
-  fill it with a rendered instance of `App.AnotherView` whose `context` will be the single instance of
-  `App.AnotherController` stored on the router in the `anotherController` property.
-
-  For more information about Outlets, see `Ember.Handlebars.helpers.outlet`. For additional information on
-  the `connectOutlet` method, see `Ember.Controller.connectOutlet`. For more information on
-  controller injections, see `Ember.Application#initialize()`. For additional information about view context,
-  see `Ember.View`.
-
-  @extends Ember.StateManager
-*/
-Ember.Router = Ember.StateManager.extend(
-/** @scope Ember.Router.prototype */ {
-
-  /**
-    @property {String}
-    @default 'root'
-  */
-  initialState: 'root',
-
-  /**
-    The `Ember.Location` implementation to be used to manage the application
-    URL state. The following values are supported:
-
-    * 'hash': Uses URL fragment identifiers (like #/blog/1) for routing.
-    * 'history': Uses the browser's history API, where available (see
-      `rootURL`).
-    * 'none': Does not read or set the browser URL, but still allows for
-      routing to happen. Useful for testing.
-
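-    For example, to disable URL management entirely (useful in tests):
-
-        Ember.Router.extend({
-          location: 'none'
-        })
-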
-    @type String
-    @default 'hash'
-  */
-  location: 'hash',
-
-  /**
-    This is only used when a history location is used so that applications that
-    don't live at the root of the domain can append paths to their root.
-
-    @type String
-    @default '/'
-  */
-
-  rootURL: '/',
-
-  /**
-    On a Router, the transition event is `connectOutlets` rather than the default `setup`.
-
-    @property {String}
-    @default 'connectOutlets'
-  */
-  transitionEvent: 'connectOutlets',
-
-  transitionTo: function() {
-    this.abortRoutingPromises();
-    this._super.apply(this, arguments);
-  },
-
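-  /**
-    @private
-
-    Entry point for URL changes. Unwinds to a common ancestor state via
-    'unroutePath', routes the remaining path via 'routePath', and then
-    syncs the location to the resulting routable state.
-  */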
-  route: function(path) {
-    this.abortRoutingPromises();
-
-    set(this, 'isRouting', true);
-
-    var routableState;
-
-    try {
-      path = path.replace(/^(?=[^\/])/, "/");
-
-      this.send('navigateAway');
-      this.send('unroutePath', path);
-
-      routableState = get(this, 'currentState');
-      while (routableState && !routableState.get('isRoutable')) {
-        routableState = get(routableState, 'parentState');
-      }
-      var currentURL = routableState ? routableState.absoluteRoute(this) : '';
-      var rest = path.substr(currentURL.length);
-
-      this.send('routePath', rest);
-    } finally {
-      set(this, 'isRouting', false);
-    }
-
-    routableState = get(this, 'currentState');
-    while (routableState && !routableState.get('isRoutable')) {
-      routableState = get(routableState, 'parentState');
-    }
-
-    if (routableState) {
-      routableState.updateRoute(this, get(this, 'location'));
-    }
-  },
-
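-  /**
-    Returns a formatted URL for the state at `path`, filling the route's
-    dynamic segments from `hash`. For example (illustrative, assuming a
-    route of '/posts/:post_id' at 'root.aPost' and hash location):
-
-        router.urlFor('root.aPost', { post_id: 1 }) // '#/posts/1'
-  */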
-  urlFor: function(path, hash) {
-    var currentState = get(this, 'currentState') || this,
-        state = this.findStateByPath(currentState, path);
-
-    Ember.assert(Ember.String.fmt("Could not find route with path '%@'", [path]), !!state);
-    Ember.assert("To get a URL for a state, it must have a `route` property.", !!get(state, 'routeMatcher'));
-
-    var location = get(this, 'location'),
-        absoluteRoute = state.absoluteRoute(this, hash);
-
-    return location.formatURL(absoluteRoute);
-  },
-
-  urlForEvent: function(eventName) {
-    var contexts = Array.prototype.slice.call(arguments, 1);
-    var currentState = get(this, 'currentState');
-    var targetStateName = currentState.lookupEventTransition(eventName);
-
-    Ember.assert(Ember.String.fmt("You must specify a target state for event '%@' in order to link to it in the current state '%@'.", [eventName, get(currentState, 'path')]), !!targetStateName);
-
-    var targetState = this.findStateByPath(currentState, targetStateName);
-
-    Ember.assert("Your target state name " + targetStateName + " for event " + eventName + " did not resolve to a state", !!targetState);
-
-    var hash = this.serializeRecursively(targetState, contexts, {});
-
-    return this.urlFor(targetStateName, hash);
-  },
-
-  /** @private */
-  serializeRecursively: function(state, contexts, hash) {
-    var parentState,
-        context = get(state, 'hasContext') ? contexts.pop() : null;
-    merge(hash, state.serialize(this, context));
-    parentState = state.get("parentState");
-    if (parentState && parentState instanceof Ember.Route) {
-      return this.serializeRecursively(parentState, contexts, hash);
-    } else {
-      return hash;
-    }
-  },
-
-  abortRoutingPromises: function() {
-    if (this._routingPromises) {
-      this._routingPromises.abort();
-      this._routingPromises = null;
-    }
-  },
-
-  /**
-    @private
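-
-    Locks the router while the given resolved states' promises load, then
-    invokes `complete`; aborting unlocks the router without completing
-    the transition.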
-  */
-  handleStatePromises: function(states, complete) {
-    this.abortRoutingPromises();
-
-    this.set('isLocked', true);
-
-    var manager = this;
-
-    this._routingPromises = Ember._PromiseChain.create({
-      promises: states.slice(),
-
-      successCallback: function() {
-        manager.set('isLocked', false);
-        complete();
-      },
-
-      failureCallback: function() {
-        throw "Unable to load object";
-      },
-
-      promiseSuccessCallback: function(item, args) {
-        set(item, 'object', args[0]);
-      },
-
-      abortCallback: function() {
-        manager.set('isLocked', false);
-      }
-    }).start();
-  },
-
-  /** @private */
-  init: function() {
-    this._super();
-
-    var location = get(this, 'location'),
-        rootURL = get(this, 'rootURL');
-
-    if ('string' === typeof location) {
-      set(this, 'location', Ember.Location.create({
-        implementation: location,
-        rootURL: rootURL
-      }));
-    }
-  },
-
-  /** @private */
-  willDestroy: function() {
-    get(this, 'location').destroy();
-  }
-});
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Routing
-// Copyright: ©2012 Tilde Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-
-})();
-
-(function() {
-var get = Ember.get;
-
-Ember.StateManager.reopen(
-/** @scope Ember.StateManager.prototype */ {
-
-  /**
-    If the current state is a view state or the descendent of a view state,
-    this property will be the view associated with it. If there is no
-    view state active in this state manager, this value will be null.
-
-    @type Ember.View
-  */
-  currentView: Ember.computed(function() {
-    var currentState = get(this, 'currentState'),
-        view;
-
-    while (currentState) {
-      // TODO: Remove this when view state is removed
-      if (get(currentState, 'isViewState')) {
-        view = get(currentState, 'view');
-        if (view) { return view; }
-      }
-
-      currentState = get(currentState, 'parentState');
-    }
-
-    return null;
-  }).property('currentState').cacheable()
-
-});
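-
-// Illustrative usage (not part of the original source): with the ViewState
-// manager documented below, after viewStates.transitionTo('showingPeople')
-// this computed property resolves the entered state's view:
-//
-//     viewStates.get('currentView'); // => the ContactListView instance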
-
-})();
-
-
-
-(function() {
-var get = Ember.get, set = Ember.set;
-/**
-  @class
-  @deprecated
-
-  Ember.ViewState extends Ember.State to control the presence of a childView within a
-  container based on the current state of the ViewState's StateManager.
-
-  ## Interactions with Ember's View System.
-  When combined with instances of `Ember.StateManager`, ViewState is designed to
-  interact with Ember's view system to control which views are added to
-  and removed from the DOM based on the manager's current state.
-
-  By default, a StateManager will manage views inside the 'body' element. This can be
-  customized by setting the `rootElement` property to a CSS selector of an existing
-  HTML element you would prefer to receive view rendering.
-
-
-      viewStates = Ember.StateManager.create({
-        rootElement: '#some-other-element'
-      })
-
-  You can also specify a particular instance of `Ember.ContainerView` you would like to receive
-  view rendering by setting the `rootView` property. You will be responsible for placing
-  this element into the DOM yourself.
-
-      aLayoutView = Ember.ContainerView.create()
-
-      // make sure this view instance is added to the browser
-      aLayoutView.appendTo('body')
-
-      App.viewStates = Ember.StateManager.create({
-        rootView: aLayoutView
-      })
-
-
-  Once you have an instance of StateManager controlling a view, you can provide states
-  that are instances of `Ember.ViewState`.  When the StateManager enters a state
-  that is an instance of `Ember.ViewState`, that `ViewState`'s `view` property will be
-  instantiated and inserted into the StateManager's `rootView` or `rootElement`.
-  When a state is exited, the `ViewState`'s view will be removed from the StateManager's
-  view.
-
-      ContactListView = Ember.View.extend({
-        classNames: ['my-contacts-css-class'],
-        template: Ember.Handlebars.compile('<h2>People</h2>')
-      })
-
-      PhotoListView = Ember.View.extend({
-        classNames: ['my-photos-css-class'],
-        template: Ember.Handlebars.compile('<h2>Photos</h2>')
-      })
-
-      viewStates = Ember.StateManager.create({
-        showingPeople: Ember.ViewState.create({
-          view: ContactListView
-        }),
-        showingPhotos: Ember.ViewState.create({
-          view: PhotoListView
-        })
-      })
-
-      viewStates.transitionTo('showingPeople')
-
-  The above code will change the rendered HTML from
-
-      <body></body>
-
-  to
-
-      <body>
-        <div id="ember1" class="ember-view my-contacts-css-class">
-          <h2>People</h2>
-        </div>
-      </body>
-
-  Changing the current state via `transitionTo` from `showingPeople` to
-  `showingPhotos` will remove the `showingPeople` view and add the `showingPhotos` view:
-
-      viewStates.transitionTo('showingPhotos')
-
-  will change the rendered HTML to
-
-      <body>
-        <div id="ember2" class="ember-view my-photos-css-class">
-          <h2>Photos</h2>
-        </div>
-      </body>
-
-
-  When entering nested `ViewState`s, each state's view will be drawn into the StateManager's
-  `rootView` or `rootElement` as siblings.
-
-
-      ContactListView = Ember.View.extend({
-        classNames: ['my-contacts-css-class'],
-        template: Ember.Handlebars.compile('<h2>People</h2>')
-      })
-
-      EditAContactView = Ember.View.extend({
-        classNames: ['editing-a-contact-css-class'],
-        template: Ember.Handlebars.compile('Editing...')
-      })
-
-      viewStates = Ember.StateManager.create({
-        showingPeople: Ember.ViewState.create({
-          view: ContactListView,
-
-          withEditingPanel: Ember.ViewState.create({
-            view: EditAContactView
-          })
-        })
-      })
-
-
-      viewStates.transitionTo('showingPeople.withEditingPanel')
-
-
-  Will result in the following rendered HTML:
-
-      <body>
-        <div id="ember2" class="ember-view my-contacts-css-class">
-          <h2>People</h2>
-        </div>
-
-        <div id="ember2" class="ember-view editing-a-contact-css-class">
-          Editing...
-        </div>
-      </body>
-
-
-  ViewState views are added and removed from their StateManager's view via their
-  `enter` and `exit` methods. If you need to override these methods, be sure to call
-  `_super` to maintain the adding and removing behavior:
-
-      viewStates = Ember.StateManager.create({
-        aState: Ember.ViewState.create({
-          view: Ember.View.extend({}),
-          enter: function(manager){
-            // calling _super ensures this view will be
-            // properly inserted
-            this._super(manager);
-
-            // now you can do other things
-          }
-        })
-      })
-
-  ## Managing Multiple Sections of A Page With States
-  Multiple StateManagers can be combined to control multiple areas of an application's rendered views.
-  Given the following HTML body:
-
-      <body>
-        <div id='sidebar-nav'>
-        </div>
-        <div id='content-area'>
-        </div>
-      </body>
-
-  You could separately manage view state for each section with two StateManagers.
-
-      navigationStates = Ember.StateManager.create({
-        rootElement: '#sidebar-nav',
-        userAuthenticated: Em.ViewState.create({
-          view: Ember.View.extend({})
-        }),
-        userNotAuthenticated: Em.ViewState.create({
-          view: Ember.View.extend({})
-        })
-      })
-
-      contentStates = Ember.StateManager.create({
-        rootElement: '#content-area',
-        books: Em.ViewState.create({
-          view: Ember.View.extend({})
-        }),
-        music: Em.ViewState.create({
-          view: Ember.View.extend({})
-        })
-      })
-
-
-  If you prefer to start with an empty body and manage state programmatically you
-  can also take advantage of StateManager's `rootView` property and the ability of
-  `Ember.ContainerView`s to manually manage their child views.
-
-
-      dashboard = Ember.ContainerView.create({
-        childViews: ['navigationAreaView', 'contentAreaView'],
-        navigationAreaView: Ember.ContainerView.create({}),
-        contentAreaView: Ember.ContainerView.create({})
-      })
-
-      navigationStates = Ember.StateManager.create({
-        rootView: dashboard.get('navigationAreaView'),
-        userAuthenticated: Em.ViewState.create({
-          view: Ember.View.extend({})
-        }),
-        userNotAuthenticated: Em.ViewState.create({
-          view: Ember.View.extend({})
-        })
-      })
-
-      contentStates = Ember.StateManager.create({
-        rootView: dashboard.get('contentAreaView'),
-        books: Em.ViewState.create({
-          view: Ember.View.extend({})
-        }),
-        music: Em.ViewState.create({
-          view: Ember.View.extend({})
-        })
-      })
-
-      dashboard.appendTo('body')
-
-  ## User Manipulation of State via `{{action}}` Helpers
-  The Handlebars `{{action}}` helper is StateManager-aware and will use StateManager action sending
-  to connect user interaction to action-based state transitions.
-
-  Given the following body and handlebars template
-
-      <body>
-        <script type='text/x-handlebars'>
-          <a href="#" {{action "anAction" target="App.appStates"}}> Go </a>
-        </script>
-      </body>
-
-  And application code
-
-      App = Ember.Application.create()
-      App.appStates = Ember.StateManager.create({
-        initialState: 'aState',
-        aState: Ember.State.create({
-          anAction: function(manager, context){}
-        }),
-        bState: Ember.State.create({})
-      })
-
-  A user-initiated click or touch event on "Go" will trigger the 'anAction' method of
-  `App.appStates.aState` with `App.appStates` as the first argument and a
-  `jQuery.Event` object as the second argument. The `jQuery.Event` will include a property
-  `view` that references the `Ember.View` object that was interacted with.
-
-**/
-Ember.ViewState = Ember.State.extend(
-/** @scope Ember.ViewState.prototype */ {
-  isViewState: true,
-
-  init: function() {
-    Ember.deprecate("Ember.ViewState is deprecated and will be removed from future releases. Consider using the outlet pattern to display nested views instead. For more information, see http://emberjs.com/guides/outlets/.");
-    return this._super();
-  },
-
-  enter: function(stateManager) {
-    var view = get(this, 'view'), root, childViews;
-
-    if (view) {
-      if (Ember.View.detect(view)) {
-        view = view.create();
-        set(this, 'view', view);
-      }
-
-      Ember.assert('view must be an Ember.View', view instanceof Ember.View);
-
-      root = stateManager.get('rootView');
-
-      if (root) {
-        childViews = get(root, 'childViews');
-        childViews.pushObject(view);
-      } else {
-        root = stateManager.get('rootElement') || 'body';
-        view.appendTo(root);
-      }
-    }
-  },
-
-  exit: function(stateManager) {
-    var view = get(this, 'view');
-
-    if (view) {
-      // If the view has a parent view, then it is
-      // part of a view hierarchy and should be removed
-      // from its parent.
-      if (get(view, 'parentView')) {
-        view.removeFromParent();
-      } else {
-
-        // Otherwise, the view is a "root view" and
-        // was appended directly to the DOM.
-        view.remove();
-      }
-    }
-  }
-});
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:  Ember Statecharts
-// Copyright: ©2011 Living Social Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-
-})();
-
-(function() {
-// ==========================================================================
-// Project:   metamorph
-// Copyright: ©2011 My Company Inc. All rights reserved.
-// ==========================================================================
-
-(function(window) {
-
-  var K = function(){},
-      guid = 0,
-      document = window.document,
-
-      // Feature-detect the W3C range API, the extended check is for IE9 which only partially supports ranges
-      supportsRange = ('createRange' in document) && (typeof Range !== 'undefined') && Range.prototype.createContextualFragment,
-
-      // Internet Explorer prior to 9 does not allow setting innerHTML if the first element
-      // is a "zero-scope" element. This problem can be worked around by making
-      // the first node an invisible text node. We, like Modernizr, use &shy;
-      needsShy = (function(){
-        var testEl = document.createElement('div');
-        testEl.innerHTML = "<div></div>";
-        testEl.firstChild.innerHTML = "<script></script>";
-        return testEl.firstChild.innerHTML === '';
-      })();
-
-  // Constructor that supports either Metamorph('foo') or new
-  // Metamorph('foo');
-  //
-  // Takes a string of HTML as the argument.
-
-  var Metamorph = function(html) {
-    var self;
-
-    if (this instanceof Metamorph) {
-      self = this;
-    } else {
-      self = new K();
-    }
-
-    self.innerHTML = html;
-    var myGuid = 'metamorph-'+(guid++);
-    self.start = myGuid + '-start';
-    self.end = myGuid + '-end';
-
-    return self;
-  };
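-
-  // Illustrative usage (not part of the original source), exercising the
-  // prototype methods defined below:
-  //
-  //     var morph = Metamorph("<p>Hello</p>");
-  //     morph.appendTo(document.body); // rendered between two marker <script> tags
-  //     morph.html("<p>Goodbye</p>");  // replaces only this morph's content
-  //     morph.remove();                // removes the content and both markers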
-
-  K.prototype = Metamorph.prototype;
-
-  var rangeFor, htmlFunc, removeFunc, outerHTMLFunc, appendToFunc, afterFunc, prependFunc, startTagFunc, endTagFunc;
-
-  outerHTMLFunc = function() {
-    return this.startTag() + this.innerHTML + this.endTag();
-  };
-
-  startTagFunc = function() {
-    return "<script id='" + this.start + "' type='text/x-placeholder'></script>";
-  };
-
-  endTagFunc = function() {
-    return "<script id='" + this.end + "' type='text/x-placeholder'></script>";
-  };
-
-  // If we have the W3C range API, this process is relatively straightforward.
-  if (supportsRange) {
-
-    // Get a range for the current morph. Optionally include the starting and
-    // ending placeholders.
-    rangeFor = function(morph, outerToo) {
-      var range = document.createRange();
-      var before = document.getElementById(morph.start);
-      var after = document.getElementById(morph.end);
-
-      if (outerToo) {
-        range.setStartBefore(before);
-        range.setEndAfter(after);
-      } else {
-        range.setStartAfter(before);
-        range.setEndBefore(after);
-      }
-
-      return range;
-    };
-
-    htmlFunc = function(html, outerToo) {
-      // get a range for the current metamorph object
-      var range = rangeFor(this, outerToo);
-
-      // delete the contents of the range, which will be the
-      // nodes between the starting and ending placeholder.
-      range.deleteContents();
-
-      // create a new document fragment for the HTML
-      var fragment = range.createContextualFragment(html);
-
-      // insert the fragment into the range
-      range.insertNode(fragment);
-    };
-
-    removeFunc = function() {
-      // get a range for the current metamorph object including
-      // the starting and ending placeholders.
-      var range = rangeFor(this, true);
-
-      // delete the entire range.
-      range.deleteContents();
-    };
-
-    appendToFunc = function(node) {
-      var range = document.createRange();
-      range.setStart(node);
-      range.collapse(false);
-      var frag = range.createContextualFragment(this.outerHTML());
-      node.appendChild(frag);
-    };
-
-    afterFunc = function(html) {
-      var range = document.createRange();
-      var after = document.getElementById(this.end);
-
-      range.setStartAfter(after);
-      range.setEndAfter(after);
-
-      var fragment = range.createContextualFragment(html);
-      range.insertNode(fragment);
-    };
-
-    prependFunc = function(html) {
-      var range = document.createRange();
-      var start = document.getElementById(this.start);
-
-      range.setStartAfter(start);
-      range.setEndAfter(start);
-
-      var fragment = range.createContextualFragment(html);
-      range.insertNode(fragment);
-    };
-
-  } else {
-    /**
-     * This code is mostly taken from jQuery, with one exception: jQuery starts
-     * from a string of HTML and must figure out how to convert it into nodes,
-     * so it scans the HTML for an opening tag and uses that as the key into
-     * the wrap map. In our case, we already know the parent node and can use
-     * its type as the key into the wrap map.
-     **/
-    var wrapMap = {
-      select: [ 1, "<select multiple='multiple'>", "</select>" ],
-      fieldset: [ 1, "<fieldset>", "</fieldset>" ],
-      table: [ 1, "<table>", "</table>" ],
-      tbody: [ 2, "<table><tbody>", "</tbody></table>" ],
-      tr: [ 3, "<table><tbody><tr>", "</tr></tbody></table>" ],
-      colgroup: [ 2, "<table><tbody></tbody><colgroup>", "</colgroup></table>" ],
-      map: [ 1, "<map>", "</map>" ],
-      _default: [ 0, "", "" ]
-    };
-
-    /**
-     * Given a parent node and some HTML, generate a set of nodes. Return the first
-     * node, which will allow us to traverse the rest using nextSibling.
-     *
-     * We need to do this because innerHTML in IE does not really parse the nodes.
-     **/
-    var firstNodeFor = function(parentNode, html) {
-      var arr = wrapMap[parentNode.tagName.toLowerCase()] || wrapMap._default;
-      var depth = arr[0], start = arr[1], end = arr[2];
-
-      if (needsShy) { html = '&shy;'+html; }
-
-      var element = document.createElement('div');
-      element.innerHTML = start + html + end;
-
-      for (var i=0; i<=depth; i++) {
-        element = element.firstChild;
-      }
-
-      // Look for &shy; to remove it.
-      if (needsShy) {
-        var shyElement = element;
-
-        // Sometimes we get nameless elements with the shy inside
-        while (shyElement.nodeType === 1 && !shyElement.nodeName) {
-          shyElement = shyElement.firstChild;
-        }
-
-        // At this point it's the actual unicode character.
-        if (shyElement.nodeType === 3 && shyElement.nodeValue.charAt(0) === "\u00AD") {
-          shyElement.nodeValue = shyElement.nodeValue.slice(1);
-        }
-      }
-
-      return element;
-    };
-
-    /**
-     * In some cases, Internet Explorer can create an anonymous node in
-     * the hierarchy with no tagName. You can create this scenario via:
-     *
-     *     div = document.createElement("div");
-     *     div.innerHTML = "<table>&shy<script></script><tr><td>hi</td></tr></table>";
-     *     div.firstChild.firstChild.tagName //=> ""
-     *
-     * If our script markers are inside such a node, we need to find that
-     * node and use *it* as the marker.
-     **/
-    var realNode = function(start) {
-      while (start.parentNode.tagName === "") {
-        start = start.parentNode;
-      }
-
-      return start;
-    };
-
-    /**
-     * When automatically adding a tbody, Internet Explorer inserts the
-     * tbody immediately before the first <tr>. Other browsers create it
-     * before the first node, no matter what.
-     *
-     * This means that the following code:
-     *
-     *     div = document.createElement("div");
-     *     div.innerHTML = "<table><script id='first'></script><tr><td>hi</td></tr><script id='last'></script></table>
-     *
-     * Generates the following DOM in IE:
-     *
-     *     + div
-     *       + table
-     *         - script id='first'
-     *         + tbody
-     *           + tr
-     *             + td
-     *               - "hi"
-     *           - script id='last'
-     *
-     * Which means that the two script tags, even though they were
-     * inserted at the same point in the hierarchy in the original
-     * HTML, now have different parents.
-     *
-     * This code reparents the first script tag by making it the tbody's
-     * first child.
-     **/
-    var fixParentage = function(start, end) {
-      if (start.parentNode !== end.parentNode) {
-        end.parentNode.insertBefore(start, end.parentNode.firstChild);
-      }
-    };
-
-    htmlFunc = function(html, outerToo) {
-      // get the real starting node. see realNode for details.
-      var start = realNode(document.getElementById(this.start));
-      var end = document.getElementById(this.end);
-      var parentNode = end.parentNode;
-      var node, nextSibling, last;
-
-      // make sure that the start and end nodes share the same
-      // parent. If not, fix it.
-      fixParentage(start, end);
-
-      // remove all of the nodes after the starting placeholder and
-      // before the ending placeholder.
-      node = start.nextSibling;
-      while (node) {
-        nextSibling = node.nextSibling;
-        last = node === end;
-
-        // if this is the last node, and we want to remove it as well,
-        // set the `end` node to the next sibling. This is because
-        // for the rest of the function, we insert the new nodes
-        // before the end (note that insertBefore(node, null) is
-        // the same as appendChild(node)).
-        //
-        // if we do not want to remove it, just break.
-        if (last) {
-          if (outerToo) { end = node.nextSibling; } else { break; }
-        }
-
-        node.parentNode.removeChild(node);
-
-        // if this is the last node and we didn't break before
-        // (because we wanted to remove the outer nodes), break
-        // now.
-        if (last) { break; }
-
-        node = nextSibling;
-      }
-
-      // get the first node for the HTML string, even in cases like
-      // tables and lists where a simple innerHTML on a div would
-      // swallow some of the content.
-      node = firstNodeFor(start.parentNode, html);
-
-      // copy the nodes for the HTML between the starting and ending
-      // placeholder.
-      while (node) {
-        nextSibling = node.nextSibling;
-        parentNode.insertBefore(node, end);
-        node = nextSibling;
-      }
-    };
-
-    // remove the nodes in the DOM representing this metamorph.
-    //
-    // this includes the starting and ending placeholders.
-    removeFunc = function() {
-      var start = realNode(document.getElementById(this.start));
-      var end = document.getElementById(this.end);
-
-      this.html('');
-      start.parentNode.removeChild(start);
-      end.parentNode.removeChild(end);
-    };
-
-    appendToFunc = function(parentNode) {
-      var node = firstNodeFor(parentNode, this.outerHTML());
-      var nextSibling;
-
-      while (node) {
-        nextSibling = node.nextSibling;
-        parentNode.appendChild(node);
-        node = nextSibling;
-      }
-    };
-
-    afterFunc = function(html) {
-      // get the real starting node. see realNode for details.
-      var end = document.getElementById(this.end);
-      var insertBefore = end.nextSibling;
-      var parentNode = end.parentNode;
-      var nextSibling;
-      var node;
-
-      // get the first node for the HTML string, even in cases like
-      // tables and lists where a simple innerHTML on a div would
-      // swallow some of the content.
-      node = firstNodeFor(parentNode, html);
-
-      // copy the nodes for the HTML between the starting and ending
-      // placeholder.
-      while (node) {
-        nextSibling = node.nextSibling;
-        parentNode.insertBefore(node, insertBefore);
-        node = nextSibling;
-      }
-    };
-
-    prependFunc = function(html) {
-      var start = document.getElementById(this.start);
-      var parentNode = start.parentNode;
-      var nextSibling;
-      var node;
-
-      node = firstNodeFor(parentNode, html);
-      var insertBefore = start.nextSibling;
-
-      while (node) {
-        nextSibling = node.nextSibling;
-        parentNode.insertBefore(node, insertBefore);
-        node = nextSibling;
-      }
-    };
-  }
-
-  Metamorph.prototype.html = function(html) {
-    this.checkRemoved();
-    if (html === undefined) { return this.innerHTML; }
-
-    htmlFunc.call(this, html);
-
-    this.innerHTML = html;
-  };
-
-  Metamorph.prototype.replaceWith = function(html) {
-    this.checkRemoved();
-    htmlFunc.call(this, html, true);
-  };
-
-  Metamorph.prototype.remove = removeFunc;
-  Metamorph.prototype.outerHTML = outerHTMLFunc;
-  Metamorph.prototype.appendTo = appendToFunc;
-  Metamorph.prototype.after = afterFunc;
-  Metamorph.prototype.prepend = prependFunc;
-  Metamorph.prototype.startTag = startTagFunc;
-  Metamorph.prototype.endTag = endTagFunc;
-
-  Metamorph.prototype.isRemoved = function() {
-    var before = document.getElementById(this.start);
-    var after = document.getElementById(this.end);
-
-    return !before || !after;
-  };
-
-  Metamorph.prototype.checkRemoved = function() {
-    if (this.isRemoved()) {
-      throw new Error("Cannot perform operations on a Metamorph that is not in the DOM.");
-    }
-  };
-
-  window.Metamorph = Metamorph;
-})(this);
-
-
-})();
-
-(function() {
-// ==========================================================================
-// Project:   Ember Handlebars Views
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-/*globals Handlebars */
-var objectCreate = Ember.create;
-
-/**
-  @namespace
-  @name Handlebars
-  @private
-*/
-
-/**
-  @namespace
-  @name Handlebars.helpers
-  @description Helpers for Handlebars templates
-*/
-
-Ember.assert("Ember Handlebars requires Handlebars 1.0.beta.5 or greater", window.Handlebars && window.Handlebars.VERSION.match(/^1\.0\.beta\.[56789]$|^1\.0\.rc\.[123456789]+/));
-
-/**
-  @class
-
-  Prepares the Handlebars templating library for use inside Ember's view
-  system.
-
-  The Ember.Handlebars object is the standard Handlebars library, extended to use
-  Ember's get() method instead of direct property access, which allows
-  computed properties to be used inside templates.
-
-  To create an Ember.Handlebars template, call Ember.Handlebars.compile().  This will
-  return a function that can be used by Ember.View for rendering.
-*/
-Ember.Handlebars = objectCreate(Handlebars);
-
-Ember.Handlebars.helpers = objectCreate(Handlebars.helpers);
-
-/**
-  Override the opcode compiler and JavaScript compiler for Handlebars.
-  @private
-*/
-Ember.Handlebars.Compiler = function() {};
-Ember.Handlebars.Compiler.prototype = objectCreate(Handlebars.Compiler.prototype);
-Ember.Handlebars.Compiler.prototype.compiler = Ember.Handlebars.Compiler;
-
-/** @private */
-Ember.Handlebars.JavaScriptCompiler = function() {};
-Ember.Handlebars.JavaScriptCompiler.prototype = objectCreate(Handlebars.JavaScriptCompiler.prototype);
-Ember.Handlebars.JavaScriptCompiler.prototype.compiler = Ember.Handlebars.JavaScriptCompiler;
-Ember.Handlebars.JavaScriptCompiler.prototype.namespace = "Ember.Handlebars";
-
-
-Ember.Handlebars.JavaScriptCompiler.prototype.initializeBuffer = function() {
-  return "''";
-};
-
-/**
-  Override the default buffer for Ember Handlebars. By default, Handlebars creates
-  an empty String at the beginning of each invocation and appends to it. Ember's
-  Handlebars overrides this to append to a single shared buffer.
-
-  @private
-*/
-Ember.Handlebars.JavaScriptCompiler.prototype.appendToBuffer = function(string) {
-  return "data.buffer.push("+string+");";
-};
-
-/**
-  Rewrite simple mustaches from {{foo}} to {{_triageMustache "foo"}}. This means that all simple
-  mustaches in Ember's Handlebars will also set up an observer to keep the DOM
-  up to date when the underlying property changes.
-
-  @private
-*/
-Ember.Handlebars.Compiler.prototype.mustache = function(mustache) {
-  if (mustache.params.length || mustache.hash) {
-    return Handlebars.Compiler.prototype.mustache.call(this, mustache);
-  } else {
-    var id = new Handlebars.AST.IdNode(['_triageMustache']);
-
-    // Update the mustache node to include a hash value indicating whether the original node
-    // was escaped. This will allow us to properly escape values when the underlying value
-    // changes and we need to re-render the value.
-    if(!mustache.escaped) {
-      mustache.hash = mustache.hash || new Handlebars.AST.HashNode([]);
-      mustache.hash.pairs.push(["unescaped", new Handlebars.AST.StringNode("true")]);
-    }
-    mustache = new Handlebars.AST.MustacheNode([id].concat([mustache.id]), mustache.hash, !mustache.escaped);
-    return Handlebars.Compiler.prototype.mustache.call(this, mustache);
-  }
-};
-
-/**
-  Used for precompilation of Ember Handlebars templates. This will not be used during normal
-  app execution.
-
-  @param {String} string The template to precompile
-*/
-Ember.Handlebars.precompile = function(string) {
-  var ast = Handlebars.parse(string);
-
-  var options = {
-    knownHelpers: {
-      action: true,
-      unbound: true,
-      bindAttr: true,
-      template: true,
-      view: true,
-      _triageMustache: true
-    },
-    data: true,
-    stringParams: true
-  };
-
-  var environment = new Ember.Handlebars.Compiler().compile(ast, options);
-  return new Ember.Handlebars.JavaScriptCompiler().compile(environment, options, undefined, true);
-};
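-
-// Illustrative usage sketch (not part of the original source): the returned
-// template spec can be revived with Handlebars.template(), mirroring what
-// Ember.Handlebars.compile() does below:
-//
-//     var spec = Ember.Handlebars.precompile("<h1>{{title}}</h1>");
-//     var template = Handlebars.template(spec);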
-
-/**
-  The entry point for Ember Handlebars. This replaces the default Handlebars.compile and turns on
-  template-local data and String parameters.
-
-  @param {String} string The template to compile
-*/
-Ember.Handlebars.compile = function(string) {
-  var ast = Handlebars.parse(string);
-  var options = { data: true, stringParams: true };
-  var environment = new Ember.Handlebars.Compiler().compile(ast, options);
-  var templateSpec = new Ember.Handlebars.JavaScriptCompiler().compile(environment, options, undefined, true);
-
-  return Handlebars.template(templateSpec);
-};
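-
-// Illustrative usage (not part of the original source):
-//
-//     var template = Ember.Handlebars.compile("<h1>{{title}}</h1>");
-//     Ember.View.create({ template: template });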
-
-/**
-  If a path starts with a reserved keyword, returns the root
-  that should be used.
-
-  @private
-*/
-var normalizePath = Ember.Handlebars.normalizePath = function(root, path, data) {
-  var keywords = (data && data.keywords) || {},
-      keyword, isKeyword;
-
-  // Get the first segment of the path. For example, if the
-  // path is "foo.bar.baz", returns "foo".
-  keyword = path.split('.', 1)[0];
-
-  // Test to see if the first path is a keyword that has been
-  // passed along in the view's data hash. If so, we will treat
-  // that object as the new root.
-  if (keywords.hasOwnProperty(keyword)) {
-    // Look up the value in the template's data hash.
-    root = keywords[keyword];
-    isKeyword = true;
-
-    // Handle cases where the entire path is the reserved
-    // word. In that case, return the object itself.
-    if (path === keyword) {
-      path = '';
-    } else {
-      // Strip the keyword from the path and look up
-      // the remainder from the newly found root.
-      path = path.substr(keyword.length+1);
-    }
-  }
-
-  return { root: root, path: path, isKeyword: isKeyword };
-};
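-
-// Illustrative behavior (not part of the original source): given a data hash
-// whose keywords include `view`, a keyword-prefixed path is re-rooted:
-//
-//     normalizePath(context, "view.name", data);
-//     // => { root: data.keywords.view, path: "name", isKeyword: true }
-//
-//     normalizePath(context, "name", data);
-//     // => { root: context, path: "name", isKeyword: undefined }
-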
-/**
-  Lookup both on root and on window. If the path starts with
-  a keyword, the corresponding object will be looked up in the
-  template's data hash and used to resolve the path.
-
-  @param {Object} root The object to look up the property on
-  @param {String} path The path to be looked up
-  @param {Object} options The template's option hash
-*/
-
-Ember.Handlebars.getPath = function(root, path, options) {
-  var data = options && options.data,
-      normalizedPath = normalizePath(root, path, data),
-      value;
-
-  // In cases where the path begins with a keyword, change the
-  // root to the value represented by that keyword, and ensure
-  // the path is relative to it.
-  root = normalizedPath.root;
-  path = normalizedPath.path;
-
-  value = Ember.get(root, path);
-
-  if (value === undefined && root !== window && Ember.isGlobalPath(path)) {
-    value = Ember.get(window, path);
-  }
-  return value;
-};
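-
-// Illustrative behavior (not part of the original source):
-//
-//     Ember.Handlebars.getPath(post, "author.name", options);
-//     // => Ember.get(post, "author.name")
-//
-//     Ember.Handlebars.getPath(post, "App.version", options);
-//     // => falls back to Ember.get(window, "App.version") when the lookup
-//     //    on `post` is undefined, since "App.version" is a global path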
-
-/**
-  Registers a helper in Handlebars that will be called if no property with the
-  given name can be found on the current context object, and no helper with
-  that name is registered.
-
-  This throws an exception with a more helpful error message so the user can
-  track down where the problem is happening.
-
-  @name Handlebars.helpers.helperMissing
-  @param {String} path
-  @param {Hash} options
-*/
-Ember.Handlebars.registerHelper('helperMissing', function(path, options) {
-  var error, view = "";
-
-  error = "%@ Handlebars error: Could not find property '%@' on object %@.";
-  if (options.data){
-    view = options.data.view;
-  }
-  throw new Ember.Error(Ember.String.fmt(error, [view, path, this]));
-});
-
-
-})();
-
-
-
-(function() {
-
-Ember.String.htmlSafe = function(str) {
-  return new Handlebars.SafeString(str);
-};
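-
-// Illustrative usage (not part of the original source): an htmlSafe string is
-// a Handlebars.SafeString, which bound {{mustaches}} render without escaping:
-//
-//     var safe = Ember.String.htmlSafe("<b>bold</b>");
-//     safe instanceof Handlebars.SafeString; // => true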
-
-var htmlSafe = Ember.String.htmlSafe;
-
-if (Ember.EXTEND_PROTOTYPES) {
-
-  /**
-    @see Ember.String.htmlSafe
-  */
-  String.prototype.htmlSafe = function() {
-    return htmlSafe(this);
-  };
-
-}
-
-})();
-
-
-
-(function() {
-/*jshint newcap:false*/
-var set = Ember.set, get = Ember.get;
-
-var DOMManager = {
-  remove: function(view) {
-    var morph = view.morph;
-    if (morph.isRemoved()) { return; }
-    set(view, 'element', null);
-    view._lastInsert = null;
-    morph.remove();
-  },
-
-  prepend: function(view, childView) {
-    childView._insertElementLater(function() {
-      var morph = view.morph;
-      morph.prepend(childView.outerHTML);
-      childView.outerHTML = null;
-    });
-  },
-
-  after: function(view, nextView) {
-    nextView._insertElementLater(function() {
-      var morph = view.morph;
-      morph.after(nextView.outerHTML);
-      nextView.outerHTML = null;
-    });
-  },
-
-  replace: function(view) {
-    var morph = view.morph;
-
-    view.transitionTo('preRender');
-    view.clearRenderedChildren();
-    var buffer = view.renderToBuffer();
-
-    Ember.run.schedule('render', this, function() {
-      if (get(view, 'isDestroyed')) { return; }
-      view.invalidateRecursively('element');
-      view._notifyWillInsertElement();
-      morph.replaceWith(buffer.string());
-      view.transitionTo('inDOM');
-      view._notifyDidInsertElement();
-    });
-  },
-
-  empty: function(view) {
-    view.morph.html("");
-  }
-};
-
-// The `morph` and `outerHTML` properties are internal only
-// and not observable.
-
-Ember._Metamorph = Ember.Mixin.create({
-  isVirtual: true,
-  tagName: '',
-
-  init: function() {
-    this._super();
-    this.morph = Metamorph();
-  },
-
-  beforeRender: function(buffer) {
-    buffer.push(this.morph.startTag());
-  },
-
-  afterRender: function(buffer) {
-    buffer.push(this.morph.endTag());
-  },
-
-  createElement: function() {
-    var buffer = this.renderToBuffer();
-    this.outerHTML = buffer.string();
-    this.clearBuffer();
-  },
-
-  domManager: DOMManager
-});
-
-Ember._MetamorphView = Ember.View.extend(Ember._Metamorph);
-
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:   Ember Handlebars Views
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-/*globals Handlebars */
-
-var get = Ember.get, set = Ember.set, getPath = Ember.Handlebars.getPath;
-/**
-  @ignore
-  @private
-  @class
-
-  Ember._HandlebarsBoundView is a private view created by the Handlebars `{{bind}}`
-  helpers that is used to keep track of bound properties.
-
-  Every time a property is bound using a `{{mustache}}`, an anonymous subclass
-  of Ember._HandlebarsBoundView is created with the appropriate sub-template and
-  context set up. When the associated property changes, just the template for
-  this view will re-render.
-*/
-Ember._HandlebarsBoundView = Ember._MetamorphView.extend({
-/** @scope Ember._HandlebarsBoundView.prototype */
-
-  /**
-    The function used to determine if the `displayTemplate` or
-    `inverseTemplate` should be rendered. This should be a function that takes
-    a value and returns a Boolean.
-
-    @type Function
-    @default null
-  */
-  shouldDisplayFunc: null,
-
-  /**
-    Whether the template rendered by this view gets passed the context object
-    of its parent template, or gets passed the value of retrieving `path`
-    from the `pathRoot`.
-
-    For example, this is true when using the `{{#if}}` helper, because the
-    template inside the helper should look up properties relative to the same
-    object as outside the block. This would be false when used with `{{#with
-    foo}}` because the template should receive the object found by evaluating
-    `foo`.
-
-    @type Boolean
-    @default false
-  */
-  preserveContext: false,
-
-  /**
-    If `preserveContext` is true, this is the object that will be used
-    to render the template.
-
-    @type Object
-  */
-  previousContext: null,
-
-  /**
-    The template to render when `shouldDisplayFunc` evaluates to true.
-
-    @type Function
-    @default null
-  */
-  displayTemplate: null,
-
-  /**
-    The template to render when `shouldDisplayFunc` evaluates to false.
-
-    @type Function
-    @default null
-  */
-  inverseTemplate: null,
-
-
-  /**
-    The path to look up on `pathRoot` that is passed to
-    `shouldDisplayFunc` to determine which template to render.
-
-    In addition, if `preserveContext` is false, the object at this path will
-    be passed to the template when rendering.
-
-    @type String
-    @default null
-  */
-  path: null,
-
-  /**
-    The object from which the `path` will be looked up. Sometimes this is the
-    same as the `previousContext`, but in cases where this view has been generated
-    for paths that start with a keyword such as `view` or `controller`, the
-    path root will be that resolved object.
-
-    @type Object
-  */
-  pathRoot: null,
-
-  normalizedValue: Ember.computed(function() {
-    var path = get(this, 'path'),
-        pathRoot  = get(this, 'pathRoot'),
-        valueNormalizer = get(this, 'valueNormalizerFunc'),
-        result, templateData;
-
-    // Use the pathRoot as the result if no path is provided. This
-    // happens if the path is `this`, which gets normalized into
-    // a `pathRoot` of the current Handlebars context and a path
-    // of `''`.
-    if (path === '') {
-      result = pathRoot;
-    } else {
-      templateData = get(this, 'templateData');
-      result = getPath(pathRoot, path, { data: templateData });
-    }
-
-    return valueNormalizer ? valueNormalizer(result) : result;
-  }).property('path', 'pathRoot', 'valueNormalizerFunc').volatile(),
-
-  rerenderIfNeeded: function() {
-    if (!get(this, 'isDestroyed') && get(this, 'normalizedValue') !== this._lastNormalizedValue) {
-      this.rerender();
-    }
-  },
-
-  /**
-    Determines which template to invoke, sets up the correct state based on
-    that logic, then invokes the default Ember.View `render` implementation.
-
-    This method will first look up the `path` key on `pathRoot`,
-    then pass that value to the `shouldDisplayFunc` function. If that returns
-    true, the `displayTemplate` function will be rendered to DOM. Otherwise,
-    `inverseTemplate`, if specified, will be rendered.
-
-    For example, if this Ember._HandlebarsBoundView represented the {{#with foo}}
-    helper, it would look up the `foo` property of its context, and
-    `shouldDisplayFunc` would always return true. The object found by looking
-    up `foo` would be passed to `displayTemplate`.
-
-    @param {Ember.RenderBuffer} buffer
-  */
-  render: function(buffer) {
-    // If not invoked via a triple-mustache ({{{foo}}}), escape
-    // the content of the template.
-    var escape = get(this, 'isEscaped');
-
-    var shouldDisplay = get(this, 'shouldDisplayFunc'),
-        preserveContext = get(this, 'preserveContext'),
-        context = get(this, 'previousContext');
-
-    var inverseTemplate = get(this, 'inverseTemplate'),
-        displayTemplate = get(this, 'displayTemplate');
-
-    var result = get(this, 'normalizedValue');
-    this._lastNormalizedValue = result;
-
-    // First, test the conditional to see if we should
-    // render the template or not.
-    if (shouldDisplay(result)) {
-      set(this, 'template', displayTemplate);
-
-      // If we are preserving the context (for example, if this
-      // is an #if block), call the template with the same object.
-      if (preserveContext) {
-        set(this, '_context', context);
-      } else {
-      // Otherwise, determine if this is a block bind or not.
-      // If so, pass the specified object to the template
-        if (displayTemplate) {
-          set(this, '_context', result);
-        } else {
-        // This is not a bind block, just push the result of the
-        // expression to the render context and return.
-          if (result === null || result === undefined) {
-            result = "";
-          } else if (!(result instanceof Handlebars.SafeString)) {
-            result = String(result);
-          }
-
-          if (escape) { result = Handlebars.Utils.escapeExpression(result); }
-          buffer.push(result);
-          return;
-        }
-      }
-    } else if (inverseTemplate) {
-      set(this, 'template', inverseTemplate);
-
-      if (preserveContext) {
-        set(this, '_context', context);
-      } else {
-        set(this, '_context', result);
-      }
-    } else {
-      set(this, 'template', function() { return ''; });
-    }
-
-    return this._super(buffer);
-  }
-});
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:   Ember Handlebars Views
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-var get = Ember.get, set = Ember.set, fmt = Ember.String.fmt;
-var getPath = Ember.Handlebars.getPath, normalizePath = Ember.Handlebars.normalizePath;
-var forEach = Ember.ArrayPolyfills.forEach;
-
-var EmberHandlebars = Ember.Handlebars, helpers = EmberHandlebars.helpers;
-
-// Binds a property into the DOM. This will create a hook in DOM that the
-// KVO system will look for and update if the property changes.
-/** @private */
-function bind(property, options, preserveContext, shouldDisplay, valueNormalizer) {
-  var data = options.data,
-      fn = options.fn,
-      inverse = options.inverse,
-      view = data.view,
-      currentContext = this,
-      pathRoot, path, normalized;
-
-  normalized = normalizePath(currentContext, property, data);
-
-  pathRoot = normalized.root;
-  path = normalized.path;
-
-  // Set up observers for observable objects
-  if ('object' === typeof this) {
-    // Create the view that will wrap the output of this template/property
-    // and add it to the nearest view's childViews array.
-    // See the documentation of Ember._HandlebarsBoundView for more.
-    var bindView = view.createChildView(Ember._HandlebarsBoundView, {
-      preserveContext: preserveContext,
-      shouldDisplayFunc: shouldDisplay,
-      valueNormalizerFunc: valueNormalizer,
-      displayTemplate: fn,
-      inverseTemplate: inverse,
-      path: path,
-      pathRoot: pathRoot,
-      previousContext: currentContext,
-      isEscaped: !options.hash.unescaped,
-      templateData: options.data
-    });
-
-    view.appendChild(bindView);
-
-    /** @private */
-    var observer = function() {
-      Ember.run.once(bindView, 'rerenderIfNeeded');
-    };
-
-    // Observes the given property on the context and
-    // tells the Ember._HandlebarsBoundView to re-render. If property
-    // is an empty string, we are printing the current context
-    // object ({{this}}) so updating it is not our responsibility.
-    if (path !== '') {
-      Ember.addObserver(pathRoot, path, observer);
-    }
-  } else {
-    // The object is not observable, so just render it out and
-    // be done with it.
-    data.buffer.push(getPath(pathRoot, path, options));
-  }
-}
-
-/**
-  '_triageMustache' is used internally to select between a binding and a helper for
-  the given context. Until this point, it would be hard to determine whether the
-  mustache is a property reference or a regular helper reference. This triage
-  helper resolves that.
-
-  It would not typically be invoked directly.
-
-  @private
-  @name Handlebars.helpers._triageMustache
-  @param {String} property Property/helperID to triage
-  @param {Function} fn Context to provide for rendering
-  @returns {String} HTML string
-*/
-EmberHandlebars.registerHelper('_triageMustache', function(property, fn) {
-  Ember.assert("You cannot pass more than one argument to the _triageMustache helper", arguments.length <= 2);
-  if (helpers[property]) {
-    return helpers[property].call(this, fn);
-  }
-  else {
-    return helpers.bind.apply(this, arguments);
-  }
-});
-
-/**
-  `bind` can be used to display a value, then update that value if it
-  changes. For example, if you wanted to print the `title` property of
-  `content`:
-
-      {{bind "content.title"}}
-
-  This will return the `title` property as a string, then create a new
-  observer at the specified path. If it changes, it will update the value in
-  the DOM. Note that if you need to support IE7 and IE8 you must modify the
-  model object's properties using Ember.get() and Ember.set() for this to work, as
-  it relies on Ember's KVO system. For all other browsers this will be handled
-  for you automatically.
-
-  @private
-  @name Handlebars.helpers.bind
-  @param {String} property Property to bind
-  @param {Function} fn Context to provide for rendering
-  @returns {String} HTML string
-*/
-EmberHandlebars.registerHelper('bind', function(property, fn) {
-  Ember.assert("You cannot pass more than one argument to the bind helper", arguments.length <= 2);
-
-  var context = (fn.contexts && fn.contexts[0]) || this;
-
-  return bind.call(context, property, fn, false, function(result) {
-    return !Ember.none(result);
-  });
-});
-
-/**
-  Use the `boundIf` helper to create a conditional that re-evaluates
-  whenever the bound value changes.
-
-      {{#boundIf "content.shouldDisplayTitle"}}
-        {{content.title}}
-      {{/boundIf}}
-
-  @private
-  @name Handlebars.helpers.boundIf
-  @param {String} property Property to bind
-  @param {Function} fn Context to provide for rendering
-  @returns {String} HTML string
-*/
-EmberHandlebars.registerHelper('boundIf', function(property, fn) {
-  var context = (fn.contexts && fn.contexts[0]) || this;
-  var func = function(result) {
-    if (Ember.typeOf(result) === 'array') {
-      return get(result, 'length') !== 0;
-    } else {
-      return !!result;
-    }
-  };
-
-  return bind.call(context, property, fn, true, func, func);
-});
-
-/**
-  @name Handlebars.helpers.with
-  @param {Function} context
-  @param {Hash} options
-  @returns {String} HTML string
-*/
-EmberHandlebars.registerHelper('with', function(context, options) {
-  if (arguments.length === 4) {
-    var keywordName, path, rootPath, normalized;
-
-    Ember.assert("If you pass more than one argument to the with helper, it must be in the form #with foo as bar", arguments[1] === "as");
-    options = arguments[3];
-    keywordName = arguments[2];
-    path = arguments[0];
-
-    Ember.assert("You must pass a block to the with helper", options.fn && options.fn !== Handlebars.VM.noop);
-
-    if (Ember.isGlobalPath(path)) {
-      Ember.bind(options.data.keywords, keywordName, path);
-    } else {
-      normalized = normalizePath(this, path, options.data);
-      path = normalized.path;
-      rootPath = normalized.root;
-
-      // This is a workaround for the fact that you cannot bind separate objects
-      // together. When we implement that functionality, we should use it here.
-      var contextKey = Ember.$.expando + Ember.guidFor(rootPath);
-      options.data.keywords[contextKey] = rootPath;
-
-      // if the path is '' ("this"), just bind directly to the current context
-      var contextPath = path ? contextKey + '.' + path : contextKey;
-      Ember.bind(options.data.keywords, keywordName, contextPath);
-    }
-
-    return bind.call(this, path, options.fn, true, function(result) {
-      return !Ember.none(result);
-    });
-  } else {
-    Ember.assert("You must pass exactly one argument to the with helper", arguments.length === 2);
-    Ember.assert("You must pass a block to the with helper", options.fn && options.fn !== Handlebars.VM.noop);
-    return helpers.bind.call(options.contexts[0], context, options);
-  }
-});
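-
-// Illustrative template usage of the keyword form handled above (not part of
-// the original source):
-//
-//     {{#with person as p}}
-//       {{p.firstName}} {{p.lastName}}
-//     {{/with}}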
-
-
-/**
-  @name Handlebars.helpers.if
-  @param {Function} context
-  @param {Hash} options
-  @returns {String} HTML string
-*/
-EmberHandlebars.registerHelper('if', function(context, options) {
-  Ember.assert("You must pass exactly one argument to the if helper", arguments.length === 2);
-  Ember.assert("You must pass a block to the if helper", options.fn && options.fn !== Handlebars.VM.noop);
-
-  return helpers.boundIf.call(options.contexts[0], context, options);
-});
-
-/**
-  @name Handlebars.helpers.unless
-  @param {Function} context
-  @param {Hash} options
-  @returns {String} HTML string
-*/
-EmberHandlebars.registerHelper('unless', function(context, options) {
-  Ember.assert("You must pass exactly one argument to the unless helper", arguments.length === 2);
-  Ember.assert("You must pass a block to the unless helper", options.fn && options.fn !== Handlebars.VM.noop);
-
-  var fn = options.fn, inverse = options.inverse;
-
-  options.fn = inverse;
-  options.inverse = fn;
-
-  return helpers.boundIf.call(options.contexts[0], context, options);
-});
-
-/**
-  `bindAttr` allows you to create a binding between DOM element attributes and
-  Ember objects. For example:
-
-      <img {{bindAttr src="imageUrl" alt="imageTitle"}}>
-
-  @name Handlebars.helpers.bindAttr
-  @param {Hash} options
-  @returns {String} HTML string
-*/
-EmberHandlebars.registerHelper('bindAttr', function(options) {
-
-  var attrs = options.hash;
-
-  Ember.assert("You must specify at least one hash argument to bindAttr", !!Ember.keys(attrs).length);
-
-  var view = options.data.view;
-  var ret = [];
-  var ctx = this;
-
-  // Generate a unique id for this element. This will be added as a
-  // data attribute to the element so it can be looked up when
-  // the bound property changes.
-  var dataId = ++Ember.$.uuid;
-
-  // Handle classes differently, as we can bind multiple classes
-  var classBindings = attrs['class'];
-  if (classBindings !== null && classBindings !== undefined) {
-    var classResults = EmberHandlebars.bindClasses(this, classBindings, view, dataId, options);
-    ret.push('class="' + Handlebars.Utils.escapeExpression(classResults.join(' ')) + '"');
-    delete attrs['class'];
-  }
-
-  var attrKeys = Ember.keys(attrs);
-
-  // For each attribute passed, create an observer and emit the
-  // current value of the property as an attribute.
-  forEach.call(attrKeys, function(attr) {
-    var path = attrs[attr],
-        pathRoot, normalized;
-
-    Ember.assert(fmt("You must provide a String for a bound attribute, not %@", [path]), typeof path === 'string');
-
-    normalized = normalizePath(ctx, path, options.data);
-
-    pathRoot = normalized.root;
-    path = normalized.path;
-
-    var value = (path === 'this') ? pathRoot : getPath(pathRoot, path, options),
-        type = Ember.typeOf(value);
-
-    Ember.assert(fmt("Attributes must be numbers, strings or booleans, not %@", [value]), value === null || value === undefined || type === 'number' || type === 'string' || type === 'boolean');
-
-    var observer, invoker;
-
-    /** @private */
-    observer = function observer() {
-      var result = getPath(pathRoot, path, options);
-
-      Ember.assert(fmt("Attributes must be numbers, strings or booleans, not %@", [result]), result === null || result === undefined || typeof result === 'number' || typeof result === 'string' || typeof result === 'boolean');
-
-      var elem = view.$("[data-bindattr-" + dataId + "='" + dataId + "']");
-
-      // If we aren't able to find the element, it means the element
-      // to which we were bound has been removed from the view.
-      // In that case, we can assume the template has been re-rendered
-      // and we need to clean up the observer.
-      if (!elem || elem.length === 0) {
-        Ember.removeObserver(pathRoot, path, invoker);
-        return;
-      }
-
-      Ember.View.applyAttributeBindings(elem, attr, result);
-    };
-
-    /** @private */
-    invoker = function() {
-      Ember.run.once(observer);
-    };
-
-    // Add an observer to the view for when the property changes.
-    // When the observer fires, find the element using the
-    // unique data id and update the attribute to the new value.
-    if (path !== 'this') {
-      Ember.addObserver(pathRoot, path, invoker);
-    }
-
-    // if this changes, also change the logic in ember-views/lib/views/view.js
-    if ((type === 'string' || (type === 'number' && !isNaN(value)))) {
-      ret.push(attr + '="' + Handlebars.Utils.escapeExpression(value) + '"');
-    } else if (value && type === 'boolean') {
-      // The developer controls the attr name, so it should always be safe
-      ret.push(attr + '="' + attr + '"');
-    }
-  }, this);
-
-  // Add the unique identifier
-  // NOTE: We use all lower-case since Firefox has problems with mixed case in SVG
-  ret.push('data-bindattr-' + dataId + '="' + dataId + '"');
-  return new EmberHandlebars.SafeString(ret.join(' '));
-});
-
-/**
-  Helper that, given a space-separated string of property paths and a context,
-  returns an array of class names. Calling this method also has the side
-  effect of setting up observers at those property paths, such that if they
-  change, the correct class name will be reapplied to the DOM element.
-
-  For example, if you pass the string "fooBar", it will first look up the
-  "fooBar" value of the context. If that value is true, it will add the
-  "foo-bar" class to the current element (i.e., the dasherized form of
-  "fooBar"). If the value is a string, it will add that string as the class.
-  Otherwise, it will not add any new class name.
-
-  @param {Ember.Object} context
-    The context from which to look up properties
-
-  @param {String} classBindings
-    A string, space-separated, of class bindings to use
-
-  @param {Ember.View} view
-    The view in which observers should look for the element to update
-
-  @param {String} bindAttrId
-    Optional bindAttr id used to look up elements
-
-  @returns {Array} An array of class names to add
-*/
-EmberHandlebars.bindClasses = function(context, classBindings, view, bindAttrId, options) {
-  var ret = [], newClass, value, elem;
-
-  // Helper method to retrieve the property from the context and
-  // determine which class string to return, based on whether it is
-  // a Boolean or not.
-  var classStringForPath = function(root, parsedPath, options) {
-    var val,
-        path = parsedPath.path;
-
-    if (path === 'this') {
-      val = root;
-    } else if (path === '') {
-      val = true;
-    } else {
-      val = getPath(root, path, options);
-    }
-
-    return Ember.View._classStringForValue(path, val, parsedPath.className, parsedPath.falsyClassName);
-  };
-
-  // For each property passed, loop through and setup
-  // an observer.
-  forEach.call(classBindings.split(' '), function(binding) {
-
-    // Variable in which the old class value is saved. The observer function
-    // closes over this variable, so it knows which string to remove when
-    // the property changes.
-    var oldClass;
-
-    var observer, invoker;
-
-    var parsedPath = Ember.View._parsePropertyPath(binding),
-        path = parsedPath.path,
-        pathRoot = context,
-        normalized;
-
-    if (path !== '' && path !== 'this') {
-      normalized = normalizePath(context, path, options.data);
-
-      pathRoot = normalized.root;
-      path = normalized.path;
-    }
-
-    // Set up an observer on the context. If the property changes, toggle the
-    // class name.
-    /** @private */
-    observer = function() {
-      // Get the current value of the property
-      newClass = classStringForPath(pathRoot, parsedPath, options);
-      elem = bindAttrId ? view.$("[data-bindattr-" + bindAttrId + "='" + bindAttrId + "']") : view.$();
-
-      // If we can't find the element anymore, a parent template has been
-      // re-rendered and we've been nuked. Remove the observer.
-      if (!elem || elem.length === 0) {
-        Ember.removeObserver(pathRoot, path, invoker);
-      } else {
-        // If we had previously added a class to the element, remove it.
-        if (oldClass) {
-          elem.removeClass(oldClass);
-        }
-
-        // If necessary, add a new class. Make sure we keep track of it so
-        // it can be removed in the future.
-        if (newClass) {
-          elem.addClass(newClass);
-          oldClass = newClass;
-        } else {
-          oldClass = null;
-        }
-      }
-    };
-
-    /** @private */
-    invoker = function() {
-      Ember.run.once(observer);
-    };
-
-    if (path !== '' && path !== 'this') {
-      Ember.addObserver(pathRoot, path, invoker);
-    }
-
-    // We've already setup the observer; now we just need to figure out the
-    // correct behavior right now on the first pass through.
-    value = classStringForPath(pathRoot, parsedPath, options);
-
-    if (value) {
-      ret.push(value);
-
-      // Make sure we save the current value so that it can be removed if the
-      // observer fires.
-      oldClass = value;
-    }
-  });
-
-  return ret;
-};
-
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:   Ember Handlebars Views
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-/*globals Handlebars */
-
-// TODO: Don't require the entire module
-var get = Ember.get, set = Ember.set;
-var PARENT_VIEW_PATH = /^parentView\./;
-var EmberHandlebars = Ember.Handlebars;
-var VIEW_PRESERVES_CONTEXT = Ember.VIEW_PRESERVES_CONTEXT;
-
-/** @private */
-EmberHandlebars.ViewHelper = Ember.Object.create({
-
-  propertiesFromHTMLOptions: function(options, thisContext) {
-    var hash = options.hash, data = options.data;
-    var extensions = {},
-        classes = hash['class'],
-        dup = false;
-
-    if (hash.id) {
-      extensions.elementId = hash.id;
-      dup = true;
-    }
-
-    if (classes) {
-      classes = classes.split(' ');
-      extensions.classNames = classes;
-      dup = true;
-    }
-
-    if (hash.classBinding) {
-      extensions.classNameBindings = hash.classBinding.split(' ');
-      dup = true;
-    }
-
-    if (hash.classNameBindings) {
-      if (extensions.classNameBindings === undefined) extensions.classNameBindings = [];
-      extensions.classNameBindings = extensions.classNameBindings.concat(hash.classNameBindings.split(' '));
-      dup = true;
-    }
-
-    if (hash.attributeBindings) {
-      Ember.assert("Setting 'attributeBindings' via Handlebars is not allowed. Please subclass Ember.View and set it there instead.");
-      extensions.attributeBindings = null;
-      dup = true;
-    }
-
-    if (dup) {
-      hash = Ember.$.extend({}, hash);
-      delete hash.id;
-      delete hash['class'];
-      delete hash.classBinding;
-    }
-
-    // Set the proper context for all bindings passed to the helper. This applies to regular attribute bindings
-    // as well as class name bindings. If the bindings are local, make them relative to the current context
-    // instead of the view.
-    var path;
-
-    // Evaluate the context of regular attribute bindings:
-    for (var prop in hash) {
-      if (!hash.hasOwnProperty(prop)) { continue; }
-
-      // Test if the property ends in "Binding"
-      if (Ember.IS_BINDING.test(prop) && typeof hash[prop] === 'string') {
-        path = this.contextualizeBindingPath(hash[prop], data);
-        if (path) { hash[prop] = path; }
-      }
-    }
-
-    // Evaluate the context of class name bindings:
-    if (extensions.classNameBindings) {
-      for (var b in extensions.classNameBindings) {
-        var full = extensions.classNameBindings[b];
-        if (typeof full === 'string') {
-          // Contextualize the path of classNameBinding so this:
-          //
-          //     classNameBinding="isGreen:green"
-          //
-          // is converted to this:
-          //
-          //     classNameBinding="bindingContext.isGreen:green"
-          var parsedPath = Ember.View._parsePropertyPath(full);
-          path = this.contextualizeBindingPath(parsedPath.path, data);
-          if (path) { extensions.classNameBindings[b] = path + parsedPath.classNames; }
-        }
-      }
-    }
-
-    // Make the current template context available to the view
-    // for the bindings set up above.
-    extensions.bindingContext = thisContext;
-
-    return Ember.$.extend(hash, extensions);
-  },
-
-  // Transform bindings from the current context to a context that can be evaluated within the view.
-  // Returns null if the path shouldn't be changed.
-  //
-  // TODO: consider the addition of a prefix that would allow this method to return `path`.
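-  //
-  // Examples (illustrative):
-  //   'this'    -> 'bindingContext'
-  //   'name'    -> 'bindingContext.name'
-  //   'App.foo' -> null (global path, left untouched)
-  //   'view'    -> 'templateData.keywords.view' (keyword)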
-  contextualizeBindingPath: function(path, data) {
-    var normalized = Ember.Handlebars.normalizePath(null, path, data);
-    if (normalized.isKeyword) {
-      return 'templateData.keywords.' + path;
-    } else if (Ember.isGlobalPath(path)) {
-      return null;
-    } else if (path === 'this') {
-      return 'bindingContext';
-    } else {
-      return 'bindingContext.' + path;
-    }
-  },
-
-  helper: function(thisContext, path, options) {
-    var inverse = options.inverse,
-        data = options.data,
-        view = data.view,
-        fn = options.fn,
-        hash = options.hash,
-        newView;
-
-    if ('string' === typeof path) {
-      newView = EmberHandlebars.getPath(thisContext, path, options);
-      Ember.assert("Unable to find view at path '" + path + "'", !!newView);
-    } else {
-      newView = path;
-    }
-
-    Ember.assert(Ember.String.fmt('You must pass a view class to the #view helper, not %@ (%@)', [path, newView]), Ember.View.detect(newView));
-
-    var viewOptions = this.propertiesFromHTMLOptions(options, thisContext);
-    var currentView = data.view;
-    viewOptions.templateData = options.data;
-
-    if (fn) {
-      Ember.assert("You cannot provide a template block if you also specified a templateName", !get(viewOptions, 'templateName') && !get(newView.proto(), 'templateName'));
-      viewOptions.template = fn;
-    }
-
-    // We only want to override the `_context` computed property if there is
-    // no specified controller. See View#_context for more information.
-    if (VIEW_PRESERVES_CONTEXT && !newView.proto().controller && !newView.proto().controllerBinding && !viewOptions.controller && !viewOptions.controllerBinding) {
-      viewOptions._context = thisContext;
-    }
-
-    currentView.appendChild(newView, viewOptions);
-  }
-});
-
-/**
-  `{{view}}` inserts a new instance of `Ember.View` into a template passing its options
-  to the `Ember.View`'s `create` method and using the supplied block as the view's own template.
-
-  An empty `<body>` and the following template:
-
-      <script type="text/x-handlebars">
-        A span:
-        {{#view tagName="span"}}
-          hello.
-        {{/view}}
-      </script>
-
-  Will result in HTML structure:
-
-      <body>
-        <!-- Note: the handlebars template script 
-             also results in a rendered Ember.View
-             which is the outer <div> here -->
-
-        <div class="ember-view">
-          A span:
-          <span id="ember1" class="ember-view">
-            hello.
-          </span>
-        </div>
-      </body>
-
-  ### parentView setting
-
-  The `parentView` property of the new `Ember.View` instance created through `{{view}}`
-  will be set to the `Ember.View` instance of the template where `{{view}}` was called.
-
-      aView = Ember.View.create({
-        template: Ember.Handlebars.compile("{{#view}} my parent: {{parentView.elementId}} {{/view}}")
-      })
-
-      aView.appendTo('body')
-    
-  Will result in HTML structure:
-
-      <div id="ember1" class="ember-view">
-        <div id="ember2" class="ember-view">
-          my parent: ember1
-        </div>
-      </div>
-
-  ### Setting CSS id and class attributes
-
-  The HTML `id` attribute can be set on the `{{view}}`'s resulting element with the `id` option.
-  This option will _not_ be passed to `Ember.View.create`.
-
-      <script type="text/x-handlebars">
-        {{#view tagName="span" id="a-custom-id"}}
-          hello.
-        {{/view}}
-      </script>
-
-  Results in the following HTML structure:
-
-      <div class="ember-view">
-        <span id="a-custom-id" class="ember-view">
-          hello.
-        </span>
-      </div>
-
-  The HTML `class` attribute can be set on the `{{view}}`'s resulting element with
-  the `class` or `classNameBindings` options. The `class` option
-  will directly set the CSS `class` attribute and will not be passed to
-  `Ember.View.create`. `classNameBindings` will be passed to `create` and use
-  `Ember.View`'s class name binding functionality:
-
-      <script type="text/x-handlebars">
-        {{#view tagName="span" class="a-custom-class"}}
-          hello.
-        {{/view}}
-      </script>
-
-  Results in the following HTML structure:
-
-      <div class="ember-view">
-        <span id="ember2" class="ember-view a-custom-class">
-          hello.
-        </span>
-      </div>
-
-  ### Supplying a different view class
-
-  `{{view}}` can take an optional first argument before its supplied options to specify a
-  path to a custom view class.
-
-      <script type="text/x-handlebars">
-        {{#view "MyApp.CustomView"}}
-          hello.
-        {{/view}}
-      </script>
-
-  The first argument can also be a relative path. Ember will search for the view class
-  starting at the `Ember.View` of the template where `{{view}}` was used, treating it as the root object:
-
-      MyApp = Ember.Application.create({})
-      MyApp.OuterView = Ember.View.extend({
-        innerViewClass: Ember.View.extend({
-          classNames: ['a-custom-view-class-as-property']
-        }),
-        template: Ember.Handlebars.compile('{{#view "innerViewClass"}} hi {{/view}}')
-      })
-
-      MyApp.OuterView.create().appendTo('body')
-
-  Will result in the following HTML:
-
-      <div id="ember1" class="ember-view">
-        <div id="ember2" class="ember-view a-custom-view-class-as-property"> 
-          hi
-        </div>
-      </div>
-
-  ### Blockless use
-
-  If you supply a custom `Ember.View` subclass that specifies its own template
-  or provide a `templateName` option to `{{view}}`, it can be used without supplying a block.
-  Attempts to use both a `templateName` option and supply a block will throw an error.
-
-      <script type="text/x-handlebars">
-        {{view "MyApp.ViewWithATemplateDefined"}}
-      </script>
-
-  ### viewName property
-
-  You can supply a `viewName` option to `{{view}}`. The `Ember.View` instance will
-  be referenced as a property of its parent view by this name.
-
-      aView = Ember.View.create({
-        template: Ember.Handlebars.compile('{{#view viewName="aChildByName"}} hi {{/view}}')
-      })
-
-      aView.appendTo('body')
-      aView.get('aChildByName') // the instance of Ember.View created by {{view}} helper
-
-  @name Handlebars.helpers.view
-  @param {String} path
-  @param {Hash} options
-  @returns {String} HTML string
-*/
-EmberHandlebars.registerHelper('view', function(path, options) {
-  Ember.assert("The view helper only takes a single argument", arguments.length <= 2);
-
-  // If no path is provided, treat path param as options.
-  if (path && path.data && path.data.isRenderData) {
-    options = path;
-    path = "Ember.View";
-  }
-
-  return EmberHandlebars.ViewHelper.helper(this, path, options);
-});
-
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:   Ember Handlebars Views
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-/*globals Handlebars */
-
-// TODO: Don't require all of this module
-var get = Ember.get, getPath = Ember.Handlebars.getPath, fmt = Ember.String.fmt;
-
-/**
-  `{{collection}}` is an `Ember.Handlebars` helper for adding instances of
-  `Ember.CollectionView` to a template.  See `Ember.CollectionView` for additional
-  information on how a `CollectionView` functions.
-
-  `{{collection}}`'s primary use is as a block helper with a `contentBinding` option
-  pointing towards an `Ember.Array`-compatible object.  An `Ember.View` instance will
-  be created for each item in its `content` property. Each view will have its own
-  `content` property set to the appropriate item in the collection.
-
-  The provided block will be applied as the template for each item's view.
-
-  Given an empty `<body>` the following template:
-
-      <script type="text/x-handlebars">
-        {{#collection contentBinding="App.items"}}
-          Hi {{content.name}}
-        {{/collection}}
-      </script>
-
-  And the following application code
-
-      App = Ember.Application.create()
-      App.items = [
-        Ember.Object.create({name: 'Dave'}),
-        Ember.Object.create({name: 'Mary'}),
-        Ember.Object.create({name: 'Sara'})
-      ]
-
-  Will result in the HTML structure below
-
-      <div class="ember-view">
-        <div class="ember-view">Hi Dave</div>
-        <div class="ember-view">Hi Mary</div>
-        <div class="ember-view">Hi Sara</div>
-      </div>
-
-  ### Blockless Use
-
-  If you provide an `itemViewClass` option that has its own `template`, you can omit
-  the block.
-
-  The following template:
-
-      <script type="text/x-handlebars">
-        {{collection contentBinding="App.items" itemViewClass="App.AnItemView"}}
-      </script>
-
-  And application code
-
-      App = Ember.Application.create()
-      App.items = [
-        Ember.Object.create({name: 'Dave'}),
-        Ember.Object.create({name: 'Mary'}),
-        Ember.Object.create({name: 'Sara'})
-      ]
-
-      App.AnItemView = Ember.View.extend({
-        template: Ember.Handlebars.compile("Greetings {{content.name}}")
-      })
-
-  Will result in the HTML structure below
-
-      <div class="ember-view">
-        <div class="ember-view">Greetings Dave</div>
-        <div class="ember-view">Greetings Mary</div>
-        <div class="ember-view">Greetings Sara</div>
-      </div>
-
-  ### Specifying a CollectionView subclass
-
-  By default the `{{collection}}` helper will create an instance of `Ember.CollectionView`.
-  You can supply an `Ember.CollectionView` subclass to the helper by passing it
-  as the first argument:
-
-      <script type="text/x-handlebars">
-        {{#collection App.MyCustomCollectionClass contentBinding="App.items"}}
-          Hi {{content.name}}
-        {{/collection}}
-      </script>
-
-
-  ### Forwarded `item.*`-named Options
-
-  As with the `{{view}}` helper, options passed to the `{{collection}}` helper will be set on
-  `item` will be applied to the views rendered for each item (note the camelcasing):
-
-        <script type="text/x-handlebars">
-          {{#collection contentBinding="App.items"
-                        itemTagName="p"
-                        itemClassNames="greeting"}}
-            Howdy {{content.name}}
-          {{/collection}}
-        </script>
-
-  Will result in the following HTML structure:
-
-      <div class="ember-view">
-        <p class="ember-view greeting">Howdy Dave</p>
-        <p class="ember-view greeting">Howdy Mary</p>
-        <p class="ember-view greeting">Howdy Sara</p>
-      </div>
-  
-  @name Handlebars.helpers.collection
-  @param {String} path
-  @param {Hash} options
-  @returns {String} HTML string
-*/
-Ember.Handlebars.registerHelper('collection', function(path, options) {
-  // If no path is provided, treat path param as options.
-  if (path && path.data && path.data.isRenderData) {
-    options = path;
-    path = undefined;
-    Ember.assert("You cannot pass more than one argument to the collection helper", arguments.length === 1);
-  } else {
-    Ember.assert("You cannot pass more than one argument to the collection helper", arguments.length === 2);
-  }
-
-  var fn = options.fn;
-  var data = options.data;
-  var inverse = options.inverse;
-
-  // If passed a path string, convert that into an object.
-  // Otherwise, just default to the standard class.
-  var collectionClass;
-  collectionClass = path ? getPath(this, path, options) : Ember.CollectionView;
-  Ember.assert(fmt("%@ #collection: Could not find collection class %@", [data.view, path]), !!collectionClass);
-
-  var hash = options.hash, itemHash = {}, match;
-
-  // Extract item view class if provided else default to the standard class
-  var itemViewClass, itemViewPath = hash.itemViewClass;
-  var collectionPrototype = collectionClass.proto();
-  delete hash.itemViewClass;
-  itemViewClass = itemViewPath ? getPath(collectionPrototype, itemViewPath, options) : collectionPrototype.itemViewClass;
-  Ember.assert(fmt("%@ #collection: Could not find itemViewClass %@", [data.view, itemViewPath]), !!itemViewClass);
-
-  // Go through options passed to the {{collection}} helper and extract options
-  // that configure item views instead of the collection itself.
-  for (var prop in hash) {
-    if (hash.hasOwnProperty(prop)) {
-      match = prop.match(/^item(.)(.*)$/);
-
-      if(match) {
-        // Convert itemShouldFoo -> shouldFoo
-        itemHash[match[1].toLowerCase() + match[2]] = hash[prop];
-        // Delete from hash as this will end up getting passed to the
-        // {{view}} helper method.
-        delete hash[prop];
-      }
-    }
-  }
-
-  var tagName = hash.tagName || collectionPrototype.tagName;
-
-  if (fn) {
-    itemHash.template = fn;
-    delete options.fn;
-  }
-
-  var emptyViewClass;
-  if (inverse && inverse !== Handlebars.VM.noop) {
-    emptyViewClass = get(collectionPrototype, 'emptyViewClass');
-    emptyViewClass = emptyViewClass.extend({
-          template: inverse,
-          tagName: itemHash.tagName
-    });
-  } else if (hash.emptyViewClass) {
-    emptyViewClass = getPath(this, hash.emptyViewClass, options);
-  }
-  hash.emptyView = emptyViewClass;
-
-  if (hash.eachHelper === 'each') {
-    itemHash._context = Ember.computed(function() {
-      return get(this, 'content');
-    }).property('content');
-    delete hash.eachHelper;
-  }
-
-  var viewOptions = Ember.Handlebars.ViewHelper.propertiesFromHTMLOptions({ data: data, hash: itemHash }, this);
-  hash.itemViewClass = itemViewClass.extend(viewOptions);
-
-  return Ember.Handlebars.helpers.view.call(this, collectionClass, options);
-});
-
-
-
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:   Ember Handlebars Views
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-/*globals Handlebars */
-var getPath = Ember.Handlebars.getPath;
-
-/**
-  `unbound` allows you to output a property without binding. *Important:* The
-  output will not be updated if the property changes. Use with caution.
-
-      <div>{{unbound somePropertyThatDoesntChange}}</div>
-
-  @name Handlebars.helpers.unbound
-  @param {String} property
-  @returns {String} HTML string
-*/
-Ember.Handlebars.registerHelper('unbound', function(property, fn) {
-  var context = (fn.contexts && fn.contexts[0]) || this;
-  return getPath(context, property, fn);
-});
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:   Ember Handlebars Views
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-
-/*jshint debug:true*/
-var getPath = Ember.Handlebars.getPath, normalizePath = Ember.Handlebars.normalizePath;
-
-/**
-  `log` allows you to output the value of a variable in the current rendering
-  context.
-
-      {{log myVariable}}
-
-  @name Handlebars.helpers.log
-  @param {String} property
-*/
-Ember.Handlebars.registerHelper('log', function(property, options) {
-  var context = (options.contexts && options.contexts[0]) || this,
-      normalized = normalizePath(context, property, options.data),
-      pathRoot = normalized.root,
-      path = normalized.path,
-      value = (path === 'this') ? pathRoot : getPath(pathRoot, path, options);
-  Ember.Logger.log(value);
-});
-
-/**
-  The `debugger` helper executes the `debugger` statement in the current
-  context.
-
-      {{debugger}}
-
-  @name Handlebars.helpers.debugger
-  @param {String} property
-*/
-Ember.Handlebars.registerHelper('debugger', function() {
-  debugger;
-});
-
-})();
-
-
-
-(function() {
-var get = Ember.get, set = Ember.set;
-
-Ember.Handlebars.EachView = Ember.CollectionView.extend(Ember._Metamorph, {
-  itemViewClass: Ember._MetamorphView,
-  emptyViewClass: Ember._MetamorphView,
-
-  createChildView: function(view, attrs) {
-    view = this._super(view, attrs);
-
-    // At the moment, if a container view subclass wants
-    // to insert keywords, it is responsible for cloning
-    // the keywords hash. This will be fixed momentarily.
-    var keyword = get(this, 'keyword');
-
-    if (keyword) {
-      var data = get(view, 'templateData');
-
-      data = Ember.copy(data);
-      data.keywords = view.cloneKeywords();
-      set(view, 'templateData', data);
-
-      var content = get(view, 'content');
-
-      // In this case, we do not bind, because the `content` of
-      // a #each item cannot change.
-      data.keywords[keyword] = content;
-    }
-
-    return view;
-  }
-});
-
-Ember.Handlebars.registerHelper('each', function(path, options) {
-  if (arguments.length === 4) {
-    Ember.assert("If you pass more than one argument to the each helper, it must be in the form #each foo in bar", arguments[1] === "in");
-
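-    // For example (illustrative), {{#each person in people}} arrives as:
-    //   arguments[0] === "person", arguments[1] === "in",
-    //   arguments[2] === "people", arguments[3] === options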
-    var keywordName = arguments[0];
-
-    options = arguments[3];
-    path = arguments[2];
-    if (path === '') { path = "this"; }
-
-    options.hash.keyword = keywordName;
-  } else {
-    options.hash.eachHelper = 'each';
-  }
-
-  Ember.assert("You must pass a block to the each helper", options.fn && options.fn !== Handlebars.VM.noop);
-
-  options.hash.contentBinding = path;
-  // Set up emptyView as a metamorph with no tag
-  //options.hash.emptyViewClass = Ember._MetamorphView;
-
-  return Ember.Handlebars.helpers.collection.call(this, 'Ember.Handlebars.EachView', options);
-});
-
-})();
-
-
-
-(function() {
-/**
-  `template` allows you to render a template from inside another template.
-  This allows you to re-use the same template in multiple places. For example:
-
-      <script type="text/x-handlebars">
-        {{#with loggedInUser}}
-          Last Login: {{lastLogin}}
-          User Info: {{template "user_info"}}
-        {{/with}}
-      </script>
-
-      <script type="text/x-handlebars" data-template-name="user_info">
-        Name: <em>{{name}}</em>
-        Karma: <em>{{karma}}</em>
-      </script>
-
-  This helper looks for templates in the global Ember.TEMPLATES hash. If you
-  add &lt;script&gt; tags to your page with the `data-template-name` attribute set,
-  they will be compiled and placed in this hash automatically.
-
-  You can also manually register templates by adding them to the hash:
-
-      Ember.TEMPLATES["my_cool_template"] = Ember.Handlebars.compile('<b>{{user}}</b>');
-
-  @name Handlebars.helpers.template
-  @param {String} templateName the template to render
-*/
-
-Ember.Handlebars.registerHelper('template', function(name, options) {
-  var template = Ember.TEMPLATES[name];
-
-  Ember.assert("Unable to find template with name '"+name+"'.", !!template);
-
-  Ember.TEMPLATES[name](this, { data: options.data });
-});
-
-})();
-
-
-
-(function() {
-var EmberHandlebars = Ember.Handlebars,
-    getPath = EmberHandlebars.getPath,
-    get = Ember.get,
-    a_slice = Array.prototype.slice;
-
-var ActionHelper = EmberHandlebars.ActionHelper = {
-  registeredActions: {}
-};
-
-ActionHelper.registerAction = function(actionName, options) {
-  var actionId = (++Ember.$.uuid).toString();
-
-  ActionHelper.registeredActions[actionId] = {
-    eventName: options.eventName,
-    handler: function(event) {
-      var modifier = event.shiftKey || event.metaKey || event.altKey || event.ctrlKey,
-          secondaryClick = event.which > 1, // IE9 may return undefined
-          nonStandard = modifier || secondaryClick;
-
-      if (options.link && nonStandard) {
-        // Allow the browser to handle special link clicks normally
-        return;
-      }
-
-      event.preventDefault();
-
-      event.view = options.view;
-
-      if (options.hasOwnProperty('context')) {
-        event.context = options.context;
-      }
-
-      if (options.hasOwnProperty('contexts')) {
-        event.contexts = options.contexts;
-      }
-
-      var target = options.target;
-
-      // Check for StateManager (or compatible object)
-      if (target.isState && typeof target.send === 'function') {
-        return target.send(actionName, event);
-      } else {
-        Ember.assert(Ember.String.fmt('Target %@ does not have action %@', [target, actionName]), target[actionName]);
-        return target[actionName].call(target, event);
-      }
-    }
-  };
-
-  options.view.on('willRerender', function() {
-    delete ActionHelper.registeredActions[actionId];
-  });
-
-  return actionId;
-};
-
-/**
-  The `{{action}}` helper registers an HTML element within a template for
-  DOM event handling and forwards that interaction to the Application's router,
-  the template's `Ember.View` instance, or supplied `target` option (see 'Specifying a Target').
-  
-  User interaction with that element will invoke the supplied action name on
-  the appropriate target.
-
-  Given the following Handlebars template on the page
-
-      <script type="text/x-handlebars" data-template-name='a-template'>
-        <div {{action anActionName}}>
-          click me
-        </div>
-      </script>
-
-  And application code
-
-      AView = Ember.View.extend({
-        templateName: 'a-template',
-        anActionName: function(event){}
-      });
-
-      aView = AView.create();
-      aView.appendTo('body');
-
-  Will result in the following rendered HTML
-
-      <div class="ember-view">
-        <div data-ember-action="1">
-          click me
-        </div>
-      </div>
-
-  Clicking "click me" will trigger the `anActionName` method of the `aView`
-  object with a `jQuery.Event` object as its argument. The `jQuery.Event`
-  object will be extended to include a `view` property that is set to the
-  original view interacted with (in this case the `aView` object).
-
-  ### Event Propagation
-
-  Events triggered through the action helper will automatically have
-  `.preventDefault()` called on them. You do not need to do so in your event
-  handlers. To stop propagation of the event, simply return `false` from your
-  handler.
-
-  If you need the default handler to trigger you should either register your
-  own event handler, or use event methods on your view class. See Ember.View
-  'Responding to Browser Events' for more information.
-  
-  ### Specifying DOM event type
-
-  By default the `{{action}}` helper registers for DOM `click` events. You can
-  supply an `on` option to the helper to specify a different DOM event name:
-
-      <script type="text/x-handlebars" data-template-name='a-template'>
-        <div {{action anActionName on="doubleClick"}}>
-          click me
-        </div>
-      </script>
-
-  See Ember.View 'Responding to Browser Events' for a list of
-  acceptable DOM event names.
-
-  Because `{{action}}` depends on Ember's event dispatch system it will only
-  function if an `Ember.EventDispatcher` instance is available. An
-  `Ember.EventDispatcher` instance will be created when a new
-  `Ember.Application` is created. Having an instance of `Ember.Application`
-  will satisfy this requirement.
-  
-  
-  ### Specifying a Target
-
-  There are several possible target objects for `{{action}}` helpers:
-  
-  In a typical `Ember.Router`-backed Application where views are managed
-  through use of the `{{outlet}}` helper, actions will be forwarded to the
-  current state of the Application's Router. See Ember.Router 'Responding
-  to User-initiated Events' for more information.
-  
-  If you manually set the `target` property on the controller of a template's
-  `Ember.View` instance, the specified `controller.target` will become the target
-  for any actions. Likely custom values for a controller's `target` are the
-  controller itself or a StateManager other than the Application's Router.
-  
-  If the template's view lacks a controller property, the view itself is the target.
-  
-  Finally, a `target` option can be provided to the helper to change which object
-  will receive the method call. This option must be a string representing a
-  path to an object:
-
-      <script type="text/x-handlebars" data-template-name='a-template'>
-        <div {{action anActionName target="MyApplication.someObject"}}>
-          click me
-        </div>
-      </script>
-
-  Clicking "click me" in the rendered HTML of the above template will trigger
-  the `anActionName` method of the object at `MyApplication.someObject`.
-  The first argument to this method will be a `jQuery.Event` extended to
-  include a `view` property that is set to the original view interacted with.
-
-  A path relative to the template's `Ember.View` instance can also be used as
-  a target:
-
-      <script type="text/x-handlebars" data-template-name='a-template'>
-        <div {{action anActionName target="parentView"}}>
-          click me
-        </div>
-      </script>
-
-  Clicking "click me" in the rendered HTML of the above template will trigger
-  the `anActionName` method of the view's parent view.
-
-  The `{{action}}` helper is `Ember.StateManager` aware. If the target of the
-  action is an `Ember.StateManager` instance `{{action}}` will use the `send`
-  functionality of StateManagers. The documentation for `Ember.StateManager`
-  has additional information about this use.
-
-  If an action's target does not implement a method that matches the supplied
-  action name an error will be thrown.
-
-      <script type="text/x-handlebars" data-template-name='a-template'>
-        <div {{action aMethodNameThatIsMissing}}>
-          click me
-        </div>
-      </script>
-
-  With the following application code
-
-      AView = Ember.View.extend({
-        templateName: 'a-template',
-        // note: no method 'aMethodNameThatIsMissing'
-        anActionName: function(event){}
-      });
-
-      aView = AView.create();
-      aView.appendTo('body');
-
-  Will throw `Uncaught TypeError: Cannot call method 'call' of undefined` when
-  "click me" is clicked.
-  
-  ### Specifying a context
-
-  By default the `{{action}}` helper passes the current Handlebars context
-  along in the `jQuery.Event` object. You may specify an alternate object to
-  pass as the context by providing a property path:
-
-      <script type="text/x-handlebars" data-template-name='a-template'>
-        {{#each person in people}}
-          <div {{action edit person}}>
-            click me
-          </div>
-        {{/each}}
-      </script>
-
-  @name Handlebars.helpers.action
-  @param {String} actionName
-  @param {Object...} contexts
-  @param {Hash} options
-*/
-EmberHandlebars.registerHelper('action', function(actionName) {
-  var options = arguments[arguments.length - 1],
-      contexts = a_slice.call(arguments, 1, -1);
-
-  var hash = options.hash,
-      view = options.data.view,
-      target, controller, link;
-
-  // create a hash to pass along to registerAction
-  var action = {
-    eventName: hash.on || "click"
-  };
-
-  action.view = view = get(view, 'concreteView');
-
-  if (hash.target) {
-    target = getPath(this, hash.target, options);
-  } else if (controller = options.data.keywords.controller) {
-    target = get(controller, 'target');
-  }
-
-  action.target = target = target || view;
-
-  if (contexts.length) {
-    action.contexts = contexts = Ember.EnumerableUtils.map(contexts, function(context) {
-      return getPath(this, context, options);
-    }, this);
-    action.context = contexts[0];
-  }
-
-  var output = [], url;
-
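-  // If the action is rendered on a link-like element whose target can
-  // compute a URL for this event, emit a real href; registerAction then
-  // lets modifier and middle clicks fall through to the browser.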
-  if (hash.href && target.urlForEvent) {
-    url = target.urlForEvent.apply(target, [actionName].concat(contexts));
-    output.push('href="' + url + '"');
-    action.link = true;
-  }
-
-  var actionId = ActionHelper.registerAction(actionName, action);
-  output.push('data-ember-action="' + actionId + '"');
-
-  return new EmberHandlebars.SafeString(output.join(" "));
-});
-
-})();
-
-
-
-(function() {
-var get = Ember.get, set = Ember.set;
-
-/**
-
-  When used in a Handlebars template that is assigned to an `Ember.View` instance's
-  `layout` property, Ember will render the layout template first, inserting the view's
-  own rendered output at the `{{ yield }}` location.
-
-  An empty `<body>` and the following application code:
-
-      AView = Ember.View.extend({
-        classNames: ['a-view-with-layout'],
-        layout: Ember.Handlebars.compile('<div class="wrapper">{{ yield }}</div>'),
-        template: Ember.Handlebars.compile('<span>I am wrapped</span>')
-      })
-
-      aView = AView.create()
-      aView.appendTo('body')
-
-  Will result in the following HTML output:
-
-      <body>
-        <div class='ember-view a-view-with-layout'>
-          <div class="wrapper">
-            <span>I am wrapped</span>
-          </div>
-        </div>
-      </body>
-
-  The yield helper cannot be used outside of a template assigned to an `Ember.View`'s `layout` property
-  and will throw an error if attempted.
-
-      BView = Ember.View.extend({
-        classNames: ['a-view-with-layout'],
-        template: Ember.Handlebars.compile('{{yield}}')
-      })
-
-      bView = BView.create()
-      bView.appendTo('body')
-
-      // throws
-      // Uncaught Error: assertion failed: You called yield in a template that was not a layout
-
-  @name Handlebars.helpers.yield
-  @param {Hash} options
-  @returns {String} HTML string
-*/
-Ember.Handlebars.registerHelper('yield', function(options) {
-  var view = options.data.view, template;
-
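-  // Walk up the view hierarchy to the view whose layout is currently
-  // rendering; its own `template` is what {{yield}} emits.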
-  while (view && !get(view, 'layout')) {
-    view = get(view, 'parentView');
-  }
-
-  Ember.assert("You called yield in a template that was not a layout", !!view);
-
-  template = get(view, 'template');
-
-  if (template) { template(this, options); }
-});
-
-})();
-
-
-
-(function() {
-/**
-  The `outlet` helper allows you to specify that the current
-  view's controller will fill in the view for a given area.
-
-      {{outlet}}
-
-  By default, when the current controller's `view`
-  property changes, the outlet will replace its current
-  view with the new view.
-
-      controller.set('view', someView);
-
-  You can also specify a particular name, other than view:
-
-      {{outlet masterView}}
-      {{outlet detailView}}
-
-  Then, you can control several outlets from a single
-  controller:
-
-      controller.set('masterView', postsView);
-      controller.set('detailView', postView);
-
-  @name Handlebars.helpers.outlet
-  @param {String} property the property on the controller
-    that holds the view for this outlet
-*/
-Ember.Handlebars.registerHelper('outlet', function(property, options) {
-  if (property && property.data && property.data.isRenderData) {
-    options = property;
-    property = 'view';
-  }
-
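-  // e.g. {{outlet masterView}} binds this container's currentView to
-  // controller.masterView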
-  options.hash.currentViewBinding = "controller." + property;
-
-  return Ember.Handlebars.helpers.view.call(this, Ember.ContainerView, options);
-});
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:   Ember Handlebars Views
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:   Ember Handlebars Views
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:   Ember Handlebars Views
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-var set = Ember.set, get = Ember.get;
-
-/**
-  @class
-
-  Creates an HTML input of type 'checkbox' with HTML-related properties
-  applied directly to the input.
-
-      {{view Ember.Checkbox classNames="application-specific-checkbox"}}
-
-      <input id="ember1" class="ember-view ember-checkbox application-specific-checkbox" type="checkbox">
-
-  You can add a `label` tag yourself in the template where the Ember.Checkbox is being used.
-
-      <label>
-        Some Title
-        {{view Ember.Checkbox classNames="application-specific-checkbox"}}
-      </label>
-
-
-  The `checked` attribute of an Ember.Checkbox object should always be set
-  through the Ember object or by interacting with its rendered element representation
-  via the mouse, keyboard, or touch.  Updating the value of the checkbox via jQuery will
-  result in the checked value of the object and its element losing synchronization.
-
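-  For example (a sketch; `checkbox` is an assumed rendered instance):
-
-      checkbox.set('checked', true);         // keeps object and element in sync
-      // checkbox.$().prop('checked', true); // jQuery-only update; avoid
-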
-  ## Layout and LayoutName properties
-
-  Because HTML `input` elements are self-closing, the `layout` and `layoutName`
-  properties will not be applied. See `Ember.View`'s layout section for more information.
-
-  @extends Ember.View
-*/
-Ember.Checkbox = Ember.View.extend({
-  classNames: ['ember-checkbox'],
-
-  tagName: 'input',
-
-  attributeBindings: ['type', 'checked', 'disabled', 'tabindex'],
-
-  type: "checkbox",
-  checked: false,
-  disabled: false,
-
-  init: function() {
-    this._super();
-    this.on("change", this, this._updateElementValue);
-  },
-
-  /**
-    @private
-  */
-  _updateElementValue: function() {
-    set(this, 'checked', this.$().prop('checked'));
-  }
-});
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:   Ember Handlebars Views
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-var get = Ember.get, set = Ember.set;
-
-/** @class */
-Ember.TextSupport = Ember.Mixin.create(
-/** @scope Ember.TextSupport.prototype */ {
-
-  value: "",
-
-  attributeBindings: ['placeholder', 'disabled', 'maxlength', 'tabindex'],
-  placeholder: null,
-  disabled: false,
-  maxlength: null,
-
-  insertNewline: Ember.K,
-  cancel: Ember.K,
-
-  /** @private */
-  init: function() {
-    this._super();
-    this.on("focusOut", this, this._elementValueDidChange);
-    this.on("change", this, this._elementValueDidChange);
-    this.on("keyUp", this, this.interpretKeyEvents);
-  },
-
-  /**
-    @private
-  */
-  interpretKeyEvents: function(event) {
-    var map = Ember.TextSupport.KEY_EVENTS;
-    var method = map[event.keyCode];
-
-    this._elementValueDidChange();
-    if (method) { return this[method](event); }
-  },
-
-  _elementValueDidChange: function() {
-    set(this, 'value', this.$().val());
-  }
-
-});
-
-Ember.TextSupport.KEY_EVENTS = {
-  13: 'insertNewline',
-  27: 'cancel'
-};
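-
-// A sketch of how these hooks surface to consumers (`App.SearchField` is
-// hypothetical): subclasses override `insertNewline` and/or `cancel`.
-//
-//     App.SearchField = Ember.TextField.extend({
-//       insertNewline: function(event) {
-//         // enter (keyCode 13) was pressed; `value` is already updated
-//       }
-//     });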
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:   Ember Handlebars Views
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-var get = Ember.get, set = Ember.set;
-
-/**
-  @class
-
-  The `Ember.TextField` view class renders a text
-  [input](https://developer.mozilla.org/en/HTML/Element/Input) element. It
-  allows for binding Ember properties to the text field contents (`value`),
-  live-updating as the user inputs text.
-
-  Example:
-
-      {{view Ember.TextField valueBinding="firstName"}}
-
-  ## Layout and LayoutName properties
-
-  Because HTML `input` elements are self-closing, the `layout` and `layoutName`
-  properties will not be applied. See `Ember.View`'s layout section for more information.
-
-  @extends Ember.View
-  @extends Ember.TextSupport
-*/
-Ember.TextField = Ember.View.extend(Ember.TextSupport,
-  /** @scope Ember.TextField.prototype */ {
-
-  classNames: ['ember-text-field'],
-  tagName: "input",
-  attributeBindings: ['type', 'value', 'size'],
-
-  /**
-    The value attribute of the input element. As the user inputs text, this
-    property is updated live.
-
-    @type String
-    @default ""
-  */
-  value: "",
-
-  /**
-    The type attribute of the input element.
-
-    @type String
-    @default "text"
-  */
-  type: "text",
-
-  /**
-    The size of the text field in characters.
-
-    @type String
-    @default null
-  */
-  size: null
-});
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:   Ember Handlebars Views
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-var get = Ember.get, set = Ember.set;
-
-Ember.Button = Ember.View.extend(Ember.TargetActionSupport, {
-  classNames: ['ember-button'],
-  classNameBindings: ['isActive'],
-
-  tagName: 'button',
-
-  propagateEvents: false,
-
-  attributeBindings: ['type', 'disabled', 'href', 'tabindex'],
-
-  /** @private
-    Overrides TargetActionSupport's targetObject computed
-    property to use Handlebars-specific path resolution.
-  */
-  targetObject: Ember.computed(function() {
-    var target = get(this, 'target'),
-        root = get(this, 'context'),
-        data = get(this, 'templateData');
-
-    if (typeof target !== 'string') { return target; }
-
-    return Ember.Handlebars.getPath(root, target, { data: data });
-  }).property('target').cacheable(),
-
-  // Defaults to 'button' if tagName is 'input' or 'button'
-  type: Ember.computed(function(key, value) {
-    var tagName = this.get('tagName');
-    if (value !== undefined) { this._type = value; }
-    if (this._type !== undefined) { return this._type; }
-    if (tagName === 'input' || tagName === 'button') { return 'button'; }
-  }).property('tagName').cacheable(),
-
-  disabled: false,
-
-  // Allow 'a' tags to act like buttons
-  href: Ember.computed(function() {
-    return this.get('tagName') === 'a' ? '#' : null;
-  }).property('tagName').cacheable(),
-
-  mouseDown: function() {
-    if (!get(this, 'disabled')) {
-      set(this, 'isActive', true);
-      this._mouseDown = true;
-      this._mouseEntered = true;
-    }
-    return get(this, 'propagateEvents');
-  },
-
-  mouseLeave: function() {
-    if (this._mouseDown) {
-      set(this, 'isActive', false);
-      this._mouseEntered = false;
-    }
-  },
-
-  mouseEnter: function() {
-    if (this._mouseDown) {
-      set(this, 'isActive', true);
-      this._mouseEntered = true;
-    }
-  },
-
-  mouseUp: function(event) {
-    if (get(this, 'isActive')) {
-      // Actually invoke the button's target and action.
-      // This method comes from the Ember.TargetActionSupport mixin.
-      this.triggerAction();
-      set(this, 'isActive', false);
-    }
-
-    this._mouseDown = false;
-    this._mouseEntered = false;
-    return get(this, 'propagateEvents');
-  },
-
-  keyDown: function(event) {
-    // Handle space or enter
-    if (event.keyCode === 13 || event.keyCode === 32) {
-      this.mouseDown();
-    }
-  },
-
-  keyUp: function(event) {
-    // Handle space or enter
-    if (event.keyCode === 13 || event.keyCode === 32) {
-      this.mouseUp();
-    }
-  },
-
-  // TODO: Handle proper touch behavior, including making the button inactive
-  // when the finger moves more than 20px outside the button's edge (vs. mouse,
-  // which goes inactive as soon as it leaves the button's edges).
-
-  touchStart: function(touch) {
-    return this.mouseDown(touch);
-  },
-
-  touchEnd: function(touch) {
-    return this.mouseUp(touch);
-  },
-
-  init: function() {
-    Ember.deprecate("Ember.Button is deprecated and will be removed from future releases. Consider using the `{{action}}` helper.");
-    this._super();
-  }
-});
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:   Ember Handlebars Views
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-var get = Ember.get, set = Ember.set;
-
-/**
-  @class
-
-  The `Ember.TextArea` view class renders a
-  [textarea](https://developer.mozilla.org/en/HTML/Element/textarea) element.
-  It allows for binding Ember properties to the text area contents (`value`),
-  live-updating as the user inputs text.
-
-  ## Layout and LayoutName properties
-
-  Because HTML `textarea` elements do not contain inner HTML, the `layout` and `layoutName`
-  properties will not be applied. See `Ember.View`'s layout section for more information.
-
-  @extends Ember.View
-  @extends Ember.TextSupport
-*/
-Ember.TextArea = Ember.View.extend(Ember.TextSupport,
-/** @scope Ember.TextArea.prototype */ {
-
-  classNames: ['ember-text-area'],
-
-  tagName: "textarea",
-  attributeBindings: ['rows', 'cols'],
-  rows: null,
-  cols: null,
-
-  _updateElementValue: Ember.observer(function() {
-    // We do this check so cursor position doesn't get affected in IE
-    var value = get(this, 'value'),
-        $el = this.$();
-    if ($el && value !== $el.val()) {
-      $el.val(value);
-    }
-  }, 'value'),
-
-  /** @private */
-  init: function() {
-    this._super();
-    this.on("didInsertElement", this, this._updateElementValue);
-  }
-
-});
-
-})();
-
-
-
-(function() {
-Ember.TabContainerView = Ember.View.extend({
-  init: function() {
-    Ember.deprecate("Ember.TabContainerView is deprecated and will be removed from future releases.");
-    this._super();
-  }
-});
-
-})();
-
-
-
-(function() {
-var get = Ember.get;
-
-Ember.TabPaneView = Ember.View.extend({
-  tabsContainer: Ember.computed(function() {
-    return this.nearestInstanceOf(Ember.TabContainerView);
-  }).property().volatile(),
-
-  isVisible: Ember.computed(function() {
-    return get(this, 'viewName') === get(this, 'tabsContainer.currentView');
-  }).property('tabsContainer.currentView').volatile(),
-
-  init: function() {
-    Ember.deprecate("Ember.TabPaneView is deprecated and will be removed from future releases.");
-    this._super();
-  }
-});
-
-})();
-
-
-
-(function() {
-var get = Ember.get, setPath = Ember.setPath;
-
-Ember.TabView = Ember.View.extend({
-  tabsContainer: Ember.computed(function() {
-    return this.nearestInstanceOf(Ember.TabContainerView);
-  }).property().volatile(),
-
-  mouseUp: function() {
-    setPath(this, 'tabsContainer.currentView', get(this, 'value'));
-  },
-
-  init: function() {
-    Ember.deprecate("Ember.TabView is deprecated and will be removed from future releases.");
-    this._super();
-  }
-});
-
-})();
-
-
-
-(function() {
-
-})();
-
-
-
-(function() {
-/*jshint eqeqeq:false */
-
-var set = Ember.set, get = Ember.get;
-var indexOf = Ember.EnumerableUtils.indexOf, indexesOf = Ember.EnumerableUtils.indexesOf;
-
-/**
-  @class
-
-  The Ember.Select view class renders a
-  [select](https://developer.mozilla.org/en/HTML/Element/select) HTML element,
-  allowing the user to choose from a list of options. The selected option(s)
-  are updated live in the `selection` property, while the corresponding value
-  is updated in the `value` property.
-
-  ### Using Strings
-
-  The simplest version of an Ember.Select takes an array of strings for the options
-  of a select box and a valueBinding to set the value.
-
-  Example:
-
-      App.controller = Ember.Object.create({
-        selected: null,
-        content: [
-          "Yehuda",
-          "Tom"
-        ]
-      })
-
-      {{view Ember.Select
-             contentBinding="App.controller.content"
-             valueBinding="App.controller.selected"
-      }}
-
-  Would result in the following HTML:
-
-      <select class="ember-select">
-        <option value="Yehuda">Yehuda</option>
-        <option value="Tom">Tom</option>
-      </select>
-
-  Selecting Yehuda from the select box will set `App.controller.selected` to "Yehuda".
-
-  ### Using Objects
-
-  An Ember.Select can also take an array of JS or Ember objects.
-
-  When using objects, you need to supply `optionLabelPath` and `optionValuePath` parameters,
-  which will be used to get the label and value for each of the options.
-
-  Usually you will bind to either the selection or the value attribute of the select.
-
-  Use selectionBinding if you would like to set the whole object as a property on the target.
-  Use valueBinding if you would like to set just the value.
-
-  Example using selectionBinding:
-
-      App.controller = Ember.Object.create({
-        selectedPerson: null,
-        selectedPersonId: null,
-        content: [
-          Ember.Object.create({firstName: "Yehuda", id: 1}),
-          Ember.Object.create({firstName: "Tom",    id: 2})
-        ]
-      })
-
-      {{view Ember.Select
-             contentBinding="App.controller.content"
-             optionLabelPath="content.firstName"
-             optionValuePath="content.id"
-             selectionBinding="App.controller.selectedPerson"
-             prompt="Please Select"}}
-
-      <select class="ember-select">
-        <option value>Please Select</option>
-        <option value="1">Yehuda</option>
-        <option value="2">Tom</option>
-      </select>
-
-  Selecting Yehuda here will set `App.controller.selectedPerson` to
-  the Yehuda object.
-
-  Example using valueBinding:
-
-      {{view Ember.Select
-             contentBinding="App.controller.content"
-             optionLabelPath="content.firstName"
-             optionValuePath="content.id"
-             valueBinding="App.controller.selectedPersonId"
-             prompt="Please Select"}}
-
-  Selecting Yehuda in this case will set `App.controller.selectedPersonId` to 1.
-
-  @extends Ember.View
-*/
-Ember.Select = Ember.View.extend(
-  /** @scope Ember.Select.prototype */ {
-
-  tagName: 'select',
-  classNames: ['ember-select'],
-  defaultTemplate: Ember.Handlebars.compile('{{#if view.prompt}}<option value>{{view.prompt}}</option>{{/if}}{{#each view.content}}{{view Ember.SelectOption contentBinding="this"}}{{/each}}'),
-  attributeBindings: ['multiple', 'tabindex'],
-
-  /**
-    The `multiple` attribute of the select element. Indicates whether multiple
-    options can be selected.
-
-    @type Boolean
-    @default false
-  */
-  multiple: false,
-
-  /**
-    The list of options.
-
-    If `optionLabelPath` and `optionValuePath` are not overridden, this should
-    be a list of strings, which will serve simultaneously as labels and values.
-
-    Otherwise, this should be a list of objects. For instance:
-
-        content: Ember.A([
-            { id: 1, firstName: 'Yehuda' },
-            { id: 2, firstName: 'Tom' }
-          ]),
-        optionLabelPath: 'content.firstName',
-        optionValuePath: 'content.id'
-
-    @type Array
-    @default null
-  */
-  content: null,
-
-  /**
-    When `multiple` is false, the element of `content` that is currently
-    selected, if any.
-
-    When `multiple` is true, an array of such elements.
-
-    @type Object or Array
-    @default null
-  */
-  selection: null,
-
-  /**
-    In single selection mode (when `multiple` is false), `value` can be used to get
-    the current selection's value or to set the selection by its value.
-
-    It is not currently supported in multiple selection mode.
-
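-    For example (a sketch; `select` is the rendered view from the objects
-    example above, with `optionValuePath: 'content.id'` and Yehuda selected):
-
-        select.get('value');   // => 1
-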
-    @type String
-    @default null
-  */
-  value: Ember.computed(function(key, value) {
-    if (arguments.length === 2) { return value; }
-
-    var valuePath = get(this, 'optionValuePath').replace(/^content\.?/, '');
-    return valuePath ? get(this, 'selection.' + valuePath) : get(this, 'selection');
-  }).property('selection').cacheable(),
-
-  /**
-    If given, a top-most dummy option will be rendered to serve as a user
-    prompt.
-
-    @type String
-    @default null
-  */
-  prompt: null,
-
-  /**
-    The path of the option labels. See `content`.
-
-    @type String
-    @default 'content'
-  */
-  optionLabelPath: 'content',
-
-  /**
-    The path of the option values. See `content`.
-
-    @type String
-    @default 'content'
-  */
-  optionValuePath: 'content',
-
-  _change: function() {
-    if (get(this, 'multiple')) {
-      this._changeMultiple();
-    } else {
-      this._changeSingle();
-    }
-  },
-
-  selectionDidChange: Ember.observer(function() {
-    var selection = get(this, 'selection'),
-        isArray = Ember.isArray(selection);
-    if (get(this, 'multiple')) {
-      if (!isArray) {
-        set(this, 'selection', Ember.A([selection]));
-        return;
-      }
-      this._selectionDidChangeMultiple();
-    } else {
-      this._selectionDidChangeSingle();
-    }
-  }, 'selection'),
-
-  valueDidChange: Ember.observer(function() {
-    var content = get(this, 'content'),
-        value = get(this, 'value'),
-        valuePath = get(this, 'optionValuePath').replace(/^content\.?/, ''),
-        selectedValue = (valuePath ? get(this, 'selection.' + valuePath) : get(this, 'selection')),
-        selection;
-
-    if (value !== selectedValue) {
-      selection = content.find(function(obj) {
-        return value === (valuePath ? get(obj, valuePath) : obj);
-      });
-
-      this.set('selection', selection);
-    }
-  }, 'value'),
-
-
-  _triggerChange: function() {
-    var selection = get(this, 'selection');
-
-    if (selection) { this.selectionDidChange(); }
-
-    this._change();
-  },
-
-  _changeSingle: function() {
-    var selectedIndex = this.$()[0].selectedIndex,
-        content = get(this, 'content'),
-        prompt = get(this, 'prompt');
-
-    if (!content) { return; }
-    if (prompt && selectedIndex === 0) { set(this, 'selection', null); return; }
-
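-    // A prompt renders as an extra <option> at index 0, so shift the DOM
-    // index back by one before mapping it onto `content`.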
-    if (prompt) { selectedIndex -= 1; }
-    set(this, 'selection', content.objectAt(selectedIndex));
-  },
-
-  _changeMultiple: function() {
-    var options = this.$('option:selected'),
-        prompt = get(this, 'prompt'),
-        offset = prompt ? 1 : 0,
-        content = get(this, 'content');
-
-    if (!content){ return; }
-    if (options) {
-      var selectedIndexes = options.map(function(){
-        return this.index - offset;
-      }).toArray();
-      set(this, 'selection', content.objectsAt(selectedIndexes));
-    }
-  },
-
-  _selectionDidChangeSingle: function() {
-    var el = this.get('element');
-    if (!el) { return; }
-
-    var content = get(this, 'content'),
-        selection = get(this, 'selection'),
-        selectionIndex = content ? indexOf(content, selection) : -1,
-        prompt = get(this, 'prompt');
-
-    if (prompt) { selectionIndex += 1; }
-    if (el) { el.selectedIndex = selectionIndex; }
-  },
-
-  _selectionDidChangeMultiple: function() {
-    var content = get(this, 'content'),
-        selection = get(this, 'selection'),
-        selectedIndexes = content ? indexesOf(content, selection) : [-1],
-        prompt = get(this, 'prompt'),
-        offset = prompt ? 1 : 0,
-        options = this.$('option'),
-        adjusted;
-
-    if (options) {
-      options.each(function() {
-        adjusted = this.index > -1 ? this.index + offset : -1;
-        this.selected = indexOf(selectedIndexes, adjusted) > -1;
-      });
-    }
-  },
-
-  init: function() {
-    this._super();
-    this.on("didInsertElement", this, this._triggerChange);
-    this.on("change", this, this._change);
-  }
-});
-
-Ember.SelectOption = Ember.View.extend({
-  tagName: 'option',
-  attributeBindings: ['value', 'selected'],
-
-  defaultTemplate: function(context, options) {
-    options = { data: options.data, hash: {} };
-    Ember.Handlebars.helpers.bind.call(context, "view.label", options);
-  },
-
-  init: function() {
-    this.labelPathDidChange();
-    this.valuePathDidChange();
-
-    this._super();
-  },
-
-  selected: Ember.computed(function() {
-    var content = get(this, 'content'),
-        selection = get(this, 'parentView.selection');
-    if (get(this, 'parentView.multiple')) {
-      return selection && indexOf(selection, content) > -1;
-    } else {
-      // Primitives get passed through bindings as objects... since
-      // `new Number(4) !== 4`, we use `==` below
-      return content == selection;
-    }
-  }).property('content', 'parentView.selection').volatile(),
-
-  labelPathDidChange: Ember.observer(function() {
-    var labelPath = get(this, 'parentView.optionLabelPath');
-
-    if (!labelPath) { return; }
-
-    Ember.defineProperty(this, 'label', Ember.computed(function() {
-      return get(this, labelPath);
-    }).property(labelPath).cacheable());
-  }, 'parentView.optionLabelPath'),
-
-  valuePathDidChange: Ember.observer(function() {
-    var valuePath = get(this, 'parentView.optionValuePath');
-
-    if (!valuePath) { return; }
-
-    Ember.defineProperty(this, 'value', Ember.computed(function() {
-      return get(this, valuePath);
-    }).property(valuePath).cacheable());
-  }, 'parentView.optionValuePath')
-});
-
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:   Ember Handlebars Views
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:   Ember Handlebars Views
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-/*globals Handlebars */
-// Find templates stored in the head tag as script tags and make them available
-// to Ember.CoreView in the global Ember.TEMPLATES object. This will be run as as
-// jQuery DOM-ready callback.
-//
-// Script tags with "text/x-handlebars" will be compiled
-// with Ember's Handlebars and are suitable for use as a view's template.
-// Those with type="text/x-raw-handlebars" will be compiled with regular
-// Handlebars and are suitable for use in views' computed properties.
-Ember.Handlebars.bootstrap = function(ctx) {
-  var selectors = 'script[type="text/x-handlebars"], script[type="text/x-raw-handlebars"]';
-
-  Ember.$(selectors, ctx)
-    .each(function() {
-    // Get a reference to the script tag
-    var script = Ember.$(this),
-        type   = script.attr('type');
-
-    var compile = (script.attr('type') === 'text/x-raw-handlebars') ?
-                  Ember.$.proxy(Handlebars.compile, Handlebars) :
-                  Ember.$.proxy(Ember.Handlebars.compile, Ember.Handlebars),
-      // Get the name of the script, used by Ember.View's templateName property.
-      // First look for data-template-name attribute, then fall back to its
-      // id if no name is found.
-      templateName = script.attr('data-template-name') || script.attr('id'),
-      template = compile(script.html()),
-      view, viewPath, elementId, options;
-
-    if (templateName) {
-      // For templates which have a name, we save them and then remove them from the DOM
-      Ember.TEMPLATES[templateName] = template;
-
-      // Remove script tag from DOM
-      script.remove();
-    } else {
-      if (script.parents('head').length !== 0) {
-        // don't allow inline templates in the head
-        throw new Ember.Error("Template found in <head> without a name specified. " +
-                         "Please provide a data-template-name attribute.\n" +
-                         script.html());
-      }
-
-      // For templates which will be evaluated inline in the HTML document, instantiates a new
-      // view, and replaces the script tag holding the template with the new
-      // view's DOM representation.
-      //
-      // Users can optionally specify a custom view subclass to use by setting the
-      // data-view attribute of the script tag.
-      viewPath = script.attr('data-view');
-      view = viewPath ? Ember.get(viewPath) : Ember.View;
-
-      // Get the id of the script, used by Ember.View's elementId property,
-      // Look for data-element-id attribute.
-      elementId = script.attr('data-element-id');
-
-      options = { template: template };
-      if (elementId) { options.elementId = elementId; }
-
-      view = view.create(options);
-
-      view._insertElementLater(function() {
-        script.replaceWith(this.$());
-
-        // Avoid memory leak in IE
-        script = null;
-      });
-    }
-  });
-};
-
-/** @private */
-function bootstrap() {
-  Ember.Handlebars.bootstrap( Ember.$(document) );
-}
-
-/*
-  We tie this to application.load to ensure that we've at least
-  attempted to bootstrap at the point that the application is loaded.
-
-  We also tie this to document ready since we're guaranteed that all
-  the inline templates are present at this point.
-
-  There's no harm to running this twice, since we remove the templates
-  from the DOM after processing.
-*/
-
-Ember.$(document).ready(bootstrap);
-Ember.onLoad('application', bootstrap);
-
-})();
-
-
-
-(function() {
-// ==========================================================================
-// Project:   Ember Handlebars Views
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-
-})();
-
-// Version: v1.0.pre
-// Last commit: 7955b85 (2012-08-03 14:50:17 -0700)
-
-
-(function() {
-// ==========================================================================
-// Project:   Ember
-// Copyright: ©2011 Strobe Inc. and contributors.
-// License:   Licensed under MIT license (see license.js)
-// ==========================================================================
-
-})();
-
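The deleted ember.js block above ends with the `Ember.Select`/`Ember.SelectOption` binding machinery and the `Ember.Handlebars.bootstrap` pass that compiles `text/x-handlebars` script tags into `Ember.TEMPLATES` at document-ready. A minimal sketch of how a page on this Ember vintage (v1.0.pre) would exercise that path; the template name, the `hostName` property, and the host string are hypothetical, and it assumes a bare view resolves `{{hostName}}` against itself, as this vintage does when no controller context is set:

```js
// Hypothetical page markup consumed by Ember.Handlebars.bootstrap:
//
//   <script type="text/x-handlebars" data-template-name="host-row">
//     {{hostName}}
//   </script>
//
// bootstrap() compiles it, stores it under Ember.TEMPLATES['host-row'],
// and removes the script tag from the DOM.

var view = Ember.View.create({
  templateName: 'host-row',            // looked up in Ember.TEMPLATES
  hostName: 'c6401.ambari.apache.org'  // hypothetical value bound by the template
});
view.appendTo('body');

// Unnamed script tags in <body> are instead rendered in place as views;
// unnamed templates in <head> throw, as the bootstrap code above enforces.
```

Running the scan both on `document.ready` and on `application.load` is safe because, as the source comments note, named templates are removed from the DOM after the first pass.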
diff --git a/branch-1.2/ambari-web/vendor/scripts/handlebars-1.0.0.beta.6.js b/branch-1.2/ambari-web/vendor/scripts/handlebars-1.0.0.beta.6.js
deleted file mode 100644
index 83119ff..0000000
--- a/branch-1.2/ambari-web/vendor/scripts/handlebars-1.0.0.beta.6.js
+++ /dev/null
@@ -1,1550 +0,0 @@
-// lib/handlebars/base.js
-var Handlebars = {};
-
-Handlebars.VERSION = "1.0.beta.6";
-
-Handlebars.helpers  = {};
-Handlebars.partials = {};
-
-Handlebars.registerHelper = function(name, fn, inverse) {
-  if(inverse) { fn.not = inverse; }
-  this.helpers[name] = fn;
-};
-
-Handlebars.registerPartial = function(name, str) {
-  this.partials[name] = str;
-};
-
-Handlebars.registerHelper('helperMissing', function(arg) {
-  if(arguments.length === 2) {
-    return undefined;
-  } else {
-    throw new Error("Could not find property '" + arg + "'");
-  }
-});
-
-var toString = Object.prototype.toString, functionType = "[object Function]";
-
-Handlebars.registerHelper('blockHelperMissing', function(context, options) {
-  var inverse = options.inverse || function() {}, fn = options.fn;
-
-
-  var ret = "";
-  var type = toString.call(context);
-
-  if(type === functionType) { context = context.call(this); }
-
-  if(context === true) {
-    return fn(this);
-  } else if(context === false || context == null) {
-    return inverse(this);
-  } else if(type === "[object Array]") {
-    if(context.length > 0) {
-      for(var i=0, j=context.length; i<j; i++) {
-        ret = ret + fn(context[i]);
-      }
-    } else {
-      ret = inverse(this);
-    }
-    return ret;
-  } else {
-    return fn(context);
-  }
-});
-
-Handlebars.registerHelper('each', function(context, options) {
-  var fn = options.fn, inverse = options.inverse;
-  var ret = "";
-
-  if(context && context.length > 0) {
-    for(var i=0, j=context.length; i<j; i++) {
-      ret = ret + fn(context[i]);
-    }
-  } else {
-    ret = inverse(this);
-  }
-  return ret;
-});
-
-Handlebars.registerHelper('if', function(context, options) {
-  var type = toString.call(context);
-  if(type === functionType) { context = context.call(this); }
-
-  if(!context || Handlebars.Utils.isEmpty(context)) {
-    return options.inverse(this);
-  } else {
-    return options.fn(this);
-  }
-});
-
-Handlebars.registerHelper('unless', function(context, options) {
-  var fn = options.fn, inverse = options.inverse;
-  options.fn = inverse;
-  options.inverse = fn;
-
-  return Handlebars.helpers['if'].call(this, context, options);
-});
-
-Handlebars.registerHelper('with', function(context, options) {
-  return options.fn(context);
-});
-
-Handlebars.registerHelper('log', function(context) {
-  Handlebars.log(context);
-});
-;
-// lib/handlebars/compiler/parser.js
-/* Jison generated parser */
-var handlebars = (function(){
-
-var parser = {trace: function trace() { },
-yy: {},
-symbols_: {"error":2,"root":3,"program":4,"EOF":5,"statements":6,"simpleInverse":7,"statement":8,"openInverse":9,"closeBlock":10,"openBlock":11,"mustache":12,"partial":13,"CONTENT":14,"COMMENT":15,"OPEN_BLOCK":16,"inMustache":17,"CLOSE":18,"OPEN_INVERSE":19,"OPEN_ENDBLOCK":20,"path":21,"OPEN":22,"OPEN_UNESCAPED":23,"OPEN_PARTIAL":24,"params":25,"hash":26,"param":27,"STRING":28,"INTEGER":29,"BOOLEAN":30,"hashSegments":31,"hashSegment":32,"ID":33,"EQUALS":34,"pathSegments":35,"SEP":36,"$accept":0,"$end":1},
-terminals_: {2:"error",5:"EOF",14:"CONTENT",15:"COMMENT",16:"OPEN_BLOCK",18:"CLOSE",19:"OPEN_INVERSE",20:"OPEN_ENDBLOCK",22:"OPEN",23:"OPEN_UNESCAPED",24:"OPEN_PARTIAL",28:"STRING",29:"INTEGER",30:"BOOLEAN",33:"ID",34:"EQUALS",36:"SEP"},
-productions_: [0,[3,2],[4,3],[4,1],[4,0],[6,1],[6,2],[8,3],[8,3],[8,1],[8,1],[8,1],[8,1],[11,3],[9,3],[10,3],[12,3],[12,3],[13,3],[13,4],[7,2],[17,3],[17,2],[17,2],[17,1],[25,2],[25,1],[27,1],[27,1],[27,1],[27,1],[26,1],[31,2],[31,1],[32,3],[32,3],[32,3],[32,3],[21,1],[35,3],[35,1]],
-performAction: function anonymous(yytext,yyleng,yylineno,yy,yystate,$$,_$) {
-
-var $0 = $$.length - 1;
-switch (yystate) {
-case 1: return $$[$0-1] 
-break;
-case 2: this.$ = new yy.ProgramNode($$[$0-2], $$[$0]) 
-break;
-case 3: this.$ = new yy.ProgramNode($$[$0]) 
-break;
-case 4: this.$ = new yy.ProgramNode([]) 
-break;
-case 5: this.$ = [$$[$0]] 
-break;
-case 6: $$[$0-1].push($$[$0]); this.$ = $$[$0-1] 
-break;
-case 7: this.$ = new yy.InverseNode($$[$0-2], $$[$0-1], $$[$0]) 
-break;
-case 8: this.$ = new yy.BlockNode($$[$0-2], $$[$0-1], $$[$0]) 
-break;
-case 9: this.$ = $$[$0] 
-break;
-case 10: this.$ = $$[$0] 
-break;
-case 11: this.$ = new yy.ContentNode($$[$0]) 
-break;
-case 12: this.$ = new yy.CommentNode($$[$0]) 
-break;
-case 13: this.$ = new yy.MustacheNode($$[$0-1][0], $$[$0-1][1]) 
-break;
-case 14: this.$ = new yy.MustacheNode($$[$0-1][0], $$[$0-1][1]) 
-break;
-case 15: this.$ = $$[$0-1] 
-break;
-case 16: this.$ = new yy.MustacheNode($$[$0-1][0], $$[$0-1][1]) 
-break;
-case 17: this.$ = new yy.MustacheNode($$[$0-1][0], $$[$0-1][1], true) 
-break;
-case 18: this.$ = new yy.PartialNode($$[$0-1]) 
-break;
-case 19: this.$ = new yy.PartialNode($$[$0-2], $$[$0-1]) 
-break;
-case 20: 
-break;
-case 21: this.$ = [[$$[$0-2]].concat($$[$0-1]), $$[$0]] 
-break;
-case 22: this.$ = [[$$[$0-1]].concat($$[$0]), null] 
-break;
-case 23: this.$ = [[$$[$0-1]], $$[$0]] 
-break;
-case 24: this.$ = [[$$[$0]], null] 
-break;
-case 25: $$[$0-1].push($$[$0]); this.$ = $$[$0-1]; 
-break;
-case 26: this.$ = [$$[$0]] 
-break;
-case 27: this.$ = $$[$0] 
-break;
-case 28: this.$ = new yy.StringNode($$[$0]) 
-break;
-case 29: this.$ = new yy.IntegerNode($$[$0]) 
-break;
-case 30: this.$ = new yy.BooleanNode($$[$0]) 
-break;
-case 31: this.$ = new yy.HashNode($$[$0]) 
-break;
-case 32: $$[$0-1].push($$[$0]); this.$ = $$[$0-1] 
-break;
-case 33: this.$ = [$$[$0]] 
-break;
-case 34: this.$ = [$$[$0-2], $$[$0]] 
-break;
-case 35: this.$ = [$$[$0-2], new yy.StringNode($$[$0])] 
-break;
-case 36: this.$ = [$$[$0-2], new yy.IntegerNode($$[$0])] 
-break;
-case 37: this.$ = [$$[$0-2], new yy.BooleanNode($$[$0])] 
-break;
-case 38: this.$ = new yy.IdNode($$[$0]) 
-break;
-case 39: $$[$0-2].push($$[$0]); this.$ = $$[$0-2]; 
-break;
-case 40: this.$ = [$$[$0]] 
-break;
-}
-},
-table: [{3:1,4:2,5:[2,4],6:3,8:4,9:5,11:6,12:7,13:8,14:[1,9],15:[1,10],16:[1,12],19:[1,11],22:[1,13],23:[1,14],24:[1,15]},{1:[3]},{5:[1,16]},{5:[2,3],7:17,8:18,9:5,11:6,12:7,13:8,14:[1,9],15:[1,10],16:[1,12],19:[1,19],20:[2,3],22:[1,13],23:[1,14],24:[1,15]},{5:[2,5],14:[2,5],15:[2,5],16:[2,5],19:[2,5],20:[2,5],22:[2,5],23:[2,5],24:[2,5]},{4:20,6:3,8:4,9:5,11:6,12:7,13:8,14:[1,9],15:[1,10],16:[1,12],19:[1,11],20:[2,4],22:[1,13],23:[1,14],24:[1,15]},{4:21,6:3,8:4,9:5,11:6,12:7,13:8,14:[1,9],15:[1,10],16:[1,12],19:[1,11],20:[2,4],22:[1,13],23:[1,14],24:[1,15]},{5:[2,9],14:[2,9],15:[2,9],16:[2,9],19:[2,9],20:[2,9],22:[2,9],23:[2,9],24:[2,9]},{5:[2,10],14:[2,10],15:[2,10],16:[2,10],19:[2,10],20:[2,10],22:[2,10],23:[2,10],24:[2,10]},{5:[2,11],14:[2,11],15:[2,11],16:[2,11],19:[2,11],20:[2,11],22:[2,11],23:[2,11],24:[2,11]},{5:[2,12],14:[2,12],15:[2,12],16:[2,12],19:[2,12],20:[2,12],22:[2,12],23:[2,12],24:[2,12]},{17:22,21:23,33:[1,25],35:24},{17:26,21:23,33:[1,25],35:24},{17:27,21:23,33:[1,25],35:24},{17:28,21:23,33:[1,25],35:24},{21:29,33:[1,25],35:24},{1:[2,1]},{6:30,8:4,9:5,11:6,12:7,13:8,14:[1,9],15:[1,10],16:[1,12],19:[1,11],22:[1,13],23:[1,14],24:[1,15]},{5:[2,6],14:[2,6],15:[2,6],16:[2,6],19:[2,6],20:[2,6],22:[2,6],23:[2,6],24:[2,6]},{17:22,18:[1,31],21:23,33:[1,25],35:24},{10:32,20:[1,33]},{10:34,20:[1,33]},{18:[1,35]},{18:[2,24],21:40,25:36,26:37,27:38,28:[1,41],29:[1,42],30:[1,43],31:39,32:44,33:[1,45],35:24},{18:[2,38],28:[2,38],29:[2,38],30:[2,38],33:[2,38],36:[1,46]},{18:[2,40],28:[2,40],29:[2,40],30:[2,40],33:[2,40],36:[2,40]},{18:[1,47]},{18:[1,48]},{18:[1,49]},{18:[1,50],21:51,33:[1,25],35:24},{5:[2,2],8:18,9:5,11:6,12:7,13:8,14:[1,9],15:[1,10],16:[1,12],19:[1,11],20:[2,2],22:[1,13],23:[1,14],24:[1,15]},{14:[2,20],15:[2,20],16:[2,20],19:[2,20],22:[2,20],23:[2,20],24:[2,20]},{5:[2,7],14:[2,7],15:[2,7],16:[2,7],19:[2,7],20:[2,7],22:[2,7],23:[2,7],24:[2,7]},{21:52,33:[1,25],35:24},{5:[2,8],14:[2,8],15:[2,8],16:[2,8],19:[2,8],20:[2,8],22:[2,8],23:[2,8],24:[2,8]},{14:[2,14],15:[2,14],16:[2,14],19:[2,14],20:[2,14],22:[2,14],23:[2,14],24:[2,14]},{18:[2,22],21:40,26:53,27:54,28:[1,41],29:[1,42],30:[1,43],31:39,32:44,33:[1,45],35:24},{18:[2,23]},{18:[2,26],28:[2,26],29:[2,26],30:[2,26],33:[2,26]},{18:[2,31],32:55,33:[1,56]},{18:[2,27],28:[2,27],29:[2,27],30:[2,27],33:[2,27]},{18:[2,28],28:[2,28],29:[2,28],30:[2,28],33:[2,28]},{18:[2,29],28:[2,29],29:[2,29],30:[2,29],33:[2,29]},{18:[2,30],28:[2,30],29:[2,30],30:[2,30],33:[2,30]},{18:[2,33],33:[2,33]},{18:[2,40],28:[2,40],29:[2,40],30:[2,40],33:[2,40],34:[1,57],36:[2,40]},{33:[1,58]},{14:[2,13],15:[2,13],16:[2,13],19:[2,13],20:[2,13],22:[2,13],23:[2,13],24:[2,13]},{5:[2,16],14:[2,16],15:[2,16],16:[2,16],19:[2,16],20:[2,16],22:[2,16],23:[2,16],24:[2,16]},{5:[2,17],14:[2,17],15:[2,17],16:[2,17],19:[2,17],20:[2,17],22:[2,17],23:[2,17],24:[2,17]},{5:[2,18],14:[2,18],15:[2,18],16:[2,18],19:[2,18],20:[2,18],22:[2,18],23:[2,18],24:[2,18]},{18:[1,59]},{18:[1,60]},{18:[2,21]},{18:[2,25],28:[2,25],29:[2,25],30:[2,25],33:[2,25]},{18:[2,32],33:[2,32]},{34:[1,57]},{21:61,28:[1,62],29:[1,63],30:[1,64],33:[1,25],35:24},{18:[2,39],28:[2,39],29:[2,39],30:[2,39],33:[2,39],36:[2,39]},{5:[2,19],14:[2,19],15:[2,19],16:[2,19],19:[2,19],20:[2,19],22:[2,19],23:[2,19],24:[2,19]},{5:[2,15],14:[2,15],15:[2,15],16:[2,15],19:[2,15],20:[2,15],22:[2,15],23:[2,15],24:[2,15]},{18:[2,34],33:[2,34]},{18:[2,35],33:[2,35]},{18:[2,36],33:[2,36]},{18:[2,37],33:[2,37]}],
-defaultActions: {16:[2,1],37:[2,23],53:[2,21]},
-parseError: function parseError(str, hash) {
-    throw new Error(str);
-},
-parse: function parse(input) {
-    var self = this, stack = [0], vstack = [null], lstack = [], table = this.table, yytext = "", yylineno = 0, yyleng = 0, recovering = 0, TERROR = 2, EOF = 1;
-    this.lexer.setInput(input);
-    this.lexer.yy = this.yy;
-    this.yy.lexer = this.lexer;
-    if (typeof this.lexer.yylloc == "undefined")
-        this.lexer.yylloc = {};
-    var yyloc = this.lexer.yylloc;
-    lstack.push(yyloc);
-    if (typeof this.yy.parseError === "function")
-        this.parseError = this.yy.parseError;
-    function popStack(n) {
-        stack.length = stack.length - 2 * n;
-        vstack.length = vstack.length - n;
-        lstack.length = lstack.length - n;
-    }
-    function lex() {
-        var token;
-        token = self.lexer.lex() || 1;
-        if (typeof token !== "number") {
-            token = self.symbols_[token] || token;
-        }
-        return token;
-    }
-    var symbol, preErrorSymbol, state, action, a, r, yyval = {}, p, len, newState, expected;
-    while (true) {
-        state = stack[stack.length - 1];
-        if (this.defaultActions[state]) {
-            action = this.defaultActions[state];
-        } else {
-            if (symbol == null)
-                symbol = lex();
-            action = table[state] && table[state][symbol];
-        }
-        if (typeof action === "undefined" || !action.length || !action[0]) {
-            if (!recovering) {
-                expected = [];
-                for (p in table[state])
-                    if (this.terminals_[p] && p > 2) {
-                        expected.push("'" + this.terminals_[p] + "'");
-                    }
-                var errStr = "";
-                if (this.lexer.showPosition) {
-                    errStr = "Parse error on line " + (yylineno + 1) + ":\n" + this.lexer.showPosition() + "\nExpecting " + expected.join(", ") + ", got '" + this.terminals_[symbol] + "'";
-                } else {
-                    errStr = "Parse error on line " + (yylineno + 1) + ": Unexpected " + (symbol == 1?"end of input":"'" + (this.terminals_[symbol] || symbol) + "'");
-                }
-                this.parseError(errStr, {text: this.lexer.match, token: this.terminals_[symbol] || symbol, line: this.lexer.yylineno, loc: yyloc, expected: expected});
-            }
-        }
-        if (action[0] instanceof Array && action.length > 1) {
-            throw new Error("Parse Error: multiple actions possible at state: " + state + ", token: " + symbol);
-        }
-        switch (action[0]) {
-        case 1:
-            stack.push(symbol);
-            vstack.push(this.lexer.yytext);
-            lstack.push(this.lexer.yylloc);
-            stack.push(action[1]);
-            symbol = null;
-            if (!preErrorSymbol) {
-                yyleng = this.lexer.yyleng;
-                yytext = this.lexer.yytext;
-                yylineno = this.lexer.yylineno;
-                yyloc = this.lexer.yylloc;
-                if (recovering > 0)
-                    recovering--;
-            } else {
-                symbol = preErrorSymbol;
-                preErrorSymbol = null;
-            }
-            break;
-        case 2:
-            len = this.productions_[action[1]][1];
-            yyval.$ = vstack[vstack.length - len];
-            yyval._$ = {first_line: lstack[lstack.length - (len || 1)].first_line, last_line: lstack[lstack.length - 1].last_line, first_column: lstack[lstack.length - (len || 1)].first_column, last_column: lstack[lstack.length - 1].last_column};
-            r = this.performAction.call(yyval, yytext, yyleng, yylineno, this.yy, action[1], vstack, lstack);
-            if (typeof r !== "undefined") {
-                return r;
-            }
-            if (len) {
-                stack = stack.slice(0, -1 * len * 2);
-                vstack = vstack.slice(0, -1 * len);
-                lstack = lstack.slice(0, -1 * len);
-            }
-            stack.push(this.productions_[action[1]][0]);
-            vstack.push(yyval.$);
-            lstack.push(yyval._$);
-            newState = table[stack[stack.length - 2]][stack[stack.length - 1]];
-            stack.push(newState);
-            break;
-        case 3:
-            return true;
-        }
-    }
-    return true;
-}
-};/* Jison generated lexer */
-var lexer = (function(){
-
-var lexer = ({EOF:1,
-parseError:function parseError(str, hash) {
-        if (this.yy.parseError) {
-            this.yy.parseError(str, hash);
-        } else {
-            throw new Error(str);
-        }
-    },
-setInput:function (input) {
-        this._input = input;
-        this._more = this._less = this.done = false;
-        this.yylineno = this.yyleng = 0;
-        this.yytext = this.matched = this.match = '';
-        this.conditionStack = ['INITIAL'];
-        this.yylloc = {first_line:1,first_column:0,last_line:1,last_column:0};
-        return this;
-    },
-input:function () {
-        var ch = this._input[0];
-        this.yytext+=ch;
-        this.yyleng++;
-        this.match+=ch;
-        this.matched+=ch;
-        var lines = ch.match(/\n/);
-        if (lines) this.yylineno++;
-        this._input = this._input.slice(1);
-        return ch;
-    },
-unput:function (ch) {
-        this._input = ch + this._input;
-        return this;
-    },
-more:function () {
-        this._more = true;
-        return this;
-    },
-pastInput:function () {
-        var past = this.matched.substr(0, this.matched.length - this.match.length);
-        return (past.length > 20 ? '...':'') + past.substr(-20).replace(/\n/g, "");
-    },
-upcomingInput:function () {
-        var next = this.match;
-        if (next.length < 20) {
-            next += this._input.substr(0, 20-next.length);
-        }
-        return (next.substr(0,20)+(next.length > 20 ? '...':'')).replace(/\n/g, "");
-    },
-showPosition:function () {
-        var pre = this.pastInput();
-        var c = new Array(pre.length + 1).join("-");
-        return pre + this.upcomingInput() + "\n" + c+"^";
-    },
-next:function () {
-        if (this.done) {
-            return this.EOF;
-        }
-        if (!this._input) this.done = true;
-
-        var token,
-            match,
-            col,
-            lines;
-        if (!this._more) {
-            this.yytext = '';
-            this.match = '';
-        }
-        var rules = this._currentRules();
-        for (var i=0;i < rules.length; i++) {
-            match = this._input.match(this.rules[rules[i]]);
-            if (match) {
-                lines = match[0].match(/\n.*/g);
-                if (lines) this.yylineno += lines.length;
-                this.yylloc = {first_line: this.yylloc.last_line,
-                               last_line: this.yylineno+1,
-                               first_column: this.yylloc.last_column,
-                               last_column: lines ? lines[lines.length-1].length-1 : this.yylloc.last_column + match[0].length}
-                this.yytext += match[0];
-                this.match += match[0];
-                this.matches = match;
-                this.yyleng = this.yytext.length;
-                this._more = false;
-                this._input = this._input.slice(match[0].length);
-                this.matched += match[0];
-                token = this.performAction.call(this, this.yy, this, rules[i],this.conditionStack[this.conditionStack.length-1]);
-                if (token) return token;
-                else return;
-            }
-        }
-        if (this._input === "") {
-            return this.EOF;
-        } else {
-            this.parseError('Lexical error on line '+(this.yylineno+1)+'. Unrecognized text.\n'+this.showPosition(), 
-                    {text: "", token: null, line: this.yylineno});
-        }
-    },
-lex:function lex() {
-        var r = this.next();
-        if (typeof r !== 'undefined') {
-            return r;
-        } else {
-            return this.lex();
-        }
-    },
-begin:function begin(condition) {
-        this.conditionStack.push(condition);
-    },
-popState:function popState() {
-        return this.conditionStack.pop();
-    },
-_currentRules:function _currentRules() {
-        return this.conditions[this.conditionStack[this.conditionStack.length-1]].rules;
-    },
-topState:function () {
-        return this.conditionStack[this.conditionStack.length-2];
-    },
-pushState:function begin(condition) {
-        this.begin(condition);
-    }});
-lexer.performAction = function anonymous(yy,yy_,$avoiding_name_collisions,YY_START) {
-
-var YYSTATE=YY_START
-switch($avoiding_name_collisions) {
-case 0:
-                                   if(yy_.yytext.slice(-1) !== "\\") this.begin("mu");
-                                   if(yy_.yytext.slice(-1) === "\\") yy_.yytext = yy_.yytext.substr(0,yy_.yyleng-1), this.begin("emu");
-                                   if(yy_.yytext) return 14;
-                                 
-break;
-case 1: return 14; 
-break;
-case 2: this.popState(); return 14; 
-break;
-case 3: return 24; 
-break;
-case 4: return 16; 
-break;
-case 5: return 20; 
-break;
-case 6: return 19; 
-break;
-case 7: return 19; 
-break;
-case 8: return 23; 
-break;
-case 9: return 23; 
-break;
-case 10: yy_.yytext = yy_.yytext.substr(3,yy_.yyleng-5); this.popState(); return 15; 
-break;
-case 11: return 22; 
-break;
-case 12: return 34; 
-break;
-case 13: return 33; 
-break;
-case 14: return 33; 
-break;
-case 15: return 36; 
-break;
-case 16: /*ignore whitespace*/ 
-break;
-case 17: this.popState(); return 18; 
-break;
-case 18: this.popState(); return 18; 
-break;
-case 19: yy_.yytext = yy_.yytext.substr(1,yy_.yyleng-2).replace(/\\"/g,'"'); return 28; 
-break;
-case 20: return 30; 
-break;
-case 21: return 30; 
-break;
-case 22: return 29; 
-break;
-case 23: return 33; 
-break;
-case 24: yy_.yytext = yy_.yytext.substr(1, yy_.yyleng-2); return 33; 
-break;
-case 25: return 'INVALID'; 
-break;
-case 26: return 5; 
-break;
-}
-};
-lexer.rules = [/^[^\x00]*?(?=(\{\{))/,/^[^\x00]+/,/^[^\x00]{2,}?(?=(\{\{))/,/^\{\{>/,/^\{\{#/,/^\{\{\//,/^\{\{\^/,/^\{\{\s*else\b/,/^\{\{\{/,/^\{\{&/,/^\{\{![\s\S]*?\}\}/,/^\{\{/,/^=/,/^\.(?=[} ])/,/^\.\./,/^[\/.]/,/^\s+/,/^\}\}\}/,/^\}\}/,/^"(\\["]|[^"])*"/,/^true(?=[}\s])/,/^false(?=[}\s])/,/^[0-9]+(?=[}\s])/,/^[a-zA-Z0-9_$-]+(?=[=}\s\/.])/,/^\[[^\]]*\]/,/^./,/^$/];
-lexer.conditions = {"mu":{"rules":[3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26],"inclusive":false},"emu":{"rules":[2],"inclusive":false},"INITIAL":{"rules":[0,1,26],"inclusive":true}};return lexer;})()
-parser.lexer = lexer;
-return parser;
-})();
-if (typeof require !== 'undefined' && typeof exports !== 'undefined') {
-exports.parser = handlebars;
-exports.parse = function () { return handlebars.parse.apply(handlebars, arguments); }
-exports.main = function commonjsMain(args) {
-    if (!args[1])
-        throw new Error('Usage: '+args[0]+' FILE');
-    if (typeof process !== 'undefined') {
-        var source = require('fs').readFileSync(require('path').join(process.cwd(), args[1]), "utf8");
-    } else {
-        var cwd = require("file").path(require("file").cwd());
-        var source = cwd.join(args[1]).read({charset: "utf-8"});
-    }
-    return exports.parser.parse(source);
-}
-if (typeof module !== 'undefined' && require.main === module) {
-  exports.main(typeof process !== 'undefined' ? process.argv.slice(1) : require("system").args);
-}
-};
-;
-// lib/handlebars/compiler/base.js
-Handlebars.Parser = handlebars;
-
-Handlebars.parse = function(string) {
-  Handlebars.Parser.yy = Handlebars.AST;
-  return Handlebars.Parser.parse(string);
-};
-
-Handlebars.print = function(ast) {
-  return new Handlebars.PrintVisitor().accept(ast);
-};
-
-Handlebars.logger = {
-  DEBUG: 0, INFO: 1, WARN: 2, ERROR: 3, level: 3,
-
-  // override in the host environment
-  log: function(level, str) {}
-};
-
-Handlebars.log = function(level, str) { Handlebars.logger.log(level, str); };
-;
-// lib/handlebars/compiler/ast.js
-(function() {
-
-  Handlebars.AST = {};
-
-  Handlebars.AST.ProgramNode = function(statements, inverse) {
-    this.type = "program";
-    this.statements = statements;
-    if(inverse) { this.inverse = new Handlebars.AST.ProgramNode(inverse); }
-  };
-
-  Handlebars.AST.MustacheNode = function(params, hash, unescaped) {
-    this.type = "mustache";
-    this.id = params[0];
-    this.params = params.slice(1);
-    this.hash = hash;
-    this.escaped = !unescaped;
-  };
-
-  Handlebars.AST.PartialNode = function(id, context) {
-    this.type    = "partial";
-
-    // TODO: disallow complex IDs
-
-    this.id      = id;
-    this.context = context;
-  };
-
-  var verifyMatch = function(open, close) {
-    if(open.original !== close.original) {
-      throw new Handlebars.Exception(open.original + " doesn't match " + close.original);
-    }
-  };
-
-  Handlebars.AST.BlockNode = function(mustache, program, close) {
-    verifyMatch(mustache.id, close);
-    this.type = "block";
-    this.mustache = mustache;
-    this.program  = program;
-  };
-
-  Handlebars.AST.InverseNode = function(mustache, program, close) {
-    verifyMatch(mustache.id, close);
-    this.type = "inverse";
-    this.mustache = mustache;
-    this.program  = program;
-  };
-
-  Handlebars.AST.ContentNode = function(string) {
-    this.type = "content";
-    this.string = string;
-  };
-
-  Handlebars.AST.HashNode = function(pairs) {
-    this.type = "hash";
-    this.pairs = pairs;
-  };
-
-  Handlebars.AST.IdNode = function(parts) {
-    this.type = "ID";
-    this.original = parts.join(".");
-
-    var dig = [], depth = 0;
-
-    for(var i=0,l=parts.length; i<l; i++) {
-      var part = parts[i];
-
-      if(part === "..") { depth++; }
-      else if(part === "." || part === "this") { this.isScoped = true; }
-      else { dig.push(part); }
-    }
-
-    this.parts    = dig;
-    this.string   = dig.join('.');
-    this.depth    = depth;
-    this.isSimple = (dig.length === 1) && (depth === 0);
-  };
-
-  Handlebars.AST.StringNode = function(string) {
-    this.type = "STRING";
-    this.string = string;
-  };
-
-  Handlebars.AST.IntegerNode = function(integer) {
-    this.type = "INTEGER";
-    this.integer = integer;
-  };
-
-  Handlebars.AST.BooleanNode = function(bool) {
-    this.type = "BOOLEAN";
-    this.bool = bool;
-  };
-
-  Handlebars.AST.CommentNode = function(comment) {
-    this.type = "comment";
-    this.comment = comment;
-  };
-
-})();;
-// lib/handlebars/utils.js
-Handlebars.Exception = function(message) {
-  var tmp = Error.prototype.constructor.apply(this, arguments);
-
-  for (var p in tmp) {
-    if (tmp.hasOwnProperty(p)) { this[p] = tmp[p]; }
-  }
-
-  this.message = tmp.message;
-};
-Handlebars.Exception.prototype = new Error;
-
-// Build out our basic SafeString type
-Handlebars.SafeString = function(string) {
-  this.string = string;
-};
-Handlebars.SafeString.prototype.toString = function() {
-  return this.string.toString();
-};
-
-(function() {
-  var escape = {
-    "<": "&lt;",
-    ">": "&gt;",
-    '"': "&quot;",
-    "'": "&#x27;",
-    "`": "&#x60;"
-  };
-
-  var badChars = /&(?!\w+;)|[<>"'`]/g;
-  var possible = /[&<>"'`]/;
-
-  var escapeChar = function(chr) {
-    return escape[chr] || "&amp;";
-  };
-
-  Handlebars.Utils = {
-    escapeExpression: function(string) {
-      // don't escape SafeStrings, since they're already safe
-      if (string instanceof Handlebars.SafeString) {
-        return string.toString();
-      } else if (string == null || string === false) {
-        return "";
-      }
-
-      if(!possible.test(string)) { return string; }
-      return string.replace(badChars, escapeChar);
-    },
-
-    isEmpty: function(value) {
-      if (typeof value === "undefined") {
-        return true;
-      } else if (value === null) {
-        return true;
-      } else if (value === false) {
-        return true;
-      } else if(Object.prototype.toString.call(value) === "[object Array]" && value.length === 0) {
-        return true;
-      } else {
-        return false;
-      }
-    }
-  };
-})();;
-// lib/handlebars/compiler/compiler.js
-Handlebars.Compiler = function() {};
-Handlebars.JavaScriptCompiler = function() {};
-
-(function(Compiler, JavaScriptCompiler) {
-  Compiler.OPCODE_MAP = {
-    appendContent: 1,
-    getContext: 2,
-    lookupWithHelpers: 3,
-    lookup: 4,
-    append: 5,
-    invokeMustache: 6,
-    appendEscaped: 7,
-    pushString: 8,
-    truthyOrFallback: 9,
-    functionOrFallback: 10,
-    invokeProgram: 11,
-    invokePartial: 12,
-    push: 13,
-    assignToHash: 15,
-    pushStringParam: 16
-  };
-
-  Compiler.MULTI_PARAM_OPCODES = {
-    appendContent: 1,
-    getContext: 1,
-    lookupWithHelpers: 2,
-    lookup: 1,
-    invokeMustache: 3,
-    pushString: 1,
-    truthyOrFallback: 1,
-    functionOrFallback: 1,
-    invokeProgram: 3,
-    invokePartial: 1,
-    push: 1,
-    assignToHash: 1,
-    pushStringParam: 1
-  };
-
-  Compiler.DISASSEMBLE_MAP = {};
-
-  for(var prop in Compiler.OPCODE_MAP) {
-    var value = Compiler.OPCODE_MAP[prop];
-    Compiler.DISASSEMBLE_MAP[value] = prop;
-  }
-
-  Compiler.multiParamSize = function(code) {
-    return Compiler.MULTI_PARAM_OPCODES[Compiler.DISASSEMBLE_MAP[code]];
-  };
-
-  Compiler.prototype = {
-    compiler: Compiler,
-
-    disassemble: function() {
-      var opcodes = this.opcodes, opcode, nextCode;
-      var out = [], str, name, value;
-
-      for(var i=0, l=opcodes.length; i<l; i++) {
-        opcode = opcodes[i];
-
-        if(opcode === 'DECLARE') {
-          name = opcodes[++i];
-          value = opcodes[++i];
-          out.push("DECLARE " + name + " = " + value);
-        } else {
-          str = Compiler.DISASSEMBLE_MAP[opcode];
-
-          var extraParams = Compiler.multiParamSize(opcode);
-          var codes = [];
-
-          for(var j=0; j<extraParams; j++) {
-            nextCode = opcodes[++i];
-
-            if(typeof nextCode === "string") {
-              nextCode = "\"" + nextCode.replace("\n", "\\n") + "\"";
-            }
-
-            codes.push(nextCode);
-          }
-
-          str = str + " " + codes.join(" ");
-
-          out.push(str);
-        }
-      }
-
-      return out.join("\n");
-    },
-
-    guid: 0,
-
-    compile: function(program, options) {
-      this.children = [];
-      this.depths = {list: []};
-      this.options = options;
-
-      // These changes will propagate to the other compiler components
-      var knownHelpers = this.options.knownHelpers;
-      this.options.knownHelpers = {
-        'helperMissing': true,
-        'blockHelperMissing': true,
-        'each': true,
-        'if': true,
-        'unless': true,
-        'with': true,
-        'log': true
-      };
-      if (knownHelpers) {
-        for (var name in knownHelpers) {
-          this.options.knownHelpers[name] = knownHelpers[name];
-        }
-      }
-
-      return this.program(program);
-    },
-
-    accept: function(node) {
-      return this[node.type](node);
-    },
-
-    program: function(program) {
-      var statements = program.statements, statement;
-      this.opcodes = [];
-
-      for(var i=0, l=statements.length; i<l; i++) {
-        statement = statements[i];
-        this[statement.type](statement);
-      }
-      this.isSimple = l === 1;
-
-      this.depths.list = this.depths.list.sort(function(a, b) {
-        return a - b;
-      });
-
-      return this;
-    },
-
-    compileProgram: function(program) {
-      var result = new this.compiler().compile(program, this.options);
-      var guid = this.guid++;
-
-      this.usePartial = this.usePartial || result.usePartial;
-
-      this.children[guid] = result;
-
-      for(var i=0, l=result.depths.list.length; i<l; i++) {
-        depth = result.depths.list[i];
-
-        if(depth < 2) { continue; }
-        else { this.addDepth(depth - 1); }
-      }
-
-      return guid;
-    },
-
-    block: function(block) {
-      var mustache = block.mustache;
-      var depth, child, inverse, inverseGuid;
-
-      var params = this.setupStackForMustache(mustache);
-
-      var programGuid = this.compileProgram(block.program);
-
-      if(block.program.inverse) {
-        inverseGuid = this.compileProgram(block.program.inverse);
-        this.declare('inverse', inverseGuid);
-      }
-
-      this.opcode('invokeProgram', programGuid, params.length, !!mustache.hash);
-      this.declare('inverse', null);
-      this.opcode('append');
-    },
-
-    inverse: function(block) {
-      var params = this.setupStackForMustache(block.mustache);
-
-      var programGuid = this.compileProgram(block.program);
-
-      this.declare('inverse', programGuid);
-
-      this.opcode('invokeProgram', null, params.length, !!block.mustache.hash);
-      this.declare('inverse', null);
-      this.opcode('append');
-    },
-
-    hash: function(hash) {
-      var pairs = hash.pairs, pair, val;
-
-      this.opcode('push', '{}');
-
-      for(var i=0, l=pairs.length; i<l; i++) {
-        pair = pairs[i];
-        val  = pair[1];
-
-        this.accept(val);
-        this.opcode('assignToHash', pair[0]);
-      }
-    },
-
-    partial: function(partial) {
-      var id = partial.id;
-      this.usePartial = true;
-
-      if(partial.context) {
-        this.ID(partial.context);
-      } else {
-        this.opcode('push', 'depth0');
-      }
-
-      this.opcode('invokePartial', id.original);
-      this.opcode('append');
-    },
-
-    content: function(content) {
-      this.opcode('appendContent', content.string);
-    },
-
-    mustache: function(mustache) {
-      var params = this.setupStackForMustache(mustache);
-
-      this.opcode('invokeMustache', params.length, mustache.id.original, !!mustache.hash);
-
-      if(mustache.escaped && !this.options.noEscape) {
-        this.opcode('appendEscaped');
-      } else {
-        this.opcode('append');
-      }
-    },
-
-    ID: function(id) {
-      this.addDepth(id.depth);
-
-      this.opcode('getContext', id.depth);
-
-      this.opcode('lookupWithHelpers', id.parts[0] || null, id.isScoped || false);
-
-      for(var i=1, l=id.parts.length; i<l; i++) {
-        this.opcode('lookup', id.parts[i]);
-      }
-    },
-
-    STRING: function(string) {
-      this.opcode('pushString', string.string);
-    },
-
-    INTEGER: function(integer) {
-      this.opcode('push', integer.integer);
-    },
-
-    BOOLEAN: function(bool) {
-      this.opcode('push', bool.bool);
-    },
-
-    comment: function() {},
-
-    // HELPERS
-    pushParams: function(params) {
-      var i = params.length, param;
-
-      while(i--) {
-        param = params[i];
-
-        if(this.options.stringParams) {
-          if(param.depth) {
-            this.addDepth(param.depth);
-          }
-
-          this.opcode('getContext', param.depth || 0);
-          this.opcode('pushStringParam', param.string);
-        } else {
-          this[param.type](param);
-        }
-      }
-    },
-
-    opcode: function(name, val1, val2, val3) {
-      this.opcodes.push(Compiler.OPCODE_MAP[name]);
-      if(val1 !== undefined) { this.opcodes.push(val1); }
-      if(val2 !== undefined) { this.opcodes.push(val2); }
-      if(val3 !== undefined) { this.opcodes.push(val3); }
-    },
-
-    declare: function(name, value) {
-      this.opcodes.push('DECLARE');
-      this.opcodes.push(name);
-      this.opcodes.push(value);
-    },
-
-    addDepth: function(depth) {
-      if(depth === 0) { return; }
-
-      if(!this.depths[depth]) {
-        this.depths[depth] = true;
-        this.depths.list.push(depth);
-      }
-    },
-
-    setupStackForMustache: function(mustache) {
-      var params = mustache.params;
-
-      this.pushParams(params);
-
-      if(mustache.hash) {
-        this.hash(mustache.hash);
-      }
-
-      this.ID(mustache.id);
-
-      return params;
-    }
-  };
-
-  JavaScriptCompiler.prototype = {
-    // PUBLIC API: You can override these methods in a subclass to provide
-    // alternative compiled forms for name lookup and buffering semantics
-    nameLookup: function(parent, name, type) {
-			if (/^[0-9]+$/.test(name)) {
-        return parent + "[" + name + "]";
-      } else if (JavaScriptCompiler.isValidJavaScriptVariableName(name)) {
-	    	return parent + "." + name;
-			}
-			else {
-				return parent + "['" + name + "']";
-      }
-    },
-
-    appendToBuffer: function(string) {
-      if (this.environment.isSimple) {
-        return "return " + string + ";";
-      } else {
-        return "buffer += " + string + ";";
-      }
-    },
-
-    initializeBuffer: function() {
-      return this.quotedString("");
-    },
-
-    namespace: "Handlebars",
-    // END PUBLIC API
-
-    compile: function(environment, options, context, asObject) {
-      this.environment = environment;
-      this.options = options || {};
-
-      this.name = this.environment.name;
-      this.isChild = !!context;
-      this.context = context || {
-        programs: [],
-        aliases: { self: 'this' },
-        registers: {list: []}
-      };
-
-      this.preamble();
-
-      this.stackSlot = 0;
-      this.stackVars = [];
-
-      this.compileChildren(environment, options);
-
-      var opcodes = environment.opcodes, opcode;
-
-      this.i = 0;
-
-      for(l=opcodes.length; this.i<l; this.i++) {
-        opcode = this.nextOpcode(0);
-
-        if(opcode[0] === 'DECLARE') {
-          this.i = this.i + 2;
-          this[opcode[1]] = opcode[2];
-        } else {
-          this.i = this.i + opcode[1].length;
-          this[opcode[0]].apply(this, opcode[1]);
-        }
-      }
-
-      return this.createFunctionContext(asObject);
-    },
-
-    nextOpcode: function(n) {
-      var opcodes = this.environment.opcodes, opcode = opcodes[this.i + n], name, val;
-      var extraParams, codes;
-
-      if(opcode === 'DECLARE') {
-        name = opcodes[this.i + 1];
-        val  = opcodes[this.i + 2];
-        return ['DECLARE', name, val];
-      } else {
-        name = Compiler.DISASSEMBLE_MAP[opcode];
-
-        extraParams = Compiler.multiParamSize(opcode);
-        codes = [];
-
-        for(var j=0; j<extraParams; j++) {
-          codes.push(opcodes[this.i + j + 1 + n]);
-        }
-
-        return [name, codes];
-      }
-    },
-
-    eat: function(opcode) {
-      this.i = this.i + opcode.length;
-    },
-
-    preamble: function() {
-      var out = [];
-
-      // this register will disambiguate helper lookup from finding a function in
-      // a context. This is necessary for mustache compatibility, which requires
-      // that context functions in blocks are evaluated by blockHelperMissing, and
-      // then proceed as if the resulting value was provided to blockHelperMissing.
-      this.useRegister('foundHelper');
-
-      if (!this.isChild) {
-        var namespace = this.namespace;
-        var copies = "helpers = helpers || " + namespace + ".helpers;";
-        if(this.environment.usePartial) { copies = copies + " partials = partials || " + namespace + ".partials;"; }
-        out.push(copies);
-      } else {
-        out.push('');
-      }
-
-      if (!this.environment.isSimple) {
-        out.push(", buffer = " + this.initializeBuffer());
-      } else {
-        out.push("");
-      }
-
-      // track the last context pushed into place to allow skipping the
-      // getContext opcode when it would be a noop
-      this.lastContext = 0;
-      this.source = out;
-    },
-
-    createFunctionContext: function(asObject) {
-      var locals = this.stackVars;
-      if (!this.isChild) {
-        locals = locals.concat(this.context.registers.list);
-      }
-
-      if(locals.length > 0) {
-        this.source[1] = this.source[1] + ", " + locals.join(", ");
-      }
-
-      // Generate minimizer alias mappings
-      if (!this.isChild) {
-        var aliases = []
-        for (var alias in this.context.aliases) {
-          this.source[1] = this.source[1] + ', ' + alias + '=' + this.context.aliases[alias];
-        }
-      }
-
-      if (this.source[1]) {
-        this.source[1] = "var " + this.source[1].substring(2) + ";";
-      }
-
-      // Merge children
-      if (!this.isChild) {
-        this.source[1] += '\n' + this.context.programs.join('\n') + '\n';
-      }
-
-      if (!this.environment.isSimple) {
-        this.source.push("return buffer;");
-      }
-
-      var params = this.isChild ? ["depth0", "data"] : ["Handlebars", "depth0", "helpers", "partials", "data"];
-
-      for(var i=0, l=this.environment.depths.list.length; i<l; i++) {
-        params.push("depth" + this.environment.depths.list[i]);
-      }
-
-      if (asObject) {
-        params.push(this.source.join("\n  "));
-
-        return Function.apply(this, params);
-      } else {
-        var functionSource = 'function ' + (this.name || '') + '(' + params.join(',') + ') {\n  ' + this.source.join("\n  ") + '}';
-        Handlebars.log(Handlebars.logger.DEBUG, functionSource + "\n\n");
-        return functionSource;
-      }
-    },
-
-    appendContent: function(content) {
-      this.source.push(this.appendToBuffer(this.quotedString(content)));
-    },
-
-    append: function() {
-      var local = this.popStack();
-      this.source.push("if(" + local + " || " + local + " === 0) { " + this.appendToBuffer(local) + " }");
-      if (this.environment.isSimple) {
-        this.source.push("else { " + this.appendToBuffer("''") + " }");
-      }
-    },
-
-    appendEscaped: function() {
-      var opcode = this.nextOpcode(1), extra = "";
-      this.context.aliases.escapeExpression = 'this.escapeExpression';
-
-      if(opcode[0] === 'appendContent') {
-        extra = " + " + this.quotedString(opcode[1][0]);
-        this.eat(opcode);
-      }
-
-      this.source.push(this.appendToBuffer("escapeExpression(" + this.popStack() + ")" + extra));
-    },
-
-    getContext: function(depth) {
-      if(this.lastContext !== depth) {
-        this.lastContext = depth;
-      }
-    },
-
-    lookupWithHelpers: function(name, isScoped) {
-      if(name) {
-        var topStack = this.nextStack();
-
-        this.usingKnownHelper = false;
-
-        var toPush;
-        if (!isScoped && this.options.knownHelpers[name]) {
-          toPush = topStack + " = " + this.nameLookup('helpers', name, 'helper');
-          this.usingKnownHelper = true;
-        } else if (isScoped || this.options.knownHelpersOnly) {
-          toPush = topStack + " = " + this.nameLookup('depth' + this.lastContext, name, 'context');
-        } else {
-          this.register('foundHelper', this.nameLookup('helpers', name, 'helper'));
-          toPush = topStack + " = foundHelper || " + this.nameLookup('depth' + this.lastContext, name, 'context');
-        }
-
-        toPush += ';';
-        this.source.push(toPush);
-      } else {
-        this.pushStack('depth' + this.lastContext);
-      }
-    },
-
-    lookup: function(name) {
-      var topStack = this.topStack();
-      this.source.push(topStack + " = (" + topStack + " === null || " + topStack + " === undefined || " + topStack + " === false ? " +
- 				topStack + " : " + this.nameLookup(topStack, name, 'context') + ");");
-    },
-
-    pushStringParam: function(string) {
-      this.pushStack('depth' + this.lastContext);
-      this.pushString(string);
-    },
-
-    pushString: function(string) {
-      this.pushStack(this.quotedString(string));
-    },
-
-    push: function(name) {
-      this.pushStack(name);
-    },
-
-    invokeMustache: function(paramSize, original, hasHash) {
-      this.populateParams(paramSize, this.quotedString(original), "{}", null, hasHash, function(nextStack, helperMissingString, id) {
-        if (!this.usingKnownHelper) {
-          this.context.aliases.helperMissing = 'helpers.helperMissing';
-          this.context.aliases.undef = 'void 0';
-          this.source.push("else if(" + id + "=== undef) { " + nextStack + " = helperMissing.call(" + helperMissingString + "); }");
-          if (nextStack !== id) {
-            this.source.push("else { " + nextStack + " = " + id + "; }");
-          }
-        }
-      });
-    },
-
-    invokeProgram: function(guid, paramSize, hasHash) {
-      var inverse = this.programExpression(this.inverse);
-      var mainProgram = this.programExpression(guid);
-
-      this.populateParams(paramSize, null, mainProgram, inverse, hasHash, function(nextStack, helperMissingString, id) {
-        if (!this.usingKnownHelper) {
-          this.context.aliases.blockHelperMissing = 'helpers.blockHelperMissing';
-          this.source.push("else { " + nextStack + " = blockHelperMissing.call(" + helperMissingString + "); }");
-        }
-      });
-    },
-
-    populateParams: function(paramSize, helperId, program, inverse, hasHash, fn) {
-      var needsRegister = hasHash || this.options.stringParams || inverse || this.options.data;
-      var id = this.popStack(), nextStack;
-      var params = [], param, stringParam, stringOptions;
-
-      if (needsRegister) {
-        this.register('tmp1', program);
-        stringOptions = 'tmp1';
-      } else {
-        stringOptions = '{ hash: {} }';
-      }
-
-      if (needsRegister) {
-        var hash = (hasHash ? this.popStack() : '{}');
-        this.source.push('tmp1.hash = ' + hash + ';');
-      }
-
-      if(this.options.stringParams) {
-        this.source.push('tmp1.contexts = [];');
-      }
-
-      for(var i=0; i<paramSize; i++) {
-        param = this.popStack();
-        params.push(param);
-
-        if(this.options.stringParams) {
-          this.source.push('tmp1.contexts.push(' + this.popStack() + ');');
-        }
-      }
-
-      if(inverse) {
-        this.source.push('tmp1.fn = tmp1;');
-        this.source.push('tmp1.inverse = ' + inverse + ';');
-      }
-
-      if(this.options.data) {
-        this.source.push('tmp1.data = data;');
-      }
-
-      params.push(stringOptions);
-
-      this.populateCall(params, id, helperId || id, fn, program !== '{}');
-    },
-
-    populateCall: function(params, id, helperId, fn, program) {
-      var paramString = ["depth0"].concat(params).join(", ");
-      var helperMissingString = ["depth0"].concat(helperId).concat(params).join(", ");
-
-      var nextStack = this.nextStack();
-
-      if (this.usingKnownHelper) {
-        this.source.push(nextStack + " = " + id + ".call(" + paramString + ");");
-      } else {
-        this.context.aliases.functionType = '"function"';
-        var condition = program ? "foundHelper && " : ""
-        this.source.push("if(" + condition + "typeof " + id + " === functionType) { " + nextStack + " = " + id + ".call(" + paramString + "); }");
-      }
-      fn.call(this, nextStack, helperMissingString, id);
-      this.usingKnownHelper = false;
-    },
-
-    invokePartial: function(context) {
-      params = [this.nameLookup('partials', context, 'partial'), "'" + context + "'", this.popStack(), "helpers", "partials"];
-
-      if (this.options.data) {
-        params.push("data");
-      }
-
-      this.pushStack("self.invokePartial(" + params.join(", ") + ");");
-    },
-
-    assignToHash: function(key) {
-      var value = this.popStack();
-      var hash = this.topStack();
-
-      this.source.push(hash + "['" + key + "'] = " + value + ";");
-    },
-
-    // HELPERS
-
-    compiler: JavaScriptCompiler,
-
-    compileChildren: function(environment, options) {
-      var children = environment.children, child, compiler;
-
-      for(var i=0, l=children.length; i<l; i++) {
-        child = children[i];
-        compiler = new this.compiler();
-
-        this.context.programs.push('');     // Placeholder to prevent name conflicts for nested children
-        var index = this.context.programs.length;
-        child.index = index;
-        child.name = 'program' + index;
-        this.context.programs[index] = compiler.compile(child, options, this.context);
-      }
-    },
-
-    programExpression: function(guid) {
-      if(guid == null) { return "self.noop"; }
-
-      var child = this.environment.children[guid],
-          depths = child.depths.list;
-      var programParams = [child.index, child.name, "data"];
-
-      for(var i=0, l = depths.length; i<l; i++) {
-        depth = depths[i];
-
-        if(depth === 1) { programParams.push("depth0"); }
-        else { programParams.push("depth" + (depth - 1)); }
-      }
-
-      if(depths.length === 0) {
-        return "self.program(" + programParams.join(", ") + ")";
-      } else {
-        programParams.shift();
-        return "self.programWithDepth(" + programParams.join(", ") + ")";
-      }
-    },
-
-    register: function(name, val) {
-      this.useRegister(name);
-      this.source.push(name + " = " + val + ";");
-    },
-
-    useRegister: function(name) {
-      if(!this.context.registers[name]) {
-        this.context.registers[name] = true;
-        this.context.registers.list.push(name);
-      }
-    },
-
-    pushStack: function(item) {
-      this.source.push(this.nextStack() + " = " + item + ";");
-      return "stack" + this.stackSlot;
-    },
-
-    nextStack: function() {
-      this.stackSlot++;
-      if(this.stackSlot > this.stackVars.length) { this.stackVars.push("stack" + this.stackSlot); }
-      return "stack" + this.stackSlot;
-    },
-
-    popStack: function() {
-      return "stack" + this.stackSlot--;
-    },
-
-    topStack: function() {
-      return "stack" + this.stackSlot;
-    },
-
-    quotedString: function(str) {
-      return '"' + str
-        .replace(/\\/g, '\\\\')
-        .replace(/"/g, '\\"')
-        .replace(/\n/g, '\\n')
-        .replace(/\r/g, '\\r') + '"';
-    }
-  };
-
-  var reservedWords = (
-    "break else new var" +
-    " case finally return void" +
-    " catch for switch while" +
-    " continue function this with" +
-    " default if throw" +
-    " delete in try" +
-    " do instanceof typeof" +
-    " abstract enum int short" +
-    " boolean export interface static" +
-    " byte extends long super" +
-    " char final native synchronized" +
-    " class float package throws" +
-    " const goto private transient" +
-    " debugger implements protected volatile" +
-    " double import public let yield"
-  ).split(" ");
-
-  var compilerWords = JavaScriptCompiler.RESERVED_WORDS = {};
-
-  for(var i=0, l=reservedWords.length; i<l; i++) {
-    compilerWords[reservedWords[i]] = true;
-  }
-
-	JavaScriptCompiler.isValidJavaScriptVariableName = function(name) {
-		if(!JavaScriptCompiler.RESERVED_WORDS[name] && /^[a-zA-Z_$][0-9a-zA-Z_$]+$/.test(name)) {
-			return true;
-		}
-		return false;
-	}
-
-})(Handlebars.Compiler, Handlebars.JavaScriptCompiler);
-
-Handlebars.precompile = function(string, options) {
-  options = options || {};
-
-  var ast = Handlebars.parse(string);
-  var environment = new Handlebars.Compiler().compile(ast, options);
-  return new Handlebars.JavaScriptCompiler().compile(environment, options);
-};
-
-Handlebars.compile = function(string, options) {
-  options = options || {};
-
-  var compiled;
-  function compile() {
-    var ast = Handlebars.parse(string);
-    var environment = new Handlebars.Compiler().compile(ast, options);
-    var templateSpec = new Handlebars.JavaScriptCompiler().compile(environment, options, undefined, true);
-    return Handlebars.template(templateSpec);
-  }
-
-  // Template is only compiled on first use and cached after that point.
-  return function(context, options) {
-    if (!compiled) {
-      compiled = compile();
-    }
-    return compiled.call(this, context, options);
-  };
-};
-;
-// lib/handlebars/runtime.js
-Handlebars.VM = {
-  template: function(templateSpec) {
-    // Just add water
-    var container = {
-      escapeExpression: Handlebars.Utils.escapeExpression,
-      invokePartial: Handlebars.VM.invokePartial,
-      programs: [],
-      program: function(i, fn, data) {
-        var programWrapper = this.programs[i];
-        if(data) {
-          return Handlebars.VM.program(fn, data);
-        } else if(programWrapper) {
-          return programWrapper;
-        } else {
-          programWrapper = this.programs[i] = Handlebars.VM.program(fn);
-          return programWrapper;
-        }
-      },
-      programWithDepth: Handlebars.VM.programWithDepth,
-      noop: Handlebars.VM.noop
-    };
-
-    return function(context, options) {
-      options = options || {};
-      return templateSpec.call(container, Handlebars, context, options.helpers, options.partials, options.data);
-    };
-  },
-
-  programWithDepth: function(fn, data, $depth) {
-    var args = Array.prototype.slice.call(arguments, 2);
-
-    return function(context, options) {
-      options = options || {};
-
-      return fn.apply(this, [context, options.data || data].concat(args));
-    };
-  },
-  program: function(fn, data) {
-    return function(context, options) {
-      options = options || {};
-
-      return fn(context, options.data || data);
-    };
-  },
-  noop: function() { return ""; },
-  invokePartial: function(partial, name, context, helpers, partials, data) {
-    var options = { helpers: helpers, partials: partials, data: data };
-
-    if(partial === undefined) {
-      throw new Handlebars.Exception("The partial " + name + " could not be found");
-    } else if(partial instanceof Function) {
-      return partial(context, options);
-    } else if (!Handlebars.compile) {
-      throw new Handlebars.Exception("The partial " + name + " could not be compiled when running in runtime-only mode");
-    } else {
-      partials[name] = Handlebars.compile(partial);
-      return partials[name](context, options);
-    }
-  }
-};
-
-Handlebars.template = Handlebars.VM.template;
-;
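The Handlebars.compile wrapper deleted above is a lazy memoizer: parsing and code generation are deferred until the template is first rendered, and the compiled function is then cached for every later call. A minimal self-contained sketch of that pattern follows; the toCompiledFn argument is a hypothetical stand-in for the parse-and-compile step, not the real Handlebars pipeline.

function lazyCompile(source, toCompiledFn) {
  var compiled;                        // cache; populated on first render
  return function(context) {
    if (!compiled) {
      compiled = toCompiledFn(source); // expensive step runs at most once
    }
    return compiled(context);          // all later calls reuse the cache
  };
}

// Usage: the "compiler" here just does a naive {{name}} substitution.
var render = lazyCompile("Hello {{name}}", function(src) {
  return function(ctx) { return src.replace("{{name}}", ctx.name); };
});
render({name: "Ambari"}); // first call compiles, then caches
render({name: "HDFS"});   // reuses the cached function
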
diff --git a/branch-1.2/ambari-web/vendor/scripts/jquery-1.7.2.min.js b/branch-1.2/ambari-web/vendor/scripts/jquery-1.7.2.min.js
deleted file mode 100644
index 16ad06c..0000000
--- a/branch-1.2/ambari-web/vendor/scripts/jquery-1.7.2.min.js
+++ /dev/null
@@ -1,4 +0,0 @@
-/*! jQuery v1.7.2 jquery.com | jquery.org/license */
-(function(a,b){function cy(a){return f.isWindow(a)?a:a.nodeType===9?a.defaultView||a.parentWindow:!1}function cu(a){if(!cj[a]){var b=c.body,d=f("<"+a+">").appendTo(b),e=d.css("display");d.remove();if(e==="none"||e===""){ck||(ck=c.createElement("iframe"),ck.frameBorder=ck.width=ck.height=0),b.appendChild(ck);if(!cl||!ck.createElement)cl=(ck.contentWindow||ck.contentDocument).document,cl.write((f.support.boxModel?"<!doctype html>":"")+"<html><body>"),cl.close();d=cl.createElement(a),cl.body.appendChild(d),e=f.css(d,"display"),b.removeChild(ck)}cj[a]=e}return cj[a]}function ct(a,b){var c={};f.each(cp.concat.apply([],cp.slice(0,b)),function(){c[this]=a});return c}function cs(){cq=b}function cr(){setTimeout(cs,0);return cq=f.now()}function ci(){try{return new a.ActiveXObject("Microsoft.XMLHTTP")}catch(b){}}function ch(){try{return new a.XMLHttpRequest}catch(b){}}function cb(a,c){a.dataFilter&&(c=a.dataFilter(c,a.dataType));var d=a.dataTypes,e={},g,h,i=d.length,j,k=d[0],l,m,n,o,p;for(g=1;g<i;g++){if(g===1)for(h in a.converters)typeof h=="string"&&(e[h.toLowerCase()]=a.converters[h]);l=k,k=d[g];if(k==="*")k=l;else if(l!=="*"&&l!==k){m=l+" "+k,n=e[m]||e["* "+k];if(!n){p=b;for(o in e){j=o.split(" ");if(j[0]===l||j[0]==="*"){p=e[j[1]+" "+k];if(p){o=e[o],o===!0?n=p:p===!0&&(n=o);break}}}}!n&&!p&&f.error("No conversion from "+m.replace(" "," to ")),n!==!0&&(c=n?n(c):p(o(c)))}}return c}function ca(a,c,d){var e=a.contents,f=a.dataTypes,g=a.responseFields,h,i,j,k;for(i in g)i in d&&(c[g[i]]=d[i]);while(f[0]==="*")f.shift(),h===b&&(h=a.mimeType||c.getResponseHeader("content-type"));if(h)for(i in e)if(e[i]&&e[i].test(h)){f.unshift(i);break}if(f[0]in d)j=f[0];else{for(i in d){if(!f[0]||a.converters[i+" "+f[0]]){j=i;break}k||(k=i)}j=j||k}if(j){j!==f[0]&&f.unshift(j);return d[j]}}function b_(a,b,c,d){if(f.isArray(b))f.each(b,function(b,e){c||bD.test(a)?d(a,e):b_(a+"["+(typeof e=="object"?b:"")+"]",e,c,d)});else if(!c&&f.type(b)==="object")for(var e in b)b_(a+"["+e+"]",b[e],c,d);else d(a,b)}function b$(a,c){var d,e,g=f.ajaxSettings.flatOptions||{};for(d in c)c[d]!==b&&((g[d]?a:e||(e={}))[d]=c[d]);e&&f.extend(!0,a,e)}function bZ(a,c,d,e,f,g){f=f||c.dataTypes[0],g=g||{},g[f]=!0;var h=a[f],i=0,j=h?h.length:0,k=a===bS,l;for(;i<j&&(k||!l);i++)l=h[i](c,d,e),typeof l=="string"&&(!k||g[l]?l=b:(c.dataTypes.unshift(l),l=bZ(a,c,d,e,l,g)));(k||!l)&&!g["*"]&&(l=bZ(a,c,d,e,"*",g));return l}function bY(a){return function(b,c){typeof b!="string"&&(c=b,b="*");if(f.isFunction(c)){var d=b.toLowerCase().split(bO),e=0,g=d.length,h,i,j;for(;e<g;e++)h=d[e],j=/^\+/.test(h),j&&(h=h.substr(1)||"*"),i=a[h]=a[h]||[],i[j?"unshift":"push"](c)}}}function bB(a,b,c){var d=b==="width"?a.offsetWidth:a.offsetHeight,e=b==="width"?1:0,g=4;if(d>0){if(c!=="border")for(;e<g;e+=2)c||(d-=parseFloat(f.css(a,"padding"+bx[e]))||0),c==="margin"?d+=parseFloat(f.css(a,c+bx[e]))||0:d-=parseFloat(f.css(a,"border"+bx[e]+"Width"))||0;return d+"px"}d=by(a,b);if(d<0||d==null)d=a.style[b];if(bt.test(d))return d;d=parseFloat(d)||0;if(c)for(;e<g;e+=2)d+=parseFloat(f.css(a,"padding"+bx[e]))||0,c!=="padding"&&(d+=parseFloat(f.css(a,"border"+bx[e]+"Width"))||0),c==="margin"&&(d+=parseFloat(f.css(a,c+bx[e]))||0);return d+"px"}function bo(a){var b=c.createElement("div");bh.appendChild(b),b.innerHTML=a.outerHTML;return b.firstChild}function bn(a){var b=(a.nodeName||"").toLowerCase();b==="input"?bm(a):b!=="script"&&typeof a.getElementsByTagName!="undefined"&&f.grep(a.getElementsByTagName("input"),bm)}function 
bm(a){if(a.type==="checkbox"||a.type==="radio")a.defaultChecked=a.checked}function bl(a){return typeof a.getElementsByTagName!="undefined"?a.getElementsByTagName("*"):typeof a.querySelectorAll!="undefined"?a.querySelectorAll("*"):[]}function bk(a,b){var c;b.nodeType===1&&(b.clearAttributes&&b.clearAttributes(),b.mergeAttributes&&b.mergeAttributes(a),c=b.nodeName.toLowerCase(),c==="object"?b.outerHTML=a.outerHTML:c!=="input"||a.type!=="checkbox"&&a.type!=="radio"?c==="option"?b.selected=a.defaultSelected:c==="input"||c==="textarea"?b.defaultValue=a.defaultValue:c==="script"&&b.text!==a.text&&(b.text=a.text):(a.checked&&(b.defaultChecked=b.checked=a.checked),b.value!==a.value&&(b.value=a.value)),b.removeAttribute(f.expando),b.removeAttribute("_submit_attached"),b.removeAttribute("_change_attached"))}function bj(a,b){if(b.nodeType===1&&!!f.hasData(a)){var c,d,e,g=f._data(a),h=f._data(b,g),i=g.events;if(i){delete h.handle,h.events={};for(c in i)for(d=0,e=i[c].length;d<e;d++)f.event.add(b,c,i[c][d])}h.data&&(h.data=f.extend({},h.data))}}function bi(a,b){return f.nodeName(a,"table")?a.getElementsByTagName("tbody")[0]||a.appendChild(a.ownerDocument.createElement("tbody")):a}function U(a){var b=V.split("|"),c=a.createDocumentFragment();if(c.createElement)while(b.length)c.createElement(b.pop());return c}function T(a,b,c){b=b||0;if(f.isFunction(b))return f.grep(a,function(a,d){var e=!!b.call(a,d,a);return e===c});if(b.nodeType)return f.grep(a,function(a,d){return a===b===c});if(typeof b=="string"){var d=f.grep(a,function(a){return a.nodeType===1});if(O.test(b))return f.filter(b,d,!c);b=f.filter(b,d)}return f.grep(a,function(a,d){return f.inArray(a,b)>=0===c})}function S(a){return!a||!a.parentNode||a.parentNode.nodeType===11}function K(){return!0}function J(){return!1}function n(a,b,c){var d=b+"defer",e=b+"queue",g=b+"mark",h=f._data(a,d);h&&(c==="queue"||!f._data(a,e))&&(c==="mark"||!f._data(a,g))&&setTimeout(function(){!f._data(a,e)&&!f._data(a,g)&&(f.removeData(a,d,!0),h.fire())},0)}function m(a){for(var b in a){if(b==="data"&&f.isEmptyObject(a[b]))continue;if(b!=="toJSON")return!1}return!0}function l(a,c,d){if(d===b&&a.nodeType===1){var e="data-"+c.replace(k,"-$1").toLowerCase();d=a.getAttribute(e);if(typeof d=="string"){try{d=d==="true"?!0:d==="false"?!1:d==="null"?null:f.isNumeric(d)?+d:j.test(d)?f.parseJSON(d):d}catch(g){}f.data(a,c,d)}else d=b}return d}function h(a){var b=g[a]={},c,d;a=a.split(/\s+/);for(c=0,d=a.length;c<d;c++)b[a[c]]=!0;return b}var c=a.document,d=a.navigator,e=a.location,f=function(){function J(){if(!e.isReady){try{c.documentElement.doScroll("left")}catch(a){setTimeout(J,1);return}e.ready()}}var e=function(a,b){return new e.fn.init(a,b,h)},f=a.jQuery,g=a.$,h,i=/^(?:[^#<]*(<[\w\W]+>)[^>]*$|#([\w\-]*)$)/,j=/\S/,k=/^\s+/,l=/\s+$/,m=/^<(\w+)\s*\/?>(?:<\/\1>)?$/,n=/^[\],:{}\s]*$/,o=/\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g,p=/"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g,q=/(?:^|:|,)(?:\s*\[)+/g,r=/(webkit)[ \/]([\w.]+)/,s=/(opera)(?:.*version)?[ \/]([\w.]+)/,t=/(msie) ([\w.]+)/,u=/(mozilla)(?:.*? 
rv:([\w.]+))?/,v=/-([a-z]|[0-9])/ig,w=/^-ms-/,x=function(a,b){return(b+"").toUpperCase()},y=d.userAgent,z,A,B,C=Object.prototype.toString,D=Object.prototype.hasOwnProperty,E=Array.prototype.push,F=Array.prototype.slice,G=String.prototype.trim,H=Array.prototype.indexOf,I={};e.fn=e.prototype={constructor:e,init:function(a,d,f){var g,h,j,k;if(!a)return this;if(a.nodeType){this.context=this[0]=a,this.length=1;return this}if(a==="body"&&!d&&c.body){this.context=c,this[0]=c.body,this.selector=a,this.length=1;return this}if(typeof a=="string"){a.charAt(0)!=="<"||a.charAt(a.length-1)!==">"||a.length<3?g=i.exec(a):g=[null,a,null];if(g&&(g[1]||!d)){if(g[1]){d=d instanceof e?d[0]:d,k=d?d.ownerDocument||d:c,j=m.exec(a),j?e.isPlainObject(d)?(a=[c.createElement(j[1])],e.fn.attr.call(a,d,!0)):a=[k.createElement(j[1])]:(j=e.buildFragment([g[1]],[k]),a=(j.cacheable?e.clone(j.fragment):j.fragment).childNodes);return e.merge(this,a)}h=c.getElementById(g[2]);if(h&&h.parentNode){if(h.id!==g[2])return f.find(a);this.length=1,this[0]=h}this.context=c,this.selector=a;return this}return!d||d.jquery?(d||f).find(a):this.constructor(d).find(a)}if(e.isFunction(a))return f.ready(a);a.selector!==b&&(this.selector=a.selector,this.context=a.context);return e.makeArray(a,this)},selector:"",jquery:"1.7.2",length:0,size:function(){return this.length},toArray:function(){return F.call(this,0)},get:function(a){return a==null?this.toArray():a<0?this[this.length+a]:this[a]},pushStack:function(a,b,c){var d=this.constructor();e.isArray(a)?E.apply(d,a):e.merge(d,a),d.prevObject=this,d.context=this.context,b==="find"?d.selector=this.selector+(this.selector?" ":"")+c:b&&(d.selector=this.selector+"."+b+"("+c+")");return d},each:function(a,b){return e.each(this,a,b)},ready:function(a){e.bindReady(),A.add(a);return this},eq:function(a){a=+a;return a===-1?this.slice(a):this.slice(a,a+1)},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},slice:function(){return this.pushStack(F.apply(this,arguments),"slice",F.call(arguments).join(","))},map:function(a){return this.pushStack(e.map(this,function(b,c){return a.call(b,c,b)}))},end:function(){return this.prevObject||this.constructor(null)},push:E,sort:[].sort,splice:[].splice},e.fn.init.prototype=e.fn,e.extend=e.fn.extend=function(){var a,c,d,f,g,h,i=arguments[0]||{},j=1,k=arguments.length,l=!1;typeof i=="boolean"&&(l=i,i=arguments[1]||{},j=2),typeof i!="object"&&!e.isFunction(i)&&(i={}),k===j&&(i=this,--j);for(;j<k;j++)if((a=arguments[j])!=null)for(c in a){d=i[c],f=a[c];if(i===f)continue;l&&f&&(e.isPlainObject(f)||(g=e.isArray(f)))?(g?(g=!1,h=d&&e.isArray(d)?d:[]):h=d&&e.isPlainObject(d)?d:{},i[c]=e.extend(l,h,f)):f!==b&&(i[c]=f)}return i},e.extend({noConflict:function(b){a.$===e&&(a.$=g),b&&a.jQuery===e&&(a.jQuery=f);return e},isReady:!1,readyWait:1,holdReady:function(a){a?e.readyWait++:e.ready(!0)},ready:function(a){if(a===!0&&!--e.readyWait||a!==!0&&!e.isReady){if(!c.body)return setTimeout(e.ready,1);e.isReady=!0;if(a!==!0&&--e.readyWait>0)return;A.fireWith(c,[e]),e.fn.trigger&&e(c).trigger("ready").off("ready")}},bindReady:function(){if(!A){A=e.Callbacks("once memory");if(c.readyState==="complete")return setTimeout(e.ready,1);if(c.addEventListener)c.addEventListener("DOMContentLoaded",B,!1),a.addEventListener("load",e.ready,!1);else if(c.attachEvent){c.attachEvent("onreadystatechange",B),a.attachEvent("onload",e.ready);var b=!1;try{b=a.frameElement==null}catch(d){}c.documentElement.doScroll&&b&&J()}}},isFunction:function(a){return 
e.type(a)==="function"},isArray:Array.isArray||function(a){return e.type(a)==="array"},isWindow:function(a){return a!=null&&a==a.window},isNumeric:function(a){return!isNaN(parseFloat(a))&&isFinite(a)},type:function(a){return a==null?String(a):I[C.call(a)]||"object"},isPlainObject:function(a){if(!a||e.type(a)!=="object"||a.nodeType||e.isWindow(a))return!1;try{if(a.constructor&&!D.call(a,"constructor")&&!D.call(a.constructor.prototype,"isPrototypeOf"))return!1}catch(c){return!1}var d;for(d in a);return d===b||D.call(a,d)},isEmptyObject:function(a){for(var b in a)return!1;return!0},error:function(a){throw new Error(a)},parseJSON:function(b){if(typeof b!="string"||!b)return null;b=e.trim(b);if(a.JSON&&a.JSON.parse)return a.JSON.parse(b);if(n.test(b.replace(o,"@").replace(p,"]").replace(q,"")))return(new Function("return "+b))();e.error("Invalid JSON: "+b)},parseXML:function(c){if(typeof c!="string"||!c)return null;var d,f;try{a.DOMParser?(f=new DOMParser,d=f.parseFromString(c,"text/xml")):(d=new ActiveXObject("Microsoft.XMLDOM"),d.async="false",d.loadXML(c))}catch(g){d=b}(!d||!d.documentElement||d.getElementsByTagName("parsererror").length)&&e.error("Invalid XML: "+c);return d},noop:function(){},globalEval:function(b){b&&j.test(b)&&(a.execScript||function(b){a.eval.call(a,b)})(b)},camelCase:function(a){return a.replace(w,"ms-").replace(v,x)},nodeName:function(a,b){return a.nodeName&&a.nodeName.toUpperCase()===b.toUpperCase()},each:function(a,c,d){var f,g=0,h=a.length,i=h===b||e.isFunction(a);if(d){if(i){for(f in a)if(c.apply(a[f],d)===!1)break}else for(;g<h;)if(c.apply(a[g++],d)===!1)break}else if(i){for(f in a)if(c.call(a[f],f,a[f])===!1)break}else for(;g<h;)if(c.call(a[g],g,a[g++])===!1)break;return a},trim:G?function(a){return a==null?"":G.call(a)}:function(a){return a==null?"":(a+"").replace(k,"").replace(l,"")},makeArray:function(a,b){var c=b||[];if(a!=null){var d=e.type(a);a.length==null||d==="string"||d==="function"||d==="regexp"||e.isWindow(a)?E.call(c,a):e.merge(c,a)}return c},inArray:function(a,b,c){var d;if(b){if(H)return H.call(b,a,c);d=b.length,c=c?c<0?Math.max(0,d+c):c:0;for(;c<d;c++)if(c in b&&b[c]===a)return c}return-1},merge:function(a,c){var d=a.length,e=0;if(typeof c.length=="number")for(var f=c.length;e<f;e++)a[d++]=c[e];else while(c[e]!==b)a[d++]=c[e++];a.length=d;return a},grep:function(a,b,c){var d=[],e;c=!!c;for(var f=0,g=a.length;f<g;f++)e=!!b(a[f],f),c!==e&&d.push(a[f]);return d},map:function(a,c,d){var f,g,h=[],i=0,j=a.length,k=a instanceof e||j!==b&&typeof j=="number"&&(j>0&&a[0]&&a[j-1]||j===0||e.isArray(a));if(k)for(;i<j;i++)f=c(a[i],i,d),f!=null&&(h[h.length]=f);else for(g in a)f=c(a[g],g,d),f!=null&&(h[h.length]=f);return h.concat.apply([],h)},guid:1,proxy:function(a,c){if(typeof c=="string"){var d=a[c];c=a,a=d}if(!e.isFunction(a))return b;var f=F.call(arguments,2),g=function(){return a.apply(c,f.concat(F.call(arguments)))};g.guid=a.guid=a.guid||g.guid||e.guid++;return g},access:function(a,c,d,f,g,h,i){var j,k=d==null,l=0,m=a.length;if(d&&typeof d=="object"){for(l in d)e.access(a,c,l,d[l],1,h,f);g=1}else if(f!==b){j=i===b&&e.isFunction(f),k&&(j?(j=c,c=function(a,b,c){return j.call(e(a),c)}):(c.call(a,f),c=null));if(c)for(;l<m;l++)c(a[l],d,j?f.call(a[l],l,c(a[l],d)):f,i);g=1}return g?a:k?c.call(a):m?c(a[0],d):h},now:function(){return(new Date).getTime()},uaMatch:function(a){a=a.toLowerCase();var b=r.exec(a)||s.exec(a)||t.exec(a)||a.indexOf("compatible")<0&&u.exec(a)||[];return{browser:b[1]||"",version:b[2]||"0"}},sub:function(){function a(b,c){return new 
a.fn.init(b,c)}e.extend(!0,a,this),a.superclass=this,a.fn=a.prototype=this(),a.fn.constructor=a,a.sub=this.sub,a.fn.init=function(d,f){f&&f instanceof e&&!(f instanceof a)&&(f=a(f));return e.fn.init.call(this,d,f,b)},a.fn.init.prototype=a.fn;var b=a(c);return a},browser:{}}),e.each("Boolean Number String Function Array Date RegExp Object".split(" "),function(a,b){I["[object "+b+"]"]=b.toLowerCase()}),z=e.uaMatch(y),z.browser&&(e.browser[z.browser]=!0,e.browser.version=z.version),e.browser.webkit&&(e.browser.safari=!0),j.test(" ")&&(k=/^[\s\xA0]+/,l=/[\s\xA0]+$/),h=e(c),c.addEventListener?B=function(){c.removeEventListener("DOMContentLoaded",B,!1),e.ready()}:c.attachEvent&&(B=function(){c.readyState==="complete"&&(c.detachEvent("onreadystatechange",B),e.ready())});return e}(),g={};f.Callbacks=function(a){a=a?g[a]||h(a):{};var c=[],d=[],e,i,j,k,l,m,n=function(b){var d,e,g,h,i;for(d=0,e=b.length;d<e;d++)g=b[d],h=f.type(g),h==="array"?n(g):h==="function"&&(!a.unique||!p.has(g))&&c.push(g)},o=function(b,f){f=f||[],e=!a.memory||[b,f],i=!0,j=!0,m=k||0,k=0,l=c.length;for(;c&&m<l;m++)if(c[m].apply(b,f)===!1&&a.stopOnFalse){e=!0;break}j=!1,c&&(a.once?e===!0?p.disable():c=[]:d&&d.length&&(e=d.shift(),p.fireWith(e[0],e[1])))},p={add:function(){if(c){var a=c.length;n(arguments),j?l=c.length:e&&e!==!0&&(k=a,o(e[0],e[1]))}return this},remove:function(){if(c){var b=arguments,d=0,e=b.length;for(;d<e;d++)for(var f=0;f<c.length;f++)if(b[d]===c[f]){j&&f<=l&&(l--,f<=m&&m--),c.splice(f--,1);if(a.unique)break}}return this},has:function(a){if(c){var b=0,d=c.length;for(;b<d;b++)if(a===c[b])return!0}return!1},empty:function(){c=[];return this},disable:function(){c=d=e=b;return this},disabled:function(){return!c},lock:function(){d=b,(!e||e===!0)&&p.disable();return this},locked:function(){return!d},fireWith:function(b,c){d&&(j?a.once||d.push([b,c]):(!a.once||!e)&&o(b,c));return this},fire:function(){p.fireWith(this,arguments);return this},fired:function(){return!!i}};return p};var i=[].slice;f.extend({Deferred:function(a){var b=f.Callbacks("once memory"),c=f.Callbacks("once memory"),d=f.Callbacks("memory"),e="pending",g={resolve:b,reject:c,notify:d},h={done:b.add,fail:c.add,progress:d.add,state:function(){return e},isResolved:b.fired,isRejected:c.fired,then:function(a,b,c){i.done(a).fail(b).progress(c);return this},always:function(){i.done.apply(i,arguments).fail.apply(i,arguments);return this},pipe:function(a,b,c){return f.Deferred(function(d){f.each({done:[a,"resolve"],fail:[b,"reject"],progress:[c,"notify"]},function(a,b){var c=b[0],e=b[1],g;f.isFunction(c)?i[a](function(){g=c.apply(this,arguments),g&&f.isFunction(g.promise)?g.promise().then(d.resolve,d.reject,d.notify):d[e+"With"](this===i?d:this,[g])}):i[a](d[e])})}).promise()},promise:function(a){if(a==null)a=h;else for(var b in h)a[b]=h[b];return a}},i=h.promise({}),j;for(j in g)i[j]=g[j].fire,i[j+"With"]=g[j].fireWith;i.done(function(){e="resolved"},c.disable,d.lock).fail(function(){e="rejected"},b.disable,d.lock),a&&a.call(i,i);return i},when:function(a){function m(a){return function(b){e[a]=arguments.length>1?i.call(arguments,0):b,j.notifyWith(k,e)}}function l(a){return function(c){b[a]=arguments.length>1?i.call(arguments,0):c,--g||j.resolveWith(j,b)}}var b=i.call(arguments,0),c=0,d=b.length,e=Array(d),g=d,h=d,j=d<=1&&a&&f.isFunction(a.promise)?a:f.Deferred(),k=j.promise();if(d>1){for(;c<d;c++)b[c]&&b[c].promise&&f.isFunction(b[c].promise)?b[c].promise().then(l(c),j.reject,m(c)):--g;g||j.resolveWith(j,b)}else j!==a&&j.resolveWith(j,d?[a]:[]);return 
k}}),f.support=function(){var b,d,e,g,h,i,j,k,l,m,n,o,p=c.createElement("div"),q=c.documentElement;p.setAttribute("className","t"),p.innerHTML="   <link/><table></table><a href='/a' style='top:1px;float:left;opacity:.55;'>a</a><input type='checkbox'/>",d=p.getElementsByTagName("*"),e=p.getElementsByTagName("a")[0];if(!d||!d.length||!e)return{};g=c.createElement("select"),h=g.appendChild(c.createElement("option")),i=p.getElementsByTagName("input")[0],b={leadingWhitespace:p.firstChild.nodeType===3,tbody:!p.getElementsByTagName("tbody").length,htmlSerialize:!!p.getElementsByTagName("link").length,style:/top/.test(e.getAttribute("style")),hrefNormalized:e.getAttribute("href")==="/a",opacity:/^0.55/.test(e.style.opacity),cssFloat:!!e.style.cssFloat,checkOn:i.value==="on",optSelected:h.selected,getSetAttribute:p.className!=="t",enctype:!!c.createElement("form").enctype,html5Clone:c.createElement("nav").cloneNode(!0).outerHTML!=="<:nav></:nav>",submitBubbles:!0,changeBubbles:!0,focusinBubbles:!1,deleteExpando:!0,noCloneEvent:!0,inlineBlockNeedsLayout:!1,shrinkWrapBlocks:!1,reliableMarginRight:!0,pixelMargin:!0},f.boxModel=b.boxModel=c.compatMode==="CSS1Compat",i.checked=!0,b.noCloneChecked=i.cloneNode(!0).checked,g.disabled=!0,b.optDisabled=!h.disabled;try{delete p.test}catch(r){b.deleteExpando=!1}!p.addEventListener&&p.attachEvent&&p.fireEvent&&(p.attachEvent("onclick",function(){b.noCloneEvent=!1}),p.cloneNode(!0).fireEvent("onclick")),i=c.createElement("input"),i.value="t",i.setAttribute("type","radio"),b.radioValue=i.value==="t",i.setAttribute("checked","checked"),i.setAttribute("name","t"),p.appendChild(i),j=c.createDocumentFragment(),j.appendChild(p.lastChild),b.checkClone=j.cloneNode(!0).cloneNode(!0).lastChild.checked,b.appendChecked=i.checked,j.removeChild(i),j.appendChild(p);if(p.attachEvent)for(n in{submit:1,change:1,focusin:1})m="on"+n,o=m in p,o||(p.setAttribute(m,"return;"),o=typeof p[m]=="function"),b[n+"Bubbles"]=o;j.removeChild(p),j=g=h=p=i=null,f(function(){var d,e,g,h,i,j,l,m,n,q,r,s,t,u=c.getElementsByTagName("body")[0];!u||(m=1,t="padding:0;margin:0;border:",r="position:absolute;top:0;left:0;width:1px;height:1px;",s=t+"0;visibility:hidden;",n="style='"+r+t+"5px solid #000;",q="<div "+n+"display:block;'><div style='"+t+"0;display:block;overflow:hidden;'></div></div>"+"<table "+n+"' cellpadding='0' cellspacing='0'>"+"<tr><td></td></tr></table>",d=c.createElement("div"),d.style.cssText=s+"width:0;height:0;position:static;top:0;margin-top:"+m+"px",u.insertBefore(d,u.firstChild),p=c.createElement("div"),d.appendChild(p),p.innerHTML="<table><tr><td style='"+t+"0;display:none'></td><td>t</td></tr></table>",k=p.getElementsByTagName("td"),o=k[0].offsetHeight===0,k[0].style.display="",k[1].style.display="none",b.reliableHiddenOffsets=o&&k[0].offsetHeight===0,a.getComputedStyle&&(p.innerHTML="",l=c.createElement("div"),l.style.width="0",l.style.marginRight="0",p.style.width="2px",p.appendChild(l),b.reliableMarginRight=(parseInt((a.getComputedStyle(l,null)||{marginRight:0}).marginRight,10)||0)===0),typeof p.style.zoom!="undefined"&&(p.innerHTML="",p.style.width=p.style.padding="1px",p.style.border=0,p.style.overflow="hidden",p.style.display="inline",p.style.zoom=1,b.inlineBlockNeedsLayout=p.offsetWidth===3,p.style.display="block",p.style.overflow="visible",p.innerHTML="<div 
style='width:5px;'></div>",b.shrinkWrapBlocks=p.offsetWidth!==3),p.style.cssText=r+s,p.innerHTML=q,e=p.firstChild,g=e.firstChild,i=e.nextSibling.firstChild.firstChild,j={doesNotAddBorder:g.offsetTop!==5,doesAddBorderForTableAndCells:i.offsetTop===5},g.style.position="fixed",g.style.top="20px",j.fixedPosition=g.offsetTop===20||g.offsetTop===15,g.style.position=g.style.top="",e.style.overflow="hidden",e.style.position="relative",j.subtractsBorderForOverflowNotVisible=g.offsetTop===-5,j.doesNotIncludeMarginInBodyOffset=u.offsetTop!==m,a.getComputedStyle&&(p.style.marginTop="1%",b.pixelMargin=(a.getComputedStyle(p,null)||{marginTop:0}).marginTop!=="1%"),typeof d.style.zoom!="undefined"&&(d.style.zoom=1),u.removeChild(d),l=p=d=null,f.extend(b,j))});return b}();var j=/^(?:\{.*\}|\[.*\])$/,k=/([A-Z])/g;f.extend({cache:{},uuid:0,expando:"jQuery"+(f.fn.jquery+Math.random()).replace(/\D/g,""),noData:{embed:!0,object:"clsid:D27CDB6E-AE6D-11cf-96B8-444553540000",applet:!0},hasData:function(a){a=a.nodeType?f.cache[a[f.expando]]:a[f.expando];return!!a&&!m(a)},data:function(a,c,d,e){if(!!f.acceptData(a)){var g,h,i,j=f.expando,k=typeof c=="string",l=a.nodeType,m=l?f.cache:a,n=l?a[j]:a[j]&&j,o=c==="events";if((!n||!m[n]||!o&&!e&&!m[n].data)&&k&&d===b)return;n||(l?a[j]=n=++f.uuid:n=j),m[n]||(m[n]={},l||(m[n].toJSON=f.noop));if(typeof c=="object"||typeof c=="function")e?m[n]=f.extend(m[n],c):m[n].data=f.extend(m[n].data,c);g=h=m[n],e||(h.data||(h.data={}),h=h.data),d!==b&&(h[f.camelCase(c)]=d);if(o&&!h[c])return g.events;k?(i=h[c],i==null&&(i=h[f.camelCase(c)])):i=h;return i}},removeData:function(a,b,c){if(!!f.acceptData(a)){var d,e,g,h=f.expando,i=a.nodeType,j=i?f.cache:a,k=i?a[h]:h;if(!j[k])return;if(b){d=c?j[k]:j[k].data;if(d){f.isArray(b)||(b in d?b=[b]:(b=f.camelCase(b),b in d?b=[b]:b=b.split(" ")));for(e=0,g=b.length;e<g;e++)delete d[b[e]];if(!(c?m:f.isEmptyObject)(d))return}}if(!c){delete j[k].data;if(!m(j[k]))return}f.support.deleteExpando||!j.setInterval?delete j[k]:j[k]=null,i&&(f.support.deleteExpando?delete a[h]:a.removeAttribute?a.removeAttribute(h):a[h]=null)}},_data:function(a,b,c){return f.data(a,b,c,!0)},acceptData:function(a){if(a.nodeName){var b=f.noData[a.nodeName.toLowerCase()];if(b)return b!==!0&&a.getAttribute("classid")===b}return!0}}),f.fn.extend({data:function(a,c){var d,e,g,h,i,j=this[0],k=0,m=null;if(a===b){if(this.length){m=f.data(j);if(j.nodeType===1&&!f._data(j,"parsedAttrs")){g=j.attributes;for(i=g.length;k<i;k++)h=g[k].name,h.indexOf("data-")===0&&(h=f.camelCase(h.substring(5)),l(j,h,m[h]));f._data(j,"parsedAttrs",!0)}}return m}if(typeof a=="object")return this.each(function(){f.data(this,a)});d=a.split(".",2),d[1]=d[1]?"."+d[1]:"",e=d[1]+"!";return f.access(this,function(c){if(c===b){m=this.triggerHandler("getData"+e,[d[0]]),m===b&&j&&(m=f.data(j,a),m=l(j,a,m));return m===b&&d[1]?this.data(d[0]):m}d[1]=c,this.each(function(){var b=f(this);b.triggerHandler("setData"+e,d),f.data(this,a,c),b.triggerHandler("changeData"+e,d)})},null,c,arguments.length>1,null,!1)},removeData:function(a){return this.each(function(){f.removeData(this,a)})}}),f.extend({_mark:function(a,b){a&&(b=(b||"fx")+"mark",f._data(a,b,(f._data(a,b)||0)+1))},_unmark:function(a,b,c){a!==!0&&(c=b,b=a,a=!1);if(b){c=c||"fx";var d=c+"mark",e=a?0:(f._data(b,d)||1)-1;e?f._data(b,d,e):(f.removeData(b,d,!0),n(b,c,"mark"))}},queue:function(a,b,c){var d;if(a){b=(b||"fx")+"queue",d=f._data(a,b),c&&(!d||f.isArray(c)?d=f._data(a,b,f.makeArray(c)):d.push(c));return d||[]}},dequeue:function(a,b){b=b||"fx";var 
c=f.queue(a,b),d=c.shift(),e={};d==="inprogress"&&(d=c.shift()),d&&(b==="fx"&&c.unshift("inprogress"),f._data(a,b+".run",e),d.call(a,function(){f.dequeue(a,b)},e)),c.length||(f.removeData(a,b+"queue "+b+".run",!0),n(a,b,"queue"))}}),f.fn.extend({queue:function(a,c){var d=2;typeof a!="string"&&(c=a,a="fx",d--);if(arguments.length<d)return f.queue(this[0],a);return c===b?this:this.each(function(){var b=f.queue(this,a,c);a==="fx"&&b[0]!=="inprogress"&&f.dequeue(this,a)})},dequeue:function(a){return this.each(function(){f.dequeue(this,a)})},delay:function(a,b){a=f.fx?f.fx.speeds[a]||a:a,b=b||"fx";return this.queue(b,function(b,c){var d=setTimeout(b,a);c.stop=function(){clearTimeout(d)}})},clearQueue:function(a){return this.queue(a||"fx",[])},promise:function(a,c){function m(){--h||d.resolveWith(e,[e])}typeof a!="string"&&(c=a,a=b),a=a||"fx";var d=f.Deferred(),e=this,g=e.length,h=1,i=a+"defer",j=a+"queue",k=a+"mark",l;while(g--)if(l=f.data(e[g],i,b,!0)||(f.data(e[g],j,b,!0)||f.data(e[g],k,b,!0))&&f.data(e[g],i,f.Callbacks("once memory"),!0))h++,l.add(m);m();return d.promise(c)}});var o=/[\n\t\r]/g,p=/\s+/,q=/\r/g,r=/^(?:button|input)$/i,s=/^(?:button|input|object|select|textarea)$/i,t=/^a(?:rea)?$/i,u=/^(?:autofocus|autoplay|async|checked|controls|defer|disabled|hidden|loop|multiple|open|readonly|required|scoped|selected)$/i,v=f.support.getSetAttribute,w,x,y;f.fn.extend({attr:function(a,b){return f.access(this,f.attr,a,b,arguments.length>1)},removeAttr:function(a){return this.each(function(){f.removeAttr(this,a)})},prop:function(a,b){return f.access(this,f.prop,a,b,arguments.length>1)},removeProp:function(a){a=f.propFix[a]||a;return this.each(function(){try{this[a]=b,delete this[a]}catch(c){}})},addClass:function(a){var b,c,d,e,g,h,i;if(f.isFunction(a))return this.each(function(b){f(this).addClass(a.call(this,b,this.className))});if(a&&typeof a=="string"){b=a.split(p);for(c=0,d=this.length;c<d;c++){e=this[c];if(e.nodeType===1)if(!e.className&&b.length===1)e.className=a;else{g=" "+e.className+" ";for(h=0,i=b.length;h<i;h++)~g.indexOf(" "+b[h]+" ")||(g+=b[h]+" ");e.className=f.trim(g)}}}return this},removeClass:function(a){var c,d,e,g,h,i,j;if(f.isFunction(a))return this.each(function(b){f(this).removeClass(a.call(this,b,this.className))});if(a&&typeof a=="string"||a===b){c=(a||"").split(p);for(d=0,e=this.length;d<e;d++){g=this[d];if(g.nodeType===1&&g.className)if(a){h=(" "+g.className+" ").replace(o," ");for(i=0,j=c.length;i<j;i++)h=h.replace(" "+c[i]+" "," ");g.className=f.trim(h)}else g.className=""}}return this},toggleClass:function(a,b){var c=typeof a,d=typeof b=="boolean";if(f.isFunction(a))return this.each(function(c){f(this).toggleClass(a.call(this,c,this.className,b),b)});return this.each(function(){if(c==="string"){var e,g=0,h=f(this),i=b,j=a.split(p);while(e=j[g++])i=d?i:!h.hasClass(e),h[i?"addClass":"removeClass"](e)}else if(c==="undefined"||c==="boolean")this.className&&f._data(this,"__className__",this.className),this.className=this.className||a===!1?"":f._data(this,"__className__")||""})},hasClass:function(a){var b=" "+a+" ",c=0,d=this.length;for(;c<d;c++)if(this[c].nodeType===1&&(" "+this[c].className+" ").replace(o," ").indexOf(b)>-1)return!0;return!1},val:function(a){var c,d,e,g=this[0];{if(!!arguments.length){e=f.isFunction(a);return this.each(function(d){var g=f(this),h;if(this.nodeType===1){e?h=a.call(this,d,g.val()):h=a,h==null?h="":typeof h=="number"?h+="":f.isArray(h)&&(h=f.map(h,function(a){return 
a==null?"":a+""})),c=f.valHooks[this.type]||f.valHooks[this.nodeName.toLowerCase()];if(!c||!("set"in c)||c.set(this,h,"value")===b)this.value=h}})}if(g){c=f.valHooks[g.type]||f.valHooks[g.nodeName.toLowerCase()];if(c&&"get"in c&&(d=c.get(g,"value"))!==b)return d;d=g.value;return typeof d=="string"?d.replace(q,""):d==null?"":d}}}}),f.extend({valHooks:{option:{get:function(a){var b=a.attributes.value;return!b||b.specified?a.value:a.text}},select:{get:function(a){var b,c,d,e,g=a.selectedIndex,h=[],i=a.options,j=a.type==="select-one";if(g<0)return null;c=j?g:0,d=j?g+1:i.length;for(;c<d;c++){e=i[c];if(e.selected&&(f.support.optDisabled?!e.disabled:e.getAttribute("disabled")===null)&&(!e.parentNode.disabled||!f.nodeName(e.parentNode,"optgroup"))){b=f(e).val();if(j)return b;h.push(b)}}if(j&&!h.length&&i.length)return f(i[g]).val();return h},set:function(a,b){var c=f.makeArray(b);f(a).find("option").each(function(){this.selected=f.inArray(f(this).val(),c)>=0}),c.length||(a.selectedIndex=-1);return c}}},attrFn:{val:!0,css:!0,html:!0,text:!0,data:!0,width:!0,height:!0,offset:!0},attr:function(a,c,d,e){var g,h,i,j=a.nodeType;if(!!a&&j!==3&&j!==8&&j!==2){if(e&&c in f.attrFn)return f(a)[c](d);if(typeof a.getAttribute=="undefined")return f.prop(a,c,d);i=j!==1||!f.isXMLDoc(a),i&&(c=c.toLowerCase(),h=f.attrHooks[c]||(u.test(c)?x:w));if(d!==b){if(d===null){f.removeAttr(a,c);return}if(h&&"set"in h&&i&&(g=h.set(a,d,c))!==b)return g;a.setAttribute(c,""+d);return d}if(h&&"get"in h&&i&&(g=h.get(a,c))!==null)return g;g=a.getAttribute(c);return g===null?b:g}},removeAttr:function(a,b){var c,d,e,g,h,i=0;if(b&&a.nodeType===1){d=b.toLowerCase().split(p),g=d.length;for(;i<g;i++)e=d[i],e&&(c=f.propFix[e]||e,h=u.test(e),h||f.attr(a,e,""),a.removeAttribute(v?e:c),h&&c in a&&(a[c]=!1))}},attrHooks:{type:{set:function(a,b){if(r.test(a.nodeName)&&a.parentNode)f.error("type property can't be changed");else if(!f.support.radioValue&&b==="radio"&&f.nodeName(a,"input")){var c=a.value;a.setAttribute("type",b),c&&(a.value=c);return b}}},value:{get:function(a,b){if(w&&f.nodeName(a,"button"))return w.get(a,b);return b in a?a.value:null},set:function(a,b,c){if(w&&f.nodeName(a,"button"))return w.set(a,b,c);a.value=b}}},propFix:{tabindex:"tabIndex",readonly:"readOnly","for":"htmlFor","class":"className",maxlength:"maxLength",cellspacing:"cellSpacing",cellpadding:"cellPadding",rowspan:"rowSpan",colspan:"colSpan",usemap:"useMap",frameborder:"frameBorder",contenteditable:"contentEditable"},prop:function(a,c,d){var e,g,h,i=a.nodeType;if(!!a&&i!==3&&i!==8&&i!==2){h=i!==1||!f.isXMLDoc(a),h&&(c=f.propFix[c]||c,g=f.propHooks[c]);return d!==b?g&&"set"in g&&(e=g.set(a,d,c))!==b?e:a[c]=d:g&&"get"in g&&(e=g.get(a,c))!==null?e:a[c]}},propHooks:{tabIndex:{get:function(a){var c=a.getAttributeNode("tabindex");return c&&c.specified?parseInt(c.value,10):s.test(a.nodeName)||t.test(a.nodeName)&&a.href?0:b}}}}),f.attrHooks.tabindex=f.propHooks.tabIndex,x={get:function(a,c){var d,e=f.prop(a,c);return e===!0||typeof e!="boolean"&&(d=a.getAttributeNode(c))&&d.nodeValue!==!1?c.toLowerCase():b},set:function(a,b,c){var d;b===!1?f.removeAttr(a,c):(d=f.propFix[c]||c,d in a&&(a[d]=!0),a.setAttribute(c,c.toLowerCase()));return c}},v||(y={name:!0,id:!0,coords:!0},w=f.valHooks.button={get:function(a,c){var d;d=a.getAttributeNode(c);return d&&(y[c]?d.nodeValue!=="":d.specified)?d.nodeValue:b},set:function(a,b,d){var e=a.getAttributeNode(d);e||(e=c.createAttribute(d),a.setAttributeNode(e));return 
e.nodeValue=b+""}},f.attrHooks.tabindex.set=w.set,f.each(["width","height"],function(a,b){f.attrHooks[b]=f.extend(f.attrHooks[b],{set:function(a,c){if(c===""){a.setAttribute(b,"auto");return c}}})}),f.attrHooks.contenteditable={get:w.get,set:function(a,b,c){b===""&&(b="false"),w.set(a,b,c)}}),f.support.hrefNormalized||f.each(["href","src","width","height"],function(a,c){f.attrHooks[c]=f.extend(f.attrHooks[c],{get:function(a){var d=a.getAttribute(c,2);return d===null?b:d}})}),f.support.style||(f.attrHooks.style={get:function(a){return a.style.cssText.toLowerCase()||b},set:function(a,b){return a.style.cssText=""+b}}),f.support.optSelected||(f.propHooks.selected=f.extend(f.propHooks.selected,{get:function(a){var b=a.parentNode;b&&(b.selectedIndex,b.parentNode&&b.parentNode.selectedIndex);return null}})),f.support.enctype||(f.propFix.enctype="encoding"),f.support.checkOn||f.each(["radio","checkbox"],function(){f.valHooks[this]={get:function(a){return a.getAttribute("value")===null?"on":a.value}}}),f.each(["radio","checkbox"],function(){f.valHooks[this]=f.extend(f.valHooks[this],{set:function(a,b){if(f.isArray(b))return a.checked=f.inArray(f(a).val(),b)>=0}})});var z=/^(?:textarea|input|select)$/i,A=/^([^\.]*)?(?:\.(.+))?$/,B=/(?:^|\s)hover(\.\S+)?\b/,C=/^key/,D=/^(?:mouse|contextmenu)|click/,E=/^(?:focusinfocus|focusoutblur)$/,F=/^(\w*)(?:#([\w\-]+))?(?:\.([\w\-]+))?$/,G=function(
-a){var b=F.exec(a);b&&(b[1]=(b[1]||"").toLowerCase(),b[3]=b[3]&&new RegExp("(?:^|\\s)"+b[3]+"(?:\\s|$)"));return b},H=function(a,b){var c=a.attributes||{};return(!b[1]||a.nodeName.toLowerCase()===b[1])&&(!b[2]||(c.id||{}).value===b[2])&&(!b[3]||b[3].test((c["class"]||{}).value))},I=function(a){return f.event.special.hover?a:a.replace(B,"mouseenter$1 mouseleave$1")};f.event={add:function(a,c,d,e,g){var h,i,j,k,l,m,n,o,p,q,r,s;if(!(a.nodeType===3||a.nodeType===8||!c||!d||!(h=f._data(a)))){d.handler&&(p=d,d=p.handler,g=p.selector),d.guid||(d.guid=f.guid++),j=h.events,j||(h.events=j={}),i=h.handle,i||(h.handle=i=function(a){return typeof f!="undefined"&&(!a||f.event.triggered!==a.type)?f.event.dispatch.apply(i.elem,arguments):b},i.elem=a),c=f.trim(I(c)).split(" ");for(k=0;k<c.length;k++){l=A.exec(c[k])||[],m=l[1],n=(l[2]||"").split(".").sort(),s=f.event.special[m]||{},m=(g?s.delegateType:s.bindType)||m,s=f.event.special[m]||{},o=f.extend({type:m,origType:l[1],data:e,handler:d,guid:d.guid,selector:g,quick:g&&G(g),namespace:n.join(".")},p),r=j[m];if(!r){r=j[m]=[],r.delegateCount=0;if(!s.setup||s.setup.call(a,e,n,i)===!1)a.addEventListener?a.addEventListener(m,i,!1):a.attachEvent&&a.attachEvent("on"+m,i)}s.add&&(s.add.call(a,o),o.handler.guid||(o.handler.guid=d.guid)),g?r.splice(r.delegateCount++,0,o):r.push(o),f.event.global[m]=!0}a=null}},global:{},remove:function(a,b,c,d,e){var g=f.hasData(a)&&f._data(a),h,i,j,k,l,m,n,o,p,q,r,s;if(!!g&&!!(o=g.events)){b=f.trim(I(b||"")).split(" ");for(h=0;h<b.length;h++){i=A.exec(b[h])||[],j=k=i[1],l=i[2];if(!j){for(j in o)f.event.remove(a,j+b[h],c,d,!0);continue}p=f.event.special[j]||{},j=(d?p.delegateType:p.bindType)||j,r=o[j]||[],m=r.length,l=l?new RegExp("(^|\\.)"+l.split(".").sort().join("\\.(?:.*\\.)?")+"(\\.|$)"):null;for(n=0;n<r.length;n++)s=r[n],(e||k===s.origType)&&(!c||c.guid===s.guid)&&(!l||l.test(s.namespace))&&(!d||d===s.selector||d==="**"&&s.selector)&&(r.splice(n--,1),s.selector&&r.delegateCount--,p.remove&&p.remove.call(a,s));r.length===0&&m!==r.length&&((!p.teardown||p.teardown.call(a,l)===!1)&&f.removeEvent(a,j,g.handle),delete o[j])}f.isEmptyObject(o)&&(q=g.handle,q&&(q.elem=null),f.removeData(a,["events","handle"],!0))}},customEvent:{getData:!0,setData:!0,changeData:!0},trigger:function(c,d,e,g){if(!e||e.nodeType!==3&&e.nodeType!==8){var h=c.type||c,i=[],j,k,l,m,n,o,p,q,r,s;if(E.test(h+f.event.triggered))return;h.indexOf("!")>=0&&(h=h.slice(0,-1),k=!0),h.indexOf(".")>=0&&(i=h.split("."),h=i.shift(),i.sort());if((!e||f.event.customEvent[h])&&!f.event.global[h])return;c=typeof c=="object"?c[f.expando]?c:new f.Event(h,c):new f.Event(h),c.type=h,c.isTrigger=!0,c.exclusive=k,c.namespace=i.join("."),c.namespace_re=c.namespace?new RegExp("(^|\\.)"+i.join("\\.(?:.*\\.)?")+"(\\.|$)"):null,o=h.indexOf(":")<0?"on"+h:"";if(!e){j=f.cache;for(l in 
j)j[l].events&&j[l].events[h]&&f.event.trigger(c,d,j[l].handle.elem,!0);return}c.result=b,c.target||(c.target=e),d=d!=null?f.makeArray(d):[],d.unshift(c),p=f.event.special[h]||{};if(p.trigger&&p.trigger.apply(e,d)===!1)return;r=[[e,p.bindType||h]];if(!g&&!p.noBubble&&!f.isWindow(e)){s=p.delegateType||h,m=E.test(s+h)?e:e.parentNode,n=null;for(;m;m=m.parentNode)r.push([m,s]),n=m;n&&n===e.ownerDocument&&r.push([n.defaultView||n.parentWindow||a,s])}for(l=0;l<r.length&&!c.isPropagationStopped();l++)m=r[l][0],c.type=r[l][1],q=(f._data(m,"events")||{})[c.type]&&f._data(m,"handle"),q&&q.apply(m,d),q=o&&m[o],q&&f.acceptData(m)&&q.apply(m,d)===!1&&c.preventDefault();c.type=h,!g&&!c.isDefaultPrevented()&&(!p._default||p._default.apply(e.ownerDocument,d)===!1)&&(h!=="click"||!f.nodeName(e,"a"))&&f.acceptData(e)&&o&&e[h]&&(h!=="focus"&&h!=="blur"||c.target.offsetWidth!==0)&&!f.isWindow(e)&&(n=e[o],n&&(e[o]=null),f.event.triggered=h,e[h](),f.event.triggered=b,n&&(e[o]=n));return c.result}},dispatch:function(c){c=f.event.fix(c||a.event);var d=(f._data(this,"events")||{})[c.type]||[],e=d.delegateCount,g=[].slice.call(arguments,0),h=!c.exclusive&&!c.namespace,i=f.event.special[c.type]||{},j=[],k,l,m,n,o,p,q,r,s,t,u;g[0]=c,c.delegateTarget=this;if(!i.preDispatch||i.preDispatch.call(this,c)!==!1){if(e&&(!c.button||c.type!=="click")){n=f(this),n.context=this.ownerDocument||this;for(m=c.target;m!=this;m=m.parentNode||this)if(m.disabled!==!0){p={},r=[],n[0]=m;for(k=0;k<e;k++)s=d[k],t=s.selector,p[t]===b&&(p[t]=s.quick?H(m,s.quick):n.is(t)),p[t]&&r.push(s);r.length&&j.push({elem:m,matches:r})}}d.length>e&&j.push({elem:this,matches:d.slice(e)});for(k=0;k<j.length&&!c.isPropagationStopped();k++){q=j[k],c.currentTarget=q.elem;for(l=0;l<q.matches.length&&!c.isImmediatePropagationStopped();l++){s=q.matches[l];if(h||!c.namespace&&!s.namespace||c.namespace_re&&c.namespace_re.test(s.namespace))c.data=s.data,c.handleObj=s,o=((f.event.special[s.origType]||{}).handle||s.handler).apply(q.elem,g),o!==b&&(c.result=o,o===!1&&(c.preventDefault(),c.stopPropagation()))}}i.postDispatch&&i.postDispatch.call(this,c);return c.result}},props:"attrChange attrName relatedNode srcElement altKey bubbles cancelable ctrlKey currentTarget eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "),fixHooks:{},keyHooks:{props:"char charCode key keyCode".split(" "),filter:function(a,b){a.which==null&&(a.which=b.charCode!=null?b.charCode:b.keyCode);return a}},mouseHooks:{props:"button buttons clientX clientY fromElement offsetX offsetY pageX pageY screenX screenY toElement".split(" "),filter:function(a,d){var e,f,g,h=d.button,i=d.fromElement;a.pageX==null&&d.clientX!=null&&(e=a.target.ownerDocument||c,f=e.documentElement,g=e.body,a.pageX=d.clientX+(f&&f.scrollLeft||g&&g.scrollLeft||0)-(f&&f.clientLeft||g&&g.clientLeft||0),a.pageY=d.clientY+(f&&f.scrollTop||g&&g.scrollTop||0)-(f&&f.clientTop||g&&g.clientTop||0)),!a.relatedTarget&&i&&(a.relatedTarget=i===a.target?d.toElement:i),!a.which&&h!==b&&(a.which=h&1?1:h&2?3:h&4?2:0);return a}},fix:function(a){if(a[f.expando])return a;var d,e,g=a,h=f.event.fixHooks[a.type]||{},i=h.props?this.props.concat(h.props):this.props;a=f.Event(g);for(d=i.length;d;)e=i[--d],a[e]=g[e];a.target||(a.target=g.srcElement||c),a.target.nodeType===3&&(a.target=a.target.parentNode),a.metaKey===b&&(a.metaKey=a.ctrlKey);return 
h.filter?h.filter(a,g):a},special:{ready:{setup:f.bindReady},load:{noBubble:!0},focus:{delegateType:"focusin"},blur:{delegateType:"focusout"},beforeunload:{setup:function(a,b,c){f.isWindow(this)&&(this.onbeforeunload=c)},teardown:function(a,b){this.onbeforeunload===b&&(this.onbeforeunload=null)}}},simulate:function(a,b,c,d){var e=f.extend(new f.Event,c,{type:a,isSimulated:!0,originalEvent:{}});d?f.event.trigger(e,null,b):f.event.dispatch.call(b,e),e.isDefaultPrevented()&&c.preventDefault()}},f.event.handle=f.event.dispatch,f.removeEvent=c.removeEventListener?function(a,b,c){a.removeEventListener&&a.removeEventListener(b,c,!1)}:function(a,b,c){a.detachEvent&&a.detachEvent("on"+b,c)},f.Event=function(a,b){if(!(this instanceof f.Event))return new f.Event(a,b);a&&a.type?(this.originalEvent=a,this.type=a.type,this.isDefaultPrevented=a.defaultPrevented||a.returnValue===!1||a.getPreventDefault&&a.getPreventDefault()?K:J):this.type=a,b&&f.extend(this,b),this.timeStamp=a&&a.timeStamp||f.now(),this[f.expando]=!0},f.Event.prototype={preventDefault:function(){this.isDefaultPrevented=K;var a=this.originalEvent;!a||(a.preventDefault?a.preventDefault():a.returnValue=!1)},stopPropagation:function(){this.isPropagationStopped=K;var a=this.originalEvent;!a||(a.stopPropagation&&a.stopPropagation(),a.cancelBubble=!0)},stopImmediatePropagation:function(){this.isImmediatePropagationStopped=K,this.stopPropagation()},isDefaultPrevented:J,isPropagationStopped:J,isImmediatePropagationStopped:J},f.each({mouseenter:"mouseover",mouseleave:"mouseout"},function(a,b){f.event.special[a]={delegateType:b,bindType:b,handle:function(a){var c=this,d=a.relatedTarget,e=a.handleObj,g=e.selector,h;if(!d||d!==c&&!f.contains(c,d))a.type=e.origType,h=e.handler.apply(this,arguments),a.type=b;return h}}}),f.support.submitBubbles||(f.event.special.submit={setup:function(){if(f.nodeName(this,"form"))return!1;f.event.add(this,"click._submit keypress._submit",function(a){var c=a.target,d=f.nodeName(c,"input")||f.nodeName(c,"button")?c.form:b;d&&!d._submit_attached&&(f.event.add(d,"submit._submit",function(a){a._submit_bubble=!0}),d._submit_attached=!0)})},postDispatch:function(a){a._submit_bubble&&(delete a._submit_bubble,this.parentNode&&!a.isTrigger&&f.event.simulate("submit",this.parentNode,a,!0))},teardown:function(){if(f.nodeName(this,"form"))return!1;f.event.remove(this,"._submit")}}),f.support.changeBubbles||(f.event.special.change={setup:function(){if(z.test(this.nodeName)){if(this.type==="checkbox"||this.type==="radio")f.event.add(this,"propertychange._change",function(a){a.originalEvent.propertyName==="checked"&&(this._just_changed=!0)}),f.event.add(this,"click._change",function(a){this._just_changed&&!a.isTrigger&&(this._just_changed=!1,f.event.simulate("change",this,a,!0))});return!1}f.event.add(this,"beforeactivate._change",function(a){var b=a.target;z.test(b.nodeName)&&!b._change_attached&&(f.event.add(b,"change._change",function(a){this.parentNode&&!a.isSimulated&&!a.isTrigger&&f.event.simulate("change",this.parentNode,a,!0)}),b._change_attached=!0)})},handle:function(a){var b=a.target;if(this!==b||a.isSimulated||a.isTrigger||b.type!=="radio"&&b.type!=="checkbox")return a.handleObj.handler.apply(this,arguments)},teardown:function(){f.event.remove(this,"._change");return z.test(this.nodeName)}}),f.support.focusinBubbles||f.each({focus:"focusin",blur:"focusout"},function(a,b){var 
d=0,e=function(a){f.event.simulate(b,a.target,f.event.fix(a),!0)};f.event.special[b]={setup:function(){d++===0&&c.addEventListener(a,e,!0)},teardown:function(){--d===0&&c.removeEventListener(a,e,!0)}}}),f.fn.extend({on:function(a,c,d,e,g){var h,i;if(typeof a=="object"){typeof c!="string"&&(d=d||c,c=b);for(i in a)this.on(i,c,d,a[i],g);return this}d==null&&e==null?(e=c,d=c=b):e==null&&(typeof c=="string"?(e=d,d=b):(e=d,d=c,c=b));if(e===!1)e=J;else if(!e)return this;g===1&&(h=e,e=function(a){f().off(a);return h.apply(this,arguments)},e.guid=h.guid||(h.guid=f.guid++));return this.each(function(){f.event.add(this,a,e,d,c)})},one:function(a,b,c,d){return this.on(a,b,c,d,1)},off:function(a,c,d){if(a&&a.preventDefault&&a.handleObj){var e=a.handleObj;f(a.delegateTarget).off(e.namespace?e.origType+"."+e.namespace:e.origType,e.selector,e.handler);return this}if(typeof a=="object"){for(var g in a)this.off(g,c,a[g]);return this}if(c===!1||typeof c=="function")d=c,c=b;d===!1&&(d=J);return this.each(function(){f.event.remove(this,a,d,c)})},bind:function(a,b,c){return this.on(a,null,b,c)},unbind:function(a,b){return this.off(a,null,b)},live:function(a,b,c){f(this.context).on(a,this.selector,b,c);return this},die:function(a,b){f(this.context).off(a,this.selector||"**",b);return this},delegate:function(a,b,c,d){return this.on(b,a,c,d)},undelegate:function(a,b,c){return arguments.length==1?this.off(a,"**"):this.off(b,a,c)},trigger:function(a,b){return this.each(function(){f.event.trigger(a,b,this)})},triggerHandler:function(a,b){if(this[0])return f.event.trigger(a,b,this[0],!0)},toggle:function(a){var b=arguments,c=a.guid||f.guid++,d=0,e=function(c){var e=(f._data(this,"lastToggle"+a.guid)||0)%d;f._data(this,"lastToggle"+a.guid,e+1),c.preventDefault();return b[e].apply(this,arguments)||!1};e.guid=c;while(d<b.length)b[d++].guid=c;return this.click(e)},hover:function(a,b){return this.mouseenter(a).mouseleave(b||a)}}),f.each("blur focus focusin focusout load resize scroll unload click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup error contextmenu".split(" "),function(a,b){f.fn[b]=function(a,c){c==null&&(c=a,a=null);return arguments.length>0?this.on(b,null,a,c):this.trigger(b)},f.attrFn&&(f.attrFn[b]=!0),C.test(b)&&(f.event.fixHooks[b]=f.event.keyHooks),D.test(b)&&(f.event.fixHooks[b]=f.event.mouseHooks)}),function(){function x(a,b,c,e,f,g){for(var h=0,i=e.length;h<i;h++){var j=e[h];if(j){var k=!1;j=j[a];while(j){if(j[d]===c){k=e[j.sizset];break}if(j.nodeType===1){g||(j[d]=c,j.sizset=h);if(typeof b!="string"){if(j===b){k=!0;break}}else if(m.filter(b,[j]).length>0){k=j;break}}j=j[a]}e[h]=k}}}function w(a,b,c,e,f,g){for(var h=0,i=e.length;h<i;h++){var j=e[h];if(j){var k=!1;j=j[a];while(j){if(j[d]===c){k=e[j.sizset];break}j.nodeType===1&&!g&&(j[d]=c,j.sizset=h);if(j.nodeName.toLowerCase()===b){k=j;break}j=j[a]}e[h]=k}}}var a=/((?:\((?:\([^()]+\)|[^()]+)+\)|\[(?:\[[^\[\]]*\]|['"][^'"]*['"]|[^\[\]'"]+)+\]|\\.|[^ >+~,(\[\\]+)+|[>+~])(\s*,\s*)?((?:.|\r|\n)*)/g,d="sizcache"+(Math.random()+"").replace(".",""),e=0,g=Object.prototype.toString,h=!1,i=!0,j=/\\/g,k=/\r\n/g,l=/\W/;[0,0].sort(function(){i=!1;return 0});var m=function(b,d,e,f){e=e||[],d=d||c;var h=d;if(d.nodeType!==1&&d.nodeType!==9)return[];if(!b||typeof b!="string")return e;var 
i,j,k,l,n,q,r,t,u=!0,v=m.isXML(d),w=[],x=b;do{a.exec(""),i=a.exec(x);if(i){x=i[3],w.push(i[1]);if(i[2]){l=i[3];break}}}while(i);if(w.length>1&&p.exec(b))if(w.length===2&&o.relative[w[0]])j=y(w[0]+w[1],d,f);else{j=o.relative[w[0]]?[d]:m(w.shift(),d);while(w.length)b=w.shift(),o.relative[b]&&(b+=w.shift()),j=y(b,j,f)}else{!f&&w.length>1&&d.nodeType===9&&!v&&o.match.ID.test(w[0])&&!o.match.ID.test(w[w.length-1])&&(n=m.find(w.shift(),d,v),d=n.expr?m.filter(n.expr,n.set)[0]:n.set[0]);if(d){n=f?{expr:w.pop(),set:s(f)}:m.find(w.pop(),w.length===1&&(w[0]==="~"||w[0]==="+")&&d.parentNode?d.parentNode:d,v),j=n.expr?m.filter(n.expr,n.set):n.set,w.length>0?k=s(j):u=!1;while(w.length)q=w.pop(),r=q,o.relative[q]?r=w.pop():q="",r==null&&(r=d),o.relative[q](k,r,v)}else k=w=[]}k||(k=j),k||m.error(q||b);if(g.call(k)==="[object Array]")if(!u)e.push.apply(e,k);else if(d&&d.nodeType===1)for(t=0;k[t]!=null;t++)k[t]&&(k[t]===!0||k[t].nodeType===1&&m.contains(d,k[t]))&&e.push(j[t]);else for(t=0;k[t]!=null;t++)k[t]&&k[t].nodeType===1&&e.push(j[t]);else s(k,e);l&&(m(l,h,e,f),m.uniqueSort(e));return e};m.uniqueSort=function(a){if(u){h=i,a.sort(u);if(h)for(var b=1;b<a.length;b++)a[b]===a[b-1]&&a.splice(b--,1)}return a},m.matches=function(a,b){return m(a,null,null,b)},m.matchesSelector=function(a,b){return m(b,null,null,[a]).length>0},m.find=function(a,b,c){var d,e,f,g,h,i;if(!a)return[];for(e=0,f=o.order.length;e<f;e++){h=o.order[e];if(g=o.leftMatch[h].exec(a)){i=g[1],g.splice(1,1);if(i.substr(i.length-1)!=="\\"){g[1]=(g[1]||"").replace(j,""),d=o.find[h](g,b,c);if(d!=null){a=a.replace(o.match[h],"");break}}}}d||(d=typeof b.getElementsByTagName!="undefined"?b.getElementsByTagName("*"):[]);return{set:d,expr:a}},m.filter=function(a,c,d,e){var f,g,h,i,j,k,l,n,p,q=a,r=[],s=c,t=c&&c[0]&&m.isXML(c[0]);while(a&&c.length){for(h in o.filter)if((f=o.leftMatch[h].exec(a))!=null&&f[2]){k=o.filter[h],l=f[1],g=!1,f.splice(1,1);if(l.substr(l.length-1)==="\\")continue;s===r&&(r=[]);if(o.preFilter[h]){f=o.preFilter[h](f,s,d,r,e,t);if(!f)g=i=!0;else if(f===!0)continue}if(f)for(n=0;(j=s[n])!=null;n++)j&&(i=k(j,f,n,s),p=e^i,d&&i!=null?p?g=!0:s[n]=!1:p&&(r.push(j),g=!0));if(i!==b){d||(s=r),a=a.replace(o.match[h],"");if(!g)return[];break}}if(a===q)if(g==null)m.error(a);else break;q=a}return s},m.error=function(a){throw new Error("Syntax error, unrecognized expression: "+a)};var n=m.getText=function(a){var b,c,d=a.nodeType,e="";if(d){if(d===1||d===9||d===11){if(typeof a.textContent=="string")return a.textContent;if(typeof a.innerText=="string")return a.innerText.replace(k,"");for(a=a.firstChild;a;a=a.nextSibling)e+=n(a)}else if(d===3||d===4)return a.nodeValue}else for(b=0;c=a[b];b++)c.nodeType!==8&&(e+=n(c));return e},o=m.selectors={order:["ID","NAME","TAG"],match:{ID:/#((?:[\w\u00c0-\uFFFF\-]|\\.)+)/,CLASS:/\.((?:[\w\u00c0-\uFFFF\-]|\\.)+)/,NAME:/\[name=['"]*((?:[\w\u00c0-\uFFFF\-]|\\.)+)['"]*\]/,ATTR:/\[\s*((?:[\w\u00c0-\uFFFF\-]|\\.)+)\s*(?:(\S?=)\s*(?:(['"])(.*?)\3|(#?(?:[\w\u00c0-\uFFFF\-]|\\.)*)|)|)\s*\]/,TAG:/^((?:[\w\u00c0-\uFFFF\*\-]|\\.)+)/,CHILD:/:(only|nth|last|first)-child(?:\(\s*(even|odd|(?:[+\-]?\d+|(?:[+\-]?\d*)?n\s*(?:[+\-]\s*\d+)?))\s*\))?/,POS:/:(nth|eq|gt|lt|first|last|even|odd)(?:\((\d*)\))?(?=[^\-]|$)/,PSEUDO:/:((?:[\w\u00c0-\uFFFF\-]|\\.)+)(?:\((['"]?)((?:\([^\)]+\)|[^\(\)]*)+)\2\))?/},leftMatch:{},attrMap:{"class":"className","for":"htmlFor"},attrHandle:{href:function(a){return a.getAttribute("href")},type:function(a){return a.getAttribute("type")}},relative:{"+":function(a,b){var c=typeof 
b=="string",d=c&&!l.test(b),e=c&&!d;d&&(b=b.toLowerCase());for(var f=0,g=a.length,h;f<g;f++)if(h=a[f]){while((h=h.previousSibling)&&h.nodeType!==1);a[f]=e||h&&h.nodeName.toLowerCase()===b?h||!1:h===b}e&&m.filter(b,a,!0)},">":function(a,b){var c,d=typeof b=="string",e=0,f=a.length;if(d&&!l.test(b)){b=b.toLowerCase();for(;e<f;e++){c=a[e];if(c){var g=c.parentNode;a[e]=g.nodeName.toLowerCase()===b?g:!1}}}else{for(;e<f;e++)c=a[e],c&&(a[e]=d?c.parentNode:c.parentNode===b);d&&m.filter(b,a,!0)}},"":function(a,b,c){var d,f=e++,g=x;typeof b=="string"&&!l.test(b)&&(b=b.toLowerCase(),d=b,g=w),g("parentNode",b,f,a,d,c)},"~":function(a,b,c){var d,f=e++,g=x;typeof b=="string"&&!l.test(b)&&(b=b.toLowerCase(),d=b,g=w),g("previousSibling",b,f,a,d,c)}},find:{ID:function(a,b,c){if(typeof b.getElementById!="undefined"&&!c){var d=b.getElementById(a[1]);return d&&d.parentNode?[d]:[]}},NAME:function(a,b){if(typeof b.getElementsByName!="undefined"){var c=[],d=b.getElementsByName(a[1]);for(var e=0,f=d.length;e<f;e++)d[e].getAttribute("name")===a[1]&&c.push(d[e]);return c.length===0?null:c}},TAG:function(a,b){if(typeof b.getElementsByTagName!="undefined")return b.getElementsByTagName(a[1])}},preFilter:{CLASS:function(a,b,c,d,e,f){a=" "+a[1].replace(j,"")+" ";if(f)return a;for(var g=0,h;(h=b[g])!=null;g++)h&&(e^(h.className&&(" "+h.className+" ").replace(/[\t\n\r]/g," ").indexOf(a)>=0)?c||d.push(h):c&&(b[g]=!1));return!1},ID:function(a){return a[1].replace(j,"")},TAG:function(a,b){return a[1].replace(j,"").toLowerCase()},CHILD:function(a){if(a[1]==="nth"){a[2]||m.error(a[0]),a[2]=a[2].replace(/^\+|\s*/g,"");var b=/(-?)(\d*)(?:n([+\-]?\d*))?/.exec(a[2]==="even"&&"2n"||a[2]==="odd"&&"2n+1"||!/\D/.test(a[2])&&"0n+"+a[2]||a[2]);a[2]=b[1]+(b[2]||1)-0,a[3]=b[3]-0}else a[2]&&m.error(a[0]);a[0]=e++;return a},ATTR:function(a,b,c,d,e,f){var g=a[1]=a[1].replace(j,"");!f&&o.attrMap[g]&&(a[1]=o.attrMap[g]),a[4]=(a[4]||a[5]||"").replace(j,""),a[2]==="~="&&(a[4]=" "+a[4]+" ");return a},PSEUDO:function(b,c,d,e,f){if(b[1]==="not")if((a.exec(b[3])||"").length>1||/^\w/.test(b[3]))b[3]=m(b[3],null,null,c);else{var g=m.filter(b[3],c,d,!0^f);d||e.push.apply(e,g);return!1}else if(o.match.POS.test(b[0])||o.match.CHILD.test(b[0]))return!0;return b},POS:function(a){a.unshift(!0);return a}},filters:{enabled:function(a){return a.disabled===!1&&a.type!=="hidden"},disabled:function(a){return a.disabled===!0},checked:function(a){return a.checked===!0},selected:function(a){a.parentNode&&a.parentNode.selectedIndex;return a.selected===!0},parent:function(a){return!!a.firstChild},empty:function(a){return!a.firstChild},has:function(a,b,c){return!!m(c[3],a).length},header:function(a){return/h\d/i.test(a.nodeName)},text:function(a){var b=a.getAttribute("type"),c=a.type;return a.nodeName.toLowerCase()==="input"&&"text"===c&&(b===c||b===null)},radio:function(a){return a.nodeName.toLowerCase()==="input"&&"radio"===a.type},checkbox:function(a){return a.nodeName.toLowerCase()==="input"&&"checkbox"===a.type},file:function(a){return a.nodeName.toLowerCase()==="input"&&"file"===a.type},password:function(a){return a.nodeName.toLowerCase()==="input"&&"password"===a.type},submit:function(a){var b=a.nodeName.toLowerCase();return(b==="input"||b==="button")&&"submit"===a.type},image:function(a){return a.nodeName.toLowerCase()==="input"&&"image"===a.type},reset:function(a){var b=a.nodeName.toLowerCase();return(b==="input"||b==="button")&&"reset"===a.type},button:function(a){var b=a.nodeName.toLowerCase();return 
b==="input"&&"button"===a.type||b==="button"},input:function(a){return/input|select|textarea|button/i.test(a.nodeName)},focus:function(a){return a===a.ownerDocument.activeElement}},setFilters:{first:function(a,b){return b===0},last:function(a,b,c,d){return b===d.length-1},even:function(a,b){return b%2===0},odd:function(a,b){return b%2===1},lt:function(a,b,c){return b<c[3]-0},gt:function(a,b,c){return b>c[3]-0},nth:function(a,b,c){return c[3]-0===b},eq:function(a,b,c){return c[3]-0===b}},filter:{PSEUDO:function(a,b,c,d){var e=b[1],f=o.filters[e];if(f)return f(a,c,b,d);if(e==="contains")return(a.textContent||a.innerText||n([a])||"").indexOf(b[3])>=0;if(e==="not"){var g=b[3];for(var h=0,i=g.length;h<i;h++)if(g[h]===a)return!1;return!0}m.error(e)},CHILD:function(a,b){var c,e,f,g,h,i,j,k=b[1],l=a;switch(k){case"only":case"first":while(l=l.previousSibling)if(l.nodeType===1)return!1;if(k==="first")return!0;l=a;case"last":while(l=l.nextSibling)if(l.nodeType===1)return!1;return!0;case"nth":c=b[2],e=b[3];if(c===1&&e===0)return!0;f=b[0],g=a.parentNode;if(g&&(g[d]!==f||!a.nodeIndex)){i=0;for(l=g.firstChild;l;l=l.nextSibling)l.nodeType===1&&(l.nodeIndex=++i);g[d]=f}j=a.nodeIndex-e;return c===0?j===0:j%c===0&&j/c>=0}},ID:function(a,b){return a.nodeType===1&&a.getAttribute("id")===b},TAG:function(a,b){return b==="*"&&a.nodeType===1||!!a.nodeName&&a.nodeName.toLowerCase()===b},CLASS:function(a,b){return(" "+(a.className||a.getAttribute("class"))+" ").indexOf(b)>-1},ATTR:function(a,b){var c=b[1],d=m.attr?m.attr(a,c):o.attrHandle[c]?o.attrHandle[c](a):a[c]!=null?a[c]:a.getAttribute(c),e=d+"",f=b[2],g=b[4];return d==null?f==="!=":!f&&m.attr?d!=null:f==="="?e===g:f==="*="?e.indexOf(g)>=0:f==="~="?(" "+e+" ").indexOf(g)>=0:g?f==="!="?e!==g:f==="^="?e.indexOf(g)===0:f==="$="?e.substr(e.length-g.length)===g:f==="|="?e===g||e.substr(0,g.length+1)===g+"-":!1:e&&d!==!1},POS:function(a,b,c,d){var e=b[2],f=o.setFilters[e];if(f)return f(a,c,b,d)}}},p=o.match.POS,q=function(a,b){return"\\"+(b-0+1)};for(var r in o.match)o.match[r]=new RegExp(o.match[r].source+/(?![^\[]*\])(?![^\(]*\))/.source),o.leftMatch[r]=new RegExp(/(^(?:.|\r|\n)*?)/.source+o.match[r].source.replace(/\\(\d+)/g,q));o.match.globalPOS=p;var s=function(a,b){a=Array.prototype.slice.call(a,0);if(b){b.push.apply(b,a);return b}return a};try{Array.prototype.slice.call(c.documentElement.childNodes,0)[0].nodeType}catch(t){s=function(a,b){var c=0,d=b||[];if(g.call(a)==="[object Array]")Array.prototype.push.apply(d,a);else if(typeof a.length=="number")for(var e=a.length;c<e;c++)d.push(a[c]);else for(;a[c];c++)d.push(a[c]);return d}}var u,v;c.documentElement.compareDocumentPosition?u=function(a,b){if(a===b){h=!0;return 0}if(!a.compareDocumentPosition||!b.compareDocumentPosition)return a.compareDocumentPosition?-1:1;return a.compareDocumentPosition(b)&4?-1:1}:(u=function(a,b){if(a===b){h=!0;return 0}if(a.sourceIndex&&b.sourceIndex)return a.sourceIndex-b.sourceIndex;var c,d,e=[],f=[],g=a.parentNode,i=b.parentNode,j=g;if(g===i)return v(a,b);if(!g)return-1;if(!i)return 1;while(j)e.unshift(j),j=j.parentNode;j=i;while(j)f.unshift(j),j=j.parentNode;c=e.length,d=f.length;for(var k=0;k<c&&k<d;k++)if(e[k]!==f[k])return v(e[k],f[k]);return k===c?v(a,f[k],-1):v(e[k],b,1)},v=function(a,b,c){if(a===b)return c;var d=a.nextSibling;while(d){if(d===b)return-1;d=d.nextSibling}return 1}),function(){var a=c.createElement("div"),d="script"+(new Date).getTime(),e=c.documentElement;a.innerHTML="<a 
name='"+d+"'/>",e.insertBefore(a,e.firstChild),c.getElementById(d)&&(o.find.ID=function(a,c,d){if(typeof c.getElementById!="undefined"&&!d){var e=c.getElementById(a[1]);return e?e.id===a[1]||typeof e.getAttributeNode!="undefined"&&e.getAttributeNode("id").nodeValue===a[1]?[e]:b:[]}},o.filter.ID=function(a,b){var c=typeof a.getAttributeNode!="undefined"&&a.getAttributeNode("id");return a.nodeType===1&&c&&c.nodeValue===b}),e.removeChild(a),e=a=null}(),function(){var a=c.createElement("div");a.appendChild(c.createComment("")),a.getElementsByTagName("*").length>0&&(o.find.TAG=function(a,b){var c=b.getElementsByTagName(a[1]);if(a[1]==="*"){var d=[];for(var e=0;c[e];e++)c[e].nodeType===1&&d.push(c[e]);c=d}return c}),a.innerHTML="<a href='#'></a>",a.firstChild&&typeof a.firstChild.getAttribute!="undefined"&&a.firstChild.getAttribute("href")!=="#"&&(o.attrHandle.href=function(a){return a.getAttribute("href",2)}),a=null}(),c.querySelectorAll&&function(){var a=m,b=c.createElement("div"),d="__sizzle__";b.innerHTML="<p class='TEST'></p>";if(!b.querySelectorAll||b.querySelectorAll(".TEST").length!==0){m=function(b,e,f,g){e=e||c;if(!g&&!m.isXML(e)){var h=/^(\w+$)|^\.([\w\-]+$)|^#([\w\-]+$)/.exec(b);if(h&&(e.nodeType===1||e.nodeType===9)){if(h[1])return s(e.getElementsByTagName(b),f);if(h[2]&&o.find.CLASS&&e.getElementsByClassName)return s(e.getElementsByClassName(h[2]),f)}if(e.nodeType===9){if(b==="body"&&e.body)return s([e.body],f);if(h&&h[3]){var i=e.getElementById(h[3]);if(!i||!i.parentNode)return s([],f);if(i.id===h[3])return s([i],f)}try{return s(e.querySelectorAll(b),f)}catch(j){}}else if(e.nodeType===1&&e.nodeName.toLowerCase()!=="object"){var k=e,l=e.getAttribute("id"),n=l||d,p=e.parentNode,q=/^\s*[+~]/.test(b);l?n=n.replace(/'/g,"\\$&"):e.setAttribute("id",n),q&&p&&(e=e.parentNode);try{if(!q||p)return s(e.querySelectorAll("[id='"+n+"'] "+b),f)}catch(r){}finally{l||k.removeAttribute("id")}}}return a(b,e,f,g)};for(var e in a)m[e]=a[e];b=null}}(),function(){var a=c.documentElement,b=a.matchesSelector||a.mozMatchesSelector||a.webkitMatchesSelector||a.msMatchesSelector;if(b){var d=!b.call(c.createElement("div"),"div"),e=!1;try{b.call(c.documentElement,"[test!='']:sizzle")}catch(f){e=!0}m.matchesSelector=function(a,c){c=c.replace(/\=\s*([^'"\]]*)\s*\]/g,"='$1']");if(!m.isXML(a))try{if(e||!o.match.PSEUDO.test(c)&&!/!=/.test(c)){var f=b.call(a,c);if(f||!d||a.document&&a.document.nodeType!==11)return f}}catch(g){}return m(c,null,null,[a]).length>0}}}(),function(){var a=c.createElement("div");a.innerHTML="<div class='test e'></div><div class='test'></div>";if(!!a.getElementsByClassName&&a.getElementsByClassName("e").length!==0){a.lastChild.className="e";if(a.getElementsByClassName("e").length===1)return;o.order.splice(1,0,"CLASS"),o.find.CLASS=function(a,b,c){if(typeof b.getElementsByClassName!="undefined"&&!c)return b.getElementsByClassName(a[1])},a=null}}(),c.documentElement.contains?m.contains=function(a,b){return a!==b&&(a.contains?a.contains(b):!0)}:c.documentElement.compareDocumentPosition?m.contains=function(a,b){return!!(a.compareDocumentPosition(b)&16)}:m.contains=function(){return!1},m.isXML=function(a){var b=(a?a.ownerDocument||a:0).documentElement;return b?b.nodeName!=="HTML":!1};var y=function(a,b,c){var d,e=[],f="",g=b.nodeType?[b]:b;while(d=o.match.PSEUDO.exec(a))f+=d[0],a=a.replace(o.match.PSEUDO,"");a=o.relative[a]?a+"*":a;for(var h=0,i=g.length;h<i;h++)m(a,g[h],e,c);return 
m.filter(f,e)};m.attr=f.attr,m.selectors.attrMap={},f.find=m,f.expr=m.selectors,f.expr[":"]=f.expr.filters,f.unique=m.uniqueSort,f.text=m.getText,f.isXMLDoc=m.isXML,f.contains=m.contains}();var L=/Until$/,M=/^(?:parents|prevUntil|prevAll)/,N=/,/,O=/^.[^:#\[\.,]*$/,P=Array.prototype.slice,Q=f.expr.match.globalPOS,R={children:!0,contents:!0,next:!0,prev:!0};f.fn.extend({find:function(a){var b=this,c,d;if(typeof a!="string")return f(a).filter(function(){for(c=0,d=b.length;c<d;c++)if(f.contains(b[c],this))return!0});var e=this.pushStack("","find",a),g,h,i;for(c=0,d=this.length;c<d;c++){g=e.length,f.find(a,this[c],e);if(c>0)for(h=g;h<e.length;h++)for(i=0;i<g;i++)if(e[i]===e[h]){e.splice(h--,1);break}}return e},has:function(a){var b=f(a);return this.filter(function(){for(var a=0,c=b.length;a<c;a++)if(f.contains(this,b[a]))return!0})},not:function(a){return this.pushStack(T(this,a,!1),"not",a)},filter:function(a){return this.pushStack(T(this,a,!0),"filter",a)},is:function(a){return!!a&&(typeof a=="string"?Q.test(a)?f(a,this.context).index(this[0])>=0:f.filter(a,this).length>0:this.filter(a).length>0)},closest:function(a,b){var c=[],d,e,g=this[0];if(f.isArray(a)){var h=1;while(g&&g.ownerDocument&&g!==b){for(d=0;d<a.length;d++)f(g).is(a[d])&&c.push({selector:a[d],elem:g,level:h});g=g.parentNode,h++}return c}var i=Q.test(a)||typeof a!="string"?f(a,b||this.context):0;for(d=0,e=this.length;d<e;d++){g=this[d];while(g){if(i?i.index(g)>-1:f.find.matchesSelector(g,a)){c.push(g);break}g=g.parentNode;if(!g||!g.ownerDocument||g===b||g.nodeType===11)break}}c=c.length>1?f.unique(c):c;return this.pushStack(c,"closest",a)},index:function(a){if(!a)return this[0]&&this[0].parentNode?this.prevAll().length:-1;if(typeof a=="string")return f.inArray(this[0],f(a));return f.inArray(a.jquery?a[0]:a,this)},add:function(a,b){var c=typeof a=="string"?f(a,b):f.makeArray(a&&a.nodeType?[a]:a),d=f.merge(this.get(),c);return this.pushStack(S(c[0])||S(d[0])?d:f.unique(d))},andSelf:function(){return this.add(this.prevObject)}}),f.each({parent:function(a){var b=a.parentNode;return b&&b.nodeType!==11?b:null},parents:function(a){return f.dir(a,"parentNode")},parentsUntil:function(a,b,c){return f.dir(a,"parentNode",c)},next:function(a){return f.nth(a,2,"nextSibling")},prev:function(a){return f.nth(a,2,"previousSibling")},nextAll:function(a){return f.dir(a,"nextSibling")},prevAll:function(a){return f.dir(a,"previousSibling")},nextUntil:function(a,b,c){return f.dir(a,"nextSibling",c)},prevUntil:function(a,b,c){return f.dir(a,"previousSibling",c)},siblings:function(a){return f.sibling((a.parentNode||{}).firstChild,a)},children:function(a){return f.sibling(a.firstChild)},contents:function(a){return f.nodeName(a,"iframe")?a.contentDocument||a.contentWindow.document:f.makeArray(a.childNodes)}},function(a,b){f.fn[a]=function(c,d){var e=f.map(this,b,c);L.test(a)||(d=c),d&&typeof d=="string"&&(e=f.filter(d,e)),e=this.length>1&&!R[a]?f.unique(e):e,(this.length>1||N.test(d))&&M.test(a)&&(e=e.reverse());return this.pushStack(e,a,P.call(arguments).join(","))}}),f.extend({filter:function(a,b,c){c&&(a=":not("+a+")");return b.length===1?f.find.matchesSelector(b[0],a)?[b[0]]:[]:f.find.matches(a,b)},dir:function(a,c,d){var e=[],g=a[c];while(g&&g.nodeType!==9&&(d===b||g.nodeType!==1||!f(g).is(d)))g.nodeType===1&&e.push(g),g=g[c];return e},nth:function(a,b,c,d){b=b||1;var e=0;for(;a;a=a[c])if(a.nodeType===1&&++e===b)break;return a},sibling:function(a,b){var c=[];for(;a;a=a.nextSibling)a.nodeType===1&&a!==b&&c.push(a);return c}});var 
V="abbr|article|aside|audio|bdi|canvas|data|datalist|details|figcaption|figure|footer|header|hgroup|mark|meter|nav|output|progress|section|summary|time|video",W=/ jQuery\d+="(?:\d+|null)"/g,X=/^\s+/,Y=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/ig,Z=/<([\w:]+)/,$=/<tbody/i,_=/<|&#?\w+;/,ba=/<(?:script|style)/i,bb=/<(?:script|object|embed|option|style)/i,bc=new RegExp("<(?:"+V+")[\\s/>]","i"),bd=/checked\s*(?:[^=]|=\s*.checked.)/i,be=/\/(java|ecma)script/i,bf=/^\s*<!(?:\[CDATA\[|\-\-)/,bg={option:[1,"<select multiple='multiple'>","</select>"],legend:[1,"<fieldset>","</fieldset>"],thead:[1,"<table>","</table>"],tr:[2,"<table><tbody>","</tbody></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],col:[2,"<table><tbody></tbody><colgroup>","</colgroup></table>"],area:[1,"<map>","</map>"],_default:[0,"",""]},bh=U(c);bg.optgroup=bg.option,bg.tbody=bg.tfoot=bg.colgroup=bg.caption=bg.thead,bg.th=bg.td,f.support.htmlSerialize||(bg._default=[1,"div<div>","</div>"]),f.fn.extend({text:function(a){return f.access(this,function(a){return a===b?f.text(this):this.empty().append((this[0]&&this[0].ownerDocument||c).createTextNode(a))},null,a,arguments.length)},wrapAll:function(a){if(f.isFunction(a))return this.each(function(b){f(this).wrapAll(a.call(this,b))});if(this[0]){var b=f(a,this[0].ownerDocument).eq(0).clone(!0);this[0].parentNode&&b.insertBefore(this[0]),b.map(function(){var a=this;while(a.firstChild&&a.firstChild.nodeType===1)a=a.firstChild;return a}).append(this)}return this},wrapInner:function(a){if(f.isFunction(a))return this.each(function(b){f(this).wrapInner(a.call(this,b))});return this.each(function(){var b=f(this),c=b.contents();c.length?c.wrapAll(a):b.append(a)})},wrap:function(a){var b=f.isFunction(a);return this.each(function(c){f(this).wrapAll(b?a.call(this,c):a)})},unwrap:function(){return this.parent().each(function(){f.nodeName(this,"body")||f(this).replaceWith(this.childNodes)}).end()},append:function(){return this.domManip(arguments,!0,function(a){this.nodeType===1&&this.appendChild(a)})},prepend:function(){return this.domManip(arguments,!0,function(a){this.nodeType===1&&this.insertBefore(a,this.firstChild)})},before:function(){if(this[0]&&this[0].parentNode)return this.domManip(arguments,!1,function(a){this.parentNode.insertBefore(a,this)});if(arguments.length){var a=f
-.clean(arguments);a.push.apply(a,this.toArray());return this.pushStack(a,"before",arguments)}},after:function(){if(this[0]&&this[0].parentNode)return this.domManip(arguments,!1,function(a){this.parentNode.insertBefore(a,this.nextSibling)});if(arguments.length){var a=this.pushStack(this,"after",arguments);a.push.apply(a,f.clean(arguments));return a}},remove:function(a,b){for(var c=0,d;(d=this[c])!=null;c++)if(!a||f.filter(a,[d]).length)!b&&d.nodeType===1&&(f.cleanData(d.getElementsByTagName("*")),f.cleanData([d])),d.parentNode&&d.parentNode.removeChild(d);return this},empty:function(){for(var a=0,b;(b=this[a])!=null;a++){b.nodeType===1&&f.cleanData(b.getElementsByTagName("*"));while(b.firstChild)b.removeChild(b.firstChild)}return this},clone:function(a,b){a=a==null?!1:a,b=b==null?a:b;return this.map(function(){return f.clone(this,a,b)})},html:function(a){return f.access(this,function(a){var c=this[0]||{},d=0,e=this.length;if(a===b)return c.nodeType===1?c.innerHTML.replace(W,""):null;if(typeof a=="string"&&!ba.test(a)&&(f.support.leadingWhitespace||!X.test(a))&&!bg[(Z.exec(a)||["",""])[1].toLowerCase()]){a=a.replace(Y,"<$1></$2>");try{for(;d<e;d++)c=this[d]||{},c.nodeType===1&&(f.cleanData(c.getElementsByTagName("*")),c.innerHTML=a);c=0}catch(g){}}c&&this.empty().append(a)},null,a,arguments.length)},replaceWith:function(a){if(this[0]&&this[0].parentNode){if(f.isFunction(a))return this.each(function(b){var c=f(this),d=c.html();c.replaceWith(a.call(this,b,d))});typeof a!="string"&&(a=f(a).detach());return this.each(function(){var b=this.nextSibling,c=this.parentNode;f(this).remove(),b?f(b).before(a):f(c).append(a)})}return this.length?this.pushStack(f(f.isFunction(a)?a():a),"replaceWith",a):this},detach:function(a){return this.remove(a,!0)},domManip:function(a,c,d){var e,g,h,i,j=a[0],k=[];if(!f.support.checkClone&&arguments.length===3&&typeof j=="string"&&bd.test(j))return this.each(function(){f(this).domManip(a,c,d,!0)});if(f.isFunction(j))return this.each(function(e){var g=f(this);a[0]=j.call(this,e,c?g.html():b),g.domManip(a,c,d)});if(this[0]){i=j&&j.parentNode,f.support.parentNode&&i&&i.nodeType===11&&i.childNodes.length===this.length?e={fragment:i}:e=f.buildFragment(a,this,k),h=e.fragment,h.childNodes.length===1?g=h=h.firstChild:g=h.firstChild;if(g){c=c&&f.nodeName(g,"tr");for(var l=0,m=this.length,n=m-1;l<m;l++)d.call(c?bi(this[l],g):this[l],e.cacheable||m>1&&l<n?f.clone(h,!0,!0):h)}k.length&&f.each(k,function(a,b){b.src?f.ajax({type:"GET",global:!1,url:b.src,async:!1,dataType:"script"}):f.globalEval((b.text||b.textContent||b.innerHTML||"").replace(bf,"/*$0*/")),b.parentNode&&b.parentNode.removeChild(b)})}return this}}),f.buildFragment=function(a,b,d){var e,g,h,i,j=a[0];b&&b[0]&&(i=b[0].ownerDocument||b[0]),i.createDocumentFragment||(i=c),a.length===1&&typeof j=="string"&&j.length<512&&i===c&&j.charAt(0)==="<"&&!bb.test(j)&&(f.support.checkClone||!bd.test(j))&&(f.support.html5Clone||!bc.test(j))&&(g=!0,h=f.fragments[j],h&&h!==1&&(e=h)),e||(e=i.createDocumentFragment(),f.clean(a,i,e,d)),g&&(f.fragments[j]=h?e:1);return{fragment:e,cacheable:g}},f.fragments={},f.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){f.fn[a]=function(c){var d=[],e=f(c),g=this.length===1&&this[0].parentNode;if(g&&g.nodeType===11&&g.childNodes.length===1&&e.length===1){e[b](this[0]);return this}for(var h=0,i=e.length;h<i;h++){var j=(h>0?this.clone(!0):this).get();f(e[h])[b](j),d=d.concat(j)}return 
this.pushStack(d,a,e.selector)}}),f.extend({clone:function(a,b,c){var d,e,g,h=f.support.html5Clone||f.isXMLDoc(a)||!bc.test("<"+a.nodeName+">")?a.cloneNode(!0):bo(a);if((!f.support.noCloneEvent||!f.support.noCloneChecked)&&(a.nodeType===1||a.nodeType===11)&&!f.isXMLDoc(a)){bk(a,h),d=bl(a),e=bl(h);for(g=0;d[g];++g)e[g]&&bk(d[g],e[g])}if(b){bj(a,h);if(c){d=bl(a),e=bl(h);for(g=0;d[g];++g)bj(d[g],e[g])}}d=e=null;return h},clean:function(a,b,d,e){var g,h,i,j=[];b=b||c,typeof b.createElement=="undefined"&&(b=b.ownerDocument||b[0]&&b[0].ownerDocument||c);for(var k=0,l;(l=a[k])!=null;k++){typeof l=="number"&&(l+="");if(!l)continue;if(typeof l=="string")if(!_.test(l))l=b.createTextNode(l);else{l=l.replace(Y,"<$1></$2>");var m=(Z.exec(l)||["",""])[1].toLowerCase(),n=bg[m]||bg._default,o=n[0],p=b.createElement("div"),q=bh.childNodes,r;b===c?bh.appendChild(p):U(b).appendChild(p),p.innerHTML=n[1]+l+n[2];while(o--)p=p.lastChild;if(!f.support.tbody){var s=$.test(l),t=m==="table"&&!s?p.firstChild&&p.firstChild.childNodes:n[1]==="<table>"&&!s?p.childNodes:[];for(i=t.length-1;i>=0;--i)f.nodeName(t[i],"tbody")&&!t[i].childNodes.length&&t[i].parentNode.removeChild(t[i])}!f.support.leadingWhitespace&&X.test(l)&&p.insertBefore(b.createTextNode(X.exec(l)[0]),p.firstChild),l=p.childNodes,p&&(p.parentNode.removeChild(p),q.length>0&&(r=q[q.length-1],r&&r.parentNode&&r.parentNode.removeChild(r)))}var u;if(!f.support.appendChecked)if(l[0]&&typeof (u=l.length)=="number")for(i=0;i<u;i++)bn(l[i]);else bn(l);l.nodeType?j.push(l):j=f.merge(j,l)}if(d){g=function(a){return!a.type||be.test(a.type)};for(k=0;j[k];k++){h=j[k];if(e&&f.nodeName(h,"script")&&(!h.type||be.test(h.type)))e.push(h.parentNode?h.parentNode.removeChild(h):h);else{if(h.nodeType===1){var v=f.grep(h.getElementsByTagName("script"),g);j.splice.apply(j,[k+1,0].concat(v))}d.appendChild(h)}}}return j},cleanData:function(a){var b,c,d=f.cache,e=f.event.special,g=f.support.deleteExpando;for(var h=0,i;(i=a[h])!=null;h++){if(i.nodeName&&f.noData[i.nodeName.toLowerCase()])continue;c=i[f.expando];if(c){b=d[c];if(b&&b.events){for(var j in b.events)e[j]?f.event.remove(i,j):f.removeEvent(i,j,b.handle);b.handle&&(b.handle.elem=null)}g?delete i[f.expando]:i.removeAttribute&&i.removeAttribute(f.expando),delete d[c]}}}});var bp=/alpha\([^)]*\)/i,bq=/opacity=([^)]*)/,br=/([A-Z]|^ms)/g,bs=/^[\-+]?(?:\d*\.)?\d+$/i,bt=/^-?(?:\d*\.)?\d+(?!px)[^\d\s]+$/i,bu=/^([\-+])=([\-+.\de]+)/,bv=/^margin/,bw={position:"absolute",visibility:"hidden",display:"block"},bx=["Top","Right","Bottom","Left"],by,bz,bA;f.fn.css=function(a,c){return f.access(this,function(a,c,d){return d!==b?f.style(a,c,d):f.css(a,c)},a,c,arguments.length>1)},f.extend({cssHooks:{opacity:{get:function(a,b){if(b){var c=by(a,"opacity");return c===""?"1":c}return a.style.opacity}}},cssNumber:{fillOpacity:!0,fontWeight:!0,lineHeight:!0,opacity:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{"float":f.support.cssFloat?"cssFloat":"styleFloat"},style:function(a,c,d,e){if(!!a&&a.nodeType!==3&&a.nodeType!==8&&!!a.style){var g,h,i=f.camelCase(c),j=a.style,k=f.cssHooks[i];c=f.cssProps[i]||i;if(d===b){if(k&&"get"in k&&(g=k.get(a,!1,e))!==b)return g;return j[c]}h=typeof d,h==="string"&&(g=bu.exec(d))&&(d=+(g[1]+1)*+g[2]+parseFloat(f.css(a,c)),h="number");if(d==null||h==="number"&&isNaN(d))return;h==="number"&&!f.cssNumber[i]&&(d+="px");if(!k||!("set"in k)||(d=k.set(a,d))!==b)try{j[c]=d}catch(l){}}},css:function(a,c,d){var e,g;c=f.camelCase(c),g=f.cssHooks[c],c=f.cssProps[c]||c,c==="cssFloat"&&(c="float");if(g&&"get"in 
g&&(e=g.get(a,!0,d))!==b)return e;if(by)return by(a,c)},swap:function(a,b,c){var d={},e,f;for(f in b)d[f]=a.style[f],a.style[f]=b[f];e=c.call(a);for(f in b)a.style[f]=d[f];return e}}),f.curCSS=f.css,c.defaultView&&c.defaultView.getComputedStyle&&(bz=function(a,b){var c,d,e,g,h=a.style;b=b.replace(br,"-$1").toLowerCase(),(d=a.ownerDocument.defaultView)&&(e=d.getComputedStyle(a,null))&&(c=e.getPropertyValue(b),c===""&&!f.contains(a.ownerDocument.documentElement,a)&&(c=f.style(a,b))),!f.support.pixelMargin&&e&&bv.test(b)&&bt.test(c)&&(g=h.width,h.width=c,c=e.width,h.width=g);return c}),c.documentElement.currentStyle&&(bA=function(a,b){var c,d,e,f=a.currentStyle&&a.currentStyle[b],g=a.style;f==null&&g&&(e=g[b])&&(f=e),bt.test(f)&&(c=g.left,d=a.runtimeStyle&&a.runtimeStyle.left,d&&(a.runtimeStyle.left=a.currentStyle.left),g.left=b==="fontSize"?"1em":f,f=g.pixelLeft+"px",g.left=c,d&&(a.runtimeStyle.left=d));return f===""?"auto":f}),by=bz||bA,f.each(["height","width"],function(a,b){f.cssHooks[b]={get:function(a,c,d){if(c)return a.offsetWidth!==0?bB(a,b,d):f.swap(a,bw,function(){return bB(a,b,d)})},set:function(a,b){return bs.test(b)?b+"px":b}}}),f.support.opacity||(f.cssHooks.opacity={get:function(a,b){return bq.test((b&&a.currentStyle?a.currentStyle.filter:a.style.filter)||"")?parseFloat(RegExp.$1)/100+"":b?"1":""},set:function(a,b){var c=a.style,d=a.currentStyle,e=f.isNumeric(b)?"alpha(opacity="+b*100+")":"",g=d&&d.filter||c.filter||"";c.zoom=1;if(b>=1&&f.trim(g.replace(bp,""))===""){c.removeAttribute("filter");if(d&&!d.filter)return}c.filter=bp.test(g)?g.replace(bp,e):g+" "+e}}),f(function(){f.support.reliableMarginRight||(f.cssHooks.marginRight={get:function(a,b){return f.swap(a,{display:"inline-block"},function(){return b?by(a,"margin-right"):a.style.marginRight})}})}),f.expr&&f.expr.filters&&(f.expr.filters.hidden=function(a){var b=a.offsetWidth,c=a.offsetHeight;return b===0&&c===0||!f.support.reliableHiddenOffsets&&(a.style&&a.style.display||f.css(a,"display"))==="none"},f.expr.filters.visible=function(a){return!f.expr.filters.hidden(a)}),f.each({margin:"",padding:"",border:"Width"},function(a,b){f.cssHooks[a+b]={expand:function(c){var d,e=typeof c=="string"?c.split(" "):[c],f={};for(d=0;d<4;d++)f[a+bx[d]+b]=e[d]||e[d-2]||e[0];return f}}});var bC=/%20/g,bD=/\[\]$/,bE=/\r?\n/g,bF=/#.*$/,bG=/^(.*?):[ \t]*([^\r\n]*)\r?$/mg,bH=/^(?:color|date|datetime|datetime-local|email|hidden|month|number|password|range|search|tel|text|time|url|week)$/i,bI=/^(?:about|app|app\-storage|.+\-extension|file|res|widget):$/,bJ=/^(?:GET|HEAD)$/,bK=/^\/\//,bL=/\?/,bM=/<script\b[^<]*(?:(?!<\/script>)<[^<]*)*<\/script>/gi,bN=/^(?:select|textarea)/i,bO=/\s+/,bP=/([?&])_=[^&]*/,bQ=/^([\w\+\.\-]+:)(?:\/\/([^\/?#:]*)(?::(\d+))?)?/,bR=f.fn.load,bS={},bT={},bU,bV,bW=["*/"]+["*"];try{bU=e.href}catch(bX){bU=c.createElement("a"),bU.href="",bU=bU.href}bV=bQ.exec(bU.toLowerCase())||[],f.fn.extend({load:function(a,c,d){if(typeof a!="string"&&bR)return bR.apply(this,arguments);if(!this.length)return this;var e=a.indexOf(" ");if(e>=0){var g=a.slice(e,a.length);a=a.slice(0,e)}var h="GET";c&&(f.isFunction(c)?(d=c,c=b):typeof c=="object"&&(c=f.param(c,f.ajaxSettings.traditional),h="POST"));var i=this;f.ajax({url:a,type:h,dataType:"html",data:c,complete:function(a,b,c){c=a.responseText,a.isResolved()&&(a.done(function(a){c=a}),i.html(g?f("<div>").append(c.replace(bM,"")).find(g):c)),d&&i.each(d,[c,b,a])}});return this},serialize:function(){return f.param(this.serializeArray())},serializeArray:function(){return 
this.map(function(){return this.elements?f.makeArray(this.elements):this}).filter(function(){return this.name&&!this.disabled&&(this.checked||bN.test(this.nodeName)||bH.test(this.type))}).map(function(a,b){var c=f(this).val();return c==null?null:f.isArray(c)?f.map(c,function(a,c){return{name:b.name,value:a.replace(bE,"\r\n")}}):{name:b.name,value:c.replace(bE,"\r\n")}}).get()}}),f.each("ajaxStart ajaxStop ajaxComplete ajaxError ajaxSuccess ajaxSend".split(" "),function(a,b){f.fn[b]=function(a){return this.on(b,a)}}),f.each(["get","post"],function(a,c){f[c]=function(a,d,e,g){f.isFunction(d)&&(g=g||e,e=d,d=b);return f.ajax({type:c,url:a,data:d,success:e,dataType:g})}}),f.extend({getScript:function(a,c){return f.get(a,b,c,"script")},getJSON:function(a,b,c){return f.get(a,b,c,"json")},ajaxSetup:function(a,b){b?b$(a,f.ajaxSettings):(b=a,a=f.ajaxSettings),b$(a,b);return a},ajaxSettings:{url:bU,isLocal:bI.test(bV[1]),global:!0,type:"GET",contentType:"application/x-www-form-urlencoded; charset=UTF-8",processData:!0,async:!0,accepts:{xml:"application/xml, text/xml",html:"text/html",text:"text/plain",json:"application/json, text/javascript","*":bW},contents:{xml:/xml/,html:/html/,json:/json/},responseFields:{xml:"responseXML",text:"responseText"},converters:{"* text":a.String,"text html":!0,"text json":f.parseJSON,"text xml":f.parseXML},flatOptions:{context:!0,url:!0}},ajaxPrefilter:bY(bS),ajaxTransport:bY(bT),ajax:function(a,c){function w(a,c,l,m){if(s!==2){s=2,q&&clearTimeout(q),p=b,n=m||"",v.readyState=a>0?4:0;var o,r,u,w=c,x=l?ca(d,v,l):b,y,z;if(a>=200&&a<300||a===304){if(d.ifModified){if(y=v.getResponseHeader("Last-Modified"))f.lastModified[k]=y;if(z=v.getResponseHeader("Etag"))f.etag[k]=z}if(a===304)w="notmodified",o=!0;else try{r=cb(d,x),w="success",o=!0}catch(A){w="parsererror",u=A}}else{u=w;if(!w||a)w="error",a<0&&(a=0)}v.status=a,v.statusText=""+(c||w),o?h.resolveWith(e,[r,w,v]):h.rejectWith(e,[v,w,u]),v.statusCode(j),j=b,t&&g.trigger("ajax"+(o?"Success":"Error"),[v,d,o?r:u]),i.fireWith(e,[v,w]),t&&(g.trigger("ajaxComplete",[v,d]),--f.active||f.event.trigger("ajaxStop"))}}typeof a=="object"&&(c=a,a=b),c=c||{};var d=f.ajaxSetup({},c),e=d.context||d,g=e!==d&&(e.nodeType||e instanceof f)?f(e):f.event,h=f.Deferred(),i=f.Callbacks("once memory"),j=d.statusCode||{},k,l={},m={},n,o,p,q,r,s=0,t,u,v={readyState:0,setRequestHeader:function(a,b){if(!s){var c=a.toLowerCase();a=m[c]=m[c]||a,l[a]=b}return this},getAllResponseHeaders:function(){return s===2?n:null},getResponseHeader:function(a){var c;if(s===2){if(!o){o={};while(c=bG.exec(n))o[c[1].toLowerCase()]=c[2]}c=o[a.toLowerCase()]}return c===b?null:c},overrideMimeType:function(a){s||(d.mimeType=a);return this},abort:function(a){a=a||"abort",p&&p.abort(a),w(0,a);return this}};h.promise(v),v.success=v.done,v.error=v.fail,v.complete=i.add,v.statusCode=function(a){if(a){var b;if(s<2)for(b in a)j[b]=[j[b],a[b]];else b=a[v.status],v.then(b,b)}return this},d.url=((a||d.url)+"").replace(bF,"").replace(bK,bV[1]+"//"),d.dataTypes=f.trim(d.dataType||"*").toLowerCase().split(bO),d.crossDomain==null&&(r=bQ.exec(d.url.toLowerCase()),d.crossDomain=!(!r||r[1]==bV[1]&&r[2]==bV[2]&&(r[3]||(r[1]==="http:"?80:443))==(bV[3]||(bV[1]==="http:"?80:443)))),d.data&&d.processData&&typeof d.data!="string"&&(d.data=f.param(d.data,d.traditional)),bZ(bS,d,c,v);if(s===2)return!1;t=d.global,d.type=d.type.toUpperCase(),d.hasContent=!bJ.test(d.type),t&&f.active++===0&&f.event.trigger("ajaxStart");if(!d.hasContent){d.data&&(d.url+=(bL.test(d.url)?"&":"?")+d.data,delete 
d.data),k=d.url;if(d.cache===!1){var x=f.now(),y=d.url.replace(bP,"$1_="+x);d.url=y+(y===d.url?(bL.test(d.url)?"&":"?")+"_="+x:"")}}(d.data&&d.hasContent&&d.contentType!==!1||c.contentType)&&v.setRequestHeader("Content-Type",d.contentType),d.ifModified&&(k=k||d.url,f.lastModified[k]&&v.setRequestHeader("If-Modified-Since",f.lastModified[k]),f.etag[k]&&v.setRequestHeader("If-None-Match",f.etag[k])),v.setRequestHeader("Accept",d.dataTypes[0]&&d.accepts[d.dataTypes[0]]?d.accepts[d.dataTypes[0]]+(d.dataTypes[0]!=="*"?", "+bW+"; q=0.01":""):d.accepts["*"]);for(u in d.headers)v.setRequestHeader(u,d.headers[u]);if(d.beforeSend&&(d.beforeSend.call(e,v,d)===!1||s===2)){v.abort();return!1}for(u in{success:1,error:1,complete:1})v[u](d[u]);p=bZ(bT,d,c,v);if(!p)w(-1,"No Transport");else{v.readyState=1,t&&g.trigger("ajaxSend",[v,d]),d.async&&d.timeout>0&&(q=setTimeout(function(){v.abort("timeout")},d.timeout));try{s=1,p.send(l,w)}catch(z){if(s<2)w(-1,z);else throw z}}return v},param:function(a,c){var d=[],e=function(a,b){b=f.isFunction(b)?b():b,d[d.length]=encodeURIComponent(a)+"="+encodeURIComponent(b)};c===b&&(c=f.ajaxSettings.traditional);if(f.isArray(a)||a.jquery&&!f.isPlainObject(a))f.each(a,function(){e(this.name,this.value)});else for(var g in a)b_(g,a[g],c,e);return d.join("&").replace(bC,"+")}}),f.extend({active:0,lastModified:{},etag:{}});var cc=f.now(),cd=/(\=)\?(&|$)|\?\?/i;f.ajaxSetup({jsonp:"callback",jsonpCallback:function(){return f.expando+"_"+cc++}}),f.ajaxPrefilter("json jsonp",function(b,c,d){var e=typeof b.data=="string"&&/^application\/x\-www\-form\-urlencoded/.test(b.contentType);if(b.dataTypes[0]==="jsonp"||b.jsonp!==!1&&(cd.test(b.url)||e&&cd.test(b.data))){var g,h=b.jsonpCallback=f.isFunction(b.jsonpCallback)?b.jsonpCallback():b.jsonpCallback,i=a[h],j=b.url,k=b.data,l="$1"+h+"$2";b.jsonp!==!1&&(j=j.replace(cd,l),b.url===j&&(e&&(k=k.replace(cd,l)),b.data===k&&(j+=(/\?/.test(j)?"&":"?")+b.jsonp+"="+h))),b.url=j,b.data=k,a[h]=function(a){g=[a]},d.always(function(){a[h]=i,g&&f.isFunction(i)&&a[h](g[0])}),b.converters["script json"]=function(){g||f.error(h+" was not called");return g[0]},b.dataTypes[0]="json";return"script"}}),f.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/javascript|ecmascript/},converters:{"text script":function(a){f.globalEval(a);return a}}}),f.ajaxPrefilter("script",function(a){a.cache===b&&(a.cache=!1),a.crossDomain&&(a.type="GET",a.global=!1)}),f.ajaxTransport("script",function(a){if(a.crossDomain){var d,e=c.head||c.getElementsByTagName("head")[0]||c.documentElement;return{send:function(f,g){d=c.createElement("script"),d.async="async",a.scriptCharset&&(d.charset=a.scriptCharset),d.src=a.url,d.onload=d.onreadystatechange=function(a,c){if(c||!d.readyState||/loaded|complete/.test(d.readyState))d.onload=d.onreadystatechange=null,e&&d.parentNode&&e.removeChild(d),d=b,c||g(200,"success")},e.insertBefore(d,e.firstChild)},abort:function(){d&&d.onload(0,1)}}}});var ce=a.ActiveXObject?function(){for(var a in cg)cg[a](0,1)}:!1,cf=0,cg;f.ajaxSettings.xhr=a.ActiveXObject?function(){return!this.isLocal&&ch()||ci()}:ch,function(a){f.extend(f.support,{ajax:!!a,cors:!!a&&"withCredentials"in a})}(f.ajaxSettings.xhr()),f.support.ajax&&f.ajaxTransport(function(c){if(!c.crossDomain||f.support.cors){var d;return{send:function(e,g){var h=c.xhr(),i,j;c.username?h.open(c.type,c.url,c.async,c.username,c.password):h.open(c.type,c.url,c.async);if(c.xhrFields)for(j in 
c.xhrFields)h[j]=c.xhrFields[j];c.mimeType&&h.overrideMimeType&&h.overrideMimeType(c.mimeType),!c.crossDomain&&!e["X-Requested-With"]&&(e["X-Requested-With"]="XMLHttpRequest");try{for(j in e)h.setRequestHeader(j,e[j])}catch(k){}h.send(c.hasContent&&c.data||null),d=function(a,e){var j,k,l,m,n;try{if(d&&(e||h.readyState===4)){d=b,i&&(h.onreadystatechange=f.noop,ce&&delete cg[i]);if(e)h.readyState!==4&&h.abort();else{j=h.status,l=h.getAllResponseHeaders(),m={},n=h.responseXML,n&&n.documentElement&&(m.xml=n);try{m.text=h.responseText}catch(a){}try{k=h.statusText}catch(o){k=""}!j&&c.isLocal&&!c.crossDomain?j=m.text?200:404:j===1223&&(j=204)}}}catch(p){e||g(-1,p)}m&&g(j,k,m,l)},!c.async||h.readyState===4?d():(i=++cf,ce&&(cg||(cg={},f(a).unload(ce)),cg[i]=d),h.onreadystatechange=d)},abort:function(){d&&d(0,1)}}}});var cj={},ck,cl,cm=/^(?:toggle|show|hide)$/,cn=/^([+\-]=)?([\d+.\-]+)([a-z%]*)$/i,co,cp=[["height","marginTop","marginBottom","paddingTop","paddingBottom"],["width","marginLeft","marginRight","paddingLeft","paddingRight"],["opacity"]],cq;f.fn.extend({show:function(a,b,c){var d,e;if(a||a===0)return this.animate(ct("show",3),a,b,c);for(var g=0,h=this.length;g<h;g++)d=this[g],d.style&&(e=d.style.display,!f._data(d,"olddisplay")&&e==="none"&&(e=d.style.display=""),(e===""&&f.css(d,"display")==="none"||!f.contains(d.ownerDocument.documentElement,d))&&f._data(d,"olddisplay",cu(d.nodeName)));for(g=0;g<h;g++){d=this[g];if(d.style){e=d.style.display;if(e===""||e==="none")d.style.display=f._data(d,"olddisplay")||""}}return this},hide:function(a,b,c){if(a||a===0)return this.animate(ct("hide",3),a,b,c);var d,e,g=0,h=this.length;for(;g<h;g++)d=this[g],d.style&&(e=f.css(d,"display"),e!=="none"&&!f._data(d,"olddisplay")&&f._data(d,"olddisplay",e));for(g=0;g<h;g++)this[g].style&&(this[g].style.display="none");return this},_toggle:f.fn.toggle,toggle:function(a,b,c){var d=typeof a=="boolean";f.isFunction(a)&&f.isFunction(b)?this._toggle.apply(this,arguments):a==null||d?this.each(function(){var b=d?a:f(this).is(":hidden");f(this)[b?"show":"hide"]()}):this.animate(ct("toggle",3),a,b,c);return this},fadeTo:function(a,b,c,d){return this.filter(":hidden").css("opacity",0).show().end().animate({opacity:b},a,c,d)},animate:function(a,b,c,d){function g(){e.queue===!1&&f._mark(this);var b=f.extend({},e),c=this.nodeType===1,d=c&&f(this).is(":hidden"),g,h,i,j,k,l,m,n,o,p,q;b.animatedProperties={};for(i in a){g=f.camelCase(i),i!==g&&(a[g]=a[i],delete a[i]);if((k=f.cssHooks[g])&&"expand"in k){l=k.expand(a[g]),delete a[g];for(i in l)i in a||(a[i]=l[i])}}for(g in a){h=a[g],f.isArray(h)?(b.animatedProperties[g]=h[1],h=a[g]=h[0]):b.animatedProperties[g]=b.specialEasing&&b.specialEasing[g]||b.easing||"swing";if(h==="hide"&&d||h==="show"&&!d)return b.complete.call(this);c&&(g==="height"||g==="width")&&(b.overflow=[this.style.overflow,this.style.overflowX,this.style.overflowY],f.css(this,"display")==="inline"&&f.css(this,"float")==="none"&&(!f.support.inlineBlockNeedsLayout||cu(this.nodeName)==="inline"?this.style.display="inline-block":this.style.zoom=1))}b.overflow!=null&&(this.style.overflow="hidden");for(i in a)j=new f.fx(this,b,i),h=a[i],cm.test(h)?(q=f._data(this,"toggle"+i)||(h==="toggle"?d?"show":"hide":0),q?(f._data(this,"toggle"+i,q==="show"?"hide":"show"),j[q]()):j[h]()):(m=cn.exec(h),n=j.cur(),m?(o=parseFloat(m[2]),p=m[3]||(f.cssNumber[i]?"":"px"),p!=="px"&&(f.style(this,i,(o||1)+p),n=(o||1)/j.cur()*n,f.style(this,i,n+p)),m[1]&&(o=(m[1]==="-="?-1:1)*o+n),j.custom(n,o,p)):j.custom(n,h,""));return!0}var 
e=f.speed(b,c,d);if(f.isEmptyObject(a))return this.each(e.complete,[!1]);a=f.extend({},a);return e.queue===!1?this.each(g):this.queue(e.queue,g)},stop:function(a,c,d){typeof a!="string"&&(d=c,c=a,a=b),c&&a!==!1&&this.queue(a||"fx",[]);return this.each(function(){function h(a,b,c){var e=b[c];f.removeData(a,c,!0),e.stop(d)}var b,c=!1,e=f.timers,g=f._data(this);d||f._unmark(!0,this);if(a==null)for(b in g)g[b]&&g[b].stop&&b.indexOf(".run")===b.length-4&&h(this,g,b);else g[b=a+".run"]&&g[b].stop&&h(this,g,b);for(b=e.length;b--;)e[b].elem===this&&(a==null||e[b].queue===a)&&(d?e[b](!0):e[b].saveState(),c=!0,e.splice(b,1));(!d||!c)&&f.dequeue(this,a)})}}),f.each({slideDown:ct("show",1),slideUp:ct("hide",1),slideToggle:ct("toggle",1),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},function(a,b){f.fn[a]=function(a,c,d){return this.animate(b,a,c,d)}}),f.extend({speed:function(a,b,c){var d=a&&typeof a=="object"?f.extend({},a):{complete:c||!c&&b||f.isFunction(a)&&a,duration:a,easing:c&&b||b&&!f.isFunction(b)&&b};d.duration=f.fx.off?0:typeof d.duration=="number"?d.duration:d.duration in f.fx.speeds?f.fx.speeds[d.duration]:f.fx.speeds._default;if(d.queue==null||d.queue===!0)d.queue="fx";d.old=d.complete,d.complete=function(a){f.isFunction(d.old)&&d.old.call(this),d.queue?f.dequeue(this,d.queue):a!==!1&&f._unmark(this)};return d},easing:{linear:function(a){return a},swing:function(a){return-Math.cos(a*Math.PI)/2+.5}},timers:[],fx:function(a,b,c){this.options=b,this.elem=a,this.prop=c,b.orig=b.orig||{}}}),f.fx.prototype={update:function(){this.options.step&&this.options.step.call(this.elem,this.now,this),(f.fx.step[this.prop]||f.fx.step._default)(this)},cur:function(){if(this.elem[this.prop]!=null&&(!this.elem.style||this.elem.style[this.prop]==null))return this.elem[this.prop];var a,b=f.css(this.elem,this.prop);return isNaN(a=parseFloat(b))?!b||b==="auto"?0:b:a},custom:function(a,c,d){function h(a){return e.step(a)}var e=this,g=f.fx;this.startTime=cq||cr(),this.end=c,this.now=this.start=a,this.pos=this.state=0,this.unit=d||this.unit||(f.cssNumber[this.prop]?"":"px"),h.queue=this.options.queue,h.elem=this.elem,h.saveState=function(){f._data(e.elem,"fxshow"+e.prop)===b&&(e.options.hide?f._data(e.elem,"fxshow"+e.prop,e.start):e.options.show&&f._data(e.elem,"fxshow"+e.prop,e.end))},h()&&f.timers.push(h)&&!co&&(co=setInterval(g.tick,g.interval))},show:function(){var a=f._data(this.elem,"fxshow"+this.prop);this.options.orig[this.prop]=a||f.style(this.elem,this.prop),this.options.show=!0,a!==b?this.custom(this.cur(),a):this.custom(this.prop==="width"||this.prop==="height"?1:0,this.cur()),f(this.elem).show()},hide:function(){this.options.orig[this.prop]=f._data(this.elem,"fxshow"+this.prop)||f.style(this.elem,this.prop),this.options.hide=!0,this.custom(this.cur(),0)},step:function(a){var b,c,d,e=cq||cr(),g=!0,h=this.elem,i=this.options;if(a||e>=i.duration+this.startTime){this.now=this.end,this.pos=this.state=1,this.update(),i.animatedProperties[this.prop]=!0;for(b in i.animatedProperties)i.animatedProperties[b]!==!0&&(g=!1);if(g){i.overflow!=null&&!f.support.shrinkWrapBlocks&&f.each(["","X","Y"],function(a,b){h.style["overflow"+b]=i.overflow[a]}),i.hide&&f(h).hide();if(i.hide||i.show)for(b in 
i.animatedProperties)f.style(h,b,i.orig[b]),f.removeData(h,"fxshow"+b,!0),f.removeData(h,"toggle"+b,!0);d=i.complete,d&&(i.complete=!1,d.call(h))}return!1}i.duration==Infinity?this.now=e:(c=e-this.startTime,this.state=c/i.duration,this.pos=f.easing[i.animatedProperties[this.prop]](this.state,c,0,1,i.duration),this.now=this.start+(this.end-this.start)*this.pos),this.update();return!0}},f.extend(f.fx,{tick:function(){var a,b=f.timers,c=0;for(;c<b.length;c++)a=b[c],!a()&&b[c]===a&&b.splice(c--,1);b.length||f.fx.stop()},interval:13,stop:function(){clearInterval(co),co=null},speeds:{slow:600,fast:200,_default:400},step:{opacity:function(a){f.style(a.elem,"opacity",a.now)},_default:function(a){a.elem.style&&a.elem.style[a.prop]!=null?a.elem.style[a.prop]=a.now+a.unit:a.elem[a.prop]=a.now}}}),f.each(cp.concat.apply([],cp),function(a,b){b.indexOf("margin")&&(f.fx.step[b]=function(a){f.style(a.elem,b,Math.max(0,a.now)+a.unit)})}),f.expr&&f.expr.filters&&(f.expr.filters.animated=function(a){return f.grep(f.timers,function(b){return a===b.elem}).length});var cv,cw=/^t(?:able|d|h)$/i,cx=/^(?:body|html)$/i;"getBoundingClientRect"in c.documentElement?cv=function(a,b,c,d){try{d=a.getBoundingClientRect()}catch(e){}if(!d||!f.contains(c,a))return d?{top:d.top,left:d.left}:{top:0,left:0};var g=b.body,h=cy(b),i=c.clientTop||g.clientTop||0,j=c.clientLeft||g.clientLeft||0,k=h.pageYOffset||f.support.boxModel&&c.scrollTop||g.scrollTop,l=h.pageXOffset||f.support.boxModel&&c.scrollLeft||g.scrollLeft,m=d.top+k-i,n=d.left+l-j;return{top:m,left:n}}:cv=function(a,b,c){var d,e=a.offsetParent,g=a,h=b.body,i=b.defaultView,j=i?i.getComputedStyle(a,null):a.currentStyle,k=a.offsetTop,l=a.offsetLeft;while((a=a.parentNode)&&a!==h&&a!==c){if(f.support.fixedPosition&&j.position==="fixed")break;d=i?i.getComputedStyle(a,null):a.currentStyle,k-=a.scrollTop,l-=a.scrollLeft,a===e&&(k+=a.offsetTop,l+=a.offsetLeft,f.support.doesNotAddBorder&&(!f.support.doesAddBorderForTableAndCells||!cw.test(a.nodeName))&&(k+=parseFloat(d.borderTopWidth)||0,l+=parseFloat(d.borderLeftWidth)||0),g=e,e=a.offsetParent),f.support.subtractsBorderForOverflowNotVisible&&d.overflow!=="visible"&&(k+=parseFloat(d.borderTopWidth)||0,l+=parseFloat(d.borderLeftWidth)||0),j=d}if(j.position==="relative"||j.position==="static")k+=h.offsetTop,l+=h.offsetLeft;f.support.fixedPosition&&j.position==="fixed"&&(k+=Math.max(c.scrollTop,h.scrollTop),l+=Math.max(c.scrollLeft,h.scrollLeft));return{top:k,left:l}},f.fn.offset=function(a){if(arguments.length)return a===b?this:this.each(function(b){f.offset.setOffset(this,a,b)});var c=this[0],d=c&&c.ownerDocument;if(!d)return null;if(c===d.body)return f.offset.bodyOffset(c);return cv(c,d,d.documentElement)},f.offset={bodyOffset:function(a){var b=a.offsetTop,c=a.offsetLeft;f.support.doesNotIncludeMarginInBodyOffset&&(b+=parseFloat(f.css(a,"marginTop"))||0,c+=parseFloat(f.css(a,"marginLeft"))||0);return{top:b,left:c}},setOffset:function(a,b,c){var d=f.css(a,"position");d==="static"&&(a.style.position="relative");var e=f(a),g=e.offset(),h=f.css(a,"top"),i=f.css(a,"left"),j=(d==="absolute"||d==="fixed")&&f.inArray("auto",[h,i])>-1,k={},l={},m,n;j?(l=e.position(),m=l.top,n=l.left):(m=parseFloat(h)||0,n=parseFloat(i)||0),f.isFunction(b)&&(b=b.call(a,c,g)),b.top!=null&&(k.top=b.top-g.top+m),b.left!=null&&(k.left=b.left-g.left+n),"using"in b?b.using.call(a,k):e.css(k)}},f.fn.extend({position:function(){if(!this[0])return null;var 
a=this[0],b=this.offsetParent(),c=this.offset(),d=cx.test(b[0].nodeName)?{top:0,left:0}:b.offset();c.top-=parseFloat(f.css(a,"marginTop"))||0,c.left-=parseFloat(f.css(a,"marginLeft"))||0,d.top+=parseFloat(f.css(b[0],"borderTopWidth"))||0,d.left+=parseFloat(f.css(b[0],"borderLeftWidth"))||0;return{top:c.top-d.top,left:c.left-d.left}},offsetParent:function(){return this.map(function(){var a=this.offsetParent||c.body;while(a&&!cx.test(a.nodeName)&&f.css(a,"position")==="static")a=a.offsetParent;return a})}}),f.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(a,c){var d=/Y/.test(c);f.fn[a]=function(e){return f.access(this,function(a,e,g){var h=cy(a);if(g===b)return h?c in h?h[c]:f.support.boxModel&&h.document.documentElement[e]||h.document.body[e]:a[e];h?h.scrollTo(d?f(h).scrollLeft():g,d?g:f(h).scrollTop()):a[e]=g},a,e,arguments.length,null)}}),f.each({Height:"height",Width:"width"},function(a,c){var d="client"+a,e="scroll"+a,g="offset"+a;f.fn["inner"+a]=function(){var a=this[0];return a?a.style?parseFloat(f.css(a,c,"padding")):this[c]():null},f.fn["outer"+a]=function(a){var b=this[0];return b?b.style?parseFloat(f.css(b,c,a?"margin":"border")):this[c]():null},f.fn[c]=function(a){return f.access(this,function(a,c,h){var i,j,k,l;if(f.isWindow(a)){i=a.document,j=i.documentElement[d];return f.support.boxModel&&j||i.body&&i.body[d]||j}if(a.nodeType===9){i=a.documentElement;if(i[d]>=i[e])return i[d];return Math.max(a.body[e],i[e],a.body[g],i[g])}if(h===b){k=f.css(a,c),l=parseFloat(k);return f.isNumeric(l)?l:k}f(a).css(c,h)},c,a,arguments.length,null)}}),a.jQuery=a.$=f,typeof define=="function"&&define.amd&&define.amd.jQuery&&define("jquery",[],function(){return f})})(window);
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/vendor/scripts/jquery-ui-timepicker-addon.js b/branch-1.2/ambari-web/vendor/scripts/jquery-ui-timepicker-addon.js
deleted file mode 100644
index df949a9..0000000
--- a/branch-1.2/ambari-web/vendor/scripts/jquery-ui-timepicker-addon.js
+++ /dev/null
@@ -1,1831 +0,0 @@
-/*
- * jQuery timepicker addon
- * By: Trent Richardson [http://trentrichardson.com]
- * Version 1.0.5
- * Last Modified: 10/06/2012
- *
- * Copyright 2012 Trent Richardson
- * You may use this project under MIT or GPL licenses.
- * http://trentrichardson.com/Impromptu/GPL-LICENSE.txt
- * http://trentrichardson.com/Impromptu/MIT-LICENSE.txt
- */
-
-/*jslint evil: true, white: false, undef: false, nomen: false */
-
-(function($) {
-
-	/*
-	* Let's not redefine timepicker; this prevents "Uncaught RangeError: Maximum call stack size exceeded".
-	*/
-	$.ui.timepicker = $.ui.timepicker || {};
-	if ($.ui.timepicker.version) {
-		return;
-	}
-
-	/*
-	* Extend jQuery UI and register our version number
-	*/
-	$.extend($.ui, {
-		timepicker: {
-			version: "1.0.5"
-		}
-	});
-
-	/* 
-	* Timepicker manager.
-	* Use the singleton instance of this class, $.timepicker, to interact with the time picker.
-	* Settings for (groups of) time pickers are maintained in an instance object,
-	* allowing multiple different settings on the same page.
-	*/
-	function Timepicker() {
-		this.regional = []; // Available regional settings, indexed by language code
-		this.regional[''] = { // Default regional settings
-			currentText: 'Now',
-			closeText: 'Done',
-			ampm: false,
-			amNames: ['AM', 'A'],
-			pmNames: ['PM', 'P'],
-			timeFormat: 'hh:mm tt',
-			timeSuffix: '',
-			timeOnlyTitle: 'Choose Time',
-			timeText: 'Time',
-			hourText: 'Hour',
-			minuteText: 'Minute',
-			secondText: 'Second',
-			millisecText: 'Millisecond',
-			timezoneText: 'Time Zone',
-			isRTL: false
-		};
-		this._defaults = { // Global defaults for all the datetime picker instances
-			showButtonPanel: true,
-			timeOnly: false,
-			showHour: true,
-			showMinute: true,
-			showSecond: false,
-			showMillisec: false,
-			showTimezone: false,
-			showTime: true,
-			stepHour: 1,
-			stepMinute: 1,
-			stepSecond: 1,
-			stepMillisec: 1,
-			hour: 0,
-			minute: 0,
-			second: 0,
-			millisec: 0,
-			timezone: null,
-			useLocalTimezone: false,
-			defaultTimezone: "+0000",
-			hourMin: 0,
-			minuteMin: 0,
-			secondMin: 0,
-			millisecMin: 0,
-			hourMax: 23,
-			minuteMax: 59,
-			secondMax: 59,
-			millisecMax: 999,
-			minDateTime: null,
-			maxDateTime: null,
-			onSelect: null,
-			hourGrid: 0,
-			minuteGrid: 0,
-			secondGrid: 0,
-			millisecGrid: 0,
-			alwaysSetTime: true,
-			separator: ' ',
-			altFieldTimeOnly: true,
-			altSeparator: null,
-			altTimeSuffix: null,
-			showTimepicker: true,
-			timezoneIso8601: false,
-			timezoneList: null,
-			addSliderAccess: false,
-			sliderAccessArgs: null,
-			controlType: 'slider',
-			defaultValue: null
-		};
-		$.extend(this._defaults, this.regional['']);
-	}
-
-	$.extend(Timepicker.prototype, {
-		$input: null,
-		$altInput: null,
-		$timeObj: null,
-		inst: null,
-		hour_slider: null,
-		minute_slider: null,
-		second_slider: null,
-		millisec_slider: null,
-		timezone_select: null,
-		hour: 0,
-		minute: 0,
-		second: 0,
-		millisec: 0,
-		timezone: null,
-		defaultTimezone: "+0000",
-		hourMinOriginal: null,
-		minuteMinOriginal: null,
-		secondMinOriginal: null,
-		millisecMinOriginal: null,
-		hourMaxOriginal: null,
-		minuteMaxOriginal: null,
-		secondMaxOriginal: null,
-		millisecMaxOriginal: null,
-		ampm: '',
-		formattedDate: '',
-		formattedTime: '',
-		formattedDateTime: '',
-		timezoneList: null,
-		units: ['hour','minute','second','millisec'],
-		control: null,
-
-		/* 
-		* Override the default settings for all instances of the time picker.
-		* @param  settings  object - the new settings to use as defaults (anonymous object)
-		* @return the manager object
-		*/
-		setDefaults: function(settings) {
-			extendRemove(this._defaults, settings || {});
-			return this;
-		},
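-		// Hypothetical usage sketch (illustrative only; assumes jQuery UI datepicker
-		// and this addon's $.fn.datetimepicker entry point are loaded):
-		//   $.timepicker.setDefaults({ timeFormat: 'hh:mm tt', stepMinute: 5 });
-		//   $('#start').datetimepicker(); // later instances pick up these defaults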
-
-		/*
-		* Create a new Timepicker instance
-		*/
-		_newInst: function($input, o) {
-			var tp_inst = new Timepicker(),
-				inlineSettings = {},
-                fns = {},
-		        overrides, i;
-
-			for (var attrName in this._defaults) {
-				if(this._defaults.hasOwnProperty(attrName)){
-					var attrValue = $input.attr('time:' + attrName);
-					if (attrValue) {
-						try {
-							inlineSettings[attrName] = eval(attrValue);
-						} catch (err) {
-							inlineSettings[attrName] = attrValue;
-						}
-					}
-				}
-			}
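-			// Illustrative markup (hypothetical id/values): the loop above collects
-			// inline per-input settings from attributes such as
-			//   <input id="start" time:showSecond="true" time:stepMinute="10" />
-			// eval'ing each value and falling back to the raw string on error.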
-		    overrides = {
-		        beforeShow: function (input, dp_inst) {
-		            if ($.isFunction(tp_inst._defaults.evnts.beforeShow)) {
-		                return tp_inst._defaults.evnts.beforeShow.call($input[0], input, dp_inst, tp_inst);
-		            }
-		        },
-		        onChangeMonthYear: function (year, month, dp_inst) {
-		            // Update the time as well: this prevents the time from disappearing from the $input field.
-		            tp_inst._updateDateTime(dp_inst);
-		            if ($.isFunction(tp_inst._defaults.evnts.onChangeMonthYear)) {
-		                tp_inst._defaults.evnts.onChangeMonthYear.call($input[0], year, month, dp_inst, tp_inst);
-		            }
-		        },
-		        onClose: function (dateText, dp_inst) {
-		            if (tp_inst.timeDefined === true && $input.val() !== '') {
-		                tp_inst._updateDateTime(dp_inst);
-		            }
-		            if ($.isFunction(tp_inst._defaults.evnts.onClose)) {
-		                tp_inst._defaults.evnts.onClose.call($input[0], dateText, dp_inst, tp_inst);
-		            }
-		        }
-		    };
-		    for (i in overrides) {
-		        if (overrides.hasOwnProperty(i)) {
-		            fns[i] = o[i] || null;
-		        }
-		    }
-		    tp_inst._defaults = $.extend({}, this._defaults, inlineSettings, o, overrides, {
-		        evnts:fns,
-		        timepicker: tp_inst // add timepicker as a property of datepicker: $.datepicker._get(dp_inst, 'timepicker');
-		    });
-			tp_inst.amNames = $.map(tp_inst._defaults.amNames, function(val) {
-				return val.toUpperCase();
-			});
-			tp_inst.pmNames = $.map(tp_inst._defaults.pmNames, function(val) {
-				return val.toUpperCase();
-			});
-
-			// controlType is a string: a key into our this._controls
-			if(typeof(tp_inst._defaults.controlType) === 'string'){
-				if(tp_inst._defaults.controlType == 'slider' && $.fn.slider === undefined){
-					tp_inst._defaults.controlType = 'select';
-				}
-				tp_inst.control = tp_inst._controls[tp_inst._defaults.controlType];
-			}
-			// controlType is an object and must implement the create, options, and value methods
-			else{ 
-				tp_inst.control = tp_inst._defaults.controlType;
-			}
-
-			if (tp_inst._defaults.timezoneList === null) {
-				var timezoneList = ['-1200', '-1100', '-1000', '-0930', '-0900', '-0800', '-0700', '-0600', '-0500', '-0430', '-0400', '-0330', '-0300', '-0200', '-0100', '+0000', 
-									'+0100', '+0200', '+0300', '+0330', '+0400', '+0430', '+0500', '+0530', '+0545', '+0600', '+0630', '+0700', '+0800', '+0845', '+0900', '+0930', 
-									'+1000', '+1030', '+1100', '+1130', '+1200', '+1245', '+1300', '+1400'];
-
-				if (tp_inst._defaults.timezoneIso8601) {
-					timezoneList = $.map(timezoneList, function(val) {
-						return val == '+0000' ? 'Z' : (val.substring(0, 3) + ':' + val.substring(3));
-					});
-				}
-				tp_inst._defaults.timezoneList = timezoneList;
-			}
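-			// Worked example: with timezoneIso8601 enabled, the entries above are
-			// rewritten to ISO 8601 form, e.g. '+0530' -> '+05:30', '-0930' -> '-09:30',
-			// and '+0000' -> 'Z'.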
-
-			tp_inst.timezone = tp_inst._defaults.timezone;
-			tp_inst.hour = tp_inst._defaults.hour;
-			tp_inst.minute = tp_inst._defaults.minute;
-			tp_inst.second = tp_inst._defaults.second;
-			tp_inst.millisec = tp_inst._defaults.millisec;
-			tp_inst.ampm = '';
-			tp_inst.$input = $input;
-
-			if (o.altField) {
-				tp_inst.$altInput = $(o.altField).css({
-					cursor: 'pointer'
-				}).focus(function() {
-					$input.trigger("focus");
-				});
-			}
-
-			if (tp_inst._defaults.minDate === 0 || tp_inst._defaults.minDateTime === 0) {
-				tp_inst._defaults.minDate = new Date();
-			}
-			if (tp_inst._defaults.maxDate === 0 || tp_inst._defaults.maxDateTime === 0) {
-				tp_inst._defaults.maxDate = new Date();
-			}
-
-			// datepicker needs minDate/maxDate, while timepicker needs minDateTime/maxDateTime.
-			if (tp_inst._defaults.minDate !== undefined && tp_inst._defaults.minDate instanceof Date) {
-				tp_inst._defaults.minDateTime = new Date(tp_inst._defaults.minDate.getTime());
-			}
-			if (tp_inst._defaults.minDateTime !== undefined && tp_inst._defaults.minDateTime instanceof Date) {
-				tp_inst._defaults.minDate = new Date(tp_inst._defaults.minDateTime.getTime());
-			}
-			if (tp_inst._defaults.maxDate !== undefined && tp_inst._defaults.maxDate instanceof Date) {
-				tp_inst._defaults.maxDateTime = new Date(tp_inst._defaults.maxDate.getTime());
-			}
-			if (tp_inst._defaults.maxDateTime !== undefined && tp_inst._defaults.maxDateTime instanceof Date) {
-				tp_inst._defaults.maxDate = new Date(tp_inst._defaults.maxDateTime.getTime());
-			}
-			tp_inst.$input.bind('focus', function() {
-				tp_inst._onFocus();
-			});
-
-			return tp_inst;
-		},
-
-		/*
-		* add our sliders to the calendar
-		*/
-		_addTimePicker: function(dp_inst) {
-			var currDT = (this.$altInput && this._defaults.altFieldTimeOnly) ? this.$input.val() + ' ' + this.$altInput.val() : this.$input.val();
-
-			this.timeDefined = this._parseTime(currDT);
-			this._limitMinMaxDateTime(dp_inst, false);
-			this._injectTimePicker();
-		},
-
-		/*
-		* parse the time string from input value or _setTime
-		*/
-		_parseTime: function(timeString, withDate) {
-			if (!this.inst) {
-				this.inst = $.datepicker._getInst(this.$input[0]);
-			}
-
-			if (withDate || !this._defaults.timeOnly) {
-				var dp_dateFormat = $.datepicker._get(this.inst, 'dateFormat');
-				try {
-					var parseRes = parseDateTimeInternal(dp_dateFormat, this._defaults.timeFormat, timeString, $.datepicker._getFormatConfig(this.inst), this._defaults);
-					if (!parseRes.timeObj) {
-						return false;
-					}
-					$.extend(this, parseRes.timeObj);
-				} catch (err) {
-					return false;
-				}
-				return true;
-			} else {
-				var timeObj = $.datepicker.parseTime(this._defaults.timeFormat, timeString, this._defaults);
-				if (!timeObj) {
-					return false;
-				}
-				$.extend(this, timeObj);
-				return true;
-			}
-		},
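-		// Hypothetical call sketch (values illustrative): with dateFormat 'mm/dd/yy'
-		// and timeFormat 'hh:mm tt', a successful _parseTime('09/15/2012 03:05 pm')
-		// extends this instance with { hour: 15, minute: 5, second: 0, millisec: 0 }.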
-
-		/*
-		* Generate the timepicker HTML and inject it into the jQuery UI datepicker
-		*/
-		_injectTimePicker: function() {
-			var $dp = this.inst.dpDiv,
-				o = this.inst.settings,
-				tp_inst = this,
-				litem = '',
-				uitem = '',
-				max = {},
-				gridSize = {},
-				size = null;
-
-			// Prevent displaying twice
-			if ($dp.find("div.ui-timepicker-div").length === 0 && o.showTimepicker) {
-				var noDisplay = ' style="display:none;"',
-					html = '<div class="ui-timepicker-div'+ (o.isRTL? ' ui-timepicker-rtl' : '') +'"><dl>' + '<dt class="ui_tpicker_time_label"' + ((o.showTime) ? '' : noDisplay) + '>' + o.timeText + '</dt>' + 
-								'<dd class="ui_tpicker_time"' + ((o.showTime) ? '' : noDisplay) + '></dd>';
-
-				// Create the markup
-				for(var i=0,l=this.units.length; i<l; i++){
-					litem = this.units[i];
-					uitem = litem.substr(0,1).toUpperCase() + litem.substr(1);
-					// Added by Peter Medeiros:
-					// - Figure out what the hour/minute/second max should be, based on the step values.
-					// - Example: if stepMinute is 15 and minuteMax is 59, the effective max is 59 - (59 % 15) = 45.
-					max[litem] = parseInt((o[litem+'Max'] - ((o[litem+'Max'] - o[litem+'Min']) % o['step'+uitem])), 10);
-					gridSize[litem] = 0;
-
-					html += '<dt class="ui_tpicker_'+ litem +'_label"' + ((o['show'+uitem]) ? '' : noDisplay) + '>' + o[litem +'Text'] + '</dt>' + 
-								'<dd class="ui_tpicker_'+ litem +'"><div class="ui_tpicker_'+ litem +'_slider"' + ((o['show'+uitem]) ? '' : noDisplay) + '></div>';
-
-					if (o['show'+uitem] && o[litem+'Grid'] > 0) {
-						html += '<div style="padding-left: 1px"><table class="ui-tpicker-grid-label"><tr>';
-
-						if(litem == 'hour'){
-							for (var h = o[litem+'Min']; h <= max[litem]; h += parseInt(o[litem+'Grid'], 10)) {
-								gridSize[litem]++;
-								var tmph = (o.ampm && h > 12) ? h - 12 : h;
-								if (tmph < 10) {
-									tmph = '0' + tmph;
-								}
-								if (o.ampm) {
-									if (h === 0) {
-										tmph = 12 + 'a';
-									} else {
-										if (h < 12) {
-											tmph += 'a';
-										} else {
-											tmph += 'p';
-										}
-									}
-								}
-								html += '<td data-for="'+litem+'">' + tmph + '</td>';
-							}
-						}
-						else{
-							for (var m = o[litem+'Min']; m <= max[litem]; m += parseInt(o[litem+'Grid'], 10)) {
-								gridSize[litem]++;
-								html += '<td data-for="'+litem+'">' + ((m < 10) ? '0' : '') + m + '</td>';
-							}
-						}
-
-						html += '</tr></table></div>';
-					}
-					html += '</dd>';
-				}
-				
-				// Timezone
-				html += '<dt class="ui_tpicker_timezone_label"' + ((o.showTimezone) ? '' : noDisplay) + '>' + o.timezoneText + '</dt>';
-				html += '<dd class="ui_tpicker_timezone" ' + ((o.showTimezone) ? '' : noDisplay) + '></dd>';
-
-				// Create the elements from string
-				html += '</dl></div>';
-				var $tp = $(html);
-
-				// if we only want time picker...
-				if (o.timeOnly === true) {
-					$tp.prepend('<div class="ui-widget-header ui-helper-clearfix ui-corner-all">' + '<div class="ui-datepicker-title">' + o.timeOnlyTitle + '</div>' + '</div>');
-					$dp.find('.ui-datepicker-header, .ui-datepicker-calendar').hide();
-				}
-				
-				// add sliders, adjust grids, add events
-				for(var i=0,l=tp_inst.units.length; i<l; i++){
-					litem = tp_inst.units[i];
-					uitem = litem.substr(0,1).toUpperCase() + litem.substr(1);
-					
-					// add the slider
-					tp_inst[litem+'_slider'] = tp_inst.control.create(tp_inst, $tp.find('.ui_tpicker_'+litem+'_slider'), litem, tp_inst[litem], o[litem+'Min'], max[litem], o['step'+uitem]);
-
-					// adjust the grid and add click event
-					if (o['show'+uitem] && o[litem+'Grid'] > 0) {
-						size = 100 * gridSize[litem] * o[litem+'Grid'] / (max[litem] - o[litem+'Min']);
-						$tp.find('.ui_tpicker_'+litem+' table').css({
-							width: size + "%",
-							marginLeft: o.isRTL? '0' : ((size / (-2 * gridSize[litem])) + "%"),
-							marginRight: o.isRTL? ((size / (-2 * gridSize[litem])) + "%") : '0',
-							borderCollapse: 'collapse'
-						}).find("td").click(function(e){
-								var $t = $(this),
-									h = $t.html(),
-									f = $t.data('for'); // the loop variable loses scope in this handler, so we read the unit from data-for
-
-								if (f == 'hour' && o.ampm) {
-									var ap = h.substring(2).toLowerCase(),
-										aph = parseInt(h.substring(0, 2), 10);
-									if (ap == 'a') {
-										if (aph == 12) {
-											h = 0;
-										} else {
-											h = aph;
-										}
-									} else if (aph == 12) {
-										h = 12;
-									} else {
-										h = aph + 12;
-									}
-								}
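-								// Worked examples for the am/pm grid labels built above:
-								// '12a' -> 0, '01a' -> 1, '12p' -> 12, '05p' -> 17.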
-								tp_inst.control.value(tp_inst, tp_inst[f+'_slider'], parseInt(h,10));
-
-								tp_inst._onTimeChange();
-								tp_inst._onSelectHandler();
-							})
-						.css({
-								cursor: 'pointer',
-								width: (100 / gridSize[litem]) + '%',
-								textAlign: 'center',
-								overflow: 'hidden'
-							});
-					} // end if grid > 0
-				} // end for loop
-
-				// Add timezone options
-				this.timezone_select = $tp.find('.ui_tpicker_timezone').append('<select></select>').find("select");
-				$.fn.append.apply(this.timezone_select,
-				$.map(o.timezoneList, function(val, idx) {
-					return $("<option />").val(typeof val == "object" ? val.value : val).text(typeof val == "object" ? val.label : val);
-				}));
-				if (typeof(this.timezone) != "undefined" && this.timezone !== null && this.timezone !== "") {
-					var local_date = new Date(this.inst.selectedYear, this.inst.selectedMonth, this.inst.selectedDay, 12);
-					var local_timezone = $.timepicker.timeZoneOffsetString(local_date);
-					if (local_timezone == this.timezone) {
-						selectLocalTimeZone(tp_inst);
-					} else {
-						this.timezone_select.val(this.timezone);
-					}
-				} else {
-					if (typeof(this.hour) != "undefined" && this.hour !== null && this.hour !== "") {
-						this.timezone_select.val(o.defaultTimezone);
-					} else {
-						selectLocalTimeZone(tp_inst);
-					}
-				}
-				this.timezone_select.change(function() {
-					tp_inst._defaults.useLocalTimezone = false;
-					tp_inst._onTimeChange();
-				});
-				// End timezone options
-				
-				// inject timepicker into datepicker
-				var $buttonPanel = $dp.find('.ui-datepicker-buttonpane');
-				if ($buttonPanel.length) {
-					$buttonPanel.before($tp);
-				} else {
-					$dp.append($tp);
-				}
-
-				this.$timeObj = $tp.find('.ui_tpicker_time');
-
-				if (this.inst !== null) {
-					var timeDefined = this.timeDefined;
-					this._onTimeChange();
-					this.timeDefined = timeDefined;
-				}
-
-				// sliderAccess integration: http://trentrichardson.com/2011/11/11/jquery-ui-sliders-and-touch-accessibility/
-				if (this._defaults.addSliderAccess) {
-					var sliderAccessArgs = this._defaults.sliderAccessArgs,
-						rtl = this._defaults.isRTL;
-					sliderAccessArgs.isRTL = rtl;
-						
-					setTimeout(function() { // fix for inline mode
-						if ($tp.find('.ui-slider-access').length === 0) {
-							$tp.find('.ui-slider:visible').sliderAccess(sliderAccessArgs);
-
-							// fix any grids since sliders are shorter
-							var sliderAccessWidth = $tp.find('.ui-slider-access:eq(0)').outerWidth(true);
-							if (sliderAccessWidth) {
-								$tp.find('table:visible').each(function() {
-									var $g = $(this),
-										oldWidth = $g.outerWidth(),
-										oldMarginLeft = $g.css(rtl? 'marginRight':'marginLeft').toString().replace('%', ''),
-										newWidth = oldWidth - sliderAccessWidth,
-										newMarginLeft = ((oldMarginLeft * newWidth) / oldWidth) + '%',
-										css = { width: newWidth, marginRight: 0, marginLeft: 0 };
-									css[rtl? 'marginRight':'marginLeft'] = newMarginLeft;
-									$g.css(css);
-								});
-							}
-						}
-					}, 10);
-				}
-				// end sliderAccess integration
-
-			}
-		},
-
-		/*
-		* This function tries to limit the ability to go outside the
-		* min/max date range
-		*/
-		_limitMinMaxDateTime: function(dp_inst, adjustSliders) {
-			var o = this._defaults,
-				dp_date = new Date(dp_inst.selectedYear, dp_inst.selectedMonth, dp_inst.selectedDay);
-
-			if (!this._defaults.showTimepicker) {
-				return;
-			} // No time so nothing to check here
-
-			if ($.datepicker._get(dp_inst, 'minDateTime') !== null && $.datepicker._get(dp_inst, 'minDateTime') !== undefined && dp_date) {
-				var minDateTime = $.datepicker._get(dp_inst, 'minDateTime'),
-					minDateTimeDate = new Date(minDateTime.getFullYear(), minDateTime.getMonth(), minDateTime.getDate(), 0, 0, 0, 0);
-
-				if (this.hourMinOriginal === null || this.minuteMinOriginal === null || this.secondMinOriginal === null || this.millisecMinOriginal === null) {
-					this.hourMinOriginal = o.hourMin;
-					this.minuteMinOriginal = o.minuteMin;
-					this.secondMinOriginal = o.secondMin;
-					this.millisecMinOriginal = o.millisecMin;
-				}
-
-				if (dp_inst.settings.timeOnly || minDateTimeDate.getTime() == dp_date.getTime()) {
-					this._defaults.hourMin = minDateTime.getHours();
-					if (this.hour <= this._defaults.hourMin) {
-						this.hour = this._defaults.hourMin;
-						this._defaults.minuteMin = minDateTime.getMinutes();
-						if (this.minute <= this._defaults.minuteMin) {
-							this.minute = this._defaults.minuteMin;
-							this._defaults.secondMin = minDateTime.getSeconds();
-							if (this.second <= this._defaults.secondMin) {
-								this.second = this._defaults.secondMin;
-								this._defaults.millisecMin = minDateTime.getMilliseconds();
-							} else {
-								if (this.millisec < this._defaults.millisecMin) {
-									this.millisec = this._defaults.millisecMin;
-								}
-								this._defaults.millisecMin = this.millisecMinOriginal;
-							}
-						} else {
-							this._defaults.secondMin = this.secondMinOriginal;
-							this._defaults.millisecMin = this.millisecMinOriginal;
-						}
-					} else {
-						this._defaults.minuteMin = this.minuteMinOriginal;
-						this._defaults.secondMin = this.secondMinOriginal;
-						this._defaults.millisecMin = this.millisecMinOriginal;
-					}
-				} else {
-					this._defaults.hourMin = this.hourMinOriginal;
-					this._defaults.minuteMin = this.minuteMinOriginal;
-					this._defaults.secondMin = this.secondMinOriginal;
-					this._defaults.millisecMin = this.millisecMinOriginal;
-				}
-			}
-
-			if ($.datepicker._get(dp_inst, 'maxDateTime') !== null && $.datepicker._get(dp_inst, 'maxDateTime') !== undefined && dp_date) {
-				var maxDateTime = $.datepicker._get(dp_inst, 'maxDateTime'),
-					maxDateTimeDate = new Date(maxDateTime.getFullYear(), maxDateTime.getMonth(), maxDateTime.getDate(), 0, 0, 0, 0);
-
-				if (this.hourMaxOriginal === null || this.minuteMaxOriginal === null || this.secondMaxOriginal === null || this.millisecMaxOriginal === null) {
-					this.hourMaxOriginal = o.hourMax;
-					this.minuteMaxOriginal = o.minuteMax;
-					this.secondMaxOriginal = o.secondMax;
-					this.millisecMaxOriginal = o.millisecMax;
-				}
-
-				if (dp_inst.settings.timeOnly || maxDateTimeDate.getTime() == dp_date.getTime()) {
-					this._defaults.hourMax = maxDateTime.getHours();
-					if (this.hour >= this._defaults.hourMax) {
-						this.hour = this._defaults.hourMax;
-						this._defaults.minuteMax = maxDateTime.getMinutes();
-						if (this.minute >= this._defaults.minuteMax) {
-							this.minute = this._defaults.minuteMax;
-							this._defaults.secondMax = maxDateTime.getSeconds();
-							if (this.second >= this._defaults.secondMax) {
-								this.second = this._defaults.secondMax;
-								this._defaults.millisecMax = maxDateTime.getMilliseconds();
-							} else {
-								if (this.millisec > this._defaults.millisecMax) {
-									this.millisec = this._defaults.millisecMax;
-								}
-								this._defaults.millisecMax = this.millisecMaxOriginal;
-							}
-						} else {
-							this._defaults.secondMax = this.secondMaxOriginal;
-							this._defaults.millisecMax = this.millisecMaxOriginal;
-						}
-					} else {
-						this._defaults.minuteMax = this.minuteMaxOriginal;
-						this._defaults.secondMax = this.secondMaxOriginal;
-						this._defaults.millisecMax = this.millisecMaxOriginal;
-					}
-				} else {
-					this._defaults.hourMax = this.hourMaxOriginal;
-					this._defaults.minuteMax = this.minuteMaxOriginal;
-					this._defaults.secondMax = this.secondMaxOriginal;
-					this._defaults.millisecMax = this.millisecMaxOriginal;
-				}
-			}
-
-			if (adjustSliders !== undefined && adjustSliders === true) {
-				var hourMax = parseInt((this._defaults.hourMax - ((this._defaults.hourMax - this._defaults.hourMin) % this._defaults.stepHour)), 10),
-					minMax = parseInt((this._defaults.minuteMax - ((this._defaults.minuteMax - this._defaults.minuteMin) % this._defaults.stepMinute)), 10),
-					secMax = parseInt((this._defaults.secondMax - ((this._defaults.secondMax - this._defaults.secondMin) % this._defaults.stepSecond)), 10),
-					millisecMax = parseInt((this._defaults.millisecMax - ((this._defaults.millisecMax - this._defaults.millisecMin) % this._defaults.stepMillisec)), 10);
-
-				if (this.hour_slider) {
-					this.control.options(this, this.hour_slider, { min: this._defaults.hourMin, max: hourMax });
-					this.control.value(this, this.hour_slider, this.hour);
-				}
-				if (this.minute_slider) {
-					this.control.options(this, this.minute_slider, { min: this._defaults.minuteMin, max: minMax });
-					this.control.value(this, this.minute_slider, this.minute);
-				}
-				if (this.second_slider) {
-					this.control.options(this, this.second_slider, { min: this._defaults.secondMin, max: secMax });
-					this.control.value(this, this.second_slider, this.second);
-				}
-				if (this.millisec_slider) {
-					this.control.options(this, this.millisec_slider, { min: this._defaults.millisecMin, max: millisecMax });
-					this.control.value(this, this.millisec_slider, this.millisec);
-				}
-			}
-
-		},
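
A minimal usage sketch of the clamping above (element id and dates are hypothetical): when minDateTime/maxDateTime are supplied, the time sliders are constrained only on the days that touch a bound, and reset to their originals otherwise.

// Hypothetical id and dates: times are clamped on the boundary days only.
$('#maintenance_window').datetimepicker({
	minDateTime: new Date(2013, 3, 1, 9, 30),  // Apr 1 2013, 09:30
	maxDateTime: new Date(2013, 3, 5, 17, 0)   // Apr 5 2013, 17:00
});
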
-
-		/*
-		* when a slider moves, set the internal time...
-		* on time change is also called when the time is updated in the text field
-		*/
-		_onTimeChange: function() {
-			var hour = (this.hour_slider) ? this.control.value(this, this.hour_slider) : false,
-				minute = (this.minute_slider) ? this.control.value(this, this.minute_slider) : false,
-				second = (this.second_slider) ? this.control.value(this, this.second_slider) : false,
-				millisec = (this.millisec_slider) ? this.control.value(this, this.millisec_slider) : false,
-				timezone = (this.timezone_select) ? this.timezone_select.val() : false,
-				o = this._defaults;
-
-			if (typeof(hour) == 'object') {
-				hour = false;
-			}
-			if (typeof(minute) == 'object') {
-				minute = false;
-			}
-			if (typeof(second) == 'object') {
-				second = false;
-			}
-			if (typeof(millisec) == 'object') {
-				millisec = false;
-			}
-			if (typeof(timezone) == 'object') {
-				timezone = false;
-			}
-
-			if (hour !== false) {
-				hour = parseInt(hour, 10);
-			}
-			if (minute !== false) {
-				minute = parseInt(minute, 10);
-			}
-			if (second !== false) {
-				second = parseInt(second, 10);
-			}
-			if (millisec !== false) {
-				millisec = parseInt(millisec, 10);
-			}
-
-			var ampm = o[hour < 12 ? 'amNames' : 'pmNames'][0];
-
-			// If the update was done in the input field, the input field should not be updated.
-			// If the update was done using the sliders, update the input field.
-			var hasChanged = (hour != this.hour || minute != this.minute || second != this.second || millisec != this.millisec 
-								|| (this.ampm.length > 0 && (hour < 12) != ($.inArray(this.ampm.toUpperCase(), this.amNames) !== -1)) 
-								|| ((this.timezone === null && timezone != this.defaultTimezone) || (this.timezone !== null && timezone != this.timezone)));
-
-			if (hasChanged) {
-
-				if (hour !== false) {
-					this.hour = hour;
-				}
-				if (minute !== false) {
-					this.minute = minute;
-				}
-				if (second !== false) {
-					this.second = second;
-				}
-				if (millisec !== false) {
-					this.millisec = millisec;
-				}
-				if (timezone !== false) {
-					this.timezone = timezone;
-				}
-
-				if (!this.inst) {
-					this.inst = $.datepicker._getInst(this.$input[0]);
-				}
-
-				this._limitMinMaxDateTime(this.inst, true);
-			}
-			if (o.ampm) {
-				this.ampm = ampm;
-			}
-
-			this.formattedTime = $.datepicker.formatTime(this._defaults.timeFormat, this, this._defaults);
-			if (this.$timeObj) {
-				this.$timeObj.text(this.formattedTime + o.timeSuffix);
-			}
-			this.timeDefined = true;
-			if (hasChanged) {
-				this._updateDateTime();
-			}
-		},
-
-		/*
-		* call custom onSelect.
-		* bind to sliders slidestop, and grid click.
-		*/
-		_onSelectHandler: function() {
-			var onSelect = this._defaults.onSelect || this.inst.settings.onSelect;
-			var inputEl = this.$input ? this.$input[0] : null;
-			if (onSelect && inputEl) {
-				onSelect.apply(inputEl, [this.formattedDateTime, this]);
-			}
-		},
-
-		/*
-		* update our input with the new date time..
-		*/
-		_updateDateTime: function(dp_inst) {
-			dp_inst = this.inst || dp_inst;
-			var dt = $.datepicker._daylightSavingAdjust(new Date(dp_inst.selectedYear, dp_inst.selectedMonth, dp_inst.selectedDay)),
-				dateFmt = $.datepicker._get(dp_inst, 'dateFormat'),
-				formatCfg = $.datepicker._getFormatConfig(dp_inst),
-				timeAvailable = dt !== null && this.timeDefined;
-			this.formattedDate = $.datepicker.formatDate(dateFmt, (dt === null ? new Date() : dt), formatCfg);
-			var formattedDateTime = this.formattedDate;
-
-			/*
-			* remove following lines to force every changes in date picker to change the input value
-			* Bug descriptions: when an input field has a default value, and click on the field to pop up the date picker. 
-			* If the user manually empty the value in the input field, the date picker will never change selected value.
-			*/
-			//if (dp_inst.lastVal !== undefined && (dp_inst.lastVal.length > 0 && this.$input.val().length === 0)) {
-			//	return;
-			//}
-
-			if (this._defaults.timeOnly === true) {
-				formattedDateTime = this.formattedTime;
-			} else if (this._defaults.timeOnly !== true && (this._defaults.alwaysSetTime || timeAvailable)) {
-				formattedDateTime += this._defaults.separator + this.formattedTime + this._defaults.timeSuffix;
-			}
-
-			this.formattedDateTime = formattedDateTime;
-
-			if (!this._defaults.showTimepicker) {
-				this.$input.val(this.formattedDate);
-			} else if (this.$altInput && this._defaults.altFieldTimeOnly === true) {
-				this.$altInput.val(this.formattedTime);
-				this.$input.val(this.formattedDate);
-			} else if (this.$altInput) {
-				this.$input.val(formattedDateTime);
-				var altFormattedDateTime = '',
-					altSeparator = this._defaults.altSeparator ? this._defaults.altSeparator : this._defaults.separator,
-					altTimeSuffix = this._defaults.altTimeSuffix ? this._defaults.altTimeSuffix : this._defaults.timeSuffix;
-				if (this._defaults.altFormat) altFormattedDateTime = $.datepicker.formatDate(this._defaults.altFormat, (dt === null ? new Date() : dt), formatCfg);
-				else altFormattedDateTime = this.formattedDate;
-				if (altFormattedDateTime) altFormattedDateTime += altSeparator;
-				if (this._defaults.altTimeFormat) altFormattedDateTime += $.datepicker.formatTime(this._defaults.altTimeFormat, this, this._defaults) + altTimeSuffix;
-				else altFormattedDateTime += this.formattedTime + altTimeSuffix;
-				this.$altInput.val(altFormattedDateTime);
-			} else {
-				this.$input.val(formattedDateTime);
-			}
-
-			this.$input.trigger("change");
-		},
-
-		_onFocus: function() {
-			if (!this.$input.val() && this._defaults.defaultValue) {
-				this.$input.val(this._defaults.defaultValue);
-				var inst = $.datepicker._getInst(this.$input.get(0)),
-					tp_inst = $.datepicker._get(inst, 'timepicker');
-				if (tp_inst) {
-					if (tp_inst._defaults.timeOnly && (inst.input.val() != inst.lastVal)) {
-						try {
-							$.datepicker._updateDatepicker(inst);
-						} catch (err) {
-							$.datepicker.log(err);
-						}
-					}
-				}
-			}
-		},
-
-		/*
-		* Small abstraction to control types
-		* We can add more, just be sure to follow the pattern: create, options, value
-		*/
-		_controls: {
-			// slider methods
-			slider: {
-				create: function(tp_inst, obj, unit, val, min, max, step){
-					var rtl = tp_inst._defaults.isRTL; // if rtl go -60->0 instead of 0->60
-					return obj.prop('slide', null).slider({
-						orientation: "horizontal",
-						value: rtl? val*-1 : val,
-						min: rtl? max*-1 : min,
-						max: rtl? min*-1 : max,
-						step: step,
-						slide: function(event, ui) {
-							tp_inst.control.value(tp_inst, $(this), rtl? ui.value*-1:ui.value);
-							tp_inst._onTimeChange();
-						},
-						stop: function(event, ui) {
-							tp_inst._onSelectHandler();
-						}
-					});	
-				},
-				options: function(tp_inst, obj, opts, val){
-					if(tp_inst._defaults.isRTL){
-						if(typeof(opts) == 'string'){
-							if(opts == 'min' || opts == 'max'){
-								if(val !== undefined)
-									return obj.slider(opts, val*-1);
-								return Math.abs(obj.slider(opts));
-							}
-							return obj.slider(opts);
-						}
-						var min = opts.min, 
-							max = opts.max;
-						opts.min = opts.max = null;
-						if(min !== undefined)
-							opts.max = min * -1;
-						if(max !== undefined)
-							opts.min = max * -1;
-						return obj.slider(opts);
-					}
-					if(typeof(opts) == 'string' && val !== undefined)
-							return obj.slider(opts, val);
-					return obj.slider(opts);
-				},
-				value: function(tp_inst, obj, val){
-					if(tp_inst._defaults.isRTL){
-						if(val !== undefined)
-							return obj.slider('value', val*-1);
-						return Math.abs(obj.slider('value'));
-					}
-					if(val !== undefined)
-						return obj.slider('value', val);
-					return obj.slider('value');
-				}
-			},
-			// select methods
-			select: {
-				create: function(tp_inst, obj, unit, val, min, max, step){
-					var sel = '<select class="ui-timepicker-select" data-unit="'+ unit +'" data-min="'+ min +'" data-max="'+ max +'" data-step="'+ step +'">',
-						ul = tp_inst._defaults.timeFormat.indexOf('t') !== -1? 'toLowerCase':'toUpperCase',
-						m = 0;
-
-					for(var i=min; i<=max; i+=step){						
-						sel += '<option value="'+ i +'"'+ (i==val? ' selected':'') +'>';
-						if(unit == 'hour' && tp_inst._defaults.ampm){
-							m = i%12;
-							if(i === 0 || i === 12) sel += '12';
-							else if(m < 10) sel += '0'+ m.toString();
-							else sel += m;
-							sel += ' '+ ((i < 12)? tp_inst._defaults.amNames[0] : tp_inst._defaults.pmNames[0])[ul]();
-						}
-						else if(unit == 'millisec' || i >= 10) sel += i;
-						else sel += '0'+ i.toString();
-						sel += '</option>';
-					}
-					sel += '</select>';
-
-					obj.children('select').remove();
-
-					$(sel).appendTo(obj).change(function(e){
-						tp_inst._onTimeChange();
-						tp_inst._onSelectHandler();
-					});
-
-					return obj;
-				},
-				options: function(tp_inst, obj, opts, val){
-					var o = {},
-						$t = obj.children('select');
-					if(typeof(opts) == 'string'){
-						if(val === undefined)
-							return $t.data(opts);
-						o[opts] = val;	
-					}
-					else o = opts;
-					return tp_inst.control.create(tp_inst, obj, $t.data('unit'), $t.val(), o.min || $t.data('min'), o.max || $t.data('max'), o.step || $t.data('step'));
-				},
-				value: function(tp_inst, obj, val){
-					var $t = obj.children('select');
-					if(val !== undefined)
-						return $t.val(val);
-					return $t.val();
-				}
-			}
-		} // end _controls
-
-	});
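
The create/options/value triple above is the whole contract a control has to satisfy. As a sketch only, a third control could look like the following; how the addon is told to use it (e.g. via a controlType default, as the select implementation suggests) is an assumption, since the defaults are declared earlier in the file.

// Sketch of a custom control honoring the create/options/value pattern.
// The registration mechanism is assumed, not shown in this hunk.
var numberControl = {
	create: function(tp_inst, obj, unit, val, min, max, step) {
		var $input = $('<input type="number" />')
			.attr({ min: min, max: max, step: step })
			.val(val)
			.change(function() {
				tp_inst._onTimeChange();
				tp_inst._onSelectHandler();
			});
		return obj.append($input);
	},
	options: function(tp_inst, obj, opts, val) {
		// string form only; the object form is omitted in this sketch
		var $input = obj.children('input');
		if (typeof opts == 'string' && val !== undefined) {
			return $input.attr(opts, val);
		}
		return $input.attr(opts);
	},
	value: function(tp_inst, obj, val) {
		var $input = obj.children('input');
		if (val !== undefined) {
			return $input.val(val);
		}
		return parseInt($input.val(), 10);
	}
};
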
-
-	$.fn.extend({
-		/*
-		* shorthand just to use timepicker..
-		*/
-		timepicker: function(o) {
-			o = o || {};
-			var tmp_args = Array.prototype.slice.call(arguments);
-
-			if (typeof o == 'object') {
-				tmp_args[0] = $.extend(o, {
-					timeOnly: true
-				});
-			}
-
-			return $(this).each(function() {
-				$.fn.datetimepicker.apply($(this), tmp_args);
-			});
-		},
-
-		/*
-		* extend timepicker to datepicker
-		*/
-		datetimepicker: function(o) {
-			o = o || {};
-			var tmp_args = arguments;
-
-			if (typeof(o) == 'string') {
-				if (o == 'getDate') {
-					return $.fn.datepicker.apply($(this[0]), tmp_args);
-				} else {
-					return this.each(function() {
-						var $t = $(this);
-						$t.datepicker.apply($t, tmp_args);
-					});
-				}
-			} else {
-				return this.each(function() {
-					var $t = $(this);
-					$t.datepicker($.timepicker._newInst($t, o)._defaults);
-				});
-			}
-		}
-	});
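
Typical wiring for the two entry points above (selectors are hypothetical): timepicker() forces timeOnly, datetimepicker() attaches the combined widget, and string commands are proxied through to datepicker.

$('#meeting_start').datetimepicker({ ampm: true });      // combined date + time picker
$('#duration').timepicker({ stepMinute: 5 });            // timeOnly is forced by the shorthand
var when = $('#meeting_start').datetimepicker('getDate'); // proxied to datepicker
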
-
-	/*
-	* Public Utility to parse date and time
-	*/
-	$.datepicker.parseDateTime = function(dateFormat, timeFormat, dateTimeString, dateSettings, timeSettings) {
-		var parseRes = parseDateTimeInternal(dateFormat, timeFormat, dateTimeString, dateSettings, timeSettings);
-		if (parseRes.timeObj) {
-			var t = parseRes.timeObj;
-			parseRes.date.setHours(t.hour, t.minute, t.second, t.millisec);
-		}
-
-		return parseRes.date;
-	};
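
For example (formats and input string are illustrative), the utility splits the string on the separator, parses each half, and folds the parsed time into the Date:

var dt = $.datepicker.parseDateTime('mm/dd/yy', 'hh:mm tt', '04/02/2013 09:30 pm',
	{}, { timeFormat: 'hh:mm tt', ampm: true });
// dt is a Date for April 2, 2013 21:30:00 local time
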
-
-	/*
-	* Public utility to parse time
-	*/
-	$.datepicker.parseTime = function(timeFormat, timeString, options) {
-		
-		// pattern for standard and localized AM/PM markers
-		var getPatternAmpm = function(amNames, pmNames) {
-			var markers = [];
-			if (amNames) {
-				$.merge(markers, amNames);
-			}
-			if (pmNames) {
-				$.merge(markers, pmNames);
-			}
-			markers = $.map(markers, function(val) {
-				return val.replace(/[.*+?|()\[\]{}\\]/g, '\\$&');
-			});
-			return '(' + markers.join('|') + ')?';
-		};
-
-		// figure out the position of each time element, since JS regexes can't do named captures
-		var getFormatPositions = function(timeFormat) {
-			var finds = timeFormat.toLowerCase().match(/(h{1,2}|m{1,2}|s{1,2}|l{1}|t{1,2}|z|'.*?')/g),
-				orders = {
-					h: -1,
-					m: -1,
-					s: -1,
-					l: -1,
-					t: -1,
-					z: -1
-				};
-
-			if (finds) {
-				for (var i = 0; i < finds.length; i++) {
-					if (orders[finds[i].toString().charAt(0)] == -1) {
-						orders[finds[i].toString().charAt(0)] = i + 1;
-					}
-				}
-			}
-			return orders;
-		};
-
-		var o = extendRemove(extendRemove({}, $.timepicker._defaults), options || {});
-
-		var regstr = '^' + timeFormat.toString()
-				.replace(/(hh?|mm?|ss?|[tT]{1,2}|[lz]|'.*?')/g, function (match) {
-						switch (match.charAt(0).toLowerCase()) {
-							case 'h': return '(\\d?\\d)';
-							case 'm': return '(\\d?\\d)';
-							case 's': return '(\\d?\\d)';
-							case 'l': return '(\\d?\\d?\\d)';
-							case 'z': return '(z|[-+]\\d\\d:?\\d\\d|\\S+)?';
-							case 't': return getPatternAmpm(o.amNames, o.pmNames);
-							default:    // literal escaped in quotes
-								return '(' + match.replace(/\'/g, "").replace(/(\.|\$|\^|\\|\/|\(|\)|\[|\]|\?|\+|\*)/g, function (m) { return "\\" + m; }) + ')?';
-						}
-					})
-				.replace(/\s/g, '\\s?') +
-				o.timeSuffix + '$',
-			order = getFormatPositions(timeFormat),
-			ampm = '',
-			treg;
-
-		treg = timeString.match(new RegExp(regstr, 'i'));
-
-		var resTime = {
-			hour: 0,
-			minute: 0,
-			second: 0,
-			millisec: 0
-		};
-
-		if (treg) {
-			if (order.t !== -1) {
-				if (treg[order.t] === undefined || treg[order.t].length === 0) {
-					ampm = '';
-					resTime.ampm = '';
-				} else {
-					ampm = $.inArray(treg[order.t].toUpperCase(), o.amNames) !== -1 ? 'AM' : 'PM';
-					resTime.ampm = o[ampm == 'AM' ? 'amNames' : 'pmNames'][0];
-				}
-			}
-
-			if (order.h !== -1) {
-				if (ampm == 'AM' && treg[order.h] == '12') {
-					resTime.hour = 0; // 12am = 0 hour
-				} else {
-					if (ampm == 'PM' && treg[order.h] != '12') {
-						resTime.hour = parseInt(treg[order.h], 10) + 12; // 12pm = 12 hour, any other pm = hour + 12
-					} else {
-						resTime.hour = Number(treg[order.h]);
-					}
-				}
-			}
-
-			if (order.m !== -1) {
-				resTime.minute = Number(treg[order.m]);
-			}
-			if (order.s !== -1) {
-				resTime.second = Number(treg[order.s]);
-			}
-			if (order.l !== -1) {
-				resTime.millisec = Number(treg[order.l]);
-			}
-			if (order.z !== -1 && treg[order.z] !== undefined) {
-				var tz = treg[order.z].toUpperCase();
-				switch (tz.length) {
-				case 1:
-					// Z
-					tz = o.timezoneIso8601 ? 'Z' : '+0000';
-					break;
-				case 5:
-					// +hhmm
-					if (o.timezoneIso8601) {
-						tz = tz.substring(1) == '0000' ? 'Z' : tz.substring(0, 3) + ':' + tz.substring(3);
-					}
-					break;
-				case 6:
-					// +hh:mm
-					if (!o.timezoneIso8601) {
-						tz = tz == 'Z' || tz.substring(1) == '00:00' ? '+0000' : tz.replace(/:/, '');
-					} else {
-						if (tz.substring(1) == '00:00') {
-							tz = 'Z';
-						}
-					}
-					break;
-				}
-				resTime.timezone = tz;
-			}
-
-
-			return resTime;
-		}
-
-		return false;
-	};
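
A quick sketch of its behavior: it returns a plain object rather than a Date, and false when the string does not match the format.

var t = $.datepicker.parseTime('hh:mm tt', '09:30 PM', { ampm: true });
// t => { hour: 21, minute: 30, second: 0, millisec: 0, ampm: 'PM' }
$.datepicker.parseTime('hh:mm', 'not a time'); // => false
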
-
-	/*
-	* Public utility to format the time
-	* format = string format of the time
-	* time = a {}, not a Date() for timezones
-	* options = essentially the regional[].. amNames, pmNames, ampm
-	*/
-	$.datepicker.formatTime = function(format, time, options) {
-		options = options || {};
-		options = $.extend({}, $.timepicker._defaults, options);
-		time = $.extend({
-			hour: 0,
-			minute: 0,
-			second: 0,
-			millisec: 0,
-			timezone: '+0000'
-		}, time);
-
-		var tmptime = format;
-		var ampmName = options.amNames[0];
-
-		var hour = parseInt(time.hour, 10);
-		if (options.ampm) {
-			if (hour > 11) {
-				ampmName = options.pmNames[0];
-				if (hour > 12) {
-					hour = hour % 12;
-				}
-			}
-			if (hour === 0) {
-				hour = 12;
-			}
-		}
-		tmptime = tmptime.replace(/(?:hh?|mm?|ss?|[tT]{1,2}|[lz]|'.*?')/g, function(match) {
-			switch (match.toLowerCase()) {
-			case 'hh':
-				return ('0' + hour).slice(-2);
-			case 'h':
-				return hour;
-			case 'mm':
-				return ('0' + time.minute).slice(-2);
-			case 'm':
-				return time.minute;
-			case 'ss':
-				return ('0' + time.second).slice(-2);
-			case 's':
-				return time.second;
-			case 'l':
-				return ('00' + time.millisec).slice(-3);
-			case 'z':
-				return time.timezone === null? options.defaultTimezone : time.timezone;
-			case 't':
-			case 'tt':
-				if (options.ampm) {
-					if (match.length == 1) {
-						ampmName = ampmName.charAt(0);
-					}
-					return match.charAt(0) === 'T' ? ampmName.toUpperCase() : ampmName.toLowerCase();
-				}
-				return '';
-			default:
-				return match.replace(/\'/g, "") || "'";
-			}
-		});
-
-		tmptime = $.trim(tmptime);
-		return tmptime;
-	};
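
For example, since time is a plain object rather than a Date, missing fields default to zero and only the tokens present in the format string are emitted:

$.datepicker.formatTime('hh:mm TT', { hour: 14, minute: 5 }, { ampm: true });
// => "02:05 PM"
$.datepicker.formatTime('hh:mm:ss l', { hour: 7, minute: 3, second: 9, millisec: 42 });
// => "07:03:09 042"
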
-
-	/*
-	* the bad hack :/ override datepicker so it doesn't close on select
-	// inspired: http://stackoverflow.com/questions/1252512/jquery-datepicker-prevent-closing-picker-when-clicking-a-date/1762378#1762378
-	*/
-	$.datepicker._base_selectDate = $.datepicker._selectDate;
-	$.datepicker._selectDate = function(id, dateStr) {
-		var inst = this._getInst($(id)[0]),
-			tp_inst = this._get(inst, 'timepicker');
-
-		if (tp_inst) {
-			tp_inst._limitMinMaxDateTime(inst, true);
-			inst.inline = inst.stay_open = true;
-			// this way the onSelect handler called from the calendar picker gets the full dateTime
-			this._base_selectDate(id, dateStr);
-			inst.inline = inst.stay_open = false;
-			this._notifyChange(inst);
-			this._updateDatepicker(inst);
-		} else {
-			this._base_selectDate(id, dateStr);
-		}
-	};
-
-	/*
-	* second bad hack :/ override datepicker so it triggers an event when changing the input field
-	* and does not redraw the datepicker on every selectDate event
-	*/
-	$.datepicker._base_updateDatepicker = $.datepicker._updateDatepicker;
-	$.datepicker._updateDatepicker = function(inst) {
-
-		// don't popup the datepicker if there is another instance already opened
-		var input = inst.input[0];
-		if ($.datepicker._curInst && $.datepicker._curInst != inst && $.datepicker._datepickerShowing && $.datepicker._lastInput != input) {
-			return;
-		}
-
-		if (typeof(inst.stay_open) !== 'boolean' || inst.stay_open === false) {
-
-			this._base_updateDatepicker(inst);
-
-			// Reload the time control when changing something in the input text field.
-			var tp_inst = this._get(inst, 'timepicker');
-			if (tp_inst) {
-				tp_inst._addTimePicker(inst);
-
-				if (tp_inst._defaults.useLocalTimezone) { //checks daylight saving with the new date.
-					var date = new Date(inst.selectedYear, inst.selectedMonth, inst.selectedDay, 12);
-					selectLocalTimeZone(tp_inst, date);
-					tp_inst._onTimeChange();
-				}
-			}
-		}
-	};
-
-	/*
-	* third bad hack :/ override datepicker so it allows spaces and colon in the input field
-	*/
-	$.datepicker._base_doKeyPress = $.datepicker._doKeyPress;
-	$.datepicker._doKeyPress = function(event) {
-		var inst = $.datepicker._getInst(event.target),
-			tp_inst = $.datepicker._get(inst, 'timepicker');
-
-		if (tp_inst) {
-			if ($.datepicker._get(inst, 'constrainInput')) {
-				var ampm = tp_inst._defaults.ampm,
-					dateChars = $.datepicker._possibleChars($.datepicker._get(inst, 'dateFormat')),
-					datetimeChars = tp_inst._defaults.timeFormat.toString()
-											.replace(/[hms]/g, '')
-											.replace(/TT/g, ampm ? 'APM' : '')
-											.replace(/Tt/g, ampm ? 'AaPpMm' : '')
-											.replace(/tT/g, ampm ? 'AaPpMm' : '')
-											.replace(/T/g, ampm ? 'AP' : '')
-											.replace(/tt/g, ampm ? 'apm' : '')
-											.replace(/t/g, ampm ? 'ap' : '') + 
-											" " + tp_inst._defaults.separator + 
-											tp_inst._defaults.timeSuffix + 
-											(tp_inst._defaults.showTimezone ? tp_inst._defaults.timezoneList.join('') : '') + 
-											(tp_inst._defaults.amNames.join('')) + (tp_inst._defaults.pmNames.join('')) + 
-											dateChars,
-					chr = String.fromCharCode(event.charCode === undefined ? event.keyCode : event.charCode);
-				return event.ctrlKey || (chr < ' ' || !dateChars || datetimeChars.indexOf(chr) > -1);
-			}
-		}
-
-		return $.datepicker._base_doKeyPress(event);
-	};
-
-	/*
-	* Fourth bad hack :/ override _updateAlternate function used in inline mode to init altField
-	*/
-	$.datepicker._base_updateAlternate = $.datepicker._updateAlternate;
-	/* Update any alternate field to synchronise with the main field. */
-	$.datepicker._updateAlternate = function(inst) {
-		var tp_inst = this._get(inst, 'timepicker');
-		if(tp_inst){
-			var altField = tp_inst._defaults.altField;
-			if (altField) { // update alternate field too
-				var altFormat = tp_inst._defaults.altFormat || tp_inst._defaults.dateFormat,
-					date = this._getDate(inst),
-					formatCfg = $.datepicker._getFormatConfig(inst),
-					altFormattedDateTime = '', 
-					altSeparator = tp_inst._defaults.altSeparator ? tp_inst._defaults.altSeparator : tp_inst._defaults.separator, 
-					altTimeSuffix = tp_inst._defaults.altTimeSuffix ? tp_inst._defaults.altTimeSuffix : tp_inst._defaults.timeSuffix,
-					altTimeFormat = tp_inst._defaults.altTimeFormat !== undefined ? tp_inst._defaults.altTimeFormat : tp_inst._defaults.timeFormat;
-				
-				altFormattedDateTime += $.datepicker.formatTime(altTimeFormat, tp_inst, tp_inst._defaults) + altTimeSuffix;
-				if(!tp_inst._defaults.timeOnly && !tp_inst._defaults.altFieldTimeOnly){
-					if(tp_inst._defaults.altFormat)
-						altFormattedDateTime = $.datepicker.formatDate(tp_inst._defaults.altFormat, (date === null ? new Date() : date), formatCfg) + altSeparator + altFormattedDateTime;
-					else altFormattedDateTime = tp_inst.formattedDate + altSeparator + altFormattedDateTime;
-				}
-				$(altField).val(altFormattedDateTime);
-			}
-		}
-		else{
-			$.datepicker._base_updateAlternate(inst);
-		}
-	};
-
-	/*
-	* Override key up event to sync manual input changes.
-	*/
-	$.datepicker._base_doKeyUp = $.datepicker._doKeyUp;
-	$.datepicker._doKeyUp = function(event) {
-		var inst = $.datepicker._getInst(event.target),
-			tp_inst = $.datepicker._get(inst, 'timepicker');
-
-		if (tp_inst) {
-			if (tp_inst._defaults.timeOnly && (inst.input.val() != inst.lastVal)) {
-				try {
-					$.datepicker._updateDatepicker(inst);
-				} catch (err) {
-					$.datepicker.log(err);
-				}
-			}
-		}
-
-		return $.datepicker._base_doKeyUp(event);
-	};
-
-	/*
-	* override "Today" button to also grab the time.
-	*/
-	$.datepicker._base_gotoToday = $.datepicker._gotoToday;
-	$.datepicker._gotoToday = function(id) {
-		var inst = this._getInst($(id)[0]),
-			$dp = inst.dpDiv;
-		this._base_gotoToday(id);
-		var tp_inst = this._get(inst, 'timepicker');
-		selectLocalTimeZone(tp_inst);
-		var now = new Date();
-		this._setTime(inst, now);
-		$('.ui-datepicker-today', $dp).click();
-	};
-
-	/*
-	* Disable & enable the Time in the datetimepicker
-	*/
-	$.datepicker._disableTimepickerDatepicker = function(target) {
-		var inst = this._getInst(target);
-		if (!inst) {
-			return;
-		}
-
-		var tp_inst = this._get(inst, 'timepicker');
-		$(target).datepicker('getDate'); // Init selected[Year|Month|Day]
-		if (tp_inst) {
-			tp_inst._defaults.showTimepicker = false;
-			tp_inst._updateDateTime(inst);
-		}
-	};
-
-	$.datepicker._enableTimepickerDatepicker = function(target) {
-		var inst = this._getInst(target);
-		if (!inst) {
-			return;
-		}
-
-		var tp_inst = this._get(inst, 'timepicker');
-		$(target).datepicker('getDate'); // Init selected[Year|Month|Day]
-		if (tp_inst) {
-			tp_inst._defaults.showTimepicker = true;
-			tp_inst._addTimePicker(inst); // Could be disabled on page load
-			tp_inst._updateDateTime(inst);
-		}
-	};
-
-	/*
-	* Create our own set time function
-	*/
-	$.datepicker._setTime = function(inst, date) {
-		var tp_inst = this._get(inst, 'timepicker');
-		if (tp_inst) {
-			var defaults = tp_inst._defaults;
-
-			// calling _setTime with no date sets time to defaults
-			tp_inst.hour = date ? date.getHours() : defaults.hour;
-			tp_inst.minute = date ? date.getMinutes() : defaults.minute;
-			tp_inst.second = date ? date.getSeconds() : defaults.second;
-			tp_inst.millisec = date ? date.getMilliseconds() : defaults.millisec;
-
-			//check if within min/max times.. 
-			tp_inst._limitMinMaxDateTime(inst, true);
-
-			tp_inst._onTimeChange();
-			tp_inst._updateDateTime(inst);
-		}
-	};
-
-	/*
-	* Create new public method to set only time, callable as $().datepicker('setTime', date)
-	*/
-	$.datepicker._setTimeDatepicker = function(target, date, withDate) {
-		var inst = this._getInst(target);
-		if (!inst) {
-			return;
-		}
-
-		var tp_inst = this._get(inst, 'timepicker');
-
-		if (tp_inst) {
-			this._setDateFromField(inst);
-			var tp_date;
-			if (date) {
-				if (typeof date == "string") {
-					tp_inst._parseTime(date, withDate);
-					tp_date = new Date();
-					tp_date.setHours(tp_inst.hour, tp_inst.minute, tp_inst.second, tp_inst.millisec);
-				} else {
-					tp_date = new Date(date.getTime());
-				}
-				if (tp_date.toString() == 'Invalid Date') {
-					tp_date = undefined;
-				}
-				this._setTime(inst, tp_date);
-			}
-		}
-
-	};
-
-	/*
-	* override setDate() to allow setting time too within Date object
-	*/
-	$.datepicker._base_setDateDatepicker = $.datepicker._setDateDatepicker;
-	$.datepicker._setDateDatepicker = function(target, date) {
-		var inst = this._getInst(target);
-		if (!inst) {
-			return;
-		}
-
-		var tp_date = (date instanceof Date) ? new Date(date.getTime()) : date;
-
-		this._updateDatepicker(inst);
-		this._base_setDateDatepicker.apply(this, arguments);
-		this._setTimeDatepicker(target, tp_date, true);
-	};
-
-	/*
-	* override getDate() to allow getting time too within Date object
-	*/
-	$.datepicker._base_getDateDatepicker = $.datepicker._getDateDatepicker;
-	$.datepicker._getDateDatepicker = function(target, noDefault) {
-		var inst = this._getInst(target);
-		if (!inst) {
-			return;
-		}
-
-		var tp_inst = this._get(inst, 'timepicker');
-
-		if (tp_inst) {
-			// if it hasn't yet been defined, grab from field
-			if(inst.lastVal === undefined){
-				this._setDateFromField(inst, noDefault);
-			}
-
-			var date = this._getDate(inst);
-			if (date && tp_inst._parseTime($(target).val(), tp_inst.timeOnly)) {
-				date.setHours(tp_inst.hour, tp_inst.minute, tp_inst.second, tp_inst.millisec);
-			}
-			return date;
-		}
-		return this._base_getDateDatepicker(target, noDefault);
-	};
-
-	/*
-	* override parseDate() because UI 1.8.14 throws an error about "Extra characters"
-	* An option in datepicker to ignore extra format characters would be nicer.
-	*/
-	$.datepicker._base_parseDate = $.datepicker.parseDate;
-	$.datepicker.parseDate = function(format, value, settings) {
-		var date;
-		try {
-			date = this._base_parseDate(format, value, settings);
-		} catch (err) {
-			// Hack!  The error message ends with a colon, a space, and
-			// the "extra" characters.  We rely on that instead of
-			// attempting to perfectly reproduce the parsing algorithm.
-			date = this._base_parseDate(format, value.substring(0,value.length-(err.length-err.indexOf(':')-2)), settings);
-		}
-		return date;
-	};
-
-	/*
-	* override formatDate to set date with time to the input
-	*/
-	$.datepicker._base_formatDate = $.datepicker._formatDate;
-	$.datepicker._formatDate = function(inst, day, month, year) {
-		var tp_inst = this._get(inst, 'timepicker');
-		if (tp_inst) {
-			tp_inst._updateDateTime(inst);
-			return tp_inst.$input.val();
-		}
-		return this._base_formatDate(inst);
-	};
-
-	/*
-	* override options setter to add time handling to maxDate(Time) and minDate(Time)
-	*/
-	$.datepicker._base_optionDatepicker = $.datepicker._optionDatepicker;
-	$.datepicker._optionDatepicker = function(target, name, value) {
-		var inst = this._getInst(target),
-	        name_clone;
-		if (!inst) {
-			return null;
-		}
-
-		var tp_inst = this._get(inst, 'timepicker');
-		if (tp_inst) {
-			var min = null,
-				max = null,
-				onselect = null,
-				overrides = tp_inst._defaults.evnts,
-				fns = {},
-				prop;
-		    if (typeof name == 'string') { // if min/max was set with the string
-		        if (name === 'minDate' || name === 'minDateTime') {
-		            min = value;
-		        } else if (name === 'maxDate' || name === 'maxDateTime') {
-		            max = value;
-		        } else if (name === 'onSelect') {
-		            onselect = value;
-		        } else if (overrides.hasOwnProperty(name)) {
-		            if (typeof (value) === 'undefined') {
-		                return overrides[name];
-		            }
-		            fns[name] = value;
-		            name_clone = {}; //empty results in exiting function after overrides updated
-		        }
-		    } else if (typeof name == 'object') { //if min/max was set with the JSON
-		        if (name.minDate) {
-		            min = name.minDate;
-		        } else if (name.minDateTime) {
-		            min = name.minDateTime;
-		        } else if (name.maxDate) {
-		            max = name.maxDate;
-		        } else if (name.maxDateTime) {
-		            max = name.maxDateTime;
-		        }
-		        for (prop in overrides) {
-		            if (overrides.hasOwnProperty(prop) && name[prop]) {
-		                fns[prop] = name[prop];
-		            }
-		        }
-		    }
-		    for (prop in fns) {
-		        if (fns.hasOwnProperty(prop)) {
-		            overrides[prop] = fns[prop];
-		            if (!name_clone) { name_clone = $.extend({}, name);}
-		            delete name_clone[prop];
-		        }
-		    }
-		    if (name_clone && isEmptyObject(name_clone)) { return; }
-		    if (min) { //if min was set
-		        if (min === 0) {
-		            min = new Date();
-		        } else {
-		            min = new Date(min);
-		        }
-		        tp_inst._defaults.minDate = min;
-		        tp_inst._defaults.minDateTime = min;
-		    } else if (max) { //if max was set
-		        if (max === 0) {
-		            max = new Date();
-		        } else {
-		            max = new Date(max);
-		        }
-		        tp_inst._defaults.maxDate = max;
-		        tp_inst._defaults.maxDateTime = max;
-		    } else if (onselect) {
-		        tp_inst._defaults.onSelect = onselect;
-		    }
-		}
-		if (value === undefined) {
-			return this._base_optionDatepicker.call($.datepicker, target, name);
-		}
-		return this._base_optionDatepicker.call($.datepicker, target, name_clone || name, value);
-	};
-	/*
-	* jQuery isEmptyObject does not check hasOwnProperty - if someone has added to the object prototype,
-	* it will return false for all objects
-	*/
-	function isEmptyObject (obj) {
-		var prop;
-		for (prop in obj) {
-			if (obj.hasOwnProperty(prop)) {
-				return false;
-			}
-		}
-		return true;
-	}
-	/*
-	* jQuery extend now ignores nulls!
-	*/
-	function extendRemove(target, props) {
-		$.extend(target, props);
-		for (var name in props) {
-			if (props[name] === null || props[name] === undefined) {
-				target[name] = props[name];
-			}
-		}
-		return target;
-	}
-
-	/*
-	* Splits a datetime string into date and time substrings.
-	* Throws exception when date can't be parsed
-	* Returns [dateString, timeString]
-	*/
-	var splitDateTime = function(dateFormat, dateTimeString, dateSettings, timeSettings) {
-		try {
-			// The idea is to count the separator occurrences in the datetime string and in the requested time format (since time has
-			// fewer unknowns, mostly numbers and am/pm). We will use the time pattern to split.
-			var separator = timeSettings && timeSettings.separator ? timeSettings.separator : $.timepicker._defaults.separator,
-				format = timeSettings && timeSettings.timeFormat ? timeSettings.timeFormat : $.timepicker._defaults.timeFormat,
-				ampm = timeSettings && timeSettings.ampm ? timeSettings.ampm : $.timepicker._defaults.ampm,
-				timeParts = format.split(separator), // how many occurrences of the separator may be in our format?
-				timePartsLen = timeParts.length,
-				allParts = dateTimeString.split(separator),
-				allPartsLen = allParts.length;
-
-			// because our default ampm=false, but our default format has tt, we need to filter this out
-			if(!ampm){
-				timeParts = $.trim(format.replace(/t/gi,'')).split(separator);
-				timePartsLen = timeParts.length;
-			}
-
-			if (allPartsLen > 1) {
-				return [
-						allParts.splice(0,allPartsLen-timePartsLen).join(separator),
-						allParts.splice(0,timePartsLen).join(separator)
-					];
-			}
-
-		} catch (err) {
-			if (err.indexOf(":") >= 0) {
-				// Hack!  The error message ends with a colon, a space, and
-				// the "extra" characters.  We rely on that instead of
-				// attempting to perfectly reproduce the parsing algorithm.
-				var dateStringLength = dateTimeString.length - (err.length - err.indexOf(':') - 2);
-
-				return [$.trim(dateTimeString.substring(0, dateStringLength)), $.trim(dateTimeString.substring(dateStringLength))];
-
-			} else {
-				throw err;
-			}
-		}
-		return [dateTimeString, ''];
-	};
-
-	/*
-	* Internal function to parse datetime interval
-	* Returns: {date: Date, timeObj: Object}, where
-	*   date - parsed date without time (type Date)
-	*   timeObj = {hour: , minute: , second: , millisec: } - parsed time. Optional
-	*/
-	var parseDateTimeInternal = function(dateFormat, timeFormat, dateTimeString, dateSettings, timeSettings) {
-		var date;
-		var splitRes = splitDateTime(dateFormat, dateTimeString, dateSettings, timeSettings);
-		date = $.datepicker._base_parseDate(dateFormat, splitRes[0], dateSettings);
-		if (splitRes[1] !== '') {
-			var timeString = splitRes[1],
-				parsedTime = $.datepicker.parseTime(timeFormat, timeString, timeSettings);
-
-			if (!parsedTime) { // parseTime returns false, not null, on failure
-				throw 'Wrong time format';
-			}
-			return {
-				date: date,
-				timeObj: parsedTime
-			};
-		} else {
-			return {
-				date: date
-			};
-		}
-	};
-
-	/*
-	* Internal function to set timezone_select to the local timezone
-	*/
-	var selectLocalTimeZone = function(tp_inst, date) {
-		if (tp_inst && tp_inst.timezone_select) {
-			tp_inst._defaults.useLocalTimezone = true;
-			var now = typeof date !== 'undefined' ? date : new Date();
-			var tzoffset = $.timepicker.timeZoneOffsetString(now);
-			if (tp_inst._defaults.timezoneIso8601) {
-				tzoffset = tzoffset.substring(0, 3) + ':' + tzoffset.substring(3);
-			}
-			tp_inst.timezone_select.val(tzoffset);
-		}
-	};
-
-	/*
-	* Create a singleton instance
-	*/
-	$.timepicker = new Timepicker();
-
-	/**
-	 * Get the timezone offset as string from a date object (eg '+0530' for UTC+5.5)
-	 * @param  date
-	 * @return string
-	 */
-	$.timepicker.timeZoneOffsetString = function(date) {
-		var off = date.getTimezoneOffset() * -1,
-			minutes = off % 60,
-			hours = (off - minutes) / 60;
-		return (off >= 0 ? '+' : '-') + ('0' + (hours * 101).toString()).substr(-2) + ('0' + (minutes * 101).toString()).substr(-2);
-	};
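
The `* 101` is a zero-padding trick rather than a typo: multiplying a value in 0..99 by 101 repeats its two-digit form (5 becomes 505, 30 becomes 3030), so taking the last two characters yields the padded field.

// e.g. in IST (UTC+5:30) this returns "+0530"; in PST (UTC-8) it returns "-0800"
$.timepicker.timeZoneOffsetString(new Date());
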
-
-	/**
-	 * Calls `timepicker()` on the `startTime` and `endTime` elements, and configures them to
-	 * enforce date range limits.
-	 * n.b. The input value must be correctly formatted (reformatting is not supported)
-	 * @param  Element startTime
-	 * @param  Element endTime
-	 * @param  obj options Options for the timepicker() call
-	 * @return jQuery
-	 */
-	$.timepicker.timeRange = function(startTime, endTime, options) {
-		return $.timepicker.handleRange('timepicker', startTime, endTime, options);
-	};
-
-	/**
-	 * Calls `datetimepicker` on the `startTime` and `endTime` elements, and configures them to
-	 * enforce date range limits.
-	 * @param  Element startTime
-	 * @param  Element endTime
-	 * @param  obj options Options for the `timepicker()` call. Also supports `reformat`,
-	 *   a boolean value that can be used to reformat the input values to the `dateFormat`.
-	 * @return jQuery
-	 */
-	$.timepicker.dateTimeRange = function(startTime, endTime, options) {
-		return $.timepicker.dateRange(startTime, endTime, options, 'datetimepicker');
-	};
-
-	/**
-	 * Calls `method` on the `startTime` and `endTime` elements, and configures them to
-	 * enforce date range limits.
-	 * @param  Element startTime
-	 * @param  Element endTime
-	 * @param  obj options Options for the `timepicker()` call. Also supports `reformat`,
-	 *   a boolean value that can be used to reformat the input values to the `dateFormat`.
-	 * @param  string method Can be used to specify the type of picker to be added
-	 * @return jQuery
-	 */
-	$.timepicker.dateRange = function(startTime, endTime, options, method) {
-		method = method || 'datepicker';
-		return $.timepicker.handleRange(method, startTime, endTime, options);
-	};
-
-	/**
-	 * Calls `method` on the `startTime` and `endTime` elements, and configures them to
-	 * enforce date range limits.
-	 * @param  string method Can be used to specify the type of picker to be added
-	 * @param  Element startTime
-	 * @param  Element endTime
-	 * @param  obj options Options for the `timepicker()` call. Also supports `reformat`,
-	 *   a boolean value that can be used to reformat the input values to the `dateFormat`.
-	 * @return jQuery
-	 */
-	$.timepicker.handleRange = function(method, startTime, endTime, options) {
-		$.fn[method].call(startTime, $.extend({
-			onClose: function(dateText, inst) {
-				checkDates(this, endTime, dateText);
-			},
-			onSelect: function(selectedDateTime) {
-				selected(this, endTime, 'minDate');
-			}
-		}, options, options.start));
-		$.fn[method].call(endTime, $.extend({
-			onClose: function(dateText, inst) {
-				checkDates(this, startTime, dateText);
-			},
-			onSelect: function(selectedDateTime) {
-				selected(this, startTime, 'maxDate');
-			}
-		}, options, options.end));
-		// timepicker doesn't provide access to its 'timeFormat' option, 
-		// nor could I get datepicker.formatTime() to behave with times, so I
-		// have disabled reformatting for timepicker
-		if (method != 'timepicker' && options.reformat) {
-			$([startTime, endTime]).each(function() {
-				var format = $(this)[method].call($(this), 'option', 'dateFormat'),
-					date = new Date($(this).val());
-				if ($(this).val() && date) {
-					$(this).val($.datepicker.formatDate(format, date));
-				}
-			});
-		}
-		checkDates(startTime, endTime, startTime.val());
-
-		function checkDates(changed, other, dateText) {
-			if (other.val() && (new Date(startTime.val()) > new Date(endTime.val()))) {
-				other.val(dateText);
-			}
-		}
-		selected(startTime, endTime, 'minDate');
-		selected(endTime, startTime, 'maxDate');
-
-		function selected(changed, other, option) {
-			if (!$(changed).val()) {
-				return;
-			}
-			var date = $(changed)[method].call($(changed), 'getDate');
-			// timepicker doesn't implement 'getDate' and returns a jQuery
-			if (date.getTime) {
-				$(other)[method].call($(other), 'option', option, date);
-			}
-		}
-		return $([startTime.get(0), endTime.get(0)]);
-	};
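
A usage sketch with hypothetical inputs: each picker's onClose/onSelect pushes its value onto the other's minDate or maxDate, so the range cannot invert.

$.timepicker.dateRange($('#from'), $('#to'), {
	dateFormat: 'mm/dd/yy',
	reformat: true // rewrite pre-filled values into dateFormat (datepicker method only)
});
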
-
-	/*
-	* Keep up with the version
-	*/
-	$.timepicker.version = "1.0.5";
-
-})(jQuery);
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/vendor/scripts/jquery.ajax-retry.js b/branch-1.2/ambari-web/vendor/scripts/jquery.ajax-retry.js
deleted file mode 100644
index d39746f..0000000
--- a/branch-1.2/ambari-web/vendor/scripts/jquery.ajax-retry.js
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * jquery.ajax-retry
- * https://github.com/johnkpaul/jquery-ajax-retry
- *
- * Copyright (c) 2012 John Paul
- * Licensed under the MIT license.
- */
-
-(function($) {
-
-  // enhance all ajax requests with our retry API
-  $.ajaxPrefilter(function(options, originalOptions, jqXHR){
-    jqXHR.retry = function(opts){
-      if(opts.timeout){
-        this.timeout = opts.timeout;
-      }
-      return this.pipe(null, pipeFailRetry(this, opts.times));
-    };
-  });
-
-  // generates a fail pipe function that will retry `jqXHR` `times` more times
-  function pipeFailRetry(jqXHR, times){
-
-    // takes failure data as input, returns a new deferred
-    return function(input, status, msg){
-      var ajaxOptions = this;
-      var output = new $.Deferred();
-
-      // whenever we do make this request, pipe its output to our deferred
-      function nextRequest() {
-        $.ajax(ajaxOptions)
-          .retry({times:times-1})
-          .pipe(output.resolve, output.reject);
-      }
-
-      if(times > 1){
-        // time to make that next request...
-        if(jqXHR.timeout !== undefined){
-          setTimeout(nextRequest, jqXHR.timeout);
-        } else {
-          nextRequest();
-        }
-      } else {
-        // no times left, reject our deferred with the current arguments
-        output.rejectWith(this, arguments);
-      }
-
-      return output;
-    };
-  }
-
-}(jQuery));
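
Usage sketch (URL hypothetical): retry() wraps the jqXHR in a piped deferred, so the caller sees a single promise that resolves on the first success or rejects once the attempts are exhausted.

$.ajax({ url: '/api/v1/clusters', dataType: 'json' })
  .retry({ times: 3, timeout: 1000 }) // up to 3 attempts, 1s apart
  .done(function(data) { console.log('loaded', data); })
  .fail(function(xhr, status) { console.error('gave up:', status); });
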
diff --git a/branch-1.2/ambari-web/vendor/scripts/jquery.flexibleArea.js b/branch-1.2/ambari-web/vendor/scripts/jquery.flexibleArea.js
deleted file mode 100644
index 56c425b..0000000
--- a/branch-1.2/ambari-web/vendor/scripts/jquery.flexibleArea.js
+++ /dev/null
@@ -1,124 +0,0 @@
-/*!
-* flexibleArea.js v1.0
-* A jQuery plugin that dynamically updates textarea's height to fit the content.
-* http://flaviusmatis.github.com/flexibleArea.js/
-*
-* Copyright 2012, Flavius Matis
-* Released under the MIT license.
-* http://flaviusmatis.github.com/license.html
-*/
-
-(function($){
-	var methods = {
-		init : function() {
-
-			var styles = [
-				'paddingTop',
-				'paddingRight',
-				'paddingBottom',
-				'paddingLeft',
-				'fontSize',
-				'lineHeight',
-				'fontFamily',
-				'width',
-				'fontWeight',
-				'border-top-width',
-				'border-right-width',
-				'border-bottom-width',
-				'border-left-width'
-			];
-
-			return this.each( function() {
-
-				if (this.type !== 'textarea')	return false;
-					
-				var $textarea = $(this).css({'resize': 'none', overflow: 'hidden'});
-				
-				var	$clone = $('<div></div>').css({
-					'position' : 'absolute',
-					'display' : 'none',
-					'word-wrap' : 'break-word',
-					'white-space' : 'pre-wrap',
-					'border-style' : 'solid'
-				}).appendTo(document.body);
-
-				// Apply textarea styles to clone
-				for (var i=0; i < styles.length; i++) {
-					$clone.css(styles[i],$textarea.css(styles[i]));
-				}
-
-				var textareaHeight = parseInt($textarea.css('height'), 10);
-				var lineHeight = parseInt($textarea.css('line-height'), 10) || parseInt($textarea.css('font-size'), 10);
-				var minheight = lineHeight * 2 > textareaHeight ? lineHeight * 2 : textareaHeight;
-				var maxheight = parseInt($textarea.css('max-height'), 10) > -1 ? parseInt($textarea.css('max-height'), 10) : Number.MAX_VALUE;
-
-				function updateHeight() {
-					// escape & first so '&lt;' and '&gt;' are not double-escaped
-					var textareaContent = $textarea.val().replace(/&/g, '&amp;').replace(/</g, '&lt;').replace(/>/g, '&gt;').replace(/\n/g, '<br/>');
-					// Adding an extra white space to make sure the last line is rendered.
-					$clone.html(textareaContent + '&nbsp;');
-					setHeightAndOverflow();
-				}
-
-				function setHeightAndOverflow(){
-					var cloneHeight = $clone.height() + lineHeight;
-					var overflow = 'hidden';
-					var height = cloneHeight;
-					if (cloneHeight > maxheight) {
-						height = maxheight;
-						overflow = 'auto';
-					} else if (cloneHeight < minheight) {
-						height = minheight;
-					}
-					if ($textarea.height() !== height) {
-						$textarea.css({'overflow': overflow, 'height': height + 'px'});
-					}
-				}
-
-				// Update textarea size on keyup, change, cut and paste
-				$textarea.bind('keyup change cut paste', function(){
-					updateHeight();
-				});
-
-				// Update textarea on window resize
-				$(window).bind('resize', function (){
-					var cleanWidth = parseInt($textarea.width(), 10);
-					if ($clone.width() !== cleanWidth) {
-						$clone.css({'width': cleanWidth + 'px'});
-						updateHeight();
-					}
-				});
-
-				// Update textarea on blur
-				$textarea.bind('blur',function(){
-					setHeightAndOverflow()
-				});
-
-				// Update textarea when needed
-				$textarea.bind('updateHeight', function(){
-					updateHeight();
-				});
-
-				// Wait until DOM is ready to fix IE7+ stupid bug
-				$(function(){
-					updateHeight();
-				});
-				
-			});
-			
-		}
-	};
-
-	$.fn.flexible = function(method) {
-
-		// Method calling logic
-		if (methods[method]) {
-			return methods[method].apply(this, Array.prototype.slice.call(arguments, 1));
-		} else if (typeof method === 'object' || ! method) {
-			return methods.init.apply(this, arguments);
-		} else {
-			$.error('Method ' + method + ' does not exist on jQuery.flexible');
-		}
-
-	};
-
-})(jQuery);
\ No newline at end of file
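
Usage sketch (selector hypothetical): a single call wires up the auto-resize, and the custom updateHeight event lets code-driven value changes trigger a recalculation.

$('textarea.comment').flexible();
// after setting a value programmatically, ask for a resize:
$('textarea.comment').val('line one\nline two\nline three').trigger('updateHeight');
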
diff --git a/branch-1.2/ambari-web/vendor/scripts/jquery.jqprint-0.3.js b/branch-1.2/ambari-web/vendor/scripts/jquery.jqprint-0.3.js
deleted file mode 100644
index 3b01c47..0000000
--- a/branch-1.2/ambari-web/vendor/scripts/jquery.jqprint-0.3.js
+++ /dev/null
@@ -1,75 +0,0 @@
-// -----------------------------------------------------------------------
-// Eros Fratini - eros@recoding.it
-// jqprint 0.3
-//
-// - 19/06/2009 - some new implementations, added Opera support
-// - 11/05/2009 - first sketch
-//
-// Printing plug-in for jQuery, evolution of jPrintArea: http://plugins.jquery.com/project/jPrintArea
-// requires jQuery 1.3.x
-//
-// Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
-//------------------------------------------------------------------------
-
-(function($) {
-    var opt;
-
-    $.fn.jqprint = function (options) {
-        opt = $.extend({}, $.fn.jqprint.defaults, options);
-
-        var $element = (this instanceof jQuery) ? this : $(this);
-        
-        if (opt.operaSupport && $.browser.opera) 
-        { 
-            var tab = window.open("","jqPrint-preview");
-            tab.document.open();
-
-            var doc = tab.document;
-        }
-        else 
-        {
-            var $iframe = $("<iframe  />");
-        
-            if (!opt.debug) { $iframe.css({ position: "absolute", width: "0px", height: "0px", left: "-600px", top: "-600px" }); }
-
-            $iframe.appendTo("body");
-            var doc = $iframe[0].contentWindow.document;
-        }
-        
-        if (opt.importCSS)
-        {
-            if ($("link[media=print]").length > 0) 
-            {
-                $("link[media=print]").each( function() {
-                    doc.write("<link type='text/css' rel='stylesheet' href='" + $(this).attr("href") + "' media='print' />");
-                });
-            }
-            else 
-            {
-                $("link").each( function() {
-                    doc.write("<link type='text/css' rel='stylesheet' href='" + $(this).attr("href") + "' />");
-                });
-            }
-        }
-        
-        if (opt.printContainer) { doc.write($element.outer()); }
-        else { $element.each( function() { doc.write($(this).html()); }); }
-        
-        doc.close();
-        
-        (opt.operaSupport && $.browser.opera ? tab : $iframe[0].contentWindow).focus();
-        setTimeout( function() { (opt.operaSupport && $.browser.opera ? tab : $iframe[0].contentWindow).print(); if (tab) { tab.close(); } }, 1000);
-    }
-    
-    $.fn.jqprint.defaults = {
-		debug: false,
-		importCSS: true, 
-		printContainer: true,
-		operaSupport: true
-	};
-
-    // Thanks to 9__, found at http://users.livejournal.com/9__/380664.html
-    jQuery.fn.outer = function() {
-      return $($('<div></div>').html(this.clone())).html();
-    } 
-})(jQuery);
\ No newline at end of file
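
Usage sketch (selector hypothetical): by default the element's own markup is printed, with the page's stylesheets pulled into the hidden iframe because importCSS defaults to true.

$('#cluster-summary').jqprint();
// keep the iframe visible for inspection, and print children only:
$('#cluster-summary').jqprint({ debug: true, printContainer: false });
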
diff --git a/branch-1.2/ambari-web/vendor/scripts/jquery.periodic.js b/branch-1.2/ambari-web/vendor/scripts/jquery.periodic.js
deleted file mode 100644
index fadd31b..0000000
--- a/branch-1.2/ambari-web/vendor/scripts/jquery.periodic.js
+++ /dev/null
@@ -1,99 +0,0 @@
-/*!
- * jQuery periodic plugin
- *
- * Copyright 2010, Tom Anderson
- * Dual licensed under the MIT or GPL Version 2 licenses.
- *
- */
-
-jQuery.periodic = function (options, callback) {
-
-  // if the first argument is a function then assume the options aren't being passed
-  if (jQuery.isFunction(options)) {
-    callback = options;
-    options = {};
-  }
-
-  // Merge passed settings with default values
-  var settings = jQuery.extend({}, jQuery.periodic.defaults, {
-    ajax_complete : ajaxComplete,
-    increment     : increment,
-    reset         : reset,
-    cancel        : cancel
-  }, options);
-
-  // bookkeeping variables
-  settings.cur_period = settings.period;
-  settings.tid = false;
-  var prev_ajax_response = '';
-
-  run();
-
-  // return settings so user can tweak them externally
-  return settings;
-
-  // run (or restart if already running) the looping construct
-  function run() {
-    // clear/stop existing timer (multiple calls to run() won't result in multiple timers)
-    cancel();
-    // let it rip!
-    settings.tid = setTimeout(function() {
-      // set the context (this) for the callback to the settings object
-      callback.call(settings);
-
-      // compute the next value for cur_period
-      increment();
-
-      // queue up the next run
-      if(settings.tid)
-        run();
-    }, settings.cur_period);
-  }
-
-  // utility function for use with ajax calls
-  function ajaxComplete(xhr, status) {
-    if (status === 'success' && prev_ajax_response !== xhr.responseText) {
-      // reset the period whenever the response changes
-      prev_ajax_response = xhr.responseText;
-      reset();
-    }
-  }
-
-  // compute the next delay
-  function increment() {
-    settings.cur_period *= settings.decay;
-    if (settings.cur_period < settings.period) {
-      // don't let it drop below the minimum
-      reset();
-    } else if (settings.cur_period > settings.max_period) {
-      settings.cur_period = settings.max_period;
-      if (settings.on_max !== undefined) {
-        // call the user-supplied callback if we reach max_period
-        settings.on_max.call(settings);
-      }
-    }
-  }
-
-  function reset() {
-    settings.cur_period = settings.period;
-    // restart with the new timeout
-    run();
-  }
-
-  function cancel() {
-    clearTimeout(settings.tid);
-    settings.tid = null;
-  }
-
-  // other functions we might want to implement
-  function pause() {}
-  function resume() {}
-  function log() {}
-};
-
-jQuery.periodic.defaults = {
-    period       : 4000,      // 4 sec.
-    max_period   : 1800000,   // 30 min.
-    decay        : 1.5,       // time period multiplier
-    on_max       : undefined  // called if max_period is reached
-};
\ No newline at end of file
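Note on the file removed above: jQuery.periodic polls with a geometrically growing interval. A minimal usage sketch (the /status endpoint and the numbers are illustrative, not from this repo):

// start at 2s, multiply by decay=2 after each run, cap at 60s
var poller = jQuery.periodic({ period: 2000, decay: 2, max_period: 60000 }, function () {
  // `this` is the merged settings object that jQuery.periodic returns
  jQuery.ajax({ url: '/status', complete: this.ajax_complete });
});
// ajax_complete() calls reset() whenever the response body changes,
// snapping the interval back to the 2s minimum; cancel() stops polling.
poller.cancel();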
diff --git a/branch-1.2/ambari-web/vendor/scripts/jquery.timeago.js b/branch-1.2/ambari-web/vendor/scripts/jquery.timeago.js
deleted file mode 100644
index 2e8d29f..0000000
--- a/branch-1.2/ambari-web/vendor/scripts/jquery.timeago.js
+++ /dev/null
@@ -1,152 +0,0 @@
-/**
- * Timeago is a jQuery plugin that makes it easy to support automatically
- * updating fuzzy timestamps (e.g. "4 minutes ago" or "about 1 day ago").
- *
- * @name timeago
- * @version 0.11.4
- * @requires jQuery v1.2.3+
- * @author Ryan McGeary
- * @license MIT License - http://www.opensource.org/licenses/mit-license.php
- *
- * For usage and examples, visit:
- * http://timeago.yarp.com/
- *
- * Copyright (c) 2008-2012, Ryan McGeary (ryan -[at]- mcgeary [*dot*] org)
- */
-(function($) {
-  $.timeago = function(timestamp) {
-    if (timestamp instanceof Date) {
-      return inWords(timestamp);
-    } else if (typeof timestamp === "string") {
-      return inWords($.timeago.parse(timestamp));
-    } else if (typeof timestamp === "number") {
-      return inWords(new Date(timestamp));
-    } else {
-      return inWords($.timeago.datetime(timestamp));
-    }
-  };
-  var $t = $.timeago;
-
-  $.extend($.timeago, {
-    settings: {
-      refreshMillis: 60000,
-      allowFuture: false,
-      strings: {
-        prefixAgo: null,
-        prefixFromNow: null,
-        suffixAgo: "ago",
-        suffixFromNow: "from now",
-        seconds: "less than a minute",
-        minute: "about a minute",
-        minutes: "%d minutes",
-        hour: "about an hour",
-        hours: "about %d hours",
-        day: "a day",
-        days: "%d days",
-        month: "about a month",
-        months: "%d months",
-        year: "about a year",
-        years: "%d years",
-        wordSeparator: " ",
-        numbers: []
-      }
-    },
-    inWords: function(distanceMillis) {
-      var $l = this.settings.strings;
-      var prefix = $l.prefixAgo;
-      var suffix = $l.suffixAgo;
-      if (this.settings.allowFuture) {
-        if (distanceMillis < 0) {
-          prefix = $l.prefixFromNow;
-          suffix = $l.suffixFromNow;
-        }
-      }
-
-      var seconds = Math.abs(distanceMillis) / 1000;
-      var minutes = seconds / 60;
-      var hours = minutes / 60;
-      var days = hours / 24;
-      var years = days / 365;
-
-      function substitute(stringOrFunction, number) {
-        var string = $.isFunction(stringOrFunction) ? stringOrFunction(number, distanceMillis) : stringOrFunction;
-        var value = ($l.numbers && $l.numbers[number]) || number;
-        return string.replace(/%d/i, value);
-      }
-
-      var words = seconds < 45 && substitute($l.seconds, Math.round(seconds)) ||
-        seconds < 90 && substitute($l.minute, 1) ||
-        minutes < 45 && substitute($l.minutes, Math.round(minutes)) ||
-        minutes < 90 && substitute($l.hour, 1) ||
-        hours < 24 && substitute($l.hours, Math.round(hours)) ||
-        hours < 42 && substitute($l.day, 1) ||
-        days < 30 && substitute($l.days, Math.round(days)) ||
-        days < 45 && substitute($l.month, 1) ||
-        days < 365 && substitute($l.months, Math.round(days / 30)) ||
-        years < 1.5 && substitute($l.year, 1) ||
-        substitute($l.years, Math.round(years));
-
-      var separator = $l.wordSeparator === undefined ?  " " : $l.wordSeparator;
-      return $.trim([prefix, words, suffix].join(separator));
-    },
-    parse: function(iso8601) {
-      var s = $.trim(iso8601);
-      s = s.replace(/\.\d+/,""); // remove milliseconds
-      s = s.replace(/-/,"/").replace(/-/,"/");
-      s = s.replace(/T/," ").replace(/Z/," UTC");
-      s = s.replace(/([\+\-]\d\d)\:?(\d\d)/," $1$2"); // -04:00 -> -0400
-      return new Date(s);
-    },
-    datetime: function(elem) {
-      var iso8601 = $t.isTime(elem) ? $(elem).attr("datetime") : $(elem).attr("title");
-      return $t.parse(iso8601);
-    },
-    isTime: function(elem) {
-      // jQuery's `is()` doesn't play well with HTML5 in IE
-      return $(elem).get(0).tagName.toLowerCase() === "time"; // $(elem).is("time");
-    }
-  });
-
-  $.fn.timeago = function() {
-    var self = this;
-    self.each(refresh);
-
-    var $s = $t.settings;
-    if ($s.refreshMillis > 0) {
-      setInterval(function() { self.each(refresh); }, $s.refreshMillis);
-    }
-    return self;
-  };
-
-  function refresh() {
-    var data = prepareData(this);
-    if (!isNaN(data.datetime)) {
-      $(this).text(inWords(data.datetime));
-    }
-    return this;
-  }
-
-  function prepareData(element) {
-    element = $(element);
-    if (!element.data("timeago")) {
-      element.data("timeago", { datetime: $t.datetime(element) });
-      var text = $.trim(element.text());
-      if (text.length > 0 && !($t.isTime(element) && element.attr("title"))) {
-        element.attr("title", text);
-      }
-    }
-    return element.data("timeago");
-  }
-
-  function inWords(date) {
-    return $t.inWords(distance(date));
-  }
-
-  function distance(date) {
-    return (new Date().getTime() - date.getTime());
-  }
-
-  // fix for IE6 suckage
-  document.createElement("abbr");
-  document.createElement("time");
-}(jQuery));
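The timeago plugin deleted here is markup-driven; a hedged sketch of typical use (selector and timestamp invented):

// assumed markup: <abbr class="timeago" title="2013-04-01T12:00:00Z">Apr 1</abbr>
$('abbr.timeago').timeago();               // renders e.g. "4 days ago", re-rendered every 60s
$.timeago(new Date(Date.now() - 60000));   // direct call: "about a minute ago"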
diff --git a/branch-1.2/ambari-web/vendor/scripts/jquery.ui.core.js b/branch-1.2/ambari-web/vendor/scripts/jquery.ui.core.js
deleted file mode 100644
index 1285a6d..0000000
--- a/branch-1.2/ambari-web/vendor/scripts/jquery.ui.core.js
+++ /dev/null
@@ -1,334 +0,0 @@
-/*!
- * jQuery UI 1.8.23
- *
- * Copyright 2012, AUTHORS.txt (http://jqueryui.com/about)
- * Dual licensed under the MIT or GPL Version 2 licenses.
- * http://jquery.org/license
- *
- * http://docs.jquery.com/UI
- */
-(function( $, undefined ) {
-
-// prevent duplicate loading
-// this is only a problem because we proxy existing functions
-// and we don't want to double proxy them
-$.ui = $.ui || {};
-if ( $.ui.version ) {
-	return;
-}
-
-$.extend( $.ui, {
-	version: "1.8.23",
-
-	keyCode: {
-		ALT: 18,
-		BACKSPACE: 8,
-		CAPS_LOCK: 20,
-		COMMA: 188,
-		COMMAND: 91,
-		COMMAND_LEFT: 91, // COMMAND
-		COMMAND_RIGHT: 93,
-		CONTROL: 17,
-		DELETE: 46,
-		DOWN: 40,
-		END: 35,
-		ENTER: 13,
-		ESCAPE: 27,
-		HOME: 36,
-		INSERT: 45,
-		LEFT: 37,
-		MENU: 93, // COMMAND_RIGHT
-		NUMPAD_ADD: 107,
-		NUMPAD_DECIMAL: 110,
-		NUMPAD_DIVIDE: 111,
-		NUMPAD_ENTER: 108,
-		NUMPAD_MULTIPLY: 106,
-		NUMPAD_SUBTRACT: 109,
-		PAGE_DOWN: 34,
-		PAGE_UP: 33,
-		PERIOD: 190,
-		RIGHT: 39,
-		SHIFT: 16,
-		SPACE: 32,
-		TAB: 9,
-		UP: 38,
-		WINDOWS: 91 // COMMAND
-	}
-});
-
-// plugins
-$.fn.extend({
-	propAttr: $.fn.prop || $.fn.attr,
-
-	_focus: $.fn.focus,
-	focus: function( delay, fn ) {
-		return typeof delay === "number" ?
-			this.each(function() {
-				var elem = this;
-				setTimeout(function() {
-					$( elem ).focus();
-					if ( fn ) {
-						fn.call( elem );
-					}
-				}, delay );
-			}) :
-			this._focus.apply( this, arguments );
-	},
-
-	scrollParent: function() {
-		var scrollParent;
-		if (($.browser.msie && (/(static|relative)/).test(this.css('position'))) || (/absolute/).test(this.css('position'))) {
-			scrollParent = this.parents().filter(function() {
-				return (/(relative|absolute|fixed)/).test($.curCSS(this,'position',1)) && (/(auto|scroll)/).test($.curCSS(this,'overflow',1)+$.curCSS(this,'overflow-y',1)+$.curCSS(this,'overflow-x',1));
-			}).eq(0);
-		} else {
-			scrollParent = this.parents().filter(function() {
-				return (/(auto|scroll)/).test($.curCSS(this,'overflow',1)+$.curCSS(this,'overflow-y',1)+$.curCSS(this,'overflow-x',1));
-			}).eq(0);
-		}
-
-		return (/fixed/).test(this.css('position')) || !scrollParent.length ? $(document) : scrollParent;
-	},
-
-	zIndex: function( zIndex ) {
-		if ( zIndex !== undefined ) {
-			return this.css( "zIndex", zIndex );
-		}
-
-		if ( this.length ) {
-			var elem = $( this[ 0 ] ), position, value;
-			while ( elem.length && elem[ 0 ] !== document ) {
-				// Ignore z-index if position is set to a value where z-index is ignored by the browser
-				// This makes behavior of this function consistent across browsers
-				// WebKit always returns auto if the element is positioned
-				position = elem.css( "position" );
-				if ( position === "absolute" || position === "relative" || position === "fixed" ) {
-					// IE returns 0 when zIndex is not specified
-					// other browsers return a string
-					// we ignore the case of nested elements with an explicit value of 0
-					// <div style="z-index: -10;"><div style="z-index: 0;"></div></div>
-					value = parseInt( elem.css( "zIndex" ), 10 );
-					if ( !isNaN( value ) && value !== 0 ) {
-						return value;
-					}
-				}
-				elem = elem.parent();
-			}
-		}
-
-		return 0;
-	},
-
-	disableSelection: function() {
-		return this.bind( ( $.support.selectstart ? "selectstart" : "mousedown" ) +
-			".ui-disableSelection", function( event ) {
-				event.preventDefault();
-			});
-	},
-
-	enableSelection: function() {
-		return this.unbind( ".ui-disableSelection" );
-	}
-});
-
-// support: jQuery <1.8
-if ( !$( "<a>" ).outerWidth( 1 ).jquery ) {
-	$.each( [ "Width", "Height" ], function( i, name ) {
-		var side = name === "Width" ? [ "Left", "Right" ] : [ "Top", "Bottom" ],
-			type = name.toLowerCase(),
-			orig = {
-				innerWidth: $.fn.innerWidth,
-				innerHeight: $.fn.innerHeight,
-				outerWidth: $.fn.outerWidth,
-				outerHeight: $.fn.outerHeight
-			};
-
-		function reduce( elem, size, border, margin ) {
-			$.each( side, function() {
-				size -= parseFloat( $.curCSS( elem, "padding" + this, true) ) || 0;
-				if ( border ) {
-					size -= parseFloat( $.curCSS( elem, "border" + this + "Width", true) ) || 0;
-				}
-				if ( margin ) {
-					size -= parseFloat( $.curCSS( elem, "margin" + this, true) ) || 0;
-				}
-			});
-			return size;
-		}
-
-		$.fn[ "inner" + name ] = function( size ) {
-			if ( size === undefined ) {
-				return orig[ "inner" + name ].call( this );
-			}
-
-			return this.each(function() {
-				$( this ).css( type, reduce( this, size ) + "px" );
-			});
-		};
-
-		$.fn[ "outer" + name] = function( size, margin ) {
-			if ( typeof size !== "number" ) {
-				return orig[ "outer" + name ].call( this, size );
-			}
-
-			return this.each(function() {
-				$( this).css( type, reduce( this, size, true, margin ) + "px" );
-			});
-		};
-	});
-}
-
-// selectors
-function focusable( element, isTabIndexNotNaN ) {
-	var nodeName = element.nodeName.toLowerCase();
-	if ( "area" === nodeName ) {
-		var map = element.parentNode,
-			mapName = map.name,
-			img;
-		if ( !element.href || !mapName || map.nodeName.toLowerCase() !== "map" ) {
-			return false;
-		}
-		img = $( "img[usemap=#" + mapName + "]" )[0];
-		return !!img && visible( img );
-	}
-	return ( /input|select|textarea|button|object/.test( nodeName )
-		? !element.disabled
-		: "a" == nodeName
-			? element.href || isTabIndexNotNaN
-			: isTabIndexNotNaN)
-		// the element and all of its ancestors must be visible
-		&& visible( element );
-}
-
-function visible( element ) {
-	return !$( element ).parents().andSelf().filter(function() {
-		return $.curCSS( this, "visibility" ) === "hidden" ||
-			$.expr.filters.hidden( this );
-	}).length;
-}
-
-$.extend( $.expr[ ":" ], {
-	data: $.expr.createPseudo ?
-		$.expr.createPseudo(function( dataName ) {
-			return function( elem ) {
-				return !!$.data( elem, dataName );
-			};
-		}) :
-		// support: jQuery <1.8
-		function( elem, i, match ) {
-			return !!$.data( elem, match[ 3 ] );
-		},
-
-	focusable: function( element ) {
-		return focusable( element, !isNaN( $.attr( element, "tabindex" ) ) );
-	},
-
-	tabbable: function( element ) {
-		var tabIndex = $.attr( element, "tabindex" ),
-			isTabIndexNaN = isNaN( tabIndex );
-		return ( isTabIndexNaN || tabIndex >= 0 ) && focusable( element, !isTabIndexNaN );
-	}
-});
-
-// support
-$(function() {
-	var body = document.body,
-		div = body.appendChild( div = document.createElement( "div" ) );
-
-	// access offsetHeight before setting the style to prevent a layout bug
-	// in IE 9 which causes the element to continue to take up space even
-	// after it is removed from the DOM (#8026)
-	div.offsetHeight;
-
-	$.extend( div.style, {
-		minHeight: "100px",
-		height: "auto",
-		padding: 0,
-		borderWidth: 0
-	});
-
-	$.support.minHeight = div.offsetHeight === 100;
-	$.support.selectstart = "onselectstart" in div;
-
-	// set display to none to avoid a layout bug in IE
-	// http://dev.jquery.com/ticket/4014
-	body.removeChild( div ).style.display = "none";
-});
-
-// jQuery <1.4.3 uses curCSS, in 1.4.3 - 1.7.2 curCSS = css, 1.8+ only has css
-if ( !$.curCSS ) {
-	$.curCSS = $.css;
-}
-
-
-
-
-
-// deprecated
-$.extend( $.ui, {
-	// $.ui.plugin is deprecated.  Use the proxy pattern instead.
-	plugin: {
-		add: function( module, option, set ) {
-			var proto = $.ui[ module ].prototype;
-			for ( var i in set ) {
-				proto.plugins[ i ] = proto.plugins[ i ] || [];
-				proto.plugins[ i ].push( [ option, set[ i ] ] );
-			}
-		},
-		call: function( instance, name, args ) {
-			var set = instance.plugins[ name ];
-			if ( !set || !instance.element[ 0 ].parentNode ) {
-				return;
-			}
-	
-			for ( var i = 0; i < set.length; i++ ) {
-				if ( instance.options[ set[ i ][ 0 ] ] ) {
-					set[ i ][ 1 ].apply( instance.element, args );
-				}
-			}
-		}
-	},
-	
-	// will be deprecated when we switch to jQuery 1.4 - use jQuery.contains()
-	contains: function( a, b ) {
-		return document.compareDocumentPosition ?
-			a.compareDocumentPosition( b ) & 16 :
-			a !== b && a.contains( b );
-	},
-	
-	// only used by resizable
-	hasScroll: function( el, a ) {
-	
-		//If overflow is hidden, the element might have extra content, but the user wants to hide it
-		if ( $( el ).css( "overflow" ) === "hidden") {
-			return false;
-		}
-	
-		var scroll = ( a && a === "left" ) ? "scrollLeft" : "scrollTop",
-			has = false;
-	
-		if ( el[ scroll ] > 0 ) {
-			return true;
-		}
-	
-		// TODO: determine which cases actually cause this to happen
-		// if the element doesn't have the scroll set, see if it's possible to
-		// set the scroll
-		el[ scroll ] = 1;
-		has = ( el[ scroll ] > 0 );
-		el[ scroll ] = 0;
-		return has;
-	},
-	
-	// these are odd functions, fix the API or move into individual plugins
-	isOverAxis: function( x, reference, size ) {
-		//Determines whether the x coordinate is over the "b" element axis
-		return ( x > reference ) && ( x < ( reference + size ) );
-	},
-	isOver: function( y, x, top, left, height, width ) {
-		//Determines whether the x, y coordinates are over the "b" element
-		return $.ui.isOverAxis( y, top, height ) && $.ui.isOverAxis( x, left, width );
-	}
-});
-
-})( jQuery );
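jquery.ui.core.js mostly feeds the other UI modules, but a few pieces are directly callable; a sketch under invented selectors:

$('#search').on('keydown', function (e) {
  if (e.keyCode === $.ui.keyCode.ESCAPE) {   // readable alias for 27
    $(this).val('');
  }
});
$('#dialog :tabbable').first().focus();      // custom selector defined above
$('#tree').disableSelection();               // blocks text selection during drag interactions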
diff --git a/branch-1.2/ambari-web/vendor/scripts/jquery.ui.custom-effects.js b/branch-1.2/ambari-web/vendor/scripts/jquery.ui.custom-effects.js
deleted file mode 100644
index 191d306..0000000
--- a/branch-1.2/ambari-web/vendor/scripts/jquery.ui.custom-effects.js
+++ /dev/null
@@ -1,2224 +0,0 @@
-/*! jQuery UI - v1.9.0 - 2012-10-22
-* http://jqueryui.com
-* Includes: jquery.ui.effect.js, jquery.ui.effect-blind.js, jquery.ui.effect-bounce.js, jquery.ui.effect-clip.js, jquery.ui.effect-drop.js, jquery.ui.effect-explode.js, jquery.ui.effect-fade.js, jquery.ui.effect-fold.js, jquery.ui.effect-highlight.js, jquery.ui.effect-pulsate.js, jquery.ui.effect-scale.js, jquery.ui.effect-shake.js, jquery.ui.effect-slide.js, jquery.ui.effect-transfer.js
-* Copyright (c) 2012 jQuery Foundation and other contributors Licensed MIT */
-
-;(jQuery.effects || (function($, undefined) {
-
-var backCompat = $.uiBackCompat !== false,
-	// prefix used for storing data on .data()
-	dataSpace = "ui-effects-";
-
-$.effects = {
-	effect: {}
-};
-
-/*!
- * jQuery Color Animations v2.0.0
- * http://jquery.com/
- *
- * Copyright 2012 jQuery Foundation and other contributors
- * Released under the MIT license.
- * http://jquery.org/license
- *
- * Date: Mon Aug 13 13:41:02 2012 -0500
- */
-(function( jQuery, undefined ) {
-
-	var stepHooks = "backgroundColor borderBottomColor borderLeftColor borderRightColor borderTopColor color columnRuleColor outlineColor textDecorationColor textEmphasisColor".split(" "),
-
-	// plusequals test for += 100 -= 100
-	rplusequals = /^([\-+])=\s*(\d+\.?\d*)/,
-	// a set of RE's that can match strings and generate color tuples.
-	stringParsers = [{
-			re: /rgba?\(\s*(\d{1,3})\s*,\s*(\d{1,3})\s*,\s*(\d{1,3})\s*(?:,\s*(\d+(?:\.\d+)?)\s*)?\)/,
-			parse: function( execResult ) {
-				return [
-					execResult[ 1 ],
-					execResult[ 2 ],
-					execResult[ 3 ],
-					execResult[ 4 ]
-				];
-			}
-		}, {
-			re: /rgba?\(\s*(\d+(?:\.\d+)?)\%\s*,\s*(\d+(?:\.\d+)?)\%\s*,\s*(\d+(?:\.\d+)?)\%\s*(?:,\s*(\d+(?:\.\d+)?)\s*)?\)/,
-			parse: function( execResult ) {
-				return [
-					execResult[ 1 ] * 2.55,
-					execResult[ 2 ] * 2.55,
-					execResult[ 3 ] * 2.55,
-					execResult[ 4 ]
-				];
-			}
-		}, {
-			// this regex ignores A-F because it's compared against an already lowercased string
-			re: /#([a-f0-9]{2})([a-f0-9]{2})([a-f0-9]{2})/,
-			parse: function( execResult ) {
-				return [
-					parseInt( execResult[ 1 ], 16 ),
-					parseInt( execResult[ 2 ], 16 ),
-					parseInt( execResult[ 3 ], 16 )
-				];
-			}
-		}, {
-			// this regex ignores A-F because it's compared against an already lowercased string
-			re: /#([a-f0-9])([a-f0-9])([a-f0-9])/,
-			parse: function( execResult ) {
-				return [
-					parseInt( execResult[ 1 ] + execResult[ 1 ], 16 ),
-					parseInt( execResult[ 2 ] + execResult[ 2 ], 16 ),
-					parseInt( execResult[ 3 ] + execResult[ 3 ], 16 )
-				];
-			}
-		}, {
-			re: /hsla?\(\s*(\d+(?:\.\d+)?)\s*,\s*(\d+(?:\.\d+)?)\%\s*,\s*(\d+(?:\.\d+)?)\%\s*(?:,\s*(\d+(?:\.\d+)?)\s*)?\)/,
-			space: "hsla",
-			parse: function( execResult ) {
-				return [
-					execResult[ 1 ],
-					execResult[ 2 ] / 100,
-					execResult[ 3 ] / 100,
-					execResult[ 4 ]
-				];
-			}
-		}],
-
-	// jQuery.Color( )
-	color = jQuery.Color = function( color, green, blue, alpha ) {
-		return new jQuery.Color.fn.parse( color, green, blue, alpha );
-	},
-	spaces = {
-		rgba: {
-			props: {
-				red: {
-					idx: 0,
-					type: "byte"
-				},
-				green: {
-					idx: 1,
-					type: "byte"
-				},
-				blue: {
-					idx: 2,
-					type: "byte"
-				}
-			}
-		},
-
-		hsla: {
-			props: {
-				hue: {
-					idx: 0,
-					type: "degrees"
-				},
-				saturation: {
-					idx: 1,
-					type: "percent"
-				},
-				lightness: {
-					idx: 2,
-					type: "percent"
-				}
-			}
-		}
-	},
-	propTypes = {
-		"byte": {
-			floor: true,
-			max: 255
-		},
-		"percent": {
-			max: 1
-		},
-		"degrees": {
-			mod: 360,
-			floor: true
-		}
-	},
-	support = color.support = {},
-
-	// element for support tests
-	supportElem = jQuery( "<p>" )[ 0 ],
-
-	// colors = jQuery.Color.names
-	colors,
-
-	// local aliases of functions called often
-	each = jQuery.each;
-
-// determine rgba support immediately
-supportElem.style.cssText = "background-color:rgba(1,1,1,.5)";
-support.rgba = supportElem.style.backgroundColor.indexOf( "rgba" ) > -1;
-
-// define cache name and alpha properties
-// for rgba and hsla spaces
-each( spaces, function( spaceName, space ) {
-	space.cache = "_" + spaceName;
-	space.props.alpha = {
-		idx: 3,
-		type: "percent",
-		def: 1
-	};
-});
-
-function clamp( value, prop, allowEmpty ) {
-	var type = propTypes[ prop.type ] || {};
-
-	if ( value == null ) {
-		return (allowEmpty || !prop.def) ? null : prop.def;
-	}
-
-	// ~~ is a short way of doing floor for positive numbers
-	value = type.floor ? ~~value : parseFloat( value );
-
-	// IE will pass in empty strings as value for alpha,
-	// which will hit this case
-	if ( isNaN( value ) ) {
-		return prop.def;
-	}
-
-	if ( type.mod ) {
-		// we add mod before modding to make sure that negative values
-		// get converted properly: -10 -> 350
-		return (value + type.mod) % type.mod;
-	}
-
-	// for now all property types without mod have min and max
-	return 0 > value ? 0 : type.max < value ? type.max : value;
-}
-
-function stringParse( string ) {
-	var inst = color(),
-		rgba = inst._rgba = [];
-
-	string = string.toLowerCase();
-
-	each( stringParsers, function( i, parser ) {
-		var parsed,
-			match = parser.re.exec( string ),
-			values = match && parser.parse( match ),
-			spaceName = parser.space || "rgba";
-
-		if ( values ) {
-			parsed = inst[ spaceName ]( values );
-
-			// if this was an rgba parse the assignment might happen twice
-			// oh well....
-			inst[ spaces[ spaceName ].cache ] = parsed[ spaces[ spaceName ].cache ];
-			rgba = inst._rgba = parsed._rgba;
-
-			// exit each( stringParsers ) here because we matched
-			return false;
-		}
-	});
-
-	// Found a stringParser that handled it
-	if ( rgba.length ) {
-
-		// if this came from a parsed string, force "transparent" when alpha is 0
-		// chrome, (and maybe others) return "transparent" as rgba(0,0,0,0)
-		if ( rgba.join() === "0,0,0,0" ) {
-			jQuery.extend( rgba, colors.transparent );
-		}
-		return inst;
-	}
-
-	// named colors
-	return colors[ string ];
-}
-
-color.fn = jQuery.extend( color.prototype, {
-	parse: function( red, green, blue, alpha ) {
-		if ( red === undefined ) {
-			this._rgba = [ null, null, null, null ];
-			return this;
-		}
-		if ( red.jquery || red.nodeType ) {
-			red = jQuery( red ).css( green );
-			green = undefined;
-		}
-
-		var inst = this,
-			type = jQuery.type( red ),
-			rgba = this._rgba = [],
-			source;
-
-		// more than 1 argument specified - assume ( red, green, blue, alpha )
-		if ( green !== undefined ) {
-			red = [ red, green, blue, alpha ];
-			type = "array";
-		}
-
-		if ( type === "string" ) {
-			return this.parse( stringParse( red ) || colors._default );
-		}
-
-		if ( type === "array" ) {
-			each( spaces.rgba.props, function( key, prop ) {
-				rgba[ prop.idx ] = clamp( red[ prop.idx ], prop );
-			});
-			return this;
-		}
-
-		if ( type === "object" ) {
-			if ( red instanceof color ) {
-				each( spaces, function( spaceName, space ) {
-					if ( red[ space.cache ] ) {
-						inst[ space.cache ] = red[ space.cache ].slice();
-					}
-				});
-			} else {
-				each( spaces, function( spaceName, space ) {
-					var cache = space.cache;
-					each( space.props, function( key, prop ) {
-
-						// if the cache doesn't exist, and we know how to convert
-						if ( !inst[ cache ] && space.to ) {
-
-							// if the value was null, we don't need to copy it
-							// if the key was alpha, we don't need to copy it either
-							if ( key === "alpha" || red[ key ] == null ) {
-								return;
-							}
-							inst[ cache ] = space.to( inst._rgba );
-						}
-
-						// this is the only case where we allow nulls for ALL properties.
-						// call clamp with alwaysAllowEmpty
-						inst[ cache ][ prop.idx ] = clamp( red[ key ], prop, true );
-					});
-
-					// everything defined but alpha?
-					if ( inst[ cache ] && $.inArray( null, inst[ cache ].slice( 0, 3 ) ) < 0 ) {
-						// use the default of 1
-						inst[ cache ][ 3 ] = 1;
-						if ( space.from ) {
-							inst._rgba = space.from( inst[ cache ] );
-						}
-					}
-				});
-			}
-			return this;
-		}
-	},
-	is: function( compare ) {
-		var is = color( compare ),
-			same = true,
-			inst = this;
-
-		each( spaces, function( _, space ) {
-			var localCache,
-				isCache = is[ space.cache ];
-			if (isCache) {
-				localCache = inst[ space.cache ] || space.to && space.to( inst._rgba ) || [];
-				each( space.props, function( _, prop ) {
-					if ( isCache[ prop.idx ] != null ) {
-						same = ( isCache[ prop.idx ] === localCache[ prop.idx ] );
-						return same;
-					}
-				});
-			}
-			return same;
-		});
-		return same;
-	},
-	_space: function() {
-		var used = [],
-			inst = this;
-		each( spaces, function( spaceName, space ) {
-			if ( inst[ space.cache ] ) {
-				used.push( spaceName );
-			}
-		});
-		return used.pop();
-	},
-	transition: function( other, distance ) {
-		var end = color( other ),
-			spaceName = end._space(),
-			space = spaces[ spaceName ],
-			startColor = this.alpha() === 0 ? color( "transparent" ) : this,
-			start = startColor[ space.cache ] || space.to( startColor._rgba ),
-			result = start.slice();
-
-		end = end[ space.cache ];
-		each( space.props, function( key, prop ) {
-			var index = prop.idx,
-				startValue = start[ index ],
-				endValue = end[ index ],
-				type = propTypes[ prop.type ] || {};
-
-			// if null, don't override start value
-			if ( endValue === null ) {
-				return;
-			}
-			// if null - use end
-			if ( startValue === null ) {
-				result[ index ] = endValue;
-			} else {
-				if ( type.mod ) {
-					if ( endValue - startValue > type.mod / 2 ) {
-						startValue += type.mod;
-					} else if ( startValue - endValue > type.mod / 2 ) {
-						startValue -= type.mod;
-					}
-				}
-				result[ index ] = clamp( ( endValue - startValue ) * distance + startValue, prop );
-			}
-		});
-		return this[ spaceName ]( result );
-	},
-	blend: function( opaque ) {
-		// if we are already opaque - return ourself
-		if ( this._rgba[ 3 ] === 1 ) {
-			return this;
-		}
-
-		var rgb = this._rgba.slice(),
-			a = rgb.pop(),
-			blend = color( opaque )._rgba;
-
-		return color( jQuery.map( rgb, function( v, i ) {
-			return ( 1 - a ) * blend[ i ] + a * v;
-		}));
-	},
-	toRgbaString: function() {
-		var prefix = "rgba(",
-			rgba = jQuery.map( this._rgba, function( v, i ) {
-				return v == null ? ( i > 2 ? 1 : 0 ) : v;
-			});
-
-		if ( rgba[ 3 ] === 1 ) {
-			rgba.pop();
-			prefix = "rgb(";
-		}
-
-		return prefix + rgba.join() + ")";
-	},
-	toHslaString: function() {
-		var prefix = "hsla(",
-			hsla = jQuery.map( this.hsla(), function( v, i ) {
-				if ( v == null ) {
-					v = i > 2 ? 1 : 0;
-				}
-
-				// catch 1 and 2
-				if ( i && i < 3 ) {
-					v = Math.round( v * 100 ) + "%";
-				}
-				return v;
-			});
-
-		if ( hsla[ 3 ] === 1 ) {
-			hsla.pop();
-			prefix = "hsl(";
-		}
-		return prefix + hsla.join() + ")";
-	},
-	toHexString: function( includeAlpha ) {
-		var rgba = this._rgba.slice(),
-			alpha = rgba.pop();
-
-		if ( includeAlpha ) {
-			rgba.push( ~~( alpha * 255 ) );
-		}
-
-		return "#" + jQuery.map( rgba, function( v, i ) {
-
-			// default to 0 when nulls exist
-			v = ( v || 0 ).toString( 16 );
-			return v.length === 1 ? "0" + v : v;
-		}).join("");
-	},
-	toString: function() {
-		return this._rgba[ 3 ] === 0 ? "transparent" : this.toRgbaString();
-	}
-});
-color.fn.parse.prototype = color.fn;
-
-// hsla conversions adapted from:
-// https://code.google.com/p/maashaack/source/browse/packages/graphics/trunk/src/graphics/colors/HUE2RGB.as?r=5021
-
-function hue2rgb( p, q, h ) {
-	h = ( h + 1 ) % 1;
-	if ( h * 6 < 1 ) {
-		return p + (q - p) * h * 6;
-	}
-	if ( h * 2 < 1) {
-		return q;
-	}
-	if ( h * 3 < 2 ) {
-		return p + (q - p) * ((2/3) - h) * 6;
-	}
-	return p;
-}
-
-spaces.hsla.to = function ( rgba ) {
-	if ( rgba[ 0 ] == null || rgba[ 1 ] == null || rgba[ 2 ] == null ) {
-		return [ null, null, null, rgba[ 3 ] ];
-	}
-	var r = rgba[ 0 ] / 255,
-		g = rgba[ 1 ] / 255,
-		b = rgba[ 2 ] / 255,
-		a = rgba[ 3 ],
-		max = Math.max( r, g, b ),
-		min = Math.min( r, g, b ),
-		diff = max - min,
-		add = max + min,
-		l = add * 0.5,
-		h, s;
-
-	if ( min === max ) {
-		h = 0;
-	} else if ( r === max ) {
-		h = ( 60 * ( g - b ) / diff ) + 360;
-	} else if ( g === max ) {
-		h = ( 60 * ( b - r ) / diff ) + 120;
-	} else {
-		h = ( 60 * ( r - g ) / diff ) + 240;
-	}
-
-	if ( l === 0 || l === 1 ) {
-		s = l;
-	} else if ( l <= 0.5 ) {
-		s = diff / add;
-	} else {
-		s = diff / ( 2 - add );
-	}
-	return [ Math.round(h) % 360, s, l, a == null ? 1 : a ];
-};
-
-spaces.hsla.from = function ( hsla ) {
-	if ( hsla[ 0 ] == null || hsla[ 1 ] == null || hsla[ 2 ] == null ) {
-		return [ null, null, null, hsla[ 3 ] ];
-	}
-	var h = hsla[ 0 ] / 360,
-		s = hsla[ 1 ],
-		l = hsla[ 2 ],
-		a = hsla[ 3 ],
-		q = l <= 0.5 ? l * ( 1 + s ) : l + s - l * s,
-		p = 2 * l - q,
-		r, g, b;
-
-	return [
-		Math.round( hue2rgb( p, q, h + ( 1 / 3 ) ) * 255 ),
-		Math.round( hue2rgb( p, q, h ) * 255 ),
-		Math.round( hue2rgb( p, q, h - ( 1 / 3 ) ) * 255 ),
-		a
-	];
-};
-
-
-each( spaces, function( spaceName, space ) {
-	var props = space.props,
-		cache = space.cache,
-		to = space.to,
-		from = space.from;
-
-	// makes rgba() and hsla()
-	color.fn[ spaceName ] = function( value ) {
-
-		// generate a cache for this space if it doesn't exist
-		if ( to && !this[ cache ] ) {
-			this[ cache ] = to( this._rgba );
-		}
-		if ( value === undefined ) {
-			return this[ cache ].slice();
-		}
-
-		var ret,
-			type = jQuery.type( value ),
-			arr = ( type === "array" || type === "object" ) ? value : arguments,
-			local = this[ cache ].slice();
-
-		each( props, function( key, prop ) {
-			var val = arr[ type === "object" ? key : prop.idx ];
-			if ( val == null ) {
-				val = local[ prop.idx ];
-			}
-			local[ prop.idx ] = clamp( val, prop );
-		});
-
-		if ( from ) {
-			ret = color( from( local ) );
-			ret[ cache ] = local;
-			return ret;
-		} else {
-			return color( local );
-		}
-	};
-
-	// makes red() green() blue() alpha() hue() saturation() lightness()
-	each( props, function( key, prop ) {
-		// alpha is included in more than one space
-		if ( color.fn[ key ] ) {
-			return;
-		}
-		color.fn[ key ] = function( value ) {
-			var vtype = jQuery.type( value ),
-				fn = ( key === "alpha" ? ( this._hsla ? "hsla" : "rgba" ) : spaceName ),
-				local = this[ fn ](),
-				cur = local[ prop.idx ],
-				match;
-
-			if ( vtype === "undefined" ) {
-				return cur;
-			}
-
-			if ( vtype === "function" ) {
-				value = value.call( this, cur );
-				vtype = jQuery.type( value );
-			}
-			if ( value == null && prop.empty ) {
-				return this;
-			}
-			if ( vtype === "string" ) {
-				match = rplusequals.exec( value );
-				if ( match ) {
-					value = cur + parseFloat( match[ 2 ] ) * ( match[ 1 ] === "+" ? 1 : -1 );
-				}
-			}
-			local[ prop.idx ] = value;
-			return this[ fn ]( local );
-		};
-	});
-});
-
-// add .fx.step functions
-each( stepHooks, function( i, hook ) {
-	jQuery.cssHooks[ hook ] = {
-		set: function( elem, value ) {
-			var parsed, curElem,
-				backgroundColor = "";
-
-			if ( jQuery.type( value ) !== "string" || ( parsed = stringParse( value ) ) ) {
-				value = color( parsed || value );
-				if ( !support.rgba && value._rgba[ 3 ] !== 1 ) {
-					curElem = hook === "backgroundColor" ? elem.parentNode : elem;
-					while (
-						(backgroundColor === "" || backgroundColor === "transparent") &&
-						curElem && curElem.style
-					) {
-						try {
-							backgroundColor = jQuery.css( curElem, "backgroundColor" );
-							curElem = curElem.parentNode;
-						} catch ( e ) {
-						}
-					}
-
-					value = value.blend( backgroundColor && backgroundColor !== "transparent" ?
-						backgroundColor :
-						"_default" );
-				}
-
-				value = value.toRgbaString();
-			}
-			try {
-				elem.style[ hook ] = value;
-			} catch( value ) {
-				// wrapped to prevent IE from throwing errors on "invalid" values like 'auto' or 'inherit'
-			}
-		}
-	};
-	jQuery.fx.step[ hook ] = function( fx ) {
-		if ( !fx.colorInit ) {
-			fx.start = color( fx.elem, hook );
-			fx.end = color( fx.end );
-			fx.colorInit = true;
-		}
-		jQuery.cssHooks[ hook ].set( fx.elem, fx.start.transition( fx.end, fx.pos ) );
-	};
-});
-
-jQuery.cssHooks.borderColor = {
-	expand: function( value ) {
-		var expanded = {};
-
-		each( [ "Top", "Right", "Bottom", "Left" ], function( i, part ) {
-			expanded[ "border" + part + "Color" ] = value;
-		});
-		return expanded;
-	}
-};
-
-// Basic color names only.
-// Usage of any of the other color names requires adding yourself or including
-// jquery.color.svg-names.js.
-colors = jQuery.Color.names = {
-	// 4.1. Basic color keywords
-	aqua: "#00ffff",
-	black: "#000000",
-	blue: "#0000ff",
-	fuchsia: "#ff00ff",
-	gray: "#808080",
-	green: "#008000",
-	lime: "#00ff00",
-	maroon: "#800000",
-	navy: "#000080",
-	olive: "#808000",
-	purple: "#800080",
-	red: "#ff0000",
-	silver: "#c0c0c0",
-	teal: "#008080",
-	white: "#ffffff",
-	yellow: "#ffff00",
-
-	// 4.2.3. "transparent" color keyword
-	transparent: [ null, null, null, 0 ],
-
-	_default: "#ffffff"
-};
-
-})( jQuery );
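This embedded Color module is what lets plain .animate() tween color properties; a small sketch of the API defined above (element id invented):

var red = jQuery.Color('#ff0000');
red.alpha(0.5).toRgbaString();                            // "rgba(255,0,0,0.5)"
red.transition(jQuery.Color('blue'), 0.5).toHexString();  // "#7f007f" -- halfway to blue, channels floored
// the cssHooks installed above make this tween instead of jumping:
$('#row').animate({ backgroundColor: '#c0ffee' }, 500);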
-
-
-
-/******************************************************************************/
-/****************************** CLASS ANIMATIONS ******************************/
-/******************************************************************************/
-(function() {
-
-var classAnimationActions = [ "add", "remove", "toggle" ],
-	shorthandStyles = {
-		border: 1,
-		borderBottom: 1,
-		borderColor: 1,
-		borderLeft: 1,
-		borderRight: 1,
-		borderTop: 1,
-		borderWidth: 1,
-		margin: 1,
-		padding: 1
-	};
-
-$.each([ "borderLeftStyle", "borderRightStyle", "borderBottomStyle", "borderTopStyle" ], function( _, prop ) {
-	$.fx.step[ prop ] = function( fx ) {
-		if ( fx.end !== "none" && !fx.setAttr || fx.pos === 1 && !fx.setAttr ) {
-			jQuery.style( fx.elem, prop, fx.end );
-			fx.setAttr = true;
-		}
-	};
-});
-
-function getElementStyles() {
-	var style = this.ownerDocument.defaultView ?
-			this.ownerDocument.defaultView.getComputedStyle( this, null ) :
-			this.currentStyle,
-		newStyle = {},
-		key,
-		camelCase,
-		len;
-
-	// webkit enumerates style properties
-	if ( style && style.length && style[ 0 ] && style[ style[ 0 ] ] ) {
-		len = style.length;
-		while ( len-- ) {
-			key = style[ len ];
-			if ( typeof style[ key ] === "string" ) {
-				newStyle[ $.camelCase( key ) ] = style[ key ];
-			}
-		}
-	} else {
-		for ( key in style ) {
-			if ( typeof style[ key ] === "string" ) {
-				newStyle[ key ] = style[ key ];
-			}
-		}
-	}
-
-	return newStyle;
-}
-
-
-function styleDifference( oldStyle, newStyle ) {
-	var diff = {},
-		name, value;
-
-	for ( name in newStyle ) {
-		value = newStyle[ name ];
-		if ( oldStyle[ name ] !== value ) {
-			if ( !shorthandStyles[ name ] ) {
-				if ( $.fx.step[ name ] || !isNaN( parseFloat( value ) ) ) {
-					diff[ name ] = value;
-				}
-			}
-		}
-	}
-
-	return diff;
-}
-
-$.effects.animateClass = function( value, duration, easing, callback ) {
-	var o = $.speed( duration, easing, callback );
-
-	return this.queue( function() {
-		var animated = $( this ),
-			baseClass = animated.attr( "class" ) || "",
-			applyClassChange,
-			allAnimations = o.children ? animated.find( "*" ).andSelf() : animated;
-
-		// map the animated objects to store the original styles.
-		allAnimations = allAnimations.map(function() {
-			var el = $( this );
-			return {
-				el: el,
-				start: getElementStyles.call( this )
-			};
-		});
-
-		// apply class change
-		applyClassChange = function() {
-			$.each( classAnimationActions, function(i, action) {
-				if ( value[ action ] ) {
-					animated[ action + "Class" ]( value[ action ] );
-				}
-			});
-		};
-		applyClassChange();
-
-		// map all animated objects again - calculate new styles and diff
-		allAnimations = allAnimations.map(function() {
-			this.end = getElementStyles.call( this.el[ 0 ] );
-			this.diff = styleDifference( this.start, this.end );
-			return this;
-		});
-
-		// apply original class
-		animated.attr( "class", baseClass );
-
-		// map all animated objects again - this time collecting a promise
-		allAnimations = allAnimations.map(function() {
-			var styleInfo = this,
-				dfd = $.Deferred(),
-				opts = jQuery.extend({}, o, {
-					queue: false,
-					complete: function() {
-						dfd.resolve( styleInfo );
-					}
-				});
-
-			this.el.animate( this.diff, opts );
-			return dfd.promise();
-		});
-
-		// once all animations have completed:
-		$.when.apply( $, allAnimations.get() ).done(function() {
-
-			// set the final class
-			applyClassChange();
-
-			// for each animated element,
-			// clear all css properties that were animated
-			$.each( arguments, function() {
-				var el = this.el;
-				$.each( this.diff, function(key) {
-					el.css( key, '' );
-				});
-			});
-
-			// this is guaranteed to be there if you use jQuery.speed()
-			// it also handles dequeuing the next anim...
-			o.complete.call( animated[ 0 ] );
-		});
-	});
-};
-
-$.fn.extend({
-	_addClass: $.fn.addClass,
-	addClass: function( classNames, speed, easing, callback ) {
-		return speed ?
-			$.effects.animateClass.call( this,
-				{ add: classNames }, speed, easing, callback ) :
-			this._addClass( classNames );
-	},
-
-	_removeClass: $.fn.removeClass,
-	removeClass: function( classNames, speed, easing, callback ) {
-		return speed ?
-			$.effects.animateClass.call( this,
-				{ remove: classNames }, speed, easing, callback ) :
-			this._removeClass( classNames );
-	},
-
-	_toggleClass: $.fn.toggleClass,
-	toggleClass: function( classNames, force, speed, easing, callback ) {
-		if ( typeof force === "boolean" || force === undefined ) {
-			if ( !speed ) {
-				// without speed parameter
-				return this._toggleClass( classNames, force );
-			} else {
-				return $.effects.animateClass.call( this,
-					(force ? { add: classNames } : { remove: classNames }),
-					speed, easing, callback );
-			}
-		} else {
-			// without force parameter
-			return $.effects.animateClass.call( this,
-				{ toggle: classNames }, force, speed, easing );
-		}
-	},
-
-	switchClass: function( remove, add, speed, easing, callback) {
-		return $.effects.animateClass.call( this, {
-			add: add,
-			remove: remove
-		}, speed, easing, callback );
-	}
-});
-
-})();
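With this block applied, the stock class methods accept a speed and animate only the style properties that actually differ; a sketch with invented class names:

$('#panel').addClass('highlight', 400, 'swing', function () {
  // fires once every diffed property has finished animating
});
$('#panel').switchClass('day-theme', 'night-theme', 600);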
-
-/******************************************************************************/
-/*********************************** EFFECTS **********************************/
-/******************************************************************************/
-
-(function() {
-
-$.extend( $.effects, {
-	version: "1.9.0",
-
-	// Saves a set of properties in a data storage
-	save: function( element, set ) {
-		for( var i=0; i < set.length; i++ ) {
-			if ( set[ i ] !== null ) {
-				element.data( dataSpace + set[ i ], element[ 0 ].style[ set[ i ] ] );
-			}
-		}
-	},
-
-	// Restores a set of previously saved properties from a data storage
-	restore: function( element, set ) {
-		var val, i;
-		for( i=0; i < set.length; i++ ) {
-			if ( set[ i ] !== null ) {
-				val = element.data( dataSpace + set[ i ] );
-				// support: jQuery 1.6.2
-				// http://bugs.jquery.com/ticket/9917
-				// jQuery 1.6.2 incorrectly returns undefined for any falsy value.
-				// We can't differentiate between "" and 0 here, so we just assume
-				// empty string since it's likely to be a more common value...
-				if ( val === undefined ) {
-					val = "";
-				}
-				element.css( set[ i ], val );
-			}
-		}
-	},
-
-	setMode: function( el, mode ) {
-		if (mode === "toggle") {
-			mode = el.is( ":hidden" ) ? "show" : "hide";
-		}
-		return mode;
-	},
-
-	// Translates a [top,left] array into a baseline value
-	// this should be a little more flexible in the future to handle a string & hash
-	getBaseline: function( origin, original ) {
-		var y, x;
-		switch ( origin[ 0 ] ) {
-			case "top": y = 0; break;
-			case "middle": y = 0.5; break;
-			case "bottom": y = 1; break;
-			default: y = origin[ 0 ] / original.height;
-		}
-		switch ( origin[ 1 ] ) {
-			case "left": x = 0; break;
-			case "center": x = 0.5; break;
-			case "right": x = 1; break;
-			default: x = origin[ 1 ] / original.width;
-		}
-		return {
-			x: x,
-			y: y
-		};
-	},
-
-	// Wraps the element around a wrapper that copies position properties
-	createWrapper: function( element ) {
-
-		// if the element is already wrapped, return it
-		if ( element.parent().is( ".ui-effects-wrapper" )) {
-			return element.parent();
-		}
-
-		// wrap the element
-		var props = {
-				width: element.outerWidth(true),
-				height: element.outerHeight(true),
-				"float": element.css( "float" )
-			},
-			wrapper = $( "<div></div>" )
-				.addClass( "ui-effects-wrapper" )
-				.css({
-					fontSize: "100%",
-					background: "transparent",
-					border: "none",
-					margin: 0,
-					padding: 0
-				}),
-			// Store the size in case width/height are defined in % - Fixes #5245
-			size = {
-				width: element.width(),
-				height: element.height()
-			},
-			active = document.activeElement;
-
-		// support: Firefox
-		// Firefox incorrectly exposes anonymous content
-		// https://bugzilla.mozilla.org/show_bug.cgi?id=561664
-		try {
-			active.id;
-		} catch( e ) {
-			active = document.body;
-		}
-
-		element.wrap( wrapper );
-
-		// Fixes #7595 - Elements lose focus when wrapped.
-		if ( element[ 0 ] === active || $.contains( element[ 0 ], active ) ) {
-			$( active ).focus();
-		}
-
-		wrapper = element.parent(); //Hotfix for jQuery 1.4 since some change in wrap() seems to actually lose the reference to the wrapped element
-
-		// transfer positioning properties to the wrapper
-		if ( element.css( "position" ) === "static" ) {
-			wrapper.css({ position: "relative" });
-			element.css({ position: "relative" });
-		} else {
-			$.extend( props, {
-				position: element.css( "position" ),
-				zIndex: element.css( "z-index" )
-			});
-			$.each([ "top", "left", "bottom", "right" ], function(i, pos) {
-				props[ pos ] = element.css( pos );
-				if ( isNaN( parseInt( props[ pos ], 10 ) ) ) {
-					props[ pos ] = "auto";
-				}
-			});
-			element.css({
-				position: "relative",
-				top: 0,
-				left: 0,
-				right: "auto",
-				bottom: "auto"
-			});
-		}
-		element.css(size);
-
-		return wrapper.css( props ).show();
-	},
-
-	removeWrapper: function( element ) {
-		var active = document.activeElement;
-
-		if ( element.parent().is( ".ui-effects-wrapper" ) ) {
-			element.parent().replaceWith( element );
-
-			// Fixes #7595 - Elements lose focus when wrapped.
-			if ( element[ 0 ] === active || $.contains( element[ 0 ], active ) ) {
-				$( active ).focus();
-			}
-		}
-
-
-		return element;
-	},
-
-	setTransition: function( element, list, factor, value ) {
-		value = value || {};
-		$.each( list, function( i, x ) {
-			var unit = element.cssUnit( x );
-			if ( unit[ 0 ] > 0 ) {
-				value[ x ] = unit[ 0 ] * factor + unit[ 1 ];
-			}
-		});
-		return value;
-	}
-});
-
-// return an effect options object for the given parameters:
-function _normalizeArguments( effect, options, speed, callback ) {
-
-	// allow passing all options as the first parameter
-	if ( $.isPlainObject( effect ) ) {
-		options = effect;
-		effect = effect.effect;
-	}
-
-	// convert to an object
-	effect = { effect: effect };
-
-	// catch (effect)
-	if ( options === undefined ) {
-		options = {};
-	}
-
-	// catch (effect, callback)
-	if ( $.isFunction( options ) ) {
-		callback = options;
-		speed = null;
-		options = {};
-	}
-
-	// catch (effect, speed, ?)
-	if ( typeof options === "number" || $.fx.speeds[ options ] ) {
-		callback = speed;
-		speed = options;
-		options = {};
-	}
-
-	// catch (effect, options, callback)
-	if ( $.isFunction( speed ) ) {
-		callback = speed;
-		speed = null;
-	}
-
-	// add options to effect
-	if ( options ) {
-		$.extend( effect, options );
-	}
-
-	speed = speed || options.duration;
-	effect.duration = $.fx.off ? 0 :
-		typeof speed === "number" ? speed :
-		speed in $.fx.speeds ? $.fx.speeds[ speed ] :
-		$.fx.speeds._default;
-
-	effect.complete = callback || options.complete;
-
-	return effect;
-}
-
-function standardSpeed( speed ) {
-	// valid standard speeds
-	if ( !speed || typeof speed === "number" || $.fx.speeds[ speed ] ) {
-		return true;
-	}
-
-	// invalid strings - treat as "normal" speed
-	if ( typeof speed === "string" && !$.effects.effect[ speed ] ) {
-		// TODO: remove in 2.0 (#7115)
-		if ( backCompat && $.effects[ speed ] ) {
-			return false;
-		}
-		return true;
-	}
-
-	return false;
-}
-
-$.fn.extend({
-	effect: function( effect, options, speed, callback ) {
-		var args = _normalizeArguments.apply( this, arguments ),
-			mode = args.mode,
-			queue = args.queue,
-			effectMethod = $.effects.effect[ args.effect ],
-
-			// DEPRECATED: remove in 2.0 (#7115)
-			oldEffectMethod = !effectMethod && backCompat && $.effects[ args.effect ];
-
-		if ( $.fx.off || !( effectMethod || oldEffectMethod ) ) {
-			// delegate to the original method (e.g., .show()) if possible
-			if ( mode ) {
-				return this[ mode ]( args.duration, args.complete );
-			} else {
-				return this.each( function() {
-					if ( args.complete ) {
-						args.complete.call( this );
-					}
-				});
-			}
-		}
-
-		function run( next ) {
-			var elem = $( this ),
-				complete = args.complete,
-				mode = args.mode;
-
-			function done() {
-				if ( $.isFunction( complete ) ) {
-					complete.call( elem[0] );
-				}
-				if ( $.isFunction( next ) ) {
-					next();
-				}
-			}
-
-			// if the element is hidden and mode is hide,
-			// or element is visible and mode is show
-			if ( elem.is( ":hidden" ) ? mode === "hide" : mode === "show" ) {
-				done();
-			} else {
-				effectMethod.call( elem[0], args, done );
-			}
-		}
-
-		// TODO: remove this check in 2.0, effectMethod will always be true
-		if ( effectMethod ) {
-			return queue === false ? this.each( run ) : this.queue( queue || "fx", run );
-		} else {
-			// DEPRECATED: remove in 2.0 (#7115)
-			return oldEffectMethod.call(this, {
-				options: args,
-				duration: args.duration,
-				callback: args.complete,
-				mode: args.mode
-			});
-		}
-	},
-
-	_show: $.fn.show,
-	show: function( speed ) {
-		if ( standardSpeed( speed ) ) {
-			return this._show.apply( this, arguments );
-		} else {
-			var args = _normalizeArguments.apply( this, arguments );
-			args.mode = "show";
-			return this.effect.call( this, args );
-		}
-	},
-
-	_hide: $.fn.hide,
-	hide: function( speed ) {
-		if ( standardSpeed( speed ) ) {
-			return this._hide.apply( this, arguments );
-		} else {
-			var args = _normalizeArguments.apply( this, arguments );
-			args.mode = "hide";
-			return this.effect.call( this, args );
-		}
-	},
-
-	// jQuery core overloads toggle and creates _toggle
-	__toggle: $.fn.toggle,
-	toggle: function( speed ) {
-		if ( standardSpeed( speed ) || typeof speed === "boolean" || $.isFunction( speed ) ) {
-			return this.__toggle.apply( this, arguments );
-		} else {
-			var args = _normalizeArguments.apply( this, arguments );
-			args.mode = "toggle";
-			return this.effect.call( this, args );
-		}
-	},
-
-	// helper functions
-	cssUnit: function(key) {
-		var style = this.css( key ),
-			val = [];
-
-		$.each( [ "em", "px", "%", "pt" ], function( i, unit ) {
-			if ( style.indexOf( unit ) > 0 ) {
-				val = [ parseFloat( style ), unit ];
-			}
-		});
-		return val;
-	}
-});
-
-})();
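These wrappers route any non-standard speed argument through .effect(); a sketch of the three entry points (ids invented):

$('#box').effect('bounce', { times: 3 }, 600);      // explicit effect call
$('#box').hide('blind', { direction: 'up' }, 300);  // 'blind' is not a speed, so effect() runs with mode "hide"
$('#box').show(200);                                // plain number falls straight through to core show()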
-
-/******************************************************************************/
-/*********************************** EASING ***********************************/
-/******************************************************************************/
-
-(function() {
-
-// based on easing equations from Robert Penner (http://www.robertpenner.com/easing)
-
-var baseEasings = {};
-
-$.each( [ "Quad", "Cubic", "Quart", "Quint", "Expo" ], function( i, name ) {
-	baseEasings[ name ] = function( p ) {
-		return Math.pow( p, i + 2 );
-	};
-});
-
-$.extend( baseEasings, {
-	Sine: function ( p ) {
-		return 1 - Math.cos( p * Math.PI / 2 );
-	},
-	Circ: function ( p ) {
-		return 1 - Math.sqrt( 1 - p * p );
-	},
-	Elastic: function( p ) {
-		return p === 0 || p === 1 ? p :
-			-Math.pow( 2, 8 * (p - 1) ) * Math.sin( ( (p - 1) * 80 - 7.5 ) * Math.PI / 15 );
-	},
-	Back: function( p ) {
-		return p * p * ( 3 * p - 2 );
-	},
-	Bounce: function ( p ) {
-		var pow2,
-			bounce = 4;
-
-		while ( p < ( ( pow2 = Math.pow( 2, --bounce ) ) - 1 ) / 11 ) {}
-		return 1 / Math.pow( 4, 3 - bounce ) - 7.5625 * Math.pow( ( pow2 * 3 - 2 ) / 22 - p, 2 );
-	}
-});
-
-$.each( baseEasings, function( name, easeIn ) {
-	$.easing[ "easeIn" + name ] = easeIn;
-	$.easing[ "easeOut" + name ] = function( p ) {
-		return 1 - easeIn( 1 - p );
-	};
-	$.easing[ "easeInOut" + name ] = function( p ) {
-		return p < 0.5 ?
-			easeIn( p * 2 ) / 2 :
-			1 - easeIn( p * -2 + 2 ) / 2;
-	};
-});
-
-})();
-
-})(jQuery));
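Each base easing above yields easeIn/easeOut/easeInOut variants; for "Quad" (p^2) the generated functions evaluate as:

$.easing.easeInQuad(0.5);     // 0.5^2           = 0.25
$.easing.easeOutQuad(0.5);    // 1 - (1 - 0.5)^2 = 0.75
$.easing.easeInOutQuad(0.25); // easeIn(0.5) / 2 = 0.125
$('#box').animate({ left: 200 }, 400, 'easeOutBounce'); // usable wherever jQuery accepts an easing name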
-(function( $, undefined ) {
-
-var rvertical = /up|down|vertical/,
-	rpositivemotion = /up|left|vertical|horizontal/;
-
-$.effects.effect.blind = function( o, done ) {
-	// Create element
-	var el = $( this ),
-		props = [ "position", "top", "bottom", "left", "right", "height", "width" ],
-		mode = $.effects.setMode( el, o.mode || "hide" ),
-		direction = o.direction || "up",
-		vertical = rvertical.test( direction ),
-		ref = vertical ? "height" : "width",
-		ref2 = vertical ? "top" : "left",
-		motion = rpositivemotion.test( direction ),
-		animation = {},
-		show = mode === "show",
-		wrapper, distance, margin;
-
-	// if already wrapped, the wrapper's properties are my property. #6245
-	if ( el.parent().is( ".ui-effects-wrapper" ) ) {
-		$.effects.save( el.parent(), props );
-	} else {
-		$.effects.save( el, props );
-	}
-	el.show();
-	wrapper = $.effects.createWrapper( el ).css({
-		overflow: "hidden"
-	});
-
-	distance = wrapper[ ref ]();
-	margin = parseFloat( wrapper.css( ref2 ) ) || 0;
-
-	animation[ ref ] = show ? distance : 0;
-	if ( !motion ) {
-		el
-			.css( vertical ? "bottom" : "right", 0 )
-			.css( vertical ? "top" : "left", "auto" )
-			.css({ position: "absolute" });
-
-		animation[ ref2 ] = show ? margin : distance + margin;
-	}
-
-	// start at 0 if we are showing
-	if ( show ) {
-		wrapper.css( ref, 0 );
-		if ( ! motion ) {
-			wrapper.css( ref2, margin + distance );
-		}
-	}
-
-	// Animate
-	wrapper.animate( animation, {
-		duration: o.duration,
-		easing: o.easing,
-		queue: false,
-		complete: function() {
-			if ( mode === "hide" ) {
-				el.hide();
-			}
-			$.effects.restore( el, props );
-			$.effects.removeWrapper( el );
-			done();
-		}
-	});
-
-};
-
-})(jQuery);
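Usage of the blind effect just defined, assuming a hypothetical #note element:

$('#note').hide('blind', { direction: 'left' }, 400); // wrapper clips the element toward its left edge
$('#note').show('blind', 400);                        // default direction is "up"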
-(function( $, undefined ) {
-
-$.effects.effect.bounce = function( o, done ) {
-	var el = $( this ),
-		props = [ "position", "top", "bottom", "left", "right", "height", "width" ],
-
-		// defaults:
-		mode = $.effects.setMode( el, o.mode || "effect" ),
-		hide = mode === "hide",
-		show = mode === "show",
-		direction = o.direction || "up",
-		distance = o.distance,
-		times = o.times || 5,
-
-		// number of internal animations
-		anims = times * 2 + ( show || hide ? 1 : 0 ),
-		speed = o.duration / anims,
-		easing = o.easing,
-
-		// utility:
-		ref = ( direction === "up" || direction === "down" ) ? "top" : "left",
-		motion = ( direction === "up" || direction === "left" ),
-		i,
-		upAnim,
-		downAnim,
-
-		// we will need to re-assemble the queue to stack our animations in place
-		queue = el.queue(),
-		queuelen = queue.length;
-
-	// Avoid touching opacity to prevent clearType and PNG issues in IE
-	if ( show || hide ) {
-		props.push( "opacity" );
-	}
-
-	$.effects.save( el, props );
-	el.show();
-	$.effects.createWrapper( el ); // Create Wrapper
-
-	// default distance for the BIGGEST bounce is the outer Distance / 3
-	if ( !distance ) {
-		distance = el[ ref === "top" ? "outerHeight" : "outerWidth" ]() / 3;
-	}
-
-	if ( show ) {
-		downAnim = { opacity: 1 };
-		downAnim[ ref ] = 0;
-
-		// if we are showing, force opacity 0 and set the initial position
-		// then do the "first" animation
-		el.css( "opacity", 0 )
-			.css( ref, motion ? -distance * 2 : distance * 2 )
-			.animate( downAnim, speed, easing );
-	}
-
-	// start at the smallest distance if we are hiding
-	if ( hide ) {
-		distance = distance / Math.pow( 2, times - 1 );
-	}
-
-	downAnim = {};
-	downAnim[ ref ] = 0;
-	// Bounces up/down/left/right then back to 0 -- times * 2 animations happen here
-	for ( i = 0; i < times; i++ ) {
-		upAnim = {};
-		upAnim[ ref ] = ( motion ? "-=" : "+=" ) + distance;
-
-		el.animate( upAnim, speed, easing )
-			.animate( downAnim, speed, easing );
-
-		distance = hide ? distance * 2 : distance / 2;
-	}
-
-	// Last Bounce when Hiding
-	if ( hide ) {
-		upAnim = { opacity: 0 };
-		upAnim[ ref ] = ( motion ? "-=" : "+=" ) + distance;
-
-		el.animate( upAnim, speed, easing );
-	}
-
-	el.queue(function() {
-		if ( hide ) {
-			el.hide();
-		}
-		$.effects.restore( el, props );
-		$.effects.removeWrapper( el );
-		done();
-	});
-
-	// inject all the animations we just queued to be first in line (after "inprogress")
-	if ( queuelen > 1) {
-		queue.splice.apply( queue,
-			[ 1, 0 ].concat( queue.splice( queuelen, anims + 1 ) ) );
-	}
-	el.dequeue();
-
-};
-
-})(jQuery);
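A bounce sketch (id and numbers invented):

$('#icon').effect('bounce', { times: 3, distance: 40 }, 600); // three bounces, the biggest 40px
$('#icon').hide('bounce', { direction: 'down' }, 500);        // while hiding, bounces start small and double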
-(function( $, undefined ) {
-
-$.effects.effect.clip = function( o, done ) {
-	// Create element
-	var el = $( this ),
-		props = [ "position", "top", "bottom", "left", "right", "height", "width" ],
-		mode = $.effects.setMode( el, o.mode || "hide" ),
-		show = mode === "show",
-		direction = o.direction || "vertical",
-		vert = direction === "vertical",
-		size = vert ? "height" : "width",
-		position = vert ? "top" : "left",
-		animation = {},
-		wrapper, animate, distance;
-
-	// Save & Show
-	$.effects.save( el, props );
-	el.show();
-
-	// Create Wrapper
-	wrapper = $.effects.createWrapper( el ).css({
-		overflow: "hidden"
-	});
-	animate = ( el[0].tagName === "IMG" ) ? wrapper : el;
-	distance = animate[ size ]();
-
-	// Shift
-	if ( show ) {
-		animate.css( size, 0 );
-		animate.css( position, distance / 2 );
-	}
-
-	// Create Animation Object:
-	animation[ size ] = show ? distance : 0;
-	animation[ position ] = show ? 0 : distance / 2;
-
-	// Animate
-	animate.animate( animation, {
-		queue: false,
-		duration: o.duration,
-		easing: o.easing,
-		complete: function() {
-			if ( !show ) {
-				el.hide();
-			}
-			$.effects.restore( el, props );
-			$.effects.removeWrapper( el );
-			done();
-		}
-	});
-
-};
-
-})(jQuery);
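Clip collapses both edges toward the element's center line; a sketch (id invented):

$('#photo').hide('clip', { direction: 'horizontal' }, 300); // left and right edges close inward
$('#photo').show('clip', 300);                              // default vertical: grows out from the middle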
-(function( $, undefined ) {
-
-$.effects.effect.drop = function( o, done ) {
-
-	var el = $( this ),
-		props = [ "position", "top", "bottom", "left", "right", "opacity", "height", "width" ],
-		mode = $.effects.setMode( el, o.mode || "hide" ),
-		show = mode === "show",
-		direction = o.direction || "left",
-		ref = ( direction === "up" || direction === "down" ) ? "top" : "left",
-		motion = ( direction === "up" || direction === "left" ) ? "pos" : "neg",
-		animation = {
-			opacity: show ? 1 : 0
-		},
-		distance;
-
-	// Adjust
-	$.effects.save( el, props );
-	el.show();
-	$.effects.createWrapper( el );
-
-	distance = o.distance || el[ ref === "top" ? "outerHeight": "outerWidth" ]( true ) / 2;
-
-	if ( show ) {
-		el
-			.css( "opacity", 0 )
-			.css( ref, motion === "pos" ? -distance : distance );
-	}
-
-	// Animation
-	animation[ ref ] = ( show ?
-		( motion === "pos" ? "+=" : "-=" ) :
-		( motion === "pos" ? "-=" : "+=" ) ) +
-		distance;
-
-	// Animate
-	el.animate( animation, {
-		queue: false,
-		duration: o.duration,
-		easing: o.easing,
-		complete: function() {
-			if ( mode === "hide" ) {
-				el.hide();
-			}
-			$.effects.restore( el, props );
-			$.effects.removeWrapper( el );
-			done();
-		}
-	});
-};
-
-})(jQuery);
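Drop slides the element toward the given direction while fading its opacity; a short sketch (names illustrative):

	// fade out while sliding half the element's width to the left
	$( "#note" ).hide( "drop", { direction: "left" }, 400 );
	// reverse: fade in while sliding back from the left
	$( "#note" ).show( "drop", { direction: "left" }, 400 );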
-(function( $, undefined ) {
-
-$.effects.effect.explode = function( o, done ) {
-
-	var rows = o.pieces ? Math.round( Math.sqrt( o.pieces ) ) : 3,
-		cells = rows,
-		el = $( this ),
-		mode = $.effects.setMode( el, o.mode || "hide" ),
-		show = mode === "show",
-
-		// show and then visibility:hidden the element before calculating offset
-		offset = el.show().css( "visibility", "hidden" ).offset(),
-
-		// width and height of a piece
-		width = Math.ceil( el.outerWidth() / cells ),
-		height = Math.ceil( el.outerHeight() / rows ),
-		pieces = [],
-
-		// loop
-		i, j, left, top, mx, my;
-
-	// children animate complete:
-	function childComplete() {
-		pieces.push( this );
-		if ( pieces.length === rows * cells ) {
-			animComplete();
-		}
-	}
-
-	// clone the element for each row and cell.
-	for( i = 0; i < rows ; i++ ) { // rows, top to bottom
-		top = offset.top + i * height;
-		my = i - ( rows - 1 ) / 2 ;
-
-		for( j = 0; j < cells ; j++ ) { // cells, left to right
-			left = offset.left + j * width;
-			mx = j - ( cells - 1 ) / 2 ;
-
-			// Create a clone of the now hidden main element; it will be absolutely positioned
-			// within a wrapper div, offset by -left and -top equal to the size of our pieces
-			el
-				.clone()
-				.appendTo( "body" )
-				.wrap( "<div></div>" )
-				.css({
-					position: "absolute",
-					visibility: "visible",
-					left: -j * width,
-					top: -i * height
-				})
-
-			// select the wrapper - make it overflow: hidden and absolutely positioned based on
-			// where the original was located, +left and +top equal to the size of the pieces
-				.parent()
-				.addClass( "ui-effects-explode" )
-				.css({
-					position: "absolute",
-					overflow: "hidden",
-					width: width,
-					height: height,
-					left: left + ( show ? mx * width : 0 ),
-					top: top + ( show ? my * height : 0 ),
-					opacity: show ? 0 : 1
-				}).animate({
-					left: left + ( show ? 0 : mx * width ),
-					top: top + ( show ? 0 : my * height ),
-					opacity: show ? 1 : 0
-				}, o.duration || 500, o.easing, childComplete );
-		}
-	}
-
-	function animComplete() {
-		el.css({
-			visibility: "visible"
-		});
-		$( pieces ).remove();
-		if ( !show ) {
-			el.hide();
-		}
-		done();
-	}
-};
-
-})(jQuery);
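Per the code above, o.pieces is rounded to a square grid (rows = cells = round(sqrt(pieces))); a sketch with an illustrative selector:

	// shatter the element into a 4x4 grid over one second
	$( "#photo" ).hide( "explode", { pieces: 16 }, 1000 );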
-(function( $, undefined ) {
-
-$.effects.effect.fade = function( o, done ) {
-	var el = $( this ),
-		mode = $.effects.setMode( el, o.mode || "toggle" );
-
-	el.animate({
-		opacity: mode
-	}, {
-		queue: false,
-		duration: o.duration,
-		easing: o.easing,
-		complete: done
-	});
-};
-
-})( jQuery );
-(function( $, undefined ) {
-
-$.effects.effect.fold = function( o, done ) {
-
-	// Create element
-	var el = $( this ),
-		props = [ "position", "top", "bottom", "left", "right", "height", "width" ],
-		mode = $.effects.setMode( el, o.mode || "hide" ),
-		show = mode === "show",
-		hide = mode === "hide",
-		size = o.size || 15,
-		percent = /([0-9]+)%/.exec( size ),
-		horizFirst = !!o.horizFirst,
-		widthFirst = show !== horizFirst,
-		ref = widthFirst ? [ "width", "height" ] : [ "height", "width" ],
-		duration = o.duration / 2,
-		wrapper, distance,
-		animation1 = {},
-		animation2 = {};
-
-	$.effects.save( el, props );
-	el.show();
-
-	// Create Wrapper
-	wrapper = $.effects.createWrapper( el ).css({
-		overflow: "hidden"
-	});
-	distance = widthFirst ?
-		[ wrapper.width(), wrapper.height() ] :
-		[ wrapper.height(), wrapper.width() ];
-
-	if ( percent ) {
-		size = parseInt( percent[ 1 ], 10 ) / 100 * distance[ hide ? 0 : 1 ];
-	}
-	if ( show ) {
-		wrapper.css( horizFirst ? {
-			height: 0,
-			width: size
-		} : {
-			height: size,
-			width: 0
-		});
-	}
-
-	// Animation
-	animation1[ ref[ 0 ] ] = show ? distance[ 0 ] : size;
-	animation2[ ref[ 1 ] ] = show ? distance[ 1 ] : 0;
-
-	// Animate
-	wrapper
-		.animate( animation1, duration, o.easing )
-		.animate( animation2, duration, o.easing, function() {
-			if ( hide ) {
-				el.hide();
-			}
-			$.effects.restore( el, props );
-			$.effects.removeWrapper( el );
-			done();
-		});
-
-};
-
-})(jQuery);
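The fold effect animates the two axes sequentially, each taking half of o.duration, and size may be absolute pixels or a percent string, as exercised below (selector illustrative):

	// fold the height down to 20%, then collapse the width
	$( "#panel" ).hide( "fold", { size: "20%", horizFirst: false }, 600 );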
-(function( $, undefined ) {
-
-$.effects.effect.highlight = function( o, done ) {
-	var elem = $( this ),
-		props = [ "backgroundImage", "backgroundColor", "opacity" ],
-		mode = $.effects.setMode( elem, o.mode || "show" ),
-		animation = {
-			backgroundColor: elem.css( "backgroundColor" )
-		};
-
-	if (mode === "hide") {
-		animation.opacity = 0;
-	}
-
-	$.effects.save( elem, props );
-	
-	elem
-		.show()
-		.css({
-			backgroundImage: "none",
-			backgroundColor: o.color || "#ffff99"
-		})
-		.animate( animation, {
-			queue: false,
-			duration: o.duration,
-			easing: o.easing,
-			complete: function() {
-				if ( mode === "hide" ) {
-					elem.hide();
-				}
-				$.effects.restore( elem, props );
-				done();
-			}
-		});
-};
-
-})(jQuery);
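Highlight (the "yellow fade") flashes a background color and animates back to the original; color defaults to #ffff99 per the code. A sketch (selector and color illustrative):

	// flash a saved row in amber, fading back over 1.5 seconds
	$( "#row-saved" ).effect( "highlight", { color: "#ffcc66" }, 1500 );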
-(function( $, undefined ) {
-
-$.effects.effect.pulsate = function( o, done ) {
-	var elem = $( this ),
-		mode = $.effects.setMode( elem, o.mode || "show" ),
-		show = mode === "show",
-		hide = mode === "hide",
-		showhide = ( show || mode === "hide" ),
-
-		// showing or hiding adds one final "last" animation
-		anims = ( ( o.times || 5 ) * 2 ) + ( showhide ? 1 : 0 ),
-		duration = o.duration / anims,
-		animateTo = 0,
-		queue = elem.queue(),
-		queuelen = queue.length,
-		i;
-
-	if ( show || !elem.is(":visible")) {
-		elem.css( "opacity", 0 ).show();
-		animateTo = 1;
-	}
-
-	// anims - 1 opacity "toggles"
-	for ( i = 1; i < anims; i++ ) {
-		elem.animate({
-			opacity: animateTo
-		}, duration, o.easing );
-		animateTo = 1 - animateTo;
-	}
-
-	elem.animate({
-		opacity: animateTo
-	}, duration, o.easing);
-
-	elem.queue(function() {
-		if ( hide ) {
-			elem.hide();
-		}
-		done();
-	});
-
-	// We just queued up "anims" animations, we need to put them next in the queue
-	if ( queuelen > 1 ) {
-		queue.splice.apply( queue,
-			[ 1, 0 ].concat( queue.splice( queuelen, anims + 1 ) ) );
-	}
-	elem.dequeue();
-};
-
-})(jQuery);
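Pulsate queues (o.times || 5) * 2 opacity toggles, plus one settling animation when showing or hiding; a sketch (selector illustrative):

	// blink an alert four times
	$( "#alert" ).effect( "pulsate", { times: 4 }, 800 );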
-(function( $, undefined ) {
-
-$.effects.effect.puff = function( o, done ) {
-	var elem = $( this ),
-		mode = $.effects.setMode( elem, o.mode || "hide" ),
-		hide = mode === "hide",
-		percent = parseInt( o.percent, 10 ) || 150,
-		factor = percent / 100,
-		original = {
-			height: elem.height(),
-			width: elem.width()
-		};
-
-	$.extend( o, {
-		effect: "scale",
-		queue: false,
-		fade: true,
-		mode: mode,
-		complete: done,
-		percent: hide ? percent : 100,
-		from: hide ?
-			original :
-			{
-				height: original.height * factor,
-				width: original.width * factor
-			}
-	});
-
-	elem.effect( o );
-};
-
-$.effects.effect.scale = function( o, done ) {
-
-	// Create element
-	var el = $( this ),
-		options = $.extend( true, {}, o ),
-		mode = $.effects.setMode( el, o.mode || "effect" ),
-		percent = parseInt( o.percent, 10 ) ||
-			( parseInt( o.percent, 10 ) === 0 ? 0 : ( mode === "hide" ? 0 : 100 ) ),
-		direction = o.direction || "both",
-		origin = o.origin,
-		original = {
-			height: el.height(),
-			width: el.width(),
-			outerHeight: el.outerHeight(),
-			outerWidth: el.outerWidth()
-		},
-		factor = {
-			y: direction !== "horizontal" ? (percent / 100) : 1,
-			x: direction !== "vertical" ? (percent / 100) : 1
-		};
-
-	// We are going to pass this effect to the size effect:
-	options.effect = "size";
-	options.queue = false;
-	options.complete = done;
-
-	// Set default origin and restore for show/hide
-	if ( mode !== "effect" ) {
-		options.origin = origin || ["middle","center"];
-		options.restore = true;
-	}
-
-	options.from = o.from || ( mode === "show" ? { height: 0, width: 0 } : original );
-	options.to = {
-		height: original.height * factor.y,
-		width: original.width * factor.x,
-		outerHeight: original.outerHeight * factor.y,
-		outerWidth: original.outerWidth * factor.x
-	};
-
-	// Fade option to support puff
-	if ( options.fade ) {
-		if ( mode === "show" ) {
-			options.from.opacity = 0;
-			options.to.opacity = 1;
-		}
-		if ( mode === "hide" ) {
-			options.from.opacity = 1;
-			options.to.opacity = 0;
-		}
-	}
-
-	// Animate
-	el.effect( options );
-
-};
-
-$.effects.effect.size = function( o, done ) {
-
-	// Create element
-	var el = $( this ),
-		props = [ "position", "top", "bottom", "left", "right", "width", "height", "overflow", "opacity" ],
-
-		// Always restore
-		props1 = [ "position", "top", "bottom", "left", "right", "overflow", "opacity" ],
-
-		// Copy for children
-		props2 = [ "width", "height", "overflow" ],
-		cProps = [ "fontSize" ],
-		vProps = [ "borderTopWidth", "borderBottomWidth", "paddingTop", "paddingBottom" ],
-		hProps = [ "borderLeftWidth", "borderRightWidth", "paddingLeft", "paddingRight" ],
-
-		// Set options
-		mode = $.effects.setMode( el, o.mode || "effect" ),
-		restore = o.restore || mode !== "effect",
-		scale = o.scale || "both",
-		origin = o.origin || [ "middle", "center" ],
-		original, baseline, factor,
-		position = el.css( "position" );
-
-	if ( mode === "show" ) {
-		el.show();
-	}
-	original = {
-		height: el.height(),
-		width: el.width(),
-		outerHeight: el.outerHeight(),
-		outerWidth: el.outerWidth()
-	};
-
-	el.from = o.from || original;
-	el.to = o.to || original;
-
-	// Set scaling factor
-	factor = {
-		from: {
-			y: el.from.height / original.height,
-			x: el.from.width / original.width
-		},
-		to: {
-			y: el.to.height / original.height,
-			x: el.to.width / original.width
-		}
-	};
-
-	// Scale the css box
-	if ( scale === "box" || scale === "both" ) {
-
-		// Vertical props scaling
-		if ( factor.from.y !== factor.to.y ) {
-			props = props.concat( vProps );
-			el.from = $.effects.setTransition( el, vProps, factor.from.y, el.from );
-			el.to = $.effects.setTransition( el, vProps, factor.to.y, el.to );
-		}
-
-		// Horizontal props scaling
-		if ( factor.from.x !== factor.to.x ) {
-			props = props.concat( hProps );
-			el.from = $.effects.setTransition( el, hProps, factor.from.x, el.from );
-			el.to = $.effects.setTransition( el, hProps, factor.to.x, el.to );
-		}
-	}
-
-	// Scale the content
-	if ( scale === "content" || scale === "both" ) {
-
-		// Vertical props scaling
-		if ( factor.from.y !== factor.to.y ) {
-			props = props.concat( cProps );
-			el.from = $.effects.setTransition( el, cProps, factor.from.y, el.from );
-			el.to = $.effects.setTransition( el, cProps, factor.to.y, el.to );
-		}
-	}
-
-	$.effects.save( el, restore ? props : props1 );
-	el.show();
-	$.effects.createWrapper( el );
-	el.css( "overflow", "hidden" ).css( el.from );
-
-	// Adjust
-	if (origin) { // Calculate baseline shifts
-		baseline = $.effects.getBaseline( origin, original );
-		el.from.top = ( original.outerHeight - el.outerHeight() ) * baseline.y;
-		el.from.left = ( original.outerWidth - el.outerWidth() ) * baseline.x;
-		el.to.top = ( original.outerHeight - el.to.outerHeight ) * baseline.y;
-		el.to.left = ( original.outerWidth - el.to.outerWidth ) * baseline.x;
-	}
-	el.css( el.from ); // set top & left
-
-	// Animate
-	if ( scale === "content" || scale === "both" ) { // Scale the children
-
-		// Add margins/font-size
-		vProps = vProps.concat([ "marginTop", "marginBottom" ]).concat(cProps);
-		hProps = hProps.concat([ "marginLeft", "marginRight" ]);
-		props2 = props.concat(vProps).concat(hProps);
-
-		el.find( "*[width]" ).each( function(){
-			var child = $( this ),
-				c_original = {
-					height: child.height(),
-					width: child.width()
-				};
-			if (restore) {
-				$.effects.save(child, props2);
-			}
-
-			child.from = {
-				height: c_original.height * factor.from.y,
-				width: c_original.width * factor.from.x
-			};
-			child.to = {
-				height: c_original.height * factor.to.y,
-				width: c_original.width * factor.to.x
-			};
-
-			// Vertical props scaling
-			if ( factor.from.y !== factor.to.y ) {
-				child.from = $.effects.setTransition( child, vProps, factor.from.y, child.from );
-				child.to = $.effects.setTransition( child, vProps, factor.to.y, child.to );
-			}
-
-			// Horizontal props scaling
-			if ( factor.from.x !== factor.to.x ) {
-				child.from = $.effects.setTransition( child, hProps, factor.from.x, child.from );
-				child.to = $.effects.setTransition( child, hProps, factor.to.x, child.to );
-			}
-
-			// Animate children
-			child.css( child.from );
-			child.animate( child.to, o.duration, o.easing, function() {
-
-				// Restore children
-				if ( restore ) {
-					$.effects.restore( child, props2 );
-				}
-			});
-		});
-	}
-
-	// Animate
-	el.animate( el.to, {
-		queue: false,
-		duration: o.duration,
-		easing: o.easing,
-		complete: function() {
-			if ( el.to.opacity === 0 ) {
-				el.css( "opacity", el.from.opacity );
-			}
-			if( mode === "hide" ) {
-				el.hide();
-			}
-			$.effects.restore( el, restore ? props : props1 );
-			if ( !restore ) {
-
-				// we need to calculate our new positioning based on the scaling
-				if ( position === "static" ) {
-					el.css({
-						position: "relative",
-						top: el.to.top,
-						left: el.to.left
-					});
-				} else {
-					$.each([ "top", "left" ], function( idx, pos ) {
-						el.css( pos, function( _, str ) {
-							var val = parseInt( str, 10 ),
-								toRef = idx ? el.to.left : el.to.top;
-
-							// if original was "auto", recalculate the new value from wrapper
-							if ( str === "auto" ) {
-								return toRef + "px";
-							}
-
-							return val + toRef + "px";
-						});
-					});
-				}
-			}
-
-			$.effects.removeWrapper( el );
-			done();
-		}
-	});
-
-};
-
-})(jQuery);
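As the code shows, puff delegates to scale, which in turn delegates to size; the relevant knobs are percent, direction, and origin. A sketch (selectors illustrative):

	// grow to 150% while fading out
	$( "#dialog" ).hide( "puff", { percent: 150 }, 400 );
	// shrink an element to half size on both axes around its center
	$( "#thumb" ).effect( "scale", { percent: 50, direction: "both", origin: [ "middle", "center" ] }, 500 );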
-(function( $, undefined ) {
-
-$.effects.effect.shake = function( o, done ) {
-
-	var el = $( this ),
-		props = [ "position", "top", "bottom", "left", "right", "height", "width" ],
-		mode = $.effects.setMode( el, o.mode || "effect" ),
-		direction = o.direction || "left",
-		distance = o.distance || 20,
-		times = o.times || 3,
-		anims = times * 2 + 1,
-		speed = Math.round(o.duration/anims),
-		ref = (direction === "up" || direction === "down") ? "top" : "left",
-		positiveMotion = (direction === "up" || direction === "left"),
-		animation = {},
-		animation1 = {},
-		animation2 = {},
-		i,
-
-		// we will need to re-assemble the queue to stack our animations in place
-		queue = el.queue(),
-		queuelen = queue.length;
-
-	$.effects.save( el, props );
-	el.show();
-	$.effects.createWrapper( el );
-
-	// Animation
-	animation[ ref ] = ( positiveMotion ? "-=" : "+=" ) + distance;
-	animation1[ ref ] = ( positiveMotion ? "+=" : "-=" ) + distance * 2;
-	animation2[ ref ] = ( positiveMotion ? "-=" : "+=" ) + distance * 2;
-
-	// Animate
-	el.animate( animation, speed, o.easing );
-
-	// Shakes
-	for ( i = 1; i < times; i++ ) {
-		el.animate( animation1, speed, o.easing ).animate( animation2, speed, o.easing );
-	}
-	el
-		.animate( animation1, speed, o.easing )
-		.animate( animation, speed / 2, o.easing )
-		.queue(function() {
-			if ( mode === "hide" ) {
-				el.hide();
-			}
-			$.effects.restore( el, props );
-			$.effects.removeWrapper( el );
-			done();
-		});
-
-	// inject all the animations we just queued to be first in line (after "inprogress")
-	if ( queuelen > 1 ) {
-		queue.splice.apply( queue,
-			[ 1, 0 ].concat( queue.splice( queuelen, anims + 1 ) ) );
-	}
-	el.dequeue();
-
-};
-
-})(jQuery);
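Shake produces times * 2 + 1 queued movements, so o.duration is split evenly across them; a sketch (selector illustrative):

	// shake a form horizontally to signal a validation error
	$( "#login-form" ).effect( "shake", { direction: "left", times: 3, distance: 20 }, 600 );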
-(function( $, undefined ) {
-
-$.effects.effect.slide = function( o, done ) {
-
-	// Create element
-	var el = $( this ),
-		props = [ "position", "top", "bottom", "left", "right", "width", "height" ],
-		mode = $.effects.setMode( el, o.mode || "show" ),
-		show = mode === "show",
-		direction = o.direction || "left",
-		ref = (direction === "up" || direction === "down") ? "top" : "left",
-		positiveMotion = (direction === "up" || direction === "left"),
-		distance,
-		animation = {};
-
-	// Adjust
-	$.effects.save( el, props );
-	el.show();
-	distance = o.distance || el[ ref === "top" ? "outerHeight" : "outerWidth" ]( true );
-
-	$.effects.createWrapper( el ).css({
-		overflow: "hidden"
-	});
-
-	if ( show ) {
-		el.css( ref, positiveMotion ? (isNaN(distance) ? "-" + distance : -distance) : distance );
-	}
-
-	// Animation
-	animation[ ref ] = ( show ?
-		( positiveMotion ? "+=" : "-=") :
-		( positiveMotion ? "-=" : "+=")) +
-		distance;
-
-	// Animate
-	el.animate( animation, {
-		queue: false,
-		duration: o.duration,
-		easing: o.easing,
-		complete: function() {
-			if ( mode === "hide" ) {
-				el.hide();
-			}
-			$.effects.restore( el, props );
-			$.effects.removeWrapper( el );
-			done();
-		}
-	});
-};
-
-})(jQuery);
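Slide moves the element across its full outer width or height unless o.distance overrides it; a sketch (selector illustrative):

	// slide a drawer in from (and back out to) the left edge
	$( "#drawer" ).toggle( "slide", { direction: "left" }, 500 );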
-(function( $, undefined ) {
-
-$.effects.effect.transfer = function( o, done ) {
-	var elem = $( this ),
-		target = $( o.to ),
-		targetFixed = target.css( "position" ) === "fixed",
-		body = $("body"),
-		fixTop = targetFixed ? body.scrollTop() : 0,
-		fixLeft = targetFixed ? body.scrollLeft() : 0,
-		endPosition = target.offset(),
-		animation = {
-			top: endPosition.top - fixTop,
-			left: endPosition.left - fixLeft,
-			height: target.innerHeight(),
-			width: target.innerWidth()
-		},
-		startPosition = elem.offset(),
-		transfer = $( '<div class="ui-effects-transfer"></div>' )
-			.appendTo( document.body )
-			.addClass( o.className )
-			.css({
-				top: startPosition.top - fixTop,
-				left: startPosition.left - fixLeft,
-				height: elem.innerHeight(),
-				width: elem.innerWidth(),
-				position: targetFixed ? "fixed" : "absolute"
-			})
-			.animate( animation, o.duration, o.easing, function() {
-				transfer.remove();
-				done();
-			});
-};
-
-})(jQuery);
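Transfer animates a temporary outline div from the source element to the o.to target; the outline is only visible if .ui-effects-transfer (or the class passed as o.className) is styled in CSS. A sketch with illustrative selectors:

	// fly an outline from a product tile to the cart icon
	$( "#product" ).effect( "transfer", { to: "#cart", className: "ui-effects-transfer" }, 800 );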
diff --git a/branch-1.2/ambari-web/vendor/scripts/jquery.ui.datepicker.js b/branch-1.2/ambari-web/vendor/scripts/jquery.ui.datepicker.js
deleted file mode 100644
index 7ea5b07..0000000
--- a/branch-1.2/ambari-web/vendor/scripts/jquery.ui.datepicker.js
+++ /dev/null
@@ -1,1854 +0,0 @@
-/*!
- * jQuery UI Datepicker 1.8.23
- *
- * Copyright 2012, AUTHORS.txt (http://jqueryui.com/about)
- * Dual licensed under the MIT or GPL Version 2 licenses.
- * http://jquery.org/license
- *
- * http://docs.jquery.com/UI/Datepicker
- *
- * Depends:
- *	jquery.ui.core.js
- */
-(function( $, undefined ) {
-
-$.extend($.ui, { datepicker: { version: "1.8.23" } });
-
-var PROP_NAME = 'datepicker';
-var dpuuid = new Date().getTime();
-var instActive;
-
-/* Date picker manager.
-   Use the singleton instance of this class, $.datepicker, to interact with the date picker.
-   Settings for (groups of) date pickers are maintained in an instance object,
-   allowing multiple different settings on the same page. */
-
-function Datepicker() {
-	this.debug = false; // Change this to true to start debugging
-	this._curInst = null; // The current instance in use
-	this._keyEvent = false; // If the last event was a key event
-	this._disabledInputs = []; // List of date picker inputs that have been disabled
-	this._datepickerShowing = false; // True if the popup picker is showing, false if not
-	this._inDialog = false; // True if showing within a "dialog", false if not
-	this._mainDivId = 'ui-datepicker-div'; // The ID of the main datepicker division
-	this._inlineClass = 'ui-datepicker-inline'; // The name of the inline marker class
-	this._appendClass = 'ui-datepicker-append'; // The name of the append marker class
-	this._triggerClass = 'ui-datepicker-trigger'; // The name of the trigger marker class
-	this._dialogClass = 'ui-datepicker-dialog'; // The name of the dialog marker class
-	this._disableClass = 'ui-datepicker-disabled'; // The name of the disabled covering marker class
-	this._unselectableClass = 'ui-datepicker-unselectable'; // The name of the unselectable cell marker class
-	this._currentClass = 'ui-datepicker-current-day'; // The name of the current day marker class
-	this._dayOverClass = 'ui-datepicker-days-cell-over'; // The name of the day hover marker class
-	this.regional = []; // Available regional settings, indexed by language code
-	this.regional[''] = { // Default regional settings
-		closeText: 'Done', // Display text for close link
-		prevText: 'Prev', // Display text for previous month link
-		nextText: 'Next', // Display text for next month link
-		currentText: 'Today', // Display text for current month link
-		monthNames: ['January','February','March','April','May','June',
-			'July','August','September','October','November','December'], // Names of months for drop-down and formatting
-		monthNamesShort: ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'], // For formatting
-		dayNames: ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday'], // For formatting
-		dayNamesShort: ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'], // For formatting
-		dayNamesMin: ['Su','Mo','Tu','We','Th','Fr','Sa'], // Column headings for days starting at Sunday
-		weekHeader: 'Wk', // Column header for week of the year
-		dateFormat: 'mm/dd/yy', // See format options on parseDate
-		firstDay: 0, // The first day of the week, Sun = 0, Mon = 1, ...
-		isRTL: false, // True if right-to-left language, false if left-to-right
-		showMonthAfterYear: false, // True if the year select precedes month, false for month then year
-		yearSuffix: '' // Additional text to append to the year in the month headers
-	};
-	this._defaults = { // Global defaults for all the date picker instances
-		showOn: 'focus', // 'focus' for popup on focus,
-			// 'button' for trigger button, or 'both' for either
-		showAnim: 'fadeIn', // Name of jQuery animation for popup
-		showOptions: {}, // Options for enhanced animations
-		defaultDate: null, // Used when field is blank: actual date,
-			// +/-number for offset from today, null for today
-		appendText: '', // Display text following the input box, e.g. showing the format
-		buttonText: '...', // Text for trigger button
-		buttonImage: '', // URL for trigger button image
-		buttonImageOnly: false, // True if the image appears alone, false if it appears on a button
-		hideIfNoPrevNext: false, // True to hide next/previous month links
-			// if not applicable, false to just disable them
-		navigationAsDateFormat: false, // True if date formatting applied to prev/today/next links
-		gotoCurrent: false, // True if today link goes back to current selection instead
-		changeMonth: false, // True if month can be selected directly, false if only prev/next
-		changeYear: false, // True if year can be selected directly, false if only prev/next
-		yearRange: 'c-10:c+10', // Range of years to display in drop-down,
-			// either relative to today's year (-nn:+nn), relative to currently displayed year
-			// (c-nn:c+nn), absolute (nnnn:nnnn), or a combination of the above (nnnn:-n)
-		showOtherMonths: false, // True to show dates in other months, false to leave blank
-		selectOtherMonths: false, // True to allow selection of dates in other months, false for unselectable
-		showWeek: false, // True to show week of the year, false to not show it
-		calculateWeek: this.iso8601Week, // How to calculate the week of the year,
-			// takes a Date and returns the number of the week for it
-		shortYearCutoff: '+10', // Short year values < this are in the current century,
-			// > this are in the previous century,
-			// string value starting with '+' for current year + value
-		minDate: null, // The earliest selectable date, or null for no limit
-		maxDate: null, // The latest selectable date, or null for no limit
-		duration: 'fast', // Duration of display/closure
-		beforeShowDay: null, // Function that takes a date and returns an array with
-			// [0] = true if selectable, false if not, [1] = custom CSS class name(s) or '',
-			// [2] = cell title (optional), e.g. $.datepicker.noWeekends
-		beforeShow: null, // Function that takes an input field and
-			// returns a set of custom settings for the date picker
-		onSelect: null, // Define a callback function when a date is selected
-		onChangeMonthYear: null, // Define a callback function when the month or year is changed
-		onClose: null, // Define a callback function when the datepicker is closed
-		numberOfMonths: 1, // Number of months to show at a time
-		showCurrentAtPos: 0, // The position in multiple months at which to show the current month (starting at 0)
-		stepMonths: 1, // Number of months to step back/forward
-		stepBigMonths: 12, // Number of months to step back/forward for the big links
-		altField: '', // Selector for an alternate field to store selected dates into
-		altFormat: '', // The date format to use for the alternate field
-		constrainInput: true, // The input is constrained by the current date format
-		showButtonPanel: false, // True to show button panel, false to not show it
-		autoSize: false, // True to size the input for the date format, false to leave as is
-		disabled: false // The initial disabled state
-	};
-	$.extend(this._defaults, this.regional['']);
-	this.dpDiv = bindHover($('<div id="' + this._mainDivId + '" class="ui-datepicker ui-widget ui-widget-content ui-helper-clearfix ui-corner-all"></div>'));
-}
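The defaults above map one-to-one onto the options accepted by the plugin's public entry point. A minimal initialization sketch (the field id and format are illustrative), also exercising the bundled noWeekends helper and the iso8601Week utility defined further down:

	$( "#due-date" ).datepicker({
		dateFormat: "yy-mm-dd",
		changeMonth: true,
		changeYear: true,
		beforeShowDay: $.datepicker.noWeekends // block Saturdays and Sundays
	});
	// ISO-8601 week number: 2012-01-01 falls in week 52 (of 2011)
	$.datepicker.iso8601Week( new Date( 2012, 0, 1 ) ); // => 52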
-
-$.extend(Datepicker.prototype, {
-	/* Class name added to elements to indicate already configured with a date picker. */
-	markerClassName: 'hasDatepicker',
-	
-	//Keep track of the maximum number of rows displayed (see #7043)
-	maxRows: 4,
-
-	/* Debug logging (if enabled). */
-	log: function () {
-		if (this.debug)
-			console.log.apply('', arguments);
-	},
-	
-	// TODO rename to "widget" when switching to widget factory
-	_widgetDatepicker: function() {
-		return this.dpDiv;
-	},
-
-	/* Override the default settings for all instances of the date picker.
-	   @param  settings  object - the new settings to use as defaults (anonymous object)
-	   @return the manager object */
-	setDefaults: function(settings) {
-		extendRemove(this._defaults, settings || {});
-		return this;
-	},
-
-	/* Attach the date picker to a jQuery selection.
-	   @param  target    element - the target input field or division or span
-	   @param  settings  object - the new settings to use for this date picker instance (anonymous) */
-	_attachDatepicker: function(target, settings) {
-		// check for settings on the control itself - in namespace 'date:'
-		var inlineSettings = null;
-		for (var attrName in this._defaults) {
-			var attrValue = target.getAttribute('date:' + attrName);
-			if (attrValue) {
-				inlineSettings = inlineSettings || {};
-				try {
-					inlineSettings[attrName] = eval(attrValue);
-				} catch (err) {
-					inlineSettings[attrName] = attrValue;
-				}
-			}
-		}
-		var nodeName = target.nodeName.toLowerCase();
-		var inline = (nodeName == 'div' || nodeName == 'span');
-		if (!target.id) {
-			this.uuid += 1;
-			target.id = 'dp' + this.uuid;
-		}
-		var inst = this._newInst($(target), inline);
-		inst.settings = $.extend({}, settings || {}, inlineSettings || {});
-		if (nodeName == 'input') {
-			this._connectDatepicker(target, inst);
-		} else if (inline) {
-			this._inlineDatepicker(target, inst);
-		}
-	},
-
-	/* Create a new instance object. */
-	_newInst: function(target, inline) {
-		var id = target[0].id.replace(/([^A-Za-z0-9_-])/g, '\\\\$1'); // escape jQuery meta chars
-		return {id: id, input: target, // associated target
-			selectedDay: 0, selectedMonth: 0, selectedYear: 0, // current selection
-			drawMonth: 0, drawYear: 0, // month being drawn
-			inline: inline, // is datepicker inline or not
-			dpDiv: (!inline ? this.dpDiv : // presentation div
-			bindHover($('<div class="' + this._inlineClass + ' ui-datepicker ui-widget ui-widget-content ui-helper-clearfix ui-corner-all"></div>')))};
-	},
-
-	/* Attach the date picker to an input field. */
-	_connectDatepicker: function(target, inst) {
-		var input = $(target);
-		inst.append = $([]);
-		inst.trigger = $([]);
-		if (input.hasClass(this.markerClassName))
-			return;
-		this._attachments(input, inst);
-		input.addClass(this.markerClassName).keydown(this._doKeyDown).
-			keypress(this._doKeyPress).keyup(this._doKeyUp).
-			bind("setData.datepicker", function(event, key, value) {
-				inst.settings[key] = value;
-			}).bind("getData.datepicker", function(event, key) {
-				return this._get(inst, key);
-			});
-		this._autoSize(inst);
-		$.data(target, PROP_NAME, inst);
-		//If disabled option is true, disable the datepicker once it has been attached to the input (see ticket #5665)
-		if( inst.settings.disabled ) {
-			this._disableDatepicker( target );
-		}
-	},
-
-	/* Make attachments based on settings. */
-	_attachments: function(input, inst) {
-		var appendText = this._get(inst, 'appendText');
-		var isRTL = this._get(inst, 'isRTL');
-		if (inst.append)
-			inst.append.remove();
-		if (appendText) {
-			inst.append = $('<span class="' + this._appendClass + '">' + appendText + '</span>');
-			input[isRTL ? 'before' : 'after'](inst.append);
-		}
-		input.unbind('focus', this._showDatepicker);
-		if (inst.trigger)
-			inst.trigger.remove();
-		var showOn = this._get(inst, 'showOn');
-		if (showOn == 'focus' || showOn == 'both') // pop-up date picker when in the marked field
-			input.focus(this._showDatepicker);
-		if (showOn == 'button' || showOn == 'both') { // pop-up date picker when button clicked
-			var buttonText = this._get(inst, 'buttonText');
-			var buttonImage = this._get(inst, 'buttonImage');
-			inst.trigger = $(this._get(inst, 'buttonImageOnly') ?
-				$('<img/>').addClass(this._triggerClass).
-					attr({ src: buttonImage, alt: buttonText, title: buttonText }) :
-				$('<button type="button"></button>').addClass(this._triggerClass).
-					html(buttonImage == '' ? buttonText : $('<img/>').attr(
-					{ src:buttonImage, alt:buttonText, title:buttonText })));
-			input[isRTL ? 'before' : 'after'](inst.trigger);
-			inst.trigger.click(function() {
-				if ($.datepicker._datepickerShowing && $.datepicker._lastInput == input[0])
-					$.datepicker._hideDatepicker();
-				else if ($.datepicker._datepickerShowing && $.datepicker._lastInput != input[0]) {
-					$.datepicker._hideDatepicker(); 
-					$.datepicker._showDatepicker(input[0]);
-				} else
-					$.datepicker._showDatepicker(input[0]);
-				return false;
-			});
-		}
-	},
-
-	/* Apply the maximum length for the date format. */
-	_autoSize: function(inst) {
-		if (this._get(inst, 'autoSize') && !inst.inline) {
-			var date = new Date(2009, 12 - 1, 20); // Ensure double digits
-			var dateFormat = this._get(inst, 'dateFormat');
-			if (dateFormat.match(/[DM]/)) {
-				var findMax = function(names) {
-					var max = 0;
-					var maxI = 0;
-					for (var i = 0; i < names.length; i++) {
-						if (names[i].length > max) {
-							max = names[i].length;
-							maxI = i;
-						}
-					}
-					return maxI;
-				};
-				date.setMonth(findMax(this._get(inst, (dateFormat.match(/MM/) ?
-					'monthNames' : 'monthNamesShort'))));
-				date.setDate(findMax(this._get(inst, (dateFormat.match(/DD/) ?
-					'dayNames' : 'dayNamesShort'))) + 20 - date.getDay());
-			}
-			inst.input.attr('size', this._formatDate(inst, date).length);
-		}
-	},
-
-	/* Attach an inline date picker to a div. */
-	_inlineDatepicker: function(target, inst) {
-		var divSpan = $(target);
-		if (divSpan.hasClass(this.markerClassName))
-			return;
-		divSpan.addClass(this.markerClassName).append(inst.dpDiv).
-			bind("setData.datepicker", function(event, key, value){
-				inst.settings[key] = value;
-			}).bind("getData.datepicker", function(event, key){
-				return this._get(inst, key);
-			});
-		$.data(target, PROP_NAME, inst);
-		this._setDate(inst, this._getDefaultDate(inst), true);
-		this._updateDatepicker(inst);
-		this._updateAlternate(inst);
-		//If disabled option is true, disable the datepicker before showing it (see ticket #5665)
-		if( inst.settings.disabled ) {
-			this._disableDatepicker( target );
-		}
-		// Set display:block in place of inst.dpDiv.show() which won't work on disconnected elements
-		// http://bugs.jqueryui.com/ticket/7552 - A Datepicker created on a detached div has zero height
-		inst.dpDiv.css( "display", "block" );
-	},
-
-	/* Pop-up the date picker in a "dialog" box.
-	   @param  input     element - ignored
-	   @param  date      string or Date - the initial date to display
-	   @param  onSelect  function - the function to call when a date is selected
-	   @param  settings  object - update the dialog date picker instance's settings (anonymous object)
-	   @param  pos       int[2] - coordinates for the dialog's position within the screen or
-	                     event - with x/y coordinates or
-	                     leave empty for default (screen centre)
-	   @return the manager object */
-	_dialogDatepicker: function(input, date, onSelect, settings, pos) {
-		var inst = this._dialogInst; // internal instance
-		if (!inst) {
-			this.uuid += 1;
-			var id = 'dp' + this.uuid;
-			this._dialogInput = $('<input type="text" id="' + id +
-				'" style="position: absolute; top: -100px; width: 0px;"/>');
-			this._dialogInput.keydown(this._doKeyDown);
-			$('body').append(this._dialogInput);
-			inst = this._dialogInst = this._newInst(this._dialogInput, false);
-			inst.settings = {};
-			$.data(this._dialogInput[0], PROP_NAME, inst);
-		}
-		extendRemove(inst.settings, settings || {});
-		date = (date && date.constructor == Date ? this._formatDate(inst, date) : date);
-		this._dialogInput.val(date);
-
-		this._pos = (pos ? (pos.length ? pos : [pos.pageX, pos.pageY]) : null);
-		if (!this._pos) {
-			var browserWidth = document.documentElement.clientWidth;
-			var browserHeight = document.documentElement.clientHeight;
-			var scrollX = document.documentElement.scrollLeft || document.body.scrollLeft;
-			var scrollY = document.documentElement.scrollTop || document.body.scrollTop;
-			this._pos = // should use actual width/height below
-				[(browserWidth / 2) - 100 + scrollX, (browserHeight / 2) - 150 + scrollY];
-		}
-
-		// move input on screen for focus, but hidden behind dialog
-		this._dialogInput.css('left', (this._pos[0] + 20) + 'px').css('top', this._pos[1] + 'px');
-		inst.settings.onSelect = onSelect;
-		this._inDialog = true;
-		this.dpDiv.addClass(this._dialogClass);
-		this._showDatepicker(this._dialogInput[0]);
-		if ($.blockUI)
-			$.blockUI(this.dpDiv);
-		$.data(this._dialogInput[0], PROP_NAME, inst);
-		return this;
-	},
-
-	/* Detach a datepicker from its control.
-	   @param  target    element - the target input field or division or span */
-	_destroyDatepicker: function(target) {
-		var $target = $(target);
-		var inst = $.data(target, PROP_NAME);
-		if (!$target.hasClass(this.markerClassName)) {
-			return;
-		}
-		var nodeName = target.nodeName.toLowerCase();
-		$.removeData(target, PROP_NAME);
-		if (nodeName == 'input') {
-			inst.append.remove();
-			inst.trigger.remove();
-			$target.removeClass(this.markerClassName).
-				unbind('focus', this._showDatepicker).
-				unbind('keydown', this._doKeyDown).
-				unbind('keypress', this._doKeyPress).
-				unbind('keyup', this._doKeyUp);
-		} else if (nodeName == 'div' || nodeName == 'span')
-			$target.removeClass(this.markerClassName).empty();
-	},
-
-	/* Enable the date picker for a jQuery selection.
-	   @param  target    element - the target input field or division or span */
-	_enableDatepicker: function(target) {
-		var $target = $(target);
-		var inst = $.data(target, PROP_NAME);
-		if (!$target.hasClass(this.markerClassName)) {
-			return;
-		}
-		var nodeName = target.nodeName.toLowerCase();
-		if (nodeName == 'input') {
-			target.disabled = false;
-			inst.trigger.filter('button').
-				each(function() { this.disabled = false; }).end().
-				filter('img').css({opacity: '1.0', cursor: ''});
-		}
-		else if (nodeName == 'div' || nodeName == 'span') {
-			var inline = $target.children('.' + this._inlineClass);
-			inline.children().removeClass('ui-state-disabled');
-			inline.find("select.ui-datepicker-month, select.ui-datepicker-year").
-				removeAttr("disabled");
-		}
-		this._disabledInputs = $.map(this._disabledInputs,
-			function(value) { return (value == target ? null : value); }); // delete entry
-	},
-
-	/* Disable the date picker for a jQuery selection.
-	   @param  target    element - the target input field or division or span */
-	_disableDatepicker: function(target) {
-		var $target = $(target);
-		var inst = $.data(target, PROP_NAME);
-		if (!$target.hasClass(this.markerClassName)) {
-			return;
-		}
-		var nodeName = target.nodeName.toLowerCase();
-		if (nodeName == 'input') {
-			target.disabled = true;
-			inst.trigger.filter('button').
-				each(function() { this.disabled = true; }).end().
-				filter('img').css({opacity: '0.5', cursor: 'default'});
-		}
-		else if (nodeName == 'div' || nodeName == 'span') {
-			var inline = $target.children('.' + this._inlineClass);
-			inline.children().addClass('ui-state-disabled');
-			inline.find("select.ui-datepicker-month, select.ui-datepicker-year").
-				attr("disabled", "disabled");
-		}
-		this._disabledInputs = $.map(this._disabledInputs,
-			function(value) { return (value == target ? null : value); }); // delete entry
-		this._disabledInputs[this._disabledInputs.length] = target;
-	},
-
-	/* Is the first field in a jQuery collection disabled as a datepicker?
-	   @param  target    element - the target input field or division or span
-	   @return boolean - true if disabled, false if enabled */
-	_isDisabledDatepicker: function(target) {
-		if (!target) {
-			return false;
-		}
-		for (var i = 0; i < this._disabledInputs.length; i++) {
-			if (this._disabledInputs[i] == target)
-				return true;
-		}
-		return false;
-	},
-
-	/* Retrieve the instance data for the target control.
-	   @param  target  element - the target input field or division or span
-	   @return  object - the associated instance data
-	   @throws  error if there is a problem getting the instance data
-	_getInst: function(target) {
-		try {
-			return $.data(target, PROP_NAME);
-		}
-		catch (err) {
-			throw 'Missing instance data for this datepicker';
-		}
-	},
-
-	/* Update or retrieve the settings for a date picker attached to an input field or division.
-	   @param  target  element - the target input field or division or span
-	   @param  name    object - the new settings to update or
-	                   string - the name of the setting to change or retrieve,
-	                   when retrieving also 'all' for all instance settings or
-	                   'defaults' for all global defaults
-	   @param  value   any - the new value for the setting
-	                   (omit if above is an object or to retrieve a value) */
-	_optionDatepicker: function(target, name, value) {
-		var inst = this._getInst(target);
-		if (arguments.length == 2 && typeof name == 'string') {
-			return (name == 'defaults' ? $.extend({}, $.datepicker._defaults) :
-				(inst ? (name == 'all' ? $.extend({}, inst.settings) :
-				this._get(inst, name)) : null));
-		}
-		var settings = name || {};
-		if (typeof name == 'string') {
-			settings = {};
-			settings[name] = value;
-		}
-		if (inst) {
-			if (this._curInst == inst) {
-				this._hideDatepicker();
-			}
-			var date = this._getDateDatepicker(target, true);
-			var minDate = this._getMinMaxDate(inst, 'min');
-			var maxDate = this._getMinMaxDate(inst, 'max');
-			extendRemove(inst.settings, settings);
-			// reformat the old minDate/maxDate values if dateFormat changes and a new minDate/maxDate isn't provided
-			if (minDate !== null && settings['dateFormat'] !== undefined && settings['minDate'] === undefined)
-				inst.settings.minDate = this._formatDate(inst, minDate);
-			if (maxDate !== null && settings['dateFormat'] !== undefined && settings['maxDate'] === undefined)
-				inst.settings.maxDate = this._formatDate(inst, maxDate);
-			this._attachments($(target), inst);
-			this._autoSize(inst);
-			this._setDate(inst, date);
-			this._updateAlternate(inst);
-			this._updateDatepicker(inst);
-		}
-	},
-
-	// change method deprecated
-	_changeDatepicker: function(target, name, value) {
-		this._optionDatepicker(target, name, value);
-	},
-
-	/* Redraw the date picker attached to an input field or division.
-	   @param  target  element - the target input field or division or span */
-	_refreshDatepicker: function(target) {
-		var inst = this._getInst(target);
-		if (inst) {
-			this._updateDatepicker(inst);
-		}
-	},
-
-	/* Set the dates for a jQuery selection.
-	   @param  target   element - the target input field or division or span
-	   @param  date     Date - the new date */
-	_setDateDatepicker: function(target, date) {
-		var inst = this._getInst(target);
-		if (inst) {
-			this._setDate(inst, date);
-			this._updateDatepicker(inst);
-			this._updateAlternate(inst);
-		}
-	},
-
-	/* Get the date(s) for the first entry in a jQuery selection.
-	   @param  target     element - the target input field or division or span
-	   @param  noDefault  boolean - true if no default date is to be used
-	   @return Date - the current date */
-	_getDateDatepicker: function(target, noDefault) {
-		var inst = this._getInst(target);
-		if (inst && !inst.inline)
-			this._setDateFromField(inst, noDefault);
-		return (inst ? this._getDate(inst) : null);
-	},
-
-	/* Handle keystrokes. */
-	_doKeyDown: function(event) {
-		var inst = $.datepicker._getInst(event.target);
-		var handled = true;
-		var isRTL = inst.dpDiv.is('.ui-datepicker-rtl');
-		inst._keyEvent = true;
-		if ($.datepicker._datepickerShowing)
-			switch (event.keyCode) {
-				case 9: $.datepicker._hideDatepicker();
-						handled = false;
-						break; // hide on tab out
-				case 13: var sel = $('td.' + $.datepicker._dayOverClass + ':not(.' +
-									$.datepicker._currentClass + ')', inst.dpDiv);
-						if (sel[0])
-							$.datepicker._selectDay(event.target, inst.selectedMonth, inst.selectedYear, sel[0]);
-						var onSelect = $.datepicker._get(inst, 'onSelect');
-						if (onSelect) {
-							var dateStr = $.datepicker._formatDate(inst);
-
-							// trigger custom callback
-							onSelect.apply((inst.input ? inst.input[0] : null), [dateStr, inst]);
-						} else
-							$.datepicker._hideDatepicker();
-						return false; // don't submit the form
-						break; // select the value on enter
-				case 27: $.datepicker._hideDatepicker();
-						break; // hide on escape
-				case 33: $.datepicker._adjustDate(event.target, (event.ctrlKey ?
-							-$.datepicker._get(inst, 'stepBigMonths') :
-							-$.datepicker._get(inst, 'stepMonths')), 'M');
-						break; // previous month/year on page up/+ ctrl
-				case 34: $.datepicker._adjustDate(event.target, (event.ctrlKey ?
-							+$.datepicker._get(inst, 'stepBigMonths') :
-							+$.datepicker._get(inst, 'stepMonths')), 'M');
-						break; // next month/year on page down/+ ctrl
-				case 35: if (event.ctrlKey || event.metaKey) $.datepicker._clearDate(event.target);
-						handled = event.ctrlKey || event.metaKey;
-						break; // clear on ctrl or command +end
-				case 36: if (event.ctrlKey || event.metaKey) $.datepicker._gotoToday(event.target);
-						handled = event.ctrlKey || event.metaKey;
-						break; // current on ctrl or command +home
-				case 37: if (event.ctrlKey || event.metaKey) $.datepicker._adjustDate(event.target, (isRTL ? +1 : -1), 'D');
-						handled = event.ctrlKey || event.metaKey;
-						// -1 day on ctrl or command +left
-						if (event.originalEvent.altKey) $.datepicker._adjustDate(event.target, (event.ctrlKey ?
-									-$.datepicker._get(inst, 'stepBigMonths') :
-									-$.datepicker._get(inst, 'stepMonths')), 'M');
-						// next month/year on alt +left on Mac
-						break;
-				case 38: if (event.ctrlKey || event.metaKey) $.datepicker._adjustDate(event.target, -7, 'D');
-						handled = event.ctrlKey || event.metaKey;
-						break; // -1 week on ctrl or command +up
-				case 39: if (event.ctrlKey || event.metaKey) $.datepicker._adjustDate(event.target, (isRTL ? -1 : +1), 'D');
-						handled = event.ctrlKey || event.metaKey;
-						// +1 day on ctrl or command +right
-						if (event.originalEvent.altKey) $.datepicker._adjustDate(event.target, (event.ctrlKey ?
-									+$.datepicker._get(inst, 'stepBigMonths') :
-									+$.datepicker._get(inst, 'stepMonths')), 'M');
-						// next month/year on alt +right
-						break;
-				case 40: if (event.ctrlKey || event.metaKey) $.datepicker._adjustDate(event.target, +7, 'D');
-						handled = event.ctrlKey || event.metaKey;
-						break; // +1 week on ctrl or command +down
-				default: handled = false;
-			}
-		else if (event.keyCode == 36 && event.ctrlKey) // display the date picker on ctrl+home
-			$.datepicker._showDatepicker(this);
-		else {
-			handled = false;
-		}
-		if (handled) {
-			event.preventDefault();
-			event.stopPropagation();
-		}
-	},
-
-	/* Filter entered characters - based on date format. */
-	_doKeyPress: function(event) {
-		var inst = $.datepicker._getInst(event.target);
-		if ($.datepicker._get(inst, 'constrainInput')) {
-			var chars = $.datepicker._possibleChars($.datepicker._get(inst, 'dateFormat'));
-			var chr = String.fromCharCode(event.charCode == undefined ? event.keyCode : event.charCode);
-			return event.ctrlKey || event.metaKey || (chr < ' ' || !chars || chars.indexOf(chr) > -1);
-		}
-	},
-
-	/* Synchronise manual entry and field/alternate field. */
-	_doKeyUp: function(event) {
-		var inst = $.datepicker._getInst(event.target);
-		if (inst.input.val() != inst.lastVal) {
-			try {
-				var date = $.datepicker.parseDate($.datepicker._get(inst, 'dateFormat'),
-					(inst.input ? inst.input.val() : null),
-					$.datepicker._getFormatConfig(inst));
-				if (date) { // only if valid
-					$.datepicker._setDateFromField(inst);
-					$.datepicker._updateAlternate(inst);
-					$.datepicker._updateDatepicker(inst);
-				}
-			}
-			catch (err) {
-				$.datepicker.log(err);
-			}
-		}
-		return true;
-	},
-
-	/* Pop-up the date picker for a given input field.
-	   If the beforeShow event handler returns false, the picker is not shown.
-	   @param  input  element - the input field attached to the date picker or
-	                  event - if triggered by focus */
-	_showDatepicker: function(input) {
-		input = input.target || input;
-		if (input.nodeName.toLowerCase() != 'input') // find from button/image trigger
-			input = $('input', input.parentNode)[0];
-		if ($.datepicker._isDisabledDatepicker(input) || $.datepicker._lastInput == input) // already here
-			return;
-		var inst = $.datepicker._getInst(input);
-		if ($.datepicker._curInst && $.datepicker._curInst != inst) {
-			$.datepicker._curInst.dpDiv.stop(true, true);
-			if ( inst && $.datepicker._datepickerShowing ) {
-				$.datepicker._hideDatepicker( $.datepicker._curInst.input[0] );
-			}
-		}
-		var beforeShow = $.datepicker._get(inst, 'beforeShow');
-		var beforeShowSettings = beforeShow ? beforeShow.apply(input, [input, inst]) : {};
-		if (beforeShowSettings === false) {
-			// beforeShow vetoed the display
-			return;
-		}
-		extendRemove(inst.settings, beforeShowSettings);
-		inst.lastVal = null;
-		$.datepicker._lastInput = input;
-		$.datepicker._setDateFromField(inst);
-		if ($.datepicker._inDialog) // hide cursor
-			input.value = '';
-		if (!$.datepicker._pos) { // position below input
-			$.datepicker._pos = $.datepicker._findPos(input);
-			$.datepicker._pos[1] += input.offsetHeight; // add the height
-		}
-		var isFixed = false;
-		$(input).parents().each(function() {
-			isFixed |= $(this).css('position') == 'fixed';
-			return !isFixed;
-		});
-		if (isFixed && $.browser.opera) { // correction for Opera when fixed and scrolled
-			$.datepicker._pos[0] -= document.documentElement.scrollLeft;
-			$.datepicker._pos[1] -= document.documentElement.scrollTop;
-		}
-		var offset = {left: $.datepicker._pos[0], top: $.datepicker._pos[1]};
-		$.datepicker._pos = null;
-		//to avoid flashes on Firefox
-		inst.dpDiv.empty();
-		// determine sizing offscreen
-		inst.dpDiv.css({position: 'absolute', display: 'block', top: '-1000px'});
-		$.datepicker._updateDatepicker(inst);
-		// fix width for dynamic number of date pickers
-		// and adjust position before showing
-		offset = $.datepicker._checkOffset(inst, offset, isFixed);
-		inst.dpDiv.css({position: ($.datepicker._inDialog && $.blockUI ?
-			'static' : (isFixed ? 'fixed' : 'absolute')), display: 'none',
-			left: offset.left + 'px', top: offset.top + 'px'});
-		if (!inst.inline) {
-			var showAnim = $.datepicker._get(inst, 'showAnim');
-			var duration = $.datepicker._get(inst, 'duration');
-			var postProcess = function() {
-				var cover = inst.dpDiv.find('iframe.ui-datepicker-cover'); // IE6- only
-				if( !! cover.length ){
-					var borders = $.datepicker._getBorders(inst.dpDiv);
-					cover.css({left: -borders[0], top: -borders[1],
-						width: inst.dpDiv.outerWidth(), height: inst.dpDiv.outerHeight()});
-				}
-			};
-			inst.dpDiv.zIndex($(input).zIndex()+1);
-			$.datepicker._datepickerShowing = true;
-			if ($.effects && $.effects[showAnim])
-				inst.dpDiv.show(showAnim, $.datepicker._get(inst, 'showOptions'), duration, postProcess);
-			else
-				inst.dpDiv[showAnim || 'show']((showAnim ? duration : null), postProcess);
-			if (!showAnim || !duration)
-				postProcess();
-			if (inst.input.is(':visible') && !inst.input.is(':disabled'))
-				inst.input.focus();
-			$.datepicker._curInst = inst;
-		}
-	},
-
-	/* Generate the date picker content. */
-	_updateDatepicker: function(inst) {
-		var self = this;
-		self.maxRows = 4; //Reset the max number of rows being displayed (see #7043)
-		var borders = $.datepicker._getBorders(inst.dpDiv);
-		instActive = inst; // for delegate hover events
-		inst.dpDiv.empty().append(this._generateHTML(inst));
-		this._attachHandlers(inst);
-		var cover = inst.dpDiv.find('iframe.ui-datepicker-cover'); // IE6- only
-		if( !!cover.length ){ //avoid call to outerXXXX() when not in IE6
-			cover.css({left: -borders[0], top: -borders[1], width: inst.dpDiv.outerWidth(), height: inst.dpDiv.outerHeight()});
-		}
-		inst.dpDiv.find('.' + this._dayOverClass + ' a').mouseover();
-		var numMonths = this._getNumberOfMonths(inst);
-		var cols = numMonths[1];
-		var width = 17;
-		inst.dpDiv.removeClass('ui-datepicker-multi-2 ui-datepicker-multi-3 ui-datepicker-multi-4').width('');
-		if (cols > 1)
-			inst.dpDiv.addClass('ui-datepicker-multi-' + cols).css('width', (width * cols) + 'em');
-		inst.dpDiv[(numMonths[0] != 1 || numMonths[1] != 1 ? 'add' : 'remove') +
-			'Class']('ui-datepicker-multi');
-		inst.dpDiv[(this._get(inst, 'isRTL') ? 'add' : 'remove') +
-			'Class']('ui-datepicker-rtl');
-		if (inst == $.datepicker._curInst && $.datepicker._datepickerShowing && inst.input &&
-				// #6694 - don't focus the input if it's already focused
-				// this breaks the change event in IE
-				inst.input.is(':visible') && !inst.input.is(':disabled') && inst.input[0] != document.activeElement)
-			inst.input.focus();
-		// deferred render of the years select (to avoid flashes on Firefox)
-		if( inst.yearshtml ){
-			var origyearshtml = inst.yearshtml;
-			setTimeout(function(){
-				// ensure that inst.yearshtml didn't change
-				if( origyearshtml === inst.yearshtml && inst.yearshtml ){
-					inst.dpDiv.find('select.ui-datepicker-year:first').replaceWith(inst.yearshtml);
-				}
-				origyearshtml = inst.yearshtml = null;
-			}, 0);
-		}
-	},
-
-	/* Retrieve the size of left and top borders for an element.
-	   @param  elem  (jQuery object) the element of interest
-	   @return  (number[2]) the left and top borders */
-	_getBorders: function(elem) {
-		var convert = function(value) {
-			return {thin: 1, medium: 2, thick: 3}[value] || value;
-		};
-		return [parseFloat(convert(elem.css('border-left-width'))),
-			parseFloat(convert(elem.css('border-top-width')))];
-	},
-
-	/* Check positioning to remain on screen. */
-	_checkOffset: function(inst, offset, isFixed) {
-		var dpWidth = inst.dpDiv.outerWidth();
-		var dpHeight = inst.dpDiv.outerHeight();
-		var inputWidth = inst.input ? inst.input.outerWidth() : 0;
-		var inputHeight = inst.input ? inst.input.outerHeight() : 0;
-		var viewWidth = document.documentElement.clientWidth + (isFixed ? 0 : $(document).scrollLeft());
-		var viewHeight = document.documentElement.clientHeight + (isFixed ? 0 : $(document).scrollTop());
-
-		offset.left -= (this._get(inst, 'isRTL') ? (dpWidth - inputWidth) : 0);
-		offset.left -= (isFixed && offset.left == inst.input.offset().left) ? $(document).scrollLeft() : 0;
-		offset.top -= (isFixed && offset.top == (inst.input.offset().top + inputHeight)) ? $(document).scrollTop() : 0;
-
-		// now check if datepicker is showing outside window viewport - move to a better place if so.
-		offset.left -= Math.min(offset.left, (offset.left + dpWidth > viewWidth && viewWidth > dpWidth) ?
-			Math.abs(offset.left + dpWidth - viewWidth) : 0);
-		offset.top -= Math.min(offset.top, (offset.top + dpHeight > viewHeight && viewHeight > dpHeight) ?
-			Math.abs(dpHeight + inputHeight) : 0);
-
-		return offset;
-	},
-
-	/* Find an object's position on the screen. */
-	_findPos: function(obj) {
-		var inst = this._getInst(obj);
-		var isRTL = this._get(inst, 'isRTL');
-		while (obj && (obj.type == 'hidden' || obj.nodeType != 1 || $.expr.filters.hidden(obj))) {
-			obj = obj[isRTL ? 'previousSibling' : 'nextSibling'];
-		}
-		var position = $(obj).offset();
-		return [position.left, position.top];
-	},
-
-	/* Hide the date picker from view.
-	   @param  input  element - the input field attached to the date picker */
-	_hideDatepicker: function(input) {
-		var inst = this._curInst;
-		if (!inst || (input && inst != $.data(input, PROP_NAME)))
-			return;
-		if (this._datepickerShowing) {
-			var showAnim = this._get(inst, 'showAnim');
-			var duration = this._get(inst, 'duration');
-			var postProcess = function() {
-				$.datepicker._tidyDialog(inst);
-			};
-			if ($.effects && $.effects[showAnim])
-				inst.dpDiv.hide(showAnim, $.datepicker._get(inst, 'showOptions'), duration, postProcess);
-			else
-				inst.dpDiv[(showAnim == 'slideDown' ? 'slideUp' :
-					(showAnim == 'fadeIn' ? 'fadeOut' : 'hide'))]((showAnim ? duration : null), postProcess);
-			if (!showAnim)
-				postProcess();
-			this._datepickerShowing = false;
-			var onClose = this._get(inst, 'onClose');
-			if (onClose)
-				onClose.apply((inst.input ? inst.input[0] : null),
-					[(inst.input ? inst.input.val() : ''), inst]);
-			this._lastInput = null;
-			if (this._inDialog) {
-				this._dialogInput.css({ position: 'absolute', left: '0', top: '-100px' });
-				if ($.blockUI) {
-					$.unblockUI();
-					$('body').append(this.dpDiv);
-				}
-			}
-			this._inDialog = false;
-		}
-	},
-
-	/* Tidy up after a dialog display. */
-	_tidyDialog: function(inst) {
-		inst.dpDiv.removeClass(this._dialogClass).unbind('.ui-datepicker-calendar');
-	},
-
-	/* Close date picker if clicked elsewhere. */
-	_checkExternalClick: function(event) {
-		if (!$.datepicker._curInst)
-			return;
-
-		var $target = $(event.target),
-			inst = $.datepicker._getInst($target[0]);
-
-		if ( ( ( $target[0].id != $.datepicker._mainDivId &&
-				$target.parents('#' + $.datepicker._mainDivId).length == 0 &&
-				!$target.hasClass($.datepicker.markerClassName) &&
-				!$target.closest("." + $.datepicker._triggerClass).length &&
-				$.datepicker._datepickerShowing && !($.datepicker._inDialog && $.blockUI) ) ) ||
-			( $target.hasClass($.datepicker.markerClassName) && $.datepicker._curInst != inst ) )
-			$.datepicker._hideDatepicker();
-	},
-
-	/* Adjust one of the date sub-fields. */
-	_adjustDate: function(id, offset, period) {
-		var target = $(id);
-		var inst = this._getInst(target[0]);
-		if (this._isDisabledDatepicker(target[0])) {
-			return;
-		}
-		this._adjustInstDate(inst, offset +
-			(period == 'M' ? this._get(inst, 'showCurrentAtPos') : 0), // undo positioning
-			period);
-		this._updateDatepicker(inst);
-	},
-
-	/* Action for current link. */
-	_gotoToday: function(id) {
-		var target = $(id);
-		var inst = this._getInst(target[0]);
-		if (this._get(inst, 'gotoCurrent') && inst.currentDay) {
-			inst.selectedDay = inst.currentDay;
-			inst.drawMonth = inst.selectedMonth = inst.currentMonth;
-			inst.drawYear = inst.selectedYear = inst.currentYear;
-		}
-		else {
-			var date = new Date();
-			inst.selectedDay = date.getDate();
-			inst.drawMonth = inst.selectedMonth = date.getMonth();
-			inst.drawYear = inst.selectedYear = date.getFullYear();
-		}
-		this._notifyChange(inst);
-		this._adjustDate(target);
-	},
-
-	/* Action for selecting a new month/year. */
-	_selectMonthYear: function(id, select, period) {
-		var target = $(id);
-		var inst = this._getInst(target[0]);
-		inst['selected' + (period == 'M' ? 'Month' : 'Year')] =
-		inst['draw' + (period == 'M' ? 'Month' : 'Year')] =
-			parseInt(select.options[select.selectedIndex].value,10);
-		this._notifyChange(inst);
-		this._adjustDate(target);
-	},
-
-	/* Action for selecting a day. */
-	_selectDay: function(id, month, year, td) {
-		var target = $(id);
-		if ($(td).hasClass(this._unselectableClass) || this._isDisabledDatepicker(target[0])) {
-			return;
-		}
-		var inst = this._getInst(target[0]);
-		inst.selectedDay = inst.currentDay = $('a', td).html();
-		inst.selectedMonth = inst.currentMonth = month;
-		inst.selectedYear = inst.currentYear = year;
-		this._selectDate(id, this._formatDate(inst,
-			inst.currentDay, inst.currentMonth, inst.currentYear));
-	},
-
-	/* Erase the input field and hide the date picker. */
-	_clearDate: function(id) {
-		var target = $(id);
-		var inst = this._getInst(target[0]);
-		this._selectDate(target, '');
-	},
-
-	/* Update the input field with the selected date. */
-	_selectDate: function(id, dateStr) {
-		var target = $(id);
-		var inst = this._getInst(target[0]);
-		dateStr = (dateStr != null ? dateStr : this._formatDate(inst));
-		if (inst.input)
-			inst.input.val(dateStr);
-		this._updateAlternate(inst);
-		var onSelect = this._get(inst, 'onSelect');
-		if (onSelect)
-			onSelect.apply((inst.input ? inst.input[0] : null), [dateStr, inst]);  // trigger custom callback
-		else if (inst.input)
-			inst.input.trigger('change'); // fire the change event
-		if (inst.inline)
-			this._updateDatepicker(inst);
-		else {
-			this._hideDatepicker();
-			this._lastInput = inst.input[0];
-			if (typeof(inst.input[0]) != 'object')
-				inst.input.focus(); // restore focus
-			this._lastInput = null;
-		}
-	},
-
-	/* Update any alternate field to synchronise with the main field. */
-	_updateAlternate: function(inst) {
-		var altField = this._get(inst, 'altField');
-		if (altField) { // update alternate field too
-			var altFormat = this._get(inst, 'altFormat') || this._get(inst, 'dateFormat');
-			var date = this._getDate(inst);
-			var dateStr = this.formatDate(altFormat, date, this._getFormatConfig(inst));
-			$(altField).each(function() { $(this).val(dateStr); });
-		}
-	},
-
-	/* Set as beforeShowDay function to prevent selection of weekends.
-	   @param  date  Date - the date to customise
-	   @return [boolean, string] - is this date selectable?, what is its CSS class? */
-	noWeekends: function(date) {
-		var day = date.getDay();
-		return [(day > 0 && day < 6), ''];
-	},
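
The helper above is meant to be handed straight to the beforeShowDay option. A minimal usage sketch (the #event-date selector is hypothetical):

    // Disallow Saturday/Sunday selection on a hypothetical field.
    $('#event-date').datepicker({ beforeShowDay: $.datepicker.noWeekends });

Weekend cells then render as unselectable, since the first element of the returned pair is false for days 0 and 6.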
-
-	/* Set as calculateWeek to determine the week of the year based on the ISO 8601 definition.
-	   @param  date  Date - the date to get the week for
-	   @return  number - the number of the week within the year that contains this date */
-	iso8601Week: function(date) {
-		var checkDate = new Date(date.getTime());
-		// Find Thursday of this week starting on Monday
-		checkDate.setDate(checkDate.getDate() + 4 - (checkDate.getDay() || 7));
-		var time = checkDate.getTime();
-		checkDate.setMonth(0); // Compare with Jan 1
-		checkDate.setDate(1);
-		return Math.floor(Math.round((time - checkDate) / 86400000) / 7) + 1;
-	},
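
For intuition: the function shifts the date to the Thursday of its week (ISO weeks run Monday to Sunday and belong to the year containing their Thursday), then counts weeks from January 1. Two spot checks, assuming this file is loaded:

    $.datepicker.iso8601Week(new Date(2011, 0, 1)); // 52 -- Sat 2011-01-01 falls in 2010's last ISO week
    $.datepicker.iso8601Week(new Date(2012, 0, 2)); // 1  -- Mon 2012-01-02 opens week 1 of 2012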
-
-	/* Parse a string value into a date object.
-	   See formatDate below for the possible formats.
-
-	   @param  format    string - the expected format of the date
-	   @param  value     string - the date in the above format
-	   @param  settings  Object - attributes include:
-	                     shortYearCutoff  number - the cutoff year for determining the century (optional)
-	                     dayNamesShort    string[7] - abbreviated names of the days from Sunday (optional)
-	                     dayNames         string[7] - names of the days from Sunday (optional)
-	                     monthNamesShort  string[12] - abbreviated names of the months (optional)
-	                     monthNames       string[12] - names of the months (optional)
-	   @return  Date - the extracted date value or null if value is blank */
-	parseDate: function (format, value, settings) {
-		if (format == null || value == null)
-			throw 'Invalid arguments';
-		value = (typeof value == 'object' ? value.toString() : value + '');
-		if (value == '')
-			return null;
-		var shortYearCutoff = (settings ? settings.shortYearCutoff : null) || this._defaults.shortYearCutoff;
-		shortYearCutoff = (typeof shortYearCutoff != 'string' ? shortYearCutoff :
-				new Date().getFullYear() % 100 + parseInt(shortYearCutoff, 10));
-		var dayNamesShort = (settings ? settings.dayNamesShort : null) || this._defaults.dayNamesShort;
-		var dayNames = (settings ? settings.dayNames : null) || this._defaults.dayNames;
-		var monthNamesShort = (settings ? settings.monthNamesShort : null) || this._defaults.monthNamesShort;
-		var monthNames = (settings ? settings.monthNames : null) || this._defaults.monthNames;
-		var year = -1;
-		var month = -1;
-		var day = -1;
-		var doy = -1;
-		var literal = false;
-		// Check whether a format character is doubled
-		var lookAhead = function(match) {
-			var matches = (iFormat + 1 < format.length && format.charAt(iFormat + 1) == match);
-			if (matches)
-				iFormat++;
-			return matches;
-		};
-		// Extract a number from the string value
-		var getNumber = function(match) {
-			var isDoubled = lookAhead(match);
-			var size = (match == '@' ? 14 : (match == '!' ? 20 :
-				(match == 'y' && isDoubled ? 4 : (match == 'o' ? 3 : 2))));
-			var digits = new RegExp('^\\d{1,' + size + '}');
-			var num = value.substring(iValue).match(digits);
-			if (!num)
-				throw 'Missing number at position ' + iValue;
-			iValue += num[0].length;
-			return parseInt(num[0], 10);
-		};
-		// Extract a name from the string value and convert to an index
-		var getName = function(match, shortNames, longNames) {
-			var names = $.map(lookAhead(match) ? longNames : shortNames, function (v, k) {
-				return [ [k, v] ];
-			}).sort(function (a, b) {
-				return -(a[1].length - b[1].length);
-			});
-			var index = -1;
-			$.each(names, function (i, pair) {
-				var name = pair[1];
-				if (value.substr(iValue, name.length).toLowerCase() == name.toLowerCase()) {
-					index = pair[0];
-					iValue += name.length;
-					return false;
-				}
-			});
-			if (index != -1)
-				return index + 1;
-			else
-				throw 'Unknown name at position ' + iValue;
-		};
-		// Confirm that a literal character matches the string value
-		var checkLiteral = function() {
-			if (value.charAt(iValue) != format.charAt(iFormat))
-				throw 'Unexpected literal at position ' + iValue;
-			iValue++;
-		};
-		var iValue = 0;
-		for (var iFormat = 0; iFormat < format.length; iFormat++) {
-			if (literal)
-				if (format.charAt(iFormat) == "'" && !lookAhead("'"))
-					literal = false;
-				else
-					checkLiteral();
-			else
-				switch (format.charAt(iFormat)) {
-					case 'd':
-						day = getNumber('d');
-						break;
-					case 'D':
-						getName('D', dayNamesShort, dayNames);
-						break;
-					case 'o':
-						doy = getNumber('o');
-						break;
-					case 'm':
-						month = getNumber('m');
-						break;
-					case 'M':
-						month = getName('M', monthNamesShort, monthNames);
-						break;
-					case 'y':
-						year = getNumber('y');
-						break;
-					case '@':
-						var date = new Date(getNumber('@'));
-						year = date.getFullYear();
-						month = date.getMonth() + 1;
-						day = date.getDate();
-						break;
-					case '!':
-						var date = new Date((getNumber('!') - this._ticksTo1970) / 10000);
-						year = date.getFullYear();
-						month = date.getMonth() + 1;
-						day = date.getDate();
-						break;
-					case "'":
-						if (lookAhead("'"))
-							checkLiteral();
-						else
-							literal = true;
-						break;
-					default:
-						checkLiteral();
-				}
-		}
-		if (iValue < value.length){
-			throw "Extra/unparsed characters found in date: " + value.substring(iValue);
-		}
-		if (year == -1)
-			year = new Date().getFullYear();
-		else if (year < 100)
-			year += new Date().getFullYear() - new Date().getFullYear() % 100 +
-				(year <= shortYearCutoff ? 0 : -100);
-		if (doy > -1) {
-			month = 1;
-			day = doy;
-			do {
-				var dim = this._getDaysInMonth(year, month - 1);
-				if (day <= dim)
-					break;
-				month++;
-				day -= dim;
-			} while (true);
-		}
-		var date = this._daylightSavingAdjust(new Date(year, month - 1, day));
-		if (date.getFullYear() != year || date.getMonth() + 1 != month || date.getDate() != day)
-			throw 'Invalid date'; // E.g. 31/02/00
-		return date;
-	},
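
A usage sketch for the parser above; the format characters are the ones documented under formatDate below, and failures are thrown as plain strings, so callers normally wrap the call:

    try {
        var d = $.datepicker.parseDate('dd M yy', '12 Apr 2013');
        // d is a local Date for 12 April 2013
    } catch (e) {
        // e is a string such as 'Invalid date' or 'Unknown name at position 3'
    }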
-
-	/* Standard date formats. */
-	ATOM: 'yy-mm-dd', // RFC 3339 (ISO 8601)
-	COOKIE: 'D, dd M yy',
-	ISO_8601: 'yy-mm-dd',
-	RFC_822: 'D, d M y',
-	RFC_850: 'DD, dd-M-y',
-	RFC_1036: 'D, d M y',
-	RFC_1123: 'D, d M yy',
-	RFC_2822: 'D, d M yy',
-	RSS: 'D, d M y', // RFC 822
-	TICKS: '!',
-	TIMESTAMP: '@',
-	W3C: 'yy-mm-dd', // ISO 8601
-
-	_ticksTo1970: (((1970 - 1) * 365 + Math.floor(1970 / 4) - Math.floor(1970 / 100) +
-		Math.floor(1970 / 400)) * 24 * 60 * 60 * 10000000),
-
-	/* Format a date object into a string value.
-	   The format can be combinations of the following:
-	   d  - day of month (no leading zero)
-	   dd - day of month (two digit)
-	   o  - day of year (no leading zeros)
-	   oo - day of year (three digit)
-	   D  - day name short
-	   DD - day name long
-	   m  - month of year (no leading zero)
-	   mm - month of year (two digit)
-	   M  - month name short
-	   MM - month name long
-	   y  - year (two digit)
-	   yy - year (four digit)
-	   @ - Unix timestamp (ms since 01/01/1970)
-	   ! - Windows ticks (100ns since 01/01/0001)
-	   '...' - literal text
-	   '' - single quote
-
-	   @param  format    string - the desired format of the date
-	   @param  date      Date - the date value to format
-	   @param  settings  Object - attributes include:
-	                     dayNamesShort    string[7] - abbreviated names of the days from Sunday (optional)
-	                     dayNames         string[7] - names of the days from Sunday (optional)
-	                     monthNamesShort  string[12] - abbreviated names of the months (optional)
-	                     monthNames       string[12] - names of the months (optional)
-	   @return  string - the date in the above format */
-	formatDate: function (format, date, settings) {
-		if (!date)
-			return '';
-		var dayNamesShort = (settings ? settings.dayNamesShort : null) || this._defaults.dayNamesShort;
-		var dayNames = (settings ? settings.dayNames : null) || this._defaults.dayNames;
-		var monthNamesShort = (settings ? settings.monthNamesShort : null) || this._defaults.monthNamesShort;
-		var monthNames = (settings ? settings.monthNames : null) || this._defaults.monthNames;
-		// Check whether a format character is doubled
-		var lookAhead = function(match) {
-			var matches = (iFormat + 1 < format.length && format.charAt(iFormat + 1) == match);
-			if (matches)
-				iFormat++;
-			return matches;
-		};
-		// Format a number, with leading zero if necessary
-		var formatNumber = function(match, value, len) {
-			var num = '' + value;
-			if (lookAhead(match))
-				while (num.length < len)
-					num = '0' + num;
-			return num;
-		};
-		// Format a name, short or long as requested
-		var formatName = function(match, value, shortNames, longNames) {
-			return (lookAhead(match) ? longNames[value] : shortNames[value]);
-		};
-		var output = '';
-		var literal = false;
-		if (date)
-			for (var iFormat = 0; iFormat < format.length; iFormat++) {
-				if (literal)
-					if (format.charAt(iFormat) == "'" && !lookAhead("'"))
-						literal = false;
-					else
-						output += format.charAt(iFormat);
-				else
-					switch (format.charAt(iFormat)) {
-						case 'd':
-							output += formatNumber('d', date.getDate(), 2);
-							break;
-						case 'D':
-							output += formatName('D', date.getDay(), dayNamesShort, dayNames);
-							break;
-						case 'o':
-							output += formatNumber('o',
-								Math.round((new Date(date.getFullYear(), date.getMonth(), date.getDate()).getTime() - new Date(date.getFullYear(), 0, 0).getTime()) / 86400000), 3);
-							break;
-						case 'm':
-							output += formatNumber('m', date.getMonth() + 1, 2);
-							break;
-						case 'M':
-							output += formatName('M', date.getMonth(), monthNamesShort, monthNames);
-							break;
-						case 'y':
-							output += (lookAhead('y') ? date.getFullYear() :
-								(date.getYear() % 100 < 10 ? '0' : '') + date.getYear() % 100);
-							break;
-						case '@':
-							output += date.getTime();
-							break;
-						case '!':
-							output += date.getTime() * 10000 + this._ticksTo1970;
-							break;
-						case "'":
-							if (lookAhead("'"))
-								output += "'";
-							else
-								literal = true;
-							break;
-						default:
-							output += format.charAt(iFormat);
-					}
-			}
-		return output;
-	},
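
A quick illustration of the format characters listed above, assuming the default English day and month names:

    var d = new Date(2013, 3, 12);                 // 12 April 2013, a Friday
    $.datepicker.formatDate('yy-mm-dd', d);        // '2013-04-12' (the $.datepicker.ISO_8601 pattern)
    $.datepicker.formatDate('DD, d MM yy', d);     // 'Friday, 12 April 2013'
    $.datepicker.formatDate("'day' o 'of' yy", d); // 'day 102 of 2013' -- quoted text passes through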
-
-	/* Extract all possible characters from the date format. */
-	_possibleChars: function (format) {
-		var chars = '';
-		var literal = false;
-		// Check whether a format character is doubled
-		var lookAhead = function(match) {
-			var matches = (iFormat + 1 < format.length && format.charAt(iFormat + 1) == match);
-			if (matches)
-				iFormat++;
-			return matches;
-		};
-		for (var iFormat = 0; iFormat < format.length; iFormat++)
-			if (literal)
-				if (format.charAt(iFormat) == "'" && !lookAhead("'"))
-					literal = false;
-				else
-					chars += format.charAt(iFormat);
-			else
-				switch (format.charAt(iFormat)) {
-					case 'd': case 'm': case 'y': case '@':
-						chars += '0123456789';
-						break;
-					case 'D': case 'M':
-						return null; // Accept anything
-					case "'":
-						if (lookAhead("'"))
-							chars += "'";
-						else
-							literal = true;
-						break;
-					default:
-						chars += format.charAt(iFormat);
-				}
-		return chars;
-	},
-
-	/* Get a setting value, defaulting if necessary. */
-	_get: function(inst, name) {
-		return inst.settings[name] !== undefined ?
-			inst.settings[name] : this._defaults[name];
-	},
-
-	/* Parse existing date and initialise date picker. */
-	_setDateFromField: function(inst, noDefault) {
-		if (inst.input.val() == inst.lastVal) {
-			return;
-		}
-		var dateFormat = this._get(inst, 'dateFormat');
-		var dates = inst.lastVal = inst.input ? inst.input.val() : null;
-		var date, defaultDate;
-		date = defaultDate = this._getDefaultDate(inst);
-		var settings = this._getFormatConfig(inst);
-		try {
-			date = this.parseDate(dateFormat, dates, settings) || defaultDate;
-		} catch (event) {
-			this.log(event);
-			dates = (noDefault ? '' : dates);
-		}
-		inst.selectedDay = date.getDate();
-		inst.drawMonth = inst.selectedMonth = date.getMonth();
-		inst.drawYear = inst.selectedYear = date.getFullYear();
-		inst.currentDay = (dates ? date.getDate() : 0);
-		inst.currentMonth = (dates ? date.getMonth() : 0);
-		inst.currentYear = (dates ? date.getFullYear() : 0);
-		this._adjustInstDate(inst);
-	},
-
-	/* Retrieve the default date shown on opening. */
-	_getDefaultDate: function(inst) {
-		return this._restrictMinMax(inst,
-			this._determineDate(inst, this._get(inst, 'defaultDate'), new Date()));
-	},
-
-	/* A date may be specified as an exact value or a relative one. */
-	_determineDate: function(inst, date, defaultDate) {
-		var offsetNumeric = function(offset) {
-			var date = new Date();
-			date.setDate(date.getDate() + offset);
-			return date;
-		};
-		var offsetString = function(offset) {
-			try {
-				return $.datepicker.parseDate($.datepicker._get(inst, 'dateFormat'),
-					offset, $.datepicker._getFormatConfig(inst));
-			}
-			catch (e) {
-				// Ignore
-			}
-			var date = (offset.toLowerCase().match(/^c/) ?
-				$.datepicker._getDate(inst) : null) || new Date();
-			var year = date.getFullYear();
-			var month = date.getMonth();
-			var day = date.getDate();
-			var pattern = /([+-]?[0-9]+)\s*(d|D|w|W|m|M|y|Y)?/g;
-			var matches = pattern.exec(offset);
-			while (matches) {
-				switch (matches[2] || 'd') {
-					case 'd' : case 'D' :
-						day += parseInt(matches[1],10); break;
-					case 'w' : case 'W' :
-						day += parseInt(matches[1],10) * 7; break;
-					case 'm' : case 'M' :
-						month += parseInt(matches[1],10);
-						day = Math.min(day, $.datepicker._getDaysInMonth(year, month));
-						break;
-					case 'y': case 'Y' :
-						year += parseInt(matches[1],10);
-						day = Math.min(day, $.datepicker._getDaysInMonth(year, month));
-						break;
-				}
-				matches = pattern.exec(offset);
-			}
-			return new Date(year, month, day);
-		};
-		var newDate = (date == null || date === '' ? defaultDate : (typeof date == 'string' ? offsetString(date) :
-			(typeof date == 'number' ? (isNaN(date) ? defaultDate : offsetNumeric(date)) : new Date(date.getTime()))));
-		newDate = (newDate && newDate.toString() == 'Invalid Date' ? defaultDate : newDate);
-		if (newDate) {
-			newDate.setHours(0);
-			newDate.setMinutes(0);
-			newDate.setSeconds(0);
-			newDate.setMilliseconds(0);
-		}
-		return this._daylightSavingAdjust(newDate);
-	},
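
A consequence of the offsetString branch above is that date-valued options such as defaultDate, minDate and maxDate accept relative strings: each signed number may be followed by d/w/m/y (days, weeks, months, years), and a leading 'c' counts from the currently selected date rather than today. A sketch with a hypothetical field:

    // Selectable window: from today to one month and seven days out.
    $('#due-date').datepicker({ minDate: 0, maxDate: '+1m +7d' });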
-
-	/* Handle the switch to/from daylight saving time.
-	   Constructing a Date at midnight on a daylight-saving cut-over can
-	   yield an hour greater than 12 (the previous day's 11PM) because
-	   midnight does not exist; in that case jump forward to 1AM,
-	   otherwise reset the time to midnight.
-	   @param  date  (Date) the date to check
-	   @return  (Date) the corrected date */
-	_daylightSavingAdjust: function(date) {
-		if (!date) return null;
-		date.setHours(date.getHours() > 12 ? date.getHours() + 2 : 0);
-		return date;
-	},
-
-	/* Set the date(s) directly. */
-	_setDate: function(inst, date, noChange) {
-		var clear = !date;
-		var origMonth = inst.selectedMonth;
-		var origYear = inst.selectedYear;
-		var newDate = this._restrictMinMax(inst, this._determineDate(inst, date, new Date()));
-		inst.selectedDay = inst.currentDay = newDate.getDate();
-		inst.drawMonth = inst.selectedMonth = inst.currentMonth = newDate.getMonth();
-		inst.drawYear = inst.selectedYear = inst.currentYear = newDate.getFullYear();
-		if ((origMonth != inst.selectedMonth || origYear != inst.selectedYear) && !noChange)
-			this._notifyChange(inst);
-		this._adjustInstDate(inst);
-		if (inst.input) {
-			inst.input.val(clear ? '' : this._formatDate(inst));
-		}
-	},
-
-	/* Retrieve the date(s) directly. */
-	_getDate: function(inst) {
-		var startDate = (!inst.currentYear || (inst.input && inst.input.val() == '') ? null :
-			this._daylightSavingAdjust(new Date(
-			inst.currentYear, inst.currentMonth, inst.currentDay)));
-		return startDate;
-	},
-
-	/* Attach the onxxx handlers.  These are declared statically so
-	 * they work with static code transformers like Caja.
-	 */
-	_attachHandlers: function(inst) {
-		var stepMonths = this._get(inst, 'stepMonths');
-		var id = '#' + inst.id.replace( /\\\\/g, "\\" );
-		inst.dpDiv.find('[data-handler]').map(function () {
-			var handler = {
-				prev: function () {
-					window['DP_jQuery_' + dpuuid].datepicker._adjustDate(id, -stepMonths, 'M');
-				},
-				next: function () {
-					window['DP_jQuery_' + dpuuid].datepicker._adjustDate(id, +stepMonths, 'M');
-				},
-				hide: function () {
-					window['DP_jQuery_' + dpuuid].datepicker._hideDatepicker();
-				},
-				today: function () {
-					window['DP_jQuery_' + dpuuid].datepicker._gotoToday(id);
-				},
-				selectDay: function () {
-					window['DP_jQuery_' + dpuuid].datepicker._selectDay(id, +this.getAttribute('data-month'), +this.getAttribute('data-year'), this);
-					return false;
-				},
-				selectMonth: function () {
-					window['DP_jQuery_' + dpuuid].datepicker._selectMonthYear(id, this, 'M');
-					return false;
-				},
-				selectYear: function () {
-					window['DP_jQuery_' + dpuuid].datepicker._selectMonthYear(id, this, 'Y');
-					return false;
-				}
-			};
-			$(this).bind(this.getAttribute('data-event'), handler[this.getAttribute('data-handler')]);
-		});
-	},
-	
-	/* Generate the HTML for the current state of the date picker. */
-	_generateHTML: function(inst) {
-		var today = new Date();
-		today = this._daylightSavingAdjust(
-			new Date(today.getFullYear(), today.getMonth(), today.getDate())); // clear time
-		var isRTL = this._get(inst, 'isRTL');
-		var showButtonPanel = this._get(inst, 'showButtonPanel');
-		var hideIfNoPrevNext = this._get(inst, 'hideIfNoPrevNext');
-		var navigationAsDateFormat = this._get(inst, 'navigationAsDateFormat');
-		var numMonths = this._getNumberOfMonths(inst);
-		var showCurrentAtPos = this._get(inst, 'showCurrentAtPos');
-		var stepMonths = this._get(inst, 'stepMonths');
-		var isMultiMonth = (numMonths[0] != 1 || numMonths[1] != 1);
-		var currentDate = this._daylightSavingAdjust((!inst.currentDay ? new Date(9999, 9, 9) :
-			new Date(inst.currentYear, inst.currentMonth, inst.currentDay)));
-		var minDate = this._getMinMaxDate(inst, 'min');
-		var maxDate = this._getMinMaxDate(inst, 'max');
-		var drawMonth = inst.drawMonth - showCurrentAtPos;
-		var drawYear = inst.drawYear;
-		if (drawMonth < 0) {
-			drawMonth += 12;
-			drawYear--;
-		}
-		if (maxDate) {
-			var maxDraw = this._daylightSavingAdjust(new Date(maxDate.getFullYear(),
-				maxDate.getMonth() - (numMonths[0] * numMonths[1]) + 1, maxDate.getDate()));
-			maxDraw = (minDate && maxDraw < minDate ? minDate : maxDraw);
-			while (this._daylightSavingAdjust(new Date(drawYear, drawMonth, 1)) > maxDraw) {
-				drawMonth--;
-				if (drawMonth < 0) {
-					drawMonth = 11;
-					drawYear--;
-				}
-			}
-		}
-		inst.drawMonth = drawMonth;
-		inst.drawYear = drawYear;
-		var prevText = this._get(inst, 'prevText');
-		prevText = (!navigationAsDateFormat ? prevText : this.formatDate(prevText,
-			this._daylightSavingAdjust(new Date(drawYear, drawMonth - stepMonths, 1)),
-			this._getFormatConfig(inst)));
-		var prev = (this._canAdjustMonth(inst, -1, drawYear, drawMonth) ?
-			'<a class="ui-datepicker-prev ui-corner-all" data-handler="prev" data-event="click"' +
-			' title="' + prevText + '"><span class="ui-icon ui-icon-circle-triangle-' + ( isRTL ? 'e' : 'w') + '">' + prevText + '</span></a>' :
-			(hideIfNoPrevNext ? '' : '<a class="ui-datepicker-prev ui-corner-all ui-state-disabled" title="'+ prevText +'"><span class="ui-icon ui-icon-circle-triangle-' + ( isRTL ? 'e' : 'w') + '">' + prevText + '</span></a>'));
-		var nextText = this._get(inst, 'nextText');
-		nextText = (!navigationAsDateFormat ? nextText : this.formatDate(nextText,
-			this._daylightSavingAdjust(new Date(drawYear, drawMonth + stepMonths, 1)),
-			this._getFormatConfig(inst)));
-		var next = (this._canAdjustMonth(inst, +1, drawYear, drawMonth) ?
-			'<a class="ui-datepicker-next ui-corner-all" data-handler="next" data-event="click"' +
-			' title="' + nextText + '"><span class="ui-icon ui-icon-circle-triangle-' + ( isRTL ? 'w' : 'e') + '">' + nextText + '</span></a>' :
-			(hideIfNoPrevNext ? '' : '<a class="ui-datepicker-next ui-corner-all ui-state-disabled" title="'+ nextText + '"><span class="ui-icon ui-icon-circle-triangle-' + ( isRTL ? 'w' : 'e') + '">' + nextText + '</span></a>'));
-		var currentText = this._get(inst, 'currentText');
-		var gotoDate = (this._get(inst, 'gotoCurrent') && inst.currentDay ? currentDate : today);
-		currentText = (!navigationAsDateFormat ? currentText :
-			this.formatDate(currentText, gotoDate, this._getFormatConfig(inst)));
-		var controls = (!inst.inline ? '<button type="button" class="ui-datepicker-close ui-state-default ui-priority-primary ui-corner-all" data-handler="hide" data-event="click">' +
-			this._get(inst, 'closeText') + '</button>' : '');
-		var buttonPanel = (showButtonPanel) ? '<div class="ui-datepicker-buttonpane ui-widget-content">' + (isRTL ? controls : '') +
-			(this._isInRange(inst, gotoDate) ? '<button type="button" class="ui-datepicker-current ui-state-default ui-priority-secondary ui-corner-all" data-handler="today" data-event="click"' +
-			'>' + currentText + '</button>' : '') + (isRTL ? '' : controls) + '</div>' : '';
-		var firstDay = parseInt(this._get(inst, 'firstDay'),10);
-		firstDay = (isNaN(firstDay) ? 0 : firstDay);
-		var showWeek = this._get(inst, 'showWeek');
-		var dayNames = this._get(inst, 'dayNames');
-		var dayNamesShort = this._get(inst, 'dayNamesShort');
-		var dayNamesMin = this._get(inst, 'dayNamesMin');
-		var monthNames = this._get(inst, 'monthNames');
-		var monthNamesShort = this._get(inst, 'monthNamesShort');
-		var beforeShowDay = this._get(inst, 'beforeShowDay');
-		var showOtherMonths = this._get(inst, 'showOtherMonths');
-		var selectOtherMonths = this._get(inst, 'selectOtherMonths');
-		var calculateWeek = this._get(inst, 'calculateWeek') || this.iso8601Week;
-		var defaultDate = this._getDefaultDate(inst);
-		var html = '';
-		for (var row = 0; row < numMonths[0]; row++) {
-			var group = '';
-			this.maxRows = 4;
-			for (var col = 0; col < numMonths[1]; col++) {
-				var selectedDate = this._daylightSavingAdjust(new Date(drawYear, drawMonth, inst.selectedDay));
-				var cornerClass = ' ui-corner-all';
-				var calender = '';
-				if (isMultiMonth) {
-					calender += '<div class="ui-datepicker-group';
-					if (numMonths[1] > 1)
-						switch (col) {
-							case 0: calender += ' ui-datepicker-group-first';
-								cornerClass = ' ui-corner-' + (isRTL ? 'right' : 'left'); break;
-							case numMonths[1]-1: calender += ' ui-datepicker-group-last';
-								cornerClass = ' ui-corner-' + (isRTL ? 'left' : 'right'); break;
-							default: calender += ' ui-datepicker-group-middle'; cornerClass = ''; break;
-						}
-					calender += '">';
-				}
-				calender += '<div class="ui-datepicker-header ui-widget-header ui-helper-clearfix' + cornerClass + '">' +
-					(/all|left/.test(cornerClass) && row == 0 ? (isRTL ? next : prev) : '') +
-					(/all|right/.test(cornerClass) && row == 0 ? (isRTL ? prev : next) : '') +
-					this._generateMonthYearHeader(inst, drawMonth, drawYear, minDate, maxDate,
-					row > 0 || col > 0, monthNames, monthNamesShort) + // draw month headers
-					'</div><table class="ui-datepicker-calendar"><thead>' +
-					'<tr>';
-				var thead = (showWeek ? '<th class="ui-datepicker-week-col">' + this._get(inst, 'weekHeader') + '</th>' : '');
-				for (var dow = 0; dow < 7; dow++) { // days of the week
-					var day = (dow + firstDay) % 7;
-					thead += '<th' + ((dow + firstDay + 6) % 7 >= 5 ? ' class="ui-datepicker-week-end"' : '') + '>' +
-						'<span title="' + dayNames[day] + '">' + dayNamesMin[day] + '</span></th>';
-				}
-				calender += thead + '</tr></thead><tbody>';
-				var daysInMonth = this._getDaysInMonth(drawYear, drawMonth);
-				if (drawYear == inst.selectedYear && drawMonth == inst.selectedMonth)
-					inst.selectedDay = Math.min(inst.selectedDay, daysInMonth);
-				var leadDays = (this._getFirstDayOfMonth(drawYear, drawMonth) - firstDay + 7) % 7;
-				var curRows = Math.ceil((leadDays + daysInMonth) / 7); // calculate the number of rows to generate
-				var numRows = (isMultiMonth ? this.maxRows > curRows ? this.maxRows : curRows : curRows); //If multiple months, use the higher number of rows (see #7043)
-				this.maxRows = numRows;
-				var printDate = this._daylightSavingAdjust(new Date(drawYear, drawMonth, 1 - leadDays));
-				for (var dRow = 0; dRow < numRows; dRow++) { // create date picker rows
-					calender += '<tr>';
-					var tbody = (!showWeek ? '' : '<td class="ui-datepicker-week-col">' +
-						this._get(inst, 'calculateWeek')(printDate) + '</td>');
-					for (var dow = 0; dow < 7; dow++) { // create date picker days
-						var daySettings = (beforeShowDay ?
-							beforeShowDay.apply((inst.input ? inst.input[0] : null), [printDate]) : [true, '']);
-						var otherMonth = (printDate.getMonth() != drawMonth);
-						var unselectable = (otherMonth && !selectOtherMonths) || !daySettings[0] ||
-							(minDate && printDate < minDate) || (maxDate && printDate > maxDate);
-						tbody += '<td class="' +
-							((dow + firstDay + 6) % 7 >= 5 ? ' ui-datepicker-week-end' : '') + // highlight weekends
-							(otherMonth ? ' ui-datepicker-other-month' : '') + // highlight days from other months
-							((printDate.getTime() == selectedDate.getTime() && drawMonth == inst.selectedMonth && inst._keyEvent) || // user pressed key
-							(defaultDate.getTime() == printDate.getTime() && defaultDate.getTime() == selectedDate.getTime()) ?
-							// or defaultDate is current printedDate and defaultDate is selectedDate
-							' ' + this._dayOverClass : '') + // highlight selected day
-							(unselectable ? ' ' + this._unselectableClass + ' ui-state-disabled': '') +  // highlight unselectable days
-							(otherMonth && !showOtherMonths ? '' : ' ' + daySettings[1] + // highlight custom dates
-							(printDate.getTime() == currentDate.getTime() ? ' ' + this._currentClass : '') + // highlight selected day
-							(printDate.getTime() == today.getTime() ? ' ui-datepicker-today' : '')) + '"' + // highlight today (if different)
-							((!otherMonth || showOtherMonths) && daySettings[2] ? ' title="' + daySettings[2] + '"' : '') + // cell title
-							(unselectable ? '' : ' data-handler="selectDay" data-event="click" data-month="' + printDate.getMonth() + '" data-year="' + printDate.getFullYear() + '"') + '>' + // actions
-							(otherMonth && !showOtherMonths ? '&#xa0;' : // display for other months
-							(unselectable ? '<span class="ui-state-default">' + printDate.getDate() + '</span>' : '<a class="ui-state-default' +
-							(printDate.getTime() == today.getTime() ? ' ui-state-highlight' : '') +
-							(printDate.getTime() == currentDate.getTime() ? ' ui-state-active' : '') + // highlight selected day
-							(otherMonth ? ' ui-priority-secondary' : '') + // distinguish dates from other months
-							'" href="#">' + printDate.getDate() + '</a>')) + '</td>'; // display selectable date
-						printDate.setDate(printDate.getDate() + 1);
-						printDate = this._daylightSavingAdjust(printDate);
-					}
-					calender += tbody + '</tr>';
-				}
-				drawMonth++;
-				if (drawMonth > 11) {
-					drawMonth = 0;
-					drawYear++;
-				}
-				calender += '</tbody></table>' + (isMultiMonth ? '</div>' + 
-							((numMonths[0] > 0 && col == numMonths[1]-1) ? '<div class="ui-datepicker-row-break"></div>' : '') : '');
-				group += calender;
-			}
-			html += group;
-		}
-		html += buttonPanel + ($.browser.msie && parseInt($.browser.version,10) < 7 && !inst.inline ?
-			'<iframe src="javascript:false;" class="ui-datepicker-cover" frameborder="0"></iframe>' : '');
-		inst._keyEvent = false;
-		return html;
-	},
-
-	/* Generate the month and year header. */
-	_generateMonthYearHeader: function(inst, drawMonth, drawYear, minDate, maxDate,
-			secondary, monthNames, monthNamesShort) {
-		var changeMonth = this._get(inst, 'changeMonth');
-		var changeYear = this._get(inst, 'changeYear');
-		var showMonthAfterYear = this._get(inst, 'showMonthAfterYear');
-		var html = '<div class="ui-datepicker-title">';
-		var monthHtml = '';
-		// month selection
-		if (secondary || !changeMonth)
-			monthHtml += '<span class="ui-datepicker-month">' + monthNames[drawMonth] + '</span>';
-		else {
-			var inMinYear = (minDate && minDate.getFullYear() == drawYear);
-			var inMaxYear = (maxDate && maxDate.getFullYear() == drawYear);
-			monthHtml += '<select class="ui-datepicker-month" data-handler="selectMonth" data-event="change">';
-			for (var month = 0; month < 12; month++) {
-				if ((!inMinYear || month >= minDate.getMonth()) &&
-						(!inMaxYear || month <= maxDate.getMonth()))
-					monthHtml += '<option value="' + month + '"' +
-						(month == drawMonth ? ' selected="selected"' : '') +
-						'>' + monthNamesShort[month] + '</option>';
-			}
-			monthHtml += '</select>';
-		}
-		if (!showMonthAfterYear)
-			html += monthHtml + (secondary || !(changeMonth && changeYear) ? '&#xa0;' : '');
-		// year selection
-		if ( !inst.yearshtml ) {
-			inst.yearshtml = '';
-			if (secondary || !changeYear)
-				html += '<span class="ui-datepicker-year">' + drawYear + '</span>';
-			else {
-				// determine range of years to display
-				var years = this._get(inst, 'yearRange').split(':');
-				var thisYear = new Date().getFullYear();
-				var determineYear = function(value) {
-					var year = (value.match(/c[+-].*/) ? drawYear + parseInt(value.substring(1), 10) :
-						(value.match(/[+-].*/) ? thisYear + parseInt(value, 10) :
-						parseInt(value, 10)));
-					return (isNaN(year) ? thisYear : year);
-				};
-				var year = determineYear(years[0]);
-				var endYear = Math.max(year, determineYear(years[1] || ''));
-				year = (minDate ? Math.max(year, minDate.getFullYear()) : year);
-				endYear = (maxDate ? Math.min(endYear, maxDate.getFullYear()) : endYear);
-				inst.yearshtml += '<select class="ui-datepicker-year" data-handler="selectYear" data-event="change">';
-				for (; year <= endYear; year++) {
-					inst.yearshtml += '<option value="' + year + '"' +
-						(year == drawYear ? ' selected="selected"' : '') +
-						'>' + year + '</option>';
-				}
-				inst.yearshtml += '</select>';
-				
-				html += inst.yearshtml;
-				inst.yearshtml = null;
-			}
-		}
-		html += this._get(inst, 'yearSuffix');
-		if (showMonthAfterYear)
-			html += (secondary || !(changeMonth && changeYear) ? '&#xa0;' : '') + monthHtml;
-		html += '</div>'; // Close datepicker_header
-		return html;
-	},
-
-	/* Adjust one of the date sub-fields. */
-	_adjustInstDate: function(inst, offset, period) {
-		var year = inst.drawYear + (period == 'Y' ? offset : 0);
-		var month = inst.drawMonth + (period == 'M' ? offset : 0);
-		var day = Math.min(inst.selectedDay, this._getDaysInMonth(year, month)) +
-			(period == 'D' ? offset : 0);
-		var date = this._restrictMinMax(inst,
-			this._daylightSavingAdjust(new Date(year, month, day)));
-		inst.selectedDay = date.getDate();
-		inst.drawMonth = inst.selectedMonth = date.getMonth();
-		inst.drawYear = inst.selectedYear = date.getFullYear();
-		if (period == 'M' || period == 'Y')
-			this._notifyChange(inst);
-	},
-
-	/* Ensure a date is within any min/max bounds. */
-	_restrictMinMax: function(inst, date) {
-		var minDate = this._getMinMaxDate(inst, 'min');
-		var maxDate = this._getMinMaxDate(inst, 'max');
-		var newDate = (minDate && date < minDate ? minDate : date);
-		newDate = (maxDate && newDate > maxDate ? maxDate : newDate);
-		return newDate;
-	},
-
-	/* Notify change of month/year. */
-	_notifyChange: function(inst) {
-		var onChange = this._get(inst, 'onChangeMonthYear');
-		if (onChange)
-			onChange.apply((inst.input ? inst.input[0] : null),
-				[inst.selectedYear, inst.selectedMonth + 1, inst]);
-	},
-
-	/* Determine the number of months to show. */
-	_getNumberOfMonths: function(inst) {
-		var numMonths = this._get(inst, 'numberOfMonths');
-		return (numMonths == null ? [1, 1] : (typeof numMonths == 'number' ? [1, numMonths] : numMonths));
-	},
-
-	/* Determine the current maximum date - ensure no time components are set. */
-	_getMinMaxDate: function(inst, minMax) {
-		return this._determineDate(inst, this._get(inst, minMax + 'Date'), null);
-	},
-
-	/* Find the number of days in a given month. */
-	_getDaysInMonth: function(year, month) {
-		return 32 - this._daylightSavingAdjust(new Date(year, month, 32)).getDate();
-	},
-
-	/* Find the day of the week of the first of a month. */
-	_getFirstDayOfMonth: function(year, month) {
-		return new Date(year, month, 1).getDay();
-	},
-
-	/* Determines if we should allow a "next/prev" month display change. */
-	_canAdjustMonth: function(inst, offset, curYear, curMonth) {
-		var numMonths = this._getNumberOfMonths(inst);
-		var date = this._daylightSavingAdjust(new Date(curYear,
-			curMonth + (offset < 0 ? offset : numMonths[0] * numMonths[1]), 1));
-		if (offset < 0)
-			date.setDate(this._getDaysInMonth(date.getFullYear(), date.getMonth()));
-		return this._isInRange(inst, date);
-	},
-
-	/* Is the given date in the accepted range? */
-	_isInRange: function(inst, date) {
-		var minDate = this._getMinMaxDate(inst, 'min');
-		var maxDate = this._getMinMaxDate(inst, 'max');
-		return ((!minDate || date.getTime() >= minDate.getTime()) &&
-			(!maxDate || date.getTime() <= maxDate.getTime()));
-	},
-
-	/* Provide the configuration settings for formatting/parsing. */
-	_getFormatConfig: function(inst) {
-		var shortYearCutoff = this._get(inst, 'shortYearCutoff');
-		shortYearCutoff = (typeof shortYearCutoff != 'string' ? shortYearCutoff :
-			new Date().getFullYear() % 100 + parseInt(shortYearCutoff, 10));
-		return {shortYearCutoff: shortYearCutoff,
-			dayNamesShort: this._get(inst, 'dayNamesShort'), dayNames: this._get(inst, 'dayNames'),
-			monthNamesShort: this._get(inst, 'monthNamesShort'), monthNames: this._get(inst, 'monthNames')};
-	},
-
-	/* Format the given date for display. */
-	_formatDate: function(inst, day, month, year) {
-		if (!day) {
-			inst.currentDay = inst.selectedDay;
-			inst.currentMonth = inst.selectedMonth;
-			inst.currentYear = inst.selectedYear;
-		}
-		var date = (day ? (typeof day == 'object' ? day :
-			this._daylightSavingAdjust(new Date(year, month, day))) :
-			this._daylightSavingAdjust(new Date(inst.currentYear, inst.currentMonth, inst.currentDay)));
-		return this.formatDate(this._get(inst, 'dateFormat'), date, this._getFormatConfig(inst));
-	}
-});
-
-/*
- * Bind hover events for datepicker elements.
- * Done via delegate so the binding only occurs once in the lifetime of the parent div.
- * Global instActive, set by _updateDatepicker allows the handlers to find their way back to the active picker.
- */ 
-function bindHover(dpDiv) {
-	var selector = 'button, .ui-datepicker-prev, .ui-datepicker-next, .ui-datepicker-calendar td a';
-	return dpDiv.bind('mouseout', function(event) {
-			var elem = $( event.target ).closest( selector );
-			if ( !elem.length ) {
-				return;
-			}
-			elem.removeClass( "ui-state-hover ui-datepicker-prev-hover ui-datepicker-next-hover" );
-		})
-		.bind('mouseover', function(event) {
-			var elem = $( event.target ).closest( selector );
-			if ($.datepicker._isDisabledDatepicker( instActive.inline ? dpDiv.parent()[0] : instActive.input[0]) ||
-					!elem.length ) {
-				return;
-			}
-			elem.parents('.ui-datepicker-calendar').find('a').removeClass('ui-state-hover');
-			elem.addClass('ui-state-hover');
-			if (elem.hasClass('ui-datepicker-prev')) elem.addClass('ui-datepicker-prev-hover');
-			if (elem.hasClass('ui-datepicker-next')) elem.addClass('ui-datepicker-next-hover');
-		});
-}
-
-/* jQuery's $.extend now skips null/undefined values, so copy them back explicitly. */
-function extendRemove(target, props) {
-	$.extend(target, props);
-	for (var name in props)
-		if (props[name] == null) // == null also matches undefined
-			target[name] = props[name];
-	return target;
-}
-
-/* Determine whether an object is an array. */
-function isArray(a) {
-	return (a && (($.browser.safari && typeof a == 'object' && a.length) ||
-		(a.constructor && a.constructor.toString().match(/Array\(\)/))));
-}
-
-/* Invoke the datepicker functionality.
-   @param  options  string - a command, optionally followed by additional parameters or
-                    Object - settings for attaching new datepicker functionality
-   @return  jQuery object */
-$.fn.datepicker = function(options){
-	
-	/* Verify an empty collection wasn't passed - Fixes #6976 */
-	if ( !this.length ) {
-		return this;
-	}
-	
-	/* Initialise the date picker. */
-	if (!$.datepicker.initialized) {
-		$(document).mousedown($.datepicker._checkExternalClick).
-			find('body').append($.datepicker.dpDiv);
-		$.datepicker.initialized = true;
-	}
-
-	var otherArgs = Array.prototype.slice.call(arguments, 1);
-	if (typeof options == 'string' && (options == 'isDisabled' || options == 'getDate' || options == 'widget'))
-		return $.datepicker['_' + options + 'Datepicker'].
-			apply($.datepicker, [this[0]].concat(otherArgs));
-	if (options == 'option' && arguments.length == 2 && typeof arguments[1] == 'string')
-		return $.datepicker['_' + options + 'Datepicker'].
-			apply($.datepicker, [this[0]].concat(otherArgs));
-	return this.each(function() {
-		typeof options == 'string' ?
-			$.datepicker['_' + options + 'Datepicker'].
-				apply($.datepicker, [this].concat(otherArgs)) :
-			$.datepicker._attachDatepicker(this, options);
-	});
-};
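
Tying the entry point together: passing an object attaches a new picker, while passing a string invokes a command; 'isDisabled', 'getDate' and 'widget' (plus the two-argument 'option' getter) return a value instead of the jQuery chain. A brief sketch against a hypothetical #start input:

    $('#start').datepicker({ dateFormat: 'yy-mm-dd' }); // attach with settings
    $('#start').datepicker('setDate', '+7');            // command form, returns the chain
    var chosen = $('#start').datepicker('getDate');     // getter form, returns a Date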
-
-$.datepicker = new Datepicker(); // singleton instance
-$.datepicker.initialized = false;
-$.datepicker.uuid = new Date().getTime();
-$.datepicker.version = "1.8.23";
-
-// Workaround for #4055
-// Add another global to avoid noConflict issues with inline event handlers
-window['DP_jQuery_' + dpuuid] = $;
-
-})(jQuery);
diff --git a/branch-1.2/ambari-web/vendor/scripts/jquery.ui.mouse.js b/branch-1.2/ambari-web/vendor/scripts/jquery.ui.mouse.js
deleted file mode 100644
index 78c8ef6..0000000
--- a/branch-1.2/ambari-web/vendor/scripts/jquery.ui.mouse.js
+++ /dev/null
@@ -1,175 +0,0 @@
-/*!
- * jQuery UI Mouse 1.8.23
- *
- * Copyright 2012, AUTHORS.txt (http://jqueryui.com/about)
- * Dual licensed under the MIT or GPL Version 2 licenses.
- * http://jquery.org/license
- *
- * http://docs.jquery.com/UI/Mouse
- *
- * Depends:
- *	jquery.ui.widget.js
- */
-(function ($, undefined) {
-
-  var mouseHandled = false;
-  $(document).mouseup(function (e) {
-    mouseHandled = false;
-  });
-
-  $.widget("ui.mouse", {
-    options:{
-      cancel:':input,option',
-      distance:1,
-      delay:0
-    },
-    _mouseInit:function () {
-      var self = this;
-
-      this.element
-        .bind('mousedown.' + this.widgetName, function (event) {
-          return self._mouseDown(event);
-        })
-        .bind('click.' + this.widgetName, function (event) {
-          if (true === $.data(event.target, self.widgetName + '.preventClickEvent')) {
-            $.removeData(event.target, self.widgetName + '.preventClickEvent');
-            event.stopImmediatePropagation();
-            return false;
-          }
-        });
-
-      this.started = false;
-    },
-
-    // TODO: make sure destroying one instance of mouse doesn't mess with
-    // other instances of mouse
-    _mouseDestroy:function () {
-      this.element.unbind('.' + this.widgetName);
-      if (this._mouseMoveDelegate) {
-        $(document)
-          .unbind('mousemove.' + this.widgetName, this._mouseMoveDelegate)
-          .unbind('mouseup.' + this.widgetName, this._mouseUpDelegate);
-      }
-    },
-
-    _mouseDown:function (event) {
-      // don't let more than one widget handle mouseStart
-      if (mouseHandled) {
-        return;
-      }
-
-      // we may have missed mouseup (out of window)
-      (this._mouseStarted && this._mouseUp(event));
-
-      this._mouseDownEvent = event;
-
-      var self = this,
-        btnIsLeft = (event.which == 1),
-      // event.target.nodeName works around a bug in IE 8 with
-      // disabled inputs (#7620)
-        elIsCancel = (typeof this.options.cancel == "string" && event.target.nodeName ? $(event.target).closest(this.options.cancel).length : false);
-      if (!btnIsLeft || elIsCancel || !this._mouseCapture(event)) {
-        return true;
-      }
-
-      this.mouseDelayMet = !this.options.delay;
-      if (!this.mouseDelayMet) {
-        this._mouseDelayTimer = setTimeout(function () {
-          self.mouseDelayMet = true;
-        }, this.options.delay);
-      }
-
-      if (this._mouseDistanceMet(event) && this._mouseDelayMet(event)) {
-        this._mouseStarted = (this._mouseStart(event) !== false);
-        if (!this._mouseStarted) {
-          event.preventDefault();
-          return true;
-        }
-      }
-
-      // Click event may never have fired (Gecko & Opera)
-      if (true === $.data(event.target, this.widgetName + '.preventClickEvent')) {
-        $.removeData(event.target, this.widgetName + '.preventClickEvent');
-      }
-
-      // these delegates are required to keep context
-      this._mouseMoveDelegate = function (event) {
-        return self._mouseMove(event);
-      };
-      this._mouseUpDelegate = function (event) {
-        return self._mouseUp(event);
-      };
-      $(document)
-        .bind('mousemove.' + this.widgetName, this._mouseMoveDelegate)
-        .bind('mouseup.' + this.widgetName, this._mouseUpDelegate);
-
-      event.preventDefault();
-
-      mouseHandled = true;
-      return true;
-    },
-
-    _mouseMove:function (event) {
-      // IE mouseup check - mouseup happened when mouse was out of window
-      if ($.browser.msie && !(document.documentMode >= 9) && !event.button) {
-        return this._mouseUp(event);
-      }
-
-      if (this._mouseStarted) {
-        this._mouseDrag(event);
-        return event.preventDefault();
-      }
-
-      if (this._mouseDistanceMet(event) && this._mouseDelayMet(event)) {
-        this._mouseStarted =
-          (this._mouseStart(this._mouseDownEvent, event) !== false);
-        (this._mouseStarted ? this._mouseDrag(event) : this._mouseUp(event));
-      }
-
-      return !this._mouseStarted;
-    },
-
-    _mouseUp:function (event) {
-      $(document)
-        .unbind('mousemove.' + this.widgetName, this._mouseMoveDelegate)
-        .unbind('mouseup.' + this.widgetName, this._mouseUpDelegate);
-
-      if (this._mouseStarted) {
-        this._mouseStarted = false;
-
-        if (event.target == this._mouseDownEvent.target) {
-          $.data(event.target, this.widgetName + '.preventClickEvent', true);
-        }
-
-        this._mouseStop(event);
-      }
-
-      return false;
-    },
-
-    _mouseDistanceMet:function (event) {
-      return (Math.max(
-        Math.abs(this._mouseDownEvent.pageX - event.pageX),
-        Math.abs(this._mouseDownEvent.pageY - event.pageY)
-      ) >= this.options.distance
-        );
-    },
-
-    _mouseDelayMet:function (event) {
-      return this.mouseDelayMet;
-    },
-
-    // These are placeholder methods, to be overridden by extending plugins
-    _mouseStart:function (event) {
-    },
-    _mouseDrag:function (event) {
-    },
-    _mouseStop:function (event) {
-    },
-    _mouseCapture:function (event) {
-      return true;
-    }
-  });
-
-})(jQuery);
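
The mouse mixin above does nothing on its own: concrete widgets call _mouseInit and override the _mouseStart/_mouseDrag/_mouseStop/_mouseCapture placeholders, as the slider below does. A minimal hypothetical extension for illustration:

    // Hypothetical widget built on ui.mouse: logs horizontal drag deltas.
    $.widget('ui.dragLogger', $.ui.mouse, {
        _create: function () { this._mouseInit(); },
        destroy: function () {
            this._mouseDestroy();
            $.Widget.prototype.destroy.call(this);
            return this;
        },
        _mouseStart: function (event) { this._startX = event.pageX; },
        _mouseDrag: function (event) { console.log(event.pageX - this._startX); },
        _mouseStop: function (event) { this._startX = null; }
    });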
diff --git a/branch-1.2/ambari-web/vendor/scripts/jquery.ui.slider.js b/branch-1.2/ambari-web/vendor/scripts/jquery.ui.slider.js
deleted file mode 100644
index 5ea589e..0000000
--- a/branch-1.2/ambari-web/vendor/scripts/jquery.ui.slider.js
+++ /dev/null
@@ -1,662 +0,0 @@
-/*!
- * jQuery UI Slider 1.8.23
- *
- * Copyright 2012, AUTHORS.txt (http://jqueryui.com/about)
- * Dual licensed under the MIT or GPL Version 2 licenses.
- * http://jquery.org/license
- *
- * http://docs.jquery.com/UI/Slider
- *
- * Depends:
- *	jquery.ui.core.js
- *	jquery.ui.mouse.js
- *	jquery.ui.widget.js
- */
-(function( $, undefined ) {
-
-// number of pages in a slider
-// (how many times can you page up/down to go through the whole range)
-var numPages = 5;
-
-$.widget( "ui.slider", $.ui.mouse, {
-
-	widgetEventPrefix: "slide",
-
-	options: {
-		animate: false,
-		distance: 0,
-		max: 100,
-		min: 0,
-		orientation: "horizontal",
-		range: false,
-		step: 1,
-		value: 0,
-		values: null
-	},
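
These defaults map directly to construction-time settings. A typical range configuration, using a hypothetical #price element:

    $('#price').slider({
        range: true, min: 0, max: 100, step: 5, values: [20, 80],
        slide: function (event, ui) { console.log(ui.values); } // fires while dragging
    });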
-
-	_create: function() {
-		var self = this,
-			o = this.options,
-			existingHandles = this.element.find( ".ui-slider-handle" ).addClass( "ui-state-default ui-corner-all" ),
-			handle = "<a class='ui-slider-handle ui-state-default ui-corner-all' href='#'></a>",
-			handleCount = ( o.values && o.values.length ) || 1,
-			handles = [];
-
-		this._keySliding = false;
-		this._mouseSliding = false;
-		this._animateOff = true;
-		this._handleIndex = null;
-		this._detectOrientation();
-		this._mouseInit();
-
-		this.element
-			.addClass( "ui-slider" +
-				" ui-slider-" + this.orientation +
-				" ui-widget" +
-				" ui-widget-content" +
-				" ui-corner-all" +
-				( o.disabled ? " ui-slider-disabled ui-disabled" : "" ) );
-
-		this.range = $([]);
-
-		if ( o.range ) {
-			if ( o.range === true ) {
-				if ( !o.values ) {
-					o.values = [ this._valueMin(), this._valueMin() ];
-				}
-				if ( o.values.length && o.values.length !== 2 ) {
-					o.values = [ o.values[0], o.values[0] ];
-				}
-			}
-
-			this.range = $( "<div></div>" )
-				.appendTo( this.element )
-				.addClass( "ui-slider-range" +
-				// note: this isn't the most fittingly semantic framework class for this element,
-				// but worked best visually with a variety of themes
-				" ui-widget-header" + 
-				( ( o.range === "min" || o.range === "max" ) ? " ui-slider-range-" + o.range : "" ) );
-		}
-
-		for ( var i = existingHandles.length; i < handleCount; i += 1 ) {
-			handles.push( handle );
-		}
-
-		this.handles = existingHandles.add( $( handles.join( "" ) ).appendTo( self.element ) );
-
-		this.handle = this.handles.eq( 0 );
-
-		this.handles.add( this.range ).filter( "a" )
-			.click(function( event ) {
-				event.preventDefault();
-			})
-			.hover(function() {
-				if ( !o.disabled ) {
-					$( this ).addClass( "ui-state-hover" );
-				}
-			}, function() {
-				$( this ).removeClass( "ui-state-hover" );
-			})
-			.focus(function() {
-				if ( !o.disabled ) {
-					$( ".ui-slider .ui-state-focus" ).removeClass( "ui-state-focus" );
-					$( this ).addClass( "ui-state-focus" );
-				} else {
-					$( this ).blur();
-				}
-			})
-			.blur(function() {
-				$( this ).removeClass( "ui-state-focus" );
-			});
-
-		this.handles.each(function( i ) {
-			$( this ).data( "index.ui-slider-handle", i );
-		});
-
-		this.handles
-			.keydown(function( event ) {
-				var index = $( this ).data( "index.ui-slider-handle" ),
-					allowed,
-					curVal,
-					newVal,
-					step;
-	
-				if ( self.options.disabled ) {
-					return;
-				}
-	
-				switch ( event.keyCode ) {
-					case $.ui.keyCode.HOME:
-					case $.ui.keyCode.END:
-					case $.ui.keyCode.PAGE_UP:
-					case $.ui.keyCode.PAGE_DOWN:
-					case $.ui.keyCode.UP:
-					case $.ui.keyCode.RIGHT:
-					case $.ui.keyCode.DOWN:
-					case $.ui.keyCode.LEFT:
-						event.preventDefault();
-						if ( !self._keySliding ) {
-							self._keySliding = true;
-							$( this ).addClass( "ui-state-active" );
-							allowed = self._start( event, index );
-							if ( allowed === false ) {
-								return;
-							}
-						}
-						break;
-				}
-	
-				step = self.options.step;
-				if ( self.options.values && self.options.values.length ) {
-					curVal = newVal = self.values( index );
-				} else {
-					curVal = newVal = self.value();
-				}
-	
-				switch ( event.keyCode ) {
-					case $.ui.keyCode.HOME:
-						newVal = self._valueMin();
-						break;
-					case $.ui.keyCode.END:
-						newVal = self._valueMax();
-						break;
-					case $.ui.keyCode.PAGE_UP:
-						newVal = self._trimAlignValue( curVal + ( (self._valueMax() - self._valueMin()) / numPages ) );
-						break;
-					case $.ui.keyCode.PAGE_DOWN:
-						newVal = self._trimAlignValue( curVal - ( (self._valueMax() - self._valueMin()) / numPages ) );
-						break;
-					case $.ui.keyCode.UP:
-					case $.ui.keyCode.RIGHT:
-						if ( curVal === self._valueMax() ) {
-							return;
-						}
-						newVal = self._trimAlignValue( curVal + step );
-						break;
-					case $.ui.keyCode.DOWN:
-					case $.ui.keyCode.LEFT:
-						if ( curVal === self._valueMin() ) {
-							return;
-						}
-						newVal = self._trimAlignValue( curVal - step );
-						break;
-				}
-	
-				self._slide( event, index, newVal );
-			})
-			.keyup(function( event ) {
-				var index = $( this ).data( "index.ui-slider-handle" );
-	
-				if ( self._keySliding ) {
-					self._keySliding = false;
-					self._stop( event, index );
-					self._change( event, index );
-					$( this ).removeClass( "ui-state-active" );
-				}
-	
-			});
-
-		this._refreshValue();
-
-		this._animateOff = false;
-	},
-
-	destroy: function() {
-		this.handles.remove();
-		this.range.remove();
-
-		this.element
-			.removeClass( "ui-slider" +
-				" ui-slider-horizontal" +
-				" ui-slider-vertical" +
-				" ui-slider-disabled" +
-				" ui-widget" +
-				" ui-widget-content" +
-				" ui-corner-all" )
-			.removeData( "slider" )
-			.unbind( ".slider" );
-
-		this._mouseDestroy();
-
-		return this;
-	},
-
-	_mouseCapture: function( event ) {
-		var o = this.options,
-			position,
-			normValue,
-			distance,
-			closestHandle,
-			self,
-			index,
-			allowed,
-			offset,
-			mouseOverHandle;
-
-		if ( o.disabled ) {
-			return false;
-		}
-
-		this.elementSize = {
-			width: this.element.outerWidth(),
-			height: this.element.outerHeight()
-		};
-		this.elementOffset = this.element.offset();
-
-		position = { x: event.pageX, y: event.pageY };
-		normValue = this._normValueFromMouse( position );
-		distance = this._valueMax() - this._valueMin() + 1;
-		self = this;
-		this.handles.each(function( i ) {
-			var thisDistance = Math.abs( normValue - self.values(i) );
-			if ( distance > thisDistance ) {
-				distance = thisDistance;
-				closestHandle = $( this );
-				index = i;
-			}
-		});
-
-		// workaround for bug #3736 (if both handles of a range are at 0,
-		// the first is always used as the one with least distance,
-		// and moving it is obviously prevented by preventing negative ranges)
-		if( o.range === true && this.values(1) === o.min ) {
-			index += 1;
-			closestHandle = $( this.handles[index] );
-		}
-
-		allowed = this._start( event, index );
-		if ( allowed === false ) {
-			return false;
-		}
-		this._mouseSliding = true;
-
-		self._handleIndex = index;
-
-		closestHandle
-			.addClass( "ui-state-active" )
-			.focus();
-		
-		offset = closestHandle.offset();
-		mouseOverHandle = !$( event.target ).parents().andSelf().is( ".ui-slider-handle" );
-		this._clickOffset = mouseOverHandle ? { left: 0, top: 0 } : {
-			left: event.pageX - offset.left - ( closestHandle.width() / 2 ),
-			top: event.pageY - offset.top -
-				( closestHandle.height() / 2 ) -
-				( parseInt( closestHandle.css("borderTopWidth"), 10 ) || 0 ) -
-				( parseInt( closestHandle.css("borderBottomWidth"), 10 ) || 0) +
-				( parseInt( closestHandle.css("marginTop"), 10 ) || 0)
-		};
-
-		if ( !this.handles.hasClass( "ui-state-hover" ) ) {
-			this._slide( event, index, normValue );
-		}
-		this._animateOff = true;
-		return true;
-	},
-
-	_mouseStart: function( event ) {
-		return true;
-	},
-
-	_mouseDrag: function( event ) {
-		var position = { x: event.pageX, y: event.pageY },
-			normValue = this._normValueFromMouse( position );
-		
-		this._slide( event, this._handleIndex, normValue );
-
-		return false;
-	},
-
-	_mouseStop: function( event ) {
-		this.handles.removeClass( "ui-state-active" );
-		this._mouseSliding = false;
-
-		this._stop( event, this._handleIndex );
-		this._change( event, this._handleIndex );
-
-		this._handleIndex = null;
-		this._clickOffset = null;
-		this._animateOff = false;
-
-		return false;
-	},
-	
-	_detectOrientation: function() {
-		this.orientation = ( this.options.orientation === "vertical" ) ? "vertical" : "horizontal";
-	},
-
-	_normValueFromMouse: function( position ) {
-		var pixelTotal,
-			pixelMouse,
-			percentMouse,
-			valueTotal,
-			valueMouse;
-
-		if ( this.orientation === "horizontal" ) {
-			pixelTotal = this.elementSize.width;
-			pixelMouse = position.x - this.elementOffset.left - ( this._clickOffset ? this._clickOffset.left : 0 );
-		} else {
-			pixelTotal = this.elementSize.height;
-			pixelMouse = position.y - this.elementOffset.top - ( this._clickOffset ? this._clickOffset.top : 0 );
-		}
-
-		percentMouse = ( pixelMouse / pixelTotal );
-		if ( percentMouse > 1 ) {
-			percentMouse = 1;
-		}
-		if ( percentMouse < 0 ) {
-			percentMouse = 0;
-		}
-		if ( this.orientation === "vertical" ) {
-			percentMouse = 1 - percentMouse;
-		}
-
-		valueTotal = this._valueMax() - this._valueMin();
-		valueMouse = this._valueMin() + percentMouse * valueTotal;
-
-		return this._trimAlignValue( valueMouse );
-	},
-
-	_start: function( event, index ) {
-		var uiHash = {
-			handle: this.handles[ index ],
-			value: this.value()
-		};
-		if ( this.options.values && this.options.values.length ) {
-			uiHash.value = this.values( index );
-			uiHash.values = this.values();
-		}
-		return this._trigger( "start", event, uiHash );
-	},
-
-	_slide: function( event, index, newVal ) {
-		var otherVal,
-			newValues,
-			allowed;
-
-		if ( this.options.values && this.options.values.length ) {
-			otherVal = this.values( index ? 0 : 1 );
-
-			if ( ( this.options.values.length === 2 && this.options.range === true ) && 
-					( ( index === 0 && newVal > otherVal) || ( index === 1 && newVal < otherVal ) )
-				) {
-				newVal = otherVal;
-			}
-
-			if ( newVal !== this.values( index ) ) {
-				newValues = this.values();
-				newValues[ index ] = newVal;
-				// A slide can be canceled by returning false from the slide callback
-				allowed = this._trigger( "slide", event, {
-					handle: this.handles[ index ],
-					value: newVal,
-					values: newValues
-				} );
-				otherVal = this.values( index ? 0 : 1 );
-				if ( allowed !== false ) {
-					this.values( index, newVal, true );
-				}
-			}
-		} else {
-			if ( newVal !== this.value() ) {
-				// A slide can be canceled by returning false from the slide callback
-				allowed = this._trigger( "slide", event, {
-					handle: this.handles[ index ],
-					value: newVal
-				} );
-				if ( allowed !== false ) {
-					this.value( newVal );
-				}
-			}
-		}
-	},
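-
-	// Illustrative usage (editor's sketch, not part of the original file):
-	// a "slide" handler can veto a move by returning false, e.g. to keep a
-	// minimum gap between two range handles ("#slider" is a hypothetical id):
-	//
-	//   $( "#slider" ).slider({
-	//       values: [ 20, 80 ],
-	//       range: true,
-	//       slide: function( event, ui ) {
-	//           // reject moves that bring the handles closer than 10 units
-	//           return ( ui.values[ 1 ] - ui.values[ 0 ] ) >= 10;
-	//       }
-	//   });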
-
-	_stop: function( event, index ) {
-		var uiHash = {
-			handle: this.handles[ index ],
-			value: this.value()
-		};
-		if ( this.options.values && this.options.values.length ) {
-			uiHash.value = this.values( index );
-			uiHash.values = this.values();
-		}
-
-		this._trigger( "stop", event, uiHash );
-	},
-
-	_change: function( event, index ) {
-		if ( !this._keySliding && !this._mouseSliding ) {
-			var uiHash = {
-				handle: this.handles[ index ],
-				value: this.value()
-			};
-			if ( this.options.values && this.options.values.length ) {
-				uiHash.value = this.values( index );
-				uiHash.values = this.values();
-			}
-
-			this._trigger( "change", event, uiHash );
-		}
-	},
-
-	value: function( newValue ) {
-		if ( arguments.length ) {
-			this.options.value = this._trimAlignValue( newValue );
-			this._refreshValue();
-			this._change( null, 0 );
-			return;
-		}
-
-		return this._value();
-	},
-
-	values: function( index, newValue ) {
-		var vals,
-			newValues,
-			i;
-
-		if ( arguments.length > 1 ) {
-			this.options.values[ index ] = this._trimAlignValue( newValue );
-			this._refreshValue();
-			this._change( null, index );
-			return;
-		}
-
-		if ( arguments.length ) {
-			if ( $.isArray( arguments[ 0 ] ) ) {
-				vals = this.options.values;
-				newValues = arguments[ 0 ];
-				for ( i = 0; i < vals.length; i += 1 ) {
-					vals[ i ] = this._trimAlignValue( newValues[ i ] );
-					this._change( null, i );
-				}
-				this._refreshValue();
-			} else {
-				if ( this.options.values && this.options.values.length ) {
-					return this._values( index );
-				} else {
-					return this.value();
-				}
-			}
-		} else {
-			return this._values();
-		}
-	},
-
-	_setOption: function( key, value ) {
-		var i,
-			valsLength = 0;
-
-		if ( $.isArray( this.options.values ) ) {
-			valsLength = this.options.values.length;
-		}
-
-		$.Widget.prototype._setOption.apply( this, arguments );
-
-		switch ( key ) {
-			case "disabled":
-				if ( value ) {
-					this.handles.filter( ".ui-state-focus" ).blur();
-					this.handles.removeClass( "ui-state-hover" );
-					this.handles.propAttr( "disabled", true );
-					this.element.addClass( "ui-disabled" );
-				} else {
-					this.handles.propAttr( "disabled", false );
-					this.element.removeClass( "ui-disabled" );
-				}
-				break;
-			case "orientation":
-				this._detectOrientation();
-				this.element
-					.removeClass( "ui-slider-horizontal ui-slider-vertical" )
-					.addClass( "ui-slider-" + this.orientation );
-				this._refreshValue();
-				break;
-			case "value":
-				this._animateOff = true;
-				this._refreshValue();
-				this._change( null, 0 );
-				this._animateOff = false;
-				break;
-			case "values":
-				this._animateOff = true;
-				this._refreshValue();
-				for ( i = 0; i < valsLength; i += 1 ) {
-					this._change( null, i );
-				}
-				this._animateOff = false;
-				break;
-		}
-	},
-
-	//internal value getter
-	// _value() returns value trimmed by min and max, aligned by step
-	_value: function() {
-		var val = this.options.value;
-		val = this._trimAlignValue( val );
-
-		return val;
-	},
-
-	//internal values getter
-	// _values() returns array of values trimmed by min and max, aligned by step
-	// _values( index ) returns single value trimmed by min and max, aligned by step
-	_values: function( index ) {
-		var val,
-			vals,
-			i;
-
-		if ( arguments.length ) {
-			val = this.options.values[ index ];
-			val = this._trimAlignValue( val );
-
-			return val;
-		} else {
-			// .slice() creates a copy of the array
-			// this copy gets trimmed by min and max and then returned
-			vals = this.options.values.slice();
-			for ( i = 0; i < vals.length; i+= 1) {
-				vals[ i ] = this._trimAlignValue( vals[ i ] );
-			}
-
-			return vals;
-		}
-	},
-	
-	// returns the step-aligned value that val is closest to, between (inclusive) min and max
-	_trimAlignValue: function( val ) {
-		if ( val <= this._valueMin() ) {
-			return this._valueMin();
-		}
-		if ( val >= this._valueMax() ) {
-			return this._valueMax();
-		}
-		var step = ( this.options.step > 0 ) ? this.options.step : 1,
-			valModStep = (val - this._valueMin()) % step,
-			alignValue = val - valModStep;
-
-		if ( Math.abs(valModStep) * 2 >= step ) {
-			alignValue += ( valModStep > 0 ) ? step : ( -step );
-		}
-
-		// Since JavaScript has problems with large floats, round
-		// the final value to 5 digits after the decimal point (see #4124)
-		return parseFloat( alignValue.toFixed(5) );
-	},
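-
-	// Standalone sketch of the alignment above (editor's illustration, not
-	// part of the original file): snap a value to the nearest step inside
-	// [min, max], rounding to 5 decimals to sidestep float drift (#4124).
-	//
-	//   function trimAlign( val, min, max, step ) {
-	//       if ( val <= min ) { return min; }
-	//       if ( val >= max ) { return max; }
-	//       var rem = ( val - min ) % step,
-	//           aligned = val - rem;
-	//       if ( Math.abs( rem ) * 2 >= step ) {
-	//           aligned += ( rem > 0 ) ? step : -step;
-	//       }
-	//       return parseFloat( aligned.toFixed( 5 ) );
-	//   }
-	//   // trimAlign( 7.6, 0, 100, 5 ) === 10; trimAlign( 7.3, 0, 100, 5 ) === 5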
-
-	_valueMin: function() {
-		return this.options.min;
-	},
-
-	_valueMax: function() {
-		return this.options.max;
-	},
-	
-	_refreshValue: function() {
-		var oRange = this.options.range,
-			o = this.options,
-			self = this,
-			animate = ( !this._animateOff ) ? o.animate : false,
-			valPercent,
-			_set = {},
-			lastValPercent,
-			value,
-			valueMin,
-			valueMax;
-
-		if ( this.options.values && this.options.values.length ) {
-			this.handles.each(function( i, j ) {
-				valPercent = ( self.values(i) - self._valueMin() ) / ( self._valueMax() - self._valueMin() ) * 100;
-				_set[ self.orientation === "horizontal" ? "left" : "bottom" ] = valPercent + "%";
-				$( this ).stop( 1, 1 )[ animate ? "animate" : "css" ]( _set, o.animate );
-				if ( self.options.range === true ) {
-					if ( self.orientation === "horizontal" ) {
-						if ( i === 0 ) {
-							self.range.stop( 1, 1 )[ animate ? "animate" : "css" ]( { left: valPercent + "%" }, o.animate );
-						}
-						if ( i === 1 ) {
-							self.range[ animate ? "animate" : "css" ]( { width: ( valPercent - lastValPercent ) + "%" }, { queue: false, duration: o.animate } );
-						}
-					} else {
-						if ( i === 0 ) {
-							self.range.stop( 1, 1 )[ animate ? "animate" : "css" ]( { bottom: ( valPercent ) + "%" }, o.animate );
-						}
-						if ( i === 1 ) {
-							self.range[ animate ? "animate" : "css" ]( { height: ( valPercent - lastValPercent ) + "%" }, { queue: false, duration: o.animate } );
-						}
-					}
-				}
-				lastValPercent = valPercent;
-			});
-		} else {
-			value = this.value();
-			valueMin = this._valueMin();
-			valueMax = this._valueMax();
-			valPercent = ( valueMax !== valueMin ) ?
-					( value - valueMin ) / ( valueMax - valueMin ) * 100 :
-					0;
-			_set[ self.orientation === "horizontal" ? "left" : "bottom" ] = valPercent + "%";
-			this.handle.stop( 1, 1 )[ animate ? "animate" : "css" ]( _set, o.animate );
-
-			if ( oRange === "min" && this.orientation === "horizontal" ) {
-				this.range.stop( 1, 1 )[ animate ? "animate" : "css" ]( { width: valPercent + "%" }, o.animate );
-			}
-			if ( oRange === "max" && this.orientation === "horizontal" ) {
-				this.range[ animate ? "animate" : "css" ]( { width: ( 100 - valPercent ) + "%" }, { queue: false, duration: o.animate } );
-			}
-			if ( oRange === "min" && this.orientation === "vertical" ) {
-				this.range.stop( 1, 1 )[ animate ? "animate" : "css" ]( { height: valPercent + "%" }, o.animate );
-			}
-			if ( oRange === "max" && this.orientation === "vertical" ) {
-				this.range[ animate ? "animate" : "css" ]( { height: ( 100 - valPercent ) + "%" }, { queue: false, duration: o.animate } );
-			}
-		}
-	}
-
-});
-
-$.extend( $.ui.slider, {
-	version: "1.8.23"
-});
-
-}(jQuery));
diff --git a/branch-1.2/ambari-web/vendor/scripts/jquery.ui.sortable.js b/branch-1.2/ambari-web/vendor/scripts/jquery.ui.sortable.js
deleted file mode 100644
index 898c13b..0000000
--- a/branch-1.2/ambari-web/vendor/scripts/jquery.ui.sortable.js
+++ /dev/null
@@ -1,1087 +0,0 @@
-/*!
- * jQuery UI Sortable 1.9.0
- * http://jqueryui.com
- *
- * Copyright 2012 jQuery Foundation and other contributors
- * Released under the MIT license.
- * http://jquery.org/license
- *
- * http://api.jqueryui.com/sortable/
- *
- * Depends:
- *	jquery.ui.core.js
- *	jquery.ui.mouse.js
- *	jquery.ui.widget.js
- */
-(function( $, undefined ) {
-
-$.widget("ui.sortable", $.ui.mouse, {
-	version: "1.9.0",
-	widgetEventPrefix: "sort",
-	ready: false,
-	options: {
-		appendTo: "parent",
-		axis: false,
-		connectWith: false,
-		containment: false,
-		cursor: 'auto',
-		cursorAt: false,
-		dropOnEmpty: true,
-		forcePlaceholderSize: false,
-		forceHelperSize: false,
-		grid: false,
-		handle: false,
-		helper: "original",
-		items: '> *',
-		opacity: false,
-		placeholder: false,
-		revert: false,
-		scroll: true,
-		scrollSensitivity: 20,
-		scrollSpeed: 20,
-		scope: "default",
-		tolerance: "intersect",
-		zIndex: 1000
-	},
-	_create: function() {
-
-		var o = this.options;
-		this.containerCache = {};
-		this.element.addClass("ui-sortable");
-
-		//Get the items
-		this.refresh();
-
-		//Let's determine if the items are being displayed horizontally
-		this.floating = this.items.length ? o.axis === 'x' || (/left|right/).test(this.items[0].item.css('float')) || (/inline|table-cell/).test(this.items[0].item.css('display')) : false;
-
-		//Let's determine the parent's offset
-		this.offset = this.element.offset();
-
-		//Initialize mouse events for interaction
-		this._mouseInit();
-
-		//We're ready to go
-		this.ready = true;
-
-	},
-
-	_destroy: function() {
-		this.element
-			.removeClass("ui-sortable ui-sortable-disabled");
-		this._mouseDestroy();
-
-		for ( var i = this.items.length - 1; i >= 0; i-- )
-			this.items[i].item.removeData(this.widgetName + "-item");
-
-		return this;
-	},
-
-	_setOption: function(key, value){
-		if ( key === "disabled" ) {
-			this.options[ key ] = value;
-
-			this.widget().toggleClass( "ui-sortable-disabled", !!value );
-		} else {
-			// Don't call widget base _setOption for disable as it adds ui-state-disabled class
-			$.Widget.prototype._setOption.apply(this, arguments);
-		}
-	},
-
-	_mouseCapture: function(event, overrideHandle) {
-		var that = this;
-
-		if (this.reverting) {
-			return false;
-		}
-
-		if(this.options.disabled || this.options.type == 'static') return false;
-
-		//We have to refresh the items data once first
-		this._refreshItems(event);
-
-		//Find out if the clicked node (or one of its parents) is an actual item in this.items
-		var currentItem = null, nodes = $(event.target).parents().each(function() {
-			if($.data(this, that.widgetName + '-item') == that) {
-				currentItem = $(this);
-				return false;
-			}
-		});
-		if($.data(event.target, that.widgetName + '-item') == that) currentItem = $(event.target);
-
-		if(!currentItem) return false;
-		if(this.options.handle && !overrideHandle) {
-			var validHandle = false;
-
-			$(this.options.handle, currentItem).find("*").andSelf().each(function() { if(this == event.target) validHandle = true; });
-			if(!validHandle) return false;
-		}
-
-		this.currentItem = currentItem;
-		this._removeCurrentsFromItems();
-		return true;
-
-	},
-
-	_mouseStart: function(event, overrideHandle, noActivation) {
-
-		var o = this.options;
-		this.currentContainer = this;
-
-		//We only need to call refreshPositions, because the refreshItems call has been moved to mouseCapture
-		this.refreshPositions();
-
-		//Create and append the visible helper
-		this.helper = this._createHelper(event);
-
-		//Cache the helper size
-		this._cacheHelperProportions();
-
-		/*
-		 * - Position generation -
-		 * This block generates everything position related - it's the core of draggables.
-		 */
-
-		//Cache the margins of the original element
-		this._cacheMargins();
-
-		//Get the next scrolling parent
-		this.scrollParent = this.helper.scrollParent();
-
-		//The element's absolute position on the page minus margins
-		this.offset = this.currentItem.offset();
-		this.offset = {
-			top: this.offset.top - this.margins.top,
-			left: this.offset.left - this.margins.left
-		};
-
-		$.extend(this.offset, {
-			click: { //Where the click happened, relative to the element
-				left: event.pageX - this.offset.left,
-				top: event.pageY - this.offset.top
-			},
-			parent: this._getParentOffset(),
-			relative: this._getRelativeOffset() //The offset between relative and absolute positioning - only used for relatively positioned helpers
-		});
-
-		// Only after we got the offset, we can change the helper's position to absolute
-		// TODO: Still need to figure out a way to make relative sorting possible
-		this.helper.css("position", "absolute");
-		this.cssPosition = this.helper.css("position");
-
-		//Generate the original position
-		this.originalPosition = this._generatePosition(event);
-		this.originalPageX = event.pageX;
-		this.originalPageY = event.pageY;
-
-		//Adjust the mouse offset relative to the helper if 'cursorAt' is supplied
-		(o.cursorAt && this._adjustOffsetFromHelper(o.cursorAt));
-
-		//Cache the former DOM position
-		this.domPosition = { prev: this.currentItem.prev()[0], parent: this.currentItem.parent()[0] };
-
-		//If the helper is not the original, hide the original so it's not playing any role during the drag, won't cause anything bad this way
-		if(this.helper[0] != this.currentItem[0]) {
-			this.currentItem.hide();
-		}
-
-		//Create the placeholder
-		this._createPlaceholder();
-
-		//Set a containment if given in the options
-		if(o.containment)
-			this._setContainment();
-
-		if(o.cursor) { // cursor option
-			if ($('body').css("cursor")) this._storedCursor = $('body').css("cursor");
-			$('body').css("cursor", o.cursor);
-		}
-
-		if(o.opacity) { // opacity option
-			if (this.helper.css("opacity")) this._storedOpacity = this.helper.css("opacity");
-			this.helper.css("opacity", o.opacity);
-		}
-
-		if(o.zIndex) { // zIndex option
-			if (this.helper.css("zIndex")) this._storedZIndex = this.helper.css("zIndex");
-			this.helper.css("zIndex", o.zIndex);
-		}
-
-		//Prepare scrolling
-		if(this.scrollParent[0] != document && this.scrollParent[0].tagName != 'HTML')
-			this.overflowOffset = this.scrollParent.offset();
-
-		//Call callbacks
-		this._trigger("start", event, this._uiHash());
-
-		//Recache the helper size
-		if(!this._preserveHelperProportions)
-			this._cacheHelperProportions();
-
-
-		//Post 'activate' events to possible containers
-		if(!noActivation) {
-			 for (var i = this.containers.length - 1; i >= 0; i--) { this.containers[i]._trigger("activate", event, this._uiHash(this)); }
-		}
-
-		//Prepare possible droppables
-		if($.ui.ddmanager)
-			$.ui.ddmanager.current = this;
-
-		if ($.ui.ddmanager && !o.dropBehaviour)
-			$.ui.ddmanager.prepareOffsets(this, event);
-
-		this.dragging = true;
-
-		this.helper.addClass("ui-sortable-helper");
-		this._mouseDrag(event); //Execute the drag once - this causes the helper not to be visible before getting its correct position
-		return true;
-
-	},
-
-	_mouseDrag: function(event) {
-
-		//Compute the helpers position
-		this.position = this._generatePosition(event);
-		this.positionAbs = this._convertPositionTo("absolute");
-
-		if (!this.lastPositionAbs) {
-			this.lastPositionAbs = this.positionAbs;
-		}
-
-		//Do scrolling
-		if(this.options.scroll) {
-			var o = this.options, scrolled = false;
-			if(this.scrollParent[0] != document && this.scrollParent[0].tagName != 'HTML') {
-
-				if((this.overflowOffset.top + this.scrollParent[0].offsetHeight) - event.pageY < o.scrollSensitivity)
-					this.scrollParent[0].scrollTop = scrolled = this.scrollParent[0].scrollTop + o.scrollSpeed;
-				else if(event.pageY - this.overflowOffset.top < o.scrollSensitivity)
-					this.scrollParent[0].scrollTop = scrolled = this.scrollParent[0].scrollTop - o.scrollSpeed;
-
-				if((this.overflowOffset.left + this.scrollParent[0].offsetWidth) - event.pageX < o.scrollSensitivity)
-					this.scrollParent[0].scrollLeft = scrolled = this.scrollParent[0].scrollLeft + o.scrollSpeed;
-				else if(event.pageX - this.overflowOffset.left < o.scrollSensitivity)
-					this.scrollParent[0].scrollLeft = scrolled = this.scrollParent[0].scrollLeft - o.scrollSpeed;
-
-			} else {
-
-				if(event.pageY - $(document).scrollTop() < o.scrollSensitivity)
-					scrolled = $(document).scrollTop($(document).scrollTop() - o.scrollSpeed);
-				else if($(window).height() - (event.pageY - $(document).scrollTop()) < o.scrollSensitivity)
-					scrolled = $(document).scrollTop($(document).scrollTop() + o.scrollSpeed);
-
-				if(event.pageX - $(document).scrollLeft() < o.scrollSensitivity)
-					scrolled = $(document).scrollLeft($(document).scrollLeft() - o.scrollSpeed);
-				else if($(window).width() - (event.pageX - $(document).scrollLeft()) < o.scrollSensitivity)
-					scrolled = $(document).scrollLeft($(document).scrollLeft() + o.scrollSpeed);
-
-			}
-
-			if(scrolled !== false && $.ui.ddmanager && !o.dropBehaviour)
-				$.ui.ddmanager.prepareOffsets(this, event);
-		}
-
-		//Regenerate the absolute position used for position checks
-		this.positionAbs = this._convertPositionTo("absolute");
-
-		//Set the helper position
-		if(!this.options.axis || this.options.axis != "y") this.helper[0].style.left = this.position.left+'px';
-		if(!this.options.axis || this.options.axis != "x") this.helper[0].style.top = this.position.top+'px';
-
-		//Rearrange
-		for (var i = this.items.length - 1; i >= 0; i--) {
-
-			//Cache variables and intersection, continue if no intersection
-			var item = this.items[i], itemElement = item.item[0], intersection = this._intersectsWithPointer(item);
-			if (!intersection) continue;
-
-			// Only put the placeholder inside the current container; skip all
-			// items from other containers. This works because, when moving
-			// an item from one container to another, the
-			// currentContainer is switched before the placeholder is moved.
-			//
-			// Without this, moving items in "sub-sortables" can cause the placeholder to jitter
-			// between the outer and inner container.
-			if (item.instance !== this.currentContainer) continue;
-
-			if (itemElement != this.currentItem[0] //cannot intersect with itself
-				&&	this.placeholder[intersection == 1 ? "next" : "prev"]()[0] != itemElement //no useless actions that have been done before
-				&&	!$.contains(this.placeholder[0], itemElement) //no action if the item moved is the parent of the item checked
-				&& (this.options.type == 'semi-dynamic' ? !$.contains(this.element[0], itemElement) : true)
-				//&& itemElement.parentNode == this.placeholder[0].parentNode // only rearrange items within the same container
-			) {
-
-				this.direction = intersection == 1 ? "down" : "up";
-
-				if (this.options.tolerance == "pointer" || this._intersectsWithSides(item)) {
-					this._rearrange(event, item);
-				} else {
-					break;
-				}
-
-				this._trigger("change", event, this._uiHash());
-				break;
-			}
-		}
-
-		//Post events to containers
-		this._contactContainers(event);
-
-		//Interconnect with droppables
-		if($.ui.ddmanager) $.ui.ddmanager.drag(this, event);
-
-		//Call callbacks
-		this._trigger('sort', event, this._uiHash());
-
-		this.lastPositionAbs = this.positionAbs;
-		return false;
-
-	},
-
-	_mouseStop: function(event, noPropagation) {
-
-		if(!event) return;
-
-		//If we are using droppables, inform the manager about the drop
-		if ($.ui.ddmanager && !this.options.dropBehaviour)
-			$.ui.ddmanager.drop(this, event);
-
-		if(this.options.revert) {
-			var that = this;
-			var cur = this.placeholder.offset();
-
-			this.reverting = true;
-
-			$(this.helper).animate({
-				left: cur.left - this.offset.parent.left - this.margins.left + (this.offsetParent[0] == document.body ? 0 : this.offsetParent[0].scrollLeft),
-				top: cur.top - this.offset.parent.top - this.margins.top + (this.offsetParent[0] == document.body ? 0 : this.offsetParent[0].scrollTop)
-			}, parseInt(this.options.revert, 10) || 500, function() {
-				that._clear(event);
-			});
-		} else {
-			this._clear(event, noPropagation);
-		}
-
-		return false;
-
-	},
-
-	cancel: function() {
-
-		if(this.dragging) {
-
-			this._mouseUp({ target: null });
-
-			if(this.options.helper == "original")
-				this.currentItem.css(this._storedCSS).removeClass("ui-sortable-helper");
-			else
-				this.currentItem.show();
-
-			//Post deactivating events to containers
-			for (var i = this.containers.length - 1; i >= 0; i--){
-				this.containers[i]._trigger("deactivate", null, this._uiHash(this));
-				if(this.containers[i].containerCache.over) {
-					this.containers[i]._trigger("out", null, this._uiHash(this));
-					this.containers[i].containerCache.over = 0;
-				}
-			}
-
-		}
-
-		if (this.placeholder) {
-			//$(this.placeholder[0]).remove(); would have been the jQuery way - unfortunately, it unbinds ALL events from the original node!
-			if(this.placeholder[0].parentNode) this.placeholder[0].parentNode.removeChild(this.placeholder[0]);
-			if(this.options.helper != "original" && this.helper && this.helper[0].parentNode) this.helper.remove();
-
-			$.extend(this, {
-				helper: null,
-				dragging: false,
-				reverting: false,
-				_noFinalSort: null
-			});
-
-			if(this.domPosition.prev) {
-				$(this.domPosition.prev).after(this.currentItem);
-			} else {
-				$(this.domPosition.parent).prepend(this.currentItem);
-			}
-		}
-
-		return this;
-
-	},
-
-	serialize: function(o) {
-
-		var items = this._getItemsAsjQuery(o && o.connected);
-		var str = []; o = o || {};
-
-		$(items).each(function() {
-			var res = ($(o.item || this).attr(o.attribute || 'id') || '').match(o.expression || (/(.+)[-=_](.+)/));
-			if(res) str.push((o.key || res[1]+'[]')+'='+(o.key && o.expression ? res[1] : res[2]));
-		});
-
-		if(!str.length && o.key) {
-			str.push(o.key + '=');
-		}
-
-		return str.join('&');
-
-	},
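-
-	// Illustrative usage (editor's sketch, not part of the original file):
-	// with ids matching the default /(.+)[-=_](.+)/ expression, e.g. a
-	// hypothetical <ul id="list"> containing <li id="task_1">, <li id="task_2">:
-	//
-	//   $( "#list" ).sortable( "serialize" );
-	//   // => "task[]=1&task[]=2"
-	//   $( "#list" ).sortable( "serialize", { key: "order" } );
-	//   // => "order=1&order=2"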
-
-	toArray: function(o) {
-
-		var items = this._getItemsAsjQuery(o && o.connected);
-		var ret = []; o = o || {};
-
-		items.each(function() { ret.push($(o.item || this).attr(o.attribute || 'id') || ''); });
-		return ret;
-
-	},
-
-	/* Be careful with the following core functions */
-	_intersectsWith: function(item) {
-
-		var x1 = this.positionAbs.left,
-			x2 = x1 + this.helperProportions.width,
-			y1 = this.positionAbs.top,
-			y2 = y1 + this.helperProportions.height;
-
-		var l = item.left,
-			r = l + item.width,
-			t = item.top,
-			b = t + item.height;
-
-		var dyClick = this.offset.click.top,
-			dxClick = this.offset.click.left;
-
-		var isOverElement = (y1 + dyClick) > t && (y1 + dyClick) < b && (x1 + dxClick) > l && (x1 + dxClick) < r;
-
-		if(	   this.options.tolerance == "pointer"
-			|| this.options.forcePointerForContainers
-			|| (this.options.tolerance != "pointer" && this.helperProportions[this.floating ? 'width' : 'height'] > item[this.floating ? 'width' : 'height'])
-		) {
-			return isOverElement;
-		} else {
-
-			return (l < x1 + (this.helperProportions.width / 2) // Right Half
-				&& x2 - (this.helperProportions.width / 2) < r // Left Half
-				&& t < y1 + (this.helperProportions.height / 2) // Bottom Half
-				&& y2 - (this.helperProportions.height / 2) < b ); // Top Half
-
-		}
-	},
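-
-	// Editor's sketch (illustrative, not part of the original file): the
-	// else-branch above is a midpoint test on both axes -- the helper
-	// intersects when its center line falls inside the item. Per axis:
-	//
-	//   function midpointInside( helperStart, helperSize, itemStart, itemSize ) {
-	//       var mid = helperStart + helperSize / 2;
-	//       return itemStart < mid && mid < itemStart + itemSize;
-	//   }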
-
-	_intersectsWithPointer: function(item) {
-
-		var isOverElementHeight = (this.options.axis === 'x') || $.ui.isOverAxis(this.positionAbs.top + this.offset.click.top, item.top, item.height),
-			isOverElementWidth = (this.options.axis === 'y') || $.ui.isOverAxis(this.positionAbs.left + this.offset.click.left, item.left, item.width),
-			isOverElement = isOverElementHeight && isOverElementWidth,
-			verticalDirection = this._getDragVerticalDirection(),
-			horizontalDirection = this._getDragHorizontalDirection();
-
-		if (!isOverElement)
-			return false;
-
-		return this.floating ?
-			( ((horizontalDirection && horizontalDirection == "right") || verticalDirection == "down") ? 2 : 1 )
-			: ( verticalDirection && (verticalDirection == "down" ? 2 : 1) );
-
-	},
-
-	_intersectsWithSides: function(item) {
-
-		var isOverBottomHalf = $.ui.isOverAxis(this.positionAbs.top + this.offset.click.top, item.top + (item.height/2), item.height),
-			isOverRightHalf = $.ui.isOverAxis(this.positionAbs.left + this.offset.click.left, item.left + (item.width/2), item.width),
-			verticalDirection = this._getDragVerticalDirection(),
-			horizontalDirection = this._getDragHorizontalDirection();
-
-		if (this.floating && horizontalDirection) {
-			return ((horizontalDirection == "right" && isOverRightHalf) || (horizontalDirection == "left" && !isOverRightHalf));
-		} else {
-			return verticalDirection && ((verticalDirection == "down" && isOverBottomHalf) || (verticalDirection == "up" && !isOverBottomHalf));
-		}
-
-	},
-
-	_getDragVerticalDirection: function() {
-		var delta = this.positionAbs.top - this.lastPositionAbs.top;
-		return delta != 0 && (delta > 0 ? "down" : "up");
-	},
-
-	_getDragHorizontalDirection: function() {
-		var delta = this.positionAbs.left - this.lastPositionAbs.left;
-		return delta != 0 && (delta > 0 ? "right" : "left");
-	},
-
-	refresh: function(event) {
-		this._refreshItems(event);
-		this.refreshPositions();
-		return this;
-	},
-
-	_connectWith: function() {
-		var options = this.options;
-		return options.connectWith.constructor == String
-			? [options.connectWith]
-			: options.connectWith;
-	},
-
-	_getItemsAsjQuery: function(connected) {
-
-		var items = [];
-		var queries = [];
-		var connectWith = this._connectWith();
-
-		if(connectWith && connected) {
-			for (var i = connectWith.length - 1; i >= 0; i--){
-				var cur = $(connectWith[i]);
-				for (var j = cur.length - 1; j >= 0; j--){
-					var inst = $.data(cur[j], this.widgetName);
-					if(inst && inst != this && !inst.options.disabled) {
-						queries.push([$.isFunction(inst.options.items) ? inst.options.items.call(inst.element) : $(inst.options.items, inst.element).not(".ui-sortable-helper").not('.ui-sortable-placeholder'), inst]);
-					}
-				};
-			};
-		}
-
-		queries.push([$.isFunction(this.options.items) ? this.options.items.call(this.element, null, { options: this.options, item: this.currentItem }) : $(this.options.items, this.element).not(".ui-sortable-helper").not('.ui-sortable-placeholder'), this]);
-
-		for (var i = queries.length - 1; i >= 0; i--){
-			queries[i][0].each(function() {
-				items.push(this);
-			});
-		};
-
-		return $(items);
-
-	},
-
-	_removeCurrentsFromItems: function() {
-
-		var list = this.currentItem.find(":data(" + this.widgetName + "-item)");
-
-		for (var i=0; i < this.items.length; i++) {
-
-			for (var j=0; j < list.length; j++) {
-				if(list[j] == this.items[i].item[0])
-					this.items.splice(i,1);
-			};
-
-		};
-
-	},
-
-	_refreshItems: function(event) {
-
-		this.items = [];
-		this.containers = [this];
-		var items = this.items;
-		var queries = [[$.isFunction(this.options.items) ? this.options.items.call(this.element[0], event, { item: this.currentItem }) : $(this.options.items, this.element), this]];
-		var connectWith = this._connectWith();
-
-		if(connectWith && this.ready) { //Shouldn't be run the first time through due to massive slow-down
-			for (var i = connectWith.length - 1; i >= 0; i--){
-				var cur = $(connectWith[i]);
-				for (var j = cur.length - 1; j >= 0; j--){
-					var inst = $.data(cur[j], this.widgetName);
-					if(inst && inst != this && !inst.options.disabled) {
-						queries.push([$.isFunction(inst.options.items) ? inst.options.items.call(inst.element[0], event, { item: this.currentItem }) : $(inst.options.items, inst.element), inst]);
-						this.containers.push(inst);
-					}
-				};
-			};
-		}
-
-		for (var i = queries.length - 1; i >= 0; i--) {
-			var targetData = queries[i][1];
-			var _queries = queries[i][0];
-
-			for (var j=0, queriesLength = _queries.length; j < queriesLength; j++) {
-				var item = $(_queries[j]);
-
-				item.data(this.widgetName + '-item', targetData); // Data for target checking (mouse manager)
-
-				items.push({
-					item: item,
-					instance: targetData,
-					width: 0, height: 0,
-					left: 0, top: 0
-				});
-			};
-		};
-
-	},
-
-	refreshPositions: function(fast) {
-
-		//This has to be redone because, due to the item being moved out of/into the offsetParent, the offsetParent's position will change
-		if(this.offsetParent && this.helper) {
-			this.offset.parent = this._getParentOffset();
-		}
-
-		for (var i = this.items.length - 1; i >= 0; i--){
-			var item = this.items[i];
-
-			//We ignore calculating positions of all connected containers when we're not over them
-			if(item.instance != this.currentContainer && this.currentContainer && item.item[0] != this.currentItem[0])
-				continue;
-
-			var t = this.options.toleranceElement ? $(this.options.toleranceElement, item.item) : item.item;
-
-			if (!fast) {
-				item.width = t.outerWidth();
-				item.height = t.outerHeight();
-			}
-
-			var p = t.offset();
-			item.left = p.left;
-			item.top = p.top;
-		};
-
-		if(this.options.custom && this.options.custom.refreshContainers) {
-			this.options.custom.refreshContainers.call(this);
-		} else {
-			for (var i = this.containers.length - 1; i >= 0; i--){
-				var p = this.containers[i].element.offset();
-				this.containers[i].containerCache.left = p.left;
-				this.containers[i].containerCache.top = p.top;
-				this.containers[i].containerCache.width	= this.containers[i].element.outerWidth();
-				this.containers[i].containerCache.height = this.containers[i].element.outerHeight();
-			};
-		}
-
-		return this;
-	},
-
-	_createPlaceholder: function(that) {
-		that = that || this;
-		var o = that.options;
-
-		if(!o.placeholder || o.placeholder.constructor == String) {
-			var className = o.placeholder;
-			o.placeholder = {
-				element: function() {
-
-					var el = $(document.createElement(that.currentItem[0].nodeName))
-						.addClass(className || that.currentItem[0].className+" ui-sortable-placeholder")
-						.removeClass("ui-sortable-helper")[0];
-
-					if(!className)
-						el.style.visibility = "hidden";
-
-					return el;
-				},
-				update: function(container, p) {
-
-					// 1. If a className is set as the 'placeholder' option, we don't force sizes - the class is responsible for that
-					// 2. The option 'forcePlaceholderSize' can be enabled to force it even if a class name is specified
-					if(className && !o.forcePlaceholderSize) return;
-
-					//If the element doesn't have an actual height by itself (without styles coming from a stylesheet), it receives the inline height from the dragged item
-					if(!p.height()) { p.height(that.currentItem.innerHeight() - parseInt(that.currentItem.css('paddingTop')||0, 10) - parseInt(that.currentItem.css('paddingBottom')||0, 10)); };
-					if(!p.width()) { p.width(that.currentItem.innerWidth() - parseInt(that.currentItem.css('paddingLeft')||0, 10) - parseInt(that.currentItem.css('paddingRight')||0, 10)); };
-				}
-			};
-		}
-
-		//Create the placeholder
-		that.placeholder = $(o.placeholder.element.call(that.element, that.currentItem));
-
-		//Append it after the actual current item
-		that.currentItem.after(that.placeholder);
-
-		//Update the size of the placeholder (TODO: logic too fuzzy, see line 316/317)
-		o.placeholder.update(that, that.placeholder);
-
-	},
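-
-	// Illustrative usage (editor's sketch, not part of the original file):
-	// passing a class name as 'placeholder' delegates sizing to the
-	// stylesheet, unless forcePlaceholderSize restores the inline sizing
-	// above ("sort-placeholder" is a hypothetical class):
-	//
-	//   $( "#list" ).sortable({
-	//       placeholder: "sort-placeholder",
-	//       forcePlaceholderSize: true
-	//   });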
-
-	_contactContainers: function(event) {
-
-		// get innermost container that intersects with item
-		var innermostContainer = null, innermostIndex = null;
-
-
-		for (var i = this.containers.length - 1; i >= 0; i--){
-
-			// never consider a container that's located within the item itself
-			if($.contains(this.currentItem[0], this.containers[i].element[0]))
-				continue;
-
-			if(this._intersectsWith(this.containers[i].containerCache)) {
-
-				// if we've already found a container and it's more "inner" than this, then continue
-				if(innermostContainer && $.contains(this.containers[i].element[0], innermostContainer.element[0]))
-					continue;
-
-				innermostContainer = this.containers[i];
-				innermostIndex = i;
-
-			} else {
-				// container doesn't intersect. trigger "out" event if necessary
-				if(this.containers[i].containerCache.over) {
-					this.containers[i]._trigger("out", event, this._uiHash(this));
-					this.containers[i].containerCache.over = 0;
-				}
-			}
-
-		}
-
-		// if no intersecting containers found, return
-		if(!innermostContainer) return;
-
-		// move the item into the container if it's not there already
-		if(this.containers.length === 1) {
-			this.containers[innermostIndex]._trigger("over", event, this._uiHash(this));
-			this.containers[innermostIndex].containerCache.over = 1;
-		} else if(this.currentContainer != this.containers[innermostIndex]) {
-
-			//When entering a new container, we will find the item with the least distance and append our item near it
-			var dist = 10000; var itemWithLeastDistance = null; var base = this.positionAbs[this.containers[innermostIndex].floating ? 'left' : 'top'];
-			for (var j = this.items.length - 1; j >= 0; j--) {
-				if(!$.contains(this.containers[innermostIndex].element[0], this.items[j].item[0])) continue;
-				var cur = this.containers[innermostIndex].floating ? this.items[j].item.offset().left : this.items[j].item.offset().top;
-				if(Math.abs(cur - base) < dist) {
-					dist = Math.abs(cur - base); itemWithLeastDistance = this.items[j];
-					this.direction = (cur - base > 0) ? 'down' : 'up';
-				}
-			}
-
-			if(!itemWithLeastDistance && !this.options.dropOnEmpty) //Check if dropOnEmpty is enabled
-				return;
-
-			this.currentContainer = this.containers[innermostIndex];
-			itemWithLeastDistance ? this._rearrange(event, itemWithLeastDistance, null, true) : this._rearrange(event, null, this.containers[innermostIndex].element, true);
-			this._trigger("change", event, this._uiHash());
-			this.containers[innermostIndex]._trigger("change", event, this._uiHash(this));
-
-			//Update the placeholder
-			this.options.placeholder.update(this.currentContainer, this.placeholder);
-
-			this.containers[innermostIndex]._trigger("over", event, this._uiHash(this));
-			this.containers[innermostIndex].containerCache.over = 1;
-		}
-
-
-	},
-
-	_createHelper: function(event) {
-
-		var o = this.options;
-		var helper = $.isFunction(o.helper) ? $(o.helper.apply(this.element[0], [event, this.currentItem])) : (o.helper == 'clone' ? this.currentItem.clone() : this.currentItem);
-
-		if(!helper.parents('body').length) //Add the helper to the DOM if that didn't happen already
-			$(o.appendTo != 'parent' ? o.appendTo : this.currentItem[0].parentNode)[0].appendChild(helper[0]);
-
-		if(helper[0] == this.currentItem[0])
-			this._storedCSS = { width: this.currentItem[0].style.width, height: this.currentItem[0].style.height, position: this.currentItem.css("position"), top: this.currentItem.css("top"), left: this.currentItem.css("left") };
-
-		if(helper[0].style.width == '' || o.forceHelperSize) helper.width(this.currentItem.width());
-		if(helper[0].style.height == '' || o.forceHelperSize) helper.height(this.currentItem.height());
-
-		return helper;
-
-	},
-
-	_adjustOffsetFromHelper: function(obj) {
-		if (typeof obj == 'string') {
-			obj = obj.split(' ');
-		}
-		if ($.isArray(obj)) {
-			obj = {left: +obj[0], top: +obj[1] || 0};
-		}
-		if ('left' in obj) {
-			this.offset.click.left = obj.left + this.margins.left;
-		}
-		if ('right' in obj) {
-			this.offset.click.left = this.helperProportions.width - obj.right + this.margins.left;
-		}
-		if ('top' in obj) {
-			this.offset.click.top = obj.top + this.margins.top;
-		}
-		if ('bottom' in obj) {
-			this.offset.click.top = this.helperProportions.height - obj.bottom + this.margins.top;
-		}
-	},
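-
-	// Editor's note (illustrative, not part of the original file): cursorAt
-	// accepts several shapes, all normalized above into a click offset
-	// (values hypothetical):
-	//
-	//   cursorAt: "10 20"                  // string -> { left: 10, top: 20 }
-	//   cursorAt: [ 10, 20 ]               // array  -> { left: 10, top: 20 }
-	//   cursorAt: { right: 5, bottom: 5 }  // measured from the helper's far edges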
-
-	_getParentOffset: function() {
-
-
-		//Get the offsetParent and cache its position
-		this.offsetParent = this.helper.offsetParent();
-		var po = this.offsetParent.offset();
-
-		// This is a special case where we need to modify an offset calculated on start, since the following happened:
-		// 1. The position of the helper is absolute, so its position is calculated based on the next positioned parent
-		// 2. The actual offset parent is a child of the scroll parent, and the scroll parent isn't the document, which means that
-		//    the scroll is included in the initial calculation of the offset of the parent, and never recalculated upon drag
-		if(this.cssPosition == 'absolute' && this.scrollParent[0] != document && $.contains(this.scrollParent[0], this.offsetParent[0])) {
-			po.left += this.scrollParent.scrollLeft();
-			po.top += this.scrollParent.scrollTop();
-		}
-
-		if((this.offsetParent[0] == document.body) //This needs to be actually done for all browsers, since pageX/pageY includes this information
-		|| (this.offsetParent[0].tagName && this.offsetParent[0].tagName.toLowerCase() == 'html' && $.browser.msie)) //Ugly IE fix
-			po = { top: 0, left: 0 };
-
-		return {
-			top: po.top + (parseInt(this.offsetParent.css("borderTopWidth"),10) || 0),
-			left: po.left + (parseInt(this.offsetParent.css("borderLeftWidth"),10) || 0)
-		};
-
-	},
-
-	_getRelativeOffset: function() {
-
-		if(this.cssPosition == "relative") {
-			var p = this.currentItem.position();
-			return {
-				top: p.top - (parseInt(this.helper.css("top"),10) || 0) + this.scrollParent.scrollTop(),
-				left: p.left - (parseInt(this.helper.css("left"),10) || 0) + this.scrollParent.scrollLeft()
-			};
-		} else {
-			return { top: 0, left: 0 };
-		}
-
-	},
-
-	_cacheMargins: function() {
-		this.margins = {
-			left: (parseInt(this.currentItem.css("marginLeft"),10) || 0),
-			top: (parseInt(this.currentItem.css("marginTop"),10) || 0)
-		};
-	},
-
-	_cacheHelperProportions: function() {
-		this.helperProportions = {
-			width: this.helper.outerWidth(),
-			height: this.helper.outerHeight()
-		};
-	},
-
-	_setContainment: function() {
-
-		var o = this.options;
-		if(o.containment == 'parent') o.containment = this.helper[0].parentNode;
-		if(o.containment == 'document' || o.containment == 'window') this.containment = [
-			0 - this.offset.relative.left - this.offset.parent.left,
-			0 - this.offset.relative.top - this.offset.parent.top,
-			$(o.containment == 'document' ? document : window).width() - this.helperProportions.width - this.margins.left,
-			($(o.containment == 'document' ? document : window).height() || document.body.parentNode.scrollHeight) - this.helperProportions.height - this.margins.top
-		];
-
-		if(!(/^(document|window|parent)$/).test(o.containment)) {
-			var ce = $(o.containment)[0];
-			var co = $(o.containment).offset();
-			var over = ($(ce).css("overflow") != 'hidden');
-
-			this.containment = [
-				co.left + (parseInt($(ce).css("borderLeftWidth"),10) || 0) + (parseInt($(ce).css("paddingLeft"),10) || 0) - this.margins.left,
-				co.top + (parseInt($(ce).css("borderTopWidth"),10) || 0) + (parseInt($(ce).css("paddingTop"),10) || 0) - this.margins.top,
-				co.left+(over ? Math.max(ce.scrollWidth,ce.offsetWidth) : ce.offsetWidth) - (parseInt($(ce).css("borderLeftWidth"),10) || 0) - (parseInt($(ce).css("paddingRight"),10) || 0) - this.helperProportions.width - this.margins.left,
-				co.top+(over ? Math.max(ce.scrollHeight,ce.offsetHeight) : ce.offsetHeight) - (parseInt($(ce).css("borderTopWidth"),10) || 0) - (parseInt($(ce).css("paddingBottom"),10) || 0) - this.helperProportions.height - this.margins.top
-			];
-		}
-
-	},
-
-	_convertPositionTo: function(d, pos) {
-
-		if(!pos) pos = this.position;
-		var mod = d == "absolute" ? 1 : -1;
-		var o = this.options, scroll = this.cssPosition == 'absolute' && !(this.scrollParent[0] != document && $.contains(this.scrollParent[0], this.offsetParent[0])) ? this.offsetParent : this.scrollParent, scrollIsRootNode = (/(html|body)/i).test(scroll[0].tagName);
-
-		return {
-			top: (
-				pos.top																	// The absolute mouse position
-				+ this.offset.relative.top * mod										// Only for relative positioned nodes: Relative offset from element to offset parent
-				+ this.offset.parent.top * mod											// The offsetParent's offset without borders (offset + border)
-				- ( ( this.cssPosition == 'fixed' ? -this.scrollParent.scrollTop() : ( scrollIsRootNode ? 0 : scroll.scrollTop() ) ) * mod)
-			),
-			left: (
-				pos.left																// The absolute mouse position
-				+ this.offset.relative.left * mod										// Only for relative positioned nodes: Relative offset from element to offset parent
-				+ this.offset.parent.left * mod											// The offsetParent's offset without borders (offset + border)
-				- ( ( this.cssPosition == 'fixed' ? -this.scrollParent.scrollLeft() : scrollIsRootNode ? 0 : scroll.scrollLeft() ) * mod)
-			)
-		};
-
-	},
-
-	_generatePosition: function(event) {
-
-		var o = this.options, scroll = this.cssPosition == 'absolute' && !(this.scrollParent[0] != document && $.contains(this.scrollParent[0], this.offsetParent[0])) ? this.offsetParent : this.scrollParent, scrollIsRootNode = (/(html|body)/i).test(scroll[0].tagName);
-
-		// This is another very weird special case that only happens for relative elements:
-		// 1. If the css position is relative
-		// 2. and the scroll parent is the document or similar to the offset parent
-		// we have to refresh the relative offset during the scroll so there are no jumps
-		if(this.cssPosition == 'relative' && !(this.scrollParent[0] != document && this.scrollParent[0] != this.offsetParent[0])) {
-			this.offset.relative = this._getRelativeOffset();
-		}
-
-		var pageX = event.pageX;
-		var pageY = event.pageY;
-
-		/*
-		 * - Position constraining -
-		 * Constrain the position to a mix of grid, containment.
-		 */
-
-		if(this.originalPosition) { //If we are not dragging yet, we won't check for options
-
-			if(this.containment) {
-				if(event.pageX - this.offset.click.left < this.containment[0]) pageX = this.containment[0] + this.offset.click.left;
-				if(event.pageY - this.offset.click.top < this.containment[1]) pageY = this.containment[1] + this.offset.click.top;
-				if(event.pageX - this.offset.click.left > this.containment[2]) pageX = this.containment[2] + this.offset.click.left;
-				if(event.pageY - this.offset.click.top > this.containment[3]) pageY = this.containment[3] + this.offset.click.top;
-			}
-
-			if(o.grid) {
-				var top = this.originalPageY + Math.round((pageY - this.originalPageY) / o.grid[1]) * o.grid[1];
-				pageY = this.containment ? (!(top - this.offset.click.top < this.containment[1] || top - this.offset.click.top > this.containment[3]) ? top : (!(top - this.offset.click.top < this.containment[1]) ? top - o.grid[1] : top + o.grid[1])) : top;
-
-				var left = this.originalPageX + Math.round((pageX - this.originalPageX) / o.grid[0]) * o.grid[0];
-				pageX = this.containment ? (!(left - this.offset.click.left < this.containment[0] || left - this.offset.click.left > this.containment[2]) ? left : (!(left - this.offset.click.left < this.containment[0]) ? left - o.grid[0] : left + o.grid[0])) : left;
-			}
-
-		}
-
-		return {
-			top: (
-				pageY																// The absolute mouse position
-				- this.offset.click.top													// Click offset (relative to the element)
-				- this.offset.relative.top												// Only for relative positioned nodes: Relative offset from element to offset parent
-				- this.offset.parent.top												// The offsetParent's offset without borders (offset + border)
-				+ ( ( this.cssPosition == 'fixed' ? -this.scrollParent.scrollTop() : ( scrollIsRootNode ? 0 : scroll.scrollTop() ) ))
-			),
-			left: (
-				pageX																// The absolute mouse position
-				- this.offset.click.left												// Click offset (relative to the element)
-				- this.offset.relative.left												// Only for relative positioned nodes: Relative offset from element to offset parent
-				- this.offset.parent.left												// The offsetParent's offset without borders (offset + border)
-				+ ( ( this.cssPosition == 'fixed' ? -this.scrollParent.scrollLeft() : scrollIsRootNode ? 0 : scroll.scrollLeft() ))
-			)
-		};
-
-	},
-
-	_rearrange: function(event, i, a, hardRefresh) {
-
-		a ? a[0].appendChild(this.placeholder[0]) : i.item[0].parentNode.insertBefore(this.placeholder[0], (this.direction == 'down' ? i.item[0] : i.item[0].nextSibling));
-
-		//Various things done here to improve the performance:
-		// 1. we create a setTimeout, that calls refreshPositions
-		// 2. on the instance, we have a counter variable that gets higher after every append
-		// 3. on the local scope, we copy the counter variable, and check in the timeout, if it's still the same
-		// 4. this lets only the last addition to the timeout stack through
-		this.counter = this.counter ? ++this.counter : 1;
-		var counter = this.counter;
-
-		this._delay(function() {
-			if(counter == this.counter) this.refreshPositions(!hardRefresh); //Precompute after each DOM insertion, NOT on mousemove
-		});
-
-	},
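-
-	// Editor's sketch (illustrative, not part of the original file): the
-	// counter above is a "last write wins" debounce -- only the most recently
-	// scheduled refresh actually runs. The same pattern in isolation:
-	//
-	//   var counter = 0;
-	//   function scheduleLast( fn ) {
-	//       var mine = ++counter;
-	//       setTimeout( function() {
-	//           if ( mine === counter ) { fn(); } // stale timeouts are no-ops
-	//       }, 0 );
-	//   }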
-
-	_clear: function(event, noPropagation) {
-
-		this.reverting = false;
-		// We delay all events that have to be triggered until after the placeholder has been removed and
-		// everything else has been normalized again
-		var delayedTriggers = [];
-
-		// We first have to update the dom position of the actual currentItem
-		// Note: don't do it if the current item is already removed (by a user), or it gets reappended (see #4088)
-		if(!this._noFinalSort && this.currentItem.parent().length) this.placeholder.before(this.currentItem);
-		this._noFinalSort = null;
-
-		if(this.helper[0] == this.currentItem[0]) {
-			for(var i in this._storedCSS) {
-				if(this._storedCSS[i] == 'auto' || this._storedCSS[i] == 'static') this._storedCSS[i] = '';
-			}
-			this.currentItem.css(this._storedCSS).removeClass("ui-sortable-helper");
-		} else {
-			this.currentItem.show();
-		}
-
-		if(this.fromOutside && !noPropagation) delayedTriggers.push(function(event) { this._trigger("receive", event, this._uiHash(this.fromOutside)); });
-		if((this.fromOutside || this.domPosition.prev != this.currentItem.prev().not(".ui-sortable-helper")[0] || this.domPosition.parent != this.currentItem.parent()[0]) && !noPropagation) delayedTriggers.push(function(event) { this._trigger("update", event, this._uiHash()); }); //Trigger update callback if the DOM position has changed
-
-		// Check if the items Container has Changed and trigger appropriate
-		// events.
-		if (this !== this.currentContainer) {
-			if(!noPropagation) {
-				delayedTriggers.push(function(event) { this._trigger("remove", event, this._uiHash()); });
-				delayedTriggers.push((function(c) { return function(event) { c._trigger("receive", event, this._uiHash(this)); };  }).call(this, this.currentContainer));
-				delayedTriggers.push((function(c) { return function(event) { c._trigger("update", event, this._uiHash(this));  }; }).call(this, this.currentContainer));
-			}
-		}
-
-
-		//Post events to containers
-		for (var i = this.containers.length - 1; i >= 0; i--){
-			if(!noPropagation) delayedTriggers.push((function(c) { return function(event) { c._trigger("deactivate", event, this._uiHash(this)); };  }).call(this, this.containers[i]));
-			if(this.containers[i].containerCache.over) {
-				delayedTriggers.push((function(c) { return function(event) { c._trigger("out", event, this._uiHash(this)); };  }).call(this, this.containers[i]));
-				this.containers[i].containerCache.over = 0;
-			}
-		}
-
-		//Do what was originally in plugins
-		if(this._storedCursor) $('body').css("cursor", this._storedCursor); //Reset cursor
-		if(this._storedOpacity) this.helper.css("opacity", this._storedOpacity); //Reset opacity
-		if(this._storedZIndex) this.helper.css("zIndex", this._storedZIndex == 'auto' ? '' : this._storedZIndex); //Reset z-index
-
-		this.dragging = false;
-		if(this.cancelHelperRemoval) {
-			if(!noPropagation) {
-				this._trigger("beforeStop", event, this._uiHash());
-				for (var i=0; i < delayedTriggers.length; i++) { delayedTriggers[i].call(this, event); }; //Trigger all delayed events
-				this._trigger("stop", event, this._uiHash());
-			}
-
-			this.fromOutside = false;
-			return false;
-		}
-
-		if(!noPropagation) this._trigger("beforeStop", event, this._uiHash());
-
-		//$(this.placeholder[0]).remove(); would have been the jQuery way - unfortunately, it unbinds ALL events from the original node!
-		this.placeholder[0].parentNode.removeChild(this.placeholder[0]);
-
-		if(this.helper[0] != this.currentItem[0]) this.helper.remove();
-		this.helper = null;
-
-		if(!noPropagation) {
-			for (var i=0; i < delayedTriggers.length; i++) { delayedTriggers[i].call(this, event); }; //Trigger all delayed events
-			this._trigger("stop", event, this._uiHash());
-		}
-
-		this.fromOutside = false;
-		return true;
-
-	},
-
-	_trigger: function() {
-		if ($.Widget.prototype._trigger.apply(this, arguments) === false) {
-			this.cancel();
-		}
-	},
-
-	_uiHash: function(_inst) {
-		var inst = _inst || this;
-		return {
-			helper: inst.helper,
-			placeholder: inst.placeholder || $([]),
-			position: inst.position,
-			originalPosition: inst.originalPosition,
-			offset: inst.positionAbs,
-			item: inst.currentItem,
-			sender: _inst ? _inst.element : null
-		};
-	}
-
-});
-
-})(jQuery);
diff --git a/branch-1.2/ambari-web/vendor/scripts/jquery.ui.widget.js b/branch-1.2/ambari-web/vendor/scripts/jquery.ui.widget.js
deleted file mode 100644
index 544ce71..0000000
--- a/branch-1.2/ambari-web/vendor/scripts/jquery.ui.widget.js
+++ /dev/null
@@ -1,276 +0,0 @@
-/*!
- * jQuery UI Widget 1.8.23
- *
- * Copyright 2012, AUTHORS.txt (http://jqueryui.com/about)
- * Dual licensed under the MIT or GPL Version 2 licenses.
- * http://jquery.org/license
- *
- * http://docs.jquery.com/UI/Widget
- */
-(function ($, undefined) {
-
-// jQuery 1.4+
-  if ($.cleanData) {
-    var _cleanData = $.cleanData;
-    $.cleanData = function (elems) {
-      for (var i = 0, elem; (elem = elems[i]) != null; i++) {
-        try {
-          $(elem).triggerHandler("remove");
-          // http://bugs.jquery.com/ticket/8235
-        } catch (e) {
-        }
-      }
-      _cleanData(elems);
-    };
-  } else {
-    var _remove = $.fn.remove;
-    $.fn.remove = function (selector, keepData) {
-      return this.each(function () {
-        if (!keepData) {
-          if (!selector || $.filter(selector, [ this ]).length) {
-            $("*", this).add([ this ]).each(function () {
-              try {
-                $(this).triggerHandler("remove");
-                // http://bugs.jquery.com/ticket/8235
-              } catch (e) {
-              }
-            });
-          }
-        }
-        return _remove.call($(this), selector, keepData);
-      });
-    };
-  }
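-
-  // Editor's note (illustrative, not part of the original file): the patch
-  // above makes DOM removal observable via a "remove" event, which
-  // _createWidget below relies on to auto-destroy widgets. The same hook can
-  // be consumed directly ("#node" is a hypothetical selector):
-  //
-  //   $( "#node" ).bind( "remove", function () {
-  //     // runs when the element is removed or cleaned up
-  //   });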
-
-  $.widget = function (name, base, prototype) {
-    var namespace = name.split(".")[ 0 ],
-      fullName;
-    name = name.split(".")[ 1 ];
-    fullName = namespace + "-" + name;
-
-    if (!prototype) {
-      prototype = base;
-      base = $.Widget;
-    }
-
-    // create selector for plugin
-    $.expr[ ":" ][ fullName ] = function (elem) {
-      return !!$.data(elem, name);
-    };
-
-    $[ namespace ] = $[ namespace ] || {};
-    $[ namespace ][ name ] = function (options, element) {
-      // allow instantiation without initializing for simple inheritance
-      if (arguments.length) {
-        this._createWidget(options, element);
-      }
-    };
-
-    var basePrototype = new base();
-    // we need to make the options hash a property directly on the new instance
-    // otherwise we'll modify the options hash on the prototype that we're
-    // inheriting from
-//	$.each( basePrototype, function( key, val ) {
-//		if ( $.isPlainObject(val) ) {
-//			basePrototype[ key ] = $.extend( {}, val );
-//		}
-//	});
-    basePrototype.options = $.extend(true, {}, basePrototype.options);
-    $[ namespace ][ name ].prototype = $.extend(true, basePrototype, {
-      namespace:namespace,
-      widgetName:name,
-      widgetEventPrefix:$[ namespace ][ name ].prototype.widgetEventPrefix || name,
-      widgetBaseClass:fullName
-    }, prototype);
-
-    $.widget.bridge(name, $[ namespace ][ name ]);
-  };
-
-  $.widget.bridge = function (name, object) {
-    $.fn[ name ] = function (options) {
-      var isMethodCall = typeof options === "string",
-        args = Array.prototype.slice.call(arguments, 1),
-        returnValue = this;
-
-      // allow multiple hashes to be passed on init
-      options = !isMethodCall && args.length ?
-        $.extend.apply(null, [ true, options ].concat(args)) :
-        options;
-
-      // prevent calls to internal methods
-      if (isMethodCall && options.charAt(0) === "_") {
-        return returnValue;
-      }
-
-      if (isMethodCall) {
-        this.each(function () {
-          var instance = $.data(this, name),
-            methodValue = instance && $.isFunction(instance[options]) ?
-              instance[ options ].apply(instance, args) :
-              instance;
-          // TODO: add this back in 1.9 and use $.error() (see #5972)
-//				if ( !instance ) {
-//					throw "cannot call methods on " + name + " prior to initialization; " +
-//						"attempted to call method '" + options + "'";
-//				}
-//				if ( !$.isFunction( instance[options] ) ) {
-//					throw "no such method '" + options + "' for " + name + " widget instance";
-//				}
-//				var methodValue = instance[ options ].apply( instance, args );
-          if (methodValue !== instance && methodValue !== undefined) {
-            returnValue = methodValue;
-            return false;
-          }
-        });
-      } else {
-        this.each(function () {
-          var instance = $.data(this, name);
-          if (instance) {
-            instance.option(options || {})._init();
-          } else {
-            $.data(this, name, new object(options, this));
-          }
-        });
-      }
-
-      return returnValue;
-    };
-  };
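-
-  // Illustrative usage (editor's sketch, not part of the original file):
-  // after defining a widget, the bridge above exposes string-based method
-  // calls whose return values flow back through the plugin ("ui.greeter"
-  // and "#el" are hypothetical):
-  //
-  //   $.widget( "ui.greeter", {
-  //     hello: function () { return "hi"; }
-  //   });
-  //   $( "#el" ).greeter();           // instantiate
-  //   $( "#el" ).greeter( "hello" );  // => "hi" (return value bridged back)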
-
-  $.Widget = function (options, element) {
-    // allow instantiation without initializing for simple inheritance
-    if (arguments.length) {
-      this._createWidget(options, element);
-    }
-  };
-
-  $.Widget.prototype = {
-    widgetName:"widget",
-    widgetEventPrefix:"",
-    options:{
-      disabled:false
-    },
-    _createWidget:function (options, element) {
-      // $.widget.bridge stores the plugin instance, but we do it anyway
-      // so that it's stored even before the _create function runs
-      $.data(element, this.widgetName, this);
-      this.element = $(element);
-      this.options = $.extend(true, {},
-        this.options,
-        this._getCreateOptions(),
-        options);
-
-      var self = this;
-      this.element.bind("remove." + this.widgetName, function () {
-        self.destroy();
-      });
-
-      this._create();
-      this._trigger("create");
-      this._init();
-    },
-    _getCreateOptions:function () {
-      return $.metadata && $.metadata.get(this.element[0])[ this.widgetName ];
-    },
-    _create:function () {
-    },
-    _init:function () {
-    },
-
-    destroy:function () {
-      this.element
-        .unbind("." + this.widgetName)
-        .removeData(this.widgetName);
-      this.widget()
-        .unbind("." + this.widgetName)
-        .removeAttr("aria-disabled")
-        .removeClass(
-        this.widgetBaseClass + "-disabled " +
-          "ui-state-disabled");
-    },
-
-    widget:function () {
-      return this.element;
-    },
-
-    option:function (key, value) {
-      var options = key;
-
-      if (arguments.length === 0) {
-        // don't return a reference to the internal hash
-        return $.extend({}, this.options);
-      }
-
-      if (typeof key === "string") {
-        if (value === undefined) {
-          return this.options[ key ];
-        }
-        options = {};
-        options[ key ] = value;
-      }
-
-      this._setOptions(options);
-
-      return this;
-    },
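-
-    // Editor's illustration (not part of the original file): option() is an
-    // overloaded getter/setter (w is a hypothetical widget instance):
-    //
-    //   w.option();                     // copy of the whole options hash
-    //   w.option( "disabled" );         // read one key
-    //   w.option( "disabled", true );   // set one key
-    //   w.option({ disabled: true });   // set several keys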
-    _setOptions:function (options) {
-      var self = this;
-      $.each(options, function (key, value) {
-        self._setOption(key, value);
-      });
-
-      return this;
-    },
-    _setOption:function (key, value) {
-      this.options[ key ] = value;
-
-      if (key === "disabled") {
-        this.widget()
-          [ value ? "addClass" : "removeClass"](
-          this.widgetBaseClass + "-disabled" + " " +
-            "ui-state-disabled")
-          .attr("aria-disabled", value);
-      }
-
-      return this;
-    },
-
-    enable:function () {
-      return this._setOption("disabled", false);
-    },
-    disable:function () {
-      return this._setOption("disabled", true);
-    },
-
-    _trigger:function (type, event, data) {
-      var prop, orig,
-        callback = this.options[ type ];
-
-      data = data || {};
-      event = $.Event(event);
-      event.type = ( type === this.widgetEventPrefix ?
-        type :
-        this.widgetEventPrefix + type ).toLowerCase();
-      // the original event may come from any element
-      // so we need to reset the target on the new event
-      event.target = this.element[ 0 ];
-
-      // copy original event properties over to the new event
-      orig = event.originalEvent;
-      if (orig) {
-        for (prop in orig) {
-          if (!( prop in event )) {
-            event[ prop ] = orig[ prop ];
-          }
-        }
-      }
-
-      this.element.trigger(event, data);
-
-      return !( $.isFunction(callback) &&
-        callback.call(this.element[0], event, data) === false ||
-        event.isDefaultPrevented() );
-    }
-  };
-
-})(jQuery);
diff --git a/branch-1.2/ambari-web/vendor/scripts/rickshaw.js b/branch-1.2/ambari-web/vendor/scripts/rickshaw.js
deleted file mode 100644
index 2f6754d..0000000
--- a/branch-1.2/ambari-web/vendor/scripts/rickshaw.js
+++ /dev/null
@@ -1,2658 +0,0 @@
-var Rickshaw = {
-
-	namespace: function(namespace, obj) {
-
-		var parts = namespace.split('.');
-
-		var parent = Rickshaw;
-
-		for(var i = 1, length = parts.length; i < length; i++) {
-			var currentPart = parts[i];
-			parent[currentPart] = parent[currentPart] || {};
-			parent = parent[currentPart];
-		}
-		return parent;
-	},
-
-	keys: function(obj) {
-		var keys = [];
-		for (var key in obj) keys.push(key);
-		return keys;
-	},
-
-	extend: function(destination, source) {
-
-		for (var property in source) {
-			destination[property] = source[property];
-		}
-		return destination;
-	}
-};
-
-if (typeof module !== 'undefined' && module.exports) {
-	var d3 = require('d3');
-	module.exports = Rickshaw;
-}
-
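-// Illustrative note (not part of the original source): namespace() walks the
-// dot-separated path, creating empty objects along the way, and returns the
-// innermost one. Every module below registers itself this way, e.g.:
-//
-//   Rickshaw.namespace('Rickshaw.Graph.Legend');
-//   Rickshaw.Graph.Legend = function(args) { /* ... */ };
-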
-/* Adapted from https://github.com/Jakobo/PTClass */
-
-/*
-Copyright (c) 2005-2010 Sam Stephenson
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-*/
-/* Based on Alex Arnell's inheritance implementation. */
-/** section: Language
- * class Class
- *
- *  Manages Prototype's class-based OOP system.
- *
- *  Refer to Prototype's web site for a [tutorial on classes and
- *  inheritance](http://prototypejs.org/learn/class-inheritance).
-**/
-(function(globalContext) {
-/* ------------------------------------ */
-/* Import from object.js                */
-/* ------------------------------------ */
-var _toString = Object.prototype.toString,
-    NULL_TYPE = 'Null',
-    UNDEFINED_TYPE = 'Undefined',
-    BOOLEAN_TYPE = 'Boolean',
-    NUMBER_TYPE = 'Number',
-    STRING_TYPE = 'String',
-    OBJECT_TYPE = 'Object',
-    FUNCTION_CLASS = '[object Function]';
-function isFunction(object) {
-  return _toString.call(object) === FUNCTION_CLASS;
-}
-function extend(destination, source) {
-  for (var property in source) if (source.hasOwnProperty(property)) // copy own properties only
-    destination[property] = source[property];
-  return destination;
-}
-function keys(object) {
-  if (Type(object) !== OBJECT_TYPE) { throw new TypeError(); }
-  var results = [];
-  for (var property in object) {
-    if (object.hasOwnProperty(property)) {
-      results.push(property);
-    }
-  }
-  return results;
-}
-function Type(o) {
-  switch(o) {
-    case null: return NULL_TYPE;
-    case (void 0): return UNDEFINED_TYPE;
-  }
-  var type = typeof o;
-  switch(type) {
-    case 'boolean': return BOOLEAN_TYPE;
-    case 'number':  return NUMBER_TYPE;
-    case 'string':  return STRING_TYPE;
-  }
-  return OBJECT_TYPE;
-}
-function isUndefined(object) {
-  return typeof object === "undefined";
-}
-/* ------------------------------------ */
-/* Import from Function.js              */
-/* ------------------------------------ */
-var slice = Array.prototype.slice;
-function argumentNames(fn) {
-  var names = fn.toString().match(/^[\s\(]*function[^(]*\(([^)]*)\)/)[1]
-    .replace(/\/\/.*?[\r\n]|\/\*(?:.|[\r\n])*?\*\//g, '')
-    .replace(/\s+/g, '').split(',');
-  return names.length == 1 && !names[0] ? [] : names;
-}
-function wrap(fn, wrapper) {
-  var __method = fn;
-  return function() {
-    var a = update([bind(__method, this)], arguments);
-    return wrapper.apply(this, a);
-  }
-}
-function update(array, args) {
-  var arrayLength = array.length, length = args.length;
-  while (length--) array[arrayLength + length] = args[length];
-  return array;
-}
-function merge(array, args) {
-  array = slice.call(array, 0);
-  return update(array, args);
-}
-function bind(fn, context) {
-  if (arguments.length < 2 && isUndefined(arguments[0])) return this;
-  var __method = fn, args = slice.call(arguments, 2);
-  return function() {
-    var a = merge(args, arguments);
-    return __method.apply(context, a);
-  }
-}
-
-/* ------------------------------------ */
-/* Import from Prototype.js             */
-/* ------------------------------------ */
-var emptyFunction = function(){};
-
-var Class = (function() {
-  
-  // Some versions of JScript fail to enumerate over properties, names of which 
-  // correspond to non-enumerable properties in the prototype chain
-  var IS_DONTENUM_BUGGY = (function(){
-    for (var p in { toString: 1 }) {
-      // check actual property name, so that it works with augmented Object.prototype
-      if (p === 'toString') return false;
-    }
-    return true;
-  })();
-  
-  function subclass() {};
-  function create() {
-    var parent = null, properties = [].slice.apply(arguments);
-    if (isFunction(properties[0]))
-      parent = properties.shift();
-
-    function klass() {
-      this.initialize.apply(this, arguments);
-    }
-
-    extend(klass, Class.Methods);
-    klass.superclass = parent;
-    klass.subclasses = [];
-
-    if (parent) {
-      subclass.prototype = parent.prototype;
-      klass.prototype = new subclass;
-      try { parent.subclasses.push(klass) } catch(e) {}
-    }
-
-    for (var i = 0, length = properties.length; i < length; i++)
-      klass.addMethods(properties[i]);
-
-    if (!klass.prototype.initialize)
-      klass.prototype.initialize = emptyFunction;
-
-    klass.prototype.constructor = klass;
-    return klass;
-  }
-
-  function addMethods(source) {
-    var ancestor   = this.superclass && this.superclass.prototype,
-        properties = keys(source);
-
-    // IE6 doesn't enumerate `toString` and `valueOf` (among other built-in `Object.prototype`) properties,
-    // Force copy if they're not Object.prototype ones.
-    // Do not copy other Object.prototype.* for performance reasons
-    if (IS_DONTENUM_BUGGY) {
-      if (source.toString != Object.prototype.toString)
-        properties.push("toString");
-      if (source.valueOf != Object.prototype.valueOf)
-        properties.push("valueOf");
-    }
-
-    for (var i = 0, length = properties.length; i < length; i++) {
-      var property = properties[i], value = source[property];
-      if (ancestor && isFunction(value) &&
-          argumentNames(value)[0] == "$super") {
-        var method = value;
-        value = wrap((function(m) {
-          return function() { return ancestor[m].apply(this, arguments); };
-        })(property), method);
-
-        value.valueOf = bind(method.valueOf, method);
-        value.toString = bind(method.toString, method);
-      }
-      this.prototype[property] = value;
-    }
-
-    return this;
-  }
-
-  return {
-    create: create,
-    Methods: {
-      addMethods: addMethods
-    }
-  };
-})();
-
-if (globalContext.exports) {
-  globalContext.exports.Class = Class;
-}
-else {
-  globalContext.Class = Class;
-}
-})(Rickshaw);
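-
-// Illustrative sketch of the Class shim above: create() takes an optional
-// parent constructor followed by method objects; a method whose first
-// argument is named "$super" is wrapped so it can call the parent's version,
-// which is how the renderer subclasses later in this file are built:
-//
-//   var Base = Rickshaw.Class.create({ initialize: function(args) {} });
-//   var Sub = Rickshaw.Class.create(Base, {
-//     initialize: function($super, args) { $super(args); }
-//   });
-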
-Rickshaw.namespace('Rickshaw.Compat.ClassList');
-
-Rickshaw.Compat.ClassList = function() {
-
-	/* adapted from http://purl.eligrey.com/github/classList.js/blob/master/classList.js */
-
-	if (typeof document !== "undefined" && !("classList" in document.createElement("a"))) {
-
-	(function (view) {
-
-	"use strict";
-
-	var
-		  classListProp = "classList"
-		, protoProp = "prototype"
-		, elemCtrProto = (view.HTMLElement || view.Element)[protoProp]
-		, objCtr = Object
-		, strTrim = String[protoProp].trim || function () {
-			return this.replace(/^\s+|\s+$/g, "");
-		}
-		, arrIndexOf = Array[protoProp].indexOf || function (item) {
-			var
-				  i = 0
-				, len = this.length
-			;
-			for (; i < len; i++) {
-				if (i in this && this[i] === item) {
-					return i;
-				}
-			}
-			return -1;
-		}
-		// Vendors: please allow content code to instantiate DOMExceptions
-		, DOMEx = function (type, message) {
-			this.name = type;
-			this.code = DOMException[type];
-			this.message = message;
-		}
-		, checkTokenAndGetIndex = function (classList, token) {
-			if (token === "") {
-				throw new DOMEx(
-					  "SYNTAX_ERR"
-					, "An invalid or illegal string was specified"
-				);
-			}
-			if (/\s/.test(token)) {
-				throw new DOMEx(
-					  "INVALID_CHARACTER_ERR"
-					, "String contains an invalid character"
-				);
-			}
-			return arrIndexOf.call(classList, token);
-		}
-		, ClassList = function (elem) {
-			var
-				  trimmedClasses = strTrim.call(elem.className)
-				, classes = trimmedClasses ? trimmedClasses.split(/\s+/) : []
-				, i = 0
-				, len = classes.length
-			;
-			for (; i < len; i++) {
-				this.push(classes[i]);
-			}
-			this._updateClassName = function () {
-				elem.className = this.toString();
-			};
-		}
-		, classListProto = ClassList[protoProp] = []
-		, classListGetter = function () {
-			return new ClassList(this);
-		}
-	;
-	// Most DOMException implementations don't allow calling DOMException's toString()
-	// on non-DOMExceptions. Error's toString() is sufficient here.
-	DOMEx[protoProp] = Error[protoProp];
-	classListProto.item = function (i) {
-		return this[i] || null;
-	};
-	classListProto.contains = function (token) {
-		token += "";
-		return checkTokenAndGetIndex(this, token) !== -1;
-	};
-	classListProto.add = function (token) {
-		token += "";
-		if (checkTokenAndGetIndex(this, token) === -1) {
-			this.push(token);
-			this._updateClassName();
-		}
-	};
-	classListProto.remove = function (token) {
-		token += "";
-		var index = checkTokenAndGetIndex(this, token);
-		if (index !== -1) {
-			this.splice(index, 1);
-			this._updateClassName();
-		}
-	};
-	classListProto.toggle = function (token) {
-		token += "";
-		if (checkTokenAndGetIndex(this, token) === -1) {
-			this.add(token);
-		} else {
-			this.remove(token);
-		}
-	};
-	classListProto.toString = function () {
-		return this.join(" ");
-	};
-
-	if (objCtr.defineProperty) {
-		var classListPropDesc = {
-			  get: classListGetter
-			, enumerable: true
-			, configurable: true
-		};
-		try {
-			objCtr.defineProperty(elemCtrProto, classListProp, classListPropDesc);
-		} catch (ex) { // IE 8 doesn't support enumerable:true
-			if (ex.number === -0x7FF5EC54) {
-				classListPropDesc.enumerable = false;
-				objCtr.defineProperty(elemCtrProto, classListProp, classListPropDesc);
-			}
-		}
-	} else if (objCtr[protoProp].__defineGetter__) {
-		elemCtrProto.__defineGetter__(classListProp, classListGetter);
-	}
-
-	}(window));
-
-	}
-};
-
-if (typeof RICKSHAW_NO_COMPAT === "undefined" || !RICKSHAW_NO_COMPAT) {
-	new Rickshaw.Compat.ClassList();
-}
-Rickshaw.namespace('Rickshaw.Graph');
-
-Rickshaw.Graph = function(args) {
-
-	this.element = args.element;
-	this.series = args.series;
-
-	this.defaults = {
-		interpolation: 'cardinal',
-		offset: 'zero',
-		min: undefined,
-		max: undefined
-	};
-
-	Rickshaw.keys(this.defaults).forEach( function(k) {
-		this[k] = args[k] || this.defaults[k];
-	}, this );
-
-	this.window = {};
-
-	this.updateCallbacks = [];
-
-	var self = this;
-
-	this.initialize = function(args) {
-
-		this.validateSeries(args.series);
-
-		this.series.active = function() { return self.series.filter( function(s) { return !s.disabled } ) };
-
-		this.setSize({ width: args.width, height: args.height });
-
-		this.element.classList.add('rickshaw_graph');
-		this.vis = d3.select(this.element)
-			.append("svg:svg")
-			.attr('width', this.width)
-			.attr('height', this.height);
-
-		var renderers = [
-			Rickshaw.Graph.Renderer.Stack,
-			Rickshaw.Graph.Renderer.Line,
-			Rickshaw.Graph.Renderer.Bar,
-			Rickshaw.Graph.Renderer.Area,
-			Rickshaw.Graph.Renderer.ScatterPlot
-		];
-
-		renderers.forEach( function(r) {
-			if (!r) return;
-			self.registerRenderer(new r( { graph: self } ));
-		} );
-
-		this.setRenderer(args.renderer || 'stack', args);
-		this.discoverRange();
-	};
-
-	this.validateSeries = function(series) {
-
-		if (!(series instanceof Array) && !(series instanceof Rickshaw.Series)) {
-			var seriesSignature = Object.prototype.toString.apply(series);
-			throw "series is not an array: " + seriesSignature;
-		}
-
-		var pointsCount;
-
-		series.forEach( function(s) {
-
-			if (!(s instanceof Object)) {
-				throw "series element is not an object: " + s;
-			}
-			if (!(s.data)) {
-				throw "series has no data: " + JSON.stringify(s);
-			}
-			if (!(s.data instanceof Array)) {
-				throw "series data is not an array: " + JSON.stringify(s.data);
-			}
-
-			pointsCount = pointsCount || s.data.length;
-
-			if (pointsCount && s.data.length != pointsCount) {
-				throw "series cannot have differing numbers of points: " +
-					pointsCount	+ " vs " + s.data.length + "; see Rickshaw.Series.zeroFill()";
-			}
-
-			var dataTypeX = typeof s.data[0].x;
-			var dataTypeY = typeof s.data[0].y;
-
-			if (dataTypeX != 'number' || dataTypeY != 'number') {
-				throw "x and y properties of points should be numbers instead of " +
-					dataTypeX + " and " + dataTypeY;
-			}
-		} );
-	};
-
-	this.dataDomain = function() {
-
-		// take from the first series
-		var data = this.series[0].data;
-
-		return [ data[0].x, data.slice(-1).shift().x ];
-
-	};
-
-	this.discoverRange = function() {
-
-		var domain = this.renderer.domain();
-
-		this.x = d3.scale.linear().domain(domain.x).range([0, this.width]);
-
-		this.y = d3.scale.linear().domain(domain.y).range([this.height, 0]);
-
-		this.y.magnitude = d3.scale.linear()
-			.domain([domain.y[0] - domain.y[0], domain.y[1] - domain.y[0]])
-			.range([0, this.height]);
-	};
-
-	this.render = function() {
-
-		var stackedData = this.stackData();
-		this.discoverRange();
-
-		this.renderer.render();
-
-		this.updateCallbacks.forEach( function(callback) {
-			callback();
-		} );
-	};
-
-	this.update = this.render;
-
-	this.stackData = function() {
-
-		var data = this.series.active()
-			.map( function(d) { return d.data } )
-			.map( function(d) { return d.filter( function(d) { return this._slice(d) }, this ) }, this);
-
-		this.stackData.hooks.data.forEach( function(entry) {
-			data = entry.f.apply(self, [data]);
-		} );
-
-		var layout = d3.layout.stack();
-		layout.offset( self.offset );
-
-		var stackedData = layout(data);
-
-		this.stackData.hooks.after.forEach( function(entry) {
-			stackedData = entry.f.apply(self, [data]);
-		} );
-
-		var i = 0;
-		this.series.forEach( function(series) {
-			if (series.disabled) return;
-			series.stack = stackedData[i++];
-		} );
-
-		this.stackedData = stackedData;
-		return stackedData;
-	};
-
-	this.stackData.hooks = { data: [], after: [] };
-
-	this._slice = function(d) {
-
-		if (this.window.xMin || this.window.xMax) {
-
-			var isInRange = true;
-
-			if (this.window.xMin && d.x < this.window.xMin) isInRange = false;
-			if (this.window.xMax && d.x > this.window.xMax) isInRange = false;
-
-			return isInRange;
-		}
-
-		return true;
-	};
-
-	this.onUpdate = function(callback) {
-		this.updateCallbacks.push(callback);
-	};
-
-	this.registerRenderer = function(renderer) {
-		this._renderers = this._renderers || {};
-		this._renderers[renderer.name] = renderer;
-	};
-
-	this.configure = function(args) {
-
-		if (args.width || args.height) {
-			this.setSize(args);
-		}
-
-		Rickshaw.keys(this.defaults).forEach( function(k) {
-			this[k] = k in args ? args[k]
-				: k in this ? this[k]
-				: this.defaults[k];
-		}, this );
-
-		this.setRenderer(args.renderer || this.renderer.name, args);
-	};
-
-	this.setRenderer = function(name, args) {
-
-		if (!this._renderers[name]) {
-			throw "couldn't find renderer " + name;
-		}
-		this.renderer = this._renderers[name];
-
-		if (typeof args == 'object') {
-			this.renderer.configure(args);
-		}
-	};
-
-	this.setSize = function(args) {
-
-		args = args || {};
-
-		if (typeof window !== 'undefined') {
-			var style = window.getComputedStyle(this.element, null);
-			var elementWidth = parseInt(style.getPropertyValue('width'));
-			var elementHeight = parseInt(style.getPropertyValue('height'));
-		}
-
-		this.width = args.width || elementWidth || 400;
-		this.height = args.height || elementHeight || 250;
-
-		this.vis && this.vis
-			.attr('width', this.width)
-			.attr('height', this.height);
-	}
-
-	this.initialize(args);
-};
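-
-// Minimal construction sketch (illustrative), mirroring what the constructor
-// above reads: element and series are required, width/height/renderer are
-// optional, and every series point needs numeric x and y values:
-//
-//   var graph = new Rickshaw.Graph({
-//     element: document.querySelector('#chart'),
-//     renderer: 'line',
-//     series: [ { name: 'a', color: '#4682b4', data: [ { x: 0, y: 40 }, { x: 1, y: 49 } ] } ]
-//   });
-//   graph.render();
-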
-Rickshaw.namespace('Rickshaw.Fixtures.Color');
-
-Rickshaw.Fixtures.Color = function() {
-
-	this.schemes = {};
-
-	this.schemes.spectrum14 = [
-		'#ecb796',
-		'#dc8f70',
-		'#b2a470',
-		'#92875a',
-		'#716c49',
-		'#d2ed82',
-		'#bbe468',
-		'#a1d05d',
-		'#e7cbe6',
-		'#d8aad6',
-		'#a888c2',
-		'#9dc2d3',
-		'#649eb9',
-		'#387aa3'
-	].reverse();
-
-	this.schemes.spectrum2000 = [
-		'#57306f',
-		'#514c76',
-		'#646583',
-		'#738394',
-		'#6b9c7d',
-		'#84b665',
-		'#a7ca50',
-		'#bfe746',
-		'#e2f528',
-		'#fff726',
-		'#ecdd00',
-		'#d4b11d',
-		'#de8800',
-		'#de4800',
-		'#c91515',
-		'#9a0000',
-		'#7b0429',
-		'#580839',
-		'#31082b'
-	];
-
-	this.schemes.spectrum2001 = [
-		'#2f243f',
-		'#3c2c55',
-		'#4a3768',
-		'#565270',
-		'#6b6b7c',
-		'#72957f',
-		'#86ad6e',
-		'#a1bc5e',
-		'#b8d954',
-		'#d3e04e',
-		'#ccad2a',
-		'#cc8412',
-		'#c1521d',
-		'#ad3821',
-		'#8a1010',
-		'#681717',
-		'#531e1e',
-		'#3d1818',
-		'#320a1b'
-	];
-
-	this.schemes.classic9 = [
-		'#423d4f',
-		'#4a6860',
-		'#848f39',
-		'#a2b73c',
-		'#ddcb53',
-		'#c5a32f',
-		'#7d5836',
-		'#963b20',
-		'#7c2626',
-		'#491d37',
-		'#2f254a'
-	].reverse();
-
-	this.schemes.httpStatus = {
-		503: '#ea5029',
-		502: '#d23f14',
-		500: '#bf3613',
-		410: '#efacea',
-		409: '#e291dc',
-		403: '#f457e8',
-		408: '#e121d2',
-		401: '#b92dae',
-		405: '#f47ceb',
-		404: '#a82a9f',
-		400: '#b263c6',
-		301: '#6fa024',
-		302: '#87c32b',
-		307: '#a0d84c',
-		304: '#28b55c',
-		200: '#1a4f74',
-		206: '#27839f',
-		201: '#52adc9',
-		202: '#7c979f',
-		203: '#a5b8bd',
-		204: '#c1cdd1'
-	};
-
-	this.schemes.colorwheel = [
-		'#b5b6a9',
-		'#858772',
-		'#785f43',
-		'#96557e',
-		'#4682b4',
-		'#65b9ac',
-		'#73c03a',
-		'#cb513a'
-	].reverse();
-
-	this.schemes.cool = [
-		'#5e9d2f',
-		'#73c03a',
-		'#4682b4',
-		'#7bc3b8',
-		'#a9884e',
-		'#c1b266',
-		'#a47493',
-		'#c09fb5'
-	];
-
-	this.schemes.munin = [
-		'#00cc00',
-		'#0066b3',
-		'#ff8000',
-		'#ffcc00',
-		'#330099',
-		'#990099',
-		'#ccff00',
-		'#ff0000',
-		'#808080',
-		'#008f00',
-		'#00487d',
-		'#b35a00',
-		'#b38f00',
-		'#6b006b',
-		'#8fb300',
-		'#b30000',
-		'#bebebe',
-		'#80ff80',
-		'#80c9ff',
-		'#ffc080',
-		'#ffe680',
-		'#aa80ff',
-		'#ee00cc',
-		'#ff8080',
-		'#666600',
-		'#ffbfff',
-		'#00ffcc',
-		'#cc6699',
-		'#999900'
-	];
-};
-Rickshaw.namespace('Rickshaw.Fixtures.RandomData');
-
-Rickshaw.Fixtures.RandomData = function(timeInterval) {
-
-	timeInterval = timeInterval || 1;
-
-	var lastRandomValue = 200;
-
-	var timeBase = Math.floor(new Date().getTime() / 1000);
-
-	this.addData = function(data) {
-
-		var randomValue = Math.random() * 100 + 15 + lastRandomValue;
-		var index = data[0].length;
-
-		var counter = 1;
-
-		data.forEach( function(series) {
-			var randomVariance = Math.random() * 20;
-			var v = randomValue / 25  + counter++
-				+ (Math.cos((index * counter * 11) / 960) + 2) * 15 
-				+ (Math.cos(index / 7) + 2) * 7
-				+ (Math.cos(index / 17) + 2) * 1;
-
-			series.push( { x: (index * timeInterval) + timeBase, y: v + randomVariance } );
-		} );
-
-		lastRandomValue = randomValue * .85;
-	}
-};
-
-Rickshaw.namespace('Rickshaw.Fixtures.Time');
-
-Rickshaw.Fixtures.Time = function() {
-
-	var tzOffset = new Date().getTimezoneOffset() * 60;
-
-	var self = this;
-
-	this.months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'];
-
-	this.units = [
-		{
-			name: 'decade',
-			seconds: 86400 * 365.25 * 10,
-			formatter: function(d) { return (parseInt(d.getUTCFullYear() / 10) * 10) }
-		}, {
-			name: '5 year',
-			seconds: 86400 * 365.25 * 5,
-			formatter: function(d) { return (parseInt(d.getUTCFullYear() / 5) * 5) }
-		}, {
-			name: 'year',
-			seconds: 86400 * 365.25,
-			formatter: function(d) { return d.getUTCFullYear() }
-		}, {
-			name: '6 month',
-			seconds: 86400 * 30.5 * 6,
-			formatter: function(d) { return self.months[d.getUTCMonth()] }
-		}, {
-			name: 'month',
-			seconds: 86400 * 30.5,
-			formatter: function(d) { return self.months[d.getUTCMonth()] }
-		}, {
-			name: 'week',
-			seconds: 86400 * 7,
-			formatter: function(d) { return self.formatDate(d) }
-		}, {
-			name: '3 day',
-			seconds: 86400 * 3,
-			formatter: function(d) { return self.formatDate(d) }
-		}, {
-			name: 'day',
-			seconds: 86400,
-			formatter: function(d) { return d.getUTCDate() }
-		}, {
-			name: '6 hour',
-			seconds: 3600 * 6,
-			formatter: function(d) { return self.formatTime(d) }
-		}, {
-			name: 'hour',
-			seconds: 3600,
-			formatter: function(d) { return self.formatTime(d) }
-		}, {
-			name: '15 minute',
-			seconds: 60 * 15,
-			formatter: function(d) { return self.formatTime(d) }
-		}, {
-			name: '5 minute',
-			seconds: 60 * 5,
-			formatter: function(d) { return self.formatTime(d) }
-		}, {
-			name: 'minute',
-			seconds: 60,
-			formatter: function(d) { return d.getUTCMinutes() }
-		}, {
-			name: '15 second',
-			seconds: 15,
-			formatter: function(d) { return d.getUTCSeconds() + 's' }
-		}, {
-			name: '5 second',
-			seconds: 5,
-			formatter: function(d) { return d.getUTCSeconds() + 's' }
-		}, {
-			name: 'second',
-			seconds: 1,
-			formatter: function(d) { return d.getUTCSeconds() + 's' }
-		}
-	];
-
-	this.unit = function(unitName) {
-		return this.units.filter( function(unit) { return unitName == unit.name } ).shift();
-	};
-
-	this.formatDate = function(d) {
-		return d.toUTCString().match(/, (\w+ \w+ \w+)/)[1];
-	};
-
-	this.formatTime = function(d) {
-		return d.toUTCString().match(/(\d+:\d+):/)[1];
-	};
-
-	this.ceil = function(time, unit) {
-
-		if (unit.name == 'month') {
-
-			var nearFuture = new Date((time + unit.seconds - 1) * 1000);
-
-			var rounded = new Date(0);
-			rounded.setUTCFullYear(nearFuture.getUTCFullYear());
-			rounded.setUTCMonth(nearFuture.getUTCMonth());
-			rounded.setUTCDate(1);
-			rounded.setUTCHours(0);
-			rounded.setUTCMinutes(0);
-			rounded.setUTCSeconds(0);
-			rounded.setUTCMilliseconds(0);
-
-			return rounded.getTime() / 1000;
-		}
-
-		if (unit.name == 'year') {
-
-			var nearFuture = new Date((time + unit.seconds - 1) * 1000);
-
-			var rounded = new Date(0);
-			rounded.setUTCFullYear(nearFuture.getUTCFullYear());
-			rounded.setUTCMonth(0);
-			rounded.setUTCDate(1);
-			rounded.setUTCHours(0);
-			rounded.setUTCMinutes(0);
-			rounded.setUTCSeconds(0);
-			rounded.setUTCMilliseconds(0);
-
-			return rounded.getTime() / 1000;
-		}
-
-		return Math.ceil(time / unit.seconds) * unit.seconds;
-	};
-};
-Rickshaw.namespace('Rickshaw.Fixtures.Number');
-
-Rickshaw.Fixtures.Number.formatKMBT = function(y) {
-	if (y >= 1000000000000)   { return y / 1000000000000 + "T" } 
-	else if (y >= 1000000000) { return y / 1000000000 + "B" } 
-	else if (y >= 1000000)    { return y / 1000000 + "M" } 
-	else if (y >= 1000)       { return y / 1000 + "K" }
-	else if (y < 1 && y > 0)  { return y.toFixed(2) }
-	else if (y == 0)          { return '' }
-	else                      { return y }
-};
-
-Rickshaw.Fixtures.Number.formatBase1024KMGTP = function(y) {
-	if (y >= 1125899906842624)   { return y / 1125899906842624 + "P" }
-	else if (y >= 1099511627776) { return y / 1099511627776 + "T" }
-	else if (y >= 1073741824)    { return y / 1073741824 + "G" }
-	else if (y >= 1048576)       { return y / 1048576 + "M" }
-	else if (y >= 1024)          { return y / 1024 + "K" }
-	else if (y < 1 && y > 0)     { return y.toFixed(2) }
-	else if (y == 0)             { return '' }
-	else                         { return y }
-};
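-
-// Worked examples for the formatters above (illustrative):
-//
-//   Rickshaw.Fixtures.Number.formatKMBT(1234);             // "1.234K"
-//   Rickshaw.Fixtures.Number.formatKMBT(0.5);              // "0.50"
-//   Rickshaw.Fixtures.Number.formatKMBT(0);                // ""
-//   Rickshaw.Fixtures.Number.formatBase1024KMGTP(1048576); // "1M"
-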
-Rickshaw.namespace("Rickshaw.Color.Palette");
-
-Rickshaw.Color.Palette = function(args) {
-
-	var color = new Rickshaw.Fixtures.Color();
-
-	args = args || {};
-	this.schemes = {};
-
-	this.scheme = color.schemes[args.scheme] || args.scheme || color.schemes.colorwheel;
-	this.runningIndex = 0;
-	this.generatorIndex = 0;
-
-	if (args.interpolatedStopCount) {
-		var schemeCount = this.scheme.length - 1;
-		var i, j, scheme = [];
-		for (i = 0; i < schemeCount; i++) {
-			scheme.push(this.scheme[i]);
-			var generator = d3.interpolateHsl(this.scheme[i], this.scheme[i + 1]);
-			for (j = 1; j < args.interpolatedStopCount; j++) {
-				scheme.push(generator((1 / args.interpolatedStopCount) * j));
-			}
-		}
-		scheme.push(this.scheme[this.scheme.length - 1]);
-		this.scheme = scheme;
-	}
-	this.rotateCount = this.scheme.length;
-
-	this.color = function(key) {
-		return this.scheme[key] || this.scheme[this.runningIndex++] || this.interpolateColor() || '#808080';
-	};
-
-	this.interpolateColor = function() {
-		if (!Array.isArray(this.scheme)) return;
-		var color;
-		if (this.generatorIndex == this.rotateCount * 2 - 1) {
-			color = d3.interpolateHsl(this.scheme[this.generatorIndex], this.scheme[0])(0.5);
-			this.generatorIndex = 0;
-			this.rotateCount *= 2;
-		} else {
-			color = d3.interpolateHsl(this.scheme[this.generatorIndex], this.scheme[this.generatorIndex + 1])(0.5);
-			this.generatorIndex++;
-		}
-		this.scheme.push(color);
-		return color;
-	};
-
-};
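-
-// Usage sketch (illustrative): a palette hands out scheme colors in order and
-// interpolates new stops once the scheme is exhausted; keyed schemes such as
-// httpStatus above can also be indexed directly:
-//
-//   var palette = new Rickshaw.Color.Palette({ scheme: 'munin' });
-//   series.forEach( function(s) { s.color = s.color || palette.color() } );
-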
-Rickshaw.namespace('Rickshaw.Graph.Ajax');
-
-Rickshaw.Graph.Ajax = Rickshaw.Class.create( {
-
-	initialize: function(args) {
-
-		this.dataURL = args.dataURL;
-
-		this.onData = args.onData || function(d) { return d };
-		this.onComplete = args.onComplete || function() {};
-		this.onError = args.onError || function() {};
-
-		this.args = args; // pass through to Rickshaw.Graph
-
-		this.request();
-	},
-
-	request: function() {
-
-		$.ajax( {
-			url: this.dataURL,
-			dataType: 'json',
-			success: this.success.bind(this),
-			error: this.error.bind(this)
-		} );
-	},
-
-	error: function() {
-
-		console.log("error loading dataURL: " + this.dataURL);
-		this.onError(this);
-	},
-
-	success: function(data, status) {
-
-		data = this.onData(data);
-		this.args.series = this._splice({ data: data, series: this.args.series });
-
-		this.graph = new Rickshaw.Graph(this.args);
-		this.graph.render();
-
-		this.onComplete(this);
-	},
-
-	_splice: function(args) {
-
-		var data = args.data;
-		var series = args.series;
-
-		if (!args.series) return data;
-
-		series.forEach( function(s) {
-
-			var seriesKey = s.key || s.name;
-			if (!seriesKey) throw "series needs a key or a name";
-
-			data.forEach( function(d) {
-
-				var dataKey = d.key || d.name;
-				if (!dataKey) throw "data needs a key or a name";
-
-				if (seriesKey == dataKey) {
-					var properties = ['color', 'name', 'data'];
-					properties.forEach( function(p) {
-						s[p] = s[p] || d[p];
-					} );
-				}
-			} );
-		} );
-
-		return series;
-	}
-} );
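-
-// Usage sketch (illustrative): onData may reshape the JSON payload before it
-// is spliced into the configured series, after which the graph is built and
-// rendered. The URL and series values below are placeholders:
-//
-//   new Rickshaw.Graph.Ajax({
-//     element: document.querySelector('#chart'),
-//     dataURL: '/metrics.json',
-//     series: [ { name: 'a', color: '#4682b4' } ],
-//     onData: function(d) { return d }
-//   });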
-
-Rickshaw.namespace('Rickshaw.Graph.Annotate');
-
-Rickshaw.Graph.Annotate = function(args) {
-
-	var graph = this.graph = args.graph;
-	this.elements = { timeline: args.element };
-	
-	var self = this;
-
-	this.data = {};
-
-	this.elements.timeline.classList.add('rickshaw_annotation_timeline');
-
-	this.add = function(time, content, end_time) {
-		self.data[time] = self.data[time] || {'boxes': []};
-		self.data[time].boxes.push({content: content, end: end_time});
-	};
-
-	this.update = function() {
-
-		Rickshaw.keys(self.data).forEach( function(time) {
-
-			var annotation = self.data[time];
-			var left = self.graph.x(time);
-
-			if (left < 0 || left > self.graph.x.range()[1]) {
-				if (annotation.element) {
-					annotation.line.classList.add('offscreen');
-					annotation.element.style.display = 'none';
-				}
-
-				annotation.boxes.forEach( function(box) {
-					if ( box.rangeElement ) box.rangeElement.classList.add('offscreen');
-				});
-
-				return;
-			}
-
-			if (!annotation.element) {
-				var element = annotation.element = document.createElement('div');
-				element.classList.add('annotation');
-				this.elements.timeline.appendChild(element);
-				element.addEventListener('click', function(e) {
-					element.classList.toggle('active');
-					annotation.line.classList.toggle('active');
-					annotation.boxes.forEach( function(box) {
-						if ( box.rangeElement ) box.rangeElement.classList.toggle('active');
-					});
-				}, false);
-					
-			}
-
-			annotation.element.style.left = left + 'px';
-			annotation.element.style.display = 'block';
-
-			annotation.boxes.forEach( function(box) {
-
-
-				var element = box.element;
-
-				if (!element) {
-					element = box.element = document.createElement('div');
-					element.classList.add('content');
-					element.innerHTML = box.content;
-					annotation.element.appendChild(element);
-
-					annotation.line = document.createElement('div');
-					annotation.line.classList.add('annotation_line');
-					self.graph.element.appendChild(annotation.line);
-
-					if ( box.end ) {
-						box.rangeElement = document.createElement('div');
-						box.rangeElement.classList.add('annotation_range');
-						self.graph.element.appendChild(box.rangeElement);
-					}
-
-				}
-
-				if ( box.end ) {
-
-					var annotationRangeStart = left;
-					var annotationRangeEnd   = Math.min( self.graph.x(box.end), self.graph.x.range()[1] );
-
-					// annotation makes more sense at end
-					if ( annotationRangeStart > annotationRangeEnd ) {
-						annotationRangeEnd   = left;
-						annotationRangeStart = Math.max( self.graph.x(box.end), self.graph.x.range()[0] );
-					}
-
-					var annotationRangeWidth = annotationRangeEnd - annotationRangeStart;
-
-					box.rangeElement.style.left  = annotationRangeStart + 'px';
-					box.rangeElement.style.width = annotationRangeWidth + 'px';
-
-					box.rangeElement.classList.remove('offscreen');
-				}
-
-				annotation.line.classList.remove('offscreen');
-				annotation.line.style.left = left + 'px';
-			} );
-		}, this );
-	};
-
-	this.graph.onUpdate( function() { self.update() } );
-};
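-
-// Usage sketch (illustrative): annotations are keyed by epoch-second
-// timestamps and redrawn on every graph update; the values below are
-// placeholders:
-//
-//   var annotator = new Rickshaw.Graph.Annotate({ graph: graph, element: timelineElement });
-//   annotator.add(1356998400, 'deploy', 1357002000);
-//   annotator.update();
-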
-Rickshaw.namespace('Rickshaw.Graph.Axis.Time');
-
-Rickshaw.Graph.Axis.Time = function(args) {
-
-	var self = this;
-
-	this.graph = args.graph;
-	this.elements = [];
-	this.ticksTreatment = args.ticksTreatment || 'plain';
-	this.fixedTimeUnit = args.timeUnit;
-
-	var time = new Rickshaw.Fixtures.Time();
-
-	this.appropriateTimeUnit = function() {
-
-		var unit;
-		var units = time.units;
-
-		var domain = this.graph.x.domain();
-		var rangeSeconds = domain[1] - domain[0];
-
-		units.forEach( function(u) {
-			if (Math.floor(rangeSeconds / u.seconds) >= 2) {
-				unit = unit || u;
-			}
-		} );
-
-		return (unit || time.units[time.units.length - 1]);
-	};
-
-	this.tickOffsets = function() {
-
-		var domain = this.graph.x.domain();
-
-		var unit = this.fixedTimeUnit || this.appropriateTimeUnit();
-		var count = Math.ceil((domain[1] - domain[0]) / unit.seconds);
-
-		var runningTick = domain[0];
-
-		var offsets = [];
-
-		for (var i = 0; i < count; i++) {
-
-			var tickValue = time.ceil(runningTick, unit);
-			runningTick = tickValue + unit.seconds / 2;
-
-			offsets.push( { value: tickValue, unit: unit } );
-		}
-
-		return offsets;
-	};
-
-	this.render = function() {
-
-		this.elements.forEach( function(e) {
-			e.parentNode.removeChild(e);
-		} );
-
-		this.elements = [];
-
-		var offsets = this.tickOffsets();
-
-		offsets.forEach( function(o) {
-			
-			if (self.graph.x(o.value) > self.graph.x.range()[1]) return;
-	
-			var element = document.createElement('div');
-			element.style.left = self.graph.x(o.value) + 'px';
-			element.classList.add('x_tick');
-			element.classList.add(self.ticksTreatment);
-
-			var title = document.createElement('div');
-			title.classList.add('title');
-			title.innerHTML = o.unit.formatter(new Date(o.value * 1000));
-			element.appendChild(title);
-
-			self.graph.element.appendChild(element);
-			self.elements.push(element);
-
-		} );
-	};
-
-	this.graph.onUpdate( function() { self.render() } );
-};
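-
-// Usage sketch (illustrative): the axis picks the finest unit that yields at
-// least two ticks across the visible domain unless a fixed unit is supplied:
-//
-//   new Rickshaw.Graph.Axis.Time({ graph: graph });
-//   new Rickshaw.Graph.Axis.Time({ graph: graph, timeUnit: new Rickshaw.Fixtures.Time().unit('hour') });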
-
-Rickshaw.namespace('Rickshaw.Graph.Axis.Y');
-
-Rickshaw.Graph.Axis.Y = function(args) {
-
-	var self = this;
-	var berthRate = 0.10;
-
-	this.initialize = function(args) {
-
-		this.graph = args.graph;
-		this.orientation = args.orientation || 'right';
-
-		var pixelsPerTick = args.pixelsPerTick || 75;
-		this.ticks = args.ticks || Math.floor(this.graph.height / pixelsPerTick);
-		this.tickSize = args.tickSize || 4;
-		this.ticksTreatment = args.ticksTreatment || 'plain';
-
-		if (args.element) {
-
-			this.element = args.element;
-			this.vis = d3.select(args.element)
-				.append("svg:svg")
-				.attr('class', 'rickshaw_graph y_axis');
-
-			this.element = this.vis[0][0];
-			this.element.style.position = 'relative';
-
-			this.setSize({ width: args.width, height: args.height });
-
-		} else {
-			this.vis = this.graph.vis;
-		}
-
-		this.graph.onUpdate( function() { self.render() } );
-	};
-
-	this.setSize = function(args) {
-
-		args = args || {};
-
-		if (!this.element) return;
-
-		if (typeof window !== 'undefined') {
-
-			var style = window.getComputedStyle(this.element.parentNode, null);
-			var elementWidth = parseInt(style.getPropertyValue('width'));
-
-			if (!args.auto) {
-				var elementHeight = parseInt(style.getPropertyValue('height'));
-			}
-		}
-
-		this.width = args.width || elementWidth || this.graph.width * berthRate;
-		this.height = args.height || elementHeight || this.graph.height;
-
-		this.vis
-			.attr('width', this.width)
-			.attr('height', this.height * (1 + berthRate));
-
-		var berth = this.height * berthRate;
-		this.element.style.top = -1 * berth + 'px';
-	};
-
-	this.render = function() {
-
-		if (this.graph.height !== this._renderHeight) this.setSize({ auto: true });
-
-		var axis = d3.svg.axis().scale(this.graph.y).orient(this.orientation);
-		axis.tickFormat( args.tickFormat || function(y) { return y } );
-
-		if (this.orientation == 'left') {
-			var berth = this.height * berthRate;
-			var transform = 'translate(' + this.width + ', ' + berth + ')';
-		}
-
-		if (this.element) {
-			this.vis.selectAll('*').remove();
-		}
-
-		this.vis
-			.append("svg:g")
-			.attr("class", ["y_ticks", this.ticksTreatment].join(" "))
-			.attr("transform", transform)
-			.call(axis.ticks(this.ticks).tickSubdivide(0).tickSize(this.tickSize));
-
-		var gridSize = (this.orientation == 'right' ? 1 : -1) * this.graph.width;
-
-		this.graph.vis
-			.append("svg:g")
-			.attr("class", "y_grid")
-			.call(axis.ticks(this.ticks).tickSubdivide(0).tickSize(gridSize));
-
-		this._renderHeight = this.graph.height;
-	};
-
-	this.initialize(args);
-};
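-
-// Usage sketch (illustrative): render into the graph's own svg, or into a
-// separate element with a custom tick formatter such as the KMBT fixture
-// defined earlier in this file:
-//
-//   new Rickshaw.Graph.Axis.Y({
-//     graph: graph,
-//     orientation: 'left',
-//     element: document.querySelector('#y_axis'),
-//     tickFormat: Rickshaw.Fixtures.Number.formatKMBT
-//   });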
-
-Rickshaw.namespace('Rickshaw.Graph.Behavior.Series.Highlight');
-
-Rickshaw.Graph.Behavior.Series.Highlight = function(args) {
-
-	this.graph = args.graph;
-	this.legend = args.legend;
-
-	var self = this;
-
-	var colorSafe = {};
-
-	this.addHighlightEvents = function (l) {
-		l.element.addEventListener( 'mouseover', function(e) {
-
-			self.legend.lines.forEach( function(line) {
-				if (l === line) return;
-				colorSafe[line.series.name] = colorSafe[line.series.name] || line.series.color;
-				line.series.color = d3.interpolateRgb(line.series.color, d3.rgb('#d8d8d8'))(0.8).toString();
-			} );
-
-			self.graph.update();
-
-		}, false );
-
-		l.element.addEventListener( 'mouseout', function(e) {
-
-			self.legend.lines.forEach( function(line) {
-				if (colorSafe[line.series.name]) {
-					line.series.color = colorSafe[line.series.name];
-				}
-			} );
-
-			self.graph.update();
-
-		}, false );
-	};
-
-	if (this.legend) {
-		this.legend.lines.forEach( function(l) {
-			self.addHighlightEvents(l);
-		} );
-	}
-
-};
-Rickshaw.namespace('Rickshaw.Graph.Behavior.Series.Order');
-
-Rickshaw.Graph.Behavior.Series.Order = function(args) {
-
-	this.graph = args.graph;
-	this.legend = args.legend;
-
-	var self = this;
-
-	$(function() {
-		$(self.legend.list).sortable( { 
-			containment: 'parent',
-			tolerance: 'pointer',
-			update: function( event, ui ) {
-				var series = [];
-				$(self.legend.list).find('li').each( function(index, item) {
-					if (!item.series) return;
-					series.push(item.series);
-				} );
-
-				for (var i = self.graph.series.length - 1; i >= 0; i--) {
-					self.graph.series[i] = series.shift();
-				}
-
-				self.graph.update();
-			}
-		} );
-		$(self.legend.list).disableSelection();
-	});
-
-	//hack to make jquery-ui sortable behave
-	this.graph.onUpdate( function() { 
-		var h = window.getComputedStyle(self.legend.element).height;
-		self.legend.element.style.height = h;
-	} );
-};
-Rickshaw.namespace('Rickshaw.Graph.Behavior.Series.Toggle');
-
-Rickshaw.Graph.Behavior.Series.Toggle = function(args) {
-
-	this.graph = args.graph;
-	this.legend = args.legend;
-
-	var self = this;
-
-	this.addAnchor = function(line) {
-		var anchor = document.createElement('a');
-		anchor.innerHTML = '&#10004;';
-		anchor.classList.add('action');
-		line.element.insertBefore(anchor, line.element.firstChild);
-
-		anchor.onclick = function(e) {
-			if (line.series.disabled) {
-				line.series.enable();
-				line.element.classList.remove('disabled');
-			} else { 
-				line.series.disable();
-				line.element.classList.add('disabled');
-			}
-		}
-		
-		var label = line.element.getElementsByTagName('span')[0];
-		label.onclick = function(e) {
-
-			var disableAllOtherLines = line.series.disabled;
-			if (!disableAllOtherLines) {
-				for (var i = 0; i < self.legend.lines.length; i++) {
-					var l = self.legend.lines[i];
-					if (line.series === l.series) {
-						// noop
-					} else if (l.series.disabled) {
-						// noop
-					} else {
-						disableAllOtherLines = true;
-						break;
-					}
-				}
-			}
-
-			// show all or none
-			if (disableAllOtherLines) {
-
-				// enable this series first; otherwise disabling the others
-				// could leave the graph with no series to draw
-				line.series.enable();
-				line.element.classList.remove('disabled');
-
-				self.legend.lines.forEach( function(l) {
-					if (line.series === l.series) {
-						// noop
-					} else {
-						l.series.disable();
-						l.element.classList.add('disabled');
-					}
-				} );
-
-			} else {
-
-				self.legend.lines.forEach( function(l) {
-					l.series.enable();
-					l.element.classList.remove('disabled');
-				} );
-
-			}
-
-		};
-
-	};
-
-	if (this.legend) {
-
-		$(this.legend.list).sortable( {
-			start: function(event, ui) {
-				ui.item.bind('no.onclick',
-					function(event) {
-						event.preventDefault();
-					}
-				);
-			},
-			stop: function(event, ui) {
-				setTimeout(function() {
-					ui.item.unbind('no.onclick');
-				}, 250);
-			}
-		} );
-
-		this.legend.lines.forEach( function(l) {
-			self.addAnchor(l);
-		} );
-	}
-
-	this._addBehavior = function() {
-
-		this.graph.series.forEach( function(s) {
-			
-			s.disable = function() {
-
-				if (self.graph.series.length <= 1) {
-					throw('only one series left');
-				}
-				
-				s.disabled = true;
-				self.graph.update();
-			};
-
-			s.enable = function() {
-				s.disabled = false;
-				self.graph.update();
-			};
-		} );
-	};
-	this._addBehavior();
-
-	this.updateBehaviour = function () { this._addBehavior() };
-
-};
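-
-// Usage sketch (illustrative): wire the behavior to a legend so clicking a
-// line's check-mark anchor toggles that series, and clicking its label shows
-// either all series or only that one:
-//
-//   var legend = new Rickshaw.Graph.Legend({ graph: graph, element: legendElement });
-//   new Rickshaw.Graph.Behavior.Series.Toggle({ graph: graph, legend: legend });
-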
-Rickshaw.namespace('Rickshaw.Graph.HoverDetail');
-
-Rickshaw.Graph.HoverDetail = Rickshaw.Class.create({
-
-	initialize: function(args) {
-
-		var graph = this.graph = args.graph;
-
-		this.xFormatter = args.xFormatter || function(x) {
-			return new Date( x * 1000 ).toUTCString();
-		};
-
-		this.yFormatter = args.yFormatter || function(y) {
-			return y.toFixed(2);
-		};
-
-		var element = this.element = document.createElement('div');
-		element.className = 'detail';
-
-		this.visible = true;
-		graph.element.appendChild(element);
-
-		this.lastEvent = null;
-		this._addListeners();
-
-		this.onShow = args.onShow;
-		this.onHide = args.onHide;
-		this.onRender = args.onRender;
-
-		this.formatter = args.formatter || this.formatter;
-	},
-
-	formatter: function(series, x, y, formattedX, formattedY, d) {
-		return series.name + ':&nbsp;' + formattedY;
-	},
-
-	update: function(e) {
-
-		e = e || this.lastEvent;
-		if (!e) return;
-		this.lastEvent = e;
-
-		if (!e.target.nodeName.match(/^(path|svg|rect)$/)) return;
-
-		var graph = this.graph;
-
-		var eventX = e.offsetX || e.layerX;
-		var eventY = e.offsetY || e.layerY;
-
-		var domainX = graph.x.invert(eventX);
-		var stackedData = graph.stackedData;
-
-		var topSeriesData = stackedData.slice(-1).shift();
-
-		var domainIndexScale = d3.scale.linear()
-			.domain([topSeriesData[0].x, topSeriesData.slice(-1).shift().x])
-			.range([0, topSeriesData.length]);
-
-		var approximateIndex = Math.floor(domainIndexScale(domainX));
-		var dataIndex = Math.min(approximateIndex || 0, stackedData[0].length - 1);
-
-		for (var i = approximateIndex; i < stackedData[0].length - 1;) {
-
-			if (!stackedData[0][i] || !stackedData[0][i + 1]) {
-				break;
-			}
-
-			if (stackedData[0][i].x <= domainX && stackedData[0][i + 1].x > domainX) {
-				dataIndex = i;
-				break;
-			}
-			if (stackedData[0][i + 1].x <= domainX) { i++ } else { i-- }
-		}
-
-		var domainX = stackedData[0][dataIndex].x;
-		var formattedXValue = this.xFormatter(domainX);
-		var graphX = graph.x(domainX);
-		var order = 0;
-
-		var detail = graph.series.active()
-			.map( function(s) { return { order: order++, series: s, name: s.name, value: s.stack[dataIndex] } } );
-
-		var activeItem;
-
-		var sortFn = function(a, b) {
-			return (a.value.y0 + a.value.y) - (b.value.y0 + b.value.y);
-		};
-
-		var domainMouseY = graph.y.magnitude.invert(graph.element.offsetHeight - eventY);
-
-		detail.sort(sortFn).forEach( function(d) {
-
-			d.formattedYValue = (this.yFormatter.constructor == Array) ?
-				this.yFormatter[detail.indexOf(d)](d.value.y) :
-				this.yFormatter(d.value.y);
-
-			d.graphX = graphX;
-			d.graphY = graph.y(d.value.y0 + d.value.y);
-
-			if (domainMouseY > d.value.y0 && domainMouseY < d.value.y0 + d.value.y && !activeItem) {
-				activeItem = d;
-				d.active = true;
-			}
-
-		}, this );
-
-		this.element.innerHTML = '';
-		this.element.style.left = graph.x(domainX) + 'px';
-
-		if (this.visible) {
-			this.render( {
-				detail: detail,
-				domainX: domainX,
-				formattedXValue: formattedXValue,
-				mouseX: eventX,
-				mouseY: eventY
-			} );
-		}
-	},
-
-	hide: function() {
-		this.visible = false;
-		this.element.classList.add('inactive');
-
-		if (typeof this.onHide == 'function') {
-			this.onHide();
-		}
-	},
-
-	show: function() {
-		this.visible = true;
-		this.element.classList.remove('inactive');
-
-		if (typeof this.onShow == 'function') {
-			this.onShow();
-		}
-	},
-
-	render: function(args) {
-
-		var detail = args.detail;
-		var domainX = args.domainX;
-
-		var mouseX = args.mouseX;
-		var mouseY = args.mouseY;
-
-		var formattedXValue = args.formattedXValue;
-
-		var xLabel = document.createElement('div');
-		xLabel.className = 'x_label';
-		xLabel.innerHTML = formattedXValue;
-		this.element.appendChild(xLabel);
-
-		detail.forEach( function(d) {
-
-			var item = document.createElement('div');
-			item.className = 'item';
-			item.innerHTML = this.formatter(d.series, domainX, d.value.y, formattedXValue, d.formattedYValue, d);
-			item.style.top = this.graph.y(d.value.y0 + d.value.y) + 'px';
-
-			this.element.appendChild(item);
-
-			var dot = document.createElement('div');
-			dot.className = 'dot';
-			dot.style.top = item.style.top;
-			dot.style.borderColor = d.series.color;
-
-			this.element.appendChild(dot);
-
-			if (d.active) {
-				item.className = 'item active';
-				dot.className = 'dot active';
-			}
-
-		}, this );
-
-		this.show();
-
-		if (typeof this.onRender == 'function') {
-			this.onRender(args);
-		}
-	},
-
-	_addListeners: function() {
-
-		this.graph.element.addEventListener(
-			'mousemove',
-			function(e) {
-				this.visible = true;
-				this.update(e)
-			}.bind(this),
-			false
-		);
-
-		this.graph.onUpdate( function() { this.update() }.bind(this) );
-
-		this.graph.element.addEventListener(
-			'mouseout',
-			function(e) {
-				if (e.relatedTarget && !(e.relatedTarget.compareDocumentPosition(this.graph.element) & Node.DOCUMENT_POSITION_CONTAINS)) {
-					this.hide();
-				}
-			 }.bind(this),
-			false
-		);
-	}
-});
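-
-// Usage sketch (illustrative): hover detail follows the mouse, locates the
-// nearest data point, and renders it with the x/y formatters, both of which
-// can be overridden per instance:
-//
-//   new Rickshaw.Graph.HoverDetail({
-//     graph: graph,
-//     yFormatter: function(y) { return y.toFixed(1) }
-//   });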
-
-Rickshaw.namespace('Rickshaw.Graph.JSONP');
-
-Rickshaw.Graph.JSONP = Rickshaw.Class.create( Rickshaw.Graph.Ajax, {
-
-	request: function() {
-
-		$.ajax( {
-			url: this.dataURL,
-			dataType: 'jsonp',
-			success: this.success.bind(this),
-			error: this.error.bind(this)
-		} );
-	}
-} );
-Rickshaw.namespace('Rickshaw.Graph.Legend');
-
-Rickshaw.Graph.Legend = function(args) {
-
-	var element = this.element = args.element;
-	var graph = this.graph = args.graph;
-
-	var self = this;
-
-	element.classList.add('rickshaw_legend');
-
-	var list = this.list = document.createElement('ul');
-	element.appendChild(list);
-
-	var series = graph.series
-		.map( function(s) { return s } )
-		.reverse();
-
-	this.lines = [];
-
-	this.addLine = function (series) {
-		var line = document.createElement('li');
-		line.className = 'line';
-
-		var swatch = document.createElement('div');
-		swatch.className = 'swatch';
-		swatch.style.backgroundColor = series.color;
-
-		line.appendChild(swatch);
-
-		var label = document.createElement('span');
-		label.className = 'label';
-		label.innerHTML = series.name;
-
-		line.appendChild(label);
-		list.appendChild(line);
-
-		line.series = series;
-
-		if (series.noLegend) {
-			line.style.display = 'none';
-		}
-
-		var _line = { element: line, series: series };
-		if (self.shelving) {
-			self.shelving.addAnchor(_line);
-			self.shelving.updateBehaviour();
-		}
-		if (self.highlighter) {
-			self.highlighter.addHighlightEvents(_line);
-		}
-		self.lines.push(_line);
-	};
-
-	series.forEach( function(s) {
-		self.addLine(s);
-	} );
-
-	graph.onUpdate( function() {} );
-};
-Rickshaw.namespace('Rickshaw.Graph.RangeSlider');
-
-Rickshaw.Graph.RangeSlider = function(args) {
-
-	var element = this.element = args.element;
-	var graph = this.graph = args.graph;
-
-	$( function() {
-		$(element).slider( {
-
-			range: true,
-			min: graph.dataDomain()[0],
-			max: graph.dataDomain()[1],
-			values: [ 
-				graph.dataDomain()[0],
-				graph.dataDomain()[1]
-			],
-			slide: function( event, ui ) {
-
-				graph.window.xMin = ui.values[0];
-				graph.window.xMax = ui.values[1];
-				graph.update();
-
-				// if we're at an extreme, stick there
-				if (graph.dataDomain()[0] == ui.values[0]) {
-					graph.window.xMin = undefined;
-				}
-				if (graph.dataDomain()[1] == ui.values[1]) {
-					graph.window.xMax = undefined;
-				}
-			}
-		} );
-	} );
-
-	element[0].style.width = graph.width + 'px';
-
-	graph.onUpdate( function() {
-
-		var values = $(element).slider('option', 'values');
-
-		$(element).slider('option', 'min', graph.dataDomain()[0]);
-		$(element).slider('option', 'max', graph.dataDomain()[1]);
-
-		if (graph.window.xMin == undefined) {
-			values[0] = graph.dataDomain()[0];
-		}
-		if (graph.window.xMax == undefined) {
-			values[1] = graph.dataDomain()[1];
-		}
-
-		$(element).slider('option', 'values', values);
-
-	} );
-};
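-
-// Usage sketch (illustrative): the element is expected to be a jQuery
-// selection (the code above reads element[0]); dragging the slider sets
-// graph.window.xMin/xMax and updates the graph:
-//
-//   new Rickshaw.Graph.RangeSlider({ graph: graph, element: $('#slider') });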
-
-Rickshaw.namespace("Rickshaw.Graph.Renderer");
-
-Rickshaw.Graph.Renderer = Rickshaw.Class.create( {
-
-	initialize: function(args) {
-		this.graph = args.graph;
-		this.tension = args.tension || this.tension;
-		this.graph.unstacker = this.graph.unstacker || new Rickshaw.Graph.Unstacker( { graph: this.graph } );
-		this.configure(args);
-	},
-
-	seriesPathFactory: function() {
-		//implement in subclass
-	},
-
-	seriesStrokeFactory: function() {
-		// implement in subclass
-	},
-
-	defaults: function() {
-		return {
-			tension: 0.8,
-			strokeWidth: 2,
-			unstack: true,
-			padding: { top: 0.01, right: 0, bottom: 0.01, left: 0 },
-			stroke: false,
-			fill: false
-		};
-	},
-
-	domain: function() {
-
-		var values = [];
-		var stackedData = this.graph.stackedData || this.graph.stackData();
-
-		var topSeriesData = this.unstack ? stackedData : [ stackedData.slice(-1).shift() ];
-
-		topSeriesData.forEach( function(series) {
-			series.forEach( function(d) {
-				values.push( d.y + d.y0 );
-			} );
-		} );
-
-		var xMin = stackedData[0][0].x;
-		var xMax = stackedData[0][ stackedData[0].length - 1 ].x;
-
-		xMin -= (xMax - xMin) * this.padding.left;
-		xMax += (xMax - xMin) * this.padding.right;
-
-		var yMin = this.graph.min === 'auto' ? d3.min( values ) : this.graph.min || 0;
-		var yMax = this.graph.max || d3.max( values );
-
-		if (this.graph.min === 'auto' || yMin < 0) {
-			yMin -= (yMax - yMin) * this.padding.bottom;
-		}
-
-		if (this.graph.max === undefined) {
-			yMax += (yMax - yMin) * this.padding.top;
-		}
-
-		return { x: [xMin, xMax], y: [yMin, yMax] };
-	},
-
-	render: function() {
-
-		var graph = this.graph;
-
-		graph.vis.selectAll('*').remove();
-
-		var nodes = graph.vis.selectAll("path")
-			.data(this.graph.stackedData)
-			.enter().append("svg:path")
-			.attr("d", this.seriesPathFactory());
-
-		var i = 0;
-		graph.series.forEach( function(series) {
-			if (series.disabled) return;
-			series.path = nodes[0][i++];
-			this._styleSeries(series);
-		}, this );
-	},
-
-	_styleSeries: function(series) {
-
-		var fill = this.fill ? series.color : 'none';
-		var stroke = this.stroke ? series.color : 'none';
-
-		series.path.setAttribute('fill', fill);
-		series.path.setAttribute('stroke', stroke);
-		series.path.setAttribute('stroke-width', this.strokeWidth);
-		series.path.setAttribute('class', series.className);
-	},
-
-	configure: function(args) {
-
-		args = args || {};
-
-		Rickshaw.keys(this.defaults()).forEach( function(key) {
-
-			if (!args.hasOwnProperty(key)) {
-				this[key] = this[key] || this.graph[key] || this.defaults()[key];
-				return;
-			}
-
-			if (typeof this.defaults()[key] == 'object') {
-
-				Rickshaw.keys(this.defaults()[key]).forEach( function(k) {
-
-					this[key][k] =
-						args[key][k] !== undefined ? args[key][k] :
-						this[key][k] !== undefined ? this[key][k] :
-						this.defaults()[key][k];
-				}, this );
-
-			} else {
-				this[key] =
-					args[key] !== undefined ? args[key] :
-					this[key] !== undefined ? this[key] :
-					this.graph[key] !== undefined ? this.graph[key] :
-					this.defaults()[key];
-			}
-
-		}, this );
-	},
-
-	setStrokeWidth: function(strokeWidth) {
-		if (strokeWidth !== undefined) {
-			this.strokeWidth = strokeWidth;
-		}
-	},
-
-	setTension: function(tension) {
-		if (tension !== undefined) {
-			this.tension = tension;
-		}
-	}
-} );
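-
-// Note on configure() above (illustrative): for each key in defaults(), the
-// effective value resolves as args > current instance value > graph-level
-// value > renderer default, with one level of merging for object-valued
-// defaults such as padding. Renderer options can therefore be set through the
-// graph, e.g.:
-//
-//   graph.configure({ renderer: 'line', strokeWidth: 3 });
-//   graph.render();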
-
-Rickshaw.namespace('Rickshaw.Graph.Renderer.Line');
-
-Rickshaw.Graph.Renderer.Line = Rickshaw.Class.create( Rickshaw.Graph.Renderer, {
-
-	name: 'line',
-
-	defaults: function($super) {
-
-		return Rickshaw.extend( $super(), {
-			unstack: true,
-			fill: false,
-			stroke: true
-		} );
-	},
-
-	seriesPathFactory: function() {
-
-		var graph = this.graph;
-
-		return d3.svg.line()
-			.x( function(d) { return graph.x(d.x) } )
-			.y( function(d) { return graph.y(d.y) } )
-			.interpolate(this.graph.interpolation).tension(this.tension);
-	}
-} );
-
-Rickshaw.namespace('Rickshaw.Graph.Renderer.Stack');
-
-Rickshaw.Graph.Renderer.Stack = Rickshaw.Class.create( Rickshaw.Graph.Renderer, {
-
-	name: 'stack',
-
-	defaults: function($super) {
-
-		return Rickshaw.extend( $super(), {
-			fill: true,
-			stroke: false,
-			unstack: false
-		} );
-	},
-
-	seriesPathFactory: function() {
-
-		var graph = this.graph;
-
-		return d3.svg.area()
-			.x( function(d) { return graph.x(d.x) } )
-			.y0( function(d) { return graph.y(d.y0) } )
-			.y1( function(d) { return graph.y(d.y + d.y0) } )
-			.interpolate(this.graph.interpolation).tension(this.tension);
-	}
-} );
-
-Rickshaw.namespace('Rickshaw.Graph.Renderer.Bar');
-
-Rickshaw.Graph.Renderer.Bar = Rickshaw.Class.create( Rickshaw.Graph.Renderer, {
-
-	name: 'bar',
-
-	defaults: function($super) {
-
-		var defaults = Rickshaw.extend( $super(), {
-			gapSize: 0.05,
-			unstack: false
-		} );
-
-		delete defaults.tension;
-		return defaults;
-	},
-
-	initialize: function($super, args) {
-		args = args || {};
-		this.gapSize = args.gapSize || this.gapSize;
-		$super(args);
-	},
-
-	domain: function($super) {
-
-		var domain = $super();
-
-		var frequentInterval = this._frequentInterval();
-		domain.x[1] += parseInt(frequentInterval.magnitude);
-
-		return domain;
-	},
-
-	barWidth: function() {
-
-		var stackedData = this.graph.stackedData || this.graph.stackData();
-		var data = stackedData.slice(-1).shift();
-
-		var frequentInterval = this._frequentInterval();
-		var barWidth = this.graph.x(data[0].x + frequentInterval.magnitude * (1 - this.gapSize)); 
-
-		return barWidth;
-	},
-
-	render: function() {
-
-		var graph = this.graph;
-
-		graph.vis.selectAll('*').remove();
-
-		var barWidth = this.barWidth();
-		var barXOffset = 0;
-
-		var activeSeriesCount = graph.series.filter( function(s) { return !s.disabled; } ).length;
-		var seriesBarWidth = this.unstack ? barWidth / activeSeriesCount : barWidth;
-
-		var transform = function(d) {
-			// add a matrix transform for negative values
-			var matrix = [ 1, 0, 0, (d.y < 0 ? -1 : 1), 0, (d.y < 0 ? graph.y.magnitude(Math.abs(d.y)) * 2 : 0) ];
-			return "matrix(" + matrix.join(',') + ")";
-		};
-
-		graph.series.forEach( function(series) {
-
-			if (series.disabled) return;
-
-			var nodes = graph.vis.selectAll("path")
-				.data(series.stack)
-				.enter().append("svg:rect")
-				.attr("x", function(d) { return graph.x(d.x) + barXOffset })
-				.attr("y", function(d) { return (graph.y(d.y0 + Math.abs(d.y))) * (d.y < 0 ? -1 : 1 ) })
-				.attr("width", seriesBarWidth)
-				.attr("height", function(d) { return graph.y.magnitude(Math.abs(d.y)) })
-				.attr("transform", transform);
-
-			Array.prototype.forEach.call(nodes[0], function(n) {
-				n.setAttribute('fill', series.color);
-			} );
-
-			if (this.unstack) barXOffset += seriesBarWidth;
-
-		}, this );
-	},
-
-	_frequentInterval: function() {
-
-		var stackedData = this.graph.stackedData || this.graph.stackData();
-		var data = stackedData.slice(-1).shift();
-
-		var intervalCounts = {};
-
-		for (var i = 0; i < data.length - 1; i++) {
-			var interval = data[i + 1].x - data[i].x;
-			intervalCounts[interval] = intervalCounts[interval] || 0;
-			intervalCounts[interval]++;
-		}
-
-		var frequentInterval = { count: 0 };
-
-		Rickshaw.keys(intervalCounts).forEach( function(i) {
-			if (frequentInterval.count < intervalCounts[i]) {
-
-				frequentInterval = {
-					count: intervalCounts[i],
-					magnitude: i
-				};
-			}
-		} );
-
-		// memoize: overwrite this method with a constant accessor so the
-		// interval scan runs at most once per renderer instance
-		this._frequentInterval = function() { return frequentInterval };
-
-		return frequentInterval;
-	}
-} );
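
_frequentInterval above ends with a self-memoizing trick that is easy to miss: it replaces itself on the instance with a constant accessor, so the interval histogram is computed at most once per renderer (and never refreshed for its lifetime). The pattern in isolation, with a hypothetical expensive() standing in for the costly scan:

    function Widget() {}
    Widget.prototype.answer = function() {
        var value = expensive();                      // hypothetical; runs on the first call only
        this.answer = function() { return value; };   // instance property now shadows the prototype
        return value;
    };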
-
-Rickshaw.namespace('Rickshaw.Graph.Renderer.Area');
-
-Rickshaw.Graph.Renderer.Area = Rickshaw.Class.create( Rickshaw.Graph.Renderer, {
-
-	name: 'area',
-
-	defaults: function($super) {
-
-		return Rickshaw.extend( $super(), {
-			unstack: false,
-			fill: false,
-			stroke: false
-		} );
-	},
-
-	seriesPathFactory: function() {
-
-		var graph = this.graph;
-
-		return d3.svg.area()
-			.x( function(d) { return graph.x(d.x) } )
-			.y0( function(d) { return graph.y(d.y0) } )
-			.y1( function(d) { return graph.y(d.y + d.y0) } )
-			.interpolate(graph.interpolation).tension(this.tension);
-	},
-
-	seriesStrokeFactory: function() {
-
-		var graph = this.graph;
-
-		return d3.svg.line()
-			.x( function(d) { return graph.x(d.x) } )
-			.y( function(d) { return graph.y(d.y + d.y0) } )
-			.interpolate(graph.interpolation).tension(this.tension);
-	},
-
-	render: function() {
-
-		var graph = this.graph;
-
-		graph.vis.selectAll('*').remove();
-
-		var nodes = graph.vis.selectAll("path")
-			.data(this.graph.stackedData)
-			.enter().insert("svg:g", 'g');
-
-		nodes.append("svg:path")
-			.attr("d", this.seriesPathFactory())
-			.attr("class", 'area');
-
-		if (this.stroke) {
-			nodes.append("svg:path")
-				.attr("d", this.seriesStrokeFactory())
-				.attr("class", 'line');
-		}
-
-		var i = 0;
-		graph.series.forEach( function(series) {
-			if (series.disabled) return;
-			series.path = nodes[0][i++];
-			this._styleSeries(series);
-		}, this );
-	},
-
-	_styleSeries: function(series) {
-
-		if (!series.path) return;
-
-		d3.select(series.path).select('.area')
-			.attr('fill', series.color);
-
-		if (this.stroke) {
-			d3.select(series.path).select('.line')
-				.attr('fill', 'none')
-				.attr('stroke', series.stroke || d3.interpolateRgb(series.color, 'black')(0.125))
-				.attr('stroke-width', this.strokeWidth);
-		}
-
-		if (series.className) {
-			series.path.setAttribute('class', series.className);
-		}
-	}
-} );
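
A usage sketch for the area renderer; Rickshaw and d3 are assumed loaded, and the element id, dimensions, and data are made up:

    var graph = new Rickshaw.Graph({
        element: document.querySelector('#chart'),
        width: 400,
        height: 150,
        renderer: 'area',
        stroke: true,    // also draw the top line path, per _styleSeries above
        series: [
            { name: 'requests', color: 'steelblue',
              data: [{x: 0, y: 12}, {x: 1, y: 30}, {x: 2, y: 18}] }
        ]
    });
    graph.render();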
-
-Rickshaw.namespace('Rickshaw.Graph.Renderer.ScatterPlot');
-
-Rickshaw.Graph.Renderer.ScatterPlot = Rickshaw.Class.create( Rickshaw.Graph.Renderer, {
-
-	name: 'scatterplot',
-
-	defaults: function($super) {
-
-		return Rickshaw.extend( $super(), {
-			unstack: true,
-			fill: true,
-			stroke: false,
-			padding:{ top: 0.01, right: 0.01, bottom: 0.01, left: 0.01 },
-			dotSize: 4
-		} );
-	},
-
-	initialize: function($super, args) {
-		$super(args);
-	},
-
-	render: function() {
-
-		var graph = this.graph;
-
-		graph.vis.selectAll('*').remove();
-
-		graph.series.forEach( function(series) {
-
-			if (series.disabled) return;
-
-			var nodes = graph.vis.selectAll("path")
-				.data(series.stack)
-				.enter().append("svg:circle")
-				.attr("cx", function(d) { return graph.x(d.x) })
-				.attr("cy", function(d) { return graph.y(d.y) })
-				.attr("r", function(d) { return ("r" in d) ? d.r : graph.renderer.dotSize});
-
-			Array.prototype.forEach.call(nodes[0], function(n) {
-				n.setAttribute('fill', series.color);
-			} );
-
-		}, this );
-	}
-} );
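
The scatterplot renderer honors an optional per-point "r" radius, falling back to the dotSize default. A sketch with made-up element and data:

    var graph = new Rickshaw.Graph({
        element: document.querySelector('#scatter'),
        width: 400,
        height: 150,
        renderer: 'scatterplot',
        series: [{
            name: 'samples',
            color: 'tomato',
            data: [{x: 0, y: 5}, {x: 1, y: 9, r: 8}]   // the second dot overrides dotSize
        }]
    });
    graph.render();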
-Rickshaw.namespace('Rickshaw.Graph.Smoother');
-
-Rickshaw.Graph.Smoother = function(args) {
-
-	this.graph = args.graph;
-	this.element = args.element;
-
-	var self = this;
-
-	this.aggregationScale = 1;
-
-	if (this.element) {
-
-		$( function() {
-			$(self.element).slider( {
-				min: 1,
-				max: 100,
-				slide: function( event, ui ) {
-					self.setScale(ui.value);
-					self.graph.update();
-				}
-			} );
-		} );
-	}
-
-	self.graph.stackData.hooks.data.push( {
-		name: 'smoother',
-		orderPosition: 50,
-		f: function(data) {
-
-			var aggregatedData = [];
-
-			data.forEach( function(seriesData) {
-				
-				var aggregatedSeriesData = [];
-
-				while (seriesData.length) {
-
-					var avgX = 0, avgY = 0;
-					var slice = seriesData.splice(0, self.aggregationScale);
-
-					slice.forEach( function(d) {
-						avgX += d.x / slice.length;
-						avgY += d.y / slice.length;
-					} );
-
-					aggregatedSeriesData.push( { x: avgX, y: avgY } );
-				}
-
-				aggregatedData.push(aggregatedSeriesData);
-			} );
-
-			return aggregatedData;
-		}
-	} );
-
-	this.setScale = function(scale) {
-
-		if (scale < 1) {
-			throw "scale out of range: " + scale;
-		}
-
-		this.aggregationScale = scale;
-		this.graph.update();
-	}
-};
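
The smoother's data hook folds each run of aggregationScale consecutive points into one averaged point. The same fold as a standalone function (the hook itself splices the series arrays in place):

    function aggregate(points, scale) {
        var out = [];
        points = points.slice();   // keep the caller's array intact
        while (points.length) {
            var slice = points.splice(0, scale), avgX = 0, avgY = 0;
            slice.forEach(function(d) {
                avgX += d.x / slice.length;
                avgY += d.y / slice.length;
            });
            out.push({ x: avgX, y: avgY });
        }
        return out;
    }

    aggregate([{x: 0, y: 2}, {x: 1, y: 4}, {x: 2, y: 6}, {x: 3, y: 8}], 2);
    // => [{x: 0.5, y: 3}, {x: 2.5, y: 7}]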
-
-Rickshaw.namespace('Rickshaw.Graph.Unstacker');
-
-Rickshaw.Graph.Unstacker = function(args) {
-
-	this.graph = args.graph;
-	var self = this;
-
-	this.graph.stackData.hooks.after.push( {
-		name: 'unstacker',
-		f: function(data) {
-
-			if (!self.graph.renderer.unstack) return data;
-
-			data.forEach( function(seriesData) {
-				seriesData.forEach( function(d) {
-					d.y0 = 0;
-				} );
-			} );
-
-			return data;
-		}
-	} );
-};
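
All the unstacker does is reset every baseline to zero when the active renderer's unstack flag is set, so each series is drawn from the x axis instead of on top of its neighbors. In effect:

    var data = [[{x: 0, y: 4, y0: 2}]];   // stacked: occupies 2..6 on the y axis
    data.forEach(function(seriesData) {
        seriesData.forEach(function(d) { d.y0 = 0; });
    });
    // unstacked: the same point now occupies 0..4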
-
-Rickshaw.namespace('Rickshaw.Series');
-
-Rickshaw.Series = Rickshaw.Class.create( Array, {
-
-	initialize: function (data, palette, options) {
-
-		options = options || {};
-
-		this.palette = new Rickshaw.Color.Palette(palette);
-
-		this.timeBase = typeof(options.timeBase) === 'undefined' ? 
-			Math.floor(new Date().getTime() / 1000) : 
-			options.timeBase;
-
-		var timeInterval = typeof(options.timeInterval) == 'undefined' ?
-			1000 :
-			options.timeInterval;
-
-		this.setTimeInterval(timeInterval);
-
-		if (data && (typeof(data) == "object") && (data instanceof Array)) {
-			data.forEach( function(item) { this.addItem(item) }, this );
-		}
-	},
-
-	addItem: function(item) {
-
-		if (typeof(item.name) === 'undefined') {
-			throw('addItem() needs a name');
-		}
-
-		item.color = (item.color || this.palette.color(item.name));
-		item.data = (item.data || []);
-
-		// backfill, if necessary
-		if ((item.data.length == 0) && this.length && (this.getIndex() > 0)) {
-			this[0].data.forEach( function(plot) {
-				item.data.push({ x: plot.x, y: 0 });
-			} );
-		} else if (item.data.length == 0) {
-			item.data.push({ x: this.timeBase - (this.timeInterval || 0), y: 0 });
-		} 
-
-		this.push(item);
-
-		if (this.legend) {
-			this.legend.addLine(this.itemByName(item.name));
-		}
-	},
-
-	addData: function(data) {
-
-		var index = this.getIndex();
-
-		Rickshaw.keys(data).forEach( function(name) {
-			if (! this.itemByName(name)) {
-				this.addItem({ name: name });
-			}
-		}, this );
-
-		this.forEach( function(item) {
-			item.data.push({ 
-				x: (index * this.timeInterval || 1) + this.timeBase, 
-				y: (data[item.name] || 0) 
-			});
-		}, this );
-	},
-
-	getIndex: function () {
-		return (this[0] && this[0].data && this[0].data.length) ? this[0].data.length : 0;
-	},
-
-	itemByName: function(name) {
-
-		for (var i = 0; i < this.length; i++) {
-			if (this[i].name == name)
-				return this[i];
-		}
-	},
-
-	setTimeInterval: function(iv) {
-		this.timeInterval = iv / 1000;
-	},
-
-	setTimeBase: function (t) {
-		this.timeBase = t;
-	},
-
-	dump: function() {
-
-		var data = {
-			timeBase: this.timeBase,
-			timeInterval: this.timeInterval,
-			items: []
-		};
-
-		this.forEach( function(item) {
-
-			var newItem = {
-				color: item.color,
-				name: item.name,
-				data: []
-			};
-
-			item.data.forEach( function(plot) {
-				newItem.data.push({ x: plot.x, y: plot.y });
-			} );
-
-			data.items.push(newItem);
-		} );
-
-		return data;
-	},
-
-	load: function(data) {
-
-		if (data.timeInterval) {
-			this.timeInterval = data.timeInterval;
-		}
-
-		if (data.timeBase) {
-			this.timeBase = data.timeBase;
-		}
-
-		if (data.items) {
-			data.items.forEach( function(item) {
-				this.push(item);
-				if (this.legend) {
-					this.legend.addLine(this.itemByName(item.name));
-				}
-
-			}, this );
-		}
-	}
-} );
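
A usage sketch for Rickshaw.Series (the palette scheme and metric names are made up). Note that addData() auto-creates series for unknown names, addItem() zero-backfills them against the first series, and timeInterval is given in milliseconds but stored as seconds by setTimeInterval():

    var series = new Rickshaw.Series(
        [ { name: 'cpu', data: [{x: 0, y: 10}] } ],
        { scheme: 'colorwheel' },
        { timeInterval: 1000 }
    );
    series.addData({ cpu: 12, mem: 40 });   // 'mem' is added and backfilled with zeros
    var snapshot = series.dump();           // plain object, restorable via load()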
-
-Rickshaw.Series.zeroFill = function(series) {
-
-	var x;
-	var i = 0;
-
-	var data = series.map( function(s) { return s.data } );
-
-	while ( i < Math.max.apply(null, data.map( function(d) { return d.length } )) ) {
-
-		// earliest x among all series at position i; any series missing that x
-		// gets a zero-valued point spliced in below, keeping columns aligned
-		x = Math.min.apply( null,
-			data
-				.filter(function(d) { return d[i] })
-				.map(function(d) { return d[i].x })
-		);
-
-		data.forEach( function(d) {
-			if (!d[i] || d[i].x != x) {
-				d.splice(i, 0, { x: x, y: 0 });
-			}
-		} );
-
-		i++;
-	}
-};
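
zeroFill aligns a set of series onto a shared x grid: at each position it finds the earliest x present and splices a zero-valued point into any series missing it. A worked sketch against the code above:

    var s1 = { data: [{x: 0, y: 1}, {x: 2, y: 3}] };
    var s2 = { data: [{x: 0, y: 5}, {x: 1, y: 7}, {x: 2, y: 9}] };
    Rickshaw.Series.zeroFill([s1, s2]);
    // s1.data => [{x: 0, y: 1}, {x: 1, y: 0}, {x: 2, y: 3}]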
-Rickshaw.namespace('Rickshaw.Series.FixedDuration');
-
-Rickshaw.Series.FixedDuration = Rickshaw.Class.create(Rickshaw.Series, {
-
-	initialize: function (data, palette, options) {
-
-		options = options || {};
-
-		if (typeof(options.timeInterval) === 'undefined') {
-			throw new Error('FixedDuration series requires timeInterval');
-		}
-
-		if (typeof(options.maxDataPoints) === 'undefined') {
-			throw new Error('FixedDuration series requires maxDataPoints');
-		}
-
-		this.palette = new Rickshaw.Color.Palette(palette);
-		this.timeBase = typeof(options.timeBase) === 'undefined' ? Math.floor(new Date().getTime() / 1000) : options.timeBase;
-		this.setTimeInterval(options.timeInterval);
-
-		if (this[0] && this[0].data && this[0].data.length) {
-			this.currentSize = this[0].data.length;
-			this.currentIndex = this[0].data.length;
-		} else {
-			this.currentSize  = 0;
-			this.currentIndex = 0;
-		}
-
-		this.maxDataPoints = options.maxDataPoints;
-
-		if (data && (typeof(data) == "object") && (data instanceof Array)) {
-			data.forEach( function (item) { this.addItem(item) }, this );
-			this.currentSize  += 1;
-			this.currentIndex += 1;
-		}
-
-		// reset timeBase for zero-filled values if needed
-		this.timeBase -= (this.maxDataPoints - this.currentSize) * this.timeInterval;
-
-		// zero-fill up to maxDataPoints size if we don't have that much data yet
-		if ((typeof(this.maxDataPoints) !== 'undefined') && (this.currentSize < this.maxDataPoints)) {
-			for (var i = this.maxDataPoints - this.currentSize - 1; i > 0; i--) {
-				this.currentSize  += 1;
-				this.currentIndex += 1;
-				this.forEach( function (item) {
-					item.data.unshift({ x: ((i-1) * this.timeInterval || 1) + this.timeBase, y: 0, i: i });
-				}, this );
-			}
-		}
-	},
-
-	addData: function($super, data) {
-
-		$super(data)
-
-		this.currentSize += 1;
-		this.currentIndex += 1;
-
-		if (this.maxDataPoints !== undefined) {
-			while (this.currentSize > this.maxDataPoints) {
-				this.dropData();
-			}
-		}
-	},
-
-	dropData: function() {
-
-		this.forEach(function(item) {
-			item.data.splice(0, 1);
-		} );
-
-		this.currentSize -= 1;
-	},
-
-	getIndex: function () {
-		return this.currentIndex;
-	}
-} );
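
A usage sketch for the fixed-window series (names and numbers made up). The constructor zero-fills the window back from timeBase, and addData() drops the oldest point once the window is full:

    var series = new Rickshaw.Series.FixedDuration(
        [ { name: 'load' } ],
        undefined,                                  // default palette
        { timeInterval: 1000, maxDataPoints: 60 }   // 60 points, one per second
    );
    series.addData({ load: 0.4 });
    series.itemByName('load').data.length;          // capped at maxDataPoints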
-
diff --git a/branch-1.2/ambari-web/vendor/scripts/sinon-1.4.2.js b/branch-1.2/ambari-web/vendor/scripts/sinon-1.4.2.js
deleted file mode 100644
index c6e94c5..0000000
--- a/branch-1.2/ambari-web/vendor/scripts/sinon-1.4.2.js
+++ /dev/null
@@ -1,4081 +0,0 @@
-/**
- * Sinon.JS 1.4.2, 2012/07/11
- *
- * @author Christian Johansen (christian@cjohansen.no)
- * @author Contributors: https://github.com/cjohansen/Sinon.JS/blob/master/AUTHORS
- *
- * (The BSD License)
- * 
- * Copyright (c) 2010-2012, Christian Johansen, christian@cjohansen.no
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without modification,
- * are permitted provided that the following conditions are met:
- * 
- *     * Redistributions of source code must retain the above copyright notice,
- *       this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright notice,
- *       this list of conditions and the following disclaimer in the documentation
- *       and/or other materials provided with the distribution.
- *     * Neither the name of Christian Johansen nor the names of his contributors
- *       may be used to endorse or promote products derived from this software
- *       without specific prior written permission.
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-"use strict";
-var sinon = (function () {
-var buster = (function (setTimeout, B) {
-    var isNode = typeof require == "function" && typeof module == "object";
-    var div = typeof document != "undefined" && document.createElement("div");
-    var F = function () {};
-
-    var buster = {
-        bind: function bind(obj, methOrProp) {
-            var method = typeof methOrProp == "string" ? obj[methOrProp] : methOrProp;
-            var args = Array.prototype.slice.call(arguments, 2);
-            return function () {
-                var allArgs = args.concat(Array.prototype.slice.call(arguments));
-                return method.apply(obj, allArgs);
-            };
-        },
-
-        partial: function partial(fn) {
-            var args = [].slice.call(arguments, 1);
-            return function () {
-                return fn.apply(this, args.concat([].slice.call(arguments)));
-            };
-        },
-
-        create: function create(object) {
-            F.prototype = object;
-            return new F();
-        },
-
-        extend: function extend(target) {
-            if (!target) { return; }
-            for (var i = 1, l = arguments.length, prop; i < l; ++i) {
-                for (prop in arguments[i]) {
-                    target[prop] = arguments[i][prop];
-                }
-            }
-            return target;
-        },
-
-        nextTick: function nextTick(callback) {
-            if (typeof process != "undefined" && process.nextTick) {
-                return process.nextTick(callback);
-            }
-            setTimeout(callback, 0);
-        },
-
-        functionName: function functionName(func) {
-            if (!func) return "";
-            if (func.displayName) return func.displayName;
-            if (func.name) return func.name;
-            var matches = func.toString().match(/function\s+([^\(]+)/m);
-            return matches && matches[1] || "";
-        },
-
-        isNode: function isNode(obj) {
-            if (!div) return false;
-            try {
-                obj.appendChild(div);
-                obj.removeChild(div);
-            } catch (e) {
-                return false;
-            }
-            return true;
-        },
-
-        isElement: function isElement(obj) {
-            return obj && obj.nodeType === 1 && buster.isNode(obj);
-        },
-
-        isArray: function isArray(arr) {
-            return Object.prototype.toString.call(arr) == "[object Array]";
-        },
-
-        flatten: function flatten(arr) {
-            var result = [], arr = arr || [];
-            for (var i = 0, l = arr.length; i < l; ++i) {
-                result = result.concat(buster.isArray(arr[i]) ? flatten(arr[i]) : arr[i]);
-            }
-            return result;
-        },
-
-        each: function each(arr, callback) {
-            for (var i = 0, l = arr.length; i < l; ++i) {
-                callback(arr[i]);
-            }
-        },
-
-        map: function map(arr, callback) {
-            var results = [];
-            for (var i = 0, l = arr.length; i < l; ++i) {
-                results.push(callback(arr[i]));
-            }
-            return results;
-        },
-
-        parallel: function parallel(fns, callback) {
-            function cb(err, res) {
-                if (typeof callback == "function") {
-                    callback(err, res);
-                    callback = null;
-                }
-            }
-            if (fns.length == 0) { return cb(null, []); }
-            var remaining = fns.length, results = [];
-            function makeDone(num) {
-                return function done(err, result) {
-                    if (err) { return cb(err); }
-                    results[num] = result;
-                    if (--remaining == 0) { cb(null, results); }
-                };
-            }
-            for (var i = 0, l = fns.length; i < l; ++i) {
-                fns[i](makeDone(i));
-            }
-        },
-
-        series: function series(fns, callback) {
-            function cb(err, res) {
-                if (typeof callback == "function") {
-                    callback(err, res);
-                }
-            }
-            var remaining = fns.slice();
-            var results = [];
-            function callNext() {
-                if (remaining.length == 0) return cb(null, results);
-                var promise = remaining.shift()(next);
-                if (promise && typeof promise.then == "function") {
-                    promise.then(buster.partial(next, null), next);
-                }
-            }
-            function next(err, result) {
-                if (err) return cb(err);
-                results.push(result);
-                callNext();
-            }
-            callNext();
-        },
-
-        countdown: function countdown(num, done) {
-            return function () {
-                if (--num == 0) done();
-            };
-        }
-    };
-
-    if (typeof process === "object" &&
-        typeof require === "function" && typeof module === "object") {
-        var crypto = require("crypto");
-        var path = require("path");
-
-        buster.tmpFile = function (fileName) {
-            var hashed = crypto.createHash("sha1");
-            hashed.update(fileName);
-            var tmpfileName = hashed.digest("hex");
-
-            if (process.platform == "win32") {
-                return path.join(process.env["TEMP"], tmpfileName);
-            } else {
-                return path.join("/tmp", tmpfileName);
-            }
-        };
-    }
-
-    if (Array.prototype.some) {
-        buster.some = function (arr, fn, thisp) {
-            return arr.some(fn, thisp);
-        };
-    } else {
-        // https://developer.mozilla.org/en/JavaScript/Reference/Global_Objects/Array/some
-        buster.some = function (arr, fun, thisp) {
-            if (arr == null) { throw new TypeError(); }
-            arr = Object(arr);
-            var len = arr.length >>> 0;
-            if (typeof fun !== "function") { throw new TypeError(); }
-
-            for (var i = 0; i < len; i++) {
-                if (arr.hasOwnProperty(i) && fun.call(thisp, arr[i], i, arr)) {
-                    return true;
-                }
-            }
-
-            return false;
-        };
-    }
-
-    if (Array.prototype.filter) {
-        buster.filter = function (arr, fn, thisp) {
-            return arr.filter(fn, thisp);
-        };
-    } else {
-        // https://developer.mozilla.org/en/JavaScript/Reference/Global_Objects/Array/filter
-        // Adapted from the MDN polyfill; take the array explicitly so the
-        // signature matches the native branch above.
-        buster.filter = function (arr, fn, thisp) {
-            if (arr == null) { throw new TypeError(); }
-
-            var t = Object(arr);
-            var len = t.length >>> 0;
-            if (typeof fn != "function") { throw new TypeError(); }
-
-            var res = [];
-            for (var i = 0; i < len; i++) {
-                if (i in t) {
-                    var val = t[i]; // in case fun mutates this
-                    if (fn.call(thisp, val, i, t)) { res.push(val); }
-                }
-            }
-
-            return res;
-        };
-    }
-
-    if (isNode) {
-        module.exports = buster;
-        buster.eventEmitter = require("./buster-event-emitter");
-        Object.defineProperty(buster, "defineVersionGetter", {
-            get: function () {
-                return require("./define-version-getter");
-            }
-        });
-    }
-
-    return buster.extend(B || {}, buster);
-}(setTimeout, buster));
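
The bundled buster core is a grab bag of functional helpers used throughout sinon. Two behaviors worth illustrating (values made up):

    var counter = { total: 0, add: function(n) { this.total += n; } };
    var addTwo = buster.bind(counter, "add", 2);   // binds both `this` and the first argument
    addTwo();                                      // counter.total === 2

    buster.flatten([1, [2, [3]]]);                 // => [1, 2, 3] (recursive)
    buster.functionName(function fetchAll() {});   // => "fetchAll"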
-if (typeof buster === "undefined") {
-    var buster = {};
-}
-
-if (typeof module === "object" && typeof require === "function") {
-    buster = require("buster-core");
-}
-
-buster.format = buster.format || {};
-buster.format.excludeConstructors = ["Object", /^.$/];
-buster.format.quoteStrings = true;
-
-buster.format.ascii = (function () {
-    
-    var hasOwn = Object.prototype.hasOwnProperty;
-
-    var specialObjects = [];
-    if (typeof global != "undefined") {
-        specialObjects.push({ obj: global, value: "[object global]" });
-    }
-    if (typeof document != "undefined") {
-        specialObjects.push({ obj: document, value: "[object HTMLDocument]" });
-    }
-    if (typeof window != "undefined") {
-        specialObjects.push({ obj: window, value: "[object Window]" });
-    }
-
-    function keys(object) {
-        var k = Object.keys && Object.keys(object) || [];
-
-        if (k.length == 0) {
-            for (var prop in object) {
-                if (hasOwn.call(object, prop)) {
-                    k.push(prop);
-                }
-            }
-        }
-
-        return k.sort();
-    }
-
-    function isCircular(object, objects) {
-        if (typeof object != "object") {
-            return false;
-        }
-
-        for (var i = 0, l = objects.length; i < l; ++i) {
-            if (objects[i] === object) {
-                return true;
-            }
-        }
-
-        return false;
-    }
-
-    function ascii(object, processed, indent) {
-        if (typeof object == "string") {
-            var quote = typeof this.quoteStrings != "boolean" || this.quoteStrings;
-            return processed || quote ? '"' + object + '"' : object;
-        }
-
-        if (typeof object == "function" && !(object instanceof RegExp)) {
-            return ascii.func(object);
-        }
-
-        processed = processed || [];
-
-        if (isCircular(object, processed)) {
-            return "[Circular]";
-        }
-
-        if (Object.prototype.toString.call(object) == "[object Array]") {
-            return ascii.array.call(this, object, processed);
-        }
-
-        if (!object) {
-            return "" + object;
-        }
-
-        if (buster.isElement(object)) {
-            return ascii.element(object);
-        }
-
-        if (typeof object.toString == "function" &&
-            object.toString !== Object.prototype.toString) {
-            return object.toString();
-        }
-
-        for (var i = 0, l = specialObjects.length; i < l; i++) {
-            if (object === specialObjects[i].obj) {
-                return specialObjects[i].value;
-            }
-        }
-
-        return ascii.object.call(this, object, processed, indent);
-    }
-
-    ascii.func = function (func) {
-        return "function " + buster.functionName(func) + "() {}";
-    };
-
-    ascii.array = function (array, processed) {
-        processed = processed || [];
-        processed.push(array);
-        var pieces = [];
-
-        for (var i = 0, l = array.length; i < l; ++i) {
-            pieces.push(ascii.call(this, array[i], processed));
-        }
-
-        return "[" + pieces.join(", ") + "]";
-    };
-
-    ascii.object = function (object, processed, indent) {
-        processed = processed || [];
-        processed.push(object);
-        indent = indent || 0;
-        var pieces = [], properties = keys(object), prop, str, obj;
-        var is = "";
-        var length = 3;
-
-        for (var i = 0, l = indent; i < l; ++i) {
-            is += " ";
-        }
-
-        for (i = 0, l = properties.length; i < l; ++i) {
-            prop = properties[i];
-            obj = object[prop];
-
-            if (isCircular(obj, processed)) {
-                str = "[Circular]";
-            } else {
-                str = ascii.call(this, obj, processed, indent + 2);
-            }
-
-            str = (/\s/.test(prop) ? '"' + prop + '"' : prop) + ": " + str;
-            length += str.length;
-            pieces.push(str);
-        }
-
-        var cons = ascii.constructorName.call(this, object);
-        var prefix = cons ? "[" + cons + "] " : "";
-
-        return (length + indent) > 80 ?
-            prefix + "{\n  " + is + pieces.join(",\n  " + is) + "\n" + is + "}" :
-            prefix + "{ " + pieces.join(", ") + " }";
-    };
-
-    ascii.element = function (element) {
-        var tagName = element.tagName.toLowerCase();
-        var attrs = element.attributes, attribute, pairs = [], attrName;
-
-        for (var i = 0, l = attrs.length; i < l; ++i) {
-            attribute = attrs.item(i);
-            attrName = attribute.nodeName.toLowerCase().replace("html:", "");
-
-            if (attrName == "contenteditable" && attribute.nodeValue == "inherit") {
-                continue;
-            }
-
-            if (!!attribute.nodeValue) {
-                pairs.push(attrName + "=\"" + attribute.nodeValue + "\"");
-            }
-        }
-
-        var formatted = "<" + tagName + (pairs.length > 0 ? " " : "");
-        var content = element.innerHTML;
-
-        if (content.length > 20) {
-            content = content.substr(0, 20) + "[...]";
-        }
-
-        var res = formatted + pairs.join(" ") + ">" + content + "</" + tagName + ">";
-
-        return res.replace(/ contentEditable="inherit"/, "");
-    };
-
-    ascii.constructorName = function (object) {
-        var name = buster.functionName(object && object.constructor);
-        var excludes = this.excludeConstructors || buster.format.excludeConstructors || [];
-
-        for (var i = 0, l = excludes.length; i < l; ++i) {
-            if (typeof excludes[i] == "string" && excludes[i] == name) {
-                return "";
-            } else if (excludes[i].test && excludes[i].test(name)) {
-                return "";
-            }
-        }
-
-        return name;
-    };
-
-    return ascii;
-}());
-
-if (typeof module != "undefined") {
-    module.exports = buster.format;
-}
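
A sketch of the output buster.format.ascii produces; sinon reuses this formatter (with quoteStrings disabled) for its assertion messages:

    buster.format.ascii({ id: 3, tags: ["a", "b"] });
    // => '{ id: 3, tags: ["a", "b"] }'   (wrapped and indented once it exceeds ~80 chars)

    buster.format.ascii(function namedFn() { return 1; });
    // => "function namedFn() {}"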
-/*jslint eqeqeq: false, onevar: false, forin: true, nomen: false, regexp: false, plusplus: false*/
-/*global module, require, __dirname, document*/
-/**
- * Sinon core utilities. For internal use only.
- *
- * @author Christian Johansen (christian@cjohansen.no)
- * @license BSD
- *
- * Copyright (c) 2010-2011 Christian Johansen
- */
-
-var sinon = (function (buster) {
-    var div = typeof document != "undefined" && document.createElement("div");
-    var hasOwn = Object.prototype.hasOwnProperty;
-
-    function isDOMNode(obj) {
-        var success = false;
-
-        try {
-            obj.appendChild(div);
-            success = div.parentNode == obj;
-        } catch (e) {
-            return false;
-        } finally {
-            try {
-                obj.removeChild(div);
-            } catch (e) {
-                // Remove failed, not much we can do about that
-            }
-        }
-
-        return success;
-    }
-
-    function isElement(obj) {
-        return div && obj && obj.nodeType === 1 && isDOMNode(obj);
-    }
-
-    function isFunction(obj) {
-        return !!(obj && obj.constructor && obj.call && obj.apply);
-    }
-
-    function mirrorProperties(target, source) {
-        for (var prop in source) {
-            if (!hasOwn.call(target, prop)) {
-                target[prop] = source[prop];
-            }
-        }
-    }
-
-    var sinon = {
-        wrapMethod: function wrapMethod(object, property, method) {
-            if (!object) {
-                throw new TypeError("Should wrap property of object");
-            }
-
-            if (typeof method != "function") {
-                throw new TypeError("Method wrapper should be function");
-            }
-
-            var wrappedMethod = object[property];
-
-            if (!isFunction(wrappedMethod)) {
-                throw new TypeError("Attempted to wrap " + (typeof wrappedMethod) + " property " +
-                                    property + " as function");
-            }
-
-            if (wrappedMethod.restore && wrappedMethod.restore.sinon) {
-                throw new TypeError("Attempted to wrap " + property + " which is already wrapped");
-            }
-
-            if (wrappedMethod.calledBefore) {
-                var verb = !!wrappedMethod.returns ? "stubbed" : "spied on";
-                throw new TypeError("Attempted to wrap " + property + " which is already " + verb);
-            }
-
-            // IE 8 does not support hasOwnProperty on the window object.
-            var owned = hasOwn.call(object, property);
-            object[property] = method;
-            method.displayName = property;
-
-            method.restore = function () {
-                // For prototype properties try to reset by delete first.
-                // If this fails (ex: localStorage on mobile safari) then force a reset
-                // via direct assignment.
-                if (!owned) {
-                    delete object[property];
-                }
-                if (object[property] === method) {
-                    object[property] = wrappedMethod;
-                }
-            };
-
-            method.restore.sinon = true;
-            mirrorProperties(method, wrappedMethod);
-
-            return method;
-        },
-
-        extend: function extend(target) {
-            for (var i = 1, l = arguments.length; i < l; i += 1) {
-                for (var prop in arguments[i]) {
-                    if (arguments[i].hasOwnProperty(prop)) {
-                        target[prop] = arguments[i][prop];
-                    }
-
-                    // IE DontEnum bug: a shadowed toString is skipped by
-                    // for-in, so copy it explicitly
-                    if (arguments[i].hasOwnProperty("toString") &&
-                        arguments[i].toString != target.toString) {
-                        target.toString = arguments[i].toString;
-                    }
-                }
-            }
-
-            return target;
-        },
-
-        create: function create(proto) {
-            var F = function () {};
-            F.prototype = proto;
-            return new F();
-        },
-
-        deepEqual: function deepEqual(a, b) {
-            if (sinon.match && sinon.match.isMatcher(a)) {
-                return a.test(b);
-            }
-            if (typeof a != "object" || typeof b != "object") {
-                return a === b;
-            }
-
-            if (isElement(a) || isElement(b)) {
-                return a === b;
-            }
-
-            if (a === b) {
-                return true;
-            }
-
-            var aString = Object.prototype.toString.call(a);
-            if (aString != Object.prototype.toString.call(b)) {
-                return false;
-            }
-
-            if (aString == "[object Array]") {
-                if (a.length !== b.length) {
-                    return false;
-                }
-
-                for (var i = 0, l = a.length; i < l; i += 1) {
-                    if (!deepEqual(a[i], b[i])) {
-                        return false;
-                    }
-                }
-
-                return true;
-            }
-
-            var prop, aLength = 0, bLength = 0;
-
-            for (prop in a) {
-                aLength += 1;
-
-                if (!deepEqual(a[prop], b[prop])) {
-                    return false;
-                }
-            }
-
-            for (prop in b) {
-                bLength += 1;
-            }
-
-            if (aLength != bLength) {
-                return false;
-            }
-
-            return true;
-        },
-
-        functionName: function functionName(func) {
-            var name = func.displayName || func.name;
-
-            // Use function decomposition as a last resort to get function
-            // name. Does not rely on function decomposition to work - if it
-            // doesn't debugging will be slightly less informative
-            // (i.e. toString will say 'spy' rather than 'myFunc').
-            if (!name) {
-                var matches = func.toString().match(/function ([^\s\(]+)/);
-                name = matches && matches[1];
-            }
-
-            return name;
-        },
-
-        functionToString: function toString() {
-            if (this.getCall && this.callCount) {
-                var thisValue, prop, i = this.callCount;
-
-                while (i--) {
-                    thisValue = this.getCall(i).thisValue;
-
-                    for (prop in thisValue) {
-                        if (thisValue[prop] === this) {
-                            return prop;
-                        }
-                    }
-                }
-            }
-
-            return this.displayName || "sinon fake";
-        },
-
-        getConfig: function (custom) {
-            var config = {};
-            custom = custom || {};
-            var defaults = sinon.defaultConfig;
-
-            for (var prop in defaults) {
-                if (defaults.hasOwnProperty(prop)) {
-                    config[prop] = custom.hasOwnProperty(prop) ? custom[prop] : defaults[prop];
-                }
-            }
-
-            return config;
-        },
-
-        format: function (val) {
-            return "" + val;
-        },
-
-        defaultConfig: {
-            injectIntoThis: true,
-            injectInto: null,
-            properties: ["spy", "stub", "mock", "clock", "server", "requests"],
-            useFakeTimers: true,
-            useFakeServer: true
-        },
-
-        timesInWords: function timesInWords(count) {
-            return count == 1 && "once" ||
-                count == 2 && "twice" ||
-                count == 3 && "thrice" ||
-                (count || 0) + " times";
-        },
-
-        calledInOrder: function (spies) {
-            for (var i = 1, l = spies.length; i < l; i++) {
-                if (!spies[i - 1].calledBefore(spies[i])) {
-                    return false;
-                }
-            }
-
-            return true;
-        },
-
-        orderByFirstCall: function (spies) {
-            return spies.sort(function (a, b) {
-                // callIds come from a global counter, so two calls never tie
-                var aCall = a.getCall(0);
-                var bCall = b.getCall(0);
-                var aId = aCall && aCall.callId || -1;
-                var bId = bCall && bCall.callId || -1;
-
-                return aId < bId ? -1 : 1;
-            });
-        },
-
-        log: function () {},
-
-        logError: function (label, err) {
-            var msg = label + " threw exception: ";
-            sinon.log(msg + "[" + err.name + "] " + err.message);
-            if (err.stack) { sinon.log(err.stack); }
-
-            setTimeout(function () {
-                err.message = msg + err.message;
-                throw err;
-            }, 0);
-        },
-
-        typeOf: function (value) {
-            if (value === null) {
-              return "null";
-            }
-            var string = Object.prototype.toString.call(value);
-            return string.substring(8, string.length - 1).toLowerCase();
-        }
-    };
-
-    var isNode = typeof module == "object" && typeof require == "function";
-
-    if (isNode) {
-        try {
-            buster = { format: require("buster-format") };
-        } catch (e) {}
-        module.exports = sinon;
-        module.exports.spy = require("./sinon/spy");
-        module.exports.stub = require("./sinon/stub");
-        module.exports.mock = require("./sinon/mock");
-        module.exports.collection = require("./sinon/collection");
-        module.exports.assert = require("./sinon/assert");
-        module.exports.sandbox = require("./sinon/sandbox");
-        module.exports.test = require("./sinon/test");
-        module.exports.testCase = require("./sinon/test_case");
-        module.exports.assert = require("./sinon/assert");
-        module.exports.match = require("./sinon/match");
-    }
-
-    if (buster) {
-        var formatter = sinon.create(buster.format);
-        formatter.quoteStrings = false;
-        sinon.format = function () {
-            return formatter.ascii.apply(formatter, arguments);
-        };
-    } else if (isNode) {
-        try {
-            var util = require("util");
-            sinon.format = function (value) {
-                return typeof value == "object" && value.toString === Object.prototype.toString ? util.inspect(value) : value;
-            };
-        } catch (e) {
-            /* Node, but no util module - would be very old, but better safe than
-             sorry */
-        }
-    }
-
-    return sinon;
-}(typeof buster == "object" && buster));
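
The two workhorses here are wrapMethod, which installs a fake restorably, and deepEqual, which does structural comparison with matcher support. A sketch against a made-up api object:

    var api = { fetch: function() { return "real"; } };
    sinon.wrapMethod(api, "fetch", function() { return "fake"; });
    api.fetch();           // => "fake"
    api.fetch.restore();   // reinstalls the original
    api.fetch();           // => "real"

    sinon.deepEqual({ a: [1, 2] }, { a: [1, 2] });   // => true (structural, not identity)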
-
-/* @depend ../sinon.js */
-/*jslint eqeqeq: false, onevar: false, plusplus: false*/
-/*global module, require, sinon*/
-/**
- * Match functions
- *
- * @author Maximilian Antoni (mail@maxantoni.de)
- * @license BSD
- *
- * Copyright (c) 2012 Maximilian Antoni
- */
-
-(function (sinon) {
-    var commonJSModule = typeof module == "object" && typeof require == "function";
-
-    if (!sinon && commonJSModule) {
-        sinon = require("../sinon");
-    }
-
-    if (!sinon) {
-        return;
-    }
-
-    function assertType(value, type, name) {
-        var actual = sinon.typeOf(value);
-        if (actual !== type) {
-            throw new TypeError("Expected type of " + name + " to be " +
-                type + ", but was " + actual);
-        }
-    }
-
-    var matcher = {
-        toString: function () {
-            return this.message;
-        }
-    };
-
-    function isMatcher(object) {
-        return matcher.isPrototypeOf(object);
-    }
-
-    function matchObject(expectation, actual) {
-        if (actual === null || actual === undefined) {
-            return false;
-        }
-        for (var key in expectation) {
-            if (expectation.hasOwnProperty(key)) {
-                var exp = expectation[key];
-                var act = actual[key];
-                if (match.isMatcher(exp)) {
-                    if (!exp.test(act)) {
-                        return false;
-                    }
-                } else if (sinon.typeOf(exp) === "object") {
-                    if (!matchObject(exp, act)) {
-                        return false;
-                    }
-                } else if (!sinon.deepEqual(exp, act)) {
-                    return false;
-                }
-            }
-        }
-        return true;
-    }
-
-    matcher.or = function (m2) {
-        if (!isMatcher(m2)) {
-            throw new TypeError("Matcher expected");
-        }
-        var m1 = this;
-        var or = sinon.create(matcher);
-        or.test = function (actual) {
-            return m1.test(actual) || m2.test(actual);
-        };
-        or.message = m1.message + ".or(" + m2.message + ")";
-        return or;
-    };
-
-    matcher.and = function (m2) {
-        if (!isMatcher(m2)) {
-            throw new TypeError("Matcher expected");
-        }
-        var m1 = this;
-        var and = sinon.create(matcher);
-        and.test = function (actual) {
-            return m1.test(actual) && m2.test(actual);
-        };
-        and.message = m1.message + ".and(" + m2.message + ")";
-        return and;
-    };
-
-    var match = function (expectation, message) {
-        var m = sinon.create(matcher);
-        var type = sinon.typeOf(expectation);
-        switch (type) {
-        case "object":
-            if (typeof expectation.test === "function") {
-                m.test = function (actual) {
-                    return expectation.test(actual) === true;
-                };
-                m.message = "match(" + sinon.functionName(expectation.test) + ")";
-                return m;
-            }
-            var str = [];
-            for (var key in expectation) {
-                if (expectation.hasOwnProperty(key)) {
-                    str.push(key + ": " + expectation[key]);
-                }
-            }
-            m.test = function (actual) {
-                return matchObject(expectation, actual);
-            };
-            m.message = "match(" + str.join(", ") + ")";
-            break;
-        case "number":
-            m.test = function (actual) {
-                return expectation == actual;
-            };
-            break;
-        case "string":
-            m.test = function (actual) {
-                if (typeof actual !== "string") {
-                    return false;
-                }
-                return actual.indexOf(expectation) !== -1;
-            };
-            m.message = "match(\"" + expectation + "\")";
-            break;
-        case "regexp":
-            m.test = function (actual) {
-                if (typeof actual !== "string") {
-                    return false;
-                }
-                return expectation.test(actual);
-            };
-            break;
-        case "function":
-            m.test = expectation;
-            if (message) {
-                m.message = message;
-            } else {
-                m.message = "match(" + sinon.functionName(expectation) + ")";
-            }
-            break;
-        default:
-            m.test = function (actual) {
-              return sinon.deepEqual(expectation, actual);
-            };
-        }
-        if (!m.message) {
-            m.message = "match(" + expectation + ")";
-        }
-        return m;
-    };
-
-    match.isMatcher = isMatcher;
-
-    match.any = match(function () {
-        return true;
-    }, "any");
-
-    match.defined = match(function (actual) {
-        return actual !== null && actual !== undefined;
-    }, "defined");
-
-    match.truthy = match(function (actual) {
-        return !!actual;
-    }, "truthy");
-
-    match.falsy = match(function (actual) {
-        return !actual;
-    }, "falsy");
-
-    match.same = function (expectation) {
-        return match(function (actual) {
-            return expectation === actual;
-        }, "same(" + expectation + ")");
-    };
-
-    match.typeOf = function (type) {
-        assertType(type, "string", "type");
-        return match(function (actual) {
-            return sinon.typeOf(actual) === type;
-        }, "typeOf(\"" + type + "\")");
-    };
-
-    match.instanceOf = function (type) {
-        assertType(type, "function", "type");
-        return match(function (actual) {
-            return actual instanceof type;
-        }, "instanceOf(" + sinon.functionName(type) + ")");
-    };
-
-    function createPropertyMatcher(propertyTest, messagePrefix) {
-        return function (property, value) {
-            assertType(property, "string", "property");
-            var onlyProperty = arguments.length === 1;
-            var message = messagePrefix + "(\"" + property + "\"";
-            if (!onlyProperty) {
-                message += ", " + value;
-            }
-            message += ")";
-            return match(function (actual) {
-                if (actual === undefined || actual === null ||
-                        !propertyTest(actual, property)) {
-                    return false;
-                }
-                return onlyProperty || sinon.deepEqual(value, actual[property]);
-            }, message);
-        };
-    }
-
-    match.has = createPropertyMatcher(function (actual, property) {
-        if (typeof actual === "object") {
-            return property in actual;
-        }
-        return actual[property] !== undefined;
-    }, "has");
-
-    match.hasOwn = createPropertyMatcher(function (actual, property) {
-        return actual.hasOwnProperty(property);
-    }, "hasOwn");
-
-    match.bool = match.typeOf("boolean");
-    match.number = match.typeOf("number");
-    match.string = match.typeOf("string");
-    match.object = match.typeOf("object");
-    match.func = match.typeOf("function");
-    match.array = match.typeOf("array");
-    match.regexp = match.typeOf("regexp");
-    match.date = match.typeOf("date");
-
-    if (commonJSModule) {
-        module.exports = match;
-    } else {
-        sinon.match = match;
-    }
-}(typeof sinon == "object" && sinon || null));
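
A few usage sketches for the matchers defined above (values made up):

    sinon.match.string.test("hi");                             // => true
    sinon.match({ id: sinon.match.number }).test({ id: 7 });   // => true; extra keys are ignored
    sinon.match.has("length", 2).or(sinon.match.falsy).test([1, 2]);   // => true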
-
-/**
- * @depend ../sinon.js
- * @depend match.js
- */
-/*jslint eqeqeq: false, onevar: false, plusplus: false*/
-/*global module, require, sinon*/
-/**
- * Spy functions
- *
- * @author Christian Johansen (christian@cjohansen.no)
- * @license BSD
- *
- * Copyright (c) 2010-2011 Christian Johansen
- */
-
-(function (sinon) {
-    var commonJSModule = typeof module == "object" && typeof require == "function";
-    var spyCall;
-    var callId = 0;
-    var push = [].push;
-    var slice = Array.prototype.slice;
-
-    if (!sinon && commonJSModule) {
-        sinon = require("../sinon");
-    }
-
-    if (!sinon) {
-        return;
-    }
-
-    function spy(object, property) {
-        if (!property && typeof object == "function") {
-            return spy.create(object);
-        }
-
-        if (!object && !property) {
-            return spy.create(function () {});
-        }
-
-        var method = object[property];
-        return sinon.wrapMethod(object, property, spy.create(method));
-    }
-
-    sinon.extend(spy, (function () {
-
-        function delegateToCalls(api, method, matchAny, actual, notCalled) {
-            api[method] = function () {
-                if (!this.called) {
-                    if (notCalled) {
-                        return notCalled.apply(this, arguments);
-                    }
-                    return false;
-                }
-
-                var currentCall;
-                var matches = 0;
-
-                for (var i = 0, l = this.callCount; i < l; i += 1) {
-                    currentCall = this.getCall(i);
-
-                    if (currentCall[actual || method].apply(currentCall, arguments)) {
-                        matches += 1;
-
-                        if (matchAny) {
-                            return true;
-                        }
-                    }
-                }
-
-                return matches === this.callCount;
-            };
-        }
-
-        function matchingFake(fakes, args, strict) {
-            if (!fakes) {
-                return;
-            }
-
-            var alen = args.length;
-
-            for (var i = 0, l = fakes.length; i < l; i++) {
-                if (fakes[i].matches(args, strict)) {
-                    return fakes[i];
-                }
-            }
-        }
-
-        function incrementCallCount() {
-            this.called = true;
-            this.callCount += 1;
-            this.notCalled = false;
-            this.calledOnce = this.callCount == 1;
-            this.calledTwice = this.callCount == 2;
-            this.calledThrice = this.callCount == 3;
-        }
-
-        function createCallProperties() {
-            this.firstCall = this.getCall(0);
-            this.secondCall = this.getCall(1);
-            this.thirdCall = this.getCall(2);
-            this.lastCall = this.getCall(this.callCount - 1);
-        }
-
-        var uuid = 0;
-
-        // Public API
-        var spyApi = {
-            reset: function () {
-                this.called = false;
-                this.notCalled = true;
-                this.calledOnce = false;
-                this.calledTwice = false;
-                this.calledThrice = false;
-                this.callCount = 0;
-                this.firstCall = null;
-                this.secondCall = null;
-                this.thirdCall = null;
-                this.lastCall = null;
-                this.args = [];
-                this.returnValues = [];
-                this.thisValues = [];
-                this.exceptions = [];
-                this.callIds = [];
-                if (this.fakes) {
-                    for (var i = 0; i < this.fakes.length; i++) {
-                        this.fakes[i].reset();
-                    }
-                }
-            },
-
-            create: function create(func) {
-                var name;
-
-                if (typeof func != "function") {
-                    func = function () {};
-                } else {
-                    name = sinon.functionName(func);
-                }
-
-                function proxy() {
-                    return proxy.invoke(func, this, slice.call(arguments));
-                }
-
-                sinon.extend(proxy, spy);
-                delete proxy.create;
-                sinon.extend(proxy, func);
-
-                proxy.reset();
-                proxy.prototype = func.prototype;
-                proxy.displayName = name || "spy";
-                proxy.toString = sinon.functionToString;
-                proxy._create = sinon.spy.create;
-                proxy.id = "spy#" + uuid++;
-
-                return proxy;
-            },
-
-            invoke: function invoke(func, thisValue, args) {
-                var matching = matchingFake(this.fakes, args);
-                var exception, returnValue;
-
-                incrementCallCount.call(this);
-                push.call(this.thisValues, thisValue);
-                push.call(this.args, args);
-                push.call(this.callIds, callId++);
-
-                try {
-                    if (matching) {
-                        returnValue = matching.invoke(func, thisValue, args);
-                    } else {
-                        returnValue = (this.func || func).apply(thisValue, args);
-                    }
-                } catch (e) {
-                    push.call(this.returnValues, undefined);
-                    exception = e;
-                    throw e;
-                } finally {
-                    push.call(this.exceptions, exception);
-                }
-
-                push.call(this.returnValues, returnValue);
-
-                createCallProperties.call(this);
-
-                return returnValue;
-            },
-
-            getCall: function getCall(i) {
-                if (i < 0 || i >= this.callCount) {
-                    return null;
-                }
-
-                return spyCall.create(this, this.thisValues[i], this.args[i],
-                                      this.returnValues[i], this.exceptions[i],
-                                      this.callIds[i]);
-            },
-
-            calledBefore: function calledBefore(spyFn) {
-                if (!this.called) {
-                    return false;
-                }
-
-                if (!spyFn.called) {
-                    return true;
-                }
-
-                return this.callIds[0] < spyFn.callIds[spyFn.callIds.length - 1];
-            },
-
-            calledAfter: function calledAfter(spyFn) {
-                if (!this.called || !spyFn.called) {
-                    return false;
-                }
-
-                return this.callIds[this.callCount - 1] > spyFn.callIds[spyFn.callCount - 1];
-            },
-
-            withArgs: function () {
-                var args = slice.call(arguments);
-
-                if (this.fakes) {
-                    var match = matchingFake(this.fakes, args, true);
-
-                    if (match) {
-                        return match;
-                    }
-                } else {
-                    this.fakes = [];
-                }
-
-                var original = this;
-                var fake = this._create();
-                fake.matchingArguments = args;
-                push.call(this.fakes, fake);
-
-                fake.withArgs = function () {
-                    return original.withArgs.apply(original, arguments);
-                };
-
-                for (var i = 0; i < this.args.length; i++) {
-                    if (fake.matches(this.args[i])) {
-                        incrementCallCount.call(fake);
-                        push.call(fake.thisValues, this.thisValues[i]);
-                        push.call(fake.args, this.args[i]);
-                        push.call(fake.returnValues, this.returnValues[i]);
-                        push.call(fake.exceptions, this.exceptions[i]);
-                        push.call(fake.callIds, this.callIds[i]);
-                    }
-                }
-                createCallProperties.call(fake);
-
-                return fake;
-            },
-
-            matches: function (args, strict) {
-                var margs = this.matchingArguments;
-
-                if (margs.length <= args.length &&
-                    sinon.deepEqual(margs, args.slice(0, margs.length))) {
-                    return !strict || margs.length == args.length;
-                }
-            },
-
-            printf: function (format) {
-                var spy = this;
-                var args = slice.call(arguments, 1);
-                var formatter;
-
-                return (format || "").replace(/%(.)/g, function (match, specifyer) {
-                    formatter = spyApi.formatters[specifyer];
-
-                    if (typeof formatter == "function") {
-                        return formatter.call(null, spy, args);
-                    } else if (!isNaN(parseInt(specifyer, 10))) {
-                        return sinon.format(args[specifyer - 1]);
-                    }
-
-                    return "%" + specifyer;
-                });
-            }
-        };
-
-        delegateToCalls(spyApi, "calledOn", true);
-        delegateToCalls(spyApi, "alwaysCalledOn", false, "calledOn");
-        delegateToCalls(spyApi, "calledWith", true);
-        delegateToCalls(spyApi, "calledWithMatch", true);
-        delegateToCalls(spyApi, "alwaysCalledWith", false, "calledWith");
-        delegateToCalls(spyApi, "alwaysCalledWithMatch", false, "calledWithMatch");
-        delegateToCalls(spyApi, "calledWithExactly", true);
-        delegateToCalls(spyApi, "alwaysCalledWithExactly", false, "calledWithExactly");
-        delegateToCalls(spyApi, "neverCalledWith", false, "notCalledWith",
-            function () { return true; });
-        delegateToCalls(spyApi, "neverCalledWithMatch", false, "notCalledWithMatch",
-            function () { return true; });
-        delegateToCalls(spyApi, "threw", true);
-        delegateToCalls(spyApi, "alwaysThrew", false, "threw");
-        delegateToCalls(spyApi, "returned", true);
-        delegateToCalls(spyApi, "alwaysReturned", false, "returned");
-        delegateToCalls(spyApi, "calledWithNew", true);
-        delegateToCalls(spyApi, "alwaysCalledWithNew", false, "calledWithNew");
-        delegateToCalls(spyApi, "callArg", false, "callArgWith", function () {
-            throw new Error(this.toString() + " cannot call arg since it was not yet invoked.");
-        });
-        spyApi.callArgWith = spyApi.callArg;
-        delegateToCalls(spyApi, "yield", false, "yield", function () {
-            throw new Error(this.toString() + " cannot yield since it was not yet invoked.");
-        });
-        // "invokeCallback" is an alias for "yield" since "yield" is invalid in strict mode.
-        spyApi.invokeCallback = spyApi.yield;
-        delegateToCalls(spyApi, "yieldTo", false, "yieldTo", function (property) {
-            throw new Error(this.toString() + " cannot yield to '" + property +
-                "' since it was not yet invoked.");
-        });
-
-        spyApi.formatters = {
-            "c": function (spy) {
-                return sinon.timesInWords(spy.callCount);
-            },
-
-            "n": function (spy) {
-                return spy.toString();
-            },
-
-            "C": function (spy) {
-                var calls = [];
-
-                for (var i = 0, l = spy.callCount; i < l; ++i) {
-                    push.call(calls, "    " + spy.getCall(i).toString());
-                }
-
-                return calls.length > 0 ? "\n" + calls.join("\n") : "";
-            },
-
-            "t": function (spy) {
-                var objects = [];
-
-                for (var i = 0, l = spy.callCount; i < l; ++i) {
-                    push.call(objects, sinon.format(spy.thisValues[i]));
-                }
-
-                return objects.join(", ");
-            },
-
-            "*": function (spy, args) {
-                var formatted = [];
-
-                for (var i = 0, l = args.length; i < l; ++i) {
-                    push.call(formatted, sinon.format(args[i]));
-                }
-
-                return formatted.join(", ");
-            }
-        };
-
-        return spyApi;
-    }()));
-
-    spyCall = (function () {
-
-        function throwYieldError(proxy, text, args) {
-            var msg = sinon.functionName(proxy) + text;
-            if (args.length) {
-                msg += " Received [" + slice.call(args).join(", ") + "]";
-            }
-            throw new Error(msg);
-        }
-
-        return {
-            create: function create(spy, thisValue, args, returnValue, exception, id) {
-                var proxyCall = sinon.create(spyCall);
-                delete proxyCall.create;
-                proxyCall.proxy = spy;
-                proxyCall.thisValue = thisValue;
-                proxyCall.args = args;
-                proxyCall.returnValue = returnValue;
-                proxyCall.exception = exception;
-                proxyCall.callId = typeof id == "number" && id || callId++;
-
-                return proxyCall;
-            },
-
-            calledOn: function calledOn(thisValue) {
-                return this.thisValue === thisValue;
-            },
-
-            calledWith: function calledWith() {
-                for (var i = 0, l = arguments.length; i < l; i += 1) {
-                    if (!sinon.deepEqual(arguments[i], this.args[i])) {
-                        return false;
-                    }
-                }
-
-                return true;
-            },
-
-            calledWithMatch: function calledWithMatch() {
-                for (var i = 0, l = arguments.length; i < l; i += 1) {
-                    var actual = this.args[i];
-                    var expectation = arguments[i];
-                    if (!sinon.match || !sinon.match(expectation).test(actual)) {
-                        return false;
-                    }
-                }
-
-                return true;
-            },
-
-            calledWithExactly: function calledWithExactly() {
-                return arguments.length == this.args.length &&
-                    this.calledWith.apply(this, arguments);
-            },
-
-            notCalledWith: function notCalledWith() {
-                return !this.calledWith.apply(this, arguments);
-            },
-
-            notCalledWithMatch: function notCalledWithMatch() {
-                return !this.calledWithMatch.apply(this, arguments);
-            },
-
-            returned: function returned(value) {
-                return sinon.deepEqual(value, this.returnValue);
-            },
-
-            threw: function threw(error) {
-                if (typeof error == "undefined" || !this.exception) {
-                    return !!this.exception;
-                }
-
-                if (typeof error == "string") {
-                    return this.exception.name == error;
-                }
-
-                return this.exception === error;
-            },
-
-            calledWithNew: function calledWithNew(thisValue) {
-                return this.thisValue instanceof this.proxy;
-            },
-
-            calledBefore: function (other) {
-                return this.callId < other.callId;
-            },
-
-            calledAfter: function (other) {
-                return this.callId > other.callId;
-            },
-
-            callArg: function (pos) {
-                this.args[pos]();
-            },
-
-            callArgWith: function (pos) {
-                var args = slice.call(arguments, 1);
-                this.args[pos].apply(null, args);
-            },
-
-            "yield": function () {
-                var args = this.args;
-                for (var i = 0, l = args.length; i < l; ++i) {
-                    if (typeof args[i] === "function") {
-                        args[i].apply(null, slice.call(arguments));
-                        return;
-                    }
-                }
-                throwYieldError(this.proxy, " cannot yield since no callback was passed.", args);
-            },
-
-            yieldTo: function (prop) {
-                var args = this.args;
-                for (var i = 0, l = args.length; i < l; ++i) {
-                    if (args[i] && typeof args[i][prop] === "function") {
-                        args[i][prop].apply(null, slice.call(arguments, 1));
-                        return;
-                    }
-                }
-                throwYieldError(this.proxy, " cannot yield to '" + prop +
-                    "' since no callback was passed.", args);
-            },
-
-            toString: function () {
-                var callStr = this.proxy.toString() + "(";
-                var args = [];
-
-                for (var i = 0, l = this.args.length; i < l; ++i) {
-                    push.call(args, sinon.format(this.args[i]));
-                }
-
-                callStr = callStr + args.join(", ") + ")";
-
-                if (typeof this.returnValue != "undefined") {
-                    callStr += " => " + sinon.format(this.returnValue);
-                }
-
-                if (this.exception) {
-                    callStr += " !" + this.exception.name;
-
-                    if (this.exception.message) {
-                        callStr += "(" + this.exception.message + ")";
-                    }
-                }
-
-                return callStr;
-            }
-        };
-    }());
-
-    spy.spyCall = spyCall;
-
-    // This steps outside the module sandbox and will be removed
-    sinon.spyCall = spyCall;
-
-    if (commonJSModule) {
-        module.exports = spy;
-    } else {
-        sinon.spy = spy;
-    }
-}(typeof sinon == "object" && sinon || null));
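-
-// A minimal usage sketch of the spy API above, assuming the `sinon` global
-// these modules build; the spy and argument names are illustrative:
-//
-//     var callback = sinon.spy();
-//     callback("a", 42);
-//     callback.calledOnce;        // true
-//     callback.calledWith("a");   // true (leading arguments match)
-//     callback.getCall(0).args;   // ["a", 42]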
-
-/**
- * @depend ../sinon.js
- * @depend spy.js
- */
-/*jslint eqeqeq: false, onevar: false*/
-/*global module, require, sinon*/
-/**
- * Stub functions
- *
- * @author Christian Johansen (christian@cjohansen.no)
- * @license BSD
- *
- * Copyright (c) 2010-2011 Christian Johansen
- */
-
-(function (sinon) {
-    var commonJSModule = typeof module == "object" && typeof require == "function";
-
-    if (!sinon && commonJSModule) {
-        sinon = require("../sinon");
-    }
-
-    if (!sinon) {
-        return;
-    }
-
-    function stub(object, property, func) {
-        if (!!func && typeof func != "function") {
-            throw new TypeError("Custom stub should be function");
-        }
-
-        var wrapper;
-
-        if (func) {
-            wrapper = sinon.spy && sinon.spy.create ? sinon.spy.create(func) : func;
-        } else {
-            wrapper = stub.create();
-        }
-
-        if (!object && !property) {
-            return sinon.stub.create();
-        }
-
-        if (!property && !!object && typeof object == "object") {
-            for (var prop in object) {
-                if (typeof object[prop] === "function") {
-                    stub(object, prop);
-                }
-            }
-
-            return object;
-        }
-
-        return sinon.wrapMethod(object, property, wrapper);
-    }
-
-    function getCallback(stub, args) {
-        if (stub.callArgAt < 0) {
-            for (var i = 0, l = args.length; i < l; ++i) {
-                if (!stub.callArgProp && typeof args[i] == "function") {
-                    return args[i];
-                }
-
-                if (stub.callArgProp && args[i] &&
-                    typeof args[i][stub.callArgProp] == "function") {
-                    return args[i][stub.callArgProp];
-                }
-            }
-
-            return null;
-        }
-
-        return args[stub.callArgAt];
-    }
-
-    var join = Array.prototype.join;
-
-    function getCallbackError(stub, func, args) {
-        if (stub.callArgAt < 0) {
-            var msg;
-
-            if (stub.callArgProp) {
-                msg = sinon.functionName(stub) +
-                    " expected to yield to '" + stub.callArgProp +
-                    "', but no object with such a property was passed.";
-            } else {
-                msg = sinon.functionName(stub) +
-                    " expected to yield, but no callback was passed.";
-            }
-
-            if (args.length > 0) {
-                msg += " Received [" + join.call(args, ", ") + "]";
-            }
-
-            return msg;
-        }
-
-        return "argument at index " + stub.callArgAt + " is not a function: " + func;
-    }
-
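-    // Prefer a real "immediate" scheduler when one exists (Node's
-    // process.nextTick, then msSetImmediate / setImmediate), falling back
-    // to setTimeout(callback, 0) otherwise.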
-    var nextTick = (function () {
-        if (typeof process === "object" && typeof process.nextTick === "function") {
-            return process.nextTick;
-        } else if (typeof msSetImmediate === "function") {
-            return msSetImmediate.bind(window);
-        } else if (typeof setImmediate === "function") {
-            return setImmediate;
-        } else {
-            return function (callback) {
-                setTimeout(callback, 0);
-            };
-        }
-    })();
-
-    function callCallback(stub, args) {
-        if (typeof stub.callArgAt == "number") {
-            var func = getCallback(stub, args);
-
-            if (typeof func != "function") {
-                throw new TypeError(getCallbackError(stub, func, args));
-            }
-
-            if (stub.callbackAsync) {
-                nextTick(function() {
-                    func.apply(stub.callbackContext, stub.callbackArguments);
-                });
-            } else {
-                func.apply(stub.callbackContext, stub.callbackArguments);
-            }
-        }
-    }
-
-    var uuid = 0;
-
-    sinon.extend(stub, (function () {
-        var slice = Array.prototype.slice, proto;
-
-        function throwsException(error, message) {
-            if (typeof error == "string") {
-                this.exception = new Error(message || "");
-                this.exception.name = error;
-            } else if (!error) {
-                this.exception = new Error("Error");
-            } else {
-                this.exception = error;
-            }
-
-            return this;
-        }
-
-        proto = {
-            create: function create() {
-                var functionStub = function () {
-
-                    callCallback(functionStub, arguments);
-
-                    if (functionStub.exception) {
-                        throw functionStub.exception;
-                    } else if (typeof functionStub.returnArgAt == 'number') {
-                        return arguments[functionStub.returnArgAt];
-                    } else if (functionStub.returnThis) {
-                        return this;
-                    }
-                    return functionStub.returnValue;
-                };
-
-                functionStub.id = "stub#" + uuid++;
-                var orig = functionStub;
-                functionStub = sinon.spy.create(functionStub);
-                functionStub.func = orig;
-
-                sinon.extend(functionStub, stub);
-                functionStub._create = sinon.stub.create;
-                functionStub.displayName = "stub";
-                functionStub.toString = sinon.functionToString;
-
-                return functionStub;
-            },
-
-            returns: function returns(value) {
-                this.returnValue = value;
-
-                return this;
-            },
-
-            returnsArg: function returnsArg(pos) {
-                if (typeof pos != "number") {
-                    throw new TypeError("argument index is not number");
-                }
-
-                this.returnArgAt = pos;
-
-                return this;
-            },
-
-            returnsThis: function returnsThis() {
-                this.returnThis = true;
-
-                return this;
-            },
-
-            "throws": throwsException,
-            throwsException: throwsException,
-
-            callsArg: function callsArg(pos) {
-                if (typeof pos != "number") {
-                    throw new TypeError("argument index is not number");
-                }
-
-                this.callArgAt = pos;
-                this.callbackArguments = [];
-
-                return this;
-            },
-
-            callsArgOn: function callsArgOn(pos, context) {
-                if (typeof pos != "number") {
-                    throw new TypeError("argument index is not number");
-                }
-                if (typeof context != "object") {
-                    throw new TypeError("argument context is not an object");
-                }
-
-                this.callArgAt = pos;
-                this.callbackArguments = [];
-                this.callbackContext = context;
-
-                return this;
-            },
-
-            callsArgWith: function callsArgWith(pos) {
-                if (typeof pos != "number") {
-                    throw new TypeError("argument index is not number");
-                }
-
-                this.callArgAt = pos;
-                this.callbackArguments = slice.call(arguments, 1);
-
-                return this;
-            },
-
-            callsArgOnWith: function callsArgOnWith(pos, context) {
-                if (typeof pos != "number") {
-                    throw new TypeError("argument index is not number");
-                }
-                if (typeof context != "object") {
-                    throw new TypeError("argument context is not an object");
-                }
-
-                this.callArgAt = pos;
-                this.callbackArguments = slice.call(arguments, 2);
-                this.callbackContext = context;
-
-                return this;
-            },
-
-            yields: function () {
-                this.callArgAt = -1;
-                this.callbackArguments = slice.call(arguments, 0);
-
-                return this;
-            },
-
-            yieldsOn: function (context) {
-                if (typeof context != "object") {
-                    throw new TypeError("argument context is not an object");
-                }
-
-                this.callArgAt = -1;
-                this.callbackArguments = slice.call(arguments, 1);
-                this.callbackContext = context;
-
-                return this;
-            },
-
-            yieldsTo: function (prop) {
-                this.callArgAt = -1;
-                this.callArgProp = prop;
-                this.callbackArguments = slice.call(arguments, 1);
-
-                return this;
-            },
-
-            yieldsToOn: function (prop, context) {
-                if (typeof context != "object") {
-                    throw new TypeError("argument context is not an object");
-                }
-
-                this.callArgAt = -1;
-                this.callArgProp = prop;
-                this.callbackArguments = slice.call(arguments, 2);
-                this.callbackContext = context;
-
-                return this;
-            }
-        };
-
-        // create asynchronous versions of callsArg* and yields* methods
-        for (var method in proto) {
-            if (proto.hasOwnProperty(method) && method.match(/^(callsArg|yields)/)) {
-                proto[method + 'Async'] = (function (syncFnName) {
-                    return function () {
-                        this.callbackAsync = true;
-                        return this[syncFnName].apply(this, arguments);
-                    };
-                })(method);
-            }
-        }
-
-        return proto;
-
-    }()));
-
-    if (commonJSModule) {
-        module.exports = stub;
-    } else {
-        sinon.stub = stub;
-    }
-}(typeof sinon == "object" && sinon || null));
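-
-// A minimal usage sketch of the stub API above, assuming the `sinon` global
-// these modules build; the object and callback names are illustrative:
-//
-//     var api = { fetch: function (callback) { /* real I/O */ } };
-//     var fetchStub = sinon.stub(api, "fetch").yields(null, "data");
-//     api.fetch(function (err, res) { /* err === null, res === "data" */ });
-//     fetchStub.restore();        // reinstates the original method
-//
-// The *Async variants generated above (e.g. yieldsAsync) defer the callback
-// via the nextTick helper instead of invoking it synchronously.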
-
-/**
- * @depend ../sinon.js
- * @depend stub.js
- */
-/*jslint eqeqeq: false, onevar: false, nomen: false*/
-/*global module, require, sinon*/
-/**
- * Mock functions.
- *
- * @author Christian Johansen (christian@cjohansen.no)
- * @license BSD
- *
- * Copyright (c) 2010-2011 Christian Johansen
- */
-
-(function (sinon) {
-    var commonJSModule = typeof module == "object" && typeof require == "function";
-    var push = [].push;
-
-    if (!sinon && commonJSModule) {
-        sinon = require("../sinon");
-    }
-
-    if (!sinon) {
-        return;
-    }
-
-    function mock(object) {
-        if (!object) {
-            return sinon.expectation.create("Anonymous mock");
-        }
-
-        return mock.create(object);
-    }
-
-    sinon.mock = mock;
-
-    sinon.extend(mock, (function () {
-        function each(collection, callback) {
-            if (!collection) {
-                return;
-            }
-
-            for (var i = 0, l = collection.length; i < l; i += 1) {
-                callback(collection[i]);
-            }
-        }
-
-        return {
-            create: function create(object) {
-                if (!object) {
-                    throw new TypeError("object is null");
-                }
-
-                var mockObject = sinon.extend({}, mock);
-                mockObject.object = object;
-                delete mockObject.create;
-
-                return mockObject;
-            },
-
-            expects: function expects(method) {
-                if (!method) {
-                    throw new TypeError("method is falsy");
-                }
-
-                if (!this.expectations) {
-                    this.expectations = {};
-                    this.proxies = [];
-                }
-
-                if (!this.expectations[method]) {
-                    this.expectations[method] = [];
-                    var mockObject = this;
-
-                    sinon.wrapMethod(this.object, method, function () {
-                        return mockObject.invokeMethod(method, this, arguments);
-                    });
-
-                    push.call(this.proxies, method);
-                }
-
-                var expectation = sinon.expectation.create(method);
-                push.call(this.expectations[method], expectation);
-
-                return expectation;
-            },
-
-            restore: function restore() {
-                var object = this.object;
-
-                each(this.proxies, function (proxy) {
-                    if (typeof object[proxy].restore == "function") {
-                        object[proxy].restore();
-                    }
-                });
-            },
-
-            verify: function verify() {
-                var expectations = this.expectations || {};
-                var messages = [], met = [];
-
-                each(this.proxies, function (proxy) {
-                    each(expectations[proxy], function (expectation) {
-                        if (!expectation.met()) {
-                            push.call(messages, expectation.toString());
-                        } else {
-                            push.call(met, expectation.toString());
-                        }
-                    });
-                });
-
-                this.restore();
-
-                if (messages.length > 0) {
-                    sinon.expectation.fail(messages.concat(met).join("\n"));
-                } else {
-                    sinon.expectation.pass(messages.concat(met).join("\n"));
-                }
-
-                return true;
-            },
-
-            invokeMethod: function invokeMethod(method, thisValue, args) {
-                var expectations = this.expectations && this.expectations[method];
-                var length = expectations && expectations.length || 0, i;
-
-                for (i = 0; i < length; i += 1) {
-                    if (!expectations[i].met() &&
-                        expectations[i].allowsCall(thisValue, args)) {
-                        return expectations[i].apply(thisValue, args);
-                    }
-                }
-
-                var messages = [], available, exhausted = 0;
-
-                for (i = 0; i < length; i += 1) {
-                    if (expectations[i].allowsCall(thisValue, args)) {
-                        available = available || expectations[i];
-                    } else {
-                        exhausted += 1;
-                    }
-                    push.call(messages, "    " + expectations[i].toString());
-                }
-
-                if (exhausted === 0) {
-                    return available.apply(thisValue, args);
-                }
-
-                messages.unshift("Unexpected call: " + sinon.spyCall.toString.call({
-                    proxy: method,
-                    args: args
-                }));
-
-                sinon.expectation.fail(messages.join("\n"));
-            }
-        };
-    }()));
-
-    var times = sinon.timesInWords;
-
-    sinon.expectation = (function () {
-        var slice = Array.prototype.slice;
-        var _invoke = sinon.spy.invoke;
-
-        function callCountInWords(callCount) {
-            if (callCount == 0) {
-                return "never called";
-            } else {
-                return "called " + times(callCount);
-            }
-        }
-
-        function expectedCallCountInWords(expectation) {
-            var min = expectation.minCalls;
-            var max = expectation.maxCalls;
-
-            if (typeof min == "number" && typeof max == "number") {
-                var str = times(min);
-
-                if (min != max) {
-                    str = "at least " + str + " and at most " + times(max);
-                }
-
-                return str;
-            }
-
-            if (typeof min == "number") {
-                return "at least " + times(min);
-            }
-
-            return "at most " + times(max);
-        }
-
-        function receivedMinCalls(expectation) {
-            var hasMinLimit = typeof expectation.minCalls == "number";
-            return !hasMinLimit || expectation.callCount >= expectation.minCalls;
-        }
-
-        function receivedMaxCalls(expectation) {
-            if (typeof expectation.maxCalls != "number") {
-                return false;
-            }
-
-            return expectation.callCount == expectation.maxCalls;
-        }
-
-        return {
-            minCalls: 1,
-            maxCalls: 1,
-
-            create: function create(methodName) {
-                var expectation = sinon.extend(sinon.stub.create(), sinon.expectation);
-                delete expectation.create;
-                expectation.method = methodName;
-
-                return expectation;
-            },
-
-            invoke: function invoke(func, thisValue, args) {
-                this.verifyCallAllowed(thisValue, args);
-
-                return _invoke.apply(this, arguments);
-            },
-
-            atLeast: function atLeast(num) {
-                if (typeof num != "number") {
-                    throw new TypeError("'" + num + "' is not number");
-                }
-
-                if (!this.limitsSet) {
-                    this.maxCalls = null;
-                    this.limitsSet = true;
-                }
-
-                this.minCalls = num;
-
-                return this;
-            },
-
-            atMost: function atMost(num) {
-                if (typeof num != "number") {
-                    throw new TypeError("'" + num + "' is not number");
-                }
-
-                if (!this.limitsSet) {
-                    this.minCalls = null;
-                    this.limitsSet = true;
-                }
-
-                this.maxCalls = num;
-
-                return this;
-            },
-
-            never: function never() {
-                return this.exactly(0);
-            },
-
-            once: function once() {
-                return this.exactly(1);
-            },
-
-            twice: function twice() {
-                return this.exactly(2);
-            },
-
-            thrice: function thrice() {
-                return this.exactly(3);
-            },
-
-            exactly: function exactly(num) {
-                if (typeof num != "number") {
-                    throw new TypeError("'" + num + "' is not a number");
-                }
-
-                this.atLeast(num);
-                return this.atMost(num);
-            },
-
-            met: function met() {
-                return !this.failed && receivedMinCalls(this);
-            },
-
-            verifyCallAllowed: function verifyCallAllowed(thisValue, args) {
-                if (receivedMaxCalls(this)) {
-                    this.failed = true;
-                    sinon.expectation.fail(this.method + " already called " + times(this.maxCalls));
-                }
-
-                if ("expectedThis" in this && this.expectedThis !== thisValue) {
-                    sinon.expectation.fail(this.method + " called with " + thisValue + " as thisValue, expected " +
-                        this.expectedThis);
-                }
-
-                if (!("expectedArguments" in this)) {
-                    return;
-                }
-
-                if (!args) {
-                    sinon.expectation.fail(this.method + " received no arguments, expected " +
-                        this.expectedArguments.join());
-                }
-
-                if (args.length < this.expectedArguments.length) {
-                    sinon.expectation.fail(this.method + " received too few arguments (" + args.join() +
-                        "), expected " + this.expectedArguments.join());
-                }
-
-                if (this.expectsExactArgCount &&
-                    args.length != this.expectedArguments.length) {
-                    sinon.expectation.fail(this.method + " received too many arguments (" + args.join() +
-                        "), expected " + this.expectedArguments.join());
-                }
-
-                for (var i = 0, l = this.expectedArguments.length; i < l; i += 1) {
-                    if (!sinon.deepEqual(this.expectedArguments[i], args[i])) {
-                        sinon.expectation.fail(this.method + " received wrong arguments (" + args.join() +
-                            "), expected " + this.expectedArguments.join());
-                    }
-                }
-            },
-
-            allowsCall: function allowsCall(thisValue, args) {
-                if (this.met() && receivedMaxCalls(this)) {
-                    return false;
-                }
-
-                if ("expectedThis" in this && this.expectedThis !== thisValue) {
-                    return false;
-                }
-
-                if (!("expectedArguments" in this)) {
-                    return true;
-                }
-
-                args = args || [];
-
-                if (args.length < this.expectedArguments.length) {
-                    return false;
-                }
-
-                if (this.expectsExactArgCount &&
-                    args.length != this.expectedArguments.length) {
-                    return false;
-                }
-
-                for (var i = 0, l = this.expectedArguments.length; i < l; i += 1) {
-                    if (!sinon.deepEqual(this.expectedArguments[i], args[i])) {
-                        return false;
-                    }
-                }
-
-                return true;
-            },
-
-            withArgs: function withArgs() {
-                this.expectedArguments = slice.call(arguments);
-                return this;
-            },
-
-            withExactArgs: function withExactArgs() {
-                this.withArgs.apply(this, arguments);
-                this.expectsExactArgCount = true;
-                return this;
-            },
-
-            on: function on(thisValue) {
-                this.expectedThis = thisValue;
-                return this;
-            },
-
-            toString: function () {
-                var args = (this.expectedArguments || []).slice();
-
-                if (!this.expectsExactArgCount) {
-                    push.call(args, "[...]");
-                }
-
-                var callStr = sinon.spyCall.toString.call({
-                    proxy: this.method, args: args
-                });
-
-                var message = callStr.replace(", [...", "[, ...") + " " +
-                    expectedCallCountInWords(this);
-
-                if (this.met()) {
-                    return "Expectation met: " + message;
-                }
-
-                return "Expected " + message + " (" +
-                    callCountInWords(this.callCount) + ")";
-            },
-
-            verify: function verify() {
-                if (!this.met()) {
-                    sinon.expectation.fail(this.toString());
-                } else {
-                    sinon.expectation.pass(this.toString());
-                }
-
-                return true;
-            },
-
-            pass: function (message) {
-                sinon.assert.pass(message);
-            },
-            fail: function (message) {
-                var exception = new Error(message);
-                exception.name = "ExpectationError";
-
-                throw exception;
-            }
-        };
-    }());
-
-    if (commonJSModule) {
-        module.exports = mock;
-    } else {
-        sinon.mock = mock;
-    }
-}(typeof sinon == "object" && sinon || null));
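-
-// A minimal usage sketch of the mock API above, assuming the `sinon` global
-// these modules build; `store` is an illustrative object:
-//
-//     var store = { save: function () {} };
-//     var storeMock = sinon.mock(store);
-//     storeMock.expects("save").once().withArgs("doc");
-//     store.save("doc");
-//     storeMock.verify();   // throws ExpectationError if expectations are
-//                           // unmet; restores the wrapped methods either way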
-
-/**
- * @depend ../sinon.js
- * @depend stub.js
- * @depend mock.js
- */
-/*jslint eqeqeq: false, onevar: false, forin: true*/
-/*global module, require, sinon*/
-/**
- * Collections of stubs, spies and mocks.
- *
- * @author Christian Johansen (christian@cjohansen.no)
- * @license BSD
- *
- * Copyright (c) 2010-2011 Christian Johansen
- */
-
-(function (sinon) {
-    var commonJSModule = typeof module == "object" && typeof require == "function";
-    var push = [].push;
-
-    if (!sinon && commonJSModule) {
-        sinon = require("../sinon");
-    }
-
-    if (!sinon) {
-        return;
-    }
-
-    function getFakes(fakeCollection) {
-        if (!fakeCollection.fakes) {
-            fakeCollection.fakes = [];
-        }
-
-        return fakeCollection.fakes;
-    }
-
-    function each(fakeCollection, method) {
-        var fakes = getFakes(fakeCollection);
-
-        for (var i = 0, l = fakes.length; i < l; i += 1) {
-            if (typeof fakes[i][method] == "function") {
-                fakes[i][method]();
-            }
-        }
-    }
-
-    function compact(fakeCollection) {
-        var fakes = getFakes(fakeCollection);
-        var i = 0;
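-        // i stays at 0, so each splice removes the head element until
-        // the fakes array is empty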
-        while (i < fakes.length) {
-          fakes.splice(i, 1);
-        }
-    }
-
-    var collection = {
-        verify: function resolve() {
-            each(this, "verify");
-        },
-
-        restore: function restore() {
-            each(this, "restore");
-            compact(this);
-        },
-
-        verifyAndRestore: function verifyAndRestore() {
-            var exception;
-
-            try {
-                this.verify();
-            } catch (e) {
-                exception = e;
-            }
-
-            this.restore();
-
-            if (exception) {
-                throw exception;
-            }
-        },
-
-        add: function add(fake) {
-            push.call(getFakes(this), fake);
-            return fake;
-        },
-
-        spy: function spy() {
-            return this.add(sinon.spy.apply(sinon, arguments));
-        },
-
-        stub: function stub(object, property, value) {
-            if (property) {
-                var original = object[property];
-
-                if (typeof original != "function") {
-                    if (!object.hasOwnProperty(property)) {
-                        throw new TypeError("Cannot stub non-existent own property " + property);
-                    }
-
-                    object[property] = value;
-
-                    return this.add({
-                        restore: function () {
-                            object[property] = original;
-                        }
-                    });
-                }
-            }
-            if (!property && !!object && typeof object == "object") {
-                var stubbedObj = sinon.stub.apply(sinon, arguments);
-
-                for (var prop in stubbedObj) {
-                    if (typeof stubbedObj[prop] === "function") {
-                        this.add(stubbedObj[prop]);
-                    }
-                }
-
-                return stubbedObj;
-            }
-
-            return this.add(sinon.stub.apply(sinon, arguments));
-        },
-
-        mock: function mock() {
-            return this.add(sinon.mock.apply(sinon, arguments));
-        },
-
-        inject: function inject(obj) {
-            var col = this;
-
-            obj.spy = function () {
-                return col.spy.apply(col, arguments);
-            };
-
-            obj.stub = function () {
-                return col.stub.apply(col, arguments);
-            };
-
-            obj.mock = function () {
-                return col.mock.apply(col, arguments);
-            };
-
-            return obj;
-        }
-    };
-
-    if (commonJSModule) {
-        module.exports = collection;
-    } else {
-        sinon.collection = collection;
-    }
-}(typeof sinon == "object" && sinon || null));
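-
-// A minimal usage sketch of the collection API above, assuming the `sinon`
-// global these modules build; `obj` is an illustrative object:
-//
-//     var obj = { read: function () {}, write: function () {} };
-//     sinon.collection.stub(obj, "read").returns("cached");
-//     sinon.collection.spy(obj, "write");
-//     /* ... exercise the code under test ... */
-//     sinon.collection.restore();   // unwraps every fake added above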
-
-/*jslint eqeqeq: false, plusplus: false, evil: true, onevar: false, browser: true, forin: false*/
-/*global module, require, window*/
-/**
- * Fake timer API
- * setTimeout
- * setInterval
- * clearTimeout
- * clearInterval
- * tick
- * reset
- * Date
- *
- * Inspired by jsUnitMockTimeOut from JsUnit
- *
- * @author Christian Johansen (christian@cjohansen.no)
- * @license BSD
- *
- * Copyright (c) 2010-2011 Christian Johansen
- */
-
-if (typeof sinon == "undefined") {
-    var sinon = {};
-}
-
-(function (global) {
-    var id = 1;
-
-    function addTimer(args, recurring) {
-        if (args.length === 0) {
-            throw new Error("Function requires at least 1 parameter");
-        }
-
-        var toId = id++;
-        var delay = args[1] || 0;
-
-        if (!this.timeouts) {
-            this.timeouts = {};
-        }
-
-        this.timeouts[toId] = {
-            id: toId,
-            func: args[0],
-            callAt: this.now + delay,
-            invokeArgs: Array.prototype.slice.call(args, 2)
-        };
-
-        if (recurring === true) {
-            this.timeouts[toId].interval = delay;
-        }
-
-        return toId;
-    }
-
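-    // parseTime converts an "h:m:s" / "m:s" / "s" string into milliseconds,
-    // e.g. "01:10" -> (1 * 60 + 10) * 1000 = 70000.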
-    function parseTime(str) {
-        if (!str) {
-            return 0;
-        }
-
-        var strings = str.split(":");
-        var l = strings.length, i = l;
-        var ms = 0, parsed;
-
-        if (l > 3 || !/^(\d\d:){0,2}\d\d?$/.test(str)) {
-            throw new Error("tick only understands numbers and 'h:m:s'");
-        }
-
-        while (i--) {
-            parsed = parseInt(strings[i], 10);
-
-            if (parsed >= 60) {
-                throw new Error("Invalid time " + str);
-            }
-
-            ms += parsed * Math.pow(60, (l - i - 1));
-        }
-
-        return ms * 1000;
-    }
-
-    function createObject(object) {
-        var newObject;
-
-        if (Object.create) {
-            newObject = Object.create(object);
-        } else {
-            var F = function () {};
-            F.prototype = object;
-            newObject = new F();
-        }
-
-        newObject.Date.clock = newObject;
-        return newObject;
-    }
-
-    sinon.clock = {
-        now: 0,
-
-        create: function create(now) {
-            var clock = createObject(this);
-
-            if (typeof now == "number") {
-                clock.now = now;
-            }
-
-            if (!!now && typeof now == "object") {
-                throw new TypeError("now should be milliseconds since UNIX epoch");
-            }
-
-            return clock;
-        },
-
-        setTimeout: function setTimeout(callback, timeout) {
-            return addTimer.call(this, arguments, false);
-        },
-
-        clearTimeout: function clearTimeout(timerId) {
-            if (!this.timeouts) {
-                this.timeouts = {};
-            }
-
-            if (timerId in this.timeouts) {
-                delete this.timeouts[timerId];
-            }
-        },
-
-        setInterval: function setInterval(callback, timeout) {
-            return addTimer.call(this, arguments, true);
-        },
-
-        clearInterval: function clearInterval(timerId) {
-            this.clearTimeout(timerId);
-        },
-
-        tick: function tick(ms) {
-            ms = typeof ms == "number" ? ms : parseTime(ms);
-            var tickFrom = this.now, tickTo = this.now + ms, previous = this.now;
-            var timer = this.firstTimerInRange(tickFrom, tickTo);
-
-            var firstException;
-            while (timer && tickFrom <= tickTo) {
-                if (this.timeouts[timer.id]) {
-                    tickFrom = this.now = timer.callAt;
-                    try {
-                        this.callTimer(timer);
-                    } catch (e) {
-                        firstException = firstException || e;
-                    }
-                }
-
-                timer = this.firstTimerInRange(previous, tickTo);
-                previous = tickFrom;
-            }
-
-            this.now = tickTo;
-
-            if (firstException) {
-                throw firstException;
-            }
-        },
-
-        firstTimerInRange: function (from, to) {
-            var timer, smallest, originalTimer;
-
-            for (var id in this.timeouts) {
-                if (this.timeouts.hasOwnProperty(id)) {
-                    if (this.timeouts[id].callAt < from || this.timeouts[id].callAt > to) {
-                        continue;
-                    }
-
-                    if (!smallest || this.timeouts[id].callAt < smallest) {
-                        originalTimer = this.timeouts[id];
-                        smallest = this.timeouts[id].callAt;
-
-                        timer = {
-                            func: this.timeouts[id].func,
-                            callAt: this.timeouts[id].callAt,
-                            interval: this.timeouts[id].interval,
-                            id: this.timeouts[id].id,
-                            invokeArgs: this.timeouts[id].invokeArgs
-                        };
-                    }
-                }
-            }
-
-            return timer || null;
-        },
-
-        callTimer: function (timer) {
-            if (typeof timer.interval == "number") {
-                this.timeouts[timer.id].callAt += timer.interval;
-            } else {
-                delete this.timeouts[timer.id];
-            }
-
-            var exception;
-
-            try {
-                if (typeof timer.func == "function") {
-                    timer.func.apply(null, timer.invokeArgs);
-                } else {
-                    eval(timer.func);
-                }
-            } catch (e) {
-                exception = e;
-            }
-
-            if (!this.timeouts[timer.id]) {
-                if (exception) {
-                    throw exception;
-                }
-                return;
-            }
-
-            if (exception) {
-                throw exception;
-            }
-        },
-
-        reset: function reset() {
-            this.timeouts = {};
-        },
-
-        Date: (function () {
-            var NativeDate = Date;
-
-            function ClockDate(year, month, date, hour, minute, second, ms) {
-                // Defensive and verbose to avoid potential harm in passing
-                // explicit undefined when user does not pass argument
-                switch (arguments.length) {
-                case 0:
-                    return new NativeDate(ClockDate.clock.now);
-                case 1:
-                    return new NativeDate(year);
-                case 2:
-                    return new NativeDate(year, month);
-                case 3:
-                    return new NativeDate(year, month, date);
-                case 4:
-                    return new NativeDate(year, month, date, hour);
-                case 5:
-                    return new NativeDate(year, month, date, hour, minute);
-                case 6:
-                    return new NativeDate(year, month, date, hour, minute, second);
-                default:
-                    return new NativeDate(year, month, date, hour, minute, second, ms);
-                }
-            }
-
-            return mirrorDateProperties(ClockDate, NativeDate);
-        }())
-    };
-
-    function mirrorDateProperties(target, source) {
-        if (source.now) {
-            target.now = function now() {
-                return target.clock.now;
-            };
-        } else {
-            delete target.now;
-        }
-
-        if (source.toSource) {
-            target.toSource = function toSource() {
-                return source.toSource();
-            };
-        } else {
-            delete target.toSource;
-        }
-
-        target.toString = function toString() {
-            return source.toString();
-        };
-
-        target.prototype = source.prototype;
-        target.parse = source.parse;
-        target.UTC = source.UTC;
-        target.prototype.toUTCString = source.prototype.toUTCString;
-        return target;
-    }
-
-    var methods = ["Date", "setTimeout", "setInterval",
-                   "clearTimeout", "clearInterval"];
-
-    function restore() {
-        var method;
-
-        for (var i = 0, l = this.methods.length; i < l; i++) {
-            method = this.methods[i];
-            if (global[method].hadOwnProperty) {
-                global[method] = this["_" + method];
-            } else {
-                delete global[method];
-            }
-        }
-
-        // Prevent multiple executions which will completely remove these props
-        this.methods = [];
-    }
-
-    function stubGlobal(method, clock) {
-        clock[method].hadOwnProperty = Object.prototype.hasOwnProperty.call(global, method);
-        clock["_" + method] = global[method];
-
-        if (method == "Date") {
-            var date = mirrorDateProperties(clock[method], global[method]);
-            global[method] = date;
-        } else {
-            global[method] = function () {
-                return clock[method].apply(clock, arguments);
-            };
-
-            for (var prop in clock[method]) {
-                if (clock[method].hasOwnProperty(prop)) {
-                    global[method][prop] = clock[method][prop];
-                }
-            }
-        }
-
-        global[method].clock = clock;
-    }
-
-    sinon.useFakeTimers = function useFakeTimers(now) {
-        var clock = sinon.clock.create(now);
-        clock.restore = restore;
-        clock.methods = Array.prototype.slice.call(arguments,
-                                                   typeof now == "number" ? 1 : 0);
-
-        if (clock.methods.length === 0) {
-            clock.methods = methods;
-        }
-
-        for (var i = 0, l = clock.methods.length; i < l; i++) {
-            stubGlobal(clock.methods[i], clock);
-        }
-
-        return clock;
-    };
-}(typeof global != "undefined" && typeof global !== "function" ? global : this));
-
-sinon.timers = {
-    setTimeout: setTimeout,
-    clearTimeout: clearTimeout,
-    setInterval: setInterval,
-    clearInterval: clearInterval,
-    Date: Date
-};
-
-if (typeof module == "object" && typeof require == "function") {
-    module.exports = sinon;
-}
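-
-// A minimal usage sketch of the fake timer API above, assuming the `sinon`
-// global these modules build:
-//
-//     var clock = sinon.useFakeTimers();
-//     var fired = false;
-//     setTimeout(function () { fired = true; }, 1000);
-//     clock.tick(999);      // fired is still false
-//     clock.tick(1);        // fired is now true
-//     clock.tick("01:00");  // string ticks use the h:m:s form parsed above
-//     clock.restore();      // reinstates the native timer functions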
-
-/*jslint eqeqeq: false, onevar: false*/
-/*global sinon, module, require, ActiveXObject, XMLHttpRequest, DOMParser*/
-/**
- * Minimal Event interface implementation
- *
- * Original implementation by Sven Fuchs: https://gist.github.com/995028
- * Modifications and tests by Christian Johansen.
- *
- * @author Sven Fuchs (svenfuchs@artweb-design.de)
- * @author Christian Johansen (christian@cjohansen.no)
- * @license BSD
- *
- * Copyright (c) 2011 Sven Fuchs, Christian Johansen
- */
-
-if (typeof sinon == "undefined") {
-    this.sinon = {};
-}
-
-(function () {
-    var push = [].push;
-
-    sinon.Event = function Event(type, bubbles, cancelable) {
-        this.initEvent(type, bubbles, cancelable);
-    };
-
-    sinon.Event.prototype = {
-        initEvent: function(type, bubbles, cancelable) {
-            this.type = type;
-            this.bubbles = bubbles;
-            this.cancelable = cancelable;
-        },
-
-        stopPropagation: function () {},
-
-        preventDefault: function () {
-            this.defaultPrevented = true;
-        }
-    };
-
-    sinon.EventTarget = {
-        addEventListener: function addEventListener(event, listener, useCapture) {
-            this.eventListeners = this.eventListeners || {};
-            this.eventListeners[event] = this.eventListeners[event] || [];
-            push.call(this.eventListeners[event], listener);
-        },
-
-        removeEventListener: function removeEventListener(event, listener, useCapture) {
-            var listeners = this.eventListeners && this.eventListeners[event] || [];
-
-            for (var i = 0, l = listeners.length; i < l; ++i) {
-                if (listeners[i] == listener) {
-                    return listeners.splice(i, 1);
-                }
-            }
-        },
-
-        dispatchEvent: function dispatchEvent(event) {
-            var type = event.type;
-            var listeners = this.eventListeners && this.eventListeners[type] || [];
-
-            for (var i = 0; i < listeners.length; i++) {
-                if (typeof listeners[i] == "function") {
-                    listeners[i].call(this, event);
-                } else {
-                    listeners[i].handleEvent(event);
-                }
-            }
-
-            return !!event.defaultPrevented;
-        }
-    };
-}());
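-
-// A minimal usage sketch of the event API above, assuming the `sinon` global
-// and the sinon.extend helper defined earlier in this file:
-//
-//     var target = sinon.extend({}, sinon.EventTarget);
-//     target.addEventListener("change", function (e) { e.preventDefault(); });
-//     target.dispatchEvent(new sinon.Event("change", false, true));
-//     // dispatchEvent returns true here because a listener prevented default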
-
-/**
- * @depend event.js
- */
-/*jslint eqeqeq: false, onevar: false*/
-/*global sinon, module, require, ActiveXObject, XMLHttpRequest, DOMParser*/
-/**
- * Fake XMLHttpRequest object
- *
- * @author Christian Johansen (christian@cjohansen.no)
- * @license BSD
- *
- * Copyright (c) 2010-2011 Christian Johansen
- */
-
-if (typeof sinon == "undefined") {
-    this.sinon = {};
-}
-sinon.xhr = { XMLHttpRequest: this.XMLHttpRequest };
-
-// wrapper for global
-(function(global) {
-    var xhr = sinon.xhr;
-    xhr.GlobalXMLHttpRequest = global.XMLHttpRequest;
-    xhr.GlobalActiveXObject = global.ActiveXObject;
-    xhr.supportsActiveX = typeof xhr.GlobalActiveXObject != "undefined";
-    xhr.supportsXHR = typeof xhr.GlobalXMLHttpRequest != "undefined";
-    xhr.workingXHR = xhr.supportsXHR ? xhr.GlobalXMLHttpRequest
-                   : xhr.supportsActiveX ? function () { return new xhr.GlobalActiveXObject("MSXML2.XMLHTTP.3.0"); }
-                   : false;
-
-    /*jsl:ignore*/
-    var unsafeHeaders = {
-        "Accept-Charset": true,
-        "Accept-Encoding": true,
-        "Connection": true,
-        "Content-Length": true,
-        "Cookie": true,
-        "Cookie2": true,
-        "Content-Transfer-Encoding": true,
-        "Date": true,
-        "Expect": true,
-        "Host": true,
-        "Keep-Alive": true,
-        "Referer": true,
-        "TE": true,
-        "Trailer": true,
-        "Transfer-Encoding": true,
-        "Upgrade": true,
-        "User-Agent": true,
-        "Via": true
-    };
-    /*jsl:end*/
-
-    function FakeXMLHttpRequest() {
-        this.readyState = FakeXMLHttpRequest.UNSENT;
-        this.requestHeaders = {};
-        this.requestBody = null;
-        this.status = 0;
-        this.statusText = "";
-
-        if (typeof FakeXMLHttpRequest.onCreate == "function") {
-            FakeXMLHttpRequest.onCreate(this);
-        }
-    }
-
-    function verifyState(xhr) {
-        if (xhr.readyState !== FakeXMLHttpRequest.OPENED) {
-            throw new Error("INVALID_STATE_ERR");
-        }
-
-        if (xhr.sendFlag) {
-            throw new Error("INVALID_STATE_ERR");
-        }
-    }
-
-    // filtering to enable a white-list version of Sinon FakeXhr,
-    // where whitelisted requests are passed through to real XHR
-    function each(collection, callback) {
-        if (!collection) return;
-        for (var i = 0, l = collection.length; i < l; i += 1) {
-            callback(collection[i]);
-        }
-    }
-    function some(collection, callback) {
-        for (var index = 0; index < collection.length; index++) {
-            if (callback(collection[index]) === true) return true;
-        }
-        return false;
-    }
-    // largest arity in XHR is 5 - XHR#open
-    var apply = function (obj, method, args) {
-        switch (args.length) {
-        case 0: return obj[method]();
-        case 1: return obj[method](args[0]);
-        case 2: return obj[method](args[0], args[1]);
-        case 3: return obj[method](args[0], args[1], args[2]);
-        case 4: return obj[method](args[0], args[1], args[2], args[3]);
-        case 5: return obj[method](args[0], args[1], args[2], args[3], args[4]);
-        }
-    };
-
-    FakeXMLHttpRequest.filters = [];
-    FakeXMLHttpRequest.addFilter = function (fn) {
-        this.filters.push(fn);
-    };
-    var IE6Re = /MSIE 6/;
-    FakeXMLHttpRequest.defake = function (fakeXhr, xhrArgs) {
-        var xhr = new sinon.xhr.workingXHR();
-        each(["open", "setRequestHeader", "send", "abort", "getResponseHeader",
-              "getAllResponseHeaders", "addEventListener", "overrideMimeType",
-              "removeEventListener"],
-             function (method) {
-                 fakeXhr[method] = function () {
-                     return apply(xhr, method, arguments);
-                 };
-             });
-
-        var copyAttrs = function (args) {
-            each(args, function (attr) {
-                try {
-                    fakeXhr[attr] = xhr[attr];
-                } catch (e) {
-                    if (!IE6Re.test(navigator.userAgent)) throw e;
-                }
-            });
-        };
-
-        var stateChange = function () {
-            fakeXhr.readyState = xhr.readyState;
-            if (xhr.readyState >= FakeXMLHttpRequest.HEADERS_RECEIVED) {
-                copyAttrs(["status", "statusText"]);
-            }
-            if (xhr.readyState >= FakeXMLHttpRequest.LOADING) {
-                copyAttrs(["responseText"]);
-            }
-            if (xhr.readyState === FakeXMLHttpRequest.DONE) {
-                copyAttrs(["responseXML"]);
-            }
-            if (fakeXhr.onreadystatechange) {
-                fakeXhr.onreadystatechange.call(fakeXhr);
-            }
-        };
-
-        if (xhr.addEventListener) {
-            for (var event in fakeXhr.eventListeners) {
-                if (fakeXhr.eventListeners.hasOwnProperty(event)) {
-                    each(fakeXhr.eventListeners[event], function (handler) {
-                        xhr.addEventListener(event, handler);
-                    });
-                }
-            }
-            xhr.addEventListener("readystatechange", stateChange);
-        } else {
-            xhr.onreadystatechange = stateChange;
-        }
-        apply(xhr, "open", xhrArgs);
-    };
-    FakeXMLHttpRequest.useFilters = false;
-
-    function verifyRequestSent(xhr) {
-        if (xhr.readyState == FakeXMLHttpRequest.DONE) {
-            throw new Error("Request done");
-        }
-    }
-
-    function verifyHeadersReceived(xhr) {
-        if (xhr.async && xhr.readyState != FakeXMLHttpRequest.HEADERS_RECEIVED) {
-            throw new Error("No headers received");
-        }
-    }
-
-    function verifyResponseBodyType(body) {
-        if (typeof body != "string") {
-            var error = new Error("Attempted to respond to fake XMLHttpRequest with " +
-                                 body + ", which is not a string.");
-            error.name = "InvalidBodyException";
-            throw error;
-        }
-    }
-
-    sinon.extend(FakeXMLHttpRequest.prototype, sinon.EventTarget, {
-        async: true,
-
-        open: function open(method, url, async, username, password) {
-            this.method = method;
-            this.url = url;
-            this.async = typeof async == "boolean" ? async : true;
-            this.username = username;
-            this.password = password;
-            this.responseText = null;
-            this.responseXML = null;
-            this.requestHeaders = {};
-            this.sendFlag = false;
-            if (sinon.FakeXMLHttpRequest.useFilters === true) {
-                var xhrArgs = arguments;
-                var defake = some(FakeXMLHttpRequest.filters, function (filter) {
-                    return filter.apply(this, xhrArgs);
-                });
-                if (defake) {
-                    return sinon.FakeXMLHttpRequest.defake(this, arguments);
-                }
-            }
-            this.readyStateChange(FakeXMLHttpRequest.OPENED);
-        },
-
-        readyStateChange: function readyStateChange(state) {
-            this.readyState = state;
-
-            if (typeof this.onreadystatechange == "function") {
-                try {
-                    this.onreadystatechange();
-                } catch (e) {
-                    sinon.logError("Fake XHR onreadystatechange handler", e);
-                }
-            }
-
-            this.dispatchEvent(new sinon.Event("readystatechange"));
-        },
-
-        setRequestHeader: function setRequestHeader(header, value) {
-            verifyState(this);
-
-            if (unsafeHeaders[header] || /^(Sec-|Proxy-)/.test(header)) {
-                throw new Error("Refused to set unsafe header \"" + header + "\"");
-            }
-
-            if (this.requestHeaders[header]) {
-                this.requestHeaders[header] += "," + value;
-            } else {
-                this.requestHeaders[header] = value;
-            }
-        },
-
-        // Helps testing
-        setResponseHeaders: function setResponseHeaders(headers) {
-            this.responseHeaders = {};
-
-            for (var header in headers) {
-                if (headers.hasOwnProperty(header)) {
-                    this.responseHeaders[header] = headers[header];
-                }
-            }
-
-            if (this.async) {
-                this.readyStateChange(FakeXMLHttpRequest.HEADERS_RECEIVED);
-            }
-        },
-
-        // Currently treats ALL data as a DOMString (i.e. no Document)
-        send: function send(data) {
-            verifyState(this);
-
-            if (!/^(get|head)$/i.test(this.method)) {
-                if (this.requestHeaders["Content-Type"]) {
-                    var value = this.requestHeaders["Content-Type"].split(";");
-                    this.requestHeaders["Content-Type"] = value[0] + ";charset=utf-8";
-                } else {
-                    this.requestHeaders["Content-Type"] = "text/plain;charset=utf-8";
-                }
-
-                this.requestBody = data;
-            }
-
-            this.errorFlag = false;
-            this.sendFlag = this.async;
-            this.readyStateChange(FakeXMLHttpRequest.OPENED);
-
-            if (typeof this.onSend == "function") {
-                this.onSend(this);
-            }
-        },
-
-        abort: function abort() {
-            this.aborted = true;
-            this.responseText = null;
-            this.errorFlag = true;
-            this.requestHeaders = {};
-
-            if (this.readyState > sinon.FakeXMLHttpRequest.UNSENT && this.sendFlag) {
-                this.readyStateChange(sinon.FakeXMLHttpRequest.DONE);
-                this.sendFlag = false;
-            }
-
-            this.readyState = sinon.FakeXMLHttpRequest.UNSENT;
-        },
-
-        getResponseHeader: function getResponseHeader(header) {
-            if (this.readyState < FakeXMLHttpRequest.HEADERS_RECEIVED) {
-                return null;
-            }
-
-            if (/^Set-Cookie2?$/i.test(header)) {
-                return null;
-            }
-
-            header = header.toLowerCase();
-
-            for (var h in this.responseHeaders) {
-                if (h.toLowerCase() == header) {
-                    return this.responseHeaders[h];
-                }
-            }
-
-            return null;
-        },
-
-        getAllResponseHeaders: function getAllResponseHeaders() {
-            if (this.readyState < FakeXMLHttpRequest.HEADERS_RECEIVED) {
-                return "";
-            }
-
-            var headers = "";
-
-            for (var header in this.responseHeaders) {
-                if (this.responseHeaders.hasOwnProperty(header) &&
-                    !/^Set-Cookie2?$/i.test(header)) {
-                    headers += header + ": " + this.responseHeaders[header] + "\r\n";
-                }
-            }
-
-            return headers;
-        },
-
-        setResponseBody: function setResponseBody(body) {
-            verifyRequestSent(this);
-            verifyHeadersReceived(this);
-            verifyResponseBodyType(body);
-
-            var chunkSize = this.chunkSize || 10;
-            var index = 0;
-            this.responseText = "";
-
-            do {
-                if (this.async) {
-                    this.readyStateChange(FakeXMLHttpRequest.LOADING);
-                }
-
-                this.responseText += body.substring(index, index + chunkSize);
-                index += chunkSize;
-            } while (index < body.length);
-
-            var type = this.getResponseHeader("Content-Type");
-
-            if (this.responseText &&
-                (!type || /(text\/xml)|(application\/xml)|(\+xml)/.test(type))) {
-                try {
-                    this.responseXML = FakeXMLHttpRequest.parseXML(this.responseText);
-                } catch (e) {
-                    // Unable to parse XML - no biggie
-                }
-            }
-
-            if (this.async) {
-                this.readyStateChange(FakeXMLHttpRequest.DONE);
-            } else {
-                this.readyState = FakeXMLHttpRequest.DONE;
-            }
-        },
-
-        respond: function respond(status, headers, body) {
-            this.setResponseHeaders(headers || {});
-            this.status = typeof status == "number" ? status : 200;
-            this.statusText = FakeXMLHttpRequest.statusCodes[this.status];
-            this.setResponseBody(body || "");
-        }
-    });
-
-    sinon.extend(FakeXMLHttpRequest, {
-        UNSENT: 0,
-        OPENED: 1,
-        HEADERS_RECEIVED: 2,
-        LOADING: 3,
-        DONE: 4
-    });
-
-    // Borrowed from JSpec
-    FakeXMLHttpRequest.parseXML = function parseXML(text) {
-        var xmlDoc;
-
-        if (typeof DOMParser != "undefined") {
-            var parser = new DOMParser();
-            xmlDoc = parser.parseFromString(text, "text/xml");
-        } else {
-            xmlDoc = new ActiveXObject("Microsoft.XMLDOM");
-            xmlDoc.async = "false";
-            xmlDoc.loadXML(text);
-        }
-
-        return xmlDoc;
-    };
-
-    FakeXMLHttpRequest.statusCodes = {
-        100: "Continue",
-        101: "Switching Protocols",
-        200: "OK",
-        201: "Created",
-        202: "Accepted",
-        203: "Non-Authoritative Information",
-        204: "No Content",
-        205: "Reset Content",
-        206: "Partial Content",
-        300: "Multiple Choices",
-        301: "Moved Permanently",
-        302: "Found",
-        303: "See Other",
-        304: "Not Modified",
-        305: "Use Proxy",
-        307: "Temporary Redirect",
-        400: "Bad Request",
-        401: "Unauthorized",
-        402: "Payment Required",
-        403: "Forbidden",
-        404: "Not Found",
-        405: "Method Not Allowed",
-        406: "Not Acceptable",
-        407: "Proxy Authentication Required",
-        408: "Request Timeout",
-        409: "Conflict",
-        410: "Gone",
-        411: "Length Required",
-        412: "Precondition Failed",
-        413: "Request Entity Too Large",
-        414: "Request-URI Too Long",
-        415: "Unsupported Media Type",
-        416: "Requested Range Not Satisfiable",
-        417: "Expectation Failed",
-        422: "Unprocessable Entity",
-        500: "Internal Server Error",
-        501: "Not Implemented",
-        502: "Bad Gateway",
-        503: "Service Unavailable",
-        504: "Gateway Timeout",
-        505: "HTTP Version Not Supported"
-    };
-
-    sinon.useFakeXMLHttpRequest = function () {
-        sinon.FakeXMLHttpRequest.restore = function restore(keepOnCreate) {
-            if (xhr.supportsXHR) {
-                global.XMLHttpRequest = xhr.GlobalXMLHttpRequest;
-            }
-
-            if (xhr.supportsActiveX) {
-                global.ActiveXObject = xhr.GlobalActiveXObject;
-            }
-
-            delete sinon.FakeXMLHttpRequest.restore;
-
-            if (keepOnCreate !== true) {
-                delete sinon.FakeXMLHttpRequest.onCreate;
-            }
-        };
-        if (xhr.supportsXHR) {
-            global.XMLHttpRequest = sinon.FakeXMLHttpRequest;
-        }
-
-        if (xhr.supportsActiveX) {
-            global.ActiveXObject = function ActiveXObject(objId) {
-                if (objId == "Microsoft.XMLHTTP" || /^Msxml2\.XMLHTTP/i.test(objId)) {
-
-                    return new sinon.FakeXMLHttpRequest();
-                }
-
-                return new xhr.GlobalActiveXObject(objId);
-            };
-        }
-
-        return sinon.FakeXMLHttpRequest;
-    };
-
-    sinon.FakeXMLHttpRequest = FakeXMLHttpRequest;
-})(this);
-
-if (typeof module == "object" && typeof require == "function") {
-    module.exports = sinon;
-}
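-
-// Usage sketch (illustrative only; the URL and payload are made up): install
-// the fake, collect created requests via onCreate, then answer them directly.
-//
-//     var fakeXhr = sinon.useFakeXMLHttpRequest();
-//     var requests = [];
-//     fakeXhr.onCreate = function (req) { requests.push(req); };
-//     // ... code under test issues an XMLHttpRequest against "/data" ...
-//     requests[0].respond(200, { "Content-Type": "application/json" }, '{"ok":true}');
-//     fakeXhr.restore();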
-
-/**
- * @depend fake_xml_http_request.js
- */
-/*jslint eqeqeq: false, onevar: false, regexp: false, plusplus: false*/
-/*global module, require, window*/
-/**
- * The Sinon "server" mimics a web server that receives requests from
- * sinon.FakeXMLHttpRequest and provides an API to respond to those requests,
- * both synchronously and asynchronously. To respond synchronously, canned
- * answers have to be provided upfront.
- *
- * @author Christian Johansen (christian@cjohansen.no)
- * @license BSD
- *
- * Copyright (c) 2010-2011 Christian Johansen
- */
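-//
-// Usage sketch (illustrative; the URL and body are made up): register a canned
-// answer, let the code under test fire its XHR, then flush the queue.
-//
-//     var server = sinon.fakeServer.create();
-//     server.respondWith("GET", "/api/items",
-//         [200, { "Content-Type": "application/json" }, '{"items":[]}']);
-//     // ... code under test issues GET /api/items ...
-//     server.respond();
-//     server.restore();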
-
-if (typeof sinon == "undefined") {
-    var sinon = {};
-}
-
-sinon.fakeServer = (function () {
-    var push = [].push;
-    function F() {}
-
-    function create(proto) {
-        F.prototype = proto;
-        return new F();
-    }
-
-    function responseArray(handler) {
-        var response = handler;
-
-        if (Object.prototype.toString.call(handler) != "[object Array]") {
-            response = [200, {}, handler];
-        }
-
-        if (typeof response[2] != "string") {
-            throw new TypeError("Fake server response body should be string, but was " +
-                                typeof response[2]);
-        }
-
-        return response;
-    }
-
-    var wloc = typeof window !== "undefined" ? window.location : {};
-    var rCurrLoc = new RegExp("^" + wloc.protocol + "//" + wloc.host);
-
-    function matchOne(response, reqMethod, reqUrl) {
-        var rmeth = response.method;
-        var matchMethod = !rmeth || rmeth.toLowerCase() == reqMethod.toLowerCase();
-        var url = response.url;
-        var matchUrl = !url || url == reqUrl || (typeof url.test == "function" && url.test(reqUrl));
-
-        return matchMethod && matchUrl;
-    }
-
-    function match(response, request) {
-        var requestMethod = this.getHTTPMethod(request);
-        var requestUrl = request.url;
-
-        if (!/^https?:\/\//.test(requestUrl) || rCurrLoc.test(requestUrl)) {
-            requestUrl = requestUrl.replace(rCurrLoc, "");
-        }
-
-        if (matchOne(response, this.getHTTPMethod(request), requestUrl)) {
-            if (typeof response.response == "function") {
-                var ru = response.url;
-                var args = [request].concat(!ru ? [] : requestUrl.match(ru).slice(1));
-                return response.response.apply(response, args);
-            }
-
-            return true;
-        }
-
-        return false;
-    }
-
-    return {
-        create: function () {
-            var server = create(this);
-            this.xhr = sinon.useFakeXMLHttpRequest();
-            server.requests = [];
-
-            this.xhr.onCreate = function (xhrObj) {
-                server.addRequest(xhrObj);
-            };
-
-            return server;
-        },
-
-        addRequest: function addRequest(xhrObj) {
-            var server = this;
-            push.call(this.requests, xhrObj);
-
-            xhrObj.onSend = function () {
-                server.handleRequest(this);
-            };
-
-            if (this.autoRespond && !this.responding) {
-                setTimeout(function () {
-                    server.responding = false;
-                    server.respond();
-                }, this.autoRespondAfter || 10);
-
-                this.responding = true;
-            }
-        },
-
-        getHTTPMethod: function getHTTPMethod(request) {
-            if (this.fakeHTTPMethods && /post/i.test(request.method)) {
-                var matches = (request.requestBody || "").match(/_method=([^\b;]+)/);
-                return !!matches ? matches[1] : request.method;
-            }
-
-            return request.method;
-        },
-
-        handleRequest: function handleRequest(xhr) {
-            if (xhr.async) {
-                if (!this.queue) {
-                    this.queue = [];
-                }
-
-                push.call(this.queue, xhr);
-            } else {
-                this.processRequest(xhr);
-            }
-        },
-
-        respondWith: function respondWith(method, url, body) {
-            if (arguments.length == 1 && typeof method != "function") {
-                this.response = responseArray(method);
-                return;
-            }
-
-            if (!this.responses) { this.responses = []; }
-
-            if (arguments.length == 1) {
-                body = method;
-                url = method = null;
-            }
-
-            if (arguments.length == 2) {
-                body = url;
-                url = method;
-                method = null;
-            }
-
-            push.call(this.responses, {
-                method: method,
-                url: url,
-                response: typeof body == "function" ? body : responseArray(body)
-            });
-        },
-
-        respond: function respond() {
-            if (arguments.length > 0) this.respondWith.apply(this, arguments);
-            var queue = this.queue || [];
-            var request;
-
-            while(request = queue.shift()) {
-                this.processRequest(request);
-            }
-        },
-
-        processRequest: function processRequest(request) {
-            try {
-                if (request.aborted) {
-                    return;
-                }
-
-                var response = this.response || [404, {}, ""];
-
-                if (this.responses) {
-                    for (var i = 0, l = this.responses.length; i < l; i++) {
-                        if (match.call(this, this.responses[i], request)) {
-                            response = this.responses[i].response;
-                            break;
-                        }
-                    }
-                }
-
-                if (request.readyState != 4) {
-                    request.respond(response[0], response[1], response[2]);
-                }
-            } catch (e) {
-                sinon.logError("Fake server request processing", e);
-            }
-        },
-
-        restore: function restore() {
-            return this.xhr.restore && this.xhr.restore.apply(this.xhr, arguments);
-        }
-    };
-}());
-
-if (typeof module == "object" && typeof require == "function") {
-    module.exports = sinon;
-}
-
-/**
- * @depend fake_server.js
- * @depend fake_timers.js
- */
-/*jslint browser: true, eqeqeq: false, onevar: false*/
-/*global sinon*/
-/**
- * Add-on for sinon.fakeServer that automatically handles a fake timer along with
- * the FakeXMLHttpRequest. The direct inspiration for this add-on is jQuery
- * 1.3.x, which does not use the xhr object's onreadystatechange handler at all -
- * instead, it polls the object for completion with setInterval. Despite the direct
- * motivation, there is nothing jQuery-specific in this file, so it can be used
- * in any environment where the ajax implementation depends on setInterval or
- * setTimeout.
- *
- * @author Christian Johansen (christian@cjohansen.no)
- * @license BSD
- *
- * Copyright (c) 2010-2011 Christian Johansen
- */
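-//
-// Usage sketch (illustrative): same interface as sinon.fakeServer, except that
-// respond() also ticks the fake clock far enough to fire any setTimeout or
-// setInterval scheduled while the request was pending.
-//
-//     var server = sinon.fakeServerWithClock.create();
-//     server.respondWith([200, {}, "done"]);
-//     // ... code under test polls its XHR with setInterval ...
-//     server.respond(); // answers the request, then advances the clock
-//     server.restore();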
-
-(function () {
-    function Server() {}
-    Server.prototype = sinon.fakeServer;
-
-    sinon.fakeServerWithClock = new Server();
-
-    sinon.fakeServerWithClock.addRequest = function addRequest(xhr) {
-        if (xhr.async) {
-            if (typeof setTimeout.clock == "object") {
-                this.clock = setTimeout.clock;
-            } else {
-                this.clock = sinon.useFakeTimers();
-                this.resetClock = true;
-            }
-
-            if (!this.longestTimeout) {
-                var clockSetTimeout = this.clock.setTimeout;
-                var clockSetInterval = this.clock.setInterval;
-                var server = this;
-
-                this.clock.setTimeout = function (fn, timeout) {
-                    server.longestTimeout = Math.max(timeout, server.longestTimeout || 0);
-
-                    return clockSetTimeout.apply(this, arguments);
-                };
-
-                this.clock.setInterval = function (fn, timeout) {
-                    server.longestTimeout = Math.max(timeout, server.longestTimeout || 0);
-
-                    return clockSetInterval.apply(this, arguments);
-                };
-            }
-        }
-
-        return sinon.fakeServer.addRequest.call(this, xhr);
-    };
-
-    sinon.fakeServerWithClock.respond = function respond() {
-        var returnVal = sinon.fakeServer.respond.apply(this, arguments);
-
-        if (this.clock) {
-            this.clock.tick(this.longestTimeout || 0);
-            this.longestTimeout = 0;
-
-            if (this.resetClock) {
-                this.clock.restore();
-                this.resetClock = false;
-            }
-        }
-
-        return returnVal;
-    };
-
-    sinon.fakeServerWithClock.restore = function restore() {
-        if (this.clock) {
-            this.clock.restore();
-        }
-
-        return sinon.fakeServer.restore.apply(this, arguments);
-    };
-}());
-
-/**
- * @depend ../sinon.js
- * @depend collection.js
- * @depend util/fake_timers.js
- * @depend util/fake_server_with_clock.js
- */
-/*jslint eqeqeq: false, onevar: false, plusplus: false*/
-/*global require, module*/
-/**
- * Manages fake collections as well as fake utilities such as Sinon's
- * timers and fake XHR implementation in one convenient object.
- *
- * @author Christian Johansen (christian@cjohansen.no)
- * @license BSD
- *
- * Copyright (c) 2010-2011 Christian Johansen
- */
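-//
-// Usage sketch (illustrative): a sandbox tracks every fake it creates so that
-// a single restore() undoes all of them.
-//
-//     var sandbox = sinon.sandbox.create();
-//     var clock = sandbox.useFakeTimers();
-//     var server = sandbox.useFakeServer();
-//     // ... exercise the code under test ...
-//     sandbox.restore(); // restores the clock, the server and any other fakes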
-
-if (typeof module == "object" && typeof require == "function") {
-    var sinon = require("../sinon");
-    sinon.extend(sinon, require("./util/fake_timers"));
-}
-
-(function () {
-    var push = [].push;
-
-    function exposeValue(sandbox, config, key, value) {
-        if (!value) {
-            return;
-        }
-
-        if (config.injectInto) {
-            config.injectInto[key] = value;
-        } else {
-            push.call(sandbox.args, value);
-        }
-    }
-
-    function prepareSandboxFromConfig(config) {
-        var sandbox = sinon.create(sinon.sandbox);
-
-        if (config.useFakeServer) {
-            if (typeof config.useFakeServer == "object") {
-                sandbox.serverPrototype = config.useFakeServer;
-            }
-
-            sandbox.useFakeServer();
-        }
-
-        if (config.useFakeTimers) {
-            if (typeof config.useFakeTimers == "object") {
-                sandbox.useFakeTimers.apply(sandbox, config.useFakeTimers);
-            } else {
-                sandbox.useFakeTimers();
-            }
-        }
-
-        return sandbox;
-    }
-
-    sinon.sandbox = sinon.extend(sinon.create(sinon.collection), {
-        useFakeTimers: function useFakeTimers() {
-            this.clock = sinon.useFakeTimers.apply(sinon, arguments);
-
-            return this.add(this.clock);
-        },
-
-        serverPrototype: sinon.fakeServer,
-
-        useFakeServer: function useFakeServer() {
-            var proto = this.serverPrototype || sinon.fakeServer;
-
-            if (!proto || !proto.create) {
-                return null;
-            }
-
-            this.server = proto.create();
-            return this.add(this.server);
-        },
-
-        inject: function (obj) {
-            sinon.collection.inject.call(this, obj);
-
-            if (this.clock) {
-                obj.clock = this.clock;
-            }
-
-            if (this.server) {
-                obj.server = this.server;
-                obj.requests = this.server.requests;
-            }
-
-            return obj;
-        },
-
-        create: function (config) {
-            if (!config) {
-                return sinon.create(sinon.sandbox);
-            }
-
-            var sandbox = prepareSandboxFromConfig(config);
-            sandbox.args = sandbox.args || [];
-            var prop, value, exposed = sandbox.inject({});
-
-            if (config.properties) {
-                for (var i = 0, l = config.properties.length; i < l; i++) {
-                    prop = config.properties[i];
-                    value = exposed[prop] || prop == "sandbox" && sandbox;
-                    exposeValue(sandbox, config, prop, value);
-                }
-            } else {
-                exposeValue(sandbox, config, "sandbox", sandbox); // expose the sandbox itself; "value" is never assigned on this path
-            }
-
-            return sandbox;
-        }
-    });
-
-    sinon.sandbox.useFakeXMLHttpRequest = sinon.sandbox.useFakeServer;
-
-    if (typeof module == "object" && typeof require == "function") {
-        module.exports = sinon.sandbox;
-    }
-}());
-
-/**
- * @depend ../sinon.js
- * @depend stub.js
- * @depend mock.js
- * @depend sandbox.js
- */
-/*jslint eqeqeq: false, onevar: false, forin: true, plusplus: false*/
-/*global module, require, sinon*/
-/**
- * Test function, sandboxes fakes
- *
- * @author Christian Johansen (christian@cjohansen.no)
- * @license BSD
- *
- * Copyright (c) 2010-2011 Christian Johansen
- */
-
-(function (sinon) {
-    var commonJSModule = typeof module == "object" && typeof require == "function";
-
-    if (!sinon && commonJSModule) {
-        sinon = require("../sinon");
-    }
-
-    if (!sinon) {
-        return;
-    }
-
-    function test(callback) {
-        var type = typeof callback;
-
-        if (type != "function") {
-            throw new TypeError("sinon.test needs to wrap a test function, got " + type);
-        }
-
-        return function () {
-            var config = sinon.getConfig(sinon.config);
-            config.injectInto = config.injectIntoThis && this || config.injectInto;
-            var sandbox = sinon.sandbox.create(config);
-            var result;
-            var args = Array.prototype.slice.call(arguments).concat(sandbox.args);
-
-            try {
-                result = callback.apply(this, args);
-            } finally {
-                sandbox.verifyAndRestore();
-            }
-
-            return result;
-        };
-    }
-
-    test.config = {
-        injectIntoThis: true,
-        injectInto: null,
-        properties: ["spy", "stub", "mock", "clock", "server", "requests"],
-        useFakeTimers: true,
-        useFakeServer: true
-    };
-
-    if (commonJSModule) {
-        module.exports = test;
-    } else {
-        sinon.test = test;
-    }
-}(typeof sinon == "object" && sinon || null));
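-
-// Usage sketch (illustrative; myLib and its methods are hypothetical): the
-// wrapped function runs inside a sandbox built from sinon.config, with
-// this.spy/this.stub/this.mock and friends injected, and all fakes are
-// verified and restored when the function exits.
-//
-//     var testSave = sinon.test(function () {
-//         this.stub(myLib, "persist").returns(true); // hypothetical API
-//         myLib.save({ id: 1 });
-//         sinon.assert.calledOnce(myLib.persist);
-//     });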
-
-/**
- * @depend ../sinon.js
- * @depend test.js
- */
-/*jslint eqeqeq: false, onevar: false*/
-/*global module, require, sinon*/
-/**
- * Test case, sandboxes all test functions
- *
- * @author Christian Johansen (christian@cjohansen.no)
- * @license BSD
- *
- * Copyright (c) 2010-2011 Christian Johansen
- */
-
-(function (sinon) {
-    var commonJSModule = typeof module == "object" && typeof require == "function";
-
-    if (!sinon && commonJSModule) {
-        sinon = require("../sinon");
-    }
-
-    if (!sinon || !Object.prototype.hasOwnProperty) {
-        return;
-    }
-
-    function createTest(property, setUp, tearDown) {
-        return function () {
-            if (setUp) {
-                setUp.apply(this, arguments);
-            }
-
-            var exception, result;
-
-            try {
-                result = property.apply(this, arguments);
-            } catch (e) {
-                exception = e;
-            }
-
-            if (tearDown) {
-                tearDown.apply(this, arguments);
-            }
-
-            if (exception) {
-                throw exception;
-            }
-
-            return result;
-        };
-    }
-
-    function testCase(tests, prefix) {
-        /*jsl:ignore*/
-        if (!tests || typeof tests != "object") {
-            throw new TypeError("sinon.testCase needs an object with test functions");
-        }
-        /*jsl:end*/
-
-        prefix = prefix || "test";
-        var rPrefix = new RegExp("^" + prefix);
-        var methods = {}, testName, property, method;
-        var setUp = tests.setUp;
-        var tearDown = tests.tearDown;
-
-        for (testName in tests) {
-            if (tests.hasOwnProperty(testName)) {
-                property = tests[testName];
-
-                if (/^(setUp|tearDown)$/.test(testName)) {
-                    continue;
-                }
-
-                if (typeof property == "function" && rPrefix.test(testName)) {
-                    method = property;
-
-                    if (setUp || tearDown) {
-                        method = createTest(property, setUp, tearDown);
-                    }
-
-                    methods[testName] = sinon.test(method);
-                } else {
-                    methods[testName] = tests[testName];
-                }
-            }
-        }
-
-        return methods;
-    }
-
-    if (commonJSModule) {
-        module.exports = testCase;
-    } else {
-        sinon.testCase = testCase;
-    }
-}(typeof sinon == "object" && sinon || null));
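-
-// Usage sketch (illustrative; assertEquals stands in for whatever assertion
-// helper the host test framework provides): functions whose names start with
-// the prefix (default "test") are wrapped with sinon.test, and setUp/tearDown,
-// if present, run around each of them.
-//
-//     var suite = sinon.testCase({
-//         setUp: function () { this.answer = 42; },
-//         testAnswer: function () { assertEquals(42, this.answer); },
-//         tearDown: function () { delete this.answer; }
-//     });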
-
-/**
- * @depend ../sinon.js
- * @depend stub.js
- */
-/*jslint eqeqeq: false, onevar: false, nomen: false, plusplus: false*/
-/*global module, require, sinon*/
-/**
- * Assertions matching the test spy retrieval interface.
- *
- * @author Christian Johansen (christian@cjohansen.no)
- * @license BSD
- *
- * Copyright (c) 2010-2011 Christian Johansen
- */
-
-(function (sinon, global) {
-    var commonJSModule = typeof module == "object" && typeof require == "function";
-    var slice = Array.prototype.slice;
-    var assert;
-
-    if (!sinon && commonJSModule) {
-        sinon = require("../sinon");
-    }
-
-    if (!sinon) {
-        return;
-    }
-
-    function verifyIsStub() {
-        var method;
-
-        for (var i = 0, l = arguments.length; i < l; ++i) {
-            method = arguments[i];
-
-            if (!method) {
-                assert.fail("fake is not a spy");
-            }
-
-            if (typeof method != "function") {
-                assert.fail(method + " is not a function");
-            }
-
-            if (typeof method.getCall != "function") {
-                assert.fail(method + " is not stubbed");
-            }
-        }
-    }
-
-    function failAssertion(object, msg) {
-        object = object || global;
-        var failMethod = object.fail || assert.fail;
-        failMethod.call(object, msg);
-    }
-
-    function mirrorPropAsAssertion(name, method, message) {
-        if (arguments.length == 2) {
-            message = method;
-            method = name;
-        }
-
-        assert[name] = function (fake) {
-            verifyIsStub(fake);
-
-            var args = slice.call(arguments, 1);
-            var failed = false;
-
-            if (typeof method == "function") {
-                failed = !method(fake);
-            } else {
-                failed = typeof fake[method] == "function" ?
-                    !fake[method].apply(fake, args) : !fake[method];
-            }
-
-            if (failed) {
-                failAssertion(this, fake.printf.apply(fake, [message].concat(args)));
-            } else {
-                assert.pass(name);
-            }
-        };
-    }
-
-    function exposedName(prefix, prop) {
-        return !prefix || /^fail/.test(prop) ? prop :
-            prefix + prop.slice(0, 1).toUpperCase() + prop.slice(1);
-    }
-
-    assert = {
-        failException: "AssertError",
-
-        fail: function fail(message) {
-            var error = new Error(message);
-            error.name = this.failException || assert.failException;
-
-            throw error;
-        },
-
-        pass: function pass(assertion) {},
-
-        callOrder: function assertCallOrder() {
-            verifyIsStub.apply(null, arguments);
-            var expected = "", actual = "";
-
-            if (!sinon.calledInOrder(arguments)) {
-                try {
-                    expected = [].join.call(arguments, ", ");
-                    actual = sinon.orderByFirstCall(slice.call(arguments)).join(", ");
-                } catch (e) {
-                    // If this fails, we'll just fall back to the blank string
-                }
-
-                failAssertion(this, "expected " + expected + " to be " +
-                              "called in order but were called as " + actual);
-            } else {
-                assert.pass("callOrder");
-            }
-        },
-
-        callCount: function assertCallCount(method, count) {
-            verifyIsStub(method);
-
-            if (method.callCount != count) {
-                var msg = "expected %n to be called " + sinon.timesInWords(count) +
-                    " but was called %c%C";
-                failAssertion(this, method.printf(msg));
-            } else {
-                assert.pass("callCount");
-            }
-        },
-
-        expose: function expose(target, options) {
-            if (!target) {
-                throw new TypeError("target is null or undefined");
-            }
-
-            var o = options || {};
-            var prefix = typeof o.prefix == "undefined" && "assert" || o.prefix;
-            var includeFail = typeof o.includeFail == "undefined" || !!o.includeFail;
-
-            for (var method in this) {
-                if (method != "export" && (includeFail || !/^(fail)/.test(method))) {
-                    target[exposedName(prefix, method)] = this[method];
-                }
-            }
-
-            return target;
-        }
-    };
-
-    mirrorPropAsAssertion("called", "expected %n to have been called at least once but was never called");
-    mirrorPropAsAssertion("notCalled", function (spy) { return !spy.called; },
-                          "expected %n to not have been called but was called %c%C");
-    mirrorPropAsAssertion("calledOnce", "expected %n to be called once but was called %c%C");
-    mirrorPropAsAssertion("calledTwice", "expected %n to be called twice but was called %c%C");
-    mirrorPropAsAssertion("calledThrice", "expected %n to be called thrice but was called %c%C");
-    mirrorPropAsAssertion("calledOn", "expected %n to be called with %1 as this but was called with %t");
-    mirrorPropAsAssertion("alwaysCalledOn", "expected %n to always be called with %1 as this but was called with %t");
-    mirrorPropAsAssertion("calledWithNew", "expected %n to be called with new");
-    mirrorPropAsAssertion("alwaysCalledWithNew", "expected %n to always be called with new");
-    mirrorPropAsAssertion("calledWith", "expected %n to be called with arguments %*%C");
-    mirrorPropAsAssertion("calledWithMatch", "expected %n to be called with match %*%C");
-    mirrorPropAsAssertion("alwaysCalledWith", "expected %n to always be called with arguments %*%C");
-    mirrorPropAsAssertion("alwaysCalledWithMatch", "expected %n to always be called with match %*%C");
-    mirrorPropAsAssertion("calledWithExactly", "expected %n to be called with exact arguments %*%C");
-    mirrorPropAsAssertion("alwaysCalledWithExactly", "expected %n to always be called with exact arguments %*%C");
-    mirrorPropAsAssertion("neverCalledWith", "expected %n to never be called with arguments %*%C");
-    mirrorPropAsAssertion("neverCalledWithMatch", "expected %n to never be called with match %*%C");
-    mirrorPropAsAssertion("threw", "%n did not throw exception%C");
-    mirrorPropAsAssertion("alwaysThrew", "%n did not always throw exception%C");
-
-    if (commonJSModule) {
-        module.exports = assert;
-    } else {
-        sinon.assert = assert;
-    }
-}(typeof sinon == "object" && sinon || null, typeof window != "undefined" ? window : global));
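-
-// Usage sketch (illustrative): each assertion takes the spy or stub under test
-// and throws an AssertError with a formatted message on failure.
-//
-//     var spy = sinon.spy();
-//     spy("hello");
-//     sinon.assert.calledOnce(spy);
-//     sinon.assert.calledWith(spy, "hello");
-//     sinon.assert.expose(myScope); // adds assertCalledWith(...) etc. to a hypothetical scope object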
-
-return sinon;}.call(typeof window != 'undefined' && window || {}));
diff --git a/branch-1.2/ambari-web/vendor/scripts/spin.js b/branch-1.2/ambari-web/vendor/scripts/spin.js
deleted file mode 100644
index 434d76a..0000000
--- a/branch-1.2/ambari-web/vendor/scripts/spin.js
+++ /dev/null
@@ -1,320 +0,0 @@
-//fgnass.github.com/spin.js#v1.2.7
-!function(window, document, undefined) {
-
-  /**
-   * Copyright (c) 2011 Felix Gnass [fgnass at neteye dot de]
-   * Licensed under the MIT license
-   */
-
-  var prefixes = ['webkit', 'Moz', 'ms', 'O'] /* Vendor prefixes */
-    , animations = {} /* Animation rules keyed by their name */
-    , useCssAnimations
-
-  /**
-   * Utility function to create elements. If no tag name is given,
-   * a DIV is created. Optionally properties can be passed.
-   */
-  function createEl(tag, prop) {
-    var el = document.createElement(tag || 'div')
-      , n
-
-    for(n in prop) el[n] = prop[n]
-    return el
-  }
-
-  /**
-   * Appends children and returns the parent.
-   */
-  function ins(parent /* child1, child2, ...*/) {
-    for (var i=1, n=arguments.length; i<n; i++)
-      parent.appendChild(arguments[i])
-
-    return parent
-  }
-
-  /**
-   * Insert a new stylesheet to hold the @keyframe or VML rules.
-   */
-  var sheet = function() {
-    var el = createEl('style', {type : 'text/css'})
-    ins(document.getElementsByTagName('head')[0], el)
-    return el.sheet || el.styleSheet
-  }()
-
-  /**
-   * Creates an opacity keyframe animation rule and returns its name.
-   * Since most mobile Webkits have timing issues with animation-delay,
-   * we create separate rules for each line/segment.
-   */
-  function addAnimation(alpha, trail, i, lines) {
-    var name = ['opacity', trail, ~~(alpha*100), i, lines].join('-')
-      , start = 0.01 + i/lines*100
-      , z = Math.max(1 - (1-alpha) / trail * (100-start), alpha)
-      , prefix = useCssAnimations.substring(0, useCssAnimations.indexOf('Animation')).toLowerCase()
-      , pre = prefix && '-'+prefix+'-' || ''
-
-    if (!animations[name]) {
-      sheet.insertRule(
-        '@' + pre + 'keyframes ' + name + '{' +
-        '0%{opacity:' + z + '}' +
-        start + '%{opacity:' + alpha + '}' +
-        (start+0.01) + '%{opacity:1}' +
-        (start+trail) % 100 + '%{opacity:' + alpha + '}' +
-        '100%{opacity:' + z + '}' +
-        '}', sheet.cssRules.length)
-
-      animations[name] = 1
-    }
-    return name
-  }
-
-  /**
-   * Tries various vendor prefixes and returns the first supported property.
-   **/
-  function vendor(el, prop) {
-    var s = el.style
-      , pp
-      , i
-
-    if(s[prop] !== undefined) return prop
-    prop = prop.charAt(0).toUpperCase() + prop.slice(1)
-    for(i=0; i<prefixes.length; i++) {
-      pp = prefixes[i]+prop
-      if(s[pp] !== undefined) return pp
-    }
-  }
-
-  /**
-   * Sets multiple style properties at once.
-   */
-  function css(el, prop) {
-    for (var n in prop)
-      el.style[vendor(el, n)||n] = prop[n]
-
-    return el
-  }
-
-  /**
-   * Fills in default values.
-   */
-  function merge(obj) {
-    for (var i=1; i < arguments.length; i++) {
-      var def = arguments[i]
-      for (var n in def)
-        if (obj[n] === undefined) obj[n] = def[n]
-    }
-    return obj
-  }
-
-  /**
-   * Returns the absolute page-offset of the given element.
-   */
-  function pos(el) {
-    var o = { x:el.offsetLeft, y:el.offsetTop }
-    while((el = el.offsetParent))
-      o.x+=el.offsetLeft, o.y+=el.offsetTop
-
-    return o
-  }
-
-  var defaults = {
-    lines: 12,            // The number of lines to draw
-    length: 7,            // The length of each line
-    width: 5,             // The line thickness
-    radius: 10,           // The radius of the inner circle
-    rotate: 0,            // Rotation offset
-    corners: 1,           // Roundness (0..1)
-    color: '#000',        // #rgb or #rrggbb
-    speed: 1,             // Rounds per second
-    trail: 100,           // Afterglow percentage
-    opacity: 1/4,         // Opacity of the lines
-    fps: 20,              // Frames per second when using setTimeout()
-    zIndex: 2e9,          // Use a high z-index by default
-    className: 'spinner', // CSS class to assign to the element
-    top: 'auto',          // center vertically
-    left: 'auto',         // center horizontally
-    position: 'relative'  // element position
-  }
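-
-  // Usage sketch (illustrative; the element id is made up). Any of the
-  // defaults above can be overridden per instance:
-  //
-  //     var target = document.getElementById('spinner')
-  //     var spinner = new Spinner({lines: 13, length: 10, color: '#fff'}).spin(target)
-  //     /* ... later ... */
-  //     spinner.stop()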
-
-  /** The constructor */
-  var Spinner = function Spinner(o) {
-    if (!this.spin) return new Spinner(o)
-    this.opts = merge(o || {}, Spinner.defaults, defaults)
-  }
-
-  Spinner.defaults = {}
-
-  merge(Spinner.prototype, {
-    spin: function(target) {
-      this.stop()
-      var self = this
-        , o = self.opts
-        , el = self.el = css(createEl(0, {className: o.className}), {position: o.position, width: 0, zIndex: o.zIndex})
-        , mid = o.radius+o.length+o.width
-        , ep // element position
-        , tp // target position
-
-      if (target) {
-        target.insertBefore(el, target.firstChild||null)
-        tp = pos(target)
-        ep = pos(el)
-        css(el, {
-          left: (o.left == 'auto' ? tp.x-ep.x + (target.offsetWidth >> 1) : parseInt(o.left, 10) + mid) + 'px',
-          top: (o.top == 'auto' ? tp.y-ep.y + (target.offsetHeight >> 1) : parseInt(o.top, 10) + mid)  + 'px'
-        })
-      }
-
-      el.setAttribute('role', 'progressbar')
-      self.lines(el, self.opts)
-
-      if (!useCssAnimations) {
-        // No CSS animation support, use setTimeout() instead
-        var i = 0
-          , fps = o.fps
-          , f = fps/o.speed
-          , ostep = (1-o.opacity) / (f*o.trail / 100)
-          , astep = f/o.lines
-
-        ;(function anim() {
-          i++;
-          for (var s=o.lines; s; s--) {
-            var alpha = Math.max(1-(i+s*astep)%f * ostep, o.opacity)
-            self.opacity(el, o.lines-s, alpha, o)
-          }
-          self.timeout = self.el && setTimeout(anim, ~~(1000/fps))
-        })()
-      }
-      return self
-    },
-
-    stop: function() {
-      var el = this.el
-      if (el) {
-        clearTimeout(this.timeout)
-        if (el.parentNode) el.parentNode.removeChild(el)
-        this.el = undefined
-      }
-      return this
-    },
-
-    lines: function(el, o) {
-      var i = 0
-        , seg
-
-      function fill(color, shadow) {
-        return css(createEl(), {
-          position: 'absolute',
-          width: (o.length+o.width) + 'px',
-          height: o.width + 'px',
-          background: color,
-          boxShadow: shadow,
-          transformOrigin: 'left',
-          transform: 'rotate(' + ~~(360/o.lines*i+o.rotate) + 'deg) translate(' + o.radius+'px' +',0)',
-          borderRadius: (o.corners * o.width>>1) + 'px'
-        })
-      }
-
-      for (; i < o.lines; i++) {
-        seg = css(createEl(), {
-          position: 'absolute',
-          top: 1+~(o.width/2) + 'px',
-          transform: o.hwaccel ? 'translate3d(0,0,0)' : '',
-          opacity: o.opacity,
-          animation: useCssAnimations && addAnimation(o.opacity, o.trail, i, o.lines) + ' ' + 1/o.speed + 's linear infinite'
-        })
-
-        if (o.shadow) ins(seg, css(fill('#000', '0 0 4px ' + '#000'), {top: 2+'px'}))
-
-        ins(el, ins(seg, fill(o.color, '0 0 1px rgba(0,0,0,.1)')))
-      }
-      return el
-    },
-
-    opacity: function(el, i, val) {
-      if (i < el.childNodes.length) el.childNodes[i].style.opacity = val
-    }
-
-  })
-
-  /////////////////////////////////////////////////////////////////////////
-  // VML rendering for IE
-  /////////////////////////////////////////////////////////////////////////
-
-  /**
-   * Check and init VML support
-   */
-  ;(function() {
-
-    function vml(tag, attr) {
-      return createEl('<' + tag + ' xmlns="urn:schemas-microsoft.com:vml" class="spin-vml">', attr)
-    }
-
-    var s = css(createEl('group'), {behavior: 'url(#default#VML)'})
-
-    if (!vendor(s, 'transform') && s.adj) {
-
-      // VML support detected. Insert CSS rule ...
-      sheet.addRule('.spin-vml', 'behavior:url(#default#VML)')
-
-      Spinner.prototype.lines = function(el, o) {
-        var r = o.length+o.width
-          , s = 2*r
-
-        function grp() {
-          return css(
-            vml('group', {
-              coordsize: s + ' ' + s,
-              coordorigin: -r + ' ' + -r
-            }),
-            { width: s, height: s }
-          )
-        }
-
-        var margin = -(o.width+o.length)*2 + 'px'
-          , g = css(grp(), {position: 'absolute', top: margin, left: margin})
-          , i
-
-        function seg(i, dx, filter) {
-          ins(g,
-            ins(css(grp(), {rotation: 360 / o.lines * i + 'deg', left: ~~dx}),
-              ins(css(vml('roundrect', {arcsize: o.corners}), {
-                  width: r,
-                  height: o.width,
-                  left: o.radius,
-                  top: -o.width>>1,
-                  filter: filter
-                }),
-                vml('fill', {color: o.color, opacity: o.opacity}),
-                vml('stroke', {opacity: 0}) // transparent stroke to fix color bleeding upon opacity change
-              )
-            )
-          )
-        }
-
-        if (o.shadow)
-          for (i = 1; i <= o.lines; i++)
-            seg(i, -2, 'progid:DXImageTransform.Microsoft.Blur(pixelradius=2,makeshadow=1,shadowopacity=.3)')
-
-        for (i = 1; i <= o.lines; i++) seg(i)
-        return ins(el, g)
-      }
-
-      Spinner.prototype.opacity = function(el, i, val, o) {
-        var c = el.firstChild
-        o = o.shadow && o.lines || 0
-        if (c && i+o < c.childNodes.length) {
-          c = c.childNodes[i+o]; c = c && c.firstChild; c = c && c.firstChild
-          if (c) c.opacity = val
-        }
-      }
-    }
-    else
-      useCssAnimations = vendor(s, 'animation')
-  })()
-
-  if (typeof define == 'function' && define.amd)
-    define(function() { return Spinner })
-  else
-    window.Spinner = Spinner
-
-}(window, document);
diff --git a/branch-1.2/ambari-web/vendor/scripts/workflow_visualization.js b/branch-1.2/ambari-web/vendor/scripts/workflow_visualization.js
deleted file mode 100644
index ebfb78c..0000000
--- a/branch-1.2/ambari-web/vendor/scripts/workflow_visualization.js
+++ /dev/null
@@ -1,341 +0,0 @@
-/*
- * Example usage:
- *
- *  var dv = new DagViewer('pig_5')
- *  .setData(workflowData,jobsData)
- *  .drawDag(svgWidth,svgHeight,nodeHeight);
- */
-function DagViewer(domId) {
-  // initialize variables
-  this._nodes = [];
-  this._links = [];
-  this._numNodes = 0;
-  this._id = domId;
-}
-
-// set workflow schema and job data
-DagViewer.prototype.setData = function (wfData, jobData) {
-  // create map from entity names to nodes
-  var existingNodes = {};
-  jobData = jobData || [];
-  var minStartTime = 0;
-  if (jobData.length > 0)
-    minStartTime = jobData[0].submitTime;
-  var maxFinishTime = 0;
-  // iterate through job data
-  for (var i = 0; i < jobData.length; i++) {
-    minStartTime = Math.min(minStartTime, jobData[i].submitTime);
-    maxFinishTime = Math.max(maxFinishTime, jobData[i].submitTime + jobData[i].elapsedTime);
-    this._addNode(existingNodes, jobData[i].entityName, jobData[i]);
-  }
-  this._minStartTime = minStartTime;
-  this._maxFinishTime = maxFinishTime;
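-  // wfData arrives as a JSON-like string; eval, unlike JSON.parse, also
-  // tolerates input that is not strict JSON (e.g. unquoted keys)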
-  var dag = eval('(' + wfData + ')').dag;
-  this._sourceMarker = [];
-  this._targetMarker = [];
-  this._sourceMap = {};
-  // for each source node in the context, create links between it and its target nodes
-  for (var source in dag) {
-    var sourceNode = null;
-    if (source in existingNodes)
-      sourceNode = existingNodes[source];
-    for (var i = 0; i < dag[source].length; i++) {
-      var targetNode = null;
-      if (dag[source][i] in existingNodes)
-        targetNode = existingNodes[dag[source][i]];
-      this._addLink(sourceNode, targetNode);
-    }
-  }
-  return this;
-}
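-
-// Illustrative data shapes, inferred from the parsing above (field names are
-// the ones this file reads; the concrete values are made up):
-//   wfData:  '{"dag":{"job_1":["job_2","job_3"],"job_2":["job_3"]}}'
-//   jobData: [{entityName:"job_1", name:"job_1", submitTime:0,
-//              elapsedTime:5000, status:true, output:1024}, ...]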
-
-// add a node to the nodes array and to a provided map of entity names to nodes
-DagViewer.prototype._addNode = function (existingNodes, entityName, node) {
-  existingNodes[entityName] = node;
-  this._nodes.push(node);
-  this._numNodes++;
-}
-
-// add a link between sourceNode and targetNode
-DagViewer.prototype._addLink = function (sourceNode, targetNode) {
-  // if source or target is null, add marker indicating unsubmitted job and return
-  if (sourceNode==null) {
-    if (targetNode==null)
-      return;
-    this._sourceMarker.push(targetNode);
-    return;
-  }
-  if (targetNode==null) {
-    this._targetMarker.push(sourceNode);
-    return;
-  }
-  // add link between nodes
-  var status = false;
-  if (sourceNode.status && targetNode.status)
-    status = true;
-  this._links.push({"source":sourceNode, "target":targetNode, "status":status, "value":sourceNode.output});
-  // add source to map of targets to sources
-  if (!(targetNode.name in this._sourceMap))
-    this._sourceMap[targetNode.name] = [];
-  this._sourceMap[targetNode.name].push(sourceNode);
-}
-
-// display the graph
-// rules of thumb: nodeHeight = 20, labelFontSize = 14, maxLabelWidth = 180
-//                 nodeHeight = 15, labelFontSize = 10, maxLabelWidth = 120
-//                 nodeHeight = 40, labelFontSize = 20, maxLabelWidth = 260
-//                 nodeHeight = 30, labelFontSize = 16
-DagViewer.prototype.drawDag = function (svgw, svgh, nodeHeight, labelFontSize, maxLabelWidth, axisPadding, svgPadding) {
-  this._addTimelineGraph(svgw, svgh, nodeHeight || 20, labelFontSize || 14, maxLabelWidth || 180, axisPadding || 30, svgPadding || 20);
-  return this;
-}
-
-// draw timeline graph
-DagViewer.prototype._addTimelineGraph = function (svgw, svgh, nodeHeight, labelFontSize, maxLabelWidth, axisPadding, svgPadding) {
-  // want to avoid having unnecessary scrollbars, so we need to size the div slightly larger than the svg
-  svgw = svgw - svgPadding;
-
-  var margin = {"top":10, "bottom":10, "left":30, "right":30};
-  var w = svgw - margin.left - margin.right;
-
-  var startTime = this._minStartTime;
-  var elapsedTime = this._maxFinishTime - this._minStartTime;
-  var x = d3.time.scale()
-    .domain([0, elapsedTime])
-    .range([0, w]);
-
-  // process nodes and determine their x and y positions, width and height
-  var minNodeSpacing = nodeHeight/2;
-  var ends = [];
-  var maxIndex = 0;
-  this._nodes = this._nodes.sort(function(a,b){return a.name.localeCompare(b.name);});
-  for (var i = 0; i < this._numNodes; i++) {
-    var d = this._nodes[i];
-    d.x = x(d.submitTime-startTime);
-    d.w = x(d.elapsedTime+d.submitTime-startTime) - x(d.submitTime-startTime);
-    if (d.w < nodeHeight/2) {
-      d.w = nodeHeight/2;
-      if (d.x + d.w > w)
-        d.x = w - d.w;
-    }
-    var effectiveX = d.x;
-    var effectiveWidth = d.w;
-    if (d.w < maxLabelWidth) {
-      effectiveWidth = maxLabelWidth;
-      if (d.x + effectiveWidth > w)
-        effectiveX = w - effectiveWidth;
-      else if (d.x > 0)
-        effectiveX = d.x+(d.w-maxLabelWidth)/2;
-    }
-    // select "lane" (slot for y-position) for this node
-    // starting at the slot above the node's closest source node (or 0, if none exists)
-    // and moving down until a slot is found that has no nodes within minNodeSpacing of this node
-    // excluding slots that contain more than one source of this node
-    var index = 0;
-    var rejectIndices = [];
-    if (d.name in this._sourceMap) {
-      var sources = this._sourceMap[d.name];
-      var closestSource = sources[0];
-      var indices = [];
-      for (var j = 0; j < sources.length; j++) {
-        if (sources[j].index in indices)
-          rejectIndices[sources[j].index] = true;
-        indices[sources[j].index] = true;
-        if (sources[j].submitTime + sources[j].elapsedTime > closestSource.submitTime + closestSource.elapsedTime)
-          closestSource = sources[j];
-      }
-      index = Math.max(0, closestSource.index-1);
-    }
-    while ((index in ends) && ((index in rejectIndices) || (ends[index]+minNodeSpacing >= effectiveX))) {
-      index++;
-    }
-    ends[index] = effectiveX + effectiveWidth;
-    maxIndex = Math.max(maxIndex, index);
-    d.y = index*2*nodeHeight + axisPadding;
-    d.h = nodeHeight;
-    d.index = index;
-  }
-
-  var h = 2*axisPadding + 2*nodeHeight*(maxIndex+1);
-  d3.select("div#" + this._id)
-    .attr("style","width:"+(svgw+svgPadding)+"px;height:"+Math.min(svgh,h+margin.top+margin.bottom+svgPadding)+"px;overflow:auto;padding:none;");
-  svgh = h + margin.top + margin.bottom;
-  var svg = d3.select("div#" + this._id).append("svg:svg")
-    .attr("width", svgw+"px")
-    .attr("height", svgh+"px");
-  var svgg = svg.append("g")
-    .attr("transform", "translate("+margin.left+","+margin.top+")");
-  
-  // create axes, reusing the time scale x built above
-  var tickFormatter = function(x) {
-    var d = x.getTime();
-    if (d==0) { return "0" }
-    var seconds = Math.floor(d / 1000);
-    if ( seconds < 60 )
-      return seconds + "s";
-    var minutes = Math.floor(seconds / 60);
-    if ( minutes < 60 ) {
-      var x = seconds - 60*minutes;
-      return minutes + "m" + (x==0 ? "" : " " + x + "s");
-    }
-    var hours = Math.floor(minutes / 60);
-    if ( hours < 24 ) {
-      var x = minutes - 60*hours;
-      return hours + "h" + (x==0 ? "" : " " + x + "m");
-    }
-    var days = Math.floor(hours / 24);
-    if ( days < 7 ) {
-      var x = hours - 24*days;
-      return days + "d " + (x==0 ? "" : " " + x + "h");
-    }
-    var weeks = Math.floor(days / 7);
-    var x = days - 7*weeks;
-    return weeks + "w " + (x==0 ? "" : " " + x + "d");
-  };
-  var topAxis = d3.svg.axis()
-    .scale(x)
-    .orient("bottom")
-    .tickFormat(tickFormatter);
-  var bottomAxis = d3.svg.axis()
-    .scale(x)
-    .orient("top")
-    .tickFormat(tickFormatter);
-  svgg.append("g")
-    .attr("class", "x axis")
-    .call(topAxis);
-  svgg.append("g")
-    .attr("class", "x axis")
-    .call(bottomAxis)
-    .attr("transform", "translate(0,"+h+")");
-  
-  // create a rectangle for each node
-  var boxes = svgg.append("svg:g").selectAll("rect")
-    .data(this._nodes)
-    .enter().append("svg:rect")
-    .attr("x", function(d) { return d.x; } )
-    .attr("y", function(d) { return d.y; } )
-    .attr("width", function(d) { return d.w; } )
-    .attr("height", function(d) { return d.h; } )
-    .attr("class", function (d) {
-      return "node " + (d.status ? " finished" : "");
-    })
-    .attr("id", function (d) {
-      return d.name;
-    });
-  
-  // defs for arrowheads marked as to whether they link finished jobs or not
-  svgg.append("svg:defs").selectAll("arrowmarker")
-    .data(["finished", "unfinished"])
-    .enter().append("svg:marker")
-    .attr("id", String)
-    .attr("viewBox", "0 -5 10 10")
-    .attr("markerWidth", 6)
-    .attr("markerHeight", 6)
-    .attr("orient", "auto")
-    .append("svg:path")
-    .attr("d", "M0,-3L8,0L0,3");
-  // defs for unsubmitted node marker
-  svgg.append("svg:defs").selectAll("circlemarker")
-    .data(["circle"])
-    .enter().append("svg:marker")
-    .attr("id", String)
-    .attr("viewBox", "-2 -2 18 18")
-    .attr("markerWidth", 10)
-    .attr("markerHeight", 10)
-    .attr("refX", 10)
-    .attr("refY", 5)
-    .attr("orient", "auto")
-    .append("svg:circle")
-    .attr("cx", 5)
-    .attr("cy", 5)
-    .attr("r", 5);
-
-  // create dangling links representing unsubmitted jobs
-  var markerWidth = nodeHeight/2;
-  var sourceMarker = svgg.append("svg:g").selectAll("line")
-    .data(this._sourceMarker)
-    .enter().append("svg:line")
-    .attr("x1", function(d) { return d.x - markerWidth; } )
-    .attr("x2", function(d) { return d.x; } )
-    .attr("y1", function(d) { return d.y; } )
-    .attr("y2", function(d) { return d.y + 3; } )
-    .attr("class", "source mark")
-    .attr("marker-start", "url(#circle)");
-  var targetMarker = svgg.append("svg:g").selectAll("line")
-    .data(this._targetMarker)
-    .enter().append("svg:line")
-    .attr("x1", function(d) { return d.x + d.w + markerWidth; } )
-    .attr("x2", function(d) { return d.x + d.w; } )
-    .attr("y1", function(d) { return d.y + d.h; } )
-    .attr("y2", function(d) { return d.y + d.h - 3; } )
-    .attr("class", "target mark")
-    .attr("marker-start", "url(#circle)");
-
-  // create links between the nodes
-  var lines = svgg.append("svg:g").selectAll("path")
-    .data(this._links)
-    .enter().append("svg:path")
-    .attr("d", function(d) {
-      var s = d.source;
-      var t = d.target;
-      var x1 = s.x + s.w;
-      var x2 = t.x;
-      var y1 = s.y;
-      var y2 = t.y;
-      if (y1==y2) {
-        y1 += s.h/2;
-        y2 += t.h/2;
-      } else if (y1 < y2) {
-        y1 += s.h;
-      } else {
-        y2 += t.h;
-      }
-      return "M "+x1+" "+y1+" L "+((x2+x1)/2)+" "+((y2+y1)/2)+" L "+x2+" "+y2;
-    } )
-    .attr("class", function (d) {
-      return "link" + (d.status ? " finished" : "");
-    })
-    .attr("marker-mid", function (d) {
-      return "url(#" + (d.status ? "finished" : "unfinished") + ")";
-    });
-  
-  // create text group for each node label
-  var text = svgg.append("svg:g").selectAll("g")
-    .data(this._nodes)
-    .enter().append("svg:g");
-  
-  // add a shadow copy of the node label (will have a lighter color and thicker
-  // stroke for legibility)
-  text.append("svg:text")
-    .attr("x", function(d) {
-      var goal = d.x + d.w/2;
-      var halfLabel = maxLabelWidth/2;
-      if (goal < halfLabel) return halfLabel;
-      else if (goal > w-halfLabel) return w-halfLabel;
-      return goal;
-    } )
-    .attr("y", function(d) { return d.y + d.h + labelFontSize; } )
-    .attr("class", "joblabel shadow")
-    .attr("style", "font: "+labelFontSize+"px sans-serif")
-    .text(function (d) {
-      return d.name;
-    });
-  
-  // add the main node label
-  text.append("svg:text")
-    .attr("x", function(d) {
-      var goal = d.x + d.w/2;
-      var halfLabel = maxLabelWidth/2;
-      if (goal < halfLabel) return halfLabel;
-      else if (goal > w-halfLabel) return w-halfLabel;
-      return goal;
-    } )
-    .attr("y", function(d) { return d.y + d.h + labelFontSize; } )
-    .attr("class", "joblabel")
-    .attr("style", "font: "+labelFontSize+"px sans-serif")
-    .text(function (d) {
-      return d.name;
-    });
-}
diff --git a/branch-1.2/ambari-web/vendor/styles/bootstrap-combobox.css b/branch-1.2/ambari-web/vendor/styles/bootstrap-combobox.css
deleted file mode 100644
index d5891ff..0000000
--- a/branch-1.2/ambari-web/vendor/styles/bootstrap-combobox.css
+++ /dev/null
@@ -1,181 +0,0 @@
-.combobox-container {
-    margin-bottom: 5px;
-    *zoom: 1;
-}
-
-.combobox-container:before,
-.combobox-container:after {
-    display: table;
-    content: "";
-}
-
-.combobox-container:after {
-    clear: both;
-}
-
-.combobox-container input,
-.combobox-container .uneditable-input {
-    -webkit-border-radius: 0 3px 3px 0;
-    -moz-border-radius: 0 3px 3px 0;
-    border-radius: 0 3px 3px 0;
-}
-
-.combobox-container input:focus,
-.combobox-container .uneditable-input:focus {
-    position: relative;
-    z-index: 2;
-}
-
-.combobox-container .uneditable-input {
-    border-left-color: #ccc;
-}
-
-.combobox-container .add-on {
-    float: left;
-    display: block;
-    width: auto;
-    min-width: 16px;
-    height: 18px;
-    margin-right: -1px;
-    padding: 4px 5px;
-    font-weight: normal;
-    line-height: 18px;
-    color: #999999;
-    text-align: center;
-    text-shadow: 0 1px 0 #ffffff;
-    background-color: #f5f5f5;
-    border: 1px solid #ccc;
-    -webkit-border-radius: 3px 0 0 3px;
-    -moz-border-radius: 3px 0 0 3px;
-    border-radius: 3px 0 0 3px;
-}
-
-.combobox-container .active {
-    background-color: #a9dba9;
-    border-color: #46a546;
-}
-
-.combobox-container input,
-.combobox-container .uneditable-input {
-    float: left;
-    -webkit-border-radius: 3px 0 0 3px;
-    -moz-border-radius: 3px 0 0 3px;
-    border-radius: 3px 0 0 3px;
-}
-
-.combobox-container .uneditable-input {
-    border-left-color: #eee;
-    border-right-color: #ccc;
-}
-
-.combobox-container .add-on {
-    margin-right: 0;
-    margin-left: -1px;
-    -webkit-border-radius: 0 3px 3px 0;
-    -moz-border-radius: 0 3px 3px 0;
-    border-radius: 0 3px 3px 0;
-}
-
-.combobox-container input:first-child {
-    *margin-left: -160px;
-}
-
-.combobox-container input:first-child + .add-on {
-    *margin-left: -21px;
-}
-
-.combobox-container select {
-    display: inline-block;
-    width: 0;
-    height: 0;
-    border: 0;
-    padding: 0;
-    margin: 0;
-    text-indent: -99999px;
-    *text-indent: 0;
-}
-
-.form-search .combobox-container,
-.form-inline .combobox-container {
-    display: inline-block;
-    margin-bottom: 0;
-    vertical-align: top;
-}
-
-.form-search .combobox-container .add-on,
-.form-inline .combobox-container .add-on {
-    vertical-align: middle;
-}
-
-.combobox-selected .combobox-clear {
-    display: inline-block;
-}
-
-.combobox-selected .caret {
-    display: none;
-}
-
-.combobox-clear {
-    display: none;
-    width: 14px;
-    height: 14px;
-    line-height: 14px;
-    vertical-align: top;
-    opacity: 0.3;
-    filter: alpha(opacity = 30);
-}
-
-.dropdown:hover .combobox-clear,
-.open.dropdown .combobox-clear {
-    opacity: 1;
-    filter: alpha(opacity = 100);
-}
-
-.btn .combobox-clear {
-    margin-top: 1px;
-    margin-left: 1px;
-}
-
-.btn:hover .combobox-clear,
-.open.btn-group .combobox-clear {
-    opacity: 1;
-    filter: alpha(opacity = 100);
-}
-
-.typeahead-long {
-    max-height: 300px;
-    overflow-y: auto;
-}
-
-.control-group.error .combobox-container .add-on {
-    color: #B94A48;
-    border-color: #B94A48;
-}
-
-.control-group.error .combobox-container .caret {
-    border-top-color: #B94A48;
-}
-
-.control-group.warning .combobox-container .add-on {
-    color: #C09853;
-    border-color: #C09853;
-}
-
-.control-group.warning .combobox-container .caret {
-    border-top-color: #C09853;
-}
-
-.control-group.success .combobox-container .add-on {
-    color: #468847;
-    border-color: #468847;
-}
-
-.control-group.success .combobox-container .caret {
-    border-top-color: #468847;
-}
-
-.combobox-container button.add-on {
-    height: 30px;
-    width: 30px;
-}
\ No newline at end of file
diff --git a/branch-1.2/ambari-web/vendor/styles/bootstrap.css b/branch-1.2/ambari-web/vendor/styles/bootstrap.css
deleted file mode 100644
index cd0d25a..0000000
--- a/branch-1.2/ambari-web/vendor/styles/bootstrap.css
+++ /dev/null
@@ -1,5783 +0,0 @@
-/*!
- * Bootstrap v2.1.1
- *
- * Copyright 2012 Twitter, Inc
- * Licensed under the Apache License v2.0
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Designed and built with all the love in the world @twitter by @mdo and @fat.
- */
-
-article,
-aside,
-details,
-figcaption,
-figure,
-footer,
-header,
-hgroup,
-nav,
-section {
-  display: block;
-}
-
-audio,
-canvas,
-video {
-  display: inline-block;
-  *display: inline;
-  *zoom: 1;
-}
-
-audio:not([controls]) {
-  display: none;
-}
-
-html {
-  font-size: 100%;
-  -webkit-text-size-adjust: 100%;
-      -ms-text-size-adjust: 100%;
-}
-
-a:focus {
-  outline: thin dotted #333;
-  outline: 5px auto -webkit-focus-ring-color;
-  outline-offset: -2px;
-}
-
-a:hover,
-a:active {
-  outline: 0;
-}
-
-sub,
-sup {
-  position: relative;
-  font-size: 75%;
-  line-height: 0;
-  vertical-align: baseline;
-}
-
-sup {
-  top: -0.5em;
-}
-
-sub {
-  bottom: -0.25em;
-}
-
-img {
-  width: auto\9;
-  height: auto;
-  max-width: 100%;
-  vertical-align: middle;
-  border: 0;
-  -ms-interpolation-mode: bicubic;
-}
-
-#map_canvas img {
-  max-width: none;
-}
-
-button,
-input,
-select,
-textarea {
-  margin: 0;
-  font-size: 100%;
-  vertical-align: middle;
-}
-
-button,
-input {
-  *overflow: visible;
-  line-height: normal;
-}
-
-button::-moz-focus-inner,
-input::-moz-focus-inner {
-  padding: 0;
-  border: 0;
-}
-
-button,
-input[type="button"],
-input[type="reset"],
-input[type="submit"] {
-  cursor: pointer;
-  -webkit-appearance: button;
-}
-
-input[type="search"] {
-  -webkit-box-sizing: content-box;
-     -moz-box-sizing: content-box;
-          box-sizing: content-box;
-  -webkit-appearance: textfield;
-}
-
-input[type="search"]::-webkit-search-decoration,
-input[type="search"]::-webkit-search-cancel-button {
-  -webkit-appearance: none;
-}
-
-textarea {
-  overflow: auto;
-  vertical-align: top;
-}
-
-.clearfix {
-  *zoom: 1;
-}
-
-.clearfix:before,
-.clearfix:after {
-  display: table;
-  line-height: 0;
-  content: "";
-}
-
-.clearfix:after {
-  clear: both;
-}
-
-.hide-text {
-  font: 0/0 a;
-  color: transparent;
-  text-shadow: none;
-  background-color: transparent;
-  border: 0;
-}
-
-.input-block-level {
-  display: block;
-  width: 100%;
-  min-height: 30px;
-  -webkit-box-sizing: border-box;
-     -moz-box-sizing: border-box;
-          box-sizing: border-box;
-}
-
-body {
-  margin: 0;
-  font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
-  font-size: 14px;
-  line-height: 20px;
-  color: #333333;
-  background-color: #ffffff;
-}
-
-a {
-  color: #0088cc;
-  text-decoration: none;
-}
-
-a:hover {
-  color: #005580;
-  text-decoration: underline;
-}
-
-.img-rounded {
-  -webkit-border-radius: 6px;
-     -moz-border-radius: 6px;
-          border-radius: 6px;
-}
-
-.img-polaroid {
-  padding: 4px;
-  background-color: #fff;
-  border: 1px solid #ccc;
-  border: 1px solid rgba(0, 0, 0, 0.2);
-  -webkit-box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
-     -moz-box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
-          box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
-}
-
-.img-circle {
-  -webkit-border-radius: 500px;
-     -moz-border-radius: 500px;
-          border-radius: 500px;
-}
-
-.row {
-  margin-left: -20px;
-  *zoom: 1;
-}
-
-.row:before,
-.row:after {
-  display: table;
-  line-height: 0;
-  content: "";
-}
-
-.row:after {
-  clear: both;
-}
-
-[class*="span"] {
-  float: left;
-  min-height: 1px;
-  margin-left: 20px;
-}
-
-.container,
-.navbar-static-top .container,
-.navbar-fixed-top .container,
-.navbar-fixed-bottom .container {
-  width: 940px;
-}
-
-.span12 {
-  width: 940px;
-}
-
-.span11 {
-  width: 860px;
-}
-
-.span10 {
-  width: 780px;
-}
-
-.span9 {
-  width: 700px;
-}
-
-.span8 {
-  width: 620px;
-}
-
-.span7 {
-  width: 540px;
-}
-
-.span6 {
-  width: 460px;
-}
-
-.span5 {
-  width: 380px;
-}
-
-.span4 {
-  width: 300px;
-}
-
-.span3 {
-  width: 220px;
-}
-
-.span2 {
-  width: 140px;
-}
-
-.span1 {
-  width: 60px;
-}
-
-.offset12 {
-  margin-left: 980px;
-}
-
-.offset11 {
-  margin-left: 900px;
-}
-
-.offset10 {
-  margin-left: 820px;
-}
-
-.offset9 {
-  margin-left: 740px;
-}
-
-.offset8 {
-  margin-left: 660px;
-}
-
-.offset7 {
-  margin-left: 580px;
-}
-
-.offset6 {
-  margin-left: 500px;
-}
-
-.offset5 {
-  margin-left: 420px;
-}
-
-.offset4 {
-  margin-left: 340px;
-}
-
-.offset3 {
-  margin-left: 260px;
-}
-
-.offset2 {
-  margin-left: 180px;
-}
-
-.offset1 {
-  margin-left: 100px;
-}
-
-.row-fluid {
-  width: 100%;
-  *zoom: 1;
-}
-
-.row-fluid:before,
-.row-fluid:after {
-  display: table;
-  line-height: 0;
-  content: "";
-}
-
-.row-fluid:after {
-  clear: both;
-}
-
-.row-fluid [class*="span"] {
-  display: block;
-  float: left;
-  width: 100%;
-  min-height: 30px;
-  margin-left: 2.127659574468085%;
-  *margin-left: 2.074468085106383%;
-  -webkit-box-sizing: border-box;
-     -moz-box-sizing: border-box;
-          box-sizing: border-box;
-}
-
-.row-fluid [class*="span"]:first-child {
-  margin-left: 0;
-}
-
-.row-fluid .span12 {
-  width: 100%;
-  *width: 99.94680851063829%;
-}
-
-.row-fluid .span11 {
-  width: 91.48936170212765%;
-  *width: 91.43617021276594%;
-}
-
-.row-fluid .span10 {
-  width: 82.97872340425532%;
-  *width: 82.92553191489361%;
-}
-
-.row-fluid .span9 {
-  width: 74.46808510638297%;
-  *width: 74.41489361702126%;
-}
-
-.row-fluid .span8 {
-  width: 65.95744680851064%;
-  *width: 65.90425531914893%;
-}
-
-.row-fluid .span7 {
-  width: 57.44680851063829%;
-  *width: 57.39361702127659%;
-}
-
-.row-fluid .span6 {
-  width: 48.93617021276595%;
-  *width: 48.88297872340425%;
-}
-
-.row-fluid .span5 {
-  width: 40.42553191489362%;
-  *width: 40.37234042553192%;
-}
-
-.row-fluid .span4 {
-  width: 31.914893617021278%;
-  *width: 31.861702127659576%;
-}
-
-.row-fluid .span3 {
-  width: 23.404255319148934%;
-  *width: 23.351063829787233%;
-}
-
-.row-fluid .span2 {
-  width: 14.893617021276595%;
-  *width: 14.840425531914894%;
-}
-
-.row-fluid .span1 {
-  width: 6.382978723404255%;
-  *width: 6.329787234042553%;
-}
-
-.row-fluid .offset12 {
-  margin-left: 104.25531914893617%;
-  *margin-left: 104.14893617021275%;
-}
-
-.row-fluid .offset12:first-child {
-  margin-left: 102.12765957446808%;
-  *margin-left: 102.02127659574467%;
-}
-
-.row-fluid .offset11 {
-  margin-left: 95.74468085106382%;
-  *margin-left: 95.6382978723404%;
-}
-
-.row-fluid .offset11:first-child {
-  margin-left: 93.61702127659574%;
-  *margin-left: 93.51063829787232%;
-}
-
-.row-fluid .offset10 {
-  margin-left: 87.23404255319149%;
-  *margin-left: 87.12765957446807%;
-}
-
-.row-fluid .offset10:first-child {
-  margin-left: 85.1063829787234%;
-  *margin-left: 84.99999999999999%;
-}
-
-.row-fluid .offset9 {
-  margin-left: 78.72340425531914%;
-  *margin-left: 78.61702127659572%;
-}
-
-.row-fluid .offset9:first-child {
-  margin-left: 76.59574468085106%;
-  *margin-left: 76.48936170212764%;
-}
-
-.row-fluid .offset8 {
-  margin-left: 70.2127659574468%;
-  *margin-left: 70.10638297872339%;
-}
-
-.row-fluid .offset8:first-child {
-  margin-left: 68.08510638297872%;
-  *margin-left: 67.9787234042553%;
-}
-
-.row-fluid .offset7 {
-  margin-left: 61.70212765957446%;
-  *margin-left: 61.59574468085106%;
-}
-
-.row-fluid .offset7:first-child {
-  margin-left: 59.574468085106375%;
-  *margin-left: 59.46808510638297%;
-}
-
-.row-fluid .offset6 {
-  margin-left: 53.191489361702125%;
-  *margin-left: 53.085106382978715%;
-}
-
-.row-fluid .offset6:first-child {
-  margin-left: 51.063829787234035%;
-  *margin-left: 50.95744680851063%;
-}
-
-.row-fluid .offset5 {
-  margin-left: 44.68085106382979%;
-  *margin-left: 44.57446808510638%;
-}
-
-.row-fluid .offset5:first-child {
-  margin-left: 42.5531914893617%;
-  *margin-left: 42.4468085106383%;
-}
-
-.row-fluid .offset4 {
-  margin-left: 36.170212765957444%;
-  *margin-left: 36.06382978723405%;
-}
-
-.row-fluid .offset4:first-child {
-  margin-left: 34.04255319148936%;
-  *margin-left: 33.93617021276596%;
-}
-
-.row-fluid .offset3 {
-  margin-left: 27.659574468085104%;
-  *margin-left: 27.5531914893617%;
-}
-
-.row-fluid .offset3:first-child {
-  margin-left: 25.53191489361702%;
-  *margin-left: 25.425531914893618%;
-}
-
-.row-fluid .offset2 {
-  margin-left: 19.148936170212764%;
-  *margin-left: 19.04255319148936%;
-}
-
-.row-fluid .offset2:first-child {
-  margin-left: 17.02127659574468%;
-  *margin-left: 16.914893617021278%;
-}
-
-.row-fluid .offset1 {
-  margin-left: 10.638297872340425%;
-  *margin-left: 10.53191489361702%;
-}
-
-.row-fluid .offset1:first-child {
-  margin-left: 8.51063829787234%;
-  *margin-left: 8.404255319148938%;
-}
-
-[class*="span"].hide,
-.row-fluid [class*="span"].hide {
-  display: none;
-}
-
-[class*="span"].pull-right,
-.row-fluid [class*="span"].pull-right {
-  float: right;
-}
-
-.container {
-  margin-right: auto;
-  margin-left: auto;
-  *zoom: 1;
-}
-
-.container:before,
-.container:after {
-  display: table;
-  line-height: 0;
-  content: "";
-}
-
-.container:after {
-  clear: both;
-}
-
-.container-fluid {
-  padding-right: 20px;
-  padding-left: 20px;
-  *zoom: 1;
-}
-
-.container-fluid:before,
-.container-fluid:after {
-  display: table;
-  line-height: 0;
-  content: "";
-}
-
-.container-fluid:after {
-  clear: both;
-}
-
-p {
-  margin: 0 0 10px;
-}
-
-.lead {
-  margin-bottom: 20px;
-  font-size: 21px;
-  font-weight: 200;
-  line-height: 30px;
-}
-
-small {
-  font-size: 85%;
-}
-
-strong {
-  font-weight: bold;
-}
-
-em {
-  font-style: italic;
-}
-
-cite {
-  font-style: normal;
-}
-
-.muted {
-  color: #999999;
-}
-
-.text-warning {
-  color: #c09853;
-}
-
-.text-error {
-  color: #b94a48;
-}
-
-.text-info {
-  color: #3a87ad;
-}
-
-.text-success {
-  color: #468847;
-}
-
-h1,
-h2,
-h3,
-h4,
-h5,
-h6 {
-  margin: 10px 0;
-  font-family: inherit;
-  font-weight: bold;
-  line-height: 1;
-  color: inherit;
-  text-rendering: optimizelegibility;
-}
-
-h1 small,
-h2 small,
-h3 small,
-h4 small,
-h5 small,
-h6 small {
-  font-weight: normal;
-  line-height: 1;
-  color: #999999;
-}
-
-h1 {
-  font-size: 36px;
-  line-height: 40px;
-}
-
-h2 {
-  font-size: 30px;
-  line-height: 40px;
-}
-
-h3 {
-  font-size: 24px;
-  line-height: 40px;
-}
-
-h4 {
-  font-size: 18px;
-  line-height: 20px;
-}
-
-h5 {
-  font-size: 14px;
-  line-height: 20px;
-}
-
-h6 {
-  font-size: 12px;
-  line-height: 20px;
-}
-
-h1 small {
-  font-size: 24px;
-}
-
-h2 small {
-  font-size: 18px;
-}
-
-h3 small {
-  font-size: 14px;
-}
-
-h4 small {
-  font-size: 14px;
-}
-
-.page-header {
-  padding-bottom: 9px;
-  margin: 20px 0 30px;
-  border-bottom: 1px solid #eeeeee;
-}
-
-ul,
-ol {
-  padding: 0;
-  margin: 0 0 10px 25px;
-}
-
-ul ul,
-ul ol,
-ol ol,
-ol ul {
-  margin-bottom: 0;
-}
-
-li {
-  line-height: 20px;
-}
-
-ul.unstyled,
-ol.unstyled {
-  margin-left: 0;
-  list-style: none;
-}
-
-dl {
-  margin-bottom: 20px;
-}
-
-dt,
-dd {
-  line-height: 20px;
-}
-
-dt {
-  font-weight: bold;
-}
-
-dd {
-  margin-left: 10px;
-}
-
-.dl-horizontal {
-  *zoom: 1;
-}
-
-.dl-horizontal:before,
-.dl-horizontal:after {
-  display: table;
-  line-height: 0;
-  content: "";
-}
-
-.dl-horizontal:after {
-  clear: both;
-}
-
-.dl-horizontal dt {
-  float: left;
-  width: 160px;
-  overflow: hidden;
-  clear: left;
-  text-align: right;
-  text-overflow: ellipsis;
-  white-space: nowrap;
-}
-
-.dl-horizontal dd {
-  margin-left: 180px;
-}
-
-hr {
-  margin: 20px 0;
-  border: 0;
-  border-top: 1px solid #eeeeee;
-  border-bottom: 1px solid #ffffff;
-}
-
-abbr[title] {
-  cursor: help;
-  border-bottom: 1px dotted #999999;
-}
-
-abbr.initialism {
-  font-size: 90%;
-  text-transform: uppercase;
-}
-
-blockquote {
-  padding: 0 0 0 15px;
-  margin: 0 0 20px;
-  border-left: 5px solid #eeeeee;
-}
-
-blockquote p {
-  margin-bottom: 0;
-  font-size: 16px;
-  font-weight: 300;
-  line-height: 25px;
-}
-
-blockquote small {
-  display: block;
-  line-height: 20px;
-  color: #999999;
-}
-
-blockquote small:before {
-  content: '\2014 \00A0';
-}
-
-blockquote.pull-right {
-  float: right;
-  padding-right: 15px;
-  padding-left: 0;
-  border-right: 5px solid #eeeeee;
-  border-left: 0;
-}
-
-blockquote.pull-right p,
-blockquote.pull-right small {
-  text-align: right;
-}
-
-blockquote.pull-right small:before {
-  content: '';
-}
-
-blockquote.pull-right small:after {
-  content: '\00A0 \2014';
-}
-
-q:before,
-q:after,
-blockquote:before,
-blockquote:after {
-  content: "";
-}
-
-address {
-  display: block;
-  margin-bottom: 20px;
-  font-style: normal;
-  line-height: 20px;
-}
-
-code,
-pre {
-  padding: 0 3px 2px;
-  font-family: Monaco, Menlo, Consolas, "Courier New", monospace;
-  font-size: 12px;
-  color: #333333;
-  -webkit-border-radius: 3px;
-     -moz-border-radius: 3px;
-          border-radius: 3px;
-}
-
-code {
-  padding: 2px 4px;
-  color: #d14;
-  background-color: #f7f7f9;
-  border: 1px solid #e1e1e8;
-}
-
-pre {
-  display: block;
-  padding: 9.5px;
-  margin: 0 0 10px;
-  font-size: 13px;
-  line-height: 20px;
-  word-break: break-all;
-  word-wrap: break-word;
-  white-space: pre;
-  white-space: pre-wrap;
-  background-color: #f5f5f5;
-  border: 1px solid #ccc;
-  border: 1px solid rgba(0, 0, 0, 0.15);
-  -webkit-border-radius: 4px;
-     -moz-border-radius: 4px;
-          border-radius: 4px;
-}
-
-pre.prettyprint {
-  margin-bottom: 20px;
-}
-
-pre code {
-  padding: 0;
-  color: inherit;
-  background-color: transparent;
-  border: 0;
-}
-
-.pre-scrollable {
-  max-height: 340px;
-  overflow-y: scroll;
-}
-
-form {
-  margin: 0 0 20px;
-}
-
-fieldset {
-  padding: 0;
-  margin: 0;
-  border: 0;
-}
-
-legend {
-  display: block;
-  width: 100%;
-  padding: 0;
-  margin-bottom: 20px;
-  font-size: 21px;
-  line-height: 40px;
-  color: #333333;
-  border: 0;
-  border-bottom: 1px solid #e5e5e5;
-}
-
-legend small {
-  font-size: 15px;
-  color: #999999;
-}
-
-label,
-input,
-button,
-select,
-textarea {
-  font-size: 14px;
-  font-weight: normal;
-  line-height: 20px;
-}
-
-input,
-button,
-select,
-textarea {
-  font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
-}
-
-label {
-  display: block;
-  margin-bottom: 5px;
-}
-
-select,
-textarea,
-input[type="text"],
-input[type="password"],
-input[type="datetime"],
-input[type="datetime-local"],
-input[type="date"],
-input[type="month"],
-input[type="time"],
-input[type="week"],
-input[type="number"],
-input[type="email"],
-input[type="url"],
-input[type="search"],
-input[type="tel"],
-input[type="color"],
-.uneditable-input {
-  display: inline-block;
-  height: 20px;
-  padding: 4px 6px;
-  margin-bottom: 9px;
-  font-size: 14px;
-  line-height: 20px;
-  color: #555555;
-  -webkit-border-radius: 3px;
-     -moz-border-radius: 3px;
-          border-radius: 3px;
-}
-
-input,
-textarea,
-.uneditable-input {
-  width: 206px;
-}
-
-textarea {
-  height: auto;
-}
-
-textarea,
-input[type="text"],
-input[type="password"],
-input[type="datetime"],
-input[type="datetime-local"],
-input[type="date"],
-input[type="month"],
-input[type="time"],
-input[type="week"],
-input[type="number"],
-input[type="email"],
-input[type="url"],
-input[type="search"],
-input[type="tel"],
-input[type="color"],
-.uneditable-input {
-  background-color: #ffffff;
-  border: 1px solid #cccccc;
-  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
-     -moz-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
-          box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
-  -webkit-transition: border linear 0.2s, box-shadow linear 0.2s;
-     -moz-transition: border linear 0.2s, box-shadow linear 0.2s;
-       -o-transition: border linear 0.2s, box-shadow linear 0.2s;
-          transition: border linear 0.2s, box-shadow linear 0.2s;
-}
-
-textarea:focus,
-input[type="text"]:focus,
-input[type="password"]:focus,
-input[type="datetime"]:focus,
-input[type="datetime-local"]:focus,
-input[type="date"]:focus,
-input[type="month"]:focus,
-input[type="time"]:focus,
-input[type="week"]:focus,
-input[type="number"]:focus,
-input[type="email"]:focus,
-input[type="url"]:focus,
-input[type="search"]:focus,
-input[type="tel"]:focus,
-input[type="color"]:focus,
-.uneditable-input:focus {
-  border-color: rgba(82, 168, 236, 0.8);
-  outline: 0;
-  outline: thin dotted \9;
-  /* IE6-9 */
-
-  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 8px rgba(82, 168, 236, 0.6);
-     -moz-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 8px rgba(82, 168, 236, 0.6);
-          box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 8px rgba(82, 168, 236, 0.6);
-}
-
-input[type="radio"],
-input[type="checkbox"] {
-  margin: 4px 0 0;
-  margin-top: 1px \9;
-  *margin-top: 0;
-  line-height: normal;
-  cursor: pointer;
-}
-
-input[type="file"],
-input[type="image"],
-input[type="submit"],
-input[type="reset"],
-input[type="button"],
-input[type="radio"],
-input[type="checkbox"] {
-  width: auto;
-}
-
-select,
-input[type="file"] {
-  height: 30px;
-  /* In IE7, the height of the select element cannot be changed by height, only font-size */
-
-  *margin-top: 4px;
-  /* For IE7, add top margin to align select with labels */
-
-  line-height: 30px;
-}
-
-select {
-  width: 220px;
-  background-color: #ffffff;
-  border: 1px solid #cccccc;
-}
-
-select[multiple],
-select[size] {
-  height: auto;
-}
-
-select:focus,
-input[type="file"]:focus,
-input[type="radio"]:focus,
-input[type="checkbox"]:focus {
-  outline: thin dotted #333;
-  outline: 5px auto -webkit-focus-ring-color;
-  outline-offset: -2px;
-}
-
-.uneditable-input,
-.uneditable-textarea {
-  color: #999999;
-  cursor: not-allowed;
-  background-color: #fcfcfc;
-  border-color: #cccccc;
-  -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.025);
-     -moz-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.025);
-          box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.025);
-}
-
-.uneditable-input {
-  overflow: hidden;
-  white-space: nowrap;
-}
-
-.uneditable-textarea {
-  width: auto;
-  height: auto;
-}
-
-input:-moz-placeholder,
-textarea:-moz-placeholder {
-  color: #999999;
-}
-
-input:-ms-input-placeholder,
-textarea:-ms-input-placeholder {
-  color: #999999;
-}
-
-input::-webkit-input-placeholder,
-textarea::-webkit-input-placeholder {
-  color: #999999;
-}
-
-.radio,
-.checkbox {
-  min-height: 18px;
-  padding-left: 18px;
-}
-
-.radio input[type="radio"],
-.checkbox input[type="checkbox"] {
-  float: left;
-  margin-left: -18px;
-}
-
-.controls > .radio:first-child,
-.controls > .checkbox:first-child {
-  padding-top: 5px;
-}
-
-.radio.inline,
-.checkbox.inline {
-  display: inline-block;
-  padding-top: 5px;
-  margin-bottom: 0;
-  vertical-align: middle;
-}
-
-.radio.inline + .radio.inline,
-.checkbox.inline + .checkbox.inline {
-  margin-left: 10px;
-}
-
-.input-mini {
-  width: 60px;
-}
-
-.input-small {
-  width: 90px;
-}
-
-.input-medium {
-  width: 150px;
-}
-
-.input-large {
-  width: 210px;
-}
-
-.input-xlarge {
-  width: 270px;
-}
-
-.input-xxlarge {
-  width: 530px;
-}
-
-input[class*="span"],
-select[class*="span"],
-textarea[class*="span"],
-.uneditable-input[class*="span"],
-.row-fluid input[class*="span"],
-.row-fluid select[class*="span"],
-.row-fluid textarea[class*="span"],
-.row-fluid .uneditable-input[class*="span"] {
-  float: none;
-  margin-left: 0;
-}
-
-.input-append input[class*="span"],
-.input-append .uneditable-input[class*="span"],
-.input-prepend input[class*="span"],
-.input-prepend .uneditable-input[class*="span"],
-.row-fluid input[class*="span"],
-.row-fluid select[class*="span"],
-.row-fluid textarea[class*="span"],
-.row-fluid .uneditable-input[class*="span"],
-.row-fluid .input-prepend [class*="span"],
-.row-fluid .input-append [class*="span"] {
-  display: inline-block;
-}
-
-input,
-textarea,
-.uneditable-input {
-  margin-left: 0;
-}
-
-.controls-row [class*="span"] + [class*="span"] {
-  margin-left: 20px;
-}
-
-input.span12,
-textarea.span12,
-.uneditable-input.span12 {
-  width: 926px;
-}
-
-input.span11,
-textarea.span11,
-.uneditable-input.span11 {
-  width: 846px;
-}
-
-input.span10,
-textarea.span10,
-.uneditable-input.span10 {
-  width: 766px;
-}
-
-input.span9,
-textarea.span9,
-.uneditable-input.span9 {
-  width: 686px;
-}
-
-input.span8,
-textarea.span8,
-.uneditable-input.span8 {
-  width: 606px;
-}
-
-input.span7,
-textarea.span7,
-.uneditable-input.span7 {
-  width: 526px;
-}
-
-input.span6,
-textarea.span6,
-.uneditable-input.span6 {
-  width: 446px;
-}
-
-input.span5,
-textarea.span5,
-.uneditable-input.span5 {
-  width: 366px;
-}
-
-input.span4,
-textarea.span4,
-.uneditable-input.span4 {
-  width: 286px;
-}
-
-input.span3,
-textarea.span3,
-.uneditable-input.span3 {
-  width: 206px;
-}
-
-input.span2,
-textarea.span2,
-.uneditable-input.span2 {
-  width: 126px;
-}
-
-input.span1,
-textarea.span1,
-.uneditable-input.span1 {
-  width: 46px;
-}
-
-.controls-row {
-  *zoom: 1;
-}
-
-.controls-row:before,
-.controls-row:after {
-  display: table;
-  line-height: 0;
-  content: "";
-}
-
-.controls-row:after {
-  clear: both;
-}
-
-.controls-row [class*="span"] {
-  float: left;
-}
-
-input[disabled],
-select[disabled],
-textarea[disabled],
-input[readonly],
-select[readonly],
-textarea[readonly] {
-  cursor: not-allowed;
-  background-color: #eeeeee;
-}
-
-input[type="radio"][disabled],
-input[type="checkbox"][disabled],
-input[type="radio"][readonly],
-input[type="checkbox"][readonly] {
-  background-color: transparent;
-}
-
-.control-group.warning > label,
-.control-group.warning .help-block,
-.control-group.warning .help-inline {
-  color: #c09853;
-}
-
-.control-group.warning .checkbox,
-.control-group.warning .radio,
-.control-group.warning input,
-.control-group.warning select,
-.control-group.warning textarea {
-  color: #c09853;
-}
-
-.control-group.warning input,
-.control-group.warning select,
-.control-group.warning textarea {
-  border-color: #c09853;
-  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
-     -moz-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
-          box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
-}
-
-.control-group.warning input:focus,
-.control-group.warning select:focus,
-.control-group.warning textarea:focus {
-  border-color: #a47e3c;
-  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #dbc59e;
-     -moz-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #dbc59e;
-          box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #dbc59e;
-}
-
-.control-group.warning .input-prepend .add-on,
-.control-group.warning .input-append .add-on {
-  color: #c09853;
-  background-color: #fcf8e3;
-  border-color: #c09853;
-}
-
-.control-group.error > label,
-.control-group.error .help-block,
-.control-group.error .help-inline {
-  color: #b94a48;
-}
-
-.control-group.error .checkbox,
-.control-group.error .radio,
-.control-group.error input,
-.control-group.error select,
-.control-group.error textarea {
-  color: #b94a48;
-}
-
-.control-group.error input,
-.control-group.error select,
-.control-group.error textarea {
-  border-color: #b94a48;
-  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
-     -moz-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
-          box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
-}
-
-.control-group.error input:focus,
-.control-group.error select:focus,
-.control-group.error textarea:focus {
-  border-color: #953b39;
-  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #d59392;
-     -moz-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #d59392;
-          box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #d59392;
-}
-
-.control-group.error .input-prepend .add-on,
-.control-group.error .input-append .add-on {
-  color: #b94a48;
-  background-color: #f2dede;
-  border-color: #b94a48;
-}
-
-.control-group.success > label,
-.control-group.success .help-block,
-.control-group.success .help-inline {
-  color: #468847;
-}
-
-.control-group.success .checkbox,
-.control-group.success .radio,
-.control-group.success input,
-.control-group.success select,
-.control-group.success textarea {
-  color: #468847;
-}
-
-.control-group.success input,
-.control-group.success select,
-.control-group.success textarea {
-  border-color: #468847;
-  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
-     -moz-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
-          box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
-}
-
-.control-group.success input:focus,
-.control-group.success select:focus,
-.control-group.success textarea:focus {
-  border-color: #356635;
-  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #7aba7b;
-     -moz-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #7aba7b;
-          box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #7aba7b;
-}
-
-.control-group.success .input-prepend .add-on,
-.control-group.success .input-append .add-on {
-  color: #468847;
-  background-color: #dff0d8;
-  border-color: #468847;
-}
-
-.control-group.info > label,
-.control-group.info .help-block,
-.control-group.info .help-inline {
-  color: #3a87ad;
-}
-
-.control-group.info .checkbox,
-.control-group.info .radio,
-.control-group.info input,
-.control-group.info select,
-.control-group.info textarea {
-  color: #3a87ad;
-}
-
-.control-group.info input,
-.control-group.info select,
-.control-group.info textarea {
-  border-color: #3a87ad;
-  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
-     -moz-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
-          box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
-}
-
-.control-group.info input:focus,
-.control-group.info select:focus,
-.control-group.info textarea:focus {
-  border-color: #2d6987;
-  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #7ab5d3;
-     -moz-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #7ab5d3;
-          box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #7ab5d3;
-}
-
-.control-group.info .input-prepend .add-on,
-.control-group.info .input-append .add-on {
-  color: #3a87ad;
-  background-color: #d9edf7;
-  border-color: #3a87ad;
-}
-
-input:focus:required:invalid,
-textarea:focus:required:invalid,
-select:focus:required:invalid {
-  color: #b94a48;
-  border-color: #ee5f5b;
-}
-
-input:focus:required:invalid:focus,
-textarea:focus:required:invalid:focus,
-select:focus:required:invalid:focus {
-  border-color: #e9322d;
-  -webkit-box-shadow: 0 0 6px #f8b9b7;
-     -moz-box-shadow: 0 0 6px #f8b9b7;
-          box-shadow: 0 0 6px #f8b9b7;
-}
-
-.form-actions {
-  padding: 19px 20px 20px;
-  margin-top: 20px;
-  margin-bottom: 20px;
-  background-color: #f5f5f5;
-  border-top: 1px solid #e5e5e5;
-  *zoom: 1;
-}
-
-.form-actions:before,
-.form-actions:after {
-  display: table;
-  line-height: 0;
-  content: "";
-}
-
-.form-actions:after {
-  clear: both;
-}
-
-.help-block,
-.help-inline {
-  color: #595959;
-}
-
-.help-block {
-  display: block;
-  margin-bottom: 10px;
-}
-
-.help-inline {
-  display: inline-block;
-  *display: inline;
-  padding-left: 5px;
-  vertical-align: middle;
-  *zoom: 1;
-}
-
-.input-append,
-.input-prepend {
-  margin-bottom: 5px;
-  font-size: 0;
-  white-space: nowrap;
-}
-
-.input-append input,
-.input-prepend input,
-.input-append select,
-.input-prepend select,
-.input-append .uneditable-input,
-.input-prepend .uneditable-input {
-  position: relative;
-  margin-bottom: 0;
-  *margin-left: 0;
-  font-size: 14px;
-  vertical-align: top;
-  -webkit-border-radius: 0 3px 3px 0;
-     -moz-border-radius: 0 3px 3px 0;
-          border-radius: 0 3px 3px 0;
-}
-
-.input-append input:focus,
-.input-prepend input:focus,
-.input-append select:focus,
-.input-prepend select:focus,
-.input-append .uneditable-input:focus,
-.input-prepend .uneditable-input:focus {
-  z-index: 2;
-}
-
-.input-append .add-on,
-.input-prepend .add-on {
-  display: inline-block;
-  width: auto;
-  height: 20px;
-  min-width: 16px;
-  padding: 4px 5px;
-  font-size: 14px;
-  font-weight: normal;
-  line-height: 20px;
-  text-align: center;
-  text-shadow: 0 1px 0 #ffffff;
-  background-color: #eeeeee;
-  border: 1px solid #ccc;
-}
-
-.input-append .add-on,
-.input-prepend .add-on,
-.input-append .btn,
-.input-prepend .btn {
-  vertical-align: top;
-  -webkit-border-radius: 0;
-     -moz-border-radius: 0;
-          border-radius: 0;
-}
-
-.input-append .active,
-.input-prepend .active {
-  background-color: #a9dba9;
-  border-color: #46a546;
-}
-
-.input-prepend .add-on,
-.input-prepend .btn {
-  margin-right: -1px;
-}
-
-.input-prepend .add-on:first-child,
-.input-prepend .btn:first-child {
-  -webkit-border-radius: 3px 0 0 3px;
-     -moz-border-radius: 3px 0 0 3px;
-          border-radius: 3px 0 0 3px;
-}
-
-.input-append input,
-.input-append select,
-.input-append .uneditable-input {
-  -webkit-border-radius: 3px 0 0 3px;
-     -moz-border-radius: 3px 0 0 3px;
-          border-radius: 3px 0 0 3px;
-}
-
-.input-append .add-on,
-.input-append .btn {
-  margin-left: -1px;
-}
-
-.input-append .add-on:last-child,
-.input-append .btn:last-child {
-  -webkit-border-radius: 0 3px 3px 0;
-     -moz-border-radius: 0 3px 3px 0;
-          border-radius: 0 3px 3px 0;
-}
-
-.input-prepend.input-append input,
-.input-prepend.input-append select,
-.input-prepend.input-append .uneditable-input {
-  -webkit-border-radius: 0;
-     -moz-border-radius: 0;
-          border-radius: 0;
-}
-
-.input-prepend.input-append .add-on:first-child,
-.input-prepend.input-append .btn:first-child {
-  margin-right: -1px;
-  -webkit-border-radius: 3px 0 0 3px;
-     -moz-border-radius: 3px 0 0 3px;
-          border-radius: 3px 0 0 3px;
-}
-
-.input-prepend.input-append .add-on:last-child,
-.input-prepend.input-append .btn:last-child {
-  margin-left: -1px;
-  -webkit-border-radius: 0 3px 3px 0;
-     -moz-border-radius: 0 3px 3px 0;
-          border-radius: 0 3px 3px 0;
-}
-
-input.search-query {
-  padding-right: 14px;
-  padding-right: 4px \9;
-  padding-left: 14px;
-  padding-left: 4px \9;
-  /* IE7-8 doesn't have border-radius, so don't indent the padding */
-
-  margin-bottom: 0;
-  -webkit-border-radius: 15px;
-     -moz-border-radius: 15px;
-          border-radius: 15px;
-}
-
-/* Allow for input prepend/append in search forms */
-
-.form-search .input-append .search-query,
-.form-search .input-prepend .search-query {
-  -webkit-border-radius: 0;
-     -moz-border-radius: 0;
-          border-radius: 0;
-}
-
-.form-search .input-append .search-query {
-  -webkit-border-radius: 14px 0 0 14px;
-     -moz-border-radius: 14px 0 0 14px;
-          border-radius: 14px 0 0 14px;
-}
-
-.form-search .input-append .btn {
-  -webkit-border-radius: 0 14px 14px 0;
-     -moz-border-radius: 0 14px 14px 0;
-          border-radius: 0 14px 14px 0;
-}
-
-.form-search .input-prepend .search-query {
-  -webkit-border-radius: 0 14px 14px 0;
-     -moz-border-radius: 0 14px 14px 0;
-          border-radius: 0 14px 14px 0;
-}
-
-.form-search .input-prepend .btn {
-  -webkit-border-radius: 14px 0 0 14px;
-     -moz-border-radius: 14px 0 0 14px;
-          border-radius: 14px 0 0 14px;
-}
-
-.form-search input,
-.form-inline input,
-.form-horizontal input,
-.form-search textarea,
-.form-inline textarea,
-.form-horizontal textarea,
-.form-search select,
-.form-inline select,
-.form-horizontal select,
-.form-search .help-inline,
-.form-inline .help-inline,
-.form-horizontal .help-inline,
-.form-search .uneditable-input,
-.form-inline .uneditable-input,
-.form-horizontal .uneditable-input,
-.form-search .input-prepend,
-.form-inline .input-prepend,
-.form-horizontal .input-prepend,
-.form-search .input-append,
-.form-inline .input-append,
-.form-horizontal .input-append {
-  display: inline-block;
-  *display: inline;
-  margin-bottom: 0;
-  vertical-align: middle;
-  *zoom: 1;
-}
-
-.form-search .hide,
-.form-inline .hide,
-.form-horizontal .hide {
-  display: none;
-}
-
-.form-search label,
-.form-inline label,
-.form-search .btn-group,
-.form-inline .btn-group {
-  display: inline-block;
-}
-
-.form-search .input-append,
-.form-inline .input-append,
-.form-search .input-prepend,
-.form-inline .input-prepend {
-  margin-bottom: 0;
-}
-
-.form-search .radio,
-.form-search .checkbox,
-.form-inline .radio,
-.form-inline .checkbox {
-  padding-left: 0;
-  margin-bottom: 0;
-  vertical-align: middle;
-}
-
-.form-search .radio input[type="radio"],
-.form-search .checkbox input[type="checkbox"],
-.form-inline .radio input[type="radio"],
-.form-inline .checkbox input[type="checkbox"] {
-  float: left;
-  margin-right: 3px;
-  margin-left: 0;
-}
-
-.control-group {
-  margin-bottom: 10px;
-}
-
-legend + .control-group {
-  margin-top: 20px;
-  -webkit-margin-top-collapse: separate;
-}
-
-.form-horizontal .control-group {
-  margin-bottom: 20px;
-  *zoom: 1;
-}
-
-.form-horizontal .control-group:before,
-.form-horizontal .control-group:after {
-  display: table;
-  line-height: 0;
-  content: "";
-}
-
-.form-horizontal .control-group:after {
-  clear: both;
-}
-
-.form-horizontal .control-label {
-  float: left;
-  width: 160px;
-  padding-top: 5px;
-  text-align: right;
-}
-
-.form-horizontal .controls {
-  *display: inline-block;
-  *padding-left: 20px;
-  margin-left: 180px;
-  *margin-left: 0;
-}
-
-.form-horizontal .controls:first-child {
-  *padding-left: 180px;
-}
-
-.form-horizontal .help-block {
-  margin-bottom: 0;
-}
-
-.form-horizontal input + .help-block,
-.form-horizontal select + .help-block,
-.form-horizontal textarea + .help-block {
-  margin-top: 10px;
-}
-
-.form-horizontal .form-actions {
-  padding-left: 180px;
-}
-
-table {
-  max-width: 100%;
-  background-color: transparent;
-  border-collapse: collapse;
-  border-spacing: 0;
-}
-
-.table {
-  width: 100%;
-  margin-bottom: 20px;
-}
-
-.table th,
-.table td {
-  padding: 8px;
-  line-height: 20px;
-  text-align: left;
-  vertical-align: top;
-  border-top: 1px solid #dddddd;
-}
-
-.table th {
-  font-weight: bold;
-}
-
-.table thead th {
-  vertical-align: bottom;
-}
-
-.table caption + thead tr:first-child th,
-.table caption + thead tr:first-child td,
-.table colgroup + thead tr:first-child th,
-.table colgroup + thead tr:first-child td,
-.table thead:first-child tr:first-child th,
-.table thead:first-child tr:first-child td {
-  border-top: 0;
-}
-
-.table tbody + tbody {
-  border-top: 2px solid #dddddd;
-}
-
-.table-condensed th,
-.table-condensed td {
-  padding: 4px 5px;
-}
-
-.table-bordered {
-  border: 1px solid #dddddd;
-  border-collapse: separate;
-  *border-collapse: collapse;
-  border-left: 0;
-  -webkit-border-radius: 4px;
-     -moz-border-radius: 4px;
-          border-radius: 4px;
-}
-
-.table-bordered th,
-.table-bordered td {
-  border-left: 1px solid #dddddd;
-}
-
-.table-bordered caption + thead tr:first-child th,
-.table-bordered caption + tbody tr:first-child th,
-.table-bordered caption + tbody tr:first-child td,
-.table-bordered colgroup + thead tr:first-child th,
-.table-bordered colgroup + tbody tr:first-child th,
-.table-bordered colgroup + tbody tr:first-child td,
-.table-bordered thead:first-child tr:first-child th,
-.table-bordered tbody:first-child tr:first-child th,
-.table-bordered tbody:first-child tr:first-child td {
-  border-top: 0;
-}
-
-.table-bordered thead:first-child tr:first-child th:first-child,
-.table-bordered tbody:first-child tr:first-child td:first-child {
-  -webkit-border-top-left-radius: 4px;
-          border-top-left-radius: 4px;
-  -moz-border-radius-topleft: 4px;
-}
-
-.table-bordered thead:first-child tr:first-child th:last-child,
-.table-bordered tbody:first-child tr:first-child td:last-child {
-  -webkit-border-top-right-radius: 4px;
-          border-top-right-radius: 4px;
-  -moz-border-radius-topright: 4px;
-}
-
-.table-bordered thead:last-child tr:last-child th:first-child,
-.table-bordered tbody:last-child tr:last-child td:first-child,
-.table-bordered tfoot:last-child tr:last-child td:first-child {
-  -webkit-border-radius: 0 0 0 4px;
-     -moz-border-radius: 0 0 0 4px;
-          border-radius: 0 0 0 4px;
-  -webkit-border-bottom-left-radius: 4px;
-          border-bottom-left-radius: 4px;
-  -moz-border-radius-bottomleft: 4px;
-}
-
-.table-bordered thead:last-child tr:last-child th:last-child,
-.table-bordered tbody:last-child tr:last-child td:last-child,
-.table-bordered tfoot:last-child tr:last-child td:last-child {
-  -webkit-border-bottom-right-radius: 4px;
-          border-bottom-right-radius: 4px;
-  -moz-border-radius-bottomright: 4px;
-}
-
-.table-bordered caption + thead tr:first-child th:first-child,
-.table-bordered caption + tbody tr:first-child td:first-child,
-.table-bordered colgroup + thead tr:first-child th:first-child,
-.table-bordered colgroup + tbody tr:first-child td:first-child {
-  -webkit-border-top-left-radius: 4px;
-          border-top-left-radius: 4px;
-  -moz-border-radius-topleft: 4px;
-}
-
-.table-bordered caption + thead tr:first-child th:last-child,
-.table-bordered caption + tbody tr:first-child td:last-child,
-.table-bordered colgroup + thead tr:first-child th:last-child,
-.table-bordered colgroup + tbody tr:first-child td:last-child {
-  -webkit-border-top-right-radius: 4px;
-          border-top-right-radius: 4px;
-  -moz-border-radius-topright: 4px;
-}
-
-.table-striped tbody tr:nth-child(odd) td,
-.table-striped tbody tr:nth-child(odd) th {
-  background-color: #f9f9f9;
-}
-
-.table-hover tbody tr:hover td,
-.table-hover tbody tr:hover th {
-  background-color: #f5f5f5;
-}
-
-table [class*=span],
-.row-fluid table [class*=span] {
-  display: table-cell;
-  float: none;
-  margin-left: 0;
-}
-
-.table .span1 {
-  float: none;
-  width: 44px;
-  margin-left: 0;
-}
-
-.table .span2 {
-  float: none;
-  width: 124px;
-  margin-left: 0;
-}
-
-.table .span3 {
-  float: none;
-  width: 204px;
-  margin-left: 0;
-}
-
-.table .span4 {
-  float: none;
-  width: 284px;
-  margin-left: 0;
-}
-
-.table .span5 {
-  float: none;
-  width: 364px;
-  margin-left: 0;
-}
-
-.table .span6 {
-  float: none;
-  width: 444px;
-  margin-left: 0;
-}
-
-.table .span7 {
-  float: none;
-  width: 524px;
-  margin-left: 0;
-}
-
-.table .span8 {
-  float: none;
-  width: 604px;
-  margin-left: 0;
-}
-
-.table .span9 {
-  float: none;
-  width: 684px;
-  margin-left: 0;
-}
-
-.table .span10 {
-  float: none;
-  width: 764px;
-  margin-left: 0;
-}
-
-.table .span11 {
-  float: none;
-  width: 844px;
-  margin-left: 0;
-}
-
-.table .span12 {
-  float: none;
-  width: 924px;
-  margin-left: 0;
-}
-
-.table .span13 {
-  float: none;
-  width: 1004px;
-  margin-left: 0;
-}
-
-.table .span14 {
-  float: none;
-  width: 1084px;
-  margin-left: 0;
-}
-
-.table .span15 {
-  float: none;
-  width: 1164px;
-  margin-left: 0;
-}
-
-.table .span16 {
-  float: none;
-  width: 1244px;
-  margin-left: 0;
-}
-
-.table .span17 {
-  float: none;
-  width: 1324px;
-  margin-left: 0;
-}
-
-.table .span18 {
-  float: none;
-  width: 1404px;
-  margin-left: 0;
-}
-
-.table .span19 {
-  float: none;
-  width: 1484px;
-  margin-left: 0;
-}
-
-.table .span20 {
-  float: none;
-  width: 1564px;
-  margin-left: 0;
-}
-
-.table .span21 {
-  float: none;
-  width: 1644px;
-  margin-left: 0;
-}
-
-.table .span22 {
-  float: none;
-  width: 1724px;
-  margin-left: 0;
-}
-
-.table .span23 {
-  float: none;
-  width: 1804px;
-  margin-left: 0;
-}
-
-.table .span24 {
-  float: none;
-  width: 1884px;
-  margin-left: 0;
-}
-
-.table tbody tr.success td {
-  background-color: #dff0d8;
-}
-
-.table tbody tr.error td {
-  background-color: #f2dede;
-}
-
-.table tbody tr.warning td {
-  background-color: #fcf8e3;
-}
-
-.table tbody tr.info td {
-  background-color: #d9edf7;
-}
-
-.table-hover tbody tr.success:hover td {
-  background-color: #d0e9c6;
-}
-
-.table-hover tbody tr.error:hover td {
-  background-color: #ebcccc;
-}
-
-.table-hover tbody tr.warning:hover td {
-  background-color: #faf2cc;
-}
-
-.table-hover tbody tr.info:hover td {
-  background-color: #c4e3f3;
-}
-
-/******************
- * Commented out because Ambari now depends on FontAwesome-2.0,
- * which provides scalable icons. The FontAwesome documentation
- * explicitly recommends commenting out Twitter Bootstrap's
- * sprite.less so that the two do not conflict. Since this
- * bootstrap.css has sprite.less compiled into it, the section
- * contributed by sprite.less is commented out below. (A short
- * sketch of the icon-font approach follows this block.)
-
-
-[class^="icon-"],
-[class*=" icon-"] {
-  display: inline-block;
-  width: 14px;
-  height: 14px;
-  margin-top: 1px;
-  *margin-right: .3em;
-  line-height: 14px;
-  vertical-align: text-top;
-  background-image: url("../img/glyphicons-halflings.png");
-  background-position: 14px 14px;
-  background-repeat: no-repeat;
-}
-
-/ * White icons with optional class, or on hover/active states of certain elements * /
-
-.icon-white,
-.nav-tabs > .active > a > [class^="icon-"],
-.nav-tabs > .active > a > [class*=" icon-"],
-.nav-pills > .active > a > [class^="icon-"],
-.nav-pills > .active > a > [class*=" icon-"],
-.nav-list > .active > a > [class^="icon-"],
-.nav-list > .active > a > [class*=" icon-"],
-.navbar-inverse .nav > .active > a > [class^="icon-"],
-.navbar-inverse .nav > .active > a > [class*=" icon-"],
-.dropdown-menu > li > a:hover > [class^="icon-"],
-.dropdown-menu > li > a:hover > [class*=" icon-"],
-.dropdown-menu > .active > a > [class^="icon-"],
-.dropdown-menu > .active > a > [class*=" icon-"] {
-  background-image: url("../img/glyphicons-halflings-white.png");
-}
-
-.icon-glass {
-  background-position: 0      0;
-}
-
-.icon-music {
-  background-position: -24px 0;
-}
-
-.icon-search {
-  background-position: -48px 0;
-}
-
-.icon-envelope {
-  background-position: -72px 0;
-}
-
-.icon-heart {
-  background-position: -96px 0;
-}
-
-.icon-star {
-  background-position: -120px 0;
-}
-
-.icon-star-empty {
-  background-position: -144px 0;
-}
-
-.icon-user {
-  background-position: -168px 0;
-}
-
-.icon-film {
-  background-position: -192px 0;
-}
-
-.icon-th-large {
-  background-position: -216px 0;
-}
-
-.icon-th {
-  background-position: -240px 0;
-}
-
-.icon-th-list {
-  background-position: -264px 0;
-}
-
-.icon-ok {
-  background-position: -288px 0;
-}
-
-.icon-remove {
-  background-position: -312px 0;
-}
-
-.icon-zoom-in {
-  background-position: -336px 0;
-}
-
-.icon-zoom-out {
-  background-position: -360px 0;
-}
-
-.icon-off {
-  background-position: -384px 0;
-}
-
-.icon-signal {
-  background-position: -408px 0;
-}
-
-.icon-cog {
-  background-position: -432px 0;
-}
-
-.icon-trash {
-  background-position: -456px 0;
-}
-
-.icon-home {
-  background-position: 0 -24px;
-}
-
-.icon-file {
-  background-position: -24px -24px;
-}
-
-.icon-time {
-  background-position: -48px -24px;
-}
-
-.icon-road {
-  background-position: -72px -24px;
-}
-
-.icon-download-alt {
-  background-position: -96px -24px;
-}
-
-.icon-download {
-  background-position: -120px -24px;
-}
-
-.icon-upload {
-  background-position: -144px -24px;
-}
-
-.icon-inbox {
-  background-position: -168px -24px;
-}
-
-.icon-play-circle {
-  background-position: -192px -24px;
-}
-
-.icon-repeat {
-  background-position: -216px -24px;
-}
-
-.icon-refresh {
-  background-position: -240px -24px;
-}
-
-.icon-list-alt {
-  background-position: -264px -24px;
-}
-
-.icon-lock {
-  background-position: -287px -24px;
-}
-
-.icon-flag {
-  background-position: -312px -24px;
-}
-
-.icon-headphones {
-  background-position: -336px -24px;
-}
-
-.icon-volume-off {
-  background-position: -360px -24px;
-}
-
-.icon-volume-down {
-  background-position: -384px -24px;
-}
-
-.icon-volume-up {
-  background-position: -408px -24px;
-}
-
-.icon-qrcode {
-  background-position: -432px -24px;
-}
-
-.icon-barcode {
-  background-position: -456px -24px;
-}
-
-.icon-tag {
-  background-position: 0 -48px;
-}
-
-.icon-tags {
-  background-position: -25px -48px;
-}
-
-.icon-book {
-  background-position: -48px -48px;
-}
-
-.icon-bookmark {
-  background-position: -72px -48px;
-}
-
-.icon-print {
-  background-position: -96px -48px;
-}
-
-.icon-camera {
-  background-position: -120px -48px;
-}
-
-.icon-font {
-  background-position: -144px -48px;
-}
-
-.icon-bold {
-  background-position: -167px -48px;
-}
-
-.icon-italic {
-  background-position: -192px -48px;
-}
-
-.icon-text-height {
-  background-position: -216px -48px;
-}
-
-.icon-text-width {
-  background-position: -240px -48px;
-}
-
-.icon-align-left {
-  background-position: -264px -48px;
-}
-
-.icon-align-center {
-  background-position: -288px -48px;
-}
-
-.icon-align-right {
-  background-position: -312px -48px;
-}
-
-.icon-align-justify {
-  background-position: -336px -48px;
-}
-
-.icon-list {
-  background-position: -360px -48px;
-}
-
-.icon-indent-left {
-  background-position: -384px -48px;
-}
-
-.icon-indent-right {
-  background-position: -408px -48px;
-}
-
-.icon-facetime-video {
-  background-position: -432px -48px;
-}
-
-.icon-picture {
-  background-position: -456px -48px;
-}
-
-.icon-pencil {
-  background-position: 0 -72px;
-}
-
-.icon-map-marker {
-  background-position: -24px -72px;
-}
-
-.icon-adjust {
-  background-position: -48px -72px;
-}
-
-.icon-tint {
-  background-position: -72px -72px;
-}
-
-.icon-edit {
-  background-position: -96px -72px;
-}
-
-.icon-share {
-  background-position: -120px -72px;
-}
-
-.icon-check {
-  background-position: -144px -72px;
-}
-
-.icon-move {
-  background-position: -168px -72px;
-}
-
-.icon-step-backward {
-  background-position: -192px -72px;
-}
-
-.icon-fast-backward {
-  background-position: -216px -72px;
-}
-
-.icon-backward {
-  background-position: -240px -72px;
-}
-
-.icon-play {
-  background-position: -264px -72px;
-}
-
-.icon-pause {
-  background-position: -288px -72px;
-}
-
-.icon-stop {
-  background-position: -312px -72px;
-}
-
-.icon-forward {
-  background-position: -336px -72px;
-}
-
-.icon-fast-forward {
-  background-position: -360px -72px;
-}
-
-.icon-step-forward {
-  background-position: -384px -72px;
-}
-
-.icon-eject {
-  background-position: -408px -72px;
-}
-
-.icon-chevron-left {
-  background-position: -432px -72px;
-}
-
-.icon-chevron-right {
-  background-position: -456px -72px;
-}
-
-.icon-plus-sign {
-  background-position: 0 -96px;
-}
-
-.icon-minus-sign {
-  background-position: -24px -96px;
-}
-
-.icon-remove-sign {
-  background-position: -48px -96px;
-}
-
-.icon-ok-sign {
-  background-position: -72px -96px;
-}
-
-.icon-question-sign {
-  background-position: -96px -96px;
-}
-
-.icon-info-sign {
-  background-position: -120px -96px;
-}
-
-.icon-screenshot {
-  background-position: -144px -96px;
-}
-
-.icon-remove-circle {
-  background-position: -168px -96px;
-}
-
-.icon-ok-circle {
-  background-position: -192px -96px;
-}
-
-.icon-ban-circle {
-  background-position: -216px -96px;
-}
-
-.icon-arrow-left {
-  background-position: -240px -96px;
-}
-
-.icon-arrow-right {
-  background-position: -264px -96px;
-}
-
-.icon-arrow-up {
-  background-position: -289px -96px;
-}
-
-.icon-arrow-down {
-  background-position: -312px -96px;
-}
-
-.icon-share-alt {
-  background-position: -336px -96px;
-}
-
-.icon-resize-full {
-  background-position: -360px -96px;
-}
-
-.icon-resize-small {
-  background-position: -384px -96px;
-}
-
-.icon-plus {
-  background-position: -408px -96px;
-}
-
-.icon-minus {
-  background-position: -433px -96px;
-}
-
-.icon-asterisk {
-  background-position: -456px -96px;
-}
-
-.icon-exclamation-sign {
-  background-position: 0 -120px;
-}
-
-.icon-gift {
-  background-position: -24px -120px;
-}
-
-.icon-leaf {
-  background-position: -48px -120px;
-}
-
-.icon-fire {
-  background-position: -72px -120px;
-}
-
-.icon-eye-open {
-  background-position: -96px -120px;
-}
-
-.icon-eye-close {
-  background-position: -120px -120px;
-}
-
-.icon-warning-sign {
-  background-position: -144px -120px;
-}
-
-.icon-plane {
-  background-position: -168px -120px;
-}
-
-.icon-calendar {
-  background-position: -192px -120px;
-}
-
-.icon-random {
-  width: 16px;
-  background-position: -216px -120px;
-}
-
-.icon-comment {
-  background-position: -240px -120px;
-}
-
-.icon-magnet {
-  background-position: -264px -120px;
-}
-
-.icon-chevron-up {
-  background-position: -288px -120px;
-}
-
-.icon-chevron-down {
-  background-position: -313px -119px;
-}
-
-.icon-retweet {
-  background-position: -336px -120px;
-}
-
-.icon-shopping-cart {
-  background-position: -360px -120px;
-}
-
-.icon-folder-close {
-  background-position: -384px -120px;
-}
-
-.icon-folder-open {
-  width: 16px;
-  background-position: -408px -120px;
-}
-
-.icon-resize-vertical {
-  background-position: -432px -119px;
-}
-
-.icon-resize-horizontal {
-  background-position: -456px -118px;
-}
-
-.icon-hdd {
-  background-position: 0 -144px;
-}
-
-.icon-bullhorn {
-  background-position: -24px -144px;
-}
-
-.icon-bell {
-  background-position: -48px -144px;
-}
-
-.icon-certificate {
-  background-position: -72px -144px;
-}
-
-.icon-thumbs-up {
-  background-position: -96px -144px;
-}
-
-.icon-thumbs-down {
-  background-position: -120px -144px;
-}
-
-.icon-hand-right {
-  background-position: -144px -144px;
-}
-
-.icon-hand-left {
-  background-position: -168px -144px;
-}
-
-.icon-hand-up {
-  background-position: -192px -144px;
-}
-
-.icon-hand-down {
-  background-position: -216px -144px;
-}
-
-.icon-circle-arrow-right {
-  background-position: -240px -144px;
-}
-
-.icon-circle-arrow-left {
-  background-position: -264px -144px;
-}
-
-.icon-circle-arrow-up {
-  background-position: -288px -144px;
-}
-
-.icon-circle-arrow-down {
-  background-position: -312px -144px;
-}
-
-.icon-globe {
-  background-position: -336px -144px;
-}
-
-.icon-wrench {
-  background-position: -360px -144px;
-}
-
-.icon-tasks {
-  background-position: -384px -144px;
-}
-
-.icon-filter {
-  background-position: -408px -144px;
-}
-
-.icon-briefcase {
-  background-position: -432px -144px;
-}
-
-.icon-fullscreen {
-  background-position: -456px -144px;
-} */
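
For context, the swap described in the comment above trades fixed-size PNG sprites for glyphs in an icon font. Below is a minimal sketch of how FontAwesome-2.0-style rules replace the sprite rules; the font path and the \f002 codepoint follow FontAwesome 2.0 conventions but are assumptions for illustration, not taken from this repository.

/* Sketch only: icon glyphs served from a font instead of a PNG sprite. */
@font-face {
  font-family: 'FontAwesome';
  src: url('../font/fontawesome-webfont.woff') format('woff'); /* assumed path */
}

[class^="icon-"],
[class*=" icon-"] {
  display: inline-block;
  font-family: FontAwesome;  /* glyphs come from the font, not background-position */
  font-style: normal;
  text-decoration: inherit;
}

.icon-search:before {
  content: "\f002";          /* one codepoint; scales with font-size, unlike a sprite */
}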
-
-.dropup,
-.dropdown {
-  position: relative;
-}
-
-.dropdown-toggle {
-  *margin-bottom: -3px;
-}
-
-.dropdown-toggle:active,
-.open .dropdown-toggle {
-  outline: 0;
-}
-
-.caret {
-  display: inline-block;
-  width: 0;
-  height: 0;
-  vertical-align: top;
-  border-top: 4px solid #000000;
-  border-right: 4px solid transparent;
-  border-left: 4px solid transparent;
-  content: "";
-}
-
-.dropdown .caret {
-  margin-top: 8px;
-  margin-left: 2px;
-}
-
-.dropdown-menu {
-  position: absolute;
-  top: 100%;
-  left: 0;
-  z-index: 1000;
-  display: none;
-  float: left;
-  min-width: 160px;
-  padding: 5px 0;
-  margin: 2px 0 0;
-  list-style: none;
-  background-color: #ffffff;
-  border: 1px solid #ccc;
-  border: 1px solid rgba(0, 0, 0, 0.2);
-  *border-right-width: 2px;
-  *border-bottom-width: 2px;
-  -webkit-border-radius: 6px;
-     -moz-border-radius: 6px;
-          border-radius: 6px;
-  -webkit-box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2);
-     -moz-box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2);
-          box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2);
-  -webkit-background-clip: padding-box;
-     -moz-background-clip: padding;
-          background-clip: padding-box;
-}
-
-.dropdown-menu.pull-right {
-  right: 0;
-  left: auto;
-}
-
-.dropdown-menu .divider {
-  *width: 100%;
-  height: 1px;
-  margin: 9px 1px;
-  *margin: -5px 0 5px;
-  overflow: hidden;
-  background-color: #e5e5e5;
-  border-bottom: 1px solid #ffffff;
-}
-
-.dropdown-menu a {
-  display: block;
-  padding: 3px 20px;
-  clear: both;
-  font-weight: normal;
-  line-height: 20px;
-  color: #333333;
-  white-space: nowrap;
-}
-
-.dropdown-menu li > a:hover,
-.dropdown-menu li > a:focus,
-.dropdown-submenu:hover > a {
-  color: #ffffff;
-  text-decoration: none;
-  background-color: #0088cc;
-  background-color: #0081c2;
-  background-image: -moz-linear-gradient(top, #0088cc, #0077b3);
-  background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#0088cc), to(#0077b3));
-  background-image: -webkit-linear-gradient(top, #0088cc, #0077b3);
-  background-image: -o-linear-gradient(top, #0088cc, #0077b3);
-  background-image: linear-gradient(to bottom, #0088cc, #0077b3);
-  background-repeat: repeat-x;
-  filter: progid:dximagetransform.microsoft.gradient(startColorstr='#ff0088cc', endColorstr='#ff0077b3', GradientType=0);
-}
-
-.dropdown-menu .active > a,
-.dropdown-menu .active > a:hover {
-  color: #ffffff;
-  text-decoration: none;
-  background-color: #0088cc;
-  background-color: #0081c2;
-  background-image: linear-gradient(to bottom, #0088cc, #0077b3);
-  background-image: -moz-linear-gradient(top, #0088cc, #0077b3);
-  background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#0088cc), to(#0077b3));
-  background-image: -webkit-linear-gradient(top, #0088cc, #0077b3);
-  background-image: -o-linear-gradient(top, #0088cc, #0077b3);
-  background-repeat: repeat-x;
-  outline: 0;
-  filter: progid:dximagetransform.microsoft.gradient(startColorstr='#ff0088cc', endColorstr='#ff0077b3', GradientType=0);
-}
-
-.dropdown-menu .disabled > a,
-.dropdown-menu .disabled > a:hover {
-  color: #999999;
-}
-
-.dropdown-menu .disabled > a:hover {
-  text-decoration: none;
-  cursor: default;
-  background-color: transparent;
-}
-
-.open {
-  *z-index: 1000;
-}
-
-.open > .dropdown-menu {
-  display: block;
-}
-
-.pull-right > .dropdown-menu {
-  right: 0;
-  left: auto;
-}
-
-.dropup .caret,
-.navbar-fixed-bottom .dropdown .caret {
-  border-top: 0;
-  border-bottom: 4px solid #000000;
-  content: "";
-}
-
-.dropup .dropdown-menu,
-.navbar-fixed-bottom .dropdown .dropdown-menu {
-  top: auto;
-  bottom: 100%;
-  margin-bottom: 1px;
-}
-
-.dropdown-submenu {
-  position: relative;
-}
-
-.dropdown-submenu > .dropdown-menu {
-  top: 0;
-  left: 100%;
-  margin-top: -6px;
-  margin-left: -1px;
-  -webkit-border-radius: 0 6px 6px 6px;
-     -moz-border-radius: 0 6px 6px 6px;
-          border-radius: 0 6px 6px 6px;
-}
-
-.dropdown-submenu:hover > .dropdown-menu {
-  display: block;
-}
-
-.dropdown-submenu > a:after {
-  display: block;
-  float: right;
-  width: 0;
-  height: 0;
-  margin-top: 5px;
-  margin-right: -10px;
-  border-color: transparent;
-  border-left-color: #cccccc;
-  border-style: solid;
-  border-width: 5px 0 5px 5px;
-  content: " ";
-}
-
-.dropdown-submenu:hover > a:after {
-  border-left-color: #ffffff;
-}
-
-.dropdown .dropdown-menu .nav-header {
-  padding-right: 20px;
-  padding-left: 20px;
-}
-
-.typeahead {
-  margin-top: 2px;
-  -webkit-border-radius: 4px;
-     -moz-border-radius: 4px;
-          border-radius: 4px;
-}
-
-.well {
-  min-height: 20px;
-  padding: 19px;
-  margin-bottom: 20px;
-  background-color: #f5f5f5;
-  border: 1px solid #e3e3e3;
-  -webkit-border-radius: 4px;
-     -moz-border-radius: 4px;
-          border-radius: 4px;
-  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.05);
-     -moz-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.05);
-          box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.05);
-}
-
-.well blockquote {
-  border-color: #ddd;
-  border-color: rgba(0, 0, 0, 0.15);
-}
-
-.well-large {
-  padding: 24px;
-  -webkit-border-radius: 6px;
-     -moz-border-radius: 6px;
-          border-radius: 6px;
-}
-
-.well-small {
-  padding: 9px;
-  -webkit-border-radius: 3px;
-     -moz-border-radius: 3px;
-          border-radius: 3px;
-}
-
-.fade {
-  opacity: 0;
-  -webkit-transition: opacity 0.15s linear;
-     -moz-transition: opacity 0.15s linear;
-       -o-transition: opacity 0.15s linear;
-          transition: opacity 0.15s linear;
-}
-
-.fade.in {
-  opacity: 1;
-}
-
-.collapse {
-  position: relative;
-  height: 0;
-  overflow: hidden;
-  -webkit-transition: height 0.35s ease;
-     -moz-transition: height 0.35s ease;
-       -o-transition: height 0.35s ease;
-          transition: height 0.35s ease;
-}
-
-.collapse.in {
-  height: auto;
-}
-
-.close {
-  float: right;
-  font-size: 20px;
-  font-weight: bold;
-  line-height: 20px;
-  color: #000000;
-  text-shadow: 0 1px 0 #ffffff;
-  opacity: 0.2;
-  filter: alpha(opacity=20);
-}
-
-.close:hover {
-  color: #000000;
-  text-decoration: none;
-  cursor: pointer;
-  opacity: 0.4;
-  filter: alpha(opacity=40);
-}
-
-button.close {
-  padding: 0;
-  cursor: pointer;
-  background: transparent;
-  border: 0;
-  -webkit-appearance: none;
-}
-
-.btn {
-  display: inline-block;
-  *display: inline;
-  padding: 4px 14px;
-  margin-bottom: 0;
-  *margin-left: .3em;
-  font-size: 14px;
-  line-height: 20px;
-  *line-height: 20px;
-  color: #333333;
-  text-align: center;
-  text-shadow: 0 1px 1px rgba(255, 255, 255, 0.75);
-  vertical-align: middle;
-  cursor: pointer;
-  background-color: #f5f5f5;
-  *background-color: #e6e6e6;
-  background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#ffffff), to(#e6e6e6));
-  background-image: -webkit-linear-gradient(top, #ffffff, #e6e6e6);
-  background-image: -o-linear-gradient(top, #ffffff, #e6e6e6);
-  background-image: linear-gradient(to bottom, #ffffff, #e6e6e6);
-  background-image: -moz-linear-gradient(top, #ffffff, #e6e6e6);
-  background-repeat: repeat-x;
-  border: 1px solid #bbbbbb;
-  *border: 0;
-  border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25);
-  border-color: #e6e6e6 #e6e6e6 #bfbfbf;
-  border-bottom-color: #a2a2a2;
-  -webkit-border-radius: 4px;
-     -moz-border-radius: 4px;
-          border-radius: 4px;
-  filter: progid:dximagetransform.microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe6e6e6', GradientType=0);
-  filter: progid:dximagetransform.microsoft.gradient(enabled=false);
-  *zoom: 1;
-  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.2), 0 1px 2px rgba(0, 0, 0, 0.05);
-     -moz-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.2), 0 1px 2px rgba(0, 0, 0, 0.05);
-          box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.2), 0 1px 2px rgba(0, 0, 0, 0.05);
-}
-
-.btn:hover,
-.btn:active,
-.btn.active,
-.btn.disabled,
-.btn[disabled] {
-  color: #333333;
-  background-color: #e6e6e6;
-  *background-color: #d9d9d9;
-}
-
-.btn:active,
-.btn.active {
-  background-color: #cccccc \9;
-}
-
-.btn:first-child {
-  *margin-left: 0;
-}
-
-.btn:hover {
-  color: #333333;
-  text-decoration: none;
-  background-color: #e6e6e6;
-  *background-color: #d9d9d9;
-  /* Buttons in IE7 don't get borders, so darken on hover */
-
-  background-position: 0 -15px;
-  -webkit-transition: background-position 0.1s linear;
-     -moz-transition: background-position 0.1s linear;
-       -o-transition: background-position 0.1s linear;
-          transition: background-position 0.1s linear;
-}
-
-.btn:focus {
-  outline: thin dotted #333;
-  outline: 5px auto -webkit-focus-ring-color;
-  outline-offset: -2px;
-}
-
-.btn.active,
-.btn:active {
-  background-color: #e6e6e6;
-  background-color: #d9d9d9 \9;
-  background-image: none;
-  outline: 0;
-  -webkit-box-shadow: inset 0 2px 4px rgba(0, 0, 0, 0.15), 0 1px 2px rgba(0, 0, 0, 0.05);
-     -moz-box-shadow: inset 0 2px 4px rgba(0, 0, 0, 0.15), 0 1px 2px rgba(0, 0, 0, 0.05);
-          box-shadow: inset 0 2px 4px rgba(0, 0, 0, 0.15), 0 1px 2px rgba(0, 0, 0, 0.05);
-}
-
-.btn.disabled,
-.btn[disabled] {
-  cursor: default;
-  background-color: #e6e6e6;
-  background-image: none;
-  opacity: 0.65;
-  filter: alpha(opacity=65);
-  -webkit-box-shadow: none;
-     -moz-box-shadow: none;
-          box-shadow: none;
-}
-
-.btn-large {
-  padding: 9px 14px;
-  font-size: 16px;
-  line-height: normal;
-  -webkit-border-radius: 5px;
-     -moz-border-radius: 5px;
-          border-radius: 5px;
-}
-
-.btn-large [class^="icon-"] {
-  margin-top: 2px;
-}
-
-.btn-small {
-  padding: 3px 9px;
-  font-size: 12px;
-  line-height: 18px;
-}
-
-.btn-small [class^="icon-"] {
-  margin-top: 0;
-}
-
-.btn-mini {
-  padding: 2px 6px;
-  font-size: 11px;
-  line-height: 17px;
-}
-
-.btn-block {
-  display: block;
-  width: 100%;
-  padding-right: 0;
-  padding-left: 0;
-  -webkit-box-sizing: border-box;
-     -moz-box-sizing: border-box;
-          box-sizing: border-box;
-}
-
-.btn-block + .btn-block {
-  margin-top: 5px;
-}
-
-input[type="submit"].btn-block,
-input[type="reset"].btn-block,
-input[type="button"].btn-block {
-  width: 100%;
-}
-
-.btn-primary.active,
-.btn-warning.active,
-.btn-danger.active,
-.btn-success.active,
-.btn-info.active,
-.btn-inverse.active {
-  color: rgba(255, 255, 255, 0.75);
-}
-
-.btn {
-  border-color: #c5c5c5;
-  border-color: rgba(0, 0, 0, 0.15) rgba(0, 0, 0, 0.15) rgba(0, 0, 0, 0.25);
-}
-
-.btn-primary {
-  color: #ffffff;
-  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);
-  background-color: #006dcc;
-  *background-color: #0044cc;
-  background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#0088cc), to(#0044cc));
-  background-image: -webkit-linear-gradient(top, #0088cc, #0044cc);
-  background-image: -o-linear-gradient(top, #0088cc, #0044cc);
-  background-image: linear-gradient(to bottom, #0088cc, #0044cc);
-  background-image: -moz-linear-gradient(top, #0088cc, #0044cc);
-  background-repeat: repeat-x;
-  border-color: #0044cc #0044cc #002a80;
-  border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25);
-  filter: progid:dximagetransform.microsoft.gradient(startColorstr='#ff0088cc', endColorstr='#ff0044cc', GradientType=0);
-  filter: progid:dximagetransform.microsoft.gradient(enabled=false);
-}
-
-.btn-primary:hover,
-.btn-primary:active,
-.btn-primary.active,
-.btn-primary.disabled,
-.btn-primary[disabled] {
-  color: #ffffff;
-  background-color: #0044cc;
-  *background-color: #003bb3;
-}
-
-.btn-primary:active,
-.btn-primary.active {
-  background-color: #003399 \9;
-}
-
-.btn-warning {
-  color: #ffffff;
-  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);
-  background-color: #faa732;
-  *background-color: #f89406;
-  background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#fbb450), to(#f89406));
-  background-image: -webkit-linear-gradient(top, #fbb450, #f89406);
-  background-image: -o-linear-gradient(top, #fbb450, #f89406);
-  background-image: linear-gradient(to bottom, #fbb450, #f89406);
-  background-image: -moz-linear-gradient(top, #fbb450, #f89406);
-  background-repeat: repeat-x;
-  border-color: #f89406 #f89406 #ad6704;
-  border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25);
-  filter: progid:dximagetransform.microsoft.gradient(startColorstr='#fffbb450', endColorstr='#fff89406', GradientType=0);
-  filter: progid:dximagetransform.microsoft.gradient(enabled=false);
-}
-
-.btn-warning:hover,
-.btn-warning:active,
-.btn-warning.active,
-.btn-warning.disabled,
-.btn-warning[disabled] {
-  color: #ffffff;
-  background-color: #f89406;
-  *background-color: #df8505;
-}
-
-.btn-warning:active,
-.btn-warning.active {
-  background-color: #c67605 \9;
-}
-
-.btn-danger {
-  color: #ffffff;
-  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);
-  background-color: #da4f49;
-  *background-color: #bd362f;
-  background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#ee5f5b), to(#bd362f));
-  background-image: -webkit-linear-gradient(top, #ee5f5b, #bd362f);
-  background-image: -o-linear-gradient(top, #ee5f5b, #bd362f);
-  background-image: linear-gradient(to bottom, #ee5f5b, #bd362f);
-  background-image: -moz-linear-gradient(top, #ee5f5b, #bd362f);
-  background-repeat: repeat-x;
-  border-color: #bd362f #bd362f #802420;
-  border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25);
-  filter: progid:dximagetransform.microsoft.gradient(startColorstr='#ffee5f5b', endColorstr='#ffbd362f', GradientType=0);
-  filter: progid:dximagetransform.microsoft.gradient(enabled=false);
-}
-
-.btn-danger:hover,
-.btn-danger:active,
-.btn-danger.active,
-.btn-danger.disabled,
-.btn-danger[disabled] {
-  color: #ffffff;
-  background-color: #bd362f;
-  *background-color: #a9302a;
-}
-
-.btn-danger:active,
-.btn-danger.active {
-  background-color: #942a25 \9;
-}
-
-.btn-success {
-  color: #ffffff;
-  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);
-  background-color: #5bb75b;
-  *background-color: #51a351;
-  background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#62c462), to(#51a351));
-  background-image: -webkit-linear-gradient(top, #62c462, #51a351);
-  background-image: -o-linear-gradient(top, #62c462, #51a351);
-  background-image: linear-gradient(to bottom, #62c462, #51a351);
-  background-image: -moz-linear-gradient(top, #62c462, #51a351);
-  background-repeat: repeat-x;
-  border-color: #51a351 #51a351 #387038;
-  border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25);
-  filter: progid:dximagetransform.microsoft.gradient(startColorstr='#ff62c462', endColorstr='#ff51a351', GradientType=0);
-  filter: progid:dximagetransform.microsoft.gradient(enabled=false);
-}
-
-.btn-success:hover,
-.btn-success:active,
-.btn-success.active,
-.btn-success.disabled,
-.btn-success[disabled] {
-  color: #ffffff;
-  background-color: #51a351;
-  *background-color: #499249;
-}
-
-.btn-success:active,
-.btn-success.active {
-  background-color: #408140 \9;
-}
-
-.btn-info {
-  color: #ffffff;
-  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);
-  background-color: #49afcd;
-  *background-color: #2f96b4;
-  background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#5bc0de), to(#2f96b4));
-  background-image: -webkit-linear-gradient(top, #5bc0de, #2f96b4);
-  background-image: -o-linear-gradient(top, #5bc0de, #2f96b4);
-  background-image: linear-gradient(to bottom, #5bc0de, #2f96b4);
-  background-image: -moz-linear-gradient(top, #5bc0de, #2f96b4);
-  background-repeat: repeat-x;
-  border-color: #2f96b4 #2f96b4 #1f6377;
-  border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25);
-  filter: progid:dximagetransform.microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff2f96b4', GradientType=0);
-  filter: progid:dximagetransform.microsoft.gradient(enabled=false);
-}
-
-.btn-info:hover,
-.btn-info:active,
-.btn-info.active,
-.btn-info.disabled,
-.btn-info[disabled] {
-  color: #ffffff;
-  background-color: #2f96b4;
-  *background-color: #2a85a0;
-}
-
-.btn-info:active,
-.btn-info.active {
-  background-color: #24748c \9;
-}
-
-.btn-inverse {
-  color: #ffffff;
-  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);
-  background-color: #363636;
-  *background-color: #222222;
-  background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#444444), to(#222222));
-  background-image: -webkit-linear-gradient(top, #444444, #222222);
-  background-image: -o-linear-gradient(top, #444444, #222222);
-  background-image: linear-gradient(to bottom, #444444, #222222);
-  background-image: -moz-linear-gradient(top, #444444, #222222);
-  background-repeat: repeat-x;
-  border-color: #222222 #222222 #000000;
-  border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25);
-  filter: progid:dximagetransform.microsoft.gradient(startColorstr='#ff444444', endColorstr='#ff222222', GradientType=0);
-  filter: progid:dximagetransform.microsoft.gradient(enabled=false);
-}
-
-.btn-inverse:hover,
-.btn-inverse:active,
-.btn-inverse.active,
-.btn-inverse.disabled,
-.btn-inverse[disabled] {
-  color: #ffffff;
-  background-color: #222222;
-  *background-color: #151515;
-}
-
-.btn-inverse:active,
-.btn-inverse.active {
-  background-color: #080808 \9;
-}
-
-button.btn,
-input[type="submit"].btn {
-  *padding-top: 3px;
-  *padding-bottom: 3px;
-}
-
-button.btn::-moz-focus-inner,
-input[type="submit"].btn::-moz-focus-inner {
-  padding: 0;
-  border: 0;
-}
-
-button.btn.btn-large,
-input[type="submit"].btn.btn-large {
-  *padding-top: 7px;
-  *padding-bottom: 7px;
-}
-
-button.btn.btn-small,
-input[type="submit"].btn.btn-small {
-  *padding-top: 3px;
-  *padding-bottom: 3px;
-}
-
-button.btn.btn-mini,
-input[type="submit"].btn.btn-mini {
-  *padding-top: 1px;
-  *padding-bottom: 1px;
-}
-
-.btn-link,
-.btn-link:active,
-.btn-link[disabled] {
-  background-color: transparent;
-  background-image: none;
-  -webkit-box-shadow: none;
-     -moz-box-shadow: none;
-          box-shadow: none;
-}
-
-.btn-link {
-  color: #0088cc;
-  cursor: pointer;
-  border-color: transparent;
-  -webkit-border-radius: 0;
-     -moz-border-radius: 0;
-          border-radius: 0;
-}
-
-.btn-link:hover {
-  color: #005580;
-  text-decoration: underline;
-  background-color: transparent;
-}
-
-.btn-link[disabled]:hover {
-  color: #333333;
-  text-decoration: none;
-}
-
-.btn-group {
-  position: relative;
-  *margin-left: .3em;
-  font-size: 0;
-  white-space: nowrap;
-  vertical-align: middle;
-}
-
-.btn-group:first-child {
-  *margin-left: 0;
-}
-
-.btn-group + .btn-group {
-  margin-left: 5px;
-}
-
-.btn-toolbar {
-  margin-top: 10px;
-  margin-bottom: 10px;
-  font-size: 0;
-}
-
-.btn-toolbar .btn-group {
-  display: inline-block;
-  *display: inline;
-  /* IE7 inline-block hack */
-
-  *zoom: 1;
-}
-
-.btn-toolbar .btn + .btn,
-.btn-toolbar .btn-group + .btn,
-.btn-toolbar .btn + .btn-group {
-  margin-left: 5px;
-}
-
-.btn-group > .btn {
-  position: relative;
-  -webkit-border-radius: 0;
-     -moz-border-radius: 0;
-          border-radius: 0;
-}
-
-.btn-group > .btn + .btn {
-  margin-left: -1px;
-}
-
-.btn-group > .btn,
-.btn-group > .dropdown-menu {
-  font-size: 14px;
-}
-
-.btn-group > .btn-mini {
-  font-size: 11px;
-}
-
-.btn-group > .btn-small {
-  font-size: 12px;
-}
-
-.btn-group > .btn-large {
-  font-size: 16px;
-}
-
-.btn-group > .btn:first-child {
-  margin-left: 0;
-  -webkit-border-bottom-left-radius: 4px;
-          border-bottom-left-radius: 4px;
-  -webkit-border-top-left-radius: 4px;
-          border-top-left-radius: 4px;
-  -moz-border-radius-bottomleft: 4px;
-  -moz-border-radius-topleft: 4px;
-}
-
-.btn-group > .btn:last-child,
-.btn-group > .dropdown-toggle {
-  -webkit-border-top-right-radius: 4px;
-          border-top-right-radius: 4px;
-  -webkit-border-bottom-right-radius: 4px;
-          border-bottom-right-radius: 4px;
-  -moz-border-radius-topright: 4px;
-  -moz-border-radius-bottomright: 4px;
-}
-
-.btn-group > .btn.large:first-child {
-  margin-left: 0;
-  -webkit-border-bottom-left-radius: 6px;
-          border-bottom-left-radius: 6px;
-  -webkit-border-top-left-radius: 6px;
-          border-top-left-radius: 6px;
-  -moz-border-radius-bottomleft: 6px;
-  -moz-border-radius-topleft: 6px;
-}
-
-.btn-group > .btn.large:last-child,
-.btn-group > .large.dropdown-toggle {
-  -webkit-border-top-right-radius: 6px;
-          border-top-right-radius: 6px;
-  -webkit-border-bottom-right-radius: 6px;
-          border-bottom-right-radius: 6px;
-  -moz-border-radius-topright: 6px;
-  -moz-border-radius-bottomright: 6px;
-}
-
-.btn-group > .btn:hover,
-.btn-group > .btn:focus,
-.btn-group > .btn:active,
-.btn-group > .btn.active {
-  z-index: 2;
-}
-
-.btn-group .dropdown-toggle:active,
-.btn-group.open .dropdown-toggle {
-  outline: 0;
-}
-
-.btn-group > .btn + .dropdown-toggle {
-  *padding-top: 5px;
-  padding-right: 8px;
-  *padding-bottom: 5px;
-  padding-left: 8px;
-  -webkit-box-shadow: inset 1px 0 0 rgba(255, 255, 255, 0.125), inset 0 1px 0 rgba(255, 255, 255, 0.2), 0 1px 2px rgba(0, 0, 0, 0.05);
-     -moz-box-shadow: inset 1px 0 0 rgba(255, 255, 255, 0.125), inset 0 1px 0 rgba(255, 255, 255, 0.2), 0 1px 2px rgba(0, 0, 0, 0.05);
-          box-shadow: inset 1px 0 0 rgba(255, 255, 255, 0.125), inset 0 1px 0 rgba(255, 255, 255, 0.2), 0 1px 2px rgba(0, 0, 0, 0.05);
-}
-
-.btn-group > .btn-mini + .dropdown-toggle {
-  *padding-top: 2px;
-  padding-right: 5px;
-  *padding-bottom: 2px;
-  padding-left: 5px;
-}
-
-.btn-group > .btn-small + .dropdown-toggle {
-  *padding-top: 5px;
-  *padding-bottom: 4px;
-}
-
-.btn-group > .btn-large + .dropdown-toggle {
-  *padding-top: 7px;
-  padding-right: 12px;
-  *padding-bottom: 7px;
-  padding-left: 12px;
-}
-
-.btn-group.open .dropdown-toggle {
-  background-image: none;
-  -webkit-box-shadow: inset 0 2px 4px rgba(0, 0, 0, 0.15), 0 1px 2px rgba(0, 0, 0, 0.05);
-     -moz-box-shadow: inset 0 2px 4px rgba(0, 0, 0, 0.15), 0 1px 2px rgba(0, 0, 0, 0.05);
-          box-shadow: inset 0 2px 4px rgba(0, 0, 0, 0.15), 0 1px 2px rgba(0, 0, 0, 0.05);
-}
-
-.btn-group.open .btn.dropdown-toggle {
-  background-color: #e6e6e6;
-}
-
-.btn-group.open .btn-primary.dropdown-toggle {
-  background-color: #0044cc;
-}
-
-.btn-group.open .btn-warning.dropdown-toggle {
-  background-color: #f89406;
-}
-
-.btn-group.open .btn-danger.dropdown-toggle {
-  background-color: #bd362f;
-}
-
-.btn-group.open .btn-success.dropdown-toggle {
-  background-color: #51a351;
-}
-
-.btn-group.open .btn-info.dropdown-toggle {
-  background-color: #2f96b4;
-}
-
-.btn-group.open .btn-inverse.dropdown-toggle {
-  background-color: #222222;
-}
-
-.btn .caret {
-  margin-top: 8px;
-  margin-left: 0;
-}
-
-.btn-mini .caret,
-.btn-small .caret,
-.btn-large .caret {
-  margin-top: 6px;
-}
-
-.btn-large .caret {
-  border-top-width: 5px;
-  border-right-width: 5px;
-  border-left-width: 5px;
-}
-
-.dropup .btn-large .caret {
-  border-top: 0;
-  border-bottom: 5px solid #000000;
-}
-
-.btn-primary .caret,
-.btn-warning .caret,
-.btn-danger .caret,
-.btn-info .caret,
-.btn-success .caret,
-.btn-inverse .caret {
-  border-top-color: #ffffff;
-  border-bottom-color: #ffffff;
-}
-
-.btn-group-vertical {
-  display: inline-block;
-  *display: inline;
-  /* IE7 inline-block hack */
-
-  *zoom: 1;
-}
-
-.btn-group-vertical .btn {
-  display: block;
-  float: none;
-  width: 100%;
-  -webkit-border-radius: 0;
-     -moz-border-radius: 0;
-          border-radius: 0;
-}
-
-.btn-group-vertical .btn + .btn {
-  margin-top: -1px;
-  margin-left: 0;
-}
-
-.btn-group-vertical .btn:first-child {
-  -webkit-border-radius: 4px 4px 0 0;
-     -moz-border-radius: 4px 4px 0 0;
-          border-radius: 4px 4px 0 0;
-}
-
-.btn-group-vertical .btn:last-child {
-  -webkit-border-radius: 0 0 4px 4px;
-     -moz-border-radius: 0 0 4px 4px;
-          border-radius: 0 0 4px 4px;
-}
-
-.btn-group-vertical .btn-large:first-child {
-  -webkit-border-radius: 6px 6px 0 0;
-     -moz-border-radius: 6px 6px 0 0;
-          border-radius: 6px 6px 0 0;
-}
-
-.btn-group-vertical .btn-large:last-child {
-  -webkit-border-radius: 0 0 6px 6px;
-     -moz-border-radius: 0 0 6px 6px;
-          border-radius: 0 0 6px 6px;
-}
-
-.alert {
-  padding: 8px 35px 8px 14px;
-  margin-bottom: 20px;
-  color: #c09853;
-  text-shadow: 0 1px 0 rgba(255, 255, 255, 0.5);
-  background-color: #fcf8e3;
-  border: 1px solid #fbeed5;
-  -webkit-border-radius: 4px;
-     -moz-border-radius: 4px;
-          border-radius: 4px;
-}
-
-.alert h4 {
-  margin: 0;
-}
-
-.alert .close {
-  position: relative;
-  top: -2px;
-  right: -21px;
-  line-height: 20px;
-}
-
-.alert-success {
-  color: #468847;
-  background-color: #dff0d8;
-  border-color: #d6e9c6;
-}
-
-.alert-danger,
-.alert-error {
-  color: #b94a48;
-  background-color: #f2dede;
-  border-color: #eed3d7;
-}
-
-.alert-info {
-  color: #3a87ad;
-  background-color: #d9edf7;
-  border-color: #bce8f1;
-}
-
-.alert-block {
-  padding-top: 14px;
-  padding-bottom: 14px;
-}
-
-.alert-block > p,
-.alert-block > ul {
-  margin-bottom: 0;
-}
-
-.alert-block p + p {
-  margin-top: 5px;
-}
-
-.nav {
-  margin-bottom: 20px;
-  margin-left: 0;
-  list-style: none;
-}
-
-.nav > li > a {
-  display: block;
-}
-
-.nav > li > a:hover {
-  text-decoration: none;
-  background-color: #eeeeee;
-}
-
-.nav > .pull-right {
-  float: right;
-}
-
-.nav-header {
-  display: block;
-  padding: 3px 15px;
-  font-size: 11px;
-  font-weight: bold;
-  line-height: 20px;
-  color: #999999;
-  text-shadow: 0 1px 0 rgba(255, 255, 255, 0.5);
-  text-transform: uppercase;
-}
-
-.nav li + .nav-header {
-  margin-top: 9px;
-}
-
-.nav-list {
-  padding-right: 15px;
-  padding-left: 15px;
-  margin-bottom: 0;
-}
-
-.nav-list > li > a,
-.nav-list .nav-header {
-  margin-right: -15px;
-  margin-left: -15px;
-  text-shadow: 0 1px 0 rgba(255, 255, 255, 0.5);
-}
-
-.nav-list > li > a {
-  padding: 3px 15px;
-}
-
-.nav-list > .active > a,
-.nav-list > .active > a:hover {
-  color: #ffffff;
-  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.2);
-  background-color: #0088cc;
-}
-
-.nav-list [class^="icon-"] {
-  margin-right: 2px;
-}
-
-.nav-list .divider {
-  *width: 100%;
-  height: 1px;
-  margin: 9px 1px;
-  *margin: -5px 0 5px;
-  overflow: hidden;
-  background-color: #e5e5e5;
-  border-bottom: 1px solid #ffffff;
-}
-
-.nav-tabs,
-.nav-pills {
-  *zoom: 1;
-}
-
-.nav-tabs:before,
-.nav-pills:before,
-.nav-tabs:after,
-.nav-pills:after {
-  display: table;
-  line-height: 0;
-  content: "";
-}
-
-.nav-tabs:after,
-.nav-pills:after {
-  clear: both;
-}
-
-.nav-tabs > li,
-.nav-pills > li {
-  float: left;
-}
-
-.nav-tabs > li > a,
-.nav-pills > li > a {
-  padding-right: 12px;
-  padding-left: 12px;
-  margin-right: 2px;
-  line-height: 14px;
-}
-
-.nav-tabs {
-  border-bottom: 1px solid #ddd;
-}
-
-.nav-tabs > li {
-  margin-bottom: -1px;
-}
-
-.nav-tabs > li > a {
-  padding-top: 8px;
-  padding-bottom: 8px;
-  line-height: 20px;
-  border: 1px solid transparent;
-  -webkit-border-radius: 4px 4px 0 0;
-     -moz-border-radius: 4px 4px 0 0;
-          border-radius: 4px 4px 0 0;
-}
-
-.nav-tabs > li > a:hover {
-  border-color: #eeeeee #eeeeee #dddddd;
-}
-
-.nav-tabs > .active > a,
-.nav-tabs > .active > a:hover {
-  color: #555555;
-  cursor: default;
-  background-color: #ffffff;
-  border: 1px solid #ddd;
-  border-bottom-color: transparent;
-}
-
-.nav-pills > li > a {
-  padding-top: 8px;
-  padding-bottom: 8px;
-  margin-top: 2px;
-  margin-bottom: 2px;
-  -webkit-border-radius: 5px;
-     -moz-border-radius: 5px;
-          border-radius: 5px;
-}
-
-.nav-pills > .active > a,
-.nav-pills > .active > a:hover {
-  color: #ffffff;
-  background-color: #0088cc;
-}
-
-.nav-stacked > li {
-  float: none;
-}
-
-.nav-stacked > li > a {
-  margin-right: 0;
-}
-
-.nav-tabs.nav-stacked {
-  border-bottom: 0;
-}
-
-.nav-tabs.nav-stacked > li > a {
-  border: 1px solid #ddd;
-  -webkit-border-radius: 0;
-     -moz-border-radius: 0;
-          border-radius: 0;
-}
-
-.nav-tabs.nav-stacked > li:first-child > a {
-  -webkit-border-top-right-radius: 4px;
-          border-top-right-radius: 4px;
-  -webkit-border-top-left-radius: 4px;
-          border-top-left-radius: 4px;
-  -moz-border-radius-topright: 4px;
-  -moz-border-radius-topleft: 4px;
-}
-
-.nav-tabs.nav-stacked > li:last-child > a {
-  -webkit-border-bottom-right-radius: 4px;
-          border-bottom-right-radius: 4px;
-  -webkit-border-bottom-left-radius: 4px;
-          border-bottom-left-radius: 4px;
-  -moz-border-radius-bottomright: 4px;
-  -moz-border-radius-bottomleft: 4px;
-}
-
-.nav-tabs.nav-stacked > li > a:hover {
-  z-index: 2;
-  border-color: #ddd;
-}
-
-.nav-pills.nav-stacked > li > a {
-  margin-bottom: 3px;
-}
-
-.nav-pills.nav-stacked > li:last-child > a {
-  margin-bottom: 1px;
-}
-
-.nav-tabs .dropdown-menu {
-  -webkit-border-radius: 0 0 6px 6px;
-     -moz-border-radius: 0 0 6px 6px;
-          border-radius: 0 0 6px 6px;
-}
-
-.nav-pills .dropdown-menu {
-  -webkit-border-radius: 6px;
-     -moz-border-radius: 6px;
-          border-radius: 6px;
-}
-
-.nav .dropdown-toggle .caret {
-  margin-top: 6px;
-  border-top-color: #0088cc;
-  border-bottom-color: #0088cc;
-}
-
-.nav .dropdown-toggle:hover .caret {
-  border-top-color: #005580;
-  border-bottom-color: #005580;
-}
-
-/* move down carets for tabs */
-
-.nav-tabs .dropdown-toggle .caret {
-  margin-top: 8px;
-}
-
-.nav .active .dropdown-toggle .caret {
-  border-top-color: #fff;
-  border-bottom-color: #fff;
-}
-
-.nav-tabs .active .dropdown-toggle .caret {
-  border-top-color: #555555;
-  border-bottom-color: #555555;
-}
-
-.nav > .dropdown.active > a:hover {
-  cursor: pointer;
-}
-
-.nav-tabs .open .dropdown-toggle,
-.nav-pills .open .dropdown-toggle,
-.nav > li.dropdown.open.active > a:hover {
-  color: #ffffff;
-  background-color: #999999;
-  border-color: #999999;
-}
-
-.nav li.dropdown.open .caret,
-.nav li.dropdown.open.active .caret,
-.nav li.dropdown.open a:hover .caret {
-  border-top-color: #ffffff;
-  border-bottom-color: #ffffff;
-  opacity: 1;
-  filter: alpha(opacity=100);
-}
-
-.tabs-stacked .open > a:hover {
-  border-color: #999999;
-}
-
-.tabbable {
-  *zoom: 1;
-}
-
-.tabbable:before,
-.tabbable:after {
-  display: table;
-  line-height: 0;
-  content: "";
-}
-
-.tabbable:after {
-  clear: both;
-}
-
-.tab-content {
-  overflow: auto;
-}
-
-.tabs-below > .nav-tabs,
-.tabs-right > .nav-tabs,
-.tabs-left > .nav-tabs {
-  border-bottom: 0;
-}
-
-.tab-content > .tab-pane,
-.pill-content > .pill-pane {
-  display: none;
-}
-
-.tab-content > .active,
-.pill-content > .active {
-  display: block;
-}
-
-.tabs-below > .nav-tabs {
-  border-top: 1px solid #ddd;
-}
-
-.tabs-below > .nav-tabs > li {
-  margin-top: -1px;
-  margin-bottom: 0;
-}
-
-.tabs-below > .nav-tabs > li > a {
-  -webkit-border-radius: 0 0 4px 4px;
-     -moz-border-radius: 0 0 4px 4px;
-          border-radius: 0 0 4px 4px;
-}
-
-.tabs-below > .nav-tabs > li > a:hover {
-  border-top-color: #ddd;
-  border-bottom-color: transparent;
-}
-
-.tabs-below > .nav-tabs > .active > a,
-.tabs-below > .nav-tabs > .active > a:hover {
-  border-color: transparent #ddd #ddd #ddd;
-}
-
-.tabs-left > .nav-tabs > li,
-.tabs-right > .nav-tabs > li {
-  float: none;
-}
-
-.tabs-left > .nav-tabs > li > a,
-.tabs-right > .nav-tabs > li > a {
-  min-width: 74px;
-  margin-right: 0;
-  margin-bottom: 3px;
-}
-
-.tabs-left > .nav-tabs {
-  float: left;
-  margin-right: 19px;
-  border-right: 1px solid #ddd;
-}
-
-.tabs-left > .nav-tabs > li > a {
-  margin-right: -1px;
-  -webkit-border-radius: 4px 0 0 4px;
-     -moz-border-radius: 4px 0 0 4px;
-          border-radius: 4px 0 0 4px;
-}
-
-.tabs-left > .nav-tabs > li > a:hover {
-  border-color: #eeeeee #dddddd #eeeeee #eeeeee;
-}
-
-.tabs-left > .nav-tabs .active > a,
-.tabs-left > .nav-tabs .active > a:hover {
-  border-color: #ddd transparent #ddd #ddd;
-  *border-right-color: #ffffff;
-}
-
-.tabs-right > .nav-tabs {
-  float: right;
-  margin-left: 19px;
-  border-left: 1px solid #ddd;
-}
-
-.tabs-right > .nav-tabs > li > a {
-  margin-left: -1px;
-  -webkit-border-radius: 0 4px 4px 0;
-     -moz-border-radius: 0 4px 4px 0;
-          border-radius: 0 4px 4px 0;
-}
-
-.tabs-right > .nav-tabs > li > a:hover {
-  border-color: #eeeeee #eeeeee #eeeeee #dddddd;
-}
-
-.tabs-right > .nav-tabs .active > a,
-.tabs-right > .nav-tabs .active > a:hover {
-  border-color: #ddd #ddd #ddd transparent;
-  *border-left-color: #ffffff;
-}
-
-.nav > .disabled > a {
-  color: #999999;
-}
-
-.nav > .disabled > a:hover {
-  text-decoration: none;
-  cursor: default;
-  background-color: transparent;
-}
-
-.navbar {
-  *position: relative;
-  *z-index: 2;
-  margin-bottom: 20px;
-  overflow: visible;
-  color: #777777;
-}
-
-.navbar-inner {
-  min-height: 40px;
-  padding-right: 20px;
-  padding-left: 20px;
-  background-color: #fafafa;
-  background-image: -moz-linear-gradient(top, #ffffff, #f2f2f2);
-  background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#ffffff), to(#f2f2f2));
-  background-image: -webkit-linear-gradient(top, #ffffff, #f2f2f2);
-  background-image: -o-linear-gradient(top, #ffffff, #f2f2f2);
-  background-image: linear-gradient(to bottom, #ffffff, #f2f2f2);
-  background-repeat: repeat-x;
-  border: 1px solid #d4d4d4;
-  -webkit-border-radius: 4px;
-     -moz-border-radius: 4px;
-          border-radius: 4px;
-  filter: progid:dximagetransform.microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff2f2f2', GradientType=0);
-  *zoom: 1;
-  -webkit-box-shadow: 0 1px 4px rgba(0, 0, 0, 0.065);
-     -moz-box-shadow: 0 1px 4px rgba(0, 0, 0, 0.065);
-          box-shadow: 0 1px 4px rgba(0, 0, 0, 0.065);
-}
-
-.navbar-inner:before,
-.navbar-inner:after {
-  display: table;
-  line-height: 0;
-  content: "";
-}
-
-.navbar-inner:after {
-  clear: both;
-}
-
-.navbar .container {
-  width: auto;
-}
-
-.nav-collapse.collapse {
-  height: auto;
-}
-
-.navbar .brand {
-  display: block;
-  float: left;
-  padding: 10px 20px 10px;
-  margin-left: -20px;
-  font-size: 20px;
-  font-weight: 200;
-  color: #777777;
-  text-shadow: 0 1px 0 #ffffff;
-}
-
-.navbar .brand:hover {
-  text-decoration: none;
-}
-
-.navbar-text {
-  margin-bottom: 0;
-  line-height: 40px;
-}
-
-.navbar-link {
-  color: #777777;
-}
-
-.navbar-link:hover {
-  color: #333333;
-}
-
-.navbar .divider-vertical {
-  height: 40px;
-  margin: 0 9px;
-  border-right: 1px solid #ffffff;
-  border-left: 1px solid #f2f2f2;
-}
-
-.navbar .btn,
-.navbar .btn-group {
-  margin-top: 5px;
-}
-
-.navbar .btn-group .btn,
-.navbar .input-prepend .btn,
-.navbar .input-append .btn {
-  margin-top: 0;
-}
-
-.navbar-form {
-  margin-bottom: 0;
-  *zoom: 1;
-}
-
-.navbar-form:before,
-.navbar-form:after {
-  display: table;
-  line-height: 0;
-  content: "";
-}
-
-.navbar-form:after {
-  clear: both;
-}
-
-.navbar-form input,
-.navbar-form select,
-.navbar-form .radio,
-.navbar-form .checkbox {
-  margin-top: 5px;
-}
-
-.navbar-form input,
-.navbar-form select,
-.navbar-form .btn {
-  display: inline-block;
-  margin-bottom: 0;
-}
-
-.navbar-form input[type="image"],
-.navbar-form input[type="checkbox"],
-.navbar-form input[type="radio"] {
-  margin-top: 3px;
-}
-
-.navbar-form .input-append,
-.navbar-form .input-prepend {
-  margin-top: 6px;
-  white-space: nowrap;
-}
-
-.navbar-form .input-append input,
-.navbar-form .input-prepend input {
-  margin-top: 0;
-}
-
-.navbar-search {
-  position: relative;
-  float: left;
-  margin-top: 5px;
-  margin-bottom: 0;
-}
-
-.navbar-search .search-query {
-  padding: 4px 14px;
-  margin-bottom: 0;
-  font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
-  font-size: 13px;
-  font-weight: normal;
-  line-height: 1;
-  -webkit-border-radius: 15px;
-     -moz-border-radius: 15px;
-          border-radius: 15px;
-}
-
-.navbar-static-top {
-  position: static;
-  width: 100%;
-  margin-bottom: 0;
-}
-
-.navbar-static-top .navbar-inner {
-  -webkit-border-radius: 0;
-     -moz-border-radius: 0;
-          border-radius: 0;
-}
-
-.navbar-fixed-top,
-.navbar-fixed-bottom {
-  position: fixed;
-  right: 0;
-  left: 0;
-  z-index: 1030;
-  margin-bottom: 0;
-}
-
-.navbar-fixed-top .navbar-inner,
-.navbar-static-top .navbar-inner {
-  border-width: 0 0 1px;
-}
-
-.navbar-fixed-bottom .navbar-inner {
-  border-width: 1px 0 0;
-}
-
-.navbar-fixed-top .navbar-inner,
-.navbar-fixed-bottom .navbar-inner {
-  padding-right: 0;
-  padding-left: 0;
-  -webkit-border-radius: 0;
-     -moz-border-radius: 0;
-          border-radius: 0;
-}
-
-.navbar-static-top .container,
-.navbar-fixed-top .container,
-.navbar-fixed-bottom .container {
-  width: 940px;
-}
-
-.navbar-fixed-top {
-  top: 0;
-}
-
-.navbar-fixed-top .navbar-inner,
-.navbar-static-top .navbar-inner {
-  -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.1), 0 1px 10px rgba(0, 0, 0, 0.1);
-     -moz-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.1), 0 1px 10px rgba(0, 0, 0, 0.1);
-          box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.1), 0 1px 10px rgba(0, 0, 0, 0.1);
-}
-
-.navbar-fixed-bottom {
-  bottom: 0;
-}
-
-.navbar-fixed-bottom .navbar-inner {
-  -webkit-box-shadow: inset 0 1px 0 rgba(0, 0, 0, 0.1), 0 -1px 10px rgba(0, 0, 0, 0.1);
-     -moz-box-shadow: inset 0 1px 0 rgba(0, 0, 0, 0.1), 0 -1px 10px rgba(0, 0, 0, 0.1);
-          box-shadow: inset 0 1px 0 rgba(0, 0, 0, 0.1), 0 -1px 10px rgba(0, 0, 0, 0.1);
-}
-
-.navbar .nav {
-  position: relative;
-  left: 0;
-  display: block;
-  float: left;
-  margin: 0 10px 0 0;
-}
-
-.navbar .nav.pull-right {
-  float: right;
-  margin-right: 0;
-}
-
-.navbar .nav > li {
-  float: left;
-}
-
-.navbar .nav > li > a {
-  float: none;
-  padding: 10px 15px 10px;
-  color: #777777;
-  text-decoration: none;
-  text-shadow: 0 1px 0 #ffffff;
-}
-
-.navbar .nav .dropdown-toggle .caret {
-  margin-top: 8px;
-}
-
-.navbar .nav > li > a:focus,
-.navbar .nav > li > a:hover {
-  color: #333333;
-  text-decoration: none;
-  background-color: transparent;
-}
-
-.navbar .nav > .active > a,
-.navbar .nav > .active > a:hover,
-.navbar .nav > .active > a:focus {
-  color: #555555;
-  text-decoration: none;
-  background-color: #e5e5e5;
-  -webkit-box-shadow: inset 0 3px 8px rgba(0, 0, 0, 0.125);
-     -moz-box-shadow: inset 0 3px 8px rgba(0, 0, 0, 0.125);
-          box-shadow: inset 0 3px 8px rgba(0, 0, 0, 0.125);
-}
-
-.navbar .btn-navbar {
-  display: none;
-  float: right;
-  padding: 7px 10px;
-  margin-right: 5px;
-  margin-left: 5px;
-  color: #ffffff;
-  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);
-  background-color: #ededed;
-  *background-color: #e5e5e5;
-  background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#f2f2f2), to(#e5e5e5));
-  background-image: -webkit-linear-gradient(top, #f2f2f2, #e5e5e5);
-  background-image: -o-linear-gradient(top, #f2f2f2, #e5e5e5);
-  background-image: linear-gradient(to bottom, #f2f2f2, #e5e5e5);
-  background-image: -moz-linear-gradient(top, #f2f2f2, #e5e5e5);
-  background-repeat: repeat-x;
-  border-color: #e5e5e5 #e5e5e5 #bfbfbf;
-  border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25);
-  filter: progid:dximagetransform.microsoft.gradient(startColorstr='#fff2f2f2', endColorstr='#ffe5e5e5', GradientType=0);
-  filter: progid:dximagetransform.microsoft.gradient(enabled=false);
-  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.075);
-     -moz-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.075);
-          box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.075);
-}
-
-.navbar .btn-navbar:hover,
-.navbar .btn-navbar:active,
-.navbar .btn-navbar.active,
-.navbar .btn-navbar.disabled,
-.navbar .btn-navbar[disabled] {
-  color: #ffffff;
-  background-color: #e5e5e5;
-  *background-color: #d9d9d9;
-}
-
-.navbar .btn-navbar:active,
-.navbar .btn-navbar.active {
-  background-color: #cccccc \9;
-}
-
-.navbar .btn-navbar .icon-bar {
-  display: block;
-  width: 18px;
-  height: 2px;
-  background-color: #f5f5f5;
-  -webkit-border-radius: 1px;
-     -moz-border-radius: 1px;
-          border-radius: 1px;
-  -webkit-box-shadow: 0 1px 0 rgba(0, 0, 0, 0.25);
-     -moz-box-shadow: 0 1px 0 rgba(0, 0, 0, 0.25);
-          box-shadow: 0 1px 0 rgba(0, 0, 0, 0.25);
-}
-
-.btn-navbar .icon-bar + .icon-bar {
-  margin-top: 3px;
-}
-
-.navbar .nav > li > .dropdown-menu:before {
-  position: absolute;
-  top: -7px;
-  left: 9px;
-  display: inline-block;
-  border-right: 7px solid transparent;
-  border-bottom: 7px solid #ccc;
-  border-left: 7px solid transparent;
-  border-bottom-color: rgba(0, 0, 0, 0.2);
-  content: '';
-}
-
-.navbar .nav > li > .dropdown-menu:after {
-  position: absolute;
-  top: -6px;
-  left: 10px;
-  display: inline-block;
-  border-right: 6px solid transparent;
-  border-bottom: 6px solid #ffffff;
-  border-left: 6px solid transparent;
-  content: '';
-}
-
-.navbar-fixed-bottom .nav > li > .dropdown-menu:before {
-  top: auto;
-  bottom: -7px;
-  border-top: 7px solid #ccc;
-  border-bottom: 0;
-  border-top-color: rgba(0, 0, 0, 0.2);
-}
-
-.navbar-fixed-bottom .nav > li > .dropdown-menu:after {
-  top: auto;
-  bottom: -6px;
-  border-top: 6px solid #ffffff;
-  border-bottom: 0;
-}
-
-.navbar .nav li.dropdown.open > .dropdown-toggle,
-.navbar .nav li.dropdown.active > .dropdown-toggle,
-.navbar .nav li.dropdown.open.active > .dropdown-toggle {
-  color: #555555;
-  background-color: #e5e5e5;
-}
-
-.navbar .nav li.dropdown > .dropdown-toggle .caret {
-  border-top-color: #777777;
-  border-bottom-color: #777777;
-}
-
-.navbar .nav li.dropdown.open > .dropdown-toggle .caret,
-.navbar .nav li.dropdown.active > .dropdown-toggle .caret,
-.navbar .nav li.dropdown.open.active > .dropdown-toggle .caret {
-  border-top-color: #555555;
-  border-bottom-color: #555555;
-}
-
-.navbar .pull-right > li > .dropdown-menu,
-.navbar .nav > li > .dropdown-menu.pull-right {
-  right: 0;
-  left: auto;
-}
-
-.navbar .pull-right > li > .dropdown-menu:before,
-.navbar .nav > li > .dropdown-menu.pull-right:before {
-  right: 12px;
-  left: auto;
-}
-
-.navbar .pull-right > li > .dropdown-menu:after,
-.navbar .nav > li > .dropdown-menu.pull-right:after {
-  right: 13px;
-  left: auto;
-}
-
-.navbar .pull-right > li > .dropdown-menu .dropdown-menu,
-.navbar .nav > li > .dropdown-menu.pull-right .dropdown-menu {
-  right: 100%;
-  left: auto;
-  margin-right: -1px;
-  margin-left: 0;
-  -webkit-border-radius: 6px 0 6px 6px;
-     -moz-border-radius: 6px 0 6px 6px;
-          border-radius: 6px 0 6px 6px;
-}
-
-.navbar-inverse {
-  color: #999999;
-}
-
-.navbar-inverse .navbar-inner {
-  background-color: #1b1b1b;
-  background-image: -moz-linear-gradient(top, #222222, #111111);
-  background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#222222), to(#111111));
-  background-image: -webkit-linear-gradient(top, #222222, #111111);
-  background-image: -o-linear-gradient(top, #222222, #111111);
-  background-image: linear-gradient(to bottom, #222222, #111111);
-  background-repeat: repeat-x;
-  border-color: #252525;
-  filter: progid:dximagetransform.microsoft.gradient(startColorstr='#ff222222', endColorstr='#ff111111', GradientType=0);
-}
-
-.navbar-inverse .brand,
-.navbar-inverse .nav > li > a {
-  color: #999999;
-  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);
-}
-
-.navbar-inverse .brand:hover,
-.navbar-inverse .nav > li > a:hover {
-  color: #ffffff;
-}
-
-.navbar-inverse .nav > li > a:focus,
-.navbar-inverse .nav > li > a:hover {
-  color: #ffffff;
-  background-color: transparent;
-}
-
-.navbar-inverse .nav .active > a,
-.navbar-inverse .nav .active > a:hover,
-.navbar-inverse .nav .active > a:focus {
-  color: #ffffff;
-  background-color: #111111;
-}
-
-.navbar-inverse .navbar-link {
-  color: #999999;
-}
-
-.navbar-inverse .navbar-link:hover {
-  color: #ffffff;
-}
-
-.navbar-inverse .divider-vertical {
-  border-right-color: #222222;
-  border-left-color: #111111;
-}
-
-.navbar-inverse .nav li.dropdown.open > .dropdown-toggle,
-.navbar-inverse .nav li.dropdown.active > .dropdown-toggle,
-.navbar-inverse .nav li.dropdown.open.active > .dropdown-toggle {
-  color: #ffffff;
-  background-color: #111111;
-}
-
-.navbar-inverse .nav li.dropdown > .dropdown-toggle .caret {
-  border-top-color: #999999;
-  border-bottom-color: #999999;
-}
-
-.navbar-inverse .nav li.dropdown.open > .dropdown-toggle .caret,
-.navbar-inverse .nav li.dropdown.active > .dropdown-toggle .caret,
-.navbar-inverse .nav li.dropdown.open.active > .dropdown-toggle .caret {
-  border-top-color: #ffffff;
-  border-bottom-color: #ffffff;
-}
-
-.navbar-inverse .navbar-search .search-query {
-  color: #ffffff;
-  background-color: #515151;
-  border-color: #111111;
-  -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1), 0 1px 0 rgba(255, 255, 255, 0.15);
-     -moz-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1), 0 1px 0 rgba(255, 255, 255, 0.15);
-          box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1), 0 1px 0 rgba(255, 255, 255, 0.15);
-  -webkit-transition: none;
-     -moz-transition: none;
-       -o-transition: none;
-          transition: none;
-}
-
-.navbar-inverse .navbar-search .search-query:-moz-placeholder {
-  color: #cccccc;
-}
-
-.navbar-inverse .navbar-search .search-query:-ms-input-placeholder {
-  color: #cccccc;
-}
-
-.navbar-inverse .navbar-search .search-query::-webkit-input-placeholder {
-  color: #cccccc;
-}
-
-.navbar-inverse .navbar-search .search-query:focus,
-.navbar-inverse .navbar-search .search-query.focused {
-  padding: 5px 15px;
-  color: #333333;
-  text-shadow: 0 1px 0 #ffffff;
-  background-color: #ffffff;
-  border: 0;
-  outline: 0;
-  -webkit-box-shadow: 0 0 3px rgba(0, 0, 0, 0.15);
-     -moz-box-shadow: 0 0 3px rgba(0, 0, 0, 0.15);
-          box-shadow: 0 0 3px rgba(0, 0, 0, 0.15);
-}
-
-.navbar-inverse .btn-navbar {
-  color: #ffffff;
-  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);
-  background-color: #0e0e0e;
-  *background-color: #040404;
-  background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#151515), to(#040404));
-  background-image: -webkit-linear-gradient(top, #151515, #040404);
-  background-image: -o-linear-gradient(top, #151515, #040404);
-  background-image: linear-gradient(to bottom, #151515, #040404);
-  background-image: -moz-linear-gradient(top, #151515, #040404);
-  background-repeat: repeat-x;
-  border-color: #040404 #040404 #000000;
-  border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25);
-  filter: progid:dximagetransform.microsoft.gradient(startColorstr='#ff151515', endColorstr='#ff040404', GradientType=0);
-  filter: progid:dximagetransform.microsoft.gradient(enabled=false);
-}
-
-.navbar-inverse .btn-navbar:hover,
-.navbar-inverse .btn-navbar:active,
-.navbar-inverse .btn-navbar.active,
-.navbar-inverse .btn-navbar.disabled,
-.navbar-inverse .btn-navbar[disabled] {
-  color: #ffffff;
-  background-color: #040404;
-  *background-color: #000000;
-}
-
-.navbar-inverse .btn-navbar:active,
-.navbar-inverse .btn-navbar.active {
-  background-color: #000000 \9;
-}
-
-.breadcrumb {
-  padding: 8px 15px;
-  margin: 0 0 20px;
-  list-style: none;
-  background-color: #f5f5f5;
-  -webkit-border-radius: 4px;
-     -moz-border-radius: 4px;
-          border-radius: 4px;
-}
-
-.breadcrumb li {
-  display: inline-block;
-  *display: inline;
-  text-shadow: 0 1px 0 #ffffff;
-  *zoom: 1;
-}
-
-.breadcrumb .divider {
-  padding: 0 5px;
-  color: #ccc;
-}
-
-.breadcrumb .active {
-  color: #999999;
-}
-
-.pagination {
-  height: 40px;
-  margin: 20px 0;
-}
-
-.pagination ul {
-  display: inline-block;
-  *display: inline;
-  margin-bottom: 0;
-  margin-left: 0;
-  -webkit-border-radius: 3px;
-     -moz-border-radius: 3px;
-          border-radius: 3px;
-  *zoom: 1;
-  -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);
-     -moz-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);
-          box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);
-}
-
-.pagination ul > li {
-  display: inline;
-}
-
-.pagination ul > li > a,
-.pagination ul > li > span {
-  float: left;
-  padding: 0 14px;
-  line-height: 38px;
-  text-decoration: none;
-  background-color: #ffffff;
-  border: 1px solid #dddddd;
-  border-left-width: 0;
-}
-
-.pagination ul > li > a:hover,
-.pagination ul > .active > a,
-.pagination ul > .active > span {
-  background-color: #f5f5f5;
-}
-
-.pagination ul > .active > a,
-.pagination ul > .active > span {
-  color: #999999;
-  cursor: default;
-}
-
-.pagination ul > .disabled > span,
-.pagination ul > .disabled > a,
-.pagination ul > .disabled > a:hover {
-  color: #999999;
-  cursor: default;
-  background-color: transparent;
-}
-
-.pagination ul > li:first-child > a,
-.pagination ul > li:first-child > span {
-  border-left-width: 1px;
-  -webkit-border-radius: 3px 0 0 3px;
-     -moz-border-radius: 3px 0 0 3px;
-          border-radius: 3px 0 0 3px;
-}
-
-.pagination ul > li:last-child > a,
-.pagination ul > li:last-child > span {
-  -webkit-border-radius: 0 3px 3px 0;
-     -moz-border-radius: 0 3px 3px 0;
-          border-radius: 0 3px 3px 0;
-}
-
-.pagination-centered {
-  text-align: center;
-}
-
-.pagination-right {
-  text-align: right;
-}
-
-.pager {
-  margin: 20px 0;
-  text-align: center;
-  list-style: none;
-  *zoom: 1;
-}
-
-.pager:before,
-.pager:after {
-  display: table;
-  line-height: 0;
-  content: "";
-}
-
-.pager:after {
-  clear: both;
-}
-
-.pager li {
-  display: inline;
-}
-
-.pager a,
-.pager span {
-  display: inline-block;
-  padding: 5px 14px;
-  background-color: #fff;
-  border: 1px solid #ddd;
-  -webkit-border-radius: 15px;
-     -moz-border-radius: 15px;
-          border-radius: 15px;
-}
-
-.pager a:hover {
-  text-decoration: none;
-  background-color: #f5f5f5;
-}
-
-.pager .next a,
-.pager .next span {
-  float: right;
-}
-
-.pager .previous a {
-  float: left;
-}
-
-.pager .disabled a,
-.pager .disabled a:hover,
-.pager .disabled span {
-  color: #999999;
-  cursor: default;
-  background-color: #fff;
-}
-
-.modal-open .modal .dropdown-menu {
-  z-index: 2050;
-}
-
-.modal-open .modal .dropdown.open {
-  *z-index: 2050;
-}
-
-.modal-open .modal .popover {
-  z-index: 2060;
-}
-
-.modal-open .modal .tooltip {
-  z-index: 2080;
-}
-
-.modal-backdrop {
-  position: fixed;
-  top: 0;
-  right: 0;
-  bottom: 0;
-  left: 0;
-  z-index: 1040;
-  background-color: #000000;
-}
-
-.modal-backdrop.fade {
-  opacity: 0;
-}
-
-.modal-backdrop,
-.modal-backdrop.fade.in {
-  opacity: 0.8;
-  filter: alpha(opacity=80);
-}
-
-.modal {
-  position: fixed;
-  top: 50%;
-  left: 50%;
-  z-index: 1050;
-  width: 560px;
-  margin: -250px 0 0 -280px;
-  overflow: auto;
-  background-color: #ffffff;
-  border: 1px solid #999;
-  border: 1px solid rgba(0, 0, 0, 0.3);
-  *border: 1px solid #999;
-  -webkit-border-radius: 6px;
-     -moz-border-radius: 6px;
-          border-radius: 6px;
-  -webkit-box-shadow: 0 3px 7px rgba(0, 0, 0, 0.3);
-     -moz-box-shadow: 0 3px 7px rgba(0, 0, 0, 0.3);
-          box-shadow: 0 3px 7px rgba(0, 0, 0, 0.3);
-  -webkit-background-clip: padding-box;
-     -moz-background-clip: padding-box;
-          background-clip: padding-box;
-}
-
-.modal.fade {
-  top: -25%;
-  -webkit-transition: opacity 0.3s linear, top 0.3s ease-out;
-     -moz-transition: opacity 0.3s linear, top 0.3s ease-out;
-       -o-transition: opacity 0.3s linear, top 0.3s ease-out;
-          transition: opacity 0.3s linear, top 0.3s ease-out;
-}
-
-.modal.fade.in {
-  top: 50%;
-}
-
-.modal-header {
-  padding: 9px 15px;
-  border-bottom: 1px solid #eee;
-}
-
-.modal-header .close {
-  margin-top: 2px;
-}
-
-.modal-header h3 {
-  margin: 0;
-  line-height: 30px;
-}
-
-.modal-body {
-  max-height: 400px;
-  padding: 15px;
-  overflow-y: auto;
-}
-
-.modal-form {
-  margin-bottom: 0;
-}
-
-.modal-footer {
-  padding: 14px 15px 15px;
-  margin-bottom: 0;
-  text-align: right;
-  background-color: #f5f5f5;
-  border-top: 1px solid #ddd;
-  -webkit-border-radius: 0 0 6px 6px;
-     -moz-border-radius: 0 0 6px 6px;
-          border-radius: 0 0 6px 6px;
-  *zoom: 1;
-  -webkit-box-shadow: inset 0 1px 0 #ffffff;
-     -moz-box-shadow: inset 0 1px 0 #ffffff;
-          box-shadow: inset 0 1px 0 #ffffff;
-}
-
-.modal-footer:before,
-.modal-footer:after {
-  display: table;
-  line-height: 0;
-  content: "";
-}
-
-.modal-footer:after {
-  clear: both;
-}
-
-.modal-footer .btn + .btn {
-  margin-bottom: 0;
-  margin-left: 5px;
-}
-
-.modal-footer .btn-group .btn + .btn {
-  margin-left: -1px;
-}
-
-.tooltip {
-  position: absolute;
-  z-index: 1030;
-  display: block;
-  padding: 5px;
-  font-size: 11px;
-  opacity: 0;
-  filter: alpha(opacity=0);
-  visibility: visible;
-}
-
-.tooltip.in {
-  opacity: 0.8;
-  filter: alpha(opacity=80);
-}
-
-.tooltip.top {
-  margin-top: -3px;
-}
-
-.tooltip.right {
-  margin-left: 3px;
-}
-
-.tooltip.bottom {
-  margin-top: 3px;
-}
-
-.tooltip.left {
-  margin-left: -3px;
-}
-
-.tooltip-inner {
-  max-width: 200px;
-  padding: 3px 8px;
-  color: #ffffff;
-  text-align: center;
-  text-decoration: none;
-  background-color: #000000;
-  -webkit-border-radius: 4px;
-     -moz-border-radius: 4px;
-          border-radius: 4px;
-}
-
-.tooltip-arrow {
-  position: absolute;
-  width: 0;
-  height: 0;
-  border-color: transparent;
-  border-style: solid;
-}
-
-.tooltip.top .tooltip-arrow {
-  bottom: 0;
-  left: 50%;
-  margin-left: -5px;
-  border-top-color: #000000;
-  border-width: 5px 5px 0;
-}
-
-.tooltip.right .tooltip-arrow {
-  top: 50%;
-  left: 0;
-  margin-top: -5px;
-  border-right-color: #000000;
-  border-width: 5px 5px 5px 0;
-}
-
-.tooltip.left .tooltip-arrow {
-  top: 50%;
-  right: 0;
-  margin-top: -5px;
-  border-left-color: #000000;
-  border-width: 5px 0 5px 5px;
-}
-
-.tooltip.bottom .tooltip-arrow {
-  top: 0;
-  left: 50%;
-  margin-left: -5px;
-  border-bottom-color: #000000;
-  border-width: 0 5px 5px;
-}
-
-.popover {
-  position: absolute;
-  top: 0;
-  left: 0;
-  z-index: 1010;
-  display: none;
-  width: 236px;
-  padding: 1px;
-  background-color: #ffffff;
-  border: 1px solid #ccc;
-  border: 1px solid rgba(0, 0, 0, 0.2);
-  -webkit-border-radius: 6px;
-     -moz-border-radius: 6px;
-          border-radius: 6px;
-  -webkit-box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2);
-     -moz-box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2);
-          box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2);
-  -webkit-background-clip: padding-box;
-     -moz-background-clip: padding;
-          background-clip: padding-box;
-}
-
-.popover.top {
-  margin-bottom: 10px;
-}
-
-.popover.right {
-  margin-left: 10px;
-}
-
-.popover.bottom {
-  margin-top: 10px;
-}
-
-.popover.left {
-  margin-right: 10px;
-}
-
-.popover-title {
-  padding: 8px 14px;
-  margin: 0;
-  font-size: 14px;
-  font-weight: normal;
-  line-height: 18px;
-  background-color: #f7f7f7;
-  border-bottom: 1px solid #ebebeb;
-  -webkit-border-radius: 5px 5px 0 0;
-     -moz-border-radius: 5px 5px 0 0;
-          border-radius: 5px 5px 0 0;
-}
-
-.popover-content {
-  padding: 9px 14px;
-}
-
-.popover-content p,
-.popover-content ul,
-.popover-content ol {
-  margin-bottom: 0;
-}
-
-.popover .arrow,
-.popover .arrow:after {
-  position: absolute;
-  display: inline-block;
-  width: 0;
-  height: 0;
-  border-color: transparent;
-  border-style: solid;
-}
-
-.popover .arrow:after {
-  z-index: -1;
-  content: "";
-}
-
-.popover.top .arrow {
-  bottom: -10px;
-  left: 50%;
-  margin-left: -10px;
-  border-top-color: #ffffff;
-  border-width: 10px 10px 0;
-}
-
-.popover.top .arrow:after {
-  bottom: -1px;
-  left: -11px;
-  border-top-color: rgba(0, 0, 0, 0.25);
-  border-width: 11px 11px 0;
-}
-
-.popover.right .arrow {
-  top: 50%;
-  left: -10px;
-  margin-top: -10px;
-  border-right-color: #ffffff;
-  border-width: 10px 10px 10px 0;
-}
-
-.popover.right .arrow:after {
-  bottom: -11px;
-  left: -1px;
-  border-right-color: rgba(0, 0, 0, 0.25);
-  border-width: 11px 11px 11px 0;
-}
-
-.popover.bottom .arrow {
-  top: -10px;
-  left: 50%;
-  margin-left: -10px;
-  border-bottom-color: #ffffff;
-  border-width: 0 10px 10px;
-}
-
-.popover.bottom .arrow:after {
-  top: -1px;
-  left: -11px;
-  border-bottom-color: rgba(0, 0, 0, 0.25);
-  border-width: 0 11px 11px;
-}
-
-.popover.left .arrow {
-  top: 50%;
-  right: -10px;
-  margin-top: -10px;
-  border-left-color: #ffffff;
-  border-width: 10px 0 10px 10px;
-}
-
-.popover.left .arrow:after {
-  right: -1px;
-  bottom: -11px;
-  border-left-color: rgba(0, 0, 0, 0.25);
-  border-width: 11px 0 11px 11px;
-}
-
-.thumbnails {
-  margin-left: -20px;
-  list-style: none;
-  *zoom: 1;
-}
-
-.thumbnails:before,
-.thumbnails:after {
-  display: table;
-  line-height: 0;
-  content: "";
-}
-
-.thumbnails:after {
-  clear: both;
-}
-
-.row-fluid .thumbnails {
-  margin-left: 0;
-}
-
-.thumbnails > li {
-  float: left;
-  margin-bottom: 20px;
-  margin-left: 20px;
-}
-
-.thumbnail {
-  display: block;
-  padding: 4px;
-  line-height: 20px;
-  border: 1px solid #ddd;
-  -webkit-border-radius: 4px;
-     -moz-border-radius: 4px;
-          border-radius: 4px;
-  -webkit-box-shadow: 0 1px 3px rgba(0, 0, 0, 0.055);
-     -moz-box-shadow: 0 1px 3px rgba(0, 0, 0, 0.055);
-          box-shadow: 0 1px 3px rgba(0, 0, 0, 0.055);
-  -webkit-transition: all 0.2s ease-in-out;
-     -moz-transition: all 0.2s ease-in-out;
-       -o-transition: all 0.2s ease-in-out;
-          transition: all 0.2s ease-in-out;
-}
-
-a.thumbnail:hover {
-  border-color: #0088cc;
-  -webkit-box-shadow: 0 1px 4px rgba(0, 105, 214, 0.25);
-     -moz-box-shadow: 0 1px 4px rgba(0, 105, 214, 0.25);
-          box-shadow: 0 1px 4px rgba(0, 105, 214, 0.25);
-}
-
-.thumbnail > img {
-  display: block;
-  max-width: 100%;
-  margin-right: auto;
-  margin-left: auto;
-}
-
-.thumbnail .caption {
-  padding: 9px;
-  color: #555555;
-}
-
-.label,
-.badge {
-  font-size: 11.844px;
-  font-weight: bold;
-  line-height: 14px;
-  color: #ffffff;
-  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);
-  white-space: nowrap;
-  vertical-align: baseline;
-  background-color: #999999;
-}
-
-.label {
-  padding: 1px 4px 2px;
-  -webkit-border-radius: 3px;
-     -moz-border-radius: 3px;
-          border-radius: 3px;
-}
-
-.badge {
-  padding: 1px 9px 2px;
-  -webkit-border-radius: 9px;
-     -moz-border-radius: 9px;
-          border-radius: 9px;
-}
-
-a.label:hover,
-a.badge:hover {
-  color: #ffffff;
-  text-decoration: none;
-  cursor: pointer;
-}
-
-.label-important,
-.badge-important {
-  background-color: #b94a48;
-}
-
-.label-important[href],
-.badge-important[href] {
-  background-color: #953b39;
-}
-
-.label-warning,
-.badge-warning {
-  background-color: #f89406;
-}
-
-.label-warning[href],
-.badge-warning[href] {
-  background-color: #c67605;
-}
-
-.label-success,
-.badge-success {
-  background-color: #468847;
-}
-
-.label-success[href],
-.badge-success[href] {
-  background-color: #356635;
-}
-
-.label-info,
-.badge-info {
-  background-color: #3a87ad;
-}
-
-.label-info[href],
-.badge-info[href] {
-  background-color: #2d6987;
-}
-
-.label-inverse,
-.badge-inverse {
-  background-color: #333333;
-}
-
-.label-inverse[href],
-.badge-inverse[href] {
-  background-color: #1a1a1a;
-}
-
-.btn .label,
-.btn .badge {
-  position: relative;
-  top: -1px;
-}
-
-.btn-mini .label,
-.btn-mini .badge {
-  top: 0;
-}
-
-@-webkit-keyframes progress-bar-stripes {
-  from {
-    background-position: 40px 0;
-  }
-  to {
-    background-position: 0 0;
-  }
-}
-
-@-moz-keyframes progress-bar-stripes {
-  from {
-    background-position: 40px 0;
-  }
-  to {
-    background-position: 0 0;
-  }
-}
-
-@-ms-keyframes progress-bar-stripes {
-  from {
-    background-position: 40px 0;
-  }
-  to {
-    background-position: 0 0;
-  }
-}
-
-@-o-keyframes progress-bar-stripes {
-  from {
-    background-position: 0 0;
-  }
-  to {
-    background-position: 40px 0;
-  }
-}
-
-@keyframes progress-bar-stripes {
-  from {
-    background-position: 40px 0;
-  }
-  to {
-    background-position: 0 0;
-  }
-}
-
-.progress {
-  height: 20px;
-  margin-bottom: 20px;
-  overflow: hidden;
-  background-color: #f7f7f7;
-  background-image: -moz-linear-gradient(top, #f5f5f5, #f9f9f9);
-  background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#f5f5f5), to(#f9f9f9));
-  background-image: -webkit-linear-gradient(top, #f5f5f5, #f9f9f9);
-  background-image: -o-linear-gradient(top, #f5f5f5, #f9f9f9);
-  background-image: linear-gradient(to bottom, #f5f5f5, #f9f9f9);
-  background-repeat: repeat-x;
-  -webkit-border-radius: 4px;
-     -moz-border-radius: 4px;
-          border-radius: 4px;
-  filter: progid:dximagetransform.microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#fff9f9f9', GradientType=0);
-  -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1);
-     -moz-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1);
-          box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1);
-}
-
-.progress .bar {
-  float: left;
-  width: 0;
-  height: 100%;
-  font-size: 12px;
-  color: #ffffff;
-  text-align: center;
-  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);
-  background-color: #0e90d2;
-  background-image: -moz-linear-gradient(top, #149bdf, #0480be);
-  background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#149bdf), to(#0480be));
-  background-image: -webkit-linear-gradient(top, #149bdf, #0480be);
-  background-image: -o-linear-gradient(top, #149bdf, #0480be);
-  background-image: linear-gradient(to bottom, #149bdf, #0480be);
-  background-repeat: repeat-x;
-  filter: progid:dximagetransform.microsoft.gradient(startColorstr='#ff149bdf', endColorstr='#ff0480be', GradientType=0);
-  -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15);
-     -moz-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15);
-          box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15);
-  -webkit-box-sizing: border-box;
-     -moz-box-sizing: border-box;
-          box-sizing: border-box;
-  -webkit-transition: width 0.6s ease;
-     -moz-transition: width 0.6s ease;
-       -o-transition: width 0.6s ease;
-          transition: width 0.6s ease;
-}
-
-.progress .bar + .bar {
-  -webkit-box-shadow: inset 1px 0 0 rgba(0, 0, 0, 0.15), inset 0 -1px 0 rgba(0, 0, 0, 0.15);
-     -moz-box-shadow: inset 1px 0 0 rgba(0, 0, 0, 0.15), inset 0 -1px 0 rgba(0, 0, 0, 0.15);
-          box-shadow: inset 1px 0 0 rgba(0, 0, 0, 0.15), inset 0 -1px 0 rgba(0, 0, 0, 0.15);
-}
-
-.progress-striped .bar {
-  background-color: #149bdf;
-  background-image: -webkit-gradient(linear, 0 100%, 100% 0, color-stop(0.25, rgba(255, 255, 255, 0.15)), color-stop(0.25, transparent), color-stop(0.5, transparent), color-stop(0.5, rgba(255, 255, 255, 0.15)), color-stop(0.75, rgba(255, 255, 255, 0.15)), color-stop(0.75, transparent), to(transparent));
-  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
-  background-image: -moz-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
-  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
-  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
-  -webkit-background-size: 40px 40px;
-     -moz-background-size: 40px 40px;
-       -o-background-size: 40px 40px;
-          background-size: 40px 40px;
-}
-
-.progress.active .bar {
-  -webkit-animation: progress-bar-stripes 2s linear infinite;
-     -moz-animation: progress-bar-stripes 2s linear infinite;
-      -ms-animation: progress-bar-stripes 2s linear infinite;
-       -o-animation: progress-bar-stripes 2s linear infinite;
-          animation: progress-bar-stripes 2s linear infinite;
-}
-
-.progress-danger .bar,
-.progress .bar-danger {
-  background-color: #dd514c;
-  background-image: -moz-linear-gradient(top, #ee5f5b, #c43c35);
-  background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#ee5f5b), to(#c43c35));
-  background-image: -webkit-linear-gradient(top, #ee5f5b, #c43c35);
-  background-image: -o-linear-gradient(top, #ee5f5b, #c43c35);
-  background-image: linear-gradient(to bottom, #ee5f5b, #c43c35);
-  background-repeat: repeat-x;
-  filter: progid:dximagetransform.microsoft.gradient(startColorstr='#ffee5f5b', endColorstr='#ffc43c35', GradientType=0);
-}
-
-.progress-danger.progress-striped .bar,
-.progress-striped .bar-danger {
-  background-color: #ee5f5b;
-  background-image: -webkit-gradient(linear, 0 100%, 100% 0, color-stop(0.25, rgba(255, 255, 255, 0.15)), color-stop(0.25, transparent), color-stop(0.5, transparent), color-stop(0.5, rgba(255, 255, 255, 0.15)), color-stop(0.75, rgba(255, 255, 255, 0.15)), color-stop(0.75, transparent), to(transparent));
-  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
-  background-image: -moz-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
-  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
-  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
-}
-
-.progress-success .bar,
-.progress .bar-success {
-  background-color: #5eb95e;
-  background-image: -moz-linear-gradient(top, #62c462, #57a957);
-  background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#62c462), to(#57a957));
-  background-image: -webkit-linear-gradient(top, #62c462, #57a957);
-  background-image: -o-linear-gradient(top, #62c462, #57a957);
-  background-image: linear-gradient(to bottom, #62c462, #57a957);
-  background-repeat: repeat-x;
-  filter: progid:dximagetransform.microsoft.gradient(startColorstr='#ff62c462', endColorstr='#ff57a957', GradientType=0);
-}
-
-.progress-success.progress-striped .bar,
-.progress-striped .bar-success {
-  background-color: #62c462;
-  background-image: -webkit-gradient(linear, 0 100%, 100% 0, color-stop(0.25, rgba(255, 255, 255, 0.15)), color-stop(0.25, transparent), color-stop(0.5, transparent), color-stop(0.5, rgba(255, 255, 255, 0.15)), color-stop(0.75, rgba(255, 255, 255, 0.15)), color-stop(0.75, transparent), to(transparent));
-  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
-  background-image: -moz-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
-  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
-  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
-}
-
-.progress-info .bar,
-.progress .bar-info {
-  background-color: #4bb1cf;
-  background-image: -moz-linear-gradient(top, #5bc0de, #339bb9);
-  background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#5bc0de), to(#339bb9));
-  background-image: -webkit-linear-gradient(top, #5bc0de, #339bb9);
-  background-image: -o-linear-gradient(top, #5bc0de, #339bb9);
-  background-image: linear-gradient(to bottom, #5bc0de, #339bb9);
-  background-repeat: repeat-x;
-  filter: progid:dximagetransform.microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff339bb9', GradientType=0);
-}
-
-.progress-info.progress-striped .bar,
-.progress-striped .bar-info {
-  background-color: #5bc0de;
-  background-image: -webkit-gradient(linear, 0 100%, 100% 0, color-stop(0.25, rgba(255, 255, 255, 0.15)), color-stop(0.25, transparent), color-stop(0.5, transparent), color-stop(0.5, rgba(255, 255, 255, 0.15)), color-stop(0.75, rgba(255, 255, 255, 0.15)), color-stop(0.75, transparent), to(transparent));
-  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
-  background-image: -moz-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
-  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
-  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
-}
-
-.progress-warning .bar,
-.progress .bar-warning {
-  background-color: #faa732;
-  background-image: -moz-linear-gradient(top, #fbb450, #f89406);
-  background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#fbb450), to(#f89406));
-  background-image: -webkit-linear-gradient(top, #fbb450, #f89406);
-  background-image: -o-linear-gradient(top, #fbb450, #f89406);
-  background-image: linear-gradient(to bottom, #fbb450, #f89406);
-  background-repeat: repeat-x;
-  filter: progid:dximagetransform.microsoft.gradient(startColorstr='#fffbb450', endColorstr='#fff89406', GradientType=0);
-}
-
-.progress-warning.progress-striped .bar,
-.progress-striped .bar-warning {
-  background-color: #fbb450;
-  background-image: -webkit-gradient(linear, 0 100%, 100% 0, color-stop(0.25, rgba(255, 255, 255, 0.15)), color-stop(0.25, transparent), color-stop(0.5, transparent), color-stop(0.5, rgba(255, 255, 255, 0.15)), color-stop(0.75, rgba(255, 255, 255, 0.15)), color-stop(0.75, transparent), to(transparent));
-  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
-  background-image: -moz-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
-  background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
-  background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
-}
-
-.accordion {
-  margin-bottom: 20px;
-}
-
-.accordion-group {
-  margin-bottom: 2px;
-  border: 1px solid #e5e5e5;
-  -webkit-border-radius: 4px;
-     -moz-border-radius: 4px;
-          border-radius: 4px;
-}
-
-.accordion-heading {
-  border-bottom: 0;
-}
-
-.accordion-heading .accordion-toggle {
-  display: block;
-  padding: 8px 15px;
-}
-
-.accordion-toggle {
-  cursor: pointer;
-}
-
-.accordion-inner {
-  padding: 9px 15px;
-  border-top: 1px solid #e5e5e5;
-}
-
-.carousel {
-  position: relative;
-  margin-bottom: 20px;
-  line-height: 1;
-}
-
-.carousel-inner {
-  position: relative;
-  width: 100%;
-  overflow: hidden;
-}
-
-.carousel .item {
-  position: relative;
-  display: none;
-  -webkit-transition: 0.6s ease-in-out left;
-     -moz-transition: 0.6s ease-in-out left;
-       -o-transition: 0.6s ease-in-out left;
-          transition: 0.6s ease-in-out left;
-}
-
-.carousel .item > img {
-  display: block;
-  line-height: 1;
-}
-
-.carousel .active,
-.carousel .next,
-.carousel .prev {
-  display: block;
-}
-
-.carousel .active {
-  left: 0;
-}
-
-.carousel .next,
-.carousel .prev {
-  position: absolute;
-  top: 0;
-  width: 100%;
-}
-
-.carousel .next {
-  left: 100%;
-}
-
-.carousel .prev {
-  left: -100%;
-}
-
-.carousel .next.left,
-.carousel .prev.right {
-  left: 0;
-}
-
-.carousel .active.left {
-  left: -100%;
-}
-
-.carousel .active.right {
-  left: 100%;
-}
-
-.carousel-control {
-  position: absolute;
-  top: 40%;
-  left: 15px;
-  width: 40px;
-  height: 40px;
-  margin-top: -20px;
-  font-size: 60px;
-  font-weight: 100;
-  line-height: 30px;
-  color: #ffffff;
-  text-align: center;
-  background: #222222;
-  border: 3px solid #ffffff;
-  -webkit-border-radius: 23px;
-     -moz-border-radius: 23px;
-          border-radius: 23px;
-  opacity: 0.5;
-  filter: alpha(opacity=50);
-}
-
-.carousel-control.right {
-  right: 15px;
-  left: auto;
-}
-
-.carousel-control:hover {
-  color: #ffffff;
-  text-decoration: none;
-  opacity: 0.9;
-  filter: alpha(opacity=90);
-}
-
-.carousel-caption {
-  position: absolute;
-  right: 0;
-  bottom: 0;
-  left: 0;
-  padding: 15px;
-  background: #333333;
-  background: rgba(0, 0, 0, 0.75);
-}
-
-.carousel-caption h4,
-.carousel-caption p {
-  line-height: 20px;
-  color: #ffffff;
-}
-
-.carousel-caption h4 {
-  margin: 0 0 5px;
-}
-
-.carousel-caption p {
-  margin-bottom: 0;
-}
-
-.hero-unit {
-  padding: 60px;
-  margin-bottom: 30px;
-  background-color: #eeeeee;
-  -webkit-border-radius: 6px;
-     -moz-border-radius: 6px;
-          border-radius: 6px;
-}
-
-.hero-unit h1 {
-  margin-bottom: 0;
-  font-size: 60px;
-  line-height: 1;
-  letter-spacing: -1px;
-  color: inherit;
-}
-
-.hero-unit p {
-  font-size: 18px;
-  font-weight: 200;
-  line-height: 30px;
-  color: inherit;
-}
-
-.pull-right {
-  float: right;
-}
-
-.pull-left {
-  float: left;
-}
-
-.hide {
-  display: none;
-}
-
-.show {
-  display: block;
-}
-
-.invisible {
-  visibility: hidden;
-}
-
-.affix {
-  position: fixed;
-}
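
The rules deleted above are Bootstrap 2.x's contextual progress bars: each variant pairs a vertical two-stop gradient for the solid bar with a 45-degree, four-stop striped gradient tiled every 40px, which the progress-bar-stripes animation scrolls. A minimal unprefixed sketch of the same technique follows; the keyframes values are assumed here (they match Bootstrap 2's 40px tile convention, and the prefixed @keyframes definitions sit elsewhere in the deleted file):

/* Striped, animated bar: the stripes are a repeating 45deg gradient of
   semi-transparent white laid over the bar's base color. */
.bar-striped {
  background-color: #0e90d2;
  background-image: linear-gradient(45deg,
    rgba(255, 255, 255, 0.15) 25%, transparent 25%,
    transparent 50%, rgba(255, 255, 255, 0.15) 50%,
    rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
  background-size: 40px 40px;
  animation: progress-bar-stripes 2s linear infinite;
}

/* Shifting the tile by exactly one period per cycle makes the motion
   seamless (assumed values, matching the 40px background-size above). */
@keyframes progress-bar-stripes {
  from { background-position: 40px 0; }
  to   { background-position: 0 0; }
}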
diff --git a/branch-1.2/ambari-web/vendor/styles/cubism.css b/branch-1.2/ambari-web/vendor/styles/cubism.css
deleted file mode 100644
index 3ff5b3b..0000000
--- a/branch-1.2/ambari-web/vendor/styles/cubism.css
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * D3 visualization components
- * Styles for DAG viewer and Sankey diagram
- */
-.wfdag {
-  width: 100%;
-}
-
-#dag_viewer .axis path,
-#dag_viewer .axis line {
-  fill: none;
-  stroke: #000;
-  shape-rendering: crispEdges;
-}
-
-#dag_viewer line.link,
-#dag_viewer path.link {
-  fill: none;
-  stroke: #666;
-  stroke-width: 2.5px;
-}
-
-#dag_viewer line.link.finished,
-#dag_viewer path.link.finished {
-  stroke: #444;
-}
-
-#dag_viewer marker#finished {
-  fill: #444;
-}
-
-#dag_viewer marker#circle {
-  fill: #666;
-  stroke: none;
-}
-
-#dag_viewer line.source.mark,
-#dag_viewer line.target.mark {
-  stroke: #666;
-  stroke-width: 2.5px;
-}
-
-#dag_viewer rect {
-  fill: #ccc;
-  stroke: #333;
-  stroke-width: 1.5px;
-}
-
-#dag_viewer rect.finished {
-  fill: #69BE28;
-}
-
-#dag_viewer text.joblabel {
-  pointer-events: none;
-  text-anchor: middle;
-}
-
-#dag_viewer text.shadow {
-  stroke: #fff;
-  stroke-width: 3px;
-  stroke-opacity: .8;
-}
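
The deleted cubism.css gives DAG job labels a white halo by painting a stroked duplicate of each label (text.shadow: 3px white stroke at 80% opacity) underneath text.joblabel. On current SVG renderers the same halo can come from a single element via paint-order; the sketch below is an alternative technique, not what the deleted viewer did:

/* Halo without a duplicate element: paint the stroke first, then the
   fill, so the white outline sits behind the glyphs. */
#dag_viewer text.joblabel {
  stroke: #fff;
  stroke-width: 3px;
  stroke-opacity: .8;
  paint-order: stroke;
  pointer-events: none;
  text-anchor: middle;
}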
diff --git a/branch-1.2/ambari-web/vendor/styles/font-awesome-ie7.css b/branch-1.2/ambari-web/vendor/styles/font-awesome-ie7.css
deleted file mode 100644
index c1dc3ac..0000000
--- a/branch-1.2/ambari-web/vendor/styles/font-awesome-ie7.css
+++ /dev/null
@@ -1,645 +0,0 @@
-[class^="icon-"],
-[class*=" icon-"] {
-  font-family: FontAwesome;
-  font-style: normal;
-  font-weight: normal;
-}
-.btn.dropdown-toggle [class^="icon-"],
-.btn.dropdown-toggle [class*=" icon-"] {
-  /* keeps button heights with and without icons the same */
-
-  line-height: 1.4em;
-}
-.icon-large {
-  font-size: 1.3333em;
-}
-.icon-glass {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf000;&nbsp;');
-}
-.icon-music {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf001;&nbsp;');
-}
-.icon-search {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf002;&nbsp;');
-}
-.icon-envelope {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf003;&nbsp;');
-}
-.icon-heart {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf004;&nbsp;');
-}
-.icon-star {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf005;&nbsp;');
-}
-.icon-star-empty {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf006;&nbsp;');
-}
-.icon-user {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf007;&nbsp;');
-}
-.icon-film {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf008;&nbsp;');
-}
-.icon-th-large {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf009;&nbsp;');
-}
-.icon-th {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf00a;&nbsp;');
-}
-.icon-th-list {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf00b;&nbsp;');
-}
-.icon-ok {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf00c;&nbsp;');
-}
-.icon-remove {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf00d;&nbsp;');
-}
-.icon-zoom-in {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf00e;&nbsp;');
-}
-.icon-zoom-out {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf010;&nbsp;');
-}
-.icon-off {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf011;&nbsp;');
-}
-.icon-signal {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf012;&nbsp;');
-}
-.icon-cog {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf013;&nbsp;');
-}
-.icon-trash {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf014;&nbsp;');
-}
-.icon-home {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf015;&nbsp;');
-}
-.icon-file {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf016;&nbsp;');
-}
-.icon-time {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf017;&nbsp;');
-}
-.icon-road {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf018;&nbsp;');
-}
-.icon-download-alt {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf019;&nbsp;');
-}
-.icon-download {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf01a;&nbsp;');
-}
-.icon-upload {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf01b;&nbsp;');
-}
-.icon-inbox {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf01c;&nbsp;');
-}
-.icon-play-circle {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf01d;&nbsp;');
-}
-.icon-repeat {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf01e;&nbsp;');
-}
-.icon-refresh {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf021;&nbsp;');
-}
-.icon-list-alt {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf022;&nbsp;');
-}
-.icon-lock {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf023;&nbsp;');
-}
-.icon-flag {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf024;&nbsp;');
-}
-.icon-headphones {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf025;&nbsp;');
-}
-.icon-volume-off {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf026;&nbsp;');
-}
-.icon-volume-down {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf027;&nbsp;');
-}
-.icon-volume-up {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf028;&nbsp;');
-}
-.icon-qrcode {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf029;&nbsp;');
-}
-.icon-barcode {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf02a;&nbsp;');
-}
-.icon-tag {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf02b;&nbsp;');
-}
-.icon-tags {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf02c;&nbsp;');
-}
-.icon-book {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf02d;&nbsp;');
-}
-.icon-bookmark {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf02e;&nbsp;');
-}
-.icon-print {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf02f;&nbsp;');
-}
-.icon-camera {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf030;&nbsp;');
-}
-.icon-font {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf031;&nbsp;');
-}
-.icon-bold {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf032;&nbsp;');
-}
-.icon-italic {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf033;&nbsp;');
-}
-.icon-text-height {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf034;&nbsp;');
-}
-.icon-text-width {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf035;&nbsp;');
-}
-.icon-align-left {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf036;&nbsp;');
-}
-.icon-align-center {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf037;&nbsp;');
-}
-.icon-align-right {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf038;&nbsp;');
-}
-.icon-align-justify {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf039;&nbsp;');
-}
-.icon-list {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf03a;&nbsp;');
-}
-.icon-indent-left {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf03b;&nbsp;');
-}
-.icon-indent-right {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf03c;&nbsp;');
-}
-.icon-facetime-video {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf03d;&nbsp;');
-}
-.icon-picture {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf03e;&nbsp;');
-}
-.icon-pencil {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf040;&nbsp;');
-}
-.icon-map-marker {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf041;&nbsp;');
-}
-.icon-adjust {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf042;&nbsp;');
-}
-.icon-tint {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf043;&nbsp;');
-}
-.icon-edit {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf044;&nbsp;');
-}
-.icon-share {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf045;&nbsp;');
-}
-.icon-check {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf046;&nbsp;');
-}
-.icon-move {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf047;&nbsp;');
-}
-.icon-step-backward {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf048;&nbsp;');
-}
-.icon-fast-backward {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf049;&nbsp;');
-}
-.icon-backward {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf04a;&nbsp;');
-}
-.icon-play {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf04b;&nbsp;');
-}
-.icon-pause {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf04c;&nbsp;');
-}
-.icon-stop {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf04d;&nbsp;');
-}
-.icon-forward {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf04e;&nbsp;');
-}
-.icon-fast-forward {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf050;&nbsp;');
-}
-.icon-step-forward {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf051;&nbsp;');
-}
-.icon-eject {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf052;&nbsp;');
-}
-.icon-chevron-left {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf053;&nbsp;');
-}
-.icon-chevron-right {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf054;&nbsp;');
-}
-.icon-plus-sign {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf055;&nbsp;');
-}
-.icon-minus-sign {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf056;&nbsp;');
-}
-.icon-remove-sign {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf057;&nbsp;');
-}
-.icon-ok-sign {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf058;&nbsp;');
-}
-.icon-question-sign {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf059;&nbsp;');
-}
-.icon-info-sign {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf05a;&nbsp;');
-}
-.icon-screenshot {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf05b;&nbsp;');
-}
-.icon-remove-circle {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf05c;&nbsp;');
-}
-.icon-ok-circle {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf05d;&nbsp;');
-}
-.icon-ban-circle {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf05e;&nbsp;');
-}
-.icon-arrow-left {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf060;&nbsp;');
-}
-.icon-arrow-right {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf061;&nbsp;');
-}
-.icon-arrow-up {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf062;&nbsp;');
-}
-.icon-arrow-down {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf063;&nbsp;');
-}
-.icon-share-alt {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf064;&nbsp;');
-}
-.icon-resize-full {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf065;&nbsp;');
-}
-.icon-resize-small {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf066;&nbsp;');
-}
-.icon-plus {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf067;&nbsp;');
-}
-.icon-minus {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf068;&nbsp;');
-}
-.icon-asterisk {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf069;&nbsp;');
-}
-.icon-exclamation-sign {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf06a;&nbsp;');
-}
-.icon-gift {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf06b;&nbsp;');
-}
-.icon-leaf {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf06c;&nbsp;');
-}
-.icon-fire {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf06d;&nbsp;');
-}
-.icon-eye-open {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf06e;&nbsp;');
-}
-.icon-eye-close {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf070;&nbsp;');
-}
-.icon-warning-sign {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf071;&nbsp;');
-}
-.icon-plane {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf072;&nbsp;');
-}
-.icon-calendar {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf073;&nbsp;');
-}
-.icon-random {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf074;&nbsp;');
-}
-.icon-comment {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf075;&nbsp;');
-}
-.icon-magnet {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf076;&nbsp;');
-}
-.icon-chevron-up {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf077;&nbsp;');
-}
-.icon-chevron-down {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf078;&nbsp;');
-}
-.icon-retweet {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf079;&nbsp;');
-}
-.icon-shopping-cart {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf07a;&nbsp;');
-}
-.icon-folder-close {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf07b;&nbsp;');
-}
-.icon-folder-open {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf07c;&nbsp;');
-}
-.icon-resize-vertical {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf07d;&nbsp;');
-}
-.icon-resize-horizontal {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf07e;&nbsp;');
-}
-.icon-bar-chart {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf080;&nbsp;');
-}
-.icon-twitter-sign {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf081;&nbsp;');
-}
-.icon-facebook-sign {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf082;&nbsp;');
-}
-.icon-camera-retro {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf083;&nbsp;');
-}
-.icon-key {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf084;&nbsp;');
-}
-.icon-cogs {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf085;&nbsp;');
-}
-.icon-comments {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf086;&nbsp;');
-}
-.icon-thumbs-up {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf087;&nbsp;');
-}
-.icon-thumbs-down {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf088;&nbsp;');
-}
-.icon-star-half {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf089;&nbsp;');
-}
-.icon-heart-empty {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf08a;&nbsp;');
-}
-.icon-signout {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf08b;&nbsp;');
-}
-.icon-linkedin-sign {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf08c;&nbsp;');
-}
-.icon-pushpin {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf08d;&nbsp;');
-}
-.icon-external-link {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf08e;&nbsp;');
-}
-.icon-signin {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf090;&nbsp;');
-}
-.icon-trophy {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf091;&nbsp;');
-}
-.icon-github-sign {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf092;&nbsp;');
-}
-.icon-upload-alt {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf093;&nbsp;');
-}
-.icon-lemon {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf094;&nbsp;');
-}
-.icon-phone {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf095;&nbsp;');
-}
-.icon-check-empty {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf096;&nbsp;');
-}
-.icon-bookmark-empty {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf097;&nbsp;');
-}
-.icon-phone-sign {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf098;&nbsp;');
-}
-.icon-twitter {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf099;&nbsp;');
-}
-.icon-facebook {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf09a;&nbsp;');
-}
-.icon-github {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf09b;&nbsp;');
-}
-.icon-unlock {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf09c;&nbsp;');
-}
-.icon-credit-card {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf09d;&nbsp;');
-}
-.icon-rss {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf09e;&nbsp;');
-}
-.icon-hdd {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0a0;&nbsp;');
-}
-.icon-bullhorn {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0a1;&nbsp;');
-}
-.icon-bell {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0a2;&nbsp;');
-}
-.icon-certificate {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0a3;&nbsp;');
-}
-.icon-hand-right {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0a4;&nbsp;');
-}
-.icon-hand-left {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0a5;&nbsp;');
-}
-.icon-hand-up {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0a6;&nbsp;');
-}
-.icon-hand-down {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0a7;&nbsp;');
-}
-.icon-circle-arrow-left {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0a8;&nbsp;');
-}
-.icon-circle-arrow-right {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0a9;&nbsp;');
-}
-.icon-circle-arrow-up {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0aa;&nbsp;');
-}
-.icon-circle-arrow-down {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0ab;&nbsp;');
-}
-.icon-globe {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0ac;&nbsp;');
-}
-.icon-wrench {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0ad;&nbsp;');
-}
-.icon-tasks {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0ae;&nbsp;');
-}
-.icon-filter {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0b0;&nbsp;');
-}
-.icon-briefcase {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0b1;&nbsp;');
-}
-.icon-fullscreen {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0b2;&nbsp;');
-}
-.icon-group {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0c0;&nbsp;');
-}
-.icon-link {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0c1;&nbsp;');
-}
-.icon-cloud {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0c2;&nbsp;');
-}
-.icon-beaker {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0c3;&nbsp;');
-}
-.icon-cut {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0c4;&nbsp;');
-}
-.icon-copy {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0c5;&nbsp;');
-}
-.icon-paper-clip {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0c6;&nbsp;');
-}
-.icon-save {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0c7;&nbsp;');
-}
-.icon-sign-blank {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0c8;&nbsp;');
-}
-.icon-reorder {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0c9;&nbsp;');
-}
-.icon-list-ul {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0ca;&nbsp;');
-}
-.icon-list-ol {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0cb;&nbsp;');
-}
-.icon-strikethrough {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0cc;&nbsp;');
-}
-.icon-underline {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0cd;&nbsp;');
-}
-.icon-table {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0ce;&nbsp;');
-}
-.icon-magic {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0d0;&nbsp;');
-}
-.icon-truck {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0d1;&nbsp;');
-}
-.icon-pinterest {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0d2;&nbsp;');
-}
-.icon-pinterest-sign {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0d3;&nbsp;');
-}
-.icon-google-plus-sign {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0d4;&nbsp;');
-}
-.icon-google-plus {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0d5;&nbsp;');
-}
-.icon-money {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0d6;&nbsp;');
-}
-.icon-caret-down {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0d7;&nbsp;');
-}
-.icon-caret-up {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0d8;&nbsp;');
-}
-.icon-caret-left {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0d9;&nbsp;');
-}
-.icon-caret-right {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0da;&nbsp;');
-}
-.icon-columns {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0db;&nbsp;');
-}
-.icon-sort {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0dc;&nbsp;');
-}
-.icon-sort-down {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0dd;&nbsp;');
-}
-.icon-sort-up {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0de;&nbsp;');
-}
-.icon-envelope-alt {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0e0;&nbsp;');
-}
-.icon-linkedin {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0e1;&nbsp;');
-}
-.icon-undo {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0e2;&nbsp;');
-}
-.icon-legal {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0e3;&nbsp;');
-}
-.icon-dashboard {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0e4;&nbsp;');
-}
-.icon-comment-alt {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0e5;&nbsp;');
-}
-.icon-comments-alt {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0e6;&nbsp;');
-}
-.icon-bolt {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0e7;&nbsp;');
-}
-.icon-sitemap {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0e8;&nbsp;');
-}
-.icon-umbrella {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0e9;&nbsp;');
-}
-.icon-paste {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf0ea;&nbsp;');
-}
-.icon-user-md {
-  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf200;&nbsp;');
-}
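
Every rule in the sheet above follows one fallback pattern: IE7 has no :before/content support, so the star hack (*zoom, parsed only by IE<=7) runs a CSS expression() that sets runtimeStyle zoom to '1' (triggering hasLayout) and injects the icon's private-use-area glyph plus a non-breaking space straight into the element's innerHTML. Extending the set for IE7 would repeat that template; the class name and codepoint below are hypothetical, purely for illustration:

/* Hypothetical icon following the file's IE7 pattern: the expression()
   body both forces hasLayout and writes the PUA glyph into the element. */
.icon-custom {
  *zoom: expression( this.runtimeStyle['zoom'] = '1', this.innerHTML = '&#xf201;&nbsp;');
}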
diff --git a/branch-1.2/ambari-web/vendor/styles/font-awesome.css b/branch-1.2/ambari-web/vendor/styles/font-awesome.css
deleted file mode 100644
index 4697599..0000000
--- a/branch-1.2/ambari-web/vendor/styles/font-awesome.css
+++ /dev/null
@@ -1,303 +0,0 @@
-/*  Font Awesome
-    the iconic font designed for use with Twitter Bootstrap
-    -------------------------------------------------------
-    The full suite of pictographic icons, examples, and documentation
-    can be found at: http://fortawesome.github.com/Font-Awesome/
-
-    License
-    -------------------------------------------------------
-    The Font Awesome webfont, CSS, and LESS files are licensed under CC BY 3.0:
-    http://creativecommons.org/licenses/by/3.0/ A mention of
-    'Font Awesome - http://fortawesome.github.com/Font-Awesome' in human-readable
-    source code is considered acceptable attribution (most common on the web).
-    If human readable source code is not available to the end user, a mention in
-    an 'About' or 'Credits' screen is considered acceptable (most common in desktop
-    or mobile software).
-
-    Contact
-    -------------------------------------------------------
-    Email: dave@davegandy.com
-    Twitter: http://twitter.com/fortaweso_me
-    Work: http://lemonwi.se co-founder
-
-    */
-@font-face {
-  font-family: "FontAwesome";
-  src: url('../font/fontawesome-webfont.eot');
-  src: url('../font/fontawesome-webfont.eot?#iefix') format('eot'), url('../font/fontawesome-webfont.woff') format('woff'), url('../font/fontawesome-webfont.ttf') format('truetype'), url('../font/fontawesome-webfont.svg#FontAwesome') format('svg');
-  font-weight: normal;
-  font-style: normal;
-}
-
-/*  Font Awesome styles
-    ------------------------------------------------------- */
-[class^="icon-"]:before, [class*=" icon-"]:before {
-  font-family: FontAwesome;
-  font-weight: normal;
-  font-style: normal;
-  display: inline-block;
-  text-decoration: inherit;
-}
-a [class^="icon-"], a [class*=" icon-"] {
-  display: inline-block;
-  text-decoration: inherit;
-}
-/* makes the font 33% larger relative to the icon container */
-.icon-large:before {
-  vertical-align: top;
-  font-size: 1.3333333333333333em;
-}
-.btn [class^="icon-"], .btn [class*=" icon-"] {
-  /* keeps button heights with and without icons the same */
-
-  line-height: .9em;
-}
-li [class^="icon-"], li [class*=" icon-"] {
-  display: inline-block;
-  width: 1.25em;
-  text-align: center;
-}
-li .icon-large[class^="icon-"], li .icon-large[class*=" icon-"] {
-  /* 1.5 increased font size for icon-large * 1.25 width */
-
-  width: 1.875em;
-}
-li[class^="icon-"], li[class*=" icon-"] {
-  margin-left: 0;
-  list-style-type: none;
-}
-li[class^="icon-"]:before, li[class*=" icon-"]:before {
-  text-indent: -2em;
-  text-align: center;
-}
-li[class^="icon-"].icon-large:before, li[class*=" icon-"].icon-large:before {
-  text-indent: -1.3333333333333333em;
-}
-/*  Font Awesome uses the Unicode Private Use Area (PUA) to ensure screen
-    readers do not read off random characters that represent icons */
-.icon-glass:before                { content: "\f000"; }
-.icon-music:before                { content: "\f001"; }
-.icon-search:before               { content: "\f002"; }
-.icon-envelope:before             { content: "\f003"; }
-.icon-heart:before                { content: "\f004"; }
-.icon-star:before                 { content: "\f005"; }
-.icon-star-empty:before           { content: "\f006"; }
-.icon-user:before                 { content: "\f007"; }
-.icon-film:before                 { content: "\f008"; }
-.icon-th-large:before             { content: "\f009"; }
-.icon-th:before                   { content: "\f00a"; }
-.icon-th-list:before              { content: "\f00b"; }
-.icon-ok:before                   { content: "\f00c"; }
-.icon-remove:before               { content: "\f00d"; }
-.icon-zoom-in:before              { content: "\f00e"; }
-
-.icon-zoom-out:before             { content: "\f010"; }
-.icon-off:before                  { content: "\f011"; }
-.icon-signal:before               { content: "\f012"; }
-.icon-cog:before                  { content: "\f013"; }
-.icon-trash:before                { content: "\f014"; }
-.icon-home:before                 { content: "\f015"; }
-.icon-file:before                 { content: "\f016"; }
-.icon-time:before                 { content: "\f017"; }
-.icon-road:before                 { content: "\f018"; }
-.icon-download-alt:before         { content: "\f019"; }
-.icon-download:before             { content: "\f01a"; }
-.icon-upload:before               { content: "\f01b"; }
-.icon-inbox:before                { content: "\f01c"; }
-.icon-play-circle:before          { content: "\f01d"; }
-.icon-repeat:before               { content: "\f01e"; }
-
-/* \f020 doesn't work in Safari. all shifted one down */
-.icon-refresh:before              { content: "\f021"; }
-.icon-list-alt:before             { content: "\f022"; }
-.icon-lock:before                 { content: "\f023"; }
-.icon-flag:before                 { content: "\f024"; }
-.icon-headphones:before           { content: "\f025"; }
-.icon-volume-off:before           { content: "\f026"; }
-.icon-volume-down:before          { content: "\f027"; }
-.icon-volume-up:before            { content: "\f028"; }
-.icon-qrcode:before               { content: "\f029"; }
-.icon-barcode:before              { content: "\f02a"; }
-.icon-tag:before                  { content: "\f02b"; }
-.icon-tags:before                 { content: "\f02c"; }
-.icon-book:before                 { content: "\f02d"; }
-.icon-bookmark:before             { content: "\f02e"; }
-.icon-print:before                { content: "\f02f"; }
-
-.icon-camera:before               { content: "\f030"; }
-.icon-font:before                 { content: "\f031"; }
-.icon-bold:before                 { content: "\f032"; }
-.icon-italic:before               { content: "\f033"; }
-.icon-text-height:before          { content: "\f034"; }
-.icon-text-width:before           { content: "\f035"; }
-.icon-align-left:before           { content: "\f036"; }
-.icon-align-center:before         { content: "\f037"; }
-.icon-align-right:before          { content: "\f038"; }
-.icon-align-justify:before        { content: "\f039"; }
-.icon-list:before                 { content: "\f03a"; }
-.icon-indent-left:before          { content: "\f03b"; }
-.icon-indent-right:before         { content: "\f03c"; }
-.icon-facetime-video:before       { content: "\f03d"; }
-.icon-picture:before              { content: "\f03e"; }
-
-.icon-pencil:before               { content: "\f040"; }
-.icon-map-marker:before           { content: "\f041"; }
-.icon-adjust:before               { content: "\f042"; }
-.icon-tint:before                 { content: "\f043"; }
-.icon-edit:before                 { content: "\f044"; }
-.icon-share:before                { content: "\f045"; }
-.icon-check:before                { content: "\f046"; }
-.icon-move:before                 { content: "\f047"; }
-.icon-step-backward:before        { content: "\f048"; }
-.icon-fast-backward:before        { content: "\f049"; }
-.icon-backward:before             { content: "\f04a"; }
-.icon-play:before                 { content: "\f04b"; }
-.icon-pause:before                { content: "\f04c"; }
-.icon-stop:before                 { content: "\f04d"; }
-.icon-forward:before              { content: "\f04e"; }
-
-.icon-fast-forward:before         { content: "\f050"; }
-.icon-step-forward:before         { content: "\f051"; }
-.icon-eject:before                { content: "\f052"; }
-.icon-chevron-left:before         { content: "\f053"; }
-.icon-chevron-right:before        { content: "\f054"; }
-.icon-plus-sign:before            { content: "\f055"; }
-.icon-minus-sign:before           { content: "\f056"; }
-.icon-remove-sign:before          { content: "\f057"; }
-.icon-ok-sign:before              { content: "\f058"; }
-.icon-question-sign:before        { content: "\f059"; }
-.icon-info-sign:before            { content: "\f05a"; }
-.icon-screenshot:before           { content: "\f05b"; }
-.icon-remove-circle:before        { content: "\f05c"; }
-.icon-ok-circle:before            { content: "\f05d"; }
-.icon-ban-circle:before           { content: "\f05e"; }
-
-.icon-arrow-left:before           { content: "\f060"; }
-.icon-arrow-right:before          { content: "\f061"; }
-.icon-arrow-up:before             { content: "\f062"; }
-.icon-arrow-down:before           { content: "\f063"; }
-.icon-share-alt:before            { content: "\f064"; }
-.icon-resize-full:before          { content: "\f065"; }
-.icon-resize-small:before         { content: "\f066"; }
-.icon-plus:before                 { content: "\f067"; }
-.icon-minus:before                { content: "\f068"; }
-.icon-asterisk:before             { content: "\f069"; }
-.icon-exclamation-sign:before     { content: "\f06a"; }
-.icon-gift:before                 { content: "\f06b"; }
-.icon-leaf:before                 { content: "\f06c"; }
-.icon-fire:before                 { content: "\f06d"; }
-.icon-eye-open:before             { content: "\f06e"; }
-
-.icon-eye-close:before            { content: "\f070"; }
-.icon-warning-sign:before         { content: "\f071"; }
-.icon-plane:before                { content: "\f072"; }
-.icon-calendar:before             { content: "\f073"; }
-.icon-random:before               { content: "\f074"; }
-.icon-comment:before              { content: "\f075"; }
-.icon-magnet:before               { content: "\f076"; }
-.icon-chevron-up:before           { content: "\f077"; }
-.icon-chevron-down:before         { content: "\f078"; }
-.icon-retweet:before              { content: "\f079"; }
-.icon-shopping-cart:before        { content: "\f07a"; }
-.icon-folder-close:before         { content: "\f07b"; }
-.icon-folder-open:before          { content: "\f07c"; }
-.icon-resize-vertical:before      { content: "\f07d"; }
-.icon-resize-horizontal:before    { content: "\f07e"; }
-
-.icon-bar-chart:before            { content: "\f080"; }
-.icon-twitter-sign:before         { content: "\f081"; }
-.icon-facebook-sign:before        { content: "\f082"; }
-.icon-camera-retro:before         { content: "\f083"; }
-.icon-key:before                  { content: "\f084"; }
-.icon-cogs:before                 { content: "\f085"; }
-.icon-comments:before             { content: "\f086"; }
-.icon-thumbs-up:before            { content: "\f087"; }
-.icon-thumbs-down:before          { content: "\f088"; }
-.icon-star-half:before            { content: "\f089"; }
-.icon-heart-empty:before          { content: "\f08a"; }
-.icon-signout:before              { content: "\f08b"; }
-.icon-linkedin-sign:before        { content: "\f08c"; }
-.icon-pushpin:before              { content: "\f08d"; }
-.icon-external-link:before        { content: "\f08e"; }
-
-.icon-signin:before               { content: "\f090"; }
-.icon-trophy:before               { content: "\f091"; }
-.icon-github-sign:before          { content: "\f092"; }
-.icon-upload-alt:before           { content: "\f093"; }
-.icon-lemon:before                { content: "\f094"; }
-.icon-phone:before                { content: "\f095"; }
-.icon-check-empty:before          { content: "\f096"; }
-.icon-bookmark-empty:before       { content: "\f097"; }
-.icon-phone-sign:before           { content: "\f098"; }
-.icon-twitter:before              { content: "\f099"; }
-.icon-facebook:before             { content: "\f09a"; }
-.icon-github:before               { content: "\f09b"; }
-.icon-unlock:before               { content: "\f09c"; }
-.icon-credit-card:before          { content: "\f09d"; }
-.icon-rss:before                  { content: "\f09e"; }
-
-.icon-hdd:before                  { content: "\f0a0"; }
-.icon-bullhorn:before             { content: "\f0a1"; }
-.icon-bell:before                 { content: "\f0a2"; }
-.icon-certificate:before          { content: "\f0a3"; }
-.icon-hand-right:before           { content: "\f0a4"; }
-.icon-hand-left:before            { content: "\f0a5"; }
-.icon-hand-up:before              { content: "\f0a6"; }
-.icon-hand-down:before            { content: "\f0a7"; }
-.icon-circle-arrow-left:before    { content: "\f0a8"; }
-.icon-circle-arrow-right:before   { content: "\f0a9"; }
-.icon-circle-arrow-up:before      { content: "\f0aa"; }
-.icon-circle-arrow-down:before    { content: "\f0ab"; }
-.icon-globe:before                { content: "\f0ac"; }
-.icon-wrench:before               { content: "\f0ad"; }
-.icon-tasks:before                { content: "\f0ae"; }
-
-.icon-filter:before               { content: "\f0b0"; }
-.icon-briefcase:before            { content: "\f0b1"; }
-.icon-fullscreen:before           { content: "\f0b2"; }
-
-.icon-group:before                { content: "\f0c0"; }
-.icon-link:before                 { content: "\f0c1"; }
-.icon-cloud:before                { content: "\f0c2"; }
-.icon-beaker:before               { content: "\f0c3"; }
-.icon-cut:before                  { content: "\f0c4"; }
-.icon-copy:before                 { content: "\f0c5"; }
-.icon-paper-clip:before           { content: "\f0c6"; }
-.icon-save:before                 { content: "\f0c7"; }
-.icon-sign-blank:before           { content: "\f0c8"; }
-.icon-reorder:before              { content: "\f0c9"; }
-.icon-list-ul:before              { content: "\f0ca"; }
-.icon-list-ol:before              { content: "\f0cb"; }
-.icon-strikethrough:before        { content: "\f0cc"; }
-.icon-underline:before            { content: "\f0cd"; }
-.icon-table:before                { content: "\f0ce"; }
-
-.icon-magic:before                { content: "\f0d0"; }
-.icon-truck:before                { content: "\f0d1"; }
-.icon-pinterest:before            { content: "\f0d2"; }
-.icon-pinterest-sign:before       { content: "\f0d3"; }
-.icon-google-plus-sign:before     { content: "\f0d4"; }
-.icon-google-plus:before          { content: "\f0d5"; }
-.icon-money:before                { content: "\f0d6"; }
-.icon-caret-down:before           { content: "\f0d7"; }
-.icon-caret-up:before             { content: "\f0d8"; }
-.icon-caret-left:before           { content: "\f0d9"; }
-.icon-caret-right:before          { content: "\f0da"; }
-.icon-columns:before              { content: "\f0db"; }
-.icon-sort:before                 { content: "\f0dc"; }
-.icon-sort-down:before            { content: "\f0dd"; }
-.icon-sort-up:before              { content: "\f0de"; }
-
-.icon-envelope-alt:before         { content: "\f0e0"; }
-.icon-linkedin:before             { content: "\f0e1"; }
-.icon-undo:before                 { content: "\f0e2"; }
-.icon-legal:before                { content: "\f0e3"; }
-.icon-dashboard:before            { content: "\f0e4"; }
-.icon-comment-alt:before          { content: "\f0e5"; }
-.icon-comments-alt:before         { content: "\f0e6"; }
-.icon-bolt:before                 { content: "\f0e7"; }
-.icon-sitemap:before              { content: "\f0e8"; }
-.icon-umbrella:before             { content: "\f0e9"; }
-.icon-paste:before                { content: "\f0ea"; }
-
-.icon-user-md:before              { content: "\f200"; }
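
Two techniques carry the sheet above: the "bulletproof" @font-face source list (the eot?#iefix query keeps IE<=8 from misparsing the multi-format src) and icon glyphs mapped into the Unicode Private Use Area so screen readers skip them, with \f020 left unused per the Safari note. Because the attribute-prefix selectors ([class^="icon-"]:before) style any icon- class, adding a glyph takes one declaration; the class name and codepoint here are hypothetical:

/* Hypothetical addition: inherits font-family and layout from the
   [class^="icon-"]:before rules above; only the PUA codepoint is new. */
.icon-ambari:before { content: "\f201"; }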
diff --git a/branch-1.2/ambari-web/vendor/styles/jquery-ui-bootstrap/jquery-ui-1.8.16.custom.css b/branch-1.2/ambari-web/vendor/styles/jquery-ui-bootstrap/jquery-ui-1.8.16.custom.css
deleted file mode 100644
index ad18ce7..0000000
--- a/branch-1.2/ambari-web/vendor/styles/jquery-ui-bootstrap/jquery-ui-1.8.16.custom.css
+++ /dev/null
@@ -1,1320 +0,0 @@
-/*!
- * jQuery UI Bootstrap (0.22)
- * http://addyosmani.github.com/jquery-ui-bootstrap
- *
- * Copyright 2012, Addy Osmani
- * Dual licensed under the MIT or GPL Version 2 licenses.
- *
- * Portions copyright jQuery UI & Twitter Bootstrap
- */
-
-
-/* Layout helpers
-----------------------------------*/
-.ui-helper-hidden { display: none; }
-.ui-helper-hidden-accessible { position: absolute !important; clip: rect(1px 1px 1px 1px); clip: rect(1px,1px,1px,1px); }
-.ui-helper-reset { margin: 0; padding: 0; border: 0; outline: 0; line-height: 1.3; text-decoration: none; font-size: 100%; list-style: none; }
-.ui-helper-clearfix:after { content: "."; display: block; height: 0; clear: both; visibility: hidden; }
-.ui-helper-clearfix { display: inline-block; }
-/* required comment for clearfix to work in Opera \*/
-* html .ui-helper-clearfix { height:1%; }
-.ui-helper-clearfix { display:block; }
-/* end clearfix */
-.ui-helper-zfix { width: 100%; height: 100%; top: 0; left: 0; position: absolute; opacity: 0; filter:Alpha(Opacity=0); }
-
-
-/* Interaction Cues
-----------------------------------*/
-.ui-state-disabled { cursor: default !important; }
-
-
-/* Icons
-----------------------------------*/
-
-/* states and images */
-.ui-icon { display: block; text-indent: -99999px; overflow: hidden; background-repeat: no-repeat; }
-
-
-/* Misc visuals
-----------------------------------*/
-
-/* Overlays */
-.ui-widget-overlay { position: absolute; top: 0; left: 0; width: 100%; height: 100%; }
-
-
-/*
- * jQuery UI CSS Framework 1.8.16
- *
- * Copyright 2011, AUTHORS.txt (http://jqueryui.com/about)
- * Dual licensed under the MIT or GPL Version 2 licenses.
- * http://jquery.org/license
- *
- * http://docs.jquery.com/UI/Theming/API
- *
- * To view and modify this theme, visit http://jqueryui.com/themeroller/?ctl=themeroller
- */
-
-
-/* Component containers
-----------------------------------*/
-.ui-widget {   font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; font-size:13px; }
-.ui-widget .ui-widget { font-size: 1em; }
-.ui-widget input, .ui-widget select, .ui-widget textarea, .ui-widget button { font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; font-size: 1em; }
-.ui-widget-content { border: 1px solid #aaaaaa; background: #ffffff url(../img/jquery-ui-bootstrap/ui-bg_glass_75_ffffff_1x400.png) 50% 50% repeat-x; color: #404040; }
-.ui-widget-content a { color: #404040; }
-.ui-widget-header {
-  font-weight:bold;
-  border-color: #0064cd #0064cd #003f81;
-  border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25);
-  border:1px solid #666;
-
- }
-.ui-widget-header a { color: #222222; }
-
-/* Interaction states
-----------------------------------*/
-.ui-state-default, .ui-widget-content .ui-state-default, .ui-widget-header .ui-state-default {
-
-  background-color: #e6e6e6;
-  background-repeat: no-repeat;
-  background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#ffffff), color-stop(25%, #ffffff), to(#e6e6e6));
-  background-image: -webkit-linear-gradient(#ffffff, #ffffff 25%, #e6e6e6);
-  background-image: -moz-linear-gradient(top, #ffffff, #ffffff 25%, #e6e6e6);
-  background-image: -ms-linear-gradient(#ffffff, #ffffff 25%, #e6e6e6);
-  background-image: -o-linear-gradient(#ffffff, #ffffff 25%, #e6e6e6);
-  background-image: linear-gradient(#ffffff, #ffffff 25%, #e6e6e6);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffff', endColorstr='#e6e6e6', GradientType=0);
-
-  text-shadow: 0 1px 1px rgba(255, 255, 255, 0.75);
-
-  color: #333;
-  font-size: 13px;
-  line-height: normal;
-  border: 1px solid #ccc;
-  border-bottom-color: #bbb;
-  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.2), 0 1px 2px rgba(0, 0, 0, 0.05);
-  -moz-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.2), 0 1px 2px rgba(0, 0, 0, 0.05);
-  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.2), 0 1px 2px rgba(0, 0, 0, 0.05);
-  -webkit-transition: 0.1s linear background-image;
-  -moz-transition: 0.1s linear background-image;
-  -ms-transition: 0.1s linear background-image;
-  -o-transition: 0.1s linear background-image;
-  transition: 0.1s linear background-image;
-   overflow: visible;
-
- }
-
-
-.ui-state-default a, .ui-state-default a:link, .ui-state-default a:visited { color: #555555; text-decoration: none; }
-.ui-state-hover, .ui-widget-content .ui-state-hover, .ui-widget-header .ui-state-hover, .ui-state-focus, .ui-widget-content .ui-state-focus, .ui-widget-header .ui-state-focus {
-  background-position: 0 -15px;
-  color: #333;
-  text-decoration: none;
-
-
- }
-.ui-state-hover a, .ui-state-hover a:hover { color: #212121; text-decoration: none; }
-.ui-state-active, .ui-widget-content .ui-state-active, .ui-widget-header .ui-state-active { border: 1px solid #aaaaaa;  font-weight: normal; color: #212121; }
-.ui-state-active a, .ui-state-active a:link, .ui-state-active a:visited { color: #212121; text-decoration: none; }
-.ui-widget :active { outline: none; }
-
-/* Interaction Cues
-----------------------------------*/
-
-
-.ui-state-highlight p, .ui-state-error p, .ui-state-default p{
-	font-size: 13px;
-	font-weight: normal;
-	line-height: 18px;
-	margin:7px 15px;
-}
-.ui-state-highlight, .ui-widget-content .ui-state-highlight, .ui-widget-header .ui-state-highlight  {
-
-
-  position: relative;
-  margin-bottom: 18px;
-  color: #404040;
-  background-color: #eedc94;
-  background-repeat: repeat-x;
-  background-image: -khtml-gradient(linear, left top, left bottom, from(#fceec1), to(#eedc94));
-  background-image: -moz-linear-gradient(top, #fceec1, #eedc94);
-  background-image: -ms-linear-gradient(top, #fceec1, #eedc94);
-  background-image: -webkit-gradient(linear, left top, left bottom, color-stop(0%, #fceec1), color-stop(100%, #eedc94));
-  background-image: -webkit-linear-gradient(top, #fceec1, #eedc94);
-  background-image: -o-linear-gradient(top, #fceec1, #eedc94);
-  background-image: linear-gradient(to bottom, #fceec1, #eedc94);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fceec1', endColorstr='#eedc94', GradientType=0);
-  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);
-  border-color: #eedc94 #eedc94 #e4c652;
-  border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25);
-  text-shadow: 0 1px 0 rgba(255, 255, 255, 0.5);
-  border-width: 1px;
-  border-style: solid;
-  -webkit-border-radius: 4px;
-  -moz-border-radius: 4px;
-  border-radius: 4px;
-  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25);
-  -moz-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25);
-  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25);
-
-
-}
-.ui-state-highlight a, .ui-widget-content .ui-state-highlight a,.ui-widget-header .ui-state-highlight a { color: #363636; }
-.ui-state-error, .ui-widget-content .ui-state-error, .ui-widget-header .ui-state-error {
-
-
-  position: relative;
-  margin-bottom: 18px;
-  color: #ffffff;
-  border-width: 1px;
-  border-style: solid;
-  -webkit-border-radius: 4px;
-  -moz-border-radius: 4px;
-  border-radius: 4px;
-  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25);
-  -moz-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25);
-  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25);
- background-color: #c43c35;
-  background-repeat: repeat-x;
-  background-image: -khtml-gradient(linear, left top, left bottom, from(#ee5f5b), to(#c43c35));
-  background-image: -moz-linear-gradient(top, #ee5f5b, #c43c35);
-  background-image: -ms-linear-gradient(top, #ee5f5b, #c43c35);
-  background-image: -webkit-gradient(linear, left top, left bottom, color-stop(0%, #ee5f5b), color-stop(100%, #c43c35));
-  background-image: -webkit-linear-gradient(top, #ee5f5b, #c43c35);
-  background-image: -o-linear-gradient(top, #ee5f5b, #c43c35);
-  background-image: linear-gradient(to bottom, #ee5f5b, #c43c35);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ee5f5b', endColorstr='#c43c35', GradientType=0);
-  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);
-  border-color: #c43c35 #c43c35 #882a25;
-  border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25);
-}
-.ui-state-error a, .ui-widget-content .ui-state-error a, .ui-widget-header .ui-state-error a { color: #cd0a0a; }
-.ui-state-error-text, .ui-widget-content .ui-state-error-text, .ui-widget-header .ui-state-error-text { color: #cd0a0a; }
-.ui-priority-primary, .ui-widget-content .ui-priority-primary, .ui-widget-header .ui-priority-primary { font-weight: bold; }
-.ui-priority-secondary, .ui-widget-content .ui-priority-secondary,  .ui-widget-header .ui-priority-secondary { opacity: .7; filter:Alpha(Opacity=70); font-weight: normal; }
-.ui-state-disabled, .ui-widget-content .ui-state-disabled, .ui-widget-header .ui-state-disabled { opacity: .35; filter:Alpha(Opacity=35); background-image: none;  }
-
-
-
-/* Icons
-----------------------------------*/
-
-/* states and images */
-.ui-icon { width: 16px; height: 16px; background-image: url(../img/jquery-ui-bootstrap/ui-icons_222222_256x240.png); }
-.ui-widget-content .ui-icon {background-image: url(../img/jquery-ui-bootstrap/ui-icons_222222_256x240.png); }
-.ui-widget-header .ui-icon {background-image: url(../img/jquery-ui-bootstrap/ui-icons_222222_256x240.png); }
-.ui-state-default .ui-icon { background-image: url(../img/jquery-ui-bootstrap/ui-icons_888888_256x240.png); }
-.ui-state-hover .ui-icon, .ui-state-focus .ui-icon {background-image: url(../img/jquery-ui-bootstrap/ui-icons_454545_256x240.png); }
-.ui-state-active .ui-icon {background-image: url(../img/jquery-ui-bootstrap/ui-icons_454545_256x240.png); }
-.ui-state-highlight .ui-icon {background-image: url(../img/jquery-ui-bootstrap/ui-icons_2e83ff_256x240.png); }
-.ui-state-error .ui-icon, .ui-state-error-text .ui-icon {background-image: url(../img/jquery-ui-bootstrap/ui-icons_f6cf3b_256x240.png); }
-
-/* positioning */
-.ui-icon-carat-1-n { background-position: 0 0; }
-.ui-icon-carat-1-ne { background-position: -16px 0; }
-.ui-icon-carat-1-e { background-position: -32px 0; }
-.ui-icon-carat-1-se { background-position: -48px 0; }
-.ui-icon-carat-1-s { background-position: -64px 0; }
-.ui-icon-carat-1-sw { background-position: -80px 0; }
-.ui-icon-carat-1-w { background-position: -96px 0; }
-.ui-icon-carat-1-nw { background-position: -112px 0; }
-.ui-icon-carat-2-n-s { background-position: -128px 0; }
-.ui-icon-carat-2-e-w { background-position: -144px 0; }
-.ui-icon-triangle-1-n { background-position: 0 -16px; }
-.ui-icon-triangle-1-ne { background-position: -16px -16px; }
-.ui-icon-triangle-1-e { background-position: -32px -16px; }
-.ui-icon-triangle-1-se { background-position: -48px -16px; }
-.ui-icon-triangle-1-s { background-position: -64px -16px; }
-.ui-icon-triangle-1-sw { background-position: -80px -16px; }
-.ui-icon-triangle-1-w { background-position: -96px -16px; }
-.ui-icon-triangle-1-nw { background-position: -112px -16px; }
-.ui-icon-triangle-2-n-s { background-position: -128px -16px; }
-.ui-icon-triangle-2-e-w { background-position: -144px -16px; }
-.ui-icon-arrow-1-n { background-position: 0 -32px; }
-.ui-icon-arrow-1-ne { background-position: -16px -32px; }
-.ui-icon-arrow-1-e { background-position: -32px -32px; }
-.ui-icon-arrow-1-se { background-position: -48px -32px; }
-.ui-icon-arrow-1-s { background-position: -64px -32px; }
-.ui-icon-arrow-1-sw { background-position: -80px -32px; }
-.ui-icon-arrow-1-w { background-position: -96px -32px; }
-.ui-icon-arrow-1-nw { background-position: -112px -32px; }
-.ui-icon-arrow-2-n-s { background-position: -128px -32px; }
-.ui-icon-arrow-2-ne-sw { background-position: -144px -32px; }
-.ui-icon-arrow-2-e-w { background-position: -160px -32px; }
-.ui-icon-arrow-2-se-nw { background-position: -176px -32px; }
-.ui-icon-arrowstop-1-n { background-position: -192px -32px; }
-.ui-icon-arrowstop-1-e { background-position: -208px -32px; }
-.ui-icon-arrowstop-1-s { background-position: -224px -32px; }
-.ui-icon-arrowstop-1-w { background-position: -240px -32px; }
-.ui-icon-arrowthick-1-n { background-position: 0 -48px; }
-.ui-icon-arrowthick-1-ne { background-position: -16px -48px; }
-.ui-icon-arrowthick-1-e { background-position: -32px -48px; }
-.ui-icon-arrowthick-1-se { background-position: -48px -48px; }
-.ui-icon-arrowthick-1-s { background-position: -64px -48px; }
-.ui-icon-arrowthick-1-sw { background-position: -80px -48px; }
-.ui-icon-arrowthick-1-w { background-position: -96px -48px; }
-.ui-icon-arrowthick-1-nw { background-position: -112px -48px; }
-.ui-icon-arrowthick-2-n-s { background-position: -128px -48px; }
-.ui-icon-arrowthick-2-ne-sw { background-position: -144px -48px; }
-.ui-icon-arrowthick-2-e-w { background-position: -160px -48px; }
-.ui-icon-arrowthick-2-se-nw { background-position: -176px -48px; }
-.ui-icon-arrowthickstop-1-n { background-position: -192px -48px; }
-.ui-icon-arrowthickstop-1-e { background-position: -208px -48px; }
-.ui-icon-arrowthickstop-1-s { background-position: -224px -48px; }
-.ui-icon-arrowthickstop-1-w { background-position: -240px -48px; }
-.ui-icon-arrowreturnthick-1-w { background-position: 0 -64px; }
-.ui-icon-arrowreturnthick-1-n { background-position: -16px -64px; }
-.ui-icon-arrowreturnthick-1-e { background-position: -32px -64px; }
-.ui-icon-arrowreturnthick-1-s { background-position: -48px -64px; }
-.ui-icon-arrowreturn-1-w { background-position: -64px -64px; }
-.ui-icon-arrowreturn-1-n { background-position: -80px -64px; }
-.ui-icon-arrowreturn-1-e { background-position: -96px -64px; }
-.ui-icon-arrowreturn-1-s { background-position: -112px -64px; }
-.ui-icon-arrowrefresh-1-w { background-position: -128px -64px; }
-.ui-icon-arrowrefresh-1-n { background-position: -144px -64px; }
-.ui-icon-arrowrefresh-1-e { background-position: -160px -64px; }
-.ui-icon-arrowrefresh-1-s { background-position: -176px -64px; }
-.ui-icon-arrow-4 { background-position: 0 -80px; }
-.ui-icon-arrow-4-diag { background-position: -16px -80px; }
-.ui-icon-extlink { background-position: -32px -80px; }
-.ui-icon-newwin { background-position: -48px -80px; }
-.ui-icon-refresh { background-position: -64px -80px; }
-.ui-icon-shuffle { background-position: -80px -80px; }
-.ui-icon-transfer-e-w { background-position: -96px -80px; }
-.ui-icon-transferthick-e-w { background-position: -112px -80px; }
-.ui-icon-folder-collapsed { background-position: 0 -96px; }
-.ui-icon-folder-open { background-position: -16px -96px; }
-.ui-icon-document { background-position: -32px -96px; }
-.ui-icon-document-b { background-position: -48px -96px; }
-.ui-icon-note { background-position: -64px -96px; }
-.ui-icon-mail-closed { background-position: -80px -96px; }
-.ui-icon-mail-open { background-position: -96px -96px; }
-.ui-icon-suitcase { background-position: -112px -96px; }
-.ui-icon-comment { background-position: -128px -96px; }
-.ui-icon-person { background-position: -144px -96px; }
-.ui-icon-print { background-position: -160px -96px; }
-.ui-icon-trash { background-position: -176px -96px; }
-.ui-icon-locked { background-position: -192px -96px; }
-.ui-icon-unlocked { background-position: -208px -96px; }
-.ui-icon-bookmark { background-position: -224px -96px; }
-.ui-icon-tag { background-position: -240px -96px; }
-.ui-icon-home { background-position: 0 -112px; }
-.ui-icon-flag { background-position: -16px -112px; }
-.ui-icon-calendar { background-position: -32px -112px; }
-.ui-icon-cart { background-position: -48px -112px; }
-.ui-icon-pencil { background-position: -64px -112px; }
-.ui-icon-clock { background-position: -80px -112px; }
-.ui-icon-disk { background-position: -96px -112px; }
-.ui-icon-calculator { background-position: -112px -112px; }
-.ui-icon-zoomin { background-position: -128px -112px; }
-.ui-icon-zoomout { background-position: -144px -112px; }
-.ui-icon-search { background-position: -160px -112px; }
-.ui-icon-wrench { background-position: -176px -112px; }
-.ui-icon-gear { background-position: -192px -112px; }
-.ui-icon-heart { background-position: -208px -112px; }
-.ui-icon-star { background-position: -224px -112px; }
-.ui-icon-link { background-position: -240px -112px; }
-.ui-icon-cancel { background-position: 0 -128px; }
-.ui-icon-plus { background-position: -16px -128px; }
-.ui-icon-plusthick { background-position: -32px -128px; }
-.ui-icon-minus { background-position: -48px -128px; }
-.ui-icon-minusthick { background-position: -64px -128px; }
-.ui-icon-close { background-position: -80px -128px; }
-.ui-icon-closethick { background-position: -96px -128px; }
-.ui-icon-key { background-position: -112px -128px; }
-.ui-icon-lightbulb { background-position: -128px -128px; }
-.ui-icon-scissors { background-position: -144px -128px; }
-.ui-icon-clipboard { background-position: -160px -128px; }
-.ui-icon-copy { background-position: -176px -128px; }
-.ui-icon-contact { background-position: -192px -128px; }
-.ui-icon-image { background-position: -208px -128px; }
-.ui-icon-video { background-position: -224px -128px; }
-.ui-icon-script { background-position: -240px -128px; }
-.ui-icon-alert { background-position: 0 -144px; }
-.ui-icon-info { background-position: -16px -144px; }
-.ui-icon-notice { background-position: -32px -144px; }
-.ui-icon-help { background-position: -48px -144px; }
-.ui-icon-check { background-position: -64px -144px; }
-.ui-icon-bullet { background-position: -80px -144px; }
-.ui-icon-radio-off { background-position: -96px -144px; }
-.ui-icon-radio-on { background-position: -112px -144px; }
-.ui-icon-pin-w { background-position: -128px -144px; }
-.ui-icon-pin-s { background-position: -144px -144px; }
-.ui-icon-play { background-position: 0 -160px; }
-.ui-icon-pause { background-position: -16px -160px; }
-.ui-icon-seek-next { background-position: -32px -160px; }
-.ui-icon-seek-prev { background-position: -48px -160px; }
-.ui-icon-seek-end { background-position: -64px -160px; }
-.ui-icon-seek-start { background-position: -80px -160px; }
-/* ui-icon-seek-first is deprecated, use ui-icon-seek-start instead */
-.ui-icon-seek-first { background-position: -80px -160px; }
-.ui-icon-stop { background-position: -96px -160px; }
-.ui-icon-eject { background-position: -112px -160px; }
-.ui-icon-volume-off { background-position: -128px -160px; }
-.ui-icon-volume-on { background-position: -144px -160px; }
-.ui-icon-power { background-position: 0 -176px; }
-.ui-icon-signal-diag { background-position: -16px -176px; }
-.ui-icon-signal { background-position: -32px -176px; }
-.ui-icon-battery-0 { background-position: -48px -176px; }
-.ui-icon-battery-1 { background-position: -64px -176px; }
-.ui-icon-battery-2 { background-position: -80px -176px; }
-.ui-icon-battery-3 { background-position: -96px -176px; }
-.ui-icon-circle-plus { background-position: 0 -192px; }
-.ui-icon-circle-minus { background-position: -16px -192px; }
-.ui-icon-circle-close { background-position: -32px -192px; }
-.ui-icon-circle-triangle-e { background-position: -48px -192px; }
-.ui-icon-circle-triangle-s { background-position: -64px -192px; }
-.ui-icon-circle-triangle-w { background-position: -80px -192px; }
-.ui-icon-circle-triangle-n { background-position: -96px -192px; }
-.ui-icon-circle-arrow-e { background-position: -112px -192px; }
-.ui-icon-circle-arrow-s { background-position: -128px -192px; }
-.ui-icon-circle-arrow-w { background-position: -144px -192px; }
-.ui-icon-circle-arrow-n { background-position: -160px -192px; }
-.ui-icon-circle-zoomin { background-position: -176px -192px; }
-.ui-icon-circle-zoomout { background-position: -192px -192px; }
-.ui-icon-circle-check { background-position: -208px -192px; }
-.ui-icon-circlesmall-plus { background-position: 0 -208px; }
-.ui-icon-circlesmall-minus { background-position: -16px -208px; }
-.ui-icon-circlesmall-close { background-position: -32px -208px; }
-.ui-icon-squaresmall-plus { background-position: -48px -208px; }
-.ui-icon-squaresmall-minus { background-position: -64px -208px; }
-.ui-icon-squaresmall-close { background-position: -80px -208px; }
-.ui-icon-grip-dotted-vertical { background-position: 0 -224px; }
-.ui-icon-grip-dotted-horizontal { background-position: -16px -224px; }
-.ui-icon-grip-solid-vertical { background-position: -32px -224px; }
-.ui-icon-grip-solid-horizontal { background-position: -48px -224px; }
-.ui-icon-gripsmall-diagonal-se { background-position: -64px -224px; }
-.ui-icon-grip-diagonal-se { background-position: -80px -224px; }
-
-
-/* Misc visuals
-----------------------------------*/
-
-/* Corner radius */
-.ui-corner-all, .ui-corner-top, .ui-corner-left, .ui-corner-tl { -moz-border-radius-topleft: 4px; -webkit-border-top-left-radius: 4px; -khtml-border-top-left-radius: 4px; border-top-left-radius: 4px; }
-.ui-corner-all, .ui-corner-top, .ui-corner-right, .ui-corner-tr { -moz-border-radius-topright: 4px; -webkit-border-top-right-radius: 4px; -khtml-border-top-right-radius: 4px; border-top-right-radius: 4px; }
-.ui-corner-all, .ui-corner-bottom, .ui-corner-left, .ui-corner-bl { -moz-border-radius-bottomleft: 4px; -webkit-border-bottom-left-radius: 4px; -khtml-border-bottom-left-radius: 4px; border-bottom-left-radius: 4px; }
-.ui-corner-all, .ui-corner-bottom, .ui-corner-right, .ui-corner-br { -moz-border-radius-bottomright: 4px; -webkit-border-bottom-right-radius: 4px; -khtml-border-bottom-right-radius: 4px; border-bottom-right-radius: 4px; }
-
-
-
-/* Overlays */
-.ui-widget-overlay { background: #aaaaaa url(../img/jquery-ui-bootstrap/ui-bg_flat_0_aaaaaa_40x100.png) 50% 50% repeat-x; opacity: .30;filter:Alpha(Opacity=30); }
-.ui-widget-shadow { margin: -8px 0 0 -8px; padding: 8px; background: #aaaaaa url(../img/jquery-ui-bootstrap/ui-bg_flat_0_aaaaaa_40x100.png) 50% 50% repeat-x; opacity: .30;filter:Alpha(Opacity=30); -moz-border-radius: 8px; -khtml-border-radius: 8px; -webkit-border-radius: 8px; border-radius: 8px; }
-/*
- * jQuery UI Resizable 1.8.16
- *
- * Copyright 2011, AUTHORS.txt (http://jqueryui.com/about)
- * Dual licensed under the MIT or GPL Version 2 licenses.
- * http://jquery.org/license
- *
- * http://docs.jquery.com/UI/Resizable#theming
- */
-.ui-resizable { position: relative;}
-.ui-resizable-handle { position: absolute;font-size: 0.1px;z-index: 99999; display: block; }
-.ui-resizable-disabled .ui-resizable-handle, .ui-resizable-autohide .ui-resizable-handle { display: none; }
-.ui-resizable-n { cursor: n-resize; height: 7px; width: 100%; top: -5px; left: 0; }
-.ui-resizable-s { cursor: s-resize; height: 7px; width: 100%; bottom: -5px; left: 0; }
-.ui-resizable-e { cursor: e-resize; width: 7px; right: -5px; top: 0; height: 100%; }
-.ui-resizable-w { cursor: w-resize; width: 7px; left: -5px; top: 0; height: 100%; }
-.ui-resizable-se { cursor: se-resize; width: 12px; height: 12px; right: 1px; bottom: 1px; }
-.ui-resizable-sw { cursor: sw-resize; width: 9px; height: 9px; left: -5px; bottom: -5px; }
-.ui-resizable-nw { cursor: nw-resize; width: 9px; height: 9px; left: -5px; top: -5px; }
-.ui-resizable-ne { cursor: ne-resize; width: 9px; height: 9px; right: -5px; top: -5px; }
-/*
- * jQuery UI Selectable 1.8.16
- *
- * Copyright 2011, AUTHORS.txt (http://jqueryui.com/about)
- * Dual licensed under the MIT or GPL Version 2 licenses.
- * http://jquery.org/license
- *
- * http://docs.jquery.com/UI/Selectable#theming
- */
-.ui-selectable-helper { position: absolute; z-index: 100; border:1px dotted black; }
-/*
- * jQuery UI Accordion 1.8.16
- *
- * Copyright 2011, AUTHORS.txt (http://jqueryui.com/about)
- * Dual licensed under the MIT or GPL Version 2 licenses.
- * http://jquery.org/license
- *
- * http://docs.jquery.com/UI/Accordion#theming
- */
-/* IE/Win - Fix animation bug - #4615 */
-.ui-accordion { width: 100%; }
-.ui-accordion .ui-accordion-header { cursor: pointer; position: relative; margin-top: 1px; zoom: 1; font-weight:bold; }
-.ui-accordion .ui-accordion-li-fix { display: inline; }
-.ui-accordion .ui-accordion-header-active { border-bottom: 0 !important; }
-.ui-accordion .ui-accordion-header a { display: block; font-size: 1em; padding: .5em .5em .5em .7em; }
-.ui-accordion-icons .ui-accordion-header a { padding-left: 2.2em; }
-.ui-accordion .ui-accordion-header .ui-icon { position: absolute; left: .5em; top: 50%; margin-top: -8px; }
-.ui-accordion .ui-accordion-content { padding: 1em 2.2em; border-top: 0; margin-top: -2px; position: relative; top: 1px; margin-bottom: 2px; overflow: auto; display: none; zoom: 1; }
-.ui-accordion .ui-accordion-content-active { display: block; }
-/*
- * jQuery UI Autocomplete 1.8.16
- *
- * Copyright 2011, AUTHORS.txt (http://jqueryui.com/about)
- * Dual licensed under the MIT or GPL Version 2 licenses.
- * http://jquery.org/license
- *
- * http://docs.jquery.com/UI/Autocomplete#theming
- */
-.ui-autocomplete { position: absolute; cursor: default; }
-
-/* workarounds */
-* html .ui-autocomplete { width:1px; } /* without this, the menu expands to 100% in IE6 */
-
-/*
- * jQuery UI Menu 1.8.16
- *
- * Copyright 2010, AUTHORS.txt (http://jqueryui.com/about)
- * Dual licensed under the MIT or GPL Version 2 licenses.
- * http://jquery.org/license
- *
- * http://docs.jquery.com/UI/Menu#theming
- */
-.ui-menu {
-	list-style:none;
-	padding: 2px;
-	margin: 0;
-	display:block;
-	float: left;
-}
-.ui-menu .ui-menu {
-	margin-top: -3px;
-}
-.ui-menu .ui-menu-item {
-	margin:0;
-	padding: 0;
-	zoom: 1;
-	float: left;
-	clear: left;
-	width: 100%;
-}
-.ui-menu .ui-menu-item a {
-	text-decoration:none;
-	display:block;
-	padding:.2em .4em;
-	line-height:1.5;
-	zoom:1;
-}
-.ui-menu .ui-menu-item a.ui-state-hover,
-.ui-menu .ui-menu-item a.ui-state-active {
-	font-weight: normal;
-	background: #0064CD;
-	color: #fff;
-}
-
-
-/*
- * jQuery UI Button 1.8.16
- *
- * Copyright 2011, AUTHORS.txt (http://jqueryui.com/about)
- * Dual licensed under the MIT or GPL Version 2 licenses.
- * http://jquery.org/license
- *
- * http://docs.jquery.com/UI/Button#theming
- */
-.ui-button {
-
-  cursor: pointer;
-  display: inline-block;
-  background-color: #e6e6e6;
-  background-repeat: no-repeat;
-  background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#ffffff), color-stop(25%, #ffffff), to(#e6e6e6));
-  background-image: -webkit-linear-gradient(#ffffff, #ffffff 25%, #e6e6e6);
-  background-image: -moz-linear-gradient(top, #ffffff, #ffffff 25%, #e6e6e6);
-  background-image: -ms-linear-gradient(#ffffff, #ffffff 25%, #e6e6e6);
-  background-image: -o-linear-gradient(#ffffff, #ffffff 25%, #e6e6e6);
-  background-image: linear-gradient(#ffffff, #ffffff 25%, #e6e6e6);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffff', endColorstr='#e6e6e6', GradientType=0);
-  padding: 5px 14px 6px;
-  margin: 0;
-  text-shadow: 0 1px 1px rgba(255, 255, 255, 0.75);
-  color: #333;
-  font-size: 13px;
-  line-height: normal;
-  border: 1px solid #ccc;
-  border-bottom-color: #bbb;
-
-  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.2), 0 1px 2px rgba(0, 0, 0, 0.05);
-  -moz-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.2), 0 1px 2px rgba(0, 0, 0, 0.05);
-  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.2), 0 1px 2px rgba(0, 0, 0, 0.05);
-  -webkit-transition: 0.1s linear background-image;
-  -moz-transition: 0.1s linear background-image;
-  -ms-transition: 0.1s linear background-image;
-  -o-transition: 0.1s linear background-image;
-  transition: 0.1s linear background-image;
-   overflow: visible;
-
-} /* the overflow property removes extra width in IE */
-
-.ui-button-primary {
-  color: #ffffff;
-  background-color: #0064cd;
-  background-repeat: repeat-x;
-  background-image: -khtml-gradient(linear, left top, left bottom, from(#049cdb), to(#0064cd));
-  background-image: -moz-linear-gradient(top, #049cdb, #0064cd);
-  background-image: -ms-linear-gradient(top, #049cdb, #0064cd);
-  background-image: -webkit-gradient(linear, left top, left bottom, color-stop(0%, #049cdb), color-stop(100%, #0064cd));
-  background-image: -webkit-linear-gradient(top, #049cdb, #0064cd);
-  background-image: -o-linear-gradient(top, #049cdb, #0064cd);
-  background-image: linear-gradient(top, #049cdb, #0064cd);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#049cdb', endColorstr='#0064cd', GradientType=0);
-  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);
-  border-color: #0064cd #0064cd #003f81;
-  border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25);
-
-}
-
-
-
-.ui-button-success{
-  color:#ffffff;
-  background-color: #57a957;
-  background-repeat: repeat-x;
-  background-image: -khtml-gradient(linear, left top, left bottom, from(#62c462), to(#57a957));
-  background-image: -moz-linear-gradient(top, #62c462, #57a957);
-  background-image: -ms-linear-gradient(top, #62c462, #57a957);
-  background-image: -webkit-gradient(linear, left top, left bottom, color-stop(0%, #62c462), color-stop(100%, #57a957));
-  background-image: -webkit-linear-gradient(top, #62c462, #57a957);
-  background-image: -o-linear-gradient(top, #62c462, #57a957);
-  background-image: linear-gradient(top, #62c462, #57a957);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#62c462', endColorstr='#57a957', GradientType=0);
-  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);
-  border-color: #57a957 #57a957 #3d773d;
-  border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25);
-}
-
-.ui-button-error{
-  color:#ffffff;
-  background-color: #c43c35;
-  background-repeat: repeat-x;
-  background-image: -khtml-gradient(linear, left top, left bottom, from(#ee5f5b), to(#c43c35));
-  background-image: -moz-linear-gradient(top, #ee5f5b, #c43c35);
-  background-image: -ms-linear-gradient(top, #ee5f5b, #c43c35);
-  background-image: -webkit-gradient(linear, left top, left bottom, color-stop(0%, #ee5f5b), color-stop(100%, #c43c35));
-  background-image: -webkit-linear-gradient(top, #ee5f5b, #c43c35);
-  background-image: -o-linear-gradient(top, #ee5f5b, #c43c35);
-  background-image: linear-gradient(top, #ee5f5b, #c43c35);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ee5f5b', endColorstr='#c43c35', GradientType=0);
-  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);
-  border-color: #c43c35 #c43c35 #882a25;
-  border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25);
-}
-
-.ui-button-icon-only { width: 2.2em; } /* to make room for the icon, a width needs to be set here */
-button.ui-button-icon-only { } /* button elements seem to need a little more width */
-.ui-button-icons-only { width: 3.4em; }
-button.ui-button-icons-only { width: 3.7em; }
-
-/*button text element */
-
-.ui-button .ui-button-text { display: block;   }
-.ui-button-text-only .ui-button-text {  }
-.ui-button-icon-only .ui-button-text, .ui-button-icons-only .ui-button-text { padding: .4em; text-indent: -9999999px; /*tempfix*/ display:none;}
-.ui-button-text-icon-primary .ui-button-text, .ui-button-text-icons .ui-button-text { padding: .4em 1em .4em 2.1em; }
-.ui-button-text-icon-secondary .ui-button-text, .ui-button-text-icons .ui-button-text { padding: .4em 2.1em .4em 1em; }
-.ui-button-text-icons .ui-button-text { padding-left: 2.1em; padding-right: 2.1em; }
-/* no icon support for input elements, provide padding by default */
-/* input.ui-button { padding: .4em 1em; } */
-
-/*button icon element(s) */
-.ui-button-icon-only .ui-icon, .ui-button-text-icon-primary .ui-icon, .ui-button-text-icon-secondary .ui-icon, .ui-button-text-icons .ui-icon, .ui-button-icons-only .ui-icon { top: 50%; margin-top:-3px; margin-bottom:3px; }
-.ui-button-icon-only .ui-icon { left: 50%; margin-left: -8px; }
-.ui-button-text-icon-primary .ui-button-icon-primary, .ui-button-text-icons .ui-button-icon-primary, .ui-button-icons-only .ui-button-icon-primary { left: .5em; }
-.ui-button-text-icon-secondary .ui-button-icon-secondary, .ui-button-text-icons .ui-button-icon-secondary, .ui-button-icons-only .ui-button-icon-secondary { right: .5em; }
-.ui-button-text-icons .ui-button-icon-secondary, .ui-button-icons-only .ui-button-icon-secondary { right: .5em; }
-
-/*button sets*/
-
-
-.ui-buttonset { margin-right: 7px; }
-.ui-buttonset .ui-state-active {
-  color: #ffffff;
-  background-color: #0064cd;
-  background-repeat: repeat-x;
-  background-image: -khtml-gradient(linear, left top, left bottom, from(#049cdb), to(#0064cd));
-  background-image: -moz-linear-gradient(top, #049cdb, #0064cd);
-  background-image: -ms-linear-gradient(top, #049cdb, #0064cd);
-  background-image: -webkit-gradient(linear, left top, left bottom, color-stop(0%, #049cdb), color-stop(100%, #0064cd));
-  background-image: -webkit-linear-gradient(top, #049cdb, #0064cd);
-  background-image: -o-linear-gradient(top, #049cdb, #0064cd);
-  background-image: linear-gradient(top, #049cdb, #0064cd);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#049cdb', endColorstr='#0064cd', GradientType=0);
-  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);
-  border-color: #0064cd #0064cd #003f81;
-  border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25);
-}
-.ui-buttonset .ui-button { margin-left: 0; margin-right: -.4em; }
-
-/* workarounds */
-button.ui-button::-moz-focus-inner { border: 0; padding: 0; } /* reset extra padding in Firefox */
-
-
-
-/*
- * jQuery UI Dialog 1.8.16
- *
- * Copyright 2011, AUTHORS.txt (http://jqueryui.com/about)
- * Dual licensed under the MIT or GPL Version 2 licenses.
- * http://jquery.org/license
- *
- * http://docs.jquery.com/UI/Dialog#theming
- */
-.ui-dialog { position: absolute; padding: .2em; width: 300px; overflow: hidden; }
-.ui-dialog .ui-dialog-titlebar {
-  position: relative;
-  padding: 5px 15px;
-  /* "border: 0px 0px 0px 1px solid" is not valid shorthand; the equivalent longhands: */
-  border-width: 0 0 0 1px;
-  border-style: solid;
-  border-color: white;
-  font-size: 18px;
-  text-decoration: none;
-  background: none;
-  -moz-border-radius-bottomright: 0px;
-  -webkit-border-bottom-right-radius: 0px;
-  -khtml-border-bottom-right-radius: 0px;
-  border-bottom-right-radius: 0px;
-  -moz-border-radius-bottomleft: 0px;
-  -webkit-border-bottom-left-radius: 0px;
-  -khtml-border-bottom-left-radius: 0px;
-  border-bottom-left-radius: 0px;
-  border-bottom: 1px solid #ccc;
-}
-.ui-dialog .ui-dialog-title {
-  float: left;
-  color:#404040;
-  font-weight:bold;
-  margin-top:5px;
-  margin-bottom:5px;
-  padding:5px;
-
-}
-.ui-dialog .ui-dialog-titlebar-close {
-  position: absolute;
-  right: .3em;
-  top: 50%;
-  width: 19px;
-  margin: -10px 0 0 0;
-  padding: 1px;
-  height: 18px;
-  font-size: 20px;
-  font-weight: bold;
-  line-height: 13.5px;
-  text-shadow: 0 1px 0 #ffffff;
-  filter: alpha(opacity=25);
-  -khtml-opacity: 0.25;
-  -moz-opacity: 0.25;
-  opacity: 0.25;
-}
-
-.ui-dialog .ui-dialog-titlebar-close span { 
-  display: block; 
-  margin: 1px;
-  text-indent: 9999px;
-}
-
-.ui-dialog .ui-dialog-titlebar-close:hover, .ui-dialog .ui-dialog-titlebar-close:focus {
-  padding: 0;
-  filter: alpha(opacity=90);
-  -khtml-opacity: 0.90;
-  -moz-opacity: 0.90;
-  opacity: 0.90;
-}
-
-.ui-dialog .ui-dialog-content { position: relative; border: 0; padding: .5em 1em; background: none; overflow: auto; zoom: 1; }
-
-.ui-dialog .ui-dialog-buttonpane {
-  text-align: left;
-  border-width: 1px 0 0 0;
-  background-image: none;
-  margin: .5em 0 0 0;
-  background-color: #f5f5f5;
-  padding: 5px 15px 5px;
-  border-top: 1px solid #ddd;
-  -webkit-border-radius: 0 0 6px 6px;
-  -moz-border-radius: 0 0 6px 6px;
-  border-radius: 0 0 6px 6px;
-  -webkit-box-shadow: inset 0 1px 0 #ffffff;
-  -moz-box-shadow: inset 0 1px 0 #ffffff;
-  box-shadow: inset 0 1px 0 #ffffff;
-  zoom: 1;
-}
-.ui-dialog .ui-dialog-buttonpane .ui-dialog-buttonset { float: right; }
-.ui-dialog .ui-dialog-buttonpane button { margin: .5em .4em .5em 0; cursor: pointer; }
-.ui-dialog .ui-resizable-se { width: 14px; height: 14px; right: 3px; bottom: 3px; }
-.ui-draggable .ui-dialog-titlebar { cursor: move; }
-
-.ui-dialog-buttonpane .ui-dialog-buttonset .ui-button{
-  color: #ffffff;
-  background-color: #0064cd;
-  background-repeat: repeat-x;
-  background-image: -khtml-gradient(linear, left top, left bottom, from(#049cdb), to(#0064cd));
-  background-image: -moz-linear-gradient(top, #049cdb, #0064cd);
-  background-image: -ms-linear-gradient(top, #049cdb, #0064cd);
-  background-image: -webkit-gradient(linear, left top, left bottom, color-stop(0%, #049cdb), color-stop(100%, #0064cd));
-  background-image: -webkit-linear-gradient(top, #049cdb, #0064cd);
-  background-image: -o-linear-gradient(top, #049cdb, #0064cd);
-  background-image: linear-gradient(top, #049cdb, #0064cd);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#049cdb', endColorstr='#0064cd', GradientType=0);
-  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);
-  border-color: #0064cd #0064cd #003f81;
-  border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25);
-}
-/*
- * jQuery UI Slider 1.8.16
- *
- * Copyright 2011, AUTHORS.txt (http://jqueryui.com/about)
- * Dual licensed under the MIT or GPL Version 2 licenses.
- * http://jquery.org/license
- *
- * http://docs.jquery.com/UI/Slider#theming
- */
-.ui-slider { position: relative; text-align: left; }
-.ui-slider .ui-slider-handle { position: absolute; z-index: 2; width: 1.2em; height: 1.2em; cursor: default; }
-.ui-slider .ui-slider-range { position: absolute; z-index: 1; font-size: .7em; display: block; border: 0; background-position: 0 0;
-
-  color: #ffffff;
-  background-color: #0064cd;
-  background-repeat: repeat-x;
-  background-image: -khtml-gradient(linear, left top, left bottom, from(#049cdb), to(#0064cd));
-  background-image: -moz-linear-gradient(top, #049cdb, #0064cd);
-  background-image: -ms-linear-gradient(top, #049cdb, #0064cd);
-  background-image: -webkit-gradient(linear, left top, left bottom, color-stop(0%, #049cdb), color-stop(100%, #0064cd));
-  background-image: -webkit-linear-gradient(top, #049cdb, #0064cd);
-  background-image: -o-linear-gradient(top, #049cdb, #0064cd);
-  background-image: linear-gradient(top, #049cdb, #0064cd);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#049cdb', endColorstr='#0064cd', GradientType=0);
-  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);
-  border-color: #0064cd #0064cd #003f81;
-  border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25);
-
-}
-
-.ui-slider-horizontal { height: .8em; }
-.ui-slider-horizontal .ui-slider-handle { top: -.3em; margin-left: -.6em; }
-.ui-slider-horizontal .ui-slider-range { top: 0; height: 100%; }
-.ui-slider-horizontal .ui-slider-range-min { left: 0; }
-.ui-slider-horizontal .ui-slider-range-max { right: 0; }
-
-.ui-slider-vertical { width: .8em; height: 100px; }
-.ui-slider-vertical .ui-slider-handle { left: -.3em; margin-left: 0; margin-bottom: -.6em; }
-.ui-slider-vertical .ui-slider-range { left: 0; width: 100%; }
-.ui-slider-vertical .ui-slider-range-min { bottom: 0; }
-.ui-slider-vertical .ui-slider-range-max { top: 0; }
-/*
- * jQuery UI Tabs 1.8.16
- *
- * Copyright 2011, AUTHORS.txt (http://jqueryui.com/about)
- * Dual licensed under the MIT or GPL Version 2 licenses.
- * http://jquery.org/license
- *
- * http://docs.jquery.com/UI/Tabs#theming
- */
-.ui-tabs .ui-tabs-nav { background: none; border-color: #ddd; border-style: solid; border-width: 0 0 1px; }
-.ui-tabs { position: relative; padding: .2em; zoom: 1; border:0px;} /* position: relative prevents IE scroll bug (element with position: relative inside container with overflow: auto appear as "fixed") */
-
-
-.ui-tabs .ui-tabs-nav li:hover, .ui-tabs .ui-tabs-nav li a:hover {
-  background: whiteSmoke;
-  border-bottom: 1px solid #ddd;
-  padding-bottom: 0px;
-  color: #00438A;
-}
-
-
-.ui-tabs .ui-tabs-nav { margin: 0; padding: .2em .2em 0; border-bottom:1px solid #DDD; }
-.ui-tabs .ui-tabs-nav li { text-decoration: none; list-style: none; float: left; position: relative; top: 1px; padding: 0px 0px 1px 0px; white-space: nowrap; background: none; border: 0px; }
-
-.ui-tabs-nav .ui-state-default{
-  -webkit-box-shadow: 0px 0px 0px #ffffff; /* Saf3-4, iOS 4.0.2 - 4.2, Android 2.3+ */
-     -moz-box-shadow: 0px 0px 0px #ffffff; /* FF3.5 - 3.6 */
-          box-shadow: 0px 0px 0px #ffffff; /* Opera 10.5, IE9, FF4+, Chrome 6+, iOS 5 */
-}
-.ui-tabs .ui-tabs-nav li a {
-  float: left;
-  text-decoration: none;
-  cursor: text;
-  padding: 0 15px;
-  margin-right: 2px;
-  line-height: 34px;
-  border: 1px solid transparent;
-  -webkit-border-radius: 4px 4px 0 0;
-  -moz-border-radius: 4px 4px 0 0;
-  border-radius: 4px 4px 0 0;
-}
-.ui-tabs .ui-tabs-nav li.ui-tabs-selected { margin-bottom: 0; padding-bottom: 0px; outline:none;}
-
-.ui-tabs .ui-tabs-nav li.ui-tabs-selected a, .ui-tabs .ui-tabs-nav li.ui-state-disabled a, .ui-tabs .ui-tabs-nav li.ui-state-processing a {
-
-  background-color: #ffffff;
-  border: 1px solid #ddd;
-  border-bottom-color: #ffffff;
-  cursor: default;
-  color:gray;
-  outline:none;
-}
-
-
-.ui-tabs .ui-tabs-nav li.ui-tabs-selected:hover{
-  background:#ffffff;
-  outline:none;
-}
-
-.ui-tabs .ui-tabs-nav li a, .ui-tabs.ui-tabs-collapsible .ui-tabs-nav li.ui-tabs-selected a { cursor: pointer; color:#0069D6; background:none; font-weight:normal; margin-bottom:-1px;} 
-/* first selector in group seems obsolete, but required to overcome bug in Opera applying cursor: text overall if defined elsewhere... */
-.ui-tabs .ui-tabs-panel { display: block; border-width: 0; padding: 1em 1.4em; background: none; }
-.ui-tabs-panel .ui-button{text-decoration:none;}
-.ui-tabs .ui-tabs-hide { display: none !important; }
-
-
-/* IE fix for background inheritance from ui-widget*/
-.ui-tabs .ui-tabs-nav li{
-  filter:none;
-}
-
-
-
-/*
- * jQuery UI Datepicker 1.8.16
- *
- * Copyright 2011, AUTHORS.txt (http://jqueryui.com/about)
- * Dual licensed under the MIT or GPL Version 2 licenses.
- * http://jquery.org/license
- *
- * http://docs.jquery.com/UI/Datepicker#theming
- */
-.ui-datepicker { width: 17em; padding: .4em .4em 0; display: none; }
-.ui-datepicker .ui-datepicker-header { position: relative; border: 0px; font-weight: bold; width: 100%; padding: 4px 0; background-color: #f5f5f5; color: #808080; }
-.ui-datepicker .ui-datepicker-prev, .ui-datepicker .ui-datepicker-next { position:absolute; top: 2px; width: 1.8em; height: 1.8em; }
-
-.ui-datepicker .ui-datepicker-prev-hover, .ui-datepicker .ui-datepicker-next-hover { /*top: 1px;*/ }
-.ui-datepicker .ui-datepicker-prev { left:2px; }
-.ui-datepicker .ui-datepicker-next { right:2px; }
-
-.ui-datepicker .ui-datepicker-prev-hover { /*left:1px;*/ background:#0064CD;color:#fff;}
-.ui-datepicker .ui-datepicker-next-hover { /*right:1px;*/ background:#0064CD;color:#fff;}
-
-.ui-datepicker .ui-datepicker-prev span, .ui-datepicker .ui-datepicker-next span { display: block; position: absolute; left: 50%; margin-left: -8px; top: 50%; margin-top: -8px;  }
-.ui-datepicker .ui-datepicker-title { margin: 0 2.3em; line-height: 1.8em; text-align: center; }
-.ui-datepicker .ui-datepicker-title select { font-size:1em; margin:1px 0; }
-.ui-datepicker select.ui-datepicker-month-year {width: 100%;}
-.ui-datepicker select.ui-datepicker-month,
-.ui-datepicker select.ui-datepicker-year { width: 49%;}
-.ui-datepicker table {width: 100%; font-size: .9em; border-collapse: collapse; margin:0 0 .4em; }
-.ui-datepicker th { padding: .7em .3em; text-align: center; font-weight: bold; border: 0;  }
-.ui-datepicker td { border: 0; padding: 1px; }
-.ui-datepicker td span, .ui-datepicker td a { display: block; padding: .2em; text-align: right; text-decoration: none; }
-.ui-datepicker .ui-datepicker-buttonpane { background-image: none; margin: .7em 0 0 0; padding:0 .2em; border-left: 0; border-right: 0; border-bottom: 0; }
-.ui-datepicker .ui-datepicker-buttonpane button { float: right; margin: .5em .2em .4em; cursor: pointer; padding: .2em .6em .3em .6em; width:auto; overflow:visible; }
-.ui-datepicker .ui-datepicker-buttonpane button.ui-datepicker-current { float:left; }
-
-/* with multiple calendars */
-.ui-datepicker.ui-datepicker-multi { width:auto; }
-.ui-datepicker-multi .ui-datepicker-group { float:left; }
-.ui-datepicker-multi .ui-datepicker-group table { width:95%; margin:0 auto .4em; }
-.ui-datepicker-multi-2 .ui-datepicker-group { width:50%; }
-.ui-datepicker-multi-3 .ui-datepicker-group { width:33.3%; }
-.ui-datepicker-multi-4 .ui-datepicker-group { width:25%; }
-.ui-datepicker-multi .ui-datepicker-group-last .ui-datepicker-header { border-left-width:0; }
-.ui-datepicker-multi .ui-datepicker-group-middle .ui-datepicker-header { border-left-width:0; }
-.ui-datepicker-multi .ui-datepicker-buttonpane { clear:left; }
-.ui-datepicker-row-break { clear:both; width:100%; font-size:0em; }
-
-/* RTL support */
-.ui-datepicker-rtl { direction: rtl; }
-.ui-datepicker-rtl .ui-datepicker-prev { right: 2px; left: auto; }
-.ui-datepicker-rtl .ui-datepicker-next { left: 2px; right: auto; }
-.ui-datepicker-rtl .ui-datepicker-prev:hover { right: 1px; left: auto; }
-.ui-datepicker-rtl .ui-datepicker-next:hover { left: 1px; right: auto; }
-.ui-datepicker-rtl .ui-datepicker-buttonpane { clear:right; }
-.ui-datepicker-rtl .ui-datepicker-buttonpane button { float: left; }
-.ui-datepicker-rtl .ui-datepicker-buttonpane button.ui-datepicker-current { float:right; }
-.ui-datepicker-rtl .ui-datepicker-group { float:right; }
-.ui-datepicker-rtl .ui-datepicker-group-last .ui-datepicker-header { border-right-width:0; border-left-width:1px; }
-.ui-datepicker-rtl .ui-datepicker-group-middle .ui-datepicker-header { border-right-width:0; border-left-width:1px; }
-
-/* IE6 IFRAME FIX (taken from datepicker 1.5.3) */
-.ui-datepicker-cover {
-  display: none; /*sorry for IE5*/
-  display/**/: block; /*sorry for IE5*/
-  position: absolute; /*must have*/
-  z-index: -1; /*must have*/
-  filter: mask(); /*must have*/
-  top: -4px; /*must have*/
-  left: -4px; /*must have*/
-  width: 200px; /*must have*/
-  height: 200px; /*must have*/
-}
-
-.ui-datepicker th{
-  font-weight: bold;
-  color: gray;
-}
-
-.ui-datepicker-today a:hover{
-  background-color: #808080;
-  color: #ffffff;
-
-}
-.ui-datepicker-today a{
-  background-color: #BFBFBF;
-  cursor: pointer;
-  padding: 0 4px;
-  margin-bottom:0px;
-
-}
-
-
-.ui-datepicker td a{
-  margin-bottom:0px;
-  border:0px;
-}
-
-.ui-datepicker td:hover{
-  color:white;
-}
-
-.ui-datepicker td .ui-state-default {
-  border:0px;
-  background:none;
-  margin-bottom:0px;
-  padding:5px;
-  color:gray;
-  text-align: center;
-  filter:none;
-}
-
-
-.ui-datepicker td .ui-state-active {
-  background: #BFBFBF;
-  margin-bottom: 0px;
-  font-weight: normal; /* "font-size: normal" is not valid CSS; normal weight appears to be the intent */
-  text-shadow: none;   /* "text-shadow: 0px" is not a valid value */
-  color: white;
-  -webkit-border-radius: 4px;
-  -moz-border-radius: 4px;
-  border-radius: 4px;
-}
-
-.ui-datepicker td .ui-state-default:hover{
-  background:#0064cd;
-  color:white;
-  -webkit-border-radius: 4px;
-  -moz-border-radius: 4px;
-  border-radius: 4px;
-}
-
-
-/*
- * jQuery UI Progressbar 1.8.16
- *
- * Copyright 2011, AUTHORS.txt (http://jqueryui.com/about)
- * Dual licensed under the MIT or GPL Version 2 licenses.
- * http://jquery.org/license
- *
- * http://docs.jquery.com/UI/Progressbar#theming
- */
-.ui-progressbar { height:2em; text-align: left; }
-.ui-progressbar .ui-progressbar-value {margin: -1px; height:100%;
-
-/*this can be removed if ui-widget-header is blue*/
-    color: #ffffff;
-  background-color: #0064cd;
-  background-repeat: repeat-x;
-  background-image: -khtml-gradient(linear, left top, left bottom, from(#049cdb), to(#0064cd));
-  background-image: -moz-linear-gradient(top, #049cdb, #0064cd);
-  background-image: -ms-linear-gradient(top, #049cdb, #0064cd);
-  background-image: -webkit-gradient(linear, left top, left bottom, color-stop(0%, #049cdb), color-stop(100%, #0064cd));
-  background-image: -webkit-linear-gradient(top, #049cdb, #0064cd);
-  background-image: -o-linear-gradient(top, #049cdb, #0064cd);
-  background-image: linear-gradient(top, #049cdb, #0064cd);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#049cdb', endColorstr='#0064cd', GradientType=0);
-  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);
-  border-color: #0064cd #0064cd #003f81;
-  border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25);
- }
-
-
-
-/*** Input field styling from Bootstrap **/
-input, textarea {
-  -webkit-transition: border linear 0.2s, box-shadow linear 0.2s;
-  -moz-transition: border linear 0.2s, box-shadow linear 0.2s;
-  -ms-transition: border linear 0.2s, box-shadow linear 0.2s;
-  -o-transition: border linear 0.2s, box-shadow linear 0.2s;
-  transition: border linear 0.2s, box-shadow linear 0.2s;
-  -webkit-box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.1);
-  -moz-box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.1);
-  box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.1);
-}
-input:focus, textarea:focus {
-  outline: 0;
-  border-color: rgba(82, 168, 236, 0.8);
-  -webkit-box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.1), 0 0 8px rgba(82, 168, 236, 0.6);
-  -moz-box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.1), 0 0 8px rgba(82, 168, 236, 0.6);
-  box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.1), 0 0 8px rgba(82, 168, 236, 0.6);
-}
-input[type=file]:focus, input[type=checkbox]:focus, select:focus {
-  -webkit-box-shadow: none;
-  -moz-box-shadow: none;
-  box-shadow: none;
-  outline: 1px dotted #666;
-}
-
-input[type="text"],
-input[type="password"],
-.ui-autocomplete-input,
-textarea,
-.uneditable-input {
-  display: inline-block;
-  padding: 4px;
-  /* font-size: 13px; */
-  line-height: 18px;
-  /* color: #808080; */
-  border: 1px solid #ccc;
-  -webkit-border-radius: 3px;
-  -moz-border-radius: 3px;
-  border-radius: 3px;
-}
-
-
-
-/**Toolbar**/
-
-.ui-toolbar{
-  padding: 7px 14px;
-  margin: 0 0 18px;
-  background-color: #f5f5f5;
-  background-repeat: repeat-x;
-  background-image: -khtml-gradient(linear, left top, left bottom, from(#ffffff), to(#f5f5f5));
-  background-image: -moz-linear-gradient(top, #ffffff, #f5f5f5);
-  background-image: -ms-linear-gradient(top, #ffffff, #f5f5f5);
-  background-image: -webkit-gradient(linear, left top, left bottom, color-stop(0%, #ffffff), color-stop(100%, #f5f5f5));
-  background-image: -webkit-linear-gradient(top, #ffffff, #f5f5f5);
-  background-image: -o-linear-gradient(top, #ffffff, #f5f5f5);
-  background-image: linear-gradient(top, #ffffff, #f5f5f5);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffff', endColorstr='#f5f5f5', GradientType=0);
-  border: 1px solid #ddd;
-  -webkit-border-radius: 3px;
-  -moz-border-radius: 3px;
-  border-radius: 3px;
-  -webkit-box-shadow: inset 0 1px 0 #ffffff;
-  -moz-box-shadow: inset 0 1px 0 #ffffff;
-  box-shadow: inset 0 1px 0 #ffffff;
-}
-
-
-/***Dialog fixes**/
-
-.ui-dialog-buttonset .ui-button:nth-child(2){
-  cursor: pointer;
-  display: inline-block;
-  background-color: #e6e6e6;
-  background-repeat: no-repeat;
-  background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#ffffff), color-stop(25%, #ffffff), to(#e6e6e6));
-  background-image: -webkit-linear-gradient(#ffffff, #ffffff 25%, #e6e6e6);
-  background-image: -moz-linear-gradient(top, #ffffff, #ffffff 25%, #e6e6e6);
-  background-image: -ms-linear-gradient(#ffffff, #ffffff 25%, #e6e6e6);
-  background-image: -o-linear-gradient(#ffffff, #ffffff 25%, #e6e6e6);
-  background-image: linear-gradient(#ffffff, #ffffff 25%, #e6e6e6);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffff', endColorstr='#e6e6e6', GradientType=0);
-  padding: 5px 14px 6px;
-  text-shadow: 0 1px 1px rgba(255, 255, 255, 0.75);
-  color: #333;
-  font-size: 13px;
-  line-height: normal;
-  border: 1px solid #ccc;
-  border-bottom-color: #bbb;
-  -webkit-border-radius: 4px;
-  -moz-border-radius: 4px;
-  border-radius: 4px;
-  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.2), 0 1px 2px rgba(0, 0, 0, 0.05);
-  -moz-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.2), 0 1px 2px rgba(0, 0, 0, 0.05);
-  box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.2), 0 1px 2px rgba(0, 0, 0, 0.05);
-  -webkit-transition: 0.1s linear all;
-  -moz-transition: 0.1s linear all;
-  -ms-transition: 0.1s linear all;
-  -o-transition: 0.1s linear all;
-  transition: 0.1s linear all;
-   overflow: visible;
-}
-
-
-
-/***Wijmo Theming**/
-
-div.wijmo-wijmenu{
-  padding:0 20px;
-  background-color: #222222;
-  background-repeat: repeat-x;
-  background-image: -khtml-gradient(linear, left top, left bottom, from(#333333), to(#222222));
-  background-image: -moz-linear-gradient(top, #333333, #222222);
-  background-image: -ms-linear-gradient(top, #333333, #222222);
-  background-image: -webkit-gradient(linear, left top, left bottom, color-stop(0%, #333333), color-stop(100%, #222222));
-  background-image: -webkit-linear-gradient(top, #333333, #222222);
-  background-image: -o-linear-gradient(top, #333333, #222222);
-  background-image: linear-gradient(top, #333333, #222222);
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#333333', endColorstr='#222222', GradientType=0);
-  -webkit-box-shadow: 0 1px 3px rgba(0, 0, 0, 0.25), inset 0 -1px 0 rgba(0, 0, 0, 0.1);
-  -moz-box-shadow: 0 1px 3px rgba(0, 0, 0, 0.25), inset 0 -1px 0 rgba(0, 0, 0, 0.1);
-  box-shadow: 0 1px 3px rgba(0, 0, 0, 0.25), inset 0 -1px 0 rgba(0, 0, 0, 0.1);
-}
-
-.wijmo-wijmenu .ui-state-default .wijmo-wijmenu-text{    
-  color:#BFBFBF;
-}
-
-.wijmo-wijmenu .ui-state-hover{
-  background: #444;
-  background: rgba(255, 255, 255, 0.05);
-}
-
-.wijmo-wijmenu .ui-state-hover .wijmo-wijmenu-text{
-  color:#ffffff;
-}
-
-div.wijmo-wijmenu .ui-widget-header h3{
-  position: relative;
-  margin-top:1px;
-  padding:0;
-}
-
-.wijmo-wijmenu h3 a{
-  color: #FFFFFF;
-  display: block;
-  float: left;
-  font-size: 20px;
-  font-weight: 200;
-  line-height: 1;
-  margin-left: -20px;
-  margin-top:1px;
-  padding: 8px 20px 12px;
-}
-
-.wijmo-wijmenu h3 a:hover{
-  background-color: rgba(255, 255, 255, 0.05);
-  color: #FFFFFF;
-  text-decoration: none;
-}
-
-.wijmo-wijmenu .ui-widget-header{
-  border:0px;
-}
-
-.wijmo-wijmenu .wijmo-wijmenu-parent .wijmo-wijmenu-child{
-  padding: 0.3em 0;
-}
-
-div.wijmo-wijmenu .wijmo-wijmenu-item .wijmo-wijmenu-child{
-  background: #333;
-  border:0;
-  margin:0;
-  padding: 6px 0;
-  width:160px;
-  -webkit-border-radius: 0 0 6px 6px;
-  -moz-border-radius: 0 0 6px 6px;
-  border-radius: 0 0 6px 6px;
-  -webkit-box-shadow: 0 2px 4px rgba(0, 0, 0, 0.2);
-  -moz-box-shadow: 0 2px 4px rgba(0, 0, 0, 0.2);
-  box-shadow: 0 2px 4px rgba(0, 0, 0, 0.2);
-}
-
-div.wijmo-wijmenu .wijmo-wijmenu-item{
-  margin:0;
-  border:0;
-}
-
-.wijmo-wijmenu a.wijmo-wijmenu-link{
-  margin:0;
-  line-height: 19px;
-  padding: 10px 10px 11px;
-  border:0;
-  -webkit-border-radius: 0;
-  -moz-border-radius: 0;
-  border-radius:0;
-}
-
-div.wijmo-wijmenu .wijmo-wijmenu-child .wijmo-wijmenu-link{
-  display:block;
-  float:none;
-  padding: 4px 15px;
-  width:auto;
-}
-
-div.wijmo-wijmenu .wijmo-wijmenu-child .wijmo-wijmenu-text {
-  float: none;
-}
-
-.wijmo-wijmenu .wijmo-wijmenu-item .wijmo-wijmenu-child .ui-state-hover {
-  background: #191919;
-}
-
-.wijmo-wijmenu .wijmo-wijmenu-item .wijmo-wijmenu-separator{
-  padding: 5px 0;
-  background-image: none;
-  background-color: #222;
-  border-top: 1px solid #444;
-  border-bottom:0;
-  border-left:0;
-  border-right:0;
-}
-
-.wijmo-wijmenu .wijmo-wijmenu-item input {
-  -moz-transition: none 0s ease 0s;
-  background-color: rgba(255, 255, 255, 0.3);
-  border: 1px solid #111111;
-  border-radius: 4px 4px 4px 4px;
-  box-shadow: 0 1px 2px rgba(0, 0, 0, 0.1) inset, 0 1px 0 rgba(255, 255, 255, 0.25);
-  color: rgba(255, 255, 255, 0.75);
-  font-family: "Helvetica Neue",Helvetica,Arial,sans-serif;
-  line-height: 1;
-  margin: 5px 10px 0 10px;
-  padding: 4px 9px;
-  width:100px;
-}
-
-.wijmo-wijmenu .wijmo-wijmenu-item input:hover {
-  background-color: rgba(255, 255, 255, 0.5);
-  color: #FFFFFF;
-}
-
-.wijmo-wijmenu .wijmo-wijmenu-item input:focus {
-  background-color: #FFFFFF;
-  border: 0 none;
-  box-shadow: 0 0 3px rgba(0, 0, 0, 0.15);
-  color: #404040;
-  outline: 0 none;
-  padding: 5px 10px;
-  text-shadow: 0 1px 0 #FFFFFF;
-}
-
-
-.wijmo-wijmenu .ui-state-default, .ui-widget-content .ui-state-default, .ui-widget-header .ui-state-default {
-  text-shadow:none;
-}
-
-
-.wijmo-wijmenu .ui-state-default{
-  box-shadow: none;
-  color:#BFBFBF;
-  filter: none;
-}
-
diff --git a/branch-1.2/ambari-web/vendor/styles/rickshaw.css b/branch-1.2/ambari-web/vendor/styles/rickshaw.css
deleted file mode 100644
index 9102bd4..0000000
--- a/branch-1.2/ambari-web/vendor/styles/rickshaw.css
+++ /dev/null
@@ -1,307 +0,0 @@
-.rickshaw_graph .detail {
-	pointer-events: none;
-	position: absolute;
-	top: 0;
-	z-index: 2;
-	background: rgba(0, 0, 0, 0.1);
-	bottom: 0;
-	width: 1px;
-	transition: opacity 0.25s linear;
-	-moz-transition: opacity 0.25s linear;
-	-o-transition: opacity 0.25s linear;
-	-webkit-transition: opacity 0.25s linear;
-}
-.rickshaw_graph .detail.inactive {
-	opacity: 0;
-}
-.rickshaw_graph .detail .item.active {
-	opacity: 1;
-}
-.rickshaw_graph .detail .x_label {
-	font-family: Arial, sans-serif;
-	border-radius: 3px;
-	padding: 6px;
-	opacity: 0.5;
-	border: 1px solid #e0e0e0;
-	font-size: 12px;
-	position: absolute;
-	background: white;
-	white-space: nowrap;
-}
-.rickshaw_graph .detail .item {
-	position: absolute;
-	z-index: 2;
-	border-radius: 3px;
-	padding: 0.25em;
-	font-size: 12px;
-	font-family: Arial, sans-serif;
-	opacity: 0;
-	background: rgba(0, 0, 0, 0.4);
-	color: white;
-	border: 1px solid rgba(0, 0, 0, 0.4);
-	margin-left: 1em;
-	margin-top: -1em;
-	white-space: nowrap;
-}
-.rickshaw_graph .detail .item.active {
-	opacity: 1;
-	background: rgba(0, 0, 0, 0.8);
-}
-.rickshaw_graph .detail .item:before {
-	content: "\25c2";
-	position: absolute;
-	left: -0.5em;
-	color: rgba(0, 0, 0, 0.7);
-	width: 0;
-}
-.rickshaw_graph .detail .dot {
-	width: 4px;
-	height: 4px;
-	margin-left: -4px;
-	margin-top: -3px;
-	border-radius: 5px;
-	position: absolute;
-	box-shadow: 0 0 2px rgba(0, 0, 0, 0.6);
-	background: white;
-	border-width: 2px;
-	border-style: solid;
-	display: none;
-	background-clip: padding-box;
-}
-.rickshaw_graph .detail .dot.active {
-	display: block;
-}
-/* graph */
-
-.rickshaw_graph {
-	position: relative;
-}
-.rickshaw_graph svg {
-	display: block;	
-	overflow: hidden;
-}
-
-/* ticks */
-
-.rickshaw_graph .x_tick {
-	position: absolute;
-	top: 0;
-	bottom: 0;
-	width: 0px;
-	border-left: 1px dotted rgba(0, 0, 0, 0.2);
-	pointer-events: none;
-}
-.rickshaw_graph .x_tick .title {
-	position: absolute;
-	font-size: 12px;
-	font-family: Arial, sans-serif;
-	opacity: 0.5;
-	white-space: nowrap;
-	margin-left: 3px;
-	bottom: 1px;
-}
-
-/* annotations */
-
-.rickshaw_annotation_timeline {
-	height: 1px;
-	border-top: 1px solid #e0e0e0;
-	margin-top: 10px;
-	position: relative;
-}
-.rickshaw_annotation_timeline .annotation {
-	position: absolute;
-	height: 6px;
-	width: 6px;
-	margin-left: -2px;
-	top: -3px;
-	border-radius: 5px;
-	background-color: rgba(0, 0, 0, 0.25);
-}
-.rickshaw_graph .annotation_line {
-	position: absolute;
-	top: 0;
-	bottom: -6px;
-	width: 0px;
-	border-left: 2px solid rgba(0, 0, 0, 0.3);
-	display: none;
-}
-.rickshaw_graph .annotation_line.active {
-	display: block;
-}
-
-.rickshaw_graph .annotation_range {
-	background: rgba(0, 0, 0, 0.1);
-	display: none;
-	position: absolute;
-	top: 0;
-	bottom: -6px;
-	z-index: -10;
-}
-.rickshaw_graph .annotation_range.active {
-	display: block;
-}
-.rickshaw_graph .annotation_range.active.offscreen {
-	display: none;
-}
-
-.rickshaw_annotation_timeline .annotation .content {
-	background: white;
-	color: black;
-	opacity: 0.9;
-	box-shadow: 0 0 2px rgba(0, 0, 0, 0.8);
-	border-radius: 3px;
-	position: relative;
-	z-index: 20;
-	font-size: 12px;
-	padding: 6px 8px 8px;
-	top: 18px;
-	left: -11px;
-	width: 160px;
-	display: none;
-	cursor: pointer;
-}
-.rickshaw_annotation_timeline .annotation .content:before {
-	content: "\25b2";
-	position: absolute;
-	top: -11px;
-	color: white;
-	text-shadow: 0 -1px 1px rgba(0, 0, 0, 0.8);
-}
-.rickshaw_annotation_timeline .annotation.active,
-.rickshaw_annotation_timeline .annotation:hover {
-	background-color: rgba(0, 0, 0, 0.8);
-	cursor: none;
-}
-.rickshaw_annotation_timeline .annotation .content:hover {
-	z-index: 50;
-}
-.rickshaw_annotation_timeline .annotation.active .content {
-	display: block;
-}
-.rickshaw_annotation_timeline .annotation:hover .content {
-	display: block;
-	z-index: 50;
-}
-.rickshaw_graph .y_axis {
-	fill: none;
-}
-.rickshaw_graph .y_ticks .tick {
-	stroke: rgba(0, 0, 0, 0.16);
-	stroke-width: 2px;
-	shape-rendering: crisp-edges;
-	pointer-events: none;
-}
-.rickshaw_graph .y_grid .tick {
-	z-index: -1;
-	stroke: rgba(0, 0, 0, 0.20);
-	stroke-width: 1px;
-	stroke-dasharray: 1 1;
-}
-.rickshaw_graph .y_grid path {
-	fill: none;
-	stroke: none;
-}
-.rickshaw_graph .y_ticks path {
-	fill: none;
-	stroke: #808080;
-}
-.rickshaw_graph .y_ticks text {
-	opacity: 0.5;
-	font-size: 12px;
-	pointer-events: none;
-}
-.rickshaw_graph .x_tick.glow .title,
-.rickshaw_graph .y_ticks.glow text {
-	fill: black;
-	color: black;
-	text-shadow: 
-		-1px 1px 0 rgba(255, 255, 255, 0.1),
-		1px -1px 0 rgba(255, 255, 255, 0.1),
-		1px 1px 0 rgba(255, 255, 255, 0.1),
-		0px 1px 0 rgba(255, 255, 255, 0.1),
-		0px -1px 0 rgba(255, 255, 255, 0.1),
-		1px 0px 0 rgba(255, 255, 255, 0.1),
-		-1px 0px 0 rgba(255, 255, 255, 0.1),
-		-1px -1px 0 rgba(255, 255, 255, 0.1);
-}
-.rickshaw_graph .x_tick.inverse .title,
-.rickshaw_graph .y_ticks.inverse text {
-	fill: white;
-	color: white;
-	text-shadow: 
-		-1px 1px 0 rgba(0, 0, 0, 0.8),
-		1px -1px 0 rgba(0, 0, 0, 0.8),
-		1px 1px 0 rgba(0, 0, 0, 0.8),
-		0px 1px 0 rgba(0, 0, 0, 0.8),
-		0px -1px 0 rgba(0, 0, 0, 0.8),
-		1px 0px 0 rgba(0, 0, 0, 0.8),
-		-1px 0px 0 rgba(0, 0, 0, 0.8),
-		-1px -1px 0 rgba(0, 0, 0, 0.8);
-}
-.rickshaw_legend {
-	font-family: Arial;
-	font-size: 12px;
-	color: white;
-	background: #404040;
-	display: inline-block;
-	padding: 12px 5px; 
-	border-radius: 2px;
-	position: relative;
-}
-.rickshaw_legend:hover {
-	z-index: 10;
-}
-.rickshaw_legend .swatch {
-	width: 10px;
-	height: 10px;
-	border: 1px solid rgba(0, 0, 0, 0.2);
-}
-.rickshaw_legend .line {
-	clear: both;
-	line-height: 140%;
-	padding-right: 15px;
-}
-.rickshaw_legend .line .swatch {
-	display: inline-block;
-	margin-right: 3px;
-	border-radius: 2px;
-}
-.rickshaw_legend .label {
-	white-space: nowrap;
-	display: inline;
-}
-.rickshaw_legend .action:hover {
-	opacity: 0.6;
-}
-.rickshaw_legend .action {
-	margin-right: 0.2em;
-	opacity: 0.2;
-	cursor: pointer;
-	font-size: 14px;
-}
-.rickshaw_legend .line.disabled {
-	opacity: 0.4;
-}
-.rickshaw_legend ul {
-	list-style-type: none;
-	padding: 0;
-	margin: 2px;
-	cursor: pointer;
-}
-.rickshaw_legend li {
-	padding: 0 0 0 2px;
-	min-width: 80px;
-	white-space: nowrap;
-}
-.rickshaw_legend li:hover {
-	background: rgba(255, 255, 255, 0.08);
-	border-radius: 3px;
-}
-.rickshaw_legend li:active {
-	background: rgba(255, 255, 255, 0.2);
-	border-radius: 3px;
-}
diff --git a/branch-1.2/ambari.iml b/branch-1.2/ambari.iml
deleted file mode 100644
index 8015fa7..0000000
--- a/branch-1.2/ambari.iml
+++ /dev/null
@@ -1,14 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<module org.jetbrains.idea.maven.project.MavenProjectsManager.isMavenModule="true" type="JAVA_MODULE" version="4">
-  <component name="NewModuleRootManager" LANGUAGE_LEVEL="JDK_1_6" inherit-compiler-output="false">
-    <output url="file://$MODULE_DIR$/target/classes" />
-    <output-test url="file://$MODULE_DIR$/target/test-classes" />
-    <exclude-output />
-    <content url="file://$MODULE_DIR$">
-      <excludeFolder url="file://$MODULE_DIR$/target" />
-    </content>
-    <orderEntry type="inheritedJdk" />
-    <orderEntry type="sourceFolder" forTests="false" />
-  </component>
-</module>
-
diff --git a/branch-1.2/contrib/addons/package/rpm/.gitignore b/branch-1.2/contrib/addons/package/rpm/.gitignore
deleted file mode 100644
index 378eac2..0000000
--- a/branch-1.2/contrib/addons/package/rpm/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-build
diff --git a/branch-1.2/contrib/addons/package/rpm/create_ganglia_addon_rpm.sh b/branch-1.2/contrib/addons/package/rpm/create_ganglia_addon_rpm.sh
deleted file mode 100644
index 89b7e1f..0000000
--- a/branch-1.2/contrib/addons/package/rpm/create_ganglia_addon_rpm.sh
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/bin/bash
-#
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-CUR_DIR=`pwd`
-
-BASEDIR="$( cd "$( dirname "$0" )" && pwd )"
-
-if [[ -z "${BUILD_DIR}" ]]; then
-  BUILD_DIR="${BASEDIR}/build/"
-fi
-
-if [[ -z "${VERSION}" ]]; then
-  VERSION=${VERSION:-1.2.2.3}
-fi
-
-if [[ -z "${RELEASE}" ]]; then
-  RELEASE=${RELEASE:-1}
-fi
-
-#rm -rf ${BUILD_DIR}/*
-
-PKG_NAME="hdp_mon_ganglia_addons"
-
-MON_TAR_DIR="${BUILD_DIR}/${PKG_NAME}-$VERSION/"
-
-mkdir -p "${MON_TAR_DIR}"
-cp -r ${BASEDIR}/../../src/addOns/ganglia/* ${MON_TAR_DIR}
-
-TAR_DEST="${BUILD_DIR}/${PKG_NAME}-$VERSION.tar.gz"
-
-cd ${BUILD_DIR};
-tar -zcf "${TAR_DEST}" "${PKG_NAME}-$VERSION/"
-
-RPM_BUILDDIR=${BUILD_DIR}/rpmbuild/
-
-mkdir -p ${RPM_BUILDDIR}
-mkdir -p ${RPM_BUILDDIR}/SOURCES/
-mkdir -p ${RPM_BUILDDIR}/SPECS/
-mkdir -p ${RPM_BUILDDIR}/BUILD/
-mkdir -p ${RPM_BUILDDIR}/RPMS/
-mkdir -p ${RPM_BUILDDIR}/SRPMS/
-
-cp -f ${BASEDIR}/${PKG_NAME}.spec ${RPM_BUILDDIR}/SPECS/
-cp -f ${TAR_DEST} ${RPM_BUILDDIR}/SOURCES/
-
-echo "${VERSION}" > ${RPM_BUILDDIR}/SOURCES/version.txt
-echo "${RELEASE}" > ${RPM_BUILDDIR}/SOURCES/release.txt
-
-cd ${RPM_BUILDDIR}
-
-cmd="rpmbuild --define \"_topdir ${RPM_BUILDDIR}\" \
-    -bb ${RPM_BUILDDIR}/SPECS/${PKG_NAME}.spec"
-
-echo $cmd
-eval $cmd
-ret=$?
-if [[ "$ret" != "0" ]]; then
-  echo "Error: rpmbuild failed, error=$ret"
-  exit 1
-fi
-
-cd ${CUR_DIR}
-
-RPM_DEST="${RPM_BUILDDIR}/RPMS/noarch/${PKG_NAME}-$VERSION-$RELEASE.noarch.rpm"
-if [[ ! -f "${RPM_DEST}" ]]; then
-  echo "Error: ${RPM_DEST} does not exist"
-  exit 1
-fi
-
-exit 0
diff --git a/branch-1.2/contrib/addons/package/rpm/create_nagios_addon_rpm.sh b/branch-1.2/contrib/addons/package/rpm/create_nagios_addon_rpm.sh
deleted file mode 100644
index 78799cc..0000000
--- a/branch-1.2/contrib/addons/package/rpm/create_nagios_addon_rpm.sh
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/bin/bash
-#
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-CUR_DIR=`pwd`
-
-BASEDIR="$( cd "$( dirname "$0" )" && pwd )"
-
-if [[ -z "${BUILD_DIR}" ]]; then
-  BUILD_DIR="${BASEDIR}/build/"
-fi
-
-if [[ -z "${VERSION}" ]]; then
-  VERSION=${VERSION:-1.2.2.3}
-fi
-
-if [[ -z "${RELEASE}" ]]; then
-  RELEASE=${RELEASE:-1}
-fi
-
-#rm -rf ${BUILD_DIR}/*
-
-PKG_NAME="hdp_mon_nagios_addons"
-
-MON_TAR_DIR="${BUILD_DIR}/${PKG_NAME}-$VERSION/"
-
-mkdir -p "${MON_TAR_DIR}"
-cp -r ${BASEDIR}/../../src/addOns/nagios/* ${MON_TAR_DIR}
-
-TAR_DEST="${BUILD_DIR}/${PKG_NAME}-$VERSION.tar.gz"
-
-cd ${BUILD_DIR};
-tar -zcf "${TAR_DEST}" "${PKG_NAME}-$VERSION/"
-
-RPM_BUILDDIR=${BUILD_DIR}/rpmbuild/
-
-mkdir -p ${RPM_BUILDDIR}
-mkdir -p ${RPM_BUILDDIR}/SOURCES/
-mkdir -p ${RPM_BUILDDIR}/BUILD/
-mkdir -p ${RPM_BUILDDIR}/SPECS/
-mkdir -p ${RPM_BUILDDIR}/RPMS/
-mkdir -p ${RPM_BUILDDIR}/SRPMS/
-
-cp -f ${BASEDIR}/${PKG_NAME}.spec ${RPM_BUILDDIR}/SPECS/
-cp -f ${TAR_DEST} ${RPM_BUILDDIR}/SOURCES/
-
-echo "${VERSION}" > ${RPM_BUILDDIR}/SOURCES/version.txt
-echo "${RELEASE}" > ${RPM_BUILDDIR}/SOURCES/release.txt
-
-cd ${RPM_BUILDDIR}
-
-cmd="rpmbuild --define \"_topdir ${RPM_BUILDDIR}\" \
-  -bb ${RPM_BUILDDIR}/SPECS/${PKG_NAME}.spec"
-
-echo $cmd
-eval $cmd
-ret=$?
-if [[ "$ret" != "0" ]]; then
-  echo "Error: rpmbuild failed, error=$ret"
-  exit 1
-fi
-
-cd ${CUR_DIR}
-
-RPM_DEST="${RPM_BUILDDIR}/RPMS/noarch/${PKG_NAME}-$VERSION-$RELEASE.noarch.rpm"
-if [[ ! -f "${RPM_DEST}" ]]; then
-  echo "Error: ${RPM_DEST} does not exist"
-  exit 1
-fi
-
-exit 0
diff --git a/branch-1.2/contrib/addons/package/rpm/hdp_mon_ganglia_addons.spec b/branch-1.2/contrib/addons/package/rpm/hdp_mon_ganglia_addons.spec
deleted file mode 100644
index e92a2d0..0000000
--- a/branch-1.2/contrib/addons/package/rpm/hdp_mon_ganglia_addons.spec
+++ /dev/null
@@ -1,81 +0,0 @@
-##
-#
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-#
-# RPM Spec file for Ganglia Add-ons for HDP Monitoring Dashboard
-#
-
-%define name  hdp_mon_ganglia_addons
-%define release %(cat %{_sourcedir}/release.txt)
-%define version %(cat %{_sourcedir}/version.txt)
-%define buildroot %{_tmppath}/%{name}-%{version}-buildroot
-
-
-Summary: Ganglia Add-ons for HDP Monitoring Dashboard
-Name: %{name}
-Version: %{version}
-URL: http://hortonworks.com
-Release: %{release}%{?dist}
-License: Apache License, Version 2.0
-Vendor: Hortonworks <ambari-group@hortonworks.com>
-Group: System Environment/Base
-Source: %{name}-%{version}.tar.gz
-Buildroot: %{buildroot}
-Requires: gweb >= 2.2
-
-%if 0%{?suse_version}
-%define graphd_dir /srv/www/htdocs/ganglia/graph.d/
-%else
-%define graphd_dir /var/www/html/ganglia/graph.d/
-%endif
-%define gconf_dir /var/lib/ganglia/conf/
-
-BuildArchitectures: noarch
-
-%description
-This package provides add-on graphs and configurations for Ganglia, enabling
-better monitoring integration with a Hadoop cluster.
-
-%prep
-%setup -q -n %{name}-%{version}
-%build
-
-%install
-# Flush any old RPM build root
-%__rm -rf $RPM_BUILD_ROOT
-
-%__mkdir -p $RPM_BUILD_ROOT/%{graphd_dir}/
-%__mkdir -p $RPM_BUILD_ROOT/%{gconf_dir}/
-
-%__cp -rf conf/* $RPM_BUILD_ROOT/%{gconf_dir}/
-%__cp -rf graph.d/* $RPM_BUILD_ROOT/%{graphd_dir}/
-
-
-%files
-%defattr(-,root,root)
-%{graphd_dir}/*
-%{gconf_dir}/*
-
-%clean
-%__rm -rf $RPM_BUILD_ROOT
-
-%changelog
-* Thu Feb 17 2011 Hortonworks <ambari-group@hortonworks.com>
-- Initial version
diff --git a/branch-1.2/contrib/addons/package/rpm/hdp_mon_nagios_addons.spec b/branch-1.2/contrib/addons/package/rpm/hdp_mon_nagios_addons.spec
deleted file mode 100644
index 861e6d8..0000000
--- a/branch-1.2/contrib/addons/package/rpm/hdp_mon_nagios_addons.spec
+++ /dev/null
@@ -1,88 +0,0 @@
-##
-#
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-#
-# RPM Spec file for Nagios Add-ons for HDP Monitoring Dashboard
-#
-
-%define name hdp_mon_nagios_addons
-%define release %(cat %{_sourcedir}/release.txt)
-%define version %(cat %{_sourcedir}/version.txt)
-%define buildroot %{_tmppath}/%{name}-%{version}-buildroot
-
-Summary: Nagios Add-ons for HDP Monitoring Dashboard
-Name: %{name}
-Version: %{version}
-URL: http://hortonworks.com
-Release: %{release}%{?dist}
-License: Apache License, Version 2.0
-Vendor: Hortonworks <ambari-group@hortonworks.com>
-Group: System Environment/Base
-Source: %{name}-%{version}.tar.gz
-Buildroot: %{buildroot}
-Requires: nagios, nagios-plugins, php >= 5
-
-%define nagioshdpscripts_dir %{_prefix}/share/hdp/nagios
-%define nagiosplugin_dir %{_libdir}/nagios/plugins
-%if 0%{?suse_version}
-%define httpd_confdir %{_sysconfdir}/apache2/conf.d
-%else
-%define httpd_confdir %{_sysconfdir}/httpd/conf.d
-%endif
-BuildArchitectures: noarch
-
-%description
-This package provides add-on helper scripts and plugins for Nagios for
-monitoring a Hadoop cluster.
-
-%prep
-%setup -q -n %{name}-%{version}
-%build
-
-%install
-# Flush any old RPM build root
-%__rm -rf $RPM_BUILD_ROOT
-
-%__mkdir -p $RPM_BUILD_ROOT/%{nagioshdpscripts_dir}/
-%__mkdir -p $RPM_BUILD_ROOT/%{nagiosplugin_dir}/
-%__mkdir -p $RPM_BUILD_ROOT/%{httpd_confdir}/
-
-%__cp -rf scripts/* $RPM_BUILD_ROOT/%{nagioshdpscripts_dir}/
-%__cp -rf plugins/* $RPM_BUILD_ROOT/%{nagiosplugin_dir}/
-echo "Alias /ambarinagios %{_prefix}/share/hdp" >> $RPM_BUILD_ROOT/%{httpd_confdir}/hdp_mon_nagios_addons.conf
-echo "<Directory /usr/share/hdp>" >> $RPM_BUILD_ROOT/%{httpd_confdir}/hdp_mon_nagios_addons.conf
-echo "  Options None" >> $RPM_BUILD_ROOT/%{httpd_confdir}/hdp_mon_nagios_addons.conf
-echo "  AllowOverride None" >> $RPM_BUILD_ROOT/%{httpd_confdir}/hdp_mon_nagios_addons.conf
-echo "  Order allow,deny" >> $RPM_BUILD_ROOT/%{httpd_confdir}/hdp_mon_nagios_addons.conf
-echo "  Allow from all" >> $RPM_BUILD_ROOT/%{httpd_confdir}/hdp_mon_nagios_addons.conf
-echo "</Directory>" >> $RPM_BUILD_ROOT/%{httpd_confdir}/hdp_mon_nagios_addons.conf
-
-%files
-%defattr(-,root,root)
-%{nagioshdpscripts_dir}/*
-%attr(0755,root,root)%{nagiosplugin_dir}/*
-%{httpd_confdir}/hdp_mon_nagios_addons.conf
-
-%clean
-%__rm -rf $RPM_BUILD_ROOT
-
-%changelog
-* Thu Feb 17 2011 Hortonworks <ambari-group@hortonworks.com>
-- Initial version
diff --git a/branch-1.2/contrib/addons/src/.gitignore b/branch-1.2/contrib/addons/src/.gitignore
deleted file mode 100644
index e69de29..0000000
--- a/branch-1.2/contrib/addons/src/.gitignore
+++ /dev/null
diff --git a/branch-1.2/contrib/addons/src/addOns/ganglia/conf/cluster_HDPJobTracker.json b/branch-1.2/contrib/addons/src/addOns/ganglia/conf/cluster_HDPJobTracker.json
deleted file mode 100644
index 9c7e6a3..0000000
--- a/branch-1.2/contrib/addons/src/addOns/ganglia/conf/cluster_HDPJobTracker.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
-  "included_reports": 
-    ["hdp_mon_jobtracker_map_slot_report","hdp_mon_jobtracker_reduce_slot_report","hdp_mon_jobtracker_mapreduce_report","hdp_mon_rpc_latency_report","hdp_mon_jvm_gc_report"]
-}
diff --git a/branch-1.2/contrib/addons/src/addOns/ganglia/conf/cluster_HDPNameNode.json b/branch-1.2/contrib/addons/src/addOns/ganglia/conf/cluster_HDPNameNode.json
deleted file mode 100644
index 0a9098a..0000000
--- a/branch-1.2/contrib/addons/src/addOns/ganglia/conf/cluster_HDPNameNode.json
+++ /dev/null
@@ -1,3 +0,0 @@
-{
-  "included_reports": ["hdp_mon_hdfs_ops_report","hdp_mon_rpc_latency_report","hdp_mon_jvm_gc_report","hdp_mon_jvm_threads_report"]
-}
diff --git a/branch-1.2/contrib/addons/src/addOns/ganglia/conf/cluster_HDPSlaves.json b/branch-1.2/contrib/addons/src/addOns/ganglia/conf/cluster_HDPSlaves.json
deleted file mode 100644
index 19b2586..0000000
--- a/branch-1.2/contrib/addons/src/addOns/ganglia/conf/cluster_HDPSlaves.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
-	"included_reports": 
-    ["hdp_mon_hdfs_io_report","hdp_mon_tasktracker_task_report","hdp_mon_tasktracker_mapreduce_report"]
-}
diff --git a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_disk_report.json b/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_disk_report.json
deleted file mode 100644
index 9561816..0000000
--- a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_disk_report.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-   "report_name" : "disk_report",
-   "report_type" : "standard",
-   "title" : "Disk Report",
-   "vertical_label" : "GB",
-   "series" : [
-      { "metric": "disk_total", "color": "ffea00", "label": "Total Disk Space", "line_width": "2", "type": "line" },
-      { "metric": "disk_free", "color": "3333bb", "label": "Disk Space Available", "line_width": "2", "type": "stack" }
-   ]
-}
diff --git a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_hlog_report.json b/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_hlog_report.json
deleted file mode 100644
index 7425a39..0000000
--- a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_hlog_report.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-   "report_name" : "hbase_hlog_report",
-   "report_type" : "standard",
-   "title" : "Avg Time in HLog file split",
-   "vertical_label" : "Milliseconds",
-   "series" : [
-      { "metric": "hbase.master.splitTime_avg_time", "color": "ff0000", "label": "Average Time", 
-        "line_width": "2", "type": "stack" }
-   ]
-}
diff --git a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_hlog_split_size_report.json b/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_hlog_split_size_report.json
deleted file mode 100644
index 7279eb1..0000000
--- a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_hlog_split_size_report.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-   "report_name" : "hlog_split_size_report",
-   "report_type" : "standard",
-   "title" : "Avg HLog split file size",
-   "vertical_label" : "Bytes",
-   "series" : [
-      { "metric": "hbase.master.splitSize_avg_time", "color": "ff0000", "label": "Avg Split Size", 
-        "line_width": "2", "type": "stack" }
-   ]
-}
diff --git a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_master_cluster_requests_report.json b/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_master_cluster_requests_report.json
deleted file mode 100644
index 7d3ee26..0000000
--- a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_master_cluster_requests_report.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-   "report_name" : "hbase_master_cluster_requests_report",
-   "report_type" : "standard",
-   "title" : "Cluster Requests",
-   "vertical_label" : "Request count",
-   "series" : [
-      { "metric": "hbase.master.cluster_requests", "color": "ff0000", "label": "Cluster Requests", 
-        "line_width": "2", "type": "line" }
-   ]
-}
diff --git a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_regionserver_compaction_queue_size_report.json b/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_regionserver_compaction_queue_size_report.json
deleted file mode 100644
index 4c27133..0000000
--- a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_regionserver_compaction_queue_size_report.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-   "report_name" : "hbase_regionserver_compaction_queue_size_report",
-   "report_type" : "standard",
-   "title" : "Total Compaction Queue Size",
-   "vertical_label" : "Queue Size",
-   "series" : [
-      { "metric": "hbase.regionserver.compactionQueueSize", "color": "ff0000", "label": "Compaction Queue Size", 
-        "line_width": "2", "type": "line" }
-   ]
-}
diff --git a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_regionserver_flush_queue_size_report.json b/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_regionserver_flush_queue_size_report.json
deleted file mode 100644
index 11030c3..0000000
--- a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_regionserver_flush_queue_size_report.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-   "report_name" : "hbase_regionserver_flush_queue_size_report",
-   "report_type" : "standard",
-   "title" : "Total flush Queue size",
-   "vertical_label" : "Queue Size",
-   "series" : [
-      { "metric": "hbase.regionserver.flushQueueSize", "color": "ff0000", "label": "flushQueueSize", 
-        "line_width": "2", "type": "line" }
-   ]
-}
diff --git a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_regionserver_fs_read_latency_report.json b/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_regionserver_fs_read_latency_report.json
deleted file mode 100644
index 160ae23..0000000
--- a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_regionserver_fs_read_latency_report.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-   "report_name" : "hbase_regionserver_fs_read_latency_report",
-   "report_type" : "standard",
-   "title" : "Region Server FS Read Latency",
-   "vertical_label" : "Milliseconds",
-   "series" : [
-      { "metric": "hbase.regionserver.fsReadLatency_avg_time", "color": "ff0000", "label": "Read Latency", 
-        "line_width": "2", "type": "line" }
-   ]
-}
diff --git a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_regionserver_fs_write_latency_report.json b/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_regionserver_fs_write_latency_report.json
deleted file mode 100644
index e38f710..0000000
--- a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_regionserver_fs_write_latency_report.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-   "report_name" : "hbase_regionserver_fs_write_latency_report",
-   "report_type" : "standard",
-   "title" : "Region Server FS Write Latency",
-   "vertical_label" : "Milliseconds",
-   "series" : [
-      { "metric": "hbase.regionserver.fsWriteLatency_avg_time", "color": "ff0000", "label": "Write Latency", 
-        "line_width": "2", "type": "line" }
-   ]
-}
diff --git a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_regionserver_read_requests_report.json b/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_regionserver_read_requests_report.json
deleted file mode 100644
index 98b7007..0000000
--- a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_regionserver_read_requests_report.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-   "report_name" : "hbase_regionserver_read_requests_report",
-   "report_type" : "standard",
-   "title" : "Region Server Read Requests",
-   "vertical_label" : "Request count",
-   "series" : [
-      { "metric": "hbase.regionserver.readRequestsCount", "color": "ff0000", "label": "Read Requests", 
-        "line_width": "2", "type": "line" }
-   ]
-}
diff --git a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_regionserver_regions_report.json b/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_regionserver_regions_report.json
deleted file mode 100644
index f177189..0000000
--- a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_regionserver_regions_report.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-   "report_name" : "hbase_regionserver_regions_report",
-   "report_type" : "standard",
-   "title" : "Total Cluster Regions",
-   "vertical_label" : "Region count",
-   "series" : [
-      { "metric": "hbase.regionserver.regions", "color": "ff0000", "label": "Regions", 
-        "line_width": "2", "type": "line" }
-   ]
-}
diff --git a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_regionserver_write_requests_report.json b/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_regionserver_write_requests_report.json
deleted file mode 100644
index e70fb82..0000000
--- a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hbase_regionserver_write_requests_report.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-   "report_name" : "hbase_regionserver_write_requests_report",
-   "report_type" : "standard",
-   "title" : "Region Server Write Requests",
-   "vertical_label" : "Request count",
-   "series" : [
-      { "metric": "hbase.regionserver.writeRequestsCount", "color": "ff0000", "label": "Write Requests", 
-        "line_width": "2", "type": "line" }
-   ]
-}
diff --git a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hdfs_capacity_remaining_report.json b/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hdfs_capacity_remaining_report.json
deleted file mode 100644
index 4a100cd..0000000
--- a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hdfs_capacity_remaining_report.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-   "report_name" : "hdfs_capacity_remaining_report",
-   "report_type" : "standard",
-   "title" : "HDFS Capacity Remaining",
-   "vertical_label" : "GB",
-   "series" : [
-      { "metric": "dfs.FSNamesystem.CapacityRemainingGB", "color": "ff0000", "label": "Capacity Remaining", 
-        "line_width": "2", "type": "line" }
-   ]
-}
diff --git a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hdfs_io_report.json b/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hdfs_io_report.json
deleted file mode 100644
index f1dc1db..0000000
--- a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hdfs_io_report.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-   "report_name" : "hdfs_io_report",
-   "report_type" : "standard",
-   "title" : "HDFS I/O",
-   "vertical_label" : "Bytes/Sec",
-   "series" : [
-      { "metric": "dfs.datanode.bytes_written", "color": "ff0000", "label": "Bytes Written/Sec", "line_width": "2", "type": "line" },
-      { "metric": "dfs.datanode.bytes_read", "color": "0000ff", "label": "Bytes Read/Sec", "line_width": "2", "type": "line" }
-   ]
-}
diff --git a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hdfs_ops_report.json b/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hdfs_ops_report.json
deleted file mode 100644
index 7bf8e22..0000000
--- a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hdfs_ops_report.json
+++ /dev/null
@@ -1,14 +0,0 @@
-{
-   "report_name" : "hdfs_ops_report",
-   "report_type" : "standard",
-   "title" : "HDFS Operations",
-   "vertical_label" : "Operations/Sec",
-   "series" : [
-      { "metric": "dfs.namenode.CreateFileOps", "color": "00ff00", "label": "File Creation", 
-        "line_width": "2", "type": "stack" },
-      { "metric": "dfs.namenode.DeleteFileOps", "color": "ff0000", "label": "File Deletion", 
-        "line_width": "2", "type": "stack" },
-      { "metric": "dfs.namenode.FileInfoOps", "color": "0000ff", "label": "File Info", 
-        "line_width": "2", "type": "stack" }
-   ]
-}
diff --git a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hdfs_pending_replication_blocks_report.json b/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hdfs_pending_replication_blocks_report.json
deleted file mode 100644
index 14bff9a..0000000
--- a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hdfs_pending_replication_blocks_report.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-   "report_name" : "hdfs_pending_replication_blocks_report",
-   "report_type" : "standard",
-   "title" : "HDFS Blocks Pending Replication",
-   "vertical_label" : "Block count",
-   "series" : [
-      { "metric": "dfs.FSNamesystem.PendingReplicationBlocks", "color": "ff0000", "label": "Blocks Pending Replication", 
-        "line_width": "2", "type": "line" }
-   ]
-}
diff --git a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hdfs_under_replicated_blocks_report.json b/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hdfs_under_replicated_blocks_report.json
deleted file mode 100644
index f7bfb81..0000000
--- a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_hdfs_under_replicated_blocks_report.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-   "report_name" : "hdfs_under_replicated_blocks_report",
-   "report_type" : "standard",
-   "title" : "HDFS Under-Replicated Blocks",
-   "vertical_label" : "Block Count",
-   "series" : [
-      { "metric": "dfs.FSNamesystem.UnderReplicatedBlocks", "color": "ff0000", "label": "Under-Replicated Blocks", 
-        "line_width": "2", "type": "line" }
-   ]
-}
diff --git a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_heartbeats_report.json b/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_heartbeats_report.json
deleted file mode 100644
index 38af20a..0000000
--- a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_heartbeats_report.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-   "report_name" : "jobtracker_heartbeats_report",
-   "report_type" : "standard",
-   "title" : "JobTracker Heartbeats",
-   "vertical_label" : "Heartbeats/Sec",
-   "series" : [
-      { "metric": "mapred.jobtracker.heartbeats", "color": "ff0000", "label": "Heartbeats/Sec", 
-        "line_width": "2", "type": "line" }
-   ]
-}
diff --git a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_jobs_completed_report.json b/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_jobs_completed_report.json
deleted file mode 100644
index 15d572c..0000000
--- a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_jobs_completed_report.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-   "report_name" : "jobtracker_jobs_completed_report",
-   "report_type" : "standard",
-   "title" : "Jobs Completion rate",
-   "vertical_label" : "Jobs/Sec",
-   "series" : [
-      { "metric": "mapred.jobtracker.jobs_completed", "color": "ff0000", "label": "Jobs Completed", 
-        "line_width": "2", "type": "line" }
-   ]
-}
diff --git a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_jobs_failed_report.json b/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_jobs_failed_report.json
deleted file mode 100644
index 8489612..0000000
--- a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_jobs_failed_report.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-   "report_name" : "jobtracker_jobs_failed_report",
-   "report_type" : "standard",
-   "title" : "Jobs Failure rate",
-   "vertical_label" : "Jobs/Sec",
-   "series" : [
-      { "metric": "mapred.jobtracker.jobs_failed", "color": "ff0000", "label": "Failed Jobs/Sec", 
-        "line_width": "2", "type": "line" }
-   ]
-}
diff --git a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_jobs_running_report.json b/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_jobs_running_report.json
deleted file mode 100644
index 77f5744..0000000
--- a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_jobs_running_report.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-   "report_name" : "jobtracker_jobs_running_report",
-   "report_type" : "standard",
-   "title" : "Jobs Running",
-   "vertical_label" : "Number Of Jobs",
-   "series" : [
-      { "metric": "mapred.jobtracker.jobs_running", "color": "ff0000", "label": "Jobs Running", 
-        "line_width": "2", "type": "line" }
-   ]
-}
diff --git a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_jobs_submitted_report.json b/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_jobs_submitted_report.json
deleted file mode 100644
index 7ca2a65..0000000
--- a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_jobs_submitted_report.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-   "report_name" : "jobtracker_jobs_submitted_report",
-   "report_type" : "standard",
-   "title" : "Jobs Submission rate",
-   "vertical_label" : "Jobs/Sec",
-   "series" : [
-      { "metric": "mapred.jobtracker.jobs_submitted", "color": "ff0000", "label": "Submitted Jobs/Sec", 
-        "line_width": "2", "type": "line" }
-   ]
-}
diff --git a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_map_slot_report.json b/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_map_slot_report.json
deleted file mode 100644
index a767a14..0000000
--- a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_map_slot_report.json
+++ /dev/null
@@ -1,14 +0,0 @@
-{
-   "report_name" : "jobtracker_map_slot_report",
-   "report_type" : "standard",
-   "title" : "Map Slot Utilization",
-   "vertical_label" : "Slots",
-   "series" : [
-      { "metric": "mapred.jobtracker.map_slots", "color": "ff0000", "label": "Total", 
-        "line_width": "2", "type": "line" },
-      { "metric": "mapred.jobtracker.occupied_map_slots", "color": "ff6ca9", "label": "Occupied",
-        "line_width": "2", "type": "stack" },
-      { "metric": "mapred.jobtracker.reserved_map_slots", "color": "ff6600", "label": "Reserved",
-        "line_width": "2", "type": "stack" }
-   ]
-}
diff --git a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_mapreduce_report.json b/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_mapreduce_report.json
deleted file mode 100644
index 15e282e..0000000
--- a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_mapreduce_report.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
-   "report_name" : "jobtracker_mapreduce_report",
-   "report_type" : "standard",
-   "title" : "Waiting Map/Reduce tasks",
-   "vertical_label" : "Tasks",
-   "series" : [
-      { "metric": "mapred.jobtracker.waiting_maps", "color": "ff0000", "label": "Waiting Maps", 
-        "line_width": "2", "type": "stack" },
-      { "metric": "mapred.jobtracker.waiting_reduces", "color": "0000ff", "label": "Waiting Reduces", 
-        "line_width": "2", "type": "stack" }
-   ]
-}
diff --git a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_reduce_slot_report.json b/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_reduce_slot_report.json
deleted file mode 100644
index bc77478..0000000
--- a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jobtracker_reduce_slot_report.json
+++ /dev/null
@@ -1,14 +0,0 @@
-{
-   "report_name" : "jobtracker_reduce_slot_report",
-   "report_type" : "standard",
-   "title" : "Reduce Slot Utilization",
-   "vertical_label" : "Slots",
-   "series" : [
-      { "metric": "mapred.jobtracker.reduce_slots", "color": "0000ff", "label": "Total", 
-        "line_width": "2", "type": "line" },
-      { "metric": "mapred.jobtracker.occupied_reduce_slots", "color": "06f7ff", "label": "Occupied", 
-        "line_width": "2", "type": "stack" },
-      { "metric": "mapred.jobtracker.reserved_reduce_slots", "color": "009999", "label": "Reserved",
-        "line_width": "2", "type": "stack" }
-   ]
-}
diff --git a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jvm_gc_report.json b/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jvm_gc_report.json
deleted file mode 100644
index 2bc8ca0..0000000
--- a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jvm_gc_report.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-   "report_name" : "jvm_gc_report",
-   "report_type" : "standard",
-   "title" : "Time spent in Garbage Collection",
-   "vertical_label" : "Milliseconds",
-   "series" : [
-      { "metric": "jvm.metrics.gcTimeMillis", "color": "ff0000", "label": "Time Spent", 
-        "line_width": "2", "type": "stack" }
-   ]
-}
diff --git a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jvm_mem_heap_used_report.json b/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jvm_mem_heap_used_report.json
deleted file mode 100644
index 2e2b238..0000000
--- a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jvm_mem_heap_used_report.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-   "report_name" : "jvm_mem_heap_used_report",
-   "report_type" : "standard",
-   "title" : "JVM Heap Memory Used",
-   "vertical_label" : "MB",
-   "series" : [
-      { "metric": "jvm.metrics.memHeapUsedM", "color": "ff0000", "label": "Heap Memory Used", 
-        "line_width": "2", "type": "line" }
-   ]
-}
diff --git a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jvm_threads_report.json b/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jvm_threads_report.json
deleted file mode 100644
index 9734587..0000000
--- a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_jvm_threads_report.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
-   "report_name" : "jvm_threads_report",
-   "report_type" : "standard",
-   "title" : "JVM Threads Status",
-   "vertical_label" : "Number Of Threads",
-   "series" : [
-      { "metric": "jvm.metrics.threadsBlocked", "color": "ff0000", "label": "Blocked", 
-        "line_width": "2", "type": "stack" },
-      { "metric": "jvm.metrics.threadsWaiting", "color": "ff6600", "label": "Waiting",
-        "line_width": "2", "type": "stack" },
-      { "metric": "jvm.metrics.threadsTimedWaiting", "color": "ffff00", "label": "Timed Waiting",
-        "line_width": "2", "type": "stack" },
-      { "metric": "jvm.metrics.threadsRunnable", "color": "00ff00", "label": "Runnable", 
-        "line_width": "2", "type": "stack" }
-   ]
-}
diff --git a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_rpc_latency_report.json b/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_rpc_latency_report.json
deleted file mode 100644
index 928a20f..0000000
--- a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_rpc_latency_report.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
-   "report_name" : "rpc_latency_report",
-   "report_type" : "standard",
-   "title" : "Average RPC Latencies",
-   "vertical_label" : "Seconds",
-   "series" : [
-      { "metric": "rpc.rpc.RpcProcessingTime_avg_time", "color": "0000ff", "label": "Average Processing Time", 
-        "line_width": "2", "type": "stack" },
-      { "metric": "rpc.rpc.RpcQueueTime_avg_time", "color": "ff0000", "label": "Average Queue Time", 
-        "line_width": "2", "type": "stack" }
-   ]
-}
diff --git a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_rpc_queue_time_avg_time_report.json b/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_rpc_queue_time_avg_time_report.json
deleted file mode 100644
index d0038a5..0000000
--- a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_rpc_queue_time_avg_time_report.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-   "report_name" : "rpc_queue_time_avg_time_report",
-   "report_type" : "standard",
-   "title" : "Average RPC Wait Time",
-   "vertical_label" : "Seconds",
-   "series" : [
-      { "metric": "rpc.rpc.RpcQueueTime_avg_time", "color": "ff0000", "label": "Avg RPC Wait Time", 
-        "line_width": "2", "type": "line" }
-   ]
-}
diff --git a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_rpc_queue_time_num_ops_report.json b/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_rpc_queue_time_num_ops_report.json
deleted file mode 100644
index 3a6f3fd..0000000
--- a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_rpc_queue_time_num_ops_report.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-   "report_name" : "rpc_queue_time_num_ops_report",
-   "report_type" : "standard",
-   "title" : "Average RPC Operations",
-   "vertical_label" : "Operations/Sec",
-   "series" : [
-      { "metric": "rpc.rpc.RpcQueueTime_num_ops", "color": "ff0000", "label": "Avg RPC Ops", 
-        "line_width": "2", "type": "line" }
-   ]
-}
diff --git a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_rpcdetailed_heartbeat_num_ops_report.json b/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_rpcdetailed_heartbeat_num_ops_report.json
deleted file mode 100644
index a689884..0000000
--- a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_rpcdetailed_heartbeat_num_ops_report.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-   "report_name" : "rpcdetailed_heartbeat_num_ops_report",
-   "report_type" : "standard",
-   "title" : "Heartbeats",
-   "vertical_label" : "Heartbeats/Sec",
-   "series" : [
-      { "metric": "rpcdetailed.rpcdetailed.sendHeartbeat_num_ops", "color": "ff0000", "label": "Heartbeats/Sec", 
-        "line_width": "2", "type": "line" }
-   ]
-}
diff --git a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_tasktracker_mapreduce_report.json b/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_tasktracker_mapreduce_report.json
deleted file mode 100644
index 799df83..0000000
--- a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_tasktracker_mapreduce_report.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
-   "report_name" : "tasktracker_mapreduce_report",
-   "report_type" : "standard",
-   "title" : "Running Maps and Reduces",
-   "vertical_label" : "Number of Maps/Reduces",
-   "series" : [
-      { "metric": "mapred.tasktracker.maps_running", "color": "ff0000", "label": "Running Maps", 
-        "line_width": "2", "type": "stack" },
-      { "metric": "mapred.tasktracker.reduces_running", "color": "0000ff", "label": "Running Reduces", 
-        "line_width": "2", "type": "stack" }
-   ]
-}
diff --git a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_tasktracker_task_report.json b/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_tasktracker_task_report.json
deleted file mode 100644
index 5c70316..0000000
--- a/branch-1.2/contrib/addons/src/addOns/ganglia/graph.d/hdp_mon_tasktracker_task_report.json
+++ /dev/null
@@ -1,14 +0,0 @@
-{
-   "report_name" : "tasktracker_task_report",
-   "report_type" : "standard",
-   "title" : "Task Status",
-   "vertical_label" : "Number Of Tasks",
-   "series" : [
-      { "metric": "mapred.tasktracker.tasks_completed", "color": "00ff00", "label": "Completed", 
-        "line_width": "2", "type": "stack" },
-      { "metric": "mapred.tasktracker.tasks_failed_timeout", "color": "ffff00", "label": "Failed Timeout", 
-        "line_width": "2", "type": "stack" },
-      { "metric": "mapred.tasktracker.tasks_failed_ping", "color": "ff0000", "label": "Failed Ping", 
-        "line_width": "2", "type": "stack" }
-   ]
-}
diff --git a/branch-1.2/contrib/addons/src/addOns/nagios/plugins/check_aggregate.php b/branch-1.2/contrib/addons/src/addOns/nagios/plugins/check_aggregate.php
deleted file mode 100644
index 8eaee53..0000000
--- a/branch-1.2/contrib/addons/src/addOns/nagios/plugins/check_aggregate.php
+++ /dev/null
@@ -1,195 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-  $options = getopt ("f:s:n:w:c:t:");
-  if (!array_key_exists('t', $options) || !array_key_exists('f', $options) || !array_key_exists('w', $options) 
-      || !array_key_exists('c', $options) || !array_key_exists('s', $options)) {
-    usage();
-    exit(3);
-  }
-  $status_file=$options['f'];
-  $status_code=$options['s'];
-  $type=$options['t'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  if ($type == "service" && !array_key_exists('n', $options)) {
-    echo "Service description not provided -n option\n";
-    exit(3);
-  } 
-  if ($type == "service") {
-    $service_name=$options['n'];
-    /* echo "DESC: " . $service_name . "\n"; */
-  }
-  
-  $result = array();
-  $status_file_content = file_get_contents($status_file);
-
-  $counts = array();
-  if ($type == "service") {
-    $counts=query_alert_count($status_file_content, $service_name, $status_code);
-  } else {
-    $counts=query_host_count($status_file_content, $status_code);
-  }
-
-  if ($counts['total'] == 0) {
-    $percent = 0;
-  } else {
-    $percent = ($counts['actual']/$counts['total'])*100;
-  }
-  if ($percent >= $crit) {
-    echo "CRITICAL: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
-    exit (2);
-  }
-  if ($percent >= $warn) {
-    echo "WARNING: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
-    exit (1);
-  }
-  echo "OK: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
-  exit(0);
-
-
-  # Functions 
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -f <status_file_path> -t type(host/service) -s <status_codes> -n <service description> -w <warn%> -c <crit%>\n";
-  }
-
-  /* Query host count */
-  function query_host_count ($status_file_content, $status_code) {
-    $num_matches = preg_match_all("/hoststatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $hostcounts_object = array ();
-    $total_hosts = 0;
-    $hosts = 0;
-    foreach ($matches[0] as $object) {
-      $total_hosts++;
-      if (getParameter($object, "current_state") == $status_code) {
-        $hosts++;
-      } 
-    }
-    $hostcounts_object['total'] = $total_hosts;
-    $hostcounts_object['actual'] = $hosts;
-    return $hostcounts_object;
-  }
-
-  /* Query Alert counts */
-  function query_alert_count ($status_file_content, $service_name, $status_code) {
-    $num_matches = preg_match_all("/servicestatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $alertcounts_objects = array ();
-    $total_alerts=0;
-    $alerts=0;
-    foreach ($matches[0] as $object) {
-      if (getParameter($object, "service_description") == $service_name) {
-        $total_alerts++;
-        if (getParameter($object, "current_state") >= $status_code) {
-          $alerts++;
-        } 
-      }
-    }
-    $alertcounts_objects['total'] = $total_alerts;
-    $alertcounts_objects['actual'] = $alerts;
-    return $alertcounts_objects;
-  }
-
-  function get_service_type($service_description)
-  {
-    $pieces = explode("::", $service_description);
-    switch ($pieces[0]) {
-      case "NAMENODE":
-        $pieces[0] = "HDFS";
-        break; 
-      case "JOBTRACKER":
-        $pieces[0] = "MAPREDUCE";
-        break; 
-      case "HBASEMASTER":
-        $pieces[0] = "HBASE";
-        break; 
-      case "SYSTEM":
-      case "HDFS":
-      case "MAPREDUCE":
-      case "HBASE":
-        break; 
-      default:
-        $pieces[0] = "UNKNOWN";
-    }
-    return $pieces[0];
-  }
-
-  function getParameter($object, $key)
-  {
-    $pattern="/\s" . $key . "[\s= ]*([\S, ]*)\n/";
-    $num_mat = preg_match($pattern, $object, $matches);
-    $value = "";
-    if ($num_mat) {
-      $value = $matches[1];
-    }
-    return $value;
-  }
-
-function indent($json) {
-
-    $result      = '';
-    $pos         = 0;
-    $strLen      = strlen($json);
-    $indentStr   = '  ';
-    $newLine     = "\n";
-    $prevChar    = '';
-    $outOfQuotes = true;
-
-    for ($i=0; $i<=$strLen; $i++) {
-
-        // Grab the next character in the string.
-        $char = substr($json, $i, 1);
-
-        // Are we inside a quoted string?
-        if ($char == '"' && $prevChar != '\\') {
-            $outOfQuotes = !$outOfQuotes;
-
-        // If this character is the end of an element,
-        // output a new line and indent the next line.
-        } else if(($char == '}' || $char == ']') && $outOfQuotes) {
-            $result .= $newLine;
-            $pos --;
-            for ($j=0; $j<$pos; $j++) {
-                $result .= $indentStr;
-            }
-        }
-
-        // Add the character to the result string.
-        $result .= $char;
-
-        // If the last character was the beginning of an element,
-        // output a new line and indent the next line.
-        if (($char == ',' || $char == '{' || $char == '[') && $outOfQuotes) {
-            $result .= $newLine;
-            if ($char == '{' || $char == '[') {
-                $pos ++;
-            }
-
-            for ($j = 0; $j < $pos; $j++) {
-                $result .= $indentStr;
-            }
-        }
-
-        $prevChar = $char;
-    }
-
-    return $result;
-}
-?>
-
diff --git a/branch-1.2/contrib/addons/src/addOns/nagios/plugins/check_hadoop.sh b/branch-1.2/contrib/addons/src/addOns/nagios/plugins/check_hadoop.sh
deleted file mode 100644
index 49105fc..0000000
--- a/branch-1.2/contrib/addons/src/addOns/nagios/plugins/check_hadoop.sh
+++ /dev/null
@@ -1,91 +0,0 @@
-#!/bin/bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-user=""
-secure="false"
-keytab=""
-while getopts ":u:k:s" opt; do
-  case $opt in
-    u)
-      user=$OPTARG;
-      ;;
-    k)
-      keytab=$OPTARG;
-      ;;
-    s)
-      secure="true";
-      ;;
-    \?)
-      echo "Invalid option: -$OPTARG" >&2
-      exit 3
-      ;;
-    :)
-      echo "UNKNOWNOption -$OPTARG requires an argument." >&2
-      exit 3
-      ;;
-  esac
-done
-
-outfile="/tmp/nagios-hadoop-check.out"
-curtime=`date +"%F-%H-%M-%S"`
-fname="nagios-hadoop-check-${curtime}"
-
-if [[ "$user" == "" ]]; then
-  echo "INVALID: user argument not specified";
-  exit 3;
-fi
-if [[ "$keytab" == "" ]]; then 
-  keytab="/homes/$user/$user.headless.keytab"
-fi
-
-if [[ "$secure" == "true" ]]; then
-  sudo -u $user -i "/usr/kerberos/bin/kinit -kt $keytab $user" > ${outfile} 2>&1
-fi
-
-sudo -u $user -i "hadoop dfs -copyFromLocal /etc/passwd ${fname}.input " > ${outfile} 2>&1
-if [[ "$?" -ne "0" ]]; then 
-  echo "CRITICAL: Error copying file to HDFS. See error output in ${outfile} on nagios server";
-  exit 2; 
-fi
-sudo -u $user -i "hadoop dfs -ls" > ${outfile} 2>&1
-if [[ "$?" -ne "0" ]]; then 
-  echo "CRITICAL: Error listing HDFS files. See error output in ${outfile} on nagios server";
-  exit 2; 
-fi
-sudo -u $user -i "hadoop jar /usr/share/hadoop/hadoop-examples-*.jar wordcount ${fname}.input ${fname}.out" >> ${outfile} 2>&1
-if [[ "$?" -ne "0" ]]; then 
-  echo "CRITICAL: Error running M/R job. See error output in ${outfile} on nagios server";
-  exit 2; 
-fi
-sudo -u $user -i "hadoop fs -rmr -skipTrash ${fname}.out" >> ${outfile} 2>&1
-if [[ "$?" -ne "0" ]]; then 
-  echo "CRITICAL: Error removing M/R job output. See error output in ${outfile} on nagios server";
-  exit 2; 
-fi
-sudo -u $user -i "hadoop fs -rm -skipTrash ${fname}.input" >> ${outfile} 2>&1
-if [[ "$?" -ne "0" ]]; then 
-  echo "CRITICAL: Error removing M/R job input. See error output in ${outfile} on nagios server";
-  exit 2; 
-fi
-
-echo "OK: M/R WordCount Job ran successfully"
-exit 0;
diff --git a/branch-1.2/contrib/addons/src/addOns/nagios/plugins/check_hbase.sh b/branch-1.2/contrib/addons/src/addOns/nagios/plugins/check_hbase.sh
deleted file mode 100644
index f29e590..0000000
--- a/branch-1.2/contrib/addons/src/addOns/nagios/plugins/check_hbase.sh
+++ /dev/null
@@ -1,86 +0,0 @@
-#!/bin/bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-user=""
-secure="false"
-keytab=""
-while getopts ":u:k:s" opt; do
-  case $opt in
-    u)
-      user=$OPTARG;
-      ;;
-    k)
-      keytab=$OPTARG;
-      ;;
-    s)
-      secure="true";
-      ;;
-    \?)
-      echo "Invalid option: -$OPTARG" >&2
-      exit 3
-      ;;
-    :)
-      echo "UNKNOWNOption -$OPTARG requires an argument." >&2
-      exit 3
-      ;;
-  esac
-done
-
-outfile="/tmp/nagios-hbase-check.out"
-curtime=`date +"%F-%H-%M-%S"`
-fname="nagios-hbase-check-${curtime}"
-
-if [[ "$user" == "" ]]; then
-  echo "INVALID: user argument not specified";
-  exit 3;
-fi
-if [[ "$keytab" == "" ]]; then 
-  keytab="/homes/$user/$user.headless.keytab"
-fi
-
-if [[ "$secure" == "true" ]]; then
-  sudo -u $user -i "/usr/kerberos/bin/kinit -kt $keytab $user" > ${outfile} 2>&1
-fi
-
-output=`sudo -u $user -i "echo status | /usr/bin/hbase --config /etc/hbase shell"`
-(IFS='')
-tmpOutput=$(echo $output | grep -v '0 servers')
-if [[ "$?" -ne "0" ]]; then 
-  echo "CRITICAL: No region servers are running";
-  exit 2; 
-fi
-sudo -u $user -i "echo disable \'nagios_test_table\' | /usr/bin/hbase --config /etc/hbase shell" > ${outfile} 2>&1
-sudo -u $user -i "echo drop \'nagios_test_table\' | /usr/bin/hbase --config /etc/hbase shell" > ${outfile} 2>&1
-sudo -u $user -i "echo create \'nagios_test_table\', \'family\' | /usr/bin/hbase --config /etc/hbase shell" > ${outfile} 2>&1
-sudo -u $user -i "echo put \'nagios_test_table\', \'row01\', \'family:col01\', \'value1\' | /usr/bin/hbase --config /etc/hbase shell" > ${outfile} 2>&1
-output=`sudo -u $user -i "echo scan \'nagios_test_table\' | /usr/bin/hbase --config /etc/hbase shell"`
-(IFS='')
-tmpOutput=$(echo $output | grep -v '1 row(s) in')
-if [[ "$?" -ne "1" ]]; then 
-  echo "CRITICAL: Error populating HBase table";
-  exit 2; 
-fi
-sudo -u $user -i "echo disable \'nagios_test_table\' | /usr/bin/hbase --config /etc/hbase shell" > ${outfile} 2>&1
-sudo -u $user -i "echo drop \'nagios_test_table\' | /usr/bin/hbase --config /etc/hbase shell" > ${outfile} 2>&1
-
-echo "OK: HBase transaction completed successfully"
-exit 0;
diff --git a/branch-1.2/contrib/addons/src/addOns/nagios/plugins/check_hdfs_blocks.php b/branch-1.2/contrib/addons/src/addOns/nagios/plugins/check_hdfs_blocks.php
deleted file mode 100644
index c20d406..0000000
--- a/branch-1.2/contrib/addons/src/addOns/nagios/plugins/check_hdfs_blocks.php
+++ /dev/null
@@ -1,72 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes a call to the master node, gets the JMX JSON document,
- * and checks whether the corrupt or missing block % is > threshold.
- * Usage: check_jmx -H hostaddress -p port -w 1% -c 1%
- */
-
-  $options = getopt ("h:p:w:c:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options) 
-      || !array_key_exists('c', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-
-  /* Get the json document */
-  $json_string = file_get_contents("http://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemMetrics");
-  $json_array = json_decode($json_string, true);
-  $m_percent = 0;
-  $c_percent = 0;
-  $object = $json_array['beans'][0];
-  $missing_blocks = $object['MissingBlocks'];
-  $corrupt_blocks = $object['CorruptBlocks'];
-  $total_blocks = $object['BlocksTotal'];
-  if($total_blocks == 0) {
-    $m_percent = 0;
-    $c_percent = 0;
-  } else {
-    $m_percent = ($missing_blocks/$total_blocks)*100;
-    $c_percent = ($corrupt_blocks/$total_blocks)*100;
-  }
-  $out_msg = "corrupt_blocks:<" . $corrupt_blocks . 
-             ">, missing_blocks:<" . $missing_blocks . 
-             ">, total_blocks:<" . $total_blocks . ">";
-  
-  if ($m_percent > $crit || $c_percent > $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($m_percent > $warn || $c_percent > $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%>\n";
-  }
-?>
diff --git a/branch-1.2/contrib/addons/src/addOns/nagios/plugins/check_hdfs_capacity.php b/branch-1.2/contrib/addons/src/addOns/nagios/plugins/check_hdfs_capacity.php
deleted file mode 100644
index a2686c5..0000000
--- a/branch-1.2/contrib/addons/src/addOns/nagios/plugins/check_hdfs_capacity.php
+++ /dev/null
@@ -1,68 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes a call to the master node, gets the JMX JSON document,
- * and checks whether the % HDFS capacity used is >= the warn and critical limits.
- * Usage: check_jmx -H hostaddress -p port -w 1 -c 1
- */
-
-  $options = getopt ("h:p:w:c:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options) 
-      || !array_key_exists('c', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-
-  /* Get the json document */
-  $json_string = file_get_contents("http://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemState");
-  $json_array = json_decode($json_string, true);
-  $percent = 0;
-  $object = $json_array['beans'][0];
-  $CapacityUsed = $object['CapacityUsed'];
-  $CapacityRemaining = $object['CapacityRemaining'];
-  $CapacityTotal = $CapacityUsed + $CapacityRemaining;
-  if($CapacityTotal == 0) {
-    $percent = 0;
-  } else {
-    $percent = ($CapacityUsed/$CapacityTotal)*100;
-  }
-  $out_msg = "DFSUsedGB:<" . round ($CapacityUsed/(1024*1024*1024),1) . 
-             ">, DFSTotalGB:<" . round($CapacityTotal/(1024*1024*1024),1) . ">";
-  
-  if ($percent >= $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($percent >= $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%>\n";
-  }
-?>
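
The notable detail in this check is that the total is derived as CapacityUsed + CapacityRemaining rather than read from a single bean attribute. A condensed sketch of that calculation (placeholder host/port):

    <?php
      // Sketch: derive percent-used from the FSNamesystemState bean,
      // totalling used + remaining as the plugin above does.
      $json = file_get_contents("http://namenode.example.com:50070/jmx"
            . "?qry=Hadoop:service=NameNode,name=FSNamesystemState");
      $data = json_decode($json, true);
      $bean = $data['beans'][0];
      $total = $bean['CapacityUsed'] + $bean['CapacityRemaining'];
      $pct = ($total == 0) ? 0 : ($bean['CapacityUsed'] / $total) * 100;
      echo "DFSUsedGB:<" . round($bean['CapacityUsed'] / (1024*1024*1024), 1)
         . ">, used:" . round($pct, 1) . "%\n";
    ?>
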
diff --git a/branch-1.2/contrib/addons/src/addOns/nagios/plugins/check_hive_metastore_status.sh b/branch-1.2/contrib/addons/src/addOns/nagios/plugins/check_hive_metastore_status.sh
deleted file mode 100644
index aa7d193..0000000
--- a/branch-1.2/contrib/addons/src/addOns/nagios/plugins/check_hive_metastore_status.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# The URI is of the form thrift://<hostname>:<port>
-HOST=$1
-PORT=$2
-HCAT_URL=-Dhive.metastore.uris="thrift://$HOST:$PORT"
-out=`hcat $HCAT_URL -e "show databases" 2>&1`
-if [[ "$?" -ne 0 ]]; then 
-  echo "CRITICAL: Error accessing Hive Metastore status [$out]";
-  exit 2;
-fi
-echo "OK: Hive Metastore status OK";
-exit 0;
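
The same probe can be expressed in PHP, the language of most of these plugins. A sketch, assuming the hcat CLI is on the PATH and taking host/port as CLI arguments (illustrative, not part of the patch):

    <?php
      // Sketch: run `hcat` against the metastore and map the exit status
      // to Nagios return codes, mirroring the shell check above.
      $host = $argv[1];
      $port = $argv[2];
      $cmd = "hcat -Dhive.metastore.uris=thrift://$host:$port -e 'show databases' 2>&1";
      exec($cmd, $out, $rc);
      if ($rc !== 0) {
        echo "CRITICAL: Error accessing Hive Metastore status [" . implode(' ', $out) . "]\n";
        exit(2);
      }
      echo "OK: Hive Metastore status OK\n";
      exit(0);
    ?>
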
diff --git a/branch-1.2/contrib/addons/src/addOns/nagios/plugins/check_name_dir_status.php b/branch-1.2/contrib/addons/src/addOns/nagios/plugins/check_name_dir_status.php
deleted file mode 100644
index 7e44ea7..0000000
--- a/branch-1.2/contrib/addons/src/addOns/nagios/plugins/check_name_dir_status.php
+++ /dev/null
@@ -1,59 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin calls the NameNode, fetches the JMX JSON document, and
- * checks NameDirStatuses for any offline (failed) name directories.
- * Usage: check_name_dir_status.php -h hostaddress -p port
- */
-
-  $options = getopt ("h:p:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-
-  /* Get the json document */
-  $json_string = file_get_contents("http://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo");
-  $json_array = json_decode($json_string, true);
-  $object = $json_array['beans'][0];
-  if ($object['NameDirStatuses'] == "") {
-    echo "UNKNOWN: NameNode directory status not available via http://<nn_host>:port/jmx url" . "\n";
-    exit(3);
-  }
-  $NameDirStatuses = json_decode($object['NameDirStatuses'], true);
-  $failed_dir_count = count($NameDirStatuses['failed']);
-  $out_msg = "CRITICAL: Offline NameNode directories: ";
-  if ($failed_dir_count > 0) {
-    foreach ($NameDirStatuses['failed'] as $key => $value) {
-      $out_msg = $out_msg . $key . ":" . $value . ", ";
-    }
-    echo $out_msg . "\n";
-    exit (2);
-  }
-  echo "OK: All NameNode directories are active" . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port\n";
-  }
-?>
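
Worth noting: NameDirStatuses is itself a JSON string embedded inside the JMX JSON document, hence the second json_decode() in the plugin above. A condensed sketch of that double decode (placeholder host/port):

    <?php
      // Sketch: decode the outer JMX document, then decode the embedded
      // NameDirStatuses string to reach the 'failed' directory map.
      $json = file_get_contents("http://namenode.example.com:50070/jmx"
            . "?qry=Hadoop:service=NameNode,name=NameNodeInfo");
      $data = json_decode($json, true);
      $statuses = json_decode($data['beans'][0]['NameDirStatuses'], true);
      foreach ($statuses['failed'] as $dir => $state) {
        echo "offline: $dir ($state)\n";
      }
    ?>
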
diff --git a/branch-1.2/contrib/addons/src/addOns/nagios/plugins/check_oozie_status.sh b/branch-1.2/contrib/addons/src/addOns/nagios/plugins/check_oozie_status.sh
deleted file mode 100644
index e943bbe..0000000
--- a/branch-1.2/contrib/addons/src/addOns/nagios/plugins/check_oozie_status.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# OOZIE_URL is of the form http://<hostname>:<port>/oozie
-# OOZIE_URL: http://host1.localdomain:11000/oozie
-HOST=$1
-PORT=$2
-JAVA_HOME=$3
-OOZIE_URL="http://$HOST:$PORT/oozie"
-export JAVA_HOME=$JAVA_HOME
-out=`oozie admin -oozie ${OOZIE_URL} -status 2>&1`
-if [[ "$?" -ne 0 ]]; then 
-  echo "CRITICAL: Error accessing Oozie Server status [$out]";
-  exit 2;
-fi
-echo "OK: Oozie Server status [$out]";
-exit 0;
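
An alternative to shelling out to the oozie CLI is Oozie's REST API; a hedged sketch, assuming the standard v1 admin/status endpoint (which returns {"systemMode":"NORMAL"} on a healthy server) and placeholder host/port:

    <?php
      // Sketch: probe Oozie over HTTP instead of the CLI. The endpoint is
      // assumed from Oozie's web-services API, not taken from this patch.
      $json = @file_get_contents("http://oozie.example.com:11000/oozie/v1/admin/status");
      $status = ($json === false) ? null : json_decode($json, true);
      if (!is_array($status) || !isset($status['systemMode']) || $status['systemMode'] !== 'NORMAL') {
        echo "CRITICAL: Error accessing Oozie Server status\n";
        exit(2);
      }
      echo "OK: Oozie Server status [NORMAL]\n";
      exit(0);
    ?>
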
diff --git a/branch-1.2/contrib/addons/src/addOns/nagios/plugins/check_rpcq_latency.php b/branch-1.2/contrib/addons/src/addOns/nagios/plugins/check_rpcq_latency.php
deleted file mode 100644
index 9ec28f7..0000000
--- a/branch-1.2/contrib/addons/src/addOns/nagios/plugins/check_rpcq_latency.php
+++ /dev/null
@@ -1,67 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin calls the master node, fetches the JMX JSON document, and
- * checks the average RPC queue wait time, RpcQueueTime_avg_time.
- * Usage: check_rpcq_latency.php -h hostaddress -p port -n ServiceName -w 1 -c 1
- * Warning and critical values are in seconds.
- * Service name is one of: JobTracker, NameNode.
- */
-
-  $options = getopt ("h:p:w:c:n:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options) 
-      || !array_key_exists('c', $options) || !array_key_exists('n', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-  $master=$options['n'];
-  $warn=$options['w']; 
-  $crit=$options['c']; 
-
-  /* Get the json document */
-  $json_string = file_get_contents("http://".$host.":".$port."/jmx?qry=Hadoop:service=".$master.",name=RpcActivityForPort*");
-  $json_array = json_decode($json_string, true);
-  $object = $json_array['beans'][0];
-  
-  $RpcQueueTime_avg_time = $object['RpcQueueTime_avg_time'];
-  $RpcProcessingTime_avg_time = $object['RpcProcessingTime_avg_time'];
-
-  $out_msg = "RpcQueueTime_avg_time:<" . $RpcQueueTime_avg_time . 
-             "> Secs, RpcProcessingTime_avg_time:<" . $RpcProcessingTime_avg_time . 
-             "> Secs";
-  
-  if ($RpcQueueTime_avg_time >= $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($RpcQueueTime_avg_time >= $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -n <JobTracker/NameNode> -w <warn_in_sec> -c <crit_in_sec>\n";
-  }
-?>
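
One subtlety: the qry pattern ends in a wildcard (RpcActivityForPort*), so the response can contain one bean per RPC port, while the plugin reads only beans[0]. A sketch of taking the worst queue time across all matching beans instead (placeholder host/port; an extension, not the shipped behavior):

    <?php
      // Sketch: iterate every RpcActivityForPort* bean and keep the
      // largest average queue time, rather than just the first bean.
      $json = file_get_contents("http://namenode.example.com:50070/jmx"
            . "?qry=Hadoop:service=NameNode,name=RpcActivityForPort*");
      $data = json_decode($json, true);
      $worst = 0;
      foreach ($data['beans'] as $bean) {
        $worst = max($worst, $bean['RpcQueueTime_avg_time']);
      }
      echo "worst RpcQueueTime_avg_time: $worst secs\n";
    ?>
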
diff --git a/branch-1.2/contrib/addons/src/addOns/nagios/plugins/check_webui.sh b/branch-1.2/contrib/addons/src/addOns/nagios/plugins/check_webui.sh
deleted file mode 100644
index 57b9239..0000000
--- a/branch-1.2/contrib/addons/src/addOns/nagios/plugins/check_webui.sh
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/bin/bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-checkurl () {
-  url=$1
-  /usr/bin/wget -q $url -O /dev/null
-  echo $?
-}
-
-service=$1
-host=$2
-
-if [[ -z "$service" || -z "$host" ]]; then
-  echo "UNKNOWN: Invalid arguments; Usage: check_webui.sh service_name host_name";
-  exit 3;
-fi
-
-case "$service" in
-
-jobtracker) 
-    jtweburl="http://$host:50030"
-    if [[ `checkurl "$jtweburl"` -ne 0 ]]; then 
-      echo "WARNING: Jobtracker web UI not accessible : $jtweburl";
-      exit 1;
-    fi
-    ;;
-namenode)
-    nnweburl="http://$host:50070"
-    if [[ `checkurl "$nnweburl"` -ne 0 ]] ; then 
-      echo "WARNING: NameNode Web UI not accessible : $nnweburl";
-      exit 1;
-    fi
-    ;;
-jobhistory)
-    jhweburl="http://$host:51111/jobhistoryhome.jsp"
-    if [[ `checkurl "$jhweburl"` -ne 0 ]]; then 
-      echo "WARNING: JobHistory Web UI not accessible : $jhweburl";
-      exit 1;
-    fi
-    ;;
-hbase)
-    hbaseweburl="http://$host:60010/master-status"
-    jhweburl="http://domU-12-31-39-16-DC-FB.compute-1.internal:51111/jobhistoryhome.jsp"
-    if [[ `checkurl "$hbaseweburl"` -ne 0 ]]; then 
-      echo "WARNING: HBase Master Web UI not accessible : $hbaseweburl";
-      exit 1;
-    fi
-    ;;
-*) echo "UNKNOWN: Invalid service name [$service], valid options [jobtracker|jobhistory|hbase|namenode]"
-   exit 3
-   ;;
-esac
-
-echo "OK: Successfully accessed $service Web UI"
-exit 0;
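
The wget-based probe can also be done natively in PHP. A sketch using get_headers(), which is slightly more permissive than wget -q (wget also fails on HTTP error statuses; get_headers() returns false only when no HTTP response is obtained at all):

    <?php
      // Sketch: basic URL reachability check without wget. Host/port are
      // placeholders, matching the NameNode UI default.
      function url_up($url) {
        return @get_headers($url) !== false;
      }
      $url = "http://namenode.example.com:50070";
      if (url_up($url)) {
        echo "OK: Successfully accessed $url\n";
        exit(0);
      }
      echo "WARNING: Web UI not accessible : $url\n";
      exit(1);
    ?>
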
diff --git a/branch-1.2/contrib/addons/src/addOns/nagios/plugins/sys_logger.py b/branch-1.2/contrib/addons/src/addOns/nagios/plugins/sys_logger.py
deleted file mode 100644
index 7b716c5..0000000
--- a/branch-1.2/contrib/addons/src/addOns/nagios/plugins/sys_logger.py
+++ /dev/null
@@ -1,115 +0,0 @@
-#!/usr/bin/python
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import sys
-import syslog
-
-# dictionary of state->severity mappings
-severities = {'UP':'OK', 'DOWN':'Critical', 'UNREACHABLE':'Critical', 'OK':'OK',
-              'WARNING':'Warning', 'UNKNOWN':'Warning', 'CRITICAL':'Critical'}
-
-# List of services which can result in events at the Degraded severity
-degraded_alert_services = ['HBASEMASTER::HBase Master CPU utilization',
-                           'HDFS::NameNode RPC latency',
-                           'MAPREDUCE::JobTracker RPC latency',
-                           'JOBTRACKER::JobTracker CPU utilization']
-
-# List of services which can result in events at the Fatal severity
-fatal_alert_services = ['NAMENODE::NameNode process down']
-
-# dictionary of service->msg_id mappings
-msg_ids = {'Host::Ping':'host_down', 'HBASEMASTER::HBase Master CPU utilization':'master_cpu_utilization',
-           'HDFS::HDFS capacity utilization':'hdfs_percent_capacity', 'HDFS::Corrupt/Missing blocks':'hdfs_block',
-           'NAMENODE::NameNode edit logs directory status':'namenode_edit_log_write', 'HDFS::Percent DataNodes down':'datanode_down',
-           'DATANODE::DataNode process down':'datanode_process_down', 'HDFS::Percent DataNodes storage full':'datanodes_percent_storage_full',
-           'NAMENODE::NameNode process down':'namenode_process_down', 'HDFS::NameNode RPC latency':'namenode_rpc_latency',
-           'DATANODE::DataNode storage full':'datanodes_storage_full', 'JOBTRACKER::JobTracker process down':'jobtracker_process_down',
-           'MAPREDUCE::JobTracker RPC latency':'jobtracker_rpc_latency', 'MAPREDUCE::Percent TaskTrackers down':'tasktrackers_down',
-           'TASKTRACKER::TaskTracker process down':'tasktracker_process_down', 'HBASEMASTER::HBase Master process down':'hbasemaster_process_down',
-           'REGIONSERVER::RegionServer process down':'regionserver_process_down', 'HBASE::Percent RegionServers down':'regionservers_down',
-           'HIVE-METASTORE::Hive Metastore status check':'hive_metastore_process_down', 'ZOOKEEPER::Percent ZooKeeper Servers down':'zookeepers_down',
-           'ZOOKEEPER::ZooKeeper Server process down':'zookeeper_process_down', 'OOZIE::Oozie Server status check':'oozie_down',
-           'WEBHCAT::WebHCat Server status check':'templeton_down', 'PUPPET::Puppet agent down':'puppet_down',
-           'NAGIOS::Nagios status log staleness':'nagios_status_log_stale', 'GANGLIA::Ganglia [gmetad] process down':'ganglia_process_down',
-           'GANGLIA::Ganglia Collector [gmond] process down alert for HBase Master':'ganglia_collector_process_down',
-           'GANGLIA::Ganglia Collector [gmond] process down alert for JobTracker':'ganglia_collector_process_down',
-           'GANGLIA::Ganglia Collector [gmond] process down alert for NameNode':'ganglia_collector_process_down',
-           'GANGLIA::Ganglia Collector [gmond] process down alert for slaves':'ganglia_collector_process_down',
-           'NAMENODE::Secondary NameNode process down':'secondary_namenode_process_down',
-           'JOBTRACKER::JobTracker CPU utilization':'jobtracker_cpu_utilization',
-           'HBASEMASTER::HBase Master Web UI down':'hbase_ui_down', 'NAMENODE::NameNode Web UI down':'namenode_ui_down',
-           'JOBTRACKER::JobHistory Web UI down':'jobhistory_ui_down', 'JOBTRACKER::JobTracker Web UI down':'jobtracker_ui_down'}
-
-
-# Determine the severity of the TVI alert based on the Nagios alert state.
-def determine_severity(state, service):
-    severity = severities.get(state, 'Warning')
-
-    # For some alerts, warning should be converted to Degraded
-    if severity == 'Warning' and service in degraded_alert_services:
-        severity = 'Degraded'
-    elif severity != 'OK' and service in fatal_alert_services:
-        severity = 'Fatal'
-
-    return severity
-
-
-# Determine the msg id for the TVI alert based on the service which generated the Nagios alert.
-# The msg id is used to correlate a log msg to a TVI rule.
-def determine_msg_id(service, severity):
-    if service in msg_ids:
-        msg_id = msg_ids[service]
-        if severity == 'OK':
-            msg_id = '{0}_ok'.format(msg_id)
-        return msg_id
-    else:
-        return 'HADOOP_UNKNOWN_MSG'
-
-
-# Determine the domain.  Currently the domain is always 'Hadoop'.
-def determine_domain():
-    return 'Hadoop'
-
-
-# log the TVI msg to the syslog
-def log_tvi_msg(msg):
-    syslog.openlog('Hadoop', syslog.LOG_PID)
-    syslog.syslog(msg)
-
-
-# generate a tvi log msg from a Hadoop alert
-def generate_tvi_log_msg(alert_type, attempt, state, service, msg):
-    # Determine the TVI msg contents
-    severity = determine_severity(state, service)  # The TVI alert severity.
-    domain   = determine_domain()                  # The domain specified in the TVI alert.
-    msg_id   = determine_msg_id(service, severity) # The msg_id used to correlate to a TVI rule.
-
-    # Only log HARD alerts
-    if alert_type == 'HARD':
-        # Format and log msg
-        log_tvi_msg('{0}: {1}: {2}# {3}'.format(severity, domain, msg_id, msg))
-
-
-# main method which is called when invoked on the command line
-def main():
-    generate_tvi_log_msg(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])
-
-
-# run the main method
-if __name__ == '__main__':
-    main()
-    sys.exit(0)
\ No newline at end of file
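
Given the '{0}: {1}: {2}# {3}' format above, a HARD CRITICAL alert for 'NAMENODE::NameNode process down' lands in syslog roughly as:

    Fatal: Hadoop: namenode_process_down# <plugin output text>

(Fatal rather than Critical because that service is listed in fatal_alert_services.)
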
diff --git a/branch-1.2/contrib/addons/src/addOns/nagios/scripts/nagios_alerts.php b/branch-1.2/contrib/addons/src/addOns/nagios/scripts/nagios_alerts.php
deleted file mode 100644
index 87274fc..0000000
--- a/branch-1.2/contrib/addons/src/addOns/nagios/scripts/nagios_alerts.php
+++ /dev/null
@@ -1,461 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** Constants. */
-define("HDP_MON_RESPONSE_OPTION_KEY__PROPERTIES", "Properties");
-define("HDP_MON_RESPONSE_OPTION_KEY__TYPE", "Type");
-
-define("HDP_MON_RESPONSE_OPTION_VALUE__PROPERTIES_UNCACHEABLE", "Uncacheable");
-define("HDP_MON_RESPONSE_OPTION_VALUE__TYPE_JSON", "JSON");
-define("HDP_MON_RESPONSE_OPTION_VALUE__TYPE_JAVASCRIPT", "JAVASCRIPT");
-
-define("HDP_MON_QUERY_ARG__JSONP", "jsonp");
-
-/** Spits out appropriate response headers, as per the options passed in. */
-function hdp_mon_generate_response_headers( $response_options )
-{
-  if( $response_options[HDP_MON_RESPONSE_OPTION_KEY__PROPERTIES] == HDP_MON_RESPONSE_OPTION_VALUE__PROPERTIES_UNCACHEABLE )
-  {
-    // Make the response uncache-able.
-    header("Expires: Mon, 26 Jul 1997 05:00:00 GMT"); // Date in the past
-    header("Last-Modified: " . gmdate("D, d M Y H:i:s") . " GMT"); // Always modified
-    header("Cache-Control: no-cache, must-revalidate"); // HTTP/1.1
-    header("Pragma: no-cache"); // HTTP/1.0
-  }
-
-  switch( $response_options[HDP_MON_RESPONSE_OPTION_KEY__TYPE] )
-  {
-    case HDP_MON_RESPONSE_OPTION_VALUE__TYPE_JSON:
-      {
-        header('Content-type: application/json');
-      }
-      break;
-
-    case HDP_MON_RESPONSE_OPTION_VALUE__TYPE_JAVASCRIPT:
-      {
-        header('Content-type: application/javascript');
-      }
-      break;
-  }
-}
-
-/** Given $response_data (which we expect to be a JSON string), generate an
- *  HTTP response, which includes emitting the necessary HTTP response headers
- *  followed by the response body (that is either plain ol' $response_data,
- *  or a JSONP wrapper around it).
- */
-function hdp_mon_generate_response( $response_data )
-{
-  $jsonpFunctionName = NULL;
-  if (isset($_GET[HDP_MON_QUERY_ARG__JSONP])) {
-    $jsonpFunctionName = $_GET[HDP_MON_QUERY_ARG__JSONP];
-  }
-
-  hdp_mon_generate_response_headers( array
-  ( HDP_MON_RESPONSE_OPTION_KEY__PROPERTIES => HDP_MON_RESPONSE_OPTION_VALUE__PROPERTIES_UNCACHEABLE,
-  HDP_MON_RESPONSE_OPTION_KEY__TYPE =>
-  isset( $jsonpFunctionName )  && $jsonpFunctionName != "" ?
-  HDP_MON_RESPONSE_OPTION_VALUE__TYPE_JAVASCRIPT :
-  HDP_MON_RESPONSE_OPTION_VALUE__TYPE_JSON ) );
-
-  if( isset( $jsonpFunctionName ) )
-  {
-    echo "$jsonpFunctionName( $response_data );";
-  }
-  else
-  {
-    echo $response_data;
-  }
-}
-
-  /* alert_type { ok, non-ok, warning, critical, all } */
-  define ("all", "-2");
-  define ("nok", "-1");
-  define ("ok", "0");
-  define ("warn", "1");
-  define ("critical", "2");
-
-  define ("HDFS_SERVICE_CHECK", "NAMENODE::NameNode process down");
-  define ("MAPREDUCE_SERVICE_CHECK", "JOBTRACKER::JobTracker process down");
-  define ("HBASE_SERVICE_CHECK", "HBASEMASTER::HBaseMaster process down");
-  define ("ZOOKEEPER_SERVICE_CHECK", "ZOOKEEPER::Percent ZooKeeper Servers down");
-  define ("HIVE_METASTORE_SERVICE_CHECK", "HIVE-METASTORE::Hive Metastore status check");
-  define ("OOZIE_SERVICE_CHECK", "OOZIE::Oozie Server status check");
-  define ("WEBHCAT_SERVICE_CHECK", "WEBHCAT::WebHCat Server status check");
-  define ("PUPPET_SERVICE_CHECK", "PUPPET::Puppet agent down");
-
-  /* If SUSE, the status file is under /var/lib/nagios */
-  if (file_exists("/etc/SuSE-release")) {
-    $status_file="/var/lib/nagios/status.dat";
-  } else {
-    $status_file="/var/nagios/status.dat";
-  }
-
-  $q1="";
-  if (array_key_exists('q1', $_GET)) {
-    $q1=$_GET["q1"];
-  }
-  $q2="";
-  if (array_key_exists('q2', $_GET)) {
-    $q2=$_GET["q2"];
-  }
-  $alert_type="";
-  if (array_key_exists('alert_type', $_GET)) {
-    $alert_type=$_GET["alert_type"];
-  }
-  $host="";
-  if (array_key_exists('host_name', $_GET)) {
-    $host=$_GET["host_name"];
-  }
-  $indent="";
-  if (array_key_exists('indent', $_GET)) {
-    $indent=$_GET["indent"];
-  }
-
-  $result = array();
-  $status_file_content = file_get_contents($status_file);
-
-  if ($q1 == "alerts") {
-    /* Add the service status object to result array */
-    $result['alerts'] = query_alerts ($status_file_content, $alert_type, $host);
-  }
-
-  if ($q2 == "hosts") {
-    /* Add the service status object to result array */
-    $result['hosts'] = query_hosts ($status_file_content, $alert_type, $host);
-  }
-
-  /* Add host count object to the results */
-  $result['hostcounts'] = query_host_count ($status_file_content);
-
-  /* Add services runtime states */
-  $result['servicestates'] = query_service_states ($status_file_content);
-
-  /* Return results */
-  if ($indent == "true") {
-    hdp_mon_generate_response(indent(json_encode($result)));
-  } else {
-    hdp_mon_generate_response(json_encode($result));
-  }
-
-  # Functions
-  /* Query service states */
-  function query_service_states ($status_file_content) {
-    $num_matches = preg_match_all("/servicestatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $services_object = array ();
-    $services_object["PUPPET"] = 0;
-    foreach ($matches[0] as $object) {
-      if (getParameter($object, "service_description") == HDFS_SERVICE_CHECK) {
-        $services_object["HDFS"] = getParameter($object, "last_hard_state");
-        if ($services_object["HDFS"] >= 1) {
-          $services_object["HDFS"] = 1;
-        }
-        continue;
-      }
-      if (getParameter($object, "service_description") == MAPREDUCE_SERVICE_CHECK) {
-        $services_object["MAPREDUCE"] = getParameter($object, "last_hard_state");
-        if ($services_object["MAPREDUCE"] >= 1) {
-          $services_object["MAPREDUCE"] = 1;
-        }
-        continue;
-      }
-      if (getParameter($object, "service_description") == HBASE_SERVICE_CHECK) {
-        $services_object["HBASE"] = getParameter($object, "last_hard_state");
-        if ($services_object["HBASE"] >= 1) {
-          $services_object["HBASE"] = 1;
-        }
-        continue;
-      }
-      if (getParameter($object, "service_description") == HIVE_METASTORE_SERVICE_CHECK) {
-        $services_object["HIVE-METASTORE"] = getParameter($object, "last_hard_state");
-        if ($services_object["HIVE-METASTORE"] >= 1) {
-          $services_object["HIVE-METASTORE"] = 1;
-        }
-        continue;
-      }
-      if (getParameter($object, "service_description") == OOZIE_SERVICE_CHECK) {
-        $services_object["OOZIE"] = getParameter($object, "last_hard_state");
-        if ($services_object["OOZIE"] >= 1) {
-          $services_object["OOZIE"] = 1;
-        }
-        continue;
-      }
-      if (getParameter($object, "service_description") == WEBHCAT_SERVICE_CHECK) {
-        $services_object["WEBHCAT"] = getParameter($object, "last_hard_state");
-        if ($services_object["WEBHCAT"] >= 1) {
-          $services_object["WEBHCAT"] = 1;
-        }
-        continue;
-      }
-      /* In the case of ZooKeeper, the service is treated as running if the
-       * alert is OK or WARNING (i.e. at least some ZooKeeper instances are
-       * still running).
-       */
-      if (getParameter($object, "service_description") == ZOOKEEPER_SERVICE_CHECK) {
-        $services_object["ZOOKEEPER"] = getParameter($object, "last_hard_state");
-        if ($services_object["ZOOKEEPER"] <= 1) {
-          $services_object["ZOOKEEPER"] = 0;
-        }
-        continue;
-      }
-      if (getParameter($object, "service_description") == PUPPET_SERVICE_CHECK) {
-        $state = getParameter($object, "last_hard_state");
-        if ($state >= 1) {
-          $services_object["PUPPET"]++;
-        }
-        continue;
-      }
-    }
-    if ($services_object["PUPPET"] >= 1) {
-      $services_object["PUPPET"] = 1;
-    }
-    return $services_object;
-  }
-
-  /* Query host count */
-  function query_host_count ($status_file_content) {
-    $num_matches = preg_match_all("/hoststatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $hostcounts_object = array ();
-    $up_hosts = 0;
-    $down_hosts = 0;
-    foreach ($matches[0] as $object) {
-      if (getParameter($object, "last_hard_state") != ok) {
-        $down_hosts++;
-      } else {
-        $up_hosts++;
-      }
-    }
-    $hostcounts_object['up_hosts'] = $up_hosts;
-    $hostcounts_object['down_hosts'] = $down_hosts;
-    return $hostcounts_object;
-  }
-
-  /* Query Hosts */
-  function query_hosts ($status_file_content, $alert_type, $host) {
-    $hoststatus_attributes = array ("host_name", "current_state", "last_hard_state",
-                              "plugin_output", "last_check", "current_attempt",
-                              "last_hard_state_change", "last_time_up", "last_time_down",
-                              "last_time_unreachable", "is_flapping");
-
-    $num_matches = preg_match_all("/hoststatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $hosts_objects = array ();
-    $i = 0;
-    foreach ($matches[0] as $object) {
-      $hoststatus = array ();
-      $chost = getParameter($object, "host_name");
-      if (empty($host) || $chost == $host) {
-        foreach ($hoststatus_attributes as $attrib) {
-          $hoststatus[$attrib] = htmlentities(getParameter($object, $attrib), ENT_COMPAT);
-        }
-        $hoststatus['alerts'] = query_alerts ($status_file_content, $alert_type, $chost);
-        if (!empty($host)) {
-          $hosts_objects[$i] = $hoststatus;
-          $i++;
-          break;
-        }
-      }
-      if (!empty($hoststatus)) {
-        $hosts_objects[$i] = $hoststatus;
-        $i++;
-      }
-    }
-    /* echo "COUNT : " . count ($services_objects) . "\n"; */
-    return $hosts_objects;
-  }
-
-  /* Query Alerts */
-  function query_alerts ($status_file_content, $alert_type, $host) {
-
-    $servicestatus_attributes = array ("service_description", "host_name", "current_attempt",
-                                       "current_state", "plugin_output", "last_hard_state_change", "last_hard_state",
-                                       "last_time_ok", "last_time_warning", "last_time_unknown",
-                                       "last_time_critical", "is_flapping", "last_check");
-
-    $num_matches = preg_match_all("/servicestatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    #echo $matches[0][0] . ", " . $matches[0][1] . "\n";
-    #echo $matches[1][0] . ", " . $matches[1][1] . "\n";
-    $services_objects = array ();
-    $i = 0;
-    foreach ($matches[0] as $object) {
-      $servicestatus = array ();
-      switch ($alert_type) {
-      case "all":
-        if (empty($host) || getParameter($object, "host_name") == $host) {
-          foreach ($servicestatus_attributes as $attrib) {
-            $servicestatus[$attrib] = htmlentities(getParameter($object, $attrib), ENT_COMPAT);
-          }
-          $servicestatus['service_type'] = get_service_type($servicestatus['service_description']);
-          $srv_desc = explode ("::",$servicestatus['service_description'],2);
-
-          $servicestatus['service_description'] = $srv_desc[1];
-        }
-        break;
-      case "nok":
-        if (getParameter($object, "last_hard_state") != ok &&
-           (empty($host) || getParameter($object, "host_name") == $host)) {
-          foreach ($servicestatus_attributes as $attrib) {
-            $servicestatus[$attrib] = htmlentities(getParameter($object, $attrib), ENT_COMPAT);
-          }
-          $servicestatus['service_type'] = get_service_type($servicestatus['service_description']);
-          $srv_desc = explode ("::",$servicestatus['service_description'],2);
-          $servicestatus['service_description'] = $srv_desc[1];
-        }
-        break;
-      case "ok":
-        if (getParameter($object, "last_hard_state") == ok &&
-           (empty($host) || getParameter($object, "host_name") == $host)) {
-          foreach ($servicestatus_attributes as $attrib) {
-            $servicestatus[$attrib] = htmlentities(getParameter($object, $attrib), ENT_COMPAT);
-          }
-          $servicestatus['service_type'] = get_service_type($servicestatus['service_description']);
-          $srv_desc = explode ("::",$servicestatus['service_description'],2);
-          $servicestatus['service_description'] = $srv_desc[1];
-        }
-        break;
-      case "warn":
-        if (getParameter($object, "last_hard_state") == warn &&
-           (empty($host) || getParameter($object, "host_name") == $host)) {
-          foreach ($servicestatus_attributes as $attrib) {
-            $servicestatus[$attrib] = htmlentities(getParameter($object, $attrib), ENT_COMPAT);
-          }
-          $servicestatus['service_type'] = get_service_type($servicestatus['service_description']);
-          $srv_desc = explode ("::",$servicestatus['service_description'],2);
-          $servicestatus['service_description'] = $srv_desc[1];
-        }
-        break;
-      case "critical":
-        if (getParameter($object, "last_hard_state") == critical &&
-           (empty($host) || getParameter($object, "host_name") == $host)) {
-          foreach ($servicestatus_attributes as $attrib) {
-            $servicestatus[$attrib] = htmlentities(getParameter($object, $attrib), ENT_COMPAT);
-          }
-          $servicestatus['service_type'] = get_service_type($servicestatus['service_description']);
-          $srv_desc = explode ("::",$servicestatus['service_description'],2);
-          $servicestatus['service_description'] = $srv_desc[1];
-        }
-        break;
-      }
-      if (!empty($servicestatus)) {
-        $services_objects[$i] = $servicestatus;
-        $i++;
-      }
-    }
-    /* echo "COUNT : " . count ($services_objects) . "\n"; */
-    return $services_objects;
-  }
-
-  function get_service_type($service_description)
-  {
-    $pieces = explode("::", $service_description);
-    switch ($pieces[0]) {
-      case "DATANODE":
-      case "NAMENODE":
-        $pieces[0] = "HDFS";
-        break;
-      case "JOBTRACKER":
-      case "TASKTRACKER":
-        $pieces[0] = "MAPREDUCE";
-        break;
-      case "HBASEMASTER":
-      case "REGIONSERVER":
-        $pieces[0] = "HBASE";
-        break;
-      case "HIVE-METASTORE":
-        $pieces[0] = "HIVE";
-        break;
-      case "ZKSERVERS":
-        $pieces[0] = "ZOOKEEPER";
-        break;
-      case "NAGIOS":
-      case "HDFS":
-      case "MAPREDUCE":
-      case "HBASE":
-      case "ZOOKEEPER":
-      case "OOZIE":
-      case "WEBHCAT":
-      case "GANGLIA":
-      case "PUPPET":
-        break;
-      default:
-        $pieces[0] = "UNKNOWN";
-    }
-    return $pieces[0];
-  }
-
-  function getParameter($object, $key)
-  {
-    $pattern="/\s" . $key . "[\s= ]*([\S, ]*)\n/";
-    $num_mat = preg_match($pattern, $object, $matches);
-    $value = "";
-    if ($num_mat) {
-      $value = $matches[1];
-    }
-    return $value;
-  }
-
-function indent($json) {
-
-    $result      = '';
-    $pos         = 0;
-    $strLen      = strlen($json);
-    $indentStr   = '  ';
-    $newLine     = "\n";
-    $prevChar    = '';
-    $outOfQuotes = true;
-
-    for ($i = 0; $i < $strLen; $i++) {
-
-        // Grab the next character in the string.
-        $char = substr($json, $i, 1);
-
-        // Are we inside a quoted string?
-        if ($char == '"' && $prevChar != '\\') {
-            $outOfQuotes = !$outOfQuotes;
-
-        // If this character is the end of an element,
-        // output a new line and indent the next line.
-        } else if(($char == '}' || $char == ']') && $outOfQuotes) {
-            $result .= $newLine;
-            $pos --;
-            for ($j=0; $j<$pos; $j++) {
-                $result .= $indentStr;
-            }
-        }
-
-        // Add the character to the result string.
-        $result .= $char;
-
-        // If the last character was the beginning of an element,
-        // output a new line and indent the next line.
-        if (($char == ',' || $char == '{' || $char == '[') && $outOfQuotes) {
-            $result .= $newLine;
-            if ($char == '{' || $char == '[') {
-                $pos ++;
-            }
-
-            for ($j = 0; $j < $pos; $j++) {
-                $result .= $indentStr;
-            }
-        }
-
-        $prevChar = $char;
-    }
-
-    return $result;
-}
-?>
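
A side note on indent(): PHP 5.4 and later ships a built-in pretty printer that makes the hand-rolled version unnecessary; a minimal sketch with illustrative data:

    <?php
      // Sketch: on PHP >= 5.4, JSON_PRETTY_PRINT replaces indent().
      $result = array('hostcounts' => array('up_hosts' => 3, 'down_hosts' => 1));
      echo json_encode($result, JSON_PRETTY_PRINT);
    ?>
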
diff --git a/branch-1.2/contrib/addons/test/dataServices/jmx/data/cluster_configuration.json b/branch-1.2/contrib/addons/test/dataServices/jmx/data/cluster_configuration.json
deleted file mode 100644
index c49478c..0000000
--- a/branch-1.2/contrib/addons/test/dataServices/jmx/data/cluster_configuration.json
+++ /dev/null
@@ -1,93 +0,0 @@
-{
-  "config_version": 1,
-  "stack_version": "1.0.2",
-  "overall": {
-    "cluster_name": "MyHDPCluster",
-    "dashboard_host": "dashboard_host",
-    "dashboard_port": 80, 
-    "ganglia" : {
-      "web_host": "gangliaweb_host",
-      "web_port": 80,
-      "web_root": "/var/www/ganglia2",
-      "grid_name": "HDP_GRID"
-    },
-    "nagios": {
-      "nagiosserver_host": "nagiosserver_host",
-      "nagiosserver_port": 80,
-      "web_root": "/nagios"
-    },
-    "jmx": {
-      "timeout": 1
-    },
-    "services": {
-      "HDFS" : [
-        {
-          "installed": true,
-          "name": "HDFS",
-          "namenode_host": "namenode",
-          "namenode_port": 50070,
-          "snamenode_host": "snamenode",
-          "snamenode_port": 50071,
-          "total_datanodes": 10,
-          "ganglia_clusters": {
-            "slaves": "HDPSlaves",
-            "namenode": "HDPNameNode"      
-          }
-        }
-      ],
-      "MAPREDUCE" : [        
-        {
-          "installed": true,
-          "name": "MAPREDUCE",
-          "jobtracker_host": "jobtracker",
-          "jobtracker_port": 50030,
-          "total_tasktrackers": 20,
-          "jobhistory_host": "jobhistory_host",
-          "jobhistory_port": 52890,
-          "ganglia_clusters": {
-            "slaves": "HDPSlaves",
-            "jobtracker": "HDPJobTracker"
-          },
-          "scheduler_type": "org.foo.CapacityTaskScheduler"
-        }
-      ],
-      "HBASE" : [  
-        {
-          "installed": true,
-          "name": "HBASE",
-          "hbasemaster_host": "hbasemaster",
-          "hbasemaster_port": 60010,
-          "total_regionservers": 30,
-          "ganglia_clusters": {
-            "slaves": "HDPSlaves",
-            "hbasemaster": "HDPHBaseMaster"
-          }
-        }
-      ],
-      "ZOOKEEPER" : [
-        {
-          "installed": false,
-          "name": "ZOOKEEPER"
-        }
-      ],
-      "HIVE-METASTORE" : [
-        {
-          "installed": true,
-          "name": "HIVE-METASTORE"
-        }
-      ],
-      "TEMPLETON" : [
-        {
-          "installed": true,
-          "name": "TEMPLETON"
-        }
-      ],
-      "OOZIE" : [
-        {
-          "installed": true,
-          "name": "OOZIE"
-        }
-      ]
-    }
-  }
-}
diff --git a/branch-1.2/contrib/addons/test/dataServices/jmx/data/cluster_configuration.json.nohbase b/branch-1.2/contrib/addons/test/dataServices/jmx/data/cluster_configuration.json.nohbase
deleted file mode 100644
index 87b04e9..0000000
--- a/branch-1.2/contrib/addons/test/dataServices/jmx/data/cluster_configuration.json.nohbase
+++ /dev/null
@@ -1,93 +0,0 @@
-{
-  "config_version": 1,
-  "stack_version": "1.0.2",
-  "overall": {
-    "cluster_name": "MyHDPCluster",
-    "dashboard_host": "dashboard_host",
-    "dashboard_port": 80, 
-    "ganglia" : {
-      "web_host": "gangliaweb_host",
-      "web_port": 80,
-      "web_root": "/var/www/ganglia2",
-      "grid_name": "HDP_GRID"
-    },
-    "nagios": {
-      "nagiosserver_host": "nagiosserver_host",
-      "nagiosserver_port": 80,
-      "web_root": "/nagios"
-    },
-    "jmx": {
-      "timeout": 1
-    },
-    "services": {
-      "HDFS" : [
-        {
-          "installed": true,
-          "name": "HDFS",
-          "namenode_host": "namenode",
-          "namenode_port": 50070,
-          "snamenode_host": "snamenode",
-          "snamenode_port": 50071,
-          "total_datanodes": 10,
-          "ganglia_clusters": {
-            "slaves": "HDPSlaves",
-            "namenode": "HDPNameNode"      
-          }
-        }
-      ],
-      "MAPREDUCE" : [        
-        {
-          "installed": true,
-          "name": "MAPREDUCE",
-          "jobtracker_host": "jobtracker",
-          "jobtracker_port": 50030,
-          "total_tasktrackers": 20,
-          "jobhistory_host": "jobhistory_host",
-          "jobhistory_port": 52890,
-          "ganglia_clusters": {
-            "slaves": "HDPSlaves",
-            "jobtracker": "HDPJobTracker"
-          },
-          "scheduler_type": "org.foo.CapacityTaskScheduler"
-        }
-      ],
-      "HBASE" : [  
-        {
-          "installed": false,
-          "name": "HBASE",
-          "hbasemaster_host": "hbasemaster",
-          "hbasemaster_port": 60010,
-          "total_regionservers": 30,
-          "ganglia_clusters": {
-            "slaves": "HDPSlaves",
-            "hbasemaster": "HDPHBaseMaster"
-          }
-        }
-      ],
-      "ZOOKEEPER" : [
-        {
-          "installed": true,
-          "name": "ZOOKEEPER"
-        }
-      ],
-      "HIVE-METASTORE" : [
-        {
-          "installed": false,
-          "name": "HIVE-METASTORE"
-        }
-      ],
-      "TEMPLETON" : [
-        {
-          "installed": false,
-          "name": "TEMPLETON"
-        }
-      ],
-      "OOZIE" : [
-        {
-          "installed": true,
-          "name": "OOZIE"
-        }
-      ]
-    }
-  }
-}
diff --git a/branch-1.2/contrib/addons/test/dataServices/jmx/data/sample_hbasemaster_jmx.json b/branch-1.2/contrib/addons/test/dataServices/jmx/data/sample_hbasemaster_jmx.json
deleted file mode 100644
index 08e1472..0000000
--- a/branch-1.2/contrib/addons/test/dataServices/jmx/data/sample_hbasemaster_jmx.json
+++ /dev/null
@@ -1,1652 +0,0 @@
-{
-  "beans" : [ {
-    "name" : "java.lang:type=Memory",
-    "modelerType" : "sun.management.MemoryImpl",
-    "Verbose" : false,
-    "ObjectPendingFinalizationCount" : 0,
-    "HeapMemoryUsage" : {
-      "committed" : 85000192,
-      "init" : 0,
-      "max" : 1035468800,
-      "used" : 32946880
-    },
-    "NonHeapMemoryUsage" : {
-      "committed" : 29233152,
-      "init" : 24317952,
-      "max" : 136314880,
-      "used" : 28124056
-    }
-  }, {
-    "name" : "hadoop:service=HBase,name=RPCStatistics-61258",
-    "modelerType" : "org.apache.hadoop.hbase.ipc.HBaseRPCStatistics",
-    "enableTableNumOps" : 0,
-    "enableTableAvgTime" : 0,
-    "enableTableMinTime" : -1,
-    "enableTableMaxTime" : 0,
-    "assignNumOps" : 0,
-    "assignAvgTime" : 0,
-    "assignMinTime" : -1,
-    "assignMaxTime" : 0,
-    "flushRegion.aboveOneSec.NumOps" : 0,
-    "flushRegion.aboveOneSec.AvgTime" : 0,
-    "flushRegion.aboveOneSec.MinTime" : -1,
-    "flushRegion.aboveOneSec.MaxTime" : 0,
-    "incrementColumnValue.aboveOneSec.NumOps" : 0,
-    "incrementColumnValue.aboveOneSec.AvgTime" : 0,
-    "incrementColumnValue.aboveOneSec.MinTime" : -1,
-    "incrementColumnValue.aboveOneSec.MaxTime" : 0,
-    "getZooKeeper.aboveOneSec.NumOps" : 0,
-    "getZooKeeper.aboveOneSec.AvgTime" : 0,
-    "getZooKeeper.aboveOneSec.MinTime" : -1,
-    "getZooKeeper.aboveOneSec.MaxTime" : 0,
-    "splitRegion.aboveOneSec.NumOps" : 0,
-    "splitRegion.aboveOneSec.AvgTime" : 0,
-    "splitRegion.aboveOneSec.MinTime" : -1,
-    "splitRegion.aboveOneSec.MaxTime" : 0,
-    "RpcSlowResponseNumOps" : 0,
-    "RpcSlowResponseAvgTime" : 0,
-    "RpcSlowResponseMinTime" : -1,
-    "RpcSlowResponseMaxTime" : 0,
-    "getProtocolVersion.aboveOneSec.NumOps" : 0,
-    "getProtocolVersion.aboveOneSec.AvgTime" : 0,
-    "getProtocolVersion.aboveOneSec.MinTime" : -1,
-    "getProtocolVersion.aboveOneSec.MaxTime" : 0,
-    "getCatalogTracker.aboveOneSec.NumOps" : 0,
-    "getCatalogTracker.aboveOneSec.AvgTime" : 0,
-    "getCatalogTracker.aboveOneSec.MinTime" : -1,
-    "getCatalogTracker.aboveOneSec.MaxTime" : 0,
-    "rollHLogWriter.aboveOneSec.NumOps" : 0,
-    "rollHLogWriter.aboveOneSec.AvgTime" : 0,
-    "rollHLogWriter.aboveOneSec.MinTime" : -1,
-    "rollHLogWriter.aboveOneSec.MaxTime" : 0,
-    "delete.aboveOneSec.NumOps" : 0,
-    "delete.aboveOneSec.AvgTime" : 0,
-    "delete.aboveOneSec.MinTime" : -1,
-    "delete.aboveOneSec.MaxTime" : 0,
-    "bulkLoadHFiles.aboveOneSec.NumOps" : 0,
-    "bulkLoadHFiles.aboveOneSec.AvgTime" : 0,
-    "bulkLoadHFiles.aboveOneSec.MinTime" : -1,
-    "bulkLoadHFiles.aboveOneSec.MaxTime" : 0,
-    "moveNumOps" : 0,
-    "moveAvgTime" : 0,
-    "moveMinTime" : -1,
-    "moveMaxTime" : 0,
-    "getServerNameNumOps" : 0,
-    "getServerNameAvgTime" : 0,
-    "getServerNameMinTime" : -1,
-    "getServerNameMaxTime" : 0,
-    "openRegionNumOps" : 0,
-    "openRegionAvgTime" : 0,
-    "openRegionMinTime" : 1,
-    "openRegionMaxTime" : 3,
-    "getClosestRowBefore.aboveOneSec.NumOps" : 0,
-    "getClosestRowBefore.aboveOneSec.AvgTime" : 0,
-    "getClosestRowBefore.aboveOneSec.MinTime" : -1,
-    "getClosestRowBefore.aboveOneSec.MaxTime" : 0,
-    "addToOnlineRegionsNumOps" : 0,
-    "addToOnlineRegionsAvgTime" : 0,
-    "addToOnlineRegionsMinTime" : -1,
-    "addToOnlineRegionsMaxTime" : 0,
-    "incrementNumOps" : 0,
-    "incrementAvgTime" : 0,
-    "incrementMinTime" : -1,
-    "incrementMaxTime" : 0,
-    "increment.aboveOneSec.NumOps" : 0,
-    "increment.aboveOneSec.AvgTime" : 0,
-    "increment.aboveOneSec.MinTime" : -1,
-    "increment.aboveOneSec.MaxTime" : 0,
-    "deleteTableNumOps" : 0,
-    "deleteTableAvgTime" : 0,
-    "deleteTableMinTime" : -1,
-    "deleteTableMaxTime" : 0,
-    "execCoprocessor.aboveOneSec.NumOps" : 0,
-    "execCoprocessor.aboveOneSec.AvgTime" : 0,
-    "execCoprocessor.aboveOneSec.MinTime" : -1,
-    "execCoprocessor.aboveOneSec.MaxTime" : 0,
-    "checkOOME.aboveOneSec.NumOps" : 0,
-    "checkOOME.aboveOneSec.AvgTime" : 0,
-    "checkOOME.aboveOneSec.MinTime" : -1,
-    "checkOOME.aboveOneSec.MaxTime" : 0,
-    "get.aboveOneSec.NumOps" : 0,
-    "get.aboveOneSec.AvgTime" : 0,
-    "get.aboveOneSec.MinTime" : -1,
-    "get.aboveOneSec.MaxTime" : 0,
-    "getCatalogTrackerNumOps" : 0,
-    "getCatalogTrackerAvgTime" : 0,
-    "getCatalogTrackerMinTime" : -1,
-    "getCatalogTrackerMaxTime" : 0,
-    "getHServerInfo.aboveOneSec.NumOps" : 0,
-    "getHServerInfo.aboveOneSec.AvgTime" : 0,
-    "getHServerInfo.aboveOneSec.MinTime" : -1,
-    "getHServerInfo.aboveOneSec.MaxTime" : 0,
-    "execCoprocessorNumOps" : 0,
-    "execCoprocessorAvgTime" : 0,
-    "execCoprocessorMinTime" : -1,
-    "execCoprocessorMaxTime" : 0,
-    "openScanner.aboveOneSec.NumOps" : 0,
-    "openScanner.aboveOneSec.AvgTime" : 0,
-    "openScanner.aboveOneSec.MinTime" : -1,
-    "openScanner.aboveOneSec.MaxTime" : 0,
-    "openRegions.aboveOneSec.NumOps" : 0,
-    "openRegions.aboveOneSec.AvgTime" : 0,
-    "openRegions.aboveOneSec.MinTime" : -1,
-    "openRegions.aboveOneSec.MaxTime" : 0,
-    "getHTableDescriptorsNumOps" : 0,
-    "getHTableDescriptorsAvgTime" : 0,
-    "getHTableDescriptorsMinTime" : -1,
-    "getHTableDescriptorsMaxTime" : 0,
-    "addColumnNumOps" : 0,
-    "addColumnAvgTime" : 0,
-    "addColumnMinTime" : -1,
-    "addColumnMaxTime" : 0,
-    "abort.aboveOneSec.NumOps" : 0,
-    "abort.aboveOneSec.AvgTime" : 0,
-    "abort.aboveOneSec.MinTime" : -1,
-    "abort.aboveOneSec.MaxTime" : 0,
-    "getServerName.aboveOneSec.NumOps" : 0,
-    "getServerName.aboveOneSec.AvgTime" : 0,
-    "getServerName.aboveOneSec.MinTime" : -1,
-    "getServerName.aboveOneSec.MaxTime" : 0,
-    "multiNumOps" : 0,
-    "multiAvgTime" : 0,
-    "multiMinTime" : 5,
-    "multiMaxTime" : 5,
-    "closeRegionNumOps" : 0,
-    "closeRegionAvgTime" : 0,
-    "closeRegionMinTime" : -1,
-    "closeRegionMaxTime" : 0,
-    "disableTableNumOps" : 0,
-    "disableTableAvgTime" : 0,
-    "disableTableMinTime" : -1,
-    "disableTableMaxTime" : 0,
-    "next.aboveOneSec.NumOps" : 0,
-    "next.aboveOneSec.AvgTime" : 0,
-    "next.aboveOneSec.MinTime" : -1,
-    "next.aboveOneSec.MaxTime" : 0,
-    "bulkLoadHFilesNumOps" : 0,
-    "bulkLoadHFilesAvgTime" : 0,
-    "bulkLoadHFilesMinTime" : -1,
-    "bulkLoadHFilesMaxTime" : 0,
-    "putNumOps" : 0,
-    "putAvgTime" : 0,
-    "putMinTime" : -1,
-    "putMaxTime" : 0,
-    "createTableNumOps" : 0,
-    "createTableAvgTime" : 0,
-    "createTableMinTime" : -1,
-    "createTableMaxTime" : 0,
-    "stop.aboveOneSec.NumOps" : 0,
-    "stop.aboveOneSec.AvgTime" : 0,
-    "stop.aboveOneSec.MinTime" : -1,
-    "stop.aboveOneSec.MaxTime" : 0,
-    "unlockRowNumOps" : 0,
-    "unlockRowAvgTime" : 0,
-    "unlockRowMinTime" : -1,
-    "unlockRowMaxTime" : 0,
-    "nextNumOps" : 0,
-    "nextAvgTime" : 0,
-    "nextMinTime" : 0,
-    "nextMaxTime" : 1,
-    "reportRSFatalErrorNumOps" : 0,
-    "reportRSFatalErrorAvgTime" : 0,
-    "reportRSFatalErrorMinTime" : -1,
-    "reportRSFatalErrorMaxTime" : 0,
-    "rpcAuthenticationFailures" : 0,
-    "getRegionInfoNumOps" : 0,
-    "getRegionInfoAvgTime" : 0,
-    "getRegionInfoMinTime" : 0,
-    "getRegionInfoMaxTime" : 0,
-    "openScannerNumOps" : 0,
-    "openScannerAvgTime" : 0,
-    "openScannerMinTime" : 0,
-    "openScannerMaxTime" : 1,
-    "getAlterStatusNumOps" : 0,
-    "getAlterStatusAvgTime" : 0,
-    "getAlterStatusMinTime" : -1,
-    "getAlterStatusMaxTime" : 0,
-    "deleteNumOps" : 0,
-    "deleteAvgTime" : 0,
-    "deleteMinTime" : -1,
-    "deleteMaxTime" : 0,
-    "getFromOnlineRegionsNumOps" : 0,
-    "getFromOnlineRegionsAvgTime" : 0,
-    "getFromOnlineRegionsMinTime" : -1,
-    "getFromOnlineRegionsMaxTime" : 0,
-    "RpcProcessingTimeNumOps" : 0,
-    "RpcProcessingTimeAvgTime" : 0,
-    "RpcProcessingTimeMinTime" : 0,
-    "RpcProcessingTimeMaxTime" : 6,
-    "isStoppedNumOps" : 0,
-    "isStoppedAvgTime" : 0,
-    "isStoppedMinTime" : -1,
-    "isStoppedMaxTime" : 0,
-    "shutdownNumOps" : 0,
-    "shutdownAvgTime" : 0,
-    "shutdownMinTime" : -1,
-    "shutdownMaxTime" : 0,
-    "openRegionsNumOps" : 0,
-    "openRegionsAvgTime" : 0,
-    "openRegionsMinTime" : -1,
-    "openRegionsMaxTime" : 0,
-    "getClosestRowBeforeNumOps" : 0,
-    "getClosestRowBeforeAvgTime" : 0,
-    "getClosestRowBeforeMinTime" : 4,
-    "getClosestRowBeforeMaxTime" : 4,
-    "getHServerInfoNumOps" : 0,
-    "getHServerInfoAvgTime" : 0,
-    "getHServerInfoMinTime" : -1,
-    "getHServerInfoMaxTime" : 0,
-    "getProtocolSignatureNumOps" : 0,
-    "getProtocolSignatureAvgTime" : 0,
-    "getProtocolSignatureMinTime" : -1,
-    "getProtocolSignatureMaxTime" : 0,
-    "getRegionInfo.aboveOneSec.NumOps" : 0,
-    "getRegionInfo.aboveOneSec.AvgTime" : 0,
-    "getRegionInfo.aboveOneSec.MinTime" : -1,
-    "getRegionInfo.aboveOneSec.MaxTime" : 0,
-    "getZooKeeperNumOps" : 0,
-    "getZooKeeperAvgTime" : 0,
-    "getZooKeeperMinTime" : -1,
-    "getZooKeeperMaxTime" : 0,
-    "getOnlineRegions.aboveOneSec.NumOps" : 0,
-    "getOnlineRegions.aboveOneSec.AvgTime" : 0,
-    "getOnlineRegions.aboveOneSec.MinTime" : -1,
-    "getOnlineRegions.aboveOneSec.MaxTime" : 0,
-    "removeFromOnlineRegions.aboveOneSec.NumOps" : 0,
-    "removeFromOnlineRegions.aboveOneSec.AvgTime" : 0,
-    "removeFromOnlineRegions.aboveOneSec.MinTime" : -1,
-    "removeFromOnlineRegions.aboveOneSec.MaxTime" : 0,
-    "SentBytes" : 0,
-    "existsNumOps" : 0,
-    "existsAvgTime" : 0,
-    "existsMinTime" : -1,
-    "existsMaxTime" : 0,
-    "getFromOnlineRegions.aboveOneSec.NumOps" : 0,
-    "getFromOnlineRegions.aboveOneSec.AvgTime" : 0,
-    "getFromOnlineRegions.aboveOneSec.MinTime" : -1,
-    "getFromOnlineRegions.aboveOneSec.MaxTime" : 0,
-    "unlockRow.aboveOneSec.NumOps" : 0,
-    "unlockRow.aboveOneSec.AvgTime" : 0,
-    "unlockRow.aboveOneSec.MinTime" : -1,
-    "unlockRow.aboveOneSec.MaxTime" : 0,
-    "isStopped.aboveOneSec.NumOps" : 0,
-    "isStopped.aboveOneSec.AvgTime" : 0,
-    "isStopped.aboveOneSec.MinTime" : -1,
-    "isStopped.aboveOneSec.MaxTime" : 0,
-    "regionServerStartupNumOps" : 0,
-    "regionServerStartupAvgTime" : 0,
-    "regionServerStartupMinTime" : -1,
-    "regionServerStartupMaxTime" : 0,
-    "compactRegionNumOps" : 0,
-    "compactRegionAvgTime" : 0,
-    "compactRegionMinTime" : -1,
-    "compactRegionMaxTime" : 0,
-    "abortNumOps" : 0,
-    "abortAvgTime" : 0,
-    "abortMinTime" : -1,
-    "abortMaxTime" : 0,
-    "balanceSwitchNumOps" : 0,
-    "balanceSwitchAvgTime" : 0,
-    "balanceSwitchMinTime" : -1,
-    "balanceSwitchMaxTime" : 0,
-    "rollHLogWriterNumOps" : 0,
-    "rollHLogWriterAvgTime" : 0,
-    "rollHLogWriterMinTime" : -1,
-    "rollHLogWriterMaxTime" : 0,
-    "splitRegionNumOps" : 0,
-    "splitRegionAvgTime" : 0,
-    "splitRegionMinTime" : -1,
-    "splitRegionMaxTime" : 0,
-    "ReceivedBytes" : 0,
-    "getOnlineRegionsNumOps" : 0,
-    "getOnlineRegionsAvgTime" : 0,
-    "getOnlineRegionsMinTime" : -1,
-    "getOnlineRegionsMaxTime" : 0,
-    "closeNumOps" : 0,
-    "closeAvgTime" : 0,
-    "closeMinTime" : 0,
-    "closeMaxTime" : 0,
-    "balanceNumOps" : 0,
-    "balanceAvgTime" : 0,
-    "balanceMinTime" : -1,
-    "balanceMaxTime" : 0,
-    "openRegion.aboveOneSec.NumOps" : 0,
-    "openRegion.aboveOneSec.AvgTime" : 0,
-    "openRegion.aboveOneSec.MinTime" : -1,
-    "openRegion.aboveOneSec.MaxTime" : 0,
-    "getProtocolSignature.aboveOneSec.NumOps" : 0,
-    "getProtocolSignature.aboveOneSec.AvgTime" : 0,
-    "getProtocolSignature.aboveOneSec.MinTime" : -1,
-    "getProtocolSignature.aboveOneSec.MaxTime" : 0,
-    "isAborted.aboveOneSec.NumOps" : 0,
-    "isAborted.aboveOneSec.AvgTime" : 0,
-    "isAborted.aboveOneSec.MinTime" : -1,
-    "isAborted.aboveOneSec.MaxTime" : 0,
-    "getClusterStatusNumOps" : 0,
-    "getClusterStatusAvgTime" : 0,
-    "getClusterStatusMinTime" : -1,
-    "getClusterStatusMaxTime" : 0,
-    "isAbortedNumOps" : 0,
-    "isAbortedAvgTime" : 0,
-    "isAbortedMinTime" : -1,
-    "isAbortedMaxTime" : 0,
-    "exists.aboveOneSec.NumOps" : 0,
-    "exists.aboveOneSec.AvgTime" : 0,
-    "exists.aboveOneSec.MinTime" : -1,
-    "exists.aboveOneSec.MaxTime" : 0,
-    "getBlockCacheColumnFamilySummariesNumOps" : 0,
-    "getBlockCacheColumnFamilySummariesAvgTime" : 0,
-    "getBlockCacheColumnFamilySummariesMinTime" : -1,
-    "getBlockCacheColumnFamilySummariesMaxTime" : 0,
-    "lockRow.aboveOneSec.NumOps" : 0,
-    "lockRow.aboveOneSec.AvgTime" : 0,
-    "lockRow.aboveOneSec.MinTime" : -1,
-    "lockRow.aboveOneSec.MaxTime" : 0,
-    "getConfiguration.aboveOneSec.NumOps" : 0,
-    "getConfiguration.aboveOneSec.AvgTime" : 0,
-    "getConfiguration.aboveOneSec.MinTime" : -1,
-    "getConfiguration.aboveOneSec.MaxTime" : 0,
-    "getNumOps" : 0,
-    "getAvgTime" : 0,
-    "getMinTime" : 1,
-    "getMaxTime" : 6,
-    "stopMasterNumOps" : 0,
-    "stopMasterAvgTime" : 0,
-    "stopMasterMinTime" : -1,
-    "stopMasterMaxTime" : 0,
-    "closeRegion.aboveOneSec.NumOps" : 0,
-    "closeRegion.aboveOneSec.AvgTime" : 0,
-    "closeRegion.aboveOneSec.MinTime" : -1,
-    "closeRegion.aboveOneSec.MaxTime" : 0,
-    "put.aboveOneSec.NumOps" : 0,
-    "put.aboveOneSec.AvgTime" : 0,
-    "put.aboveOneSec.MinTime" : -1,
-    "put.aboveOneSec.MaxTime" : 0,
-    "checkAndPutNumOps" : 0,
-    "checkAndPutAvgTime" : 0,
-    "checkAndPutMinTime" : -1,
-    "checkAndPutMaxTime" : 0,
-    "addToOnlineRegions.aboveOneSec.NumOps" : 0,
-    "addToOnlineRegions.aboveOneSec.AvgTime" : 0,
-    "addToOnlineRegions.aboveOneSec.MinTime" : -1,
-    "addToOnlineRegions.aboveOneSec.MaxTime" : 0,
-    "deleteColumnNumOps" : 0,
-    "deleteColumnAvgTime" : 0,
-    "deleteColumnMinTime" : -1,
-    "deleteColumnMaxTime" : 0,
-    "checkAndDelete.aboveOneSec.NumOps" : 0,
-    "checkAndDelete.aboveOneSec.AvgTime" : 0,
-    "checkAndDelete.aboveOneSec.MinTime" : -1,
-    "checkAndDelete.aboveOneSec.MaxTime" : 0,
-    "getBlockCacheColumnFamilySummaries.aboveOneSec.NumOps" : 0,
-    "getBlockCacheColumnFamilySummaries.aboveOneSec.AvgTime" : 0,
-    "getBlockCacheColumnFamilySummaries.aboveOneSec.MinTime" : -1,
-    "getBlockCacheColumnFamilySummaries.aboveOneSec.MaxTime" : 0,
-    "callQueueLen" : 0,
-    "getConfigurationNumOps" : 0,
-    "getConfigurationAvgTime" : 0,
-    "getConfigurationMinTime" : -1,
-    "getConfigurationMaxTime" : 0,
-    "replicateLogEntriesNumOps" : 0,
-    "replicateLogEntriesAvgTime" : 0,
-    "replicateLogEntriesMinTime" : -1,
-    "replicateLogEntriesMaxTime" : 0,
-    "checkAndPut.aboveOneSec.NumOps" : 0,
-    "checkAndPut.aboveOneSec.AvgTime" : 0,
-    "checkAndPut.aboveOneSec.MinTime" : -1,
-    "checkAndPut.aboveOneSec.MaxTime" : 0,
-    "rpcAuthorizationSuccesses" : 0,
-    "stopNumOps" : 0,
-    "stopAvgTime" : 0,
-    "stopMinTime" : -1,
-    "stopMaxTime" : 0,
-    "incrementColumnValueNumOps" : 0,
-    "incrementColumnValueAvgTime" : 0,
-    "incrementColumnValueMinTime" : -1,
-    "incrementColumnValueMaxTime" : 0,
-    "flushRegionNumOps" : 0,
-    "flushRegionAvgTime" : 0,
-    "flushRegionMinTime" : -1,
-    "flushRegionMaxTime" : 0,
-    "removeFromOnlineRegionsNumOps" : 0,
-    "removeFromOnlineRegionsAvgTime" : 0,
-    "removeFromOnlineRegionsMinTime" : -1,
-    "removeFromOnlineRegionsMaxTime" : 0,
-    "unassignNumOps" : 0,
-    "unassignAvgTime" : 0,
-    "unassignMinTime" : -1,
-    "unassignMaxTime" : 0,
-    "compactRegion.aboveOneSec.NumOps" : 0,
-    "compactRegion.aboveOneSec.AvgTime" : 0,
-    "compactRegion.aboveOneSec.MinTime" : -1,
-    "compactRegion.aboveOneSec.MaxTime" : 0,
-    "multi.aboveOneSec.NumOps" : 0,
-    "multi.aboveOneSec.AvgTime" : 0,
-    "multi.aboveOneSec.MinTime" : -1,
-    "multi.aboveOneSec.MaxTime" : 0,
-    "replicateLogEntries.aboveOneSec.NumOps" : 0,
-    "replicateLogEntries.aboveOneSec.AvgTime" : 0,
-    "replicateLogEntries.aboveOneSec.MinTime" : -1,
-    "replicateLogEntries.aboveOneSec.MaxTime" : 0,
-    "NumOpenConnections" : 0,
-    "rpcAuthenticationSuccesses" : 0,
-    "close.aboveOneSec.NumOps" : 0,
-    "close.aboveOneSec.AvgTime" : 0,
-    "close.aboveOneSec.MinTime" : -1,
-    "close.aboveOneSec.MaxTime" : 0,
-    "modifyTableNumOps" : 0,
-    "modifyTableAvgTime" : 0,
-    "modifyTableMinTime" : -1,
-    "modifyTableMaxTime" : 0,
-    "rpcAuthorizationFailures" : 0,
-    "checkOOMENumOps" : 0,
-    "checkOOMEAvgTime" : 0,
-    "checkOOMEMinTime" : -1,
-    "checkOOMEMaxTime" : 0,
-    "getProtocolVersionNumOps" : 0,
-    "getProtocolVersionAvgTime" : 0,
-    "getProtocolVersionMinTime" : 0,
-    "getProtocolVersionMaxTime" : 0,
-    "RpcQueueTimeNumOps" : 0,
-    "RpcQueueTimeAvgTime" : 0,
-    "RpcQueueTimeMinTime" : 0,
-    "RpcQueueTimeMaxTime" : 1,
-    "checkAndDeleteNumOps" : 0,
-    "checkAndDeleteAvgTime" : 0,
-    "checkAndDeleteMinTime" : -1,
-    "checkAndDeleteMaxTime" : 0,
-    "lockRowNumOps" : 0,
-    "lockRowAvgTime" : 0,
-    "lockRowMinTime" : -1,
-    "lockRowMaxTime" : 0,
-    "isMasterRunningNumOps" : 0,
-    "isMasterRunningAvgTime" : 0,
-    "isMasterRunningMinTime" : -1,
-    "isMasterRunningMaxTime" : 0,
-    "modifyColumnNumOps" : 0,
-    "modifyColumnAvgTime" : 0,
-    "modifyColumnMinTime" : -1,
-    "modifyColumnMaxTime" : 0,
-    "regionServerReportNumOps" : 0,
-    "regionServerReportAvgTime" : 0,
-    "regionServerReportMinTime" : -1,
-    "regionServerReportMaxTime" : 0
-  }, {
-    "name" : "hadoop:service=Master,name=MasterStatistics",
-    "modelerType" : "org.apache.hadoop.hbase.master.metrics.MasterStatistics",
-    "splitTimeNumOps" : 0,
-    "splitTimeAvgTime" : 0,
-    "splitTimeMinTime" : 97,
-    "splitTimeMaxTime" : 97,
-    "splitSizeNumOps" : 0,
-    "splitSizeAvgTime" : 0,
-    "splitSizeMinTime" : 423,
-    "splitSizeMaxTime" : 423,
-    "cluster_requests" : 0.0
-  }, {
-    "name" : "java.lang:type=GarbageCollector,name=ConcurrentMarkSweep",
-    "modelerType" : "sun.management.GarbageCollectorImpl",
-    "LastGcInfo" : null,
-    "CollectionCount" : 0,
-    "CollectionTime" : 0,
-    "Name" : "ConcurrentMarkSweep",
-    "Valid" : true,
-    "MemoryPoolNames" : [ "Par Eden Space", "Par Survivor Space", "CMS Old Gen", "CMS Perm Gen" ]
-  }, {
-    "name" : "org.apache.ZooKeeperService:name0=StandaloneServer_port-1",
-    "modelerType" : "org.apache.zookeeper.server.ZooKeeperServerBean",
-    "Version" : "3.4.2-1221870, built on 12/21/2011 20:46 GMT",
-    "StartTime" : "Tue Feb 14 10:31:07 PST 2012",
-    "TickTime" : 2000,
-    "OutstandingRequests" : 0,
-    "MinSessionTimeout" : 4000,
-    "MaxSessionTimeout" : 40000,
-    "ClientPort" : "10.10.10.149:2181",
-    "MaxClientCnxnsPerHost" : 1000,
-    "PacketsReceived" : 174,
-    "PacketsSent" : 209,
-    "AvgRequestLatency" : 1,
-    "MaxRequestLatency" : 44,
-    "MinRequestLatency" : 0
-  }, {
-    "name" : "java.lang:type=Compilation",
-    "modelerType" : "sun.management.CompilationImpl",
-    "Name" : "HotSpot 64-Bit Tiered Compilers",
-    "CompilationTimeMonitoringSupported" : true,
-    "TotalCompilationTime" : 3017
-  }, {
-    "name" : "java.lang:type=MemoryPool,name=Par Eden Space",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "Par Eden Space",
-    "Type" : "HEAP",
-    "Valid" : true,
-    "Usage" : {
-      "committed" : 17432576,
-      "init" : 17432576,
-      "max" : 104857600,
-      "used" : 6702040
-    },
-    "PeakUsage" : {
-      "committed" : 17432576,
-      "init" : 17432576,
-      "max" : 104857600,
-      "used" : 17432576
-    },
-    "MemoryManagerNames" : [ "ConcurrentMarkSweep", "ParNew" ],
-    "UsageThresholdSupported" : false,
-    "CollectionUsageThreshold" : 0,
-    "CollectionUsageThresholdExceeded" : true,
-    "CollectionUsageThresholdCount" : 0,
-    "CollectionUsage" : {
-      "committed" : 17432576,
-      "init" : 17432576,
-      "max" : 104857600,
-      "used" : 0
-    },
-    "CollectionUsageThresholdSupported" : true
-  }, {
-    "name" : "hadoop:service=RegionServer,name=RegionServerStatistics",
-    "modelerType" : "org.apache.hadoop.hbase.regionserver.metrics.RegionServerStatistics",
-    "totalStaticBloomSizeKB" : 0,
-    "totalStaticIndexSizeKB" : 0,
-    "blockCacheFree" : 256740656,
-    "compactionSizeNumOps" : 0,
-    "compactionSizeAvgTime" : 0,
-    "compactionSizeMinTime" : 4249,
-    "compactionSizeMaxTime" : 4249,
-    "memstoreSizeMB" : 0,
-    "regions" : 2,
-    "blockCacheCount" : 1,
-    "blockCacheHitRatio" : 58,
-    "flushQueueSize" : 0,
-    "fsReadLatencyNumOps" : 0,
-    "fsReadLatencyAvgTime" : 0,
-    "fsReadLatencyMinTime" : -1,
-    "fsReadLatencyMaxTime" : 0,
-    "atomicIncrementTimeNumOps" : 0,
-    "atomicIncrementTimeAvgTime" : 0,
-    "atomicIncrementTimeMinTime" : -1,
-    "atomicIncrementTimeMaxTime" : 0,
-    "blockCacheHitCount" : 7,
-    "blockCacheHitCachingRatio" : 87,
-    "hdfsBlocksLocalityIndex" : 0,
-    "writeRequestsCount" : 1,
-    "compactionTimeNumOps" : 0,
-    "compactionTimeAvgTime" : 0,
-    "compactionTimeMinTime" : 27,
-    "compactionTimeMaxTime" : 27,
-    "fsWriteLatencyNumOps" : 0,
-    "fsWriteLatencyAvgTime" : 0,
-    "fsWriteLatencyMinTime" : -1,
-    "fsWriteLatencyMaxTime" : 0,
-    "blockCacheSize" : 2126544,
-    "readRequestsCount" : 24,
-    "rootIndexSizeKB" : 0,
-    "flushTimeNumOps" : 0,
-    "flushTimeAvgTime" : 0,
-    "flushTimeMinTime" : -1,
-    "flushTimeMaxTime" : 0,
-    "blockCacheMissCount" : 5,
-    "storefiles" : 1,
-    "blockCacheEvictedCount" : 0,
-    "storefileIndexSizeMB" : 0,
-    "fsSyncLatencyNumOps" : 0,
-    "fsSyncLatencyAvgTime" : 0,
-    "fsSyncLatencyMinTime" : -1,
-    "fsSyncLatencyMaxTime" : 0,
-    "stores" : 2,
-    "compactionQueueSize" : 0,
-    "flushSizeNumOps" : 0,
-    "flushSizeAvgTime" : 0,
-    "flushSizeMinTime" : -1,
-    "flushSizeMaxTime" : 0,
-    "requests" : 2.0
-  }, {
-    "name" : "hadoop:service=org.apache.hbase,name=Master",
-    "modelerType" : "org.apache.hadoop.hbase.master.MXBeanImpl",
-    "ClusterId" : "d24914d7-75d3-4dcc-9e6f-0d7770833993",
-    "MasterStartTime" : 1329244268503,
-    "MasterActiveTime" : 1329244268505,
-    "Coprocessors" : [ ],
-    "ServerName" : "10.10.10.149,61256,1329244268169",
-    "AverageLoad" : 2.0,
-    "RegionsInTransition" : [ ],
-    "RegionServers" : [ {
-      "key" : "10.10.10.149,61258,1329244268491",
-      "value" : {
-        "coprocessors" : [ ],
-        "load" : 2,
-        "maxHeapMB" : 987,
-        "memStoreSizeInMB" : 0,
-        "numberOfRegions" : 2,
-        "numberOfRequests" : 2,
-        "regionsLoad" : [ {
-          "key" : [ 46, 77, 69, 84, 65, 46, 44, 44, 49 ],
-          "value" : {
-            "currentCompactedKVs" : 0,
-            "memStoreSizeMB" : 0,
-            "name" : [ 46, 77, 69, 84, 65, 46, 44, 44, 49 ],
-            "nameAsString" : ".META.,,1",
-            "readRequestsCount" : 8,
-            "requestsCount" : 8,
-            "storefileIndexSizeMB" : 0,
-            "storefileSizeMB" : 0,
-            "storefiles" : 0,
-            "stores" : 1,
-            "totalCompactingKVs" : 0,
-            "version" : 1,
-            "writeRequestsCount" : 0
-          }
-        }, {
-          "key" : [ 45, 82, 79, 79, 84, 45, 44, 44, 48 ],
-          "value" : {
-            "currentCompactedKVs" : 22,
-            "memStoreSizeMB" : 0,
-            "name" : [ 45, 82, 79, 79, 84, 45, 44, 44, 48 ],
-            "nameAsString" : "-ROOT-,,0",
-            "readRequestsCount" : 16,
-            "requestsCount" : 17,
-            "storefileIndexSizeMB" : 0,
-            "storefileSizeMB" : 0,
-            "storefiles" : 1,
-            "stores" : 1,
-            "totalCompactingKVs" : 28,
-            "version" : 1,
-            "writeRequestsCount" : 1
-          }
-        } ],
-        "storefileIndexSizeInMB" : 0,
-        "storefileSizeInMB" : 0,
-        "storefiles" : 1,
-        "totalNumberOfRequests" : 6,
-        "usedHeapMB" : 29,
-        "version" : 2
-      }
-    } ],
-    "ZookeeperQuorum" : "localhost:2181",
-    "DeadRegionServers" : [ ],
-    "IsActiveMaster" : true
-  }, {
-    "name" : "java.lang:type=MemoryPool,name=CMS Perm Gen",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "CMS Perm Gen",
-    "Type" : "NON_HEAP",
-    "Valid" : true,
-    "Usage" : {
-      "committed" : 26673152,
-      "init" : 21757952,
-      "max" : 85983232,
-      "used" : 26492128
-    },
-    "PeakUsage" : {
-      "committed" : 26673152,
-      "init" : 21757952,
-      "max" : 85983232,
-      "used" : 26492128
-    },
-    "MemoryManagerNames" : [ "ConcurrentMarkSweep" ],
-    "UsageThreshold" : 0,
-    "UsageThresholdExceeded" : true,
-    "UsageThresholdCount" : 0,
-    "UsageThresholdSupported" : true,
-    "CollectionUsageThreshold" : 0,
-    "CollectionUsageThresholdExceeded" : true,
-    "CollectionUsageThresholdCount" : 0,
-    "CollectionUsage" : {
-      "committed" : 0,
-      "init" : 21757952,
-      "max" : 85983232,
-      "used" : 0
-    },
-    "CollectionUsageThresholdSupported" : true
-  }, {
-    "name" : "org.apache.ZooKeeperService:name0=StandaloneServer_port-1,name1=Connections,name2=\"0:0:0:0:0:0:0:1%0\",name3=0x1357d21e4640001",
-    "modelerType" : "org.apache.zookeeper.server.ConnectionBean",
-    "SessionId" : "0x1357d21e4640001",
-    "OutstandingRequests" : 0,
-    "SessionTimeout" : 40000,
-    "MinLatency" : 0,
-    "AvgLatency" : 1,
-    "MaxLatency" : 44,
-    "PacketsReceived" : 57,
-    "PacketsSent" : 71,
-    "LastOperation" : "GETD",
-    "LastCxid" : "0x37",
-    "LastZxid" : "0x24",
-    "LastResponseTime" : "Tue Feb 14 10:31:38 PST 2012",
-    "LastLatency" : 0,
-    "SourceIP" : "0:0:0:0:0:0:0:1%0:61259",
-    "EphemeralNodes" : [ "/hbase/rs/10.10.10.149,61258,1329244268491" ],
-    "StartedTime" : "Tue Feb 14 10:31:08 PST 2012"
-  }, {
-    "name" : "java.lang:type=OperatingSystem",
-    "modelerType" : "com.sun.management.UnixOperatingSystem",
-    "OpenFileDescriptorCount" : 212,
-    "MaxFileDescriptorCount" : 10240,
-    "CommittedVirtualMemorySize" : 4055478272,
-    "TotalSwapSpaceSize" : 4294967296,
-    "FreeSwapSpaceSize" : 896450560,
-    "ProcessCpuTime" : 6710000000,
-    "FreePhysicalMemorySize" : 1081786368,
-    "TotalPhysicalMemorySize" : 8589934592,
-    "Name" : "Mac OS X",
-    "Version" : "10.6.8",
-    "AvailableProcessors" : 8,
-    "Arch" : "x86_64",
-    "SystemLoadAverage" : 0.54150390625
-  }, {
-    "name" : "org.apache.ZooKeeperService:name0=StandaloneServer_port-1,name1=Connections,name2=\"0:0:0:0:0:0:0:1%0\",name3=0x1357d21e4640000",
-    "modelerType" : "org.apache.zookeeper.server.ConnectionBean",
-    "SessionId" : "0x1357d21e4640000",
-    "OutstandingRequests" : 0,
-    "SessionTimeout" : 40000,
-    "MinLatency" : 0,
-    "AvgLatency" : 1,
-    "MaxLatency" : 25,
-    "PacketsReceived" : 78,
-    "PacketsSent" : 98,
-    "LastOperation" : "GETC",
-    "LastCxid" : "0x4c",
-    "LastZxid" : "0x25",
-    "LastResponseTime" : "Tue Feb 14 10:31:38 PST 2012",
-    "LastLatency" : 0,
-    "SourceIP" : "0:0:0:0:0:0:0:1%0:61257",
-    "EphemeralNodes" : [ "/hbase/master" ],
-    "StartedTime" : "Tue Feb 14 10:31:08 PST 2012"
-  }, {
-    "name" : "java.lang:type=MemoryPool,name=Par Survivor Space",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "Par Survivor Space",
-    "Type" : "HEAP",
-    "Valid" : true,
-    "Usage" : {
-      "committed" : 2162688,
-      "init" : 2162688,
-      "max" : 13107200,
-      "used" : 269528
-    },
-    "PeakUsage" : {
-      "committed" : 2162688,
-      "init" : 2162688,
-      "max" : 13107200,
-      "used" : 1925296
-    },
-    "MemoryManagerNames" : [ "ConcurrentMarkSweep", "ParNew" ],
-    "UsageThresholdSupported" : false,
-    "CollectionUsageThreshold" : 0,
-    "CollectionUsageThresholdExceeded" : true,
-    "CollectionUsageThresholdCount" : 0,
-    "CollectionUsage" : {
-      "committed" : 2162688,
-      "init" : 2162688,
-      "max" : 13107200,
-      "used" : 269528
-    },
-    "CollectionUsageThresholdSupported" : true
-  }, {
-    "name" : "java.lang:type=MemoryManager,name=CodeCacheManager",
-    "modelerType" : "sun.management.MemoryManagerImpl",
-    "Name" : "CodeCacheManager",
-    "Valid" : true,
-    "MemoryPoolNames" : [ "Code Cache" ]
-  }, {
-    "name" : "java.lang:type=MemoryPool,name=CMS Old Gen",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "CMS Old Gen",
-    "Type" : "HEAP",
-    "Valid" : true,
-    "Usage" : {
-      "committed" : 65404928,
-      "init" : 65404928,
-      "max" : 917504000,
-      "used" : 28974008
-    },
-    "PeakUsage" : {
-      "committed" : 65404928,
-      "init" : 65404928,
-      "max" : 917504000,
-      "used" : 28974008
-    },
-    "MemoryManagerNames" : [ "ConcurrentMarkSweep" ],
-    "UsageThreshold" : 0,
-    "UsageThresholdExceeded" : true,
-    "UsageThresholdCount" : 0,
-    "UsageThresholdSupported" : true,
-    "CollectionUsageThreshold" : 0,
-    "CollectionUsageThresholdExceeded" : true,
-    "CollectionUsageThresholdCount" : 0,
-    "CollectionUsage" : {
-      "committed" : 0,
-      "init" : 65404928,
-      "max" : 917504000,
-      "used" : 0
-    },
-    "CollectionUsageThresholdSupported" : true
-  }, {
-    "name" : "hadoop:service=HBase,name=Info",
-    "modelerType" : "org.apache.hadoop.hbase.metrics.HBaseInfo$HBaseInfoMBean",
-    "revision" : "a23f8636efd6dd9d37f3a15d83f2396819509502",
-    "hdfsUser" : "hortonfo",
-    "hdfsDate" : "Fri Dec 16 20:01:27 UTC 2011",
-    "hdfsUrl" : "https://svn.apache.org/repos/asf/hadoop/common/branches/branch-1.0",
-    "date" : "Tue Feb 14 10:12:33 PST 2012",
-    "hdfsRevision" : "1214675",
-    "user" : "Hitesh",
-    "hdfsVersion" : "1.0.0",
-    "url" : "git://unknowne4ce8f149fb6/Users/Hitesh/dev/hbase",
-    "version" : "0.92.1-SNAPSHOT"
-  }, {
-    "name" : "java.lang:type=MemoryPool,name=Code Cache",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "Code Cache",
-    "Type" : "NON_HEAP",
-    "Valid" : true,
-    "Usage" : {
-      "committed" : 2560000,
-      "init" : 2560000,
-      "max" : 50331648,
-      "used" : 1639360
-    },
-    "PeakUsage" : {
-      "committed" : 2560000,
-      "init" : 2560000,
-      "max" : 50331648,
-      "used" : 1656128
-    },
-    "MemoryManagerNames" : [ "CodeCacheManager" ],
-    "UsageThreshold" : 0,
-    "UsageThresholdExceeded" : true,
-    "UsageThresholdCount" : 0,
-    "UsageThresholdSupported" : true,
-    "CollectionUsage" : null,
-    "CollectionUsageThresholdSupported" : false
-  }, {
-    "name" : "java.lang:type=Runtime",
-    "modelerType" : "sun.management.RuntimeImpl",
-    "Name" : "92939@Hitesh-Shahs-MacBook-Pro.local",
-    "ClassPath" : "/Users/Hitesh/dev/hbase/bin/../conf:/Library/Java/Home/lib/tools.jar:/Users/Hitesh/.m2/repository/asm/asm/3.1/asm-3.1.jar:/Users/Hitesh/.m2/repository/com/github/stephenc/high-scale-lib/high-scale-lib/1.1.1/high-scale-lib-1.1.1.jar:/Users/Hitesh/.m2/repository/com/google/guava/guava/r09/guava-r09.jar:/Users/Hitesh/.m2/repository/com/google/protobuf/protobuf-java/2.4.0a/protobuf-java-2.4.0a.jar:/Users/Hitesh/.m2/repository/com/sun/jersey/jersey-core/1.4/jersey-core-1.4.jar:/Users/Hitesh/.m2/repository/com/sun/jersey/jersey-json/1.4/jersey-json-1.4.jar:/Users/Hitesh/.m2/repository/com/sun/jersey/jersey-server/1.4/jersey-server-1.4.jar:/Users/Hitesh/.m2/repository/com/sun/xml/bind/jaxb-impl/2.1.12/jaxb-impl-2.1.12.jar:/Users/Hitesh/.m2/repository/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/Users/Hitesh/.m2/repository/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/Users/Hitesh/.m2/repository/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/Users/Hitesh/.m2/repository/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/Users/Hitesh/.m2/repository/commons-collections/commons-collections/3.2.1/commons-collections-3.2.1.jar:/Users/Hitesh/.m2/repository/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/Users/Hitesh/.m2/repository/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/Users/Hitesh/.m2/repository/commons-el/commons-el/1.0/commons-el-1.0.jar:/Users/Hitesh/.m2/repository/commons-httpclient/commons-httpclient/3.1/commons-httpclient-3.1.jar:/Users/Hitesh/.m2/repository/commons-lang/commons-lang/2.5/commons-lang-2.5.jar:/Users/Hitesh/.m2/repository/commons-logging/commons-logging/1.1.1/commons-logging-1.1.1.jar:/Users/Hitesh/.m2/repository/commons-net/commons-net/1.4.1/commons-net-1.4.1.jar:/Users/Hitesh/.m2/repository/javax/activation/activation/1.1/activation-1.1.jar:/Users/Hitesh/.m2/repository/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/Users/Hitesh/.m2/repository/javax/xml/bind/jaxb-api/2.1/jaxb-api-2.1.jar:/Users/Hitesh/.m2/repository/junit/junit/4.10/junit-4.10.jar:/Users/Hitesh/.m2/repository/log4j/log4j/1.2.16/log4j-1.2.16.jar:/Users/Hitesh/.m2/repository/org/apache/avro/avro/1.5.3/avro-1.5.3.jar:/Users/Hitesh/.m2/repository/org/apache/avro/avro-ipc/1.5.3/avro-ipc-1.5.3.jar:/Users/Hitesh/.m2/repository/org/apache/commons/commons-math/2.1/commons-math-2.1.jar:/Users/Hitesh/.m2/repository/org/apache/ftpserver/ftplet-api/1.0.0/ftplet-api-1.0.0.jar:/Users/Hitesh/.m2/repository/org/apache/ftpserver/ftpserver-core/1.0.0/ftpserver-core-1.0.0.jar:/Users/Hitesh/.m2/repository/org/apache/ftpserver/ftpserver-deprecated/1.0.0-M2/ftpserver-deprecated-1.0.0-M2.jar:/Users/Hitesh/.m2/repository/org/apache/hadoop/hadoop-core/1.0.0/hadoop-core-1.0.0.jar:/Users/Hitesh/.m2/repository/org/apache/hadoop/hadoop-test/1.0.0/hadoop-test-1.0.0.jar:/Users/Hitesh/.m2/repository/org/apache/httpcomponents/httpclient/4.0.1/httpclient-4.0.1.jar:/Users/Hitesh/.m2/repository/org/apache/httpcomponents/httpcore/4.0.1/httpcore-4.0.1.jar:/Users/Hitesh/.m2/repository/org/apache/mina/mina-core/2.0.0-M5/mina-core-2.0.0-M5.jar:/Users/Hitesh/.m2/repository/org/apache/thrift/libthrift/0.7.0/libthrift-0.7.0.jar:/Users/Hitesh/.m2/repository/org/apache/velocity/velocity/1.7/velocity-1.7.jar:/Users/Hitesh/.m2/repository/org/apache/zookeeper/zookeeper/3.4.2/zookeeper-3.4.2.jar:/Users/Hitesh/.m2/repository/org/codehaus/jackson/jackson-core-asl/1.5.5/jackson-core-asl-1.5.5.jar:/Users/Hitesh/.m2/repository
/org/codehaus/jackson/jackson-jaxrs/1.5.5/jackson-jaxrs-1.5.5.jar:/Users/Hitesh/.m2/repository/org/codehaus/jackson/jackson-mapper-asl/1.5.5/jackson-mapper-asl-1.5.5.jar:/Users/Hitesh/.m2/repository/org/codehaus/jackson/jackson-xc/1.5.5/jackson-xc-1.5.5.jar:/Users/Hitesh/.m2/repository/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/Users/Hitesh/.m2/repository/org/eclipse/jdt/core/3.1.1/core-3.1.1.jar:/Users/Hitesh/.m2/repository/org/hamcrest/hamcrest-core/1.1/hamcrest-core-1.1.jar:/Users/Hitesh/.m2/repository/org/jamon/jamon-runtime/2.3.1/jamon-runtime-2.3.1.jar:/Users/Hitesh/.m2/repository/org/jboss/netty/netty/3.2.4.Final/netty-3.2.4.Final.jar:/Users/Hitesh/.m2/repository/org/jruby/jruby-complete/1.6.5/jruby-complete-1.6.5.jar:/Users/Hitesh/.m2/repository/org/mockito/mockito-all/1.8.5/mockito-all-1.8.5.jar:/Users/Hitesh/.m2/repository/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/Users/Hitesh/.m2/repository/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/Users/Hitesh/.m2/repository/org/mortbay/jetty/jsp-2.1/6.1.14/jsp-2.1-6.1.14.jar:/Users/Hitesh/.m2/repository/org/mortbay/jetty/jsp-api-2.1/6.1.14/jsp-api-2.1-6.1.14.jar:/Users/Hitesh/.m2/repository/org/mortbay/jetty/servlet-api-2.5/6.1.14/servlet-api-2.5-6.1.14.jar:/Users/Hitesh/.m2/repository/org/slf4j/slf4j-api/1.5.8/slf4j-api-1.5.8.jar:/Users/Hitesh/.m2/repository/org/slf4j/slf4j-log4j12/1.5.8/slf4j-log4j12-1.5.8.jar:/Users/Hitesh/.m2/repository/org/xerial/snappy/snappy-java/1.0.3.2/snappy-java-1.0.3.2.jar:/Users/Hitesh/.m2/repository/stax/stax-api/1.0.1/stax-api-1.0.1.jar:/Users/Hitesh/.m2/repository/tomcat/jasper-compiler/5.5.23/jasper-compiler-5.5.23.jar:/Users/Hitesh/.m2/repository/tomcat/jasper-runtime/5.5.23/jasper-runtime-5.5.23.jar:/Users/Hitesh/.m2/repository/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/Users/Hitesh/dev/hbase/bin/../target/classes:/Users/Hitesh/dev/hbase/bin/../target/test-classes:/Users/Hitesh/dev/hbase/bin/../target:/Users/Hitesh/dev/hbase/bin/../lib/*.jar",
-    "StartTime" : 1329244267069,
-    "VmName" : "Java HotSpot(TM) 64-Bit Server VM",
-    "VmVendor" : "Apple Inc.",
-    "VmVersion" : "20.4-b02-402",
-    "InputArguments" : [ "-XX:OnOutOfMemoryError=kill", "-9", "%p", "-Xmx1000m", "-XX:+UseConcMarkSweepGC", "-Dhbase.log.dir=/Users/Hitesh/dev/hbase/bin/../logs", "-Dhbase.log.file=hbase-Hitesh-master-Hitesh-Shahs-MacBook-Pro.local.log", "-Dhbase.home.dir=/Users/Hitesh/dev/hbase/bin/..", "-Dhbase.id.str=Hitesh", "-Dhbase.root.logger=INFO,DRFA" ],
-    "BootClassPathSupported" : true,
-    "LibraryPath" : ".:/Library/Java/Extensions:/System/Library/Java/Extensions:/usr/lib/java",
-    "BootClassPath" : "/System/Library/Java/JavaVirtualMachines/1.6.0.jdk/Contents/Classes/jsfd.jar:/System/Library/Java/JavaVirtualMachines/1.6.0.jdk/Contents/Classes/classes.jar:/System/Library/Frameworks/JavaVM.framework/Frameworks/JavaRuntimeSupport.framework/Resources/Java/JavaRuntimeSupport.jar:/System/Library/Java/JavaVirtualMachines/1.6.0.jdk/Contents/Classes/ui.jar:/System/Library/Java/JavaVirtualMachines/1.6.0.jdk/Contents/Classes/laf.jar:/System/Library/Java/JavaVirtualMachines/1.6.0.jdk/Contents/Classes/sunrsasign.jar:/System/Library/Java/JavaVirtualMachines/1.6.0.jdk/Contents/Classes/jsse.jar:/System/Library/Java/JavaVirtualMachines/1.6.0.jdk/Contents/Classes/jce.jar:/System/Library/Java/JavaVirtualMachines/1.6.0.jdk/Contents/Classes/charsets.jar",
-    "ManagementSpecVersion" : "1.2",
-    "SpecName" : "Java Virtual Machine Specification",
-    "SpecVendor" : "Sun Microsystems Inc.",
-    "SpecVersion" : "1.0",
-    "Uptime" : 37843,
-    "SystemProperties" : [ {
-      "key" : "java.ext.dirs",
-      "value" : "/Library/Java/Extensions:/System/Library/Java/Extensions:/System/Library/Java/JavaVirtualMachines/1.6.0.jdk/Contents/Home/lib/ext"
-    }, {
-      "key" : "java.vm.specification.vendor",
-      "value" : "Sun Microsystems Inc."
-    }, {
-      "key" : "hbase.log.dir",
-      "value" : "/Users/Hitesh/dev/hbase/bin/../logs"
-    }, {
-      "key" : "user.timezone",
-      "value" : "America/Los_Angeles"
-    }, {
-      "key" : "java.vm.vendor",
-      "value" : "Apple Inc."
-    }, {
-      "key" : "awt.nativeDoubleBuffering",
-      "value" : "true"
-    }, {
-      "key" : "zookeeper.preAllocSize",
-      "value" : "100"
-    }, {
-      "key" : "user.name",
-      "value" : "Hitesh"
-    }, {
-      "key" : "java.vm.specification.name",
-      "value" : "Java Virtual Machine Specification"
-    }, {
-      "key" : "user.dir",
-      "value" : "/Users/Hitesh/dev/hbase"
-    }, {
-      "key" : "user.country",
-      "value" : "US"
-    }, {
-      "key" : "user.language",
-      "value" : "en"
-    }, {
-      "key" : "gopherProxySet",
-      "value" : "false"
-    }, {
-      "key" : "java.specification.version",
-      "value" : "1.6"
-    }, {
-      "key" : "socksNonProxyHosts",
-      "value" : "local|*.local|169.254/16|*.169.254/16"
-    }, {
-      "key" : "sun.cpu.endian",
-      "value" : "little"
-    }, {
-      "key" : "java.home",
-      "value" : "/System/Library/Java/JavaVirtualMachines/1.6.0.jdk/Contents/Home"
-    }, {
-      "key" : "hbase.log.file",
-      "value" : "hbase-Hitesh-master-Hitesh-Shahs-MacBook-Pro.local.log"
-    }, {
-      "key" : "sun.jnu.encoding",
-      "value" : "MacRoman"
-    }, {
-      "key" : "file.separator",
-      "value" : "/"
-    }, {
-      "key" : "java.vendor.url",
-      "value" : "http://www.apple.com/"
-    }, {
-      "key" : "java.awt.graphicsenv",
-      "value" : "apple.awt.CGraphicsEnvironment"
-    }, {
-      "key" : "os.arch",
-      "value" : "x86_64"
-    }, {
-      "key" : "java.io.tmpdir",
-      "value" : "/var/folders/OA/OAEY++JuFPiCaWCgLBBeVk+++TI/-Tmp-/"
-    }, {
-      "key" : "java.runtime.name",
-      "value" : "Java(TM) SE Runtime Environment"
-    }, {
-      "key" : "java.awt.printerjob",
-      "value" : "apple.awt.CPrinterJob"
-    }, {
-      "key" : "file.encoding",
-      "value" : "MacRoman"
-    }, {
-      "key" : "java.version",
-      "value" : "1.6.0_29"
-    }, {
-      "key" : "java.vendor.url.bug",
-      "value" : "http://bugreport.apple.com/"
-    }, {
-      "key" : "java.vm.specification.version",
-      "value" : "1.0"
-    }, {
-      "key" : "file.encoding.pkg",
-      "value" : "sun.io"
-    }, {
-      "key" : "sun.java.command",
-      "value" : "org.apache.hadoop.hbase.master.HMaster start"
-    }, {
-      "key" : "sun.java.launcher",
-      "value" : "SUN_STANDARD"
-    }, {
-      "key" : "path.separator",
-      "value" : ":"
-    }, {
-      "key" : "java.runtime.version",
-      "value" : "1.6.0_29-b11-402-10M3527"
-    }, {
-      "key" : "java.class.path",
-      "value" : "/Users/Hitesh/dev/hbase/bin/../conf:/Library/Java/Home/lib/tools.jar:/Users/Hitesh/.m2/repository/asm/asm/3.1/asm-3.1.jar:/Users/Hitesh/.m2/repository/com/github/stephenc/high-scale-lib/high-scale-lib/1.1.1/high-scale-lib-1.1.1.jar:/Users/Hitesh/.m2/repository/com/google/guava/guava/r09/guava-r09.jar:/Users/Hitesh/.m2/repository/com/google/protobuf/protobuf-java/2.4.0a/protobuf-java-2.4.0a.jar:/Users/Hitesh/.m2/repository/com/sun/jersey/jersey-core/1.4/jersey-core-1.4.jar:/Users/Hitesh/.m2/repository/com/sun/jersey/jersey-json/1.4/jersey-json-1.4.jar:/Users/Hitesh/.m2/repository/com/sun/jersey/jersey-server/1.4/jersey-server-1.4.jar:/Users/Hitesh/.m2/repository/com/sun/xml/bind/jaxb-impl/2.1.12/jaxb-impl-2.1.12.jar:/Users/Hitesh/.m2/repository/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/Users/Hitesh/.m2/repository/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/Users/Hitesh/.m2/repository/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/Users/Hitesh/.m2/repository/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/Users/Hitesh/.m2/repository/commons-collections/commons-collections/3.2.1/commons-collections-3.2.1.jar:/Users/Hitesh/.m2/repository/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/Users/Hitesh/.m2/repository/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/Users/Hitesh/.m2/repository/commons-el/commons-el/1.0/commons-el-1.0.jar:/Users/Hitesh/.m2/repository/commons-httpclient/commons-httpclient/3.1/commons-httpclient-3.1.jar:/Users/Hitesh/.m2/repository/commons-lang/commons-lang/2.5/commons-lang-2.5.jar:/Users/Hitesh/.m2/repository/commons-logging/commons-logging/1.1.1/commons-logging-1.1.1.jar:/Users/Hitesh/.m2/repository/commons-net/commons-net/1.4.1/commons-net-1.4.1.jar:/Users/Hitesh/.m2/repository/javax/activation/activation/1.1/activation-1.1.jar:/Users/Hitesh/.m2/repository/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/Users/Hitesh/.m2/repository/javax/xml/bind/jaxb-api/2.1/jaxb-api-2.1.jar:/Users/Hitesh/.m2/repository/junit/junit/4.10/junit-4.10.jar:/Users/Hitesh/.m2/repository/log4j/log4j/1.2.16/log4j-1.2.16.jar:/Users/Hitesh/.m2/repository/org/apache/avro/avro/1.5.3/avro-1.5.3.jar:/Users/Hitesh/.m2/repository/org/apache/avro/avro-ipc/1.5.3/avro-ipc-1.5.3.jar:/Users/Hitesh/.m2/repository/org/apache/commons/commons-math/2.1/commons-math-2.1.jar:/Users/Hitesh/.m2/repository/org/apache/ftpserver/ftplet-api/1.0.0/ftplet-api-1.0.0.jar:/Users/Hitesh/.m2/repository/org/apache/ftpserver/ftpserver-core/1.0.0/ftpserver-core-1.0.0.jar:/Users/Hitesh/.m2/repository/org/apache/ftpserver/ftpserver-deprecated/1.0.0-M2/ftpserver-deprecated-1.0.0-M2.jar:/Users/Hitesh/.m2/repository/org/apache/hadoop/hadoop-core/1.0.0/hadoop-core-1.0.0.jar:/Users/Hitesh/.m2/repository/org/apache/hadoop/hadoop-test/1.0.0/hadoop-test-1.0.0.jar:/Users/Hitesh/.m2/repository/org/apache/httpcomponents/httpclient/4.0.1/httpclient-4.0.1.jar:/Users/Hitesh/.m2/repository/org/apache/httpcomponents/httpcore/4.0.1/httpcore-4.0.1.jar:/Users/Hitesh/.m2/repository/org/apache/mina/mina-core/2.0.0-M5/mina-core-2.0.0-M5.jar:/Users/Hitesh/.m2/repository/org/apache/thrift/libthrift/0.7.0/libthrift-0.7.0.jar:/Users/Hitesh/.m2/repository/org/apache/velocity/velocity/1.7/velocity-1.7.jar:/Users/Hitesh/.m2/repository/org/apache/zookeeper/zookeeper/3.4.2/zookeeper-3.4.2.jar:/Users/Hitesh/.m2/repository/org/codehaus/jackson/jackson-core-asl/1.5.5/jackson-core-asl-1.5.5.jar:/Users/Hitesh/.m2/repository/o
rg/codehaus/jackson/jackson-jaxrs/1.5.5/jackson-jaxrs-1.5.5.jar:/Users/Hitesh/.m2/repository/org/codehaus/jackson/jackson-mapper-asl/1.5.5/jackson-mapper-asl-1.5.5.jar:/Users/Hitesh/.m2/repository/org/codehaus/jackson/jackson-xc/1.5.5/jackson-xc-1.5.5.jar:/Users/Hitesh/.m2/repository/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/Users/Hitesh/.m2/repository/org/eclipse/jdt/core/3.1.1/core-3.1.1.jar:/Users/Hitesh/.m2/repository/org/hamcrest/hamcrest-core/1.1/hamcrest-core-1.1.jar:/Users/Hitesh/.m2/repository/org/jamon/jamon-runtime/2.3.1/jamon-runtime-2.3.1.jar:/Users/Hitesh/.m2/repository/org/jboss/netty/netty/3.2.4.Final/netty-3.2.4.Final.jar:/Users/Hitesh/.m2/repository/org/jruby/jruby-complete/1.6.5/jruby-complete-1.6.5.jar:/Users/Hitesh/.m2/repository/org/mockito/mockito-all/1.8.5/mockito-all-1.8.5.jar:/Users/Hitesh/.m2/repository/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/Users/Hitesh/.m2/repository/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/Users/Hitesh/.m2/repository/org/mortbay/jetty/jsp-2.1/6.1.14/jsp-2.1-6.1.14.jar:/Users/Hitesh/.m2/repository/org/mortbay/jetty/jsp-api-2.1/6.1.14/jsp-api-2.1-6.1.14.jar:/Users/Hitesh/.m2/repository/org/mortbay/jetty/servlet-api-2.5/6.1.14/servlet-api-2.5-6.1.14.jar:/Users/Hitesh/.m2/repository/org/slf4j/slf4j-api/1.5.8/slf4j-api-1.5.8.jar:/Users/Hitesh/.m2/repository/org/slf4j/slf4j-log4j12/1.5.8/slf4j-log4j12-1.5.8.jar:/Users/Hitesh/.m2/repository/org/xerial/snappy/snappy-java/1.0.3.2/snappy-java-1.0.3.2.jar:/Users/Hitesh/.m2/repository/stax/stax-api/1.0.1/stax-api-1.0.1.jar:/Users/Hitesh/.m2/repository/tomcat/jasper-compiler/5.5.23/jasper-compiler-5.5.23.jar:/Users/Hitesh/.m2/repository/tomcat/jasper-runtime/5.5.23/jasper-runtime-5.5.23.jar:/Users/Hitesh/.m2/repository/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/Users/Hitesh/dev/hbase/bin/../target/classes:/Users/Hitesh/dev/hbase/bin/../target/test-classes:/Users/Hitesh/dev/hbase/bin/../target:/Users/Hitesh/dev/hbase/bin/../lib/*.jar"
-    }, {
-      "key" : "os.name",
-      "value" : "Mac OS X"
-    }, {
-      "key" : "hbase.home.dir",
-      "value" : "/Users/Hitesh/dev/hbase/bin/.."
-    }, {
-      "key" : "http.nonProxyHosts",
-      "value" : "local|*.local|169.254/16|*.169.254/16"
-    }, {
-      "key" : "line.separator",
-      "value" : "\n"
-    }, {
-      "key" : "os.version",
-      "value" : "10.6.8"
-    }, {
-      "key" : "sun.arch.data.model",
-      "value" : "64"
-    }, {
-      "key" : "mrj.version",
-      "value" : "1060.1.6.0_29-402"
-    }, {
-      "key" : "java.class.version",
-      "value" : "50.0"
-    }, {
-      "key" : "sun.io.unicode.encoding",
-      "value" : "UnicodeLittle"
-    }, {
-      "key" : "java.vendor",
-      "value" : "Apple Inc."
-    }, {
-      "key" : "sun.boot.class.path",
-      "value" : "/System/Library/Java/JavaVirtualMachines/1.6.0.jdk/Contents/Classes/jsfd.jar:/System/Library/Java/JavaVirtualMachines/1.6.0.jdk/Contents/Classes/classes.jar:/System/Library/Frameworks/JavaVM.framework/Frameworks/JavaRuntimeSupport.framework/Resources/Java/JavaRuntimeSupport.jar:/System/Library/Java/JavaVirtualMachines/1.6.0.jdk/Contents/Classes/ui.jar:/System/Library/Java/JavaVirtualMachines/1.6.0.jdk/Contents/Classes/laf.jar:/System/Library/Java/JavaVirtualMachines/1.6.0.jdk/Contents/Classes/sunrsasign.jar:/System/Library/Java/JavaVirtualMachines/1.6.0.jdk/Contents/Classes/jsse.jar:/System/Library/Java/JavaVirtualMachines/1.6.0.jdk/Contents/Classes/jce.jar:/System/Library/Java/JavaVirtualMachines/1.6.0.jdk/Contents/Classes/charsets.jar"
-    }, {
-      "key" : "ftp.nonProxyHosts",
-      "value" : "local|*.local|169.254/16|*.169.254/16"
-    }, {
-      "key" : "hbase.root.logger",
-      "value" : "INFO,DRFA"
-    }, {
-      "key" : "java.vm.info",
-      "value" : "mixed mode"
-    }, {
-      "key" : "java.specification.name",
-      "value" : "Java Platform API Specification"
-    }, {
-      "key" : "awt.toolkit",
-      "value" : "apple.awt.CToolkit"
-    }, {
-      "key" : "java.vm.name",
-      "value" : "Java HotSpot(TM) 64-Bit Server VM"
-    }, {
-      "key" : "java.vm.version",
-      "value" : "20.4-b02-402"
-    }, {
-      "key" : "sun.boot.library.path",
-      "value" : "/System/Library/Java/JavaVirtualMachines/1.6.0.jdk/Contents/Libraries"
-    }, {
-      "key" : "java.endorsed.dirs",
-      "value" : "/System/Library/Java/JavaVirtualMachines/1.6.0.jdk/Contents/Home/lib/endorsed"
-    }, {
-      "key" : "sun.os.patch.level",
-      "value" : "unknown"
-    }, {
-      "key" : "sun.cpu.isalist",
-      "value" : ""
-    }, {
-      "key" : "hbase.id.str",
-      "value" : "Hitesh"
-    }, {
-      "key" : "user.home",
-      "value" : "/Users/Hitesh"
-    }, {
-      "key" : "mrj.build",
-      "value" : "10M3527"
-    }, {
-      "key" : "java.library.path",
-      "value" : ".:/Library/Java/Extensions:/System/Library/Java/Extensions:/usr/lib/java"
-    }, {
-      "key" : "java.specification.vendor",
-      "value" : "Sun Microsystems Inc."
-    }, {
-      "key" : "sun.management.compiler",
-      "value" : "HotSpot 64-Bit Tiered Compilers"
-    } ]
-  }, {
-    "name" : "java.lang:type=ClassLoading",
-    "modelerType" : "sun.management.ClassLoadingImpl",
-    "LoadedClassCount" : 3457,
-    "UnloadedClassCount" : 0,
-    "TotalLoadedClassCount" : 3457,
-    "Verbose" : false
-  }, {
-    "name" : "org.apache.ZooKeeperService:name0=StandaloneServer_port-1,name1=InMemoryDataTree",
-    "modelerType" : "org.apache.zookeeper.server.DataTreeBean",
-    "NodeCount" : 15,
-    "WatchCount" : 12,
-    "LastZxid" : "0x25"
-  }, {
-    "name" : "java.lang:type=Threading",
-    "modelerType" : "sun.management.ThreadImpl",
-    "ThreadAllocatedMemorySupported" : true,
-    "ThreadAllocatedMemoryEnabled" : true,
-    "ThreadContentionMonitoringSupported" : true,
-    "CurrentThreadCpuTimeSupported" : true,
-    "ObjectMonitorUsageSupported" : true,
-    "SynchronizerUsageSupported" : true,
-    "ThreadContentionMonitoringEnabled" : false,
-    "ThreadCpuTimeEnabled" : true,
-    "PeakThreadCount" : 98,
-    "DaemonThreadCount" : 82,
-    "ThreadCount" : 98,
-    "TotalStartedThreadCount" : 106,
-    "ThreadCpuTimeSupported" : true,
-    "AllThreadIds" : [ 121, 120, 119, 118, 114, 112, 110, 108, 107, 105, 103, 102, 101, 100, 99, 98, 97, 96, 95, 94, 93, 92, 91, 90, 89, 88, 87, 86, 85, 84, 83, 42, 53, 82, 81, 69, 68, 66, 78, 79, 77, 75, 74, 73, 70, 67, 65, 64, 63, 62, 61, 60, 57, 41, 56, 54, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 17, 28, 30, 29, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 14, 13, 12, 11, 6, 3, 2, 1 ],
-    "CurrentThreadCpuTime" : 546723000,
-    "CurrentThreadUserTime" : 453561000
-  }, {
-    "name" : "java.util.logging:type=Logging",
-    "modelerType" : "java.util.logging.Logging",
-    "LoggerNames" : [ "sun.awt.AppContext", "javax.management", "global", "javax.management.mbeanserver", "" ]
-  }, {
-    "name" : "java.lang:type=GarbageCollector,name=ParNew",
-    "modelerType" : "sun.management.GarbageCollectorImpl",
-    "LastGcInfo" : {
-      "GcThreadCount" : 11,
-      "duration" : 3,
-      "endTime" : 34592,
-      "id" : 15,
-      "memoryUsageAfterGc" : [ {
-        "key" : "CMS Perm Gen",
-        "value" : {
-          "committed" : 26673152,
-          "init" : 21757952,
-          "max" : 85983232,
-          "used" : 26483416
-        }
-      }, {
-        "key" : "Code Cache",
-        "value" : {
-          "committed" : 2560000,
-          "init" : 2560000,
-          "max" : 50331648,
-          "used" : 1629120
-        }
-      }, {
-        "key" : "CMS Old Gen",
-        "value" : {
-          "committed" : 65404928,
-          "init" : 65404928,
-          "max" : 917504000,
-          "used" : 28974008
-        }
-      }, {
-        "key" : "Par Eden Space",
-        "value" : {
-          "committed" : 17432576,
-          "init" : 17432576,
-          "max" : 104857600,
-          "used" : 0
-        }
-      }, {
-        "key" : "Par Survivor Space",
-        "value" : {
-          "committed" : 2162688,
-          "init" : 2162688,
-          "max" : 13107200,
-          "used" : 269528
-        }
-      } ],
-      "memoryUsageBeforeGc" : [ {
-        "key" : "CMS Perm Gen",
-        "value" : {
-          "committed" : 26673152,
-          "init" : 21757952,
-          "max" : 85983232,
-          "used" : 26483416
-        }
-      }, {
-        "key" : "Code Cache",
-        "value" : {
-          "committed" : 2560000,
-          "init" : 2560000,
-          "max" : 50331648,
-          "used" : 1629120
-        }
-      }, {
-        "key" : "CMS Old Gen",
-        "value" : {
-          "committed" : 65404928,
-          "init" : 65404928,
-          "max" : 917504000,
-          "used" : 26832760
-        }
-      }, {
-        "key" : "Par Eden Space",
-        "value" : {
-          "committed" : 17432576,
-          "init" : 17432576,
-          "max" : 104857600,
-          "used" : 17431032
-        }
-      }, {
-        "key" : "Par Survivor Space",
-        "value" : {
-          "committed" : 2162688,
-          "init" : 2162688,
-          "max" : 13107200,
-          "used" : 240936
-        }
-      } ],
-      "startTime" : 34589
-    },
-    "CollectionCount" : 16,
-    "CollectionTime" : 53,
-    "Name" : "ParNew",
-    "Valid" : true,
-    "MemoryPoolNames" : [ "Par Eden Space", "Par Survivor Space" ]
-  }, {
-    "name" : "com.sun.management:type=HotSpotDiagnostic",
-    "modelerType" : "sun.management.HotSpotDiagnostic",
-    "DiagnosticOptions" : [ {
-      "name" : "HeapDumpBeforeFullGC",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "HeapDumpAfterFullGC",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "HeapDumpOnOutOfMemoryError",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "HeapDumpPath",
-      "origin" : "DEFAULT",
-      "value" : "",
-      "writeable" : true
-    }, {
-      "name" : "CMSAbortablePrecleanWaitMillis",
-      "origin" : "DEFAULT",
-      "value" : "100",
-      "writeable" : true
-    }, {
-      "name" : "CMSWaitDuration",
-      "origin" : "DEFAULT",
-      "value" : "2000",
-      "writeable" : true
-    }, {
-      "name" : "PrintGC",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintGCDetails",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintGCDateStamps",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintGCTimeStamps",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintClassHistogramBeforeFullGC",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintClassHistogramAfterFullGC",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintClassHistogram",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintConcurrentLocks",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    } ]
-  }, {
-    "name" : "org.apache.ZooKeeperService:name0=StandaloneServer_port-1,name1=Connections,name2=\"fe80:0:0:0:0:0:0:1%1\",name3=0x1357d21e4640002",
-    "modelerType" : "org.apache.zookeeper.server.ConnectionBean",
-    "SessionId" : "0x1357d21e4640002",
-    "OutstandingRequests" : 0,
-    "SessionTimeout" : 40000,
-    "MinLatency" : 0,
-    "AvgLatency" : 0,
-    "MaxLatency" : 34,
-    "PacketsReceived" : 38,
-    "PacketsSent" : 39,
-    "LastOperation" : "GETD",
-    "LastCxid" : "0x24",
-    "LastZxid" : "0x25",
-    "LastResponseTime" : "Tue Feb 14 10:31:38 PST 2012",
-    "LastLatency" : 0,
-    "SourceIP" : "fe80:0:0:0:0:0:0:1%1:61260",
-    "EphemeralNodes" : [ ],
-    "StartedTime" : "Tue Feb 14 10:31:10 PST 2012"
-  }, {
-    "name" : "hadoop:service=HBase,name=RPCStatistics-61256",
-    "modelerType" : "org.apache.hadoop.hbase.ipc.HBaseRPCStatistics",
-    "splitRegionNumOps" : 0,
-    "splitRegionAvgTime" : 0,
-    "splitRegionMinTime" : -1,
-    "splitRegionMaxTime" : 0,
-    "enableTableNumOps" : 0,
-    "enableTableAvgTime" : 0,
-    "enableTableMinTime" : -1,
-    "enableTableMaxTime" : 0,
-    "ReceivedBytes" : 0,
-    "assignNumOps" : 0,
-    "assignAvgTime" : 0,
-    "assignMinTime" : -1,
-    "assignMaxTime" : 0,
-    "isMasterRunning.aboveOneSec.NumOps" : 0,
-    "isMasterRunning.aboveOneSec.AvgTime" : 0,
-    "isMasterRunning.aboveOneSec.MinTime" : -1,
-    "isMasterRunning.aboveOneSec.MaxTime" : 0,
-    "enableTable.aboveOneSec.NumOps" : 0,
-    "enableTable.aboveOneSec.AvgTime" : 0,
-    "enableTable.aboveOneSec.MinTime" : -1,
-    "enableTable.aboveOneSec.MaxTime" : 0,
-    "addColumn.aboveOneSec.NumOps" : 0,
-    "addColumn.aboveOneSec.AvgTime" : 0,
-    "addColumn.aboveOneSec.MinTime" : -1,
-    "addColumn.aboveOneSec.MaxTime" : 0,
-    "createTable.aboveOneSec.NumOps" : 0,
-    "createTable.aboveOneSec.AvgTime" : 0,
-    "createTable.aboveOneSec.MinTime" : -1,
-    "createTable.aboveOneSec.MaxTime" : 0,
-    "getOnlineRegionsNumOps" : 0,
-    "getOnlineRegionsAvgTime" : 0,
-    "getOnlineRegionsMinTime" : -1,
-    "getOnlineRegionsMaxTime" : 0,
-    "RpcSlowResponseNumOps" : 0,
-    "RpcSlowResponseAvgTime" : 0,
-    "RpcSlowResponseMinTime" : -1,
-    "RpcSlowResponseMaxTime" : 0,
-    "closeNumOps" : 0,
-    "closeAvgTime" : 0,
-    "closeMinTime" : -1,
-    "closeMaxTime" : 0,
-    "getProtocolVersion.aboveOneSec.NumOps" : 0,
-    "getProtocolVersion.aboveOneSec.AvgTime" : 0,
-    "getProtocolVersion.aboveOneSec.MinTime" : -1,
-    "getProtocolVersion.aboveOneSec.MaxTime" : 0,
-    "balanceNumOps" : 0,
-    "balanceAvgTime" : 0,
-    "balanceMinTime" : -1,
-    "balanceMaxTime" : 0,
-    "getProtocolSignature.aboveOneSec.NumOps" : 0,
-    "getProtocolSignature.aboveOneSec.AvgTime" : 0,
-    "getProtocolSignature.aboveOneSec.MinTime" : -1,
-    "getProtocolSignature.aboveOneSec.MaxTime" : 0,
-    "balanceSwitch.aboveOneSec.NumOps" : 0,
-    "balanceSwitch.aboveOneSec.AvgTime" : 0,
-    "balanceSwitch.aboveOneSec.MinTime" : -1,
-    "balanceSwitch.aboveOneSec.MaxTime" : 0,
-    "getClusterStatusNumOps" : 0,
-    "getClusterStatusAvgTime" : 0,
-    "getClusterStatusMinTime" : -1,
-    "getClusterStatusMaxTime" : 0,
-    "modifyTable.aboveOneSec.NumOps" : 0,
-    "modifyTable.aboveOneSec.AvgTime" : 0,
-    "modifyTable.aboveOneSec.MinTime" : -1,
-    "modifyTable.aboveOneSec.MaxTime" : 0,
-    "getAlterStatus.aboveOneSec.NumOps" : 0,
-    "getAlterStatus.aboveOneSec.AvgTime" : 0,
-    "getAlterStatus.aboveOneSec.MinTime" : -1,
-    "getAlterStatus.aboveOneSec.MaxTime" : 0,
-    "moveNumOps" : 0,
-    "moveAvgTime" : 0,
-    "moveMinTime" : -1,
-    "moveMaxTime" : 0,
-    "openRegionNumOps" : 0,
-    "openRegionAvgTime" : 0,
-    "openRegionMinTime" : -1,
-    "openRegionMaxTime" : 0,
-    "getBlockCacheColumnFamilySummariesNumOps" : 0,
-    "getBlockCacheColumnFamilySummariesAvgTime" : 0,
-    "getBlockCacheColumnFamilySummariesMinTime" : -1,
-    "getBlockCacheColumnFamilySummariesMaxTime" : 0,
-    "incrementNumOps" : 0,
-    "incrementAvgTime" : 0,
-    "incrementMinTime" : -1,
-    "incrementMaxTime" : 0,
-    "regionServerStartup.aboveOneSec.NumOps" : 0,
-    "regionServerStartup.aboveOneSec.AvgTime" : 0,
-    "regionServerStartup.aboveOneSec.MinTime" : -1,
-    "regionServerStartup.aboveOneSec.MaxTime" : 0,
-    "stopMasterNumOps" : 0,
-    "stopMasterAvgTime" : 0,
-    "stopMasterMinTime" : -1,
-    "stopMasterMaxTime" : 0,
-    "getNumOps" : 0,
-    "getAvgTime" : 0,
-    "getMinTime" : -1,
-    "getMaxTime" : 0,
-    "deleteTableNumOps" : 0,
-    "deleteTableAvgTime" : 0,
-    "deleteTableMinTime" : -1,
-    "deleteTableMaxTime" : 0,
-    "checkAndPutNumOps" : 0,
-    "checkAndPutAvgTime" : 0,
-    "checkAndPutMinTime" : -1,
-    "checkAndPutMaxTime" : 0,
-    "balance.aboveOneSec.NumOps" : 0,
-    "balance.aboveOneSec.AvgTime" : 0,
-    "balance.aboveOneSec.MinTime" : -1,
-    "balance.aboveOneSec.MaxTime" : 0,
-    "deleteColumnNumOps" : 0,
-    "deleteColumnAvgTime" : 0,
-    "deleteColumnMinTime" : -1,
-    "deleteColumnMaxTime" : 0,
-    "disableTable.aboveOneSec.NumOps" : 0,
-    "disableTable.aboveOneSec.AvgTime" : 0,
-    "disableTable.aboveOneSec.MinTime" : -1,
-    "disableTable.aboveOneSec.MaxTime" : 0,
-    "execCoprocessorNumOps" : 0,
-    "execCoprocessorAvgTime" : 0,
-    "execCoprocessorMinTime" : -1,
-    "execCoprocessorMaxTime" : 0,
-    "stopMaster.aboveOneSec.NumOps" : 0,
-    "stopMaster.aboveOneSec.AvgTime" : 0,
-    "stopMaster.aboveOneSec.MinTime" : -1,
-    "stopMaster.aboveOneSec.MaxTime" : 0,
-    "getHTableDescriptorsNumOps" : 0,
-    "getHTableDescriptorsAvgTime" : 0,
-    "getHTableDescriptorsMinTime" : -1,
-    "getHTableDescriptorsMaxTime" : 0,
-    "addColumnNumOps" : 0,
-    "addColumnAvgTime" : 0,
-    "addColumnMinTime" : -1,
-    "addColumnMaxTime" : 0,
-    "callQueueLen" : 0,
-    "multiNumOps" : 0,
-    "multiAvgTime" : 0,
-    "multiMinTime" : -1,
-    "multiMaxTime" : 0,
-    "closeRegionNumOps" : 0,
-    "closeRegionAvgTime" : 0,
-    "closeRegionMinTime" : -1,
-    "closeRegionMaxTime" : 0,
-    "replicateLogEntriesNumOps" : 0,
-    "replicateLogEntriesAvgTime" : 0,
-    "replicateLogEntriesMinTime" : -1,
-    "replicateLogEntriesMaxTime" : 0,
-    "disableTableNumOps" : 0,
-    "disableTableAvgTime" : 0,
-    "disableTableMinTime" : -1,
-    "disableTableMaxTime" : 0,
-    "rpcAuthorizationSuccesses" : 0,
-    "incrementColumnValueNumOps" : 0,
-    "incrementColumnValueAvgTime" : 0,
-    "incrementColumnValueMinTime" : -1,
-    "incrementColumnValueMaxTime" : 0,
-    "bulkLoadHFilesNumOps" : 0,
-    "bulkLoadHFilesAvgTime" : 0,
-    "bulkLoadHFilesMinTime" : -1,
-    "bulkLoadHFilesMaxTime" : 0,
-    "stopNumOps" : 0,
-    "stopAvgTime" : 0,
-    "stopMinTime" : -1,
-    "stopMaxTime" : 0,
-    "createTableNumOps" : 0,
-    "createTableAvgTime" : 0,
-    "createTableMinTime" : -1,
-    "createTableMaxTime" : 0,
-    "putNumOps" : 0,
-    "putAvgTime" : 0,
-    "putMinTime" : -1,
-    "putMaxTime" : 0,
-    "flushRegionNumOps" : 0,
-    "flushRegionAvgTime" : 0,
-    "flushRegionMinTime" : -1,
-    "flushRegionMaxTime" : 0,
-    "nextNumOps" : 0,
-    "nextAvgTime" : 0,
-    "nextMinTime" : -1,
-    "nextMaxTime" : 0,
-    "unlockRowNumOps" : 0,
-    "unlockRowAvgTime" : 0,
-    "unlockRowMinTime" : -1,
-    "unlockRowMaxTime" : 0,
-    "reportRSFatalErrorNumOps" : 0,
-    "reportRSFatalErrorAvgTime" : 0,
-    "reportRSFatalErrorMinTime" : -1,
-    "reportRSFatalErrorMaxTime" : 0,
-    "rpcAuthenticationFailures" : 0,
-    "unassignNumOps" : 0,
-    "unassignAvgTime" : 0,
-    "unassignMinTime" : -1,
-    "unassignMaxTime" : 0,
-    "getRegionInfoNumOps" : 0,
-    "getRegionInfoAvgTime" : 0,
-    "getRegionInfoMinTime" : -1,
-    "getRegionInfoMaxTime" : 0,
-    "getClusterStatus.aboveOneSec.NumOps" : 0,
-    "getClusterStatus.aboveOneSec.AvgTime" : 0,
-    "getClusterStatus.aboveOneSec.MinTime" : -1,
-    "getClusterStatus.aboveOneSec.MaxTime" : 0,
-    "openScannerNumOps" : 0,
-    "openScannerAvgTime" : 0,
-    "openScannerMinTime" : -1,
-    "openScannerMaxTime" : 0,
-    "reportRSFatalError.aboveOneSec.NumOps" : 0,
-    "reportRSFatalError.aboveOneSec.AvgTime" : 0,
-    "reportRSFatalError.aboveOneSec.MinTime" : -1,
-    "reportRSFatalError.aboveOneSec.MaxTime" : 0,
-    "getAlterStatusNumOps" : 0,
-    "getAlterStatusAvgTime" : 0,
-    "getAlterStatusMinTime" : -1,
-    "getAlterStatusMaxTime" : 0,
-    "NumOpenConnections" : 0,
-    "rpcAuthenticationSuccesses" : 0,
-    "deleteNumOps" : 0,
-    "deleteAvgTime" : 0,
-    "deleteMinTime" : -1,
-    "deleteMaxTime" : 0,
-    "RpcProcessingTimeNumOps" : 0,
-    "RpcProcessingTimeAvgTime" : 0,
-    "RpcProcessingTimeMinTime" : 0,
-    "RpcProcessingTimeMaxTime" : 7,
-    "move.aboveOneSec.NumOps" : 0,
-    "move.aboveOneSec.AvgTime" : 0,
-    "move.aboveOneSec.MinTime" : -1,
-    "move.aboveOneSec.MaxTime" : 0,
-    "modifyTableNumOps" : 0,
-    "modifyTableAvgTime" : 0,
-    "modifyTableMinTime" : -1,
-    "modifyTableMaxTime" : 0,
-    "shutdownNumOps" : 0,
-    "shutdownAvgTime" : 0,
-    "shutdownMinTime" : -1,
-    "shutdownMaxTime" : 0,
-    "openRegionsNumOps" : 0,
-    "openRegionsAvgTime" : 0,
-    "openRegionsMinTime" : -1,
-    "openRegionsMaxTime" : 0,
-    "rpcAuthorizationFailures" : 0,
-    "getClosestRowBeforeNumOps" : 0,
-    "getClosestRowBeforeAvgTime" : 0,
-    "getClosestRowBeforeMinTime" : -1,
-    "getClosestRowBeforeMaxTime" : 0,
-    "getHServerInfoNumOps" : 0,
-    "getHServerInfoAvgTime" : 0,
-    "getHServerInfoMinTime" : -1,
-    "getHServerInfoMaxTime" : 0,
-    "getProtocolSignatureNumOps" : 0,
-    "getProtocolSignatureAvgTime" : 0,
-    "getProtocolSignatureMinTime" : -1,
-    "getProtocolSignatureMaxTime" : 0,
-    "getProtocolVersionNumOps" : 0,
-    "getProtocolVersionAvgTime" : 0,
-    "getProtocolVersionMinTime" : 0,
-    "getProtocolVersionMaxTime" : 0,
-    "RpcQueueTimeNumOps" : 0,
-    "RpcQueueTimeAvgTime" : 0,
-    "RpcQueueTimeMinTime" : 0,
-    "RpcQueueTimeMaxTime" : 1,
-    "checkAndDeleteNumOps" : 0,
-    "checkAndDeleteAvgTime" : 0,
-    "checkAndDeleteMinTime" : -1,
-    "checkAndDeleteMaxTime" : 0,
-    "deleteTable.aboveOneSec.NumOps" : 0,
-    "deleteTable.aboveOneSec.AvgTime" : 0,
-    "deleteTable.aboveOneSec.MinTime" : -1,
-    "deleteTable.aboveOneSec.MaxTime" : 0,
-    "SentBytes" : 0,
-    "modifyColumnNumOps" : 0,
-    "modifyColumnAvgTime" : 0,
-    "modifyColumnMinTime" : -1,
-    "modifyColumnMaxTime" : 0,
-    "isMasterRunningNumOps" : 0,
-    "isMasterRunningAvgTime" : 0,
-    "isMasterRunningMinTime" : -1,
-    "isMasterRunningMaxTime" : 0,
-    "lockRowNumOps" : 0,
-    "lockRowAvgTime" : 0,
-    "lockRowMinTime" : -1,
-    "lockRowMaxTime" : 0,
-    "modifyColumn.aboveOneSec.NumOps" : 0,
-    "modifyColumn.aboveOneSec.AvgTime" : 0,
-    "modifyColumn.aboveOneSec.MinTime" : -1,
-    "modifyColumn.aboveOneSec.MaxTime" : 0,
-    "existsNumOps" : 0,
-    "existsAvgTime" : 0,
-    "existsMinTime" : -1,
-    "existsMaxTime" : 0,
-    "regionServerReport.aboveOneSec.NumOps" : 0,
-    "regionServerReport.aboveOneSec.AvgTime" : 0,
-    "regionServerReport.aboveOneSec.MinTime" : -1,
-    "regionServerReport.aboveOneSec.MaxTime" : 0,
-    "assign.aboveOneSec.NumOps" : 0,
-    "assign.aboveOneSec.AvgTime" : 0,
-    "assign.aboveOneSec.MinTime" : -1,
-    "assign.aboveOneSec.MaxTime" : 0,
-    "shutdown.aboveOneSec.NumOps" : 0,
-    "shutdown.aboveOneSec.AvgTime" : 0,
-    "shutdown.aboveOneSec.MinTime" : -1,
-    "shutdown.aboveOneSec.MaxTime" : 0,
-    "unassign.aboveOneSec.NumOps" : 0,
-    "unassign.aboveOneSec.AvgTime" : 0,
-    "unassign.aboveOneSec.MinTime" : -1,
-    "unassign.aboveOneSec.MaxTime" : 0,
-    "compactRegionNumOps" : 0,
-    "compactRegionAvgTime" : 0,
-    "compactRegionMinTime" : -1,
-    "compactRegionMaxTime" : 0,
-    "regionServerStartupNumOps" : 0,
-    "regionServerStartupAvgTime" : 0,
-    "regionServerStartupMinTime" : 7,
-    "regionServerStartupMaxTime" : 7,
-    "deleteColumn.aboveOneSec.NumOps" : 0,
-    "deleteColumn.aboveOneSec.AvgTime" : 0,
-    "deleteColumn.aboveOneSec.MinTime" : -1,
-    "deleteColumn.aboveOneSec.MaxTime" : 0,
-    "regionServerReportNumOps" : 0,
-    "regionServerReportAvgTime" : 0,
-    "regionServerReportMinTime" : 0,
-    "regionServerReportMaxTime" : 1,
-    "getHTableDescriptors.aboveOneSec.NumOps" : 0,
-    "getHTableDescriptors.aboveOneSec.AvgTime" : 0,
-    "getHTableDescriptors.aboveOneSec.MinTime" : -1,
-    "getHTableDescriptors.aboveOneSec.MaxTime" : 0,
-    "rollHLogWriterNumOps" : 0,
-    "rollHLogWriterAvgTime" : 0,
-    "rollHLogWriterMinTime" : -1,
-    "rollHLogWriterMaxTime" : 0,
-    "balanceSwitchNumOps" : 0,
-    "balanceSwitchAvgTime" : 0,
-    "balanceSwitchMinTime" : -1,
-    "balanceSwitchMaxTime" : 0
-  }, {
-    "name" : "hadoop:service=org.apache.hbase,name=RegionServer",
-    "modelerType" : "org.apache.hadoop.hbase.regionserver.MXBeanImpl",
-    "Coprocessors" : [ ],
-    "ServerName" : "10.10.10.149,61258,1329244268491",
-    "ZookeeperQuorum" : "localhost:2181"
-  }, {
-    "name" : "JMImplementation:type=MBeanServerDelegate",
-    "modelerType" : "javax.management.MBeanServerDelegate",
-    "MBeanServerId" : "Hitesh-Shahs-MacBook-Pro.local_1329244267655",
-    "SpecificationName" : "Java Management Extensions",
-    "SpecificationVersion" : "1.4",
-    "SpecificationVendor" : "Sun Microsystems",
-    "ImplementationName" : "JMX",
-    "ImplementationVersion" : "1.6.0_29-b11-402-10M3527",
-    "ImplementationVendor" : "Sun Microsystems"
-  } ]
-}
diff --git a/branch-1.2/contrib/addons/test/dataServices/jmx/data/sample_jobtracker_jmx.json b/branch-1.2/contrib/addons/test/dataServices/jmx/data/sample_jobtracker_jmx.json
deleted file mode 100644
index 8223911..0000000
--- a/branch-1.2/contrib/addons/test/dataServices/jmx/data/sample_jobtracker_jmx.json
+++ /dev/null
@@ -1,894 +0,0 @@
-{
-  "beans" : [ {
-    "name" : "java.lang:type=Memory",
-    "modelerType" : "sun.management.MemoryImpl",
-    "Verbose" : true,
-    "HeapMemoryUsage" : {
-      "committed" : 194183168,
-      "init" : 0,
-      "max" : 1052770304,
-      "used" : 158277552
-    },
-    "NonHeapMemoryUsage" : {
-      "committed" : 42434560,
-      "init" : 24313856,
-      "max" : 136314880,
-      "used" : 27236408
-    },
-    "ObjectPendingFinalizationCount" : 0
-  }, {
-    "name" : "Hadoop:service=JobTracker,name=RpcActivityForPort50300",
-    "modelerType" : "RpcActivityForPort50300",
-    "tag.context" : "rpc",
-    "tag.port" : "50300",
-    "tag.hostName" : "hrt18n03.cc1.ygridcore.net",
-    "rpcAuthenticationSuccesses" : 111,
-    "rpcAuthenticationFailures" : 0,
-    "rpcAuthorizationSuccesses" : 111,
-    "rpcAuthorizationFailures" : 0,
-    "ReceivedBytes" : 49692876,
-    "SentBytes" : 19940623,
-    "RpcQueueTime_num_ops" : 163293,
-    "RpcQueueTime_avg_time" : 0.029731831738041597,
-    "RpcProcessingTime_num_ops" : 163293,
-    "RpcProcessingTime_avg_time" : 0.05778569810095986,
-    "NumOpenConnections" : 0,
-    "callQueueLen" : 0
-  }, {
-    "name" : "Hadoop:service=JobTracker,name=JobTrackerInfo",
-    "modelerType" : "org.apache.hadoop.mapred.JobTracker",
-    "Hostname" : "hrt18n03",
-    "Version" : "1.0.0, r1224962",
-    "ConfigVersion" : "default",
-    "ThreadCount" : 75,
-    "SummaryJson" : "{\"nodes\":10,\"alive\":10,\"blacklisted\":0,\"graylisted\":0,\"slots\":{\"map_slots\":40,\"map_slots_used\":0,\"reduce_slots\":20,\"reduce_slots_used\":0},\"jobs\":105}",
-    "AliveNodesInfoJson" : "[{\"hostname\":\"hrt18n08.cc1.ygridcore.net\",\"last_seen\":1327602719636,\"health\":\"OK\",\"slots\":{\"map_slots\":4,\"map_slots_used\":0,\"reduce_slots\":2,\"reduce_slots_used\":0},\"failures\":0},{\"hostname\":\"hrt18n07.cc1.ygridcore.net\",\"last_seen\":1327602718331,\"health\":\"OK\",\"slots\":{\"map_slots\":4,\"map_slots_used\":0,\"reduce_slots\":2,\"reduce_slots_used\":0},\"failures\":1},{\"hostname\":\"hrt18n11.cc1.ygridcore.net\",\"last_seen\":1327602720791,\"health\":\"OK\",\"slots\":{\"map_slots\":4,\"map_slots_used\":0,\"reduce_slots\":2,\"reduce_slots_used\":0},\"failures\":0},{\"hostname\":\"hrt18n18.cc1.ygridcore.net\",\"last_seen\":1327602719210,\"health\":\"OK\",\"slots\":{\"map_slots\":4,\"map_slots_used\":0,\"reduce_slots\":2,\"reduce_slots_used\":0},\"failures\":0},{\"hostname\":\"hrt18n16.cc1.ygridcore.net\",\"last_seen\":1327602719346,\"health\":\"OK\",\"slots\":{\"map_slots\":4,\"map_slots_used\":0,\"reduce_slots\":2,\"reduce_slots_used\":0},\"failures\":0},{\"hostname\":\"hrt18n12.cc1.ygridcore.net\",\"last_seen\":1327602718700,\"health\":\"OK\",\"slots\":{\"map_slots\":4,\"map_slots_used\":0,\"reduce_slots\":2,\"reduce_slots_used\":0},\"failures\":0},{\"hostname\":\"hrt18n17.cc1.ygridcore.net\",\"last_seen\":1327602718528,\"health\":\"OK\",\"slots\":{\"map_slots\":4,\"map_slots_used\":0,\"reduce_slots\":2,\"reduce_slots_used\":0},\"failures\":1},{\"hostname\":\"hrt18n13.cc1.ygridcore.net\",\"last_seen\":1327602718761,\"health\":\"OK\",\"slots\":{\"map_slots\":4,\"map_slots_used\":0,\"reduce_slots\":2,\"reduce_slots_used\":0},\"failures\":1},{\"hostname\":\"hrt18n10.cc1.ygridcore.net\",\"last_seen\":1327602720027,\"health\":\"OK\",\"slots\":{\"map_slots\":4,\"map_slots_used\":0,\"reduce_slots\":2,\"reduce_slots_used\":0},\"failures\":0},{\"hostname\":\"hrt18n09.cc1.ygridcore.net\",\"last_seen\":1327602719018,\"health\":\"OK\",\"slots\":{\"map_slots\":4,\"map_slots_used\":0,\"reduce_slots\":2,\"reduce_slots_used\":0},\"failures\":1}]",
-    "BlacklistedNodesInfoJson" : "[]",
-    "GraylistedNodesInfoJson" : "[]",
-    "QueueInfoJson" : "{\"default\":{\"state\":\"running\",\"info\":\"Queue configuration\\nCapacity Percentage: 100.0%\\nUser Limit: 100%\\nPriority Supported: NO\\n-------------\\nMap tasks\\nCapacity: 40 slots\\nUsed capacity: 0 (0.0% of Capacity)\\nRunning tasks: 0\\n-------------\\nReduce tasks\\nCapacity: 20 slots\\nUsed capacity: 0 (0.0% of Capacity)\\nRunning tasks: 0\\n-------------\\nJob info\\nNumber of Waiting Jobs: 3\\nNumber of Initializing Jobs: 0\\nNumber of users who have submitted jobs: 0\\n\"}}"
-  }, {
-    "name" : "Hadoop:service=JobTracker,name=MetricsSystem,sub=Control",
-    "modelerType" : "org.apache.hadoop.metrics2.impl.MetricsSystemImpl"
-  }, {
-    "name" : "java.lang:type=GarbageCollector,name=ConcurrentMarkSweep",
-    "modelerType" : "sun.management.GarbageCollectorImpl",
-    "LastGcInfo" : {
-      "GcThreadCount" : 11,
-      "duration" : 39,
-      "endTime" : 5299661,
-      "id" : 1,
-      "memoryUsageAfterGc" : [ {
-        "key" : "CMS Perm Gen",
-        "value" : {
-          "committed" : 23527424,
-          "init" : 21757952,
-          "max" : 85983232,
-          "used" : 23336384
-        }
-      }, {
-        "key" : "Code Cache",
-        "value" : {
-          "committed" : 2818048,
-          "init" : 2555904,
-          "max" : 50331648,
-          "used" : 2701504
-        }
-      }, {
-        "key" : "CMS Old Gen",
-        "value" : {
-          "committed" : 5439488,
-          "init" : 5439488,
-          "max" : 864026624,
-          "used" : 2798368
-        }
-      }, {
-        "key" : "Par Eden Space",
-        "value" : {
-          "committed" : 167772160,
-          "init" : 167772160,
-          "max" : 167772160,
-          "used" : 21728
-        }
-      }, {
-        "key" : "Par Survivor Space",
-        "value" : {
-          "committed" : 20971520,
-          "init" : 20971520,
-          "max" : 20971520,
-          "used" : 2384848
-        }
-      } ],
-      "memoryUsageBeforeGc" : [ {
-        "key" : "CMS Perm Gen",
-        "value" : {
-          "committed" : 23527424,
-          "init" : 21757952,
-          "max" : 85983232,
-          "used" : 23335616
-        }
-      }, {
-        "key" : "Code Cache",
-        "value" : {
-          "committed" : 2818048,
-          "init" : 2555904,
-          "max" : 50331648,
-          "used" : 2701504
-        }
-      }, {
-        "key" : "CMS Old Gen",
-        "value" : {
-          "committed" : 5439488,
-          "init" : 5439488,
-          "max" : 864026624,
-          "used" : 2940536
-        }
-      }, {
-        "key" : "Par Eden Space",
-        "value" : {
-          "committed" : 167772160,
-          "init" : 167772160,
-          "max" : 167772160,
-          "used" : 21728
-        }
-      }, {
-        "key" : "Par Survivor Space",
-        "value" : {
-          "committed" : 20971520,
-          "init" : 20971520,
-          "max" : 20971520,
-          "used" : 2384848
-        }
-      } ],
-      "startTime" : 5299622
-    },
-    "CollectionCount" : 1,
-    "CollectionTime" : 3,
-    "Name" : "ConcurrentMarkSweep",
-    "Valid" : true,
-    "MemoryPoolNames" : [ "Par Eden Space", "Par Survivor Space", "CMS Old Gen", "CMS Perm Gen" ]
-  }, {
-    "name" : "java.lang:type=Compilation",
-    "modelerType" : "sun.management.CompilationImpl",
-    "Name" : "HotSpot 64-Bit Tiered Compilers",
-    "CompilationTimeMonitoringSupported" : true,
-    "TotalCompilationTime" : 9424
-  }, {
-    "name" : "java.lang:type=MemoryPool,name=Par Eden Space",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "Par Eden Space",
-    "Type" : "HEAP",
-    "Valid" : true,
-    "CollectionUsage" : {
-      "committed" : 167772160,
-      "init" : 167772160,
-      "max" : 167772160,
-      "used" : 0
-    },
-    "CollectionUsageThreshold" : 0,
-    "CollectionUsageThresholdCount" : 0,
-    "MemoryManagerNames" : [ "ConcurrentMarkSweep", "ParNew" ],
-    "PeakUsage" : {
-      "committed" : 167772160,
-      "init" : 167772160,
-      "max" : 167772160,
-      "used" : 167772160
-    },
-    "Usage" : {
-      "committed" : 167772160,
-      "init" : 167772160,
-      "max" : 167772160,
-      "used" : 155550224
-    },
-    "CollectionUsageThresholdExceeded" : true,
-    "CollectionUsageThresholdSupported" : true,
-    "UsageThresholdSupported" : false
-  }, {
-    "name" : "Hadoop:service=JobTracker,name=RpcDetailedActivityForPort50300",
-    "modelerType" : "RpcDetailedActivityForPort50300",
-    "tag.context" : "rpcdetailed",
-    "tag.port" : "50300",
-    "tag.hostName" : "hrt18n03.cc1.ygridcore.net",
-    "getProtocolVersion_num_ops" : 185,
-    "getProtocolVersion_avg_time" : 0.0,
-    "getBuildVersion_num_ops" : 10,
-    "getBuildVersion_avg_time" : 0.0,
-    "getSystemDir_num_ops" : 10,
-    "getSystemDir_avg_time" : 0.0,
-    "heartbeat_num_ops" : 150316,
-    "heartbeat_avg_time" : 0.040461427925171115,
-    "getStagingAreaDir_num_ops" : 114,
-    "getStagingAreaDir_avg_time" : 0.587719298245614,
-    "getNewJobId_num_ops" : 114,
-    "getNewJobId_avg_time" : 0.008771929824561401,
-    "getQueueAdmins_num_ops" : 105,
-    "getQueueAdmins_avg_time" : 0.009523809523809525,
-    "submitJob_num_ops" : 105,
-    "submitJob_avg_time" : 28.5142857142857,
-    "getJobProfile_num_ops" : 4230,
-    "getJobProfile_avg_time" : 0.004255319148936176,
-    "getJobStatus_num_ops" : 6321,
-    "getJobStatus_avg_time" : 0.00854295206454675,
-    "getTaskCompletionEvents_num_ops" : 991,
-    "getTaskCompletionEvents_avg_time" : 0.014127144298688218,
-    "getJobCounters_num_ops" : 509,
-    "getJobCounters_avg_time" : 0.3438113948919449,
-    "getMapTaskReports_num_ops" : 140,
-    "getMapTaskReports_avg_time" : 0.1285714285714286,
-    "getReduceTaskReports_num_ops" : 140,
-    "getReduceTaskReports_avg_time" : 0.08571428571428573,
-    "getTaskDiagnostics_num_ops" : 3,
-    "getTaskDiagnostics_avg_time" : 0.0
-  }, {
-    "name" : "java.lang:type=MemoryPool,name=CMS Perm Gen",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "CMS Perm Gen",
-    "Type" : "NON_HEAP",
-    "Valid" : true,
-    "CollectionUsage" : {
-      "committed" : 23527424,
-      "init" : 21757952,
-      "max" : 85983232,
-      "used" : 23336384
-    },
-    "CollectionUsageThreshold" : 0,
-    "CollectionUsageThresholdCount" : 0,
-    "MemoryManagerNames" : [ "ConcurrentMarkSweep" ],
-    "PeakUsage" : {
-      "committed" : 38895616,
-      "init" : 21757952,
-      "max" : 85983232,
-      "used" : 23986152
-    },
-    "Usage" : {
-      "committed" : 38895616,
-      "init" : 21757952,
-      "max" : 85983232,
-      "used" : 23986152
-    },
-    "UsageThreshold" : 0,
-    "UsageThresholdCount" : 0,
-    "CollectionUsageThresholdExceeded" : true,
-    "CollectionUsageThresholdSupported" : true,
-    "UsageThresholdExceeded" : true,
-    "UsageThresholdSupported" : true
-  }, {
-    "name" : "java.lang:type=OperatingSystem",
-    "modelerType" : "com.sun.management.UnixOperatingSystem",
-    "MaxFileDescriptorCount" : 32768,
-    "OpenFileDescriptorCount" : 93,
-    "CommittedVirtualMemorySize" : 1666752512,
-    "FreePhysicalMemorySize" : 13394165760,
-    "FreeSwapSpaceSize" : 15999885312,
-    "ProcessCpuTime" : 41790000000,
-    "TotalPhysicalMemorySize" : 16830111744,
-    "TotalSwapSpaceSize" : 15999885312,
-    "Name" : "Linux",
-    "Version" : "2.6.18-238.1.1.el5.YAHOO.20110221",
-    "AvailableProcessors" : 8,
-    "Arch" : "amd64",
-    "SystemLoadAverage" : 0.0
-  }, {
-    "name" : "Hadoop:service=JobTracker,name=QueueMetrics,q=default",
-    "modelerType" : "QueueMetrics,q=default",
-    "tag.context" : "mapred",
-    "tag.sessionId" : "",
-    "tag.Queue" : "default",
-    "tag.hostName" : "hrt18n03.cc1.ygridcore.net",
-    "maps_launched" : 121,
-    "maps_completed" : 117,
-    "maps_failed" : 4,
-    "reduces_launched" : 25,
-    "reduces_completed" : 25,
-    "reduces_failed" : 0,
-    "jobs_submitted" : 105,
-    "jobs_completed" : 104,
-    "waiting_maps" : 1,
-    "waiting_reduces" : 0,
-    "reserved_map_slots" : 0,
-    "reserved_reduce_slots" : 0,
-    "jobs_failed" : 1,
-    "jobs_killed" : 0,
-    "jobs_preparing" : 0,
-    "jobs_running" : 0,
-    "maps_killed" : 0,
-    "reduces_killed" : 0
-  }, {
-    "name" : "java.lang:type=MemoryPool,name=Par Survivor Space",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "Par Survivor Space",
-    "Type" : "HEAP",
-    "Valid" : true,
-    "CollectionUsage" : {
-      "committed" : 20971520,
-      "init" : 20971520,
-      "max" : 20971520,
-      "used" : 92224
-    },
-    "CollectionUsageThreshold" : 0,
-    "CollectionUsageThresholdCount" : 0,
-    "MemoryManagerNames" : [ "ConcurrentMarkSweep", "ParNew" ],
-    "PeakUsage" : {
-      "committed" : 20971520,
-      "init" : 20971520,
-      "max" : 20971520,
-      "used" : 6725680
-    },
-    "Usage" : {
-      "committed" : 20971520,
-      "init" : 20971520,
-      "max" : 20971520,
-      "used" : 92224
-    },
-    "CollectionUsageThresholdExceeded" : true,
-    "CollectionUsageThresholdSupported" : true,
-    "UsageThresholdSupported" : false
-  }, {
-    "name" : "java.lang:type=MemoryManager,name=CodeCacheManager",
-    "modelerType" : "sun.management.MemoryManagerImpl",
-    "Name" : "CodeCacheManager",
-    "Valid" : true,
-    "MemoryPoolNames" : [ "Code Cache" ]
-  }, {
-    "name" : "java.lang:type=MemoryPool,name=CMS Old Gen",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "CMS Old Gen",
-    "Type" : "HEAP",
-    "Valid" : true,
-    "CollectionUsage" : {
-      "committed" : 5439488,
-      "init" : 5439488,
-      "max" : 864026624,
-      "used" : 2798368
-    },
-    "CollectionUsageThreshold" : 0,
-    "CollectionUsageThresholdCount" : 0,
-    "MemoryManagerNames" : [ "ConcurrentMarkSweep" ],
-    "PeakUsage" : {
-      "committed" : 5439488,
-      "init" : 5439488,
-      "max" : 864026624,
-      "used" : 3773152
-    },
-    "Usage" : {
-      "committed" : 5439488,
-      "init" : 5439488,
-      "max" : 864026624,
-      "used" : 3773152
-    },
-    "UsageThreshold" : 0,
-    "UsageThresholdCount" : 0,
-    "CollectionUsageThresholdExceeded" : true,
-    "CollectionUsageThresholdSupported" : true,
-    "UsageThresholdExceeded" : true,
-    "UsageThresholdSupported" : true
-  }, {
-    "name" : "Hadoop:service=JobTracker,name=JobTrackerMetrics",
-    "modelerType" : "JobTrackerMetrics",
-    "tag.context" : "mapred",
-    "tag.sessionId" : "",
-    "tag.hostName" : "hrt18n03.cc1.ygridcore.net",
-    "map_slots" : 40,
-    "reduce_slots" : 20,
-    "blacklisted_maps" : 0,
-    "blacklisted_reduces" : 0,
-    "maps_launched" : 121,
-    "maps_completed" : 117,
-    "maps_failed" : 4,
-    "reduces_launched" : 25,
-    "reduces_completed" : 25,
-    "reduces_failed" : 0,
-    "jobs_submitted" : 105,
-    "jobs_completed" : 104,
-    "waiting_maps" : 1,
-    "waiting_reduces" : 0,
-    "reserved_map_slots" : 0,
-    "reserved_reduce_slots" : 0,
-    "occupied_map_slots" : 0,
-    "occupied_reduce_slots" : 0,
-    "jobs_failed" : 1,
-    "jobs_killed" : 0,
-    "jobs_preparing" : 0,
-    "jobs_running" : 0,
-    "running_maps" : 0,
-    "running_reduces" : 0,
-    "maps_killed" : 0,
-    "reduces_killed" : 0,
-    "trackers" : 10,
-    "trackers_blacklisted" : 0,
-    "trackers_graylisted" : 0,
-    "trackers_decommissioned" : 0,
-    "heartbeats" : 150316
-  }, {
-    "name" : "java.lang:type=MemoryPool,name=Code Cache",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "Code Cache",
-    "Type" : "NON_HEAP",
-    "Valid" : true,
-    "CollectionUsage" : null,
-    "MemoryManagerNames" : [ "CodeCacheManager" ],
-    "PeakUsage" : {
-      "committed" : 3538944,
-      "init" : 2555904,
-      "max" : 50331648,
-      "used" : 3454464
-    },
-    "Usage" : {
-      "committed" : 3538944,
-      "init" : 2555904,
-      "max" : 50331648,
-      "used" : 3444160
-    },
-    "UsageThreshold" : 0,
-    "UsageThresholdCount" : 0,
-    "CollectionUsageThresholdSupported" : false,
-    "UsageThresholdExceeded" : true,
-    "UsageThresholdSupported" : true
-  }, {
-    "name" : "java.lang:type=Runtime",
-    "modelerType" : "sun.management.RuntimeImpl",
-    "Name" : "18357@hrt18n03.cc1.ygridcore.net",
-    "ClassPath" : "/etc/hadoop:/usr/hadoop-jdk1.6.0_26/lib/tools.jar:/usr/libexec/../share/hadoop:/usr/libexec/../share/hadoop/hadoop-core-1.0.0.jar:/usr/libexec/../share/hadoop/lib/asm-3.2.jar:/usr/libexec/../share/hadoop/lib/aspectjrt-1.6.5.jar:/usr/libexec/../share/hadoop/lib/aspectjtools-1.6.5.jar:/usr/libexec/../share/hadoop/lib/commons-beanutils-1.7.0.jar:/usr/libexec/../share/hadoop/lib/commons-beanutils-core-1.8.0.jar:/usr/libexec/../share/hadoop/lib/commons-cli-1.2.jar:/usr/libexec/../share/hadoop/lib/commons-codec-1.4.jar:/usr/libexec/../share/hadoop/lib/commons-collections-3.2.1.jar:/usr/libexec/../share/hadoop/lib/commons-configuration-1.6.jar:/usr/libexec/../share/hadoop/lib/commons-daemon-1.0.1.jar:/usr/libexec/../share/hadoop/lib/commons-digester-1.8.jar:/usr/libexec/../share/hadoop/lib/commons-el-1.0.jar:/usr/libexec/../share/hadoop/lib/commons-httpclient-3.0.1.jar:/usr/libexec/../share/hadoop/lib/commons-lang-2.4.jar:/usr/libexec/../share/hadoop/lib/commons-logging-1.1.1.jar:/usr/libexec/../share/hadoop/lib/commons-logging-api-1.0.4.jar:/usr/libexec/../share/hadoop/lib/commons-math-2.1.jar:/usr/libexec/../share/hadoop/lib/commons-net-1.4.1.jar:/usr/libexec/../share/hadoop/lib/core-3.1.1.jar:/usr/libexec/../share/hadoop/lib/hadoop-capacity-scheduler-0.20.206.0-SNAPSHOT.jar:/usr/libexec/../share/hadoop/lib/hadoop-capacity-scheduler-1.0.0.jar:/usr/libexec/../share/hadoop/lib/hadoop-fairscheduler-1.0.0.jar:/usr/libexec/../share/hadoop/lib/hadoop-thriftfs-1.0.0.jar:/usr/libexec/../share/hadoop/lib/hsqldb-1.8.0.10.jar:/usr/libexec/../share/hadoop/lib/jackson-core-asl-1.0.1.jar:/usr/libexec/../share/hadoop/lib/jackson-mapper-asl-1.0.1.jar:/usr/libexec/../share/hadoop/lib/jasper-compiler-5.5.12.jar:/usr/libexec/../share/hadoop/lib/jasper-runtime-5.5.12.jar:/usr/libexec/../share/hadoop/lib/jdeb-0.8.jar:/usr/libexec/../share/hadoop/lib/jersey-core-1.8.jar:/usr/libexec/../share/hadoop/lib/jersey-json-1.8.jar:/usr/libexec/../share/hadoop/lib/jersey-server-1.8.jar:/usr/libexec/../share/hadoop/lib/jets3t-0.6.1.jar:/usr/libexec/../share/hadoop/lib/jetty-6.1.26.jar:/usr/libexec/../share/hadoop/lib/jetty-util-6.1.26.jar:/usr/libexec/../share/hadoop/lib/jsch-0.1.42.jar:/usr/libexec/../share/hadoop/lib/junit-4.5.jar:/usr/libexec/../share/hadoop/lib/kfs-0.2.2.jar:/usr/libexec/../share/hadoop/lib/log4j-1.2.15.jar:/usr/libexec/../share/hadoop/lib/mockito-all-1.8.5.jar:/usr/libexec/../share/hadoop/lib/oro-2.0.8.jar:/usr/libexec/../share/hadoop/lib/servlet-api-2.5-20081211.jar:/usr/libexec/../share/hadoop/lib/slf4j-api-1.4.3.jar:/usr/libexec/../share/hadoop/lib/slf4j-log4j12-1.4.3.jar:/usr/libexec/../share/hadoop/lib/xmlenc-0.52.jar:/usr/libexec/../share/hadoop/lib/jsp-2.1/jsp-2.1.jar:/usr/libexec/../share/hadoop/lib/jsp-2.1/jsp-api-2.1.jar",
-    "StartTime" : 1327557546375,
-    "BootClassPath" : "/usr/jdk64/jdk1.6.0_26/jre/lib/resources.jar:/usr/jdk64/jdk1.6.0_26/jre/lib/rt.jar:/usr/jdk64/jdk1.6.0_26/jre/lib/sunrsasign.jar:/usr/jdk64/jdk1.6.0_26/jre/lib/jsse.jar:/usr/jdk64/jdk1.6.0_26/jre/lib/jce.jar:/usr/jdk64/jdk1.6.0_26/jre/lib/charsets.jar:/usr/jdk64/jdk1.6.0_26/jre/lib/modules/jdk.boot.jar:/usr/jdk64/jdk1.6.0_26/jre/classes",
-    "LibraryPath" : "/usr/libexec/../lib",
-    "VmName" : "Java HotSpot(TM) 64-Bit Server VM",
-    "VmVendor" : "Sun Microsystems Inc.",
-    "VmVersion" : "20.1-b02",
-    "BootClassPathSupported" : true,
-    "InputArguments" : [ "-Dproc_jobtracker", "-Xmx1000m", "-Djava.net.preferIPv4Stack=true", "-Djava.net.preferIPv4Stack=true", "-Djava.net.preferIPv4Stack=true", "-Djava.net.preferIPv4Stack=true", "-XX:ParallelGCThreads=8", "-XX:+UseConcMarkSweepGC", "-XX:ErrorFile=/grid/0/var/log/hadoop/log/hrt_mr/hs_err_pid%p.log", "-XX:NewSize=200m", "-XX:MaxNewSize=200m", "-Xloggc:/grid/0/var/log/hadoop/log/hrt_mr/gc.log-201201260559", "-verbose:gc", "-XX:+PrintGCDetails", "-XX:+PrintGCTimeStamps", "-XX:+PrintGCDateStamps", "-Xmx1024m", "-Dhadoop.security.logger=INFO,DRFAS", "-Dmapred.audit.logger=INFO,MRAUDIT", "-Dhadoop.mapreduce.jobsummary.logger=INFO,JSA", "-Dhadoop.log.dir=/grid/0/var/log/hadoop/hrt_mr", "-Dhadoop.log.file=hadoop-hrt_mr-jobtracker-hrt18n03.cc1.ygridcore.net.log", "-Dhadoop.home.dir=/usr/libexec/..", "-Dhadoop.id.str=hrt_mr", "-Dhadoop.root.logger=INFO,DRFA", "-Dhadoop.security.logger=INFO,DRFAS", "-Djava.library.path=/usr/libexec/../lib", "-Dhadoop.policy.file=hadoop-policy.xml" ],
-    "ManagementSpecVersion" : "1.2",
-    "SpecName" : "Java Virtual Machine Specification",
-    "SpecVendor" : "Sun Microsystems Inc.",
-    "SpecVersion" : "1.0",
-    "SystemProperties" : [ {
-      "key" : "java.ext.dirs",
-      "value" : "/usr/jdk64/jdk1.6.0_26/jre/lib/ext:/usr/java/packages/lib/ext"
-    }, {
-      "key" : "hadoop.home.dir",
-      "value" : "/usr/libexec/.."
-    }, {
-      "key" : "java.vm.specification.vendor",
-      "value" : "Sun Microsystems Inc."
-    }, {
-      "key" : "user.timezone",
-      "value" : "Etc/UTC"
-    }, {
-      "key" : "java.vm.vendor",
-      "value" : "Sun Microsystems Inc."
-    }, {
-      "key" : "hadoop.id.str",
-      "value" : "hrt_mr"
-    }, {
-      "key" : "user.name",
-      "value" : "hrt_mr"
-    }, {
-      "key" : "java.vm.specification.name",
-      "value" : "Java Virtual Machine Specification"
-    }, {
-      "key" : "user.dir",
-      "value" : "/usr"
-    }, {
-      "key" : "user.country",
-      "value" : "US"
-    }, {
-      "key" : "user.language",
-      "value" : "en"
-    }, {
-      "key" : "java.specification.version",
-      "value" : "1.6"
-    }, {
-      "key" : "hadoop.log.file",
-      "value" : "hadoop-hrt_mr-jobtracker-hrt18n03.cc1.ygridcore.net.log"
-    }, {
-      "key" : "hadoop.policy.file",
-      "value" : "hadoop-policy.xml"
-    }, {
-      "key" : "sun.cpu.endian",
-      "value" : "little"
-    }, {
-      "key" : "java.home",
-      "value" : "/usr/jdk64/jdk1.6.0_26/jre"
-    }, {
-      "key" : "java.net.preferIPv4Stack",
-      "value" : "true"
-    }, {
-      "key" : "sun.jnu.encoding",
-      "value" : "UTF-8"
-    }, {
-      "key" : "file.separator",
-      "value" : "/"
-    }, {
-      "key" : "java.vendor.url",
-      "value" : "http://java.sun.com/"
-    }, {
-      "key" : "hadoop.mapreduce.jobsummary.logger",
-      "value" : "INFO,JSA"
-    }, {
-      "key" : "java.awt.graphicsenv",
-      "value" : "sun.awt.X11GraphicsEnvironment"
-    }, {
-      "key" : "hadoop.log.dir",
-      "value" : "/grid/0/var/log/hadoop/hrt_mr"
-    }, {
-      "key" : "os.arch",
-      "value" : "amd64"
-    }, {
-      "key" : "proc_jobtracker",
-      "value" : ""
-    }, {
-      "key" : "java.io.tmpdir",
-      "value" : "/tmp"
-    }, {
-      "key" : "java.runtime.name",
-      "value" : "Java(TM) SE Runtime Environment"
-    }, {
-      "key" : "java.awt.printerjob",
-      "value" : "sun.print.PSPrinterJob"
-    }, {
-      "key" : "mapred.audit.logger",
-      "value" : "INFO,MRAUDIT"
-    }, {
-      "key" : "file.encoding",
-      "value" : "UTF-8"
-    }, {
-      "key" : "java.version",
-      "value" : "1.6.0_26"
-    }, {
-      "key" : "java.vendor.url.bug",
-      "value" : "http://java.sun.com/cgi-bin/bugreport.cgi"
-    }, {
-      "key" : "java.vm.specification.version",
-      "value" : "1.0"
-    }, {
-      "key" : "file.encoding.pkg",
-      "value" : "sun.io"
-    }, {
-      "key" : "sun.java.command",
-      "value" : "org.apache.hadoop.mapred.JobTracker"
-    }, {
-      "key" : "sun.java.launcher",
-      "value" : "SUN_STANDARD"
-    }, {
-      "key" : "path.separator",
-      "value" : ":"
-    }, {
-      "key" : "java.runtime.version",
-      "value" : "1.6.0_26-b03"
-    }, {
-      "key" : "java.class.path",
-      "value" : "/etc/hadoop:/usr/hadoop-jdk1.6.0_26/lib/tools.jar:/usr/libexec/../share/hadoop:/usr/libexec/../share/hadoop/hadoop-core-1.0.0.jar:/usr/libexec/../share/hadoop/lib/asm-3.2.jar:/usr/libexec/../share/hadoop/lib/aspectjrt-1.6.5.jar:/usr/libexec/../share/hadoop/lib/aspectjtools-1.6.5.jar:/usr/libexec/../share/hadoop/lib/commons-beanutils-1.7.0.jar:/usr/libexec/../share/hadoop/lib/commons-beanutils-core-1.8.0.jar:/usr/libexec/../share/hadoop/lib/commons-cli-1.2.jar:/usr/libexec/../share/hadoop/lib/commons-codec-1.4.jar:/usr/libexec/../share/hadoop/lib/commons-collections-3.2.1.jar:/usr/libexec/../share/hadoop/lib/commons-configuration-1.6.jar:/usr/libexec/../share/hadoop/lib/commons-daemon-1.0.1.jar:/usr/libexec/../share/hadoop/lib/commons-digester-1.8.jar:/usr/libexec/../share/hadoop/lib/commons-el-1.0.jar:/usr/libexec/../share/hadoop/lib/commons-httpclient-3.0.1.jar:/usr/libexec/../share/hadoop/lib/commons-lang-2.4.jar:/usr/libexec/../share/hadoop/lib/commons-logging-1.1.1.jar:/usr/libexec/../share/hadoop/lib/commons-logging-api-1.0.4.jar:/usr/libexec/../share/hadoop/lib/commons-math-2.1.jar:/usr/libexec/../share/hadoop/lib/commons-net-1.4.1.jar:/usr/libexec/../share/hadoop/lib/core-3.1.1.jar:/usr/libexec/../share/hadoop/lib/hadoop-capacity-scheduler-0.20.206.0-SNAPSHOT.jar:/usr/libexec/../share/hadoop/lib/hadoop-capacity-scheduler-1.0.0.jar:/usr/libexec/../share/hadoop/lib/hadoop-fairscheduler-1.0.0.jar:/usr/libexec/../share/hadoop/lib/hadoop-thriftfs-1.0.0.jar:/usr/libexec/../share/hadoop/lib/hsqldb-1.8.0.10.jar:/usr/libexec/../share/hadoop/lib/jackson-core-asl-1.0.1.jar:/usr/libexec/../share/hadoop/lib/jackson-mapper-asl-1.0.1.jar:/usr/libexec/../share/hadoop/lib/jasper-compiler-5.5.12.jar:/usr/libexec/../share/hadoop/lib/jasper-runtime-5.5.12.jar:/usr/libexec/../share/hadoop/lib/jdeb-0.8.jar:/usr/libexec/../share/hadoop/lib/jersey-core-1.8.jar:/usr/libexec/../share/hadoop/lib/jersey-json-1.8.jar:/usr/libexec/../share/hadoop/lib/jersey-server-1.8.jar:/usr/libexec/../share/hadoop/lib/jets3t-0.6.1.jar:/usr/libexec/../share/hadoop/lib/jetty-6.1.26.jar:/usr/libexec/../share/hadoop/lib/jetty-util-6.1.26.jar:/usr/libexec/../share/hadoop/lib/jsch-0.1.42.jar:/usr/libexec/../share/hadoop/lib/junit-4.5.jar:/usr/libexec/../share/hadoop/lib/kfs-0.2.2.jar:/usr/libexec/../share/hadoop/lib/log4j-1.2.15.jar:/usr/libexec/../share/hadoop/lib/mockito-all-1.8.5.jar:/usr/libexec/../share/hadoop/lib/oro-2.0.8.jar:/usr/libexec/../share/hadoop/lib/servlet-api-2.5-20081211.jar:/usr/libexec/../share/hadoop/lib/slf4j-api-1.4.3.jar:/usr/libexec/../share/hadoop/lib/slf4j-log4j12-1.4.3.jar:/usr/libexec/../share/hadoop/lib/xmlenc-0.52.jar:/usr/libexec/../share/hadoop/lib/jsp-2.1/jsp-2.1.jar:/usr/libexec/../share/hadoop/lib/jsp-2.1/jsp-api-2.1.jar"
-    }, {
-      "key" : "os.name",
-      "value" : "Linux"
-    }, {
-      "key" : "hadoop.security.logger",
-      "value" : "INFO,DRFAS"
-    }, {
-      "key" : "line.separator",
-      "value" : "\n"
-    }, {
-      "key" : "os.version",
-      "value" : "2.6.18-238.1.1.el5.YAHOO.20110221"
-    }, {
-      "key" : "sun.arch.data.model",
-      "value" : "64"
-    }, {
-      "key" : "java.class.version",
-      "value" : "50.0"
-    }, {
-      "key" : "sun.io.unicode.encoding",
-      "value" : "UnicodeLittle"
-    }, {
-      "key" : "java.vendor",
-      "value" : "Sun Microsystems Inc."
-    }, {
-      "key" : "sun.boot.class.path",
-      "value" : "/usr/jdk64/jdk1.6.0_26/jre/lib/resources.jar:/usr/jdk64/jdk1.6.0_26/jre/lib/rt.jar:/usr/jdk64/jdk1.6.0_26/jre/lib/sunrsasign.jar:/usr/jdk64/jdk1.6.0_26/jre/lib/jsse.jar:/usr/jdk64/jdk1.6.0_26/jre/lib/jce.jar:/usr/jdk64/jdk1.6.0_26/jre/lib/charsets.jar:/usr/jdk64/jdk1.6.0_26/jre/lib/modules/jdk.boot.jar:/usr/jdk64/jdk1.6.0_26/jre/classes"
-    }, {
-      "key" : "java.vm.info",
-      "value" : "mixed mode"
-    }, {
-      "key" : "java.specification.name",
-      "value" : "Java Platform API Specification"
-    }, {
-      "key" : "java.vm.name",
-      "value" : "Java HotSpot(TM) 64-Bit Server VM"
-    }, {
-      "key" : "java.vm.version",
-      "value" : "20.1-b02"
-    }, {
-      "key" : "sun.boot.library.path",
-      "value" : "/usr/jdk64/jdk1.6.0_26/jre/lib/amd64"
-    }, {
-      "key" : "hadoop.root.logger",
-      "value" : "INFO,DRFA"
-    }, {
-      "key" : "java.endorsed.dirs",
-      "value" : "/usr/jdk64/jdk1.6.0_26/jre/lib/endorsed"
-    }, {
-      "key" : "sun.os.patch.level",
-      "value" : "unknown"
-    }, {
-      "key" : "sun.cpu.isalist",
-      "value" : ""
-    }, {
-      "key" : "user.home",
-      "value" : "/homes/hrt_mr"
-    }, {
-      "key" : "java.library.path",
-      "value" : "/usr/libexec/../lib"
-    }, {
-      "key" : "java.specification.vendor",
-      "value" : "Sun Microsystems Inc."
-    }, {
-      "key" : "sun.management.compiler",
-      "value" : "HotSpot 64-Bit Tiered Compilers"
-    } ],
-    "Uptime" : 45174540
-  }, {
-    "name" : "java.lang:type=ClassLoading",
-    "modelerType" : "sun.management.ClassLoadingImpl",
-    "LoadedClassCount" : 3038,
-    "UnloadedClassCount" : 0,
-    "TotalLoadedClassCount" : 3038,
-    "Verbose" : false
-  }, {
-    "name" : "java.lang:type=Threading",
-    "modelerType" : "sun.management.ThreadImpl",
-    "ThreadAllocatedMemoryEnabled" : true,
-    "ThreadAllocatedMemorySupported" : true,
-    "ThreadCount" : 75,
-    "DaemonThreadCount" : 65,
-    "PeakThreadCount" : 80,
-    "CurrentThreadCpuTimeSupported" : true,
-    "ObjectMonitorUsageSupported" : true,
-    "SynchronizerUsageSupported" : true,
-    "ThreadContentionMonitoringEnabled" : false,
-    "ThreadContentionMonitoringSupported" : true,
-    "ThreadCpuTimeEnabled" : true,
-    "AllThreadIds" : [ 92, 91, 86, 85, 83, 82, 81, 80, 79, 78, 77, 76, 75, 74, 73, 72, 71, 70, 69, 68, 67, 66, 65, 64, 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 16, 18, 33, 13, 32, 31, 29, 30, 27, 25, 23, 22, 21, 17, 15, 12, 11, 5, 3, 2, 1 ],
-    "CurrentThreadCpuTime" : 170000000,
-    "CurrentThreadUserTime" : 170000000,
-    "TotalStartedThreadCount" : 964,
-    "ThreadCpuTimeSupported" : true
-  }, {
-    "name" : "Hadoop:service=JobTracker,name=jvm",
-    "modelerType" : "jvm",
-    "tag.context" : "jvm",
-    "tag.processName" : "JobTracker",
-    "tag.sessionId" : "",
-    "tag.hostName" : "hrt18n03.cc1.ygridcore.net",
-    "memNonHeapUsedM" : 26.174255,
-    "memNonHeapCommittedM" : 40.46875,
-    "memHeapUsedM" : 153.11438,
-    "memHeapCommittedM" : 185.1875,
-    "gcCount" : 15,
-    "gcTimeMillis" : 76,
-    "threadsNew" : 0,
-    "threadsRunnable" : 6,
-    "threadsBlocked" : 0,
-    "threadsWaiting" : 57,
-    "threadsTimedWaiting" : 12,
-    "threadsTerminated" : 0,
-    "logFatal" : 0,
-    "logError" : 0,
-    "logWarn" : 0,
-    "logInfo" : 3
-  }, {
-    "name" : "java.util.logging:type=Logging",
-    "modelerType" : "java.util.logging.Logging",
-    "LoggerNames" : [ "sun.awt.AppContext", "javax.security.sasl", "javax.management", "javax.management.misc", "global", "javax.management.mbeanserver", "" ]
-  }, {
-    "name" : "java.lang:type=GarbageCollector,name=ParNew",
-    "modelerType" : "sun.management.GarbageCollectorImpl",
-    "LastGcInfo" : {
-      "GcThreadCount" : 11,
-      "duration" : 1,
-      "endTime" : 39278735,
-      "id" : 14,
-      "memoryUsageAfterGc" : [ {
-        "key" : "CMS Perm Gen",
-        "value" : {
-          "committed" : 38895616,
-          "init" : 21757952,
-          "max" : 85983232,
-          "used" : 23469032
-        }
-      }, {
-        "key" : "Code Cache",
-        "value" : {
-          "committed" : 3538944,
-          "init" : 2555904,
-          "max" : 50331648,
-          "used" : 3435392
-        }
-      }, {
-        "key" : "CMS Old Gen",
-        "value" : {
-          "committed" : 5439488,
-          "init" : 5439488,
-          "max" : 864026624,
-          "used" : 3773152
-        }
-      }, {
-        "key" : "Par Eden Space",
-        "value" : {
-          "committed" : 167772160,
-          "init" : 167772160,
-          "max" : 167772160,
-          "used" : 0
-        }
-      }, {
-        "key" : "Par Survivor Space",
-        "value" : {
-          "committed" : 20971520,
-          "init" : 20971520,
-          "max" : 20971520,
-          "used" : 92224
-        }
-      } ],
-      "memoryUsageBeforeGc" : [ {
-        "key" : "CMS Perm Gen",
-        "value" : {
-          "committed" : 38895616,
-          "init" : 21757952,
-          "max" : 85983232,
-          "used" : 23469032
-        }
-      }, {
-        "key" : "Code Cache",
-        "value" : {
-          "committed" : 3538944,
-          "init" : 2555904,
-          "max" : 50331648,
-          "used" : 3435392
-        }
-      }, {
-        "key" : "CMS Old Gen",
-        "value" : {
-          "committed" : 5439488,
-          "init" : 5439488,
-          "max" : 864026624,
-          "used" : 3771976
-        }
-      }, {
-        "key" : "Par Eden Space",
-        "value" : {
-          "committed" : 167772160,
-          "init" : 167772160,
-          "max" : 167772160,
-          "used" : 167772160
-        }
-      }, {
-        "key" : "Par Survivor Space",
-        "value" : {
-          "committed" : 20971520,
-          "init" : 20971520,
-          "max" : 20971520,
-          "used" : 218560
-        }
-      } ],
-      "startTime" : 39278734
-    },
-    "CollectionCount" : 14,
-    "CollectionTime" : 73,
-    "Name" : "ParNew",
-    "Valid" : true,
-    "MemoryPoolNames" : [ "Par Eden Space", "Par Survivor Space" ]
-  }, {
-    "name" : "com.sun.management:type=HotSpotDiagnostic",
-    "modelerType" : "sun.management.HotSpotDiagnostic",
-    "DiagnosticOptions" : [ {
-      "name" : "HeapDumpBeforeFullGC",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "HeapDumpAfterFullGC",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "HeapDumpOnOutOfMemoryError",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "HeapDumpPath",
-      "origin" : "DEFAULT",
-      "value" : "",
-      "writeable" : true
-    }, {
-      "name" : "CMSAbortablePrecleanWaitMillis",
-      "origin" : "DEFAULT",
-      "value" : "100",
-      "writeable" : true
-    }, {
-      "name" : "CMSWaitDuration",
-      "origin" : "DEFAULT",
-      "value" : "2000",
-      "writeable" : true
-    }, {
-      "name" : "PrintGC",
-      "origin" : "VM_CREATION",
-      "value" : "true",
-      "writeable" : true
-    }, {
-      "name" : "PrintGCDetails",
-      "origin" : "VM_CREATION",
-      "value" : "true",
-      "writeable" : true
-    }, {
-      "name" : "PrintGCDateStamps",
-      "origin" : "VM_CREATION",
-      "value" : "true",
-      "writeable" : true
-    }, {
-      "name" : "PrintGCTimeStamps",
-      "origin" : "VM_CREATION",
-      "value" : "true",
-      "writeable" : true
-    }, {
-      "name" : "PrintClassHistogramBeforeFullGC",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintClassHistogramAfterFullGC",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintClassHistogram",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintConcurrentLocks",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    } ]
-  }, {
-    "name" : "Hadoop:service=JobTracker,name=ugi",
-    "modelerType" : "ugi",
-    "tag.context" : "ugi",
-    "tag.hostName" : "hrt18n03.cc1.ygridcore.net",
-    "loginSuccess_num_ops" : 1,
-    "loginSuccess_avg_time" : 24.0,
-    "loginFailure_num_ops" : 0,
-    "loginFailure_avg_time" : 0.0
-  }, {
-    "name" : "Hadoop:service=JobTracker,name=MetricsSystem,sub=Stats",
-    "modelerType" : "MetricsSystem,sub=Stats",
-    "tag.context" : "metricssystem",
-    "tag.hostName" : "hrt18n03.cc1.ygridcore.net",
-    "num_sources" : 6,
-    "num_sinks" : 0,
-    "snapshot_num_ops" : 0,
-    "snapshot_avg_time" : 0.0,
-    "snapshot_stdev_time" : 0.0,
-    "snapshot_imin_time" : 1.7976931348623157E308,
-    "snapshot_imax_time" : 4.9E-324,
-    "snapshot_min_time" : 1.7976931348623157E308,
-    "snapshot_max_time" : 4.9E-324,
-    "publish_num_ops" : 0,
-    "publish_avg_time" : 0.0,
-    "publish_stdev_time" : 0.0,
-    "publish_imin_time" : 1.7976931348623157E308,
-    "publish_imax_time" : 4.9E-324,
-    "publish_min_time" : 1.7976931348623157E308,
-    "publish_max_time" : 4.9E-324,
-    "dropped_pub_all" : 0
-  }, {
-    "name" : "JMImplementation:type=MBeanServerDelegate",
-    "modelerType" : "javax.management.MBeanServerDelegate",
-    "MBeanServerId" : "hrt18n03.cc1.ygridcore.net_1327557546830",
-    "SpecificationName" : "Java Management Extensions",
-    "SpecificationVersion" : "1.4",
-    "SpecificationVendor" : "Sun Microsystems",
-    "ImplementationName" : "JMX",
-    "ImplementationVersion" : "1.6.0_26-b03",
-    "ImplementationVendor" : "Sun Microsystems"
-  } ]
-}
diff --git a/branch-1.2/contrib/addons/test/dataServices/jmx/data/sample_namenode_jmx.json b/branch-1.2/contrib/addons/test/dataServices/jmx/data/sample_namenode_jmx.json
deleted file mode 100644
index f9bc4c3..0000000
--- a/branch-1.2/contrib/addons/test/dataServices/jmx/data/sample_namenode_jmx.json
+++ /dev/null
@@ -1,843 +0,0 @@
-{
-  "beans" : [ {
-    "name" : "java.lang:type=Memory",
-    "modelerType" : "sun.management.MemoryImpl",
-    "Verbose" : true,
-    "HeapMemoryUsage" : {
-      "committed" : 1006632960,
-      "init" : 1073741824,
-      "max" : 1006632960,
-      "used" : 529321952
-    },
-    "NonHeapMemoryUsage" : {
-      "committed" : 28639232,
-      "init" : 24313856,
-      "max" : 136314880,
-      "used" : 28417000
-    },
-    "ObjectPendingFinalizationCount" : 0
-  }, {
-    "name" : "Hadoop:service=NameNode,name=FSNamesystemMetrics",
-    "modelerType" : "FSNamesystemMetrics",
-    "tag.context" : "dfs",
-    "tag.hostName" : "hrt18n00.cc1.ygridcore.net",
-    "FilesTotal" : 554,
-    "BlocksTotal" : 458,
-    "CapacityTotalGB" : 33841,
-    "CapacityUsedGB" : 2,
-    "CapacityRemainingGB" : 33531,
-    "TotalLoad" : 76,
-    "CorruptBlocks" : 0,
-    "ExcessBlocks" : 0,
-    "PendingDeletionBlocks" : 0,
-    "PendingReplicationBlocks" : 0,
-    "UnderReplicatedBlocks" : 0,
-    "ScheduledReplicationBlocks" : 0,
-    "MissingBlocks" : 0,
-    "BlockCapacity" : 2097152
-  }, {
-    "name" : "Hadoop:service=NameNode,name=RpcActivityForPort8020",
-    "modelerType" : "RpcActivityForPort8020",
-    "tag.context" : "rpc",
-    "tag.port" : "8020",
-    "tag.hostName" : "hrt18n00.cc1.ygridcore.net",
-    "rpcAuthenticationSuccesses" : 18763,
-    "rpcAuthenticationFailures" : 0,
-    "rpcAuthorizationSuccesses" : 18763,
-    "rpcAuthorizationFailures" : 0,
-    "ReceivedBytes" : 70468760,
-    "SentBytes" : 32861122,
-    "RpcQueueTime_num_ops" : 195286,
-    "RpcQueueTime_avg_time" : 0.02713456161732017,
-    "RpcProcessingTime_num_ops" : 195286,
-    "RpcProcessingTime_avg_time" : 0.053485656933933944,
-    "NumOpenConnections" : 0,
-    "callQueueLen" : 0
-  }, {
-    "name" : "java.lang:type=GarbageCollector,name=ConcurrentMarkSweep",
-    "modelerType" : "sun.management.GarbageCollectorImpl",
-    "LastGcInfo" : null,
-    "CollectionCount" : 0,
-    "CollectionTime" : 0,
-    "Name" : "ConcurrentMarkSweep",
-    "Valid" : true,
-    "MemoryPoolNames" : [ "Par Eden Space", "Par Survivor Space", "CMS Old Gen", "CMS Perm Gen" ]
-  }, {
-    "name" : "java.lang:type=Compilation",
-    "modelerType" : "sun.management.CompilationImpl",
-    "Name" : "HotSpot 64-Bit Tiered Compilers",
-    "CompilationTimeMonitoringSupported" : true,
-    "TotalCompilationTime" : 12145
-  }, {
-    "name" : "java.lang:type=MemoryPool,name=Par Eden Space",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "Par Eden Space",
-    "Type" : "HEAP",
-    "Valid" : true,
-    "CollectionUsage" : {
-      "committed" : 536870912,
-      "init" : 536870912,
-      "max" : 536870912,
-      "used" : 0
-    },
-    "CollectionUsageThreshold" : 0,
-    "CollectionUsageThresholdCount" : 0,
-    "MemoryManagerNames" : [ "ConcurrentMarkSweep", "ParNew" ],
-    "PeakUsage" : {
-      "committed" : 536870912,
-      "init" : 536870912,
-      "max" : 536870912,
-      "used" : 536870912
-    },
-    "Usage" : {
-      "committed" : 536870912,
-      "init" : 536870912,
-      "max" : 536870912,
-      "used" : 512406280
-    },
-    "CollectionUsageThresholdExceeded" : true,
-    "CollectionUsageThresholdSupported" : true,
-    "UsageThresholdSupported" : false
-  }, {
-    "name" : "Hadoop:service=NameNode,name=jvm",
-    "modelerType" : "jvm",
-    "tag.context" : "jvm",
-    "tag.processName" : "NameNode",
-    "tag.sessionId" : null,
-    "tag.hostName" : "hrt18n00.cc1.ygridcore.net",
-    "memNonHeapUsedM" : 27.130127,
-    "memNonHeapCommittedM" : 27.3125,
-    "memHeapUsedM" : 505.29456,
-    "memHeapCommittedM" : 960.0,
-    "gcCount" : 8,
-    "gcTimeMillis" : 491,
-    "threadsNew" : 0,
-    "threadsRunnable" : 11,
-    "threadsBlocked" : 0,
-    "threadsWaiting" : 103,
-    "threadsTimedWaiting" : 9,
-    "threadsTerminated" : 0,
-    "logFatal" : 0,
-    "logError" : 0,
-    "logWarn" : 1,
-    "logInfo" : 6
-  }, {
-    "name" : "java.lang:type=MemoryPool,name=CMS Perm Gen",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "CMS Perm Gen",
-    "Type" : "NON_HEAP",
-    "Valid" : true,
-    "CollectionUsage" : {
-      "committed" : 0,
-      "init" : 21757952,
-      "max" : 85983232,
-      "used" : 0
-    },
-    "CollectionUsageThreshold" : 0,
-    "CollectionUsageThresholdCount" : 0,
-    "MemoryManagerNames" : [ "ConcurrentMarkSweep" ],
-    "PeakUsage" : {
-      "committed" : 23920640,
-      "init" : 21757952,
-      "max" : 85983232,
-      "used" : 23845424
-    },
-    "Usage" : {
-      "committed" : 23920640,
-      "init" : 21757952,
-      "max" : 85983232,
-      "used" : 23845424
-    },
-    "UsageThreshold" : 0,
-    "UsageThresholdCount" : 0,
-    "CollectionUsageThresholdExceeded" : true,
-    "CollectionUsageThresholdSupported" : true,
-    "UsageThresholdExceeded" : true,
-    "UsageThresholdSupported" : true
-  }, {
-    "name" : "java.lang:type=OperatingSystem",
-    "modelerType" : "com.sun.management.UnixOperatingSystem",
-    "MaxFileDescriptorCount" : 32768,
-    "OpenFileDescriptorCount" : 118,
-    "CommittedVirtualMemorySize" : 1689423872,
-    "FreePhysicalMemorySize" : 13588275200,
-    "FreeSwapSpaceSize" : 15999885312,
-    "ProcessCpuTime" : 60330000000,
-    "TotalPhysicalMemorySize" : 16830111744,
-    "TotalSwapSpaceSize" : 15999885312,
-    "Name" : "Linux",
-    "Version" : "2.6.18-238.1.1.el5.YAHOO.20110221",
-    "AvailableProcessors" : 8,
-    "Arch" : "amd64",
-    "SystemLoadAverage" : 1.0
-  }, {
-    "name" : "java.lang:type=MemoryPool,name=Par Survivor Space",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "Par Survivor Space",
-    "Type" : "HEAP",
-    "Valid" : true,
-    "CollectionUsage" : {
-      "committed" : 67108864,
-      "init" : 67108864,
-      "max" : 67108864,
-      "used" : 3397648
-    },
-    "CollectionUsageThreshold" : 0,
-    "CollectionUsageThresholdCount" : 0,
-    "MemoryManagerNames" : [ "ConcurrentMarkSweep", "ParNew" ],
-    "PeakUsage" : {
-      "committed" : 67108864,
-      "init" : 67108864,
-      "max" : 67108864,
-      "used" : 18066680
-    },
-    "Usage" : {
-      "committed" : 67108864,
-      "init" : 67108864,
-      "max" : 67108864,
-      "used" : 3397648
-    },
-    "CollectionUsageThresholdExceeded" : true,
-    "CollectionUsageThresholdSupported" : true,
-    "UsageThresholdSupported" : false
-  }, {
-    "name" : "Hadoop:service=NameNode,name=NameNode",
-    "modelerType" : "NameNode",
-    "tag.context" : "dfs",
-    "tag.sessionId" : null,
-    "tag.hostName" : "hrt18n00.cc1.ygridcore.net",
-    "FilesCreated" : 2285,
-    "FilesAppended" : 0,
-    "GetBlockLocations" : 1363,
-    "FilesRenamed" : 341,
-    "GetListingOps" : 1455,
-    "CreateFileOps" : 1325,
-    "FilesDeleted" : 1732,
-    "DeleteFileOps" : 529,
-    "FileInfoOps" : 8087,
-    "AddBlockOps" : 1313,
-    "Transactions_num_ops" : 7791,
-    "Transactions_avg_time" : 0.005904248491849567,
-    "Syncs_num_ops" : 6090,
-    "Syncs_avg_time" : 0.6802955665024629,
-    "JournalTransactionsBatchedInSync" : 280,
-    "blockReport_num_ops" : 136,
-    "blockReport_avg_time" : 0.5294117647058818,
-    "SafemodeTime" : 99,
-    "fsImageLoadTime" : 88,
-    "FilesInGetListingOps" : 853
-  }, {
-    "name" : "java.lang:type=MemoryManager,name=CodeCacheManager",
-    "modelerType" : "sun.management.MemoryManagerImpl",
-    "Name" : "CodeCacheManager",
-    "Valid" : true,
-    "MemoryPoolNames" : [ "Code Cache" ]
-  }, {
-    "name" : "java.lang:type=MemoryPool,name=CMS Old Gen",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "CMS Old Gen",
-    "Type" : "HEAP",
-    "Valid" : true,
-    "CollectionUsage" : {
-      "committed" : 0,
-      "init" : 402653184,
-      "max" : 402653184,
-      "used" : 0
-    },
-    "CollectionUsageThreshold" : 0,
-    "CollectionUsageThresholdCount" : 0,
-    "MemoryManagerNames" : [ "ConcurrentMarkSweep" ],
-    "PeakUsage" : {
-      "committed" : 402653184,
-      "init" : 402653184,
-      "max" : 402653184,
-      "used" : 13626704
-    },
-    "Usage" : {
-      "committed" : 402653184,
-      "init" : 402653184,
-      "max" : 402653184,
-      "used" : 13626704
-    },
-    "UsageThreshold" : 0,
-    "UsageThresholdCount" : 0,
-    "CollectionUsageThresholdExceeded" : true,
-    "CollectionUsageThresholdSupported" : true,
-    "UsageThresholdExceeded" : true,
-    "UsageThresholdSupported" : true
-  }, {
-    "name" : "Hadoop:service=NameNode,name=MetricsSystem,sub=Control",
-    "modelerType" : "org.apache.hadoop.metrics2.impl.MetricsSystemImpl"
-  }, {
-    "name" : "Hadoop:service=NameNode,name=RpcDetailedActivityForPort8020",
-    "modelerType" : "RpcDetailedActivityForPort8020",
-    "tag.context" : "rpcdetailed",
-    "tag.port" : "8020",
-    "tag.hostName" : "hrt18n00.cc1.ygridcore.net",
-    "getProtocolVersion_num_ops" : 1584,
-    "getProtocolVersion_avg_time" : 0.001893939393939395,
-    "versionRequest_num_ops" : 11,
-    "versionRequest_avg_time" : 0.0,
-    "register_num_ops" : 10,
-    "register_avg_time" : 0.39999999999999997,
-    "blocksBeingWrittenReport_num_ops" : 10,
-    "blocksBeingWrittenReport_avg_time" : 0.0,
-    "sendHeartbeat_num_ops" : 150227,
-    "sendHeartbeat_avg_time" : 0.006496834790017655,
-    "getFileInfo_num_ops" : 8087,
-    "getFileInfo_avg_time" : 0.09199950537900325,
-    "mkdirs_num_ops" : 772,
-    "mkdirs_avg_time" : 0.7383419689119176,
-    "setOwner_num_ops" : 64,
-    "setOwner_avg_time" : 0.8124999999999999,
-    "setPermission_num_ops" : 868,
-    "setPermission_avg_time" : 0.9493087557603694,
-    "blockReport_num_ops" : 136,
-    "blockReport_avg_time" : 0.5735294117647055,
-    "getListing_num_ops" : 1455,
-    "getListing_avg_time" : 0.6295532646048103,
-    "delete_num_ops" : 621,
-    "delete_avg_time" : 0.9323671497584531,
-    "create_num_ops" : 1325,
-    "create_avg_time" : 1.2196226415094333,
-    "renewLease_num_ops" : 16624,
-    "renewLease_avg_time" : 0.01335418671799813,
-    "addBlock_num_ops" : 1313,
-    "addBlock_avg_time" : 0.2779893373952778,
-    "blockReceived_num_ops" : 7931,
-    "blockReceived_avg_time" : 0.08170470306392642,
-    "complete_num_ops" : 1314,
-    "complete_avg_time" : 0.9056316590563172,
-    "setReplication_num_ops" : 575,
-    "setReplication_avg_time" : 0.7565217391304356,
-    "getDelegationToken_num_ops" : 254,
-    "getDelegationToken_avg_time" : 1.3582677165354338,
-    "getBlockLocations_num_ops" : 1363,
-    "getBlockLocations_avg_time" : 0.25165077035950123,
-    "renewDelegationToken_num_ops" : 105,
-    "renewDelegationToken_avg_time" : 0.6952380952380953,
-    "rename_num_ops" : 341,
-    "rename_avg_time" : 0.9736070381231671,
-    "cancelDelegationToken_num_ops" : 105,
-    "cancelDelegationToken_avg_time" : 0.7999999999999998,
-    "setSafeMode_num_ops" : 9,
-    "setSafeMode_avg_time" : 0.0,
-    "fsync_num_ops" : 24,
-    "fsync_avg_time" : 0.7499999999999999,
-    "getEditLogSize_num_ops" : 150,
-    "getEditLogSize_avg_time" : 0.06666666666666665,
-    "rollEditLog_num_ops" : 1,
-    "rollEditLog_avg_time" : 7.0,
-    "rollFsImage_num_ops" : 1,
-    "rollFsImage_avg_time" : 11.0,
-    "getContentSummary_num_ops" : 6,
-    "getContentSummary_avg_time" : 0.0
-  }, {
-    "name" : "Hadoop:service=NameNode,name=FSNamesystemState",
-    "modelerType" : "org.apache.hadoop.hdfs.server.namenode.FSNamesystem",
-    "CapacityTotal" : 36336891658240,
-    "CapacityUsed" : 1750237184,
-    "CapacityRemaining" : 36003449884672,
-    "TotalLoad" : 76,
-    "BlocksTotal" : 458,
-    "FilesTotal" : 554,
-    "PendingReplicationBlocks" : 0,
-    "UnderReplicatedBlocks" : 0,
-    "ScheduledReplicationBlocks" : 0,
-    "FSState" : "Operational"
-  }, {
-    "name" : "java.lang:type=MemoryPool,name=Code Cache",
-    "modelerType" : "sun.management.MemoryPoolImpl",
-    "Name" : "Code Cache",
-    "Type" : "NON_HEAP",
-    "Valid" : true,
-    "CollectionUsage" : null,
-    "MemoryManagerNames" : [ "CodeCacheManager" ],
-    "PeakUsage" : {
-      "committed" : 4718592,
-      "init" : 2555904,
-      "max" : 50331648,
-      "used" : 4612864
-    },
-    "Usage" : {
-      "committed" : 4718592,
-      "init" : 2555904,
-      "max" : 50331648,
-      "used" : 4602880
-    },
-    "UsageThreshold" : 0,
-    "UsageThresholdCount" : 0,
-    "CollectionUsageThresholdSupported" : false,
-    "UsageThresholdExceeded" : true,
-    "UsageThresholdSupported" : true
-  }, {
-    "name" : "java.lang:type=Runtime",
-    "modelerType" : "sun.management.RuntimeImpl",
-    "Name" : "29613@hrt18n00.cc1.ygridcore.net",
-    "ClassPath" : "/etc/hadoop:/usr/hadoop-jdk1.6.0_26/lib/tools.jar:/usr/libexec/../share/hadoop:/usr/libexec/../share/hadoop/hadoop-core-1.0.0.jar:/usr/libexec/../share/hadoop/lib/asm-3.2.jar:/usr/libexec/../share/hadoop/lib/aspectjrt-1.6.5.jar:/usr/libexec/../share/hadoop/lib/aspectjtools-1.6.5.jar:/usr/libexec/../share/hadoop/lib/commons-beanutils-1.7.0.jar:/usr/libexec/../share/hadoop/lib/commons-beanutils-core-1.8.0.jar:/usr/libexec/../share/hadoop/lib/commons-cli-1.2.jar:/usr/libexec/../share/hadoop/lib/commons-codec-1.4.jar:/usr/libexec/../share/hadoop/lib/commons-collections-3.2.1.jar:/usr/libexec/../share/hadoop/lib/commons-configuration-1.6.jar:/usr/libexec/../share/hadoop/lib/commons-daemon-1.0.1.jar:/usr/libexec/../share/hadoop/lib/commons-digester-1.8.jar:/usr/libexec/../share/hadoop/lib/commons-el-1.0.jar:/usr/libexec/../share/hadoop/lib/commons-httpclient-3.0.1.jar:/usr/libexec/../share/hadoop/lib/commons-lang-2.4.jar:/usr/libexec/../share/hadoop/lib/commons-logging-1.1.1.jar:/usr/libexec/../share/hadoop/lib/commons-logging-api-1.0.4.jar:/usr/libexec/../share/hadoop/lib/commons-math-2.1.jar:/usr/libexec/../share/hadoop/lib/commons-net-1.4.1.jar:/usr/libexec/../share/hadoop/lib/core-3.1.1.jar:/usr/libexec/../share/hadoop/lib/hadoop-capacity-scheduler-0.20.206.0-SNAPSHOT.jar:/usr/libexec/../share/hadoop/lib/hadoop-capacity-scheduler-1.0.0.jar:/usr/libexec/../share/hadoop/lib/hadoop-fairscheduler-1.0.0.jar:/usr/libexec/../share/hadoop/lib/hadoop-thriftfs-1.0.0.jar:/usr/libexec/../share/hadoop/lib/hsqldb-1.8.0.10.jar:/usr/libexec/../share/hadoop/lib/jackson-core-asl-1.0.1.jar:/usr/libexec/../share/hadoop/lib/jackson-mapper-asl-1.0.1.jar:/usr/libexec/../share/hadoop/lib/jasper-compiler-5.5.12.jar:/usr/libexec/../share/hadoop/lib/jasper-runtime-5.5.12.jar:/usr/libexec/../share/hadoop/lib/jdeb-0.8.jar:/usr/libexec/../share/hadoop/lib/jersey-core-1.8.jar:/usr/libexec/../share/hadoop/lib/jersey-json-1.8.jar:/usr/libexec/../share/hadoop/lib/jersey-server-1.8.jar:/usr/libexec/../share/hadoop/lib/jets3t-0.6.1.jar:/usr/libexec/../share/hadoop/lib/jetty-6.1.26.jar:/usr/libexec/../share/hadoop/lib/jetty-util-6.1.26.jar:/usr/libexec/../share/hadoop/lib/jsch-0.1.42.jar:/usr/libexec/../share/hadoop/lib/junit-4.5.jar:/usr/libexec/../share/hadoop/lib/kfs-0.2.2.jar:/usr/libexec/../share/hadoop/lib/log4j-1.2.15.jar:/usr/libexec/../share/hadoop/lib/mockito-all-1.8.5.jar:/usr/libexec/../share/hadoop/lib/oro-2.0.8.jar:/usr/libexec/../share/hadoop/lib/servlet-api-2.5-20081211.jar:/usr/libexec/../share/hadoop/lib/slf4j-api-1.4.3.jar:/usr/libexec/../share/hadoop/lib/slf4j-log4j12-1.4.3.jar:/usr/libexec/../share/hadoop/lib/xmlenc-0.52.jar:/usr/libexec/../share/hadoop/lib/jsp-2.1/jsp-2.1.jar:/usr/libexec/../share/hadoop/lib/jsp-2.1/jsp-api-2.1.jar",
-    "StartTime" : 1327557521840,
-    "BootClassPath" : "/usr/jdk64/jdk1.6.0_26/jre/lib/resources.jar:/usr/jdk64/jdk1.6.0_26/jre/lib/rt.jar:/usr/jdk64/jdk1.6.0_26/jre/lib/sunrsasign.jar:/usr/jdk64/jdk1.6.0_26/jre/lib/jsse.jar:/usr/jdk64/jdk1.6.0_26/jre/lib/jce.jar:/usr/jdk64/jdk1.6.0_26/jre/lib/charsets.jar:/usr/jdk64/jdk1.6.0_26/jre/lib/modules/jdk.boot.jar:/usr/jdk64/jdk1.6.0_26/jre/classes",
-    "LibraryPath" : "/usr/libexec/../lib",
-    "VmName" : "Java HotSpot(TM) 64-Bit Server VM",
-    "VmVendor" : "Sun Microsystems Inc.",
-    "VmVersion" : "20.1-b02",
-    "BootClassPathSupported" : true,
-    "InputArguments" : [ "-Dproc_namenode", "-Xmx1000m", "-Djava.net.preferIPv4Stack=true", "-Djava.net.preferIPv4Stack=true", "-Djava.net.preferIPv4Stack=true", "-Djava.net.preferIPv4Stack=true", "-XX:ParallelGCThreads=8", "-XX:+UseConcMarkSweepGC", "-XX:ErrorFile=/grid/0/var/log/hadoop/log/hrt_hdfs/hs_err_pid%p.log", "-XX:NewSize=640m", "-XX:MaxNewSize=640m", "-Xloggc:/grid/0/var/log/hadoop/log/hrt_hdfs/gc.log-201201260558", "-verbose:gc", "-XX:+PrintGCDetails", "-XX:+PrintGCTimeStamps", "-XX:+PrintGCDateStamps", "-Xms1G", "-Xmx1G", "-Dhadoop.security.logger=INFO,DRFAS", "-Dhdfs.audit.logger=INFO,DRFAAUDIT", "-XX:ParallelGCThreads=8", "-XX:+UseConcMarkSweepGC", "-XX:ErrorFile=/grid/0/var/log/hadoop/log/hrt_hdfs/hs_err_pid%p.log", "-XX:NewSize=640m", "-XX:MaxNewSize=640m", "-Xloggc:/grid/0/var/log/hadoop/log/hrt_hdfs/gc.log-201201260558", "-verbose:gc", "-XX:+PrintGCDetails", "-XX:+PrintGCTimeStamps", "-XX:+PrintGCDateStamps", "-Xms1G", "-Xmx1G", "-Dhadoop.security.logger=INFO,DRFAS", "-Dhdfs.audit.logger=INFO,DRFAAUDIT", "-XX:ParallelGCThreads=8", "-XX:+UseConcMarkSweepGC", "-XX:ErrorFile=/grid/0/var/log/hadoop/log/hrt_hdfs/hs_err_pid%p.log", "-XX:NewSize=640m", "-XX:MaxNewSize=640m", "-Xloggc:/grid/0/var/log/hadoop/log/hrt_hdfs/gc.log-201201260558", "-verbose:gc", "-XX:+PrintGCDetails", "-XX:+PrintGCTimeStamps", "-XX:+PrintGCDateStamps", "-Xms1G", "-Xmx1G", "-Dhadoop.security.logger=INFO,DRFAS", "-Dhdfs.audit.logger=INFO,DRFAAUDIT", "-XX:ParallelGCThreads=8", "-XX:+UseConcMarkSweepGC", "-XX:ErrorFile=/grid/0/var/log/hadoop/log/hrt_hdfs/hs_err_pid%p.log", "-XX:NewSize=640m", "-XX:MaxNewSize=640m", "-Xloggc:/grid/0/var/log/hadoop/log/hrt_hdfs/gc.log-201201260558", "-verbose:gc", "-XX:+PrintGCDetails", "-XX:+PrintGCTimeStamps", "-XX:+PrintGCDateStamps", "-Xms1G", "-Xmx1G", "-Dhadoop.security.logger=INFO,DRFAS", "-Dhdfs.audit.logger=INFO,DRFAAUDIT", "-Dhadoop.log.dir=/grid/0/var/log/hadoop/hrt_hdfs", "-Dhadoop.log.file=hadoop-hrt_hdfs-namenode-hrt18n00.cc1.ygridcore.net.log", "-Dhadoop.home.dir=/usr/libexec/..", "-Dhadoop.id.str=hrt_hdfs", "-Dhadoop.root.logger=INFO,DRFA", "-Dhadoop.security.logger=INFO,DRFAS", "-Djava.library.path=/usr/libexec/../lib", "-Dhadoop.policy.file=hadoop-policy.xml" ],
-    "ManagementSpecVersion" : "1.2",
-    "SpecName" : "Java Virtual Machine Specification",
-    "SpecVendor" : "Sun Microsystems Inc.",
-    "SpecVersion" : "1.0",
-    "SystemProperties" : [ {
-      "key" : "java.ext.dirs",
-      "value" : "/usr/jdk64/jdk1.6.0_26/jre/lib/ext:/usr/java/packages/lib/ext"
-    }, {
-      "key" : "hadoop.home.dir",
-      "value" : "/usr/libexec/.."
-    }, {
-      "key" : "java.vm.specification.vendor",
-      "value" : "Sun Microsystems Inc."
-    }, {
-      "key" : "user.timezone",
-      "value" : "Zulu"
-    }, {
-      "key" : "hadoop.id.str",
-      "value" : "hrt_hdfs"
-    }, {
-      "key" : "java.vm.vendor",
-      "value" : "Sun Microsystems Inc."
-    }, {
-      "key" : "user.name",
-      "value" : "hrt_hdfs"
-    }, {
-      "key" : "java.vm.specification.name",
-      "value" : "Java Virtual Machine Specification"
-    }, {
-      "key" : "user.dir",
-      "value" : "/usr"
-    }, {
-      "key" : "user.country",
-      "value" : "US"
-    }, {
-      "key" : "user.language",
-      "value" : "en"
-    }, {
-      "key" : "java.specification.version",
-      "value" : "1.6"
-    }, {
-      "key" : "hadoop.log.file",
-      "value" : "hadoop-hrt_hdfs-namenode-hrt18n00.cc1.ygridcore.net.log"
-    }, {
-      "key" : "hadoop.policy.file",
-      "value" : "hadoop-policy.xml"
-    }, {
-      "key" : "sun.cpu.endian",
-      "value" : "little"
-    }, {
-      "key" : "java.home",
-      "value" : "/usr/jdk64/jdk1.6.0_26/jre"
-    }, {
-      "key" : "java.net.preferIPv4Stack",
-      "value" : "true"
-    }, {
-      "key" : "sun.jnu.encoding",
-      "value" : "UTF-8"
-    }, {
-      "key" : "file.separator",
-      "value" : "/"
-    }, {
-      "key" : "java.vendor.url",
-      "value" : "http://java.sun.com/"
-    }, {
-      "key" : "java.awt.graphicsenv",
-      "value" : "sun.awt.X11GraphicsEnvironment"
-    }, {
-      "key" : "hadoop.log.dir",
-      "value" : "/grid/0/var/log/hadoop/hrt_hdfs"
-    }, {
-      "key" : "os.arch",
-      "value" : "amd64"
-    }, {
-      "key" : "jetty.ssl.password",
-      "value" : "-7485543109328883355"
-    }, {
-      "key" : "java.io.tmpdir",
-      "value" : "/tmp"
-    }, {
-      "key" : "java.runtime.name",
-      "value" : "Java(TM) SE Runtime Environment"
-    }, {
-      "key" : "java.awt.printerjob",
-      "value" : "sun.print.PSPrinterJob"
-    }, {
-      "key" : "file.encoding",
-      "value" : "UTF-8"
-    }, {
-      "key" : "java.version",
-      "value" : "1.6.0_26"
-    }, {
-      "key" : "java.vendor.url.bug",
-      "value" : "http://java.sun.com/cgi-bin/bugreport.cgi"
-    }, {
-      "key" : "java.vm.specification.version",
-      "value" : "1.0"
-    }, {
-      "key" : "file.encoding.pkg",
-      "value" : "sun.io"
-    }, {
-      "key" : "sun.java.command",
-      "value" : "org.apache.hadoop.hdfs.server.namenode.NameNode"
-    }, {
-      "key" : "sun.java.launcher",
-      "value" : "SUN_STANDARD"
-    }, {
-      "key" : "path.separator",
-      "value" : ":"
-    }, {
-      "key" : "java.runtime.version",
-      "value" : "1.6.0_26-b03"
-    }, {
-      "key" : "java.class.path",
-      "value" : "/etc/hadoop:/usr/hadoop-jdk1.6.0_26/lib/tools.jar:/usr/libexec/../share/hadoop:/usr/libexec/../share/hadoop/hadoop-core-1.0.0.jar:/usr/libexec/../share/hadoop/lib/asm-3.2.jar:/usr/libexec/../share/hadoop/lib/aspectjrt-1.6.5.jar:/usr/libexec/../share/hadoop/lib/aspectjtools-1.6.5.jar:/usr/libexec/../share/hadoop/lib/commons-beanutils-1.7.0.jar:/usr/libexec/../share/hadoop/lib/commons-beanutils-core-1.8.0.jar:/usr/libexec/../share/hadoop/lib/commons-cli-1.2.jar:/usr/libexec/../share/hadoop/lib/commons-codec-1.4.jar:/usr/libexec/../share/hadoop/lib/commons-collections-3.2.1.jar:/usr/libexec/../share/hadoop/lib/commons-configuration-1.6.jar:/usr/libexec/../share/hadoop/lib/commons-daemon-1.0.1.jar:/usr/libexec/../share/hadoop/lib/commons-digester-1.8.jar:/usr/libexec/../share/hadoop/lib/commons-el-1.0.jar:/usr/libexec/../share/hadoop/lib/commons-httpclient-3.0.1.jar:/usr/libexec/../share/hadoop/lib/commons-lang-2.4.jar:/usr/libexec/../share/hadoop/lib/commons-logging-1.1.1.jar:/usr/libexec/../share/hadoop/lib/commons-logging-api-1.0.4.jar:/usr/libexec/../share/hadoop/lib/commons-math-2.1.jar:/usr/libexec/../share/hadoop/lib/commons-net-1.4.1.jar:/usr/libexec/../share/hadoop/lib/core-3.1.1.jar:/usr/libexec/../share/hadoop/lib/hadoop-capacity-scheduler-0.20.206.0-SNAPSHOT.jar:/usr/libexec/../share/hadoop/lib/hadoop-capacity-scheduler-1.0.0.jar:/usr/libexec/../share/hadoop/lib/hadoop-fairscheduler-1.0.0.jar:/usr/libexec/../share/hadoop/lib/hadoop-thriftfs-1.0.0.jar:/usr/libexec/../share/hadoop/lib/hsqldb-1.8.0.10.jar:/usr/libexec/../share/hadoop/lib/jackson-core-asl-1.0.1.jar:/usr/libexec/../share/hadoop/lib/jackson-mapper-asl-1.0.1.jar:/usr/libexec/../share/hadoop/lib/jasper-compiler-5.5.12.jar:/usr/libexec/../share/hadoop/lib/jasper-runtime-5.5.12.jar:/usr/libexec/../share/hadoop/lib/jdeb-0.8.jar:/usr/libexec/../share/hadoop/lib/jersey-core-1.8.jar:/usr/libexec/../share/hadoop/lib/jersey-json-1.8.jar:/usr/libexec/../share/hadoop/lib/jersey-server-1.8.jar:/usr/libexec/../share/hadoop/lib/jets3t-0.6.1.jar:/usr/libexec/../share/hadoop/lib/jetty-6.1.26.jar:/usr/libexec/../share/hadoop/lib/jetty-util-6.1.26.jar:/usr/libexec/../share/hadoop/lib/jsch-0.1.42.jar:/usr/libexec/../share/hadoop/lib/junit-4.5.jar:/usr/libexec/../share/hadoop/lib/kfs-0.2.2.jar:/usr/libexec/../share/hadoop/lib/log4j-1.2.15.jar:/usr/libexec/../share/hadoop/lib/mockito-all-1.8.5.jar:/usr/libexec/../share/hadoop/lib/oro-2.0.8.jar:/usr/libexec/../share/hadoop/lib/servlet-api-2.5-20081211.jar:/usr/libexec/../share/hadoop/lib/slf4j-api-1.4.3.jar:/usr/libexec/../share/hadoop/lib/slf4j-log4j12-1.4.3.jar:/usr/libexec/../share/hadoop/lib/xmlenc-0.52.jar:/usr/libexec/../share/hadoop/lib/jsp-2.1/jsp-2.1.jar:/usr/libexec/../share/hadoop/lib/jsp-2.1/jsp-api-2.1.jar"
-    }, {
-      "key" : "os.name",
-      "value" : "Linux"
-    }, {
-      "key" : "hadoop.security.logger",
-      "value" : "INFO,DRFAS"
-    }, {
-      "key" : "https.cipherSuites",
-      "value" : "TLS_KRB5_WITH_3DES_EDE_CBC_SHA"
-    }, {
-      "key" : "line.separator",
-      "value" : "\n"
-    }, {
-      "key" : "proc_namenode",
-      "value" : ""
-    }, {
-      "key" : "os.version",
-      "value" : "2.6.18-238.1.1.el5.YAHOO.20110221"
-    }, {
-      "key" : "sun.arch.data.model",
-      "value" : "64"
-    }, {
-      "key" : "java.class.version",
-      "value" : "50.0"
-    }, {
-      "key" : "sun.io.unicode.encoding",
-      "value" : "UnicodeLittle"
-    }, {
-      "key" : "java.vendor",
-      "value" : "Sun Microsystems Inc."
-    }, {
-      "key" : "sun.boot.class.path",
-      "value" : "/usr/jdk64/jdk1.6.0_26/jre/lib/resources.jar:/usr/jdk64/jdk1.6.0_26/jre/lib/rt.jar:/usr/jdk64/jdk1.6.0_26/jre/lib/sunrsasign.jar:/usr/jdk64/jdk1.6.0_26/jre/lib/jsse.jar:/usr/jdk64/jdk1.6.0_26/jre/lib/jce.jar:/usr/jdk64/jdk1.6.0_26/jre/lib/charsets.jar:/usr/jdk64/jdk1.6.0_26/jre/lib/modules/jdk.boot.jar:/usr/jdk64/jdk1.6.0_26/jre/classes"
-    }, {
-      "key" : "java.vm.info",
-      "value" : "mixed mode"
-    }, {
-      "key" : "java.specification.name",
-      "value" : "Java Platform API Specification"
-    }, {
-      "key" : "jetty.ssl.keypassword",
-      "value" : "-5213096557016765376"
-    }, {
-      "key" : "java.vm.name",
-      "value" : "Java HotSpot(TM) 64-Bit Server VM"
-    }, {
-      "key" : "java.vm.version",
-      "value" : "20.1-b02"
-    }, {
-      "key" : "sun.boot.library.path",
-      "value" : "/usr/jdk64/jdk1.6.0_26/jre/lib/amd64"
-    }, {
-      "key" : "hadoop.root.logger",
-      "value" : "INFO,DRFA"
-    }, {
-      "key" : "java.endorsed.dirs",
-      "value" : "/usr/jdk64/jdk1.6.0_26/jre/lib/endorsed"
-    }, {
-      "key" : "sun.os.patch.level",
-      "value" : "unknown"
-    }, {
-      "key" : "sun.cpu.isalist",
-      "value" : ""
-    }, {
-      "key" : "hdfs.audit.logger",
-      "value" : "INFO,DRFAAUDIT"
-    }, {
-      "key" : "user.home",
-      "value" : "/homes/hrt_hdfs"
-    }, {
-      "key" : "java.library.path",
-      "value" : "/usr/libexec/../lib"
-    }, {
-      "key" : "java.specification.vendor",
-      "value" : "Sun Microsystems Inc."
-    }, {
-      "key" : "sun.management.compiler",
-      "value" : "HotSpot 64-Bit Tiered Compilers"
-    } ],
-    "Uptime" : 45108092
-  }, {
-    "name" : "java.lang:type=ClassLoading",
-    "modelerType" : "sun.management.ClassLoadingImpl",
-    "LoadedClassCount" : 2975,
-    "UnloadedClassCount" : 0,
-    "TotalLoadedClassCount" : 2975,
-    "Verbose" : false
-  }, {
-    "name" : "java.lang:type=Threading",
-    "modelerType" : "sun.management.ThreadImpl",
-    "ThreadAllocatedMemoryEnabled" : true,
-    "ThreadAllocatedMemorySupported" : true,
-    "DaemonThreadCount" : 114,
-    "PeakThreadCount" : 126,
-    "CurrentThreadCpuTimeSupported" : true,
-    "ObjectMonitorUsageSupported" : true,
-    "SynchronizerUsageSupported" : true,
-    "ThreadContentionMonitoringEnabled" : false,
-    "ThreadContentionMonitoringSupported" : true,
-    "ThreadCpuTimeEnabled" : true,
-    "AllThreadIds" : [ 541, 132, 130, 129, 128, 127, 126, 125, 124, 123, 122, 121, 120, 119, 118, 117, 116, 115, 114, 113, 112, 111, 110, 109, 108, 107, 106, 105, 104, 103, 102, 101, 100, 99, 98, 97, 96, 95, 94, 93, 92, 91, 90, 89, 88, 87, 86, 85, 84, 83, 82, 81, 80, 79, 78, 77, 76, 75, 74, 73, 72, 71, 70, 69, 68, 67, 66, 65, 64, 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 20, 26, 30, 29, 28, 25, 24, 23, 22, 21, 19, 17, 16, 15, 14, 13, 11, 5, 3, 2, 1 ],
-    "CurrentThreadCpuTime" : 60000000,
-    "CurrentThreadUserTime" : 60000000,
-    "ThreadCount" : 123,
-    "TotalStartedThreadCount" : 532,
-    "ThreadCpuTimeSupported" : true
-  }, {
-    "name" : "java.util.logging:type=Logging",
-    "modelerType" : "java.util.logging.Logging",
-    "LoggerNames" : [ "sun.awt.AppContext", "javax.security.sasl", "javax.management", "sun.net.www.protocol.http.HttpURLConnection", "javax.management.misc", "global", "javax.management.mbeanserver", "" ]
-  }, {
-    "name" : "java.lang:type=GarbageCollector,name=ParNew",
-    "modelerType" : "sun.management.GarbageCollectorImpl",
-    "LastGcInfo" : {
-      "GcThreadCount" : 11,
-      "duration" : 19,
-      "endTime" : 37699064,
-      "id" : 8,
-      "memoryUsageAfterGc" : [ {
-        "key" : "CMS Perm Gen",
-        "value" : {
-          "committed" : 23330816,
-          "init" : 21757952,
-          "max" : 85983232,
-          "used" : 23176808
-        }
-      }, {
-        "key" : "Code Cache",
-        "value" : {
-          "committed" : 4653056,
-          "init" : 2555904,
-          "max" : 50331648,
-          "used" : 4536064
-        }
-      }, {
-        "key" : "CMS Old Gen",
-        "value" : {
-          "committed" : 402653184,
-          "init" : 402653184,
-          "max" : 402653184,
-          "used" : 13626704
-        }
-      }, {
-        "key" : "Par Eden Space",
-        "value" : {
-          "committed" : 536870912,
-          "init" : 536870912,
-          "max" : 536870912,
-          "used" : 0
-        }
-      }, {
-        "key" : "Par Survivor Space",
-        "value" : {
-          "committed" : 67108864,
-          "init" : 67108864,
-          "max" : 67108864,
-          "used" : 3397648
-        }
-      } ],
-      "memoryUsageBeforeGc" : [ {
-        "key" : "CMS Perm Gen",
-        "value" : {
-          "committed" : 23330816,
-          "init" : 21757952,
-          "max" : 85983232,
-          "used" : 23176808
-        }
-      }, {
-        "key" : "Code Cache",
-        "value" : {
-          "committed" : 4653056,
-          "init" : 2555904,
-          "max" : 50331648,
-          "used" : 4536064
-        }
-      }, {
-        "key" : "CMS Old Gen",
-        "value" : {
-          "committed" : 402653184,
-          "init" : 402653184,
-          "max" : 402653184,
-          "used" : 13512088
-        }
-      }, {
-        "key" : "Par Eden Space",
-        "value" : {
-          "committed" : 536870912,
-          "init" : 536870912,
-          "max" : 536870912,
-          "used" : 536870912
-        }
-      }, {
-        "key" : "Par Survivor Space",
-        "value" : {
-          "committed" : 67108864,
-          "init" : 67108864,
-          "max" : 67108864,
-          "used" : 3370224
-        }
-      } ],
-      "startTime" : 37699045
-    },
-    "CollectionCount" : 8,
-    "CollectionTime" : 491,
-    "Name" : "ParNew",
-    "Valid" : true,
-    "MemoryPoolNames" : [ "Par Eden Space", "Par Survivor Space" ]
-  }, {
-    "name" : "Hadoop:service=NameNode,name=MetricsSystem,sub=Stats",
-    "modelerType" : "MetricsSystem,sub=Stats",
-    "tag.context" : "metricssystem",
-    "tag.hostName" : "hrt18n00.cc1.ygridcore.net",
-    "num_sources" : 6,
-    "num_sinks" : 0,
-    "snapshot_num_ops" : 0,
-    "snapshot_avg_time" : 0.0,
-    "snapshot_stdev_time" : 0.0,
-    "snapshot_imin_time" : 1.7976931348623157E308,
-    "snapshot_imax_time" : 4.9E-324,
-    "snapshot_min_time" : 1.7976931348623157E308,
-    "snapshot_max_time" : 4.9E-324,
-    "publish_num_ops" : 0,
-    "publish_avg_time" : 0.0,
-    "publish_stdev_time" : 0.0,
-    "publish_imin_time" : 1.7976931348623157E308,
-    "publish_imax_time" : 4.9E-324,
-    "publish_min_time" : 1.7976931348623157E308,
-    "publish_max_time" : 4.9E-324,
-    "dropped_pub_all" : 0
-  }, {
-    "name" : "Hadoop:service=NameNode,name=NameNodeInfo",
-    "modelerType" : "org.apache.hadoop.hdfs.server.namenode.FSNamesystem",
-    "Threads" : 123,
-    "HostName" : "hrt18n00.cc1.ygridcore.net",
-    "Total" : 36336891658240,
-    "Version" : "1.0.0",
-    "UpgradeFinalized" : true,
-    "Used" : 1750237184,
-    "Free" : 36003449884672,
-    "Safemode" : "Safemode is ON",
-    "NonDfsUsedSpace" : 331691536384,
-    "PercentUsed" : 0.0048166947,
-    "PercentRemaining" : 99.08236,
-    "TotalBlocks" : 458,
-    "TotalFiles" : 554,
-    "LiveNodes" : "{\"hrt18n08.cc1.ygridcore.net\":{\"usedSpace\":163127296,\"lastContact\":0},\"hrt18n07.cc1.ygridcore.net\":{\"usedSpace\":195764224,\"lastContact\":1},\"hrt18n12.cc1.ygridcore.net\":{\"usedSpace\":164651008,\"lastContact\":2},\"hrt18n16.cc1.ygridcore.net\":{\"usedSpace\":163586048,\"lastContact\":2},\"hrt18n18.cc1.ygridcore.net\":{\"usedSpace\":193626112,\"lastContact\":1},\"hrt18n17.cc1.ygridcore.net\":{\"usedSpace\":154574848,\"lastContact\":0},\"hrt18n11.cc1.ygridcore.net\":{\"usedSpace\":222642176,\"lastContact\":0},\"hrt18n13.cc1.ygridcore.net\":{\"usedSpace\":161054720,\"lastContact\":1},\"hrt18n10.cc1.ygridcore.net\":{\"usedSpace\":172269568,\"lastContact\":2},\"hrt18n09.cc1.ygridcore.net\":{\"usedSpace\":158941184,\"lastContact\":2}}",
-    "DeadNodes" : "{\"hrt18n14.cc1.ygridcore.net\":{\"lastContact\":1327602629}}",
-    "DecomNodes" : "{}"
-  }, {
-    "name" : "com.sun.management:type=HotSpotDiagnostic",
-    "modelerType" : "sun.management.HotSpotDiagnostic",
-    "DiagnosticOptions" : [ {
-      "name" : "HeapDumpBeforeFullGC",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "HeapDumpAfterFullGC",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "HeapDumpOnOutOfMemoryError",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "HeapDumpPath",
-      "origin" : "DEFAULT",
-      "value" : "",
-      "writeable" : true
-    }, {
-      "name" : "CMSAbortablePrecleanWaitMillis",
-      "origin" : "DEFAULT",
-      "value" : "100",
-      "writeable" : true
-    }, {
-      "name" : "CMSWaitDuration",
-      "origin" : "DEFAULT",
-      "value" : "2000",
-      "writeable" : true
-    }, {
-      "name" : "PrintGC",
-      "origin" : "VM_CREATION",
-      "value" : "true",
-      "writeable" : true
-    }, {
-      "name" : "PrintGCDetails",
-      "origin" : "VM_CREATION",
-      "value" : "true",
-      "writeable" : true
-    }, {
-      "name" : "PrintGCDateStamps",
-      "origin" : "VM_CREATION",
-      "value" : "true",
-      "writeable" : true
-    }, {
-      "name" : "PrintGCTimeStamps",
-      "origin" : "VM_CREATION",
-      "value" : "true",
-      "writeable" : true
-    }, {
-      "name" : "PrintClassHistogramBeforeFullGC",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintClassHistogramAfterFullGC",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintClassHistogram",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    }, {
-      "name" : "PrintConcurrentLocks",
-      "origin" : "DEFAULT",
-      "value" : "false",
-      "writeable" : true
-    } ]
-  }, {
-    "name" : "Hadoop:service=NameNode,name=ugi",
-    "modelerType" : "ugi",
-    "tag.context" : "ugi",
-    "tag.hostName" : "hrt18n00.cc1.ygridcore.net",
-    "loginSuccess_num_ops" : 4,
-    "loginSuccess_avg_time" : 58.0,
-    "loginFailure_num_ops" : 0,
-    "loginFailure_avg_time" : 0.0
-  }, {
-    "name" : "JMImplementation:type=MBeanServerDelegate",
-    "modelerType" : "javax.management.MBeanServerDelegate",
-    "MBeanServerId" : "hrt18n00.cc1.ygridcore.net_1327557522203",
-    "SpecificationName" : "Java Management Extensions",
-    "SpecificationVersion" : "1.4",
-    "SpecificationVendor" : "Sun Microsystems",
-    "ImplementationName" : "JMX",
-    "ImplementationVersion" : "1.6.0_26-b03",
-    "ImplementationVendor" : "Sun Microsystems"
-  } ]
-}
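
The deleted fixture above is a capture of the Hadoop 1.x NameNode JMX JSON servlet (the /jmx endpoint on the NameNode web port). One detail worth noting: the LiveNodes and DeadNodes attributes of the NameNodeInfo bean are themselves JSON documents encoded as strings inside the outer JSON, so a consumer has to decode twice. A minimal Python 2-era sketch of such a consumer, assuming the usual {"beans": [...]} wrapper produced by the servlet (treat the URL and wrapper as assumptions if your Hadoop version differs):

    import json
    import urllib2  # Python 2, matching the era of the scripts in this diff

    def fetch_namenode_info(host, port=50070):
        # Hadoop 1.x exposes MBeans as JSON at /jmx; the servlet wraps the
        # bean list as {"beans": [...]}.
        raw = urllib2.urlopen("http://%s:%d/jmx" % (host, port)).read()
        beans = json.loads(raw)["beans"]
        nn = [b for b in beans
              if b.get("name") == "Hadoop:service=NameNode,name=NameNodeInfo"][0]
        # LiveNodes and DeadNodes are JSON encoded *as strings* inside the
        # payload (visible in the fixture above), so decode them again.
        live = json.loads(nn["LiveNodes"])
        dead = json.loads(nn["DeadNodes"])
        return nn["Total"], nn["Used"], live, dead
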
diff --git a/branch-1.2/contrib/addons/test/dataServices/jmx/test_config_load.php b/branch-1.2/contrib/addons/test/dataServices/jmx/test_config_load.php
deleted file mode 100644
index 6dcf942..0000000
--- a/branch-1.2/contrib/addons/test/dataServices/jmx/test_config_load.php
+++ /dev/null
@@ -1,120 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-$GLOBALS["HDP_MON_DEBUG_MODE"] = FALSE;
-$pwd = exec("pwd");
-$GLOBALS["HDP_MON_CLUSTER_CONFIG_LOCATION"] = $pwd
-    ."/data/cluster_configuration.json";
-
-include_once("../../../src/dataServices/common/common.inc");
-include_once("../../../src/dataServices/common/cluster_configuration.inc");
-
-hdp_mon_load_cluster_configuration();
-
-if (!isset($GLOBALS["HDP_MON_CONFIG"])) {
-  error_log("global CONFIG is still not set");
-  exit(1);
-}
-
-assert($GLOBALS["HDP_MON_CONFIG"]["STACK_VERSION"] === "1.0.2");
-assert($GLOBALS["HDP_MON_CONFIG"]["CLUSTER_NAME"] === "MyHDPCluster");
-
-assert($GLOBALS["HDP_MON_CONFIG"]["HDP_MON"]["DASHBOARD_HOST"] ===
-    "dashboard_host");
-assert($GLOBALS["HDP_MON_CONFIG"]["HDP_MON"]["DASHBOARD_PORT"] === 80);
-
-assert($GLOBALS["HDP_MON_CONFIG"]["GANGLIA"]["WEB_HOST"] === "gangliaweb_host");
-assert($GLOBALS["HDP_MON_CONFIG"]["GANGLIA"]["WEB_PORT"] === 80);
-assert($GLOBALS["HDP_MON_CONFIG"]["GANGLIA"]["WEB_ROOT"] ===
-    "/var/www/ganglia2");
-assert($GLOBALS["HDP_MON_CONFIG"]["GANGLIA"]["GRID_NAME"] === "HDP_GRID");
-
-assert($GLOBALS["HDP_MON_CONFIG"]["NAGIOS"]["NAGIOSSERVER_HOST"] ===
-    "nagiosserver_host");
-assert($GLOBALS["HDP_MON_CONFIG"]["NAGIOS"]["NAGIOSSERVER_PORT"] === 80);
-
-assert($GLOBALS["HDP_MON_CONFIG"]["JMX"]["TIMEOUT"] === 1);
-
-assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["HDFS"]["NAMENODE_HOST"] ===
-    "namenode");
-assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["HDFS"]["NAMENODE_PORT"] ===
-    50070);
-assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["HDFS"]["NAMENODE_ADDR"] ===
-    "namenode:50070");
-assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["HDFS"]["SECONDARY_NAMENODE_ADDR"]
-    === "snamenode:50071");
-assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["HDFS"]["TOTAL_DATANODES"] === 10);
-assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["HDFS"]
-    ["GANGLIA_CLUSTERS"]["NAMENODE"] === "HDPNameNode");
-assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["HDFS"]
-    ["GANGLIA_CLUSTERS"]["SLAVES"] === "HDPSlaves");
-
-assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["MAPREDUCE"]["JOBTRACKER_HOST"]
-    === "jobtracker");
-assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["MAPREDUCE"]["JOBTRACKER_PORT"]
-    === 50030);
-assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["MAPREDUCE"]["JOBTRACKER_ADDR"]
-    === "jobtracker:50030");
-assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["MAPREDUCE"]["TOTAL_TASKTRACKERS"]
-    === 20);
-assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["MAPREDUCE"]["JOBHISTORY_HOST"]
-    === "jobhistory_host");
-assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["MAPREDUCE"]["JOBHISTORY_PORT"]
-    === 52890);
-assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["MAPREDUCE"]
-    ["GANGLIA_CLUSTERS"]["JOBTRACKER"] === "HDPJobTracker");
-assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["MAPREDUCE"]
-    ["GANGLIA_CLUSTERS"]["SLAVES"] === "HDPSlaves");
-
-assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["HBASE"]["HBASEMASTER_HOST"]
-    === "hbasemaster");
-assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["HBASE"]["HBASEMASTER_PORT"]
-    === 60010);
-assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["HBASE"]["HBASEMASTER_ADDR"]
-    === "hbasemaster:60010");
-assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["HBASE"]["TOTAL_REGIONSERVERS"]
-    === 30);
-assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["HBASE"]
-    ["GANGLIA_CLUSTERS"]["HBASEMASTER"] === "HDPHBaseMaster");
-assert($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["HBASE"]
-    ["GANGLIA_CLUSTERS"]["SLAVES"] === "HDPSlaves");
-assert(!isset($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["ZOOKEEPER"]));
-assert(is_array($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["HIVE-METASTORE"]));
-assert(is_array($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["TEMPLETON"]));
-assert(is_array($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["OOZIE"]));
-
-$GLOBALS["HDP_MON_CLUSTER_CONFIG_LOCATION"] = $pwd
-    ."/data/cluster_configuration.json.nohbase";
-
-unset($GLOBALS["HDP_MON_CONFIG_INITIALIZED"]);
-hdp_mon_load_cluster_configuration();
-
-if (!isset($GLOBALS["HDP_MON_CONFIG"])) {
-  error_log("global CONFIG is still not set");
-  exit(1);
-}
-assert(is_array($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["HDFS"]));
-assert(is_array($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["MAPREDUCE"]));
-assert(!isset($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["HBASE"]));
-assert(is_array($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["ZOOKEEPER"]));
-assert(!isset($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["HIVE-METASTORE"]));
-assert(!isset($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["TEMPLETON"]));
-assert(is_array($GLOBALS["HDP_MON_CONFIG"]["SERVICES"]["OOZIE"]));
-
-?>
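
The config-load test above follows a fixture-swap pattern: point the loader at one JSON fixture, assert the parsed shape, then repoint it at a variant (the .nohbase file) and assert which services appear or disappear. A rough Python rendering of the second half, assuming the fixture is plain JSON with the SERVICES map the asserts exercise; load_cluster_config is a hypothetical stand-in for hdp_mon_load_cluster_configuration:

    import json

    def load_cluster_config(path):
        # Hypothetical stand-in for hdp_mon_load_cluster_configuration().
        with open(path) as f:
            return json.load(f)

    def check_services(config, present, absent):
        services = config["SERVICES"]
        for name in present:
            assert name in services, "%s should be configured" % name
        for name in absent:
            assert name not in services, "%s should not be configured" % name

    cfg = load_cluster_config("data/cluster_configuration.json.nohbase")
    check_services(cfg,
                   present=["HDFS", "MAPREDUCE", "ZOOKEEPER", "OOZIE"],
                   absent=["HBASE", "HIVE-METASTORE", "TEMPLETON"])
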
diff --git a/branch-1.2/contrib/addons/test/dataServices/jmx/test_jmx_parsing.php b/branch-1.2/contrib/addons/test/dataServices/jmx/test_jmx_parsing.php
deleted file mode 100644
index b0b663d..0000000
--- a/branch-1.2/contrib/addons/test/dataServices/jmx/test_jmx_parsing.php
+++ /dev/null
@@ -1,255 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-$GLOBALS["HDP_MON_DEBUG_MODE"] = FALSE;
-$pwd = exec("pwd");
-$GLOBALS["HDP_MON_CLUSTER_CONFIG_LOCATION"] = $pwd
-    ."/data/cluster_configuration.json";
-
-include_once("../../../src/dataServices/common/common.inc");
-include_once("../../../src/dataServices/common/cluster_configuration.inc");
-include_once("../../../src/dataServices/common/response.inc");
-include_once("../../../src/dataServices/jmx/hdp_mon_jmx_helpers.inc");
-
-function verify_hdfs_info($info) {
-  assert(is_array($info));
-  assert($info["service_type"] === "HDFS");
-  assert($info["installed"]);
-
-  assert($info["namenode_addr"] == "namenode:50070");
-  assert($info["secondary_namenode_addr"] == "snamenode:50071");
-  assert($info["total_nodes"] == 10);
-  assert($info["memory_heap_used"] == 529321952);
-  assert($info["memory_heap_max"] == 1006632960);
-  assert($info["dfs_dirfiles_count"] == 554);
-  assert($info["dfs_blocks_total"] == 458);
-  assert($info["dfs_blocks_underreplicated"] == 0);
-  assert($info["dfs_blocks_missing"] == 0);
-  assert($info["dfs_blocks_corrupt"] == 0);
-  assert($info["dfs_state"] == "Operational");
-  assert($info["start_time"] == 1327557522);
-  assert($info["live_nodes"] == 10);
-  assert($info["dead_nodes"] == 1);
-  assert($info["decommissioning_nodes"] == 0);
-  assert($info["version"] == "1.0.0");
-  assert($info["safemode"] == TRUE);
-  assert($info["pending_upgrades"] == "");
-  assert($info["dfs_configured_capacity"] == 36336891658240);
-  assert($info["dfs_percent_used"] == 0);
-  assert($info["dfs_percent_remaining"] == 99.08);
-  assert($info["dfs_total_bytes"] == 36336891658240);
-  assert($info["dfs_used_bytes"] == 1750237184);
-  assert($info["nondfs_used_bytes"] == 331691536384);
-  assert($info["dfs_free_bytes"] == 36003449884672);
-  assert($info["safemode_reason"] != "");
-}
-
-function verify_mr_info($info) {
-  assert(is_array($info));
-  assert($info["service_type"] === "MAPREDUCE");
-  assert($info["installed"]);
-
-  assert($info["jobtracker_addr"] == "jobtracker:50030");
-  assert($info["trackers_total"] == 20);
-  assert($info["jobhistory_addr"] == "jobhistory_host:52890");
-  assert($info["memory_heap_used"] == 158277552);
-  assert($info["memory_heap_max"] == 1052770304);
-  assert($info["trackers_live"] == 10);
-  assert($info["trackers_graylisted"] == 0);
-  assert($info["trackers_blacklisted"] == 0);
-  assert($info["version"] == "1.0.0, r1224962");
-
-  assert(is_array($info["queue_info"])
-         && $info["queue_info"]["type"] == "CapacityTaskScheduler"
-         && count($info["queue_info"]["queues"]) == 1);
-
-  assert($info["queue_info"]["queues"]["default"]["state"] == "running"
-    && $info["queue_info"]["queues"]["default"]["capacity_percentage"]
-        == 100.0
-    && $info["queue_info"]["queues"]["default"]["user_limit"] ==  100
-    && $info["queue_info"]["queues"]["default"]["priority_supported"] == 1
-    && $info["queue_info"]["queues"]["default"]["map_capacity"] == 40
-    && $info["queue_info"]["queues"]["default"]["map_running_tasks"] ==  0
-    && $info["queue_info"]["queues"]["default"]["reduce_capacity"] == 20
-    && $info["queue_info"]["queues"]["default"]["reduce_running_tasks"] == 0
-    && $info["queue_info"]["queues"]["default"]["waiting_jobs"] == 3
-    && $info["queue_info"]["queues"]["default"]["initializing_jobs"] ==  0
-    && $info["queue_info"]["queues"]["default"]["users_with_submitted_jobs"]
-        == 0);
-
-  assert($info["trackers_excluded"] == 0);
-  assert($info["map_task_capacity"] == 40);
-  assert($info["reduce_task_capacity"] == 20);
-  assert($info["job_total_submissions"] == 105);
-  assert($info["job_total_completions"] == 104);
-  assert($info["running_jobs"] == 0);
-  assert($info["waiting_jobs"] == 3);
-  assert($info["running_map_tasks"] == 0);
-  assert($info["running_reduce_tasks"] == 0);
-  assert($info["occupied_map_slots"] == 0);
-  assert($info["occupied_reduce_slots"] == 0);
-  assert($info["reserved_map_slots"] == 0);
-  assert($info["reserved_reduce_slots"] == 0);
-  assert($info["waiting_maps"] == 1);
-  assert($info["waiting_reduces"] == 0);
-  assert($info["start_time"] == 1327557546);
-  assert($info["average_node_capacity"] == 6);
-}
-
-function verify_hbase_info($info) {
-  assert(is_array($info));
-  assert($info["service_type"] === "HBASE");
-  assert($info["installed"]);
-  assert($info["total_regionservers"] === 30);
-  assert($info["memory_heap_used"] === 32946880);
-  assert($info["memory_heap_max"] === 1035468800);
-  assert($info["cluster_id"] === "d24914d7-75d3-4dcc-9e6f-0d7770833993");
-  assert($info["start_time"] == 1329244267);
-  assert($info["active_time"] == 1329244269);
-  assert(is_array($info["coprocessors"])
-         && count($info["coprocessors"]) == 0);
-  assert($info["average_load"] == 2);
-  assert($info["regions_in_transition_count"] === 0);
-  assert($info["live_regionservers"] === 1);
-  assert($info["dead_regionservers"] === 0);
-  assert(is_array($info["zookeeper_quorum"])
-         && count($info["zookeeper_quorum"]) == 1
-         && $info["zookeeper_quorum"][0] === "localhost:2181");
-  assert($info["version"] ===
-      "0.92.1-SNAPSHOT, ra23f8636efd6dd9d37f3a15d83f2396819509502");
-}
-
-function verify_overall_info($info) {
-  assert(is_array($info));
-  assert(is_array($info["overall"]));
-  assert(is_array($info["hbase"]));
-  assert(is_array($info["hdfs"]));
-  assert(is_array($info["mapreduce"]));
-
-  assert($info["overall"]["ganglia_url"] ==
-      "http://gangliaweb_host:80/var/www/ganglia2");
-  assert($info["overall"]["nagios_url"] == "http://nagiosserver_host:80/nagios");
-  assert($info["overall"]["hdfs_installed"] == 1);
-  assert($info["overall"]["mapreduce_installed"] == 1);
-  assert($info["overall"]["hbase_installed"] == 1);
-  assert($info["overall"]["namenode_addr"] == "namenode:50070");
-  assert($info["overall"]["secondary_namenode_addr"] == "snamenode:50071");
-  assert($info["overall"]["namenode_starttime"] == 1327557522);
-  assert($info["overall"]["total_nodes"] == 10);
-  assert($info["overall"]["live_nodes"] == 10);
-  assert($info["overall"]["dead_nodes"] == 1);
-  assert($info["overall"]["decommissioning_nodes"] == 0);
-  assert($info["overall"]["dfs_blocks_underreplicated"] == 0);
-  assert($info["overall"]["safemode"] == TRUE);
-  assert($info["overall"]["pending_upgrades"] == "");
-  assert($info["overall"]["dfs_configured_capacity"] == 36336891658240);
-  assert($info["overall"]["dfs_percent_used"] == 0);
-  assert($info["overall"]["dfs_percent_remaining"] == 99.08);
-  assert($info["overall"]["dfs_total_bytes"] == 36336891658240);
-  assert($info["overall"]["dfs_used_bytes"] == 1750237184);
-  assert($info["overall"]["nondfs_used_bytes"] == 331691536384);
-  assert($info["overall"]["dfs_free_bytes"] == 36003449884672);
-  assert($info["overall"]["jobtracker_addr"] == "jobtracker:50030");
-  assert($info["overall"]["jobtracker_starttime"] == 1327557546);
-  assert($info["overall"]["running_jobs"] == 0);
-  assert($info["overall"]["waiting_jobs"] == 3);
-  assert($info["overall"]["trackers_total"] == 20);
-  assert($info["overall"]["trackers_live"] == 10);
-  assert($info["overall"]["trackers_graylisted"] == 0);
-  assert($info["overall"]["trackers_blacklisted"] == 0);
-  assert($info["overall"]["hbasemaster_addr"] == "hbasemaster:60010");
-  assert($info["overall"]["total_regionservers"] == 30);
-  assert($info["overall"]["hbasemaster_starttime"] == 1329244267);
-  assert($info["overall"]["live_regionservers"] == 1);
-  assert($info["overall"]["dead_regionservers"] == 0);
-  assert($info["overall"]["regions_in_transition_count"] == 0);
-
-  assert($info["hdfs"]["namenode_addr"] == "namenode:50070");
-  assert($info["hdfs"]["secondary_namenode_addr"] == "snamenode:50071");
-  assert($info["hdfs"]["namenode_starttime"] == 1327557522);
-  assert($info["hdfs"]["total_nodes"] == 10);
-  assert($info["hdfs"]["live_nodes"] == 10);
-  assert($info["hdfs"]["dead_nodes"] == 1);
-  assert($info["hdfs"]["decommissioning_nodes"] == 0);
-  assert($info["hdfs"]["dfs_blocks_underreplicated"] == 0);
-  assert($info["hdfs"]["safemode"] == TRUE);
-  assert($info["hdfs"]["pending_upgrades"] == "");
-  assert($info["hdfs"]["dfs_configured_capacity"] == 36336891658240);
-  assert($info["hdfs"]["dfs_percent_used"] == 0);
-  assert($info["hdfs"]["dfs_percent_remaining"] == 99.08);
-  assert($info["hdfs"]["dfs_total_bytes"] == 36336891658240);
-  assert($info["hdfs"]["dfs_used_bytes"] == 1750237184);
-  assert($info["hdfs"]["nondfs_used_bytes"] == 331691536384);
-  assert($info["hdfs"]["dfs_free_bytes"] == 36003449884672);
-
-  assert($info["mapreduce"]["jobtracker_addr"] == "jobtracker:50030");
-  assert($info["mapreduce"]["jobtracker_starttime"] == 1327557546);
-  assert($info["mapreduce"]["running_jobs"] == 0);
-  assert($info["mapreduce"]["waiting_jobs"] == 3);
-  assert($info["mapreduce"]["trackers_total"] == 20);
-  assert($info["mapreduce"]["trackers_live"] == 10);
-  assert($info["mapreduce"]["trackers_graylisted"] == 0);
-  assert($info["mapreduce"]["trackers_blacklisted"] == 0);
-
-  assert($info["hbase"]["hbasemaster_addr"] == "hbasemaster:60010");
-  assert($info["hbase"]["total_regionservers"] == 30);
-  assert($info["hbase"]["hbasemaster_starttime"] == 1329244267);
-  assert($info["hbase"]["live_regionservers"] == 1);
-  assert($info["hbase"]["dead_regionservers"] == 0);
-  assert($info["hbase"]["regions_in_transition_count"] == 0);
-}
-
-hdp_mon_load_cluster_configuration();
-if (!isset($GLOBALS["HDP_MON_CONFIG"])) {
-  error_log("global CONFIG is still not set");
-  exit(1);
-}
-
-$hdfs_jmx_json = file_get_contents("./data/sample_namenode_jmx.json");
-if (!$hdfs_jmx_json || $hdfs_jmx_json == "") {
-  error_log("Invalid json data for namenode jmx");
-  exit(1);
-}
-
-$hdfsinfo = hdp_mon_jmx_parse_hdfs_info(json_decode($hdfs_jmx_json, true));
-verify_hdfs_info($hdfsinfo);
-
-$mr_jmx_json = file_get_contents("./data/sample_jobtracker_jmx.json");
-if (!$mr_jmx_json || $mr_jmx_json == "") {
-  error_log("Invalid json data for jobtracker jmx");
-  exit(1);
-}
-
-$mrinfo = hdp_mon_jmx_parse_mapreduce_info(json_decode($mr_jmx_json, true));
-verify_mr_info($mrinfo);
-
-$hbase_jmx_json = file_get_contents("./data/sample_hbasemaster_jmx.json");
-if (!$hbase_jmx_json || $hbase_jmx_json == "") {
-  error_log("Invalid json data for hbase master jmx");
-  exit(1);
-}
-
-$hbaseinfo = hdp_mon_jmx_parse_hbase_info(json_decode($hbase_jmx_json, true));
-verify_hbase_info($hbaseinfo);
-
-$overallinfo = hdp_mon_helper_get_cluster_info($hdfsinfo,
-    $mrinfo, $hbaseinfo);
-verify_overall_info($overallinfo);
-
-?>
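
verify_overall_info above documents the contract of hdp_mon_helper_get_cluster_info: the three per-service dicts come back unchanged under hdfs/mapreduce/hbase keys, plus a flat "overall" section that copies selected fields from each. A sketch of that merge in Python — the key lists here are abbreviated to a few of the fields asserted above, so the asserts, not this sketch, define the full set:

    def build_cluster_info(hdfs, mapreduce, hbase):
        # Sketch of the merge contract checked by verify_overall_info():
        # keep each service dict intact and project selected fields into
        # a flat "overall" summary. Key lists are abbreviated.
        overall = {
            "hdfs_installed": hdfs["installed"],
            "mapreduce_installed": mapreduce["installed"],
            "hbase_installed": hbase["installed"],
        }
        for key in ("namenode_addr", "live_nodes", "dead_nodes", "safemode"):
            overall[key] = hdfs[key]
        for key in ("jobtracker_addr", "running_jobs", "waiting_jobs"):
            overall[key] = mapreduce[key]
        for key in ("hbasemaster_addr", "live_regionservers", "dead_regionservers"):
            overall[key] = hbase[key]
        return {"overall": overall, "hdfs": hdfs,
                "mapreduce": mapreduce, "hbase": hbase}
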
diff --git a/branch-1.2/contrib/addons/test/nagios/plugins/test_sys_logger.py b/branch-1.2/contrib/addons/test/nagios/plugins/test_sys_logger.py
deleted file mode 100644
index 7f9abff..0000000
--- a/branch-1.2/contrib/addons/test/nagios/plugins/test_sys_logger.py
+++ /dev/null
@@ -1,412 +0,0 @@
-#!/usr/bin/python
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import sys
-sys.path.append('../src')
-
-import sys_logger
-
-tests_passed = 0
-tests_failed = 0
-def test_log_tvi_msg(msg):
-    global tests_passed, tests_failed
-    if msg == expected_log_msg:
-        print 'Test Passed'
-        tests_passed += 1
-    else:
-        print '*** TEST FAILED ***'
-        print 'Expected MSG: {0}'.format(expected_log_msg)
-        print 'Actual MSG  : {0}'.format(msg)
-        tests_failed += 1
-
-sys_logger.log_tvi_msg = test_log_tvi_msg
-
-def test(tvi_rule, expected_msg, arg1, arg2, arg3, arg4, arg5):
-    sys.stdout.write(tvi_rule + ': ')
-    global expected_log_msg
-    expected_log_msg = expected_msg
-    sys_logger.generate_tvi_log_msg(arg1, arg2, arg3, arg4, arg5)
-
-def summary():
-    total_tests = tests_passed + tests_failed
-    print '\nTests Run: {0}'.format(total_tests)
-    print 'Passed: {0}, Failed: {1}'.format(tests_passed, tests_failed)
-    if not tests_failed:
-        print 'SUCCESS! All tests pass.'
-
-
-# Hadoop_Host_Down
-test('Hadoop_Host_Down',
-     'Critical: Hadoop: host_down# Event Host=MY_HOST(CRITICAL), PING FAILED - Packet loss = 100%, RTA = 0.00 ms',
-     'HARD', '1', 'CRITICAL', 'Host::Ping', 'Event Host=MY_HOST(CRITICAL), PING FAILED - Packet loss = 100%, RTA = 0.00 ms')
-
-test('Hadoop_Host_Down:OK',
-    'OK: Hadoop: host_down_ok# Event Host=MY_HOST(OK), PING SUCCESS - Packet loss = 0%, RTA = 1.00 ms',
-    'HARD', '1', 'OK', 'Host::Ping', 'Event Host=MY_HOST(OK), PING SUCCESS - Packet loss = 0%, RTA = 1.00 ms')
-
-# Hadoop_Master_Daemon_CPU_Utilization
-test('Hadoop_Master_Daemon_CPU_Utilization',
-     'Critical: Hadoop: master_cpu_utilization# Event Host=MY_HOST Service Description=HBASEMASTER::HBaseMaster CPU utilization(CRITICAL), 4 CPU, average load 2.5%  200%',
-     'HARD', '1', 'CRITICAL', 'HBASEMASTER::HBaseMaster CPU utilization',
-     'Event Host=MY_HOST Service Description=HBASEMASTER::HBaseMaster CPU utilization(CRITICAL), 4 CPU, average load 2.5%  200%')
-
-test('Hadoop_Master_Daemon_CPU_Utilization:Degraded',
-    'Degraded: Hadoop: master_cpu_utilization# Event Host=MY_HOST Service Description=HBASEMASTER::HBaseMaster CPU utilization(CRITICAL), 4 CPU, average load 2.5%  200%',
-    'HARD', '1', 'WARNING', 'HBASEMASTER::HBaseMaster CPU utilization',
-    'Event Host=MY_HOST Service Description=HBASEMASTER::HBaseMaster CPU utilization(CRITICAL), 4 CPU, average load 2.5%  200%')
-
-test('Hadoop_Master_Daemon_CPU_Utilization:OK',
-    'OK: Hadoop: master_cpu_utilization_ok# Event Host=MY_HOST Service Description=HBASEMASTER::HBaseMaster CPU utilization(OK), 4 CPU, average load 2.5%  200%',
-    'HARD', '1', 'OK', 'HBASEMASTER::HBaseMaster CPU utilization',
-    'Event Host=MY_HOST Service Description=HBASEMASTER::HBaseMaster CPU utilization(OK), 4 CPU, average load 2.5%  200%')
-
-# Hadoop_HDFS_Percent_Capacity
-test('Hadoop_HDFS_Percent_Capacity',
-     'Critical: Hadoop: hdfs_percent_capacity# Event Host=MY_HOST Service Description=HDFS::HDFS Capacity utilization(CRITICAL),DFSUsedGB:0.1, DFSTotalGB:1568.7',
-     'HARD', '1', 'CRITICAL', 'HDFS::HDFS Capacity utilization',
-     'Event Host=MY_HOST Service Description=HDFS::HDFS Capacity utilization(CRITICAL),DFSUsedGB:0.1, DFSTotalGB:1568.7')
-
-test('Hadoop_HDFS_Percent_Capacity:OK',
-    'OK: Hadoop: hdfs_percent_capacity_ok# Event Host=MY_HOST Service Description=HDFS::HDFS Capacity utilization(OK),DFSUsedGB:0.1, DFSTotalGB:1568.7',
-    'HARD', '1', 'OK', 'HDFS::HDFS Capacity utilization',
-    'Event Host=MY_HOST Service Description=HDFS::HDFS Capacity utilization(OK),DFSUsedGB:0.1, DFSTotalGB:1568.7')
-
-# Hadoop_HDFS_Corrupt_Missing_Blocks
-test('Hadoop_HDFS_Corrupt_Missing_Blocks',
-     'Critical: Hadoop: hdfs_block# Event Host=MY_HOST Service Description=HDFS::Corrupt/Missing blocks(CRITICAL), corrupt_blocks:0, missing_blocks:0, total_blocks:147',
-     'HARD', '1', 'CRITICAL', 'HDFS::Corrupt/Missing blocks',
-     'Event Host=MY_HOST Service Description=HDFS::Corrupt/Missing blocks(CRITICAL), corrupt_blocks:0, missing_blocks:0, total_blocks:147')
-
-test('Hadoop_HDFS_Corrupt_Missing_Blocks:OK',
-    'OK: Hadoop: hdfs_block_ok# Event Host=MY_HOST Service Description=HDFS::Corrupt/Missing blocks(OK), corrupt_blocks:0, missing_blocks:0, total_blocks:147',
-    'HARD', '1', 'OK', 'HDFS::Corrupt/Missing blocks',
-    'Event Host=MY_HOST Service Description=HDFS::Corrupt/Missing blocks(OK), corrupt_blocks:0, missing_blocks:0, total_blocks:147')
-
-# Hadoop_NameNode_Edit_Log_Dir_Write
-test('Hadoop_NameNode_Edit_Log_Dir_Write',
-     'Critical: Hadoop: namenode_edit_log_write# SERVICE MSG',
-     'HARD', '1', 'CRITICAL', 'NAMENODE::Namenode Edit logs directory status', 'SERVICE MSG')
-
-test('Hadoop_NameNode_Edit_Log_Dir_Write:OK',
-    'OK: Hadoop: namenode_edit_log_write_ok# SERVICE MSG',
-    'HARD', '1', 'OK', 'NAMENODE::Namenode Edit logs directory status', 'SERVICE MSG')
-
-# Hadoop_DataNode_Down
-test('Hadoop_DataNode_Down',
-     'Critical: Hadoop: datanode_down# SERVICE MSG',
-     'HARD', '1', 'CRITICAL', 'HDFS::Percent DataNodes down','SERVICE MSG')
-
-test('Hadoop_DataNode_Down:OK',
-    'OK: Hadoop: datanode_down_ok# SERVICE MSG',
-    'HARD', '1', 'OK', 'HDFS::Percent DataNodes down','SERVICE MSG')
-
-# Hadoop_DataNode_Process_Down
-test('Hadoop_DataNode_Process_Down',
-     'Critical: Hadoop: datanode_process_down# SERVICE MSG',
-     'HARD', '1', 'CRITICAL', 'DATANODE::Process down', 'SERVICE MSG')
-
-test('Hadoop_DataNode_Process_Down:OK',
-    'OK: Hadoop: datanode_process_down_ok# SERVICE MSG',
-    'HARD', '1', 'OK', 'DATANODE::Process down', 'SERVICE MSG')
-
-# Hadoop_Percent_DataNodes_Storage_Full
-test('Hadoop_Percent_DataNodes_Storage_Full',
-     'Critical: Hadoop: datanodes_percent_storage_full# SERVICE MSG',
-     'HARD', '1', 'CRITICAL', 'HDFS::Percent DataNodes storage full', 'SERVICE MSG')
-
-test('Hadoop_Percent_DataNodes_Storage_Full:OK',
-    'OK: Hadoop: datanodes_percent_storage_full_ok# SERVICE MSG',
-    'HARD', '1', 'OK', 'HDFS::Percent DataNodes storage full', 'SERVICE MSG')
-
-# Hadoop_NameNode_Process_Down
-test('Hadoop_NameNode_Process_Down:CRITICAL',
-     'Fatal: Hadoop: namenode_process_down# SERVICE MSG',
-     'HARD', '1', 'CRITICAL', 'NAMENODE::Namenode Process down', 'SERVICE MSG')
-
-test('Hadoop_NameNode_Process_Down:WARNING',
-    'Fatal: Hadoop: namenode_process_down# SERVICE MSG',
-    'HARD', '1', 'WARNING', 'NAMENODE::Namenode Process down', 'SERVICE MSG')
-
-test('Hadoop_NameNode_Process_Down:UNKNOWN',
-    'Fatal: Hadoop: namenode_process_down# SERVICE MSG',
-    'HARD', '1', 'UNKNOWN', 'NAMENODE::Namenode Process down', 'SERVICE MSG')
-
-test('Hadoop_NameNode_Process_Down:OK',
-    'OK: Hadoop: namenode_process_down_ok# SERVICE MSG',
-    'HARD', '1', 'OK', 'NAMENODE::Namenode Process down', 'SERVICE MSG')
-
-# Hadoop_Secondary_NameNode_Process_Down
-test('Hadoop_Secondary_NameNode_Process_Down',
-    'Critical: Hadoop: secondary_namenode_process_down# SERVICE MSG',
-    'HARD', '1', 'CRITICAL', 'NAMENODE::Secondary Namenode Process down', 'SERVICE MSG')
-
-test('Hadoop_Secondary_NameNode_Process_Down:OK',
-    'OK: Hadoop: secondary_namenode_process_down_ok# SERVICE MSG',
-    'HARD', '1', 'OK', 'NAMENODE::Secondary Namenode Process down', 'SERVICE MSG')
-
-# Hadoop_NameNode_RPC_Latency
-test('Hadoop_NameNode_RPC_Latency',
-     'Critical: Hadoop: namenode_rpc_latency# SERVICE MSG',
-     'HARD', '1', 'CRITICAL', 'HDFS::Namenode RPC Latency', 'SERVICE MSG')
-
-test('Hadoop_NameNode_RPC_Latency:Degraded',
-    'Degraded: Hadoop: namenode_rpc_latency# SERVICE MSG',
-    'HARD', '1', 'WARNING', 'HDFS::Namenode RPC Latency', 'SERVICE MSG')
-
-test('Hadoop_NameNode_RPC_Latency:OK',
-    'OK: Hadoop: namenode_rpc_latency_ok# SERVICE MSG',
-    'HARD', '1', 'OK', 'HDFS::Namenode RPC Latency', 'SERVICE MSG')
-
-# Hadoop_DataNodes_Storage_Full
-test('Hadoop_DataNodes_Storage_Full',
-     'Critical: Hadoop: datanodes_storage_full# SERVICE MSG',
-     'HARD', '1', 'CRITICAL', 'DATANODE::Storage full', 'SERVICE MSG')
-
-test('Hadoop_DataNodes_Storage_Full:OK',
-    'OK: Hadoop: datanodes_storage_full_ok# SERVICE MSG',
-    'HARD', '1', 'OK', 'DATANODE::Storage full', 'SERVICE MSG')
-
-# Hadoop_JobTracker_Process_Down
-test('Hadoop_JobTracker_Process_Down',
-     'Critical: Hadoop: jobtracker_process_down# SERVICE MSG',
-     'HARD', '1', 'CRITICAL', 'JOBTRACKER::Jobtracker Process down', 'SERVICE MSG')
-
-test('Hadoop_JobTracker_Process_Down:OK',
-    'OK: Hadoop: jobtracker_process_down_ok# SERVICE MSG',
-    'HARD', '1', 'OK', 'JOBTRACKER::Jobtracker Process down', 'SERVICE MSG')
-
-# Hadoop_JobTracker_RPC_Latency
-test('Hadoop_JobTracker_RPC_Latency',
-     'Critical: Hadoop: jobtracker_rpc_latency# SERVICE MSG',
-     'HARD', '1', 'CRITICAL', 'MAPREDUCE::JobTracker RPC Latency', 'SERVICE MSG')
-
-test('Hadoop_JobTracker_RPC_Latency:Degraded',
-    'Degraded: Hadoop: jobtracker_rpc_latency# SERVICE MSG',
-    'HARD', '1', 'WARNING', 'MAPREDUCE::JobTracker RPC Latency', 'SERVICE MSG')
-
-test('Hadoop_JobTracker_RPC_Latency:OK',
-    'OK: Hadoop: jobtracker_rpc_latency_ok# SERVICE MSG',
-    'HARD', '1', 'OK', 'MAPREDUCE::JobTracker RPC Latency', 'SERVICE MSG')
-
-# Hadoop_JobTracker_CPU_Utilization
-test('Hadoop_JobTracker_CPU_Utilization',
-    'Critical: Hadoop: jobtracker_cpu_utilization# SERVICE MSG',
-    'HARD', '1', 'CRITICAL', 'JOBTRACKER::Jobtracker CPU utilization', 'SERVICE MSG')
-
-test('Hadoop_JobTracker_CPU_Utilization:Degraded',
-    'Degraded: Hadoop: jobtracker_cpu_utilization# SERVICE MSG',
-    'HARD', '1', 'WARNING', 'JOBTRACKER::Jobtracker CPU utilization', 'SERVICE MSG')
-
-test('Hadoop_JobTracker_CPU_Utilization:OK',
-    'OK: Hadoop: jobtracker_cpu_utilization_ok# SERVICE MSG',
-    'HARD', '1', 'OK', 'JOBTRACKER::Jobtracker CPU utilization', 'SERVICE MSG')
-
-# Hadoop_TaskTracker_Down
-test('Hadoop_TaskTracker_Down',
-     'Critical: Hadoop: tasktrackers_down# SERVICE MSG',
-     'HARD', '1', 'CRITICAL', 'MAPREDUCE::Percent TaskTrackers down', 'SERVICE MSG')
-
-test('Hadoop_TaskTracker_Down:OK',
-    'OK: Hadoop: tasktrackers_down_ok# SERVICE MSG',
-    'HARD', '1', 'OK', 'MAPREDUCE::Percent TaskTrackers down', 'SERVICE MSG')
-
-# Hadoop_TaskTracker_Process_Down
-test('Hadoop_TaskTracker_Process_Down',
-     'Critical: Hadoop: tasktracker_process_down# SERVICE MSG',
-     'HARD', '1', 'CRITICAL', 'TASKTRACKER::Process down', 'SERVICE MSG')
-
-test('Hadoop_TaskTracker_Process_Down:OK',
-    'OK: Hadoop: tasktracker_process_down_ok# SERVICE MSG',
-    'HARD', '1', 'OK', 'TASKTRACKER::Process down', 'SERVICE MSG')
-
-# Hadoop_HBaseMaster_Process_Down
-test('Hadoop_HBaseMaster_Process_Down',
-     'Critical: Hadoop: hbasemaster_process_down# SERVICE MSG',
-     'HARD', '1', 'CRITICAL', 'HBASEMASTER::HBaseMaster Process down', 'SERVICE MSG')
-
-test('Hadoop_HBaseMaster_Process_Down:OK',
-    'OK: Hadoop: hbasemaster_process_down_ok# SERVICE MSG',
-    'HARD', '1', 'OK', 'HBASEMASTER::HBaseMaster Process down', 'SERVICE MSG')
-
-# Hadoop_RegionServer_Process_Down
-test('Hadoop_RegionServer_Process_Down',
-     'Critical: Hadoop: regionserver_process_down# SERVICE MSG',
-     'HARD', '1', 'CRITICAL', 'REGIONSERVER::Process down', 'SERVICE MSG')
-
-test('Hadoop_RegionServer_Process_Down:OK',
-    'OK: Hadoop: regionserver_process_down_ok# SERVICE MSG',
-    'HARD', '1', 'OK', 'REGIONSERVER::Process down', 'SERVICE MSG')
-
-# Hadoop_RegionServer_Down
-test('Hadoop_RegionServer_Down',
-     'Critical: Hadoop: regionservers_down# SERVICE MSG',
-     'HARD', '1', 'CRITICAL', 'HBASE::Percent region servers down', 'SERVICE MSG')
-
-test('Hadoop_RegionServer_Down:OK',
-    'OK: Hadoop: regionservers_down_ok# SERVICE MSG',
-    'HARD', '1', 'OK', 'HBASE::Percent region servers down', 'SERVICE MSG')
-
-# Hadoop_Hive_Metastore_Process_Down
-test('Hadoop_Hive_Metastore_Process_Down',
-     'Critical: Hadoop: hive_metastore_process_down# SERVICE MSG',
-     'HARD', '1', 'CRITICAL', 'HIVE-METASTORE::HIVE-METASTORE status check', 'SERVICE MSG')
-
-test('Hadoop_Hive_Metastore_Process_Down:OK',
-    'OK: Hadoop: hive_metastore_process_down_ok# SERVICE MSG',
-    'HARD', '1', 'OK', 'HIVE-METASTORE::HIVE-METASTORE status check', 'SERVICE MSG')
-
-# Hadoop_Zookeeper_Down
-test('Hadoop_Zookeeper_Down',
-     'Critical: Hadoop: zookeepers_down# SERVICE MSG',
-     'HARD', '1', 'CRITICAL', 'ZOOKEEPER::Percent zookeeper servers down', 'SERVICE MSG')
-
-test('Hadoop_Zookeeper_Down:OK',
-    'OK: Hadoop: zookeepers_down_ok# SERVICE MSG',
-    'HARD', '1', 'OK', 'ZOOKEEPER::Percent zookeeper servers down', 'SERVICE MSG')
-
-# Hadoop_Zookeeper_Process_Down
-test('Hadoop_Zookeeper_Process_Down',
-     'Critical: Hadoop: zookeeper_process_down# SERVICE MSG',
-     'HARD', '1', 'CRITICAL', 'ZKSERVERS::ZKSERVERS Process down', 'SERVICE MSG')
-
-test('Hadoop_Zookeeper_Process_Down:OK',
-    'OK: Hadoop: zookeeper_process_down_ok# SERVICE MSG',
-    'HARD', '1', 'OK', 'ZKSERVERS::ZKSERVERS Process down', 'SERVICE MSG')
-
-# Hadoop_Oozie_Down
-test('Hadoop_Oozie_Down',
-     'Critical: Hadoop: oozie_down# SERVICE MSG',
-     'HARD', '1', 'CRITICAL', 'OOZIE::Oozie status check', 'SERVICE MSG')
-
-test('Hadoop_Oozie_Down:OK',
-    'OK: Hadoop: oozie_down_ok# SERVICE MSG',
-    'HARD', '1', 'OK', 'OOZIE::Oozie status check', 'SERVICE MSG')
-
-# Hadoop_Templeton_Down
-test('Hadoop_Templeton_Down',
-     'Critical: Hadoop: templeton_down# SERVICE MSG',
-     'HARD', '1', 'CRITICAL', 'TEMPLETON::Templeton status check', 'SERVICE MSG')
-
-test('Hadoop_Templeton_Down:OK',
-    'OK: Hadoop: templeton_down_ok# SERVICE MSG',
-    'HARD', '1', 'OK', 'TEMPLETON::Templeton status check', 'SERVICE MSG')
-
-# Hadoop_Puppet_Down
-test('Hadoop_Puppet_Down',
-     'Critical: Hadoop: puppet_down# SERVICE MSG',
-     'HARD', '1', 'CRITICAL', 'PUPPET::Puppet agent down', 'SERVICE MSG')
-
-test('Hadoop_Puppet_Down:OK',
-    'OK: Hadoop: puppet_down_ok# SERVICE MSG',
-    'HARD', '1', 'OK', 'PUPPET::Puppet agent down', 'SERVICE MSG')
-
-# Hadoop_Nagios_Status_Log_Stale
-test('Hadoop_Nagios_Status_Log_Stale',
-     'Critical: Hadoop: nagios_status_log_stale# SERVICE MSG',
-     'HARD', '1', 'CRITICAL', 'NAGIOS::Nagios status log staleness', 'SERVICE MSG')
-
-test('Hadoop_Nagios_Status_Log_Stale:OK',
-    'OK: Hadoop: nagios_status_log_stale_ok# SERVICE MSG',
-    'HARD', '1', 'OK', 'NAGIOS::Nagios status log staleness', 'SERVICE MSG')
-
-# Hadoop_Ganglia_Process_Down
-test('Hadoop_Ganglia_Process_Down',
-     'Critical: Hadoop: ganglia_process_down# SERVICE MSG',
-     'HARD', '1', 'CRITICAL', 'GANGLIA::Ganglia [gmetad] Process down', 'SERVICE MSG')
-
-test('Hadoop_Ganglia_Process_Down:OK',
-    'OK: Hadoop: ganglia_process_down_ok# SERVICE MSG',
-    'HARD', '1', 'OK', 'GANGLIA::Ganglia [gmetad] Process down', 'SERVICE MSG')
-
-# Hadoop_Ganglia_Collector_Process_Down
-test('Hadoop_Ganglia_Collector_Process_Down',
-     'Critical: Hadoop: ganglia_collector_process_down# SERVICE MSG',
-     'HARD', '1', 'CRITICAL', 'GANGLIA::Ganglia collector [gmond] Process down alert for hbasemaster', 'SERVICE MSG')
-
-test('Hadoop_Ganglia_Collector_Process_Down:OK',
-    'OK: Hadoop: ganglia_collector_process_down_ok# SERVICE MSG',
-    'HARD', '1', 'OK', 'GANGLIA::Ganglia collector [gmond] Process down alert for hbasemaster', 'SERVICE MSG')
-
-# Hadoop_Ganglia_Collector_Process_Down
-test('Hadoop_Ganglia_Collector_Process_Down',
-     'Critical: Hadoop: ganglia_collector_process_down# SERVICE MSG',
-     'HARD', '1', 'CRITICAL', 'GANGLIA::Ganglia collector [gmond] Process down alert for jobtracker', 'SERVICE MSG')
-
-test('Hadoop_Ganglia_Collector_Process_Down:OK',
-    'OK: Hadoop: ganglia_collector_process_down_ok# SERVICE MSG',
-    'HARD', '1', 'OK', 'GANGLIA::Ganglia collector [gmond] Process down alert for jobtracker', 'SERVICE MSG')
-
-# Hadoop_Ganglia_Collector_Process_Down
-test('Hadoop_Ganglia_Collector_Process_Down',
-     'Critical: Hadoop: ganglia_collector_process_down# SERVICE MSG',
-     'HARD', '1', 'CRITICAL', 'GANGLIA::Ganglia collector [gmond] Process down alert for namenode', 'SERVICE MSG')
-
-test('Hadoop_Ganglia_Collector_Process_Down:OK',
-    'OK: Hadoop: ganglia_collector_process_down_ok# SERVICE MSG',
-    'HARD', '1', 'OK', 'GANGLIA::Ganglia collector [gmond] Process down alert for namenode', 'SERVICE MSG')
-
-# Hadoop_Ganglia_Collector_Process_Down
-test('Hadoop_Ganglia_Collector_Process_Down',
-     'Critical: Hadoop: ganglia_collector_process_down# SERVICE MSG',
-     'HARD', '1', 'CRITICAL', 'GANGLIA::Ganglia collector [gmond] Process down alert for slaves', 'SERVICE MSG')
-
-test('Hadoop_Ganglia_Collector_Process_Down:OK',
-    'OK: Hadoop: ganglia_collector_process_down_ok# SERVICE MSG',
-    'HARD', '1', 'OK', 'GANGLIA::Ganglia collector [gmond] Process down alert for slaves', 'SERVICE MSG')
-
-# Hadoop_UNKNOWN_MSG
-test('Hadoop_UNKNOWN_MSG',
-     'Critical: Hadoop: HADOOP_UNKNOWN_MSG# SERVICE MSG',
-     'HARD', '1', 'CRITICAL', 'ANY UNKNOWN SERVICE', 'SERVICE MSG')
-
-# HBase UI Down
-test('Hadoop_HBase_UI_Down',
-    'Critical: Hadoop: hbase_ui_down# SERVICE MSG',
-    'HARD', '1', 'CRITICAL', 'HBASEMASTER::HBase Web UI down', 'SERVICE MSG')
-
-test('Hadoop_HBase_UI_Down:OK',
-    'OK: Hadoop: hbase_ui_down_ok# SERVICE MSG',
-    'HARD', '1', 'OK', 'HBASEMASTER::HBase Web UI down', 'SERVICE MSG')
-
-# Namenode UI Down
-test('Hadoop_NameNode_UI_Down',
-    'Critical: Hadoop: namenode_ui_down# SERVICE MSG',
-    'HARD', '1', 'CRITICAL', 'NAMENODE::Namenode Web UI down', 'SERVICE MSG')
-
-test('Hadoop_NameNode_UI_Down:OK',
-    'OK: Hadoop: namenode_ui_down_ok# SERVICE MSG',
-    'HARD', '1', 'OK', 'NAMENODE::Namenode Web UI down', 'SERVICE MSG')
-
-# JobHistory UI Down
-test('Hadoop_JobHistory_UI_Down',
-    'Critical: Hadoop: jobhistory_ui_down# SERVICE MSG',
-    'HARD', '1', 'CRITICAL', 'JOBTRACKER::JobHistory Web UI down', 'SERVICE MSG')
-
-test('Hadoop_JobHistory_UI_Down:OK',
-    'OK: Hadoop: jobhistory_ui_down_ok# SERVICE MSG',
-    'HARD', '1', 'OK', 'JOBTRACKER::JobHistory Web UI down', 'SERVICE MSG')
-
-# JobTracker UI Down
-test('Hadoop_JobTracker_UI_Down',
-    'Critical: Hadoop: jobtracker_ui_down# SERVICE MSG',
-    'HARD', '1', 'CRITICAL', 'JOBTRACKER::JobTracker Web UI down', 'SERVICE MSG')
-
-test('Hadoop_JobTracker_UI_Down:OK',
-    'OK: Hadoop: jobtracker_ui_down_ok# SERVICE MSG',
-    'HARD', '1', 'OK', 'JOBTRACKER::JobTracker Web UI down', 'SERVICE MSG')
-
-summary()
-
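The harness above tests a Nagios event handler without Nagios: it monkey-patches sys_logger.log_tvi_msg with a comparator before driving generate_tvi_log_msg, so every rule is checked against the exact TVI line it would emit. The same capture pattern, factored into a reusable helper (a sketch; restoring the original function is left to the caller):

    def capture_calls(module, attr):
        # Replace a module-level function with a recorder, as the harness
        # above does with sys_logger.log_tvi_msg, and collect every call.
        calls = []
        original = getattr(module, attr)
        def recorder(*args, **kwargs):
            calls.append((args, kwargs))
        setattr(module, attr, recorder)
        return calls, original

    # Usage sketch:
    #   calls, original = capture_calls(sys_logger, 'log_tvi_msg')
    #   sys_logger.generate_tvi_log_msg('HARD', '1', 'OK', 'Host::Ping', 'MSG')
    #   assert calls[0][0][0].startswith('OK: Hadoop:')
    #   setattr(sys_logger, 'log_tvi_msg', original)  # restore
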
diff --git a/branch-1.2/contrib/addons/test/ui/json/alerts.json b/branch-1.2/contrib/addons/test/ui/json/alerts.json
deleted file mode 100644
index 00f6abe..0000000
--- a/branch-1.2/contrib/addons/test/ui/json/alerts.json
+++ /dev/null
@@ -1,122 +0,0 @@
-{"alerts":
-	[
-		{
-			"service_type":"SYSTEM",
-			"service_description":"0 SYSTEM Load-critical",
-			"host_name":"ip-10-242-191-48.ec2.internal",
-			"current_attempt":"1",
-			"last_hard_state":"2",
-			"plugin_output":"OK - load average: 0.12, 0.11, 0.08 99999999999999999999",
-			"last_hard_state_change":"1327362079",
-			"last_time_ok":"1327385479",
-			"last_time_warning":"0",
-			"last_time_unknown":"0",
-			"last_time_critical":"0",
-			"is_flapping":"0",
-			"last_check":"1328827745"
-		},
-		{
-			"service_type":"MAPREDUCE",
-			"service_description":"1 MAPRED Current Users - 222",
-			"host_name":"ip-10-242-191-48.ec2.internal",
-			"current_attempt":"1",
-			"last_hard_state":"1",
-			"plugin_output":"USERS OK - 1 users currently logged in",
-			"last_hard_state_change":"1327362154",
-			"last_time_ok":"1327385554",
-			"last_time_warning":"0",
-			"last_time_unknown":"0",
-			"last_time_critical":"0",
-			"is_flapping":"0",
-			"last_check":"1328827745"
-		},
-		{
-			"service_type":"HBASE",
-			"service_description":"2 HBASE",
-			"host_name":"ip-10-242-191-48.ec2.internal",
-			"current_attempt":"1",
-			"last_hard_state":"0",
-			"plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms",
-			"last_hard_state_change":"1327362229",
-			"last_time_ok":"1327385629",
-			"last_time_warning":"0",
-			"last_time_unknown":"0",
-			"last_time_critical":"0",
-			"is_flapping":"0",
-			"last_check":"1328827745"
-		},
-		{
-			"service_type":"HDFS",
-			"service_description":"3 HDFS warning",
-			"host_name":"ip-10-242-191-48.ec2.internal",
-			"current_attempt":"1",
-			"last_hard_state":"1",
-			"plugin_output":"DISK OK - free space: \/ 6605 MB (69% inode=88%):",
-			"last_hard_state_change":"1327362304",
-			"last_time_ok":"1327385704",
-			"last_time_warning":"0",
-			"last_time_unknown":"0",
-			"last_time_critical":"0",
-			"is_flapping":"0",
-			"last_check":"1328827745"
-		},
-		{
-			"service_type":"HDFS",
-			"service_description":"4 HDFS critical",
-			"host_name":"ip-10-40-199-111.ec2.internal",
-			"current_attempt":"1",
-			"last_hard_state":"2",
-			"plugin_output":"4:HDFS critical alert",
-			"last_hard_state_change":"1327362104",
-			"last_time_ok":"1327385504",
-			"last_time_warning":"0",
-			"last_time_unknown":"0",
-			"last_time_critical":"0",
-			"is_flapping":"0",
-			"last_check":"1328827745"
-		},
-		{
-			"service_type":"MAPREDUCE",
-			"service_description":"5 MAPREDUCE ok",
-			"host_name":"ip-10-40-199-111.ec2.internal",
-			"current_attempt":"1",
-			"last_hard_state":"0",
-			"plugin_output":"DISK OK - free space: \/ 6605 MB (69% inode=88%):",
-			"last_hard_state_change":"1327362254",
-			"last_time_ok":"1327385654",
-			"last_time_warning":"0",
-			"last_time_unknown":"0",
-			"last_time_critical":"0",
-			"is_flapping":"0",
-			"last_check":"1328827745"
-		},
-		{
-			"service_type":"ZOOKEEPER","service_description":"DATANODE Total Processes","host_name":"ip-10-80-119-243.ec2.internal","current_attempt":"1","last_hard_state":"1","plugin_output":"PROCS OK: 73 processes with STATE = RSZDT","last_hard_state_change":"1327362129","last_time_ok":"1327385529","last_time_warning":"0","last_time_unknown":"0","last_time_critical":"0","is_flapping":"0","last_check":"1328827745"},
-		{
-			"service_type":"HIVE-METASTORE","service_description":"PING","host_name":"ip-10-80-119-243.ec2.internal","current_attempt":"1","last_hard_state":"1","plugin_output":"PING OK - Packet loss = 0%, RTA = 2.92 ms","last_hard_state_change":"1327362204","last_time_ok":"1327385604","last_time_warning":"0","last_time_unknown":"0","last_time_critical":"0","is_flapping":"0","last_check":"1328827745"},
-		{
-			"service_type":"HBASE","service_description":"DATANODE Total Processes","host_name":"ip-10-80-119-243.ec2.internal","current_attempt":"1","last_hard_state":"0","plugin_output":"PROCS OK: 73 processes with STATE = RSZDT","last_hard_state_change":"1327362129","last_time_ok":"1327385529","last_time_warning":"0","last_time_unknown":"0","last_time_critical":"0","is_flapping":"0","last_check":"1328827745"},
-		{
-			"service_type":"HBASE","service_description":"PING","host_name":"ip-10-80-119-243.ec2.internal","current_attempt":"1","last_hard_state":"0","plugin_output":"PING OK - Packet loss = 0%, RTA = 2.92 ms","last_hard_state_change":"1327362204","last_time_ok":"1327385604","last_time_warning":"0","last_time_unknown":"0","last_time_critical":"0","is_flapping":"0","last_check":"1328827745"},
-		{
-			"service_type":"HBASE","service_description":"Root Partition","host_name":"ip-10-80-119-243.ec2.internal","current_attempt":"1","last_hard_state":"0","plugin_output":"DISK OK - free space: \/ 6605 MB (69% inode=88%):","last_hard_state_change":"1327362279","last_time_ok":"1327385679","last_time_warning":"0","last_time_unknown":"0","last_time_critical":"0","is_flapping":"0","last_check":"1328827745"},
-		{
-			"service_type":"HBASE","service_description":"Root Partition","host_name":"ip-10-80-119-243.ec2.internal","current_attempt":"1","last_hard_state":"0","plugin_output":"DISK OK - free space: \/ 6605 MB (69% inode=88%):","last_hard_state_change":"1327362279","last_time_ok":"1327385679","last_time_warning":"0","last_time_unknown":"0","last_time_critical":"0","is_flapping":"0","last_check":"1328827745"
-		}
-	],
-"hosts":
-	[
-		{"host_name":"ip-10-242-191-48.ec2.internal","last_hard_state":"0","last_hard_state":"0","plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms","last_check":"1327385564","current_attempt":"1","last_hard_state_change":"1327362079","last_time_up":"1327385574","last_time_down":"0","last_time_unreachable":"0","is_flapping":"0","alerts":[{"service_description":"HDFS Current Load","host_name":"ip-10-242-191-48.ec2.internal","current_attempt":"1","last_hard_state":"0","plugin_output":"OK - load average: 0.12, 0.11, 0.08","last_hard_state_change":"1327362079","last_time_ok":"1327385479","last_time_warning":"0","last_time_unknown":"0","last_time_critical":"0","is_flapping":"0"},
-		{"service_description":"MAPRED Current Users","host_name":"ip-10-242-191-48.ec2.internal","current_attempt":"1","last_hard_state":"0","plugin_output":"USERS OK - 1 users currently logged in","last_hard_state_change":"1327362154","last_time_ok":"1327385554","last_time_warning":"0","last_time_unknown":"0","last_time_critical":"0","is_flapping":"0"},
-		{"service_description":"PING","host_name":"ip-10-242-191-48.ec2.internal","current_attempt":"1","last_hard_state":"0","plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms","last_hard_state_change":"1327362229","last_time_ok":"1327385629","last_time_warning":"0","last_time_unknown":"0","last_time_critical":"0","is_flapping":"0"},
-		{"service_description":"Root Partition","host_name":"ip-10-242-191-48.ec2.internal","current_attempt":"1","last_hard_state":"0","plugin_output":"DISK OK - free space: \/ 6605 MB (69% inode=88%):","last_hard_state_change":"1327362304","last_time_ok":"1327385704","last_time_warning":"0","last_time_unknown":"0","last_time_critical":"0","is_flapping":"0"}]},
-		{"host_name":"ip-10-40-199-111.ec2.internal","last_hard_state":"1","last_hard_state":"0","plugin_output":"(Host Check Timed Out)","last_check":"1327385664","current_attempt":"2","last_hard_state_change":"1327383724","last_time_up":"1327385574","last_time_down":"1327385694","last_time_unreachable":"0","is_flapping":"0",
-			"alerts":[{"service_description":"DATANODE Total Processes","host_name":"ip-10-40-199-111.ec2.internal","current_attempt":"1","last_hard_state":"0","plugin_output":"PROCS OK: 73 processes with STATE = RSZDT","last_hard_state_change":"1327362104","last_time_ok":"1327385504","last_time_warning":"0","last_time_unknown":"0","last_time_critical":"0","is_flapping":"0"},
-		{"service_description":"Root Partition","host_name":"ip-10-40-199-111.ec2.internal","current_attempt":"1","last_hard_state":"0","plugin_output":"DISK OK - free space: \/ 6605 MB (69% inode=88%):","last_hard_state_change":"1327362254","last_time_ok":"1327385654","last_time_warning":"0","last_time_unknown":"0","last_time_critical":"0","is_flapping":"0"}]},
-		{"host_name":"ip-10-80-119-243.ec2.internal","last_hard_state":"0","plugin_output":"PING OK - Packet loss = 0%, RTA = 0.44 ms","last_check":"1327385464","current_attempt":"1","last_hard_state_change":"1327362129","last_time_up":"1327385474","last_time_down":"0","last_time_unreachable":"0","is_flapping":"0",
-			"alerts":[{"service_description":"DATANODE Total Processes","host_name":"ip-10-80-119-243.ec2.internal","current_attempt":"1","last_hard_state":"0","plugin_output":"PROCS OK: 73 processes with STATE = RSZDT","last_hard_state_change":"1327362129","last_time_ok":"1327385529","last_time_warning":"0","last_time_unknown":"0","last_time_critical":"0","is_flapping":"0"},
-		{"service_description":"PING","host_name":"ip-10-80-119-243.ec2.internal","current_attempt":"1","last_hard_state":"0","plugin_output":"PING OK - Packet loss = 0%, RTA = 2.92 ms","last_hard_state_change":"1327362204","last_time_ok":"1327385604","last_time_warning":"0","last_time_unknown":"0","last_time_critical":"0","is_flapping":"0"},
-		{"service_description":"Root Partition",
-		"host_name":"ip-10-80-119-243.ec2.internal","current_attempt":"1","last_hard_state":"0","plugin_output":"DISK OK - free space: \/ 6605 MB (69% inode=88%):","last_hard_state_change":"1327362279","last_time_ok":"1327385679","last_time_warning":"0","last_time_unknown":"0","last_time_critical":"0","is_flapping":"0"}]}],
-"hostcounts":{"up_hosts":9,"down_hosts":0},
-"servicestates":{"HIVE-METASTORE":1,"ZOOKEEPER":0,"HBASE":"0","HDFS":"0","MAPREDUCE":"0"}
-}
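The fixture above mimics the hdp_mon Nagios data-service payload: a flat "alerts" list, per-host entries that embed their own "alerts", and "hostcounts"/"servicestates" rollups, with Nagios hard-state codes carried as strings. A minimal consumer sketch in Python (the summarize_alerts helper and the alerts.json path are illustrative, not part of the codebase):

    import json

    def summarize_alerts(payload):
        # Nagios hard-state convention assumed: 0 = OK, 1 = WARNING, 2 = CRITICAL.
        counts = {"ok": 0, "warning": 0, "critical": 0}
        names = ["ok", "warning", "critical"]
        for alert in payload.get("alerts", []):
            state = int(alert["last_hard_state"])
            if 0 <= state < len(names):
                counts[names[state]] += 1
        return counts

    with open("alerts.json") as f:  # local copy of the fixture above
        print(summarize_alerts(json.load(f)))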
diff --git a/branch-1.2/contrib/addons/test/ui/json/clusterSummary.json b/branch-1.2/contrib/addons/test/ui/json/clusterSummary.json
deleted file mode 100644
index 0ba9e78..0000000
--- a/branch-1.2/contrib/addons/test/ui/json/clusterSummary.json
+++ /dev/null
@@ -1,119 +0,0 @@
-{
-  "overall" : {
-     "dfs_configured_capacity": 1234546666,
-     "dfs_percent_used": 25,
-     "dfs_percent_remaining": 75,
-     "dfs_total_bytes": 36336891658240,
-     "dfs_used_bytes": 1750237184,
-     "nondfs_used_bytes": 331691536384,
-     "dfs_free_bytes": 36003449884672,
-     "live_nodes": 5,
-     "dead_nodes": 1,
-     "decommissioning_nodes": 1,
-     "dfs_blocks_underreplicated": 23,
-     "jobtracker_starttime": 1243236234,
-     "namenode_starttime": 1243234234,
-     "trackers_live": 4,
-     "trackers_blacklisted": 1,
-      "running_jobs": 10,
-      "waiting_jobs": 1,
-      "namenode_addr": "1.1.1.1:50070",
-      "jobtracker_addr": "2.2.2.2:50030",
-      "hbasemaster_addr": "3.3.3.3:60010",
-      "hbasemaster_starttime": 1243234234,
-      "live_regionservers": 4,
-      "dead_regionservers": 3,
-      "regions_in_transition_count": 3,
-      "ganglia_url": "http://gangliahost/ganglia",
-   	  "nagios_url": "http://nagioshost/nagios",
-      "hdfs_installed": true,
-      "mapreduce_installed": true,
-      "hbase_installed": true
-   },
-   "hdfs": {
-       "service_type": "HDFS",
-       "namenode_addr": "nnhost.amazon.seattle.firstserver.com:50070",
-       "total_nodes": 10,
-       "live_nodes": 8,
-       "dead_nodes": 1,
-       "decommissioning_nodes": 1,
-       "start_time": 23235351,
-       "pending_upgrades": false,
-       "version": "1.0.0, r12345",
-       "safemode": true,
-       "memory_heap_used": 1001312,
-       "memory_heap_max": 800212312,
-       "dfs_configured_capacity": 1234456545644,
-       "dfs_percent_used": 24,
-       "dfs_percent_remaining": 76,
-       "dfs_blocks_total": 100113,
-       "dfs_blocks_underreplicated": 0,
-       "dfs_blocks_missing": 0,
-       "dfs_blocks_corrupt": 0,
-       "dfs_dirfiles_count": 1045
-   },
-   "mapreduce": {
-        "service_type": "MAPREDUCE",
-        "jobtracker_addr": "jthost.amazon.seattle.firstserver.com:50069",
-        "jobhistory_addr": "jthost:51111",
-        "trackers_total": 10,
-        "trackers_live": 7,
-        "trackers_graylisted": 1,
-        "trackers_blacklisted": 1,
-        "trackers_excluded": 1,
-        "start_time": 23235351,
-        "version": "1.0.0, r12345",
-        "memory_heap_used": 10042424,
-        "memory_heap_max": 8003242420,
-        "map_task_capacity": 32,
-        "reduce_task_capacity": 8,
-        "average_node_capacity": 5,
-        "job_total_submissions": 6,
-        "job_total_completions": 3,
-        "running_map_tasks": 5,
-        "running_reduce_tasks": 5,
-        "occupied_map_slots": 3,
-        "occupied_reduce_slots": 4,
-        "reserved_map_slots": 3,
-        "reserved_reduce_slots": 5,
-        "waiting_maps": 3,
-        "waiting_reduces": 3,
-        "queue_info": {
-            "type": "capacity_scheduler",
-            "queues": [
-                {
-                    "default": {
-                        "capacity_percentage": 50,
-                        "user_limit": 100,
-                        "map_capacity": 2,
-                        "map_used_capacity": 1,
-                        "map_running_tasks": 1,
-                        "reduce_capacity": 2,
-                        "reduce_used_capacity": 1,
-                        "reduce_running_tasks": 1,
-                        "waiting_jobs": 1,
-                        "initializing_jobs": 1,
-                        "users_with_submitted_jobs": 1
-                    }
-                }
-            ]
-        }
-    },
-    "hbase": {
-        "version": "0.92.0",
-        "hbasemaster_addr": "1.1.1.1.amazon.seattle.com:60011",
-        "live_regionservers": 5,
-        "dead_regionservers": 1,
-        "regions_in_transition_count": 1,
-        "cluster_id": "ddd-ddd-dddd",
-        "zookeeper_quorum": [
-            "zkhost1: 2181",
-            "zkhost2: 2181"
-        ],
-        "start_time": 1327179894,
-        "active_time": 1327179894,
-        "average_load": 3,
-        "memory_heap_used":1004242423,
-        "memory_heap_max": 8003242423
-    }
-}
\ No newline at end of file
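The capacity fields in this fixture are internally consistent: dfs_used_bytes + dfs_free_bytes + nondfs_used_bytes equals dfs_total_bytes (1750237184 + 36003449884672 + 331691536384 = 36336891658240). A quick Python check along those lines, assuming a local copy of the file (the check_capacity helper is illustrative):

    import json

    def check_capacity(overall):
        # used + free + non-DFS usage should account for the full total
        parts = (overall["dfs_used_bytes"]
                 + overall["dfs_free_bytes"]
                 + overall["nondfs_used_bytes"])
        return parts == overall["dfs_total_bytes"]

    with open("clusterSummary.json") as f:  # local copy of the fixture above
        print(check_capacity(json.load(f)["overall"]))  # True for this fixture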
diff --git a/branch-1.2/contrib/addons/test/ui/json/get_graph_info_all.json b/branch-1.2/contrib/addons/test/ui/json/get_graph_info_all.json
deleted file mode 100644
index 0375275..0000000
--- a/branch-1.2/contrib/addons/test/ui/json/get_graph_info_all.json
+++ /dev/null
@@ -1,24 +0,0 @@
-{
-    "Global": [
-        {
-            "description": "Key load metrics, aggregated across the entire grid", 
-            "title": "Load Report", 
-            "url": "http://10.10.10.114:80/ganglia2/graph.php?me=HDP&g=load_report"
-        }, 
-        {
-            "description": "Key memory metrics, aggregated across the entire grid", 
-            "title": "Memory Report", 
-            "url": "http://10.10.10.114:80/ganglia2/graph.php?me=HDP&g=mem_report"
-        }, 
-        {
-            "description": "Key CPU metrics, aggregated across the entire grid", 
-            "title": "CPU Report", 
-            "url": "http://10.10.10.114:80/ganglia2/graph.php?me=HDP&g=cpu_report"
-        }, 
-        {
-            "description": "Key network I/O metrics, aggregated across the entire grid", 
-            "title": "Network I/O Report", 
-            "url": "http://10.10.10.114:80/ganglia2/graph.php?me=HDP&g=network_report"
-        }
-    ] 
-}
\ No newline at end of file
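get_graph_info_all.json shows the graph_info document shape: a category name ("Global" here) mapping to a list of {description, title, url} descriptors. A sketch of rendering such a document as plain text (the render function is illustrative):

    def render(graph_info):
        # One heading per category, one indented entry per graph descriptor.
        for category, graphs in sorted(graph_info.items()):
            print(category)
            for g in graphs:
                print("  %s - %s" % (g["title"], g["description"]))
                print("    %s" % g["url"])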
diff --git a/branch-1.2/contrib/addons/test/ui/json/hbaseSummary.json b/branch-1.2/contrib/addons/test/ui/json/hbaseSummary.json
deleted file mode 100644
index e69de29..0000000
--- a/branch-1.2/contrib/addons/test/ui/json/hbaseSummary.json
+++ /dev/null
diff --git a/branch-1.2/contrib/addons/test/ui/json/hdfsSummary.json b/branch-1.2/contrib/addons/test/ui/json/hdfsSummary.json
deleted file mode 100644
index 87531c8..0000000
--- a/branch-1.2/contrib/addons/test/ui/json/hdfsSummary.json
+++ /dev/null
@@ -1,24 +0,0 @@
-{
-    "hdfs": {
-        "service_type": "HDFS",
-        "namenode_addr": "nnhost:50070",
-        "total_nodes": 10,
-        "live_nodes": 8,
-        "dead_nodes": 1,
-        "decommissioned_nodes": 1,
-        "start_time": 23235351,
-        "pending_upgrades": false,
-        "version": "1.0.0, r1234555555555555555555555555555555555555333333333",
-        "safemode": false,
-        "memory_heap_committed": 100131233,
-        "memory_heap_total": 800212312,
-        "dfs_configured_capacity": 1234456545644,
-        "dfs_percent_used": 24,
-        "dfs_percent_remaining": 76,
-        "dfs_blocks_total": 100113,
-        "dfs_blocks_underreplicated": 0,
-        "dfs_blocks_missing": 0,
-        "dfs_blocks_corrupt": 0,
-        "dfs_dirfiles_count": 1045
-    }
-}
\ No newline at end of file
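Note the key drift between the two HDFS fixtures: clusterSummary.json uses "decommissioning_nodes" while hdfsSummary.json uses "decommissioned_nodes". In this file the node counts do add up (8 live + 1 dead + 1 decommissioned = 10 total), which a consumer might assert along these lines (illustrative helper):

    def nodes_accounted_for(hdfs):
        # live + dead + decommissioned should cover every known DataNode
        accounted = (hdfs["live_nodes"] + hdfs["dead_nodes"]
                     + hdfs["decommissioned_nodes"])
        return accounted == hdfs["total_nodes"]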
diff --git a/branch-1.2/contrib/addons/test/ui/json/mrSummary.json b/branch-1.2/contrib/addons/test/ui/json/mrSummary.json
deleted file mode 100644
index b9f1525..0000000
--- a/branch-1.2/contrib/addons/test/ui/json/mrSummary.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
-    "mapreduce": {
-        "service_type": "MAPREDUCE",
-        "jobtracker_addr": "jthost:50030",
-        "trackers_total": 10,
-        "trackers_live": 7,
-        "trackers_graylisted": 1,
-        "trackers_blacklisted": 1,
-        "trackers_excluded": 1,
-        "start_time": 23235351,
-        "version": "1.0.0, r12345",
-        "memory_heap_committed": 1004242423,
-        "memory_heap_total": 8003242423,
-        "map_task_capacity": 32,
-        "reduce_task_capacity": 8,
-        "average_node_capacity": 5,
-        "job_total_submissions": 6,
-        "job_total_completions": 3,
-        "running_map_tasks": 5,
-        "running_reduce_tasks": 5,
-        "occupied map slots": 3,
-        "occupied reduce slots": 4,
-        "reserved map slots": 3,
-        "reserved reduce slots": 5
-    }
-}
\ No newline at end of file
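The slot fields here line up with the "occupied + reserved vs. total" framing used by the Ganglia slot-report graphs later in this diff; for this fixture that is (3 + 3) of 32 map slots. A small illustrative helper:

    def slot_utilization(mr, kind):
        # kind is "map" or "reduce"; returns (occupied + reserved, total capacity)
        used = mr["occupied_%s_slots" % kind] + mr["reserved_%s_slots" % kind]
        return used, mr["%s_task_capacity" % kind]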
diff --git a/branch-1.2/contrib/addons/utils/dataServices/ganglia/generateAll.sh b/branch-1.2/contrib/addons/utils/dataServices/ganglia/generateAll.sh
deleted file mode 100644
index eb1abf7..0000000
--- a/branch-1.2/contrib/addons/utils/dataServices/ganglia/generateAll.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-GRAPH_INFO_JSON_PATH="../../../src/dataServices/ganglia/graph_info";
-
-JSON_PRETTY_PRINT="python -mjson.tool"
-
-### WARNING: These PHP definitions have diverged from the actual JSON definitions
-###          (which I started to modify directly), so running the scripts below
-###          will result in data loss!
-
-### php ./generate_dashboard_json.php | ${JSON_PRETTY_PRINT} > ${GRAPH_INFO_JSON_PATH}/dashboard/all.json;
-### php ./generate_dashboard_hdp_json.php | ${JSON_PRETTY_PRINT} > ${GRAPH_INFO_JSON_PATH}/dashboard/custom/hdp.json;
-### php ./generate_mapreduce_json.php | ${JSON_PRETTY_PRINT} > ${GRAPH_INFO_JSON_PATH}/mapreduce/all.json;
-### php ./generate_mapreduce_hdp_json.php | ${JSON_PRETTY_PRINT} > ${GRAPH_INFO_JSON_PATH}/mapreduce/custom/hdp.json;
-### php ./generate_hdfs_json.php | ${JSON_PRETTY_PRINT} > ${GRAPH_INFO_JSON_PATH}/hdfs/all.json;
-### php ./generate_hdfs_hdp_json.php | ${JSON_PRETTY_PRINT} > ${GRAPH_INFO_JSON_PATH}/hdfs/custom/hdp.json;
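The commented-out pipeline above is just "generate with PHP, pretty-print with Python, write into graph_info". The same step expressed directly in Python, assuming the PHP CLI is on PATH (kept inert here for the same data-loss reason as the WARNING above):

    import json
    import subprocess

    def regenerate(script, out_path):
        # Run a generator script and pretty-print its JSON output to out_path.
        raw = subprocess.check_output(["php", script])
        with open(out_path, "w") as f:
            json.dump(json.loads(raw), f, indent=4, sort_keys=True)

    # regenerate("generate_dashboard_json.php",
    #            GRAPH_INFO_JSON_PATH + "/dashboard/all.json")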
diff --git a/branch-1.2/contrib/addons/utils/dataServices/ganglia/generate_dashboard_hdp_json.php b/branch-1.2/contrib/addons/utils/dataServices/ganglia/generate_dashboard_hdp_json.php
deleted file mode 100644
index 20514af..0000000
--- a/branch-1.2/contrib/addons/utils/dataServices/ganglia/generate_dashboard_hdp_json.php
+++ /dev/null
@@ -1,66 +0,0 @@
-<?php
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
-*/
-
-
-$data = array
-  (
-     'Global' => array
-     (
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%HDFSSlavesClusterName%&g=hdp_mon_hdfs_io_report',
-           'title' => 'HDFS I/O',
-           'description' => 'Bytes written to and read from HDFS, aggregated across all the DataNodes',
-           'link' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&g=hdp_mon_jobtracker_map_slot_report',
-           'title' => 'Map Slot Utilization',
-           'description' => 'Utilized Map slots (occupied + reserved) vs. Total Map slots',
-           'link' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&g=hdp_mon_jobtracker_reduce_slot_report',
-           'title' => 'Reduce Slot Utilization',
-           'description' => 'Utilized Reduce slots (occupied + reserved) vs. Total Reduce slots',
-           'link' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&g=hdp_mon_jobtracker_mapreduce_report',
-           'title' => 'MapReduce Backlog',
-           'description' => 'Waiting Maps and Reduces, to give a feel for a combined MapReduce backlog',
-           'link' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%'
-        )
-     )
-  );
-
-echo json_encode($data);
-
-?>
-
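Each generator emits URLs carrying %TokenName% placeholders (%GangliaWebHostName%, %GangliaWebPort%, %JobTrackerClusterName%, and so on) that are meant to be filled in per-cluster at request time. A substitution sketch; the host and cluster values below are invented:

    import re

    def fill(template, values):
        # Replace every %TokenName% occurrence with its per-cluster value.
        return re.sub(r"%(\w+)%", lambda m: values[m.group(1)], template)

    url = ("http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php"
           "?c=%JobTrackerClusterName%&g=hdp_mon_jobtracker_map_slot_report")
    print(fill(url, {"GangliaWebHostName": "ganglia.example.com",
                     "GangliaWebPort": "80",
                     "JobTrackerClusterName": "HDPJobTracker"}))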
diff --git a/branch-1.2/contrib/addons/utils/dataServices/ganglia/generate_dashboard_json.php b/branch-1.2/contrib/addons/utils/dataServices/ganglia/generate_dashboard_json.php
deleted file mode 100644
index f4cfb4e..0000000
--- a/branch-1.2/contrib/addons/utils/dataServices/ganglia/generate_dashboard_json.php
+++ /dev/null
@@ -1,61 +0,0 @@
-<?php
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
-*/
-
-
-$data = array
-  (
-     'Global' => array
-     (
-        array
-        (
-           'url' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%GridSlavesClusterName%&g=load_report',
-           'title' => 'Load Report',
-           'description' => 'Key load metrics, aggregated across the slaves in the grid',
-           'link' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%GridSlavesClusterName%'
-        ),
-        array
-        (
-           'url' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%GridSlavesClusterName%&g=mem_report',
-           'title' => 'Memory Report',
-           'description' => 'Key memory metrics, aggregated across the slaves in the grid',
-           'link' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%GridSlavesClusterName%'
-        ),
-        array
-        (
-           'url' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%GridSlavesClusterName%&g=cpu_report',
-           'title' => 'CPU Report',
-           'description' => 'Key CPU metrics, aggregated across the slaves in the grid',
-           'link' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%GridSlavesClusterName%'
-        ),
-        array
-        (
-           'url' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%GridSlavesClusterName%&g=network_report',
-           'title' => 'Network I/O Report',
-           'description' => 'Key network I/O metrics, aggregated across the slaves in the grid',
-           'link' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%GridSlavesClusterName%'
-        )
-     )
-  );
-
-echo json_encode($data);
-
-?>
diff --git a/branch-1.2/contrib/addons/utils/dataServices/ganglia/generate_hdfs_hdp_json.php b/branch-1.2/contrib/addons/utils/dataServices/ganglia/generate_hdfs_hdp_json.php
deleted file mode 100644
index d36e986..0000000
--- a/branch-1.2/contrib/addons/utils/dataServices/ganglia/generate_hdfs_hdp_json.php
+++ /dev/null
@@ -1,66 +0,0 @@
-<?php
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
-*/
-
-
-$data = array
-  (
-     'Global' => array
-     (
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%HDFSSlavesClusterName%&g=hdp_mon_hdfs_io_report',
-           'title' => 'HDFS I/O',
-           'description' => 'Bytes written to and read from HDFS, aggregated across all the DataNodes',
-           'link' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&g=hdp_mon_hdfs_ops_report',
-           'title' => 'NameNode Operation Counts',
-           'description' => 'Counts of key operations on the NameNode, to give a feel for the high-level HDFS activity pattern',
-           'link' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&g=hdp_mon_jvm_gc_report',
-           'title' => 'NameNode: JVM Garbage Collection',
-           'description' => 'Key Garbage Collection stats for the NameNode\'s JVM',
-           'link' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&g=hdp_mon_rpc_latency_report',
-           'title' => 'NameNode: RPC Average Latencies',
-           'description' => 'Average latencies for processing and queue times on the NameNode, to give a feel for potential performance bottlenecks',
-           'link' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%'
-        )
-     )
-  );
-
-echo json_encode($data);
-
-?>
-
diff --git a/branch-1.2/contrib/addons/utils/dataServices/ganglia/generate_hdfs_json.php b/branch-1.2/contrib/addons/utils/dataServices/ganglia/generate_hdfs_json.php
deleted file mode 100644
index 09c83c1..0000000
--- a/branch-1.2/contrib/addons/utils/dataServices/ganglia/generate_hdfs_json.php
+++ /dev/null
@@ -1,1360 +0,0 @@
-<?php
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
-*/
-
-
-$data = array
-  (
-     'Global' => array
-     (
-        array
-        (
-           'url' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&g=load_report',
-           'title' => 'Load Report',
-           'description' => 'Key load metrics on the HDFS NameNode',
-           'link' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=load_report'
-        ),
-        array
-        (
-           'url' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&g=mem_report',
-           'title' => 'Memory Report',
-           'description' => 'Key memory metrics on the HDFS NameNode',
-           'link' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=mem_report'
-        ),
-        array
-        (
-           'url' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&g=cpu_report',
-           'title' => 'CPU Report',
-           'description' => 'Key CPU metrics on the HDFS NameNode',
-           'link' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=cpu_report'
-        ),
-        array
-        (
-           'url' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&g=network_report',
-           'title' => 'Network I/O Report',
-           'description' => 'Key network I/O metrics on the HDFS NameNode',
-           'link' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=network_report'
-        )
-     ),
-     'CPU' => array
-     (
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=cpu_aidle',
-           'title' => 'CPU AIdle',
-           'description' => 'Percent of CPU time spent idle since boot',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=cpu_aidle'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=cpu_idle',
-           'title' => 'CPU Idle',
-           'description' => 'Percent CPU idle',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=cpu_idle'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=cpu_nice',
-           'title' => 'CPU Nice',
-           'description' => 'Percent CPU nice',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=cpu_nice'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=cpu_system',
-           'title' => 'CPU System',
-           'description' => 'Percent CPU system',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=cpu_system'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=cpu_user',
-           'title' => 'CPU User',
-           'description' => 'Percent CPU user',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=cpu_user'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=cpu_wio',
-           'title' => 'CPU Wait I/O',
-           'description' => 'Percent CPU spent waiting on I/O',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=cpu_wio'
-        )
-     ),
-     'Disk' => array
-     (
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=disk_free',
-           'title' => 'Disk Free',
-           'description' => 'Total free disk space',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=disk_free'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=disk_total',
-           'title' => 'Disk Total',
-           'description' => 'Total available disk space',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=disk_total'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=part_max_used',
-           'title' => 'Max Disk Partition Used',
-           'description' => 'Maximum percent used for all disk partitions',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=part_max_used'
-        )
-     ),
-     'Load' => array
-     (
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=load_fifteen',
-           'title' => 'Load Fifteen',
-           'description' => 'Fifteen minute load average',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=load_fifteen'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=load_five',
-           'title' => 'Load Five',
-           'description' => 'Five minute load average',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=load_five'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=load_one',
-           'title' => 'Load One',
-           'description' => 'One minute load average',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=load_one'
-        )
-     ),
-     'Memory' => array
-     (
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=mem_buffers',
-           'title' => 'Memory Buffers',
-           'description' => 'Amount of buffered memory',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=mem_buffers'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=mem_cached',
-           'title' => 'Cached Memory',
-           'description' => 'Amount of cached memory',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=mem_cached'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=mem_free',
-           'title' => 'Free Memory',
-           'description' => 'Amount of available memory',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=mem_free'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=mem_shared',
-           'title' => 'Shared Memory',
-           'description' => 'Amount of shared memory',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=mem_shared'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=swap_free',
-           'title' => 'Free Swap Space',
-           'description' => 'Amount of available swap space',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=swap_free'
-        )
-     ),
-     'Network' => array
-     (
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=bytes_in',
-           'title' => 'Bytes Received',
-           'description' => 'Number of bytes in per second',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=bytes_in'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=bytes_out',
-           'title' => 'Bytes Sent',
-           'description' => 'Number of bytes out per second',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=bytes_out'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=pkts_in',
-           'title' => 'Packets Received',
-           'description' => 'Packets in per second',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=pkts_in'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=pkts_out',
-           'title' => 'Packets Sent',
-           'description' => 'Packets out per second',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=pkts_out'
-        )
-     ),
-     'Process' => array
-     (
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=proc_run',
-           'title' => 'Total Running Processes',
-           'description' => 'Total number of running processes',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=proc_run'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=proc_total',
-           'title' => 'Total Processes',
-           'description' => 'Total number of processes',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=proc_total'
-        )
-     ),
-     'FSNamesystem' => array
-     (
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=dfs.FSNamesystem.BlockCapacity',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=dfs.FSNamesystem.BlockCapacity'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=dfs.FSNamesystem.BlocksTotal',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=dfs.FSNamesystem.BlocksTotal'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=dfs.FSNamesystem.CapacityRemainingGB',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=dfs.FSNamesystem.CapacityRemainingGB'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=dfs.FSNamesystem.CapacityTotalGB',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=dfs.FSNamesystem.CapacityTotalGB'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=dfs.FSNamesystem.CapacityUsedGB',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=dfs.FSNamesystem.CapacityUsedGB'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=dfs.FSNamesystem.CorruptBlocks',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=dfs.FSNamesystem.CorruptBlocks'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=dfs.FSNamesystem.ExcessBlocks',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=dfs.FSNamesystem.ExcessBlocks'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=dfs.FSNamesystem.FilesTotal',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=dfs.FSNamesystem.FilesTotal'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=dfs.FSNamesystem.MissingBlocks',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=dfs.FSNamesystem.MissingBlocks'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=dfs.FSNamesystem.PendingDeletionBlocks',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=dfs.FSNamesystem.PendingDeletionBlocks'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=dfs.FSNamesystem.PendingReplicationBlocks',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=dfs.FSNamesystem.PendingReplicationBlocks'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=dfs.FSNamesystem.ScheduledReplicationBlocks',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=dfs.FSNamesystem.ScheduledReplicationBlocks'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=dfs.FSNamesystem.TotalLoad',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=dfs.FSNamesystem.TotalLoad'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=dfs.FSNamesystem.UnderReplicatedBlocks',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=dfs.FSNamesystem.UnderReplicatedBlocks'
-        )
-     ),
-     'NameNode' => array
-     (
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=dfs.namenode.AddBlockOps',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=dfs.namenode.AddBlockOps'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=dfs.namenode.CreateFileOps',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=dfs.namenode.CreateFileOps'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=dfs.namenode.DeleteFileOps',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=dfs.namenode.DeleteFileOps'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=dfs.namenode.FileInfoOps',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=dfs.namenode.FileInfoOps'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=dfs.namenode.FilesAppended',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=dfs.namenode.FilesAppended'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=dfs.namenode.FilesCreated',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=dfs.namenode.FilesCreated'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=dfs.namenode.FilesDeleted',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=dfs.namenode.FilesDeleted'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=dfs.namenode.FilesInGetListingOps',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=dfs.namenode.FilesInGetListingOps'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=dfs.namenode.FilesRenamed',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=dfs.namenode.FilesRenamed'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=dfs.namenode.GetBlockLocations',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=dfs.namenode.GetBlockLocations'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=dfs.namenode.GetListingOps',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=dfs.namenode.GetListingOps'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=dfs.namenode.JournalTransactionsBatchedInSync',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=dfs.namenode.JournalTransactionsBatchedInSync'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=dfs.namenode.SafemodeTime',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=dfs.namenode.SafemodeTime'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=dfs.namenode.Syncs_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=dfs.namenode.Syncs_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=dfs.namenode.Syncs_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=dfs.namenode.Syncs_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=dfs.namenode.Transactions_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=dfs.namenode.Transactions_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=dfs.namenode.Transactions_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=dfs.namenode.Transactions_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=dfs.namenode.blockReport_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=dfs.namenode.blockReport_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=dfs.namenode.blockReport_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=dfs.namenode.blockReport_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=dfs.namenode.fsImageLoadTime',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=dfs.namenode.fsImageLoadTime'
-        )
-     ),
-     'JVM' => array
-     (
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=jvm.metrics.gcCount',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=jvm.metrics.gcCount'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=jvm.metrics.gcTimeMillis',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=jvm.metrics.gcTimeMillis'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=jvm.metrics.logError',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=jvm.metrics.logError'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=jvm.metrics.logFatal',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=jvm.metrics.logFatal'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=jvm.metrics.logInfo',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=jvm.metrics.logInfo'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=jvm.metrics.logWarn',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=jvm.metrics.logWarn'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=jvm.metrics.memHeapCommittedM',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=jvm.metrics.memHeapCommittedM'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=jvm.metrics.memHeapUsedM',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=jvm.metrics.memHeapUsedM'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=jvm.metrics.memNonHeapCommittedM',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=jvm.metrics.memNonHeapCommittedM'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=jvm.metrics.memNonHeapUsedM',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=jvm.metrics.memNonHeapUsedM'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=jvm.metrics.threadsBlocked',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=jvm.metrics.threadsBlocked'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=jvm.metrics.threadsNew',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=jvm.metrics.threadsNew'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=jvm.metrics.threadsRunnable',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=jvm.metrics.threadsRunnable'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=jvm.metrics.threadsTerminated',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=jvm.metrics.threadsTerminated'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=jvm.metrics.threadsTimedWaiting',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=jvm.metrics.threadsTimedWaiting'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=jvm.metrics.threadsWaiting',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=jvm.metrics.threadsWaiting'
-        )
-     ),
-     'MetricsSystem' => array
-     (
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.dropped_pub_all',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.dropped_pub_all'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.num_sinks',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.num_sinks'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.num_sources',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.num_sources'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.publish_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.publish_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.publish_imax_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.publish_imax_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.publish_imin_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.publish_imin_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.publish_max_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.publish_max_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.publish_min_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.publish_min_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.publish_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.publish_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.publish_stdev_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.publish_stdev_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.sink.ganglia.dropped',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.sink.ganglia.dropped'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.sink.ganglia.latency_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.sink.ganglia.latency_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.sink.ganglia.latency_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.sink.ganglia.latency_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.sink.ganglia.qsize',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.sink.ganglia.qsize'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.snapshot_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.snapshot_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.snapshot_imax_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.snapshot_imax_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.snapshot_imin_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.snapshot_imin_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.snapshot_max_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.snapshot_max_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.snapshot_min_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.snapshot_min_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.snapshot_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.snapshot_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.snapshot_stdev_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=metricssystem.MetricsSystem.snapshot_stdev_time'
-        )
-     ),
-     'RPC' => array
-     (
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpc.rpc.NumOpenConnections',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpc.rpc.NumOpenConnections'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpc.rpc.ReceivedBytes',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpc.rpc.ReceivedBytes'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpc.rpc.RpcProcessingTime_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpc.rpc.RpcProcessingTime_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpc.rpc.RpcProcessingTime_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpc.rpc.RpcProcessingTime_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpc.rpc.RpcQueueTime_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpc.rpc.RpcQueueTime_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpc.rpc.RpcQueueTime_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpc.rpc.RpcQueueTime_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpc.rpc.SentBytes',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpc.rpc.SentBytes'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpc.rpc.callQueueLen',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpc.rpc.callQueueLen'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpc.rpc.rpcAuthenticationFailures',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpc.rpc.rpcAuthenticationFailures'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpc.rpc.rpcAuthenticationSuccesses',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpc.rpc.rpcAuthenticationSuccesses'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpc.rpc.rpcAuthorizationFailures',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpc.rpc.rpcAuthorizationFailures'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpc.rpc.rpcAuthorizationSuccesses',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpc.rpc.rpcAuthorizationSuccesses'
-        )
-     ),
-     'RPCDetailed' => array
-     (
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.addBlock_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.addBlock_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.blockReceived_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.blockReceived_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.blockReport_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.blockReport_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.complete_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.complete_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.create_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.create_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.delete_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.delete_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.getFileInfo_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.getFileInfo_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.getListing_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.getListing_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.getProtocolVersion_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.getProtocolVersion_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.mkdirs_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.mkdirs_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.register_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.register_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.renewLease_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.renewLease_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.sendHeartbeat_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.sendHeartbeat_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.setPermission_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.setPermission_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.versionRequest_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.versionRequest_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.addBlock_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.addBlock_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.blockReceived_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.blockReceived_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.blockReport_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.blockReport_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.complete_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.complete_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.create_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.create_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.delete_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.delete_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.getFileInfo_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.getFileInfo_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.getListing_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.getListing_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.getProtocolVersion_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.getProtocolVersion_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.mkdirs_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.mkdirs_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.register_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.register_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.renewLease_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.renewLease_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.sendHeartbeat_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.sendHeartbeat_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.setPermission_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.setPermission_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.versionRequest_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=rpcdetailed.rpcdetailed.versionRequest_num_ops'
-        )
-     ),
-     'UGI' => array
-     (
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=ugi.ugi.loginFailure_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=ugi.ugi.loginFailure_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=ugi.ugi.loginFailure_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=ugi.ugi.loginFailure_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=ugi.ugi.loginSuccess_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=ugi.ugi.loginSuccess_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=ugi.ugi.loginSuccess_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%&m=ugi.ugi.loginSuccess_num_ops'
-        )
-     )
-  );
-
-echo json_encode($data);
-
-?>
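Note on the file removed above: the generator only emits URL templates. Every entry keeps %GangliaWebHostName%, %GangliaWebPort% and %NameNodeClusterName% as literal tokens, and the script simply ends with echo json_encode($data), so whatever serves the dashboard is left to expand the tokens. A minimal consumer-side sketch of that substitution step, assuming the function name and the sample values (neither appears anywhere in this patch):

<?php
// Sketch only: expand the %Token% placeholders left in the generated JSON.
// The function and the example host/cluster values below are invented.
function expandGangliaTemplate($template, array $values)
{
   $tokens = array();
   foreach (array_keys($values) as $key) {
      $tokens[] = '%' . $key . '%';
   }
   // str_replace accepts parallel arrays, so one call rewrites every token.
   return str_replace($tokens, array_values($values), $template);
}

echo expandGangliaTemplate(
   'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&m=rpc.rpc.callQueueLen',
   array(
      'GangliaWebHostName'  => 'ganglia.example.com', // invented host
      'GangliaWebPort'      => '80',
      'NameNodeClusterName' => 'HDPNameNode'          // invented cluster name
   )
) . "\n";
// => http://ganglia.example.com:80/ganglia/graph.php?c=HDPNameNode&m=rpc.rpc.callQueueLen
?>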
diff --git a/branch-1.2/contrib/addons/utils/dataServices/ganglia/generate_mapreduce_hdp_json.php b/branch-1.2/contrib/addons/utils/dataServices/ganglia/generate_mapreduce_hdp_json.php
deleted file mode 100644
index 41671ba..0000000
--- a/branch-1.2/contrib/addons/utils/dataServices/ganglia/generate_mapreduce_hdp_json.php
+++ /dev/null
@@ -1,74 +0,0 @@
-<?php
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
-*/
-
-
-$data = array
-  (
-     'Global' => array
-     (
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&g=hdp_mon_jobtracker_map_slot_report',
-           'title' => 'Map Slot Utilization',
-           'description' => 'Utilized Map slots (occupied + reserved) vs. Total Map slots',
-           'link' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&g=hdp_mon_jobtracker_reduce_slot_report',
-           'title' => 'Reduce Slot Utilization',
-           'description' => 'Utilized Reduce slots (occupied + reserved) vs. Total Reduce slots',
-           'link' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&g=hdp_mon_jobtracker_mapreduce_report',
-           'title' => 'MapReduce Backlog',
-           'description' => 'Waiting Maps and Reduces, to give a feel for a combined MapReduce backlog',
-           'link' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&g=hdp_mon_jvm_gc_report',
-           'title' => 'JobTracker: JVM Garbage Collection',
-           'description' => 'Key Garbage Collection stats for the JobTracker\'s JVM',
-           'link' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&g=hdp_mon_rpc_latency_report',
-           'title' => 'JobTracker: RPC Average Latencies',
-           'description' => 'Average latencies for processing and queue times on the JobTracker, to give a feel for potential performance bottlenecks',
-           'link' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%'
-        )
-     )
-  );
-
-echo json_encode($data);
-
-?>
-
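Note on the two URL shapes in these deleted generators: the per-metric dashboards point graph.php at a single metric via m=<name>, while this *_hdp_json variant references prebuilt aggregate report graphs via g=<report> (e.g. hdp_mon_jobtracker_map_slot_report). A hedged sketch of both forms, with invented host and cluster values:

<?php
// Sketch only: the two graph.php URL forms used across these generators.
// 'm' selects one raw metric; 'g' selects a canned multi-metric report.
function gangliaGraphUrl($host, $port, $cluster, $param, $name)
{
   return 'http://' . $host . ':' . $port
      . '/ganglia/graph.php?c=' . rawurlencode($cluster)
      . '&' . $param . '=' . rawurlencode($name);
}

// Single-metric graph, as in the plain generators:
echo gangliaGraphUrl('ganglia.example.com', 80, 'HDPJobTracker',
                     'm', 'mapred.jobtracker.waiting_maps') . "\n";

// Aggregate report graph, as in this *_hdp_json generator:
echo gangliaGraphUrl('ganglia.example.com', 80, 'HDPJobTracker',
                     'g', 'hdp_mon_jobtracker_map_slot_report') . "\n";
?>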
diff --git a/branch-1.2/contrib/addons/utils/dataServices/ganglia/generate_mapreduce_json.php b/branch-1.2/contrib/addons/utils/dataServices/ganglia/generate_mapreduce_json.php
deleted file mode 100644
index 1e48267..0000000
--- a/branch-1.2/contrib/addons/utils/dataServices/ganglia/generate_mapreduce_json.php
+++ /dev/null
@@ -1,1441 +0,0 @@
-<?php
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
-*/
-
-
-$data = array
-  (
-     'Global' => array
-     (
-        array
-        (
-           'url' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&g=load_report',
-           'title' => 'Load Report',
-           'description' => 'Key load metrics on the MapReduce JobTracker',
-           'link' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=load_report'
-        ),
-        array
-        (
-           'url' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&g=mem_report',
-           'title' => 'Memory Report',
-           'description' => 'Key memory metrics on the MapReduce JobTracker',
-           'link' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mem_report'
-        ),
-        array
-        (
-           'url' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&g=cpu_report',
-           'title' => 'CPU Report',
-           'description' => 'Key CPU metrics on the MapReduce JobTracker',
-           'link' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=cpu_report'
-        ),
-        array
-        (
-           'url' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&g=network_report',
-           'title' => 'Network I/O Report',
-           'description' => 'Key network I/O metrics on the MapReduce JobTracker',
-           'link' => 'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=network_report'
-        )
-     ),
-     'CPU' => array
-     (
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=cpu_aidle',
-           'title' => 'CPU AIdle',
-           'description' => 'Percent of time since boot idle CPU',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=cpu_aidle'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=cpu_idle',
-           'title' => 'CPU Idle',
-           'description' => 'Percent CPU idle',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=cpu_idle'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=cpu_nice',
-           'title' => 'CPU Nice',
-           'description' => 'Percent CPU nice',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=cpu_nice'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=cpu_system',
-           'title' => 'CPU System',
-           'description' => 'Percent CPU system',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=cpu_system'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=cpu_user',
-           'title' => 'CPU User',
-           'description' => 'Percent CPU user',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=cpu_user'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=cpu_wio',
-           'title' => 'CPU Wait I/O',
-           'description' => 'Percent CPU spent waiting on I/O',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=cpu_wio'
-        )
-     ),
-     'Disk' => array
-     (
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=disk_free',
-           'title' => 'Disk Free',
-           'description' => 'Total free disk space',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=disk_free'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=disk_total',
-           'title' => 'Disk Total',
-           'description' => 'Total available disk space',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=disk_total'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=part_max_used',
-           'title' => 'Max Disk Partition Used',
-           'description' => 'Maximum percent used for all disk partitions',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=part_max_used'
-        )
-     ),
-     'Load' => array
-     (
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=load_fifteen',
-           'title' => 'Load Fifteen',
-           'description' => 'Fifteen minute load average',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=load_fifteen'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=load_five',
-           'title' => 'Load Five',
-           'description' => 'Five minute load average',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=load_five'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=load_one',
-           'title' => 'Load One',
-           'description' => 'One minute load average',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=load_one'
-        )
-     ),
-     'Memory' => array
-     (
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mem_buffers',
-           'title' => 'Memory Buffers',
-           'description' => 'Amount of buffered memory',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mem_buffers'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mem_cached',
-           'title' => 'Cached Memory',
-           'description' => 'Amount of cached memory',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mem_cached'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mem_free',
-           'title' => 'Free Memory',
-           'description' => 'Amount of available memory',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mem_free'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mem_shared',
-           'title' => 'Shared Memory',
-           'description' => 'Amount of shared memory',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mem_shared'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=swap_free',
-           'title' => 'Free Swap Space',
-           'description' => 'Total amount of swap memory',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=swap_free'
-        )
-     ),
-     'Network' => array
-     (
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=bytes_in',
-           'title' => 'Bytes Received',
-           'description' => 'Number of bytes in per second',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=bytes_in'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=bytes_out',
-           'title' => 'Bytes Sent',
-           'description' => 'Number of bytes out per second',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=bytes_out'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=pkts_in',
-           'title' => 'Packets Received',
-           'description' => 'Packets in per second',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=pkts_in'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=pkts_out',
-           'title' => 'Packets Sent',
-           'description' => 'Packets out per second',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=pkts_out'
-        )
-     ),
-     'Process' => array
-     (
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=proc_run',
-           'title' => 'Total Running Processes',
-           'description' => 'Total number of running processes',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=proc_run'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=proc_total',
-           'title' => 'Total Processes',
-           'description' => 'Total number of processes',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=proc_total'
-        )
-     ),
-     'MapReduce Queue' => array
-     (
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.Queue.jobs_completed',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.Queue.jobs_completed'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.Queue.jobs_failed',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.Queue.jobs_failed'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.Queue.jobs_killed',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.Queue.jobs_killed'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.Queue.jobs_preparing',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.Queue.jobs_preparing'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.Queue.jobs_running',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.Queue.jobs_running'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.Queue.jobs_submitted',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.Queue.jobs_submitted'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.Queue.maps_completed',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.Queue.maps_completed'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.Queue.maps_failed',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.Queue.maps_failed'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.Queue.maps_killed',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.Queue.maps_killed'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.Queue.maps_launched',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.Queue.maps_launched'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.Queue.reduces_completed',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.Queue.reduces_completed'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.Queue.reduces_failed',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.Queue.reduces_failed'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.Queue.reduces_killed',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.Queue.reduces_killed'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.Queue.reduces_launched',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.Queue.reduces_launched'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.Queue.reserved_map_slots',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.Queue.reserved_map_slots'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.Queue.reserved_reduce_slots',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.Queue.reserved_reduce_slots'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.Queue.waiting_maps',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.Queue.waiting_maps'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.Queue.waiting_reduces',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.Queue.waiting_reduces'
-        )
-     ),
-     'JobTracker' => array
-     (
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.jobtracker.blacklisted_maps',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.jobtracker.blacklisted_maps'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.jobtracker.blacklisted_reduces',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.jobtracker.blacklisted_reduces'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.jobtracker.heartbeats',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.jobtracker.heartbeats'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.jobtracker.jobs_completed',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.jobtracker.jobs_completed'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.jobtracker.jobs_failed',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.jobtracker.jobs_failed'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.jobtracker.jobs_killed',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.jobtracker.jobs_killed'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.jobtracker.jobs_preparing',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.jobtracker.jobs_preparing'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.jobtracker.jobs_running',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.jobtracker.jobs_running'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.jobtracker.jobs_submitted',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.jobtracker.jobs_submitted'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.jobtracker.map_slots',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.jobtracker.map_slots'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.jobtracker.maps_completed',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.jobtracker.maps_completed'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.jobtracker.maps_failed',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.jobtracker.maps_failed'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.jobtracker.maps_killed',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.jobtracker.maps_killed'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.jobtracker.maps_launched',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.jobtracker.maps_launched'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.jobtracker.occupied_map_slots',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.jobtracker.occupied_map_slots'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.jobtracker.occupied_reduce_slots',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.jobtracker.occupied_reduce_slots'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.jobtracker.reduce_slots',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.jobtracker.reduce_slots'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.jobtracker.reduces_completed',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.jobtracker.reduces_completed'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.jobtracker.reduces_failed',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.jobtracker.reduces_failed'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.jobtracker.reduces_killed',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.jobtracker.reduces_killed'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.jobtracker.reduces_launched',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.jobtracker.reduces_launched'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.jobtracker.reserved_map_slots',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.jobtracker.reserved_map_slots'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.jobtracker.reserved_reduce_slots',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.jobtracker.reserved_reduce_slots'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.jobtracker.running_maps',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.jobtracker.running_maps'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.jobtracker.running_reduces',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.jobtracker.running_reduces'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.jobtracker.trackers',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.jobtracker.trackers'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.jobtracker.trackers_blacklisted',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.jobtracker.trackers_blacklisted'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.jobtracker.trackers_decommissioned',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.jobtracker.trackers_decommissioned'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.jobtracker.trackers_graylisted',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.jobtracker.trackers_graylisted'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.jobtracker.waiting_maps',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.jobtracker.waiting_maps'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=mapred.jobtracker.waiting_reduces',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=mapred.jobtracker.waiting_reduces'
-        )
-     ),
-     'JVM' => array
-     (
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=jvm.metrics.gcCount',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=jvm.metrics.gcCount'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=jvm.metrics.gcTimeMillis',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=jvm.metrics.gcTimeMillis'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=jvm.metrics.logError',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=jvm.metrics.logError'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=jvm.metrics.logFatal',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=jvm.metrics.logFatal'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=jvm.metrics.logInfo',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=jvm.metrics.logInfo'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=jvm.metrics.logWarn',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=jvm.metrics.logWarn'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=jvm.metrics.memHeapCommittedM',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=jvm.metrics.memHeapCommittedM'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=jvm.metrics.memHeapUsedM',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=jvm.metrics.memHeapUsedM'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=jvm.metrics.memNonHeapCommittedM',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=jvm.metrics.memNonHeapCommittedM'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=jvm.metrics.memNonHeapUsedM',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=jvm.metrics.memNonHeapUsedM'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=jvm.metrics.threadsBlocked',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=jvm.metrics.threadsBlocked'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=jvm.metrics.threadsNew',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=jvm.metrics.threadsNew'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=jvm.metrics.threadsRunnable',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=jvm.metrics.threadsRunnable'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=jvm.metrics.threadsTerminated',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=jvm.metrics.threadsTerminated'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=jvm.metrics.threadsTimedWaiting',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=jvm.metrics.threadsTimedWaiting'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=jvm.metrics.threadsWaiting',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=jvm.metrics.threadsWaiting'
-        )
-     ),
-     'MetricsSystem' => array
-     (
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.dropped_pub_all',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.dropped_pub_all'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.num_sinks',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.num_sinks'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.num_sources',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.num_sources'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.publish_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.publish_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.publish_imax_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.publish_imax_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.publish_imin_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.publish_imin_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.publish_max_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.publish_max_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.publish_min_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.publish_min_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.publish_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.publish_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.publish_stdev_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.publish_stdev_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.sink.ganglia.dropped',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.sink.ganglia.dropped'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.sink.ganglia.latency_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.sink.ganglia.latency_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.sink.ganglia.latency_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.sink.ganglia.latency_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.sink.ganglia.qsize',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.sink.ganglia.qsize'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.snapshot_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.snapshot_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.snapshot_imax_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.snapshot_imax_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.snapshot_imin_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.snapshot_imin_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.snapshot_max_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.snapshot_max_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.snapshot_min_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.snapshot_min_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.snapshot_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.snapshot_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.snapshot_stdev_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=metricssystem.MetricsSystem.snapshot_stdev_time'
-        )
-     ),
-     'RPC' => array
-     (
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=rpc.rpc.NumOpenConnections',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=rpc.rpc.NumOpenConnections'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=rpc.rpc.ReceivedBytes',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=rpc.rpc.ReceivedBytes'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=rpc.rpc.RpcProcessingTime_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=rpc.rpc.RpcProcessingTime_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=rpc.rpc.RpcProcessingTime_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=rpc.rpc.RpcProcessingTime_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=rpc.rpc.RpcQueueTime_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=rpc.rpc.RpcQueueTime_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=rpc.rpc.RpcQueueTime_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=rpc.rpc.RpcQueueTime_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=rpc.rpc.SentBytes',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=rpc.rpc.SentBytes'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=rpc.rpc.callQueueLen',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=rpc.rpc.callQueueLen'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=rpc.rpc.rpcAuthenticationFailures',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=rpc.rpc.rpcAuthenticationFailures'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=rpc.rpc.rpcAuthenticationSuccesses',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=rpc.rpc.rpcAuthenticationSuccesses'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=rpc.rpc.rpcAuthorizationFailures',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=rpc.rpc.rpcAuthorizationFailures'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=rpc.rpc.rpcAuthorizationSuccesses',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=rpc.rpc.rpcAuthorizationSuccesses'
-        )
-     ),
-     'RPCDetailed' => array
-     (
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getBuildVersion_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getBuildVersion_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getBuildVersion_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getBuildVersion_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getJobCounters_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getJobCounters_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getJobCounters_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getJobCounters_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getJobProfile_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getJobProfile_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getJobProfile_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getJobProfile_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getJobStatus_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getJobStatus_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getJobStatus_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getJobStatus_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getProtocolVersion_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getProtocolVersion_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getNewJobId_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getNewJobId_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getNewJobId_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getNewJobId_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getQueueAdmins_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getQueueAdmins_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getQueueAdmins_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getQueueAdmins_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getStagingAreaDir_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getStagingAreaDir_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getStagingAreaDir_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getStagingAreaDir_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getSystemDir_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getSystemDir_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getSystemDir_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getSystemDir_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getTaskCompletionEvents_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getTaskCompletionEvents_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getTaskCompletionEvents_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getTaskCompletionEvents_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.heartbeat_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.heartbeat_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.heartbeat_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.heartbeat_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.submitJob_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.submitJob_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.submitJob_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.submitJob_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getProtocolVersion_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=rpcdetailed.rpcdetailed.getProtocolVersion_num_ops'
-        )
-     ),
-     'UGI' => array
-     (
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=ugi.ugi.loginFailure_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=ugi.ugi.loginFailure_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=ugi.ugi.loginFailure_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=ugi.ugi.loginFailure_num_ops'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=ugi.ugi.loginSuccess_avg_time',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=ugi.ugi.loginSuccess_avg_time'
-        ),
-        array
-        (
-           'url' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&m=ugi.ugi.loginSuccess_num_ops',
-           'title' => '',
-           'description' => '',
-           'link' =>
-           'http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%&m=ugi.ugi.loginSuccess_num_ops'
-        )
-     )
-  );
-
-echo json_encode($data);
-
-?>
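The PHP file above builds, for each metric group, a list of entries carrying the same four keys (url, title, description, link) and emits the whole structure as JSON via json_encode. A minimal Java sketch of the same shape, using the jackson-mapper-asl dependency this contrib module declares; the host, port, and cluster values stand in for the %GangliaWebHostName%-style placeholders and are illustrative only:

----
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

import org.codehaus.jackson.map.ObjectMapper;

public class GangliaDashboardSketch {
  // Illustrative stand-ins for %GangliaWebHostName%, %GangliaWebPort%,
  // and %JobTrackerClusterName%.
  private static final String HOST = "ganglia.example.com";
  private static final String PORT = "80";
  private static final String CLUSTER = "HDPJobTracker";

  private static Map<String, String> entry(String metric) {
    Map<String, String> e = new LinkedHashMap<String, String>();
    // Same four keys as each PHP array entry above.
    e.put("url", "http://" + HOST + ":" + PORT
        + "/ganglia/graph.php?c=" + CLUSTER + "&m=" + metric);
    e.put("title", "");
    e.put("description", "");
    e.put("link", "http://" + HOST + ":" + PORT
        + "/ganglia/?c=" + CLUSTER + "&m=" + metric);
    return e;
  }

  public static void main(String[] args) throws Exception {
    Map<String, List<Map<String, String>>> data =
        new LinkedHashMap<String, List<Map<String, String>>>();
    List<Map<String, String>> ugi = new ArrayList<Map<String, String>>();
    ugi.add(entry("ugi.ugi.loginSuccess_num_ops"));
    data.put("UGI", ugi);
    // Counterpart of the PHP file's closing echo json_encode($data);
    System.out.println(new ObjectMapper().writeValueAsString(data));
  }
}
----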
diff --git a/branch-1.2/contrib/ambari-log4j/README.md b/branch-1.2/contrib/ambari-log4j/README.md
deleted file mode 100644
index 9dec768..0000000
--- a/branch-1.2/contrib/ambari-log4j/README.md
+++ /dev/null
@@ -1,62 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-ambari-log4j
-============
-
-
-log4j add-ons for Apache Ambari.
-
-1. MapReduce JobHistory
-
-JobHistoryAppender saves all job statistics to a database
-via JDBC.
-
-Create a PostgreSQL database named ambari and run the table-creation
-commands from src/main/resources/ambari.schema.
-
-Build by running mvn clean package.  Copy the resulting ambari-log4j jar
-from the target directory into the JobTracker's Hadoop lib directory.
-Also copy the PostgreSQL JDBC driver jar (postgresql-9.1-901-1.jdbc4.jar,
-the version declared in pom.xml) into the same directory.
-
-Add the following to the JobTracker's log4j.properties, setting
-<username> and <password> as appropriate for your PostgreSQL database.
-Then start the JobTracker with:
-HADOOP_OPTS="$HADOOP_OPTS -Dambari.jobhistory.logger=DEBUG,JHA" bin/hadoop-daemon.sh start jobtracker
-
-----
-#
-# JobHistory logger 
-#
-
-ambari.jobhistory.database=jdbc:postgresql://localhost:5432/ambari
-ambari.jobhistory.driver=org.postgresql.Driver
-ambari.jobhistory.user=<username>
-ambari.jobhistory.password=<password>
-ambari.jobhistory.logger=${hadoop.root.logger}
-
-log4j.appender.JHA=org.apache.ambari.log4j.hadoop.mapreduce.jobhistory.JobHistoryAppender
-log4j.appender.JHA.database=${ambari.jobhistory.database}
-log4j.appender.JHA.driver=${ambari.jobhistory.driver}
-log4j.appender.JHA.user=${ambari.jobhistory.user}
-log4j.appender.JHA.password=${ambari.jobhistory.password}
-
-log4j.logger.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger=${ambari.jobhistory.logger}
-log4j.additivity.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger=true
-
-----
-
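The appender configured above follows the standard log4j 1.2 contract: log4j instantiates it, populates it through bean-style setters matching the log4j.appender.JHA.* keys, and delivers each record through append(). A minimal sketch under those assumptions; the class name and body are illustrative, not the real JobHistoryAppender:

----
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.spi.LoggingEvent;

public class SketchJdbcAppender extends AppenderSkeleton {
  // Populated by log4j from log4j.appender.JHA.database=... style keys.
  private String database;
  private String driver;
  private String user;
  private String password;

  public void setDatabase(String database) { this.database = database; }
  public void setDriver(String driver) { this.driver = driver; }
  public void setUser(String user) { this.user = user; }
  public void setPassword(String password) { this.password = password; }

  @Override
  protected void append(LoggingEvent event) {
    // The real appender parses the event and hands it to a JDBC-backed
    // store; this sketch only marks where that hand-off happens.
  }

  @Override
  public void close() {
    // Release JDBC resources here.
  }

  @Override
  public boolean requiresLayout() {
    return false;
  }
}
----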
diff --git a/branch-1.2/contrib/ambari-log4j/pom.xml b/branch-1.2/contrib/ambari-log4j/pom.xml
deleted file mode 100644
index 827e33c..0000000
--- a/branch-1.2/contrib/ambari-log4j/pom.xml
+++ /dev/null
@@ -1,189 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <groupId>org.apache.ambari</groupId>
-  <artifactId>ambari-log4j</artifactId>
-  <packaging>jar</packaging>
-  <version>1.2.2-SNAPSHOT</version>
-  <name>ambari-log4j</name>
-  <url>http://maven.apache.org</url>
-  <repositories>
-    <repository>
-      <id>hdp.internal</id>
-      <url>http://s3.amazonaws.com/repo.hortonworks.com/release</url>
-    </repository>
-  </repositories>
-  <dependencies>
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <version>3.8.1</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-tools</artifactId>
-      <version>1.1.1.1</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-core</artifactId>
-      <version>1.1.1.1</version>
-    </dependency>
-    <dependency>
-      <groupId>log4j</groupId>
-      <artifactId>log4j</artifactId>
-      <version>1.2.15</version>
-      <exclusions>
-        <exclusion>
-          <groupId>com.sun.jdmk</groupId>
-          <artifactId>jmxtools</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.sun.jmx</groupId>
-          <artifactId>jmxri</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>javax.mail</groupId>
-          <artifactId>mail</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>javax.jms</groupId>
-          <artifactId>jmx</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>javax.jms</groupId>
-          <artifactId>jms</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>commons-logging</groupId>
-      <artifactId>commons-logging</artifactId>
-      <version>1.1.1</version>
-    </dependency>
-    <dependency>
-      <groupId>commons-logging</groupId>
-      <artifactId>commons-logging-api</artifactId>
-      <version>1.1</version>
-    </dependency>
-    <dependency>
-      <groupId>commons-lang</groupId>
-      <artifactId>commons-lang</artifactId>
-      <version>2.4</version>
-    </dependency>
-    <dependency>
-    	<groupId>postgresql</groupId>
-    	<artifactId>postgresql</artifactId>
-    	<version>9.1-901-1.jdbc4</version>
-    </dependency>
-    <dependency>
-      <groupId>org.codehaus.jackson</groupId>
-      <artifactId>jackson-mapper-asl</artifactId>
-      <version>1.9.2</version>
-    </dependency>
-    <dependency>
-      <groupId>com.sun.jersey</groupId>
-      <artifactId>jersey-json</artifactId>
-      <version>1.13</version>
-    </dependency>
-    <dependency>
-      <groupId>com.sun.jersey</groupId>
-      <artifactId>jersey-core</artifactId>
-      <version>1.13</version>
-    </dependency>
-    <dependency>
-      <groupId>com.sun.jersey</groupId>
-      <artifactId>jersey-servlet</artifactId>
-      <version>1.13</version>
-    </dependency>
-    <dependency>
-      <groupId>javax.xml.bind</groupId>
-      <artifactId>jaxb-api</artifactId>
-      <version>2.2.7</version>
-    </dependency>
-  </dependencies>
-
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.mortbay.jetty</groupId>
-        <artifactId>jetty-maven-plugin</artifactId>
-        <version>8.1.5.v20120716</version>
-        <configuration>
-          <scanIntervalSeconds>5</scanIntervalSeconds>
-          <stopPort>9999</stopPort>
-          <connectors>
-            <connector implementation="org.eclipse.jetty.server.nio.SelectChannelConnector">
-              <port>1984</port>
-              <maxIdleTime>60000</maxIdleTime>
-            </connector>
-          </connectors>
-        </configuration>
-      </plugin>
-
-        <plugin>
-            <groupId>org.codehaus.mojo</groupId>
-            <artifactId>rpm-maven-plugin</artifactId>
-            <version>2.0.1</version>
-            <executions>
-                <execution>
-                    <phase>none</phase>
-                    <goals>
-                        <goal>rpm</goal>
-                    </goals>
-                </execution>
-            </executions>
-            <configuration>
-                <copyright>2012, Apache Software Foundation</copyright>
-                <group>Development</group>
-                <description>Maven Recipe: RPM Package.</description>
-                <mappings>
-                    <mapping>
-                        <directory>/usr/lib/hadoop/lib</directory>
-                        <filemode>644</filemode>
-                        <username>root</username>
-                        <groupname>root</groupname>
-                        <sources>
-                            <source>
-                                <location>target/ambari-log4j-${project.version}.jar</location>
-                            </source>
-                        </sources>
-
-                    </mapping>
-                    <mapping>
-                        <directory>/usr/lib/hadoop/lib</directory>
-                        <filemode>644</filemode>
-                        <username>root</username>
-                        <groupname>root</groupname>
-                        <dependency>
-                            <includes>
-                                <include>postgresql:postgresql</include>
-                            </includes>
-                        </dependency>
-                    </mapping>
-
-                </mappings>
-            </configuration>
-        </plugin>
-
-    </plugins>
-  </build>
-
-</project>
diff --git a/branch-1.2/contrib/ambari-log4j/src/main/java/org/apache/ambari/eventdb/model/WorkflowContext.java b/branch-1.2/contrib/ambari-log4j/src/main/java/org/apache/ambari/eventdb/model/WorkflowContext.java
deleted file mode 100644
index c2b0468..0000000
--- a/branch-1.2/contrib/ambari-log4j/src/main/java/org/apache/ambari/eventdb/model/WorkflowContext.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.eventdb.model;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlRootElement;
-
-
-@XmlRootElement
-@XmlAccessorType(XmlAccessType.FIELD)
-public class WorkflowContext {
-  
-  private String workflowId;
-  private String workflowName;
-  private String workflowEntityName;
-  
-  private WorkflowDag workflowDag;
-  
-  private WorkflowContext parentWorkflowContext;
-  
-  public WorkflowContext() {
-    /* Required by JAXB. */
-  }
-  
-  /* Getters. */
-  public String getWorkflowId() {
-    return this.workflowId;
-  }
-  
-  public String getWorkflowName() {
-    return this.workflowName;
-  }
-  
-  public String getWorkflowEntityName() {
-    return this.workflowEntityName;
-  }
-  
-  public WorkflowDag getWorkflowDag() {
-    return this.workflowDag;
-  }
-  
-  public WorkflowContext getParentWorkflowContext() {
-    return this.parentWorkflowContext;
-  }
-  
-  /* Setters. */
-  public void setWorkflowId(String wfId) {
-    this.workflowId = wfId;
-  }
-  
-  public void setWorkflowName(String wfName) {
-    this.workflowName = wfName;
-  }
-  
-  public void setWorkflowEntityName(String wfEntityName) {
-    this.workflowEntityName = wfEntityName;
-  }
-  
-  public void setWorkflowDag(WorkflowDag wfDag) {
-    this.workflowDag = wfDag;
-  }
-  
-  public void setParentWorkflowContext(WorkflowContext pWfContext) {
-    this.parentWorkflowContext = pWfContext;
-  }
-}
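WorkflowContext is a JAXB-annotated bean, so it can be marshalled to XML directly with the jaxb-api dependency declared in the module's pom. A small usage sketch; the workflow id and names are made-up values:

----
import java.io.StringWriter;

import javax.xml.bind.JAXBContext;
import javax.xml.bind.Marshaller;

import org.apache.ambari.eventdb.model.WorkflowContext;

public class WorkflowContextDemo {
  public static void main(String[] args) throws Exception {
    WorkflowContext ctx = new WorkflowContext();
    ctx.setWorkflowId("mr_201304010000_0001");   // illustrative id
    ctx.setWorkflowName("wordcount");
    ctx.setWorkflowEntityName("wordcount-stage-1");

    Marshaller m =
        JAXBContext.newInstance(WorkflowContext.class).createMarshaller();
    m.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
    StringWriter out = new StringWriter();
    m.marshal(ctx, out);   // emits a <workflowContext> document
    System.out.println(out);
  }
}
----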
diff --git a/branch-1.2/contrib/ambari-log4j/src/main/java/org/apache/ambari/eventdb/model/WorkflowDag.java b/branch-1.2/contrib/ambari-log4j/src/main/java/org/apache/ambari/eventdb/model/WorkflowDag.java
deleted file mode 100644
index 07056bf..0000000
--- a/branch-1.2/contrib/ambari-log4j/src/main/java/org/apache/ambari/eventdb/model/WorkflowDag.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.eventdb.model;
-
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlRootElement;
-
-@XmlRootElement
-@XmlAccessorType(XmlAccessType.FIELD)
-public class WorkflowDag {
-  
-  public static class WorkflowDagEntry {
-    
-    private String source;
-    private List<String> targets = new ArrayList<String>();
-    
-    public WorkflowDagEntry() {
-      /* Required by JAXB. */
-    }
-    
-    /* Getters. */
-    public String getSource() {
-      return this.source;
-    }
-    
-    public List<String> getTargets() {
-      return this.targets;
-    }
-    
-    /* Setters. */
-    public void setSource(String source) {
-      this.source = source;
-    }
-    
-    public void setTargets(List<String> targets) {
-      this.targets = targets;
-    }
-    
-    public void addTarget(String target) {
-      this.targets.add(target);
-    }
-  }
-  
-  List<WorkflowDagEntry> entries = new ArrayList<WorkflowDagEntry>();
-  
-  public WorkflowDag() {
-    /* Required by JAXB. */
-  }
-  
-  /* Getters. */
-  public List<WorkflowDagEntry> getEntries() {
-    return this.entries;
-  }
-  
-  /* Setters. */
-  public void setEntries(List<WorkflowDagEntry> entries) {
-    this.entries = entries;
-  }
-  
-  public void addEntry(WorkflowDag.WorkflowDagEntry entry) {
-    this.entries.add(entry);
-  }
-  
-  public int size() {
-    Set<String> nodes = new HashSet<String>();
-    for (WorkflowDagEntry entry : entries) {
-      nodes.add(entry.getSource());
-      nodes.addAll(entry.getTargets());
-    }
-    return nodes.size();
-  }
-}
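WorkflowDag.size() collects every source and target into a set, so a node appearing both as a source and as a target is counted once. A quick illustration:

----
import org.apache.ambari.eventdb.model.WorkflowDag;
import org.apache.ambari.eventdb.model.WorkflowDag.WorkflowDagEntry;

public class WorkflowDagDemo {
  public static void main(String[] args) {
    WorkflowDag dag = new WorkflowDag();

    WorkflowDagEntry first = new WorkflowDagEntry();
    first.setSource("X");
    first.addTarget("Y");
    dag.addEntry(first);

    WorkflowDagEntry second = new WorkflowDagEntry();
    second.setSource("Y");   // "Y" was already seen as a target above
    second.addTarget("Z");
    dag.addEntry(second);

    // Distinct nodes are {X, Y, Z}, so this prints 3, not 4.
    System.out.println(dag.size());
  }
}
----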
diff --git a/branch-1.2/contrib/ambari-log4j/src/main/java/org/apache/ambari/log4j/common/LogParser.java b/branch-1.2/contrib/ambari-log4j/src/main/java/org/apache/ambari/log4j/common/LogParser.java
deleted file mode 100644
index 1131fd9..0000000
--- a/branch-1.2/contrib/ambari-log4j/src/main/java/org/apache/ambari/log4j/common/LogParser.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.log4j.common;
-
-import java.io.IOException;
-
-import org.apache.log4j.spi.LoggingEvent;
-
-public interface LogParser {
-  
-  void addEventToParse(LoggingEvent event);
-  
-  Object getParseResult() throws IOException;
-  
-}
diff --git a/branch-1.2/contrib/ambari-log4j/src/main/java/org/apache/ambari/log4j/common/LogStore.java b/branch-1.2/contrib/ambari-log4j/src/main/java/org/apache/ambari/log4j/common/LogStore.java
deleted file mode 100644
index 886f6fe..0000000
--- a/branch-1.2/contrib/ambari-log4j/src/main/java/org/apache/ambari/log4j/common/LogStore.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.log4j.common;
-
-import java.io.IOException;
-
-import org.apache.log4j.spi.LoggingEvent;
-
-public interface LogStore {
-  
-  void persist(LoggingEvent originalEvent, Object parsedEvent) 
-      throws IOException;
-  
-  void close() throws IOException;
-}
diff --git a/branch-1.2/contrib/ambari-log4j/src/main/java/org/apache/ambari/log4j/common/LogStoreUpdateProvider.java b/branch-1.2/contrib/ambari-log4j/src/main/java/org/apache/ambari/log4j/common/LogStoreUpdateProvider.java
deleted file mode 100644
index 6cb486c..0000000
--- a/branch-1.2/contrib/ambari-log4j/src/main/java/org/apache/ambari/log4j/common/LogStoreUpdateProvider.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.log4j.common;
-
-import java.io.IOException;
-import java.sql.Connection;
-
-import org.apache.log4j.spi.LoggingEvent;
-
-public interface LogStoreUpdateProvider {
-  
-  void init(Connection connection) throws IOException;
-  
-  void update(LoggingEvent originalEvent, Object parsedEvent) 
-      throws IOException;
-  
-}
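An implementation of LogStoreUpdateProvider receives the shared JDBC Connection once through init() and is then called with each (original, parsed) event pair. A minimal sketch; the job_events table is an assumed name, not part of ambari.schema:

----
import java.io.IOException;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;

import org.apache.ambari.log4j.common.LogStoreUpdateProvider;
import org.apache.log4j.spi.LoggingEvent;

public class SketchUpdateProvider implements LogStoreUpdateProvider {
  private PreparedStatement insert;

  @Override
  public void init(Connection connection) throws IOException {
    try {
      // job_events(event_text) is a hypothetical table for illustration.
      insert = connection.prepareStatement(
          "INSERT INTO job_events (event_text) VALUES (?)");
    } catch (SQLException e) {
      throw new IOException("Failed to prepare insert statement", e);
    }
  }

  @Override
  public void update(LoggingEvent originalEvent, Object parsedEvent)
      throws IOException {
    try {
      insert.setString(1, String.valueOf(parsedEvent));
      insert.executeUpdate();
    } catch (SQLException e) {
      throw new IOException("Failed to persist parsed event", e);
    }
  }
}
----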
diff --git a/branch-1.2/contrib/ambari-log4j/src/main/java/org/apache/ambari/log4j/common/LoggingThreadRunnable.java b/branch-1.2/contrib/ambari-log4j/src/main/java/org/apache/ambari/log4j/common/LoggingThreadRunnable.java
deleted file mode 100644
index abc9b61..0000000
--- a/branch-1.2/contrib/ambari-log4j/src/main/java/org/apache/ambari/log4j/common/LoggingThreadRunnable.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.log4j.common;
-
-import java.io.IOException;
-import java.util.Queue;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.log4j.spi.LoggingEvent;
-
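-/**
- * Background runnable that drains queued LoggingEvents, feeds them to the
- * LogParser and persists every parse result to the LogStore. It sleeps
- * WAIT_EMPTY_QUEUE millis between polls of an empty queue and exits once
- * close() has been called, closing the store on the way out.
- */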
-public class LoggingThreadRunnable implements Runnable {
-  private static final Log LOG = LogFactory.getLog(LoggingThreadRunnable.class);
-  private static final long WAIT_EMPTY_QUEUE = 60000;
-  private final Queue<LoggingEvent> events;
-  private final LogParser parser;
-  private final LogStore store;
-  private final AtomicBoolean done = new AtomicBoolean(false);
-  
-  public LoggingThreadRunnable(
-      Queue<LoggingEvent> events, 
-      LogParser parser, 
-      LogStore provider) {
-    this.events = events;
-    this.store = provider;
-    this.parser = parser;
-  }
-  
-  @Override
-  public void run() {
-    while (!done.get()) {
-      LoggingEvent event = null;
-      while ((event = events.poll()) != null) {
-        Object result = null;
-        try {
-          parser.addEventToParse(event);
-          while ((result = parser.getParseResult()) != null) {
-            try {
-              store.persist(event, result);
-            } catch (IOException e) {
-              LOG.warn("Failed to persist " + result);
-            }
-          }
-        } catch (IOException ioe) {
-          LOG.warn("Failed to parse log-event: " + event);
-        }
-      }
-      try {
-        Thread.sleep(WAIT_EMPTY_QUEUE);
-      } catch (InterruptedException ie) {
-        // ignore and continue polling
-      }
-    }
-    try {
-      store.close();
-    } catch (IOException ioe) {
-      LOG.info("Failed to close logStore", ioe);
-    }
-  }
-  
-  public void close() throws IOException {
-    done.set(true);
-  }
-}
diff --git a/branch-1.2/contrib/ambari-log4j/src/main/java/org/apache/ambari/log4j/common/store/DatabaseStore.java b/branch-1.2/contrib/ambari-log4j/src/main/java/org/apache/ambari/log4j/common/store/DatabaseStore.java
deleted file mode 100644
index 808b43f..0000000
--- a/branch-1.2/contrib/ambari-log4j/src/main/java/org/apache/ambari/log4j/common/store/DatabaseStore.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.log4j.common.store;
-
-import java.io.IOException;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.SQLException;
-
-import org.apache.ambari.log4j.common.LogStore;
-import org.apache.ambari.log4j.common.LogStoreUpdateProvider;
-import org.apache.log4j.spi.LoggingEvent;
-
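-/**
- * LogStore that writes parsed events to a JDBC database; the actual SQL
- * is delegated to the supplied LogStoreUpdateProvider.
- */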
-public class DatabaseStore implements LogStore {
-
-  final private String database;
-  final private String user;
-  final private String password;
-  
-  final private Connection connection;
-  final private LogStoreUpdateProvider updateProvider;
-  
-  public DatabaseStore(String driver, 
-      String database, String user, String password, 
-      LogStoreUpdateProvider updateProvider) 
-      throws IOException {
-    try {
-      Class.forName(driver);
-    } catch (ClassNotFoundException e) {
-      System.err.println("Can't load driver - " + driver);
-      // chain the cause so the original stack trace isn't lost
-      throw new RuntimeException("Can't load driver - " + driver, e);
-    }
-    this.database = database;
-    this.user = (user == null) ? "" : user;
-    this.password = (password == null) ? "" : password;
-    try {
-      this.connection = 
-          DriverManager.getConnection(this.database, this.user, this.password);
-    } catch (SQLException sqle) {
-      throw new IOException("Can't connect to database " + this.database, sqle);
-    }
-    this.updateProvider = updateProvider;
-    
-    this.updateProvider.init(this.connection);
-  }
-  
-  @Override
-  public void persist(LoggingEvent originalEvent, Object parsedEvent)
-      throws IOException {
-    updateProvider.update(originalEvent, parsedEvent);
-  }
-
-  @Override
-  public void close() throws IOException {
-    try {
-      connection.close();
-    } catch (SQLException sqle) {
-      throw new IOException(
-          "Failed to close connectionto database " + this.database, sqle);
-    }
-  }
-}
diff --git a/branch-1.2/contrib/ambari-log4j/src/main/java/org/apache/ambari/log4j/hadoop/mapreduce/jobhistory/JobHistoryAppender.java b/branch-1.2/contrib/ambari-log4j/src/main/java/org/apache/ambari/log4j/hadoop/mapreduce/jobhistory/JobHistoryAppender.java
deleted file mode 100644
index 7f9769e..0000000
--- a/branch-1.2/contrib/ambari-log4j/src/main/java/org/apache/ambari/log4j/hadoop/mapreduce/jobhistory/JobHistoryAppender.java
+++ /dev/null
@@ -1,170 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.log4j.hadoop.mapreduce.jobhistory;
-
-import java.io.IOException;
-import java.util.Queue;
-import java.util.concurrent.LinkedBlockingQueue;
-
-import org.apache.ambari.log4j.common.LogParser;
-import org.apache.ambari.log4j.common.LogStore;
-import org.apache.ambari.log4j.common.LoggingThreadRunnable;
-import org.apache.ambari.log4j.common.store.DatabaseStore;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.tools.rumen.HistoryEvent;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.log4j.Appender;
-import org.apache.log4j.AppenderSkeleton;
-import org.apache.log4j.spi.LoggingEvent;
-
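-/**
- * Log4j appender that queues JobHistory log events and hands them to a
- * background LoggingThreadRunnable, which parses them via Rumen and
- * persists them to the configured database (or merely logs them when the
- * database property is 'none').
- */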
-public class JobHistoryAppender extends AppenderSkeleton implements Appender {
-
-  private static final Log LOG = LogFactory.getLog(JobHistoryAppender.class);
-  
-  private final Queue<LoggingEvent> events;
-  private LoggingThreadRunnable logThreadRunnable;
-  private Thread logThread;
-
-  private final LogParser logParser;
-
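-  // Fallback store used when no database is configured: parsed events are
-  // simply echoed to this appender's own log.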
-  private final LogStore nullStore =
-      new LogStore() {
-        @Override
-        public void persist(LoggingEvent originalEvent, Object parsedEvent) 
-            throws IOException {
-          LOG.info(((HistoryEvent)parsedEvent).toString());
-        }
-
-        @Override
-        public void close() throws IOException {}
-  };
-
-  private String driver;
-  private String database;
-  private String user;
-  private String password;
-  
-  private LogStore logStore;
-  
-  public JobHistoryAppender() {
-    events = new LinkedBlockingQueue<LoggingEvent>();
-    logParser = new MapReduceJobHistoryParser();
-    logStore = nullStore;
-  }
-  
-  /* Getters & Setters for log4j */
-  
-  public String getDatabase() {
-    return database;
-  }
-
-  public void setDatabase(String database) {
-    this.database = database;
-  }
-  
-  public String getDriver() {
-    return driver;
-  }
-
-  public void setDriver(String driver) {
-    this.driver = driver;
-  }
-
-  public String getUser() {
-    return user;
-  }
-
-  public void setUser(String user) {
-    this.user = user;
-  }
-
-  public String getPassword() {
-    return password;
-  }
-
-  public void setPassword(String password) {
-    this.password = password;
-  }
-
-  /* --------------------------- */
-
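-  // Called by log4j once all properties have been set: choose the store
-  // (database or the logging fallback) and start the daemon thread that
-  // drains the event queue.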
-  @Override
-  public void activateOptions() {
-    synchronized (this) {
-      // a missing database property behaves like the explicit 'none' setting
-      if (database == null || "none".equals(database)) {
-        logStore = nullStore;
-        LOG.info("database set to 'none'");
-      } else {
-        try {
-          logStore = 
-              new DatabaseStore(driver, database, user, password, 
-                  new MapReduceJobHistoryUpdater());
-        } catch (Exception e) {
-          // IOException and any runtime failure get identical handling, so
-          // one catch suffices; don't echo the password to stderr
-          LOG.debug("Failed to connect to db " + database, e);
-          System.err.println("Failed to connect to db " + database + 
-              " as user " + user + " and driver " + driver + " with " + 
-              StringUtils.stringifyException(e));
-          throw new RuntimeException(
-              "Failed to create database store for " + database, e);
-        }
-      }
-      logThreadRunnable = 
-          new LoggingThreadRunnable(events, logParser, logStore);
-      logThread = new Thread(logThreadRunnable);
-      logThread.setDaemon(true);
-      logThread.start();
-
-      super.activateOptions();
-    }
-  }
-
-  @Override
-  public void close() {
-    try {
-      logThreadRunnable.close();
-    } catch (IOException ioe) {
-      LOG.info("Failed to close logThreadRunnable", ioe);
-    }
-    try {
-      logThread.join(1000);
-    } catch (InterruptedException ie) {
-      LOG.info("logThread interrupted", ie);
-    }
-  }
-
-  @Override
-  public boolean requiresLayout() {
-    return false;
-  }
-
-  @Override
-  protected void append(LoggingEvent event) {
-    events.add(event);
-  }
-}
diff --git a/branch-1.2/contrib/ambari-log4j/src/main/java/org/apache/ambari/log4j/hadoop/mapreduce/jobhistory/MapReduceJobHistoryParser.java b/branch-1.2/contrib/ambari-log4j/src/main/java/org/apache/ambari/log4j/hadoop/mapreduce/jobhistory/MapReduceJobHistoryParser.java
deleted file mode 100644
index 7933a3b..0000000
--- a/branch-1.2/contrib/ambari-log4j/src/main/java/org/apache/ambari/log4j/hadoop/mapreduce/jobhistory/MapReduceJobHistoryParser.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.log4j.hadoop.mapreduce.jobhistory;
-
-import java.io.IOException;
-import java.util.Queue;
-import java.util.concurrent.LinkedBlockingQueue;
-
-import org.apache.ambari.log4j.common.LogParser;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.tools.rumen.Hadoop20JHParser;
-import org.apache.hadoop.tools.rumen.JobHistoryParser;
-import org.apache.hadoop.util.LineReader;
-import org.apache.log4j.spi.LoggingEvent;
-
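-/**
- * LogParser that feeds JobHistory log lines to Rumen's Hadoop20JHParser;
- * the line reader is primed with the "Meta VERSION" header the parser
- * expects before any real events arrive.
- */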
-public class MapReduceJobHistoryParser implements LogParser {
-  private JobHistoryParser parser;
-  private LogLineReader reader = new LogLineReader("Meta VERSION=\"1\" .");
-  
-  public MapReduceJobHistoryParser() {
-    try {
-      parser = new Hadoop20JHParser(reader);
-    } catch (IOException ioe) {
-      // SHOULD NEVER HAPPEN!
-      throw new RuntimeException(ioe);
-    }
-  }
-
-  @Override
-  public void addEventToParse(LoggingEvent event) {
-    reader.addLine(event.getMessage().toString());
-  }
-  
-  @Override
-  public Object getParseResult() throws IOException {
-    return parser.nextEvent();
-  }
-
-  static class LogLineReader extends LineReader {
-
-    private Queue<String> lines = new LinkedBlockingQueue<String>();
-    
-    public LogLineReader(String line) {
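-      // passing a null stream is safe: readLine(Text) is overridden below
-      // and never touches the underlying stream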
-      super(null);
-      addLine(line);
-    }
-
-    private void addLine(String line) {
-      lines.add(line);
-    }
-    
-    public int readLine(Text str) throws IOException {
-      String line = lines.poll();
-      if (line != null) {
-        str.set(line);
-        return line.length();
-      }
-      
-      return 0;
-    }
-    
-    public void close() throws IOException {
-    }
-  }
-}
diff --git a/branch-1.2/contrib/ambari-log4j/src/main/java/org/apache/ambari/log4j/hadoop/mapreduce/jobhistory/MapReduceJobHistoryUpdater.java b/branch-1.2/contrib/ambari-log4j/src/main/java/org/apache/ambari/log4j/hadoop/mapreduce/jobhistory/MapReduceJobHistoryUpdater.java
deleted file mode 100644
index aec5415..0000000
--- a/branch-1.2/contrib/ambari-log4j/src/main/java/org/apache/ambari/log4j/hadoop/mapreduce/jobhistory/MapReduceJobHistoryUpdater.java
+++ /dev/null
@@ -1,1084 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.log4j.hadoop.mapreduce.jobhistory;
-
-import java.io.IOException;
-import java.sql.Connection;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.TreeSet;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.apache.ambari.eventdb.model.WorkflowContext;
-import org.apache.ambari.eventdb.model.WorkflowDag;
-import org.apache.ambari.eventdb.model.WorkflowDag.WorkflowDagEntry;
-import org.apache.ambari.log4j.common.LogStoreUpdateProvider;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapreduce.Counter;
-import org.apache.hadoop.mapreduce.CounterGroup;
-import org.apache.hadoop.mapreduce.Counters;
-import org.apache.hadoop.mapreduce.TaskType;
-import org.apache.hadoop.tools.rumen.HistoryEvent;
-import org.apache.hadoop.tools.rumen.JhCounter;
-import org.apache.hadoop.tools.rumen.JhCounterGroup;
-import org.apache.hadoop.tools.rumen.JhCounters;
-import org.apache.hadoop.tools.rumen.JobFinishedEvent;
-import org.apache.hadoop.tools.rumen.JobInfoChangeEvent;
-import org.apache.hadoop.tools.rumen.JobInitedEvent;
-import org.apache.hadoop.tools.rumen.JobStatusChangedEvent;
-import org.apache.hadoop.tools.rumen.JobSubmittedEvent;
-import org.apache.hadoop.tools.rumen.JobUnsuccessfulCompletionEvent;
-import org.apache.hadoop.tools.rumen.MapAttemptFinishedEvent;
-import org.apache.hadoop.tools.rumen.ReduceAttemptFinishedEvent;
-import org.apache.hadoop.tools.rumen.TaskAttemptFinishedEvent;
-import org.apache.hadoop.tools.rumen.TaskAttemptStartedEvent;
-import org.apache.hadoop.tools.rumen.TaskAttemptUnsuccessfulCompletionEvent;
-import org.apache.hadoop.tools.rumen.TaskFailedEvent;
-import org.apache.hadoop.tools.rumen.TaskFinishedEvent;
-import org.apache.hadoop.tools.rumen.TaskStartedEvent;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.log4j.spi.LoggingEvent;
-import org.codehaus.jackson.map.ObjectMapper;
-
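-/**
- * LogStoreUpdateProvider that maps each Rumen HistoryEvent type to a
- * prepared INSERT or UPDATE against the workflow, job, task and
- * taskAttempt tables.
- */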
-public class MapReduceJobHistoryUpdater implements LogStoreUpdateProvider {
-  
-  private static final Log LOG = 
-      LogFactory.getLog(MapReduceJobHistoryUpdater.class);
-  
-  private Connection connection;
-  
-  private static final String WORKFLOW_TABLE = "workflow";
-  private static final String JOB_TABLE = "job";
-  private static final String TASK_TABLE = "task";
-  private static final String TASKATTEMPT_TABLE = "taskAttempt";
-  
-  private PreparedStatement workflowPS = null;
-  private PreparedStatement workflowSelectPS = null;
-  private PreparedStatement workflowUpdateTimePS = null;
-  private PreparedStatement workflowUpdateNumCompletedPS = null;
-  
-  private Map<Class<? extends HistoryEvent>, PreparedStatement> entitySqlMap =
-      new HashMap<Class<? extends HistoryEvent>, PreparedStatement>();
-  
-  @Override
-  public void init(Connection connection) throws IOException {
-    this.connection = connection;
-    
-    try {
-      initializePreparedStatements();
-    } catch (SQLException sqle) {
-      throw new IOException(sqle);
-    }
-  }
-  
-  private void initializePreparedStatements() throws SQLException {
-    initializeJobPreparedStatements();
-    initializeTaskPreparedStatements();
-    initializeTaskAttemptPreparedStatements();
-  }
-  
-  private PreparedStatement jobEndUpdate;
-  
-  private void initializeJobPreparedStatements() throws SQLException {
-
-    /** 
-     * Job events
-     */
-
-    // JobSubmittedEvent
-
-    PreparedStatement jobSubmittedPrepStmnt =
-        connection.prepareStatement(
-            "INSERT INTO " + 
-                JOB_TABLE + 
-                " (" +
-                "jobId, " +
-                "jobName, " +
-                "userName, " +
-                "confPath, " +
-                "queue, " +
-                "submitTime, " +
-                "workflowId, " +
-                "workflowEntityName " +
-                ") " +
-                "VALUES" +
-                " (?, ?, ?, ?, ?, ?, ?, ?)"
-            );
-    entitySqlMap.put(JobSubmittedEvent.class, jobSubmittedPrepStmnt);
-    
-    workflowSelectPS =
-        connection.prepareStatement(
-            "SELECT workflowContext FROM " + WORKFLOW_TABLE + " where workflowId = ?"
-            );
-
-    workflowPS = 
-        connection.prepareStatement(
-            "INSERT INTO " +
-                WORKFLOW_TABLE +
-                " (" +
-                "workflowId, " +
-                "workflowName, " +
-                "workflowContext, " +
-                "userName, " +
-                "startTime, " +
-                "lastUpdateTime, " +
-                "duration, " +
-                "numJobsTotal, " +
-                "numJobsCompleted" +
-                ") " +
-                "VALUES" +
-                " (?, ?, ?, ?, ?, ?, 0, ?, 0)"
-            );
-    
-    workflowUpdateTimePS =
-        connection.prepareStatement(
-            "UPDATE " +
-                WORKFLOW_TABLE +
-                " SET " +
-                "workflowContext = ?, " +
-                "numJobsTotal = ?, " +
-                "lastUpdateTime = ?, " +
-                "duration = ? - (SELECT startTime FROM " +
-                WORKFLOW_TABLE +
-                " WHERE workflowId = ?) " +
-                "WHERE workflowId = ?"
-            );
-    
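-    // Recomputes the workflow's aggregates (completed-job count and I/O
-    // byte totals) from its successfully finished jobs in one statement.
-    // Note: the UPDATE ... FROM (subselect) form appears to assume
-    // PostgreSQL-style syntax and is not portable to every database.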
-    workflowUpdateNumCompletedPS =
-        connection.prepareStatement(
-            "UPDATE " +
-                WORKFLOW_TABLE +
-                " SET " +
-                "lastUpdateTime = ?, " +
-                "duration = ? - (SELECT startTime FROM " +
-                WORKFLOW_TABLE +
-                " WHERE workflowId = selectid), " +
-                "numJobsCompleted = rows, " +
-                "inputBytes = input, " +
-                "outputBytes = output " +
-            "FROM (SELECT count(*) as rows, sum(inputBytes) as input, " +
-                "sum(outputBytes) as output, workflowId as selectid FROM " +
-                JOB_TABLE +
-                " WHERE workflowId = (SELECT workflowId FROM " +
-                JOB_TABLE +
-                " WHERE jobId = ?) AND status = 'SUCCESS' " +
-                "GROUP BY workflowId) as jobsummary " +
-            "WHERE workflowId = selectid"
-            );
-    
-    // JobFinishedEvent
-
-    PreparedStatement jobFinishedPrepStmnt = 
-        connection.prepareStatement(
-            "UPDATE " +
-                JOB_TABLE +
-                " SET " +
-                "finishTime = ?, " +
-                "finishedMaps = ?, " +
-                "finishedReduces= ?, " +
-                "failedMaps = ?, " +
-                "failedReduces = ?, " +
-                "inputBytes = ?, " +
-                "outputBytes = ? " +
-                "WHERE " +
-                "jobId = ?" 
-            );
-    entitySqlMap.put(JobFinishedEvent.class, jobFinishedPrepStmnt);
-
-    // JobInitedEvent
-    
-    PreparedStatement jobInitedPrepStmnt = 
-        connection.prepareStatement(
-            "UPDATE " +
-                JOB_TABLE +
-                " SET " +
-                "launchTime = ?, " +
-                "maps = ?, " +
-                "reduces = ?, " +
-                "status = ? "+
-                "WHERE " +
-                "jobId = ?" 
-            );
-    entitySqlMap.put(JobInitedEvent.class, jobInitedPrepStmnt);
-
-    // JobStatusChangedEvent
-    
-    PreparedStatement jobStatusChangedPrepStmnt = 
-        connection.prepareStatement(
-            "UPDATE " +
-                JOB_TABLE +
-                " SET " +
-                "status = ? "+
-                "WHERE " +
-                "jobId = ?" 
-            );
-    entitySqlMap.put(JobStatusChangedEvent.class, jobStatusChangedPrepStmnt);
-
-    // JobInfoChangedEvent
-    
-    PreparedStatement jobInfoChangedPrepStmnt = 
-        connection.prepareStatement(
-            "UPDATE " +
-                JOB_TABLE +
-                " SET " +
-                "submitTime = ?, " +
-                "launchTime = ? " +
-                "WHERE " +
-                "jobId = ?" 
-            );
-    entitySqlMap.put(JobInfoChangeEvent.class, jobInfoChangedPrepStmnt);
-
-    // JobUnsuccessfulCompletionEvent
-    PreparedStatement jobUnsuccessfulPrepStmnt = 
-        connection.prepareStatement(
-            "UPDATE " +
-                JOB_TABLE +
-                " SET " +
-                "finishTime = ?, " +
-                "finishedMaps = ?, " +
-                "finishedReduces = ?, " +
-                "status = ? " +
-                "WHERE " +
-                "jobId = ?" 
-            );
-    entitySqlMap.put(
-        JobUnsuccessfulCompletionEvent.class, jobUnsuccessfulPrepStmnt);
-
-    // Job update at the end
-    jobEndUpdate =
-        connection.prepareStatement(
-            "UPDATE " +
-              JOB_TABLE +
-              " SET " +
-              " mapsRuntime = (" +
-                "SELECT " +
-                "SUM(" + 
-                      TASKATTEMPT_TABLE + ".finishTime" +  " - " + 
-                      TASKATTEMPT_TABLE + ".startTime" +
-                		  ")" +
-                " FROM " +
-                TASKATTEMPT_TABLE + 
-                " WHERE " +
-                TASKATTEMPT_TABLE + ".jobId = " + JOB_TABLE + ".jobId " +
-                " AND " +
-                TASKATTEMPT_TABLE + ".taskType = ?)" +
-              ", " +
-              " reducesRuntime = (" +
-                "SELECT SUM(" + 
-                             TASKATTEMPT_TABLE + ".finishTime" +  " - " + 
-                             TASKATTEMPT_TABLE + ".startTime" +
-                		        ")" +
-              	" FROM " +
-                TASKATTEMPT_TABLE + 
-                " WHERE " +
-                TASKATTEMPT_TABLE + ".jobId = " + JOB_TABLE + ".jobId " +
-                " AND " +
-                TASKATTEMPT_TABLE + ".taskType = ?) " +
-              " WHERE " +
-                 "jobId = ?"
-            );
-  }
-  
-  private void initializeTaskPreparedStatements() throws SQLException {
-
-    /** 
-     * Task events
-     */
-
-    // TaskStartedEvent 
-    
-    PreparedStatement taskStartedPrepStmnt =
-        connection.prepareStatement(
-            "INSERT INTO " +
-                TASK_TABLE +
-                " (" +
-                "jobId, " +
-                "taskType, " +
-                "splits, " +
-                "startTime, " +
-                "taskId" +
-                ") " +
-                "VALUES (?, ?, ?, ?, ?)"
-            );
-    entitySqlMap.put(TaskStartedEvent.class, taskStartedPrepStmnt);
-    
-    // TaskFinishedEvent
-    
-    PreparedStatement taskFinishedPrepStmnt =
-        connection.prepareStatement(
-            "UPDATE " +
-                TASK_TABLE +
-                " SET " +
-                "jobId = ?, " +
-                "taskType = ?, " +
-                "status = ?, " +
-                "finishTime = ? " +
-                " WHERE " +
-                "taskId = ?"
-            );
-    entitySqlMap.put(TaskFinishedEvent.class, taskFinishedPrepStmnt);
-
-    // TaskFailedEvent
-
-    PreparedStatement taskFailedPrepStmnt =
-        connection.prepareStatement(
-            "UPDATE " + 
-                TASK_TABLE + 
-                " SET " +
-                "jobId = ?, " +
-                "taskType = ?, " +
-                "status = ?, " +
-                "finishTime = ?, " +
-                "error = ?, " +
-                "failedAttempt = ? " +
-                "WHERE " +
-                "taskId = ?"
-            );
-    entitySqlMap.put(TaskFailedEvent.class, taskFailedPrepStmnt);
-  }
-
-  private void initializeTaskAttemptPreparedStatements() throws SQLException {
-
-    /**
-     * TaskAttempt events
-     */
-
-    // TaskAttemptStartedEvent
-    
-    PreparedStatement taskAttemptStartedPrepStmnt =
-        connection.prepareStatement(
-            "INSERT INTO " +
-                TASKATTEMPT_TABLE +
-                " (" +
-                "jobId, " +
-                "taskId, " +
-                "taskType, " +
-                "startTime, " +
-                "taskTracker, " +
-                "locality, " +
-                "avataar, " +
-                "taskAttemptId" +
-                ") " +
-                "VALUES (?, ?, ?, ?, ?, ?, ?, ?)"
-            );
-    entitySqlMap.put(
-        TaskAttemptStartedEvent.class, taskAttemptStartedPrepStmnt);
-
-    // TaskAttemptFinishedEvent
-    
-    PreparedStatement taskAttemptFinishedPrepStmnt =
-        connection.prepareStatement(
-            "UPDATE " +
-                TASKATTEMPT_TABLE +
-                " SET " +
-                "jobId = ?, " +
-                "taskId = ?, " +
-                "taskType = ?, " +
-                "finishTime = ?, " +
-                "status = ?, " +
-                "taskTracker = ? " +
-                " WHERE " +
-                "taskAttemptId = ?"
-            );
-    entitySqlMap.put(
-        TaskAttemptFinishedEvent.class, taskAttemptFinishedPrepStmnt);
-
-    // TaskAttemptUnsuccessfulEvent
-    
-    PreparedStatement taskAttemptUnsuccessfulPrepStmnt =
-        connection.prepareStatement(
-            "UPDATE " +
-                TASKATTEMPT_TABLE +
-                " SET " +
-                "jobId = ?, " +
-                "taskId = ?, " +
-                "taskType = ?, " +
-                "finishTime = ?, " +
-                "status = ?, " +
-                "taskTracker = ?, " +
-                "error = ? " +
-                " WHERE " +
-                "taskAttemptId = ?"
-            );
-    entitySqlMap.put(
-        TaskAttemptUnsuccessfulCompletionEvent.class, 
-        taskAttemptUnsuccessfulPrepStmnt);
-
-    // MapAttemptFinishedEvent
-    
-    PreparedStatement mapAttemptFinishedPrepStmnt =
-        connection.prepareStatement(
-            "UPDATE " +
-                TASKATTEMPT_TABLE +
-                " SET " +
-                "jobId = ?, " +
-                "taskId = ?, " +
-                "taskType = ?, " +
-                "mapFinishTime = ?, " +
-                "finishTime = ?, " +
-                "inputBytes = ?, " +
-                "outputBytes = ?, " +
-                "status = ?, " +
-                "taskTracker = ? " +
-                " WHERE " +
-                "taskAttemptId = ?"
-            );
-    entitySqlMap.put(
-        MapAttemptFinishedEvent.class, mapAttemptFinishedPrepStmnt);
-
-    // ReduceAttemptFinishedEvent
-    
-    PreparedStatement reduceAttemptFinishedPrepStmnt =
-        connection.prepareStatement(
-            "UPDATE " +
-                TASKATTEMPT_TABLE +
-                " SET " +
-                "jobId = ?, " +
-                "taskId = ?, " +
-                "taskType = ?, " +
-                "shuffleFinishTime = ?, " +
-                "sortFinishTime = ?, " +
-                "finishTime = ?, " +
-                "inputBytes = ?, " +
-                "outputBytes = ?, " +
-                "status = ?, " +
-                "taskTracker = ? " +
-                " WHERE " +
-                "taskAttemptId = ?"
-            );
-    entitySqlMap.put(
-        ReduceAttemptFinishedEvent.class, reduceAttemptFinishedPrepStmnt);
-  }
-
-  private void doUpdates(LoggingEvent originalEvent,
-      Object parsedEvent) throws SQLException {
-    Class<?> eventClass = parsedEvent.getClass();
-    
-    PreparedStatement entityPS = entitySqlMap.get(eventClass);
-    if (entityPS == null) {
-      LOG.debug("No prepared statement for " + eventClass);
-      return;
-    }
-  
-    if (eventClass == JobSubmittedEvent.class) {
-      processJobSubmittedEvent(entityPS, workflowSelectPS, workflowPS, 
-          workflowUpdateTimePS, originalEvent, 
-          (JobSubmittedEvent)parsedEvent);
-    } else if (eventClass == JobFinishedEvent.class) {
-      processJobFinishedEvent(entityPS, workflowUpdateNumCompletedPS,
-          originalEvent, (JobFinishedEvent)parsedEvent);
-    } else if (eventClass == JobInitedEvent.class){
-      processJobInitedEvent(entityPS, 
-          originalEvent, (JobInitedEvent)parsedEvent);
-    } else if (eventClass == JobStatusChangedEvent.class) {
-      processJobStatusChangedEvent(entityPS,
-          originalEvent, (JobStatusChangedEvent)parsedEvent);
-    } else if (eventClass == JobInfoChangeEvent.class) {
-      processJobInfoChangeEvent(entityPS, 
-          originalEvent, (JobInfoChangeEvent)parsedEvent);
-    } else if (eventClass == JobUnsuccessfulCompletionEvent.class) {
-      processJobUnsuccessfulEvent(entityPS, 
-          originalEvent, (JobUnsuccessfulCompletionEvent)parsedEvent);
-    } else if (eventClass == TaskStartedEvent.class) {
-      processTaskStartedEvent(entityPS, 
-          originalEvent, (TaskStartedEvent)parsedEvent);
-    } else if (eventClass == TaskFinishedEvent.class) {
-      processTaskFinishedEvent(entityPS, 
-          originalEvent, (TaskFinishedEvent)parsedEvent);
-    } else if (eventClass == TaskFailedEvent.class) {
-      processTaskFailedEvent(entityPS, 
-          originalEvent, (TaskFailedEvent)parsedEvent);
-    } else if (eventClass == TaskAttemptStartedEvent.class) {
-      processTaskAttemptStartedEvent(entityPS, 
-          originalEvent, (TaskAttemptStartedEvent)parsedEvent);
-    } else if (eventClass == TaskAttemptFinishedEvent.class) {
-      processTaskAttemptFinishedEvent(entityPS, 
-          originalEvent, (TaskAttemptFinishedEvent)parsedEvent);
-    } else if (eventClass == TaskAttemptUnsuccessfulCompletionEvent.class) {
-      processTaskAttemptUnsuccessfulEvent(entityPS, 
-          originalEvent, (TaskAttemptUnsuccessfulCompletionEvent)parsedEvent);
-    } else if (eventClass == MapAttemptFinishedEvent.class) {
-      processMapAttemptFinishedEvent(entityPS, 
-          originalEvent, (MapAttemptFinishedEvent)parsedEvent);
-    } else if (eventClass == ReduceAttemptFinishedEvent.class) {
-      processReduceAttemptFinishedEvent(entityPS, 
-          originalEvent, (ReduceAttemptFinishedEvent)parsedEvent);
-    }
-  }
-  
-  private void updateJobStatsAtFinish(String jobId) {
-    try {
-      jobEndUpdate.setString(1, "MAP");
-      jobEndUpdate.setString(2, "REDUCE");
-      jobEndUpdate.setString(3, jobId);
-      jobEndUpdate.executeUpdate();
-    } catch (SQLException sqle) {
-      LOG.info("Failed to update mapsRuntime/reducesRuntime for " + jobId, 
-          sqle);
-    }
-  }
-  
-  private static WorkflowContext generateWorkflowContext(
-      JobSubmittedEvent historyEvent) {
-    WorkflowDag wfDag = new WorkflowDag();
-    WorkflowDagEntry wfDagEntry = new WorkflowDagEntry();
-    wfDagEntry.setSource("X");
-    wfDag.addEntry(wfDagEntry);
-    
-    WorkflowContext wc = new WorkflowContext();
-    wc.setWorkflowId(historyEvent.getJobId().toString().replace("job_", "mr_"));
-    wc.setWorkflowName(historyEvent.getJobName());
-    wc.setWorkflowEntityName("X");
-    wc.setWorkflowDag(wfDag);
-    return wc;
-  }
-  
-  // this is based on the regex in org.apache.hadoop.tools.rumen.ParsedLine
-  // except this assumes the format "key"="value" so that both key and value
-  // are quoted and may contain escaped characters
-  private static final Pattern adjPattern = 
-      Pattern.compile("\"([^\"\\\\]*+(?:\\\\.[^\"\\\\]*+)*+)\"" + "=" + 
-          "\"([^\"\\\\]*+(?:\\\\.[^\"\\\\]*+)*+)\" ");
-  
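-  // Builds the WorkflowContext from the submission event's workflow
-  // properties; when no workflowId was supplied, falls back to a synthetic
-  // single-node workflow derived from the job id.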
-  public static WorkflowContext buildWorkflowContext(JobSubmittedEvent historyEvent) {
-    String workflowId = historyEvent.getWorkflowId()
-        .replace("\\", "");
-    if (workflowId.isEmpty())
-      return generateWorkflowContext(historyEvent);
-    String workflowName = historyEvent.getWorkflowName()
-        .replace("\\", "");
-    String workflowNodeName = historyEvent.getWorkflowNodeName()
-        .replace("\\", "");
-    String workflowAdjacencies = StringUtils.unEscapeString(
-        historyEvent.getWorkflowAdjacencies(),
-        StringUtils.ESCAPE_CHAR, new char[] {'"', '=', '.'});
-    WorkflowContext context = new WorkflowContext();
-    context.setWorkflowId(workflowId);
-    context.setWorkflowName(workflowName);
-    context.setWorkflowEntityName(workflowNodeName);
-    WorkflowDag dag = new WorkflowDag();
-    Matcher matcher = adjPattern.matcher(workflowAdjacencies);
-
-    while(matcher.find()){
-      WorkflowDagEntry dagEntry = new WorkflowDagEntry();
-      dagEntry.setSource(matcher.group(1).replace("\\", ""));
-      String[] values = StringUtils.getStrings(
-          matcher.group(2).replace("\\", ""));
-      if (values != null) {
-        for (String target : values) {
-          dagEntry.addTarget(target);
-        }
-      }
-      dag.addEntry(dagEntry);
-    }
-    if (dag.getEntries().isEmpty()) {
-      WorkflowDagEntry wfDagEntry = new WorkflowDagEntry();
-      wfDagEntry.setSource(workflowNodeName);
-      dag.addEntry(wfDagEntry);
-    }
-    context.setWorkflowDag(dag);
-    return context;
-  }
-  
-  public static void mergeEntries(Map<String, Set<String>> edges, List<WorkflowDagEntry> entries) {
-    if (entries == null)
-      return;
-    for (WorkflowDagEntry entry : entries) {
-      if (!edges.containsKey(entry.getSource()))
-        edges.put(entry.getSource(), new TreeSet<String>());
-      Set<String> targets = edges.get(entry.getSource());
-      targets.addAll(entry.getTargets());
-    }
-  }
-  
-  public static WorkflowDag constructMergedDag(WorkflowContext workflowContext, WorkflowContext existingWorkflowContext) {
-    Map<String, Set<String>> edges = new TreeMap<String, Set<String>>();
-    if (existingWorkflowContext.getWorkflowDag() != null)
-      mergeEntries(edges, existingWorkflowContext.getWorkflowDag().getEntries());
-    if (workflowContext.getWorkflowDag() != null)
-      mergeEntries(edges, workflowContext.getWorkflowDag().getEntries());
-    WorkflowDag mergedDag = new WorkflowDag();
-    for (Entry<String,Set<String>> edge : edges.entrySet()) {
-      WorkflowDagEntry entry = new WorkflowDagEntry();
-      entry.setSource(edge.getKey());
-      entry.getTargets().addAll(edge.getValue());
-      mergedDag.addEntry(entry);
-    }
-    return mergedDag;
-  }
-  
-  private static WorkflowContext getSanitizedWorkflow(WorkflowContext workflowContext, WorkflowContext existingWorkflowContext) {
-    WorkflowContext sanitizedWC = new WorkflowContext();
-    if (existingWorkflowContext == null) {
-      sanitizedWC.setWorkflowDag(workflowContext.getWorkflowDag());
-      sanitizedWC.setParentWorkflowContext(workflowContext.getParentWorkflowContext());
-    } else {
-      sanitizedWC.setWorkflowDag(constructMergedDag(existingWorkflowContext, workflowContext));
-      sanitizedWC.setParentWorkflowContext(existingWorkflowContext.getParentWorkflowContext());
-    }
-    return sanitizedWC;
-  }
-  
-  private static String getWorkflowString(WorkflowContext sanitizedWC) {
-    String sanitizedWCString = null;
-    try {
-      ObjectMapper om = new ObjectMapper();
-      sanitizedWCString = om.writeValueAsString(sanitizedWC);
-    } catch (IOException e) {
-      LOG.warn("Failed to serialize workflow context to JSON", e);
-      sanitizedWCString = "";
-    }
-    return sanitizedWCString;
-  }
-  
-  private void processJobSubmittedEvent(
-      PreparedStatement jobPS, 
-      PreparedStatement workflowSelectPS, PreparedStatement workflowPS, 
-      PreparedStatement workflowUpdateTimePS, LoggingEvent logEvent, 
-      JobSubmittedEvent historyEvent) {
-
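-    // A job submission both inserts the job row and creates or updates the
-    // owning workflow row: select the workflow first, insert it if absent,
-    // otherwise merge the DAGs and update the existing row in place.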
-    try {
-      String jobId = historyEvent.getJobId().toString();
-      jobPS.setString(1, jobId);
-      jobPS.setString(2, historyEvent.getJobName());
-      jobPS.setString(3, historyEvent.getUserName());
-      jobPS.setString(4, historyEvent.getJobConfPath());
-      jobPS.setString(5, historyEvent.getJobQueueName());
-      jobPS.setLong(6, historyEvent.getSubmitTime());
-      
-      WorkflowContext workflowContext = buildWorkflowContext(historyEvent);
-      
-      // Get workflow information
-      boolean insertWorkflow = false;
-      String existingContextString = null;
-      
-      ResultSet rs = null;
-      try {
-        workflowSelectPS.setString(1, workflowContext.getWorkflowId());
-        workflowSelectPS.execute();
-        rs = workflowSelectPS.getResultSet();
-        if (rs.next()) {
-          existingContextString = rs.getString(1);
-        } else {
-          insertWorkflow = true;
-        }
-      } catch (SQLException sqle) {
-        LOG.warn("workflow select failed with: ", sqle);
-        insertWorkflow = false;
-      } finally {
-        try {
-          if (rs != null)
-            rs.close();
-        } catch (SQLException e) {
-          LOG.error("Exception while closing ResultSet", e);
-        }
-      }
-
-      // Insert workflow 
-      if (insertWorkflow) {
-        workflowPS.setString(1, workflowContext.getWorkflowId());
-        workflowPS.setString(2, workflowContext.getWorkflowName());
-        workflowPS.setString(3, getWorkflowString(getSanitizedWorkflow(workflowContext, null)));
-        workflowPS.setString(4, historyEvent.getUserName());
-        workflowPS.setLong(5, historyEvent.getSubmitTime());
-        workflowPS.setLong(6, historyEvent.getSubmitTime());
-        workflowPS.setLong(7, workflowContext.getWorkflowDag().size());
-        workflowPS.executeUpdate();
-        LOG.debug("Successfully inserted workflowId = " + 
-            workflowContext.getWorkflowId());
-      } else {
-        ObjectMapper om = new ObjectMapper();
-        WorkflowContext existingWorkflowContext = null;
-        try {
-          if (existingContextString != null)
-            existingWorkflowContext = om.readValue(existingContextString.getBytes(), WorkflowContext.class);
-        } catch (IOException e) {
-          LOG.warn("Couldn't read existing workflow context for " + workflowContext.getWorkflowId(), e);
-        }
-        
-        WorkflowContext sanitizedWC = getSanitizedWorkflow(workflowContext, existingWorkflowContext);
-        workflowUpdateTimePS.setString(1, getWorkflowString(sanitizedWC));
-        workflowUpdateTimePS.setLong(2, sanitizedWC.getWorkflowDag().size());
-        workflowUpdateTimePS.setLong(3, historyEvent.getSubmitTime());
-        workflowUpdateTimePS.setLong(4, historyEvent.getSubmitTime());
-        workflowUpdateTimePS.setString(5, workflowContext.getWorkflowId());
-        workflowUpdateTimePS.setString(6, workflowContext.getWorkflowId());
-        workflowUpdateTimePS.executeUpdate();
-        LOG.debug("Successfully updated workflowId = " + 
-            workflowContext.getWorkflowId());
-      }
-
-      // Insert job
-      jobPS.setString(7, workflowContext.getWorkflowId());
-      jobPS.setString(8, workflowContext.getWorkflowEntityName());
-      jobPS.executeUpdate();
-      LOG.debug("Successfully inserted job = " + jobId + 
-          " and workflowId = " + workflowContext.getWorkflowId());
-
-    } catch (SQLException sqle) {
-      LOG.info("Failed to store " + historyEvent.getEventType() + " for job " + 
-          historyEvent.getJobId() + " into " + JOB_TABLE, sqle);
-    } catch (Exception e) {
-      LOG.info("Failed to store " + historyEvent.getEventType() + " for job " + 
-          historyEvent.getJobId() + " into " + JOB_TABLE, e);
-    }
-  }
-  
-  private void processJobFinishedEvent(
-      PreparedStatement entityPS,
-      PreparedStatement workflowUpdateNumCompletedPS,
-      LoggingEvent logEvent, JobFinishedEvent historyEvent) {
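-    // inputBytes comes from the map-side HDFS_BYTES_READ counters; for
-    // output, switch to the reduce counters only when the job actually ran
-    // reduces (a map-only job's output is counted from the map counters).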
-    Counters counters = historyEvent.getMapCounters();
-    long inputBytes = 0;
-    if (counters != null) {
-      for (CounterGroup group : counters) {
-        for (Counter counter : group) {
-          if (counter.getName().equals("HDFS_BYTES_READ"))
-            inputBytes += counter.getValue();
-        }
-      }
-    }
-    if (historyEvent.getFinishedReduces() != 0)
-      counters = historyEvent.getReduceCounters();
-    long outputBytes = 0;
-    if (counters != null) {
-      for (CounterGroup group : counters) {
-        for (Counter counter : group) {
-          if (counter.getName().equals("HDFS_BYTES_WRITTEN"))
-            outputBytes += counter.getValue();
-        }
-      }
-    }
-    try {
-      entityPS.setLong(1, historyEvent.getFinishTime());
-      entityPS.setInt(2, historyEvent.getFinishedMaps());
-      entityPS.setInt(3, historyEvent.getFinishedReduces());
-      entityPS.setInt(4, historyEvent.getFailedMaps());
-      entityPS.setInt(5, historyEvent.getFailedReduces());
-      entityPS.setLong(6, inputBytes);
-      entityPS.setLong(7, outputBytes);
-      entityPS.setString(8, historyEvent.getJobid().toString());
-      entityPS.executeUpdate();
-      // job finished events always have success status
-      workflowUpdateNumCompletedPS.setLong(1, historyEvent.getFinishTime());
-      workflowUpdateNumCompletedPS.setLong(2, historyEvent.getFinishTime());
-      workflowUpdateNumCompletedPS.setString(3, historyEvent.getJobid().toString());
-      workflowUpdateNumCompletedPS.executeUpdate();
-    } catch (SQLException sqle) {
-      LOG.info("Failed to store " + historyEvent.getEventType() + " for job " + 
-          historyEvent.getJobid() + " into " + JOB_TABLE, sqle);
-    }
-    
-    updateJobStatsAtFinish(historyEvent.getJobid().toString());
-
-  }
-
-  private void processJobInitedEvent(
-      PreparedStatement entityPS, 
-      LoggingEvent logEvent, JobInitedEvent historyEvent) {
-    try {
-      entityPS.setLong(1, historyEvent.getLaunchTime());
-      entityPS.setInt(2, historyEvent.getTotalMaps());
-      entityPS.setInt(3, historyEvent.getTotalReduces());
-      entityPS.setString(4, historyEvent.getStatus());
-      entityPS.setString(5, historyEvent.getJobId().toString());
-      entityPS.executeUpdate();
-    } catch (SQLException sqle) {
-      LOG.info("Failed to store " + historyEvent.getEventType() + " for job " + 
-          historyEvent.getJobId() + " into " + JOB_TABLE, sqle);
-    }
-  }
-
-  private void processJobStatusChangedEvent(
-      PreparedStatement entityPS, 
-      LoggingEvent logEvent, JobStatusChangedEvent historyEvent) {
-    try {
-      entityPS.setString(1, historyEvent.getStatus());
-      entityPS.setString(2, historyEvent.getJobId().toString());
-      entityPS.executeUpdate();
-    } catch (SQLException sqle) {
-      LOG.info("Failed to store " + historyEvent.getEventType() + " for job " + 
-          historyEvent.getJobId() + " into " + JOB_TABLE, sqle);
-    }
-  }
-
-  private void processJobInfoChangeEvent(
-      PreparedStatement entityPS, 
-      LoggingEvent logEvent, JobInfoChangeEvent historyEvent) {
-    try {
-      entityPS.setLong(1, historyEvent.getSubmitTime());
-      entityPS.setLong(2, historyEvent.getLaunchTime());
-      entityPS.setString(3, historyEvent.getJobId().toString());
-      entityPS.executeUpdate();
-    } catch (SQLException sqle) {
-      LOG.info("Failed to store " + historyEvent.getEventType() + " for job " + 
-          historyEvent.getJobId() + " into " + JOB_TABLE, sqle);
-    }
-  }
-
-  private void processJobUnsuccessfulEvent(
-      PreparedStatement entityPS, 
-      LoggingEvent logEvent, JobUnsuccessfulCompletionEvent historyEvent) {
-    try {
-      entityPS.setLong(1, historyEvent.getFinishTime());
-      entityPS.setLong(2, historyEvent.getFinishedMaps());
-      entityPS.setLong(3, historyEvent.getFinishedReduces());
-      entityPS.setString(4, historyEvent.getStatus());
-      entityPS.setString(5, historyEvent.getJobId().toString());
-      entityPS.executeUpdate();
-    } catch (SQLException sqle) {
-      LOG.info("Failed to store " + historyEvent.getEventType() + " for job " + 
-          historyEvent.getJobId() + " into " + JOB_TABLE, sqle);
-    }
-    
-    updateJobStatsAtFinish(historyEvent.getJobId().toString());
-  }
-
-  private void processTaskStartedEvent(PreparedStatement entityPS,
-      LoggingEvent logEvent, TaskStartedEvent historyEvent) {
-    try {
-      entityPS.setString(1, 
-          historyEvent.getTaskId().getJobID().toString());
-      entityPS.setString(2, historyEvent.getTaskType().toString());
-      entityPS.setString(3, historyEvent.getSplitLocations());
-      entityPS.setLong(4, historyEvent.getStartTime());
-      entityPS.setString(5, historyEvent.getTaskId().toString());
-      entityPS.executeUpdate();
-    } catch (SQLException sqle) {
-      LOG.info("Failed to store " + historyEvent.getEventType() + " for task " + 
-          historyEvent.getTaskId() + " into " + TASK_TABLE, sqle);
-    }
-  }
-
-  private void processTaskFinishedEvent(
-      PreparedStatement entityPS,  
-      LoggingEvent logEvent, TaskFinishedEvent historyEvent) {
-    try {
-      entityPS.setString(1, 
-          historyEvent.getTaskId().getJobID().toString());
-      entityPS.setString(2, historyEvent.getTaskType().toString());
-      entityPS.setString(3, historyEvent.getTaskStatus());
-      entityPS.setLong(4, historyEvent.getFinishTime());
-      entityPS.setString(5, historyEvent.getTaskId().toString());
-      entityPS.executeUpdate();
-    } catch (SQLException sqle) {
-      LOG.info("Failed to store " + historyEvent.getEventType() + " for task " + 
-          historyEvent.getTaskId() + " into " + TASK_TABLE, sqle);
-    }
-  }
-
-  private void processTaskFailedEvent(
-      PreparedStatement entityPS,  
-      LoggingEvent logEvent, TaskFailedEvent historyEvent) {
-    try {
-      entityPS.setString(1, 
-          historyEvent.getTaskId().getJobID().toString());
-      entityPS.setString(2, historyEvent.getTaskType().toString());
-      entityPS.setString(3, historyEvent.getTaskStatus());
-      entityPS.setLong(4, historyEvent.getFinishTime());
-      entityPS.setString(5, historyEvent.getError());
-      if (historyEvent.getFailedAttemptID() != null) {
-        entityPS.setString(6, historyEvent.getFailedAttemptID().toString());
-      } else {
-        entityPS.setString(6, "task_na");
-      }
-      entityPS.setString(7, historyEvent.getTaskId().toString());
-      entityPS.executeUpdate();
-    } catch (SQLException sqle) {
-      LOG.info("Failed to store " + historyEvent.getEventType() + " for task " + 
-          historyEvent.getTaskId() + " into " + TASK_TABLE, sqle);
-    }
-  }
-
-  private void processTaskAttemptStartedEvent(
-      PreparedStatement entityPS,  
-      LoggingEvent logEvent, TaskAttemptStartedEvent historyEvent) {
-    try {
-      entityPS.setString(1, 
-          historyEvent.getTaskId().getJobID().toString());
-      entityPS.setString(2, historyEvent.getTaskId().toString());
-      entityPS.setString(3, historyEvent.getTaskType().toString());
-      entityPS.setLong(4, historyEvent.getStartTime());
-      entityPS.setString(5, historyEvent.getTrackerName());
-      entityPS.setString(6, historyEvent.getLocality().toString());
-      entityPS.setString(7, historyEvent.getAvataar().toString());
-      entityPS.setString(8, historyEvent.getTaskAttemptId().toString());
-      entityPS.executeUpdate();
-    } catch (SQLException sqle) {
-      LOG.info("Failed to store " + historyEvent.getEventType() + 
-          " for taskAttempt " + historyEvent.getTaskAttemptId() + 
-          " into " + TASKATTEMPT_TABLE, sqle);
-    }
-  }
-  
-  private void processTaskAttemptFinishedEvent(
-      PreparedStatement entityPS,  
-      LoggingEvent logEvent, TaskAttemptFinishedEvent historyEvent) {
-    
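-    // Map and reduce attempts are persisted via MapAttemptFinishedEvent /
-    // ReduceAttemptFinishedEvent, which carry extra timing and counter
-    // columns; only other task types (e.g. setup/cleanup) land here.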
-    if (historyEvent.getTaskType() == TaskType.MAP || 
-        historyEvent.getTaskType() == TaskType.REDUCE) {
-      LOG.debug("Ignoring TaskAttemptFinishedEvent for " + 
-        historyEvent.getTaskType());
-      return;
-    }
-    
-    try {
-      entityPS.setString(1, 
-          historyEvent.getTaskId().getJobID().toString());
-      entityPS.setString(2, historyEvent.getTaskId().toString());
-      entityPS.setString(3, historyEvent.getTaskType().toString());
-      entityPS.setLong(4, historyEvent.getFinishTime());
-      entityPS.setString(5, historyEvent.getTaskStatus());
-      entityPS.setString(6, historyEvent.getHostname());
-      entityPS.setString(7, historyEvent.getAttemptId().toString());
-      entityPS.executeUpdate();
-    } catch (SQLException sqle) {
-      LOG.info("Failed to store " + historyEvent.getEventType() + 
-          " for taskAttempt " + historyEvent.getAttemptId() + 
-          " into " + TASKATTEMPT_TABLE, sqle);
-    }
-  }
-  
-  private void processTaskAttemptUnsuccessfulEvent(
-      PreparedStatement entityPS,  
-      LoggingEvent logEvent, 
-      TaskAttemptUnsuccessfulCompletionEvent historyEvent) {
-    try {
-      entityPS.setString(1, 
-          historyEvent.getTaskId().getJobID().toString());
-      entityPS.setString(2, historyEvent.getTaskId().toString());
-      entityPS.setString(3, historyEvent.getTaskType().toString());
-      entityPS.setLong(4, historyEvent.getFinishTime());
-      entityPS.setString(5, historyEvent.getTaskStatus());
-      entityPS.setString(6, historyEvent.getHostname());
-      entityPS.setString(7, historyEvent.getError());
-      entityPS.setString(8, historyEvent.getTaskAttemptId().toString());
-      entityPS.executeUpdate();
-    } catch (SQLException sqle) {
-      LOG.info("Failed to store " + historyEvent.getEventType() + 
-          " for taskAttempt " + historyEvent.getTaskAttemptId() + 
-          " into " + TASKATTEMPT_TABLE, sqle);
-    }
-  }
-  
-  private void processMapAttemptFinishedEvent(
-      PreparedStatement entityPS,  
-      LoggingEvent logEvent, MapAttemptFinishedEvent historyEvent) {
-    
-    if (historyEvent.getTaskType() != TaskType.MAP) {
-      LOG.debug("Ignoring MapAttemptFinishedEvent for " + 
-        historyEvent.getTaskType());
-      return;
-    }
-    
-    long[] ioBytes = getInputOutputBytes(historyEvent.getCounters());
-
-    try {
-      entityPS.setString(1, 
-          historyEvent.getTaskId().getJobID().toString());
-      entityPS.setString(2, historyEvent.getTaskId().toString());
-      entityPS.setString(3, historyEvent.getTaskType().toString());
-      entityPS.setLong(4, historyEvent.getMapFinishTime());
-      entityPS.setLong(5, historyEvent.getFinishTime());
-      entityPS.setLong(6, ioBytes[0]);
-      entityPS.setLong(7, ioBytes[1]);
-      entityPS.setString(8, historyEvent.getTaskStatus());
-      entityPS.setString(9, historyEvent.getHostname());
-      entityPS.setString(10, historyEvent.getAttemptId().toString());
-      entityPS.executeUpdate();
-    } catch (SQLException sqle) {
-      LOG.info("Failed to store " + historyEvent.getEventType() + 
-          " for taskAttempt " + historyEvent.getAttemptId() + 
-          " into " + TASKATTEMPT_TABLE, sqle);
-    }
-  }
-  
-  
-  private void processReduceAttemptFinishedEvent(
-      PreparedStatement entityPS,  
-      LoggingEvent logEvent, ReduceAttemptFinishedEvent historyEvent) {
-    if (historyEvent.getTaskType() != TaskType.REDUCE) {
-      LOG.debug("Ignoring ReduceAttemptFinishedEvent for " + 
-        historyEvent.getTaskType());
-      return;
-    }
-    
-    long[] ioBytes = getInputOutputBytes(historyEvent.getCounters());
-
-    try {
-      entityPS.setString(1, 
-          historyEvent.getTaskId().getJobID().toString());
-      entityPS.setString(2, historyEvent.getTaskId().toString());
-      entityPS.setString(3, historyEvent.getTaskType().toString());
-      entityPS.setLong(4, historyEvent.getShuffleFinishTime());
-      entityPS.setLong(5, historyEvent.getSortFinishTime());
-      entityPS.setLong(6, historyEvent.getFinishTime());
-      entityPS.setLong(7, ioBytes[0]);
-      entityPS.setLong(8, ioBytes[1]);
-      entityPS.setString(9, historyEvent.getTaskStatus());
-      entityPS.setString(10, historyEvent.getHostname());
-      entityPS.setString(11, historyEvent.getAttemptId().toString());
-      entityPS.executeUpdate();
-    } catch (SQLException sqle) {
-      LOG.info("Failed to store " + historyEvent.getEventType() + 
-          " for taskAttempt " + historyEvent.getAttemptId() + 
-          " into " + TASKATTEMPT_TABLE, sqle);
-    }
-  }
-  
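-  // Sums a task attempt's FileSystemCounters into an
-  // {inputBytes, outputBytes} pair, counting both HDFS and local file I/O.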
-  public static long[] getInputOutputBytes(JhCounters counters) {
-    long inputBytes = 0;
-    long outputBytes = 0;
-    if (counters != null) {
-      for (JhCounterGroup counterGroup : counters.groups) {
-        if (counterGroup.name.equals("FileSystemCounters")) {
-          for (JhCounter counter : counterGroup.counts) {
-            if (counter.name.equals("HDFS_BYTES_READ") || 
-                counter.name.equals("FILE_BYTES_READ"))
-              inputBytes += counter.value;
-            else if (counter.name.equals("HDFS_BYTES_WRITTEN") || 
-                counter.name.equals("FILE_BYTES_WRITTEN"))
-              outputBytes += counter.value;
-          }
-        }
-      }
-    }
-    return new long[]{inputBytes, outputBytes};
-  }
-  
-  
-  @Override
-  public void update(LoggingEvent originalEvent, Object parsedEvent) 
-      throws IOException {
-    try {
-      doUpdates(originalEvent, parsedEvent);
-    } catch (SQLException sqle) {
-      throw new IOException(sqle);
-    }
-  }
-
-}
diff --git a/branch-1.2/contrib/ambari-log4j/src/main/resources/ambari.schema b/branch-1.2/contrib/ambari-log4j/src/main/resources/ambari.schema
deleted file mode 100644
index bd418a2..0000000
--- a/branch-1.2/contrib/ambari-log4j/src/main/resources/ambari.schema
+++ /dev/null
@@ -1,88 +0,0 @@
---
--- Licensed to the Apache Software Foundation (ASF) under one
--- or more contributor license agreements.  See the NOTICE file
--- distributed with this work for additional information
--- regarding copyright ownership.  The ASF licenses this file
--- to you under the Apache License, Version 2.0 (the
--- "License"); you may not use this file except in compliance
--- with the License.  You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
---
-CREATE TABLE workflow (
-  workflowId TEXT, workflowName TEXT,
-  parentWorkflowId TEXT,  
-  workflowContext TEXT, userName TEXT,
-  startTime BIGINT, lastUpdateTime BIGINT,
-  numJobsTotal INTEGER, numJobsCompleted INTEGER,
-  PRIMARY KEY (workflowId),
-  FOREIGN KEY (parentWorkflowId) REFERENCES workflow(workflowId)
-);
-
-CREATE TABLE job (
-  jobId TEXT, workflowId TEXT, jobName TEXT, workflowEntityName TEXT,
-  userName TEXT, queue TEXT, acls TEXT, confPath TEXT, 
-  submitTime BIGINT, launchTime BIGINT, finishTime BIGINT, 
-  maps INTEGER, reduces INTEGER, status TEXT, priority TEXT, 
-  finishedMaps INTEGER, finishedReduces INTEGER, 
-  failedMaps INTEGER, failedReduces INTEGER, 
-  mapsRuntime BIGINT, reducesRuntime BIGINT,
-  mapCounters TEXT, reduceCounters TEXT, jobCounters TEXT, 
-  inputBytes BIGINT, outputBytes BIGINT,
-  PRIMARY KEY(jobId),
-  FOREIGN KEY(workflowId) REFERENCES workflow(workflowId)
-);
-
-CREATE TABLE task (
-  taskId TEXT, jobId TEXT, taskType TEXT, splits TEXT, 
-  startTime BIGINT, finishTime BIGINT, status TEXT, error TEXT, counters TEXT, 
-  failedAttempt TEXT, 
-  PRIMARY KEY(taskId), 
-  FOREIGN KEY(jobId) REFERENCES job(jobId)
-);
-
-CREATE TABLE taskAttempt (
-  taskAttemptId TEXT, taskId TEXT, jobId TEXT, taskType TEXT, taskTracker TEXT, 
-  startTime BIGINT, finishTime BIGINT, 
-  mapFinishTime BIGINT, shuffleFinishTime BIGINT, sortFinishTime BIGINT, 
-  locality TEXT, avataar TEXT, 
-  status TEXT, error TEXT, counters TEXT, 
-  inputBytes BIGINT, outputBytes BIGINT,
-  PRIMARY KEY(taskAttemptId), 
-  FOREIGN KEY(jobId) REFERENCES job(jobId), 
-  FOREIGN KEY(taskId) REFERENCES task(taskId)
-); 
-
-CREATE TABLE hdfsEvent (
-  timestamp BIGINT,
-  userName TEXT,
-  clientIP TEXT,
-  operation TEXT,
-  srcPath TEXT,
-  dstPath TEXT,
-  permissions TEXT
-);
-
-CREATE TABLE mapreduceEvent (
-  timestamp BIGINT,
-  userName TEXT,
-  clientIP TEXT,
-  operation TEXT,
-  target TEXT,
-  result TEXT,
-  description TEXT,
-  permissions TEXT
-);
-
-CREATE TABLE clusterEvent (
-  timestamp BIGINT, 
-  service TEXT, status TEXT, 
-  error TEXT, data TEXT , 
-  host TEXT, rack TEXT
-);
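
For illustration, a hedged JDBC sketch of a reduce-attempt insert into the
taskAttempt table defined above. The column order is an assumption inferred
from the parameter order in processReduceAttemptFinishedEvent earlier in this
diff, not copied from the original source.

    import java.sql.Connection;
    import java.sql.PreparedStatement;
    import java.sql.SQLException;

    public class TaskAttemptInsertSketch {
      // Hypothetical column order; only the column names are taken from the
      // schema above.
      private static final String INSERT_SQL =
          "INSERT INTO taskAttempt (jobId, taskId, taskType, shuffleFinishTime, "
          + "sortFinishTime, finishTime, inputBytes, outputBytes, status, "
          + "taskTracker, taskAttemptId) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";

      static void insertReduceAttempt(Connection conn, String jobId, String taskId,
          long shuffleFinish, long sortFinish, long finish, long inBytes,
          long outBytes, String status, String host, String attemptId)
          throws SQLException {
        try (PreparedStatement ps = conn.prepareStatement(INSERT_SQL)) {
          ps.setString(1, jobId);
          ps.setString(2, taskId);
          ps.setString(3, "REDUCE");
          ps.setLong(4, shuffleFinish);
          ps.setLong(5, sortFinish);
          ps.setLong(6, finish);
          ps.setLong(7, inBytes);
          ps.setLong(8, outBytes);
          ps.setString(9, status);
          ps.setString(10, host);
          ps.setString(11, attemptId);
          ps.executeUpdate();
        }
      }
    }
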
diff --git a/branch-1.2/contrib/ambari-log4j/src/test/java/org/apache/ambari/TestJobHistoryParsing.java b/branch-1.2/contrib/ambari-log4j/src/test/java/org/apache/ambari/TestJobHistoryParsing.java
deleted file mode 100644
index ec85f13..0000000
--- a/branch-1.2/contrib/ambari-log4j/src/test/java/org/apache/ambari/TestJobHistoryParsing.java
+++ /dev/null
@@ -1,185 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari;
-
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import junit.framework.TestCase;
-
-import org.apache.ambari.eventdb.model.WorkflowContext;
-import org.apache.ambari.eventdb.model.WorkflowDag.WorkflowDagEntry;
-import org.apache.ambari.log4j.hadoop.mapreduce.jobhistory.MapReduceJobHistoryUpdater;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapred.JobHistory;
-import org.apache.hadoop.mapreduce.JobID;
-import org.apache.hadoop.tools.rumen.JobSubmittedEvent;
-import org.apache.hadoop.util.StringUtils;
-
-/**
- * Tests workflow-context recovery from escaped job history log lines.
- */
-public class TestJobHistoryParsing extends TestCase {
-  static final char LINE_DELIMITER_CHAR = '.';
-  static final char[] charsToEscape = new char[] {'"', '=', LINE_DELIMITER_CHAR};
-  private static final char DELIMITER = ' ';
-  private static final String ID = "WORKFLOW_ID";
-  private static final String NAME = "WORKFLOW_NAME";
-  private static final String NODE = "WORKFLOW_NODE_NAME";
-  private static final String ADJ = "WORKFLOW_ADJACENCIES";
-  private static final String ID_PROP = "mapreduce.workflow.id";
-  private static final String NAME_PROP = "mapreduce.workflow.name";
-  private static final String NODE_PROP = "mapreduce.workflow.node.name";
-  private static final String ADJ_PROP = "mapreduce.workflow.adjacency";
-  
-  public void test1() {
-    Map<String,String[]> adj = new HashMap<String,String[]>();
-    adj.put("10", new String[] {"20", "30"});
-    adj.put("20", new String[] {"30"});
-    adj.put("30", new String[] {});
-    test("id_0-1", "something.name", "10", adj);
-  }
-  
-  public void test2() {
-    Map<String,String[]> adj = new HashMap<String,String[]>();
-    adj.put("1=0", new String[] {"2 0", "3\"0."});
-    adj.put("2 0", new String[] {"3\"0."});
-    adj.put("3\"0.", new String[] {});
-    test("id_= 0-1", "something.name", "1=0", adj);
-  }
-  
-  public void test3() {
-    String s = "`~!@#$%^&*()-_=+[]{}|,.<>/?;:'\"";
-    test(s, s, s, new HashMap<String,String[]>());
-  }
-  
-  public void test4() {
-    Map<String,String[]> adj = new HashMap<String,String[]>();
-    adj.put("X", new String[] {});
-    test("", "jobName", "X", adj);
-  }
-  
-  public void test(String workflowId, String workflowName, String workflowNodeName, Map<String,String[]> adjacencies) {
-    Configuration conf = new Configuration();
-    setProperties(conf, workflowId, workflowName, workflowNodeName, adjacencies);
-    String log = log("JOB", new String[] {ID, NAME, NODE, ADJ},
-        new String[] {conf.get(ID_PROP), conf.get(NAME_PROP), conf.get(NODE_PROP), JobHistory.JobInfo.getWorkflowAdjacencies(conf)});
-    ParsedLine line = new ParsedLine(log);
-    JobID jobid = new JobID("id", 1);
-    JobSubmittedEvent event = new JobSubmittedEvent(jobid, workflowName, "", 0L, "", null, "", line.get(ID), line.get(NAME), line.get(NODE), line.get(ADJ));
-    WorkflowContext context = MapReduceJobHistoryUpdater.buildWorkflowContext(event);
-    
-    String resultingWorkflowId = workflowId;
-    if (workflowId.isEmpty())
-      resultingWorkflowId = jobid.toString().replace("job_", "mr_");
-    assertEquals("Didn't recover workflowId", resultingWorkflowId, context.getWorkflowId());
-    assertEquals("Didn't recover workflowName", workflowName, context.getWorkflowName());
-    assertEquals("Didn't recover workflowNodeName", workflowNodeName, context.getWorkflowEntityName());
-    
-    Map<String,String[]> resultingAdjacencies = adjacencies;
-    if (resultingAdjacencies.size() == 0) {
-      resultingAdjacencies = new HashMap<String,String[]>();
-      resultingAdjacencies.put(workflowNodeName, new String[] {});
-    }
-    assertEquals("Got incorrect number of adjacencies", resultingAdjacencies.size(), context.getWorkflowDag().getEntries().size());
-    for (WorkflowDagEntry entry : context.getWorkflowDag().getEntries()) {
-      String[] sTargets = resultingAdjacencies.get(entry.getSource());
-      assertNotNull("No original targets for " + entry.getSource(), sTargets);
-      List<String> dTargets = entry.getTargets();
-      assertEquals("Got incorrect number of targets for " + entry.getSource(), sTargets.length, dTargets.size());
-      for (int i = 0; i < sTargets.length; i++) {
-        assertEquals("Got incorrect target for " + entry.getSource(), sTargets[i], dTargets.get(i));
-      }
-    }
-  }
-  
-  private static void setProperties(Configuration conf, String workflowId, String workflowName, String workflowNodeName, Map<String,String[]> adj) {
-    conf.set(ID_PROP, workflowId);
-    conf.set(NAME_PROP, workflowName);
-    conf.set(NODE_PROP, workflowNodeName);
-    for (Entry<String,String[]> entry : adj.entrySet()) {
-      conf.setStrings(ADJ_PROP + "." + entry.getKey(), entry.getValue());
-    }
-  }
-  
-  private static String log(String recordType, String[] keys, String[] values) {
-    int length = recordType.length() + keys.length * 4 + 2;
-    for (int i = 0; i < keys.length; i++) {
-      values[i] = StringUtils.escapeString(values[i], StringUtils.ESCAPE_CHAR, charsToEscape);
-      length += values[i].length() + keys[i].length();
-    }
-    
-    // We have the length of the buffer; now construct it.
-    StringBuilder builder = new StringBuilder(length);
-    builder.append(recordType);
-    builder.append(DELIMITER);
-    for (int i = 0; i < keys.length; i++) {
-      builder.append(keys[i]);
-      builder.append("=\"");
-      builder.append(values[i]);
-      builder.append("\"");
-      builder.append(DELIMITER);
-    }
-    builder.append(LINE_DELIMITER_CHAR);
-    
-    return builder.toString();
-  }
-  
-  private static class ParsedLine {
-    static final String KEY = "(\\w+)";
-    static final String VALUE = "([^\"\\\\]*+(?:\\\\.[^\"\\\\]*+)*+)";
-    static final Pattern keyValPair = Pattern.compile(KEY + "=" + "\"" + VALUE + "\"");
-    Map<String,String> props = new HashMap<String,String>();
-    private String type;
-    
-    ParsedLine(String fullLine) {
-      int firstSpace = fullLine.indexOf(" ");
-      
-      if (firstSpace < 0) {
-        firstSpace = fullLine.length();
-      }
-      
-      if (firstSpace == 0) {
-        return; // This is a junk line of some sort
-      }
-      
-      type = fullLine.substring(0, firstSpace);
-      
-      String propValPairs = fullLine.substring(firstSpace + 1);
-      
-      Matcher matcher = keyValPair.matcher(propValPairs);
-      
-      while (matcher.find()) {
-        String key = matcher.group(1);
-        String value = matcher.group(2);
-        props.put(key, value);
-      }
-    }
-    
-    protected String getType() {
-      return type;
-    }
-    
-    protected String get(String key) {
-      return props.get(key);
-    }
-  }
-}
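
The ParsedLine helper above recovers KEY="value" pairs from an escaped job
history line. A standalone, purely illustrative demo of the same regular
expression:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class ParsedLineRegexDemo {
      public static void main(String[] args) {
        // Same pattern as ParsedLine.keyValPair: a word key, then a quoted
        // value that may contain backslash-escaped characters.
        Pattern keyValPair =
            Pattern.compile("(\\w+)=\"([^\"\\\\]*+(?:\\\\.[^\"\\\\]*+)*+)\"");
        String line =
            "JOB WORKFLOW_ID=\"id_0-1\" WORKFLOW_NAME=\"something\\.name\" .";
        Matcher matcher = keyValPair.matcher(line);
        while (matcher.find()) {
          // Prints each key and its (still-escaped) value.
          System.out.println(matcher.group(1) + " -> " + matcher.group(2));
        }
      }
    }
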
diff --git a/branch-1.2/contrib/ambari-log4j/src/test/java/org/apache/ambari/TestMapReduceJobHistoryUpdater.java b/branch-1.2/contrib/ambari-log4j/src/test/java/org/apache/ambari/TestMapReduceJobHistoryUpdater.java
deleted file mode 100644
index 6c9ed6f..0000000
--- a/branch-1.2/contrib/ambari-log4j/src/test/java/org/apache/ambari/TestMapReduceJobHistoryUpdater.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari;
-
-import java.util.List;
-
-import junit.framework.TestCase;
-
-import org.apache.ambari.eventdb.model.WorkflowContext;
-import org.apache.ambari.eventdb.model.WorkflowDag;
-import org.apache.ambari.eventdb.model.WorkflowDag.WorkflowDagEntry;
-import org.apache.ambari.log4j.hadoop.mapreduce.jobhistory.MapReduceJobHistoryUpdater;
-
-/**
- * Tests the workflow-DAG merging logic of MapReduceJobHistoryUpdater.
- */
-public class TestMapReduceJobHistoryUpdater extends TestCase {
-  public void testDagMerging() {
-    WorkflowDag dag1 = new WorkflowDag();
-    dag1.addEntry(getEntry("a", "b", "c"));
-    dag1.addEntry(getEntry("b", "d"));
-    WorkflowContext one = new WorkflowContext();
-    one.setWorkflowDag(dag1);
-    
-    WorkflowDag dag2 = new WorkflowDag();
-    dag2.addEntry(getEntry("a", "d"));
-    dag2.addEntry(getEntry("c", "e"));
-    WorkflowContext two = new WorkflowContext();
-    two.setWorkflowDag(dag2);
-    
-    WorkflowDag emptyDag = new WorkflowDag();
-    WorkflowContext three = new WorkflowContext();
-    three.setWorkflowDag(emptyDag);
-    
-    WorkflowDag mergedDag = new WorkflowDag();
-    mergedDag.addEntry(getEntry("a", "b", "c", "d"));
-    mergedDag.addEntry(getEntry("b", "d"));
-    mergedDag.addEntry(getEntry("c", "e"));
-    
-    assertEquals(mergedDag, MapReduceJobHistoryUpdater.constructMergedDag(one, two));
-    assertEquals(mergedDag, MapReduceJobHistoryUpdater.constructMergedDag(two, one));
-    
-    // test blank dag
-    assertEquals(dag1, MapReduceJobHistoryUpdater.constructMergedDag(three, one));
-    assertEquals(dag1, MapReduceJobHistoryUpdater.constructMergedDag(one, three));
-    assertEquals(dag2, MapReduceJobHistoryUpdater.constructMergedDag(three, two));
-    assertEquals(dag2, MapReduceJobHistoryUpdater.constructMergedDag(two, three));
-    
-    // test null dag
-    assertEquals(dag1, MapReduceJobHistoryUpdater.constructMergedDag(new WorkflowContext(), one));
-    assertEquals(dag1, MapReduceJobHistoryUpdater.constructMergedDag(one, new WorkflowContext()));
-    assertEquals(dag2, MapReduceJobHistoryUpdater.constructMergedDag(new WorkflowContext(), two));
-    assertEquals(dag2, MapReduceJobHistoryUpdater.constructMergedDag(two, new WorkflowContext()));
-    
-    // test same dag
-    assertEquals(dag1, MapReduceJobHistoryUpdater.constructMergedDag(one, one));
-    assertEquals(dag2, MapReduceJobHistoryUpdater.constructMergedDag(two, two));
-    assertEquals(emptyDag, MapReduceJobHistoryUpdater.constructMergedDag(three, three));
-  }
-  
-  private static WorkflowDagEntry getEntry(String source, String... targets) {
-    WorkflowDagEntry entry = new WorkflowDagEntry();
-    entry.setSource(source);
-    for (String target : targets) {
-      entry.addTarget(target);
-    }
-    return entry;
-  }
-  
-  private static void assertEquals(WorkflowDag dag1, WorkflowDag dag2) {
-    assertEquals(dag1.size(), dag2.size());
-    List<WorkflowDagEntry> entries1 = dag1.getEntries();
-    List<WorkflowDagEntry> entries2 = dag2.getEntries();
-    assertEquals(entries1.size(), entries2.size());
-    for (int i = 0; i < entries1.size(); i++) {
-      WorkflowDagEntry e1 = entries1.get(i);
-      WorkflowDagEntry e2 = entries2.get(i);
-      assertEquals(e1.getSource(), e2.getSource());
-      List<String> t1 = e1.getTargets();
-      List<String> t2 = e2.getTargets();
-      assertEquals(t1.size(), t2.size());
-      for (int j = 0; j < t1.size(); j++) {
-        assertEquals(t1.get(j), t2.get(j));
-      }
-    }
-  }
-}
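
testDagMerging above asserts that merging two workflow DAGs yields, for each
source node, the order-preserving union of the target lists from both inputs.
A plain-Map sketch of that asserted behavior follows; it illustrates the merge
semantics only and is not the actual constructMergedDag implementation.

    import java.util.Arrays;
    import java.util.LinkedHashMap;
    import java.util.LinkedHashSet;
    import java.util.List;
    import java.util.Map;

    public class DagMergeSketch {
      // Union the adjacency lists of two DAGs, preserving first-seen order of
      // both sources and targets.
      static Map<String, LinkedHashSet<String>> merge(
          Map<String, List<String>> a, Map<String, List<String>> b) {
        Map<String, LinkedHashSet<String>> merged =
            new LinkedHashMap<String, LinkedHashSet<String>>();
        for (Map<String, List<String>> dag : Arrays.asList(a, b)) {
          for (Map.Entry<String, List<String>> e : dag.entrySet()) {
            LinkedHashSet<String> targets = merged.get(e.getKey());
            if (targets == null) {
              targets = new LinkedHashSet<String>();
              merged.put(e.getKey(), targets);
            }
            targets.addAll(e.getValue());
          }
        }
        return merged;
      }

      public static void main(String[] args) {
        Map<String, List<String>> one = new LinkedHashMap<String, List<String>>();
        one.put("a", Arrays.asList("b", "c"));
        one.put("b", Arrays.asList("d"));
        Map<String, List<String>> two = new LinkedHashMap<String, List<String>>();
        two.put("a", Arrays.asList("d"));
        two.put("c", Arrays.asList("e"));
        // Matches the mergedDag in the test: a->[b, c, d], b->[d], c->[e]
        System.out.println(merge(one, two)); // {a=[b, c, d], b=[d], c=[e]}
      }
    }
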
diff --git a/branch-1.2/pass.txt b/branch-1.2/pass.txt
deleted file mode 100644
index bdd5497..0000000
--- a/branch-1.2/pass.txt
+++ /dev/null
@@ -1 +0,0 @@
-I99HVT6d4BeC3pkhKOrWj5p7412OCfUAaENpg1W4WHEs5BgaYU
\ No newline at end of file
diff --git a/branch-1.2/pom.xml b/branch-1.2/pom.xml
deleted file mode 100644
index 3d0ddeb..0000000
--- a/branch-1.2/pom.xml
+++ /dev/null
@@ -1,152 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <groupId>org.apache.ambari</groupId>
-  <artifactId>ambari</artifactId>
-  <packaging>pom</packaging>
-  <name>Ambari Main</name>
-  <version>1.2.2-SNAPSHOT</version>
-  <description>Ambari</description>
-  <properties>
-    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
-  </properties>
-  <pluginRepositories>
-    <pluginRepository>
-      <id>maven2-repository.dev.java.net</id>
-      <name>Java.net Repository for Maven</name>
-      <url>http://download.java.net/maven/2/</url>
-      <layout>default</layout>
-    </pluginRepository>
-    <pluginRepository>
-      <id>maven2-glassfish-repository.dev.java.net</id>
-      <name>Java.net Repository for Maven</name>
-      <url>http://download.java.net/maven/glassfish/</url>
-    </pluginRepository>
-  </pluginRepositories>
-  <modules>
-    <module>ambari-web</module>
-    <module>ambari-project</module>
-    <module>ambari-server</module>
-    <module>ambari-agent</module>
-  </modules>
-  <build>
-    <pluginManagement>
-      <plugins>
-        <plugin>
-          <groupId>org.apache.rat</groupId>
-          <artifactId>apache-rat-plugin</artifactId>
-          <version>0.8</version>
-        </plugin>
-        <plugin>
-          <groupId>org.apache.maven.plugins</groupId>
-          <artifactId>maven-surefire-plugin</artifactId>
-          <version>2.12</version>
-        </plugin>
-        <plugin>
-          <groupId>org.codehaus.mojo</groupId>
-          <artifactId>rpm-maven-plugin</artifactId>
-          <version>2.0.1</version>
-        </plugin>
-      </plugins>
-    </pluginManagement>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-compiler-plugin</artifactId>
-        <version>3.0</version>
-        <configuration>
-            <source>1.6</source>
-            <target>1.6</target>
-        </configuration>
-      </plugin>
-      <plugin>
-        <artifactId>maven-assembly-plugin</artifactId>
-        <configuration>
-          <descriptors>
-            <descriptor>ambari-project/src/main/assemblies/empty.xml</descriptor>
-          </descriptors>
-        </configuration>
-        <executions>
-          <execution>
-            <id>make-assembly</id>
-            <phase>package</phase>
-            <goals>
-              <goal>single</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>rpm-maven-plugin</artifactId>
-        <version>2.0.1</version>
-        <executions>
-          <execution>
-            <!-- unbinds rpm creation from maven lifecycle -->
-            <phase>none</phase>
-            <goals>
-              <goal>attached-rpm</goal>
-            </goals>
-          </execution>
-        </executions>
-        <configuration>
-          <copyright>2012, Apache Software Foundation</copyright>
-          <group>Development</group>
-          <description>Maven Recipe: RPM Package.</description>
-          <mappings/>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.rat</groupId>
-        <artifactId>apache-rat-plugin</artifactId>
-        <configuration>
-          <excludes>
-            <exclude>**/*.json</exclude>
-            <exclude>derby.log</exclude>
-            <exclude>CHANGES.txt</exclude>
-            <exclude>pass.txt</exclude>
-            <exclude>contrib/addons/test/dataServices/jmx/data/cluster_configuration.json.nohbase</exclude>
-
-            <!--IDE and GIT files-->
-            <exclude>.idea/</exclude>
-            <exclude>.git/</exclude>
-            <exclude>**/.gitignore</exclude>
-            <exclude>**/.gitattributes</exclude>
-            <!--gitignore content-->
-            <exclude>.DS_Store</exclude>
-            <exclude>.iml/</exclude>
-            <exclude>.classpath</exclude>
-            <exclude>.project</exclude>
-            <exclude>.settings</exclude>
-            <exclude>*.pyc</exclude>
-            <exclude>*.py~</exclude>
-            <exclude>.hg</exclude>
-            <exclude>.hgignore</exclude>
-            <exclude>.hgtags</exclude>
-
-
-            <!--Python Mock library (BSD license)-->
-            <exclude>ambari-common/src/test/python/mock/**</exclude>
-
-          </excludes>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
-</project>